@@ -288,6 +288,61 @@ adjust_ipv6_pktlen(struct rte_mbuf *m, const struct rte_ipv6_hdr *iph,
}
}
+#ifdef ENABLE_STATS
+/* Print interval: seconds here; main() rescales it to TSC cycles. */
+static uint64_t timer_period = 10;
+
+/* Dump per-lcore and aggregate packet counters to a cleared screen. */
+static void
+print_stats(void)
+{
+	uint64_t total_packets_dropped = 0;
+	uint64_t total_packets_tx = 0;
+	uint64_t total_packets_rx = 0;
+	unsigned int coreid;
+	float burst_percent;
+
+	/* ANSI escape sequences: clear screen, cursor to top-left */
+	const char clr[] = { 27, '[', '2', 'J', '\0' };
+	const char topLeft[] = { 27, '[', '1', ';', '1', 'H', '\0' };
+
+	printf("%s%s", clr, topLeft);
+	printf("\nCore statistics ====================================");
+
+	for (coreid = 0; coreid < RTE_MAX_LCORE; coreid++) {
+		/* skip disabled cores */
+		if (rte_lcore_is_enabled(coreid) == 0)
+			continue;
+		/* avoid 0/0 (prints NaN) before the first packet arrives */
+		burst_percent = 0;
+		if (core_statistics[coreid].rx != 0)
+			burst_percent = (float)core_statistics[coreid].burst_rx *
+				100 / core_statistics[coreid].rx;
+		printf("\nStatistics for core %u ------------------------------"
+			"\nPackets received: %20"PRIu64
+			"\nPackets sent: %24"PRIu64
+			"\nPackets dropped: %21"PRIu64
+			"\nBurst percent: %23.2f",
+			coreid,
+			core_statistics[coreid].rx,
+			core_statistics[coreid].tx,
+			core_statistics[coreid].dropped,
+			burst_percent);
+
+		total_packets_dropped += core_statistics[coreid].dropped;
+		total_packets_tx += core_statistics[coreid].tx;
+		total_packets_rx += core_statistics[coreid].rx;
+	}
+	printf("\nAggregate statistics ==============================="
+		"\nTotal packets received: %14"PRIu64
+		"\nTotal packets sent: %18"PRIu64
+		"\nTotal packets dropped: %15"PRIu64,
+		total_packets_rx,
+		total_packets_tx,
+		total_packets_dropped);
+	printf("\n====================================================\n");
+}
+#endif /* ENABLE_STATS */
+
static inline void
prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
{
@@ -351,6 +406,7 @@ prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
RTE_LOG(ERR, IPSEC, "Unsupported packet type 0x%x\n",
rte_be_to_cpu_16(eth->ether_type));
rte_pktmbuf_free(pkt);
+ core_stats_update_drop(1);
return;
}
@@ -471,6 +527,11 @@ send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
int32_t ret;
uint16_t queueid;
+#ifdef ENABLE_STATS
+	int lcore_id = rte_lcore_id();	/* stats slot for the calling lcore */
+	core_statistics[lcore_id].tx += n;	/* NOTE(review): counts submitted pkts; unsent ones are ALSO added to .dropped on burst failure — confirm tx should include them */
+#endif /* ENABLE_STATS */
+
queueid = qconf->tx_queue_id[port];
m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;
@@ -478,6 +539,9 @@ send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
ret = rte_eth_tx_burst(port, queueid, m_table, n);
if (unlikely(ret < n)) {
+#ifdef ENABLE_STATS
+ core_statistics[lcore_id].dropped += n-ret;
+#endif /* ENABLE_STATS */
do {
rte_pktmbuf_free(m_table[ret]);
} while (++ret < n);
@@ -584,18 +648,21 @@ inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip,
continue;
}
if (res == DISCARD) {
+ core_stats_update_drop(1);
rte_pktmbuf_free(m);
continue;
}
/* Only check SPI match for processed IPSec packets */
if (i < lim && ((m->ol_flags & PKT_RX_SEC_OFFLOAD) == 0)) {
+ core_stats_update_drop(1);
rte_pktmbuf_free(m);
continue;
}
sa_idx = res - 1;
if (!inbound_sa_check(sa, m, sa_idx)) {
+ core_stats_update_drop(1);
rte_pktmbuf_free(m);
continue;
}
@@ -630,8 +697,10 @@ split46_traffic(struct ipsec_traffic *trf, struct rte_mbuf *mb[], uint32_t num)
uint8_t *,
offsetof(struct ip6_hdr, ip6_nxt));
n6++;
- } else
+ } else {
+ core_stats_update_drop(1);
rte_pktmbuf_free(m);
+ }
}
trf->ip4.num = n4;
@@ -682,11 +751,12 @@ outbound_sp(struct sp_ctx *sp, struct traffic_type *ip,
for (i = 0; i < ip->num; i++) {
m = ip->pkts[i];
sa_idx = ip->res[i] - 1;
- if (ip->res[i] == DISCARD)
+ if (ip->res[i] == DISCARD) {
+ core_stats_update_drop(1);
rte_pktmbuf_free(m);
- else if (ip->res[i] == BYPASS)
+ } else if (ip->res[i] == BYPASS) {
ip->pkts[j++] = m;
- else {
+ } else {
ipsec->res[ipsec->num] = sa_idx;
ipsec->pkts[ipsec->num++] = m;
}
@@ -705,6 +775,8 @@ process_pkts_outbound(struct ipsec_ctx *ipsec_ctx,
for (i = 0; i < traffic->ipsec.num; i++)
rte_pktmbuf_free(traffic->ipsec.pkts[i]);
+ core_stats_update_drop(traffic->ipsec.num);
+
traffic->ipsec.num = 0;
outbound_sp(ipsec_ctx->sp4_ctx, &traffic->ip4, &traffic->ipsec);
@@ -745,12 +817,14 @@ process_pkts_inbound_nosp(struct ipsec_ctx *ipsec_ctx,
/* Drop any IPv4 traffic from unprotected ports */
for (i = 0; i < traffic->ip4.num; i++)
rte_pktmbuf_free(traffic->ip4.pkts[i]);
+ core_stats_update_drop(traffic->ip4.num);
traffic->ip4.num = 0;
/* Drop any IPv6 traffic from unprotected ports */
for (i = 0; i < traffic->ip6.num; i++)
rte_pktmbuf_free(traffic->ip6.pkts[i]);
+ core_stats_update_drop(traffic->ip6.num);
traffic->ip6.num = 0;
@@ -788,6 +862,7 @@ process_pkts_outbound_nosp(struct ipsec_ctx *ipsec_ctx,
/* Drop any IPsec traffic from protected ports */
for (i = 0; i < traffic->ipsec.num; i++)
rte_pktmbuf_free(traffic->ipsec.pkts[i]);
+ core_stats_update_drop(traffic->ipsec.num);
n = 0;
@@ -901,6 +976,7 @@ route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
}
if ((pkt_hop & RTE_LPM_LOOKUP_SUCCESS) == 0) {
+ core_stats_update_drop(1);
rte_pktmbuf_free(pkts[i]);
continue;
}
@@ -953,6 +1029,7 @@ route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
}
if (pkt_hop == -1) {
+ core_stats_update_drop(1);
rte_pktmbuf_free(pkts[i]);
continue;
}
@@ -1099,6 +1176,9 @@ ipsec_poll_mode_worker(void)
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
/ US_PER_S * BURST_TX_DRAIN_US;
struct lcore_rx_queue *rxql;
+#ifdef ENABLE_STATS
+ uint64_t timer_tsc = 0;
+#endif /* ENABLE_STATS */
prev_tsc = 0;
lcore_id = rte_lcore_id();
@@ -1159,6 +1239,19 @@ ipsec_poll_mode_worker(void)
drain_tx_buffers(qconf);
drain_crypto_buffers(qconf);
prev_tsc = cur_tsc;
+#ifdef ENABLE_STATS
+ if (lcore_id == rte_get_master_lcore()) {
+ /* advance the timer */
+ timer_tsc += diff_tsc;
+
+ /* if timer has reached its timeout */
+ if (unlikely(timer_tsc >= timer_period)) {
+ print_stats();
+ /* reset the timer */
+ timer_tsc = 0;
+ }
+ }
+#endif /* ENABLE_STATS */
}
for (i = 0; i < qconf->nb_rx_queue; ++i) {
@@ -1169,6 +1262,12 @@ ipsec_poll_mode_worker(void)
nb_rx = rte_eth_rx_burst(portid, queueid,
pkts, MAX_PKT_BURST);
+#ifdef ENABLE_STATS
+ core_statistics[lcore_id].rx += nb_rx;
+ if (nb_rx == MAX_PKT_BURST)
+ core_statistics[lcore_id].burst_rx += nb_rx;
+#endif /* ENABLE_STATS */
+
if (nb_rx > 0)
process_pkts(qconf, pkts, nb_rx, portid);
@@ -2747,6 +2846,11 @@ main(int32_t argc, char **argv)
signal(SIGINT, signal_handler);
signal(SIGTERM, signal_handler);
+#ifdef ENABLE_STATS
+ /* convert to number of cycles */
+ timer_period *= rte_get_timer_hz();
+#endif /* ENABLE_STATS */
+
/* initialize event helper configuration */
eh_conf = eh_conf_init();
if (eh_conf == NULL)
@@ -6,6 +6,8 @@
#include <stdbool.h>
+//#define ENABLE_STATS
+
#define NB_SOCKETS 4
#define MAX_PKT_BURST 32
@@ -46,6 +46,17 @@
#define IP6_VERSION (6)
+#ifdef ENABLE_STATS
+struct ipsec_core_statistics {
+	uint64_t tx;
+	uint64_t rx;
+	uint64_t dropped;
+	uint64_t burst_rx;
+} __rte_cache_aligned;
+
+/* Indexed by rte_lcore_id() — must be sized RTE_MAX_LCORE, not RTE_MAX_ETHPORTS (out-of-bounds when lcore count exceeds port count); NOTE(review): non-extern definition in a header — make extern and define in one .c */
+struct ipsec_core_statistics core_statistics[RTE_MAX_LCORE];
+#endif /* ENABLE_STATS */
+
struct rte_crypto_xform;
struct ipsec_xform;
struct rte_mbuf;
@@ -416,4 +427,15 @@ check_flow_params(uint16_t fdir_portid, uint8_t fdir_qid);
int
create_ipsec_esp_flow(struct ipsec_sa *sa);
+static inline void
+core_stats_update_drop(int n)	/* record n packets dropped on the current lcore; no-op when stats are disabled */
+{
+#ifdef ENABLE_STATS
+	int lcore_id = rte_lcore_id();	/* stats are kept per-lcore */
+	core_statistics[lcore_id].dropped += n;
+#else
+	RTE_SET_USED(n);	/* suppress unused-parameter warning */
+#endif /* ENABLE_STATS */
+}
+
#endif /* __IPSEC_H__ */
@@ -24,6 +24,11 @@ free_pkts(struct rte_mbuf *mb[], uint32_t n)
{
uint32_t i;
+#ifdef ENABLE_STATS
+	int lcore_id = rte_lcore_id();	/* stats slot for the calling lcore */
+	core_statistics[lcore_id].dropped += n;	/* NOTE(review): counts every freed mbuf as a drop — confirm free_pkts() is only called on drop paths */
+#endif /* ENABLE_STATS */
+
for (i = 0; i != n; i++)
rte_pktmbuf_free(mb[i]);
}