[v3] examples/ipsec-secgw: add per core packet stats

Message ID 1588769253-10405-1-git-send-email-anoobj@marvell.com (mailing list archive)
State Superseded, archived
Delegated to: akhil goyal
Headers
Series [v3] examples/ipsec-secgw: add per core packet stats |

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/iol-intel-Performance success Performance Testing PASS
ci/iol-nxp-Performance success Performance Testing PASS
ci/iol-mellanox-Performance success Performance Testing PASS
ci/travis-robot warning Travis build: failed
ci/Intel-compilation fail Compilation issues
ci/iol-testing success Testing PASS

Commit Message

Anoob Joseph May 6, 2020, 12:47 p.m. UTC
  Adding per core packet handling stats to analyze traffic distribution
when multiple cores are engaged.

Since aggregating the packet stats across cores would affect
performance, keeping the feature disabled using compile time flags.

Signed-off-by: Anoob Joseph <anoobj@marvell.com>
---

v3:
* Added wrapper functions for updating rx, tx & dropped counts
* Updated free_pkts() to have stats updated internally
* Introduced similar free_pkt() function which updates stats and frees one packet
* Moved all inline functions and macros to ipsec-secgw.h
* Made STATS_INTERVAL a macro to control the interval of the stats update.
  STATS_INTERVAL = 0 would disable the feature.

v2:
* Added lookup failure cases to drop count

 examples/ipsec-secgw/ipsec-secgw.c   | 113 ++++++++++++++++++++++++++++-------
 examples/ipsec-secgw/ipsec-secgw.h   |  68 +++++++++++++++++++++
 examples/ipsec-secgw/ipsec.c         |  20 +++----
 examples/ipsec-secgw/ipsec_process.c |  11 +---
 4 files changed, 171 insertions(+), 41 deletions(-)
  

Comments

Ananyev, Konstantin May 7, 2020, 4:12 p.m. UTC | #1
> @@ -1099,6 +1151,10 @@ ipsec_poll_mode_worker(void)
>  	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
>  			/ US_PER_S * BURST_TX_DRAIN_US;
>  	struct lcore_rx_queue *rxql;
> +#if (STATS_INTERVAL > 0)
> +	const uint64_t timer_period = STATS_INTERVAL * rte_get_timer_hz();
> +	uint64_t timer_tsc = 0;
> +#endif /* STATS_INTERVAL */
> 
>  	prev_tsc = 0;
>  	lcore_id = rte_lcore_id();
> @@ -1159,6 +1215,19 @@ ipsec_poll_mode_worker(void)
>  			drain_tx_buffers(qconf);
>  			drain_crypto_buffers(qconf);
>  			prev_tsc = cur_tsc;
> +#if (STATS_INTERVAL > 0)
> +			if (lcore_id == rte_get_master_lcore()) {
> +				/* advance the timer */
> +				timer_tsc += diff_tsc;
> +
> +				/* if timer has reached its timeout */
> +				if (unlikely(timer_tsc >= timer_period)) {
> +					print_stats();
> +					/* reset the timer */
> +					timer_tsc = 0;
> +				}
> +			}
> +#endif /* STATS_INTERVAL */

I still don't understand why to do it in data-path thread.
As I said in previous comments, in DPDK there is a control
thread that can be used for such house-keeping tasks.
Why not to use it (via rte_alarm or so) and keep data-path
threads less affected.

>  		}
> 
>  		for (i = 0; i < qconf->nb_rx_queue; ++i) {
> @@ -1169,8 +1238,10 @@ ipsec_poll_mode_worker(void)
>  			nb_rx = rte_eth_rx_burst(portid, queueid,
>  					pkts, MAX_PKT_BURST);
> 
> -			if (nb_rx > 0)
> +			if (nb_rx > 0) {
> +				core_stats_update_rx(nb_rx);
>  				process_pkts(qconf, pkts, nb_rx, portid);
> +			}
> 
>  			/* dequeue and process completed crypto-ops */
>  			if (is_unprotected_port(portid))
> diff --git a/examples/ipsec-secgw/ipsec-secgw.h b/examples/ipsec-secgw/ipsec-secgw.h
> index 4b53cb5..5b3561f 100644
> --- a/examples/ipsec-secgw/ipsec-secgw.h
> +++ b/examples/ipsec-secgw/ipsec-secgw.h
> @@ -6,6 +6,8 @@
> 
>  #include <stdbool.h>
> 
> +#define STATS_INTERVAL 0

Shouldn't it be:
#ifndef STATS_INTERVAL
#define STATS_INTERVAL	0
#endif
?

To allow the user to specify the stats interval via EXTRA_CFLAGS='-DSTATS_INTERVAL=10'
or so.

> +
>  #define NB_SOCKETS 4
> 
>  #define MAX_PKT_BURST 32
> @@ -69,6 +71,17 @@ struct ethaddr_info {
>  	uint64_t src, dst;
>  };
> 
> +#if (STATS_INTERVAL > 0)
> +struct ipsec_core_statistics {
> +	uint64_t tx;
> +	uint64_t rx;
> +	uint64_t dropped;
> +	uint64_t burst_rx;
> +} __rte_cache_aligned;
> +
> +struct ipsec_core_statistics core_statistics[RTE_MAX_LCORE];
> +#endif /* STATS_INTERVAL */
> +
>  extern struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS];
> 
>  /* Port mask to identify the unprotected ports */
> @@ -85,4 +98,59 @@ is_unprotected_port(uint16_t port_id)
>  	return unprotected_port_mask & (1 << port_id);
>  }
> 
> +static inline void
> +core_stats_update_rx(int n)
> +{
> +#if (STATS_INTERVAL > 0)
> +	int lcore_id = rte_lcore_id();
> +	core_statistics[lcore_id].rx += n;
> +	if (n == MAX_PKT_BURST)
> +		core_statistics[lcore_id].burst_rx += n;
> +#else
> +	RTE_SET_USED(n);
> +#endif /* STATS_INTERVAL */
> +}
> +
> +static inline void
> +core_stats_update_tx(int n)
> +{
> +#if (STATS_INTERVAL > 0)
> +	int lcore_id = rte_lcore_id();
> +	core_statistics[lcore_id].tx += n;
> +#else
> +	RTE_SET_USED(n);
> +#endif /* STATS_INTERVAL */
> +}
> +
> +static inline void
> +core_stats_update_drop(int n)
> +{
> +#if (STATS_INTERVAL > 0)
> +	int lcore_id = rte_lcore_id();
> +	core_statistics[lcore_id].dropped += n;
> +#else
> +	RTE_SET_USED(n);
> +#endif /* STATS_INTERVAL */
> +}
> +
> +/* helper routine to free bulk of packets */
> +static inline void
> +free_pkts(struct rte_mbuf *mb[], uint32_t n)
> +{
> +	uint32_t i;
> +
> +	for (i = 0; i != n; i++)
> +		rte_pktmbuf_free(mb[i]);
> +
> +	core_stats_update_drop(n);
> +}
> +
> +/* helper routine to free single packet */
> +static inline void
> +free_pkt(struct rte_mbuf *mb)
> +{
> +	rte_pktmbuf_free(mb);
> +	core_stats_update_drop(1);

Probably just:
free_pkts(&mb, 1);
?

> +}
> +
>  #endif /* _IPSEC_SECGW_H_ */
  
Ananyev, Konstantin May 8, 2020, 8:08 a.m. UTC | #2
> 
> +#if (STATS_INTERVAL > 0)
> +
> +/* Print out statistics on packet distribution */
> +static void
> +print_stats(void)
> +{
> +	uint64_t total_packets_dropped, total_packets_tx, total_packets_rx;
> +	unsigned int coreid;
> +	float burst_percent;
> +
> +	total_packets_dropped = 0;
> +	total_packets_tx = 0;
> +	total_packets_rx = 0;
> +
> +	const char clr[] = { 27, '[', '2', 'J', '\0' };
> +
> +	/* Clear screen and move to top left */
> +	printf("%s", clr);
> +
> +	printf("\nCore statistics ====================================");
> +
> +	for (coreid = 0; coreid < RTE_MAX_LCORE; coreid++) {
> +		/* skip disabled cores */
> +		if (rte_lcore_is_enabled(coreid) == 0)
> +			continue;
> +		burst_percent = (float)(core_statistics[coreid].burst_rx * 100)/
> +					core_statistics[coreid].rx;
> +		printf("\nStatistics for core %u ------------------------------"
> +			   "\nPackets received: %20"PRIu64
> +			   "\nPackets sent: %24"PRIu64
> +			   "\nPackets dropped: %21"PRIu64
> +			   "\nBurst percent: %23.2f",
> +			   coreid,
> +			   core_statistics[coreid].rx,
> +			   core_statistics[coreid].tx,
> +			   core_statistics[coreid].dropped,
> +			   burst_percent);


As one more comment:
Can we also collect number of calls and display average pkt/call
(for both rx and tx)?

> +
> +		total_packets_dropped += core_statistics[coreid].dropped;
> +		total_packets_tx += core_statistics[coreid].tx;
> +		total_packets_rx += core_statistics[coreid].rx;
> +	}
> +	printf("\nAggregate statistics ==============================="
> +		   "\nTotal packets received: %14"PRIu64
> +		   "\nTotal packets sent: %18"PRIu64
> +		   "\nTotal packets dropped: %15"PRIu64,
> +		   total_packets_rx,
> +		   total_packets_tx,
> +		   total_packets_dropped);
> +	printf("\n====================================================\n");
> +}
> +#endif /* STATS_INTERVAL */
> +
>  static inline void
>  prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
>  {
> @@ -333,7 +386,7 @@ prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
> 
>  		/* drop packet when IPv6 header exceeds first segment length */
>  		if (unlikely(l3len > pkt->data_len)) {
> -			rte_pktmbuf_free(pkt);
> +			free_pkt(pkt);
>  			return;
>  		}
> 
> @@ -350,7 +403,7 @@ prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
>  		/* Unknown/Unsupported type, drop the packet */
>  		RTE_LOG(ERR, IPSEC, "Unsupported packet type 0x%x\n",
>  			rte_be_to_cpu_16(eth->ether_type));
> -		rte_pktmbuf_free(pkt);
> +		free_pkt(pkt);
>  		return;
>  	}
> 
> @@ -477,9 +530,12 @@ send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
>  	prepare_tx_burst(m_table, n, port, qconf);
> 
>  	ret = rte_eth_tx_burst(port, queueid, m_table, n);
> +
> +	core_stats_update_tx(ret);
> +
>  	if (unlikely(ret < n)) {
>  		do {
> -			rte_pktmbuf_free(m_table[ret]);
> +			free_pkt(m_table[ret]);
>  		} while (++ret < n);
>  	}
> 
> @@ -525,7 +581,7 @@ send_fragment_packet(struct lcore_conf *qconf, struct rte_mbuf *m,
>  			"error code: %d\n",
>  			__func__, m->pkt_len, rte_errno);
> 
> -	rte_pktmbuf_free(m);
> +	free_pkt(m);
>  	return len;
>  }
> 
> @@ -550,7 +606,7 @@ send_single_packet(struct rte_mbuf *m, uint16_t port, uint8_t proto)
>  	} else if (frag_tbl_sz > 0)
>  		len = send_fragment_packet(qconf, m, port, proto);
>  	else
> -		rte_pktmbuf_free(m);
> +		free_pkt(m);
> 
>  	/* enough pkts to be sent */
>  	if (unlikely(len == MAX_PKT_BURST)) {
> @@ -584,19 +640,19 @@ inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip,
>  			continue;
>  		}
>  		if (res == DISCARD) {
> -			rte_pktmbuf_free(m);
> +			free_pkt(m);
>  			continue;
>  		}
> 
>  		/* Only check SPI match for processed IPSec packets */
>  		if (i < lim && ((m->ol_flags & PKT_RX_SEC_OFFLOAD) == 0)) {
> -			rte_pktmbuf_free(m);
> +			free_pkt(m);
>  			continue;
>  		}
> 
>  		sa_idx = res - 1;
>  		if (!inbound_sa_check(sa, m, sa_idx)) {
> -			rte_pktmbuf_free(m);
> +			free_pkt(m);
>  			continue;
>  		}
>  		ip->pkts[j++] = m;
> @@ -631,7 +687,7 @@ split46_traffic(struct ipsec_traffic *trf, struct rte_mbuf *mb[], uint32_t num)
>  					offsetof(struct ip6_hdr, ip6_nxt));
>  			n6++;
>  		} else
> -			rte_pktmbuf_free(m);
> +			free_pkt(m);
>  	}
> 
>  	trf->ip4.num = n4;
> @@ -683,7 +739,7 @@ outbound_sp(struct sp_ctx *sp, struct traffic_type *ip,
>  		m = ip->pkts[i];
>  		sa_idx = ip->res[i] - 1;
>  		if (ip->res[i] == DISCARD)
> -			rte_pktmbuf_free(m);
> +			free_pkt(m);
>  		else if (ip->res[i] == BYPASS)
>  			ip->pkts[j++] = m;
>  		else {
> @@ -702,8 +758,7 @@ process_pkts_outbound(struct ipsec_ctx *ipsec_ctx,
>  	uint16_t idx, nb_pkts_out, i;
> 
>  	/* Drop any IPsec traffic from protected ports */
> -	for (i = 0; i < traffic->ipsec.num; i++)
> -		rte_pktmbuf_free(traffic->ipsec.pkts[i]);
> +	free_pkts(traffic->ipsec.pkts, traffic->ipsec.num);
> 
>  	traffic->ipsec.num = 0;
> 
> @@ -743,14 +798,12 @@ process_pkts_inbound_nosp(struct ipsec_ctx *ipsec_ctx,
>  	uint32_t nb_pkts_in, i, idx;
> 
>  	/* Drop any IPv4 traffic from unprotected ports */
> -	for (i = 0; i < traffic->ip4.num; i++)
> -		rte_pktmbuf_free(traffic->ip4.pkts[i]);
> +	free_pkts(traffic->ip4.pkts, traffic->ip4.num);
> 
>  	traffic->ip4.num = 0;
> 
>  	/* Drop any IPv6 traffic from unprotected ports */
> -	for (i = 0; i < traffic->ip6.num; i++)
> -		rte_pktmbuf_free(traffic->ip6.pkts[i]);
> +	free_pkts(traffic->ip6.pkts, traffic->ip6.num);
> 
>  	traffic->ip6.num = 0;
> 
> @@ -786,8 +839,7 @@ process_pkts_outbound_nosp(struct ipsec_ctx *ipsec_ctx,
>  	struct ip *ip;
> 
>  	/* Drop any IPsec traffic from protected ports */
> -	for (i = 0; i < traffic->ipsec.num; i++)
> -		rte_pktmbuf_free(traffic->ipsec.pkts[i]);
> +	free_pkts(traffic->ipsec.pkts, traffic->ipsec.num);
> 
>  	n = 0;
> 
> @@ -901,7 +953,7 @@ route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
>  		}
> 
>  		if ((pkt_hop & RTE_LPM_LOOKUP_SUCCESS) == 0) {
> -			rte_pktmbuf_free(pkts[i]);
> +			free_pkt(pkts[i]);
>  			continue;
>  		}
>  		send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IP);
> @@ -953,7 +1005,7 @@ route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
>  		}
> 
>  		if (pkt_hop == -1) {
> -			rte_pktmbuf_free(pkts[i]);
> +			free_pkt(pkts[i]);
>  			continue;
>  		}
>  		send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IPV6);
> @@ -1099,6 +1151,10 @@ ipsec_poll_mode_worker(void)
>  	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
>  			/ US_PER_S * BURST_TX_DRAIN_US;
>  	struct lcore_rx_queue *rxql;
> +#if (STATS_INTERVAL > 0)
> +	const uint64_t timer_period = STATS_INTERVAL * rte_get_timer_hz();
> +	uint64_t timer_tsc = 0;
> +#endif /* STATS_INTERVAL */
> 
>  	prev_tsc = 0;
>  	lcore_id = rte_lcore_id();
> @@ -1159,6 +1215,19 @@ ipsec_poll_mode_worker(void)
>  			drain_tx_buffers(qconf);
>  			drain_crypto_buffers(qconf);
>  			prev_tsc = cur_tsc;
> +#if (STATS_INTERVAL > 0)
> +			if (lcore_id == rte_get_master_lcore()) {
> +				/* advance the timer */
> +				timer_tsc += diff_tsc;
> +
> +				/* if timer has reached its timeout */
> +				if (unlikely(timer_tsc >= timer_period)) {
> +					print_stats();
> +					/* reset the timer */
> +					timer_tsc = 0;
> +				}
> +			}
> +#endif /* STATS_INTERVAL */
>  		}
> 
>  		for (i = 0; i < qconf->nb_rx_queue; ++i) {
> @@ -1169,8 +1238,10 @@ ipsec_poll_mode_worker(void)
>  			nb_rx = rte_eth_rx_burst(portid, queueid,
>  					pkts, MAX_PKT_BURST);
> 
> -			if (nb_rx > 0)
> +			if (nb_rx > 0) {
> +				core_stats_update_rx(nb_rx);
>  				process_pkts(qconf, pkts, nb_rx, portid);
> +			}
> 
>  			/* dequeue and process completed crypto-ops */
>  			if (is_unprotected_port(portid))
> diff --git a/examples/ipsec-secgw/ipsec-secgw.h b/examples/ipsec-secgw/ipsec-secgw.h
> index 4b53cb5..5b3561f 100644
> --- a/examples/ipsec-secgw/ipsec-secgw.h
> +++ b/examples/ipsec-secgw/ipsec-secgw.h
> @@ -6,6 +6,8 @@
> 
>  #include <stdbool.h>
> 
> +#define STATS_INTERVAL 0
> +
>  #define NB_SOCKETS 4
> 
>  #define MAX_PKT_BURST 32
> @@ -69,6 +71,17 @@ struct ethaddr_info {
>  	uint64_t src, dst;
>  };
> 
> +#if (STATS_INTERVAL > 0)
> +struct ipsec_core_statistics {
> +	uint64_t tx;
> +	uint64_t rx;
> +	uint64_t dropped;
> +	uint64_t burst_rx;
> +} __rte_cache_aligned;
> +
> +struct ipsec_core_statistics core_statistics[RTE_MAX_LCORE];
> +#endif /* STATS_INTERVAL */
> +
>  extern struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS];
> 
>  /* Port mask to identify the unprotected ports */
> @@ -85,4 +98,59 @@ is_unprotected_port(uint16_t port_id)
>  	return unprotected_port_mask & (1 << port_id);
>  }
> 
> +static inline void
> +core_stats_update_rx(int n)
> +{
> +#if (STATS_INTERVAL > 0)
> +	int lcore_id = rte_lcore_id();
> +	core_statistics[lcore_id].rx += n;
> +	if (n == MAX_PKT_BURST)
> +		core_statistics[lcore_id].burst_rx += n;
> +#else
> +	RTE_SET_USED(n);
> +#endif /* STATS_INTERVAL */
> +}
> +
> +static inline void
> +core_stats_update_tx(int n)
> +{
> +#if (STATS_INTERVAL > 0)
> +	int lcore_id = rte_lcore_id();
> +	core_statistics[lcore_id].tx += n;
> +#else
> +	RTE_SET_USED(n);
> +#endif /* STATS_INTERVAL */
> +}
> +
> +static inline void
> +core_stats_update_drop(int n)
> +{
> +#if (STATS_INTERVAL > 0)
> +	int lcore_id = rte_lcore_id();
> +	core_statistics[lcore_id].dropped += n;
> +#else
> +	RTE_SET_USED(n);
> +#endif /* STATS_INTERVAL */
> +}
> +
> +/* helper routine to free bulk of packets */
> +static inline void
> +free_pkts(struct rte_mbuf *mb[], uint32_t n)
> +{
> +	uint32_t i;
> +
> +	for (i = 0; i != n; i++)
> +		rte_pktmbuf_free(mb[i]);
> +
> +	core_stats_update_drop(n);
> +}
> +
> +/* helper routine to free single packet */
> +static inline void
> +free_pkt(struct rte_mbuf *mb)
> +{
> +	rte_pktmbuf_free(mb);
> +	core_stats_update_drop(1);
> +}
> +
>  #endif /* _IPSEC_SECGW_H_ */
> diff --git a/examples/ipsec-secgw/ipsec.c b/examples/ipsec-secgw/ipsec.c
> index bf88d80..351f1f1 100644
> --- a/examples/ipsec-secgw/ipsec.c
> +++ b/examples/ipsec-secgw/ipsec.c
> @@ -500,7 +500,7 @@ enqueue_cop_burst(struct cdev_qp *cqp)
>  			cqp->id, cqp->qp, ret, len);
>  			/* drop packets that we fail to enqueue */
>  			for (i = ret; i < len; i++)
> -				rte_pktmbuf_free(cqp->buf[i]->sym->m_src);
> +				free_pkt(cqp->buf[i]->sym->m_src);
>  	}
>  	cqp->in_flight += ret;
>  	cqp->len = 0;
> @@ -528,7 +528,7 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
> 
>  	for (i = 0; i < nb_pkts; i++) {
>  		if (unlikely(sas[i] == NULL)) {
> -			rte_pktmbuf_free(pkts[i]);
> +			free_pkt(pkts[i]);
>  			continue;
>  		}
> 
> @@ -549,7 +549,7 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
> 
>  			if ((unlikely(ips->security.ses == NULL)) &&
>  				create_lookaside_session(ipsec_ctx, sa, ips)) {
> -				rte_pktmbuf_free(pkts[i]);
> +				free_pkt(pkts[i]);
>  				continue;
>  			}
> 
> @@ -563,7 +563,7 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
>  		case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
>  			RTE_LOG(ERR, IPSEC, "CPU crypto is not supported by the"
>  					" legacy mode.");
> -			rte_pktmbuf_free(pkts[i]);
> +			free_pkt(pkts[i]);
>  			continue;
> 
>  		case RTE_SECURITY_ACTION_TYPE_NONE:
> @@ -575,7 +575,7 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
> 
>  			if ((unlikely(ips->crypto.ses == NULL)) &&
>  				create_lookaside_session(ipsec_ctx, sa, ips)) {
> -				rte_pktmbuf_free(pkts[i]);
> +				free_pkt(pkts[i]);
>  				continue;
>  			}
> 
> @@ -584,7 +584,7 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
> 
>  			ret = xform_func(pkts[i], sa, &priv->cop);
>  			if (unlikely(ret)) {
> -				rte_pktmbuf_free(pkts[i]);
> +				free_pkt(pkts[i]);
>  				continue;
>  			}
>  			break;
> @@ -608,7 +608,7 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
> 
>  			ret = xform_func(pkts[i], sa, &priv->cop);
>  			if (unlikely(ret)) {
> -				rte_pktmbuf_free(pkts[i]);
> +				free_pkt(pkts[i]);
>  				continue;
>  			}
> 
> @@ -643,7 +643,7 @@ ipsec_inline_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
>  		sa = priv->sa;
>  		ret = xform_func(pkt, sa, &priv->cop);
>  		if (unlikely(ret)) {
> -			rte_pktmbuf_free(pkt);
> +			free_pkt(pkt);
>  			continue;
>  		}
>  		pkts[nb_pkts++] = pkt;
> @@ -690,13 +690,13 @@ ipsec_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
>  				RTE_SECURITY_ACTION_TYPE_NONE) {
>  				ret = xform_func(pkt, sa, cops[j]);
>  				if (unlikely(ret)) {
> -					rte_pktmbuf_free(pkt);
> +					free_pkt(pkt);
>  					continue;
>  				}
>  			} else if (ipsec_get_action_type(sa) ==
>  				RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) {
>  				if (cops[j]->status) {
> -					rte_pktmbuf_free(pkt);
> +					free_pkt(pkt);
>  					continue;
>  				}
>  			}
> diff --git a/examples/ipsec-secgw/ipsec_process.c b/examples/ipsec-secgw/ipsec_process.c
> index bb2f2b8..4748299 100644
> --- a/examples/ipsec-secgw/ipsec_process.c
> +++ b/examples/ipsec-secgw/ipsec_process.c
> @@ -12,22 +12,13 @@
>  #include <rte_mbuf.h>
> 
>  #include "ipsec.h"
> +#include "ipsec-secgw.h"
> 
>  #define SATP_OUT_IPV4(t)	\
>  	((((t) & RTE_IPSEC_SATP_MODE_MASK) == RTE_IPSEC_SATP_MODE_TRANS && \
>  	(((t) & RTE_IPSEC_SATP_IPV_MASK) == RTE_IPSEC_SATP_IPV4)) || \
>  	((t) & RTE_IPSEC_SATP_MODE_MASK) == RTE_IPSEC_SATP_MODE_TUNLV4)
> 
> -/* helper routine to free bulk of packets */
> -static inline void
> -free_pkts(struct rte_mbuf *mb[], uint32_t n)
> -{
> -	uint32_t i;
> -
> -	for (i = 0; i != n; i++)
> -		rte_pktmbuf_free(mb[i]);
> -}
> -
>  /* helper routine to free bulk of crypto-ops and related packets */
>  static inline void
>  free_cops(struct rte_crypto_op *cop[], uint32_t n)
> --
> 2.7.4
  
Anoob Joseph May 12, 2020, 12:14 p.m. UTC | #3
Hi Konstantin,

Please see inline.

Thanks,
Anoob

> -----Original Message-----
> From: Ananyev, Konstantin <konstantin.ananyev@intel.com>
> Sent: Thursday, May 7, 2020 9:42 PM
> To: Anoob Joseph <anoobj@marvell.com>; Akhil Goyal <akhil.goyal@nxp.com>;
> Nicolau, Radu <radu.nicolau@intel.com>
> Cc: Narayana Prasad Raju Athreya <pathreya@marvell.com>; dev@dpdk.org
> Subject: [EXT] RE: [PATCH v3] examples/ipsec-secgw: add per core packet stats
> 
> External Email
> 
> ----------------------------------------------------------------------
> > @@ -1099,6 +1151,10 @@ ipsec_poll_mode_worker(void)
> >  	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
> >  			/ US_PER_S * BURST_TX_DRAIN_US;
> >  	struct lcore_rx_queue *rxql;
> > +#if (STATS_INTERVAL > 0)
> > +	const uint64_t timer_period = STATS_INTERVAL * rte_get_timer_hz();
> > +	uint64_t timer_tsc = 0;
> > +#endif /* STATS_INTERVAL */
> >
> >  	prev_tsc = 0;
> >  	lcore_id = rte_lcore_id();
> > @@ -1159,6 +1215,19 @@ ipsec_poll_mode_worker(void)
> >  			drain_tx_buffers(qconf);
> >  			drain_crypto_buffers(qconf);
> >  			prev_tsc = cur_tsc;
> > +#if (STATS_INTERVAL > 0)
> > +			if (lcore_id == rte_get_master_lcore()) {
> > +				/* advance the timer */
> > +				timer_tsc += diff_tsc;
> > +
> > +				/* if timer has reached its timeout */
> > +				if (unlikely(timer_tsc >= timer_period)) {
> > +					print_stats();
> > +					/* reset the timer */
> > +					timer_tsc = 0;
> > +				}
> > +			}
> > +#endif /* STATS_INTERVAL */
> 
> I still don't understand why to do it in data-path thread.
> As I said in previous comments, in DPDK there is a control thread that can be
> used for such house-keeping tasks.
> Why not to use it (via rte_alarm or so) and keep data-path threads less affected.

[Anoob] From Marvell's estimates, this stats collection and reporting will be expensive and so cannot be enabled by default. This is required for analyzing the traffic distribution in cases where the performance isn't scaling as expected. And this patch achieves the desired feature. If Intel would like to improve the approach, that can be taken up as a separate patch.
 
> 
> >  		}
> >
> >  		for (i = 0; i < qconf->nb_rx_queue; ++i) { @@ -1169,8 +1238,10
> @@
> > ipsec_poll_mode_worker(void)
> >  			nb_rx = rte_eth_rx_burst(portid, queueid,
> >  					pkts, MAX_PKT_BURST);
> >
> > -			if (nb_rx > 0)
> > +			if (nb_rx > 0) {
> > +				core_stats_update_rx(nb_rx);
> >  				process_pkts(qconf, pkts, nb_rx, portid);
> > +			}
> >
> >  			/* dequeue and process completed crypto-ops */
> >  			if (is_unprotected_port(portid))
> > diff --git a/examples/ipsec-secgw/ipsec-secgw.h
> > b/examples/ipsec-secgw/ipsec-secgw.h
> > index 4b53cb5..5b3561f 100644
> > --- a/examples/ipsec-secgw/ipsec-secgw.h
> > +++ b/examples/ipsec-secgw/ipsec-secgw.h
> > @@ -6,6 +6,8 @@
> >
> >  #include <stdbool.h>
> >
> > +#define STATS_INTERVAL 0
> 
> Shouldn't it be:
> #ifndef STATS_INTERVAL
> #define STATS_INTERVAL	0
> #endif
> ?

[Anoob] Will update in v4.
 
> 
> To allow the user to specify the stats interval via EXTRA_CFLAGS='-DSTATS_INTERVAL=10'
> or so.
> 
> > +
> >  #define NB_SOCKETS 4
> >
> >  #define MAX_PKT_BURST 32
> > @@ -69,6 +71,17 @@ struct ethaddr_info {
> >  	uint64_t src, dst;
> >  };
> >
> > +#if (STATS_INTERVAL > 0)
> > +struct ipsec_core_statistics {
> > +	uint64_t tx;
> > +	uint64_t rx;
> > +	uint64_t dropped;
> > +	uint64_t burst_rx;
> > +} __rte_cache_aligned;
> > +
> > +struct ipsec_core_statistics core_statistics[RTE_MAX_LCORE]; #endif
> > +/* STATS_INTERVAL */
> > +
> >  extern struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS];
> >
> >  /* Port mask to identify the unprotected ports */ @@ -85,4 +98,59 @@
> > is_unprotected_port(uint16_t port_id)
> >  	return unprotected_port_mask & (1 << port_id);  }
> >
> > +static inline void
> > +core_stats_update_rx(int n)
> > +{
> > +#if (STATS_INTERVAL > 0)
> > +	int lcore_id = rte_lcore_id();
> > +	core_statistics[lcore_id].rx += n;
> > +	if (n == MAX_PKT_BURST)
> > +		core_statistics[lcore_id].burst_rx += n; #else
> > +	RTE_SET_USED(n);
> > +#endif /* STATS_INTERVAL */
> > +}
> > +
> > +static inline void
> > +core_stats_update_tx(int n)
> > +{
> > +#if (STATS_INTERVAL > 0)
> > +	int lcore_id = rte_lcore_id();
> > +	core_statistics[lcore_id].tx += n;
> > +#else
> > +	RTE_SET_USED(n);
> > +#endif /* STATS_INTERVAL */
> > +}
> > +
> > +static inline void
> > +core_stats_update_drop(int n)
> > +{
> > +#if (STATS_INTERVAL > 0)
> > +	int lcore_id = rte_lcore_id();
> > +	core_statistics[lcore_id].dropped += n; #else
> > +	RTE_SET_USED(n);
> > +#endif /* STATS_INTERVAL */
> > +}
> > +
> > +/* helper routine to free bulk of packets */ static inline void
> > +free_pkts(struct rte_mbuf *mb[], uint32_t n) {
> > +	uint32_t i;
> > +
> > +	for (i = 0; i != n; i++)
> > +		rte_pktmbuf_free(mb[i]);
> > +
> > +	core_stats_update_drop(n);
> > +}
> > +
> > +/* helper routine to free single packet */ static inline void
> > +free_pkt(struct rte_mbuf *mb) {
> > +	rte_pktmbuf_free(mb);
> > +	core_stats_update_drop(1);
> 
> Probably just:
> free_pkts(&mb, 1);
> ?

[Anoob] Will update in v4.
 
> 
> > +}
> > +
> >  #endif /* _IPSEC_SECGW_H_ */
  
Anoob Joseph May 12, 2020, 12:16 p.m. UTC | #4
Hi Konstantin,

Please see inline.

Thanks,
Anoob

> -----Original Message-----
> From: Ananyev, Konstantin <konstantin.ananyev@intel.com>
> Sent: Friday, May 8, 2020 1:39 PM
> To: Anoob Joseph <anoobj@marvell.com>; Akhil Goyal <akhil.goyal@nxp.com>;
> Nicolau, Radu <radu.nicolau@intel.com>
> Cc: Narayana Prasad Raju Athreya <pathreya@marvell.com>; dev@dpdk.org
> Subject: [EXT] RE: [PATCH v3] examples/ipsec-secgw: add per core packet stats
> 
> External Email
> 
> ----------------------------------------------------------------------
> 
> >
> > +#if (STATS_INTERVAL > 0)
> > +
> > +/* Print out statistics on packet distribution */ static void
> > +print_stats(void)
> > +{
> > +	uint64_t total_packets_dropped, total_packets_tx, total_packets_rx;
> > +	unsigned int coreid;
> > +	float burst_percent;
> > +
> > +	total_packets_dropped = 0;
> > +	total_packets_tx = 0;
> > +	total_packets_rx = 0;
> > +
> > +	const char clr[] = { 27, '[', '2', 'J', '\0' };
> > +
> > +	/* Clear screen and move to top left */
> > +	printf("%s", clr);
> > +
> > +	printf("\nCore statistics ====================================");
> > +
> > +	for (coreid = 0; coreid < RTE_MAX_LCORE; coreid++) {
> > +		/* skip disabled cores */
> > +		if (rte_lcore_is_enabled(coreid) == 0)
> > +			continue;
> > +		burst_percent = (float)(core_statistics[coreid].burst_rx * 100)/
> > +					core_statistics[coreid].rx;
> > +		printf("\nStatistics for core %u ------------------------------"
> > +			   "\nPackets received: %20"PRIu64
> > +			   "\nPackets sent: %24"PRIu64
> > +			   "\nPackets dropped: %21"PRIu64
> > +			   "\nBurst percent: %23.2f",
> > +			   coreid,
> > +			   core_statistics[coreid].rx,
> > +			   core_statistics[coreid].tx,
> > +			   core_statistics[coreid].dropped,
> > +			   burst_percent);
> 
> 
> As one more comment:
> Can we also collect number of calls and display average pkt/call (for both rx and
> tx)?
> 

[Anoob] Can you describe which "call" you meant? We can capture more logs if required. 
 
> > +
> > +		total_packets_dropped += core_statistics[coreid].dropped;
> > +		total_packets_tx += core_statistics[coreid].tx;
> > +		total_packets_rx += core_statistics[coreid].rx;
> > +	}
> > +	printf("\nAggregate statistics ==============================="
> > +		   "\nTotal packets received: %14"PRIu64
> > +		   "\nTotal packets sent: %18"PRIu64
> > +		   "\nTotal packets dropped: %15"PRIu64,
> > +		   total_packets_rx,
> > +		   total_packets_tx,
> > +		   total_packets_dropped);
> > +
> 	printf("\n===================================================
> =\n");
> > +}
> > +#endif /* STATS_INTERVAL */
> > +
> >  static inline void
> >  prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)  {
> > @@ -333,7 +386,7 @@ prepare_one_packet(struct rte_mbuf *pkt, struct
> > ipsec_traffic *t)
> >
> >  		/* drop packet when IPv6 header exceeds first segment length
> */
> >  		if (unlikely(l3len > pkt->data_len)) {
> > -			rte_pktmbuf_free(pkt);
> > +			free_pkt(pkt);
> >  			return;
> >  		}
> >
> > @@ -350,7 +403,7 @@ prepare_one_packet(struct rte_mbuf *pkt, struct
> ipsec_traffic *t)
> >  		/* Unknown/Unsupported type, drop the packet */
> >  		RTE_LOG(ERR, IPSEC, "Unsupported packet type 0x%x\n",
> >  			rte_be_to_cpu_16(eth->ether_type));
> > -		rte_pktmbuf_free(pkt);
> > +		free_pkt(pkt);
> >  		return;
> >  	}
> >
> > @@ -477,9 +530,12 @@ send_burst(struct lcore_conf *qconf, uint16_t n,
> uint16_t port)
> >  	prepare_tx_burst(m_table, n, port, qconf);
> >
> >  	ret = rte_eth_tx_burst(port, queueid, m_table, n);
> > +
> > +	core_stats_update_tx(ret);
> > +
> >  	if (unlikely(ret < n)) {
> >  		do {
> > -			rte_pktmbuf_free(m_table[ret]);
> > +			free_pkt(m_table[ret]);
> >  		} while (++ret < n);
> >  	}
> >
> > @@ -525,7 +581,7 @@ send_fragment_packet(struct lcore_conf *qconf,
> struct rte_mbuf *m,
> >  			"error code: %d\n",
> >  			__func__, m->pkt_len, rte_errno);
> >
> > -	rte_pktmbuf_free(m);
> > +	free_pkt(m);
> >  	return len;
> >  }
> >
> > @@ -550,7 +606,7 @@ send_single_packet(struct rte_mbuf *m, uint16_t
> port, uint8_t proto)
> >  	} else if (frag_tbl_sz > 0)
> >  		len = send_fragment_packet(qconf, m, port, proto);
> >  	else
> > -		rte_pktmbuf_free(m);
> > +		free_pkt(m);
> >
> >  	/* enough pkts to be sent */
> >  	if (unlikely(len == MAX_PKT_BURST)) { @@ -584,19 +640,19 @@
> > inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip,
> >  			continue;
> >  		}
> >  		if (res == DISCARD) {
> > -			rte_pktmbuf_free(m);
> > +			free_pkt(m);
> >  			continue;
> >  		}
> >
> >  		/* Only check SPI match for processed IPSec packets */
> >  		if (i < lim && ((m->ol_flags & PKT_RX_SEC_OFFLOAD) == 0)) {
> > -			rte_pktmbuf_free(m);
> > +			free_pkt(m);
> >  			continue;
> >  		}
> >
> >  		sa_idx = res - 1;
> >  		if (!inbound_sa_check(sa, m, sa_idx)) {
> > -			rte_pktmbuf_free(m);
> > +			free_pkt(m);
> >  			continue;
> >  		}
> >  		ip->pkts[j++] = m;
> > @@ -631,7 +687,7 @@ split46_traffic(struct ipsec_traffic *trf, struct rte_mbuf
> *mb[], uint32_t num)
> >  					offsetof(struct ip6_hdr, ip6_nxt));
> >  			n6++;
> >  		} else
> > -			rte_pktmbuf_free(m);
> > +			free_pkt(m);
> >  	}
> >
> >  	trf->ip4.num = n4;
> > @@ -683,7 +739,7 @@ outbound_sp(struct sp_ctx *sp, struct traffic_type *ip,
> >  		m = ip->pkts[i];
> >  		sa_idx = ip->res[i] - 1;
> >  		if (ip->res[i] == DISCARD)
> > -			rte_pktmbuf_free(m);
> > +			free_pkt(m);
> >  		else if (ip->res[i] == BYPASS)
> >  			ip->pkts[j++] = m;
> >  		else {
> > @@ -702,8 +758,7 @@ process_pkts_outbound(struct ipsec_ctx *ipsec_ctx,
> >  	uint16_t idx, nb_pkts_out, i;
> >
> >  	/* Drop any IPsec traffic from protected ports */
> > -	for (i = 0; i < traffic->ipsec.num; i++)
> > -		rte_pktmbuf_free(traffic->ipsec.pkts[i]);
> > +	free_pkts(traffic->ipsec.pkts, traffic->ipsec.num);
> >
> >  	traffic->ipsec.num = 0;
> >
> > @@ -743,14 +798,12 @@ process_pkts_inbound_nosp(struct ipsec_ctx
> *ipsec_ctx,
> >  	uint32_t nb_pkts_in, i, idx;
> >
> >  	/* Drop any IPv4 traffic from unprotected ports */
> > -	for (i = 0; i < traffic->ip4.num; i++)
> > -		rte_pktmbuf_free(traffic->ip4.pkts[i]);
> > +	free_pkts(traffic->ip4.pkts, traffic->ip4.num);
> >
> >  	traffic->ip4.num = 0;
> >
> >  	/* Drop any IPv6 traffic from unprotected ports */
> > -	for (i = 0; i < traffic->ip6.num; i++)
> > -		rte_pktmbuf_free(traffic->ip6.pkts[i]);
> > +	free_pkts(traffic->ip6.pkts, traffic->ip6.num);
> >
> >  	traffic->ip6.num = 0;
> >
> > @@ -786,8 +839,7 @@ process_pkts_outbound_nosp(struct ipsec_ctx
> *ipsec_ctx,
> >  	struct ip *ip;
> >
> >  	/* Drop any IPsec traffic from protected ports */
> > -	for (i = 0; i < traffic->ipsec.num; i++)
> > -		rte_pktmbuf_free(traffic->ipsec.pkts[i]);
> > +	free_pkts(traffic->ipsec.pkts, traffic->ipsec.num);
> >
> >  	n = 0;
> >
> > @@ -901,7 +953,7 @@ route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf
> *pkts[], uint8_t nb_pkts)
> >  		}
> >
> >  		if ((pkt_hop & RTE_LPM_LOOKUP_SUCCESS) == 0) {
> > -			rte_pktmbuf_free(pkts[i]);
> > +			free_pkt(pkts[i]);
> >  			continue;
> >  		}
> >  		send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IP); @@ -
> 953,7
> > +1005,7 @@ route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[],
> uint8_t nb_pkts)
> >  		}
> >
> >  		if (pkt_hop == -1) {
> > -			rte_pktmbuf_free(pkts[i]);
> > +			free_pkt(pkts[i]);
> >  			continue;
> >  		}
> >  		send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IPV6);
> @@
> > -1099,6 +1151,10 @@ ipsec_poll_mode_worker(void)
> >  	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
> >  			/ US_PER_S * BURST_TX_DRAIN_US;
> >  	struct lcore_rx_queue *rxql;
> > +#if (STATS_INTERVAL > 0)
> > +	const uint64_t timer_period = STATS_INTERVAL * rte_get_timer_hz();
> > +	uint64_t timer_tsc = 0;
> > +#endif /* STATS_INTERVAL */
> >
> >  	prev_tsc = 0;
> >  	lcore_id = rte_lcore_id();
> > @@ -1159,6 +1215,19 @@ ipsec_poll_mode_worker(void)
> >  			drain_tx_buffers(qconf);
> >  			drain_crypto_buffers(qconf);
> >  			prev_tsc = cur_tsc;
> > +#if (STATS_INTERVAL > 0)
> > +			if (lcore_id == rte_get_master_lcore()) {
> > +				/* advance the timer */
> > +				timer_tsc += diff_tsc;
> > +
> > +				/* if timer has reached its timeout */
> > +				if (unlikely(timer_tsc >= timer_period)) {
> > +					print_stats();
> > +					/* reset the timer */
> > +					timer_tsc = 0;
> > +				}
> > +			}
> > +#endif /* STATS_INTERVAL */
> >  		}
> >
> >  		for (i = 0; i < qconf->nb_rx_queue; ++i) { @@ -1169,8 +1238,10
> @@
> > ipsec_poll_mode_worker(void)
> >  			nb_rx = rte_eth_rx_burst(portid, queueid,
> >  					pkts, MAX_PKT_BURST);
> >
> > -			if (nb_rx > 0)
> > +			if (nb_rx > 0) {
> > +				core_stats_update_rx(nb_rx);
> >  				process_pkts(qconf, pkts, nb_rx, portid);
> > +			}
> >
> >  			/* dequeue and process completed crypto-ops */
> >  			if (is_unprotected_port(portid))
> > diff --git a/examples/ipsec-secgw/ipsec-secgw.h
> > b/examples/ipsec-secgw/ipsec-secgw.h
> > index 4b53cb5..5b3561f 100644
> > --- a/examples/ipsec-secgw/ipsec-secgw.h
> > +++ b/examples/ipsec-secgw/ipsec-secgw.h
> > @@ -6,6 +6,8 @@
> >
> >  #include <stdbool.h>
> >
> > +#define STATS_INTERVAL 0
> > +
> >  #define NB_SOCKETS 4
> >
> >  #define MAX_PKT_BURST 32
> > @@ -69,6 +71,17 @@ struct ethaddr_info {
> >  	uint64_t src, dst;
> >  };
> >
> > +#if (STATS_INTERVAL > 0)
> > +struct ipsec_core_statistics {
> > +	uint64_t tx;
> > +	uint64_t rx;
> > +	uint64_t dropped;
> > +	uint64_t burst_rx;
> > +} __rte_cache_aligned;
> > +
> > +struct ipsec_core_statistics core_statistics[RTE_MAX_LCORE]; #endif
> > +/* STATS_INTERVAL */
> > +
> >  extern struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS];
> >
> >  /* Port mask to identify the unprotected ports */ @@ -85,4 +98,59 @@
> > is_unprotected_port(uint16_t port_id)
> >  	return unprotected_port_mask & (1 << port_id);  }
> >
> > +static inline void
> > +core_stats_update_rx(int n)
> > +{
> > +#if (STATS_INTERVAL > 0)
> > +	int lcore_id = rte_lcore_id();
> > +	core_statistics[lcore_id].rx += n;
> > +	if (n == MAX_PKT_BURST)
> > +		core_statistics[lcore_id].burst_rx += n; #else
> > +	RTE_SET_USED(n);
> > +#endif /* STATS_INTERVAL */
> > +}
> > +
> > +static inline void
> > +core_stats_update_tx(int n)
> > +{
> > +#if (STATS_INTERVAL > 0)
> > +	int lcore_id = rte_lcore_id();
> > +	core_statistics[lcore_id].tx += n;
> > +#else
> > +	RTE_SET_USED(n);
> > +#endif /* STATS_INTERVAL */
> > +}
> > +
> > +static inline void
> > +core_stats_update_drop(int n)
> > +{
> > +#if (STATS_INTERVAL > 0)
> > +	int lcore_id = rte_lcore_id();
> > +	core_statistics[lcore_id].dropped += n; #else
> > +	RTE_SET_USED(n);
> > +#endif /* STATS_INTERVAL */
> > +}
> > +
> > +/* helper routine to free bulk of packets */ static inline void
> > +free_pkts(struct rte_mbuf *mb[], uint32_t n) {
> > +	uint32_t i;
> > +
> > +	for (i = 0; i != n; i++)
> > +		rte_pktmbuf_free(mb[i]);
> > +
> > +	core_stats_update_drop(n);
> > +}
> > +
> > +/* helper routine to free single packet */ static inline void
> > +free_pkt(struct rte_mbuf *mb) {
> > +	rte_pktmbuf_free(mb);
> > +	core_stats_update_drop(1);
> > +}
> > +
> >  #endif /* _IPSEC_SECGW_H_ */
> > diff --git a/examples/ipsec-secgw/ipsec.c
> > b/examples/ipsec-secgw/ipsec.c index bf88d80..351f1f1 100644
> > --- a/examples/ipsec-secgw/ipsec.c
> > +++ b/examples/ipsec-secgw/ipsec.c
> > @@ -500,7 +500,7 @@ enqueue_cop_burst(struct cdev_qp *cqp)
> >  			cqp->id, cqp->qp, ret, len);
> >  			/* drop packets that we fail to enqueue */
> >  			for (i = ret; i < len; i++)
> > -				rte_pktmbuf_free(cqp->buf[i]->sym->m_src);
> > +				free_pkt(cqp->buf[i]->sym->m_src);
> >  	}
> >  	cqp->in_flight += ret;
> >  	cqp->len = 0;
> > @@ -528,7 +528,7 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct
> > ipsec_ctx *ipsec_ctx,
> >
> >  	for (i = 0; i < nb_pkts; i++) {
> >  		if (unlikely(sas[i] == NULL)) {
> > -			rte_pktmbuf_free(pkts[i]);
> > +			free_pkt(pkts[i]);
> >  			continue;
> >  		}
> >
> > @@ -549,7 +549,7 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct
> > ipsec_ctx *ipsec_ctx,
> >
> >  			if ((unlikely(ips->security.ses == NULL)) &&
> >  				create_lookaside_session(ipsec_ctx, sa, ips)) {
> > -				rte_pktmbuf_free(pkts[i]);
> > +				free_pkt(pkts[i]);
> >  				continue;
> >  			}
> >
> > @@ -563,7 +563,7 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct
> ipsec_ctx *ipsec_ctx,
> >  		case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
> >  			RTE_LOG(ERR, IPSEC, "CPU crypto is not supported by
> the"
> >  					" legacy mode.");
> > -			rte_pktmbuf_free(pkts[i]);
> > +			free_pkt(pkts[i]);
> >  			continue;
> >
> >  		case RTE_SECURITY_ACTION_TYPE_NONE:
> > @@ -575,7 +575,7 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct
> > ipsec_ctx *ipsec_ctx,
> >
> >  			if ((unlikely(ips->crypto.ses == NULL)) &&
> >  				create_lookaside_session(ipsec_ctx, sa, ips)) {
> > -				rte_pktmbuf_free(pkts[i]);
> > +				free_pkt(pkts[i]);
> >  				continue;
> >  			}
> >
> > @@ -584,7 +584,7 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct
> > ipsec_ctx *ipsec_ctx,
> >
> >  			ret = xform_func(pkts[i], sa, &priv->cop);
> >  			if (unlikely(ret)) {
> > -				rte_pktmbuf_free(pkts[i]);
> > +				free_pkt(pkts[i]);
> >  				continue;
> >  			}
> >  			break;
> > @@ -608,7 +608,7 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct
> > ipsec_ctx *ipsec_ctx,
> >
> >  			ret = xform_func(pkts[i], sa, &priv->cop);
> >  			if (unlikely(ret)) {
> > -				rte_pktmbuf_free(pkts[i]);
> > +				free_pkt(pkts[i]);
> >  				continue;
> >  			}
> >
> > @@ -643,7 +643,7 @@ ipsec_inline_dequeue(ipsec_xform_fn xform_func,
> struct ipsec_ctx *ipsec_ctx,
> >  		sa = priv->sa;
> >  		ret = xform_func(pkt, sa, &priv->cop);
> >  		if (unlikely(ret)) {
> > -			rte_pktmbuf_free(pkt);
> > +			free_pkt(pkt);
> >  			continue;
> >  		}
> >  		pkts[nb_pkts++] = pkt;
> > @@ -690,13 +690,13 @@ ipsec_dequeue(ipsec_xform_fn xform_func, struct
> ipsec_ctx *ipsec_ctx,
> >  				RTE_SECURITY_ACTION_TYPE_NONE) {
> >  				ret = xform_func(pkt, sa, cops[j]);
> >  				if (unlikely(ret)) {
> > -					rte_pktmbuf_free(pkt);
> > +					free_pkt(pkt);
> >  					continue;
> >  				}
> >  			} else if (ipsec_get_action_type(sa) ==
> >
> 	RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) {
> >  				if (cops[j]->status) {
> > -					rte_pktmbuf_free(pkt);
> > +					free_pkt(pkt);
> >  					continue;
> >  				}
> >  			}
> > diff --git a/examples/ipsec-secgw/ipsec_process.c
> > b/examples/ipsec-secgw/ipsec_process.c
> > index bb2f2b8..4748299 100644
> > --- a/examples/ipsec-secgw/ipsec_process.c
> > +++ b/examples/ipsec-secgw/ipsec_process.c
> > @@ -12,22 +12,13 @@
> >  #include <rte_mbuf.h>
> >
> >  #include "ipsec.h"
> > +#include "ipsec-secgw.h"
> >
> >  #define SATP_OUT_IPV4(t)	\
> >  	((((t) & RTE_IPSEC_SATP_MODE_MASK) ==
> RTE_IPSEC_SATP_MODE_TRANS && \
> >  	(((t) & RTE_IPSEC_SATP_IPV_MASK) == RTE_IPSEC_SATP_IPV4)) || \
> >  	((t) & RTE_IPSEC_SATP_MODE_MASK) ==
> RTE_IPSEC_SATP_MODE_TUNLV4)
> >
> > -/* helper routine to free bulk of packets */ -static inline void
> > -free_pkts(struct rte_mbuf *mb[], uint32_t n) -{
> > -	uint32_t i;
> > -
> > -	for (i = 0; i != n; i++)
> > -		rte_pktmbuf_free(mb[i]);
> > -}
> > -
> >  /* helper routine to free bulk of crypto-ops and related packets */
> > static inline void  free_cops(struct rte_crypto_op *cop[], uint32_t n)
> > --
> > 2.7.4
  
Ananyev, Konstantin May 13, 2020, 12:12 p.m. UTC | #5
> > >
> > > +#if (STATS_INTERVAL > 0)
> > > +
> > > +/* Print out statistics on packet distribution */ static void
> > > +print_stats(void)
> > > +{
> > > +	uint64_t total_packets_dropped, total_packets_tx, total_packets_rx;
> > > +	unsigned int coreid;
> > > +	float burst_percent;
> > > +
> > > +	total_packets_dropped = 0;
> > > +	total_packets_tx = 0;
> > > +	total_packets_rx = 0;
> > > +
> > > +	const char clr[] = { 27, '[', '2', 'J', '\0' };
> > > +
> > > +	/* Clear screen and move to top left */
> > > +	printf("%s", clr);
> > > +
> > > +	printf("\nCore statistics ====================================");
> > > +
> > > +	for (coreid = 0; coreid < RTE_MAX_LCORE; coreid++) {
> > > +		/* skip disabled cores */
> > > +		if (rte_lcore_is_enabled(coreid) == 0)
> > > +			continue;
> > > +		burst_percent = (float)(core_statistics[coreid].burst_rx * 100)/
> > > +					core_statistics[coreid].rx;
> > > +		printf("\nStatistics for core %u ------------------------------"
> > > +			   "\nPackets received: %20"PRIu64
> > > +			   "\nPackets sent: %24"PRIu64
> > > +			   "\nPackets dropped: %21"PRIu64
> > > +			   "\nBurst percent: %23.2f",
> > > +			   coreid,
> > > +			   core_statistics[coreid].rx,
> > > +			   core_statistics[coreid].tx,
> > > +			   core_statistics[coreid].dropped,
> > > +			   burst_percent);
> >
> >
> > As one more comment:
> > Can we also collect number of calls and display average pkt/call (for both rx and
> > tx)?
> >
> 
> [Anoob] Can you describe which "call" you meant? We can capture more logs if required.

I meant something like that:
+static inline void
+core_stats_update_rx(int n)
+{
+#if (STATS_INTERVAL > 0)
+	int lcore_id = rte_lcore_id();
+	core_statistics[lcore_id].rx += n;
+	core_statistics[lcore_id].rx_call++;
+	if (n == MAX_PKT_BURST)
+		core_statistics[lcore_id].burst_rx += n;
+#else
+	RTE_SET_USED(n);
+#endif /* STATS_INTERVAL */
+}

And then in print_stats():
"\nPackets received: %20"PRIu64
",RX calls: %" PRIu64
"RX packets/call: %.2f"
core_statistics[coreid].rx,
core_statistics[coreid].rx_call,
(double)core_statistics[coreid].rx /  core_statistics[coreid].rx_call

> 
> > > +
> > > +		total_packets_dropped += core_statistics[coreid].dropped;
> > > +		total_packets_tx += core_statistics[coreid].tx;
> > > +		total_packets_rx += core_statistics[coreid].rx;
> > > +	}
> > > +	printf("\nAggregate statistics ==============================="
> > > +		   "\nTotal packets received: %14"PRIu64
> > > +		   "\nTotal packets sent: %18"PRIu64
> > > +		   "\nTotal packets dropped: %15"PRIu64,
> > > +		   total_packets_rx,
> > > +		   total_packets_tx,
> > > +		   total_packets_dropped);
> > > +
> > 	printf("\n===================================================
> > =\n");
> > > +}
> > > +#endif /* STATS_INTERVAL */
> > > +
> > >  static inline void
> > >  prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)  {
> > > @@ -333,7 +386,7 @@ prepare_one_packet(struct rte_mbuf *pkt, struct
> > > ipsec_traffic *t)
> > >
> > >  		/* drop packet when IPv6 header exceeds first segment length
> > */
> > >  		if (unlikely(l3len > pkt->data_len)) {
> > > -			rte_pktmbuf_free(pkt);
> > > +			free_pkt(pkt);
> > >  			return;
> > >  		}
> > >
> > > @@ -350,7 +403,7 @@ prepare_one_packet(struct rte_mbuf *pkt, struct
> > ipsec_traffic *t)
> > >  		/* Unknown/Unsupported type, drop the packet */
> > >  		RTE_LOG(ERR, IPSEC, "Unsupported packet type 0x%x\n",
> > >  			rte_be_to_cpu_16(eth->ether_type));
> > > -		rte_pktmbuf_free(pkt);
> > > +		free_pkt(pkt);
> > >  		return;
> > >  	}
> > >
> > > @@ -477,9 +530,12 @@ send_burst(struct lcore_conf *qconf, uint16_t n,
> > uint16_t port)
> > >  	prepare_tx_burst(m_table, n, port, qconf);
> > >
> > >  	ret = rte_eth_tx_burst(port, queueid, m_table, n);
> > > +
> > > +	core_stats_update_tx(ret);
> > > +
> > >  	if (unlikely(ret < n)) {
> > >  		do {
> > > -			rte_pktmbuf_free(m_table[ret]);
> > > +			free_pkt(m_table[ret]);
> > >  		} while (++ret < n);
> > >  	}
> > >
> > > @@ -525,7 +581,7 @@ send_fragment_packet(struct lcore_conf *qconf,
> > struct rte_mbuf *m,
> > >  			"error code: %d\n",
> > >  			__func__, m->pkt_len, rte_errno);
> > >
> > > -	rte_pktmbuf_free(m);
> > > +	free_pkt(m);
> > >  	return len;
> > >  }
> > >
> > > @@ -550,7 +606,7 @@ send_single_packet(struct rte_mbuf *m, uint16_t
> > port, uint8_t proto)
> > >  	} else if (frag_tbl_sz > 0)
> > >  		len = send_fragment_packet(qconf, m, port, proto);
> > >  	else
> > > -		rte_pktmbuf_free(m);
> > > +		free_pkt(m);
> > >
> > >  	/* enough pkts to be sent */
> > >  	if (unlikely(len == MAX_PKT_BURST)) { @@ -584,19 +640,19 @@
> > > inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip,
> > >  			continue;
> > >  		}
> > >  		if (res == DISCARD) {
> > > -			rte_pktmbuf_free(m);
> > > +			free_pkt(m);
> > >  			continue;
> > >  		}
> > >
> > >  		/* Only check SPI match for processed IPSec packets */
> > >  		if (i < lim && ((m->ol_flags & PKT_RX_SEC_OFFLOAD) == 0)) {
> > > -			rte_pktmbuf_free(m);
> > > +			free_pkt(m);
> > >  			continue;
> > >  		}
> > >
> > >  		sa_idx = res - 1;
> > >  		if (!inbound_sa_check(sa, m, sa_idx)) {
> > > -			rte_pktmbuf_free(m);
> > > +			free_pkt(m);
> > >  			continue;
> > >  		}
> > >  		ip->pkts[j++] = m;
> > > @@ -631,7 +687,7 @@ split46_traffic(struct ipsec_traffic *trf, struct rte_mbuf
> > *mb[], uint32_t num)
> > >  					offsetof(struct ip6_hdr, ip6_nxt));
> > >  			n6++;
> > >  		} else
> > > -			rte_pktmbuf_free(m);
> > > +			free_pkt(m);
> > >  	}
> > >
> > >  	trf->ip4.num = n4;
> > > @@ -683,7 +739,7 @@ outbound_sp(struct sp_ctx *sp, struct traffic_type *ip,
> > >  		m = ip->pkts[i];
> > >  		sa_idx = ip->res[i] - 1;
> > >  		if (ip->res[i] == DISCARD)
> > > -			rte_pktmbuf_free(m);
> > > +			free_pkt(m);
> > >  		else if (ip->res[i] == BYPASS)
> > >  			ip->pkts[j++] = m;
> > >  		else {
> > > @@ -702,8 +758,7 @@ process_pkts_outbound(struct ipsec_ctx *ipsec_ctx,
> > >  	uint16_t idx, nb_pkts_out, i;
> > >
> > >  	/* Drop any IPsec traffic from protected ports */
> > > -	for (i = 0; i < traffic->ipsec.num; i++)
> > > -		rte_pktmbuf_free(traffic->ipsec.pkts[i]);
> > > +	free_pkts(traffic->ipsec.pkts, traffic->ipsec.num);
> > >
> > >  	traffic->ipsec.num = 0;
> > >
> > > @@ -743,14 +798,12 @@ process_pkts_inbound_nosp(struct ipsec_ctx
> > *ipsec_ctx,
> > >  	uint32_t nb_pkts_in, i, idx;
> > >
> > >  	/* Drop any IPv4 traffic from unprotected ports */
> > > -	for (i = 0; i < traffic->ip4.num; i++)
> > > -		rte_pktmbuf_free(traffic->ip4.pkts[i]);
> > > +	free_pkts(traffic->ip4.pkts, traffic->ip4.num);
> > >
> > >  	traffic->ip4.num = 0;
> > >
> > >  	/* Drop any IPv6 traffic from unprotected ports */
> > > -	for (i = 0; i < traffic->ip6.num; i++)
> > > -		rte_pktmbuf_free(traffic->ip6.pkts[i]);
> > > +	free_pkts(traffic->ip6.pkts, traffic->ip6.num);
> > >
> > >  	traffic->ip6.num = 0;
> > >
> > > @@ -786,8 +839,7 @@ process_pkts_outbound_nosp(struct ipsec_ctx
> > *ipsec_ctx,
> > >  	struct ip *ip;
> > >
> > >  	/* Drop any IPsec traffic from protected ports */
> > > -	for (i = 0; i < traffic->ipsec.num; i++)
> > > -		rte_pktmbuf_free(traffic->ipsec.pkts[i]);
> > > +	free_pkts(traffic->ipsec.pkts, traffic->ipsec.num);
> > >
> > >  	n = 0;
> > >
> > > @@ -901,7 +953,7 @@ route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf
> > *pkts[], uint8_t nb_pkts)
> > >  		}
> > >
> > >  		if ((pkt_hop & RTE_LPM_LOOKUP_SUCCESS) == 0) {
> > > -			rte_pktmbuf_free(pkts[i]);
> > > +			free_pkt(pkts[i]);
> > >  			continue;
> > >  		}
> > >  		send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IP); @@ -
> > 953,7
> > > +1005,7 @@ route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[],
> > uint8_t nb_pkts)
> > >  		}
> > >
> > >  		if (pkt_hop == -1) {
> > > -			rte_pktmbuf_free(pkts[i]);
> > > +			free_pkt(pkts[i]);
> > >  			continue;
> > >  		}
> > >  		send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IPV6);
> > @@
> > > -1099,6 +1151,10 @@ ipsec_poll_mode_worker(void)
> > >  	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
> > >  			/ US_PER_S * BURST_TX_DRAIN_US;
> > >  	struct lcore_rx_queue *rxql;
> > > +#if (STATS_INTERVAL > 0)
> > > +	const uint64_t timer_period = STATS_INTERVAL * rte_get_timer_hz();
> > > +	uint64_t timer_tsc = 0;
> > > +#endif /* STATS_INTERVAL */
> > >
> > >  	prev_tsc = 0;
> > >  	lcore_id = rte_lcore_id();
> > > @@ -1159,6 +1215,19 @@ ipsec_poll_mode_worker(void)
> > >  			drain_tx_buffers(qconf);
> > >  			drain_crypto_buffers(qconf);
> > >  			prev_tsc = cur_tsc;
> > > +#if (STATS_INTERVAL > 0)
> > > +			if (lcore_id == rte_get_master_lcore()) {
> > > +				/* advance the timer */
> > > +				timer_tsc += diff_tsc;
> > > +
> > > +				/* if timer has reached its timeout */
> > > +				if (unlikely(timer_tsc >= timer_period)) {
> > > +					print_stats();
> > > +					/* reset the timer */
> > > +					timer_tsc = 0;
> > > +				}
> > > +			}
> > > +#endif /* STATS_INTERVAL */
> > >  		}
> > >
> > >  		for (i = 0; i < qconf->nb_rx_queue; ++i) { @@ -1169,8 +1238,10
> > @@
> > > ipsec_poll_mode_worker(void)
> > >  			nb_rx = rte_eth_rx_burst(portid, queueid,
> > >  					pkts, MAX_PKT_BURST);
> > >
> > > -			if (nb_rx > 0)
> > > +			if (nb_rx > 0) {
> > > +				core_stats_update_rx(nb_rx);
> > >  				process_pkts(qconf, pkts, nb_rx, portid);
> > > +			}
> > >
> > >  			/* dequeue and process completed crypto-ops */
> > >  			if (is_unprotected_port(portid))
> > > diff --git a/examples/ipsec-secgw/ipsec-secgw.h
> > > b/examples/ipsec-secgw/ipsec-secgw.h
> > > index 4b53cb5..5b3561f 100644
> > > --- a/examples/ipsec-secgw/ipsec-secgw.h
> > > +++ b/examples/ipsec-secgw/ipsec-secgw.h
> > > @@ -6,6 +6,8 @@
> > >
> > >  #include <stdbool.h>
> > >
> > > +#define STATS_INTERVAL 0
> > > +
> > >  #define NB_SOCKETS 4
> > >
> > >  #define MAX_PKT_BURST 32
> > > @@ -69,6 +71,17 @@ struct ethaddr_info {
> > >  	uint64_t src, dst;
> > >  };
> > >
> > > +#if (STATS_INTERVAL > 0)
> > > +struct ipsec_core_statistics {
> > > +	uint64_t tx;
> > > +	uint64_t rx;
> > > +	uint64_t dropped;
> > > +	uint64_t burst_rx;
> > > +} __rte_cache_aligned;
> > > +
> > > +struct ipsec_core_statistics core_statistics[RTE_MAX_LCORE]; #endif
> > > +/* STATS_INTERVAL */
> > > +
> > >  extern struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS];
> > >
> > >  /* Port mask to identify the unprotected ports */ @@ -85,4 +98,59 @@
> > > is_unprotected_port(uint16_t port_id)
> > >  	return unprotected_port_mask & (1 << port_id);  }
> > >
> > > +static inline void
> > > +core_stats_update_rx(int n)
> > > +{
> > > +#if (STATS_INTERVAL > 0)
> > > +	int lcore_id = rte_lcore_id();
> > > +	core_statistics[lcore_id].rx += n;
> > > +	if (n == MAX_PKT_BURST)
> > > +		core_statistics[lcore_id].burst_rx += n; #else
> > > +	RTE_SET_USED(n);
> > > +#endif /* STATS_INTERVAL */
> > > +}
> > > +
> > > +static inline void
> > > +core_stats_update_tx(int n)
> > > +{
> > > +#if (STATS_INTERVAL > 0)
> > > +	int lcore_id = rte_lcore_id();
> > > +	core_statistics[lcore_id].tx += n;
> > > +#else
> > > +	RTE_SET_USED(n);
> > > +#endif /* STATS_INTERVAL */
> > > +}
> > > +
> > > +static inline void
> > > +core_stats_update_drop(int n)
> > > +{
> > > +#if (STATS_INTERVAL > 0)
> > > +	int lcore_id = rte_lcore_id();
> > > +	core_statistics[lcore_id].dropped += n; #else
> > > +	RTE_SET_USED(n);
> > > +#endif /* STATS_INTERVAL */
> > > +}
> > > +
> > > +/* helper routine to free bulk of packets */ static inline void
> > > +free_pkts(struct rte_mbuf *mb[], uint32_t n) {
> > > +	uint32_t i;
> > > +
> > > +	for (i = 0; i != n; i++)
> > > +		rte_pktmbuf_free(mb[i]);
> > > +
> > > +	core_stats_update_drop(n);
> > > +}
> > > +
> > > +/* helper routine to free single packet */ static inline void
> > > +free_pkt(struct rte_mbuf *mb) {
> > > +	rte_pktmbuf_free(mb);
> > > +	core_stats_update_drop(1);
> > > +}
> > > +
> > >  #endif /* _IPSEC_SECGW_H_ */
> > > diff --git a/examples/ipsec-secgw/ipsec.c
> > > b/examples/ipsec-secgw/ipsec.c index bf88d80..351f1f1 100644
> > > --- a/examples/ipsec-secgw/ipsec.c
> > > +++ b/examples/ipsec-secgw/ipsec.c
> > > @@ -500,7 +500,7 @@ enqueue_cop_burst(struct cdev_qp *cqp)
> > >  			cqp->id, cqp->qp, ret, len);
> > >  			/* drop packets that we fail to enqueue */
> > >  			for (i = ret; i < len; i++)
> > > -				rte_pktmbuf_free(cqp->buf[i]->sym->m_src);
> > > +				free_pkt(cqp->buf[i]->sym->m_src);
> > >  	}
> > >  	cqp->in_flight += ret;
> > >  	cqp->len = 0;
> > > @@ -528,7 +528,7 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct
> > > ipsec_ctx *ipsec_ctx,
> > >
> > >  	for (i = 0; i < nb_pkts; i++) {
> > >  		if (unlikely(sas[i] == NULL)) {
> > > -			rte_pktmbuf_free(pkts[i]);
> > > +			free_pkt(pkts[i]);
> > >  			continue;
> > >  		}
> > >
> > > @@ -549,7 +549,7 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct
> > > ipsec_ctx *ipsec_ctx,
> > >
> > >  			if ((unlikely(ips->security.ses == NULL)) &&
> > >  				create_lookaside_session(ipsec_ctx, sa, ips)) {
> > > -				rte_pktmbuf_free(pkts[i]);
> > > +				free_pkt(pkts[i]);
> > >  				continue;
> > >  			}
> > >
> > > @@ -563,7 +563,7 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct
> > ipsec_ctx *ipsec_ctx,
> > >  		case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
> > >  			RTE_LOG(ERR, IPSEC, "CPU crypto is not supported by
> > the"
> > >  					" legacy mode.");
> > > -			rte_pktmbuf_free(pkts[i]);
> > > +			free_pkt(pkts[i]);
> > >  			continue;
> > >
> > >  		case RTE_SECURITY_ACTION_TYPE_NONE:
> > > @@ -575,7 +575,7 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct
> > > ipsec_ctx *ipsec_ctx,
> > >
> > >  			if ((unlikely(ips->crypto.ses == NULL)) &&
> > >  				create_lookaside_session(ipsec_ctx, sa, ips)) {
> > > -				rte_pktmbuf_free(pkts[i]);
> > > +				free_pkt(pkts[i]);
> > >  				continue;
> > >  			}
> > >
> > > @@ -584,7 +584,7 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct
> > > ipsec_ctx *ipsec_ctx,
> > >
> > >  			ret = xform_func(pkts[i], sa, &priv->cop);
> > >  			if (unlikely(ret)) {
> > > -				rte_pktmbuf_free(pkts[i]);
> > > +				free_pkt(pkts[i]);
> > >  				continue;
> > >  			}
> > >  			break;
> > > @@ -608,7 +608,7 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct
> > > ipsec_ctx *ipsec_ctx,
> > >
> > >  			ret = xform_func(pkts[i], sa, &priv->cop);
> > >  			if (unlikely(ret)) {
> > > -				rte_pktmbuf_free(pkts[i]);
> > > +				free_pkt(pkts[i]);
> > >  				continue;
> > >  			}
> > >
> > > @@ -643,7 +643,7 @@ ipsec_inline_dequeue(ipsec_xform_fn xform_func,
> > struct ipsec_ctx *ipsec_ctx,
> > >  		sa = priv->sa;
> > >  		ret = xform_func(pkt, sa, &priv->cop);
> > >  		if (unlikely(ret)) {
> > > -			rte_pktmbuf_free(pkt);
> > > +			free_pkt(pkt);
> > >  			continue;
> > >  		}
> > >  		pkts[nb_pkts++] = pkt;
> > > @@ -690,13 +690,13 @@ ipsec_dequeue(ipsec_xform_fn xform_func, struct
> > ipsec_ctx *ipsec_ctx,
> > >  				RTE_SECURITY_ACTION_TYPE_NONE) {
> > >  				ret = xform_func(pkt, sa, cops[j]);
> > >  				if (unlikely(ret)) {
> > > -					rte_pktmbuf_free(pkt);
> > > +					free_pkt(pkt);
> > >  					continue;
> > >  				}
> > >  			} else if (ipsec_get_action_type(sa) ==
> > >
> > 	RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) {
> > >  				if (cops[j]->status) {
> > > -					rte_pktmbuf_free(pkt);
> > > +					free_pkt(pkt);
> > >  					continue;
> > >  				}
> > >  			}
> > > diff --git a/examples/ipsec-secgw/ipsec_process.c
> > > b/examples/ipsec-secgw/ipsec_process.c
> > > index bb2f2b8..4748299 100644
> > > --- a/examples/ipsec-secgw/ipsec_process.c
> > > +++ b/examples/ipsec-secgw/ipsec_process.c
> > > @@ -12,22 +12,13 @@
> > >  #include <rte_mbuf.h>
> > >
> > >  #include "ipsec.h"
> > > +#include "ipsec-secgw.h"
> > >
> > >  #define SATP_OUT_IPV4(t)	\
> > >  	((((t) & RTE_IPSEC_SATP_MODE_MASK) ==
> > RTE_IPSEC_SATP_MODE_TRANS && \
> > >  	(((t) & RTE_IPSEC_SATP_IPV_MASK) == RTE_IPSEC_SATP_IPV4)) || \
> > >  	((t) & RTE_IPSEC_SATP_MODE_MASK) ==
> > RTE_IPSEC_SATP_MODE_TUNLV4)
> > >
> > > -/* helper routine to free bulk of packets */ -static inline void
> > > -free_pkts(struct rte_mbuf *mb[], uint32_t n) -{
> > > -	uint32_t i;
> > > -
> > > -	for (i = 0; i != n; i++)
> > > -		rte_pktmbuf_free(mb[i]);
> > > -}
> > > -
> > >  /* helper routine to free bulk of crypto-ops and related packets */
> > > static inline void  free_cops(struct rte_crypto_op *cop[], uint32_t n)
> > > --
> > > 2.7.4
  
Ananyev, Konstantin May 13, 2020, 12:42 p.m. UTC | #6
> > > @@ -1099,6 +1151,10 @@ ipsec_poll_mode_worker(void)
> > >  	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
> > >  			/ US_PER_S * BURST_TX_DRAIN_US;
> > >  	struct lcore_rx_queue *rxql;
> > > +#if (STATS_INTERVAL > 0)
> > > +	const uint64_t timer_period = STATS_INTERVAL * rte_get_timer_hz();
> > > +	uint64_t timer_tsc = 0;
> > > +#endif /* STATS_INTERVAL */
> > >
> > >  	prev_tsc = 0;
> > >  	lcore_id = rte_lcore_id();
> > > @@ -1159,6 +1215,19 @@ ipsec_poll_mode_worker(void)
> > >  			drain_tx_buffers(qconf);
> > >  			drain_crypto_buffers(qconf);
> > >  			prev_tsc = cur_tsc;
> > > +#if (STATS_INTERVAL > 0)
> > > +			if (lcore_id == rte_get_master_lcore()) {
> > > +				/* advance the timer */
> > > +				timer_tsc += diff_tsc;
> > > +
> > > +				/* if timer has reached its timeout */
> > > +				if (unlikely(timer_tsc >= timer_period)) {
> > > +					print_stats();
> > > +					/* reset the timer */
> > > +					timer_tsc = 0;
> > > +				}
> > > +			}
> > > +#endif /* STATS_INTERVAL */
> >
> > I still don't understand why to do it in data-path thread.
> > As I said in previous comments, in DPDK there is a control thread that can be
> > used for such house-keeping tasks.
> > Why not to use it (via rte_alarm or so) and keep data-path threads less affected.
> 
> [Anoob] From Marvell's estimates, this stats collection and reporting will be expensive and so cannot be enabled by default. This is required
> for analyzing the traffic distribution in cases where the performance isn't scaling as expected.

Understood.

 > And this patch achieves the desired feature. 

Ok, but why not do it in a control (house-keeping) thread?
That would achieve the desired goal and keep the data-path unaffected.

> If Intel would like to improve the approach, that can be taken up as a separate patch.

This is not a vendor-specific part. 
You are making changes in common data-path code that is used by all ipsec-secgw users.
I think it is to everyone's benefit (and responsibility) to keep the common data-path
code clean, tidy and fast.
If we can avoid polluting it with extra code, I don't see a reason not to do it. 

> 
> >
> > >  		}
> > >
> > >  		for (i = 0; i < qconf->nb_rx_queue; ++i) { @@ -1169,8 +1238,10
> > @@
> > > ipsec_poll_mode_worker(void)
> > >  			nb_rx = rte_eth_rx_burst(portid, queueid,
> > >  					pkts, MAX_PKT_BURST);
> > >
> > > -			if (nb_rx > 0)
> > > +			if (nb_rx > 0) {
> > > +				core_stats_update_rx(nb_rx);
> > >  				process_pkts(qconf, pkts, nb_rx, portid);
> > > +			}
> > >
> > >  			/* dequeue and process completed crypto-ops */
> > >  			if (is_unprotected_port(portid))
> > > diff --git a/examples/ipsec-secgw/ipsec-secgw.h
> > > b/examples/ipsec-secgw/ipsec-secgw.h
> > > index 4b53cb5..5b3561f 100644
> > > --- a/examples/ipsec-secgw/ipsec-secgw.h
> > > +++ b/examples/ipsec-secgw/ipsec-secgw.h
> > > @@ -6,6 +6,8 @@
> > >
> > >  #include <stdbool.h>
> > >
> > > +#define STATS_INTERVAL 0
> >
> > Shouldn't it be:
> > #ifndef STATS_INTERVAL
> > #define STATS_INTERVAL	0
> > #endif
> > ?
> 
> [Anoob] Will update in v4.
> 
> >
> > To allow user specify statis interval via EXTRA_CFLAGS='-DSTATS_INTERVAL=10'
> > or so.
> >
> > > +
> > >  #define NB_SOCKETS 4
> > >
> > >  #define MAX_PKT_BURST 32
> > > @@ -69,6 +71,17 @@ struct ethaddr_info {
> > >  	uint64_t src, dst;
> > >  };
> > >
> > > +#if (STATS_INTERVAL > 0)
> > > +struct ipsec_core_statistics {
> > > +	uint64_t tx;
> > > +	uint64_t rx;
> > > +	uint64_t dropped;
> > > +	uint64_t burst_rx;
> > > +} __rte_cache_aligned;
> > > +
> > > +struct ipsec_core_statistics core_statistics[RTE_MAX_LCORE]; #endif
> > > +/* STATS_INTERVAL */
> > > +
> > >  extern struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS];
> > >
> > >  /* Port mask to identify the unprotected ports */ @@ -85,4 +98,59 @@
> > > is_unprotected_port(uint16_t port_id)
> > >  	return unprotected_port_mask & (1 << port_id);  }
> > >
> > > +static inline void
> > > +core_stats_update_rx(int n)
> > > +{
> > > +#if (STATS_INTERVAL > 0)
> > > +	int lcore_id = rte_lcore_id();
> > > +	core_statistics[lcore_id].rx += n;
> > > +	if (n == MAX_PKT_BURST)
> > > +		core_statistics[lcore_id].burst_rx += n; #else
> > > +	RTE_SET_USED(n);
> > > +#endif /* STATS_INTERVAL */
> > > +}
> > > +
> > > +static inline void
> > > +core_stats_update_tx(int n)
> > > +{
> > > +#if (STATS_INTERVAL > 0)
> > > +	int lcore_id = rte_lcore_id();
> > > +	core_statistics[lcore_id].tx += n;
> > > +#else
> > > +	RTE_SET_USED(n);
> > > +#endif /* STATS_INTERVAL */
> > > +}
> > > +
> > > +static inline void
> > > +core_stats_update_drop(int n)
> > > +{
> > > +#if (STATS_INTERVAL > 0)
> > > +	int lcore_id = rte_lcore_id();
> > > +	core_statistics[lcore_id].dropped += n; #else
> > > +	RTE_SET_USED(n);
> > > +#endif /* STATS_INTERVAL */
> > > +}
> > > +
> > > +/* helper routine to free bulk of packets */ static inline void
> > > +free_pkts(struct rte_mbuf *mb[], uint32_t n) {
> > > +	uint32_t i;
> > > +
> > > +	for (i = 0; i != n; i++)
> > > +		rte_pktmbuf_free(mb[i]);
> > > +
> > > +	core_stats_update_drop(n);
> > > +}
> > > +
> > > +/* helper routine to free single packet */ static inline void
> > > +free_pkt(struct rte_mbuf *mb) {
> > > +	rte_pktmbuf_free(mb);
> > > +	core_stats_update_drop(1);
> >
> > Probably just:
> > free_pkts(&mb, 1);
> > ?
> 
> [Anoob] Will update in v4.
> 
> >
> > > +}
> > > +
> > >  #endif /* _IPSEC_SECGW_H_ */
  
Anoob Joseph May 14, 2020, 4:11 a.m. UTC | #7
Hi Konstantin,

Please see inline.

Thanks,
Anoob

> -----Original Message-----
> From: Ananyev, Konstantin <konstantin.ananyev@intel.com>
> Sent: Wednesday, May 13, 2020 6:13 PM
> To: Anoob Joseph <anoobj@marvell.com>; Akhil Goyal <akhil.goyal@nxp.com>;
> Nicolau, Radu <radu.nicolau@intel.com>
> Cc: Narayana Prasad Raju Athreya <pathreya@marvell.com>; dev@dpdk.org
> Subject: [EXT] RE: [PATCH v3] examples/ipsec-secgw: add per core packet stats
> 
> External Email
> 
> ----------------------------------------------------------------------
> 
> > > > @@ -1099,6 +1151,10 @@ ipsec_poll_mode_worker(void)
> > > >  	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
> > > >  			/ US_PER_S * BURST_TX_DRAIN_US;
> > > >  	struct lcore_rx_queue *rxql;
> > > > +#if (STATS_INTERVAL > 0)
> > > > +	const uint64_t timer_period = STATS_INTERVAL * rte_get_timer_hz();
> > > > +	uint64_t timer_tsc = 0;
> > > > +#endif /* STATS_INTERVAL */
> > > >
> > > >  	prev_tsc = 0;
> > > >  	lcore_id = rte_lcore_id();
> > > > @@ -1159,6 +1215,19 @@ ipsec_poll_mode_worker(void)
> > > >  			drain_tx_buffers(qconf);
> > > >  			drain_crypto_buffers(qconf);
> > > >  			prev_tsc = cur_tsc;
> > > > +#if (STATS_INTERVAL > 0)
> > > > +			if (lcore_id == rte_get_master_lcore()) {
> > > > +				/* advance the timer */
> > > > +				timer_tsc += diff_tsc;
> > > > +
> > > > +				/* if timer has reached its timeout */
> > > > +				if (unlikely(timer_tsc >= timer_period)) {
> > > > +					print_stats();
> > > > +					/* reset the timer */
> > > > +					timer_tsc = 0;
> > > > +				}
> > > > +			}
> > > > +#endif /* STATS_INTERVAL */
> > >
> > > I still don't understand why to do it in data-path thread.
> > > As I said in previous comments, in DPDK there is a control thread
> > > that can be used for such house-keeping tasks.
> > > Why not to use it (via rte_alarm or so) and keep data-path threads less
> affected.
> >
> > [Anoob] From Marvell's estimates, this stats collection and reporting
> > will be expensive and so cannot be enabled by default. This is required for
> analyzing the traffic distribution in cases where the performance isn't scaling as
> expected.
> 
> Understood.
> 
>  > And this patch achieves the desired feature.
> 
> Ok, but why not to do it in control (house-keeping) thread?
> That would achieve desired goal and keep data-path unaffected.
> 
> > If Intel would like to improve the approach, that can be taken up as a separate
> patch.
> 
> This is not a vendor specific part.
> You making changes in common data-path code that is used by all ipsec-secgw
> users.
> I think it is everyone benefit (and responsibility) to keep common data-path code
> clean, tidy and fast.
> If we can avoid polluting it with extra code, I don't see a reason not to do it.

[Anoob] I cannot say I've fully understood what you have suggested. I've submitted v4 based on what I understood from your comments. Please have a look at it.
 
> 
> >
> > >
> > > >  		}
> > > >
> > > >  		for (i = 0; i < qconf->nb_rx_queue; ++i) { @@ -1169,8 +1238,10
> > > @@
> > > > ipsec_poll_mode_worker(void)
> > > >  			nb_rx = rte_eth_rx_burst(portid, queueid,
> > > >  					pkts, MAX_PKT_BURST);
> > > >
> > > > -			if (nb_rx > 0)
> > > > +			if (nb_rx > 0) {
> > > > +				core_stats_update_rx(nb_rx);
> > > >  				process_pkts(qconf, pkts, nb_rx, portid);
> > > > +			}
> > > >
> > > >  			/* dequeue and process completed crypto-ops */
> > > >  			if (is_unprotected_port(portid)) diff --git
> > > > a/examples/ipsec-secgw/ipsec-secgw.h
> > > > b/examples/ipsec-secgw/ipsec-secgw.h
> > > > index 4b53cb5..5b3561f 100644
> > > > --- a/examples/ipsec-secgw/ipsec-secgw.h
> > > > +++ b/examples/ipsec-secgw/ipsec-secgw.h
> > > > @@ -6,6 +6,8 @@
> > > >
> > > >  #include <stdbool.h>
> > > >
> > > > +#define STATS_INTERVAL 0
> > >
> > > Shouldn't it be:
> > > #ifndef STATS_INTERVAL
> > > #define STATS_INTERVAL	0
> > > #endif
> > > ?
> >
> > [Anoob] Will update in v4.
> >
> > >
> > > To allow user specify statis interval via EXTRA_CFLAGS='-
> DSTATS_INTERVAL=10'
> > > or so.
> > >
> > > > +
> > > >  #define NB_SOCKETS 4
> > > >
> > > >  #define MAX_PKT_BURST 32
> > > > @@ -69,6 +71,17 @@ struct ethaddr_info {
> > > >  	uint64_t src, dst;
> > > >  };
> > > >
> > > > +#if (STATS_INTERVAL > 0)
> > > > +struct ipsec_core_statistics {
> > > > +	uint64_t tx;
> > > > +	uint64_t rx;
> > > > +	uint64_t dropped;
> > > > +	uint64_t burst_rx;
> > > > +} __rte_cache_aligned;
> > > > +
> > > > +struct ipsec_core_statistics core_statistics[RTE_MAX_LCORE];
> > > > +#endif
> > > > +/* STATS_INTERVAL */
> > > > +
> > > >  extern struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS];
> > > >
> > > >  /* Port mask to identify the unprotected ports */ @@ -85,4 +98,59
> > > > @@ is_unprotected_port(uint16_t port_id)
> > > >  	return unprotected_port_mask & (1 << port_id);  }
> > > >
> > > > +static inline void
> > > > +core_stats_update_rx(int n)
> > > > +{
> > > > +#if (STATS_INTERVAL > 0)
> > > > +	int lcore_id = rte_lcore_id();
> > > > +	core_statistics[lcore_id].rx += n;
> > > > +	if (n == MAX_PKT_BURST)
> > > > +		core_statistics[lcore_id].burst_rx += n; #else
> > > > +	RTE_SET_USED(n);
> > > > +#endif /* STATS_INTERVAL */
> > > > +}
> > > > +
> > > > +static inline void
> > > > +core_stats_update_tx(int n)
> > > > +{
> > > > +#if (STATS_INTERVAL > 0)
> > > > +	int lcore_id = rte_lcore_id();
> > > > +	core_statistics[lcore_id].tx += n; #else
> > > > +	RTE_SET_USED(n);
> > > > +#endif /* STATS_INTERVAL */
> > > > +}
> > > > +
> > > > +static inline void
> > > > +core_stats_update_drop(int n)
> > > > +{
> > > > +#if (STATS_INTERVAL > 0)
> > > > +	int lcore_id = rte_lcore_id();
> > > > +	core_statistics[lcore_id].dropped += n; #else
> > > > +	RTE_SET_USED(n);
> > > > +#endif /* STATS_INTERVAL */
> > > > +}
> > > > +
> > > > +/* helper routine to free bulk of packets */ static inline void
> > > > +free_pkts(struct rte_mbuf *mb[], uint32_t n) {
> > > > +	uint32_t i;
> > > > +
> > > > +	for (i = 0; i != n; i++)
> > > > +		rte_pktmbuf_free(mb[i]);
> > > > +
> > > > +	core_stats_update_drop(n);
> > > > +}
> > > > +
> > > > +/* helper routine to free single packet */ static inline void
> > > > +free_pkt(struct rte_mbuf *mb) {
> > > > +	rte_pktmbuf_free(mb);
> > > > +	core_stats_update_drop(1);
> > >
> > > Probably just:
> > > free_pkts(&mb, 1);
> > > ?
> >
> > [Anoob] Will update in v4.
> >
> > >
> > > > +}
> > > > +
> > > >  #endif /* _IPSEC_SECGW_H_ */
  

Patch

diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 6d02341..e97a4f8 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -288,6 +288,59 @@  adjust_ipv6_pktlen(struct rte_mbuf *m, const struct rte_ipv6_hdr *iph,
 	}
 }
 
+#if (STATS_INTERVAL > 0)
+
+/* Print out statistics on packet distribution */
+static void
+print_stats(void)
+{
+	uint64_t total_packets_dropped, total_packets_tx, total_packets_rx;
+	unsigned int coreid;
+	float burst_percent;
+
+	total_packets_dropped = 0;
+	total_packets_tx = 0;
+	total_packets_rx = 0;
+
+	const char clr[] = { 27, '[', '2', 'J', '\0' };
+
+	/* Clear screen and move to top left */
+	printf("%s", clr);
+
+	printf("\nCore statistics ====================================");
+
+	for (coreid = 0; coreid < RTE_MAX_LCORE; coreid++) {
+		/* skip disabled cores */
+		if (rte_lcore_is_enabled(coreid) == 0)
+			continue;
+		burst_percent = (float)(core_statistics[coreid].burst_rx * 100)/
+					core_statistics[coreid].rx;
+		printf("\nStatistics for core %u ------------------------------"
+			   "\nPackets received: %20"PRIu64
+			   "\nPackets sent: %24"PRIu64
+			   "\nPackets dropped: %21"PRIu64
+			   "\nBurst percent: %23.2f",
+			   coreid,
+			   core_statistics[coreid].rx,
+			   core_statistics[coreid].tx,
+			   core_statistics[coreid].dropped,
+			   burst_percent);
+
+		total_packets_dropped += core_statistics[coreid].dropped;
+		total_packets_tx += core_statistics[coreid].tx;
+		total_packets_rx += core_statistics[coreid].rx;
+	}
+	printf("\nAggregate statistics ==============================="
+		   "\nTotal packets received: %14"PRIu64
+		   "\nTotal packets sent: %18"PRIu64
+		   "\nTotal packets dropped: %15"PRIu64,
+		   total_packets_rx,
+		   total_packets_tx,
+		   total_packets_dropped);
+	printf("\n====================================================\n");
+}
+#endif /* STATS_INTERVAL */
+
 static inline void
 prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
 {
@@ -333,7 +386,7 @@  prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
 
 		/* drop packet when IPv6 header exceeds first segment length */
 		if (unlikely(l3len > pkt->data_len)) {
-			rte_pktmbuf_free(pkt);
+			free_pkt(pkt);
 			return;
 		}
 
@@ -350,7 +403,7 @@  prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
 		/* Unknown/Unsupported type, drop the packet */
 		RTE_LOG(ERR, IPSEC, "Unsupported packet type 0x%x\n",
 			rte_be_to_cpu_16(eth->ether_type));
-		rte_pktmbuf_free(pkt);
+		free_pkt(pkt);
 		return;
 	}
 
@@ -477,9 +530,12 @@  send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
 	prepare_tx_burst(m_table, n, port, qconf);
 
 	ret = rte_eth_tx_burst(port, queueid, m_table, n);
+
+	core_stats_update_tx(ret);
+
 	if (unlikely(ret < n)) {
 		do {
-			rte_pktmbuf_free(m_table[ret]);
+			free_pkt(m_table[ret]);
 		} while (++ret < n);
 	}
 
@@ -525,7 +581,7 @@  send_fragment_packet(struct lcore_conf *qconf, struct rte_mbuf *m,
 			"error code: %d\n",
 			__func__, m->pkt_len, rte_errno);
 
-	rte_pktmbuf_free(m);
+	free_pkt(m);
 	return len;
 }
 
@@ -550,7 +606,7 @@  send_single_packet(struct rte_mbuf *m, uint16_t port, uint8_t proto)
 	} else if (frag_tbl_sz > 0)
 		len = send_fragment_packet(qconf, m, port, proto);
 	else
-		rte_pktmbuf_free(m);
+		free_pkt(m);
 
 	/* enough pkts to be sent */
 	if (unlikely(len == MAX_PKT_BURST)) {
@@ -584,19 +640,19 @@  inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip,
 			continue;
 		}
 		if (res == DISCARD) {
-			rte_pktmbuf_free(m);
+			free_pkt(m);
 			continue;
 		}
 
 		/* Only check SPI match for processed IPSec packets */
 		if (i < lim && ((m->ol_flags & PKT_RX_SEC_OFFLOAD) == 0)) {
-			rte_pktmbuf_free(m);
+			free_pkt(m);
 			continue;
 		}
 
 		sa_idx = res - 1;
 		if (!inbound_sa_check(sa, m, sa_idx)) {
-			rte_pktmbuf_free(m);
+			free_pkt(m);
 			continue;
 		}
 		ip->pkts[j++] = m;
@@ -631,7 +687,7 @@  split46_traffic(struct ipsec_traffic *trf, struct rte_mbuf *mb[], uint32_t num)
 					offsetof(struct ip6_hdr, ip6_nxt));
 			n6++;
 		} else
-			rte_pktmbuf_free(m);
+			free_pkt(m);
 	}
 
 	trf->ip4.num = n4;
@@ -683,7 +739,7 @@  outbound_sp(struct sp_ctx *sp, struct traffic_type *ip,
 		m = ip->pkts[i];
 		sa_idx = ip->res[i] - 1;
 		if (ip->res[i] == DISCARD)
-			rte_pktmbuf_free(m);
+			free_pkt(m);
 		else if (ip->res[i] == BYPASS)
 			ip->pkts[j++] = m;
 		else {
@@ -702,8 +758,7 @@  process_pkts_outbound(struct ipsec_ctx *ipsec_ctx,
 	uint16_t idx, nb_pkts_out, i;
 
 	/* Drop any IPsec traffic from protected ports */
-	for (i = 0; i < traffic->ipsec.num; i++)
-		rte_pktmbuf_free(traffic->ipsec.pkts[i]);
+	free_pkts(traffic->ipsec.pkts, traffic->ipsec.num);
 
 	traffic->ipsec.num = 0;
 
@@ -743,14 +798,12 @@  process_pkts_inbound_nosp(struct ipsec_ctx *ipsec_ctx,
 	uint32_t nb_pkts_in, i, idx;
 
 	/* Drop any IPv4 traffic from unprotected ports */
-	for (i = 0; i < traffic->ip4.num; i++)
-		rte_pktmbuf_free(traffic->ip4.pkts[i]);
+	free_pkts(traffic->ip4.pkts, traffic->ip4.num);
 
 	traffic->ip4.num = 0;
 
 	/* Drop any IPv6 traffic from unprotected ports */
-	for (i = 0; i < traffic->ip6.num; i++)
-		rte_pktmbuf_free(traffic->ip6.pkts[i]);
+	free_pkts(traffic->ip6.pkts, traffic->ip6.num);
 
 	traffic->ip6.num = 0;
 
@@ -786,8 +839,7 @@  process_pkts_outbound_nosp(struct ipsec_ctx *ipsec_ctx,
 	struct ip *ip;
 
 	/* Drop any IPsec traffic from protected ports */
-	for (i = 0; i < traffic->ipsec.num; i++)
-		rte_pktmbuf_free(traffic->ipsec.pkts[i]);
+	free_pkts(traffic->ipsec.pkts, traffic->ipsec.num);
 
 	n = 0;
 
@@ -901,7 +953,7 @@  route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
 		}
 
 		if ((pkt_hop & RTE_LPM_LOOKUP_SUCCESS) == 0) {
-			rte_pktmbuf_free(pkts[i]);
+			free_pkt(pkts[i]);
 			continue;
 		}
 		send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IP);
@@ -953,7 +1005,7 @@  route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
 		}
 
 		if (pkt_hop == -1) {
-			rte_pktmbuf_free(pkts[i]);
+			free_pkt(pkts[i]);
 			continue;
 		}
 		send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IPV6);
@@ -1099,6 +1151,10 @@  ipsec_poll_mode_worker(void)
 	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
 			/ US_PER_S * BURST_TX_DRAIN_US;
 	struct lcore_rx_queue *rxql;
+#if (STATS_INTERVAL > 0)
+	const uint64_t timer_period = STATS_INTERVAL * rte_get_timer_hz();
+	uint64_t timer_tsc = 0;
+#endif /* STATS_INTERVAL */
 
 	prev_tsc = 0;
 	lcore_id = rte_lcore_id();
@@ -1159,6 +1215,19 @@  ipsec_poll_mode_worker(void)
 			drain_tx_buffers(qconf);
 			drain_crypto_buffers(qconf);
 			prev_tsc = cur_tsc;
+#if (STATS_INTERVAL > 0)
+			if (lcore_id == rte_get_master_lcore()) {
+				/* advance the timer */
+				timer_tsc += diff_tsc;
+
+				/* if timer has reached its timeout */
+				if (unlikely(timer_tsc >= timer_period)) {
+					print_stats();
+					/* reset the timer */
+					timer_tsc = 0;
+				}
+			}
+#endif /* STATS_INTERVAL */
 		}
 
 		for (i = 0; i < qconf->nb_rx_queue; ++i) {
@@ -1169,8 +1238,10 @@  ipsec_poll_mode_worker(void)
 			nb_rx = rte_eth_rx_burst(portid, queueid,
 					pkts, MAX_PKT_BURST);
 
-			if (nb_rx > 0)
+			if (nb_rx > 0) {
+				core_stats_update_rx(nb_rx);
 				process_pkts(qconf, pkts, nb_rx, portid);
+			}
 
 			/* dequeue and process completed crypto-ops */
 			if (is_unprotected_port(portid))
diff --git a/examples/ipsec-secgw/ipsec-secgw.h b/examples/ipsec-secgw/ipsec-secgw.h
index 4b53cb5..5b3561f 100644
--- a/examples/ipsec-secgw/ipsec-secgw.h
+++ b/examples/ipsec-secgw/ipsec-secgw.h
@@ -6,6 +6,8 @@ 
 
 #include <stdbool.h>
 
+#define STATS_INTERVAL 0
+
 #define NB_SOCKETS 4
 
 #define MAX_PKT_BURST 32
@@ -69,6 +71,17 @@  struct ethaddr_info {
 	uint64_t src, dst;
 };
 
+#if (STATS_INTERVAL > 0)
+struct ipsec_core_statistics {
+	uint64_t tx;
+	uint64_t rx;
+	uint64_t dropped;
+	uint64_t burst_rx;
+} __rte_cache_aligned;
+
+struct ipsec_core_statistics core_statistics[RTE_MAX_LCORE];
+#endif /* STATS_INTERVAL */
+
 extern struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS];
 
 /* Port mask to identify the unprotected ports */
@@ -85,4 +98,59 @@  is_unprotected_port(uint16_t port_id)
 	return unprotected_port_mask & (1 << port_id);
 }
 
+static inline void
+core_stats_update_rx(int n)
+{
+#if (STATS_INTERVAL > 0)
+	int lcore_id = rte_lcore_id();
+	core_statistics[lcore_id].rx += n;
+	if (n == MAX_PKT_BURST)
+		core_statistics[lcore_id].burst_rx += n;
+#else
+	RTE_SET_USED(n);
+#endif /* STATS_INTERVAL */
+}
+
+static inline void
+core_stats_update_tx(int n)
+{
+#if (STATS_INTERVAL > 0)
+	int lcore_id = rte_lcore_id();
+	core_statistics[lcore_id].tx += n;
+#else
+	RTE_SET_USED(n);
+#endif /* STATS_INTERVAL */
+}
+
+static inline void
+core_stats_update_drop(int n)
+{
+#if (STATS_INTERVAL > 0)
+	int lcore_id = rte_lcore_id();
+	core_statistics[lcore_id].dropped += n;
+#else
+	RTE_SET_USED(n);
+#endif /* STATS_INTERVAL */
+}
+
+/* helper routine to free bulk of packets */
+static inline void
+free_pkts(struct rte_mbuf *mb[], uint32_t n)
+{
+	uint32_t i;
+
+	for (i = 0; i != n; i++)
+		rte_pktmbuf_free(mb[i]);
+
+	core_stats_update_drop(n);
+}
+
+/* helper routine to free single packet */
+static inline void
+free_pkt(struct rte_mbuf *mb)
+{
+	rte_pktmbuf_free(mb);
+	core_stats_update_drop(1);
+}
+
 #endif /* _IPSEC_SECGW_H_ */
diff --git a/examples/ipsec-secgw/ipsec.c b/examples/ipsec-secgw/ipsec.c
index bf88d80..351f1f1 100644
--- a/examples/ipsec-secgw/ipsec.c
+++ b/examples/ipsec-secgw/ipsec.c
@@ -500,7 +500,7 @@  enqueue_cop_burst(struct cdev_qp *cqp)
 			cqp->id, cqp->qp, ret, len);
 			/* drop packets that we fail to enqueue */
 			for (i = ret; i < len; i++)
-				rte_pktmbuf_free(cqp->buf[i]->sym->m_src);
+				free_pkt(cqp->buf[i]->sym->m_src);
 	}
 	cqp->in_flight += ret;
 	cqp->len = 0;
@@ -528,7 +528,7 @@  ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
 
 	for (i = 0; i < nb_pkts; i++) {
 		if (unlikely(sas[i] == NULL)) {
-			rte_pktmbuf_free(pkts[i]);
+			free_pkt(pkts[i]);
 			continue;
 		}
 
@@ -549,7 +549,7 @@  ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
 
 			if ((unlikely(ips->security.ses == NULL)) &&
 				create_lookaside_session(ipsec_ctx, sa, ips)) {
-				rte_pktmbuf_free(pkts[i]);
+				free_pkt(pkts[i]);
 				continue;
 			}
 
@@ -563,7 +563,7 @@  ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
 		case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
 			RTE_LOG(ERR, IPSEC, "CPU crypto is not supported by the"
 					" legacy mode.");
-			rte_pktmbuf_free(pkts[i]);
+			free_pkt(pkts[i]);
 			continue;
 
 		case RTE_SECURITY_ACTION_TYPE_NONE:
@@ -575,7 +575,7 @@  ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
 
 			if ((unlikely(ips->crypto.ses == NULL)) &&
 				create_lookaside_session(ipsec_ctx, sa, ips)) {
-				rte_pktmbuf_free(pkts[i]);
+				free_pkt(pkts[i]);
 				continue;
 			}
 
@@ -584,7 +584,7 @@  ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
 
 			ret = xform_func(pkts[i], sa, &priv->cop);
 			if (unlikely(ret)) {
-				rte_pktmbuf_free(pkts[i]);
+				free_pkt(pkts[i]);
 				continue;
 			}
 			break;
@@ -608,7 +608,7 @@  ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
 
 			ret = xform_func(pkts[i], sa, &priv->cop);
 			if (unlikely(ret)) {
-				rte_pktmbuf_free(pkts[i]);
+				free_pkt(pkts[i]);
 				continue;
 			}
 
@@ -643,7 +643,7 @@  ipsec_inline_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
 		sa = priv->sa;
 		ret = xform_func(pkt, sa, &priv->cop);
 		if (unlikely(ret)) {
-			rte_pktmbuf_free(pkt);
+			free_pkt(pkt);
 			continue;
 		}
 		pkts[nb_pkts++] = pkt;
@@ -690,13 +690,13 @@  ipsec_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
 				RTE_SECURITY_ACTION_TYPE_NONE) {
 				ret = xform_func(pkt, sa, cops[j]);
 				if (unlikely(ret)) {
-					rte_pktmbuf_free(pkt);
+					free_pkt(pkt);
 					continue;
 				}
 			} else if (ipsec_get_action_type(sa) ==
 				RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) {
 				if (cops[j]->status) {
-					rte_pktmbuf_free(pkt);
+					free_pkt(pkt);
 					continue;
 				}
 			}
diff --git a/examples/ipsec-secgw/ipsec_process.c b/examples/ipsec-secgw/ipsec_process.c
index bb2f2b8..4748299 100644
--- a/examples/ipsec-secgw/ipsec_process.c
+++ b/examples/ipsec-secgw/ipsec_process.c
@@ -12,22 +12,13 @@ 
 #include <rte_mbuf.h>
 
 #include "ipsec.h"
+#include "ipsec-secgw.h"
 
 #define SATP_OUT_IPV4(t)	\
 	((((t) & RTE_IPSEC_SATP_MODE_MASK) == RTE_IPSEC_SATP_MODE_TRANS && \
 	(((t) & RTE_IPSEC_SATP_IPV_MASK) == RTE_IPSEC_SATP_IPV4)) || \
 	((t) & RTE_IPSEC_SATP_MODE_MASK) == RTE_IPSEC_SATP_MODE_TUNLV4)
 
-/* helper routine to free bulk of packets */
-static inline void
-free_pkts(struct rte_mbuf *mb[], uint32_t n)
-{
-	uint32_t i;
-
-	for (i = 0; i != n; i++)
-		rte_pktmbuf_free(mb[i]);
-}
-
 /* helper routine to free bulk of crypto-ops and related packets */
 static inline void
 free_cops(struct rte_crypto_op *cop[], uint32_t n)