[dpdk-dev] [PATCH v3 1/1] net/pcap: imissed stats support

Ido Goshen <ido@cgstowernetworks.com>
Thu Feb 4 00:07:50 CET 2021


get the value from pcap_stats.ps_drop (see man pcap_stats)
the value is adjusted in these cases:
 - port stop - the pcap handle is closed, so its count would otherwise be lost
 - stats reset - pcap doesn't provide a reset API
 - rollover - the pcap counter is only 32 bits wide (u_int)
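The exposed value is derived as pcap + mnemonic - reset. A short
walk-through with illustrative numbers (not taken from the patch):

  pcap_stats() reads ps_drop=1000      -> imissed = 1000 + 0 - 0  = 1000
  port stop: mnemonic += 1000, pcap=0  -> imissed = 0 + 1000 - 0  = 1000
  port start, 50 new drops (ps_drop=50)
                                       -> imissed = 50 + 1000 - 0 = 1050
  stats reset: reset=50, mnemonic=0    -> imissed = 50 + 0 - 50   = 0
  30 more drops (ps_drop=80)           -> imissed = 80 + 0 - 50   = 30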

Signed-off-by: Ido Goshen <ido@cgstowernetworks.com>
---
v3:
* code cleanup: extract a dedicated struct and helper functions
* multi-stop support via mnemonic += accumulation
* rollover fixup (see the sketch after these notes)

v2:
* sum all queues (rx_missed_total += fix)
* NULL pcap protection
* stop/start persistence (counter won't reset on stop)
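A sketch of the rollover fixup with illustrative values: if the last
value read from pcap was 4294967290 and the next ps_drop read is 5,
the 32-bit counter must have wrapped, so mnemonic += UINT_MAX
approximates the completed wrap and keeps the exposed value monotonic:

  before wrap: imissed = 4294967290 + 0 = 4294967290
  after wrap:  imissed = 5 + 4294967295 = 4294967300

This is best effort; more than one wrap between two reads can't be
detected.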

 drivers/net/pcap/rte_eth_pcap.c | 61 +++++++++++++++++++++++++++++++++
 1 file changed, 61 insertions(+)
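Not part of the patch - a minimal sketch of how an application would
read the new counter through the standard ethdev stats API (port_id is
assumed to refer to an already configured and started pcap vdev):

  #include <inttypes.h>
  #include <stdio.h>
  #include <rte_ethdev.h>

  struct rte_eth_stats stats;

  /* imissed aggregates ps_drop over all rx queues of the port */
  if (rte_eth_stats_get(port_id, &stats) == 0)
          printf("imissed: %" PRIu64 "\n", stats.imissed);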

diff --git a/drivers/net/pcap/rte_eth_pcap.c b/drivers/net/pcap/rte_eth_pcap.c
index a32b1f3f3..16e8752f3 100644
--- a/drivers/net/pcap/rte_eth_pcap.c
+++ b/drivers/net/pcap/rte_eth_pcap.c
@@ -60,11 +60,21 @@ struct queue_stat {
 	volatile unsigned long err_pkts;
 };
 
+struct queue_missed_stat {
+	/* last value retrieved from pcap */
+	volatile unsigned int pcap;
+	/* stores values lost by pcap stop or rollover */
+	volatile unsigned long mnemonic;
+	/* value on last reset */
+	volatile unsigned long reset;
+};
+
 struct pcap_rx_queue {
 	uint16_t port_id;
 	uint16_t queue_id;
 	struct rte_mempool *mb_pool;
 	struct queue_stat rx_stat;
+	struct queue_missed_stat missed_stat;
 	char name[PATH_MAX];
 	char type[ETH_PCAP_ARG_MAXLEN];
 
@@ -144,6 +154,51 @@ RTE_LOG_REGISTER(eth_pcap_logtype, pmd.net.pcap, NOTICE);
 	rte_log(RTE_LOG_ ## level, eth_pcap_logtype, \
 		"%s(): " fmt "\n", __func__, ##args)
 
+static struct queue_missed_stat*
+queue_missed_stat_update(struct rte_eth_dev *dev, unsigned int qid)
+{
+	struct pmd_internals *internals = dev->data->dev_private;
+	struct queue_missed_stat *missed_stat =
+			&internals->rx_queue[qid].missed_stat;
+	const struct pmd_process_private *pp = dev->process_private;
+	pcap_t *pcap = pp->rx_pcap[qid];
+	struct pcap_stat stat;
+	if (!pcap || (pcap_stats(pcap, &stat) != 0))
+		return missed_stat;
+	/* rollover check - best effort fixup assuming single rollover */
+	if (stat.ps_drop < missed_stat->pcap)
+		missed_stat->mnemonic += UINT_MAX;
+	missed_stat->pcap = stat.ps_drop;
+	return missed_stat;
+}
+
+static void
+queue_missed_stat_on_stop_update(struct rte_eth_dev *dev, unsigned int qid)
+{
+	struct queue_missed_stat *missed_stat =
+			queue_missed_stat_update(dev, qid);
+	missed_stat->mnemonic += missed_stat->pcap;
+	/* zero the snapshot - it is now accounted in mnemonic */
+	missed_stat->pcap = 0;
+}
+
+static void
+queue_missed_stat_reset(struct rte_eth_dev *dev, unsigned int qid)
+{
+	struct queue_missed_stat *missed_stat =
+			queue_missed_stat_update(dev, qid);
+	missed_stat->reset = missed_stat->pcap;
+	missed_stat->mnemonic = 0;
+}
+
+static unsigned long
+queue_missed_stat_get(struct rte_eth_dev *dev, unsigned int qid)
+{
+	const struct queue_missed_stat *missed_stat =
+			queue_missed_stat_update(dev, qid);
+	return missed_stat->pcap + missed_stat->mnemonic - missed_stat->reset;
+}
+
 static int
 eth_pcap_rx_jumbo(struct rte_mempool *mb_pool, struct rte_mbuf *mbuf,
 		const u_char *data, uint16_t data_len)
@@ -621,6 +674,7 @@ eth_dev_stop(struct rte_eth_dev *dev)
 
 	/* Special iface case. Single pcap is open and shared between tx/rx. */
 	if (internals->single_iface) {
+		queue_missed_stat_on_stop_update(dev, 0);
 		pcap_close(pp->tx_pcap[0]);
 		pp->tx_pcap[0] = NULL;
 		pp->rx_pcap[0] = NULL;
@@ -641,6 +695,7 @@ eth_dev_stop(struct rte_eth_dev *dev)
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		if (pp->rx_pcap[i] != NULL) {
+			queue_missed_stat_on_stop_update(dev, i);
 			pcap_close(pp->rx_pcap[i]);
 			pp->rx_pcap[i] = NULL;
 		}
@@ -685,6 +740,7 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 {
 	unsigned int i;
 	unsigned long rx_packets_total = 0, rx_bytes_total = 0;
+	unsigned long rx_missed_total = 0;
 	unsigned long tx_packets_total = 0, tx_bytes_total = 0;
 	unsigned long tx_packets_err_total = 0;
 	const struct pmd_internals *internal = dev->data->dev_private;
@@ -695,6 +751,7 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 		stats->q_ibytes[i] = internal->rx_queue[i].rx_stat.bytes;
 		rx_packets_total += stats->q_ipackets[i];
 		rx_bytes_total += stats->q_ibytes[i];
+		rx_missed_total += queue_missed_stat_get(dev, i);
 	}
 
 	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
@@ -708,6 +765,7 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 
 	stats->ipackets = rx_packets_total;
 	stats->ibytes = rx_bytes_total;
+	stats->imissed = rx_missed_total;
 	stats->opackets = tx_packets_total;
 	stats->obytes = tx_bytes_total;
 	stats->oerrors = tx_packets_err_total;
@@ -724,6 +782,7 @@ eth_stats_reset(struct rte_eth_dev *dev)
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		internal->rx_queue[i].rx_stat.pkts = 0;
 		internal->rx_queue[i].rx_stat.bytes = 0;
+		queue_missed_stat_reset(dev, i);
 	}
 
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
-- 
2.17.1


