[v2,2/3] net/bonding: support Tx prepare fail stats

Message ID 20220725040842.35027-3-fengchengwen@huawei.com (mailing list archive)
State Superseded, archived
Delegated to: Ferruh Yigit
Headers
Series add Tx prepare support for bonding driver |

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

fengchengwen July 25, 2022, 4:08 a.m. UTC
  If the Tx prepare fails, the bonding driver will free the corresponding
packets internally, and only the packets that pass Tx prepare are transmitted.

In this patch, the number of Tx prepare failures is counted, and the
result is added to the 'struct rte_eth_stats' oerrors field.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
---
 drivers/net/bonding/eth_bond_private.h |  7 +++++++
 drivers/net/bonding/rte_eth_bond_pmd.c | 16 ++++++++++++++++
 2 files changed, 23 insertions(+)
  

Patch

diff --git a/drivers/net/bonding/eth_bond_private.h b/drivers/net/bonding/eth_bond_private.h
index 9996f6673c..aa33fa0043 100644
--- a/drivers/net/bonding/eth_bond_private.h
+++ b/drivers/net/bonding/eth_bond_private.h
@@ -72,6 +72,13 @@  struct bond_tx_queue {
 	/**< Number of TX descriptors available for the queue */
 	struct rte_eth_txconf tx_conf;
 	/**< Copy of TX configuration structure for queue */
+
+	/*
+	 * The following fields are statistical values and may be updated
+	 * at runtime, so they start on a new cache line.
+	 */
+	uint64_t prepare_fails __rte_cache_aligned;
+	/**< Tx prepare fail cnt */
 };
 
 /** Bonded slave devices structure */
diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index c32c7e6c6c..84fd0e5a73 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -602,6 +602,7 @@  bond_ethdev_tx_wrap(struct bond_tx_queue *bd_tx_q, uint16_t slave_port_id,
 		rte_pktmbuf_free(fail_pkts[i]);
 	}
 
+	bd_tx_q->prepare_fails += fail_cnt;
 	if (fail_cnt == nb_pkts)
 		return nb_pkts;
 tx_burst:
@@ -2399,6 +2400,8 @@  bond_ethdev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
 	bd_tx_q->nb_tx_desc = nb_tx_desc;
 	memcpy(&(bd_tx_q->tx_conf), tx_conf, sizeof(bd_tx_q->tx_conf));
 
+	bd_tx_q->prepare_fails = 0;
+
 	dev->data->tx_queues[tx_queue_id] = bd_tx_q;
 
 	return 0;
@@ -2609,6 +2612,7 @@  bond_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 {
 	struct bond_dev_private *internals = dev->data->dev_private;
 	struct rte_eth_stats slave_stats;
+	struct bond_tx_queue *bd_tx_q;
 	int i, j;
 
 	for (i = 0; i < internals->slave_count; i++) {
@@ -2630,7 +2634,12 @@  bond_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 			stats->q_obytes[j] += slave_stats.q_obytes[j];
 			stats->q_errors[j] += slave_stats.q_errors[j];
 		}
+	}
 
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		bd_tx_q = (struct bond_tx_queue *)dev->data->tx_queues[i];
+		if (bd_tx_q)
+			stats->oerrors += bd_tx_q->prepare_fails;
 	}
 
 	return 0;
@@ -2640,6 +2649,7 @@  static int
 bond_ethdev_stats_reset(struct rte_eth_dev *dev)
 {
 	struct bond_dev_private *internals = dev->data->dev_private;
+	struct bond_tx_queue *bd_tx_q;
 	int i;
 	int err;
 	int ret;
@@ -2650,6 +2660,12 @@  bond_ethdev_stats_reset(struct rte_eth_dev *dev)
 			err = ret;
 	}
 
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		bd_tx_q = (struct bond_tx_queue *)dev->data->tx_queues[i];
+		if (bd_tx_q)
+			bd_tx_q->prepare_fails = 0;
+	}
+
 	return err;
 }