[dpdk-dev] [PATCH v2 07/25] bnxt: add support for xstats get/reset

Ajit Khaparde ajit.khaparde at broadcom.com
Fri May 26 20:39:23 CEST 2017


This patch adds support for the xstats get and reset dev_ops.

dev_ops added:
xstats_get, xstats_get_names, xstats_reset

HWRM commands added:
hwrm_port_qstats, hwrm_port_clr_stats

Signed-off-by: Ajit Khaparde <ajit.khaparde at broadcom.com>

--
v1->v2: regroup related patches and incorporate other review comments
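
For reviewers, a minimal usage sketch (not part of this patch) showing how an
application reaches these new dev_ops through the generic ethdev API; the
dump_port_xstats() helper, the port id handling and the error paths are
illustrative assumptions, not code from this series:

#include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>
#include <rte_ethdev.h>

/* Query, print and clear the extended stats of one port. */
static void dump_port_xstats(uint8_t port_id)
{
	struct rte_eth_xstat_name *names = NULL;
	struct rte_eth_xstat *xstats = NULL;
	int cnt, i;

	/* A NULL/0 call returns the number of xstats the PMD exposes. */
	cnt = rte_eth_xstats_get_names(port_id, NULL, 0);
	if (cnt <= 0)
		return;

	names = calloc(cnt, sizeof(*names));
	xstats = calloc(cnt, sizeof(*xstats));
	if (names == NULL || xstats == NULL)
		goto out;

	/* For a bnxt port these calls land in bnxt_dev_xstats_get_names_op()
	 * and bnxt_dev_xstats_get_op().
	 */
	if (rte_eth_xstats_get_names(port_id, names, cnt) != cnt ||
	    rte_eth_xstats_get(port_id, xstats, cnt) != cnt)
		goto out;

	for (i = 0; i < cnt; i++)
		printf("%s: %" PRIu64 "\n", names[i].name, xstats[i].value);

	/* Maps to bnxt_dev_xstats_reset_op(); on a VF or NPAR port the
	 * driver only logs that the operation is unsupported.
	 */
	rte_eth_xstats_reset(port_id);
out:
	free(names);
	free(xstats);
}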
---
 drivers/net/bnxt/bnxt.h        |   7 ++
 drivers/net/bnxt/bnxt_ethdev.c |  84 +++++++++++++++++
 drivers/net/bnxt/bnxt_hwrm.c   |  36 +++++++
 drivers/net/bnxt/bnxt_hwrm.h   |   3 +-
 drivers/net/bnxt/bnxt_stats.c  | 207 +++++++++++++++++++++++++++++++++++++++++
 drivers/net/bnxt/bnxt_stats.h  |  10 ++
 6 files changed, 346 insertions(+), 1 deletion(-)

diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index 3bc3f99..fde2202 100644
--- a/drivers/net/bnxt/bnxt.h
+++ b/drivers/net/bnxt/bnxt.h
@@ -130,6 +130,7 @@ struct bnxt {
 	uint32_t		flags;
 #define BNXT_FLAG_REGISTERED	(1 << 0)
 #define BNXT_FLAG_VF		(1 << 1)
+#define BNXT_FLAG_PORT_STATS	(1 << 2)
 #define BNXT_PF(bp)		(!((bp)->flags & BNXT_FLAG_VF))
 #define BNXT_VF(bp)		((bp)->flags & BNXT_FLAG_VF)
 #define BNXT_NPAR_ENABLED(bp)	((bp)->port_partition_type)
@@ -138,10 +139,16 @@ struct bnxt {
 	unsigned int		rx_nr_rings;
 	unsigned int		rx_cp_nr_rings;
 	struct bnxt_rx_queue **rx_queues;
+	const void		*rx_mem_zone;
+	struct rx_port_stats    *hw_rx_port_stats;
+	phys_addr_t		hw_rx_port_stats_map;
 
 	unsigned int		tx_nr_rings;
 	unsigned int		tx_cp_nr_rings;
 	struct bnxt_tx_queue **tx_queues;
+	const void		*tx_mem_zone;
+	struct tx_port_stats    *hw_tx_port_stats;
+	phys_addr_t		hw_tx_port_stats_map;
 
 	/* Default completion ring */
 	struct bnxt_cp_ring_info	*def_cp_ring;
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index e7288da..5de7567 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -533,6 +533,7 @@ static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
 		eth_dev->data->dev_link.link_status = 0;
 	}
 	bnxt_set_hwrm_link_config(bp, false);
+	bnxt_hwrm_port_clr_stats(bp);
 	bnxt_shutdown_nic(bp);
 	bp->dev_stopped = 1;
 }
@@ -1123,6 +1124,9 @@ static const struct eth_dev_ops bnxt_dev_ops = {
 	.flow_ctrl_set = bnxt_flow_ctrl_set_op,
 	.udp_tunnel_port_add  = bnxt_udp_tunnel_port_add_op,
 	.udp_tunnel_port_del  = bnxt_udp_tunnel_port_del_op,
+	.xstats_get = bnxt_dev_xstats_get_op,
+	.xstats_get_names = bnxt_dev_xstats_get_names_op,
+	.xstats_reset = bnxt_dev_xstats_reset_op,
 };
 
 static bool bnxt_vf_pciid(uint16_t id)
@@ -1182,7 +1186,11 @@ static int
 bnxt_dev_init(struct rte_eth_dev *eth_dev)
 {
 	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+	char mz_name[RTE_MEMZONE_NAMESIZE];
+	const struct rte_memzone *mz = NULL;
 	static int version_printed;
+	uint32_t total_alloc_len;
+	phys_addr_t mz_phys_addr;
 	struct bnxt *bp;
 	int rc;
 
@@ -1208,6 +1216,80 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
 	eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
 	eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;
 
+	if (BNXT_PF(bp) && pci_dev->id.device_id != BROADCOM_DEV_ID_NS2) {
+		snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
+			 "bnxt_%04x:%02x:%02x:%02x-%s", pci_dev->addr.domain,
+			 pci_dev->addr.bus, pci_dev->addr.devid,
+			 pci_dev->addr.function, "rx_port_stats");
+		mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
+		mz = rte_memzone_lookup(mz_name);
+		total_alloc_len = RTE_CACHE_LINE_ROUNDUP(
+				sizeof(struct rx_port_stats) + 512);
+		if (!mz) {
+			mz = rte_memzone_reserve(mz_name, total_alloc_len,
+						 SOCKET_ID_ANY,
+						 RTE_MEMZONE_2MB |
+						 RTE_MEMZONE_SIZE_HINT_ONLY);
+			if (mz == NULL)
+				return -ENOMEM;
+		}
+		memset(mz->addr, 0, mz->len);
+		mz_phys_addr = mz->phys_addr;
+		if ((unsigned long)mz->addr == mz_phys_addr) {
+			RTE_LOG(WARNING, PMD,
+				"Memzone physical address same as virtual.\n");
+			RTE_LOG(WARNING, PMD,
+				"Using rte_mem_virt2phy()\n");
+			mz_phys_addr = rte_mem_virt2phy(mz->addr);
+			if (mz_phys_addr == 0) {
+				RTE_LOG(ERR, PMD,
+				"unable to map address to physical memory\n");
+				return -ENOMEM;
+			}
+		}
+
+		bp->rx_mem_zone = (const void *)mz;
+		bp->hw_rx_port_stats = mz->addr;
+		bp->hw_rx_port_stats_map = mz_phys_addr;
+
+		snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
+			 "bnxt_%04x:%02x:%02x:%02x-%s", pci_dev->addr.domain,
+			 pci_dev->addr.bus, pci_dev->addr.devid,
+			 pci_dev->addr.function, "tx_port_stats");
+		mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
+		mz = rte_memzone_lookup(mz_name);
+		total_alloc_len = RTE_CACHE_LINE_ROUNDUP(
+				sizeof(struct tx_port_stats) + 512);
+		if (!mz) {
+			mz = rte_memzone_reserve(mz_name, total_alloc_len,
+						 SOCKET_ID_ANY,
+						 RTE_MEMZONE_2MB |
+						 RTE_MEMZONE_SIZE_HINT_ONLY);
+			if (mz == NULL)
+				return -ENOMEM;
+		}
+		memset(mz->addr, 0, mz->len);
+		mz_phys_addr = mz->phys_addr;
+		if ((unsigned long)mz->addr == mz_phys_addr) {
+			RTE_LOG(WARNING, PMD,
+				"Memzone physical address same as virtual.\n");
+			RTE_LOG(WARNING, PMD,
+				"Using rte_mem_virt2phy()\n");
+			mz_phys_addr = rte_mem_virt2phy(mz->addr);
+			if (mz_phys_addr == 0) {
+				RTE_LOG(ERR, PMD,
+				"unable to map address to physical memory\n");
+				return -ENOMEM;
+			}
+		}
+
+		bp->tx_mem_zone = (const void *)mz;
+		bp->hw_tx_port_stats = mz->addr;
+		bp->hw_tx_port_stats_map = mz_phys_addr;
+
+		bp->flags |= BNXT_FLAG_PORT_STATS;
+	}
+
 	rc = bnxt_alloc_hwrm_resources(bp);
 	if (rc) {
 		RTE_LOG(ERR, PMD,
@@ -1366,6 +1448,8 @@ bnxt_dev_uninit(struct rte_eth_dev *eth_dev) {
 	}
 	rc = bnxt_hwrm_func_driver_unregister(bp, 0);
 	bnxt_free_hwrm_resources(bp);
+	rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone);
+	rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone);
 	if (bp->dev_stopped == 0)
 		bnxt_dev_close_op(eth_dev);
 	if (bp->pf.vf_info)
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index dab4171..d942648 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -2246,3 +2246,39 @@ int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
 
 	return rc;
 }
+
+int bnxt_hwrm_port_qstats(struct bnxt *bp)
+{
+	struct hwrm_port_qstats_input req = {0};
+	struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
+	struct bnxt_pf_info *pf = &bp->pf;
+	int rc;
+
+	if (!(bp->flags & BNXT_FLAG_PORT_STATS))
+		return 0;
+
+	HWRM_PREP(req, PORT_QSTATS, -1, resp);
+	req.port_id = rte_cpu_to_le_16(pf->port_id);
+	req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
+	req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	HWRM_CHECK_RESULT;
+	return rc;
+}
+
+int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
+{
+	struct hwrm_port_clr_stats_input req = {0};
+	struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
+	struct bnxt_pf_info *pf = &bp->pf;
+	int rc;
+
+	if (!(bp->flags & BNXT_FLAG_PORT_STATS))
+		return 0;
+
+	HWRM_PREP(req, PORT_CLR_STATS, -1, resp);
+	req.port_id = rte_cpu_to_le_16(pf->port_id);
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	HWRM_CHECK_RESULT;
+	return rc;
+}
diff --git a/drivers/net/bnxt/bnxt_hwrm.h b/drivers/net/bnxt/bnxt_hwrm.h
index cee8fca..8f7d103 100644
--- a/drivers/net/bnxt/bnxt_hwrm.h
+++ b/drivers/net/bnxt/bnxt_hwrm.h
@@ -119,5 +119,6 @@ int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
 				uint8_t tunnel_type);
 void bnxt_free_tunnel_ports(struct bnxt *bp);
-
+int bnxt_hwrm_port_qstats(struct bnxt *bp);
+int bnxt_hwrm_port_clr_stats(struct bnxt *bp);
 #endif
diff --git a/drivers/net/bnxt/bnxt_stats.c b/drivers/net/bnxt/bnxt_stats.c
index 40c9cac..154dc0c 100644
--- a/drivers/net/bnxt/bnxt_stats.c
+++ b/drivers/net/bnxt/bnxt_stats.c
@@ -43,6 +43,128 @@
 #include "bnxt_txq.h"
 #include "hsi_struct_def_dpdk.h"
 
+static const struct bnxt_xstats_name_off bnxt_rx_stats_strings[] = {
+	{"rx_64b_frames", offsetof(struct rx_port_stats,
+				rx_64b_frames)},
+	{"rx_65b_127b_frames", offsetof(struct rx_port_stats,
+				rx_65b_127b_frames)},
+	{"rx_128b_255b_frames", offsetof(struct rx_port_stats,
+				rx_128b_255b_frames)},
+	{"rx_256b_511b_frames", offsetof(struct rx_port_stats,
+				rx_256b_511b_frames)},
+	{"rx_512b_1023b_frames", offsetof(struct rx_port_stats,
+				rx_512b_1023b_frames)},
+	{"rx_1024b_1518_frames", offsetof(struct rx_port_stats,
+				rx_1024b_1518_frames)},
+	{"rx_good_vlan_frames", offsetof(struct rx_port_stats,
+				rx_good_vlan_frames)},
+	{"rx_1519b_2047b_frames", offsetof(struct rx_port_stats,
+				rx_1519b_2047b_frames)},
+	{"rx_2048b_4095b_frames", offsetof(struct rx_port_stats,
+				rx_2048b_4095b_frames)},
+	{"rx_4096b_9216b_frames", offsetof(struct rx_port_stats,
+				rx_4096b_9216b_frames)},
+	{"rx_9217b_16383b_frames", offsetof(struct rx_port_stats,
+				rx_9217b_16383b_frames)},
+	{"rx_total_frames", offsetof(struct rx_port_stats,
+				rx_total_frames)},
+	{"rx_ucast_frames", offsetof(struct rx_port_stats,
+				rx_ucast_frames)},
+	{"rx_mcast_frames", offsetof(struct rx_port_stats,
+				rx_mcast_frames)},
+	{"rx_bcast_frames", offsetof(struct rx_port_stats,
+				rx_bcast_frames)},
+	{"rx_fcs_err_frames", offsetof(struct rx_port_stats,
+				rx_fcs_err_frames)},
+	{"rx_ctrl_frames", offsetof(struct rx_port_stats,
+				rx_ctrl_frames)},
+	{"rx_pause_frames", offsetof(struct rx_port_stats,
+				rx_pause_frames)},
+	{"rx_pfc_frames", offsetof(struct rx_port_stats,
+				rx_pfc_frames)},
+	{"rx_align_err_frames", offsetof(struct rx_port_stats,
+				rx_align_err_frames)},
+	{"rx_ovrsz_frames", offsetof(struct rx_port_stats,
+				rx_ovrsz_frames)},
+	{"rx_jbr_frames", offsetof(struct rx_port_stats,
+				rx_jbr_frames)},
+	{"rx_mtu_err_frames", offsetof(struct rx_port_stats,
+				rx_mtu_err_frames)},
+	{"rx_tagged_frames", offsetof(struct rx_port_stats,
+				rx_tagged_frames)},
+	{"rx_double_tagged_frames", offsetof(struct rx_port_stats,
+				rx_double_tagged_frames)},
+	{"rx_good_frames", offsetof(struct rx_port_stats,
+				rx_good_frames)},
+	{"rx_undrsz_frames", offsetof(struct rx_port_stats,
+				rx_undrsz_frames)},
+	{"rx_eee_lpi_events", offsetof(struct rx_port_stats,
+				rx_eee_lpi_events)},
+	{"rx_eee_lpi_duration", offsetof(struct rx_port_stats,
+				rx_eee_lpi_duration)},
+	{"rx_bytes", offsetof(struct rx_port_stats,
+				rx_bytes)},
+	{"rx_runt_bytes", offsetof(struct rx_port_stats,
+				rx_runt_bytes)},
+	{"rx_runt_frames", offsetof(struct rx_port_stats,
+				rx_runt_frames)},
+};
+
+static const struct bnxt_xstats_name_off bnxt_tx_stats_strings[] = {
+	{"tx_64b_frames", offsetof(struct tx_port_stats,
+				tx_64b_frames)},
+	{"tx_65b_127b_frames", offsetof(struct tx_port_stats,
+				tx_65b_127b_frames)},
+	{"tx_128b_255b_frames", offsetof(struct tx_port_stats,
+				tx_128b_255b_frames)},
+	{"tx_256b_511b_frames", offsetof(struct tx_port_stats,
+				tx_256b_511b_frames)},
+	{"tx_512b_1023b_frames", offsetof(struct tx_port_stats,
+				tx_512b_1023b_frames)},
+	{"tx_1024b_1518_frames", offsetof(struct tx_port_stats,
+				tx_1024b_1518_frames)},
+	{"tx_good_vlan_frames", offsetof(struct tx_port_stats,
+				tx_good_vlan_frames)},
+	{"tx_1519b_2047_frames", offsetof(struct tx_port_stats,
+				tx_1519b_2047_frames)},
+	{"tx_2048b_4095b_frames", offsetof(struct tx_port_stats,
+				tx_2048b_4095b_frames)},
+	{"tx_4096b_9216b_frames", offsetof(struct tx_port_stats,
+				tx_4096b_9216b_frames)},
+	{"tx_9217b_16383b_frames", offsetof(struct tx_port_stats,
+				tx_9217b_16383b_frames)},
+	{"tx_good_frames", offsetof(struct tx_port_stats,
+				tx_good_frames)},
+	{"tx_total_frames", offsetof(struct tx_port_stats,
+				tx_total_frames)},
+	{"tx_ucast_frames", offsetof(struct tx_port_stats,
+				tx_ucast_frames)},
+	{"tx_mcast_frames", offsetof(struct tx_port_stats,
+				tx_mcast_frames)},
+	{"tx_bcast_frames", offsetof(struct tx_port_stats,
+				tx_bcast_frames)},
+	{"tx_pause_frames", offsetof(struct tx_port_stats,
+				tx_pause_frames)},
+	{"tx_pfc_frames", offsetof(struct tx_port_stats,
+				tx_pfc_frames)},
+	{"tx_jabber_frames", offsetof(struct tx_port_stats,
+				tx_jabber_frames)},
+	{"tx_fcs_err_frames", offsetof(struct tx_port_stats,
+				tx_fcs_err_frames)},
+	{"tx_err", offsetof(struct tx_port_stats,
+				tx_err)},
+	{"tx_fifo_underruns", offsetof(struct tx_port_stats,
+				tx_fifo_underruns)},
+	{"tx_eee_lpi_events", offsetof(struct tx_port_stats,
+				tx_eee_lpi_events)},
+	{"tx_eee_lpi_duration", offsetof(struct tx_port_stats,
+				tx_eee_lpi_duration)},
+	{"tx_total_collisions", offsetof(struct tx_port_stats,
+				tx_total_collisions)},
+	{"tx_bytes", offsetof(struct tx_port_stats,
+				tx_bytes)},
+};
+
 /*
  * Statistics functions
  */
@@ -140,3 +262,88 @@ void bnxt_stats_reset_op(struct rte_eth_dev *eth_dev)
 
 	bnxt_clear_all_hwrm_stat_ctxs(bp);
 }
+
+int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,
+			   struct rte_eth_xstat *xstats, unsigned int n)
+{
+	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+
+	unsigned int count, i;
+
+	if (!(bp->flags & BNXT_FLAG_PORT_STATS)) {
+		RTE_LOG(ERR, PMD, "xstats not supported for VF\n");
+		return 0;
+	}
+
+	bnxt_hwrm_port_qstats(bp);
+
+	count = RTE_DIM(bnxt_rx_stats_strings) +
+		RTE_DIM(bnxt_tx_stats_strings);
+
+	if (n < count)
+		return count;
+
+	count = 0;
+	for (i = 0; i < RTE_DIM(bnxt_rx_stats_strings); i++) {
+		uint64_t *rx_stats = (uint64_t *)bp->hw_rx_port_stats;
+		xstats[count].value = rte_le_to_cpu_64(
+				*(uint64_t *)((char *)rx_stats +
+				bnxt_rx_stats_strings[i].offset));
+		count++;
+	}
+
+	for (i = 0; i < RTE_DIM(bnxt_tx_stats_strings); i++) {
+		uint64_t *tx_stats = (uint64_t *)bp->hw_tx_port_stats;
+		xstats[count].value = rte_le_to_cpu_64(
+				 *(uint64_t *)((char *)tx_stats +
+				bnxt_tx_stats_strings[i].offset));
+		count++;
+	}
+
+	return count;
+}
+
+int bnxt_dev_xstats_get_names_op(__rte_unused struct rte_eth_dev *eth_dev,
+	struct rte_eth_xstat_name *xstats_names,
+	__rte_unused unsigned int limit)
+{
+	const unsigned int stat_cnt = RTE_DIM(bnxt_rx_stats_strings) +
+				RTE_DIM(bnxt_tx_stats_strings);
+	unsigned int i, count;
+
+	if (xstats_names != NULL) {
+		count = 0;
+
+		for (i = 0; i < RTE_DIM(bnxt_rx_stats_strings); i++) {
+			snprintf(xstats_names[count].name,
+				sizeof(xstats_names[count].name),
+				"%s",
+				bnxt_rx_stats_strings[i].name);
+			count++;
+		}
+
+		for (i = 0; i < RTE_DIM(bnxt_tx_stats_strings); i++) {
+			snprintf(xstats_names[count].name,
+				sizeof(xstats_names[count].name),
+				"%s",
+				bnxt_tx_stats_strings[i].name);
+			count++;
+		}
+	}
+	return stat_cnt;
+}
+
+void bnxt_dev_xstats_reset_op(struct rte_eth_dev *eth_dev)
+{
+	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+
+	if (bp->flags & BNXT_FLAG_PORT_STATS && !BNXT_NPAR_PF(bp))
+		bnxt_hwrm_port_clr_stats(bp);
+
+	if (BNXT_VF(bp))
+		RTE_LOG(ERR, PMD, "Operation not supported on a VF device\n");
+	if (BNXT_NPAR_PF(bp))
+		RTE_LOG(ERR, PMD, "Operation not supported on a MF device\n");
+	if (!(bp->flags & BNXT_FLAG_PORT_STATS))
+		RTE_LOG(ERR, PMD, "Operation not supported\n");
+}
diff --git a/drivers/net/bnxt/bnxt_stats.h b/drivers/net/bnxt/bnxt_stats.h
index 65408a4..b6d133e 100644
--- a/drivers/net/bnxt/bnxt_stats.h
+++ b/drivers/net/bnxt/bnxt_stats.h
@@ -40,5 +40,15 @@ void bnxt_free_stats(struct bnxt *bp);
 void bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
 			   struct rte_eth_stats *bnxt_stats);
 void bnxt_stats_reset_op(struct rte_eth_dev *eth_dev);
+int bnxt_dev_xstats_get_names_op(__rte_unused struct rte_eth_dev *eth_dev,
+	struct rte_eth_xstat_name *xstats_names,
+	__rte_unused unsigned int limit);
+int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,
+			   struct rte_eth_xstat *xstats, unsigned int n);
+void bnxt_dev_xstats_reset_op(struct rte_eth_dev *eth_dev);
 
+struct bnxt_xstats_name_off {
+	char name[RTE_ETH_XSTATS_NAME_SIZE];
+	uint64_t offset;
+};
 #endif
-- 
2.10.1 (Apple Git-78)


