patch 'net/mana: avoid unnecessary assignments in data path' has been queued to stable release 22.11.3

Xueming Li <xuemingl@nvidia.com>
Sun Jun 25 08:34:08 CEST 2023


Hi,

FYI, your patch has been queued to stable release 22.11.3

Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 06/27/23, so please
shout if you have any objections.

Also note that after the patch there is a diff of the upstream commit vs the
patch applied to the branch. This indicates whether any rebasing was needed
to apply the patch to the stable branch. If there were code changes for
rebasing (i.e., not only metadata diffs), please double-check that the
rebase was done correctly.

Queued patches are on a temporary branch at:
https://git.dpdk.org/dpdk-stable/log/?h=22.11-staging

This queued commit can be viewed at:
https://git.dpdk.org/dpdk-stable/commit/?h=22.11-staging&id=5b1a78987f11be0d44d658feac20e72d5b517869

Thanks.

Xueming Li <xuemingl@nvidia.com>

---
From 5b1a78987f11be0d44d658feac20e72d5b517869 Mon Sep 17 00:00:00 2001
From: Long Li <longli@microsoft.com>
Date: Fri, 17 Mar 2023 16:32:43 -0700
Subject: [PATCH] net/mana: avoid unnecessary assignments in data path
Cc: Xueming Li <xuemingl@nvidia.com>

[ upstream commit b5dfcaece13add5d874d805197856c8b8aa643ab ]

The unnecessary assignments involve memset and waste CPU cycles.
Remove them to reduce CPU usage.

Fixes: 517ed6e2d590 ("net/mana: add basic driver with build environment")

Signed-off-by: Long Li <longli@microsoft.com>
---
 drivers/net/mana/gdma.c | 11 ++---------
 drivers/net/mana/mana.h |  2 +-
 drivers/net/mana/rx.c   |  9 ++++-----
 drivers/net/mana/tx.c   | 17 ++++++++++-------
 4 files changed, 17 insertions(+), 22 deletions(-)
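
For reviewers unfamiliar with the pattern, below is a minimal stand-alone
sketch of the two changes the commit makes, using made-up types rather than
the real mana/gdma definitions: dropping "= {0}" initializers on per-packet
stack structs (such an initializer is effectively a memset on every packet,
so every field the consumer reads must now be assigned explicitly), and
narrowing the gdma_post_work_request() out-parameter from a zeroed struct
to the single wqe_size_in_bu value the callers actually use.

/*
 * Hypothetical sketch only; struct work_request and the 32-byte unit are
 * stand-ins, not the driver's real gdma_work_request or alignment macro.
 */
#include <stdint.h>

struct work_request {
	uint64_t addr;
	uint32_t size;
	uint32_t flags;
};

/* After the change: report only the size (in basic units) the caller uses. */
static int
post_work_request(const struct work_request *req, uint32_t *wqe_size_in_bu)
{
	*wqe_size_in_bu = req->size / 32;	/* assume a 32-byte unit */
	return 0;
}

static int
send_one(uint64_t buf, uint32_t len)
{
	struct work_request req;	/* was "= {0}": a memset per packet */
	uint32_t wqe_size_in_bu;	/* was a zero-initialized info struct */

	/* Every field the callee reads is now assigned explicitly. */
	req.addr = buf;
	req.size = len;
	req.flags = 0;

	return post_work_request(&req, &wqe_size_in_bu);
}

int
main(void)
{
	return send_one(0x1000, 64);
}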

diff --git a/drivers/net/mana/gdma.c b/drivers/net/mana/gdma.c
index f637084137..7d5bb08927 100644
--- a/drivers/net/mana/gdma.c
+++ b/drivers/net/mana/gdma.c
@@ -123,7 +123,7 @@ write_scatter_gather_list(uint8_t *work_queue_head_pointer,
 int
 gdma_post_work_request(struct mana_gdma_queue *queue,
 		       struct gdma_work_request *work_req,
-		       struct gdma_posted_wqe_info *wqe_info)
+		       uint32_t *wqe_size_in_bu)
 {
 	uint32_t client_oob_size =
 		work_req->inline_oob_size_in_bytes >
@@ -149,14 +149,7 @@ gdma_post_work_request(struct mana_gdma_queue *queue,
 	DP_LOG(DEBUG, "client_oob_size %u sgl_data_size %u wqe_size %u",
 	       client_oob_size, sgl_data_size, wqe_size);
 
-	if (wqe_info) {
-		wqe_info->wqe_index =
-			((queue->head * GDMA_WQE_ALIGNMENT_UNIT_SIZE) &
-			 (queue->size - 1)) / GDMA_WQE_ALIGNMENT_UNIT_SIZE;
-		wqe_info->unmasked_queue_offset = queue->head;
-		wqe_info->wqe_size_in_bu =
-			wqe_size / GDMA_WQE_ALIGNMENT_UNIT_SIZE;
-	}
+	*wqe_size_in_bu = wqe_size / GDMA_WQE_ALIGNMENT_UNIT_SIZE;
 
 	wq_buffer_pointer = gdma_get_wqe_pointer(queue);
 	wq_buffer_pointer += write_dma_client_oob(wq_buffer_pointer, work_req,
diff --git a/drivers/net/mana/mana.h b/drivers/net/mana/mana.h
index 7b8c27df2a..ce16e7efff 100644
--- a/drivers/net/mana/mana.h
+++ b/drivers/net/mana/mana.h
@@ -462,7 +462,7 @@ int mana_rq_ring_doorbell(struct mana_rxq *rxq, uint8_t arm);
 
 int gdma_post_work_request(struct mana_gdma_queue *queue,
 			   struct gdma_work_request *work_req,
-			   struct gdma_posted_wqe_info *wqe_info);
+			   uint32_t *wqe_size_in_bu);
 uint8_t *gdma_get_wqe_pointer(struct mana_gdma_queue *queue);
 
 uint16_t mana_rx_burst(void *dpdk_rxq, struct rte_mbuf **rx_pkts,
diff --git a/drivers/net/mana/rx.c b/drivers/net/mana/rx.c
index 10392ae292..afd153424b 100644
--- a/drivers/net/mana/rx.c
+++ b/drivers/net/mana/rx.c
@@ -52,8 +52,8 @@ mana_alloc_and_post_rx_wqe(struct mana_rxq *rxq)
 {
 	struct rte_mbuf *mbuf = NULL;
 	struct gdma_sgl_element sgl[1];
-	struct gdma_work_request request = {0};
-	struct gdma_posted_wqe_info wqe_info = {0};
+	struct gdma_work_request request;
+	uint32_t wqe_size_in_bu;
 	struct mana_priv *priv = rxq->priv;
 	int ret;
 	struct mana_mr_cache *mr;
@@ -72,7 +72,6 @@ mana_alloc_and_post_rx_wqe(struct mana_rxq *rxq)
 	}
 
 	request.gdma_header.struct_size = sizeof(request);
-	wqe_info.gdma_header.struct_size = sizeof(wqe_info);
 
 	sgl[0].address = rte_cpu_to_le_64(rte_pktmbuf_mtod(mbuf, uint64_t));
 	sgl[0].memory_key = mr->lkey;
@@ -87,14 +86,14 @@ mana_alloc_and_post_rx_wqe(struct mana_rxq *rxq)
 	request.flags = 0;
 	request.client_data_unit = NOT_USING_CLIENT_DATA_UNIT;
 
-	ret = gdma_post_work_request(&rxq->gdma_rq, &request, &wqe_info);
+	ret = gdma_post_work_request(&rxq->gdma_rq, &request, &wqe_size_in_bu);
 	if (!ret) {
 		struct mana_rxq_desc *desc =
 			&rxq->desc_ring[rxq->desc_ring_head];
 
 		/* update queue for tracking pending packets */
 		desc->pkt = mbuf;
-		desc->wqe_size_in_bu = wqe_info.wqe_size_in_bu;
+		desc->wqe_size_in_bu = wqe_size_in_bu;
 		rxq->desc_ring_head = (rxq->desc_ring_head + 1) % rxq->num_desc;
 	} else {
 		DP_LOG(DEBUG, "failed to post recv ret %d", ret);
diff --git a/drivers/net/mana/tx.c b/drivers/net/mana/tx.c
index a45b5e289c..b593f98bb1 100644
--- a/drivers/net/mana/tx.c
+++ b/drivers/net/mana/tx.c
@@ -208,8 +208,8 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	for (uint16_t pkt_idx = 0; pkt_idx < nb_pkts; pkt_idx++) {
 		struct rte_mbuf *m_pkt = tx_pkts[pkt_idx];
 		struct rte_mbuf *m_seg = m_pkt;
-		struct transmit_oob_v2 tx_oob = {0};
-		struct one_sgl sgl = {0};
+		struct transmit_oob_v2 tx_oob;
+		struct one_sgl sgl;
 		uint16_t seg_idx;
 
 		/* Drop the packet if it exceeds max segments */
@@ -263,6 +263,8 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			tx_oob.short_oob.tx_compute_TCP_checksum = 1;
 			tx_oob.short_oob.tx_transport_header_offset =
 				m_pkt->l2_len + m_pkt->l3_len;
+		} else {
+			tx_oob.short_oob.tx_compute_TCP_checksum = 0;
 		}
 
 		if ((m_pkt->ol_flags & RTE_MBUF_F_TX_L4_MASK) ==
@@ -301,6 +303,8 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			}
 
 			tx_oob.short_oob.tx_compute_UDP_checksum = 1;
+		} else {
+			tx_oob.short_oob.tx_compute_UDP_checksum = 0;
 		}
 
 		tx_oob.short_oob.suppress_tx_CQE_generation = 0;
@@ -355,11 +359,10 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		if (seg_idx != m_pkt->nb_segs)
 			continue;
 
-		struct gdma_work_request work_req = {0};
-		struct gdma_posted_wqe_info wqe_info = {0};
+		struct gdma_work_request work_req;
+		uint32_t wqe_size_in_bu;
 
 		work_req.gdma_header.struct_size = sizeof(work_req);
-		wqe_info.gdma_header.struct_size = sizeof(wqe_info);
 
 		work_req.sgl = sgl.gdma_sgl;
 		work_req.num_sgl_elements = m_pkt->nb_segs;
@@ -370,14 +373,14 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		work_req.client_data_unit = NOT_USING_CLIENT_DATA_UNIT;
 
 		ret = gdma_post_work_request(&txq->gdma_sq, &work_req,
-					     &wqe_info);
+					     &wqe_size_in_bu);
 		if (!ret) {
 			struct mana_txq_desc *desc =
 				&txq->desc_ring[txq->desc_ring_head];
 
 			/* Update queue for tracking pending requests */
 			desc->pkt = m_pkt;
-			desc->wqe_size_in_bu = wqe_info.wqe_size_in_bu;
+			desc->wqe_size_in_bu = wqe_size_in_bu;
 			txq->desc_ring_head =
 				(txq->desc_ring_head + 1) % txq->num_desc;
 
-- 
2.25.1

---
  Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- -	2023-06-25 14:31:59.337143500 +0800
+++ 0030-net-mana-avoid-unnecessary-assignments-in-data-path.patch	2023-06-25 14:31:58.315773900 +0800
@@ -1 +1 @@
-From b5dfcaece13add5d874d805197856c8b8aa643ab Mon Sep 17 00:00:00 2001
+From 5b1a78987f11be0d44d658feac20e72d5b517869 Mon Sep 17 00:00:00 2001
@@ -4,0 +5,3 @@
+Cc: Xueming Li <xuemingl@nvidia.com>
+
+[ upstream commit b5dfcaece13add5d874d805197856c8b8aa643ab ]
@@ -10 +12,0 @@
-Cc: stable at dpdk.org

