patch 'net/vmxnet3: fix drop of empty segments in Tx' has been queued to stable release 22.11.3

Xueming Li <xuemingl at nvidia.com>
Sun Jun 25 08:34:13 CEST 2023


Hi,

FYI, your patch has been queued to stable release 22.11.3

Note that it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 06/27/23, so please
shout if you have any objections.

Also note that after the patch there is a diff of the upstream commit vs. the
patch applied to the branch. This shows whether any rebasing was needed to
apply the patch to the stable branch. If the rebase required code changes
(i.e. not only metadata diffs), please double-check that it was done
correctly.

Queued patches are on a temporary branch at:
https://git.dpdk.org/dpdk-stable/log/?h=22.11-staging

This queued commit can be viewed at:
https://git.dpdk.org/dpdk-stable/commit/?h=22.11-staging&id=799cc0612c1273aadc989d786269ecbf10752c9a

Thanks.

Xueming Li <xuemingl at nvidia.com>

---
From 799cc0612c1273aadc989d786269ecbf10752c9a Mon Sep 17 00:00:00 2001
From: Ronak Doshi <doshir at vmware.com>
Date: Mon, 8 May 2023 19:21:06 -0700
Subject: [PATCH] net/vmxnet3: fix drop of empty segments in Tx
Cc: Xueming Li <xuemingl at nvidia.com>

[ upstream commit b44f3e1381f3910be50d03c784f5c073c25245dd ]

When empty segments are dropped, some descriptor variables are updated
in the segment processing loop before the iteration is skipped. This can
leave the queue wedged, with every subsequent packet on that queue being
dropped.

Also move the check for an empty packet so that it catches the case of a
zero-length packet made up of multiple segments.

Fixes: d863f19efa4f ("net/vmxnet3: skip empty segments in transmission")

Signed-off-by: Ronak Doshi <doshir at vmware.com>
Acked-by: Jochen Behrens <jbehrens at vmware.com>
---
 drivers/net/vmxnet3/vmxnet3_rxtx.c | 33 +++++++++++++++++-------------
 1 file changed, 19 insertions(+), 14 deletions(-)

diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c
index a875ffec07..f4cade0954 100644
--- a/drivers/net/vmxnet3/vmxnet3_rxtx.c
+++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c
@@ -412,8 +412,8 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 	nb_tx = 0;
 	while (nb_tx < nb_pkts) {
-		Vmxnet3_GenericDesc *gdesc;
-		vmxnet3_buf_info_t *tbi;
+		Vmxnet3_GenericDesc *gdesc = NULL;
+		vmxnet3_buf_info_t *tbi = NULL;
 		uint32_t first2fill, avail, dw2;
 		struct rte_mbuf *txm = tx_pkts[nb_tx];
 		struct rte_mbuf *m_seg = txm;
@@ -457,18 +457,18 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			continue;
 		}
 
+		/* Skip empty packets */
+		if (unlikely(rte_pktmbuf_pkt_len(txm) == 0)) {
+			txq->stats.drop_total++;
+			rte_pktmbuf_free(txm);
+			nb_tx++;
+			continue;
+		}
+
 		if (txm->nb_segs == 1 &&
 		    rte_pktmbuf_pkt_len(txm) <= txq->txdata_desc_size) {
 			struct Vmxnet3_TxDataDesc *tdd;
 
-			/* Skip empty packets */
-			if (unlikely(rte_pktmbuf_pkt_len(txm) == 0)) {
-				txq->stats.drop_total++;
-				rte_pktmbuf_free(txm);
-				nb_tx++;
-				continue;
-			}
-
 			tdd = (struct Vmxnet3_TxDataDesc *)
 				((uint8 *)txq->data_ring.base +
 				 txq->cmd_ring.next2fill *
@@ -481,6 +481,10 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		dw2 = (txq->cmd_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
 		first2fill = txq->cmd_ring.next2fill;
 		do {
+			/* Skip empty segments */
+			if (unlikely(m_seg->data_len == 0))
+				continue;
+
 			/* Remember the transmit buffer for cleanup */
 			tbi = txq->cmd_ring.buf_info + txq->cmd_ring.next2fill;
 
@@ -490,10 +494,6 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			 */
 			gdesc = txq->cmd_ring.base + txq->cmd_ring.next2fill;
 
-			/* Skip empty segments */
-			if (unlikely(m_seg->data_len == 0))
-				continue;
-
 			if (copy_size) {
 				uint64 offset =
 					(uint64)txq->cmd_ring.next2fill *
@@ -514,6 +514,11 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			/* use the right gen for non-SOP desc */
 			dw2 = txq->cmd_ring.gen << VMXNET3_TXD_GEN_SHIFT;
 		} while ((m_seg = m_seg->next) != NULL);
+		/* We must have executed the complete preceding loop at least
+		 * once without skipping an empty segment, as we can't have
+		 * a packet with only empty segments.
+		 * Thus, tbi and gdesc have been initialized.
+		 */
 
 		/* set the last buf_info for the pkt */
 		tbi->m = txm;
-- 
2.25.1

---
  Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- -	2023-06-25 14:31:59.466290900 +0800
+++ 0035-net-vmxnet3-fix-drop-of-empty-segments-in-Tx.patch	2023-06-25 14:31:58.325773900 +0800
@@ -1 +1 @@
-From b44f3e1381f3910be50d03c784f5c073c25245dd Mon Sep 17 00:00:00 2001
+From 799cc0612c1273aadc989d786269ecbf10752c9a Mon Sep 17 00:00:00 2001
@@ -4,0 +5,3 @@
+Cc: Xueming Li <xuemingl at nvidia.com>
+
+[ upstream commit b44f3e1381f3910be50d03c784f5c073c25245dd ]
@@ -15 +17,0 @@
-Cc: stable at dpdk.org
@@ -24 +26 @@
-index 39ad0726cb..148f65383e 100644
+index a875ffec07..f4cade0954 100644
@@ -27 +29 @@
-@@ -418,8 +418,8 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+@@ -412,8 +412,8 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
@@ -38 +40 @@
-@@ -465,18 +465,18 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+@@ -457,18 +457,18 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
@@ -65 +67 @@
-@@ -489,6 +489,10 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+@@ -481,6 +481,10 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
@@ -76 +78 @@
-@@ -498,10 +502,6 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+@@ -490,10 +494,6 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
@@ -87 +89 @@
-@@ -522,6 +522,11 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+@@ -514,6 +514,11 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,

