[dpdk-stable] patch 'net/virtio: fix vectorized Rx queue rearm' has been queued to stable release 19.11.9

Christian Ehrhardt christian.ehrhardt at canonical.com
Mon May 17 18:21:06 CEST 2021


Hi,

FYI, your patch has been queued to stable release 19.11.9

Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 05/19/21, so please
shout if you have any objections.

Also note that after the patch there is a diff of the upstream commit vs the
patch applied to the branch. This indicates whether any rebasing was needed
to apply it to the stable branch. If there were code changes for rebasing
(i.e. not only metadata diffs), please double-check that the rebase was
done correctly.

Queued patches are on a temporary branch at:
https://github.com/cpaelzer/dpdk-stable-queue

This queued commit can be viewed at:
https://github.com/cpaelzer/dpdk-stable-queue/commit/a40eb7c57dc15bac2d74d3d4e2c13d6e8578a85a

Thanks.

Christian Ehrhardt <christian.ehrhardt at canonical.com>

---
From a40eb7c57dc15bac2d74d3d4e2c13d6e8578a85a Mon Sep 17 00:00:00 2001
From: Xueming Li <xuemingl at nvidia.com>
Date: Wed, 14 Apr 2021 22:14:04 +0800
Subject: [PATCH] net/virtio: fix vectorized Rx queue rearm

[ upstream commit d99088431ea5d6bb568c5cb5542688453084dcc6 ]

When an Rx queue works in vectorized mode with rxd <= 512, under traffic at
a high PPS rate, testpmd often starts up, receives rxd packets, and then
receives nothing further.

Testpmd starts with an rxq flush that tries to receive MAX_PKT_BURST (512)
packets and drop them. When the Rx burst size >= the Rx queue size, all
descriptors in the used queue are consumed without a rearm, so the device
cannot receive more packets. Subsequent Rx bursts return at once since no
used descriptors are found; the rearm logic is skipped and the Rx vq stays
starved.

To avoid starving the Rx vq, this patch always checks the available queue
and rearms it if needed, even when the device reports no used descriptors.

Fixes: fc3d66212fed ("virtio: add vector Rx")
Fixes: 2d7c37194ee4 ("net/virtio: add NEON based Rx handler")
Fixes: 52b5a707e6ca ("net/virtio: add Altivec Rx")

Signed-off-by: Xueming Li <xuemingl at nvidia.com>
Reviewed-by: David Christensen <drc at linux.vnet.ibm.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin at redhat.com>
---
 drivers/net/virtio/virtio_rxtx_simple_altivec.c | 12 ++++++------
 drivers/net/virtio/virtio_rxtx_simple_neon.c    | 12 ++++++------
 drivers/net/virtio/virtio_rxtx_simple_sse.c     | 12 ++++++------
 3 files changed, 18 insertions(+), 18 deletions(-)

diff --git a/drivers/net/virtio/virtio_rxtx_simple_altivec.c b/drivers/net/virtio/virtio_rxtx_simple_altivec.c
index 003b6ec3f6..19dc37e774 100644
--- a/drivers/net/virtio/virtio_rxtx_simple_altivec.c
+++ b/drivers/net/virtio/virtio_rxtx_simple_altivec.c
@@ -85,6 +85,12 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 	if (unlikely(nb_pkts < RTE_VIRTIO_DESC_PER_LOOP))
 		return 0;
 
+	if (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
+		virtio_rxq_rearm_vec(rxvq);
+		if (unlikely(virtqueue_kick_prepare(vq)))
+			virtqueue_notify(vq);
+	}
+
 	nb_used = VIRTQUEUE_NUSED(vq);
 
 	rte_compiler_barrier();
@@ -102,12 +108,6 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 
 	rte_prefetch0(rused);
 
-	if (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
-		virtio_rxq_rearm_vec(rxvq);
-		if (unlikely(virtqueue_kick_prepare(vq)))
-			virtqueue_notify(vq);
-	}
-
 	nb_total = nb_used;
 	ref_rx_pkts = rx_pkts;
 	for (nb_pkts_received = 0;
diff --git a/drivers/net/virtio/virtio_rxtx_simple_neon.c b/drivers/net/virtio/virtio_rxtx_simple_neon.c
index 992e71f010..588a3af2a1 100644
--- a/drivers/net/virtio/virtio_rxtx_simple_neon.c
+++ b/drivers/net/virtio/virtio_rxtx_simple_neon.c
@@ -83,6 +83,12 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 	if (unlikely(nb_pkts < RTE_VIRTIO_DESC_PER_LOOP))
 		return 0;
 
+	if (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
+		virtio_rxq_rearm_vec(rxvq);
+		if (unlikely(virtqueue_kick_prepare(vq)))
+			virtqueue_notify(vq);
+	}
+
 	nb_used = VIRTQUEUE_NUSED(vq);
 
 	rte_rmb();
@@ -100,12 +106,6 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 
 	rte_prefetch_non_temporal(rused);
 
-	if (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
-		virtio_rxq_rearm_vec(rxvq);
-		if (unlikely(virtqueue_kick_prepare(vq)))
-			virtqueue_notify(vq);
-	}
-
 	nb_total = nb_used;
 	ref_rx_pkts = rx_pkts;
 	for (nb_pkts_received = 0;
diff --git a/drivers/net/virtio/virtio_rxtx_simple_sse.c b/drivers/net/virtio/virtio_rxtx_simple_sse.c
index f9ec4ae699..9a45886820 100644
--- a/drivers/net/virtio/virtio_rxtx_simple_sse.c
+++ b/drivers/net/virtio/virtio_rxtx_simple_sse.c
@@ -85,6 +85,12 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 	if (unlikely(nb_pkts < RTE_VIRTIO_DESC_PER_LOOP))
 		return 0;
 
+	if (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
+		virtio_rxq_rearm_vec(rxvq);
+		if (unlikely(virtqueue_kick_prepare(vq)))
+			virtqueue_notify(vq);
+	}
+
 	nb_used = VIRTQUEUE_NUSED(vq);
 
 	rte_compiler_barrier();
@@ -102,12 +108,6 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 
 	rte_prefetch0(rused);
 
-	if (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
-		virtio_rxq_rearm_vec(rxvq);
-		if (unlikely(virtqueue_kick_prepare(vq)))
-			virtqueue_notify(vq);
-	}
-
 	nb_total = nb_used;
 	ref_rx_pkts = rx_pkts;
 	for (nb_pkts_received = 0;
-- 
2.31.1

---
  Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- -	2021-05-17 17:40:36.868113942 +0200
+++ 0191-net-virtio-fix-vectorized-Rx-queue-rearm.patch	2021-05-17 17:40:29.551812470 +0200
@@ -1 +1 @@
-From d99088431ea5d6bb568c5cb5542688453084dcc6 Mon Sep 17 00:00:00 2001
+From a40eb7c57dc15bac2d74d3d4e2c13d6e8578a85a Mon Sep 17 00:00:00 2001
@@ -5,0 +6,2 @@
+[ upstream commit d99088431ea5d6bb568c5cb5542688453084dcc6 ]
+
@@ -22 +23,0 @@
-Cc: stable at dpdk.org
@@ -34 +35 @@
-index 62e5100a48..7534974ef4 100644
+index 003b6ec3f6..19dc37e774 100644
@@ -47 +48 @@
- 	nb_used = virtqueue_nused(vq);
+ 	nb_used = VIRTQUEUE_NUSED(vq);
@@ -64 +65 @@
-index c8e4b13a02..7fd92d1b0c 100644
+index 992e71f010..588a3af2a1 100644
@@ -67 +68 @@
-@@ -84,6 +84,12 @@ virtio_recv_pkts_vec(void *rx_queue,
+@@ -83,6 +83,12 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
@@ -77,2 +78 @@
- 	/* virtqueue_nused has a load-acquire or rte_io_rmb inside */
- 	nb_used = virtqueue_nused(vq);
+ 	nb_used = VIRTQUEUE_NUSED(vq);
@@ -80 +80,2 @@
-@@ -100,12 +106,6 @@ virtio_recv_pkts_vec(void *rx_queue,
+ 	rte_rmb();
+@@ -100,12 +106,6 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
@@ -94 +95 @@
-index ff4eba33d6..7577f5e86d 100644
+index f9ec4ae699..9a45886820 100644
@@ -107 +108 @@
- 	nb_used = virtqueue_nused(vq);
+ 	nb_used = VIRTQUEUE_NUSED(vq);
@@ -109,2 +110,2 @@
- 	if (unlikely(nb_used == 0))
-@@ -100,12 +106,6 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ 	rte_compiler_barrier();
+@@ -102,12 +108,6 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,

