[dpdk-stable] patch 'net/mlx5: fix Rx queue count calculation' has been queued to LTS release 18.11.11

Kevin Traynor ktraynor at redhat.com
Thu Nov 5 13:39:58 CET 2020


Hi,

FYI, your patch has been queued to LTS release 18.11.11

Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 11/10/20. So please
shout if anyone has objections.

Also note that after the patch there's a diff of the upstream commit vs the
patch applied to the branch. This will indicate if there was any rebasing
needed to apply to the stable branch. If there were code changes for rebasing
(i.e., not only metadata diffs), please double-check that the rebase was
correctly done.

Queued patches are on a temporary branch at:
https://github.com/kevintraynor/dpdk-stable-queue

This queued commit can be viewed at:
https://github.com/kevintraynor/dpdk-stable-queue/commit/e0a4de61c71f57aa0d3de220aaa23bb3d092cf60

Thanks.

Kevin.

---
>From e0a4de61c71f57aa0d3de220aaa23bb3d092cf60 Mon Sep 17 00:00:00 2001
From: Alexander Kozyrev <akozyrev at nvidia.com>
Date: Tue, 29 Sep 2020 18:36:23 +0000
Subject: [PATCH] net/mlx5: fix Rx queue count calculation

[ upstream commit d2d57605522d4a43be17e22e649e54033f6d8835 ]

There are a few discrepancies in the Rx queue count calculation.

The wrong index is used to calculate the number of used descriptors
in an Rx queue in case of the compressed CQE processing. The global
CQ index is used while we really need an internal index in a single
compressed session to get the right number of elements processed.

The total number of CQs should be used instead of the number of mbufs
to find out about the maximum number of Rx descriptors. These numbers
are not equal for the Multi-Packet Rx queue.

Allow the Rx queue count calculation for all possible Rx bursts since
CQ handling is the same for regular, vectorized, and multi-packet Rx
queues.

Fixes: 26f04883441a ("net/mlx5: support Rx queue count API")

Signed-off-by: Alexander Kozyrev <akozyrev at nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo at nvidia.com>
---
 drivers/net/mlx5/mlx5_rxtx.c | 27 ++++++++++++---------------
 1 file changed, 12 insertions(+), 15 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 38ce0e29a2..2896a9b4ef 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -431,17 +431,9 @@ rx_queue_count(struct mlx5_rxq_data *rxq)
 	struct rxq_zip *zip = &rxq->zip;
 	volatile struct mlx5_cqe *cqe;
+	unsigned int cq_ci = rxq->cq_ci;
 	const unsigned int cqe_n = (1 << rxq->cqe_n);
 	const unsigned int cqe_cnt = cqe_n - 1;
-	unsigned int cq_ci;
-	unsigned int used;
+	unsigned int used = 0;
 
-	/* if we are processing a compressed cqe */
-	if (zip->ai) {
-		used = zip->cqe_cnt - zip->ca;
-		cq_ci = zip->cq_ci;
-	} else {
-		used = 0;
-		cq_ci = rxq->cq_ci;
-	}
 	cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
 	while (check_cqe(cqe, cqe_n, cq_ci) == 0) {
@@ -451,5 +443,8 @@ rx_queue_count(struct mlx5_rxq_data *rxq)
 		op_own = cqe->op_own;
 		if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED)
-			n = rte_be_to_cpu_32(cqe->byte_cnt);
+			if (unlikely(zip->ai))
+				n = zip->cqe_cnt - zip->ai;
+			else
+				n = rte_be_to_cpu_32(cqe->byte_cnt);
 		else
 			n = 1;
@@ -458,5 +453,5 @@ rx_queue_count(struct mlx5_rxq_data *rxq)
 		cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
 	}
-	used = RTE_MIN(used, (1U << rxq->elts_n) - 1);
+	used = RTE_MIN(used, cqe_n);
 	return used;
 }
@@ -481,9 +476,10 @@ mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
 	struct rte_eth_dev *dev = ETH_DEV(rxq_ctrl->priv);
 
-	if (dev->rx_pkt_burst != mlx5_rx_burst) {
+	if (dev->rx_pkt_burst == NULL ||
+	    dev->rx_pkt_burst == removed_rx_burst) {
 		rte_errno = ENOTSUP;
 		return -rte_errno;
 	}
-	if (offset >= (1 << rxq->elts_n)) {
+	if (offset >= (1 << rxq->cqe_n)) {
 		rte_errno = EINVAL;
 		return -rte_errno;
@@ -513,5 +509,6 @@ mlx5_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	struct mlx5_rxq_data *rxq;
 
-	if (dev->rx_pkt_burst != mlx5_rx_burst) {
+	if (dev->rx_pkt_burst == NULL ||
+	    dev->rx_pkt_burst == removed_rx_burst) {
 		rte_errno = ENOTSUP;
 		return -rte_errno;
-- 
2.26.2

---
  Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- -	2020-11-05 12:38:55.210327070 +0000
+++ 0051-net-mlx5-fix-Rx-queue-count-calculation.patch	2020-11-05 12:38:54.251896028 +0000
@@ -1 +1 @@
-From d2d57605522d4a43be17e22e649e54033f6d8835 Mon Sep 17 00:00:00 2001
+From e0a4de61c71f57aa0d3de220aaa23bb3d092cf60 Mon Sep 17 00:00:00 2001
@@ -5,0 +6,2 @@
+[ upstream commit d2d57605522d4a43be17e22e649e54033f6d8835 ]
+
@@ -22 +23,0 @@
-Cc: stable at dpdk.org
@@ -31 +32 @@
-index 0b87be15b4..2422a4d6ca 100644
+index 38ce0e29a2..2896a9b4ef 100644
@@ -34 +35 @@
-@@ -466,17 +466,9 @@ rx_queue_count(struct mlx5_rxq_data *rxq)
+@@ -431,17 +431,9 @@ rx_queue_count(struct mlx5_rxq_data *rxq)
@@ -53,2 +54,2 @@
- 	while (check_cqe(cqe, cqe_n, cq_ci) != MLX5_CQE_STATUS_HW_OWN) {
-@@ -486,5 +478,8 @@ rx_queue_count(struct mlx5_rxq_data *rxq)
+ 	while (check_cqe(cqe, cqe_n, cq_ci) == 0) {
+@@ -451,5 +443,8 @@ rx_queue_count(struct mlx5_rxq_data *rxq)
@@ -64 +65 @@
-@@ -493,5 +488,5 @@ rx_queue_count(struct mlx5_rxq_data *rxq)
+@@ -458,5 +453,5 @@ rx_queue_count(struct mlx5_rxq_data *rxq)
@@ -71 +72 @@
-@@ -516,9 +511,10 @@ mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
+@@ -481,9 +476,10 @@ mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
@@ -84 +85 @@
-@@ -631,5 +627,6 @@ mlx5_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+@@ -513,5 +509,6 @@ mlx5_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)



More information about the stable mailing list