[dpdk-stable] [PATCH] net/mlx5: fix Rx queues completion index consistency

Viacheslav Ovsiienko viacheslavo at nvidia.com
Fri Nov 6 18:16:10 CET 2020


The Rx queue completion consumer index could temporarily get a wrong
value pointing into the middle of a compressed CQE session. If the
application crashed at that moment, the subsequent queue restart
handled the wrong CQEs pointed to by this index and lost consumer
index synchronization, making a reliable queue restart impossible.

Fixes: 88c0733535d6 ("net/mlx5: extend Rx completion with error handling")
Cc: stable at dpdk.org

Signed-off-by: Viacheslav Ovsiienko <viacheslavo at nvidia.com>
---
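Note: below is a minimal sketch of the pattern this patch applies, using
hypothetical, simplified types and field names (not the actual driver
code), to show how the shared consumer index is kept on a CQE boundary
while a compressed session is being parsed:

	#include <stdint.h>
	#include <stdbool.h>

	struct cqe {
		uint8_t op_own;   /* format/ownership bits, as in mlx5 CQEs */
	};

	struct rxq {
		uint32_t cq_ci;   /* consumer index used on queue restart */
		uint32_t zip_ca;  /* position inside a compressed session */
	};

	/* Simplified "compressed format" check (assumption, not the PRM macro). */
	static bool
	cqe_is_compressed(const struct cqe *cqe)
	{
		return ((cqe->op_own & 0xc) >> 2) == 0x3;
	}

	static void
	parse_cqe(struct rxq *rxq, const struct cqe *cqe)
	{
		/*
		 * Advance a local copy first, so rxq->cq_ci never points
		 * into the middle of a compressed session if processing
		 * is interrupted (e.g. by an application crash).
		 */
		uint32_t cq_ci = rxq->cq_ci + 1;

		if (cqe_is_compressed(cqe)) {
			/* Track the session locally; rxq->cq_ci is published
			 * only once the whole session has been accounted for.
			 */
			rxq->zip_ca = cq_ci;
		} else {
			/* Regular CQE: publish the advanced index at once. */
			rxq->cq_ci = cq_ci;
		}
	}
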
 drivers/net/mlx5/mlx5_rxq.c  |  2 +-
 drivers/net/mlx5/mlx5_rxtx.c | 16 +++++++++++-----
 2 files changed, 12 insertions(+), 6 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index d95a573..5a84214 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -622,7 +622,7 @@
 	rte_io_wmb();
 	*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
 	rte_io_wmb();
-	/* Reset RQ consumer before moving queue ro READY state. */
+	/* Reset RQ consumer before moving queue to READY state. */
 	*rxq->rq_db = rte_cpu_to_be_32(0);
 	rte_io_wmb();
 	ret = priv->obj_ops.rxq_obj_modify(rxq_ctrl->obj, MLX5_RXQ_MOD_RST2RDY);
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 402e7d1..a5829f0 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -1181,6 +1181,7 @@ enum mlx5_txcmp_code {
 		} else {
 			int ret;
 			int8_t op_own;
+			uint32_t cq_ci;
 
 			ret = check_cqe(cqe, cqe_n, rxq->cq_ci);
 			if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
@@ -1194,14 +1195,19 @@ enum mlx5_txcmp_code {
 					return 0;
 				}
 			}
-			++rxq->cq_ci;
+			/*
+			 * Introduce the local variable to have queue cq_ci
+			 * index in queue structure always consistent with
+			 * actual CQE boundary (not pointing to the middle
+			 * of compressed CQE session).
+			 */
+			cq_ci = rxq->cq_ci + 1;
 			op_own = cqe->op_own;
 			if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) {
 				volatile struct mlx5_mini_cqe8 (*mc)[8] =
 					(volatile struct mlx5_mini_cqe8 (*)[8])
 					(uintptr_t)(&(*rxq->cqes)
-						[rxq->cq_ci &
-						 cqe_cnt].pkt_info);
+						[cq_ci & cqe_cnt].pkt_info);
 
 				/* Fix endianness. */
 				zip->cqe_cnt = rte_be_to_cpu_32(cqe->byte_cnt);
@@ -1214,10 +1220,9 @@ enum mlx5_txcmp_code {
 				 * 7 CQEs after the initial CQE instead of 8
 				 * for subsequent ones.
 				 */
-				zip->ca = rxq->cq_ci;
+				zip->ca = cq_ci;
 				zip->na = zip->ca + 7;
 				/* Compute the next non compressed CQE. */
-				--rxq->cq_ci;
 				zip->cq_ci = rxq->cq_ci + zip->cqe_cnt;
 				/* Get packet size to return. */
 				len = rte_be_to_cpu_32((*mc)[0].byte_cnt &
@@ -1233,6 +1238,7 @@ enum mlx5_txcmp_code {
 					++idx;
 				}
 			} else {
+				rxq->cq_ci = cq_ci;
 				len = rte_be_to_cpu_32(cqe->byte_cnt);
 			}
 		}
-- 
1.8.3.1


