[dpdk-stable] patch 'net/mlx5: add missing sanity checks for Tx completion queue' has been queued to LTS release 17.11.4

Yongseok Koh yskoh at mellanox.com
Fri Jul 27 04:32:04 CEST 2018


Hi,

FYI, your patch has been queued to LTS release 17.11.4

Note that it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 07/28/18, so please
shout if you have any.

Thanks.

Yongseok

---
From bcbe141318c63055bf3f87df0965ae84d01c0d94 Mon Sep 17 00:00:00 2001
From: Yongseok Koh <yskoh at mellanox.com>
Date: Tue, 26 Dec 2017 19:55:45 -0800
Subject: [PATCH] net/mlx5: add missing sanity checks for Tx completion queue

[ upstream commit 2eefbec531c7abf4c9c668f7941b285858c06733 ]

Add sanity checks (assertions) that a CQE slot is always available
before a completion is requested on the Tx completion queue. As the
CQ producer index (cq_pi) is used only by these checks, compile it
under !NDEBUG so that it costs nothing in release builds.

Signed-off-by: Yongseok Koh <yskoh at mellanox.com>
---
 drivers/net/mlx5/mlx5_rxtx.c          | 19 ++++++++++++++++++-
 drivers/net/mlx5/mlx5_rxtx.h          |  2 ++
 drivers/net/mlx5/mlx5_rxtx_vec_neon.h |  8 ++++++++
 drivers/net/mlx5/mlx5_rxtx_vec_sse.h  |  8 ++++++++
 drivers/net/mlx5/mlx5_txq.c           |  2 ++
 5 files changed, 38 insertions(+), 1 deletion(-)

diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 2e003aeac..38f036c4b 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -401,6 +401,8 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 	/* Start processing. */
 	mlx5_tx_complete(txq);
 	max_elts = (elts_n - (elts_head - txq->elts_tail));
+	/* A CQE slot must always be available. */
+	assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
 	max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
 	if (unlikely(!max_wqe))
 		return 0;
@@ -745,6 +747,9 @@ next_wqe:
 		/* Save elts_head in unused "immediate" field of WQE. */
 		last_wqe->ctrl3 = txq->elts_head;
 		txq->elts_comp = 0;
+#ifndef NDEBUG
+		++txq->cq_pi;
+#endif
 	} else {
 		txq->elts_comp = comp;
 	}
@@ -863,6 +868,8 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 	/* Start processing. */
 	mlx5_tx_complete(txq);
 	max_elts = (elts_n - (elts_head - txq->elts_tail));
+	/* A CQE slot must always be available. */
+	assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
 	max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
 	if (unlikely(!max_wqe))
 		return 0;
@@ -956,6 +963,9 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		/* Save elts_head in unused "immediate" field of WQE. */
 		wqe->ctrl[3] = elts_head;
 		txq->elts_comp = 0;
+#ifndef NDEBUG
+		++txq->cq_pi;
+#endif
 	} else {
 		txq->elts_comp = comp;
 	}
@@ -1087,6 +1097,8 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
 	/* Start processing. */
 	mlx5_tx_complete(txq);
 	max_elts = (elts_n - (elts_head - txq->elts_tail));
+	/* A CQE slot must always be available. */
+	assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
 	do {
 		struct rte_mbuf *buf = *(pkts++);
 		uintptr_t addr;
@@ -1248,6 +1260,9 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
 		/* Save elts_head in unused "immediate" field of WQE. */
 		wqe->ctrl[3] = elts_head;
 		txq->elts_comp = 0;
+#ifndef NDEBUG
+		++txq->cq_pi;
+#endif
 	} else {
 		txq->elts_comp = comp;
 	}
@@ -1590,7 +1605,9 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		wqe->ctrl[3] = elts_head;
 		txq->elts_comp = 0;
 		txq->mpw_comp = txq->wqe_ci;
-		txq->cq_pi++;
+#ifndef NDEBUG
+		++txq->cq_pi;
+#endif
 	} else {
 		txq->elts_comp += j;
 	}
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 29019f792..dac3b39ff 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -185,7 +185,9 @@ struct mlx5_txq_data {
 	uint16_t elts_comp; /* Counter since last completion request. */
 	uint16_t mpw_comp; /* WQ index since last completion request. */
 	uint16_t cq_ci; /* Consumer index for completion queue. */
+#ifndef NDEBUG
 	uint16_t cq_pi; /* Producer index for completion queue. */
+#endif
 	uint16_t wqe_ci; /* Consumer index for work queue. */
 	uint16_t wqe_pi; /* Producer index for work queue. */
 	uint16_t elts_n:4; /* (*elts)[] length (in log2). */
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
index 1604d0430..32d1e6de9 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
@@ -135,6 +135,8 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
 
 	assert(elts_n > pkts_n);
 	mlx5_tx_complete(txq);
+	/* A CQE slot must always be available. */
+	assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
 	if (unlikely(!pkts_n))
 		return 0;
 	for (n = 0; n < pkts_n; ++n) {
@@ -205,7 +207,9 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
 		wqe->ctrl[2] = rte_cpu_to_be_32(8);
 		wqe->ctrl[3] = txq->elts_head;
 		txq->elts_comp = 0;
+#ifndef NDEBUG
 		++txq->cq_pi;
+#endif
 	}
 #ifdef MLX5_PMD_SOFT_COUNTERS
 	txq->stats.opackets += n;
@@ -269,6 +273,8 @@ txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
 	assert(elts_n > pkts_n);
 	mlx5_tx_complete(txq);
 	max_elts = (elts_n - (elts_head - txq->elts_tail));
+	/* A CQE slot must always be available. */
+	assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
 	max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
 	pkts_n = RTE_MIN((unsigned int)RTE_MIN(pkts_n, max_wqe), max_elts);
 	if (unlikely(!pkts_n))
@@ -306,7 +312,9 @@ txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
 	} else {
 		/* Request a completion. */
 		txq->elts_comp = 0;
+#ifndef NDEBUG
 		++txq->cq_pi;
+#endif
 		comp_req = 8;
 	}
 	/* Fill CTRL in the header. */
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
index 01a93a4b4..1a56fca9f 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
@@ -135,6 +135,8 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
 
 	assert(elts_n > pkts_n);
 	mlx5_tx_complete(txq);
+	/* A CQE slot must always be available. */
+	assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
 	if (unlikely(!pkts_n))
 		return 0;
 	for (n = 0; n < pkts_n; ++n) {
@@ -206,7 +208,9 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
 		wqe->ctrl[2] = rte_cpu_to_be_32(8);
 		wqe->ctrl[3] = txq->elts_head;
 		txq->elts_comp = 0;
+#ifndef NDEBUG
 		++txq->cq_pi;
+#endif
 	}
 #ifdef MLX5_PMD_SOFT_COUNTERS
 	txq->stats.opackets += n;
@@ -268,6 +272,8 @@ txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
 	assert(elts_n > pkts_n);
 	mlx5_tx_complete(txq);
 	max_elts = (elts_n - (elts_head - txq->elts_tail));
+	/* A CQE slot must always be available. */
+	assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
 	max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
 	pkts_n = RTE_MIN((unsigned int)RTE_MIN(pkts_n, max_wqe), max_elts);
 	assert(pkts_n <= MLX5_DSEG_MAX - nb_dword_in_hdr);
@@ -307,7 +313,9 @@ txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
 	} else {
 		/* Request a completion. */
 		txq->elts_comp = 0;
+#ifndef NDEBUG
 		++txq->cq_pi;
+#endif
 		comp_req = 8;
 	}
 	/* Fill CTRL in the header. */
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index a5c6b5851..760ac92d4 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -467,7 +467,9 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
 		(volatile struct mlx5_cqe (*)[])
 		(uintptr_t)cq_info.buf;
 	txq_data->cq_ci = 0;
+#ifndef NDEBUG
 	txq_data->cq_pi = 0;
+#endif
 	txq_data->wqe_ci = 0;
 	txq_data->wqe_pi = 0;
 	txq_ibv->qp = tmpl.qp;
-- 
2.11.0
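
For context, the added assertion relies on unsigned wraparound
arithmetic: with a power-of-two ring of 1u << cqe_n entries, the
16-bit difference cq_pi - cq_ci counts in-flight completions
correctly even after both counters overflow, and cq_pi itself exists
only under !NDEBUG. Below is a minimal standalone sketch of that
invariant; the toy_txq struct and the counter values are illustrative
assumptions, not the driver's actual code.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Toy Tx queue carrying the same counters as the patch touches
 * (names mirror mlx5_txq_data; layout is illustrative only). */
struct toy_txq {
	uint16_t cqe_n; /* log2 of completion queue size */
	uint16_t cq_ci; /* consumer index for completion queue */
#ifndef NDEBUG
	uint16_t cq_pi; /* producer index, debug-only as in the patch */
#endif
};

/* Free CQE slots: the cast to uint16_t makes pi - ci wraparound-safe,
 * so the in-flight count stays correct after the counters overflow. */
static unsigned int
cq_free_slots(const struct toy_txq *txq)
{
#ifndef NDEBUG
	return (1u << txq->cqe_n) - (uint16_t)(txq->cq_pi - txq->cq_ci);
#else
	(void)txq;
	return 1; /* like the patch, the check vanishes in release builds */
#endif
}

int
main(void)
{
	struct toy_txq txq = { .cqe_n = 6, .cq_ci = 65520 };
#ifndef NDEBUG
	/* Producer has wrapped past 65535: 26 completions in flight. */
	txq.cq_pi = 10;
#endif
	/* 64-entry ring, 26 in flight -> prints 38 free slots. */
	printf("free CQE slots: %u\n", cq_free_slots(&txq));
	/* Mirrors the patch's sanity check before requesting another
	 * completion: a CQE slot must always be available. */
	assert(cq_free_slots(&txq));
	return 0;
}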


