[dpdk-stable] patch 'net/mlx5: check Tx queue size overflow' has been queued to LTS release 17.11.7

Yongseok Koh yskoh at mellanox.com
Tue Jul 23 03:01:08 CEST 2019


Hi,

FYI, your patch has been queued to LTS release 17.11.7

Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections by 07/27/19. So please
shout if you have any objection.

Also note that after the patch there's a diff of the upstream commit vs the
patch applied to the branch. This will indicate if any rebasing was
needed to apply the patch to the stable branch. If there were code changes for
rebasing (i.e. not only metadata diffs), please double-check that the rebase
was done correctly.

Thanks.

Yongseok

---
From 938c064f1133d84bc735071f12f9c603ed3ea7bf Mon Sep 17 00:00:00 2001
From: Yongseok Koh <yskoh at mellanox.com>
Date: Tue, 30 Apr 2019 18:01:43 -0700
Subject: [PATCH] net/mlx5: check Tx queue size overflow

[ backported from upstream commit f6d9ab4e769f0f95ecac1b418106e9f8137ca60c ]

If Tx packet inlining is enabled, the rdma-core library should allocate a
Tx WQ large enough to support it. It is better for the PMD to calculate the
required WQ size from the parameters and return an error with an
appropriate message if it exceeds the device capability.

Signed-off-by: Yongseok Koh <yskoh at mellanox.com>
Acked-by: Shahaf Shuler <shahafs at mellanox.com>
---
 drivers/net/mlx5/mlx5_txq.c | 36 ++++++++++++++++++++++++++++++++----
 1 file changed, 32 insertions(+), 4 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 9c25efa451..e3deef004e 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -595,6 +595,27 @@ mlx5_txq_ibv_verify(struct rte_eth_dev *dev)
 	return ret;
 }
 
+/**
+ * Calculate the total number of WQEBB for a Tx queue.
+ *
+ * Simplified version of calc_sq_size() in rdma-core.
+ *
+ * @param txq_ctrl
+ *   Pointer to Tx queue control structure.
+ *
+ * @return
+ *   The number of WQEBBs.
+ */
+static int
+txq_calc_wqebb_cnt(struct mlx5_txq_ctrl *txq_ctrl)
+{
+	unsigned int wqe_size;
+	const unsigned int desc = 1 << txq_ctrl->txq.elts_n;
+
+	wqe_size = MLX5_WQE_SIZE + txq_ctrl->max_inline_data;
+	return rte_align32pow2(wqe_size * desc) / MLX5_WQE_SIZE;
+}
+
 /**
  * Create a DPDK Tx queue.
  *
@@ -640,10 +661,6 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	tmpl->idx = idx;
 	if (priv->mps == MLX5_MPW_ENHANCED)
 		tmpl->txq.mpw_hdr_dseg = priv->mpw_hdr_dseg;
-	DRV_LOG(DEBUG, "port %u priv->device_attr.max_qp_wr is %d",
-		dev->data->port_id, priv->device_attr.orig_attr.max_qp_wr);
-	DRV_LOG(DEBUG, "port %u priv->device_attr.max_sge is %d",
-		dev->data->port_id, priv->device_attr.orig_attr.max_sge);
 	if (priv->txq_inline && (priv->txqs_n >= priv->txqs_inline)) {
 		unsigned int ds_cnt;
 
@@ -698,6 +715,17 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	}
 	if (priv->tunnel_en)
 		tmpl->txq.tunnel_en = 1;
+	if (txq_calc_wqebb_cnt(tmpl) >
+	    priv->device_attr.orig_attr.max_qp_wr) {
+		DRV_LOG(ERR,
+			"port %u Tx WQEBB count (%d) exceeds the limit (%d),"
+			" try smaller queue size",
+			dev->data->port_id, txq_calc_wqebb_cnt(tmpl),
+			priv->device_attr.orig_attr.max_qp_wr);
+		rte_free(tmpl);
+		rte_errno = ENOMEM;
+		return NULL;
+	}
 	tmpl->txq.elts =
 		(struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])(tmpl + 1);
 	tmpl->txq.mr_ctrl.cache_bh =
-- 
2.21.0
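
For reference, the check added above is plain arithmetic: each descriptor
needs one 64-byte WQE (MLX5_WQE_SIZE) plus room for inline data, the total
is rounded up to a power of two, and the result is expressed in WQEBB units
and compared against the device's max_qp_wr limit. Below is a minimal
standalone sketch of that calculation, not the PMD code itself: the queue
parameters and device limit are made up for illustration, and align32pow2()
is a local stand-in for DPDK's rte_align32pow2().

#include <stdio.h>
#include <stdint.h>

#define MLX5_WQE_SIZE 64 /* one WQEBB (WQE basic block) is 64 bytes */

/* Stand-in for DPDK's rte_align32pow2(): round up to a power of two. */
static uint32_t
align32pow2(uint32_t x)
{
	x--;
	x |= x >> 1;
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;
	return x + 1;
}

int
main(void)
{
	/* Hypothetical queue configuration and device limit. */
	const unsigned int elts_n = 14;           /* 16384 descriptors */
	const unsigned int max_inline_data = 512; /* inline bytes per WQE */
	const int max_qp_wr = 32768;              /* example device limit */

	const unsigned int desc = 1u << elts_n;
	const unsigned int wqe_size = MLX5_WQE_SIZE + max_inline_data;
	const unsigned int wqebb =
		align32pow2(wqe_size * desc) / MLX5_WQE_SIZE;

	/* 576 * 16384 = 9437184, rounded up to 16777216 bytes, i.e.
	 * 262144 WQEBBs -- far above the 32768 limit, so queue creation
	 * would be rejected with ENOMEM up front instead of failing
	 * later inside rdma-core. */
	printf("WQEBB count %u vs limit %d: %s\n", wqebb, max_qp_wr,
	       wqebb > (unsigned int)max_qp_wr ? "reject" : "ok");
	return 0;
}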

---
  Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- -	2019-07-22 17:55:11.923539948 -0700
+++ 0101-net-mlx5-check-Tx-queue-size-overflow.patch	2019-07-22 17:55:06.509477000 -0700
@@ -1,26 +1,26 @@
-From f6d9ab4e769f0f95ecac1b418106e9f8137ca60c Mon Sep 17 00:00:00 2001
+From 938c064f1133d84bc735071f12f9c603ed3ea7bf Mon Sep 17 00:00:00 2001
 From: Yongseok Koh <yskoh at mellanox.com>
 Date: Tue, 30 Apr 2019 18:01:43 -0700
 Subject: [PATCH] net/mlx5: check Tx queue size overflow
 
+[ backported from upstream commit f6d9ab4e769f0f95ecac1b418106e9f8137ca60c ]
+
 If Tx packet inlining is enabled, the rdma-core library should allocate a
 Tx WQ large enough to support it. It is better for the PMD to calculate the
 required WQ size from the parameters and return an error with an
 appropriate message if it exceeds the device capability.
 
-Cc: stable at dpdk.org
-
 Signed-off-by: Yongseok Koh <yskoh at mellanox.com>
 Acked-by: Shahaf Shuler <shahafs at mellanox.com>
 ---
- drivers/net/mlx5/mlx5_txq.c | 35 +++++++++++++++++++++++++++++++----
- 1 file changed, 31 insertions(+), 4 deletions(-)
+ drivers/net/mlx5/mlx5_txq.c | 36 ++++++++++++++++++++++++++++++++----
+ 1 file changed, 32 insertions(+), 4 deletions(-)
 
 diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
-index 4d55fd413c..b281c45027 100644
+index 9c25efa451..e3deef004e 100644
 --- a/drivers/net/mlx5/mlx5_txq.c
 +++ b/drivers/net/mlx5/mlx5_txq.c
-@@ -678,6 +678,27 @@ mlx5_txq_ibv_verify(struct rte_eth_dev *dev)
+@@ -595,6 +595,27 @@ mlx5_txq_ibv_verify(struct rte_eth_dev *dev)
  	return ret;
  }
  
@@ -46,29 +46,37 @@
 +}
 +
  /**
-  * Set Tx queue parameters from device configuration.
+  * Create a DPDK Tx queue.
   *
-@@ -824,10 +845,16 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
- 	tmpl->txq.port_id = dev->data->port_id;
- 	tmpl->txq.idx = idx;
- 	txq_set_params(tmpl);
--	DRV_LOG(DEBUG, "port %u device_attr.max_qp_wr is %d",
--		dev->data->port_id, priv->sh->device_attr.orig_attr.max_qp_wr);
--	DRV_LOG(DEBUG, "port %u device_attr.max_sge is %d",
--		dev->data->port_id, priv->sh->device_attr.orig_attr.max_sge);
+@@ -640,10 +661,6 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ 	tmpl->idx = idx;
+ 	if (priv->mps == MLX5_MPW_ENHANCED)
+ 		tmpl->txq.mpw_hdr_dseg = priv->mpw_hdr_dseg;
+-	DRV_LOG(DEBUG, "port %u priv->device_attr.max_qp_wr is %d",
+-		dev->data->port_id, priv->device_attr.orig_attr.max_qp_wr);
+-	DRV_LOG(DEBUG, "port %u priv->device_attr.max_sge is %d",
+-		dev->data->port_id, priv->device_attr.orig_attr.max_sge);
+ 	if (priv->txq_inline && (priv->txqs_n >= priv->txqs_inline)) {
+ 		unsigned int ds_cnt;
+ 
+@@ -698,6 +715,17 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ 	}
+ 	if (priv->tunnel_en)
+ 		tmpl->txq.tunnel_en = 1;
 +	if (txq_calc_wqebb_cnt(tmpl) >
-+	    priv->sh->device_attr.orig_attr.max_qp_wr) {
++	    priv->device_attr.orig_attr.max_qp_wr) {
 +		DRV_LOG(ERR,
 +			"port %u Tx WQEBB count (%d) exceeds the limit (%d),"
 +			" try smaller queue size",
 +			dev->data->port_id, txq_calc_wqebb_cnt(tmpl),
-+			priv->sh->device_attr.orig_attr.max_qp_wr);
++			priv->device_attr.orig_attr.max_qp_wr);
++		rte_free(tmpl);
 +		rte_errno = ENOMEM;
-+		goto error;
++		return NULL;
 +	}
  	tmpl->txq.elts =
  		(struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])(tmpl + 1);
- 	rte_atomic32_inc(&tmpl->refcnt);
+ 	tmpl->txq.mr_ctrl.cache_bh =
 -- 
 2.21.0
 

