[PATCH 20.11] net/mlx5: handle MPRQ incompatibility with external buffers

Alexander Kozyrev akozyrev at nvidia.com
Thu Aug 11 02:35:56 CEST 2022


[ upstream commit 3a29cb3a730ba0def6b088c969da379a9ffea988 ]

Multi-Packet Rx queue (MPRQ) uses PMD-managed buffers to store packets.
These buffers are externally attached to user mbufs.
This conflicts with the feature that allows an application to use its
own user-managed externally attached buffers.
Fall back to Single-Packet RQ (SPRQ) in case a mempool with external
pinned buffers is configured.
This limitation is already documented in the mlx5 guide.
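For context (not part of the patch): a mempool carries the
RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF private flag when the application creates
it with rte_pktmbuf_pool_create_extbuf(), i.e. when the mbuf data buffers
live in application-provided pinned memory. Below is a minimal sketch of
that setup and of the flag check the driver now relies on; the pool name,
sizes, alignment and error handling are illustrative assumptions only.

/*
 * Hypothetical, self-contained sketch: pool name, sizes and alignment
 * are arbitrary; error handling is minimal.
 */
#include <stdio.h>
#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>

int
main(int argc, char **argv)
{
	const unsigned int nb_mbufs = 4096; /* assumed pool size */
	const uint16_t elt_size = 2048;     /* assumed per-packet buffer size */
	struct rte_pktmbuf_extmem ext;
	struct rte_mempool *mp;

	if (rte_eal_init(argc, argv) < 0)
		return -1;

	/* Application-managed memory area that will back the mbuf data. */
	ext.buf_len = (size_t)nb_mbufs * elt_size;
	ext.buf_ptr = rte_malloc(NULL, ext.buf_len, 4096);
	if (ext.buf_ptr == NULL)
		return -1;
	ext.buf_iova = rte_malloc_virt2iova(ext.buf_ptr);
	ext.elt_size = elt_size;

	/*
	 * Mbuf pool whose data buffers are pinned to the external area;
	 * this sets RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF in the pool private
	 * flags.
	 */
	mp = rte_pktmbuf_pool_create_extbuf("extmem_pool", nb_mbufs,
					    256 /* cache */, 0 /* priv */,
					    elt_size, rte_socket_id(),
					    &ext, 1);
	if (mp == NULL)
		return -1;

	/* Same test the mlx5 PMD now performs in mlx5_rx_queue_setup(). */
	if (rte_pktmbuf_priv_flags(mp) & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF)
		printf("pinned external buffers: mlx5 falls back to SPRQ\n");

	return 0;
}

With such a pool, mlx5_rx_queue_setup() passes is_extmem=true down to
mlx5_mprq_prepare(), which reports MPRQ as unsupported, so the Rx queue is
created with regular single-packet strides (SPRQ) instead.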

Signed-off-by: Alexander Kozyrev <akozyrev at nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo at nvidia.com>
---
 drivers/net/mlx5/mlx5_rxq.c  | 23 ++++++++++++++++-------
 drivers/net/mlx5/mlx5_rxtx.h |  2 +-
 2 files changed, 17 insertions(+), 8 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 4a263a5803..80d9d2fe12 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -754,6 +754,7 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 				(struct rte_eth_rxseg_split *)conf->rx_seg;
 	struct rte_eth_rxseg_split rx_single = {.mp = mp};
 	uint16_t n_seg = conf->rx_nseg;
+	bool is_extmem = false;
 	int res;
 
 	if (mp) {
@@ -764,6 +765,8 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		 */
 		rx_seg = &rx_single;
 		n_seg = 1;
+		is_extmem = rte_pktmbuf_priv_flags(mp) &
+				RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF;
 	}
 	if (n_seg > 1) {
 		uint64_t offloads = conf->offloads |
@@ -783,7 +786,8 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	res = mlx5_rx_queue_pre_setup(dev, idx, &desc);
 	if (res)
 		return res;
-	rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, rx_seg, n_seg);
+	rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket,
+				conf, rx_seg, n_seg, is_extmem);
 	if (!rxq_ctrl) {
 		DRV_LOG(ERR, "port %u unable to allocate queue index %u",
 			dev->data->port_id, idx);
@@ -1397,6 +1401,8 @@ mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint16_t idx,
  *   Log number of strides to configure for this queue.
  * @param actual_log_stride_size
  *   Log stride size to configure for this queue.
+ * @param is_extmem
+ *   Is external pinned memory pool used.
  *
  * @return
  *   0 if Multi-Packet RQ is supported, otherwise -1.
@@ -1405,7 +1411,8 @@ static int
 mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		  bool rx_seg_en, uint32_t min_mbuf_size,
 		  uint32_t *actual_log_stride_num,
-		  uint32_t *actual_log_stride_size)
+		  uint32_t *actual_log_stride_size,
+		  bool is_extmem)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_dev_config *config = &priv->config;
@@ -1423,7 +1430,7 @@ mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 				log_max_stride_size);
 	uint32_t log_stride_wqe_size;
 
-	if (mlx5_check_mprq_support(dev) != 1 || rx_seg_en)
+	if (mlx5_check_mprq_support(dev) != 1 || rx_seg_en || is_extmem)
 		goto unsupport;
 	/* Checks if chosen number of strides is in supported range. */
 	if (config->mprq.log_stride_num > log_max_stride_num ||
@@ -1489,7 +1496,7 @@ mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 			" rxq_num = %u, stride_sz = %u, stride_num = %u\n"
 			"  supported: min_rxqs_num = %u, min_buf_wqe_sz = %u"
 			" min_stride_sz = %u, max_stride_sz = %u).\n"
-			"Rx segment is %senable.",
+			"Rx segment is %senabled. External mempool is %sused.",
 			dev->data->port_id, min_mbuf_size, desc, priv->rxqs_n,
 			RTE_BIT32(config->mprq.log_stride_size),
 			RTE_BIT32(config->mprq.log_stride_num),
@@ -1497,7 +1504,7 @@ mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 			RTE_BIT32(config->mprq.log_min_stride_wqe_size),
 			RTE_BIT32(config->mprq.log_min_stride_size),
 			RTE_BIT32(config->mprq.log_max_stride_size),
-			rx_seg_en ? "" : "not ");
+			rx_seg_en ? "" : "not ", is_extmem ? "" : "not ");
 	return -1;
 }
 
@@ -1519,7 +1526,8 @@ mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 struct mlx5_rxq_ctrl *
 mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	     unsigned int socket, const struct rte_eth_rxconf *conf,
-	     const struct rte_eth_rxseg_split *rx_seg, uint16_t n_seg)
+	     const struct rte_eth_rxseg_split *rx_seg, uint16_t n_seg,
+	     bool is_extmem)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_rxq_ctrl *tmpl;
@@ -1541,7 +1549,8 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	const int mprq_en = !mlx5_mprq_prepare(dev, idx, desc, rx_seg_en,
 					       non_scatter_min_mbuf_size,
 					       &mprq_log_actual_stride_num,
-					       &mprq_log_actual_stride_size);
+					       &mprq_log_actual_stride_size,
+					       is_extmem);
 	/*
 	 * Always allocate extra slots, even if eventually
 	 * the vector Rx will not be used.
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 237a7faa5c..9e00031ed6 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -336,7 +336,7 @@ struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx,
 				   uint16_t desc, unsigned int socket,
 				   const struct rte_eth_rxconf *conf,
 				   const struct rte_eth_rxseg_split *rx_seg,
-				   uint16_t n_seg);
+				   uint16_t n_seg, bool is_extmem);
 struct mlx5_rxq_ctrl *mlx5_rxq_hairpin_new
 	(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	 const struct rte_eth_hairpin_conf *hairpin_conf);
-- 
2.18.2


