[dpdk-stable] patch 'net/mlx5: fix shared RSS and mark actions combination' has been queued to stable release 20.11.1

luca.boccassi at gmail.com
Fri Feb 5 12:16:09 CET 2021


Hi,

FYI, your patch has been queued to stable release 20.11.1

Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 02/07/21, so please
shout if anyone has objections.

Also note that after the patch there's a diff of the upstream commit vs. the
patch applied to the branch. This indicates whether any rebasing was needed
to apply the patch to the stable branch. If there were code changes for the
rebase (i.e., not only metadata diffs), please double-check that the rebase
was done correctly.

Queued patches are on a temporary branch at:
https://github.com/bluca/dpdk-stable

This queued commit can be viewed at:
https://github.com/bluca/dpdk-stable/commit/1102e4b5747e4e3e0db882d6296ac0ae6636a179

Thanks.

Luca Boccassi

---
From 1102e4b5747e4e3e0db882d6296ac0ae6636a179 Mon Sep 17 00:00:00 2001
From: Suanming Mou <suanmingm at nvidia.com>
Date: Tue, 15 Dec 2020 11:46:24 +0800
Subject: [PATCH] net/mlx5: fix shared RSS and mark actions combination

[ upstream commit 8e61555657b227d967a91b8964c8d1e4e9d17695 ]

In order to allow mbuf mark ID updates in the Rx data path, the PMD
has a mechanism that enables mark reporting per Rx queue according to
the rte_flow rules.
When a flow with a mark ID and an RSS/QUEUE action exists, all the
relevant Rx queues are enabled to report the mark ID.

When a shared RSS action is combined with a mark action, the PMD
mechanism misses the Rx queue updates.

This commit handles the shared RSS case in that mechanism as well.
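
For context, a minimal sketch (not part of the patch; queue numbers, RSS
types and the mark ID are illustrative) of the rte_flow usage that
exercises this path on DPDK 20.11: a flow rule combining a MARK action
with a shared RSS action. Before this fix, the mark ID was not reported
in the mbufs received on the queues referenced by the shared RSS action.

    #include <rte_common.h>
    #include <rte_ethdev.h>
    #include <rte_flow.h>

    static struct rte_flow *
    create_marked_shared_rss_flow(uint16_t port_id)
    {
        struct rte_flow_error err;
        static uint16_t queues[] = { 0, 1 };
        struct rte_flow_action_rss rss_conf = {
            .types = ETH_RSS_IP,
            .queue_num = RTE_DIM(queues),
            .queue = queues,
        };
        const struct rte_flow_action rss_action = {
            .type = RTE_FLOW_ACTION_TYPE_RSS,
            .conf = &rss_conf,
        };
        struct rte_flow_shared_action_conf conf = { .ingress = 1 };
        /* The shared action is created once and may be referenced
         * by many flow rules on this port.
         */
        struct rte_flow_shared_action *shared =
            rte_flow_shared_action_create(port_id, &conf,
                                          &rss_action, &err);
        if (shared == NULL)
            return NULL;
        /* Combine MARK with the shared RSS fate action in one rule. */
        struct rte_flow_action_mark mark = { .id = 0x1234 };
        const struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
            { .type = RTE_FLOW_ACTION_TYPE_SHARED, .conf = shared },
            { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        const struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH },
            { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        const struct rte_flow_attr attr = { .ingress = 1 };

        return rte_flow_create(port_id, &attr, pattern, actions, &err);
    }

The fix below extends flow_drv_rxq_flags_set()/flow_drv_rxq_flags_trim()
to resolve the indirection table from the shared RSS action too, so the
mark flag is set on every Rx queue the shared action references.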

Fixes: e1592b6c4dea ("net/mlx5: make Rx queue thread safe")

Signed-off-by: Suanming Mou <suanmingm at nvidia.com>
Acked-by: Matan Azrad <matan at nvidia.com>
---
 drivers/net/mlx5/mlx5_flow.c    | 52 +++++++++++++-----
 drivers/net/mlx5/mlx5_flow_dv.c | 95 +++++++++++++--------------------
 2 files changed, 75 insertions(+), 72 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 522d238c27..5138af9208 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -1002,17 +1002,29 @@ flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
 	struct mlx5_priv *priv = dev->data->dev_private;
 	const int mark = dev_handle->mark;
 	const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
-	struct mlx5_hrxq *hrxq;
+	struct mlx5_ind_table_obj *ind_tbl = NULL;
 	unsigned int i;
 
-	if (dev_handle->fate_action != MLX5_FLOW_FATE_QUEUE)
-		return;
-	hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
+	if (dev_handle->fate_action == MLX5_FLOW_FATE_QUEUE) {
+		struct mlx5_hrxq *hrxq;
+
+		hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
 			      dev_handle->rix_hrxq);
-	if (!hrxq)
+		if (hrxq)
+			ind_tbl = hrxq->ind_table;
+	} else if (dev_handle->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
+		struct mlx5_shared_action_rss *shared_rss;
+
+		shared_rss = mlx5_ipool_get
+			(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
+			 dev_handle->rix_srss);
+		if (shared_rss)
+			ind_tbl = shared_rss->ind_tbl;
+	}
+	if (!ind_tbl)
 		return;
-	for (i = 0; i != hrxq->ind_table->queues_n; ++i) {
-		int idx = hrxq->ind_table->queues[i];
+	for (i = 0; i != ind_tbl->queues_n; ++i) {
+		int idx = ind_tbl->queues[i];
 		struct mlx5_rxq_ctrl *rxq_ctrl =
 			container_of((*priv->rxqs)[idx],
 				     struct mlx5_rxq_ctrl, rxq);
@@ -1084,18 +1096,30 @@ flow_drv_rxq_flags_trim(struct rte_eth_dev *dev,
 	struct mlx5_priv *priv = dev->data->dev_private;
 	const int mark = dev_handle->mark;
 	const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
-	struct mlx5_hrxq *hrxq;
+	struct mlx5_ind_table_obj *ind_tbl = NULL;
 	unsigned int i;
 
-	if (dev_handle->fate_action != MLX5_FLOW_FATE_QUEUE)
-		return;
-	hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
+	if (dev_handle->fate_action == MLX5_FLOW_FATE_QUEUE) {
+		struct mlx5_hrxq *hrxq;
+
+		hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
 			      dev_handle->rix_hrxq);
-	if (!hrxq)
+		if (hrxq)
+			ind_tbl = hrxq->ind_table;
+	} else if (dev_handle->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
+		struct mlx5_shared_action_rss *shared_rss;
+
+		shared_rss = mlx5_ipool_get
+			(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
+			 dev_handle->rix_srss);
+		if (shared_rss)
+			ind_tbl = shared_rss->ind_tbl;
+	}
+	if (!ind_tbl)
 		return;
 	MLX5_ASSERT(dev->data->dev_started);
-	for (i = 0; i != hrxq->ind_table->queues_n; ++i) {
-		int idx = hrxq->ind_table->queues[i];
+	for (i = 0; i != ind_tbl->queues_n; ++i) {
+		int idx = ind_tbl->queues[i];
 		struct mlx5_rxq_ctrl *rxq_ctrl =
 			container_of((*priv->rxqs)[idx],
 				     struct mlx5_rxq_ctrl, rxq);
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 30ceb143ef..ddeaa32b66 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -10652,47 +10652,6 @@ __flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
 	}
 }
 
-/**
- * Retrieves hash RX queue suitable for the *flow*.
- * If shared action configured for *flow* suitable hash RX queue will be
- * retrieved from attached shared action.
- *
- * @param[in] dev
- *   Pointer to the Ethernet device structure.
- * @param[in] dev_flow
- *   Pointer to the sub flow.
- * @param[in] rss_desc
- *   Pointer to the RSS descriptor.
- * @param[out] hrxq
- *   Pointer to retrieved hash RX queue object.
- *
- * @return
- *   Valid hash RX queue index, otherwise 0 and rte_errno is set.
- */
-static uint32_t
-__flow_dv_rss_get_hrxq(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,
-		       struct mlx5_flow_rss_desc *rss_desc,
-		       struct mlx5_hrxq **hrxq)
-{
-	struct mlx5_priv *priv = dev->data->dev_private;
-	uint32_t hrxq_idx;
-
-	if (rss_desc->shared_rss) {
-		hrxq_idx = __flow_dv_action_rss_hrxq_lookup
-				(dev, rss_desc->shared_rss,
-				 dev_flow->hash_fields,
-				 !!(dev_flow->handle->layers &
-				    MLX5_FLOW_LAYER_TUNNEL));
-		if (hrxq_idx)
-			*hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
-					       hrxq_idx);
-	} else {
-		*hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
-					     &hrxq_idx);
-	}
-	return hrxq_idx;
-}
-
 /**
  * Apply the flow to the NIC, lock free,
  * (mutex should be acquired by caller).
@@ -10724,11 +10683,6 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
 	struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;
 
 	MLX5_ASSERT(wks);
-	if (rss_desc->shared_rss) {
-		dh = wks->flows[wks->flow_idx - 1].handle;
-		MLX5_ASSERT(dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS);
-		dh->rix_srss = rss_desc->shared_rss;
-	}
 	for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
 		dev_flow = &wks->flows[idx];
 		dv = &dev_flow->dv;
@@ -10744,11 +10698,34 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
 						priv->drop_queue.hrxq->action;
 			}
 		} else if ((dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
-			   !dv_h->rix_sample && !dv_h->rix_dest_array) ||
-			    (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS)) {
+			   !dv_h->rix_sample && !dv_h->rix_dest_array)) {
+			struct mlx5_hrxq *hrxq;
+			uint32_t hrxq_idx;
+
+			hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
+						    &hrxq_idx);
+			if (!hrxq) {
+				rte_flow_error_set
+					(error, rte_errno,
+					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+					 "cannot get hash queue");
+				goto error;
+			}
+			dh->rix_hrxq = hrxq_idx;
+			dv->actions[n++] = hrxq->action;
+		} else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
 			struct mlx5_hrxq *hrxq = NULL;
-			uint32_t hrxq_idx = __flow_dv_rss_get_hrxq
-					(dev, dev_flow, rss_desc, &hrxq);
+			uint32_t hrxq_idx;
+
+			hrxq_idx = __flow_dv_action_rss_hrxq_lookup(dev,
+						rss_desc->shared_rss,
+						dev_flow->hash_fields,
+						!!(dh->layers &
+						MLX5_FLOW_LAYER_TUNNEL));
+			if (hrxq_idx)
+				hrxq = mlx5_ipool_get
+					(priv->sh->ipool[MLX5_IPOOL_HRXQ],
+					 hrxq_idx);
 			if (!hrxq) {
 				rte_flow_error_set
 					(error, rte_errno,
@@ -10756,8 +10733,7 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
 					 "cannot get hash queue");
 				goto error;
 			}
-			if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
-				dh->rix_hrxq = hrxq_idx;
+			dh->rix_srss = rss_desc->shared_rss;
 			dv->actions[n++] = hrxq->action;
 		} else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
 			if (!priv->sh->default_miss_action) {
@@ -10799,12 +10775,12 @@ error:
 		if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) {
 			mlx5_hrxq_release(dev, dh->rix_hrxq);
 			dh->rix_hrxq = 0;
+		} else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
+			dh->rix_srss = 0;
 		}
 		if (dh->vf_vlan.tag && dh->vf_vlan.created)
 			mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
 	}
-	if (rss_desc->shared_rss)
-		wks->flows[wks->flow_idx - 1].handle->rix_srss = 0;
 	rte_errno = err; /* Restore rte_errno. */
 	return -rte_errno;
 }
@@ -11072,9 +11048,6 @@ flow_dv_fate_resource_release(struct rte_eth_dev *dev,
 		flow_dv_port_id_action_resource_release(dev,
 				handle->rix_port_id_action);
 		break;
-	case MLX5_FLOW_FATE_SHARED_RSS:
-		flow_dv_shared_rss_action_release(dev, handle->rix_srss);
-		break;
 	default:
 		DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
 		break;
@@ -11237,6 +11210,7 @@ flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
 	struct mlx5_flow_handle *dev_handle;
 	struct mlx5_priv *priv = dev->data->dev_private;
+	uint32_t srss = 0;
 
 	if (!flow)
 		return;
@@ -11281,10 +11255,15 @@ flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
 		if (dev_handle->dvh.rix_tag)
 			flow_dv_tag_release(dev,
 					    dev_handle->dvh.rix_tag);
-		flow_dv_fate_resource_release(dev, dev_handle);
+		if (dev_handle->fate_action != MLX5_FLOW_FATE_SHARED_RSS)
+			flow_dv_fate_resource_release(dev, dev_handle);
+		else if (!srss)
+			srss = dev_handle->rix_srss;
 		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
 			   tmp_idx);
 	}
+	if (srss)
+		flow_dv_shared_rss_action_release(dev, srss);
 }
 
 /**
-- 
2.29.2

---
  Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- -	2021-02-05 11:18:32.996158504 +0000
+++ 0083-net-mlx5-fix-shared-RSS-and-mark-actions-combination.patch	2021-02-05 11:18:28.922693320 +0000
@@ -1 +1 @@
-From 8e61555657b227d967a91b8964c8d1e4e9d17695 Mon Sep 17 00:00:00 2001
+From 1102e4b5747e4e3e0db882d6296ac0ae6636a179 Mon Sep 17 00:00:00 2001
@@ -5,0 +6,2 @@
+[ upstream commit 8e61555657b227d967a91b8964c8d1e4e9d17695 ]
+
@@ -17 +18,0 @@
-Cc: stable at dpdk.org
@@ -27 +28 @@
-index f110c6b714..2a4073c126 100644
+index 522d238c27..5138af9208 100644
@@ -106 +107 @@
-index ce229dbe85..50673ced25 100644
+index 30ceb143ef..ddeaa32b66 100644
@@ -109 +110 @@
-@@ -10680,47 +10680,6 @@ __flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
+@@ -10652,47 +10652,6 @@ __flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
@@ -157 +158 @@
-@@ -10752,11 +10711,6 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
+@@ -10724,11 +10683,6 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
@@ -169 +170 @@
-@@ -10772,11 +10726,34 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
+@@ -10744,11 +10698,34 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
@@ -208 +209 @@
-@@ -10784,8 +10761,7 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
+@@ -10756,8 +10733,7 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
@@ -218 +219 @@
-@@ -10827,12 +10803,12 @@ error:
+@@ -10799,12 +10775,12 @@ error:
@@ -233 +234 @@
-@@ -11100,9 +11076,6 @@ flow_dv_fate_resource_release(struct rte_eth_dev *dev,
+@@ -11072,9 +11048,6 @@ flow_dv_fate_resource_release(struct rte_eth_dev *dev,
@@ -243 +244 @@
-@@ -11265,6 +11238,7 @@ flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
+@@ -11237,6 +11210,7 @@ flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
@@ -251 +252 @@
-@@ -11309,10 +11283,15 @@ flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
+@@ -11281,10 +11255,15 @@ flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)

