patch 'net/mlx5: fix destroying external representor flow' has been queued to stable release 22.11.4

Xueming Li xuemingl at nvidia.com
Mon Dec 11 11:11:53 CET 2023


Hi,

FYI, your patch has been queued to stable release 22.11.4

Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 12/13/23, so please
shout if you have any objections.

Also note that after the patch there's a diff of the upstream commit vs the
patch applied to the branch. This indicates whether any rebasing was needed
to apply it to the stable branch. If the rebase required code changes
(i.e., not only metadata diffs), please double-check that it was done
correctly.

Queued patches are on a temporary branch at:
https://git.dpdk.org/dpdk-stable/log/?h=22.11-staging

This queued commit can be viewed at:
https://git.dpdk.org/dpdk-stable/commit/?h=22.11-staging&id=d32e9e689b63f151a4a3a0ebd806ec5cc1a5c1a2

Thanks.

Xueming Li <xuemingl at nvidia.com>

---
From d32e9e689b63f151a4a3a0ebd806ec5cc1a5c1a2 Mon Sep 17 00:00:00 2001
From: Suanming Mou <suanmingm at nvidia.com>
Date: Thu, 9 Nov 2023 16:55:47 +0800
Subject: [PATCH] net/mlx5: fix destroying external representor flow
Cc: Xueming Li <xuemingl at nvidia.com>

[ upstream commit f37c184a0ea41e6244f5b1404fe866efe6d312a5 ]

The external representor matched SQ flows are managed by the external
SQ, so PMD traffic enable/disable should not touch these flows.

This commit adds an extra external list for the external representor
matched SQ flows.
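
In outline, the fix threads a new "external" flag through the control
flow creation helpers and keeps externally owned rules on a separate
list, so the PMD's own enable/disable paths only walk the rules they
created. A minimal, self-contained sketch of that pattern (illustrative
names only, not the actual mlx5 structures):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/queue.h>

    struct ctrl_flow {
    	LIST_ENTRY(ctrl_flow) next;
    	uint32_t sqn;
    };

    LIST_HEAD(flow_list, ctrl_flow);

    static struct flow_list pmd_flows = LIST_HEAD_INITIALIZER(pmd_flows);
    static struct flow_list ext_flows = LIST_HEAD_INITIALIZER(ext_flows);

    /* Like flow_hw_create_ctrl_flow(): pick the list by ownership. */
    static int
    create_ctrl_flow(uint32_t sqn, bool external)
    {
    	struct ctrl_flow *f = calloc(1, sizeof(*f));

    	if (f == NULL)
    		return -1;
    	f->sqn = sqn;
    	LIST_INSERT_HEAD(external ? &ext_flows : &pmd_flows, f, next);
    	return 0;
    }

    /* PMD traffic disable flushes only the rules it created itself. */
    static void
    pmd_traffic_disable(void)
    {
    	struct ctrl_flow *f;

    	while ((f = LIST_FIRST(&pmd_flows)) != NULL) {
    		LIST_REMOVE(f, next);
    		free(f);
    	}
    }

    int
    main(void)
    {
    	create_ctrl_flow(1, false); /* PMD-owned SQ */
    	create_ctrl_flow(7, true);  /* externally managed SQ */
    	pmd_traffic_disable();
    	/* The externally owned rule survives the PMD disable path. */
    	printf("external rule kept: %s\n",
    	       LIST_EMPTY(&ext_flows) ? "no" : "yes");
    	return 0;
    }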

Fixes: 26e1eaf2dac4 ("net/mlx5: support device control for E-Switch default rule")

Signed-off-by: Suanming Mou <suanmingm at nvidia.com>
---
 drivers/net/mlx5/mlx5.h         |  1 +
 drivers/net/mlx5/mlx5_flow.h    |  4 +--
 drivers/net/mlx5/mlx5_flow_hw.c | 47 ++++++++++++++++++++++++---------
 drivers/net/mlx5/mlx5_trigger.c |  4 +--
 drivers/net/mlx5/mlx5_txq.c     |  4 +--
 5 files changed, 41 insertions(+), 19 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 8a46ba90b0..deb9959f67 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1730,6 +1730,7 @@ struct mlx5_priv {
 	void *root_drop_action; /* Pointer to root drop action. */
 	rte_spinlock_t hw_ctrl_lock;
 	LIST_HEAD(hw_ctrl_flow, mlx5_hw_ctrl_flow) hw_ctrl_flows;
+	LIST_HEAD(hw_ext_ctrl_flow, mlx5_hw_ctrl_flow) hw_ext_ctrl_flows;
 	struct rte_flow_template_table *hw_esw_sq_miss_root_tbl;
 	struct rte_flow_template_table *hw_esw_sq_miss_tbl;
 	struct rte_flow_template_table *hw_esw_zero_tbl;
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index f03734f991..1192735750 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -2581,12 +2581,12 @@ int mlx5_flow_pick_transfer_proxy(struct rte_eth_dev *dev,
 int mlx5_flow_hw_flush_ctrl_flows(struct rte_eth_dev *dev);
 
 int mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev,
-					 uint32_t sqn);
+					 uint32_t sqn, bool external);
 int mlx5_flow_hw_esw_destroy_sq_miss_flow(struct rte_eth_dev *dev,
 					  uint32_t sqn);
 int mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev);
 int mlx5_flow_hw_create_tx_default_mreg_copy_flow(struct rte_eth_dev *dev);
-int mlx5_flow_hw_tx_repr_matching_flow(struct rte_eth_dev *dev, uint32_t sqn);
+int mlx5_flow_hw_tx_repr_matching_flow(struct rte_eth_dev *dev, uint32_t sqn, bool external);
 int mlx5_flow_actions_validate(struct rte_eth_dev *dev,
 		const struct rte_flow_actions_template_attr *attr,
 		const struct rte_flow_action actions[],
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 3f3ab4859b..28d0bbecc4 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -6999,6 +6999,7 @@ flow_hw_configure(struct rte_eth_dev *dev,
 	priv->nb_queue = nb_q_updated;
 	rte_spinlock_init(&priv->hw_ctrl_lock);
 	LIST_INIT(&priv->hw_ctrl_flows);
+	LIST_INIT(&priv->hw_ext_ctrl_flows);
 	ret = flow_hw_create_ctrl_rx_tables(dev);
 	if (ret) {
 		rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
@@ -8439,6 +8440,8 @@ const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
  *   Index of an action template associated with @p table.
  * @param info
  *   Additional info about control flow rule.
+ * @param external
+ *   External ctrl flow.
  *
  * @return
  *   0 on success, negative errno value otherwise and rte_errno set.
@@ -8451,7 +8454,8 @@ flow_hw_create_ctrl_flow(struct rte_eth_dev *owner_dev,
 			 uint8_t item_template_idx,
 			 struct rte_flow_action actions[],
 			 uint8_t action_template_idx,
-			 struct mlx5_hw_ctrl_flow_info *info)
+			 struct mlx5_hw_ctrl_flow_info *info,
+			 bool external)
 {
 	struct mlx5_priv *priv = proxy_dev->data->dev_private;
 	uint32_t queue = CTRL_QUEUE_ID(priv);
@@ -8502,7 +8506,10 @@ flow_hw_create_ctrl_flow(struct rte_eth_dev *owner_dev,
 		entry->info = *info;
 	else
 		entry->info.type = MLX5_HW_CTRL_FLOW_TYPE_GENERAL;
-	LIST_INSERT_HEAD(&priv->hw_ctrl_flows, entry, next);
+	if (external)
+		LIST_INSERT_HEAD(&priv->hw_ext_ctrl_flows, entry, next);
+	else
+		LIST_INSERT_HEAD(&priv->hw_ctrl_flows, entry, next);
 	rte_spinlock_unlock(&priv->hw_ctrl_lock);
 	return 0;
 error:
@@ -8676,11 +8683,23 @@ flow_hw_flush_all_ctrl_flows(struct rte_eth_dev *dev)
 		mlx5_free(cf);
 		cf = cf_next;
 	}
+	cf = LIST_FIRST(&priv->hw_ext_ctrl_flows);
+	while (cf != NULL) {
+		cf_next = LIST_NEXT(cf, next);
+		ret = flow_hw_destroy_ctrl_flow(dev, cf->flow);
+		if (ret) {
+			rte_errno = ret;
+			return -ret;
+		}
+		LIST_REMOVE(cf, next);
+		mlx5_free(cf);
+		cf = cf_next;
+	}
 	return 0;
 }
 
 int
-mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn)
+mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn, bool external)
 {
 	uint16_t port_id = dev->data->port_id;
 	struct rte_flow_item_ethdev esw_mgr_spec = {
@@ -8764,7 +8783,7 @@ mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn)
 		.type = RTE_FLOW_ACTION_TYPE_END,
 	};
 	ret = flow_hw_create_ctrl_flow(dev, proxy_dev, proxy_priv->hw_esw_sq_miss_root_tbl,
-				       items, 0, actions, 0, &flow_info);
+				       items, 0, actions, 0, &flow_info, external);
 	if (ret) {
 		DRV_LOG(ERR, "Port %u failed to create root SQ miss flow rule for SQ %u, ret %d",
 			port_id, sqn, ret);
@@ -8795,7 +8814,7 @@ mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn)
 	};
 	flow_info.type = MLX5_HW_CTRL_FLOW_TYPE_SQ_MISS;
 	ret = flow_hw_create_ctrl_flow(dev, proxy_dev, proxy_priv->hw_esw_sq_miss_tbl,
-				       items, 0, actions, 0, &flow_info);
+				       items, 0, actions, 0, &flow_info, external);
 	if (ret) {
 		DRV_LOG(ERR, "Port %u failed to create HWS SQ miss flow rule for SQ %u, ret %d",
 			port_id, sqn, ret);
@@ -8917,7 +8936,7 @@ mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev)
 	}
 	return flow_hw_create_ctrl_flow(dev, proxy_dev,
 					proxy_priv->hw_esw_zero_tbl,
-					items, 0, actions, 0, &flow_info);
+					items, 0, actions, 0, &flow_info, false);
 }
 
 int
@@ -8972,11 +8991,11 @@ mlx5_flow_hw_create_tx_default_mreg_copy_flow(struct rte_eth_dev *dev)
 		return 0;
 	return flow_hw_create_ctrl_flow(dev, dev,
 					priv->hw_tx_meta_cpy_tbl,
-					eth_all, 0, copy_reg_action, 0, &flow_info);
+					eth_all, 0, copy_reg_action, 0, &flow_info, false);
 }
 
 int
-mlx5_flow_hw_tx_repr_matching_flow(struct rte_eth_dev *dev, uint32_t sqn)
+mlx5_flow_hw_tx_repr_matching_flow(struct rte_eth_dev *dev, uint32_t sqn, bool external)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_rte_flow_item_sq sq_spec = {
@@ -9030,7 +9049,7 @@ mlx5_flow_hw_tx_repr_matching_flow(struct rte_eth_dev *dev, uint32_t sqn)
 		actions[2].type = RTE_FLOW_ACTION_TYPE_JUMP;
 	}
 	return flow_hw_create_ctrl_flow(dev, dev, priv->hw_tx_repr_tagging_tbl,
-					items, 0, actions, 0, &flow_info);
+					items, 0, actions, 0, &flow_info, external);
 }
 
 static uint32_t
@@ -9161,7 +9180,7 @@ __flow_hw_ctrl_flows_single(struct rte_eth_dev *dev,
 	items[3] = flow_hw_get_ctrl_rx_l4_item(rss_type);
 	items[4] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
 	/* Without VLAN filtering, only a single flow rule must be created. */
-	return flow_hw_create_ctrl_flow(dev, dev, tbl, items, 0, actions, 0, &flow_info);
+	return flow_hw_create_ctrl_flow(dev, dev, tbl, items, 0, actions, 0, &flow_info, false);
 }
 
 static int
@@ -9202,7 +9221,8 @@ __flow_hw_ctrl_flows_single_vlan(struct rte_eth_dev *dev,
 		};
 
 		items[1].spec = &vlan_spec;
-		if (flow_hw_create_ctrl_flow(dev, dev, tbl, items, 0, actions, 0, &flow_info))
+		if (flow_hw_create_ctrl_flow(dev, dev,
+					     tbl, items, 0, actions, 0, &flow_info, false))
 			return -rte_errno;
 	}
 	return 0;
@@ -9246,7 +9266,8 @@ __flow_hw_ctrl_flows_unicast(struct rte_eth_dev *dev,
 		if (!memcmp(mac, &cmp, sizeof(*mac)))
 			continue;
 		memcpy(&eth_spec.dst.addr_bytes, mac->addr_bytes, RTE_ETHER_ADDR_LEN);
-		if (flow_hw_create_ctrl_flow(dev, dev, tbl, items, 0, actions, 0, &flow_info))
+		if (flow_hw_create_ctrl_flow(dev, dev,
+					     tbl, items, 0, actions, 0, &flow_info, false))
 			return -rte_errno;
 	}
 	return 0;
@@ -9300,7 +9321,7 @@ __flow_hw_ctrl_flows_unicast_vlan(struct rte_eth_dev *dev,
 
 			items[1].spec = &vlan_spec;
 			if (flow_hw_create_ctrl_flow(dev, dev, tbl, items, 0, actions, 0,
-						     &flow_info))
+						     &flow_info, false))
 				return -rte_errno;
 		}
 	}
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index 6479e44a94..2f95b8fe77 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -1494,13 +1494,13 @@ mlx5_traffic_enable_hws(struct rte_eth_dev *dev)
 			continue;
 		queue = mlx5_txq_get_sqn(txq);
 		if ((priv->representor || priv->master) && config->dv_esw_en) {
-			if (mlx5_flow_hw_esw_create_sq_miss_flow(dev, queue)) {
+			if (mlx5_flow_hw_esw_create_sq_miss_flow(dev, queue, false)) {
 				mlx5_txq_release(dev, i);
 				goto error;
 			}
 		}
 		if (config->dv_esw_en && config->repr_matching) {
-			if (mlx5_flow_hw_tx_repr_matching_flow(dev, queue)) {
+			if (mlx5_flow_hw_tx_repr_matching_flow(dev, queue, false)) {
 				mlx5_txq_release(dev, i);
 				goto error;
 			}
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 8c48e7e2a8..d617784dba 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -1311,10 +1311,10 @@ rte_pmd_mlx5_external_sq_enable(uint16_t port_id, uint32_t sq_num)
 	}
 #ifdef HAVE_MLX5_HWS_SUPPORT
 	if (priv->sh->config.dv_flow_en == 2) {
-		if (mlx5_flow_hw_esw_create_sq_miss_flow(dev, sq_num))
+		if (mlx5_flow_hw_esw_create_sq_miss_flow(dev, sq_num, true))
 			return -rte_errno;
 		if (priv->sh->config.repr_matching &&
-		    mlx5_flow_hw_tx_repr_matching_flow(dev, sq_num)) {
+		    mlx5_flow_hw_tx_repr_matching_flow(dev, sq_num, true)) {
 			mlx5_flow_hw_esw_destroy_sq_miss_flow(dev, sq_num);
 			return -rte_errno;
 		}
-- 
2.25.1

---
  Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- -	2023-12-11 17:56:25.933926300 +0800
+++ 0088-net-mlx5-fix-destroying-external-representor-flow.patch	2023-12-11 17:56:23.187652300 +0800
@@ -1 +1 @@
-From f37c184a0ea41e6244f5b1404fe866efe6d312a5 Mon Sep 17 00:00:00 2001
+From d32e9e689b63f151a4a3a0ebd806ec5cc1a5c1a2 Mon Sep 17 00:00:00 2001
@@ -4,0 +5,3 @@
+Cc: Xueming Li <xuemingl at nvidia.com>
+
+[ upstream commit f37c184a0ea41e6244f5b1404fe866efe6d312a5 ]
@@ -13 +15,0 @@
-Cc: stable at dpdk.org
@@ -25 +27 @@
-index 45ad0701f1..795748eddc 100644
+index 8a46ba90b0..deb9959f67 100644
@@ -28 +30 @@
-@@ -1855,6 +1855,7 @@ struct mlx5_priv {
+@@ -1730,6 +1730,7 @@ struct mlx5_priv {
@@ -37 +39 @@
-index d57b3b5465..8c0b9a4b60 100644
+index f03734f991..1192735750 100644
@@ -40 +42 @@
-@@ -2874,12 +2874,12 @@ int flow_null_counter_query(struct rte_eth_dev *dev,
+@@ -2581,12 +2581,12 @@ int mlx5_flow_pick_transfer_proxy(struct rte_eth_dev *dev,
@@ -56 +58 @@
-index d512889682..c35064518a 100644
+index 3f3ab4859b..28d0bbecc4 100644
@@ -59 +61 @@
-@@ -9189,6 +9189,7 @@ flow_hw_configure(struct rte_eth_dev *dev,
+@@ -6999,6 +6999,7 @@ flow_hw_configure(struct rte_eth_dev *dev,
@@ -67 +69 @@
-@@ -11343,6 +11344,8 @@ const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
+@@ -8439,6 +8440,8 @@ const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
@@ -76 +78 @@
-@@ -11355,7 +11358,8 @@ flow_hw_create_ctrl_flow(struct rte_eth_dev *owner_dev,
+@@ -8451,7 +8454,8 @@ flow_hw_create_ctrl_flow(struct rte_eth_dev *owner_dev,
@@ -86 +88 @@
-@@ -11406,7 +11410,10 @@ flow_hw_create_ctrl_flow(struct rte_eth_dev *owner_dev,
+@@ -8502,7 +8506,10 @@ flow_hw_create_ctrl_flow(struct rte_eth_dev *owner_dev,
@@ -98 +100 @@
-@@ -11580,11 +11587,23 @@ flow_hw_flush_all_ctrl_flows(struct rte_eth_dev *dev)
+@@ -8676,11 +8683,23 @@ flow_hw_flush_all_ctrl_flows(struct rte_eth_dev *dev)
@@ -123 +125 @@
-@@ -11668,7 +11687,7 @@ mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn)
+@@ -8764,7 +8783,7 @@ mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn)
@@ -132 +134 @@
-@@ -11699,7 +11718,7 @@ mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn)
+@@ -8795,7 +8814,7 @@ mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn)
@@ -141 +143 @@
-@@ -11821,7 +11840,7 @@ mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev)
+@@ -8917,7 +8936,7 @@ mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev)
@@ -150 +152 @@
-@@ -11876,11 +11895,11 @@ mlx5_flow_hw_create_tx_default_mreg_copy_flow(struct rte_eth_dev *dev)
+@@ -8972,11 +8991,11 @@ mlx5_flow_hw_create_tx_default_mreg_copy_flow(struct rte_eth_dev *dev)
@@ -164 +166 @@
-@@ -11934,7 +11953,7 @@ mlx5_flow_hw_tx_repr_matching_flow(struct rte_eth_dev *dev, uint32_t sqn)
+@@ -9030,7 +9049,7 @@ mlx5_flow_hw_tx_repr_matching_flow(struct rte_eth_dev *dev, uint32_t sqn)
@@ -173 +175 @@
-@@ -12065,7 +12084,7 @@ __flow_hw_ctrl_flows_single(struct rte_eth_dev *dev,
+@@ -9161,7 +9180,7 @@ __flow_hw_ctrl_flows_single(struct rte_eth_dev *dev,
@@ -182 +184 @@
-@@ -12106,7 +12125,8 @@ __flow_hw_ctrl_flows_single_vlan(struct rte_eth_dev *dev,
+@@ -9202,7 +9221,8 @@ __flow_hw_ctrl_flows_single_vlan(struct rte_eth_dev *dev,
@@ -192 +194 @@
-@@ -12150,7 +12170,8 @@ __flow_hw_ctrl_flows_unicast(struct rte_eth_dev *dev,
+@@ -9246,7 +9266,8 @@ __flow_hw_ctrl_flows_unicast(struct rte_eth_dev *dev,
@@ -195 +197 @@
- 		memcpy(&eth_spec.hdr.dst_addr.addr_bytes, mac->addr_bytes, RTE_ETHER_ADDR_LEN);
+ 		memcpy(&eth_spec.dst.addr_bytes, mac->addr_bytes, RTE_ETHER_ADDR_LEN);
@@ -202 +204 @@
-@@ -12204,7 +12225,7 @@ __flow_hw_ctrl_flows_unicast_vlan(struct rte_eth_dev *dev,
+@@ -9300,7 +9321,7 @@ __flow_hw_ctrl_flows_unicast_vlan(struct rte_eth_dev *dev,
@@ -212 +214 @@
-index 7bdb897612..d7ecb149fa 100644
+index 6479e44a94..2f95b8fe77 100644
@@ -232 +234 @@
-index ccdf2ffb14..1ac43548b2 100644
+index 8c48e7e2a8..d617784dba 100644

