patch 'net/mlx5: fix VXLAN-GPE next protocol translation' has been queued to stable release 20.11.4

Xueming Li xuemingl at nvidia.com
Sun Nov 28 15:53:48 CET 2021


Hi,

FYI, your patch has been queued to stable release 20.11.4

Note that it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 11/30/21, so please
shout if anyone has objections.

Also note that after the patch there's a diff of the upstream commit vs. the
patch applied to the branch. This will indicate whether any rebasing was
needed to apply it to the stable branch. If there were code changes for
rebasing (i.e., not only metadata diffs), please double-check that the rebase
was done correctly.

Queued patches are on a temporary branch at:
https://github.com/steevenlee/dpdk

This queued commit can be viewed at:
https://github.com/steevenlee/dpdk/commit/121e062b7f6259e544ecef31d48fb123ea3d6599

Thanks.

Xueming Li <xuemingl at nvidia.com>

---
From 121e062b7f6259e544ecef31d48fb123ea3d6599 Mon Sep 17 00:00:00 2001
From: Gregory Etelson <getelson at nvidia.com>
Date: Sun, 14 Nov 2021 17:36:12 +0200
Subject: [PATCH] net/mlx5: fix VXLAN-GPE next protocol translation
Cc: Xueming Li <xuemingl at nvidia.com>

[ upstream commit 861fa3796f75748ccc4a6dae55e5a7e34c97dea4 ]

VXLAN-GPE extends the VXLAN protocol and provides a next-protocol
field that specifies the type of the first inner header.

The application can assign an explicit value to the
VXLAN-GPE::next_protocol field or leave it at the default. In the
latter case, the rdma-core library cannot correctly recognize the
matcher built by the PMD, which results in a hardware configuration
that does not match the inner headers.

The patch forces a VXLAN-GPE::next_protocol assignment if the
application did not explicitly set it to a non-default value.
Fixes: 90456726eb80 ("net/mlx5: fix VXLAN-GPE item translation")

Signed-off-by: Gregory Etelson <getelson at nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo at nvidia.com>
---
 drivers/net/mlx5/mlx5_flow_dv.c | 76 ++++++++++++++++++---------------
 1 file changed, 42 insertions(+), 34 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 37c5a7eb88..27a6554c7e 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -7182,46 +7182,40 @@ flow_dv_translate_item_vxlan(void *matcher, void *key,
 
 static void
 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
-				 const struct rte_flow_item *item, int inner)
+				 const struct rte_flow_item *item,
+				 const uint64_t pattern_flags)
 {
+	static const struct rte_flow_item_vxlan_gpe dummy_vxlan_gpe_hdr = {0, };
 	const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
 	const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
-	void *headers_m;
-	void *headers_v;
+	/* The item was validated to be on the outer side */
+	void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
+	void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
 	void *misc_m =
 		MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
 	void *misc_v =
 		MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
-	char *vni_m;
-	char *vni_v;
-	uint16_t dport;
-	int size;
-	int i;
+	char *vni_m =
+		MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
+	char *vni_v =
+		MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
+	int i, size = sizeof(vxlan_m->vni);
 	uint8_t flags_m = 0xff;
 	uint8_t flags_v = 0xc;
+	uint8_t m_protocol, v_protocol;
 
-	if (inner) {
-		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
-					 inner_headers);
-		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
-	} else {
-		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
-					 outer_headers);
-		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
-	}
-	dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
-		MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
 	if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
 		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
-		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
+		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
+			 MLX5_UDP_PORT_VXLAN_GPE);
+	}
+	if (!vxlan_v) {
+		vxlan_v = &dummy_vxlan_gpe_hdr;
+		vxlan_m = &dummy_vxlan_gpe_hdr;
+	} else {
+		if (!vxlan_m)
+			vxlan_m = &rte_flow_item_vxlan_gpe_mask;
 	}
-	if (!vxlan_v)
-		return;
-	if (!vxlan_m)
-		vxlan_m = &rte_flow_item_vxlan_gpe_mask;
-	size = sizeof(vxlan_m->vni);
-	vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
-	vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
 	memcpy(vni_m, vxlan_m->vni, size);
 	for (i = 0; i < size; ++i)
 		vni_v[i] = vni_m[i] & vxlan_v->vni[i];
@@ -7231,10 +7225,22 @@ flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
 	}
 	MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
 	MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
-	MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol,
-		 vxlan_m->protocol);
-	MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol,
-		 vxlan_v->protocol);
+	m_protocol = vxlan_m->protocol;
+	v_protocol = vxlan_v->protocol;
+	if (!m_protocol) {
+		m_protocol = 0xff;
+		/* Force next protocol to ensure next headers parsing. */
+		if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2)
+			v_protocol = RTE_VXLAN_GPE_TYPE_ETH;
+		else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4)
+			v_protocol = RTE_VXLAN_GPE_TYPE_IPV4;
+		else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)
+			v_protocol = RTE_VXLAN_GPE_TYPE_IPV6;
+	}
+	MLX5_SET(fte_match_set_misc3, misc_m,
+		 outer_vxlan_gpe_next_protocol, m_protocol);
+	MLX5_SET(fte_match_set_misc3, misc_v,
+		 outer_vxlan_gpe_next_protocol, m_protocol & v_protocol);
 }
 
 /**
@@ -9758,6 +9764,7 @@ flow_dv_translate(struct rte_eth_dev *dev,
 		.skip_scale = !!dev_flow->skip_scale,
 		.std_tbl_fix = true,
 	};
+	const struct rte_flow_item *tunnel_item = NULL;
 
 	if (!wks)
 		return rte_flow_error_set(error, ENOMEM,
@@ -10549,11 +10556,9 @@ flow_dv_translate(struct rte_eth_dev *dev,
 			last_item = MLX5_FLOW_LAYER_VXLAN;
 			break;
 		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
-			flow_dv_translate_item_vxlan_gpe(match_mask,
-							 match_value, items,
-							 tunnel);
 			matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
 			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
+			tunnel_item = items;
 			break;
 		case RTE_FLOW_ITEM_TYPE_GENEVE:
 			flow_dv_translate_item_geneve(match_mask, match_value,
@@ -10648,6 +10653,9 @@ flow_dv_translate(struct rte_eth_dev *dev,
 						   match_value, NULL, attr))
 			return -rte_errno;
 	}
+	if (item_flags & MLX5_FLOW_LAYER_VXLAN_GPE)
+		flow_dv_translate_item_vxlan_gpe(match_mask, match_value,
+						 tunnel_item, item_flags);
 #ifdef RTE_LIBRTE_MLX5_DEBUG
 	MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
 					      dev_flow->dv.value.buf));
-- 
2.34.0

---
  Diff of the applied patch vs. the upstream commit (please double-check if non-empty):
---
--- -	2021-11-28 22:41:05.607589874 +0800
+++ 0044-net-mlx5-fix-VXLAN-GPE-next-protocol-translation.patch	2021-11-28 22:41:03.333540739 +0800
@@ -1 +1 @@
-From 861fa3796f75748ccc4a6dae55e5a7e34c97dea4 Mon Sep 17 00:00:00 2001
+From 121e062b7f6259e544ecef31d48fb123ea3d6599 Mon Sep 17 00:00:00 2001
@@ -4,0 +5,3 @@
+Cc: Xueming Li <xuemingl at nvidia.com>
+
+[ upstream commit 861fa3796f75748ccc4a6dae55e5a7e34c97dea4 ]
@@ -19 +21,0 @@
-Cc: stable at dpdk.org
@@ -28 +30 @@
-index 1b4e15dff1..f9acb69cca 100644
+index 37c5a7eb88..27a6554c7e 100644
@@ -31 +33 @@
-@@ -8962,46 +8962,40 @@ flow_dv_translate_item_vxlan(struct rte_eth_dev *dev,
+@@ -7182,46 +7182,40 @@ flow_dv_translate_item_vxlan(void *matcher, void *key,
@@ -99 +101 @@
-@@ -9011,10 +9005,22 @@ flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
+@@ -7231,10 +7225,22 @@ flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
@@ -126 +128,2 @@
-@@ -12644,6 +12650,7 @@ flow_dv_translate(struct rte_eth_dev *dev,
+@@ -9758,6 +9764,7 @@ flow_dv_translate(struct rte_eth_dev *dev,
+ 		.skip_scale = !!dev_flow->skip_scale,
@@ -129 +131,0 @@
- 	const struct rte_flow_item *integrity_items[2] = {NULL, NULL};
@@ -134 +136 @@
-@@ -13437,11 +13444,9 @@ flow_dv_translate(struct rte_eth_dev *dev,
+@@ -10549,11 +10556,9 @@ flow_dv_translate(struct rte_eth_dev *dev,
@@ -147,3 +149,3 @@
-@@ -13573,6 +13578,9 @@ flow_dv_translate(struct rte_eth_dev *dev,
- 						      integrity_items,
- 						      item_flags);
+@@ -10648,6 +10653,9 @@ flow_dv_translate(struct rte_eth_dev *dev,
+ 						   match_value, NULL, attr))
+ 			return -rte_errno;

