patch 'net/mlx5: fix tunnel offload validation' has been queued to stable release 20.11.4

Xueming Li xuemingl at nvidia.com
Sun Nov 28 15:53:11 CET 2021


Hi,

FYI, your patch has been queued to stable release 20.11.4

Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 11/30/21, so please
shout if you have any objections.

Also note that after the patch there is a diff of the upstream commit vs. the
patch applied to the branch. This indicates whether any rebasing was needed to
apply it to the stable branch. If there were code changes for rebasing
(i.e. not only metadata diffs), please double-check that the rebase was
done correctly.

Queued patches are on a temporary branch at:
https://github.com/steevenlee/dpdk

This queued commit can be viewed at:
https://github.com/steevenlee/dpdk/commit/5744208d62383b276d955922ad6fb5783ad9451e

Thanks.

Xueming Li <xuemingl at nvidia.com>

---
From 5744208d62383b276d955922ad6fb5783ad9451e Mon Sep 17 00:00:00 2001
From: Gregory Etelson <getelson at nvidia.com>
Date: Wed, 3 Nov 2021 10:55:56 +0200
Subject: [PATCH] net/mlx5: fix tunnel offload validation
Cc: Xueming Li <xuemingl at nvidia.com>

[ upstream commit aaa6a7ec0f3e19388f1262b7af298e6668d401cf ]

The tunnel offload API allows the application to restore a packet to
its original form if the chain of flows is missed after the DECAP action.

The MLX5 PMD provides tunnel offload support only if the DV API is enabled.

The patch verifies DV availability before proceeding with
tunnel offload tasks.

Fixes: 4ec6360de37d ("net/mlx5: implement tunnel offload")

Signed-off-by: Gregory Etelson <getelson at nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo at nvidia.com>
---
 drivers/net/mlx5/mlx5_flow.c    | 54 +++++++++++++++------------------
 drivers/net/mlx5/mlx5_flow_dv.c |  5 +++
 2 files changed, 30 insertions(+), 29 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 49619d95e1..1fa361b318 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -7736,33 +7736,37 @@ err:
 	return err;
 }
 
-static inline bool
+static inline int
 mlx5_flow_tunnel_validate(struct rte_eth_dev *dev,
 			  struct rte_flow_tunnel *tunnel,
-			  const char *err_msg)
+			  struct rte_flow_error *error)
 {
-	err_msg = NULL;
-	if (!is_tunnel_offload_active(dev)) {
-		err_msg = "tunnel offload was not activated";
-		goto out;
-	} else if (!tunnel) {
-		err_msg = "no application tunnel";
-		goto out;
-	}
+	struct mlx5_priv *priv = dev->data->dev_private;
 
+	if (!priv->config.dv_flow_en)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+					  "flow DV interface is off");
+	if (!is_tunnel_offload_active(dev))
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+					  "tunnel offload was not activated");
+	if (!tunnel)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+					  "no application tunnel");
 	switch (tunnel->type) {
 	default:
-		err_msg = "unsupported tunnel type";
-		goto out;
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+					  "unsupported tunnel type");
 	case RTE_FLOW_ITEM_TYPE_VXLAN:
 	case RTE_FLOW_ITEM_TYPE_GRE:
 	case RTE_FLOW_ITEM_TYPE_NVGRE:
 	case RTE_FLOW_ITEM_TYPE_GENEVE:
 		break;
 	}
-
-out:
-	return !err_msg;
+	return 0;
 }
 
 static int
@@ -7772,15 +7776,11 @@ mlx5_flow_tunnel_decap_set(struct rte_eth_dev *dev,
 		    uint32_t *num_of_actions,
 		    struct rte_flow_error *error)
 {
-	int ret;
 	struct mlx5_flow_tunnel *tunnel;
-	const char *err_msg = NULL;
-	bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, err_msg);
+	int ret = mlx5_flow_tunnel_validate(dev, app_tunnel, error);
 
-	if (!verdict)
-		return rte_flow_error_set(error, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
-					  err_msg);
+	if (ret)
+		return ret;
 	ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
 	if (ret < 0) {
 		return rte_flow_error_set(error, ret,
@@ -7799,15 +7799,11 @@ mlx5_flow_tunnel_match(struct rte_eth_dev *dev,
 		       uint32_t *num_of_items,
 		       struct rte_flow_error *error)
 {
-	int ret;
 	struct mlx5_flow_tunnel *tunnel;
-	const char *err_msg = NULL;
-	bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, err_msg);
+	int ret = mlx5_flow_tunnel_validate(dev, app_tunnel, error);
 
-	if (!verdict)
-		return rte_flow_error_set(error, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
-					  err_msg);
+	if (ret)
+		return ret;
 	ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
 	if (ret < 0) {
 		return rte_flow_error_set(error, ret,
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index e8d4a006d4..37c5a7eb88 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -5337,6 +5337,11 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 	tunnel = is_tunnel_offload_active(dev) ?
 		 mlx5_get_tof(items, actions, &tof_rule_type) : NULL;
 	if (tunnel) {
+		if (!priv->config.dv_flow_en)
+			return rte_flow_error_set
+				(error, ENOTSUP,
+				 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				 NULL, "tunnel offload requires DV flow interface");
 		if (priv->representor)
 			return rte_flow_error_set
 				(error, ENOTSUP,
-- 
2.34.0
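
For context on the change described in the commit message: the sketch below is a
minimal, hypothetical application-side use of the tunnel offload API that the new
validation guards. The port id, VNI value, helper name and error handling are
illustrative only and not part of the patch; with dv_flow_en disabled, the fixed
PMD rejects the request with ENOTSUP ("flow DV interface is off") instead of
proceeding.

#include <stdio.h>
#include <stdint.h>
#include <rte_flow.h>

/* Hypothetical helper: ask the PMD for the actions implementing
 * VXLAN tunnel DECAP offload on the given port.
 */
static int
request_vxlan_decap_actions(uint16_t port_id)
{
	struct rte_flow_tunnel tunnel = {
		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
		.tun_id = 42,	/* example VNI, illustrative only */
	};
	struct rte_flow_action *pmd_actions;
	uint32_t num_of_actions;
	struct rte_flow_error error = { 0 };
	int ret;

	/* The PMD validates the request; without DV this now fails early. */
	ret = rte_flow_tunnel_decap_set(port_id, &tunnel, &pmd_actions,
					&num_of_actions, &error);
	if (ret) {
		printf("tunnel decap_set failed: %s\n",
		       error.message ? error.message : "(no message)");
		return ret;
	}
	/* ... create flow rules using pmd_actions, then release them ... */
	rte_flow_tunnel_action_decap_release(port_id, pmd_actions,
					     num_of_actions, &error);
	return 0;
}

Note how the refactored mlx5_flow_tunnel_validate() returns the result of
rte_flow_error_set() directly, so it hands back a negative errno together with
the error message in one step; that is why the callers in the patch can simply
do "if (ret) return ret;".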

---
  Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- -	2021-11-28 22:41:03.903624575 +0800
+++ 0007-net-mlx5-fix-tunnel-offload-validation.patch	2021-11-28 22:41:03.206876365 +0800
@@ -1 +1 @@
-From aaa6a7ec0f3e19388f1262b7af298e6668d401cf Mon Sep 17 00:00:00 2001
+From 5744208d62383b276d955922ad6fb5783ad9451e Mon Sep 17 00:00:00 2001
@@ -4,0 +5,3 @@
+Cc: Xueming Li <xuemingl at nvidia.com>
+
+[ upstream commit aaa6a7ec0f3e19388f1262b7af298e6668d401cf ]
@@ -15 +17,0 @@
-Cc: stable at dpdk.org
@@ -25 +27 @@
-index 40625688b0..29134b4825 100644
+index 49619d95e1..1fa361b318 100644
@@ -28 +30 @@
-@@ -9489,33 +9489,37 @@ err:
+@@ -7736,33 +7736,37 @@ err:
@@ -81 +83 @@
-@@ -9525,15 +9529,11 @@ mlx5_flow_tunnel_decap_set(struct rte_eth_dev *dev,
+@@ -7772,15 +7776,11 @@ mlx5_flow_tunnel_decap_set(struct rte_eth_dev *dev,
@@ -100 +102 @@
-@@ -9552,15 +9552,11 @@ mlx5_flow_tunnel_match(struct rte_eth_dev *dev,
+@@ -7799,15 +7799,11 @@ mlx5_flow_tunnel_match(struct rte_eth_dev *dev,
@@ -120 +122 @@
-index fa017c5521..8fa7829341 100644
+index e8d4a006d4..37c5a7eb88 100644
@@ -123 +125 @@
-@@ -6871,6 +6871,11 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
+@@ -5337,6 +5337,11 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,

