patch 'net/mlx5: fix indirect action async job initialization' has been queued to stable release 23.11.1

Xueming Li xuemingl at nvidia.com
Sat Apr 13 14:49:34 CEST 2024


Hi,

FYI, your patch has been queued to stable release 23.11.1

Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 04/15/24. So please
shout if anyone has objections.

Also note that after the patch there's a diff of the upstream commit vs the
patch applied to the branch. This will indicate if there was any rebasing
needed to apply to the stable branch. If there were code changes for rebasing
(ie: not only metadata diffs), please double check that the rebase was
correctly done.

Queued patches are on a temporary branch at:
https://git.dpdk.org/dpdk-stable/log/?h=23.11-staging

This queued commit can be viewed at:
https://git.dpdk.org/dpdk-stable/commit/?h=23.11-staging&id=1994df02c988a2f1d70cfd192ecd2098edfc6713

Thanks.

Xueming Li <xuemingl at nvidia.com>

---
>From 1994df02c988a2f1d70cfd192ecd2098edfc6713 Mon Sep 17 00:00:00 2001
From: Gregory Etelson <getelson at nvidia.com>
Date: Thu, 7 Mar 2024 12:19:10 +0200
Subject: [PATCH] net/mlx5: fix indirect action async job initialization
Cc: Xueming Li <xuemingl at nvidia.com>

[ upstream commit 1a8b80329748033eb3bb9ed7433e0aef1bbcd838 ]

MLX5 PMD driver supports 2 types of indirect actions:
legacy INDIRECT and INDIRECT_LIST.
PMD has different handlers for each of the indirect action types.
Therefore PMD marks async `job::indirect_type` with the relevant value.

PMD sets the type only during indirect action creation.
A legacy INDIRECT query could get a job object previously used by an
INDIRECT_LIST action. In that case, the job object was handled as
INDIRECT_LIST because `job::indirect_type` was not re-assigned.

The patch sets `job::indirect_type` during the job initialization
according to operation type.

Fixes: 59155721936e ("net/mlx5: fix indirect flow completion processing")

Signed-off-by: Gregory Etelson <getelson at nvidia.com>
Acked-by: Dariusz Sosnowski <dsosnowski at nvidia.com>
---
 drivers/net/mlx5/mlx5_flow_hw.c | 24 +++++++++++++-----------
 1 file changed, 13 insertions(+), 11 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index f43ffb1d4e..6d0f1beeec 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -109,6 +109,7 @@ flow_hw_action_job_init(struct mlx5_priv *priv, uint32_t queue,
 			const struct rte_flow_action_handle *handle,
 			void *user_data, void *query_data,
 			enum mlx5_hw_job_type type,
+			enum mlx5_hw_indirect_type indirect_type,
 			struct rte_flow_error *error);
 static int
 mlx5_tbl_multi_pattern_process(struct rte_eth_dev *dev,
@@ -1583,7 +1584,8 @@ flow_hw_meter_mark_compile(struct rte_eth_dev *dev,
 	struct mlx5_aso_mtr *aso_mtr;
 	struct mlx5_hw_q_job *job =
 		flow_hw_action_job_init(priv, queue, NULL, NULL, NULL,
-					MLX5_HW_Q_JOB_TYPE_CREATE, NULL);
+					MLX5_HW_Q_JOB_TYPE_CREATE,
+					MLX5_HW_INDIRECT_TYPE_LEGACY, NULL);
 
 	if (!job)
 		return -1;
@@ -10057,6 +10059,7 @@ flow_hw_action_job_init(struct mlx5_priv *priv, uint32_t queue,
 			const struct rte_flow_action_handle *handle,
 			void *user_data, void *query_data,
 			enum mlx5_hw_job_type type,
+			enum mlx5_hw_indirect_type indirect_type,
 			struct rte_flow_error *error)
 {
 	struct mlx5_hw_q_job *job;
@@ -10074,6 +10077,7 @@ flow_hw_action_job_init(struct mlx5_priv *priv, uint32_t queue,
 	job->action = handle;
 	job->user_data = user_data;
 	job->query.user = query_data;
+	job->indirect_type = indirect_type;
 	return job;
 }
 
@@ -10085,7 +10089,7 @@ mlx5_flow_action_job_init(struct mlx5_priv *priv, uint32_t queue,
 			  struct rte_flow_error *error)
 {
 	return flow_hw_action_job_init(priv, queue, handle, user_data, query_data,
-				       type, error);
+				       type, MLX5_HW_INDIRECT_TYPE_LEGACY, error);
 }
 
 static __rte_always_inline void
@@ -10155,7 +10159,7 @@ flow_hw_action_handle_create(struct rte_eth_dev *dev, uint32_t queue,
 	if (attr || force_job) {
 		job = flow_hw_action_job_init(priv, queue, NULL, user_data,
 					      NULL, MLX5_HW_Q_JOB_TYPE_CREATE,
-					      error);
+					      MLX5_HW_INDIRECT_TYPE_LEGACY, error);
 		if (!job)
 			return NULL;
 	}
@@ -10224,7 +10228,6 @@ flow_hw_action_handle_create(struct rte_eth_dev *dev, uint32_t queue,
 	}
 	if (job && !force_job) {
 		job->action = handle;
-		job->indirect_type = MLX5_HW_INDIRECT_TYPE_LEGACY;
 		flow_hw_action_finalize(dev, queue, job, push, aso,
 					handle != NULL);
 	}
@@ -10316,7 +10319,7 @@ flow_hw_action_handle_update(struct rte_eth_dev *dev, uint32_t queue,
 	if (attr || force_job) {
 		job = flow_hw_action_job_init(priv, queue, handle, user_data,
 					      NULL, MLX5_HW_Q_JOB_TYPE_UPDATE,
-					      error);
+					      MLX5_HW_INDIRECT_TYPE_LEGACY, error);
 		if (!job)
 			return -rte_errno;
 	}
@@ -10398,7 +10401,7 @@ flow_hw_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue,
 	if (attr || force_job) {
 		job = flow_hw_action_job_init(priv, queue, handle, user_data,
 					      NULL, MLX5_HW_Q_JOB_TYPE_DESTROY,
-					      error);
+					      MLX5_HW_INDIRECT_TYPE_LEGACY, error);
 		if (!job)
 			return -rte_errno;
 	}
@@ -10711,7 +10714,7 @@ flow_hw_action_handle_query(struct rte_eth_dev *dev, uint32_t queue,
 	if (attr) {
 		job = flow_hw_action_job_init(priv, queue, handle, user_data,
 					      data, MLX5_HW_Q_JOB_TYPE_QUERY,
-					      error);
+					      MLX5_HW_INDIRECT_TYPE_LEGACY, error);
 		if (!job)
 			return -rte_errno;
 	}
@@ -10765,7 +10768,7 @@ flow_hw_async_action_handle_query_update
 		job = flow_hw_action_job_init(priv, queue, handle, user_data,
 					      query,
 					      MLX5_HW_Q_JOB_TYPE_UPDATE_QUERY,
-					      error);
+					      MLX5_HW_INDIRECT_TYPE_LEGACY, error);
 		if (!job)
 			return -rte_errno;
 	}
@@ -11445,7 +11448,7 @@ flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,
 	if (attr) {
 		job = flow_hw_action_job_init(priv, queue, NULL, user_data,
 					      NULL, MLX5_HW_Q_JOB_TYPE_CREATE,
-					      error);
+					      MLX5_HW_INDIRECT_TYPE_LIST, error);
 		if (!job)
 			return NULL;
 	}
@@ -11465,7 +11468,6 @@ flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,
 	}
 	if (job) {
 		job->action = handle;
-		job->indirect_type = MLX5_HW_INDIRECT_TYPE_LIST;
 		flow_hw_action_finalize(dev, queue, job, push, false,
 					handle != NULL);
 	}
@@ -11510,7 +11512,7 @@ flow_hw_async_action_list_handle_destroy
 	if (attr) {
 		job = flow_hw_action_job_init(priv, queue, NULL, user_data,
 					      NULL, MLX5_HW_Q_JOB_TYPE_DESTROY,
-					      error);
+					      MLX5_HW_INDIRECT_TYPE_LIST, error);
 		if (!job)
 			return rte_errno;
 	}
-- 
2.34.1

---
  Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- -	2024-04-13 20:43:07.886808758 +0800
+++ 0094-net-mlx5-fix-indirect-action-async-job-initializatio.patch	2024-04-13 20:43:05.057753853 +0800
@@ -1 +1 @@
-From 1a8b80329748033eb3bb9ed7433e0aef1bbcd838 Mon Sep 17 00:00:00 2001
+From 1994df02c988a2f1d70cfd192ecd2098edfc6713 Mon Sep 17 00:00:00 2001
@@ -4,0 +5,3 @@
+Cc: Xueming Li <xuemingl at nvidia.com>
+
+[ upstream commit 1a8b80329748033eb3bb9ed7433e0aef1bbcd838 ]
@@ -20 +22,0 @@
-Cc: stable at dpdk.org
@@ -29 +31 @@
-index 8f004b5435..b9ba05f695 100644
+index f43ffb1d4e..6d0f1beeec 100644
@@ -32 +34 @@
-@@ -188,6 +188,7 @@ flow_hw_action_job_init(struct mlx5_priv *priv, uint32_t queue,
+@@ -109,6 +109,7 @@ flow_hw_action_job_init(struct mlx5_priv *priv, uint32_t queue,
@@ -40 +42 @@
-@@ -1692,7 +1693,8 @@ flow_hw_meter_mark_compile(struct rte_eth_dev *dev,
+@@ -1583,7 +1584,8 @@ flow_hw_meter_mark_compile(struct rte_eth_dev *dev,
@@ -50 +52 @@
-@@ -10998,6 +11000,7 @@ flow_hw_action_job_init(struct mlx5_priv *priv, uint32_t queue,
+@@ -10057,6 +10059,7 @@ flow_hw_action_job_init(struct mlx5_priv *priv, uint32_t queue,
@@ -58 +60 @@
-@@ -11015,6 +11018,7 @@ flow_hw_action_job_init(struct mlx5_priv *priv, uint32_t queue,
+@@ -10074,6 +10077,7 @@ flow_hw_action_job_init(struct mlx5_priv *priv, uint32_t queue,
@@ -66 +68 @@
-@@ -11026,7 +11030,7 @@ mlx5_flow_action_job_init(struct mlx5_priv *priv, uint32_t queue,
+@@ -10085,7 +10089,7 @@ mlx5_flow_action_job_init(struct mlx5_priv *priv, uint32_t queue,
@@ -75 +77 @@
-@@ -11096,7 +11100,7 @@ flow_hw_action_handle_create(struct rte_eth_dev *dev, uint32_t queue,
+@@ -10155,7 +10159,7 @@ flow_hw_action_handle_create(struct rte_eth_dev *dev, uint32_t queue,
@@ -84 +86 @@
-@@ -11165,7 +11169,6 @@ flow_hw_action_handle_create(struct rte_eth_dev *dev, uint32_t queue,
+@@ -10224,7 +10228,6 @@ flow_hw_action_handle_create(struct rte_eth_dev *dev, uint32_t queue,
@@ -92 +94 @@
-@@ -11257,7 +11260,7 @@ flow_hw_action_handle_update(struct rte_eth_dev *dev, uint32_t queue,
+@@ -10316,7 +10319,7 @@ flow_hw_action_handle_update(struct rte_eth_dev *dev, uint32_t queue,
@@ -101 +103 @@
-@@ -11339,7 +11342,7 @@ flow_hw_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue,
+@@ -10398,7 +10401,7 @@ flow_hw_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue,
@@ -110 +112 @@
-@@ -11663,7 +11666,7 @@ flow_hw_action_handle_query(struct rte_eth_dev *dev, uint32_t queue,
+@@ -10711,7 +10714,7 @@ flow_hw_action_handle_query(struct rte_eth_dev *dev, uint32_t queue,
@@ -119 +121 @@
-@@ -11717,7 +11720,7 @@ flow_hw_async_action_handle_query_update
+@@ -10765,7 +10768,7 @@ flow_hw_async_action_handle_query_update
@@ -128 +130 @@
-@@ -12397,7 +12400,7 @@ flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,
+@@ -11445,7 +11448,7 @@ flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,
@@ -137 +139 @@
-@@ -12417,7 +12420,6 @@ flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,
+@@ -11465,7 +11468,6 @@ flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,
@@ -145 +147 @@
-@@ -12462,7 +12464,7 @@ flow_hw_async_action_list_handle_destroy
+@@ -11510,7 +11512,7 @@ flow_hw_async_action_list_handle_destroy


More information about the stable mailing list