[dpdk-stable] patch 'net/mlx5: fix counter container usage' has been queued to stable release 19.11.3

luca.boccassi at gmail.com luca.boccassi at gmail.com
Tue May 19 15:03:17 CEST 2020


Hi,

FYI, your patch has been queued to stable release 19.11.3

Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 05/21/20. So please
shout if anyone has objections.

Also note that after the patch there's a diff of the upstream commit vs the
patch applied to the branch. This will indicate if there was any rebasing
needed to apply to the stable branch. If there were code changes for rebasing
(ie: not only metadata diffs), please double check that the rebase was
correctly done.

Thanks.

Luca Boccassi

---
>From 03527bd3937cbd3b00250749918d82774d39e9c9 Mon Sep 17 00:00:00 2001
From: Suanming Mou <suanmingm at mellanox.com>
Date: Tue, 7 Apr 2020 11:59:40 +0800
Subject: [PATCH] net/mlx5: fix counter container usage

[ upstream commit 92a0a3a13876789b1f5e3217b4a07e52b96dd7d0 ]

As none-batch counter pool allocates only one counter every time, after
the new allocated counter pop out, the pool will be empty and moved to
the end of the container list in the container.

Currently, a new non-batch counter allocation may happen together with a
new counter pool allocation, which means the new counter comes from a new
pool. When a new pool is allocated, the container resize and switch
happen. In this case, after the pool becomes empty, it should be added to
the pool list of the new container to which it belongs.

Update the container pointer along with the pool allocation so as not to
add the pool to the incorrect container.

Fixes: 5382d28c2110 ("net/mlx5: accelerate DV flow counter transactions")

Signed-off-by: Suanming Mou <suanmingm at mellanox.com>
Acked-by: Matan Azrad <matan at mellanox.com>
---
 drivers/net/mlx5/mlx5_flow_dv.c | 38 +++++++++++++++++++--------------
 1 file changed, 22 insertions(+), 16 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index f0edb98c02..88793eb7a7 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -3947,11 +3947,13 @@ _flow_dv_query_count(struct rte_eth_dev *dev,
  *   The devX counter handle.
  * @param[in] batch
  *   Whether the pool is for counter that was allocated by batch command.
+ * @param[in/out] cont_cur
+ *   Pointer to the container pointer, it will be update in pool resize.
  *
  * @return
- *   A new pool pointer on success, NULL otherwise and rte_errno is set.
+ *   The pool container pointer on success, NULL otherwise and rte_errno is set.
  */
-static struct mlx5_flow_counter_pool *
+static struct mlx5_pools_container *
 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
 		    uint32_t batch)
 {
@@ -3985,12 +3987,12 @@ flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
 	 */
 	rte_atomic64_set(&pool->query_gen, 0x2);
 	TAILQ_INIT(&pool->counters);
-	TAILQ_INSERT_TAIL(&cont->pool_list, pool, next);
+	TAILQ_INSERT_HEAD(&cont->pool_list, pool, next);
 	cont->pools[n_valid] = pool;
 	/* Pool initialization must be updated before host thread access. */
 	rte_cio_wmb();
 	rte_atomic16_add(&cont->n_valid, 1);
-	return pool;
+	return cont;
 }
 
 /**
@@ -4004,33 +4006,35 @@ flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
  *   Whether the pool is for counter that was allocated by batch command.
  *
  * @return
- *   The free counter pool pointer and @p cnt_free is set on success,
+ *   The counter container pointer and @p cnt_free is set on success,
  *   NULL otherwise and rte_errno is set.
  */
-static struct mlx5_flow_counter_pool *
+static struct mlx5_pools_container *
 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
 			     struct mlx5_flow_counter **cnt_free,
 			     uint32_t batch)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_pools_container *cont;
 	struct mlx5_flow_counter_pool *pool;
 	struct mlx5_devx_obj *dcs = NULL;
 	struct mlx5_flow_counter *cnt;
 	uint32_t i;
 
+	cont = MLX5_CNT_CONTAINER(priv->sh, batch, 0);
 	if (!batch) {
 		/* bulk_bitmap must be 0 for single counter allocation. */
 		dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
 		if (!dcs)
 			return NULL;
-		pool = flow_dv_find_pool_by_id
-			(MLX5_CNT_CONTAINER(priv->sh, batch, 0), dcs->id);
+		pool = flow_dv_find_pool_by_id(cont, dcs->id);
 		if (!pool) {
-			pool = flow_dv_pool_create(dev, dcs, batch);
-			if (!pool) {
+			cont = flow_dv_pool_create(dev, dcs, batch);
+			if (!cont) {
 				mlx5_devx_cmd_destroy(dcs);
 				return NULL;
 			}
+			pool = TAILQ_FIRST(&cont->pool_list);
 		} else if (dcs->id < pool->min_dcs->id) {
 			rte_atomic64_set(&pool->a64_dcs,
 					 (int64_t)(uintptr_t)dcs);
@@ -4039,7 +4043,7 @@ flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
 		TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
 		cnt->dcs = dcs;
 		*cnt_free = cnt;
-		return pool;
+		return cont;
 	}
 	/* bulk_bitmap is in 128 counters units. */
 	if (priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4)
@@ -4048,18 +4052,19 @@ flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
 		rte_errno = ENODATA;
 		return NULL;
 	}
-	pool = flow_dv_pool_create(dev, dcs, batch);
-	if (!pool) {
+	cont = flow_dv_pool_create(dev, dcs, batch);
+	if (!cont) {
 		mlx5_devx_cmd_destroy(dcs);
 		return NULL;
 	}
+	pool = TAILQ_FIRST(&cont->pool_list);
 	for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
 		cnt = &pool->counters_raw[i];
 		cnt->pool = pool;
 		TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
 	}
 	*cnt_free = &pool->counters_raw[0];
-	return pool;
+	return cont;
 }
 
 /**
@@ -4160,9 +4165,10 @@ flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
 		cnt_free = NULL;
 	}
 	if (!cnt_free) {
-		pool = flow_dv_counter_pool_prepare(dev, &cnt_free, batch);
-		if (!pool)
+		cont = flow_dv_counter_pool_prepare(dev, &cnt_free, batch);
+		if (!cont)
 			return NULL;
+		pool = TAILQ_FIRST(&cont->pool_list);
 	}
 	cnt_free->batch = batch;
 	/* Create a DV counter action only in the first time usage. */
-- 
2.20.1

---
  Diff of the applied patch vs upstream commit (please double-check if non-empty:
---
--- -	2020-05-19 14:04:47.018109957 +0100
+++ 0062-net-mlx5-fix-counter-container-usage.patch	2020-05-19 14:04:44.216648373 +0100
@@ -1,8 +1,10 @@
-From 92a0a3a13876789b1f5e3217b4a07e52b96dd7d0 Mon Sep 17 00:00:00 2001
+From 03527bd3937cbd3b00250749918d82774d39e9c9 Mon Sep 17 00:00:00 2001
 From: Suanming Mou <suanmingm at mellanox.com>
 Date: Tue, 7 Apr 2020 11:59:40 +0800
 Subject: [PATCH] net/mlx5: fix counter container usage
 
+[ upstream commit 92a0a3a13876789b1f5e3217b4a07e52b96dd7d0 ]
+
 As none-batch counter pool allocates only one counter every time, after
 the new allocated counter pop out, the pool will be empty and moved to
 the end of the container list in the container.
@@ -17,7 +19,6 @@
 add the pool to the incorrect container.
 
 Fixes: 5382d28c2110 ("net/mlx5: accelerate DV flow counter transactions")
-Cc: stable at dpdk.org
 
 Signed-off-by: Suanming Mou <suanmingm at mellanox.com>
 Acked-by: Matan Azrad <matan at mellanox.com>
@@ -26,10 +27,10 @@
  1 file changed, 22 insertions(+), 16 deletions(-)
 
 diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
-index f5d98d267b..6a49bc9311 100644
+index f0edb98c02..88793eb7a7 100644
 --- a/drivers/net/mlx5/mlx5_flow_dv.c
 +++ b/drivers/net/mlx5/mlx5_flow_dv.c
-@@ -4153,11 +4153,13 @@ _flow_dv_query_count(struct rte_eth_dev *dev,
+@@ -3947,11 +3947,13 @@ _flow_dv_query_count(struct rte_eth_dev *dev,
   *   The devX counter handle.
   * @param[in] batch
   *   Whether the pool is for counter that was allocated by batch command.
@@ -45,7 +46,7 @@
  flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
  		    uint32_t batch)
  {
-@@ -4191,12 +4193,12 @@ flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
+@@ -3985,12 +3987,12 @@ flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
  	 */
  	rte_atomic64_set(&pool->query_gen, 0x2);
  	TAILQ_INIT(&pool->counters);
@@ -60,7 +61,7 @@
  }
  
  /**
-@@ -4210,33 +4212,35 @@ flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
+@@ -4004,33 +4006,35 @@ flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
   *   Whether the pool is for counter that was allocated by batch command.
   *
   * @return
@@ -102,7 +103,7 @@
  		} else if (dcs->id < pool->min_dcs->id) {
  			rte_atomic64_set(&pool->a64_dcs,
  					 (int64_t)(uintptr_t)dcs);
-@@ -4245,7 +4249,7 @@ flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
+@@ -4039,7 +4043,7 @@ flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
  		TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
  		cnt->dcs = dcs;
  		*cnt_free = cnt;
@@ -111,7 +112,7 @@
  	}
  	/* bulk_bitmap is in 128 counters units. */
  	if (priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4)
-@@ -4254,18 +4258,19 @@ flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
+@@ -4048,18 +4052,19 @@ flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
  		rte_errno = ENODATA;
  		return NULL;
  	}
@@ -134,7 +135,7 @@
  }
  
  /**
-@@ -4366,9 +4371,10 @@ flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
+@@ -4160,9 +4165,10 @@ flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
  		cnt_free = NULL;
  	}
  	if (!cnt_free) {


More information about the stable mailing list