[dpdk-stable] patch 'net/ice: fix memzone leak on queue re-configure' has been queued to stable release 20.11.4

Xueming Li xuemingl at nvidia.com
Wed Nov 10 07:29:52 CET 2021


Hi,

FYI, your patch has been queued to stable release 20.11.4

Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 11/12/21. So please
shout if anyone has objections.

Also note that after the patch there's a diff of the upstream commit vs the
patch applied to the branch. This will indicate if any rebasing was needed
to apply the patch to the stable branch. If there were code changes for the
rebase (i.e., not only metadata diffs), please double-check that the rebase
was done correctly.

Queued patches are on a temporary branch at:
https://github.com/steevenlee/dpdk

This queued commit can be viewed at:
https://github.com/steevenlee/dpdk/commit/8b56d276440b7e97f35858362ae766ac4ac4a012

Thanks.

Xueming Li <xuemingl at nvidia.com>

---
From 8b56d276440b7e97f35858362ae766ac4ac4a012 Mon Sep 17 00:00:00 2001
From: Yunjian Wang <wangyunjian at huawei.com>
Date: Wed, 22 Sep 2021 21:28:36 +0800
Subject: [PATCH] net/ice: fix memzone leak on queue re-configure
Cc: Xueming Li <xuemingl at nvidia.com>

[ upstream commit d3778bf39a1dd36181b11459ad3936501ca17f4d ]

Normally the queue memzone should be freed when the device is closed.
But it is not freed when the device setup ops run as follows:

rte_eth_bond_slave_remove
-->__eth_bond_slave_remove_lock_free
---->slave_remove
------>rte_eth_dev_internal_reset
-------->rte_eth_dev_rx_queue_config
---------->eth_dev_rx_queue_config
------------>ice_rx_queue_release
rte_eth_dev_close
-->ice_dev_close
---->ice_free_queues
------>ice_rx_queue_release
      (not called because nb_rx_queues and nb_tx_queues are 0)

And when the number of queues is reduced, the memzones of the
higher-indexed queues can no longer be looked up by queue index, so
their memory is leaked. Fix this by releasing the memzone when the
queue itself is released.
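
In short, the fix makes each queue own the memzone backing its
descriptor ring: the setup path stores the memzone pointer in the
queue structure and the release path frees it. A minimal sketch of
the pattern, condensed from the Rx-side hunks below (the Tx side is
symmetric):

    struct ice_rx_queue {
            /* ... */
            const struct rte_memzone *mz; /* ring memzone owned by the queue */
    };

    /* setup: remember the memzone backing the descriptor ring */
    rxq->mz = rz;

    /* release: free the ring memory together with the queue */
    q->rx_rel_mbufs(q);
    rte_free(q->sw_ring);
    rte_memzone_free(q->mz);
    rte_free(q);

With the queue owning its memzone, the per-name
rte_eth_dma_zone_free() calls in ice_free_queues() and
ice_fdir_teardown() become redundant and are removed.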

Fixes: 460d1679586e ("drivers/net: delete HW rings while freeing queues")

Signed-off-by: Yunjian Wang <wangyunjian at huawei.com>
Acked-by: Haiyue Wang <haiyue.wang at intel.com>
---
 drivers/net/ice/ice_fdir_filter.c | 2 --
 drivers/net/ice/ice_rxtx.c        | 8 ++++++--
 drivers/net/ice/ice_rxtx.h        | 2 ++
 3 files changed, 8 insertions(+), 4 deletions(-)

diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c
index bb720d1036..4a071254ce 100644
--- a/drivers/net/ice/ice_fdir_filter.c
+++ b/drivers/net/ice/ice_fdir_filter.c
@@ -659,10 +659,8 @@ ice_fdir_teardown(struct ice_pf *pf)
 
 	ice_tx_queue_release(pf->fdir.txq);
 	pf->fdir.txq = NULL;
-	rte_eth_dma_zone_free(eth_dev, "fdir_tx_ring", ICE_FDIR_QUEUE_ID);
 	ice_rx_queue_release(pf->fdir.rxq);
 	pf->fdir.rxq = NULL;
-	rte_eth_dma_zone_free(eth_dev, "fdir_rx_ring", ICE_FDIR_QUEUE_ID);
 	ice_fdir_prof_rm_all(pf);
 	ice_fdir_prof_free(hw);
 	ice_release_vsi(vsi);
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index 8ac9411083..906fca15b0 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -1096,6 +1096,7 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
 		return -ENOMEM;
 	}
 
+	rxq->mz = rz;
 	/* Zero all the descriptors in the ring. */
 	memset(rz->addr, 0, ring_size);
 
@@ -1151,6 +1152,7 @@ ice_rx_queue_release(void *rxq)
 
 	q->rx_rel_mbufs(q);
 	rte_free(q->sw_ring);
+	rte_memzone_free(q->mz);
 	rte_free(q);
 }
 
@@ -1297,6 +1299,7 @@ ice_tx_queue_setup(struct rte_eth_dev *dev,
 		return -ENOMEM;
 	}
 
+	txq->mz = tz;
 	txq->nb_tx_desc = nb_desc;
 	txq->tx_rs_thresh = tx_rs_thresh;
 	txq->tx_free_thresh = tx_free_thresh;
@@ -1347,6 +1350,7 @@ ice_tx_queue_release(void *txq)
 
 	q->tx_rel_mbufs(q);
 	rte_free(q->sw_ring);
+	rte_memzone_free(q->mz);
 	rte_free(q);
 }
 
@@ -2037,7 +2041,6 @@ ice_free_queues(struct rte_eth_dev *dev)
 			continue;
 		ice_rx_queue_release(dev->data->rx_queues[i]);
 		dev->data->rx_queues[i] = NULL;
-		rte_eth_dma_zone_free(dev, "rx_ring", i);
 	}
 	dev->data->nb_rx_queues = 0;
 
@@ -2046,7 +2049,6 @@ ice_free_queues(struct rte_eth_dev *dev)
 			continue;
 		ice_tx_queue_release(dev->data->tx_queues[i]);
 		dev->data->tx_queues[i] = NULL;
-		rte_eth_dma_zone_free(dev, "tx_ring", i);
 	}
 	dev->data->nb_tx_queues = 0;
 }
@@ -2093,6 +2095,7 @@ ice_fdir_setup_tx_resources(struct ice_pf *pf)
 		return -ENOMEM;
 	}
 
+	txq->mz = tz;
 	txq->nb_tx_desc = ICE_FDIR_NUM_TX_DESC;
 	txq->queue_id = ICE_FDIR_QUEUE_ID;
 	txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
@@ -2151,6 +2154,7 @@ ice_fdir_setup_rx_resources(struct ice_pf *pf)
 		return -ENOMEM;
 	}
 
+	rxq->mz = rz;
 	rxq->nb_rx_desc = ICE_FDIR_NUM_RX_DESC;
 	rxq->queue_id = ICE_FDIR_QUEUE_ID;
 	rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index e9edb7bb29..4bea2bc2be 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -88,6 +88,7 @@ struct ice_rx_queue {
 	uint64_t xtr_ol_flag; /* Protocol extraction offload flag */
 	ice_rxd_to_pkt_fields_t rxd_to_pkt_fields; /* handle FlexiMD by RXDID */
 	ice_rx_release_mbufs_t rx_rel_mbufs;
+	const struct rte_memzone *mz;
 };
 
 struct ice_tx_entry {
@@ -132,6 +133,7 @@ struct ice_tx_queue {
 	bool tx_deferred_start; /* don't start this queue in dev start */
 	bool q_set; /* indicate if tx queue has been configured */
 	ice_tx_release_mbufs_t tx_rel_mbufs;
+	const struct rte_memzone *mz;
 };
 
 /* Offload features */
-- 
2.33.0

---
  Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- -	2021-11-10 14:17:06.787205309 +0800
+++ 0108-net-ice-fix-memzone-leak-on-queue-re-configure.patch	2021-11-10 14:17:01.870746091 +0800
@@ -1 +1 @@
-From d3778bf39a1dd36181b11459ad3936501ca17f4d Mon Sep 17 00:00:00 2001
+From 8b56d276440b7e97f35858362ae766ac4ac4a012 Mon Sep 17 00:00:00 2001
@@ -4,0 +5,3 @@
+Cc: Xueming Li <xuemingl at nvidia.com>
+
+[ upstream commit d3778bf39a1dd36181b11459ad3936501ca17f4d ]
@@ -28 +30,0 @@
-Cc: stable at dpdk.org
@@ -39 +41 @@
-index e0cca7cb3c..afc956e0a2 100644
+index bb720d1036..4a071254ce 100644
@@ -42 +44 @@
-@@ -651,10 +651,8 @@ ice_fdir_teardown(struct ice_pf *pf)
+@@ -659,10 +659,8 @@ ice_fdir_teardown(struct ice_pf *pf)
@@ -54 +56 @@
-index 3f9de55a42..7a2220daa4 100644
+index 8ac9411083..906fca15b0 100644
@@ -57 +59 @@
-@@ -1152,6 +1152,7 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
+@@ -1096,6 +1096,7 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
@@ -65 +67 @@
-@@ -1207,6 +1208,7 @@ ice_rx_queue_release(void *rxq)
+@@ -1151,6 +1152,7 @@ ice_rx_queue_release(void *rxq)
@@ -73 +75 @@
-@@ -1353,6 +1355,7 @@ ice_tx_queue_setup(struct rte_eth_dev *dev,
+@@ -1297,6 +1299,7 @@ ice_tx_queue_setup(struct rte_eth_dev *dev,
@@ -81 +83 @@
-@@ -1415,6 +1418,7 @@ ice_tx_queue_release(void *txq)
+@@ -1347,6 +1350,7 @@ ice_tx_queue_release(void *txq)
@@ -89 +91 @@
-@@ -2156,7 +2160,6 @@ ice_free_queues(struct rte_eth_dev *dev)
+@@ -2037,7 +2041,6 @@ ice_free_queues(struct rte_eth_dev *dev)
@@ -97 +99 @@
-@@ -2165,7 +2168,6 @@ ice_free_queues(struct rte_eth_dev *dev)
+@@ -2046,7 +2049,6 @@ ice_free_queues(struct rte_eth_dev *dev)
@@ -105 +107 @@
-@@ -2212,6 +2214,7 @@ ice_fdir_setup_tx_resources(struct ice_pf *pf)
+@@ -2093,6 +2095,7 @@ ice_fdir_setup_tx_resources(struct ice_pf *pf)
@@ -113 +115 @@
-@@ -2270,6 +2273,7 @@ ice_fdir_setup_rx_resources(struct ice_pf *pf)
+@@ -2151,6 +2154,7 @@ ice_fdir_setup_rx_resources(struct ice_pf *pf)
@@ -122 +124 @@
-index 0a38740f45..c5ec6b7d1a 100644
+index e9edb7bb29..4bea2bc2be 100644
@@ -125 +127,3 @@
-@@ -93,6 +93,7 @@ struct ice_rx_queue {
+@@ -88,6 +88,7 @@ struct ice_rx_queue {
+ 	uint64_t xtr_ol_flag; /* Protocol extraction offload flag */
+ 	ice_rxd_to_pkt_fields_t rxd_to_pkt_fields; /* handle FlexiMD by RXDID */
@@ -127,2 +130,0 @@
- 	uint64_t offloads;
- 	uint32_t time_high;
@@ -133 +135 @@
-@@ -137,6 +138,7 @@ struct ice_tx_queue {
+@@ -132,6 +133,7 @@ struct ice_tx_queue {

