[dpdk-stable] patch 'net/e1000: fix memzone leak on queue re-configure' has been queued to stable release 20.11.4

Xueming Li xuemingl at nvidia.com
Wed Nov 10 07:29:51 CET 2021


Hi,

FYI, your patch has been queued to stable release 20.11.4

Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 11/12/21. So please
shout if anyone has objections.

Also note that after the patch there's a diff of the upstream commit vs the
patch applied to the branch. This will indicate whether any rebasing was
needed to apply it to the stable branch. If there were code changes for
rebasing (i.e. not only metadata diffs), please double check that the
rebase was correctly done.

Queued patches are on a temporary branch at:
https://github.com/steevenlee/dpdk

This queued commit can be viewed at:
https://github.com/steevenlee/dpdk/commit/b51d3a497044316f72fc87a1707ab2033a998ea4

Thanks.

Xueming Li <xuemingl at nvidia.com>

---
From b51d3a497044316f72fc87a1707ab2033a998ea4 Mon Sep 17 00:00:00 2001
From: Yunjian Wang <wangyunjian at huawei.com>
Date: Wed, 22 Sep 2021 21:28:20 +0800
Subject: [PATCH] net/e1000: fix memzone leak on queue re-configure
Cc: Xueming Li <xuemingl at nvidia.com>

[ upstream commit 09cbfa2da4268b4f789ffd34587365b51fa656b4 ]

Normally the queue memzone should be freed when the device is
closed, but it is not freed when the device setup ops run as
follows:

rte_eth_bond_slave_remove
-->__eth_bond_slave_remove_lock_free
---->slave_remove
------>rte_eth_dev_internal_reset
-------->rte_eth_dev_rx_queue_config
---------->eth_dev_rx_queue_config
------------>em_rx_queue_release
rte_eth_dev_close
-->eth_em_close
---->em_dev_free_queues
------>em_rx_queue_release
      (not called, because nb_rx_queues and nb_tx_queues are 0)

And when the queue number is then reduced, the memzones of the
higher-indexed queues are lost, which leads to a memory leak. So the
memzone should be released when the queue itself is released.
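
To make the trigger concrete, here is a minimal sketch of the
simplest sequence that hits this path before the fix (the port_id,
mb_pool, descriptor count of 512 and zeroed port_conf are
illustrative assumptions, not taken from the report; error handling
omitted):

#include <rte_ethdev.h>
#include <rte_mempool.h>

/* Hypothetical repro sketch for an already-probed e1000/em port. */
static void
reproduce_leak(uint16_t port_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_conf port_conf = {0};
	uint16_t q;

	/* 4 Rx/Tx queues: each queue setup reserves a ring memzone. */
	rte_eth_dev_configure(port_id, 4, 4, &port_conf);
	for (q = 0; q < 4; q++) {
		rte_eth_rx_queue_setup(port_id, q, 512, SOCKET_ID_ANY,
				NULL, mb_pool);
		rte_eth_tx_queue_setup(port_id, q, 512, SOCKET_ID_ANY,
				NULL);
	}

	/* Re-configure down to 1 queue: ethdev releases queues 1..3
	 * through em_rx_queue_release()/em_tx_queue_release(), which
	 * before this fix freed the sw_ring but not the ring memzone. */
	rte_eth_dev_configure(port_id, 1, 1, &port_conf);
	rte_eth_rx_queue_setup(port_id, 0, 512, SOCKET_ID_ANY,
			NULL, mb_pool);
	rte_eth_tx_queue_setup(port_id, 0, 512, SOCKET_ID_ANY, NULL);

	/* em_dev_free_queues() freed "rx_ring"/"tx_ring" memzones only
	 * for i < nb_rx_queues (now 1), so the zones of the released
	 * queues 1..3 leaked. */
	rte_eth_dev_close(port_id);
}

With the fix, the memzone is freed inside the queue release functions
themselves, so it no longer matters how many queues are still
configured at close time.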

Fixes: 460d1679586e ("drivers/net: delete HW rings while freeing queues")

Signed-off-by: Yunjian Wang <wangyunjian at huawei.com>
Acked-by: Haiyue Wang <haiyue.wang at intel.com>
---
 drivers/net/e1000/em_rxtx.c  | 8 ++++++--
 drivers/net/e1000/igb_rxtx.c | 9 +++++++--
 2 files changed, 13 insertions(+), 4 deletions(-)

diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c
index 19e3bffd46..a7439c3aa7 100644
--- a/drivers/net/e1000/em_rxtx.c
+++ b/drivers/net/e1000/em_rxtx.c
@@ -104,6 +104,7 @@ struct em_rx_queue {
 	uint8_t             hthresh;    /**< Host threshold register. */
 	uint8_t             wthresh;    /**< Write-back threshold register. */
 	uint8_t             crc_len;    /**< 0 if CRC stripped, 4 otherwise. */
+	const struct rte_memzone *mz;
 };
 
 /**
@@ -173,6 +174,7 @@ struct em_tx_queue {
 	struct em_ctx_info ctx_cache;
 	/**< Hardware context history.*/
 	uint64_t	       offloads; /**< offloads of DEV_TX_OFFLOAD_* */
+	const struct rte_memzone *mz;
 };
 
 #if 1
@@ -1116,6 +1118,7 @@ em_tx_queue_release(struct em_tx_queue *txq)
 	if (txq != NULL) {
 		em_tx_queue_release_mbufs(txq);
 		rte_free(txq->sw_ring);
+		rte_memzone_free(txq->mz);
 		rte_free(txq);
 	}
 }
@@ -1286,6 +1289,7 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
 			RTE_CACHE_LINE_SIZE)) == NULL)
 		return -ENOMEM;
 
+	txq->mz = tz;
 	/* Allocate software ring */
 	if ((txq->sw_ring = rte_zmalloc("txq->sw_ring",
 			sizeof(txq->sw_ring[0]) * nb_desc,
@@ -1338,6 +1342,7 @@ em_rx_queue_release(struct em_rx_queue *rxq)
 	if (rxq != NULL) {
 		em_rx_queue_release_mbufs(rxq);
 		rte_free(rxq->sw_ring);
+		rte_memzone_free(rxq->mz);
 		rte_free(rxq);
 	}
 }
@@ -1452,6 +1457,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 			RTE_CACHE_LINE_SIZE)) == NULL)
 		return -ENOMEM;
 
+	rxq->mz = rz;
 	/* Allocate software ring. */
 	if ((rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
 			sizeof (rxq->sw_ring[0]) * nb_desc,
@@ -1611,14 +1617,12 @@ em_dev_free_queues(struct rte_eth_dev *dev)
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		eth_em_rx_queue_release(dev->data->rx_queues[i]);
 		dev->data->rx_queues[i] = NULL;
-		rte_eth_dma_zone_free(dev, "rx_ring", i);
 	}
 	dev->data->nb_rx_queues = 0;
 
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
 		eth_em_tx_queue_release(dev->data->tx_queues[i]);
 		dev->data->tx_queues[i] = NULL;
-		rte_eth_dma_zone_free(dev, "tx_ring", i);
 	}
 	dev->data->nb_tx_queues = 0;
 }
diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index 3bf13d1420..620281232e 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -112,6 +112,7 @@ struct igb_rx_queue {
 	uint8_t             drop_en;  /**< If not 0, set SRRCTL.Drop_En. */
 	uint32_t            flags;      /**< RX flags. */
 	uint64_t	    offloads;   /**< offloads of DEV_RX_OFFLOAD_* */
+	const struct rte_memzone *mz;
 };
 
 /**
@@ -186,6 +187,7 @@ struct igb_tx_queue {
 	struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
 	/**< Hardware context history.*/
 	uint64_t	       offloads; /**< offloads of DEV_TX_OFFLOAD_* */
+	const struct rte_memzone *mz;
 };
 
 #if 1
@@ -1276,6 +1278,7 @@ igb_tx_queue_release(struct igb_tx_queue *txq)
 	if (txq != NULL) {
 		igb_tx_queue_release_mbufs(txq);
 		rte_free(txq->sw_ring);
+		rte_memzone_free(txq->mz);
 		rte_free(txq);
 	}
 }
@@ -1545,6 +1548,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 		return -ENOMEM;
 	}
 
+	txq->mz = tz;
 	txq->nb_tx_desc = nb_desc;
 	txq->pthresh = tx_conf->tx_thresh.pthresh;
 	txq->hthresh = tx_conf->tx_thresh.hthresh;
@@ -1601,6 +1605,7 @@ igb_rx_queue_release(struct igb_rx_queue *rxq)
 	if (rxq != NULL) {
 		igb_rx_queue_release_mbufs(rxq);
 		rte_free(rxq->sw_ring);
+		rte_memzone_free(rxq->mz);
 		rte_free(rxq);
 	}
 }
@@ -1746,6 +1751,8 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 		igb_rx_queue_release(rxq);
 		return -ENOMEM;
 	}
+
+	rxq->mz = rz;
 	rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
 	rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
 	rxq->rx_ring_phys_addr = rz->iova;
@@ -1885,14 +1892,12 @@ igb_dev_free_queues(struct rte_eth_dev *dev)
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		eth_igb_rx_queue_release(dev->data->rx_queues[i]);
 		dev->data->rx_queues[i] = NULL;
-		rte_eth_dma_zone_free(dev, "rx_ring", i);
 	}
 	dev->data->nb_rx_queues = 0;
 
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
 		eth_igb_tx_queue_release(dev->data->tx_queues[i]);
 		dev->data->tx_queues[i] = NULL;
-		rte_eth_dma_zone_free(dev, "tx_ring", i);
 	}
 	dev->data->nb_tx_queues = 0;
 }
-- 
2.33.0

---
  Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- -	2021-11-10 14:17:06.740388092 +0800
+++ 0107-net-e1000-fix-memzone-leak-on-queue-re-configure.patch	2021-11-10 14:17:01.867412785 +0800
@@ -1 +1 @@
-From 09cbfa2da4268b4f789ffd34587365b51fa656b4 Mon Sep 17 00:00:00 2001
+From b51d3a497044316f72fc87a1707ab2033a998ea4 Mon Sep 17 00:00:00 2001
@@ -4,0 +5,3 @@
+Cc: Xueming Li <xuemingl at nvidia.com>
+
+[ upstream commit 09cbfa2da4268b4f789ffd34587365b51fa656b4 ]
@@ -28 +30,0 @@
-Cc: stable at dpdk.org
@@ -38 +40 @@
-index 00a8af6d39..8542a15320 100644
+index 19e3bffd46..a7439c3aa7 100644
@@ -91 +93 @@
- 		eth_em_rx_queue_release(dev, i);
+ 		eth_em_rx_queue_release(dev->data->rx_queues[i]);
@@ -98 +100 @@
- 		eth_em_tx_queue_release(dev, i);
+ 		eth_em_tx_queue_release(dev->data->tx_queues[i]);
@@ -105 +107 @@
-index d97ca1a011..8d64d7397a 100644
+index 3bf13d1420..620281232e 100644
@@ -159 +161 @@
- 		eth_igb_rx_queue_release(dev, i);
+ 		eth_igb_rx_queue_release(dev->data->rx_queues[i]);
@@ -166 +168 @@
- 		eth_igb_tx_queue_release(dev, i);
+ 		eth_igb_tx_queue_release(dev->data->tx_queues[i]);

