patch 'net/iavf: fix mbuf release in multi-process' has been queued to stable release 21.11.2

Kevin Traynor ktraynor at redhat.com
Thu Jun 9 13:36:49 CEST 2022


Hi,

FYI, your patch has been queued to stable release 21.11.2

Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 06/13/22. So please
shout if anyone has objections.

Also note that after the patch there's a diff of the upstream commit vs the
patch applied to the branch. This will indicate if there was any rebasing
needed to apply to the stable branch. If there were code changes for rebasing
(i.e., not only metadata diffs), please double-check that the rebase was
done correctly.

Queued patches are on a temporary branch at:
https://github.com/kevintraynor/dpdk-stable

This queued commit can be viewed at:
https://github.com/kevintraynor/dpdk-stable/commit/6d3526ad40c2db79b895fd0d25649b4526bcf44d

Thanks.

Kevin

---
>From 6d3526ad40c2db79b895fd0d25649b4526bcf44d Mon Sep 17 00:00:00 2001
From: Ke Zhang <ke1x.zhang at intel.com>
Date: Thu, 19 May 2022 07:36:04 +0000
Subject: [PATCH] net/iavf: fix mbuf release in multi-process

[ upstream commit fced83c1229e0ad89f26d07fa7bd46b8767d9f5c ]

In the multi-process environment, the secondary process operates on the
shared memory and changes the function pointer of the primary process,
so the function's address cannot be found when the primary process
releases the queues, resulting in a crash.

Fixes: 319c421f3890 ("net/avf: enable SSE Rx Tx")

Signed-off-by: Ke Zhang <ke1x.zhang at intel.com>
Acked-by: Qi Zhang <qi.z.zhang at intel.com>
---
 drivers/net/iavf/iavf_rxtx.c            | 36 ++++++++++++++++---------
 drivers/net/iavf/iavf_rxtx.h            | 11 ++++++++
 drivers/net/iavf/iavf_rxtx_vec_avx512.c |  8 ++----
 drivers/net/iavf/iavf_rxtx_vec_sse.c    | 16 +++--------
 4 files changed, 41 insertions(+), 30 deletions(-)

diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 4c8007043a..0d380bf80e 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -364,10 +364,22 @@ release_txq_mbufs(struct iavf_tx_queue *txq)
 }
 
-static const struct iavf_rxq_ops def_rxq_ops = {
-	.release_mbufs = release_rxq_mbufs,
+static const
+struct iavf_rxq_ops iavf_rxq_release_mbufs_ops[] = {
+	[IAVF_REL_MBUFS_DEFAULT].release_mbufs = release_rxq_mbufs,
+#ifdef RTE_ARCH_X86
+	[IAVF_REL_MBUFS_SSE_VEC].release_mbufs = iavf_rx_queue_release_mbufs_sse,
+#endif
 };
 
-static const struct iavf_txq_ops def_txq_ops = {
-	.release_mbufs = release_txq_mbufs,
+static const
+struct iavf_txq_ops iavf_txq_release_mbufs_ops[] = {
+	[IAVF_REL_MBUFS_DEFAULT].release_mbufs = release_txq_mbufs,
+#ifdef RTE_ARCH_X86
+	[IAVF_REL_MBUFS_SSE_VEC].release_mbufs = iavf_tx_queue_release_mbufs_sse,
+#ifdef CC_AVX512_SUPPORT
+	[IAVF_REL_MBUFS_AVX512_VEC].release_mbufs = iavf_tx_queue_release_mbufs_avx512,
+#endif
+#endif
+
 };
 
@@ -676,5 +688,5 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	dev->data->rx_queues[queue_idx] = rxq;
 	rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
-	rxq->ops = &def_rxq_ops;
+	rxq->rel_mbufs_type = IAVF_REL_MBUFS_DEFAULT;
 
 	if (check_rx_bulk_allow(rxq) == true) {
@@ -813,5 +825,5 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	dev->data->tx_queues[queue_idx] = txq;
 	txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx);
-	txq->ops = &def_txq_ops;
+	txq->rel_mbufs_type = IAVF_REL_MBUFS_DEFAULT;
 
 	if (check_tx_vec_allow(txq) == false) {
@@ -945,5 +957,5 @@ iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 
 	rxq = dev->data->rx_queues[rx_queue_id];
-	rxq->ops->release_mbufs(rxq);
+	iavf_rxq_release_mbufs_ops[rxq->rel_mbufs_type].release_mbufs(rxq);
 	reset_rx_queue(rxq);
 	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
@@ -973,5 +985,5 @@ iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 
 	txq = dev->data->tx_queues[tx_queue_id];
-	txq->ops->release_mbufs(txq);
+	iavf_txq_release_mbufs_ops[txq->rel_mbufs_type].release_mbufs(txq);
 	reset_tx_queue(txq);
 	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
@@ -988,5 +1000,5 @@ iavf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 		return;
 
-	q->ops->release_mbufs(q);
+	iavf_rxq_release_mbufs_ops[q->rel_mbufs_type].release_mbufs(q);
 	rte_free(q->sw_ring);
 	rte_memzone_free(q->mz);
@@ -1002,5 +1014,5 @@ iavf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 		return;
 
-	q->ops->release_mbufs(q);
+	iavf_txq_release_mbufs_ops[q->rel_mbufs_type].release_mbufs(q);
 	rte_free(q->sw_ring);
 	rte_memzone_free(q->mz);
@@ -1036,5 +1048,5 @@ iavf_stop_queues(struct rte_eth_dev *dev)
 		if (!txq)
 			continue;
-		txq->ops->release_mbufs(txq);
+		iavf_txq_release_mbufs_ops[txq->rel_mbufs_type].release_mbufs(txq);
 		reset_tx_queue(txq);
 		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
@@ -1044,5 +1056,5 @@ iavf_stop_queues(struct rte_eth_dev *dev)
 		if (!rxq)
 			continue;
-		rxq->ops->release_mbufs(rxq);
+		iavf_rxq_release_mbufs_ops[rxq->rel_mbufs_type].release_mbufs(rxq);
 		reset_rx_queue(rxq);
 		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index bf8aebbce8..48cc0da6f5 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -188,4 +188,5 @@ struct iavf_rx_queue {
 	struct rte_mbuf fake_mbuf;      /* dummy mbuf */
 	uint8_t rxdid;
+	uint8_t rel_mbufs_type;
 
 	/* used for VPMD */
@@ -247,4 +248,5 @@ struct iavf_tx_queue {
 	uint16_t free_thresh;
 	uint16_t rs_thresh;
+	uint8_t rel_mbufs_type;
 
 	uint16_t port_id;
@@ -390,4 +392,10 @@ struct iavf_32b_rx_flex_desc_comms_ipsec {
 };
 
+enum iavf_rxtx_rel_mbufs_type {
+	IAVF_REL_MBUFS_DEFAULT		= 0,
+	IAVF_REL_MBUFS_SSE_VEC		= 1,
+	IAVF_REL_MBUFS_AVX512_VEC	= 2,
+};
+
 /* Receive Flex Descriptor profile IDs: There are a total
  * of 64 profiles where profile IDs 0/1 are for legacy; and
@@ -693,4 +701,7 @@ uint8_t iavf_proto_xtr_type_to_rxdid(uint8_t xtr_type);
 
 void iavf_set_default_ptype_table(struct rte_eth_dev *dev);
+void iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq);
+void iavf_rx_queue_release_mbufs_sse(struct iavf_rx_queue *rxq);
+void iavf_tx_queue_release_mbufs_sse(struct iavf_tx_queue *txq);
 
 static inline
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx512.c b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
index 6ff38ac368..c975a5e7d7 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx512.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
@@ -1995,5 +1995,5 @@ iavf_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 }
 
-static inline void
+void __rte_cold
 iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq)
 {
@@ -2015,12 +2015,8 @@ iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq)
 }
 
-static const struct iavf_txq_ops avx512_vec_txq_ops = {
-	.release_mbufs = iavf_tx_queue_release_mbufs_avx512,
-};
-
 int __rte_cold
 iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq)
 {
-	txq->ops = &avx512_vec_txq_ops;
+	txq->rel_mbufs_type = IAVF_REL_MBUFS_AVX512_VEC;
 	return 0;
 }
diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index d582a36326..4b23ca8d82 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -1201,5 +1201,5 @@ iavf_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 }
 
-static void __rte_cold
+void __rte_cold
 iavf_rx_queue_release_mbufs_sse(struct iavf_rx_queue *rxq)
 {
@@ -1207,5 +1207,5 @@ iavf_rx_queue_release_mbufs_sse(struct iavf_rx_queue *rxq)
 }
 
-static void __rte_cold
+void __rte_cold
 iavf_tx_queue_release_mbufs_sse(struct iavf_tx_queue *txq)
 {
@@ -1213,16 +1213,8 @@ iavf_tx_queue_release_mbufs_sse(struct iavf_tx_queue *txq)
 }
 
-static const struct iavf_rxq_ops sse_vec_rxq_ops = {
-	.release_mbufs = iavf_rx_queue_release_mbufs_sse,
-};
-
-static const struct iavf_txq_ops sse_vec_txq_ops = {
-	.release_mbufs = iavf_tx_queue_release_mbufs_sse,
-};
-
 int __rte_cold
 iavf_txq_vec_setup(struct iavf_tx_queue *txq)
 {
-	txq->ops = &sse_vec_txq_ops;
+	txq->rel_mbufs_type = IAVF_REL_MBUFS_SSE_VEC;
 	return 0;
 }
@@ -1231,5 +1223,5 @@ int __rte_cold
 iavf_rxq_vec_setup(struct iavf_rx_queue *rxq)
 {
-	rxq->ops = &sse_vec_rxq_ops;
+	rxq->rel_mbufs_type = IAVF_REL_MBUFS_SSE_VEC;
 	return iavf_rxq_vec_setup_default(rxq);
 }
-- 
2.34.3

---
  Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- -	2022-06-09 12:34:31.190059316 +0100
+++ 0062-net-iavf-fix-mbuf-release-in-multi-process.patch	2022-06-09 12:34:29.801980736 +0100
@@ -1 +1 @@
-From fced83c1229e0ad89f26d07fa7bd46b8767d9f5c Mon Sep 17 00:00:00 2001
+From 6d3526ad40c2db79b895fd0d25649b4526bcf44d Mon Sep 17 00:00:00 2001
@@ -5,0 +6,2 @@
+[ upstream commit fced83c1229e0ad89f26d07fa7bd46b8767d9f5c ]
+
@@ -12 +13,0 @@
-Cc: stable at dpdk.org
@@ -24 +25 @@
-index 73e4960257..ff0c98ffc3 100644
+index 4c8007043a..0d380bf80e 100644
@@ -27 +28 @@
-@@ -363,10 +363,22 @@ release_txq_mbufs(struct iavf_tx_queue *txq)
+@@ -364,10 +364,22 @@ release_txq_mbufs(struct iavf_tx_queue *txq)
@@ -54 +55 @@
-@@ -682,5 +694,5 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+@@ -676,5 +688,5 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
@@ -61 +62 @@
-@@ -822,5 +834,5 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
+@@ -813,5 +825,5 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
@@ -68 +69 @@
-@@ -954,5 +966,5 @@ iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+@@ -945,5 +957,5 @@ iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
@@ -75 +76 @@
-@@ -982,5 +994,5 @@ iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+@@ -973,5 +985,5 @@ iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
@@ -82 +83 @@
-@@ -997,5 +1009,5 @@ iavf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+@@ -988,5 +1000,5 @@ iavf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
@@ -89 +90 @@
-@@ -1011,5 +1023,5 @@ iavf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+@@ -1002,5 +1014,5 @@ iavf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
@@ -96 +97 @@
-@@ -1045,5 +1057,5 @@ iavf_stop_queues(struct rte_eth_dev *dev)
+@@ -1036,5 +1048,5 @@ iavf_stop_queues(struct rte_eth_dev *dev)
@@ -103 +104 @@
-@@ -1053,5 +1065,5 @@ iavf_stop_queues(struct rte_eth_dev *dev)
+@@ -1044,5 +1056,5 @@ iavf_stop_queues(struct rte_eth_dev *dev)
@@ -111 +112 @@
-index 642b9a700a..e8362bbd1d 100644
+index bf8aebbce8..48cc0da6f5 100644
@@ -114 +115 @@
-@@ -191,4 +191,5 @@ struct iavf_rx_queue {
+@@ -188,4 +188,5 @@ struct iavf_rx_queue {
@@ -120 +121 @@
-@@ -250,4 +251,5 @@ struct iavf_tx_queue {
+@@ -247,4 +248,5 @@ struct iavf_tx_queue {
@@ -126 +127 @@
-@@ -393,4 +395,10 @@ struct iavf_32b_rx_flex_desc_comms_ipsec {
+@@ -390,4 +392,10 @@ struct iavf_32b_rx_flex_desc_comms_ipsec {
@@ -137 +138 @@
-@@ -696,4 +704,7 @@ uint8_t iavf_proto_xtr_type_to_rxdid(uint8_t xtr_type);
+@@ -693,4 +701,7 @@ uint8_t iavf_proto_xtr_type_to_rxdid(uint8_t xtr_type);
@@ -146 +147 @@
-index 7319d4cb65..3bfec63851 100644
+index 6ff38ac368..c975a5e7d7 100644
@@ -149 +150 @@
-@@ -1993,5 +1993,5 @@ iavf_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
+@@ -1995,5 +1995,5 @@ iavf_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
@@ -156 +157 @@
-@@ -2013,12 +2013,8 @@ iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq)
+@@ -2015,12 +2015,8 @@ iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq)
@@ -171 +172 @@
-index 717a227b2c..4a5232c1d2 100644
+index d582a36326..4b23ca8d82 100644
@@ -174 +175 @@
-@@ -1199,5 +1199,5 @@ iavf_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+@@ -1201,5 +1201,5 @@ iavf_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
@@ -181 +182 @@
-@@ -1205,5 +1205,5 @@ iavf_rx_queue_release_mbufs_sse(struct iavf_rx_queue *rxq)
+@@ -1207,5 +1207,5 @@ iavf_rx_queue_release_mbufs_sse(struct iavf_rx_queue *rxq)
@@ -188 +189 @@
-@@ -1211,16 +1211,8 @@ iavf_tx_queue_release_mbufs_sse(struct iavf_tx_queue *txq)
+@@ -1213,16 +1213,8 @@ iavf_tx_queue_release_mbufs_sse(struct iavf_tx_queue *txq)
@@ -206 +207 @@
-@@ -1229,5 +1221,5 @@ int __rte_cold
+@@ -1231,5 +1223,5 @@ int __rte_cold



More information about the stable mailing list