[dpdk-stable] patch 'net/ice: fix function pointer in multi-process' has been queued to stable release 20.11.4

Xueming Li xuemingl at nvidia.com
Wed Nov 10 07:31:35 CET 2021


Hi,

FYI, your patch has been queued to stable release 20.11.4

Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 11/12/21. So please
shout if anyone has objections.

Also note that after the patch there's a diff of the upstream commit vs the
patch applied to the branch. This will indicate whether any rebasing was
needed to apply the patch to the stable branch. If there were code changes
for rebasing (i.e. not only metadata diffs), please double-check that the
rebase was done correctly.

Queued patches are on a temporary branch at:
https://github.com/steevenlee/dpdk

This queued commit can be viewed at:
https://github.com/steevenlee/dpdk/commit/62ad28d888e291d10a59c4218bc4d2deeeb96c5a

Thanks.

Xueming Li <xuemingl at nvidia.com>

---
From 62ad28d888e291d10a59c4218bc4d2deeeb96c5a Mon Sep 17 00:00:00 2001
From: Dapeng Yu <dapengx.yu at intel.com>
Date: Tue, 26 Oct 2021 09:55:42 +0800
Subject: [PATCH] net/ice: fix function pointer in multi-process
Cc: Xueming Li <xuemingl at nvidia.com>

[ upstream commit 20b631efe785819eb77aabbf500b3352e5731bdb ]

This patch saves the selected Receive Flex Descriptor profile ID in the
queue and uses it as an index into a constant handler table when calling
the function, instead of storing a function pointer in the queue.

Otherwise the secondary process would run with a function address that is
only valid in the primary process.

Fixes: 7a340b0b4e03 ("net/ice: refactor Rx FlexiMD handling")

Signed-off-by: Dapeng Yu <dapengx.yu at intel.com>
Acked-by: Haiyue Wang <haiyue.wang at intel.com>
---
 drivers/net/ice/ice_rxtx.c | 32 ++++++++++++++++++--------------
 drivers/net/ice/ice_rxtx.h |  2 +-
 2 files changed, 19 insertions(+), 15 deletions(-)

diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index 906fca15b0..e16ec5be64 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -170,51 +170,55 @@ ice_rxd_to_pkt_fields_by_comms_aux_v2(struct ice_rx_queue *rxq,
 #endif
 }
 
+static const ice_rxd_to_pkt_fields_t rxd_to_pkt_fields_ops[] = {
+	[ICE_RXDID_COMMS_AUX_VLAN] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
+	[ICE_RXDID_COMMS_AUX_IPV4] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
+	[ICE_RXDID_COMMS_AUX_IPV6] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
+	[ICE_RXDID_COMMS_AUX_IPV6_FLOW] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
+	[ICE_RXDID_COMMS_AUX_TCP] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
+	[ICE_RXDID_COMMS_AUX_IP_OFFSET] = ice_rxd_to_pkt_fields_by_comms_aux_v2,
+	[ICE_RXDID_COMMS_GENERIC] = ice_rxd_to_pkt_fields_by_comms_generic,
+	[ICE_RXDID_COMMS_OVS] = ice_rxd_to_pkt_fields_by_comms_ovs,
+};
+
 void
 ice_select_rxd_to_pkt_fields_handler(struct ice_rx_queue *rxq, uint32_t rxdid)
 {
+	rxq->rxdid = rxdid;
+
 	switch (rxdid) {
 	case ICE_RXDID_COMMS_AUX_VLAN:
 		rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_vlan_mask;
-		rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
 		break;
 
 	case ICE_RXDID_COMMS_AUX_IPV4:
 		rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv4_mask;
-		rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
 		break;
 
 	case ICE_RXDID_COMMS_AUX_IPV6:
 		rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_mask;
-		rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
 		break;
 
 	case ICE_RXDID_COMMS_AUX_IPV6_FLOW:
 		rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
-		rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
 		break;
 
 	case ICE_RXDID_COMMS_AUX_TCP:
 		rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_tcp_mask;
-		rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
 		break;
 
 	case ICE_RXDID_COMMS_AUX_IP_OFFSET:
 		rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
-		rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v2;
 		break;
 
 	case ICE_RXDID_COMMS_GENERIC:
-		rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_generic;
-		break;
-
+		/* fallthrough */
 	case ICE_RXDID_COMMS_OVS:
-		rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_ovs;
 		break;
 
 	default:
 		/* update this according to the RXDID for PROTO_XTR_NONE */
-		rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_ovs;
+		rxq->rxdid = ICE_RXDID_COMMS_OVS;
 		break;
 	}
 
@@ -1552,7 +1556,7 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
 			mb->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
 				rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
 			ice_rxd_to_vlan_tci(mb, &rxdp[j]);
-			rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
+			rxd_to_pkt_fields_ops[rxq->rxdid](rxq, mb, &rxdp[j]);
 
 			mb->ol_flags |= pkt_flags;
 		}
@@ -1845,7 +1849,7 @@ ice_recv_scattered_pkts(void *rx_queue,
 		first_seg->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
 		ice_rxd_to_vlan_tci(first_seg, &rxd);
-		rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
+		rxd_to_pkt_fields_ops[rxq->rxdid](rxq, first_seg, &rxd);
 		pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
 		first_seg->ol_flags |= pkt_flags;
 		/* Prefetch data of first segment, if configured to do so. */
@@ -2247,7 +2251,7 @@ ice_recv_pkts(void *rx_queue,
 		rxm->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
 		ice_rxd_to_vlan_tci(rxm, &rxd);
-		rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
+		rxd_to_pkt_fields_ops[rxq->rxdid](rxq, rxm, &rxd);
 		pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
 		rxm->ol_flags |= pkt_flags;
 		/* copy old mbuf to rx_pkts */
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index 4bea2bc2be..a74c4b3492 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -86,7 +86,7 @@ struct ice_rx_queue {
 	bool rx_deferred_start; /* don't start this queue in dev start */
 	uint8_t proto_xtr; /* Protocol extraction from flexible descriptor */
 	uint64_t xtr_ol_flag; /* Protocol extraction offload flag */
-	ice_rxd_to_pkt_fields_t rxd_to_pkt_fields; /* handle FlexiMD by RXDID */
+	uint32_t rxdid; /* Receive Flex Descriptor profile ID */
 	ice_rx_release_mbufs_t rx_rel_mbufs;
 	const struct rte_memzone *mz;
 };
-- 
2.33.0
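
For context, below is a minimal standalone sketch (plain C, not DPDK code;
all names are illustrative) of the pattern the patch adopts: state that may
be shared between processes carries only an index, and each process resolves
the handler through its own const function table, so a function address
recorded by the primary process is never dereferenced by the secondary
process. The same reasoning applies to rxq->rxdid and rxd_to_pkt_fields_ops[]
in the patch above.

#include <stdint.h>
#include <stdio.h>

enum handler_id {
	HANDLER_A = 0,
	HANDLER_B,
	HANDLER_MAX
};

static void handle_a(void) { printf("handler A\n"); }
static void handle_b(void) { printf("handler B\n"); }

/* Each process has its own, correctly relocated copy of this table. */
static void (*const handler_ops[HANDLER_MAX])(void) = {
	[HANDLER_A] = handle_a,
	[HANDLER_B] = handle_b,
};

/* Imagine this structure living in memory shared by both processes. */
struct shared_queue {
	uint32_t handler_id;    /* portable across processes */
	/* void (*handler)(void);  NOT portable: address differs per process */
};

static void process_packet(const struct shared_queue *q)
{
	/* Resolve the function locally, using only the shared index. */
	handler_ops[q->handler_id]();
}

int main(void)
{
	struct shared_queue q = { .handler_id = HANDLER_B };

	process_packet(&q);
	return 0;
}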

---
  Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- -	2021-11-10 14:17:11.228558485 +0800
+++ 0211-net-ice-fix-function-pointer-in-multi-process.patch	2021-11-10 14:17:02.020744864 +0800
@@ -1 +1 @@
-From 20b631efe785819eb77aabbf500b3352e5731bdb Mon Sep 17 00:00:00 2001
+From 62ad28d888e291d10a59c4218bc4d2deeeb96c5a Mon Sep 17 00:00:00 2001
@@ -4,0 +5,3 @@
+Cc: Xueming Li <xuemingl at nvidia.com>
+
+[ upstream commit 20b631efe785819eb77aabbf500b3352e5731bdb ]
@@ -14 +16,0 @@
-Cc: stable at dpdk.org
@@ -24 +26 @@
-index e7217661dd..c3cad2fbbb 100644
+index 906fca15b0..e16ec5be64 100644
@@ -27 +29 @@
-@@ -204,51 +204,55 @@ ice_rxd_to_pkt_fields_by_comms_aux_v2(struct ice_rx_queue *rxq,
+@@ -170,51 +170,55 @@ ice_rxd_to_pkt_fields_by_comms_aux_v2(struct ice_rx_queue *rxq,
@@ -94 +96 @@
-@@ -1608,7 +1612,7 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
+@@ -1552,7 +1556,7 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
@@ -100,4 +102,4 @@
- #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
- 			if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
- 				ts_ns = ice_tstamp_convert_32b_64b(hw,
-@@ -1925,7 +1929,7 @@ ice_recv_scattered_pkts(void *rx_queue,
+ 
+ 			mb->ol_flags |= pkt_flags;
+ 		}
+@@ -1845,7 +1849,7 @@ ice_recv_scattered_pkts(void *rx_queue,
@@ -110,3 +112,3 @@
- #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
- 		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
-@@ -2356,7 +2360,7 @@ ice_recv_pkts(void *rx_queue,
+ 		first_seg->ol_flags |= pkt_flags;
+ 		/* Prefetch data of first segment, if configured to do so. */
+@@ -2247,7 +2251,7 @@ ice_recv_pkts(void *rx_queue,
@@ -119,2 +121,2 @@
- #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
- 		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+ 		rxm->ol_flags |= pkt_flags;
+ 		/* copy old mbuf to rx_pkts */
@@ -122 +124 @@
-index e1c644fb63..146dc1f95d 100644
+index 4bea2bc2be..a74c4b3492 100644
@@ -125 +127 @@
-@@ -89,7 +89,7 @@ struct ice_rx_queue {
+@@ -86,7 +86,7 @@ struct ice_rx_queue {
@@ -132,2 +134,2 @@
- 	uint64_t offloads;
- 	uint32_t time_high;
+ 	const struct rte_memzone *mz;
+ };

