patch 'net/ice: fix Tx preparation' has been queued to stable release 20.11.10

luca.boccassi at gmail.com luca.boccassi at gmail.com
Wed Nov 8 20:25:35 CET 2023


Hi,

FYI, your patch has been queued to stable release 20.11.10

Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 11/10/23. So please
shout if anyone has objections.

Also note that after the patch there's a diff of the upstream commit vs the
patch applied to the branch. This will indicate if there was any rebasing
needed to apply to the stable branch. If there were code changes for rebasing
(ie: not only metadata diffs), please double check that the rebase was
correctly done.

Queued patches are on a temporary branch at:
https://github.com/bluca/dpdk-stable

This queued commit can be viewed at:
https://github.com/bluca/dpdk-stable/commit/632656a03fd9444fc40fa833f54d38184d167a9d

Thanks.

Luca Boccassi

---
From 632656a03fd9444fc40fa833f54d38184d167a9d Mon Sep 17 00:00:00 2001
From: Qi Zhang <qi.z.zhang at intel.com>
Date: Thu, 2 Nov 2023 10:22:07 -0400
Subject: [PATCH] net/ice: fix Tx preparation

[ upstream commit 2f13ba5333b06589ba0e0e307dadcfaa95daf3dc ]

1. Check nb_segs > 8 for NO TSO case
2. Check nb_segs > Tx ring size for TSO case
3. report nb_mtu_seg_max and nb_seg_max in dev_info.

Fixes: 17c7d0f9d6a4 ("net/ice: support basic Rx/Tx")

Signed-off-by: Qi Zhang <qi.z.zhang at intel.com>
Acked-by: Qiming Yang <qiming.yang at intel.com>
---
 drivers/net/ice/ice_ethdev.c |  2 ++
 drivers/net/ice/ice_rxtx.c   | 18 ++++++++++++++++--
 drivers/net/ice/ice_rxtx.h   |  2 ++
 3 files changed, 20 insertions(+), 2 deletions(-)

diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 017059fc87..bcdac15604 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -3718,6 +3718,8 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		.nb_max = ICE_MAX_RING_DESC,
 		.nb_min = ICE_MIN_RING_DESC,
 		.nb_align = ICE_ALIGN_RING_DESC,
+		.nb_mtu_seg_max = ICE_TX_MTU_SEG_MAX,
+		.nb_seg_max = ICE_MAX_RING_DESC,
 	};
 
 	dev_info->speed_capa = ETH_LINK_SPEED_10M |
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index 603136fa91..848f7a9da5 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -3227,7 +3227,7 @@ ice_check_empty_mbuf(struct rte_mbuf *tx_pkt)
 }
 
 uint16_t
-ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+ice_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	      uint16_t nb_pkts)
 {
 	int i, ret;
@@ -3238,9 +3238,23 @@ ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
 		m = tx_pkts[i];
 		ol_flags = m->ol_flags;
 
-		if (ol_flags & PKT_TX_TCP_SEG &&
+		if (!(ol_flags & PKT_TX_TCP_SEG) &&
+		    /**
+		     * No TSO case: nb->segs, pkt_len to not exceed
+		     * the limites.
+		     */
+		    (m->nb_segs > ICE_TX_MTU_SEG_MAX ||
+		     m->pkt_len > ICE_FRAME_SIZE_MAX)) {
+			rte_errno = EINVAL;
+			return i;
+		} else if (ol_flags & PKT_TX_TCP_SEG &&
+		    /** TSO case: tso_segsz, nb_segs, pkt_len not exceed
+		     * the limits.
+		     */
 		    (m->tso_segsz < ICE_MIN_TSO_MSS ||
 		     m->tso_segsz > ICE_MAX_TSO_MSS ||
+		     m->nb_segs >
+			((struct ice_tx_queue *)tx_queue)->nb_tx_desc ||
 		     m->pkt_len > ICE_MAX_TSO_FRAME_SIZE)) {
 			/**
 			 * MSS outside the range are considered malicious
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index 02da387897..25d95f8515 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -45,6 +45,8 @@
 /* Max data buffer size must be 16K - 128 bytes */
 #define ICE_RX_MAX_DATA_BUF_SIZE	(16 * 1024 - 128)
 
+#define ICE_TX_MTU_SEG_MAX	8
+
 typedef void (*ice_rx_release_mbufs_t)(struct ice_rx_queue *rxq);
 typedef void (*ice_tx_release_mbufs_t)(struct ice_tx_queue *txq);
 typedef void (*ice_rxd_to_pkt_fields_t)(struct ice_rx_queue *rxq,
-- 
2.39.2

---
  Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- -	2023-11-08 19:23:53.711442362 +0000
+++ 0037-net-ice-fix-Tx-preparation.patch	2023-11-08 19:23:51.841397700 +0000
@@ -1 +1 @@
-From 2f13ba5333b06589ba0e0e307dadcfaa95daf3dc Mon Sep 17 00:00:00 2001
+From 632656a03fd9444fc40fa833f54d38184d167a9d Mon Sep 17 00:00:00 2001
@@ -5,0 +6,2 @@
+[ upstream commit 2f13ba5333b06589ba0e0e307dadcfaa95daf3dc ]
+
@@ -11 +12,0 @@
-Cc: stable at dpdk.org
@@ -22 +23 @@
-index 6ef06b9926..3ccba4db80 100644
+index 017059fc87..bcdac15604 100644
@@ -25 +26 @@
-@@ -3918,6 +3918,8 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+@@ -3718,6 +3718,8 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
@@ -33 +34 @@
- 	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M |
+ 	dev_info->speed_capa = ETH_LINK_SPEED_10M |
@@ -35 +36 @@
-index ee9cb7b955..73e47ae92d 100644
+index 603136fa91..848f7a9da5 100644
@@ -38 +39 @@
-@@ -3679,7 +3679,7 @@ ice_check_empty_mbuf(struct rte_mbuf *tx_pkt)
+@@ -3227,7 +3227,7 @@ ice_check_empty_mbuf(struct rte_mbuf *tx_pkt)
@@ -47 +48 @@
-@@ -3690,9 +3690,23 @@ ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+@@ -3238,9 +3238,23 @@ ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
@@ -51,2 +52,2 @@
--		if (ol_flags & RTE_MBUF_F_TX_TCP_SEG &&
-+		if (!(ol_flags & RTE_MBUF_F_TX_TCP_SEG) &&
+-		if (ol_flags & PKT_TX_TCP_SEG &&
++		if (!(ol_flags & PKT_TX_TCP_SEG) &&
@@ -61 +62 @@
-+		} else if (ol_flags & RTE_MBUF_F_TX_TCP_SEG &&
++		} else if (ol_flags & PKT_TX_TCP_SEG &&
@@ -73 +74 @@
-index 268289716e..bd2c4abec9 100644
+index 02da387897..25d95f8515 100644
@@ -76,3 +77,3 @@
-@@ -56,6 +56,8 @@ extern int ice_timestamp_dynfield_offset;
- 
- #define ICE_HEADER_SPLIT_ENA   BIT(0)
+@@ -45,6 +45,8 @@
+ /* Max data buffer size must be 16K - 128 bytes */
+ #define ICE_RX_MAX_DATA_BUF_SIZE	(16 * 1024 - 128)


More information about the stable mailing list