[dpdk-stable] patch 'net/ice: fix Tx when TSO is enabled' has been queued to stable release 19.11.1

luca.boccassi at gmail.com
Tue Feb 11 12:20:42 CET 2020


Hi,

FYI, your patch has been queued to stable release 19.11.1

Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 02/13/20. So please
shout if anyone has objections.

Also note that after the patch there's a diff of the upstream commit vs. the
patch applied to the branch. This will indicate whether any rebasing was
needed to apply it to the stable branch. If there were code changes for the
rebase (i.e. not only metadata diffs), please double-check that the rebase
was done correctly.

Thanks.

Luca Boccassi

---
From 600e7b622abdaf4405005fcb6e8226e4e5f34195 Mon Sep 17 00:00:00 2001
From: Xiaoyun Li <xiaoyun.li at intel.com>
Date: Thu, 26 Dec 2019 14:54:28 +0800
Subject: [PATCH] net/ice: fix Tx when TSO is enabled

[ upstream commit f1514bcb27214557d349ca96a94857421135a844 ]

The hardware limits the maximum buffer size per Tx descriptor to
(16K-1)B. So when TSO is enabled, the mbuf data size may exceed this
limit and cause the NIC to misbehave. This patch fixes the issue by
using more Tx descriptors for such large buffers.
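
For scale, a worked example: the ICE_MAX_DATA_PER_TXD limit added
below works out to 16K - 1 = 16383 bytes, so a single full-size mbuf
segment of 65535 bytes (data_len is a uint16_t), which TSO permits,
needs DIV_ROUND_UP(65535, 16383) = 5 data descriptors rather than the
single descriptor the old code reserved for it.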

Fixes: 17c7d0f9d6a4 ("net/ice: support basic Rx/Tx")

Signed-off-by: Xiaoyun Li <xiaoyun.li at intel.com>
Acked-by: Qi Zhang <qi.z.zhang at intel.com>
---
 drivers/net/ice/ice_rxtx.c | 58 ++++++++++++++++++++++++++++++++++----
 1 file changed, 53 insertions(+), 5 deletions(-)

diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index b4f5367c5a..ccc2c93398 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -2421,6 +2421,24 @@ ice_set_tso_ctx(struct rte_mbuf *mbuf, union ice_tx_offload tx_offload)
 	return ctx_desc;
 }
 
+/* HW requires that TX buffer size ranges from 1B up to (16K-1)B. */
+#define ICE_MAX_DATA_PER_TXD \
+	(ICE_TXD_QW1_TX_BUF_SZ_M >> ICE_TXD_QW1_TX_BUF_SZ_S)
+/* Calculate the number of TX descriptors needed for each pkt */
+static inline uint16_t
+ice_calc_pkt_desc(struct rte_mbuf *tx_pkt)
+{
+	struct rte_mbuf *txd = tx_pkt;
+	uint16_t count = 0;
+
+	while (txd != NULL) {
+		count += DIV_ROUND_UP(txd->data_len, ICE_MAX_DATA_PER_TXD);
+		txd = txd->next;
+	}
+
+	return count;
+}
+
 uint16_t
 ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
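
To see what the new helper computes, here is a minimal standalone
sketch (not driver code): struct seg is a stand-in for the
data_len/next fields of struct rte_mbuf, and the segment sizes are
made-up values.

#include <stdio.h>
#include <stdint.h>

#define ICE_MAX_DATA_PER_TXD 16383u /* (16K-1)B, the HW per-desc limit */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Simplified stand-in for the segment chain of a struct rte_mbuf. */
struct seg {
	uint16_t data_len;
	struct seg *next;
};

static uint16_t calc_pkt_desc(const struct seg *pkt)
{
	uint16_t count = 0;

	while (pkt != NULL) {
		count += DIV_ROUND_UP(pkt->data_len, ICE_MAX_DATA_PER_TXD);
		pkt = pkt->next;
	}
	return count;
}

int main(void)
{
	/* Hypothetical two-segment TSO packet: 20000B + 9000B. */
	struct seg s2 = { 9000, NULL };
	struct seg s1 = { 20000, &s2 };

	/* 20000B needs 2 descriptors, 9000B needs 1: prints 3. */
	printf("%u\n", (unsigned int)calc_pkt_desc(&s1));
	return 0;
}
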
@@ -2440,6 +2458,7 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	uint32_t td_offset = 0;
 	uint32_t td_tag = 0;
 	uint16_t tx_last;
+	uint16_t slen;
 	uint64_t buf_dma_addr;
 	uint64_t ol_flags;
 	union ice_tx_offload tx_offload = {0};
@@ -2471,8 +2490,15 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		/* The number of descriptors that must be allocated for
 		 * a packet equals to the number of the segments of that
 		 * packet plus the number of context descriptor if needed.
+		 * Recalculate the needed tx descs when TSO enabled in case
+		 * the mbuf data size exceeds max data size that hw allows
+		 * per tx desc.
 		 */
-		nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
+		if (ol_flags & PKT_TX_TCP_SEG)
+			nb_used = (uint16_t)(ice_calc_pkt_desc(tx_pkt) +
+					     nb_ctx);
+		else
+			nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
 		tx_last = (uint16_t)(tx_id + nb_used - 1);
 
 		/* Circular ring */
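
With the hypothetical 20000B + 9000B chain from the sketch above and
one context descriptor, the old computation reserved nb_segs + nb_ctx
= 2 + 1 = 3 descriptors, while the TSO path now reserves 2 + 1 + 1 = 4,
which is what the descriptor fill loop below will actually consume.
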
@@ -2562,15 +2588,37 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			txe->mbuf = m_seg;
 
 			/* Setup TX Descriptor */
+			slen = m_seg->data_len;
 			buf_dma_addr = rte_mbuf_data_iova(m_seg);
+
+			while ((ol_flags & PKT_TX_TCP_SEG) &&
+				unlikely(slen > ICE_MAX_DATA_PER_TXD)) {
+				txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
+				txd->cmd_type_offset_bsz =
+				rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
+				((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
+				((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
+				((uint64_t)ICE_MAX_DATA_PER_TXD <<
+				 ICE_TXD_QW1_TX_BUF_SZ_S) |
+				((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
+
+				buf_dma_addr += ICE_MAX_DATA_PER_TXD;
+				slen -= ICE_MAX_DATA_PER_TXD;
+
+				txe->last_id = tx_last;
+				tx_id = txe->next_id;
+				txe = txn;
+				txd = &tx_ring[tx_id];
+				txn = &sw_ring[txe->next_id];
+			}
+
 			txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
 			txd->cmd_type_offset_bsz =
 				rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
-				((uint64_t)td_cmd  << ICE_TXD_QW1_CMD_S) |
+				((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
 				((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
-				((uint64_t)m_seg->data_len  <<
-				 ICE_TXD_QW1_TX_BUF_SZ_S) |
-				((uint64_t)td_tag  << ICE_TXD_QW1_L2TAG1_S));
+				((uint64_t)slen << ICE_TXD_QW1_TX_BUF_SZ_S) |
+				((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
 
 			txe->last_id = tx_last;
 			tx_id = txe->next_id;
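
The effect of the new inner while loop can be replayed in isolation
with this minimal sketch; the descriptor write is reduced to a printf,
and the IOVA and segment length are hypothetical:

#include <stdio.h>
#include <inttypes.h>

#define ICE_MAX_DATA_PER_TXD 16383u /* (16K-1)B, the HW per-desc limit */

int main(void)
{
	uint64_t buf_dma_addr = 0x100000; /* hypothetical segment IOVA */
	unsigned int slen = 40000;        /* oversized TSO segment length */

	/* Peel off full-size chunks, one descriptor per chunk. */
	while (slen > ICE_MAX_DATA_PER_TXD) {
		printf("desc: addr=0x%" PRIx64 " len=%u\n",
		       buf_dma_addr, ICE_MAX_DATA_PER_TXD);
		buf_dma_addr += ICE_MAX_DATA_PER_TXD;
		slen -= ICE_MAX_DATA_PER_TXD;
	}
	/* The remainder goes into the segment's final descriptor. */
	printf("desc: addr=0x%" PRIx64 " len=%u\n", buf_dma_addr, slen);
	return 0;
}

Run as written, this prints three descriptors for the 40000B segment
(16383B + 16383B + 7234B), matching DIV_ROUND_UP(40000, 16383) = 3
from the helper above.
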
-- 
2.20.1

---
  Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- -	2020-02-11 11:17:42.211429873 +0000
+++ 0096-net-ice-fix-Tx-when-TSO-is-enabled.patch	2020-02-11 11:17:38.536003890 +0000
@@ -1,15 +1,16 @@
-From f1514bcb27214557d349ca96a94857421135a844 Mon Sep 17 00:00:00 2001
+From 600e7b622abdaf4405005fcb6e8226e4e5f34195 Mon Sep 17 00:00:00 2001
 From: Xiaoyun Li <xiaoyun.li at intel.com>
 Date: Thu, 26 Dec 2019 14:54:28 +0800
 Subject: [PATCH] net/ice: fix Tx when TSO is enabled
 
+[ upstream commit f1514bcb27214557d349ca96a94857421135a844 ]
+
 The hardware limits the maximum buffer size per Tx descriptor to
 (16K-1)B. So when TSO is enabled, the mbuf data size may exceed this
 limit and cause the NIC to misbehave. This patch fixes the issue by
 using more Tx descriptors for such large buffers.
 
 Fixes: 17c7d0f9d6a4 ("net/ice: support basic Rx/Tx")
-Cc: stable at dpdk.org
 
 Signed-off-by: Xiaoyun Li <xiaoyun.li at intel.com>
 Acked-by: Qi Zhang <qi.z.zhang at intel.com>
@@ -18,7 +19,7 @@
  1 file changed, 53 insertions(+), 5 deletions(-)
 
 diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
-index 5f4fc42a8e..2fa737a97c 100644
+index b4f5367c5a..ccc2c93398 100644
 --- a/drivers/net/ice/ice_rxtx.c
 +++ b/drivers/net/ice/ice_rxtx.c
 @@ -2421,6 +2421,24 @@ ice_set_tso_ctx(struct rte_mbuf *mbuf, union ice_tx_offload tx_offload)

