patch 'net/cxgbe: fix Tx queue stuck with mbuf chain coalescing' has been queued to stable release 20.11.6

Xueming Li xuemingl at nvidia.com
Tue Jun 21 10:01:32 CEST 2022


Hi,

FYI, your patch has been queued to stable release 20.11.6

Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 06/23/22. So please
shout if anyone has objections.

Also note that after the patch there's a diff of the upstream commit vs. the
patch applied to the branch. This will indicate whether any rebasing was
needed to apply it to the stable branch. If there were code changes during
rebasing (i.e. not only metadata diffs), please double-check that the rebase
was done correctly.

Queued patches are on a temporary branch at:
https://github.com/steevenlee/dpdk

This queued commit can be viewed at:
https://github.com/steevenlee/dpdk/commit/730c2afc40bc64bb590c545b0221dfb94fef0425

Thanks.

Xueming Li <xuemingl at nvidia.com>

---
From 730c2afc40bc64bb590c545b0221dfb94fef0425 Mon Sep 17 00:00:00 2001
From: Rahul Lakkireddy <rahul.lakkireddy at chelsio.com>
Date: Tue, 19 Apr 2022 03:54:19 +0530
Subject: [PATCH] net/cxgbe: fix Tx queue stuck with mbuf chain coalescing
Cc: Xueming Li <xuemingl at nvidia.com>

[ upstream commit 151e828f6427667faf3fdfaa00d14a65c7f57cd6 ]

When trying to coalesce chained mbufs on the Tx side, it is
possible to get stuck during a queue wrap-around. When coalescing
the mbuf chain fails, the Tx path returns EBUSY, but when the same
packet is retried it fails to coalesce again, so the loop repeats
indefinitely. Fix by pushing the packet through the normal Tx path
instead. Also use FW_ETH_TX_PKTS_WR to handle chained mbufs, so
the firmware can optimize.
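
As a minimal illustration, the control flow this change ends up with
in t4_eth_xmit() is roughly the following (a sketch condensed from the
hunks below for reading the diff, not a drop-in excerpt; all names
follow the diff):

    should_coal = should_tx_packet_coalesce(txq, mbuf, &cflits, adap);
    if (should_coal > 0) {
            /* Coalesce path: append the mbuf to the open work request. */
            return tx_do_packet_coalesce(txq, mbuf, cflits, adap,
                                         pi, addr, nb_pkts);
    } else if (should_coal < 0) {
            /* Out of credits: propagate -EBUSY so the caller retries. */
            return should_coal;
    }
    /* should_coal == 0 (e.g. queue wrap-around): fall through to the
     * normal, non-coalesced Tx path instead of looping forever. */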

Fixes: 6c2809628cd5 ("net/cxgbe: improve latency for slow traffic")

Signed-off-by: Rahul Lakkireddy <rahul.lakkireddy at chelsio.com>
---
 drivers/net/cxgbe/sge.c | 38 +++++++++++++++++++++++---------------
 1 file changed, 23 insertions(+), 15 deletions(-)

diff --git a/drivers/net/cxgbe/sge.c b/drivers/net/cxgbe/sge.c
index c18ea7c248..0998864269 100644
--- a/drivers/net/cxgbe/sge.c
+++ b/drivers/net/cxgbe/sge.c
@@ -793,9 +793,9 @@ static inline void txq_advance(struct sge_txq *q, unsigned int n)
 
 #define MAX_COALESCE_LEN 64000
 
-static inline int wraps_around(struct sge_txq *q, int ndesc)
+static inline bool wraps_around(struct sge_txq *q, int ndesc)
 {
-	return (q->pidx + ndesc) > q->size ? 1 : 0;
+	return (q->pidx + ndesc) > q->size ? true : false;
 }
 
 static void tx_timer_cb(void *data)
@@ -846,7 +846,6 @@ static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap,
 
 	/* fill the pkts WR header */
 	wr = (void *)&q->desc[q->pidx];
-	wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR));
 	vmwr = (void *)&q->desc[q->pidx];
 
 	wr_mid = V_FW_WR_LEN16(DIV_ROUND_UP(q->coalesce.flits, 2));
@@ -856,8 +855,11 @@ static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap,
 	wr->npkt = q->coalesce.idx;
 	wr->r3 = 0;
 	if (is_pf4(adap)) {
-		wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR));
 		wr->type = q->coalesce.type;
+		if (likely(wr->type != 0))
+			wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR));
+		else
+			wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS_WR));
 	} else {
 		wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS_VM_WR));
 		vmwr->r4 = 0;
@@ -936,13 +938,16 @@ static inline int should_tx_packet_coalesce(struct sge_eth_txq *txq,
 		ndesc = DIV_ROUND_UP(q->coalesce.flits + flits, 8);
 		credits = txq_avail(q) - ndesc;
 
+		if (unlikely(wraps_around(q, ndesc)))
+			return 0;
+
 		/* If we are wrapping or this is last mbuf then, send the
 		 * already coalesced mbufs and let the non-coalesce pass
 		 * handle the mbuf.
 		 */
-		if (unlikely(credits < 0 || wraps_around(q, ndesc))) {
+		if (unlikely(credits < 0)) {
 			ship_tx_pkt_coalesce_wr(adap, txq);
-			return 0;
+			return -EBUSY;
 		}
 
 		/* If the max coalesce len or the max WR len is reached
@@ -966,8 +971,12 @@ new:
 	ndesc = flits_to_desc(q->coalesce.flits + flits);
 	credits = txq_avail(q) - ndesc;
 
-	if (unlikely(credits < 0 || wraps_around(q, ndesc)))
+	if (unlikely(wraps_around(q, ndesc)))
 		return 0;
+
+	if (unlikely(credits < 0))
+		return -EBUSY;
+
 	q->coalesce.flits += wr_size / sizeof(__be64);
 	q->coalesce.type = type;
 	q->coalesce.ptr = (unsigned char *)&q->desc[q->pidx] +
@@ -1110,7 +1119,7 @@ int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
 	unsigned int flits, ndesc, cflits;
 	int l3hdr_len, l4hdr_len, eth_xtra_len;
 	int len, last_desc;
-	int credits;
+	int should_coal, credits;
 	u32 wr_mid;
 	u64 cntrl, *end;
 	bool v6;
@@ -1141,9 +1150,9 @@ out_free:
 	/* align the end of coalesce WR to a 512 byte boundary */
 	txq->q.coalesce.max = (8 - (txq->q.pidx & 7)) * 8;
 
-	if (!((m->ol_flags & PKT_TX_TCP_SEG) ||
-			m->pkt_len > RTE_ETHER_MAX_LEN)) {
-		if (should_tx_packet_coalesce(txq, mbuf, &cflits, adap)) {
+	if ((m->ol_flags & PKT_TX_TCP_SEG) == 0) {
+		should_coal = should_tx_packet_coalesce(txq, mbuf, &cflits, adap);
+		if (should_coal > 0) {
 			if (unlikely(map_mbuf(mbuf, addr) < 0)) {
 				dev_warn(adap, "%s: mapping err for coalesce\n",
 					 __func__);
@@ -1152,8 +1161,8 @@ out_free:
 			}
 			return tx_do_packet_coalesce(txq, mbuf, cflits, adap,
 						     pi, addr, nb_pkts);
-		} else {
-			return -EBUSY;
+		} else if (should_coal < 0) {
+			return should_coal;
 		}
 	}
 
@@ -1200,8 +1209,7 @@ out_free:
 		end = (u64 *)vmwr + flits;
 	}
 
-	len = 0;
-	len += sizeof(*cpl);
+	len = sizeof(*cpl);
 
 	/* Coalescing skipped and we send through normal path */
 	if (!(m->ol_flags & PKT_TX_TCP_SEG)) {
-- 
2.35.1
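
For context, the symptom was visible from the application side: with a
transmit-retry loop like the hypothetical sketch below, a chained mbuf
that repeatedly failed to coalesce made rte_eth_tx_burst() return 0 on
every call, so the loop spun forever. The names send_all, pkts, and
nb_pkts are illustrative only, not part of the patch.

    /* Hypothetical application-side retry loop that exposes the hang:
     * if the PMD rejects the same chained mbuf on every call, sent
     * never advances.  With the fix, the packet eventually goes out
     * through the normal (non-coalesced) Tx path. */
    #include <rte_ethdev.h>

    static void
    send_all(uint16_t port_id, uint16_t queue_id,
             struct rte_mbuf **pkts, uint16_t nb_pkts)
    {
            uint16_t sent = 0;

            while (sent < nb_pkts)
                    sent += rte_eth_tx_burst(port_id, queue_id,
                                             pkts + sent, nb_pkts - sent);
    }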

---
  Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- -	2022-06-21 15:37:50.523416752 +0800
+++ 0026-net-cxgbe-fix-Tx-queue-stuck-with-mbuf-chain-coalesc.patch	2022-06-21 15:37:49.004451134 +0800
@@ -1 +1 @@
-From 151e828f6427667faf3fdfaa00d14a65c7f57cd6 Mon Sep 17 00:00:00 2001
+From 730c2afc40bc64bb590c545b0221dfb94fef0425 Mon Sep 17 00:00:00 2001
@@ -4,0 +5,3 @@
+Cc: Xueming Li <xuemingl at nvidia.com>
+
+[ upstream commit 151e828f6427667faf3fdfaa00d14a65c7f57cd6 ]
@@ -15 +17,0 @@
-Cc: stable at dpdk.org
@@ -23 +25 @@
-index 5c176004f9..566cd48406 100644
+index c18ea7c248..0998864269 100644
@@ -26 +28 @@
-@@ -789,9 +789,9 @@ static inline void txq_advance(struct sge_txq *q, unsigned int n)
+@@ -793,9 +793,9 @@ static inline void txq_advance(struct sge_txq *q, unsigned int n)
@@ -38 +40 @@
-@@ -842,7 +842,6 @@ static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap,
+@@ -846,7 +846,6 @@ static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap,
@@ -46 +48 @@
-@@ -852,8 +851,11 @@ static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap,
+@@ -856,8 +855,11 @@ static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap,
@@ -59 +61 @@
-@@ -932,13 +934,16 @@ static inline int should_tx_packet_coalesce(struct sge_eth_txq *txq,
+@@ -936,13 +938,16 @@ static inline int should_tx_packet_coalesce(struct sge_eth_txq *txq,
@@ -78 +80 @@
-@@ -962,8 +967,12 @@ new:
+@@ -966,8 +971,12 @@ new:
@@ -92 +94 @@
-@@ -1106,7 +1115,7 @@ int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
+@@ -1110,7 +1119,7 @@ int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
@@ -101 +103 @@
-@@ -1138,9 +1147,9 @@ out_free:
+@@ -1141,9 +1150,9 @@ out_free:
@@ -105 +107 @@
--	if (!((m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) ||
+-	if (!((m->ol_flags & PKT_TX_TCP_SEG) ||
@@ -108 +110 @@
-+	if ((m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) == 0) {
++	if ((m->ol_flags & PKT_TX_TCP_SEG) == 0) {
@@ -114 +116 @@
-@@ -1149,8 +1158,8 @@ out_free:
+@@ -1152,8 +1161,8 @@ out_free:
@@ -125 +127 @@
-@@ -1197,8 +1206,7 @@ out_free:
+@@ -1200,8 +1209,7 @@ out_free:
@@ -134 +136 @@
- 	if (!(m->ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
+ 	if (!(m->ol_flags & PKT_TX_TCP_SEG)) {

