[PATCH v2 03/16] net/enetfec: fix buffer leak issue

Gagandeep Singh g.singh at nxp.com
Fri Oct 7 05:27:30 CEST 2022


From: Apeksha Gupta <apeksha.gupta at nxp.com>

The Rx path allocates a replacement mbuf before the
received frame has been validated. On the error paths
that mbuf is neither used nor freed, so the mempool
can run empty after some time.

Fix this by allocating the replacement buffer only
after the error checks have passed. Also reject
multi-segment mbufs in the Tx path before the
descriptor ring is modified, and return the number
of packets actually transmitted instead of nb_pkts.
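
For context, the allocate-after-validate order the patch
establishes can be sketched as below. This is an illustrative
helper, not code from the driver; rx_replace_buffer() and
err_mask are hypothetical names, with err_mask standing in for
the RX_BD_* error bits checked in enetfec_recv_pkts():

	#include <rte_branch_prediction.h>	/* unlikely() */
	#include <rte_mbuf.h>
	#include <rte_mempool.h>

	/*
	 * Take a replacement mbuf from the pool only once the
	 * received descriptor has passed its error checks, so an
	 * error path can no longer leak a freshly allocated buffer.
	 */
	static inline struct rte_mbuf *
	rx_replace_buffer(struct rte_mempool *pool, uint16_t status,
			  uint16_t err_mask, uint64_t *ierrors,
			  uint64_t *rx_nombuf)
	{
		if (status & err_mask) {
			(*ierrors)++;	/* drop frame, recycle old buffer */
			return NULL;
		}

		struct rte_mbuf *m = rte_pktmbuf_alloc(pool);
		if (unlikely(m == NULL))
			(*rx_nombuf)++;	/* pool exhausted: stop polling */
		return m;
	}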

Fixes: ecae71571b0d ("net/enetfec: support Rx/Tx")
Cc: stable at dpdk.org

Signed-off-by: Apeksha Gupta <apeksha.gupta at nxp.com>
Signed-off-by: Sachin Saxena <sachin.saxena at nxp.com>
Acked-by: Hemant Agrawal <hemant.agrawal at nxp.com>
---
 drivers/net/enetfec/enet_rxtx.c | 29 ++++++++++++++++-------------
 1 file changed, 16 insertions(+), 13 deletions(-)

diff --git a/drivers/net/enetfec/enet_rxtx.c b/drivers/net/enetfec/enet_rxtx.c
index 49b326315d..0aea8b240d 100644
--- a/drivers/net/enetfec/enet_rxtx.c
+++ b/drivers/net/enetfec/enet_rxtx.c
@@ -39,11 +39,6 @@ enetfec_recv_pkts(void *rxq1, struct rte_mbuf **rx_pkts,
 		if (pkt_received >= nb_pkts)
 			break;
 
-		new_mbuf = rte_pktmbuf_alloc(pool);
-		if (unlikely(new_mbuf == NULL)) {
-			stats->rx_nombuf++;
-			break;
-		}
 		/* Check for errors. */
 		status ^= RX_BD_LAST;
 		if (status & (RX_BD_LG | RX_BD_SH | RX_BD_NO |
@@ -72,6 +67,12 @@ enetfec_recv_pkts(void *rxq1, struct rte_mbuf **rx_pkts,
 			goto rx_processing_done;
 		}
 
+		new_mbuf = rte_pktmbuf_alloc(pool);
+		if (unlikely(new_mbuf == NULL)) {
+			stats->rx_nombuf++;
+			break;
+		}
+
 		/* Process the incoming frame. */
 		stats->ipackets++;
 		pkt_len = rte_le_to_cpu_16(rte_read16(&bdp->bd_datlen));
@@ -193,7 +194,16 @@ enetfec_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			tx_st = 0;
 			break;
 		}
+
+		mbuf = *(tx_pkts);
+		if (mbuf->nb_segs > 1) {
+			ENETFEC_DP_LOG(DEBUG, "SG not supported");
+			return pkt_transmitted;
+		}
+
+		tx_pkts++;
 		bdp = txq->bd.cur;
+
 		/* First clean the ring */
 		index = enet_get_bd_index(bdp, &txq->bd);
 		status = rte_le_to_cpu_16(rte_read16(&bdp->bd_sc));
@@ -207,9 +217,6 @@ enetfec_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			txq->tx_mbuf[index] = NULL;
 		}
 
-		mbuf = *(tx_pkts);
-		tx_pkts++;
-
 		/* Fill in a Tx ring entry */
 		last_bdp = bdp;
 		status &= ~TX_BD_STATS;
@@ -219,10 +226,6 @@ enetfec_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		stats->opackets++;
 		stats->obytes += buflen;
 
-		if (mbuf->nb_segs > 1) {
-			ENETFEC_DP_LOG(DEBUG, "SG not supported");
-			return -1;
-		}
 		status |= (TX_BD_LAST);
 		data = rte_pktmbuf_mtod(mbuf, void *);
 		for (i = 0; i <= buflen; i += RTE_CACHE_LINE_SIZE)
@@ -268,5 +271,5 @@ enetfec_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		 */
 		txq->bd.cur = bdp;
 	}
-	return nb_pkts;
+	return pkt_transmitted;
 }
-- 
2.25.1


