[14/20] net/ena: fix cleanup for out of order packets

Message ID 20181214131846.22439-15-mk@semihalf.com (mailing list archive)
State Superseded, archived
Delegated to: Ferruh Yigit
Series net/ena: ENAv2 release

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/Intel-compilation fail Compilation issues

Commit Message

Michal Krawczyk Dec. 14, 2018, 1:18 p.m. UTC
  From: Rafal Kozik <rk@semihalf.com>

When a wrong req_id is detected, some previously allocated mbufs could be
used for receiving different segments of received packets. In such a case
the chained mbufs would be returned to the mempool twice.

To prevent that, the chained mbuf is now freed right after the error is
detected.

To simplify the cleanup, pointers taken from the Rx ring are set to NULL.

As the queues are not used after ena_rx_queue_release_bufs and
ena_tx_queue_release_bufs are called, updating the next_to_clean pointer
is not necessary.

Fixes: c2034976673d ("net/ena: add Rx out of order completion")
Cc: stable@dpdk.org

Signed-off-by: Rafal Kozik <rk@semihalf.com>
Acked-by: Michal Krawczyk <mk@semihalf.com>
---
 drivers/net/ena/ena_ethdev.c | 24 ++++++++++--------------
 1 file changed, 10 insertions(+), 14 deletions(-)
  

Patch

diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index 14165561e..ce0ca40c4 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -770,17 +770,11 @@  static void ena_tx_queue_release(void *queue)
 
 static void ena_rx_queue_release_bufs(struct ena_ring *ring)
 {
-	unsigned int ring_mask = ring->ring_size - 1;
-
-	while (ring->next_to_clean != ring->next_to_use) {
-		struct rte_mbuf *m =
-			ring->rx_buffer_info[ring->next_to_clean & ring_mask];
-
-		if (m)
-			rte_mbuf_raw_free(m);
-
-		ring->next_to_clean++;
-	}
+	for (unsigned int i = 0; i < ring->ring_size; ++i)
+		if (ring->rx_buffer_info[i]) {
+			rte_mbuf_raw_free(ring->rx_buffer_info[i]);
+			ring->rx_buffer_info[i] = NULL;
+		}
 }
 
 static void ena_tx_queue_release_bufs(struct ena_ring *ring)
@@ -792,8 +786,6 @@  static void ena_tx_queue_release_bufs(struct ena_ring *ring)
 
 		if (tx_buf->mbuf)
 			rte_pktmbuf_free(tx_buf->mbuf);
-
-		ring->next_to_clean++;
 	}
 }
 
@@ -2077,10 +2069,14 @@  static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		while (segments < ena_rx_ctx.descs) {
 			req_id = ena_rx_ctx.ena_bufs[segments].req_id;
 			rc = validate_rx_req_id(rx_ring, req_id);
-			if (unlikely(rc))
+			if (unlikely(rc)) {
+				if (segments != 0)
+					rte_mbuf_raw_free(mbuf_head);
 				break;
+			}
 
 			mbuf = rx_buff_info[req_id];
+			rx_buff_info[req_id] = NULL;
 			mbuf->data_len = ena_rx_ctx.ena_bufs[segments].len;
 			mbuf->data_off = RTE_PKTMBUF_HEADROOM;
 			mbuf->refcnt = 1;