[dpdk-stable] patch 'net/ena: fix out of order completion' has been queued to stable release 18.08.1
Kevin Traynor
ktraynor at redhat.com
Thu Nov 29 14:21:16 CET 2018
Hi,
FYI, your patch has been queued to stable release 18.08.1.
Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 12/08/18, so please
shout if you have any objections.
Also note that after the patch there's a diff of the upstream commit vs. the patch applied
to the branch. If the code is different (i.e., not only metadata diffs) — due, for example, to
a change in context or macro names — please double-check it.
Thanks.
Kevin Traynor
---
>From 68433fad2007db5d17b15a254bb7a34fa8b66951 Mon Sep 17 00:00:00 2001
From: Rafal Kozik <rk at semihalf.com>
Date: Wed, 21 Nov 2018 09:21:14 +0100
Subject: [PATCH] net/ena: fix out of order completion
[ upstream commit 79405ee175857cfdbb508f9d55e2a51d95483be6 ]
rx_buffer_info should be refill not linearly, but out of order.
IDs should be taken from empty_rx_reqs array.
rx_refill_buffer is introduced to temporary storage
bulk of mbufs taken from pool.
In case of error unused mbufs are put back to pool.
Fixes: c2034976673d ("net/ena: add Rx out of order completion")
Signed-off-by: Rafal Kozik <rk at semihalf.com>
Acked-by: Michal Krawczyk <mk at semihalf.com>
---
drivers/net/ena/ena_ethdev.c | 40 +++++++++++++++++++++++++-----------
drivers/net/ena/ena_ethdev.h | 1 +
2 files changed, 29 insertions(+), 12 deletions(-)
diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index 9e462099f..87c95b2e7 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -761,4 +761,8 @@ static void ena_rx_queue_release(void *queue)
ring->rx_buffer_info = NULL;
+ if (ring->rx_refill_buffer)
+ rte_free(ring->rx_refill_buffer);
+ ring->rx_refill_buffer = NULL;
+
if (ring->empty_rx_reqs)
rte_free(ring->empty_rx_reqs);
@@ -1303,4 +1307,15 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
}
+ rxq->rx_refill_buffer = rte_zmalloc("rxq->rx_refill_buffer",
+ sizeof(struct rte_mbuf *) * nb_desc,
+ RTE_CACHE_LINE_SIZE);
+
+ if (!rxq->rx_refill_buffer) {
+ RTE_LOG(ERR, PMD, "failed to alloc mem for rx refill buffer\n");
+ rte_free(rxq->rx_buffer_info);
+ rxq->rx_buffer_info = NULL;
+ return -ENOMEM;
+ }
+
rxq->empty_rx_reqs = rte_zmalloc("rxq->empty_rx_reqs",
sizeof(uint16_t) * nb_desc,
@@ -1310,4 +1325,6 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
rte_free(rxq->rx_buffer_info);
rxq->rx_buffer_info = NULL;
+ rte_free(rxq->rx_refill_buffer);
+ rxq->rx_refill_buffer = NULL;
return -ENOMEM;
}
@@ -1331,5 +1348,5 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
uint16_t next_to_use = rxq->next_to_use;
uint16_t in_use, req_id;
- struct rte_mbuf **mbufs = &rxq->rx_buffer_info[0];
+ struct rte_mbuf **mbufs = rxq->rx_refill_buffer;
if (unlikely(!count))
@@ -1339,11 +1356,6 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
ena_assert_msg(((in_use + count) < ring_size), "bad ring state");
- count = RTE_MIN(count,
- (uint16_t)(ring_size - (next_to_use & ring_mask)));
-
/* get resources for incoming packets */
- rc = rte_mempool_get_bulk(rxq->mb_pool,
- (void **)(&mbufs[next_to_use & ring_mask]),
- count);
+ rc = rte_mempool_get_bulk(rxq->mb_pool, (void **)mbufs, count);
if (unlikely(rc < 0)) {
rte_atomic64_inc(&rxq->adapter->drv_stats->rx_nombuf);
@@ -1354,8 +1366,9 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
for (i = 0; i < count; i++) {
uint16_t next_to_use_masked = next_to_use & ring_mask;
- struct rte_mbuf *mbuf = mbufs[next_to_use_masked];
+ struct rte_mbuf *mbuf = mbufs[i];
struct ena_com_buf ebuf;
- rte_prefetch0(mbufs[((next_to_use + 4) & ring_mask)]);
+ if (likely((i + 4) < count))
+ rte_prefetch0(mbufs[i + 4]);
req_id = rxq->empty_rx_reqs[next_to_use_masked];
@@ -1363,4 +1376,5 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
if (unlikely(rc < 0))
break;
+ rxq->rx_buffer_info[req_id] = mbuf;
/* prepare physical address for DMA transaction */
@@ -1371,7 +1385,6 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
&ebuf, req_id);
if (unlikely(rc)) {
- rte_mempool_put_bulk(rxq->mb_pool, (void **)(&mbuf),
- count - i);
RTE_LOG(WARNING, PMD, "failed adding rx desc\n");
+ rxq->rx_buffer_info[req_id] = NULL;
break;
}
@@ -1379,7 +1392,10 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
}
- if (unlikely(i < count))
+ if (unlikely(i < count)) {
RTE_LOG(WARNING, PMD, "refilled rx qid %d with only %d "
"buffers (from %d)\n", rxq->id, i, count);
+ rte_mempool_put_bulk(rxq->mb_pool, (void **)(&mbufs[i]),
+ count - i);
+ }
/* When we submitted free recources to device... */
diff --git a/drivers/net/ena/ena_ethdev.h b/drivers/net/ena/ena_ethdev.h
index 2dc8129e0..322e90ace 100644
--- a/drivers/net/ena/ena_ethdev.h
+++ b/drivers/net/ena/ena_ethdev.h
@@ -88,4 +88,5 @@ struct ena_ring {
struct rte_mbuf **rx_buffer_info; /* contex of rx packet */
};
+ struct rte_mbuf **rx_refill_buffer;
unsigned int ring_size; /* number of tx/rx_buffer_info's entries */
--
2.19.0
---
Diff of the applied patch vs upstream commit (please double-check if non-empty:
---
--- - 2018-11-29 13:11:36.920935195 +0000
+++ 0075-net-ena-fix-out-of-order-completion.patch 2018-11-29 13:11:35.000000000 +0000
@@ -1,8 +1,10 @@
-From 79405ee175857cfdbb508f9d55e2a51d95483be6 Mon Sep 17 00:00:00 2001
+From 68433fad2007db5d17b15a254bb7a34fa8b66951 Mon Sep 17 00:00:00 2001
From: Rafal Kozik <rk at semihalf.com>
Date: Wed, 21 Nov 2018 09:21:14 +0100
Subject: [PATCH] net/ena: fix out of order completion
+[ upstream commit 79405ee175857cfdbb508f9d55e2a51d95483be6 ]
+
rx_buffer_info should be refill not linearly, but out of order.
IDs should be taken from empty_rx_reqs array.
@@ -12,7 +14,6 @@
In case of error unused mbufs are put back to pool.
Fixes: c2034976673d ("net/ena: add Rx out of order completion")
-Cc: stable at dpdk.org
Signed-off-by: Rafal Kozik <rk at semihalf.com>
Acked-by: Michal Krawczyk <mk at semihalf.com>
@@ -22,10 +23,10 @@
2 files changed, 29 insertions(+), 12 deletions(-)
diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
-index 3690afe3b..a07bd2b49 100644
+index 9e462099f..87c95b2e7 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
-@@ -777,4 +777,8 @@ static void ena_rx_queue_release(void *queue)
+@@ -761,4 +761,8 @@ static void ena_rx_queue_release(void *queue)
ring->rx_buffer_info = NULL;
+ if (ring->rx_refill_buffer)
@@ -34,7 +35,7 @@
+
if (ring->empty_rx_reqs)
rte_free(ring->empty_rx_reqs);
-@@ -1319,4 +1323,15 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
+@@ -1303,4 +1307,15 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
}
+ rxq->rx_refill_buffer = rte_zmalloc("rxq->rx_refill_buffer",
@@ -50,21 +51,21 @@
+
rxq->empty_rx_reqs = rte_zmalloc("rxq->empty_rx_reqs",
sizeof(uint16_t) * nb_desc,
-@@ -1326,4 +1341,6 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
+@@ -1310,4 +1325,6 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
rte_free(rxq->rx_buffer_info);
rxq->rx_buffer_info = NULL;
+ rte_free(rxq->rx_refill_buffer);
+ rxq->rx_refill_buffer = NULL;
return -ENOMEM;
}
-@@ -1347,5 +1364,5 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
+@@ -1331,5 +1348,5 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
uint16_t next_to_use = rxq->next_to_use;
uint16_t in_use, req_id;
- struct rte_mbuf **mbufs = &rxq->rx_buffer_info[0];
+ struct rte_mbuf **mbufs = rxq->rx_refill_buffer;
if (unlikely(!count))
-@@ -1355,11 +1372,6 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
+@@ -1339,11 +1356,6 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
ena_assert_msg(((in_use + count) < ring_size), "bad ring state");
- count = RTE_MIN(count,
@@ -77,7 +78,7 @@
+ rc = rte_mempool_get_bulk(rxq->mb_pool, (void **)mbufs, count);
if (unlikely(rc < 0)) {
rte_atomic64_inc(&rxq->adapter->drv_stats->rx_nombuf);
-@@ -1370,8 +1382,9 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
+@@ -1354,8 +1366,9 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
for (i = 0; i < count; i++) {
uint16_t next_to_use_masked = next_to_use & ring_mask;
- struct rte_mbuf *mbuf = mbufs[next_to_use_masked];
@@ -89,13 +90,13 @@
+ rte_prefetch0(mbufs[i + 4]);
req_id = rxq->empty_rx_reqs[next_to_use_masked];
-@@ -1379,4 +1392,5 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
+@@ -1363,4 +1376,5 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
if (unlikely(rc < 0))
break;
+ rxq->rx_buffer_info[req_id] = mbuf;
/* prepare physical address for DMA transaction */
-@@ -1387,7 +1401,6 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
+@@ -1371,7 +1385,6 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
&ebuf, req_id);
if (unlikely(rc)) {
- rte_mempool_put_bulk(rxq->mb_pool, (void **)(&mbuf),
@@ -104,7 +105,7 @@
+ rxq->rx_buffer_info[req_id] = NULL;
break;
}
-@@ -1395,7 +1408,10 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
+@@ -1379,7 +1392,10 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
}
- if (unlikely(i < count))
More information about the stable
mailing list