[dpdk-stable] patch 'net/af_xdp: fix fill queue addresses' has been queued to stable release 19.11.1

luca.boccassi at gmail.com
Mon Feb 17 18:45:23 CET 2020


Hi,

FYI, your patch has been queued to stable release 19.11.1

Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 02/19/20, so please
shout if you have any objections.

Also note that after the patch there's a diff of the upstream commit vs the
patch applied to the branch. This will indicate whether any rebasing was
needed to apply to the stable branch. If there were code changes for rebasing
(i.e. not only metadata diffs), please double-check that the rebase was
done correctly.

Thanks.

Luca Boccassi

---
From 0a88b449453f744d638b5d8ef63a5d4f2d8eb3b7 Mon Sep 17 00:00:00 2001
From: Ciara Loftus <ciara.loftus at intel.com>
Date: Thu, 13 Feb 2020 08:49:13 +0000
Subject: [PATCH] net/af_xdp: fix fill queue addresses

[ upstream commit 96d8ae9990c0a213255b4dc45c53368ea2c92bfa ]

The fill queue addresses should start at the beginning of the mempool
object instead of the beginning of the mbuf. This is because the umem
frame headroom includes the mp hdrobj size. Starting at this point
ensures AF_XDP doesn't write past the available room in the frame, in
the case of larger packets which are close to the size of the mbuf.
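
A minimal sketch of the fixed address math, assuming the standard
rte_mempool object layout (mbuf_to_umem_addr is a hypothetical helper
mirroring the arithmetic in reserve_fill_queue_zc(), not a DPDK API):

    /*
     * One mempool object backing a umem frame, roughly:
     *
     *   | mp obj header | struct rte_mbuf | priv | headroom | data ... |
     *   ^ umem frame (and fill queue address) starts here
     */
    #include <stdint.h>

    static inline uint64_t
    mbuf_to_umem_addr(const void *mbuf, const void *umem_base,
                      uint64_t mp_header_size)
    {
        /* Step back over the mempool object header so the frame handed
         * to the kernel starts at the mempool object, not the mbuf. */
        return (uint64_t)(uintptr_t)mbuf -
                (uint64_t)(uintptr_t)umem_base - mp_header_size;
    }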

Fixes: d8a210774e1d ("net/af_xdp: support unaligned umem chunks")

Signed-off-by: Ciara Loftus <ciara.loftus at intel.com>
Reviewed-by: Xiaolong Ye <xiaolong.ye at intel.com>
---
 drivers/net/af_xdp/rte_eth_af_xdp.c | 25 +++++++++++++++++--------
 1 file changed, 17 insertions(+), 8 deletions(-)

diff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c
index 348dbde5e6..24c70473c3 100644
--- a/drivers/net/af_xdp/rte_eth_af_xdp.c
+++ b/drivers/net/af_xdp/rte_eth_af_xdp.c
@@ -172,7 +172,8 @@ reserve_fill_queue_zc(struct xsk_umem_info *umem, uint16_t reserve_size,
 		uint64_t addr;
 
 		fq_addr = xsk_ring_prod__fill_addr(fq, idx++);
-		addr = (uint64_t)bufs[i] - (uint64_t)umem->buffer;
+		addr = (uint64_t)bufs[i] - (uint64_t)umem->buffer -
+				umem->mb_pool->header_size;
 		*fq_addr = addr;
 	}
 
@@ -271,8 +272,11 @@ af_xdp_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		addr = xsk_umem__extract_addr(addr);
 
 		bufs[i] = (struct rte_mbuf *)
-				xsk_umem__get_data(umem->buffer, addr);
-		bufs[i]->data_off = offset - sizeof(struct rte_mbuf);
+				xsk_umem__get_data(umem->buffer, addr +
+					umem->mb_pool->header_size);
+		bufs[i]->data_off = offset - sizeof(struct rte_mbuf) -
+			rte_pktmbuf_priv_size(umem->mb_pool) -
+			umem->mb_pool->header_size;
 
 		rte_pktmbuf_pkt_len(bufs[i]) = len;
 		rte_pktmbuf_data_len(bufs[i]) = len;
@@ -385,7 +389,8 @@ pull_umem_cq(struct xsk_umem_info *umem, int size)
 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
 		addr = xsk_umem__extract_addr(addr);
 		rte_pktmbuf_free((struct rte_mbuf *)
-					xsk_umem__get_data(umem->buffer, addr));
+					xsk_umem__get_data(umem->buffer,
+					addr + umem->mb_pool->header_size));
 #else
 		rte_ring_enqueue(umem->buf_ring, (void *)addr);
 #endif
@@ -443,9 +448,11 @@ af_xdp_tx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 			}
 			desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx);
 			desc->len = mbuf->pkt_len;
-			addr = (uint64_t)mbuf - (uint64_t)umem->buffer;
+			addr = (uint64_t)mbuf - (uint64_t)umem->buffer -
+					umem->mb_pool->header_size;
 			offset = rte_pktmbuf_mtod(mbuf, uint64_t) -
-					(uint64_t)mbuf;
+					(uint64_t)mbuf +
+					umem->mb_pool->header_size;
 			offset = offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT;
 			desc->addr = addr | offset;
 			count++;
@@ -466,9 +473,11 @@ af_xdp_tx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 			desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx);
 			desc->len = mbuf->pkt_len;
 
-			addr = (uint64_t)local_mbuf - (uint64_t)umem->buffer;
+			addr = (uint64_t)local_mbuf - (uint64_t)umem->buffer -
+					umem->mb_pool->header_size;
 			offset = rte_pktmbuf_mtod(local_mbuf, uint64_t) -
-					(uint64_t)local_mbuf;
+					(uint64_t)local_mbuf +
+					umem->mb_pool->header_size;
 			pkt = xsk_umem__get_data(umem->buffer, addr + offset);
 			offset = offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT;
 			desc->addr = addr | offset;
-- 
2.20.1
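
A note for reviewers tracing the TX hunks above: in unaligned-chunk mode
the descriptor address packs the intra-frame data offset into its upper
bits. A minimal sketch of that encoding, assuming the usual value of
XSK_UNALIGNED_BUF_OFFSET_SHIFT (48) from <linux/if_xdp.h>:

    #include <stdint.h>

    #define XSK_UNALIGNED_BUF_OFFSET_SHIFT 48 /* per <linux/if_xdp.h> */

    /*
     * Pack a umem frame address and a data offset the way af_xdp_tx_zc()
     * does after this fix: the offset (now including the mempool header
     * size) goes in the upper 16 bits, the frame address in the lower 48.
     */
    static inline uint64_t
    pack_xsk_desc_addr(uint64_t frame_addr, uint64_t data_offset)
    {
        return frame_addr | (data_offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
    }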

---
  Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- -	2020-02-17 17:00:16.194093620 +0000
+++ 0031-net-af_xdp-fix-fill-queue-addresses.patch	2020-02-17 17:00:15.347951022 +0000
@@ -1,8 +1,10 @@
-From 96d8ae9990c0a213255b4dc45c53368ea2c92bfa Mon Sep 17 00:00:00 2001
+From 0a88b449453f744d638b5d8ef63a5d4f2d8eb3b7 Mon Sep 17 00:00:00 2001
 From: Ciara Loftus <ciara.loftus at intel.com>
 Date: Thu, 13 Feb 2020 08:49:13 +0000
 Subject: [PATCH] net/af_xdp: fix fill queue addresses
 
+[ upstream commit 96d8ae9990c0a213255b4dc45c53368ea2c92bfa ]
+
 The fill queue addresses should start at the beginning of the mempool
 object instead of the beginning of the mbuf. This is because the umem
 frame headroom includes the mp hdrobj size. Starting at this point
@@ -10,7 +12,6 @@
 the case of larger packets which are close to the size of the mbuf.
 
 Fixes: d8a210774e1d ("net/af_xdp: support unaligned umem chunks")
-Cc: stable at dpdk.org
 
 Signed-off-by: Ciara Loftus <ciara.loftus at intel.com>
 Reviewed-by: Xiaolong Ye <xiaolong.ye at intel.com>
@@ -19,10 +20,10 @@
  1 file changed, 17 insertions(+), 8 deletions(-)
 
 diff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c
-index 111ab000cc..a0edfc3cd3 100644
+index 348dbde5e6..24c70473c3 100644
 --- a/drivers/net/af_xdp/rte_eth_af_xdp.c
 +++ b/drivers/net/af_xdp/rte_eth_af_xdp.c
-@@ -171,7 +171,8 @@ reserve_fill_queue_zc(struct xsk_umem_info *umem, uint16_t reserve_size,
+@@ -172,7 +172,8 @@ reserve_fill_queue_zc(struct xsk_umem_info *umem, uint16_t reserve_size,
  		uint64_t addr;
  
  		fq_addr = xsk_ring_prod__fill_addr(fq, idx++);
@@ -32,7 +33,7 @@
  		*fq_addr = addr;
  	}
  
-@@ -270,8 +271,11 @@ af_xdp_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+@@ -271,8 +272,11 @@ af_xdp_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
  		addr = xsk_umem__extract_addr(addr);
  
  		bufs[i] = (struct rte_mbuf *)
@@ -46,7 +47,7 @@
  
  		rte_pktmbuf_pkt_len(bufs[i]) = len;
  		rte_pktmbuf_data_len(bufs[i]) = len;
-@@ -384,7 +388,8 @@ pull_umem_cq(struct xsk_umem_info *umem, int size)
+@@ -385,7 +389,8 @@ pull_umem_cq(struct xsk_umem_info *umem, int size)
  #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
  		addr = xsk_umem__extract_addr(addr);
  		rte_pktmbuf_free((struct rte_mbuf *)
@@ -56,7 +57,7 @@
  #else
  		rte_ring_enqueue(umem->buf_ring, (void *)addr);
  #endif
-@@ -442,9 +447,11 @@ af_xdp_tx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+@@ -443,9 +448,11 @@ af_xdp_tx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
  			}
  			desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx);
  			desc->len = mbuf->pkt_len;
@@ -70,7 +71,7 @@
  			offset = offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT;
  			desc->addr = addr | offset;
  			count++;
-@@ -465,9 +472,11 @@ af_xdp_tx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+@@ -466,9 +473,11 @@ af_xdp_tx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
  			desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx);
  			desc->len = mbuf->pkt_len;
  

