[dpdk-stable] patch 'net/dpaa2: set port in mbuf' has been queued to LTS release 17.11.10

luca.boccassi at gmail.com luca.boccassi at gmail.com
Thu Dec 19 15:33:32 CET 2019


Hi,

FYI, your patch has been queued to LTS release 17.11.10

Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 12/21/19. So please
shout if anyone has objections.

Also note that after the patch there's a diff of the upstream commit vs the
patch applied to the branch. This will indicate if there was any rebasing
needed to apply to the stable branch. If there were code changes for rebasing
(ie: not only metadata diffs), please double check that the rebase was
correctly done.

Thanks.

Luca Boccassi

---
>From 3c00d0177e1068db12b6d2e598a1bf8d6a594ad6 Mon Sep 17 00:00:00 2001
From: Nipun Gupta <nipun.gupta at nxp.com>
Date: Wed, 9 Oct 2019 16:43:23 +0530
Subject: [PATCH] net/dpaa2: set port in mbuf

[ upstream commit 005d943e57ceaf62ac8a2240696c3f9aa3980513 ]

This patch sets the port in mbuf for event scenarios as well

Fixes: b677d4c6d281 ("net/dpaa2: add API for event Rx adapter")
Fixes: 2d3788631862 ("net/dpaa2: support atomic queues")
Fixes: 16c4a3c46ab7 ("bus/fslmc: add enqueue response read in qbman")

Signed-off-by: Nipun Gupta <nipun.gupta at nxp.com>
Acked-by: Hemant Agrawal <hemant.agrawal at nxp.com>
---
 drivers/net/dpaa2/dpaa2_rxtx.c | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index 8ecd238ddb..bcac19af5e 100644
--- a/drivers/net/dpaa2/dpaa2_rxtx.c
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -132,7 +132,8 @@ dpaa2_dev_rx_offload(uint64_t hw_annot_addr, struct rte_mbuf *mbuf)
 }
 
 static inline struct rte_mbuf *__attribute__((hot))
-eth_sg_fd_to_mbuf(const struct qbman_fd *fd)
+eth_sg_fd_to_mbuf(const struct qbman_fd *fd,
+		  int port_id)
 {
 	struct qbman_sge *sgt, *sge;
 	dma_addr_t sg_addr;
@@ -159,6 +160,7 @@ eth_sg_fd_to_mbuf(const struct qbman_fd *fd)
 	first_seg->pkt_len = DPAA2_GET_FD_LEN(fd);
 	first_seg->nb_segs = 1;
 	first_seg->next = NULL;
+	first_seg->port = port_id;
 
 	first_seg->packet_type = dpaa2_dev_rx_parse(
 			 (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
@@ -192,7 +194,8 @@ eth_sg_fd_to_mbuf(const struct qbman_fd *fd)
 }
 
 static inline struct rte_mbuf *__attribute__((hot))
-eth_fd_to_mbuf(const struct qbman_fd *fd)
+eth_fd_to_mbuf(const struct qbman_fd *fd,
+	       int port_id)
 {
 	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
 		DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
@@ -206,6 +209,7 @@ eth_fd_to_mbuf(const struct qbman_fd *fd)
 	mbuf->data_off = DPAA2_GET_FD_OFFSET(fd);
 	mbuf->data_len = DPAA2_GET_FD_LEN(fd);
 	mbuf->pkt_len = mbuf->data_len;
+	mbuf->port = port_id;
 
 	/* Parse the packet */
 	/* parse results are after the private - sw annotation area */
@@ -470,10 +474,9 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 				+ DPAA2_FD_PTA_SIZE + 16));
 
 		if (unlikely(DPAA2_FD_GET_FORMAT(fd[num_rx]) == qbman_fd_sg))
-			bufs[num_rx] = eth_sg_fd_to_mbuf(fd[num_rx]);
+			bufs[num_rx] = eth_sg_fd_to_mbuf(fd[num_rx], eth_data->port_id);
 		else
-			bufs[num_rx] = eth_fd_to_mbuf(fd[num_rx]);
-		bufs[num_rx]->port = dev->data->port_id;
+			bufs[num_rx] = eth_fd_to_mbuf(fd[num_rx], eth_data->port_id);
 
 		if (dev->data->dev_conf.rxmode.hw_vlan_strip)
 			rte_vlan_strip(bufs[num_rx]);
@@ -521,7 +524,7 @@ dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
 				 struct dpaa2_queue *rxq,
 				 struct rte_event *ev)
 {
-	ev->mbuf = eth_fd_to_mbuf(fd);
+	ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);
 
 	ev->flow_id = rxq->ev.flow_id;
 	ev->sub_event_type = rxq->ev.sub_event_type;
-- 
2.20.1

---
  Diff of the applied patch vs the upstream commit (please double-check if non-empty):
---
--- -	2019-12-19 14:32:28.911153807 +0000
+++ 0065-net-dpaa2-set-port-in-mbuf.patch	2019-12-19 14:32:26.141298539 +0000
@@ -1,26 +1,27 @@
-From 005d943e57ceaf62ac8a2240696c3f9aa3980513 Mon Sep 17 00:00:00 2001
+From 3c00d0177e1068db12b6d2e598a1bf8d6a594ad6 Mon Sep 17 00:00:00 2001
 From: Nipun Gupta <nipun.gupta at nxp.com>
 Date: Wed, 9 Oct 2019 16:43:23 +0530
 Subject: [PATCH] net/dpaa2: set port in mbuf
 
+[ upstream commit 005d943e57ceaf62ac8a2240696c3f9aa3980513 ]
+
 This patch sets the port in mbuf for event scenarios as well
 
 Fixes: b677d4c6d281 ("net/dpaa2: add API for event Rx adapter")
 Fixes: 2d3788631862 ("net/dpaa2: support atomic queues")
 Fixes: 16c4a3c46ab7 ("bus/fslmc: add enqueue response read in qbman")
-Cc: stable at dpdk.org
 
 Signed-off-by: Nipun Gupta <nipun.gupta at nxp.com>
 Acked-by: Hemant Agrawal <hemant.agrawal at nxp.com>
 ---
- drivers/net/dpaa2/dpaa2_rxtx.c | 30 ++++++++++++++++++------------
- 1 file changed, 18 insertions(+), 12 deletions(-)
+ drivers/net/dpaa2/dpaa2_rxtx.c | 15 +++++++++------
+ 1 file changed, 9 insertions(+), 6 deletions(-)
 
 diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
-index 7cea109d5f..b7b2d8652a 100644
+index 8ecd238ddb..bcac19af5e 100644
 --- a/drivers/net/dpaa2/dpaa2_rxtx.c
 +++ b/drivers/net/dpaa2/dpaa2_rxtx.c
-@@ -260,7 +260,8 @@ dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, void *hw_annot_addr)
+@@ -132,7 +132,8 @@ dpaa2_dev_rx_offload(uint64_t hw_annot_addr, struct rte_mbuf *mbuf)
  }
  
  static inline struct rte_mbuf *__attribute__((hot))
@@ -29,16 +30,16 @@
 +		  int port_id)
  {
  	struct qbman_sge *sgt, *sge;
- 	size_t sg_addr, fd_addr;
-@@ -286,6 +287,7 @@ eth_sg_fd_to_mbuf(const struct qbman_fd *fd)
+ 	dma_addr_t sg_addr;
+@@ -159,6 +160,7 @@ eth_sg_fd_to_mbuf(const struct qbman_fd *fd)
  	first_seg->pkt_len = DPAA2_GET_FD_LEN(fd);
  	first_seg->nb_segs = 1;
  	first_seg->next = NULL;
 +	first_seg->port = port_id;
- 	if (dpaa2_svr_family == SVR_LX2160A)
- 		dpaa2_dev_rx_parse_new(first_seg, fd);
- 	else
-@@ -319,7 +321,8 @@ eth_sg_fd_to_mbuf(const struct qbman_fd *fd)
+ 
+ 	first_seg->packet_type = dpaa2_dev_rx_parse(
+ 			 (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
+@@ -192,7 +194,8 @@ eth_sg_fd_to_mbuf(const struct qbman_fd *fd)
  }
  
  static inline struct rte_mbuf *__attribute__((hot))
@@ -48,80 +49,36 @@
  {
  	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
  		DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
-@@ -333,6 +336,7 @@ eth_fd_to_mbuf(const struct qbman_fd *fd)
+@@ -206,6 +209,7 @@ eth_fd_to_mbuf(const struct qbman_fd *fd)
  	mbuf->data_off = DPAA2_GET_FD_OFFSET(fd);
  	mbuf->data_len = DPAA2_GET_FD_LEN(fd);
  	mbuf->pkt_len = mbuf->data_len;
 +	mbuf->port = port_id;
- 	mbuf->next = NULL;
- 	rte_mbuf_refcnt_set(mbuf, 1);
  
-@@ -621,10 +625,9 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
- 		}
- 
- 		if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
--			bufs[num_rx] = eth_sg_fd_to_mbuf(fd);
-+			bufs[num_rx] = eth_sg_fd_to_mbuf(fd, eth_data->port_id);
+ 	/* Parse the packet */
+ 	/* parse results are after the private - sw annotation area */
+@@ -470,10 +474,9 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+ 				+ DPAA2_FD_PTA_SIZE + 16));
+ 
+ 		if (unlikely(DPAA2_FD_GET_FORMAT(fd[num_rx]) == qbman_fd_sg))
+-			bufs[num_rx] = eth_sg_fd_to_mbuf(fd[num_rx]);
++			bufs[num_rx] = eth_sg_fd_to_mbuf(fd[num_rx], eth_data->port_id);
  		else
--			bufs[num_rx] = eth_fd_to_mbuf(fd);
--		bufs[num_rx]->port = eth_data->port_id;
-+			bufs[num_rx] = eth_fd_to_mbuf(fd, eth_data->port_id);
- #if defined(RTE_LIBRTE_IEEE1588)
- 		priv->rx_timestamp = bufs[num_rx]->timestamp;
- #endif
-@@ -679,7 +682,7 @@ dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
- 	ev->queue_id = rxq->ev.queue_id;
- 	ev->priority = rxq->ev.priority;
- 
--	ev->mbuf = eth_fd_to_mbuf(fd);
-+	ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);
- 
- 	qbman_swp_dqrr_consume(swp, dq);
- }
-@@ -704,7 +707,7 @@ dpaa2_dev_process_atomic_event(struct qbman_swp *swp __attribute__((unused)),
- 	ev->queue_id = rxq->ev.queue_id;
- 	ev->priority = rxq->ev.priority;
- 
--	ev->mbuf = eth_fd_to_mbuf(fd);
-+	ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);
- 
- 	dqrr_index = qbman_get_dqrr_idx(dq);
- 	ev->mbuf->seqn = dqrr_index + 1;
-@@ -731,7 +734,7 @@ dpaa2_dev_process_ordered_event(struct qbman_swp *swp,
- 	ev->queue_id = rxq->ev.queue_id;
- 	ev->priority = rxq->ev.priority;
- 
+-			bufs[num_rx] = eth_fd_to_mbuf(fd[num_rx]);
+-		bufs[num_rx]->port = dev->data->port_id;
++			bufs[num_rx] = eth_fd_to_mbuf(fd[num_rx], eth_data->port_id);
+ 
+ 		if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+ 			rte_vlan_strip(bufs[num_rx]);
+@@ -521,7 +524,7 @@ dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
+ 				 struct dpaa2_queue *rxq,
+ 				 struct rte_event *ev)
+ {
 -	ev->mbuf = eth_fd_to_mbuf(fd);
 +	ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);
  
- 	ev->mbuf->seqn = DPAA2_ENQUEUE_FLAG_ORP;
- 	ev->mbuf->seqn |= qbman_result_DQ_odpid(dq) << DPAA2_EQCR_OPRID_SHIFT;
-@@ -823,10 +826,11 @@ dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
- 					+ DPAA2_FD_PTA_SIZE + 16));
- 
- 			if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
--				bufs[num_rx] = eth_sg_fd_to_mbuf(fd);
-+				bufs[num_rx] = eth_sg_fd_to_mbuf(fd,
-+							eth_data->port_id);
- 			else
--				bufs[num_rx] = eth_fd_to_mbuf(fd);
--			bufs[num_rx]->port = eth_data->port_id;
-+				bufs[num_rx] = eth_fd_to_mbuf(fd,
-+							eth_data->port_id);
- 
- 		if (eth_data->dev_conf.rxmode.offloads &
- 				DEV_RX_OFFLOAD_VLAN_STRIP) {
-@@ -1170,7 +1174,9 @@ dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci)
- 	struct rte_mbuf *m;
- 
- 	fd = qbman_result_eqresp_fd(&dpio_dev->eqresp[eqresp_ci]);
--	m = eth_fd_to_mbuf(fd);
-+
-+	/* Setting port id does not matter as we are to free the mbuf */
-+	m = eth_fd_to_mbuf(fd, 0);
- 	rte_pktmbuf_free(m);
- }
- 
+ 	ev->flow_id = rxq->ev.flow_id;
+ 	ev->sub_event_type = rxq->ev.sub_event_type;
 -- 
 2.20.1
 


More information about the stable mailing list