[dpdk-dev] [PATCH v3 3/7] net/mlx4: support multi-segments Rx

Ophir Munk ophirmu at mellanox.com
Wed Oct 4 23:49:02 CEST 2017


From: Vasily Philipov <vasilyf at mellanox.com>

The Rx fast path now accesses the HW directly, without going through
Verbs calls.

The number of scatter entries (SGEs) per WR is now calculated on the
fly, according to the maximum expected packet size.
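
As an illustration (numbers assumed, not part of this patch): with
2048-byte mbufs, the default 128-byte RTE_PKTMBUF_HEADROOM and a
9000-byte max_rx_pkt_len, only the first segment keeps its headroom,
leaving 9000 - 2048 + 128 = 7080 bytes for the remaining segments.
That takes 7080 / 2048 = 3 full mbufs plus a partial one, plus the
headroom mbuf itself: 5 SGEs, rounded up to the next power of two
for a total of 8 SGEs per WR.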

Signed-off-by: Vasily Philipov <vasilyf at mellanox.com>
Signed-off-by: Ophir Munk <ophirmu at mellanox.com>
---
This commit was split out of a previous commit,
"net/mlx4: get back Rx flow functionality".

 drivers/net/mlx4/mlx4_rxq.c  | 29 ++++++++++++++++++++++-------
 drivers/net/mlx4/mlx4_rxtx.c | 10 +++++++---
 drivers/net/mlx4/mlx4_rxtx.h |  1 +
 3 files changed, 30 insertions(+), 10 deletions(-)

diff --git a/drivers/net/mlx4/mlx4_rxq.c b/drivers/net/mlx4/mlx4_rxq.c
index cb18f20..7d13121 100644
--- a/drivers/net/mlx4/mlx4_rxq.c
+++ b/drivers/net/mlx4/mlx4_rxq.c
@@ -78,6 +78,7 @@
 mlx4_rxq_alloc_elts(struct rxq *rxq, unsigned int elts_n)
 {
 	unsigned int i;
+	const unsigned int sge_n = 1 << rxq->sge_n;
 	struct rte_mbuf *(*elts)[elts_n] =
 		rte_calloc_socket("RXQ", 1, sizeof(*elts), 0, rxq->socket);
 
@@ -105,6 +106,9 @@
 		assert(rte_pktmbuf_data_len(buf) == 0);
 		assert(rte_pktmbuf_pkt_len(buf) == 0);
 		assert(!buf->next);
+		/* Only the first segment keeps headroom. */
+		if (i % sge_n)
+			buf->data_off = 0;
 		buf->port = rxq->port_id;
 		buf->data_len = rte_pktmbuf_tailroom(buf);
 		buf->pkt_len = rte_pktmbuf_tailroom(buf);
@@ -119,8 +123,8 @@
 		};
 		(*rxq->elts)[i] = buf;
 	}
-	DEBUG("%p: allocated and configured %u single-segment WRs",
-	      (void *)rxq, elts_n);
+	DEBUG("%p: allocated and configured %u segments (max %u packets)",
+	      (void *)rxq, elts_n, elts_n >> rxq->sge_n);
 	rxq->elts_n = log2above(elts_n);
 	return 0;
 error:
@@ -199,7 +203,8 @@
  *   QP pointer or NULL in case of error and rte_errno is set.
  */
 static struct ibv_qp *
-mlx4_rxq_setup_qp(struct priv *priv, struct ibv_cq *cq, uint16_t desc)
+mlx4_rxq_setup_qp(struct priv *priv, struct ibv_cq *cq,
+		  uint16_t desc, unsigned int sge_n)
 {
 	struct ibv_qp *qp;
 	struct ibv_qp_init_attr attr = {
@@ -213,7 +218,7 @@
 					priv->device_attr.max_qp_wr :
 					desc),
 			/* Max number of scatter/gather elements in a WR. */
-			.max_recv_sge = 1,
+			.max_recv_sge = sge_n,
 		},
 		.qp_type = IBV_QPT_RAW_PACKET,
 	};
@@ -267,8 +272,9 @@
 	assert(mb_len >= RTE_PKTMBUF_HEADROOM);
 	if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
 	    (mb_len - RTE_PKTMBUF_HEADROOM)) {
-		;
+		tmpl.sge_n = 0;
 	} else if (dev->data->dev_conf.rxmode.enable_scatter) {
+		unsigned int sge_n;
 		unsigned int rx_pkt_len =
 				dev->data->dev_conf.rxmode.jumbo_frame ?
 				dev->data->dev_conf.rxmode.max_rx_pkt_len :
@@ -278,6 +284,13 @@
 			rx_pkt_len = ETHER_MTU;
 		/* Only the first mbuf has a headroom */
 		rx_pkt_len = rx_pkt_len - mb_len + RTE_PKTMBUF_HEADROOM;
+		/*
+		 * Determine the number of SGEs needed for a full packet
+		 * and round it to the next power of two.
+		 */
+		sge_n = (rx_pkt_len / mb_len) + !!(rx_pkt_len % mb_len) + 1;
+		tmpl.sge_n = log2above(sge_n);
+		desc >>= tmpl.sge_n;
 	} else {
 		WARN("%p: the requested maximum Rx packet size (%u) is"
 		     " larger than a single mbuf (%u) and scattered"
@@ -286,6 +299,8 @@
 		     dev->data->dev_conf.rxmode.max_rx_pkt_len,
 		     mb_len - RTE_PKTMBUF_HEADROOM);
 	}
+	DEBUG("%p: number of sges %u (%u WRs)",
+	      (void *)dev, 1 << tmpl.sge_n, desc);
 	/* Use the entire Rx mempool as the memory region. */
 	tmpl.mr = mlx4_mp2mr(priv->pd, mp);
 	if (tmpl.mr == NULL) {
@@ -321,7 +336,7 @@
 	      priv->device_attr.max_qp_wr);
 	DEBUG("priv->device_attr.max_sge is %d",
 	      priv->device_attr.max_sge);
-	tmpl.qp = mlx4_rxq_setup_qp(priv, tmpl.cq, desc);
+	tmpl.qp = mlx4_rxq_setup_qp(priv, tmpl.cq, desc, 1 << tmpl.sge_n);
 	if (tmpl.qp == NULL) {
 		ERROR("%p: QP creation failure: %s",
 		      (void *)dev, strerror(rte_errno));
@@ -373,7 +388,7 @@
 	/* Save port ID. */
 	tmpl.port_id = dev->data->port_id;
 	DEBUG("%p: RTE port ID: %u", (void *)rxq, tmpl.port_id);
-	ret = mlx4_rxq_alloc_elts(&tmpl, desc);
+	ret = mlx4_rxq_alloc_elts(&tmpl, desc << tmpl.sge_n);
 	if (ret) {
 		ERROR("%p: RXQ allocation failed: %s",
 		      (void *)dev, strerror(rte_errno));
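
The SGE sizing above boils down to the following standalone sketch (a
hypothetical helper, not driver code: the function and parameter names
are assumptions, log2above() is reimplemented inline, and the
jumbo_frame/ETHER_MTU selection is omitted for brevity):

    /* Return log2 of the SGEs needed per WR for a given packet size. */
    static unsigned int
    rx_sge_log2(unsigned int max_rx_pkt_len, unsigned int mb_len,
                unsigned int headroom)
    {
            unsigned int rx_pkt_len;
            unsigned int sge_n;
            unsigned int l = 0;

            /* Everything fits in one segment: no scattering needed. */
            if (max_rx_pkt_len <= mb_len - headroom)
                    return 0;
            /* Only the first mbuf keeps its headroom. */
            rx_pkt_len = max_rx_pkt_len - mb_len + headroom;
            /* Full mbufs, a possible partial one, plus the headroom mbuf. */
            sge_n = rx_pkt_len / mb_len + !!(rx_pkt_len % mb_len) + 1;
            /* log2above(): smallest l such that (1u << l) >= sge_n. */
            while ((1u << l) < sge_n)
                    ++l;
            return l;
    }

The WR count is then scaled down accordingly (desc >>= tmpl.sge_n), so
that desc WRs of (1 << tmpl.sge_n) segments each consume the same
number of ring elements as before.
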
diff --git a/drivers/net/mlx4/mlx4_rxtx.c b/drivers/net/mlx4/mlx4_rxtx.c
index b4391bf..f517505 100644
--- a/drivers/net/mlx4/mlx4_rxtx.c
+++ b/drivers/net/mlx4/mlx4_rxtx.c
@@ -577,10 +577,11 @@
 {
 	struct rxq *rxq = dpdk_rxq;
 	const unsigned int wr_cnt = (1 << rxq->elts_n) - 1;
+	const unsigned int sge_n = rxq->sge_n;
 	struct rte_mbuf *pkt = NULL;
 	struct rte_mbuf *seg = NULL;
 	unsigned int i = 0;
-	unsigned int rq_ci = (rxq->hw.rq_ci);
+	unsigned int rq_ci = (rxq->hw.rq_ci << sge_n);
 	int len = 0;
 
 	while (pkts_n) {
@@ -661,12 +662,15 @@
 		--pkts_n;
 		++i;
 skip:
+		/* Align consumer index to the next stride. */
+		rq_ci >>= sge_n;
 		++rq_ci;
+		rq_ci <<= sge_n;
 	}
-	if (unlikely(i == 0 && rq_ci == rxq->hw.rq_ci))
+	if (unlikely(i == 0 && (rq_ci >> sge_n) == rxq->hw.rq_ci))
 		return 0;
 	/* Update the consumer index. */
-	rxq->hw.rq_ci = rq_ci;
+	rxq->hw.rq_ci = rq_ci >> sge_n;
 	rte_wmb();
 	*rxq->hw.rq_db = rte_cpu_to_be_32(rxq->hw.rq_ci);
 	*rxq->mcq.set_ci_db =
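
The burst function keeps rq_ci in segment units while the HW ring
counts whole WRs, i.e. strides of (1 << sge_n) segments. A hypothetical
walk-through of the alignment at the skip: label (values and names
assumed, and assuming intermediate segments advance rq_ci by one while
the last segment falls through without its own increment):

    /* sge_n = 2, i.e. 4 segments per WR. */
    const unsigned int sge_n = 2;
    unsigned int rq_ci = 10 << sge_n;  /* WR 10 -> segment index 40 */

    /* A 3-segment packet: each intermediate segment bumps rq_ci. */
    ++rq_ci;            /* 41: second segment */
    ++rq_ci;            /* 42: third and last segment */
    /* Align the consumer index to the next stride, as at skip:. */
    rq_ci >>= sge_n;    /* 42 >> 2 = 10: floor to the current WR */
    ++rq_ci;            /* 11: next WR */
    rq_ci <<= sge_n;    /* 44: first segment of WR 11 */
    /* rq_ci >> sge_n = 11 is what gets written back to hw.rq_ci. */
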
diff --git a/drivers/net/mlx4/mlx4_rxtx.h b/drivers/net/mlx4/mlx4_rxtx.h
index fa2481c..df83552 100644
--- a/drivers/net/mlx4/mlx4_rxtx.h
+++ b/drivers/net/mlx4/mlx4_rxtx.h
@@ -79,6 +79,7 @@ struct rxq {
 		uint16_t rq_ci;
 	} hw;
 	struct mlx4_cq mcq;  /**< Info for directly manipulating the CQ. */
+	unsigned int sge_n; /**< Log2 of the number of SGEs. */
 	struct mlx4_rxq_stats stats; /**< Rx queue counters. */
 	unsigned int socket; /**< CPU socket ID for allocations. */
 };
-- 
1.8.3.1


