[dpdk-stable] [PATCH 1/2] net/qede: fix MTU set and max Rx pkt len usage

Rasesh Mody rasesh.mody at cavium.com
Mon Feb 26 06:16:12 CET 2018


[ backported from upstream commit 9e334305178fd3715c17088632544bf58e5836a9 ]
[ backported from upstream commit 1ef4c3a5c1f76b810620b76d82a31ce33a83d3fe ]
[ backported from upstream commit f6033f2497e7d01d8c6f0d691e2f86937597aa35 ]

This patch fixes issues related to MTU set and max_rx_pkt_len usage.
 - Adjust MTU during device configuration when jumbo frames are enabled

 - In qede_set_mtu():
   Return not-supported (-ENOTSUP) for VFs, as the operation is currently
   unsupported there.

   Check for RXQ allocation before calculating the RX buffer size; if the
   queues are not yet allocated, defer the RX buffer size calculation
   until RXQ setup.

   Check whether the device is started before performing the internal
   device stop/start.

 - Use max_rx_pkt_len appropriately when deriving the MTU and RX buffer
   sizes

 - Change the QEDE_ETH_OVERHEAD macro to account for driver-specific
   overheads

 - The driver can handle a dynamic MTU change without needing the port to
   be stopped explicitly by the application. However, there is currently
   no check to prevent I/O from happening on another thread while the
   port goes through its internal reset. This patch fixes the issue by
   pointing the RX/TX burst functions at a dummy function for the
   duration of the change, and also reconfigures the RX buffer size of
   each RX queue based on the new MTU value (a sketch of this
   burst-function swap follows this list).

 - Fix minimum RX buffer size to 1024B

 - Force-enable scatter/gather mode if the given RX buffer size is less
   than the MTU

 - Align the RX buffer size to the cache-line size, with overhead
   included (a worked example of the arithmetic follows this list)
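
For reference, a standalone sketch of the size arithmetic above, using the
macro values from this patch (two VLAN tags, optional LLC/SNAP header,
64-byte cache-line alignment). The constants here stand in for the real
DPDK and driver headers:

    /* Worked example of the RX buffer sizing in qede_set_mtu(); the
     * constants mirror the macros added in qede_rxtx.h below.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define ETHER_HDR_LEN           14
    #define ETHER_CRC_LEN           4
    #define QEDE_VLAN_TAG_SIZE      4
    #define QEDE_LLC_SNAP_HDR_LEN   8
    #define QEDE_FW_RX_ALIGN_END    (1UL << 6)   /* 64B cache line */

    /* Two VLAN tags plus the optional LLC/SNAP header, minus the CRC */
    #define QEDE_ETH_OVERHEAD       ((2 * QEDE_VLAN_TAG_SIZE) - \
                                     ETHER_CRC_LEN + QEDE_LLC_SNAP_HDR_LEN)
    #define QEDE_CEIL_TO_CACHE_LINE_SIZE(n) \
            (((n) + (QEDE_FW_RX_ALIGN_END - 1)) & ~(QEDE_FW_RX_ALIGN_END - 1))

    int main(void)
    {
            uint16_t mtu = 1500;
            uint32_t max_rx_pkt_len = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
            uint32_t frame_size = max_rx_pkt_len + QEDE_ETH_OVERHEAD;

            /* Non-scattered RX needs one buffer per frame, rounded up to
             * the cache line: 1518 -> 1530 -> 1536 for a 1500B MTU.
             */
            printf("rx_buf_size = %lu\n",
                   QEDE_CEIL_TO_CACHE_LINE_SIZE(frame_size));
            return 0;
    }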
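
And a minimal sketch of the burst-function swap, with stand-in types for
rte_eth_dev and rte_mbuf (the real flow in qede_set_mtu() below also
drains in-flight traffic, resizes every RXQ, and stops/starts the port):

    #include <stdint.h>

    struct mbuf;                            /* stand-in for rte_mbuf */
    typedef uint16_t (*burst_fn)(void *q, struct mbuf **pkts, uint16_t n);

    struct eth_dev {                        /* stand-in for rte_eth_dev */
            burst_fn rx_pkt_burst;
            burst_fn tx_pkt_burst;
            int started;
    };

    /* Same shape as the qede_rxtx_pkts_dummy() helper added here: drop
     * all traffic, because rte_eth_rx_burst() never checks for NULL.
     */
    static uint16_t rxtx_pkts_dummy(void *q, struct mbuf **pkts, uint16_t n)
    {
            (void)q; (void)pkts; (void)n;
            return 0;
    }

    static void set_mtu_sketch(struct eth_dev *dev, burst_fn rx, burst_fn tx)
    {
            int restart = 0;

            dev->rx_pkt_burst = rxtx_pkts_dummy;  /* park concurrent I/O */
            dev->tx_pkt_burst = rxtx_pkts_dummy;
            if (dev->started) {
                    dev->started = 0;
                    restart = 1;          /* port would be stopped here */
            }
            /* ... wait out in-flight bursts, apply the new MTU, resize
             * the per-queue RX buffers (see the diff below) ...
             */
            if (restart)
                    dev->started = 1;     /* port would be restarted here */
            dev->rx_pkt_burst = rx;       /* restore real I/O paths */
            dev->tx_pkt_burst = tx;
    }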

Fixes: bec0228816c0 ("net/qede: support scatter gather")
Fixes: 2ea6f76aff40 ("qede: add core driver")

Signed-off-by: Harish Patil <harish.patil at cavium.com>
Signed-off-by: Rasesh Mody <rasesh.mody at cavium.com>
---
 drivers/net/qede/qede_ethdev.c |   89 +++++++++++++++++++++++++++++++---------
 drivers/net/qede/qede_rxtx.c   |   55 +++++++++++++------------
 drivers/net/qede/qede_rxtx.h   |   15 ++++++-
 3 files changed, 111 insertions(+), 48 deletions(-)

diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index 8fdff14..d1b89ea 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -629,7 +629,7 @@ static int qede_init_vport(struct qede_dev *qdev)
 
 	start.remove_inner_vlan = 1;
 	start.gro_enable = 0;
-	start.mtu = ETHER_MTU + QEDE_ETH_OVERHEAD;
+	start.mtu = qdev->mtu;
 	start.vport_id = 0;
 	start.drop_ttl0 = false;
 	start.clear_stats = 1;
@@ -717,6 +717,14 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 	if (rc != 0)
 		return rc;
 
+	/* If jumbo enabled adjust MTU */
+	if (eth_dev->data->dev_conf.rxmode.jumbo_frame)
+		eth_dev->data->mtu =
+			eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
+			ETHER_HDR_LEN - ETHER_CRC_LEN;
+
+	qdev->mtu = eth_dev->data->mtu;
+
 	/* Issue VPORT-START with default config values to allow
 	 * other port configurations early on.
 	 */
@@ -764,8 +772,7 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 
 	PMD_INIT_FUNC_TRACE(edev);
 
-	dev_info->min_rx_bufsize = (uint32_t)(ETHER_MIN_MTU +
-					      QEDE_ETH_OVERHEAD);
+	dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE;
 	dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
 	dev_info->rx_desc_lim = qede_rx_desc_lim;
 	dev_info->tx_desc_lim = qede_tx_desc_lim;
@@ -1403,32 +1410,76 @@ int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
 
 int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 {
-	uint32_t frame_size;
-	struct qede_dev *qdev = dev->data->dev_private;
+	struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
+	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
 	struct rte_eth_dev_info dev_info = {0};
+	struct qede_fastpath *fp;
+	uint32_t max_rx_pkt_len;
+	uint32_t frame_size;
+	uint16_t rx_buf_size;
+	uint16_t bufsz;
+	bool restart = false;
+	int i;
 
+	PMD_INIT_FUNC_TRACE(edev);
+	if (IS_VF(edev))
+		return -ENOTSUP;
 	qede_dev_info_get(dev, &dev_info);
-
-	/* VLAN_TAG = 4 */
-	frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + 4;
-
-	if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
+	max_rx_pkt_len = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+	frame_size = max_rx_pkt_len + QEDE_ETH_OVERHEAD;
+	if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) {
+		DP_ERR(edev, "MTU %u out of range, %u is maximum allowable\n",
+		       mtu, dev_info.max_rx_pktlen - ETHER_HDR_LEN -
+			ETHER_CRC_LEN - QEDE_ETH_OVERHEAD);
 		return -EINVAL;
-
+	}
 	if (!dev->data->scattered_rx &&
-	    frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
+	    frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
+		DP_INFO(edev, "MTU greater than minimum RX buffer size of %u\n",
+			dev->data->min_rx_buf_size);
 		return -EINVAL;
-
-	if (frame_size > ETHER_MAX_LEN)
+	}
+	/* Temporarily replace I/O functions with dummy ones. It cannot
+	 * be set to NULL because rte_eth_rx_burst() doesn't check for NULL.
+	 */
+	dev->rx_pkt_burst = qede_rxtx_pkts_dummy;
+	dev->tx_pkt_burst = qede_rxtx_pkts_dummy;
+	if (dev->data->dev_started) {
+		dev->data->dev_started = 0;
+		qede_dev_stop(dev);
+		restart = true;
+	}
+	rte_delay_ms(1000);
+	qdev->mtu = mtu;
+	/* Fix up RX buf size for all queues of the port */
+	for_each_queue(i) {
+		fp = &qdev->fp_array[i];
+		if ((fp->type & QEDE_FASTPATH_RX) && (fp->rxq != NULL)) {
+			bufsz = (uint16_t)rte_pktmbuf_data_room_size(
+				fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
+			if (dev->data->scattered_rx)
+				rx_buf_size = bufsz + ETHER_HDR_LEN +
+					      ETHER_CRC_LEN + QEDE_ETH_OVERHEAD;
+			else
+				rx_buf_size = frame_size;
+			rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
+			fp->rxq->rx_buf_size = rx_buf_size;
+			DP_INFO(edev, "buf_size adjusted to %u\n", rx_buf_size);
+		}
+	}
+	if (max_rx_pkt_len > ETHER_MAX_LEN)
 		dev->data->dev_conf.rxmode.jumbo_frame = 1;
 	else
 		dev->data->dev_conf.rxmode.jumbo_frame = 0;
-
+	if (!dev->data->dev_started && restart) {
+		qede_dev_start(dev);
+		dev->data->dev_started = 1;
+	}
 	/* update max frame size */
-	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
-	qdev->mtu = mtu;
-	qede_dev_stop(dev);
-	qede_dev_start(dev);
+	dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len;
+	/* Reassign back */
+	dev->rx_pkt_burst = qede_recv_pkts;
+	dev->tx_pkt_burst = qede_xmit_pkts;
 
 	return 0;
 }
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index 9948d2e..e586dc7 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -89,11 +89,11 @@ static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq)
 {
 	struct qede_dev *qdev = dev->data->dev_private;
 	struct ecore_dev *edev = &qdev->edev;
-	struct rte_eth_dev_data *eth_data = dev->data;
+	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
 	struct qede_rx_queue *rxq;
-	uint16_t pkt_len = (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len;
+	uint16_t max_rx_pkt_len;
+	uint16_t bufsz;
 	size_t size;
-	uint16_t data_size;
 	int rc;
 	int i;
 
@@ -127,34 +127,27 @@ static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq)
 	rxq->nb_rx_desc = nb_desc;
 	rxq->queue_id = queue_idx;
 	rxq->port_id = dev->data->port_id;
-
-	/* Sanity check */
-	data_size = (uint16_t)rte_pktmbuf_data_room_size(mp) -
-				RTE_PKTMBUF_HEADROOM;
-
-	if (pkt_len > data_size && !dev->data->scattered_rx) {
-		DP_ERR(edev, "MTU %u should not exceed dataroom %u\n",
-		       pkt_len, data_size);
-		rte_free(rxq);
-		return -EINVAL;
+	max_rx_pkt_len = (uint16_t)rxmode->max_rx_pkt_len;
+
+	/* Fix up RX buffer size */
+	bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
+	if ((rxmode->enable_scatter)			||
+	    (max_rx_pkt_len + QEDE_ETH_OVERHEAD) > bufsz) {
+		if (!dev->data->scattered_rx) {
+			DP_INFO(edev, "Forcing scatter-gather mode\n");
+			dev->data->scattered_rx = 1;
+		}
 	}
-
 	if (dev->data->scattered_rx)
-		rxq->rx_buf_size = data_size;
+		rxq->rx_buf_size = bufsz + ETHER_HDR_LEN +
+				   ETHER_CRC_LEN + QEDE_ETH_OVERHEAD;
 	else
-		rxq->rx_buf_size = pkt_len + QEDE_ETH_OVERHEAD;
+		rxq->rx_buf_size = max_rx_pkt_len + QEDE_ETH_OVERHEAD;
+	/* Align to cache-line size if needed */
+	rxq->rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rxq->rx_buf_size);
 
-	qdev->mtu = pkt_len;
-
-	DP_INFO(edev, "MTU = %u ; RX buffer = %u\n",
-		qdev->mtu, rxq->rx_buf_size);
-
-	if (pkt_len > ETHER_MAX_LEN) {
-		dev->data->dev_conf.rxmode.jumbo_frame = 1;
-		DP_NOTICE(edev, false, "jumbo frame enabled\n");
-	} else {
-		dev->data->dev_conf.rxmode.jumbo_frame = 0;
-	}
+	DP_INFO(edev, "mtu %u mbufsz %u bd_max_bytes %u scatter_mode %d\n",
+		qdev->mtu, bufsz, rxq->rx_buf_size, dev->data->scattered_rx);
 
 	/* Allocate the parallel driver ring for Rx buffers */
 	size = sizeof(*rxq->sw_rx_ring) * rxq->nb_rx_desc;
@@ -1541,3 +1534,11 @@ void qede_dev_stop(struct rte_eth_dev *eth_dev)
 
 	DP_INFO(edev, "dev_state is QEDE_DEV_STOP\n");
 }
+
+uint16_t
+qede_rxtx_pkts_dummy(__rte_unused void *p_rxq,
+		     __rte_unused struct rte_mbuf **pkts,
+		     __rte_unused uint16_t nb_pkts)
+{
+	return 0;
+}
diff --git a/drivers/net/qede/qede_rxtx.h b/drivers/net/qede/qede_rxtx.h
index ed9a529..d1f3e99 100644
--- a/drivers/net/qede/qede_rxtx.h
+++ b/drivers/net/qede/qede_rxtx.h
@@ -55,14 +55,21 @@
 	((flags) & (PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK \
 		<< PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT))
 
+#define QEDE_MIN_RX_BUFF_SIZE		(1024)
+#define QEDE_VLAN_TAG_SIZE		(4)
+#define QEDE_LLC_SNAP_HDR_LEN		(8)
+
 /* Max supported alignment is 256 (8 shift)
  * minimal alignment shift 6 is optimal for 57xxx HW performance
  */
 #define QEDE_L1_CACHE_SHIFT	6
 #define QEDE_RX_ALIGN_SHIFT	(RTE_MAX(6, RTE_MIN(8, QEDE_L1_CACHE_SHIFT)))
 #define QEDE_FW_RX_ALIGN_END	(1UL << QEDE_RX_ALIGN_SHIFT)
-
-#define QEDE_ETH_OVERHEAD       (ETHER_HDR_LEN + 8 + 8 + QEDE_FW_RX_ALIGN_END)
+#define QEDE_CEIL_TO_CACHE_LINE_SIZE(n) (((n) + (QEDE_FW_RX_ALIGN_END - 1)) & \
+					~(QEDE_FW_RX_ALIGN_END - 1))
+/* Note: QEDE_LLC_SNAP_HDR_LEN is optional */
+#define QEDE_ETH_OVERHEAD	(((2 * QEDE_VLAN_TAG_SIZE)) - (ETHER_CRC_LEN) \
+				+ (QEDE_LLC_SNAP_HDR_LEN))
 
 /* TBD: Excluding IPV6 */
 #define QEDE_RSS_OFFLOAD_ALL    (ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP | \
@@ -180,6 +187,10 @@ uint16_t qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
 uint16_t qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts,
 			uint16_t nb_pkts);
 
+uint16_t qede_rxtx_pkts_dummy(__rte_unused void *p_rxq,
+			      __rte_unused struct rte_mbuf **pkts,
+			      __rte_unused uint16_t nb_pkts);
+
 /* Fastpath resource alloc/dealloc helpers */
 int qede_alloc_fp_resc(struct qede_dev *qdev);
 
-- 
1.7.10.3
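
A note for application writers: with this change, an MTU update on a
running port is a single call, with no explicit stop/start around it. A
hedged example, assuming a DPDK release with 16-bit port IDs and using an
illustrative helper name:

    #include <stdio.h>
    #include <rte_ethdev.h>

    /* Illustrative helper: the PMD now parks its burst functions and
     * resizes the RX buffers internally while the MTU changes.
     */
    static int bump_mtu(uint16_t port_id, uint16_t new_mtu)
    {
            int ret = rte_eth_dev_set_mtu(port_id, new_mtu);

            if (ret != 0)  /* e.g. -ENOTSUP on a qede VF after this patch */
                    printf("set_mtu(%u) failed: %d\n", new_mtu, ret);
            return ret;
    }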


