[dpdk-dev] [PATCH v2 2/5] net/qede: refactoring multi-queue implementation

Rasesh Mody rasesh.mody at cavium.com
Wed Jun 7 09:42:19 CEST 2017


From: Harish Patil <harish.patil at cavium.com>

This patch does the following refactoring and cleanup:
- As part of multi-queue support, a struct member called 'type' was added
in struct qede_fastpath to identify whether a queue is RX or TX and to
act accordingly. This was unnecessary in the first place, since pointers
to the RX and TX queues are already available in rte_eth_dev->data, so
all usage of fp->type is removed.
- Remove the remaining additional layer of internal callbacks for RX/TX
queue and fastpath-related operations from struct qed_eth_ops_pass. With
this change the files qede_eth_if.[c,h] are no longer needed.
- Add new per-queue start/stop APIs instead of a single routine that
clubs all the queues together (see the sketch after this list).
- Remove references to multiple TX queues per fastpath (num_tc and
fp->txqs), since CoS is not supported.
- Enable sharing of the status block for each queue pair.
- Remove enum qede_dev_state and instead use the existing per-queue
states RTE_ETH_QUEUE_STATE_STOPPED/RTE_ETH_QUEUE_STATE_STARTED.
- Move qede_dev_start() and qede_dev_stop() from qede_rxtx.c to
qede_ethdev.c.
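
Each per-queue API picks its hw-function with 'qid % edev->num_hwfns'
and issues its own start/stop ramrod; qede_start_queues() and
qede_stop_queues() then simply iterate over the RX and TX queue ranges.
A minimal sketch of the new start flow, condensed from the qede_rxtx.c
changes below (the ramrod parameters and per-queue error handling live
in qede_rx_queue_start()/qede_tx_queue_start()):

    int qede_start_queues(struct rte_eth_dev *eth_dev)
    {
            struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
            uint8_t id;
            int rc;

            for_each_rss(id) {              /* start every RX queue... */
                    rc = qede_rx_queue_start(eth_dev, id);
                    if (rc != ECORE_SUCCESS)
                            return -1;
            }

            for_each_tss(id) {              /* ...then every TX queue */
                    rc = qede_tx_queue_start(eth_dev, id);
                    if (rc != ECORE_SUCCESS)
                            return -1;
            }

            return 0;
    }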

Signed-off-by: Harish Patil <harish.patil at cavium.com>
---
 drivers/net/qede/Makefile      |    1 -
 drivers/net/qede/qede_eth_if.c |  209 ---------
 drivers/net/qede/qede_eth_if.h |   97 ----
 drivers/net/qede/qede_ethdev.c |  346 ++++++++++-----
 drivers/net/qede/qede_ethdev.h |   30 +-
 drivers/net/qede/qede_if.h     |   81 +---
 drivers/net/qede/qede_main.c   |   29 +-
 drivers/net/qede/qede_rxtx.c   |  953 +++++++++++++++++-----------------------
 drivers/net/qede/qede_rxtx.h   |   29 +-
 9 files changed, 683 insertions(+), 1092 deletions(-)
 delete mode 100644 drivers/net/qede/qede_eth_if.c
 delete mode 100644 drivers/net/qede/qede_eth_if.h

diff --git a/drivers/net/qede/Makefile b/drivers/net/qede/Makefile
index 3323914..f03441d 100644
--- a/drivers/net/qede/Makefile
+++ b/drivers/net/qede/Makefile
@@ -101,7 +101,6 @@ SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += bcm_osal.c
 SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_sriov.c
 SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_vf.c
 SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_ethdev.c
-SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_eth_if.c
 SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_main.c
 SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_rxtx.c
 SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_fdir.c
diff --git a/drivers/net/qede/qede_eth_if.c b/drivers/net/qede/qede_eth_if.c
deleted file mode 100644
index 7557943..0000000
--- a/drivers/net/qede/qede_eth_if.c
+++ /dev/null
@@ -1,209 +0,0 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
- * All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
- */
-
-#include "qede_ethdev.h"
-
-static int
-qed_start_rxq(struct ecore_dev *edev,
-	      uint8_t rss_num,
-	      struct ecore_queue_start_common_params *p_params,
-	      uint16_t bd_max_bytes,
-	      dma_addr_t bd_chain_phys_addr,
-	      dma_addr_t cqe_pbl_addr,
-	      uint16_t cqe_pbl_size,
-	      struct ecore_rxq_start_ret_params *ret_params)
-{
-	struct ecore_hwfn *p_hwfn;
-	int rc, hwfn_index;
-
-	hwfn_index = rss_num % edev->num_hwfns;
-	p_hwfn = &edev->hwfns[hwfn_index];
-
-	p_params->queue_id = p_params->queue_id / edev->num_hwfns;
-	p_params->stats_id = p_params->vport_id;
-
-	rc = ecore_eth_rx_queue_start(p_hwfn,
-				      p_hwfn->hw_info.opaque_fid,
-				      p_params,
-				      bd_max_bytes,
-				      bd_chain_phys_addr,
-				      cqe_pbl_addr,
-				      cqe_pbl_size,
-				      ret_params);
-
-	if (rc) {
-		DP_ERR(edev, "Failed to start RXQ#%d\n", p_params->queue_id);
-		return rc;
-	}
-
-	DP_VERBOSE(edev, ECORE_MSG_SPQ,
-		   "Started RX-Q %d [rss_num %d] on V-PORT %d and SB %d\n",
-		   p_params->queue_id, rss_num, p_params->vport_id,
-		   p_params->sb);
-
-	return 0;
-}
-
-static int
-qed_stop_rxq(struct ecore_dev *edev, uint8_t rss_id, void *handle)
-{
-	int rc, hwfn_index;
-	struct ecore_hwfn *p_hwfn;
-
-	hwfn_index = rss_id % edev->num_hwfns;
-	p_hwfn = &edev->hwfns[hwfn_index];
-
-	rc = ecore_eth_rx_queue_stop(p_hwfn, handle, true, false);
-	if (rc) {
-		DP_ERR(edev, "Failed to stop RXQ#%02x\n", rss_id);
-		return rc;
-	}
-
-	return 0;
-}
-
-static int
-qed_start_txq(struct ecore_dev *edev,
-	      uint8_t rss_num,
-	      struct ecore_queue_start_common_params *p_params,
-	      dma_addr_t pbl_addr,
-	      uint16_t pbl_size,
-	      struct ecore_txq_start_ret_params *ret_params)
-{
-	struct ecore_hwfn *p_hwfn;
-	int rc, hwfn_index;
-
-	hwfn_index = rss_num % edev->num_hwfns;
-	p_hwfn = &edev->hwfns[hwfn_index];
-
-	p_params->queue_id = p_params->queue_id / edev->num_hwfns;
-	p_params->stats_id = p_params->vport_id;
-
-	rc = ecore_eth_tx_queue_start(p_hwfn,
-				      p_hwfn->hw_info.opaque_fid,
-				      p_params, 0 /* tc */,
-				      pbl_addr, pbl_size,
-				      ret_params);
-
-	if (rc) {
-		DP_ERR(edev, "Failed to start TXQ#%d\n", p_params->queue_id);
-		return rc;
-	}
-
-	DP_VERBOSE(edev, ECORE_MSG_SPQ,
-		   "Started TX-Q %d [rss_num %d] on V-PORT %d and SB %d\n",
-		   p_params->queue_id, rss_num, p_params->vport_id,
-		   p_params->sb);
-
-	return 0;
-}
-
-static int
-qed_stop_txq(struct ecore_dev *edev, uint8_t rss_id, void *handle)
-{
-	struct ecore_hwfn *p_hwfn;
-	int rc, hwfn_index;
-
-	hwfn_index = rss_id % edev->num_hwfns;
-	p_hwfn = &edev->hwfns[hwfn_index];
-
-	rc = ecore_eth_tx_queue_stop(p_hwfn, handle);
-	if (rc) {
-		DP_ERR(edev, "Failed to stop TXQ#%02x\n", rss_id);
-		return rc;
-	}
-
-	return 0;
-}
-
-static int
-qed_fp_cqe_completion(struct ecore_dev *edev,
-		      uint8_t rss_id, struct eth_slow_path_rx_cqe *cqe)
-{
-	return ecore_eth_cqe_completion(&edev->hwfns[rss_id % edev->num_hwfns],
-					cqe);
-}
-
-static int qed_fastpath_stop(struct ecore_dev *edev)
-{
-	ecore_hw_stop_fastpath(edev);
-
-	return 0;
-}
-
-static void qed_fastpath_start(struct ecore_dev *edev)
-{
-	struct ecore_hwfn *p_hwfn;
-	int i;
-
-	for_each_hwfn(edev, i) {
-		p_hwfn = &edev->hwfns[i];
-		ecore_hw_start_fastpath(p_hwfn);
-	}
-}
-
-static void
-qed_get_vport_stats(struct ecore_dev *edev, struct ecore_eth_stats *stats)
-{
-	ecore_get_vport_stats(edev, stats);
-}
-
-int qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
-				 enum qed_filter_rx_mode_type type)
-{
-	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
-	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-	struct ecore_filter_accept_flags flags;
-
-	memset(&flags, 0, sizeof(flags));
-
-	flags.update_rx_mode_config = 1;
-	flags.update_tx_mode_config = 1;
-	flags.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
-					ECORE_ACCEPT_MCAST_MATCHED |
-					ECORE_ACCEPT_BCAST;
-
-	flags.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
-				 ECORE_ACCEPT_MCAST_MATCHED |
-				 ECORE_ACCEPT_BCAST;
-
-	if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
-		flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
-		if (IS_VF(edev)) {
-			flags.tx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
-			DP_INFO(edev, "Enabling Tx unmatched flag for VF\n");
-		}
-	} else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
-		flags.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
-	} else if (type == (QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC |
-			    QED_FILTER_RX_MODE_TYPE_PROMISC)) {
-		flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED |
-					  ECORE_ACCEPT_MCAST_UNMATCHED;
-	}
-
-	return ecore_filter_accept_cmd(edev, 0, flags, false, false,
-				       ECORE_SPQ_MODE_CB, NULL);
-}
-
-static const struct qed_eth_ops qed_eth_ops_pass = {
-	INIT_STRUCT_FIELD(common, &qed_common_ops_pass),
-	INIT_STRUCT_FIELD(fill_dev_info, &qed_fill_eth_dev_info),
-	INIT_STRUCT_FIELD(q_rx_start, &qed_start_rxq),
-	INIT_STRUCT_FIELD(q_tx_start, &qed_start_txq),
-	INIT_STRUCT_FIELD(q_rx_stop, &qed_stop_rxq),
-	INIT_STRUCT_FIELD(q_tx_stop, &qed_stop_txq),
-	INIT_STRUCT_FIELD(eth_cqe_completion, &qed_fp_cqe_completion),
-	INIT_STRUCT_FIELD(fastpath_stop, &qed_fastpath_stop),
-	INIT_STRUCT_FIELD(fastpath_start, &qed_fastpath_start),
-	INIT_STRUCT_FIELD(get_vport_stats, &qed_get_vport_stats),
-};
-
-const struct qed_eth_ops *qed_get_eth_ops(void)
-{
-	return &qed_eth_ops_pass;
-}
diff --git a/drivers/net/qede/qede_eth_if.h b/drivers/net/qede/qede_eth_if.h
deleted file mode 100644
index be6c7c1..0000000
--- a/drivers/net/qede/qede_eth_if.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
- * All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
- */
-
-#ifndef _QEDE_ETH_IF_H
-#define _QEDE_ETH_IF_H
-
-#include "qede_if.h"
-
-/*forward decl */
-struct eth_slow_path_rx_cqe;
-
-#define INIT_STRUCT_FIELD(field, value) .field = value
-
-#define QEDE_MAX_MCAST_FILTERS		64
-
-enum qed_filter_rx_mode_type {
-	QED_FILTER_RX_MODE_TYPE_REGULAR,
-	QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC,
-	QED_FILTER_RX_MODE_TYPE_PROMISC,
-};
-
-enum qed_filter_type {
-	QED_FILTER_TYPE_UCAST,
-	QED_FILTER_TYPE_MCAST,
-	QED_FILTER_TYPE_RX_MODE,
-	QED_MAX_FILTER_TYPES,
-};
-
-struct qed_dev_eth_info {
-	struct qed_dev_info common;
-
-	uint8_t num_queues;
-	uint8_t num_tc;
-
-	struct ether_addr port_mac;
-	uint16_t num_vlan_filters;
-	uint32_t num_mac_filters;
-
-	/* Legacy VF - this affects the datapath */
-	bool is_legacy;
-};
-
-struct qed_eth_ops {
-	const struct qed_common_ops *common;
-
-	int (*fill_dev_info)(struct ecore_dev *edev,
-			     struct qed_dev_eth_info *info);
-
-	int (*q_rx_start)(struct ecore_dev *cdev,
-			  uint8_t rss_num,
-			  struct ecore_queue_start_common_params *p_params,
-			  uint16_t bd_max_bytes,
-			  dma_addr_t bd_chain_phys_addr,
-			  dma_addr_t cqe_pbl_addr,
-			  uint16_t cqe_pbl_size,
-			  struct ecore_rxq_start_ret_params *ret_params);
-
-	int (*q_rx_stop)(struct ecore_dev *edev,
-			 uint8_t rss_id, void *handle);
-
-	int (*q_tx_start)(struct ecore_dev *edev,
-			  uint8_t rss_num,
-			  struct ecore_queue_start_common_params *p_params,
-			  dma_addr_t pbl_addr,
-			  uint16_t pbl_size,
-			  struct ecore_txq_start_ret_params *ret_params);
-
-	int (*q_tx_stop)(struct ecore_dev *edev,
-			 uint8_t rss_id, void *handle);
-
-	int (*eth_cqe_completion)(struct ecore_dev *edev,
-				  uint8_t rss_id,
-				  struct eth_slow_path_rx_cqe *cqe);
-
-	int (*fastpath_stop)(struct ecore_dev *edev);
-
-	void (*fastpath_start)(struct ecore_dev *edev);
-
-	void (*get_vport_stats)(struct ecore_dev *edev,
-				struct ecore_eth_stats *stats);
-};
-
-/* externs */
-
-extern const struct qed_common_ops qed_common_ops_pass;
-
-const struct qed_eth_ops *qed_get_eth_ops(void);
-
-int qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
-				 enum qed_filter_rx_mode_type type);
-
-#endif /* _QEDE_ETH_IF_H */
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index f8d9f7c..6380c2b 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -294,7 +294,6 @@ static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
 qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
 {
 	rte_memcpy(&qdev->dev_info, info, sizeof(*info));
-	qdev->num_tc = qdev->dev_info.num_tc;
 	qdev->ops = qed_ops;
 }
 
@@ -509,6 +508,43 @@ static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast)
 	/* ucast->assert_on_error = true; - For debug */
 }
 
+static int
+qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
+			     enum qed_filter_rx_mode_type type)
+{
+	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+	struct ecore_filter_accept_flags flags;
+
+	memset(&flags, 0, sizeof(flags));
+
+	flags.update_rx_mode_config = 1;
+	flags.update_tx_mode_config = 1;
+	flags.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
+		ECORE_ACCEPT_MCAST_MATCHED |
+		ECORE_ACCEPT_BCAST;
+
+	flags.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
+		ECORE_ACCEPT_MCAST_MATCHED |
+		ECORE_ACCEPT_BCAST;
+
+	if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
+		flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
+		if (IS_VF(edev)) {
+			flags.tx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
+			DP_INFO(edev, "Enabling Tx unmatched flag for VF\n");
+		}
+	} else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
+		flags.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
+	} else if (type == (QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC |
+				QED_FILTER_RX_MODE_TYPE_PROMISC)) {
+		flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED |
+			ECORE_ACCEPT_MCAST_UNMATCHED;
+	}
+
+	return ecore_filter_accept_cmd(edev, 0, flags, false, false,
+			ECORE_SPQ_MODE_CB, NULL);
+}
 static void qede_set_cmn_tunn_param(struct ecore_tunnel_info *p_tunn,
 				    uint8_t clss, bool mode, bool mask)
 {
@@ -971,66 +1007,151 @@ int qede_config_rss(struct rte_eth_dev *eth_dev)
 	return 0;
 }
 
+static void qede_fastpath_start(struct ecore_dev *edev)
+{
+	struct ecore_hwfn *p_hwfn;
+	int i;
+
+	for_each_hwfn(edev, i) {
+		p_hwfn = &edev->hwfns[i];
+		ecore_hw_start_fastpath(p_hwfn);
+	}
+}
+
+static int qede_dev_start(struct rte_eth_dev *eth_dev)
+{
+	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+
+	PMD_INIT_FUNC_TRACE(edev);
+
+	/* Update MTU only if it has changed */
+	if (qdev->mtu != qdev->new_mtu) {
+		if (qede_update_mtu(eth_dev, qdev->new_mtu))
+			goto err;
+		qdev->mtu = qdev->new_mtu;
+		/* If MTU has changed then update TPA too */
+		if (qdev->enable_lro)
+			if (qede_enable_tpa(eth_dev, true))
+				goto err;
+	}
+
+	/* Start queues */
+	if (qede_start_queues(eth_dev))
+		goto err;
+
+	/* Newer SR-IOV PF driver expects RX/TX queues to be started before
+	 * enabling RSS. Hence RSS configuration is deferred up to this point.
+	 * Also, we would like to retain similar behavior in the PF case, so we
+	 * don't do PF/VF specific check here.
+	 */
+	if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+		if (qede_config_rss(eth_dev))
+			goto err;
+
+	/* Enable vport */
+	if (qede_activate_vport(eth_dev, true))
+		goto err;
+
+	/* Bring-up the link */
+	qede_dev_set_link_state(eth_dev, true);
+
+	/* Start/resume traffic */
+	qede_fastpath_start(edev);
+
+	DP_INFO(edev, "Device started\n");
+
+	return 0;
+err:
+	DP_ERR(edev, "Device start failed\n");
+	return -1; /* common error code is < 0 */
+}
+
+static void qede_dev_stop(struct rte_eth_dev *eth_dev)
+{
+	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+
+	PMD_INIT_FUNC_TRACE(edev);
+
+	/* Disable vport */
+	if (qede_activate_vport(eth_dev, false))
+		return;
+
+	if (qdev->enable_lro)
+		qede_enable_tpa(eth_dev, false);
+
+	/* TODO: Do we need to disable LRO or RSS? */
+	/* Stop queues */
+	qede_stop_queues(eth_dev);
+
+	/* Disable traffic */
+	ecore_hw_stop_fastpath(edev); /* TBD - loop */
+
+	/* Bring the link down */
+	qede_dev_set_link_state(eth_dev, false);
+
+	DP_INFO(edev, "Device is stopped\n");
+}
+
 static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 {
-	struct qede_dev *qdev = eth_dev->data->dev_private;
-	struct ecore_dev *edev = &qdev->edev;
+	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
 	struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
-	int rc;
 
 	PMD_INIT_FUNC_TRACE(edev);
 
 	/* Check requirements for 100G mode */
 	if (edev->num_hwfns > 1) {
 		if (eth_dev->data->nb_rx_queues < 2 ||
-		    eth_dev->data->nb_tx_queues < 2) {
+				eth_dev->data->nb_tx_queues < 2) {
 			DP_ERR(edev, "100G mode needs min. 2 RX/TX queues\n");
 			return -EINVAL;
 		}
 
 		if ((eth_dev->data->nb_rx_queues % 2 != 0) ||
-		    (eth_dev->data->nb_tx_queues % 2 != 0)) {
+				(eth_dev->data->nb_tx_queues % 2 != 0)) {
 			DP_ERR(edev,
-				  "100G mode needs even no. of RX/TX queues\n");
+					"100G mode needs even no. of RX/TX queues\n");
 			return -EINVAL;
 		}
 	}
 
 	/* Sanity checks and throw warnings */
-	if (rxmode->enable_scatter == 1)
+	if (rxmode->enable_scatter)
 		eth_dev->data->scattered_rx = 1;
-
 	if (!rxmode->hw_strip_crc)
 		DP_INFO(edev, "L2 CRC stripping is always enabled in hw\n");
-
 	if (!rxmode->hw_ip_checksum)
 		DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled "
-			      "in hw\n");
-
-	if (rxmode->enable_lro) {
-		qdev->enable_lro = true;
-		/* Enable scatter mode for LRO */
-		if (!rxmode->enable_scatter)
-			eth_dev->data->scattered_rx = 1;
+				"in hw\n");
+	if (rxmode->header_split)
+		DP_INFO(edev, "Header split enable is not supported\n");
+	if (!(rxmode->mq_mode == ETH_MQ_RX_NONE || rxmode->mq_mode ==
+				ETH_MQ_RX_RSS)) {
+		DP_ERR(edev, "Unsupported multi-queue mode\n");
+		return -ENOTSUP;
 	}
+	/* Flow director mode check */
+	if (qede_check_fdir_support(eth_dev))
+		return -ENOTSUP;
 
-	/* Check for the port restart case */
-	if (qdev->state != QEDE_DEV_INIT) {
-		qede_stop_vport(edev);
+	/* Deallocate resources held previously. This is needed only if the
+	 * queue count has changed from the previous configuration. If it is
+	 * going to change, RX/TX queue setup will be called again and the
+	 * fastpath pointers will be reinitialized there.
+	 */
+	if (qdev->num_tx_queues != eth_dev->data->nb_tx_queues ||
+			qdev->num_rx_queues != eth_dev->data->nb_rx_queues) {
 		qede_dealloc_fp_resc(eth_dev);
+		/* Proceed with updated queue count */
+		qdev->num_tx_queues = eth_dev->data->nb_tx_queues;
+		qdev->num_rx_queues = eth_dev->data->nb_rx_queues;
+		if (qede_alloc_fp_resc(qdev))
+			return -ENOMEM;
 	}
 
-	qdev->fp_num_tx = eth_dev->data->nb_tx_queues;
-	qdev->fp_num_rx = eth_dev->data->nb_rx_queues;
-	qdev->num_queues = qdev->fp_num_tx + qdev->fp_num_rx;
-
-	/* Fastpath status block should be initialized before sending
-	 * VPORT-START in the case of VF. Anyway, do it for both VF/PF.
-	 */
-	rc = qede_alloc_fp_resc(qdev);
-	if (rc != 0)
-		return rc;
-
 	/* VF's MTU has to be set using vport-start where as
 	 * PF's MTU can be updated via vport-update.
 	 */
@@ -1045,35 +1166,23 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 	qdev->mtu = rxmode->max_rx_pkt_len;
 	qdev->new_mtu = qdev->mtu;
 
-	if (!(rxmode->mq_mode == ETH_MQ_RX_RSS ||
-	    rxmode->mq_mode == ETH_MQ_RX_NONE)) {
-		DP_ERR(edev, "Unsupported RSS mode\n");
-		qede_stop_vport(edev);
-		qede_dealloc_fp_resc(eth_dev);
-		return -EINVAL;
-	}
-
-	/* Flow director mode check */
-	rc = qede_check_fdir_support(eth_dev);
-	if (rc) {
-		qede_stop_vport(edev);
-		qede_dealloc_fp_resc(eth_dev);
-		return -EINVAL;
+	/* Configure TPA parameters */
+	if (rxmode->enable_lro) {
+		if (qede_enable_tpa(eth_dev, true))
+			return -EINVAL;
+		/* Enable scatter mode for LRO */
+		if (!rxmode->enable_scatter)
+			eth_dev->data->scattered_rx = 1;
 	}
-	SLIST_INIT(&qdev->fdir_info.fdir_list_head);
-
-	SLIST_INIT(&qdev->vlan_list_head);
+	qdev->enable_lro = rxmode->enable_lro;
 
 	/* Enable VLAN offloads by default */
 	qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK  |
-				       ETH_VLAN_FILTER_MASK |
-				       ETH_VLAN_EXTEND_MASK);
-
-	qdev->state = QEDE_DEV_CONFIG;
+			ETH_VLAN_FILTER_MASK |
+			ETH_VLAN_EXTEND_MASK);
 
-	DP_INFO(edev, "Allocated RSS=%d TSS=%d (with CoS=%d)\n",
-		(int)QEDE_RSS_COUNT(qdev), (int)QEDE_TSS_COUNT(qdev),
-		qdev->num_tc);
+	DP_INFO(edev, "Device configured with RSS=%d TSS=%d\n",
+			QEDE_RSS_COUNT(qdev), QEDE_TSS_COUNT(qdev));
 
 	return 0;
 }
@@ -1264,38 +1373,38 @@ static void qede_dev_close(struct rte_eth_dev *eth_dev)
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+	uint8_t i;
 
 	PMD_INIT_FUNC_TRACE(edev);
 
-	qede_fdir_dealloc_resc(eth_dev);
-
 	/* dev_stop() shall cleanup fp resources in hw but without releasing
 	 * dma memories and sw structures so that dev_start() can be called
 	 * by the app without reconfiguration. However, in dev_close() we
 	 * can release all the resources and device can be brought up newly
 	 */
-	if (qdev->state != QEDE_DEV_STOP)
+	if (eth_dev->data->dev_started)
 		qede_dev_stop(eth_dev);
-	else
-		DP_INFO(edev, "Device is already stopped\n");
 
 	qede_stop_vport(edev);
-
+	qede_fdir_dealloc_resc(eth_dev);
 	qede_dealloc_fp_resc(eth_dev);
 
-	qdev->ops->common->slowpath_stop(edev);
+	for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
+		if (eth_dev->data->rx_queues[i])
+			eth_dev->data->rx_queues[i] = NULL;
+	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
+		if (eth_dev->data->tx_queues[i])
+			eth_dev->data->tx_queues[i] = NULL;
+	eth_dev->data->nb_rx_queues = 0;
+	eth_dev->data->nb_tx_queues = 0;
 
+	qdev->ops->common->slowpath_stop(edev);
 	qdev->ops->common->remove(edev);
-
 	rte_intr_disable(&pci_dev->intr_handle);
-
 	rte_intr_callback_unregister(&pci_dev->intr_handle,
 				     qede_interrupt_handler, (void *)eth_dev);
-
 	if (edev->num_hwfns > 1)
 		rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);
-
-	qdev->state = QEDE_DEV_INIT; /* Go back to init state */
 }
 
 static void
@@ -1308,7 +1417,7 @@ static void qede_dev_close(struct rte_eth_dev *eth_dev)
 	unsigned int rxq_stat_cntrs, txq_stat_cntrs;
 	struct qede_tx_queue *txq;
 
-	qdev->ops->get_vport_stats(edev, &stats);
+	ecore_get_vport_stats(edev, &stats);
 
 	/* RX Stats */
 	eth_stats->ipackets = stats.rx_ucast_pkts +
@@ -1350,38 +1459,34 @@ static void qede_dev_close(struct rte_eth_dev *eth_dev)
 		       " RTE_ETHDEV_QUEUE_STAT_CNTRS config param"
 		       " appropriately and retry.\n");
 
-	for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
-		if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) {
-			eth_stats->q_ipackets[i] =
-				*(uint64_t *)(
-					((char *)(qdev->fp_array[(qid)].rxq)) +
-					offsetof(struct qede_rx_queue,
-					rcv_pkts));
-			eth_stats->q_errors[i] =
-				*(uint64_t *)(
-					((char *)(qdev->fp_array[(qid)].rxq)) +
-					offsetof(struct qede_rx_queue,
-					rx_hw_errors)) +
-				*(uint64_t *)(
-					((char *)(qdev->fp_array[(qid)].rxq)) +
-					offsetof(struct qede_rx_queue,
-					rx_alloc_errors));
-			i++;
-		}
+	for_each_rss(qid) {
+		eth_stats->q_ipackets[i] =
+			*(uint64_t *)(
+				((char *)(qdev->fp_array[qid].rxq)) +
+				offsetof(struct qede_rx_queue,
+				rcv_pkts));
+		eth_stats->q_errors[i] =
+			*(uint64_t *)(
+				((char *)(qdev->fp_array[qid].rxq)) +
+				offsetof(struct qede_rx_queue,
+				rx_hw_errors)) +
+			*(uint64_t *)(
+				((char *)(qdev->fp_array[qid].rxq)) +
+				offsetof(struct qede_rx_queue,
+				rx_alloc_errors));
+		i++;
 		if (i == rxq_stat_cntrs)
 			break;
 	}
 
-	for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
-		if (qdev->fp_array[qid].type & QEDE_FASTPATH_TX) {
-			txq = qdev->fp_array[(qid)].txqs[0];
-			eth_stats->q_opackets[j] =
-				*((uint64_t *)(uintptr_t)
-					(((uint64_t)(uintptr_t)(txq)) +
-					 offsetof(struct qede_tx_queue,
-						  xmit_pkts)));
-			j++;
-		}
+	for_each_tss(qid) {
+		txq = qdev->fp_array[qid].txq;
+		eth_stats->q_opackets[j] =
+			*((uint64_t *)(uintptr_t)
+				(((uint64_t)(uintptr_t)(txq)) +
+				 offsetof(struct qede_tx_queue,
+					  xmit_pkts)));
+		j++;
 		if (j == txq_stat_cntrs)
 			break;
 	}
@@ -1445,7 +1550,7 @@ static void qede_dev_close(struct rte_eth_dev *eth_dev)
 	if (n < num)
 		return num;
 
-	qdev->ops->get_vport_stats(edev, &stats);
+	ecore_get_vport_stats(edev, &stats);
 
 	for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
 		xstats[stat_idx].value = *(uint64_t *)(((char *)&stats) +
@@ -1457,10 +1562,10 @@ static void qede_dev_close(struct rte_eth_dev *eth_dev)
 	rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
 				 RTE_ETHDEV_QUEUE_STAT_CNTRS);
 	for (qid = 0; qid < rxq_stat_cntrs; qid++) {
-		if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) {
+		for_each_rss(qid) {
 			for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
 				xstats[stat_idx].value = *(uint64_t *)(
-					((char *)(qdev->fp_array[(qid)].rxq)) +
+					((char *)(qdev->fp_array[qid].rxq)) +
 					 qede_rxq_xstats_strings[i].offset);
 				xstats[stat_idx].id = stat_idx;
 				stat_idx++;
@@ -1878,6 +1983,8 @@ static int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
 	return 0;
 }
 
+
+
 static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 {
 	struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
@@ -1911,19 +2018,17 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 	rte_delay_ms(1000);
 	qdev->mtu = mtu;
 	/* Fix up RX buf size for all queues of the port */
-	for_each_queue(i) {
+	for_each_rss(i) {
 		fp = &qdev->fp_array[i];
-		if (fp->type & QEDE_FASTPATH_RX) {
-			bufsz = (uint16_t)rte_pktmbuf_data_room_size(
-				fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
-			if (dev->data->scattered_rx)
-				rx_buf_size = bufsz + QEDE_ETH_OVERHEAD;
-			else
-				rx_buf_size = mtu + QEDE_ETH_OVERHEAD;
-			rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
-			fp->rxq->rx_buf_size = rx_buf_size;
-			DP_INFO(edev, "buf_size adjusted to %u\n", rx_buf_size);
-		}
+		bufsz = (uint16_t)rte_pktmbuf_data_room_size(
+			fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
+		if (dev->data->scattered_rx)
+			rx_buf_size = bufsz + QEDE_ETH_OVERHEAD;
+		else
+			rx_buf_size = mtu + QEDE_ETH_OVERHEAD;
+		rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
+		fp->rxq->rx_buf_size = rx_buf_size;
+		DP_INFO(edev, "buf_size adjusted to %u\n", rx_buf_size);
 	}
 	qede_dev_start(dev);
 	if (frame_size > ETHER_MAX_LEN)
@@ -2350,20 +2455,15 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
 	}
 
 	DP_INFO(edev, "Starting qede probe\n");
-
-	rc = qed_ops->common->probe(edev, pci_dev, QED_PROTOCOL_ETH,
-				    dp_module, dp_level, is_vf);
-
+	rc = qed_ops->common->probe(edev, pci_dev, dp_module,
+				    dp_level, is_vf);
 	if (rc != 0) {
 		DP_ERR(edev, "qede probe failed rc %d\n", rc);
 		return -ENODEV;
 	}
-
 	qede_update_pf_params(edev);
-
 	rte_intr_callback_register(&pci_dev->intr_handle,
 				   qede_interrupt_handler, (void *)eth_dev);
-
 	if (rte_intr_enable(&pci_dev->intr_handle)) {
 		DP_ERR(edev, "rte_intr_enable() failed\n");
 		return -ENODEV;
@@ -2476,7 +2576,11 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
 		do_once = false;
 	}
 
-	adapter->state = QEDE_DEV_INIT;
+	adapter->num_tx_queues = 0;
+	adapter->num_rx_queues = 0;
+	SLIST_INIT(&adapter->fdir_info.fdir_list_head);
+	SLIST_INIT(&adapter->vlan_list_head);
+	SLIST_INIT(&adapter->uc_list_head);
 	adapter->mtu = ETHER_MTU;
 	adapter->new_mtu = ETHER_MTU;
 	if (!is_vf)
@@ -2491,7 +2595,9 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
 		  adapter->primary_mac.addr_bytes[4],
 		  adapter->primary_mac.addr_bytes[5]);
 
-	return rc;
+	DP_INFO(edev, "Device initialized\n");
+
+	return 0;
 }
 
 static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev)
diff --git a/drivers/net/qede/qede_ethdev.h b/drivers/net/qede/qede_ethdev.h
index 62b1de4..5bf431e 100644
--- a/drivers/net/qede/qede_ethdev.h
+++ b/drivers/net/qede/qede_ethdev.h
@@ -41,8 +41,6 @@
 
 #include "qede_logs.h"
 #include "qede_if.h"
-#include "qede_eth_if.h"
-
 #include "qede_rxtx.h"
 
 #define qede_stringify1(x...)		#x
@@ -73,12 +71,8 @@
 					(edev)->dev_info.num_tc)
 
 #define QEDE_QUEUE_CNT(qdev) ((qdev)->num_queues)
-#define QEDE_RSS_COUNT(qdev) ((qdev)->num_queues - (qdev)->fp_num_tx)
-#define QEDE_TSS_COUNT(qdev) (((qdev)->num_queues - (qdev)->fp_num_rx) * \
-					(qdev)->num_tc)
-
-#define QEDE_FASTPATH_TX        (1 << 0)
-#define QEDE_FASTPATH_RX        (1 << 1)
+#define QEDE_RSS_COUNT(qdev) ((qdev)->num_rx_queues)
+#define QEDE_TSS_COUNT(qdev) ((qdev)->num_tx_queues)
 
 #define QEDE_DUPLEX_FULL	1
 #define QEDE_DUPLEX_HALF	2
@@ -138,12 +132,12 @@
 /* Maximum number of flowdir filters */
 #define QEDE_RFS_MAX_FLTR		(256)
 
-/* Port/function states */
-enum qede_dev_state {
-	QEDE_DEV_INIT, /* Init the chip and Slowpath */
-	QEDE_DEV_CONFIG, /* Create Vport/Fastpath resources */
-	QEDE_DEV_START, /* Start RX/TX queues, enable traffic */
-	QEDE_DEV_STOP, /* Deactivate vport and stop traffic */
+#define QEDE_MAX_MCAST_FILTERS		(64)
+
+enum qed_filter_rx_mode_type {
+	QED_FILTER_RX_MODE_TYPE_REGULAR,
+	QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC,
+	QED_FILTER_RX_MODE_TYPE_PROMISC,
 };
 
 struct qede_vlan_entry {
@@ -183,12 +177,10 @@ struct qede_fdir_info {
  */
 struct qede_dev {
 	struct ecore_dev edev;
-	uint8_t protocol;
 	const struct qed_eth_ops *ops;
 	struct qed_dev_eth_info dev_info;
 	struct ecore_sb_info *sb_array;
 	struct qede_fastpath *fp_array;
-	uint8_t num_tc;
 	uint16_t mtu;
 	uint16_t new_mtu;
 	bool rss_enable;
@@ -197,10 +189,8 @@ struct qede_dev {
 	uint64_t rss_hf;
 	uint8_t rss_key_len;
 	bool enable_lro;
-	uint16_t num_queues;
-	uint8_t fp_num_tx;
-	uint8_t fp_num_rx;
-	enum qede_dev_state state;
+	uint8_t num_rx_queues;
+	uint8_t num_tx_queues;
 	SLIST_HEAD(vlan_list_head, qede_vlan_entry)vlan_list_head;
 	uint16_t configured_vlans;
 	bool accept_any_vlan;
diff --git a/drivers/net/qede/qede_if.h b/drivers/net/qede/qede_if.h
index 405c525..9864bb4 100644
--- a/drivers/net/qede/qede_if.h
+++ b/drivers/net/qede/qede_if.h
@@ -50,14 +50,26 @@ struct qed_dev_info {
 	bool geneve_enable;
 };
 
-enum qed_sb_type {
-	QED_SB_TYPE_L2_QUEUE,
-	QED_SB_TYPE_STORAGE,
-	QED_SB_TYPE_CNQ,
+struct qed_dev_eth_info {
+	struct qed_dev_info common;
+
+	uint8_t num_queues;
+	uint8_t num_tc;
+
+	struct ether_addr port_mac;
+	uint16_t num_vlan_filters;
+	uint32_t num_mac_filters;
+
+	/* Legacy VF - this affects the datapath */
+	bool is_legacy;
 };
 
-enum qed_protocol {
-	QED_PROTOCOL_ETH,
+#define INIT_STRUCT_FIELD(field, value) .field = value
+
+struct qed_eth_ops {
+	const struct qed_common_ops *common;
+	int (*fill_dev_info)(struct ecore_dev *edev,
+			     struct qed_dev_eth_info *info);
 };
 
 struct qed_link_params {
@@ -99,64 +111,13 @@ struct qed_slowpath_params {
 	uint8_t name[NAME_SIZE];
 };
 
-#define ILT_PAGE_SIZE_TCFC 0x8000	/* 32KB */
-
-struct qed_eth_tlvs {
-	u16 feat_flags;
-	u8 mac[3][ETH_ALEN];
-	u16 lso_maxoff;
-	u16 lso_minseg;
-	bool prom_mode;
-	u16 num_txqs;
-	u16 num_rxqs;
-	u16 num_netqs;
-	u16 flex_vlan;
-	u32 tcp4_offloads;
-	u32 tcp6_offloads;
-	u16 tx_avg_qdepth;
-	u16 rx_avg_qdepth;
-	u8 txqs_empty;
-	u8 rxqs_empty;
-	u8 num_txqs_full;
-	u8 num_rxqs_full;
-};
-
-struct qed_tunn_update_params {
-	unsigned long   tunn_mode_update_mask;
-	unsigned long   tunn_mode;
-	u16             vxlan_udp_port;
-	u16             geneve_udp_port;
-	u8              update_rx_pf_clss;
-	u8              update_tx_pf_clss;
-	u8              update_vxlan_udp_port;
-	u8              update_geneve_udp_port;
-	u8              tunn_clss_vxlan;
-	u8              tunn_clss_l2geneve;
-	u8              tunn_clss_ipgeneve;
-	u8              tunn_clss_l2gre;
-	u8              tunn_clss_ipgre;
-};
-
 struct qed_common_cb_ops {
 	void (*link_update)(void *dev, struct qed_link_output *link);
-	void (*get_tlv_data)(void *dev, struct qed_eth_tlvs *data);
-};
-
-struct qed_selftest_ops {
-/**
- * @brief registers - Perform register tests
- *
- * @param edev
- *
- * @return 0 on success, error otherwise.
- */
-	int (*registers)(struct ecore_dev *edev);
 };
 
 struct qed_common_ops {
 	int (*probe)(struct ecore_dev *edev,
 		     struct rte_pci_device *pci_dev,
-		     enum qed_protocol protocol,
 		     uint32_t dp_module, uint8_t dp_level, bool is_vf);
 	void (*set_name)(struct ecore_dev *edev, char name[]);
 	enum _ecore_status_t
@@ -196,7 +157,7 @@ struct qed_common_ops {
 			    struct ecore_sb_info *sb_info,
 			    void *sb_virt_addr,
 			    dma_addr_t sb_phy_addr,
-			    uint16_t sb_id, enum qed_sb_type type);
+			    uint16_t sb_id);
 
 	int (*get_sb_info)(struct ecore_dev *edev,
 			   struct ecore_sb_info *sb, u16 qid,
@@ -210,4 +171,8 @@ struct qed_common_ops {
 	int (*send_drv_state)(struct ecore_dev *edev, bool active);
 };
 
+/* Externs */
+
+const struct qed_eth_ops *qed_get_eth_ops(void);
+
 #endif /* _QEDE_IF_H */
diff --git a/drivers/net/qede/qede_main.c b/drivers/net/qede/qede_main.c
index 712c03f..4b85814 100644
--- a/drivers/net/qede/qede_main.c
+++ b/drivers/net/qede/qede_main.c
@@ -40,16 +40,14 @@ static void qed_init_pci(struct ecore_dev *edev, struct rte_pci_device *pci_dev)
 
 static int
 qed_probe(struct ecore_dev *edev, struct rte_pci_device *pci_dev,
-	  enum qed_protocol protocol, uint32_t dp_module,
-	  uint8_t dp_level, bool is_vf)
+	  uint32_t dp_module, uint8_t dp_level, bool is_vf)
 {
 	struct ecore_hw_prepare_params hw_prepare_params;
-	struct qede_dev *qdev = (struct qede_dev *)edev;
 	int rc;
 
 	ecore_init_struct(edev);
 	edev->drv_type = DRV_ID_DRV_TYPE_LINUX;
-	qdev->protocol = protocol;
+	/* Protocol type is always fixed to PROTOCOL_ETH */
 
 	if (is_vf)
 		edev->b_is_vf = true;
@@ -460,23 +458,14 @@ static void qed_set_name(struct ecore_dev *edev, char name[NAME_SIZE])
 
 static uint32_t
 qed_sb_init(struct ecore_dev *edev, struct ecore_sb_info *sb_info,
-	    void *sb_virt_addr, dma_addr_t sb_phy_addr,
-	    uint16_t sb_id, enum qed_sb_type type)
+	    void *sb_virt_addr, dma_addr_t sb_phy_addr, uint16_t sb_id)
 {
 	struct ecore_hwfn *p_hwfn;
 	int hwfn_index;
 	uint16_t rel_sb_id;
-	uint8_t n_hwfns;
+	uint8_t n_hwfns = edev->num_hwfns;
 	uint32_t rc;
 
-	/* RoCE uses single engine and CMT uses two engines. When using both
-	 * we force only a single engine. Storage uses only engine 0 too.
-	 */
-	if (type == QED_SB_TYPE_L2_QUEUE)
-		n_hwfns = edev->num_hwfns;
-	else
-		n_hwfns = 1;
-
 	hwfn_index = sb_id % n_hwfns;
 	p_hwfn = &edev->hwfns[hwfn_index];
 	rel_sb_id = sb_id / n_hwfns;
@@ -737,3 +726,13 @@ static int qed_get_sb_info(struct ecore_dev *edev, struct ecore_sb_info *sb,
 	INIT_STRUCT_FIELD(remove, &qed_remove),
 	INIT_STRUCT_FIELD(send_drv_state, &qed_send_drv_state),
 };
+
+const struct qed_eth_ops qed_eth_ops_pass = {
+	INIT_STRUCT_FIELD(common, &qed_common_ops_pass),
+	INIT_STRUCT_FIELD(fill_dev_info, &qed_fill_eth_dev_info),
+};
+
+const struct qed_eth_ops *qed_get_eth_ops(void)
+{
+	return &qed_eth_ops_pass;
+}
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index 67496f3..00fda8c 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -37,64 +37,20 @@ static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
 	return 0;
 }
 
-static void qede_rx_queue_release_mbufs(struct qede_rx_queue *rxq)
-{
-	uint16_t i;
-
-	if (rxq->sw_rx_ring != NULL) {
-		for (i = 0; i < rxq->nb_rx_desc; i++) {
-			if (rxq->sw_rx_ring[i].mbuf != NULL) {
-				rte_pktmbuf_free(rxq->sw_rx_ring[i].mbuf);
-				rxq->sw_rx_ring[i].mbuf = NULL;
-			}
-		}
-	}
-}
-
-void qede_rx_queue_release(void *rx_queue)
-{
-	struct qede_rx_queue *rxq = rx_queue;
-
-	if (rxq != NULL) {
-		qede_rx_queue_release_mbufs(rxq);
-		rte_free(rxq->sw_rx_ring);
-		rxq->sw_rx_ring = NULL;
-		rte_free(rxq);
-		rxq = NULL;
-	}
-}
-
-static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq)
-{
-	unsigned int i;
-
-	PMD_TX_LOG(DEBUG, txq, "releasing %u mbufs", txq->nb_tx_desc);
-
-	if (txq->sw_tx_ring) {
-		for (i = 0; i < txq->nb_tx_desc; i++) {
-			if (txq->sw_tx_ring[i].mbuf) {
-				rte_pktmbuf_free(txq->sw_tx_ring[i].mbuf);
-				txq->sw_tx_ring[i].mbuf = NULL;
-			}
-		}
-	}
-}
-
 int
 qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 		    uint16_t nb_desc, unsigned int socket_id,
 		    __rte_unused const struct rte_eth_rxconf *rx_conf,
 		    struct rte_mempool *mp)
 {
-	struct qede_dev *qdev = dev->data->dev_private;
-	struct ecore_dev *edev = &qdev->edev;
+	struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
+	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
 	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
 	struct qede_rx_queue *rxq;
 	uint16_t max_rx_pkt_len;
 	uint16_t bufsz;
 	size_t size;
 	int rc;
-	int i;
 
 	PMD_INIT_FUNC_TRACE(edev);
 
@@ -157,7 +113,6 @@ static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq)
 			  "Unable to alloc memory for sw_rx_ring on socket %u\n",
 			  socket_id);
 		rte_free(rxq);
-		rxq = NULL;
 		return -ENOMEM;
 	}
 
@@ -176,9 +131,7 @@ static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq)
 			  "Unable to alloc memory for rxbd ring on socket %u\n",
 			  socket_id);
 		rte_free(rxq->sw_rx_ring);
-		rxq->sw_rx_ring = NULL;
 		rte_free(rxq);
-		rxq = NULL;
 		return -ENOMEM;
 	}
 
@@ -198,45 +151,87 @@ static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq)
 			  socket_id);
 		/* TBD: Freeing RX BD ring */
 		rte_free(rxq->sw_rx_ring);
-		rxq->sw_rx_ring = NULL;
 		rte_free(rxq);
 		return -ENOMEM;
 	}
 
-	/* Allocate buffers for the Rx ring */
-	for (i = 0; i < rxq->nb_rx_desc; i++) {
-		rc = qede_alloc_rx_buffer(rxq);
-		if (rc) {
-			DP_NOTICE(edev, false,
-				  "RX buffer allocation failed at idx=%d\n", i);
-			goto err4;
-		}
-	}
-
 	dev->data->rx_queues[queue_idx] = rxq;
+	qdev->fp_array[queue_idx].rxq = rxq;
 
 	DP_INFO(edev, "rxq %d num_desc %u rx_buf_size=%u socket %u\n",
 		  queue_idx, nb_desc, qdev->mtu, socket_id);
 
 	return 0;
-err4:
-	qede_rx_queue_release(rxq);
-	return -ENOMEM;
 }
 
-void qede_tx_queue_release(void *tx_queue)
+static void
+qede_rx_queue_reset(__rte_unused struct qede_dev *qdev,
+		    struct qede_rx_queue *rxq)
 {
-	struct qede_tx_queue *txq = tx_queue;
+	DP_INFO(&qdev->edev, "Reset RX queue %u\n", rxq->queue_id);
+	ecore_chain_reset(&rxq->rx_bd_ring);
+	ecore_chain_reset(&rxq->rx_comp_ring);
+	rxq->sw_rx_prod = 0;
+	rxq->sw_rx_cons = 0;
+	*rxq->hw_cons_ptr = 0;
+}
 
-	if (txq != NULL) {
-		qede_tx_queue_release_mbufs(txq);
-		if (txq->sw_tx_ring) {
-			rte_free(txq->sw_tx_ring);
-			txq->sw_tx_ring = NULL;
+static void qede_rx_queue_release_mbufs(struct qede_rx_queue *rxq)
+{
+	uint16_t i;
+
+	if (rxq->sw_rx_ring) {
+		for (i = 0; i < rxq->nb_rx_desc; i++) {
+			if (rxq->sw_rx_ring[i].mbuf) {
+				rte_pktmbuf_free(rxq->sw_rx_ring[i].mbuf);
+				rxq->sw_rx_ring[i].mbuf = NULL;
+			}
 		}
-		rte_free(txq);
 	}
-	txq = NULL;
+}
+
+void qede_rx_queue_release(void *rx_queue)
+{
+	struct qede_rx_queue *rxq = rx_queue;
+
+	if (rxq) {
+		qede_rx_queue_release_mbufs(rxq);
+		rte_free(rxq->sw_rx_ring);
+		rte_free(rxq);
+	}
+}
+
+/* Stops a given RX queue in the HW */
+static int qede_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+	struct ecore_hwfn *p_hwfn;
+	struct qede_rx_queue *rxq;
+	int hwfn_index;
+	int rc;
+
+	if (rx_queue_id < eth_dev->data->nb_rx_queues) {
+		rxq = eth_dev->data->rx_queues[rx_queue_id];
+		hwfn_index = rx_queue_id % edev->num_hwfns;
+		p_hwfn = &edev->hwfns[hwfn_index];
+		rc = ecore_eth_rx_queue_stop(p_hwfn, rxq->handle,
+				true, false);
+		if (rc != ECORE_SUCCESS) {
+			DP_ERR(edev, "RX queue %u stop fails\n", rx_queue_id);
+			return -1;
+		}
+		qede_rx_queue_release_mbufs(rxq);
+		qede_rx_queue_reset(qdev, rxq);
+		eth_dev->data->rx_queue_state[rx_queue_id] =
+			RTE_ETH_QUEUE_STATE_STOPPED;
+		DP_INFO(edev, "RX queue %u stopped\n", rx_queue_id);
+	} else {
+		DP_ERR(edev, "RX queue %u is not in range\n", rx_queue_id);
+		rc = -EINVAL;
+	}
+
+	return rc;
 }
 
 int
@@ -318,6 +313,7 @@ void qede_tx_queue_release(void *tx_queue)
 	    (txq->nb_tx_desc - QEDE_DEFAULT_TX_FREE_THRESH);
 
 	dev->data->tx_queues[queue_idx] = txq;
+	qdev->fp_array[queue_idx].txq = txq;
 
 	DP_INFO(edev,
 		  "txq %u num_desc %u tx_free_thresh %u socket %u\n",
@@ -326,71 +322,40 @@ void qede_tx_queue_release(void *tx_queue)
 	return 0;
 }
 
-/* This function inits fp content and resets the SB, RXQ and TXQ arrays */
-static void qede_init_fp(struct qede_dev *qdev)
+static void
+qede_tx_queue_reset(__rte_unused struct qede_dev *qdev,
+		    struct qede_tx_queue *txq)
 {
-	struct qede_fastpath *fp;
-	uint8_t i;
-	int fp_rx = qdev->fp_num_rx;
-
-	memset((void *)qdev->fp_array, 0, (QEDE_QUEUE_CNT(qdev) *
-					   sizeof(*qdev->fp_array)));
-	memset((void *)qdev->sb_array, 0, (QEDE_QUEUE_CNT(qdev) *
-					   sizeof(*qdev->sb_array)));
-	for_each_queue(i) {
-		fp = &qdev->fp_array[i];
-		if (fp_rx) {
-			fp->type = QEDE_FASTPATH_RX;
-			fp_rx--;
-		} else{
-			fp->type = QEDE_FASTPATH_TX;
-		}
-		fp->qdev = qdev;
-		fp->id = i;
-		fp->sb_info = &qdev->sb_array[i];
-		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", "qdev", i);
-	}
-
+	DP_INFO(&qdev->edev, "Reset TX queue %u\n", txq->queue_id);
+	ecore_chain_reset(&txq->tx_pbl);
+	txq->sw_tx_cons = 0;
+	txq->sw_tx_prod = 0;
+	*txq->hw_cons_ptr = 0;
 }
 
-void qede_free_fp_arrays(struct qede_dev *qdev)
+static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq)
 {
-	/* It asseumes qede_free_mem_load() is called before */
-	if (qdev->fp_array != NULL) {
-		rte_free(qdev->fp_array);
-		qdev->fp_array = NULL;
-	}
+	uint16_t i;
 
-	if (qdev->sb_array != NULL) {
-		rte_free(qdev->sb_array);
-		qdev->sb_array = NULL;
+	if (txq->sw_tx_ring) {
+		for (i = 0; i < txq->nb_tx_desc; i++) {
+			if (txq->sw_tx_ring[i].mbuf) {
+				rte_pktmbuf_free(txq->sw_tx_ring[i].mbuf);
+				txq->sw_tx_ring[i].mbuf = NULL;
+			}
+		}
 	}
 }
 
-static int qede_alloc_fp_array(struct qede_dev *qdev)
+void qede_tx_queue_release(void *tx_queue)
 {
-	struct ecore_dev *edev = &qdev->edev;
-
-	qdev->fp_array = rte_calloc("fp", QEDE_QUEUE_CNT(qdev),
-				    sizeof(*qdev->fp_array),
-				    RTE_CACHE_LINE_SIZE);
-
-	if (!qdev->fp_array) {
-		DP_ERR(edev, "fp array allocation failed\n");
-		return -ENOMEM;
-	}
-
-	qdev->sb_array = rte_calloc("sb", QEDE_QUEUE_CNT(qdev),
-				    sizeof(*qdev->sb_array),
-				    RTE_CACHE_LINE_SIZE);
+	struct qede_tx_queue *txq = tx_queue;
 
-	if (!qdev->sb_array) {
-		DP_ERR(edev, "sb array allocation failed\n");
-		rte_free(qdev->fp_array);
-		return -ENOMEM;
+	if (txq) {
+		qede_tx_queue_release_mbufs(txq);
+		rte_free(txq->sw_tx_ring);
+		rte_free(txq);
 	}
-
-	return 0;
 }
 
 /* This function allocates fast-path status block memory */
@@ -410,9 +375,8 @@ static int qede_alloc_fp_array(struct qede_dev *qdev)
 		return -ENOMEM;
 	}
 
-	rc = qdev->ops->common->sb_init(edev, sb_info,
-					sb_virt, sb_phys, sb_id,
-					QED_SB_TYPE_L2_QUEUE);
+	rc = qdev->ops->common->sb_init(edev, sb_info, sb_virt,
+					sb_phys, sb_id);
 	if (rc) {
 		DP_ERR(edev, "Status block initialization failed\n");
 		/* TBD: No dma_free_coherent possible */
@@ -427,9 +391,7 @@ int qede_alloc_fp_resc(struct qede_dev *qdev)
 	struct ecore_dev *edev = &qdev->edev;
 	struct qede_fastpath *fp;
 	uint32_t num_sbs;
-	uint16_t i;
 	uint16_t sb_idx;
-	int rc;
 
 	if (IS_VF(edev))
 		ecore_vf_get_num_sbs(ECORE_LEADING_HWFN(edev), &num_sbs);
@@ -442,25 +404,31 @@ int qede_alloc_fp_resc(struct qede_dev *qdev)
 		return -EINVAL;
 	}
 
-	if (qdev->fp_array)
-		qede_free_fp_arrays(qdev);
+	qdev->fp_array = rte_calloc("fp", QEDE_RXTX_MAX(qdev),
+				sizeof(*qdev->fp_array), RTE_CACHE_LINE_SIZE);
 
-	rc = qede_alloc_fp_array(qdev);
-	if (rc != 0)
-		return rc;
+	if (!qdev->fp_array) {
+		DP_ERR(edev, "fp array allocation failed\n");
+		return -ENOMEM;
+	}
 
-	qede_init_fp(qdev);
+	memset((void *)qdev->fp_array, 0, QEDE_RXTX_MAX(qdev) *
+			sizeof(*qdev->fp_array));
 
-	for (i = 0; i < QEDE_QUEUE_CNT(qdev); i++) {
-		fp = &qdev->fp_array[i];
-		if (IS_VF(edev))
-			sb_idx = i % num_sbs;
-		else
-			sb_idx = i;
+	for (sb_idx = 0; sb_idx < QEDE_RXTX_MAX(qdev); sb_idx++) {
+		fp = &qdev->fp_array[sb_idx];
+		fp->sb_info = rte_calloc("sb", 1, sizeof(struct ecore_sb_info),
+				RTE_CACHE_LINE_SIZE);
+		if (!fp->sb_info) {
+			DP_ERR(edev, "FP sb_info allocation fails\n");
+			return -1;
+		}
 		if (qede_alloc_mem_sb(qdev, fp->sb_info, sb_idx)) {
-			qede_free_fp_arrays(qdev);
-			return -ENOMEM;
+			DP_ERR(edev, "FP status block allocation fails\n");
+			return -1;
 		}
+		DP_INFO(edev, "sb_info idx 0x%x initialized\n",
+				fp->sb_info->igu_sb_id);
 	}
 
 	return 0;
@@ -469,9 +437,23 @@ int qede_alloc_fp_resc(struct qede_dev *qdev)
 void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev)
 {
 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+	__rte_unused struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+	struct qede_fastpath *fp;
+	uint16_t sb_idx;
 
-	qede_free_mem_load(eth_dev);
-	qede_free_fp_arrays(qdev);
+	PMD_INIT_FUNC_TRACE(edev);
+
+	for (sb_idx = 0; sb_idx < QEDE_RXTX_MAX(qdev); sb_idx++) {
+		fp = &qdev->fp_array[sb_idx];
+		if (fp->sb_info)
+			DP_INFO(edev, "Free sb_info index 0x%x\n",
+					fp->sb_info->igu_sb_id);
+		rte_free(fp->sb_info); /* rte_free(NULL) is a no-op */
+		fp->sb_info = NULL;
+	}
+	if (qdev->fp_array)
+		rte_free(qdev->fp_array);
+	qdev->fp_array = NULL;
 }
 
 static inline void
@@ -506,107 +488,299 @@ void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev)
 	PMD_RX_LOG(DEBUG, rxq, "bd_prod %u  cqe_prod %u", bd_prod, cqe_prod);
 }
 
-static int qede_start_queues(struct rte_eth_dev *eth_dev)
+/* Starts a given RX queue in HW */
+static int
+qede_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
 {
-	struct qede_dev *qdev = eth_dev->data->dev_private;
-	struct ecore_dev *edev = &qdev->edev;
-	struct ecore_queue_start_common_params q_params;
-	struct qede_tx_queue *txq;
+	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+	struct ecore_queue_start_common_params params;
+	struct ecore_rxq_start_ret_params ret_params;
+	struct qede_rx_queue *rxq;
 	struct qede_fastpath *fp;
+	struct ecore_hwfn *p_hwfn;
 	dma_addr_t p_phys_table;
-	int txq_index;
 	uint16_t page_cnt;
-	int rc, tc, i;
-
-	for_each_queue(i) {
-		fp = &qdev->fp_array[i];
-		if (fp->type & QEDE_FASTPATH_RX) {
-			struct ecore_rxq_start_ret_params ret_params;
-
-			p_phys_table =
-			    ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring);
-			page_cnt =
-			    ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring);
-
-			memset(&ret_params, 0, sizeof(ret_params));
-			memset(&q_params, 0, sizeof(q_params));
-			q_params.queue_id = i;
-			q_params.vport_id = 0;
-			q_params.sb = fp->sb_info->igu_sb_id;
-			q_params.sb_idx = RX_PI;
-
-			ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
-
-			rc = qdev->ops->q_rx_start(edev, i, &q_params,
-					   fp->rxq->rx_buf_size,
-					   fp->rxq->rx_bd_ring.p_phys_addr,
-					   p_phys_table,
-					   page_cnt,
-					   &ret_params);
+	uint16_t j;
+	int hwfn_index;
+	int rc;
+
+	if (rx_queue_id < eth_dev->data->nb_rx_queues) {
+		fp = &qdev->fp_array[rx_queue_id];
+		rxq = eth_dev->data->rx_queues[rx_queue_id];
+		/* Allocate buffers for the Rx ring */
+		for (j = 0; j < rxq->nb_rx_desc; j++) {
+			rc = qede_alloc_rx_buffer(rxq);
 			if (rc) {
-				DP_ERR(edev, "Start rxq #%d failed %d\n",
-				       fp->rxq->queue_id, rc);
-				return rc;
+				DP_ERR(edev, "RX buffer allocation failed"
+						" for rxq = %u\n", rx_queue_id);
+				return -ENOMEM;
 			}
+		}
+		/* disable interrupts */
+		ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
+		/* Prepare ramrod */
+		memset(&params, 0, sizeof(params));
+		params.queue_id = rx_queue_id;
+		params.vport_id = 0;
+		params.sb = fp->sb_info->igu_sb_id;
+		DP_INFO(edev, "rxq %u igu_sb_id 0x%x\n",
+				fp->rxq->queue_id, fp->sb_info->igu_sb_id);
+		params.sb_idx = RX_PI;
+		hwfn_index = rx_queue_id % edev->num_hwfns;
+		p_hwfn = &edev->hwfns[hwfn_index];
+		p_phys_table = ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring);
+		page_cnt = ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring);
+		memset(&ret_params, 0, sizeof(ret_params));
+		rc = ecore_eth_rx_queue_start(p_hwfn,
+				p_hwfn->hw_info.opaque_fid,
+				&params, fp->rxq->rx_buf_size,
+				fp->rxq->rx_bd_ring.p_phys_addr,
+				p_phys_table, page_cnt,
+				&ret_params);
+		if (rc) {
+			DP_ERR(edev, "RX queue %u could not be started, rc = %d\n",
+					rx_queue_id, rc);
+			return -1;
+		}
+		/* Update with the returned parameters */
+		fp->rxq->hw_rxq_prod_addr = ret_params.p_prod;
+		fp->rxq->handle = ret_params.p_handle;
+
+		fp->rxq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[RX_PI];
+		qede_update_rx_prod(qdev, fp->rxq);
+		eth_dev->data->rx_queue_state[rx_queue_id] =
+			RTE_ETH_QUEUE_STATE_STARTED;
+		DP_INFO(edev, "RX queue %u started\n", rx_queue_id);
+	} else {
+		DP_ERR(edev, "RX queue %u is not in range\n", rx_queue_id);
+		rc = -EINVAL;
+	}
 
-			/* Use the return parameters */
-			fp->rxq->hw_rxq_prod_addr = ret_params.p_prod;
-			fp->rxq->handle = ret_params.p_handle;
+	return rc;
+}
 
-			fp->rxq->hw_cons_ptr =
-					&fp->sb_info->sb_virt->pi_array[RX_PI];
+static int
+qede_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
+{
+	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+	struct ecore_queue_start_common_params params;
+	struct ecore_txq_start_ret_params ret_params;
+	struct ecore_hwfn *p_hwfn;
+	dma_addr_t p_phys_table;
+	struct qede_tx_queue *txq;
+	struct qede_fastpath *fp;
+	uint16_t page_cnt;
+	int hwfn_index;
+	int rc;
 
-			qede_update_rx_prod(qdev, fp->rxq);
+	if (tx_queue_id < eth_dev->data->nb_tx_queues) {
+		txq = eth_dev->data->tx_queues[tx_queue_id];
+		fp = &qdev->fp_array[tx_queue_id];
+		memset(&params, 0, sizeof(params));
+		params.queue_id = tx_queue_id;
+		params.vport_id = 0;
+		params.sb = fp->sb_info->igu_sb_id;
+		DP_INFO(edev, "txq %u igu_sb_id 0x%x\n",
+				fp->txq->queue_id, fp->sb_info->igu_sb_id);
+		params.sb_idx = TX_PI(0); /* tc = 0 */
+		p_phys_table = ecore_chain_get_pbl_phys(&txq->tx_pbl);
+		page_cnt = ecore_chain_get_page_cnt(&txq->tx_pbl);
+		hwfn_index = tx_queue_id % edev->num_hwfns;
+		p_hwfn = &edev->hwfns[hwfn_index];
+		if (qdev->dev_info.is_legacy)
+			fp->txq->is_legacy = true;
+		rc = ecore_eth_tx_queue_start(p_hwfn,
+				p_hwfn->hw_info.opaque_fid,
+				&params, 0 /* tc */,
+				p_phys_table, page_cnt,
+				&ret_params);
+		if (rc != ECORE_SUCCESS) {
+			DP_ERR(edev, "TX queue %u couldn't be started, rc=%d\n",
+					tx_queue_id, rc);
+			return -1;
 		}
+		txq->doorbell_addr = ret_params.p_doorbell;
+		txq->handle = ret_params.p_handle;
+
+		txq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[TX_PI(0)];
+		SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST,
+				DB_DEST_XCM);
+		SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
+				DB_AGG_CMD_SET);
+		SET_FIELD(txq->tx_db.data.params,
+				ETH_DB_DATA_AGG_VAL_SEL,
+				DQ_XCM_ETH_TX_BD_PROD_CMD);
+		txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
+		eth_dev->data->tx_queue_state[tx_queue_id] =
+			RTE_ETH_QUEUE_STATE_STARTED;
+		DP_INFO(edev, "TX queue %u started\n", tx_queue_id);
+	} else {
+		DP_ERR(edev, "TX queue %u is not in range\n", tx_queue_id);
+		rc = -EINVAL;
+	}
 
-		if (!(fp->type & QEDE_FASTPATH_TX))
-			continue;
-		for (tc = 0; tc < qdev->num_tc; tc++) {
-			struct ecore_txq_start_ret_params ret_params;
+	return rc;
+}
 
-			txq = fp->txqs[tc];
-			txq_index = tc * QEDE_RSS_COUNT(qdev) + i;
+static inline void
+qede_free_tx_pkt(struct qede_tx_queue *txq)
+{
+	struct rte_mbuf *mbuf;
+	uint16_t nb_segs;
+	uint16_t idx;
 
-			p_phys_table = ecore_chain_get_pbl_phys(&txq->tx_pbl);
-			page_cnt = ecore_chain_get_page_cnt(&txq->tx_pbl);
+	idx = TX_CONS(txq);
+	mbuf = txq->sw_tx_ring[idx].mbuf;
+	if (mbuf) {
+		nb_segs = mbuf->nb_segs;
+		PMD_TX_LOG(DEBUG, txq, "nb_segs to free %u\n", nb_segs);
+		while (nb_segs) {
+			/* It's like consuming rxbuf in recv() */
+			ecore_chain_consume(&txq->tx_pbl);
+			txq->nb_tx_avail++;
+			nb_segs--;
+		}
+		rte_pktmbuf_free(mbuf);
+		txq->sw_tx_ring[idx].mbuf = NULL;
+		txq->sw_tx_cons++;
+		PMD_TX_LOG(DEBUG, txq, "Freed tx packet\n");
+	} else {
+		ecore_chain_consume(&txq->tx_pbl);
+		txq->nb_tx_avail++;
+	}
+}
 
-			memset(&q_params, 0, sizeof(q_params));
-			memset(&ret_params, 0, sizeof(ret_params));
-			q_params.queue_id = txq->queue_id;
-			q_params.vport_id = 0;
-			q_params.sb = fp->sb_info->igu_sb_id;
-			q_params.sb_idx = TX_PI(tc);
+static inline void
+qede_process_tx_compl(__rte_unused struct ecore_dev *edev,
+		      struct qede_tx_queue *txq)
+{
+	uint16_t hw_bd_cons;
+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
+	uint16_t sw_tx_cons;
+#endif
 
-			rc = qdev->ops->q_tx_start(edev, i, &q_params,
-						   p_phys_table,
-						   page_cnt, /* **pp_doorbell */
-						   &ret_params);
-			if (rc) {
-				DP_ERR(edev, "Start txq %u failed %d\n",
-				       txq_index, rc);
-				return rc;
-			}
+	rte_compiler_barrier();
+	hw_bd_cons = rte_le_to_cpu_16(*txq->hw_cons_ptr);
+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
+	sw_tx_cons = ecore_chain_get_cons_idx(&txq->tx_pbl);
+	PMD_TX_LOG(DEBUG, txq, "Tx Completions = %u\n",
+		   abs(hw_bd_cons - sw_tx_cons));
+#endif
+	while (hw_bd_cons !=  ecore_chain_get_cons_idx(&txq->tx_pbl))
+		qede_free_tx_pkt(txq);
+}
 
-			txq->doorbell_addr = ret_params.p_doorbell;
-			txq->handle = ret_params.p_handle;
 
-			txq->hw_cons_ptr =
-			    &fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
-			SET_FIELD(txq->tx_db.data.params,
-				  ETH_DB_DATA_DEST, DB_DEST_XCM);
-			SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
-				  DB_AGG_CMD_SET);
-			SET_FIELD(txq->tx_db.data.params,
-				  ETH_DB_DATA_AGG_VAL_SEL,
-				  DQ_XCM_ETH_TX_BD_PROD_CMD);
+static int qede_drain_txq(struct qede_dev *qdev,
+			  struct qede_tx_queue *txq, bool allow_drain)
+{
+	struct ecore_dev *edev = &qdev->edev;
+	int rc, cnt = 1000;
 
-			txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
+	while (txq->sw_tx_cons != txq->sw_tx_prod) {
+		qede_process_tx_compl(edev, txq);
+		if (!cnt) {
+			if (allow_drain) {
+				DP_ERR(edev, "Tx queue[%u] is stuck,"
+					  "requesting MCP to drain\n",
+					  txq->queue_id);
+				rc = qdev->ops->common->drain(edev);
+				if (rc)
+					return rc;
+				return qede_drain_txq(qdev, txq, false);
+			}
+			DP_ERR(edev, "Timeout waiting for tx queue[%d]:"
+				  "PROD=%d, CONS=%d\n",
+				  txq->queue_id, txq->sw_tx_prod,
+				  txq->sw_tx_cons);
+			return -1;
 		}
+		cnt--;
+		DELAY(1000);
+		rte_compiler_barrier();
 	}
 
+	/* FW finished processing, wait for HW to transmit all tx packets */
+	DELAY(2000);
+
 	return 0;
 }
 
+
+/* Stops a given TX queue in the HW */
+static int qede_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
+{
+	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+	struct ecore_hwfn *p_hwfn;
+	struct qede_tx_queue *txq;
+	int hwfn_index;
+	int rc;
+
+	if (tx_queue_id < eth_dev->data->nb_tx_queues) {
+		txq = eth_dev->data->tx_queues[tx_queue_id];
+		/* Drain txq */
+		if (qede_drain_txq(qdev, txq, true))
+			return -1; /* For the lack of retcodes */
+		/* Stop txq */
+		hwfn_index = tx_queue_id % edev->num_hwfns;
+		p_hwfn = &edev->hwfns[hwfn_index];
+		rc = ecore_eth_tx_queue_stop(p_hwfn, txq->handle);
+		if (rc != ECORE_SUCCESS) {
+			DP_ERR(edev, "TX queue %u stop fails\n", tx_queue_id);
+			return -1;
+		}
+		qede_tx_queue_release_mbufs(txq);
+		qede_tx_queue_reset(qdev, txq);
+		eth_dev->data->tx_queue_state[tx_queue_id] =
+			RTE_ETH_QUEUE_STATE_STOPPED;
+		DP_INFO(edev, "TX queue %u stopped\n", tx_queue_id);
+	} else {
+		DP_ERR(edev, "TX queue %u is not in range\n", tx_queue_id);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+int qede_start_queues(struct rte_eth_dev *eth_dev)
+{
+	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+	uint8_t id;
+	int rc;
+
+	for_each_rss(id) {
+		rc = qede_rx_queue_start(eth_dev, id);
+		if (rc != ECORE_SUCCESS)
+			return -1;
+	}
+
+	for_each_tss(id) {
+		rc = qede_tx_queue_start(eth_dev, id);
+		if (rc != ECORE_SUCCESS)
+			return -1;
+	}
+
+	return rc;
+}
+
+void qede_stop_queues(struct rte_eth_dev *eth_dev)
+{
+	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+	uint8_t id;
+
+	/* Stopping RX/TX queues */
+	for_each_tss(id) {
+		qede_tx_queue_stop(eth_dev, id);
+	}
+
+	for_each_rss(id) {
+		qede_rx_queue_stop(eth_dev, id);
+	}
+}
+
 static bool qede_tunn_exist(uint16_t flag)
 {
 	return !!((PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
@@ -901,7 +1075,6 @@ static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags)
 	struct qede_rx_queue *rxq = p_rxq;
 	struct qede_dev *qdev = rxq->qdev;
 	struct ecore_dev *edev = &qdev->edev;
-	struct qede_fastpath *fp = &qdev->fp_array[rxq->queue_id];
 	uint16_t hw_comp_cons, sw_comp_cons, sw_rx_index;
 	uint16_t rx_pkt = 0;
 	union eth_rx_cqe *cqe;
@@ -987,7 +1160,8 @@ static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags)
 			goto tpa_end;
 		case ETH_RX_CQE_TYPE_SLOW_PATH:
 			PMD_RX_LOG(INFO, rxq, "Got unexpected slowpath CQE\n");
-			qdev->ops->eth_cqe_completion(edev, fp->id,
+			ecore_eth_cqe_completion(
+				&edev->hwfns[rxq->queue_id % edev->num_hwfns],
 				(struct eth_slow_path_rx_cqe *)cqe);
 			/* fall-thru */
 		default:
@@ -1175,53 +1349,6 @@ static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags)
 	return rx_pkt;
 }
 
-static inline void
-qede_free_tx_pkt(struct qede_tx_queue *txq)
-{
-	struct rte_mbuf *mbuf;
-	uint16_t nb_segs;
-	uint16_t idx;
-
-	idx = TX_CONS(txq);
-	mbuf = txq->sw_tx_ring[idx].mbuf;
-	if (mbuf) {
-		nb_segs = mbuf->nb_segs;
-		PMD_TX_LOG(DEBUG, txq, "nb_segs to free %u\n", nb_segs);
-		while (nb_segs) {
-			/* It's like consuming rxbuf in recv() */
-			ecore_chain_consume(&txq->tx_pbl);
-			txq->nb_tx_avail++;
-			nb_segs--;
-		}
-		rte_pktmbuf_free(mbuf);
-		txq->sw_tx_ring[idx].mbuf = NULL;
-		txq->sw_tx_cons++;
-		PMD_TX_LOG(DEBUG, txq, "Freed tx packet\n");
-	} else {
-		ecore_chain_consume(&txq->tx_pbl);
-		txq->nb_tx_avail++;
-	}
-}
-
-static inline void
-qede_process_tx_compl(__rte_unused struct ecore_dev *edev,
-		      struct qede_tx_queue *txq)
-{
-	uint16_t hw_bd_cons;
-#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
-	uint16_t sw_tx_cons;
-#endif
-
-	rte_compiler_barrier();
-	hw_bd_cons = rte_le_to_cpu_16(*txq->hw_cons_ptr);
-#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
-	sw_tx_cons = ecore_chain_get_cons_idx(&txq->tx_pbl);
-	PMD_TX_LOG(DEBUG, txq, "Tx Completions = %u\n",
-		   abs(hw_bd_cons - sw_tx_cons));
-#endif
-	while (hw_bd_cons !=  ecore_chain_get_cons_idx(&txq->tx_pbl))
-		qede_free_tx_pkt(txq);
-}
 
 /* Populate scatter gather buffer descriptor fields */
 static inline uint8_t
@@ -1599,286 +1726,6 @@ static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags)
 	return nb_pkt_sent;
 }
 
-static void qede_init_fp_queue(struct rte_eth_dev *eth_dev)
-{
-	struct qede_dev *qdev = eth_dev->data->dev_private;
-	struct qede_fastpath *fp;
-	uint8_t i, txq_index, tc;
-	int rxq = 0, txq = 0;
-
-	for_each_queue(i) {
-		fp = &qdev->fp_array[i];
-		if (fp->type & QEDE_FASTPATH_RX) {
-			fp->rxq = eth_dev->data->rx_queues[i];
-			fp->rxq->queue_id = rxq++;
-		}
-
-		if (fp->type & QEDE_FASTPATH_TX) {
-			for (tc = 0; tc < qdev->num_tc; tc++) {
-				txq_index = tc * QEDE_TSS_COUNT(qdev) + txq;
-				fp->txqs[tc] =
-					eth_dev->data->tx_queues[txq_index];
-				fp->txqs[tc]->queue_id = txq_index;
-				if (qdev->dev_info.is_legacy)
-					fp->txqs[tc]->is_legacy = true;
-			}
-			txq++;
-		}
-	}
-}
-
-int qede_dev_start(struct rte_eth_dev *eth_dev)
-{
-	struct qede_dev *qdev = eth_dev->data->dev_private;
-	struct ecore_dev *edev = &qdev->edev;
-	int rc;
-
-	DP_INFO(edev, "Device state is %d\n", qdev->state);
-
-	if (qdev->state == QEDE_DEV_START) {
-		DP_INFO(edev, "Port is already started\n");
-		return 0;
-	}
-
-	if (qdev->state == QEDE_DEV_CONFIG)
-		qede_init_fp_queue(eth_dev);
-
-	/* Update MTU only if it has changed */
-	if (qdev->mtu != qdev->new_mtu) {
-		if (qede_update_mtu(eth_dev, qdev->new_mtu))
-			return -1;
-		qdev->mtu = qdev->new_mtu;
-		/* If MTU has changed then update TPA too */
-		if (qdev->enable_lro)
-			if (qede_enable_tpa(eth_dev, true))
-				return -1;
-	}
-
-	rc = qede_start_queues(eth_dev);
-	if (rc) {
-		DP_ERR(edev, "Failed to start queues\n");
-		/* TBD: free */
-		return rc;
-	}
-
-	/* Newer SR-IOV PF driver expects RX/TX queues to be started before
-	 * enabling RSS. Hence RSS configuration is deferred upto this point.
-	 * Also, we would like to retain similar behavior in PF case, so we
-	 * don't do PF/VF specific check here.
-	 */
-	if (eth_dev->data->dev_conf.rxmode.mq_mode  == ETH_MQ_RX_RSS)
-		if (qede_config_rss(eth_dev))
-			return -1;
-
-	/* Enable vport*/
-	if (qede_activate_vport(eth_dev, true))
-		return -1;
-
-	/* Bring-up the link */
-	qede_dev_set_link_state(eth_dev, true);
-
-	/* Start/resume traffic */
-	qdev->ops->fastpath_start(edev);
-
-	qdev->state = QEDE_DEV_START;
-
-	DP_INFO(edev, "dev_state is QEDE_DEV_START\n");
-
-	return 0;
-}
-
-static int qede_drain_txq(struct qede_dev *qdev,
-			  struct qede_tx_queue *txq, bool allow_drain)
-{
-	struct ecore_dev *edev = &qdev->edev;
-	int rc, cnt = 1000;
-
-	while (txq->sw_tx_cons != txq->sw_tx_prod) {
-		qede_process_tx_compl(edev, txq);
-		if (!cnt) {
-			if (allow_drain) {
-				DP_ERR(edev, "Tx queue[%u] is stuck,"
-					  "requesting MCP to drain\n",
-					  txq->queue_id);
-				rc = qdev->ops->common->drain(edev);
-				if (rc)
-					return rc;
-				return qede_drain_txq(qdev, txq, false);
-			}
-			DP_ERR(edev, "Timeout waiting for tx queue[%d]:"
-				  "PROD=%d, CONS=%d\n",
-				  txq->queue_id, txq->sw_tx_prod,
-				  txq->sw_tx_cons);
-			return -1;
-		}
-		cnt--;
-		DELAY(1000);
-		rte_compiler_barrier();
-	}
-
-	/* FW finished processing, wait for HW to transmit all tx packets */
-	DELAY(2000);
-
-	return 0;
-}
-
-static int qede_stop_queues(struct qede_dev *qdev)
-{
-	struct ecore_dev *edev = &qdev->edev;
-	struct qede_fastpath *fp;
-	int rc, tc, i;
-
-	DP_INFO(edev, "Flushing tx queues\n");
-
-	/* Flush Tx queues. If needed, request drain from MCP */
-	for_each_queue(i) {
-		fp = &qdev->fp_array[i];
-
-		if (fp->type & QEDE_FASTPATH_TX) {
-			for (tc = 0; tc < qdev->num_tc; tc++) {
-				struct qede_tx_queue *txq = fp->txqs[tc];
-
-				rc = qede_drain_txq(qdev, txq, true);
-				if (rc)
-					return rc;
-			}
-		}
-	}
-
-	/* Stop all Queues in reverse order */
-	for (i = QEDE_QUEUE_CNT(qdev) - 1; i >= 0; i--) {
-		fp = &qdev->fp_array[i];
-
-		/* Stop the Tx Queue(s) */
-		if (qdev->fp_array[i].type & QEDE_FASTPATH_TX) {
-			for (tc = 0; tc < qdev->num_tc; tc++) {
-				struct qede_tx_queue *txq = fp->txqs[tc];
-				DP_INFO(edev, "Stopping tx queues\n");
-				rc = qdev->ops->q_tx_stop(edev, i, txq->handle);
-				if (rc) {
-					DP_ERR(edev, "Failed to stop TXQ #%d\n",
-					       i);
-					return rc;
-				}
-			}
-		}
-
-		/* Stop the Rx Queue */
-		if (qdev->fp_array[i].type & QEDE_FASTPATH_RX) {
-			DP_INFO(edev, "Stopping rx queues\n");
-			rc = qdev->ops->q_rx_stop(edev, i, fp->rxq->handle);
-			if (rc) {
-				DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
-				return rc;
-			}
-		}
-	}
-	qede_reset_fp_rings(qdev);
-
-	return 0;
-}
-
-int qede_reset_fp_rings(struct qede_dev *qdev)
-{
-	struct qede_fastpath *fp;
-	struct qede_tx_queue *txq;
-	uint8_t tc;
-	uint16_t id, i;
-
-	for_each_queue(id) {
-		fp = &qdev->fp_array[id];
-
-		if (fp->type & QEDE_FASTPATH_RX) {
-			DP_INFO(&qdev->edev,
-				"Reset FP chain for RSS %u\n", id);
-			qede_rx_queue_release_mbufs(fp->rxq);
-			ecore_chain_reset(&fp->rxq->rx_bd_ring);
-			ecore_chain_reset(&fp->rxq->rx_comp_ring);
-			fp->rxq->sw_rx_prod = 0;
-			fp->rxq->sw_rx_cons = 0;
-			*fp->rxq->hw_cons_ptr = 0;
-			for (i = 0; i < fp->rxq->nb_rx_desc; i++) {
-				if (qede_alloc_rx_buffer(fp->rxq)) {
-					DP_ERR(&qdev->edev,
-					       "RX buffer allocation failed\n");
-					return -ENOMEM;
-				}
-			}
-		}
-		if (fp->type & QEDE_FASTPATH_TX) {
-			for (tc = 0; tc < qdev->num_tc; tc++) {
-				txq = fp->txqs[tc];
-				qede_tx_queue_release_mbufs(txq);
-				ecore_chain_reset(&txq->tx_pbl);
-				txq->sw_tx_cons = 0;
-				txq->sw_tx_prod = 0;
-				*txq->hw_cons_ptr = 0;
-			}
-		}
-	}
-
-	return 0;
-}
-
-/* This function frees all memory of a single fp */
-void qede_free_mem_load(struct rte_eth_dev *eth_dev)
-{
-	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
-	struct qede_fastpath *fp;
-	uint16_t txq_idx;
-	uint8_t id;
-	uint8_t tc;
-
-	for_each_queue(id) {
-		fp = &qdev->fp_array[id];
-		if (fp->type & QEDE_FASTPATH_RX) {
-			if (!fp->rxq)
-				continue;
-			qede_rx_queue_release(fp->rxq);
-			eth_dev->data->rx_queues[id] = NULL;
-		} else {
-			for (tc = 0; tc < qdev->num_tc; tc++) {
-				if (!fp->txqs[tc])
-					continue;
-				txq_idx = fp->txqs[tc]->queue_id;
-				qede_tx_queue_release(fp->txqs[tc]);
-				eth_dev->data->tx_queues[txq_idx] = NULL;
-			}
-		}
-	}
-}
-
-void qede_dev_stop(struct rte_eth_dev *eth_dev)
-{
-	struct qede_dev *qdev = eth_dev->data->dev_private;
-	struct ecore_dev *edev = &qdev->edev;
-
-	DP_INFO(edev, "port %u\n", eth_dev->data->port_id);
-
-	if (qdev->state != QEDE_DEV_START) {
-		DP_INFO(edev, "Device not yet started\n");
-		return;
-	}
-
-	/* Disable vport */
-	if (qede_activate_vport(eth_dev, false))
-		return;
-
-	if (qede_stop_queues(qdev))
-		DP_ERR(edev, "Didn't succeed to close queues\n");
-
-	DP_INFO(edev, "Stopped queues\n");
-
-	qdev->ops->fastpath_stop(edev);
-
-	/* Bring the link down */
-	qede_dev_set_link_state(eth_dev, false);
-
-	qdev->state = QEDE_DEV_STOP;
-
-	DP_INFO(edev, "dev_state is QEDE_DEV_STOP\n");
-}
-
 uint16_t
 qede_rxtx_pkts_dummy(__rte_unused void *p_rxq,
 		     __rte_unused struct rte_mbuf **pkts,
diff --git a/drivers/net/qede/qede_rxtx.h b/drivers/net/qede/qede_rxtx.h
index edbd923..f9f52ea 100644
--- a/drivers/net/qede/qede_rxtx.h
+++ b/drivers/net/qede/qede_rxtx.h
@@ -77,10 +77,10 @@
 
 #define QEDE_TXQ_FLAGS		((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS)
 
-#define MAX_NUM_TC		8
-
-#define for_each_queue(i) for (i = 0; i < qdev->num_queues; i++)
-
+#define for_each_rss(i)		for (i = 0; i < qdev->num_rx_queues; i++)
+#define for_each_tss(i)		for (i = 0; i < qdev->num_tx_queues; i++)
+#define QEDE_RXTX_MAX(qdev) \
+	(RTE_MAX(QEDE_RSS_COUNT(qdev), QEDE_TSS_COUNT(qdev)))
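
With the definitions above, and assuming (as everywhere in this
driver) a struct qede_dev pointer named qdev in scope,

	for_each_tss(id)
		qede_tx_queue_stop(eth_dev, id);

expands to

	for (id = 0; id < qdev->num_tx_queues; id++)
		qede_tx_queue_stop(eth_dev, id);

and for_each_rss(id) iterates the same way over qdev->num_rx_queues.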
 
 /* Macros for non-tunnel packet types lkup table */
 #define QEDE_PKT_TYPE_UNKNOWN				0x0
@@ -164,6 +164,7 @@ struct qede_rx_queue {
 	uint16_t *hw_cons_ptr;
 	void OSAL_IOMEM *hw_rxq_prod_addr;
 	struct qede_rx_entry *sw_rx_ring;
+	struct ecore_sb_info *sb_info;
 	uint16_t sw_rx_cons;
 	uint16_t sw_rx_prod;
 	uint16_t nb_rx_desc;
@@ -212,13 +213,9 @@ struct qede_tx_queue {
 };
 
 struct qede_fastpath {
-	struct qede_dev *qdev;
-	u8 type;
-	uint8_t id;
 	struct ecore_sb_info *sb_info;
 	struct qede_rx_queue *rxq;
-	struct qede_tx_queue *txqs[MAX_NUM_TC];
-	char name[80];
+	struct qede_tx_queue *txq;
 };
 
 /*
@@ -239,16 +236,6 @@ int qede_tx_queue_setup(struct rte_eth_dev *dev,
 
 void qede_tx_queue_release(void *tx_queue);
 
-int qede_dev_start(struct rte_eth_dev *eth_dev);
-
-void qede_dev_stop(struct rte_eth_dev *eth_dev);
-
-int qede_reset_fp_rings(struct qede_dev *qdev);
-
-void qede_free_fp_arrays(struct qede_dev *qdev);
-
-void qede_free_mem_load(struct rte_eth_dev *eth_dev);
-
 uint16_t qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
 			uint16_t nb_pkts);
 
@@ -262,6 +249,10 @@ uint16_t qede_rxtx_pkts_dummy(void *p_rxq,
 			      struct rte_mbuf **pkts,
 			      uint16_t nb_pkts);
 
+int qede_start_queues(struct rte_eth_dev *eth_dev);
+
+void qede_stop_queues(struct rte_eth_dev *eth_dev);
+
 /* Fastpath resource alloc/dealloc helpers */
 int qede_alloc_fp_resc(struct qede_dev *qdev);
 
-- 
1.7.10.3


