[dpdk-dev] [PATCH 06/53] net/qede/base: changes for VF queue zone

Rasesh Mody rasesh.mody at cavium.com
Tue Sep 19 03:29:46 CEST 2017


Allow opening multiple Tx queues on a single qzone for VFs.
This is supported by the Rx/Tx TLVs now having an additional extended TLV
that passes the `qid_usage_idx', a unique number for each queue-cid that
was opened on a given queue-zone.

Fix a Tx timeout issue caused by using more than 16 CIDs, by adding an
additional VF legacy mode. This detaches the CID handling from the
original, only-existing legacy mode suited for older releases.
Following this change, only VFs that publish VFPF_ACQUIRE_CAP_QUEUE_QIDS
have the new CID scheme applied; i.e., the new 'legacy' mode is determined
by whether or not this capability is published.

Change the doorbell-clearing logic so that the PF cleans the doorbells
for both legacy and non-legacy VFs.

Signed-off-by: Rasesh Mody <rasesh.mody at cavium.com>
---
 drivers/net/qede/base/ecore_cxt.c      |   19 +-
 drivers/net/qede/base/ecore_l2.c       |   29 +--
 drivers/net/qede/base/ecore_l2.h       |    6 +-
 drivers/net/qede/base/ecore_proto_if.h |    4 +
 drivers/net/qede/base/ecore_sriov.c    |  322 ++++++++++++++++++++------------
 drivers/net/qede/base/ecore_sriov.h    |    4 +
 drivers/net/qede/base/ecore_vf.c       |   76 +++++---
 drivers/net/qede/base/ecore_vf.h       |    5 +
 drivers/net/qede/base/ecore_vfpf_if.h  |   55 +++++-
 9 files changed, 345 insertions(+), 175 deletions(-)
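
Note: the following sketch is illustrative and not part of the patch. The new
num_vf_cons field gives the PF driver a knob for how many connections [CIDs]
each VF may open; leaving it at zero makes ecore_cxt_set_pf_params() fall
back to ETH_PF_PARAMS_VF_CONS_DEFAULT (32). The function name and the chosen
values below are hypothetical:

#include "ecore.h"		/* struct ecore_hwfn */
#include "ecore_proto_if.h"	/* struct ecore_eth_pf_params */

static void pf_params_example(struct ecore_hwfn *p_hwfn)
{
	struct ecore_eth_pf_params *p_params =
		&p_hwfn->pf_params.eth_pf_params;

	/* PF's own L2 connections, as before */
	p_params->num_cons = 64;

	/* New in this patch: per-VF CID budget. With the extended TLV a
	 * VF can open several Tx queues per queue-zone, so this bounds
	 * p_resp->num_cids in the ACQUIRE response.
	 */
	p_params->num_vf_cons = 48;
}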
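
Note: also illustrative and not part of the patch. The 'new legacy'
distinction reduces to the ACQUIRE handshake: the VF advertises
VFPF_ACQUIRE_CAP_QUEUE_QIDS, the PF echoes PFVF_ACQUIRE_CAP_QUEUE_QIDS only
to such VFs, and from then on both sides attach CHANNEL_TLV_QID to every
queue-referencing message. A condensed sketch of that check from the VF
side, assuming the request/response buffers of ecore_vf.c; the helper name
is hypothetical:

#include "ecore_vfpf_if.h"	/* acquire TLVs and capability bits */

/* True if both sides may use the extended qid TLV */
static bool vf_negotiated_queue_qids(struct vfpf_acquire_tlv *req,
				     struct pfvf_acquire_resp_tlv *resp)
{
	/* VF advertises it will pass CHANNEL_TLV_QID with every queue
	 * reference it sends.
	 */
	req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_QUEUE_QIDS;

	/* ... ACQUIRE is sent to the PF; the reply lands in 'resp' ... */

	/* Only if the PF echoed the capability is the new CID scheme in
	 * effect; otherwise the PF treats the VF as 'legacy'
	 * [ECORE_QCID_LEGACY_VF_CID].
	 */
	return !!(resp->pfdev_info.capabilities &
		  PFVF_ACQUIRE_CAP_QUEUE_QIDS);
}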

diff --git a/drivers/net/qede/base/ecore_cxt.c b/drivers/net/qede/base/ecore_cxt.c
index 688118b..8c45315 100644
--- a/drivers/net/qede/base/ecore_cxt.c
+++ b/drivers/net/qede/base/ecore_cxt.c
@@ -1993,19 +1993,16 @@ enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn)
 	switch (p_hwfn->hw_info.personality) {
 	case ECORE_PCI_ETH:
 		{
-			struct ecore_eth_pf_params *p_params =
+		struct ecore_eth_pf_params *p_params =
 			    &p_hwfn->pf_params.eth_pf_params;
 
-			/* TODO - we probably want to add VF number to the PF
-			 * params;
-			 * As of now, allocates 16 * 2 per-VF [to retain regular
-			 * functionality].
-			 */
-			ecore_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
-						      p_params->num_cons, 32);
-			p_hwfn->p_cxt_mngr->arfs_count =
-						p_params->num_arfs_filters;
-			break;
+		if (!p_params->num_vf_cons)
+			p_params->num_vf_cons = ETH_PF_PARAMS_VF_CONS_DEFAULT;
+		ecore_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
+					      p_params->num_cons,
+					      p_params->num_vf_cons);
+		p_hwfn->p_cxt_mngr->arfs_count = p_params->num_arfs_filters;
+		break;
 		}
 	default:
 		return ECORE_INVAL;
diff --git a/drivers/net/qede/base/ecore_l2.c b/drivers/net/qede/base/ecore_l2.c
index e58b8fa..839bd46 100644
--- a/drivers/net/qede/base/ecore_l2.c
+++ b/drivers/net/qede/base/ecore_l2.c
@@ -173,16 +173,19 @@ static void ecore_eth_queue_qid_usage_del(struct ecore_hwfn *p_hwfn,
 void ecore_eth_queue_cid_release(struct ecore_hwfn *p_hwfn,
 				 struct ecore_queue_cid *p_cid)
 {
-	/* For VF-queues, stuff is a bit complicated as:
-	 *  - They always maintain the qid_usage on their own.
-	 *  - In legacy mode, they also maintain their CIDs.
-	 */
+	bool b_legacy_vf = !!(p_cid->vf_legacy &
+			      ECORE_QCID_LEGACY_VF_CID);
 
-	/* VFs' CIDs are 0-based in PF-view, and uninitialized on VF */
-	if (IS_PF(p_hwfn->p_dev) && !p_cid->b_legacy_vf)
+	/* VFs' CIDs are 0-based in PF-view, and uninitialized on VF.
+	 * For legacy vf-queues, the CID doesn't go through here.
+	 */
+	if (IS_PF(p_hwfn->p_dev) && !b_legacy_vf)
 		_ecore_cxt_release_cid(p_hwfn, p_cid->cid, p_cid->vfid);
-	if (!p_cid->b_legacy_vf)
+
+	/* VFs maintain the index inside queue-zone on their own */
+	if (p_cid->vfid == ECORE_QUEUE_CID_PF)
 		ecore_eth_queue_qid_usage_del(p_hwfn, p_cid);
+
 	OSAL_VFREE(p_hwfn->p_dev, p_cid);
 }
 
@@ -211,7 +214,7 @@ void ecore_eth_queue_cid_release(struct ecore_hwfn *p_hwfn,
 	if (p_vf_params != OSAL_NULL) {
 		p_cid->vfid = p_vf_params->vfid;
 		p_cid->vf_qid = p_vf_params->vf_qid;
-		p_cid->b_legacy_vf = p_vf_params->b_legacy;
+		p_cid->vf_legacy = p_vf_params->vf_legacy;
 	} else {
 		p_cid->vfid = ECORE_QUEUE_CID_PF;
 	}
@@ -296,7 +299,8 @@ struct ecore_queue_cid *
 	if (p_vf_params) {
 		vfid = p_vf_params->vfid;
 
-		if (p_vf_params->b_legacy) {
+		if (p_vf_params->vf_legacy &
+		    ECORE_QCID_LEGACY_VF_CID) {
 			b_legacy_vf = true;
 			cid = p_vf_params->vf_qid;
 		}
@@ -928,12 +932,15 @@ enum _ecore_status_t
 	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);
 
 	if (p_cid->vfid != ECORE_QUEUE_CID_PF) {
+		bool b_legacy_vf = !!(p_cid->vf_legacy &
+				      ECORE_QCID_LEGACY_VF_RX_PROD);
+
 		p_ramrod->vf_rx_prod_index = p_cid->vf_qid;
 		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
 			   "Queue%s is meant for VF rxq[%02x]\n",
-			   !!p_cid->b_legacy_vf ? " [legacy]" : "",
+			   b_legacy_vf ? " [legacy]" : "",
 			   p_cid->vf_qid);
-		p_ramrod->vf_rx_prod_use_zone_a = !!p_cid->b_legacy_vf;
+		p_ramrod->vf_rx_prod_use_zone_a = b_legacy_vf;
 	}
 
 	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
diff --git a/drivers/net/qede/base/ecore_l2.h b/drivers/net/qede/base/ecore_l2.h
index 7fe4cbc..33f1fad 100644
--- a/drivers/net/qede/base/ecore_l2.h
+++ b/drivers/net/qede/base/ecore_l2.h
@@ -34,7 +34,7 @@ struct ecore_queue_cid_vf_params {
 	 *  - Producers would be placed in a different place.
 	 *  - Makes assumptions regarding the CIDs.
 	 */
-	bool b_legacy;
+	u8 vf_legacy;
 
 	/* For VFs, this index arrives via TLV to differentiate between
 	 * different queues opened on the same qzone, and is passed
@@ -69,7 +69,9 @@ struct ecore_queue_cid {
 	u8 qid_usage_idx;
 
 	/* Legacy VFs might have Rx producer located elsewhere */
-	bool b_legacy_vf;
+	u8 vf_legacy;
+#define ECORE_QCID_LEGACY_VF_RX_PROD	(1 << 0)
+#define ECORE_QCID_LEGACY_VF_CID	(1 << 1)
 
 	struct ecore_hwfn *p_owner;
 };
diff --git a/drivers/net/qede/base/ecore_proto_if.h b/drivers/net/qede/base/ecore_proto_if.h
index 226e3d2..5d4b2b3 100644
--- a/drivers/net/qede/base/ecore_proto_if.h
+++ b/drivers/net/qede/base/ecore_proto_if.h
@@ -22,6 +22,10 @@ struct ecore_eth_pf_params {
 	 */
 	u16	num_cons;
 
+	/* per-VF number of CIDs */
+	u8	num_vf_cons;
+#define ETH_PF_PARAMS_VF_CONS_DEFAULT	(32)
+
 	/* To enable arfs, previous to HW-init a positive number needs to be
 	 * set [as filters require allocated searcher ILT memory].
 	 * This will set the maximal number of configured steering-filters.
diff --git a/drivers/net/qede/base/ecore_sriov.c b/drivers/net/qede/base/ecore_sriov.c
index cb3f4c3..0886560 100644
--- a/drivers/net/qede/base/ecore_sriov.c
+++ b/drivers/net/qede/base/ecore_sriov.c
@@ -53,9 +53,26 @@
 	"CHANNEL_TLV_VPORT_UPDATE_SGE_TPA",
 	"CHANNEL_TLV_UPDATE_TUNN_PARAM",
 	"CHANNEL_TLV_COALESCE_UPDATE",
+	"CHANNEL_TLV_QID",
 	"CHANNEL_TLV_MAX"
 };
 
+static u8 ecore_vf_calculate_legacy(struct ecore_hwfn *p_hwfn,
+				    struct ecore_vf_info *p_vf)
+{
+	u8 legacy = 0;
+
+	if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
+	    ETH_HSI_VER_NO_PKT_LEN_TUNN)
+		legacy |= ECORE_QCID_LEGACY_VF_RX_PROD;
+
+	if (!(p_vf->acquire.vfdev_info.capabilities &
+	     VFPF_ACQUIRE_CAP_QUEUE_QIDS))
+		legacy |= ECORE_QCID_LEGACY_VF_CID;
+
+	return legacy;
+}
+
 /* IOV ramrods */
 static enum _ecore_status_t ecore_sp_vf_start(struct ecore_hwfn *p_hwfn,
 					      struct ecore_vf_info *p_vf)
@@ -1558,6 +1575,10 @@ static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
 	p_resp->num_vlan_filters = OSAL_MIN_T(u8, p_vf->num_vlan_filters,
 					      p_req->num_vlan_filters);
 
+	p_resp->num_cids =
+		OSAL_MIN_T(u8, p_req->num_cids,
+			   p_hwfn->pf_params.eth_pf_params.num_vf_cons);
+
 	/* This isn't really needed/enforced, but some legacy VFs might depend
 	 * on the correct filling of this field.
 	 */
@@ -1569,18 +1590,18 @@ static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
 	    p_resp->num_sbs < p_req->num_sbs ||
 	    p_resp->num_mac_filters < p_req->num_mac_filters ||
 	    p_resp->num_vlan_filters < p_req->num_vlan_filters ||
-	    p_resp->num_mc_filters < p_req->num_mc_filters) {
+	    p_resp->num_mc_filters < p_req->num_mc_filters ||
+	    p_resp->num_cids < p_req->num_cids) {
 		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
-			   "VF[%d] - Insufficient resources: rxq [%02x/%02x]"
-			   " txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x]"
-			   " vlan [%02x/%02x] mc [%02x/%02x]\n",
+			   "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]\n",
 			   p_vf->abs_vf_id,
 			   p_req->num_rxqs, p_resp->num_rxqs,
 			   p_req->num_txqs, p_resp->num_txqs,
 			   p_req->num_sbs, p_resp->num_sbs,
 			   p_req->num_mac_filters, p_resp->num_mac_filters,
 			   p_req->num_vlan_filters, p_resp->num_vlan_filters,
-			   p_req->num_mc_filters, p_resp->num_mc_filters);
+			   p_req->num_mc_filters, p_resp->num_mc_filters,
+			   p_req->num_cids, p_resp->num_cids);
 
 		/* Some legacy OSes are incapable of correctly handling this
 		 * failure.
@@ -1715,6 +1736,12 @@ static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn       *p_hwfn,
 	if (p_hwfn->p_dev->num_hwfns > 1)
 		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;
 
+	/* Share our ability to use multiple queue-ids only with VFs
+	 * that request it.
+	 */
+	if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS)
+		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_QUEUE_QIDS;
+
 	ecore_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info);
 
 	OSAL_MEMCPY(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr,
@@ -2158,6 +2185,42 @@ static void ecore_iov_vf_mbx_start_rxq_resp(struct ecore_hwfn *p_hwfn,
 	ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
 }
 
+static u8 ecore_iov_vf_mbx_qid(struct ecore_hwfn *p_hwfn,
+			       struct ecore_vf_info *p_vf, bool b_is_tx)
+{
+	struct ecore_iov_vf_mbx *p_mbx = &p_vf->vf_mbx;
+	struct vfpf_qid_tlv *p_qid_tlv;
+
+	/* Search for the qid if the VF published it is going to provide it */
+	if (!(p_vf->acquire.vfdev_info.capabilities &
+	      VFPF_ACQUIRE_CAP_QUEUE_QIDS)) {
+		if (b_is_tx)
+			return ECORE_IOV_LEGACY_QID_TX;
+		else
+			return ECORE_IOV_LEGACY_QID_RX;
+	}
+
+	p_qid_tlv = (struct vfpf_qid_tlv *)
+		    ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
+					       CHANNEL_TLV_QID);
+	if (p_qid_tlv == OSAL_NULL) {
+		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+			   "VF[%2x]: Failed to provide qid\n",
+			   p_vf->relative_vf_id);
+
+		return ECORE_IOV_QID_INVALID;
+	}
+
+	if (p_qid_tlv->qid >= MAX_QUEUES_PER_QZONE) {
+		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+			   "VF[%02x]: Provided qid out-of-bounds %02x\n",
+			   p_vf->relative_vf_id, p_qid_tlv->qid);
+		return ECORE_IOV_QID_INVALID;
+	}
+
+	return p_qid_tlv->qid;
+}
+
 static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
 				       struct ecore_ptt *p_ptt,
 				       struct ecore_vf_info *vf)
@@ -2166,11 +2229,10 @@ static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
 	struct ecore_queue_cid_vf_params vf_params;
 	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
 	u8 status = PFVF_STATUS_NO_RESOURCE;
+	u8 qid_usage_idx, vf_legacy = 0;
 	struct ecore_vf_queue *p_queue;
 	struct vfpf_start_rxq_tlv *req;
 	struct ecore_queue_cid *p_cid;
-	bool b_legacy_vf = false;
-	u8 qid_usage_idx;
 	enum _ecore_status_t rc;
 
 	req = &mbx->req_virt->start_rxq;
@@ -2180,18 +2242,17 @@ static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
 	    !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
 		goto out;
 
-	/* Legacy VFs made assumptions on the CID their queues connected to,
-	 * assuming queue X used CID X.
-	 * TODO - need to validate that there was no official release post
-	 * the current legacy scheme that still made that assumption.
-	 */
-	if (vf->acquire.vfdev_info.eth_fp_hsi_minor ==
-	    ETH_HSI_VER_NO_PKT_LEN_TUNN)
-		b_legacy_vf = true;
+	qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
+	if (qid_usage_idx == ECORE_IOV_QID_INVALID)
+		goto out;
 
-	/* Acquire a new queue-cid */
 	p_queue = &vf->vf_queues[req->rx_qid];
+	if (p_queue->cids[qid_usage_idx].p_cid)
+		goto out;
+
+	vf_legacy = ecore_vf_calculate_legacy(p_hwfn, vf);
 
+	/* Acquire a new queue-cid */
 	OSAL_MEMSET(&params, 0, sizeof(params));
 	params.queue_id = (u8)p_queue->fw_rx_qid;
 	params.vport_id = vf->vport_id;
@@ -2199,15 +2260,10 @@ static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
 	params.sb = req->hw_sb;
 	params.sb_idx = req->sb_index;
 
-	/* TODO - set qid_usage_idx according to extended TLV. For now, use
-	 * '0' for Rx.
-	 */
-	qid_usage_idx = 0;
-
 	OSAL_MEM_ZERO(&vf_params, sizeof(vf_params));
 	vf_params.vfid = vf->relative_vf_id;
 	vf_params.vf_qid = (u8)req->rx_qid;
-	vf_params.b_legacy = b_legacy_vf;
+	vf_params.vf_legacy = vf_legacy;
 	vf_params.qid_usage_idx = qid_usage_idx;
 
 	p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
@@ -2218,7 +2274,7 @@ static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
 	/* Legacy VFs have their Producers in a different location, which they
 	 * calculate on their own and clean the producer prior to this.
 	 */
-	if (!b_legacy_vf)
+	if (!(vf_legacy & ECORE_QCID_LEGACY_VF_RX_PROD))
 		REG_WR(p_hwfn,
 		       GTT_BAR0_MAP_REG_MSDM_RAM +
 		       MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
@@ -2241,7 +2297,8 @@ static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
 
 out:
 	ecore_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status,
-					b_legacy_vf);
+					!!(vf_legacy &
+					   ECORE_QCID_LEGACY_VF_RX_PROD));
 }
 
 static void
@@ -2443,8 +2500,7 @@ static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
 	struct ecore_vf_queue *p_queue;
 	struct vfpf_start_txq_tlv *req;
 	struct ecore_queue_cid *p_cid;
-	bool b_legacy_vf = false;
-	u8 qid_usage_idx;
+	u8 qid_usage_idx, vf_legacy;
 	u32 cid = 0;
 	enum _ecore_status_t rc;
 	u16 pq;
@@ -2457,35 +2513,27 @@ static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
 	    !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
 		goto out;
 
-	/* In case this is a legacy VF - need to know to use the right cids.
-	 * TODO - need to validate that there was no official release post
-	 * the current legacy scheme that still made that assumption.
-	 */
-	if (vf->acquire.vfdev_info.eth_fp_hsi_minor ==
-	    ETH_HSI_VER_NO_PKT_LEN_TUNN)
-		b_legacy_vf = true;
+	qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, true);
+	if (qid_usage_idx == ECORE_IOV_QID_INVALID)
+		goto out;
 
-	/* Acquire a new queue-cid */
 	p_queue = &vf->vf_queues[req->tx_qid];
+	if (p_queue->cids[qid_usage_idx].p_cid)
+		goto out;
+
+	vf_legacy = ecore_vf_calculate_legacy(p_hwfn, vf);
 
+	/* Acquire a new queue-cid */
 	params.queue_id = p_queue->fw_tx_qid;
 	params.vport_id = vf->vport_id;
 	params.stats_id = vf->abs_vf_id + 0x10;
 	params.sb = req->hw_sb;
 	params.sb_idx = req->sb_index;
 
-	/* TODO - set qid_usage_idx according to extended TLV. For now, use
-	 * '1' for Tx.
-	 */
-	qid_usage_idx = 1;
-
-	if (p_queue->cids[qid_usage_idx].p_cid)
-		goto out;
-
 	OSAL_MEM_ZERO(&vf_params, sizeof(vf_params));
 	vf_params.vfid = vf->relative_vf_id;
 	vf_params.vf_qid = (u8)req->tx_qid;
-	vf_params.b_legacy = b_legacy_vf;
+	vf_params.vf_legacy = vf_legacy;
 	vf_params.qid_usage_idx = qid_usage_idx;
 
 	p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
@@ -2515,80 +2563,74 @@ static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
 static enum _ecore_status_t ecore_iov_vf_stop_rxqs(struct ecore_hwfn *p_hwfn,
 						   struct ecore_vf_info *vf,
 						   u16 rxq_id,
-						   u8 num_rxqs,
+						   u8 qid_usage_idx,
 						   bool cqe_completion)
 {
+	struct ecore_vf_queue *p_queue;
 	enum _ecore_status_t rc = ECORE_SUCCESS;
-	int qid, i;
 
-	/* TODO - improve validation [wrap around] */
-	if (rxq_id + num_rxqs > OSAL_ARRAY_SIZE(vf->vf_queues))
+	if (!ecore_iov_validate_rxq(p_hwfn, vf, rxq_id,
+				    ECORE_IOV_VALIDATE_Q_NA)) {
+		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+			   "VF[%d] Tried Closing Rx 0x%04x.%02x which is inactive\n",
+			   vf->relative_vf_id, rxq_id, qid_usage_idx);
 		return ECORE_INVAL;
+	}
 
-	for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) {
-		struct ecore_vf_queue *p_queue = &vf->vf_queues[qid];
-		struct ecore_queue_cid **pp_cid = OSAL_NULL;
-
-		/* There can be at most a single Rx per qzone. Find it */
-		for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
-			if (p_queue->cids[i].p_cid &&
-			    !p_queue->cids[i].b_is_tx) {
-				pp_cid = &p_queue->cids[i].p_cid;
-				break;
-			}
-		}
-		if (pp_cid == OSAL_NULL) {
-			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
-				   "Ignoring VF[%02x] request of closing Rx queue %04x - closed\n",
-				   vf->relative_vf_id, qid);
-			continue;
-		}
+	p_queue = &vf->vf_queues[rxq_id];
 
-		rc = ecore_eth_rx_queue_stop(p_hwfn, *pp_cid,
-					     false, cqe_completion);
-		if (rc != ECORE_SUCCESS)
-			return rc;
+	/* We've validated the index and the existence of the active RXQ -
+	 * now we need to make sure that it's using the correct qid.
+	 */
+	if (!p_queue->cids[qid_usage_idx].p_cid ||
+	    p_queue->cids[qid_usage_idx].b_is_tx) {
+		struct ecore_queue_cid *p_cid;
 
-		*pp_cid = OSAL_NULL;
-		vf->num_active_rxqs--;
+		p_cid = ecore_iov_get_vf_rx_queue_cid(p_hwfn, vf, p_queue);
+		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+			   "VF[%d] - Tried Closing Rx 0x%04x.%02x, but Rx is at %04x.%02x\n",
+			    vf->relative_vf_id, rxq_id, qid_usage_idx,
+			    rxq_id, p_cid->qid_usage_idx);
+		return ECORE_INVAL;
 	}
 
-	return rc;
+	/* Now that we know we have a valid Rx-queue - close it */
+	rc = ecore_eth_rx_queue_stop(p_hwfn,
+				     p_queue->cids[qid_usage_idx].p_cid,
+				     false, cqe_completion);
+	if (rc != ECORE_SUCCESS)
+		return rc;
+
+	p_queue->cids[qid_usage_idx].p_cid = OSAL_NULL;
+	vf->num_active_rxqs--;
+
+	return ECORE_SUCCESS;
 }
 
 static enum _ecore_status_t ecore_iov_vf_stop_txqs(struct ecore_hwfn *p_hwfn,
 						   struct ecore_vf_info *vf,
-						   u16 txq_id, u8 num_txqs)
+						   u16 txq_id,
+						   u8 qid_usage_idx)
 {
-	enum _ecore_status_t rc = ECORE_SUCCESS;
 	struct ecore_vf_queue *p_queue;
-	int qid, j;
+	enum _ecore_status_t rc = ECORE_SUCCESS;
 
 	if (!ecore_iov_validate_txq(p_hwfn, vf, txq_id,
-				    ECORE_IOV_VALIDATE_Q_NA) ||
-	    !ecore_iov_validate_txq(p_hwfn, vf, txq_id + num_txqs,
 				    ECORE_IOV_VALIDATE_Q_NA))
 		return ECORE_INVAL;
 
-	for (qid = txq_id; qid < txq_id + num_txqs; qid++) {
-		p_queue = &vf->vf_queues[qid];
-		for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) {
-			if (p_queue->cids[j].p_cid == OSAL_NULL)
-				continue;
-
-			if (!p_queue->cids[j].b_is_tx)
-				continue;
-
-			rc = ecore_eth_tx_queue_stop(p_hwfn,
-						     p_queue->cids[j].p_cid);
-			if (rc != ECORE_SUCCESS)
-				return rc;
+	p_queue = &vf->vf_queues[txq_id];
+	if (!p_queue->cids[qid_usage_idx].p_cid ||
+	    !p_queue->cids[qid_usage_idx].b_is_tx)
+		return ECORE_INVAL;
 
-			p_queue->cids[j].p_cid = OSAL_NULL;
-		}
-	}
+	rc = ecore_eth_tx_queue_stop(p_hwfn,
+				     p_queue->cids[qid_usage_idx].p_cid);
+	if (rc != ECORE_SUCCESS)
+		return rc;
 
-	return rc;
+	p_queue->cids[qid_usage_idx].p_cid = OSAL_NULL;
+	return ECORE_SUCCESS;
 }
 
 static void ecore_iov_vf_mbx_stop_rxqs(struct ecore_hwfn *p_hwfn,
@@ -2597,20 +2639,34 @@ static void ecore_iov_vf_mbx_stop_rxqs(struct ecore_hwfn *p_hwfn,
 {
 	u16 length = sizeof(struct pfvf_def_resp_tlv);
 	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
-	u8 status = PFVF_STATUS_SUCCESS;
+	u8 status = PFVF_STATUS_FAILURE;
 	struct vfpf_stop_rxqs_tlv *req;
+	u8 qid_usage_idx;
 	enum _ecore_status_t rc;
 
-	/* We give the option of starting from qid != 0, in this case we
-	 * need to make sure that qid + num_qs doesn't exceed the actual
-	 * amount of queues that exist.
+	/* Starting with CHANNEL_TLV_QID, it's assumed the 'num_rxqs'
+	 * would be one. Since no older ecore passed multiple queues
+	 * using this API, sanitize the value.
 	 */
 	req = &mbx->req_virt->stop_rxqs;
-	rc = ecore_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
-				    req->num_rxqs, req->cqe_completion);
-	if (rc)
-		status = PFVF_STATUS_FAILURE;
+	if (req->num_rxqs != 1) {
+		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+			   "Odd; VF[%d] tried stopping multiple Rx queues\n",
+			   vf->relative_vf_id);
+		status = PFVF_STATUS_NOT_SUPPORTED;
+		goto out;
+	}
 
+	/* Find which qid-index is associated with the queue */
+	qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
+	if (qid_usage_idx == ECORE_IOV_QID_INVALID)
+		goto out;
+
+	rc = ecore_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
+				    qid_usage_idx, req->cqe_completion);
+	if (rc == ECORE_SUCCESS)
+		status = PFVF_STATUS_SUCCESS;
+out:
 	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
 			       length, status);
 }
@@ -2621,19 +2677,35 @@ static void ecore_iov_vf_mbx_stop_txqs(struct ecore_hwfn *p_hwfn,
 {
 	u16 length = sizeof(struct pfvf_def_resp_tlv);
 	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
-	u8 status = PFVF_STATUS_SUCCESS;
+	u8 status = PFVF_STATUS_FAILURE;
 	struct vfpf_stop_txqs_tlv *req;
+	u8 qid_usage_idx;
 	enum _ecore_status_t rc;
 
-	/* We give the option of starting from qid != 0, in this case we
-	 * need to make sure that qid + num_qs doesn't exceed the actual
-	 * amount of queues that exist.
+	/* Starting with CHANNEL_TLV_QID, it's assumed the 'num_txqs'
+	 * would be one. Since no older ecore passed multiple queues
+	 * using this API, sanitize the value.
 	 */
 	req = &mbx->req_virt->stop_txqs;
-	rc = ecore_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, req->num_txqs);
-	if (rc)
-		status = PFVF_STATUS_FAILURE;
+	if (req->num_txqs != 1) {
+		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+			   "Odd; VF[%d] tried stopping multiple Tx queues\n",
+			   vf->relative_vf_id);
+		status = PFVF_STATUS_NOT_SUPPORTED;
+		goto out;
+	}
 
+	/* Find which qid-index is associated with the queue */
+	qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, true);
+	if (qid_usage_idx == ECORE_IOV_QID_INVALID)
+		goto out;
+
+	rc = ecore_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid,
+				    qid_usage_idx);
+	if (rc == ECORE_SUCCESS)
+		status = PFVF_STATUS_SUCCESS;
+
+out:
 	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
 			       length, status);
 }
@@ -2649,6 +2721,7 @@ static void ecore_iov_vf_mbx_update_rxqs(struct ecore_hwfn *p_hwfn,
 	u8 status = PFVF_STATUS_FAILURE;
 	u8 complete_event_flg;
 	u8 complete_cqe_flg;
+	u8 qid_usage_idx;
 	enum _ecore_status_t rc;
 	u16 i;
 
@@ -2656,10 +2729,30 @@ static void ecore_iov_vf_mbx_update_rxqs(struct ecore_hwfn *p_hwfn,
 	complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
 	complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
 
-	/* Validate inputs */
+	qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
+	if (qid_usage_idx == ECORE_IOV_QID_INVALID)
+		goto out;
+
+	/* Starting with the addition of CHANNEL_TLV_QID, this API started
+	 * expecting a single queue at a time. Validate this.
+	 */
+	if ((vf->acquire.vfdev_info.capabilities &
+	     VFPF_ACQUIRE_CAP_QUEUE_QIDS) &&
+	     req->num_rxqs != 1) {
+		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+			   "VF[%d] supports QIDs but sends multiple queues\n",
+			   vf->relative_vf_id);
+		goto out;
+	}
+
+	/* Validate inputs - for the legacy case this is still true since
+	 * qid_usage_idx for each Rx queue would be LEGACY_QID_RX.
+	 */
 	for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) {
 		if (!ecore_iov_validate_rxq(p_hwfn, vf, i,
-					    ECORE_IOV_VALIDATE_Q_ENABLE)) {
+					    ECORE_IOV_VALIDATE_Q_NA) ||
+		    !vf->vf_queues[i].cids[qid_usage_idx].p_cid ||
+		    vf->vf_queues[i].cids[qid_usage_idx].b_is_tx) {
 			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 				   "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
 				   vf->relative_vf_id, req->rx_qid,
@@ -2669,12 +2762,9 @@ static void ecore_iov_vf_mbx_update_rxqs(struct ecore_hwfn *p_hwfn,
 	}
 
 	for (i = 0; i < req->num_rxqs; i++) {
-		struct ecore_vf_queue *p_queue;
 		u16 qid = req->rx_qid + i;
 
-		p_queue = &vf->vf_queues[qid];
-		handlers[i] = ecore_iov_get_vf_rx_queue_cid(p_hwfn, vf,
-							    p_queue);
+		handlers[i] = vf->vf_queues[qid].cids[qid_usage_idx].p_cid;
 	}
 
 	rc = ecore_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers,
@@ -2683,7 +2773,7 @@ static void ecore_iov_vf_mbx_update_rxqs(struct ecore_hwfn *p_hwfn,
 					   complete_event_flg,
 					   ECORE_SPQ_MODE_EBLOCK,
 					   OSAL_NULL);
-	if (rc)
+	if (rc != ECORE_SUCCESS)
 		goto out;
 
 	status = PFVF_STATUS_SUCCESS;
diff --git a/drivers/net/qede/base/ecore_sriov.h b/drivers/net/qede/base/ecore_sriov.h
index 5eb3484..1750f0d 100644
--- a/drivers/net/qede/base/ecore_sriov.h
+++ b/drivers/net/qede/base/ecore_sriov.h
@@ -63,6 +63,10 @@ struct ecore_iov_vf_mbx {
 					 */
 };
 
+#define ECORE_IOV_LEGACY_QID_RX (0)
+#define ECORE_IOV_LEGACY_QID_TX (1)
+#define ECORE_IOV_QID_INVALID (0xFE)
+
 struct ecore_vf_queue_cid {
 	bool b_is_tx;
 	struct ecore_queue_cid *p_cid;
diff --git a/drivers/net/qede/base/ecore_vf.c b/drivers/net/qede/base/ecore_vf.c
index 7a52621..e4e2517 100644
--- a/drivers/net/qede/base/ecore_vf.c
+++ b/drivers/net/qede/base/ecore_vf.c
@@ -135,22 +135,36 @@ static void ecore_vf_pf_req_end(struct ecore_hwfn *p_hwfn,
 	return rc;
 }
 
+static void ecore_vf_pf_add_qid(struct ecore_hwfn *p_hwfn,
+				struct ecore_queue_cid *p_cid)
+{
+	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+	struct vfpf_qid_tlv *p_qid_tlv;
+
+	/* Only add QIDs for the queue if it was negotiated with PF */
+	if (!(p_iov->acquire_resp.pfdev_info.capabilities &
+	      PFVF_ACQUIRE_CAP_QUEUE_QIDS))
+		return;
+
+	p_qid_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
+				  CHANNEL_TLV_QID, sizeof(*p_qid_tlv));
+	p_qid_tlv->qid = p_cid->qid_usage_idx;
+}
+
 #define VF_ACQUIRE_THRESH 3
 static void ecore_vf_pf_acquire_reduce_resc(struct ecore_hwfn *p_hwfn,
 					    struct vf_pf_resc_request *p_req,
 					    struct pf_vf_resc *p_resp)
 {
 	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
-		   "PF unwilling to fullill resource request: rxq [%02x/%02x]"
-		   " txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x]"
-		   " vlan [%02x/%02x] mc [%02x/%02x]."
-		   " Try PF recommended amount\n",
+		   "PF unwilling to fulfill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]. Try PF recommended amount\n",
 		   p_req->num_rxqs, p_resp->num_rxqs,
 		   p_req->num_txqs, p_resp->num_txqs,
 		   p_req->num_sbs, p_resp->num_sbs,
 		   p_req->num_mac_filters, p_resp->num_mac_filters,
 		   p_req->num_vlan_filters, p_resp->num_vlan_filters,
-		   p_req->num_mc_filters, p_resp->num_mc_filters);
+		   p_req->num_mc_filters, p_resp->num_mc_filters,
+		   p_req->num_cids, p_resp->num_cids);
 
 	/* humble our request */
 	p_req->num_txqs = p_resp->num_txqs;
@@ -159,6 +173,7 @@ static void ecore_vf_pf_acquire_reduce_resc(struct ecore_hwfn *p_hwfn,
 	p_req->num_mac_filters = p_resp->num_mac_filters;
 	p_req->num_vlan_filters = p_resp->num_vlan_filters;
 	p_req->num_mc_filters = p_resp->num_mc_filters;
+	p_req->num_cids = p_resp->num_cids;
 }
 
 static enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)
@@ -185,6 +200,7 @@ static enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)
 	p_resc->num_sbs = ECORE_MAX_VF_CHAINS_PER_PF;
 	p_resc->num_mac_filters = ECORE_ETH_VF_NUM_MAC_FILTERS;
 	p_resc->num_vlan_filters = ECORE_ETH_VF_NUM_VLAN_FILTERS;
+	p_resc->num_cids = ECORE_ETH_VF_DEFAULT_NUM_CIDS;
 
 	OSAL_MEMSET(&vf_sw_info, 0, sizeof(vf_sw_info));
 	OSAL_VF_FILL_ACQUIRE_RESC_REQ(p_hwfn, &req->resc_request, &vf_sw_info);
@@ -310,6 +326,15 @@ static enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)
 	    VFPF_ACQUIRE_CAP_PRE_FP_HSI)
 		p_iov->b_pre_fp_hsi = true;
 
+	/* In case PF doesn't support multi-queue Tx, update the number of
+	 * CIDs to reflect the number of queues [older PFs didn't fill that
+	 * field].
+	 */
+	if (!(resp->pfdev_info.capabilities &
+	      PFVF_ACQUIRE_CAP_QUEUE_QIDS))
+		resp->resc.num_cids = resp->resc.num_rxqs +
+				      resp->resc.num_txqs;
+
 	rc = OSAL_VF_UPDATE_ACQUIRE_RESC_RESP(p_hwfn, &resp->resc);
 	if (rc) {
 		DP_NOTICE(p_hwfn, true,
@@ -649,6 +674,8 @@ enum _ecore_status_t
 				  (u32 *)(&init_prod_val));
 	}
 
+	ecore_vf_pf_add_qid(p_hwfn, p_cid);
+
 	/* add list termination tlv */
 	ecore_add_tlv(p_hwfn, &p_iov->offset,
 		      CHANNEL_TLV_LIST_END,
@@ -704,6 +731,8 @@ enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn *p_hwfn,
 	req->num_rxqs = 1;
 	req->cqe_completion = cqe_completion;
 
+	ecore_vf_pf_add_qid(p_hwfn, p_cid);
+
 	/* add list termination tlv */
 	ecore_add_tlv(p_hwfn, &p_iov->offset,
 		      CHANNEL_TLV_LIST_END,
@@ -748,6 +777,8 @@ enum _ecore_status_t
 	req->hw_sb = p_cid->rel.sb;
 	req->sb_index = p_cid->rel.sb_idx;
 
+	ecore_vf_pf_add_qid(p_hwfn, p_cid);
+
 	/* add list termination tlv */
 	ecore_add_tlv(p_hwfn, &p_iov->offset,
 		      CHANNEL_TLV_LIST_END,
@@ -799,6 +830,8 @@ enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn,
 	req->tx_qid = p_cid->rel.queue_id;
 	req->num_txqs = 1;
 
+	ecore_vf_pf_add_qid(p_hwfn, p_cid);
+
 	/* add list termination tlv */
 	ecore_add_tlv(p_hwfn, &p_iov->offset,
 		      CHANNEL_TLV_LIST_END,
@@ -831,32 +864,30 @@ enum _ecore_status_t ecore_vf_pf_rxqs_update(struct ecore_hwfn *p_hwfn,
 	struct vfpf_update_rxq_tlv *req;
 	enum _ecore_status_t rc;
 
-	/* TODO - API is limited to assuming continuous regions of queues,
-	 * but VF queues might not fullfil this requirement.
-	 * Need to consider whether we need new TLVs for this, or whether
-	 * simply doing it iteratively is good enough.
+	/* Starting with CHANNEL_TLV_QID and the need for additional queue
+	 * information, this API stopped supporting multiple rxqs.
+	 * TODO - remove this and change the API to accept a single queue-cid
+	 * in a follow-up patch.
 	 */
-	if (!num_rxqs)
+	if (num_rxqs != 1) {
+		DP_NOTICE(p_hwfn, true,
+			  "VFs can no longer update more than a single queue\n");
 		return ECORE_INVAL;
+	}
 
-again:
 	/* clear mailbox and prep first tlv */
 	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_RXQ, sizeof(*req));
 
-	/* Find the length of the current contagious range of queues beginning
-	 * at first queue's index.
-	 */
 	req->rx_qid = (*pp_cid)->rel.queue_id;
-	for (req->num_rxqs = 1; req->num_rxqs < num_rxqs; req->num_rxqs++)
-		if (pp_cid[req->num_rxqs]->rel.queue_id !=
-		    req->rx_qid + req->num_rxqs)
-			break;
+	req->num_rxqs = 1;
 
 	if (comp_cqe_flg)
 		req->flags |= VFPF_RXQ_UPD_COMPLETE_CQE_FLAG;
 	if (comp_event_flg)
 		req->flags |= VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG;
 
+	ecore_vf_pf_add_qid(p_hwfn, *pp_cid);
+
 	/* add list termination tlv */
 	ecore_add_tlv(p_hwfn, &p_iov->offset,
 		      CHANNEL_TLV_LIST_END,
@@ -871,15 +902,6 @@ enum _ecore_status_t ecore_vf_pf_rxqs_update(struct ecore_hwfn *p_hwfn,
 		goto exit;
 	}
 
-	/* Make sure we're done with all the queues */
-	if (req->num_rxqs < num_rxqs) {
-		num_rxqs -= req->num_rxqs;
-		pp_cid += req->num_rxqs;
-		/* TODO - should we give a non-locked variant instead? */
-		ecore_vf_pf_req_end(p_hwfn, rc);
-		goto again;
-	}
-
 exit:
 	ecore_vf_pf_req_end(p_hwfn, rc);
 	return rc;
diff --git a/drivers/net/qede/base/ecore_vf.h b/drivers/net/qede/base/ecore_vf.h
index f471388..4096d5d 100644
--- a/drivers/net/qede/base/ecore_vf.h
+++ b/drivers/net/qede/base/ecore_vf.h
@@ -14,6 +14,11 @@
 #include "ecore_l2_api.h"
 #include "ecore_vfpf_if.h"
 
+/* Default number of CIDs [total of both Rx and Tx] for a VF to request
+ * in its ACQUIRE message.
+ */
+#define ECORE_ETH_VF_DEFAULT_NUM_CIDS	(32)
+
 /* This data is held in the ecore_hwfn structure for VFs only. */
 struct ecore_vf_iov {
 	union vfpf_tlvs			*vf2pf_request;
diff --git a/drivers/net/qede/base/ecore_vfpf_if.h b/drivers/net/qede/base/ecore_vfpf_if.h
index 6618442..4df5619 100644
--- a/drivers/net/qede/base/ecore_vfpf_if.h
+++ b/drivers/net/qede/base/ecore_vfpf_if.h
@@ -19,13 +19,14 @@
  *
  **/
 struct vf_pf_resc_request {
-	u8  num_rxqs;
-	u8  num_txqs;
-	u8  num_sbs;
-	u8  num_mac_filters;
-	u8  num_vlan_filters;
-	u8  num_mc_filters; /* No limit  so superfluous */
-	u16 padding;
+	u8 num_rxqs;
+	u8 num_txqs;
+	u8 num_sbs;
+	u8 num_mac_filters;
+	u8 num_vlan_filters;
+	u8 num_mc_filters; /* No limit, so superfluous */
+	u8 num_cids;
+	u8 padding;
 };
 
 struct hw_sb_info {
@@ -92,6 +93,14 @@ struct vfpf_acquire_tlv {
 /* VF pre-FP hsi version */
 #define VFPF_ACQUIRE_CAP_PRE_FP_HSI	(1 << 0)
 #define VFPF_ACQUIRE_CAP_100G		(1 << 1) /* VF can support 100g */
+
+	/* A requirement for supporting multiple Tx queues on a single
+	 * queue-zone: the VF would pass qids as additional information
+	 * whenever passing queue references.
+	 * TODO - due to the CID limitations in Bar0, VFs currently don't pass
+	 * this, and use the legacy CID scheme.
+	 */
+#define VFPF_ACQUIRE_CAP_QUEUE_QIDS	(1 << 2)
 		u64 capabilities;
 		u8 fw_major;
 		u8 fw_minor;
@@ -170,6 +179,9 @@ struct pfvf_acquire_resp_tlv {
 #endif
 #define PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE	(1 << 2)
 
+	/* PF expects queues to be received with additional qids */
+#define PFVF_ACQUIRE_CAP_QUEUE_QIDS		(1 << 3)
+
 		u16 db_size;
 		u8  indices_per_sb;
 		u8 os_type;
@@ -210,7 +222,8 @@ struct pfvf_acquire_resp_tlv {
 		u8      num_mac_filters;
 		u8      num_vlan_filters;
 		u8      num_mc_filters;
-		u8      padding[2];
+		u8	num_cids;
+		u8      padding;
 	} resc;
 
 	u32 bulletin_size;
@@ -223,6 +236,16 @@ struct pfvf_start_queue_resp_tlv {
 	u8 padding[4];
 };
 
+/* Extended queue information - additional index for reference inside qzone.
+ * If communicated between VF/PF, each TLV relating to queues should be
+ * extended by one such [or have a future base TLV that already contains info].
+ */
+struct vfpf_qid_tlv {
+	struct channel_tlv	tl;
+	u8			qid;
+	u8			padding[3];
+};
+
 /* Setup Queue */
 struct vfpf_start_rxq_tlv {
 	struct vfpf_first_tlv	first_tlv;
@@ -265,7 +288,15 @@ struct vfpf_stop_rxqs_tlv {
 	struct vfpf_first_tlv	first_tlv;
 
 	u16			rx_qid;
+
+	/* While the API supports multiple Rx-queues on a single TLV
+	 * message, in practice older [ecore-based] VFs always set it to
+	 * one. PFs starting with CHANNEL_TLV_QID support assume it is
+	 * always '1', so in practice this field should be considered
+	 * deprecated and *always* set to '1'.
+	 */
 	u8			num_rxqs;
+
 	u8			cqe_completion;
 	u8			padding[4];
 };
@@ -275,6 +306,13 @@ struct vfpf_stop_txqs_tlv {
 	struct vfpf_first_tlv	first_tlv;
 
 	u16			tx_qid;
+
+	/* While the API supports multiple Tx-queues on a single TLV
+	 * message, in practice older [ecore-based] VFs always set it to
+	 * one. PFs starting with CHANNEL_TLV_QID support assume it is
+	 * always '1', so in practice this field should be considered
+	 * deprecated and *always* set to '1'.
+	 */
 	u8			num_txqs;
 	u8			padding[5];
 };
@@ -605,6 +643,7 @@ enum {
 	CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
 	CHANNEL_TLV_UPDATE_TUNN_PARAM,
 	CHANNEL_TLV_COALESCE_UPDATE,
+	CHANNEL_TLV_QID,
 	CHANNEL_TLV_MAX,
 
 	/* Required for iterating over vport-update tlvs.
-- 
1.7.10.3