[dpdk-dev] [PATCH v2 20/61] net/qede/base: qm initialization revamp

Rasesh Mody rasesh.mody at cavium.com
Sat Mar 18 08:05:45 CET 2017


This patch revamps queue initialization.

Signed-off-by: Rasesh Mody <rasesh.mody at cavium.com>
---
 drivers/net/qede/base/bcm_osal.h    |    2 +
 drivers/net/qede/base/ecore.h       |   34 +-
 drivers/net/qede/base/ecore_cxt.c   |   14 +-
 drivers/net/qede/base/ecore_dev.c   |  869 ++++++++++++++++++++++++-----------
 drivers/net/qede/base/ecore_hw.c    |   38 --
 drivers/net/qede/base/ecore_l2.c    |   12 +-
 drivers/net/qede/base/ecore_l2.h    |    2 +-
 drivers/net/qede/base/ecore_spq.c   |    9 +-
 drivers/net/qede/base/ecore_sriov.c |   13 +-
 9 files changed, 645 insertions(+), 348 deletions(-)

diff --git a/drivers/net/qede/base/bcm_osal.h b/drivers/net/qede/base/bcm_osal.h
index 0d239c9..63ee6d5 100644
--- a/drivers/net/qede/base/bcm_osal.h
+++ b/drivers/net/qede/base/bcm_osal.h
@@ -320,6 +320,8 @@ u32 qede_find_first_zero_bit(unsigned long *, u32);
 #define OSAL_BUILD_BUG_ON(cond)		nothing
 #define ETH_ALEN			ETHER_ADDR_LEN
 
+#define OSAL_BITMAP_WEIGHT(bitmap, count) 0
+
 #define OSAL_LINK_UPDATE(hwfn) qed_link_update(hwfn)
 #define OSAL_DCBX_AEN(hwfn, mib_type) nothing
 
diff --git a/drivers/net/qede/base/ecore.h b/drivers/net/qede/base/ecore.h
index 842a3b5..58c97a3 100644
--- a/drivers/net/qede/base/ecore.h
+++ b/drivers/net/qede/base/ecore.h
@@ -445,11 +445,13 @@ struct ecore_qm_info {
 	struct init_qm_port_params  *qm_port_params;
 	u16			start_pq;
 	u8			start_vport;
-	u8			pure_lb_pq;
-	u8			offload_pq;
-	u8			pure_ack_pq;
-	u8			ooo_pq;
-	u8			vf_queues_offset;
+	u16			pure_lb_pq;
+	u16			offload_pq;
+	u16			pure_ack_pq;
+	u16			ooo_pq;
+	u16			first_vf_pq;
+	u16			first_mcos_pq;
+	u16			first_rl_pq;
 	u16			num_pqs;
 	u16			num_vf_pqs;
 	u8			num_vports;
@@ -828,6 +830,28 @@ int ecore_device_num_ports(struct ecore_dev *p_dev);
 void ecore_set_fw_mac_addr(__le16 *fw_msb, __le16 *fw_mid, __le16 *fw_lsb,
 			   u8 *mac);
 
+/* Flags for indication of required queues */
+#define PQ_FLAGS_RLS	(1 << 0)
+#define PQ_FLAGS_MCOS	(1 << 1)
+#define PQ_FLAGS_LB	(1 << 2)
+#define PQ_FLAGS_OOO	(1 << 3)
+#define PQ_FLAGS_ACK    (1 << 4)
+#define PQ_FLAGS_OFLD	(1 << 5)
+#define PQ_FLAGS_VFS	(1 << 6)
+
+/* physical queue index for cm context initialization */
+u16 ecore_get_cm_pq_idx(struct ecore_hwfn *p_hwfn, u32 pq_flags);
+u16 ecore_get_cm_pq_idx_mcos(struct ecore_hwfn *p_hwfn, u8 tc);
+u16 ecore_get_cm_pq_idx_vf(struct ecore_hwfn *p_hwfn, u16 vf);
+u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u8 qpid);
+
+/* amount of resources used in qm init */
+u8 ecore_init_qm_get_num_tcs(struct ecore_hwfn *p_hwfn);
+u16 ecore_init_qm_get_num_vfs(struct ecore_hwfn *p_hwfn);
+u16 ecore_init_qm_get_num_pf_rls(struct ecore_hwfn *p_hwfn);
+u16 ecore_init_qm_get_num_vports(struct ecore_hwfn *p_hwfn);
+u16 ecore_init_qm_get_num_pqs(struct ecore_hwfn *p_hwfn);
+
 #define ECORE_LEADING_HWFN(dev)	(&dev->hwfns[0])
 
 #endif /* __ECORE_H */
diff --git a/drivers/net/qede/base/ecore_cxt.c b/drivers/net/qede/base/ecore_cxt.c
index 2635030..aeeabf1 100644
--- a/drivers/net/qede/base/ecore_cxt.c
+++ b/drivers/net/qede/base/ecore_cxt.c
@@ -1372,18 +1372,10 @@ void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn)
 }
 
 /* CM PF */
-static enum _ecore_status_t ecore_cm_init_pf(struct ecore_hwfn *p_hwfn)
+void ecore_cm_init_pf(struct ecore_hwfn *p_hwfn)
 {
-	union ecore_qm_pq_params pq_params;
-	u16 pq;
-
-	/* XCM pure-LB queue */
-	OSAL_MEMSET(&pq_params, 0, sizeof(pq_params));
-	pq_params.core.tc = LB_TC;
-	pq = ecore_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
-	STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET, pq);
-
-	return ECORE_SUCCESS;
+	STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET,
+		     ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB));
 }
 
 /* DQ PF */
diff --git a/drivers/net/qede/base/ecore_dev.c b/drivers/net/qede/base/ecore_dev.c
index e2d4132..380c5ba 100644
--- a/drivers/net/qede/base/ecore_dev.c
+++ b/drivers/net/qede/base/ecore_dev.c
@@ -178,282 +178,575 @@ void ecore_resc_free(struct ecore_dev *p_dev)
 	}
 }
 
-static enum _ecore_status_t ecore_init_qm_info(struct ecore_hwfn *p_hwfn,
-					       bool b_sleepable)
+/******************** QM initialization *******************/
+
+/* bitmaps for indicating active traffic classes.
+ * Special case for Arrowhead 4 port
+ */
+/* 0..3 actually used, 4 serves OOO, 7 serves high priority stuff (e.g. DCQCN) */
+#define ACTIVE_TCS_BMAP 0x9f
+/* 0..3 actually used, OOO and high priority stuff all use 3 */
+#define ACTIVE_TCS_BMAP_4PORT_K2 0xf
+
+/* determines the physical queue flags for a given PF. */
+static u32 ecore_get_pq_flags(struct ecore_hwfn *p_hwfn)
 {
-	u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue;
-	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
-	struct init_qm_port_params *p_qm_port;
-	bool init_rdma_offload_pq = false;
-	bool init_pure_ack_pq = false;
-	bool init_ooo_pq = false;
-	u16 num_pqs, protocol_pqs;
-	u16 num_pf_rls = 0;
-	u16 num_vfs = 0;
-	u32 pf_rl;
-	u8 pf_wfq;
-
-	/* @TMP - saving the existing min/max bw config before resetting the
-	 * qm_info to restore them.
-	 */
-	pf_rl = qm_info->pf_rl;
-	pf_wfq = qm_info->pf_wfq;
+	u32 flags;
 
-#ifdef CONFIG_ECORE_SRIOV
-	if (p_hwfn->p_dev->p_iov_info)
-		num_vfs = p_hwfn->p_dev->p_iov_info->total_vfs;
-#endif
-	OSAL_MEM_ZERO(qm_info, sizeof(*qm_info));
+	/* common flags */
+	flags = PQ_FLAGS_LB;
 
-#ifndef ASIC_ONLY
-	/* @TMP - Don't allocate QM queues for VFs on emulation */
-	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
-		DP_NOTICE(p_hwfn, false,
-			  "Emulation - skip configuring QM queues for VFs\n");
-		num_vfs = 0;
+	/* feature flags */
+	if (IS_ECORE_SRIOV(p_hwfn->p_dev))
+		flags |= PQ_FLAGS_VFS;
+
+	/* protocol flags */
+	switch (p_hwfn->hw_info.personality) {
+	case ECORE_PCI_ETH:
+		flags |= PQ_FLAGS_MCOS;
+		break;
+	case ECORE_PCI_FCOE:
+		flags |= PQ_FLAGS_OFLD;
+		break;
+	case ECORE_PCI_ISCSI:
+		flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD;
+		break;
+	case ECORE_PCI_ETH_ROCE:
+		flags |= PQ_FLAGS_MCOS | PQ_FLAGS_OFLD;
+		break;
+	case ECORE_PCI_ETH_IWARP:
+		flags |= PQ_FLAGS_MCOS | PQ_FLAGS_ACK | PQ_FLAGS_OOO |
+			 PQ_FLAGS_OFLD;
+		break;
+	default:
+		DP_ERR(p_hwfn, "unknown personality %d\n",
+		       p_hwfn->hw_info.personality);
+		return 0;
 	}
-#endif
+	return flags;
+}
 
-	/* ethernet PFs require a pq per tc. Even if only a subset of the TCs
-	 * active, we want physical queues allocated for all of them, since we
-	 * don't have a good recycle flow. Non ethernet PFs require only a
-	 * single physical queue.
-	 */
-	if (ECORE_IS_L2_PERSONALITY(p_hwfn))
-		protocol_pqs = p_hwfn->hw_info.num_hw_tc;
-	else
-		protocol_pqs = 1;
-
-	num_pqs = protocol_pqs + num_vfs + 1;	/* The '1' is for pure-LB */
-	num_vports = (u8)RESC_NUM(p_hwfn, ECORE_VPORT);
-
-	if (ECORE_IS_ROCE_PERSONALITY(p_hwfn)) {
-		num_pqs++;	/* for RoCE queue */
-		init_rdma_offload_pq = true;
-		if (p_hwfn->pf_params.rdma_pf_params.enable_dcqcn) {
-			/* Due to FW assumption that rl==vport, we limit the
-			 * number of rate limiters by the minimum between its
-			 * allocated number and the allocated number of vports.
-			 * Another limitation is the number of supported qps
-			 * with rate limiters in FW.
-			 */
-			num_pf_rls =
-			    (u16)OSAL_MIN_T(u32, RESC_NUM(p_hwfn, ECORE_RL),
-					     RESC_NUM(p_hwfn, ECORE_VPORT));
+/* Getters for resource amounts necessary for qm initialization */
+u8 ecore_init_qm_get_num_tcs(struct ecore_hwfn *p_hwfn)
+{
+	return p_hwfn->hw_info.num_hw_tc;
+}
 
-			/* we subtract num_vfs because each one requires a rate
-			 * limiter, and one default rate limiter.
-			 */
-			if (num_pf_rls < num_vfs + 1) {
-				DP_ERR(p_hwfn, "No RL for DCQCN");
-				DP_ERR(p_hwfn, "[num_pf_rls %d num_vfs %d]\n",
-				       num_pf_rls, num_vfs);
-				return ECORE_INVAL;
-			}
-			num_pf_rls -= num_vfs + 1;
-		}
+u16 ecore_init_qm_get_num_vfs(struct ecore_hwfn *p_hwfn)
+{
+	return IS_ECORE_SRIOV(p_hwfn->p_dev) ?
+			p_hwfn->p_dev->p_iov_info->total_vfs : 0;
+}
 
-		num_pqs += num_pf_rls;
-		qm_info->num_pf_rls = (u8)num_pf_rls;
-	}
+#define NUM_DEFAULT_RLS 1
 
-	if (ECORE_IS_IWARP_PERSONALITY(p_hwfn)) {
-		num_pqs += 3;	/* for iwarp queue / pure-ack / ooo */
-		init_rdma_offload_pq = true;
-		init_pure_ack_pq = true;
-		init_ooo_pq = true;
-	}
+u16 ecore_init_qm_get_num_pf_rls(struct ecore_hwfn *p_hwfn)
+{
+	u16 num_pf_rls, num_vfs = ecore_init_qm_get_num_vfs(p_hwfn);
 
-	if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) {
-		num_pqs += 2;	/* for iSCSI pure-ACK / OOO queue */
-		init_pure_ack_pq = true;
-		init_ooo_pq = true;
-	}
+	/* @DPDK */
+	/* num RLs can't exceed resource amount of rls or vports or the
+	 * dcqcn qps
+	 */
+	num_pf_rls = (u16)OSAL_MIN_T(u32, RESC_NUM(p_hwfn, ECORE_RL),
+				     (u16)RESC_NUM(p_hwfn, ECORE_VPORT));
 
-	/* Sanity checking that setup requires legal number of resources */
-	if (num_pqs > RESC_NUM(p_hwfn, ECORE_PQ)) {
-		DP_ERR(p_hwfn,
-		       "Need too many Physical queues - 0x%04x avail %04x",
-		       num_pqs, RESC_NUM(p_hwfn, ECORE_PQ));
-		return ECORE_INVAL;
+	/* make sure after we reserve the default and VF rls we'll have
+	 * something left
+	 */
+	if (num_pf_rls < num_vfs + NUM_DEFAULT_RLS) {
+		DP_NOTICE(p_hwfn, false,
+			  "no rate limiters left for PF rate limiting"
+			  " [num_pf_rls %d num_vfs %d]\n", num_pf_rls, num_vfs);
+		return 0;
 	}
 
-	/* PQs will be arranged as follows: First per-TC PQ, then pure-LB queue,
-	 * then special queues (iSCSI pure-ACK / RoCE), then per-VF PQ.
+	/* subtract rls necessary for VFs and one default one for the PF */
+	num_pf_rls -= num_vfs + NUM_DEFAULT_RLS;
+
+	return num_pf_rls;
+}
+
+u16 ecore_init_qm_get_num_vports(struct ecore_hwfn *p_hwfn)
+{
+	u32 pq_flags = ecore_get_pq_flags(p_hwfn);
+
+	/* all pqs share the same vport (hence the 1 below), except for vfs
+	 * and pf_rl pqs
 	 */
-	qm_info->qm_pq_params = OSAL_ZALLOC(p_hwfn->p_dev,
-					    b_sleepable ? GFP_KERNEL :
-					    GFP_ATOMIC,
-					    sizeof(struct init_qm_pq_params) *
-					    num_pqs);
-	if (!qm_info->qm_pq_params)
-		goto alloc_err;
+	return (!!(PQ_FLAGS_RLS & pq_flags)) *
+		ecore_init_qm_get_num_pf_rls(p_hwfn) +
+	       (!!(PQ_FLAGS_VFS & pq_flags)) *
+		ecore_init_qm_get_num_vfs(p_hwfn) + 1;
+}
 
-	qm_info->qm_vport_params = OSAL_ZALLOC(p_hwfn->p_dev,
-					       b_sleepable ? GFP_KERNEL :
-					       GFP_ATOMIC,
-					       sizeof(struct
-						      init_qm_vport_params) *
-					       num_vports);
-	if (!qm_info->qm_vport_params)
-		goto alloc_err;
+/* calc amount of PQs according to the requested flags */
+u16 ecore_init_qm_get_num_pqs(struct ecore_hwfn *p_hwfn)
+{
+	u32 pq_flags = ecore_get_pq_flags(p_hwfn);
+
+	return (!!(PQ_FLAGS_RLS & pq_flags)) *
+		ecore_init_qm_get_num_pf_rls(p_hwfn) +
+	       (!!(PQ_FLAGS_MCOS & pq_flags)) *
+		ecore_init_qm_get_num_tcs(p_hwfn) +
+	       (!!(PQ_FLAGS_LB & pq_flags)) +
+	       (!!(PQ_FLAGS_OOO & pq_flags)) +
+	       (!!(PQ_FLAGS_ACK & pq_flags)) +
+	       (!!(PQ_FLAGS_OFLD & pq_flags)) +
+	       (!!(PQ_FLAGS_VFS & pq_flags)) *
+		ecore_init_qm_get_num_vfs(p_hwfn);
+}
 
-	qm_info->qm_port_params = OSAL_ZALLOC(p_hwfn->p_dev,
-					      b_sleepable ? GFP_KERNEL :
-					      GFP_ATOMIC,
-					      sizeof(struct init_qm_port_params)
-					      * MAX_NUM_PORTS);
-	if (!qm_info->qm_port_params)
-		goto alloc_err;
+/* initialize the top level QM params */
+static void ecore_init_qm_params(struct ecore_hwfn *p_hwfn)
+{
+	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
 
-	qm_info->wfq_data = OSAL_ZALLOC(p_hwfn->p_dev,
-					b_sleepable ? GFP_KERNEL :
-					GFP_ATOMIC,
-					sizeof(struct ecore_wfq_data) *
-					num_vports);
+	/* pq and vport bases for this PF */
+	qm_info->start_pq = (u16)RESC_START(p_hwfn, ECORE_PQ);
+	qm_info->start_vport = (u8)RESC_START(p_hwfn, ECORE_VPORT);
 
-	if (!qm_info->wfq_data)
-		goto alloc_err;
+	/* rate limiting and weighted fair queueing are always enabled */
+	qm_info->vport_rl_en = 1;
+	qm_info->vport_wfq_en = 1;
 
-	vport_id = (u8)RESC_START(p_hwfn, ECORE_VPORT);
+	/* in AH 4 port we have fewer TCs per port */
+	qm_info->max_phys_tcs_per_port =
+		p_hwfn->p_dev->num_ports_in_engines == MAX_NUM_PORTS_K2 ?
+			NUM_PHYS_TCS_4PORT_K2 : NUM_OF_PHYS_TCS;
+}
 
-	/* First init rate limited queues ( Due to RoCE assumption of
-	 * qpid=rlid )
-	 */
-	for (curr_queue = 0; curr_queue < num_pf_rls; curr_queue++) {
-		qm_info->qm_pq_params[curr_queue].vport_id = vport_id++;
-		qm_info->qm_pq_params[curr_queue].tc_id =
-		    p_hwfn->hw_info.offload_tc;
-		qm_info->qm_pq_params[curr_queue].wrr_group = 1;
-		qm_info->qm_pq_params[curr_queue].rl_valid = 1;
-	};
-
-	/* Protocol PQs */
-	for (i = 0; i < protocol_pqs; i++) {
-		struct init_qm_pq_params *params =
-		    &qm_info->qm_pq_params[curr_queue++];
-
-		if (ECORE_IS_L2_PERSONALITY(p_hwfn)) {
-			params->vport_id = vport_id;
-			params->tc_id = i;
-			/* Note: this assumes that if we had a configuration
-			 * with N tcs and subsequently another configuration
-			 * With Fewer TCs, the in flight traffic (in QM queues,
-			 * in FW, from driver to FW) will still trickle out and
-			 * not get "stuck" in the QM. This is determined by the
-			 * NIG_REG_TX_ARB_CLIENT_IS_SUBJECT2WFQ. Unused TCs are
-			 * supposed to be cleared in this map, allowing traffic
-			 * to flush out. If this is not the case, we would need
-			 * to set the TC of unused queues to 0, and reconfigure
-			 * QM every time num of TCs changes. Unused queues in
-			 * this context would mean those intended for TCs where
-			 * tc_id > hw_info.num_active_tcs.
-			 */
-			params->wrr_group = 1;	/* @@@TBD ECORE_WRR_MEDIUM */
-		} else {
-			params->vport_id = vport_id;
-			params->tc_id = p_hwfn->hw_info.offload_tc;
-			params->wrr_group = 1;	/* @@@TBD ECORE_WRR_MEDIUM */
-		}
-	}
+/* initialize qm vport params */
+static void ecore_init_qm_vport_params(struct ecore_hwfn *p_hwfn)
+{
+	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+	u8 i;
 
-	/* Then init pure-LB PQ */
-	qm_info->pure_lb_pq = curr_queue;
-	qm_info->qm_pq_params[curr_queue].vport_id =
-	    (u8)RESC_START(p_hwfn, ECORE_VPORT);
-	qm_info->qm_pq_params[curr_queue].tc_id = PURE_LB_TC;
-	qm_info->qm_pq_params[curr_queue].wrr_group = 1;
-	curr_queue++;
-
-	qm_info->offload_pq = 0;	/* Already initialized for iSCSI/FCoE */
-	if (init_rdma_offload_pq) {
-		qm_info->offload_pq = curr_queue;
-		qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
-		qm_info->qm_pq_params[curr_queue].tc_id =
-		    p_hwfn->hw_info.offload_tc;
-		qm_info->qm_pq_params[curr_queue].wrr_group = 1;
-		curr_queue++;
-	}
-
-	if (init_pure_ack_pq) {
-		qm_info->pure_ack_pq = curr_queue;
-		qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
-		qm_info->qm_pq_params[curr_queue].tc_id =
-		    p_hwfn->hw_info.offload_tc;
-		qm_info->qm_pq_params[curr_queue].wrr_group = 1;
-		curr_queue++;
-	}
-
-	if (init_ooo_pq) {
-		qm_info->ooo_pq = curr_queue;
-		qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
-		qm_info->qm_pq_params[curr_queue].tc_id = DCBX_ISCSI_OOO_TC;
-		qm_info->qm_pq_params[curr_queue].wrr_group = 1;
-		curr_queue++;
-	}
-
-	/* Then init per-VF PQs */
-	vf_offset = curr_queue;
-	for (i = 0; i < num_vfs; i++) {
-		/* First vport is used by the PF */
-		qm_info->qm_pq_params[curr_queue].vport_id = vport_id + i + 1;
-		/* @@@TBD VF Multi-cos */
-		qm_info->qm_pq_params[curr_queue].tc_id = 0;
-		qm_info->qm_pq_params[curr_queue].wrr_group = 1;
-		qm_info->qm_pq_params[curr_queue].rl_valid = 1;
-		curr_queue++;
-	};
-
-	qm_info->vf_queues_offset = vf_offset;
-	qm_info->num_pqs = num_pqs;
-	qm_info->num_vports = num_vports;
+	/* all vports participate in weighted fair queueing */
+	for (i = 0; i < ecore_init_qm_get_num_vports(p_hwfn); i++)
+		qm_info->qm_vport_params[i].vport_wfq = 1;
+}
 
+/* initialize qm port params */
+static void ecore_init_qm_port_params(struct ecore_hwfn *p_hwfn)
+{
 	/* Initialize qm port parameters */
-	num_ports = p_hwfn->p_dev->num_ports_in_engines;
+	u8 i, active_phys_tcs, num_ports = p_hwfn->p_dev->num_ports_in_engines;
+
+	/* indicate how ooo and high pri traffic is dealt with */
+	active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ?
+		ACTIVE_TCS_BMAP_4PORT_K2 : ACTIVE_TCS_BMAP;
+
 	for (i = 0; i < num_ports; i++) {
-		p_qm_port = &qm_info->qm_port_params[i];
+		struct init_qm_port_params *p_qm_port =
+			&p_hwfn->qm_info.qm_port_params[i];
+
 		p_qm_port->active = 1;
-		/* @@@TMP - was NUM_OF_PHYS_TCS; Changed until dcbx will
-		 * be in place
-		 */
-		if (num_ports == 4)
-			p_qm_port->active_phys_tcs = 0xf;
-		else
-			p_qm_port->active_phys_tcs = 0x9f;
+		p_qm_port->active_phys_tcs = active_phys_tcs;
 		p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
 		p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
 	}
+}
 
-	if (ECORE_IS_AH(p_hwfn->p_dev) && (num_ports == 4))
-		qm_info->max_phys_tcs_per_port = NUM_PHYS_TCS_4PORT_K2;
-	else
-		qm_info->max_phys_tcs_per_port = NUM_OF_PHYS_TCS;
+/* Reset the params which must be reset for qm init. QM init may be called as
+ * a result of flows other than driver load (e.g. dcbx renegotiation). Other
+ * params may be affected by the init but would simply recalculate to the same
+ * values. The allocations made for QM init, ports, vports, pqs and vfqs are not
+ * affected as these amounts stay the same.
+ */
+static void ecore_init_qm_reset_params(struct ecore_hwfn *p_hwfn)
+{
+	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
 
-	qm_info->start_pq = (u16)RESC_START(p_hwfn, ECORE_PQ);
+	qm_info->num_pqs = 0;
+	qm_info->num_vports = 0;
+	qm_info->num_pf_rls = 0;
+	qm_info->num_vf_pqs = 0;
+	qm_info->first_vf_pq = 0;
+	qm_info->first_mcos_pq = 0;
+	qm_info->first_rl_pq = 0;
+}
+
+static void ecore_init_qm_advance_vport(struct ecore_hwfn *p_hwfn)
+{
+	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+
+	qm_info->num_vports++;
+
+	if (qm_info->num_vports > ecore_init_qm_get_num_vports(p_hwfn))
+		DP_ERR(p_hwfn,
+		       "vport overflow! qm_info->num_vports %d,"
+		       " qm_init_get_num_vports() %d\n",
+		       qm_info->num_vports,
+		       ecore_init_qm_get_num_vports(p_hwfn));
+}
+
+/* initialize a single pq and manage qm_info resources accounting.
+ * The pq_init_flags param determines whether the PQ is rate limited
+ * (for VF or PF)
+ * and whether a new vport is allocated to the pq or not (i.e. vport will be
+ * shared)
+ */
+
+/* flags for pq init */
+#define PQ_INIT_SHARE_VPORT	(1 << 0)
+#define PQ_INIT_PF_RL		(1 << 1)
+#define PQ_INIT_VF_RL		(1 << 2)
+
+/* defines for pq init */
+#define PQ_INIT_DEFAULT_WRR_GROUP	1
+#define PQ_INIT_DEFAULT_TC		0
+#define PQ_INIT_OFLD_TC			(p_hwfn->hw_info.offload_tc)
+
+static void ecore_init_qm_pq(struct ecore_hwfn *p_hwfn,
+			     struct ecore_qm_info *qm_info,
+			     u8 tc, u32 pq_init_flags)
+{
+	u16 pq_idx = qm_info->num_pqs, max_pq =
+					ecore_init_qm_get_num_pqs(p_hwfn);
+
+	if (pq_idx > max_pq)
+		DP_ERR(p_hwfn,
+		       "pq overflow! pq %d, max pq %d\n", pq_idx, max_pq);
+
+	/* init pq params */
+	qm_info->qm_pq_params[pq_idx].vport_id = qm_info->start_vport +
+						 qm_info->num_vports;
+	qm_info->qm_pq_params[pq_idx].tc_id = tc;
+	qm_info->qm_pq_params[pq_idx].wrr_group = PQ_INIT_DEFAULT_WRR_GROUP;
+	qm_info->qm_pq_params[pq_idx].rl_valid =
+		(pq_init_flags & PQ_INIT_PF_RL ||
+		 pq_init_flags & PQ_INIT_VF_RL);
+
+	/* qm params accounting */
+	qm_info->num_pqs++;
+	if (!(pq_init_flags & PQ_INIT_SHARE_VPORT))
+		qm_info->num_vports++;
+
+	if (pq_init_flags & PQ_INIT_PF_RL)
+		qm_info->num_pf_rls++;
+
+	if (qm_info->num_vports > ecore_init_qm_get_num_vports(p_hwfn))
+		DP_ERR(p_hwfn,
+		       "vport overflow! qm_info->num_vports %d,"
+		       " qm_init_get_num_vports() %d\n",
+		       qm_info->num_vports,
+		       ecore_init_qm_get_num_vports(p_hwfn));
+
+	if (qm_info->num_pf_rls > ecore_init_qm_get_num_pf_rls(p_hwfn))
+		DP_ERR(p_hwfn, "rl overflow! qm_info->num_pf_rls %d,"
+		       " qm_init_get_num_pf_rls() %d\n",
+		       qm_info->num_pf_rls,
+		       ecore_init_qm_get_num_pf_rls(p_hwfn));
+}
+
+/* get pq index according to PQ_FLAGS */
+static u16 *ecore_init_qm_get_idx_from_flags(struct ecore_hwfn *p_hwfn,
+					     u32 pq_flags)
+{
+	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+
+	/* Can't have multiple flags set here */
+	if (OSAL_BITMAP_WEIGHT((unsigned long *)&pq_flags,
+				sizeof(pq_flags)) > 1)
+		goto err;
+
+	switch (pq_flags) {
+	case PQ_FLAGS_RLS:
+		return &qm_info->first_rl_pq;
+	case PQ_FLAGS_MCOS:
+		return &qm_info->first_mcos_pq;
+	case PQ_FLAGS_LB:
+		return &qm_info->pure_lb_pq;
+	case PQ_FLAGS_OOO:
+		return &qm_info->ooo_pq;
+	case PQ_FLAGS_ACK:
+		return &qm_info->pure_ack_pq;
+	case PQ_FLAGS_OFLD:
+		return &qm_info->offload_pq;
+	case PQ_FLAGS_VFS:
+		return &qm_info->first_vf_pq;
+	default:
+		goto err;
+	}
+
+err:
+	DP_ERR(p_hwfn, "BAD pq flags %d\n", pq_flags);
+	return OSAL_NULL;
+}
+
+/* save pq index in qm info */
+static void ecore_init_qm_set_idx(struct ecore_hwfn *p_hwfn,
+				  u32 pq_flags, u16 pq_val)
+{
+	u16 *base_pq_idx = ecore_init_qm_get_idx_from_flags(p_hwfn, pq_flags);
+
+	*base_pq_idx = p_hwfn->qm_info.start_pq + pq_val;
+}
+
+/* get tx pq index, with the PQ TX base already set (ready for context init) */
+u16 ecore_get_cm_pq_idx(struct ecore_hwfn *p_hwfn, u32 pq_flags)
+{
+	u16 *base_pq_idx = ecore_init_qm_get_idx_from_flags(p_hwfn, pq_flags);
+
+	return *base_pq_idx + CM_TX_PQ_BASE;
+}
+
+u16 ecore_get_cm_pq_idx_mcos(struct ecore_hwfn *p_hwfn, u8 tc)
+{
+	u8 max_tc = ecore_init_qm_get_num_tcs(p_hwfn);
+
+	if (tc > max_tc)
+		DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc);
+
+	return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + tc;
+}
+
+u16 ecore_get_cm_pq_idx_vf(struct ecore_hwfn *p_hwfn, u16 vf)
+{
+	u16 max_vf = ecore_init_qm_get_num_vfs(p_hwfn);
+
+	if (vf > max_vf)
+		DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf);
+
+	return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf;
+}
+
+u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u8 rl)
+{
+	u16 max_rl = ecore_init_qm_get_num_pf_rls(p_hwfn);
+
+	if (rl > max_rl)
+		DP_ERR(p_hwfn, "rl %d must be smaller than %d\n", rl, max_rl);
+
+	return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_RLS) + rl;
+}
+
+/* Functions for creating specific types of pqs */
+static void ecore_init_qm_lb_pq(struct ecore_hwfn *p_hwfn)
+{
+	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+
+	if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_LB))
+		return;
+
+	ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_LB, qm_info->num_pqs);
+	ecore_init_qm_pq(p_hwfn, qm_info, PURE_LB_TC, PQ_INIT_SHARE_VPORT);
+}
+
+static void ecore_init_qm_ooo_pq(struct ecore_hwfn *p_hwfn)
+{
+	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+
+	if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_OOO))
+		return;
+
+	ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_OOO, qm_info->num_pqs);
+	ecore_init_qm_pq(p_hwfn, qm_info, DCBX_ISCSI_OOO_TC,
+			 PQ_INIT_SHARE_VPORT);
+}
+
+static void ecore_init_qm_pure_ack_pq(struct ecore_hwfn *p_hwfn)
+{
+	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+
+	if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_ACK))
+		return;
+
+	ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_ACK, qm_info->num_pqs);
+	ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
+}
+
+static void ecore_init_qm_offload_pq(struct ecore_hwfn *p_hwfn)
+{
+	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+
+	if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_OFLD))
+		return;
+
+	ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_OFLD, qm_info->num_pqs);
+	ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
+}
+
+static void ecore_init_qm_mcos_pqs(struct ecore_hwfn *p_hwfn)
+{
+	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+	u8 tc_idx;
+
+	if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_MCOS))
+		return;
+
+	ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_MCOS, qm_info->num_pqs);
+	for (tc_idx = 0; tc_idx < ecore_init_qm_get_num_tcs(p_hwfn); tc_idx++)
+		ecore_init_qm_pq(p_hwfn, qm_info, tc_idx, PQ_INIT_SHARE_VPORT);
+}
+
+static void ecore_init_qm_vf_pqs(struct ecore_hwfn *p_hwfn)
+{
+	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+	u16 vf_idx, num_vfs = ecore_init_qm_get_num_vfs(p_hwfn);
+
+	if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_VFS))
+		return;
+
+	ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_VFS, qm_info->num_pqs);
 
 	qm_info->num_vf_pqs = num_vfs;
-	qm_info->start_vport = (u8)RESC_START(p_hwfn, ECORE_VPORT);
+	for (vf_idx = 0; vf_idx < num_vfs; vf_idx++)
+		ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_DEFAULT_TC,
+				 PQ_INIT_VF_RL);
+}
 
-	for (i = 0; i < qm_info->num_vports; i++)
-		qm_info->qm_vport_params[i].vport_wfq = 1;
+static void ecore_init_qm_rl_pqs(struct ecore_hwfn *p_hwfn)
+{
+	u16 pf_rls_idx, num_pf_rls = ecore_init_qm_get_num_pf_rls(p_hwfn);
+	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
 
-	qm_info->vport_rl_en = 1;
-	qm_info->vport_wfq_en = 1;
-	qm_info->pf_rl = pf_rl;
-	qm_info->pf_wfq = pf_wfq;
+	if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_RLS))
+		return;
+
+	ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_RLS, qm_info->num_pqs);
+	for (pf_rls_idx = 0; pf_rls_idx < num_pf_rls; pf_rls_idx++)
+		ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC,
+				 PQ_INIT_PF_RL);
+}
+
+static void ecore_init_qm_pq_params(struct ecore_hwfn *p_hwfn)
+{
+	/* rate limited pqs, must come first (FW assumption) */
+	ecore_init_qm_rl_pqs(p_hwfn);
+
+	/* pqs for multi cos */
+	ecore_init_qm_mcos_pqs(p_hwfn);
+
+	/* pure loopback pq */
+	ecore_init_qm_lb_pq(p_hwfn);
+
+	/* out of order pq */
+	ecore_init_qm_ooo_pq(p_hwfn);
+
+	/* pure ack pq */
+	ecore_init_qm_pure_ack_pq(p_hwfn);
+
+	/* pq for offloaded protocol */
+	ecore_init_qm_offload_pq(p_hwfn);
+
+	/* done sharing vports */
+	ecore_init_qm_advance_vport(p_hwfn);
+
+	/* pqs for vfs */
+	ecore_init_qm_vf_pqs(p_hwfn);
+}
+
+/* compare values of getters against resources amounts */
+static enum _ecore_status_t ecore_init_qm_sanity(struct ecore_hwfn *p_hwfn)
+{
+	if (ecore_init_qm_get_num_vports(p_hwfn) >
+	    RESC_NUM(p_hwfn, ECORE_VPORT)) {
+		DP_ERR(p_hwfn, "requested amount of vports exceeds resource\n");
+		return ECORE_INVAL;
+	}
+
+	if (ecore_init_qm_get_num_pqs(p_hwfn) > RESC_NUM(p_hwfn, ECORE_PQ)) {
+		DP_ERR(p_hwfn, "requested amount of pqs exceeds resource\n");
+		return ECORE_INVAL;
+	}
 
 	return ECORE_SUCCESS;
+}
 
- alloc_err:
-	DP_NOTICE(p_hwfn, false, "Failed to allocate memory for QM params\n");
-	ecore_qm_info_free(p_hwfn);
-	return ECORE_NOMEM;
+/*
+ * Function for verbose printing of the qm initialization results
+ */
+static void ecore_dp_init_qm_params(struct ecore_hwfn *p_hwfn)
+{
+	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+	struct init_qm_vport_params *vport;
+	struct init_qm_port_params *port;
+	struct init_qm_pq_params *pq;
+	int i, tc;
+
+	/* top level params */
+	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+		   "qm init top level params: start_pq %d, start_vport %d,"
+		   " pure_lb_pq %d, offload_pq %d, pure_ack_pq %d\n",
+		   qm_info->start_pq, qm_info->start_vport, qm_info->pure_lb_pq,
+		   qm_info->offload_pq, qm_info->pure_ack_pq);
+	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+		   "ooo_pq %d, first_vf_pq %d, num_pqs %d, num_vf_pqs %d,"
+		   " num_vports %d, max_phys_tcs_per_port %d\n",
+		   qm_info->ooo_pq, qm_info->first_vf_pq, qm_info->num_pqs,
+		   qm_info->num_vf_pqs, qm_info->num_vports,
+		   qm_info->max_phys_tcs_per_port);
+	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+		   "pf_rl_en %d, pf_wfq_en %d, vport_rl_en %d, vport_wfq_en %d,"
+		   " pf_wfq %d, pf_rl %d, num_pf_rls %d, pq_flags %x\n",
+		   qm_info->pf_rl_en, qm_info->pf_wfq_en, qm_info->vport_rl_en,
+		   qm_info->vport_wfq_en, qm_info->pf_wfq, qm_info->pf_rl,
+		   qm_info->num_pf_rls, ecore_get_pq_flags(p_hwfn));
+
+	/* port table */
+	for (i = 0; i < p_hwfn->p_dev->num_ports_in_engines; i++) {
+		port = &qm_info->qm_port_params[i];
+		DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+			   "port idx %d, active %d, active_phys_tcs %d,"
+			   " num_pbf_cmd_lines %d, num_btb_blocks %d,"
+			   " reserved %d\n",
+			   i, port->active, port->active_phys_tcs,
+			   port->num_pbf_cmd_lines, port->num_btb_blocks,
+			   port->reserved);
+	}
+
+	/* vport table */
+	for (i = 0; i < qm_info->num_vports; i++) {
+		vport = &qm_info->qm_vport_params[i];
+		DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+			   "vport idx %d, vport_rl %d, wfq %d,"
+			   " first_tx_pq_id [ ",
+			   qm_info->start_vport + i, vport->vport_rl,
+			   vport->vport_wfq);
+		for (tc = 0; tc < NUM_OF_TCS; tc++)
+			DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "%d ",
+				   vport->first_tx_pq_id[tc]);
+		DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "]\n");
+	}
+
+	/* pq table */
+	for (i = 0; i < qm_info->num_pqs; i++) {
+		pq = &qm_info->qm_pq_params[i];
+		DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+			   "pq idx %d, vport_id %d, tc %d, wrr_grp %d,"
+			   " rl_valid %d\n",
+			   qm_info->start_pq + i, pq->vport_id, pq->tc_id,
+			   pq->wrr_group, pq->rl_valid);
+	}
+}
+
+static void ecore_init_qm_info(struct ecore_hwfn *p_hwfn)
+{
+	/* reset params required for init run */
+	ecore_init_qm_reset_params(p_hwfn);
+
+	/* init QM top level params */
+	ecore_init_qm_params(p_hwfn);
+
+	/* init QM port params */
+	ecore_init_qm_port_params(p_hwfn);
+
+	/* init QM vport params */
+	ecore_init_qm_vport_params(p_hwfn);
+
+	/* init QM physical queue params */
+	ecore_init_qm_pq_params(p_hwfn);
+
+	/* display all that init */
+	ecore_dp_init_qm_params(p_hwfn);
 }
 
 /* This function reconfigures the QM pf on the fly.
  * For this purpose we:
  * 1. reconfigure the QM database
- * 2. set new values to runtime arrat
+ * 2. set new values to runtime array
  * 3. send an sdm_qm_cmd through the rbc interface to stop the QM
  * 4. activate init tool in QM_PF stage
  * 5. send an sdm_qm_cmd through rbc interface to release the QM
@@ -462,20 +755,11 @@ enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn,
 				     struct ecore_ptt *p_ptt)
 {
 	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
-	bool b_rc;
 	enum _ecore_status_t rc;
-
-	/* qm_info is allocated in ecore_init_qm_info() which is already called
-	 * from ecore_resc_alloc() or previous call of ecore_qm_reconf().
-	 * The allocated size may change each init, so we free it before next
-	 * allocation.
-	 */
-	ecore_qm_info_free(p_hwfn);
+	bool b_rc;
 
 	/* initialize ecore's qm data structure */
-	rc = ecore_init_qm_info(p_hwfn, false);
-	if (rc != ECORE_SUCCESS)
-		return rc;
+	ecore_init_qm_info(p_hwfn);
 
 	/* stop PF's qm queues */
 	OSAL_SPIN_LOCK(&qm_lock);
@@ -508,6 +792,48 @@ enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn,
 	return ECORE_SUCCESS;
 }
 
+static enum _ecore_status_t ecore_alloc_qm_data(struct ecore_hwfn *p_hwfn)
+{
+	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+	enum _ecore_status_t rc;
+
+	rc = ecore_init_qm_sanity(p_hwfn);
+	if (rc != ECORE_SUCCESS)
+		goto alloc_err;
+
+	qm_info->qm_pq_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+					    sizeof(struct init_qm_pq_params) *
+					    ecore_init_qm_get_num_pqs(p_hwfn));
+	if (!qm_info->qm_pq_params)
+		goto alloc_err;
+
+	qm_info->qm_vport_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+				       sizeof(struct init_qm_vport_params) *
+				       ecore_init_qm_get_num_vports(p_hwfn));
+	if (!qm_info->qm_vport_params)
+		goto alloc_err;
+
+	qm_info->qm_port_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+				      sizeof(struct init_qm_port_params) *
+				      p_hwfn->p_dev->num_ports_in_engines);
+	if (!qm_info->qm_port_params)
+		goto alloc_err;
+
+	qm_info->wfq_data = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+					sizeof(struct ecore_wfq_data) *
+					ecore_init_qm_get_num_vports(p_hwfn));
+	if (!qm_info->wfq_data)
+		goto alloc_err;
+
+	return ECORE_SUCCESS;
+
+alloc_err:
+	DP_NOTICE(p_hwfn, false, "Failed to allocate memory for QM params\n");
+	ecore_qm_info_free(p_hwfn);
+	return ECORE_NOMEM;
+}
+/******************** End QM initialization ***************/
+
 enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
 {
 	struct ecore_consq *p_consq;
@@ -572,11 +898,13 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
 		if (rc)
 			goto alloc_err;
 
-		/* Prepare and process QM requirements */
-		rc = ecore_init_qm_info(p_hwfn, true);
+		rc = ecore_alloc_qm_data(p_hwfn);
 		if (rc)
 			goto alloc_err;
 
+		/* init qm info */
+		ecore_init_qm_info(p_hwfn);
+
 		/* Compute the ILT client partition */
 		rc = ecore_cxt_cfg_ilt_compute(p_hwfn);
 		if (rc)
@@ -618,18 +946,18 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
 			 * worst case:
 			 * - Core - according to SPQ.
 			 * - RoCE - per QP there are a couple of ICIDs, one
-			 *          responder and one requester, each can
-			 *          generate an EQE => n_eqes_qp = 2 * n_qp.
-			 *          Each CQ can generate an EQE. There are 2 CQs
-			 *          per QP => n_eqes_cq = 2 * n_qp.
-			 *          Hence the RoCE total is 4 * n_qp or
-			 *          2 * num_cons.
+			 *	  responder and one requester, each can
+			 *	  generate an EQE => n_eqes_qp = 2 * n_qp.
+			 *	  Each CQ can generate an EQE. There are 2 CQs
+			 *	  per QP => n_eqes_cq = 2 * n_qp.
+			 *	  Hence the RoCE total is 4 * n_qp or
+			 *	  2 * num_cons.
 			 * - ENet - There can be up to two events per VF. One
-			 *          for VF-PF channel and another for VF FLR
-			 *          initial cleanup. The number of VFs is
-			 *          bounded by MAX_NUM_VFS_BB, and is much
-			 *          smaller than RoCE's so we avoid exact
-			 *          calculation.
+			 *	  for VF-PF channel and another for VF FLR
+			 *	  initial cleanup. The number of VFs is
+			 *	  bounded by MAX_NUM_VFS_BB, and is much
+			 *	  smaller than RoCE's so we avoid exact
+			 *	  calculation.
 			 */
 			if (ECORE_IS_ROCE_PERSONALITY(p_hwfn)) {
 				num_cons =
@@ -683,7 +1011,8 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
 		rc = ecore_dmae_info_alloc(p_hwfn);
 		if (rc) {
 			DP_NOTICE(p_hwfn, true,
-				  "Failed to allocate memory for dmae_info structure\n");
+				  "Failed to allocate memory for dmae_info"
+				  " structure\n");
 			goto alloc_err;
 		}
 
@@ -705,9 +1034,9 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
 
 	return ECORE_SUCCESS;
 
- alloc_no_mem:
+alloc_no_mem:
 	rc = ECORE_NOMEM;
- alloc_err:
+alloc_err:
 	ecore_resc_free(p_dev);
 	return rc;
 }
@@ -2353,7 +2682,7 @@ static enum _ecore_status_t ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn,
 			*p_resc_start = dflt_resc_start;
 		}
 	}
- out:
+out:
 	return ECORE_SUCCESS;
 }
 
@@ -3139,13 +3468,13 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn,
 #endif
 
 	return rc;
- err2:
+err2:
 	if (IS_LEAD_HWFN(p_hwfn))
 		ecore_iov_free_hw_info(p_dev);
 	ecore_mcp_free(p_hwfn);
- err1:
+err1:
 	ecore_hw_hwfn_free(p_hwfn);
- err0:
+err0:
 	return rc;
 }
 
@@ -3309,7 +3638,7 @@ static void ecore_chain_free_pbl(struct ecore_dev *p_dev,
 	if (!p_chain->pbl.external)
 		OSAL_DMA_FREE_COHERENT(p_dev, p_chain->pbl.p_virt_table,
 				       p_chain->pbl.p_phys_table, pbl_size);
- out:
+out:
 	OSAL_VFREE(p_dev, p_chain->pbl.pp_virt_addr_tbl);
 }
 
@@ -3521,7 +3850,7 @@ enum _ecore_status_t ecore_chain_alloc(struct ecore_dev *p_dev,
 
 	return ECORE_SUCCESS;
 
- nomem:
+nomem:
 	ecore_chain_free(p_dev, p_chain);
 	return rc;
 }
@@ -3956,7 +4285,7 @@ enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn,
 		goto out;
 
 	p_hwfn->p_dev->rx_coalesce_usecs = coalesce;
- out:
+out:
 	return rc;
 }
 
@@ -4000,7 +4329,7 @@ enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn,
 		goto out;
 
 	p_hwfn->p_dev->tx_coalesce_usecs = coalesce;
- out:
+out:
 	return rc;
 }
 
diff --git a/drivers/net/qede/base/ecore_hw.c b/drivers/net/qede/base/ecore_hw.c
index 49d52c0..396edc2 100644
--- a/drivers/net/qede/base/ecore_hw.c
+++ b/drivers/net/qede/base/ecore_hw.c
@@ -905,44 +905,6 @@ ecore_dmae_host2host(struct ecore_hwfn *p_hwfn,
 	return rc;
 }
 
-u16 ecore_get_qm_pq(struct ecore_hwfn *p_hwfn,
-		    enum protocol_type proto,
-		    union ecore_qm_pq_params *p_params)
-{
-	u16 pq_id = 0;
-
-	if ((proto == PROTOCOLID_CORE ||
-	     proto == PROTOCOLID_ETH) && !p_params) {
-		DP_NOTICE(p_hwfn, true,
-			  "Protocol %d received NULL PQ params\n", proto);
-		return 0;
-	}
-
-	switch (proto) {
-	case PROTOCOLID_CORE:
-		if (p_params->core.tc == LB_TC)
-			pq_id = p_hwfn->qm_info.pure_lb_pq;
-		else if (p_params->core.tc == PKT_LB_TC)
-			pq_id = p_hwfn->qm_info.ooo_pq;
-		else
-			pq_id = p_hwfn->qm_info.offload_pq;
-		break;
-	case PROTOCOLID_ETH:
-		pq_id = p_params->eth.tc;
-		/* TODO - multi-CoS for VFs? */
-		if (p_params->eth.is_vf)
-			pq_id += p_hwfn->qm_info.vf_queues_offset +
-			    p_params->eth.vf_id;
-		break;
-	default:
-		pq_id = 0;
-	}
-
-	pq_id = CM_TX_PQ_BASE + pq_id + RESC_START(p_hwfn, ECORE_PQ);
-
-	return pq_id;
-}
-
 void ecore_hw_err_notify(struct ecore_hwfn *p_hwfn,
 			 enum ecore_hw_err_type err_type)
 {
diff --git a/drivers/net/qede/base/ecore_l2.c b/drivers/net/qede/base/ecore_l2.c
index d2e1719..0220d19 100644
--- a/drivers/net/qede/base/ecore_l2.c
+++ b/drivers/net/qede/base/ecore_l2.c
@@ -834,13 +834,13 @@ ecore_sp_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
 			      struct ecore_queue_start_common_params *p_params,
 			      dma_addr_t pbl_addr,
 			      u16 pbl_size,
-			      union ecore_qm_pq_params *p_pq_params)
+			      u16 pq_id)
 {
 	struct tx_queue_start_ramrod_data *p_ramrod = OSAL_NULL;
 	struct ecore_spq_entry *p_ent = OSAL_NULL;
 	struct ecore_sp_init_data init_data;
 	struct ecore_hw_cid_data *p_tx_cid;
-	u16 pq_id, abs_tx_qzone_id = 0;
+	u16 abs_tx_qzone_id = 0;
 	enum _ecore_status_t rc = ECORE_NOTIMPL;
 	u8 abs_vport_id;
 
@@ -882,7 +882,6 @@ ecore_sp_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
 	p_ramrod->pbl_size = OSAL_CPU_TO_LE16(pbl_size);
 	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);
 
-	pq_id = ecore_get_qm_pq(p_hwfn, PROTOCOLID_ETH, p_pq_params);
 	p_ramrod->qm_pq_id = OSAL_CPU_TO_LE16(pq_id);
 
 	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
@@ -898,7 +897,6 @@ ecore_sp_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
 			    void OSAL_IOMEM * *pp_doorbell)
 {
 	struct ecore_hw_cid_data *p_tx_cid;
-	union ecore_qm_pq_params pq_params;
 	u8 abs_stats_id = 0;
 	enum _ecore_status_t rc;
 
@@ -918,9 +916,6 @@ ecore_sp_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
 
 	p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
 	OSAL_MEMSET(p_tx_cid, 0, sizeof(*p_tx_cid));
-	OSAL_MEMSET(&pq_params, 0, sizeof(pq_params));
-
-	pq_params.eth.tc = tc;
 
 	/* Allocate a CID for the queue */
 	rc = ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &p_tx_cid->cid);
@@ -944,7 +939,8 @@ ecore_sp_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
 					   p_params,
 					   pbl_addr,
 					   pbl_size,
-					   &pq_params);
+					   ecore_get_cm_pq_idx_mcos(p_hwfn,
+								    tc));
 
 	*pp_doorbell = (u8 OSAL_IOMEM *)p_hwfn->doorbells +
 	    DB_ADDR(p_tx_cid->cid, DQ_DEMS_LEGACY);
diff --git a/drivers/net/qede/base/ecore_l2.h b/drivers/net/qede/base/ecore_l2.h
index 9c1bd38..b598eda 100644
--- a/drivers/net/qede/base/ecore_l2.h
+++ b/drivers/net/qede/base/ecore_l2.h
@@ -81,7 +81,7 @@ ecore_sp_eth_txq_start_ramrod(struct ecore_hwfn	*p_hwfn,
 			      struct ecore_queue_start_common_params *p_params,
 			      dma_addr_t pbl_addr,
 			      u16 pbl_size,
-			      union ecore_qm_pq_params *p_pq_params);
+			      u16 pq_id);
 
 u8 ecore_mcast_bin_from_mac(u8 *mac);
 
diff --git a/drivers/net/qede/base/ecore_spq.c b/drivers/net/qede/base/ecore_spq.c
index 9035d3b..ba26d45 100644
--- a/drivers/net/qede/base/ecore_spq.c
+++ b/drivers/net/qede/base/ecore_spq.c
@@ -173,11 +173,10 @@ ecore_spq_fill_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry *p_ent)
 static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn,
 				    struct ecore_spq *p_spq)
 {
-	u16 pq;
 	struct ecore_cxt_info cxt_info;
 	struct core_conn_context *p_cxt;
-	union ecore_qm_pq_params pq_params;
 	enum _ecore_status_t rc;
+	u16 physical_q;
 
 	cxt_info.iid = p_spq->cid;
 
@@ -206,10 +205,8 @@ static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn,
 	/* CDU validation - FIXME currently disabled */
 
 	/* QM physical queue */
-	OSAL_MEMSET(&pq_params, 0, sizeof(pq_params));
-	pq_params.core.tc = LB_TC;
-	pq = ecore_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
-	p_cxt->xstorm_ag_context.physical_q0 = OSAL_CPU_TO_LE16(pq);
+	physical_q = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
+	p_cxt->xstorm_ag_context.physical_q0 = OSAL_CPU_TO_LE16(physical_q);
 
 	p_cxt->xstorm_st_context.spq_base_lo =
 	    DMA_LO_LE(p_spq->chain.p_phys_addr);
diff --git a/drivers/net/qede/base/ecore_sriov.c b/drivers/net/qede/base/ecore_sriov.c
index a302e9e..365be25 100644
--- a/drivers/net/qede/base/ecore_sriov.c
+++ b/drivers/net/qede/base/ecore_sriov.c
@@ -632,8 +632,8 @@ enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn)
 	return ECORE_SUCCESS;
 }
 
-bool _ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid,
-				bool b_fail_malicious)
+static bool _ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid,
+				       bool b_fail_malicious)
 {
 	/* Check PF supports sriov */
 	if (IS_VF(p_hwfn->p_dev) || !IS_ECORE_SRIOV(p_hwfn->p_dev) ||
@@ -2103,15 +2103,9 @@ static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
 	struct ecore_queue_start_common_params params;
 	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
 	u8 status = PFVF_STATUS_NO_RESOURCE;
-	union ecore_qm_pq_params pq_params;
 	struct vfpf_start_txq_tlv *req;
 	enum _ecore_status_t rc;
 
-	/* Prepare the parameters which would choose the right PQ */
-	OSAL_MEMSET(&pq_params, 0, sizeof(pq_params));
-	pq_params.eth.is_vf = 1;
-	pq_params.eth.vf_id = vf->relative_vf_id;
-
 	OSAL_MEMSET(&params, 0, sizeof(params));
 	req = &mbx->req_virt->start_txq;
 
@@ -2132,7 +2126,8 @@ static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
 					   &params,
 					   req->pbl_addr,
 					   req->pbl_size,
-					   &pq_params);
+					   ecore_get_cm_pq_idx_vf(p_hwfn,
+							vf->relative_vf_id));
 
 	if (rc)
 		status = PFVF_STATUS_FAILURE;
-- 
1.7.10.3



More information about the dev mailing list