[dpdk-dev] [PATCH v3 06/32] qede/base: additional formatting/comment changes

Rasesh Mody rasesh.mody at qlogic.com
Sat Oct 15 22:07:43 CEST 2016


Change details:
 - add new comments
 - modify some of the existing comments
 - abstract repeated code into macros (see the example below)
 - split long lines
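
For example, the repeated open-coded loop over active ILT clients in
ecore_cxt.c is folded into an iterator macro, so call sites only loop
over active clients:

    #define for_each_ilt_valid_client(pos, clients)        \
            for (pos = 0; pos < ILT_CLI_MAX; pos++)         \
                    if (!clients[pos].active) {             \
                            continue;                       \
                    } else

    /* e.g. in ecore_cxt_ilt_shadow_size() */
    for_each_ilt_valid_client(i, ilt_clients)
            size += (ilt_clients[i].last.val -
                     ilt_clients[i].first.val + 1);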

Signed-off-by: Rasesh Mody <rasesh.mody at qlogic.com>
---
 drivers/net/qede/base/ecore.h               |  3 +-
 drivers/net/qede/base/ecore_chain.h         | 14 ++---
 drivers/net/qede/base/ecore_cxt.c           | 52 +++++++++----------
 drivers/net/qede/base/ecore_cxt.h           |  3 +-
 drivers/net/qede/base/ecore_dcbx.c          |  6 ++-
 drivers/net/qede/base/ecore_dev.c           | 70 +++++++++++++------------
 drivers/net/qede/base/ecore_dev_api.h       | 33 ++++++++----
 drivers/net/qede/base/ecore_hsi_eth.h       |  8 +--
 drivers/net/qede/base/ecore_hw.c            |  2 +-
 drivers/net/qede/base/ecore_hw.h            | 31 +++++++----
 drivers/net/qede/base/ecore_hw_defs.h       | 22 ++++----
 drivers/net/qede/base/ecore_init_fw_funcs.c |  3 ++
 drivers/net/qede/base/ecore_init_fw_funcs.h | 80 +++++++++++++++++++----------
 drivers/net/qede/base/ecore_init_ops.h      |  8 ++-
 drivers/net/qede/base/ecore_int.c           |  9 ++--
 drivers/net/qede/base/ecore_iov_api.h       | 57 ++++++++++++++------
 drivers/net/qede/base/ecore_l2.c            | 25 +++++----
 drivers/net/qede/base/ecore_l2_api.h        |  9 ++--
 drivers/net/qede/base/ecore_mcp.c           |  3 +-
 drivers/net/qede/base/ecore_sp_commands.c   | 17 +++---
 drivers/net/qede/base/ecore_spq.c           | 12 ++---
 drivers/net/qede/base/ecore_spq.h           | 21 +++++---
 drivers/net/qede/base/eth_common.h          | 15 ++++--
 drivers/net/qede/base/nvm_cfg.h             | 17 ++++--
 24 files changed, 321 insertions(+), 199 deletions(-)

diff --git a/drivers/net/qede/base/ecore.h b/drivers/net/qede/base/ecore.h
index b9127de..9f456e3 100644
--- a/drivers/net/qede/base/ecore.h
+++ b/drivers/net/qede/base/ecore.h
@@ -94,7 +94,6 @@ static OSAL_INLINE u32 DB_ADDR(u32 cid, u32 DEMS)
 	return db_addr;
 }
 
-/* @DPDK: This is a backport from latest ecore for TSS fix */
 static OSAL_INLINE u32 DB_ADDR_VF(u32 cid, u32 DEMS)
 {
 	u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
@@ -107,6 +106,7 @@ static OSAL_INLINE u32 DB_ADDR_VF(u32 cid, u32 DEMS)
 	((sizeof(type_name) + (u32)(1 << (p_hwfn->p_dev->cache_shift)) - 1) & \
 	 ~((1 << (p_hwfn->p_dev->cache_shift)) - 1))
 
+#ifndef LINUX_REMOVE
 #ifndef U64_HI
 #define U64_HI(val) ((u32)(((u64)(val))  >> 32))
 #endif
@@ -114,6 +114,7 @@ static OSAL_INLINE u32 DB_ADDR_VF(u32 cid, u32 DEMS)
 #ifndef U64_LO
 #define U64_LO(val) ((u32)(((u64)(val)) & 0xffffffff))
 #endif
+#endif
 
 #ifndef __EXTRACT__LINUX__
 enum DP_LEVEL {
diff --git a/drivers/net/qede/base/ecore_chain.h b/drivers/net/qede/base/ecore_chain.h
index bc18c41..56b7b4d 100644
--- a/drivers/net/qede/base/ecore_chain.h
+++ b/drivers/net/qede/base/ecore_chain.h
@@ -307,21 +307,23 @@ ecore_chain_advance_page(struct ecore_chain *p_chain, void **p_next_elem,
 	(((p)->u.chain32.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)
 
 #define is_unusable_next_idx(p, idx)		\
-	((((p)->u.chain16.idx + 1) & (p)->elem_per_page_mask) == \
-	(p)->usable_per_page)
+	((((p)->u.chain16.idx + 1) &		\
+	(p)->elem_per_page_mask) == (p)->usable_per_page)
 
 #define is_unusable_next_idx_u32(p, idx)	\
-	((((p)->u.chain32.idx + 1) & (p)->elem_per_page_mask) \
-	== (p)->usable_per_page)
+	((((p)->u.chain32.idx + 1) &		\
+	(p)->elem_per_page_mask) == (p)->usable_per_page)
 
 #define test_and_skip(p, idx)						\
 	do {								\
 		if (is_chain_u16(p)) {					\
 			if (is_unusable_idx(p, idx))			\
-				(p)->u.chain16.idx += (p)->elem_unusable; \
+				(p)->u.chain16.idx +=			\
+					(p)->elem_unusable;		\
 		} else {						\
 			if (is_unusable_idx_u32(p, idx))		\
-				(p)->u.chain32.idx += (p)->elem_unusable; \
+				(p)->u.chain32.idx +=			\
+					(p)->elem_unusable;		\
 		}							\
 	} while (0)
 
diff --git a/drivers/net/qede/base/ecore_cxt.c b/drivers/net/qede/base/ecore_cxt.c
index 415d1c8..22d0b25 100644
--- a/drivers/net/qede/base/ecore_cxt.c
+++ b/drivers/net/qede/base/ecore_cxt.c
@@ -378,7 +378,7 @@ static void ecore_ilt_cli_blk_fill(struct ecore_ilt_client_cfg *p_cli,
 {
 	u32 ilt_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);
 
-	/* verfiy called once for each block */
+	/* verify that it's called once for each block */
 	if (p_blk->total_size)
 		return;
 
@@ -405,7 +405,8 @@ static void ecore_ilt_cli_adv_line(struct ecore_hwfn *p_hwfn,
 	p_cli->last.val = *p_line - 1;
 
 	DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
-		   "ILT[Client %d] - Lines: [%08x - %08x]. Block - Size %08x [Real %08x] Start line %d\n",
+		   "ILT[Client %d] - Lines: [%08x - %08x]. Block - Size %08x"
+		   " [Real %08x] Start line %d\n",
 		   client_id, p_cli->first.val, p_cli->last.val,
 		   p_blk->total_size, p_blk->real_size_in_page,
 		   p_blk->start_line);
@@ -453,7 +454,7 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
 	p_mngr->pf_start_line = RESC_START(p_hwfn, ECORE_ILT);
 
 	DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
-		   "hwfn [%d] - Set context manager starting line to be 0x%08x\n",
+		   "hwfn [%d] - Set context mngr starting line to be 0x%08x\n",
 		   p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);
 
 	/* CDUC */
@@ -797,16 +798,20 @@ t2_fail:
 	return rc;
 }
 
+#define for_each_ilt_valid_client(pos, clients)		\
+	for (pos = 0; pos < ILT_CLI_MAX; pos++)		\
+		if (!clients[pos].active) {		\
+			continue;			\
+		} else					\
+
+
 /* Total number of ILT lines used by this PF */
 static u32 ecore_cxt_ilt_shadow_size(struct ecore_ilt_client_cfg *ilt_clients)
 {
 	u32 size = 0;
 	u32 i;
 
-	for (i = 0; i < ILT_CLI_MAX; i++)
-		if (!ilt_clients[i].active)
-			continue;
-		else
+	for_each_ilt_valid_client(i, ilt_clients)
 		size += (ilt_clients[i].last.val -
 			 ilt_clients[i].first.val + 1);
 
@@ -876,9 +881,9 @@ ecore_ilt_blk_alloc(struct ecore_hwfn *p_hwfn,
 		ilt_shadow[line].size = size;
 
 		DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
-			   "ILT shadow: Line [%d] Physical 0x%" PRIx64
+			   "ILT shadow: Line [%d] Physical 0x%lx"
 			   " Virtual %p Size %d\n",
-			   line, (u64)p_phys, p_virt, size);
+			   line, (unsigned long)p_phys, p_virt, size);
 
 		sz_left -= size;
 		line++;
@@ -892,15 +897,16 @@ static enum _ecore_status_t ecore_ilt_shadow_alloc(struct ecore_hwfn *p_hwfn)
 	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
 	struct ecore_ilt_client_cfg *clients = p_mngr->clients;
 	struct ecore_ilt_cli_blk *p_blk;
-	enum _ecore_status_t rc;
 	u32 size, i, j, k;
+	enum _ecore_status_t rc;
 
 	size = ecore_cxt_ilt_shadow_size(clients);
 	p_mngr->ilt_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
 					 size * sizeof(struct ecore_dma_mem));
 
 	if (!p_mngr->ilt_shadow) {
-		DP_NOTICE(p_hwfn, true, "Failed to allocate ilt shadow table");
+		DP_NOTICE(p_hwfn, true,
+			  "Failed to allocate ilt shadow table\n");
 		rc = ECORE_NOMEM;
 		goto ilt_shadow_fail;
 	}
@@ -909,10 +915,7 @@ static enum _ecore_status_t ecore_ilt_shadow_alloc(struct ecore_hwfn *p_hwfn)
 		   "Allocated 0x%x bytes for ilt shadow\n",
 		   (u32)(size * sizeof(struct ecore_dma_mem)));
 
-	for (i = 0; i < ILT_CLI_MAX; i++)
-		if (!clients[i].active) {
-			continue;
-		} else {
+	for_each_ilt_valid_client(i, clients) {
 		for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
 			p_blk = &clients[i].pf_blks[j];
 			rc = ecore_ilt_blk_alloc(p_hwfn, p_blk, i, 0);
@@ -1362,10 +1365,7 @@ static void ecore_ilt_bounds_init(struct ecore_hwfn *p_hwfn)
 	int i;
 
 	ilt_clients = p_hwfn->p_cxt_mngr->clients;
-	for (i = 0; i < ILT_CLI_MAX; i++)
-		if (!ilt_clients[i].active) {
-			continue;
-		} else {
+	for_each_ilt_valid_client(i, ilt_clients) {
 		STORE_RT_REG(p_hwfn,
 			     ilt_clients[i].first.reg,
 			     ilt_clients[i].first.val);
@@ -1448,10 +1448,7 @@ static void ecore_ilt_init_pf(struct ecore_hwfn *p_hwfn)
 	p_shdw = p_mngr->ilt_shadow;
 	clients = p_hwfn->p_cxt_mngr->clients;
 
-	for (i = 0; i < ILT_CLI_MAX; i++)
-		if (!clients[i].active) {
-			continue;
-		} else {
+	for_each_ilt_valid_client(i, clients) {
 		/* Client's 1st val and RT array are absolute, ILT shadows'
 		 * lines are relative.
 		 */
@@ -1474,9 +1471,10 @@ static void ecore_ilt_init_pf(struct ecore_hwfn *p_hwfn)
 				DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
 					"Setting RT[0x%08x] from"
 					" ILT[0x%08x] [Client is %d] to"
-					" Physical addr: 0x%" PRIx64 "\n",
+					" Physical addr: 0x%lx\n",
 					rt_offst, line, i,
-					(u64)(p_shdw[line].p_phys >> 12));
+					(unsigned long)(p_shdw[line].
+							p_phys >> 12));
 			}
 
 			STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry);
@@ -1557,7 +1555,7 @@ static void ecore_tm_init_pf(struct ecore_hwfn *p_hwfn)
 	SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_cids);
 	SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
 	SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0);	/* n/a for PF */
-	SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0);
+	SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0); /* scan all   */
 
 	rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
 	    (sizeof(cfg_word) / sizeof(u32)) *
@@ -1650,7 +1648,7 @@ enum _ecore_status_t ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
 					   p_mngr->acquired[type].max_count);
 
 	if (rel_cid >= p_mngr->acquired[type].max_count) {
-		DP_NOTICE(p_hwfn, false, "no CID available for protocol %d",
+		DP_NOTICE(p_hwfn, false, "no CID available for protocol %d\n",
 			  type);
 		return ECORE_NORESOURCES;
 	}
diff --git a/drivers/net/qede/base/ecore_cxt.h b/drivers/net/qede/base/ecore_cxt.h
index 1ac95f9..ba02410 100644
--- a/drivers/net/qede/base/ecore_cxt.h
+++ b/drivers/net/qede/base/ecore_cxt.h
@@ -152,6 +152,7 @@ enum _ecore_status_t ecore_cxt_free_proto_ilt(struct ecore_hwfn *p_hwfn,
 #define ECORE_CTX_FL_MEM 1
 enum _ecore_status_t ecore_cxt_get_task_ctx(struct ecore_hwfn *p_hwfn,
 					    u32 tid,
-					    u8 ctx_type, void **task_ctx);
+					    u8 ctx_type,
+					    void **task_ctx);
 
 #endif /* _ECORE_CID_ */
diff --git a/drivers/net/qede/base/ecore_dcbx.c b/drivers/net/qede/base/ecore_dcbx.c
index 18843c4..db73658 100644
--- a/drivers/net/qede/base/ecore_dcbx.c
+++ b/drivers/net/qede/base/ecore_dcbx.c
@@ -348,14 +348,16 @@ ecore_dcbx_copy_mib(struct ecore_hwfn *p_hwfn,
 		read_count++;
 
 		DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
-			   "mib type = %d, try count = %d prefix seq num  = %d suffix seq num = %d\n",
+			   "mib type = %d, try count = %d prefix seq num  ="
+			   " %d suffix seq num = %d\n",
 			   type, read_count, prefix_seq_num, suffix_seq_num);
 	} while ((prefix_seq_num != suffix_seq_num) &&
 		 (read_count < ECORE_DCBX_MAX_MIB_READ_TRY));
 
 	if (read_count >= ECORE_DCBX_MAX_MIB_READ_TRY) {
 		DP_ERR(p_hwfn,
-		       "MIB read err, mib type = %d, try count = %d prefix seq num = %d suffix seq num = %d\n",
+		       "MIB read err, mib type = %d, try count ="
+		       " %d prefix seq num = %d suffix seq num = %d\n",
 		       type, read_count, prefix_seq_num, suffix_seq_num);
 		rc = ECORE_IO;
 	}
diff --git a/drivers/net/qede/base/ecore_dev.c b/drivers/net/qede/base/ecore_dev.c
index 9e32279..fd38215 100644
--- a/drivers/net/qede/base/ecore_dev.c
+++ b/drivers/net/qede/base/ecore_dev.c
@@ -323,8 +323,8 @@ enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn,
 				     struct ecore_ptt *p_ptt)
 {
 	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
-	enum _ecore_status_t rc;
 	bool b_rc;
+	enum _ecore_status_t rc;
 
 	/* qm_info is allocated in ecore_init_qm_info() which is already called
 	 * from ecore_resc_alloc() or previous call of ecore_qm_reconf().
@@ -467,12 +467,20 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
 			goto alloc_no_mem;
 		p_hwfn->p_consq = p_consq;
 
+#ifdef CONFIG_ECORE_LL2
+		if (p_hwfn->using_ll2) {
+			p_ll2_info = ecore_ll2_alloc(p_hwfn);
+			if (!p_ll2_info)
+				goto alloc_no_mem;
+			p_hwfn->p_ll2_info = p_ll2_info;
+		}
+#endif
+
 		/* DMA info initialization */
 		rc = ecore_dmae_info_alloc(p_hwfn);
 		if (rc) {
 			DP_NOTICE(p_hwfn, true,
-				  "Failed to allocate memory for"
-				  " dmae_info structure\n");
+				  "Failed to allocate memory for dmae_info structure\n");
 			goto alloc_err;
 		}
 
@@ -480,7 +488,7 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
 		rc = ecore_dcbx_info_alloc(p_hwfn);
 		if (rc) {
 			DP_NOTICE(p_hwfn, true,
-				  "Failed to allocate memory for dcbxstruct\n");
+				  "Failed to allocate memory for dcbx structure\n");
 			goto alloc_err;
 		}
 	}
@@ -558,9 +566,11 @@ enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn *p_hwfn,
 	command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT;
 
 /* Make sure notification is not set before initiating final cleanup */
+
 	if (REG_RD(p_hwfn, addr)) {
 		DP_NOTICE(p_hwfn, false,
-			  "Unexpected; Found final cleanup notification "
+			  "Unexpected; Found final cleanup notification");
+		DP_NOTICE(p_hwfn, false,
 			  " before initiating final cleanup\n");
 		REG_WR(p_hwfn, addr, 0);
 	}
@@ -742,11 +752,11 @@ static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn,
 						 int hw_mode)
 {
 	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
-	enum _ecore_status_t rc = ECORE_SUCCESS;
 	struct ecore_dev *p_dev = p_hwfn->p_dev;
 	u8 vf_id, max_num_vfs;
 	u16 num_pfs, pf_id;
 	u32 concrete_fid;
+	enum _ecore_status_t rc = ECORE_SUCCESS;
 
 	ecore_init_cau_rt_data(p_dev);
 
@@ -906,11 +916,15 @@ static void ecore_emul_link_init(struct ecore_hwfn *p_hwfn,
 		return;
 	}
 
+	/* XLPORT MAC MODE *//* 0 Quad, 4 Single... */
 	ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_MODE_REG, (0x4 << 4) | 0x4, 1,
 			 port);
 	ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_MAC_CONTROL, 0, 1, port);
+	/* XLMAC: SOFT RESET */
 	ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 0x40, 0, port);
+	/* XLMAC: Port Speed >= 10Gbps */
 	ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_MODE, 0x40, 0, port);
+	/* XLMAC: Max Size */
 	ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_RX_MAX_SIZE, 0x3fff, 0, port);
 	ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_TX_CTRL,
 			 0x01000000800ULL | (0xa << 12) | ((u64)1 << 38),
@@ -1103,13 +1117,12 @@ ecore_hw_init_pf(struct ecore_hwfn *p_hwfn,
 		 bool b_hw_start,
 		 enum ecore_int_mode int_mode, bool allow_npar_tx_switch)
 {
-	enum _ecore_status_t rc = ECORE_SUCCESS;
 	u8 rel_pf_id = p_hwfn->rel_pf_id;
 	u32 prs_reg;
+	enum _ecore_status_t rc = ECORE_SUCCESS;
 	u16 ctrl;
 	int pos;
 
-	/* ILT/DQ/CM/QM */
 	if (p_hwfn->mcp_info) {
 		struct ecore_mcp_function_info *p_info;
 
@@ -1344,6 +1357,7 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
 			if (rc)
 				break;
 
+#ifndef REAL_ASIC_ONLY
 			if (ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn)) {
 				struct init_nig_pri_tc_map_req tc_map;
 
@@ -1360,7 +1374,8 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
 							  p_hwfn->p_main_ptt,
 							  &tc_map);
 			}
-			/* fallthrough */
+#endif
+			/* Fall into */
 		case FW_MSG_CODE_DRV_LOAD_FUNCTION:
 			rc = ecore_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
 					      p_tunn, p_hwfn->hw_info.hw_mode,
@@ -1374,7 +1389,7 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
 
 		if (rc != ECORE_SUCCESS)
 			DP_NOTICE(p_hwfn, true,
-				  "init phase failed loadcode 0x%x (rc %d)\n",
+				  "init phase failed for loadcode 0x%x (rc %d)\n",
 				  load_code, rc);
 
 		/* ACK mfw regardless of success or failure of initialization */
@@ -1391,8 +1406,7 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
 
 		/* send DCBX attention request command */
 		DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
-			   "sending phony dcbx set command to trigger DCBx"
-			   " attention handling\n");
+			   "sending phony dcbx set command to trigger DCBx attention handling\n");
 		mfw_rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
 				       DRV_MSG_CODE_SET_DCBX,
 				       1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT,
@@ -1419,8 +1433,8 @@ static OSAL_INLINE void ecore_hw_timers_stop(struct ecore_dev *p_dev,
 	/* close timers */
 	ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
 	ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
-	for (i = 0; i < ECORE_HW_STOP_RETRY_LIMIT &&
-					!p_dev->recov_in_prog; i++) {
+	for (i = 0; i < ECORE_HW_STOP_RETRY_LIMIT && !p_dev->recov_in_prog;
+									i++) {
 		if ((!ecore_rd(p_hwfn, p_ptt,
 			       TM_REG_PF_SCAN_ACTIVE_CONN)) &&
 		    (!ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK)))
@@ -1433,8 +1447,7 @@ static OSAL_INLINE void ecore_hw_timers_stop(struct ecore_dev *p_dev,
 	}
 	if (i == ECORE_HW_STOP_RETRY_LIMIT)
 		DP_NOTICE(p_hwfn, true,
-			  "Timers linear scans are not over"
-			  " [Connection %02x Tasks %02x]\n",
+			  "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
 			  (u8)ecore_rd(p_hwfn, p_ptt,
 					TM_REG_PF_SCAN_ACTIVE_CONN),
 			  (u8)ecore_rd(p_hwfn, p_ptt,
@@ -1475,9 +1488,7 @@ enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev)
 		rc = ecore_sp_pf_stop(p_hwfn);
 		if (rc)
 			DP_NOTICE(p_hwfn, true,
-				  "Failed to close PF against FW. Continue to"
-				  " stop HW to prevent illegal host access"
-				  " by the device\n");
+				  "Failed to close PF against FW. Continue to stop HW to prevent illegal host access by the device\n");
 
 		/* perform debug action after PF stop was sent */
 		OSAL_AFTER_PF_STOP((void *)p_hwfn->p_dev, p_hwfn->my_id);
@@ -1938,8 +1949,7 @@ static enum _ecore_status_t ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
 	link->loopback_mode = 0;
 
 	DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
-		   "Read default link: Speed 0x%08x, Adv. Speed 0x%08x,"
-		   " AN: 0x%02x, PAUSE AN: 0x%02x\n",
+		   "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x\n",
 		   link->speed.forced_speed, link->speed.advertised_speeds,
 		   link->speed.autoneg, link->pause.autoneg);
 
@@ -2217,8 +2227,7 @@ static enum _ecore_status_t ecore_get_dev_info(struct ecore_dev *p_dev)
 					   MISCS_REG_CHIP_METAL);
 	MASK_FIELD(CHIP_METAL, p_dev->chip_metal);
 	DP_INFO(p_dev->hwfns,
-		"Chip details - %s%d, Num: %04x Rev: %04x Bond id: %04x"
-		" Metal: %04x\n",
+		"Chip details - %s%d, Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
 		ECORE_IS_BB(p_dev) ? "BB" : "AH",
 		CHIP_REV_IS_A0(p_dev) ? 0 : 1,
 		p_dev->chip_num, p_dev->chip_rev, p_dev->chip_bond_id,
@@ -2527,8 +2536,7 @@ ecore_chain_alloc_sanity_check(struct ecore_dev *p_dev,
 	    (cnt_type == ECORE_CHAIN_CNT_TYPE_U32 &&
 	     chain_size > ECORE_U32_MAX)) {
 		DP_NOTICE(p_dev, true,
-			  "The actual chain size (0x%lx) is larger than"
-			  " the maximal possible value\n",
+			  "The actual chain size (0x%lx) is larger than the maximal possible value\n",
 			  (unsigned long)chain_size);
 		return ECORE_INVAL;
 	}
@@ -2706,8 +2714,7 @@ enum _ecore_status_t ecore_fw_l2_queue(struct ecore_hwfn *p_hwfn,
 		min = (u16)RESC_START(p_hwfn, ECORE_L2_QUEUE);
 		max = min + RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
 		DP_NOTICE(p_hwfn, true,
-			  "l2_queue id [%d] is not valid, available"
-			  " indices [%d - %d]\n",
+			  "l2_queue id [%d] is not valid, available indices [%d - %d]\n",
 			  src_id, min, max);
 
 		return ECORE_INVAL;
@@ -2727,8 +2734,7 @@ enum _ecore_status_t ecore_fw_vport(struct ecore_hwfn *p_hwfn,
 		min = (u8)RESC_START(p_hwfn, ECORE_VPORT);
 		max = min + RESC_NUM(p_hwfn, ECORE_VPORT);
 		DP_NOTICE(p_hwfn, true,
-			  "vport id [%d] is not valid, available"
-			  " indices [%d - %d]\n",
+			  "vport id [%d] is not valid, available indices [%d - %d]\n",
 			  src_id, min, max);
 
 		return ECORE_INVAL;
@@ -2748,7 +2754,7 @@ enum _ecore_status_t ecore_fw_rss_eng(struct ecore_hwfn *p_hwfn,
 		min = (u8)RESC_START(p_hwfn, ECORE_RSS_ENG);
 		max = min + RESC_NUM(p_hwfn, ECORE_RSS_ENG);
 		DP_NOTICE(p_hwfn, true,
-			  "rss_eng id [%d] is not valid,avail idx [%d - %d]\n",
+			  "rss_eng id [%d] is not valid, available indices [%d - %d]\n",
 			  src_id, min, max);
 
 		return ECORE_INVAL;
@@ -3333,7 +3339,7 @@ int ecore_configure_vport_wfq(struct ecore_dev *p_dev, u16 vp_id, u32 rate)
 	/* TBD - for multiple hardware functions - that is 100 gig */
 	if (p_dev->num_hwfns > 1) {
 		DP_NOTICE(p_dev, false,
-			  "WFQ configuration is not supported for this dev\n");
+			  "WFQ configuration is not supported for this device\n");
 		return rc;
 	}
 
@@ -3367,7 +3373,7 @@ void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev,
 	/* TBD - for multiple hardware functions - that is 100 gig */
 	if (p_dev->num_hwfns > 1) {
 		DP_VERBOSE(p_dev, ECORE_MSG_LINK,
-			   "WFQ configuration is not supported for this dev\n");
+			   "WFQ configuration is not supported for this device\n");
 		return;
 	}
 
diff --git a/drivers/net/qede/base/ecore_dev_api.h b/drivers/net/qede/base/ecore_dev_api.h
index 1b78c32..77f4869 100644
--- a/drivers/net/qede/base/ecore_dev_api.h
+++ b/drivers/net/qede/base/ecore_dev_api.h
@@ -24,7 +24,9 @@ struct ecore_tunn_start_params;
  * @param dp_ctx
  */
 void ecore_init_dp(struct ecore_dev *p_dev,
-		   u32 dp_module, u8 dp_level, void *dp_ctx);
+		   u32 dp_module,
+		   u8 dp_level,
+		   void *dp_ctx);
 
 /**
  * @brief ecore_init_struct - initialize the device structure to
@@ -172,7 +174,8 @@ struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn);
  * @param p_hwfn
  * @param p_ptt
  */
-void ecore_ptt_release(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
+void ecore_ptt_release(struct ecore_hwfn *p_hwfn,
+		       struct ecore_ptt *p_ptt);
 
 #ifndef __EXTRACT__LINUX__
 struct ecore_eth_stats {
@@ -290,7 +293,9 @@ enum _ecore_status_t
 ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn,
 		    struct ecore_ptt *p_ptt,
 		    u64 source_addr,
-		    u32 grc_addr, u32 size_in_dwords, u32 flags);
+		    u32 grc_addr,
+		    u32 size_in_dwords,
+		    u32 flags);
 
 /**
  * @brief ecore_dmae_grc2host - Read data from dmae data offset
@@ -306,7 +311,9 @@ enum _ecore_status_t
 ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn,
 		    struct ecore_ptt *p_ptt,
 		    u32 grc_addr,
-		    dma_addr_t dest_addr, u32 size_in_dwords, u32 flags);
+		    dma_addr_t dest_addr,
+		    u32 size_in_dwords,
+		    u32 flags);
 
 /**
  * @brief ecore_dmae_host2host - copy data from to source address
@@ -324,7 +331,8 @@ ecore_dmae_host2host(struct ecore_hwfn *p_hwfn,
 		     struct ecore_ptt *p_ptt,
 		     dma_addr_t source_addr,
 		     dma_addr_t dest_addr,
-		     u32 size_in_dwords, struct ecore_dmae_params *p_params);
+		     u32 size_in_dwords,
+		     struct ecore_dmae_params *p_params);
 
 /**
  * @brief ecore_chain_alloc - Allocate and initialize a chain
@@ -344,7 +352,8 @@ ecore_chain_alloc(struct ecore_dev *p_dev,
 		  enum ecore_chain_mode mode,
 		  enum ecore_chain_cnt_type cnt_type,
 		  u32 num_elems,
-		  osal_size_t elem_size, struct ecore_chain *p_chain);
+		  osal_size_t elem_size,
+		  struct ecore_chain *p_chain);
 
 /**
  * @brief ecore_chain_free - Free chain DMA memory
@@ -352,7 +361,8 @@ ecore_chain_alloc(struct ecore_dev *p_dev,
  * @param p_hwfn
  * @param p_chain
  */
-void ecore_chain_free(struct ecore_dev *p_dev, struct ecore_chain *p_chain);
+void ecore_chain_free(struct ecore_dev *p_dev,
+		      struct ecore_chain *p_chain);
 
 /**
  * @@brief ecore_fw_l2_queue - Get absolute L2 queue ID
@@ -364,7 +374,8 @@ void ecore_chain_free(struct ecore_dev *p_dev, struct ecore_chain *p_chain);
  *  @return enum _ecore_status_t
  */
 enum _ecore_status_t ecore_fw_l2_queue(struct ecore_hwfn *p_hwfn,
-				       u16 src_id, u16 *dst_id);
+				       u16 src_id,
+				       u16 *dst_id);
 
 /**
  * @@brief ecore_fw_vport - Get absolute vport ID
@@ -376,7 +387,8 @@ enum _ecore_status_t ecore_fw_l2_queue(struct ecore_hwfn *p_hwfn,
  *  @return enum _ecore_status_t
  */
 enum _ecore_status_t ecore_fw_vport(struct ecore_hwfn *p_hwfn,
-				    u8 src_id, u8 *dst_id);
+				    u8 src_id,
+				    u8 *dst_id);
 
 /**
  * @@brief ecore_fw_rss_eng - Get absolute RSS engine ID
@@ -388,7 +400,8 @@ enum _ecore_status_t ecore_fw_vport(struct ecore_hwfn *p_hwfn,
  *  @return enum _ecore_status_t
  */
 enum _ecore_status_t ecore_fw_rss_eng(struct ecore_hwfn *p_hwfn,
-				      u8 src_id, u8 *dst_id);
+				      u8 src_id,
+				      u8 *dst_id);
 
 /**
  * @brief ecore_llh_add_mac_filter - configures a MAC filter in llh
diff --git a/drivers/net/qede/base/ecore_hsi_eth.h b/drivers/net/qede/base/ecore_hsi_eth.h
index 78cc55d..dd94d31 100644
--- a/drivers/net/qede/base/ecore_hsi_eth.h
+++ b/drivers/net/qede/base/ecore_hsi_eth.h
@@ -872,7 +872,7 @@ struct eth_vport_tpa_param {
 	u8 tpa_ipv6_tunn_en_flg /* Enable TPA for IPv6 over tunnel */;
 	u8 tpa_pkt_split_flg;
 	u8 tpa_hdr_data_split_flg
-/* If set, put header of first TPA segment on bd and data on SGE */
+	    /* If set, put header of first TPA segment on bd and data on SGE */
 	   ;
 	u8 tpa_gro_consistent_flg
 	    /* If set, GRO data consistent will checked for TPA continue */;
@@ -882,10 +882,10 @@ struct eth_vport_tpa_param {
 	__le16 tpa_min_size_to_start
 	    /* minimum TCP payload size for a packet to start aggregation */;
 	__le16 tpa_min_size_to_cont
-/* minimum TCP payload size for a packet to continue aggregation */
+	    /* minimum TCP payload size for a packet to continue aggregation */
 	   ;
 	u8 max_buff_num
-/* maximal number of buffers that can be used for one aggregation */
+	    /* maximal number of buffers that can be used for one aggregation */
 	   ;
 	u8 reserved;
 };
@@ -998,7 +998,7 @@ struct rx_queue_start_ramrod_data {
 };
 
 /*
- * Ramrod data for rx queue start ramrod
+ * Ramrod data for rx queue stop ramrod
  */
 struct rx_queue_stop_ramrod_data {
 	__le16 rx_queue_id /* ID of RX queue */;
diff --git a/drivers/net/qede/base/ecore_hw.c b/drivers/net/qede/base/ecore_hw.c
index 72bc6de..04ec1ea 100644
--- a/drivers/net/qede/base/ecore_hw.c
+++ b/drivers/net/qede/base/ecore_hw.c
@@ -580,8 +580,8 @@ void ecore_dmae_info_free(struct ecore_hwfn *p_hwfn)
 
 static enum _ecore_status_t ecore_dmae_operation_wait(struct ecore_hwfn *p_hwfn)
 {
-	enum _ecore_status_t ecore_status = ECORE_SUCCESS;
 	u32 wait_cnt_limit = 10000, wait_cnt = 0;
+	enum _ecore_status_t ecore_status = ECORE_SUCCESS;
 
 #ifndef ASIC_ONLY
 	u32 factor = (CHIP_REV_IS_EMUL(p_hwfn->p_dev) ?
diff --git a/drivers/net/qede/base/ecore_hw.h b/drivers/net/qede/base/ecore_hw.h
index 9603c99..154eb3c 100644
--- a/drivers/net/qede/base/ecore_hw.h
+++ b/drivers/net/qede/base/ecore_hw.h
@@ -105,7 +105,8 @@ void ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn);
  *
  * @return u32
  */
-u32 ecore_ptt_get_hw_addr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
+u32 ecore_ptt_get_hw_addr(struct ecore_hwfn	*p_hwfn,
+			  struct ecore_ptt	*p_ptt);
 
 /**
  * @brief ecore_ptt_get_bar_addr - Get PPT's external BAR address
@@ -125,7 +126,8 @@ u32 ecore_ptt_get_bar_addr(struct ecore_ptt	*p_ptt);
  * @param p_ptt
  */
 void ecore_ptt_set_win(struct ecore_hwfn	*p_hwfn,
-		       struct ecore_ptt *p_ptt, u32 new_hw_addr);
+		       struct ecore_ptt		*p_ptt,
+		       u32			new_hw_addr);
 
 /**
  * @brief ecore_get_reserved_ptt - Get a specific reserved PTT
@@ -147,7 +149,9 @@ struct ecore_ptt *ecore_get_reserved_ptt(struct ecore_hwfn	*p_hwfn,
  * @param hw_addr
  */
 void ecore_wr(struct ecore_hwfn	*p_hwfn,
-	      struct ecore_ptt *p_ptt, u32 hw_addr, u32 val);
+	      struct ecore_ptt	*p_ptt,
+	      u32		hw_addr,
+	      u32		val);
 
 /**
  * @brief ecore_rd - Read value from BAR using the given ptt
@@ -157,7 +161,9 @@ void ecore_wr(struct ecore_hwfn	*p_hwfn,
  * @param val
  * @param hw_addr
  */
-u32 ecore_rd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 hw_addr);
+u32 ecore_rd(struct ecore_hwfn	*p_hwfn,
+	     struct ecore_ptt	*p_ptt,
+	     u32		hw_addr);
 
 /**
  * @brief ecore_memcpy_from - copy n bytes from BAR using the given
@@ -171,7 +177,9 @@ u32 ecore_rd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 hw_addr);
  */
 void ecore_memcpy_from(struct ecore_hwfn	*p_hwfn,
 		       struct ecore_ptt		*p_ptt,
-		       void *dest, u32 hw_addr, osal_size_t n);
+		       void			*dest,
+		       u32			hw_addr,
+		       osal_size_t		n);
 
 /**
  * @brief ecore_memcpy_to - copy n bytes to BAR using the given
@@ -185,7 +193,9 @@ void ecore_memcpy_from(struct ecore_hwfn	*p_hwfn,
  */
 void ecore_memcpy_to(struct ecore_hwfn	*p_hwfn,
 		     struct ecore_ptt	*p_ptt,
-		     u32 hw_addr, void *src, osal_size_t n);
+		     u32		hw_addr,
+		     void		*src,
+		     osal_size_t	n);
 /**
  * @brief ecore_fid_pretend - pretend to another function when
  *        accessing the ptt window. There is no way to unpretend
@@ -198,7 +208,8 @@ void ecore_memcpy_to(struct ecore_hwfn	*p_hwfn,
  *            either pf / vf, port/path fields are don't care.
  */
 void ecore_fid_pretend(struct ecore_hwfn	*p_hwfn,
-		       struct ecore_ptt *p_ptt, u16 fid);
+		       struct ecore_ptt		*p_ptt,
+		       u16			fid);
 
 /**
  * @brief ecore_port_pretend - pretend to another port when
@@ -209,7 +220,8 @@ void ecore_fid_pretend(struct ecore_hwfn	*p_hwfn,
  * @param port_id - the port to pretend to
  */
 void ecore_port_pretend(struct ecore_hwfn	*p_hwfn,
-			struct ecore_ptt *p_ptt, u8 port_id);
+			struct ecore_ptt	*p_ptt,
+			u8			port_id);
 
 /**
  * @brief ecore_port_unpretend - cancel any previously set port
@@ -218,7 +230,8 @@ void ecore_port_pretend(struct ecore_hwfn	*p_hwfn,
  * @param p_hwfn
  * @param p_ptt
  */
-void ecore_port_unpretend(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
+void ecore_port_unpretend(struct ecore_hwfn	*p_hwfn,
+			  struct ecore_ptt	*p_ptt);
 
 /**
  * @brief ecore_vfid_to_concrete - build a concrete FID for a
diff --git a/drivers/net/qede/base/ecore_hw_defs.h b/drivers/net/qede/base/ecore_hw_defs.h
index 19816ff..deb8e34 100644
--- a/drivers/net/qede/base/ecore_hw_defs.h
+++ b/drivers/net/qede/base/ecore_hw_defs.h
@@ -10,19 +10,19 @@
 #define _ECORE_IGU_DEF_H_
 
 /* Fields of IGU PF CONFIGRATION REGISTER */
-#define IGU_PF_CONF_FUNC_EN       (0x1 << 0)	/* function enable        */
-#define IGU_PF_CONF_MSI_MSIX_EN   (0x1 << 1)	/* MSI/MSIX enable        */
-#define IGU_PF_CONF_INT_LINE_EN   (0x1 << 2)	/* INT enable             */
-#define IGU_PF_CONF_ATTN_BIT_EN   (0x1 << 3)	/* attention enable       */
-#define IGU_PF_CONF_SINGLE_ISR_EN (0x1 << 4)	/* single ISR mode enable */
-#define IGU_PF_CONF_SIMD_MODE     (0x1 << 5)	/* simd all ones mode     */
+#define IGU_PF_CONF_FUNC_EN       (0x1 << 0)   /* function enable        */
+#define IGU_PF_CONF_MSI_MSIX_EN   (0x1 << 1)   /* MSI/MSIX enable        */
+#define IGU_PF_CONF_INT_LINE_EN   (0x1 << 2)   /* INT enable             */
+#define IGU_PF_CONF_ATTN_BIT_EN   (0x1 << 3)   /* attention enable       */
+#define IGU_PF_CONF_SINGLE_ISR_EN (0x1 << 4)   /* single ISR mode enable */
+#define IGU_PF_CONF_SIMD_MODE     (0x1 << 5)   /* simd all ones mode     */
 
 /* Fields of IGU VF CONFIGRATION REGISTER */
-#define IGU_VF_CONF_FUNC_EN        (0x1 << 0)	/* function enable        */
-#define IGU_VF_CONF_MSI_MSIX_EN    (0x1 << 1)	/* MSI/MSIX enable        */
-#define IGU_VF_CONF_SINGLE_ISR_EN  (0x1 << 4)	/* single ISR mode enable */
-#define IGU_VF_CONF_PARENT_MASK    (0xF)	/* Parent PF              */
-#define IGU_VF_CONF_PARENT_SHIFT   5	/* Parent PF              */
+#define IGU_VF_CONF_FUNC_EN        (0x1 << 0)  /* function enable        */
+#define IGU_VF_CONF_MSI_MSIX_EN    (0x1 << 1)  /* MSI/MSIX enable        */
+#define IGU_VF_CONF_SINGLE_ISR_EN  (0x1 << 4)  /* single ISR mode enable */
+#define IGU_VF_CONF_PARENT_MASK    (0xF)       /* Parent PF              */
+#define IGU_VF_CONF_PARENT_SHIFT   5   /* Parent PF              */
 
 /* Igu control commands
  */
diff --git a/drivers/net/qede/base/ecore_init_fw_funcs.c b/drivers/net/qede/base/ecore_init_fw_funcs.c
index 0844194..bffc73c 100644
--- a/drivers/net/qede/base/ecore_init_fw_funcs.c
+++ b/drivers/net/qede/base/ecore_init_fw_funcs.c
@@ -1099,6 +1099,8 @@ void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
 			ecore_wr(p_hwfn, p_ptt,
 				 BRB_REG_MAIN_TC_GUARANTIED_HYST_0 + reg_offset,
 				 BRB_HYST_BLOCKS);
+/* init pause/full thresholds per physical TC - for loopback traffic */
+
 			ecore_wr(p_hwfn, p_ptt,
 				 BRB_REG_LB_TC_FULL_XOFF_THRESHOLD_0 +
 				 reg_offset, full_xoff_th);
@@ -1111,6 +1113,7 @@ void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
 			ecore_wr(p_hwfn, p_ptt,
 				 BRB_REG_LB_TC_PAUSE_XON_THRESHOLD_0 +
 				 reg_offset, pause_xon_th);
+/* init pause/full thresholds per physical TC - for main traffic */
 			ecore_wr(p_hwfn, p_ptt,
 				 BRB_REG_MAIN_TC_FULL_XOFF_THRESHOLD_0 +
 				 reg_offset, full_xoff_th);
diff --git a/drivers/net/qede/base/ecore_init_fw_funcs.h b/drivers/net/qede/base/ecore_init_fw_funcs.h
index 0c8d1fb..f5df764 100644
--- a/drivers/net/qede/base/ecore_init_fw_funcs.h
+++ b/drivers/net/qede/base/ecore_init_fw_funcs.h
@@ -40,7 +40,7 @@ u32 ecore_qm_pf_mem_size(u8 pf_id,
  * @param pf_wfq_en				- enable per-PF WFQ
  * @param vport_rl_en			- enable per-VPORT rate limiters
  * @param vport_wfq_en			- enable per-VPORT WFQ
- * @param port_params- array of size MAX_NUM_PORTS with parameters for each port
+ * @param port_params - array of size MAX_NUM_PORTS with params for each port
  *
  * @return 0 on success, -1 on error.
  */
@@ -83,7 +83,9 @@ int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
  * @return 0 on success, -1 on error.
  */
 int ecore_init_pf_wfq(struct ecore_hwfn *p_hwfn,
-		      struct ecore_ptt *p_ptt, u8 pf_id, u16 pf_wfq);
+					  struct ecore_ptt *p_ptt,
+					  u8 pf_id,
+					  u16 pf_wfq);
 /**
  * @brief ecore_init_pf_rl  Initializes the rate limit of the specified PF
  *
@@ -95,9 +97,11 @@ int ecore_init_pf_wfq(struct ecore_hwfn *p_hwfn,
  * @return 0 on success, -1 on error.
  */
 int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn,
-		     struct ecore_ptt *p_ptt, u8 pf_id, u32 pf_rl);
+					 struct ecore_ptt *p_ptt,
+					 u8 pf_id,
+					 u32 pf_rl);
 /**
- * @brief ecore_init_vport_wfq Initializes the WFQ weight of the specified VPORT
+ * @brief ecore_init_vport_wfq  Initializes the WFQ weight of specified VPORT
  *
  * @param p_hwfn
  * @param p_ptt			- ptt window used for writing the registers
@@ -110,7 +114,8 @@ int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn,
  */
 int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
 						 struct ecore_ptt *p_ptt,
-			 u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq);
+						 u16 first_tx_pq_id[NUM_OF_TCS],
+						 u16 vport_wfq);
 /**
  * @brief ecore_init_vport_rl  Initializes the rate limit of the specified VPORT
  *
@@ -122,7 +127,9 @@ int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
  * @return 0 on success, -1 on error.
  */
 int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
-			struct ecore_ptt *p_ptt, u8 vport_id, u32 vport_rl);
+						struct ecore_ptt *p_ptt,
+						u8 vport_id,
+						u32 vport_rl);
 /**
  * @brief ecore_send_qm_stop_cmd  Sends a stop command to the QM
  *
@@ -133,13 +140,16 @@ int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
  * @param start_pq       - first PQ ID to stop
  * @param num_pqs        - Number of PQs to stop, starting from start_pq.
  *
- * @return bool, true if successful, false if timeout occurred while
- * waiting for QM command done.
+ * @return bool, true if successful, false if timeout occurred while waiting
+ *  for QM command done.
  */
 bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn,
 							struct ecore_ptt *p_ptt,
 							bool is_release_cmd,
-			    bool is_tx_pq, u16 start_pq, u16 num_pqs);
+							bool is_tx_pq,
+							u16 start_pq,
+							u16 num_pqs);
+#ifndef UNUSED_HSI_FUNC
 /**
  * @brief ecore_init_nig_ets - initializes the NIG ETS arbiter
  *
@@ -153,7 +163,8 @@ bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn,
  */
 void ecore_init_nig_ets(struct ecore_hwfn *p_hwfn,
 						struct ecore_ptt *p_ptt,
-			struct init_ets_req *req, bool is_lb);
+						struct init_ets_req *req,
+						bool is_lb);
 /**
  * @brief ecore_init_nig_lb_rl - initializes the NIG LB RLs
  *
@@ -165,6 +176,7 @@ void ecore_init_nig_ets(struct ecore_hwfn *p_hwfn,
 void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
 				  struct ecore_ptt *p_ptt,
 				  struct init_nig_lb_rl_req *req);
+#endif /* UNUSED_HSI_FUNC */
 /**
  * @brief ecore_init_nig_pri_tc_map - initializes the NIG priority to TC map.
  *
@@ -176,6 +188,7 @@ void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
 void ecore_init_nig_pri_tc_map(struct ecore_hwfn *p_hwfn,
 					   struct ecore_ptt *p_ptt,
 					   struct init_nig_pri_tc_map_req *req);
+#ifndef UNUSED_HSI_FUNC
 /**
  * @brief ecore_init_prs_ets - initializes the PRS Rx ETS arbiter
  *
@@ -185,7 +198,10 @@ void ecore_init_nig_pri_tc_map(struct ecore_hwfn *p_hwfn,
  * @param req	- the PRS ETS initialization requirements.
  */
 void ecore_init_prs_ets(struct ecore_hwfn *p_hwfn,
-			struct ecore_ptt *p_ptt, struct init_ets_req *req);
+						struct ecore_ptt *p_ptt,
+						struct init_ets_req *req);
+#endif /* UNUSED_HSI_FUNC */
+#ifndef UNUSED_HSI_FUNC
 /**
  * @brief ecore_init_brb_ram - initializes BRB RAM sizes per TC
  *
@@ -195,35 +211,43 @@ void ecore_init_prs_ets(struct ecore_hwfn *p_hwfn,
  * @param req	- the BRB RAM initialization requirements.
  */
 void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
-			struct ecore_ptt *p_ptt, struct init_brb_ram_req *req);
+						struct ecore_ptt *p_ptt,
+						struct init_brb_ram_req *req);
+#endif /* UNUSED_HSI_FUNC */
+#ifndef UNUSED_HSI_FUNC
 /**
- * @brief ecore_set_engine_mf_ovlan_eth_type - initializes Nig,Prs,Pbf
- * and llh ethType Regs to  input ethType
- * should Be called once per engine if engine is in BD mode.
+ * @brief ecore_set_engine_mf_ovlan_eth_type - initializes Nig,Prs,Pbf and llh
+ *                                             ethType Regs to  input ethType
+ *                                             should Be called once per engine
+ *                                             if engine
+ *  is in BD mode.
  *
  * @param p_ptt    - ptt window used for writing the registers.
  * @param ethType - etherType to configure
  */
 void ecore_set_engine_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
-					struct ecore_ptt *p_ptt, u32 eth_type);
+			struct ecore_ptt *p_ptt, u32 ethType);
 /**
- * @brief ecore_set_port_mf_ovlan_eth_type - initializes DORQ ethType Regs
- * to input ethType
- * should Be called once per port.
+ * @brief ecore_set_port_mf_ovlan_eth_type - initializes DORQ ethType Regs to
+ *                                           input ethType should Be called
+ *                                           once per port.
  *
  * @param p_ptt    - ptt window used for writing the registers.
  * @param ethType - etherType to configure
  */
 void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
-				      struct ecore_ptt *p_ptt, u32 eth_type);
+			struct ecore_ptt *p_ptt, u32 ethType);
+#endif /* UNUSED_HSI_FUNC */
 /**
- * @brief ecore_set_vxlan_dest_port - init vxlan tunnel destination udp port
+ * @brief ecore_set_vxlan_dest_port - initializes vxlan tunnel destination udp
+ *                                    port
  *
  * @param p_ptt     - ptt window used for writing the registers.
  * @param dest_port - vxlan destination udp port.
  */
 void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn,
-			       struct ecore_ptt *p_ptt, u16 dest_port);
+			       struct ecore_ptt *p_ptt,
+			       u16 dest_port);
 /**
  * @brief ecore_set_vxlan_enable - enable or disable VXLAN tunnel in HW
  *
@@ -231,7 +255,8 @@ void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn,
  * @param vxlan_enable - vxlan enable flag.
  */
 void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn,
-			    struct ecore_ptt *p_ptt, bool vxlan_enable);
+			    struct ecore_ptt *p_ptt,
+			    bool vxlan_enable);
 /**
  * @brief ecore_set_gre_enable - enable or disable GRE tunnel in HW
  *
@@ -241,15 +266,18 @@ void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn,
  */
 void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
 			  struct ecore_ptt *p_ptt,
-			  bool eth_gre_enable, bool ip_gre_enable);
+			  bool eth_gre_enable,
+			  bool ip_gre_enable);
 /**
- * @brief ecore_set_geneve_dest_port - init geneve tunnel destination udp port
+ * @brief ecore_set_geneve_dest_port - initializes geneve tunnel destination
+ *                                     udp port
  *
  * @param p_ptt     - ptt window used for writing the registers.
  * @param dest_port - geneve destination udp port.
  */
 void ecore_set_geneve_dest_port(struct ecore_hwfn *p_hwfn,
-				struct ecore_ptt *p_ptt, u16 dest_port);
+				struct ecore_ptt *p_ptt,
+				u16 dest_port);
 /**
  * @brief ecore_set_gre_enable - enable or disable GRE tunnel in HW
  *
diff --git a/drivers/net/qede/base/ecore_init_ops.h b/drivers/net/qede/base/ecore_init_ops.h
index 8a6fce4..f6b0a2d 100644
--- a/drivers/net/qede/base/ecore_init_ops.h
+++ b/drivers/net/qede/base/ecore_init_ops.h
@@ -68,7 +68,9 @@ void ecore_init_clear_rt_data(struct ecore_hwfn *p_hwfn);
  * @param rt_offset
  * @param val
  */
-void ecore_init_store_rt_reg(struct ecore_hwfn *p_hwfn, u32 rt_offset, u32 val);
+void ecore_init_store_rt_reg(struct ecore_hwfn *p_hwfn,
+			     u32               rt_offset,
+			     u32               val);
 
 #define STORE_RT_REG(hwfn, offset, val)				\
 	ecore_init_store_rt_reg(hwfn, offset, val)
@@ -87,7 +89,9 @@ void ecore_init_store_rt_reg(struct ecore_hwfn *p_hwfn, u32 rt_offset, u32 val);
 */
 
 void ecore_init_store_rt_agg(struct ecore_hwfn *p_hwfn,
-			     u32 rt_offset, u32 *val, osal_size_t size);
+			     u32               rt_offset,
+			     u32               *val,
+			     osal_size_t       size);
 
 #define STORE_RT_REG_AGG(hwfn, offset, val)			\
 	ecore_init_store_rt_agg(hwfn, offset, (u32 *)&val, sizeof(val))
diff --git a/drivers/net/qede/base/ecore_int.c b/drivers/net/qede/base/ecore_int.c
index 04c4947..4d5543a 100644
--- a/drivers/net/qede/base/ecore_int.c
+++ b/drivers/net/qede/base/ecore_int.c
@@ -944,7 +944,7 @@ static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,
 			 * previous assertion.
 			 */
 			for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
-				unsigned long bitmask;
+				unsigned long int bitmask;
 				u8 bit, bit_len;
 
 				p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j];
@@ -1021,8 +1021,8 @@ static enum _ecore_status_t ecore_int_attentions(struct ecore_hwfn *p_hwfn)
 	struct ecore_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn;
 	struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn;
 	u16 index = 0, asserted_bits, deasserted_bits;
-	enum _ecore_status_t rc = ECORE_SUCCESS;
 	u32 attn_bits = 0, attn_acks = 0;
+	enum _ecore_status_t rc = ECORE_SUCCESS;
 
 	/* Read current attention bits/acks - safeguard against attentions
 	 * by guaranting work on a synchronized timeframe
@@ -1162,6 +1162,7 @@ void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie)
 	}
 
 /* Check the validity of the DPC ptt. If not ack interrupts and fail */
+
 	if (!p_hwfn->p_dpc_ptt) {
 		DP_NOTICE(p_hwfn->p_dev, true, "Failed to allocate PTT\n");
 		ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
@@ -1582,7 +1583,7 @@ static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn,
 	p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
 					 &p_phys, SB_ALIGNED_SIZE(p_hwfn));
 	if (!p_virt) {
-		DP_NOTICE(p_hwfn, true, "Failed to allocate status block");
+		DP_NOTICE(p_hwfn, true, "Failed to allocate status block\n");
 		OSAL_FREE(p_hwfn->p_dev, p_sb);
 		return ECORE_NOMEM;
 	}
@@ -1691,6 +1692,7 @@ static void ecore_int_igu_enable_attn(struct ecore_hwfn *p_hwfn,
 	ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
 	ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff);
 
+	/* Flush the writes to IGU */
 	OSAL_MMIOWB(p_hwfn->p_dev);
 
 	/* Unmask AEU signals toward IGU */
@@ -1782,6 +1784,7 @@ void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn,
 
 	ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);
 
+	/* Flush the write to IGU */
 	OSAL_MMIOWB(p_hwfn->p_dev);
 
 	/* calculate where to read the status bit from */
diff --git a/drivers/net/qede/base/ecore_iov_api.h b/drivers/net/qede/base/ecore_iov_api.h
index 5ad4ec6..0085726 100644
--- a/drivers/net/qede/base/ecore_iov_api.h
+++ b/drivers/net/qede/base/ecore_iov_api.h
@@ -53,6 +53,14 @@ struct ecore_mcp_link_capabilities;
 struct ecore_vf_acquire_sw_info {
 	u32 driver_version;
 	u8 os_type;
+
+	/* We have several close releases that all use ~same FW with different
+	 * versions [making it incompatible as the versioning scheme is still
+	 * tied directly to FW version], allow to override the checking. Only
+	 * those versions would actually support this feature [so it would not
+	 * break forward compatibility with newer HV drivers that are no longer
+	 * suited].
+	 */
 	bool override_fw_version;
 };
 
@@ -132,7 +140,8 @@ void ecore_iov_set_vf_to_disable(struct ecore_hwfn *p_hwfn,
  */
 enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
 					      struct ecore_ptt *p_ptt,
-					      u16 rel_vf_id, u16 num_rx_queues);
+					      u16 rel_vf_id,
+					      u16 num_rx_queues);
 
 /**
  * @brief ecore_iov_process_mbx_req - process a request received
@@ -143,7 +152,8 @@ enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
  * @param vfid
  */
 void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
-			       struct ecore_ptt *p_ptt, int vfid);
+			       struct ecore_ptt *p_ptt,
+			       int vfid);
 
 /**
  * @brief ecore_iov_release_hw_for_vf - called once upper layer
@@ -197,7 +207,8 @@ enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
  */
 enum _ecore_status_t
 ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
-				struct ecore_ptt *p_ptt, u16 rel_vf_id);
+				struct ecore_ptt *p_ptt,
+				u16 rel_vf_id);
 
 /**
  * @brief Update the bulletin with link information. Notice this does NOT
@@ -238,7 +249,8 @@ void ecore_iov_get_link(struct ecore_hwfn *p_hwfn,
  *
  * @return bool
  */
-bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
+bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn,
+				 u16 rel_vf_id);
 
 /**
  * @brief Check if given VF ID @vfid is valid
@@ -253,7 +265,8 @@ bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
  * @return bool - true for valid VF ID
  */
 bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn,
-			     int rel_vf_id, bool b_enabled_only);
+			     int rel_vf_id,
+			     bool b_enabled_only);
 
 /**
  * @brief Get VF's public info structure
@@ -264,9 +277,9 @@ bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn,
  *
  * @return struct ecore_public_vf_info *
  */
-struct ecore_public_vf_info *ecore_iov_get_public_vf_info(struct ecore_hwfn
-							  *p_hwfn, u16 vfid,
-							  bool b_enabled_only);
+struct ecore_public_vf_info*
+ecore_iov_get_public_vf_info(struct ecore_hwfn *p_hwfn,
+			     u16 vfid, bool b_enabled_only);
 
 /**
  * @brief Set pending events bitmap for given @vfid
@@ -295,7 +308,8 @@ void ecore_iov_pf_get_and_clear_pending_events(struct ecore_hwfn *p_hwfn,
  * @return enum _ecore_status_t
  */
 enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn,
-					   struct ecore_ptt *ptt, int vfid);
+					   struct ecore_ptt *ptt,
+					   int vfid);
 /**
  * @brief Set forced MAC address in PFs copy of bulletin board
  *        and configures FW/HW to support the configuration.
@@ -342,7 +356,9 @@ void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
  */
 enum _ecore_status_t
 ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn,
-					       bool b_untagged_only, int vfid);
+					       bool b_untagged_only,
+					       int vfid);
+
 /**
  * @brief Get VFs opaque fid.
  *
@@ -486,7 +502,8 @@ u32 ecore_iov_pfvf_msg_length(void);
  *
  * @return OSAL_NULL if mac isn't forced; Otherwise, returns MAC.
  */
-u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
+u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn,
+				      u16 rel_vf_id);
 
 /**
  * @brief Returns pvid if one is configured
@@ -535,7 +552,8 @@ enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn,
  *
  * @return num of rxqs chains.
  */
-u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
+u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn *p_hwfn,
+			     u16 rel_vf_id);
 
 /**
  * @brief - Retrieves num of active rxqs chains
@@ -545,7 +563,8 @@ u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
  *
  * @return
  */
-u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
+u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn *p_hwfn,
+				    u16 rel_vf_id);
 
 /**
  * @brief - Retrieves ctx pointer
@@ -555,7 +574,8 @@ u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
  *
  * @return
  */
-void *ecore_iov_get_vf_ctx(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
+void *ecore_iov_get_vf_ctx(struct ecore_hwfn *p_hwfn,
+			   u16 rel_vf_id);
 
 /**
  * @brief - Retrieves VF`s num sbs
@@ -565,7 +585,8 @@ void *ecore_iov_get_vf_ctx(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
  *
  * @return
  */
-u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
+u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn *p_hwfn,
+			    u16 rel_vf_id);
 
 /**
  * @brief - Returm true if VF is waiting for acquire
@@ -575,7 +596,8 @@ u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
  *
  * @return
  */
-bool ecore_iov_is_vf_wait_for_acquire(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
+bool ecore_iov_is_vf_wait_for_acquire(struct ecore_hwfn *p_hwfn,
+				      u16 rel_vf_id);
 
 /**
  * @brief - Returm true if VF is acquired but not initialized
@@ -596,7 +618,8 @@ bool ecore_iov_is_vf_acquired_not_initialized(struct ecore_hwfn *p_hwfn,
  *
  * @return
  */
-bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
+bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn,
+				 u16 rel_vf_id);
 
 /**
  * @brief - Get VF's vport min rate configured.
diff --git a/drivers/net/qede/base/ecore_l2.c b/drivers/net/qede/base/ecore_l2.c
index b31523b..bc6b59d 100644
--- a/drivers/net/qede/base/ecore_l2.c
+++ b/drivers/net/qede/base/ecore_l2.c
@@ -35,9 +35,9 @@ ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
 {
 	struct vport_start_ramrod_data *p_ramrod = OSAL_NULL;
 	struct ecore_spq_entry *p_ent = OSAL_NULL;
-	enum _ecore_status_t rc = ECORE_NOTIMPL;
 	struct ecore_sp_init_data init_data;
 	u8 abs_vport_id = 0;
+	enum _ecore_status_t rc = ECORE_NOTIMPL;
 	u16 rx_mode = 0;
 
 	rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
@@ -449,8 +449,8 @@ enum _ecore_status_t ecore_sp_vport_stop(struct ecore_hwfn *p_hwfn,
 	struct vport_stop_ramrod_data *p_ramrod;
 	struct ecore_sp_init_data init_data;
 	struct ecore_spq_entry *p_ent;
-	enum _ecore_status_t rc;
 	u8 abs_vport_id = 0;
+	enum _ecore_status_t rc;
 
 	if (IS_VF(p_hwfn->p_dev))
 		return ecore_vf_pf_vport_stop(p_hwfn);
@@ -703,10 +703,10 @@ ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
 {
 	struct rx_queue_update_ramrod_data *p_ramrod = OSAL_NULL;
 	struct ecore_spq_entry *p_ent = OSAL_NULL;
-	enum _ecore_status_t rc = ECORE_NOTIMPL;
 	struct ecore_sp_init_data init_data;
 	struct ecore_hw_cid_data *p_rx_cid;
 	u16 qid, abs_rx_q_id = 0;
+	enum _ecore_status_t rc = ECORE_NOTIMPL;
 	u8 i;
 
 	if (IS_VF(p_hwfn->p_dev))
@@ -758,9 +758,9 @@ ecore_sp_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
 	struct ecore_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
 	struct rx_queue_stop_ramrod_data *p_ramrod = OSAL_NULL;
 	struct ecore_spq_entry *p_ent = OSAL_NULL;
-	enum _ecore_status_t rc = ECORE_NOTIMPL;
 	struct ecore_sp_init_data init_data;
 	u16 abs_rx_q_id = 0;
+	enum _ecore_status_t rc = ECORE_NOTIMPL;
 
 	if (IS_VF(p_hwfn->p_dev))
 		return ecore_vf_pf_rxq_stop(p_hwfn, rx_queue_id,
@@ -816,15 +816,16 @@ ecore_sp_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
 			      u16 pbl_size,
 			      union ecore_qm_pq_params *p_pq_params)
 {
-	struct ecore_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
 	struct tx_queue_start_ramrod_data *p_ramrod = OSAL_NULL;
 	struct ecore_spq_entry *p_ent = OSAL_NULL;
-	enum _ecore_status_t rc = ECORE_NOTIMPL;
 	struct ecore_sp_init_data init_data;
+	struct ecore_hw_cid_data *p_tx_cid;
 	u16 pq_id, abs_tx_q_id = 0;
 	u8 abs_vport_id;
+	enum _ecore_status_t rc = ECORE_NOTIMPL;
 
 	/* Store information for the stop */
+	p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
 	p_tx_cid->cid = cid;
 	p_tx_cid->opaque_fid = opaque_fid;
 
@@ -908,7 +909,8 @@ enum _ecore_status_t ecore_sp_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
 
 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
 		   "opaque_fid=0x%x, cid=0x%x, tx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
-		   opaque_fid, p_tx_cid->cid, tx_queue_id, vport_id, sb);
+		    opaque_fid, p_tx_cid->cid, tx_queue_id,
+		    vport_id, sb);
 
 	/* TODO - set tc in the pq_params for multi-cos */
 	rc = ecore_sp_eth_txq_start_ramrod(p_hwfn,
@@ -919,7 +921,9 @@ enum _ecore_status_t ecore_sp_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
 					   abs_stats_id,
 					   sb,
 					   sb_index,
-					   pbl_addr, pbl_size, &pq_params);
+					   pbl_addr,
+					   pbl_size,
+					   &pq_params);
 
 	*pp_doorbell = (u8 OSAL_IOMEM *)p_hwfn->doorbells +
 	    DB_ADDR(p_tx_cid->cid, DQ_DEMS_LEGACY);
@@ -1011,8 +1015,8 @@ ecore_filter_ucast_common(struct ecore_hwfn *p_hwfn,
 			  enum spq_mode comp_mode,
 			  struct ecore_spq_comp_cb *p_comp_data)
 {
-	struct vport_filter_update_ramrod_data *p_ramrod;
 	u8 vport_to_add_to = 0, vport_to_remove_from = 0;
+	struct vport_filter_update_ramrod_data *p_ramrod;
 	struct eth_filter_cmd *p_first_filter;
 	struct eth_filter_cmd *p_second_filter;
 	struct ecore_sp_init_data init_data;
@@ -1304,11 +1308,10 @@ ecore_sp_eth_filter_mcast(struct ecore_hwfn *p_hwfn,
 		    0, sizeof(p_ramrod->approx_mcast.bins));
 	OSAL_MEMSET(bins, 0, sizeof(unsigned long) *
 		    ETH_MULTICAST_MAC_BINS_IN_REGS);
-
-	if (p_filter_cmd->opcode == ECORE_FILTER_ADD) {
 	/* filter ADD op is explicit set op and it removes
 	*  any existing filters for the vport.
 	*/
+	if (p_filter_cmd->opcode == ECORE_FILTER_ADD) {
 		for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
 			u32 bit;
 
diff --git a/drivers/net/qede/base/ecore_l2_api.h b/drivers/net/qede/base/ecore_l2_api.h
index ab9aca0..65a508c 100644
--- a/drivers/net/qede/base/ecore_l2_api.h
+++ b/drivers/net/qede/base/ecore_l2_api.h
@@ -137,7 +137,8 @@ ecore_filter_mcast_cmd(struct ecore_dev *p_dev,
 
 /* Set "accept" filters */
 enum _ecore_status_t
-ecore_filter_accept_cmd(struct ecore_dev *p_dev,
+ecore_filter_accept_cmd(
+	struct ecore_dev		 *p_dev,
 	u8				 vport,
 	struct ecore_filter_accept_flags accept_flags,
 	u8				 update_accept_any_vlan,
@@ -204,7 +205,8 @@ enum _ecore_status_t ecore_sp_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
 enum _ecore_status_t
 ecore_sp_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
 			   u16 rx_queue_id,
-			   bool eq_completion_only, bool cqe_completion);
+			   bool eq_completion_only,
+			   bool cqe_completion);
 
 /**
  * @brief ecore_sp_eth_tx_queue_start - TX Queue Start Ramrod
@@ -351,7 +353,8 @@ ecore_sp_vport_update(struct ecore_hwfn *p_hwfn,
  * @return enum _ecore_status_t
  */
 enum _ecore_status_t ecore_sp_vport_stop(struct ecore_hwfn *p_hwfn,
-					 u16 opaque_fid, u8 vport_id);
+					 u16 opaque_fid,
+					 u8 vport_id);
 
 enum _ecore_status_t
 ecore_sp_eth_filter_ucast(struct ecore_hwfn *p_hwfn,
diff --git a/drivers/net/qede/base/ecore_mcp.c b/drivers/net/qede/base/ecore_mcp.c
index 2823113..b29e630 100644
--- a/drivers/net/qede/base/ecore_mcp.c
+++ b/drivers/net/qede/base/ecore_mcp.c
@@ -135,7 +135,8 @@ static enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
 						       PUBLIC_DRV_MB));
 	p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
-		   "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
+		   "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x"
+		   " mcp_pf_id = 0x%x\n",
 		   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);
 
 	/* Set the MFW MB address */
diff --git a/drivers/net/qede/base/ecore_sp_commands.c b/drivers/net/qede/base/ecore_sp_commands.c
index 7ba43e8..478c825 100644
--- a/drivers/net/qede/base/ecore_sp_commands.c
+++ b/drivers/net/qede/base/ecore_sp_commands.c
@@ -323,11 +323,11 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
 				       bool allow_npar_tx_switch)
 {
 	struct pf_start_ramrod_data *p_ramrod = OSAL_NULL;
-	struct ecore_spq_entry *p_ent = OSAL_NULL;
 	u16 sb = ecore_int_get_sp_sb_id(p_hwfn);
 	u8 sb_index = p_hwfn->p_eq->eq_sb_index;
-	enum _ecore_status_t rc = ECORE_NOTIMPL;
+	struct ecore_spq_entry *p_ent = OSAL_NULL;
 	struct ecore_sp_init_data init_data;
+	enum _ecore_status_t rc = ECORE_NOTIMPL;
 	u8 page_cnt;
 
 	/* update initial eq producer */
@@ -416,8 +416,8 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
 enum _ecore_status_t ecore_sp_pf_update(struct ecore_hwfn *p_hwfn)
 {
 	struct ecore_spq_entry *p_ent = OSAL_NULL;
-	enum _ecore_status_t rc = ECORE_NOTIMPL;
 	struct ecore_sp_init_data init_data;
+	enum _ecore_status_t rc = ECORE_NOTIMPL;
 
 	/* Get SPQ entry */
 	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
@@ -445,8 +445,8 @@ ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
 			    struct ecore_spq_comp_cb *p_comp_data)
 {
 	struct ecore_spq_entry *p_ent = OSAL_NULL;
-	enum _ecore_status_t rc = ECORE_NOTIMPL;
 	struct ecore_sp_init_data init_data;
+	enum _ecore_status_t rc = ECORE_NOTIMPL;
 
 	/* Get SPQ entry */
 	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
@@ -473,20 +473,19 @@ ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
 	if (p_tunn->update_geneve_udp_port)
 		ecore_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
 					   p_tunn->geneve_udp_port);
+	}
 
-		ecore_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt,
-				       p_tunn->tunn_mode);
+	ecore_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, p_tunn->tunn_mode);
 	p_hwfn->p_dev->tunn_mode = p_tunn->tunn_mode;
-	}
 
 	return rc;
 }
 
 enum _ecore_status_t ecore_sp_pf_stop(struct ecore_hwfn *p_hwfn)
 {
-	enum _ecore_status_t rc = ECORE_NOTIMPL;
 	struct ecore_spq_entry *p_ent = OSAL_NULL;
 	struct ecore_sp_init_data init_data;
+	enum _ecore_status_t rc = ECORE_NOTIMPL;
 
 	/* Get SPQ entry */
 	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
@@ -506,8 +505,8 @@ enum _ecore_status_t ecore_sp_pf_stop(struct ecore_hwfn *p_hwfn)
 enum _ecore_status_t ecore_sp_heartbeat_ramrod(struct ecore_hwfn *p_hwfn)
 {
 	struct ecore_spq_entry *p_ent = OSAL_NULL;
-	enum _ecore_status_t rc = ECORE_NOTIMPL;
 	struct ecore_sp_init_data init_data;
+	enum _ecore_status_t rc = ECORE_NOTIMPL;
 
 	/* Get SPQ entry */
 	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
diff --git a/drivers/net/qede/base/ecore_spq.c b/drivers/net/qede/base/ecore_spq.c
index 1839659..edff2d7 100644
--- a/drivers/net/qede/base/ecore_spq.c
+++ b/drivers/net/qede/base/ecore_spq.c
@@ -157,7 +157,7 @@ static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn,
 	rc = ecore_cxt_get_cid_info(p_hwfn, &cxt_info);
 
 	if (rc < 0) {
-		DP_NOTICE(p_hwfn, true, "Cannot find context info for cid=%d",
+		DP_NOTICE(p_hwfn, true, "Cannot find context info for cid=%d\n",
 			  p_spq->cid);
 		return;
 	}
@@ -352,7 +352,7 @@ struct ecore_eq *ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
 			      ECORE_CHAIN_CNT_TYPE_U16,
 			      num_elem,
 			      sizeof(union event_ring_element), &p_eq->chain)) {
-		DP_NOTICE(p_hwfn, true, "Failed to allocate eq chain");
+		DP_NOTICE(p_hwfn, true, "Failed to allocate eq chain\n");
 		goto eq_allocate_fail;
 	}
 
@@ -419,8 +419,8 @@ enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn,
  ***************************************************************************/
 void ecore_spq_setup(struct ecore_hwfn *p_hwfn)
 {
-	struct ecore_spq_entry *p_virt = OSAL_NULL;
 	struct ecore_spq *p_spq = p_hwfn->p_spq;
+	struct ecore_spq_entry *p_virt = OSAL_NULL;
 	dma_addr_t p_phys = 0;
 	u32 i, capacity;
 
@@ -475,7 +475,7 @@ enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
 	    OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_spq));
 	if (!p_spq) {
 		DP_NOTICE(p_hwfn, true,
-			  "Failed to allocate `struct ecore_spq'");
+			  "Failed to allocate `struct ecore_spq'\n");
 		return ECORE_NOMEM;
 	}
 
@@ -484,7 +484,7 @@ enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
 			ECORE_CHAIN_MODE_SINGLE, ECORE_CHAIN_CNT_TYPE_U16, 0,
 			/* N/A when the mode is SINGLE */
 			sizeof(struct slow_path_element), &p_spq->chain)) {
-		DP_NOTICE(p_hwfn, true, "Failed to allocate spq chain");
+		DP_NOTICE(p_hwfn, true, "Failed to allocate spq chain\n");
 		goto spq_allocate_fail;
 	}
 
@@ -745,7 +745,7 @@ enum _ecore_status_t ecore_spq_post(struct ecore_hwfn *p_hwfn,
 	if (p_hwfn->p_dev->recov_in_prog) {
 		DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
 			   "Recovery is in progress -> skip spq post"
-			   " [cmd %02x protocol %02x]",
+			   " [cmd %02x protocol %02x]\n",
 			   p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id);
 		/* Return success to let the flows to be completed successfully
 		 * w/o any error handling.
diff --git a/drivers/net/qede/base/ecore_spq.h b/drivers/net/qede/base/ecore_spq.h
index 74484ab..490b7d9 100644
--- a/drivers/net/qede/base/ecore_spq.h
+++ b/drivers/net/qede/base/ecore_spq.h
@@ -175,7 +175,8 @@ void ecore_spq_free(struct ecore_hwfn *p_hwfn);
  * @return enum _ecore_status_t
  */
 enum _ecore_status_t
-ecore_spq_get_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry **pp_ent);
+ecore_spq_get_entry(struct ecore_hwfn		*p_hwfn,
+		    struct ecore_spq_entry	**pp_ent);
 
 /**
  * @brief ecore_spq_return_entry - Return an entry to spq free
@@ -194,7 +195,8 @@ void ecore_spq_return_entry(struct ecore_hwfn		*p_hwfn,
  *
  * @return struct ecore_eq* - a newly allocated structure; NULL upon error.
  */
-struct ecore_eq *ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem);
+struct ecore_eq *ecore_eq_alloc(struct ecore_hwfn	*p_hwfn,
+				 u16			num_elem);
 
 /**
  * @brief ecore_eq_setup - Reset the SPQ to its start state.
@@ -202,7 +204,8 @@ struct ecore_eq *ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem);
  * @param p_hwfn
  * @param p_eq
  */
-void ecore_eq_setup(struct ecore_hwfn *p_hwfn, struct ecore_eq *p_eq);
+void ecore_eq_setup(struct ecore_hwfn *p_hwfn,
+		    struct ecore_eq   *p_eq);
 
 /**
  * @brief ecore_eq_deallocate - deallocates the given EQ struct.
@@ -210,7 +213,8 @@ void ecore_eq_setup(struct ecore_hwfn *p_hwfn, struct ecore_eq *p_eq);
  * @param p_hwfn
  * @param p_eq
  */
-void ecore_eq_free(struct ecore_hwfn *p_hwfn, struct ecore_eq *p_eq);
+void ecore_eq_free(struct ecore_hwfn *p_hwfn,
+		   struct ecore_eq   *p_eq);
 
 /**
  * @brief ecore_eq_prod_update - update the FW with default EQ producer
@@ -218,7 +222,8 @@ void ecore_eq_free(struct ecore_hwfn *p_hwfn, struct ecore_eq *p_eq);
  * @param p_hwfn
  * @param prod
  */
-void ecore_eq_prod_update(struct ecore_hwfn *p_hwfn, u16 prod);
+void ecore_eq_prod_update(struct ecore_hwfn	*p_hwfn,
+			  u16			prod);
 
 /**
  * @brief ecore_eq_completion - Completes currently pending EQ elements
@@ -271,7 +276,8 @@ struct ecore_consq *ecore_consq_alloc(struct ecore_hwfn	*p_hwfn);
  * @param p_hwfn
  * @param p_eq
  */
-void ecore_consq_setup(struct ecore_hwfn *p_hwfn, struct ecore_consq *p_consq);
+void ecore_consq_setup(struct ecore_hwfn *p_hwfn,
+		    struct ecore_consq   *p_consq);
 
 /**
  * @brief ecore_consq_free - deallocates the given ConsQ struct.
@@ -279,6 +285,7 @@ void ecore_consq_setup(struct ecore_hwfn *p_hwfn, struct ecore_consq *p_consq);
  * @param p_hwfn
  * @param p_eq
  */
-void ecore_consq_free(struct ecore_hwfn *p_hwfn, struct ecore_consq *p_consq);
+void ecore_consq_free(struct ecore_hwfn *p_hwfn,
+		   struct ecore_consq   *p_consq);
 
 #endif /* __ECORE_SPQ_H__ */
diff --git a/drivers/net/qede/base/eth_common.h b/drivers/net/qede/base/eth_common.h
index bd73f7d..cc310e3 100644
--- a/drivers/net/qede/base/eth_common.h
+++ b/drivers/net/qede/base/eth_common.h
@@ -41,14 +41,18 @@
 #define ETH_NUM_VLAN_FILTERS                512
 
 /* approx. multicast constants */
+/* CRC seed for multicast bin calculation */
 #define ETH_MULTICAST_BIN_FROM_MAC_SEED     0
 #define ETH_MULTICAST_MAC_BINS              256
 #define ETH_MULTICAST_MAC_BINS_IN_REGS      (ETH_MULTICAST_MAC_BINS / 32)
 
 /*  ethernet vport update constants */
 #define ETH_FILTER_RULES_COUNT              10
+/* number of RSS indirection table entries, per Vport */
 #define ETH_RSS_IND_TABLE_ENTRIES_NUM       128
+/* Length of RSS key (in regs) */
 #define ETH_RSS_KEY_SIZE_REGS               10
+/* number of available RSS engines in K2 */
 #define ETH_RSS_ENGINE_NUM_K2               207
 #define ETH_RSS_ENGINE_NUM_BB               127
 
@@ -156,10 +160,10 @@ struct eth_tx_data_2nd_bd {
  * Firmware data for L2-EDPM packet.
  */
 struct eth_edpm_fw_data {
-	struct eth_tx_data_1st_bd data_1st_bd
-	    /* Parsing information data from the 1st BD. */;
-	struct eth_tx_data_2nd_bd data_2nd_bd
-	    /* Parsing information data from the 2nd BD. */;
+/* Parsing information data from the 1st BD. */
+	struct eth_tx_data_1st_bd data_1st_bd;
+/* Parsing information data from the 2nd BD. */
+	struct eth_tx_data_2nd_bd data_2nd_bd;
 	__le32 reserved;
 };
 
@@ -348,7 +352,8 @@ enum eth_rx_cqe_type {
 };
 
 /*
- * Wrapp for PD RX CQE used in order to cover full cache line when writing CQE
+ * Wrapper for PD RX CQE - used in order to cover full cache line when writing
+ * CQE
  */
 struct eth_rx_pmd_cqe {
 	union eth_rx_cqe cqe /* CQE data itself */;
diff --git a/drivers/net/qede/base/nvm_cfg.h b/drivers/net/qede/base/nvm_cfg.h
index 8d99880..fe980d5 100644
--- a/drivers/net/qede/base/nvm_cfg.h
+++ b/drivers/net/qede/base/nvm_cfg.h
@@ -225,7 +225,8 @@ struct nvm_cfg1_glob {
 		#define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_INT_EXT_I2C 0x1
 		#define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_INT_ONLY 0x2
 		#define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_INT_EXT_SMBUS 0x3
-#define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_MASK          0x06000000
+		#define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_MASK \
+			0x06000000
 		#define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_OFFSET 25
 		#define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_DISABLE 0x0
 		#define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_INTERNAL 0x1
@@ -272,10 +273,12 @@ struct nvm_cfg1_glob {
 		#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_MASK 0x00000080
 		#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_OFFSET 7
 	/*  Control the period between two successive checks */
-#define NVM_CFG1_GLOB_TEMPERATURE_PERIOD_BETWEEN_CHECKS_MASK    0x0000FF00
+		#define NVM_CFG1_GLOB_TEMPERATURE_PERIOD_BETWEEN_CHECKS_MASK \
+			0x0000FF00
 		#define NVM_CFG1_GLOB_TEMPERATURE_PERIOD_BETWEEN_CHECKS_OFFSET 8
 	/*  Set shutdown temperature */
-#define NVM_CFG1_GLOB_SHUTDOWN_THRESHOLD_TEMPERATURE_MASK       0x00FF0000
+		#define NVM_CFG1_GLOB_SHUTDOWN_THRESHOLD_TEMPERATURE_MASK \
+			0x00FF0000
 		#define NVM_CFG1_GLOB_SHUTDOWN_THRESHOLD_TEMPERATURE_OFFSET 16
 	/*  Set max. count for over operational temperature */
 		#define NVM_CFG1_GLOB_MAX_COUNT_OPER_THRESHOLD_MASK 0xFF000000
@@ -320,10 +323,12 @@ struct nvm_cfg1_glob {
 		#define NVM_CFG1_GLOB_VENDOR_ID_MASK 0x0000FFFF
 		#define NVM_CFG1_GLOB_VENDOR_ID_OFFSET 0
 	/*  Set caution temperature */
-#define NVM_CFG1_GLOB_CAUTION_THRESHOLD_TEMPERATURE_MASK        0x00FF0000
+		#define NVM_CFG1_GLOB_CAUTION_THRESHOLD_TEMPERATURE_MASK \
+			0x00FF0000
 		#define NVM_CFG1_GLOB_CAUTION_THRESHOLD_TEMPERATURE_OFFSET 16
 	/*  Set external thermal sensor I2C address */
-#define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_ADDRESS_MASK      0xFF000000
+		#define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_ADDRESS_MASK \
+			0xFF000000
 		#define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_ADDRESS_OFFSET 24
 	u32 pci_subsys_id; /* 0x54 */
 		#define NVM_CFG1_GLOB_SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFF
@@ -349,6 +354,7 @@ struct nvm_cfg1_glob {
 		#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_8M 0xD
 		#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_16M 0xE
 		#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_32M 0xF
+	/*  BB VF BAR2 size */
 		#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_MASK 0x000000F0
 		#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_OFFSET 4
 		#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_DISABLED 0x0
@@ -367,6 +373,7 @@ struct nvm_cfg1_glob {
 		#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_16M 0xD
 		#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_32M 0xE
 		#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_64M 0xF
+	/*  BB BAR2 size (global) */
 		#define NVM_CFG1_GLOB_BAR2_SIZE_MASK 0x00000F00
 		#define NVM_CFG1_GLOB_BAR2_SIZE_OFFSET 8
 		#define NVM_CFG1_GLOB_BAR2_SIZE_DISABLED 0x0
-- 
1.8.3.1


