[dpdk-dev] [PATCH 7/8] net/bnxt: fix HWRM_*() macros and locking

Ajit Khaparde ajit.khaparde at broadcom.com
Thu Aug 24 18:29:55 CEST 2017


Obtain the spinlock in HWRM_PREP().
Eliminate two unnecessary arguments in HWRM_PREP().
Unlock the spinlock before returning in HWRM_CHECK_RESULT().
Add a new HWRM_UNLOCK() macro.
Update usage of the three macros.
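
For reference, a helper converted to the new macros follows roughly the
call pattern sketched below (illustrative only, using a made-up FOO
command; the real conversions are in the diff):

    int bnxt_hwrm_foo(struct bnxt *bp)
    {
        struct hwrm_foo_input req = {.req_type = 0 };
        struct hwrm_foo_output *resp = bp->hwrm_cmd_resp_addr;
        int rc;

        HWRM_PREP(req, FOO);        /* takes bp->hwrm_lock */
        /* fill in request fields here */

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();        /* on error: unlocks and returns rc */
        /* read resp-> fields while the lock is still held */
        HWRM_UNLOCK();              /* drop the lock once resp is consumed */

        return rc;
    }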

Signed-off-by: Ajit Khaparde <ajit.khaparde at broadcom.com>
---
 drivers/net/bnxt/bnxt_hwrm.c | 412 ++++++++++++++++++++++++++++---------------
 1 file changed, 268 insertions(+), 144 deletions(-)

diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index e710e63..0fc5244 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -54,7 +54,7 @@
 
 #include <rte_io.h>
 
-#define HWRM_CMD_TIMEOUT		2000
+#define HWRM_CMD_TIMEOUT		10000
 
 struct bnxt_plcmodes_cfg {
 	uint32_t	flags;
@@ -95,7 +95,7 @@ static int page_roundup(size_t size)
  * command was failed by the ChiMP.
  */
 
-static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg,
+static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
 					uint32_t msg_len)
 {
 	unsigned int i;
@@ -171,29 +171,33 @@ static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg,
 	return -1;
 }
 
-static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, uint32_t msg_len)
-{
-	int rc;
-
-	rte_spinlock_lock(&bp->hwrm_lock);
-	rc = bnxt_hwrm_send_message_locked(bp, msg, msg_len);
-	rte_spinlock_unlock(&bp->hwrm_lock);
-	return rc;
-}
-
-#define HWRM_PREP(req, type, cr, resp) \
+/*
+ * HWRM_PREP() should be used to prepare *ALL* HWRM commands.  It grabs the
+ * spinlock, and does initial processing.
+ *
+ * HWRM_CHECK_RESULT() checks the return code and the response for errors;
+ * on failure it releases the spinlock and returns the error code.  Functions
+ * that do not use the regular int return codes should not use it directly,
+ * but copy and modify it to suit the function.
+ *
+ * HWRM_UNLOCK() must be called after all response processing is completed.
+ */
+#define HWRM_PREP(req, type) do { \
+	rte_spinlock_lock(&bp->hwrm_lock); \
 	memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
 	req.req_type = rte_cpu_to_le_16(HWRM_##type); \
-	req.cmpl_ring = rte_cpu_to_le_16(cr); \
+	req.cmpl_ring = rte_cpu_to_le_16(-1); \
 	req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
 	req.target_id = rte_cpu_to_le_16(0xffff); \
-	req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr)
+	req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
+} while (0)
 
-#define HWRM_CHECK_RESULT \
-	{ \
+#define HWRM_CHECK_RESULT() \
+	do {\
 		if (rc) { \
 			RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
 				__func__, rc); \
+			rte_spinlock_unlock(&bp->hwrm_lock); \
 			return rc; \
 		} \
 		if (resp->error_code) { \
@@ -214,9 +219,12 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, uint32_t msg_len)
 				RTE_LOG(ERR, PMD, \
 					"%s error %d\n", __func__, rc); \
 			} \
+			rte_spinlock_unlock(&bp->hwrm_lock); \
 			return rc; \
 		} \
-	}
+	} while (0)
+
+#define HWRM_UNLOCK()		rte_spinlock_unlock(&bp->hwrm_lock)
 
 int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 {
@@ -224,13 +232,14 @@ int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
 	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
+	HWRM_PREP(req, CFA_L2_SET_RX_MASK);
 	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
 	req.mask = 0;
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
-	HWRM_CHECK_RESULT;
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
 
 	return rc;
 }
@@ -245,7 +254,7 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
 	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
 	uint32_t mask = 0;
 
-	HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
+	HWRM_PREP(req, CFA_L2_SET_RX_MASK);
 	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
 
 	/* FIXME add multicast flag, when multicast adding options is supported
@@ -278,7 +287,8 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
-	HWRM_CHECK_RESULT;
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
 
 	return rc;
 }
@@ -307,7 +317,7 @@ int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
 				return 0;
 		}
 	}
-	HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG, -1, resp);
+	HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG);
 	req.fid = rte_cpu_to_le_16(fid);
 
 	req.vlan_tag_mask_tbl_addr =
@@ -316,7 +326,8 @@ int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
-	HWRM_CHECK_RESULT;
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
 
 	return rc;
 }
@@ -331,13 +342,14 @@ int bnxt_hwrm_clear_filter(struct bnxt *bp,
 	if (filter->fw_l2_filter_id == UINT64_MAX)
 		return 0;
 
-	HWRM_PREP(req, CFA_L2_FILTER_FREE, -1, resp);
+	HWRM_PREP(req, CFA_L2_FILTER_FREE);
 
 	req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
-	HWRM_CHECK_RESULT;
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
 
 	filter->fw_l2_filter_id = -1;
 
@@ -356,7 +368,7 @@ int bnxt_hwrm_set_filter(struct bnxt *bp,
 	if (filter->fw_l2_filter_id != UINT64_MAX)
 		bnxt_hwrm_clear_filter(bp, filter);
 
-	HWRM_PREP(req, CFA_L2_FILTER_ALLOC, -1, resp);
+	HWRM_PREP(req, CFA_L2_FILTER_ALLOC);
 
 	req.flags = rte_cpu_to_le_32(filter->flags);
 
@@ -387,9 +399,10 @@ int bnxt_hwrm_set_filter(struct bnxt *bp,
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
-	HWRM_CHECK_RESULT;
+	HWRM_CHECK_RESULT();
 
 	filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
+	HWRM_UNLOCK();
 
 	return rc;
 }
@@ -402,13 +415,13 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
 	uint16_t new_max_vfs;
 	int i;
 
-	HWRM_PREP(req, FUNC_QCAPS, -1, resp);
+	HWRM_PREP(req, FUNC_QCAPS);
 
 	req.fid = rte_cpu_to_le_16(0xffff);
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
-	HWRM_CHECK_RESULT;
+	HWRM_CHECK_RESULT();
 
 	bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
 	if (BNXT_PF(bp)) {
@@ -469,6 +482,7 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
 	bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
 	if (BNXT_PF(bp))
 		bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
+	HWRM_UNLOCK();
 
 	return rc;
 }
@@ -479,13 +493,14 @@ int bnxt_hwrm_func_reset(struct bnxt *bp)
 	struct hwrm_func_reset_input req = {.req_type = 0 };
 	struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, FUNC_RESET, -1, resp);
+	HWRM_PREP(req, FUNC_RESET);
 
 	req.enables = rte_cpu_to_le_32(0);
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
-	HWRM_CHECK_RESULT;
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
 
 	return rc;
 }
@@ -499,7 +514,7 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp)
 	if (bp->flags & BNXT_FLAG_REGISTERED)
 		return 0;
 
-	HWRM_PREP(req, FUNC_DRV_RGTR, -1, resp);
+	HWRM_PREP(req, FUNC_DRV_RGTR);
 	req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
 			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
 	req.ver_maj = RTE_VER_YEAR;
@@ -519,7 +534,8 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp)
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
-	HWRM_CHECK_RESULT;
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
 
 	bp->flags |= BNXT_FLAG_REGISTERED;
 
@@ -538,19 +554,15 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
 	uint32_t dev_caps_cfg;
 
 	bp->max_req_len = HWRM_MAX_REQ_LEN;
-	HWRM_PREP(req, VER_GET, -1, resp);
+	HWRM_PREP(req, VER_GET);
 
 	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
 	req.hwrm_intf_min = HWRM_VERSION_MINOR;
 	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
 
-	/*
-	 * Hold the lock since we may be adjusting the response pointers.
-	 */
-	rte_spinlock_lock(&bp->hwrm_lock);
-	rc = bnxt_hwrm_send_message_locked(bp, &req, sizeof(req));
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
-	HWRM_CHECK_RESULT;
+	HWRM_CHECK_RESULT();
 
 	RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
 		resp->hwrm_intf_maj, resp->hwrm_intf_min,
@@ -651,7 +663,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
 	}
 
 error:
-	rte_spinlock_unlock(&bp->hwrm_lock);
+	HWRM_UNLOCK();
 	return rc;
 }
 
@@ -664,12 +676,13 @@ int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
 	if (!(bp->flags & BNXT_FLAG_REGISTERED))
 		return 0;
 
-	HWRM_PREP(req, FUNC_DRV_UNRGTR, -1, resp);
+	HWRM_PREP(req, FUNC_DRV_UNRGTR);
 	req.flags = flags;
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
-	HWRM_CHECK_RESULT;
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
 
 	bp->flags &= ~BNXT_FLAG_REGISTERED;
 
@@ -685,7 +698,7 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
 	uint32_t link_speed_mask =
 		HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
 
-	HWRM_PREP(req, PORT_PHY_CFG, -1, resp);
+	HWRM_PREP(req, PORT_PHY_CFG);
 
 	if (conf->link_up) {
 		req.flags = rte_cpu_to_le_32(conf->phy_flags);
@@ -729,7 +742,8 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
-	HWRM_CHECK_RESULT;
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
 
 	return rc;
 }
@@ -741,11 +755,11 @@ static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
 	struct hwrm_port_phy_qcfg_input req = {0};
 	struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, PORT_PHY_QCFG, -1, resp);
+	HWRM_PREP(req, PORT_PHY_QCFG);
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
-	HWRM_CHECK_RESULT;
+	HWRM_CHECK_RESULT();
 
 	link_info->phy_link_status = resp->link;
 	link_info->link_up =
@@ -765,6 +779,8 @@ static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
 	link_info->phy_ver[1] = resp->phy_min;
 	link_info->phy_ver[2] = resp->phy_bld;
 
+	HWRM_UNLOCK();
+
 	return rc;
 }
 
@@ -774,11 +790,11 @@ int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
 	struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
 	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, QUEUE_QPORTCFG, -1, resp);
+	HWRM_PREP(req, QUEUE_QPORTCFG);
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
-	HWRM_CHECK_RESULT;
+	HWRM_CHECK_RESULT();
 
 #define GET_QUEUE_INFO(x) \
 	bp->cos_queue[x].id = resp->queue_id##x; \
@@ -793,6 +809,8 @@ int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
 	GET_QUEUE_INFO(6);
 	GET_QUEUE_INFO(7);
 
+	HWRM_UNLOCK();
+
 	return rc;
 }
 
@@ -806,7 +824,7 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
 	struct hwrm_ring_alloc_input req = {.req_type = 0 };
 	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, RING_ALLOC, -1, resp);
+	HWRM_PREP(req, RING_ALLOC);
 
 	req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
 	req.fbo = rte_cpu_to_le_32(0);
@@ -837,6 +855,7 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
 	default:
 		RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
 			ring_type);
+		HWRM_UNLOCK();
 		return -1;
 	}
 	req.enables = rte_cpu_to_le_32(enables);
@@ -850,22 +869,27 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
 		case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
 			RTE_LOG(ERR, PMD,
 				"hwrm_ring_alloc cp failed. rc:%d\n", rc);
+			HWRM_UNLOCK();
 			return rc;
 		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
 			RTE_LOG(ERR, PMD,
 				"hwrm_ring_alloc rx failed. rc:%d\n", rc);
+			HWRM_UNLOCK();
 			return rc;
 		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
 			RTE_LOG(ERR, PMD,
 				"hwrm_ring_alloc tx failed. rc:%d\n", rc);
+			HWRM_UNLOCK();
 			return rc;
 		default:
 			RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
+			HWRM_UNLOCK();
 			return rc;
 		}
 	}
 
 	ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
+	HWRM_UNLOCK();
 	return rc;
 }
 
@@ -876,7 +900,7 @@ int bnxt_hwrm_ring_free(struct bnxt *bp,
 	struct hwrm_ring_free_input req = {.req_type = 0 };
 	struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, RING_FREE, -1, resp);
+	HWRM_PREP(req, RING_FREE);
 
 	req.ring_type = ring_type;
 	req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);
@@ -886,6 +910,7 @@ int bnxt_hwrm_ring_free(struct bnxt *bp,
 	if (rc || resp->error_code) {
 		if (rc == 0 && resp->error_code)
 			rc = rte_le_to_cpu_16(resp->error_code);
+		HWRM_UNLOCK();
 
 		switch (ring_type) {
 		case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
@@ -905,6 +930,7 @@ int bnxt_hwrm_ring_free(struct bnxt *bp,
 			return rc;
 		}
 	}
+	HWRM_UNLOCK();
 	return 0;
 }
 
@@ -914,7 +940,7 @@ int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
 	struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
 	struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, RING_GRP_ALLOC, -1, resp);
+	HWRM_PREP(req, RING_GRP_ALLOC);
 
 	req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
 	req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
@@ -923,11 +949,13 @@ int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
-	HWRM_CHECK_RESULT;
+	HWRM_CHECK_RESULT();
 
 	bp->grp_info[idx].fw_grp_id =
 	    rte_le_to_cpu_16(resp->ring_group_id);
 
+	HWRM_UNLOCK();
+
 	return rc;
 }
 
@@ -937,13 +965,14 @@ int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
 	struct hwrm_ring_grp_free_input req = {.req_type = 0 };
 	struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, RING_GRP_FREE, -1, resp);
+	HWRM_PREP(req, RING_GRP_FREE);
 
 	req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
-	HWRM_CHECK_RESULT;
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
 
 	bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
 	return rc;
@@ -958,13 +987,14 @@ int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
 	if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
 		return rc;
 
-	HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp);
+	HWRM_PREP(req, STAT_CTX_CLR_STATS);
 
 	req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
-	HWRM_CHECK_RESULT;
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
 
 	return rc;
 }
@@ -976,7 +1006,7 @@ int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 	struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
 	struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, STAT_CTX_ALLOC, -1, resp);
+	HWRM_PREP(req, STAT_CTX_ALLOC);
 
 	req.update_period_ms = rte_cpu_to_le_32(0);
 
@@ -985,10 +1015,12 @@ int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
-	HWRM_CHECK_RESULT;
+	HWRM_CHECK_RESULT();
 
 	cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
 
+	HWRM_UNLOCK();
+
 	return rc;
 }
 
@@ -999,13 +1031,14 @@ int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 	struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
 	struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, STAT_CTX_FREE, -1, resp);
+	HWRM_PREP(req, STAT_CTX_FREE);
 
 	req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
-	HWRM_CHECK_RESULT;
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
 
 	return rc;
 }
@@ -1027,15 +1060,16 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 	vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
 	vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
 				ETHER_CRC_LEN + VLAN_TAG_SIZE;
-	HWRM_PREP(req, VNIC_ALLOC, -1, resp);
+	HWRM_PREP(req, VNIC_ALLOC);
 
 	if (vnic->func_default)
 		req.flags = HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT;
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
-	HWRM_CHECK_RESULT;
+	HWRM_CHECK_RESULT();
 
 	vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
+	HWRM_UNLOCK();
 	RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
 	return rc;
 }
@@ -1048,13 +1082,13 @@ static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
 	struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
 	struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, VNIC_PLCMODES_QCFG, -1, resp);
+	HWRM_PREP(req, VNIC_PLCMODES_QCFG);
 
 	req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
-	HWRM_CHECK_RESULT;
+	HWRM_CHECK_RESULT();
 
 	pmode->flags = rte_le_to_cpu_32(resp->flags);
 	/* dflt_vnic bit doesn't exist in the _cfg command */
@@ -1063,6 +1097,8 @@ static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
 	pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
 	pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);
 
+	HWRM_UNLOCK();
+
 	return rc;
 }
 
@@ -1074,7 +1110,7 @@ static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
 	struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
 	struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, VNIC_PLCMODES_CFG, -1, resp);
+	HWRM_PREP(req, VNIC_PLCMODES_CFG);
 
 	req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
 	req.flags = rte_cpu_to_le_32(pmode->flags);
@@ -1089,7 +1125,8 @@ static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
-	HWRM_CHECK_RESULT;
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
 
 	return rc;
 }
@@ -1111,7 +1148,7 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 	if (rc)
 		return rc;
 
-	HWRM_PREP(req, VNIC_CFG, -1, resp);
+	HWRM_PREP(req, VNIC_CFG);
 
 	/* Only RSS support for now TBD: COS & LB */
 	req.enables =
@@ -1151,7 +1188,8 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
-	HWRM_CHECK_RESULT;
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
 
 	rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
 
@@ -1169,7 +1207,7 @@ int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
 		RTE_LOG(DEBUG, PMD, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
 		return rc;
 	}
-	HWRM_PREP(req, VNIC_QCFG, -1, resp);
+	HWRM_PREP(req, VNIC_QCFG);
 
 	req.enables =
 		rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
@@ -1178,7 +1216,7 @@ int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
-	HWRM_CHECK_RESULT;
+	HWRM_CHECK_RESULT();
 
 	vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
 	vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
@@ -1198,6 +1236,8 @@ int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
 	vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
 			HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
 
+	HWRM_UNLOCK();
+
 	return rc;
 }
 
@@ -1208,13 +1248,14 @@ int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
 						bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, -1, resp);
+	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC);
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
-	HWRM_CHECK_RESULT;
+	HWRM_CHECK_RESULT();
 
 	vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
+	HWRM_UNLOCK();
 	RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule);
 
 	return rc;
@@ -1231,13 +1272,14 @@ int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 		RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule);
 		return rc;
 	}
-	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, -1, resp);
+	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE);
 
 	req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
-	HWRM_CHECK_RESULT;
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
 
 	vnic->rss_rule = INVALID_HW_RING_ID;
 
@@ -1255,13 +1297,14 @@ int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 		return rc;
 	}
 
-	HWRM_PREP(req, VNIC_FREE, -1, resp);
+	HWRM_PREP(req, VNIC_FREE);
 
 	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
-	HWRM_CHECK_RESULT;
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
 
 	vnic->fw_vnic_id = INVALID_HW_RING_ID;
 	return rc;
@@ -1274,7 +1317,7 @@ int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
 	struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
 	struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, VNIC_RSS_CFG, -1, resp);
+	HWRM_PREP(req, VNIC_RSS_CFG);
 
 	req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
 
@@ -1286,7 +1329,8 @@ int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
-	HWRM_CHECK_RESULT;
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
 
 	return rc;
 }
@@ -1299,7 +1343,7 @@ int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
 	struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
 	uint16_t size;
 
-	HWRM_PREP(req, VNIC_PLCMODES_CFG, -1, resp);
+	HWRM_PREP(req, VNIC_PLCMODES_CFG);
 
 	req.flags = rte_cpu_to_le_32(
 			HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
@@ -1315,7 +1359,8 @@ int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
-	HWRM_CHECK_RESULT;
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
 
 	return rc;
 }
@@ -1327,7 +1372,7 @@ int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
 	struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
 	struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, VNIC_TPA_CFG, -1, resp);
+	HWRM_PREP(req, VNIC_TPA_CFG);
 
 	if (enable) {
 		req.enables = rte_cpu_to_le_32(
@@ -1350,7 +1395,8 @@ int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
-	HWRM_CHECK_RESULT;
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
 
 	return rc;
 }
@@ -1367,10 +1413,11 @@ int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
 	memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
 	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
 
-	HWRM_PREP(req, FUNC_CFG, -1, resp);
+	HWRM_PREP(req, FUNC_CFG);
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
-	HWRM_CHECK_RESULT;
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
 
 	bp->pf.vf_info[vf].random_mac = false;
 
@@ -1384,17 +1431,19 @@ int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
 	struct hwrm_func_qstats_input req = {.req_type = 0};
 	struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, FUNC_QSTATS, -1, resp);
+	HWRM_PREP(req, FUNC_QSTATS);
 
 	req.fid = rte_cpu_to_le_16(fid);
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
-	HWRM_CHECK_RESULT;
+	HWRM_CHECK_RESULT();
 
 	if (dropped)
 		*dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
 
+	HWRM_UNLOCK();
+
 	return rc;
 }
 
@@ -1405,13 +1454,13 @@ int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
 	struct hwrm_func_qstats_input req = {.req_type = 0};
 	struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, FUNC_QSTATS, -1, resp);
+	HWRM_PREP(req, FUNC_QSTATS);
 
 	req.fid = rte_cpu_to_le_16(fid);
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
-	HWRM_CHECK_RESULT;
+	HWRM_CHECK_RESULT();
 
 	stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
 	stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
@@ -1432,6 +1481,8 @@ int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
 
 	stats->imissed = rte_le_to_cpu_64(resp->rx_drop_pkts);
 
+	HWRM_UNLOCK();
+
 	return rc;
 }
 
@@ -1441,13 +1492,14 @@ int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
 	struct hwrm_func_clr_stats_input req = {.req_type = 0};
 	struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, FUNC_CLR_STATS, -1, resp);
+	HWRM_PREP(req, FUNC_CLR_STATS);
 
 	req.fid = rte_cpu_to_le_16(fid);
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
-	HWRM_CHECK_RESULT;
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
 
 	return rc;
 }
@@ -2038,12 +2090,12 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp)
 	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
 	int rc = 0;
 
-	HWRM_PREP(req, FUNC_QCFG, -1, resp);
+	HWRM_PREP(req, FUNC_QCFG);
 	req.fid = rte_cpu_to_le_16(0xffff);
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
-	HWRM_CHECK_RESULT;
+	HWRM_CHECK_RESULT();
 
 	/* Hard Coded.. 0xfff VLAN ID mask */
 	bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
@@ -2059,6 +2111,8 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp)
 		break;
 	}
 
+	HWRM_UNLOCK();
+
 	return rc;
 }
 
@@ -2118,10 +2172,12 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
 	req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
 	req.fid = rte_cpu_to_le_16(0xffff);
 
-	HWRM_PREP(req, FUNC_CFG, -1, resp);
+	HWRM_PREP(req, FUNC_CFG);
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
-	HWRM_CHECK_RESULT;
+
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
 
 	return rc;
 }
@@ -2187,7 +2243,7 @@ static void reserve_resources_from_vf(struct bnxt *bp,
 	int rc;
 
 	/* Get the actual allocated values now */
-	HWRM_PREP(req, FUNC_QCAPS, -1, resp);
+	HWRM_PREP(req, FUNC_QCAPS);
 	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
@@ -2212,6 +2268,8 @@ static void reserve_resources_from_vf(struct bnxt *bp,
 	 */
 	//bp->max_vnics -= rte_le_to_cpu_16(esp->max_vnics);
 	bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
+
+	HWRM_UNLOCK();
 }
 
 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
@@ -2221,7 +2279,7 @@ int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
 	int rc;
 
 	/* Check for zero MAC address */
-	HWRM_PREP(req, FUNC_QCFG, -1, resp);
+	HWRM_PREP(req, FUNC_QCFG);
 	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 	if (rc) {
@@ -2232,7 +2290,11 @@ int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
 		RTE_LOG(ERR, PMD, "hwrm_func_qcfg error %d\n", rc);
 		return -1;
 	}
-	return rte_le_to_cpu_16(resp->vlan);
+	rc = rte_le_to_cpu_16(resp->vlan);
+
+	HWRM_UNLOCK();
+
+	return rc;
 }
 
 static int update_pf_resource_max(struct bnxt *bp)
@@ -2242,15 +2304,17 @@ static int update_pf_resource_max(struct bnxt *bp)
 	int rc;
 
 	/* And copy the allocated numbers into the pf struct */
-	HWRM_PREP(req, FUNC_QCFG, -1, resp);
+	HWRM_PREP(req, FUNC_QCFG);
 	req.fid = rte_cpu_to_le_16(0xffff);
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
-	HWRM_CHECK_RESULT;
+	HWRM_CHECK_RESULT();
 
 	/* Only TX ring value reflects actual allocation? TODO */
 	bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
 	bp->pf.evb_mode = resp->evb_mode;
 
+	HWRM_UNLOCK();
+
 	return rc;
 }
 
@@ -2342,7 +2406,7 @@ int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
 	for (i = 0; i < num_vfs; i++) {
 		add_random_mac_if_needed(bp, &req, i);
 
-		HWRM_PREP(req, FUNC_CFG, -1, resp);
+		HWRM_PREP(req, FUNC_CFG);
 		req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
 		req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
 		rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
@@ -2357,9 +2421,12 @@ int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
 			RTE_LOG(ERR, PMD,
 				"Not all VFs available. (%d, %d)\n",
 				rc, resp->error_code);
+			HWRM_UNLOCK();
 			break;
 		}
 
+		HWRM_UNLOCK();
+
 		reserve_resources_from_vf(bp, &req, i);
 		bp->pf.active_vfs++;
 		bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
@@ -2392,14 +2459,15 @@ int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
 	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
 	int rc;
 
-	HWRM_PREP(req, FUNC_CFG, -1, resp);
+	HWRM_PREP(req, FUNC_CFG);
 
 	req.fid = rte_cpu_to_le_16(0xffff);
 	req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
 	req.evb_mode = bp->pf.evb_mode;
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
-	HWRM_CHECK_RESULT;
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
 
 	return rc;
 }
@@ -2411,11 +2479,11 @@ int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
 	struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
 	int rc = 0;
 
-	HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC, -1, resp);
+	HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC);
 	req.tunnel_type = tunnel_type;
 	req.tunnel_dst_port_val = port;
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
-	HWRM_CHECK_RESULT;
+	HWRM_CHECK_RESULT();
 
 	switch (tunnel_type) {
 	case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
@@ -2429,6 +2497,9 @@ int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
 	default:
 		break;
 	}
+
+	HWRM_UNLOCK();
+
 	return rc;
 }
 
@@ -2439,11 +2510,14 @@ int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
 	struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
 	int rc = 0;
 
-	HWRM_PREP(req, TUNNEL_DST_PORT_FREE, -1, resp);
+	HWRM_PREP(req, TUNNEL_DST_PORT_FREE);
+
 	req.tunnel_type = tunnel_type;
 	req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
-	HWRM_CHECK_RESULT;
+
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
 
 	return rc;
 }
@@ -2455,11 +2529,14 @@ int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
 	struct hwrm_func_cfg_input req = {0};
 	int rc;
 
-	HWRM_PREP(req, FUNC_CFG, -1, resp);
+	HWRM_PREP(req, FUNC_CFG);
+
 	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
 	req.flags = rte_cpu_to_le_32(flags);
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
-	HWRM_CHECK_RESULT;
+
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
 
 	return rc;
 }
@@ -2482,7 +2559,7 @@ int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
 	struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
 	struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, FUNC_BUF_RGTR, -1, resp);
+	HWRM_PREP(req, FUNC_BUF_RGTR);
 
 	req.req_buf_num_pages = rte_cpu_to_le_16(1);
 	req.req_buf_page_size = rte_cpu_to_le_16(
@@ -2498,7 +2575,8 @@ int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
-	HWRM_CHECK_RESULT;
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
 
 	return rc;
 }
@@ -2509,11 +2587,12 @@ int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
 	struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
 	struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, FUNC_BUF_UNRGTR, -1, resp);
+	HWRM_PREP(req, FUNC_BUF_UNRGTR);
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
-	HWRM_CHECK_RESULT;
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
 
 	return rc;
 }
@@ -2524,7 +2603,8 @@ int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
 	struct hwrm_func_cfg_input req = {0};
 	int rc;
 
-	HWRM_PREP(req, FUNC_CFG, -1, resp);
+	HWRM_PREP(req, FUNC_CFG);
+
 	req.fid = rte_cpu_to_le_16(0xffff);
 	req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
 	req.enables = rte_cpu_to_le_32(
@@ -2532,7 +2612,9 @@ int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
 	req.async_event_cr = rte_cpu_to_le_16(
 			bp->def_cp_ring->cp_ring_struct->fw_ring_id);
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
-	HWRM_CHECK_RESULT;
+
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
 
 	return rc;
 }
@@ -2543,13 +2625,16 @@ int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
 	struct hwrm_func_vf_cfg_input req = {0};
 	int rc;
 
-	HWRM_PREP(req, FUNC_VF_CFG, -1, resp);
+	HWRM_PREP(req, FUNC_VF_CFG);
+
 	req.enables = rte_cpu_to_le_32(
 			HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
 	req.async_event_cr = rte_cpu_to_le_16(
 			bp->def_cp_ring->cp_ring_struct->fw_ring_id);
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
-	HWRM_CHECK_RESULT;
+
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
 
 	return rc;
 }
@@ -2562,7 +2647,7 @@ int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
 	uint32_t func_cfg_flags;
 	int rc = 0;
 
-	HWRM_PREP(req, FUNC_CFG, -1, resp);
+	HWRM_PREP(req, FUNC_CFG);
 
 	if (is_vf) {
 		dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
@@ -2580,7 +2665,9 @@ int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
 	req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
-	HWRM_CHECK_RESULT;
+
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
 
 	return rc;
 }
@@ -2592,13 +2679,16 @@ int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
 	struct hwrm_func_cfg_input req = {0};
 	int rc;
 
-	HWRM_PREP(req, FUNC_CFG, -1, resp);
+	HWRM_PREP(req, FUNC_CFG);
+
 	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
 	req.enables |= rte_cpu_to_le_32(enables);
 	req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
 	req.max_bw = rte_cpu_to_le_32(max_bw);
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
-	HWRM_CHECK_RESULT;
+
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
 
 	return rc;
 }
@@ -2609,14 +2699,17 @@ int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
 	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
 	int rc = 0;
 
-	HWRM_PREP(req, FUNC_CFG, -1, resp);
+	HWRM_PREP(req, FUNC_CFG);
+
 	req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
 	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
 	req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
 	req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
-	HWRM_CHECK_RESULT;
+
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
 
 	return rc;
 }
@@ -2631,14 +2724,15 @@ int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
 	if (ec_size > sizeof(req.encap_request))
 		return -1;
 
-	HWRM_PREP(req, REJECT_FWD_RESP, -1, resp);
+	HWRM_PREP(req, REJECT_FWD_RESP);
 
 	req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
 	memcpy(req.encap_request, encaped, ec_size);
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
-	HWRM_CHECK_RESULT;
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
 
 	return rc;
 }
@@ -2650,13 +2744,17 @@ int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
 	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
 	int rc;
 
-	HWRM_PREP(req, FUNC_QCFG, -1, resp);
+	HWRM_PREP(req, FUNC_QCFG);
+
 	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
-	HWRM_CHECK_RESULT;
+	HWRM_CHECK_RESULT();
 
 	memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
+
+	HWRM_UNLOCK();
+
 	return rc;
 }
 
@@ -2670,14 +2768,15 @@ int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
 	if (ec_size > sizeof(req.encap_request))
 		return -1;
 
-	HWRM_PREP(req, EXEC_FWD_RESP, -1, resp);
+	HWRM_PREP(req, EXEC_FWD_RESP);
 
 	req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
 	memcpy(req.encap_request, encaped, ec_size);
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
-	HWRM_CHECK_RESULT;
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
 
 	return rc;
 }
@@ -2689,13 +2788,13 @@ int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
 	struct hwrm_stat_ctx_query_input req = {.req_type = 0};
 	struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, STAT_CTX_QUERY, -1, resp);
+	HWRM_PREP(req, STAT_CTX_QUERY);
 
 	req.stat_ctx_id = rte_cpu_to_le_32(cid);
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
-	HWRM_CHECK_RESULT;
+	HWRM_CHECK_RESULT();
 
 	stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
 	stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
@@ -2715,6 +2814,8 @@ int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
 	stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
 	stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
 
+	HWRM_UNLOCK();
+
 	return rc;
 }
 
@@ -2728,12 +2829,16 @@ int bnxt_hwrm_port_qstats(struct bnxt *bp)
 	if (!(bp->flags & BNXT_FLAG_PORT_STATS))
 		return 0;
 
-	HWRM_PREP(req, PORT_QSTATS, -1, resp);
+	HWRM_PREP(req, PORT_QSTATS);
+
 	req.port_id = rte_cpu_to_le_16(pf->port_id);
 	req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
 	req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
-	HWRM_CHECK_RESULT;
+
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
+
 	return rc;
 }
 
@@ -2747,10 +2852,14 @@ int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
 	if (!(bp->flags & BNXT_FLAG_PORT_STATS))
 		return 0;
 
-	HWRM_PREP(req, PORT_CLR_STATS, -1, resp);
+	HWRM_PREP(req, PORT_CLR_STATS);
+
 	req.port_id = rte_cpu_to_le_16(pf->port_id);
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
-	HWRM_CHECK_RESULT;
+
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
+
 	return rc;
 }
 
@@ -2763,10 +2872,11 @@ int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
 	if (BNXT_VF(bp))
 		return 0;
 
-	HWRM_PREP(req, PORT_LED_QCAPS, -1, resp);
+	HWRM_PREP(req, PORT_LED_QCAPS);
 	req.port_id = bp->pf.port_id;
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
-	HWRM_CHECK_RESULT;
+
+	HWRM_CHECK_RESULT();
 
 	if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
 		unsigned int i;
@@ -2786,6 +2896,9 @@ int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
 			}
 		}
 	}
+
+	HWRM_UNLOCK();
+
 	return rc;
 }
 
@@ -2801,7 +2914,8 @@ int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
 	if (!bp->num_leds || BNXT_VF(bp))
 		return -EOPNOTSUPP;
 
-	HWRM_PREP(req, PORT_LED_CFG, -1, resp);
+	HWRM_PREP(req, PORT_LED_CFG);
+
 	if (led_on) {
 		led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
 		duration = rte_cpu_to_le_16(500);
@@ -2819,7 +2933,9 @@ int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
 	}
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
-	HWRM_CHECK_RESULT;
+
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
 
 	return rc;
 }
@@ -2857,28 +2973,34 @@ static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
 	int rc;
 
 	/* First query all VNIC ids */
-	HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY, -1, resp_vf_vnic_ids);
+	HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY);
 
 	req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
 	req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
 	req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2phy(vnic_ids));
 
 	if (req.vnic_id_tbl_addr == 0) {
+		HWRM_UNLOCK();
 		RTE_LOG(ERR, PMD,
 		"unable to map VNIC ID table address to physical memory\n");
 		return -ENOMEM;
 	}
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 	if (rc) {
+		HWRM_UNLOCK();
 		RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
 		return -1;
 	} else if (resp->error_code) {
 		rc = rte_le_to_cpu_16(resp->error_code);
+		HWRM_UNLOCK();
 		RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query error %d\n", rc);
 		return -1;
 	}
+	rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
 
-	return rte_le_to_cpu_32(resp->vnic_id_cnt);
+	HWRM_UNLOCK();
+
+	return rc;
 }
 
 /*
@@ -2943,7 +3065,8 @@ int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
 	struct hwrm_func_cfg_input req = {0};
 	int rc;
 
-	HWRM_PREP(req, FUNC_CFG, -1, resp);
+	HWRM_PREP(req, FUNC_CFG);
+
 	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
 	req.enables |= rte_cpu_to_le_32(
 			HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
@@ -2951,7 +3074,9 @@ int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
 		HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
 		HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
-	HWRM_CHECK_RESULT;
+
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
 
 	return rc;
 }
-- 
2.10.1 (Apple Git-78)


