[dpdk-dev] [PATCH v2 02/14] net/qede/base: protect DMAE transactions

Rasesh Mody rasesh.mody at cavium.com
Mon Apr 9 06:47:58 CEST 2018


Protect DMAE transactions with a spinlock instead of a mutex. Also add
a b_mem_ready flag to the DMAE info: it is set once the DMAE buffers
are allocated, cleared under the lock before they are freed, and
checked before each DMAE command is executed, so that requests issued
too early or too late fail with ECORE_NOMEM rather than touching
unallocated memory.
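
For context, the OSAL spinlock wrappers used in this patch are expected
to map onto DPDK primitives roughly as sketched below. This is
illustrative only, not part of the patch; the authoritative definitions
live in drivers/net/qede/base/bcm_osal.h.

    #include <rte_spinlock.h>

    /* Assumed mapping -- verify against bcm_osal.h */
    typedef rte_spinlock_t osal_spinlock_t;

    #define OSAL_SPIN_LOCK_INIT(lock)  rte_spinlock_init(lock)
    #define OSAL_SPIN_LOCK(lock)       rte_spinlock_lock(lock)
    #define OSAL_SPIN_UNLOCK(lock)     rte_spinlock_unlock(lock)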

Signed-off-by: Rasesh Mody <rasesh.mody at cavium.com>
---
 drivers/net/qede/base/ecore.h     |    6 ++++--
 drivers/net/qede/base/ecore_dev.c |    6 +++---
 drivers/net/qede/base/ecore_hw.c  |   31 ++++++++++++++++++++-----------
 3 files changed, 27 insertions(+), 16 deletions(-)
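
Note: below is a standalone sketch (not part of the patch) of the
teardown pattern being introduced. It uses pthread spinlocks and
hypothetical names (struct dmae_info, dmae_command, dmae_info_free,
-ENOMEM) in place of the ecore/OSAL ones. The point is the ordering:
the free path clears b_mem_ready under the lock, so any command
serialized behind it sees the flag and bails out instead of touching
freed completion buffers.

    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>

    /* Hypothetical stand-in for struct ecore_dmae_info. */
    struct dmae_info {
        pthread_spinlock_t lock;
        bool b_mem_ready;   /* true while DMAE buffers are allocated */
    };

    /* Command path: the b_mem_ready check runs under the lock,
     * mirroring the check this patch adds to the DMAE command path. */
    static int dmae_command(struct dmae_info *info)
    {
        int rc = 0;

        pthread_spin_lock(&info->lock);
        if (!info->b_mem_ready)
            rc = -ENOMEM;   /* ECORE_NOMEM in the driver */
        /* else: post the DMAE command and poll the completion word */
        pthread_spin_unlock(&info->lock);
        return rc;
    }

    /* Free path, mirroring ecore_dmae_info_free(): flip the flag under
     * the lock first, then release the buffers outside the lock. */
    static void dmae_info_free(struct dmae_info *info)
    {
        pthread_spin_lock(&info->lock);
        info->b_mem_ready = false;
        pthread_spin_unlock(&info->lock);
        /* free completion word / intermediate buffers here */
    }

    int main(void)
    {
        struct dmae_info info;

        pthread_spin_init(&info.lock, PTHREAD_PROCESS_PRIVATE);
        info.b_mem_ready = true;

        (void)dmae_command(&info);   /* proceeds: buffers "ready" */
        dmae_info_free(&info);
        return dmae_command(&info) == -ENOMEM ? 0 : 1; /* bails out */
    }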

diff --git a/drivers/net/qede/base/ecore.h b/drivers/net/qede/base/ecore.h
index ce5f3a9..7c642af 100644
--- a/drivers/net/qede/base/ecore.h
+++ b/drivers/net/qede/base/ecore.h
@@ -432,8 +432,10 @@ struct ecore_hw_info {
 #define DMAE_MAX_RW_SIZE	0x2000
 
 struct ecore_dmae_info {
-	/* Mutex for synchronizing access to functions */
-	osal_mutex_t	mutex;
+	/* Spinlock for synchronizing access to functions */
+	osal_spinlock_t lock;
+
+	bool b_mem_ready;
 
 	u8 channel;
 
diff --git a/drivers/net/qede/base/ecore_dev.c b/drivers/net/qede/base/ecore_dev.c
index cd274c3..b15af03 100644
--- a/drivers/net/qede/base/ecore_dev.c
+++ b/drivers/net/qede/base/ecore_dev.c
@@ -423,9 +423,9 @@ void ecore_init_struct(struct ecore_dev *p_dev)
 		p_hwfn->b_active = false;
 
 #ifdef CONFIG_ECORE_LOCK_ALLOC
-		OSAL_MUTEX_ALLOC(p_hwfn, &p_hwfn->dmae_info.mutex);
+		OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_hwfn->dmae_info.lock);
 #endif
-		OSAL_MUTEX_INIT(&p_hwfn->dmae_info.mutex);
+		OSAL_SPIN_LOCK_INIT(&p_hwfn->dmae_info.lock);
 	}
 
 	/* hwfn 0 is always active */
@@ -4238,7 +4238,7 @@ void ecore_hw_remove(struct ecore_dev *p_dev)
 		ecore_mcp_free(p_hwfn);
 
 #ifdef CONFIG_ECORE_LOCK_ALLOC
-		OSAL_MUTEX_DEALLOC(&p_hwfn->dmae_info.mutex);
+		OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->dmae_info.lock);
 #endif
 	}
 
diff --git a/drivers/net/qede/base/ecore_hw.c b/drivers/net/qede/base/ecore_hw.c
index 84f273b..1e76509 100644
--- a/drivers/net/qede/base/ecore_hw.c
+++ b/drivers/net/qede/base/ecore_hw.c
@@ -592,7 +592,8 @@ enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn)
 		goto err;
 	}
 
-	p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id;
+	p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id;
+	p_hwfn->dmae_info.b_mem_ready = true;
 
 	return ECORE_SUCCESS;
 err:
@@ -604,8 +605,9 @@ void ecore_dmae_info_free(struct ecore_hwfn *p_hwfn)
 {
 	dma_addr_t p_phys;
 
-	/* Just make sure no one is in the middle */
-	OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex);
+	OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock);
+	p_hwfn->dmae_info.b_mem_ready = false;
+	OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock);
 
 	if (p_hwfn->dmae_info.p_completion_word != OSAL_NULL) {
 		p_phys = p_hwfn->dmae_info.completion_word_phys_addr;
@@ -630,8 +632,6 @@ void ecore_dmae_info_free(struct ecore_hwfn *p_hwfn)
 				       p_phys, sizeof(u32) * DMAE_MAX_RW_SIZE);
 		p_hwfn->dmae_info.p_intermediate_buffer = OSAL_NULL;
 	}
-
-	OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex);
 }
 
 static enum _ecore_status_t ecore_dmae_operation_wait(struct ecore_hwfn *p_hwfn)
@@ -777,6 +777,15 @@ static enum _ecore_status_t ecore_dmae_operation_wait(struct ecore_hwfn *p_hwfn)
 	enum _ecore_status_t ecore_status = ECORE_SUCCESS;
 	u32 offset = 0;
 
+	if (!p_hwfn->dmae_info.b_mem_ready) {
+		DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+			   "No buffers allocated. Avoid DMAE transaction [{src: addr 0x%lx, type %d}, {dst: addr 0x%lx, type %d}, size %d].\n",
+			   (unsigned long)src_addr, src_type,
+			   (unsigned long)dst_addr, dst_type,
+			   size_in_dwords);
+		return ECORE_NOMEM;
+	}
+
 	if (p_hwfn->p_dev->recov_in_prog) {
 		DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
 			   "Recovery is in progress. Avoid DMAE transaction [{src: addr 0x%lx, type %d}, {dst: addr 0x%lx, type %d}, size %d].\n",
@@ -870,7 +879,7 @@ enum _ecore_status_t
 	OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
 	params.flags = flags;
 
-	OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex);
+	OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock);
 
 	rc = ecore_dmae_execute_command(p_hwfn, p_ptt, source_addr,
 					grc_addr_in_dw,
@@ -878,7 +887,7 @@ enum _ecore_status_t
 					ECORE_DMAE_ADDRESS_GRC,
 					size_in_dwords, &params);
 
-	OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex);
+	OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock);
 
 	return rc;
 }
@@ -896,14 +905,14 @@ enum _ecore_status_t
 	OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
 	params.flags = flags;
 
-	OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex);
+	OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock);
 
 	rc = ecore_dmae_execute_command(p_hwfn, p_ptt, grc_addr_in_dw,
 					dest_addr, ECORE_DMAE_ADDRESS_GRC,
 					ECORE_DMAE_ADDRESS_HOST_VIRT,
 					size_in_dwords, &params);
 
-	OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex);
+	OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock);
 
 	return rc;
 }
@@ -917,7 +926,7 @@ enum _ecore_status_t
 {
 	enum _ecore_status_t rc;
 
-	OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex);
+	OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock);
 
 	rc = ecore_dmae_execute_command(p_hwfn, p_ptt, source_addr,
 					dest_addr,
@@ -925,7 +934,7 @@ enum _ecore_status_t
 					ECORE_DMAE_ADDRESS_HOST_PHYS,
 					size_in_dwords, p_params);
 
-	OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex);
+	OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock);
 
 	return rc;
 }
-- 
1.7.10.3


