[v4,18/27] event/dlb2: add v2.5 sparse cq mode

Message ID 1618451359-20693-19-git-send-email-timothy.mcdaniel@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Jerin Jacob
Series: Add DLB v2.5

Checks

Context: ci/checkpatch
Check: warning
Description: coding style issues

Commit Message

Timothy McDaniel April 15, 2021, 1:49 a.m. UTC
Update the low-level HW functions responsible for
configuring sparse CQ mode, where each cache line
contains just one QE instead of four.
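
To illustrate what sparse CQ mode means for the consumer (this sketch is
not part of the patch; the QE and cache-line sizes are assumptions, and
dlb2_cq_idx_step() is a hypothetical helper name):

	#include <stdbool.h>

	#define DLB2_QE_BYTES_ASSUMED	16 /* bytes per queue entry (assumed) */
	#define DLB2_CL_BYTES_ASSUMED	64 /* cache line size (assumed) */

	/* Slots to advance the CQ index per dequeued QE: in dense mode the
	 * next QE sits in the adjacent slot; in sparse mode only the first
	 * QE slot of each cache line is populated, so step a full line.
	 */
	static inline unsigned int
	dlb2_cq_idx_step(bool sparse_mode)
	{
		return sparse_mode ?
			(DLB2_CL_BYTES_ASSUMED / DLB2_QE_BYTES_ASSUMED) : 1;
	}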

The logic is very similar to what was done for v2.0,
but the new combined register map for v2.0 and v2.5
uses new register names and bit names. Additionally,
new register access macros are used so that the code
performs the correct action based on the hardware version.
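
For reference, the DLB2_CSR_RD()/DLB2_CSR_WR() and DLB2_BIT_SET()
accessors used in the hunk below operate on plain u32 values and mask
constants from the combined register map, instead of the per-version
bitfield unions removed above. A minimal sketch of the shape such a
mask-based helper can take (the macro body here is an assumption, not
copied from the driver headers):

	/* Assumed shape only; the real definition lives in the DLB2
	 * register map headers shared by v2.0 and v2.5.
	 */
	#define DLB2_BIT_SET(x, mask)	((x) |= (mask))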

Signed-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com>
---
 drivers/event/dlb2/pf/base/dlb2_resource.c    | 22 -----------
 .../event/dlb2/pf/base/dlb2_resource_new.c    | 39 +++++++++++++++++++
 2 files changed, 39 insertions(+), 22 deletions(-)
  

Patch

diff --git a/drivers/event/dlb2/pf/base/dlb2_resource.c b/drivers/event/dlb2/pf/base/dlb2_resource.c
index f05f750f5..d53cce643 100644
--- a/drivers/event/dlb2/pf/base/dlb2_resource.c
+++ b/drivers/event/dlb2/pf/base/dlb2_resource.c
@@ -32,28 +32,6 @@ 
 #define DLB2_FUNC_LIST_FOR_SAFE(head, ptr, ptr_tmp, it, it_tmp) \
 	DLB2_LIST_FOR_EACH_SAFE((head), ptr, ptr_tmp, func_list, it, it_tmp)
 
-void dlb2_hw_enable_sparse_dir_cq_mode(struct dlb2_hw *hw)
-{
-	union dlb2_chp_cfg_chp_csr_ctrl r0;
-
-	r0.val = DLB2_CSR_RD(hw, DLB2_CHP_CFG_CHP_CSR_CTRL);
-
-	r0.field.cfg_64bytes_qe_dir_cq_mode = 1;
-
-	DLB2_CSR_WR(hw, DLB2_CHP_CFG_CHP_CSR_CTRL, r0.val);
-}
-
-void dlb2_hw_enable_sparse_ldb_cq_mode(struct dlb2_hw *hw)
-{
-	union dlb2_chp_cfg_chp_csr_ctrl r0;
-
-	r0.val = DLB2_CSR_RD(hw, DLB2_CHP_CFG_CHP_CSR_CTRL);
-
-	r0.field.cfg_64bytes_qe_ldb_cq_mode = 1;
-
-	DLB2_CSR_WR(hw, DLB2_CHP_CFG_CHP_CSR_CTRL, r0.val);
-}
-
 int dlb2_get_group_sequence_numbers(struct dlb2_hw *hw, unsigned int group_id)
 {
 	if (group_id >= DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS)
diff --git a/drivers/event/dlb2/pf/base/dlb2_resource_new.c b/drivers/event/dlb2/pf/base/dlb2_resource_new.c
index 8cd1762cf..0f18bfeff 100644
--- a/drivers/event/dlb2/pf/base/dlb2_resource_new.c
+++ b/drivers/event/dlb2/pf/base/dlb2_resource_new.c
@@ -6089,3 +6089,42 @@  unsigned int dlb2_finish_map_qid_procedures(struct dlb2_hw *hw)
 
 	return num;
 }
+
+/**
+ * dlb2_hw_enable_sparse_dir_cq_mode() - enable sparse mode for directed ports.
+ * @hw: dlb2_hw handle for a particular device.
+ *
+ * This function must be called prior to configuring scheduling domains.
+ */
+
+void dlb2_hw_enable_sparse_dir_cq_mode(struct dlb2_hw *hw)
+{
+	u32 ctrl;
+
+	ctrl = DLB2_CSR_RD(hw, DLB2_CHP_CFG_CHP_CSR_CTRL);
+
+	DLB2_BIT_SET(ctrl,
+		     DLB2_CHP_CFG_CHP_CSR_CTRL_CFG_64BYTES_QE_DIR_CQ_MODE);
+
+	DLB2_CSR_WR(hw, DLB2_CHP_CFG_CHP_CSR_CTRL, ctrl);
+}
+
+/**
+ * dlb2_hw_enable_sparse_ldb_cq_mode() - enable sparse mode for load-balanced
+ *	ports.
+ * @hw: dlb2_hw handle for a particular device.
+ *
+ * This function must be called prior to configuring scheduling domains.
+ */
+void dlb2_hw_enable_sparse_ldb_cq_mode(struct dlb2_hw *hw)
+{
+	u32 ctrl;
+
+	ctrl = DLB2_CSR_RD(hw, DLB2_CHP_CFG_CHP_CSR_CTRL);
+
+	DLB2_BIT_SET(ctrl,
+		     DLB2_CHP_CFG_CHP_CSR_CTRL_CFG_64BYTES_QE_LDB_CQ_MODE);
+
+	DLB2_CSR_WR(hw, DLB2_CHP_CFG_CHP_CSR_CTRL, ctrl);
+}
+
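
A hypothetical call site for the two entry points added above (the
wrapper name below is illustrative, not a function in this series):
both modes are enabled during PF driver init, before any scheduling
domain is configured.

	/* Illustrative only: dlb2_example_enable_sparse_cq_modes() is a
	 * hypothetical wrapper, not part of the driver.
	 */
	static void
	dlb2_example_enable_sparse_cq_modes(struct dlb2_hw *hw)
	{
		/* Must run prior to scheduling domain configuration. */
		dlb2_hw_enable_sparse_ldb_cq_mode(hw);
		dlb2_hw_enable_sparse_dir_cq_mode(hw);
	}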