[PATCH v1 2/6] baseband/acc: acc100 use desc helper functions

Hernan Vargas <hernan.vargas at intel.com>
Thu Jan 5 01:11:05 CET 2023


Use the descriptor helper functions acc_desc(), acc_desc_idx() and
acc_op_tail() for descriptor address calculations instead of open-coding
the software ring index arithmetic.
No functional impact.

Signed-off-by: Hernan Vargas <hernan.vargas at intel.com>
---
 drivers/baseband/acc/rte_acc100_pmd.c | 38 +++++++++------------------
 1 file changed, 13 insertions(+), 25 deletions(-)
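
For reference, the helpers picked up below presumably just wrap the
open-coded ring arithmetic they replace. A minimal sketch, reconstructed
from the expressions removed in this diff (the exact prototypes in
acc_common.h, in particular the return type of acc_op_tail(), are
assumptions):

	/* Sketch only: relies on struct acc_queue and union acc_dma_desc
	 * as defined in drivers/baseband/acc/acc_common.h.
	 */
	static inline uint16_t
	acc_desc_idx(struct acc_queue *q, uint16_t offset)
	{
		/* Enqueue side: offset is applied to the software ring head. */
		return (q->sw_ring_head + offset) & q->sw_ring_wrap_mask;
	}

	static inline union acc_dma_desc *
	acc_desc(struct acc_queue *q, uint16_t offset)
	{
		return q->ring_addr + acc_desc_idx(q, offset);
	}

	static inline void *
	acc_op_tail(struct acc_queue *q, uint16_t offset)
	{
		/* Dequeue side: offset is applied to the software ring tail;
		 * return the op pointer stored in that descriptor.
		 */
		return (q->ring_addr +
			((q->sw_ring_tail + offset) & q->sw_ring_wrap_mask))->req.op_addr;
	}

With these, an expression such as
"desc = q->ring_addr + ((q->sw_ring_head + n) & q->sw_ring_wrap_mask);"
collapses to "desc = acc_desc(q, n);", which is the substitution made
throughout the hunks below.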

diff --git a/drivers/baseband/acc/rte_acc100_pmd.c b/drivers/baseband/acc/rte_acc100_pmd.c
index 2b3e9090b4..97c800913a 100644
--- a/drivers/baseband/acc/rte_acc100_pmd.c
+++ b/drivers/baseband/acc/rte_acc100_pmd.c
@@ -2423,9 +2423,8 @@ enqueue_ldpc_enc_part_tb(struct acc_queue *q, struct rte_bbdev_enc_op *op,
 	struct rte_mbuf *output_head, *output;
 	int i, next_triplet;
 	struct rte_bbdev_op_ldpc_enc *enc = &op->ldpc_enc;
-	uint16_t desc_idx = ((q->sw_ring_head + total_enqueued_descs) & q->sw_ring_wrap_mask);
 
-	desc = q->ring_addr + desc_idx;
+	desc = acc_desc(q, total_enqueued_descs);
 	acc_fcw_le_fill(op, &desc->req.fcw_le, num_cbs, e);
 
 	/* This could be done at polling. */
@@ -2614,7 +2613,6 @@ enqueue_ldpc_enc_one_op_tb(struct acc_queue *q, struct rte_bbdev_enc_op *op,
 	}
 #endif
 	uint8_t num_a, num_b;
-	uint16_t desc_idx;
 	uint8_t r = op->ldpc_enc.tb_params.r;
 	uint8_t cab =  op->ldpc_enc.tb_params.cab;
 	union acc_dma_desc *desc;
@@ -2656,16 +2654,15 @@ enqueue_ldpc_enc_one_op_tb(struct acc_queue *q, struct rte_bbdev_enc_op *op,
 
 	return_descs = enq_descs - init_enq_descs;
 	/* Keep total number of CBs in first TB. */
-	desc_idx = ((q->sw_ring_head + init_enq_descs) & q->sw_ring_wrap_mask);
-	desc = q->ring_addr + desc_idx;
+	desc = acc_desc(q, init_enq_descs);
 	desc->req.cbs_in_tb = return_descs; /** Actual number of descriptors. */
 	desc->req.op_addr = op;
 
 	/* Set SDone on last CB descriptor for TB mode. */
-	desc_idx = ((q->sw_ring_head + enq_descs - 1) & q->sw_ring_wrap_mask);
-	desc = q->ring_addr + desc_idx;
+	desc = acc_desc(q, enq_descs - 1);
 	desc->req.sdone_enable = 1;
 	desc->req.op_addr = op;
+
 	return return_descs;
 }
 
@@ -3276,7 +3273,7 @@ enqueue_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op *op,
 		h_out_length, mbuf_total_left, seg_total_left;
 	struct rte_mbuf *input, *h_output_head, *h_output,
 		*s_output_head, *s_output;
-	uint16_t desc_idx, current_enqueued_cbs = 0;
+	uint16_t current_enqueued_cbs = 0;
 	uint64_t fcw_offset;
 
 #ifndef RTE_LIBRTE_BBDEV_SKIP_VALIDATE
@@ -3291,9 +3288,8 @@ enqueue_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op *op,
 	}
 #endif
 
-	desc_idx = acc_desc_idx(q, total_enqueued_cbs);
-	desc = q->ring_addr + desc_idx;
-	fcw_offset = (desc_idx << 8) + ACC_DESC_FCW_OFFSET;
+	desc = acc_desc(q, total_enqueued_cbs);
+	fcw_offset = (acc_desc_idx(q, total_enqueued_cbs) << 8) + ACC_DESC_FCW_OFFSET;
 	acc100_fcw_td_fill(op, &desc->req.fcw_td);
 
 	input = op->turbo_dec.input.data;
@@ -3423,8 +3419,7 @@ acc100_enqueue_ldpc_enc_cb(struct rte_bbdev_queue_data *q_data,
 		avail--;
 		enq = RTE_MIN(left, ACC_MUX_5GDL_DESC);
 		if (check_mux(&ops[i], enq)) {
-			ret = enqueue_ldpc_enc_n_op_cb(q, &ops[i],
-					desc_idx, enq);
+			ret = enqueue_ldpc_enc_n_op_cb(q, &ops[i], desc_idx, enq);
 			if (ret < 0) {
 				acc_enqueue_invalid(q_data);
 				break;
@@ -3778,7 +3773,6 @@ dequeue_enc_one_op_cb(struct acc_queue *q, struct rte_bbdev_enc_op **ref_op,
 
 	/* Clearing status, it will be set based on response */
 	op->status = 0;
-
 	op->status |= ((rsp.dma_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);
 	op->status |= ((rsp.fcw_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);
 
@@ -4024,8 +4018,8 @@ dequeue_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op **ref_op,
 		atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
 				__ATOMIC_RELAXED);
 		rsp.val = atom_desc.rsp.val;
-		rte_bbdev_log_debug("Resp. desc %p: %x", desc,
-				rsp.val);
+		rte_bbdev_log_debug("Resp. desc %p: %x r %d c %d\n",
+						desc, rsp.val, cb_idx, cbs_in_tb);
 
 		op->status |= ((rsp.input_err) ? (1 << RTE_BBDEV_DATA_ERROR) : 0);
 		op->status |= ((rsp.dma_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);
@@ -4074,8 +4068,7 @@ acc100_dequeue_enc(struct rte_bbdev_queue_data *q_data,
 		return 0;
 	}
 #endif
-	op = (q->ring_addr + (q->sw_ring_tail &
-			q->sw_ring_wrap_mask))->req.op_addr;
+	op = acc_op_tail(q, 0);
 	if (unlikely(ops == NULL || op == NULL))
 		return 0;
 	cbm = op->turbo_enc.code_block_mode;
@@ -4117,7 +4110,6 @@ acc100_dequeue_ldpc_enc(struct rte_bbdev_queue_data *q_data,
 	uint16_t i, dequeued_ops = 0, dequeued_descs = 0;
 	int ret, cbm;
 	struct rte_bbdev_enc_op *op;
-	union acc_dma_desc *desc;
 
 	if (q == NULL)
 		return 0;
@@ -4125,10 +4117,7 @@ acc100_dequeue_ldpc_enc(struct rte_bbdev_queue_data *q_data,
 	if (unlikely(ops == 0))
 		return 0;
 #endif
-	desc = q->ring_addr + (q->sw_ring_tail & q->sw_ring_wrap_mask);
-	if (unlikely(desc == NULL))
-		return 0;
-	op = desc->req.op_addr;
+	op = acc_op_tail(q, 0);
 	if (unlikely(ops == NULL || op == NULL))
 		return 0;
 	cbm = op->ldpc_enc.code_block_mode;
@@ -4224,8 +4213,7 @@ acc100_dequeue_ldpc_dec(struct rte_bbdev_queue_data *q_data,
 	dequeue_num = RTE_MIN(avail, num);
 
 	for (i = 0; i < dequeue_num; ++i) {
-		op = (q->ring_addr + ((q->sw_ring_tail + dequeued_cbs)
-			& q->sw_ring_wrap_mask))->req.op_addr;
+		op = acc_op_tail(q, dequeued_cbs);
 		if (unlikely(op == NULL))
 			break;
 		if (op->ldpc_dec.code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK)
-- 
2.37.1


