[PATCH v2 4/9] baseband/acc: prevent dequeuing more than requested

Nicolas Chautru <nicolas.chautru@intel.com>
Fri Feb 10 18:58:36 CET 2023


Handle the corner case where more operations could be dequeued than
requested, which can occur when multiple encoder operations are
muxed into a single descriptor.
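
A CB-mode descriptor can carry several muxed code blocks
(desc->req.numCBs), so consuming one descriptor may complete more
operations than the caller still expects. A minimal sketch of the
added guard, with an illustrative helper name:

    #include <stdint.h>

    /* Refuse to consume a descriptor when completing its muxed CBs
     * would push the running total past the caller's request. */
    static inline int
    exceeds_request(uint16_t dequeued_ops, uint16_t num_cbs,
                    uint16_t max_requested_ops)
    {
            return dequeued_ops + num_cbs > max_requested_ops;
    }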

Fixes: e640f6cdfa84 ("baseband/acc200: add LDPC processing")
Cc: stable@dpdk.org

Signed-off-by: Nicolas Chautru <nicolas.chautru@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
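The dequeue loops are now bounded by the number of available
descriptors rather than by the requested operation count, since one
CB-mode descriptor may complete several muxed operations at once; the
per-descriptor helpers enforce the operation cap through the new
max_requested_ops argument. A self-contained toy model of the
resulting behaviour (names and values are illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    /* Each descriptor completes cbs_per_desc[i] muxed operations; stop
     * before a descriptor that would exceed the requested count. */
    static uint16_t
    toy_dequeue(const uint8_t *cbs_per_desc, int avail, uint16_t num)
    {
            uint16_t dequeued_ops = 0;
            int i;

            for (i = 0; i < avail; i++) {
                    if (dequeued_ops + cbs_per_desc[i] > num)
                            break; /* the guard this patch adds */
                    dequeued_ops += cbs_per_desc[i];
            }
            return dequeued_ops;
    }

    int main(void)
    {
            const uint8_t cbs[] = {3, 3, 3}; /* 3 descriptors, 3 CBs each */

            /* Requesting 7 ops yields only 6: the third descriptor would
             * overshoot, so it stays queued for the next call. */
            printf("%u\n", toy_dequeue(cbs, 3, 7));
            return 0;
    }
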
 drivers/baseband/acc/rte_vrb_pmd.c | 27 +++++++++++++++------------
 1 file changed, 15 insertions(+), 12 deletions(-)

diff --git a/drivers/baseband/acc/rte_vrb_pmd.c b/drivers/baseband/acc/rte_vrb_pmd.c
index b1134f244d..a7d0b1e33c 100644
--- a/drivers/baseband/acc/rte_vrb_pmd.c
+++ b/drivers/baseband/acc/rte_vrb_pmd.c
@@ -2640,7 +2640,8 @@ vrb_enqueue_ldpc_dec(struct rte_bbdev_queue_data *q_data,
 /* Dequeue one encode operation from device in CB mode. */
 static inline int
 vrb_dequeue_enc_one_op_cb(struct acc_queue *q, struct rte_bbdev_enc_op **ref_op,
-		uint16_t *dequeued_ops, uint32_t *aq_dequeued, uint16_t *dequeued_descs)
+		uint16_t *dequeued_ops, uint32_t *aq_dequeued, uint16_t *dequeued_descs,
+		uint16_t max_requested_ops)
 {
 	union acc_dma_desc *desc, atom_desc;
 	union acc_dma_rsp_desc rsp;
@@ -2653,6 +2654,9 @@ vrb_dequeue_enc_one_op_cb(struct acc_queue *q, struct rte_bbdev_enc_op **ref_op,
 	desc = q->ring_addr + desc_idx;
 	atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
 
+	if (*dequeued_ops + desc->req.numCBs > max_requested_ops)
+		return -1;
+
 	/* Check fdone bit. */
 	if (!(atom_desc.rsp.val & ACC_FDONE))
 		return -1;
@@ -2694,7 +2698,7 @@ vrb_dequeue_enc_one_op_cb(struct acc_queue *q, struct rte_bbdev_enc_op **ref_op,
 static inline int
 vrb_dequeue_enc_one_op_tb(struct acc_queue *q, struct rte_bbdev_enc_op **ref_op,
 		uint16_t *dequeued_ops, uint32_t *aq_dequeued,
-		uint16_t *dequeued_descs)
+		uint16_t *dequeued_descs, uint16_t max_requested_ops)
 {
 	union acc_dma_desc *desc, *last_desc, atom_desc;
 	union acc_dma_rsp_desc rsp;
@@ -2705,6 +2709,9 @@ vrb_dequeue_enc_one_op_tb(struct acc_queue *q, struct rte_bbdev_enc_op **ref_op,
 	desc = acc_desc_tail(q, *dequeued_descs);
 	atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
 
+	if (*dequeued_ops + 1 > max_requested_ops)
+		return -1;
+
 	/* Check fdone bit. */
 	if (!(atom_desc.rsp.val & ACC_FDONE))
 		return -1;
@@ -2965,25 +2972,23 @@ vrb_dequeue_enc(struct rte_bbdev_queue_data *q_data,
 
 	cbm = op->turbo_enc.code_block_mode;
 
-	for (i = 0; i < num; i++) {
+	for (i = 0; i < avail; i++) {
 		if (cbm == RTE_BBDEV_TRANSPORT_BLOCK)
 			ret = vrb_dequeue_enc_one_op_tb(q, &ops[dequeued_ops],
 					&dequeued_ops, &aq_dequeued,
-					&dequeued_descs);
+					&dequeued_descs, num);
 		else
 			ret = vrb_dequeue_enc_one_op_cb(q, &ops[dequeued_ops],
 					&dequeued_ops, &aq_dequeued,
-					&dequeued_descs);
+					&dequeued_descs, num);
 		if (ret < 0)
 			break;
-		if (dequeued_ops >= num)
-			break;
 	}
 
 	q->aq_dequeued += aq_dequeued;
 	q->sw_ring_tail += dequeued_descs;
 
-	/* Update enqueue stats */
+	/* Update dequeue stats. */
 	q_data->queue_stats.dequeued_count += dequeued_ops;
 
 	return dequeued_ops;
@@ -3009,15 +3014,13 @@ vrb_dequeue_ldpc_enc(struct rte_bbdev_queue_data *q_data,
 		if (cbm == RTE_BBDEV_TRANSPORT_BLOCK)
 			ret = vrb_dequeue_enc_one_op_tb(q, &ops[dequeued_ops],
 					&dequeued_ops, &aq_dequeued,
-					&dequeued_descs);
+					&dequeued_descs, num);
 		else
 			ret = vrb_dequeue_enc_one_op_cb(q, &ops[dequeued_ops],
 					&dequeued_ops, &aq_dequeued,
-					&dequeued_descs);
+					&dequeued_descs, num);
 		if (ret < 0)
 			break;
-		if (dequeued_ops >= num)
-			break;
 	}
 
 	q->aq_dequeued += aq_dequeued;
-- 
2.34.1


