[PATCH v3 02/30] baseband/acc100: add function to check AQ availability

Maxime Coquelin maxime.coquelin at redhat.com
Fri Oct 14 11:25:06 CEST 2022



On 10/12/22 04:53, Hernan Vargas wrote:
> In some corner cases it is possible to run more batch enqueues than
> supported, so a protection is required. Enhance all ACC100 enqueue
> operations with a check that there is room in the atomic queue (AQ)
> before enqueueing batches into the queue manager (Qmgr).
> 
> Fixes: 5ad5060f8f7 ("baseband/acc100: add LDPC processing functions")
> Cc: stable at dpdk.org
> 
> Signed-off-by: Hernan Vargas <hernan.vargas at intel.com>
> ---
>   drivers/baseband/acc/rte_acc100_pmd.c | 30 ++++++++++++++++++++-------
>   1 file changed, 22 insertions(+), 8 deletions(-)
> 
> diff --git a/drivers/baseband/acc/rte_acc100_pmd.c b/drivers/baseband/acc/rte_acc100_pmd.c
> index 733766ad3e..b436bd9078 100644
> --- a/drivers/baseband/acc/rte_acc100_pmd.c
> +++ b/drivers/baseband/acc/rte_acc100_pmd.c
> @@ -2995,12 +2995,27 @@ acc100_enqueue_enc_tb(struct rte_bbdev_queue_data *q_data,
>   	return i;
>   }
>   
> +/* Check room in AQ for the enqueues batches into Qmgr */
> +static int32_t
> +acc100_aq_avail(struct rte_bbdev_queue_data *q_data, uint16_t num_ops)
> +{
> +	struct acc_queue *q = q_data->queue_private;
> +	int32_t aq_avail = q->aq_depth -
> +			((q->aq_enqueued - q->aq_dequeued + ACC_MAX_QUEUE_DEPTH)
> +			% ACC_MAX_QUEUE_DEPTH) - (num_ops >> 7);
> +	if (aq_avail <= 0)

Maybe use unlikely() here.
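E.g. something along these lines (untested, keeping the rest of the
helper as is):

	if (unlikely(aq_avail <= 0))
		acc_enqueue_queue_full(q_data);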


> +		acc_enqueue_queue_full(q_data);
> +
> +	return aq_avail;
> +}
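
FWIW, my understanding of the availability computation, as a standalone
sketch. toy_queue, toy_aq_avail and the 1024 depth are made-up stand-ins
for the sketch, not actual driver definitions:

	#include <stdint.h>
	#include <stdio.h>

	#define ACC_MAX_QUEUE_DEPTH 1024	/* assumed value for the sketch */

	/* Minimal stand-in for the driver's queue state. */
	struct toy_queue {
		uint32_t aq_depth;	/* configured atomic queue depth */
		uint32_t aq_enqueued;	/* running enqueue counter */
		uint32_t aq_dequeued;	/* running dequeue counter */
	};

	/* Same arithmetic as acc100_aq_avail(): pending = (enq - deq) mod
	 * ACC_MAX_QUEUE_DEPTH, room = depth - pending - one slot per 128 ops.
	 */
	static int32_t
	toy_aq_avail(const struct toy_queue *q, uint16_t num_ops)
	{
		return (int32_t)q->aq_depth -
			(int32_t)((q->aq_enqueued - q->aq_dequeued +
				ACC_MAX_QUEUE_DEPTH) % ACC_MAX_QUEUE_DEPTH) -
			(num_ops >> 7);
	}

	int main(void)
	{
		struct toy_queue q = {
			.aq_depth = 32, .aq_enqueued = 30, .aq_dequeued = 2
		};

		/* 28 descriptors pending out of 32, minus 256 >> 7 = 2,
		 * leaves 2 slots. */
		printf("aq_avail = %d\n", toy_aq_avail(&q, 256));
		return 0;
	}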
> +
>   /* Enqueue encode operations for ACC100 device. */
>   static uint16_t
>   acc100_enqueue_enc(struct rte_bbdev_queue_data *q_data,
>   		struct rte_bbdev_enc_op **ops, uint16_t num)
>   {
> -	if (unlikely(num == 0))
> +	int32_t aq_avail = acc100_aq_avail(q_data, num);

Please insert a blank line here for readability.

> +	if (unlikely((aq_avail <= 0) || (num == 0)))
>   		return 0;

Ditto
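I.e. (same code as in the patch, just with blank lines added):

	int32_t aq_avail = acc100_aq_avail(q_data, num);

	if (unlikely((aq_avail <= 0) || (num == 0)))
		return 0;

	if (ops[0]->turbo_enc.code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK)
		...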
>   	if (ops[0]->turbo_enc.code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK)
>   		return acc100_enqueue_enc_tb(q_data, ops, num);
> @@ -3013,7 +3028,8 @@ static uint16_t
>   acc100_enqueue_ldpc_enc(struct rte_bbdev_queue_data *q_data,
>   		struct rte_bbdev_enc_op **ops, uint16_t num)
>   {
> -	if (unlikely(num == 0))
> +	int32_t aq_avail = acc100_aq_avail(q_data, num);

New line.

> +	if (unlikely((aq_avail <= 0) || (num == 0)))
>   		return 0;
>   	if (ops[0]->ldpc_enc.code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK)
>   		return acc100_enqueue_enc_tb(q_data, ops, num);
> @@ -3183,7 +3199,8 @@ static uint16_t
>   acc100_enqueue_dec(struct rte_bbdev_queue_data *q_data,
>   		struct rte_bbdev_dec_op **ops, uint16_t num)
>   {
> -	if (unlikely(num == 0))
> +	int32_t aq_avail = acc100_aq_avail(q_data, num);

New line.

> +	if (unlikely((aq_avail <= 0) || (num == 0)))
>   		return 0;

New line.

>   	if (ops[0]->turbo_dec.code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK)
>   		return acc100_enqueue_dec_tb(q_data, ops, num);
> @@ -3196,11 +3213,8 @@ static uint16_t
>   acc100_enqueue_ldpc_dec(struct rte_bbdev_queue_data *q_data,
>   		struct rte_bbdev_dec_op **ops, uint16_t num)
>   {
> -	struct acc_queue *q = q_data->queue_private;
> -	int32_t aq_avail = q->aq_depth +
> -			(q->aq_dequeued - q->aq_enqueued) / 128;
> -
> -	if (unlikely((aq_avail == 0) || (num == 0)))
> +	int32_t aq_avail = acc100_aq_avail(q_data, num);

New line.

> +	if (unlikely((aq_avail <= 0) || (num == 0)))
>   		return 0;
>   
>   	if (ops[0]->ldpc_dec.code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK)
