[PATCH v2] net/ice: fix scalar Rx and Tx path segment

Zhang, Qi Z qi.z.zhang at intel.com
Thu Nov 10 11:37:16 CET 2022



> -----Original Message-----
> From: Ye, MingjinX <mingjinx.ye at intel.com>
> Sent: Wednesday, November 9, 2022 8:56 PM
> To: dev at dpdk.org
> Cc: Yang, Qiming <qiming.yang at intel.com>; stable at dpdk.org; Zhou, YidingX
> <yidingx.zhou at intel.com>; Ye, MingjinX <mingjinx.ye at intel.com>; Zhang, Qi
> Z <qi.z.zhang at intel.com>; Lu, Wenzhuo <wenzhuo.lu at intel.com>; Wu,
> Jingjing <jingjing.wu at intel.com>; Li, Xiaoyun <xiaoyun.li at intel.com>; Ferruh
> Yigit <ferruh.yigit at intel.com>
> Subject: [PATCH v2] net/ice: fix scalar Rx and Tx path segment
> 
> CRC is stripped by the hardware in the scattered Rx path. If the last buffer
> packet length is '0', the scalar Tx path would send empty buffer that causes
> the Tx queue to overflow.

Please separate this patch into two — one for Rx and one for Tx — since they are independent.

For the Tx implementation, I think we can move this into tx_prepare, which is the place to check for Tx violations.
 
> 
> This patch adds a judgment for the last buffer length to fix this issue, so that
> it would free the mbuf associated to the last one if the last buffer is empty.
> 
> Fixes: 6eac0b7fde95 ("net/ice: support advance Rx/Tx")
> Cc: stable at dpdk.org
> 
> Signed-off-by: Mingjin Ye <mingjinx.ye at intel.com>
> 
> v2:
> 	* Fix log level in ice_rxtx.c source file.
> ---
>  drivers/net/ice/ice_rxtx.c | 53 ++++++++++++++++++++++++++++++++++++--
>  1 file changed, 51 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c index
> 0a2b0376ac..b181f66aad 100644
> --- a/drivers/net/ice/ice_rxtx.c
> +++ b/drivers/net/ice/ice_rxtx.c
> @@ -2111,6 +2111,10 @@ ice_recv_scattered_pkts(void *rx_queue,
>  			} else
>  				rxm->data_len = (uint16_t)(rx_packet_len -
> 
> RTE_ETHER_CRC_LEN);
> +		} else if (rx_packet_len == 0) {
> +			rte_pktmbuf_free_seg(rxm);
> +			first_seg->nb_segs--;
> +			last_seg->next = NULL;
>  		}
> 
>  		first_seg->port = rxq->port_id;
> @@ -2903,6 +2907,35 @@ ice_calc_pkt_desc(struct rte_mbuf *tx_pkt)
>  	return count;
>  }
> 
> +/*Check the number of valid mbufs and free the invalid mbufs*/ static
> +inline uint16_t ice_check_mbuf(struct rte_mbuf *tx_pkt) {
> +	struct rte_mbuf *txd = tx_pkt;
> +	struct rte_mbuf *txd_removal = NULL;
> +	struct rte_mbuf *txd_pre = NULL;
> +	uint16_t count = 0;
> +	uint16_t removal = 0;
> +
> +	while (txd != NULL) {
> +		if (removal == 1 || txd->data_len == 0) {
> +			txd_removal = txd;
> +			txd = txd->next;
> +			if (removal == 0) {
> +				removal = 1;
> +				txd_pre->next = NULL;
> +			}
> +			rte_pktmbuf_free_seg(txd_removal);
> +		} else {
> +			++count;
> +			txd_pre = txd;
> +			txd = txd->next;
> +		}
> +	}
> +
> +	return count;
> +}
> +
>  uint16_t
>  ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
> { @@ -2960,11 +2993,27 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf
> **tx_pkts, uint16_t nb_pkts)
>  		 * the mbuf data size exceeds max data size that hw allows
>  		 * per tx desc.
>  		 */
> -		if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
> +		if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
>  			nb_used = (uint16_t)(ice_calc_pkt_desc(tx_pkt) +
>  					     nb_ctx);
> -		else
> +		} else {
> +			nb_used = ice_check_mbuf(tx_pkt);
> +			if (nb_used == 0) {
> +				PMD_TX_LOG(ERR,
> +				"Check packets is empty "
> +				"(port=%d queue=%d)\n",
> +				txq->port_id, txq->queue_id);
> +				continue;
> +			} else if (nb_used < tx_pkt->nb_segs) {
> +				PMD_TX_LOG(DEBUG,
> +				"Check packets valid num ="
> +				"%4u total num = %4u (port=%d
> queue=%d)\n",
> +				nb_used, tx_pkt->nb_segs, txq->port_id, txq-
> >queue_id);
> +				tx_pkt->nb_segs = nb_used;
> +			}
>  			nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
> +		}
> +
>  		tx_last = (uint16_t)(tx_id + nb_used - 1);
> 
>  		/* Circular ring */
> --
> 2.34.1



More information about the stable mailing list