[dpdk-dev] [PATCH v2] ixgbe: remove vector pmd burst size restriction

Zoltan Kiss zoltan.kiss at linaro.org
Tue Aug 4 11:02:02 CEST 2015



On 04/08/15 08:32, Cunming Liang wrote:
> On the receive side, the burst size now floor-aligns to a multiple of RTE_IXGBE_DESCS_PER_LOOP (a power of two).
> Under this rule, a burst size smaller than 4 still won't receive anything.
> (Before this change, a burst size smaller than 32 couldn't receive anything.)
> _recv_*_pkts_vec returns no more than 32 (RTE_IXGBE_RXQ_REARM_THRESH) packets.
>
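To spell out the new rx clamping for anyone skimming: it boils down to
two macros from rte_common.h. A minimal standalone sketch (the helper
name is mine, not in the patch; the constants mirror this patch's
ixgbe_rxtx.h values):

	#include <rte_common.h>	/* RTE_MIN, RTE_ALIGN_FLOOR */

	/* sketch of what the patched _recv_raw_pkts_vec does to nb_pkts */
	static inline uint16_t
	clamp_rx_burst(uint16_t nb_pkts)
	{
		/* never scan more DD bits than the rearm threshold (32) */
		nb_pkts = RTE_MIN(nb_pkts, RTE_IXGBE_MAX_RX_BURST);
		/* floor-align to the 4-descriptor vector loop stride */
		return RTE_ALIGN_FLOOR(nb_pkts, RTE_IXGBE_DESCS_PER_LOOP);
	}
	/* e.g. 3 -> 0 (nothing received), 7 -> 4, 31 -> 28, 100 -> 32 */
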
> On the transmit side, the max burst size is no longer bound to a constant, but it still
> has to check that a burst does not cross the tx_rs_thresh boundary.
>
> No obvious performance drop was found for either recv_pkts_vec
> or recv_scattered_pkts_vec at burst size 32.
>
> Signed-off-by: Cunming Liang <cunming.liang at intel.com>
> ---
>   v2 change:
>    - keep max rx burst size in 32
>    - reword some comments
>
>   drivers/net/ixgbe/ixgbe_rxtx.c     |  4 +++-
>   drivers/net/ixgbe/ixgbe_rxtx.h     |  5 ++---
>   drivers/net/ixgbe/ixgbe_rxtx_vec.c | 39 +++++++++++++++++++++-----------------
>   3 files changed, 27 insertions(+), 21 deletions(-)
>
> diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
> index 91023b9..d06aaae 100644
> --- a/drivers/net/ixgbe/ixgbe_rxtx.c
> +++ b/drivers/net/ixgbe/ixgbe_rxtx.c
> @@ -4008,7 +4008,9 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev)
>   	 */
>   	} else if (adapter->rx_vec_allowed) {
>   		PMD_INIT_LOG(DEBUG, "Vector rx enabled, please make sure RX "
> -				   "burst size no less than 32.");
> +				    "burst size no less than "
> +				    "RTE_IXGBE_DESCS_PER_LOOP(=4) (port=%d).",

I think you should write this line as:

"%d (port=%d)", RTE_IXGBE_DESCS_PER_LOOP,
> +			     dev->data->port_id);
>
>   		dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
>   	} else if (adapter->rx_bulk_alloc_allowed) {
> diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
> index 113682a..b9eca67 100644
> --- a/drivers/net/ixgbe/ixgbe_rxtx.h
> +++ b/drivers/net/ixgbe/ixgbe_rxtx.h
> @@ -47,9 +47,8 @@
>   	(uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
>
>   #ifdef RTE_IXGBE_INC_VECTOR
> -#define RTE_IXGBE_VPMD_RX_BURST         32
> -#define RTE_IXGBE_VPMD_TX_BURST         32
> -#define RTE_IXGBE_RXQ_REARM_THRESH      RTE_IXGBE_VPMD_RX_BURST
> +#define RTE_IXGBE_RXQ_REARM_THRESH      32
> +#define RTE_IXGBE_MAX_RX_BURST          RTE_IXGBE_RXQ_REARM_THRESH
>   #define RTE_IXGBE_TX_MAX_FREE_BUF_SZ    64
>   #endif
>
> diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec.c b/drivers/net/ixgbe/ixgbe_rxtx_vec.c
> index cf25a53..2ca0e4c 100644
> --- a/drivers/net/ixgbe/ixgbe_rxtx_vec.c
> +++ b/drivers/net/ixgbe/ixgbe_rxtx_vec.c
> @@ -245,13 +245,13 @@ desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
>   #endif
>
>   /*
> - * vPMD receive routine, now only accept (nb_pkts == RTE_IXGBE_VPMD_RX_BURST)
> - * in one loop
> + * vPMD raw receive routine, only accepts (nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP)
>    *
>    * Notice:
> - * - nb_pkts < RTE_IXGBE_VPMD_RX_BURST, just return no packet
> - * - nb_pkts > RTE_IXGBE_VPMD_RX_BURST, only scan RTE_IXGBE_VPMD_RX_BURST
> + * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet
> + * - nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST
>    *   numbers of DD bit
> + * - floor-align nb_pkts to a multiple of RTE_IXGBE_DESCS_PER_LOOP (a power of two)
>    * - don't support ol_flags for rss and csum err
>    */
>   static inline uint16_t
> @@ -286,8 +286,11 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
>   	__m128i dd_check, eop_check;
>   #endif /* RTE_NEXT_ABI */
>
> -	if (unlikely(nb_pkts < RTE_IXGBE_VPMD_RX_BURST))
> -		return 0;
> +	/* nb_pkts shall be less than or equal to RTE_IXGBE_MAX_RX_BURST */
> +	nb_pkts = RTE_MIN(nb_pkts, RTE_IXGBE_MAX_RX_BURST);
> +
> +	/* nb_pkts has to be floor-aligned to RTE_IXGBE_DESCS_PER_LOOP */
> +	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_IXGBE_DESCS_PER_LOOP);
>
>   	/* Just the act of getting into the function from the application is
>   	 * going to cost about 7 cycles */
> @@ -356,7 +359,7 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
>   	 * D. fill info. from desc to mbuf
>   	 */
>   #endif /* RTE_NEXT_ABI */
> -	for (pos = 0, nb_pkts_recd = 0; pos < RTE_IXGBE_VPMD_RX_BURST;
> +	for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
>   			pos += RTE_IXGBE_DESCS_PER_LOOP,
>   			rxdp += RTE_IXGBE_DESCS_PER_LOOP) {
>   #ifdef RTE_NEXT_ABI
> @@ -518,13 +521,13 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
>   }
>
>   /*
> - * vPMD receive routine, now only accept (nb_pkts == RTE_IXGBE_VPMD_RX_BURST)
> - * in one loop
> + * vPMD receive routine, only accepts (nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP)
>    *
>    * Notice:
> - * - nb_pkts < RTE_IXGBE_VPMD_RX_BURST, just return no packet
> - * - nb_pkts > RTE_IXGBE_VPMD_RX_BURST, only scan RTE_IXGBE_VPMD_RX_BURST
> + * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet
> + * - nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST
>    *   numbers of DD bit
> + * - floor-align nb_pkts to a multiple of RTE_IXGBE_DESCS_PER_LOOP (a power of two)
>    * - don't support ol_flags for rss and csum err
>    */
>   uint16_t
> @@ -538,12 +541,11 @@ static inline uint16_t
>   reassemble_packets(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_bufs,
>   		uint16_t nb_bufs, uint8_t *split_flags)
>   {
> -	struct rte_mbuf *pkts[RTE_IXGBE_VPMD_RX_BURST]; /*finished pkts*/
> +	struct rte_mbuf *pkts[nb_bufs]; /*finished pkts*/
>   	struct rte_mbuf *start = rxq->pkt_first_seg;
>   	struct rte_mbuf *end =  rxq->pkt_last_seg;
>   	unsigned pkt_idx, buf_idx;
>
> -
>   	for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
>   		if (end != NULL) {
>   			/* processing a split packet */
> @@ -603,14 +605,17 @@ reassemble_packets(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_bufs,
>    *
>    * Notice:
>    * - don't support ol_flags for rss and csum err
> - * - now only accept (nb_pkts == RTE_IXGBE_VPMD_RX_BURST)
> + * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet
> + * - nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST
> + *   numbers of DD bit
> + * - floor-align nb_pkts to a multiple of RTE_IXGBE_DESCS_PER_LOOP (a power of two)
>    */
>   uint16_t
>   ixgbe_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
>   		uint16_t nb_pkts)
>   {
>   	struct ixgbe_rx_queue *rxq = rx_queue;
> -	uint8_t split_flags[RTE_IXGBE_VPMD_RX_BURST] = {0};
> +	uint8_t split_flags[RTE_IXGBE_MAX_RX_BURST] = {0};
>
>   	/* get some new buffers */
>   	uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,

I don't know if it actually matters from a performance point of view, but
you check the whole split_flags array even if you received only 4
packets. Though the overhead of a for loop might be bigger.
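E.g. something like this bounded scan (untested sketch; it assumes the
current early-return when no split flags are set, and whether it beats
a few fixed 64-bit compares over the 32-byte array is exactly the
question):

	/* look at only the flags of buffers actually received */
	uint16_t i;
	for (i = 0; i < nb_bufs; i++)
		if (split_flags[i])
			break;
	if (i == nb_bufs)
		return nb_bufs;	/* no split packets in this burst */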

> @@ -735,8 +740,8 @@ ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
>   	uint64_t rs = IXGBE_ADVTXD_DCMD_RS|DCMD_DTYP_FLAGS;
>   	int i;
>
> -	if (unlikely(nb_pkts > RTE_IXGBE_VPMD_TX_BURST))
> -		nb_pkts = RTE_IXGBE_VPMD_TX_BURST;
> +	/* cross rx_thresh boundary is not allowed */
> +	nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
>
>   	if (txq->nb_tx_free < txq->tx_free_thresh)
>   		ixgbe_tx_free_bufs(txq);
>
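One consequence of the tx clamp worth noting for callers: each call now
sends at most tx_rs_thresh packets instead of at most 32, so an
application passing a larger burst gets a partial send and the usual
drain loop still applies. A generic sketch, with placeholder port/queue
ids:

	uint16_t sent = 0;
	while (sent < nb_pkts) {
		uint16_t n = rte_eth_tx_burst(port_id, queue_id,
					      &pkts[sent], nb_pkts - sent);
		if (n == 0)
			break;	/* ring full: retry or drop is the app's call */
		sent += n;
	}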

