[dpdk-dev] [PATCH v2] net/mlx4: enhance Rx packet type offloads

Mordechay Haimovsky motih at mellanox.com
Sun Nov 5 16:41:36 CET 2017


See inline

> -----Original Message-----
> From: Adrien Mazarguil [mailto:adrien.mazarguil at 6wind.com]
> Sent: Friday, November 3, 2017 4:23 PM
> To: Mordechay Haimovsky <motih at mellanox.com>
> Cc: dev at dpdk.org
> Subject: Re: [PATCH v2] net/mlx4: enhance Rx packet type offloads
> 
> On Thu, Nov 02, 2017 at 02:14:34PM +0200, Moti Haimovsky wrote:
> > This patch enhances the Rx packet type offload to also report the L4
> > protocol information in the hw ptype filled by the PMD for each
> > received packet.
> >
> > Signed-off-by: Moti Haimovsky <motih at mellanox.com>
> > ---
> > V2:
> > * Modifications according to review by Adrien Mazarguil
> >   <adrien.mazarguil at 6wind.com>
> >   Re: [PATCH] net/mlx4: enhance Rx packet type offloads
> 
> Except for the bit about using a loop in mlx4_set_ptype_table() to populate
> mlx4_ptype_table[] in fewer LoCs :)
> 
> All right, let's keep it as an improvement for later.
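Just so the idea is not lost, a rough, untested sketch of such a loop
(my own illustration, covering only the non-tunneled outer L3/L4 status
bits; the tunneled entries and a few special cases in the table below
would still need explicit handling, so this is not equivalent to the
current table):

static void
mlx4_set_ptype_table(void)
{
	unsigned int i;

	/* Derive outer L3/L4 from the CQE status bits encoded in the index. */
	for (i = 0; i != 0x40; ++i) {
		uint32_t t = RTE_PTYPE_L2_ETHER;

		if (i & 0x08) /* MLX4_CQE_STATUS_IPV4OPT */
			t |= RTE_PTYPE_L3_IPV4_EXT;
		else if (i & 0x01) /* MLX4_CQE_STATUS_IPV4 */
			t |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
		else if (i & 0x04) /* MLX4_CQE_STATUS_IPV6 */
			t |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
		if (i & 0x10) /* MLX4_CQE_STATUS_TCP */
			t |= RTE_PTYPE_L4_TCP;
		else if (i & 0x20) /* MLX4_CQE_STATUS_UDP */
			t |= RTE_PTYPE_L4_UDP;
		else if (i & 0x02) /* MLX4_CQE_STATUS_IPV4F */
			t |= RTE_PTYPE_L4_FRAG;
		mlx4_ptype_table[i] = t;
	}
}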
> 
> > * Added mlx4_dev_supported_ptypes_get used in .dev_supported_ptypes_get
> >   for reporting supported packet types.
> 
> More comments below.
> 
> > ---
> >  drivers/net/mlx4/mlx4.c        |   3 +
> >  drivers/net/mlx4/mlx4.h        |   1 +
> >  drivers/net/mlx4/mlx4_ethdev.c |  33 ++++++
> >  drivers/net/mlx4/mlx4_prm.h    |  15 +++
> >  drivers/net/mlx4/mlx4_rxtx.c   | 258 +++++++++++++++++++++++++++++++++++++----
> >  drivers/net/mlx4/mlx4_rxtx.h   |   1 +
> >  6 files changed, 288 insertions(+), 23 deletions(-)
> >
> > diff --git a/drivers/net/mlx4/mlx4.c b/drivers/net/mlx4/mlx4.c
> > index 5d35a50..a3dca5d 100644
> > --- a/drivers/net/mlx4/mlx4.c
> > +++ b/drivers/net/mlx4/mlx4.c
> > @@ -244,6 +244,7 @@ struct mlx4_conf {
> >  	.stats_get = mlx4_stats_get,
> >  	.stats_reset = mlx4_stats_reset,
> >  	.dev_infos_get = mlx4_dev_infos_get,
> > +	.dev_supported_ptypes_get = mlx4_dev_supported_ptypes_get,
> >  	.vlan_filter_set = mlx4_vlan_filter_set,
> >  	.rx_queue_setup = mlx4_rx_queue_setup,
> >  	.tx_queue_setup = mlx4_tx_queue_setup,
> > @@ -706,6 +707,8 @@ struct mlx4_conf {
> >  static void
> >  rte_mlx4_pmd_init(void)
> >  {
> > +	/* Build the static table for ptype conversion. */
> > +	mlx4_set_ptype_table();
> >  	/*
> >  	 * RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use
> >  	 * huge pages. Calling ibv_fork_init() during init allows
> > diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
> > index e0a9853..fd4426c 100644
> > --- a/drivers/net/mlx4/mlx4.h
> > +++ b/drivers/net/mlx4/mlx4.h
> > @@ -149,6 +149,7 @@ int mlx4_flow_ctrl_get(struct rte_eth_dev *dev,
> >  		       struct rte_eth_fc_conf *fc_conf);
> >  int mlx4_flow_ctrl_set(struct rte_eth_dev *dev,
> >  		       struct rte_eth_fc_conf *fc_conf);
> > +const uint32_t *mlx4_dev_supported_ptypes_get(struct rte_eth_dev *dev);
> >
> >  /* mlx4_intr.c */
> >
> > diff --git a/drivers/net/mlx4/mlx4_ethdev.c b/drivers/net/mlx4/mlx4_ethdev.c
> > index b0acd12..7be66fc 100644
> > --- a/drivers/net/mlx4/mlx4_ethdev.c
> > +++ b/drivers/net/mlx4/mlx4_ethdev.c
> > @@ -1013,3 +1013,36 @@ enum rxmode_toggle {
> >  	assert(ret >= 0);
> >  	return -ret;
> >  }
> > +
> > +/**
> > + * DPDK callback to retrieve the received packet types that are recognizes
> > + * by the device.
> 
> recognizes => recognized / supported
> 
> > + *
> > + * @param dev
> > + *   Pointer to Ethernet device structure.
> > + *
> > + * @return
> > + *   pointer to an array of recognized packet types if in Rx burst mode,
> 
> pointer => Pointer
> 
> Why only "in Rx burst mode"?
> 
> > + *   NULL otherwise.
> > + */
> > +const uint32_t *
> > +mlx4_dev_supported_ptypes_get(struct rte_eth_dev *dev)
> > +{
> > +	static const uint32_t ptypes[] = {
> > +		/* refers to rxq_cq_to_pkt_type() */
> > +		RTE_PTYPE_L2_ETHER,
> > +		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
> > +		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
> > +		RTE_PTYPE_L4_FRAG,
> > +		RTE_PTYPE_L4_TCP,
> > +		RTE_PTYPE_L4_UDP,
> > +		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
> > +		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
> > +		RTE_PTYPE_UNKNOWN
> > +	};
> > +
> > +	if (dev->rx_pkt_burst == mlx4_rx_burst)
> > +		return ptypes;
> > +	return NULL;
> 
> How about just returning the array regardless?

From DPDK documentation (and as done in other drivers):
http://dpdk.org/doc/api/rte__ethdev_8h.html#aa63202d322632467f9cc5fc460e04ea4
Note
Better to invoke this API after the device is already started or rx burst function is decided, to obtain correct supported ptypes.
if a given PMD does not report what ptypes it supports, then the supported ptype count is reported as 0.

In our case rx_pkt_burst can also point to mlx4_rx_burst_removed.
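For illustration, a minimal usage sketch (my own example, not part of the
patch, assuming a started port identified by port_id and omitting error
handling) of how an application typically consumes this callback once the
Rx burst function is final:

#include <stdio.h>

#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_mbuf_ptype.h>

/* Print the L4 ptypes a started port reports; n <= 0 means nothing is
 * reported, e.g. when rx_pkt_burst is mlx4_rx_burst_removed. */
static void
show_l4_ptypes(uint16_t port_id)
{
	uint32_t ptypes[16];
	int n = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_L4_MASK,
						 ptypes, RTE_DIM(ptypes));
	int i;

	for (i = 0; i < n && i < (int)RTE_DIM(ptypes); ++i)
		printf("L4 ptype: %s\n", rte_get_ptype_l4_name(ptypes[i]));
}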

> 
> > +}
> 
> This function was added at the wrong spot, you should keep the same order
> as declarations in mlx4.h. When in doubt, always add new functions at the
> end of files you're modifying.
> 
Moved the function to EOF.

> > +
> > diff --git a/drivers/net/mlx4/mlx4_prm.h b/drivers/net/mlx4/mlx4_prm.h
> > index b0fd982..09abd72 100644
> > --- a/drivers/net/mlx4/mlx4_prm.h
> > +++ b/drivers/net/mlx4/mlx4_prm.h
> > @@ -75,9 +75,24 @@ enum {
> >  	MLX4_CQE_L2_TUNNEL_IPV4 = (int)(1u << 25),
> >  	MLX4_CQE_L2_TUNNEL_L4_CSUM = (int)(1u << 26),
> >  	MLX4_CQE_L2_TUNNEL = (int)(1u << 27),
> > +	MLX4_CQE_L2_VLAN_MASK = (int)(3u << 29),
> > +	MLX4_CQE_L2_TUNNEL_IPOK = (int)(1u << 31),
> >  };
> >
> > +/* CQE status flags. */
> > +#define MLX4_CQE_STATUS_IPV4 (1 << 22)
> > +#define MLX4_CQE_STATUS_IPV4F (1 << 23)
> > +#define MLX4_CQE_STATUS_IPV6 (1 << 24)
> > +#define MLX4_CQE_STATUS_IPV4OPT (1 << 25)
> > +#define MLX4_CQE_STATUS_TCP (1 << 26)
> > +#define MLX4_CQE_STATUS_UDP (1 << 27)
> > +#define MLX4_CQE_STATUS_PTYPE_MASK (MLX4_CQE_STATUS_IPV4 | \
> > +				    MLX4_CQE_STATUS_IPV4F | \
> > +				    MLX4_CQE_STATUS_IPV6 | \
> > +				    MLX4_CQE_STATUS_IPV4OPT | \
> > +				    MLX4_CQE_STATUS_TCP | \
> > +				    MLX4_CQE_STATUS_UDP)
> > +
> 
> OK except one last suggestion to enhance readability:
> 
>  #define MLX4_CQE_STATUS_PTYPE_MASK \
>  <tab>(MLX4_CQE_STATUS_IPV4 | \
>  <tab> MLX4_CQE_STATUS_IPV4F | \
>  ...
>  <tab> MLX4_CQE_STATUS_UDP)
> 
> >  /* Send queue information. */
> >  struct mlx4_sq {
> >  	uint8_t *buf; /**< SQ buffer. */
> > diff --git a/drivers/net/mlx4/mlx4_rxtx.c b/drivers/net/mlx4/mlx4_rxtx.c
> > index 67dc712..765e79e 100644
> > --- a/drivers/net/mlx4/mlx4_rxtx.c
> > +++ b/drivers/net/mlx4/mlx4_rxtx.c
> > @@ -71,6 +71,210 @@ struct pv {
> >  	uint32_t val;
> >  };
> >
> > +/** A table to translate Rx completion flags to packet type. */
> > +uint32_t mlx4_ptype_table[] __rte_cache_aligned = {
> > +	[0xff] = RTE_PTYPE_UNKNOWN, /**<Last entry. */
> 
> Missing space before "Last". However since RTE_PTYPE_UNKNOWN resolves
> to 0 and all holes in this table are implicitly zeroed as well, this last entry has
> nothing special.
> 
> How about not initializing it explicitly and instead size the table
> properly:
> 
>  uint32_t mlx4_ptype_table[0x100] __rte_cache_aligned;
> 
> > +};
> > +
> > +/**
> > + * Build a table to translate Rx completion flags to packet type.
> > + *
> > + * @note: fix mlx4_dev_supported_ptypes_get() if any change here.
> > + */
> > +void
> > +mlx4_set_ptype_table(void)
> > +{
> > +	unsigned int i;
> > +	uint32_t *p = mlx4_ptype_table;
> > +
> > +	/* Last entry must not be overwritten, reserved for errored packet. */
> > +	for (i = 0; i < RTE_DIM(mlx4_ptype_table) - 1; ++i)
> > +		p[i] = RTE_PTYPE_UNKNOWN;
> 
> The above suggestion allows this loop to be removed as well as the
> exception for the last entry.
> 
> > +	/*
> > +	 * The index to the array should have:
> > +	 *  bit[7] - MLX4_CQE_L2_TUNNEL
> > +	 *  bit[6] - MLX4_CQE_L2_TUNNEL_IPV4
> > +	 *  bit[5] - MLX4_CQE_STATUS_UDP
> > +	 *  bit[4] - MLX4_CQE_STATUS_TCP
> > +	 *  bit[3] - MLX4_CQE_STATUS_IPV4OPT
> > +	 *  bit[2] - MLX4_CQE_STATUS_IPV6
> > +	 *  bit[1] - MLX4_CQE_STATUS_IPV4F
> > +	 *  bit[0] - MLX4_CQE_STATUS_IPV4
> > +	 * giving a total of up to 256 entries.
> > +	 */
> > +	p[0x00] = RTE_PTYPE_L2_ETHER;
> > +	p[0x01] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
> > +	p[0x02] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
> > +		  RTE_PTYPE_L4_FRAG;
> > +	p[0x03] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
> > +		  RTE_PTYPE_L4_FRAG;
> > +	p[0x04] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
> > +	p[0x09] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT;
> > +	p[0x0a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
> > +		  RTE_PTYPE_L4_FRAG;
> > +	p[0x11] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
> > +		  RTE_PTYPE_L4_TCP;
> > +	p[0x12] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
> > +		  RTE_PTYPE_L4_TCP;
> > +	p[0x14] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
> > +		  RTE_PTYPE_L4_TCP;
> > +	p[0x18] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
> > +		  RTE_PTYPE_L4_TCP;
> > +	p[0x19] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
> > +		  RTE_PTYPE_L4_TCP;
> > +	p[0x1a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
> > +		  RTE_PTYPE_L4_TCP;
> > +	p[0x21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
> > +		  RTE_PTYPE_L4_UDP;
> > +	p[0x22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
> > +		  RTE_PTYPE_L4_UDP;
> > +	p[0x24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
> > +		  RTE_PTYPE_L4_UDP;
> > +	p[0x28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
> > +		  RTE_PTYPE_L4_UDP;
> > +	p[0x29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
> > +		  RTE_PTYPE_L4_UDP;
> > +	p[0x2a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
> > +		  RTE_PTYPE_L4_UDP;
> > +	/* Tunneled - L3 IPV6 */
> > +	p[0x80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
> > +	p[0x81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
> > +	p[0x82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L4_FRAG;
> > +	p[0x83] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L4_FRAG;
> > +	p[0x84] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
> > +	p[0x88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L3_IPV4_EXT;
> > +	p[0x89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L3_IPV4_EXT;
> > +	p[0x8a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_FRAG;
> > +	/* Tunneled - L3 IPV6, TCP */
> > +	p[0x91] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L4_TCP;
> > +	p[0x92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L4_FRAG |
> > +		  RTE_PTYPE_INNER_L4_TCP;
> > +	p[0x93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L4_FRAG |
> > +		  RTE_PTYPE_INNER_L4_TCP;
> > +	p[0x94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L4_TCP;
> > +	p[0x98] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L3_IPV4_EXT |
> > +		  RTE_PTYPE_INNER_L4_TCP;
> > +	p[0x99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L3_IPV4_EXT |
> > +		  RTE_PTYPE_INNER_L4_TCP;
> > +	p[0x9a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_FRAG |
> > +		  RTE_PTYPE_INNER_L4_TCP;
> > +	/* Tunneled - L3 IPV6, UDP */
> > +	p[0xa1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L4_UDP;
> > +	p[0xa2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L4_FRAG |
> > +		  RTE_PTYPE_INNER_L4_UDP;
> > +	p[0xa3] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L4_FRAG |
> > +		  RTE_PTYPE_INNER_L4_UDP;
> > +	p[0xa4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L4_UDP;
> > +	p[0xa8] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L3_IPV4_EXT |
> > +		  RTE_PTYPE_INNER_L4_UDP;
> > +	p[0xa9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L3_IPV4_EXT |
> > +		  RTE_PTYPE_INNER_L4_UDP;
> > +	p[0xaa] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_FRAG |
> > +		  RTE_PTYPE_INNER_L4_UDP;
> > +	/* Tunneled - L3 IPV4 */
> > +	p[0xc0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
> > +	p[0xc1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
> > +	p[0xc2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L4_FRAG;
> > +	p[0xc3] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L4_FRAG;
> > +	p[0xc4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
> > +	p[0xc8] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L3_IPV4_EXT;
> > +	p[0xc9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L3_IPV4_EXT;
> > +	p[0xca] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L3_IPV4_EXT |
> > +		  RTE_PTYPE_INNER_L4_FRAG;
> > +	/* Tunneled - L3 IPV4, TCP */
> > +	p[0xd0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L4_TCP;
> > +	p[0xd1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L4_TCP;
> > +	p[0xd2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L4_FRAG |
> > +		  RTE_PTYPE_INNER_L4_TCP;
> > +	p[0xd3] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L4_FRAG |
> > +		  RTE_PTYPE_INNER_L4_TCP;
> > +	p[0xd4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L4_TCP;
> > +	p[0xd8] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L3_IPV4_EXT |
> > +		  RTE_PTYPE_INNER_L4_TCP;
> > +	p[0xd9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L3_IPV4_EXT |
> > +		  RTE_PTYPE_INNER_L4_TCP;
> > +	p[0xda] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_FRAG |
> > +		  RTE_PTYPE_INNER_L4_TCP;
> > +	/* Tunneled - L3 IPV4, UDP */
> > +	p[0xe0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L4_UDP;
> > +	p[0xe1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L4_UDP;
> > +	p[0xe2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L4_FRAG |
> > +		  RTE_PTYPE_INNER_L4_UDP;
> > +	p[0xe3] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L4_FRAG |
> > +		  RTE_PTYPE_INNER_L4_UDP;
> > +	p[0xe4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L4_UDP;
> > +	p[0xe8] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_UDP;
> > +	p[0xe9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_UDP;
> > +	p[0xea] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
> > +		  RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_FRAG |
> > +		  RTE_PTYPE_INNER_L4_UDP;
> > +}
> > +
> 
> Besides being also in the wrong spot regarding mlx4_rxtx.h, this initialization
> function is in the wrong file since it's not called from the data plane, it should
> be moved either to mlx4.c or mlx4_rxq.c.
Will remove it altogether; initialization will be done statically at the array definition.
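For instance, a sketch of the intended direction (not the final patch) using
designated initializers, where every entry left out stays 0, i.e.
RTE_PTYPE_UNKNOWN, as noted above:

uint32_t mlx4_ptype_table[0x100] __rte_cache_aligned = {
	[0x00] = RTE_PTYPE_L2_ETHER,
	[0x01] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
	[0x11] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_L4_TCP,
	/* ... remaining entries as in mlx4_set_ptype_table() above ... */
};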
> 
> >  /**
> >   * Stamp a WQE so it won't be reused by the HW.
> >   *
> > @@ -568,30 +772,39 @@ struct pv {
> >  /**
> >   * Translate Rx completion flags to packet type.
> >   *
> > - * @param flags
> > - *   Rx completion flags returned by mlx4_cqe_flags().
> > + * @param[in] cqe
> > + *   Pointer to CQE.
> >   *
> >   * @return
> > - *   Packet type in mbuf format.
> > + *   Packet type for struct rte_mbuf.
> >   */
> >  static inline uint32_t
> > -rxq_cq_to_pkt_type(uint32_t flags)
> > +rxq_cq_to_pkt_type(volatile struct mlx4_cqe *cqe)
> >  {
> > -	uint32_t pkt_type;
> > +	uint8_t idx = 0;
> > +	uint32_t pinfo = rte_be_to_cpu_32(cqe->vlan_my_qpn);
> > +	uint32_t status = rte_be_to_cpu_32(cqe->status);
> >
> > -	if (flags & MLX4_CQE_L2_TUNNEL)
> > -		pkt_type =
> > -			mlx4_transpose(flags,
> > -				       MLX4_CQE_L2_TUNNEL_IPV4,
> > -				       RTE_PTYPE_L3_IPV4_EXT_UNKNOWN) |
> > -			mlx4_transpose(flags,
> > -				       MLX4_CQE_STATUS_IPV4_PKT,
> > -				       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN);
> > -	else
> > -		pkt_type = mlx4_transpose(flags,
> > -					  MLX4_CQE_STATUS_IPV4_PKT,
> > -					  RTE_PTYPE_L3_IPV4_EXT_UNKNOWN);
> > -	return pkt_type;
> > +	/*
> > +	 * The index to the array should have:
> > +	 *  bit[7] - MLX4_CQE_L2_TUNNEL
> > +	 *  bit[6] - MLX4_CQE_L2_TUNNEL_IPV4
> > +	 */
> > +	if (!(pinfo & MLX4_CQE_L2_VLAN_MASK) && (pinfo & MLX4_CQE_L2_TUNNEL))
> > +		idx |= ((pinfo & MLX4_CQE_L2_TUNNEL) >> 20) |
> > +		       ((pinfo & MLX4_CQE_L2_TUNNEL_IPV4) >> 19);
> > +	/*
> > +	 * The index to the array should have:
> > +	 *  bit[5] - MLX4_CQE_STATUS_UDP
> > +	 *  bit[4] - MLX4_CQE_STATUS_TCP
> > +	 *  bit[3] - MLX4_CQE_STATUS_IPV4OPT
> > +	 *  bit[2] - MLX4_CQE_STATUS_IPV6
> > +	 *  bit[1] - MLX4_CQE_STATUS_IPV4F
> > +	 *  bit[0] - MLX4_CQE_STATUS_IPV4
> > +	 * giving a total of up to 256 entries.
> > +	 */
> > +	idx |= ((status & MLX4_CQE_STATUS_PTYPE_MASK) >> 22);
> > +	return mlx4_ptype_table[idx];
> >  }
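Quick sanity check of the mapping (my own arithmetic, not part of the patch):
for a plain IPv4/TCP completion only MLX4_CQE_STATUS_IPV4 (bit 22) and
MLX4_CQE_STATUS_TCP (bit 26) are set in status, and no tunnel bit is set in
pinfo, so:

	idx = (status & MLX4_CQE_STATUS_PTYPE_MASK) >> 22
	    = (1 << 0) | (1 << 4) = 0x11

and mlx4_ptype_table[0x11] yields RTE_PTYPE_L2_ETHER |
RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP, as expected from the table.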
> >
> >  /**
> > @@ -774,6 +987,10 @@ struct pv {
> >  				goto skip;
> >  			}
> >  			pkt = seg;
> > +			/* Update packet information. */
> > +			pkt->packet_type = rxq_cq_to_pkt_type(cqe);
> > +			pkt->ol_flags = 0;
> > +			pkt->pkt_len = len;
> >  			if (rxq->csum | rxq->csum_l2tun) {
> >  				uint32_t flags =
> >  					mlx4_cqe_flags(cqe,
> > @@ -784,12 +1001,7 @@ struct pv {
> >  					rxq_cq_to_ol_flags(flags,
> >  							   rxq->csum,
> >  							   rxq->csum_l2tun);
> > -				pkt->packet_type = rxq_cq_to_pkt_type(flags);
> > -			} else {
> > -				pkt->packet_type = 0;
> > -				pkt->ol_flags = 0;
> >  			}
> > -			pkt->pkt_len = len;
> >  		}
> >  		rep->nb_segs = 1;
> >  		rep->port = rxq->port_id;
> > diff --git a/drivers/net/mlx4/mlx4_rxtx.h b/drivers/net/mlx4/mlx4_rxtx.h
> > index 7d67748..e5810ac 100644
> > --- a/drivers/net/mlx4/mlx4_rxtx.h
> > +++ b/drivers/net/mlx4/mlx4_rxtx.h
> > @@ -174,6 +174,7 @@ uint16_t mlx4_tx_burst_removed(void *dpdk_txq, struct rte_mbuf **pkts,
> >  			       uint16_t pkts_n);
> >  uint16_t mlx4_rx_burst_removed(void *dpdk_rxq, struct rte_mbuf **pkts,
> >  			       uint16_t pkts_n);
> > +void mlx4_set_ptype_table(void);
> >
> >  /* mlx4_txq.c */
> >
> > --
> > 1.8.3.1
> >
> 
> --
> Adrien Mazarguil
> 6WIND

