[dpdk-dev] [PATCH v8 1/3] net/i40e: enable per dev PTYPE mapping table

Zhang, Qi Z qi.z.zhang at intel.com
Fri Apr 7 04:36:58 CEST 2017


Hi Chao,
	Would you help check the PowerPC part (i40e_rxtx_vec_altivec.c)?
Thanks,
Qi

> -----Original Message-----
> From: Zhang, Qi Z
> Sent: Friday, April 7, 2017 11:25 AM
> To: Wu, Jingjing <jingjing.wu at intel.com>; Zhang, Helin
> <helin.zhang at intel.com>
> Cc: Yigit, Ferruh <ferruh.yigit at intel.com>; jianbo.liu at linaro.org;
> chaozhu at linux.vnet.ibm.com; dev at dpdk.org; Zhang, Qi Z
> <qi.z.zhang at intel.com>
> Subject: [PATCH v8 1/3] net/i40e: enable per dev PTYPE mapping table
> 
> The mapping from hardware-defined packet types to software-defined
> packet types is static for i40e devices. This patch gives each ethdev
> its own copy of the mapping table, so different ethdevs can be
> configured with different PTYPE mapping rules, which is a prerequisite
> for supporting the dynamic PTYPE feature of upcoming hardware.
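> 
> For illustration, a minimal sketch of the resulting lookup change in
> the Rx path; "hw_ptype" is a stand-in name for the 8-bit value
> extracted from the descriptor as
> (qword1 & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT:
> 
> 	/* before: one static table shared by every i40e port */
> 	mb->packet_type = i40e_rxd_pkt_type_mapping(hw_ptype);
> 
> 	/* after: each adapter holds its own copy, filled with the
> 	 * defaults by i40e_set_default_ptype_table() at init time
> 	 */
> 	mb->packet_type = rxq->vsi->adapter->ptype_tbl[hw_ptype];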
> 
> Signed-off-by: Qi Zhang <qi.z.zhang at intel.com>
> ---
> 
> v8:
> 
> - Fix compile error for ARM vPMD.
> 
> v7:
> 
> - Add Altivec vPMD support.
> 
> v5:
> 
> - Rebase to dpdk-next-net
> 
>  drivers/net/i40e/i40e_ethdev.c           |  1 +
>  drivers/net/i40e/i40e_ethdev.h           |  5 +++++
>  drivers/net/i40e/i40e_ethdev_vf.c        |  2 +-
>  drivers/net/i40e/i40e_rxtx.c             | 30 ++++++++++++++++++++++--------
>  drivers/net/i40e/i40e_rxtx.h             |  3 ++-
>  drivers/net/i40e/i40e_rxtx_vec_altivec.c | 18 ++++++++----------
>  drivers/net/i40e/i40e_rxtx_vec_neon.c    |  8 +++++---
>  drivers/net/i40e/i40e_rxtx_vec_sse.c     | 14 ++++++++------
>  8 files changed, 52 insertions(+), 29 deletions(-)
> 
> diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
> index 6927fde..2b95996 100644
> --- a/drivers/net/i40e/i40e_ethdev.c
> +++ b/drivers/net/i40e/i40e_ethdev.c
> @@ -1065,6 +1065,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
>  		i40e_set_tx_function(dev);
>  		return 0;
>  	}
> +	i40e_set_default_ptype_table(dev);
>  	pci_dev = I40E_DEV_TO_PCI(dev);
>  	intr_handle = &pci_dev->intr_handle;
> 
> diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
> index 69c6684..a1535b0 100644
> --- a/drivers/net/i40e/i40e_ethdev.h
> +++ b/drivers/net/i40e/i40e_ethdev.h
> @@ -744,6 +744,8 @@ struct i40e_vf {
>  	uint64_t flags;
>  };
> 
> +#define I40E_MAX_PKT_TYPE 256
> +
>  /*
>   * Structure to store private data for each PF/VF instance.
>   */
> @@ -768,6 +770,9 @@ struct i40e_adapter {
>  	struct rte_timecounter systime_tc;
>  	struct rte_timecounter rx_tstamp_tc;
>  	struct rte_timecounter tx_tstamp_tc;
> +
> +	/* ptype mapping table */
> +	uint32_t ptype_tbl[I40E_MAX_PKT_TYPE] __rte_cache_min_aligned;
>  };
> 
>  extern const struct rte_flow_ops i40e_flow_ops;
> diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
> index 7e48fea..3db5b6f 100644
> --- a/drivers/net/i40e/i40e_ethdev_vf.c
> +++ b/drivers/net/i40e/i40e_ethdev_vf.c
> @@ -1477,7 +1477,7 @@ i40evf_dev_init(struct rte_eth_dev *eth_dev)
>  		i40e_set_tx_function(eth_dev);
>  		return 0;
>  	}
> -
> +	i40e_set_default_ptype_table(eth_dev);
>  	rte_eth_copy_pci_info(eth_dev, pci_dev);
>  	eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
> 
> diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
> index ff70c06..e5471b1 100644
> --- a/drivers/net/i40e/i40e_rxtx.c
> +++ b/drivers/net/i40e/i40e_rxtx.c
> @@ -61,7 +61,6 @@
> 
>  #define DEFAULT_TX_RS_THRESH   32
>  #define DEFAULT_TX_FREE_THRESH 32
> -#define I40E_MAX_PKT_TYPE      256
> 
>  #define I40E_TX_MAX_BURST  32
> 
> @@ -458,6 +457,7 @@ i40e_rx_scan_hw_ring(struct i40e_rx_queue *rxq)
>  	int32_t s[I40E_LOOK_AHEAD], nb_dd;
>  	int32_t i, j, nb_rx = 0;
>  	uint64_t pkt_flags;
> +	uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
> 
>  	rxdp = &rxq->rx_ring[rxq->rx_tail];
>  	rxep = &rxq->sw_ring[rxq->rx_tail];
> @@ -506,9 +506,9 @@ i40e_rx_scan_hw_ring(struct i40e_rx_queue *rxq)
>  			pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
>  			pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);
>  			mb->packet_type =
> -				i40e_rxd_pkt_type_mapping((uint8_t)((qword1 &
> -						I40E_RXD_QW1_PTYPE_MASK) >>
> -						I40E_RXD_QW1_PTYPE_SHIFT));
> +				ptype_tbl[(uint8_t)((qword1 &
> +				I40E_RXD_QW1_PTYPE_MASK) >>
> +				I40E_RXD_QW1_PTYPE_SHIFT)];
>  			if (pkt_flags & PKT_RX_RSS_HASH)
>  				mb->hash.rss = rte_le_to_cpu_32(\
>  					rxdp[j].wb.qword0.hi_dword.rss);
> @@ -700,6 +700,7 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf
> **rx_pkts, uint16_t nb_pkts)
>  	uint16_t rx_id, nb_hold;
>  	uint64_t dma_addr;
>  	uint64_t pkt_flags;
> +	uint32_t *ptype_tbl;
> 
>  	nb_rx = 0;
>  	nb_hold = 0;
> @@ -707,6 +708,7 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf
> **rx_pkts, uint16_t nb_pkts)
>  	rx_id = rxq->rx_tail;
>  	rx_ring = rxq->rx_ring;
>  	sw_ring = rxq->sw_ring;
> +	ptype_tbl = rxq->vsi->adapter->ptype_tbl;
> 
>  	while (nb_rx < nb_pkts) {
>  		rxdp = &rx_ring[rx_id];
> @@ -763,8 +765,8 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf
> **rx_pkts, uint16_t nb_pkts)
>  		pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
>  		pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);
>  		rxm->packet_type =
> -			i40e_rxd_pkt_type_mapping((uint8_t)((qword1 &
> -			I40E_RXD_QW1_PTYPE_MASK) >>
> I40E_RXD_QW1_PTYPE_SHIFT));
> +			ptype_tbl[(uint8_t)((qword1 &
> +			I40E_RXD_QW1_PTYPE_MASK) >>
> I40E_RXD_QW1_PTYPE_SHIFT)];
>  		if (pkt_flags & PKT_RX_RSS_HASH)
>  			rxm->hash.rss =
>  				rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
> @@ -818,6 +820,7 @@ i40e_recv_scattered_pkts(void *rx_queue,
>  	uint64_t qword1;
>  	uint64_t dma_addr;
>  	uint64_t pkt_flags;
> +	uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
> 
>  	while (nb_rx < nb_pkts) {
>  		rxdp = &rx_ring[rx_id];
> @@ -925,8 +928,8 @@ i40e_recv_scattered_pkts(void *rx_queue,
>  		pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
>  		pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);
>  		first_seg->packet_type =
> -			i40e_rxd_pkt_type_mapping((uint8_t)((qword1 &
> -			I40E_RXD_QW1_PTYPE_MASK) >>
> I40E_RXD_QW1_PTYPE_SHIFT));
> +			ptype_tbl[(uint8_t)((qword1 &
> +			I40E_RXD_QW1_PTYPE_MASK) >>
> I40E_RXD_QW1_PTYPE_SHIFT)];
>  		if (pkt_flags & PKT_RX_RSS_HASH)
>  			first_seg->hash.rss =
>  				rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
> @@ -2922,6 +2925,17 @@ i40e_set_tx_function(struct rte_eth_dev *dev)
>  	}
>  }
> 
> +void __attribute__((cold))
> +i40e_set_default_ptype_table(struct rte_eth_dev *dev)
> +{
> +	struct i40e_adapter *ad =
> +		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
> +	int i;
> +
> +	for (i = 0; i < I40E_MAX_PKT_TYPE; i++)
> +		ad->ptype_tbl[i] = i40e_get_default_pkt_type(i);
> +}
> +
>  /* Stubs needed for linkage when CONFIG_RTE_I40E_INC_VECTOR is set to 'n' */
>  int __attribute__((weak))
>  i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
> diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
> index 2fa7f37..20084d6 100644
> --- a/drivers/net/i40e/i40e_rxtx.h
> +++ b/drivers/net/i40e/i40e_rxtx.h
> @@ -254,13 +254,14 @@ void i40e_set_rx_function(struct rte_eth_dev *dev);
>  void i40e_set_tx_function_flag(struct rte_eth_dev *dev,
>  			       struct i40e_tx_queue *txq);
>  void i40e_set_tx_function(struct rte_eth_dev *dev);
> +void i40e_set_default_ptype_table(struct rte_eth_dev *dev);
> 
>  /* For each value it means, datasheet of hardware can tell more details
>   *
>   * @note: fix i40e_dev_supported_ptypes_get() if any change here.
>   */
>  static inline uint32_t
> -i40e_rxd_pkt_type_mapping(uint8_t ptype)
> +i40e_get_default_pkt_type(uint8_t ptype)
>  {
>  	static const uint32_t type_table[UINT8_MAX + 1] __rte_cache_aligned
> = {
>  		/* L2 types */
> diff --git a/drivers/net/i40e/i40e_rxtx_vec_altivec.c
> b/drivers/net/i40e/i40e_rxtx_vec_altivec.c
> index 2f6f70a..5f525ff 100644
> --- a/drivers/net/i40e/i40e_rxtx_vec_altivec.c
> +++ b/drivers/net/i40e/i40e_rxtx_vec_altivec.c
> @@ -209,7 +209,8 @@ desc_to_olflags_v(vector unsigned long descs[4],
> struct rte_mbuf **rx_pkts)
>  #define PKTLEN_SHIFT     10
> 
>  static inline void
> -desc_to_ptype_v(vector unsigned long descs[4], struct rte_mbuf **rx_pkts)
> +desc_to_ptype_v(vector unsigned long descs[4], struct rte_mbuf **rx_pkts,
> +		uint32_t *ptype_tbl)
>  {
>  	vector unsigned long ptype0 = vec_mergel(descs[0], descs[1]);
>  	vector unsigned long ptype1 = vec_mergel(descs[2], descs[3]);
> @@ -217,14 +218,10 @@ desc_to_ptype_v(vector unsigned long descs[4], struct rte_mbuf **rx_pkts)
>  	ptype0 = vec_sr(ptype0, (vector unsigned long){30, 30});
>  	ptype1 = vec_sr(ptype1, (vector unsigned long){30, 30});
> 
> -	rx_pkts[0]->packet_type = i40e_rxd_pkt_type_mapping(
> -					(*(vector unsigned char *)&ptype0)[0]);
> -	rx_pkts[1]->packet_type = i40e_rxd_pkt_type_mapping(
> -					(*(vector unsigned char *)&ptype0)[8]);
> -	rx_pkts[2]->packet_type = i40e_rxd_pkt_type_mapping(
> -					(*(vector unsigned char *)&ptype1)[0]);
> -	rx_pkts[3]->packet_type = i40e_rxd_pkt_type_mapping(
> -					(*(vector unsigned char *)&ptype1)[8]);
> +	rx_pkts[0]->packet_type = ptype_tbl[(*(vector unsigned char *)&ptype0)[0]];
> +	rx_pkts[1]->packet_type = ptype_tbl[(*(vector unsigned char *)&ptype0)[8]];
> +	rx_pkts[2]->packet_type = ptype_tbl[(*(vector unsigned char *)&ptype1)[0]];
> +	rx_pkts[3]->packet_type = ptype_tbl[(*(vector unsigned char *)&ptype1)[8]];
>  }
> 
>   /* Notice:
> @@ -242,6 +239,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq,
> struct rte_mbuf **rx_pkts,
>  	int pos;
>  	uint64_t var;
>  	vector unsigned char shuf_msk;
> +	uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
> 
>  	vector unsigned short crc_adjust = (vector unsigned short){
>  		0, 0,         /* ignore pkt_type field */
> @@ -466,7 +464,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq,
> struct rte_mbuf **rx_pkts,
>  		vec_st(pkt_mb1, 0,
>  		 (vector unsigned char *)&rx_pkts[pos]->rx_descriptor_fields1
>  		);
> -		desc_to_ptype_v(descs, &rx_pkts[pos]);
> +		desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);
>  		desc_to_olflags_v(descs, &rx_pkts[pos]);
> 
>  		/* C.4 calc avaialbe number of desc */
> diff --git a/drivers/net/i40e/i40e_rxtx_vec_neon.c b/drivers/net/i40e/i40e_rxtx_vec_neon.c
> index bd7239b..515931e 100644
> --- a/drivers/net/i40e/i40e_rxtx_vec_neon.c
> +++ b/drivers/net/i40e/i40e_rxtx_vec_neon.c
> @@ -196,7 +196,8 @@ desc_to_olflags_v(uint64x2_t descs[4], struct
> rte_mbuf **rx_pkts)
>  #define I40E_VPMD_DESC_DD_MASK	0x0001000100010001ULL
> 
>  static inline void
> -desc_to_ptype_v(uint64x2_t descs[4], struct rte_mbuf **rx_pkts)
> +desc_to_ptype_v(uint64x2_t descs[4], struct rte_mbuf **rx_pkts,
> +		uint32_t *ptype_tbl)
>  {
>  	int i;
>  	uint8_t ptype;
> @@ -205,7 +206,7 @@ desc_to_ptype_v(uint64x2_t descs[4], struct
> rte_mbuf **rx_pkts)
>  	for (i = 0; i < 4; i++) {
>  		tmp = vreinterpretq_u8_u64(vshrq_n_u64(descs[i], 30));
>  		ptype = vgetq_lane_u8(tmp, 8);
> -		rx_pkts[i]->packet_type = i40e_rxd_pkt_type_mapping(ptype);
> +		rx_pkts[i]->packet_type = ptype_tbl[ptype];
>  	}
> 
>  }
> @@ -225,6 +226,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq,
> struct rte_mbuf **rx_pkts,
>  	uint16_t nb_pkts_recd;
>  	int pos;
>  	uint64_t var;
> +	uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
> 
>  	/* mask to shuffle from desc. to mbuf */
>  	uint8x16_t shuf_msk = {
> @@ -429,7 +431,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq,
> struct rte_mbuf **rx_pkts,
>  			 pkt_mb2);
>  		vst1q_u8((void *)&rx_pkts[pos]->rx_descriptor_fields1,
>  			 pkt_mb1);
> -		desc_to_ptype_v(descs, &rx_pkts[pos]);
> +		desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);
>  		/* C.4 calc avaialbe number of desc */
>  		var = __builtin_popcountll(stat & I40E_VPMD_DESC_DD_MASK);
>  		nb_pkts_recd += var;
> diff --git a/drivers/net/i40e/i40e_rxtx_vec_sse.c
> b/drivers/net/i40e/i40e_rxtx_vec_sse.c
> index fdd4a34..335098a 100644
> --- a/drivers/net/i40e/i40e_rxtx_vec_sse.c
> +++ b/drivers/net/i40e/i40e_rxtx_vec_sse.c
> @@ -215,7 +215,8 @@ desc_to_olflags_v(__m128i descs[4], struct
> rte_mbuf **rx_pkts)
>  #define PKTLEN_SHIFT     10
> 
>  static inline void
> -desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
> +desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts,
> +		uint32_t *ptype_tbl)
>  {
>  	__m128i ptype0 = _mm_unpackhi_epi64(descs[0], descs[1]);
>  	__m128i ptype1 = _mm_unpackhi_epi64(descs[2], descs[3]);
> @@ -223,10 +224,10 @@ desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
>  	ptype0 = _mm_srli_epi64(ptype0, 30);
>  	ptype1 = _mm_srli_epi64(ptype1, 30);
> 
> -	rx_pkts[0]->packet_type =
> i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype0, 0));
> -	rx_pkts[1]->packet_type =
> i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype0, 8));
> -	rx_pkts[2]->packet_type =
> i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype1, 0));
> -	rx_pkts[3]->packet_type =
> i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype1, 8));
> +	rx_pkts[0]->packet_type = ptype_tbl[_mm_extract_epi8(ptype0, 0)];
> +	rx_pkts[1]->packet_type = ptype_tbl[_mm_extract_epi8(ptype0, 8)];
> +	rx_pkts[2]->packet_type = ptype_tbl[_mm_extract_epi8(ptype1, 0)];
> +	rx_pkts[3]->packet_type = ptype_tbl[_mm_extract_epi8(ptype1, 8)];
>  }
> 
>   /*
> @@ -245,6 +246,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq,
> struct rte_mbuf **rx_pkts,
>  	int pos;
>  	uint64_t var;
>  	__m128i shuf_msk;
> +	uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
> 
>  	__m128i crc_adjust = _mm_set_epi16(
>  				0, 0, 0,    /* ignore non-length fields */
> @@ -432,7 +434,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq,
> struct rte_mbuf **rx_pkts,
>  				 pkt_mb2);
>  		_mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
>  				 pkt_mb1);
> -		desc_to_ptype_v(descs, &rx_pkts[pos]);
> +		desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);
>  		/* C.4 calc avaialbe number of desc */
>  		var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
>  		nb_pkts_recd += var;
> --
> 2.9.3


