[dpdk-dev] [PATCH v3 04/14] net/mlx5: support Rx tunnel type identification

Xueming(Steven) Li xuemingl at mellanox.com
Sat Apr 14 14:57:58 CEST 2018


+Adrien

> -----Original Message-----
> From: Nélio Laranjeiro <nelio.laranjeiro at 6wind.com>
> Sent: Friday, April 13, 2018 9:03 PM
> To: Xueming(Steven) Li <xuemingl at mellanox.com>
> Cc: Shahaf Shuler <shahafs at mellanox.com>; dev at dpdk.org; Olivier Matz
> <olivier.matz at 6wind.com>
> Subject: Re: [PATCH v3 04/14] net/mlx5: support Rx tunnel type
> identification
> 
> +Olivier,
> 
> On Fri, Apr 13, 2018 at 07:20:13PM +0800, Xueming Li wrote:
> > This patch introduces tunnel type identification based on flow rules.
> > If flows of multiple tunnel types are built on the same queue,
> > RTE_PTYPE_TUNNEL_MASK will be returned; the user application can use
> > bits in the flow mark as a tunnel type identifier.
> 
> For an application it will mean the packet embeds all tunnel types defined
> in DPDK; to make such a thing work you need a RTE_PTYPE_TUNNEL_UNKNOWN,
> which does not exist currently.

There was a RTE_PTYPE_TUNNEL_UNKNOWN definition, but it was removed after
discussion. So I think it would be good to add it back in the patchset
reviewed by Adrien.
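
For illustration, a minimal sketch of what re-adding it could look like
(the value below is hypothetical, not allocated in DPDK; it only reuses a
spare code point under RTE_PTYPE_TUNNEL_MASK, which is 0x0000f000 in
rte_mbuf_ptype.h):

    /* Hypothetical value, shown only to illustrate the proposal. */
    #define RTE_PTYPE_TUNNEL_UNKNOWN 0x0000e000

A queue carrying flows of several tunnel types could then report
RTE_PTYPE_TUNNEL_UNKNOWN instead of overloading RTE_PTYPE_TUNNEL_MASK.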

> Even with it, the application still needs to parse the packet to discover
> which tunnel the packet embeds; is there any benefit to having such a bit?
> Not so sure.

With a tunnel flag, the checksum status represents the inner checksum.
Setting a flow mark for each tunnel type can save the time spent parsing
the tunnel headers.
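
To make this concrete, here is a minimal Rx-side sketch, assuming each
tunnel flow rule was created with a distinct MARK action; mark_to_tunnel()
and the two handlers are hypothetical application helpers:

    #include <rte_ethdev.h>
    #include <rte_mbuf.h>
    #include <rte_mbuf_ptype.h>

    /* Hypothetical helpers supplied by the application. */
    static uint32_t mark_to_tunnel(uint32_t mark);
    static void handle_tunneled(struct rte_mbuf *m, uint32_t tun);
    static void handle_plain(struct rte_mbuf *m);

    static void
    rx_tunnel_demo(uint16_t port_id)
    {
            struct rte_mbuf *pkts[32];
            uint16_t i;
            uint16_t nb = rte_eth_rx_burst(port_id, 0, pkts, 32);

            for (i = 0; i != nb; ++i) {
                    struct rte_mbuf *m = pkts[i];
                    uint32_t tun = m->packet_type & RTE_PTYPE_TUNNEL_MASK;

                    if (tun == RTE_PTYPE_TUNNEL_MASK &&
                        (m->ol_flags & PKT_RX_FDIR_ID))
                            /* Several tunnel types share this queue:
                             * resolve the real one from the flow mark. */
                            tun = mark_to_tunnel(m->hash.fdir.hi);
                    if (tun)
                            /* Checksum flags describe the inner headers. */
                            handle_tunneled(m, tun);
                    else
                            handle_plain(m);
            }
    }

Note the PMD side stays branch-free: in the patch below, rxq_cq_to_pkt_type()
only ORs rxq->tunnel into the packet type when the tunnel bit (bit 6) of the
ptype table index is set.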

> 
> Thanks,
> 
> > Signed-off-by: Xueming Li <xuemingl at mellanox.com>
> > ---
> >  drivers/net/mlx5/mlx5_flow.c          | 127 +++++++++++++++++++++++++++++-----
> >  drivers/net/mlx5/mlx5_rxq.c           |  11 ++-
> >  drivers/net/mlx5/mlx5_rxtx.c          |  12 ++--
> >  drivers/net/mlx5/mlx5_rxtx.h          |   9 ++-
> >  drivers/net/mlx5/mlx5_rxtx_vec_neon.h |  21 +++---
> >  drivers/net/mlx5/mlx5_rxtx_vec_sse.h  |  17 +++--
> >  6 files changed, 159 insertions(+), 38 deletions(-)
> >
> > diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
> > index 644f26a95..7d04b4d65 100644
> > --- a/drivers/net/mlx5/mlx5_flow.c
> > +++ b/drivers/net/mlx5/mlx5_flow.c
> > @@ -225,6 +225,7 @@ struct rte_flow {
> >  	struct rte_flow_action_rss rss_conf; /**< RSS configuration */
> >  	uint16_t (*queues)[]; /**< Queues indexes to use. */
> >  	uint8_t rss_key[40]; /**< copy of the RSS key. */
> > +	uint32_t tunnel; /**< Tunnel type of RTE_PTYPE_TUNNEL_XXX. */
> >  	struct ibv_counter_set *cs; /**< Holds the counters for the rule. */
> >  	struct mlx5_flow_counter_stats counter_stats;/**<The counter stats. */
> >  	struct mlx5_flow frxq[RTE_DIM(hash_rxq_init)];
> > @@ -241,6 +242,19 @@ struct rte_flow {
> >  	(type) == RTE_FLOW_ITEM_TYPE_VXLAN || \
> >  	(type) == RTE_FLOW_ITEM_TYPE_GRE)
> >
> > +const uint32_t flow_ptype[] = {
> > +	[RTE_FLOW_ITEM_TYPE_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
> > +	[RTE_FLOW_ITEM_TYPE_GRE] = RTE_PTYPE_TUNNEL_GRE,
> > +};
> > +
> > +#define PTYPE_IDX(t) ((RTE_PTYPE_TUNNEL_MASK & (t)) >> 12)
> > +
> > +const uint32_t ptype_ext[] = {
> > +	[PTYPE_IDX(RTE_PTYPE_TUNNEL_VXLAN)] = RTE_PTYPE_TUNNEL_VXLAN |
> > +					      RTE_PTYPE_L4_UDP,
> > +	[PTYPE_IDX(RTE_PTYPE_TUNNEL_GRE)] = RTE_PTYPE_TUNNEL_GRE,
> > +};
> > +
> >  /** Structure to generate a simple graph of layers supported by the NIC. */
> >  struct mlx5_flow_items {
> >  	/** List of possible actions for these items. */
> > @@ -440,6 +454,7 @@ struct mlx5_flow_parse {
> >  	uint16_t queues[RTE_MAX_QUEUES_PER_PORT]; /**< Queues indexes to use. */
> >  	uint8_t rss_key[40]; /**< copy of the RSS key. */
> >  	enum hash_rxq_type layer; /**< Last pattern layer detected. */
> > +	uint32_t tunnel; /**< Tunnel type of RTE_PTYPE_TUNNEL_XXX. */
> >  	struct ibv_counter_set *cs; /**< Holds the counter set for the rule */
> >  	struct {
> >  		struct ibv_flow_attr *ibv_attr;
> > @@ -858,7 +873,7 @@ mlx5_flow_convert_items_validate(const struct rte_flow_item items[],
> >  		if (ret)
> >  			goto exit_item_not_supported;
> >  		if (IS_TUNNEL(items->type)) {
> > -			if (parser->inner) {
> > +			if (parser->tunnel) {
> >  				rte_flow_error_set(error, ENOTSUP,
> >  						   RTE_FLOW_ERROR_TYPE_ITEM,
> >  						   items,
> > @@ -867,6 +882,7 @@ mlx5_flow_convert_items_validate(const struct rte_flow_item items[],
> >  				return -rte_errno;
> >  			}
> >  			parser->inner = IBV_FLOW_SPEC_INNER;
> > +			parser->tunnel = flow_ptype[items->type];
> >  		}
> >  		if (parser->drop) {
> >  			parser->queue[HASH_RXQ_ETH].offset += cur_item->dst_sz;
> > @@ -1175,6 +1191,7 @@ mlx5_flow_convert(struct rte_eth_dev *dev,
> >  	}
> >  	/* Third step. Conversion parse, fill the specifications. */
> >  	parser->inner = 0;
> > +	parser->tunnel = 0;
> >  	for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
> >  		struct mlx5_flow_data data = {
> >  			.parser = parser,
> > @@ -1643,6 +1660,7 @@ mlx5_flow_create_vxlan(const struct rte_flow_item *item,
> >
> >  	id.vni[0] = 0;
> >  	parser->inner = IBV_FLOW_SPEC_INNER;
> > +	parser->tunnel = ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_VXLAN)];
> >  	if (spec) {
> >  		if (!mask)
> >  			mask = default_mask;
> > @@ -1696,6 +1714,7 @@ mlx5_flow_create_gre(const struct rte_flow_item *item __rte_unused,
> >  	};
> >
> >  	parser->inner = IBV_FLOW_SPEC_INNER;
> > +	parser->tunnel = ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_GRE)];
> >  	mlx5_flow_create_copy(parser, &tunnel, size);
> >  	return 0;
> >  }
> > @@ -1874,7 +1893,8 @@ mlx5_flow_create_action_queue_rss(struct rte_eth_dev *dev,
> >  				      parser->rss_conf.key_len,
> >  				      hash_fields,
> >  				      parser->rss_conf.queue,
> > -				      parser->rss_conf.queue_num);
> > +				      parser->rss_conf.queue_num,
> > +				      parser->tunnel);
> >  		if (flow->frxq[i].hrxq)
> >  			continue;
> >  		flow->frxq[i].hrxq =
> > @@ -1883,7 +1903,8 @@ mlx5_flow_create_action_queue_rss(struct rte_eth_dev *dev,
> >  				      parser->rss_conf.key_len,
> >  				      hash_fields,
> >  				      parser->rss_conf.queue,
> > -				      parser->rss_conf.queue_num);
> > +				      parser->rss_conf.queue_num,
> > +				      parser->tunnel);
> >  		if (!flow->frxq[i].hrxq) {
> >  			return rte_flow_error_set(error, ENOMEM,
> >  						  RTE_FLOW_ERROR_TYPE_HANDLE,
> > @@ -1895,6 +1916,40 @@ mlx5_flow_create_action_queue_rss(struct rte_eth_dev *dev,
> >  }
> >
> >  /**
> > + * RXQ update after flow rule creation.
> > + *
> > + * @param dev
> > + *   Pointer to Ethernet device.
> > + * @param flow
> > + *   Pointer to the flow rule.
> > + */
> > +static void
> > +mlx5_flow_create_update_rxqs(struct rte_eth_dev *dev, struct rte_flow *flow)
> > +{
> > +	struct priv *priv = dev->data->dev_private;
> > +	unsigned int i;
> > +
> > +	if (!dev->data->dev_started)
> > +		return;
> > +	for (i = 0; i != flow->rss_conf.queue_num; ++i) {
> > +		struct mlx5_rxq_data *rxq_data = (*priv->rxqs)
> > +						 [(*flow->queues)[i]];
> > +		struct mlx5_rxq_ctrl *rxq_ctrl =
> > +			container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
> > +		uint8_t tunnel = PTYPE_IDX(flow->tunnel);
> > +
> > +		rxq_data->mark |= flow->mark;
> > +		if (!tunnel)
> > +			continue;
> > +		rxq_ctrl->tunnel_types[tunnel] += 1;
> > +		if (rxq_data->tunnel != flow->tunnel)
> > +			rxq_data->tunnel = rxq_data->tunnel ?
> > +					   RTE_PTYPE_TUNNEL_MASK :
> > +					   flow->tunnel;
> > +	}
> > +}
> > +
> > +/**
> >   * Complete flow rule creation.
> >   *
> >   * @param dev
> > @@ -1954,12 +2009,7 @@ mlx5_flow_create_action_queue(struct rte_eth_dev *dev,
> >  				   NULL, "internal error in flow creation");
> >  		goto error;
> >  	}
> > -	for (i = 0; i != parser->rss_conf.queue_num; ++i) {
> > -		struct mlx5_rxq_data *q =
> > -			(*priv->rxqs)[parser->rss_conf.queue[i]];
> > -
> > -		q->mark |= parser->mark;
> > -	}
> > +	mlx5_flow_create_update_rxqs(dev, flow);
> >  	return 0;
> >  error:
> >  	ret = rte_errno; /* Save rte_errno before cleanup. */
> > @@ -2032,6 +2082,7 @@ mlx5_flow_list_create(struct rte_eth_dev *dev,
> >  	}
> >  	/* Copy configuration. */
> >  	flow->queues = (uint16_t (*)[])(flow + 1);
> > +	flow->tunnel = parser.tunnel;
> >  	flow->rss_conf = (struct rte_flow_action_rss){
> >  		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
> >  		.level = 0,
> > @@ -2123,9 +2174,38 @@ mlx5_flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
> >  	struct priv *priv = dev->data->dev_private;
> >  	unsigned int i;
> >
> > -	if (flow->drop || !flow->mark)
> > +	if (flow->drop || !dev->data->dev_started)
> >  		goto free;
> > -	for (i = 0; i != flow->rss_conf.queue_num; ++i) {
> > +	for (i = 0; flow->tunnel && i != flow->rss_conf.queue_num; ++i) {
> > +		/* Update queue tunnel type. */
> > +		struct mlx5_rxq_data *rxq_data = (*priv->rxqs)
> > +						 [(*flow->queues)[i]];
> > +		struct mlx5_rxq_ctrl *rxq_ctrl =
> > +			container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
> > +		uint8_t tunnel = PTYPE_IDX(flow->tunnel);
> > +
> > +		assert(rxq_ctrl->tunnel_types[tunnel] > 0);
> > +		rxq_ctrl->tunnel_types[tunnel] -= 1;
> > +		if (!rxq_ctrl->tunnel_types[tunnel]) {
> > +			/* Update tunnel type. */
> > +			uint8_t j;
> > +			uint8_t types = 0;
> > +			uint8_t last;
> > +
> > +			for (j = 0; j < RTE_DIM(rxq_ctrl->tunnel_types); j++)
> > +				if (rxq_ctrl->tunnel_types[j]) {
> > +					types += 1;
> > +					last = j;
> > +				}
> > +			/* Keep same if more than one tunnel types left. */
> > +			if (types == 1)
> > +				rxq_data->tunnel = ptype_ext[last];
> > +			else if (types == 0)
> > +				/* No tunnel type left. */
> > +				rxq_data->tunnel = 0;
> > +		}
> > +	}
> > +	for (i = 0; flow->mark && i != flow->rss_conf.queue_num; ++i) {
> >  		struct rte_flow *tmp;
> >  		int mark = 0;
> >
> > @@ -2344,9 +2424,9 @@ mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list)
> >  {
> >  	struct priv *priv = dev->data->dev_private;
> >  	struct rte_flow *flow;
> > +	unsigned int i;
> >
> >  	TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) {
> > -		unsigned int i;
> >  		struct mlx5_ind_table_ibv *ind_tbl = NULL;
> >
> >  		if (flow->drop) {
> > @@ -2392,6 +2472,18 @@ mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list)
> >  		DRV_LOG(DEBUG, "port %u flow %p removed", dev->data->port_id,
> >  			(void *)flow);
> >  	}
> > +	/* Cleanup Rx queue tunnel info. */
> > +	for (i = 0; i != priv->rxqs_n; ++i) {
> > +		struct mlx5_rxq_data *q = (*priv->rxqs)[i];
> > +		struct mlx5_rxq_ctrl *rxq_ctrl =
> > +			container_of(q, struct mlx5_rxq_ctrl, rxq);
> > +
> > +		if (!q)
> > +			continue;
> > +		memset((void *)rxq_ctrl->tunnel_types, 0,
> > +		       sizeof(rxq_ctrl->tunnel_types));
> > +		q->tunnel = 0;
> > +	}
> >  }
> >
> >  /**
> > @@ -2439,7 +2531,8 @@ mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
> >  					      flow->rss_conf.key_len,
> >  					      hash_rxq_init[i].hash_fields,
> >  					      flow->rss_conf.queue,
> > -					      flow->rss_conf.queue_num);
> > +					      flow->rss_conf.queue_num,
> > +					      flow->tunnel);
> >  			if (flow->frxq[i].hrxq)
> >  				goto flow_create;
> >  			flow->frxq[i].hrxq =
> > @@ -2447,7 +2540,8 @@ mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
> >  					      flow->rss_conf.key_len,
> >  					      hash_rxq_init[i].hash_fields,
> >  					      flow->rss_conf.queue,
> > -					      flow->rss_conf.queue_num);
> > +					      flow->rss_conf.queue_num,
> > +					      flow->tunnel);
> >  			if (!flow->frxq[i].hrxq) {
> >  				DRV_LOG(DEBUG,
> >  					"port %u flow %p cannot be applied", @@ -
> 2469,10 +2563,7 @@
> > mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
> >  			DRV_LOG(DEBUG, "port %u flow %p applied",
> >  				dev->data->port_id, (void *)flow);
> >  		}
> > -		if (!flow->mark)
> > -			continue;
> > -		for (i = 0; i != flow->rss_conf.queue_num; ++i)
> > -			(*priv->rxqs)[flow->rss_conf.queue[i]]->mark = 1;
> > +		mlx5_flow_create_update_rxqs(dev, flow);
> >  	}
> >  	return 0;
> >  }
> > diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
> > index 1e4354ab3..351acfc0f 100644
> > --- a/drivers/net/mlx5/mlx5_rxq.c
> > +++ b/drivers/net/mlx5/mlx5_rxq.c
> > @@ -1386,6 +1386,8 @@ mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev)
> >   *   first queue index will be taken for the indirection table.
> >   * @param queues_n
> >   *   Number of queues.
> > + * @param tunnel
> > + *   Tunnel type.
> >   *
> >   * @return
> >   *   The Verbs object initialised, NULL otherwise and rte_errno is set.
> > @@ -1394,7 +1396,7 @@ struct mlx5_hrxq *
> >  mlx5_hrxq_new(struct rte_eth_dev *dev,
> >  	      const uint8_t *rss_key, uint32_t rss_key_len,
> >  	      uint64_t hash_fields,
> > -	      const uint16_t *queues, uint32_t queues_n)
> > +	      const uint16_t *queues, uint32_t queues_n, uint32_t tunnel)
> >  {
> >  	struct priv *priv = dev->data->dev_private;
> >  	struct mlx5_hrxq *hrxq;
> > @@ -1438,6 +1440,7 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
> >  	hrxq->qp = qp;
> >  	hrxq->rss_key_len = rss_key_len;
> >  	hrxq->hash_fields = hash_fields;
> > +	hrxq->tunnel = tunnel;
> >  	memcpy(hrxq->rss_key, rss_key, rss_key_len);
> >  	rte_atomic32_inc(&hrxq->refcnt);
> >  	LIST_INSERT_HEAD(&priv->hrxqs, hrxq, next);
> > @@ -1466,6 +1469,8 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
> >   *   first queue index will be taken for the indirection table.
> >   * @param queues_n
> >   *   Number of queues.
> > + * @param tunnel
> > + *   Tunnel type.
> >   *
> >   * @return
> >   *   An hash Rx queue on success.
> > @@ -1474,7 +1479,7 @@ struct mlx5_hrxq *
> >  mlx5_hrxq_get(struct rte_eth_dev *dev,
> >  	      const uint8_t *rss_key, uint32_t rss_key_len,
> >  	      uint64_t hash_fields,
> > -	      const uint16_t *queues, uint32_t queues_n)
> > +	      const uint16_t *queues, uint32_t queues_n, uint32_t tunnel)
> >  {
> >  	struct priv *priv = dev->data->dev_private;
> >  	struct mlx5_hrxq *hrxq;
> > @@ -1489,6 +1494,8 @@ mlx5_hrxq_get(struct rte_eth_dev *dev,
> >  			continue;
> >  		if (hrxq->hash_fields != hash_fields)
> >  			continue;
> > +		if (hrxq->tunnel != tunnel)
> > +			continue;
> >  		ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
> >  		if (!ind_tbl)
> >  			continue;
> > diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
> > index 1f422c70b..d061dfc8a 100644
> > --- a/drivers/net/mlx5/mlx5_rxtx.c
> > +++ b/drivers/net/mlx5/mlx5_rxtx.c
> > @@ -34,7 +34,7 @@
> >  #include "mlx5_prm.h"
> >
> >  static __rte_always_inline uint32_t
> > -rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe);
> > +rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe);
> >
> >  static __rte_always_inline int
> >  mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
> > @@ -125,12 +125,14 @@ mlx5_set_ptype_table(void)
> >  	(*p)[0x8a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
> >  		     RTE_PTYPE_L4_UDP;
> >  	/* Tunneled - L3 */
> > +	(*p)[0x40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
> >  	(*p)[0x41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
> >  		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
> >  		     RTE_PTYPE_INNER_L4_NONFRAG;
> >  	(*p)[0x42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
> >  		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
> >  		     RTE_PTYPE_INNER_L4_NONFRAG;
> > +	(*p)[0xc0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
> >  	(*p)[0xc1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
> >  		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
> >  		     RTE_PTYPE_INNER_L4_NONFRAG;
> > @@ -1577,6 +1579,8 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
> >  /**
> >   * Translate RX completion flags to packet type.
> >   *
> > + * @param[in] rxq
> > + *   Pointer to RX queue structure.
> >   * @param[in] cqe
> >   *   Pointer to CQE.
> >   *
> > @@ -1586,7 +1590,7 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
> >   *   Packet type for struct rte_mbuf.
> >   */
> >  static inline uint32_t
> > -rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe)
> > +rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe)
> >  {
> >  	uint8_t idx;
> >  	uint8_t pinfo = cqe->pkt_info;
> > @@ -1601,7 +1605,7 @@ rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe)
> >  	 * bit[7] = outer_l3_type
> >  	 */
> >  	idx = ((pinfo & 0x3) << 6) | ((ptype & 0xfc00) >> 10);
> > -	return mlx5_ptype_table[idx];
> > +	return mlx5_ptype_table[idx] | rxq->tunnel * !!(idx & (1 << 6));
> >  }
> >
> >  /**
> > @@ -1833,7 +1837,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
> >  			pkt = seg;
> >  			assert(len >= (rxq->crc_present << 2));
> >  			/* Update packet information. */
> > -			pkt->packet_type = rxq_cq_to_pkt_type(cqe);
> > +			pkt->packet_type = rxq_cq_to_pkt_type(rxq, cqe);
> >  			pkt->ol_flags = 0;
> >  			if (rss_hash_res && rxq->rss_hash) {
> >  				pkt->hash.rss = rss_hash_res;
> > diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
> > index a702cb603..6866f6818 100644
> > --- a/drivers/net/mlx5/mlx5_rxtx.h
> > +++ b/drivers/net/mlx5/mlx5_rxtx.h
> > @@ -104,6 +104,7 @@ struct mlx5_rxq_data {
> >  	void *cq_uar; /* CQ user access region. */
> >  	uint32_t cqn; /* CQ number. */
> >  	uint8_t cq_arm_sn; /* CQ arm seq number. */
> > +	uint32_t tunnel; /* Tunnel information. */
> >  } __rte_cache_aligned;
> >
> >  /* Verbs Rx queue elements. */
> > @@ -125,6 +126,7 @@ struct mlx5_rxq_ctrl {
> >  	struct mlx5_rxq_ibv *ibv; /* Verbs elements. */
> >  	struct mlx5_rxq_data rxq; /* Data path structure. */
> >  	unsigned int socket; /* CPU socket ID for allocations. */
> > +	uint32_t tunnel_types[16]; /* Tunnel type counter. */
> >  	unsigned int irq:1; /* Whether IRQ is enabled. */
> >  	uint16_t idx; /* Queue index. */
> >  };
> > @@ -145,6 +147,7 @@ struct mlx5_hrxq {
> >  	struct mlx5_ind_table_ibv *ind_table; /* Indirection table. */
> >  	struct ibv_qp *qp; /* Verbs queue pair. */
> >  	uint64_t hash_fields; /* Verbs Hash fields. */
> > +	uint32_t tunnel; /* Tunnel type. */
> >  	uint32_t rss_key_len; /* Hash key length in bytes. */
> >  	uint8_t rss_key[]; /* Hash key. */
> >  };
> > @@ -248,11 +251,13 @@ int mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev);
> >  struct mlx5_hrxq *mlx5_hrxq_new(struct rte_eth_dev *dev,
> >  				const uint8_t *rss_key, uint32_t rss_key_len,
> >  				uint64_t hash_fields,
> > -				const uint16_t *queues, uint32_t queues_n);
> > +				const uint16_t *queues, uint32_t queues_n,
> > +				uint32_t tunnel);
> >  struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev,
> >  				const uint8_t *rss_key, uint32_t rss_key_len,
> >  				uint64_t hash_fields,
> > -				const uint16_t *queues, uint32_t queues_n);
> > +				const uint16_t *queues, uint32_t queues_n,
> > +				uint32_t tunnel);
> >  int mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hxrq);
> >  int mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev);
> >  uint64_t mlx5_get_rx_port_offloads(void);
> > diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
> > index bbe1818ef..9f9136108 100644
> > --- a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
> > +++ b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
> > @@ -551,6 +551,7 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq,
> >  	const uint64x1_t mbuf_init = vld1_u64(&rxq->mbuf_initializer);
> >  	const uint64x1_t r32_mask = vcreate_u64(0xffffffff);
> >  	uint64x2_t rearm0, rearm1, rearm2, rearm3;
> > +	uint8_t pt_idx0, pt_idx1, pt_idx2, pt_idx3;
> >
> >  	if (rxq->mark) {
> >  		const uint32x4_t ft_def = vdupq_n_u32(MLX5_FLOW_MARK_DEFAULT);
> > @@ -583,14 +584,18 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq,
> >  	ptype = vshrn_n_u32(ptype_info, 10);
> >  	/* Errored packets will have RTE_PTYPE_ALL_MASK. */
> >  	ptype = vorr_u16(ptype, op_err);
> > -	pkts[0]->packet_type =
> > -		mlx5_ptype_table[vget_lane_u8(vreinterpret_u8_u16(ptype), 6)];
> > -	pkts[1]->packet_type =
> > -		mlx5_ptype_table[vget_lane_u8(vreinterpret_u8_u16(ptype), 4)];
> > -	pkts[2]->packet_type =
> > -		mlx5_ptype_table[vget_lane_u8(vreinterpret_u8_u16(ptype), 2)];
> > -	pkts[3]->packet_type =
> > -		mlx5_ptype_table[vget_lane_u8(vreinterpret_u8_u16(ptype), 0)];
> > +	pt_idx0 = vget_lane_u8(vreinterpret_u8_u16(ptype), 6);
> > +	pt_idx1 = vget_lane_u8(vreinterpret_u8_u16(ptype), 4);
> > +	pt_idx2 = vget_lane_u8(vreinterpret_u8_u16(ptype), 2);
> > +	pt_idx3 = vget_lane_u8(vreinterpret_u8_u16(ptype), 0);
> > +	pkts[0]->packet_type = mlx5_ptype_table[pt_idx0] |
> > +			       !!(pt_idx0 & (1 << 6)) * rxq->tunnel;
> > +	pkts[1]->packet_type = mlx5_ptype_table[pt_idx1] |
> > +			       !!(pt_idx1 & (1 << 6)) * rxq->tunnel;
> > +	pkts[2]->packet_type = mlx5_ptype_table[pt_idx2] |
> > +			       !!(pt_idx2 & (1 << 6)) * rxq->tunnel;
> > +	pkts[3]->packet_type = mlx5_ptype_table[pt_idx3] |
> > +			       !!(pt_idx3 & (1 << 6)) * rxq->tunnel;
> >  	/* Fill flags for checksum and VLAN. */
> >  	pinfo = vandq_u32(ptype_info, ptype_ol_mask);
> >  	pinfo = vreinterpretq_u32_u8(
> > diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
> > index c088bcb51..d2492481d 100644
> > --- a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
> > +++ b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
> > @@ -542,6 +542,7 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq, __m128i cqes[4],
> >  	const __m128i mbuf_init =
> >  		_mm_loadl_epi64((__m128i *)&rxq->mbuf_initializer);
> >  	__m128i rearm0, rearm1, rearm2, rearm3;
> > +	uint8_t pt_idx0, pt_idx1, pt_idx2, pt_idx3;
> >
> >  	/* Extract pkt_info field. */
> >  	pinfo0 = _mm_unpacklo_epi32(cqes[0], cqes[1]); @@ -595,10 +596,18 @@
> > rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq, __m128i cqes[4],
> >  	/* Errored packets will have RTE_PTYPE_ALL_MASK. */
> >  	op_err = _mm_srli_epi16(op_err, 8);
> >  	ptype = _mm_or_si128(ptype, op_err);
> > -	pkts[0]->packet_type = mlx5_ptype_table[_mm_extract_epi8(ptype, 0)];
> > -	pkts[1]->packet_type = mlx5_ptype_table[_mm_extract_epi8(ptype, 2)];
> > -	pkts[2]->packet_type = mlx5_ptype_table[_mm_extract_epi8(ptype, 4)];
> > -	pkts[3]->packet_type = mlx5_ptype_table[_mm_extract_epi8(ptype, 6)];
> > +	pt_idx0 = _mm_extract_epi8(ptype, 0);
> > +	pt_idx1 = _mm_extract_epi8(ptype, 2);
> > +	pt_idx2 = _mm_extract_epi8(ptype, 4);
> > +	pt_idx3 = _mm_extract_epi8(ptype, 6);
> > +	pkts[0]->packet_type = mlx5_ptype_table[pt_idx0] |
> > +			       !!(pt_idx0 & (1 << 6)) * rxq->tunnel;
> > +	pkts[1]->packet_type = mlx5_ptype_table[pt_idx1] |
> > +			       !!(pt_idx1 & (1 << 6)) * rxq->tunnel;
> > +	pkts[2]->packet_type = mlx5_ptype_table[pt_idx2] |
> > +			       !!(pt_idx2 & (1 << 6)) * rxq->tunnel;
> > +	pkts[3]->packet_type = mlx5_ptype_table[pt_idx3] |
> > +			       !!(pt_idx3 & (1 << 6)) * rxq->tunnel;
> >  	/* Fill flags for checksum and VLAN. */
> >  	pinfo = _mm_and_si128(pinfo, ptype_ol_mask);
> >  	pinfo = _mm_shuffle_epi8(cv_flag_sel, pinfo);
> > --
> > 2.13.3
> 
> 
> --
> Nélio Laranjeiro
> 6WIND

