[dpdk-dev,v2,3/4] net/i40e: support tunnel filter to VF

Message ID 1490265990-121019-4-git-send-email-beilei.xing@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Ferruh Yigit

Checks

Context               Check    Description
ci/checkpatch         warning  coding style issues
ci/Intel-compilation  fail     Compilation issues

Commit Message

Xing, Beilei March 23, 2017, 10:46 a.m. UTC
  Previously, only tunnel filters to the PF were supported.
This patch adds the i40e_dev_consistent_tunnel_filter_set
function to the consistent filter API so that tunnel
filters can also be directed to a VF.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/i40e/i40e_ethdev.c | 145 +++++++++++++++++++++++++++++++++++++++++
 drivers/net/i40e/i40e_ethdev.h |  32 +++++++++
 drivers/net/i40e/i40e_flow.c   |  50 +++++++++-----
 3 files changed, 212 insertions(+), 15 deletions(-)
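
For readers coming from the rte_flow side, the sketch below illustrates what
this change enables: a flow rule whose action list is VF + QUEUE steers matched
VXLAN traffic to a VF instead of the PF. It is illustrative only and not part
of the patch; vxlan_to_vf_rule is a hypothetical helper, the VNI, inner MAC,
VF id and queue id are placeholders, and the exact item/mask combinations the
driver accepts are those defined in i40e_flow.c.

#include <rte_flow.h>

static struct rte_flow *
vxlan_to_vf_rule(uint16_t port_id, uint32_t vf_id, uint16_t queue_id)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	/* Match VNI 42 (placeholder) and a placeholder inner MAC. */
	struct rte_flow_item_vxlan vxlan_spec = { .vni = { 0x00, 0x00, 0x2a } };
	struct rte_flow_item_vxlan vxlan_mask = { .vni = { 0xff, 0xff, 0xff } };
	struct rte_flow_item_eth inner_eth_spec = {
		.dst.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
	};
	struct rte_flow_item_eth inner_eth_mask = {
		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
		  .spec = &vxlan_spec, .mask = &vxlan_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_ETH,	/* inner MAC */
		  .spec = &inner_eth_spec, .mask = &inner_eth_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	/* The new behavior: VF action first, then (optionally) QUEUE. */
	struct rte_flow_action_vf vf_act = { .id = vf_id };
	struct rte_flow_action_queue queue_act = { .index = queue_id };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_VF, .conf = &vf_act },
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue_act },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	return rte_flow_create(port_id, &attr, pattern, actions, &err);
}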
  

Comments

Jingjing Wu March 27, 2017, 9:54 a.m. UTC | #1
> -----Original Message-----
> From: Xing, Beilei
> Sent: Thursday, March 23, 2017 6:46 PM
> To: Wu, Jingjing <jingjing.wu@intel.com>
> Cc: Zhang, Helin <helin.zhang@intel.com>; dev@dpdk.org
> Subject: [PATCH v2 3/4] net/i40e: support tunnel filter to VF
> 
> Previously, only tunnel filter to PF is supported.
> This patch adds i40e_dev_consistent_tunnel_filter_set
> function for consistent filter API to support tunnel filter to VF.
> 
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> ---
>  drivers/net/i40e/i40e_ethdev.c | 145
> +++++++++++++++++++++++++++++++++++++++++
>  drivers/net/i40e/i40e_ethdev.h |  32 +++++++++
>  drivers/net/i40e/i40e_flow.c   |  50 +++++++++-----
>  3 files changed, 212 insertions(+), 15 deletions(-)
> 
> diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
> index 5e6cc59..2b3d41b 100644
> --- a/drivers/net/i40e/i40e_ethdev.c
> +++ b/drivers/net/i40e/i40e_ethdev.c
> @@ -6935,6 +6935,151 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
>  	return ret;
>  }
> 
> +int
> +i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
> +		      struct i40e_tunnel_filter_conf *tunnel_filter,
> +		      uint8_t add)
> +{
> +	uint16_t ip_type;
> +	uint32_t ipv4_addr;
> +	uint8_t i, tun_type = 0;
> +	/* internal variable to convert ipv6 byte order */
> +	uint32_t convert_ipv6[4];
> +	int val, ret = 0;
> +	struct i40e_pf_vf *vf = NULL;
> +	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
> +	struct i40e_vsi *vsi;
> +	struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter;
> +	struct i40e_aqc_add_rm_cloud_filt_elem_ext *pfilter;
> +	struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
> +	struct i40e_tunnel_filter *tunnel, *node;
> +	struct i40e_tunnel_filter check_filter; /* Check if filter exists */
> +	bool big_buffer = 0;
> +
> +	cld_filter = rte_zmalloc("tunnel_filter",
> +			 sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
> +			 0);
> +
> +	if (cld_filter == NULL) {
> +		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
> +		return -EINVAL;
-EINVAL? I think -ENOMEM is better.
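A minimal sketch of the suggested change (not part of the posted patch):

	if (cld_filter == NULL) {
		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
		return -ENOMEM;	/* allocation failure, not an invalid argument */
	}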


> +	if (add) {
> +		if (big_buffer)
> +			ret = i40e_aq_add_cloud_filters_big_buffer(hw,
> +						   vsi->seid, cld_filter, 1);
> +		else
> +			ret = i40e_aq_add_cloud_filters(hw,
> +					vsi->seid, &cld_filter->element, 1);
> +		if (ret < 0) {
> +			PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
> +			return ret;
ret holds an "enum i40e_status_code" value here, so returning it directly
will not be consistent with the negative errno values used elsewhere.
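One possible way to keep errno-style returns (a sketch only, not the posted
patch; the mapping to -EIO is illustrative, and note the posted code also
returns here without freeing cld_filter):

	enum i40e_status_code status;	/* declared with the other locals */

	if (big_buffer)
		status = i40e_aq_add_cloud_filters_big_buffer(hw,
					   vsi->seid, cld_filter, 1);
	else
		status = i40e_aq_add_cloud_filters(hw,
				vsi->seid, &cld_filter->element, 1);
	if (status < 0) {
		PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
		rte_free(cld_filter);	/* avoid leaking on this error path */
		return -EIO;	/* return a negative errno, not the AQ status */
	}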


> +
>  #define I40E_MIRROR_MAX_ENTRIES_PER_RULE   64
>  #define I40E_MAX_MIRROR_RULES           64
>  /*
> @@ -718,6 +746,7 @@ union i40e_filter_t {
>  	struct rte_eth_ethertype_filter ethertype_filter;
>  	struct rte_eth_fdir_filter fdir_filter;
>  	struct rte_eth_tunnel_filter_conf tunnel_filter;
> +	struct i40e_tunnel_filter_conf consistent_tunnel_filter;
>  };
> 
I like using driver-specific structures instead of the old rte_eth_XX ones.
Glad to see this change; it would be better to convert the other structs too. :)

Acked-by: Jingjing Wu <jingjing.wu@intel.com>

Thanks
Jingjing
  
Xing, Beilei March 27, 2017, 10:37 a.m. UTC | #2
> -----Original Message-----
> From: Wu, Jingjing
> Sent: Monday, March 27, 2017 5:55 PM
> To: Xing, Beilei <beilei.xing@intel.com>
> Cc: Zhang, Helin <helin.zhang@intel.com>; dev@dpdk.org
> Subject: RE: [PATCH v2 3/4] net/i40e: support tunnel filter to VF
> 
> 
> 
> > -----Original Message-----
> > From: Xing, Beilei
> > Sent: Thursday, March 23, 2017 6:46 PM
> > To: Wu, Jingjing <jingjing.wu@intel.com>
> > Cc: Zhang, Helin <helin.zhang@intel.com>; dev@dpdk.org
> > Subject: [PATCH v2 3/4] net/i40e: support tunnel filter to VF
> >
> > Previously, only tunnel filter to PF is supported.
> > This patch adds i40e_dev_consistent_tunnel_filter_set
> > function for consistent filter API to support tunnel filter to VF.
> >
> > Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> > ---
> >  drivers/net/i40e/i40e_ethdev.c | 145
> > +++++++++++++++++++++++++++++++++++++++++
> >  drivers/net/i40e/i40e_ethdev.h |  32 +++++++++
> >  drivers/net/i40e/i40e_flow.c   |  50 +++++++++-----
> >  3 files changed, 212 insertions(+), 15 deletions(-)
> >
> > diff --git a/drivers/net/i40e/i40e_ethdev.c
> > b/drivers/net/i40e/i40e_ethdev.c index 5e6cc59..2b3d41b 100644
> > --- a/drivers/net/i40e/i40e_ethdev.c
> > +++ b/drivers/net/i40e/i40e_ethdev.c
> > @@ -6935,6 +6935,151 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
> >  	return ret;
> >  }
> >
> > +int
> > +i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
> > +		      struct i40e_tunnel_filter_conf *tunnel_filter,
> > +		      uint8_t add)
> > +{
> > +	uint16_t ip_type;
> > +	uint32_t ipv4_addr;
> > +	uint8_t i, tun_type = 0;
> > +	/* internal variable to convert ipv6 byte order */
> > +	uint32_t convert_ipv6[4];
> > +	int val, ret = 0;
> > +	struct i40e_pf_vf *vf = NULL;
> > +	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
> > +	struct i40e_vsi *vsi;
> > +	struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter;
> > +	struct i40e_aqc_add_rm_cloud_filt_elem_ext *pfilter;
> > +	struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
> > +	struct i40e_tunnel_filter *tunnel, *node;
> > +	struct i40e_tunnel_filter check_filter; /* Check if filter exists */
> > +	bool big_buffer = 0;
> > +
> > +	cld_filter = rte_zmalloc("tunnel_filter",
> > +			 sizeof(struct
> i40e_aqc_add_rm_cloud_filt_elem_ext),
> > +			 0);
> > +
> > +	if (cld_filter == NULL) {
> > +		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
> > +		return -EINVAL;
> -EINVAL? I think -ENOMEM is better.
Agree.

> 
> 
> > +	if (add) {
> > +		if (big_buffer)
> > +			ret = i40e_aq_add_cloud_filters_big_buffer(hw,
> > +						   vsi->seid, cld_filter, 1);
> > +		else
> > +			ret = i40e_aq_add_cloud_filters(hw,
> > +					vsi->seid, &cld_filter->element, 1);
> > +		if (ret < 0) {
> > +			PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
> > +			return ret;
> ret holds an "enum i40e_status_code" value here, so returning it directly
> will not be consistent with the negative errno values used elsewhere.
> 
> 
> > +
> >  #define I40E_MIRROR_MAX_ENTRIES_PER_RULE   64
> >  #define I40E_MAX_MIRROR_RULES           64
> >  /*
> > @@ -718,6 +746,7 @@ union i40e_filter_t {
> >  	struct rte_eth_ethertype_filter ethertype_filter;
> >  	struct rte_eth_fdir_filter fdir_filter;
> >  	struct rte_eth_tunnel_filter_conf tunnel_filter;
> > +	struct i40e_tunnel_filter_conf consistent_tunnel_filter;
> >  };
> >
> I like using driver-specific structures instead of the old rte_eth_XX ones.
> Glad to see this change; it would be better to convert the other structs too. :)

Thanks. What do you think about changing the other structures after this release?

> 
> Acked-by: Jingjing Wu <jingjing.wu@intel.com>
> 
> Thanks
> Jingjing
  

Patch

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 5e6cc59..2b3d41b 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -6935,6 +6935,151 @@  i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
 	return ret;
 }
 
+int
+i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
+		      struct i40e_tunnel_filter_conf *tunnel_filter,
+		      uint8_t add)
+{
+	uint16_t ip_type;
+	uint32_t ipv4_addr;
+	uint8_t i, tun_type = 0;
+	/* internal variable to convert ipv6 byte order */
+	uint32_t convert_ipv6[4];
+	int val, ret = 0;
+	struct i40e_pf_vf *vf = NULL;
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+	struct i40e_vsi *vsi;
+	struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter;
+	struct i40e_aqc_add_rm_cloud_filt_elem_ext *pfilter;
+	struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
+	struct i40e_tunnel_filter *tunnel, *node;
+	struct i40e_tunnel_filter check_filter; /* Check if filter exists */
+	bool big_buffer = 0;
+
+	cld_filter = rte_zmalloc("tunnel_filter",
+			 sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
+			 0);
+
+	if (cld_filter == NULL) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -EINVAL;
+	}
+	pfilter = cld_filter;
+
+	ether_addr_copy(&tunnel_filter->outer_mac,
+			(struct ether_addr *)&pfilter->element.outer_mac);
+	ether_addr_copy(&tunnel_filter->inner_mac,
+			(struct ether_addr *)&pfilter->element.inner_mac);
+
+	pfilter->element.inner_vlan =
+		rte_cpu_to_le_16(tunnel_filter->inner_vlan);
+	if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) {
+		ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
+		ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
+		rte_memcpy(&pfilter->element.ipaddr.v4.data,
+				&rte_cpu_to_le_32(ipv4_addr),
+				sizeof(pfilter->element.ipaddr.v4.data));
+	} else {
+		ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
+		for (i = 0; i < 4; i++) {
+			convert_ipv6[i] =
+			rte_cpu_to_le_32(rte_be_to_cpu_32(
+					 tunnel_filter->ip_addr.ipv6_addr[i]));
+		}
+		rte_memcpy(&pfilter->element.ipaddr.v6.data,
+			   &convert_ipv6,
+			   sizeof(pfilter->element.ipaddr.v6.data));
+	}
+
+	/* check tunneled type */
+	switch (tunnel_filter->tunnel_type) {
+	case RTE_TUNNEL_TYPE_VXLAN:
+		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
+		break;
+	case RTE_TUNNEL_TYPE_NVGRE:
+		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
+		break;
+	case RTE_TUNNEL_TYPE_IP_IN_GRE:
+		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
+		break;
+	default:
+		/* Other tunnel types is not supported. */
+		PMD_DRV_LOG(ERR, "tunnel type is not supported.");
+		rte_free(cld_filter);
+		return -EINVAL;
+	}
+
+	val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
+				       &pfilter->element.flags);
+	if (val < 0) {
+		rte_free(cld_filter);
+		return -EINVAL;
+	}
+
+	pfilter->element.flags |= rte_cpu_to_le_16(
+		I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
+		ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
+	pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
+	pfilter->element.queue_number =
+		rte_cpu_to_le_16(tunnel_filter->queue_id);
+
+	if (!tunnel_filter->is_to_vf)
+		vsi = pf->main_vsi;
+	else {
+		if (tunnel_filter->vf_id >= pf->vf_num) {
+			PMD_DRV_LOG(ERR, "Invalid argument.");
+			return -EINVAL;
+		}
+		vf = &pf->vfs[tunnel_filter->vf_id];
+		vsi = vf->vsi;
+	}
+
+	/* Check if there is the filter in SW list */
+	memset(&check_filter, 0, sizeof(check_filter));
+	i40e_tunnel_filter_convert(cld_filter, &check_filter);
+	node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
+	if (add && node) {
+		PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
+		return -EINVAL;
+	}
+
+	if (!add && !node) {
+		PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
+		return -EINVAL;
+	}
+
+	if (add) {
+		if (big_buffer)
+			ret = i40e_aq_add_cloud_filters_big_buffer(hw,
+						   vsi->seid, cld_filter, 1);
+		else
+			ret = i40e_aq_add_cloud_filters(hw,
+					vsi->seid, &cld_filter->element, 1);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
+			return ret;
+		}
+		tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
+		rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
+		ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
+	} else {
+		if (big_buffer)
+			ret = i40e_aq_remove_cloud_filters_big_buffer(
+				hw, vsi->seid, cld_filter, 1);
+		else
+			ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
+					   &cld_filter->element, 1);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
+			return ret;
+		}
+		ret = i40e_sw_tunnel_filter_del(pf, &node->input);
+	}
+
+	rte_free(cld_filter);
+	return ret;
+}
+
 static int
 i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port)
 {
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 2ca0fe5..d976f7a 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -514,6 +514,7 @@  struct i40e_tunnel_filter_input {
 	uint16_t flags;          /* Filter type flag */
 	uint32_t tenant_id;      /* Tenant id to match */
 	uint16_t general_fields[32];  /* Big buffer */
+	uint16_t vf_id;         /* VF id for tunnel filtering. */
 };
 
 struct i40e_tunnel_filter {
@@ -530,6 +531,33 @@  struct i40e_tunnel_rule {
 	struct rte_hash *hash_table;
 };
 
+/**
+ * Tunneling Packet filter configuration.
+ */
+struct i40e_tunnel_filter_conf {
+	struct ether_addr outer_mac;    /**< Outer MAC address to match. */
+	struct ether_addr inner_mac;    /**< Inner MAC address to match. */
+	uint16_t inner_vlan;            /**< Inner VLAN to match. */
+	uint32_t outer_vlan;            /**< Outer VLAN to match */
+	enum rte_tunnel_iptype ip_type; /**< IP address type. */
+	/**
+	 * Outer destination IP address to match if ETH_TUNNEL_FILTER_OIP
+	 * is set in filter_type, or inner destination IP address to match
+	 * if ETH_TUNNEL_FILTER_IIP is set in filter_type.
+	 */
+	union {
+		uint32_t ipv4_addr;     /**< IPv4 address in big endian. */
+		uint32_t ipv6_addr[4];  /**< IPv6 address in big endian. */
+	} ip_addr;
+	/** Flags from ETH_TUNNEL_FILTER_XX - see above. */
+	uint16_t filter_type;
+	enum rte_eth_tunnel_type tunnel_type; /**< Tunnel Type. */
+	uint32_t tenant_id;     /**< Tenant ID to match. VNI, GRE key... */
+	uint16_t queue_id;      /**< Queue assigned to if match. */
+	uint8_t is_to_vf;       /**< 0 - to PF, 1 - to VF */
+	uint16_t vf_id;         /**< VF id for tunnel filter insertion. */
+};
+
 #define I40E_MIRROR_MAX_ENTRIES_PER_RULE   64
 #define I40E_MAX_MIRROR_RULES           64
 /*
@@ -718,6 +746,7 @@  union i40e_filter_t {
 	struct rte_eth_ethertype_filter ethertype_filter;
 	struct rte_eth_fdir_filter fdir_filter;
 	struct rte_eth_tunnel_filter_conf tunnel_filter;
+	struct i40e_tunnel_filter_conf consistent_tunnel_filter;
 };
 
 typedef int (*parse_filter_t)(struct rte_eth_dev *dev,
@@ -811,6 +840,9 @@  int i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
 int i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
 			       struct rte_eth_tunnel_filter_conf *tunnel_filter,
 			       uint8_t add);
+int i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
+				  struct i40e_tunnel_filter_conf *tunnel_filter,
+				  uint8_t add);
 int i40e_fdir_flush(struct rte_eth_dev *dev);
 
 #define I40E_DEV_TO_PCI(eth_dev) \
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 412cd22..449299a 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -93,7 +93,7 @@  static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
 static int i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
 				 const struct rte_flow_action *actions,
 				 struct rte_flow_error *error,
-				 struct rte_eth_tunnel_filter_conf *filter);
+				 struct i40e_tunnel_filter_conf *filter);
 static int i40e_flow_parse_attr(const struct rte_flow_attr *attr,
 				struct rte_flow_error *error);
 static int i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
@@ -1127,34 +1127,54 @@  i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
 }
 
 /* Parse to get the action info of a tunnle filter
- * Tunnel action only supports QUEUE.
+ * Tunnel action only supports PF, VF and QUEUE.
  */
 static int
 i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
 			      const struct rte_flow_action *actions,
 			      struct rte_flow_error *error,
-			      struct rte_eth_tunnel_filter_conf *filter)
+			      struct i40e_tunnel_filter_conf *filter)
 {
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	const struct rte_flow_action *act;
 	const struct rte_flow_action_queue *act_q;
+	const struct rte_flow_action_vf *act_vf;
 	uint32_t index = 0;
 
-	/* Check if the first non-void action is QUEUE. */
+	/* Check if the first non-void action is PF or VF. */
 	NEXT_ITEM_OF_ACTION(act, actions, index);
-	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+	if (act->type != RTE_FLOW_ACTION_TYPE_PF &&
+	    act->type != RTE_FLOW_ACTION_TYPE_VF) {
 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
 				   act, "Not supported action.");
 		return -rte_errno;
 	}
 
-	act_q = (const struct rte_flow_action_queue *)act->conf;
-	filter->queue_id = act_q->index;
-	if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
-		rte_flow_error_set(error, EINVAL,
+	if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
+		act_vf = (const struct rte_flow_action_vf *)act->conf;
+		filter->vf_id = act_vf->id;
+		filter->is_to_vf = 1;
+		if (filter->vf_id >= pf->vf_num) {
+			rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION,
+				   act, "Invalid VF ID for tunnel filter");
+			return -rte_errno;
+		}
+	}
+
+	/* Check if the next non-void item is QUEUE */
+	index++;
+	NEXT_ITEM_OF_ACTION(act, actions, index);
+	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+		act_q = (const struct rte_flow_action_queue *)act->conf;
+		filter->queue_id = act_q->index;
+		if (!filter->is_to_vf)
+			if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
+				rte_flow_error_set(error, EINVAL,
 				   RTE_FLOW_ERROR_TYPE_ACTION,
 				   act, "Invalid queue ID for tunnel filter");
-		return -rte_errno;
+				return -rte_errno;
+			}
 	}
 
 	/* Check if the next non-void item is END */
@@ -1204,7 +1224,7 @@  static int
 i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
 			      const struct rte_flow_item *pattern,
 			      struct rte_flow_error *error,
-			      struct rte_eth_tunnel_filter_conf *filter)
+			      struct i40e_tunnel_filter_conf *filter)
 {
 	const struct rte_flow_item *item = pattern;
 	const struct rte_flow_item_eth *eth_spec;
@@ -1473,8 +1493,8 @@  i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
 			     struct rte_flow_error *error,
 			     union i40e_filter_t *filter)
 {
-	struct rte_eth_tunnel_filter_conf *tunnel_filter =
-		&filter->tunnel_filter;
+	struct i40e_tunnel_filter_conf *tunnel_filter =
+		&filter->consistent_tunnel_filter;
 	int ret;
 
 	ret = i40e_flow_parse_vxlan_pattern(dev, pattern,
@@ -1605,8 +1625,8 @@  i40e_flow_create(struct rte_eth_dev *dev,
 					i40e_fdir_filter_list);
 		break;
 	case RTE_ETH_FILTER_TUNNEL:
-		ret = i40e_dev_tunnel_filter_set(pf,
-					 &cons_filter.tunnel_filter, 1);
+		ret = i40e_dev_consistent_tunnel_filter_set(pf,
+			    &cons_filter.consistent_tunnel_filter, 1);
 		if (ret)
 			goto free_flow;
 		flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,