[dpdk-dev] [PATCH 13/24] net/i40e: parse VXLAN filter

Beilei Xing <beilei.xing@intel.com>
Fri Dec 2 12:53:34 CET 2016


Check whether a flow rule is a VXLAN rule and, if so, extract the
tunnel filter info from it. The pattern may start at any layer of
the outer ETH / IPV4|IPV6 / UDP / VXLAN stack and must be followed
by the inner ETH and an optional VLAN; the only supported action
is QUEUE.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/i40e/i40e_ethdev.c | 349 +++++++++++++++++++++++++++++++++++++++++
 1 file changed, 349 insertions(+)
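
As an illustration (not part of the patch), a rule that this parser
accepts could be built with the generic flow API roughly as below.
This is only a sketch: port_id, the queue index and all matched
values are made-up examples, and the attribute/pattern checks come
from the earlier patches of this series.

	uint8_t port_id = 0;	/* assumed: a port driven by i40e */
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_vxlan vxlan_spec = {
		.vni = { 0x00, 0x12, 0x34 },	/* VNI 0x001234 */
	};
	struct rte_flow_item_vxlan vxlan_mask = {
		.vni = { 0xFF, 0xFF, 0xFF },	/* VNI fully matched */
	};
	struct rte_flow_item_eth inner_eth_spec = {
		.dst.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
	};
	struct rte_flow_item_eth inner_eth_mask = {
		.dst.addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
	};
	struct rte_flow_item pattern[] = {
		/* IPV4 and UDP only describe the protocol stack. */
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
		  .spec = &vxlan_spec, .mask = &vxlan_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
		  .spec = &inner_eth_spec, .mask = &inner_eth_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	/* Parsed as an inner MAC + VNI (IMAC_TENID) tunnel filter. */
	int ret = rte_flow_validate(port_id, &attr, pattern,
				    actions, &err);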

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 18247c0..3bdef8e 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -497,6 +497,11 @@ static int i40e_parse_macvlan_filter(const struct rte_flow_attr *attr,
 				     const struct rte_flow_action *actions,
 				     struct rte_eth_mac_filter *filter,
 				     struct rte_flow_error *error);
+static int i40e_parse_tunnel_filter(const struct rte_flow_attr *attr,
+				    const struct rte_flow_item *pattern,
+				    const struct rte_flow_action *actions,
+				    struct rte_eth_tunnel_filter_conf *filter,
+				    struct rte_flow_error *error);
 static int i40e_flow_validate(__rte_unused struct rte_eth_dev *dev,
 			      const struct rte_flow_attr *attr,
 			      const struct rte_flow_item *pattern,
@@ -10394,6 +10399,344 @@ i40e_parse_macvlan_filter(const struct rte_flow_attr *attr,
 	return i40e_parse_attr(attr, error);
 }
 
+/* Parse the actions and attributes of a tunnel filter rule. */
+static int
+i40e_parse_tunnel_act_attr(const struct rte_flow_attr *attr,
+			   const struct rte_flow_action *actions,
+			   struct rte_eth_tunnel_filter_conf *filter,
+			   struct rte_flow_error *error)
+{
+	const struct rte_flow_action *act;
+	const struct rte_flow_action_queue *act_q;
+	uint32_t i;
+
+	/* parse action */
+	i = 0;
+
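+	/**
+	 * ACTION_SKIP_VOID, introduced by the earlier parsing patches
+	 * in this series, advances 'act' to the next non-VOID action
+	 * and bails out with the given error type when the action
+	 * list is exhausted.
+	 */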
+	/* Check if the first non-void action is QUEUE. */
+	ACTION_SKIP_VOID(filter, struct rte_eth_tunnel_filter_conf,
+			 RTE_FLOW_ERROR_TYPE_ACTION_NUM);
+	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+		error->type = RTE_FLOW_ERROR_TYPE_ACTION;
+		return -EINVAL;
+	}
+
+	act_q = (const struct rte_flow_action_queue *)act->conf;
+	filter->queue_id = act_q->index;
+
+	/* Check if the next non-void action is END. */
+	i++;
+	ACTION_SKIP_VOID(filter, struct rte_eth_tunnel_filter_conf,
+			 RTE_FLOW_ERROR_TYPE_ACTION_NUM);
+	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+		error->type = RTE_FLOW_ERROR_TYPE_ACTION;
+		return -EINVAL;
+	}
+
+	return i40e_parse_attr(attr, error);
+}
+
+/**
+ * Parse the rule to check whether it is a VXLAN rule,
+ * and fill in the tunnel filter info if it is.
+ */
+static int
+i40e_parse_vxlan_tunnel_filter(const struct rte_flow_attr *attr,
+			       const struct rte_flow_item *pattern,
+			       const struct rte_flow_action *actions,
+			       struct rte_eth_tunnel_filter_conf *filter,
+			       struct rte_flow_error *error)
+{
+	const struct rte_flow_item *item;
+	const struct rte_flow_item_eth *o_eth_spec = NULL;
+	const struct rte_flow_item_eth *o_eth_mask = NULL;
+	const struct rte_flow_item_vxlan *vxlan_spec = NULL;
+	const struct rte_flow_item_vxlan *vxlan_mask = NULL;
+	const struct rte_flow_item_eth *i_eth_spec, *i_eth_mask;
+	const struct rte_flow_item_vlan *vlan_spec = NULL;
+	const struct rte_flow_item_vlan *vlan_mask = NULL;
+	struct ether_addr macaddr_unmasked = {
+		.addr_bytes = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}
+	};
+	struct ether_addr macaddr_masked = {
+		.addr_bytes = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0}
+	};
+	bool is_vni_masked = 0;
+	uint32_t i, j;
+
+	/* parse pattern */
+	i = 0;
+
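+	/**
+	 * PATTERN_SKIP_VOID, introduced by the earlier parsing patches
+	 * in this series, advances 'item' to the next non-VOID pattern
+	 * item and bails out with the given error type when the
+	 * pattern ends.
+	 */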
+	/* The first non-void item must be ETH, IPV4/IPV6, UDP or VXLAN */
+	PATTERN_SKIP_VOID(filter, struct rte_eth_tunnel_filter_conf,
+			  RTE_FLOW_ERROR_TYPE_ITEM_NUM);
+	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
+	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+	    item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
+		error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+		return -EINVAL;
+	}
+
+	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+		o_eth_spec = (const struct rte_flow_item_eth *)item->spec;
+		o_eth_mask = (const struct rte_flow_item_eth *)item->mask;
+
+		if ((!o_eth_spec && o_eth_mask) ||
+		    (o_eth_spec && !o_eth_mask)) {
+			error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+			return -EINVAL;
+		}
+
+		if (o_eth_spec)
+			rte_memcpy(&filter->outer_mac, &o_eth_spec->dst,
+				   ETHER_ADDR_LEN);
+
+		if (o_eth_mask) {
+			/**
+			 * The outer DST MAC must be fully matched
+			 * (mask all 0xFF), while the outer SRC MAC
+			 * and the Ethertype must be wildcarded.
+			 */
+			if (!is_same_ether_addr(&o_eth_mask->dst,
+						&macaddr_unmasked) ||
+			    !is_same_ether_addr(&o_eth_mask->src,
+						&macaddr_masked) ||
+			    o_eth_mask->type) {
+				error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+				return -EINVAL;
+			}
+		}
+
+		i++;
+		PATTERN_SKIP_VOID(filter, struct rte_eth_tunnel_filter_conf,
+				  RTE_FLOW_ERROR_TYPE_ITEM_NUM);
+		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+		    item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
+			error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+			return -EINVAL;
+		}
+	}
+
+	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
+	    item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
+		/**
+		 * An IP item here only describes the protocol stack;
+		 * its spec and mask must both be NULL.
+		 */
+		if (item->spec || item->mask) {
+			error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+			return -EINVAL;
+		}
+
+		/* Check if the next non-void item is UDP. */
+		i++;
+		PATTERN_SKIP_VOID(filter, struct rte_eth_tunnel_filter_conf,
+				  RTE_FLOW_ERROR_TYPE_ITEM_NUM);
+		if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
+			error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+			return -EINVAL;
+		}
+	}
+
+	if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
+		/**
+		 * A UDP item here only describes the protocol stack;
+		 * its spec and mask must both be NULL.
+		 */
+		if (item->spec || item->mask) {
+			error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+			return -EINVAL;
+		}
+
+		/* Check if the next non-void item is VXLAN. */
+		i++;
+		PATTERN_SKIP_VOID(filter, struct rte_eth_tunnel_filter_conf,
+				  RTE_FLOW_ERROR_TYPE_ITEM_NUM);
+	}
+
+	if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
+		error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+		return -EINVAL;
+	}
+
+	vxlan_spec = (const struct rte_flow_item_vxlan *)item->spec;
+	vxlan_mask = (const struct rte_flow_item_vxlan *)item->mask;
+
+	/**
+	 * Check whether the VXLAN item only describes the protocol
+	 * stack: if so, both vxlan_spec and vxlan_mask must be NULL;
+	 * otherwise, both must be non-NULL.
+	 */
+	if ((!vxlan_spec && vxlan_mask) ||
+	    (vxlan_spec && !vxlan_mask)) {
+		error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+		return -EINVAL;
+	}
+
+	/**
+	 * Check how the VNI is masked: the mask bytes must be uniform,
+	 * either all 0xFF (VNI fully matched) or all 0 (VNI ignored);
+	 * a partially masked VNI is not supported.
+	 */
+	if (vxlan_mask) {
+		for (j = 0; j < RTE_DIM(vxlan_mask->vni); j++) {
+			if (vxlan_mask->vni[j] != vxlan_mask->vni[0] ||
+			    (vxlan_mask->vni[j] != 0xFF &&
+			     vxlan_mask->vni[j] != 0)) {
+				error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+				return -EINVAL;
+			}
+		}
+		is_vni_masked = (vxlan_mask->vni[0] == 0);
+	}
+
+	/* Check if the next non-void item is the inner ETH. */
+	i++;
+	PATTERN_SKIP_VOID(filter, struct rte_eth_tunnel_filter_conf,
+			  RTE_FLOW_ERROR_TYPE_ITEM_NUM);
+	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
+		error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+		return -EINVAL;
+	}
+
+	i_eth_spec = (const struct rte_flow_item_eth *)item->spec;
+	i_eth_mask = (const struct rte_flow_item_eth *)item->mask;
+
+	if (!i_eth_spec || !i_eth_mask) {
+		error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+		return -EINVAL;
+	}
+
+	/**
+	 * The inner DST MAC must be fully matched (mask all 0xFF),
+	 * while the inner SRC MAC and the Ethertype must be
+	 * wildcarded.
+	 */
+	if (!is_same_ether_addr(&i_eth_mask->dst, &macaddr_unmasked) ||
+	    !is_same_ether_addr(&i_eth_mask->src, &macaddr_masked) ||
+	    i_eth_mask->type) {
+		error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+		return -EINVAL;
+	}
+
+	rte_memcpy(&filter->inner_mac, &i_eth_spec->dst,
+		   ETHER_ADDR_LEN);
+
+	/* Check if the next non-void item is VLAN or END. */
+	i++;
+	PATTERN_SKIP_VOID(filter, struct rte_eth_tunnel_filter_conf,
+			  RTE_FLOW_ERROR_TYPE_ITEM_NUM);
+	if (item->type != RTE_FLOW_ITEM_TYPE_VLAN &&
+	    item->type != RTE_FLOW_ITEM_TYPE_END) {
+		error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+		return -EINVAL;
+	}
+
+	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+		vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
+		vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
+
+		if (!(vlan_spec && vlan_mask)) {
+			error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+			return -EINVAL;
+		}
+
+		/* Check if the next non-void item is END. */
+		i++;
+		PATTERN_SKIP_VOID(filter, struct rte_eth_tunnel_filter_conf,
+				  RTE_FLOW_ERROR_TYPE_ITEM_NUM);
+		if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+			error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+			return -EINVAL;
+		}
+	}
+
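+	/**
+	 * Derive the filter type from the matched fields: the inner
+	 * DST MAC is always required, while inner VLAN, VNI and outer
+	 * DST MAC are optional.  Combinations not handled below, e.g.
+	 * an outer MAC together with an inner VLAN, are rejected.
+	 */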
+	if (vlan_spec && vlan_mask &&
+	    (vlan_mask->tci == rte_cpu_to_be_16(0x0FFF))) {
+		filter->inner_vlan = rte_be_to_cpu_16(vlan_spec->tci) & 0xFFF;
+		if (vxlan_spec && vxlan_mask && !is_vni_masked) {
+			rte_memcpy(&filter->tenant_id, vxlan_spec->vni,
+				   RTE_DIM(vxlan_spec->vni));
+			if (!o_eth_spec && !o_eth_mask) {
+				filter->filter_type =
+					RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID;
+			} else {
+				error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+				return -EINVAL;
+			}
+		} else if (!vxlan_spec && !vxlan_mask) {
+			if (!o_eth_spec && !o_eth_mask) {
+				filter->filter_type =
+					RTE_TUNNEL_FILTER_IMAC_IVLAN;
+			} else {
+				error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+				return -EINVAL;
+			}
+		} else {
+			error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+			return -EINVAL;
+		}
+	} else if ((!vlan_spec && !vlan_mask) ||
+		   (vlan_spec && vlan_mask && vlan_mask->tci == 0x0)) {
+		if (vxlan_spec && vxlan_mask && !is_vni_masked) {
+			rte_memcpy(&filter->tenant_id, vxlan_spec->vni,
+				   RTE_DIM(vxlan_spec->vni));
+			if (!o_eth_spec && !o_eth_mask) {
+				filter->filter_type =
+					RTE_TUNNEL_FILTER_IMAC_TENID;
+			} else {
+				filter->filter_type =
+					RTE_TUNNEL_FILTER_OMAC_TENID_IMAC;
+			}
+		} else if (!vxlan_spec && !vxlan_mask) {
+			if (!o_eth_spec && !o_eth_mask) {
+				filter->filter_type = ETH_TUNNEL_FILTER_IMAC;
+			} else {
+				error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+				return -EINVAL;
+			}
+		} else {
+			error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+			return -EINVAL;
+		}
+	} else {
+		error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+		return -EINVAL;
+	}
+
+	filter->tunnel_type = RTE_TUNNEL_TYPE_VXLAN;
+
+	return i40e_parse_tunnel_act_attr(attr, actions, filter, error);
+}
+
+static int
+i40e_parse_tunnel_filter(const struct rte_flow_attr *attr,
+			 const struct rte_flow_item *pattern,
+			 const struct rte_flow_action *actions,
+			 struct rte_eth_tunnel_filter_conf *rule,
+			 struct rte_flow_error *error)
+{
+	int ret;
+
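+	/* Only VXLAN rules are recognized so far; parsers for other
+	 * tunnel types can be chained here in the same way.
+	 */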
+	ret = i40e_parse_vxlan_tunnel_filter(attr, pattern,
+					     actions, rule, error);
+	if (!ret)
+		return 0;
+
+	return ret;
+}
+
 static int
 i40e_flow_validate(__rte_unused struct rte_eth_dev *dev,
 		   const struct rte_flow_attr *attr,
@@ -10403,6 +10746,7 @@ i40e_flow_validate(__rte_unused struct rte_eth_dev *dev,
 {
 	struct rte_eth_ethertype_filter ethertype_filter;
 	struct rte_eth_mac_filter macvlan_filter;
+	struct rte_eth_tunnel_filter_conf tunnel_filter;
 	int ret;
 
 	ret = cons_parse_ethertype_filter(attr, pattern, actions,
@@ -10415,5 +10759,10 @@ i40e_flow_validate(__rte_unused struct rte_eth_dev *dev,
 	if (!ret)
 		return 0;
 
+	ret = i40e_parse_tunnel_filter(attr, pattern, actions,
+				       &tunnel_filter, error);
+	if (!ret)
+		return 0;
+
 	return ret;
 }
-- 
2.5.5