[dpdk-dev] [PATCH 12/18] net/ixgbe: parse ethertype filter

Wei Zhao wei.zhao1 at intel.com
Fri Dec 2 11:43:08 CET 2016


From: wei zhao1 <wei.zhao1 at intel.com>

Check if the rule is an ethertype rule, and get the ethertype info.

Signed-off-by: wei zhao1 <wei.zhao1 at intel.com>
Signed-off-by: Wenzhuo Lu <wenzhuo.lu at intel.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c | 166 ++++++++++++++++++++++++++++++++++++++-
 1 file changed, 162 insertions(+), 4 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index d3768c6..a421062 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -405,11 +405,21 @@ rte_flow_error_type ixgbe_parse_ntuple_filter(const struct rte_flow_attr *attr,
 					const struct rte_flow_item pattern[],
 					const struct rte_flow_action actions[],
 					struct rte_eth_ntuple_filter *filter);
+static enum rte_flow_error_type
+cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
+				const struct rte_flow_item pattern[],
+				const struct rte_flow_action actions[],
+				struct rte_eth_ethertype_filter *filter);
+static enum rte_flow_error_type
+ixgbe_parse_ethertype_filter(const struct rte_flow_attr *attr,
+				const struct rte_flow_item pattern[],
+				const struct rte_flow_action actions[],
+				struct rte_eth_ethertype_filter *filter);
 enum rte_flow_error_type
 ixgbe_flow_rule_validate(__rte_unused struct rte_eth_dev *dev,
-					const struct rte_flow_attr *attr,
-					const struct rte_flow_item pattern[],
-					const struct rte_flow_action actions[]);
+				const struct rte_flow_attr *attr,
+				const struct rte_flow_item pattern[],
+				const struct rte_flow_action actions[]);
 int ixgbe_flow_validate(struct rte_eth_dev *dev,
 		const struct rte_flow_attr *attr,
 		const struct rte_flow_item pattern[],
@@ -8306,6 +8316,149 @@ ixgbe_parse_ntuple_filter(const struct rte_flow_attr *attr,
 }
 
 /**
+ * Parse the rule to see if it is an ethertype rule.
+ * And get the ethertype filter info as well.
+ */
+static enum rte_flow_error_type
+cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
+			    const struct rte_flow_item pattern[],
+			    const struct rte_flow_action actions[],
+			    struct rte_eth_ethertype_filter *filter)
+{
+	const struct rte_flow_item *item;
+	const struct rte_flow_action *act;
+	const struct rte_flow_item_eth *eth_spec;
+	const struct rte_flow_item_eth *eth_mask;
+	const struct rte_flow_action_queue *act_q;
+	uint32_t i, j;
+
+	/************************************************
+	 * parse pattern
+	 ************************************************/
+	i = 0;
+
+	/* the first not void item should be MAC */
+	PATTERN_SKIP_VOID(filter, struct rte_eth_ethertype_filter,
+			  RTE_FLOW_ERROR_TYPE_ITEM_NUM);
+	if (item->type != RTE_FLOW_ITEM_TYPE_ETH)
+		return RTE_FLOW_ERROR_TYPE_ITEM;
+
+	/* get the MAC info */
+	if (!item->spec || !item->mask)
+		return RTE_FLOW_ERROR_TYPE_ITEM;
+	eth_spec = (const struct rte_flow_item_eth *)item->spec;
+	eth_mask = (const struct rte_flow_item_eth *)item->mask;
+	/**
+	 * Source MAC address must be masked.
+	 * Destination MAC address must be totally masked or not.
+	 */
+	if (eth_mask->src.addr_bytes[0] ||
+	    (eth_mask->dst.addr_bytes[0] != 0xFF &&
+	     eth_mask->dst.addr_bytes[0]))
+		return RTE_FLOW_ERROR_TYPE_ITEM;
+	for (j = 1; j < ETHER_ADDR_LEN; j++) {
+		if (eth_mask->src.addr_bytes[j] !=
+			eth_mask->src.addr_bytes[0] ||
+		    eth_mask->dst.addr_bytes[j] !=
+			 eth_mask->dst.addr_bytes[0])
+			return RTE_FLOW_ERROR_TYPE_ITEM;
+	}
+	/* ether_type is a 16-bit big-endian field; it must be fully masked */
+	if (rte_be_to_cpu_16(eth_mask->type) != 0xFFFF)
+		return RTE_FLOW_ERROR_TYPE_ITEM;
+
+	if (eth_mask->dst.addr_bytes[0]) {
+		filter->mac_addr = eth_spec->dst;
+		filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
+	} else {
+		filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
+	}
+	filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
+
+	/* check if the next not void item is END */
+	i++;
+	PATTERN_SKIP_VOID(filter, struct rte_eth_ethertype_filter,
+			  RTE_FLOW_ERROR_TYPE_ITEM);
+	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+		return RTE_FLOW_ERROR_TYPE_ITEM;
+	}
+
+	/************************************************
+	 * parse action
+	 ************************************************/
+	i = 0;
+
+	/* check if the first not void action is QUEUE or DROP. */
+	ACTION_SKIP_VOID(filter, struct rte_eth_ethertype_filter,
+			 RTE_FLOW_ERROR_TYPE_ACTION_NUM);
+	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
+	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
+		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+		return RTE_FLOW_ERROR_TYPE_ACTION;
+	}
+
+	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+		act_q = (const struct rte_flow_action_queue *)act->conf;
+		filter->queue = act_q->index;
+	} else {
+		filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
+	}
+
+	/* check if the next not void item is END */
+	i++;
+	ACTION_SKIP_VOID(filter, struct rte_eth_ethertype_filter,
+			 RTE_FLOW_ERROR_TYPE_ACTION);
+	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+		return RTE_FLOW_ERROR_TYPE_ACTION;
+	}
+
+	/************************************************
+	 * parse attr
+	 ************************************************/
+	/* must be input direction */
+	if (!attr->ingress) {
+		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+		return RTE_FLOW_ERROR_TYPE_ATTR_INGRESS;
+	}
+
+	/* not supported */
+	if (attr->egress) {
+		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+		return RTE_FLOW_ERROR_TYPE_ATTR_EGRESS;
+	}
+
+	/* not supported */
+	if (attr->priority) {
+		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+		return RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY;
+	}
+
+	return RTE_FLOW_ERROR_TYPE_NONE;
+}
+static enum rte_flow_error_type
+ixgbe_parse_ethertype_filter(const struct rte_flow_attr *attr,
+			     const struct rte_flow_item pattern[],
+			     const struct rte_flow_action actions[],
+			     struct rte_eth_ethertype_filter *filter)
+{
+	enum rte_flow_error_type ret;
+
+	ret = cons_parse_ethertype_filter(attr, pattern, actions, filter);
+
+	if (ret)
+		return ret;
+
+	/* ixgbe doesn't support MAC address matching in ethertype filters. */
+	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
+		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+		return RTE_FLOW_ERROR_TYPE_ITEM;
+	}
+
+	return RTE_FLOW_ERROR_TYPE_NONE;
+}
+
+/**
  * Check if the flow rule is supported by ixgbe.
  * It only checkes the format. Don't guarantee the rule can be programmed into
  * the HW. Because there can be no enough room for the rule.
@@ -8318,12 +8471,17 @@ ixgbe_flow_rule_validate(__rte_unused struct rte_eth_dev *dev,
 {
 	int ret;
 	struct rte_eth_ntuple_filter ntuple_filter;
+	struct rte_eth_ethertype_filter ethertype_filter;
 
 	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
 	ret = ixgbe_parse_ntuple_filter(attr, pattern, actions, &ntuple_filter);
 	if (!ret)
 		return RTE_FLOW_ERROR_TYPE_NONE;
 
+	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
+	ret = ixgbe_parse_ethertype_filter(attr, pattern,
+					   actions, &ethertype_filter);
+	if (!ret)
+		return RTE_FLOW_ERROR_TYPE_NONE;
 
 	return ret;
 }
-- 
2.5.5



More information about the dev mailing list