[dpdk-dev] [PATCH 15/24] net/i40e: parse flow director filter

Beilei Xing beilei.xing at intel.com
Fri Dec 2 12:53:36 CET 2016


Check if the rule is a flow director rule, and get the
flow director info.
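
For reference, below is a minimal sketch of a rule this parser
accepts: an IPv4/UDP flow directed to queue 1 and tagged with a
mark. The addresses, ports, queue index and mark ID are
hypothetical, the rte_flow API is as introduced by the generic
flow API series, and port_id is assumed to be a valid started
port.

    /* Needs rte_flow.h, rte_ip.h and rte_byteorder.h. */
    uint8_t port_id = 0;
    struct rte_flow_attr attr = { .ingress = 1 };
    struct rte_flow_item_ipv4 ip_spec = {
        .hdr = {
            .src_addr = rte_cpu_to_be_32(IPv4(192, 168, 0, 1)),
            .dst_addr = rte_cpu_to_be_32(IPv4(192, 168, 0, 2)),
        },
    };
    struct rte_flow_item_ipv4 ip_mask = {
        /* Full masks only: fdir matches the default input set. */
        .hdr = { .src_addr = 0xFFFFFFFF, .dst_addr = 0xFFFFFFFF },
    };
    struct rte_flow_item_udp udp_spec = {
        .hdr = {
            .src_port = rte_cpu_to_be_16(32),
            .dst_port = rte_cpu_to_be_16(33),
        },
    };
    struct rte_flow_item_udp udp_mask = {
        .hdr = { .src_port = 0xFFFF, .dst_port = 0xFFFF },
    };
    struct rte_flow_item pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_IPV4,
          .spec = &ip_spec, .mask = &ip_mask },
        { .type = RTE_FLOW_ITEM_TYPE_UDP,
          .spec = &udp_spec, .mask = &udp_mask },
        { .type = RTE_FLOW_ITEM_TYPE_END },
    };
    struct rte_flow_action_queue queue = { .index = 1 };
    struct rte_flow_action_mark mark = { .id = 0x1234 };
    struct rte_flow_action actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
        { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
        { .type = RTE_FLOW_ACTION_TYPE_END },
    };
    struct rte_flow_error err;
    int ret = rte_flow_validate(port_id, &attr, pattern, actions,
                                &err);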

Signed-off-by: Beilei Xing <beilei.xing at intel.com>
---
 drivers/net/i40e/i40e_ethdev.c | 537 +++++++++++++++++++++++++++++++++++++++++
 1 file changed, 537 insertions(+)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 1ffafa0..12255fa 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -502,6 +502,11 @@ static int i40e_parse_tunnel_filter(const struct rte_flow_attr *attr,
 				    const struct rte_flow_action *actions,
 				    struct rte_eth_tunnel_filter_conf *filter,
 				    struct rte_flow_error *error);
+static int i40e_parse_fdir_filter(const struct rte_flow_attr *attr,
+				  const struct rte_flow_item *pattern,
+				  const struct rte_flow_action *actions,
+				  struct rte_eth_fdir_filter *filter,
+				  struct rte_flow_error *error);
 static int i40e_flow_validate(__rte_unused struct rte_eth_dev *dev,
 			      const struct rte_flow_attr *attr,
 			      const struct rte_flow_item *pattern,
@@ -11006,6 +11011,532 @@ i40e_parse_tunnel_filter(const struct rte_flow_attr *attr,
 	return ret;
 }
 
+/**
+ * Parse the rule to see if it is a flow director rule, and get
+ * the flow director filter info.
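+ *
+ * Supported pattern items: ETH, VLAN, IPV4, IPV6, TCP, UDP, SCTP
+ * and VF; supported actions: QUEUE or DROP, optionally followed
+ * by MARK.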
+ */
+static int
+i40e_parse_fdir_filter(const struct rte_flow_attr *attr,
+		       const struct rte_flow_item *pattern,
+		       const struct rte_flow_action *actions,
+		       struct rte_eth_fdir_filter *filter,
+		       struct rte_flow_error *error)
+{
+	const struct rte_flow_item *item;
+	const struct rte_flow_action *act;
+	const struct rte_flow_item_eth *eth_spec, *eth_mask;
+	const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
+	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+	const struct rte_flow_item_udp *udp_spec, *udp_mask;
+	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+	const struct rte_flow_item_vf *vf_spec;
+	const struct rte_flow_action_mark *mark_spec;
+	const struct rte_flow_action_queue *act_q;
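+	/* All-zero address, used to verify that the MAC addresses in
+	 * an ETH item are fully masked out of the match.
+	 */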
+	struct ether_addr macaddr_masked = {
+		.addr_bytes = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0}
+	};
+	uint32_t i;
+	uint32_t flow_type = RTE_ETH_FLOW_UNKNOWN;
+	enum i40e_filter_pctype pctype;
+	uint64_t input_set = I40E_INSET_NONE;
+	uint16_t l3 = 0;
+	uint16_t flag_offset;
+
+	/* parse pattern */
+	i = 0;
+
+	/* The first not void item should be ETH, VLAN, IPV4 or IPV6. */
+	PATTERN_SKIP_VOID(filter, struct rte_eth_fdir_filter,
+			  RTE_FLOW_ERROR_TYPE_ITEM_NUM);
+	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+	    item->type != RTE_FLOW_ITEM_TYPE_VLAN &&
+	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+	    item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
+		error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+		return -EINVAL;
+	}
+
+	/* Check if the not void item is ETH. */
+	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+		eth_spec = (const struct rte_flow_item_eth *)item->spec;
+		eth_mask = (const struct rte_flow_item_eth *)item->mask;
+
+		if ((!eth_spec && eth_mask) || (eth_spec && !eth_mask)) {
+			error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+			return -EINVAL;
+		}
+
+		if (eth_spec) {
+			filter->input.flow.l2_flow.ether_type =
+				(uint16_t)eth_spec->type;
+		}
+
+		if (eth_mask) {
+			/* SRC and DST MAC addresses must be fully
+			 * masked out (all-zero mask): they are not
+			 * part of the flow director input set.
+			 */
+			if (!is_same_ether_addr(&eth_mask->src,
+						&macaddr_masked) ||
+			    !is_same_ether_addr(&eth_mask->dst,
+						&macaddr_masked)) {
+				error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+				return -EINVAL;
+			}
+
+			if (eth_mask->type && eth_mask->type != 0xFFFF) {
+				error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+				return -EINVAL;
+			}
+
+			flow_type = RTE_ETH_FLOW_L2_PAYLOAD;
+			input_set |= I40E_INSET_LAST_ETHER_TYPE;
+		}
+
+		i++;
+		PATTERN_SKIP_VOID(filter, struct rte_eth_fdir_filter,
+				  RTE_FLOW_ERROR_TYPE_ITEM);
+		if (item->type != RTE_FLOW_ITEM_TYPE_VLAN &&
+		    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+		    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
+		    item->type != RTE_FLOW_ITEM_TYPE_VF &&
+		    item->type != RTE_FLOW_ITEM_TYPE_END) {
+			error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+			return -EINVAL;
+		}
+	}
+
+	/* Check if the not void item is VLAN. */
+	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+		vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
+		vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
+
+		if (i == 1) {
+			if ((vlan_spec && !vlan_mask) ||
+			    (!vlan_spec && vlan_mask)) {
+				error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+				return -EINVAL;
+			}
+		} else if (i == 2) {
+			if (!vlan_spec || !vlan_mask) {
+				error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+				return -EINVAL;
+			}
+		}
+
+		if (vlan_spec && vlan_mask) {
+			filter->input.flow_ext.vlan_tci =
+				rte_be_to_cpu_16(vlan_spec->tci) & 0x0FFF;
+			if (vlan_mask->tpid ||
+			    (vlan_mask->tci && vlan_mask->tci !=
+			     rte_cpu_to_be_16(0x0FFF))) {
+				error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+				return -EINVAL;
+			}
+
+			input_set |= I40E_INSET_VLAN_INNER;
+		}
+
+		/* Check if the next not void item is IPV4 or IPV6. */
+		i++;
+		PATTERN_SKIP_VOID(filter, struct rte_eth_fdir_filter,
+				  RTE_FLOW_ERROR_TYPE_ITEM);
+		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+		    item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
+			error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+			return -EINVAL;
+		}
+	}
+
+	/* Check if the not void item is IPV4. */
+	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
+		l3 = RTE_FLOW_ITEM_TYPE_IPV4;
+		ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
+		ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
+
+		if (ipv4_spec) {
+			/* Check if it is fragment: either a non-zero
+			 * fragment offset or the MF flag marks a
+			 * fragmented packet.
+			 */
+			flag_offset =
+			      rte_be_to_cpu_16(ipv4_spec->hdr.fragment_offset);
+			if (flag_offset & IPV4_HDR_OFFSET_MASK ||
+			    flag_offset & IPV4_HDR_MF_FLAG)
+				flow_type = RTE_ETH_FLOW_FRAG_IPV4;
+			else
+				flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
+
+			filter->input.flow.ip4_flow.proto =
+				ipv4_spec->hdr.next_proto_id;
+			filter->input.flow.ip4_flow.tos =
+				ipv4_spec->hdr.type_of_service;
+			filter->input.flow.ip4_flow.ttl =
+				ipv4_spec->hdr.time_to_live;
+			filter->input.flow.ip4_flow.src_ip =
+				ipv4_spec->hdr.src_addr;
+			filter->input.flow.ip4_flow.dst_ip =
+				ipv4_spec->hdr.dst_addr;
+		}
+
+		if (ipv4_mask) {
+			if (ipv4_mask->hdr.version_ihl ||
+			    ipv4_mask->hdr.total_length ||
+			    ipv4_mask->hdr.packet_id ||
+			    ipv4_mask->hdr.fragment_offset ||
+			    ipv4_mask->hdr.hdr_checksum) {
+				error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+				return -EINVAL;
+			}
+
+			if (ipv4_mask->hdr.src_addr == 0xFFFFFFFF)
+				input_set |= I40E_INSET_IPV4_SRC;
+			if (ipv4_mask->hdr.dst_addr == 0xFFFFFFFF)
+				input_set |= I40E_INSET_IPV4_DST;
+			if (ipv4_mask->hdr.type_of_service == 0xFF)
+				input_set |= I40E_INSET_IPV4_TOS;
+			if (ipv4_mask->hdr.time_to_live == 0xFF)
+				input_set |= I40E_INSET_IPV4_TTL;
+			if (ipv4_mask->hdr.next_proto_id == 0xFF)
+				input_set |= I40E_INSET_IPV4_PROTO;
+		}
+	}
+
+	/* Check if the not void item is IPV6. */
+	if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
+		uint32_t j;
+
+		l3 = RTE_FLOW_ITEM_TYPE_IPV6;
+		ipv6_spec = (const struct rte_flow_item_ipv6 *)item->spec;
+		ipv6_mask = (const struct rte_flow_item_ipv6 *)item->mask;
+
+		if (ipv6_spec) {
+			/* Check if it is fragment: next header 44 is
+			 * the IPv6 fragment extension header.
+			 */
+			if (ipv6_spec->hdr.proto == 44)
+				flow_type = RTE_ETH_FLOW_FRAG_IPV6;
+			else
+				flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
+
+			/* TC is bits 20-27 of the 32-bit vtc_flow word. */
+			filter->input.flow.ipv6_flow.tc =
+				(uint8_t)((rte_be_to_cpu_32(
+					ipv6_spec->hdr.vtc_flow) >> 20) & 0xFF);
+			filter->input.flow.ipv6_flow.proto =
+				ipv6_spec->hdr.proto;
+			filter->input.flow.ipv6_flow.hop_limits =
+				ipv6_spec->hdr.hop_limits;
+
+			rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
+				   ipv6_spec->hdr.src_addr, 16);
+			rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
+				   ipv6_spec->hdr.dst_addr, 16);
+		}
+
+		if (ipv6_mask) {
+			if (ipv6_mask->hdr.payload_len) {
+				error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+				return -EINVAL;
+			}
+
+			for (j = 0; j < RTE_DIM(ipv6_mask->hdr.src_addr); j++) {
+				if (ipv6_mask->hdr.src_addr[j] != 0xFF ||
+				    ipv6_mask->hdr.dst_addr[j] != 0xFF) {
+					error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+					return -EINVAL;
+				}
+			}
+
+			input_set |= I40E_INSET_IPV6_SRC;
+			input_set |= I40E_INSET_IPV6_DST;
+
+			/* TC is bits 20-27 of the 32-bit vtc_flow word. */
+			if ((rte_be_to_cpu_32(ipv6_mask->hdr.vtc_flow) &
+			     0x0FF00000) == 0x0FF00000)
+				input_set |= I40E_INSET_IPV6_TC;
+			if (ipv6_mask->hdr.proto == 0xFF)
+				input_set |= I40E_INSET_IPV6_NEXT_HDR;
+			if (ipv6_mask->hdr.hop_limits == 0xFF)
+				input_set |= I40E_INSET_IPV6_HOP_LIMIT;
+		}
+	}
+
+	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
+	    item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
+		/* Check the next not void item */
+		i++;
+		PATTERN_SKIP_VOID(filter, struct rte_eth_fdir_filter,
+				  RTE_FLOW_ERROR_TYPE_ITEM);
+		if ((flow_type == RTE_ETH_FLOW_FRAG_IPV4) ||
+		    (flow_type == RTE_ETH_FLOW_FRAG_IPV6)) {
+			if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+				error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+				return -EINVAL;
+			}
+		} else {
+			if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+			    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+			    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
+			    item->type != RTE_FLOW_ITEM_TYPE_VF &&
+			    item->type != RTE_FLOW_ITEM_TYPE_END) {
+				error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+				return -EINVAL;
+			}
+		}
+	}
+
+	/* Check if the next not void item is TCP. */
+	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
+		tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+		tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+
+		if (!tcp_spec || !tcp_mask) {
+			error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+			return -EINVAL;
+		}
+
+		if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+			flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
+		else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+			flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_TCP;
+
+		if (tcp_spec) {
+			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+				filter->input.flow.tcp4_flow.src_port =
+					tcp_spec->hdr.src_port;
+				filter->input.flow.tcp4_flow.dst_port =
+					tcp_spec->hdr.dst_port;
+			} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+				filter->input.flow.tcp6_flow.src_port =
+					tcp_spec->hdr.src_port;
+				filter->input.flow.tcp6_flow.dst_port =
+					tcp_spec->hdr.dst_port;
+			}
+		}
+
+		if (tcp_mask) {
+			if (tcp_mask->hdr.sent_seq ||
+			    tcp_mask->hdr.recv_ack ||
+			    tcp_mask->hdr.data_off ||
+			    tcp_mask->hdr.tcp_flags ||
+			    tcp_mask->hdr.rx_win ||
+			    tcp_mask->hdr.cksum ||
+			    tcp_mask->hdr.tcp_urp) {
+				error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+				return -EINVAL;
+			}
+
+			if ((tcp_mask->hdr.src_port != 0xFFFF &&
+			     tcp_mask->hdr.src_port != 0) ||
+			    (tcp_mask->hdr.dst_port != 0xFFFF &&
+			     tcp_mask->hdr.dst_port != 0)) {
+				error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+				return -EINVAL;
+			}
+
+			if (tcp_mask->hdr.src_port == 0xFFFF)
+				input_set |= I40E_INSET_SRC_PORT;
+			if (tcp_mask->hdr.dst_port == 0xFFFF)
+				input_set |= I40E_INSET_DST_PORT;
+		}
+	}
+
+	/* Check if the not void item is UDP. */
+	if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
+		udp_spec = (const struct rte_flow_item_udp *)item->spec;
+		udp_mask = (const struct rte_flow_item_udp *)item->mask;
+
+		if (!udp_spec || !udp_mask) {
+			error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+			return -EINVAL;
+		}
+
+		if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+			flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
+		else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+			flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_UDP;
+
+		if (udp_spec) {
+			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+				filter->input.flow.udp4_flow.src_port =
+					udp_spec->hdr.src_port;
+				filter->input.flow.udp4_flow.dst_port =
+					udp_spec->hdr.dst_port;
+			} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+				filter->input.flow.udp6_flow.src_port =
+					udp_spec->hdr.src_port;
+				filter->input.flow.udp6_flow.dst_port =
+					udp_spec->hdr.dst_port;
+			}
+		}
+
+		if (udp_mask) {
+			if (udp_mask->hdr.dgram_len ||
+			    udp_mask->hdr.dgram_cksum) {
+				error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+				return -EINVAL;
+			}
+
+			if ((udp_mask->hdr.src_port != 0xFFFF &&
+			     udp_mask->hdr.src_port != 0) ||
+			    (udp_mask->hdr.dst_port != 0xFFFF &&
+			     udp_mask->hdr.dst_port != 0)) {
+				error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+				return -EINVAL;
+			}
+
+			if (udp_mask->hdr.src_port == 0xFFFF)
+				input_set |= I40E_INSET_SRC_PORT;
+			if (udp_mask->hdr.dst_port == 0xFFFF)
+				input_set |= I40E_INSET_DST_PORT;
+		}
+	}
+
+	/* Check if the not void item is SCTP. */
+	if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
+		sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
+		sctp_mask = (const struct rte_flow_item_sctp *)item->mask;
+
+		if (!sctp_spec || !sctp_mask) {
+			error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+			return -EINVAL;
+		}
+
+		if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+			flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_SCTP;
+		else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+			flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_SCTP;
+
+		if (sctp_spec) {
+			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+				filter->input.flow.sctp4_flow.src_port =
+					sctp_spec->hdr.src_port;
+				filter->input.flow.sctp4_flow.dst_port =
+					sctp_spec->hdr.dst_port;
+				filter->input.flow.sctp4_flow.verify_tag =
+					sctp_spec->hdr.tag;
+			} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+				filter->input.flow.sctp6_flow.src_port =
+					sctp_spec->hdr.src_port;
+				filter->input.flow.sctp6_flow.dst_port =
+					sctp_spec->hdr.dst_port;
+				filter->input.flow.sctp6_flow.verify_tag =
+					sctp_spec->hdr.tag;
+			}
+		}
+
+		if (sctp_mask) {
+			if (sctp_mask->hdr.cksum) {
+				error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+				return -EINVAL;
+			}
+
+			if ((sctp_mask->hdr.src_port != 0xFFFF &&
+			     sctp_mask->hdr.src_port != 0) ||
+			    (sctp_mask->hdr.dst_port != 0xFFFF &&
+			     sctp_mask->hdr.dst_port != 0) ||
+			    (sctp_mask->hdr.tag != 0xFFFFFFFF &&
+			     sctp_mask->hdr.tag != 0)) {
+				error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+				return -EINVAL;
+			}
+
+			if (sctp_mask->hdr.src_port == 0xFFFF)
+				input_set |= I40E_INSET_SRC_PORT;
+			if (sctp_mask->hdr.dst_port == 0xFFFF)
+				input_set |= I40E_INSET_DST_PORT;
+			if (sctp_mask->hdr.tag == 0xFFFFFFFF)
+				input_set |= I40E_INSET_SCTP_VT;
+		}
+	}
+
+	if (item->type == RTE_FLOW_ITEM_TYPE_TCP ||
+	    item->type == RTE_FLOW_ITEM_TYPE_UDP ||
+	    item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
+		i++;
+		PATTERN_SKIP_VOID(filter, struct rte_eth_fdir_filter,
+				  RTE_FLOW_ERROR_TYPE_ITEM);
+		if (item->type != RTE_FLOW_ITEM_TYPE_VF &&
+		    item->type != RTE_FLOW_ITEM_TYPE_END) {
+			error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+			return -EINVAL;
+		}
+	}
+
+	/* Check if the next not void item is VF. */
+	if (item->type == RTE_FLOW_ITEM_TYPE_VF) {
+		vf_spec = (const struct rte_flow_item_vf *)item->spec;
+		if (!vf_spec) {
+			error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+			return -EINVAL;
+		}
+		filter->input.flow_ext.is_vf = 1;
+		filter->input.flow_ext.dst_id = vf_spec->id;
+	}
+
+	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+		/* Check if the next not void item is END. */
+		i++;
+		PATTERN_SKIP_VOID(filter, struct rte_eth_fdir_filter,
+				  RTE_FLOW_ERROR_TYPE_ITEM);
+		if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+			error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+			return -EINVAL;
+		}
+	}
+
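+	/* Translate the matched flow type to the hardware packet
+	 * classification type (pctype) used by flow director.
+	 */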
+	pctype = i40e_flowtype_to_pctype(flow_type);
+	if (pctype == 0 || pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) {
+		PMD_DRV_LOG(ERR, "Unsupported flow type (%u)", flow_type);
+		error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+		return -EINVAL;
+	}
+
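+	/* Flow director only supports the default input set of each
+	 * flow type: a partially masked field was rejected above, and
+	 * a missing mandatory field shows up here as a mismatch.
+	 */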
+	if (input_set != i40e_get_default_input_set(pctype)) {
+		PMD_DRV_LOG(ERR, "Invalid input set");
+		error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+		return -EINVAL;
+	}
+
+	filter->input.flow_type = flow_type;
+
+	/* parse action */
+	i = 0;
+
+	/* Check if the first not void action is QUEUE or DROP. */
+	ACTION_SKIP_VOID(filter, struct rte_eth_fdir_filter,
+			 RTE_FLOW_ERROR_TYPE_ACTION_NUM);
+	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
+	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
+		error->type = RTE_FLOW_ERROR_TYPE_ACTION;
+		return -EINVAL;
+	}
+
+	filter->action.flex_off = 0;
+	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+		filter->action.behavior = RTE_ETH_FDIR_ACCEPT;
+		/* act->conf is only valid for the QUEUE action;
+		 * it is NULL for DROP.
+		 */
+		act_q = (const struct rte_flow_action_queue *)act->conf;
+		filter->action.rx_queue = act_q->index;
+	} else {
+		filter->action.behavior = RTE_ETH_FDIR_REJECT;
+	}
+
+	filter->action.report_status = RTE_ETH_FDIR_REPORT_ID;
+
+	/* Check if the next not void action is MARK or END. */
+	i++;
+	ACTION_SKIP_VOID(filter, struct rte_eth_fdir_filter,
+			 RTE_FLOW_ERROR_TYPE_ACTION_NUM);
+	if (act->type != RTE_FLOW_ACTION_TYPE_MARK &&
+	    act->type != RTE_FLOW_ACTION_TYPE_END) {
+		error->type = RTE_FLOW_ERROR_TYPE_ACTION;
+		return -EINVAL;
+	}
+
+	if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
+		mark_spec = (const struct rte_flow_action_mark *)act->conf;
+		filter->soft_id = mark_spec->id;
+
+		/* Check if the next not void action is END. */
+		i++;
+		ACTION_SKIP_VOID(filter, struct rte_eth_fdir_filter,
+				 RTE_FLOW_ERROR_TYPE_ACTION_NUM);
+		if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+			error->type = RTE_FLOW_ERROR_TYPE_ACTION;
+			return -EINVAL;
+		}
+	}
+
+	return i40e_parse_attr(attr, error);
+}
+
 static int
 i40e_flow_validate(__rte_unused struct rte_eth_dev *dev,
 		   const struct rte_flow_attr *attr,
@@ -11014,6 +11545,7 @@ i40e_flow_validate(__rte_unused struct rte_eth_dev *dev,
 		   struct rte_flow_error *error)
 {
 	struct rte_eth_ethertype_filter ethertype_filter;
+	struct rte_eth_fdir_filter fdir_filter;
 	struct rte_eth_mac_filter macvlan_filter;
 	struct rte_eth_tunnel_filter_conf tunnel_filter;
 	int ret;
@@ -11023,6 +11555,11 @@ i40e_flow_validate(__rte_unused struct rte_eth_dev *dev,
 	if (!ret)
 		return 0;
 
+	ret = i40e_parse_fdir_filter(attr, pattern, actions,
+				     &fdir_filter, error);
+	if (!ret)
+		return 0;
+
 	ret = i40e_parse_macvlan_filter(attr, pattern, actions,
 					&macvlan_filter, error);
 	if (!ret)
-- 
2.5.5


