[dpdk-dev,v5] net/ixgbe: add flow parser ntuple support

Message ID 20180105061350.97658-1-wei.zhao1@intel.com (mailing list archive)
State Accepted, archived
Delegated to: Helin Zhang

Checks

Context               Check    Description
ci/checkpatch         success  coding style OK
ci/Intel-compilation  success  Compilation OK

Commit Message

Zhao1, Wei Jan. 5, 2018, 6:13 a.m. UTC
  The ixgbe ntuple filter in rte_flow needs to support diverting traffic
matched on fewer than the full 5-tuple parameters, so add this support
to the parser code. This patch also adds VLAN pattern parsing to the
ntuple filter, in order to handle flow API cases whose pattern includes
a VLAN item (an example rule is sketched after the diffstat below).

Signed-off-by: Wei Zhao <wei.zhao1@intel.com>

---

v2:
-fix coding style issues.

v3:
-add VLAN pattern parsing code.

v4:
-fix patch check issues.

v5:
-add more detail to the commit log message.
---
 drivers/net/ixgbe/ixgbe_flow.c | 135 +++++++++++++++++++++++++++++------------
 1 file changed, 95 insertions(+), 40 deletions(-)
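
For context, a minimal sketch (not from the patch itself) of the kind of
rule this change teaches the ixgbe parser to accept: empty ETH and VLAN
items in front, an IPv4 item matching on the destination address alone,
and a wildcarded TCP item, i.e. fewer than the full 5-tuple. The function
name, port id, queue index and address are hypothetical; only the
standard rte_flow API is assumed.

#include <stdint.h>

#include <rte_byteorder.h>
#include <rte_flow.h>

/* Sketch only: an ntuple-style rule matching on the IPv4 destination
 * address alone (fewer than the full 5-tuple), with empty ETH and VLAN
 * items and a wildcarded TCP item, which is the shape this patch
 * teaches the parser to accept.
 */
static struct rte_flow *
create_partial_ntuple_rule(uint16_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr.dst_addr = rte_cpu_to_be_32(0xc0a80001), /* 192.168.0.1 */
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr.dst_addr = rte_cpu_to_be_32(UINT32_MAX), /* exact match */
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },  /* no spec/mask: skipped */
		{ .type = RTE_FLOW_ITEM_TYPE_VLAN }, /* no spec/mask: skipped */
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_TCP },  /* ports left wildcarded */
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}

A roughly equivalent testpmd command would be:

  flow create 0 ingress pattern eth / vlan / ipv4 dst is 192.168.0.1 / tcp / end actions queue index 1 / end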
  

Comments

Wenzhuo Lu Jan. 5, 2018, 7:57 a.m. UTC | #1
Hi,

> -----Original Message-----
> From: Zhao1, Wei
> Sent: Friday, January 5, 2018 2:14 PM
> To: dev@dpdk.org
> Cc: Lu, Wenzhuo <wenzhuo.lu@intel.com>; Zhao1, Wei
> <wei.zhao1@intel.com>
> Subject: [PATCH v5] net/ixgbe: add flow parser ntuple support
> 
> The ixgbe ntuple filter in rte_flow needs to support diverting traffic
> matched on fewer than the full 5-tuple parameters, so add this support
> to the parser code. This patch also adds VLAN pattern parsing to the
> ntuple filter, in order to handle flow API cases whose pattern includes
> a VLAN item.
> 
> Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
Acked-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
  
Zhang, Helin Jan. 7, 2018, 8:53 a.m. UTC | #2
> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Lu, Wenzhuo
> Sent: Friday, January 5, 2018 3:58 PM
> To: Zhao1, Wei; dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH v5] net/ixgbe: add flow parser ntuple support
> 
> Hi,
> 
> > -----Original Message-----
> > From: Zhao1, Wei
> > Sent: Friday, January 5, 2018 2:14 PM
> > To: dev@dpdk.org
> > Cc: Lu, Wenzhuo <wenzhuo.lu@intel.com>; Zhao1, Wei
> > <wei.zhao1@intel.com>
> > Subject: [PATCH v5] net/ixgbe: add flow parser ntuple support
> >
> > The ixgbe ntuple filter in rte_flow needs to support diverting
> > traffic matched on fewer than the full 5-tuple parameters, so add
> > this support to the parser code. This patch also adds VLAN pattern
> > parsing to the ntuple filter, in order to handle flow API cases
> > whose pattern includes a VLAN item.
> >
> > Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
> Acked-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
Applied into dpdk-next-net-intel. Thanks!

/Helin
  

Patch

diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index 8f964cf..6af5ce1 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -214,6 +214,12 @@  cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 	const struct rte_flow_item_udp *udp_mask;
 	const struct rte_flow_item_sctp *sctp_spec;
 	const struct rte_flow_item_sctp *sctp_mask;
+	const struct rte_flow_item_eth *eth_spec;
+	const struct rte_flow_item_eth *eth_mask;
+	const struct rte_flow_item_vlan *vlan_spec;
+	const struct rte_flow_item_vlan *vlan_mask;
+	struct rte_flow_item_eth eth_null;
+	struct rte_flow_item_vlan vlan_null;
 
 	if (!pattern) {
 		rte_flow_error_set(error,
@@ -235,6 +241,9 @@  cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 		return -rte_errno;
 	}
 
+	memset(&eth_null, 0, sizeof(struct rte_flow_item_eth));
+	memset(&vlan_null, 0, sizeof(struct rte_flow_item_vlan));
+
 #ifdef RTE_LIBRTE_SECURITY
 	/**
 	 *  Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
@@ -284,6 +293,8 @@  cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 	}
 	/* Skip Ethernet */
 	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+		eth_spec = (const struct rte_flow_item_eth *)item->spec;
+		eth_mask = (const struct rte_flow_item_eth *)item->mask;
 		/*Not supported last point for range*/
 		if (item->last) {
 			rte_flow_error_set(error,
@@ -294,15 +305,20 @@  cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 
 		}
 		/* if the first item is MAC, the content should be NULL */
-		if (item->spec || item->mask) {
+		if ((item->spec || item->mask) &&
+			(memcmp(eth_spec, &eth_null,
+				sizeof(struct rte_flow_item_eth)) ||
+			 memcmp(eth_mask, &eth_null,
+				sizeof(struct rte_flow_item_eth)))) {
 			rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ITEM,
 				item, "Not supported by ntuple filter");
 			return -rte_errno;
 		}
-		/* check if the next not void item is IPv4 */
+		/* check if the next not void item is IPv4 or Vlan */
 		item = next_no_void_pattern(pattern, item);
-		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+			item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
 			rte_flow_error_set(error,
 			  EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
 			  item, "Not supported by ntuple filter");
@@ -310,48 +326,82 @@  cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 		}
 	}
 
-	/* get the IPv4 info */
-	if (!item->spec || !item->mask) {
-		rte_flow_error_set(error, EINVAL,
-			RTE_FLOW_ERROR_TYPE_ITEM,
-			item, "Invalid ntuple mask");
-		return -rte_errno;
-	}
-	/*Not supported last point for range*/
-	if (item->last) {
-		rte_flow_error_set(error, EINVAL,
-			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-			item, "Not supported last point for range");
-		return -rte_errno;
+	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+		vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
+		vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
+		/*Not supported last point for range*/
+		if (item->last) {
+			rte_flow_error_set(error,
+			  EINVAL,
+			  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+			  item, "Not supported last point for range");
+			return -rte_errno;
+		}
+		/* the content should be NULL */
+		if ((item->spec || item->mask) &&
+			(memcmp(vlan_spec, &vlan_null,
+				sizeof(struct rte_flow_item_vlan)) ||
+			 memcmp(vlan_mask, &vlan_null,
+				sizeof(struct rte_flow_item_vlan)))) {
 
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by ntuple filter");
+			return -rte_errno;
+		}
+		/* check if the next not void item is IPv4 */
+		item = next_no_void_pattern(pattern, item);
+		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+			rte_flow_error_set(error,
+			  EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+			  item, "Not supported by ntuple filter");
+			return -rte_errno;
+		}
 	}
 
-	ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
-	/**
-	 * Only support src & dst addresses, protocol,
-	 * others should be masked.
-	 */
-	if (ipv4_mask->hdr.version_ihl ||
-	    ipv4_mask->hdr.type_of_service ||
-	    ipv4_mask->hdr.total_length ||
-	    ipv4_mask->hdr.packet_id ||
-	    ipv4_mask->hdr.fragment_offset ||
-	    ipv4_mask->hdr.time_to_live ||
-	    ipv4_mask->hdr.hdr_checksum) {
+	if (item->mask) {
+		/* get the IPv4 info */
+		if (!item->spec || !item->mask) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Invalid ntuple mask");
+			return -rte_errno;
+		}
+		/*Not supported last point for range*/
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				item, "Not supported last point for range");
+			return -rte_errno;
+		}
+
+		ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
+		/**
+		 * Only support src & dst addresses, protocol,
+		 * others should be masked.
+		 */
+		if (ipv4_mask->hdr.version_ihl ||
+		    ipv4_mask->hdr.type_of_service ||
+		    ipv4_mask->hdr.total_length ||
+		    ipv4_mask->hdr.packet_id ||
+		    ipv4_mask->hdr.fragment_offset ||
+		    ipv4_mask->hdr.time_to_live ||
+		    ipv4_mask->hdr.hdr_checksum) {
 			rte_flow_error_set(error,
-			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
-			item, "Not supported by ntuple filter");
-		return -rte_errno;
-	}
+				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by ntuple filter");
+			return -rte_errno;
+		}
 
-	filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
-	filter->src_ip_mask = ipv4_mask->hdr.src_addr;
-	filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
+		filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
+		filter->src_ip_mask = ipv4_mask->hdr.src_addr;
+		filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
 
-	ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
-	filter->dst_ip = ipv4_spec->hdr.dst_addr;
-	filter->src_ip = ipv4_spec->hdr.src_addr;
-	filter->proto  = ipv4_spec->hdr.next_proto_id;
+		ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
+		filter->dst_ip = ipv4_spec->hdr.dst_addr;
+		filter->src_ip = ipv4_spec->hdr.src_addr;
+		filter->proto  = ipv4_spec->hdr.next_proto_id;
+	}
 
 	/* check if the next not void item is TCP or UDP */
 	item = next_no_void_pattern(pattern, item);
@@ -366,8 +416,13 @@  cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 		return -rte_errno;
 	}
 
-	/* get the TCP/UDP info */
 	if ((item->type != RTE_FLOW_ITEM_TYPE_END) &&
+		(!item->spec && !item->mask)) {
+		goto action;
+	}
+
+	/* get the TCP/UDP/SCTP info */
+	if (item->type != RTE_FLOW_ITEM_TYPE_END &&
 		(!item->spec || !item->mask)) {
 		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
 		rte_flow_error_set(error, EINVAL,
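
A note on the validation technique introduced above: ETH and VLAN items
whose spec/mask pointers are set but whose contents are all zero are
tolerated rather than rejected, by comparing them against a zeroed
struct with memcmp(). A minimal standalone sketch of the same idea
(helper name hypothetical; this version also guards each pointer
individually before dereferencing it):

#include <string.h>

#include <rte_flow.h>

/* Hypothetical helper mirroring the parser's check: an ETH item
 * imposes no L2 constraint when its spec and mask are either absent
 * or entirely zeroed, so the ntuple path may skip over it.
 */
static int
eth_item_is_effectively_empty(const struct rte_flow_item *item)
{
	static const struct rte_flow_item_eth eth_null; /* all zeroes */

	if (item->spec &&
	    memcmp(item->spec, &eth_null, sizeof(eth_null)) != 0)
		return 0;
	if (item->mask &&
	    memcmp(item->mask, &eth_null, sizeof(eth_null)) != 0)
		return 0;
	return 1;
}

The VLAN item gets the same treatment with struct rte_flow_item_vlan,
and a TCP/UDP/SCTP item carrying neither spec nor mask simply falls
through to action parsing with the ports left wildcarded.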