[dpdk-stable] patch 'net/ice: add input set byte number check' has been queued to stable release 19.11.4

luca.boccassi at gmail.com
Fri Jul 24 13:59:14 CEST 2020


Hi,

FYI, your patch has been queued to stable release 19.11.4

Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 07/26/20, so please
shout if you have any objections.

Also note that after the patch there's a diff of the upstream commit vs the
patch applied to the branch. This will indicate whether any rebasing was
needed to apply the patch to the stable branch. If the rebase required code
changes (i.e. not only metadata diffs), please double-check that it was
done correctly.

Thanks.

Luca Boccassi

---
From 21637a6e327be94e6b741394ac5a50bc24a63341 Mon Sep 17 00:00:00 2001
From: Wei Zhao <wei.zhao1 at intel.com>
Date: Fri, 3 Jul 2020 14:19:37 +0800
Subject: [PATCH] net/ice: add input set byte number check

[ upstream commit 8de93729837d265de5b34cd318c9249720badfc8 ]

This patch adds a check on the total number of input set bytes,
as the hardware requires that the total not exceed 32 bytes.

Fixes: 47d460d63233 ("net/ice: rework switch filter")

Signed-off-by: Wei Zhao <wei.zhao1 at intel.com>
Acked-by: Qi Zhang <qi.z.zhang at intel.com>
---
 drivers/net/ice/ice_switch_filter.c | 37 +++++++++++++++++++++++++++--
 1 file changed, 35 insertions(+), 2 deletions(-)

diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index b00ac2abe..c2f5aa78d 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -25,7 +25,8 @@
 #include "ice_generic_flow.h"
 
 
-#define MAX_QGRP_NUM_TYPE 7
+#define MAX_QGRP_NUM_TYPE	7
+#define MAX_INPUT_SET_BYTE	32
 
 #define ICE_SW_INSET_ETHER ( \
 	ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
@@ -320,6 +321,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
 		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
 	uint64_t input_set = ICE_INSET_NONE;
+	uint16_t input_set_byte = 0;
 	uint16_t j, t = 0;
 	uint16_t tunnel_valid = 0;
 
@@ -369,6 +371,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 						m->src_addr[j] =
 						eth_mask->src.addr_bytes[j];
 						i = 1;
+						input_set_byte++;
 					}
 					if (eth_mask->dst.addr_bytes[j] ==
 								UINT8_MAX) {
@@ -377,6 +380,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 						m->dst_addr[j] =
 						eth_mask->dst.addr_bytes[j];
 						i = 1;
+						input_set_byte++;
 					}
 				}
 				if (i)
@@ -387,6 +391,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 						eth_spec->type;
 					list[t].m_u.ethertype.ethtype_id =
 						UINT16_MAX;
+					input_set_byte += 2;
 					t++;
 				}
 			} else if (!eth_spec && !eth_mask) {
@@ -458,30 +463,35 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 						ipv4_spec->hdr.src_addr;
 					list[t].m_u.ipv4_hdr.src_addr =
 						UINT32_MAX;
+					input_set_byte += 2;
 				}
 				if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
 					list[t].h_u.ipv4_hdr.dst_addr =
 						ipv4_spec->hdr.dst_addr;
 					list[t].m_u.ipv4_hdr.dst_addr =
 						UINT32_MAX;
+					input_set_byte += 2;
 				}
 				if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
 					list[t].h_u.ipv4_hdr.time_to_live =
 						ipv4_spec->hdr.time_to_live;
 					list[t].m_u.ipv4_hdr.time_to_live =
 						UINT8_MAX;
+					input_set_byte++;
 				}
 				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
 					list[t].h_u.ipv4_hdr.protocol =
 						ipv4_spec->hdr.next_proto_id;
 					list[t].m_u.ipv4_hdr.protocol =
 						UINT8_MAX;
+					input_set_byte++;
 				}
 				if (ipv4_mask->hdr.type_of_service ==
 						UINT8_MAX) {
 					list[t].h_u.ipv4_hdr.tos =
 						ipv4_spec->hdr.type_of_service;
 					list[t].m_u.ipv4_hdr.tos = UINT8_MAX;
+					input_set_byte++;
 				}
 				t++;
 			} else if (!ipv4_spec && !ipv4_mask) {
@@ -563,6 +573,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 						ipv6_spec->hdr.src_addr[j];
 						s->src_addr[j] =
 						ipv6_mask->hdr.src_addr[j];
+						input_set_byte++;
 					}
 					if (ipv6_mask->hdr.dst_addr[j] ==
 								UINT8_MAX) {
@@ -570,17 +581,20 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 						ipv6_spec->hdr.dst_addr[j];
 						s->dst_addr[j] =
 						ipv6_mask->hdr.dst_addr[j];
+						input_set_byte++;
 					}
 				}
 				if (ipv6_mask->hdr.proto == UINT8_MAX) {
 					f->next_hdr =
 						ipv6_spec->hdr.proto;
 					s->next_hdr = UINT8_MAX;
+					input_set_byte++;
 				}
 				if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
 					f->hop_limit =
 						ipv6_spec->hdr.hop_limits;
 					s->hop_limit = UINT8_MAX;
+					input_set_byte++;
 				}
 				if ((ipv6_mask->hdr.vtc_flow &
 						rte_cpu_to_be_32
@@ -597,6 +611,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 					f->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
 					vtf.u.fld.tc = UINT8_MAX;
 					s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
+					input_set_byte += 4;
 				}
 				t++;
 			} else if (!ipv6_spec && !ipv6_mask) {
@@ -648,14 +663,16 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 						udp_spec->hdr.src_port;
 					list[t].m_u.l4_hdr.src_port =
 						udp_mask->hdr.src_port;
+					input_set_byte += 2;
 				}
 				if (udp_mask->hdr.dst_port == UINT16_MAX) {
 					list[t].h_u.l4_hdr.dst_port =
 						udp_spec->hdr.dst_port;
 					list[t].m_u.l4_hdr.dst_port =
 						udp_mask->hdr.dst_port;
+					input_set_byte += 2;
 				}
-						t++;
+				t++;
 			} else if (!udp_spec && !udp_mask) {
 				list[t].type = ICE_UDP_ILOS;
 			}
@@ -705,12 +722,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 						tcp_spec->hdr.src_port;
 					list[t].m_u.l4_hdr.src_port =
 						tcp_mask->hdr.src_port;
+					input_set_byte += 2;
 				}
 				if (tcp_mask->hdr.dst_port == UINT16_MAX) {
 					list[t].h_u.l4_hdr.dst_port =
 						tcp_spec->hdr.dst_port;
 					list[t].m_u.l4_hdr.dst_port =
 						tcp_mask->hdr.dst_port;
+					input_set_byte += 2;
 				}
 				t++;
 			} else if (!tcp_spec && !tcp_mask) {
@@ -756,12 +775,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 						sctp_spec->hdr.src_port;
 					list[t].m_u.sctp_hdr.src_port =
 						sctp_mask->hdr.src_port;
+					input_set_byte += 2;
 				}
 				if (sctp_mask->hdr.dst_port == UINT16_MAX) {
 					list[t].h_u.sctp_hdr.dst_port =
 						sctp_spec->hdr.dst_port;
 					list[t].m_u.sctp_hdr.dst_port =
 						sctp_mask->hdr.dst_port;
+					input_set_byte += 2;
 				}
 				t++;
 			} else if (!sctp_spec && !sctp_mask) {
@@ -799,6 +820,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 						UINT32_MAX;
 					input_set |=
 						ICE_INSET_TUN_VXLAN_VNI;
+					input_set_byte += 2;
 				}
 				t++;
 			} else if (!vxlan_spec && !vxlan_mask) {
@@ -835,6 +857,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 						UINT32_MAX;
 					input_set |=
 						ICE_INSET_TUN_NVGRE_TNI;
+					input_set_byte += 2;
 				}
 				t++;
 			} else if (!nvgre_spec && !nvgre_mask) {
@@ -865,6 +888,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 					list[t].m_u.vlan_hdr.vlan =
 						UINT16_MAX;
 					input_set |= ICE_INSET_VLAN_OUTER;
+					input_set_byte += 2;
 				}
 				if (vlan_mask->inner_type == UINT16_MAX) {
 					list[t].h_u.vlan_hdr.type =
@@ -872,6 +896,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 					list[t].m_u.vlan_hdr.type =
 						UINT16_MAX;
 					input_set |= ICE_INSET_ETHERTYPE;
+					input_set_byte += 2;
 				}
 				t++;
 			} else if (!vlan_spec && !vlan_mask) {
@@ -906,6 +931,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 		}
 	}
 
+	if (input_set_byte > MAX_INPUT_SET_BYTE) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM,
+			item,
+			"too much input set");
+		return -ENOTSUP;
+	}
+
 	*lkups_num = t;
 
 	return input_set;
-- 
2.20.1

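As a side note for reviewers: the logic being backported boils down to
counting how many bytes of the flow pattern are fully masked and rejecting
the rule once the total exceeds the 32-byte hardware limit. Below is a
minimal standalone sketch of that counting pattern, with hypothetical names
and structures rather than the driver's actual code (the driver itself bumps
input_set_byte in fixed per-field increments as each mask is parsed):

    #include <errno.h>
    #include <stddef.h>
    #include <stdint.h>

    #define MAX_INPUT_SET_BYTE 32

    /* One matched field of a flow pattern: a byte-wise mask and its width. */
    struct field_mask {
    	const uint8_t *mask;
    	size_t len;
    };

    /*
     * Count the fully-masked bytes across all fields and reject the rule
     * when the total exceeds the 32-byte hardware limit, mirroring the
     * input_set_byte accounting added by the patch.
     */
    static int
    check_input_set_bytes(const struct field_mask *fields, size_t n)
    {
    	uint16_t input_set_byte = 0;
    	size_t i, j;

    	for (i = 0; i < n; i++)
    		for (j = 0; j < fields[i].len; j++)
    			if (fields[i].mask[j] == UINT8_MAX)
    				input_set_byte++;

    	return input_set_byte > MAX_INPUT_SET_BYTE ? -ENOTSUP : 0;
    }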
---
  Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- -	2020-07-24 12:53:53.012212079 +0100
+++ 0116-net-ice-add-input-set-byte-number-check.patch	2020-07-24 12:53:48.387008323 +0100
@@ -1,52 +1,53 @@
-From 8de93729837d265de5b34cd318c9249720badfc8 Mon Sep 17 00:00:00 2001
+From 21637a6e327be94e6b741394ac5a50bc24a63341 Mon Sep 17 00:00:00 2001
 From: Wei Zhao <wei.zhao1 at intel.com>
 Date: Fri, 3 Jul 2020 14:19:37 +0800
 Subject: [PATCH] net/ice: add input set byte number check
 
+[ upstream commit 8de93729837d265de5b34cd318c9249720badfc8 ]
+
 This patch adds a check on the total number of input set bytes,
 as the hardware requires that the total not exceed 32 bytes.
 
 Fixes: 47d460d63233 ("net/ice: rework switch filter")
-Cc: stable at dpdk.org
 
 Signed-off-by: Wei Zhao <wei.zhao1 at intel.com>
 Acked-by: Qi Zhang <qi.z.zhang at intel.com>
 ---
- drivers/net/ice/ice_switch_filter.c | 43 +++++++++++++++++++++++++++--
- 1 file changed, 40 insertions(+), 3 deletions(-)
+ drivers/net/ice/ice_switch_filter.c | 37 +++++++++++++++++++++++++++--
+ 1 file changed, 35 insertions(+), 2 deletions(-)
 
 diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
-index afdc116ee..9db89a307 100644
+index b00ac2abe..c2f5aa78d 100644
 --- a/drivers/net/ice/ice_switch_filter.c
 +++ b/drivers/net/ice/ice_switch_filter.c
-@@ -26,7 +26,8 @@
- #include "ice_dcf_ethdev.h"
+@@ -25,7 +25,8 @@
+ #include "ice_generic_flow.h"
  
  
 -#define MAX_QGRP_NUM_TYPE 7
 +#define MAX_QGRP_NUM_TYPE	7
 +#define MAX_INPUT_SET_BYTE	32
- #define ICE_PPP_IPV4_PROTO	0x0021
- #define ICE_PPP_IPV6_PROTO	0x0057
- #define ICE_IPV4_PROTO_NVGRE	0x002F
-@@ -472,6 +473,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
- 	const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
- 	const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
+ 
+ #define ICE_SW_INSET_ETHER ( \
+ 	ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
+@@ -320,6 +321,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
+ 		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
  	uint64_t input_set = ICE_INSET_NONE;
 +	uint16_t input_set_byte = 0;
- 	bool pppoe_elem_valid = 0;
- 	bool pppoe_patt_valid = 0;
- 	bool pppoe_prot_valid = 0;
-@@ -541,6 +543,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
+ 	uint16_t j, t = 0;
+ 	uint16_t tunnel_valid = 0;
+ 
+@@ -369,6 +371,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
  						m->src_addr[j] =
  						eth_mask->src.addr_bytes[j];
  						i = 1;
 +						input_set_byte++;
  					}
- 					if (eth_mask->dst.addr_bytes[j]) {
- 						h->dst_addr[j] =
-@@ -548,6 +551,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
+ 					if (eth_mask->dst.addr_bytes[j] ==
+ 								UINT8_MAX) {
+@@ -377,6 +380,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
  						m->dst_addr[j] =
  						eth_mask->dst.addr_bytes[j];
  						i = 1;
@@ -54,96 +55,94 @@
  					}
  				}
  				if (i)
-@@ -558,6 +562,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
+@@ -387,6 +391,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
  						eth_spec->type;
  					list[t].m_u.ethertype.ethtype_id =
- 						eth_mask->type;
+ 						UINT16_MAX;
 +					input_set_byte += 2;
  					t++;
  				}
- 			}
-@@ -617,24 +622,28 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
+ 			} else if (!eth_spec && !eth_mask) {
+@@ -458,30 +463,35 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
  						ipv4_spec->hdr.src_addr;
  					list[t].m_u.ipv4_hdr.src_addr =
- 						ipv4_mask->hdr.src_addr;
+ 						UINT32_MAX;
 +					input_set_byte += 2;
  				}
- 				if (ipv4_mask->hdr.dst_addr) {
+ 				if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
  					list[t].h_u.ipv4_hdr.dst_addr =
  						ipv4_spec->hdr.dst_addr;
  					list[t].m_u.ipv4_hdr.dst_addr =
- 						ipv4_mask->hdr.dst_addr;
+ 						UINT32_MAX;
 +					input_set_byte += 2;
  				}
- 				if (ipv4_mask->hdr.time_to_live) {
+ 				if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
  					list[t].h_u.ipv4_hdr.time_to_live =
  						ipv4_spec->hdr.time_to_live;
  					list[t].m_u.ipv4_hdr.time_to_live =
- 						ipv4_mask->hdr.time_to_live;
+ 						UINT8_MAX;
 +					input_set_byte++;
  				}
- 				if (ipv4_mask->hdr.next_proto_id) {
+ 				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
  					list[t].h_u.ipv4_hdr.protocol =
  						ipv4_spec->hdr.next_proto_id;
  					list[t].m_u.ipv4_hdr.protocol =
- 						ipv4_mask->hdr.next_proto_id;
+ 						UINT8_MAX;
 +					input_set_byte++;
  				}
- 				if ((ipv4_spec->hdr.next_proto_id &
- 					ipv4_mask->hdr.next_proto_id) ==
-@@ -645,6 +654,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
+ 				if (ipv4_mask->hdr.type_of_service ==
+ 						UINT8_MAX) {
+ 					list[t].h_u.ipv4_hdr.tos =
  						ipv4_spec->hdr.type_of_service;
- 					list[t].m_u.ipv4_hdr.tos =
- 						ipv4_mask->hdr.type_of_service;
+ 					list[t].m_u.ipv4_hdr.tos = UINT8_MAX;
 +					input_set_byte++;
  				}
  				t++;
- 			}
-@@ -722,12 +732,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
+ 			} else if (!ipv4_spec && !ipv4_mask) {
+@@ -563,6 +573,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
  						ipv6_spec->hdr.src_addr[j];
  						s->src_addr[j] =
  						ipv6_mask->hdr.src_addr[j];
 +						input_set_byte++;
  					}
- 					if (ipv6_mask->hdr.dst_addr[j]) {
- 						f->dst_addr[j] =
+ 					if (ipv6_mask->hdr.dst_addr[j] ==
+ 								UINT8_MAX) {
+@@ -570,17 +581,20 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
  						ipv6_spec->hdr.dst_addr[j];
  						s->dst_addr[j] =
  						ipv6_mask->hdr.dst_addr[j];
 +						input_set_byte++;
  					}
  				}
- 				if (ipv6_mask->hdr.proto) {
-@@ -735,12 +747,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
+ 				if (ipv6_mask->hdr.proto == UINT8_MAX) {
+ 					f->next_hdr =
  						ipv6_spec->hdr.proto;
- 					s->next_hdr =
- 						ipv6_mask->hdr.proto;
+ 					s->next_hdr = UINT8_MAX;
 +					input_set_byte++;
  				}
- 				if (ipv6_mask->hdr.hop_limits) {
+ 				if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
  					f->hop_limit =
  						ipv6_spec->hdr.hop_limits;
- 					s->hop_limit =
- 						ipv6_mask->hdr.hop_limits;
+ 					s->hop_limit = UINT8_MAX;
 +					input_set_byte++;
  				}
- 				if (ipv6_mask->hdr.vtc_flow &
+ 				if ((ipv6_mask->hdr.vtc_flow &
  						rte_cpu_to_be_32
-@@ -758,6 +772,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
- 							RTE_IPV6_HDR_TC_MASK) >>
- 							RTE_IPV6_HDR_TC_SHIFT;
+@@ -597,6 +611,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
+ 					f->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
+ 					vtf.u.fld.tc = UINT8_MAX;
  					s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
 +					input_set_byte += 4;
  				}
  				t++;
- 			}
-@@ -803,14 +818,16 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
+ 			} else if (!ipv6_spec && !ipv6_mask) {
+@@ -648,14 +663,16 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
  						udp_spec->hdr.src_port;
  					list[t].m_u.l4_hdr.src_port =
  						udp_mask->hdr.src_port;
 +					input_set_byte += 2;
  				}
- 				if (udp_mask->hdr.dst_port) {
+ 				if (udp_mask->hdr.dst_port == UINT16_MAX) {
  					list[t].h_u.l4_hdr.dst_port =
  						udp_spec->hdr.dst_port;
  					list[t].m_u.l4_hdr.dst_port =
@@ -152,16 +151,16 @@
  				}
 -						t++;
 +				t++;
+ 			} else if (!udp_spec && !udp_mask) {
+ 				list[t].type = ICE_UDP_ILOS;
  			}
- 			break;
- 
-@@ -855,12 +872,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
+@@ -705,12 +722,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
  						tcp_spec->hdr.src_port;
  					list[t].m_u.l4_hdr.src_port =
  						tcp_mask->hdr.src_port;
 +					input_set_byte += 2;
  				}
- 				if (tcp_mask->hdr.dst_port) {
+ 				if (tcp_mask->hdr.dst_port == UINT16_MAX) {
  					list[t].h_u.l4_hdr.dst_port =
  						tcp_spec->hdr.dst_port;
  					list[t].m_u.l4_hdr.dst_port =
@@ -169,14 +168,14 @@
 +					input_set_byte += 2;
  				}
  				t++;
- 			}
-@@ -900,12 +919,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
+ 			} else if (!tcp_spec && !tcp_mask) {
+@@ -756,12 +775,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
  						sctp_spec->hdr.src_port;
  					list[t].m_u.sctp_hdr.src_port =
  						sctp_mask->hdr.src_port;
 +					input_set_byte += 2;
  				}
- 				if (sctp_mask->hdr.dst_port) {
+ 				if (sctp_mask->hdr.dst_port == UINT16_MAX) {
  					list[t].h_u.sctp_hdr.dst_port =
  						sctp_spec->hdr.dst_port;
  					list[t].m_u.sctp_hdr.dst_port =
@@ -184,82 +183,41 @@
 +					input_set_byte += 2;
  				}
  				t++;
- 			}
-@@ -943,6 +964,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
- 						vxlan_mask->vni[0];
+ 			} else if (!sctp_spec && !sctp_mask) {
+@@ -799,6 +820,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
+ 						UINT32_MAX;
  					input_set |=
  						ICE_INSET_TUN_VXLAN_VNI;
 +					input_set_byte += 2;
  				}
  				t++;
- 			}
-@@ -980,6 +1002,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
- 						nvgre_mask->tni[0];
+ 			} else if (!vxlan_spec && !vxlan_mask) {
+@@ -835,6 +857,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
+ 						UINT32_MAX;
  					input_set |=
  						ICE_INSET_TUN_NVGRE_TNI;
 +					input_set_byte += 2;
  				}
  				t++;
- 			}
-@@ -1008,6 +1031,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
+ 			} else if (!nvgre_spec && !nvgre_mask) {
+@@ -865,6 +888,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
  					list[t].m_u.vlan_hdr.vlan =
- 						vlan_mask->tci;
+ 						UINT16_MAX;
  					input_set |= ICE_INSET_VLAN_OUTER;
 +					input_set_byte += 2;
  				}
- 				if (vlan_mask->inner_type) {
+ 				if (vlan_mask->inner_type == UINT16_MAX) {
  					list[t].h_u.vlan_hdr.type =
-@@ -1015,6 +1039,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
+@@ -872,6 +896,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
  					list[t].m_u.vlan_hdr.type =
- 						vlan_mask->inner_type;
+ 						UINT16_MAX;
  					input_set |= ICE_INSET_ETHERTYPE;
 +					input_set_byte += 2;
  				}
  				t++;
- 			}
-@@ -1055,6 +1080,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
- 					list[t].m_u.pppoe_hdr.session_id =
- 						pppoe_mask->session_id;
- 					input_set |= ICE_INSET_PPPOE_SESSION;
-+					input_set_byte += 2;
- 				}
- 				t++;
- 				pppoe_elem_valid = 1;
-@@ -1087,7 +1113,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
- 					list[t].m_u.pppoe_hdr.ppp_prot_id =
- 						pppoe_proto_mask->proto_id;
- 					input_set |= ICE_INSET_PPPOE_PROTO;
--
-+					input_set_byte += 2;
- 					pppoe_prot_valid = 1;
- 				}
- 				if ((pppoe_proto_mask->proto_id &
-@@ -1144,6 +1170,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
- 				list[t].m_u.esp_hdr.spi =
- 					esp_mask->hdr.spi;
- 				input_set |= ICE_INSET_ESP_SPI;
-+				input_set_byte += 4;
- 				t++;
- 			}
- 
-@@ -1200,6 +1227,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
- 				list[t].m_u.ah_hdr.spi =
- 					ah_mask->spi;
- 				input_set |= ICE_INSET_AH_SPI;
-+				input_set_byte += 4;
- 				t++;
- 			}
- 
-@@ -1239,6 +1267,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
- 				list[t].m_u.l2tpv3_sess_hdr.session_id =
- 					l2tp_mask->session_id;
- 				input_set |= ICE_INSET_L2TPV3OIP_SESSION_ID;
-+				input_set_byte += 4;
- 				t++;
- 			}
- 
-@@ -1344,6 +1373,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
- 			*tun_type = ICE_SW_IPV6_UDP;
+ 			} else if (!vlan_spec && !vlan_mask) {
+@@ -906,6 +931,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
+ 		}
  	}
  
 +	if (input_set_byte > MAX_INPUT_SET_BYTE) {

