[v2,3/3] net/ice: support FDIR for IP fragment packet

Message ID 20210324135407.60637-4-jia.guo@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Qi Zhang
Series: support flow for IP fragment in ICE

Checks

Context Check Description
ci/checkpatch warning coding style issues
ci/travis-robot fail travis build: failed
ci/github-robot success github build: passed
ci/iol-abi-testing success Testing PASS
ci/iol-intel-Performance success Performance Testing PASS
ci/iol-testing success Testing PASS
ci/iol-mellanox-Performance success Performance Testing PASS
ci/Intel-compilation fail Compilation issues
ci/intel-Testing success Testing PASS

Commit Message

Guo, Jia March 24, 2021, 1:54 p.m. UTC
  New FDIR parsing is added to handle fragmented IPv4/IPv6 packets.

Signed-off-by: Jeff Guo <jia.guo@intel.com>
---
 drivers/net/ice/ice_fdir_filter.c | 96 +++++++++++++++++++++++++++----
 1 file changed, 85 insertions(+), 11 deletions(-)
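
For context, the snippet below is a minimal sketch, not part of this patch, of the kind of rte_flow rule the new parsing is meant to accept: it matches any IPv4 fragment by giving packet_id a full 0..0xffff spec/last range with a full mask and by fully masking fragment_offset, then steers matches to a queue. The helper name, port id and queue index are illustrative assumptions.

#include <stdint.h>
#include <rte_byteorder.h>
#include <rte_flow.h>

/* Sketch: FDIR rule matching any IPv4 fragment and queueing it. */
static struct rte_flow *
create_ipv4_frag_rule(uint16_t port_id, uint16_t queue_id,
		      struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	/* spec 0 + last 0xffff + mask 0xffff on packet_id: any fragment ID */
	struct rte_flow_item_ipv4 spec = { .hdr.packet_id = 0 };
	struct rte_flow_item_ipv4 last = {
		.hdr.packet_id = RTE_BE16(0xffff),
	};
	struct rte_flow_item_ipv4 mask = {
		.hdr.packet_id = RTE_BE16(0xffff),
		/* fully masked fragment_offset selects the fragment flow type */
		.hdr.fragment_offset = RTE_BE16(0xffff),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &spec, .last = &last, .mask = &mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = queue_id };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}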
  

Comments

Xu, Ting March 30, 2021, 3:25 a.m. UTC | #1
Hi, Jeff

> -----Original Message-----
> From: Guo, Jia <jia.guo@intel.com>
> Sent: Wednesday, March 24, 2021 9:54 PM
> To: Yang, Qiming <qiming.yang@intel.com>; Zhang, Qi Z
> <qi.z.zhang@intel.com>
> Cc: Xu, Ting <ting.xu@intel.com>; dev@dpdk.org; Guo, Jia <jia.guo@intel.com>
> Subject: [PATCH v2 3/3] net/ice: support FDIR for IP fragment packet
> 
> New FDIR parsing is added to handle fragmented IPv4/IPv6 packets.
> 
> Signed-off-by: Jeff Guo <jia.guo@intel.com>
> ---
>  drivers/net/ice/ice_fdir_filter.c | 96 +++++++++++++++++++++++++++----
>  1 file changed, 85 insertions(+), 11 deletions(-)
> 
> diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c
> index 3af5812660..3504d3c6c2 100644
> --- a/drivers/net/ice/ice_fdir_filter.c
> +++ b/drivers/net/ice/ice_fdir_filter.c
> @@ -24,7 +24,7 @@
>  #define ICE_FDIR_INSET_ETH_IPV4 (\
>  	ICE_FDIR_INSET_ETH | \
>  	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \
> -	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO)
> +	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_PKID)
> 
>  #define ICE_FDIR_INSET_ETH_IPV4_UDP (\
>  	ICE_FDIR_INSET_ETH_IPV4 | \
> @@ -41,7 +41,8 @@
>  #define ICE_FDIR_INSET_ETH_IPV6 (\
>  	ICE_INSET_DMAC | \
>  	ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_TC | \
> -	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_NEXT_HDR)
> +	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_NEXT_HDR | \
> +	ICE_INSET_IPV6_PKID)
> 
>  #define ICE_FDIR_INSET_ETH_IPV6_UDP (\
>  	ICE_FDIR_INSET_ETH_IPV6 | \
> @@ -56,7 +57,8 @@
>  	ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)
> 
>  #define ICE_FDIR_INSET_IPV4 (\
> -	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST)
> +	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
> +	ICE_INSET_IPV4_PKID)
> 
>  #define ICE_FDIR_INSET_IPV4_TCP (\
>  	ICE_FDIR_INSET_IPV4 | \
> @@ -72,7 +74,8 @@
> 
>  #define ICE_FDIR_INSET_ETH_IPV4_VXLAN (\
>  	ICE_FDIR_INSET_ETH | \
> -	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST)
> +	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
> +	ICE_INSET_IPV4_PKID)
> 
>  #define ICE_FDIR_INSET_IPV4_GTPU (\
>  	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_GTPU_TEID)
> @@ -95,6 +98,7 @@ static struct ice_pattern_match_item
> ice_fdir_pattern_list[] = {
>  	{pattern_eth_ipv4_tcp,
> 	ICE_FDIR_INSET_ETH_IPV4_TCP,	ICE_INSET_NONE,
> 	ICE_INSET_NONE},
>  	{pattern_eth_ipv4_sctp,
> 	ICE_FDIR_INSET_ETH_IPV4_SCTP,	ICE_INSET_NONE,
> 		ICE_INSET_NONE},
>  	{pattern_eth_ipv6,
> 	ICE_FDIR_INSET_ETH_IPV6,	ICE_INSET_NONE,
> 	ICE_INSET_NONE},
> +	{pattern_eth_ipv6_frag_ext,
> 	ICE_FDIR_INSET_ETH_IPV6,	ICE_INSET_NONE,
> 	ICE_INSET_NONE},
>  	{pattern_eth_ipv6_udp,
> 	ICE_FDIR_INSET_ETH_IPV6_UDP,	ICE_INSET_NONE,
> 		ICE_INSET_NONE},
>  	{pattern_eth_ipv6_tcp,
> 	ICE_FDIR_INSET_ETH_IPV6_TCP,	ICE_INSET_NONE,
> 	ICE_INSET_NONE},
>  	{pattern_eth_ipv6_sctp,
> 	ICE_FDIR_INSET_ETH_IPV6_SCTP,	ICE_INSET_NONE,
> 		ICE_INSET_NONE},
> @@ -882,11 +886,13 @@ ice_fdir_input_set_parse(uint64_t inset, enum
> ice_flow_field *field)
>  		{ICE_INSET_IPV4_TOS, ICE_FLOW_FIELD_IDX_IPV4_DSCP},
>  		{ICE_INSET_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL},
>  		{ICE_INSET_IPV4_PROTO, ICE_FLOW_FIELD_IDX_IPV4_PROT},
> +		{ICE_INSET_IPV4_PKID, ICE_FLOW_FIELD_IDX_IPV4_ID},
>  		{ICE_INSET_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA},
>  		{ICE_INSET_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA},
>  		{ICE_INSET_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP},
>  		{ICE_INSET_IPV6_NEXT_HDR,
> ICE_FLOW_FIELD_IDX_IPV6_PROT},
>  		{ICE_INSET_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL},
> +		{ICE_INSET_IPV6_PKID, ICE_FLOW_FIELD_IDX_IPV6_ID},
>  		{ICE_INSET_TCP_SRC_PORT,
> ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
>  		{ICE_INSET_TCP_DST_PORT,
> ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
>  		{ICE_INSET_UDP_SRC_PORT,
> ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
> @@ -935,6 +941,10 @@ ice_fdir_input_set_hdrs(enum ice_fltr_ptype flow, struct ice_flow_seg_info *seg)
>  		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
>  				  ICE_FLOW_SEG_HDR_IPV_OTHER);
>  		break;
> +	case ICE_FLTR_PTYPE_FRAG_IPV4:
> +		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
> +				  ICE_FLOW_SEG_HDR_IPV_FRAG);
> +		break;
>  	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
>  		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
>  				  ICE_FLOW_SEG_HDR_IPV6 |
> @@ -951,6 +961,10 @@ ice_fdir_input_set_hdrs(enum ice_fltr_ptype flow,
> struct ice_flow_seg_info *seg)
>  				  ICE_FLOW_SEG_HDR_IPV_OTHER);
>  		break;
>  	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
> +		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
> +				  ICE_FLOW_SEG_HDR_IPV_FRAG);
> +		break;
> +	case ICE_FLTR_PTYPE_FRAG_IPV6:

Are the frag and non-frag cases inverted?

>  		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
>  				  ICE_FLOW_SEG_HDR_IPV_OTHER);
>  		break;
> @@ -1592,8 +1606,10 @@ ice_fdir_parse_pattern(__rte_unused struct
> ice_adapter *ad,
>  	enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
>  	enum ice_fdir_tunnel_type tunnel_type =
> ICE_FDIR_TUNNEL_TYPE_NONE;
>  	const struct rte_flow_item_eth *eth_spec, *eth_mask;
> -	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
> +	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_last, *ipv4_mask;
>  	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
> +	const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_spec,
> +					*ipv6_frag_last, *ipv6_frag_mask;
>  	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
>  	const struct rte_flow_item_udp *udp_spec, *udp_mask;
>  	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
> @@ -1615,6 +1631,7 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
>  	struct ice_fdir_extra *p_ext_data;
>  	struct ice_fdir_v4 *p_v4 = NULL;
>  	struct ice_fdir_v6 *p_v6 = NULL;
> +	bool spec_all_pid = false;
> 
>  	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++)
> {
>  		if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
> @@ -1632,13 +1649,14 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
>  	 * flow. input_set_i is used for inner part.
>  	 */
>  	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++)
> {
> -		if (item->last) {
> +		if (item->last && (item_type != RTE_FLOW_ITEM_TYPE_IPV4 ||
> +				   item_type !=
> +				   RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT)) {
>  			rte_flow_error_set(error, EINVAL,
> -					   RTE_FLOW_ERROR_TYPE_ITEM,
> -					   item,
> +					   RTE_FLOW_ERROR_TYPE_ITEM, item,
>  					   "Not support range");
> -			return -rte_errno;
>  		}
> +
>  		item_type = item->type;
> 
>  		input_set = (tunnel_type && !is_outer) ?
> @@ -1689,6 +1707,7 @@ ice_fdir_parse_pattern(__rte_unused struct
> ice_adapter *ad,
>  			flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
>  			l3 = RTE_FLOW_ITEM_TYPE_IPV4;
>  			ipv4_spec = item->spec;
> +			ipv4_last = item->last;
>  			ipv4_mask = item->mask;
>  			p_v4 = (tunnel_type && is_outer) ?
>  			       &filter->input.ip_outer.v4 :
> @@ -1700,8 +1719,6 @@ ice_fdir_parse_pattern(__rte_unused struct
> ice_adapter *ad,
>  			/* Check IPv4 mask and update input set */
>  			if (ipv4_mask->hdr.version_ihl ||
>  			    ipv4_mask->hdr.total_length ||
> -			    ipv4_mask->hdr.packet_id ||
> -			    ipv4_mask->hdr.fragment_offset ||
>  			    ipv4_mask->hdr.hdr_checksum) {
>  				rte_flow_error_set(error, EINVAL,
> 
> RTE_FLOW_ERROR_TYPE_ITEM,
> @@ -1710,6 +1727,20 @@ ice_fdir_parse_pattern(__rte_unused struct
> ice_adapter *ad,
>  				return -rte_errno;
>  			}
> 
> +			if (ipv4_last->hdr.version_ihl ||
> +			    ipv4_last->hdr.type_of_service ||
> +			    ipv4_last->hdr.time_to_live ||
> +			    ipv4_last->hdr.total_length |
> +			    ipv4_last->hdr.next_proto_id ||
> +			    ipv4_last->hdr.hdr_checksum ||
> +			    ipv4_last->hdr.src_addr ||
> +			    ipv4_last->hdr.dst_addr) {
> +				rte_flow_error_set(error, EINVAL,
> +
> RTE_FLOW_ERROR_TYPE_ITEM,
> +						   item, "Invalid IPv4 last.");
> +				return -rte_errno;
> +			}
> +
>  			if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
>  				*input_set |= ICE_INSET_IPV4_DST;
>  			if (ipv4_mask->hdr.src_addr == UINT32_MAX)
> @@ -1726,6 +1757,24 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
>  			p_v4->ttl = ipv4_spec->hdr.time_to_live;
>  			p_v4->proto = ipv4_spec->hdr.next_proto_id;
>  			p_v4->tos = ipv4_spec->hdr.type_of_service;
> +
> +			if (ipv4_mask->hdr.packet_id == UINT16_MAX) {
> +				if (ipv4_last &&
> +				    ipv4_spec->hdr.packet_id == 0 &&
> +				    ipv4_last->hdr.packet_id == 0xffff)
> +					spec_all_pid = true;
> +
> +				/* All IPv4 fragment packet has the same
> +				 * ethertype, if the spec is for all invalid
> +				 * packet id, set ethertype into input set.
> +				 */
> +				*input_set |= spec_all_pid ?
> +						ICE_INSET_ETHERTYPE :
> +						ICE_INSET_IPV4_PKID;
> +			}
> +
> +			if (ipv4_mask->hdr.fragment_offset == UINT16_MAX)
> +				flow_type = ICE_FLTR_PTYPE_FRAG_IPV4;
>  			break;
>  		case RTE_FLOW_ITEM_TYPE_IPV6:
>  			flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
> @@ -1771,6 +1820,31 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
>  			p_v6->proto = ipv6_spec->hdr.proto;
>  			p_v6->hlim = ipv6_spec->hdr.hop_limits;
>  			break;
> +		case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
> +			l3 = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT;
> +			flow_type = ICE_FLTR_PTYPE_FRAG_IPV6;
> +			ipv6_frag_spec = item->spec;
> +			ipv6_frag_last = item->last;
> +			ipv6_frag_mask = item->mask;
> +
> +			if (!(ipv6_frag_spec && ipv6_frag_mask))
> +				break;
> +
> +			if (ipv6_frag_last &&
> +			    ipv6_frag_spec->hdr.id == 0 &&
> +			    ipv6_frag_last->hdr.id ==
> +			    0xffffffff)
> +				spec_all_pid = true;
> +
> +			/* All IPv6 fragment packet has the same ethertype, if
> +			 * the spec is for all invalid packet id, set ethertype
> +			 * into input set.
> +			 */
> +			*input_set |= spec_all_pid ?
> +					ICE_INSET_ETHERTYPE :
> +					ICE_INSET_IPV6_PKID;
> +			break;
> +
>  		case RTE_FLOW_ITEM_TYPE_TCP:
>  			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
>  				flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
> --
> 2.20.1
  
Xu, Ting April 1, 2021, 2:08 a.m. UTC | #2
Hi, Jeff

See inline

Best Regards,
Xu Ting

> -----Original Message-----
> From: Guo, Jia <jia.guo@intel.com>
> Sent: Wednesday, March 24, 2021 9:54 PM
> To: Yang, Qiming <qiming.yang@intel.com>; Zhang, Qi Z
> <qi.z.zhang@intel.com>
> Cc: Xu, Ting <ting.xu@intel.com>; dev@dpdk.org; Guo, Jia <jia.guo@intel.com>
> Subject: [PATCH v2 3/3] net/ice: support FDIR for IP fragment packet
> 
> New FDIR parsing is added to handle fragmented IPv4/IPv6 packets.
> 
> Signed-off-by: Jeff Guo <jia.guo@intel.com>
> ---
>  drivers/net/ice/ice_fdir_filter.c | 96 +++++++++++++++++++++++++++----
>  1 file changed, 85 insertions(+), 11 deletions(-)
> 
> diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c
> index 3af5812660..3504d3c6c2 100644
> --- a/drivers/net/ice/ice_fdir_filter.c
> +++ b/drivers/net/ice/ice_fdir_filter.c
> @@ -24,7 +24,7 @@
>  #define ICE_FDIR_INSET_ETH_IPV4 (\
>  	ICE_FDIR_INSET_ETH | \
>  	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \
> -	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO)
> +	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_PKID)
> 

Skip...

> @@ -1700,8 +1719,6 @@ ice_fdir_parse_pattern(__rte_unused struct
> ice_adapter *ad,
>  			/* Check IPv4 mask and update input set */
>  			if (ipv4_mask->hdr.version_ihl ||
>  			    ipv4_mask->hdr.total_length ||
> -			    ipv4_mask->hdr.packet_id ||
> -			    ipv4_mask->hdr.fragment_offset ||
>  			    ipv4_mask->hdr.hdr_checksum) {
>  				rte_flow_error_set(error, EINVAL,
> 
> RTE_FLOW_ERROR_TYPE_ITEM,
> @@ -1710,6 +1727,20 @@ ice_fdir_parse_pattern(__rte_unused struct
> ice_adapter *ad,
>  				return -rte_errno;
>  			}
> 

May need to check if ipv4_last exists before using it.

> +			if (ipv4_last->hdr.version_ihl ||
> +			    ipv4_last->hdr.type_of_service ||
> +			    ipv4_last->hdr.time_to_live ||
> +			    ipv4_last->hdr.total_length |
> +			    ipv4_last->hdr.next_proto_id ||
> +			    ipv4_last->hdr.hdr_checksum ||
> +			    ipv4_last->hdr.src_addr ||
> +			    ipv4_last->hdr.dst_addr) {
> +				rte_flow_error_set(error, EINVAL,
> +
> RTE_FLOW_ERROR_TYPE_ITEM,
> +						   item, "Invalid IPv4 last.");
> +				return -rte_errno;
> +			}
> +
>  			if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
>  				*input_set |= ICE_INSET_IPV4_DST;
>  			if (ipv4_mask->hdr.src_addr == UINT32_MAX) @@ -

Skip...

> 2.20.1
  
Guo, Jia April 2, 2021, 1:53 a.m. UTC | #3
Hi, Ting

> -----Original Message-----
> From: Xu, Ting <ting.xu@intel.com>
> Sent: Thursday, April 1, 2021 10:08 AM
> To: Guo, Jia <jia.guo@intel.com>; Yang, Qiming <qiming.yang@intel.com>;
> Zhang, Qi Z <qi.z.zhang@intel.com>
> Cc: dev@dpdk.org
> Subject: RE: [PATCH v2 3/3] net/ice: support FDIR for IP fragment packet
> 
> Hi, Jeff
> 
> See inline
> 
> Best Regards,
> Xu Ting
> 
> > -----Original Message-----
> > From: Guo, Jia <jia.guo@intel.com>
> > Sent: Wednesday, March 24, 2021 9:54 PM
> > To: Yang, Qiming <qiming.yang@intel.com>; Zhang, Qi Z
> > <qi.z.zhang@intel.com>
> > Cc: Xu, Ting <ting.xu@intel.com>; dev@dpdk.org; Guo, Jia
> > <jia.guo@intel.com>
> > Subject: [PATCH v2 3/3] net/ice: support FDIR for IP fragment packet
> >
> > New FDIR parsing is added to handle fragmented IPv4/IPv6 packets.
> >
> > Signed-off-by: Jeff Guo <jia.guo@intel.com>
> > ---
> >  drivers/net/ice/ice_fdir_filter.c | 96 +++++++++++++++++++++++++++----
> >  1 file changed, 85 insertions(+), 11 deletions(-)
> >
> > diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c
> > index 3af5812660..3504d3c6c2 100644
> > --- a/drivers/net/ice/ice_fdir_filter.c
> > +++ b/drivers/net/ice/ice_fdir_filter.c
> > @@ -24,7 +24,7 @@
> >  #define ICE_FDIR_INSET_ETH_IPV4 (\
> >  	ICE_FDIR_INSET_ETH | \
> >  	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \
> > -	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO)
> > +	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_PKID)
> >
> 
> Skip...
> 
> > @@ -1700,8 +1719,6 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
> >  			/* Check IPv4 mask and update input set */
> >  			if (ipv4_mask->hdr.version_ihl ||
> >  			    ipv4_mask->hdr.total_length ||
> > -			    ipv4_mask->hdr.packet_id ||
> > -			    ipv4_mask->hdr.fragment_offset ||
> >  			    ipv4_mask->hdr.hdr_checksum) {
> >  				rte_flow_error_set(error, EINVAL,
> >  						   RTE_FLOW_ERROR_TYPE_ITEM,
> > @@ -1710,6 +1727,20 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
> >  				return -rte_errno;
> >  			}
> >
> 
> May need to check if ipv4_last exists before using it.
> 

Sure, will add the check in the next version.
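
For illustration, one possible shape of that guard inside the IPv4 branch (a sketch only, not the actual v3 change):

			if (ipv4_last &&
			    (ipv4_last->hdr.version_ihl ||
			     ipv4_last->hdr.type_of_service ||
			     ipv4_last->hdr.time_to_live ||
			     ipv4_last->hdr.total_length ||
			     ipv4_last->hdr.next_proto_id ||
			     ipv4_last->hdr.hdr_checksum ||
			     ipv4_last->hdr.src_addr ||
			     ipv4_last->hdr.dst_addr)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item, "Invalid IPv4 last.");
				return -rte_errno;
			}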

> > +			if (ipv4_last->hdr.version_ihl ||
> > +			    ipv4_last->hdr.type_of_service ||
> > +			    ipv4_last->hdr.time_to_live ||
> > +			    ipv4_last->hdr.total_length |
> > +			    ipv4_last->hdr.next_proto_id ||
> > +			    ipv4_last->hdr.hdr_checksum ||
> > +			    ipv4_last->hdr.src_addr ||
> > +			    ipv4_last->hdr.dst_addr) {
> > +				rte_flow_error_set(error, EINVAL,
> > +						   RTE_FLOW_ERROR_TYPE_ITEM,
> > +						   item, "Invalid IPv4 last.");
> > +				return -rte_errno;
> > +			}
> > +
> >  			if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
> >  				*input_set |= ICE_INSET_IPV4_DST;
> >  			if (ipv4_mask->hdr.src_addr == UINT32_MAX)
> > @@ -
> 
> Skip...
> 
> > 2.20.1
>
  
Guo, Jia April 2, 2021, 2:06 a.m. UTC | #4
Hi, Ting

> -----Original Message-----
> From: Xu, Ting <ting.xu@intel.com>
> Sent: Tuesday, March 30, 2021 11:25 AM
> To: Guo, Jia <jia.guo@intel.com>; Yang, Qiming <qiming.yang@intel.com>;
> Zhang, Qi Z <qi.z.zhang@intel.com>
> Cc: dev@dpdk.org
> Subject: RE: [PATCH v2 3/3] net/ice: support FDIR for IP fragment packet
> 
> Hi, Jeff
> 
> > -----Original Message-----
> > From: Guo, Jia <jia.guo@intel.com>
> > Sent: Wednesday, March 24, 2021 9:54 PM
> > To: Yang, Qiming <qiming.yang@intel.com>; Zhang, Qi Z
> > <qi.z.zhang@intel.com>
> > Cc: Xu, Ting <ting.xu@intel.com>; dev@dpdk.org; Guo, Jia
> > <jia.guo@intel.com>
> > Subject: [PATCH v2 3/3] net/ice: support FDIR for IP fragment packet
> >
> > New FDIR parsing is added to handle fragmented IPv4/IPv6 packets.
> >
> > Signed-off-by: Jeff Guo <jia.guo@intel.com>
> > ---
> >  drivers/net/ice/ice_fdir_filter.c | 96 +++++++++++++++++++++++++++----
> >  1 file changed, 85 insertions(+), 11 deletions(-)
> >
> > diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c
> > index 3af5812660..3504d3c6c2 100644
> > --- a/drivers/net/ice/ice_fdir_filter.c
> > +++ b/drivers/net/ice/ice_fdir_filter.c
> > @@ -24,7 +24,7 @@
> >  #define ICE_FDIR_INSET_ETH_IPV4 (\
> >  	ICE_FDIR_INSET_ETH | \
> >  	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \
> > -	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO)
> > +	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_PKID)
> >
> >  #define ICE_FDIR_INSET_ETH_IPV4_UDP (\
> >  	ICE_FDIR_INSET_ETH_IPV4 | \
> > @@ -41,7 +41,8 @@
> >  #define ICE_FDIR_INSET_ETH_IPV6 (\
> >  	ICE_INSET_DMAC | \
> >  	ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_TC | \
> > -	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_NEXT_HDR)
> > +	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_NEXT_HDR | \
> > +	ICE_INSET_IPV6_PKID)
> >
> >  #define ICE_FDIR_INSET_ETH_IPV6_UDP (\
> >  	ICE_FDIR_INSET_ETH_IPV6 | \
> > @@ -56,7 +57,8 @@
> >  	ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)
> >
> >  #define ICE_FDIR_INSET_IPV4 (\
> > -	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST)
> > +	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
> > +	ICE_INSET_IPV4_PKID)
> >
> >  #define ICE_FDIR_INSET_IPV4_TCP (\
> >  	ICE_FDIR_INSET_IPV4 | \
> > @@ -72,7 +74,8 @@
> >
> >  #define ICE_FDIR_INSET_ETH_IPV4_VXLAN (\
> >  	ICE_FDIR_INSET_ETH | \
> > -	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST)
> > +	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
> > +	ICE_INSET_IPV4_PKID)
> >
> >  #define ICE_FDIR_INSET_IPV4_GTPU (\
> >  	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_GTPU_TEID)
> > @@ -95,6 +98,7 @@ static struct ice_pattern_match_item ice_fdir_pattern_list[] = {
> >  	{pattern_eth_ipv4_tcp,		ICE_FDIR_INSET_ETH_IPV4_TCP,	ICE_INSET_NONE,	ICE_INSET_NONE},
> >  	{pattern_eth_ipv4_sctp,		ICE_FDIR_INSET_ETH_IPV4_SCTP,	ICE_INSET_NONE,	ICE_INSET_NONE},
> >  	{pattern_eth_ipv6,		ICE_FDIR_INSET_ETH_IPV6,	ICE_INSET_NONE,	ICE_INSET_NONE},
> > +	{pattern_eth_ipv6_frag_ext,	ICE_FDIR_INSET_ETH_IPV6,	ICE_INSET_NONE,	ICE_INSET_NONE},
> >  	{pattern_eth_ipv6_udp,		ICE_FDIR_INSET_ETH_IPV6_UDP,	ICE_INSET_NONE,	ICE_INSET_NONE},
> >  	{pattern_eth_ipv6_tcp,		ICE_FDIR_INSET_ETH_IPV6_TCP,	ICE_INSET_NONE,	ICE_INSET_NONE},
> >  	{pattern_eth_ipv6_sctp,		ICE_FDIR_INSET_ETH_IPV6_SCTP,	ICE_INSET_NONE,	ICE_INSET_NONE},
> > @@ -882,11 +886,13 @@ ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field)
> >  		{ICE_INSET_IPV4_TOS, ICE_FLOW_FIELD_IDX_IPV4_DSCP},
> >  		{ICE_INSET_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL},
> >  		{ICE_INSET_IPV4_PROTO, ICE_FLOW_FIELD_IDX_IPV4_PROT},
> > +		{ICE_INSET_IPV4_PKID, ICE_FLOW_FIELD_IDX_IPV4_ID},
> >  		{ICE_INSET_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA},
> >  		{ICE_INSET_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA},
> >  		{ICE_INSET_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP},
> >  		{ICE_INSET_IPV6_NEXT_HDR, ICE_FLOW_FIELD_IDX_IPV6_PROT},
> >  		{ICE_INSET_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL},
> > +		{ICE_INSET_IPV6_PKID, ICE_FLOW_FIELD_IDX_IPV6_ID},
> >  		{ICE_INSET_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
> >  		{ICE_INSET_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
> >  		{ICE_INSET_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
> > @@ -935,6 +941,10 @@ ice_fdir_input_set_hdrs(enum ice_fltr_ptype flow, struct ice_flow_seg_info *seg)
> >  		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
> >  				  ICE_FLOW_SEG_HDR_IPV_OTHER);
> >  		break;
> > +	case ICE_FLTR_PTYPE_FRAG_IPV4:
> > +		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
> > +				  ICE_FLOW_SEG_HDR_IPV_FRAG);
> > +		break;
> >  	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
> >  		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
> >  				  ICE_FLOW_SEG_HDR_IPV6 |
> > @@ -951,6 +961,10 @@ ice_fdir_input_set_hdrs(enum ice_fltr_ptype flow, struct ice_flow_seg_info *seg)
> >  				  ICE_FLOW_SEG_HDR_IPV_OTHER);
> >  		break;
> >  	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
> > +		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
> > +				  ICE_FLOW_SEG_HDR_IPV_FRAG);
> > +		break;
> > +	case ICE_FLTR_PTYPE_FRAG_IPV6:
> 
> Are the frag and non-frag cases inverted?
> 

That is right, as you said; will correct it in the coming version. Thanks.
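
For reference, the corrected mapping would then presumably read as below (a sketch of the swap only; the v3 patch may differ):

	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_FRAG_IPV6:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_FRAG);
		break;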

> >  		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
> >  				  ICE_FLOW_SEG_HDR_IPV_OTHER);
> >  		break;
> > @@ -1592,8 +1606,10 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
> >  	enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
> >  	enum ice_fdir_tunnel_type tunnel_type = ICE_FDIR_TUNNEL_TYPE_NONE;
> >  	const struct rte_flow_item_eth *eth_spec, *eth_mask;
> > -	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
> > +	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_last, *ipv4_mask;
> >  	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
> > +	const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_spec,
> > +					*ipv6_frag_last, *ipv6_frag_mask;
> >  	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
> >  	const struct rte_flow_item_udp *udp_spec, *udp_mask;
> >  	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
> > @@ -1615,6 +1631,7 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
> >  	struct ice_fdir_extra *p_ext_data;
> >  	struct ice_fdir_v4 *p_v4 = NULL;
> >  	struct ice_fdir_v6 *p_v6 = NULL;
> > +	bool spec_all_pid = false;
> >
> >  	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
> >  		if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
> > @@ -1632,13 +1649,14 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
> >  	 * flow. input_set_i is used for inner part.
> >  	 */
> >  	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
> > -		if (item->last) {
> > +		if (item->last && (item_type != RTE_FLOW_ITEM_TYPE_IPV4 ||
> > +				   item_type !=
> > +				   RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT)) {
> >  			rte_flow_error_set(error, EINVAL,
> > -					   RTE_FLOW_ERROR_TYPE_ITEM,
> > -					   item,
> > +					   RTE_FLOW_ERROR_TYPE_ITEM, item,
> >  					   "Not support range");
> > -			return -rte_errno;
> >  		}
> > +
> >  		item_type = item->type;
> >
> >  		input_set = (tunnel_type && !is_outer) ?
> > @@ -1689,6 +1707,7 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
> >  			flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
> >  			l3 = RTE_FLOW_ITEM_TYPE_IPV4;
> >  			ipv4_spec = item->spec;
> > +			ipv4_last = item->last;
> >  			ipv4_mask = item->mask;
> >  			p_v4 = (tunnel_type && is_outer) ?
> >  			       &filter->input.ip_outer.v4 :
> > @@ -1700,8 +1719,6 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
> >  			/* Check IPv4 mask and update input set */
> >  			if (ipv4_mask->hdr.version_ihl ||
> >  			    ipv4_mask->hdr.total_length ||
> > -			    ipv4_mask->hdr.packet_id ||
> > -			    ipv4_mask->hdr.fragment_offset ||
> >  			    ipv4_mask->hdr.hdr_checksum) {
> >  				rte_flow_error_set(error, EINVAL,
> >  						   RTE_FLOW_ERROR_TYPE_ITEM,
> > @@ -1710,6 +1727,20 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
> >  				return -rte_errno;
> >  			}
> >
> > +			if (ipv4_last->hdr.version_ihl ||
> > +			    ipv4_last->hdr.type_of_service ||
> > +			    ipv4_last->hdr.time_to_live ||
> > +			    ipv4_last->hdr.total_length |
> > +			    ipv4_last->hdr.next_proto_id ||
> > +			    ipv4_last->hdr.hdr_checksum ||
> > +			    ipv4_last->hdr.src_addr ||
> > +			    ipv4_last->hdr.dst_addr) {
> > +				rte_flow_error_set(error, EINVAL,
> > +						   RTE_FLOW_ERROR_TYPE_ITEM,
> > +						   item, "Invalid IPv4 last.");
> > +				return -rte_errno;
> > +			}
> > +
> >  			if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
> >  				*input_set |= ICE_INSET_IPV4_DST;
> >  			if (ipv4_mask->hdr.src_addr == UINT32_MAX)
> > @@ -1726,6 +1757,24 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
> >  			p_v4->ttl = ipv4_spec->hdr.time_to_live;
> >  			p_v4->proto = ipv4_spec->hdr.next_proto_id;
> >  			p_v4->tos = ipv4_spec->hdr.type_of_service;
> > +
> > +			if (ipv4_mask->hdr.packet_id == UINT16_MAX) {
> > +				if (ipv4_last &&
> > +				    ipv4_spec->hdr.packet_id == 0 &&
> > +				    ipv4_last->hdr.packet_id == 0xffff)
> > +					spec_all_pid = true;
> > +
> > +				/* All IPv4 fragment packet has the same
> > +				 * ethertype, if the spec is for all invalid
> > +				 * packet id, set ethertype into input set.
> > +				 */
> > +				*input_set |= spec_all_pid ?
> > +						ICE_INSET_ETHERTYPE :
> > +						ICE_INSET_IPV4_PKID;
> > +			}
> > +
> > +			if (ipv4_mask->hdr.fragment_offset == UINT16_MAX)
> > +				flow_type = ICE_FLTR_PTYPE_FRAG_IPV4;
> >  			break;
> >  		case RTE_FLOW_ITEM_TYPE_IPV6:
> >  			flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
> > @@ -1771,6 +1820,31 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
> >  			p_v6->proto = ipv6_spec->hdr.proto;
> >  			p_v6->hlim = ipv6_spec->hdr.hop_limits;
> >  			break;
> > +		case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
> > +			l3 = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT;
> > +			flow_type = ICE_FLTR_PTYPE_FRAG_IPV6;
> > +			ipv6_frag_spec = item->spec;
> > +			ipv6_frag_last = item->last;
> > +			ipv6_frag_mask = item->mask;
> > +
> > +			if (!(ipv6_frag_spec && ipv6_frag_mask))
> > +				break;
> > +
> > +			if (ipv6_frag_last &&
> > +			    ipv6_frag_spec->hdr.id == 0 &&
> > +			    ipv6_frag_last->hdr.id ==
> > +			    0xffffffff)
> > +				spec_all_pid = true;
> > +
> > +			/* All IPv6 fragment packet has the same ethertype, if
> > +			 * the spec is for all invalid packet id, set ethertype
> > +			 * into input set.
> > +			 */
> > +			*input_set |= spec_all_pid ?
> > +					ICE_INSET_ETHERTYPE :
> > +					ICE_INSET_IPV6_PKID;
> > +			break;
> > +
> >  		case RTE_FLOW_ITEM_TYPE_TCP:
> >  			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
> >  				flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
> > --
> > 2.20.1
>
  

Patch

diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c
index 3af5812660..3504d3c6c2 100644
--- a/drivers/net/ice/ice_fdir_filter.c
+++ b/drivers/net/ice/ice_fdir_filter.c
@@ -24,7 +24,7 @@ 
 #define ICE_FDIR_INSET_ETH_IPV4 (\
 	ICE_FDIR_INSET_ETH | \
 	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \
-	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO)
+	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_PKID)
 
 #define ICE_FDIR_INSET_ETH_IPV4_UDP (\
 	ICE_FDIR_INSET_ETH_IPV4 | \
@@ -41,7 +41,8 @@ 
 #define ICE_FDIR_INSET_ETH_IPV6 (\
 	ICE_INSET_DMAC | \
 	ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_TC | \
-	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_NEXT_HDR)
+	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_NEXT_HDR | \
+	ICE_INSET_IPV6_PKID)
 
 #define ICE_FDIR_INSET_ETH_IPV6_UDP (\
 	ICE_FDIR_INSET_ETH_IPV6 | \
@@ -56,7 +57,8 @@ 
 	ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)
 
 #define ICE_FDIR_INSET_IPV4 (\
-	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST)
+	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
+	ICE_INSET_IPV4_PKID)
 
 #define ICE_FDIR_INSET_IPV4_TCP (\
 	ICE_FDIR_INSET_IPV4 | \
@@ -72,7 +74,8 @@ 
 
 #define ICE_FDIR_INSET_ETH_IPV4_VXLAN (\
 	ICE_FDIR_INSET_ETH | \
-	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST)
+	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
+	ICE_INSET_IPV4_PKID)
 
 #define ICE_FDIR_INSET_IPV4_GTPU (\
 	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_GTPU_TEID)
@@ -95,6 +98,7 @@  static struct ice_pattern_match_item ice_fdir_pattern_list[] = {
 	{pattern_eth_ipv4_tcp,				ICE_FDIR_INSET_ETH_IPV4_TCP,	ICE_INSET_NONE,			ICE_INSET_NONE},
 	{pattern_eth_ipv4_sctp,				ICE_FDIR_INSET_ETH_IPV4_SCTP,	ICE_INSET_NONE,			ICE_INSET_NONE},
 	{pattern_eth_ipv6,				ICE_FDIR_INSET_ETH_IPV6,	ICE_INSET_NONE,			ICE_INSET_NONE},
+	{pattern_eth_ipv6_frag_ext,			ICE_FDIR_INSET_ETH_IPV6,	ICE_INSET_NONE,			ICE_INSET_NONE},
 	{pattern_eth_ipv6_udp,				ICE_FDIR_INSET_ETH_IPV6_UDP,	ICE_INSET_NONE,			ICE_INSET_NONE},
 	{pattern_eth_ipv6_tcp,				ICE_FDIR_INSET_ETH_IPV6_TCP,	ICE_INSET_NONE,			ICE_INSET_NONE},
 	{pattern_eth_ipv6_sctp,				ICE_FDIR_INSET_ETH_IPV6_SCTP,	ICE_INSET_NONE,			ICE_INSET_NONE},
@@ -882,11 +886,13 @@  ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field)
 		{ICE_INSET_IPV4_TOS, ICE_FLOW_FIELD_IDX_IPV4_DSCP},
 		{ICE_INSET_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL},
 		{ICE_INSET_IPV4_PROTO, ICE_FLOW_FIELD_IDX_IPV4_PROT},
+		{ICE_INSET_IPV4_PKID, ICE_FLOW_FIELD_IDX_IPV4_ID},
 		{ICE_INSET_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA},
 		{ICE_INSET_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA},
 		{ICE_INSET_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP},
 		{ICE_INSET_IPV6_NEXT_HDR, ICE_FLOW_FIELD_IDX_IPV6_PROT},
 		{ICE_INSET_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL},
+		{ICE_INSET_IPV6_PKID, ICE_FLOW_FIELD_IDX_IPV6_ID},
 		{ICE_INSET_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
 		{ICE_INSET_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
 		{ICE_INSET_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
@@ -935,6 +941,10 @@  ice_fdir_input_set_hdrs(enum ice_fltr_ptype flow, struct ice_flow_seg_info *seg)
 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 		break;
+	case ICE_FLTR_PTYPE_FRAG_IPV4:
+		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
+				  ICE_FLOW_SEG_HDR_IPV_FRAG);
+		break;
 	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
 				  ICE_FLOW_SEG_HDR_IPV6 |
@@ -951,6 +961,10 @@  ice_fdir_input_set_hdrs(enum ice_fltr_ptype flow, struct ice_flow_seg_info *seg)
 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 		break;
 	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
+		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
+				  ICE_FLOW_SEG_HDR_IPV_FRAG);
+		break;
+	case ICE_FLTR_PTYPE_FRAG_IPV6:
 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 		break;
@@ -1592,8 +1606,10 @@  ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
 	enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
 	enum ice_fdir_tunnel_type tunnel_type = ICE_FDIR_TUNNEL_TYPE_NONE;
 	const struct rte_flow_item_eth *eth_spec, *eth_mask;
-	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_last, *ipv4_mask;
 	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+	const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_spec,
+					*ipv6_frag_last, *ipv6_frag_mask;
 	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
 	const struct rte_flow_item_udp *udp_spec, *udp_mask;
 	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
@@ -1615,6 +1631,7 @@  ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
 	struct ice_fdir_extra *p_ext_data;
 	struct ice_fdir_v4 *p_v4 = NULL;
 	struct ice_fdir_v6 *p_v6 = NULL;
+	bool spec_all_pid = false;
 
 	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
 		if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
@@ -1632,13 +1649,14 @@  ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
 	 * flow. input_set_i is used for inner part.
 	 */
 	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
-		if (item->last) {
+		if (item->last && (item_type != RTE_FLOW_ITEM_TYPE_IPV4 ||
+				   item_type !=
+				   RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT)) {
 			rte_flow_error_set(error, EINVAL,
-					   RTE_FLOW_ERROR_TYPE_ITEM,
-					   item,
+					   RTE_FLOW_ERROR_TYPE_ITEM, item,
 					   "Not support range");
-			return -rte_errno;
 		}
+
 		item_type = item->type;
 
 		input_set = (tunnel_type && !is_outer) ?
@@ -1689,6 +1707,7 @@  ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
 			flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
 			l3 = RTE_FLOW_ITEM_TYPE_IPV4;
 			ipv4_spec = item->spec;
+			ipv4_last = item->last;
 			ipv4_mask = item->mask;
 			p_v4 = (tunnel_type && is_outer) ?
 			       &filter->input.ip_outer.v4 :
@@ -1700,8 +1719,6 @@  ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
 			/* Check IPv4 mask and update input set */
 			if (ipv4_mask->hdr.version_ihl ||
 			    ipv4_mask->hdr.total_length ||
-			    ipv4_mask->hdr.packet_id ||
-			    ipv4_mask->hdr.fragment_offset ||
 			    ipv4_mask->hdr.hdr_checksum) {
 				rte_flow_error_set(error, EINVAL,
 						   RTE_FLOW_ERROR_TYPE_ITEM,
@@ -1710,6 +1727,20 @@  ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
 				return -rte_errno;
 			}
 
+			if (ipv4_last->hdr.version_ihl ||
+			    ipv4_last->hdr.type_of_service ||
+			    ipv4_last->hdr.time_to_live ||
+			    ipv4_last->hdr.total_length |
+			    ipv4_last->hdr.next_proto_id ||
+			    ipv4_last->hdr.hdr_checksum ||
+			    ipv4_last->hdr.src_addr ||
+			    ipv4_last->hdr.dst_addr) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item, "Invalid IPv4 last.");
+				return -rte_errno;
+			}
+
 			if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
 				*input_set |= ICE_INSET_IPV4_DST;
 			if (ipv4_mask->hdr.src_addr == UINT32_MAX)
@@ -1726,6 +1757,24 @@  ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
 			p_v4->ttl = ipv4_spec->hdr.time_to_live;
 			p_v4->proto = ipv4_spec->hdr.next_proto_id;
 			p_v4->tos = ipv4_spec->hdr.type_of_service;
+
+			if (ipv4_mask->hdr.packet_id == UINT16_MAX) {
+				if (ipv4_last &&
+				    ipv4_spec->hdr.packet_id == 0 &&
+				    ipv4_last->hdr.packet_id == 0xffff)
+					spec_all_pid = true;
+
+				/* All IPv4 fragment packet has the same
+				 * ethertype, if the spec is for all invalid
+				 * packet id, set ethertype into input set.
+				 */
+				*input_set |= spec_all_pid ?
+						ICE_INSET_ETHERTYPE :
+						ICE_INSET_IPV4_PKID;
+			}
+
+			if (ipv4_mask->hdr.fragment_offset == UINT16_MAX)
+				flow_type = ICE_FLTR_PTYPE_FRAG_IPV4;
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV6:
 			flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
@@ -1771,6 +1820,31 @@  ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
 			p_v6->proto = ipv6_spec->hdr.proto;
 			p_v6->hlim = ipv6_spec->hdr.hop_limits;
 			break;
+		case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
+			l3 = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT;
+			flow_type = ICE_FLTR_PTYPE_FRAG_IPV6;
+			ipv6_frag_spec = item->spec;
+			ipv6_frag_last = item->last;
+			ipv6_frag_mask = item->mask;
+
+			if (!(ipv6_frag_spec && ipv6_frag_mask))
+				break;
+
+			if (ipv6_frag_last &&
+			    ipv6_frag_spec->hdr.id == 0 &&
+			    ipv6_frag_last->hdr.id ==
+			    0xffffffff)
+				spec_all_pid = true;
+
+			/* All IPv6 fragment packet has the same ethertype, if
+			 * the spec is for all invalid packet id, set ethertype
+			 * into input set.
+			 */
+			*input_set |= spec_all_pid ?
+					ICE_INSET_ETHERTYPE :
+					ICE_INSET_IPV6_PKID;
+			break;
+
 		case RTE_FLOW_ITEM_TYPE_TCP:
 			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
 				flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
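
For completeness, an IPv6 counterpart of the sketch given under the commit message, again only an illustration and not part of the patch: the ipv6_frag_ext item carries the fragment ID, and a 0..0xffffffff spec/last range with a full mask mirrors the "match any fragment" convention handled by the parser above. The helper name, port id and queue index are assumptions.

#include <stdint.h>
#include <rte_byteorder.h>
#include <rte_flow.h>

/* Sketch: FDIR rule matching any IPv6 fragment and queueing it. */
static struct rte_flow *
create_ipv6_frag_rule(uint16_t port_id, uint16_t queue_id,
		      struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv6_frag_ext frag_spec = { .hdr.id = 0 };
	struct rte_flow_item_ipv6_frag_ext frag_last = {
		.hdr.id = RTE_BE32(0xffffffff),
	};
	struct rte_flow_item_ipv6_frag_ext frag_mask = {
		.hdr.id = RTE_BE32(0xffffffff),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV6 },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT,
		  .spec = &frag_spec, .last = &frag_last, .mask = &frag_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = queue_id };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}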