[dpdk-dev] [PATCH v2 4/6] net/i40e: add FDIR support for GTP-C and GTP-U
Beilei Xing
beilei.xing at intel.com
Thu Sep 7 13:21:01 CEST 2017
This patch adds flow director (FDIR) support for the GTP-C and GTP-U packet types, matching on the GTP TEID.
Signed-off-by: Beilei Xing <beilei.xing at intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 1 +
drivers/net/i40e/i40e_ethdev.h | 11 +++
drivers/net/i40e/i40e_fdir.c | 170 +++++++++++++++++++++++++++--------------
drivers/net/i40e/i40e_flow.c | 165 ++++++++++++++++++++++++++++++---------
4 files changed, 255 insertions(+), 92 deletions(-)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 5483622..18b3d8c 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -1051,6 +1051,7 @@ i40e_init_customer_pctype(struct i40e_pf *pf)
rte_memcpy(pf->new_pctype[i].name, "GTPU",
sizeof("GTPU"));
}
+ pf->new_pctype_used = false;
}
static int
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index ca8d201..9fff85f 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -233,6 +233,7 @@ enum i40e_flxpld_layer_idx {
#define I40E_INSET_TUNNEL_SRC_PORT 0x0000000800000000ULL
#define I40E_INSET_TUNNEL_DST_PORT 0x0000001000000000ULL
#define I40E_INSET_TUNNEL_ID 0x0000002000000000ULL
+#define I40E_INSET_GTP_TEID 0x0000004000000000ULL
/* bit 48 ~ bit 55 */
#define I40E_INSET_LAST_ETHER_TYPE 0x0001000000000000ULL
@@ -461,6 +462,14 @@ struct i40e_vmdq_info {
#define I40E_FDIR_IPv6_TC_OFFSET 20
/**
+ * A structure used to define the input for IPV4 GTP flow
+ */
+struct i40e_gtpv4_flow {
+ struct rte_eth_udpv4_flow udp; /**< IPv4 UDP fields to match. */
+ uint32_t teid; /**< TEID in big endian. */
+};
+
+/**
* A union contains the inputs for all types of flow
* Items in flows need to be in big endian
*/
@@ -474,6 +483,7 @@ union i40e_fdir_flow {
struct rte_eth_tcpv6_flow tcp6_flow;
struct rte_eth_sctpv6_flow sctp6_flow;
struct rte_eth_ipv6_flow ipv6_flow;
+ struct i40e_gtpv4_flow gtpv4_flow;
};
/**
@@ -888,6 +898,7 @@ struct i40e_pf {
struct i40e_tm_conf tm_conf;
/* customer personalized pctype */
struct i40e_personalized_pctype new_pctype[I40E_PERSONALIZED_MAX];
+ bool new_pctype_used; /* A new-PCTYPE FDIR rule was parsed. NOTE(review): set but never cleared, so later non-GTP rules skip inset/flex config — confirm intended */
};
enum pending_msg {
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index b0ba819..25a9c7d 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -71,6 +71,7 @@
#define I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS 0xFF
#define I40E_FDIR_IPv6_PAYLOAD_LEN 380
#define I40E_FDIR_UDP_DEFAULT_LEN 400
+#define I40E_FDIR_GTP_DEFAULT_LEN 384
/* Wait time for fdir filter programming */
#define I40E_FDIR_MAX_WAIT_US 10000
@@ -939,16 +940,33 @@ i40e_fdir_construct_pkt(struct i40e_pf *pf,
return 0;
}
+static struct i40e_personalized_pctype *
+i40e_flow_fdir_check_new_pctype(struct i40e_pf *pf, uint8_t pctype)
+{
+ struct i40e_personalized_pctype *cus_pctype;
+ enum i40e_new_proto i = I40E_PERSONALIZED_GTPC;
+
+ for (; i < I40E_PERSONALIZED_MAX; i++) {
+ cus_pctype = &pf->new_pctype[i];
+ if (pctype == cus_pctype->pctype)
+ return cus_pctype;
+ }
+ return NULL;
+}
+
static inline int
-i40e_flow_fdir_fill_eth_ip_head(const struct i40e_fdir_input *fdir_input,
+i40e_flow_fdir_fill_eth_ip_head(struct i40e_pf *pf,
+ const struct i40e_fdir_input *fdir_input,
unsigned char *raw_pkt,
bool vlan)
{
+ struct i40e_personalized_pctype *cus_pctype;
static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
uint16_t *ether_type;
uint8_t len = 2 * sizeof(struct ether_addr);
struct ipv4_hdr *ip;
struct ipv6_hdr *ip6;
+ uint8_t pctype = fdir_input->pctype;
static const uint8_t next_proto[] = {
[I40E_FILTER_PCTYPE_FRAG_IPV4] = IPPROTO_IP,
[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = IPPROTO_TCP,
@@ -975,15 +993,13 @@ i40e_flow_fdir_fill_eth_ip_head(const struct i40e_fdir_input *fdir_input,
raw_pkt += sizeof(uint16_t);
len += sizeof(uint16_t);
- switch (fdir_input->pctype) {
- case I40E_FILTER_PCTYPE_L2_PAYLOAD:
+ if (pctype == I40E_FILTER_PCTYPE_L2_PAYLOAD)
*ether_type = fdir_input->flow.l2_flow.ether_type;
- break;
- case I40E_FILTER_PCTYPE_NONF_IPV4_TCP:
- case I40E_FILTER_PCTYPE_NONF_IPV4_UDP:
- case I40E_FILTER_PCTYPE_NONF_IPV4_SCTP:
- case I40E_FILTER_PCTYPE_NONF_IPV4_OTHER:
- case I40E_FILTER_PCTYPE_FRAG_IPV4:
+ else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP ||
+ pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP ||
+ pctype == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP ||
+ pctype == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER ||
+ pctype == I40E_FILTER_PCTYPE_FRAG_IPV4) {
ip = (struct ipv4_hdr *)raw_pkt;
*ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
@@ -991,11 +1007,11 @@ i40e_flow_fdir_fill_eth_ip_head(const struct i40e_fdir_input *fdir_input,
/* set len to by default */
ip->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN);
ip->next_proto_id = fdir_input->flow.ip4_flow.proto ?
- fdir_input->flow.ip4_flow.proto :
- next_proto[fdir_input->pctype];
+ fdir_input->flow.ip4_flow.proto :
+ next_proto[fdir_input->pctype];
ip->time_to_live = fdir_input->flow.ip4_flow.ttl ?
- fdir_input->flow.ip4_flow.ttl :
- I40E_FDIR_IP_DEFAULT_TTL;
+ fdir_input->flow.ip4_flow.ttl :
+ I40E_FDIR_IP_DEFAULT_TTL;
ip->type_of_service = fdir_input->flow.ip4_flow.tos;
/**
* The source and destination fields in the transmitted packet
@@ -1005,12 +1021,11 @@ i40e_flow_fdir_fill_eth_ip_head(const struct i40e_fdir_input *fdir_input,
ip->src_addr = fdir_input->flow.ip4_flow.dst_ip;
ip->dst_addr = fdir_input->flow.ip4_flow.src_ip;
len += sizeof(struct ipv4_hdr);
- break;
- case I40E_FILTER_PCTYPE_NONF_IPV6_TCP:
- case I40E_FILTER_PCTYPE_NONF_IPV6_UDP:
- case I40E_FILTER_PCTYPE_NONF_IPV6_SCTP:
- case I40E_FILTER_PCTYPE_NONF_IPV6_OTHER:
- case I40E_FILTER_PCTYPE_FRAG_IPV6:
+ } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP ||
+ pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP ||
+ pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP ||
+ pctype == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER ||
+ pctype == I40E_FILTER_PCTYPE_FRAG_IPV6) {
ip6 = (struct ipv6_hdr *)raw_pkt;
*ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
@@ -1021,11 +1036,11 @@ i40e_flow_fdir_fill_eth_ip_head(const struct i40e_fdir_input *fdir_input,
ip6->payload_len =
rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
ip6->proto = fdir_input->flow.ipv6_flow.proto ?
- fdir_input->flow.ipv6_flow.proto :
- next_proto[fdir_input->pctype];
+ fdir_input->flow.ipv6_flow.proto :
+ next_proto[fdir_input->pctype];
ip6->hop_limits = fdir_input->flow.ipv6_flow.hop_limits ?
- fdir_input->flow.ipv6_flow.hop_limits :
- I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
+ fdir_input->flow.ipv6_flow.hop_limits :
+ I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
/**
* The source and destination fields in the transmitted packet
* need to be presented in a reversed order with respect
@@ -1038,12 +1053,39 @@ i40e_flow_fdir_fill_eth_ip_head(const struct i40e_fdir_input *fdir_input,
&fdir_input->flow.ipv6_flow.src_ip,
IPV6_ADDR_LEN);
len += sizeof(struct ipv6_hdr);
- break;
- default:
+ } else if (pf->new_pctype_used) {
+ cus_pctype = i40e_flow_fdir_check_new_pctype(pf, pctype);
+ if (!cus_pctype) return -1; /* unknown new pctype */
+ ip = (struct ipv4_hdr *)raw_pkt;
+ *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ ip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL;
+ /* set total_length to a default value */
+ ip->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN);
+ /* new pctypes are beyond next_proto[] bounds; GTP runs over UDP */
+ ip->next_proto_id = fdir_input->flow.ip4_flow.proto ?
+ fdir_input->flow.ip4_flow.proto : IPPROTO_UDP;
+ ip->time_to_live = fdir_input->flow.ip4_flow.ttl ?
+ fdir_input->flow.ip4_flow.ttl :
+ I40E_FDIR_IP_DEFAULT_TTL;
+ ip->type_of_service = fdir_input->flow.ip4_flow.tos;
+
+ if (!memcmp(cus_pctype->name, "GTPC", sizeof("GTPC")) ||
+ !memcmp(cus_pctype->name, "GTPU", sizeof("GTPU")))
+ ip->next_proto_id = IPPROTO_UDP;
+ /**
+ * The source and destination fields in the transmitted packet
+ * need to be presented in a reversed order with respect
+ * to the expected received packets.
+ */
+ ip->src_addr = fdir_input->flow.ip4_flow.dst_ip;
+ ip->dst_addr = fdir_input->flow.ip4_flow.src_ip;
+ len += sizeof(struct ipv4_hdr);
+ } else {
PMD_DRV_LOG(ERR, "unknown pctype %u.",
fdir_input->pctype);
return -1;
}
+
return len;
}
@@ -1058,23 +1100,26 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
const struct i40e_fdir_input *fdir_input,
unsigned char *raw_pkt)
{
- unsigned char *payload, *ptr;
+ unsigned char *payload = NULL;
+ unsigned char *ptr;
struct udp_hdr *udp;
struct tcp_hdr *tcp;
struct sctp_hdr *sctp;
+ struct rte_flow_item_gtp *gtp;
uint8_t size, dst = 0;
uint8_t i, pit_idx, set_idx = I40E_FLXPLD_L4_IDX; /* use l4 by default*/
int len;
+ uint8_t pctype = fdir_input->pctype;
+ struct i40e_personalized_pctype *cus_pctype;
/* fill the ethernet and IP head */
- len = i40e_flow_fdir_fill_eth_ip_head(fdir_input, raw_pkt,
+ len = i40e_flow_fdir_fill_eth_ip_head(pf, fdir_input, raw_pkt,
!!fdir_input->flow_ext.vlan_tci);
if (len < 0)
return -EINVAL;
/* fill the L4 head */
- switch (fdir_input->pctype) {
- case I40E_FILTER_PCTYPE_NONF_IPV4_UDP:
+ if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP) {
udp = (struct udp_hdr *)(raw_pkt + len);
payload = (unsigned char *)udp + sizeof(struct udp_hdr);
/**
@@ -1085,9 +1130,7 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
udp->src_port = fdir_input->flow.udp4_flow.dst_port;
udp->dst_port = fdir_input->flow.udp4_flow.src_port;
udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
- break;
-
- case I40E_FILTER_PCTYPE_NONF_IPV4_TCP:
+ } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP) {
tcp = (struct tcp_hdr *)(raw_pkt + len);
payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
/**
@@ -1098,9 +1141,7 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
tcp->src_port = fdir_input->flow.tcp4_flow.dst_port;
tcp->dst_port = fdir_input->flow.tcp4_flow.src_port;
tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
- break;
-
- case I40E_FILTER_PCTYPE_NONF_IPV4_SCTP:
+ } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) {
sctp = (struct sctp_hdr *)(raw_pkt + len);
payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
/**
@@ -1111,15 +1152,11 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
sctp->src_port = fdir_input->flow.sctp4_flow.dst_port;
sctp->dst_port = fdir_input->flow.sctp4_flow.src_port;
sctp->tag = fdir_input->flow.sctp4_flow.verify_tag;
- break;
-
- case I40E_FILTER_PCTYPE_NONF_IPV4_OTHER:
- case I40E_FILTER_PCTYPE_FRAG_IPV4:
+ } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER ||
+ pctype == I40E_FILTER_PCTYPE_FRAG_IPV4) {
payload = raw_pkt + len;
set_idx = I40E_FLXPLD_L3_IDX;
- break;
-
- case I40E_FILTER_PCTYPE_NONF_IPV6_UDP:
+ } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP) {
udp = (struct udp_hdr *)(raw_pkt + len);
payload = (unsigned char *)udp + sizeof(struct udp_hdr);
/**
@@ -1130,9 +1167,7 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
udp->src_port = fdir_input->flow.udp6_flow.dst_port;
udp->dst_port = fdir_input->flow.udp6_flow.src_port;
udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
- break;
-
- case I40E_FILTER_PCTYPE_NONF_IPV6_TCP:
+ } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP) {
tcp = (struct tcp_hdr *)(raw_pkt + len);
payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
/**
@@ -1143,9 +1178,7 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
tcp->src_port = fdir_input->flow.udp6_flow.dst_port;
tcp->dst_port = fdir_input->flow.udp6_flow.src_port;
- break;
-
- case I40E_FILTER_PCTYPE_NONF_IPV6_SCTP:
+ } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) {
sctp = (struct sctp_hdr *)(raw_pkt + len);
payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
/**
@@ -1156,14 +1189,11 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
sctp->src_port = fdir_input->flow.sctp6_flow.dst_port;
sctp->dst_port = fdir_input->flow.sctp6_flow.src_port;
sctp->tag = fdir_input->flow.sctp6_flow.verify_tag;
- break;
-
- case I40E_FILTER_PCTYPE_NONF_IPV6_OTHER:
- case I40E_FILTER_PCTYPE_FRAG_IPV6:
+ } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER ||
+ pctype == I40E_FILTER_PCTYPE_FRAG_IPV6) {
payload = raw_pkt + len;
set_idx = I40E_FLXPLD_L3_IDX;
- break;
- case I40E_FILTER_PCTYPE_L2_PAYLOAD:
+ } else if (pctype == I40E_FILTER_PCTYPE_L2_PAYLOAD) {
payload = raw_pkt + len;
/**
* ARP packet is a special case on which the payload
@@ -1173,10 +1203,34 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
rte_cpu_to_be_16(ETHER_TYPE_ARP))
payload += sizeof(struct arp_hdr);
set_idx = I40E_FLXPLD_L2_IDX;
- break;
- default:
- PMD_DRV_LOG(ERR, "unknown pctype %u.", fdir_input->pctype);
- return -EINVAL;
+ } else if (pf->new_pctype_used) {
+ cus_pctype = i40e_flow_fdir_check_new_pctype(pf, pctype);
+ if (cus_pctype && (!memcmp(cus_pctype->name, "GTPC", sizeof("GTPC")) ||
+ !memcmp(cus_pctype->name, "GTPU", sizeof("GTPU")))) {
+ udp = (struct udp_hdr *)(raw_pkt + len);
+ udp->dgram_len =
+ rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
+
+ gtp = (struct rte_flow_item_gtp *)
+ ((unsigned char *)udp + sizeof(struct udp_hdr));
+ gtp->v_pt_rsv_flags = 0x30;
+ gtp->msg_len =
+ rte_cpu_to_be_16(I40E_FDIR_GTP_DEFAULT_LEN);
+ gtp->teid = fdir_input->flow.gtpv4_flow.teid;
+ gtp->msg_type = 0x1;
+
+ if (!memcmp(cus_pctype->name, "GTPC", sizeof("GTPC")))
+ udp->dst_port = rte_cpu_to_be_16(2123);
+ else
+ udp->dst_port = rte_cpu_to_be_16(2152);
+
+ payload = (unsigned char *)gtp +
+ sizeof(struct rte_flow_item_gtp);
+ } else return -1; /* never continue with a NULL payload */
+ } else {
+ PMD_DRV_LOG(ERR, "unknown pctype %u.",
+ fdir_input->pctype);
+ return -1;
}
/* fill the flexbytes to payload */
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 73af7fd..6716855 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -189,6 +189,22 @@ static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
RTE_FLOW_ITEM_TYPE_END,
};
+static enum rte_flow_item_type pattern_fdir_ipv4_gtpc[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_GTPC,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_gtpu[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_GTPU,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
static enum rte_flow_item_type pattern_fdir_ipv6[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV6,
@@ -216,6 +232,22 @@ static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
RTE_FLOW_ITEM_TYPE_END,
};
+static enum rte_flow_item_type pattern_fdir_ipv6_gtpc[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_GTPC,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_gtpu[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_GTPU,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
static enum rte_flow_item_type pattern_fdir_ethertype_raw_1[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_RAW,
@@ -1576,10 +1608,14 @@ static struct i40e_valid_pattern i40e_supported_patterns[] = {
{ pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
{ pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
{ pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_gtpc, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_gtpu, i40e_flow_parse_fdir_filter },
{ pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
{ pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
{ pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
{ pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_gtpc, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_gtpu, i40e_flow_parse_fdir_filter },
/* FDIR - support default flow type with flexible payload */
{ pattern_fdir_ethertype_raw_1, i40e_flow_parse_fdir_filter },
{ pattern_fdir_ethertype_raw_2, i40e_flow_parse_fdir_filter },
@@ -2302,6 +2338,32 @@ i40e_flow_set_fdir_inset(struct i40e_pf *pf,
return 0;
}
+static int
+i40e_flow_find_new_pctype(struct i40e_pf *pf,
+ enum rte_flow_item_type item_type)
+{
+ struct i40e_personalized_pctype *cus_pctype = NULL;
+
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_GTPC:
+ cus_pctype = i40e_find_personalized_pctype(pf,
+ I40E_PERSONALIZED_GTPC);
+ break;
+ case RTE_FLOW_ITEM_TYPE_GTPU:
+ cus_pctype = i40e_find_personalized_pctype(pf,
+ I40E_PERSONALIZED_GTPU);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Unsupported item type");
+ break;
+ }
+
+ if (cus_pctype)
+ return cus_pctype->pctype;
+
+ return I40E_INVALID_PCTYPE;
+}
+
/* 1. Last in item should be NULL as range is not supported.
* 2. Supported patterns: refer to array i40e_supported_patterns.
* 3. Supported flow type and input set: refer to array
@@ -2326,10 +2388,11 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
const struct rte_flow_item_udp *udp_spec, *udp_mask;
const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+ const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
const struct rte_flow_item_raw *raw_spec, *raw_mask;
const struct rte_flow_item_vf *vf_spec;
- enum i40e_filter_pctype pctype = 0;
+ int pctype = 0;
uint64_t input_set = I40E_INSET_NONE;
uint16_t frag_off;
enum rte_flow_item_type item_type;
@@ -2636,6 +2699,38 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
layer_idx = I40E_FLXPLD_L4_IDX;
break;
+ case RTE_FLOW_ITEM_TYPE_GTPC:
+ case RTE_FLOW_ITEM_TYPE_GTPU:
+ gtp_spec = (const struct rte_flow_item_gtp *)item->spec;
+ gtp_mask = (const struct rte_flow_item_gtp *)item->mask;
+
+ pctype = i40e_flow_find_new_pctype(pf, item_type);
+ if (pctype == I40E_INVALID_PCTYPE) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Unsupported protocol");
+ return -rte_errno;
+ }
+
+ if (gtp_spec && gtp_mask) {
+ if (gtp_mask->v_pt_rsv_flags ||
+ gtp_mask->msg_type ||
+ gtp_mask->msg_len ||
+ gtp_mask->teid != UINT32_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid GTP mask");
+ return -rte_errno;
+ }
+
+ pf->new_pctype_used = true;
+ input_set |= I40E_INSET_GTP_TEID;
+ filter->input.flow.gtpv4_flow.teid =
+ gtp_spec->teid;
+ }
+ break;
case RTE_FLOW_ITEM_TYPE_SCTP:
sctp_spec =
(const struct rte_flow_item_sctp *)item->spec;
@@ -2774,43 +2869,45 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
}
}
- ret = i40e_flow_set_fdir_inset(pf, pctype, input_set);
- if (ret == -1) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
- "Conflict with the first rule's input set.");
- return -rte_errno;
- } else if (ret == -EINVAL) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
- "Invalid pattern mask.");
- return -rte_errno;
- }
+ if (!pf->new_pctype_used) {
+ ret = i40e_flow_set_fdir_inset(pf, pctype, input_set);
+ if (ret == -1) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Conflict with the first rule's input set.");
+ return -rte_errno;
+ } else if (ret == -EINVAL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Invalid pattern mask.");
+ return -rte_errno;
+ }
- filter->input.pctype = pctype;
+ /* Store flex mask to SW */
+ ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask);
+ if (ret == -1) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Exceed maximal number of bitmasks");
+ return -rte_errno;
+ } else if (ret == -2) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Conflict with the first flexible rule");
+ return -rte_errno;
+ } else if (ret > 0)
+ cfg_flex_msk = false;
- /* Store flex mask to SW */
- ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask);
- if (ret == -1) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Exceed maximal number of bitmasks");
- return -rte_errno;
- } else if (ret == -2) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Conflict with the first flexible rule");
- return -rte_errno;
- } else if (ret > 0)
- cfg_flex_msk = false;
+ if (cfg_flex_pit)
+ i40e_flow_set_fdir_flex_pit(pf, layer_idx, raw_id);
- if (cfg_flex_pit)
- i40e_flow_set_fdir_flex_pit(pf, layer_idx, raw_id);
+ if (cfg_flex_msk)
+ i40e_flow_set_fdir_flex_msk(pf, pctype);
+ }
- if (cfg_flex_msk)
- i40e_flow_set_fdir_flex_msk(pf, pctype);
+ filter->input.pctype = pctype;
return 0;
}
--
2.5.5
More information about the dev
mailing list