[dpdk-dev] [PATCH 2/3] net/ice: enable protocol agnostic flow offloading in FDIR
Junfeng Guo
junfeng.guo at intel.com
Fri Sep 24 18:22:22 CEST 2021
This patch enables protocol-agnostic flow offloading in Flow Director,
based on the Parser Library and the existing rte_flow raw API; no
public API change is required.
Note that a raw flow requires:
1. a hex byte string describing the target packet, and
2. a hex byte string mask of the same length.
Here is an example:
FDIR matching ipv4 dst addr with 1.2.3.4 and redirect to queue 3:
flow create 0 ingress pattern raw \
pattern spec \
00000000000000000000000008004500001400004000401000000000000001020304 \
pattern mask \
000000000000000000000000000000000000000000000000000000000000ffffffff \
/ end actions queue index 3 / mark id 3 / end
Signed-off-by: Junfeng Guo <junfeng.guo at intel.com>
---
drivers/net/ice/ice_ethdev.h | 5 +
drivers/net/ice/ice_fdir_filter.c | 167 +++++++++++++++++++++++++++++
drivers/net/ice/ice_generic_flow.c | 7 ++
drivers/net/ice/ice_generic_flow.h | 3 +
4 files changed, 182 insertions(+)
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index b4bf651c1c..8dc0c54bd7 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -304,6 +304,11 @@ struct ice_fdir_filter_conf {
uint64_t input_set_o; /* used for non-tunnel or tunnel outer fields */
uint64_t input_set_i; /* only for tunnel inner fields */
uint32_t mark_flag;
+
+	/* Fields below are used only for raw (protocol agnostic) flows. */
+	struct ice_parser_profile *prof; /* profile built by the parser lib */
+	const u8 *pkt_buf; /* packet bytes the raw rule matches against */
+	bool parser_ena; /* true when this rule came from a raw pattern */
+	u8 pkt_len; /* length of pkt_buf in bytes;
+		     * NOTE(review): u8 caps raw packets at 255 bytes —
+		     * confirm this is sufficient for all FDIR use cases.
+		     */
};
#define ICE_MAX_FDIR_FILTER_NUM (1024 * 16)
diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c
index 7ba65b9b04..375308781f 100644
--- a/drivers/net/ice/ice_fdir_filter.c
+++ b/drivers/net/ice/ice_fdir_filter.c
@@ -107,6 +107,7 @@
ICE_INSET_NAT_T_ESP_SPI)
static struct ice_pattern_match_item ice_fdir_pattern_list[] = {
+ {pattern_raw, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
{pattern_ethertype, ICE_FDIR_INSET_ETH, ICE_INSET_NONE, ICE_INSET_NONE},
{pattern_eth_ipv4, ICE_FDIR_INSET_ETH_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
{pattern_eth_ipv4_udp, ICE_FDIR_INSET_ETH_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
@@ -1190,6 +1191,24 @@ ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type)
return 0;
}
+/**
+ * ice_fdir_add_del_raw - program or remove one raw (protocol agnostic)
+ * FDIR rule via the FDIR programming queue.
+ * @pf: board private structure
+ * @filter: raw filter holding the training packet (pkt_buf / pkt_len)
+ * @add: true to add the rule, false to delete it
+ *
+ * Return: 0 on success, negative value if programming fails.
+ */
+static int
+ice_fdir_add_del_raw(struct ice_pf *pf,
+ struct ice_fdir_filter_conf *filter,
+ bool add)
+{
+ struct ice_hw *hw = ICE_PF_TO_HW(pf);
+
+ /* Copy the raw packet image into the FDIR programming buffer. */
+ unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
+ rte_memcpy(pkt, filter->pkt_buf, filter->pkt_len);
+
+ struct ice_fltr_desc desc;
+ memset(&desc, 0, sizeof(desc));
+ /* Ask for a SW completion report so the rule status can be polled. */
+ filter->input.comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW;
+ ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add);
+
+ return ice_fdir_programming(pf, &desc);
+}
+
static int
ice_fdir_add_del_filter(struct ice_pf *pf,
struct ice_fdir_filter_conf *filter,
@@ -1306,6 +1325,45 @@ ice_fdir_create_filter(struct ice_adapter *ad,
bool is_tun;
int ret;
+ /* Raw (protocol agnostic) rules bypass the regular FDIR input-set
+ * path: bind the parser profile to HW, program the rule, and store
+ * a private copy of the filter as the flow's rule handle.
+ */
+ if (filter->parser_ena) {
+ struct ice_hw *hw = ICE_PF_TO_HW(pf);
+
+ u16 ctrl_vsi = pf->fdir.fdir_vsi->idx;
+ u16 main_vsi = pf->main_vsi->idx;
+ u8 *pkt_buf;
+
+ ret = ice_flow_set_hw_prof(hw, main_vsi, ctrl_vsi,
+ filter->prof, ICE_BLK_FD);
+ if (ret)
+ return -rte_errno;
+
+ ret = ice_fdir_add_del_raw(pf, filter, true);
+ if (ret)
+ return -rte_errno;
+
+ if (filter->mark_flag == 1)
+ ice_fdir_rx_parsing_enable(ad, 1);
+
+ entry = rte_zmalloc("fdir_entry", sizeof(*entry), 0);
+ if (!entry)
+ return -rte_errno;
+
+ /* Take a private copy of the raw packet: filter->pkt_buf points
+ * into the caller's rte_flow item and is not stable after return.
+ * (The previous code also assigned entry->pkt_buf an allocation
+ * that was immediately lost in the memcpy below — leak removed.)
+ */
+ pkt_buf = (u8 *)ice_malloc(hw, filter->pkt_len);
+ if (!pkt_buf) {
+ rte_free(entry);
+ return -ENOMEM;
+ }
+
+ rte_memcpy(entry, filter, sizeof(*filter));
+ rte_memcpy(pkt_buf, filter->pkt_buf, filter->pkt_len);
+ entry->pkt_buf = pkt_buf;
+
+ flow->rule = entry;
+
+ return 0;
+ }
+
ice_fdir_extract_fltr_key(&key, filter);
node = ice_fdir_entry_lookup(fdir_info, &key);
if (node) {
@@ -1401,6 +1459,19 @@ ice_fdir_destroy_filter(struct ice_adapter *ad,
filter = (struct ice_fdir_filter_conf *)flow->rule;
+ /* Raw (protocol agnostic) rules: remove the HW rule, then release
+ * the resources copied/allocated at creation time.
+ */
+ if (filter->parser_ena) {
+ ret = ice_fdir_add_del_raw(pf, filter, false);
+ if (ret)
+ return -rte_errno;
+
+ /* Free the parser profile and the private packet copy; the
+ * previous code NULLed pkt_buf without freeing it (leak).
+ */
+ rte_free(filter->prof);
+ filter->prof = NULL;
+ rte_free((void *)(uintptr_t)filter->pkt_buf);
+ filter->pkt_buf = NULL;
+ flow->rule = NULL;
+
+ rte_free(filter);
+
+ return 0;
+ }
+
is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);
if (filter->counter) {
@@ -1679,6 +1750,7 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
enum rte_flow_item_type l4 = RTE_FLOW_ITEM_TYPE_END;
enum ice_fdir_tunnel_type tunnel_type = ICE_FDIR_TUNNEL_TYPE_NONE;
+ const struct rte_flow_item_raw *raw_spec, *raw_mask;
const struct rte_flow_item_eth *eth_spec, *eth_mask;
const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_last, *ipv4_mask;
const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
@@ -1706,6 +1778,9 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
struct ice_fdir_extra *p_ext_data;
struct ice_fdir_v4 *p_v4 = NULL;
struct ice_fdir_v6 *p_v6 = NULL;
+ struct ice_parser_result rslt;
+ struct ice_parser *psr;
+ uint8_t item_num = 0;
for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
@@ -1717,6 +1792,7 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
item->type == RTE_FLOW_ITEM_TYPE_GTP_PSC) {
is_outer = false;
}
+ item_num++;
}
/* This loop parse flow pattern and distinguish Non-tunnel and tunnel
@@ -1737,6 +1813,90 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
&input_set_i : &input_set_o;
switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_RAW: {
+ raw_spec = item->spec;
+ raw_mask = item->mask;
+
+ /* A raw pattern must be the only item in the list. */
+ if (item_num != 1)
+ break;
+
+ /* Validate inputs before any dereference (the old code
+ * checked tmp_mask for NULL only after using it).
+ */
+ if (!raw_spec || !raw_mask ||
+ !raw_spec->pattern || !raw_mask->pattern)
+ return -rte_errno;
+
+ /* convert raw spec & mask from hex strings to bytes,
+ * in place ("0800..." -> { 0x08, 0x00, ... }); note this
+ * rewrites the caller-supplied pattern buffers
+ */
+ unsigned char *tmp_spec =
+ (uint8_t *)(uintptr_t)raw_spec->pattern;
+ unsigned char *tmp_mask =
+ (uint8_t *)(uintptr_t)raw_mask->pattern;
+ uint16_t tmp_val = 0;
+ uint8_t pkt_len = 0;
+ uint8_t tmp = 0;
+ int i, j, err;
+
+ /* spec and mask must be hex strings of the same, even
+ * length: two hex digits per packet byte
+ */
+ pkt_len = strlen((char *)(uintptr_t)raw_spec->pattern);
+ if (strlen((char *)(uintptr_t)raw_mask->pattern) !=
+ pkt_len || pkt_len % 2)
+ return -rte_errno;
+
+ for (i = 0, j = 0; i < pkt_len; i += 2, j++) {
+ tmp = tmp_spec[i];
+ if (tmp >= 'a' && tmp <= 'f')
+ tmp_val = tmp - 'a' + 10;
+ if (tmp >= 'A' && tmp <= 'F')
+ tmp_val = tmp - 'A' + 10;
+ if (tmp >= '0' && tmp <= '9')
+ tmp_val = tmp - '0';
+
+ tmp_val *= 16;
+ tmp = tmp_spec[i + 1];
+ if (tmp >= 'a' && tmp <= 'f')
+ tmp_spec[j] = tmp_val + tmp - 'a' + 10;
+ if (tmp >= 'A' && tmp <= 'F')
+ tmp_spec[j] = tmp_val + tmp - 'A' + 10;
+ if (tmp >= '0' && tmp <= '9')
+ tmp_spec[j] = tmp_val + tmp - '0';
+
+ tmp = tmp_mask[i];
+ if (tmp >= 'a' && tmp <= 'f')
+ tmp_val = tmp - 'a' + 10;
+ if (tmp >= 'A' && tmp <= 'F')
+ tmp_val = tmp - 'A' + 10;
+ if (tmp >= '0' && tmp <= '9')
+ tmp_val = tmp - '0';
+
+ tmp_val *= 16;
+ tmp = tmp_mask[i + 1];
+ if (tmp >= 'a' && tmp <= 'f')
+ tmp_mask[j] = tmp_val + tmp - 'a' + 10;
+ if (tmp >= 'A' && tmp <= 'F')
+ tmp_mask[j] = tmp_val + tmp - 'A' + 10;
+ if (tmp >= '0' && tmp <= '9')
+ tmp_mask[j] = tmp_val + tmp - '0';
+ }
+
+ pkt_len /= 2;
+
+ if (ice_parser_create(&ad->hw, &psr))
+ return -rte_errno;
+ /* Always destroy the parser, even when the run fails
+ * (the old code leaked it on the error path).
+ */
+ err = ice_parser_run(psr, tmp_spec, pkt_len, &rslt);
+ ice_parser_destroy(psr);
+ if (err)
+ return -rte_errno;
+
+ /* sizeof(*filter->prof): allocate the profile struct
+ * itself — sizeof(filter->prof) only allocated a
+ * pointer's worth of bytes and overflowed on init.
+ */
+ filter->prof = (struct ice_parser_profile *)
+ ice_malloc(&ad->hw, sizeof(*filter->prof));
+ if (!filter->prof)
+ return -ENOMEM;
+ if (ice_parser_profile_init(&rslt, tmp_spec, tmp_mask,
+ pkt_len, ICE_BLK_FD, true, filter->prof)) {
+ rte_free(filter->prof);
+ filter->prof = NULL;
+ return -rte_errno;
+ }
+
+ /* pkt_buf still aliases the user's item buffer here; the
+ * create path copies it into driver-owned memory.
+ */
+ filter->pkt_buf = tmp_spec;
+ filter->pkt_len = pkt_len;
+
+ filter->parser_ena = true;
+
+ break;
+ }
+
case RTE_FLOW_ITEM_TYPE_ETH:
flow_type = ICE_FLTR_PTYPE_NON_IP_L2;
eth_spec = item->spec;
@@ -2202,6 +2362,7 @@ ice_fdir_parse(struct ice_adapter *ad,
struct ice_fdir_filter_conf *filter = &pf->fdir.conf;
struct ice_pattern_match_item *item = NULL;
uint64_t input_set;
+ bool raw = false;
int ret;
memset(filter, 0, sizeof(*filter));
@@ -2213,7 +2374,13 @@ ice_fdir_parse(struct ice_adapter *ad,
ret = ice_fdir_parse_pattern(ad, pattern, error, filter);
if (ret)
goto error;
+
+ if (item->pattern_list[0] == RTE_FLOW_ITEM_TYPE_RAW)
+ raw = true;
+
input_set = filter->input_set_o | filter->input_set_i;
+ input_set = raw ? ~input_set : input_set;
+
if (!input_set || filter->input_set_o &
~(item->input_set_mask_o | ICE_INSET_ETHERTYPE) ||
filter->input_set_i & ~item->input_set_mask_i) {
diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
index 9e03c2856c..c979fce080 100644
--- a/drivers/net/ice/ice_generic_flow.c
+++ b/drivers/net/ice/ice_generic_flow.c
@@ -65,6 +65,12 @@ enum rte_flow_item_type pattern_empty[] = {
RTE_FLOW_ITEM_TYPE_END,
};
+/* raw (protocol agnostic) pattern: a single RAW item */
+enum rte_flow_item_type pattern_raw[] = {
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
/* L2 */
enum rte_flow_item_type pattern_ethertype[] = {
RTE_FLOW_ITEM_TYPE_ETH,
@@ -2081,6 +2087,7 @@ struct ice_ptype_match {
};
static struct ice_ptype_match ice_ptype_map[] = {
+ {pattern_raw, ICE_PTYPE_IPV4_PAY},
{pattern_eth_ipv4, ICE_PTYPE_IPV4_PAY},
{pattern_eth_ipv4_udp, ICE_PTYPE_IPV4_UDP_PAY},
{pattern_eth_ipv4_tcp, ICE_PTYPE_IPV4_TCP_PAY},
diff --git a/drivers/net/ice/ice_generic_flow.h b/drivers/net/ice/ice_generic_flow.h
index 8845a3e156..1b030c0466 100644
--- a/drivers/net/ice/ice_generic_flow.h
+++ b/drivers/net/ice/ice_generic_flow.h
@@ -124,6 +124,9 @@
/* empty pattern */
extern enum rte_flow_item_type pattern_empty[];
+/* raw pattern (protocol agnostic flow offloading) */
+extern enum rte_flow_item_type pattern_raw[];
+
/* L2 */
extern enum rte_flow_item_type pattern_ethertype[];
extern enum rte_flow_item_type pattern_ethertype_vlan[];
--
2.25.1
More information about the dev
mailing list