[dpdk-stable] patch 'ethdev: fix expand RSS flows' has been queued to LTS release 18.11.6

Kevin Traynor ktraynor at redhat.com
Wed Dec 11 22:26:06 CET 2019


Hi,

FYI, your patch has been queued to LTS release 18.11.6

Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 12/17/19. So please
shout if anyone has objections.

Also note that after the patch there's a diff of the upstream commit vs the
patch applied to the branch. This will indicate if there was any rebasing
needed to apply to the stable branch. If there were code changes for rebasing
(ie: not only metadata diffs), please double check that the rebase was
correctly done.

Queued patches are on a temporary branch at:
https://github.com/kevintraynor/dpdk-stable-queue

This queued commit can be viewed at:
https://github.com/kevintraynor/dpdk-stable-queue/commit/5136c9fb9075bdb2e9215dadebc388cb3875df35

Thanks.

Kevin.

---
>From 5136c9fb9075bdb2e9215dadebc388cb3875df35 Mon Sep 17 00:00:00 2001
From: Xiaoyu Min <jackmin at mellanox.com>
Date: Tue, 5 Nov 2019 15:42:43 +0200
Subject: [PATCH] ethdev: fix expand RSS flows

[ upstream commit fc2dd8dd492fade39a4c4de037ff3c869daff47d ]

rte_flow_expand_rss expands the rte_flow item list based on the RSS
types. In other words, some additional rules are added if the
user-specified items are not complete enough according to the RSS type,
for example:

  ... pattern eth / end actions rss type tcp end ...

The user only provides item eth but wants to do RSS on tcp traffic.
The pattern is not complete enough to filter TCP traffic only.
This will be a problem for some HWs.
So some PMDs use rte_flow_expand_rss to expand above user provided
flow to:

  ... pattern eth / end actions rss types tcp
  ... pattern eth / ipv4 / tcp / end actions rss types tcp ...
  ... pattern eth / ipv6 / tcp / end actions rss types tcp ...

in order to filter TCP traffic only and do RSS correctly.

However the current expansion cannot handle pattern as below, which
provides ethertype or ip next proto instead of providing an item:

  ... pattern eth type is 0x86DD / end actions rss types tcp ...

rte_flow_expand_rss will expand above flow to:

  ... pattern eth type is 0x86DD / ipv4 / tcp end ...

which has conflicting values: 0x86DD vs. ipv4 and some HWs will refuse
to create flow.

This patch fixes the above by checking the last item's spec in order
to expand RSS flows correctly.

Currently, completing the item list is only supported based on ether
type or IP next proto.

Fixes: 4ed05fcd441b ("ethdev: add flow API to expand RSS flows")

Signed-off-by: Xiaoyu Min <jackmin at mellanox.com>
Acked-by: Ori Kam <orika at mellanox.com>
---
 lib/librte_ethdev/rte_flow.c | 132 +++++++++++++++++++++++++++++++++--
 1 file changed, 127 insertions(+), 5 deletions(-)

diff --git a/lib/librte_ethdev/rte_flow.c b/lib/librte_ethdev/rte_flow.c
index 3277be1ed..d545b15f5 100644
--- a/lib/librte_ethdev/rte_flow.c
+++ b/lib/librte_ethdev/rte_flow.c
@@ -158,4 +158,65 @@ flow_err(uint16_t port_id, int ret, struct rte_flow_error *error)
 }
 
+static enum rte_flow_item_type
+rte_flow_expand_rss_item_complete(const struct rte_flow_item *item)
+{
+	enum rte_flow_item_type ret = RTE_FLOW_ITEM_TYPE_VOID;
+	uint16_t ether_type = 0;
+	uint8_t ip_next_proto = 0;
+
+	if (item == NULL || item->spec == NULL)
+		return ret;
+	switch (item->type) {
+	case RTE_FLOW_ITEM_TYPE_ETH:
+		ether_type = ((const struct rte_flow_item_eth *)
+				(item->spec))->type;
+		if (rte_be_to_cpu_16(ether_type) == ETHER_TYPE_IPv4)
+			ret = RTE_FLOW_ITEM_TYPE_IPV4;
+		else if (rte_be_to_cpu_16(ether_type) == ETHER_TYPE_IPv6)
+			ret = RTE_FLOW_ITEM_TYPE_IPV6;
+		else if (rte_be_to_cpu_16(ether_type) == ETHER_TYPE_VLAN)
+			ret = RTE_FLOW_ITEM_TYPE_VLAN;
+		break;
+	case RTE_FLOW_ITEM_TYPE_VLAN:
+		ether_type = ((const struct rte_flow_item_vlan *)
+				(item->spec))->inner_type;
+		if (rte_be_to_cpu_16(ether_type) == ETHER_TYPE_IPv4)
+			ret = RTE_FLOW_ITEM_TYPE_IPV4;
+		else if (rte_be_to_cpu_16(ether_type) == ETHER_TYPE_IPv6)
+			ret = RTE_FLOW_ITEM_TYPE_IPV6;
+		else if (rte_be_to_cpu_16(ether_type) == ETHER_TYPE_VLAN)
+			ret = RTE_FLOW_ITEM_TYPE_VLAN;
+		break;
+	case RTE_FLOW_ITEM_TYPE_IPV4:
+		ip_next_proto = ((const struct rte_flow_item_ipv4 *)
+				(item->spec))->hdr.next_proto_id;
+		if (ip_next_proto == IPPROTO_UDP)
+			ret = RTE_FLOW_ITEM_TYPE_UDP;
+		else if (ip_next_proto == IPPROTO_TCP)
+			ret = RTE_FLOW_ITEM_TYPE_TCP;
+		else if (ip_next_proto == IPPROTO_IP)
+			ret = RTE_FLOW_ITEM_TYPE_IPV4;
+		else if (ip_next_proto == IPPROTO_IPV6)
+			ret = RTE_FLOW_ITEM_TYPE_IPV6;
+		break;
+	case RTE_FLOW_ITEM_TYPE_IPV6:
+		ip_next_proto = ((const struct rte_flow_item_ipv6 *)
+				(item->spec))->hdr.proto;
+		if (ip_next_proto == IPPROTO_UDP)
+			ret = RTE_FLOW_ITEM_TYPE_UDP;
+		else if (ip_next_proto == IPPROTO_TCP)
+			ret = RTE_FLOW_ITEM_TYPE_TCP;
+		else if (ip_next_proto == IPPROTO_IP)
+			ret = RTE_FLOW_ITEM_TYPE_IPV4;
+		else if (ip_next_proto == IPPROTO_IPV6)
+			ret = RTE_FLOW_ITEM_TYPE_IPV6;
+		break;
+	default:
+		ret = RTE_FLOW_ITEM_TYPE_VOID;
+		break;
+	}
+	return ret;
+}
+
 /* Get generic flow operations structure from a port. */
 const struct rte_flow_ops *
@@ -917,4 +978,9 @@ rte_flow_expand_rss(struct rte_flow_expand_rss *buf, size_t size,
 	size_t user_pattern_size = 0;
 	void *addr = NULL;
+	const struct rte_flow_expand_node *next = NULL;
+	struct rte_flow_item missed_item;
+	int missed = 0;
+	int elt = 0;
+	const struct rte_flow_item *last_item = NULL;
 
 	lsize = offsetof(struct rte_flow_expand_rss, entry) +
@@ -927,6 +993,6 @@ rte_flow_expand_rss(struct rte_flow_expand_rss *buf, size_t size,
 	}
 	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
-		const struct rte_flow_expand_node *next = NULL;
-
+		if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
+			last_item = item;
 		for (i = 0; node->next && node->next[i]; ++i) {
 			next = &graph[node->next[i]];
@@ -949,4 +1015,39 @@ rte_flow_expand_rss(struct rte_flow_expand_rss *buf, size_t size,
 	memset(flow_items, 0, sizeof(flow_items));
 	user_pattern_size -= sizeof(*item);
+	/*
+	 * Check if the last valid item has spec set
+	 * and need complete pattern.
+	 */
+	missed_item.type = rte_flow_expand_rss_item_complete(last_item);
+	if (missed_item.type != RTE_FLOW_ITEM_TYPE_VOID) {
+		next = NULL;
+		missed = 1;
+		for (i = 0; node->next && node->next[i]; ++i) {
+			next = &graph[node->next[i]];
+			if (next->type == missed_item.type) {
+				flow_items[0].type = missed_item.type;
+				flow_items[1].type = RTE_FLOW_ITEM_TYPE_END;
+				break;
+			}
+			next = NULL;
+		}
+	}
+	if (next && missed) {
+		elt = 2; /* missed item + item end. */
+		node = next;
+		lsize += elt * sizeof(*item) + user_pattern_size;
+		if ((node->rss_types & types) && lsize <= size) {
+			buf->entry[buf->entries].priority = 1;
+			buf->entry[buf->entries].pattern = addr;
+			buf->entries++;
+			rte_memcpy(addr, buf->entry[0].pattern,
+				   user_pattern_size);
+			addr = (void *)(((uintptr_t)addr) + user_pattern_size);
+			rte_memcpy(addr, flow_items, elt * sizeof(*item));
+			addr = (void *)(((uintptr_t)addr) +
+					elt * sizeof(*item));
+		}
+	}
+	memset(flow_items, 0, sizeof(flow_items));
 	next_node = node->next;
 	stack[stack_pos] = next_node;
@@ -961,6 +1062,5 @@ rte_flow_expand_rss(struct rte_flow_expand_rss *buf, size_t size,
 			 * plus the addition END item.
 			 */
-			int elt = stack_pos + 2;
-
+			elt = stack_pos + 2;
 			flow_items[stack_pos + 1].type = RTE_FLOW_ITEM_TYPE_END;
 			lsize += elt * sizeof(*item) + user_pattern_size;
@@ -969,5 +1069,5 @@ rte_flow_expand_rss(struct rte_flow_expand_rss *buf, size_t size,
 
 				buf->entry[buf->entries].priority =
-					stack_pos + 1;
+					stack_pos + 1 + missed;
 				buf->entry[buf->entries].pattern = addr;
 				buf->entries++;
@@ -976,4 +1076,8 @@ rte_flow_expand_rss(struct rte_flow_expand_rss *buf, size_t size,
 				addr = (void *)(((uintptr_t)addr) +
 						user_pattern_size);
+				rte_memcpy(addr, &missed_item,
+					   missed * sizeof(*item));
+				addr = (void *)(((uintptr_t)addr) +
+					missed * sizeof(*item));
 				rte_memcpy(addr, flow_items, n);
 				addr = (void *)(((uintptr_t)addr) + n);
@@ -1000,4 +1104,22 @@ rte_flow_expand_rss(struct rte_flow_expand_rss *buf, size_t size,
 		node = *next_node ? &graph[*next_node] : NULL;
 	};
+	/* no expanded flows but we have missed item, create one rule for it */
+	if (buf->entries == 1 && missed != 0) {
+		elt = 2;
+		lsize += elt * sizeof(*item) + user_pattern_size;
+		if (lsize <= size) {
+			buf->entry[buf->entries].priority = 1;
+			buf->entry[buf->entries].pattern = addr;
+			buf->entries++;
+			flow_items[0].type = missed_item.type;
+			flow_items[1].type = RTE_FLOW_ITEM_TYPE_END;
+			rte_memcpy(addr, buf->entry[0].pattern,
+				   user_pattern_size);
+			addr = (void *)(((uintptr_t)addr) + user_pattern_size);
+			rte_memcpy(addr, flow_items, elt * sizeof(*item));
+			addr = (void *)(((uintptr_t)addr) +
+					elt * sizeof(*item));
+		}
+	}
 	return lsize;
 }
-- 
2.21.0

---
  Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- -	2019-12-11 21:24:13.897111509 +0000
+++ 0014-ethdev-fix-expand-RSS-flows.patch	2019-12-11 21:24:12.596652685 +0000
@@ -1 +1 @@
-From fc2dd8dd492fade39a4c4de037ff3c869daff47d Mon Sep 17 00:00:00 2001
+From 5136c9fb9075bdb2e9215dadebc388cb3875df35 Mon Sep 17 00:00:00 2001
@@ -5,0 +6,2 @@
+[ upstream commit fc2dd8dd492fade39a4c4de037ff3c869daff47d ]
+
@@ -44 +45,0 @@
-Cc: stable at dpdk.org
@@ -53 +54 @@
-index 33e30111a..8ec9c90cd 100644
+index 3277be1ed..d545b15f5 100644
@@ -56 +57 @@
-@@ -214,4 +214,65 @@ flow_err(uint16_t port_id, int ret, struct rte_flow_error *error)
+@@ -158,4 +158,65 @@ flow_err(uint16_t port_id, int ret, struct rte_flow_error *error)
@@ -72 +73 @@
-+		if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4)
++		if (rte_be_to_cpu_16(ether_type) == ETHER_TYPE_IPv4)
@@ -74 +75 @@
-+		else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6)
++		else if (rte_be_to_cpu_16(ether_type) == ETHER_TYPE_IPv6)
@@ -76 +77 @@
-+		else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN)
++		else if (rte_be_to_cpu_16(ether_type) == ETHER_TYPE_VLAN)
@@ -82 +83 @@
-+		if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4)
++		if (rte_be_to_cpu_16(ether_type) == ETHER_TYPE_IPv4)
@@ -84 +85 @@
-+		else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6)
++		else if (rte_be_to_cpu_16(ether_type) == ETHER_TYPE_IPv6)
@@ -86 +87 @@
-+		else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN)
++		else if (rte_be_to_cpu_16(ether_type) == ETHER_TYPE_VLAN)
@@ -122 +123 @@
-@@ -973,4 +1034,9 @@ rte_flow_expand_rss(struct rte_flow_expand_rss *buf, size_t size,
+@@ -917,4 +978,9 @@ rte_flow_expand_rss(struct rte_flow_expand_rss *buf, size_t size,
@@ -132 +133 @@
-@@ -983,6 +1049,6 @@ rte_flow_expand_rss(struct rte_flow_expand_rss *buf, size_t size,
+@@ -927,6 +993,6 @@ rte_flow_expand_rss(struct rte_flow_expand_rss *buf, size_t size,
@@ -141 +142 @@
-@@ -1005,4 +1071,39 @@ rte_flow_expand_rss(struct rte_flow_expand_rss *buf, size_t size,
+@@ -949,4 +1015,39 @@ rte_flow_expand_rss(struct rte_flow_expand_rss *buf, size_t size,
@@ -181 +182 @@
-@@ -1017,6 +1118,5 @@ rte_flow_expand_rss(struct rte_flow_expand_rss *buf, size_t size,
+@@ -961,6 +1062,5 @@ rte_flow_expand_rss(struct rte_flow_expand_rss *buf, size_t size,
@@ -189 +190 @@
-@@ -1025,5 +1125,5 @@ rte_flow_expand_rss(struct rte_flow_expand_rss *buf, size_t size,
+@@ -969,5 +1069,5 @@ rte_flow_expand_rss(struct rte_flow_expand_rss *buf, size_t size,
@@ -196 +197 @@
-@@ -1032,4 +1132,8 @@ rte_flow_expand_rss(struct rte_flow_expand_rss *buf, size_t size,
+@@ -976,4 +1076,8 @@ rte_flow_expand_rss(struct rte_flow_expand_rss *buf, size_t size,
@@ -205 +206 @@
-@@ -1056,4 +1160,22 @@ rte_flow_expand_rss(struct rte_flow_expand_rss *buf, size_t size,
+@@ -1000,4 +1104,22 @@ rte_flow_expand_rss(struct rte_flow_expand_rss *buf, size_t size,



More information about the stable mailing list