[PATCH v1 1/5] net/cpfl: setup rte flow skeleton
Yuying Zhang
yuying.zhang at intel.com
Sat Aug 12 09:55:02 CEST 2023
Set up the rte_flow backend skeleton. Introduce a framework that
supports different engines as rte_flow backends, and bridge the
rte_flow driver API to these flow engines.
Signed-off-by: Yuying Zhang <yuying.zhang at intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang at intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 54 ++++++
drivers/net/cpfl/cpfl_ethdev.h | 5 +
drivers/net/cpfl/cpfl_flow.c | 331 +++++++++++++++++++++++++++++++++
drivers/net/cpfl/cpfl_flow.h | 88 +++++++++
drivers/net/cpfl/meson.build | 3 +-
5 files changed, 480 insertions(+), 1 deletion(-)
create mode 100644 drivers/net/cpfl/cpfl_flow.c
create mode 100644 drivers/net/cpfl/cpfl_flow.h
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 34b7c22ee1..23e5181588 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -16,6 +16,7 @@
#include "cpfl_ethdev.h"
#include <ethdev_private.h>
#include "cpfl_rxtx.h"
+#include "cpfl_flow.h"
#define CPFL_REPRESENTOR "representor"
#define CPFL_TX_SINGLE_Q "tx_single"
@@ -1199,6 +1200,19 @@ cpfl_dev_stop(struct rte_eth_dev *dev)
return 0;
}
+static void
+cpfl_flow_free(struct cpfl_vport *vport)
+{
+ struct rte_flow *p_flow;
+
+ while ((p_flow = TAILQ_FIRST(&vport->itf.flow_list))) {
+ TAILQ_REMOVE(&vport->itf.flow_list, p_flow, next);
+ if (p_flow->engine->free)
+ p_flow->engine->free(p_flow);
+ rte_free(p_flow);
+ }
+}
+
static int
cpfl_p2p_queue_grps_del(struct idpf_vport *vport)
{
@@ -1231,6 +1245,7 @@ cpfl_dev_close(struct rte_eth_dev *dev)
cpfl_p2p_queue_grps_del(vport);
if (!cpfl_vport->exceptional) {
+ cpfl_flow_free(cpfl_vport);
adapter->cur_vports &= ~RTE_BIT32(vport->devarg_id);
adapter->cur_vport_nb--;
adapter->vports[vport->sw_idx] = NULL;
@@ -1248,6 +1263,29 @@ cpfl_dev_close(struct rte_eth_dev *dev)
return 0;
}
/* ethdev .flow_ops_get callback: expose the generic rte_flow ops table.
 *
 * Returns 0 with *ops set on success, -EINVAL on a NULL device, or
 * -ENOTSUP for non-vport interfaces.
 */
static int
cpfl_dev_flow_ops_get(struct rte_eth_dev *dev,
		      const struct rte_flow_ops **ops)
{
	struct cpfl_itf *itf;

	if (!dev)
		return -EINVAL;

	itf = CPFL_DEV_TO_ITF(dev);

	/* only a vport supports rte_flow */
	if (itf->type != CPFL_ITF_TYPE_VPORT)
		return -ENOTSUP;
#ifdef CPFL_FLOW_JSON_SUPPORT
	*ops = &cpfl_flow_ops;
#else
	/* Flow support is compiled out without json-c; report no ops. */
	*ops = NULL;
	PMD_DRV_LOG(NOTICE, "not support rte_flow, please install json-c library.");
#endif
	return 0;
}
+
static int
cpfl_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
size_t len, uint32_t tx)
@@ -1449,6 +1487,7 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
.xstats_get = cpfl_dev_xstats_get,
.xstats_get_names = cpfl_dev_xstats_get_names,
.xstats_reset = cpfl_dev_xstats_reset,
+ .flow_ops_get = cpfl_dev_flow_ops_get,
.hairpin_cap_get = cpfl_hairpin_cap_get,
.rx_hairpin_queue_setup = cpfl_rx_hairpin_queue_setup,
.tx_hairpin_queue_setup = cpfl_tx_hairpin_queue_setup,
@@ -2411,6 +2450,13 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
goto err_create_ctrl_vport;
}
+#ifdef CPFL_FLOW_JSON_SUPPORT
+ ret = cpfl_flow_init(adapter);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to init flow module");
+ goto err_flow_init;
+ }
+#endif
adapter->cur_vports = 0;
adapter->cur_vport_nb = 0;
@@ -2418,6 +2464,10 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
return ret;
+#ifdef CPFL_FLOW_JSON_SUPPORT
+err_flow_init:
+ cpfl_ctrl_path_close(adapter);
+#endif
err_create_ctrl_vport:
rte_free(adapter->vports);
err_vports_alloc:
@@ -2574,6 +2624,7 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
cpfl_vport->itf.type = CPFL_ITF_TYPE_VPORT;
cpfl_vport->itf.adapter = adapter;
cpfl_vport->itf.data = dev->data;
+ TAILQ_INIT(&cpfl_vport->itf.flow_list);
adapter->vports[param->idx] = cpfl_vport;
adapter->cur_vports |= RTE_BIT32(param->devarg_id);
adapter->cur_vport_nb++;
@@ -2713,6 +2764,9 @@ cpfl_find_adapter_ext(struct rte_pci_device *pci_dev)
static void
cpfl_adapter_ext_deinit(struct cpfl_adapter_ext *adapter)
{
+#ifdef CPFL_FLOW_JSON_SUPPORT
+ cpfl_flow_uninit(adapter);
+#endif
cpfl_ctrl_path_close(adapter);
rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
cpfl_vport_map_uninit(adapter);
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 2e9480ffc1..c71f16ac60 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -143,9 +143,12 @@ enum cpfl_itf_type {
CPFL_ITF_TYPE_REPRESENTOR
};
/* List head type for the per-interface list of created rte_flow rules. */
TAILQ_HEAD(cpfl_flow_list, rte_flow);

struct cpfl_itf {
	enum cpfl_itf_type type;		/* vport or representor */
	struct cpfl_adapter_ext *adapter;	/* owning adapter */
	struct cpfl_flow_list flow_list;	/* flows created on this interface */
	void *data;				/* ethdev data backpointer */
};
@@ -222,6 +225,8 @@ struct cpfl_adapter_ext {
rte_spinlock_t repr_lock;
struct rte_hash *repr_whitelist_hash;
+ struct cpfl_flow_js_parser *flow_parser;
+
/* ctrl vport and ctrl queues. */
struct cpfl_vport ctrl_vport;
uint8_t ctrl_vport_recv_info[IDPF_DFLT_MBX_BUF_SIZE];
diff --git a/drivers/net/cpfl/cpfl_flow.c b/drivers/net/cpfl/cpfl_flow.c
new file mode 100644
index 0000000000..e303936081
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow.c
@@ -0,0 +1,331 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+#include <rte_flow_driver.h>
+#include <rte_tailq.h>
+
+#include "cpfl_flow.h"
+#include "cpfl_flow_parser.h"
+
/* Global registry of flow engines; populated through
 * cpfl_flow_engine_register() and walked by cpfl_flow_engine_init(),
 * cpfl_flow_engine_uninit() and cpfl_flow_engine_match().
 */
TAILQ_HEAD(cpfl_flow_engine_list, cpfl_flow_engine);

static struct cpfl_flow_engine_list engine_list = TAILQ_HEAD_INITIALIZER(engine_list);

/* Append @engine to the global engine registry. Expected to run before
 * cpfl_flow_engine_init() is called for any adapter.
 */
void
cpfl_flow_engine_register(struct cpfl_flow_engine *engine)
{
	TAILQ_INSERT_TAIL(&engine_list, engine, node);
}
+
+struct cpfl_flow_engine *
+cpfl_flow_engine_match(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ void **meta)
+{
+ struct cpfl_flow_engine *engine = NULL;
+ void *temp;
+
+ RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
+ if (!engine->parse_pattern_action)
+ continue;
+ if (engine->parse_pattern_action(dev, attr, pattern, actions, meta) < 0)
+ continue;
+ return engine;
+ }
+
+ return NULL;
+}
+
+int
+cpfl_flow_engine_init(struct cpfl_adapter_ext *adapter)
+{
+ struct cpfl_flow_engine *engine = NULL;
+ void *temp;
+ int ret;
+
+ RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
+ if (!engine->init) {
+ PMD_INIT_LOG(ERR, "Invalid engine type (%d)",
+ engine->type);
+ return -ENOTSUP;
+ }
+
+ ret = engine->init(adapter);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to initialize engine %d",
+ engine->type);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+void
+cpfl_flow_engine_uninit(struct cpfl_adapter_ext *adapter)
+{
+ struct cpfl_flow_engine *engine = NULL;
+ void *temp;
+
+ RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
+ if (engine->uninit)
+ engine->uninit(adapter);
+ }
+}
+
+static int
+cpfl_flow_valid_attr(const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ if (attr->priority > 6) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Only support priority 0-6.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+cpfl_flow_param_valid(const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ int ret;
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+ ret = cpfl_flow_valid_attr(attr, error);
+ if (ret)
+ return ret;
+
+ if (!actions || actions->type == RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+int
+cpfl_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct cpfl_flow_engine *engine;
+ int ret;
+
+ ret = cpfl_flow_param_valid(attr, pattern, actions, error);
+ if (ret)
+ return ret;
+
+ engine = cpfl_flow_engine_match(dev, attr, pattern, actions, NULL);
+
+ if (!engine) {
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "No matched engine.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+struct rte_flow *
+cpfl_flow_create(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_flow_attr *attr __rte_unused,
+ const struct rte_flow_item pattern[] __rte_unused,
+ const struct rte_flow_action actions[] __rte_unused,
+ struct rte_flow_error *error __rte_unused)
+{
+ struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+ struct cpfl_flow_engine *engine;
+ struct rte_flow *flow;
+ void *meta;
+ int ret;
+
+ flow = rte_malloc(NULL, sizeof(struct rte_flow), 0);
+ if (!flow) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to allocate memory");
+ return NULL;
+ }
+
+ ret = cpfl_flow_param_valid(attr, pattern, actions, error);
+ if (ret) {
+ rte_free(flow);
+ return NULL;
+ }
+
+ engine = cpfl_flow_engine_match(dev, attr, pattern, actions, &meta);
+ if (!engine) {
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "No matched engine");
+ rte_free(flow);
+ return NULL;
+ }
+
+ if (!engine->create) {
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "No matched flow creation function");
+ rte_free(flow);
+ return NULL;
+ }
+
+ ret = engine->create(dev, flow, meta, error);
+ if (ret) {
+ rte_free(flow);
+ return NULL;
+ }
+
+ flow->engine = engine;
+ TAILQ_INSERT_TAIL(&itf->flow_list, flow, next);
+
+ return flow;
+}
+
+int
+cpfl_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+ int ret = 0;
+
+ if (!flow || !flow->engine || !flow->engine->destroy) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Invalid flow");
+ return -rte_errno;
+ }
+
+ ret = flow->engine->destroy(dev, flow, error);
+ if (!ret)
+ TAILQ_REMOVE(&itf->flow_list, flow, next);
+ else
+ PMD_DRV_LOG(ERR, "Failed to destroy flow");
+
+ return ret;
+}
+
+int
+cpfl_flow_flush(struct rte_eth_dev *dev,
+ struct rte_flow_error *error)
+{
+ struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+ struct rte_flow *p_flow;
+ void *temp;
+ int ret = 0;
+
+ RTE_TAILQ_FOREACH_SAFE(p_flow, &itf->flow_list, next, temp) {
+ ret = cpfl_flow_destroy(dev, p_flow, error);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to flush flows");
+ return -EINVAL;
+ }
+ }
+
+ return ret;
+}
+
+int
+cpfl_flow_query(struct rte_eth_dev *dev __rte_unused,
+ struct rte_flow *flow __rte_unused,
+ const struct rte_flow_action *actions __rte_unused,
+ void *data __rte_unused,
+ struct rte_flow_error *error __rte_unused)
+{
+ struct rte_flow_query_count *count = data;
+ int ret = -EINVAL;
+
+ if (!flow || !flow->engine || !flow->engine->query_count) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Invalid flow");
+ return -rte_errno;
+ }
+
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ break;
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ ret = flow->engine->query_count(dev, flow, count, error);
+ break;
+ default:
+ ret = rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "action not supported");
+ break;
+ }
+ }
+
+ return ret;
+}
+
/* Generic rte_flow ops table exposed through cpfl_dev_flow_ops_get(). */
const struct rte_flow_ops cpfl_flow_ops = {
	.validate = cpfl_flow_validate,
	.create = cpfl_flow_create,
	.destroy = cpfl_flow_destroy,
	.flush = cpfl_flow_flush,
	.query = cpfl_flow_query,
};
+
+int
+cpfl_flow_init(struct cpfl_adapter_ext *ad)
+{
+ int ret;
+
+ if (ad->devargs.flow_parser[0] == '\0') {
+ PMD_INIT_LOG(WARNING, "flow module is not initialized");
+ return 0;
+ }
+
+ ret = cpfl_flow_engine_init(ad);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to init flow engines");
+ goto err;
+ }
+
+ ret = cpfl_parser_create(&ad->flow_parser, ad->devargs.flow_parser);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to create flow parser");
+ goto err;
+ }
+
+ return ret;
+
+err:
+ cpfl_flow_engine_uninit(ad);
+ return ret;
+}
+
/* Counterpart of cpfl_flow_init(): destroy the parser and uninitialize the
 * engines. No-op when the flow module was never enabled (empty devarg).
 */
void
cpfl_flow_uninit(struct cpfl_adapter_ext *ad)
{
	if (ad->devargs.flow_parser[0] == '\0')
		return;

	cpfl_parser_destroy(ad->flow_parser);
	cpfl_flow_engine_uninit(ad);
}
diff --git a/drivers/net/cpfl/cpfl_flow.h b/drivers/net/cpfl/cpfl_flow.h
new file mode 100644
index 0000000000..04f4cc1149
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow.h
@@ -0,0 +1,88 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2023 Intel Corporation
 */

#ifndef _CPFL_FLOW_H_
#define _CPFL_FLOW_H_

#include <rte_flow.h>
#include "cpfl_ethdev.h"

/* Generic rte_flow ops table implemented in cpfl_flow.c. */
extern const struct rte_flow_ops cpfl_flow_ops;

/* Identifies which backend implements a flow engine. */
enum cpfl_flow_engine_type {
	CPFL_FLOW_ENGINE_NONE = 0,
	CPFL_FLOW_ENGINE_FXP,
};

/* Per-engine callback types. The generic layer checks each pointer before
 * invoking it; init is mandatory (cpfl_flow_engine_init() fails with
 * -ENOTSUP when it is missing).
 */
typedef int (*engine_init_t)(struct cpfl_adapter_ext *ad);
typedef void (*engine_uninit_t)(struct cpfl_adapter_ext *ad);
typedef int (*engine_create_t)(struct rte_eth_dev *dev,
			       struct rte_flow *flow,
			       void *meta,
			       struct rte_flow_error *error);
typedef int (*engine_destroy_t)(struct rte_eth_dev *dev,
				struct rte_flow *flow,
				struct rte_flow_error *error);
typedef int (*engine_query_t)(struct rte_eth_dev *dev,
			      struct rte_flow *flow,
			      struct rte_flow_query_count *count,
			      struct rte_flow_error *error);
typedef void (*engine_free_t) (struct rte_flow *flow);
typedef int (*engine_parse_pattern_action_t)(struct rte_eth_dev *dev,
					     const struct rte_flow_attr *attr,
					     const struct rte_flow_item pattern[],
					     const struct rte_flow_action actions[],
					     void **meta);

/* One backend engine, registered on the global engine list. */
struct cpfl_flow_engine {
	TAILQ_ENTRY(cpfl_flow_engine) node;	/* linkage on the global engine list */
	enum cpfl_flow_engine_type type;
	engine_init_t init;
	engine_uninit_t uninit;
	engine_create_t create;
	engine_destroy_t destroy;
	engine_query_t query_count;
	engine_free_t free;
	engine_parse_pattern_action_t parse_pattern_action;
};

/* Driver-private flow handle returned to applications. */
struct rte_flow {
	TAILQ_ENTRY(rte_flow) next;		/* linkage on the per-interface flow list */
	struct cpfl_flow_engine *engine;	/* engine that created this flow */
	void *rule;				/* engine-private rule data */
};

void cpfl_flow_engine_register(struct cpfl_flow_engine *engine);

struct cpfl_flow_engine *
cpfl_flow_engine_match(struct rte_eth_dev *dev,
		       const struct rte_flow_attr *attr,
		       const struct rte_flow_item pattern[],
		       const struct rte_flow_action actions[],
		       void **meta);
int
cpfl_flow_engine_init(struct cpfl_adapter_ext *adapter);
void
cpfl_flow_engine_uninit(struct cpfl_adapter_ext *adapter);

int cpfl_flow_init(struct cpfl_adapter_ext *ad);
void cpfl_flow_uninit(struct cpfl_adapter_ext *ad);
struct rte_flow *cpfl_flow_create(struct rte_eth_dev *dev,
				  const struct rte_flow_attr *attr,
				  const struct rte_flow_item pattern[],
				  const struct rte_flow_action actions[],
				  struct rte_flow_error *error);
int cpfl_flow_validate(struct rte_eth_dev *dev,
		       const struct rte_flow_attr *attr,
		       const struct rte_flow_item pattern[],
		       const struct rte_flow_action actions[],
		       struct rte_flow_error *error);
int cpfl_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, struct rte_flow_error *error);
int cpfl_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error);
int cpfl_flow_query(struct rte_eth_dev *dev,
		    struct rte_flow *flow,
		    const struct rte_flow_action *actions,
		    void *data,
		    struct rte_flow_error *error);
#endif
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 84ba994469..222497f7c2 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -42,10 +42,11 @@ endif
js_dep = dependency('json-c', required: false, method : 'pkg-config')
if js_dep.found()
sources += files(
+ 'cpfl_flow.c',
'cpfl_flow_parser.c',
'cpfl_rules.c',
'cpfl_controlq.c',
)
dpdk_conf.set('CPFL_FLOW_JSON_SUPPORT', true)
ext_deps += js_dep
-endif
\ No newline at end of file
+endif
--
2.25.1
More information about the dev
mailing list