@@ -96,6 +96,7 @@ Features
- Per packet no-inline hint flag to disable packet data copying into Tx descriptors.
- Hardware LRO.
- Hairpin.
+- Multi-threaded flow insertion.
Limitations
-----------
@@ -358,6 +358,7 @@ New Features
* Added support for QinQ packets matching.
* Added support for the new vlan fields ``has_vlan`` in the eth item and
``has_more_vlan`` in the vlan item.
+  * Added support for PMD-level multi-threaded flow insertion.
* **Updated vhost sample application.**
@@ -321,7 +321,6 @@
err = errno;
goto error;
}
- pthread_mutex_init(&sh->dv_mutex, NULL);
sh->tx_domain = domain;
#ifdef HAVE_MLX5DV_DR_ESWITCH
if (priv->config.dv_esw_en) {
@@ -435,7 +434,6 @@
mlx5_glue->destroy_flow_action(sh->pop_vlan_action);
sh->pop_vlan_action = NULL;
}
- pthread_mutex_destroy(&sh->dv_mutex);
#endif /* HAVE_MLX5DV_DR */
if (sh->default_miss_action)
mlx5_glue->destroy_flow_action
@@ -1536,6 +1534,8 @@
}
rte_spinlock_init(&priv->shared_act_sl);
mlx5_flow_counter_mode_config(eth_dev);
+ if (priv->config.dv_flow_en)
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE;
return eth_dev;
error:
if (priv) {
@@ -639,7 +639,6 @@ struct mlx5_dev_ctx_shared {
/* Packet pacing related structure. */
struct mlx5_dev_txpp txpp;
/* Shared DV/DR flow data section. */
- pthread_mutex_t dv_mutex; /* DV context mutex. */
uint32_t dv_meta_mask; /* flow META metadata supported mask. */
uint32_t dv_mark_mask; /* flow MARK metadata supported mask. */
uint32_t dv_regc0_mask; /* available bits of metatada reg_c[0]. */
@@ -276,45 +276,6 @@ struct field_modify_info modify_tcp[] = {
}
}
-/**
- * Acquire the synchronizing object to protect multithreaded access
- * to shared dv context. Lock occurs only if context is actually
- * shared, i.e. we have multiport IB device and representors are
- * created.
- *
- * @param[in] dev
- * Pointer to the rte_eth_dev structure.
- */
-static void
-flow_dv_shared_lock(struct rte_eth_dev *dev)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_dev_ctx_shared *sh = priv->sh;
-
- if (sh->refcnt > 1) {
- int ret;
-
- ret = pthread_mutex_lock(&sh->dv_mutex);
- MLX5_ASSERT(!ret);
- (void)ret;
- }
-}
-
-static void
-flow_dv_shared_unlock(struct rte_eth_dev *dev)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_dev_ctx_shared *sh = priv->sh;
-
- if (sh->refcnt > 1) {
- int ret;
-
- ret = pthread_mutex_unlock(&sh->dv_mutex);
- MLX5_ASSERT(!ret);
- (void)ret;
- }
-}
-
/* Update VLAN's VID/PCP based on input rte_flow_action.
*
* @param[in] action
@@ -5075,7 +5036,7 @@ struct mlx5_hlist_entry *
* Index to the counter handler.
*/
static void
-flow_dv_counter_release(struct rte_eth_dev *dev, uint32_t counter)
+flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_counter_pool *pool = NULL;
@@ -8622,7 +8583,7 @@ struct mlx5_hlist_entry *
act_res->rix_tag = 0;
}
if (act_res->cnt) {
- flow_dv_counter_release(dev, act_res->cnt);
+ flow_dv_counter_free(dev, act_res->cnt);
act_res->cnt = 0;
}
}
@@ -9296,12 +9257,12 @@ struct mlx5_cache_entry *
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-__flow_dv_translate(struct rte_eth_dev *dev,
- struct mlx5_flow *dev_flow,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item items[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error)
+flow_dv_translate(struct rte_eth_dev *dev,
+ struct mlx5_flow *dev_flow,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_config *dev_conf = &priv->config;
@@ -10378,8 +10339,8 @@ struct mlx5_cache_entry *
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-__flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
- struct rte_flow_error *error)
+flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
+ struct rte_flow_error *error)
{
struct mlx5_flow_dv_workspace *dv;
struct mlx5_flow_handle *dh;
@@ -10835,7 +10796,7 @@ struct mlx5_cache_entry *
* Pointer to flow structure.
*/
static void
-__flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
+flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
struct mlx5_flow_handle *dh;
uint32_t handle_idx;
@@ -10871,7 +10832,7 @@ struct mlx5_cache_entry *
* Pointer to flow structure.
*/
static void
-__flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
+flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
struct rte_flow_shared_action *shared;
struct mlx5_flow_handle *dev_handle;
@@ -10879,12 +10840,12 @@ struct mlx5_cache_entry *
if (!flow)
return;
- __flow_dv_remove(dev, flow);
+ flow_dv_remove(dev, flow);
shared = mlx5_flow_get_shared_rss(flow);
if (shared)
__atomic_sub_fetch(&shared->refcnt, 1, __ATOMIC_RELAXED);
if (flow->counter) {
- flow_dv_counter_release(dev, flow->counter);
+ flow_dv_counter_free(dev, flow->counter);
flow->counter = 0;
}
if (flow->meter) {
@@ -11167,10 +11128,10 @@ struct mlx5_cache_entry *
* rte_errno is set.
*/
static struct rte_flow_shared_action *
-__flow_dv_action_create(struct rte_eth_dev *dev,
- const struct rte_flow_shared_action_conf *conf,
- const struct rte_flow_action *action,
- struct rte_flow_error *error)
+flow_dv_action_create(struct rte_eth_dev *dev,
+ const struct rte_flow_shared_action_conf *conf,
+ const struct rte_flow_action *action,
+ struct rte_flow_error *error)
{
struct rte_flow_shared_action *shared_action = NULL;
struct mlx5_priv *priv = dev->data->dev_private;
@@ -11214,9 +11175,9 @@ struct mlx5_cache_entry *
* 0 on success, otherwise negative errno value.
*/
static int
-__flow_dv_action_destroy(struct rte_eth_dev *dev,
- struct rte_flow_shared_action *action,
- struct rte_flow_error *error)
+flow_dv_action_destroy(struct rte_eth_dev *dev,
+ struct rte_flow_shared_action *action,
+ struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
int ret;
@@ -11336,7 +11297,7 @@ struct mlx5_cache_entry *
* 0 on success, otherwise negative errno value.
*/
static int
-__flow_dv_action_update(struct rte_eth_dev *dev,
+flow_dv_action_update(struct rte_eth_dev *dev,
struct rte_flow_shared_action *action,
const void *action_conf,
struct rte_flow_error *error)
@@ -12100,85 +12061,12 @@ struct mlx5_cache_entry *
}
/*
- * Mutex-protected thunk to lock-free __flow_dv_translate().
- */
-static int
-flow_dv_translate(struct rte_eth_dev *dev,
- struct mlx5_flow *dev_flow,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item items[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error)
-{
- int ret;
-
- flow_dv_shared_lock(dev);
- ret = __flow_dv_translate(dev, dev_flow, attr, items, actions, error);
- flow_dv_shared_unlock(dev);
- return ret;
-}
-
-/*
- * Mutex-protected thunk to lock-free __flow_dv_apply().
- */
-static int
-flow_dv_apply(struct rte_eth_dev *dev,
- struct rte_flow *flow,
- struct rte_flow_error *error)
-{
- int ret;
-
- flow_dv_shared_lock(dev);
- ret = __flow_dv_apply(dev, flow, error);
- flow_dv_shared_unlock(dev);
- return ret;
-}
-
-/*
- * Mutex-protected thunk to lock-free __flow_dv_remove().
- */
-static void
-flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
-{
- flow_dv_shared_lock(dev);
- __flow_dv_remove(dev, flow);
- flow_dv_shared_unlock(dev);
-}
-
-/*
- * Mutex-protected thunk to lock-free __flow_dv_destroy().
- */
-static void
-flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
-{
- flow_dv_shared_lock(dev);
- __flow_dv_destroy(dev, flow);
- flow_dv_shared_unlock(dev);
-}
-
-/*
- * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
+ * Allocate a flow counter via the lock-free flow_dv_counter_alloc().
*/
static uint32_t
flow_dv_counter_allocate(struct rte_eth_dev *dev)
{
- uint32_t cnt;
-
- flow_dv_shared_lock(dev);
- cnt = flow_dv_counter_alloc(dev, 0);
- flow_dv_shared_unlock(dev);
- return cnt;
-}
-
-/*
- * Mutex-protected thunk to lock-free flow_dv_counter_release().
- */
-static void
-flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t cnt)
-{
- flow_dv_shared_lock(dev);
- flow_dv_counter_release(dev, cnt);
- flow_dv_shared_unlock(dev);
+ return flow_dv_counter_alloc(dev, 0);
}
/**
@@ -12216,57 +12104,6 @@ struct mlx5_cache_entry *
}
}
-/*
- * Mutex-protected thunk to lock-free __flow_dv_action_create().
- */
-static struct rte_flow_shared_action *
-flow_dv_action_create(struct rte_eth_dev *dev,
- const struct rte_flow_shared_action_conf *conf,
- const struct rte_flow_action *action,
- struct rte_flow_error *error)
-{
- struct rte_flow_shared_action *shared_action = NULL;
-
- flow_dv_shared_lock(dev);
- shared_action = __flow_dv_action_create(dev, conf, action, error);
- flow_dv_shared_unlock(dev);
- return shared_action;
-}
-
-/*
- * Mutex-protected thunk to lock-free __flow_dv_action_destroy().
- */
-static int
-flow_dv_action_destroy(struct rte_eth_dev *dev,
- struct rte_flow_shared_action *action,
- struct rte_flow_error *error)
-{
- int ret;
-
- flow_dv_shared_lock(dev);
- ret = __flow_dv_action_destroy(dev, action, error);
- flow_dv_shared_unlock(dev);
- return ret;
-}
-
-/*
- * Mutex-protected thunk to lock-free __flow_dv_action_update().
- */
-static int
-flow_dv_action_update(struct rte_eth_dev *dev,
- struct rte_flow_shared_action *action,
- const void *action_conf,
- struct rte_flow_error *error)
-{
- int ret;
-
- flow_dv_shared_lock(dev);
- ret = __flow_dv_action_update(dev, action, action_conf,
- error);
- flow_dv_shared_unlock(dev);
- return ret;
-}
-
static int
flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
{