@@ -69,6 +69,44 @@ static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
/* Process local data for secondary processes. */
static struct mlx5_local_data mlx5_local_data;
+/* Indexed pool configuration per rte flow type (control/generic/MCP). */
+static struct mlx5_indexed_pool_config icfg[] = {
+ {
+ .size = sizeof(struct rte_flow),
+ .trunk_size = 64,
+ .need_lock = 1,
+ .release_mem_en = 0,
+ .malloc = mlx5_malloc,
+ .free = mlx5_free,
+ .per_core_cache = 0,
+ .type = "ctl_flow_ipool",
+ },
+ {
+ .size = sizeof(struct rte_flow),
+ .trunk_size = 64,
+ .grow_trunk = 3,
+ .grow_shift = 2,
+ .need_lock = 1,
+ .release_mem_en = 0,
+ .malloc = mlx5_malloc,
+ .free = mlx5_free,
+ .per_core_cache = 1 << 14,
+ .type = "rte_flow_ipool",
+ },
+ {
+ .size = sizeof(struct rte_flow),
+ .trunk_size = 64,
+ .grow_trunk = 3,
+ .grow_shift = 2,
+ .need_lock = 1,
+ .release_mem_en = 0,
+ .malloc = mlx5_malloc,
+ .free = mlx5_free,
+ .per_core_cache = 0,
+ .type = "mcp_flow_ipool",
+ },
+};
+
/**
* Set the completion channel file descriptor interrupt as non-blocking.
*
@@ -832,6 +870,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
int own_domain_id = 0;
uint16_t port_id;
struct mlx5_port_info vport_info = { .query_flags = 0 };
+ int i;
/* Determine if this port representor is supposed to be spawned. */
if (switch_info->representor && dpdk_dev->devargs &&
@@ -1575,7 +1614,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
mlx5_ifindex(eth_dev),
eth_dev->data->mac_addrs,
MLX5_MAX_MAC_ADDRESSES);
- priv->flows = 0;
priv->ctrl_flows = 0;
rte_spinlock_init(&priv->flow_list_lock);
TAILQ_INIT(&priv->flow_meters);
@@ -1611,6 +1649,16 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
mlx5_set_min_inline(spawn, config);
/* Store device configuration on private structure. */
priv->config = *config;
+ for (i = 0; i < MLX5_FLOW_TYPE_MAXI; i++) {
+ /* Copy the template so the static defaults stay intact across ports. */
+ struct mlx5_indexed_pool_config flow_cfg = icfg[i];
+
+ flow_cfg.release_mem_en = !!config->reclaim_mode;
+ if (config->reclaim_mode)
+ flow_cfg.per_core_cache = 0;
+ priv->flows[i] = mlx5_ipool_create(&flow_cfg);
+ if (!priv->flows[i])
+ goto error;
+ }
/* Create context for virtual machine VLAN workaround. */
priv->vmwa_context = mlx5_vlan_vmwa_init(eth_dev, spawn->ifindex);
if (config->dv_flow_en) {
@@ -325,7 +325,8 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
.grow_trunk = 3,
.grow_shift = 2,
.need_lock = 1,
- .release_mem_en = 1,
+ .release_mem_en = 0,
+ .per_core_cache = 1 << 19,
.malloc = mlx5_malloc,
.free = mlx5_free,
.type = "mlx5_flow_handle_ipool",
@@ -793,8 +794,10 @@ mlx5_flow_ipool_create(struct mlx5_dev_ctx_shared *sh,
MLX5_FLOW_HANDLE_VERBS_SIZE;
break;
}
- if (config->reclaim_mode)
+ if (config->reclaim_mode) {
cfg.release_mem_en = 1;
+ cfg.per_core_cache = 0;
+ }
sh->ipool[i] = mlx5_ipool_create(&cfg);
}
}
@@ -1529,7 +1532,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
* If all the flows are already flushed in the device stop stage,
* then this will return directly without any action.
*/
- mlx5_flow_list_flush(dev, &priv->flows, true);
+ mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_GEN, true);
mlx5_action_handle_flush(dev);
mlx5_flow_meter_flush(dev, NULL);
/* Prevent crashes when queues are still in use. */
@@ -71,6 +71,14 @@ enum mlx5_reclaim_mem_mode {
MLX5_RCM_AGGR, /* Reclaim PMD and rdma-core level. */
};
+/* The type of flow. */
+enum mlx5_flow_type {
+ MLX5_FLOW_TYPE_CTL, /* Control flow. */
+ MLX5_FLOW_TYPE_GEN, /* General flow. */
+ MLX5_FLOW_TYPE_MCP, /* MCP flow. */
+ MLX5_FLOW_TYPE_MAXI,
+};
+
/* Hash and cache list callback context. */
struct mlx5_flow_cb_ctx {
struct rte_eth_dev *dev;
@@ -1367,7 +1375,8 @@ struct mlx5_priv {
unsigned int (*reta_idx)[]; /* RETA index table. */
unsigned int reta_idx_n; /* RETA index size. */
struct mlx5_drop drop_queue; /* Flow drop queues. */
- uint32_t flows; /* RTE Flow rules. */
+ struct mlx5_indexed_pool *flows[MLX5_FLOW_TYPE_MAXI];
+ /* RTE flow rule pools, indexed by enum mlx5_flow_type. */
uint32_t ctrl_flows; /* Control flow rules. */
rte_spinlock_t flow_list_lock;
struct mlx5_obj_ops obj_ops; /* HW objects operations. */
@@ -1627,7 +1636,8 @@ struct rte_flow *mlx5_flow_create(struct rte_eth_dev *dev,
struct rte_flow_error *error);
int mlx5_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
struct rte_flow_error *error);
-void mlx5_flow_list_flush(struct rte_eth_dev *dev, uint32_t *list, bool active);
+void mlx5_flow_list_flush(struct rte_eth_dev *dev, enum mlx5_flow_type type,
+ bool active);
int mlx5_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error);
int mlx5_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
const struct rte_flow_action *action, void *data,
@@ -3109,31 +3109,6 @@ mlx5_flow_validate_item_ecpri(const struct rte_flow_item *item,
MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
}
-/**
- * Release resource related QUEUE/RSS action split.
- *
- * @param dev
- * Pointer to Ethernet device.
- * @param flow
- * Flow to release id's from.
- */
-static void
-flow_mreg_split_qrss_release(struct rte_eth_dev *dev,
- struct rte_flow *flow)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
- uint32_t handle_idx;
- struct mlx5_flow_handle *dev_handle;
-
- SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
- handle_idx, dev_handle, next)
- if (dev_handle->split_flow_id &&
- !dev_handle->is_meter_flow_id)
- mlx5_ipool_free(priv->sh->ipool
- [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
- dev_handle->split_flow_id);
-}
-
static int
flow_null_validate(struct rte_eth_dev *dev __rte_unused,
const struct rte_flow_attr *attr __rte_unused,
@@ -3429,7 +3404,6 @@ flow_drv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
const struct mlx5_flow_driver_ops *fops;
enum mlx5_flow_drv_type type = flow->drv_type;
- flow_mreg_split_qrss_release(dev, flow);
MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
fops = flow_get_drv_ops(type);
fops->destroy(dev, flow);
@@ -4055,14 +4029,14 @@ flow_check_hairpin_split(struct rte_eth_dev *dev,
/* Declare flow create/destroy prototype in advance. */
static uint32_t
-flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
+flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,
const struct rte_flow_attr *attr,
const struct rte_flow_item items[],
const struct rte_flow_action actions[],
bool external, struct rte_flow_error *error);
static void
-flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,
+flow_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type,
uint32_t flow_idx);
int
@@ -4184,8 +4158,8 @@ flow_dv_mreg_create_cb(struct mlx5_hlist *list, uint64_t key,
* be applied, removed, deleted in ardbitrary order
* by list traversing.
*/
- mcp_res->rix_flow = flow_list_create(dev, NULL, &attr, items,
- actions, false, error);
+ mcp_res->rix_flow = flow_list_create(dev, MLX5_FLOW_TYPE_MCP,
+ &attr, items, actions, false, error);
if (!mcp_res->rix_flow) {
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], idx);
return NULL;
@@ -4247,7 +4221,7 @@ flow_dv_mreg_remove_cb(struct mlx5_hlist *list, struct mlx5_hlist_entry *entry)
struct mlx5_priv *priv = dev->data->dev_private;
MLX5_ASSERT(mcp_res->rix_flow);
- flow_list_destroy(dev, NULL, mcp_res->rix_flow);
+ flow_list_destroy(dev, MLX5_FLOW_TYPE_MCP, mcp_res->rix_flow);
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
}
@@ -6187,7 +6161,7 @@ flow_rss_workspace_adjust(struct mlx5_flow_workspace *wks,
* A flow index on success, 0 otherwise and rte_errno is set.
*/
static uint32_t
-flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
+flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,
const struct rte_flow_attr *attr,
const struct rte_flow_item items[],
const struct rte_flow_action original_actions[],
@@ -6255,7 +6229,7 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
external, hairpin_flow, error);
if (ret < 0)
goto error_before_hairpin_split;
- flow = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], &idx);
+ flow = mlx5_ipool_zmalloc(priv->flows[type], &idx);
if (!flow) {
rte_errno = ENOMEM;
goto error_before_hairpin_split;
@@ -6385,12 +6359,7 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
if (ret < 0)
goto error;
}
- if (list) {
- rte_spinlock_lock(&priv->flow_list_lock);
- ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list, idx,
- flow, next);
- rte_spinlock_unlock(&priv->flow_list_lock);
- }
+ flow->type = type;
flow_rxq_flags_set(dev, flow);
rte_free(translated_actions);
tunnel = flow_tunnel_from_rule(wks->flows);
@@ -6412,7 +6381,7 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
mlx5_ipool_get
(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
rss_desc->shared_rss))->refcnt, 1, __ATOMIC_RELAXED);
- mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], idx);
+ mlx5_ipool_free(priv->flows[type], idx);
rte_errno = ret; /* Restore rte_errno. */
ret = rte_errno;
rte_errno = ret;
@@ -6464,10 +6433,9 @@ mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev)
.type = RTE_FLOW_ACTION_TYPE_END,
},
};
- struct mlx5_priv *priv = dev->data->dev_private;
struct rte_flow_error error;
- return (void *)(uintptr_t)flow_list_create(dev, &priv->ctrl_flows,
+ return (void *)(uintptr_t)flow_list_create(dev, MLX5_FLOW_TYPE_CTL,
&attr, &pattern,
actions, false, &error);
}
@@ -6519,8 +6487,6 @@ mlx5_flow_create(struct rte_eth_dev *dev,
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
- struct mlx5_priv *priv = dev->data->dev_private;
-
/*
* If the device is not started yet, it is not allowed to created a
* flow from application. PMD default flows and traffic control flows
@@ -6536,8 +6502,9 @@ mlx5_flow_create(struct rte_eth_dev *dev,
return NULL;
}
- return (void *)(uintptr_t)flow_list_create(dev, &priv->flows,
- attr, items, actions, true, error);
+ return (void *)(uintptr_t)flow_list_create(dev, MLX5_FLOW_TYPE_GEN,
+ attr, items, actions,
+ true, error);
}
/**
@@ -6545,24 +6512,19 @@ mlx5_flow_create(struct rte_eth_dev *dev,
*
* @param dev
* Pointer to Ethernet device.
- * @param list
- * Pointer to the Indexed flow list. If this parameter NULL,
- * there is no flow removal from the list. Be noted that as
- * flow is add to the indexed list, memory of the indexed
- * list points to maybe changed as flow destroyed.
* @param[in] flow_idx
* Index of flow to destroy.
*/
static void
-flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,
+flow_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type,
uint32_t flow_idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct rte_flow *flow = mlx5_ipool_get(priv->sh->ipool
- [MLX5_IPOOL_RTE_FLOW], flow_idx);
+ struct rte_flow *flow = mlx5_ipool_get(priv->flows[type], flow_idx);
if (!flow)
return;
+ MLX5_ASSERT(flow->type == type);
/*
* Update RX queue flags only if port is started, otherwise it is
* already clean.
@@ -6570,12 +6532,6 @@ flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,
if (dev->data->dev_started)
flow_rxq_flags_trim(dev, flow);
flow_drv_destroy(dev, flow);
- if (list) {
- rte_spinlock_lock(&priv->flow_list_lock);
- ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list,
- flow_idx, flow, next);
- rte_spinlock_unlock(&priv->flow_list_lock);
- }
if (flow->tunnel) {
struct mlx5_flow_tunnel *tunnel;
@@ -6585,7 +6541,7 @@ flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,
mlx5_flow_tunnel_free(dev, tunnel);
}
flow_mreg_del_copy_action(dev, flow);
- mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], flow_idx);
+ mlx5_ipool_free(priv->flows[type], flow_idx);
}
/**
@@ -6593,18 +6549,21 @@ flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,
*
* @param dev
* Pointer to Ethernet device.
- * @param list
- * Pointer to the Indexed flow list.
+ * @param type
+ * Flow type to be flushed.
* @param active
* If flushing is called avtively.
*/
void
-mlx5_flow_list_flush(struct rte_eth_dev *dev, uint32_t *list, bool active)
+mlx5_flow_list_flush(struct rte_eth_dev *dev, enum mlx5_flow_type type,
+ bool active)
{
- uint32_t num_flushed = 0;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ uint32_t num_flushed = 0, fidx = 1;
+ struct rte_flow *flow;
- while (*list) {
- flow_list_destroy(dev, list, *list);
+ MLX5_IPOOL_FOREACH(priv->flows[type], fidx, flow) {
+ flow_list_destroy(dev, type, fidx);
num_flushed++;
}
if (active) {
@@ -6776,18 +6735,19 @@ mlx5_flow_pop_thread_workspace(void)
* @return the number of flows not released.
*/
int
mlx5_flow_verify(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct rte_flow *flow;
- uint32_t idx;
- int ret = 0;
+ uint32_t idx = 0;
+ int ret = 0, i;
- ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], priv->flows, idx,
- flow, next) {
- DRV_LOG(DEBUG, "port %u flow %p still referenced",
- dev->data->port_id, (void *)flow);
- ++ret;
+ for (i = 0; i < MLX5_FLOW_TYPE_MAXI; i++) {
+ MLX5_IPOOL_FOREACH(priv->flows[i], idx, flow) {
+ DRV_LOG(DEBUG, "port %u flow %p still referenced",
+ dev->data->port_id, (void *)flow);
+ ret++;
+ }
}
return ret;
}
@@ -6807,7 +6767,6 @@ int
mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev,
uint32_t queue)
{
- struct mlx5_priv *priv = dev->data->dev_private;
const struct rte_flow_attr attr = {
.egress = 1,
.priority = 0,
@@ -6840,8 +6799,8 @@ mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev,
actions[0].type = RTE_FLOW_ACTION_TYPE_JUMP;
actions[0].conf = &jump;
actions[1].type = RTE_FLOW_ACTION_TYPE_END;
- flow_idx = flow_list_create(dev, &priv->ctrl_flows,
- &attr, items, actions, false, &error);
+ flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_CTL,
+ &attr, items, actions, false, &error);
if (!flow_idx) {
DRV_LOG(DEBUG,
"Failed to create ctrl flow: rte_errno(%d),"
@@ -6930,8 +6889,8 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
action_rss.types = 0;
for (i = 0; i != priv->reta_idx_n; ++i)
queue[i] = (*priv->reta_idx)[i];
- flow_idx = flow_list_create(dev, &priv->ctrl_flows,
- &attr, items, actions, false, &error);
+ flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_CTL,
+ &attr, items, actions, false, &error);
if (!flow_idx)
return -rte_errno;
return 0;
@@ -6972,7 +6931,6 @@ mlx5_ctrl_flow(struct rte_eth_dev *dev,
int
mlx5_flow_lacp_miss(struct rte_eth_dev *dev)
{
- struct mlx5_priv *priv = dev->data->dev_private;
/*
* The LACP matching is done by only using ether type since using
* a multicast dst mac causes kernel to give low priority to this flow.
@@ -7006,8 +6964,9 @@ mlx5_flow_lacp_miss(struct rte_eth_dev *dev)
},
};
struct rte_flow_error error;
- uint32_t flow_idx = flow_list_create(dev, &priv->ctrl_flows,
- &attr, items, actions, false, &error);
+ uint32_t flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_CTL,
+ &attr, items, actions,
+ false, &error);
if (!flow_idx)
return -rte_errno;
@@ -7025,9 +6984,8 @@ mlx5_flow_destroy(struct rte_eth_dev *dev,
struct rte_flow *flow,
struct rte_flow_error *error __rte_unused)
{
- struct mlx5_priv *priv = dev->data->dev_private;
-
- flow_list_destroy(dev, &priv->flows, (uintptr_t)(void *)flow);
+ flow_list_destroy(dev, MLX5_FLOW_TYPE_GEN,
+ (uintptr_t)(void *)flow);
return 0;
}
@@ -7041,9 +6999,7 @@ int
mlx5_flow_flush(struct rte_eth_dev *dev,
struct rte_flow_error *error __rte_unused)
{
- struct mlx5_priv *priv = dev->data->dev_private;
-
- mlx5_flow_list_flush(dev, &priv->flows, false);
+ mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_GEN, false);
return 0;
}
@@ -7094,8 +7050,7 @@ flow_drv_query(struct rte_eth_dev *dev,
{
struct mlx5_priv *priv = dev->data->dev_private;
const struct mlx5_flow_driver_ops *fops;
- struct rte_flow *flow = mlx5_ipool_get(priv->sh->ipool
- [MLX5_IPOOL_RTE_FLOW],
+ struct rte_flow *flow = mlx5_ipool_get(priv->flows[MLX5_FLOW_TYPE_GEN],
flow_idx);
enum mlx5_flow_drv_type ftype;
@@ -7961,14 +7916,14 @@ mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev)
if (!config->dv_flow_en)
break;
/* Create internal flow, validation skips copy action. */
- flow_idx = flow_list_create(dev, NULL, &attr, items,
- actions, false, &error);
- flow = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW],
+ flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_GEN, &attr,
+ items, actions, false, &error);
+ flow = mlx5_ipool_get(priv->flows[MLX5_FLOW_TYPE_GEN],
flow_idx);
if (!flow)
continue;
config->flow_mreg_c[n++] = idx;
- flow_list_destroy(dev, NULL, flow_idx);
+ flow_list_destroy(dev, MLX5_FLOW_TYPE_GEN, flow_idx);
}
for (; n < MLX5_MREG_C_NUM; ++n)
config->flow_mreg_c[n] = REG_NON;
@@ -8163,8 +8118,7 @@ mlx5_flow_dev_dump(struct rte_eth_dev *dev, struct rte_flow *flow_idx,
/* dump all */
if (!flow_idx) {
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
- ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW],
- priv->flows, idx, flow, next)
+ MLX5_IPOOL_FOREACH(priv->flows[MLX5_FLOW_TYPE_GEN], idx, flow)
mlx5_flow_dev_dump_ipool(dev, flow, file, error);
#endif
return mlx5_devx_cmd_flow_dump(sh->fdb_domain,
@@ -8172,8 +8126,8 @@ mlx5_flow_dev_dump(struct rte_eth_dev *dev, struct rte_flow *flow_idx,
sh->tx_domain, file);
}
/* dump one */
- flow = mlx5_ipool_get(priv->sh->ipool
- [MLX5_IPOOL_RTE_FLOW], (uintptr_t)(void *)flow_idx);
+ flow = mlx5_ipool_get(priv->flows[MLX5_FLOW_TYPE_GEN],
+ (uintptr_t)(void *)flow_idx);
if (!flow)
return -ENOENT;
@@ -998,9 +998,9 @@ flow_items_to_tunnel(const struct rte_flow_item items[])
/* Flow structure. */
struct rte_flow {
- ILIST_ENTRY(uint32_t)next; /**< Index to the next flow structure. */
uint32_t dev_handles;
/**< Device flow handles that are part of the flow. */
+ uint32_t type:2;
uint32_t drv_type:2; /**< Driver type. */
uint32_t tunnel:1;
uint32_t meter:24; /**< Holds flow meter id. */
@@ -13901,6 +13901,11 @@ flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
dev_handle->split_flow_id)
mlx5_ipool_free(fm->flow_ipool,
dev_handle->split_flow_id);
+ else if (dev_handle->split_flow_id &&
+ !dev_handle->is_meter_flow_id)
+ mlx5_ipool_free(priv->sh->ipool
+ [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
+ dev_handle->split_flow_id);
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
tmp_idx);
}
@@ -1187,7 +1187,7 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
/* Control flows for default traffic can be removed firstly. */
mlx5_traffic_disable(dev);
/* All RX queue flags will be cleared in the flush interface. */
- mlx5_flow_list_flush(dev, &priv->flows, true);
+ mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_GEN, true);
mlx5_flow_meter_rxq_flush(dev);
mlx5_rx_intr_vec_disable(dev);
priv->sh->port[priv->dev_port - 1].ih_port_id = RTE_MAX_ETHPORTS;
@@ -1370,7 +1370,7 @@ mlx5_traffic_enable(struct rte_eth_dev *dev)
return 0;
error:
ret = rte_errno; /* Save rte_errno before cleanup. */
- mlx5_flow_list_flush(dev, &priv->ctrl_flows, false);
+ mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_CTL, false);
rte_errno = ret; /* Restore rte_errno. */
return -rte_errno;
}
@@ -1385,9 +1385,7 @@ mlx5_traffic_enable(struct rte_eth_dev *dev)
void
mlx5_traffic_disable(struct rte_eth_dev *dev)
{
- struct mlx5_priv *priv = dev->data->dev_private;
-
- mlx5_flow_list_flush(dev, &priv->ctrl_flows, false);
+ mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_CTL, false);
}
/**
@@ -563,7 +563,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
eth_dev->rx_queue_count = mlx5_rx_queue_count;
/* Register MAC address. */
claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
- priv->flows = 0;
priv->ctrl_flows = 0;
TAILQ_INIT(&priv->flow_meters);
priv->mtr_profile_tbl = mlx5_l3t_create(MLX5_L3T_TYPE_PTR);