@@ -1299,7 +1299,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
log_obj_size <=
config->hca_attr.qos.log_meter_aso_max_alloc) {
sh->meter_aso_en = 1;
- err = mlx5_aso_flow_mtrs_mng_init(priv);
+ err = mlx5_aso_flow_mtrs_mng_init(priv->sh);
if (err) {
err = -err;
goto error;
@@ -350,6 +350,20 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
.free = mlx5_free,
.type = "mlx5_shared_action_rss",
},
+ [MLX5_IPOOL_MTR_POLICY] = {
+	/*
+	 * The ipool index should grow continuously from small to big;
+	 * for the policy index, grow_trunk is not set so that policy
+	 * indexes are allocated without discontinuous jumps.
+	 */
+ .size = sizeof(struct mlx5_flow_meter_sub_policy),
+ .trunk_size = 64,
+ .need_lock = 1,
+ .release_mem_en = 1,
+ .malloc = mlx5_malloc,
+ .free = mlx5_free,
+ .type = "mlx5_meter_policy_ipool",
+ },
};
@@ -569,27 +583,25 @@ mlx5_flow_counters_mng_close(struct mlx5_dev_ctx_shared *sh)
* Pointer to mlx5_dev_ctx_shared object to free
*/
int
-mlx5_aso_flow_mtrs_mng_init(struct mlx5_priv *priv)
+mlx5_aso_flow_mtrs_mng_init(struct mlx5_dev_ctx_shared *sh)
{
- if (!priv->mtr_idx_tbl) {
- priv->mtr_idx_tbl = mlx5_l3t_create(MLX5_L3T_TYPE_DWORD);
- if (!priv->mtr_idx_tbl) {
- DRV_LOG(ERR, "fail to create meter lookup table.");
- rte_errno = ENOMEM;
- return -ENOMEM;
- }
- }
- if (!priv->sh->mtrmng) {
- priv->sh->mtrmng = mlx5_malloc(MLX5_MEM_ZERO,
- sizeof(*priv->sh->mtrmng),
+ if (!sh->mtrmng) {
+ sh->mtrmng = mlx5_malloc(MLX5_MEM_ZERO,
+ sizeof(*sh->mtrmng),
RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
- if (!priv->sh->mtrmng) {
- DRV_LOG(ERR, "mlx5_aso_mtr_pools_mng allocation was failed.");
+ if (!sh->mtrmng) {
+ DRV_LOG(ERR,
+ "meter management allocation was failed.");
rte_errno = ENOMEM;
return -ENOMEM;
}
- rte_spinlock_init(&priv->sh->mtrmng->mtrsl);
- LIST_INIT(&priv->sh->mtrmng->meters);
+ if (sh->meter_aso_en) {
+ rte_spinlock_init(&sh->mtrmng->pools_mng.mtrsl);
+ LIST_INIT(&sh->mtrmng->pools_mng.meters);
+ sh->mtrmng->policy_idx_tbl =
+ mlx5_l3t_create(MLX5_L3T_TYPE_DWORD);
+ }
+ sh->mtrmng->def_policy_id = MLX5_INVALID_POLICY_ID;
}
return 0;
}
@@ -605,31 +617,34 @@ static void
mlx5_aso_flow_mtrs_mng_close(struct mlx5_dev_ctx_shared *sh)
{
struct mlx5_aso_mtr_pool *mtr_pool;
- struct mlx5_aso_mtr_pools_mng *mtrmng = sh->mtrmng;
+ struct mlx5_flow_mtr_mng *mtrmng = sh->mtrmng;
uint32_t idx;
#ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
struct mlx5_aso_mtr *aso_mtr;
int i;
#endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
- mlx5_aso_queue_uninit(sh, ASO_OPC_MOD_POLICER);
- idx = mtrmng->n_valid;
- while (idx--) {
- mtr_pool = mtrmng->pools[idx];
+ if (sh->meter_aso_en) {
+ mlx5_aso_queue_uninit(sh, ASO_OPC_MOD_POLICER);
+ idx = mtrmng->pools_mng.n_valid;
+ while (idx--) {
+ mtr_pool = mtrmng->pools_mng.pools[idx];
#ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
- for (i = 0; i < MLX5_ASO_MTRS_PER_POOL; i++) {
- aso_mtr = &mtr_pool->mtrs[i];
- if (aso_mtr->fm.meter_action)
- claim_zero(mlx5_glue->destroy_flow_action
- (aso_mtr->fm.meter_action));
- }
+ for (i = 0; i < MLX5_ASO_MTRS_PER_POOL; i++) {
+ aso_mtr = &mtr_pool->mtrs[i];
+ if (aso_mtr->fm.meter_action)
+ claim_zero
+ (mlx5_glue->destroy_flow_action
+ (aso_mtr->fm.meter_action));
+ }
#endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
- claim_zero(mlx5_devx_cmd_destroy
+ claim_zero(mlx5_devx_cmd_destroy
(mtr_pool->devx_obj));
- mtrmng->n_valid--;
- mlx5_free(mtr_pool);
+ mtrmng->pools_mng.n_valid--;
+ mlx5_free(mtr_pool);
+ }
+ mlx5_free(sh->mtrmng->pools_mng.pools);
}
- mlx5_free(sh->mtrmng->pools);
mlx5_free(sh->mtrmng);
sh->mtrmng = NULL;
}
@@ -56,6 +56,7 @@ enum mlx5_ipool_index {
MLX5_IPOOL_RTE_FLOW, /* Pool for rte_flow. */
MLX5_IPOOL_RSS_EXPANTION_FLOW_ID, /* Pool for Queue/RSS flow ID. */
MLX5_IPOOL_RSS_SHARED_ACTIONS, /* Pool for RSS shared actions. */
+ MLX5_IPOOL_MTR_POLICY, /* Pool for meter policy resource. */
MLX5_IPOOL_MAX,
};
@@ -580,9 +581,126 @@ struct mlx5_dev_shared_port {
/* Aging information for per port. */
};
+/*
+ * Max number of actions per DV flow.
+ * See CREATE_FLOW_MAX_FLOW_ACTIONS_SUPPORTED
+ * in rdma-core file providers/mlx5/verbs.c.
+ */
+#define MLX5_DV_MAX_NUMBER_OF_ACTIONS 8
+
/*ASO flow meter structures*/
/* Modify this value if enum rte_mtr_color changes. */
#define RTE_MTR_DROPPED RTE_COLORS
+/* Yellow is not supported. */
+#define MLX5_MTR_RTE_COLORS (RTE_COLOR_GREEN + 1)
+/* table_id 22 bits in mlx5_flow_tbl_key so limit policy number. */
+#define MLX5_MAX_SUB_POLICY_TBL_NUM 0x3FFFFF
+#define MLX5_INVALID_POLICY_ID UINT32_MAX
+/* Suffix table_id on MLX5_FLOW_TABLE_LEVEL_METER. */
+#define MLX5_MTR_TABLE_ID_SUFFIX 1
+/* Drop table_id on MLX5_FLOW_TABLE_LEVEL_METER. */
+#define MLX5_MTR_TABLE_ID_DROP 2
+
+enum mlx5_meter_domain {
+ MLX5_MTR_DOMAIN_INGRESS,
+ MLX5_MTR_DOMAIN_EGRESS,
+ MLX5_MTR_DOMAIN_TRANSFER,
+ MLX5_MTR_DOMAIN_MAX,
+};
+#define MLX5_MTR_DOMAIN_INGRESS_BIT (1 << MLX5_MTR_DOMAIN_INGRESS)
+#define MLX5_MTR_DOMAIN_EGRESS_BIT (1 << MLX5_MTR_DOMAIN_EGRESS)
+#define MLX5_MTR_DOMAIN_TRANSFER_BIT (1 << MLX5_MTR_DOMAIN_TRANSFER)
+#define MLX5_MTR_ALL_DOMAIN_BIT (MLX5_MTR_DOMAIN_INGRESS_BIT | \
+ MLX5_MTR_DOMAIN_EGRESS_BIT | \
+ MLX5_MTR_DOMAIN_TRANSFER_BIT)
+
+/*
+ * Meter sub-policy structure.
+ * Each RSS TIR in meter policy need its own sub-policy resource.
+ */
+struct mlx5_flow_meter_sub_policy {
+ uint32_t main_policy_id:1;
+ /* Main policy id is same as this sub_policy id. */
+ uint32_t idx:31;
+ /* Index to sub_policy ipool entity. */
+ void *main_policy;
+ /* Point to struct mlx5_flow_meter_policy. */
+ struct mlx5_flow_tbl_resource *tbl_rsc;
+ /* The sub-policy table resource. */
+ uint32_t rix_hrxq[MLX5_MTR_RTE_COLORS];
+ /* Index to TIR resource. */
+ struct mlx5_flow_tbl_resource *jump_tbl[MLX5_MTR_RTE_COLORS];
+ /* Meter jump/drop table. */
+ struct mlx5_flow_dv_matcher *color_matcher[RTE_COLORS];
+ /* Matcher for Color. */
+ void *color_rule[RTE_COLORS];
+ /* Meter green/yellow/drop rule. */
+};
+
+struct mlx5_meter_policy_acts {
+ uint8_t actions_n;
+ /* Number of actions. */
+ void *dv_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS];
+ /* Action list. */
+};
+
+struct mlx5_meter_policy_action_container {
+ uint32_t rix_mark;
+ /* Index to the mark action. */
+ struct mlx5_flow_dv_modify_hdr_resource *modify_hdr;
+ /* Pointer to modify header resource in cache. */
+ uint8_t fate_action;
+ /* Fate action type. */
+ union {
+ struct rte_flow_action *rss;
+ /* Rss action configuration. */
+ uint32_t rix_port_id_action;
+ /* Index to port ID action resource. */
+ void *dr_jump_action[MLX5_MTR_DOMAIN_MAX];
+ /* Jump/drop action per color. */
+ };
+};
+
+/* Flow meter policy parameter structure. */
+struct mlx5_flow_meter_policy {
+ uint32_t is_rss:1;
+ /* Is RSS policy table. */
+ uint32_t ingress:1;
+ /* Rule applies to ingress domain. */
+ uint32_t egress:1;
+ /* Rule applies to egress domain. */
+ uint32_t transfer:1;
+ /* Rule applies to transfer domain. */
+ rte_spinlock_t sl;
+ uint32_t ref_cnt;
+ /* Use count. */
+ struct mlx5_meter_policy_action_container act_cnt[MLX5_MTR_RTE_COLORS];
+ /* Policy actions container. */
+ void *dr_drop_action[MLX5_MTR_DOMAIN_MAX];
+ /* drop action for red color. */
+ uint16_t sub_policy_num;
+ /* Count sub policy tables, 3 bits per domain. */
+ struct mlx5_flow_meter_sub_policy **sub_policys[MLX5_MTR_DOMAIN_MAX];
+ /* Sub policy table array must be the end of struct. */
+};
+
+/* The maximum sub policy number relates to struct mlx5_rss_hash_fields[]. */
+#define MLX5_MTR_RSS_MAX_SUB_POLICY 7
+#define MLX5_MTR_SUB_POLICY_NUM_SHIFT 3
+#define MLX5_MTR_SUB_POLICY_NUM_MASK 0x7
+#define MLX5_MTRS_DEFAULT_RULE_PRIORITY 0xFFFF
+
+/* Flow meter default policy parameter structure.
+ * Policy index 0 is reserved by default policy table.
+ * Action per color as below:
+ * green - do nothing, yellow - do nothing, red - drop
+ */
+struct mlx5_flow_meter_def_policy {
+ struct mlx5_flow_meter_sub_policy sub_policy;
+ /* Policy rules jump to other tables. */
+ void *dr_jump_action[RTE_COLORS];
+ /* Jump action per color. */
+};
/* Meter table structure. */
struct mlx5_meter_domain_info {
@@ -746,6 +864,28 @@ struct mlx5_aso_mtr_pools_mng {
struct mlx5_aso_mtr_pool **pools; /* ASO flow meter pool array. */
};
+/* Meter management structure for global flow meter resource. */
+struct mlx5_flow_mtr_mng {
+ struct mlx5_aso_mtr_pools_mng pools_mng;
+ /* Pools management structure for ASO flow meter pools. */
+ struct mlx5_flow_meter_def_policy *def_policy[MLX5_MTR_DOMAIN_MAX];
+ /* Default policy table. */
+ uint32_t def_policy_id;
+ /* Default policy id. */
+ uint32_t def_policy_ref_cnt;
+ /** def_policy meter use count. */
+ struct mlx5_l3t_tbl *policy_idx_tbl;
+ /* Policy index lookup table. */
+ struct mlx5_flow_tbl_resource *drop_tbl[MLX5_MTR_DOMAIN_MAX];
+ /* Meter drop table. */
+ struct mlx5_flow_dv_matcher *drop_matcher[MLX5_MTR_DOMAIN_MAX];
+ /* Matcher meter in drop table. */
+ struct mlx5_flow_dv_matcher *def_matcher[MLX5_MTR_DOMAIN_MAX];
+ /* Default matcher in drop table. */
+ void *def_rule[MLX5_MTR_DOMAIN_MAX];
+ /* Default rule in drop table. */
+};
+
/* Table key of the hash organization. */
union mlx5_flow_tbl_key {
struct {
@@ -772,9 +912,9 @@ struct mlx5_flow_tbl_resource {
#define MLX5_FLOW_MREG_ACT_TABLE_GROUP (MLX5_MAX_TABLES - 1)
#define MLX5_FLOW_MREG_CP_TABLE_GROUP (MLX5_MAX_TABLES - 2)
/* Tables for metering splits should be added here. */
-#define MLX5_FLOW_TABLE_LEVEL_SUFFIX (MLX5_MAX_TABLES - 3)
-#define MLX5_FLOW_TABLE_LEVEL_METER (MLX5_MAX_TABLES - 4)
-#define MLX5_MAX_TABLES_EXTERNAL MLX5_FLOW_TABLE_LEVEL_METER
+#define MLX5_FLOW_TABLE_LEVEL_METER (MLX5_MAX_TABLES - 3)
+#define MLX5_FLOW_TABLE_LEVEL_POLICY (MLX5_MAX_TABLES - 4)
+#define MLX5_MAX_TABLES_EXTERNAL MLX5_FLOW_TABLE_LEVEL_POLICY
#define MLX5_MAX_TABLES_FDB UINT16_MAX
#define MLX5_FLOW_TABLE_FACTOR 10
@@ -935,8 +1075,8 @@ struct mlx5_dev_ctx_shared {
struct mlx5_geneve_tlv_option_resource *geneve_tlv_option_resource;
/* Management structure for geneve tlv option */
rte_spinlock_t geneve_tlv_opt_sl; /* Lock for geneve tlv resource */
- struct mlx5_aso_mtr_pools_mng *mtrmng;
- /* Meter pools management structure. */
+ struct mlx5_flow_mtr_mng *mtrmng;
+ /* Meter management structure. */
struct mlx5_dev_shared_port port[]; /* per device port data array. */
};
@@ -1237,7 +1377,7 @@ int mlx5_hairpin_cap_get(struct rte_eth_dev *dev,
bool mlx5_flex_parser_ecpri_exist(struct rte_eth_dev *dev);
int mlx5_flex_parser_ecpri_alloc(struct rte_eth_dev *dev);
int mlx5_flow_aso_age_mng_init(struct mlx5_dev_ctx_shared *sh);
-int mlx5_aso_flow_mtrs_mng_init(struct mlx5_priv *priv);
+int mlx5_aso_flow_mtrs_mng_init(struct mlx5_dev_ctx_shared *sh);
/* mlx5_ethdev.c */
@@ -1463,6 +1603,12 @@ int mlx5_flow_meter_attach(struct mlx5_priv *priv,
struct rte_flow_error *error);
void mlx5_flow_meter_detach(struct mlx5_priv *priv,
struct mlx5_flow_meter_info *fm);
+struct mlx5_flow_meter_policy *mlx5_flow_meter_policy_find
+ (struct rte_eth_dev *dev,
+ uint32_t policy_id,
+ uint32_t *policy_idx);
+int mlx5_flow_meter_flush(struct rte_eth_dev *dev,
+ struct rte_mtr_error *error);
/* mlx5_os.c */
struct rte_pci_driver;
@@ -1028,7 +1028,7 @@ flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
* @param[in] dev_handle
* Pointer to device flow handle structure.
*/
-static void
+void
flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
struct mlx5_flow_handle *dev_handle)
{
@@ -4479,8 +4479,8 @@ flow_meter_split_prep(struct rte_eth_dev *dev,
actions_pre++;
jump_data = (struct rte_flow_action_jump *)actions_pre;
jump_data->group = attr->transfer ?
- (MLX5_FLOW_TABLE_LEVEL_METER - 1) :
- MLX5_FLOW_TABLE_LEVEL_METER;
+ (MLX5_FLOW_TABLE_LEVEL_POLICY - 1) :
+ MLX5_FLOW_TABLE_LEVEL_POLICY;
hw_mtr_action->conf = jump_data;
actions_pre = (struct rte_flow_action *)(jump_data + 1);
} else {
@@ -5079,8 +5079,8 @@ flow_create_split_metadata(struct rte_eth_dev *dev,
if (qrss) {
/* Check if it is in meter suffix table. */
mtr_sfx = attr->group == (attr->transfer ?
- (MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) :
- MLX5_FLOW_TABLE_LEVEL_SUFFIX);
+ (MLX5_FLOW_TABLE_LEVEL_METER - 1) :
+ MLX5_FLOW_TABLE_LEVEL_METER);
/*
* Q/RSS action on NIC Rx should be split in order to pass by
* the mreg copy table (RX_CP_TBL) and then it jumps to the
@@ -5344,8 +5344,8 @@ flow_create_split_meter(struct rte_eth_dev *dev,
dev_flow->handle->is_meter_flow_id = 1;
/* Setting the sfx group atrr. */
sfx_attr.group = sfx_attr.transfer ?
- (MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) :
- MLX5_FLOW_TABLE_LEVEL_SUFFIX;
+ (MLX5_FLOW_TABLE_LEVEL_METER - 1) :
+ MLX5_FLOW_TABLE_LEVEL_METER;
flow_split_info->prefix_layers =
flow_get_prefix_layer_flags(dev_flow);
flow_split_info->prefix_mark = dev_flow->handle->mark;
@@ -6608,6 +6608,169 @@ mlx5_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
return 0;
}
+/**
+ * Validate meter policy actions.
+ * Dispatcher for action type specific validation.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] action
+ * The meter policy action object to validate.
+ * @param[in] attr
+ * Attributes of flow to determine steering domain.
+ * @param[out] is_rss
+ * Is RSS or not.
+ * @param[out] domain_bitmap
+ * Domain bitmap.
+ * @param[out] is_def_policy
+ * Is default policy or not.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. Initialized in case of
+ * error only.
+ *
+ * @return
+ * 0 on success, otherwise negative errno value.
+ */
+int
+mlx5_flow_validate_mtr_acts(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions[RTE_COLORS],
+ struct rte_flow_attr *attr,
+ bool *is_rss,
+ uint8_t *domain_bitmap,
+ bool *is_def_policy,
+ struct rte_mtr_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
+ return fops->validate_mtr_acts(dev, actions, attr,
+ is_rss, domain_bitmap, is_def_policy, error);
+}
+
+/**
+ * Destroy the meter table set.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in] mtr_policy
+ * Meter policy struct.
+ */
+void
+mlx5_flow_destroy_mtr_acts(struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_policy *mtr_policy)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
+ fops->destroy_mtr_acts(dev, mtr_policy);
+}
+
+/**
+ * Create policy action, lock free,
+ * (mutex should be acquired by caller).
+ * Dispatcher for action type specific call.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] mtr_policy
+ * Meter policy struct.
+ * @param[in] action
+ * Action specification used to create meter actions.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. Initialized in case of
+ * error only.
+ *
+ * @return
+ * 0 on success, otherwise negative errno value.
+ */
+int
+mlx5_flow_create_mtr_acts(struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_policy *mtr_policy,
+ const struct rte_flow_action *actions[RTE_COLORS],
+ struct rte_mtr_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
+ return fops->create_mtr_acts(dev, mtr_policy, actions, error);
+}
+
+/**
+ * Create policy rules, lock free,
+ * (mutex should be acquired by caller).
+ * Dispatcher for action type specific call.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] mtr_policy
+ * Meter policy struct.
+ *
+ * @return
+ * 0 on success, -1 otherwise.
+ */
+int
+mlx5_flow_create_policy_rules(struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_policy *mtr_policy)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
+ return fops->create_policy_rules(dev, mtr_policy);
+}
+
+/**
+ * Destroy policy rules, lock free,
+ * (mutex should be acquired by caller).
+ * Dispatcher for action type specific call.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] mtr_policy
+ * Meter policy struct.
+ */
+void
+mlx5_flow_destroy_policy_rules(struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_policy *mtr_policy)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
+ fops->destroy_policy_rules(dev, mtr_policy);
+}
+
+/**
+ * Destroy the default policy table set.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ */
+void
+mlx5_flow_destroy_def_policy(struct rte_eth_dev *dev)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
+ fops->destroy_def_policy(dev);
+}
+
+/**
+ * Create the default policy table set.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * 0 on success, -1 otherwise.
+ */
+int
+mlx5_flow_create_def_policy(struct rte_eth_dev *dev)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
+ return fops->create_def_policy(dev);
+}
+
/**
* Create the needed meter and suffix tables.
*
@@ -6647,6 +6810,21 @@ mlx5_flow_destroy_mtr_tbls(struct rte_eth_dev *dev,
return fops->destroy_mtr_tbls(dev, tbls);
}
+/**
+ * Destroy the global meter drop table.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ */
+void
+mlx5_flow_destroy_mtr_drop_tbls(struct rte_eth_dev *dev)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
+ fops->destroy_mtr_drop_tbls(dev);
+}
+
/**
* Allocate the needed aso flow meter id.
*
@@ -691,13 +691,6 @@ struct mlx5_flow_handle {
#define MLX5_FLOW_HANDLE_VERBS_SIZE (sizeof(struct mlx5_flow_handle))
#endif
-/*
- * Max number of actions per DV flow.
- * See CREATE_FLOW_MAX_FLOW_ACTIONS_SUPPORTED
- * in rdma-core file providers/mlx5/verbs.c.
- */
-#define MLX5_DV_MAX_NUMBER_OF_ACTIONS 8
-
/** Device flow structure only for DV flow creation. */
struct mlx5_flow_dv_workspace {
uint32_t group; /**< The group index. */
@@ -1099,6 +1092,7 @@ typedef struct mlx5_meter_domains_infos *(*mlx5_flow_create_mtr_tbls_t)
(struct rte_eth_dev *dev);
typedef int (*mlx5_flow_destroy_mtr_tbls_t)(struct rte_eth_dev *dev,
struct mlx5_meter_domains_infos *tbls);
+typedef void (*mlx5_flow_destroy_mtr_drop_tbls_t)(struct rte_eth_dev *dev);
typedef uint32_t (*mlx5_flow_mtr_alloc_t)
(struct rte_eth_dev *dev);
typedef void (*mlx5_flow_mtr_free_t)(struct rte_eth_dev *dev,
@@ -1144,6 +1138,32 @@ typedef int (*mlx5_flow_sync_domain_t)
(struct rte_eth_dev *dev,
uint32_t domains,
uint32_t flags);
+typedef int (*mlx5_flow_validate_mtr_acts_t)
+ (struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions[RTE_COLORS],
+ struct rte_flow_attr *attr,
+ bool *is_rss,
+ uint8_t *domain_bitmap,
+ bool *is_def_policy,
+ struct rte_mtr_error *error);
+typedef int (*mlx5_flow_create_mtr_acts_t)
+ (struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_policy *mtr_policy,
+ const struct rte_flow_action *actions[RTE_COLORS],
+ struct rte_mtr_error *error);
+typedef void (*mlx5_flow_destroy_mtr_acts_t)
+ (struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_policy *mtr_policy);
+typedef int (*mlx5_flow_create_policy_rules_t)
+ (struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_policy *mtr_policy);
+typedef void (*mlx5_flow_destroy_policy_rules_t)
+ (struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_policy *mtr_policy);
+typedef int (*mlx5_flow_create_def_policy_t)
+ (struct rte_eth_dev *dev);
+typedef void (*mlx5_flow_destroy_def_policy_t)
+ (struct rte_eth_dev *dev);
struct mlx5_flow_driver_ops {
mlx5_flow_validate_t validate;
@@ -1155,8 +1175,16 @@ struct mlx5_flow_driver_ops {
mlx5_flow_query_t query;
mlx5_flow_create_mtr_tbls_t create_mtr_tbls;
mlx5_flow_destroy_mtr_tbls_t destroy_mtr_tbls;
+ mlx5_flow_destroy_mtr_drop_tbls_t destroy_mtr_drop_tbls;
mlx5_flow_mtr_alloc_t create_meter;
mlx5_flow_mtr_free_t free_meter;
+ mlx5_flow_validate_mtr_acts_t validate_mtr_acts;
+ mlx5_flow_create_mtr_acts_t create_mtr_acts;
+ mlx5_flow_destroy_mtr_acts_t destroy_mtr_acts;
+ mlx5_flow_create_policy_rules_t create_policy_rules;
+ mlx5_flow_destroy_policy_rules_t destroy_policy_rules;
+ mlx5_flow_create_def_policy_t create_def_policy;
+ mlx5_flow_destroy_def_policy_t destroy_def_policy;
mlx5_flow_counter_alloc_t counter_alloc;
mlx5_flow_counter_free_t counter_free;
mlx5_flow_counter_query_t counter_query;
@@ -1233,12 +1261,13 @@ static inline struct mlx5_aso_mtr *
mlx5_aso_meter_by_idx(struct mlx5_priv *priv, uint32_t idx)
{
struct mlx5_aso_mtr_pool *pool;
- struct mlx5_aso_mtr_pools_mng *mtrmng = priv->sh->mtrmng;
+ struct mlx5_aso_mtr_pools_mng *pools_mng =
+ &priv->sh->mtrmng->pools_mng;
/* Decrease to original index. */
idx--;
- MLX5_ASSERT(idx / MLX5_ASO_MTRS_PER_POOL < mtrmng->n);
- pool = mtrmng->pools[idx / MLX5_ASO_MTRS_PER_POOL];
+ MLX5_ASSERT(idx / MLX5_ASO_MTRS_PER_POOL < pools_mng->n);
+ pool = pools_mng->pools[idx / MLX5_ASO_MTRS_PER_POOL];
return &pool->mtrs[idx % MLX5_ASO_MTRS_PER_POOL];
}
@@ -1384,8 +1413,7 @@ struct mlx5_meter_domains_infos *mlx5_flow_create_mtr_tbls
(struct rte_eth_dev *dev);
int mlx5_flow_destroy_mtr_tbls(struct rte_eth_dev *dev,
struct mlx5_meter_domains_infos *tbl);
-int mlx5_flow_meter_flush(struct rte_eth_dev *dev,
- struct rte_mtr_error *error);
+void mlx5_flow_destroy_mtr_drop_tbls(struct rte_eth_dev *dev);
int mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev);
int mlx5_shared_action_flush(struct rte_eth_dev *dev);
void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id);
@@ -1486,4 +1514,25 @@ int mlx5_flow_os_set_specific_workspace(struct mlx5_flow_workspace *data);
void mlx5_flow_os_release_workspace(void);
uint32_t mlx5_flow_mtr_alloc(struct rte_eth_dev *dev);
void mlx5_flow_mtr_free(struct rte_eth_dev *dev, uint32_t mtr_idx);
+int mlx5_flow_validate_mtr_acts(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions[RTE_COLORS],
+ struct rte_flow_attr *attr,
+ bool *is_rss,
+ uint8_t *domain_bitmap,
+ bool *is_def_policy,
+ struct rte_mtr_error *error);
+void mlx5_flow_destroy_mtr_acts(struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_policy *mtr_policy);
+int mlx5_flow_create_mtr_acts(struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_policy *mtr_policy,
+ const struct rte_flow_action *actions[RTE_COLORS],
+ struct rte_mtr_error *error);
+int mlx5_flow_create_policy_rules(struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_policy *mtr_policy);
+void mlx5_flow_destroy_policy_rules(struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_policy *mtr_policy);
+int mlx5_flow_create_def_policy(struct rte_eth_dev *dev);
+void mlx5_flow_destroy_def_policy(struct rte_eth_dev *dev);
+void flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
+ struct mlx5_flow_handle *dev_handle);
#endif /* RTE_PMD_MLX5_FLOW_H_ */
@@ -311,11 +311,11 @@ mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh,
mlx5_aso_age_init_sq(&sh->aso_age_mng->aso_sq);
break;
case ASO_OPC_MOD_POLICER:
- if (mlx5_aso_sq_create(sh->ctx, &sh->mtrmng->sq, 0,
+ if (mlx5_aso_sq_create(sh->ctx, &sh->mtrmng->pools_mng.sq, 0,
sh->tx_uar, sh->pdn, MLX5_ASO_QUEUE_LOG_DESC,
sh->sq_ts_format))
return -1;
- mlx5_aso_mtr_init_sq(&sh->mtrmng->sq);
+ mlx5_aso_mtr_init_sq(&sh->mtrmng->pools_mng.sq);
break;
default:
DRV_LOG(ERR, "Unknown ASO operation mode");
@@ -342,7 +342,7 @@ mlx5_aso_queue_uninit(struct mlx5_dev_ctx_shared *sh,
sq = &sh->aso_age_mng->aso_sq;
break;
case ASO_OPC_MOD_POLICER:
- sq = &sh->mtrmng->sq;
+ sq = &sh->mtrmng->pools_mng.sq;
break;
default:
DRV_LOG(ERR, "Unknown ASO operation mode");
@@ -798,7 +798,7 @@ int
mlx5_aso_meter_update_by_wqe(struct mlx5_dev_ctx_shared *sh,
struct mlx5_aso_mtr *mtr)
{
- struct mlx5_aso_sq *sq = &sh->mtrmng->sq;
+ struct mlx5_aso_sq *sq = &sh->mtrmng->pools_mng.sq;
uint32_t poll_wqe_times = MLX5_MTR_POLL_WQE_CQE_TIMES;
do {
@@ -830,7 +830,7 @@ int
mlx5_aso_mtr_wait(struct mlx5_dev_ctx_shared *sh,
struct mlx5_aso_mtr *mtr)
{
- struct mlx5_aso_sq *sq = &sh->mtrmng->sq;
+ struct mlx5_aso_sq *sq = &sh->mtrmng->pools_mng.sq;
uint32_t poll_cqe_times = MLX5_MTR_POLL_WQE_CQE_TIMES;
if (__atomic_load_n(&mtr->state, __ATOMIC_RELAXED) ==
@@ -21,6 +21,8 @@
#include <rte_gtp.h>
#include <rte_eal_paging.h>
#include <rte_mpls.h>
+#include <rte_mtr.h>
+#include <rte_mtr_driver.h>
#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
@@ -184,6 +186,31 @@ flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
attr->valid = 1;
}
+/**
+ * Convert rte_mtr_color to mlx5 color.
+ *
+ * @param[in] rcol
+ * rte_mtr_color.
+ *
+ * @return
+ * mlx5 color.
+ */
+static int
+rte_col_2_mlx5_col(enum rte_color rcol)
+{
+ switch (rcol) {
+ case RTE_COLOR_GREEN:
+ return MLX5_FLOW_COLOR_GREEN;
+ case RTE_COLOR_YELLOW:
+ return MLX5_FLOW_COLOR_YELLOW;
+ case RTE_COLOR_RED:
+ return MLX5_FLOW_COLOR_RED;
+ default:
+ break;
+ }
+ return MLX5_FLOW_COLOR_UNDEFINED;
+}
+
struct field_modify_info {
uint32_t size; /* Size of field in protocol header, in bytes. */
uint32_t offset; /* Offset of field in protocol header, in bytes. */
@@ -4695,10 +4722,6 @@ flow_dv_validate_action_jump(struct rte_eth_dev *dev,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"can't have 2 fate actions in"
" same flow");
- if (action_flags & MLX5_FLOW_ACTION_METER)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION, NULL,
- "jump with meter not support");
if (!action->conf)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION_CONF,
@@ -5928,9 +5951,10 @@ static int
flow_dv_mtr_container_resize(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_aso_mtr_pools_mng *mtrmng = priv->sh->mtrmng;
- void *old_pools = mtrmng->pools;
- uint32_t resize = mtrmng->n + MLX5_MTRS_CONTAINER_RESIZE;
+ struct mlx5_aso_mtr_pools_mng *pools_mng =
+ &priv->sh->mtrmng->pools_mng;
+ void *old_pools = pools_mng->pools;
+ uint32_t resize = pools_mng->n + MLX5_MTRS_CONTAINER_RESIZE;
uint32_t mem_size = sizeof(struct mlx5_aso_mtr_pool *) * resize;
void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
@@ -5938,16 +5962,16 @@ flow_dv_mtr_container_resize(struct rte_eth_dev *dev)
rte_errno = ENOMEM;
return -ENOMEM;
}
- if (!mtrmng->n)
+ if (!pools_mng->n)
if (mlx5_aso_queue_init(priv->sh, ASO_OPC_MOD_POLICER)) {
mlx5_free(pools);
return -ENOMEM;
}
if (old_pools)
- memcpy(pools, old_pools, mtrmng->n *
+ memcpy(pools, old_pools, pools_mng->n *
sizeof(struct mlx5_aso_mtr_pool *));
- mtrmng->n = resize;
- mtrmng->pools = pools;
+ pools_mng->n = resize;
+ pools_mng->pools = pools;
if (old_pools)
mlx5_free(old_pools);
return 0;
@@ -5970,7 +5994,8 @@ flow_dv_mtr_pool_create(struct rte_eth_dev *dev,
struct mlx5_aso_mtr **mtr_free)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_aso_mtr_pools_mng *mtrmng = priv->sh->mtrmng;
+ struct mlx5_aso_mtr_pools_mng *pools_mng =
+ &priv->sh->mtrmng->pools_mng;
struct mlx5_aso_mtr_pool *pool = NULL;
struct mlx5_devx_obj *dcs = NULL;
uint32_t i;
@@ -5990,17 +6015,17 @@ flow_dv_mtr_pool_create(struct rte_eth_dev *dev,
return NULL;
}
pool->devx_obj = dcs;
- pool->index = mtrmng->n_valid;
- if (pool->index == mtrmng->n && flow_dv_mtr_container_resize(dev)) {
+ pool->index = pools_mng->n_valid;
+ if (pool->index == pools_mng->n && flow_dv_mtr_container_resize(dev)) {
mlx5_free(pool);
claim_zero(mlx5_devx_cmd_destroy(dcs));
return NULL;
}
- mtrmng->pools[pool->index] = pool;
- mtrmng->n_valid++;
+ pools_mng->pools[pool->index] = pool;
+ pools_mng->n_valid++;
for (i = 1; i < MLX5_ASO_MTRS_PER_POOL; ++i) {
pool->mtrs[i].offset = i;
- LIST_INSERT_HEAD(&mtrmng->meters,
+ LIST_INSERT_HEAD(&pools_mng->meters,
&pool->mtrs[i], next);
}
pool->mtrs[0].offset = 0;
@@ -6020,15 +6045,16 @@ static void
flow_dv_aso_mtr_release_to_pool(struct rte_eth_dev *dev, uint32_t mtr_idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_aso_mtr_pools_mng *mtrmng = priv->sh->mtrmng;
+ struct mlx5_aso_mtr_pools_mng *pools_mng =
+ &priv->sh->mtrmng->pools_mng;
struct mlx5_aso_mtr *aso_mtr = mlx5_aso_meter_by_idx(priv, mtr_idx);
MLX5_ASSERT(aso_mtr);
- rte_spinlock_lock(&mtrmng->mtrsl);
+ rte_spinlock_lock(&pools_mng->mtrsl);
memset(&aso_mtr->fm, 0, sizeof(struct mlx5_flow_meter_info));
aso_mtr->state = ASO_METER_FREE;
- LIST_INSERT_HEAD(&mtrmng->meters, aso_mtr, next);
- rte_spinlock_unlock(&mtrmng->mtrsl);
+ LIST_INSERT_HEAD(&pools_mng->meters, aso_mtr, next);
+ rte_spinlock_unlock(&pools_mng->mtrsl);
}
/**
@@ -6045,7 +6071,8 @@ flow_dv_mtr_alloc(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_aso_mtr *mtr_free = NULL;
- struct mlx5_aso_mtr_pools_mng *mtrmng = priv->sh->mtrmng;
+ struct mlx5_aso_mtr_pools_mng *pools_mng =
+ &priv->sh->mtrmng->pools_mng;
struct mlx5_aso_mtr_pool *pool;
uint32_t mtr_idx = 0;
@@ -6055,16 +6082,16 @@ flow_dv_mtr_alloc(struct rte_eth_dev *dev)
}
/* Allocate the flow meter memory. */
/* Get free meters from management. */
- rte_spinlock_lock(&mtrmng->mtrsl);
- mtr_free = LIST_FIRST(&mtrmng->meters);
+ rte_spinlock_lock(&pools_mng->mtrsl);
+ mtr_free = LIST_FIRST(&pools_mng->meters);
if (mtr_free)
LIST_REMOVE(mtr_free, next);
if (!mtr_free && !flow_dv_mtr_pool_create(dev, &mtr_free)) {
- rte_spinlock_unlock(&mtrmng->mtrsl);
+ rte_spinlock_unlock(&pools_mng->mtrsl);
return 0;
}
mtr_free->state = ASO_METER_WAIT;
- rte_spinlock_unlock(&mtrmng->mtrsl);
+ rte_spinlock_unlock(&pools_mng->mtrsl);
pool = container_of(mtr_free,
struct mlx5_aso_mtr_pool,
mtrs[mtr_free->offset]);
@@ -13390,6 +13417,556 @@ flow_dv_action_query(struct rte_eth_dev *dev,
}
}
+/**
+ * Destroy the meter sub policy table rules.
+ * Lock free, (mutex should be acquired by caller).
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in] sub_policy
+ * Pointer to meter sub policy table.
+ */
+static void
+__flow_dv_destroy_sub_policy_rules(struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_sub_policy *sub_policy)
+{
+ struct mlx5_flow_tbl_data_entry *tbl;
+ int i;
+
+ for (i = 0; i < RTE_COLORS; i++) {
+ if (sub_policy->color_rule[i]) {
+ claim_zero(mlx5_flow_os_destroy_flow
+ (sub_policy->color_rule[i]));
+ sub_policy->color_rule[i] = NULL;
+ }
+ if (sub_policy->color_matcher[i]) {
+ tbl = container_of(sub_policy->color_matcher[i]->tbl,
+ typeof(*tbl), tbl);
+ mlx5_cache_unregister(&tbl->matchers,
+ &sub_policy->color_matcher[i]->entry);
+ sub_policy->color_matcher[i] = NULL;
+ }
+ }
+ for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
+ if (sub_policy->rix_hrxq[i]) {
+ mlx5_hrxq_release(dev, sub_policy->rix_hrxq[i]);
+ sub_policy->rix_hrxq[i] = 0;
+ }
+ if (sub_policy->jump_tbl[i]) {
+ flow_dv_tbl_resource_release(MLX5_SH(dev),
+ sub_policy->jump_tbl[i]);
+ sub_policy->jump_tbl[i] = NULL;
+ }
+ }
+ if (sub_policy->tbl_rsc) {
+ flow_dv_tbl_resource_release(MLX5_SH(dev),
+ sub_policy->tbl_rsc);
+ sub_policy->tbl_rsc = NULL;
+ }
+}
+
+/**
+ * Destroy policy rules, lock free,
+ * (mutex should be acquired by caller).
+ * Dispatcher for action type specific call.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] mtr_policy
+ * Meter policy struct.
+ */
+static void
+flow_dv_destroy_policy_rules(struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_policy *mtr_policy)
+{
+ uint32_t i, j;
+ struct mlx5_flow_meter_sub_policy *sub_policy;
+ uint16_t sub_policy_num;
+
+ for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
+ sub_policy_num = (mtr_policy->sub_policy_num >>
+ (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
+ MLX5_MTR_SUB_POLICY_NUM_MASK;
+ for (j = 0; j < sub_policy_num; j++) {
+ sub_policy = mtr_policy->sub_policys[i][j];
+ if (sub_policy)
+ __flow_dv_destroy_sub_policy_rules
+ (dev, sub_policy);
+ }
+ }
+}
+
+/**
+ * Destroy policy action, lock free,
+ * (mutex should be acquired by caller).
+ * Dispatcher for action type specific call.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] mtr_policy
+ * Meter policy struct.
+ */
+static void
+flow_dv_destroy_mtr_policy_acts(struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_policy *mtr_policy)
+{
+ struct rte_flow_action *rss_action;
+ struct mlx5_flow_handle dev_handle;
+ uint32_t i, j;
+
+ for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
+ if (mtr_policy->act_cnt[i].rix_mark) {
+ flow_dv_tag_release(dev,
+ mtr_policy->act_cnt[i].rix_mark);
+ mtr_policy->act_cnt[i].rix_mark = 0;
+ }
+ if (mtr_policy->act_cnt[i].modify_hdr) {
+ dev_handle.dvh.modify_hdr =
+ mtr_policy->act_cnt[i].modify_hdr;
+ flow_dv_modify_hdr_resource_release(dev, &dev_handle);
+ }
+ switch (mtr_policy->act_cnt[i].fate_action) {
+ case MLX5_FLOW_FATE_SHARED_RSS:
+ rss_action = mtr_policy->act_cnt[i].rss;
+ mlx5_free(rss_action);
+ break;
+ case MLX5_FLOW_FATE_PORT_ID:
+ if (mtr_policy->act_cnt[i].rix_port_id_action) {
+ flow_dv_port_id_action_resource_release(dev,
+ mtr_policy->act_cnt[i].rix_port_id_action);
+ mtr_policy->act_cnt[i].rix_port_id_action = 0;
+ }
+ break;
+ case MLX5_FLOW_FATE_DROP:
+ case MLX5_FLOW_FATE_JUMP:
+ for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
+ mtr_policy->act_cnt[i].dr_jump_action[j] =
+ NULL;
+ break;
+ default:
+ /* Queue action does nothing. */
+ break;
+ }
+ }
+ for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
+ mtr_policy->dr_drop_action[j] = NULL;
+}
+
+/**
+ * Create policy action per domain, lock free,
+ * (mutex should be acquired by caller).
+ * Dispatcher for action type specific call.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] mtr_policy
+ * Meter policy struct.
+ * @param[in] action
+ * Action specification used to create meter actions.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. Initialized in case of
+ * error only.
+ *
+ * @return
+ * 0 on success, otherwise negative errno value.
+ */
+static int
+__flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_policy *mtr_policy,
+ const struct rte_flow_action *actions[RTE_COLORS],
+ enum mlx5_meter_domain domain,
+ struct rte_mtr_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct rte_flow_error flow_err;
+ const struct rte_flow_action *act;
+ uint64_t action_flags = 0;
+ struct mlx5_flow_handle dh;
+ struct mlx5_flow dev_flow;
+ struct mlx5_flow_dv_port_id_action_resource port_id_action;
+ int i, ret;
+ uint8_t egress, transfer;
+ struct mlx5_meter_policy_action_container *act_cnt = NULL;
+ union {
+ struct mlx5_flow_dv_modify_hdr_resource res;
+ uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
+ sizeof(struct mlx5_modification_cmd) *
+ (MLX5_MAX_MODIFY_NUM + 1)];
+ } mhdr_dummy;
+
+ egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
+ transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
+ memset(&dh, 0, sizeof(struct mlx5_flow_handle));
+ memset(&dev_flow, 0, sizeof(struct mlx5_flow));
+ memset(&port_id_action, 0,
+ sizeof(struct mlx5_flow_dv_port_id_action_resource));
+ dev_flow.handle = &dh;
+ dev_flow.dv.port_id_action = &port_id_action;
+ dev_flow.external = true;
+ for (i = 0; i < RTE_COLORS; i++) {
+ if (i < MLX5_MTR_RTE_COLORS)
+ act_cnt = &mtr_policy->act_cnt[i];
+ for (act = actions[i];
+ act && act->type != RTE_FLOW_ACTION_TYPE_END;
+ act++) {
+ switch (act->type) {
+ case RTE_FLOW_ACTION_TYPE_MARK:
+ {
+ uint32_t tag_be = mlx5_flow_mark_set
+ (((const struct rte_flow_action_mark *)
+ (act->conf))->id);
+
+ if (i >= MLX5_MTR_RTE_COLORS)
+ return -rte_mtr_error_set(error,
+ ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY,
+ NULL,
+ "cannot create policy "
+ "mark action for this color");
+ dev_flow.handle->mark = 1;
+ if (flow_dv_tag_resource_register(dev, tag_be,
+ &dev_flow, &flow_err))
+ return -rte_mtr_error_set(error,
+ ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY,
+ NULL,
+ "cannot setup policy mark action");
+ MLX5_ASSERT(dev_flow.dv.tag_resource);
+ act_cnt->rix_mark =
+ dev_flow.handle->dvh.rix_tag;
+ if (action_flags & MLX5_FLOW_ACTION_QUEUE) {
+ dev_flow.handle->rix_hrxq =
+ mtr_policy->sub_policys[domain][0]->rix_hrxq[i];
+ flow_drv_rxq_flags_set(dev,
+ dev_flow.handle);
+ }
+ action_flags |= MLX5_FLOW_ACTION_MARK;
+ break;
+ }
+ case RTE_FLOW_ACTION_TYPE_SET_TAG:
+ {
+ struct mlx5_flow_dv_modify_hdr_resource
+ *mhdr_res = &mhdr_dummy.res;
+
+ if (i >= MLX5_MTR_RTE_COLORS)
+ return -rte_mtr_error_set(error,
+ ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY,
+ NULL,
+ "cannot create policy "
+ "set tag action for this color");
+ memset(mhdr_res, 0, sizeof(*mhdr_res));
+ mhdr_res->ft_type = transfer ?
+ MLX5DV_FLOW_TABLE_TYPE_FDB :
+ egress ?
+ MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
+ MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
+ if (flow_dv_convert_action_set_tag
+ (dev, mhdr_res,
+ (const struct rte_flow_action_set_tag *)
+ act->conf, &flow_err))
+ return -rte_mtr_error_set(error,
+ ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY,
+ NULL, "cannot convert policy "
+ "set tag action");
+ if (!mhdr_res->actions_num)
+ return -rte_mtr_error_set(error,
+ ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY,
+ NULL, "cannot find policy "
+ "set tag action");
+ /* create modify action if needed. */
+ dev_flow.dv.group = 1;
+ if (flow_dv_modify_hdr_resource_register
+ (dev, mhdr_res, &dev_flow, &flow_err))
+ return -rte_mtr_error_set(error,
+ ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY,
+ NULL, "cannot register policy "
+ "set tag action");
+ act_cnt->modify_hdr =
+ dev_flow.handle->dvh.modify_hdr;
+ if (action_flags & MLX5_FLOW_ACTION_QUEUE) {
+ dev_flow.handle->rix_hrxq =
+ mtr_policy->sub_policys[domain][0]->rix_hrxq[i];
+ flow_drv_rxq_flags_set(dev,
+ dev_flow.handle);
+ }
+ action_flags |= MLX5_FLOW_ACTION_SET_TAG;
+ break;
+ }
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ {
+ struct mlx5_flow_mtr_mng *mtrmng =
+ priv->sh->mtrmng;
+ struct mlx5_flow_tbl_data_entry *tbl_data;
+
+ /*
+ * Create the drop table with
+ * METER DROP level.
+ */
+ if (!mtrmng->drop_tbl[domain]) {
+ mtrmng->drop_tbl[domain] =
+ flow_dv_tbl_resource_get(dev,
+ MLX5_FLOW_TABLE_LEVEL_METER,
+ egress, transfer, false, NULL, 0,
+ 0, MLX5_MTR_TABLE_ID_DROP, &flow_err);
+ if (!mtrmng->drop_tbl[domain])
+ return -rte_mtr_error_set
+ (error, ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY,
+ NULL,
+ "Failed to create meter drop table");
+ }
+ tbl_data = container_of
+ (mtrmng->drop_tbl[domain],
+ struct mlx5_flow_tbl_data_entry, tbl);
+ if (i < MLX5_MTR_RTE_COLORS) {
+ act_cnt->dr_jump_action[domain] =
+ tbl_data->jump.action;
+ act_cnt->fate_action =
+ MLX5_FLOW_FATE_DROP;
+ }
+ if (i == RTE_COLOR_RED)
+ mtr_policy->dr_drop_action[domain] =
+ tbl_data->jump.action;
+ action_flags |= MLX5_FLOW_ACTION_DROP;
+ break;
+ }
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ {
+ struct mlx5_hrxq *hrxq;
+ uint32_t hrxq_idx;
+ struct mlx5_flow_rss_desc rss_desc;
+ struct mlx5_flow_meter_sub_policy *sub_policy =
+ mtr_policy->sub_policys[domain][0];
+
+ if (i >= MLX5_MTR_RTE_COLORS)
+ return -rte_mtr_error_set(error,
+ ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY,
+ NULL, "cannot create policy "
+ "fate queue for this color");
+ memset(&rss_desc, 0,
+ sizeof(struct mlx5_flow_rss_desc));
+ rss_desc.queue_num = 1;
+ rss_desc.const_q = act->conf;
+ hrxq = flow_dv_hrxq_prepare(dev, &dev_flow,
+ &rss_desc, &hrxq_idx);
+ if (!hrxq)
+ return -rte_mtr_error_set(error,
+ ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY,
+ NULL,
+ "cannot create policy fate queue");
+ sub_policy->rix_hrxq[i] = hrxq_idx;
+ act_cnt->fate_action =
+ MLX5_FLOW_FATE_QUEUE;
+ dev_flow.handle->fate_action =
+ MLX5_FLOW_FATE_QUEUE;
+ if (action_flags & MLX5_FLOW_ACTION_MARK ||
+ action_flags & MLX5_FLOW_ACTION_SET_TAG) {
+ dev_flow.handle->rix_hrxq = hrxq_idx;
+ flow_drv_rxq_flags_set(dev,
+ dev_flow.handle);
+ }
+ action_flags |= MLX5_FLOW_ACTION_QUEUE;
+ break;
+ }
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ {
+ int rss_size;
+
+ if (i >= MLX5_MTR_RTE_COLORS)
+ return -rte_mtr_error_set(error,
+ ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY,
+ NULL,
+ "cannot create policy "
+ "rss action for this color");
+ /*
+ * Save RSS conf into policy struct
+ * for translate stage.
+ */
+ rss_size = (int)rte_flow_conv
+ (RTE_FLOW_CONV_OP_ACTION,
+ NULL, 0, act, &flow_err);
+ if (rss_size <= 0)
+ return -rte_mtr_error_set(error,
+ ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY,
+ NULL, "Get the wrong "
+ "rss action struct size");
+ act_cnt->rss = mlx5_malloc(MLX5_MEM_ZERO,
+ rss_size, 0, SOCKET_ID_ANY);
+ if (!act_cnt->rss)
+ return -rte_mtr_error_set(error,
+ ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY,
+ NULL,
+ "Fail to malloc rss action memory");
+ ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION,
+ act_cnt->rss, rss_size,
+ act, &flow_err);
+ if (ret < 0)
+ return -rte_mtr_error_set(error,
+ ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY,
+ NULL, "Fail to save "
+ "rss action into policy struct");
+ act_cnt->fate_action =
+ MLX5_FLOW_FATE_SHARED_RSS;
+ action_flags |= MLX5_FLOW_ACTION_RSS;
+ break;
+ }
+ case RTE_FLOW_ACTION_TYPE_PORT_ID:
+ {
+ struct mlx5_flow_dv_port_id_action_resource
+ port_id_resource;
+ uint32_t port_id = 0;
+
+ if (i >= MLX5_MTR_RTE_COLORS)
+ return -rte_mtr_error_set(error,
+ ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY,
+ NULL, "cannot create policy "
+ "port action for this color");
+ memset(&port_id_resource, 0,
+ sizeof(port_id_resource));
+ if (flow_dv_translate_action_port_id(dev, act,
+ &port_id, &flow_err))
+ return -rte_mtr_error_set(error,
+ ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY,
+ NULL, "cannot translate "
+ "policy port action");
+ port_id_resource.port_id = port_id;
+ if (flow_dv_port_id_action_resource_register
+ (dev, &port_id_resource,
+ &dev_flow, &flow_err))
+ return -rte_mtr_error_set(error,
+ ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY,
+ NULL, "cannot setup "
+ "policy port action");
+ act_cnt->rix_port_id_action =
+ dev_flow.handle->rix_port_id_action;
+ act_cnt->fate_action =
+ MLX5_FLOW_FATE_PORT_ID;
+ action_flags |= MLX5_FLOW_ACTION_PORT_ID;
+ break;
+ }
+ case RTE_FLOW_ACTION_TYPE_JUMP:
+ {
+ uint32_t jump_group = 0;
+ uint32_t table = 0;
+ struct mlx5_flow_tbl_data_entry *tbl_data;
+ struct flow_grp_info grp_info = {
+ .external = !!dev_flow.external,
+ .transfer = !!transfer,
+ .fdb_def_rule = !!priv->fdb_def_rule,
+ .std_tbl_fix = 0,
+ .skip_scale = dev_flow.skip_scale &
+ (1 << MLX5_SCALE_FLOW_GROUP_BIT),
+ };
+ struct mlx5_flow_meter_sub_policy *sub_policy =
+ mtr_policy->sub_policys[domain][0];
+
+ if (i >= MLX5_MTR_RTE_COLORS)
+ return -rte_mtr_error_set(error,
+ ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY,
+ NULL,
+ "cannot create policy "
+ "jump action for this color");
+ jump_group =
+ ((const struct rte_flow_action_jump *)
+ act->conf)->group;
+ if (mlx5_flow_group_to_table(dev, NULL,
+ jump_group,
+ &table,
+ &grp_info, &flow_err))
+ return -rte_mtr_error_set(error,
+ ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY,
+ NULL, "cannot setup "
+ "policy jump action");
+ sub_policy->jump_tbl[i] =
+ flow_dv_tbl_resource_get(dev,
+ table, egress,
+ transfer,
+ !!dev_flow.external,
+ NULL, jump_group, 0,
+ 0, &flow_err);
+ if
+ (!sub_policy->jump_tbl[i])
+ return -rte_mtr_error_set(error,
+ ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY,
+ NULL, "cannot create jump action.");
+ tbl_data = container_of
+ (sub_policy->jump_tbl[i],
+ struct mlx5_flow_tbl_data_entry, tbl);
+ act_cnt->dr_jump_action[domain] =
+ tbl_data->jump.action;
+ act_cnt->fate_action =
+ MLX5_FLOW_FATE_JUMP;
+ action_flags |= MLX5_FLOW_ACTION_JUMP;
+ break;
+ }
+ default:
+ return -rte_mtr_error_set(error, ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY,
+ NULL, "action type not supported");
+ }
+ }
+ }
+ return 0;
+}
+
+/**
+ * Create policy action per domain, lock free,
+ * (mutex should be acquired by caller).
+ * Dispatcher for action type specific call.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] mtr_policy
+ * Meter policy struct.
+ * @param[in] action
+ * Action specification used to create meter actions.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. Initialized in case of
+ * error only.
+ *
+ * @return
+ * 0 on success, otherwise negative errno value.
+ */
+static int
+flow_dv_create_mtr_policy_acts(struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_policy *mtr_policy,
+ const struct rte_flow_action *actions[RTE_COLORS],
+ struct rte_mtr_error *error)
+{
+ int ret, i;
+ uint16_t sub_policy_num;
+
+ for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
+ sub_policy_num = (mtr_policy->sub_policy_num >>
+ (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
+ MLX5_MTR_SUB_POLICY_NUM_MASK;
+ if (sub_policy_num) {
+ ret = __flow_dv_create_domain_policy_acts(dev,
+ mtr_policy, actions,
+ (enum mlx5_meter_domain)i, error);
+ if (ret)
+ return ret;
+ }
+ }
+ return 0;
+}
+
/**
* Query a dv flow rule for its statistics via devx.
*
@@ -13570,9 +14147,484 @@ flow_dv_destroy_mtr_tbl(struct rte_eth_dev *dev,
return 0;
}
+static void
+flow_dv_destroy_mtr_drop_tbls(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
+ struct mlx5_flow_tbl_data_entry *tbl;
+ int i;
+
+ for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
+ if (mtrmng->def_rule[i]) {
+ claim_zero(mlx5_flow_os_destroy_flow
+ (mtrmng->def_rule[i]));
+ mtrmng->def_rule[i] = NULL;
+ }
+ if (mtrmng->def_matcher[i]) {
+ tbl = container_of(mtrmng->def_matcher[i]->tbl,
+ struct mlx5_flow_tbl_data_entry, tbl);
+ mlx5_cache_unregister(&tbl->matchers,
+ &mtrmng->def_matcher[i]->entry);
+ mtrmng->def_matcher[i] = NULL;
+ }
+ if (mtrmng->drop_matcher[i]) {
+ tbl = container_of(mtrmng->drop_matcher[i]->tbl,
+ struct mlx5_flow_tbl_data_entry, tbl);
+ mlx5_cache_unregister(&tbl->matchers,
+ &mtrmng->drop_matcher[i]->entry);
+ mtrmng->drop_matcher[i] = NULL;
+ }
+ if (mtrmng->drop_tbl[i]) {
+ flow_dv_tbl_resource_release(MLX5_SH(dev),
+ mtrmng->drop_tbl[i]);
+ mtrmng->drop_tbl[i] = NULL;
+ }
+ }
+}
+
/* Number of meter flow actions, count and jump or count and drop. */
#define METER_ACTIONS 2
+static void
+__flow_dv_destroy_domain_def_policy(struct rte_eth_dev *dev,
+ enum mlx5_meter_domain domain)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_meter_def_policy *def_policy =
+ priv->sh->mtrmng->def_policy[domain];
+
+ __flow_dv_destroy_sub_policy_rules(dev, &def_policy->sub_policy);
+ mlx5_free(def_policy);
+ priv->sh->mtrmng->def_policy[domain] = NULL;
+}
+
+/**
+ * Destroy the default policy table set.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ */
+static void
+flow_dv_destroy_def_policy(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ int i;
+
+ for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++)
+ if (priv->sh->mtrmng->def_policy[i])
+ __flow_dv_destroy_domain_def_policy(dev,
+ (enum mlx5_meter_domain)i);
+ priv->sh->mtrmng->def_policy_id = MLX5_INVALID_POLICY_ID;
+}
+
+static int
+__flow_dv_create_policy_flow(struct rte_eth_dev *dev,
+ uint32_t color_reg_c_idx,
+ enum rte_color color, void *matcher_object,
+ int actions_n, void *actions,
+ bool is_default_policy, void **rule,
+ const struct rte_flow_attr *attr)
+{
+ int ret;
+ struct mlx5_flow_dv_match_params value = {
+ .size = sizeof(value.buf) -
+ MLX5_ST_SZ_BYTES(fte_match_set_misc4),
+ };
+ struct mlx5_flow_dv_match_params matcher = {
+ .size = sizeof(matcher.buf) -
+ MLX5_ST_SZ_BYTES(fte_match_set_misc4),
+ };
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ if (!is_default_policy && (priv->representor || priv->master)) {
+ if (flow_dv_translate_item_port_id(dev, matcher.buf,
+ value.buf, NULL, attr)) {
+ DRV_LOG(ERR,
+ "Failed to create meter policy flow with port.");
+ return -1;
+ }
+ }
+ flow_dv_match_meta_reg(matcher.buf, value.buf,
+ (enum modify_reg)color_reg_c_idx,
+ rte_col_2_mlx5_col(color),
+ UINT32_MAX);
+ ret = mlx5_flow_os_create_flow(matcher_object,
+ (void *)&value, actions_n, actions, rule);
+ if (ret) {
+ DRV_LOG(ERR, "Failed to create meter policy flow.");
+ return -1;
+ }
+ return 0;
+}
+
+static int
+__flow_dv_create_policy_matcher(struct rte_eth_dev *dev,
+ uint32_t color_reg_c_idx,
+ uint16_t priority,
+ struct mlx5_flow_meter_sub_policy *sub_policy,
+ const struct rte_flow_attr *attr,
+ bool is_default_policy,
+ struct rte_flow_error *error)
+{
+ struct mlx5_cache_entry *entry;
+ struct mlx5_flow_tbl_resource *tbl_rsc = sub_policy->tbl_rsc;
+ struct mlx5_flow_dv_matcher matcher = {
+ .mask = {
+ .size = sizeof(matcher.mask.buf) -
+ MLX5_ST_SZ_BYTES(fte_match_set_misc4),
+ },
+ .tbl = tbl_rsc,
+ };
+ struct mlx5_flow_dv_match_params value = {
+ .size = sizeof(value.buf) -
+ MLX5_ST_SZ_BYTES(fte_match_set_misc4),
+ };
+ struct mlx5_flow_cb_ctx ctx = {
+ .error = error,
+ .data = &matcher,
+ };
+ struct mlx5_flow_tbl_data_entry *tbl_data;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ uint32_t color_mask = (UINT32_C(1) << MLX5_MTR_COLOR_BITS) - 1;
+
+ if (!is_default_policy && (priv->representor || priv->master)) {
+ if (flow_dv_translate_item_port_id(dev, matcher.mask.buf,
+ value.buf, NULL, attr)) {
+ DRV_LOG(ERR,
+ "Failed to register meter drop matcher with port.");
+ return -1;
+ }
+ }
+ tbl_data = container_of(tbl_rsc, struct mlx5_flow_tbl_data_entry, tbl);
+ if (priority < RTE_COLOR_RED)
+ flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
+ (enum modify_reg)color_reg_c_idx, 0, color_mask);
+ matcher.priority = priority;
+ matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
+ matcher.mask.size);
+ entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
+ if (!entry) {
+ DRV_LOG(ERR, "Failed to register meter drop matcher.");
+ return -1;
+ }
+ sub_policy->color_matcher[priority] =
+ container_of(entry, struct mlx5_flow_dv_matcher, entry);
+ return 0;
+}
+
+/**
+ * Create the policy rules per domain.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in] sub_policy
+ * Pointer to sub policy table..
+ * @param[in] egress
+ * Direction of the table.
+ * @param[in] transfer
+ * E-Switch or NIC flow.
+ * @param[in] acts
+ * Pointer to policy action list per color.
+ *
+ * @return
+ * 0 on success, -1 otherwise.
+ */
+static int
+__flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_sub_policy *sub_policy,
+ uint8_t egress, uint8_t transfer, bool is_default_policy,
+ struct mlx5_meter_policy_acts acts[RTE_COLORS])
+{
+ struct rte_flow_error flow_err;
+ uint32_t color_reg_c_idx;
+ struct rte_flow_attr attr = {
+ .group = MLX5_FLOW_TABLE_LEVEL_POLICY,
+ .priority = 0,
+ .ingress = 0,
+ .egress = !!egress,
+ .transfer = !!transfer,
+ .reserved = 0,
+ };
+ int i;
+ int ret = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &flow_err);
+
+ if (ret < 0)
+ return -1;
+ /* Create policy table with POLICY level. */
+ if (!sub_policy->tbl_rsc)
+ sub_policy->tbl_rsc = flow_dv_tbl_resource_get(dev,
+ MLX5_FLOW_TABLE_LEVEL_POLICY,
+ egress, transfer, false, NULL, 0, 0,
+ sub_policy->idx, &flow_err);
+ if (!sub_policy->tbl_rsc) {
+ DRV_LOG(ERR,
+ "Failed to create meter sub policy table.");
+ return -1;
+ }
+ /* Prepare matchers. */
+ color_reg_c_idx = ret;
+ for (i = 0; i < RTE_COLORS; i++) {
+ if (i == RTE_COLOR_YELLOW || !acts[i].actions_n)
+ continue;
+ attr.priority = i;
+ if (!sub_policy->color_matcher[i]) {
+ /* Create matchers for Color. */
+ if (__flow_dv_create_policy_matcher(dev,
+ color_reg_c_idx, i, sub_policy,
+ &attr, is_default_policy, &flow_err))
+ return -1;
+ }
+ /* Create flow, matching color. */
+ if (acts[i].actions_n)
+ if (__flow_dv_create_policy_flow(dev,
+ color_reg_c_idx, (enum rte_color)i,
+ sub_policy->color_matcher[i]->matcher_object,
+ acts[i].actions_n,
+ acts[i].dv_actions,
+ is_default_policy,
+ &sub_policy->color_rule[i],
+ &attr))
+ return -1;
+ }
+ return 0;
+}
+
+static int
+__flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_policy *mtr_policy,
+ struct mlx5_flow_meter_sub_policy *sub_policy,
+ uint32_t domain)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_meter_policy_acts acts[RTE_COLORS];
+ struct mlx5_flow_dv_tag_resource *tag;
+ struct mlx5_flow_dv_port_id_action_resource *port_action;
+ struct mlx5_hrxq *hrxq;
+ uint8_t egress, transfer;
+ int i;
+
+ for (i = 0; i < RTE_COLORS; i++) {
+ acts[i].actions_n = 0;
+ if (i == RTE_COLOR_YELLOW)
+ continue;
+ if (i == RTE_COLOR_RED) {
+ /* Only support drop on red. */
+ acts[i].dv_actions[0] =
+ mtr_policy->dr_drop_action[domain];
+ acts[i].actions_n = 1;
+ continue;
+ }
+ if (mtr_policy->act_cnt[i].rix_mark) {
+ tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG],
+ mtr_policy->act_cnt[i].rix_mark);
+ if (!tag) {
+ DRV_LOG(ERR, "Failed to find "
+ "mark action for policy.");
+ return -1;
+ }
+ acts[i].dv_actions[acts[i].actions_n] =
+ tag->action;
+ acts[i].actions_n++;
+ }
+ if (mtr_policy->act_cnt[i].modify_hdr) {
+ acts[i].dv_actions[acts[i].actions_n] =
+ mtr_policy->act_cnt[i].modify_hdr->action;
+ acts[i].actions_n++;
+ }
+ if (mtr_policy->act_cnt[i].fate_action) {
+ switch (mtr_policy->act_cnt[i].fate_action) {
+ case MLX5_FLOW_FATE_PORT_ID:
+ port_action = mlx5_ipool_get
+ (priv->sh->ipool[MLX5_IPOOL_PORT_ID],
+ mtr_policy->act_cnt[i].rix_port_id_action);
+ if (!port_action) {
+ DRV_LOG(ERR, "Failed to find "
+ "port action for policy.");
+ return -1;
+ }
+ acts[i].dv_actions[acts[i].actions_n] =
+ port_action->action;
+ acts[i].actions_n++;
+ break;
+ case MLX5_FLOW_FATE_DROP:
+ case MLX5_FLOW_FATE_JUMP:
+ acts[i].dv_actions[acts[i].actions_n] =
+ mtr_policy->act_cnt[i].dr_jump_action[domain];
+ acts[i].actions_n++;
+ break;
+ case MLX5_FLOW_FATE_SHARED_RSS:
+ case MLX5_FLOW_FATE_QUEUE:
+ hrxq = mlx5_ipool_get
+ (priv->sh->ipool[MLX5_IPOOL_HRXQ],
+ sub_policy->rix_hrxq[i]);
+ if (!hrxq) {
+ DRV_LOG(ERR, "Failed to find "
+ "queue action for policy.");
+ return -1;
+ }
+ acts[i].dv_actions[acts[i].actions_n] =
+ hrxq->action;
+ acts[i].actions_n++;
+ break;
+ default:
+ /* Queue action does nothing. */
+ break;
+ }
+ }
+ }
+ egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
+ transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
+ if (__flow_dv_create_domain_policy_rules(dev, sub_policy,
+ egress, transfer, false, acts)) {
+ DRV_LOG(ERR,
+ "Failed to create policy rules per domain.");
+ return -1;
+ }
+ return 0;
+}
+
+/**
+ * Create the policy rules.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in,out] mtr_policy
+ * Pointer to meter policy table.
+ *
+ * @return
+ * 0 on success, -1 otherwise.
+ */
+static int
+flow_dv_create_policy_rules(struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_policy *mtr_policy)
+{
+ int i;
+ uint16_t sub_policy_num;
+
+ for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
+ sub_policy_num = (mtr_policy->sub_policy_num >>
+ (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
+ MLX5_MTR_SUB_POLICY_NUM_MASK;
+ if (!sub_policy_num)
+ continue;
+ /* Prepare actions list and create policy rules. */
+ if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
+ mtr_policy->sub_policys[i][0], i)) {
+ DRV_LOG(ERR,
+ "Failed to create policy action list per domain.");
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static int
+__flow_dv_create_domain_def_policy(struct rte_eth_dev *dev, uint32_t domain)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
+ struct mlx5_flow_meter_def_policy *def_policy;
+ struct mlx5_flow_tbl_resource *jump_tbl;
+ struct mlx5_flow_tbl_data_entry *tbl_data;
+ uint8_t egress, transfer;
+ struct rte_flow_error error;
+ struct mlx5_meter_policy_acts acts[RTE_COLORS];
+ int ret;
+
+ egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
+ transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
+ def_policy = mtrmng->def_policy[domain];
+ if (!def_policy) {
+ def_policy = mlx5_malloc(MLX5_MEM_ZERO,
+ sizeof(struct mlx5_flow_meter_def_policy),
+ RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
+ if (!def_policy) {
+ DRV_LOG(ERR, "Failed to alloc "
+ "default policy table.");
+ goto def_policy_error;
+ }
+ mtrmng->def_policy[domain] = def_policy;
+ /* Create the meter suffix table with SUFFIX level. */
+ jump_tbl = flow_dv_tbl_resource_get(dev,
+ MLX5_FLOW_TABLE_LEVEL_METER,
+ egress, transfer, false, NULL, 0,
+ 0, MLX5_MTR_TABLE_ID_SUFFIX, &error);
+ if (!jump_tbl) {
+ DRV_LOG(ERR,
+ "Failed to create meter suffix table.");
+ goto def_policy_error;
+ }
+ def_policy->sub_policy.jump_tbl[RTE_COLOR_GREEN] = jump_tbl;
+ tbl_data = container_of(jump_tbl,
+ struct mlx5_flow_tbl_data_entry, tbl);
+ def_policy->dr_jump_action[RTE_COLOR_GREEN] =
+ tbl_data->jump.action;
+ acts[RTE_COLOR_GREEN].dv_actions[0] =
+ tbl_data->jump.action;
+ acts[RTE_COLOR_GREEN].actions_n = 1;
+ /* Create jump action to the drop table. */
+ if (!mtrmng->drop_tbl[domain]) {
+ mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get
+ (dev, MLX5_FLOW_TABLE_LEVEL_METER,
+ egress, transfer, false, NULL, 0,
+ 0, MLX5_MTR_TABLE_ID_DROP, &error);
+ if (!mtrmng->drop_tbl[domain]) {
+ DRV_LOG(ERR, "Failed to create "
+ "meter drop table for default policy.");
+ goto def_policy_error;
+ }
+ }
+ tbl_data = container_of(mtrmng->drop_tbl[domain],
+ struct mlx5_flow_tbl_data_entry, tbl);
+ def_policy->dr_jump_action[RTE_COLOR_RED] =
+ tbl_data->jump.action;
+ acts[RTE_COLOR_RED].dv_actions[0] = tbl_data->jump.action;
+ acts[RTE_COLOR_RED].actions_n = 1;
+ /* Create default policy rules. */
+ ret = __flow_dv_create_domain_policy_rules(dev,
+ &def_policy->sub_policy,
+ egress, transfer, true, acts);
+ if (ret) {
+ DRV_LOG(ERR, "Failed to create "
+ "default policy rules.");
+ goto def_policy_error;
+ }
+ }
+ return 0;
+def_policy_error:
+ __flow_dv_destroy_domain_def_policy(dev,
+ (enum mlx5_meter_domain)domain);
+ return -1;
+}
+
+/**
+ * Create the default policy table set.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @return
+ * 0 on success, -1 otherwise.
+ */
+static int
+flow_dv_create_def_policy(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ int i;
+
+ /* Non-termination policy table. */
+ for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
+ if (!priv->config.dv_esw_en && i == MLX5_MTR_DOMAIN_TRANSFER)
+ continue;
+ if (__flow_dv_create_domain_def_policy(dev, i)) {
+ DRV_LOG(ERR,
+ "Failed to create default policy");
+ return -1;
+ }
+ }
+ return 0;
+}
+
/**
* Create specify domain meter table and suffix table.
*
@@ -13602,19 +14654,11 @@ flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev,
dtb = &mtb->egress;
else
dtb = &mtb->ingress;
- /* Create the meter table with METER level. */
- dtb->tbl = flow_dv_tbl_resource_get(dev, MLX5_FLOW_TABLE_LEVEL_METER,
- egress, transfer, false, NULL, 0,
- 0, 0, &error);
- if (!dtb->tbl) {
- DRV_LOG(ERR, "Failed to create meter policer table.");
- return -1;
- }
/* Create the meter suffix table with SUFFIX level. */
dtb->sfx_tbl = flow_dv_tbl_resource_get(dev,
- MLX5_FLOW_TABLE_LEVEL_SUFFIX,
- egress, transfer, false, NULL, 0,
- 0, 0, &error);
+ MLX5_FLOW_TABLE_LEVEL_METER,
+ egress, transfer, false, NULL, 0,
+ 0, MLX5_MTR_TABLE_ID_SUFFIX, &error);
if (!dtb->sfx_tbl) {
DRV_LOG(ERR, "Failed to create meter suffix table.");
return -1;
@@ -13933,6 +14977,292 @@ flow_dv_action_validate(struct rte_eth_dev *dev,
}
}
+/**
+ * Validate meter policy actions.
+ * Dispatcher for action type specific validation.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] action
+ * The meter policy action object to validate.
+ * @param[in] attr
+ * Attributes of flow to determine steering domain.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. Initialized in case of
+ * error only.
+ *
+ * @return
+ * 0 on success, otherwise negative errno value.
+ */
+static int
+flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions[RTE_COLORS],
+ struct rte_flow_attr *attr,
+ bool *is_rss,
+ uint8_t *domain_bitmap,
+ bool *is_def_policy,
+ struct rte_mtr_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_config *dev_conf = &priv->config;
+ const struct rte_flow_action *act;
+ uint64_t action_flags = 0;
+ int actions_n;
+ int i, ret;
+ struct rte_flow_error flow_err;
+ uint8_t domain_color[RTE_COLORS] = {0};
+ uint8_t def_domain = MLX5_MTR_ALL_DOMAIN_BIT;
+
+ if (!priv->config.dv_esw_en)
+ def_domain &= ~MLX5_MTR_DOMAIN_TRANSFER_BIT;
+ *domain_bitmap = def_domain;
+ if (actions[RTE_COLOR_YELLOW] &&
+ actions[RTE_COLOR_YELLOW]->type != RTE_FLOW_ACTION_TYPE_END)
+ return -rte_mtr_error_set(error, ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY,
+ NULL,
+ "Yellow color does not support any action.");
+	if (!actions[RTE_COLOR_RED] ||
+	actions[RTE_COLOR_RED]->type != RTE_FLOW_ACTION_TYPE_DROP)
+ return -rte_mtr_error_set(error, ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY,
+ NULL, "Red color only supports drop action.");
+ /*
+ * Check default policy actions:
+ * Green/Yellow: no action, Red: drop action
+ */
+ if ((!actions[RTE_COLOR_GREEN] ||
+ actions[RTE_COLOR_GREEN]->type == RTE_FLOW_ACTION_TYPE_END)) {
+ *is_def_policy = true;
+ return 0;
+ }
+ flow_err.message = NULL;
+ for (i = 0; i < RTE_COLORS; i++) {
+ act = actions[i];
+ for (action_flags = 0, actions_n = 0;
+ act && act->type != RTE_FLOW_ACTION_TYPE_END;
+ act++) {
+ if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
+ return -rte_mtr_error_set(error, ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY,
+ NULL, "too many actions");
+ switch (act->type) {
+ case RTE_FLOW_ACTION_TYPE_PORT_ID:
+ if (!priv->config.dv_esw_en)
+ return -rte_mtr_error_set(error,
+ ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY,
+ NULL, "PORT action validate check"
+ " fail for ESW disable");
+ ret = flow_dv_validate_action_port_id(dev,
+ action_flags,
+ act, attr, &flow_err);
+ if (ret)
+ return -rte_mtr_error_set(error,
+ ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY,
+ NULL, flow_err.message ?
+ flow_err.message :
+ "PORT action validate check fail");
+ ++actions_n;
+ action_flags |= MLX5_FLOW_ACTION_PORT_ID;
+ break;
+ case RTE_FLOW_ACTION_TYPE_MARK:
+ ret = flow_dv_validate_action_mark(dev, act,
+ action_flags,
+ attr, &flow_err);
+ if (ret < 0)
+ return -rte_mtr_error_set(error,
+ ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY,
+ NULL, flow_err.message ?
+ flow_err.message :
+ "Mark action validate check fail");
+ if (dev_conf->dv_xmeta_en !=
+ MLX5_XMETA_MODE_LEGACY)
+ return -rte_mtr_error_set(error,
+ ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY,
+ NULL, "Extend MARK action is "
+ "not supported. Please try use "
+ "default policy for meter.");
+ action_flags |= MLX5_FLOW_ACTION_MARK;
+ ++actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_TAG:
+ ret = flow_dv_validate_action_set_tag(dev,
+ act, action_flags,
+ attr, &flow_err);
+ if (ret)
+ return -rte_mtr_error_set(error,
+ ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY,
+ NULL, flow_err.message ?
+ flow_err.message :
+ "Set tag action validate check fail");
+ /*
+ * Count all modify-header actions
+ * as one action.
+ */
+ if (!(action_flags &
+ MLX5_FLOW_MODIFY_HDR_ACTIONS))
+ ++actions_n;
+ action_flags |= MLX5_FLOW_ACTION_SET_TAG;
+ break;
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ ret = mlx5_flow_validate_action_drop
+ (action_flags,
+ attr, &flow_err);
+ if (ret < 0)
+ return -rte_mtr_error_set(error,
+ ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY,
+ NULL, flow_err.message ?
+ flow_err.message :
+ "Drop action validate check fail");
+ action_flags |= MLX5_FLOW_ACTION_DROP;
+ ++actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ /*
+ * Check whether extensive
+ * metadata feature is engaged.
+ */
+ if (dev_conf->dv_flow_en &&
+ (dev_conf->dv_xmeta_en !=
+ MLX5_XMETA_MODE_LEGACY) &&
+ mlx5_flow_ext_mreg_supported(dev))
+ return -rte_mtr_error_set(error,
+ ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY,
+ NULL, "Queue action with meta "
+ "is not supported. Please try use "
+ "default policy for meter.");
+ ret = mlx5_flow_validate_action_queue(act,
+ action_flags, dev,
+ attr, &flow_err);
+ if (ret < 0)
+ return -rte_mtr_error_set(error,
+ ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY,
+ NULL, flow_err.message ?
+ flow_err.message :
+ "Queue action validate check fail");
+ action_flags |= MLX5_FLOW_ACTION_QUEUE;
+ ++actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ if (dev_conf->dv_flow_en &&
+ (dev_conf->dv_xmeta_en !=
+ MLX5_XMETA_MODE_LEGACY) &&
+ mlx5_flow_ext_mreg_supported(dev))
+ return -rte_mtr_error_set(error,
+ ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY,
+ NULL, "RSS action with meta "
+ "is not supported. Please try use "
+ "default policy for meter.");
+ ret = mlx5_validate_action_rss(dev, act,
+ &flow_err);
+ if (ret < 0)
+ return -rte_mtr_error_set(error,
+ ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY,
+ NULL, flow_err.message ?
+ flow_err.message :
+ "RSS action validate check fail");
+ action_flags |= MLX5_FLOW_ACTION_RSS;
+ ++actions_n;
+ *is_rss = true;
+ break;
+ case RTE_FLOW_ACTION_TYPE_JUMP:
+ ret = flow_dv_validate_action_jump(dev,
+ NULL, act, action_flags,
+ attr, true, &flow_err);
+ if (ret)
+ return -rte_mtr_error_set(error,
+ ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY,
+ NULL, flow_err.message ?
+ flow_err.message :
+ "Jump action validate check fail");
+ ++actions_n;
+ action_flags |= MLX5_FLOW_ACTION_JUMP;
+ break;
+ default:
+ return -rte_mtr_error_set(error, ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY,
+ NULL,
+ "Doesn't support optional action");
+ }
+ }
+ /* Yellow is not supported, just skip. */
+ if (i == RTE_COLOR_YELLOW)
+ continue;
+ if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
+ domain_color[i] = MLX5_MTR_DOMAIN_TRANSFER_BIT;
+ else if ((action_flags &
+ (MLX5_FLOW_ACTION_RSS | MLX5_FLOW_ACTION_QUEUE)) ||
+ (action_flags & MLX5_FLOW_ACTION_MARK))
+ /*
+ * Only support MLX5_XMETA_MODE_LEGACY
+ * so MARK action only in ingress domain.
+ */
+ domain_color[i] = MLX5_MTR_DOMAIN_INGRESS_BIT;
+ else
+ domain_color[i] = def_domain;
+ /*
+ * Validate the drop action mutual exclusion
+ * with other actions. Drop action is mutually-exclusive
+ * with any other action, except for Count action.
+ */
+ if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
+ (action_flags & ~MLX5_FLOW_ACTION_DROP)) {
+ return -rte_mtr_error_set(error, ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY,
+ NULL, "Drop action is mutually-exclusive "
+ "with any other action");
+ }
+ /* Eswitch has few restrictions on using items and actions */
+ if (domain_color[i] & MLX5_MTR_DOMAIN_TRANSFER_BIT) {
+ if (!mlx5_flow_ext_mreg_supported(dev) &&
+ action_flags & MLX5_FLOW_ACTION_MARK)
+ return -rte_mtr_error_set(error, ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY,
+ NULL, "unsupported action MARK");
+ if (action_flags & MLX5_FLOW_ACTION_QUEUE)
+ return -rte_mtr_error_set(error, ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY,
+ NULL, "unsupported action QUEUE");
+ if (action_flags & MLX5_FLOW_ACTION_RSS)
+ return -rte_mtr_error_set(error, ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY,
+ NULL, "unsupported action RSS");
+ if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
+ return -rte_mtr_error_set(error, ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY,
+ NULL, "no fate action is found");
+ } else {
+ if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) &&
+ (domain_color[i] &
+ MLX5_MTR_DOMAIN_INGRESS_BIT)) {
+ if ((domain_color[i] &
+ MLX5_MTR_DOMAIN_EGRESS_BIT))
+ domain_color[i] =
+ MLX5_MTR_DOMAIN_EGRESS_BIT;
+ else
+ return -rte_mtr_error_set(error,
+ ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY,
+ NULL, "no fate action is found");
+ }
+ }
+ if (domain_color[i] != def_domain)
+ *domain_bitmap = domain_color[i];
+ }
+ return 0;
+}
+
static int
flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
{
@@ -13968,8 +15298,16 @@ const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
.query = flow_dv_query,
.create_mtr_tbls = flow_dv_create_mtr_tbl,
.destroy_mtr_tbls = flow_dv_destroy_mtr_tbl,
+ .destroy_mtr_drop_tbls = flow_dv_destroy_mtr_drop_tbls,
.create_meter = flow_dv_mtr_alloc,
.free_meter = flow_dv_aso_mtr_release_to_pool,
+ .validate_mtr_acts = flow_dv_validate_mtr_policy_acts,
+ .create_mtr_acts = flow_dv_create_mtr_policy_acts,
+ .destroy_mtr_acts = flow_dv_destroy_mtr_policy_acts,
+ .create_policy_rules = flow_dv_create_policy_rules,
+ .destroy_policy_rules = flow_dv_destroy_policy_rules,
+ .create_def_policy = flow_dv_create_def_policy,
+ .destroy_def_policy = flow_dv_destroy_def_policy,
.counter_alloc = flow_dv_counter_allocate,
.counter_free = flow_dv_counter_free,
.counter_query = flow_dv_counter_query,
@@ -38,6 +38,12 @@ mlx5_flow_meter_action_create(struct mlx5_priv *priv,
uint32_t cbs_cir = rte_be_to_cpu_32(srtcm->cbs_cir);
uint32_t ebs_eir = rte_be_to_cpu_32(srtcm->ebs_eir);
uint32_t val;
+ enum mlx5_meter_domain domain =
+ fm->transfer ? MLX5_MTR_DOMAIN_TRANSFER :
+ fm->egress ? MLX5_MTR_DOMAIN_EGRESS :
+ MLX5_MTR_DOMAIN_INGRESS;
+ struct mlx5_flow_meter_def_policy *def_policy =
+ priv->sh->mtrmng->def_policy[domain];
memset(fmp, 0, MLX5_ST_SZ_BYTES(flow_meter_parameters));
MLX5_SET(flow_meter_parameters, fmp, valid, 1);
@@ -57,10 +63,7 @@ mlx5_flow_meter_action_create(struct mlx5_priv *priv,
MLX5_SET(flow_meter_parameters, fmp, ebs_exponent, val);
val = (ebs_eir >> ASO_DSEG_EBS_MAN_OFFSET) & ASO_DSEG_MAN_MASK;
MLX5_SET(flow_meter_parameters, fmp, ebs_mantissa, val);
- mtr_init.next_table =
- fm->transfer ? fm->mfts->transfer.tbl->obj :
- fm->egress ? fm->mfts->egress.tbl->obj :
- fm->mfts->ingress.tbl->obj;
+ mtr_init.next_table = def_policy->sub_policy.tbl_rsc->obj;
mtr_init.reg_c_index = priv->mtr_color_reg - REG_C_0;
mtr_init.flow_meter_parameter = fmp;
mtr_init.flow_meter_parameter_sz =
@@ -317,7 +320,7 @@ mlx5_flow_mtr_cap_get(struct rte_eth_dev *dev,
"Meter is not supported");
memset(cap, 0, sizeof(*cap));
if (priv->sh->meter_aso_en)
- /* 2 meters per one ASO cache line. */
+ /* 2 meters per one ASO cache line. */
cap->n_max = 1 << (qattr->log_max_num_meter_aso + 1);
else
cap->n_max = 1 << qattr->log_max_flow_meter;
@@ -435,6 +438,347 @@ mlx5_flow_meter_profile_delete(struct rte_eth_dev *dev,
return 0;
}
+/**
+ * Find policy by id.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param policy_id
+ * Policy id.
+ *
+ * @return
+ * Pointer to the policy found on success, NULL otherwise.
+ */
+struct mlx5_flow_meter_policy *
+mlx5_flow_meter_policy_find(struct rte_eth_dev *dev,
+ uint32_t policy_id,
+ uint32_t *policy_idx)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
+ union mlx5_l3t_data data;
+
+ if (policy_id > MLX5_MAX_SUB_POLICY_TBL_NUM ||
+ !priv->sh->mtrmng->policy_idx_tbl)
+ return NULL;
+ if (mlx5_l3t_get_entry(priv->sh->mtrmng->policy_idx_tbl,
+ policy_id, &data) ||
+ !data.dword)
+ return NULL;
+ if (policy_idx)
+ *policy_idx = data.dword;
+ sub_policy = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
+ data.dword);
+ /* Remove reference taken by the mlx5_l3t_get_entry. */
+ mlx5_l3t_clear_entry(priv->sh->mtrmng->policy_idx_tbl,
+ policy_id);
+ if (sub_policy)
+ if (sub_policy->main_policy_id)
+ return sub_policy->main_policy;
+ return NULL;
+}
+
+/**
+ * Callback to check MTR policy action validate
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in] actions
+ * Pointer to meter policy action detail.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_meter_policy_validate(struct rte_eth_dev *dev,
+ struct rte_mtr_meter_policy_params *policy,
+ struct rte_mtr_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct rte_flow_attr attr = { .transfer =
+ priv->config.dv_esw_en ? 1 : 0};
+ bool is_rss = false;
+ bool is_def_policy = false;
+ uint8_t domain_bitmap;
+ int ret;
+
+ if (!priv->mtr_en || !priv->sh->meter_aso_en)
+ return -rte_mtr_error_set(error, ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY,
+ NULL, "meter policy unsupported.");
+ ret = mlx5_flow_validate_mtr_acts(dev, policy->actions, &attr,
+ &is_rss, &domain_bitmap, &is_def_policy, error);
+ if (ret)
+ return ret;
+ return 0;
+}
+
+static int
+__mlx5_flow_meter_policy_delete(struct rte_eth_dev *dev,
+ uint32_t policy_id,
+ struct mlx5_flow_meter_policy *mtr_policy,
+ struct rte_mtr_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_meter_sub_policy *sub_policy;
+ uint32_t i, j;
+ uint16_t sub_policy_num;
+
+ rte_spinlock_lock(&mtr_policy->sl);
+ if (mtr_policy->ref_cnt) {
+ rte_spinlock_unlock(&mtr_policy->sl);
+ return -rte_mtr_error_set(error, EBUSY,
+ RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+ NULL,
+ "Meter policy object is being used.");
+ }
+ mlx5_flow_destroy_policy_rules(dev, mtr_policy);
+ mlx5_flow_destroy_mtr_acts(dev, mtr_policy);
+ for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
+ sub_policy_num = (mtr_policy->sub_policy_num >>
+ (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
+ MLX5_MTR_SUB_POLICY_NUM_MASK;
+ if (sub_policy_num) {
+ for (j = 0; j < sub_policy_num; j++) {
+ sub_policy = mtr_policy->sub_policys[i][j];
+ if (sub_policy)
+ mlx5_ipool_free
+ (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
+ sub_policy->idx);
+ }
+ }
+ }
+ if (priv->sh->mtrmng->policy_idx_tbl) {
+ if (mlx5_l3t_clear_entry(priv->sh->mtrmng->policy_idx_tbl,
+ policy_id)) {
+ rte_spinlock_unlock(&mtr_policy->sl);
+ return -rte_mtr_error_set(error, ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY_ID, NULL,
+ "Fail to delete policy in index table.");
+ }
+ }
+ rte_spinlock_unlock(&mtr_policy->sl);
+ return 0;
+}
+
+/**
+ * Callback to add MTR policy.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[out] policy_id
+ * Pointer to policy id
+ * @param[in] actions
+ * Pointer to meter policy action detail.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_meter_policy_add(struct rte_eth_dev *dev,
+ uint32_t policy_id,
+ struct rte_mtr_meter_policy_params *policy,
+ struct rte_mtr_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct rte_flow_attr attr = { .transfer =
+ priv->config.dv_esw_en ? 1 : 0};
+ uint32_t sub_policy_idx = 0;
+ uint32_t policy_idx = 0;
+ struct mlx5_flow_meter_policy *mtr_policy = NULL;
+ struct mlx5_flow_meter_sub_policy *sub_policy;
+ bool is_rss = false;
+ bool is_def_policy = false;
+ uint32_t i;
+ int ret;
+ uint32_t policy_size = sizeof(struct mlx5_flow_meter_policy);
+ uint16_t sub_policy_num;
+ uint8_t domain_bitmap = 0;
+ union mlx5_l3t_data data;
+
+ if (!priv->mtr_en)
+ return -rte_mtr_error_set(error, ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY,
+ NULL, "meter policy unsupported.");
+ if (policy_id == MLX5_INVALID_POLICY_ID)
+ return -rte_mtr_error_set(error, ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY_ID, NULL,
+ "policy ID is invalid. ");
+ if (policy_id == priv->sh->mtrmng->def_policy_id)
+ return -rte_mtr_error_set(error, EEXIST,
+ RTE_MTR_ERROR_TYPE_METER_POLICY_ID, NULL,
+ "policy ID exists. ");
+ mtr_policy = mlx5_flow_meter_policy_find(dev, policy_id,
+ &policy_idx);
+ if (mtr_policy)
+ return -rte_mtr_error_set(error, EEXIST,
+ RTE_MTR_ERROR_TYPE_METER_POLICY_ID, NULL,
+ "policy ID exists. ");
+ ret = mlx5_flow_validate_mtr_acts(dev, policy->actions, &attr,
+ &is_rss, &domain_bitmap, &is_def_policy, error);
+ if (ret)
+ return ret;
+ if (!domain_bitmap)
+ return -rte_mtr_error_set(error, ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY,
+ NULL, "fail to find policy domain.");
+ if (is_def_policy) {
+ if (priv->sh->mtrmng->def_policy_id != MLX5_INVALID_POLICY_ID)
+ return -rte_mtr_error_set(error, EEXIST,
+ RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+ NULL, "a policy with similar actions "
+ "is already configured");
+ if (mlx5_flow_create_def_policy(dev))
+ return -rte_mtr_error_set(error, ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY,
+ NULL,
+ "fail to create non-terminated policy.");
+ priv->sh->mtrmng->def_policy_id = policy_id;
+ return 0;
+ }
+ if (!priv->sh->meter_aso_en)
+ return -rte_mtr_error_set(error, ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
+ "no ASO capability to support the policy ");
+ for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
+ if (!(domain_bitmap & (1 << i)))
+ continue;
+ if (is_rss) {
+ policy_size +=
+ sizeof(struct mlx5_flow_meter_sub_policy *) *
+ MLX5_MTR_RSS_MAX_SUB_POLICY;
+ break;
+ }
+ policy_size += sizeof(struct mlx5_flow_meter_sub_policy *);
+ }
+ mtr_policy = mlx5_malloc(MLX5_MEM_ZERO, policy_size,
+ RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
+ if (!mtr_policy)
+ return -rte_mtr_error_set(error, ENOMEM,
+ RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
+ "Memory alloc failed for meter policy.");
+ policy_size = sizeof(struct mlx5_flow_meter_policy);
+ for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
+ if (!(domain_bitmap & (1 << i)))
+ continue;
+ mtr_policy->ingress = (i == MLX5_MTR_DOMAIN_INGRESS) ? 1 : 0;
+ mtr_policy->egress = (i == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
+ mtr_policy->transfer = (i == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
+ sub_policy = mlx5_ipool_zmalloc
+ (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
+ &sub_policy_idx);
+ if (!sub_policy)
+ goto policy_add_err;
+ if (sub_policy_idx > MLX5_MAX_SUB_POLICY_TBL_NUM)
+ goto policy_add_err;
+ sub_policy->idx = sub_policy_idx;
+ sub_policy->main_policy = mtr_policy;
+ if (!policy_idx) {
+ policy_idx = sub_policy_idx;
+ sub_policy->main_policy_id = 1;
+ }
+ mtr_policy->sub_policys[i] =
+ (struct mlx5_flow_meter_sub_policy **)
+ ((uint8_t *)mtr_policy + policy_size);
+ mtr_policy->sub_policys[i][0] = sub_policy;
+ sub_policy_num = (mtr_policy->sub_policy_num >>
+ (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
+ MLX5_MTR_SUB_POLICY_NUM_MASK;
+ sub_policy_num++;
+ mtr_policy->sub_policy_num &= ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
+ (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i));
+ mtr_policy->sub_policy_num |=
+ (sub_policy_num & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
+ (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i);
+ if (is_rss) {
+ mtr_policy->is_rss = 1;
+ break;
+ }
+ policy_size += sizeof(struct mlx5_flow_meter_sub_policy *);
+ }
+ rte_spinlock_init(&mtr_policy->sl);
+ ret = mlx5_flow_create_mtr_acts(dev, mtr_policy,
+ policy->actions, error);
+ if (ret)
+ goto policy_add_err;
+ if (!is_rss) {
+ /* Create policy rules in HW. */
+ ret = mlx5_flow_create_policy_rules(dev, mtr_policy);
+ if (ret)
+ goto policy_add_err;
+ }
+ data.dword = policy_idx;
+ if (!priv->sh->mtrmng->policy_idx_tbl) {
+ priv->sh->mtrmng->policy_idx_tbl =
+ mlx5_l3t_create(MLX5_L3T_TYPE_DWORD);
+ if (!priv->sh->mtrmng->policy_idx_tbl)
+ goto policy_add_err;
+ }
+ if (mlx5_l3t_set_entry(priv->sh->mtrmng->policy_idx_tbl,
+ policy_id, &data))
+ goto policy_add_err;
+ return 0;
+policy_add_err:
+ if (mtr_policy) {
+ ret = __mlx5_flow_meter_policy_delete(dev, policy_id,
+ mtr_policy, error);
+ mlx5_free(mtr_policy);
+ if (ret)
+ return ret;
+ }
+ return -rte_mtr_error_set(error, ENOTSUP,
+ RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Failed to create devx policy.");
+}
+
+/**
+ * Callback to delete MTR policy.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in] policy_id
+ * Meter policy id.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_meter_policy_delete(struct rte_eth_dev *dev,
+ uint32_t policy_id,
+ struct rte_mtr_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_meter_policy *mtr_policy;
+ uint32_t policy_idx;
+ int ret;
+
+ if (policy_id == priv->sh->mtrmng->def_policy_id) {
+ if (priv->sh->mtrmng->def_policy_ref_cnt > 0)
+ return -rte_mtr_error_set(error, ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY_ID, NULL,
+ "Meter policy object is being used.");
+ priv->sh->mtrmng->def_policy_id = MLX5_INVALID_POLICY_ID;
+ return 0;
+ }
+ mtr_policy = mlx5_flow_meter_policy_find(dev, policy_id, &policy_idx);
+ if (!mtr_policy)
+ return -rte_mtr_error_set(error, ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY_ID, NULL,
+ "Meter policy id is invalid. ");
+ ret = __mlx5_flow_meter_policy_delete(dev, policy_id, mtr_policy,
+ error);
+ if (ret)
+ return ret;
+ mlx5_free(mtr_policy);
+ return 0;
+}
+
/**
* Modify the flow meter action.
*
@@ -942,6 +1286,9 @@ static const struct rte_mtr_ops mlx5_flow_mtr_ops = {
.capabilities_get = mlx5_flow_mtr_cap_get,
.meter_profile_add = mlx5_flow_meter_profile_add,
.meter_profile_delete = mlx5_flow_meter_profile_delete,
+ .meter_policy_validate = mlx5_flow_meter_policy_validate,
+ .meter_policy_add = mlx5_flow_meter_policy_add,
+ .meter_policy_delete = mlx5_flow_meter_policy_delete,
.destroy = mlx5_flow_meter_destroy,
.meter_enable = mlx5_flow_meter_enable,
.meter_disable = mlx5_flow_meter_disable,
@@ -989,22 +1336,32 @@ mlx5_flow_meter_find(struct mlx5_priv *priv, uint32_t meter_id,
struct mlx5_legacy_flow_meter *legacy_fm;
struct mlx5_legacy_flow_meters *fms = &priv->flow_meters;
struct mlx5_aso_mtr *aso_mtr;
- struct mlx5_aso_mtr_pools_mng *mtrmng = priv->sh->mtrmng;
+ struct mlx5_aso_mtr_pools_mng *pools_mng =
+ &priv->sh->mtrmng->pools_mng;
union mlx5_l3t_data data;
if (priv->sh->meter_aso_en) {
- rte_spinlock_lock(&mtrmng->mtrsl);
- if (mlx5_l3t_get_entry(priv->mtr_idx_tbl, meter_id, &data) ||
- !data.dword) {
- rte_spinlock_unlock(&mtrmng->mtrsl);
- return NULL;
+ rte_spinlock_lock(&pools_mng->mtrsl);
+ if (priv->mtr_idx_tbl) {
+ if (mlx5_l3t_get_entry(priv->mtr_idx_tbl,
+ meter_id, &data) ||
+ !data.dword) {
+ rte_spinlock_unlock(&pools_mng->mtrsl);
+ return NULL;
+ }
+ if (mtr_idx)
+ *mtr_idx = data.dword;
+ aso_mtr = mlx5_aso_meter_by_idx(priv, data.dword);
+ /* Remove reference taken by the mlx5_l3t_get_entry. */
+ mlx5_l3t_clear_entry(priv->mtr_idx_tbl, meter_id);
+ } else {
+ if (mtr_idx)
+ *mtr_idx = meter_id;
+ aso_mtr = mlx5_aso_meter_by_idx(priv, meter_id);
}
- if (mtr_idx)
- *mtr_idx = data.dword;
- aso_mtr = mlx5_aso_meter_by_idx(priv, data.dword);
- /* Remove reference taken by the mlx5_l3t_get_entry. */
- mlx5_l3t_clear_entry(priv->mtr_idx_tbl, meter_id);
- rte_spinlock_unlock(&mtrmng->mtrsl);
+ rte_spinlock_unlock(&pools_mng->mtrsl);
+ if (!aso_mtr || aso_mtr->state == ASO_METER_FREE)
+ return NULL;
return &aso_mtr->fm;
}
TAILQ_FOREACH(legacy_fm, fms, next)
@@ -1169,30 +1526,31 @@ int
mlx5_flow_meter_flush(struct rte_eth_dev *dev, struct rte_mtr_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_aso_mtr_pools_mng *mtrmng = priv->sh->mtrmng;
+ struct mlx5_aso_mtr_pools_mng *pools_mng =
+ &priv->sh->mtrmng->pools_mng;
struct mlx5_legacy_flow_meters *fms = &priv->flow_meters;
struct mlx5_mtr_profiles *fmps = &priv->flow_meter_profiles;
struct mlx5_flow_meter_profile *fmp;
struct mlx5_legacy_flow_meter *legacy_fm;
struct mlx5_flow_meter_info *fm;
struct mlx5_aso_mtr_pool *mtr_pool;
+ struct mlx5_flow_meter_sub_policy *sub_policy;
void *tmp;
- uint32_t i, offset, mtr_idx;
+ uint32_t i, offset, mtr_idx, policy_idx;
+ void *entry;
+ if (!priv->mtr_en)
+ return 0;
if (priv->sh->meter_aso_en) {
- i = mtrmng->n_valid;
+ i = pools_mng->n_valid;
while (i--) {
- mtr_pool = mtrmng->pools[i];
+ mtr_pool = pools_mng->pools[i];
for (offset = 0; offset < MLX5_ASO_MTRS_PER_POOL;
offset++) {
fm = &mtr_pool->mtrs[offset].fm;
mtr_idx = MLX5_MAKE_MTR_IDX(i, offset);
- if (mlx5_flow_meter_params_flush(dev,
- fm, mtr_idx))
- return -rte_mtr_error_set
- (error, EINVAL,
- RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
- NULL, "MTR object meter profile invalid.");
+ (void)mlx5_flow_meter_params_flush(dev,
+ fm, mtr_idx);
}
}
} else {
@@ -1200,9 +1558,35 @@ mlx5_flow_meter_flush(struct rte_eth_dev *dev, struct rte_mtr_error *error)
fm = &legacy_fm->fm;
if (mlx5_flow_meter_params_flush(dev, fm, 0))
return -rte_mtr_error_set(error, EINVAL,
- RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
- NULL, "MTR object meter profile invalid.");
+ RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+ NULL, "MTR object meter profile invalid.");
+ }
+ }
+ if (priv->sh->mtrmng->policy_idx_tbl) {
+ MLX5_L3T_FOREACH(priv->sh->mtrmng->policy_idx_tbl,
+ i, entry) {
+ policy_idx = *(uint32_t *)entry;
+ sub_policy = mlx5_ipool_get
+ (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
+ policy_idx);
+ if (!sub_policy)
+ return -rte_mtr_error_set(error,
+ EINVAL,
+ RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+ NULL, "MTR object "
+ "meter policy invalid.");
+ if (__mlx5_flow_meter_policy_delete(dev, i,
+ sub_policy->main_policy,
+ error))
+ return -rte_mtr_error_set(error,
+ EINVAL,
+ RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+ NULL, "MTR object "
+ "meter policy invalid.");
+ mlx5_free(sub_policy->main_policy);
}
+ mlx5_l3t_destroy(priv->sh->mtrmng->policy_idx_tbl);
+ priv->sh->mtrmng->policy_idx_tbl = NULL;
}
TAILQ_FOREACH_SAFE(fmp, fmps, next, tmp) {
/* Check unused. */
@@ -1211,5 +1595,8 @@ mlx5_flow_meter_flush(struct rte_eth_dev *dev, struct rte_mtr_error *error)
TAILQ_REMOVE(&priv->flow_meter_profiles, fmp, next);
mlx5_free(fmp);
}
+ /* Delete default policy table. */
+ mlx5_flow_destroy_def_policy(dev);
+ mlx5_flow_destroy_mtr_drop_tbls(dev);
return 0;
}
@@ -1179,6 +1179,7 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
mlx5_traffic_disable(dev);
/* All RX queue flags will be cleared in the flush interface. */
mlx5_flow_list_flush(dev, &priv->flows, true);
+ mlx5_flow_meter_flush(dev, NULL);
mlx5_rx_intr_vec_disable(dev);
priv->sh->port[priv->dev_port - 1].ih_port_id = RTE_MAX_ETHPORTS;
priv->sh->port[priv->dev_port - 1].devx_ih_port_id = RTE_MAX_ETHPORTS;