@@ -697,6 +697,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
int own_domain_id = 0;
uint16_t port_id;
unsigned int i;
+ uint32_t log_obj_size;
#ifdef HAVE_MLX5DV_DR_DEVX_PORT
struct mlx5dv_devx_port devx_port = { .comp_mask = 0 };
#endif
@@ -1267,6 +1268,22 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
priv->mtr_color_reg);
}
}
+ if (config->hca_attr.qos.sup &&
+ config->hca_attr.qos.flow_meter_aso_sup) {
+ log_obj_size =
+ rte_log2_u32(MLX5_ASO_MTRS_PER_POOL >> 1);
+ if (log_obj_size >=
+ config->hca_attr.qos.log_meter_aso_granularity &&
+ log_obj_size <=
+ config->hca_attr.qos.log_meter_aso_max_alloc) {
+ sh->meter_aso_en = 1;
+ err = mlx5_aso_flow_mtrs_mng_init(sh);
+ if (err) {
+ err = -err;
+ goto error;
+ }
+ }
+ }
#endif
#ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
if (config->hca_attr.flow_hit_aso &&
@@ -16,7 +16,7 @@ sources = files(
'mlx5_flow_meter.c',
'mlx5_flow_dv.c',
'mlx5_flow_verbs.c',
- 'mlx5_flow_age.c',
+ 'mlx5_flow_aso.c',
'mlx5_mac.c',
'mlx5_mr.c',
'mlx5_rss.c',
@@ -383,7 +383,7 @@ mlx5_flow_aso_age_mng_init(struct mlx5_dev_ctx_shared *sh)
rte_errno = ENOMEM;
return -ENOMEM;
}
- err = mlx5_aso_queue_init(sh);
+ err = mlx5_aso_queue_init(sh, ASO_OPC_MOD_FLOW_HIT);
if (err) {
mlx5_free(sh->aso_age_mng);
return -1;
@@ -405,8 +405,8 @@ mlx5_flow_aso_age_mng_close(struct mlx5_dev_ctx_shared *sh)
{
int i, j;
- mlx5_aso_queue_stop(sh);
- mlx5_aso_queue_uninit(sh);
+ mlx5_aso_flow_hit_queue_poll_stop(sh);
+ mlx5_aso_queue_uninit(sh, ASO_OPC_MOD_FLOW_HIT);
if (sh->aso_age_mng->pools) {
struct mlx5_aso_age_pool *pool;
@@ -544,6 +544,71 @@ mlx5_flow_counters_mng_close(struct mlx5_dev_ctx_shared *sh)
 	memset(&sh->cmng, 0, sizeof(sh->cmng));
 }
 
+/**
+ * Initialize the ASO flow meters management structure.
+ *
+ * @param[in] sh
+ *   Pointer to mlx5_dev_ctx_shared object to initialize.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_aso_flow_mtrs_mng_init(struct mlx5_dev_ctx_shared *sh)
+{
+	if (sh->mtrmng)
+		return 0;
+	sh->mtrmng = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*sh->mtrmng),
+				 RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
+	if (!sh->mtrmng) {
+		DRV_LOG(ERR, "mlx5_aso_mtr_pools_mng allocation was failed.");
+		rte_errno = ENOMEM;
+		return -ENOMEM;
+	}
+	sh->mtrmng->mtr_idx_tbl = mlx5_l3t_create(MLX5_L3T_TYPE_DWORD);
+	if (!sh->mtrmng->mtr_idx_tbl) {
+		DRV_LOG(ERR, "fail to create meter lookup table.");
+		/* Release the half-built struct so a retry can start clean. */
+		mlx5_free(sh->mtrmng);
+		sh->mtrmng = NULL;
+		rte_errno = ENOMEM;
+		return -ENOMEM;
+	}
+	rte_spinlock_init(&sh->mtrmng->mtrsl);
+	LIST_INIT(&sh->mtrmng->meters);
+	return 0;
+}
+
+/**
+ * Close and release all the resources of
+ * the ASO flow meter management structure.
+ *
+ * @param[in] sh
+ *   Pointer to mlx5_dev_ctx_shared object to free.
+ */
+static void
+mlx5_aso_flow_mtrs_mng_close(struct mlx5_dev_ctx_shared *sh)
+{
+	struct mlx5_aso_mtr_pool *mtr_pool;
+	struct mlx5_aso_mtr_pools_mng *mtrmng = sh->mtrmng;
+	uint32_t idx;
+
+	mlx5_aso_queue_uninit(sh, ASO_OPC_MOD_POLICER);
+	idx = mtrmng->n_valid;
+	while (idx--) {
+		mtr_pool = mtrmng->pools[idx];
+		claim_zero(mlx5_devx_cmd_destroy
+			(mtr_pool->devx_obj));
+		mtrmng->n_valid--;
+		mlx5_free(mtr_pool);
+	}
+	mlx5_free(mtrmng->pools);
+	mlx5_l3t_destroy(mtrmng->mtr_idx_tbl);
+	mtrmng->mtr_idx_tbl = NULL;
+	mlx5_free(sh->mtrmng);
+	sh->mtrmng = NULL;
+}
+
 /* Send FLOW_AGED event if needed. */
 void
 mlx5_age_event_prepare(struct mlx5_dev_ctx_shared *sh)
@@ -1092,6 +1157,8 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
 		mlx5_flow_aso_age_mng_close(sh);
 		sh->aso_age_mng = NULL;
 	}
+	if (sh->mtrmng)
+		mlx5_aso_flow_mtrs_mng_close(sh);
 	mlx5_flow_ipool_destroy(sh);
 	mlx5_os_dev_shared_handler_uninstall(sh);
 	if (sh->cnt_id_tbl) {
@@ -500,8 +500,13 @@ struct mlx5_aso_devx_mr {
};
struct mlx5_aso_sq_elem {
- struct mlx5_aso_age_pool *pool;
- uint16_t burst_size;
+ union {
+ struct {
+ struct mlx5_aso_age_pool *pool;
+ uint16_t burst_size;
+ };
+ struct mlx5_aso_mtr *mtr;
+ };
};
struct mlx5_aso_sq {
@@ -1244,6 +1249,7 @@ int mlx5_hairpin_cap_get(struct rte_eth_dev *dev,
bool mlx5_flex_parser_ecpri_exist(struct rte_eth_dev *dev);
int mlx5_flex_parser_ecpri_alloc(struct rte_eth_dev *dev);
int mlx5_flow_aso_age_mng_init(struct mlx5_dev_ctx_shared *sh);
+int mlx5_aso_flow_mtrs_mng_init(struct mlx5_dev_ctx_shared *sh);
/* mlx5_ethdev.c */
@@ -1505,9 +1511,11 @@ eth_tx_burst_t mlx5_select_tx_function(struct rte_eth_dev *dev);
/* mlx5_flow_aso.c */
-int mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh);
-int mlx5_aso_queue_start(struct mlx5_dev_ctx_shared *sh);
-int mlx5_aso_queue_stop(struct mlx5_dev_ctx_shared *sh);
-void mlx5_aso_queue_uninit(struct mlx5_dev_ctx_shared *sh);
+int mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh,
+ enum mlx5_access_aso_opc_mod aso_opc_mod);
+int mlx5_aso_flow_hit_queue_poll_start(struct mlx5_dev_ctx_shared *sh);
+int mlx5_aso_flow_hit_queue_poll_stop(struct mlx5_dev_ctx_shared *sh);
+void mlx5_aso_queue_uninit(struct mlx5_dev_ctx_shared *sh,
+ enum mlx5_access_aso_opc_mod aso_opc_mod);
#endif /* RTE_PMD_MLX5_H_ */
@@ -811,8 +811,8 @@ struct mlx5_flow {
#define MLX5_FLOW_METER_DISABLE 0
#define MLX5_FLOW_METER_ENABLE 1
-#define MLX5_ASO_CQE_RESPONSE_DELAY 10
-#define MLX5_MTR_POLL_CQE_TIMES 100000u /* 1s*/
+#define MLX5_ASO_WQE_CQE_RESPONSE_DELAY 10u
+#define MLX5_MTR_POLL_WQE_CQE_TIMES 100000u
#define MLX5_MAN_WIDTH 8
/* Legacy Meter parameter structure. */
similarity index 87%
rename from drivers/net/mlx5/mlx5_flow_age.c
rename to drivers/net/mlx5/mlx5_flow_aso.c
@@ -196,7 +196,6 @@ mlx5_aso_destroy_sq(struct mlx5_aso_sq *sq)
}
if (sq->cq.cq)
mlx5_aso_cq_destroy(&sq->cq);
- mlx5_aso_devx_dereg_mr(&sq->mr);
memset(sq, 0, sizeof(*sq));
}
@@ -207,7 +206,7 @@ mlx5_aso_destroy_sq(struct mlx5_aso_sq *sq)
* ASO SQ to initialize.
*/
static void
-mlx5_aso_init_sq(struct mlx5_aso_sq *sq)
+mlx5_aso_age_init_sq(struct mlx5_aso_sq *sq)
{
volatile struct mlx5_aso_wqe *restrict wqe;
int i;
@@ -233,6 +232,39 @@ mlx5_aso_init_sq(struct mlx5_aso_sq *sq)
}
}
+/**
+ * Initialize Send Queue used for ASO flow meter access.
+ *
+ * @param[in] sq
+ * ASO SQ to initialize.
+ */
+static void
+mlx5_aso_mtr_init_sq(struct mlx5_aso_sq *sq)
+{
+ volatile struct mlx5_aso_wqe *restrict wqe;
+ int i;
+ int size = 1 << sq->log_desc_n;
+ uint32_t idx;
+
+ /* All the next fields state should stay constant. */
+ for (i = 0, wqe = &sq->wqes[0]; i < size; ++i, ++wqe) {
+ wqe->general_cseg.sq_ds = rte_cpu_to_be_32((sq->sqn << 8) |
+ (sizeof(*wqe) >> 4));
+ wqe->aso_cseg.operand_masks = RTE_BE32(0u |
+ (ASO_OPER_LOGICAL_OR << ASO_CSEG_COND_OPER_OFFSET) |
+ (ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_1_OPER_OFFSET) |
+ (ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_0_OPER_OFFSET) |
+ (BYTEWISE_64BYTE << ASO_CSEG_DATA_MASK_MODE_OFFSET));
+ wqe->general_cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
+ MLX5_COMP_MODE_OFFSET);
+ for (idx = 0; idx < MLX5_ASO_METERS_PER_WQE;
+ idx++)
+ wqe->aso_dseg.mtrs[idx].v_bo_sc_bbog_mm =
+ RTE_BE32((1 << ASO_DSEG_VALID_OFFSET) |
+ (MLX5_FLOW_COLOR_GREEN << ASO_DSEG_SC_OFFSET));
+ }
+}
+
/**
* Create Send Queue used for ASO access.
*
@@ -267,9 +299,6 @@ mlx5_aso_sq_create(void *ctx, struct mlx5_aso_sq *sq, int socket,
uint32_t wq_size = sizeof(struct mlx5_aso_wqe) * sq_desc_n;
int ret;
- if (mlx5_aso_devx_reg_mr(ctx, (MLX5_ASO_AGE_ACTIONS_PER_POOL / 8) *
- sq_desc_n, &sq->mr, socket, pdn))
- return -1;
if (mlx5_aso_cq_create(ctx, &sq->cq, log_desc_n, socket,
mlx5_os_get_devx_uar_page_id(uar), eqn))
goto error;
@@ -326,7 +355,6 @@ mlx5_aso_sq_create(void *ctx, struct mlx5_aso_sq *sq, int socket,
sq->sqn = sq->sq->id;
sq->db_rec = RTE_PTR_ADD(sq->umem_buf, (uintptr_t)(wq_attr->dbr_addr));
sq->uar_addr = (volatile uint64_t *)((uint8_t *)uar->base_addr + 0x800);
- mlx5_aso_init_sq(sq);
return 0;
error:
mlx5_aso_destroy_sq(sq);
@@ -343,11 +371,37 @@ mlx5_aso_sq_create(void *ctx, struct mlx5_aso_sq *sq, int socket,
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh)
+mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh,
+ enum mlx5_access_aso_opc_mod aso_opc_mod)
{
- return mlx5_aso_sq_create(sh->ctx, &sh->aso_age_mng->aso_sq, 0,
- sh->tx_uar, sh->pdn, sh->eqn,
- MLX5_ASO_QUEUE_LOG_DESC);
+ uint32_t sq_desc_n = 1 << MLX5_ASO_QUEUE_LOG_DESC;
+
+ switch (aso_opc_mod) {
+ case ASO_OPC_MOD_FLOW_HIT:
+ if (mlx5_aso_devx_reg_mr(sh->ctx,
+ (MLX5_ASO_AGE_ACTIONS_PER_POOL / 8) *
+ sq_desc_n, &sh->aso_age_mng->aso_sq.mr, 0, sh->pdn))
+ return -1;
+ if (mlx5_aso_sq_create(sh->ctx, &sh->aso_age_mng->aso_sq, 0,
+ sh->tx_uar, sh->pdn, sh->eqn,
+ MLX5_ASO_QUEUE_LOG_DESC)) {
+ mlx5_aso_devx_dereg_mr(&sh->aso_age_mng->aso_sq.mr);
+ return -1;
+ }
+ mlx5_aso_age_init_sq(&sh->aso_age_mng->aso_sq);
+ break;
+ case ASO_OPC_MOD_POLICER:
+ if (mlx5_aso_sq_create(sh->ctx, &sh->mtrmng->sq, 0,
+ sh->tx_uar, sh->pdn, sh->eqn,
+ MLX5_ASO_QUEUE_LOG_DESC))
+ return -1;
+ mlx5_aso_mtr_init_sq(&sh->mtrmng->sq);
+ break;
+ default:
+ DRV_LOG(ERR, "Unknown ASO operation mode");
+ return -1;
+ }
+ return 0;
}
/**
@@ -357,9 +411,24 @@ mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh)
* Pointer to shared device context.
*/
void
-mlx5_aso_queue_uninit(struct mlx5_dev_ctx_shared *sh)
+mlx5_aso_queue_uninit(struct mlx5_dev_ctx_shared *sh,
+ enum mlx5_access_aso_opc_mod aso_opc_mod)
{
- mlx5_aso_destroy_sq(&sh->aso_age_mng->aso_sq);
+ struct mlx5_aso_sq *sq;
+
+ switch (aso_opc_mod) {
+ case ASO_OPC_MOD_FLOW_HIT:
+ mlx5_aso_devx_dereg_mr(&sh->aso_age_mng->aso_sq.mr);
+ sq = &sh->aso_age_mng->aso_sq;
+ break;
+ case ASO_OPC_MOD_POLICER:
+ sq = &sh->mtrmng->sq;
+ break;
+ default:
+ DRV_LOG(ERR, "Unknown ASO operation mode");
+ return;
+ }
+ mlx5_aso_destroy_sq(sq);
}
/**
@@ -634,7 +703,7 @@ mlx5_flow_aso_alarm(void *arg)
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-mlx5_aso_queue_start(struct mlx5_dev_ctx_shared *sh)
+mlx5_aso_flow_hit_queue_poll_start(struct mlx5_dev_ctx_shared *sh)
{
if (rte_eal_alarm_set(US_PER_S, mlx5_flow_aso_alarm, sh)) {
DRV_LOG(ERR, "Cannot reinitialize ASO age alarm.");
@@ -653,7 +722,7 @@ mlx5_aso_queue_start(struct mlx5_dev_ctx_shared *sh)
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-mlx5_aso_queue_stop(struct mlx5_dev_ctx_shared *sh)
+mlx5_aso_flow_hit_queue_poll_stop(struct mlx5_dev_ctx_shared *sh)
{
int retries = 1024;
@@ -5283,6 +5283,11 @@ flow_dv_mtr_container_resize(struct rte_eth_dev *dev)
 		rte_errno = ENOMEM;
 		return -ENOMEM;
 	}
+	if (!mtrmng->n &&
+	    mlx5_aso_queue_init(priv->sh, ASO_OPC_MOD_POLICER)) {
+		mlx5_free(pools);
+		return -ENOMEM;
+	}
 	if (old_pools)
 		memcpy(pools, old_pools, mtrmng->n *
 		       sizeof(struct mlx5_aso_mtr_pool *));
@@ -9980,7 +9985,7 @@ flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev)
mlx5_free(old_pools);
} else {
/* First ASO flow hit allocation - starting ASO data-path. */
- int ret = mlx5_aso_queue_start(priv->sh);
+ int ret = mlx5_aso_flow_hit_queue_poll_start(priv->sh);
if (ret) {
mlx5_free(pools);