@@ -2274,6 +2274,29 @@ mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
return NULL;
}
+/**
+ * Match the queues listed in the arguments against the queues contained in
+ * the indirection table object.
+ *
+ * @param ind_tbl
+ * Pointer to indirection table to match.
+ * @param queues
+ *   Queues to match against the queues in the indirection table.
+ * @param queues_n
+ * Number of queues in the array.
+ *
+ * @return
+ *   1 if all queues in the indirection table match, 0 otherwise.
+ */
+static int
+mlx5_ind_table_obj_match_queues(const struct mlx5_ind_table_obj *ind_tbl,
+ const uint16_t *queues, uint32_t queues_n)
+{
+ return (ind_tbl->queues_n == queues_n) &&
+ (!memcmp(ind_tbl->queues, queues,
+ ind_tbl->queues_n * sizeof(ind_tbl->queues[0])));
+}
+
/**
* Get an indirection table.
*
@@ -2370,6 +2393,102 @@ mlx5_ind_table_obj_verify(struct rte_eth_dev *dev)
return ret;
}
+/**
+ * Set TIR attribute struct with relevant input values.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in] rss_key
+ * RSS key for the Rx hash queue.
+ * @param[in] rss_key_len
+ * RSS key length.
+ * @param[in] hash_fields
+ * Verbs protocol hash field to make the RSS on.
+ * @param[in] queues
+ * Queues entering in hash queue. In case of empty hash_fields only the
+ * first queue index will be taken for the indirection table.
+ * @param[in] queues_n
+ * Number of queues.
+ * @param[in] tunnel
+ * Tunnel type.
+ * @param[in] rxq_obj_type
+ *   Rx queue object type.
+ * @param[in] ind_tbl_id
+ *   ID of the RQT (indirection table) object.
+ * @param[out] tir_attr
+ *   Parameters structure for TIR creation/modification.
+ */
+static void
+mlx5_devx_tir_attr_set(struct rte_eth_dev *dev,
+ const uint8_t *rss_key, uint32_t rss_key_len,
+ uint64_t hash_fields,
+ const uint16_t *queues, uint32_t queues_n,
+ int tunnel,
+ enum mlx5_rxq_obj_type rxq_obj_type, int ind_tbl_id,
+ struct mlx5_devx_tir_attr *tir_attr)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ uint32_t i;
+ uint32_t lro = 1;
+
+	/* Enable TIR LRO only if all the queues were configured for it. */
+ for (i = 0; i < queues_n; ++i) {
+ if (!(*priv->rxqs)[queues[i]]->lro) {
+ lro = 0;
+ break;
+ }
+ }
+ memset(tir_attr, 0, sizeof(*tir_attr));
+ tir_attr->disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
+ tir_attr->rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ;
+ tir_attr->tunneled_offload_en = !!tunnel;
+ /* If needed, translate hash_fields bitmap to PRM format. */
+ if (hash_fields) {
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+ struct mlx5_rx_hash_field_select *rx_hash_field_select =
+ hash_fields & IBV_RX_HASH_INNER ?
+ &tir_attr->rx_hash_field_selector_inner :
+ &tir_attr->rx_hash_field_selector_outer;
+#else
+ struct mlx5_rx_hash_field_select *rx_hash_field_select =
+ &tir_attr->rx_hash_field_selector_outer;
+#endif
+
+ /* 1 bit: 0: IPv4, 1: IPv6. */
+ rx_hash_field_select->l3_prot_type =
+ !!(hash_fields & MLX5_IPV6_IBV_RX_HASH);
+ /* 1 bit: 0: TCP, 1: UDP. */
+ rx_hash_field_select->l4_prot_type =
+ !!(hash_fields & MLX5_UDP_IBV_RX_HASH);
+ /* Bitmask which sets which fields to use in RX Hash. */
+ rx_hash_field_select->selected_fields =
+ ((!!(hash_fields & MLX5_L3_SRC_IBV_RX_HASH)) <<
+ MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP) |
+ (!!(hash_fields & MLX5_L3_DST_IBV_RX_HASH)) <<
+ MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP |
+ (!!(hash_fields & MLX5_L4_SRC_IBV_RX_HASH)) <<
+ MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT |
+ (!!(hash_fields & MLX5_L4_DST_IBV_RX_HASH)) <<
+ MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT;
+ }
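+	/* Select the transport domain according to the Rx queue object type. */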
+ if (rxq_obj_type == MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN)
+ tir_attr->transport_domain = priv->sh->td->id;
+ else
+ tir_attr->transport_domain = priv->sh->tdn;
+ memcpy(tir_attr->rx_hash_toeplitz_key, rss_key, rss_key_len);
+ tir_attr->indirect_table = ind_tbl_id;
+ if (dev->data->dev_conf.lpbk_mode)
+ tir_attr->self_lb_block =
+ MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
+ if (lro) {
+ tir_attr->lro_timeout_period_usecs =
+ priv->config.lro.timeout;
+ tir_attr->lro_max_msg_sz = priv->max_lro_msg_size;
+ tir_attr->lro_enable_mask =
+ MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
+ MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO;
+ }
+}
+
/**
* Create an Rx Hash queue.
*
@@ -2493,67 +2612,11 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
}
} else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */
struct mlx5_devx_tir_attr tir_attr;
- uint32_t i;
- uint32_t lro = 1;
-
- /* Enable TIR LRO only if all the queues were configured for. */
- for (i = 0; i < queues_n; ++i) {
- if (!(*priv->rxqs)[queues[i]]->lro) {
- lro = 0;
- break;
- }
- }
- memset(&tir_attr, 0, sizeof(tir_attr));
- tir_attr.disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
- tir_attr.rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ;
- tir_attr.tunneled_offload_en = !!tunnel;
- /* If needed, translate hash_fields bitmap to PRM format. */
- if (hash_fields) {
-#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
- struct mlx5_rx_hash_field_select *rx_hash_field_select =
- hash_fields & IBV_RX_HASH_INNER ?
- &tir_attr.rx_hash_field_selector_inner :
- &tir_attr.rx_hash_field_selector_outer;
-#else
- struct mlx5_rx_hash_field_select *rx_hash_field_select =
- &tir_attr.rx_hash_field_selector_outer;
-#endif
-
- /* 1 bit: 0: IPv4, 1: IPv6. */
- rx_hash_field_select->l3_prot_type =
- !!(hash_fields & MLX5_IPV6_IBV_RX_HASH);
- /* 1 bit: 0: TCP, 1: UDP. */
- rx_hash_field_select->l4_prot_type =
- !!(hash_fields & MLX5_UDP_IBV_RX_HASH);
- /* Bitmask which sets which fields to use in RX Hash. */
- rx_hash_field_select->selected_fields =
- ((!!(hash_fields & MLX5_L3_SRC_IBV_RX_HASH)) <<
- MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP) |
- (!!(hash_fields & MLX5_L3_DST_IBV_RX_HASH)) <<
- MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP |
- (!!(hash_fields & MLX5_L4_SRC_IBV_RX_HASH)) <<
- MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT |
- (!!(hash_fields & MLX5_L4_DST_IBV_RX_HASH)) <<
- MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT;
- }
- if (rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN)
- tir_attr.transport_domain = priv->sh->td->id;
- else
- tir_attr.transport_domain = priv->sh->tdn;
- memcpy(tir_attr.rx_hash_toeplitz_key, rss_key,
- MLX5_RSS_HASH_KEY_LEN);
- tir_attr.indirect_table = ind_tbl->rqt->id;
- if (dev->data->dev_conf.lpbk_mode)
- tir_attr.self_lb_block =
- MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
- if (lro) {
- tir_attr.lro_timeout_period_usecs =
- priv->config.lro.timeout;
- tir_attr.lro_max_msg_sz = priv->max_lro_msg_size;
- tir_attr.lro_enable_mask =
- MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
- MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO;
- }
+		mlx5_devx_tir_attr_set(dev, rss_key, rss_key_len, hash_fields,
+				       queues, queues_n, tunnel,
+				       rxq_ctrl->obj->type, ind_tbl->rqt->id,
+				       &tir_attr);
tir = mlx5_devx_cmd_create_tir(priv->sh->ctx, &tir_attr);
if (!tir) {
DRV_LOG(ERR, "port %u cannot create DevX TIR",
@@ -2655,6 +2719,120 @@ mlx5_hrxq_get(struct rte_eth_dev *dev,
return 0;
}
+/**
+ * Modify an Rx Hash queue configuration.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param hrxq_idx
+ *   Index of the Hash Rx queue to modify.
+ * @param rss_key
+ * RSS key for the Rx hash queue.
+ * @param rss_key_len
+ * RSS key length.
+ * @param hash_fields
+ * Verbs protocol hash field to make the RSS on.
+ * @param queues
+ * Queues entering in hash queue. In case of empty hash_fields only the
+ * first queue index will be taken for the indirection table.
+ * @param queues_n
+ * Number of queues.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hrxq_idx,
+ const uint8_t *rss_key, uint32_t rss_key_len,
+ uint64_t hash_fields,
+ const uint16_t *queues, uint32_t queues_n)
+{
+ int err;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[queues[0]];
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
+ struct mlx5_devx_modify_tir_attr modify_tir = {0};
+ struct mlx5_ind_table_obj *ind_tbl = NULL;
+	enum mlx5_ind_tbl_type ind_tbl_type =
+		rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_IBV ?
+		MLX5_IND_TBL_TYPE_IBV : MLX5_IND_TBL_TYPE_DEVX;
+ struct mlx5_hrxq *hrxq =
+ mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
+
+ if (!hrxq) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+	/* Validations. */
+	if (hrxq->ind_table->type != MLX5_IND_TBL_TYPE_DEVX ||
+	    ind_tbl_type != MLX5_IND_TBL_TYPE_DEVX) {
+		/* Shared action is supported by the DevX interface only. */
+		rte_errno = EINVAL;
+		return -rte_errno;
+	}
+ if (hrxq->rss_key_len != rss_key_len) {
+		/* rss_key_len is fixed at 40 bytes and must not change. */
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+
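+	/* In case of empty hash_fields, only the first queue index is used. */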
+ queues_n = hash_fields ? queues_n : 1;
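+	/*
+	 * Reuse the current indirection table if the queue list matches,
+	 * otherwise look up an existing one or create a new one.
+	 */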
+ if (mlx5_ind_table_obj_match_queues(hrxq->ind_table,
+ queues, queues_n)) {
+ ind_tbl = hrxq->ind_table;
+ } else {
+ ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
+ if (!ind_tbl)
+ ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n,
+							 ind_tbl_type);
+ }
+ if (!ind_tbl) {
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
+
+	/*
+	 * Fields not tested for modification:
+	 * - rx_hash_symmetric is not set in hrxq_new(),
+	 * - rx_hash_fn is hard-coded in hrxq_new(),
+	 * - lro_xxx are not set after Rx queue setup.
+	 */
+	/* Set modify bits only for the attributes that actually change. */
+	if (ind_tbl != hrxq->ind_table)
+		modify_tir.modify_bitmask |=
+			MLX5_MODIFY_TIR_IN_MODIFY_BITMASK_INDIRECT_TABLE;
+	if (hash_fields != hrxq->hash_fields ||
+	    memcmp(hrxq->rss_key, rss_key, rss_key_len))
+		modify_tir.modify_bitmask |=
+			MLX5_MODIFY_TIR_IN_MODIFY_BITMASK_HASH;
+
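+	/* Build the complete TIR attributes from the new RSS configuration. */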
+	mlx5_devx_tir_attr_set(dev, rss_key, rss_key_len, hash_fields,
+			       queues, queues_n,
+			       0, /* N/A - tunnel modification unsupported. */
+			       rxq_ctrl->obj->type, ind_tbl->rqt->id,
+			       &modify_tir.tir);
+ if (mlx5_devx_cmd_modify_tir(hrxq->tir, &modify_tir)) {
+ DRV_LOG(ERR, "port %u cannot modify DevX TIR",
+ dev->data->port_id);
+ rte_errno = errno;
+ goto error;
+ }
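+	/* Replace the old indirection table with the new one, if changed. */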
+ if (ind_tbl != hrxq->ind_table) {
+ mlx5_ind_table_obj_release(dev, hrxq->ind_table);
+ hrxq->ind_table = ind_tbl;
+ }
+ hrxq->hash_fields = hash_fields;
+ memcpy(hrxq->rss_key, rss_key, rss_key_len);
+ return 0;
+error:
+ err = rte_errno;
+ if (ind_tbl != hrxq->ind_table)
+ mlx5_ind_table_obj_release(dev, ind_tbl);
+ rte_errno = err;
+ return -rte_errno;
+}
+
/**
* Release the hash Rx queue.
*
@@ -424,6 +424,10 @@ struct mlx5_hrxq *mlx5_hrxq_drop_new(struct rte_eth_dev *dev);
void mlx5_hrxq_drop_release(struct rte_eth_dev *dev);
uint64_t mlx5_get_rx_port_offloads(void);
uint64_t mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev);
+int mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hrxq_idx,
+ const uint8_t *rss_key, uint32_t rss_key_len,
+ uint64_t hash_fields,
+ const uint16_t *queues, uint32_t queues_n);
/* mlx5_txq.c */