[dpdk-dev] [PATCH v1 12/21] net/mlx5: remove queue drop support

Nelio Laranjeiro nelio.laranjeiro at 6wind.com
Wed Aug 2 16:10:28 CEST 2017


In preparation for the upcoming rework of the Rx hash queues, after which this
feature will be implemented completely differently, drop flow support is
removed for now and will be re-introduced later.

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro at 6wind.com>
---
 drivers/net/mlx5/mlx5.h      |   1 -
 drivers/net/mlx5/mlx5_flow.c | 228 +++----------------------------------------
 2 files changed, 15 insertions(+), 214 deletions(-)
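
Not part of the patch, for illustration only: a minimal sketch of the kind of
rte_flow rule affected by this change, assuming the 17.08-era rte_flow API
(uint8_t port identifiers); the helper name try_drop_flow is hypothetical.
With this patch applied, mlx5 rejects the DROP action at validation time with
ENOTSUP ("Drop queue not supported") instead of programming a dedicated drop
queue.

 #include <rte_flow.h>
 #include <rte_errno.h>

 /* Request an ingress rule dropping all Ethernet traffic on a port. */
 static int
 try_drop_flow(uint8_t port_id)
 {
 	struct rte_flow_error error;
 	const struct rte_flow_attr attr = { .ingress = 1 };
 	const struct rte_flow_item pattern[] = {
 		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 		{ .type = RTE_FLOW_ITEM_TYPE_END },
 	};
 	const struct rte_flow_action actions[] = {
 		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
 		{ .type = RTE_FLOW_ACTION_TYPE_END },
 	};

 	if (rte_flow_validate(port_id, &attr, pattern, actions, &error)) {
 		/* On mlx5, this now fails with rte_errno == ENOTSUP. */
 		return -rte_errno;
 	}
 	return 0;
 }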

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 448995e..a0266d4 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -143,7 +143,6 @@ struct priv {
 	struct rte_intr_handle intr_handle; /* Interrupt handler. */
 	unsigned int (*reta_idx)[]; /* RETA index table. */
 	unsigned int reta_idx_n; /* RETA index size. */
-	struct rte_flow_drop *flow_drop_queue; /* Flow drop queue. */
 	TAILQ_HEAD(mlx5_flows, rte_flow) flows; /* RTE Flow rules. */
 	LIST_HEAD(mr, mlx5_mr) mr; /* Memory region. */
 	LIST_HEAD(rxq, mlx5_rxq_ctrl) rxqsctrl; /* DPDK Rx queues. */
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 9ed8d05..151854a 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -96,7 +96,6 @@ struct rte_flow {
 	struct ibv_exp_wq *wq; /**< Verbs work queue. */
 	struct ibv_cq *cq; /**< Verbs completion queue. */
 	uint32_t mark:1; /**< Set if the flow is marked. */
-	uint32_t drop:1; /**< Drop queue. */
 	uint64_t hash_fields; /**< Fields that participate in the hash. */
 	uint16_t queues[RTE_MAX_QUEUES_PER_PORT]; /**< List of queues. */
 	uint16_t queues_n; /**< Number of queues in the list. */
@@ -274,7 +273,6 @@ static const struct mlx5_flow_items mlx5_flow_items[] = {
 /* Structure to parse actions. */
 struct mlx5_flow_action {
 	uint32_t queue:1; /**< Target is a receive queue. */
-	uint32_t drop:1; /**< Target is a drop queue. */
 	uint32_t mark:1; /**< Mark is present in the flow. */
 	uint32_t mark_id; /**< Mark identifier. */
 	uint16_t queues[RTE_MAX_QUEUES_PER_PORT]; /**< Queues indexes to use. */
@@ -290,14 +288,6 @@ struct mlx5_flow_parse {
 	struct mlx5_flow_action actions; /**< Parsed action result. */
 };
 
-/** Structure for Drop queue. */
-struct rte_flow_drop {
-	struct ibv_exp_rwq_ind_table *ind_table; /**< Indirection table. */
-	struct ibv_qp *qp; /**< Verbs queue pair. */
-	struct ibv_exp_wq *wq; /**< Verbs work queue. */
-	struct ibv_cq *cq; /**< Verbs completion queue. */
-};
-
 static const struct rte_flow_ops mlx5_flow_ops = {
 	.validate = mlx5_flow_validate,
 	.create = mlx5_flow_create,
@@ -512,7 +502,11 @@ priv_flow_validate(struct priv *priv,
 		if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
 			continue;
 		} else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
-			flow->actions.drop = 1;
+			rte_flow_error_set(error, ENOTSUP,
+				   RTE_FLOW_ERROR_TYPE_ACTION,
+				   actions,
+				   "Drop queue not supported");
+			return -rte_errno;
 		} else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
 			const struct rte_flow_action_queue *queue =
 				(const struct rte_flow_action_queue *)
@@ -614,11 +608,9 @@ priv_flow_validate(struct priv *priv,
 			goto exit_action_not_supported;
 		}
 	}
-	if (flow->actions.mark && !flow->ibv_attr && !flow->actions.drop)
+	if (flow->actions.mark && !flow->ibv_attr)
 		flow->offset += sizeof(struct ibv_exp_flow_spec_action_tag);
-	if (!flow->ibv_attr && flow->actions.drop)
-		flow->offset += sizeof(struct ibv_exp_flow_spec_action_drop);
-	if (!flow->actions.queue && !flow->actions.drop) {
+	if (!flow->actions.queue) {
 		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
 				   NULL, "no valid action");
 		return -rte_errno;
@@ -1015,62 +1007,6 @@ mlx5_flow_create_flag_mark(struct mlx5_flow_parse *flow, uint32_t mark_id)
 }
 
 /**
- * Complete flow rule creation with a drop queue.
- *
- * @param priv
- *   Pointer to private structure.
- * @param flow
- *   MLX5 flow attributes (filled by mlx5_flow_validate()).
- * @param[out] error
- *   Perform verbose error reporting if not NULL.
- *
- * @return
- *   A flow if the rule could be created.
- */
-static struct rte_flow *
-priv_flow_create_action_queue_drop(struct priv *priv,
-				   struct mlx5_flow_parse *flow,
-				   struct rte_flow_error *error)
-{
-	struct rte_flow *rte_flow;
-	struct ibv_exp_flow_spec_action_drop *drop;
-	unsigned int size = sizeof(struct ibv_exp_flow_spec_action_drop);
-
-	assert(priv->pd);
-	assert(priv->ctx);
-	rte_flow = rte_calloc(__func__, 1, sizeof(*rte_flow), 0);
-	if (!rte_flow) {
-		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
-				   NULL, "cannot allocate flow memory");
-		return NULL;
-	}
-	rte_flow->drop = 1;
-	drop = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
-	*drop = (struct ibv_exp_flow_spec_action_drop){
-			.type = IBV_EXP_FLOW_SPEC_ACTION_DROP,
-			.size = size,
-	};
-	++flow->ibv_attr->num_of_specs;
-	flow->offset += sizeof(struct ibv_exp_flow_spec_action_drop);
-	rte_flow->ibv_attr = flow->ibv_attr;
-	if (!priv->dev->data->dev_started)
-		return rte_flow;
-	rte_flow->qp = priv->flow_drop_queue->qp;
-	rte_flow->ibv_flow = ibv_exp_create_flow(rte_flow->qp,
-						 rte_flow->ibv_attr);
-	if (!rte_flow->ibv_flow) {
-		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
-				   NULL, "flow rule creation failure");
-		goto error;
-	}
-	return rte_flow;
-error:
-	assert(rte_flow);
-	rte_free(rte_flow);
-	return NULL;
-}
-
-/**
  * Complete flow rule creation.
  *
  * @param priv
@@ -1237,15 +1173,11 @@ priv_flow_create(struct priv *priv,
 	flow.hash_fields = 0;
 	claim_zero(priv_flow_validate(priv, attr, items, actions,
 				      error, &flow));
-	if (flow.actions.mark && !flow.actions.drop) {
+	if (flow.actions.mark) {
 		mlx5_flow_create_flag_mark(&flow, flow.actions.mark_id);
 		flow.offset += sizeof(struct ibv_exp_flow_spec_action_tag);
 	}
-	if (flow.actions.drop)
-		rte_flow =
-			priv_flow_create_action_queue_drop(priv, &flow, error);
-	else
-		rte_flow = priv_flow_create_action_queue(priv, &flow, error);
+	rte_flow = priv_flow_create_action_queue(priv, &flow, error);
 	if (!rte_flow)
 		goto exit;
 	return rte_flow;
@@ -1297,8 +1229,6 @@ priv_flow_destroy(struct priv *priv,
 	TAILQ_REMOVE(&priv->flows, flow, next);
 	if (flow->ibv_flow)
 		claim_zero(ibv_exp_destroy_flow(flow->ibv_flow));
-	if (flow->drop)
-		goto free;
 	if (flow->qp)
 		claim_zero(ibv_destroy_qp(flow->qp));
 	if (flow->ind_table)
@@ -1319,8 +1249,6 @@ priv_flow_destroy(struct priv *priv,
 			TAILQ_FOREACH(tmp, &priv->flows, next) {
 				unsigned int j;
 
-				if (tmp->drop)
-					continue;
 				if (!tmp->mark)
 					continue;
 				for (j = 0; (j != tmp->queues_n) && !mark; j++)
@@ -1331,7 +1259,6 @@ priv_flow_destroy(struct priv *priv,
 		}
 		mlx5_priv_rxq_ibv_release(priv, rxq_ctrl->ibv);
 	}
-free:
 	rte_free(flow->ibv_attr);
 	DEBUG("Flow destroyed %p", (void *)flow);
 	rte_free(flow);
@@ -1394,122 +1321,6 @@ mlx5_flow_flush(struct rte_eth_dev *dev,
 }
 
 /**
- * Create drop queue.
- *
- * @param priv
- *   Pointer to private structure.
- *
- * @return
- *   0 on success.
- */
-static int
-priv_flow_create_drop_queue(struct priv *priv)
-{
-	struct rte_flow_drop *fdq = NULL;
-
-	assert(priv->pd);
-	assert(priv->ctx);
-	fdq = rte_calloc(__func__, 1, sizeof(*fdq), 0);
-	if (!fdq) {
-		WARN("cannot allocate memory for drop queue");
-		goto error;
-	}
-	fdq->cq = ibv_exp_create_cq(priv->ctx, 1, NULL, NULL, 0,
-			&(struct ibv_exp_cq_init_attr){
-			.comp_mask = 0,
-			});
-	if (!fdq->cq) {
-		WARN("cannot allocate CQ for drop queue");
-		goto error;
-	}
-	fdq->wq = ibv_exp_create_wq(priv->ctx,
-			&(struct ibv_exp_wq_init_attr){
-			.wq_type = IBV_EXP_WQT_RQ,
-			.max_recv_wr = 1,
-			.max_recv_sge = 1,
-			.pd = priv->pd,
-			.cq = fdq->cq,
-			});
-	if (!fdq->wq) {
-		WARN("cannot allocate WQ for drop queue");
-		goto error;
-	}
-	fdq->ind_table = ibv_exp_create_rwq_ind_table(priv->ctx,
-			&(struct ibv_exp_rwq_ind_table_init_attr){
-			.pd = priv->pd,
-			.log_ind_tbl_size = 0,
-			.ind_tbl = &fdq->wq,
-			.comp_mask = 0,
-			});
-	if (!fdq->ind_table) {
-		WARN("cannot allocate indirection table for drop queue");
-		goto error;
-	}
-	fdq->qp = ibv_exp_create_qp(priv->ctx,
-		&(struct ibv_exp_qp_init_attr){
-			.qp_type = IBV_QPT_RAW_PACKET,
-			.comp_mask =
-				IBV_EXP_QP_INIT_ATTR_PD |
-				IBV_EXP_QP_INIT_ATTR_PORT |
-				IBV_EXP_QP_INIT_ATTR_RX_HASH,
-			.pd = priv->pd,
-			.rx_hash_conf = &(struct ibv_exp_rx_hash_conf){
-				.rx_hash_function =
-					IBV_EXP_RX_HASH_FUNC_TOEPLITZ,
-				.rx_hash_key_len = rss_hash_default_key_len,
-				.rx_hash_key = rss_hash_default_key,
-				.rx_hash_fields_mask = 0,
-				.rwq_ind_tbl = fdq->ind_table,
-				},
-			.port_num = priv->port,
-			});
-	if (!fdq->qp) {
-		WARN("cannot allocate QP for drop queue");
-		goto error;
-	}
-	priv->flow_drop_queue = fdq;
-	return 0;
-error:
-	if (fdq->qp)
-		claim_zero(ibv_destroy_qp(fdq->qp));
-	if (fdq->ind_table)
-		claim_zero(ibv_exp_destroy_rwq_ind_table(fdq->ind_table));
-	if (fdq->wq)
-		claim_zero(ibv_exp_destroy_wq(fdq->wq));
-	if (fdq->cq)
-		claim_zero(ibv_destroy_cq(fdq->cq));
-	if (fdq)
-		rte_free(fdq);
-	priv->flow_drop_queue = NULL;
-	return -1;
-}
-
-/**
- * Delete drop queue.
- *
- * @param priv
- *   Pointer to private structure.
- */
-static void
-priv_flow_delete_drop_queue(struct priv *priv)
-{
-	struct rte_flow_drop *fdq = priv->flow_drop_queue;
-
-	if (!fdq)
-		return;
-	if (fdq->qp)
-		claim_zero(ibv_destroy_qp(fdq->qp));
-	if (fdq->ind_table)
-		claim_zero(ibv_exp_destroy_rwq_ind_table(fdq->ind_table));
-	if (fdq->wq)
-		claim_zero(ibv_exp_destroy_wq(fdq->wq));
-	if (fdq->cq)
-		claim_zero(ibv_destroy_cq(fdq->cq));
-	rte_free(fdq);
-	priv->flow_drop_queue = NULL;
-}
-
-/**
  * Remove all flows.
  *
  * Called by dev_stop() to remove all flows.
@@ -1523,17 +1334,15 @@ priv_flow_stop(struct priv *priv)
 	struct rte_flow *flow;
 
 	TAILQ_FOREACH_REVERSE(flow, &priv->flows, mlx5_flows, next) {
+		unsigned int i;
+
 		claim_zero(ibv_exp_destroy_flow(flow->ibv_flow));
 		flow->ibv_flow = NULL;
-		if (flow->mark) {
-			unsigned int n;
-
-			for (n = 0; n < flow->queues_n; ++n)
-				(*priv->rxqs)[flow->queues[n]]->mark = 0;
-		}
+		/* Disable mark on all queues. */
+		for (i = 0; i != priv->rxqs_n; ++i)
+			(*priv->rxqs)[i]->mark = 0;
 		DEBUG("Flow %p removed", (void *)flow);
 	}
-	priv_flow_delete_drop_queue(priv);
 }
 
 /**
@@ -1548,19 +1357,12 @@ priv_flow_stop(struct priv *priv)
 int
 priv_flow_start(struct priv *priv)
 {
-	int ret;
 	struct rte_flow *flow;
 
-	ret = priv_flow_create_drop_queue(priv);
-	if (ret)
-		return -1;
 	TAILQ_FOREACH(flow, &priv->flows, next) {
 		struct ibv_qp *qp;
 
-		if (flow->drop)
-			qp = priv->flow_drop_queue->qp;
-		else
-			qp = flow->qp;
+		qp = flow->qp;
 		flow->ibv_flow = ibv_exp_create_flow(qp, flow->ibv_attr);
 		if (!flow->ibv_flow) {
 			DEBUG("Flow %p cannot be applied", (void *)flow);
-- 
2.1.4


