[dpdk-dev,v3,08/14] net/mlx5: add hardware flow debug dump
Commit Message
Dump Verbs flow details, including flow spec type and size, for debugging
purposes.
Signed-off-by: Xueming Li <xuemingl@mellanox.com>
---
drivers/net/mlx5/mlx5_flow.c | 68 ++++++++++++++++++++++++++++++++++++-------
drivers/net/mlx5/mlx5_rxq.c | 25 +++++++++++++---
drivers/net/mlx5/mlx5_utils.h | 6 ++++
3 files changed, 85 insertions(+), 14 deletions(-)
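
The mlx5_flow_dump() helper added by this patch walks the variable-length
Verbs flow specs laid out right after the ibv_flow_attr header and prints
each spec's type and size. A minimal, self-contained sketch of that walk is
shown below; the structs are simplified stand-ins for the infiniband/verbs.h
definitions, and every name in the sketch is illustrative only, not part of
the patch.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for struct ibv_flow_attr (not the real layout). */
struct fake_flow_attr {
        uint32_t type;
        uint16_t size;
        uint8_t num_of_specs; /* specs are laid out right after this header */
};

/* Simplified stand-in for the common header of struct ibv_flow_spec. */
struct fake_spec_hdr {
        uint32_t type;
        uint16_t size; /* total size of this spec, header included */
};

/* Append "type(size)" for each spec, the same pattern mlx5_flow_dump()
 * uses to build its log line. */
static void
dump_specs(const struct fake_flow_attr *attr)
{
        uintptr_t spec_ptr = (uintptr_t)(attr + 1);
        char buf[256];
        int off = 0;
        uint8_t j;

        for (j = 0; j < attr->num_of_specs; j++) {
                const struct fake_spec_hdr *hdr = (const void *)spec_ptr;

                off += snprintf(buf + off, sizeof(buf) - off, " %x(%hu)",
                                hdr->type, hdr->size);
                spec_ptr += hdr->size; /* each spec states its own length */
        }
        printf("specs:%hhu%s\n", attr->num_of_specs, buf);
}

int
main(void)
{
        /* One attribute followed by two specs, contiguous in memory. */
        struct {
                struct fake_flow_attr attr;
                struct fake_spec_hdr specs[2];
        } flow = {
                .attr = { .type = 0, .size = 0, .num_of_specs = 2 },
                .specs = {
                        { .type = 0x20, .size = sizeof(struct fake_spec_hdr) },
                        { .type = 0x30, .size = sizeof(struct fake_spec_hdr) },
                },
        };

        dump_specs(&flow.attr); /* typically prints "specs:2 20(8) 30(8)" */
        return 0;
}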
Comments
On Fri, Apr 13, 2018 at 07:20:17PM +0800, Xueming Li wrote:
> Dump Verbs flow details, including flow spec type and size, for debugging
> purposes.
>
> Signed-off-by: Xueming Li <xuemingl@mellanox.com>
> ---
> drivers/net/mlx5/mlx5_flow.c | 68 ++++++++++++++++++++++++++++++++++++-------
> drivers/net/mlx5/mlx5_rxq.c | 25 +++++++++++++---
> drivers/net/mlx5/mlx5_utils.h | 6 ++++
> 3 files changed, 85 insertions(+), 14 deletions(-)
>
> diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
> index a22554706..c99722770 100644
> --- a/drivers/net/mlx5/mlx5_flow.c
> +++ b/drivers/net/mlx5/mlx5_flow.c
> @@ -2049,6 +2049,57 @@ mlx5_flow_create_update_rxqs(struct rte_eth_dev *dev, struct rte_flow *flow)
> }
>
> /**
> + * Dump flow hash RX queue detail.
> + *
> + * @param dev
> + * Pointer to Ethernet device.
> + * @param flow
> + * Pointer to the rte_flow.
> + * @param i
> + * Hash RX queue index.
> + */
> +static void
> +mlx5_flow_dump(struct rte_eth_dev *dev __rte_unused,
> + struct rte_flow *flow __rte_unused,
> + unsigned int i __rte_unused)
> +{
> +#ifndef NDEBUG
> + uintptr_t spec_ptr;
> + uint16_t j;
> + char buf[256];
> + uint8_t off;
> +
> + spec_ptr = (uintptr_t)(flow->frxq[i].ibv_attr + 1);
> + for (j = 0, off = 0; j < flow->frxq[i].ibv_attr->num_of_specs;
> + j++) {
> + struct ibv_flow_spec *spec = (void *)spec_ptr;
> + off += sprintf(buf + off, " %x(%hu)", spec->hdr.type,
> + spec->hdr.size);
> + spec_ptr += spec->hdr.size;
> + }
> + DRV_LOG(DEBUG,
> + "port %u Verbs flow %p type %u: hrxq:%p qp:%p ind:%p, hash:%lx/%u"
> + " specs:%hhu(%hu), priority:%hu, type:%d, flags:%x,"
> + " comp_mask:%x specs:%s",
> + dev->data->port_id, (void *)flow, i,
> + (void *)flow->frxq[i].hrxq,
> + (void *)flow->frxq[i].hrxq->qp,
> + (void *)flow->frxq[i].hrxq->ind_table,
> + flow->frxq[i].hash_fields |
> + (flow->tunnel &&
> + flow->rss_conf.level ? (uint32_t)IBV_RX_HASH_INNER : 0),
> + flow->rss_conf.queue_num,
> + flow->frxq[i].ibv_attr->num_of_specs,
> + flow->frxq[i].ibv_attr->size,
> + flow->frxq[i].ibv_attr->priority,
> + flow->frxq[i].ibv_attr->type,
> + flow->frxq[i].ibv_attr->flags,
> + flow->frxq[i].ibv_attr->comp_mask,
> + buf);
> +#endif
> +}
> +
> +/**
> * Complete flow rule creation.
> *
> * @param dev
> @@ -2090,6 +2141,7 @@ mlx5_flow_create_action_queue(struct rte_eth_dev *dev,
> flow->frxq[i].ibv_flow =
> mlx5_glue->create_flow(flow->frxq[i].hrxq->qp,
> flow->frxq[i].ibv_attr);
> + mlx5_flow_dump(dev, flow, i);
> if (!flow->frxq[i].ibv_flow) {
> rte_flow_error_set(error, ENOMEM,
> RTE_FLOW_ERROR_TYPE_HANDLE,
> @@ -2097,11 +2149,6 @@ mlx5_flow_create_action_queue(struct rte_eth_dev *dev,
> goto error;
> }
> ++flows_n;
> - DRV_LOG(DEBUG, "port %u %p type %d QP %p ibv_flow %p",
> - dev->data->port_id,
> - (void *)flow, i,
> - (void *)flow->frxq[i].hrxq->qp,
> - (void *)flow->frxq[i].ibv_flow);
> }
> if (!flows_n) {
> rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
> @@ -2645,24 +2692,25 @@ mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
> flow->rss_conf.level);
> if (!flow->frxq[i].hrxq) {
> DRV_LOG(DEBUG,
> - "port %u flow %p cannot be applied",
> + "port %u flow %p cannot create hash"
> + " rxq",
> dev->data->port_id, (void *)flow);
> rte_errno = EINVAL;
> return -rte_errno;
> }
> flow_create:
> + mlx5_flow_dump(dev, flow, i);
> flow->frxq[i].ibv_flow =
> mlx5_glue->create_flow(flow->frxq[i].hrxq->qp,
> flow->frxq[i].ibv_attr);
> if (!flow->frxq[i].ibv_flow) {
> DRV_LOG(DEBUG,
> - "port %u flow %p cannot be applied",
> - dev->data->port_id, (void *)flow);
> + "port %u flow %p type %u cannot be"
> + " applied",
> + dev->data->port_id, (void *)flow, i);
> rte_errno = EINVAL;
> return -rte_errno;
> }
> - DRV_LOG(DEBUG, "port %u flow %p applied",
> - dev->data->port_id, (void *)flow);
> }
> mlx5_flow_create_update_rxqs(dev, flow);
> }
> diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
> index 1997609ec..f55980836 100644
> --- a/drivers/net/mlx5/mlx5_rxq.c
> +++ b/drivers/net/mlx5/mlx5_rxq.c
> @@ -1259,9 +1259,9 @@ mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, const uint16_t *queues,
> }
> rte_atomic32_inc(&ind_tbl->refcnt);
> LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
> - DRV_LOG(DEBUG, "port %u indirection table %p: refcnt %d",
> - dev->data->port_id, (void *)ind_tbl,
> - rte_atomic32_read(&ind_tbl->refcnt));
> + DEBUG("port %u new indirection table %p: queues:%u refcnt:%d",
> + dev->data->port_id, (void *)ind_tbl, 1 << wq_n,
> + rte_atomic32_read(&ind_tbl->refcnt));
> return ind_tbl;
> error:
> rte_free(ind_tbl);
> @@ -1330,9 +1330,12 @@ mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
> DRV_LOG(DEBUG, "port %u indirection table %p: refcnt %d",
> ((struct priv *)dev->data->dev_private)->port,
> (void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt));
> - if (rte_atomic32_dec_and_test(&ind_tbl->refcnt))
> + if (rte_atomic32_dec_and_test(&ind_tbl->refcnt)) {
> claim_zero(mlx5_glue->destroy_rwq_ind_table
> (ind_tbl->ind_table));
> + DEBUG("port %u delete indirection table %p: queues: %u",
> + dev->data->port_id, (void *)ind_tbl, ind_tbl->queues_n);
> + }
> for (i = 0; i != ind_tbl->queues_n; ++i)
> claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
> if (!rte_atomic32_read(&ind_tbl->refcnt)) {
> @@ -1445,6 +1448,12 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
> .pd = priv->pd,
> },
> &qp_init_attr);
> + DEBUG("port %u new QP:%p ind_tbl:%p hash_fields:0x%lx tunnel:0x%x"
> + " level:%hhu dv_attr:comp_mask:0x%lx create_flags:0x%x",
> + dev->data->port_id, (void *)qp, (void *)ind_tbl,
> + (tunnel && rss_level ? (uint32_t)IBV_RX_HASH_INNER : 0) |
> + hash_fields, tunnel, rss_level,
> + qp_init_attr.comp_mask, qp_init_attr.create_flags);
> #else
> qp = mlx5_glue->create_qp_ex
> (priv->ctx,
> @@ -1466,6 +1475,10 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
> .rwq_ind_tbl = ind_tbl->ind_table,
> .pd = priv->pd,
> });
> + DEBUG("port %u new QP:%p ind_tbl:%p hash_fields:0x%lx tunnel:0x%x"
> + " level:%hhu",
> + dev->data->port_id, (void *)qp, (void *)ind_tbl,
> + hash_fields, tunnel, rss_level);
> #endif
> if (!qp) {
> rte_errno = errno;
> @@ -1577,6 +1590,10 @@ mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
> (void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
> if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
> claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
> + DEBUG("port %u delete QP %p: hash: 0x%lx, tunnel:"
> + " 0x%x, level: %hhu",
> + dev->data->port_id, (void *)hrxq, hrxq->hash_fields,
> + hrxq->tunnel, hrxq->rss_level);
> mlx5_ind_table_ibv_release(dev, hrxq->ind_table);
> LIST_REMOVE(hrxq, next);
> rte_free(hrxq);
> diff --git a/drivers/net/mlx5/mlx5_utils.h b/drivers/net/mlx5/mlx5_utils.h
> index 85d2aae2b..9a3181b1f 100644
> --- a/drivers/net/mlx5/mlx5_utils.h
> +++ b/drivers/net/mlx5/mlx5_utils.h
> @@ -103,16 +103,22 @@ extern int mlx5_logtype;
> /* claim_zero() does not perform any check when debugging is disabled. */
> #ifndef NDEBUG
>
> +#define DEBUG(...) DRV_LOG(DEBUG, __VA_ARGS__)
> #define claim_zero(...) assert((__VA_ARGS__) == 0)
> #define claim_nonzero(...) assert((__VA_ARGS__) != 0)
>
> #else /* NDEBUG */
>
> +#define DEBUG(...) (void)0
> #define claim_zero(...) (__VA_ARGS__)
> #define claim_nonzero(...) (__VA_ARGS__)
>
> #endif /* NDEBUG */
>
> +#define INFO(...) DRV_LOG(INFO, __VA_ARGS__)
> +#define WARN(...) DRV_LOG(WARNING, __VA_ARGS__)
> +#define ERROR(...) DRV_LOG(ERR, __VA_ARGS__)
> +
> /* Convenience macros for accessing mbuf fields. */
> #define NEXT(m) ((m)->next)
> #define DATA_LEN(m) ((m)->data_len)
> --
> 2.13.3
This is a really good first step, even if you could also spread the use
of the DEBUG macro over all the Verbs object creations, as they are purely
developer needs.
Thanks,
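
For reference, the DEBUG() macro added to mlx5_utils.h follows the usual
compile-away pattern: it forwards to the driver logger when debugging is
enabled and turns into a no-op under NDEBUG, while INFO/WARN/ERROR stay
active. A stand-alone sketch of that pattern follows; DRV_LOG() is replaced
by printf() here purely to keep the example compilable outside the driver.

#include <stdio.h>

/* Stand-in for the driver logger, illustration only. */
#define DRV_LOG(level, ...) \
        do { printf(#level ": " __VA_ARGS__); printf("\n"); } while (0)

#ifndef NDEBUG
#define DEBUG(...) DRV_LOG(DEBUG, __VA_ARGS__)
#else /* NDEBUG */
#define DEBUG(...) (void)0
#endif /* NDEBUG */

/* These levels remain active regardless of NDEBUG, as in the patch. */
#define INFO(...) DRV_LOG(INFO, __VA_ARGS__)
#define WARN(...) DRV_LOG(WARNING, __VA_ARGS__)
#define ERROR(...) DRV_LOG(ERR, __VA_ARGS__)

int
main(void)
{
        void *obj = (void *)0x1234;

        /* Prints only when built without -DNDEBUG. */
        DEBUG("port %u new indirection table %p: queues:%u refcnt:%d",
              0, obj, 4, 1);
        /* Always prints. */
        ERROR("port %u cannot allocate indirection table", 0);
        return 0;
}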