[PATCH 19.11] net/mlx5: fix mark enabling for Rx
Christian Ehrhardt
christian.ehrhardt at canonical.com
Tue Apr 12 11:54:54 CEST 2022
On Tue, Apr 12, 2022 at 11:27 AM Raja Zidane <rzidane at nvidia.com> wrote:
>
> To optimize datapath, the mlx5 pmd checked for mark action on flow
> creation, and flagged possible destination rxqs (through queue/RSS
> actions), then it enabled the mark action logic only for flagged rxqs.
>
> Mark action didn't work if no queue/rss action was in the same flow,
> even when the user uses multi-group logic to manage the flows.
> So, if the mark action is performed in group X and the packet is moved to
> group Y > X, then when the packet is forwarded to the Rx queues the SW did
> not deliver the mark ID to the mbuf.
>
> Flag Rx datapath to report mark action for any queue when the driver
> detects the first mark action after dev_start operation.
Thanks, enqueued for 19.11.13.
> Signed-off-by: Raja Zidane <rzidane at nvidia.com>
> Acked-by: Matan Azrad <matan at nvidia.com>
> ---
> drivers/net/mlx5/mlx5.h | 1 +
> drivers/net/mlx5/mlx5_flow.c | 37 ++++++++++++++++++++++--------------
> drivers/net/mlx5/mlx5_rxtx.h | 1 -
> 3 files changed, 24 insertions(+), 15 deletions(-)
>
> diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
> index 9f6b355182..e59662a99d 100644
> --- a/drivers/net/mlx5/mlx5.h
> +++ b/drivers/net/mlx5/mlx5.h
> @@ -739,6 +739,7 @@ struct mlx5_priv {
> unsigned int counter_fallback:1; /* Use counter fallback management. */
> unsigned int mtr_en:1; /* Whether support meter. */
> unsigned int mtr_reg_share:1; /* Whether support meter REG_C share. */
> + uint32_t mark_enabled:1; /* If mark action is enabled on rxqs. */
> unsigned int root_verbs_drop_action; /* Root uses verbs drop action. */
> uint16_t domain_id; /* Switch domain identifier. */
> uint16_t vport_id; /* Associated VF vport index (if any). */
> diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
> index 0d73eebcfc..1a6f90912a 100644
> --- a/drivers/net/mlx5/mlx5_flow.c
> +++ b/drivers/net/mlx5/mlx5_flow.c
> @@ -728,12 +728,11 @@ flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
> * Pointer to device flow structure.
> */
> static void
> -flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
> +flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,
> + int mark)
> {
> struct mlx5_priv *priv = dev->data->dev_private;
> struct rte_flow *flow = dev_flow->flow;
> - const int mark = !!(dev_flow->actions &
> - (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
> const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
> unsigned int i;
>
> @@ -752,10 +751,8 @@ flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
> priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
> mlx5_flow_ext_mreg_supported(dev)) {
> rxq_ctrl->rxq.mark = 1;
> - rxq_ctrl->flow_mark_n = 1;
> } else if (mark) {
> rxq_ctrl->rxq.mark = 1;
> - rxq_ctrl->flow_mark_n++;
> }
> if (tunnel) {
> unsigned int j;
> @@ -774,6 +771,20 @@ flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
> }
> }
>
> +static void
> +flow_rxq_mark_flag_set(struct rte_eth_dev *dev)
> +{
> + struct mlx5_priv *priv = dev->data->dev_private;
> + struct mlx5_rxq_ctrl *rxq_ctrl;
> +
> + if (priv->mark_enabled)
> + return;
> + LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
> + rxq_ctrl->rxq.mark = 1;
> + }
> + priv->mark_enabled = 1;
> +}
> +
> /**
> * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) for a flow
> *
> @@ -786,9 +797,14 @@ static void
> flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
> {
> struct mlx5_flow *dev_flow;
> -
> + int mark = 0;
> LIST_FOREACH(dev_flow, &flow->dev_flows, next)
> - flow_drv_rxq_flags_set(dev, dev_flow);
> + mark = mark | (!!(dev_flow->actions & (MLX5_FLOW_ACTION_FLAG |
> + MLX5_FLOW_ACTION_MARK)));
> + if (mark)
> + flow_rxq_mark_flag_set(dev);
> + LIST_FOREACH(dev_flow, &flow->dev_flows, next)
> + flow_drv_rxq_flags_set(dev, dev_flow, mark);
> }
>
> /**
> @@ -805,8 +821,6 @@ flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
> {
> struct mlx5_priv *priv = dev->data->dev_private;
> struct rte_flow *flow = dev_flow->flow;
> - const int mark = !!(dev_flow->actions &
> - (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
> const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
> unsigned int i;
>
> @@ -821,10 +835,6 @@ flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
> priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
> mlx5_flow_ext_mreg_supported(dev)) {
> rxq_ctrl->rxq.mark = 1;
> - rxq_ctrl->flow_mark_n = 1;
> - } else if (mark) {
> - rxq_ctrl->flow_mark_n--;
> - rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n;
> }
> if (tunnel) {
> unsigned int j;
> @@ -881,7 +891,6 @@ flow_rxq_flags_clear(struct rte_eth_dev *dev)
> continue;
> rxq_ctrl = container_of((*priv->rxqs)[i],
> struct mlx5_rxq_ctrl, rxq);
> - rxq_ctrl->flow_mark_n = 0;
> rxq_ctrl->rxq.mark = 0;
> for (j = 0; j != MLX5_FLOW_TUNNEL; ++j)
> rxq_ctrl->flow_tunnels_n[j] = 0;
> diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
> index 34ec66a3ae..68a935ca66 100644
> --- a/drivers/net/mlx5/mlx5_rxtx.h
> +++ b/drivers/net/mlx5/mlx5_rxtx.h
> @@ -198,7 +198,6 @@ struct mlx5_rxq_ctrl {
> unsigned int socket; /* CPU socket ID for allocations. */
> unsigned int irq:1; /* Whether IRQ is enabled. */
> unsigned int dbr_umem_id_valid:1; /* dbr_umem_id holds a valid value. */
> - uint32_t flow_mark_n; /* Number of Mark/Flag flows using this Queue. */
> uint32_t flow_tunnels_n[MLX5_FLOW_TUNNEL]; /* Tunnels counters. */
> uint32_t wqn; /* WQ number. */
> uint16_t dump_file_n; /* Number of dump files. */
> --
> 2.21.0
>
--
Christian Ehrhardt
Staff Engineer, Ubuntu Server
Canonical Ltd
More information about the stable
mailing list