[PATCH v2 3/3] net/cnxk: skip red drop for ingress policer

Rakesh Kudurumalla rkudurumalla@marvell.com
Wed Jan 25 08:32:31 CET 2023


Dropping of packets is based on the action configured for the
meter. If both skip_red and drop actions are configured, then
tail drop is invoked; if only the drop action is configured,
then RED drop is invoked. This action is supported only when
RED is configured using rte_eth_cman_config_set().
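
A minimal sketch (not part of this patch) of driving this from an
application via the generic ethdev/mtr API; the port id, queue id,
policy id and RED thresholds below are placeholders:

  #include <rte_ethdev.h>
  #include <rte_flow.h>
  #include <rte_mtr.h>

  static int
  setup_red_tail_drop(uint16_t port_id)
  {
      struct rte_eth_cman_config cman;
      struct rte_mtr_error mtr_err;
      int rc;

      /* RED must be configured first; skip_red has no effect
       * otherwise.
       */
      rte_eth_cman_config_init(port_id, &cman);
      cman.obj = RTE_ETH_CMAN_OBJ_RX_QUEUE;
      cman.mode = RTE_CMAN_RED;
      cman.obj_param.rx_queue = 0;
      cman.mode_param.red.min_th = 10;   /* percent */
      cman.mode_param.red.max_th = 90;   /* percent */
      cman.mode_param.red.maxp_inv = 10; /* maxp = 1/10 */
      rc = rte_eth_cman_config_set(port_id, &cman);
      if (rc != 0)
          return rc;

      /* SKIP_CMAN + DROP => tail drop for red packets;
       * DROP alone leaves RED drop in effect.
       */
      static const struct rte_flow_action r_actions[] = {
          { .type = RTE_FLOW_ACTION_TYPE_SKIP_CMAN },
          { .type = RTE_FLOW_ACTION_TYPE_DROP },
          { .type = RTE_FLOW_ACTION_TYPE_END },
      };
      struct rte_mtr_meter_policy_params policy = {
          .actions[RTE_COLOR_RED] = r_actions,
      };

      return rte_mtr_meter_policy_add(port_id, 1, &policy, &mtr_err);
  }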

Signed-off-by: Rakesh Kudurumalla <rkudurumalla@marvell.com>
---
 drivers/net/cnxk/cnxk_ethdev.h     |  1 +
 drivers/net/cnxk/cnxk_ethdev_mtr.c | 50 ++++++++++++++++++++++++++++++
 2 files changed, 51 insertions(+)

diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index ea8c70b8b7..7c53122b9f 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -238,6 +238,7 @@ struct policy_actions {
 		uint16_t queue;
 		uint32_t mtr_id;
 		struct action_rss *rss_desc;
+		bool skip_red;
 	};
 };
 
diff --git a/drivers/net/cnxk/cnxk_ethdev_mtr.c b/drivers/net/cnxk/cnxk_ethdev_mtr.c
index dcfa4223d5..27a6e4ef3d 100644
--- a/drivers/net/cnxk/cnxk_ethdev_mtr.c
+++ b/drivers/net/cnxk/cnxk_ethdev_mtr.c
@@ -358,6 +358,9 @@ cnxk_nix_mtr_policy_validate(struct rte_eth_dev *dev,
 				if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
 					supported[i] = true;
 
+				if (action->type == RTE_FLOW_ACTION_TYPE_SKIP_CMAN)
+					supported[i] = true;
+
 				if (!supported[i])
 					return update_mtr_err(i, error, true);
 			}
@@ -397,6 +400,10 @@ cnxk_fill_policy_actions(struct cnxk_mtr_policy_node *fmp,
 					fmp->actions[i].action_fate =
 						action->type;
 				}
+
+				if (action->type ==
+					RTE_FLOW_ACTION_TYPE_SKIP_CMAN)
+					fmp->actions[i].skip_red = true;
 			}
 		}
 	}
@@ -1306,6 +1313,45 @@ nix_mtr_config_map(struct cnxk_meter_node *mtr, struct roc_nix_bpf_cfg *cfg)
 		cfg->action[ROC_NIX_BPF_COLOR_RED] = ROC_NIX_BPF_ACTION_DROP;
 }
 
+static void
+nix_mtr_config_red(struct cnxk_meter_node *mtr, struct roc_nix_rq *rq,
+		   struct roc_nix_bpf_cfg *cfg)
+{
+	struct cnxk_mtr_policy_node *policy = mtr->policy;
+
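+	/* Treat RED as configured on this RQ when a pass level is
+	 * set and is not below the corresponding drop level.
+	 */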
+	if ((rq->red_pass && rq->red_pass >= rq->red_drop) ||
+	    (rq->spb_red_pass && rq->spb_red_pass >= rq->spb_red_drop) ||
+	    (rq->xqe_red_pass && rq->xqe_red_pass >= rq->xqe_red_drop)) {
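+		/* For each color resolved to drop: SKIP_CMAN requests
+		 * an outright (tail) drop, otherwise defer to RED.
+		 */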
+		if (policy->actions[RTE_COLOR_GREEN].action_fate ==
+			RTE_FLOW_ACTION_TYPE_DROP) {
+			if (policy->actions[RTE_COLOR_GREEN].skip_red)
+				cfg->action[ROC_NIX_BPF_COLOR_GREEN] =
+						ROC_NIX_BPF_ACTION_DROP;
+			else
+				cfg->action[ROC_NIX_BPF_COLOR_GREEN] =
+						ROC_NIX_BPF_ACTION_RED;
+		}
+		if (policy->actions[RTE_COLOR_YELLOW].action_fate ==
+			RTE_FLOW_ACTION_TYPE_DROP) {
+			if (policy->actions[RTE_COLOR_YELLOW].skip_red)
+				cfg->action[ROC_NIX_BPF_COLOR_YELLOW] =
+						ROC_NIX_BPF_ACTION_DROP;
+			else
+				cfg->action[ROC_NIX_BPF_COLOR_YELLOW] =
+						ROC_NIX_BPF_ACTION_RED;
+		}
+		if (policy->actions[RTE_COLOR_RED].action_fate ==
+			RTE_FLOW_ACTION_TYPE_DROP) {
+			if (policy->actions[RTE_COLOR_RED].skip_red)
+				cfg->action[ROC_NIX_BPF_COLOR_RED] =
+						ROC_NIX_BPF_ACTION_DROP;
+			else
+				cfg->action[ROC_NIX_BPF_COLOR_RED] =
+						ROC_NIX_BPF_ACTION_RED;
+		}
+	}
+}
+
 static void
 nix_precolor_table_map(struct cnxk_meter_node *mtr,
 		       struct roc_nix_bpf_precolor *tbl,
@@ -1483,6 +1529,10 @@ nix_mtr_configure(struct rte_eth_dev *eth_dev, uint32_t id)
 			if (!mtr[i]->is_used) {
 				memset(&cfg, 0, sizeof(struct roc_nix_bpf_cfg));
 				nix_mtr_config_map(mtr[i], &cfg);
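+				/* Adjust per-color drop actions for RQs
+				 * that have RED configured.
+				 */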
+				for (j = 0; j < mtr[i]->rq_num; j++) {
+					rq = &dev->rqs[mtr[i]->rq_id[j]];
+					nix_mtr_config_red(mtr[i], rq, &cfg);
+				}
 				rc = roc_nix_bpf_config(nix, mtr[i]->bpf_id,
 							lvl_map[mtr[i]->level],
 							&cfg);
-- 
2.25.1


