[dpdk-dev] [PATCH v2 20/20] net/ixgbe: support committing TM hierarchy

Wenzhuo Lu wenzhuo.lu at intel.com
Mon Jun 19 07:43:56 CEST 2017


Add support for the Traffic Management API
rte_tm_hierarchy_commit.
When this API is called, the driver tries to apply
the TM configuration to the HW.
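
For illustration only (not part of this patch): a minimal
application-side sketch of committing the hierarchy on an ixgbe port.
It assumes the shaper profiles and nodes were already added through
rte_tm_shaper_profile_add()/rte_tm_node_add(), with queue peak rates
given in bytes/sec (the driver converts them to Mbps). The port_id
parameter and app_tm_commit() helper are assumptions for the sketch
(uint16_t port id as in recent rte_tm; adjust to the API version in
use), not something defined by this patch.

#include <stdio.h>
#include <rte_tm.h>

static int
app_tm_commit(uint16_t port_id)
{
	struct rte_tm_error tm_err = { 0 };
	int ret;

	/* clear_on_fail = 1: on failure the driver also drops its
	 * software TM configuration (the fail_clear path below).
	 */
	ret = rte_tm_hierarchy_commit(port_id, 1 /* clear_on_fail */, &tm_err);
	if (ret)
		printf("TM commit failed: %s\n",
		       tm_err.message ? tm_err.message : "unknown");

	return ret;
}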

Signed-off-by: Wenzhuo Lu <wenzhuo.lu at intel.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c |  8 ++---
 drivers/net/ixgbe/ixgbe_ethdev.h |  2 ++
 drivers/net/ixgbe/ixgbe_tm.c     | 65 ++++++++++++++++++++++++++++++++++++++++
 3 files changed, 70 insertions(+), 5 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 26eaece..6c11558 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -302,9 +302,6 @@ static void ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
 			       uint8_t queue, uint8_t msix_vector);
 static void ixgbe_configure_msix(struct rte_eth_dev *dev);
 
-static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
-		uint16_t queue_idx, uint16_t tx_rate);
-
 static int ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
 				struct ether_addr *mac_addr,
 				uint32_t index, uint32_t pool);
@@ -5605,8 +5602,9 @@ static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
 	IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
 }
 
-static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
-	uint16_t queue_idx, uint16_t tx_rate)
+int
+ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
+			   uint16_t queue_idx, uint16_t tx_rate)
 {
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	uint32_t rf_dec, rf_int;
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index 118c271..e226542 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -730,6 +730,8 @@ int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
 int ixgbe_tm_ops_get(struct rte_eth_dev *dev, void *ops);
 void ixgbe_tm_conf_init(struct rte_eth_dev *dev);
 void ixgbe_tm_conf_uninit(struct rte_eth_dev *dev);
+int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, uint16_t queue_idx,
+			       uint16_t tx_rate);
 
 static inline int
 ixgbe_ethertype_filter_lookup(struct ixgbe_filter_info *filter_info,
diff --git a/drivers/net/ixgbe/ixgbe_tm.c b/drivers/net/ixgbe/ixgbe_tm.c
index 422b8c5..f33f1d6 100644
--- a/drivers/net/ixgbe/ixgbe_tm.c
+++ b/drivers/net/ixgbe/ixgbe_tm.c
@@ -62,6 +62,9 @@ static int ixgbe_node_capabilities_get(struct rte_eth_dev *dev,
 				       uint32_t node_id,
 				       struct rte_tm_node_capabilities *cap,
 				       struct rte_tm_error *error);
+static int ixgbe_hierarchy_commit(struct rte_eth_dev *dev,
+				  int clear_on_fail,
+				  struct rte_tm_error *error);
 
 const struct rte_tm_ops ixgbe_tm_ops = {
 	.capabilities_get = ixgbe_tm_capabilities_get,
@@ -72,6 +75,7 @@ static int ixgbe_node_capabilities_get(struct rte_eth_dev *dev,
 	.node_type_get = ixgbe_node_type_get,
 	.level_capabilities_get = ixgbe_level_capabilities_get,
 	.node_capabilities_get = ixgbe_node_capabilities_get,
+	.hierarchy_commit = ixgbe_hierarchy_commit,
 };
 
 int
@@ -903,3 +907,64 @@ static int ixgbe_node_capabilities_get(struct rte_eth_dev *dev,
 
 	return 0;
 }
+
+static int
+ixgbe_hierarchy_commit(struct rte_eth_dev *dev,
+		       int clear_on_fail,
+		       struct rte_tm_error *error)
+{
+	struct ixgbe_tm_conf *tm_conf =
+		IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
+	struct ixgbe_tm_node *tm_node;
+	uint64_t bw;
+	int ret;
+
+	if (!error)
+		return -EINVAL;
+
+	/* nothing to commit if no hierarchy has been created */
+	if (!tm_conf->root)
+		return 0;
+
+	/* port max bandwidth not supported yet */
+	if (tm_conf->root->shaper_profile->profile.peak.rate) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+		error->message = "port max bandwidth not supported";
+		goto fail_clear;
+	}
+
+	/* HW doesn't support TC max bandwidth */
+	TAILQ_FOREACH(tm_node, &tm_conf->tc_list, node) {
+		if (tm_node->shaper_profile->profile.peak.rate) {
+			error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+			error->message = "TC max bandwidth not supported";
+			goto fail_clear;
+		}
+	}
+
+	/* queue max bandwidth */
+	TAILQ_FOREACH(tm_node, &tm_conf->queue_list, node) {
+		bw = tm_node->shaper_profile->profile.peak.rate;
+		if (bw) {
+			/* convert Bps to Mbps */
+			bw = bw * 8 / 1000 / 1000;
+			ret = ixgbe_set_queue_rate_limit(dev, tm_node->no, bw);
+			if (ret) {
+				error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+				error->message =
+					"failed to set queue max bandwidth";
+				goto fail_clear;
+			}
+		}
+	}
+
+	return 0;
+
+fail_clear:
+	/* clear all the traffic manager configuration */
+	if (clear_on_fail) {
+		ixgbe_tm_conf_uninit(dev);
+		ixgbe_tm_conf_init(dev);
+	}
+	return -EINVAL;
+}
-- 
1.9.3


