[dpdk-dev,v2,12/20] net/ixgbe: support getting TM capability

Message ID 1497851036-96016-13-git-send-email-wenzhuo.lu@intel.com (mailing list archive)
State Superseded, archived
Checks

Context              Check    Description
ci/checkpatch        success  coding style OK
ci/Intel-compilation fail     Compilation issues

Commit Message

Wenzhuo Lu June 19, 2017, 5:43 a.m. UTC
  Add support for the Traffic Management API operation
rte_tm_capabilities_get().

Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
---
 drivers/net/ixgbe/ixgbe_tm.c | 90 +++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 89 insertions(+), 1 deletion(-)
  

Patch

diff --git a/drivers/net/ixgbe/ixgbe_tm.c b/drivers/net/ixgbe/ixgbe_tm.c
index 0a222a1..77066b7 100644
--- a/drivers/net/ixgbe/ixgbe_tm.c
+++ b/drivers/net/ixgbe/ixgbe_tm.c
@@ -33,8 +33,12 @@ 
 
 #include "ixgbe_ethdev.h"
 
+static int ixgbe_tm_capabilities_get(struct rte_eth_dev *dev,
+				     struct rte_tm_capabilities *cap,
+				     struct rte_tm_error *error);
+
 const struct rte_tm_ops ixgbe_tm_ops = {
-	NULL,
+	.capabilities_get = ixgbe_tm_capabilities_get,
 };
 
 int
@@ -48,3 +52,87 @@ 
 
 	return 0;
 }
+
+static inline uint8_t
+ixgbe_tc_nb_get(struct rte_eth_dev *dev)
+{
+	struct rte_eth_conf *eth_conf;
+	uint8_t nb_tcs = 0;
+
+	eth_conf = &dev->data->dev_conf;
+	if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+		nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
+	} else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+		if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
+		    ETH_32_POOLS)
+			nb_tcs = ETH_4_TCS;
+		else
+			nb_tcs = ETH_8_TCS;
+	} else {
+		nb_tcs = 1;
+	}
+
+	return nb_tcs;
+}
+
+static int
+ixgbe_tm_capabilities_get(struct rte_eth_dev *dev,
+			  struct rte_tm_capabilities *cap,
+			  struct rte_tm_error *error)
+{
+	uint8_t nb_tcs;
+	uint8_t nb_queues;
+
+	if (!cap || !error)
+		return -EINVAL;
+
+	error->type = RTE_TM_ERROR_TYPE_NONE;
+
+	/* set all the parameters to 0 first. */
+	memset(cap, 0, sizeof(struct rte_tm_capabilities));
+
+	nb_tcs = ixgbe_tc_nb_get(dev);
+	nb_queues = dev->data->nb_tx_queues;
+	/* port + TCs + queues */
+	cap->n_nodes_max = 1 + nb_tcs + nb_queues;
+	cap->n_levels_max = 3;
+	cap->non_leaf_nodes_identical = 0;
+	cap->leaf_nodes_identical = 0;
+	cap->shaper_n_max = cap->n_nodes_max;
+	cap->shaper_private_n_max = cap->n_nodes_max;
+	cap->shaper_private_dual_rate_n_max = 0;
+	cap->shaper_private_rate_min = 0;
+	/* 10Gbps -> 1.25GBps */
+	cap->shaper_private_rate_max = 1250000000ull;
+	cap->shaper_shared_n_max = 0;
+	cap->shaper_shared_n_nodes_per_shaper_max = 0;
+	cap->shaper_shared_n_shapers_per_node_max = 0;
+	cap->shaper_shared_dual_rate_n_max = 0;
+	cap->shaper_shared_rate_min = 0;
+	cap->shaper_shared_rate_max = 0;
+	cap->sched_n_children_max = (nb_tcs > nb_queues) ? nb_tcs : nb_queues;
+	cap->sched_sp_n_priorities_max = 0;
+	cap->sched_wfq_n_children_per_group_max = 0;
+	cap->sched_wfq_n_groups_max = 0;
+	cap->sched_wfq_weight_max = 0;
+	cap->cman_head_drop_supported = 0;
+	cap->dynamic_update_mask = 0;
+
+	/**
+	 * The following parameters are not supported and stay 0:
+	 * shaper_pkt_length_adjust_min
+	 * shaper_pkt_length_adjust_max
+	 * cman_wred_context_n_max
+	 * cman_wred_context_private_n_max
+	 * cman_wred_context_shared_n_max
+	 * cman_wred_context_shared_n_nodes_per_context_max
+	 * cman_wred_context_shared_n_contexts_per_node_max
+	 * mark_vlan_dei_supported
+	 * mark_ip_ecn_tcp_supported
+	 * mark_ip_ecn_sctp_supported
+	 * mark_ip_dscp_supported
+	 * stats_mask
+	 */
+
+	return 0;
+}
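
For reviewers who want to exercise the new callback, below is a minimal
sketch of how an application could query these capabilities through the
generic rte_tm API once the port is configured. It is not part of the
patch: the helper name show_tm_caps() is illustrative, the uint16_t
port_id follows the later ethdev convention (it was uint8_t at the time
of this series), and the error struct is zero-initialized because the
driver does not fill it on every failure path.

#include <stdio.h>
#include <inttypes.h>
#include <rte_tm.h>

/* Hypothetical helper, not part of the patch. */
static int
show_tm_caps(uint16_t port_id)
{
	struct rte_tm_capabilities cap;
	struct rte_tm_error error = { 0 };
	int ret;

	/* The ethdev layer dispatches this to ixgbe_tm_capabilities_get(). */
	ret = rte_tm_capabilities_get(port_id, &cap, &error);
	if (ret != 0) {
		printf("rte_tm_capabilities_get() failed (%d): %s\n",
		       ret, error.message ? error.message : "no message");
		return ret;
	}

	/* port + TCs + queues, as computed by the driver above */
	printf("port %u: %" PRIu32 " nodes max, %" PRIu32 " levels, "
	       "max private shaper rate %" PRIu64 " bytes/s\n",
	       port_id, cap.n_nodes_max, cap.n_levels_max,
	       cap.shaper_private_rate_max);

	return 0;
}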