[dpdk-dev] [PATCH v2 1/3] i40e: enable DCB in VMDQ VSIs

Zhang, Helin helin.zhang at intel.com
Wed Feb 24 07:58:38 CET 2016



> -----Original Message-----
> From: Wu, Jingjing
> Sent: Wednesday, February 17, 2016 2:58 PM
> To: Richardson, Bruce
> Cc: dev at dpdk.org; Wu, Jingjing; Zhang, Helin
> Subject: [PATCH v2 1/3] i40e: enable DCB in VMDQ VSIs
> 
> Previously, DCB (Data Center Bridging) was only enabled on the PF;
> queue mapping and BW configuration were only done on the PF.
> This patch enables DCB for VMDQ VSIs (Virtual Station Interfaces)
> through the following steps:
>   1. Apply BW and ETS (Enhanced Transmission Selection)
>      configuration on the VEB (Virtual Ethernet Bridge).
>   2. Apply BW and ETS configuration on the VMDQ VSIs.
>   3. Update the TC (Traffic Class) to queue mapping on the VMDQ VSIs.
> To enable DCB on VMDQ, the number of TCs must not be larger than the
> number of queues in the VMDQ pools; the number of queues per VMDQ
> pool is specified by CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM
> in the config/common_* file.
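
For context, a minimal application-side sketch of how VMDQ with DCB could
be enabled on a port (this is not part of the patch; the function name and
the pool/VLAN/queue values are illustrative, and with 4 TCs the
CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM setting must be at least 4):

#include <string.h>
#include <rte_ethdev.h>

static int
setup_vmdq_dcb(uint8_t port, uint16_t nb_rxq, uint16_t nb_txq)
{
	struct rte_eth_conf conf;
	struct rte_eth_vmdq_dcb_conf *vmdq = &conf.rx_adv_conf.vmdq_dcb_conf;
	int i;

	memset(&conf, 0, sizeof(conf));
	conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;

	vmdq->nb_queue_pools = ETH_16_POOLS;	/* number of VMDQ pools */
	vmdq->enable_default_pool = 0;
	vmdq->nb_pool_maps = ETH_16_POOLS;
	for (i = 0; i < vmdq->nb_pool_maps; i++) {
		/* steer one VLAN into each pool */
		vmdq->pool_map[i].vlan_id = 100 + i;
		vmdq->pool_map[i].pools = 1ULL << i;
	}
	/* map the 8 user priorities onto 4 TCs, two priorities each */
	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
		vmdq->dcb_tc[i] = i / 2;

	return rte_eth_dev_configure(port, nb_rxq, nb_txq, &conf);
}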
> 
> Signed-off-by: Jingjing Wu <jingjing.wu at intel.com>
> ---
>  doc/guides/rel_notes/release_16_04.rst |   3 +
>  drivers/net/i40e/i40e_ethdev.c         | 153 +++++++++++++++++++++++++++++----
>  drivers/net/i40e/i40e_ethdev.h         |  28 +++---
>  3 files changed, 152 insertions(+), 32 deletions(-)
> 
> diff --git a/doc/guides/rel_notes/release_16_04.rst b/doc/guides/rel_notes/release_16_04.rst
> index 81f62f1..d3b035c 100644
> --- a/doc/guides/rel_notes/release_16_04.rst
> +++ b/doc/guides/rel_notes/release_16_04.rst
> @@ -56,6 +56,9 @@ This section should contain new features added in this release. Sample format:
>    Added support for sw-firmware sync for resource sharing.
>    Use the PHY token, shared between sw-fw for PHY access on X550EM_a.
> 
> +* **VMDQ DCB mode in i40e.**
> +
> +  Added support for DCB in VMDQ mode to i40e driver.
> 
>  Resolved Issues
>  ---------------
> diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
> index ef24122..fc06612 100644
> --- a/drivers/net/i40e/i40e_ethdev.c
> +++ b/drivers/net/i40e/i40e_ethdev.c
> @@ -8087,6 +8087,8 @@ i40e_vsi_update_queue_mapping(struct i40e_vsi *vsi,
>  	int i, total_tc = 0;
>  	uint16_t qpnum_per_tc, bsf, qp_idx;
>  	struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
> +	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
> +	uint16_t used_queues;
> 
>  	ret = validate_tcmap_parameter(vsi, enabled_tcmap);
>  	if (ret != I40E_SUCCESS)
> @@ -8100,7 +8102,18 @@ i40e_vsi_update_queue_mapping(struct i40e_vsi *vsi,
>  		total_tc = 1;
>  	vsi->enabled_tc = enabled_tcmap;
> 
> -	qpnum_per_tc = dev_data->nb_rx_queues / total_tc;
> +	/* different VSI has different queues assigned */
> +	if (vsi->type == I40E_VSI_MAIN)
> +		used_queues = dev_data->nb_rx_queues -
> +			pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
> +	else if (vsi->type == I40E_VSI_VMDQ2)
> +		used_queues = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
> +	else {
> +		PMD_INIT_LOG(ERR, "unsupported VSI type.");
> +		return I40E_ERR_NO_AVAILABLE_VSI;
> +	}
> +
> +	qpnum_per_tc = used_queues / total_tc;
>  	/* Number of queues per enabled TC */
>  	if (qpnum_per_tc == 0) {
>  		PMD_INIT_LOG(ERR, " number of queues is less than tcs.");
> @@ -8145,6 +8158,93 @@ i40e_vsi_update_queue_mapping(struct i40e_vsi *vsi,
>  }
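
To make the new mapping concrete: with RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM
set to 4 and pf->nb_cfg_vmdq_vsi == 2 (illustrative numbers), a port
configured with 12 Rx queues leaves 12 - 2 * 4 = 4 queues for the main
VSI; with two TCs enabled, qpnum_per_tc is then 4 / 2 = 2 on the main
VSI and likewise 4 / 2 = 2 on each VMDQ VSI.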
> 
>  /*
> + * i40e_config_switch_comp_tc - Configure VEB tc setting for given TC map
> + * @veb: VEB to be configured
> + * @tc_map: enabled TC bitmap
> + *
> + * Returns 0 on success, negative value on failure
> + */
> +static enum i40e_status_code
> +i40e_config_switch_comp_tc(struct i40e_veb *veb, uint8_t tc_map)
> +{
> +	struct i40e_aqc_configure_switching_comp_bw_config_data veb_bw;
> +	struct i40e_aqc_query_switching_comp_bw_config_resp bw_query;
> +	struct i40e_aqc_query_switching_comp_ets_config_resp ets_query;
> +	struct i40e_hw *hw = I40E_VSI_TO_HW(veb->associate_vsi);
> +	enum i40e_status_code ret = I40E_SUCCESS;
> +	int i;
> +	uint32_t bw_max;
> +
> +	/* Check if enabled_tc is same as existing or new TCs */
> +	if (veb->enabled_tc == tc_map)
> +		return ret;
> +
> +	/* configure tc bandwidth */
> +	memset(&veb_bw, 0, sizeof(veb_bw));
> +	veb_bw.tc_valid_bits = tc_map;
> +	/* Enable ETS TCs with equal BW Share for now across all VSIs */
> +	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
> +		if (tc_map & BIT_ULL(i))
> +			veb_bw.tc_bw_share_credits[i] = 1;
> +	}
> +	ret = i40e_aq_config_switch_comp_bw_config(hw, veb->seid,
> +						   &veb_bw, NULL);
> +	if (ret) {
> +		PMD_INIT_LOG(ERR, "AQ command Config switch_comp BW allocation"
> +				  " per TC failed = %d",
> +				  hw->aq.asq_last_status);
> +		return ret;
> +	}
> +
> +	memset(&ets_query, 0, sizeof(ets_query));
> +	ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
> +						   &ets_query, NULL);
> +	if (ret != I40E_SUCCESS) {
> +		PMD_DRV_LOG(ERR, "Failed to get switch_comp ETS"
> +				 " configuration %u", hw->aq.asq_last_status);
> +		return ret;
> +	}
> +	memset(&bw_query, 0, sizeof(bw_query));
> +	ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
> +						  &bw_query, NULL);
> +	if (ret != I40E_SUCCESS) {
> +		PMD_DRV_LOG(ERR, "Failed to get switch_comp bandwidth"
> +				 " configuration %u", hw->aq.asq_last_status);
> +		return ret;
> +	}
> +
> +	/* store and print out BW info */
> +	veb->bw_info.bw_limit = rte_le_to_cpu_16(ets_query.port_bw_limit);
> +	veb->bw_info.bw_max = ets_query.tc_bw_max;
> +	PMD_DRV_LOG(DEBUG, "switch_comp bw limit:%u", veb->bw_info.bw_limit);
> +	PMD_DRV_LOG(DEBUG, "switch_comp max_bw:%u", veb->bw_info.bw_max);
> +	bw_max = rte_le_to_cpu_16(bw_query.tc_bw_max[0]) |
> +		    (rte_le_to_cpu_16(bw_query.tc_bw_max[1]) <<
> +		     I40E_16_BIT_WIDTH);
> +	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
> +		veb->bw_info.bw_ets_share_credits[i] =
> +				bw_query.tc_bw_share_credits[i];
> +		veb->bw_info.bw_ets_credits[i] =
> +				rte_le_to_cpu_16(bw_query.tc_bw_limits[i]);
> +		/* 4 bits per TC, 4th bit is reserved */
> +		veb->bw_info.bw_ets_max[i] =
> +			(uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
> +				  I40E_3_BIT_MASK);
Use RTE_LEN2MASK() to replace I40E_3_BIT_MASK.
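A sketch of what the suggested replacement could look like
(RTE_LEN2MASK(ln, tp) from rte_common.h builds a mask of the low ln bits
in type tp; the literal width 3 mirrors what I40E_3_BIT_MASK encodes):

	veb->bw_info.bw_ets_max[i] =
		(uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
			  RTE_LEN2MASK(3, uint8_t));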

> +		PMD_DRV_LOG(DEBUG, "\tVEB TC%u:share credits %u", i,
> +			    veb->bw_info.bw_ets_share_credits[i]);
> +		PMD_DRV_LOG(DEBUG, "\tVEB TC%u:credits %u", i,
> +			    veb->bw_info.bw_ets_credits[i]);
> +		PMD_DRV_LOG(DEBUG, "\tVEB TC%u: max credits: %u", i,
> +			    veb->bw_info.bw_ets_max[i]);
> +	}
> +
> +	veb->enabled_tc = tc_map;
> +
> +	return ret;
> +}
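A note on the decode above: the AQ response returns tc_bw_max as two
little-endian 16-bit words, combined here into one 32-bit value that
holds 4 bits per TC (the 4th bit of each nibble is reserved). For
example, TC2's max credits sit in bits 8-10 of bw_max, hence the shift
by i * I40E_4_BIT_WIDTH and the 3-bit mask.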
> +
> +
> +/*
>   * i40e_vsi_config_tc - Configure VSI tc setting for given TC map
>   * @vsi: VSI to be configured
>   * @tc_map: enabled TC bitmap
> @@ -8152,7 +8252,7 @@ i40e_vsi_update_queue_mapping(struct i40e_vsi
> *vsi,
>   * Returns 0 on success, negative value on failure
>   */
>  static enum i40e_status_code
> -i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 tc_map)
> +i40e_vsi_config_tc(struct i40e_vsi *vsi, uint8_t tc_map)
>  {
>  	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
>  	struct i40e_vsi_context ctxt;
> @@ -8294,15 +8394,27 @@ i40e_dcb_hw_configure(struct i40e_pf *pf,
>  	i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
>  				     &hw->local_dcbx_config);
> 
> +	/* If the VEB has been created, its TC needs to be updated first */
> +	if (main_vsi->veb) {
> +		ret = i40e_config_switch_comp_tc(main_vsi->veb, tc_map);
> +		if (ret)
> +			PMD_INIT_LOG(WARNING,
> +				 "Failed configuring TC for VEB seid=%d\n",
> +				 main_vsi->veb->seid);
> +	}
What happens if the VEB hasn't been created?

>  	/* Update each VSI */
>  	i40e_vsi_config_tc(main_vsi, tc_map);
>  	if (main_vsi->veb) {
>  		TAILQ_FOREACH(vsi_list, &main_vsi->veb->head, list) {
> -			/* Beside main VSI, only enable default
> +			/* Beside main VSI and VMDQ VSIs, only enable default
>  			 * TC for other VSIs
>  			 */
> -			ret = i40e_vsi_config_tc(vsi_list->vsi,
> -						I40E_DEFAULT_TCMAP);
> +			if (vsi_list->vsi->type == I40E_VSI_VMDQ2)
> +				ret = i40e_vsi_config_tc(vsi_list->vsi,
> +							 tc_map);
> +			else
> +				ret = i40e_vsi_config_tc(vsi_list->vsi,
> +							 I40E_DEFAULT_TCMAP);
>  			if (ret)
>  				PMD_INIT_LOG(WARNING,
>  					 "Failed configuring TC for VSI seid=%d\n",
> @@ -8422,9 +8534,8 @@ i40e_dcb_setup(struct rte_eth_dev *dev)
>  		return -ENOTSUP;
>  	}
> 
> -	if (pf->vf_num != 0 ||
> -	    (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG))
> -		PMD_INIT_LOG(DEBUG, " DCB only works on main vsi.");
> +	if (pf->vf_num != 0)
> +		PMD_INIT_LOG(DEBUG, " DCB only works on pf and vmdq vsis.");
> 
>  	ret = i40e_parse_dcb_configure(dev, &dcb_cfg, &tc_map);
>  	if (ret) {
> @@ -8449,7 +8560,7 @@ i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
>  	struct i40e_vsi *vsi = pf->main_vsi;
>  	struct i40e_dcbx_config *dcb_cfg = &hw->local_dcbx_config;
>  	uint16_t bsf, tc_mapping;
> -	int i;
> +	int i, j;
> 
>  	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
>  		dcb_info->nb_tcs = rte_bsf32(vsi->enabled_tc + 1);
> @@ -8460,23 +8571,27 @@ i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
>  	for (i = 0; i < dcb_info->nb_tcs; i++)
>  		dcb_info->tc_bws[i] = dcb_cfg->etscfg.tcbwtable[i];
> 
> -	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
> -		if (vsi->enabled_tc & (1 << i)) {
> +	j = 0;
> +	do {
> +		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
> +			if (!(vsi->enabled_tc & (1 << i)))
> +				continue;
>  			tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
>  			/* only main vsi support multi TCs */
> -			dcb_info->tc_queue.tc_rxq[0][i].base =
> +			dcb_info->tc_queue.tc_rxq[j][i].base =
>  				(tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
>  				I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
> -			dcb_info->tc_queue.tc_txq[0][i].base =
> -				dcb_info->tc_queue.tc_rxq[0][i].base;
> +			dcb_info->tc_queue.tc_txq[j][i].base =
> +				dcb_info->tc_queue.tc_rxq[j][i].base;
>  			bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
>  				I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
> -			dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 1 << bsf;
> -			dcb_info->tc_queue.tc_txq[0][i].nb_queue =
> -				dcb_info->tc_queue.tc_rxq[0][i].nb_queue;
> +			dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
> +			dcb_info->tc_queue.tc_txq[j][i].nb_queue =
> +				dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
>  		}
> -	}
> -
> +		vsi = pf->vmdq[j].vsi;
> +		j++;
> +	} while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, ETH_MAX_VMDQ_POOL));
>  	return 0;
>  }
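
With this change an application can see the per-pool TC queue layout
through the existing ethdev API. A minimal usage sketch (the port_id
variable is illustrative):

	struct rte_eth_dcb_info dcb_info;

	if (rte_eth_dev_get_dcb_info(port_id, &dcb_info) == 0) {
		/* tc_rxq[j][i].base and .nb_queue now describe TC i of
		 * VMDQ pool j, not only the main VSI at j == 0 */
	}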
> 
> diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
> index 6edd7dd..c9c440c 100644
> --- a/drivers/net/i40e/i40e_ethdev.h
> +++ b/drivers/net/i40e/i40e_ethdev.h
> @@ -199,6 +199,19 @@ struct i40e_vsi_list {
>  struct i40e_rx_queue;
>  struct i40e_tx_queue;
> 
> +/* Bandwidth limit information */
> +struct i40e_bw_info {
> +	uint16_t bw_limit;      /* BW Limit (0 = disabled) */
> +	uint8_t  bw_max;        /* Max BW limit if enabled */
> +
> +	/* Relative credits within same TC with respect to other VSIs or Comps */
> +	uint8_t  bw_ets_share_credits[I40E_MAX_TRAFFIC_CLASS];
> +	/* Bandwidth limit per TC */
> +	uint8_t  bw_ets_credits[I40E_MAX_TRAFFIC_CLASS];
> +	/* Max bandwidth limit per TC */
> +	uint8_t  bw_ets_max[I40E_MAX_TRAFFIC_CLASS];
> +};
> +
>  /* Structure that defines a VEB */
>  struct i40e_veb {
>  	struct i40e_vsi_list_head head;
> @@ -207,6 +220,8 @@ struct i40e_veb {
>  	uint16_t uplink_seid; /* The uplink seid of this VEB */
>  	uint16_t stats_idx;
>  	struct i40e_eth_stats stats;
> +	uint8_t enabled_tc;   /* The traffic class enabled */
> +	struct i40e_bw_info bw_info; /* VEB bandwidth information */
>  };
> 
>  /* i40e MACVLAN filter structure */
> @@ -216,19 +231,6 @@ struct i40e_macvlan_filter {
>  	uint16_t vlan_id;
>  };
> 
> -/* Bandwidth limit information */
> -struct i40e_bw_info {
> -	uint16_t bw_limit;      /* BW Limit (0 = disabled) */
> -	uint8_t  bw_max;        /* Max BW limit if enabled */
> -
> -	/* Relative VSI credits within same TC with respect to other VSIs */
> -	uint8_t  bw_ets_share_credits[I40E_MAX_TRAFFIC_CLASS];
> -	/* Bandwidth limit per TC */
> -	uint8_t  bw_ets_credits[I40E_MAX_TRAFFIC_CLASS];
> -	/* Max bandwidth limit per TC */
> -	uint8_t  bw_ets_max[I40E_MAX_TRAFFIC_CLASS];
> -};
> -
>  /*
>   * Structure that defines a VSI, associated with a adapter.
>   */
> --
> 2.4.0


