[RFC,v3] net/i40e: enable multi-queue Rx interrupt for VF

Message ID 20190916144308.119187-1-lunyuanx.cui@intel.com (mailing list archive)
State Superseded, archived
Delegated to: xiaolong ye
Headers
Series [RFC,v3] net/i40e: enable multi-queue Rx interrupt for VF

Checks

Context Check Description
ci/checkpatch warning coding style issues
ci/Intel-compilation success Compilation OK

Commit Message

Cui, LunyuanX Sept. 16, 2019, 2:43 p.m. UTC
  In the current implementation, only one Rx queue can support interrupt,
because all queues are mapped to the same vector ID in vfio_enable_msix().
As a result, the VF cannot support multi-queue Rx interrupts in interrupt mode.

In this patch, if the packet I/O interrupt on the datapath is enabled
(rte_intr_dp_is_en(intr_handle) is true), we map a different interrupt
vector to each queue and send this map to the PF.
After the PF programs the map into the register,
interrupts for all Rx queues can be received.
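
For context, a rough sketch of how such a map is handed to the PF over
virtchnl; the vf_cmd_info field names and i40evf_execute_vf_cmd() follow
the existing i40evf driver pattern and are assumptions here, only the
VIRTCHNL_OP_CONFIG_IRQ_MAP opcode itself appears in the diff below:

	/* Sketch only: send the per-queue IRQ map to the PF (field names
	 * assumed from the existing i40evf command pattern). */
	args.ops = VIRTCHNL_OP_CONFIG_IRQ_MAP;
	args.in_args = (uint8_t *)cmd_buffer;
	args.in_args_size = sizeof(cmd_buffer);
	args.out_buffer = vf->aq_resp;
	args.out_size = I40E_AQ_BUF_SZ;
	err = i40evf_execute_vf_cmd(dev, &args);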

In addition, because of i40e performance constraints in ioctl(),
the maximum supported interrupt vector ID is 4.
If the vector ID is larger than 4, the i40e driver will fail to start.
So when the number of queues is more than 4,
we wrap the interrupt vector mapping around from 1 to 4, as sketched below.
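
A minimal sketch of that wrap-around, assuming I40E_RX_VEC_START is 1 and
the per-VF data-path vector limit is 4 (both values are assumptions for
illustration, not part of the patch):

	/* Illustration only: queue 0 -> vector 1, ..., queue 3 -> vector 4,
	 * queue 4 -> vector 1 again, and so on. */
	static uint16_t
	rx_queue_to_vector(uint16_t queue_id)
	{
		const uint16_t rx_vec_start = 1; /* I40E_RX_VEC_START (assumed) */
		const uint16_t nb_vectors = 4;   /* RTE_LIBRTE_I40E_IRQ_NUM_PER_VF */

		return rx_vec_start + queue_id % nb_vectors;
	}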

Signed-off-by: Lunyuan Cui <lunyuanx.cui@intel.com>
---
v3:
* combined 2 lines into 1 line
  before:
	map_info->vecmap[i].rxq_map = 0;
	map_info->vecmap[i].rxq_map |= 1 << i;
  after:
	map_info->vecmap[i].rxq_map = 1 << i;

v2:
* set up a loop of interrupt vector mappings from 1 to 4, and send the
  message from VF to PF only once.
---
 drivers/net/i40e/i40e_ethdev_vf.c | 27 +++++++++++++++++----------
 1 file changed, 17 insertions(+), 10 deletions(-)
  

Comments

Qi Zhang Sept. 17, 2019, 2:33 a.m. UTC | #1
> -----Original Message-----
> From: Cui, LunyuanX
> Sent: Monday, September 16, 2019 10:43 PM
> To: Yang, Qiming <qiming.yang@intel.com>; Wu, Jingjing
> <jingjing.wu@intel.com>; Xing, Beilei <beilei.xing@intel.com>; Zhang, Qi Z
> <qi.z.zhang@intel.com>; Wang, ShougangX <shougangx.wang@intel.com>;
> Stillwell Jr, Paul M <paul.m.stillwell.jr@intel.com>
> Cc: dev@dpdk.org; Cui, LunyuanX <lunyuanx.cui@intel.com>
> Subject: [RFC v3] net/i40e: enable multi-queue Rx interrupt for VF
> 
> In the current implementation, only one Rx queue can support interrupt,
> because all queues are mapped to the same vector ID in vfio_enable_msix().
> As a result, the VF cannot support multi-queue Rx interrupts in interrupt mode.
> 
> In this patch, if the packet I/O interrupt on the datapath is enabled
> (rte_intr_dp_is_en(intr_handle) is true), we map a different interrupt vector to
> each queue and send this map to the PF.
> After the PF programs the map into the register,
> interrupts for all Rx queues can be received.
> 
> In addition, because of i40e performance constraints in ioctl(), the maximum
> supported interrupt vector ID is 4.
> If the vector ID is larger than 4, the i40e driver will fail to start.
> So when the number of queues is more than 4,
> we wrap the interrupt vector mapping around from 1 to 4.

Don't know why we need to limit the maximum number of interrupt vectors to 4, or why the i40e driver would fail to start; could you share more detail?
I think you can do a similar implementation to what the iavf driver does (ref iavf_config_irq_map)
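
For reference, a rough sketch of a non-hard-coded mapping in the spirit of
that suggestion; deriving the wrap point from intr_handle->nb_efd is an
assumption here, not code taken from the iavf driver:

	/* Sketch only: wrap on the number of data-path event fds actually
	 * allocated, instead of a hard-coded limit of 4. */
	uint16_t nb_vec = rte_intr_dp_is_en(intr_handle) ?
			  intr_handle->nb_efd : 1;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		uint16_t vec = rte_intr_allow_others(intr_handle) ?
			       I40E_RX_VEC_START + i % nb_vec :
			       I40E_MISC_VEC_ID;
		map_info->vecmap[i].vector_id = vec;
		if (rte_intr_dp_is_en(intr_handle))
			intr_handle->intr_vec[i] = vec;
	}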

> 
> Signed-off-by: Lunyuan Cui <lunyuanx.cui@intel.com>
> ---
> v3:
> * combined 2 lines into 1 line
>   before:
> 	map_info->vecmap[i].rxq_map = 0;
> 	map_info->vecmap[i].rxq_map |= 1 << i;
>   after:
> 	map_info->vecmap[i].rxq_map = 1 << i;
> 
> v2:
> * set up a loop of interrupt vector mappings from 1 to 4, and send the
>   message from VF to PF only once.
> ---
>  drivers/net/i40e/i40e_ethdev_vf.c | 27 +++++++++++++++++----------
>  1 file changed, 17 insertions(+), 10 deletions(-)
> 
> diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
> index 308fb9835..abb2d1353 100644
> --- a/drivers/net/i40e/i40e_ethdev_vf.c
> +++ b/drivers/net/i40e/i40e_ethdev_vf.c
> @@ -645,13 +645,15 @@ i40evf_configure_vsi_queues(struct rte_eth_dev *dev)
>  	return ret;
>  }
> 
> +#define RTE_LIBRTE_I40E_IRQ_NUM_PER_VF  4
> +
>  static int
>  i40evf_config_irq_map(struct rte_eth_dev *dev)
>  {
>  	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
>  	struct vf_cmd_info args;
>  	uint8_t cmd_buffer[sizeof(struct virtchnl_irq_map_info) + \
> -		sizeof(struct virtchnl_vector_map)];
> +		sizeof(struct virtchnl_vector_map) * dev->data->nb_rx_queues];
>  	struct virtchnl_irq_map_info *map_info;
>  	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
>  	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
> @@ -665,18 +667,23 @@ i40evf_config_irq_map(struct rte_eth_dev *dev)
>  		vector_id = I40E_MISC_VEC_ID;
> 
>  	map_info = (struct virtchnl_irq_map_info *)cmd_buffer;
> -	map_info->num_vectors = 1;
> -	map_info->vecmap[0].rxitr_idx = I40E_ITR_INDEX_DEFAULT;
> -	map_info->vecmap[0].vsi_id = vf->vsi_res->vsi_id;
> -	/* Alway use default dynamic MSIX interrupt */
> -	map_info->vecmap[0].vector_id = vector_id;
> -	/* Don't map any tx queue */
> -	map_info->vecmap[0].txq_map = 0;
> -	map_info->vecmap[0].rxq_map = 0;
> +	map_info->num_vectors = dev->data->nb_rx_queues;
>  	for (i = 0; i < dev->data->nb_rx_queues; i++) {
> -		map_info->vecmap[0].rxq_map |= 1 << i;
> +		map_info->vecmap[i].rxitr_idx = I40E_ITR_INDEX_DEFAULT;
> +		map_info->vecmap[i].vsi_id = vf->vsi_res->vsi_id;
> +		/* Always use default dynamic MSIX interrupt */
> +		map_info->vecmap[i].vector_id = vector_id;
> +		/* Don't map any tx queue */
> +		map_info->vecmap[i].txq_map = 0;
> +		map_info->vecmap[i].rxq_map = 1 << i;
>  		if (rte_intr_dp_is_en(intr_handle))
>  			intr_handle->intr_vec[i] = vector_id;
> +		if (vector_id > I40E_MISC_VEC_ID) {
> +			if (vector_id < RTE_LIBRTE_I40E_IRQ_NUM_PER_VF)
> +				vector_id++;
> +			else
> +				vector_id = I40E_RX_VEC_START;
> +		}
>  	}
> 
>  	args.ops = VIRTCHNL_OP_CONFIG_IRQ_MAP;
> --
> 2.17.1
  

Patch

diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index 308fb9835..abb2d1353 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -645,13 +645,15 @@  i40evf_configure_vsi_queues(struct rte_eth_dev *dev)
 	return ret;
 }
 
+#define RTE_LIBRTE_I40E_IRQ_NUM_PER_VF  4
+
 static int
 i40evf_config_irq_map(struct rte_eth_dev *dev)
 {
 	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	struct vf_cmd_info args;
 	uint8_t cmd_buffer[sizeof(struct virtchnl_irq_map_info) + \
-		sizeof(struct virtchnl_vector_map)];
+		sizeof(struct virtchnl_vector_map) * dev->data->nb_rx_queues];
 	struct virtchnl_irq_map_info *map_info;
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
@@ -665,18 +667,23 @@  i40evf_config_irq_map(struct rte_eth_dev *dev)
 		vector_id = I40E_MISC_VEC_ID;
 
 	map_info = (struct virtchnl_irq_map_info *)cmd_buffer;
-	map_info->num_vectors = 1;
-	map_info->vecmap[0].rxitr_idx = I40E_ITR_INDEX_DEFAULT;
-	map_info->vecmap[0].vsi_id = vf->vsi_res->vsi_id;
-	/* Alway use default dynamic MSIX interrupt */
-	map_info->vecmap[0].vector_id = vector_id;
-	/* Don't map any tx queue */
-	map_info->vecmap[0].txq_map = 0;
-	map_info->vecmap[0].rxq_map = 0;
+	map_info->num_vectors = dev->data->nb_rx_queues;
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
-		map_info->vecmap[0].rxq_map |= 1 << i;
+		map_info->vecmap[i].rxitr_idx = I40E_ITR_INDEX_DEFAULT;
+		map_info->vecmap[i].vsi_id = vf->vsi_res->vsi_id;
+	/* Always use default dynamic MSIX interrupt */
+		map_info->vecmap[i].vector_id = vector_id;
+		/* Don't map any tx queue */
+		map_info->vecmap[i].txq_map = 0;
+		map_info->vecmap[i].rxq_map = 1 << i;
 		if (rte_intr_dp_is_en(intr_handle))
 			intr_handle->intr_vec[i] = vector_id;
+		if (vector_id > I40E_MISC_VEC_ID) {
+			if (vector_id < RTE_LIBRTE_I40E_IRQ_NUM_PER_VF)
+				vector_id++;
+			else
+				vector_id = I40E_RX_VEC_START;
+		}
 	}
 
 	args.ops = VIRTCHNL_OP_CONFIG_IRQ_MAP;