[dpdk-dev] [PATCH 4/8] net/virtio: allocate queue at init stage

Kevin Traynor ktraynor at redhat.com
Fri Nov 4 21:30:00 CET 2016


On 11/04/2016 03:21 PM, Kevin Traynor wrote:
> On 11/03/2016 04:09 PM, Yuanhan Liu wrote:
>> Queue allocation should be done once, since the queue-related info (such
>> as the vring address) will only be passed to the vhost-user backend once,
>> unless the virtio device is reset.
>>
>> That means, if you allocate queues again after the vhost-user negotiation,
>> the vhost-user backend will not be informed any more, leading to a state
>> where the vring info mismatches between the virtio PMD driver and the
>> vhost backend: the driver switches to the new address that has just been
>> allocated, while the vhost backend still sticks to the old address that
>> was assigned in the init stage.
>>
>> Unfortunately, that is exactly how the virtio driver is coded so far: queue
>> allocation is done at queue_setup stage (when rte_eth_tx/rx_queue_setup is
>> invoked). This is wrong, because queue_setup can be invoked several times.
>> For example,
>>
>>     $ start_testpmd.sh ... --txq=1 --rxq=1 ...
>>     > port stop 0
>>     > port config all txq 1 # just trigger the queue_setup callback again
>>     > port config all rxq 1
>>     > port start 0
>>
>> The right way to do it is to allocate the queues in the init stage, so
>> that the vring info stays consistent with the vhost-user backend.
>>
>> Besides that, we should allocate the maximum number of queue pairs the
>> device supports, rather than the number of queue pairs configured
>> initially, to make the following case work.
>>
>>     $ start_testpmd.sh ... --txq=1 --rxq=1 ...
>>     > port stop 0
>>     > port config all txq 2
>>     > port config all rxq 2
>>     > port start 0
> 
> hi Yuanhan, firstly - thanks for this patchset. It is certainly needed
> to fix the silent failure after increasing the number of queues.
> 
> I tried a few tests and I'm seeing an issue. I can stop the port,
> increase the number of queues and traffic is ok, but if I try to
> decrease the number of queues it hangs on port start. I'm running the
> head of master with your patches in the guest and 16.07 in the host.
> 
> $ testpmd -c 0x5f -n 4 --socket-mem 1024 -- --burst=64 -i
> --disable-hw-vlan --rxq=2 --txq=2 --rxd=256 --txd=256 --forward-mode=io
>> port stop all
>> port config all rxq 1
>> port config all txq 1
>> port start all
> Configuring Port 0 (socket 0)
> (hang here)
> 
> I've tested a few different scenarios and anytime the queues are
> decreased from the previous number the hang occurs.
> 
> I can debug further but wanted to report early as maybe issue is an
> obvious one?

virtio_dev_start() is getting stuck as soon as it needs to send a
command using the virtio_send_command() function, at this line of code:

	while (VIRTQUEUE_NUSED(vq) == 0) {
		rte_rmb();
		usleep(100);
	}

whereby:
#define VIRTQUEUE_NUSED(vq) ((uint16_t)((vq)->vq_ring.used->idx -
(vq)->vq_used_cons_idx))

and they are both the same value (7 in my case).

> 
> thanks,
> Kevin.
> 
>>
>> Signed-off-by: Yuanhan Liu <yuanhan.liu at linux.intel.com>
>> ---
>>  drivers/net/virtio/virtio_ethdev.c | 105 +++++++++++++++++++++++--------------
>>  drivers/net/virtio/virtio_ethdev.h |   8 ---
>>  drivers/net/virtio/virtio_pci.h    |   2 +
>>  drivers/net/virtio/virtio_rxtx.c   |  38 +++++++-------
>>  4 files changed, 85 insertions(+), 68 deletions(-)
>>
>> diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
>> index 5a2c14b..253bcb5 100644
>> --- a/drivers/net/virtio/virtio_ethdev.c
>> +++ b/drivers/net/virtio/virtio_ethdev.c
>> @@ -295,13 +295,19 @@ virtio_dev_queue_release(struct virtqueue *vq)
>>  	}
>>  }
>>  
>> -int virtio_dev_queue_setup(struct rte_eth_dev *dev,
>> -			int queue_type,
>> -			uint16_t queue_idx,
>> -			uint16_t vtpci_queue_idx,
>> -			uint16_t nb_desc,
>> -			unsigned int socket_id,
>> -			void **pvq)
>> +static int
>> +virtio_get_queue_type(struct virtio_hw *hw, uint16_t vtpci_queue_idx)
>> +{
>> +	if (vtpci_queue_idx == hw->max_queue_pairs * 2)
>> +		return VTNET_CQ;
>> +	else if (vtpci_queue_idx % 2 == 0)
>> +		return VTNET_RQ;
>> +	else
>> +		return VTNET_TQ;
>> +}
>> +
>> +static int
>> +virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
>>  {
>>  	char vq_name[VIRTQUEUE_MAX_NAME_SZ];
>>  	char vq_hdr_name[VIRTQUEUE_MAX_NAME_SZ];
>> @@ -314,6 +320,7 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,
>>  	struct virtqueue *vq;
>>  	size_t sz_hdr_mz = 0;
>>  	void *sw_ring = NULL;
>> +	int queue_type = virtio_get_queue_type(hw, vtpci_queue_idx);
>>  	int ret;
>>  
>>  	PMD_INIT_LOG(DEBUG, "setting up queue: %u", vtpci_queue_idx);
>> @@ -351,18 +358,18 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,
>>  		sz_hdr_mz = PAGE_SIZE;
>>  	}
>>  
>> -	vq = rte_zmalloc_socket(vq_name, size, RTE_CACHE_LINE_SIZE, socket_id);
>> +	vq = rte_zmalloc_socket(vq_name, size, RTE_CACHE_LINE_SIZE,
>> +				SOCKET_ID_ANY);
>>  	if (vq == NULL) {
>>  		PMD_INIT_LOG(ERR, "can not allocate vq");
>>  		return -ENOMEM;
>>  	}
>> +	hw->vqs[vtpci_queue_idx] = vq;
>> +
>>  	vq->hw = hw;
>>  	vq->vq_queue_index = vtpci_queue_idx;
>>  	vq->vq_nentries = vq_size;
>> -
>> -	if (nb_desc == 0 || nb_desc > vq_size)
>> -		nb_desc = vq_size;
>> -	vq->vq_free_cnt = nb_desc;
>> +	vq->vq_free_cnt = vq_size;
>>  
>>  	/*
>>  	 * Reserve a memzone for vring elements
>> @@ -372,7 +379,8 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,
>>  	PMD_INIT_LOG(DEBUG, "vring_size: %d, rounded_vring_size: %d",
>>  		     size, vq->vq_ring_size);
>>  
>> -	mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size, socket_id,
>> +	mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
>> +					 SOCKET_ID_ANY,
>>  					 0, VIRTIO_PCI_VRING_ALIGN);
>>  	if (mz == NULL) {
>>  		if (rte_errno == EEXIST)
>> @@ -396,7 +404,7 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,
>>  		snprintf(vq_hdr_name, sizeof(vq_hdr_name), "port%d_vq%d_hdr",
>>  			 dev->data->port_id, vtpci_queue_idx);
>>  		hdr_mz = rte_memzone_reserve_aligned(vq_hdr_name, sz_hdr_mz,
>> -						     socket_id, 0,
>> +						     SOCKET_ID_ANY, 0,
>>  						     RTE_CACHE_LINE_SIZE);
>>  		if (hdr_mz == NULL) {
>>  			if (rte_errno == EEXIST)
>> @@ -413,7 +421,7 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,
>>  			       sizeof(vq->sw_ring[0]);
>>  
>>  		sw_ring = rte_zmalloc_socket("sw_ring", sz_sw,
>> -					     RTE_CACHE_LINE_SIZE, socket_id);
>> +				RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
>>  		if (!sw_ring) {
>>  			PMD_INIT_LOG(ERR, "can not allocate RX soft ring");
>>  			ret = -ENOMEM;
>> @@ -424,19 +432,14 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,
>>  		rxvq = &vq->rxq;
>>  		rxvq->vq = vq;
>>  		rxvq->port_id = dev->data->port_id;
>> -		rxvq->queue_id = queue_idx;
>>  		rxvq->mz = mz;
>> -		*pvq = rxvq;
>>  	} else if (queue_type == VTNET_TQ) {
>>  		txvq = &vq->txq;
>>  		txvq->vq = vq;
>>  		txvq->port_id = dev->data->port_id;
>> -		txvq->queue_id = queue_idx;
>>  		txvq->mz = mz;
>>  		txvq->virtio_net_hdr_mz = hdr_mz;
>>  		txvq->virtio_net_hdr_mem = hdr_mz->phys_addr;
>> -
>> -		*pvq = txvq;
>>  	} else if (queue_type == VTNET_CQ) {
>>  		cvq = &vq->cq;
>>  		cvq->vq = vq;
>> @@ -444,7 +447,8 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,
>>  		cvq->virtio_net_hdr_mz = hdr_mz;
>>  		cvq->virtio_net_hdr_mem = hdr_mz->phys_addr;
>>  		memset(cvq->virtio_net_hdr_mz->addr, 0, PAGE_SIZE);
>> -		*pvq = cvq;
>> +
>> +		hw->cvq = cvq;
>>  	}
>>  
>>  	/* For virtio_user case (that is when dev->pci_dev is NULL), we use
>> @@ -502,23 +506,45 @@ fail_q_alloc:
>>  }
>>  
>>  static int
>> -virtio_dev_cq_queue_setup(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx,
>> -		uint32_t socket_id)
>> +virtio_alloc_queues(struct rte_eth_dev *dev)
>>  {
>> -	struct virtnet_ctl *cvq;
>> -	int ret;
>>  	struct virtio_hw *hw = dev->data->dev_private;
>> +	uint16_t nr_vq = hw->max_queue_pairs * 2;
>> +	uint16_t i;
>> +	int ret;
>>  
>> -	PMD_INIT_FUNC_TRACE();
>> -	ret = virtio_dev_queue_setup(dev, VTNET_CQ, VTNET_SQ_CQ_QUEUE_IDX,
>> -			vtpci_queue_idx, 0, socket_id, (void **)&cvq);
>> -	if (ret < 0) {
>> -		PMD_INIT_LOG(ERR, "control vq initialization failed");
>> -		return ret;
>> +	if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ))
>> +		nr_vq += 1;
>> +
>> +	hw->vqs = rte_zmalloc(NULL, sizeof(struct virtqueue *) * nr_vq, 0);
>> +	if (!hw->vqs) {
>> +		PMD_INIT_LOG(ERR, "failed to allocate vqs");
>> +		return -ENOMEM;
>> +	}
>> +
>> +	for (i = 0; i < nr_vq; i++) {
>> +		ret = virtio_init_queue(dev, i);
>> +		if (ret < 0)
>> +			goto cleanup;
>>  	}
>>  
>> -	hw->cvq = cvq;
>>  	return 0;
>> +
>> +cleanup:
>> +	/*
>> +	 * ctrl queue is the last queue; if we go here, it means the ctrl
>> +	 * queue is not allocated, that we can do no cleanup for it here.
>> +	 */
>> +	while (i > 0) {
>> +		i--;
>> +		if (i % 2 == 0)
>> +			virtio_dev_rx_queue_release(&hw->vqs[i]->rxq);
>> +		else
>> +			virtio_dev_tx_queue_release(&hw->vqs[i]->txq);
>> +	}
>> +	rte_free(hw->vqs);
>> +
>> +	return ret;
>>  }
>>  
>>  static void
>> @@ -1141,6 +1167,7 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
>>  	struct virtio_net_config *config;
>>  	struct virtio_net_config local_config;
>>  	struct rte_pci_device *pci_dev = eth_dev->pci_dev;
>> +	int ret;
>>  
>>  	/* Reset the device although not necessary at startup */
>>  	vtpci_reset(hw);
>> @@ -1222,6 +1249,10 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
>>  		hw->max_queue_pairs = 1;
>>  	}
>>  
>> +	ret = virtio_alloc_queues(eth_dev);
>> +	if (ret < 0)
>> +		return ret;
>> +
>>  	if (pci_dev)
>>  		PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
>>  			eth_dev->data->port_id, pci_dev->id.vendor_id,
>> @@ -1390,15 +1421,9 @@ virtio_dev_configure(struct rte_eth_dev *dev)
>>  		return -ENOTSUP;
>>  	}
>>  
>> -	/* Setup and start control queue */
>> -	if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ)) {
>> -		ret = virtio_dev_cq_queue_setup(dev,
>> -			hw->max_queue_pairs * 2,
>> -			SOCKET_ID_ANY);
>> -		if (ret < 0)
>> -			return ret;
>> +	/* start control queue */
>> +	if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ))
>>  		virtio_dev_cq_start(dev);
>> -	}
>>  
>>  	hw->vlan_strip = rxmode->hw_vlan_strip;
>>  
>> diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
>> index de33b32..5db8632 100644
>> --- a/drivers/net/virtio/virtio_ethdev.h
>> +++ b/drivers/net/virtio/virtio_ethdev.h
>> @@ -80,14 +80,6 @@ void virtio_dev_cq_start(struct rte_eth_dev *dev);
>>   */
>>  void virtio_dev_rxtx_start(struct rte_eth_dev *dev);
>>  
>> -int virtio_dev_queue_setup(struct rte_eth_dev *dev,
>> -			int queue_type,
>> -			uint16_t queue_idx,
>> -			uint16_t vtpci_queue_idx,
>> -			uint16_t nb_desc,
>> -			unsigned int socket_id,
>> -			void **pvq);
>> -
>>  void virtio_dev_queue_release(struct virtqueue *vq);
>>  
>>  int  virtio_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
>> diff --git a/drivers/net/virtio/virtio_pci.h b/drivers/net/virtio/virtio_pci.h
>> index 0c5ed31..f63f76c 100644
>> --- a/drivers/net/virtio/virtio_pci.h
>> +++ b/drivers/net/virtio/virtio_pci.h
>> @@ -264,6 +264,8 @@ struct virtio_hw {
>>  	struct virtio_net_config *dev_cfg;
>>  	const struct virtio_pci_ops *vtpci_ops;
>>  	void	    *virtio_user_dev;
>> +
>> +	struct virtqueue **vqs;
>>  };
>>  
>>  /*
>> diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
>> index b4c4aa4..fb703d2 100644
>> --- a/drivers/net/virtio/virtio_rxtx.c
>> +++ b/drivers/net/virtio/virtio_rxtx.c
>> @@ -530,24 +530,24 @@ int
>>  virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
>>  			uint16_t queue_idx,
>>  			uint16_t nb_desc,
>> -			unsigned int socket_id,
>> +			unsigned int socket_id __rte_unused,
>>  			__rte_unused const struct rte_eth_rxconf *rx_conf,
>>  			struct rte_mempool *mp)
>>  {
>>  	uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
>> +	struct virtio_hw *hw = dev->data->dev_private;
>> +	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
>>  	struct virtnet_rx *rxvq;
>> -	int ret;
>>  
>>  	PMD_INIT_FUNC_TRACE();
>> -	ret = virtio_dev_queue_setup(dev, VTNET_RQ, queue_idx, vtpci_queue_idx,
>> -			nb_desc, socket_id, (void **)&rxvq);
>> -	if (ret < 0) {
>> -		PMD_INIT_LOG(ERR, "rvq initialization failed");
>> -		return ret;
>> -	}
>>  
>> -	/* Create mempool for rx mbuf allocation */
>> +	if (nb_desc == 0 || nb_desc > vq->vq_nentries)
>> +		nb_desc = vq->vq_nentries;
>> +	vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
>> +
>> +	rxvq = &vq->rxq;
>>  	rxvq->mpool = mp;
>> +	rxvq->queue_id = queue_idx;
>>  
>>  	dev->data->rx_queues[queue_idx] = rxvq;
>>  
>> @@ -613,27 +613,25 @@ int
>>  virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
>>  			uint16_t queue_idx,
>>  			uint16_t nb_desc,
>> -			unsigned int socket_id,
>> +			unsigned int socket_id __rte_unused,
>>  			const struct rte_eth_txconf *tx_conf)
>>  {
>>  	uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
>> +	struct virtio_hw *hw = dev->data->dev_private;
>> +	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
>>  	struct virtnet_tx *txvq;
>> -	struct virtqueue *vq;
>>  	uint16_t tx_free_thresh;
>> -	int ret;
>>  
>>  	PMD_INIT_FUNC_TRACE();
>>  
>> -
>>  	virtio_update_rxtx_handler(dev, tx_conf);
>>  
>> -	ret = virtio_dev_queue_setup(dev, VTNET_TQ, queue_idx, vtpci_queue_idx,
>> -			nb_desc, socket_id, (void **)&txvq);
>> -	if (ret < 0) {
>> -		PMD_INIT_LOG(ERR, "tvq initialization failed");
>> -		return ret;
>> -	}
>> -	vq = txvq->vq;
>> +	if (nb_desc == 0 || nb_desc > vq->vq_nentries)
>> +		nb_desc = vq->vq_nentries;
>> +	vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
>> +
>> +	txvq = &vq->txq;
>> +	txvq->queue_id = queue_idx;
>>  
>>  	tx_free_thresh = tx_conf->tx_free_thresh;
>>  	if (tx_free_thresh == 0)
>>
> 



More information about the dev mailing list