[PATCH v2 1/2] vhost: introduce DMA vchannel unconfiguration

Xia, Chenbo chenbo.xia at intel.com
Mon Sep 26 08:06:58 CEST 2022


> -----Original Message-----
> From: Ding, Xuan <xuan.ding at intel.com>
> Sent: Tuesday, September 6, 2022 1:22 PM
> To: maxime.coquelin at redhat.com; Xia, Chenbo <chenbo.xia at intel.com>
> Cc: dev at dpdk.org; Hu, Jiayu <jiayu.hu at intel.com>; He, Xingguang
> <xingguang.he at intel.com>; Yang, YvonneX <yvonnex.yang at intel.com>; Jiang,
> Cheng1 <cheng1.jiang at intel.com>; Wang, YuanX <yuanx.wang at intel.com>; Ma,
> WenwuX <wenwux.ma at intel.com>; Ding, Xuan <xuan.ding at intel.com>
> Subject: [PATCH v2 1/2] vhost: introduce DMA vchannel unconfiguration
> 
> From: Xuan Ding <xuan.ding at intel.com>
> 
> This patch adds a new API rte_vhost_async_dma_unconfigure() to unconfigure
> DMA vchannels in the vhost async data path.
> 
> Lock protection is also added to protect DMA vchannel configuration and
> unconfiguration from concurrent calls.
> 
> Signed-off-by: Xuan Ding <xuan.ding at intel.com>
> ---
>  doc/guides/prog_guide/vhost_lib.rst    |  5 ++
>  doc/guides/rel_notes/release_22_11.rst |  2 +
>  lib/vhost/rte_vhost_async.h            | 17 +++++++
>  lib/vhost/version.map                  |  3 ++
>  lib/vhost/vhost.c                      | 69 ++++++++++++++++++++++++--
>  5 files changed, 91 insertions(+), 5 deletions(-)
> 
> diff --git a/doc/guides/prog_guide/vhost_lib.rst b/doc/guides/prog_guide/vhost_lib.rst
> index bad4d819e1..22764cbeaa 100644
> --- a/doc/guides/prog_guide/vhost_lib.rst
> +++ b/doc/guides/prog_guide/vhost_lib.rst
> @@ -323,6 +323,11 @@ The following is an overview of some key Vhost API functions:
>    Get device type of vDPA device, such as VDPA_DEVICE_TYPE_NET,
>    VDPA_DEVICE_TYPE_BLK.
> 
> +* ``rte_vhost_async_dma_unconfigure(dma_id, vchan_id)``
> +
> +  Clear DMA vChannels that are no longer in use. This function needs to
> +  be called after the deregisterration of the async path has been finished.

Deregistration
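
To make the ordering concrete for readers, a minimal teardown sketch
(illustration only; 'vid' and the dma/vchan ids are assumed values, not
part of this patch):

	/* deregister the async channel on each queue first */
	rte_vhost_async_channel_unregister(vid, 0 /* queue id */);

	/* only then release the DMA vchannel, as the doc text requires */
	rte_vhost_async_dma_unconfigure(0 /* dma_id */, 0 /* vchan_id */);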

> +
>  Vhost-user Implementations
>  --------------------------
> 
> diff --git a/doc/guides/rel_notes/release_22_11.rst b/doc/guides/rel_notes/release_22_11.rst
> index 8c021cf050..e94c006e39 100644
> --- a/doc/guides/rel_notes/release_22_11.rst
> +++ b/doc/guides/rel_notes/release_22_11.rst
> @@ -55,6 +55,8 @@ New Features
>       Also, make sure to start the actual text at the margin.
>       =======================================================
> 
> +* **Added vhost API to unconfigure DMA vchannels.**
> +  Added an API which helps to unconfigure DMA vchannels.

Suggest wording this as "Added XXX for async vhost".

Overall LGTM. It seems to need some rebasing too.

Thanks,
Chenbo

> 
>  Removed Items
>  -------------
> diff --git a/lib/vhost/rte_vhost_async.h b/lib/vhost/rte_vhost_async.h
> index 1db2a10124..0442e027fd 100644
> --- a/lib/vhost/rte_vhost_async.h
> +++ b/lib/vhost/rte_vhost_async.h
> @@ -266,6 +266,23 @@ rte_vhost_async_try_dequeue_burst(int vid, uint16_t queue_id,
>  	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count,
>  	int *nr_inflight, int16_t dma_id, uint16_t vchan_id);
> 
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice.
> + *
> + * Unconfigure DMA vChannels in asynchronous data path.
> + *
> + * @param dma_id
> + *  the identifier of DMA device
> + * @param vchan_id
> + *  the identifier of virtual DMA channel
> + * @return
> + *  0 on success, and -1 on failure
> + */
> +__rte_experimental
> +int
> +rte_vhost_async_dma_unconfigure(int16_t dma_id, uint16_t vchan_id);
> +
>  #ifdef __cplusplus
>  }
>  #endif
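
Since the symbol lands in the EXPERIMENTAL section of the version map,
callers must build with ALLOW_EXPERIMENTAL_API defined. A minimal call
sketch with illustrative ids (not taken from the patch):

	#include <stdio.h>
	#include <rte_vhost_async.h>	/* build with -DALLOW_EXPERIMENTAL_API */

	int16_t dma_id = 0;	/* assumed dmadev id */
	uint16_t vchan_id = 0;	/* assumed vchannel id */

	if (rte_vhost_async_dma_unconfigure(dma_id, vchan_id) < 0)
		printf("unconfigure of DMA %d vchan %u failed\n", dma_id, vchan_id);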
> diff --git a/lib/vhost/version.map b/lib/vhost/version.map
> index 18574346d5..013a6bcc42 100644
> --- a/lib/vhost/version.map
> +++ b/lib/vhost/version.map
> @@ -96,6 +96,9 @@ EXPERIMENTAL {
>  	rte_vhost_async_try_dequeue_burst;
>  	rte_vhost_driver_get_vdpa_dev_type;
>  	rte_vhost_clear_queue;
> +
> +	# added in 22.11
> +	rte_vhost_async_dma_unconfigure;
>  };
> 
>  INTERNAL {
> diff --git a/lib/vhost/vhost.c b/lib/vhost/vhost.c
> index 60cb05a0ff..273616da11 100644
> --- a/lib/vhost/vhost.c
> +++ b/lib/vhost/vhost.c
> @@ -23,6 +23,7 @@
> 
>  struct virtio_net *vhost_devices[RTE_MAX_VHOST_DEVICE];
>  pthread_mutex_t vhost_dev_lock = PTHREAD_MUTEX_INITIALIZER;
> +static rte_spinlock_t vhost_dma_lock = RTE_SPINLOCK_INITIALIZER;
> 
>  struct vhost_vq_stats_name_off {
>  	char name[RTE_VHOST_STATS_NAME_SIZE];
> @@ -1870,19 +1871,20 @@ rte_vhost_async_dma_configure(int16_t dma_id, uint16_t vchan_id)
>  	void *pkts_cmpl_flag_addr;
>  	uint16_t max_desc;
> 
> +	rte_spinlock_lock(&vhost_dma_lock);
>  	if (!rte_dma_is_valid(dma_id)) {
>  		VHOST_LOG_CONFIG("dma", ERR, "DMA %d is not found.\n", dma_id);
> -		return -1;
> +		goto error;
>  	}
> 
>  	if (rte_dma_info_get(dma_id, &info) != 0) {
>  		VHOST_LOG_CONFIG("dma", ERR, "Fail to get DMA %d information.\n", dma_id);
> -		return -1;
> +		goto error;
>  	}
> 
>  	if (vchan_id >= info.max_vchans) {
>  		VHOST_LOG_CONFIG("dma", ERR, "Invalid DMA %d vChannel %u.\n", dma_id, vchan_id);
> -		return -1;
> +		goto error;
>  	}
> 
>  	if (!dma_copy_track[dma_id].vchans) {
> @@ -1894,7 +1896,7 @@ rte_vhost_async_dma_configure(int16_t dma_id, uint16_t vchan_id)
>  			VHOST_LOG_CONFIG("dma", ERR,
>  				"Failed to allocate vchans for DMA %d vChannel %u.\n",
>  				dma_id, vchan_id);
> -			return -1;
> +			goto error;
>  		}
> 
>  		dma_copy_track[dma_id].vchans = vchans;
> @@ -1903,6 +1905,7 @@ rte_vhost_async_dma_configure(int16_t dma_id, uint16_t vchan_id)
>  	if (dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr) {
>  		VHOST_LOG_CONFIG("dma", INFO, "DMA %d vChannel %u already registered.\n",
>  			dma_id, vchan_id);
> +		rte_spinlock_unlock(&vhost_dma_lock);
>  		return 0;
>  	}
> 
> @@ -1920,7 +1923,7 @@ rte_vhost_async_dma_configure(int16_t dma_id, uint16_t vchan_id)
>  			rte_free(dma_copy_track[dma_id].vchans);
>  			dma_copy_track[dma_id].vchans = NULL;
>  		}
> -		return -1;
> +		goto error;
>  	}
> 
>  	dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr = pkts_cmpl_flag_addr;
> @@ -1928,7 +1931,12 @@ rte_vhost_async_dma_configure(int16_t dma_id, uint16_t vchan_id)
>  	dma_copy_track[dma_id].vchans[vchan_id].ring_mask = max_desc - 1;
>  	dma_copy_track[dma_id].nr_vchans++;
> 
> +	rte_spinlock_unlock(&vhost_dma_lock);
>  	return 0;
> +
> +error:
> +	rte_spinlock_unlock(&vhost_dma_lock);
> +	return -1;
>  }
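
For reviewers skimming the locking change: every failure return in this
function is now a goto, so vhost_dma_lock is released on all exit paths.
Condensed, the pattern is (a sketch; check_ok() is a hypothetical stand-in
for the validity checks above):

	rte_spinlock_lock(&vhost_dma_lock);
	if (!check_ok())	/* hypothetical validity check */
		goto error;
	/* ... do the configuration work under the lock ... */
	rte_spinlock_unlock(&vhost_dma_lock);
	return 0;

error:
	rte_spinlock_unlock(&vhost_dma_lock);
	return -1;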
> 
>  int
> @@ -2117,5 +2125,56 @@ int rte_vhost_vring_stats_reset(int vid, uint16_t queue_id)
>  	return 0;
>  }
> 
> +int
> +rte_vhost_async_dma_unconfigure(int16_t dma_id, uint16_t vchan_id)
> +{
> +	struct rte_dma_info info;
> +	uint16_t max_desc;
> +	int i;
> +
> +	rte_spinlock_lock(&vhost_dma_lock);
> +	if (!rte_dma_is_valid(dma_id)) {
> +		VHOST_LOG_CONFIG("dma", ERR, "DMA %d is not found.\n", dma_id);
> +		goto error;
> +	}
> +
> +	if (rte_dma_info_get(dma_id, &info) != 0) {
> +		VHOST_LOG_CONFIG("dma", ERR, "Fail to get DMA %d information.\n", dma_id);
> +		goto error;
> +	}
> +
> +	if (vchan_id >= info.max_vchans) {
> +		VHOST_LOG_CONFIG("dma", ERR, "Invalid DMA %d vChannel %u.\n", dma_id, vchan_id);
> +		goto error;
> +	}
> +
> +	max_desc = info.max_desc;
> +	for (i = 0; i < max_desc; i++) {
> +		if (dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr[i] != NULL) {
> +			rte_free(dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr[i]);
> +			dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr[i] = NULL;
> +		}
> +	}
> +
> +	if (dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr != NULL) {
> +		rte_free(dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr);
> +		dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr = NULL;
> +	}
> +
> +	if (dma_copy_track[dma_id].vchans != NULL) {
> +		rte_free(dma_copy_track[dma_id].vchans);
> +		dma_copy_track[dma_id].vchans = NULL;
> +	}
> +
> +	dma_copy_track[dma_id].nr_vchans--;
> +
> +	rte_spinlock_unlock(&vhost_dma_lock);
> +	return 0;
> +
> +error:
> +	rte_spinlock_unlock(&vhost_dma_lock);
> +	return -1;
> +}
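
One behavior worth noting, inferred from the code above rather than stated
in the commit log: since unconfigure frees and NULLs both the per-descriptor
flag array and the vchans array, the same channel can be configured again
afterwards:

	/* illustrative ids; each call is expected to return 0 */
	rte_vhost_async_dma_configure(0, 0);	/* allocates completion-flag storage */
	rte_vhost_async_dma_unconfigure(0, 0);	/* frees it and clears the pointers */
	rte_vhost_async_dma_configure(0, 0);	/* allocation path runs again */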
> +
>  RTE_LOG_REGISTER_SUFFIX(vhost_config_log_level, config, INFO);
>  RTE_LOG_REGISTER_SUFFIX(vhost_data_log_level, data, WARNING);
> --
> 2.17.1


