[dpdk-dev] [PATCH v2 02/24] virtio: Use weaker barriers

Xie, Huawei huawei.xie at intel.com
Tue Jan 27 08:03:47 CET 2015



> -----Original Message-----
> From: dev [mailto:dev-bounces at dpdk.org] On Behalf Of Ouyang Changchun
> Sent: Tuesday, January 27, 2015 10:36 AM
> To: dev at dpdk.org
> Subject: [dpdk-dev] [PATCH v2 02/24] virtio: Use weaker barriers
> 
> The DPDK driver only has to deal with the case of running on PCI
> and with SMP. In this case, the code can use the weaker barriers
> instead of using hard (fence) barriers. This will help performance.
> The rationale is explained in Linux kernel virtio_ring.h.
> 
> To make it clearer that this is a virtio thing and not some generic
> barrier, prefix the barrier calls with virtio_.
> 
> Add missing (and needed) barrier between updating ring data
> structure and notifying host.
> 
> Signed-off-by: Stephen Hemminger <stephen at networkplumber.org>
> Signed-off-by: Changchun Ouyang <changchun.ouyang at intel.com>
> ---
>  lib/librte_pmd_virtio/virtio_ethdev.c |  2 +-
>  lib/librte_pmd_virtio/virtio_rxtx.c   |  8 +++++---
>  lib/librte_pmd_virtio/virtqueue.h     | 19 ++++++++++++++-----
>  3 files changed, 20 insertions(+), 9 deletions(-)
> 
> diff --git a/lib/librte_pmd_virtio/virtio_ethdev.c b/lib/librte_pmd_virtio/virtio_ethdev.c
> index 662a49c..dc47e72 100644
> --- a/lib/librte_pmd_virtio/virtio_ethdev.c
> +++ b/lib/librte_pmd_virtio/virtio_ethdev.c
> @@ -175,7 +175,7 @@ virtio_send_command(struct virtqueue *vq, struct virtio_pmd_ctrl *ctrl,
>  		uint32_t idx, desc_idx, used_idx;
>  		struct vring_used_elem *uep;
> 
> -		rmb();
> +		virtio_rmb();
> 
>  		used_idx = (uint32_t)(vq->vq_used_cons_idx
>  				& (vq->vq_nentries - 1));
> diff --git a/lib/librte_pmd_virtio/virtio_rxtx.c b/lib/librte_pmd_virtio/virtio_rxtx.c
> index c013f97..78af334 100644
> --- a/lib/librte_pmd_virtio/virtio_rxtx.c
> +++ b/lib/librte_pmd_virtio/virtio_rxtx.c
> @@ -456,7 +456,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
> 
>  	nb_used = VIRTQUEUE_NUSED(rxvq);
> 
> -	rmb();
> +	virtio_rmb();
> 
>  	num = (uint16_t)(likely(nb_used <= nb_pkts) ? nb_used : nb_pkts);
>  	num = (uint16_t)(likely(num <= VIRTIO_MBUF_BURST_SZ) ? num : VIRTIO_MBUF_BURST_SZ);
> @@ -516,6 +516,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
>  	}
> 
>  	if (likely(nb_enqueued)) {
> +		virtio_wmb();
>  		if (unlikely(virtqueue_kick_prepare(rxvq))) {
>  			virtqueue_notify(rxvq);
>  			PMD_RX_LOG(DEBUG, "Notified\n");
> @@ -547,7 +548,7 @@ virtio_recv_mergeable_pkts(void *rx_queue,
> 
>  	nb_used = VIRTQUEUE_NUSED(rxvq);
> 
> -	rmb();
> +	virtio_rmb();
> 
>  	if (nb_used == 0)
>  		return 0;
> @@ -694,7 +695,7 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
>  	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
>  	nb_used = VIRTQUEUE_NUSED(txvq);
> 
> -	rmb();
> +	virtio_rmb();
> 
>  	num = (uint16_t)(likely(nb_used < VIRTIO_MBUF_BURST_SZ) ? nb_used : VIRTIO_MBUF_BURST_SZ);
> 
> @@ -735,6 +736,7 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
>  		}
>  	}
>  	vq_update_avail_idx(txvq);
> +	virtio_wmb();
> 
>  	txvq->packets += nb_tx;
> 
> diff --git a/lib/librte_pmd_virtio/virtqueue.h b/lib/librte_pmd_virtio/virtqueue.h
> index fdee054..f6ad98d 100644
> --- a/lib/librte_pmd_virtio/virtqueue.h
> +++ b/lib/librte_pmd_virtio/virtqueue.h
> @@ -46,9 +46,18 @@
>  #include "virtio_ring.h"
>  #include "virtio_logs.h"
> 
> -#define mb()  rte_mb()
> -#define wmb() rte_wmb()
> -#define rmb() rte_rmb()
> +/*
> + * Per virtio_ring.h in Linux.
> + *     For virtio_pci on SMP, we don't need to order with respect to MMIO
> + *     accesses through relaxed memory I/O windows, so smp_mb() et al are
> + *     sufficient.
> + *
> + * This driver is for virtio_pci on SMP, so the weaker compiler
> + * barriers are sufficient.
> + */
> +#define virtio_mb()	rte_mb()
> +#define virtio_rmb()	rte_compiler_barrier()
> +#define virtio_wmb()	rte_compiler_barrier()
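
For reference: on IA, loads are already ordered against loads and stores
against stores, so only the full barrier needs a real fence instruction;
rmb/wmb only have to stop the compiler from reordering. Roughly what the
underlying macros map to on x86 (a sketch from memory of rte_atomic.h,
worth double-checking against the headers):

	#define rte_mb()  _mm_mfence()  /* real fence, orders loads vs. stores */
	#define rte_compiler_barrier() do {		\
		asm volatile ("" : : : "memory");	\
	} while (0)	/* no CPU instruction, only blocks compiler reordering */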
> 
>  #ifdef RTE_PMD_PACKET_PREFETCH
>  #define rte_packet_prefetch(p)  rte_prefetch1(p)
> @@ -225,7 +234,7 @@ virtqueue_full(const struct virtqueue *vq)
>  static inline void
>  vq_update_avail_idx(struct virtqueue *vq)
>  {
> -	rte_compiler_barrier();
> +	virtio_rmb();

I recall our original code here was virtio_wmb(): a store fence to ensure
all updates to the entries complete before the index is updated. Why do we
need virtio_rmb() here, and then an added virtio_wmb() after
vq_update_avail_idx()?
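
To spell out the ordering I would expect on the publish path (just a
sketch using this patch's names; vq_publish_and_kick() is a hypothetical
helper, not code from the tree):

	static inline void
	vq_publish_and_kick(struct virtqueue *vq)
	{
		/* all stores to the descriptors/avail entries happen above */
		virtio_wmb();	/* entry stores visible before the index store */
		vq->vq_ring.avail->idx = vq->vq_avail_idx;
		virtio_wmb();	/* index store visible before the kick */
		if (unlikely(virtqueue_kick_prepare(vq)))
			virtqueue_notify(vq);
	}

Both points order stores against stores, which is why virtio_wmb() looks
like the natural name at both.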

>  	vq->vq_ring.avail->idx = vq->vq_avail_idx;
>  }
> 
> @@ -255,7 +264,7 @@ static inline void
>  virtqueue_notify(struct virtqueue *vq)
>  {
>  	/*
> -	 * Ensure updated avail->idx is visible to host. mb() necessary?
> +	 * Ensure updated avail->idx is visible to host.
>  	 * For virtio on IA, the notification is through an io port operation
>  	 * which is a serialization instruction itself.
>  	 */
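
A side note on that comment: if I remember right, virtqueue_notify() is a
single 2-byte write to the queue-notify I/O port, roughly:

	/* from virtio_pci.h / virtqueue.h of this tree, quoted from memory */
	VIRTIO_WRITE_REG_2(vq->hw, VIRTIO_PCI_QUEUE_NOTIFY, vq->vq_queue_index);

and an OUT to an I/O port is serializing on IA, so no additional fence is
needed between the index update and the kick itself.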
> --
> 1.8.4.2


