[RFC PATCH v3 2/8] vhost: annotate virtqueue access lock
David Marchand
david.marchand at redhat.com
Mon Apr 11 13:00:07 CEST 2022
This change simply annotates the existing code paths that lead to
manipulations of vq->access_lock.
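
As a reminder of what these annotations are, here is a minimal sketch of
what the helper macros are assumed to expand to, based on clang's
-Wthread-safety attributes. The actual definitions come from an earlier
patch in this series; the guard macro name below is illustrative only:

#ifdef RTE_ANNOTATE_LOCKS	/* illustrative guard name */
#define __rte_exclusive_locks_required(...) \
	__attribute__((exclusive_locks_required(__VA_ARGS__)))
#define __rte_no_thread_safety_analysis \
	__attribute__((no_thread_safety_analysis))
#else
#define __rte_exclusive_locks_required(...)
#define __rte_no_thread_safety_analysis
#endif
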
One small change is required: vhost_poll_enqueue_completed used to take
a queue_id and look up the vq itself, even though its callers already
had the vq at hand. For the sake of the annotation, the vq is now passed
directly.
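
A simplified sketch of a caller (modelled on rte_vhost_poll_enqueue_completed
below, but assuming a plain rte_spinlock_lock()/unlock() pair; the real
caller may use trylock and extra error handling) shows why passing the vq
matters: the analyser can match the lock taken by the caller against the
lock named in the callee's annotation:

	struct vhost_virtqueue *vq = dev->virtqueue[queue_id];

	rte_spinlock_lock(&vq->access_lock);
	/* vq->access_lock is held, so the annotated callee may be entered. */
	n_pkts_cpl = vhost_poll_enqueue_completed(dev, vq, pkts, count,
			dma_id, vchan_id);
	rte_spinlock_unlock(&vq->access_lock);
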
vhost_user_lock_all_queue_pairs and vhost_user_unlock_all_queue_pairs
are skipped since the vq->access_lock locks are only conditionally held
there.
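
The pattern those two helpers follow is roughly the one below (illustrative,
not the exact function bodies): a variable number of locks taken
conditionally in a loop is something the static analysis cannot model,
hence the opt-out annotation:

static void
lock_all_vrings(struct virtio_net *dev)	/* hypothetical name */
	__rte_no_thread_safety_analysis
{
	uint32_t i;

	for (i = 0; i < dev->nr_vring; i++) {
		struct vhost_virtqueue *vq = dev->virtqueue[i];

		if (vq != NULL)
			rte_spinlock_lock(&vq->access_lock);
	}
}
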
Signed-off-by: David Marchand <david.marchand at redhat.com>
---
lib/vhost/vhost.h | 2 ++
lib/vhost/vhost_user.c | 2 ++
lib/vhost/virtio_net.c | 16 ++++++++++++----
3 files changed, 16 insertions(+), 4 deletions(-)
diff --git a/lib/vhost/vhost.h b/lib/vhost/vhost.h
index a9edc271aa..4a0aa35306 100644
--- a/lib/vhost/vhost.h
+++ b/lib/vhost/vhost.h
@@ -834,6 +834,7 @@ vhost_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
static __rte_always_inline void
vhost_vring_call_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
+ __rte_exclusive_locks_required(vq->access_lock)
{
/* Flush used->idx update before we read avail->flags. */
rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
@@ -872,6 +873,7 @@ vhost_vring_call_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
static __rte_always_inline void
vhost_vring_call_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
+ __rte_exclusive_locks_required(vq->access_lock)
{
uint16_t old, new, off, off_wrap;
bool signalled_used_valid, kick = false;
diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
index 1d390677fa..dfa24fec09 100644
--- a/lib/vhost/vhost_user.c
+++ b/lib/vhost/vhost_user.c
@@ -2909,6 +2909,7 @@ vhost_user_check_and_alloc_queue_pair(struct virtio_net *dev,
static void
vhost_user_lock_all_queue_pairs(struct virtio_net *dev)
+ __rte_no_thread_safety_analysis
{
unsigned int i = 0;
unsigned int vq_num = 0;
@@ -2926,6 +2927,7 @@ vhost_user_lock_all_queue_pairs(struct virtio_net *dev)
static void
vhost_user_unlock_all_queue_pairs(struct virtio_net *dev)
+ __rte_no_thread_safety_analysis
{
unsigned int i = 0;
unsigned int vq_num = 0;
diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index 5f432b0d77..a8b91d4b20 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -1246,6 +1246,7 @@ vhost_enqueue_single_packed(struct virtio_net *dev,
static __rte_noinline uint32_t
virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct rte_mbuf **pkts, uint32_t count)
+ __rte_exclusive_locks_required(vq->access_lock)
{
uint32_t pkt_idx = 0;
uint16_t num_buffers;
@@ -1441,6 +1442,7 @@ virtio_dev_rx_packed(struct virtio_net *dev,
struct vhost_virtqueue *__rte_restrict vq,
struct rte_mbuf **__rte_restrict pkts,
uint32_t count)
+ __rte_exclusive_locks_required(vq->access_lock)
{
uint32_t pkt_idx = 0;
@@ -1955,11 +1957,11 @@ write_back_completed_descs_packed(struct vhost_virtqueue *vq,
}
static __rte_always_inline uint16_t
-vhost_poll_enqueue_completed(struct virtio_net *dev, uint16_t queue_id,
+vhost_poll_enqueue_completed(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct rte_mbuf **pkts, uint16_t count, int16_t dma_id,
uint16_t vchan_id)
+ __rte_exclusive_locks_required(vq->access_lock)
{
- struct vhost_virtqueue *vq = dev->virtqueue[queue_id];
struct vhost_async *async = vq->async;
struct async_inflight_info *pkts_info = async->pkts_info;
uint16_t nr_cpl_pkts = 0;
@@ -2062,7 +2064,7 @@ rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
goto out;
}
- n_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id, pkts, count, dma_id, vchan_id);
+ n_pkts_cpl = vhost_poll_enqueue_completed(dev, vq, pkts, count, dma_id, vchan_id);
out:
rte_spinlock_unlock(&vq->access_lock);
@@ -2104,7 +2106,7 @@ rte_vhost_clear_queue_thread_unsafe(int vid, uint16_t queue_id,
return 0;
}
- n_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id, pkts, count, dma_id, vchan_id);
+ n_pkts_cpl = vhost_poll_enqueue_completed(dev, vq, pkts, count, dma_id, vchan_id);
return n_pkts_cpl;
}
@@ -2679,6 +2681,7 @@ static uint16_t
virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count,
bool legacy_ol_flags)
+ __rte_exclusive_locks_required(vq->access_lock)
{
uint16_t i;
uint16_t free_entries;
@@ -2774,6 +2777,7 @@ static uint16_t
virtio_dev_tx_split_legacy(struct virtio_net *dev,
struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool,
struct rte_mbuf **pkts, uint16_t count)
+ __rte_exclusive_locks_required(vq->access_lock)
{
return virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count, true);
}
@@ -2783,6 +2787,7 @@ static uint16_t
virtio_dev_tx_split_compliant(struct virtio_net *dev,
struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool,
struct rte_mbuf **pkts, uint16_t count)
+ __rte_exclusive_locks_required(vq->access_lock)
{
return virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count, false);
}
@@ -2982,6 +2987,7 @@ virtio_dev_tx_packed(struct virtio_net *dev,
struct rte_mbuf **__rte_restrict pkts,
uint32_t count,
bool legacy_ol_flags)
+ __rte_exclusive_locks_required(vq->access_lock)
{
uint32_t pkt_idx = 0;
@@ -3025,6 +3031,7 @@ static uint16_t
virtio_dev_tx_packed_legacy(struct virtio_net *dev,
struct vhost_virtqueue *__rte_restrict vq, struct rte_mempool *mbuf_pool,
struct rte_mbuf **__rte_restrict pkts, uint32_t count)
+ __rte_exclusive_locks_required(vq->access_lock)
{
return virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count, true);
}
@@ -3034,6 +3041,7 @@ static uint16_t
virtio_dev_tx_packed_compliant(struct virtio_net *dev,
struct vhost_virtqueue *__rte_restrict vq, struct rte_mempool *mbuf_pool,
struct rte_mbuf **__rte_restrict pkts, uint32_t count)
+ __rte_exclusive_locks_required(vq->access_lock)
{
return virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count, false);
}
--
2.23.0