patch 'vhost: fix virtqueue access check in datapath' has been queued to stable release 23.11.1

Xueming Li xuemingl at nvidia.com
Tue Mar 5 10:46:36 CET 2024


Hi,

FYI, your patch has been queued to stable release 23.11.1

Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 03/31/24, so please
shout before then if you have any objections.

Also note that after the patch there is a diff of the upstream commit vs the
patch as applied to the branch. This indicates whether any rebasing was
needed to apply it to the stable branch. If there were code changes during
the rebase (i.e. not only metadata diffs), please double-check that the
rebase was done correctly.

Queued patches are on a temporary branch at:
https://git.dpdk.org/dpdk-stable/log/?h=23.11-staging

This queued commit can be viewed at:
https://git.dpdk.org/dpdk-stable/commit/?h=23.11-staging&id=bbba917213589deeedba18ad5671c4733cf0134e

Thanks.

Xueming Li <xuemingl at nvidia.com>

---
From bbba917213589deeedba18ad5671c4733cf0134e Mon Sep 17 00:00:00 2001
From: David Marchand <david.marchand at redhat.com>
Date: Tue, 5 Dec 2023 10:45:31 +0100
Subject: [PATCH] vhost: fix virtqueue access check in datapath
Cc: Xueming Li <xuemingl at nvidia.com>

[ upstream commit 9fc93a1e23203e3053f5acc198e9203bc49024b5 ]

Now that a r/w lock is used, the access_ok field should only be updated
under a write lock.

Since the datapath code only takes a read lock on the virtqueue to check
access_ok, this lock must be released and a write lock taken before
calling vring_translate().
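
To illustrate the resulting flow, here is a simplified sketch (not part of
the patch). It assumes the declarations already available in
lib/vhost/virtio_net.c and mirrors what virtio_dev_rx() and
rte_vhost_dequeue_burst() now do; virtio_dev_vring_translate() is the
write-lock helper added by this patch:

static uint32_t
datapath_burst_sketch(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	uint32_t count = 0;

	rte_rwlock_read_lock(&vq->access_lock);
	vhost_user_iotlb_rd_lock(vq);

	if (unlikely(!vq->access_ok)) {
		/* access_ok must not be updated under a read lock:
		 * release both locks first... */
		vhost_user_iotlb_rd_unlock(vq);
		rte_rwlock_read_unlock(&vq->access_lock);

		/* ...then retranslate the rings under the write lock and
		 * skip this burst; the next call retries with access_ok set. */
		virtio_dev_vring_translate(dev, vq);
		return 0;
	}

	/* ... regular enqueue/dequeue processing updating count ... */

	vhost_user_iotlb_rd_unlock(vq);
	rte_rwlock_read_unlock(&vq->access_lock);
	return count;
}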

Fixes: 03f77d66d966 ("vhost: change virtqueue access lock to a read/write one")

Signed-off-by: David Marchand <david.marchand at redhat.com>
Acked-by: Eelco Chaudron <echaudro at redhat.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin at redhat.com>
---
 lib/vhost/virtio_net.c | 60 +++++++++++++++++++++++++++++++-----------
 1 file changed, 44 insertions(+), 16 deletions(-)

diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index 8af20f1487..d00f4b03aa 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -1696,6 +1696,17 @@ virtio_dev_rx_packed(struct virtio_net *dev,
 	return pkt_idx;
 }

+static void
+virtio_dev_vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
+{
+	rte_rwlock_write_lock(&vq->access_lock);
+	vhost_user_iotlb_rd_lock(vq);
+	if (!vq->access_ok)
+		vring_translate(dev, vq);
+	vhost_user_iotlb_rd_unlock(vq);
+	rte_rwlock_write_unlock(&vq->access_lock);
+}
+
 static __rte_always_inline uint32_t
 virtio_dev_rx(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	struct rte_mbuf **pkts, uint32_t count)
@@ -1710,9 +1721,13 @@ virtio_dev_rx(struct virtio_net *dev, struct vhost_virtqueue *vq,

 	vhost_user_iotlb_rd_lock(vq);

-	if (unlikely(!vq->access_ok))
-		if (unlikely(vring_translate(dev, vq) < 0))
-			goto out;
+	if (unlikely(!vq->access_ok)) {
+		vhost_user_iotlb_rd_unlock(vq);
+		rte_rwlock_read_unlock(&vq->access_lock);
+
+		virtio_dev_vring_translate(dev, vq);
+		goto out_no_unlock;
+	}

 	count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
 	if (count == 0)
@@ -1731,6 +1746,7 @@ out:
 out_access_unlock:
 	rte_rwlock_read_unlock(&vq->access_lock);

+out_no_unlock:
 	return nb_tx;
 }

@@ -2528,9 +2544,13 @@ virtio_dev_rx_async_submit(struct virtio_net *dev, struct vhost_virtqueue *vq,

 	vhost_user_iotlb_rd_lock(vq);

-	if (unlikely(!vq->access_ok))
-		if (unlikely(vring_translate(dev, vq) < 0))
-			goto out;
+	if (unlikely(!vq->access_ok)) {
+		vhost_user_iotlb_rd_unlock(vq);
+		rte_rwlock_read_unlock(&vq->access_lock);
+
+		virtio_dev_vring_translate(dev, vq);
+		goto out_no_unlock;
+	}

 	count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
 	if (count == 0)
@@ -2551,6 +2571,7 @@ out:
 out_access_unlock:
 	rte_rwlock_write_unlock(&vq->access_lock);

+out_no_unlock:
 	return nb_tx;
 }

@@ -3581,11 +3602,13 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,

 	vhost_user_iotlb_rd_lock(vq);

-	if (unlikely(!vq->access_ok))
-		if (unlikely(vring_translate(dev, vq) < 0)) {
-			count = 0;
-			goto out;
-		}
+	if (unlikely(!vq->access_ok)) {
+		vhost_user_iotlb_rd_unlock(vq);
+		rte_rwlock_read_unlock(&vq->access_lock);
+
+		virtio_dev_vring_translate(dev, vq);
+		goto out_no_unlock;
+	}

 	/*
 	 * Construct a RARP broadcast packet, and inject it to the "pkts"
@@ -3646,6 +3669,7 @@ out_access_unlock:
 	if (unlikely(rarp_mbuf != NULL))
 		count += 1;

+out_no_unlock:
 	return count;
 }

@@ -4196,11 +4220,14 @@ rte_vhost_async_try_dequeue_burst(int vid, uint16_t queue_id,

 	vhost_user_iotlb_rd_lock(vq);

-	if (unlikely(vq->access_ok == 0))
-		if (unlikely(vring_translate(dev, vq) < 0)) {
-			count = 0;
-			goto out;
-		}
+	if (unlikely(vq->access_ok == 0)) {
+		vhost_user_iotlb_rd_unlock(vq);
+		rte_rwlock_read_unlock(&vq->access_lock);
+
+		virtio_dev_vring_translate(dev, vq);
+		count = 0;
+		goto out_no_unlock;
+	}

 	/*
 	 * Construct a RARP broadcast packet, and inject it to the "pkts"
@@ -4266,5 +4293,6 @@ out_access_unlock:
 	if (unlikely(rarp_mbuf != NULL))
 		count += 1;

+out_no_unlock:
 	return count;
 }
--
2.34.1

---
  Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- -	2024-03-05 17:39:32.640555955 +0800
+++ 0055-vhost-fix-virtqueue-access-check-in-datapath.patch	2024-03-05 17:39:30.763566493 +0800
@@ -1 +1 @@
-From 9fc93a1e23203e3053f5acc198e9203bc49024b5 Mon Sep 17 00:00:00 2001
+From bbba917213589deeedba18ad5671c4733cf0134e Mon Sep 17 00:00:00 2001
@@ -4,0 +5,3 @@
+Cc: Xueming Li <xuemingl at nvidia.com>
+
+[ upstream commit 9fc93a1e23203e3053f5acc198e9203bc49024b5 ]
@@ -14 +16,0 @@
-Cc: stable at dpdk.org
@@ -24 +26 @@
-index 280d4845f8..c738b7edc9 100644
+index 8af20f1487..d00f4b03aa 100644

