[dpdk-stable] patch 'net/virtio-user: fix packed ring server mode' has been queued to stable release 19.11.1

luca.boccassi at gmail.com
Tue Feb 11 12:20:40 CET 2020


Hi,

FYI, your patch has been queued to stable release 19.11.1

Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 02/13/20. So please
shout if anyone has objections.

Also note that after the patch there's a diff of the upstream commit vs the
patch applied to the branch. This will indicate if there was any rebasing
needed to apply to the stable branch. If there were code changes for rebasing
(i.e., not only metadata diffs), please double-check that the rebase was
correctly done.

Thanks.

Luca Boccassi

---
From 1d8dce55d16abced072aac16a293cf5d3b49fdfb Mon Sep 17 00:00:00 2001
From: Xuan Ding <xuan.ding at intel.com>
Date: Wed, 15 Jan 2020 06:13:58 +0000
Subject: [PATCH] net/virtio-user: fix packed ring server mode

[ upstream commit 6ebbf4109f3529a1af880ce195869cee84647178 ]

This patch fixes the situation where the data path does not work properly
when vhost reconnects to virtio in server mode with packed ring.

Currently, virtio and vhost share memory of vring. For split ring, vhost
can read the status of descriptors directly from the available ring and
the used ring during reconnection. Therefore, the data path can
continue.

But for packed ring, when reconnecting to virtio, vhost cannot get the
status of descriptors via the descriptor ring. By resetting Tx
and Rx queues, the data path can restart from the beginning.
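
In short, on a server-mode reconnect with a packed ring the driver briefly
stops the datapath, re-initializes every virtqueue and then restarts. A
condensed sketch of the new logic (illustrative only, not a literal excerpt;
see virtio_user_reset_queues_packed() in the diff below for the exact code):

	rte_spinlock_lock(&hw->state_lock);
	hw->started = 0;                /* stop the datapath */
	rte_delay_ms(1);                /* let in-flight Tx/Rx bursts finish */

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxvq = dev->data->rx_queues[i];
		virtqueue_rxvq_reset_packed(rxvq->vq);    /* re-init descriptors */
		virtio_dev_rx_queue_setup_finish(dev, i); /* refill the Rx ring */
	}
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txvq = dev->data->tx_queues[i];
		virtqueue_txvq_reset_packed(txvq->vq);
	}

	hw->started = 1;                /* restart the datapath */
	rte_spinlock_unlock(&hw->state_lock);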

Fixes: 4c3f5822eb21 ("net/virtio: add packed virtqueue defines")

Signed-off-by: Xuan Ding <xuan.ding at intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin at redhat.com>
---
 drivers/net/virtio/virtio_ethdev.c      |  4 +-
 drivers/net/virtio/virtio_user_ethdev.c | 43 +++++++++++++++
 drivers/net/virtio/virtqueue.c          | 71 +++++++++++++++++++++++++
 drivers/net/virtio/virtqueue.h          |  4 ++
 4 files changed, 120 insertions(+), 2 deletions(-)

diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 044eb10a70..f9d0ea70db 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -1913,6 +1913,8 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
 			goto err_vtpci_init;
 	}
 
+	rte_spinlock_init(&hw->state_lock);
+
 	/* reset device and negotiate default features */
 	ret = virtio_init_device(eth_dev, VIRTIO_PMD_DEFAULT_GUEST_FEATURES);
 	if (ret < 0)
@@ -2155,8 +2157,6 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 			return -EBUSY;
 		}
 
-	rte_spinlock_init(&hw->state_lock);
-
 	hw->use_simple_rx = 1;
 
 	if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) {
diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
index 3fc1725736..f3b35d1bdd 100644
--- a/drivers/net/virtio/virtio_user_ethdev.c
+++ b/drivers/net/virtio/virtio_user_ethdev.c
@@ -13,6 +13,7 @@
 #include <rte_ethdev_vdev.h>
 #include <rte_bus_vdev.h>
 #include <rte_alarm.h>
+#include <rte_cycles.h>
 
 #include "virtio_ethdev.h"
 #include "virtio_logs.h"
@@ -25,12 +26,48 @@
 #define virtio_user_get_dev(hw) \
 	((struct virtio_user_dev *)(hw)->virtio_user_dev)
 
+static void
+virtio_user_reset_queues_packed(struct rte_eth_dev *dev)
+{
+	struct virtio_hw *hw = dev->data->dev_private;
+	struct virtnet_rx *rxvq;
+	struct virtnet_tx *txvq;
+	uint16_t i;
+
+	/* Add lock to avoid queue contention. */
+	rte_spinlock_lock(&hw->state_lock);
+	hw->started = 0;
+
+	/*
+	 * Wait for the datapath to complete before resetting queues.
+	 * 1 ms should be enough for the ongoing Tx/Rx function to finish.
+	 */
+	rte_delay_ms(1);
+
+	/* Vring reset for each Tx queue and Rx queue. */
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		rxvq = dev->data->rx_queues[i];
+		virtqueue_rxvq_reset_packed(rxvq->vq);
+		virtio_dev_rx_queue_setup_finish(dev, i);
+	}
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txvq = dev->data->tx_queues[i];
+		virtqueue_txvq_reset_packed(txvq->vq);
+	}
+
+	hw->started = 1;
+	rte_spinlock_unlock(&hw->state_lock);
+}
+
+
 static int
 virtio_user_server_reconnect(struct virtio_user_dev *dev)
 {
 	int ret;
 	int connectfd;
 	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id];
+	struct virtio_hw *hw = eth_dev->data->dev_private;
 
 	connectfd = accept(dev->listenfd, NULL, NULL);
 	if (connectfd < 0)
@@ -51,6 +88,12 @@ virtio_user_server_reconnect(struct virtio_user_dev *dev)
 
 	dev->features &= dev->device_features;
 
+	/* For packed ring, resetting queues is required in reconnection. */
+	if (vtpci_packed_queue(hw)) {
+		PMD_INIT_LOG(NOTICE, "Packets on the fly will be dropped when packed ring reconnecting.");
+		virtio_user_reset_queues_packed(eth_dev);
+	}
+
 	ret = virtio_user_start_device(dev);
 	if (ret < 0)
 		return -1;
diff --git a/drivers/net/virtio/virtqueue.c b/drivers/net/virtio/virtqueue.c
index 5ff1e3587e..0b4e3bf3e2 100644
--- a/drivers/net/virtio/virtqueue.c
+++ b/drivers/net/virtio/virtqueue.c
@@ -141,3 +141,74 @@ virtqueue_rxvq_flush(struct virtqueue *vq)
 	else
 		virtqueue_rxvq_flush_split(vq);
 }
+
+int
+virtqueue_rxvq_reset_packed(struct virtqueue *vq)
+{
+	int size = vq->vq_nentries;
+	struct vq_desc_extra *dxp;
+	struct virtnet_rx *rxvq;
+	uint16_t desc_idx;
+
+	vq->vq_used_cons_idx = 0;
+	vq->vq_desc_head_idx = 0;
+	vq->vq_avail_idx = 0;
+	vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
+	vq->vq_free_cnt = vq->vq_nentries;
+
+	vq->vq_packed.used_wrap_counter = 1;
+	vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
+	vq->vq_packed.event_flags_shadow = 0;
+	vq->vq_packed.cached_flags |= VRING_DESC_F_WRITE;
+
+	rxvq = &vq->rxq;
+	memset(rxvq->mz->addr, 0, rxvq->mz->len);
+
+	for (desc_idx = 0; desc_idx < vq->vq_nentries; desc_idx++) {
+		dxp = &vq->vq_descx[desc_idx];
+		if (dxp->cookie != NULL) {
+			rte_pktmbuf_free(dxp->cookie);
+			dxp->cookie = NULL;
+		}
+	}
+
+	vring_desc_init_packed(vq, size);
+
+	return 0;
+}
+
+int
+virtqueue_txvq_reset_packed(struct virtqueue *vq)
+{
+	int size = vq->vq_nentries;
+	struct vq_desc_extra *dxp;
+	struct virtnet_tx *txvq;
+	uint16_t desc_idx;
+
+	vq->vq_used_cons_idx = 0;
+	vq->vq_desc_head_idx = 0;
+	vq->vq_avail_idx = 0;
+	vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
+	vq->vq_free_cnt = vq->vq_nentries;
+
+	vq->vq_packed.used_wrap_counter = 1;
+	vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
+	vq->vq_packed.event_flags_shadow = 0;
+
+	txvq = &vq->txq;
+	memset(txvq->mz->addr, 0, txvq->mz->len);
+	memset(txvq->virtio_net_hdr_mz->addr, 0,
+		txvq->virtio_net_hdr_mz->len);
+
+	for (desc_idx = 0; desc_idx < vq->vq_nentries; desc_idx++) {
+		dxp = &vq->vq_descx[desc_idx];
+		if (dxp->cookie != NULL) {
+			rte_pktmbuf_free(dxp->cookie);
+			dxp->cookie = NULL;
+		}
+	}
+
+	vring_desc_init_packed(vq, size);
+
+	return 0;
+}
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 8d7f197b13..58ad7309ae 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -443,6 +443,10 @@ struct rte_mbuf *virtqueue_detach_unused(struct virtqueue *vq);
 /* Flush the elements in the used ring. */
 void virtqueue_rxvq_flush(struct virtqueue *vq);
 
+int virtqueue_rxvq_reset_packed(struct virtqueue *vq);
+
+int virtqueue_txvq_reset_packed(struct virtqueue *vq);
+
 static inline int
 virtqueue_full(const struct virtqueue *vq)
 {
-- 
2.20.1
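
For anyone validating this backport: the affected setup is a virtio-user port
created in server mode with a packed virtqueue. Below is a minimal,
hypothetical sketch of such a port (the socket path, core, pool and ring sizes
are arbitrary examples, and error handling is trimmed); the vhost side, for
instance the vhost PMD with client=1 on the same socket, can then disconnect
and reconnect to exercise the reset path added by this patch.

#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

int
main(void)
{
	/* EAL arguments; the vdev string enables server mode and packed ring. */
	char *eal_argv[] = {
		"virtio-user-demo", "-l", "0", "--no-pci", "--vdev",
		"net_virtio_user0,path=/tmp/vhost-user.sock,server=1,packed_vq=1,queues=1",
	};
	struct rte_eth_conf port_conf = { 0 };
	struct rte_mempool *mp;
	uint16_t port = 0;

	if (rte_eal_init(6, eal_argv) < 0)
		return -1;

	mp = rte_pktmbuf_pool_create("mbuf_pool", 4096, 256, 0,
			RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
	if (mp == NULL)
		return -1;

	/* One Rx and one Tx queue; the link comes up once the vhost side connects. */
	if (rte_eth_dev_configure(port, 1, 1, &port_conf) < 0 ||
	    rte_eth_rx_queue_setup(port, 0, 256, rte_socket_id(), NULL, mp) < 0 ||
	    rte_eth_tx_queue_setup(port, 0, 256, rte_socket_id(), NULL) < 0 ||
	    rte_eth_dev_start(port) < 0)
		return -1;

	/* Run traffic, restart the vhost side, and verify that the datapath
	 * resumes after the packed-ring queues are reset on reconnect. */
	return 0;
}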

---
  Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- -	2020-02-11 11:17:42.152638556 +0000
+++ 0094-net-virtio-user-fix-packed-ring-server-mode.patch	2020-02-11 11:17:38.536003890 +0000
@@ -1,8 +1,10 @@
-From 6ebbf4109f3529a1af880ce195869cee84647178 Mon Sep 17 00:00:00 2001
+From 1d8dce55d16abced072aac16a293cf5d3b49fdfb Mon Sep 17 00:00:00 2001
 From: Xuan Ding <xuan.ding at intel.com>
 Date: Wed, 15 Jan 2020 06:13:58 +0000
 Subject: [PATCH] net/virtio-user: fix packed ring server mode
 
+[ upstream commit 6ebbf4109f3529a1af880ce195869cee84647178 ]
+
 This patch fixes the situation where data path does not work properly
 when vhost reconnects to virtio in server mode with packed ring.
 
@@ -16,7 +18,6 @@
 and Rx queues, the data path can restart from the beginning.
 
 Fixes: 4c3f5822eb21 ("net/virtio: add packed virtqueue defines")
-Cc: stable at dpdk.org
 
 Signed-off-by: Xuan Ding <xuan.ding at intel.com>
 Reviewed-by: Maxime Coquelin <maxime.coquelin at redhat.com>

