[dpdk-stable] patch 'net/vhost: fix invalid state' has been queued to LTS release 17.11.2

Yuanhan Liu yliu at fridaylinux.org
Sun Apr 22 17:09:48 CEST 2018


Hi,

FYI, your patch has been queued to LTS release 17.11.2

Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 04/29/18. So please
shout if anyone has objections.

Thanks.

	--yliu

---
From a8d0841a426b59b29218bf5498eab7aaab19361a Mon Sep 17 00:00:00 2001
From: Junjie Chen <junjie.j.chen at intel.com>
Date: Wed, 11 Apr 2018 13:02:32 -0400
Subject: [PATCH] net/vhost: fix invalid state

[ upstream commit e6722dee533cda3756fbc5c9ea4ddfbf30276f1b ]

dev_start sets *dev_attached* after setting up the queues, which puts the
device into an invalid state since no frontend is attached yet. Also,
destroy_device sets *started* to zero, which keeps *allow_queuing* at zero
until dev_start is called again. Queue existence should not be determined
from *dev_attached* but from the queue pointers (or another separate
variable).
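
As a minimal sketch (not the driver code; the struct and helper names
below are hypothetical), the checks this fix moves towards look roughly
like this: queue existence is judged from the queue pointers themselves,
and queuing is allowed only while the port is started and a frontend is
attached.

    #include <stdbool.h>
    #include <stddef.h>

    /* Hypothetical, simplified view of the PMD state. */
    struct dev_state {
            void **rx_queues;    /* set by the queue setup path     */
            void **tx_queues;
            int started;         /* toggled by dev_start/dev_stop   */
            int dev_attached;    /* toggled by new_/destroy_device  */
    };

    /* Queues exist iff the queue pointers are set, regardless of
     * whether a frontend happens to be attached right now. */
    static bool queues_exist(const struct dev_state *s)
    {
            return s->rx_queues != NULL && s->tx_queues != NULL;
    }

    /* Queuing is allowed only when the port is started AND a
     * frontend is attached. */
    static bool allow_queuing(const struct dev_state *s)
    {
            return queues_exist(s) && s->started && s->dev_attached;
    }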

Fixes: 30a701a53737 ("net/vhost: fix crash when creating vdev dynamically")

Signed-off-by: Junjie Chen <junjie.j.chen at intel.com>
Tested-by: Jens Freimann <jfreimann at redhat.com>
Reviewed-by: Jianfeng Tan <jianfeng.tan at intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin at redhat.com>
---
 drivers/net/vhost/rte_eth_vhost.c | 59 +++++++++++++++++++--------------------
 1 file changed, 29 insertions(+), 30 deletions(-)

diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index 2299ce877..caf9fce0a 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -528,10 +528,11 @@ update_queuing_status(struct rte_eth_dev *dev)
 	unsigned int i;
 	int allow_queuing = 1;
 
-	if (rte_atomic32_read(&internal->dev_attached) == 0)
+	if (!dev->data->rx_queues || !dev->data->tx_queues)
 		return;
 
-	if (rte_atomic32_read(&internal->started) == 0)
+	if (rte_atomic32_read(&internal->started) == 0 ||
+	    rte_atomic32_read(&internal->dev_attached) == 0)
 		allow_queuing = 0;
 
 	/* Wait until rx/tx_pkt_burst stops accessing vhost device */
@@ -607,13 +608,10 @@ new_device(int vid)
 #endif
 
 	internal->vid = vid;
-	if (eth_dev->data->rx_queues && eth_dev->data->tx_queues) {
+	if (rte_atomic32_read(&internal->started) == 1)
 		queue_setup(eth_dev, internal);
-		rte_atomic32_set(&internal->dev_attached, 1);
-	} else {
-		RTE_LOG(INFO, PMD, "RX/TX queues have not setup yet\n");
-		rte_atomic32_set(&internal->dev_attached, 0);
-	}
+	else
+		RTE_LOG(INFO, PMD, "RX/TX queues do not exist yet\n");
 
 	for (i = 0; i < rte_vhost_get_vring_num(vid); i++)
 		rte_vhost_enable_guest_notification(vid, i, 0);
@@ -622,6 +620,7 @@ new_device(int vid)
 
 	eth_dev->data->dev_link.link_status = ETH_LINK_UP;
 
+	rte_atomic32_set(&internal->dev_attached, 1);
 	update_queuing_status(eth_dev);
 
 	RTE_LOG(INFO, PMD, "New connection established\n");
@@ -652,23 +651,24 @@ destroy_device(int vid)
 	eth_dev = list->eth_dev;
 	internal = eth_dev->data->dev_private;
 
-	rte_atomic32_set(&internal->started, 0);
-	update_queuing_status(eth_dev);
 	rte_atomic32_set(&internal->dev_attached, 0);
+	update_queuing_status(eth_dev);
 
 	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
 
-	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
-		vq = eth_dev->data->rx_queues[i];
-		if (vq == NULL)
-			continue;
-		vq->vid = -1;
-	}
-	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
-		vq = eth_dev->data->tx_queues[i];
-		if (vq == NULL)
-			continue;
-		vq->vid = -1;
+	if (eth_dev->data->rx_queues && eth_dev->data->tx_queues) {
+		for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+			vq = eth_dev->data->rx_queues[i];
+			if (!vq)
+				continue;
+			vq->vid = -1;
+		}
+		for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+			vq = eth_dev->data->tx_queues[i];
+			if (!vq)
+				continue;
+			vq->vid = -1;
+		}
 	}
 
 	state = vring_states[eth_dev->data->port_id];
@@ -795,11 +795,7 @@ eth_dev_start(struct rte_eth_dev *eth_dev)
 {
 	struct pmd_internal *internal = eth_dev->data->dev_private;
 
-	if (unlikely(rte_atomic32_read(&internal->dev_attached) == 0)) {
-		queue_setup(eth_dev, internal);
-		rte_atomic32_set(&internal->dev_attached, 1);
-	}
-
+	queue_setup(eth_dev, internal);
 	rte_atomic32_set(&internal->started, 1);
 	update_queuing_status(eth_dev);
 
@@ -839,10 +835,13 @@ eth_dev_close(struct rte_eth_dev *dev)
 	pthread_mutex_unlock(&internal_list_lock);
 	rte_free(list);
 
-	for (i = 0; i < dev->data->nb_rx_queues; i++)
-		rte_free(dev->data->rx_queues[i]);
-	for (i = 0; i < dev->data->nb_tx_queues; i++)
-		rte_free(dev->data->tx_queues[i]);
+	if (dev->data->rx_queues)
+		for (i = 0; i < dev->data->nb_rx_queues; i++)
+			rte_free(dev->data->rx_queues[i]);
+
+	if (dev->data->tx_queues)
+		for (i = 0; i < dev->data->nb_tx_queues; i++)
+			rte_free(dev->data->tx_queues[i]);
 
 	rte_free(dev->data->mac_addrs);
 	free(internal->dev_name);
-- 
2.11.0


