patch 'examples/vm_power_manager: use safe list iterator' has been queued to stable release 20.11.7

luca.boccassi at gmail.com luca.boccassi at gmail.com
Thu Nov 3 10:27:21 CET 2022


Hi,

FYI, your patch has been queued to stable release 20.11.7

Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 11/05/22. So please
shout if anyone has objections.

Also note that after the patch there's a diff of the upstream commit vs the
patch applied to the branch. This will indicate if there was any rebasing
needed to apply to the stable branch. If there were code changes for rebasing
(ie: not only metadata diffs), please double check that the rebase was
correctly done.

Queued patches are on a temporary branch at:
https://github.com/kevintraynor/dpdk-stable

This queued commit can be viewed at:
https://github.com/kevintraynor/dpdk-stable/commit/6c758fb1b4e3c5c7b5e6f9044055d8b9b3402ef2

Thanks.

Luca Boccassi

---
>From 6c758fb1b4e3c5c7b5e6f9044055d8b9b3402ef2 Mon Sep 17 00:00:00 2001
From: Hamza Khan <hamza.khan at intel.com>
Date: Tue, 4 Oct 2022 23:09:04 +0100
Subject: [PATCH] examples/vm_power_manager: use safe list iterator

[ upstream commit 9c20d0fdc536df2a320cb1ae6cce49c2c7a02ebb ]

Currently, when vm_power_manager exits, we are using a LIST_FOREACH
macro to iterate over VM info structures while freeing them. This
leads to a use-after-free error. To address this, replace all usages of
LIST_* with TAILQ_* macros, and use the RTE_TAILQ_FOREACH_SAFE macro
to iterate and delete VM info structures.

Fixes: e8ae9b662506 ("examples/vm_power: channel manager and monitor in host")

Signed-off-by: Hamza Khan <hamza.khan at intel.com>
Signed-off-by: Reshma Pattan <reshma.pattan at intel.com>
Acked-by: David Hunt <david.hunt at intel.com>
---
 examples/vm_power_manager/channel_manager.c | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)

diff --git a/examples/vm_power_manager/channel_manager.c b/examples/vm_power_manager/channel_manager.c
index 0a28cb643b..5e0bbbb4c9 100644
--- a/examples/vm_power_manager/channel_manager.c
+++ b/examples/vm_power_manager/channel_manager.c
@@ -23,6 +23,7 @@
 #include <rte_log.h>
 #include <rte_atomic.h>
 #include <rte_spinlock.h>
+#include <rte_tailq.h>
 
 #include <libvirt/libvirt.h>
 
@@ -59,16 +60,16 @@ struct virtual_machine_info {
 	virDomainInfo info;
 	rte_spinlock_t config_spinlock;
 	int allow_query;
-	LIST_ENTRY(virtual_machine_info) vms_info;
+	RTE_TAILQ_ENTRY(virtual_machine_info) vms_info;
 };
 
-LIST_HEAD(, virtual_machine_info) vm_list_head;
+RTE_TAILQ_HEAD(, virtual_machine_info) vm_list_head;
 
 static struct virtual_machine_info *
 find_domain_by_name(const char *name)
 {
 	struct virtual_machine_info *info;
-	LIST_FOREACH(info, &vm_list_head, vms_info) {
+	RTE_TAILQ_FOREACH(info, &vm_list_head, vms_info) {
 		if (!strncmp(info->name, name, CHANNEL_MGR_MAX_NAME_LEN-1))
 			return info;
 	}
@@ -877,7 +878,7 @@ add_vm(const char *vm_name)
 
 	new_domain->allow_query = 0;
 	rte_spinlock_init(&(new_domain->config_spinlock));
-	LIST_INSERT_HEAD(&vm_list_head, new_domain, vms_info);
+	TAILQ_INSERT_HEAD(&vm_list_head, new_domain, vms_info);
 	return 0;
 }
 
@@ -899,7 +900,7 @@ remove_vm(const char *vm_name)
 		rte_spinlock_unlock(&vm_info->config_spinlock);
 		return -1;
 	}
-	LIST_REMOVE(vm_info, vms_info);
+	TAILQ_REMOVE(&vm_list_head, vm_info, vms_info);
 	rte_spinlock_unlock(&vm_info->config_spinlock);
 	rte_free(vm_info);
 	return 0;
@@ -952,7 +953,7 @@ channel_manager_init(const char *path __rte_unused)
 {
 	virNodeInfo info;
 
-	LIST_INIT(&vm_list_head);
+	TAILQ_INIT(&vm_list_head);
 	if (connect_hypervisor(path) < 0) {
 		global_n_host_cpus = 64;
 		global_hypervisor_available = 0;
@@ -1004,9 +1005,9 @@ channel_manager_exit(void)
 {
 	unsigned i;
 	char mask[RTE_MAX_LCORE];
-	struct virtual_machine_info *vm_info;
+	struct virtual_machine_info *vm_info, *tmp;
 
-	LIST_FOREACH(vm_info, &vm_list_head, vms_info) {
+	RTE_TAILQ_FOREACH_SAFE(vm_info, &vm_list_head, vms_info, tmp) {
 
 		rte_spinlock_lock(&(vm_info->config_spinlock));
 
@@ -1021,7 +1022,7 @@ channel_manager_exit(void)
 		}
 		rte_spinlock_unlock(&(vm_info->config_spinlock));
 
-		LIST_REMOVE(vm_info, vms_info);
+		TAILQ_REMOVE(&vm_list_head, vm_info, vms_info);
 		rte_free(vm_info);
 	}
 
-- 
2.34.1

---
  Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- -	2022-11-03 09:27:29.198316570 +0000
+++ 0063-examples-vm_power_manager-use-safe-list-iterator.patch	2022-11-03 09:27:25.485424608 +0000
@@ -1 +1 @@
-From 9c20d0fdc536df2a320cb1ae6cce49c2c7a02ebb Mon Sep 17 00:00:00 2001
+From 6c758fb1b4e3c5c7b5e6f9044055d8b9b3402ef2 Mon Sep 17 00:00:00 2001
@@ -5,0 +6,2 @@
+[ upstream commit 9c20d0fdc536df2a320cb1ae6cce49c2c7a02ebb ]
+
@@ -13 +14,0 @@
-Cc: stable at dpdk.org
@@ -23 +24 @@
-index 838465ab4b..7d7efdd05a 100644
+index 0a28cb643b..5e0bbbb4c9 100644
@@ -26,2 +27 @@
-@@ -22,6 +22,7 @@
- #include <rte_mempool.h>
+@@ -23,6 +23,7 @@
@@ -28,0 +29 @@
+ #include <rte_atomic.h>
@@ -34 +35 @@
-@@ -58,16 +59,16 @@ struct virtual_machine_info {
+@@ -59,16 +60,16 @@ struct virtual_machine_info {
@@ -54 +55 @@
-@@ -878,7 +879,7 @@ add_vm(const char *vm_name)
+@@ -877,7 +878,7 @@ add_vm(const char *vm_name)
@@ -63 +64 @@
-@@ -900,7 +901,7 @@ remove_vm(const char *vm_name)
+@@ -899,7 +900,7 @@ remove_vm(const char *vm_name)
@@ -72 +73 @@
-@@ -953,7 +954,7 @@ channel_manager_init(const char *path __rte_unused)
+@@ -952,7 +953,7 @@ channel_manager_init(const char *path __rte_unused)
@@ -81 +82 @@
-@@ -1005,9 +1006,9 @@ channel_manager_exit(void)
+@@ -1004,9 +1005,9 @@ channel_manager_exit(void)
@@ -93 +94 @@
-@@ -1022,7 +1023,7 @@ channel_manager_exit(void)
+@@ -1021,7 +1022,7 @@ channel_manager_exit(void)


More information about the stable mailing list