[v1,1/7] examples/power: add checks around hypervisor

Message ID 20180830105422.1198-2-david.hunt@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Thomas Monjalon
Series: add json power policy interface for containers

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/Intel-compilation success Compilation OK

Commit Message

Hunt, David Aug. 30, 2018, 10:54 a.m. UTC
  Allow vm_power_manager to run without requiring qemu to be present
on the machine. This will be required for instances where the JSON
interface is used for commands and policies, without any VMs present.
A use case for this is a container environment.

Signed-off-by: David Hunt <david.hunt@intel.com>
---
 examples/vm_power_manager/channel_manager.c | 71 +++++++++++++--------
 examples/vm_power_manager/channel_monitor.c |  2 +-
 2 files changed, 44 insertions(+), 29 deletions(-)
  

Comments

Stephen Hemminger Aug. 30, 2018, 4:59 p.m. UTC | #1
On Thu, 30 Aug 2018 11:54:16 +0100
David Hunt <david.hunt@intel.com> wrote:

Minor nits

> +static unsigned int global_hypervisor_available;

Please use bool for boolean values.
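
A minimal sketch of what the suggested change could look like, assuming the
C99 <stdbool.h> header is acceptable in the example app (a fragment, not the
literal diff):

	#include <stdbool.h>

	/* true once connect_hypervisor() succeeds, false otherwise */
	static bool global_hypervisor_available;

	/* in channel_manager_init(): */
	global_hypervisor_available = (connect_hypervisor(path) == 0);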

>  /*
>   * Represents a single Virtual Machine
> @@ -198,7 +199,11 @@ get_pcpus_mask(struct channel_info *chan_info, unsigned vcpu)
>  {
>  	struct virtual_machine_info *vm_info =
>  			(struct virtual_machine_info *)chan_info->priv_info;
> -	return rte_atomic64_read(&vm_info->pcpu_mask[vcpu]);
> +
> +	if ((global_hypervisor_available) && (vm_info != NULL))


 parentheses are unnecessary here.
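
For reference, a sketch of the same check without the redundant parentheses
(same behaviour as the posted hunk):

	if (global_hypervisor_available && vm_info != NULL)
		return rte_atomic64_read(&vm_info->pcpu_mask[vcpu]);
	else
		return 0;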

I know this is pre-existing, but please don't use CamelCase:

+		if (virNodeGetInfo(global_vir_conn_ptr, &info)) {
  
Hunt, David Sept. 12, 2018, 10:53 a.m. UTC | #2
Hi Stephen,


On 30/8/2018 5:59 PM, Stephen Hemminger wrote:
> On Thu, 30 Aug 2018 11:54:16 +0100
> David Hunt <david.hunt@intel.com> wrote:
>
> Minor nits
>
>> +static unsigned int global_hypervisor_available;
> Please use bool for boolean values.

Will change in next revision.


>>   /*
>>    * Represents a single Virtual Machine
>> @@ -198,7 +199,11 @@ get_pcpus_mask(struct channel_info *chan_info, unsigned vcpu)
>>   {
>>   	struct virtual_machine_info *vm_info =
>>   			(struct virtual_machine_info *)chan_info->priv_info;
>> -	return rte_atomic64_read(&vm_info->pcpu_mask[vcpu]);
>> +
>> +	if ((global_hypervisor_available) && (vm_info != NULL))
>
>   parentheses are unnecessary here.

Fixed in next version.

>
> I know this is pre-existing, but please don't use CamelCase:
>
> +		if (virNodeGetInfo(global_vir_conn_ptr, &info)) {

Unfortunately I've no control over this; it's part of the libvirt API.

Thanks,
Dave.
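
For background, a minimal standalone sketch of the libvirt call discussed
above; it assumes libvirt is installed, a local qemu:///system connection is
reachable, and the program is linked with -lvirt:

	#include <stdio.h>
	#include <libvirt/libvirt.h>

	int main(void)
	{
		virConnectPtr conn = virConnectOpen("qemu:///system");
		virNodeInfo info;

		if (conn == NULL)
			return 1;	/* no hypervisor available on this host */

		if (virNodeGetInfo(conn, &info) == 0)
			printf("host CPUs: %u\n", info.cpus);

		virConnectClose(conn);
		return 0;
	}

The failed-connection case is the one the patch now tolerates: when the
hypervisor cannot be reached, channel_manager_init() falls back to JSON-only
operation instead of returning an error.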
  

Patch

diff --git a/examples/vm_power_manager/channel_manager.c b/examples/vm_power_manager/channel_manager.c
index 927fc35ab..2bb8641d3 100644
--- a/examples/vm_power_manager/channel_manager.c
+++ b/examples/vm_power_manager/channel_manager.c
@@ -43,7 +43,8 @@  static unsigned char *global_cpumaps;
 static virVcpuInfo *global_vircpuinfo;
 static size_t global_maplen;
 
-static unsigned global_n_host_cpus;
+static unsigned int global_n_host_cpus;
+static unsigned int global_hypervisor_available;
 
 /*
  * Represents a single Virtual Machine
@@ -198,7 +199,11 @@  get_pcpus_mask(struct channel_info *chan_info, unsigned vcpu)
 {
 	struct virtual_machine_info *vm_info =
 			(struct virtual_machine_info *)chan_info->priv_info;
-	return rte_atomic64_read(&vm_info->pcpu_mask[vcpu]);
+
+	if ((global_hypervisor_available) && (vm_info != NULL))
+		return rte_atomic64_read(&vm_info->pcpu_mask[vcpu]);
+	else
+		return 0;
 }
 
 static inline int
@@ -559,6 +564,8 @@  get_all_vm(int *num_vm, int *num_vcpu)
 				VIR_CONNECT_LIST_DOMAINS_PERSISTENT;
 	unsigned int domain_flag = VIR_DOMAIN_VCPU_CONFIG;
 
+	if (!global_hypervisor_available)
+		return;
 
 	memset(global_cpumaps, 0, CHANNEL_CMDS_MAX_CPUS*global_maplen);
 	if (virNodeGetInfo(global_vir_conn_ptr, &node_info)) {
@@ -768,38 +775,42 @@  connect_hypervisor(const char *path)
 	}
 	return 0;
 }
-
 int
-channel_manager_init(const char *path)
+channel_manager_init(const char *path __rte_unused)
 {
 	virNodeInfo info;
 
 	LIST_INIT(&vm_list_head);
 	if (connect_hypervisor(path) < 0) {
-		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to initialize channel manager\n");
-		return -1;
-	}
-
-	global_maplen = VIR_CPU_MAPLEN(CHANNEL_CMDS_MAX_CPUS);
+		global_n_host_cpus = 64;
+		global_hypervisor_available = 0;
+		RTE_LOG(INFO, CHANNEL_MANAGER, "Unable to initialize channel manager\n");
+	} else {
+		global_hypervisor_available = 1;
+
+		global_maplen = VIR_CPU_MAPLEN(CHANNEL_CMDS_MAX_CPUS);
+
+		global_vircpuinfo = rte_zmalloc(NULL,
+				sizeof(*global_vircpuinfo) *
+				CHANNEL_CMDS_MAX_CPUS, RTE_CACHE_LINE_SIZE);
+		if (global_vircpuinfo == NULL) {
+			RTE_LOG(ERR, CHANNEL_MANAGER, "Error allocating memory for CPU Info\n");
+			goto error;
+		}
+		global_cpumaps = rte_zmalloc(NULL,
+				CHANNEL_CMDS_MAX_CPUS * global_maplen,
+				RTE_CACHE_LINE_SIZE);
+		if (global_cpumaps == NULL)
+			goto error;
 
-	global_vircpuinfo = rte_zmalloc(NULL, sizeof(*global_vircpuinfo) *
-			CHANNEL_CMDS_MAX_CPUS, RTE_CACHE_LINE_SIZE);
-	if (global_vircpuinfo == NULL) {
-		RTE_LOG(ERR, CHANNEL_MANAGER, "Error allocating memory for CPU Info\n");
-		goto error;
-	}
-	global_cpumaps = rte_zmalloc(NULL, CHANNEL_CMDS_MAX_CPUS * global_maplen,
-			RTE_CACHE_LINE_SIZE);
-	if (global_cpumaps == NULL) {
-		goto error;
+		if (virNodeGetInfo(global_vir_conn_ptr, &info)) {
+			RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to retrieve node Info\n");
+			goto error;
+		}
+		global_n_host_cpus = (unsigned int)info.cpus;
 	}
 
-	if (virNodeGetInfo(global_vir_conn_ptr, &info)) {
-		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to retrieve node Info\n");
-		goto error;
-	}
 
-	global_n_host_cpus = (unsigned)info.cpus;
 
 	if (global_n_host_cpus > CHANNEL_CMDS_MAX_CPUS) {
 		RTE_LOG(WARNING, CHANNEL_MANAGER, "The number of host CPUs(%u) exceeds the "
@@ -811,7 +822,8 @@  channel_manager_init(const char *path)
 
 	return 0;
 error:
-	disconnect_hypervisor();
+	if (global_hypervisor_available)
+		disconnect_hypervisor();
 	return -1;
 }
 
@@ -838,7 +850,10 @@  channel_manager_exit(void)
 		rte_free(vm_info);
 	}
 
-	rte_free(global_cpumaps);
-	rte_free(global_vircpuinfo);
-	disconnect_hypervisor();
+	if (global_hypervisor_available) {
+		/* Only needed if hypervisor available */
+		rte_free(global_cpumaps);
+		rte_free(global_vircpuinfo);
+		disconnect_hypervisor();
+	}
 }
diff --git a/examples/vm_power_manager/channel_monitor.c b/examples/vm_power_manager/channel_monitor.c
index 7fa47ba97..f180d74e6 100644
--- a/examples/vm_power_manager/channel_monitor.c
+++ b/examples/vm_power_manager/channel_monitor.c
@@ -66,7 +66,7 @@  static void
 core_share_status(int pNo)
 {
 
-	int noVms, noVcpus, z, x, t;
+	int noVms = 0, noVcpus = 0, z, x, t;
 
 	get_all_vm(&noVms, &noVcpus);