[dts] [PATCH] Support numa test in vm_power_manager suite

Yong Liu yong.liu at intel.com
Wed Aug 12 06:04:27 CEST 2015


From: Marvin Liu <yong.liu at intel.com>

Add a set_vm_cpu API to the qemu_libvirt module. Skip loading global
configuration options that the user has already configured locally. Start
another VM pinned to socket 1 CPUs and check that the core masks on the
host are correct.

Signed-off-by: Marvin Liu <yong.liu at intel.com>
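
For reviewers, a minimal sketch of how the new API is meant to be driven
from a test case (all names are taken from the diff below; this is an
illustration, not part of the patch):

    # allocate four cores on NUMA node 1 and pin the VM's vcpus to them
    cpus = self.dut.get_core_list('1S/4C/1T', socket=1)
    vm2 = LibvirtKvm(self.dut, 'vm1', self.suite)
    # set_vm_cpu() replaces any cpu option already loaded from config;
    # load_global_config() now skips options the user configured locally
    vm2.set_vm_cpu(number='4', cpupin=' '.join('%s' % c for c in cpus))
    vm2_dut = vm2.start()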

diff --git a/conf/vm_power_manager.cfg b/conf/vm_power_manager.cfg
index 5443730..c77819b 100644
--- a/conf/vm_power_manager.cfg
+++ b/conf/vm_power_manager.cfg
@@ -32,8 +32,6 @@ device =
     pf_idx=0,guestpci=00:08.0;
     pf_idx=1,guestpci=00:09.0;
 [vm1]
-cpu =
-    number=4,cpupin=9 10 11 12;
 mem =
     size=4096;
 disk =
diff --git a/framework/qemu_libvirt.py b/framework/qemu_libvirt.py
index 339b380..de69d8f 100644
--- a/framework/qemu_libvirt.py
+++ b/framework/qemu_libvirt.py
@@ -145,6 +145,16 @@ class LibvirtKvm(VirtBase):
             memoryBacking = ET.SubElement(self.domain, 'memoryBacking')
             ET.SubElement(memoryBacking, 'hugepages')
 
+    def set_vm_cpu(self, **options):
+        """
+        Set VM cpu configuration, overriding any existing cpu option.
+        """
+        index = self.find_option_index('cpu')
+        if index is not None:
+            self.params[index] = {'cpu': [options]}
+        else:
+            self.params.append({'cpu': [options]})
+
     def add_vm_cpu(self, **options):
         """
         'number' : '4' #number of vcpus
diff --git a/framework/virt_base.py b/framework/virt_base.py
index d0dfa1c..a2b56d7 100644
--- a/framework/virt_base.py
+++ b/framework/virt_base.py
@@ -113,7 +113,8 @@ class VirtBase(object):
         global_conf = conf.get_virt_config()
         for param in global_conf:
             for key in param.keys():
-                self.__save_local_config(key, param[key])
+                if self.find_option_index(key) is None:
+                    self.__save_local_config(key, param[key])
 
     def load_local_config(self, suite_name):
         """
diff --git a/tests/TestSuite_vm_power_manager.py b/tests/TestSuite_vm_power_manager.py
index f06d245..f58fb45 100644
--- a/tests/TestSuite_vm_power_manager.py
+++ b/tests/TestSuite_vm_power_manager.py
@@ -249,6 +249,9 @@ class TestVmPowerManager(TestCase, IxiaPacketGenerator):
         Check power management channel connected in multiple VMs
         """
         vm_name = "vm1"
+        cpus = self.dut.get_core_list('1S/4C/1T', socket=1)
+        self.verify(len(cpus) == 4, "Can't allocate cores from numa 1")
+
         vm2 = LibvirtKvm(self.dut, vm_name, self.suite)
         channels = [
             {'path': '/tmp/powermonitor/%s.0' %
@@ -262,12 +265,23 @@ class TestVmPowerManager(TestCase, IxiaPacketGenerator):
         ]
         for channel in channels:
             vm2.add_vm_virtio_serial_channel(**channel)
+
+        # start vm2 with its vcpus pinned to the socket 1 cores
+        cpupin = ' '.join('%s' % cpu for cpu in cpus)
+        vm2_cpus = {'number': '4', 'cpupin': cpupin}
+        vm2.set_vm_cpu(**vm2_cpus)
         vm2_dut = vm2.start()
 
         self.dut.send_expect("add_vm %s" % vm_name, "vmpower>")
         self.dut.send_expect("add_channels %s all" % vm_name, "vmpower>")
         vm_info = self.dut.send_expect("show_vm %s" % vm_name, "vmpower>")
 
+        # check that each guest vcpu is mapped to the expected host core
+        for cpu_idx, cpu in enumerate(cpus):
+            mask = dts.create_mask([cpu])
+            cpu_map = '[%d]: Physical CPU Mask %s' % (cpu_idx, mask)
+            self.verify(cpu_map in vm_info, "Failed to map host cpu %s" % cpu)
+
         out = vm2_dut.build_dpdk_apps("examples/vm_power_manager/guest_cli")
         self.verify("Error" not in out, "Compilation error")
         self.verify("No such" not in out, "Compilation error")
-- 
1.9.3
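
The mask check above compares against the "Physical CPU Mask" lines that
the vm_power_manager host application prints, so dts.create_mask() must
return the matching hexadecimal mask string. A rough, hypothetical
re-implementation of that helper, for reference only (not the actual DTS
code):

    def create_mask(core_list):
        # build a hex CPU mask string from a list of host core ids
        mask = 0
        for core in core_list:
            mask |= 1 << int(core)
        return hex(mask)

    # e.g. with the socket 1 cores 9-12 pinned above:
    # create_mask([9])  -> '0x200'
    # create_mask([12]) -> '0x1000'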


