* [PATCH] KVM test: Add cpu_hotplug subtest
From: Lucas Meneghel Rodrigues @ 2011-08-24  4:05 UTC (permalink / raw)
  To: autotest; +Cc: kvm, avi, gleb, Lucas Meneghel Rodrigues

Test the ability to add virtual CPUs to qemu on the fly using the
monitor command cpu_set; once the addition succeeds, run the
cpu_hotplug test suite on the guest through autotest.

Update: as of the latest qemu-kvm HEAD (2011-08-24), trying to
online more CPUs than the ones already available leads to qemu
hanging:

File /home/lmr/Code/autotest-git/client/virt/kvm_monitor.py, line 279, in cmd
raise MonitorProtocolError(msg)
MonitorProtocolError: Could not find (qemu) prompt after command cpu_set 2 online. Output so far: ""
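
(Not part of the patch: a minimal sketch of one way to sidestep the hang by
only issuing cpu_set for CPU ids the VM can actually accept. It assumes the VM
is started with -smp N,maxcpus=M and that a "maxcpus" test parameter is
available; the parameter name and the helper are hypothetical, only
vm.monitor.cmd() and the "info cpus" command come from the patch itself.)

import re

def hotplug_cpus(vm, params, total_cpus):
    # Count the CPUs qemu already reports before asking for more.
    present = len(re.findall(r"CPU #(\d+)", vm.monitor.cmd("info cpus")))
    # Never go beyond the maxcpus the VM was booted with (assumed parameter).
    max_cpus = int(params.get("maxcpus", total_cpus))
    for cpu_id in range(present, min(total_cpus, max_cpus)):
        vm.monitor.cmd("cpu_set %s online" % cpu_id)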

Signed-off-by: Lucas Meneghel Rodrigues <lmr@redhat.com>
---
 client/tests/kvm/tests/cpu_hotplug.py  |   99 ++++++++++++++++++++++++++++++++
 client/tests/kvm/tests_base.cfg.sample |    7 ++
 2 files changed, 106 insertions(+), 0 deletions(-)
 create mode 100644 client/tests/kvm/tests/cpu_hotplug.py

diff --git a/client/tests/kvm/tests/cpu_hotplug.py b/client/tests/kvm/tests/cpu_hotplug.py
new file mode 100644
index 0000000..fa75c9b
--- /dev/null
+++ b/client/tests/kvm/tests/cpu_hotplug.py
@@ -0,0 +1,99 @@
+import os, logging, re
+from autotest_lib.client.common_lib import error
+from autotest_lib.client.virt import virt_test_utils
+
+
+@error.context_aware
+def run_cpu_hotplug(test, params, env):
+    """
+    Runs CPU hotplug test:
+
+    1) Pick up a living guest
+    2) Send the monitor command cpu_set [cpu id] for each CPU we wish to add
+    3) Verify that the guest shows the additional CPUs under
+        /sys/devices/system/cpu
+    4) Try to bring them online by writing 1 to the 'online' file in that dir
+    5) Run the CPU Hotplug test suite shipped with autotest inside guest
+
+    @param test: KVM test object.
+    @param params: Dictionary with test parameters.
+    @param env: Dictionary with the test environment.
+    """
+    vm = env.get_vm(params["main_vm"])
+    vm.verify_alive()
+    timeout = int(params.get("login_timeout", 360))
+    session = vm.wait_for_login(timeout=timeout)
+
+    n_cpus_add = int(params.get("n_cpus_add", 1))
+    current_cpus = int(params.get("smp", 1))
+    total_cpus = current_cpus + n_cpus_add
+
+    error.context("getting guest dmesg before addition")
+    dmesg_before = session.cmd("dmesg -c")
+
+    error.context("Adding %d CPUs to guest" % n_cpus_add)
+    for i in range(total_cpus):
+        vm.monitor.cmd("cpu_set %s online" % i)
+
+    output = vm.monitor.cmd("info cpus")
+    logging.debug("Output of info cpus:\n%s", output)
+
+    cpu_regexp = re.compile(r"CPU #(\d+)")
+    total_cpus_monitor = len(cpu_regexp.findall(output))
+    if total_cpus_monitor != total_cpus:
+        raise error.TestFail("Monitor reports %s CPUs, when VM should have %s" %
+                             (total_cpus_monitor, total_cpus))
+
+    dmesg_after = session.cmd("dmesg -c")
+    logging.debug("Guest dmesg output after CPU add:\n%s" % dmesg_after)
+
+    # Verify whether the new cpus are showing up on /sys
+    error.context("verifying if new CPUs are showing on guest's /sys dir")
+    n_cmd = 'find /sys/devices/system/cpu/cpu[0-9]* -maxdepth 0 -type d | wc -l'
+    output = session.cmd(n_cmd)
+    logging.debug("List of cpus on /sys:\n%s" % output)
+    try:
+        cpus_after_addition = int(output)
+    except ValueError:
+        logging.error("Output of '%s': %s", n_cmd, output)
+        raise error.TestFail("Unable to get CPU count after CPU addition")
+
+    if cpus_after_addition != total_cpus:
+        raise error.TestFail("%s CPUs are showing up under "
+                             "/sys/devices/system/cpu, was expecting %s" %
+                             (cpus_after_addition, total_cpus))
+
+    error.context("locating online files for guest's new CPUs")
+    r_cmd = 'find /sys/devices/system/cpu/cpu[0-9]*/online -maxdepth 0 -type f'
+    online_files = session.cmd(r_cmd)
+    logging.debug("CPU online files detected: %s", online_files)
+    online_files = sorted(online_files.split())
+
+    if not online_files:
+        raise error.TestFail("Could not find CPUs that can be "
+                             "enabled/disabled on guest")
+
+    for online_file in online_files:
+        cpu_regexp = re.compile(r"cpu(\d+)", re.IGNORECASE)
+        cpu_id = cpu_regexp.findall(online_file)[0]
+        error.context("changing online status for CPU %s" % cpu_id)
+        check_online_status = session.cmd("cat %s" % online_file)
+        try:
+            check_online_status = int(check_online_status)
+        except ValueError:
+            raise error.TestFail("Unable to get online status from CPU %s" %
+                                 cpu_id)
+        assert(check_online_status in [0, 1])
+        if check_online_status == 0:
+            error.context("Bringing CPU %s online" % cpu_id)
+            session.cmd("echo 1 > %s" % online_file)
+
+    # Now that all CPUs were onlined, let's execute the
+    # autotest CPU Hotplug test
+    control_path = os.path.join(test.bindir, "autotest_control",
+                                "cpu_hotplug.control")
+
+    timeout = int(params.get("cpu_hotplug_timeout", 300))
+    error.context("running cpu_hotplug autotest after cpu addition")
+    virt_test_utils.run_autotest(vm, session, control_path, timeout,
+                                 test.outputdir, params)
diff --git a/client/tests/kvm/tests_base.cfg.sample b/client/tests/kvm/tests_base.cfg.sample
index ca8a9c0..b22118b 100644
--- a/client/tests/kvm/tests_base.cfg.sample
+++ b/client/tests/kvm/tests_base.cfg.sample
@@ -1104,6 +1104,13 @@ variants:
                 dd_timeout = 900
                 check_cmd_timeout = 900
 
+    - cpu_hotplug:
+        type = cpu_hotplug
+        cpu_hotplug_timeout = 600
+        n_cpus_add = 1
+        kill_vm = yes
+        iterations = 5
+
     # system_powerdown, system_reset and shutdown *must* be the last ones
     # defined (in this order), since the effect of such tests can leave
     # the VM on a bad state.
-- 
1.7.6
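
(For reference, a self-contained check of the "CPU #(\d+)" parse used in the
patch above; the "info cpus" lines below only illustrate the general shape of
qemu's output, the pc/thread_id values are made up.)

import re

sample = ("* CPU #0: pc=0x00000000000fd22f thread_id=3107\n"
          "  CPU #1: pc=0x00000000fffffff0 (halted) thread_id=3108\n")
assert re.findall(r"CPU #(\d+)", sample) == ["0", "1"]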



* Re: [PATCH] KVM test: Add cpu_hotplug subtest
From: pradeep @ 2011-08-24  4:25 UTC (permalink / raw)
  To: Lucas Meneghel Rodrigues; +Cc: autotest, kvm, avi, gleb

On Wed, 24 Aug 2011 01:05:13 -0300
Lucas Meneghel Rodrigues <lmr@redhat.com> wrote:

> Tests the ability of adding virtual cpus on the fly to qemu using
> the monitor command cpu_set, then after everything is OK, run the
> cpu_hotplug testsuite on the guest through autotest.
> 
> Updates: As of the latest qemu-kvm (08-24-2011) HEAD, trying to
> online more CPUs than the ones already available leads to qemu
> hanging:
> 
> File /home/lmr/Code/autotest-git/client/virt/kvm_monitor.py, line
> 279, in cmd raise MonitorProtocolError(msg)
> MonitorProtocolError: Could not find (qemu) prompt after command
> cpu_set 2 online. Output so far: ""
> 
> Signed-off-by: Lucas Meneghel Rodrigues <lmr@redhat.com>
> ---
>  client/tests/kvm/tests/cpu_hotplug.py  |   99
> ++++++++++++++++++++++++++++++++
> client/tests/kvm/tests_base.cfg.sample |    7 ++ 2 files changed, 106
> insertions(+), 0 deletions(-) create mode 100644
> client/tests/kvm/tests/cpu_hotplug.py
> 
> diff --git a/client/tests/kvm/tests/cpu_hotplug.py
> b/client/tests/kvm/tests/cpu_hotplug.py new file mode 100644
> index 0000000..fa75c9b
> --- /dev/null
> +++ b/client/tests/kvm/tests/cpu_hotplug.py
> @@ -0,0 +1,99 @@
> +import os, logging, re
> +from autotest_lib.client.common_lib import error
> +from autotest_lib.client.virt import virt_test_utils
> +
> +
> +@error.context_aware
> +def run_cpu_hotplug(test, params, env):
> +    """
> +    Runs CPU hotplug test:
> +
> +    1) Pick up a living guest
> +    2) Send the monitor command cpu_set [cpu id] for each cpu we
> wish to have
> +    3) Verify if guest has the additional CPUs showing up under
> +        /sys/devices/system/cpu
> +    4) Try to bring them online by writing 1 to the 'online' file
> inside that dir
> +    5) Run the CPU Hotplug test suite shipped with autotest inside

It looks good to me.  How about adding:
	1) Off-lining of vCPUs.
	2) Frequent offline/online cycling of vCPUs, something like below.

#!/bin/sh

SYS_CPU_DIR=/sys/devices/system/cpu

VICTIM_IRQ=15
IRQ_MASK=f0

iteration=0
while true; do
  echo $iteration
  echo $IRQ_MASK > /proc/irq/$VICTIM_IRQ/smp_affinity
  for cpudir in $SYS_CPU_DIR/cpu[1-9]; do
    echo 0 > $cpudir/online
  done
  for cpudir in $SYS_CPU_DIR/cpu[1-9]; do
    echo 1 > $cpudir/online
  done
  iteration=`expr $iteration + 1`
done
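
(Not part of the thread: a rough sketch of how the cycling above might be
folded into the Python test, reusing the session.cmd() calls the patch already
makes; the helper name and the cycle count are made up.)

def cycle_cpu_online(session, online_files, cycles=5):
    # Repeatedly offline and re-online every hot-pluggable vCPU in the guest.
    for _ in range(cycles):
        for online_file in online_files:
            session.cmd("echo 0 > %s" % online_file)  # take the vCPU offline
        for online_file in online_files:
            session.cmd("echo 1 > %s" % online_file)  # bring it back online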


> guest +
> +    @param test: KVM test object.
> +    @param params: Dictionary with test parameters.
> +    @param env: Dictionary with the test environment.
> +    """
> +    vm = env.get_vm(params["main_vm"])
> +    vm.verify_alive()
> +    timeout = int(params.get("login_timeout", 360))
> +    session = vm.wait_for_login(timeout=timeout)
> +
> +    n_cpus_add = int(params.get("n_cpus_add", 1))
> +    current_cpus = int(params.get("smp", 1))
> +    total_cpus = current_cpus + n_cpus_add
> +
> +    error.context("getting guest dmesg before addition")
> +    dmesg_before = session.cmd("dmesg -c")
> +
> +    error.context("Adding %d CPUs to guest" % n_cpus_add)
> +    for i in range(total_cpus):
> +        vm.monitor.cmd("cpu_set %s online" % i)
> +
> +    output = vm.monitor.cmd("info cpus")
> +    logging.debug("Output of info cpus:\n%s", output)
> +
> +    cpu_regexp = re.compile("CPU #(\d+)")
> +    total_cpus_monitor = len(cpu_regexp.findall(output))
> +    if total_cpus_monitor != total_cpus:
> +        raise error.TestFail("Monitor reports %s CPUs, when VM
> should have %s" %
> +                             (total_cpus_monitor, total_cpus))
> +
> +    dmesg_after = session.cmd("dmesg -c")
> +    logging.debug("Guest dmesg output after CPU add:\n%s" %
> dmesg_after) +
> +    # Verify whether the new cpus are showing up on /sys
> +    error.context("verifying if new CPUs are showing on guest's /sys
> dir")
> +    n_cmd = 'find /sys/devices/system/cpu/cpu[0-99] -maxdepth 0
> -type d | wc -l'
> +    output = session.cmd(n_cmd)
> +    logging.debug("List of cpus on /sys:\n%s" % output)
> +    try:
> +        cpus_after_addition = int(output)
> +    except ValueError:
> +        logging.error("Output of '%s': %s", n_cmd, output)
> +        raise error.TestFail("Unable to get CPU count after CPU
> addition") +
> +    if cpus_after_addition != total_cpus:
> +        raise error.TestFail("%s CPUs are showing up under "
> +                             "/sys/devices/system/cpu, was expecting
> %s" %
> +                             (cpus_after_addition, total_cpus))
> +
> +    error.context("locating online files for guest's new CPUs")
> +    r_cmd = 'find /sys/devices/system/cpu/cpu[0-99]/online -maxdepth
> 0 -type f'
> +    online_files = session.cmd(r_cmd)
> +    logging.debug("CPU online files detected: %s", online_files)
> +    online_files = online_files.split().sort()
> +
> +    if not online_files:
> +        raise error.TestFail("Could not find CPUs that can be "
> +                             "enabled/disabled on guest")
> +
> +    for online_file in online_files:
> +        cpu_regexp = re.compile("cpu(\d+)", re.IGNORECASE)
> +        cpu_id = cpu_regexp.findall(online_file)[0]
> +        error.context("changing online status for CPU %s" % cpu_id)
> +        check_online_status = session.cmd("cat %s" % online_file)
> +        try:
> +            check_online_status = int(check_online_status)
> +        except ValueError:
> +            raise error.TestFail("Unable to get online status from
> CPU %s" %
> +                                 cpu_id)
> +        assert(check_online_status in [0, 1])
> +        if check_online_status == 0:
> +            error.context("Bringing CPU %s online" % cpu_id)
> +            session.cmd("echo 1 > %s" % online_file)
> +
> +    # Now that all CPUs were onlined, let's execute the
> +    # autotest CPU Hotplug test
> +    control_path = os.path.join(test.bindir, "autotest_control",
> +                                "cpu_hotplug.control")
> +
> +    timeout = int(params.get("cpu_hotplug_timeout"), 300)
> +    error.context("running cpu_hotplug autotest after cpu addition")
> +    virt_test_utils.run_autotest(vm, session, control_path, timeout,
> +                                 test.outputdir, params)
> diff --git a/client/tests/kvm/tests_base.cfg.sample
> b/client/tests/kvm/tests_base.cfg.sample index ca8a9c0..b22118b 100644
> --- a/client/tests/kvm/tests_base.cfg.sample
> +++ b/client/tests/kvm/tests_base.cfg.sample
> @@ -1104,6 +1104,13 @@ variants:
>                  dd_timeout = 900
>                  check_cmd_timeout = 900
> 
> +    - cpu_hotplug:
> +        type = cpu_hotplug
> +        cpu_hotplug_timeout = 600
> +        n_cpus_add = 1
> +        kill_vm = yes
> +        iterations = 5
> +
>      # system_powerdown, system_reset and shutdown *must* be the last
> ones # defined (in this order), since the effect of such tests can
> leave # the VM on a bad state.




* Re: [Autotest] [PATCH] KVM test: Add cpu_hotplug subtest
From: Lucas Meneghel Rodrigues @ 2011-08-24  4:35 UTC (permalink / raw)
  To: pradeep; +Cc: autotest, gleb, avi, kvm

On Wed, Aug 24, 2011 at 1:25 AM, pradeep <psuriset@linux.vnet.ibm.com> wrote:
> On Wed, 24 Aug 2011 01:05:13 -0300
> Lucas Meneghel Rodrigues <lmr@redhat.com> wrote:
>
>> Tests the ability of adding virtual cpus on the fly to qemu using
>> the monitor command cpu_set, then after everything is OK, run the
>> cpu_hotplug testsuite on the guest through autotest.
>>
>> Updates: As of the latest qemu-kvm (08-24-2011) HEAD, trying to
>> online more CPUs than the ones already available leads to qemu
>> hanging:
>>
>> File /home/lmr/Code/autotest-git/client/virt/kvm_monitor.py, line
>> 279, in cmd raise MonitorProtocolError(msg)
>> MonitorProtocolError: Could not find (qemu) prompt after command
>> cpu_set 2 online. Output so far: ""
>>
>> Signed-off-by: Lucas Meneghel Rodrigues <lmr@redhat.com>
>> ---
>>  client/tests/kvm/tests/cpu_hotplug.py  |   99
>> ++++++++++++++++++++++++++++++++
>> client/tests/kvm/tests_base.cfg.sample |    7 ++ 2 files changed, 106
>> insertions(+), 0 deletions(-) create mode 100644
>> client/tests/kvm/tests/cpu_hotplug.py
>>
>> diff --git a/client/tests/kvm/tests/cpu_hotplug.py
>> b/client/tests/kvm/tests/cpu_hotplug.py new file mode 100644
>> index 0000000..fa75c9b
>> --- /dev/null
>> +++ b/client/tests/kvm/tests/cpu_hotplug.py
>> @@ -0,0 +1,99 @@
>> +import os, logging, re
>> +from autotest_lib.client.common_lib import error
>> +from autotest_lib.client.virt import virt_test_utils
>> +
>> +
>> +@error.context_aware
>> +def run_cpu_hotplug(test, params, env):
>> +    """
>> +    Runs CPU hotplug test:
>> +
>> +    1) Pick up a living guest
>> +    2) Send the monitor command cpu_set [cpu id] for each cpu we
>> wish to have
>> +    3) Verify if guest has the additional CPUs showing up under
>> +        /sys/devices/system/cpu
>> +    4) Try to bring them online by writing 1 to the 'online' file
>> inside that dir
>> +    5) Run the CPU Hotplug test suite shipped with autotest inside
>
> It looks good to me.  How about adding
>        1) off-lining of vcpu.
>        2) Frequent offline-online of vcpus.  some thing like below.
>
> #!/bin/sh
>
> SYS_CPU_DIR=/sys/devices/system/cpu
>
> VICTIM_IRQ=15
> IRQ_MASK=f0
>
> iteration=0
> while true; do
>  echo $iteration
>  echo $IRQ_MASK > /proc/irq/$VICTIM_IRQ/smp_affinity
>  for cpudir in $SYS_CPU_DIR/cpu[1-9]; do
>    echo 0 > $cpudir/online
>  done
>  for cpudir in $SYS_CPU_DIR/cpu[1-9]; do
>    echo 1 > $cpudir/online
>  done
>  iteration=`expr $iteration + 1`
> done

Ok, I'll look into implementing your suggestion as well, thanks!


* Re: [PATCH] KVM test: Add cpu_hotplug subtest
From: Lucas Meneghel Rodrigues @ 2011-08-25 22:44 UTC (permalink / raw)
  To: pradeep; +Cc: autotest, avi, gleb, kvm

On Wed, Aug 24, 2011 at 1:25 AM, pradeep <psuriset@linux.vnet.ibm.com> wrote:
> On Wed, 24 Aug 2011 01:05:13 -0300
> Lucas Meneghel Rodrigues <lmr@redhat.com> wrote:
>
>> Tests the ability of adding virtual cpus on the fly to qemu using
>> the monitor command cpu_set, then after everything is OK, run the
>> cpu_hotplug testsuite on the guest through autotest.
>>
>> Updates: As of the latest qemu-kvm (08-24-2011) HEAD, trying to
>> online more CPUs than the ones already available leads to qemu
>> hanging:
>>
>> File /home/lmr/Code/autotest-git/client/virt/kvm_monitor.py, line
>> 279, in cmd raise MonitorProtocolError(msg)
>> MonitorProtocolError: Could not find (qemu) prompt after command
>> cpu_set 2 online. Output so far: ""
>>
>> Signed-off-by: Lucas Meneghel Rodrigues <lmr@redhat.com>
>> ---
>>  client/tests/kvm/tests/cpu_hotplug.py  |   99
>> ++++++++++++++++++++++++++++++++
>> client/tests/kvm/tests_base.cfg.sample |    7 ++ 2 files changed, 106
>> insertions(+), 0 deletions(-) create mode 100644
>> client/tests/kvm/tests/cpu_hotplug.py
>>
>> diff --git a/client/tests/kvm/tests/cpu_hotplug.py
>> b/client/tests/kvm/tests/cpu_hotplug.py new file mode 100644
>> index 0000000..fa75c9b
>> --- /dev/null
>> +++ b/client/tests/kvm/tests/cpu_hotplug.py
>> @@ -0,0 +1,99 @@
>> +import os, logging, re
>> +from autotest_lib.client.common_lib import error
>> +from autotest_lib.client.virt import virt_test_utils
>> +
>> +
>> +@error.context_aware
>> +def run_cpu_hotplug(test, params, env):
>> +    """
>> +    Runs CPU hotplug test:
>> +
>> +    1) Pick up a living guest
>> +    2) Send the monitor command cpu_set [cpu id] for each cpu we
>> wish to have
>> +    3) Verify if guest has the additional CPUs showing up under
>> +        /sys/devices/system/cpu
>> +    4) Try to bring them online by writing 1 to the 'online' file
>> inside that dir
>> +    5) Run the CPU Hotplug test suite shipped with autotest inside
>
> It looks good to me.  How about adding
>        1) off-lining of vcpu.
>        2) Frequent offline-online of vcpus.  some thing like below.
>
> #!/bin/sh
>
> SYS_CPU_DIR=/sys/devices/system/cpu
>
> VICTIM_IRQ=15
> IRQ_MASK=f0
>
> iteration=0
> while true; do
>  echo $iteration
>  echo $IRQ_MASK > /proc/irq/$VICTIM_IRQ/smp_affinity
>  for cpudir in $SYS_CPU_DIR/cpu[1-9]; do
>    echo 0 > $cpudir/online
>  done
>  for cpudir in $SYS_CPU_DIR/cpu[1-9]; do
>    echo 1 > $cpudir/online
>  done
>  iteration=`expr $iteration + 1`
> done
>

Implemented your suggestion, see:

http://patchwork.test.kernel.org/patch/3613/

