* [kvm-autotest] tests.cgroup: Add cpuset_mems_switching test
@ 2012-03-26 12:36 Lukas Doktor
From: Lukas Doktor @ 2012-03-26 12:36 UTC (permalink / raw)
  To: autotest, kvm, kvm-autotest, ldoktor, jzupka

Hi,

This patch set adds a new cpuset_mems_switching test and fixes a few issues in the memory_move test.

See the current version on github:
https://github.com/autotest/autotest/pull/246

Regards,
Lukáš Doktor

_______________________________________________
Autotest mailing list
Autotest@test.kernel.org
http://test.kernel.org/cgi-bin/mailman/listinfo/autotest


* [PATCH 1/2] tests.cgroup: Add cpuset.mems test
From: Lukas Doktor @ 2012-03-26 12:36 UTC (permalink / raw)
  To: autotest, kvm, kvm-autotest, ldoktor, jzupka

This test exercises memory pinning via the cpuset.mems cgroup feature.
It switches the VM between cgroups with different memory-node settings
while the guest stresses its memory.
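
For reference, the manual equivalent of what the test automates looks
roughly like this (a minimal sketch, assuming a cgroup v1 cpuset
hierarchy mounted at /sys/fs/cgroup/cpuset; the mount point and cgroup
names are illustrative, not taken from the patch):

    import os

    CPUSET_ROOT = "/sys/fs/cgroup/cpuset"  # assumed v1 mount point

    def make_node_cgroup(name, node):
        """Create a cpuset cgroup pinned to one memory node."""
        cg = os.path.join(CPUSET_ROOT, name)
        if not os.path.isdir(cg):
            os.mkdir(cg)
        # Allow all CPUs, inherited from the root cpuset.
        with open(os.path.join(CPUSET_ROOT, "cpuset.cpus")) as src:
            cpus = src.read().strip()
        with open(os.path.join(cg, "cpuset.cpus"), "w") as dst:
            dst.write(cpus)
        # Restrict memory allocations to the given node ...
        with open(os.path.join(cg, "cpuset.mems"), "w") as dst:
            dst.write(str(node))
        # ... and migrate already-allocated pages when a task moves in.
        with open(os.path.join(cg, "cpuset.memory_migrate"), "w") as dst:
            dst.write("1")

    def move_pid(name, pid):
        """Attach a task; with memory_migrate=1 its pages follow."""
        with open(os.path.join(CPUSET_ROOT, name, "tasks"), "w") as dst:
            dst.write(str(pid))

The test below drives the same knobs through the Cgroup/CgroupModules
helpers and repeatedly reassigns the VM process between such per-node
cgroups while dd stresses guest memory.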

Signed-off-by: Lukas Doktor <ldoktor@redhat.com>
---
 client/tests/kvm/tests/cgroup.py |   99 ++++++++++++++++++++++++++++++++++++++
 client/virt/subtests.cfg.sample  |    3 +
 2 files changed, 102 insertions(+), 0 deletions(-)

diff --git a/client/tests/kvm/tests/cgroup.py b/client/tests/kvm/tests/cgroup.py
index 5972ccb..8b11865 100644
--- a/client/tests/kvm/tests/cgroup.py
+++ b/client/tests/kvm/tests/cgroup.py
@@ -1293,6 +1293,105 @@ def run_cgroup(test, params, env):
             return ("VM survived %d cgroup switches" % i)
 
     @error.context_aware
+    def cpuset_mems_switching():
+        """
+        Tests cpuset.mems pinning. It switches the VM between cgroups
+        pinned to different memory nodes while the guest stresses memory.
+        @param cfg: cgroup_test_time - test duration (default: 10s)
+        @param cfg: cgroup_cpuset_mems_mb - override the size of memory
+                    blocks (default: 1/2 of VM memory)
+        """
+        error.context("Init")
+        test_time = int(params.get('cgroup_test_time', 10))
+        vm = env.get_all_vms()[0]
+
+        error.context("Prepare")
+        modules = CgroupModules()
+        if (modules.init(['cpuset']) != 1):
+            raise error.TestFail("Can't mount cpuset cgroup modules")
+        cgroup = Cgroup('cpuset', '')
+        cgroup.initialize(modules)
+
+        mems = cgroup.get_property("cpuset.mems")[0]
+        mems = mems.split('-')
+        no_mems = int(mems[-1]) - int(mems[0]) + 1
+        if no_mems < 2:
+            raise error.TestError("This test needs at least 2 memory nodes, "
+                                  "detected %s" % mems)
+        # Create cgroups
+        all_cpus = cgroup.get_property("cpuset.cpus")[0]
+        mems = range(int(mems[0]), int(mems[1]) + 1)
+        for i in range(no_mems):
+            cgroup.mk_cgroup()
+            cgroup.set_property('cpuset.mems', mems[i], -1)
+            cgroup.set_property('cpuset.cpus', all_cpus, -1)
+            cgroup.set_property('cpuset.memory_migrate', 1)
+
+        timeout = int(params.get("login_timeout", 360))
+        sessions = []
+        sessions.append(vm.wait_for_login(timeout=timeout))
+        sessions.append(vm.wait_for_login(timeout=30))
+
+        # Don't allow specifying more than 1/2 of the VM's memory
+        size = int(params.get('mem', 1024)) / 2
+        if params.get('cgroup_cpuset_mems_mb') is not None:
+            size = min(size, int(params.get('cgroup_cpuset_mems_mb')))
+
+        error.context("Test")
+        err = ""
+        try:
+            logging.info("Some harmless IOError messages of non-existing "
+                         "processes might occur.")
+            sessions[0].sendline('dd if=/dev/zero of=/dev/null bs=%dM '
+                                 'iflag=fullblock' % size)
+
+            i = 0
+            sessions[1].cmd('killall -SIGUSR1 dd; true')
+            t_stop = time.time() + test_time
+            while time.time() < t_stop:
+                i += 1
+                assign_vm_into_cgroup(vm, cgroup, i % no_mems)
+            sessions[1].cmd('killall -SIGUSR1 dd; true')
+            try:
+                out = sessions[0].read_until_output_matches(
+                                                ['(\d+)\+\d records out'])[1]
+                if len(re.findall(r'(\d+)\+\d records out', out)) < 2:
+                    out += sessions[0].read_until_output_matches(
+                                                ['(\d+)\+\d records out'])[1]
+            except ExpectTimeoutError:
+                err = ("dd didn't produce expected output: %s" % out)
+
+            if not err:
+                sessions[1].cmd('killall dd; true')
+                dd_res = re.findall(r'(\d+)\+(\d+) records in', out)
+                dd_res += re.findall(r'(\d+)\+(\d+) records out', out)
+                dd_res = [int(_[0]) + int(_[1]) for _ in dd_res]
+                if dd_res[1] <= dd_res[0] or dd_res[3] <= dd_res[2]:
+                    err = ("dd stoped sending bytes: %s..%s, %s..%s" %
+                           (dd_res[0], dd_res[1], dd_res[2], dd_res[3]))
+            if err:
+                logging.error(err)
+            else:
+                out = ("Guest moved %stimes in %s seconds while moving %d "
+                       "blocks of %dMB each" % (i, test_time, dd_res[3], size))
+                logging.info(out)
+        finally:
+            error.context("Cleanup")
+            del(cgroup)
+            del(modules)
+
+            for session in sessions:
+                # verify that the sessions are still responsive
+                session.cmd("true")
+                session.close()
+
+        error.context("Results")
+        if err:
+            raise error.TestFail(err)
+        else:
+            return ("VM survived %d cgroup switches" % i)
+
+    @error.context_aware
     def devices_access():
         """
         Tests devices.list capability. It tries hot-adding disk with different
diff --git a/client/virt/subtests.cfg.sample b/client/virt/subtests.cfg.sample
index a5eac01..d96711d 100644
--- a/client/virt/subtests.cfg.sample
+++ b/client/virt/subtests.cfg.sample
@@ -1349,6 +1349,9 @@ variants:
             - cpuset_cpus_switching:
                 cgroup_test = "cpuset_cpus_switching"
                 # cgroup_test_time
+            - cpuset_mems_switching:
+                cgroup_test = "cpuset_mems"
+                # cgroup_test_time, cgroup_cpuset_mems_mb
             - devices_access:
                 cgroup_test = "devices_access"
             - freezer:
-- 
1.7.7.6


* [PATCH 2/2] tests.cgroup: fix memory_move test
From: Lukas Doktor @ 2012-03-26 12:36 UTC (permalink / raw)
  To: autotest, kvm, kvm-autotest, ldoktor, jzupka

Minor fixes in the memory_move test: drop the fixed time.sleep(2) calls
around the cgroup-switching loop, tolerate killall failing when no dd
process is running, and restart the VM between runs in the config sample.
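
For context, session.cmd() on these guest sessions raises when the
command exits non-zero, and killall legitimately fails whenever no dd
process exists (dd may not have started yet after sendline(), or may
have already finished). SIGUSR1 merely makes dd print its transfer
statistics, which the test parses. A minimal sketch of the pattern
(assuming an aexpect-style session object, as these tests use):

    def signal_dd(session):
        # killall exits non-zero when no 'dd' process is found; appending
        # '; true' turns that expected failure into a zero exit status so
        # session.cmd() does not raise and abort the test.
        session.cmd('killall -SIGUSR1 dd; true')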

Signed-off-by: Lukas Doktor <ldoktor@redhat.com>
---
 client/tests/kvm/tests/cgroup.py |    4 +---
 client/virt/subtests.cfg.sample  |    1 +
 2 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/client/tests/kvm/tests/cgroup.py b/client/tests/kvm/tests/cgroup.py
index 8b11865..46ac91c 100644
--- a/client/tests/kvm/tests/cgroup.py
+++ b/client/tests/kvm/tests/cgroup.py
@@ -1951,15 +1951,13 @@ def run_cgroup(test, params, env):
                          "processes might occur.")
             sessions[0].sendline('dd if=/dev/zero of=/dev/null bs=%dM '
                                  'iflag=fullblock' % size)
-            time.sleep(2)
 
             i = 0
-            sessions[1].cmd('killall -SIGUSR1 dd')
+            sessions[1].cmd('killall -SIGUSR1 dd; true')
             t_stop = time.time() + test_time
             while time.time() < t_stop:
                 i += 1
                 assign_vm_into_cgroup(vm, cgroup, i % 2)
-            time.sleep(2)
             sessions[1].cmd('killall -SIGUSR1 dd; true')
             try:
                 out = sessions[0].read_until_output_matches(
diff --git a/client/virt/subtests.cfg.sample b/client/virt/subtests.cfg.sample
index d96711d..cc133d9 100644
--- a/client/virt/subtests.cfg.sample
+++ b/client/virt/subtests.cfg.sample
@@ -1369,6 +1369,7 @@ variants:
                 # cgroup_memory_limit_kb = 2097152
             - memory_move:
                 cgroup_test = "memory_move"
+                restart_vm = "yes"
                 # cgroup_test_time, cgroup_memory_move_mb
                 # cgroup_memory_move_mb = 2048
 
-- 
1.7.7.6

