All of lore.kernel.org
 help / color / mirror / Atom feed
From: Lukas Doktor <ldoktor@redhat.com>
To: autotest@test.kernel.org, kvm@vger.kernel.org,
	kvm-autotest@redhat.com, ldoktor@redhat.com, jzupka@redhat.com
Subject: [PATCH 1/2] tests.cgroup: Add cpuset.mems test
Date: Mon, 26 Mar 2012 14:36:30 +0200	[thread overview]
Message-ID: <1332765391-19294-2-git-send-email-ldoktor@redhat.com> (raw)
In-Reply-To: <1332765391-19294-1-git-send-email-ldoktor@redhat.com>

This test verifies memory pinning via the cpuset.mems cgroup feature. It
repeatedly moves the VM between cgroups with different memory-node settings
while stressing guest memory.

Signed-off-by: Lukas Doktor <ldoktor@redhat.com>
---
 client/tests/kvm/tests/cgroup.py |   99 ++++++++++++++++++++++++++++++++++++++
 client/virt/subtests.cfg.sample  |    3 +
 2 files changed, 102 insertions(+), 0 deletions(-)

diff --git a/client/tests/kvm/tests/cgroup.py b/client/tests/kvm/tests/cgroup.py
index 5972ccb..8b11865 100644
--- a/client/tests/kvm/tests/cgroup.py
+++ b/client/tests/kvm/tests/cgroup.py
@@ -1293,6 +1293,105 @@ def run_cgroup(test, params, env):
             return ("VM survived %d cgroup switches" % i)
 
     @error.context_aware
+    def cpuset_mems_switching():
+        """
+        Tests the cpuset.mems pinning. It changes cgroups with different
+        mem nodes while stressing memory.
+        @param cfg: cgroup_test_time - test duration '10'
+        @param cfg: cgroup_cpuset_mems_mb - override the size of memory blocks
+                    'by default 1/2 of VM memory'
+        """
+        error.context("Init")
+        test_time = int(params.get('cgroup_test_time', 10))
+        vm = env.get_all_vms()[0]
+
+        error.context("Prepare")
+        modules = CgroupModules()
+        if (modules.init(['cpuset']) != 1):
+            raise error.TestFail("Can't mount cpuset cgroup modules")
+        cgroup = Cgroup('cpuset', '')
+        cgroup.initialize(modules)
+
+        mems = cgroup.get_property("cpuset.mems")[0]
+        mems = mems.split('-')
+        no_mems = len(mems)
+        if no_mems < 2:
+            raise error.TestError("This test needs at least 2 memory nodes, "
+                                  "detected %s" % mems)
+        # Create cgroups
+        all_cpus = cgroup.get_property("cpuset.cpus")[0]
+        mems = range(int(mems[0]), int(mems[1]) + 1)
+        for i in range(no_mems):
+            cgroup.mk_cgroup()
+            cgroup.set_property('cpuset.mems', mems[i], -1)
+            cgroup.set_property('cpuset.cpus', all_cpus, -1)
+            cgroup.set_property('cpuset.memory_migrate', 1)
+
+        timeout = int(params.get("login_timeout", 360))
+        sessions = []
+        sessions.append(vm.wait_for_login(timeout=timeout))
+        sessions.append(vm.wait_for_login(timeout=30))
+
+        # Don't allow to specify more than 1/2 of the VM's memory
+        size = int(params.get('mem', 1024)) / 2
+        if params.get('cgroup_cpuset_mems_mb') is not None:
+            size = min(size, int(params.get('cgroup_cpuset_mems_mb')))
+
+        error.context("Test")
+        err = ""
+        try:
+            logging.info("Some harmless IOError messages of non-existing "
+                         "processes might occur.")
+            sessions[0].sendline('dd if=/dev/zero of=/dev/null bs=%dM '
+                                 'iflag=fullblock' % size)
+
+            i = 0
+            sessions[1].cmd('killall -SIGUSR1 dd')
+            t_stop = time.time() + test_time
+            while time.time() < t_stop:
+                i += 1
+                assign_vm_into_cgroup(vm, cgroup, i % no_mems)
+            sessions[1].cmd('killall -SIGUSR1 dd; true')
+            try:
+                out = sessions[0].read_until_output_matches(
+                                                ['(\d+)\+\d records out'])[1]
+                if len(re.findall(r'(\d+)\+\d records out', out)) < 2:
+                    out += sessions[0].read_until_output_matches(
+                                                ['(\d+)\+\d records out'])[1]
+            except ExpectTimeoutError:
+                err = "dd didn't produce the expected output before timeout"
+
+            if not err:
+                sessions[1].cmd('killall dd; true')
+                dd_res = re.findall(r'(\d+)\+(\d+) records in', out)
+                dd_res += re.findall(r'(\d+)\+(\d+) records out', out)
+                dd_res = [int(_[0]) + int(_[1]) for _ in dd_res]
+                if dd_res[1] <= dd_res[0] or dd_res[3] <= dd_res[2]:
+                    err = ("dd stopped sending bytes: %s..%s, %s..%s" %
+                           (dd_res[0], dd_res[1], dd_res[2], dd_res[3]))
+            if err:
+                logging.error(err)
+            else:
+                out = ("Guest moved %s times in %s seconds while moving %d "
+                       "blocks of %dMB each" % (i, test_time, dd_res[3], size))
+                logging.info(out)
+        finally:
+            error.context("Cleanup")
+            del(cgroup)
+            del(modules)
+
+            for session in sessions:
+                # try whether all sessions are clean
+                session.cmd("true")
+                session.close()
+
+        error.context("Results")
+        if err:
+            raise error.TestFail(err)
+        else:
+            return ("VM survived %d cgroup switches" % i)
+
+    @error.context_aware
     def devices_access():
         """
         Tests devices.list capability. It tries hot-adding disk with different
diff --git a/client/virt/subtests.cfg.sample b/client/virt/subtests.cfg.sample
index a5eac01..d96711d 100644
--- a/client/virt/subtests.cfg.sample
+++ b/client/virt/subtests.cfg.sample
@@ -1349,6 +1349,9 @@ variants:
             - cpuset_cpus_switching:
                 cgroup_test = "cpuset_cpus_switching"
                 # cgroup_test_time
+            - cpuset_mems_switching:
+                cgroup_test = "cpuset_mems_switching"
+                # cgroup_test_time, cgroup_cpuset_mems_mb
             - devices_access:
                 cgroup_test = "devices_access"
             - freezer:
-- 
1.7.7.6

  reply	other threads:[~2012-03-26 12:36 UTC|newest]

Thread overview: 3+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2012-03-26 12:36 [kvm-autotest] tests.cgroup: Add cpuset_mems_switching test Lukas Doktor
2012-03-26 12:36 ` Lukas Doktor [this message]
2012-03-26 12:36 ` [PATCH 2/2] tests.cgroup: fix memory_move test Lukas Doktor

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1332765391-19294-2-git-send-email-ldoktor@redhat.com \
    --to=ldoktor@redhat.com \
    --cc=autotest@test.kernel.org \
    --cc=jzupka@redhat.com \
    --cc=kvm-autotest@redhat.com \
    --cc=kvm@vger.kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.