
[2/3] KVM test: Add Transparent Hugepages subtests

Message ID 1309192003-5456-3-git-send-email-lookkas@gmail.com (mailing list archive)
State New, archived

Commit Message

Lucas Meneghel Rodrigues June 27, 2011, 4:26 p.m. UTC
From: Yiqiao Pu <ypu@redhat.com>

Transparent hugepage test includes:

1) Smoke test and stress test
The smoke test verifies that transparent hugepages are actually used by
KVM and the guest. The stress test runs parallel dd instances to exercise
the stability of transparent hugepages.

2) Swap test
Boot up a VM and verify that its memory can be swapped out and swapped in
correctly.

3) Defrag test
Allocate hugepages for libhugetlbfs with defrag off and then on, and
compare the results.

Changes from v1:
* Different paths to mount debugfs and tmpfs on
* Use of autotest API to execute commands
* Use more current guest virt API to execute commands

Changes from v2:
* Add the THP setup step to the smoke and stress tests
* Put setup and cleanup in a try: ... finally: block to make sure the
host environment is cleaned up after the test.

Changes from v3:
* The base THP tests (smoke and stress) are now executed sequentially;
if the smoke test fails, the stress test still runs. Results are
reported after both tests have finished.

Signed-off-by: Yiqiao Pu <ypu@redhat.com>
Signed-off-by: Lucas Meneghel Rodrigues <lmr@redhat.com>
---
 client/tests/kvm/tests/trans_hugepage.py          |  127 +++++++++++++++++++++
 client/tests/kvm/tests/trans_hugepage_defrag.py   |   86 ++++++++++++++
 client/tests/kvm/tests/trans_hugepage_swapping.py |  115 +++++++++++++++++++
 3 files changed, 328 insertions(+), 0 deletions(-)
 create mode 100644 client/tests/kvm/tests/trans_hugepage.py
 create mode 100644 client/tests/kvm/tests/trans_hugepage_defrag.py
 create mode 100644 client/tests/kvm/tests/trans_hugepage_swapping.py

Patch

diff --git a/client/tests/kvm/tests/trans_hugepage.py b/client/tests/kvm/tests/trans_hugepage.py
new file mode 100644
index 0000000..a533496
--- /dev/null
+++ b/client/tests/kvm/tests/trans_hugepage.py
@@ -0,0 +1,127 @@ 
+import logging, os, re
+from autotest_lib.client.common_lib import error
+from autotest_lib.client.common_lib import utils
+from autotest_lib.client.virt import virt_test_utils, virt_test_setup
+
+
+@error.context_aware
+def run_trans_hugepage(test, params, env):
+    """
+    KVM transparent hugepages (THP) test:
+    1) Smoke test
+    2) Stress test
+
+    @param test: KVM test object.
+    @param params: Dictionary with test parameters.
+    @param env: Dictionary with the test environment.
+    """
+    def get_mem_status(key, location):
+        if location == "host":
+            info = utils.system_output("cat /proc/meminfo")
+        else:
+            info = session.cmd("cat /proc/meminfo")
+        for line in re.split("\n+", info):
+            if line.startswith(key):
+                output = re.split('\s+', line)[1]
+        return output
+
+    dd_timeout = float(params.get("dd_timeout", 900))
+    mem = int(params['mem'])
+    failures = []
+
+    debugfs_flag = 1
+    debugfs_path = os.path.join(test.tmpdir, 'debugfs')
+    mem_path = os.path.join("/tmp", 'thp_space')
+
+    login_timeout = float(params.get("login_timeout", "3600"))
+
+    error.context("smoke test setup")
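+    # Mount a private debugfs instance so the test can later read KVM's
+    # 'largepages' statistic from it.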
+    if not os.path.ismount(debugfs_path):
+        if not os.path.isdir(debugfs_path):
+            os.makedirs(debugfs_path)
+        utils.run("mount -t debugfs none %s" % debugfs_path)
+
+    test_config = virt_test_setup.TransparentHugePageConfig(test, params)
+    vm = virt_test_utils.get_living_vm(env, params.get("main_vm"))
+    session = virt_test_utils.wait_for_login(vm, timeout=login_timeout)
+
+    try:
+        # Check khugepage is used by guest
+        test_config.setup()
+
+        logging.info("Smoke test start")
+        error.context("smoke test")
+
+        nr_ah_before = int(get_mem_status('AnonHugePages', 'host'))
+        if nr_ah_before <= 0:
+            e_msg = 'smoke: Host is not using THP'
+            logging.error(e_msg)
+            failures.append(e_msg)
+
+        # Protect the guest from the OOM killer: never use more than the
+        # guest's free memory for the tmpfs mount
+        if int(get_mem_status('MemFree', 'guest')) / 1024 < mem:
+            mem = int(get_mem_status('MemFree', 'guest')) / 1024
+
+        session.cmd("mkdir -p %s" % mem_path)
+
+        session.cmd("mount -t tmpfs -o size=%sM none %s" % (str(mem), mem_path))
+
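+        # dd writes blocks of ~4 MB (bs=4000000), so mem/4 blocks roughly
+        # fill the tmpfs mount; the host AnonHugePages counter should grow.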
+        count = mem / 4
+        session.cmd("dd if=/dev/zero of=%s/1 bs=4000000 count=%s" %
+                    (mem_path, count), timeout=dd_timeout)
+
+        nr_ah_after = int(get_mem_status('AnonHugePages', 'host'))
+
+        if nr_ah_after <= nr_ah_before:
+            e_msg = ('smoke: Host did not use new THP during dd')
+            logging.error(e_msg)
+            failures.append(e_msg)
+
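+        # KVM's debugfs 'largepages' statistic counts guest pages mapped with
+        # huge pages; a value > 0 means KVM itself is using THP.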
+        if debugfs_flag == 1:
+            if int(open('%s/kvm/largepages' % debugfs_path, 'r').read()) <= 0:
+                e_msg = 'smoke: KVM is not using THP'
+                logging.error(e_msg)
+                failures.append(e_msg)
+
+        logging.info("Smoke test finished")
+
+        # Use parallel dd as stress for memory
+        count = count / 3
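+        # Spawn 'count' dd processes in parallel, each writing a single ~4 MB
+        # file; tolerate up to 5% of them failing with "No space".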
+        logging.info("Stress test start")
+        error.context("stress test")
+        cmd = "rm -rf %s/*; for i in `seq %s`; do dd " % (mem_path, count)
+        cmd += "if=/dev/zero of=%s/$i bs=4000000 count=1& done;wait" % mem_path
+        output = session.cmd_output(cmd, timeout=dd_timeout)
+
+        if len(re.findall("No space", output)) > count * 0.05:
+            e_msg = "stress: Too many dd instances failed in guest"
+            logging.error(e_msg)
+            failures.append(e_msg)
+
+        try:
+            output = session.cmd('pidof dd')
+        except Exception:
+            output = None
+
+        if output is not None:
+            for pid in output.split():
+                session.cmd('kill -9 %s' % pid)
+
+        session.cmd("umount %s" % mem_path)
+
+        logging.info("Stress test finished")
+
+    finally:
+        error.context("all tests cleanup")
+        if os.path.ismount(debugfs_path):
+            utils.run("umount %s" % debugfs_path)
+        if os.path.isdir(debugfs_path):
+            os.removedirs(debugfs_path)
+        session.close()
+        test_config.cleanup()
+
+    error.context("")
+    if failures:
+        raise error.TestFail("THP base test reported %s failures:\n%s" %
+                             (len(failures), "\n".join(failures)))
diff --git a/client/tests/kvm/tests/trans_hugepage_defrag.py b/client/tests/kvm/tests/trans_hugepage_defrag.py
new file mode 100644
index 0000000..bf81362
--- /dev/null
+++ b/client/tests/kvm/tests/trans_hugepage_defrag.py
@@ -0,0 +1,86 @@ 
+import logging, time, os, re
+from autotest_lib.client.common_lib import error
+from autotest_lib.client.bin import utils
+from autotest_lib.client.virt import virt_test_utils, virt_test_setup
+
+
+@error.context_aware
+def run_trans_hugepage_defrag(test, params, env):
+    """
+    KVM khugepaged defrag test:
+    1) Fragment the host memory.
+    2) Allocate hugepages for libhugetlbfs with khugepaged defrag disabled.
+    3) Enable khugepaged defrag and allocate hugepages again.
+    4) Verify that more hugepages could be allocated with defrag enabled.
+
+    @param test: KVM test object.
+    @param params: Dictionary with test parameters.
+    @param env: Dictionary with the test environment.
+    """
+    def get_mem_status(key):
+        for line in file('/proc/meminfo', 'r').readlines():
+            if line.startswith(key):
+                output = re.split('\s+', line)[1]
+        return output
+
+
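+    # Request 'number' static hugepages via /proc/sys/vm/nr_hugepages and
+    # return how many the kernel actually managed to allocate.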
+    def set_libhugetlbfs(number):
+        f = file("/proc/sys/vm/nr_hugepages", "w+")
+        f.write(number)
+        f.seek(0)
+        ret = f.read()
+        return int(ret)
+
+    test_config = virt_test_setup.TransparentHugePageConfig(test, params)
+    # Test the defrag
+    logging.info("Defrag test start")
+    login_timeout = float(params.get("login_timeout", 360))
+    vm = virt_test_utils.get_living_vm(env, params.get("main_vm"))
+    session = virt_test_utils.wait_for_login(vm, timeout=login_timeout)
+    mem_path = os.path.join("/tmp", "thp_space")
+
+    try:
+        test_config.setup()
+        error.context("Fragmenting guest memory")
+        try:
+            if not os.path.isdir(mem_path):
+                os.makedirs(mem_path)
+            if os.system("mount -t tmpfs none %s" % mem_path):
+                raise error.TestError("Cannot mount tmpfs on %s" % mem_path)
+
+            # Try to fragment the memory a bit
+            cmd = ("for i in `seq 262144`; do dd if=/dev/urandom of=%s/$i "
+                   "bs=4K count=1 & done" % mem_path)
+            utils.run(cmd)
+        finally:
+            if os.path.ismount(mem_path):
+                utils.run("umount %s" % mem_path)
+
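+        # Request enough hugepages to cover all of RAM; the number the kernel
+        # actually grants reflects how fragmented memory currently is.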
+        total = int(get_mem_status('MemTotal'))
+        hugepagesize = int(get_mem_status('Hugepagesize'))
+        nr_full = str(total / hugepagesize)
+
+        error.context("activating khugepaged defrag functionality")
+        # Allocate hugepages for libhugetlbfs before and after enabling
+        # defrag, and compare the results.
+        nr_hp_before = set_libhugetlbfs(nr_full)
+        try:
+            defrag_path = os.path.join(test_config.thp_path, 'khugepaged',
+                                       'defrag')
+            file(str(defrag_path), 'w').write('yes')
+        except IOError, e:
+            raise error.TestFail("Can not start defrag on khugepaged: %s" % e)
+        # TODO: Is sleeping an arbitrary amount of time appropriate? Aren't
+        # there better ways to do this?
+        time.sleep(1)
+        nr_hp_after = set_libhugetlbfs(nr_full)
+
+        if nr_hp_before >= nr_hp_after:
+            raise error.TestFail("There was no memory defragmentation on host: "
+                                 "%s huge pages allocated before turning "
+                                 "khugepaged defrag on, %s allocated after it" %
+                                 (nr_hp_before, nr_hp_after))
+        logging.info("Defrag test succeeded")
+    finally:
+        session.close()
+        test_config.cleanup()
diff --git a/client/tests/kvm/tests/trans_hugepage_swapping.py b/client/tests/kvm/tests/trans_hugepage_swapping.py
new file mode 100644
index 0000000..10600b0
--- /dev/null
+++ b/client/tests/kvm/tests/trans_hugepage_swapping.py
@@ -0,0 +1,115 @@ 
+import logging, os, re
+from autotest_lib.client.common_lib import error
+from autotest_lib.client.bin import utils
+from autotest_lib.client.virt import virt_utils, virt_test_utils
+from autotest_lib.client.virt import virt_test_setup, virt_env_process
+
+
+@error.context_aware
+def run_trans_hugepage_swapping(test, params, env):
+    """
+    KVM transparent hugepage swapping test:
+    1) Verify that guest memory backed by hugepages can be swapped out/in.
+
+    @param test: KVM test object.
+    @param params: Dictionary with test parameters.
+    @param env: Dictionary with the test environment.
+    """
+    def get_args(args_list):
+        """
+        Get the memory arguments from system
+        """
+        args_list_tmp = args_list.copy()
+        for line in file('/proc/meminfo', 'r').readlines():
+            for key in args_list_tmp.keys():
+                if line.startswith("%s" % args_list_tmp[key]):
+                    args_list_tmp[key] = int(re.split('\s+', line)[1])
+        return args_list_tmp
+
+    test_config = virt_test_setup.TransparentHugePageConfig(test, params)
+    session = None
+    try:
+        test_config.setup()
+        # Swapping test
+        logging.info("Swapping test start")
+        # Parameters of memory information
+        # @total: Memory size
+        # @free: Free memory size
+        # @swap_size: Swap size
+        # @swap_free: Free swap size
+        # @hugepage_size: Page size of one hugepage
+        # @page_size: The biggest page size that app can ask for
+        args_dict_check = {"free" : "MemFree", "swap_size" : "SwapTotal",
+                           "swap_free" : "SwapFree", "total" : "MemTotal",
+                           "hugepage_size" : "Hugepagesize",}
+        args_dict = get_args(args_dict_check)
+        swap_free = []
+        total = int(args_dict['total']) / 1024
+        free = int(args_dict['free']) / 1024
+        swap_size = int(args_dict['swap_size']) / 1024
+        swap_free.append(int(args_dict['swap_free'])/1024)
+        hugepage_size = int(args_dict['hugepage_size']) / 1024
+        dd_timeout = float(params.get("dd_timeout", 900))
+        login_timeout = float(params.get("login_timeout", 360))
+        check_cmd_timeout = float(params.get("check_cmd_timeout", 900))
+        mem_path = os.path.join(test.tmpdir, 'thp_space')
+        tmpfs_path = "/space"
+
+        # If free swap can absorb what is already in memory, fill all of
+        # memory with dd; otherwise only fill the free memory
+        if swap_free[0] > (total - free):
+            count = total / hugepage_size
+            tmpfs_size = total
+        else:
+            count = free / hugepage_size
+            tmpfs_size = free
+
+        if swap_size <= 0:
+            raise error.TestNAError("Host does not have swap enabled")
+        try:
+            if not os.path.isdir(mem_path):
+                os.makedirs(mem_path)
+            utils.run("mount -t tmpfs  -o size=%sM none %s" % (tmpfs_size,
+                                                               mem_path))
+
+            # Cap the VM memory size at the free swap size so that the host
+            # OOM killer is not triggered; if needed, boot a clone of the
+            # main VM with the smaller memory size.
+            vm = virt_test_utils.get_living_vm(env, params.get("main_vm"))
+            if int(params['mem']) > swap_free[0]:
+                vm.destroy()
+                vm_name = 'vmsw'
+                vm0 = params.get("main_vm")
+                vm0_key = virt_utils.env_get_vm(env, vm0)
+                params['vms'] = params['vms'] + " " + vm_name
+                params['mem'] = str(swap_free[0])
+                vm_key = vm0_key.clone(vm0, params)
+                virt_utils.env_register_vm(env, vm_name, vm_key)
+                virt_env_process.preprocess_vm(test, params, env, vm_name)
+                vm_key.create()
+                session = virt_utils.wait_for(vm_key.remote_login,
+                                              timeout=login_timeout)
+            else:
+                session = virt_test_utils.wait_for_login(vm,
+                                                        timeout=login_timeout)
+
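+            # Writing 'count' blocks of one hugepage each fills the tmpfs
+            # mount on the host and forces guest memory out to swap.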
+            error.context("making guest to swap memory")
+            cmd = ("dd if=/dev/zero of=%s/zero bs=%s000000 count=%s" %
+                   (mem_path, hugepage_size, count))
+            utils.run(cmd)
+
+            args_dict = get_args(args_dict_check)
+            swap_free.append(int(args_dict['swap_free'])/1024)
+
+            if swap_free[1] - swap_free[0] >= 0:
+                raise error.TestFail("No guest memory was swapped out")
+
+            # Try harder to get guest memory swapped by generating activity
+            # inside the guest
+            session.cmd("find / -name \"*\"", timeout=check_cmd_timeout)
+        finally:
+            if os.path.ismount(mem_path):
+                utils.run("umount %s" % mem_path)
+
+        logging.info("Swapping test succeed")
+
+    finally:
+        if session is not None:
+            session.close()
+        test_config.cleanup()