From patchwork Tue Apr 26 10:22:31 2011
X-Patchwork-Submitter: Jiri Zupka
X-Patchwork-Id: 732172
From: Jiří Župka <jzupka@redhat.com>
To: kvm-autotest@redhat.com, kvm@vger.kernel.org, autotest@test.kernel.org,
    lmr@redhat.com, ldoktor@redhat.com, akong@redhat.com
Cc: jzupka@redhat.com
Subject: [AUTOTEST][PATCH] Add ability to call autotest client tests from
    kvm tests as subtests.
Date: Tue, 26 Apr 2011 12:22:31 +0200
Message-Id: <1303813351-7221-1-git-send-email-jzupka@redhat.com>
X-Mailing-List: kvm@vger.kernel.org

Example: run autotest/client/netperf2 as a server.

    test.runsubtest("netperf2", tag="server", server_ip=host_ip,
                    client_ip=guest_ip, role='server')

The client part is started in a parallel thread on the virtual machine:

    guest = kvm_utils.Thread(kvm_test_utils.run_autotest,
                             (vm, session, control_path, control_args,
                              timeout, outputdir, params))
    guest.start()

The guest must have the mpstat program installed for the netperf2 test.

The netperf2 test will be changed, or a new version of it will be created.
This patch is needed to avoid keeping duplicate versions of tests
(netperf, multicast, etc.).
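For reference, a condensed sketch of how the pieces fit together (a
hypothetical run_netperf_subtest(), modelled on the subtest.py test added
below; parameter names such as test_control_file and test_control_args are
the ones that test uses):

    import os, logging
    # Plain imports, as used by tests in the kvm test directory.
    import kvm_test_utils, kvm_utils
    from autotest_lib.client.bin.net import net_utils

    def run_netperf_subtest(test, params, env):
        # Log into the guest and gather the guest-side run parameters.
        vm = env.get_vm(params["main_vm"])
        vm.verify_alive()
        session = vm.wait_for_login(timeout=int(params.get("login_timeout", 360)))

        timeout = int(params.get("test_timeout", 300))
        control_path = os.path.join(test.bindir, "autotest_control",
                                    params.get("test_control_file"))

        # Find a host IP the guest can reach and pass both addresses to the
        # guest control file as its arguments.
        guest_ip = vm.get_address()
        host_ip = net_utils.network().get_corespond_local_ip(guest_ip)
        if host_ip is None:
            logging.error("Host cannot communicate with the guest over a "
                          "normal network connection.")
            return
        control_args = "%s %s" % (host_ip, guest_ip)

        # Guest side: run the autotest control file in a background thread.
        guest = kvm_utils.Thread(kvm_test_utils.run_autotest,
                                 (vm, session, control_path, control_args,
                                  timeout, test.outputdir, params))
        guest.start()

        # Host side: run the netperf2 server as a subtest of this test; its
        # results land under this test's result directory.
        test.runsubtest("netperf2", tag="server", server_ip=host_ip,
                        client_ip=guest_ip, role='server')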
Signed-off-by: Jiří Župka <jzupka@redhat.com>
---
 client/bin/client_logging_config.py    |    5 +-
 client/bin/net/net_utils.py            |   16 ++++-
 client/common_lib/base_job.py          |    2 +
 client/common_lib/logging_config.py    |    3 +-
 client/common_lib/test.py              |   21 ++++++-
 client/tests/kvm/html_report.py        |  115 ++++++++++++++++++--------------
 client/tests/kvm/kvm_test_utils.py     |   19 ++++--
 client/tests/kvm/tests/subtest.py      |   43 ++++++++++++
 client/tests/kvm/tests_base.cfg.sample |   10 +++-
 client/tests/netperf2/netperf2.py      |    3 +-
 10 files changed, 173 insertions(+), 64 deletions(-)
 create mode 100644 client/tests/kvm/tests/subtest.py

diff --git a/client/bin/client_logging_config.py b/client/bin/client_logging_config.py
index a59b078..28c007d 100644
--- a/client/bin/client_logging_config.py
+++ b/client/bin/client_logging_config.py
@@ -12,8 +12,9 @@ class ClientLoggingConfig(logging_config.LoggingConfig):
 
     def configure_logging(self, results_dir=None, verbose=False):
-        super(ClientLoggingConfig, self).configure_logging(use_console=True,
-                                                           verbose=verbose)
+        super(ClientLoggingConfig, self).configure_logging(
+                                                 use_console=self.use_console,
+                                                 verbose=verbose)
 
         if results_dir:
             log_dir = os.path.join(results_dir, 'debug')
diff --git a/client/bin/net/net_utils.py b/client/bin/net/net_utils.py
index 868958c..ac9b494 100644
--- a/client/bin/net/net_utils.py
+++ b/client/bin/net/net_utils.py
@@ -5,7 +5,7 @@ This library is to release in the public repository.
 import commands, os, re, socket, sys, time, struct
 from autotest_lib.client.common_lib import error
-import utils
+from autotest_lib.client.common_lib import utils
 
 TIMEOUT = 10 # Used for socket timeout and barrier timeout
 
@@ -27,6 +27,20 @@ class network_utils(object):
         utils.system('/sbin/ifconfig -a')
 
+    def get_corespond_local_ip(self, query_ip, netmask="24"):
+        """
+        Get the local IP address which can communicate with query_ip.
+
+        @param query_ip: IP of the client that wants to communicate with the autotest machine.
+        @return: IP address which can communicate with query_ip
+        """
+        ip = utils.system_output("ip addr show to %s/%s" % (query_ip, netmask))
+        ip = re.search(r"inet ([0-9.]*)/", ip)
+        if ip is None:
+            return ip
+        return ip.group(1)
+
+
     def disable_ip_local_loopback(self, ignore_status=False):
         utils.system("echo '1' > /proc/sys/net/ipv4/route/no_local_loopback",
                      ignore_status=ignore_status)
diff --git a/client/common_lib/base_job.py b/client/common_lib/base_job.py
index 843c0e8..eef9efc 100644
--- a/client/common_lib/base_job.py
+++ b/client/common_lib/base_job.py
@@ -1117,6 +1117,7 @@ class base_job(object):
         tag_parts = []
 
         # build up the parts of the tag used for the test name
+        master_testpath = dargs.get('master_testpath', "")
         base_tag = dargs.pop('tag', None)
         if base_tag:
             tag_parts.append(str(base_tag))
@@ -1132,6 +1133,7 @@ class base_job(object):
         if subdir_tag:
             tag_parts.append(subdir_tag)
         subdir = '.'.join([testname] + tag_parts)
+        subdir = os.path.join(master_testpath, subdir)
         tag = '.'.join(tag_parts)
 
         return full_testname, subdir, tag
diff --git a/client/common_lib/logging_config.py b/client/common_lib/logging_config.py
index afe754a..9114d7a 100644
--- a/client/common_lib/logging_config.py
+++ b/client/common_lib/logging_config.py
@@ -32,9 +32,10 @@ class LoggingConfig(object):
         fmt='%(asctime)s %(levelname)-5.5s| %(message)s',
         datefmt='%H:%M:%S')
 
-    def __init__(self):
+    def __init__(self, use_console=True):
         self.logger = logging.getLogger()
         self.global_level = logging.DEBUG
+        self.use_console = use_console
 
 
     @classmethod
diff --git a/client/common_lib/test.py b/client/common_lib/test.py
index c55d23b..b1a0904 100644
--- a/client/common_lib/test.py
+++ b/client/common_lib/test.py
@@ -465,6 +465,24 @@ class base_test(object):
             self.job.enable_warnings("NETWORK")
 
 
+    def runsubtest(self, url, *args, **dargs):
+        """
+        Run a subtest from within the running test.
+
+        @param url: Url of the new test.
+        @param tag: Tag added to the test name.
+        @param args: Args for the subtest.
+        @param dargs: Dictionary args for the subtest.
+        @param iterations: Number of iterations of the subtest.
+        @param profile_only: If True, do not profile.
+        """
+        dargs["profile_only"] = dargs.get("profile_only", True)
+        test_basepath = self.outputdir[len(self.job.resultdir + "/"):]
+        self.job.run_test(url, master_testpath=test_basepath,
+                          *args, **dargs)
+
+
 def _get_nonstar_args(func):
     """Extract all the (normal) function parameter names.
 
@@ -658,7 +676,8 @@ def runtest(job, url, tag, args, dargs,
     if not bindir:
         raise error.TestError(testname + ': test does not exist')
 
-    outputdir = os.path.join(job.resultdir, testname)
+    subdir = os.path.join(dargs.pop('master_testpath', ""), testname)
+    outputdir = os.path.join(job.resultdir, subdir)
     if tag:
         outputdir += '.' + tag
 
diff --git a/client/tests/kvm/html_report.py b/client/tests/kvm/html_report.py
index 8b4b109..bac48bd 100755
--- a/client/tests/kvm/html_report.py
+++ b/client/tests/kvm/html_report.py
@@ -1380,8 +1380,6 @@ function processList(ul) {
 ## input and create a single html formatted result page.
 ##
 #################################################################
-stimelist = []
-
 def make_html_file(metadata, results, tag, host, output_file_name, dirname):
 
     html_prefix = """
@@ -1427,11 +1425,12 @@ return true;
     total_executed = 0
     total_failed = 0
     total_passed = 0
     for res in results:
-        total_executed += 1
-        if res['status'] == 'GOOD':
-            total_passed += 1
-        else:
-            total_failed += 1
+        if results[res][2] != None:
+            total_executed += 1
+            if results[res][2]['status'] == 'GOOD':
+                total_passed += 1
+            else:
+                total_failed += 1
     stat_str = 'No test cases executed'
     if total_executed > 0:
         failed_perct = int(float(total_failed)/float(total_executed)*100)
@@ -1468,39 +1467,46 @@ id="t1" class="stats table-autosort:4 table-autofilter table-stripeclass:alterna
 """
     print >> output, result_table_prefix
-    for res in results:
-        print >> output, ''
-        print >> output, '%s' % res['time']
-        print >> output, '%s' % res['testcase']
-        if res['status'] == 'GOOD':
-            print >> output, 'PASS'
-        elif res['status'] == 'FAIL':
-            print >> output, 'FAIL'
-        elif res['status'] == 'ERROR':
-            print >> output, 'ERROR!'
-        else:
-            print >> output, '%s' % res['status']
-        # print exec time (seconds)
-        print >> output, '%s' % res['exec_time_sec']
-        # print log only if test failed..
-        if res['log']:
-            #chop all '\n' from log text (to prevent html errors)
-            rx1 = re.compile('(\s+)')
-            log_text = rx1.sub(' ', res['log'])
-
-            # allow only a-zA-Z0-9_ in html title name
-            # (due to bug in MS-explorer)
-            rx2 = re.compile('([^a-zA-Z_0-9])')
-            updated_tag = rx2.sub('_', res['title'])
-
-            html_body_text = '%s%s' % (str(updated_tag), log_text)
-            print >> output, 'Info' % (str(updated_tag), str(html_body_text))
-        else:
-            print >> output, ''
-        # print execution time
-        print >> output, 'Debug' % os.path.join(dirname, res['title'], "debug")
+    def print_result(result, indent):
+        while result != []:
+            r = result.pop(0)
+            print r
+            res = results[r][2]
+            print >> output, ''
+            print >> output, '%s' % res['time']
+            print >> output, '%s' % (indent * 20, res['title'])
+            if res['status'] == 'GOOD':
+                print >> output, 'PASS'
+            elif res['status'] == 'FAIL':
+                print >> output, 'FAIL'
+            elif res['status'] == 'ERROR':
+                print >> output, 'ERROR!'
+            else:
+                print >> output, '%s' % res['status']
+            # print exec time (seconds)
+            print >> output, '%s' % res['exec_time_sec']
+            # print log only if test failed..
+            if res['log']:
+                #chop all '\n' from log text (to prevent html errors)
+                rx1 = re.compile('(\s+)')
+                log_text = rx1.sub(' ', res['log'])
+
+                # allow only a-zA-Z0-9_ in html title name
+                # (due to bug in MS-explorer)
+                rx2 = re.compile('([^a-zA-Z_0-9])')
+                updated_tag = rx2.sub('_', res['title'])
+
+                html_body_text = '%s%s' % (str(updated_tag), log_text)
+                print >> output, 'Info' % (str(updated_tag), str(html_body_text))
+            else:
+                print >> output, ''
+            # print execution time
+            print >> output, 'Debug' % os.path.join(dirname, res['subdir'], "debug")
+
+            print >> output, ''
+            print_result(results[r][1], indent + 1)
-        print >> output, ''
+    print_result(results[""][1], 0)
     print >> output, ""
 
@@ -1528,15 +1534,20 @@ id="t1" class="stats table-autosort:4 table-autofilter table-stripeclass:alterna
     output.close()
 
 
-def parse_result(dirname, line):
+def parse_result(dirname, line, results_data):
     parts = line.split()
     if len(parts) < 4:
         return None
-    global stimelist
+    global tests
     if parts[0] == 'START':
         pair = parts[3].split('=')
         stime = int(pair[1])
-        stimelist.append(stime)
+        results_data[parts[1]] = [stime, [], None]
+        try:
+            parent_test = re.findall(r".*/", parts[1])[0][:-1]
+            results_data[parent_test][1].append(parts[1])
+        except IndexError:
+            results_data[""][1].append(parts[1])
 
     elif (parts[0] == 'END'):
         result = {}
@@ -1553,21 +1564,25 @@ id="t1" class="stats table-autosort:4 table-autofilter table-stripeclass:alterna
             result['exec_time_sec'] = 'na'
         tag = parts[3]
+        result['subdir'] = parts[2]
 
         # assign actual values
         rx = re.compile('^(\w+)\.(.*)$')
         m1 = rx.findall(parts[3])
-        result['testcase'] = m1[0][1]
+        if len(m1):
+            result['testcase'] = m1[0][1]
+        else:
+            result['testcase'] = parts[3]
         result['title'] = str(tag)
         result['status'] = parts[1]
         if result['status'] != 'GOOD':
             result['log'] = get_exec_log(dirname, tag)
 
-        if len(stimelist)>0:
+        if len(results_data)>0:
             pair = parts[4].split('=')
             etime = int(pair[1])
-            stime = stimelist.pop()
+            stime = results_data[parts[2]][0]
             total_exec_time_sec = etime - stime
             result['exec_time_sec'] = total_exec_time_sec
-    return result
+        results_data[parts[2]][2] = result
     return None
 
 
@@ -1689,16 +1704,15 @@ def main(argv):
     host = get_info_file('%s/hostname' % sysinfo_dir)
     rx = re.compile('^\s+[END|START].*$')
     # create the results set dict
-    results_data = []
+    results_data = {}
+    results_data[""] = [0, [], None]
    if os.path.exists(status_file_name):
        f = open(status_file_name, "r")
        lines = f.readlines()
        f.close()
        for line in lines:
            if rx.match(line):
-                result_dict = parse_result(dirname, line)
-                if result_dict:
-                    results_data.append(result_dict)
+                parse_result(dirname, line, results_data)
 
     # create the meta info dict
     metalist = {
                 'uname': get_info_file('%s/uname' % sysinfo_dir),
@@ -1711,7 +1725,6 @@
                 'dmesg':get_info_file('%s/dmesg' % sysinfo_dir),
                 'kvmver':get_kvm_version(dirname)
                 }
-
     make_html_file(metalist, results_data, tag, host, output_file_name,
                    html_path)
     sys.exit(0)
diff --git a/client/tests/kvm/kvm_test_utils.py b/client/tests/kvm/kvm_test_utils.py
index b5c4a24..c2c9615 100644
--- a/client/tests/kvm/kvm_test_utils.py
+++ b/client/tests/kvm/kvm_test_utils.py
@@ -429,13 +429,15 @@ def get_memory_info(lvms):
     return meminfo
 
 
-def run_autotest(vm, session, control_path, timeout, outputdir, params):
+def run_autotest(vm, session, control_path, control_args,
+                 timeout, outputdir, params):
     """
     Run an autotest control file inside a guest (linux only utility).
 
     @param vm: VM object.
     @param session: A shell session on the VM provided.
     @param control_path: A path to an autotest control file.
+    @param control_args: Arguments for the control file.
     @param timeout: Timeout under which the autotest control file must complete.
     @param outputdir: Path on host where we should copy the guest autotest
             results to.
@@ -560,6 +562,10 @@ def run_autotest(vm, session, control_path, timeout, outputdir, params):
             pass
 
     try:
         bg = None
+        if control_args != None:
+            control_args = ' -a "' + control_args + '"'
+        else:
+            control_args = ""
         try:
             logging.info("---------------- Test output ----------------")
             if migrate_background:
@@ -567,7 +573,8 @@ def run_autotest(vm, session, control_path, timeout, outputdir, params):
                 mig_protocol = params.get("migration_protocol", "tcp")
 
                 bg = kvm_utils.Thread(session.cmd_output,
-                                      kwargs={'cmd': "bin/autotest control",
+                                      kwargs={'cmd': "bin/autotest control" +
+                                                     control_args,
                                               'timeout': timeout,
                                               'print_func': logging.info})
 
@@ -578,8 +585,8 @@ def run_autotest(vm, session, control_path, timeout, outputdir, params):
                             "migration ...")
                     vm.migrate(timeout=mig_timeout, protocol=mig_protocol)
             else:
-                session.cmd_output("bin/autotest control", timeout=timeout,
-                                   print_func=logging.info)
+                session.cmd_output("bin/autotest control" + control_args,
+                                   timeout=timeout, print_func=logging.info)
         finally:
             logging.info("------------- End of test output ------------")
             if migrate_background and bg:
@@ -623,8 +630,8 @@ def run_autotest(vm, session, control_path, timeout, outputdir, params):
 
 def get_loss_ratio(output):
     """
-    Get the packet loss ratio from the output of ping
-.
+    Get the packet loss ratio from the output of ping.
+
     @param output: Ping output.
     """
     try:
diff --git a/client/tests/kvm/tests/subtest.py b/client/tests/kvm/tests/subtest.py
new file mode 100644
index 0000000..380390e
--- /dev/null
+++ b/client/tests/kvm/tests/subtest.py
@@ -0,0 +1,43 @@
+import os, logging
+import kvm_test_utils, kvm_utils
+from autotest_lib.client.bin import job
+from autotest_lib.client.bin.net import net_utils
+
+
+def run_subtest(test, params, env):
+    """
+    Run an autotest test inside a guest and a subtest on the host side.
+    This test is intended to replace the netperf test in kvm.
+
+    @param test: kvm test object.
+    @param params: Dictionary with test parameters.
+    @param env: Dictionary with the test environment.
+ """ + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + timeout = int(params.get("login_timeout", 360)) + session = vm.wait_for_login(timeout=timeout) + + # Collect test parameters + timeout = int(params.get("test_timeout", 300)) + control_path = os.path.join(test.bindir, "autotest_control", + params.get("test_control_file")) + control_args = params.get("test_control_args") + outputdir = test.outputdir + + guest_ip = vm.get_address() + host_ip = net_utils.network().get_corespond_local_ip(guest_ip) + if not host_ip is None: + control_args = host_ip + " " + guest_ip + + guest = kvm_utils.Thread(kvm_test_utils.run_autotest, + (vm, session, control_path, control_args, + timeout, outputdir, params)) + guest.start() + + test.runsubtest("netperf2", tag="server", server_ip=host_ip, + client_ip=guest_ip, role='server') + + else: + logging.error("Host cannot communicate with client by" + " normal network connection.") \ No newline at end of file diff --git a/client/tests/kvm/tests_base.cfg.sample b/client/tests/kvm/tests_base.cfg.sample index 5d274f8..f9efb4b 100644 --- a/client/tests/kvm/tests_base.cfg.sample +++ b/client/tests/kvm/tests_base.cfg.sample @@ -255,11 +255,19 @@ variants: test_control_file = rtc.control - iozone: test_control_file = iozone.control - - flail: + - flail: test_control_file = flail.control - systemtap: test_control_file = systemtap.control +- subtest: install setup unattended_install.cdrom + type = subtest + test_timeout = 1800 + variants: + - netperf2: + test_control_file = netperf2.control + nic_mode = tap + - linux_s3: install setup unattended_install.cdrom only Linux type = linux_s3 diff --git a/client/tests/netperf2/netperf2.py b/client/tests/netperf2/netperf2.py index 1b659dd..23d25c5 100644 --- a/client/tests/netperf2/netperf2.py +++ b/client/tests/netperf2/netperf2.py @@ -2,6 +2,7 @@ import os, time, re, logging from autotest_lib.client.bin import test, utils from autotest_lib.client.bin.net import net_utils from autotest_lib.client.common_lib import error +from autotest_lib.client.common_lib import barrier MPSTAT_IX = 0 NETPERF_IX = 1 @@ -36,7 +37,7 @@ class netperf2(test.test): def run_once(self, server_ip, client_ip, role, test = 'TCP_STREAM', test_time = 15, stream_list = [1], test_specific_args = '', - cpu_affinity = '', dev = '', bidi = False, wait_time = 5): + cpu_affinity = '', dev = '', bidi = False, wait_time = 2): """ server_ip: IP address of host running netserver client_ip: IP address of host running netperf client(s)