From patchwork Thu Oct 11 12:32:03 2012 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-Patchwork-Submitter: Jiri Zupka X-Patchwork-Id: 1581891 Return-Path: X-Original-To: patchwork-kvm@patchwork.kernel.org Delivered-To: patchwork-process-083081@patchwork2.kernel.org Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by patchwork2.kernel.org (Postfix) with ESMTP id B9EEFDFABE for ; Thu, 11 Oct 2012 12:32:33 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1753915Ab2JKMca (ORCPT ); Thu, 11 Oct 2012 08:32:30 -0400 Received: from mx1.redhat.com ([209.132.183.28]:4894 "EHLO mx1.redhat.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1750850Ab2JKMc3 (ORCPT ); Thu, 11 Oct 2012 08:32:29 -0400 Received: from int-mx11.intmail.prod.int.phx2.redhat.com (int-mx11.intmail.prod.int.phx2.redhat.com [10.5.11.24]) by mx1.redhat.com (8.14.4/8.14.4) with ESMTP id q9BCW76C004425 (version=TLSv1/SSLv3 cipher=DHE-RSA-AES256-SHA bits=256 verify=OK); Thu, 11 Oct 2012 08:32:07 -0400 Received: from jzupka-pc.local.com (vpn1-6-73.ams2.redhat.com [10.36.6.73]) by int-mx11.intmail.prod.int.phx2.redhat.com (8.14.4/8.14.4) with ESMTP id q9BCW5P0003869; Thu, 11 Oct 2012 08:32:05 -0400 From: =?UTF-8?q?Ji=C5=99=C3=AD=20=C5=BDupka?= To: autotest@test.kernel.org, kvm@vger.kernel.org, kvm-autotest@redhat.com, lmr@redhat.com, ldoktor@redhat.com, jzupka@redhat.com, shu@redhat.com Subject: [Autotest][PATCH] virt: Adds multi_host_migration early boot tests. 
Date: Thu, 11 Oct 2012 14:32:03 +0200 Message-Id: <1349958723-8090-1-git-send-email-jzupka@redhat.com> MIME-Version: 1.0 X-Scanned-By: MIMEDefang 2.68 on 10.5.11.24 Sender: kvm-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: kvm@vger.kernel.org pull-request: https://github.com/autotest/virt-test/pull/56 Signed-off-by: Jiří Župka --- kvm/cfg/multi-host-tests.cfg.sample | 2 ++ kvm/tests/migration_multi_host_fd.py | 7 +++++-- shared/cfg/subtests.cfg.sample | 17 +++++++++++++++-- virttest/env_process.py | 3 --- virttest/kvm_vm.py | 21 +++++++++++---------- virttest/utils_test.py | 34 ++++++++++++++++++++++++++-------- 6 files changed, 59 insertions(+), 25 deletions(-) diff --git a/kvm/cfg/multi-host-tests.cfg.sample b/kvm/cfg/multi-host-tests.cfg.sample index d47e756..20bc385 100644 --- a/kvm/cfg/multi-host-tests.cfg.sample +++ b/kvm/cfg/multi-host-tests.cfg.sample @@ -23,6 +23,8 @@ variants: only smallpages only pc only Fedora.17.64 + cpu_model = "core2duo" + cpu_model_flags = "+sse3" only migrate_multi_host # Runs qemu, f16 64 bit guest OS, install, boot, shutdown diff --git a/kvm/tests/migration_multi_host_fd.py b/kvm/tests/migration_multi_host_fd.py index ec9210a..bc6e404 100644 --- a/kvm/tests/migration_multi_host_fd.py +++ b/kvm/tests/migration_multi_host_fd.py @@ -34,8 +34,11 @@ def run_migration_multi_host_fd(test, params, env): fd_src=mig_data.params['migration_fd']) def _check_vms_source(self, mig_data): - for vm in mig_data.vms: - vm.wait_for_login(timeout=self.login_timeout) + start_mig_tout = mig_data.params.get("start_migration_timeout", + None) + if start_mig_tout is None: + for vm in mig_data.vms: + vm.wait_for_login(timeout=self.login_timeout) self._hosts_barrier(mig_data.hosts, mig_data.mig_id, 'prepare_VMS', 60) diff --git a/shared/cfg/subtests.cfg.sample b/shared/cfg/subtests.cfg.sample index 695d60e..61dd79a 100644 --- a/shared/cfg/subtests.cfg.sample +++ b/shared/cfg/subtests.cfg.sample @@ -973,7 +973,7 @@ variants: 
kill_vm_on_error = yes iterations = 2 used_mem = 1024 - mig_timeout = 4800 + mig_timeout = 480 disk_prepare_timeout = 360 comm_port = 13234 regain_ip_cmd = killall dhclient; sleep 10; dhclient; @@ -990,6 +990,20 @@ variants: -fd: type = migration_multi_host_fd + variants: + #Time when start migration + - after_login_vm: + paused_after_start_vm = no + - early_boot_vm: + no measure_migration_speed + login_timeout = 420 + paused_after_start_vm = yes + variants: + -timeout_0: + start_migration_timeout = 0 + -timeout_6: + start_migration_timeout = 6 + - migration_multi_host_with_file_transfer: install setup image_copy unattended_install.cdrom type = migration_multi_host_with_file_transfer vms = "vm1" @@ -1065,7 +1079,6 @@ variants: - rhel6.3.0: machine_type = "rhel6.3.0" - - boot_savevm: install setup image_copy unattended_install.cdrom type = boot_savevm savevm_delay = 0.3 diff --git a/virttest/env_process.py b/virttest/env_process.py index f309fe2..0b3bd4e 100644 --- a/virttest/env_process.py +++ b/virttest/env_process.py @@ -106,9 +106,6 @@ def preprocess_vm(test, params, env, name): vm.create(name, params, test.bindir, migration_mode=params.get("migration_mode"), migration_fd=params.get("migration_fd")) - if params.get("paused_after_start_vm") == "yes": - if vm.state() != "paused": - vm.pause() else: # Don't start the VM, just update its params vm.params = params diff --git a/virttest/kvm_vm.py b/virttest/kvm_vm.py index c9bbe57..7436553 100644 --- a/virttest/kvm_vm.py +++ b/virttest/kvm_vm.py @@ -1785,16 +1785,17 @@ class VM(virt_vm.BaseVM): output_func=utils_misc.log_line, output_params=(outfile,)) - # start guest - if self.monitor.verify_status("paused"): - try: - self.monitor.cmd("cont") - except kvm_monitor.QMPCmdError, e: - if ((e.data['class'] == "MigrationExpected") and - (migration_mode is not None)): - logging.debug("Migration did not start yet...") - else: - raise e + if params.get("paused_after_start_vm") != "yes": + # start guest + if 
self.monitor.verify_status("paused"): + try: + self.monitor.cmd("cont") + except kvm_monitor.QMPCmdError, e: + if ((e.data['class'] == "MigrationExpected") and + (migration_mode is not None)): + logging.debug("Migration did not start yet...") + else: + raise e finally: fcntl.lockf(lockfile, fcntl.LOCK_UN) diff --git a/virttest/utils_test.py b/virttest/utils_test.py index 984b95d..35beab6 100644 --- a/virttest/utils_test.py +++ b/virttest/utils_test.py @@ -556,8 +556,10 @@ class MultihostMigration(object): def _check_vms_source(self, mig_data): - for vm in mig_data.vms: - vm.wait_for_login(timeout=self.login_timeout) + start_mig_tout = mig_data.params.get("start_migration_timeout", None) + if start_mig_tout is None: + for vm in mig_data.vms: + vm.wait_for_login(timeout=self.login_timeout) sync = SyncData(self.master_id(), self.hostid, mig_data.hosts, mig_data.mig_id, self.sync_server) @@ -707,7 +709,8 @@ class MultihostMigration(object): check_work=None, params_append=None): logging.info("Starting migrate vms %s from host %s to %s" % (vms_name, srchost, dsthost)) - error = None + pause = self.params.get("paused_after_start_vm") + mig_error = None mig_data = MigrationData(self.params, srchost, dsthost, vms_name, params_append) try: @@ -721,7 +724,18 @@ class MultihostMigration(object): if mig_data.is_src(): if start_work: - start_work(mig_data) + if pause != "yes": + start_work(mig_data) + else: + raise error.TestNAError("Can't start work if " + "vm is paused.") + + # Starts VM and waits timeout before migration. 
+ if self.params.get("paused_after_start_vm") == "yes": + for vm in mig_data.vms: + vm.resume() + wait = self.params.get("start_migration_timeout", 0) + time.sleep(int(wait)) self.migrate_vms(mig_data) @@ -734,13 +748,17 @@ class MultihostMigration(object): if mig_data.is_dst(): self.check_vms(mig_data) if check_work: - check_work(mig_data) - + if pause != "yes": + check_work(mig_data) + else: + raise error.TestNAError("Can't check work if " + "vm was paused before " + "migration.") except: - error = True + mig_error = True raise finally: - if not error: + if not mig_error: self._hosts_barrier(self.hosts, mig_data.mig_id, 'test_finihed',