From patchwork Mon May 23 07:28:32 2011 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Lucas Meneghel Rodrigues X-Patchwork-Id: 807852 Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by demeter1.kernel.org (8.14.4/8.14.3) with ESMTP id p4N7SWqk025627 for ; Mon, 23 May 2011 07:28:33 GMT Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1752925Ab1EWH22 (ORCPT ); Mon, 23 May 2011 03:28:28 -0400 Received: from mx1.redhat.com ([209.132.183.28]:1025 "EHLO mx1.redhat.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1752750Ab1EWH22 (ORCPT ); Mon, 23 May 2011 03:28:28 -0400 Received: from int-mx09.intmail.prod.int.phx2.redhat.com (int-mx09.intmail.prod.int.phx2.redhat.com [10.5.11.22]) by mx1.redhat.com (8.14.4/8.14.4) with ESMTP id p4N7SRok009772 (version=TLSv1/SSLv3 cipher=DHE-RSA-AES256-SHA bits=256 verify=OK); Mon, 23 May 2011 03:28:27 -0400 Received: from freedom.redhat.com (vpn-8-6.rdu.redhat.com [10.11.8.6]) by int-mx09.intmail.prod.int.phx2.redhat.com (8.14.4/8.14.4) with ESMTP id p4N7SPWT003563; Mon, 23 May 2011 03:28:25 -0400 From: Lucas Meneghel Rodrigues To: autotest@test.kernel.org Cc: kvm@vger.kernel.org, Qingtang Zhou , Lucas Meneghel Rodrigues , Jason Wang Subject: [PATCH 2/2] KVM Test: Add a subtest lvm Date: Mon, 23 May 2011 04:28:32 -0300 Message-Id: <1306135712-4023-1-git-send-email-lmr@redhat.com> X-Scanned-By: MIMEDefang 2.68 on 10.5.11.22 Sender: kvm-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: kvm@vger.kernel.org X-Greylist: IP, sender and recipient auto-whitelisted, not delayed by milter-greylist-4.2.6 (demeter1.kernel.org [140.211.167.41]); Mon, 23 May 2011 07:28:34 +0000 (UTC) From: Qingtang Zhou Changes from v1: * Made the test use more current kvm autotest api, namely: - Error contexts, and session.cmd for shorter, cleaner code - Removed pre command, as the functionality needed for image_create was 
implemented on the previous patch Signed-off-by: Lucas Meneghel Rodrigues This test sets up an lvm over two images and then format the lvm and finally checks the fs using fsck. Signed-off-by: Yolkfull Chow Remove the progress of filling up. Add a params of clean which could prevent the umount and volume removing command and let this case usd by the following benchmark or stress test. Add the dbench into the lvm tests. Signed-off-by: Jason Wang This test depends on fillup_disk test and ioquit test. Signed-off-by: Qingtang Zhou --- client/tests/kvm/tests_base.cfg.sample | 48 ++++++++++++++++++ client/virt/tests/lvm.py | 84 ++++++++++++++++++++++++++++++++ 2 files changed, 132 insertions(+), 0 deletions(-) create mode 100644 client/virt/tests/lvm.py diff --git a/client/tests/kvm/tests_base.cfg.sample b/client/tests/kvm/tests_base.cfg.sample index 5713513..d1a188d 100644 --- a/client/tests/kvm/tests_base.cfg.sample +++ b/client/tests/kvm/tests_base.cfg.sample @@ -879,6 +879,46 @@ variants: fillup_cmd = "dd if=/dev/zero of=/%s/fillup.%d bs=%dM count=1 oflag=direct" kill_vm = yes + - lvm: + only Linux + images += ' stg1 stg2' + image_name_stg1 = storage_4k + image_cluster_size_stg1 = 4096 + image_size_stg1 = 1G + image_format_stg1 = qcow2 + image_name_stg2 = storage_64k + image_cluster_size_stg2 = 65536 + image_size_stg2 = 1G + image_format_stg2 = qcow2 + guest_testdir = /mnt + disks = "/dev/sdb /dev/sdc" + kill_vm = no + post_command_noncritical = no + variants: + lvm_create: + type = lvm + force_create_image_stg1 = yes + force_create_image_stg2 = yes + clean = no + lvm_fill: lvm_create + type = fillup_disk + force_create_image_stg1 = no + force_create_image_stg2 = no + guest_testdir = /mnt/kvm_test_lvm + fillup_timeout = 120 + fillup_size = 20 + fillup_cmd = "dd if=/dev/zero of=%s/fillup.%d bs=%dM count=1 oflag=direct" + lvm_ioquit: lvm_create + type = ioquit + force_create_image_stg1 = no + force_create_image_stg2 = no + kill_vm = yes + background_cmd = "for i in 1 2 
3 4; do (dd if=/dev/urandom of=/mnt/kvm_test_lvm/file bs=102400 count=10000000 &); done" + check_cmd = pgrep dd + clean = yes + remove_image_stg1 = yes + remove_image_stg2 = yes + - ioquit: only Linux type = ioquit @@ -1656,6 +1696,8 @@ variants: md5sum_1m_cd1 = 127081cbed825d7232331a2083975528 fillup_disk: fillup_cmd = "dd if=/dev/zero of=/%s/fillup.%d bs=%dM count=1" + lvm.lvm_fill: + fillup_cmd = "dd if=/dev/zero of=/%s/fillup.%d bs=%dM count=1" - 4.7.x86_64: no setup autotest @@ -1677,6 +1719,8 @@ variants: md5sum_1m_cd1 = 58fa63eaee68e269f4cb1d2edf479792 fillup_disk: fillup_cmd = "dd if=/dev/zero of=/%s/fillup.%d bs=%dM count=1" + lvm.lvm_fill: + fillup_cmd = "dd if=/dev/zero of=/%s/fillup.%d bs=%dM count=1" - 4.8.i386: no setup autotest @@ -1696,6 +1740,8 @@ variants: sys_path = "/sys/class/net/%s/driver" fillup_disk: fillup_cmd = "dd if=/dev/zero of=/%s/fillup.%d bs=%dM count=1" + lvm.lvm_fill: + fillup_cmd = "dd if=/dev/zero of=/%s/fillup.%d bs=%dM count=1" - 4.8.x86_64: @@ -1716,6 +1762,8 @@ variants: sys_path = "/sys/class/net/%s/driver" fillup_disk: fillup_cmd = "dd if=/dev/zero of=/%s/fillup.%d bs=%dM count=1" + lvm.lvm_fill: + fillup_cmd = "dd if=/dev/zero of=/%s/fillup.%d bs=%dM count=1" - 5.3.i386: diff --git a/client/virt/tests/lvm.py b/client/virt/tests/lvm.py new file mode 100644 index 0000000..d171747 --- /dev/null +++ b/client/virt/tests/lvm.py @@ -0,0 +1,84 @@ +import logging, os +from autotest_lib.client.common_lib import error + + +@error.context_aware +def mount_lv(lv_path, session): + error.context("mounting ext3 filesystem made on logical volume %s" % + os.path.basename(lv_path)) + session.cmd("mkdir -p /mnt/kvm_test_lvm") + session.cmd("mount %s /mnt/kvm_test_lvm" % lv_path) + + +@error.context_aware +def umount_lv(lv_path, session): + error.context("umounting ext3 filesystem made on logical volume %s" % + os.path.basename(lv_path)) + session.cmd("umount %s" % lv_path) + session.cmd("rm -rf /mnt/kvm_test_lvm") + + +@error.context_aware 
def run_lvm(test, params, env):
    """
    KVM lvm test:
    1) Log into a guest.
    2) Create a volume group and add both disks as pv to the group.
    3) Create a logical volume on the VG, make an ext3 filesystem on it
       and mount/umount it.
    4) `fsck' to check the partition that the LV locates.

    @param test: kvm test object
    @param params: Dictionary with the test parameters
    @param env: Dictionary with test environment.
    """
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=timeout)

    vg_name = "vg_kvm_test"
    lv_name = "lv_kvm_test"
    lv_path = "/dev/%s/%s" % (vg_name, lv_name)
    # Guest disk nodes backing the VG; overridable from the config
    # (the sample config sets "/dev/sdb /dev/sdc").
    disks = params.get("disks", "/dev/hdb /dev/hdc")
    clean = params.get("clean", "yes")
    timeout = params.get("lvm_timeout", "600")

    # Track whether the LV filesystem is currently mounted so cleanup only
    # umounts when needed.  The previous version umounted unconditionally in
    # the finally clause, which failed on the success path because the FS had
    # already been unmounted before fsck (umount of a not-mounted target
    # exits nonzero and session.cmd raises).
    mounted = False
    try:
        error.context("adding physical volumes %s" % disks)
        session.cmd("pvcreate %s" % disks)

        error.context("creating a volume group out of %s" % disks)
        session.cmd("vgcreate %s %s" % (vg_name, disks))

        error.context("activating volume group %s" % vg_name)
        session.cmd("vgchange -ay %s" % vg_name)

        error.context("creating logical volume on volume group %s" % vg_name)
        session.cmd("lvcreate -L2000 -n %s %s" % (lv_name, vg_name))

        error.context("creating ext3 filesystem on logical volume %s" % lv_name)
        # 'yes |' answers mkfs's "is not a block special device, proceed?"
        # style prompts non-interactively.
        session.cmd("yes | mkfs.ext3 %s" % lv_path, timeout=int(timeout))

        mount_lv(lv_path, session)
        mounted = True

        umount_lv(lv_path, session)
        mounted = False

        error.context("checking ext3 filesystem made on logical volume %s" %
                      lv_name)
        # fsck must run on the unmounted filesystem.
        session.cmd("fsck %s" % lv_path, timeout=int(timeout))

        if clean == "no":
            # Leave the FS mounted for follow-up variants (lvm_fill and
            # lvm_ioquit in the config reuse /mnt/kvm_test_lvm).
            mount_lv(lv_path, session)
            mounted = True

    finally:
        if clean == "yes":
            if mounted:
                umount_lv(lv_path, session)

            error.context("removing logical volume %s" % lv_name)
            # lvremove needs the VG-qualified LV path (a bare LV name cannot
            # be resolved to a volume group), and -f skips the interactive
            # "Do you really want to remove..." prompt that would otherwise
            # hang the session.
            session.cmd("lvremove -f %s" % lv_path)

            error.context("disabling volume group %s" % vg_name)
            session.cmd("vgchange -a n %s" % vg_name)

            error.context("removing volume group %s" % vg_name)
            session.cmd("vgremove -f %s" % vg_name)