Commit 5ab81a77 authored by Xu Han, committed by GitHub

Merge pull request #1006 from XueqiangWei/lvm

generic.tests: for the lvm test, remount the LV for the "fillup_disk" and "ioquit" sub-tests after guest reboot/shutdown
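In short: the lvm variants share one guest across sub-tests, and a guest reboot or shutdown in between loses the non-fstab mount of the logical volume, so "fillup_disk" and "ioquit" now re-check the mount and remount it first. A minimal sketch of the new flow, using the names introduced in the diffs below (check_mount defaults to "mountpoint /mnt/kvm_test_lvm" from the cfg):

# Sketch of the remount logic this commit adds (see lvm.py below).
if sub_type in ("fillup_disk", "ioquit"):
    if not check_mount_lv(check_mount, session):  # mount lost after reboot
        mount_lv(lv_path, session)                # remount /mnt/kvm_test_lvm
    utils_test.run_virt_sub_test(test, params, env, sub_type)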
lvm.cfg

 - lvm: install setup image_copy unattended_install.cdrom
     virt_test_type = qemu libvirt
+    type = lvm
     only Linux
     images += ' stg1 stg2'
     image_name_stg1 = images/storage_4k
@@ -11,11 +12,12 @@
     image_size_stg2 = 1G
     image_format_stg2 = qcow2
     guest_testdir = /mnt
     kill_vm = no
     post_command_noncritical = no
+    clean = no
+    check_mount = "mountpoint /mnt/kvm_test_lvm"
     variants:
         - lvm_create:
-            type = lvm
+            sub_type = lvm_create
             force_create_image_stg1 = yes
             blk_extra_params_stg1 = "serial=TARGET_DISK1"
             force_create_image_stg2 = yes
@@ -23,9 +25,8 @@
             Host_RHEL.m6..ide:
                 blk_extra_params_stg1 = "wwn=0x5000123456789abc"
                 blk_extra_params_stg2 = "wwn=0x5000cba987654321"
-            clean = no
         - lvm_fill: lvm_create
-            type = fillup_disk
+            sub_type = fillup_disk
             force_create_image_stg1 = no
             force_create_image_stg2 = no
             guest_testdir = /mnt/kvm_test_lvm
@@ -36,12 +37,15 @@
             clean_cmd += "rm -f /%s/fillup.*"
             show_fillup_dir_cmd = "ls %s"
         - lvm_ioquit: lvm_create
-            type = ioquit
+            sub_type = ioquit
             force_create_image_stg1 = no
             force_create_image_stg2 = no
+            kill_vm = yes
             background_cmd = "for i in 1 2 3 4; do (dd if=/dev/urandom of=/mnt/kvm_test_lvm/file bs=102400 count=10000000 &); done"
             check_cmd = pgrep dd
         - lvm_clean: lvm_create
+            sub_type = lvm_clean
+            clean = yes
             force_create_image_stg1 = no
             force_create_image_stg2 = no
             remove_image_stg1 = yes
             remove_image_stg2 = yes
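The new check_mount command relies on mountpoint(1) from util-linux: it prints "<dir> is a mountpoint" and exits 0 when the directory is mounted, and prints "<dir> is not a mountpoint" with a non-zero status otherwise. check_mount_lv() in lvm.py below keys off both signals. A self-contained sketch of the same check outside the test harness (is_mounted is a hypothetical helper, not part of the commit):

import subprocess

def is_mounted(path="/mnt/kvm_test_lvm"):
    """Return True if path is currently a mountpoint (util-linux semantics)."""
    # mountpoint(1) exits 0 and prints "<path> is a mountpoint" when mounted;
    # otherwise it exits non-zero and prints "<path> is not a mountpoint".
    res = subprocess.run(["mountpoint", path], capture_output=True, text=True)
    return res.returncode == 0 and "is not a mountpoint" not in res.stdout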
ioquit.py

@@ -31,11 +31,11 @@ def run(test, params, env):
     error.context("Verify the background process is running")
     check_cmd = params.get("check_cmd")
-    session2.cmd(check_cmd, timeout=60)
+    session2.cmd(check_cmd, timeout=360)
     error.context("Sleep for a random time", logging.info)
     time.sleep(random.randrange(30, 100))
-    session2.cmd(check_cmd, timeout=60)
+    session2.cmd(check_cmd, timeout=360)
     error.context("Kill the VM", logging.info)
     vm.process.close()
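The only change to ioquit is widening the pgrep timeout from 60s to 360s: background_cmd starts four parallel dd writers reading /dev/urandom, and under that load the guest shell can take minutes to answer, while session2.cmd() raises a timeout error if the command does not return in time. A hedged sketch of the surrounding pattern, with values from the cfg (session and session2 are two shell sessions into the same guest):

# Sketch of the ioquit flow around the changed lines.
session.sendline(background_cmd)       # the four-way dd loop from lvm.cfg
session2.cmd("pgrep dd", timeout=360)  # a loaded guest may answer slowly
time.sleep(random.randrange(30, 100))  # let the I/O run for a random while
session2.cmd("pgrep dd", timeout=360)  # the writers must still be alive
vm.process.close()                     # then kill QEMU with I/O in flight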
lvm.py

 import os
 import logging

-from autotest.client.shared import error
+from virttest import error_context
 from virttest import utils_misc
 from virttest import utils_test


-@error.context_aware
+@error_context.context_aware
 def mount_lv(lv_path, session):
-    error.context("mounting ext3 filesystem made on logical volume %s" %
-                  os.path.basename(lv_path))
+    error_context.context("mounting ext3 filesystem made on logical volume %s"
+                          % os.path.basename(lv_path), logging.info)
     session.cmd("mkdir -p /mnt/kvm_test_lvm")
     session.cmd("mount %s /mnt/kvm_test_lvm" % lv_path)


-@error.context_aware
+@error_context.context_aware
 def umount_lv(lv_path, session):
-    error.context("umounting ext3 filesystem made on logical volume %s" %
-                  os.path.basename(lv_path))
+    error_context.context("umounting ext3 filesystem made on logical volume "
+                          "%s" % os.path.basename(lv_path), logging.info)
     session.cmd("umount %s" % lv_path)
     session.cmd("rm -rf /mnt/kvm_test_lvm")


-@error.context_aware
+@error_context.context_aware
+def check_mount_lv(check_mount, session):
+    error_context.context("Check the lvm is mounted or not", logging.info)
+    s, o = session.cmd_status_output(check_mount)
+    if "is not a mountpoint" in o or s != 0:
+        logging.info("lvm is not mounted")
+        return False
+    else:
+        return True
+
+
+@error_context.context_aware
 def run(test, params, env):
     """
     KVM reboot test:
@@ -44,55 +55,59 @@ def run(test, params, env):
     lv_path = "/dev/%s/%s" % (vg_name, lv_name)
     clean = params.get("clean", "yes")
     timeout = params.get("lvm_timeout", "600")
-    disk_list = []
-    for disk in params.objects("images")[-2:]:
-        d_id = params["blk_extra_params_%s" % disk].split("=")[1]
-        d_path = utils_misc.get_linux_drive_path(session, d_id)
-        if not d_path:
-            raise error.TestError("Failed to get '%s' drive path" % d_id)
-        disk_list.append(d_path)
-    disks = " ".join(disk_list)
+    check_mount = params.get("check_mount", "mountpoint /mnt/kvm_test_lvm")
+    sub_type = params.get("sub_type", "lvm_create")
     try:
-        error.context("adding physical volumes %s" % disks, logging.info)
-        session.cmd("pvcreate %s" % disks)
-        error.context("creating a volume group out of %s" % disks,
-                      logging.info)
-        session.cmd("vgcreate %s %s" % (vg_name, disks))
-        error.context("activating volume group %s" % vg_name)
-        session.cmd("vgchange -ay %s" % vg_name)
-        error.context("creating logical volume on volume group %s" % vg_name,
-                      logging.info)
-        session.cmd("lvcreate -L2000 -n %s %s" % (lv_name, vg_name))
-        error.context(
-            "creating ext3 filesystem on logical volume %s" % lv_name)
-        session.cmd("yes | mkfs.ext3 %s" % lv_path, timeout=int(timeout))
-        mount_lv(lv_path, session)
-        umount_lv(lv_path, session)
-        error.context("checking ext3 filesystem made on logical volume %s" %
-                      lv_name, logging.info)
-        session.cmd("fsck %s" % lv_path, timeout=int(timeout))
-        if clean == "no":
-            mount_lv(lv_path, session)
+        if sub_type == "lvm_create":
+            disk_list = []
+            for disk in params.objects("images")[-2:]:
+                d_id = params["blk_extra_params_%s" % disk].split("=")[1]
+                d_path = utils_misc.get_linux_drive_path(session, d_id)
+                if not d_path:
+                    test.error("Failed to get '%s' drive path" % d_id)
+                disk_list.append(d_path)
+            disks = " ".join(disk_list)
+            error_context.context("adding physical volumes %s" % disks,
+                                  logging.info)
+            session.cmd("pvcreate %s" % disks)
+            error_context.context("creating a volume group out of %s" % disks,
+                                  logging.info)
+            session.cmd("vgcreate %s %s" % (vg_name, disks))
+            error_context.context("activating volume group %s" % vg_name,
+                                  logging.info)
+            session.cmd("vgchange -ay %s" % vg_name)
+            error_context.context("creating logical volume on volume group %s"
+                                  % vg_name, logging.info)
+            session.cmd("lvcreate -L2000 -n %s %s" % (lv_name, vg_name))
+            error_context.context("creating ext3 filesystem on logical volume"
+                                  " %s" % lv_name, logging.info)
+            session.cmd("yes | mkfs.ext3 %s" % lv_path, timeout=int(timeout))
+            mount_lv(lv_path, session)
+            umount_lv(lv_path, session)
+            error_context.context("checking ext3 filesystem made on logical "
+                                  "volume %s" % lv_name, logging.info)
+            session.cmd("fsck %s" % lv_path, timeout=int(timeout))
+            if clean == "no":
+                mount_lv(lv_path, session)
+        elif sub_type == "fillup_disk" or sub_type == "ioquit":
+            if not check_mount_lv(check_mount, session):
+                mount_lv(lv_path, session)
+            utils_test.run_virt_sub_test(test, params, env, sub_type)
+        elif sub_type == "lvm_clean":
+            pass
+        else:
+            test.error("Failed to get sub_type")
     finally:
         if clean == "yes":
-            umount_lv(lv_path, session)
-            error.context("removing logical volume %s" % lv_name)
-            session.cmd("lvremove %s" % lv_name)
-            error.context("disabling volume group %s" % vg_name)
+            if check_mount_lv(check_mount, session):
+                umount_lv(lv_path, session)
+            error_context.context("removing logical volume %s" % lv_path,
+                                  logging.info)
+            session.cmd("yes | lvremove %s" % lv_path)
+            error_context.context("disabling volume group %s" % vg_name,
+                                  logging.info)
             session.cmd("vgchange -a n %s" % vg_name)
-            error.context("removing volume group %s" % vg_name)
+            error_context.context("removing volume group %s" % vg_name,
+                                  logging.info)
             session.cmd("vgremove -f %s" % vg_name)
         session.close()
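Two teardown details in the final hunk are easy to miss: umount_lv() now runs only when check_mount_lv() confirms the LV is still mounted (a sub-test or guest reboot may already have unmounted it), and lvremove is fed through "yes |" because it asks for confirmation before removing a volume, which would hang a non-interactive guest session. A hedged sketch of the equivalent guarded cleanup, reusing the helpers above:

# Guarded cleanup, mirroring the finally block above.
if check_mount_lv(check_mount, session):
    umount_lv(lv_path, session)
# lvremove prompts "Do you really want to remove ...? [y/n]"; piping yes
# auto-confirms so session.cmd() cannot stall waiting for input.
session.cmd("yes | lvremove %s" % lv_path)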