未验证 提交 b72b53c1 编写于 作者: X Xu Tian 提交者: GitHub

Merge pull request #2041 from xutian/blockdev_full_mirror

add blockdev full mirror test
......@@ -10,6 +10,37 @@ from provider.virt_storage.storage_admin import sp_admin
from provider import job_utils
def copy_out_dict_if_exists(params_in, keys):
    """
    Extract a sub-dict of *params_in*, coercing well-known option values.

    Integer-valued options ("speed", "granularity", "buf-size", "timeout")
    are converted with int(); boolean-valued options ("auto-finalize",
    "auto-dismiss", "unmap", "persistent") are mapped from yes/no-style
    strings to True/False.  Keys whose resolved value is None are dropped.

    :param params_in: original dict (any non-dict is treated as empty)
    :param keys: list of keys (default None) or mapping of key -> default
    :return dict: sub-dict of params_in
    """
    source = params_in if isinstance(params_in, dict) else dict()
    wanted = dict.fromkeys(keys) if isinstance(keys, list) else keys
    int_keys = ("speed", "granularity", "buf-size", "timeout")
    bool_keys = ("auto-finalize", "auto-dismiss", "unmap", "persistent")
    truthy = ("yes", "true", "on", True)
    falsy = ("no", "false", "off", False)
    result = dict()
    for key, default in wanted.items():
        value = source.get(key, default)
        if value is None:
            continue
        if key in int_keys:
            result[key] = int(value)
        elif key in bool_keys and value in truthy:
            result[key] = True
        elif key in bool_keys and value in falsy:
            result[key] = False
        else:
            # unknown keys (and unrecognized boolean spellings) pass through
            result[key] = value
    return result
@fail_on
def generate_tempfile(vm, root_dir, filename, size="10M", timeout=720):
"""Generate temp data file in VM"""
......@@ -70,6 +101,35 @@ def blockdev_create(vm, **options):
job_utils.job_dismiss(vm, options["job-id"], timeout)
def blockdev_mirror_qmp_cmd(source, target, **extra_options):
    """
    Build the QMP command name and arguments for a blockdev-mirror job.

    Only options from the blockdev-mirror whitelist are copied out of
    *extra_options*; a random 4-char suffix makes the job id unique.

    :param source: source node name (sent as "device")
    :param target: target node name
    :param extra_options: optional mirror options (speed, sync, ...)
    :return: tuple of (command name, arguments dict)
    """
    supported = ["format", "node-name", "replaces", "sync", "mode",
                 "granularity", "speed", "copy-mode", "buf-size", "unmap"]
    arguments = copy_out_dict_if_exists(extra_options, supported)
    suffix = utils_misc.generate_random_string(4)
    arguments.update({
        "device": source,
        "target": target,
        "job-id": "%s_%s" % (source, suffix),
    })
    return "blockdev-mirror", arguments
def blockdev_mirror(vm, source, target, **extra_options):
    """
    Start a blockdev-mirror job and block until it completes.

    :param vm: VM object whose monitor issues the QMP command
    :param source: source node name
    :param target: target node name
    :param extra_options: mirror options; "timeout" (default 600 seconds)
                          bounds the wait for job completion and is not
                          forwarded to QMP
    """
    # "timeout" is not in the blockdev-mirror option whitelist, so popping
    # it before or after building the command is equivalent
    timeout = int(extra_options.pop("timeout", 600))
    cmd, arguments = blockdev_mirror_qmp_cmd(source, target, **extra_options)
    vm.monitor.cmd(cmd, arguments)
    job_utils.wait_until_block_job_completed(
        vm, arguments.get("job-id", source), timeout)
def blockdev_stream_qmp_cmd(device, **extra_options):
if not isinstance(extra_options, dict):
extra_options = dict()
......
......@@ -11,6 +11,7 @@ from virttest import qemu_storage
from virttest import error_context
from virttest import utils_disk
from virttest import qemu_vm
from virttest.qemu_capabilities import Flags
from provider import backup_utils
from provider.virt_storage.storage_admin import sp_admin
......@@ -49,6 +50,9 @@ class BlockdevBackupBaseTest(object):
self.disks_info = dict()
self._tmp_dir = data_dir.get_tmp_dir()
def is_blockdev_mode(self):
    """Return True if the main VM reports the BLOCKDEV capability flag."""
    return self.main_vm.check_capability(Flags.BLOCKDEV)
def get_backup_options(self, params):
opts = params.objects("backup_options")
extra_options = params.copy_from_keys(opts)
......@@ -68,6 +72,14 @@ class BlockdevBackupBaseTest(object):
return self.__disk_define_by_params(params, image_name)
def __target_disk_define_by_params(self, params, image_name):
    """
    Define the target volume from test params.

    When "random_cluster_size" is "yes", pick a random cluster size that
    avoids every value listed in "cluster_size_blacklist" and record it in
    params before defining the volume.

    :param params: test parameters object
    :param image_name: name of the target image
    :return: volume object defined by sp_admin
    """
    if params.get("random_cluster_size") == "yes":
        excluded = [int(v) for v in params.objects("cluster_size_blacklist")]
        size = generate_random_cluster_size(excluded)
        params["image_cluster_size"] = size
        logging.info("set target image cluster size to '%s'", size)
    params.setdefault("target_path", data_dir.get_data_dir())
    return sp_admin.volume_define_by_params(image_name, params)
......
......@@ -40,14 +40,15 @@ def wait_until_job_status_match(vm, status, device, timeout):
def wait_until_block_job_completed(vm, job_id, timeout=900):
"""Block until block job completed"""
def _wait_until_block_job_completed():
finished = None
finished = False
status = get_job_status(vm, job_id)
if status == "pending":
block_job_finalize(vm, job_id)
if status == "ready":
block_job_complete(vm, job_id, timeout)
try:
for event in vm.monitor.get_events():
if event.get("event") != BLOCK_JOB_COMPLETED_EVENT:
finished = False
continue
data = event.get("data", dict())
if job_id in [data.get("id"), data.get("device")]:
......@@ -55,8 +56,6 @@ def wait_until_block_job_completed(vm, job_id, timeout=900):
assert not error, "block backup job finished with error: %s" % error
finished = True
break
else:
finished = False
finally:
status = get_job_status(vm, job_id)
if status == "concluded":
......@@ -70,6 +69,15 @@ def wait_until_block_job_completed(vm, job_id, timeout=900):
assert finished, "wait for block job complete event timeout in %s seconds" % timeout
@fail_on
def block_job_complete(vm, job_id, timeout=120):
    """
    Issue block-job-complete for a block job.

    Mirror jobs are first waited into the "ready" state, since only a
    ready mirror job can be completed.

    :param vm: VM object whose monitor issues the QMP command
    :param job_id: id of the block job
    :param timeout: seconds to wait for a mirror job to become ready
    """
    job = get_job_by_id(vm, job_id)
    if job.get("type") == "mirror":
        wait_until_job_status_match(vm, "ready", job_id, timeout)
    vm.monitor.cmd("block-job-complete", {"device": job_id})
@fail_on
def block_job_dismiss(vm, job_id, timeout=120):
"""
......
import logging
from avocado.utils import memory
from virttest import error_context
from provider import backup_utils
from provider import blockdev_full_backup_base
class BlockDevFullMirrorTest(
        blockdev_full_backup_base.BlockdevFullBackupBaseTest):
    """Full blockdev-mirror test: mirror a data disk onto a target disk."""

    @error_context.context_aware
    def blockdev_mirror(self):
        """Mirror the first source node onto the first target node."""
        src_node = "drive_%s" % self.source_disks[0]
        dst_node = "drive_%s" % self.target_disks[0]
        try:
            msg = "backup %s to %s, options: %s" % (
                src_node, dst_node, self.backup_options)
            error_context.context(msg, logging.info)
            backup_utils.blockdev_mirror(
                self.main_vm, src_node, dst_node, **self.backup_options)
        finally:
            # guest page cache can mask on-disk state; drop it regardless
            # of whether the mirror succeeded
            memory.drop_caches()

    def verify_blockdev_mirror(self):
        """Fail the test unless the target node shows up in query-block."""
        target_node = "drive_%s" % self.target_disks[0]
        # -blockdev mode exposes node names; legacy mode exposes devices
        key = "node-name" if self.is_blockdev_mode() else "device"
        for entry in self.main_vm.monitor.query("block"):
            if entry["inserted"].get(key) == target_node:
                return
        self.test.fail("target node(%s) is not opening" % target_node)

    @error_context.context_aware
    def do_backup(self):
        """Backup source image to target image"""
        self.blockdev_mirror()
        self.verify_blockdev_mirror()
        self.verify_target_disk()
def run(test, params, env):
    """
    mirror block device to target:
    1). boot guest with data disk with different cluster size
    2). create data file in data disk and save md5sum
    3). create target disk with different cluster size
    4). mirror block device from data disk to target disk
    5). boot guest with target disk
    6). verify data md5sum in data disk

    :param test: Kvm test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    BlockDevFullMirrorTest(test, params, env).run_test()
- blockdev_full_mirror:
only Linux
virt_test_type = qemu
kill_vm_on_error = yes
login_timeout = 240
storage_pools = default
storage_type_default = "directory"
images += " src1"
start_vm = no
not_preprocess = yes
storage_pool = default
image_size_src1 = 100M
image_name_src1 = "sr1"
image_size_dst1 = 100M
image_name_dst1 = "dst1"
image_format_dst1 = qcow2
force_create_image_src1 = yes
force_remove_image_src1 = yes
source_images = src1
target_images = dst1
backup_options = "auto-dismiss auto-finalize sync timeout buf-size"
sync = full
auto-dismiss = true
auto-finalize = true
variants:
- with_data_plane:
only Host_RHEL
no Host_RHEL.m5, Host_RHEL.m6
only virtio_blk, virtio_scsi
iothreads = "iothread0 iothread1"
virtio_blk:
blk_extra_params_image1 = "iothread=iothread0"
blk_extra_params_src1 = "iothread=iothread1"
virtio_scsi:
no Host_RHEL.m7.u0, Host_RHEL.m7.u1, Host_RHEL.m7.u2
bus_extra_params_image1 = "iothread=iothread0"
bus_extra_params_src1 = "iothread=iothread1"
- @defaults:
variants:
- simple_test:
type = blockdev_full_mirror
variants:
- @dst_default_cluster_size:
- dst_cluster_size_512:
image_cluster_size_dst1 = 512
- dst_cluster_size_2M:
timeout = 900
buf-size = 1024
image_cluster_size_dst1 = 2097152
- dst_random_cluster_size:
cluster_size_blacklist = "512 65536 2097152"
random_cluster_size = yes
iterations = 3
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册