提交 ad441941 编写于 作者: Xu Han

[qemu] Replace autotest modules - #ab

Signed-off-by: Xu Han <xuhan@redhat.com>
上级 d9bf87b9
import os
import logging
from autotest.client.shared import error
from virttest import utils_test
......@@ -45,7 +43,7 @@ def run(test, params, env):
if (mount_status != 0):
logging.error("mount failed")
raise error.TestFail('mount failed.')
test.fail('mount failed.')
# Collect test parameters
timeout = int(params.get("test_timeout", 14400))
......
from autotest.client.shared import error
from virttest import error_context
@error.context_aware
@error_context.context_aware
def run(test, params, env):
"""
Test guest audio:
......@@ -19,8 +19,8 @@ def run(test, params, env):
random_content_size = params.get("random_content_size")
audio_device = params.get("audio_device")
error.context("Verifying whether /dev/dsp is present")
error_context.context("Verifying whether /dev/dsp is present")
session.cmd("test -c %s" % audio_device)
error.context("Trying to write to the device")
error_context.context("Trying to write to the device")
session.cmd("dd if=/dev/urandom of=%s bs=%s count=1" %
(audio_device, random_content_size))
from autotest.client.shared import error
from virttest import qemu_monitor
......@@ -23,6 +21,5 @@ def run(test, params, env):
output = str(e)
if not ("has not been activated" in output or
"No balloon device has been activated" in output):
raise error.TestFail("Balloon driver still on when disable"
" it on command line")
test.fail("Balloon driver still on when disable it on command line")
session.close()
import time
import logging
import random
from autotest.client.shared import error
from virttest import error_context
@error.context_aware
@error_context.context_aware
def run(test, params, env):
"""
Query balloon memory size, stop and continue vm from monitor
......@@ -24,14 +25,14 @@ def run(test, params, env):
timeout = int(params.get("login_timeout", 360))
end_time = time.time() + repeat_timeout
while time.time() < end_time:
error.context("Query balloon memory from monitor", logging.info)
error_context.context("Query balloon memory from monitor", logging.info)
vm.monitor.info("balloon")
error.context("Stop and continue vm from monitor", logging.info)
error_context.context("Stop and continue vm from monitor", logging.info)
vm.monitor.cmd("stop")
vm.monitor.cmd('cont')
vm.verify_alive()
time.sleep(random.randint(0, 3))
error.context("Login guest after the test", logging.info)
error_context.context("Login guest after the test", logging.info)
session = vm.wait_for_login(timeout=timeout)
session.close()
import logging
from autotest.client.shared import error
from virttest import error_context
from virttest import utils_misc
from qemu.tests import block_copy
......@@ -25,7 +24,7 @@ class BlockStream(block_copy.BlockCopy):
self.default_params.update(default_params)
return super(BlockStream, self).parser_test_args()
@error.context_aware
@error_context.context_aware
def start(self):
"""
start block device streaming job;
......@@ -33,16 +32,16 @@ class BlockStream(block_copy.BlockCopy):
params = self.parser_test_args()
default_speed = params.get("default_speed")
error.context("start to stream block device", logging.info)
error_context.context("start to stream block device", logging.info)
self.vm.block_stream(self.device, default_speed, self.base_image, self.ext_args)
status = self.get_status()
if not status:
raise error.TestFail("no active job found")
self.test.fail("no active job found")
msg = "block stream job running, "
msg += "with limited speed %s B/s" % default_speed
logging.info(msg)
@error.context_aware
@error_context.context_aware
def create_snapshots(self):
"""
create live snapshot_chain, snapshots chain define in $snapshot_chain
......@@ -50,7 +49,7 @@ class BlockStream(block_copy.BlockCopy):
params = self.parser_test_args()
image_format = params["snapshot_format"]
snapshots = params["snapshot_chain"].split()
error.context("create live snapshots", logging.info)
error_context.context("create live snapshots", logging.info)
for snapshot in snapshots:
snapshot = utils_misc.get_path(self.data_dir, snapshot)
image_file = self.get_image_file()
......@@ -59,7 +58,7 @@ class BlockStream(block_copy.BlockCopy):
image_file = self.get_image_file()
logging.info("expect file: %s" % snapshot +
"opening file: %s" % image_file)
raise error.TestFail("create snapshot '%s' fail" % snapshot)
self.test.fail("create snapshot '%s' fail" % snapshot)
self.trash_files.append(snapshot)
def action_when_streaming(self):
......
......@@ -3,10 +3,10 @@ import time
import random
import logging
from autotest.client.shared import utils
from autotest.client.shared import error
from avocado.utils import process
from virttest import data_dir
from virttest import error_context
from virttest import storage
from virttest import qemu_storage
from virttest import utils_misc
......@@ -113,11 +113,11 @@ class BlockCopy(object):
fun = getattr(self, step)
fun()
else:
error.TestError("undefined step %s" % step)
self.test.error("undefined step %s" % step)
except KeyError:
logging.warn("Undefined test phase '%s'" % tag)
@error.context_aware
@error_context.context_aware
def cancel(self):
"""
cancel active job on given image;
......@@ -127,7 +127,7 @@ class BlockCopy(object):
ret &= bool(self.vm.monitor.get_event("BLOCK_JOB_CANCELLED"))
return ret
error.context("cancel block copy job", logging.info)
error_context.context("cancel block copy job", logging.info)
params = self.parser_test_args()
timeout = params.get("cancel_timeout")
self.vm.monitor.clear_event("BLOCK_JOB_CANCELLED")
......@@ -135,7 +135,7 @@ class BlockCopy(object):
cancelled = utils_misc.wait_for(is_cancelled, timeout=timeout)
if not cancelled:
msg = "Cancel block job timeout in %ss" % timeout
raise error.TestFail(msg)
self.test.fail(msg)
self.vm.monitor.clear_event("BLOCK_JOB_CANCELLED")
def is_paused(self):
......@@ -163,25 +163,25 @@ class BlockCopy(object):
pause active job;
"""
if self.is_paused():
raise error.TestError("Job has been already paused.")
self.test.error("Job has been already paused.")
logging.info("Pause block job.")
self.vm.pause_block_job(self.device)
time.sleep(5)
if not self.is_paused():
raise error.TestFail("Pause block job failed.")
self.test.fail("Pause block job failed.")
def resume_job(self):
"""
resume a paused job.
"""
if not self.is_paused():
raise error.TestError("Job is not paused, can't be resume.")
self.test.error("Job is not paused, can't be resume.")
logging.info("Resume block job.")
self.vm.resume_block_job(self.device)
if self.is_paused():
raise error.TestFail("Resume block job failed.")
self.test.fail("Resume block job failed.")
@error.context_aware
@error_context.context_aware
def set_speed(self):
"""
set limited speed for block job;
......@@ -189,23 +189,24 @@ class BlockCopy(object):
params = self.parser_test_args()
max_speed = params.get("max_speed")
expected_speed = int(params.get("expected_speed", max_speed))
error.context("set speed to %s B/s" % expected_speed, logging.info)
error_context.context("set speed to %s B/s" % expected_speed,
logging.info)
self.vm.set_job_speed(self.device, expected_speed)
status = self.get_status()
if not status:
raise error.TestFail("Unable to query job status.")
self.test.fail("Unable to query job status.")
speed = status["speed"]
if speed != expected_speed:
msg = "Set speed fail. (expected speed: %s B/s," % expected_speed
msg += "actual speed: %s B/s)" % speed
raise error.TestFail(msg)
self.test.fail(msg)
@error.context_aware
@error_context.context_aware
def reboot(self, method="shell", boot_check=True):
"""
reboot VM, alias of vm.reboot();
"""
error.context("reboot vm", logging.info)
error_context.context("reboot vm", logging.info)
params = self.parser_test_args()
timeout = params["login_timeout"]
......@@ -213,42 +214,42 @@ class BlockCopy(object):
session = self.get_session()
return self.vm.reboot(session=session,
timeout=timeout, method=method)
error.context("reset guest via system_reset", logging.info)
error_context.context("reset guest via system_reset", logging.info)
self.vm.monitor.clear_event("RESET")
self.vm.monitor.cmd("system_reset")
reseted = utils_misc.wait_for(lambda:
self.vm.monitor.get_event("RESET"),
timeout=timeout)
if not reseted:
raise error.TestFail("No RESET event received after"
self.test.fail("No RESET event received after"
"execute system_reset %ss" % timeout)
self.vm.monitor.clear_event("RESET")
return None
@error.context_aware
@error_context.context_aware
def stop(self):
"""
stop vm and verify it is really paused;
"""
error.context("stop vm", logging.info)
error_context.context("stop vm", logging.info)
self.vm.pause()
return self.vm.verify_status("paused")
@error.context_aware
@error_context.context_aware
def resume(self):
"""
resume vm and verify it is really running;
"""
error.context("resume vm", logging.info)
error_context.context("resume vm", logging.info)
self.vm.resume()
return self.vm.verify_status("running")
@error.context_aware
@error_context.context_aware
def verify_alive(self):
"""
check guest can response command correctly;
"""
error.context("verify guest alive", logging.info)
error_context.context("verify guest alive", logging.info)
params = self.parser_test_args()
session = self.get_session()
cmd = params.get("alive_check_cmd", "dir")
......@@ -314,7 +315,7 @@ class BlockCopy(object):
for test in self.params.get("when_start").split():
if hasattr(self, test):
fun = getattr(self, test)
bg = utils.InterruptedThread(fun)
bg = utils_misc.InterruptedThread(fun)
bg.start()
if bg.isAlive():
self.processes.append(bg)
......@@ -336,7 +337,7 @@ class BlockCopy(object):
timeout = params.get("wait_timeout")
finished = utils_misc.wait_for(self.job_finished, timeout=timeout)
if not finished:
raise error.TestFail("Job not finished in %s seconds" % timeout)
self.test.fail("Job not finished in %s seconds" % timeout)
time_end = time.time()
logging.info("Block job done.")
return time_end - time_start
......@@ -373,8 +374,7 @@ class BlockCopy(object):
steady = utils_misc.wait_for(self.is_steady, first=3.0,
step=3.0, timeout=timeout)
if not steady:
raise error.TestFail("Wait mirroring job ready "
"timeout in %ss" % timeout)
self.test.fail("Wait mirroring job ready timeout in %ss" % timeout)
def action_before_steady(self):
"""
......@@ -409,7 +409,7 @@ class BlockCopy(object):
self.vm.destroy()
while self.trash_files:
tmp_file = self.trash_files.pop()
utils.system("rm -f %s" % tmp_file, ignore_status=True)
process.system("rm -f %s" % tmp_file, ignore_status=True)
def create_file(self, file_name):
"""
......@@ -438,8 +438,8 @@ class BlockCopy(object):
status, output = session.cmd_status_output("md5sum -c %s.md5" % file_name,
timeout=200)
if status != 0:
raise error.TestFail("File %s changed, md5sum check output: %s" %
(file_name, output))
self.test.fail("File %s changed, md5sum check output: %s"
% (file_name, output))
def reopen(self, reopen_image):
"""
......
......@@ -2,14 +2,15 @@ import os
import re
import logging
from autotest.client import os_dep
from autotest.client.shared import error
from autotest.client.shared import utils
from avocado.utils import genio
from avocado.utils import path as utils_path
from avocado.utils import process
from virttest import env_process
from virttest import error_context
@error.context_aware
@error_context.context_aware
def run(test, params, env):
"""
Qemu discard support test:
......@@ -31,7 +32,7 @@ def run(test, params, env):
"""
Get latest scsi disk which emulated by scsi_debug module.
"""
scsi_disk_info = utils.system_output("lsscsi").splitlines()
scsi_disk_info = process.system_output("lsscsi").splitlines()
scsi_debug = [_ for _ in scsi_disk_info if 'scsi_debug' in _][-1]
scsi_debug = scsi_debug.split()
host_id = scsi_debug[0][1:-1]
......@@ -54,7 +55,7 @@ def run(test, params, env):
device_name = os.path.basename(device)
path = "/sys/block/%s/device/scsi_disk" % device_name
path += "/%s/provisioning_mode" % host_id
return utils.read_one_line(path).strip()
return genio.read_one_line(path).strip()
def get_allocation_bitmap():
"""
......@@ -62,7 +63,7 @@ def run(test, params, env):
"""
path = "/sys/bus/pseudo/drivers/scsi_debug/map"
try:
return utils.read_one_line(path).strip()
return genio.read_one_line(path).strip()
except IOError:
logging.warn("block allocation bitmap not exists")
return ""
......@@ -73,14 +74,14 @@ def run(test, params, env):
vm.destroy()
env.unregister_vm(vm.name)
os_dep.command("lsscsi")
utils_path.find_command("lsscsi")
host_id, disk_name = get_host_scsi_disk()
provisioning_mode = get_provisioning_mode(disk_name, host_id)
logging.info("Current provisioning_mode = '%s'", provisioning_mode)
bitmap = get_allocation_bitmap()
if bitmap:
logging.debug("block allocation bitmap: %s" % bitmap)
raise error.TestError("block allocation bitmap not empty before test.")
test.error("block allocation bitmap not empty before test.")
# prepare params to boot vm with scsi_debug disk.
vm_name = params["main_vm"]
......@@ -92,7 +93,8 @@ def run(test, params, env):
params["force_create_image_%s" % test_image] = "no"
params["images"] = " ".join([params["images"], test_image])
error.context("boot guest with disk '%s'" % disk_name, logging.info)
error_context.context("boot guest with disk '%s'" % disk_name,
logging.info)
# boot guest with scsi_debug disk
env_process.preprocess_vm(test, params, env, vm_name)
vm = env.get_vm(vm_name)
......@@ -100,7 +102,8 @@ def run(test, params, env):
timeout = float(params.get("login_timeout", 240))
session = vm.wait_for_login(timeout=timeout)
error.context("Fresh block allocation bitmap before test.", logging.info)
error_context.context("Fresh block allocation bitmap before test.",
logging.info)
device_name = get_guest_discard_disk(session)
rewrite_disk_cmd = params["rewrite_disk_cmd"]
rewrite_disk_cmd = rewrite_disk_cmd.replace("DISK", device_name)
......@@ -109,27 +112,28 @@ def run(test, params, env):
bitmap_before_trim = get_allocation_bitmap()
if not re.match(r"\d+-\d+", bitmap_before_trim):
logging.debug("bitmap before test: %s" % bitmap_before_trim)
raise error.TestFail("bitmap should be continuous before fstrim")
test.fail("bitmap should be continuous before fstrim")
format_disk_cmd = params["format_disk_cmd"]
format_disk_cmd = format_disk_cmd.replace("DISK", device_name)
error.context("format disk '%s' in guest" % device_name, logging.info)
error_context.context("format disk '%s' in guest" % device_name,
logging.info)
session.cmd(format_disk_cmd)
error.context("mount disk with discard options '%s'" % device_name,
error_context.context("mount disk with discard options '%s'" % device_name,
logging.info)
mount_disk_cmd = params["mount_disk_cmd"]
mount_disk_cmd = mount_disk_cmd.replace("DISK", device_name)
session.cmd(mount_disk_cmd)
error.context("execute fstrim in guest", logging.info)
error_context.context("execute fstrim in guest", logging.info)
fstrim_cmd = params["fstrim_cmd"]
session.cmd(fstrim_cmd, timeout=timeout)
bitmap_after_trim = get_allocation_bitmap()
if not re.match(r"\d+-\d+,.*\d+-\d+$", bitmap_after_trim):
logging.debug("bitmap after test: %s" % bitmap_before_trim)
raise error.TestFail("discard command doesn't issue"
test.fail("discard command doesn't issue"
"to scsi_debug disk, please report bug for qemu")
if vm:
vm.destroy()
import logging
import re
from autotest.client.shared import error
from virttest import error_context
from virttest import utils_misc
from virttest import utils_test
from virttest import storage
from virttest import data_dir
@error.context_aware
@error_context.context_aware
def run(test, params, env):
"""
KVM block resize test:
......@@ -34,7 +33,7 @@ def run(test, params, env):
return float(utils_misc.normalize_data_size(block_size[0],
order_magnitude="B"))
else:
raise error.TestError("Can not find the block size for the"
test.error("Can not find the block size for the"
" deivce. The output of command"
" is: %s" % output)
......@@ -83,19 +82,18 @@ def run(test, params, env):
test.fail("No available tag to get drive id")
drive_path = utils_misc.get_linux_drive_path(session, drive_id)
if not drive_path:
raise error.TestError("Failed to get '%s' drive path"
% data_image)
test.error("Failed to get '%s' drive path" % data_image)
block_size_cmd = params["block_size_cmd"].format(drive_path)
block_size_pattern = params.get("block_size_pattern")
need_reboot = params.get("need_reboot", "no") == "yes"
accept_ratio = float(params.get("accept_ratio", 0))
error.context("Check image size in guest", logging.info)
error_context.context("Check image size in guest", logging.info)
block_size = get_block_size(session, block_size_cmd, block_size_pattern)
if (block_size > data_image_size or
block_size < data_image_size * (1 - accept_ratio)):
raise error.TestError("Image size from guest and image not match"
test.error("Image size from guest and image not match"
"Block size get from guest: %s \n"
"Image size get from image: %s \n"
% (block_size, data_image_size))
......@@ -104,7 +102,7 @@ def run(test, params, env):
session.cmd(params.get("guest_prepare_cmd"))
if params.get("format_disk", "no") == "yes":
error.context("Format disk", logging.info)
error_context.context("Format disk", logging.info)
utils_misc.format_windows_disk(session, params["disk_index"],
mountpoint=params["disk_letter"])
......@@ -141,23 +139,23 @@ def run(test, params, env):
# We need shrink the disk in guest first, than in monitor
if block_size < old_block_size and disk_update_cmd:
error.context("Shrink disk size to %s in guest"
error_context.context("Shrink disk size to %s in guest"
% block_size, logging.info)
session.cmd(disk_update_cmd[index])
error.context("Change disk size to %s in monitor"
error_context.context("Change disk size to %s in monitor"
% block_size, logging.info)
vm.monitor.block_resize(data_image_dev, block_size)
if need_reboot:
session = vm.reboot(session=session)
elif disk_rescan_cmd:
error.context("Rescan disk", logging.info)
error_context.context("Rescan disk", logging.info)
session.cmd(disk_rescan_cmd)
# We need expand disk in monitor first than extend it in guest
if block_size > old_block_size and disk_update_cmd:
error.context("Extend disk to %s in guest"
error_context.context("Extend disk to %s in guest"
% block_size, logging.info)
session.cmd(disk_update_cmd[index])
......@@ -167,8 +165,7 @@ def run(test, params, env):
(session, block_size_cmd,
block_size_pattern),
20, 0, 1, "Block Resizing"):
raise error.TestFail("Block size get from guest is not"
test.fail("Block size get from guest is not"
"the same as expected \n"
"Reported: %s\n"
"Expect: %s\n" % (current_size,
block_size))
"Expect: %s\n" % (current_size, block_size))
......@@ -2,15 +2,15 @@ import os
import re
import logging
from autotest.client import utils
from autotest.client.shared import error
from avocado.utils import process
from virttest import error_context
from virttest import storage
from virttest import utils_misc
from virttest import data_dir
@error.context_aware
@error_context.context_aware
def run(test, params, env):
"""
block_stream_without_backingfile test:
......@@ -46,7 +46,7 @@ def run(test, params, env):
not vm.monitor.query_block_job(device_id),
timeout, first=0.2, step=2.0,
text="Wait for canceling block job") is None:
raise error.TestFail("Wait job finish timeout in %ss" % timeout)
test.fail("Wait job finish timeout in %ss" % timeout)
def verify_backingfile(expect_backingfile):
"""
......@@ -55,7 +55,7 @@ def run(test, params, env):
"""
backing_file = vm.monitor.get_backingfile(device_id)
if backing_file != expect_backingfile:
raise error.TestFail("Unexpect backingfile(%s)" % backing_file)
test.fail("Unexpect backingfile(%s)" % backing_file)
def get_openingfiles():
"""
......@@ -63,47 +63,48 @@ def run(test, params, env):
"""
pid = vm.get_pid()
cmd = params.get("snapshot_check_cmd") % pid
return set(utils.system_output(cmd, ignore_status=True).splitlines())
return set(process.system_output(cmd, ignore_status=True).splitlines())
snapshots = map(lambda x: os.path.join(image_dir, x), ["sn1", "sn2"])
try:
error.context("Create snapshots-chain(base->sn1->sn2)", logging.info)
error_context.context("Create snapshots-chain(base->sn1->sn2)",
logging.info)
for index, snapshot in enumerate(snapshots):
base_file = index and snapshots[index - 1] or image_file
device_id = vm.live_snapshot(base_file, snapshot)
if not device_id:
raise error.TestFail("Fail to create %s" % snapshot)
error.context("Check backing-file of sn2", logging.info)
test.fail("Fail to create %s" % snapshot)
error_context.context("Check backing-file of sn2", logging.info)
verify_backingfile(snapshots[0])
error.context("Merge sn1 to sn2", logging.info)
error_context.context("Merge sn1 to sn2", logging.info)
vm.monitor.block_stream(device_id, base=image_file, speed=speed)
wait_job_done(wait_timeout)
error.context("Check backing-file of sn2", logging.info)
error_context.context("Check backing-file of sn2", logging.info)
verify_backingfile(image_file)
error.context("Check sn1 is not opening by qemu process",
error_context.context("Check sn1 is not opening by qemu process",
logging.info)
if snapshots[0] in get_openingfiles():
raise error.TestFail("sn1 (%s) is opening by qemu" % snapshots[0])
test.fail("sn1 (%s) is opening by qemu" % snapshots[0])
error.context("Merge base to sn2", logging.info)
error_context.context("Merge base to sn2", logging.info)
vm.monitor.block_stream(device_id)
wait_job_done(wait_timeout)
error.context("Check backing-file of sn2", logging.info)
error_context.context("Check backing-file of sn2", logging.info)
verify_backingfile(None)
error.context("check sn1 and base are not opening by qemu process",
logging.info)
error_context.context("check sn1 and base are not opening "
"by qemu process", logging.info)
if set([snapshots[0], image_file]).issubset(get_openingfiles()):
raise error.TestFail("%s is opening by qemu" % set([snapshots[0],
image_file]))
error.context("Check backing-file of sn2 by qemu-img", logging.info)
test.fail("%s is opening by qemu"
% set([snapshots[0], image_file]))
error_context.context("Check backing-file of sn2 by qemu-img", logging.info)
cmd = "%s info %s" % (qemu_img, snapshots[1])
if re.search("backing file",
utils.system_output(cmd, ignore_status=True)):
raise error.TestFail("should no backing-file in this step")
process.system_output(cmd, ignore_status=True)):
test.fail("should no backing-file in this step")
error.context("Reboot VM to check it works fine", logging.info)
error_context.context("Reboot VM to check it works fine", logging.info)
session = vm.reboot(session=session, timeout=timeout)
session.cmd(alive_check_cmd)
finally:
map(lambda x: utils.system("rm -rf %s" % x), snapshots)
map(lambda x: process.system("rm -rf %s" % x), snapshots)
import logging
from autotest.client.shared import error
from virttest import error_context
from qemu.tests import blk_stream
......@@ -9,7 +10,7 @@ class BlockStreamNegative(blk_stream.BlockStream):
def __init__(self, test, params, env, tag):
super(BlockStreamNegative, self).__init__(test, params, env, tag)
@error.context_aware
@error_context.context_aware
def set_speed(self):
"""
set limited speed for block job;
......@@ -20,17 +21,17 @@ class BlockStreamNegative(blk_stream.BlockStream):
expected_speed = params.get("expected_speed", default_speed)
if params.get("need_convert_to_int", "no") == "yes":
expected_speed = int(expected_speed)
error.context("set speed to %s B/s" % expected_speed, logging.info)
error_context.context("set speed to %s B/s" % expected_speed,
logging.info)
args = {"device": self.device,
"speed": expected_speed}
response = str(self.vm.monitor.cmd_qmp("block-job-set-speed", args))
if "(core dump)" in response:
raise error.TestFail("Qemu core dump when reset speed"
" to a negative value.")
self.test.fail("Qemu core dump when reset "
"speed to a negative value.")
if match_str not in response:
raise error.TestFail("Fail to get expected result."
"%s is expected in %s" %
(match_str, response))
self.test.fail("Fail to get expected result. %s is expected in %s"
% (match_str, response))
logging.info("Keyword '%s' is found in QMP output '%s'." %
(match_str, response))
......
import logging
from autotest.client.shared import error
from virttest import error_context
from qemu.tests import blk_stream
......@@ -8,14 +10,14 @@ class BlockStreamSimple(blk_stream.BlockStream):
def __init__(self, test, params, env, tag):
super(BlockStreamSimple, self).__init__(test, params, env, tag)
@error.context_aware
@error_context.context_aware
def query_status(self):
"""
query running block streaming job info;
"""
error.context("query job status", logging.info)
error_context.context("query job status", logging.info)
if not self.get_status():
raise error.TestFail("No active job")
self.test.fail("No active job")
def run(test, params, env):
......
import logging
from autotest.client.shared import error
from virttest import env_process
from virttest import error_context
from virttest import utils_misc
from virttest import utils_test
@error.context_aware
@error_context.context_aware
def run(test, params, env):
"""
boot cpu model test:
......@@ -25,7 +24,7 @@ def run(test, params, env):
model_list = params.get("cpu_model")
if not model_list:
if cpu_vendor == "unknow":
raise error.TestError("unknow cpu vendor")
test.error("unknow cpu vendor")
else:
model_list = params.get("cpu_model_%s" % cpu_vendor,
host_model[-1])
......@@ -53,7 +52,5 @@ def run(test, params, env):
logging.info("shutdown guest successfully")
else:
if params.get("enable_check", "no") == "yes":
raise error.TestWarn("Can not test %s model on %s host, "
"pls use %s host" % (model,
host_model[0],
model))
test.cancel("Can not test %s model on %s host, pls use "
"%s host" % (model, host_model[0], model))
......@@ -3,9 +3,9 @@ import re
import time
import logging
from autotest.client import utils
from virttest import error_context
from avocado.utils import process
from virttest import error_context
from virttest import utils_misc
from virttest import data_dir
from virttest import qemu_storage
......@@ -35,9 +35,9 @@ def run(test, params, env):
logging.info("creating test cdrom")
cdrom_test = params.get("cdrom_test")
cdrom_test = utils_misc.get_path(data_dir.get_data_dir(), cdrom_test)
utils.run("dd if=/dev/urandom of=test bs=10M count=1")
utils.run("mkisofs -o %s test" % cdrom_test)
utils.run("rm -f test")
process.run("dd if=/dev/urandom of=test bs=10M count=1")
process.run("mkisofs -o %s test" % cdrom_test)
process.run("rm -f test")
def cleanup_cdroms():
"""
......
......@@ -2,7 +2,8 @@ import logging
import re
import time
from autotest.client import utils
from avocado.utils import process
from virttest import error_context
......@@ -30,7 +31,7 @@ def run(test, params, env):
# Disable nic device, boot fail from nic device except user model
if params['nettype'] != 'user':
for nic in vm.virtnet:
utils.system("ifconfig %s down" % nic.ifname)
process.system("ifconfig %s down" % nic.ifname)
vm.resume()
......
import logging
from autotest.client.shared import error
from virttest import error_context
from virttest import utils_misc
from virttest import env_process
from virttest.staging import utils_memory
@error.context_aware
@error_context.context_aware
def run(test, params, env):
"""
KVM boot time test:
......@@ -27,16 +26,17 @@ def run(test, params, env):
timeout = int(params.get("login_timeout", 360))
session = vm.wait_for_login(timeout=timeout)
error.context("Set guest run level to 1", logging.info)
error_context.context("Set guest run level to 1", logging.info)
single_user_cmd = params['single_user_cmd']
session.cmd(single_user_cmd)
try:
error.context("Shut down guest", logging.info)
error_context.context("Shut down guest", logging.info)
session.cmd('sync')
vm.destroy()
error.context("Boot up guest and measure the boot time", logging.info)
error_context.context("Boot up guest and measure the boot time",
logging.info)
utils_memory.drop_caches()
vm.create()
vm.verify_alive()
......@@ -48,7 +48,7 @@ def run(test, params, env):
finally:
try:
error.context("Restore guest run level", logging.info)
error_context.context("Restore guest run level", logging.info)
restore_level_cmd = params['restore_level_cmd']
session.cmd(restore_level_cmd)
session.cmd('sync')
......@@ -62,7 +62,6 @@ def run(test, params, env):
params["restore_image_after_testing"] = "yes"
if boot_time > expect_time:
raise error.TestFail(
"Guest boot up is taking too long: %ss" % boot_time)
test.fail("Guest boot up is taking too long: %ss" % boot_time)
session.close()
import logging
import re
from autotest.client.shared import error
from virttest import error_context
from virttest import utils_net
from virttest import utils_test
from virttest import env_process
from virttest import virt_vm
@error.context_aware
@error_context.context_aware
def run(test, params, env):
"""
Boot guest with different vectors, then do netperf testing.
......@@ -27,7 +26,8 @@ def run(test, params, env):
"""
def boot_guest_with_vectors(vectors):
error.context("Boot guest with vectors = %s" % vectors, logging.info)
error_context.context("Boot guest with vectors = %s" % vectors,
logging.info)
params["vectors"] = vectors
params["start_vm"] = "yes"
try:
......@@ -40,7 +40,7 @@ def run(test, params, env):
if int(vectors) < 0:
msg = "Qemu did not raise correct error"
msg += " when vectors = %s" % vectors
raise error.TestFail(msg)
test.fail(msg)
vm = env.get_vm(params["main_vm"])
vm.verify_alive()
......@@ -48,7 +48,7 @@ def run(test, params, env):
def enable_multi_queues(vm):
session = vm.wait_for_login(timeout=login_timeout)
error.context("Enable multi queues in guest.", logging.info)
error_context.context("Enable multi queues in guest.", logging.info)
for nic_index, nic in enumerate(vm.virtnet):
ifname = utils_net.get_linux_ifname(session, nic.mac)
queues = int(nic.queues)
......@@ -57,12 +57,13 @@ def run(test, params, env):
if status:
msg = "Fail to enable multi queue in guest."
msg += "Command %s, fail with output %s" % (mq_set_cmd, output)
error.TestError(msg)
test.error(msg)
def check_msi_support(session):
devices = session.cmd_output("lspci |grep Eth").strip()
vectors = params["vectors"]
error.context("Check if vnic inside guest support msi.", logging.info)
error_context.context("Check if vnic inside guest support msi.",
logging.info)
for device in devices.split("\n"):
if not device:
continue
......@@ -70,7 +71,7 @@ def run(test, params, env):
msi_check_cmd = "lspci -vvv -s %s |grep MSI" % d_id
output = session.cmd_output(msi_check_cmd)
if vectors == 0 and output:
error.TestFail("Guest do not support msi when vectors = 0.")
test.fail("Guest do not support msi when vectors = 0.")
if output:
if vectors == 1:
if "MSI-X: Enable-" in output:
......@@ -78,17 +79,18 @@ def run(test, params, env):
else:
msg = "Command %s get wrong output." % msi_check_cmd
msg += " when vectors = 1"
error.TestFail(msg)
test.fail(msg)
else:
if "MSI-X: Enable+" in output:
logging.info("MSI-X is enabled")
else:
msg = "Command %s get wrong output." % msi_check_cmd
msg += " when vectors = %s" % vectors
error.TestFail(msg)
test.fail(msg)
def check_interrupt(session, vectors):
error.context("Check the cpu interrupt of virito", logging.info)
error_context.context("Check the cpu interrupt of virito",
logging.info)
cmd = "cat /proc/interrupts |grep virtio"
output = session.cmd_output(cmd)
vectors = int(vectors)
......@@ -96,20 +98,20 @@ def run(test, params, env):
if "IO-APIC-fasteoi" not in output:
msg = "Could not find IO-APIC-fasteoi interrupt"
msg += " when vectors = %s" % vectors
error.TestFail(msg)
test.fail(msg)
elif 2 <= vectors and vectors <= 8:
if not re.findall("vritio[0-9]-virtqueues", output):
msg = "Could not find the device for msi interrupt "
msg += "when vectors = %s " % vectors
msg += "Command %s got output %s" % (cmd, output)
error.TestFail(msg)
test.fail(msg)
elif vectors == 9 or vectors == 10:
if not (re.findall("virtio[0-9]-input", output) and
re.findall("virtio[0-9]-output", output)):
msg = "Could not find the device for msi interrupt "
msg += "when vectors = %s " % vectors
msg += "Command %s got output %s" % (cmd, output)
error.TestFail(msg)
test.fail(msg)
vectors_list = params["vectors_list"]
login_timeout = int(params.get("login_timeout", 360))
......@@ -122,5 +124,5 @@ def run(test, params, env):
enable_multi_queues(vm)
check_msi_support(session)
check_interrupt(session, vectors)
error.context("Run netperf test in guest.", logging.info)
error_context.context("Run netperf test in guest.", logging.info)
utils_test.run_virt_sub_test(test, params, env, sub_type=sub_test)
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册