提交 c92fe88d 编写于 作者: X Xu Han

[qemu] Replace autotest modules - hi

Signed-off-by: Xu Han <xuhan@redhat.com>
上级 c1b0252f
......@@ -3,10 +3,10 @@ import logging
import aexpect
from autotest.client.shared import error
from virttest import error_context
@error.context_aware
@error_context.context_aware
def run(test, params, env):
"""
Test hdparm setting on linux guest os. This case will:
......@@ -30,20 +30,18 @@ def run(test, params, env):
failed_count = len(re.findall("failed:", output))
ignore_count = len(re.findall(ignore_string, output))
if failed_count > ignore_count:
raise error.TestError("Fail to get %s parameter value. "
"Output is:\n%s" % (param,
output.strip()))
test.error("Fail to get %s parameter value. "
"Output is:\n%s" % (param, output.strip()))
else:
check_value = False
unsupport_param += 1
logging.warn("Disk %s not support parameter %s" % (disk,
param))
if check_value and value not in output:
raise error.TestFail("Fail to set %s parameter to value: %s"
% (param, value))
test.fail("Fail to set %s parameter to value: %s"
% (param, value))
if len(params) == unsupport_param:
raise error.TestNAError("All parameters are not supported."
" Skip the test")
test.cancel("All parameters are not supported. Skip the test")
def perform_read_timing(disk, timeout, num=5):
results = 0
......@@ -51,8 +49,8 @@ def run(test, params, env):
cmd = params["device_cache_read_cmd"] % disk
(s, output) = session.cmd_status_output(cmd, timeout)
if s != 0:
raise error.TestFail("Fail to perform device/cache read"
" timings \nOutput is: %s\n" % output)
test.fail("Fail to perform device/cache read"
" timings \nOutput is: %s\n" % output)
logging.info("Output of device/cache read timing check (%s of %s):"
% (i + 1, num))
for line in output.strip().splitlines():
......@@ -74,7 +72,7 @@ def run(test, params, env):
output = session.cmd(cmd)
disk = output.strip()
error.context("Setting hard disk to lower performance")
error_context.context("Setting hard disk to lower performance")
cmd = params["low_status_cmd"] % disk
try:
session.cmd(cmd, timeout)
......@@ -82,17 +80,17 @@ def run(test, params, env):
failed_count = len(re.findall("failed:", err.output))
ignore_count = len(re.findall(ignore_string, err.output))
if failed_count > ignore_count:
raise error.TestError("Fail to setting hard disk to lower "
"performance. Output is:%s", err.output)
test.error("Fail to setting hard disk to lower "
"performance. Output is:%s", err.output)
error.context("Checking hard disk keyval under lower performance "
"settings")
error_context.context("Checking hard disk keyval under "
"lower performance settings")
check_setting_result(cmd, timeout)
low_result = perform_read_timing(disk, timeout)
logging.info("Average buffered disk read speed under low performance "
"settings: %.2f MB/sec" % low_result)
error.context("Setting hard disk to higher performance")
error_context.context("Setting hard disk to higher performance")
cmd = params["high_status_cmd"] % disk
try:
session.cmd(cmd, timeout)
......@@ -100,19 +98,18 @@ def run(test, params, env):
failed_count = len(re.findall("failed:", err.output))
ignore_count = len(re.findall(ignore_string, err.output))
if failed_count > ignore_count:
raise error.TestError("Fail to setting hard disk to higher "
"performance. Output is:%s", err.output)
test.error("Fail to setting hard disk to higher "
"performance. Output is:%s", err.output)
error.context("Checking hard disk keyval under higher performance "
"settings")
error_context.context("Checking hard disk keyval under "
"higher performance settings")
check_setting_result(cmd, timeout)
high_result = perform_read_timing(disk, timeout)
logging.info("Average buffered disk read speed under high performance "
"settings: %.2f MB/sec" % high_result)
if not float(high_result) > float(low_result):
raise error.TestFail("High performance setting does not "
"increase read speed\n")
test.fail("High performance setting does not increase read speed")
finally:
if session:
......
import logging
import time
from autotest.client import utils
from autotest.client.shared import error
from avocado.utils import process
from virttest import error_context
# This decorator makes the test function aware of context strings
@error.context_aware
@error_context.context_aware
def run(test, params, env):
"""
QEMU 'Hello, world!' test
......@@ -28,7 +30,7 @@ def run(test, params, env):
"""
# Error contexts are used to give more info on what was
# going on when one exception happened executing test code.
error.context("Get the main VM", logging.info)
error_context.context("Get the main VM", logging.info)
vm = env.get_vm(params["main_vm"])
vm.verify_alive()
......@@ -42,8 +44,8 @@ def run(test, params, env):
session = vm.wait_for_login(timeout=timeout)
# Send command to the guest, using session command.
error.context("Echo 'Hello, world!' in guest and get the output",
logging.info)
error_context.context("Echo 'Hello, world!' in guest and get the output",
logging.info)
# Here, timeout was passed explicitly to show it can be tweaked
guest_cmd = "echo 'Hello, world!'"
# If you just need the output, use session.cmd(). If the command fails,
......@@ -56,10 +58,10 @@ def run(test, params, env):
# Here, we will fail a test if the guest outputs something unexpected
if guest_cmd_output != 'Hello, world!':
raise error.TestFail("Unexpected output from guest")
test.fail("Unexpected output from guest")
# Send command to the guest, using monitor command.
error.context("Send a monitor command", logging.info)
error_context.context("Send a monitor command", logging.info)
monitor_cmd_ouput = vm.monitor.info("status")
logging.info("Monitor returns '%s'", monitor_cmd_ouput)
......@@ -69,18 +71,19 @@ def run(test, params, env):
vm.verify_status("running")
# Send command to host
error.context("Echo 'Hello, world!' in the host using shell", logging.info)
# If the command fails, it will raise a error.CmdError exception
host_cmd_output = utils.system_output("echo 'Hello, world!'")
error_context.context("Echo 'Hello, world!' in the host using shell",
logging.info)
# If the command fails, it will raise a process.CmdError exception
host_cmd_output = process.system_output("echo 'Hello, world!'")
logging.info("Host cmd output '%s'", host_cmd_output)
# Here, we will fail a test if the host outputs something unexpected
if host_cmd_output != 'Hello, world!':
raise error.TestFail("Unexpected output from guest")
test.fail("Unexpected output from guest")
# An example of getting a required parameter from the config file
error.context("Get a required parameter from the config file",
logging.info)
error_context.context("Get a required parameter from the config file",
logging.info)
sleep_time = int(params["sleep_time"])
logging.info("Sleep for '%d' seconds", sleep_time)
time.sleep(sleep_time)
import logging
from avocado.utils.wait import wait_for
from virttest import error_context
from virttest.utils_test import BackgroundTest
from virttest.utils_test import run_virt_sub_test
from virttest.utils_test.qemu import MemoryHotplugTest
from avocado.core import exceptions
# Make it work under both autotest-framework and avocado-framework
try:
from avocado.utils.wait import wait_for
except ImportError:
from autotest.client.shared.utils import wait_for
try:
from virttest import error_context as step_engine
except ImportError:
from autotest.client.shared.error import step_engine
class MemoryHotplugSimple(MemoryHotplugTest):
......@@ -26,7 +17,7 @@ class MemoryHotplugSimple(MemoryHotplugTest):
(self.params["sub_test"],
self.params["stage"],
self.params["operation"]))
step_engine.context(step, logging.info)
error_context.context(step, logging.info)
args = (self.test, self.params, self.env, self.params["sub_type"])
run_virt_sub_test(*args)
......@@ -83,9 +74,7 @@ class MemoryHotplugSimple(MemoryHotplugTest):
self.run_sub_test])[0]
func = getattr(self, "%s_memory" % operation)
if not callable(func):
raise exceptions.TestError(
"Unsupported memory operation '%s'" %
operation)
self.test.error("Unsupported memory operation '%s'" % operation)
vm = self.env.get_vm(self.params["main_vm"])
try:
if stage != "after":
......@@ -110,7 +99,7 @@ class MemoryHotplugSimple(MemoryHotplugTest):
self.close_sessions()
@step_engine.context_aware
@error_context.context_aware
def run(test, params, env):
"""
Qemu memory hotplug test:
......
import time
import logging
from virttest import error_context
from virttest.utils_test.qemu import MemoryHotplugTest
try:
from avocado.core import exceptions
except ImportError:
from autotest.client.shared import error as exceptions
try:
from virttest import error_context
except ImportError:
from autotest.client.shared import error as error_context
class MemoryHotplugRepeat(MemoryHotplugTest):
......@@ -35,9 +26,8 @@ class MemoryHotplugRepeat(MemoryHotplugTest):
self.turn(vm, target_mem, extra_params)
current_mem = self.get_guest_total_mem(vm)
if current_mem != original_mem:
raise exceptions.TestFail("Guest memory changed about repeat"
" hotpug/unplug memory %d times"
% repeat)
self.test.fail("Guest memory changed about repeat"
" hotpug/unplug memory %d times" % repeat)
time.sleep(1.5)
vm.verify_alive()
vm.reboot()
......
......@@ -3,7 +3,6 @@ Sanity check for hypervisor flag in guest.
"""
import logging
from autotest.client.shared import error
def run(test, params, env):
......@@ -26,4 +25,4 @@ def run(test, params, env):
cpuinfo = session.cmd("cat /proc/cpuinfo")
logging.debug("Guest '/proc/cpuinfo': %s", cpuinfo)
if "hypervisor" not in cpuinfo:
raise error.TestFail("hypervisor flag undefined in cpuinfo")
test.fail("hypervisor flag undefined in cpuinfo")
import logging
from autotest.client.shared import error
from virttest import env_process
from virttest import error_context
@error.context_aware
@error_context.context_aware
def run(test, params, env):
"""
Qemu invalid parameter in qemu command line test:
......@@ -19,16 +18,16 @@ def run(test, params, env):
vm_name = params["main_vm"]
params['start_vm'] = "yes"
try:
error.context("Start guest with invalid parameters.")
error_context.context("Start guest with invalid parameters.")
env_process.preprocess_vm(test, params, env, vm_name)
vm = env.get_vm(vm_name)
vm.destroy()
except Exception, emsg:
error.context("Check guest exit status.")
error_context.context("Check guest exit status.")
if "(core dumped)" in str(emsg):
raise error.TestFail("Guest core dumped with invalid parameters.")
test.fail("Guest core dumped with invalid parameters.")
else:
logging.info("Guest quit as expect: %s" % str(emsg))
return
raise error.TestFail("Guest start normally, didn't quit as expect.")
test.fail("Guest start normally, didn't quit as expect.")
......@@ -4,9 +4,8 @@ import re
import aexpect
from autotest.client.shared import error
from virttest import env_process
from virttest import error_context
def get_re_average(opt, re_str):
......@@ -25,7 +24,7 @@ def get_re_average(opt, re_str):
return vals / len(values)
@error.context_aware
@error_context.context_aware
def run(test, params, env):
"""
Measure overhead of IPI with and without x2apic:
......@@ -48,7 +47,7 @@ def run(test, params, env):
logging.warn("This case need at least 2 vcpu, but only 1 specified in"
" configuration. So change the vcpu to 2.")
vm_name = params.get("main_vm")
error.context("Boot guest with x2apic cpu flag.", logging.info)
error_context.context("Boot guest with x2apic cpu flag.", logging.info)
env_process.preprocess_vm(test, params, env, vm_name)
vm = env.get_vm(params["main_vm"])
vm.verify_alive()
......@@ -57,23 +56,23 @@ def run(test, params, env):
check_x2apic_cmd = params.get("check_x2apic_cmd")
if check_x2apic_cmd:
error.context("Check x2apic flag in guest", logging.info)
error_context.context("Check x2apic flag in guest", logging.info)
x2apic_output = session.cmd_output(check_x2apic_cmd).strip()
x2apic_check_string = params.get("x2apic_check_string").split(",")
for check_string in x2apic_check_string:
if check_string.strip() not in x2apic_output:
msg = "%s is not displayed in output" % check_string
raise error.TestFail(msg)
test.fail(msg)
pipetest_cmd = params.get("pipetest_cmd")
if session.get_command_status("test -x %s" % pipetest_cmd):
file_link = os.path.join(test.virtdir, "scripts/pipetest.c")
vm.copy_files_to(file_link, "/tmp/pipetest.c")
build_pipetest_cmd = params.get("build_pipetest_cmd")
error.context("Build pipetest script in guest.", logging.info)
error_context.context("Build pipetest script in guest.", logging.info)
session.cmd(build_pipetest_cmd, timeout=180)
error.context("Run pipetest script in guest.", logging.info)
error_context.context("Run pipetest script in guest.", logging.info)
try:
o = session.cmd(pipetest_cmd, timeout=180)
except aexpect.ShellTimeoutError, e:
......@@ -82,31 +81,33 @@ def run(test, params, env):
val1 = get_re_average(o, re_str)
session.close()
vm.destroy()
error.context("Boot guest without x2apic.", logging.info)
error_context.context("Boot guest without x2apic.", logging.info)
params["cpu_model_flags"] += ",-x2apic"
env_process.preprocess_vm(test, params, env, vm_name)
vm = env.get_vm(params["main_vm"])
vm.verify_alive()
session = vm.wait_for_login(timeout=int(params.get("login_timeout", 360)))
if check_x2apic_cmd:
error.context("Check x2apic flag in guest after reboot.", logging.info)
error_context.context("Check x2apic flag in guest after reboot.",
logging.info)
x2apic_output = session.cmd_output(check_x2apic_cmd).strip()
logging.info(x2apic_output)
if x2apic_output:
raise error.TestFail("Fail to disable x2apic in guest.")
test.fail("Fail to disable x2apic in guest.")
error.context("Run pipetest script in guest again.", logging.info)
error_context.context("Run pipetest script in guest again.", logging.info)
try:
o = session.cmd(pipetest_cmd, timeout=180)
except aexpect.ShellTimeoutError, e:
o = e
val2 = get_re_average(o, re_str)
error.context("Compare the output of pipetest script.", logging.info)
error_context.context("Compare the output of pipetest script.",
logging.info)
if val1 >= val2:
msg = "Overhead of IPI with x2apic is not smaller than that without"
msg += " x2apic. pipetest script output with x2apic: %s. " % val1
msg += "pipetest script output without x2apic: %s" % val2
raise error.TestFail(msg)
test.fail(msg)
msg = "pipetest script output with x2apic: %s. " % val1
msg += "pipetest script output without x2apic: %s" % val2
logging.info(msg)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册