Moving kvm libvirt v2v to virt

Signed-off-by: Lucas Meneghel Rodrigues <lmr@redhat.com>
Parent commit: 5ef8de41
For the impatient:
Execute the get_started.py script located on this directory,
that will guide you through setting up the default kvm test
scenario:
* Guest install with Fedora 12
* Boot, reboot and shutdown test
The script will help you to create all the directories, and
even get the OS iso in case you don't have it yet.
For the not so impatient:
You are *strongly* advised to read the online docs:
https://github.com/autotest/autotest/wiki/KVMAutotest
So you can have a better idea of how the test is organized
and how it works.
# Copy this file to build.cfg and edit it.
vm_type = kvm
variants:
- build:
type = build
# Load modules built/installed by the build test?
load_modules = no
# Save the results of this build on test.resultsdir?
save_results = no
# Preserve the source code directory between tests?
preserve_srcdir = yes
######################################################################
# INSTALLERS DEFINITION SECTION
# Many different components can be defined. The ones that will
# actually be run have to be defined in the 'installers'
######################################################################
# QEMU installation from a local tarball
# local_tar_qemu_path = /tmp/qemu-0.15.1.tar.gz
# QEMU installation from a local source directory
# local_src_qemu_path = /tmp/qemu-0.15.1
# Guest Kernel installation from a GIT repo
git_repo_guest_kernel_build_helper = linux_kernel
git_repo_guest_kernel_uri = git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
git_repo_guest_kernel_branch = master
#git_repo_guest_kernel_patches = ['http://foo/bar.patch', 'http://foo/baz.patch']
git_repo_guest_kernel_config = http://foo/bar/kernel-config
git_repo_guest_kernel_build_target = bzImage
git_repo_guest_kernel_build_image = arch/x86/boot/bzImage
# Should be same as the kernel variable defined in guest-os.cfg. Which is
# used to boot the guest.
git_repo_guest_kernel_kernel_path = /tmp/kvm_autotest_root/images/bzImage
# QEMU installation from a GIT repo
git_repo_qemu_uri = git://git.qemu.org/qemu.git
git_repo_qemu_configure_options = --target-list=x86_64-softmmu --enable-spice
# if you have a git repo that is closer to you, you may
# use it to fetch object first from it, and then later from "upstream"
# git_repo_qemu_base_uri = /home/user/code/qemu
# QEMU (KVM) installation from a GIT repo
# git_repo_qemu_kvm_uri = git://git.kernel.org/pub/scm/virt/kvm/qemu-kvm.git
# git_repo_qemu_kvm_configure_options = --enable-spice
# SPICE installation from a GIT repo
git_repo_spice_uri = git://anongit.freedesktop.org/spice/spice
# spice-protocol installation from a GIT repo
git_repo_spice_protocol_uri = git://anongit.freedesktop.org/spice/spice-protocol
# QEMU (KVM) installation from a YUM repo
# yum_qemu_kvm_pkgs = ['qemu-kvm', 'qemu-kvm-tools', 'qemu-system-x86', 'qemu-common', 'qemu-img']
# QEMU (KVM) installation from koji/brew
# koji_qemu_kvm_tag = dist-f15
# koji_qemu_kvm_pkgs = :qemu:qemu-common,qemu-img,qemu-kvm,qemu-system-x86,qemu-kvm-tools seabios vgabios :gpxe:gpxe-roms-qemu :spice:spice-server
# Koji/brew scratch builds notes:
#
# Packages from scratch builds have a different syntax:
#
# user:task_id[:pkg1,pkg2]
#
# If you include a subset of packages and want to have debuginfo packages
# you must *manually* add it to the list, as there's no way to know for
# sure the main package name for scratch builds. If you set only the
# user name and task id, all packages, including -debuginfo will be
# installed.
#
# koji_qemu_kvm_scratch_pkgs = jdoe:1000:qemu-kvm,qemu-system-x86,qemu-kvm-tools,qemu-img,qemu-kvm-debuginfo
######################################################################
# INSTALLERS SELECTION
# Choose here what components you want to install
######################################################################
installers = git_repo_spice_protocol git_repo_spice git_repo_qemu
# Choose whether you want to include debug information/symbols
install_debug_info = yes
# Comment out the 'no build' line to enable the build test
no build
# Bootstrap the autotest client environment. Prefer an installed
# autotest package; fall back to locating the client directory relative
# to this file (a source checkout) and importing setup_modules from it.
import os, sys

try:
    # Installed autotest: setup_modules is importable as a package module.
    import autotest.client.setup_modules as setup_modules
    client_dir = os.path.dirname(setup_modules.__file__)
except ImportError:
    # Source checkout: this file lives two directory levels below the
    # client dir; temporarily extend sys.path just long enough to import
    # setup_modules, then restore it.
    dirname = os.path.dirname(sys.modules[__name__].__file__)
    client_dir = os.path.abspath(os.path.join(dirname, "..", ".."))
    sys.path.insert(0, client_dir)
    import setup_modules
    sys.path.pop(0)

# Register the autotest.client module aliases so later imports resolve.
setup_modules.setup(base_path=client_dir,
                    root_module_name="autotest.client")
AUTHOR = """
uril@redhat.com (Uri Lublin)
drusso@redhat.com (Dror Russo)
mgoldish@redhat.com (Michael Goldish)
dhuff@redhat.com (David Huff)
aeromenk@redhat.com (Alexey Eromenko)
mburns@redhat.com (Mike Burns)
"""
TIME = 'MEDIUM'
NAME = 'KVM Test'
TEST_TYPE = 'client'
TEST_CLASS = 'Virtualization'
TEST_CATEGORY = 'Functional'
DOC = """
Executes the KVM test framework on a given host. This module is separated in
minor functions, that execute different tests for doing Quality Assurance on
KVM (both kernelspace and userspace) code.
For online docs, please refer to http://www.linux-kvm.org/page/KVM-Autotest
"""

# Bug fix: 're' is used below (re.findall on command line args) but was
# never imported in the original control file.
import sys, os, logging, re
from autotest.client.shared import cartesian_config
from autotest.client.virt import utils_misc

# set English environment (command output might be localized, need to be safe)
os.environ['LANG'] = 'en_US.UTF-8'

# Extra configuration parsed after build.cfg (renamed from 'str' so the
# builtin is no longer shadowed).
build_cfg_extra = """
# This string will be parsed after build.cfg. Make any desired changes to the
# build configuration here. For example (to install from koji/brew):
# installers = koji_qemu_kvm
"""

# Run the build step first; abort the whole job if it fails.
parser = cartesian_config.Parser()
kvm_test_dir = os.path.join(os.environ['AUTODIR'], 'tests/kvm')
parser.parse_file(os.path.join(kvm_test_dir, "build.cfg"))
parser.parse_string(build_cfg_extra)
if not utils_misc.run_tests(parser, job):
    logging.error("KVM build step failed, exiting.")
    sys.exit(1)

# Extra configuration parsed after tests.cfg.
tests_cfg_extra = """
# This string will be parsed after tests.cfg. Make any desired changes to the
# test configuration here. For example:
#display = sdl
#install, setup: timeout_multiplier = 3
"""

parser = cartesian_config.Parser()
parser.parse_file(os.path.join(kvm_test_dir, "tests.cfg"))

if args:
    # We get test parameters from command line: each arg is key=value;
    # 'only'/'no' become config filters, everything else a plain override.
    for arg in args:
        try:
            (key, value) = re.findall(r"^(\w+)=(.*)", arg)[0]
            if key == "only":
                tests_cfg_extra += "only %s\n" % value
            elif key == "no":
                tests_cfg_extra += "no %s\n" % value
            else:
                tests_cfg_extra += "%s = %s\n" % (key, value)
        except IndexError:
            # Arg did not match key=value; ignore it.
            pass

parser.parse_string(tests_cfg_extra)
utils_misc.run_tests(parser, job)
AUTHOR = """
uril@redhat.com (Uri Lublin)
drusso@redhat.com (Dror Russo)
mgoldish@redhat.com (Michael Goldish)
dhuff@redhat.com (David Huff)
aeromenk@redhat.com (Alexey Eromenko)
mburns@redhat.com (Mike Burns)
"""
TIME = 'SHORT'
NAME = 'KVM Test (Parallel)'
TEST_TYPE = 'client'
TEST_CLASS = 'Virtualization'
TEST_CATEGORY = 'Functional'
DOC = """
Executes the KVM test framework on a given host (parallel version).
"""
import sys, os, commands, re

#-----------------------------------------------------------------------------
# set English environment (command output might be localized, need to be safe)
#-----------------------------------------------------------------------------
os.environ['LANG'] = 'en_US.UTF-8'

#---------------------------------------------------------
# Enable modules import from current directory (tests/kvm)
#---------------------------------------------------------
pwd = os.path.join(os.environ['AUTODIR'],'tests/kvm')
sys.path.append(pwd)

# ------------------------
# create required symlinks
# ------------------------
# When dispatching tests from autotest-server the links we need do not exist on
# the host (the client). The following lines create those symlinks. Change
# 'rootdir' here and/or mount appropriate directories in it.
#
# When dispatching tests on local host (client mode) one can either setup kvm
# links, or same as server mode use rootdir and set all appropriate links and
# mount-points there. For example, guest installation tests need to know where
# to find the iso-files.
#
# We create the links only if not already exist, so if one already set up the
# links for client/local run we do not touch the links.
rootdir='/tmp/kvm_autotest_root'
# Expected sub-locations under rootdir: install ISOs, disk images and the
# qemu/qemu-img binaries (or links to them).
iso=os.path.join(rootdir, 'iso')
images=os.path.join(rootdir, 'images')
qemu=os.path.join(rootdir, 'qemu')
qemu_img=os.path.join(rootdir, 'qemu-img')
def link_if_not_exist(ldir, target, link_name):
    """
    Create a symlink named link_name under ldir pointing to target,
    unless something already exists at that path.

    Improvements over the original: uses os.path.lexists (not exists) so
    an existing-but-broken symlink is detected and left alone instead of
    triggering a failing 'ln -s'; uses os.symlink directly instead of
    os.system('ln -s ...'), avoiding a shell (and shell-quoting issues
    with unusual paths) and surfacing errors as exceptions.

    @param ldir: Directory in which the link is created.
    @param target: Path the symlink will point to.
    @param link_name: Name of the symlink inside ldir.
    """
    link_path = os.path.join(ldir, link_name)
    if not os.path.lexists(link_path):
        os.symlink(target, link_path)
# Create links only if not already exist
link_if_not_exist(pwd, '../../', 'autotest')
link_if_not_exist(pwd, iso, 'isos')
link_if_not_exist(pwd, images, 'images')
link_if_not_exist(pwd, qemu, 'qemu')
link_if_not_exist(pwd, qemu_img, 'qemu-img')

# --------------------------------------------------------
# Params that will be passed to the KVM install/build test
# --------------------------------------------------------
params = {
    "name": "build",
    "shortname": "build",
    "type": "build",
    #"mode": "release",
    #"mode": "snapshot",
    #"mode": "localtar",
    #"mode": "localsrc",
    #"mode": "git",
    "mode": "noinstall",
    #"mode": "koji",

    ## Are we going to load modules built by this test?
    ## Defaults to 'yes', so if you are going to provide only userspace code to
    ## be built by this test, please set load_modules to 'no', and make sure
    ## the kvm and kvm-[vendor] module is already loaded by the time you start
    ## it.
    #"load_modules": "no",

    ## Install from a kvm release ("mode": "release"). You can optionally
    ## specify a release tag. If you omit it, the test will get the latest
    ## release tag available.
    #"release_tag": '84',
    #"release_dir": 'http://downloads.sourceforge.net/project/kvm/',
    # This is the place that contains the sourceforge project list of files
    #"release_listing": 'http://sourceforge.net/projects/kvm/files/',

    ## Install from a kvm snapshot location ("mode": "snapshot"). You can
    ## optionally specify a snapshot date. If you omit it, the test will get
    ## yesterday's snapshot.
    #"snapshot_date": '20090712'
    #"snapshot_dir": 'http://foo.org/kvm-snapshots/',

    ## Install from a tarball ("mode": "localtar")
    #"tarball": "/tmp/kvm-84.tar.gz",

    ## Install from a local source code dir ("mode": "localsrc")
    #"srcdir": "/path/to/source-dir"

    ## Install from koji build server ("mode": "koji")
    ## Koji is the Fedora Project buildserver. It is possible to install
    ## packages right from Koji if you provide a release tag or a build.
    ## Tag (if available)
    #"koji_tag": 'dist-f11',
    ## Build (if available, is going to override tag).
    #"koji_build": 'qemu-0.10-16.fc11',
    ## Command to interact with the build server
    #"koji_cmd": '/usr/bin/koji',
    ## The name of the source package that's being built
    #"src_pkg": 'qemu',
    ## Name of the rpms we need installed
    #"pkg_list": ['qemu-kvm', 'qemu-kvm-tools', 'qemu-system-x86', 'qemu-common', 'qemu-img'],
    ## Paths of the qemu relevant executables that should be checked
    #"qemu_bin_paths": ['/usr/bin/qemu-kvm', '/usr/bin/qemu-img'],

    ## Install from git ("mode": "git")
    ## If you provide only "git_repo" and "user_git_repo", the build test
    ## will assume it will perform all build from the userspace dir, building
    ## modules through make -C kernel LINUX=%s sync. As of today (07-13-2009)
    ## we need 3 git repos, "git_repo" (linux sources), "user_git_repo" and
    ## "kmod_repo" to build KVM userspace + kernel modules.
    #"git_repo": 'git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm.git',
    #"kernel_branch": 'kernel_branch_name',
    #"kernel_lbranch": 'kernel_lbranch_name',
    #"kernel_tag": 'kernel_tag_name',
    #"user_git_repo": 'git://git.kernel.org/pub/scm/virt/kvm/qemu-kvm.git',
    #"user_branch": 'user_branch_name',
    #"user_lbranch": 'user_lbranch_name',
    #"user_tag": 'user_tag_name',
    #"kmod_repo": 'git://git.kernel.org/pub/scm/virt/kvm/kvm-kmod.git',
    #"kmod_branch": 'kmod_branch_name',
    #"kmod_lbranch": 'kmod_lbranch_name',
    #"kmod_tag": 'kmod_tag_name',
}

# If you don't want to execute the build stage, just use 'noinstall' as the
# install type. If you run the tests from autotest-server, make sure that
# /tmp/kvm-autotest-root/qemu is a link to your existing executable. Note that
# if kvm_install is chosen to run, it overwrites existing qemu and qemu-img
# links to point to the newly built executables.
if not params.get("mode") == "noinstall":
    # 'job' is provided by the autotest control-file environment.
    if not job.run_test("kvm", params=params, tag=params.get("shortname")):
        print 'kvm_installation failed ... exiting'
        sys.exit(1)
# ----------------------------------------------------------
# Get test set (dictionary list) from the configuration file
# ----------------------------------------------------------
from autotest.client.shared import cartesian_config

# Extra config string parsed after tests.cfg; edit to customize the run.
str = """
# This string will be parsed after tests.cfg. Make any desired changes to the
# test configuration here. For example:
#install, setup: timeout_multiplier = 3
#display = sdl
"""
parser = cartesian_config.Parser()
parser.parse_file(os.path.join(pwd, "tests.cfg"))
parser.parse_string(str)
tests = list(parser.get_dicts())

# -------------
# Run the tests
# -------------
from autotest.client.virt import scheduler
from autotest.client import utils

# total_cpus defaults to the number of CPUs reported by /proc/cpuinfo
total_cpus = utils.count_cpus()
# total_mem defaults to 3/4 of the total memory reported by 'free'
total_mem = int(commands.getoutput("free -m").splitlines()[1].split()[1]) * 3/4
# We probably won't need more workers than CPUs
num_workers = total_cpus

# Start the scheduler and workers: one scheduler task plus one worker
# task per CPU, all run in parallel by the autotest job.
s = scheduler.scheduler(tests, num_workers, total_cpus, total_mem, pwd)
job.parallel([s.scheduler],
             *[(s.worker, i, job.run_test) for i in range(num_workers)])

# create the html report in result dir
reporter = os.path.join(pwd, 'make_html_report.py')
html_file = os.path.join(job.resultdir, 'results.html')
os.system('%s -r %s -f %s -R' % (reporter, job.resultdir, html_file))
AUTHOR = 'lkocman@redhat.com (Lubos Kocman)'
TIME = 'MEDIUM'
NAME = 'Spice test'
TEST_TYPE = 'client'
TEST_CLASS = 'Virtualization'
TEST_CATEGORY = 'Functional'
DOC = """
Executes the KVM test framework on a given host. This module is separated in
minor functions, that execute different tests for doing Quality Assurance on
KVM (both kernelspace and userspace) code.
For online docs, please refer to http://www.linux-kvm.org/page/KVM-Autotest
"""

# Bug fix: 're' is used below (re.findall on command line args) but was
# never imported in the original control file.
import sys, os, logging, re
from autotest.client.shared import cartesian_config
from autotest.client.virt import utils_misc

# set English environment (command output might be localized, need to be safe)
os.environ['LANG'] = 'en_US.UTF-8'

# Extra configuration parsed after build.cfg (renamed from 'str' so the
# builtin is no longer shadowed).
build_cfg_extra = """
# This string will be parsed after build.cfg. Make any desired changes to the
# build configuration here. For example (to install from koji/brew):
# installers = koji_qemu_kvm
"""

# Run the build step first; abort the whole job if it fails.
parser = cartesian_config.Parser()
kvm_test_dir = os.path.join(os.environ['AUTODIR'], 'tests/kvm')
parser.parse_file(os.path.join(kvm_test_dir, "build.cfg"))
parser.parse_string(build_cfg_extra)
if not utils_misc.run_tests(parser, job):
    logging.error("KVM build step failed, exiting.")
    sys.exit(1)

# Extra configuration parsed after tests-spice.cfg.
tests_cfg_extra = """
# This string will be parsed after tests-spice.cfg. Make any desired changes to the
# test configuration here. For example:
#display = sdl
#install, setup: timeout_multiplier = 3
"""

parser = cartesian_config.Parser()
parser.parse_file(os.path.join(kvm_test_dir, "tests-spice.cfg"))

if args:
    # We get test parameters from command line: each arg is key=value;
    # 'only'/'no' become config filters, everything else a plain override.
    for arg in args:
        try:
            (key, value) = re.findall(r"^(\w+)=(.*)", arg)[0]
            if key == "only":
                tests_cfg_extra += "only %s\n" % value
            elif key == "no":
                tests_cfg_extra += "no %s\n" % value
            else:
                tests_cfg_extra += "%s = %s\n" % (key, value)
        except IndexError:
            # Arg did not match key=value; ignore it.
            pass

parser.parse_string(tests_cfg_extra)
utils_misc.run_tests(parser, job)
AUTHOR = """
mgoldish@redhat.com (Michael Goldish)
nsprei@redhat.com (Naphtali Sprei)
lmr@redhat.com (Lucas Meneghel Rodrigues)
"""
TIME = 'MEDIUM'
NAME = 'KVM Test (Unittests)'
TEST_TYPE = 'client'
TEST_CLASS = 'Virtualization'
TEST_CATEGORY = 'Unittest'
DOC = """
Runs the unittests available for a given KVM build.
"""

import sys, os, logging
from autotest.client.shared import cartesian_config
from autotest.client.virt import utils_misc

# Build a Cartesian config parser from unittests.cfg under the kvm test
# directory and hand the resulting test set to the virt test runner.
unittests_cfg = os.path.join(os.environ['AUTODIR'], 'tests/kvm',
                             'unittests.cfg')
config_parser = cartesian_config.Parser()
config_parser.parse_file(unittests_cfg)

# Run the tests
utils_misc.run_tests(config_parser, job)
#!/usr/bin/python
"""
Program to help setup kvm test environment

@copyright: Red Hat 2010
"""
import os, sys
try:
    import autotest.common as common
except ImportError:
    import common
from autotest.client.virt import utils_misc

# Identity and layout of the kvm test: the test directory is wherever
# this script lives, and all shared assets live under base_dir.
test_name = "kvm"
test_dir = os.path.abspath(os.path.dirname(sys.modules[__name__].__file__))
base_dir = "/tmp/kvm_autotest_root"

# Binaries and kernel modules the assistant will verify.
default_userspace_paths = ["/usr/bin/qemu-kvm", "/usr/bin/qemu-img"]
check_modules = ["kvm",
                 "kvm-%s" % utils_misc.get_cpu_vendor(verbose=False)]
online_docs_url = "https://github.com/autotest/autotest/wiki/KVMAutotest-GetStartedClient"

if __name__ == "__main__":
    utils_misc.virt_test_assistant(test_name, test_dir, base_dir,
                                   default_userspace_paths, check_modules,
                                   online_docs_url)
from autotest.client.virt import virt_test


class kvm(virt_test.virt_test):

    """
    Suite of KVM virtualization functional tests.
    Contains tests for testing both KVM kernel code and userspace code.

    @copyright: Red Hat 2008-2009
    @author: Uri Lublin (uril@redhat.com)
    @author: Dror Russo (drusso@redhat.com)
    @author: Michael Goldish (mgoldish@redhat.com)
    @author: David Huff (dhuff@redhat.com)
    @author: Alexey Eromenko (aeromenk@redhat.com)
    @author: Mike Burns (mburns@redhat.com)

    @see: http://www.linux-kvm.org/page/KVM-Autotest/Client_Install
          (Online doc - Getting started with KVM testing)
    """
    # All behavior is inherited from virt_test.virt_test; this subclass
    # only binds the 'kvm' test name to the shared virt test harness.
    pass
# Copy this file to multi-host-tests.cfg and edit it.
#
# This file contains the test set definitions for multi host tests.
# Include the shared test files.
include tests-shared.cfg
# Here are the test sets variants. The variant 'qemu_kvm_windows_quick' is
# fully commented, the following ones have comments only on noteworthy points
variants:
# Runs qemu-kvm, Fedora 15 64 bit guest OS, multi host migration test
- @qemu_migrate_multi_host:
qemu_binary = /usr/bin/qemu-kvm
qemu_img_binary = /usr/bin/qemu-img
qemu_io_binary = /usr/bin/qemu-io
nic_mode = tap
only qcow2
only virtio_net
only virtio_blk
only smp2
only no_pci_assignable
only no_9p_export
only smallpages
only Fedora.15.64
only migrate_multi_host
# Runs qemu-kvm, Fedora 15 64 bit guest OS, multi host cpuflags test
- @qemu_cpuflags_multi_host:
qemu_binary = /usr/bin/qemu-kvm
qemu_img_binary = /usr/bin/qemu-img
qemu_io_binary = /usr/bin/qemu-io
nic_mode = tap
only qcow2
only virtio_net
only virtio_blk
only smp2
only no_pci_assignable
only no_9p_export
only smallpages
only Fedora.15.64
only cpuflags_multi_host
only qemu_migrate_multi_host
AUTHOR = "Jiri Zupka <jzupka@redhat.com>"
TIME = "SHORT"
NAME = ""
TEST_CATEGORY = "Functional"
TEST_CLASS = "Virtualization"
TEST_TYPE = "Server"
DOC = """
KVM tests (multi-host) server control
Runs tests across multiple hosts. It uses the config file
'multi-host-tests.cfg' in order to yield the appropriate
dicts for the multi host test.
"""

import sys, os, commands, glob, shutil, logging, random
from autotest.server import utils
from autotest.client.shared import cartesian_config, error

# Specify the directory of autotest before you start this test
AUTOTEST_DIR = job.clientdir
KVM_DIR = os.path.join(AUTOTEST_DIR, 'tests', 'kvm')

# Boilerplate prepended to every generated per-host control file: it
# installs the kvm test package on the client and extends sys.path so
# the kvm test modules can be imported there.
CONTROL_MAIN_PART = """
testname = "kvm"
bindir = os.path.join(job.testdir, testname)
job.install_pkg(testname, 'test', bindir)
kvm_test_dir = os.path.join(os.environ['AUTODIR'],'tests', 'kvm')
sys.path.append(kvm_test_dir)
"""
# Prefer the installed autotest package; fall back to the local common
# bootstrap module in a source checkout.
try:
    import autotest.common
except ImportError:
    import common
def generate_mac_address():
    """
    Return a random MAC address string of the form 9a:xx:xx:xx:xx:xx.

    The fixed 9a: first octet marks the address as locally administered;
    the remaining five octets are drawn from the OS entropy source via
    random.SystemRandom so concurrent hosts don't collide.
    """
    rng = random.SystemRandom()
    octets = ["%02x" % rng.randint(0x00, 0xff) for _ in range(5)]
    return "9a:" + ":".join(octets)
def run(machines):
    """
    Run the multi-host KVM test set across the given machines.

    The first machine acts as the master (clones the master images); the
    remaining ones are slaves. For every test dict yielded by
    multi-host-tests.cfg, a control file is generated per host and all
    hosts are executed in parallel via autotest_remote.

    @param machines: List of host names to run the test on.
    @raise error.JobError: If multi-host-tests.cfg is missing.
    """
    logging.info("KVM test running on hosts %s\n", machines)

    class Machines(object):
        # Per-host record: autotest handle, test params and the
        # generated control file text.
        def __init__(self, host):
            self.host = host
            self.at = None
            self.params = None
            self.control = None

    _hosts = {}
    for machine in machines:
        _hosts[machine] = Machines(hosts.create_host(machine))

    for host in _hosts.itervalues():
        host.at = autotest_remote.Autotest(host.host)

    cfg_file = os.path.join(KVM_DIR, "multi-host-tests.cfg")
    if not os.path.exists(cfg_file):
        # Bug fix: the original passed cfg_file as a second argument to
        # JobError instead of interpolating it into the message.
        raise error.JobError("Config file %s was not found" % cfg_file)

    # Get test set (dictionary list) from the configuration file
    parser = cartesian_config.Parser()
    parser.parse_file(cfg_file)
    test_dicts = parser.get_dicts()

    ips = []
    for machine in machines:
        host = _hosts[machine]
        ips.append(host.host.ip)

    for params in test_dicts:
        params['hosts'] = ips
        params['not_preprocess'] = "yes"
        # Pre-generate unique MAC addresses for every nic of every vm so
        # the hosts don't clash on the shared tap network.
        for vm in params.get("vms").split():
            for nic in params.get('nics', "").split():
                params['mac_%s_%s' % (nic, vm)] = generate_mac_address()

        params['master_images_clone'] = "image1"
        params['kill_vm'] = "yes"

        # First machine is the master: it clones the master images.
        s_host = _hosts[machines[0]]
        s_host.params = params.copy()
        s_host.params['clone_master'] = "yes"
        s_host.params['hostid'] = machines[0]

        for machine in machines[1:]:
            host = _hosts[machine]
            host.params = params.copy()
            host.params['clone_master'] = "no"
            host.params['hostid'] = machine

        # Report the parameters we've received
        logging.debug("Test parameters:")
        for key in sorted(params.keys()):
            logging.debug("    %s = %s", key, params[key])

        for machine in machines:
            host = _hosts[machine]
            host.control = CONTROL_MAIN_PART

        for machine in machines:
            host = _hosts[machine]
            host.control += ("job.run_test('kvm', tag='%s', params=%s)" %
                             (host.params['shortname'], host.params))

        logging.debug('Master control file:\n%s', _hosts[machines[0]].control)
        for machine in machines[1:]:
            host = _hosts[machine]
            logging.debug('Slave control file:\n%s', host.control)

        # Renamed from 'commands' to avoid shadowing the imported
        # 'commands' module.
        subcommands = []
        for machine in machines:
            host = _hosts[machine]
            subcommands.append(subcommand(host.at.run,
                                          [host.control, host.host.hostname]))
        try:
            parallel(subcommands)
        except error.AutoservError as e:
            logging.error(e)
# 'args' and 'machines' are provided by the autotest server control-file
# environment.
if 'all' in args:
    # Run test with all machines at once.
    run(machines)
else:
    # Grab the pairs (and failures)
    (pairs, failures) = utils.form_ntuples_from_machines(machines, 2)
    # Log the failures
    for failure in failures:
        job.record("FAIL", failure[0], "kvm", failure[1])
    # Now run through each pair and run
    job.parallel_simple(run, pairs, log=False)
# Copy this file to tests-shared.cfg and edit it.
#
# This file contains the base test set definitions, shared among single host
# and multi host jobs.
# Virtualization type (kvm or libvirt)
vm_type = kvm
# The hypervisor uri (default, qemu://hostname/system, etc.)
# where default or unset means derive from installed system
connect_uri = default
# Include the base config files.
include base.cfg
include subtests.cfg
include guest-os.cfg
include guest-hw.cfg
include cdkeys.cfg
include virtio-win.cfg
# Additional directory for finding virt type tests. Relative to client/tests
other_tests_dirs = ""
# Modify/comment the following lines if you wish to modify the paths of the
# image files, ISO files or qemu binaries.
#
# As for the defaults:
# * qemu and qemu-img are expected to be found under /usr/bin/qemu-kvm and
# /usr/bin/qemu-img respectively.
# * All image files are expected under /tmp/kvm_autotest_root/images/
# * All install iso files are expected under /tmp/kvm_autotest_root/isos/
# * The parameters cdrom_unattended, floppy, kernel and initrd are generated
# by KVM autotest, so remember to put them under a writable location
# (for example, the cdrom share can be read only)
image_name(_.*)? ?<= /tmp/kvm_autotest_root/images/
cdrom(_.*)? ?<= /tmp/kvm_autotest_root/
floppy(_.*)? ?<= /tmp/kvm_autotest_root/
Linux..unattended_install:
kernel ?<= /tmp/kvm_autotest_root/
initrd ?<= /tmp/kvm_autotest_root/
# You may provide information about the DTM server for WHQL tests here:
#whql:
# server_address = 10.20.30.40
# server_shell_port = 10022
# server_file_transfer_port = 10023
# Note that the DTM server must run rss.exe (available under deps/),
# preferably with administrator privileges.
# Uncomment the following lines to enable abort-on-error mode:
#abort_on_error = yes
#kill_vm.* ?= no
#kill_unresponsive_vms.* ?= no
include tests-shared.cfg
variants:
- qxl:
variants:
- vnc:
display = vnc
vga = std
- spice:
vga = qxl
display = spice
variants:
- 1monitor:
qxl_dev_nr = 1
- 2monitor:
qxl_dev_nr = 2
- 3monitor:
qxl_dev_nr = 3
- 4monitor:
qxl_dev_nr = 4
variants:
- @no_password:
- password:
spice_password = 12456
variants:
- @no_ssl:
spice_ssl = no
spice_port = 3000
- bad_port:
spice_port = -1
- ssl:
spice_ssl = yes
spice_tls_port = 3200
spice_tls_ciphers = DEFAULT
spice_gen_x509 = yes
spice_x509_dir = yes
spice_x509_prefix = /tmp/spice_x509d
spice_x509_key_file = server-key.pem
spice_x509_cacert_file = ca-cert.pem
spice_x509_cert_file = server-cert.pem
spice_x509_key_password = testPassPhrase
spice_x509_cacert_subj = /C=CZ/L=BRNO/O=SPICE/CN=my CA
spice_x509_server_subj = /C=CZ/L=BRNO/O=SPICE/CN=my Server
spice_secure_channels = main, inputs
spice_client_host_subject = yes
variants:
- key_password:
spice_x509_secure = yes
- @no_key_password:
spice_x509_secure = no
variants:
- @default_ic:
spice_image_compression = auto_glz
- auto_glz_ic:
spice_image_compression = auto_glz
- auto_lz_ic:
spice_image_compression = auto_lz
- quic_ic:
spice_image_compression = quic
- glz_ic:
spice_image_compression = glz
- lz_ic:
spice_image_compression = lz
- no_ic:
spice_image_compression = off
- bad_ic:
spice_image_compression = bad_value
variants:
- @default_jpeg_wc:
spice_jpeg_wan_compression = auto
- auto_jpeg_wc:
spice_jpeg_wan_compression = auto
- off_jpeg_wc:
spice_jpeg_wan_compression = off
- on_jpeg_wc:
spice_jpeg_wan_compression = always
- bad_jpeg_wc:
spice_jpeg_wan_compression = bad_value
variants:
- @default_zlib_wc:
spice_zlib_glz_wan_compression = auto
- auto_zlib_wc:
spice_zlib_glz_wan_compression = auto
- off_zlib_wc:
spice_zlib_glz_wan_compression = off
- on_zlib_wc:
spice_zlib_glz_wan_compression = always
- bad_zlib_wc:
spice_zlib_glz_wan_compression = bad_value
variants:
- @default_sv:
spice_streaming_video = filter
- sv:
spice_streaming_video = all
- filter_sv:
spice_streaming_video = filter
- no_sv:
spice_streaming_video = off
- bad_sv:
spice_streaming_video = bad_value
variants:
-@default_pc:
spice_playback_compression = on
-pc:
spice_playback_compression = on
-no_pc:
spice_playback_compression = off
-bad_pc:
spice_playback_compression = bad_value
variants:
-ipv6:
spice_ipv6 = yes
spice_ipv4=no
-ipv4:
spice_ipv4=yes
spice_ipv6=no
-default_ipv:
spice_ipv4=no
spice_ipv6=no
variants:
- qemu_kvm_rhel63_install_client:
# Use this only when you need to create rhel63 image qcow
qemu_binary = /usr/libexec/qemu-kvm
qemu_img_binary = /usr/bin/qemu-img
qemu_io_binary = /usr/bin/qemu-io
image_name = /tmp/kvm_autotest_root/images/rhel63-64_client
only qcow2
only rtl8139
only ide
only smp2
only no_9p_export
only no_pci_assignable
only smallpages
only Linux.RHEL.6.3.x86_64
only spice.default_ipv.default_pc.default_sv.default_zlib_wc.default_jpeg_wc.default_ic.no_ssl.no_password.1monitor
only unattended_install.cdrom.extra_cdrom_ks
# Runs qemu-kvm Windows guest install
- @qemu_kvm_windows_install_guest:
# We want qemu-kvm for this run
qemu_binary = /usr/libexec/qemu-kvm
qemu_img_binary = /usr/bin/qemu-img
qemu_io_binary = /usr/bin/qemu-io
# Only qcow2 file format
only qcow2
# Only rtl8139 for nw card (default on qemu-kvm)
only rtl8139
# Only ide hard drives
only ide
# qemu-kvm will start only with -smp 2 (2 processors)
only smp2
# Disable 9p export by default
only no_9p_export
# No PCI assignable devices
only no_pci_assignable
# No large memory pages
only smallpages
# Operating system choice
only Win7.64.sp1
# Subtest choice. You can modify that line to add more subtests
only spice.default_ipv.default_pc.default_sv.default_zlib_wc.default_jpeg_wc.default_ic.no_ssl.no_password.1monitor
only unattended_install.cdrom
- qemu_kvm_rhel63_install_guest:
# Use this only when you need to create rhel63 image qcow
qemu_binary = /usr/libexec/qemu-kvm
qemu_img_binary = /usr/bin/qemu-img
qemu_io_binary = /usr/bin/qemu-io
only qcow2
only rtl8139
only ide
only smp2
only no_9p_export
only no_pci_assignable
only smallpages
only Linux.RHEL.6.3.x86_64
only spice.default_ipv.default_pc.default_sv.default_zlib_wc.default_jpeg_wc.default_ic.no_ssl.no_password.1monitor
only unattended_install.cdrom.extra_cdrom_ks
- @remote_viewer_rhel63ssl:
qemu_binary = /usr/libexec/qemu-kvm
qemu_img_binary = /usr/bin/qemu-img
qemu_io_binary = /usr/bin/qemu-io
rv_binary = /usr/bin/remote-viewer
only qcow2
only e1000
only ide
only up
only no_9p_export
only no_pci_assignable
only smallpages
only Linux.RHEL.6.3.x86_64
only spice.default_ipv.default_pc.default_sv.default_zlib_wc.default_jpeg_wc.default_ic.ssl.key_password.password.1monitor
only rv_connect.RHEL.6.3.x86_64, shutdown
- @remote_viewer_rhel63_quick:
qemu_binary = /usr/libexec/qemu-kvm
qemu_img_binary = /usr/bin/qemu-img
qemu_io_binary = /usr/bin/qemu-io
rv_binary = /usr/bin/remote-viewer
only qcow2
only e1000
only ide
only up
only no_9p_export
only no_pci_assignable
only smallpages
only Linux.RHEL.6.3.x86_64
only spice.default_ipv.default_pc.default_sv.default_zlib_wc.default_jpeg_wc.default_ic.no_ssl.no_password.1monitor
only rv_connect.RHEL.6.3.x86_64, shutdown
- @remote_viewer_win_guest_quick:
qemu_binary = /usr/libexec/qemu-kvm
qemu_img_binary = /usr/bin/qemu-img
qemu_io_binary = /usr/bin/qemu-io
rv_binary = /usr/bin/remote-viewer
only qcow2
only e1000
only ide
only up
only no_9p_export
only no_pci_assignable
only smallpages
only Win7.64.sp1
only spice.default_ipv.default_pc.default_sv.default_zlib_wc.default_jpeg_wc.default_ic.no_ssl.no_password.1monitor
#rv_connect_win is specifically a test meant for a windows guest and a rhel client, rv_connect cannot be used.
only rv_connect_win.RHEL.6.3.beta.x86_64, shutdown
- @spice_negative_rhel63_all:
qemu_binary = /usr/libexec/qemu-kvm
qemu_img_binary = /usr/bin/qemu-img
qemu_io_binary = /usr/bin/qemu-io
rv_binary = /usr/bin/remote-viewer
only qcow2
only e1000
only ide
only up
only no_9p_export
only no_pci_assignable
only smallpages
only Linux.RHEL.6.3.x86_64
only spice.default_ipv.default_pc.default_sv.default_zlib_wc.default_jpeg_wc.default_ic.bad_port.no_password.1monitor, spice.default_ipv.default_pc.default_sv.default_zlib_wc.default_jpeg_wc.bad_ic.no_ssl.no_password.1monitor, spice.default_ipv.default_pc.default_sv.default_zlib_wc.bad_jpeg_wc.default_ic.no_ssl.no_password.1monitor, spice.default_ipv.default_pc.default_sv.bad_zlib_wc.default_jpeg_wc.default_ic.no_ssl.no_password.1monitor, spice.default_ipv.default_pc.bad_sv.default_zlib_wc.default_jpeg_wc.default_ic.no_ssl.no_password.1monitor, spice.default_ipv.bad_pc.default_sv.default_zlib_wc.default_jpeg_wc.default_ic.no_ssl.no_password.1monitor
only negative_create
- @rv_disconnect_rhel63:
qemu_binary = /usr/libexec/qemu-kvm
qemu_img_binary = /usr/bin/qemu-img
qemu_io_binary = /usr/bin/qemu-io
rv_binary = /usr/bin/remote-viewer
only qcow2
only e1000
only ide
only up
only no_9p_export
only no_pci_assignable
only smallpages
only Linux.RHEL.6.3.x86_64
only spice.default_ipv.default_pc.default_sv.default_zlib_wc.default_jpeg_wc.default_ic.no_ssl.no_password.1monitor
only start, rv_connect.RHEL.6.3.x86_64, rv_disconnect.RHEL.6.3.x86_64, shutdown
variants:
- Create_VMs:
only qemu_kvm_rhel63_install_guest, qemu_kvm_rhel63_install_client
- Install_Win_Guest:
only qemu_kvm_windows_install_guest
- Negative_QEMU_Spice_Creation_Tests:
only spice_negative_rhel63_all
- Remote_Viewer_Test:
only remote_viewer_rhel63_quick
- Remote_Viewer_WinGuest_Test:
only remote_viewer_win_guest_quick
- Remote_Viewer_Disconnect_Test:
only rv_disconnect_rhel63
- Remote_Viewer_SSL_Test:
only remote_viewer_rhel63ssl
only Create_VMs, Install_Win_Guest, Negative_QEMU_Spice_Creation_Tests, Remote_Viewer_Test, Remote_Viewer_SSL_Test, Remote_Viewer_WinGuest_Test, Remote_Viewer_Disconnect_Test
# Choose your test list from the testsets defined
# the following is for remote viewer tests with the setup of a rhel client, and a windows guest
#only qemu_kvm_windows_install_guest, qemu_kvm_rhel63_install_client, remote_viewer_win_guest_quick
#only qemu_kvm_rhel63_install_guest, qemu_kvm_rhel63_install_client, remote_viewer_rhel63_quick, rv_disconnect_rhel63, spice_negative_rhel63_all
# Copy this file to tests.cfg and edit it.
#
# This file contains the test set definitions. Define your test sets here.
# Include the base config files.
include tests-shared.cfg
# Here you can override the image name for our custom linux and windows guests
#
CustomGuestLinux:
# Here you can override the default login credentials for your custom guest
username = root
password = 123456
image_name = custom_image_linux
image_size = 10G
# If you want to use a block device as the vm disk, uncomment the 2 lines
# below, pointing the image name for the device you want
#image_name = /dev/mapper/vg_linux_guest
#image_raw_device = yes
CustomGuestWindows:
image_name = custom_image_windows
image_size = 10G
# If you want to use a block device as the vm disk, uncomment the 2 lines
# below, pointing the image name for the device you want
#image_name = /dev/mapper/vg_windows_guest
#image_raw_device = yes
# Here are the test sets variants. The variant 'qemu_kvm_windows_quick' is
# fully commented, the following ones have comments only on noteworthy points
variants:
# Runs all variants defined. HUGE test set.
- @full:
# Runs qemu-kvm, Windows Vista 64 bit guest OS, install, boot, shutdown
- @qemu_kvm_windows_quick:
# We want qemu-kvm for this run
qemu_binary = /usr/bin/qemu-kvm
qemu_img_binary = /usr/bin/qemu-img
qemu_io_binary = /usr/bin/qemu-io
# Only qcow2 file format
only qcow2
# Only rtl8139 for nw card (default on qemu-kvm)
only rtl8139
# Only ide hard drives
only ide
# qemu-kvm will start only with -smp 2 (2 processors)
only smp2
# Disable 9p export by default
only no_9p_export
# No PCI assignable devices
only no_pci_assignable
# No large memory pages
only smallpages
# Operating system choice
only Win7.64.sp1
# Subtest choice. You can modify that line to add more subtests
only unattended_install.cdrom, boot, shutdown
# Runs qemu, f17 64 bit guest OS, install, boot, shutdown
- @qemu_f17_quick:
# We want qemu for this run
qemu_binary = /usr/bin/qemu
qemu_img_binary = /usr/bin/qemu-img
qemu_io_binary = /usr/bin/qemu-io
only qcow2
only virtio_net
only virtio_blk
# qemu using kvm doesn't support smp yet
only up
only no_9p_export
only no_pci_assignable
only smallpages
only Fedora.17.64
only unattended_install.cdrom.extra_cdrom_ks, boot, shutdown
# qemu needs -enable-kvm on the cmdline
extra_params += ' -enable-kvm'
# Runs qemu-kvm, f17 64 bit guest OS, install, boot, shutdown
- @qemu_kvm_f17_quick:
# We want qemu-kvm for this run
qemu_binary = /usr/bin/qemu-kvm
qemu_img_binary = /usr/bin/qemu-img
qemu_io_binary = /usr/bin/qemu-io
only qcow2
only virtio_net
only virtio_blk
only smp2
only no_9p_export
only no_pci_assignable
only smallpages
only Fedora.17.64
only unattended_install.cdrom.extra_cdrom_ks, boot, shutdown
# Runs qemu-kvm, f17 64 bit guest OS, install, starts qemu-kvm
# with 9P support and runs 9P CI tests
- @qemu_kvm_9p_export:
qemu_binary = /usr/bin/qemu-kvm
qemu_img_binary = /usr/bin/qemu-img
qemu_io_binary = /usr/bin/qemu-io
only raw
only virtio_net
only virtio_blk
only smp2
only no_pci_assignable
only smallpages
only 9p_export
only Fedora.17.64
only unattended_install.cdrom.extra_cdrom_ks, boot, 9p.9p_ci, shutdown
# Runs your own guest image (qcow2, can be adjusted), all migration tests
# (on a core2 duo laptop with HD and 4GB RAM, F15 host took 3 hours to run)
# Be warned, disk stress + migration can corrupt your image, so make sure
# you have proper backups
- @qemu_kvm_custom_migrate:
# We want qemu-kvm for this run
qemu_binary = /usr/bin/qemu-kvm
qemu_img_binary = /usr/bin/qemu-img
qemu_io_binary = /usr/bin/qemu-io
only qcow2
only virtio_net
only virtio_blk
only smp2
only no_9p_export
only no_pci_assignable
only smallpages
only CustomGuestLinux
only migrate
# Choose your test list from the testsets defined above
only qemu_kvm_f17_quick
import os,logging
from autotest.client.shared import error
from autotest.client.virt import utils_test
def run_9p(test, params, env):
    """
    Run an autotest test inside a guest, optionally over a 9p mount.

    @param test: kvm test object.
    @param params: Dictionary with test parameters.
    @param env: Dictionary with the test environment.
    """
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    login_timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=login_timeout)

    mount_dir = params.get("9p_mount_dir")
    if mount_dir is None:
        logging.info("User Variable for mount dir is not set")
    else:
        session.cmd("mkdir -p %s" % mount_dir)
        # Assemble the 9p mount options from the test parameters
        option_list = [" trans=virtio",
                       "version=" + params.get("9p_proto_version", "9p2000.L")]
        if params.get("9p_guest_cache") == "yes":
            option_list.append("cache=loose")
        if params.get("9p_posix_acl") == "yes":
            option_list.append("posixacl")
        mount_option = ",".join(option_list)

        logging.info("Mounting 9p mount point with options %s" % mount_option)
        cmd = "mount -t 9p -o %s autotest_tag %s" % (mount_option, mount_dir)
        if session.get_command_status(cmd) != 0:
            logging.error("mount failed")
            raise error.TestFail('mount failed.')

    # Collect test parameters
    test_timeout = int(params.get("test_timeout", 14400))
    control_path = os.path.join(test.virtdir, "autotest_control",
                                params.get("test_control_file"))
    # Run the selected autotest control file inside the guest
    utils_test.run_autotest(vm, session, control_path,
                            test_timeout, test.outputdir, params)
import re, logging, random, time
from autotest.client.shared import error
from autotest.client.virt import kvm_monitor, utils_test
def run_balloon_check(test, params, env):
    """
    Check memory ballooning:
    1) Boot a guest
    2) Balloon the guest memory to a random size between the guest's free
       memory and the memory assigned to it
    3) Check the memory info reported by the monitor and by the guest OS

    @param test: kvm test object
    @param params: Dictionary with the test parameters
    @param env: Dictionary with test environment.
    """
    def check_ballooned_memory():
        """
        Verify the actual memory reported by monitor command info balloon. If
        the operation failed, increase the failure counter.

        @return: Tuple (memory reported by the ballooner, failure count).
        """
        fail = 0
        try:
            output = vm.monitor.info("balloon")
        except kvm_monitor.MonitorError as e:
            logging.error(e)
            fail += 1
            return 0, fail
        return int(re.findall(r"\d+", str(output))[0]), fail

    def balloon_memory(new_mem, offset):
        """
        Balloon memory to new_mem and verify on both qemu monitor and
        guest OS whether the change worked.

        @param new_mem: New desired memory.
        @param offset: Difference between the memory assigned on qemu and
                       the memory reported by the guest OS at boot.
        @return: Number of failures occurred during operation.
        """
        _, fail = check_ballooned_memory()
        if params.get("monitor_type") == "qmp":
            # QMP deals with the balloon size in bytes, not MB
            new_mem = new_mem * 1024 * 1024
        logging.info("Changing VM memory to %s", new_mem)
        # This should be replaced by proper monitor method call
        vm.monitor.send_args_cmd("balloon value=%s" % new_mem)
        time.sleep(20)

        ballooned_mem, cfail = check_ballooned_memory()
        fail += cfail
        # Verify whether the VM machine reports the correct new memory
        if ballooned_mem != new_mem:
            logging.error("Memory ballooning failed while changing memory "
                          "to %s", new_mem)
            fail += 1

        # Verify whether the guest OS reports the correct new memory
        # BUGFIX: the original added cfail a second time here, double
        # counting monitor failures
        current_mem_guest = vm.get_current_memory_size() + offset
        if params.get("monitor_type") == "qmp":
            current_mem_guest = current_mem_guest * 1024 * 1024
        # Current memory figures will always be a little smaller than new
        # memory. If they are higher, ballooning failed on guest perspective
        if current_mem_guest > new_mem:
            logging.error("Guest OS reports %s of RAM, but new ballooned RAM "
                          "is %s", current_mem_guest, new_mem)
            fail += 1
        return fail

    fail = 0
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=timeout)

    # Upper limit that we can raise the memory
    vm_assigned_mem = int(params.get("mem"))

    # Check memory size
    logging.info("Memory check")
    boot_mem = vm.get_memory_size()
    if boot_mem != vm_assigned_mem:
        logging.error("Memory size mismatch:")
        logging.error("    Assigned to VM: %s", vm_assigned_mem)
        logging.error("    Reported by guest OS at boot: %s", boot_mem)
        fail += 1

    # Check if info balloon works or not
    current_vm_mem, cfail = check_ballooned_memory()
    if cfail:
        fail += cfail
    if current_vm_mem:
        logging.info("Current VM memory according to ballooner: %s",
                     current_vm_mem)

    # Get the offset of memory reported by the guest system
    guest_memory = vm.get_current_memory_size()
    offset = vm_assigned_mem - guest_memory

    # Reduce memory to a random size between free memory and the
    # max (assigned) memory size
    s, o = session.cmd_status_output("cat /proc/meminfo")
    if s != 0:
        raise error.TestError("Can not get guest memory information")
    # MemFree is reported in kB; floor-divide to MB (// keeps py2 semantics)
    vm_mem_free = int(re.findall(r'MemFree:\s+(\d+).*', o)[0]) // 1024
    new_mem = int(random.uniform(vm_assigned_mem - vm_mem_free,
                                 vm_assigned_mem))
    fail += balloon_memory(new_mem, offset)
    # Run optional sub test after memory eviction
    if 'sub_balloon_test_evict' in params:
        balloon_test = params['sub_balloon_test_evict']
        utils_test.run_virt_sub_test(test, params, env, sub_type=balloon_test)
        if balloon_test == "shutdown":
            logging.info("Guest shutdown normally after balloon")
            return

    # Reset memory value to original memory assigned on qemu. This will ensure
    # we won't trigger guest OOM killer while running multiple iterations
    fail += balloon_memory(vm_assigned_mem, offset)

    # Run optional sub test after memory enlargement
    if 'sub_balloon_test_enlarge' in params:
        balloon_test = params['sub_balloon_test_enlarge']
        utils_test.run_virt_sub_test(test, params, env, sub_type=balloon_test)
        if balloon_test == "shutdown":
            logging.info("Guest shutdown normally after balloon")
            return

    # Check memory after sub test running
    logging.info("Check memory after tests")
    boot_mem = vm.get_memory_size()
    if boot_mem != vm_assigned_mem:
        fail += 1
    # Check if info balloon works or not
    # BUGFIX: the original discarded cfail here, so a monitor failure at
    # this point went uncounted
    current_vm_mem, cfail = check_ballooned_memory()
    fail += cfail
    if params.get("monitor_type") == "qmp":
        current_vm_mem = current_vm_mem // 1024 // 1024
    if current_vm_mem != vm_assigned_mem:
        fail += 1
        logging.error("Memory size after tests:")
        logging.error("    Assigned to VM: %s", vm_assigned_mem)
        logging.error("    Reported by guest OS: %s", boot_mem)
        logging.error("    Reported by monitor: %s", current_vm_mem)

    # Close established session
    session.close()
    # Check if any failures happened during the whole test
    if fail != 0:
        raise error.TestFail("Memory ballooning test failed")
import re, os, logging, time
from autotest.client.shared import utils, error
from autotest.client.virt import kvm_monitor
from autotest.client.virt import env_process
@error.context_aware
def run_block_stream(test, params, env):
    """
    Test block streaming functionality.

    1) Create a image_bak.img with the backing file image.img
    2) Start the image_bak.img in qemu command line.
    3) Request for block-stream ide0-hd0/virtio0
    4) Wait till the block job finishes
    5) Check for backing file in image_bak.img
    6) TODO: Check that the size of image_bak.img does not exceed image.img
    7) TODO(extra): Block job completion can be checked in QMP

    @param test: kvm test object.
    @param params: Dictionary with the test parameters.
    @param env: Dictionary with test environment.
    """
    image_format = params.get("image_format")
    image_name = params.get("image_name", "image")
    drive_format = params.get("drive_format")
    backing_file_name = "%s_bak" % (image_name)
    qemu_img = params.get("qemu_img_binary")
    block_stream_cmd = "block-stream"

    def check_block_jobs_info():
        """
        Verify the status of block-jobs reported by monitor command
        'info block-jobs'.

        @return: Tuple (match over the leading word of the output, list of
                 numbers found in the output), or (None, None) when the
                 monitor command failed.
        """
        try:
            output = vm.monitor.info("block-jobs")
        except kvm_monitor.MonitorError as e:
            logging.error(e)
            return None, None
        return re.match(r"\w+", str(output)), re.findall(r"\d+", str(output))

    try:
        # Remove the existing backing file
        backing_file = "%s.%s" % (backing_file_name, image_format)
        if os.path.isfile(backing_file):
            os.remove(backing_file)

        # Create the new backing file
        create_cmd = "%s create -b %s.%s -f %s %s.%s" % (qemu_img,
                                                         image_name,
                                                         image_format,
                                                         image_format,
                                                         backing_file_name,
                                                         image_format)
        error.context("Creating backing file")
        utils.system(create_cmd)

        info_cmd = "%s info %s.%s" % (qemu_img, image_name, image_format)
        error.context("Gathering info about the base image file")
        results = utils.system_output(info_cmd)
        logging.info("Infocmd output of basefile: %s", results)

        # Set the qemu harddisk to the backing file
        logging.info("Original image_name is: %s", params.get('image_name'))
        params['image_name'] = backing_file_name
        logging.info("Param image_name changed to: %s",
                     params.get('image_name'))

        # Start virtual machine, using backing file as its harddisk
        vm_name = params.get('main_vm')
        env_process.preprocess_vm(test, params, env, vm_name)
        vm = env.get_vm(vm_name)
        vm.create()
        timeout = int(params.get("login_timeout", 360))
        vm.wait_for_login(timeout=timeout)

        info_cmd = "%s info %s.%s" % (qemu_img, backing_file_name,
                                      image_format)
        error.context("Gathering info about the backing file")
        results = utils.system_output(info_cmd)
        logging.info("Infocmd output of backing file before block streaming: "
                     "%s", results)

        if not re.search("backing file:", str(results)):
            raise error.TestFail("Backing file is not available in the "
                                 "backdrive image")

        # The human monitor spells the command with an underscore
        if vm.monitor.protocol == "human":
            block_stream_cmd = "block_stream"

        # Start streaming in qemu-cmd line
        if 'ide' in drive_format:
            error.context("Block streaming on qemu monitor (ide drive)")
            vm.monitor.cmd("%s ide0-hd0" % block_stream_cmd)
        elif 'virtio' in drive_format:
            error.context("Block streaming on qemu monitor (virtio drive)")
            vm.monitor.cmd("%s virtio0" % block_stream_cmd)
        else:
            raise error.TestError("The drive format is not supported")

        while True:
            blkjobout, blkjobstatus = check_block_jobs_info()
            if blkjobout is None:
                # BUGFIX: the original dereferenced blkjobout.group() here
                # and crashed with AttributeError on a monitor failure
                raise error.TestError("Could not get block job status from "
                                      "the monitor")
            if 'Streaming' in blkjobout.group(0):
                logging.info("[(Completed bytes): %s (Total bytes): %s "
                             "(Speed in bytes/s): %s]", blkjobstatus[-3],
                             blkjobstatus[-2], blkjobstatus[-1])
                time.sleep(10)
                continue
            if 'No' in blkjobout.group(0):
                logging.info("Block job completed")
                break

        info_cmd = "%s info %s.%s" % (qemu_img, backing_file_name,
                                      image_format)
        error.context("Gathering info about the streamed image file")
        results = utils.system_output(info_cmd)
        logging.info("Infocmd output of backing file after block streaming: "
                     "%s", results)

        if re.search("backing file:", str(results)):
            raise error.TestFail(" Backing file is still available in the "
                                 "backdrive image")

        # TODO
        # The file size should be more/less equal to the "backing file" size

        # Shutdown the virtual machine
        vm.destroy()

        # Relogin with the backup-harddrive
        vm.create()
        timeout = int(params.get("login_timeout", 360))
        session = vm.wait_for_login(timeout=timeout)
        logging.info("Checking whether the guest with backup-harddrive boot "
                     "and respond after block stream completion")
        error.context("checking responsiveness of guest")
        session.cmd(params.get("alive_test_cmd"))

        # Finally shutdown the virtual machine
        vm.destroy()
    finally:
        # Remove the backing file
        if os.path.isfile(backing_file):
            os.remove(backing_file)
"""
KVM cdrom test
@author: Amos Kong <akong@redhat.com>
@author: Lucas Meneghel Rodrigues <lmr@redhat.com>
@author: Lukas Doktor <ldoktor@redhat.com>
@copyright: 2011 Red Hat, Inc.
"""
import logging, re, time, os
from autotest.client.shared import error
from autotest.client import utils
from autotest.client.virt import utils_misc, aexpect, kvm_monitor
@error.context_aware
def run_cdrom(test, params, env):
    """
    KVM cdrom test:
    1) Boot up a VM with one iso.
    2) Check if VM identifies correctly the iso file.
    3) * If cdrom_test_autounlock is set, verifies that device is unlocked
       <300s after boot
    4) Eject cdrom using monitor and change with another iso several times.
    5) * If cdrom_test_tray_status = yes, tests tray reporting.
    6) Try to format cdrom and check the return string.
    7) Mount cdrom device.
    8) Copy file from cdrom and compare files using diff.
    9) Umount and mount several times.

    @param test: kvm test object
    @param params: Dictionary with the test parameters
    @param env: Dictionary with test environment.
    @param cfg: workaround_eject_time - Some versions of qemu are unable to
                eject CDROM directly after insert
    @param cfg: cdrom_test_autounlock - Test whether guest OS unlocks cdrom
                after boot (<300s after VM is booted)
    @param cfg: cdrom_test_tray_status - Test tray reporting (eject and insert
                CD couple of times in guest).

    @warning: Check dmesg for block device failures
    """
    def master_cdroms(params):
        """ Creates 'new' cdrom with one file on it """
        error.context("creating test cdrom")
        os.chdir(test.tmpdir)
        cdrom_cd1 = params.get("cdrom_cd1")
        if not os.path.isabs(cdrom_cd1):
            cdrom_cd1 = os.path.join(test.bindir, cdrom_cd1)
        cdrom_dir = os.path.dirname(cdrom_cd1)
        # 10MB of random data so the copy/md5 comparison below is meaningful
        utils.run("dd if=/dev/urandom of=orig bs=10M count=1")
        utils.run("dd if=/dev/urandom of=new bs=10M count=1")
        utils.run("mkisofs -o %s/orig.iso orig" % cdrom_dir)
        utils.run("mkisofs -o %s/new.iso new" % cdrom_dir)
        return "%s/new.iso" % cdrom_dir

    def cleanup_cdroms(cdrom_dir):
        """ Removes created cdrom """
        error.context("cleaning up temp cdrom images")
        os.remove("%s/new.iso" % cdrom_dir)

    def get_cdrom_file(device):
        """
        Return the iso file currently inserted in a monitor block device.

        @param device: qemu monitor device
        @return: file associated with $device device, or None if empty
        """
        blocks = vm.monitor.info("block")
        cdfile = None
        if isinstance(blocks, str):
            # Human monitor: parse the file= field out of the text output
            cdfile = re.findall('%s: .*file=(\S*) ' % device, blocks)
            if not cdfile:
                return None
            else:
                cdfile = cdfile[0]
        else:
            # QMP monitor: blocks is a list of dicts
            for block in blocks:
                if block['device'] == device:
                    try:
                        cdfile = block['inserted']['file']
                    except KeyError:
                        # No medium inserted; keep looking / return None
                        continue
        return cdfile

    def check_cdrom_tray(cdrom):
        """ Checks whether the tray is opened """
        blocks = vm.monitor.info("block")
        if isinstance(blocks, str):
            # Human monitor: look for the tray-open flag in the text output
            for block in blocks.splitlines():
                if cdrom in block:
                    if "tray-open=1" in block:
                        return True
                    elif "tray-open=0" in block:
                        return False
        else:
            # QMP monitor: tray_open key is only present when supported
            for block in blocks:
                if block['device'] == cdrom and 'tray_open' in block.keys():
                    return block['tray_open']
        # None signals that qemu does not report tray status at all
        return None

    def eject_cdrom(device, monitor):
        """ Ejects the cdrom using kvm-monitor """
        if isinstance(monitor, kvm_monitor.HumanMonitor):
            monitor.cmd("eject %s" % device)
        elif isinstance(monitor, kvm_monitor.QMPMonitor):
            monitor.cmd("eject", args={'device': device})

    def change_cdrom(device, target, monitor):
        """ Changes the medium using kvm-monitor """
        if isinstance(monitor, kvm_monitor.HumanMonitor):
            monitor.cmd("change %s %s" % (device, target))
        elif isinstance(monitor, kvm_monitor.QMPMonitor):
            monitor.cmd("change", args={'device': device, 'target': target})

    cdrom_new = master_cdroms(params)
    cdrom_dir = os.path.dirname(cdrom_new)
    vm = env.get_vm(params["main_vm"])
    vm.create()

    # Some versions of qemu are unable to eject CDROM directly after insert
    workaround_eject_time = float(params.get('workaround_eject_time', 0))

    session = vm.wait_for_login(timeout=int(params.get("login_timeout", 360)))
    cdrom_orig = params.get("cdrom_cd1")
    if not os.path.isabs(cdrom_orig):
        cdrom_orig = os.path.join(test.bindir, cdrom_orig)
    cdrom = cdrom_orig
    output = session.get_command_output("ls /dev/cdrom*")
    cdrom_dev_list = re.findall("/dev/cdrom-\w+|/dev/cdrom\d*", output)
    logging.debug("cdrom_dev_list: %s", cdrom_dev_list)

    # Probe each candidate device node until one is actually readable
    cdrom_dev = ""
    test_cmd = "dd if=%s of=/dev/null bs=1 count=1"
    for d in cdrom_dev_list:
        try:
            output = session.cmd(test_cmd % d)
            cdrom_dev = d
            break
        except aexpect.ShellError:
            logging.error(output)
    if not cdrom_dev:
        raise error.TestFail("Could not find a valid cdrom device")

    error.context("Detecting the existence of a cdrom")
    cdfile = cdrom
    device = vm.get_block({'file': cdfile})
    if not device:
        # The iso may show up as a backing file instead
        device = vm.get_block({'backing_file': cdfile})
        if not device:
            raise error.TestFail("Could not find a valid cdrom device")

    session.get_command_output("umount %s" % cdrom_dev)
    if params.get('cdrom_test_autounlock') == 'yes':
        error.context("Trying to unlock the cdrom")
        if not utils_misc.wait_for(lambda: not vm.check_block_locked(device),
                                   300):
            raise error.TestFail("Device %s could not be unlocked" % device)

    max_times = int(params.get("max_times", 100))
    error.context("Eject the cdrom in monitor %s times" % max_times)
    for i in range(1, max_times):
        session.cmd('eject %s' % cdrom_dev)
        eject_cdrom(device, vm.monitor)
        time.sleep(2)
        if get_cdrom_file(device) is not None:
            raise error.TestFail("Device %s was not ejected (%s)" % (cdrom, i))

        cdrom = cdrom_new
        # On even attempts, try to change the cdrom
        if i % 2 == 0:
            cdrom = cdrom_orig
        change_cdrom(device, cdrom, vm.monitor)
        if get_cdrom_file(device) != cdrom:
            raise error.TestFail("It wasn't possible to change cdrom %s (%s)"
                                 % (cdrom, i))
        time.sleep(workaround_eject_time)

    error.context('Eject the cdrom in guest %s times' % max_times)
    if params.get('cdrom_test_tray_status') != 'yes':
        pass
    elif check_cdrom_tray(device) is None:
        logging.error("Tray reporting not supported by qemu!")
        logging.error("cdrom_test_tray_status skipped...")
    else:
        for i in range(1, max_times):
            session.cmd('eject %s' % cdrom_dev)
            if not check_cdrom_tray(device):
                raise error.TestFail("Monitor reports closed tray (%s)" % i)
            # Reading from the device in the guest closes the tray again
            session.cmd('dd if=%s of=/dev/null count=1' % cdrom_dev)
            if check_cdrom_tray(device):
                raise error.TestFail("Monitor reports opened tray (%s)" % i)
            time.sleep(workaround_eject_time)

    error.context("Check whether the cdrom is read-only")
    try:
        # mkfs must fail on a read-only medium; success is the test failure
        output = session.cmd("echo y | mkfs %s" % cdrom_dev)
        raise error.TestFail("Attempt to format cdrom %s succeeded" %
                             cdrom_dev)
    except aexpect.ShellError:
        pass

    error.context("Mounting the cdrom under /mnt")
    session.cmd("mount %s %s" % (cdrom_dev, "/mnt"), timeout=30)

    filename = "new"

    error.context("File copying test")
    session.cmd("rm -f /tmp/%s" % filename)
    session.cmd("cp -f /mnt/%s /tmp/" % filename)

    error.context("Compare file on disk and on cdrom")
    f1_hash = session.cmd("md5sum /mnt/%s" % filename).split()[0].strip()
    f2_hash = session.cmd("md5sum /tmp/%s" % filename).split()[0].strip()
    if f1_hash != f2_hash:
        raise error.TestFail("On disk and on cdrom files are different, "
                             "md5 mismatch")

    error.context("Mount/Unmount cdrom for %s times" % max_times)
    for i in range(1, max_times):
        try:
            session.cmd("umount %s" % cdrom_dev)
            session.cmd("mount %s /mnt" % cdrom_dev)
        except aexpect.ShellError:
            logging.debug(session.cmd("cat /etc/mtab"))
            raise

    session.cmd("umount %s" % cdrom_dev)

    error.context("Cleanup")
    # Return the cdrom_orig
    cdfile = get_cdrom_file(device)
    if cdfile != cdrom_orig:
        time.sleep(workaround_eject_time)
        session.cmd('eject %s' % cdrom_dev)
        eject_cdrom(device, vm.monitor)
        if get_cdrom_file(device) is not None:
            raise error.TestFail("Device %s was not ejected (%s)" % (cdrom, i))

        change_cdrom(device, cdrom_orig, vm.monitor)
        if get_cdrom_file(device) != cdrom_orig:
            raise error.TestFail("It wasn't possible to change cdrom %s (%s)"
                                 % (cdrom, i))

    session.close()
    cleanup_cdroms(cdrom_dir)
此差异已折叠。
import os, logging, re
from autotest.client.shared import error
from autotest.client.virt import utils_test
@error.context_aware
def run_cpu_hotplug(test, params, env):
    """
    Runs CPU hotplug test:

    1) Pick up a living guest
    2) Send the monitor command cpu_set [cpu id] for each cpu we wish to have
    3) Verify if guest has the additional CPUs showing up under
       /sys/devices/system/cpu
    4) Try to bring them online by writing 1 to the 'online' file inside
       that dir
    5) Run the CPU Hotplug test suite shipped with autotest inside guest

    @param test: KVM test object.
    @param params: Dictionary with test parameters.
    @param env: Dictionary with the test environment.
    """
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=timeout)

    n_cpus_add = int(params.get("n_cpus_add", 1))
    current_cpus = int(params.get("smp", 1))
    onoff_iterations = int(params.get("onoff_iterations", 20))
    total_cpus = current_cpus + n_cpus_add

    error.context("cleaning guest dmesg before addition")
    session.cmd("dmesg -c")

    error.context("Adding %d CPUs to guest" % n_cpus_add)
    for i in range(total_cpus):
        vm.monitor.cmd("cpu_set %s online" % i)

    output = vm.monitor.cmd("info cpus")
    logging.debug("Output of info cpus:\n%s", output)

    cpu_regexp = re.compile(r"CPU #(\d+)")
    total_cpus_monitor = len(cpu_regexp.findall(output))
    if total_cpus_monitor != total_cpus:
        raise error.TestFail("Monitor reports %s CPUs, when VM should have %s" %
                             (total_cpus_monitor, total_cpus))

    dmesg_after = session.cmd("dmesg")
    logging.debug("Guest dmesg output after CPU add:\n%s", dmesg_after)

    # Verify whether the new cpus are showing up on /sys
    error.context("verifying if new CPUs are showing on guest's /sys dir")
    n_cmd = 'find /sys/devices/system/cpu/cpu[0-99] -maxdepth 0 -type d | wc -l'
    output = session.cmd(n_cmd)
    logging.debug("List of cpus on /sys:\n%s", output)
    try:
        cpus_after_addition = int(output)
    except ValueError:
        logging.error("Output of '%s': %s", n_cmd, output)
        raise error.TestFail("Unable to get CPU count after CPU addition")

    if cpus_after_addition != total_cpus:
        raise error.TestFail("%s CPUs are showing up under "
                             "/sys/devices/system/cpu, was expecting %s" %
                             (cpus_after_addition, total_cpus))

    error.context("locating online files for guest's new CPUs")
    r_cmd = 'find /sys/devices/system/cpu/cpu[1-99]/online -maxdepth 0 -type f'
    online_files = session.cmd(r_cmd)
    logging.debug("CPU online files detected: %s", online_files)
    # BUGFIX: list.sort() returns None, so the original
    # 'online_files.split().sort()' always left online_files as None and
    # the test aborted below. sorted() keeps the list.
    online_files = sorted(online_files.split())
    if not online_files:
        raise error.TestFail("Could not find CPUs that can be "
                             "enabled/disabled on guest")

    cpu_regexp = re.compile(r"cpu(\d+)", re.IGNORECASE)
    for online_file in online_files:
        cpu_id = cpu_regexp.findall(online_file)[0]
        error.context("changing online status for CPU %s" % cpu_id)
        check_online_status = session.cmd("cat %s" % online_file)
        try:
            check_online_status = int(check_online_status)
        except ValueError:
            raise error.TestFail("Unable to get online status from CPU %s" %
                                 cpu_id)
        # Explicit check instead of assert (asserts vanish under python -O)
        if check_online_status not in (0, 1):
            raise error.TestFail("Unexpected online status '%s' for CPU %s" %
                                 (check_online_status, cpu_id))
        if check_online_status == 0:
            error.context("Bringing CPU %s online" % cpu_id)
            session.cmd("echo 1 > %s" % online_file)

    # Now that all CPUs were onlined, let's execute the
    # autotest CPU Hotplug test
    control_path = os.path.join(test.virtdir, "autotest_control",
                                "cpu_hotplug.control")

    # BUGFIX: the original 'int(params.get("cpu_hotplug_timeout"), 300)'
    # passed 300 as the int() *base*; 300 was meant as the default value.
    timeout = int(params.get("cpu_hotplug_timeout", 300))
    error.context("running cpu_hotplug autotest after cpu addition")
    utils_test.run_autotest(vm, session, control_path, timeout,
                            test.outputdir, params)

    # Last, but not least, let's offline/online the CPUs in the guest
    # several times
    irq = 15
    irq_mask = "f0"
    for i in range(onoff_iterations):
        session.cmd("echo %s > /proc/irq/%s/smp_affinity" % (irq_mask, irq))
        for online_file in online_files:
            session.cmd("echo 0 > %s" % online_file)
        for online_file in online_files:
            session.cmd("echo 1 > %s" % online_file)
此差异已折叠。
import logging, time, re, os
from autotest.client.shared import error
from autotest.client import utils
from autotest.client.virt import virt_vm, utils_misc, kvm_storage
class EnospcConfig(object):
    """
    Performs setup for the test enospc. This is a borg class, similar to a
    singleton. The idea is to keep state in memory for when we call cleanup()
    on postprocessing.
    """
    # Shared attribute dictionary for the borg pattern: every instance
    # aliases its __dict__ to this one, so state survives re-instantiation
    __shared_state = {}

    def __init__(self, test, params):
        self.__dict__ = self.__shared_state
        root_dir = test.bindir
        self.tmpdir = test.tmpdir
        self.qemu_img_binary = params.get('qemu_img_binary')
        # Allow qemu_img_binary to be given relative to the test bindir
        if not os.path.isfile(self.qemu_img_binary):
            self.qemu_img_binary = os.path.join(root_dir,
                                                self.qemu_img_binary)
        self.raw_file_path = os.path.join(self.tmpdir, 'enospc.raw')
        # Here we're trying to choose fairly explanatory names so it's less
        # likely that we run in conflict with other devices in the system
        self.vgtest_name = params.get("vgtest_name")
        self.lvtest_name = params.get("lvtest_name")
        self.lvtest_device = "/dev/%s/%s" % (self.vgtest_name, self.lvtest_name)
        image_dir = os.path.dirname(params.get("image_name"))
        self.qcow_file_path = os.path.join(image_dir, 'enospc.qcow2')
        # Borg: 'loopback' may already be set from a previous instance;
        # only initialize it the first time around
        try:
            getattr(self, 'loopback')
        except AttributeError:
            self.loopback = ''

    @error.context_aware
    def setup(self):
        """
        Build the LVM stack backing the test image:
        raw file -> loopback device -> PV -> VG -> LV -> qcow2 symlink.
        On any failure, tear down whatever was already created.
        """
        logging.debug("Starting enospc setup")
        error.context("performing enospc setup")
        utils_misc.display_attributes(self)
        # Double check if there aren't any leftovers
        self.cleanup()
        try:
            utils.run("%s create -f raw %s 10G" %
                      (self.qemu_img_binary, self.raw_file_path))
            # Associate a loopback device with the raw file.
            # Subject to race conditions, that's why try here to associate
            # it with the raw file as quickly as possible
            l_result = utils.run("losetup -f")
            utils.run("losetup -f %s" % self.raw_file_path)
            self.loopback = l_result.stdout.strip()
            # Add the loopback device configured to the list of pvs
            # recognized by LVM
            utils.run("pvcreate %s" % self.loopback)
            utils.run("vgcreate %s %s" % (self.vgtest_name, self.loopback))
            # Create an lv inside the vg with starting size of 200M
            utils.run("lvcreate -L 200M -n %s %s" %
                      (self.lvtest_name, self.vgtest_name))
            # Create a 10GB qcow2 image in the logical volume
            utils.run("%s create -f qcow2 %s 10G" %
                      (self.qemu_img_binary, self.lvtest_device))
            # Let's symlink the logical volume with the image name that autotest
            # expects this device to have
            os.symlink(self.lvtest_device, self.qcow_file_path)
        except Exception:
            self.cleanup()
            raise

    @error.context_aware
    def cleanup(self):
        """
        Tear down the LVM stack in reverse order of creation, checking at
        each step whether the resource actually exists (cleanup is also
        called before setup, to clear leftovers).
        """
        error.context("performing enospc cleanup")
        if os.path.isfile(self.lvtest_device):
            # Kill any process still holding the device before removal
            utils.run("fuser -k %s" % self.lvtest_device)
            time.sleep(2)
        l_result = utils.run("lvdisplay")
        # Let's remove all volumes inside the volume group created
        if self.lvtest_name in l_result.stdout:
            utils.run("lvremove -f %s" % self.lvtest_device)
        # Now, removing the volume group itself
        v_result = utils.run("vgdisplay")
        if self.vgtest_name in v_result.stdout:
            utils.run("vgremove -f %s" % self.vgtest_name)
        # Now, if we can, let's remove the physical volume from lvm list
        if self.loopback:
            p_result = utils.run("pvdisplay")
            if self.loopback in p_result.stdout:
                utils.run("pvremove -f %s" % self.loopback)
        l_result = utils.run('losetup -a')
        if self.loopback and (self.loopback in l_result.stdout):
            try:
                utils.run("losetup -d %s" % self.loopback)
            except error.CmdError:
                logging.error("Failed to liberate loopback %s", self.loopback)
        if os.path.islink(self.qcow_file_path):
            os.remove(self.qcow_file_path)
        if os.path.isfile(self.raw_file_path):
            os.remove(self.raw_file_path)
def run_enospc(test, params, env):
    """
    ENOSPC test

    1) Create a virtual disk on lvm
    2) Boot up guest with two disks
    3) Continually write data to second disk
    4) Check images and extend second disk when no space
    5) Continue paused guest
    6) Repeat step 3~5 several times

    @param test: KVM test object.
    @param params: Dictionary with the test parameters.
    @param env: Dictionary with test environment.
    """
    enospc_config = EnospcConfig(test, params)
    enospc_config.setup()
    vm = env.get_vm(params["main_vm"])
    vm.create()
    login_timeout = int(params.get("login_timeout", 360))
    session_serial = vm.wait_for_serial_login(timeout=login_timeout)

    vgtest_name = params.get("vgtest_name")
    lvtest_name = params.get("lvtest_name")
    logical_volume = "/dev/%s/%s" % (vgtest_name, lvtest_name)

    # Figure out the guest device node of the second (test) disk
    drive_format = params.get("drive_format")
    if drive_format == "virtio":
        devname = "/dev/vdb"
    elif drive_format == "ide":
        output = session_serial.cmd_output("dir /dev")
        devname = "/dev/" + re.findall(r"([sh]db)\s", output)[0]
    elif drive_format == "scsi":
        devname = "/dev/sdb"
    else:
        # BUGFIX: the original fell through with devname unbound
        # (NameError) for any other drive format
        raise error.TestError("Unsupported drive format: %s" % drive_format)

    cmd = params.get("background_cmd")
    cmd %= devname
    logging.info("Sending background cmd '%s'", cmd)
    session_serial.sendline(cmd)

    iterations = int(params.get("repeat_time", 40))
    pause_n = 0
    for _ in range(iterations):
        if vm.monitor.verify_status("paused"):
            # Guest hit ENOSPC; verify images, then grow the LV and resume
            pause_n += 1
            logging.info("Checking all images in use by the VM")
            for image_name in vm.params.objects("images"):
                image_params = vm.params.object_params(image_name)
                try:
                    image = kvm_storage.QemuImg(image_params, test.bindir,
                                                image_name)
                    image.check_image(image_params, test.bindir)
                except (virt_vm.VMError, error.TestWarn) as e:
                    logging.error(e)
            logging.info("Guest paused, extending Logical Volume size")
            try:
                utils.run("lvextend -L +200M %s" % logical_volume)
            except error.CmdError as e:
                logging.debug(e.result_obj.stdout)
            vm.resume()
        elif not vm.monitor.verify_status("running"):
            status = str(vm.monitor.info("status"))
            raise error.TestError("Unexpected guest status: %s" % status)
        time.sleep(10)

    if pause_n == 0:
        raise error.TestFail("Guest didn't pause during loop")
    else:
        logging.info("Guest paused %s times from %s iterations",
                     pause_n, iterations)

    logging.info("Final %s", str(vm.monitor.info("status")))
    enospc_config.cleanup()
import logging, time, os
from autotest.client.shared import error
from autotest.client import utils
@error.context_aware
def run_floppy(test, params, env):
    """
    Test virtual floppy of guest:

    1) Create a floppy disk image on host.
    2) Start the guest with this floppy image.
    3) Make a file system on guest virtual floppy.
    4) Calculate md5sum value of a file and copy it into floppy.
    5) Verify whether the md5sum does match.

    @param test: KVM test object.
    @param params: Dictionary with the test parameters.
    @param env: Dictionary with test environment.
    """
    def master_floppy(params):
        # Create a blank 1.44MB floppy image on the host
        # (2880 sectors x 512 bytes).
        error.context("creating test floppy")
        floppy = os.path.abspath(params.get("floppy_name"))
        utils.run("dd if=/dev/zero of=%s bs=512 count=2880" % floppy)

    master_floppy(params)
    vm = env.get_vm(params["main_vm"])
    vm.create()

    timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=timeout)

    dest_dir = params.get("mount_dir")
    # If mount_dir specified, treat guest as a Linux OS
    # Some Linux distribution does not load floppy at boot and Windows
    # needs time to load and init floppy driver
    if dest_dir:
        session.cmd("modprobe floppy")
    else:
        time.sleep(20)

    error.context("Formating floppy disk before using it")
    format_cmd = params.get("format_floppy_cmd")
    session.cmd(format_cmd, timeout=120)
    logging.info("Floppy disk formatted successfully")

    source_file = params.get("source_file")
    dest_file = params.get("dest_file")

    if dest_dir:
        error.context("Mounting floppy")
        session.cmd("mount /dev/fd0 %s" % dest_dir)
    error.context("Testing floppy")
    session.cmd(params.get("test_floppy_cmd"))

    try:
        error.context("Copying file to the floppy")
        session.cmd("%s %s %s" % (params.get("copy_cmd"), source_file,
                    dest_file))
        # Use lazy %-style args so the logging framework does the
        # formatting (avoids eager string interpolation).
        logging.info("Succeed to copy file '%s' into floppy disk",
                     source_file)

        error.context("Checking if the file is unchanged after copy")
        session.cmd("%s %s %s" % (params.get("diff_file_cmd"), source_file,
                    dest_file))
    finally:
        # Always clean up the copied file and unmount/close, even if the
        # copy or the comparison failed.
        clean_cmd = "%s %s" % (params.get("clean_cmd"), dest_file)
        session.cmd(clean_cmd)
        if dest_dir:
            session.cmd("umount %s" % dest_dir)
        session.close()
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
from autotest.client.virt import utils_test
def run_migration_multi_host(test, params, env):
    """
    KVM multi-host migration test:

    Migration execution progress is described in documentation
    for migrate method in class MultihostMigration.

    @param test: kvm test object.
    @param params: Dictionary with test parameters.
    @param env: Dictionary with the test environment.
    """
    class TestMultihostMigration(utils_test.MultihostMigration):
        def __init__(self, test, params, env):
            super(TestMultihostMigration, self).__init__(test, params, env)

        def migration_scenario(self):
            # First listed host is the migration source, second is the
            # destination.
            srchost = self.params.get("hosts")[0]
            dsthost = self.params.get("hosts")[1]
            # Consistently use self.params here (the original mixed the
            # enclosing function's `params` closure with self.params).
            vms = self.params.get("vms").split()

            self.migrate_wait(vms, srchost, dsthost)

    mig = TestMultihostMigration(test, params, env)
    mig.run()
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
import logging, time
def run_system_reset_bootable(test, params, env):
    """
    KVM reset test:

    1) Boot guest.
    2) Reset system by monitor command for several times.
    3) Log into the guest to verify it could normally boot.

    @param test: kvm test object
    @param params: Dictionary with the test parameters
    @param env: Dictionary with test environment.
    """
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = float(params.get("login_timeout", 240))
    reset_times = int(params.get("reset_times", 20))
    interval = int(params.get("reset_interval", 10))
    wait_time = int(params.get("wait_time_for_reset", 60))

    # Give the guest time to finish booting before the first reset; use
    # lazy %-style logging args instead of eager string interpolation.
    logging.info("Wait for %d seconds before reset", wait_time)
    time.sleep(wait_time)

    for _ in range(reset_times):
        logging.info("Reset the system by monitor cmd")
        vm.monitor.cmd("system_reset")
        time.sleep(interval)

    # The guest should come back up and accept a login after the resets.
    logging.info("Try to login guest after reset")
    vm.wait_for_login(timeout=timeout)
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册