From c66355e228e2a12bcd8a333a22e01f7a05e8677b Mon Sep 17 00:00:00 2001 From: Lucas Meneghel Rodrigues Date: Mon, 27 Aug 2012 20:44:31 -0300 Subject: [PATCH] Moving kvm libvirt v2v to virt Signed-off-by: Lucas Meneghel Rodrigues --- kvm/README | 20 + kvm/__init__.py | 0 kvm/build.cfg.sample | 87 + kvm/common.py | 13 + kvm/control | 69 + kvm/control.parallel | 195 ++ kvm/control.spice | 62 + kvm/control.unittests | 26 + kvm/get_started.py | 25 + kvm/kvm.py | 20 + kvm/multi-host-tests.cfg.sample | 43 + kvm/multi_host.srv | 149 ++ kvm/tests-shared.cfg.sample | 52 + kvm/tests-spice.cfg.sample | 294 +++ kvm/tests.cfg.sample | 131 ++ kvm/tests/9p.py | 55 + kvm/tests/__init__.py | 0 kvm/tests/balloon_check.py | 149 ++ kvm/tests/block_stream.py | 141 ++ kvm/tests/cdrom.py | 244 ++ kvm/tests/cgroup.py | 2009 +++++++++++++++++ kvm/tests/cpu_hotplug.py | 111 + kvm/tests/cpuflags.py | 801 +++++++ kvm/tests/enospc.py | 172 ++ kvm/tests/floppy.py | 71 + kvm/tests/getfd.py | 71 + kvm/tests/hdparm.py | 89 + kvm/tests/kernel_install.py | 200 ++ kvm/tests/ksm_overcommit.py | 615 +++++ kvm/tests/migration.py | 117 + kvm/tests/migration_multi_host.py | 27 + kvm/tests/migration_multi_host_fd.py | 124 + ...migration_multi_host_with_file_transfer.py | 243 ++ ...ation_multi_host_with_speed_measurement.py | 197 ++ kvm/tests/migration_with_file_transfer.py | 85 + kvm/tests/migration_with_reboot.py | 43 + kvm/tests/migration_with_speed_measurement.py | 129 ++ kvm/tests/multi_disk.py | 292 +++ kvm/tests/nic_bonding.py | 77 + kvm/tests/nic_hotplug.py | 105 + kvm/tests/nmi_watchdog.py | 60 + kvm/tests/pci_hotplug.py | 203 ++ kvm/tests/perf_kvm.py | 38 + kvm/tests/performance.py | 204 ++ kvm/tests/physical_resources_check.py | 258 +++ kvm/tests/qemu_guest_agent.py | 36 + kvm/tests/qemu_img.py | 439 ++++ kvm/tests/qemu_io_blkdebug.py | 89 + kvm/tests/qemu_iotests.py | 48 + kvm/tests/qmp_basic.py | 407 ++++ kvm/tests/qmp_basic_rhel6.py | 389 ++++ kvm/tests/seabios.py | 59 + kvm/tests/set_link.py | 53 + kvm/tests/smbios_table.py | 67 + kvm/tests/stepmaker.py | 355 +++ kvm/tests/steps.py | 247 ++ kvm/tests/stop_continue.py | 43 + kvm/tests/system_reset_bootable.py | 31 + kvm/tests/time_manage.py | 127 ++ kvm/tests/timedrift.py | 181 ++ kvm/tests/timedrift_with_migration.py | 96 + kvm/tests/timedrift_with_reboot.py | 91 + kvm/tests/timedrift_with_stop.py | 103 + kvm/tests/unittest.py | 129 ++ kvm/tests/unittest_kvmctl.py | 30 + kvm/tests/usb.py | 291 +++ kvm/tests/virtio_console.py | 1313 +++++++++++ kvm/tests/vmstop.py | 85 + kvm/unittests.cfg.sample | 83 + 69 files changed, 12908 insertions(+) create mode 100644 kvm/README create mode 100644 kvm/__init__.py create mode 100644 kvm/build.cfg.sample create mode 100644 kvm/common.py create mode 100644 kvm/control create mode 100644 kvm/control.parallel create mode 100644 kvm/control.spice create mode 100644 kvm/control.unittests create mode 100755 kvm/get_started.py create mode 100644 kvm/kvm.py create mode 100644 kvm/multi-host-tests.cfg.sample create mode 100644 kvm/multi_host.srv create mode 100644 kvm/tests-shared.cfg.sample create mode 100644 kvm/tests-spice.cfg.sample create mode 100644 kvm/tests.cfg.sample create mode 100644 kvm/tests/9p.py create mode 100644 kvm/tests/__init__.py create mode 100644 kvm/tests/balloon_check.py create mode 100644 kvm/tests/block_stream.py create mode 100644 kvm/tests/cdrom.py create mode 100644 kvm/tests/cgroup.py create mode 100644 kvm/tests/cpu_hotplug.py create mode 100644 kvm/tests/cpuflags.py create mode 100644 kvm/tests/enospc.py 
 create mode 100644 kvm/tests/floppy.py
 create mode 100644 kvm/tests/getfd.py
 create mode 100644 kvm/tests/hdparm.py
 create mode 100644 kvm/tests/kernel_install.py
 create mode 100644 kvm/tests/ksm_overcommit.py
 create mode 100644 kvm/tests/migration.py
 create mode 100644 kvm/tests/migration_multi_host.py
 create mode 100644 kvm/tests/migration_multi_host_fd.py
 create mode 100644 kvm/tests/migration_multi_host_with_file_transfer.py
 create mode 100644 kvm/tests/migration_multi_host_with_speed_measurement.py
 create mode 100644 kvm/tests/migration_with_file_transfer.py
 create mode 100644 kvm/tests/migration_with_reboot.py
 create mode 100644 kvm/tests/migration_with_speed_measurement.py
 create mode 100644 kvm/tests/multi_disk.py
 create mode 100644 kvm/tests/nic_bonding.py
 create mode 100644 kvm/tests/nic_hotplug.py
 create mode 100644 kvm/tests/nmi_watchdog.py
 create mode 100644 kvm/tests/pci_hotplug.py
 create mode 100644 kvm/tests/perf_kvm.py
 create mode 100644 kvm/tests/performance.py
 create mode 100644 kvm/tests/physical_resources_check.py
 create mode 100644 kvm/tests/qemu_guest_agent.py
 create mode 100644 kvm/tests/qemu_img.py
 create mode 100644 kvm/tests/qemu_io_blkdebug.py
 create mode 100644 kvm/tests/qemu_iotests.py
 create mode 100644 kvm/tests/qmp_basic.py
 create mode 100644 kvm/tests/qmp_basic_rhel6.py
 create mode 100644 kvm/tests/seabios.py
 create mode 100644 kvm/tests/set_link.py
 create mode 100644 kvm/tests/smbios_table.py
 create mode 100755 kvm/tests/stepmaker.py
 create mode 100644 kvm/tests/steps.py
 create mode 100644 kvm/tests/stop_continue.py
 create mode 100644 kvm/tests/system_reset_bootable.py
 create mode 100644 kvm/tests/time_manage.py
 create mode 100644 kvm/tests/timedrift.py
 create mode 100644 kvm/tests/timedrift_with_migration.py
 create mode 100644 kvm/tests/timedrift_with_reboot.py
 create mode 100644 kvm/tests/timedrift_with_stop.py
 create mode 100644 kvm/tests/unittest.py
 create mode 100644 kvm/tests/unittest_kvmctl.py
 create mode 100644 kvm/tests/usb.py
 create mode 100644 kvm/tests/virtio_console.py
 create mode 100644 kvm/tests/vmstop.py
 create mode 100644 kvm/unittests.cfg.sample

diff --git a/kvm/README b/kvm/README
new file mode 100644
index 00000000..cd666298
--- /dev/null
+++ b/kvm/README
@@ -0,0 +1,20 @@
+For the impatient:
+
+Execute the get_started.py script located in this directory;
+it will guide you through setting up the default kvm test
+scenario:
+
+ * Guest install with Fedora 12
+ * Boot, reboot and shutdown test
+
+The script will help you create all the directories, and
+even get the OS iso in case you don't have it yet.
+
+For the not so impatient:
+
+You are *strongly* advised to read the online docs:
+
+https://github.com/autotest/autotest/wiki/KVMAutotest
+
+so you can have a better idea of how the test is organized
+and how it works.
diff --git a/kvm/__init__.py b/kvm/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/kvm/build.cfg.sample b/kvm/build.cfg.sample
new file mode 100644
index 00000000..7202b00a
--- /dev/null
+++ b/kvm/build.cfg.sample
@@ -0,0 +1,87 @@
+# Copy this file to build.cfg and edit it.
+
+vm_type = kvm
+
+variants:
+    - build:
+        type = build
+        # Load modules built/installed by the build test?
+        load_modules = no
+        # Save the results of this build on test.resultsdir?
+        save_results = no
+        # Preserve the source code directory between tests?
+ preserve_srcdir = yes + + ###################################################################### + # INSTALLERS DEFINITION SECTION + # Many different components can be defined. The ones that will + # actually be run have to be defined in the 'installers' + ###################################################################### + # QEMU installation from a local tarball + # local_tar_qemu_path = /tmp/qemu-0.15.1.tar.gz + + # QEMU installation from a local source directory + # local_src_qemu_path = /tmp/qemu-0.15.1 + + # Guest Kernel installation from a GIT repo + git_repo_guest_kernel_build_helper = linux_kernel + git_repo_guest_kernel_uri = git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git + git_repo_guest_kernel_branch = master + #git_repo_guest_kernel_patches = ['http://foo/bar.patch', 'http://foo/baz.patch'] + git_repo_guest_kernel_config = http://foo/bar/kernel-config + git_repo_guest_kernel_build_target = bzImage + git_repo_guest_kernel_build_image = arch/x86/boot/bzImage + # Should be same as the kernel variable defined in guest-os.cfg. Which is + # used to boot the guest. + git_repo_guest_kernel_kernel_path = /tmp/kvm_autotest_root/images/bzImage + + # QEMU installation from a GIT repo + git_repo_qemu_uri = git://git.qemu.org/qemu.git + git_repo_qemu_configure_options = --target-list=x86_64-softmmu --enable-spice + # if you have a git repo that is closer to you, you may + # use it to fetch object first from it, and then later from "upstream" + # git_repo_qemu_base_uri = /home/user/code/qemu + + # QEMU (KVM) installation from a GIT repo + # git_repo_qemu_kvm_uri = git://git.kernel.org/pub/scm/virt/kvm/qemu-kvm.git + # git_repo_qemu_kvm_configure_options = --enable-spice + + # SPICE installation from a GIT repo + git_repo_spice_uri = git://anongit.freedesktop.org/spice/spice + + # spice-protocol installation from a GIT repo + git_repo_spice_protocol_uri = git://anongit.freedesktop.org/spice/spice-protocol + + # QEMU (KVM) installation from a YUM repo + # yum_qemu_kvm_pkgs = ['qemu-kvm', 'qemu-kvm-tools', 'qemu-system-x86', 'qemu-common', 'qemu-img'] + + # QEMU (KVM) installation from koji/brew + # koji_qemu_kvm_tag = dist-f15 + # koji_qemu_kvm_pkgs = :qemu:qemu-common,qemu-img,qemu-kvm,qemu-system-x86,qemu-kvm-tools seabios vgabios :gpxe:gpxe-roms-qemu :spice:spice-server + + # Koji/brew scratch builds notes: + # + # Packages from scratch builds have a different syntax: + # + # user:task_id[:pkg1,pkg2] + # + # If you include a subset of packages and want to have debuginfo packages + # you must *manually* add it to the list, as there's no way to know for + # sure the main package name for scratch builds. If you set only the + # user name and task id, all packages, including -debuginfo will be + # installed. 
+ # + # koji_qemu_kvm_scratch_pkgs = jdoe:1000:qemu-kvm,qemu-system-x86,qemu-kvm-tools,qemu-img,qemu-kvm-debuginfo + + ###################################################################### + # INSTALLERS SELECTION + # Choose here what components you want to install + ###################################################################### + installers = git_repo_spice_protocol git_repo_spice git_repo_qemu + + # Choose wether you want to include debug information/symbols + install_debug_info = yes + + +# Comment out the 'no build' line to enable the build test +no build diff --git a/kvm/common.py b/kvm/common.py new file mode 100644 index 00000000..025386c4 --- /dev/null +++ b/kvm/common.py @@ -0,0 +1,13 @@ +import os, sys +try: + import autotest.client.setup_modules as setup_modules + client_dir = os.path.dirname(setup_modules.__file__) +except ImportError: + dirname = os.path.dirname(sys.modules[__name__].__file__) + client_dir = os.path.abspath(os.path.join(dirname, "..", "..")) + sys.path.insert(0, client_dir) + import setup_modules + sys.path.pop(0) + +setup_modules.setup(base_path=client_dir, + root_module_name="autotest.client") diff --git a/kvm/control b/kvm/control new file mode 100644 index 00000000..ee8d57bd --- /dev/null +++ b/kvm/control @@ -0,0 +1,69 @@ +AUTHOR = """ +uril@redhat.com (Uri Lublin) +drusso@redhat.com (Dror Russo) +mgoldish@redhat.com (Michael Goldish) +dhuff@redhat.com (David Huff) +aeromenk@redhat.com (Alexey Eromenko) +mburns@redhat.com (Mike Burns) +""" +TIME = 'MEDIUM' +NAME = 'KVM Test' +TEST_TYPE = 'client' +TEST_CLASS = 'Virtualization' +TEST_CATEGORY = 'Functional' + +DOC = """ +Executes the KVM test framework on a given host. This module is separated in +minor functions, that execute different tests for doing Quality Assurance on +KVM (both kernelspace and userspace) code. + +For online docs, please refer to http://www.linux-kvm.org/page/KVM-Autotest +""" + +import sys, os, logging +from autotest.client.shared import cartesian_config +from autotest.client.virt import utils_misc + +# set English environment (command output might be localized, need to be safe) +os.environ['LANG'] = 'en_US.UTF-8' + +str = """ +# This string will be parsed after build.cfg. Make any desired changes to the +# build configuration here. For example (to install from koji/brew): +# installers = koji_qemu_kvm +""" + +parser = cartesian_config.Parser() +kvm_test_dir = os.path.join(os.environ['AUTODIR'],'tests/kvm') +parser.parse_file(os.path.join(kvm_test_dir, "build.cfg")) +parser.parse_string(str) +if not utils_misc.run_tests(parser, job): + logging.error("KVM build step failed, exiting.") + sys.exit(1) + +str = """ +# This string will be parsed after tests.cfg. Make any desired changes to the +# test configuration here. 
For example: +#display = sdl +#install, setup: timeout_multiplier = 3 +""" + +parser = cartesian_config.Parser() +parser.parse_file(os.path.join(kvm_test_dir, "tests.cfg")) + +if args: + # We get test parameters from command line + for arg in args: + try: + (key, value) = re.findall("^(\w+)=(.*)", arg)[0] + if key == "only": + str += "only %s\n" % value + elif key == "no": + str += "no %s\n" % value + else: + str += "%s = %s\n" % (key, value) + except IndexError: + pass +parser.parse_string(str) + +utils_misc.run_tests(parser, job) diff --git a/kvm/control.parallel b/kvm/control.parallel new file mode 100644 index 00000000..a33365b7 --- /dev/null +++ b/kvm/control.parallel @@ -0,0 +1,195 @@ +AUTHOR = """ +uril@redhat.com (Uri Lublin) +drusso@redhat.com (Dror Russo) +mgoldish@redhat.com (Michael Goldish) +dhuff@redhat.com (David Huff) +aeromenk@redhat.com (Alexey Eromenko) +mburns@redhat.com (Mike Burns) +""" +TIME = 'SHORT' +NAME = 'KVM Test (Parallel)' +TEST_TYPE = 'client' +TEST_CLASS = 'Virtualization' +TEST_CATEGORY = 'Functional' + +DOC = """ +Executes the KVM test framework on a given host (parallel version). +""" + + +import sys, os, commands, re + +#----------------------------------------------------------------------------- +# set English environment (command output might be localized, need to be safe) +#----------------------------------------------------------------------------- +os.environ['LANG'] = 'en_US.UTF-8' + +#--------------------------------------------------------- +# Enable modules import from current directory (tests/kvm) +#--------------------------------------------------------- +pwd = os.path.join(os.environ['AUTODIR'],'tests/kvm') +sys.path.append(pwd) + +# ------------------------ +# create required symlinks +# ------------------------ +# When dispatching tests from autotest-server the links we need do not exist on +# the host (the client). The following lines create those symlinks. Change +# 'rootdir' here and/or mount appropriate directories in it. +# +# When dispatching tests on local host (client mode) one can either setup kvm +# links, or same as server mode use rootdir and set all appropriate links and +# mount-points there. For example, guest installation tests need to know where +# to find the iso-files. +# +# We create the links only if not already exist, so if one already set up the +# links for client/local run we do not touch the links. +rootdir='/tmp/kvm_autotest_root' +iso=os.path.join(rootdir, 'iso') +images=os.path.join(rootdir, 'images') +qemu=os.path.join(rootdir, 'qemu') +qemu_img=os.path.join(rootdir, 'qemu-img') + + +def link_if_not_exist(ldir, target, link_name): + t = target + l = os.path.join(ldir, link_name) + if not os.path.exists(l): + os.system('ln -s %s %s' % (t, l)) + +# Create links only if not already exist +link_if_not_exist(pwd, '../../', 'autotest') +link_if_not_exist(pwd, iso, 'isos') +link_if_not_exist(pwd, images, 'images') +link_if_not_exist(pwd, qemu, 'qemu') +link_if_not_exist(pwd, qemu_img, 'qemu-img') + +# -------------------------------------------------------- +# Params that will be passed to the KVM install/build test +# -------------------------------------------------------- +params = { + "name": "build", + "shortname": "build", + "type": "build", + #"mode": "release", + #"mode": "snapshot", + #"mode": "localtar", + #"mode": "localsrc", + #"mode": "git", + "mode": "noinstall", + #"mode": "koji", + + ## Are we going to load modules built by this test? 
+ ## Defaults to 'yes', so if you are going to provide only userspace code to + ## be built by this test, please set load_modules to 'no', and make sure + ## the kvm and kvm-[vendor] module is already loaded by the time you start + ## it. + #"load_modules": "no", + + ## Install from a kvm release ("mode": "release"). You can optionally + ## specify a release tag. If you omit it, the test will get the latest + ## release tag available. + #"release_tag": '84', + #"release_dir": 'http://downloads.sourceforge.net/project/kvm/', + # This is the place that contains the sourceforge project list of files + #"release_listing": 'http://sourceforge.net/projects/kvm/files/', + + ## Install from a kvm snapshot location ("mode": "snapshot"). You can + ## optionally specify a snapshot date. If you omit it, the test will get + ## yesterday's snapshot. + #"snapshot_date": '20090712' + #"snapshot_dir": 'http://foo.org/kvm-snapshots/', + + ## Install from a tarball ("mode": "localtar") + #"tarball": "/tmp/kvm-84.tar.gz", + + ## Install from a local source code dir ("mode": "localsrc") + #"srcdir": "/path/to/source-dir" + + ## Install from koji build server ("mode": "koji") + ## Koji is the Fedora Project buildserver. It is possible to install + ## packages right from Koji if you provide a release tag or a build. + ## Tag (if available) + #"koji_tag": 'dist-f11', + ## Build (if available, is going to override tag). + #"koji_build": 'qemu-0.10-16.fc11', + ## Command to interact with the build server + #"koji_cmd": '/usr/bin/koji', + ## The name of the source package that's being built + #"src_pkg": 'qemu', + ## Name of the rpms we need installed + #"pkg_list": ['qemu-kvm', 'qemu-kvm-tools', 'qemu-system-x86', 'qemu-common', 'qemu-img'], + ## Paths of the qemu relevant executables that should be checked + #"qemu_bin_paths": ['/usr/bin/qemu-kvm', '/usr/bin/qemu-img'], + + ## Install from git ("mode": "git") + ## If you provide only "git_repo" and "user_git_repo", the build test + ## will assume it will perform all build from the userspace dir, building + ## modules trough make -C kernel LINUX=%s sync. As of today (07-13-2009) + ## we need 3 git repos, "git_repo" (linux sources), "user_git_repo" and + ## "kmod_repo" to build KVM userspace + kernel modules. + #"git_repo": 'git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm.git', + #"kernel_branch": 'kernel_branch_name', + #"kernel_lbranch": 'kernel_lbranch_name', + #"kernel_tag": 'kernel_tag_name', + #"user_git_repo": 'git://git.kernel.org/pub/scm/virt/kvm/qemu-kvm.git', + #"user_branch": 'user_branch_name', + #"user_lbranch": 'user_lbranch_name', + #"user_tag": 'user_tag_name', + #"kmod_repo": 'git://git.kernel.org/pub/scm/virt/kvm/kvm-kmod.git', + #"kmod_branch": 'kmod_branch_name', + #"kmod_lbranch": 'kmod_lbranch_name', + #"kmod_tag": 'kmod_tag_name', +} + +# If you don't want to execute the build stage, just use 'noinstall' as the +# install type. If you run the tests from autotest-server, make sure that +# /tmp/kvm-autotest-root/qemu is a link to your existing executable. Note that +# if kvm_install is chose to run, it overwrites existing qemu and qemu-img +# links to point to the newly built executables. + +if not params.get("mode") == "noinstall": + if not job.run_test("kvm", params=params, tag=params.get("shortname")): + print 'kvm_installation failed ... 
exiting' + sys.exit(1) + +# ---------------------------------------------------------- +# Get test set (dictionary list) from the configuration file +# ---------------------------------------------------------- +from autotest.client.shared import cartesian_config + +str = """ +# This string will be parsed after tests.cfg. Make any desired changes to the +# test configuration here. For example: +#install, setup: timeout_multiplier = 3 +#display = sdl +""" + +parser = cartesian_config.Parser() +parser.parse_file(os.path.join(pwd, "tests.cfg")) +parser.parse_string(str) + +tests = list(parser.get_dicts()) + +# ------------- +# Run the tests +# ------------- +from autotest.client.virt import scheduler +from autotest.client import utils + +# total_cpus defaults to the number of CPUs reported by /proc/cpuinfo +total_cpus = utils.count_cpus() +# total_mem defaults to 3/4 of the total memory reported by 'free' +total_mem = int(commands.getoutput("free -m").splitlines()[1].split()[1]) * 3/4 +# We probably won't need more workers than CPUs +num_workers = total_cpus + +# Start the scheduler and workers +s = scheduler.scheduler(tests, num_workers, total_cpus, total_mem, pwd) +job.parallel([s.scheduler], + *[(s.worker, i, job.run_test) for i in range(num_workers)]) + +# create the html report in result dir +reporter = os.path.join(pwd, 'make_html_report.py') +html_file = os.path.join(job.resultdir,'results.html') +os.system('%s -r %s -f %s -R'%(reporter, job.resultdir, html_file)) diff --git a/kvm/control.spice b/kvm/control.spice new file mode 100644 index 00000000..0133b330 --- /dev/null +++ b/kvm/control.spice @@ -0,0 +1,62 @@ +AUTHOR = 'lkocman@redhat.com (Lubos Kocman)' +TIME = 'MEDIUM' +NAME = 'Spice test' +TEST_TYPE = 'client' +TEST_CLASS = 'Virtualization' +TEST_CATEGORY = 'Functional' + +DOC = """ +Executes the KVM test framework on a given host. This module is separated in +minor functions, that execute different tests for doing Quality Assurance on +KVM (both kernelspace and userspace) code. + +For online docs, please refer to http://www.linux-kvm.org/page/KVM-Autotest +""" + +import sys, os, logging +from autotest.client.shared import cartesian_config +from autotest.client.virt import utils_misc + +# set English environment (command output might be localized, need to be safe) +os.environ['LANG'] = 'en_US.UTF-8' + +str = """ +# This string will be parsed after build.cfg. Make any desired changes to the +# build configuration here. For example (to install from koji/brew): +# installers = koji_qemu_kvm +""" + +parser = cartesian_config.Parser() +kvm_test_dir = os.path.join(os.environ['AUTODIR'],'tests/kvm') +parser.parse_file(os.path.join(kvm_test_dir, "build.cfg")) +parser.parse_string(str) +if not utils_misc.run_tests(parser, job): + logging.error("KVM build step failed, exiting.") + sys.exit(1) + +str = """ +# This string will be parsed after tests-spice.cfg. Make any desired changes to the +# test configuration here. 
For example: +#display = sdl +#install, setup: timeout_multiplier = 3 +""" + +parser = cartesian_config.Parser() +parser.parse_file(os.path.join(kvm_test_dir, "tests-spice.cfg")) + +if args: + # We get test parameters from command line + for arg in args: + try: + (key, value) = re.findall("^(\w+)=(.*)", arg)[0] + if key == "only": + str += "only %s\n" % value + elif key == "no": + str += "no %s\n" % value + else: + str += "%s = %s\n" % (key, value) + except IndexError: + pass +parser.parse_string(str) + +utils_misc.run_tests(parser, job) diff --git a/kvm/control.unittests b/kvm/control.unittests new file mode 100644 index 00000000..a3276456 --- /dev/null +++ b/kvm/control.unittests @@ -0,0 +1,26 @@ +AUTHOR = """ +mgoldish@redhat.com (Michael Goldish) +nsprei@redhat.com (Naphtali Sprei) +lmr@redhat.com (Lucas Meneghel Rodrigues) +""" +TIME = 'MEDIUM' +NAME = 'KVM Test (Unittests)' +TEST_TYPE = 'client' +TEST_CLASS = 'Virtualization' +TEST_CATEGORY = 'Unittest' + +DOC = """ +Runs the unittests available for a given KVM build. +""" + +import sys, os, logging +from autotest.client.shared import cartesian_config +from autotest.client.virt import utils_misc + +parser = cartesian_config.Parser() +kvm_test_dir = os.path.join(os.environ['AUTODIR'],'tests/kvm') +tests_cfg_path = os.path.join(kvm_test_dir, "unittests.cfg") +parser.parse_file(tests_cfg_path) + +# Run the tests +utils_misc.run_tests(parser, job) diff --git a/kvm/get_started.py b/kvm/get_started.py new file mode 100755 index 00000000..49abadfc --- /dev/null +++ b/kvm/get_started.py @@ -0,0 +1,25 @@ +#!/usr/bin/python +""" +Program to help setup kvm test environment + +@copyright: Red Hat 2010 +""" +import os, sys +try: + import autotest.common as common +except ImportError: + import common +from autotest.client.virt import utils_misc + +test_name = "kvm" +test_dir = os.path.dirname(sys.modules[__name__].__file__) +test_dir = os.path.abspath(test_dir) +base_dir = "/tmp/kvm_autotest_root" +default_userspace_paths = ["/usr/bin/qemu-kvm", "/usr/bin/qemu-img"] +check_modules = ["kvm", "kvm-%s" % utils_misc.get_cpu_vendor(verbose=False)] +online_docs_url = "https://github.com/autotest/autotest/wiki/KVMAutotest-GetStartedClient" + +if __name__ == "__main__": + utils_misc.virt_test_assistant(test_name, test_dir, base_dir, + default_userspace_paths, check_modules, + online_docs_url) diff --git a/kvm/kvm.py b/kvm/kvm.py new file mode 100644 index 00000000..117d84dd --- /dev/null +++ b/kvm/kvm.py @@ -0,0 +1,20 @@ +from autotest.client.virt import virt_test + + +class kvm(virt_test.virt_test): + """ + Suite of KVM virtualization functional tests. + Contains tests for testing both KVM kernel code and userspace code. + + @copyright: Red Hat 2008-2009 + @author: Uri Lublin (uril@redhat.com) + @author: Dror Russo (drusso@redhat.com) + @author: Michael Goldish (mgoldish@redhat.com) + @author: David Huff (dhuff@redhat.com) + @author: Alexey Eromenko (aeromenk@redhat.com) + @author: Mike Burns (mburns@redhat.com) + + @see: http://www.linux-kvm.org/page/KVM-Autotest/Client_Install + (Online doc - Getting started with KVM testing) + """ + pass diff --git a/kvm/multi-host-tests.cfg.sample b/kvm/multi-host-tests.cfg.sample new file mode 100644 index 00000000..bc0ce03d --- /dev/null +++ b/kvm/multi-host-tests.cfg.sample @@ -0,0 +1,43 @@ +# Copy this file to multi-host-tests.cfg and edit it. +# +# This file contains the test set definitions for multi host tests. + +# Include the shared test files. +include tests-shared.cfg + +# Here are the test sets variants. 
The variant 'qemu_kvm_windows_quick' is +# fully commented, the following ones have comments only on noteworthy points +variants: + # Runs qemu-kvm, Windows Vista 64 bit guest OS, install, boot, shutdown + - @qemu_migrate_multi_host: + qemu_binary = /usr/bin/qemu-kvm + qemu_img_binary = /usr/bin/qemu-img + qemu_io_binary = /usr/bin/qemu-io + nic_mode = tap + only qcow2 + only virtio_net + only virtio_blk + only smp2 + only no_pci_assignable + only no_9p_export + only smallpages + only Fedora.15.64 + only migrate_multi_host + + # Runs qemu, f16 64 bit guest OS, install, boot, shutdown + - @qemu_cpuflags_multi_host: + qemu_binary = /usr/bin/qemu-kvm + qemu_img_binary = /usr/bin/qemu-img + qemu_io_binary = /usr/bin/qemu-io + nic_mode = tap + only qcow2 + only virtio_net + only virtio_blk + only smp2 + only no_pci_assignable + only no_9p_export + only smallpages + only Fedora.15.64 + only cpuflags_multi_host + +only qemu_migrate_multi_host diff --git a/kvm/multi_host.srv b/kvm/multi_host.srv new file mode 100644 index 00000000..72939dcd --- /dev/null +++ b/kvm/multi_host.srv @@ -0,0 +1,149 @@ +AUTHOR = "Jiri Zupka " +TIME = "SHORT" +NAME = "" +TEST_CATEGORY = "Functional" +TEST_CLASS = "Virtualization" +TEST_TYPE = "Server" + +DOC = """ +KVM tests (multi-host) server control + +Runs tests across multiple hosts. It uses the config file +'multi-host-tests.cfg' in order to yield the appropriate +dicts for the multi host test. +""" + +import sys, os, commands, glob, shutil, logging, random +from autotest.server import utils +from autotest.client.shared import cartesian_config, error + +# Specify the directory of autotest before you start this test +AUTOTEST_DIR = job.clientdir + +KVM_DIR = os.path.join(AUTOTEST_DIR, 'tests', 'kvm') + +CONTROL_MAIN_PART = """ +testname = "kvm" +bindir = os.path.join(job.testdir, testname) +job.install_pkg(testname, 'test', bindir) + +kvm_test_dir = os.path.join(os.environ['AUTODIR'],'tests', 'kvm') +sys.path.append(kvm_test_dir) +""" + +try: + import autotest.common +except ImportError: + import common + +def generate_mac_address(): + r = random.SystemRandom() + mac = "9a:%02x:%02x:%02x:%02x:%02x" % (r.randint(0x00, 0xff), + r.randint(0x00, 0xff), + r.randint(0x00, 0xff), + r.randint(0x00, 0xff), + r.randint(0x00, 0xff)) + return mac + + +def run(machines): + logging.info("KVM test running on hosts %s\n", machines) + class Machines(object): + def __init__(self, host): + self.host = host + self.at = None + self.params = None + self.control = None + + _hosts = {} + for machine in machines: + _hosts[machine] = Machines(hosts.create_host(machine)) + + ats = [] + for host in _hosts.itervalues(): + host.at = autotest_remote.Autotest(host.host) + + cfg_file = os.path.join(KVM_DIR, "multi-host-tests.cfg") + + if not os.path.exists(cfg_file): + raise error.JobError("Config file %s was not found", cfg_file) + + # Get test set (dictionary list) from the configuration file + parser = cartesian_config.Parser() + parser.parse_file(cfg_file) + test_dicts = parser.get_dicts() + + ips = [] + for machine in machines: + host = _hosts[machine] + ips.append(host.host.ip) + + for params in test_dicts: + + params['hosts'] = ips + + params['not_preprocess'] = "yes" + for vm in params.get("vms").split(): + for nic in params.get('nics',"").split(): + params['mac_%s_%s' % (nic, vm)] = generate_mac_address() + + params['master_images_clone'] = "image1" + params['kill_vm'] = "yes" + + s_host = _hosts[machines[0]] + s_host.params = params.copy() + s_host.params['clone_master'] = "yes" + 
s_host.params['hostid'] = machines[0] + + for machine in machines[1:]: + host = _hosts[machine] + host.params = params.copy() + host.params['clone_master'] = "no" + host.params['hostid'] = machine + + # Report the parameters we've received + logging.debug("Test parameters:") + keys = params.keys() + keys.sort() + for key in keys: + logging.debug(" %s = %s", key, params[key]) + + for machine in machines: + host = _hosts[machine] + host.control = CONTROL_MAIN_PART + + for machine in machines: + host = _hosts[machine] + host.control += ("job.run_test('kvm', tag='%s', params=%s)" % + (host.params['shortname'], host.params)) + + logging.debug('Master control file:\n%s', _hosts[machines[0]].control) + for machine in machines[1:]: + host = _hosts[machine] + logging.debug('Slave control file:\n%s', host.control) + + commands = [] + + for machine in machines: + host = _hosts[machine] + commands.append(subcommand(host.at.run, + [host.control, host.host.hostname])) + + try: + parallel(commands) + except error.AutoservError as e: + logging.error(e) + +if 'all' in args: + # Run test with all machines at once. + run(machines) +else: + # Grab the pairs (and failures) + (pairs, failures) = utils.form_ntuples_from_machines(machines, 2) + + # Log the failures + for failure in failures: + job.record("FAIL", failure[0], "kvm", failure[1]) + + # Now run through each pair and run + job.parallel_simple(run, pairs, log=False) diff --git a/kvm/tests-shared.cfg.sample b/kvm/tests-shared.cfg.sample new file mode 100644 index 00000000..b8d930e8 --- /dev/null +++ b/kvm/tests-shared.cfg.sample @@ -0,0 +1,52 @@ +# Copy this file to tests-shared.cfg and edit it. +# +# This file contains the base test set definitions, shared among single host +# and multi host jobs. + +# Virtualization type (kvm or libvirt) +vm_type = kvm +# The hypervisor uri (default, qemu://hostname/system, etc.) +# where default or unset means derive from installed system +connect_uri = default + +# Include the base config files. +include base.cfg +include subtests.cfg +include guest-os.cfg +include guest-hw.cfg +include cdkeys.cfg +include virtio-win.cfg + +# Additional directory for find virt type tests. Relative to client/tests +other_tests_dirs = "" + +# Modify/comment the following lines if you wish to modify the paths of the +# image files, ISO files or qemu binaries. +# +# As for the defaults: +# * qemu and qemu-img are expected to be found under /usr/bin/qemu-kvm and +# /usr/bin/qemu-img respectively. +# * All image files are expected under /tmp/kvm_autotest_root/images/ +# * All install iso files are expected under /tmp/kvm_autotest_root/isos/ +# * The parameters cdrom_unattended, floppy, kernel and initrd are generated +# by KVM autotest, so remember to put them under a writable location +# (for example, the cdrom share can be read only) +image_name(_.*)? ?<= /tmp/kvm_autotest_root/images/ +cdrom(_.*)? ?<= /tmp/kvm_autotest_root/ +floppy(_.*)? ?<= /tmp/kvm_autotest_root/ +Linux..unattended_install: + kernel ?<= /tmp/kvm_autotest_root/ + initrd ?<= /tmp/kvm_autotest_root/ + +# You may provide information about the DTM server for WHQL tests here: +#whql: +# server_address = 10.20.30.40 +# server_shell_port = 10022 +# server_file_transfer_port = 10023 +# Note that the DTM server must run rss.exe (available under deps/), +# preferably with administrator privileges. 
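
The path overrides near the top of tests-shared.cfg.sample (the "?<=" prepend operator) only take effect once a control file feeds this file, via tests.cfg, into the Cartesian config parser. As an illustration of that flow, here is a minimal sketch, not part of the patch, using the same cartesian_config API that the control files above use; the cfg path and the override values are assumptions for the example:

from autotest.client.shared import cartesian_config

parser = cartesian_config.Parser()
# Hypothetical path; kvm/control builds it from the AUTODIR environment.
parser.parse_file("/path/to/tests.cfg")
# Extra filters and overrides, in the same syntax the control files accept
# as key=value, "only ..." and "no ..." arguments on the command line.
parser.parse_string("""
only qemu_kvm_f17_quick
image_name(_.*)? ?<= /var/lib/kvm_autotest/images/
""")

# Each generated dict is one fully expanded variant combination (one test).
for params in parser.get_dicts():
    print params["shortname"]

Run against the sample files shipped here, this yields roughly one dict per test in the qemu_kvm_f17_quick set (install, boot, shutdown), which utils_misc.run_tests(parser, job) then executes.
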
+ +# Uncomment the following lines to enable abort-on-error mode: +#abort_on_error = yes +#kill_vm.* ?= no +#kill_unresponsive_vms.* ?= no diff --git a/kvm/tests-spice.cfg.sample b/kvm/tests-spice.cfg.sample new file mode 100644 index 00000000..e59c69ef --- /dev/null +++ b/kvm/tests-spice.cfg.sample @@ -0,0 +1,294 @@ +include tests-shared.cfg + +variants: + - qxl: + +variants: + - vnc: + display = vnc + vga = std + + - spice: + vga = qxl + display = spice + + variants: + - 1monitor: + qxl_dev_nr = 1 + - 2monitor: + qxl_dev_nr = 2 + - 3monitor: + qxl_dev_nr = 3 + - 4monitor: + qxl_dev_nr = 4 + + variants: + - @no_password: + - password: + spice_password = 12456 + variants: + - @no_ssl: + spice_ssl = no + spice_port = 3000 + - bad_port: + spice_port = -1 + - ssl: + spice_ssl = yes + spice_tls_port = 3200 + spice_tls_ciphers = DEFAULT + spice_gen_x509 = yes + spice_x509_dir = yes + spice_x509_prefix = /tmp/spice_x509d + spice_x509_key_file = server-key.pem + spice_x509_cacert_file = ca-cert.pem + spice_x509_cert_file = server-cert.pem + spice_x509_key_password = testPassPhrase + spice_x509_cacert_subj = /C=CZ/L=BRNO/O=SPICE/CN=my CA + spice_x509_server_subj = /C=CZ/L=BRNO/O=SPICE/CN=my Server + spice_secure_channels = main, inputs + spice_client_host_subject = yes + variants: + - key_password: + spice_x509_secure = yes + - @no_key_password: + spice_x509_secure = no + variants: + - @default_ic: + spice_image_compression = auto_glz + - auto_glz_ic: + spice_image_compression = auto_glz + - auto_lz_ic: + spice_image_compression = auto_lz + - quic_ic: + spice_image_compression = quic + - glz_ic: + spice_image_compression = glz + - lz_ic: + spice_image_compression = lz + - no_ic: + spice_image_compression = off + - bad_ic: + spice_image_compression = bad_value + + variants: + - @default_jpeg_wc: + spice_jpeg_wan_compression = auto + - auto_jpeg_wc: + spice_jpeg_wan_compression = auto + - off_jpeg_wc: + spice_jpeg_wan_compression = off + - on_jpeg_wc: + spice_jpeg_wan_compression = always + - bad_jpeg_wc: + spice_jpeg_wan_compression = bad_value + + variants: + - @default_zlib_wc: + spice_zlib_glz_wan_compression = auto + - auto_zlib_wc: + spice_zlib_glz_wan_compression = auto + - off_zlib_wc: + spice_zlib_glz_wan_compression = off + - on_zlib_wc: + spice_zlib_glz_wan_compression = always + - bad_zlib_wc: + spice_zlib_glz_wan_compression = bad_value + + variants: + - @default_sv: + spice_streaming_video = filter + - sv: + spice_streaming_video = all + - filter_sv: + spice_streaming_video = filter + - no_sv: + spice_streaming_video = off + - bad_sv: + spice_streaming_video = bad_value + + + variants: + -@default_pc: + spice_playback_compression = on + -pc: + spice_playback_compression = on + -no_pc: + spice_playback_compression = off + -bad_pc: + spice_playback_compression = bad_value + + variants: + -ipv6: + spice_ipv6 = yes + spice_ipv4=no + -ipv4: + spice_ipv4=yes + spice_ipv6=no + -default_ipv: + spice_ipv4=no + spice_ipv6=no +variants: + + - qemu_kvm_rhel63_install_client: + # Use this only when you need to create rhel63 image qcow + qemu_binary = /usr/libexec/qemu-kvm + qemu_img_binary = /usr/bin/qemu-img + qemu_io_binary = /usr/bin/qemu-io + image_name = /tmp/kvm_autotest_root/images/rhel63-64_client + only qcow2 + only rtl8139 + only ide + only smp2 + only no_9p_export + only no_pci_assignable + only smallpages + only Linux.RHEL.6.3.x86_64 + only spice.default_ipv.default_pc.default_sv.default_zlib_wc.default_jpeg_wc.default_ic.no_ssl.no_password.1monitor + only 
unattended_install.cdrom.extra_cdrom_ks + + # Runs qemu-kvm Windows guest install + - @qemu_kvm_windows_install_guest: + # We want qemu-kvm for this run + qemu_binary = /usr/libexec/qemu-kvm + qemu_img_binary = /usr/bin/qemu-img + qemu_io_binary = /usr/bin/qemu-io + # Only qcow2 file format + only qcow2 + # Only rtl8139 for nw card (default on qemu-kvm) + only rtl8139 + # Only ide hard drives + only ide + # qemu-kvm will start only with -smp 2 (2 processors) + only smp2 + # Disable 9p export by default + only no_9p_export + # No PCI assignable devices + only no_pci_assignable + # No large memory pages + only smallpages + # Operating system choice + only Win7.64.sp1 + # Subtest choice. You can modify that line to add more subtests + only spice.default_ipv.default_pc.default_sv.default_zlib_wc.default_jpeg_wc.default_ic.no_ssl.no_password.1monitor + only unattended_install.cdrom + + + - qemu_kvm_rhel63_install_guest: + # Use this only when you need to create rhel63 image qcow + qemu_binary = /usr/libexec/qemu-kvm + qemu_img_binary = /usr/bin/qemu-img + qemu_io_binary = /usr/bin/qemu-io + only qcow2 + only rtl8139 + only ide + only smp2 + only no_9p_export + only no_pci_assignable + only smallpages + only Linux.RHEL.6.3.x86_64 + only spice.default_ipv.default_pc.default_sv.default_zlib_wc.default_jpeg_wc.default_ic.no_ssl.no_password.1monitor + only unattended_install.cdrom.extra_cdrom_ks + + - @remote_viewer_rhel63ssl: + qemu_binary = /usr/libexec/qemu-kvm + qemu_img_binary = /usr/bin/qemu-img + qemu_io_binary = /usr/bin/qemu-io + rv_binary = /usr/bin/remote-viewer + only qcow2 + only e1000 + only ide + only up + only no_9p_export + only no_pci_assignable + only smallpages + only Linux.RHEL.6.3.x86_64 + only spice.default_ipv.default_pc.default_sv.default_zlib_wc.default_jpeg_wc.default_ic.ssl.key_password.password.1monitor + only rv_connect.RHEL.6.3.x86_64, shutdown + + - @remote_viewer_rhel63_quick: + qemu_binary = /usr/libexec/qemu-kvm + qemu_img_binary = /usr/bin/qemu-img + qemu_io_binary = /usr/bin/qemu-io + rv_binary = /usr/bin/remote-viewer + only qcow2 + only e1000 + only ide + only up + only no_9p_export + only no_pci_assignable + only smallpages + only Linux.RHEL.6.3.x86_64 + only spice.default_ipv.default_pc.default_sv.default_zlib_wc.default_jpeg_wc.default_ic.no_ssl.no_password.1monitor + only rv_connect.RHEL.6.3.x86_64, shutdown + + - @remote_viewer_win_guest_quick: + qemu_binary = /usr/libexec/qemu-kvm + qemu_img_binary = /usr/bin/qemu-img + qemu_io_binary = /usr/bin/qemu-io + rv_binary = /usr/bin/remote-viewer + only qcow2 + only e1000 + only ide + only up + only no_9p_export + only no_pci_assignable + only smallpages + only Win7.64.sp1 + only spice.default_ipv.default_pc.default_sv.default_zlib_wc.default_jpeg_wc.default_ic.no_ssl.no_password.1monitor + #rv_connect_win is specifically a test meant for a windows guest and a rhel client, rv_connect cannot be used. 
+ only rv_connect_win.RHEL.6.3.beta.x86_64, shutdown + + - @spice_negative_rhel63_all: + qemu_binary = /usr/libexec/qemu-kvm + qemu_img_binary = /usr/bin/qemu-img + qemu_io_binary = /usr/bin/qemu-io + rv_binary = /usr/bin/remote-viewer + only qcow2 + only e1000 + only ide + only up + only no_9p_export + only no_pci_assignable + only smallpages + only Linux.RHEL.6.3.x86_64 + only spice.default_ipv.default_pc.default_sv.default_zlib_wc.default_jpeg_wc.default_ic.bad_port.no_password.1monitor, spice.default_ipv.default_pc.default_sv.default_zlib_wc.default_jpeg_wc.bad_ic.no_ssl.no_password.1monitor, spice.default_ipv.default_pc.default_sv.default_zlib_wc.bad_jpeg_wc.default_ic.no_ssl.no_password.1monitor, spice.default_ipv.default_pc.default_sv.bad_zlib_wc.default_jpeg_wc.default_ic.no_ssl.no_password.1monitor, spice.default_ipv.default_pc.bad_sv.default_zlib_wc.default_jpeg_wc.default_ic.no_ssl.no_password.1monitor, spice.default_ipv.bad_pc.default_sv.default_zlib_wc.default_jpeg_wc.default_ic.no_ssl.no_password.1monitor + only negative_create + + - @rv_disconnect_rhel63: + qemu_binary = /usr/libexec/qemu-kvm + qemu_img_binary = /usr/bin/qemu-img + qemu_io_binary = /usr/bin/qemu-io + rv_binary = /usr/bin/remote-viewer + only qcow2 + only e1000 + only ide + only up + only no_9p_export + only no_pci_assignable + only smallpages + only Linux.RHEL.6.3.x86_64 + only spice.default_ipv.default_pc.default_sv.default_zlib_wc.default_jpeg_wc.default_ic.no_ssl.no_password.1monitor + only start, rv_connect.RHEL.6.3.x86_64, rv_disconnect.RHEL.6.3.x86_64, shutdown + +variants: + - Create_VMs: + only qemu_kvm_rhel63_install_guest, qemu_kvm_rhel63_install_client + - Install_Win_Guest: + only qemu_kvm_windows_install_guest + - Negative_QEMU_Spice_Creation_Tests: + only spice_negative_rhel63_all + - Remote_Viewer_Test: + only remote_viewer_rhel63_quick + - Remote_Viewer_WinGuest_Test: + only remote_viewer_win_guest_quick + - Remote_Viewer_Disconnect_Test: + only rv_disconnect_rhel63 + - Remote_Viewer_SSL_Test: + only remote_viewer_rhel63ssl + +only Create_VMs, Install_Win_Guest, Negative_QEMU_Spice_Creation_Tests, Remote_Viewer_Test, Remote_Viewer_SSL_Test, Remote_Viewer_WinGuest_Test, Remote_Viewer_Disconnect_Test + +# Choose your test list from the testsets defined +# the following is for remote viewer tests with the setup of a rhel client, and a windows guest +#only qemu_kvm_windows_install_guest, qemu_kvm_rhel63_install_client, remote_viewer_win_guest_quick +#only qemu_kvm_rhel63_install_guest, qemu_kvm_rhel63_install_client, remote_viewer_rhel63_quick, rv_disconnect_rhel63, spice_negative_rhel63_all diff --git a/kvm/tests.cfg.sample b/kvm/tests.cfg.sample new file mode 100644 index 00000000..46ac5711 --- /dev/null +++ b/kvm/tests.cfg.sample @@ -0,0 +1,131 @@ +# Copy this file to tests.cfg and edit it. +# +# This file contains the test set definitions. Define your test sets here. + +# Include the base config files. 
+include tests-shared.cfg + +# Here you can override the image name for our custom linux and windows guests +# +CustomGuestLinux: + # Here you can override the default login credentials for your custom guest + username = root + password = 123456 + image_name = custom_image_linux + image_size = 10G + # If you want to use a block device as the vm disk, uncomment the 2 lines + # below, pointing the image name for the device you want + #image_name = /dev/mapper/vg_linux_guest + #image_raw_device = yes + +CustomGuestWindows: + image_name = custom_image_windows + image_size = 10G + # If you want to use a block device as the vm disk, uncomment the 2 lines + # below, pointing the image name for the device you want + #image_name = /dev/mapper/vg_windows_guest + #image_raw_device = yes + +# Here are the test sets variants. The variant 'qemu_kvm_windows_quick' is +# fully commented, the following ones have comments only on noteworthy points +variants: + # Runs all variants defined. HUGE test set. + - @full: + + # Runs qemu-kvm, Windows Vista 64 bit guest OS, install, boot, shutdown + - @qemu_kvm_windows_quick: + # We want qemu-kvm for this run + qemu_binary = /usr/bin/qemu-kvm + qemu_img_binary = /usr/bin/qemu-img + qemu_io_binary = /usr/bin/qemu-io + # Only qcow2 file format + only qcow2 + # Only rtl8139 for nw card (default on qemu-kvm) + only rtl8139 + # Only ide hard drives + only ide + # qemu-kvm will start only with -smp 2 (2 processors) + only smp2 + # Disable 9p export by default + only no_9p_export + # No PCI assignable devices + only no_pci_assignable + # No large memory pages + only smallpages + # Operating system choice + only Win7.64.sp1 + # Subtest choice. You can modify that line to add more subtests + only unattended_install.cdrom, boot, shutdown + + # Runs qemu, f17 64 bit guest OS, install, boot, shutdown + - @qemu_f17_quick: + # We want qemu for this run + qemu_binary = /usr/bin/qemu + qemu_img_binary = /usr/bin/qemu-img + qemu_io_binary = /usr/bin/qemu-io + only qcow2 + only virtio_net + only virtio_blk + # qemu using kvm doesn't support smp yet + only up + only no_9p_export + only no_pci_assignable + only smallpages + only Fedora.17.64 + only unattended_install.cdrom.extra_cdrom_ks, boot, shutdown + # qemu needs -enable-kvm on the cmdline + extra_params += ' -enable-kvm' + + # Runs qemu-kvm, f17 64 bit guest OS, install, boot, shutdown + - @qemu_kvm_f17_quick: + # We want qemu-kvm for this run + qemu_binary = /usr/bin/qemu-kvm + qemu_img_binary = /usr/bin/qemu-img + qemu_io_binary = /usr/bin/qemu-io + only qcow2 + only virtio_net + only virtio_blk + only smp2 + only no_9p_export + only no_pci_assignable + only smallpages + only Fedora.17.64 + only unattended_install.cdrom.extra_cdrom_ks, boot, shutdown + + # Runs qemu-kvm, f17 64 bit guest OS, install, starts qemu-kvm + # with 9P support and runs 9P CI tests + - @qemu_kvm_9p_export: + qemu_binary = /usr/bin/qemu-kvm + qemu_img_binary = /usr/bin/qemu-img + qemu_io_binary = /usr/bin/qemu-io + only raw + only virtio_net + only virtio_blk + only smp2 + only no_pci_assignable + only smallpages + only 9p_export + only Fedora.17.64 + only unattended_install.cdrom.extra_cdrom_ks, boot, 9p.9p_ci, shutdown + + # Runs your own guest image (qcow2, can be adjusted), all migration tests + # (on a core2 duo laptop with HD and 4GB RAM, F15 host took 3 hours to run) + # Be warned, disk stress + migration can corrupt your image, so make sure + # you have proper backups + - @qemu_kvm_custom_migrate: + # We want qemu-kvm for this run + qemu_binary = 
/usr/bin/qemu-kvm + qemu_img_binary = /usr/bin/qemu-img + qemu_io_binary = /usr/bin/qemu-io + only qcow2 + only virtio_net + only virtio_blk + only smp2 + only no_9p_export + only no_pci_assignable + only smallpages + only CustomGuestLinux + only migrate + +# Choose your test list from the testsets defined +only qemu_kvm_f17_quick diff --git a/kvm/tests/9p.py b/kvm/tests/9p.py new file mode 100644 index 00000000..7693b893 --- /dev/null +++ b/kvm/tests/9p.py @@ -0,0 +1,55 @@ +import os,logging +from autotest.client.shared import error +from autotest.client.virt import utils_test + + +def run_9p(test, params, env): + """ + Run an autotest test inside a guest. + + @param test: kvm test object. + @param params: Dictionary with test parameters. + @param env: Dictionary with the test environment. + """ + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + timeout = int(params.get("login_timeout", 360)) + session = vm.wait_for_login(timeout=timeout) + + mount_dir = params.get("9p_mount_dir") + + if mount_dir is None: + logging.info("User Variable for mount dir is not set") + else: + session.cmd("mkdir -p %s" % mount_dir) + + mount_option = " trans=virtio" + + p9_proto_version = params.get("9p_proto_version", "9p2000.L") + mount_option += ",version=" + p9_proto_version + + guest_cache = params.get("9p_guest_cache") + if guest_cache == "yes": + mount_option += ",cache=loose" + + posix_acl = params.get("9p_posix_acl") + if posix_acl == "yes": + mount_option += ",posixacl" + + logging.info("Mounting 9p mount point with options %s" % mount_option) + cmd = "mount -t 9p -o %s autotest_tag %s" % (mount_option, mount_dir) + mount_status = session.get_command_status(cmd) + + if (mount_status != 0): + logging.error("mount failed") + raise error.TestFail('mount failed.') + + # Collect test parameters + timeout = int(params.get("test_timeout", 14400)) + control_path = os.path.join(test.virtdir, "autotest_control", + params.get("test_control_file")) + + outputdir = test.outputdir + + utils_test.run_autotest(vm, session, control_path, + timeout, outputdir, params) diff --git a/kvm/tests/__init__.py b/kvm/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/kvm/tests/balloon_check.py b/kvm/tests/balloon_check.py new file mode 100644 index 00000000..139d37fd --- /dev/null +++ b/kvm/tests/balloon_check.py @@ -0,0 +1,149 @@ +import re, logging, random, time +from autotest.client.shared import error +from autotest.client.virt import kvm_monitor, utils_test + + +def run_balloon_check(test, params, env): + """ + Check Memory ballooning: + 1) Boot a guest + 2) Change the memory between MemFree to Assigned memory of memory + of guest using ballooning + 3) check memory info + + @param test: kvm test object + @param params: Dictionary with the test parameters + @param env: Dictionary with test environment. + """ + def check_ballooned_memory(): + """ + Verify the actual memory reported by monitor command info balloon. If + the operation failed, increase the failure counter. + + @return: Number of failures occurred during operation. + """ + fail = 0 + try: + output = vm.monitor.info("balloon") + except kvm_monitor.MonitorError, e: + logging.error(e) + fail += 1 + return 0, fail + return int(re.findall("\d+", str(output))[0]), fail + + def balloon_memory(new_mem, offset): + """ + Baloon memory to new_mem and verifies on both qemu monitor and + guest OS if change worked. + + @param new_mem: New desired memory. + @return: Number of failures occurred during operation. 
+ """ + _, fail = check_ballooned_memory() + if params.get("monitor_type") == "qmp": + new_mem = new_mem * 1024 * 1024 + logging.info("Changing VM memory to %s", new_mem) + # This should be replaced by proper monitor method call + vm.monitor.send_args_cmd("balloon value=%s" % new_mem) + time.sleep(20) + + ballooned_mem, cfail = check_ballooned_memory() + fail += cfail + # Verify whether the VM machine reports the correct new memory + if ballooned_mem != new_mem: + logging.error("Memory ballooning failed while changing memory " + "to %s", new_mem) + fail += 1 + + # Verify whether the guest OS reports the correct new memory + current_mem_guest = vm.get_current_memory_size() + fail += cfail + current_mem_guest = current_mem_guest + offset + if params.get("monitor_type") == "qmp": + current_mem_guest = current_mem_guest * 1024 * 1024 + # Current memory figures will allways be a little smaller than new + # memory. If they are higher, ballooning failed on guest perspective + if current_mem_guest > new_mem: + logging.error("Guest OS reports %s of RAM, but new ballooned RAM " + "is %s", current_mem_guest, new_mem) + fail += 1 + return fail + + fail = 0 + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + timeout = int(params.get("login_timeout", 360)) + session = vm.wait_for_login(timeout=timeout) + + # Upper limit that we can raise the memory + vm_assigned_mem = int(params.get("mem")) + + # Check memory size + logging.info("Memory check") + boot_mem = vm.get_memory_size() + if boot_mem != vm_assigned_mem: + logging.error("Memory size mismatch:") + logging.error(" Assigned to VM: %s", vm_assigned_mem) + logging.error(" Reported by guest OS at boot: %s", boot_mem) + fail += 1 + # Check if info balloon works or not + current_vm_mem, cfail = check_ballooned_memory() + if cfail: + fail += cfail + if current_vm_mem: + logging.info("Current VM memory according to ballooner: %s", + current_vm_mem) + # Get the offset of memory report by guest system + guest_memory = vm.get_current_memory_size() + offset = vm_assigned_mem - guest_memory + + # Reduce memory to random size between Free memory + # to max memory size + s, o = session.cmd_status_output("cat /proc/meminfo") + if s != 0: + raise error.TestError("Can not get guest memory information") + + vm_mem_free = int(re.findall('MemFree:\s+(\d+).*', o)[0]) / 1024 + + new_mem = int(random.uniform(vm_assigned_mem - vm_mem_free, vm_assigned_mem)) + fail += balloon_memory(new_mem, offset) + # Run option test after evict memory + if params.has_key('sub_balloon_test_evict'): + balloon_test = params['sub_balloon_test_evict'] + utils_test.run_virt_sub_test(test, params, env, sub_type=balloon_test) + if balloon_test == "shutdown" : + logging.info("Guest shutdown normally after balloon") + return + # Reset memory value to original memory assigned on qemu. 
This will ensure + # we won't trigger guest OOM killer while running multiple iterations + fail += balloon_memory(vm_assigned_mem, offset) + + # Run sub test after enlarge memory + if params.has_key('sub_balloon_test_enlarge'): + balloon_test = params['sub_balloon_test_enlarge'] + utils_test.run_virt_sub_test(test, params, env, sub_type=balloon_test) + if balloon_test == "shutdown" : + logging.info("Guest shutdown normally after balloon") + return + + #Check memory after sub test running + logging.info("Check memory after tests") + boot_mem = vm.get_memory_size() + if boot_mem != vm_assigned_mem: + fail += 1 + # Check if info balloon works or not + current_vm_mem, cfail = check_ballooned_memory() + if params.get("monitor_type") == "qmp": + current_vm_mem = current_vm_mem / 1024 / 1024 + if current_vm_mem != vm_assigned_mem: + fail += 1 + logging.error("Memory size after tests:") + logging.error(" Assigned to VM: %s", vm_assigned_mem) + logging.error(" Reported by guest OS: %s", boot_mem) + logging.error(" Reported by monitor: %s", current_vm_mem) + + # Close stablished session + session.close() + # Check if any failures happen during the whole test + if fail != 0: + raise error.TestFail("Memory ballooning test failed") diff --git a/kvm/tests/block_stream.py b/kvm/tests/block_stream.py new file mode 100644 index 00000000..675d341d --- /dev/null +++ b/kvm/tests/block_stream.py @@ -0,0 +1,141 @@ +import re, os, logging, time +from autotest.client.shared import utils, error +from autotest.client.virt import kvm_monitor +from autotest.client.virt import env_process + +@error.context_aware +def run_block_stream(test, params, env): + """ + Test block streaming functionality. + + 1) Create a image_bak.img with the backing file image.img + 2) Start the image_bak.img in qemu command line. + 3) Request for block-stream ide0-hd0/virtio0 + 4) Wait till the block job finishs + 5) Check for backing file in image_bak.img + 6) TODO: Check for the size of the image_bak.img should not exceeds the image.img + 7) TODO(extra): Block job completion can be check in QMP + """ + image_format = params.get("image_format") + image_name = params.get("image_name", "image") + drive_format = params.get("drive_format") + backing_file_name = "%s_bak" % (image_name) + qemu_img = params.get("qemu_img_binary") + block_stream_cmd = "block-stream" + + + def check_block_jobs_info(): + """ + Verify the status of block-jobs reported by monitor command info block-jobs. 
+ @return: parsed output of info block-jobs + """ + fail = 0 + + try: + output = vm.monitor.info("block-jobs") + except kvm_monitor.MonitorError, e: + logging.error(e) + fail += 1 + return None, None + return (re.match("\w+", str(output)), re.findall("\d+", str(output))) + + try: + # Remove the existing backing file + backing_file = "%s.%s" % (backing_file_name, image_format) + if os.path.isfile(backing_file): + os.remove(backing_file) + + # Create the new backing file + create_cmd = "%s create -b %s.%s -f %s %s.%s" % (qemu_img, + image_name, + image_format, + image_format, + backing_file_name, + image_format) + error.context("Creating backing file") + utils.system(create_cmd) + + info_cmd = "%s info %s.%s" % (qemu_img,image_name,image_format) + error.context("Image file can not be find") + results = utils.system_output(info_cmd) + logging.info("Infocmd output of basefile: %s" ,results) + + # Set the qemu harddisk to the backing file + logging.info("Original image_name is: %s", params.get('image_name')) + params['image_name'] = backing_file_name + logging.info("Param image_name changed to: %s", + params.get('image_name')) + + # Start virtual machine, using backing file as its harddisk + vm_name = params.get('main_vm') + env_process.preprocess_vm(test, params, env, vm_name) + vm = env.get_vm(vm_name) + vm.create() + timeout = int(params.get("login_timeout", 360)) + session = vm.wait_for_login(timeout=timeout) + + info_cmd = "%s info %s.%s" % (qemu_img, backing_file_name, image_format) + error.context("Image file can not be find") + results = utils.system_output(info_cmd) + logging.info("Infocmd output of backing file before block streaming: " + "%s", results) + + if not re.search("backing file:", str(results)): + raise error.TestFail("Backing file is not available in the " + "backdrive image") + + if vm.monitor.protocol == "human": + block_stream_cmd = "block_stream" + + # Start streaming in qemu-cmd line + if 'ide' in drive_format: + error.context("Block streaming on qemu monitor (ide drive)") + vm.monitor.cmd("%s ide0-hd0" % block_stream_cmd) + elif 'virtio' in drive_format: + error.context("Block streaming on qemu monitor (virtio drive)") + vm.monitor.cmd("%s virtio0" % block_stream_cmd) + else: + raise error.TestError("The drive format is not supported") + + while True: + blkjobout, blkjobstatus = check_block_jobs_info() + if 'Streaming' in blkjobout.group(0): + logging.info("[(Completed bytes): %s (Total bytes): %s " + "(Speed in bytes/s): %s]", blkjobstatus[-3], + blkjobstatus[-2], blkjobstatus[-1]) + time.sleep(10) + continue + if 'No' in blkjobout.group(0): + logging.info("Block job completed") + break + + info_cmd = "%s info %s.%s" % (qemu_img,backing_file_name,image_format) + error.context("Image file can not be find") + results = utils.system_output(info_cmd) + logging.info("Infocmd output of backing file after block streaming: %s", + results) + + if re.search("backing file:", str(results)): + raise error.TestFail(" Backing file is still available in the " + "backdrive image") + # TODO + # The file size should be more/less equal to the "backing file" size + + # Shutdown the virtual machine + vm.destroy() + + # Relogin with the backup-harddrive + vm.create() + timeout = int(params.get("login_timeout", 360)) + session = vm.wait_for_login(timeout=timeout) + logging.info("Checking whether the guest with backup-harddrive boot " + "and respond after block stream completion") + error.context("checking responsiveness of guest") + session.cmd(params.get("alive_test_cmd")) + + # Finally 
shutdown the virtual machine + vm.destroy() + finally: + # Remove the backing file + if os.path.isfile(backing_file): + os.remove(backing_file) diff --git a/kvm/tests/cdrom.py b/kvm/tests/cdrom.py new file mode 100644 index 00000000..cb41ef07 --- /dev/null +++ b/kvm/tests/cdrom.py @@ -0,0 +1,244 @@ +""" +KVM cdrom test +@author: Amos Kong +@author: Lucas Meneghel Rodrigues +@author: Lukas Doktor +@copyright: 2011 Red Hat, Inc. +""" +import logging, re, time, os +from autotest.client.shared import error +from autotest.client import utils +from autotest.client.virt import utils_misc, aexpect, kvm_monitor + + +@error.context_aware +def run_cdrom(test, params, env): + """ + KVM cdrom test: + + 1) Boot up a VM with one iso. + 2) Check if VM identifies correctly the iso file. + 3) * If cdrom_test_autounlock is set, verifies that device is unlocked + <300s after boot + 4) Eject cdrom using monitor and change with another iso several times. + 5) * If cdrom_test_tray_status = yes, tests tray reporting. + 6) Try to format cdrom and check the return string. + 7) Mount cdrom device. + 8) Copy file from cdrom and compare files using diff. + 9) Umount and mount several times. + + @param test: kvm test object + @param params: Dictionary with the test parameters + @param env: Dictionary with test environment. + + @param cfg: workaround_eject_time - Some versions of qemu are unable to + eject CDROM directly after insert + @param cfg: cdrom_test_autounlock - Test whether guest OS unlocks cdrom + after boot (<300s after VM is booted) + @param cfg: cdrom_test_tray_status - Test tray reporting (eject and insert + CD couple of times in guest). + + @warning: Check dmesg for block device failures + """ + def master_cdroms(params): + """ Creates 'new' cdrom with one file on it """ + error.context("creating test cdrom") + os.chdir(test.tmpdir) + cdrom_cd1 = params.get("cdrom_cd1") + if not os.path.isabs(cdrom_cd1): + cdrom_cd1 = os.path.join(test.bindir, cdrom_cd1) + cdrom_dir = os.path.dirname(cdrom_cd1) + utils.run("dd if=/dev/urandom of=orig bs=10M count=1") + utils.run("dd if=/dev/urandom of=new bs=10M count=1") + utils.run("mkisofs -o %s/orig.iso orig" % cdrom_dir) + utils.run("mkisofs -o %s/new.iso new" % cdrom_dir) + return "%s/new.iso" % cdrom_dir + + def cleanup_cdroms(cdrom_dir): + """ Removes created cdrom """ + error.context("cleaning up temp cdrom images") + os.remove("%s/new.iso" % cdrom_dir) + + def get_cdrom_file(device): + """ + @param device: qemu monitor device + @return: file associated with $device device + """ + blocks = vm.monitor.info("block") + cdfile = None + if isinstance(blocks, str): + cdfile = re.findall('%s: .*file=(\S*) ' % device, blocks) + if not cdfile: + return None + else: + cdfile = cdfile[0] + else: + for block in blocks: + if block['device'] == device: + try: + cdfile = block['inserted']['file'] + except KeyError: + continue + return cdfile + + def check_cdrom_tray(cdrom): + """ Checks whether the tray is opend """ + blocks = vm.monitor.info("block") + if isinstance(blocks, str): + for block in blocks.splitlines(): + if cdrom in block: + if "tray-open=1" in block: + return True + elif "tray-open=0" in block: + return False + else: + for block in blocks: + if block['device'] == cdrom and 'tray_open' in block.keys(): + return block['tray_open'] + return None + + def eject_cdrom(device, monitor): + """ Ejects the cdrom using kvm-monitor """ + if isinstance(monitor, kvm_monitor.HumanMonitor): + monitor.cmd("eject %s" % device) + elif isinstance(monitor, kvm_monitor.QMPMonitor): + 
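+            # Editorial note (illustrative only): with QMP the command name
+            # and its arguments travel separately, so the call below is
+            # roughly equivalent to sending
+            #   {"execute": "eject", "arguments": {"device": "ide1-cd0"}}
+            # on the wire ("ide1-cd0" is only an example id), while the
+            # human monitor variant above takes a single formatted string.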
monitor.cmd("eject", args={'device': device}) + + def change_cdrom(device, target, monitor): + """ Changes the medium using kvm-monitor """ + if isinstance(monitor, kvm_monitor.HumanMonitor): + monitor.cmd("change %s %s" % (device, target)) + elif isinstance(monitor, kvm_monitor.QMPMonitor): + monitor.cmd("change", args={'device': device, 'target': target}) + + cdrom_new = master_cdroms(params) + cdrom_dir = os.path.dirname(cdrom_new) + vm = env.get_vm(params["main_vm"]) + vm.create() + + # Some versions of qemu are unable to eject CDROM directly after insert + workaround_eject_time = float(params.get('workaround_eject_time', 0)) + + session = vm.wait_for_login(timeout=int(params.get("login_timeout", 360))) + cdrom_orig = params.get("cdrom_cd1") + if not os.path.isabs(cdrom_orig): + cdrom_orig = os.path.join(test.bindir, cdrom_orig) + cdrom = cdrom_orig + output = session.get_command_output("ls /dev/cdrom*") + cdrom_dev_list = re.findall("/dev/cdrom-\w+|/dev/cdrom\d*", output) + logging.debug("cdrom_dev_list: %s", cdrom_dev_list) + + cdrom_dev = "" + test_cmd = "dd if=%s of=/dev/null bs=1 count=1" + for d in cdrom_dev_list: + try: + output = session.cmd(test_cmd % d) + cdrom_dev = d + break + except aexpect.ShellError: + logging.error(output) + if not cdrom_dev: + raise error.TestFail("Could not find a valid cdrom device") + + error.context("Detecting the existence of a cdrom") + cdfile = cdrom + device = vm.get_block({'file': cdfile}) + if not device: + device = vm.get_block({'backing_file': cdfile}) + if not device: + raise error.TestFail("Could not find a valid cdrom device") + + session.get_command_output("umount %s" % cdrom_dev) + if params.get('cdrom_test_autounlock') == 'yes': + error.context("Trying to unlock the cdrom") + if not utils_misc.wait_for(lambda: not vm.check_block_locked(device), + 300): + raise error.TestFail("Device %s could not be unlocked" % device) + + max_times = int(params.get("max_times", 100)) + error.context("Eject the cdrom in monitor %s times" % max_times) + for i in range(1, max_times): + session.cmd('eject %s' % cdrom_dev) + eject_cdrom(device, vm.monitor) + time.sleep(2) + if get_cdrom_file(device) is not None: + raise error.TestFail("Device %s was not ejected (%s)" % (cdrom, i)) + + cdrom = cdrom_new + # On even attempts, try to change the cdrom + if i % 2 == 0: + cdrom = cdrom_orig + change_cdrom(device, cdrom, vm.monitor) + if get_cdrom_file(device) != cdrom: + raise error.TestFail("It wasn't possible to change cdrom %s (%s)" + % (cdrom, i)) + time.sleep(workaround_eject_time) + + error.context('Eject the cdrom in guest %s times' % max_times) + if params.get('cdrom_test_tray_status') != 'yes': + pass + elif check_cdrom_tray(device) is None: + logging.error("Tray reporting not supported by qemu!") + logging.error("cdrom_test_tray_status skipped...") + else: + for i in range(1, max_times): + session.cmd('eject %s' % cdrom_dev) + if not check_cdrom_tray(device): + raise error.TestFail("Monitor reports closed tray (%s)" % i) + session.cmd('dd if=%s of=/dev/null count=1' % cdrom_dev) + if check_cdrom_tray(device): + raise error.TestFail("Monitor reports opened tray (%s)" % i) + time.sleep(workaround_eject_time) + + error.context("Check whether the cdrom is read-only") + try: + output = session.cmd("echo y | mkfs %s" % cdrom_dev) + raise error.TestFail("Attempt to format cdrom %s succeeded" % + cdrom_dev) + except aexpect.ShellError: + pass + + error.context("Mounting the cdrom under /mnt") + session.cmd("mount %s %s" % (cdrom_dev, "/mnt"), timeout=30) + + 
filename = "new" + + error.context("File copying test") + session.cmd("rm -f /tmp/%s" % filename) + session.cmd("cp -f /mnt/%s /tmp/" % filename) + + error.context("Compare file on disk and on cdrom") + f1_hash = session.cmd("md5sum /mnt/%s" % filename).split()[0].strip() + f2_hash = session.cmd("md5sum /tmp/%s" % filename).split()[0].strip() + if f1_hash != f2_hash: + raise error.TestFail("On disk and on cdrom files are different, " + "md5 mismatch") + + error.context("Mount/Unmount cdrom for %s times" % max_times) + for i in range(1, max_times): + try: + session.cmd("umount %s" % cdrom_dev) + session.cmd("mount %s /mnt" % cdrom_dev) + except aexpect.ShellError: + logging.debug(session.cmd("cat /etc/mtab")) + raise + + session.cmd("umount %s" % cdrom_dev) + + error.context("Cleanup") + # Return the cdrom_orig + cdfile = get_cdrom_file(device) + if cdfile != cdrom_orig: + time.sleep(workaround_eject_time) + session.cmd('eject %s' % cdrom_dev) + eject_cdrom(device, vm.monitor) + if get_cdrom_file(device) is not None: + raise error.TestFail("Device %s was not ejected (%s)" % (cdrom, i)) + + change_cdrom(device, cdrom_orig, vm.monitor) + if get_cdrom_file(device) != cdrom_orig: + raise error.TestFail("It wasn't possible to change cdrom %s (%s)" + % (cdrom, i)) + + session.close() + cleanup_cdroms(cdrom_dir) diff --git a/kvm/tests/cgroup.py b/kvm/tests/cgroup.py new file mode 100644 index 00000000..c2ce65db --- /dev/null +++ b/kvm/tests/cgroup.py @@ -0,0 +1,2009 @@ +""" +cgroup autotest test (on KVM guest) +@author: Lukas Doktor +@copyright: 2011 Red Hat, Inc. +""" +import logging, os, re, time +from autotest.client.shared import error +from autotest.client import utils +from autotest.client.tests.cgroup.cgroup_common import Cgroup +from autotest.client.tests.cgroup.cgroup_common import CgroupModules +from autotest.client.tests.cgroup.cgroup_common import get_load_per_cpu +from autotest.client.virt.env_process import preprocess +from autotest.client.virt import kvm_monitor +from autotest.client.virt.aexpect import ExpectTimeoutError +from autotest.client.virt.aexpect import ExpectProcessTerminatedError +from autotest.client.virt.aexpect import ShellTimeoutError + + +@error.context_aware +def run_cgroup(test, params, env): + """ + Tests the cgroup functions on KVM guests. 
+ """ + # Func + def assign_vm_into_cgroup(vm, cgroup, pwd=None): + """ + Assigns all threads of VM into cgroup + @param vm: desired VM + @param cgroup: cgroup handler + @param pwd: desired cgroup's pwd, cgroup index or None for root cgroup + """ + cgroup.set_cgroup(vm.get_shell_pid(), pwd) + for i in range(10): + for pid in utils.get_children_pids(vm.get_shell_pid()): + try: + cgroup.set_cgroup(int(pid), pwd) + except Exception, detail: # Process might not already exist + if os.path.exists("/proc/%s/" % pid): + raise detail + else: # Thread doesn't exist, try it again + break + else: # All PIDs moved + break + else: + raise error.TestFail("Failed to move all VM threads to new cgroup" + " in %d trials" % i) + + def distance(actual, reference): + """ + Absolute value of relative distance of two numbers + @param actual: actual value + @param reference: reference value + @return: relative distance abs((a-r)/r) (float) + """ + return abs(float(actual - reference) / reference) + + def get_dd_cmd(direction, dev=None, count=None, blocksize=None): + """ + Generates dd_cmd string + @param direction: {read,write,bi} dd direction + @param dev: used device ('vd?') + @param count: count parameter of dd + @param blocksize: blocksize parameter of dd + @return: dd command string + """ + if dev is None: + if get_device_driver() == "virtio": + dev = 'vd?' + else: + dev = '[sh]d?' + if direction == "read": + params = "if=$FILE of=/dev/null iflag=direct" + elif direction == "write": + params = "if=/dev/zero of=$FILE oflag=direct" + else: + params = "if=$FILE of=$FILE iflag=direct oflag=direct" + if blocksize: + params += " bs=%s" % (blocksize) + if count: + params += " count=%s" % (count) + return ("export FILE=$(ls /dev/%s | tail -n 1); touch /tmp/cgroup_lock" + " ; while [ -e /tmp/cgroup_lock ]; do dd %s ; done" + % (dev, params)) + + def get_device_driver(): + """ + Discovers the used block device driver {ide, scsi, virtio_blk} + @return: Used block device driver {ide, scsi, virtio} + """ + return params.get('drive_format', 'virtio') + + def get_maj_min(dev): + """ + Returns the major and minor numbers of the dev device + @return: Tuple(major, minor) numbers of the dev device + """ + try: + rdev = os.stat(dev).st_rdev + ret = (os.major(rdev), os.minor(rdev)) + except Exception, details: + raise error.TestFail("get_maj_min(%s) failed: %s" % + (dev, details)) + return ret + + def rm_scsi_disks(no_disks): + """ + Removes no_disks scsi_debug disks from the last one. 
+ @param no_disks: How many disks to remove + @note: params['cgroup_rmmod_scsi_debug'] == "yes" => rmmod scsi_debug + """ + utils.system("echo -%d > /sys/bus/pseudo/drivers/scsi_debug/add_host" + % no_disks) + + if params.get('cgroup_rmmod_scsi_debug', "no") == "yes": + utils.system("rmmod scsi_debug") + + def param_add_scsi_disks(prefix="scsi-debug-"): + """ + Adds scsi_debug disk to every VM in params['vms'] + @param prefix: adds prefix to drive name + """ + if utils.system("lsmod | grep scsi_debug", ignore_status=True): + utils.system("modprobe scsi_debug dev_size_mb=8 add_host=0") + for name in params.get('vms').split(' '): + disk_name = prefix + name + utils.system("echo 1 >/sys/bus/pseudo/drivers/scsi_debug/add_host") + time.sleep(1) # Wait for device init + dev = utils.system_output("ls /dev/sd* | tail -n 1") + # Enable idling in scsi_debug drive + utils.system("echo 1 > /sys/block/%s/queue/rotational" + % (dev.split('/')[-1])) + vm_disks = params.get('images_%s' % name, + params.get('images', 'image1')) + params['images_%s' % name] = "%s %s" % (vm_disks, disk_name) + params['image_name_%s' % disk_name] = dev + params['image_snapshot_%s' % disk_name] = "no" + params['image_format_%s' % disk_name] = "raw" + params['remove_image_%s' % disk_name] = "no" + params['image_raw_device_%s' % disk_name] = "yes" + + def param_add_file_disks(size, prefix="hd2-"): + """ + Adds file disk to every VM in params['vms'] + @param size: Disk size (1M) + @param prefix: adds prefix to drive name + """ + for name in params.get('vms').split(' '): + vm_disks = params.get('images_%s' % name, + params.get('images', 'image1')) + disk_name = prefix + name + params['images_%s' % name] = "%s %s" % (vm_disks, disk_name) + params['image_size_%s' % disk_name] = size + params['image_name_%s' % disk_name] = disk_name + params['image_snapshot_%s' % disk_name] = "no" + params['force_create_image_%s' % disk_name] = "yes" + params['image_format_%s' % disk_name] = "raw" + params['create_with_dd_%s' % disk_name] = "yes" + params['remove_image_%s' % disk_name] = "yes" + + def param_add_vms(no_vms): + """ + Defines $no_vms in params + @param no_vms: Desired number of VMs + @note: All defined VMs are overwritten. + """ + params['vms'] = "" + for i in range(no_vms): + params['vms'] += "vm%s " % i + params['vms'] = params['vms'][:-1] + + # Tests + @error.context_aware + def blkio_bandwidth(): + """ + Sets blkio.weight for each VM and measure the actual distribution + of read/write speeds. + @note: VMs are created in test + @param cfg: cgroup_test_time - test duration '60' + @param cfg: cgroup_weights - list of R/W weights '[100, 1000]' + @param cfg: cgroup_limit{ ,_read,_write} - allowed R/W threshold '0.1' + """ + def _test(direction): + """ + Executes loop of dd commands, kills it after $test_time and + verifies the speeds using median. 
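+            Each VM runs an endless dd loop in its first session; the second
+            session sends SIGUSR1 (which makes GNU dd print its transfer
+            statistics without stopping) and finally removes the lock file to
+            end the loop, so the per-VM throughput can be averaged from the
+            collected records.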
+ @param direction: "read" / "write" + @return: "" on success or err message when fails + """ + out = [] + # Initiate dd loop on all VMs (2 sessions per VM) + dd_cmd = get_dd_cmd(direction, blocksize="100K") + for i in range(no_vms): + sessions[i * 2].sendline(dd_cmd) + time.sleep(test_time) + for i in range(no_vms): + # Force stats in case no dd cmd finished + sessions[i * 2 + 1].sendline(stat_cmd) + for i in range(no_vms): + out.append(sessions[i * 2].read_until_output_matches( + [re_dd])[1]) + # Stop all transfers (on 2nd sessions) + for i in range(no_vms): + sessions[i * 2 + 1].sendline(kill_cmd) + # Read the rest of the stats + for i in range(no_vms): + out[-1] = out[-1] + sessions[i * 2].read_up_to_prompt( + timeout=120 + test_time) + + for i in range(no_vms): + # Get all dd loops' statistics + # calculate avg from duration and data + duration = 0 + data = 0 + if len(out[i]) > 5: + out[i] = out[i][1:-1] + for _ in re.findall(re_dd, out[i])[1:-1]: + data += int(_[0]) + duration += float(_[1]) + out[i] = int(data / duration) + + # normalize each output according to cgroup_weights + # Calculate the averages from medians / weights + sum_out = float(sum(out)) + sum_weights = float(sum(weights)) + for i in range(len(weights)): + # [status, norm_weights, norm_out, actual] + out[i] = ['PASS', weights[i] / sum_weights, out[i] / sum_out, + out[i]] + + err = "" + limit = float(params.get('cgroup_limit_%s' % direction, + params.get('cgroup_limit', 0.1))) + # if any of norm_output doesn't ~ match norm_weights, log it. + for i in range(len(out)): + if (out[i][2] > (out[i][1] + limit) + or out[i][2] < (out[i][1] - limit)): + out[i][0] = 'FAIL' + err += "%d, " % i + + logging.info("blkio_bandwidth_%s: dd statistics\n%s", direction, + utils.matrix_to_string(out, ['status', 'norm_weights', + 'norm_out', 'actual'])) + + if err: + err = ("blkio_bandwidth_%s: limits [%s] were broken" + % (direction, err[:-2])) + logging.debug(err) + return err + '\n' + return "" + + error.context("Init") + try: + weights = eval(params.get('cgroup_weights', "[100, 1000]")) + if type(weights) is not list: + raise TypeError + except TypeError: + raise error.TestError("Incorrect configuration: param " + "cgroup_weights have to be list-like string '[1, 2]'") + test_time = int(params.get("cgroup_test_time", 60)) + error.context("Prepare VMs") + # Prepare enough VMs each with 1 disk for testing + no_vms = len(weights) + param_add_vms(no_vms) + param_add_file_disks("1M") + preprocess(test, params, env) + + vms = [] + sessions = [] # 2 sessions per VM + timeout = int(params.get("login_timeout", 360)) + for name in params['vms'].split(): + vms.append(env.get_vm(name)) + sessions.append(vms[-1].wait_for_login(timeout=timeout)) + sessions.append(vms[-1].wait_for_login(timeout=30)) + + error.context("Setup test") + modules = CgroupModules() + if (modules.init(['blkio']) != 1): + raise error.TestFail("Can't mount blkio cgroup modules") + blkio = Cgroup('blkio', '') + blkio.initialize(modules) + for i in range(no_vms): + blkio.mk_cgroup() + assign_vm_into_cgroup(vms[i], blkio, i) + blkio.set_property("blkio.weight", weights[i], i) + + # Fails only when the session is occupied (Timeout) + # ; true is necessarily when there is no dd present at the time + kill_cmd = "rm -f /tmp/cgroup_lock; killall -9 dd; true" + stat_cmd = "killall -SIGUSR1 dd; true" + re_dd = (r'(\d+) bytes \(\d+\.*\d* \w*\) copied, (\d+\.*\d*) s, ' + '\d+\.*\d* \w./s') + err = "" + try: + error.context("Read test") + err += _test("read") + # verify sessions between 
tests + for session in sessions: + session.cmd("true") + error.context("Write test") + err += _test("write") + if err: + logging.error("Results:\n" + err) + else: + logging.info("Speeds distributed accordingly to blkio.weight.") + + finally: + error.context("Cleanup") + for i in range(no_vms): + # stop all workers + sessions[i * 2 + 1].sendline(kill_cmd) + for session in sessions: + # try whether all sessions are clean + session.cmd("true") + session.close() + + del(blkio) + del(modules) + + for i in range(len(vms)): + vms[i].destroy() + + error.context("Results") + if err: + raise error.TestFail(err) + else: + return "Speeds distributed accordingly to blkio.weight." + + @error.context_aware + def blkio_throttle(): + """ + Tests the blkio.throttle.{read,write}_bps_device cgroup capability. + It sets speeds accordingly to current scenario and let it run for + $test_time seconds. Afterwards it verifies whether the speeds matches. + @note: VMs are created in test + @note: Uses scsi_debug disks + @param cfg: cgroup_test_time - test duration '60' + @param cfg: cgroup_limit{ ,_read,_write} - allowed R/W threshold '0.1' + @param cfg: cgroup_speeds list of simultaneous speeds + [speed1, speed2,..] '[1024]' + """ + error.context("Init") + try: + speeds = eval(params.get('cgroup_speeds', "[1024]")) + if type(speeds) is not list: + raise TypeError + except TypeError: + raise error.TestError("Incorrect configuration: param " + "cgroup_speeds have to be list of strings" + "eg. [1024] or [1024,2048,8192].") + + # Make param suitable for multitest and execute it. + return blkio_throttle_multi([[_] for _ in speeds]) + + @error.context_aware + def blkio_throttle_multi(speeds=None): + """ + Tests the blkio.throttle.{read,write}_bps_device cgroup capability. + It sets speeds accordingly to current scenario and let it run for + $test_time seconds. Afterwards it verifies whether the speeds matches. + All scenarios have to have the same number of speeds (= no_vms). + @note: VMs are created in test + @note: Uses scsi_debug disks + @param cfg: cgroup_test_time - test duration '60' + @param cfg: cgroup_limit{ ,_read,_write} - allowed R/W threshold '0.1' + @param cfg: cgroup_speeds list of lists defining [[vm1],[vm2],..]] + and speeds [[speed1],[speed2],..],..]. + '[[1024,0,2048,0,8192]]' + """ + def _test(direction, blkio): + """ + Executes loop of small dd transfers changes cgroups and measures + speeds. 
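+            For every scenario column the VMs are reassigned into the cgroup
+            holding their configured bps limit, dd runs for test_time seconds
+            and the measured throughput must stay within cgroup_limit of that
+            limit (a limit of 0 means unthrottled and is only reported, not
+            verified).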
+ @param direction: "read" / "write" + @return: "" on success or err message when fails + """ + # Test + dd_cmd = get_dd_cmd(direction) + limit = float(params.get('cgroup_limit_%s' % direction, + params.get('cgroup_limit', 0.1))) + # every scenario have list of results [[][][]] + out = [] + # every VM have one output [] + for i in range(no_vms): + out.append([]) + sessions[i * 2].sendline(dd_cmd) + for j in range(no_speeds): + _ = "" + for i in range(no_vms): + # assign all VMs to current scenario cgroup + assign_vm_into_cgroup(vms[i], blkio, i * no_speeds + j) + _ += "vm%d:%d, " % (i, speeds[i][j]) + logging.debug("blkio_throttle_%s: Current speeds: %s", + direction, _[:-2]) + time.sleep(test_time) + # Read stats + for i in range(no_vms): + # Force stats in case no dd cmd finished + sessions[i * 2 + 1].sendline(stat_cmd) + for i in range(no_vms): + out[i].append(sessions[i * 2].read_until_output_matches( + [re_dd])[1]) + # Stop all transfers (on 2nd sessions) + for i in range(no_vms): + sessions[i * 2 + 1].sendline(kill_cmd) + # Read the rest of the stats + for i in range(no_vms): + out[i][-1] = (out[i][-1] + + sessions[i * 2].read_up_to_prompt( + timeout=120 + test_time)) + # Restart all transfers (on 1st sessions) + for i in range(no_vms): + sessions[i * 2].sendline(dd_cmd) + + # bash needs some time... + time.sleep(1) + for i in range(no_vms): + sessions[i * 2 + 1].sendline(kill_cmd) + + # Verification + err = "" + # [PASS/FAIL, iteration, vm, speed, actual] + output = [] + for j in range(len(out[i])): + for i in range(no_vms): + # calculate avg from duration and data + duration = 0 + data = 0 + for _ in re.findall(re_dd, out[i][j]): + data += int(_[0]) + duration += float(_[1]) + output.append(['PASS', j, 'vm%d' % i, speeds[i][j], + int(data / duration)]) + # Don't meassure unlimited speeds + if (speeds[i][j] == 0): + output[-1][0] = "INF" + output[-1][3] = "(inf)" + elif distance(output[-1][4], speeds[i][j]) > limit: + err += "vm%d:%d, " % (i, j) + output[-1][0] = "FAIL" + + # TODO: Unlimited speed fluctates during test + logging.info("blkio_throttle_%s: dd statistics\n%s", direction, + utils.matrix_to_string(output, ['result', 'it', + 'vm', 'speed', 'actual'])) + if err: + err = ("blkio_throttle_%s: limits [%s] were broken" + % (direction, err[:-2])) + logging.debug(err) + return err + '\n' + return "" + + error.context("Init") + no_speeds = 0 + if speeds: # blkio_throttle + no_speeds = len(speeds[0]) + else: # blkio_throttle_multi + try: + speeds = eval(params.get('cgroup_speeds', + "[[1024,0,2048,0,8192]]")) + if type(speeds) is not list: + raise TypeError + if type(speeds[0]) is not list: + logging.warn("cgroup_speeds have to be listOfLists") + speeds = [speeds] + no_speeds = len(speeds[0]) + for speed in speeds: + if type(speed) is not list: + logging.error("One of cgroup_speeds sublists is not " + "list") + raise TypeError + if len(speed) != no_speeds: + logging.error("cgroup_speeds sublists have different " + "lengths") + raise TypeError + except TypeError: + raise error.TestError("Incorrect configuration: param " + "cgroup_speeds have to be listOfList-" + "like string with same lengths. 
" + "([[1024]] or [[0,1024],[1024,2048]])") + # Minimum testing time is 30s (dd must copy few blocks) + test_time = max(int(params.get("cgroup_test_time", 60)) / no_speeds, + 30) + + error.context("Prepare VMs") + # create enough of VMs with scsi_debug attached disks + no_vms = len(speeds) + param_add_vms(no_vms) + param_add_scsi_disks() + preprocess(test, params, env) + + vms = [] + sessions = [] + timeout = int(params.get("login_timeout", 360)) + # 2 sessions per VM + for name in params['vms'].split(): + vms.append(env.get_vm(name)) + sessions.append(vms[-1].wait_for_login(timeout=timeout)) + sessions.append(vms[-1].wait_for_login(timeout=30)) + + error.context("Setup test") + modules = CgroupModules() + if (modules.init(['blkio']) != 1): + raise error.TestFail("Can't mount blkio cgroup modules") + blkio = Cgroup('blkio', '') + blkio.initialize(modules) + for i in range(no_vms): + # Set speeds for each scsi_debug device for each VM + dev = get_maj_min(params['image_name_scsi-debug-%s' % vms[i].name]) + for j in range(no_speeds): + speed = speeds[i][j] + blkio.mk_cgroup() + if speed == 0: # Disable limit (removes the limit) + blkio.set_property("blkio.throttle.write_bps_device", + "%s:%s %s" % (dev[0], dev[1], speed), + i * no_speeds + j, check="") + blkio.set_property("blkio.throttle.read_bps_device", + "%s:%s %s" % (dev[0], dev[1], speed), + i * no_speeds + j, check="") + else: # Enable limit (input separator ' ', output '\t') + blkio.set_property("blkio.throttle.write_bps_device", + "%s:%s %s" % (dev[0], dev[1], speed), + i * no_speeds + j, check="%s:%s\t%s" + % (dev[0], dev[1], speed)) + blkio.set_property("blkio.throttle.read_bps_device", + "%s:%s %s" % (dev[0], dev[1], speed), + i * no_speeds + j, check="%s:%s\t%s" + % (dev[0], dev[1], speed)) + + # ; true is necessarily when there is no dd present at the time + kill_cmd = "rm -f /tmp/cgroup_lock; killall -9 dd; true" + stat_cmd = "killall -SIGUSR1 dd; true" + re_dd = (r'(\d+) bytes \(\d+\.*\d* \w*\) copied, (\d+\.*\d*) s, ' + '\d+\.*\d* \w./s') + err = "" + try: + error.context("Read test") + err += _test("read", blkio) + # verify sessions between tests + for session in sessions: + session.cmd("true") + error.context("Write test") + err += _test("write", blkio) + + if err: + logging.error("Results\n" + err) + + finally: + error.context("Cleanup") + for i in range(no_vms): + # stop all workers + sessions[i * 2 + 1].sendline(kill_cmd) + + del(blkio) + del(modules) + + for session in sessions: + # try whether all sessions are clean + session.cmd("true") + session.close() + + for i in range(len(vms)): + vms[i].destroy() + + rm_scsi_disks(no_vms) + + error.context("Results") + if err: + raise error.TestFail(err) + else: + return "Throughputs matched the prescriptions." + + @error.context_aware + def cpu_cfs_util(): + """ + Tests cfs scheduler utilisation when cfs_period_us and cfs_quota_us + are set for each virtual CPU with multiple VMs. + Each VM have double the previous created one (1, 2, 4, 8..) upto + twice physical CPUs overcommit. cfs quotas are set to 1/2 thus VMs + should consume exactly 100%. It measures the difference. 
+ @note: VMs are created in test + @param cfg: cgroup_test_time - test duration '60' + @param cfg: cgroup_limit - allowed threshold '0.05' (5%) + """ + error.context("Setup test") + modules = CgroupModules() + if (modules.init(['cpu']) != 1): + raise error.TestFail("Can't mount cpu cgroup modules") + cgroup = Cgroup('cpu', '') + cgroup.initialize(modules) + host_cpus = open('/proc/cpuinfo').read().count('model name') + + # Create first VM + params['smp'] = 1 + params['vms'] = "vm0" + preprocess(test, params, env) + + error.context("Prepare VMs") + vms = [] + sessions = [] + serials = [] + timeout = 1.5 * int(params.get("login_timeout", 360)) + # First one + vms.append(env.get_all_vms()[0]) + cpu_pids = vms[0].get_vcpu_pids() + smp = len(cpu_pids) + cgroup.mk_cgroup() + cgroup.set_property("cpu.cfs_period_us", 100000, 0) + cgroup.set_property("cpu.cfs_quota_us", 50000 * smp, 0) + assign_vm_into_cgroup(vms[0], cgroup, 0) + for j in range(smp): + cgroup.mk_cgroup(0) + cgroup.set_property("cpu.cfs_period_us", 100000, -1) + cgroup.set_property("cpu.cfs_quota_us", 50000, -1) + cgroup.set_cgroup(cpu_pids[j], -1) + sessions.append(vms[0].wait_for_login(timeout=timeout)) + serials.append(vms[0].wait_for_serial_login(timeout=30)) + serials[0].cmd("touch /tmp/cgroup-cpu-lock") + vm_cpus = smp + + # Clone the first one with different 'smp' setting + _params = params + i = 1 + while vm_cpus < 2 * host_cpus: + vm_name = "clone%d" % i + smp = min(2 * smp, 2 * host_cpus - vm_cpus) + _params['smp'] = smp + vms.append(vms[0].clone(vm_name, _params)) + env.register_vm(vm_name, vms[-1]) + vms[-1].create() + pwd = cgroup.mk_cgroup() + cgroup.set_property("cpu.cfs_period_us", 100000, -1) + # Total quota is for ALL vCPUs + cgroup.set_property("cpu.cfs_quota_us", 50000 * smp, -1) + assign_vm_into_cgroup(vms[-1], cgroup, -1) + cpu_pids = vms[-1].get_vcpu_pids() + for j in range(smp): + cgroup.mk_cgroup(pwd) + cgroup.set_property("cpu.cfs_period_us", 100000, -1) + # Quota for current vcpu + cgroup.set_property("cpu.cfs_quota_us", 50000, -1) + cgroup.set_cgroup(cpu_pids[j], -1) + sessions.append(vms[-1].wait_for_login(timeout=timeout)) + serials.append(vms[-1].wait_for_serial_login(timeout=30)) + serials[-1].cmd("touch /tmp/cgroup-cpu-lock") + vm_cpus += smp + i += 1 + + cmd = "renice -n 10 $$; " + cmd += "while [ -e /tmp/cgroup-cpu-lock ] ; do :; done" + kill_cmd = 'rm -f /tmp/cgroup-cpu-lock' + + stats = [] + # test_time is 1s stabilization, 1s first meass., 9s second and the + # rest of cgroup_test_time as 3rd meassurement. 
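+        # Editorial note: the /proc/stat snapshots taken below are parsed as
+        # follows - after dropping the leading "cpu" label, fields 0-7 are
+        # host times (user, nice, system, idle, iowait, irq, softirq, steal)
+        # and fields 8+ are guest times, so sum(guest)/sum(host fields)
+        # approximates the share of host CPU time spent running guests.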
+ test_time = max(1, int(params.get('cgroup_test_time', 60)) - 11) + err = "" + try: + error.context("Test") + for session in sessions: + session.sendline(cmd) + + time.sleep(1) + stats.append(open('/proc/stat', 'r').readline()) + time.sleep(1) + stats.append(open('/proc/stat', 'r').readline()) + time.sleep(9) + stats.append(open('/proc/stat', 'r').readline()) + time.sleep(test_time) + stats.append(open('/proc/stat', 'r').readline()) + for session in serials: + session.sendline('rm -f /tmp/cgroup-cpu-lock') + + # /proc/stat first line is cumulative CPU usage + # 1-8 are host times, 8-9 are guest times (on older kernels only 8) + error.context("Verification") + # Start of the test (time 0) + stats[0] = [int(_) for _ in stats[0].split()[1:]] + stats[0] = [sum(stats[0][0:8]), sum(stats[0][8:])] + # Calculate relative stats from time 0 + for i in range(1, len(stats)): + stats[i] = [int(_) for _ in stats[i].split()[1:]] + try: + stats[i] = (float(sum(stats[i][8:]) - stats[0][1]) / + (sum(stats[i][0:8]) - stats[0][0])) + except ZeroDivisionError: + logging.error("ZeroDivisionError in stats calculation") + stats[i] = False + + limit = 1 - float(params.get("cgroup_limit", 0.05)) + for i in range(1, len(stats)): + # Utilisation should be 100% - allowed treshold (limit) + if stats[i] < (100 - limit): + logging.debug("%d: guest time is not >%s%% %s" % (i, limit, + stats[i])) + + if err: + err = "Guest time is not >%s%% %s" % (limit, stats[1:]) + logging.error(err) + logging.info("Guest times are over %s%%: %s", limit, stats[1:]) + else: + logging.info("CFS utilisation was over %s", limit) + + finally: + error.context("Cleanup") + del(cgroup) + del(modules) + + for i in range(len(serials)): + # stop all workers + serials[i].sendline(kill_cmd) + for session in sessions: + # try whether all sessions are clean + session.cmd("true") + session.close() + + for i in range(1, len(vms)): + vms[i].destroy() + + error.context("Results") + if err: + raise error.TestFail(err) + else: + return "Guest times are over %s%%: %s" % (limit, stats[1:]) + + @error.context_aware + def cpu_share(): + """ + Sets cpu.share shares for different VMs and measure the actual + utilisation distribution over physical CPUs + @param cfg: cgroup_use_max_smp - use smp = all_host_cpus + @param cfg: cgroup_test_time - test duration '60' + @param cfg: smp - no_vcpus per VM. When smp <= 0 .. smp = no_host_cpus + @param cfg: cgroup_speeds - list of speeds of each vms [vm0, vm1,..]. + List is sorted in test! '[10000, 100000]' + """ + def _get_stat(f_stats, _stats=None): + """ Reads CPU times from f_stats[] files and sumarize them. 
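+            Fields 13:17 of each /proc/<pid>/stat are utime, stime, cutime
+            and cstime in clock ticks, so their sum is the CPU time consumed
+            by the qemu process (plus reaped children); passing the previous
+            snapshot via _stats turns the result into a delta.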
""" + if _stats is None: + _stats = [] + for i in range(len(f_stats)): + _stats.append(0) + stats = [] + for i in range(len(f_stats)): + f_stats[i].seek(0) + stats.append(f_stats[i].read().split()[13:17]) + stats[i] = sum([int(_) for _ in stats[i]]) - _stats[i] + return stats + + error.context("Init") + try: + speeds = eval(params.get('cgroup_speeds', '[10000, 100000]')) + if type(speeds) is not list: + raise TypeError + except TypeError: + raise error.TestError("Incorrect configuration: param " + "cgroup_speeds have to be list-like string '[1, 2]'") + + host_cpus = open('/proc/cpuinfo').read().count('model name') + # when smp <= 0 use smp = no_host_cpus + vm_cpus = int(params.get('smp', 0)) # cpus per VM + # Use smp = no_host_cpu + if vm_cpus <= 0 or params.get('cgroup_use_max_smp') == "yes": + params['smp'] = host_cpus + vm_cpus = host_cpus + no_speeds = len(speeds) + # All host_cpus have to be used with no_speeds overcommit + no_vms = host_cpus * no_speeds / vm_cpus + no_threads = no_vms * vm_cpus + sessions = [] + serials = [] + modules = CgroupModules() + if (modules.init(['cpu']) != 1): + raise error.TestFail("Can't mount cpu cgroup modules") + cgroup = Cgroup('cpu', '') + cgroup.initialize(modules) + + error.context("Prepare VMs") + param_add_vms(no_vms) + preprocess(test, params, env) + + # session connections are spread vm1, vm2, vm3, ... With more vcpus + # the second round is similar after the whole round (vm1, vm2, vm1, ..) + # vms are spread into cgroups vm1=cg1, vm2=cg2, vm3=cg3 // % no_cgroup + # when we go incrementally through sessions we got always different cg + vms = env.get_all_vms() + timeout = 1.5 * int(params.get("login_timeout", 360)) + for i in range(no_threads): + sessions.append(vms[i % no_vms].wait_for_login(timeout=timeout)) + + for i in range(no_speeds): + cgroup.mk_cgroup() + cgroup.set_property('cpu.shares', speeds[i], i) + for i in range(no_vms): + assign_vm_into_cgroup(vms[i], cgroup, i % no_speeds) + sessions[i].cmd("touch /tmp/cgroup-cpu-lock") + serials.append(vms[i].wait_for_serial_login(timeout=30)) + + error.context("Test") + try: + f_stats = [] + err = [] + # Time 0 + for vm in vms: + f_stats.append(open("/proc/%d/stat" % vm.get_pid(), 'r')) + + time_init = 2 + # there are 6 tests + time_test = max(int(params.get("cgroup_test_time", 60)) / 6, 5) + thread_count = 0 # actual thread number + stats = [] + cmd = "renice -n 10 $$; " # new ssh login should pass + cmd += "while [ -e /tmp/cgroup-cpu-lock ]; do :; done" + # Occupy all host_cpus with 1 task (no overcommit) + for thread_count in range(0, host_cpus): + sessions[thread_count].sendline(cmd) + time.sleep(time_init) + _stats = _get_stat(f_stats) + time.sleep(time_test) + stats.append(_get_stat(f_stats, _stats)) + + # Overcommit on 1 cpu + thread_count += 1 + sessions[thread_count].sendline(cmd) + time.sleep(time_init) + _stats = _get_stat(f_stats) + time.sleep(time_test) + stats.append(_get_stat(f_stats, _stats)) + + # no_speeds overcommit on all CPUs + for i in range(thread_count + 1, no_threads): + sessions[i].sendline(cmd) + time.sleep(time_init) + _stats = _get_stat(f_stats) + for j in range(3): + __stats = _get_stat(f_stats) + time.sleep(time_test) + stats.append(_get_stat(f_stats, __stats)) + stats.append(_get_stat(f_stats, _stats)) + + # Verify results + err = "" + # accumulate stats from each cgroup + for j in range(len(stats)): + for i in range(no_speeds, len(stats[j])): + stats[j][i % no_speeds] += stats[j][i] + stats[j] = stats[j][:no_speeds] + # I. 
+ i = 0 + # only first #host_cpus guests were running + dist = distance(min(stats[i][:host_cpus]), + max(stats[i][:host_cpus])) + # less vms, lower limit. Maximal limit is 0.2 + if dist > min(0.10 + 0.01 * len(vms), 0.2): + err += "1, " + logging.error("1st part's limits broken. Utilisation should be" + " equal. stats = %s, distance = %s", stats[i], + dist) + else: + logging.info("1st part's distance = %s", dist) + # II. + i += 1 + dist = distance(min(stats[i]), max(stats[i])) + if host_cpus % no_speeds == 0 and no_speeds <= host_cpus: + if dist > min(0.10 + 0.01 * len(vms), 0.2): + err += "2, " + logging.error("2nd part's limits broken, Utilisation " + "should be equal. stats = %s, distance = %s", + stats[i], dist) + else: + logging.info("2nd part's distance = %s", dist) + else: + logging.warn("2nd part's verification skipped (#cgroup,#cpu)," + " stats = %s,distance = %s", stats[i], dist) + + # III. + # normalize stats, then they should have equal values + i += 1 + for i in range(i, len(stats)): + norm_stats = [float(stats[i][_]) / speeds[_] + for _ in range(len(stats[i]))] + dist = distance(min(norm_stats), max(norm_stats)) + if dist > min(0.10 + 0.02 * len(vms), 0.25): + err += "3, " + logging.error("3rd part's limits broken; utilisation " + "should be in accordance to self.speeds. " + "stats=%s, norm_stats=%s, distance=%s, " + "speeds=%s,it=%d", stats[i], norm_stats, + dist, speeds, i - 1) + else: + logging.info("3rd part's norm_dist = %s", dist) + + if err: + err = "[%s] parts broke their limits" % err[:-2] + logging.error(err) + else: + logging.info("Cpu utilisation enforced successfully") + + finally: + error.context("Cleanup") + del(cgroup) + + for i in range(len(serials)): + # stop all workers + serials[i].sendline("rm -f /tmp/cgroup-cpu-lock") + for session in sessions: + # try whether all sessions are clean + session.cmd("true") + session.close() + + for i in range(len(vms)): + vms[i].destroy() + + del(modules) + + error.context("Results") + if err: + raise error.TestFail(err) + else: + return ("Cpu utilisation enforced succesfully") + + @error.context_aware + def cpuset_cpus(): + """ + Pins main_thread and each vcpu acoordingly to scenario setup + and measures physical CPU utilisation. + When nothing is set the test uses smp vcpus. When cgroup_cpuset is + specified it forces smp to fit cpuset prescription. Last but not least + you can force the test to use half of the host cpus. + @warning: Default verification method assumes 100% utilisation on each + used CPU. You can force cgroup_verify results. + @param cfg: cgroup_use_half_smp - force smp = no_host_cpus / 2 + @param cfg: cgroup_test_time - scenerio duration '1' + @param cfg: cgroup_limit - allowed threshold '0.05' (5%) + @params cfg: cgroup_cpuset - list of lists defining cpu pinning. + [[1st_scenario],[2nd_scenario], ...] + [[main_thread, vcpu0, vcpu1, ...], ...] + eg. [[None, '0,3', '1', '2', '1-2'], ['0', '0', '1'.....]] + 'by default 5 specific scenarios' + @params cfg: cgroup_verify - list of lists defining verification + physical CPUs utilisations + [[1st_scenario],[2nd_scenario], ...] + [[cpu0_util,cpu1_util,...], ...] + eg. 
[[50, 100, 100, 50], [100, 100, 0, 0]] + 'by default it assumes each used CPU will be 100% + utilised' + """ + def _generate_cpusets(vm_cpus, no_cpus): + """ + Generates 5 cpusets scenerios + @param vm_cpus: number of virtual CPUs + @param no_cpus: number of physical CPUs + """ + cpusets = [] + # OO__ + if no_cpus > vm_cpus: + cpuset = '0-%d' % (vm_cpus - 1) + # all cpus + main_thread + cpusets.append([cpuset for _ in range(no_cpus + 1)]) + # __OO + if no_cpus > vm_cpus: + cpuset = '%d-%d' % (no_cpus - vm_cpus - 1, no_cpus - 1) + cpusets.append([cpuset for _ in range(no_cpus + 1)]) + # O___ + cpusets.append(['0' for _ in range(no_cpus + 1)]) + # _OO_ + if no_cpus == 2: + cpuset = '1' + else: + cpuset = '1-%d' % min(no_cpus, vm_cpus - 1) + cpusets.append([cpuset for _ in range(no_cpus + 1)]) + # O_O_ + cpuset = '0' + for i in range(1, min(vm_cpus, (no_cpus / 2))): + cpuset += ',%d' % (i * 2) + cpusets.append([cpuset for i in range(no_cpus + 1)]) + return cpusets + + def _generate_verification(cpusets, no_cpus): + """ + Calculates verification data. + @warning: Inaccurate method, every pinned CPU have to have 100% + utilisation! + @param cpusets: cpusets scenarios + @param no_cpus: number of physical CPUs + """ + verify = [] + # For every scenerio + for cpuset in cpusets: + verify.append([0 for _ in range(no_cpus)]) + # For every vcpu (skip main_thread, it doesn't consume much) + for vcpu in cpuset[1:]: + vcpu.split(',') + # Get all usable CPUs for this vcpu + for vcpu_pin in vcpu.split(','): + _ = vcpu_pin.split('-') + if len(_) == 2: + # Range of CPUs + for cpu in range(int(_[0]), int(_[1]) + 1): + verify[-1][cpu] = 100 + else: + # Single CPU + verify[-1][int(_[0])] = 100 + return verify + + error.context("Init") + cpusets = None + verify = None + try: + cpusets = eval(params.get("cgroup_cpuset", "None")) + if not ((type(cpusets) is list) or (cpusets is None)): + raise Exception + except Exception: + raise error.TestError("Incorrect configuration: param cgroup_" + "cpuset have to be list of lists, where " + "all sublist have the same length and " + "the length is ('smp' + 1). Or 'None' for " + "default.\n%s" % cpusets) + try: + verify = eval(params.get("cgroup_verify", "None")) + if not ((type(cpusets) is list) or (cpusets is None)): + raise Exception + except Exception: + raise error.TestError("Incorrect configuration: param cgroup_" + "verify have to be list of lists or 'None' " + "for default/automatic.\n%s" % verify) + + limit = float(params.get("cgroup_limit", 0.05)) * 100 + + test_time = int(params.get("cgroup_test_time", 1)) + + vm = env.get_all_vms()[0] + modules = CgroupModules() + if (modules.init(['cpuset']) != 1): + raise error.TestFail("Can't mount cpu cgroup modules") + cgroup = Cgroup('cpuset', '') + cgroup.initialize(modules) + + all_cpus = cgroup.get_property("cpuset.cpus")[0] + all_mems = cgroup.get_property("cpuset.mems")[0] + + # parse all available host_cpus from cgroups + try: + no_cpus = int(all_cpus.split('-')[1]) + 1 + except (ValueError, IndexError): + raise error.TestFail("Failed to get #CPU from root cgroup. 
(%s)", + all_cpus) + vm_cpus = int(params.get("smp", 1)) + # If cpuset specified, set smp accordingly + if cpusets: + if no_cpus < (len(cpusets[0]) - 1): + err = ("Not enough host CPUs to run this test with selected " + "cpusets (cpus=%s, cpusets=%s)" % (no_cpus, cpusets)) + logging.error(err) + raise error.TestNAError(err) + vm_cpus = len(cpusets[0]) - 1 # Don't count main_thread to vcpus + for i in range(len(cpusets)): + # length of each list have to be 'smp' + 1 + if len(cpusets[i]) != (vm_cpus + 1): + err = ("cpusets inconsistent. %d sublist have different " + " length. (param cgroup_cpusets in cfg)." % i) + logging.error(err) + raise error.TestError(err) + # if cgroup_use_half_smp, set smp accordingly + elif params.get("cgroup_use_half_smp") == "yes": + vm_cpus = no_cpus / 2 + if no_cpus == 2: + logging.warn("Host have only 2 CPUs, using 'smp = all cpus'") + vm_cpus = 2 + + if vm_cpus <= 1: + logging.error("Test requires at least 2 vCPUs.") + raise error.TestNAError("Test requires at least 2 vCPUs.") + # Check whether smp changed and recreate VM if so + if vm_cpus != params.get("smp", 0): + logging.info("Expected VM reload.") + params['smp'] = vm_cpus + vm.create(params=params) + # Verify vcpus matches prescription + vcpus = vm.get_vcpu_pids() + if len(vcpus) != vm_cpus: + raise error.TestFail("Incorrect number of vcpu PIDs; smp=%s vcpus=" + "%s" % (vm_cpus, vcpus)) + + if not cpusets: + error.context("Generating cpusets scenerios") + cpusets = _generate_cpusets(vm_cpus, no_cpus) + + # None == all_cpus + for i in range(len(cpusets)): + for j in range(len(cpusets[i])): + if cpusets[i][j] == None: + cpusets[i][j] = all_cpus + + if verify: # Verify exists, check if it's correct + for _ in verify: + if len(_) != no_cpus: + err = ("Incorrect cgroup_verify. Each verify sublist have " + "to have length = no_host_cpus") + logging.error(err) + raise error.TestError(err) + else: # Generate one + error.context("Generating cpusets expected results") + try: + verify = _generate_verification(cpusets, no_cpus) + except IndexError: + raise error.TestError("IndexError occured while generatin " + "verification data. 
Probably missmatched" + " no_host_cpus and cgroup_cpuset cpus") + + error.context("Prepare") + for i in range(no_cpus + 1): + cgroup.mk_cgroup() + cgroup.set_property('cpuset.cpus', all_cpus, i) + cgroup.set_property('cpuset.mems', all_mems, i) + if i == 0: + assign_vm_into_cgroup(vm, cgroup, 0) + else: + cgroup.set_cgroup(vcpus[i - 1], i) + + timeout = int(params.get("login_timeout", 360)) + sessions = [] + stats = [] + serial = vm.wait_for_serial_login(timeout=timeout) + cmd = "renice -n 10 $$; " # new ssh login should pass + cmd += "while [ -e /tmp/cgroup-cpu-lock ]; do :; done" + for i in range(vm_cpus): + sessions.append(vm.wait_for_login(timeout=timeout)) + sessions[-1].cmd("touch /tmp/cgroup-cpu-lock") + sessions[-1].sendline(cmd) + + try: + error.context("Test") + for i in range(len(cpusets)): + cpuset = cpusets[i] + logging.debug("testing: %s", cpuset) + # setup scenario + for i in range(len(cpuset)): + cgroup.set_property('cpuset.cpus', cpuset[i], i) + # Time 0 + _load = get_load_per_cpu() + time.sleep(test_time) + # Stats after test_time + stats.append(get_load_per_cpu(_load)[1:]) + + serial.cmd("rm -f /tmp/cgroup-cpu-lock") + err = "" + + error.context("Verification") + # Normalize stats + for i in range(len(stats)): + stats[i] = [(_ / test_time) for _ in stats[i]] + # Check + # header and matrix variables are only for "beautiful" log + header = ['scen'] + header.extend([' cpu%d' % i for i in range(no_cpus)]) + matrix = [] + for i in range(len(stats)): + matrix.append(['%d' % i]) + for j in range(len(stats[i])): + if ((stats[i][j] < (verify[i][j] - limit)) or + (stats[i][j] > (verify[i][j] + limit))): + err += "%d(%d), " % (i, j) + matrix[-1].append("%3d ! %d" % (verify[i][j], + stats[i][j])) + else: + matrix[-1].append("%3d ~ %d" % (verify[i][j], + stats[i][j])) + logging.info("Results (theoretical ~ actual):\n%s" % + utils.matrix_to_string(matrix, header)) + if err: + err = "Scenerios %s FAILED" % err + logging.error(err) + else: + logging.info("All utilisations match prescriptions.") + + finally: + error.context("Cleanup") + serial.cmd("rm -f /tmp/cgroup-cpu-lock") + del(cgroup) + del(modules) + + error.context("Results") + if err: + raise error.TestFail(err) + else: + return ("All utilisations match prescriptions.") + + @error.context_aware + def cpuset_cpus_switching(): + """ + Tests the cpuset.cpus cgroup feature. It stresses all VM's CPUs + while switching between cgroups with different setting. 
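+        Three cgroups are prepared (all host CPUs, CPU 0 only, CPUs 1..last)
+        and the whole VM is bounced between them in a tight loop for
+        cgroup_test_time seconds while every vcpu runs a busy loop; the test
+        passes as long as the guest stays alive.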
+ @param cfg: cgroup_test_time - test duration '60' + """ + error.context("Init") + try: + test_time = int(params.get("cgroup_test_time", 60)) + except ValueError: + raise error.TestError("Incorrect configuration: param " + "cgroup_test_time have to be an integer") + + error.context("Prepare") + modules = CgroupModules() + if (modules.init(['cpuset']) != 1): + raise error.TestFail("Can't mount cpuset cgroup modules") + cgroup = Cgroup('cpuset', '') + cgroup.initialize(modules) + + timeout = int(params.get("login_timeout", 360)) + vm = env.get_all_vms()[0] + serial = vm.wait_for_serial_login(timeout=timeout) + vm_cpus = int(params.get('smp', 1)) + all_cpus = cgroup.get_property("cpuset.cpus")[0] + if all_cpus == "0": + raise error.TestFail("This test needs at least 2 CPUs on " + "host, cpuset=%s" % all_cpus) + try: + last_cpu = int(all_cpus.split('-')[1]) + except Exception: + raise error.TestFail("Failed to get #CPU from root cgroup.") + + if last_cpu == 1: + second2last_cpu = "1" + else: + second2last_cpu = "1-%s" % last_cpu + + # Comments are for vm_cpus=2, no_cpus=4, _SC_CLK_TCK=100 + cgroup.mk_cgroup() # oooo + cgroup.set_property('cpuset.cpus', all_cpus, 0) + cgroup.set_property('cpuset.mems', 0, 0) + cgroup.mk_cgroup() # O___ + cgroup.set_property('cpuset.cpus', 0, 1) + cgroup.set_property('cpuset.mems', 0, 1) + cgroup.mk_cgroup() # _OO_ + cgroup.set_property('cpuset.cpus', second2last_cpu, 2) + cgroup.set_property('cpuset.mems', 0, 2) + assign_vm_into_cgroup(vm, cgroup, 0) + + error.context("Test") + err = "" + try: + cmd = "renice -n 10 $$; " # new ssh login should pass + cmd += "while [ -e /tmp/cgroup-cpu-lock ]; do :; done" + sessions = [] + # start stressers + for i in range(vm_cpus): + sessions.append(vm.wait_for_login(timeout=30)) + sessions[i].cmd("touch /tmp/cgroup-cpu-lock") + sessions[i].sendline(cmd) + + logging.info("Some harmless IOError messages of non-existing " + "processes might occur.") + i = 0 + t_stop = time.time() + test_time # run for $test_time seconds + while time.time() < t_stop: + assign_vm_into_cgroup(vm, cgroup, i % 3) + i += 1 + + error.context("Verification") + serial.sendline("rm -f /tmp/cgroup-cpu-lock") + + try: + vm.verify_alive() + except Exception, exc_details: + err += "VM died (no_switches=%s): %s\n" % (i, exc_details) + + if err: + err = err[:-1] + logging.error(err) + else: + logging.info("VM survived %d cgroup switches", i) + + finally: + error.context("Cleanup") + del(cgroup) + del(modules) + + serial.sendline("rm -f /tmp/cgroup-cpu-lock") + + for session in sessions: + # try whether all sessions are clean + session.cmd("true") + session.close() + + error.context("Results") + if err: + raise error.TestFail(err) + else: + return ("VM survived %d cgroup switches" % i) + + @error.context_aware + def cpuset_mems_switching(): + """ + Tests the cpuset.mems pinning. It changes cgroups with different + mem nodes while stressing memory. 
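+        Every cgroup pins cpuset.mems to a single memory node and enables
+        cpuset.memory_migrate, so each reassignment forces the guest's pages
+        to migrate between NUMA nodes while dd keeps streaming large blocks
+        through guest memory.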
+ @param cfg: cgroup_test_time - test duration '60' + @param cfg: cgroup_cpuset_mems_mb - override the size of memory blocks + 'by default 1/2 of VM memory' + """ + error.context("Init") + test_time = int(params.get('cgroup_test_time', 10)) + vm = env.get_all_vms()[0] + + error.context("Prepare") + modules = CgroupModules() + if (modules.init(['cpuset']) != 1): + raise error.TestFail("Can't mount cpuset cgroup modules") + cgroup = Cgroup('cpuset', '') + cgroup.initialize(modules) + + mems = cgroup.get_property("cpuset.mems")[0] + mems = mems.split('-') + no_mems = len(mems) + if no_mems < 2: + raise error.TestNAError("This test needs at least 2 memory nodes, " + "detected only %s" % mems) + # Create cgroups + all_cpus = cgroup.get_property("cpuset.cpus")[0] + mems = range(int(mems[0]), int(mems[1]) + 1) + for i in range(no_mems): + cgroup.mk_cgroup() + cgroup.set_property('cpuset.mems', mems[i], -1) + cgroup.set_property('cpuset.cpus', all_cpus, -1) + cgroup.set_property('cpuset.memory_migrate', 1) + + timeout = int(params.get("login_timeout", 360)) + sessions = [] + sessions.append(vm.wait_for_login(timeout=timeout)) + sessions.append(vm.wait_for_login(timeout=30)) + + # Don't allow to specify more than 1/2 of the VM's memory + size = int(params.get('mem', 1024)) / 2 + if params.get('cgroup_cpuset_mems_mb') is not None: + size = min(size, int(params.get('cgroup_cpuset_mems_mb'))) + + error.context("Test") + err = "" + try: + logging.info("Some harmless IOError messages of non-existing " + "processes might occur.") + sessions[0].sendline('dd if=/dev/zero of=/dev/null bs=%dM ' + 'iflag=fullblock' % size) + + i = 0 + sessions[1].cmd('killall -SIGUSR1 dd') + t_stop = time.time() + test_time + while time.time() < t_stop: + i += 1 + assign_vm_into_cgroup(vm, cgroup, i % no_mems) + sessions[1].cmd('killall -SIGUSR1 dd; true') + try: + out = sessions[0].read_until_output_matches( + ['(\d+)\+\d records out'])[1] + if len(re.findall(r'(\d+)\+\d records out', out)) < 2: + out += sessions[0].read_until_output_matches( + ['(\d+)\+\d records out'])[1] + except ExpectTimeoutError: + err = ("dd didn't produce expected output: %s" % out) + + if not err: + sessions[1].cmd('killall dd; true') + dd_res = re.findall(r'(\d+)\+(\d+) records in', out) + dd_res += re.findall(r'(\d+)\+(\d+) records out', out) + dd_res = [int(_[0]) + int(_[1]) for _ in dd_res] + if dd_res[1] <= dd_res[0] or dd_res[3] <= dd_res[2]: + err = ("dd stoped sending bytes: %s..%s, %s..%s" % + (dd_res[0], dd_res[1], dd_res[2], dd_res[3])) + if err: + logging.error(err) + else: + out = ("Guest moved %stimes in %s seconds while moving %d " + "blocks of %dMB each" % (i, test_time, dd_res[3], size)) + logging.info(out) + finally: + error.context("Cleanup") + del(cgroup) + del(modules) + + for session in sessions: + # try whether all sessions are clean + session.cmd("true") + session.close() + + error.context("Results") + if err: + raise error.TestFail(err) + else: + return ("VM survived %d cgroup switches" % i) + + @error.context_aware + def devices_access(): + """ + Tests devices.list capability. It tries hot-adding disk with different + devices.list permittions and verifies whether it pass or fails. + It tests booth RO and RW mode. 
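+        devices.list entries have the form "<type> <major>:<minor> <rwm>",
+        e.g. "b 8:32 r" (device numbers here are only an example) grants
+        read-only access to one block device, which is the granularity the
+        permission scenarios below are built on.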
+ @note: VM is destroyed after this test (in order to remove the attached + disks) + @note: supported monitor CMDs are pci_add, drive_add and RH-drive_add + RH-QMP-drive_add + """ + def _set_permissions(cgroup, permissions): + """ + Wrapper for setting permissions to first cgroup + @param self.permissions: is defined as a list of dictionaries: + {'property': control property, 'value': permition value, + 'check_value': check value (from devices.list property), + 'read_results': excepced read results T/F, + 'write_results': expected write results T/F} + """ + cgroup.set_property('devices.' + permissions['property'], + permissions['value'], + cgroup.cgroups[0], + check=permissions['check_value'], + checkprop='devices.list') + + def _add_drive(monitor, monitor_type, disk, name, readonly=False): + """ + Hot-adds disk to monitor's VM. + @param monitor: VM's monitor. + @param monitor_type: which command to use for hot-adding. (string) + @param disk: pwd to disk + @param name: id name given to this disk in VM + @param readonly: Use readonly? 'False' + """ + if readonly: + readonly_str = "on" + else: + readonly_str = "off" + if monitor_type == "HUMAN PCI_ADD": + out = monitor.cmd("pci_add auto storage file=%s,readonly=%s," + "if=virtio,id=%s" % + (disk, readonly_str, name)) + if "all in use" in out: # All PCIs used + return -1 # restart machine and try again + if "%s: " % name not in monitor.cmd("info block"): + return False + elif monitor_type == "HUMAN DRIVE_ADD": + monitor.cmd("drive_add auto file=%s,readonly=%s,if=none,id=%s" + % (disk, readonly_str, name)) + if "%s: " % name not in monitor.cmd("info block"): + return False + elif monitor_type == "HUMAN RH": + monitor.cmd("__com.redhat_drive_add id=%s,file=%s,readonly=%s" + % (name, disk, readonly_str)) + if "%s: " % name not in monitor.cmd("info block"): + return False + elif monitor_type == "QMP RH": + monitor.cmd_obj({"execute": "__com.redhat_drive_add", + "arguments": {"file": disk, "id": name, + "readonly": readonly}}) + output = monitor.cmd_obj({"execute": "query-block"}) + for out in output['return']: + try: + if out['device'] == name: + return True + except KeyError: + pass + return False + else: + return False + + return True + + error.context("Setup test") + vm = env.get_all_vms()[0] + # Try to find suitable monitor + monitor_type = None + for i_monitor in range(len(vm.monitors)): + monitor = vm.monitors[i_monitor] + if isinstance(monitor, kvm_monitor.QMPMonitor): + out = monitor.cmd_obj({"execute": "query-commands"}) + try: + if {'name': '__com.redhat_drive_add'} in out['return']: + monitor_type = "QMP RH" + break + except KeyError: + logging.info("Incorrect data from QMP, skipping: %s", out) + continue + else: + out = monitor.cmd("help") + if "\ndrive_add " in out: + monitor_type = "HUMAN DRIVE_ADD" + break + elif "\n__com.redhat_drive_add " in out: + monitor_type = "HUMAN RH" + break + elif "\npci_add " in out: + monitor_type = "HUMAN PCI_ADD" + break + if monitor_type is None: + raise error.TestNAError("Not detected any suitable monitor cmd. 
" + "Supported methods:\nQMP: __com.redhat_" + "drive_add\nHuman: drive_add, pci_add, " + "__com.redhat_drive_add") + logging.debug("Using monitor type: %s", monitor_type) + + modules = CgroupModules() + if (modules.init(['devices']) != 1): + raise error.TestFail("Can't mount blkio cgroup modules") + devices = Cgroup('devices', '') + devices.initialize(modules) + devices.mk_cgroup() + + # Add one scsi_debug disk which will be used in testing + if utils.system("lsmod | grep scsi_debug", ignore_status=True): + utils.system("modprobe scsi_debug dev_size_mb=8 add_host=0") + utils.system("echo 1 > /sys/bus/pseudo/drivers/scsi_debug/add_host") + time.sleep(0.1) + disk = utils.system_output("ls /dev/sd* | tail -n 1") + dev = "%s:%s" % get_maj_min(disk) + permissions = [ + {'property': 'deny', + 'value': 'a', + 'check_value': '', + 'result': False, + 'result_read': False}, + {'property': 'allow', + 'value': 'b %s r' % dev, + 'check_value': True, + 'result': False, + 'result_read': True}, + {'property': 'allow', + 'value': 'b %s w' % dev, + 'check_value': 'b %s rw' % dev, + 'result': True, + 'result_read': True}, + {'property': 'deny', + 'value': 'b %s r' % dev, + 'check_value': 'b %s w' % dev, + 'result': False, + 'result_read': False}, + {'property': 'deny', + 'value': 'b %s w' % dev, + 'check_value': '', + 'result': False, + 'result_read': False}, + {'property': 'allow', + 'value': 'a', + 'check_value': 'a *:* rwm', + 'result': True, + 'result_read': True}, + ] + + assign_vm_into_cgroup(vm, devices, 0) + + error.context("Test") + err = "" + name = "idTest%s%d" + try: + i = 0 + while i < len(permissions): + perm = permissions[i] + _set_permissions(devices, perm) + logging.debug("Setting permissions: {%s: %s}, value: %s", + perm['property'], perm['value'], + devices.get_property('devices.list', 0)) + results = "" + out = _add_drive(monitor, monitor_type, disk, name % ("R", i), + True) + if out == -1: + logging.warn("All PCIs full, recreating VM") + vm.create() + monitor = vm.monitors[i_monitor] + assign_vm_into_cgroup(vm, devices, 0) + continue + if perm['result_read'] and not out: + results += "ReadNotAttached, " + elif not perm['result_read'] and out: + results += "ReadAttached, " + + out = _add_drive(monitor, monitor_type, disk, name % ("RW", i), + False) + if out == -1: + logging.warn("All PCIs full, recreating VM") + vm.create() + monitor = vm.monitors[i_monitor] + assign_vm_into_cgroup(vm, devices, 0) + continue + if perm['result'] and not out: + results += "RWNotAttached, " + elif not perm['result'] and out: + results += "RWAttached, " + + if results: + logging.debug("%d: FAIL: %s", i, results[:-2]) + err += "{%d: %s}, " % (i, results[:-2]) + else: + logging.info("%d: PASS", i) + i += 1 + + if err: + err = "Some restrictions weren't enforced:\n%s" % err[:-2] + logging.error(err) + else: + logging.info("All restrictions enforced.") + + finally: + error.context("Cleanup") + vm.destroy() # "Safely" remove devices :-) + rm_scsi_disks(1) + del(devices) + del(modules) + + error.context("Results") + if err: + raise error.TestFail(err) + else: + return("All restrictions enforced.") + + @error.context_aware + def freezer(): + """ + Tests the freezer.state cgroup functionality. 
(it freezes the guest + and unfreeze it again) + @param cfg: cgroup_test_time - test duration '60' + """ + def _get_stat(pid): + """ + Gather statistics of pid+1st level subprocesses cpu usage + @param pid: PID of the desired process + @return: sum of all cpu-related values of 1st level subprocesses + """ + out = None + for i in range(10): + try: + out = utils.system_output("cat /proc/%s/task/*/stat" % + pid) + except error.CmdError: + out = None + else: + break + out = out.split('\n') + ret = 0 + for i in out: + ret += sum([int(_) for _ in i.split(' ')[13:17]]) + return ret + + error.context("Init") + try: + test_time = int(params.get("cgroup_test_time", 60)) + except ValueError: + raise error.TestError("Incorrect configuration: param " + "cgroup_test_time have to be an integer") + + timeout = int(params.get("login_timeout", 360)) + vm = env.get_all_vms()[0] + vm_cpus = int(params.get('smp', 0)) # cpus per VM + serial = vm.wait_for_serial_login(timeout=timeout) + sessions = [] + for _ in range(vm_cpus): + sessions.append(vm.wait_for_login(timeout=timeout)) + + error.context("Prepare") + modules = CgroupModules() + if (modules.init(['freezer']) != 1): + raise error.TestFail("Can't mount freezer cgroup modules") + cgroup = Cgroup('freezer', '') + cgroup.initialize(modules) + cgroup.mk_cgroup() + assign_vm_into_cgroup(vm, cgroup, 0) + + error.context("Test") + err = "" + try: + for session in sessions: + session.cmd('touch /tmp/freeze-lock') + session.sendline('while [ -e /tmp/freeze-lock ]; do :; done') + cgroup = cgroup + pid = vm.get_pid() + + # Let it work for short, mid and long period of time + for tsttime in [0.5, 3, test_time]: + logging.debug("FREEZING (%ss)", tsttime) + # Freezing takes some time, DL is 1s + cgroup.set_property('freezer.state', 'FROZEN', + cgroup.cgroups[0], check=False) + time.sleep(1) + _ = cgroup.get_property('freezer.state', 0) + if 'FROZEN' not in _: + err = "Coundn't freze the VM: state %s" % _ + break + stat_ = _get_stat(pid) + time.sleep(tsttime) + stat = _get_stat(pid) + if stat != stat_: + err = ('Process was running in FROZEN state; stat=%s, ' + 'stat_=%s, diff=%s' % (stat, stat_, stat - stat_)) + break + logging.debug("THAWING (%ss)", tsttime) + cgroup.set_property('freezer.state', 'THAWED', 0) + stat_ = _get_stat(pid) + time.sleep(tsttime) + stat = _get_stat(pid) + if (stat - stat_) < (90 * tsttime): + err = ('Process was not active in FROZEN state; stat=%s, ' + 'stat_=%s, diff=%s' % (stat, stat_, stat - stat_)) + break + + if err: + logging.error(err) + else: + logging.info("Freezer works fine") + + finally: + error.context("Cleanup") + del(cgroup) + serial.sendline("rm -f /tmp/freeze-lock") + + for session in sessions: + session.cmd("true") + session.close() + + del(modules) + + if err: + raise error.TestFail(err) + else: + return ("Freezer works fine") + + @error.context_aware + def memory_limit(memsw=False): + """ + Tests the memory.limit_in_bytes or memory.memsw.limit_in_bytes cgroup + capability. It tries to allocate bigger block than allowed limit. + memory.limit_in_bytes: Qemu process should be swaped out and the + block created. + memory.memsw.limit_in_bytes: Qemu should be killed with err 137. 
+ @param memsw: Whether to run memsw or rss mem only test + @param cfg: cgroup_memory_limit_kb - (4kb aligned) test uses + 1.1 * memory_limit memory blocks for testing + 'by default 1/2 of VM memory' + """ + error.context("Init") + try: + mem_limit = params.get('cgroup_memory_limit_kb', None) + if mem_limit is not None: + mem_limit = int(mem_limit) + except ValueError: + raise error.TestError("Incorrect configuration: param cgroup_" + "memory_limit_kb have to be an integer") + + vm = env.get_all_vms()[0] + + error.context("Prepare") + # Don't allow to specify more than 1/2 of the VM's memory + mem = int(params.get('mem', 1024)) * 512 + if mem_limit: + mem = min(mem, mem_limit) + else: + mem_limit = mem + # There have to be enough free swap space and hugepages can't be used + if not memsw: + if params.get('setup_hugepages') == 'yes': + err = "Hugepages can't be used in this test." + logging.error(err) + raise error.TestNAError(err) + if utils.read_from_meminfo('SwapFree') < (mem * 0.1): + err = "Not enough free swap space" + logging.error(err) + raise error.TestNAError(err) + # We want to copy slightely over "mem" limit + mem *= 1.1 + modules = CgroupModules() + if (modules.init(['memory']) != 1): + raise error.TestFail("Can't mount memory cgroup modules") + cgroup = Cgroup('memory', '') + cgroup.initialize(modules) + cgroup.mk_cgroup() + cgroup.set_property('memory.move_charge_at_immigrate', '3', 0) + cgroup.set_property_h('memory.limit_in_bytes', "%dK" % mem_limit, 0) + if memsw: + try: + cgroup.get_property("memory.memsw.limit_in_bytes", 0) + except error.TestError, details: + logging.error("Can't get memory.memsw.limit_in_bytes info." + "Do you have support for memsw? (try passing" + "swapaccount=1 parameter to kernel):%s", details) + raise error.TestNAError("System doesn't support memory.memsw.*" + " or swapaccount is disabled.") + cgroup.set_property_h('memory.memsw.limit_in_bytes', + "%dK" % mem_limit, 0) + + logging.info("Expected VM reload") + try: + vm.create() + except Exception, failure_detail: + raise error.TestFail("init: Failed to recreate the VM: %s" % + failure_detail) + assign_vm_into_cgroup(vm, cgroup, 0) + timeout = int(params.get("login_timeout", 360)) + session = vm.wait_for_login(timeout=timeout) + + # VM already eat-up more than allowed by this cgroup + fstats = open('/proc/%s/status' % vm.get_pid(), 'r') + rss = int(re.search(r'VmRSS:[\t ]*(\d+) kB', fstats.read()).group(1)) + if rss > mem_limit: + raise error.TestFail("Init failed to move VM into cgroup, VmRss" + "=%s, expected=%s" % (rss, mem_limit)) + + try: + error.context("Test") + """ + Let VM allocate huge block: + 1) memsw: During allocation limit of rss+swap should be exceeded + and VM should be killed with err 137. + 2) rsslimit: Allocation should pass, rss+swap should be greater + than mem_limit. 
+ * Max execution time is limited to mem / 10 + * Checking every 0.1s + """ + session.sendline('dd if=/dev/zero of=/dev/null bs=%dK count=1 ' + 'iflag=fullblock' % mem) + + max_rss = 0 + max_rssswap = 0 + out = "" + err = "" + for _ in range(int(mem / 1024)): + try: + fstats.seek(0) + status = fstats.read() + rss = int(re.search(r'VmRSS:[\t ]*(\d+) kB', status) + .group(1)) + max_rss = max(rss, max_rss) + swap = int(re.search(r'VmSwap:[\t ]*(\d+) kB', status) + .group(1)) + max_rssswap = max(rss + swap, max_rssswap) + except Exception, details: + if memsw and not vm.is_alive(): + # VM got SIGTERM as expected, finish the test + break + else: + err = details + break + try: + out += session.read_up_to_prompt(timeout=0.1) + except ExpectTimeoutError: + #0.1s passed, lets begin the next round + pass + except ShellTimeoutError, detail: + if memsw and not vm.is_alive(): + # VM was killed, finish the test + break + else: + err = details + break + except ExpectProcessTerminatedError, detail: + if memsw: + err = ("dd command died (VM should die instead): %s\n" + "Output:%s\n" % (detail, out)) + else: + err = ("dd command died (should pass): %s\nOutput:" + "\n%s" % (detail, out)) + break + else: # dd command finished + break + + error.context("Verification") + if err: + logging.error(err) + elif memsw: + if max_rssswap > mem_limit: + err = ("The limit was broken: max_rssswap=%s, limit=%s" % + (max_rssswap, mem_limit)) + elif vm.process.get_status() != 137: # err: Limit exceeded + err = ("VM exit code is %s (should be %s)" % + (vm.process.get_status(), 137)) + else: + out = ("VM terminated as expected. Used rss+swap: %d, " + "limit %s" % (max_rssswap, mem_limit)) + logging.info(out) + else: # only RSS limit + exit_nr = session.cmd_output("echo $?")[:-1] + if max_rss > mem_limit: + err = ("The limit was broken: max_rss=%s, limit=%s" % + (max_rss, mem_limit)) + elif exit_nr != '0': + err = ("dd command failed(%s) output: %s" % (exit_nr, out)) + elif (max_rssswap) < mem_limit: + err = ("VM didn't consume expected amount of memory. %d:%d" + " Output of dd cmd: %s" % (max_rssswap, mem_limit, + out)) + else: + out = ("Created %dMB block with %.2f memory overcommit" % + (mem / 1024, float(max_rssswap) / mem_limit)) + logging.info(out) + + finally: + error.context("Cleanup") + del(cgroup) + del(modules) + + error.context("Results") + if err: + raise error.TestFail(err) + else: + return out + + def memory_memsw_limit(): + """ + Executes the memory_limit test with parameter memsw. + It tries to allocate bigger block than allowed limit. Qemu should be + killed with err 137. + @param cfg: cgroup_memory_limit_kb - test uses 1.1 * memory_limit + memory blocks for testing 'by default 1/2 of VM memory' + """ + return memory_limit(memsw=True) + + def memory_move(): + """ + Tests the memory.move_charge_at_immigrate cgroup capability. It changes + memory cgroup while running the guest system. 
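+ The VM is reassigned back and forth between two memory cgroups while a
+ dd stream keeps running inside the guest.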
+ @param cfg: cgroup_test_time - test duration '60' + @param cfg: cgroup_memory_move_mb - override the size of memory blocks + 'by default 1/2 of VM memory' + """ + error.context("Init") + test_time = int(params.get('cgroup_test_time', 10)) + vm = env.get_all_vms()[0] + + error.context("Prepare") + modules = CgroupModules() + if (modules.init(['memory']) != 1): + raise error.TestFail("Can't mount memory cgroup modules") + cgroup = Cgroup('memory', '') + cgroup.initialize(modules) + # Two cgroups + cgroup.mk_cgroup() + cgroup.mk_cgroup() + cgroup.set_property('memory.move_charge_at_immigrate', '3', 0) + cgroup.set_property('memory.move_charge_at_immigrate', '3', 1) + + timeout = int(params.get("login_timeout", 360)) + sessions = [] + sessions.append(vm.wait_for_login(timeout=timeout)) + sessions.append(vm.wait_for_login(timeout=30)) + + # Don't allow to specify more than 1/2 of the VM's memory + size = int(params.get('mem', 1024)) / 2 + if params.get('cgroup_memory_move_mb') is not None: + size = min(size, int(params.get('cgroup_memory_move_mb'))) + + err = "" + try: + error.context("Test") + logging.info("Some harmless IOError messages of non-existing " + "processes might occur.") + sessions[0].sendline('dd if=/dev/zero of=/dev/null bs=%dM ' + 'iflag=fullblock' % size) + + i = 0 + sessions[1].cmd('killall -SIGUSR1 dd ; true') + t_stop = time.time() + test_time + while time.time() < t_stop: + i += 1 + assign_vm_into_cgroup(vm, cgroup, i % 2) + sessions[1].cmd('killall -SIGUSR1 dd; true') + try: + out = sessions[0].read_until_output_matches( + ['(\d+)\+\d records out'])[1] + if len(re.findall(r'(\d+)\+\d records out', out)) < 2: + out += sessions[0].read_until_output_matches( + ['(\d+)\+\d records out'])[1] + except ExpectTimeoutError: + err = ("dd didn't produce expected output: %s" % out) + + if not err: + sessions[1].cmd('killall dd; true') + dd_res = re.findall(r'(\d+)\+(\d+) records in', out) + dd_res += re.findall(r'(\d+)\+(\d+) records out', out) + dd_res = [int(_[0]) + int(_[1]) for _ in dd_res] + if dd_res[1] <= dd_res[0] or dd_res[3] <= dd_res[2]: + err = ("dd stoped sending bytes: %s..%s, %s..%s" % + (dd_res[0], dd_res[1], dd_res[2], dd_res[3])) + + if err: + logging.error(err) + else: + out = ("Guest moved %stimes in %s seconds while moving %d " + "blocks of %dMB each" % (i, test_time, dd_res[3], size)) + logging.info(out) + + finally: + error.context("Cleanup") + sessions[1].cmd('killall dd; true') + for session in sessions: + session.cmd("true") + session.close() + + del(cgroup) + del(modules) + + if err: + logging.error(err) + else: + return (out) + + # Main + # Executes test specified by cgroup_test variable in cfg + fce = None + _fce = params.get('cgroup_test') + error.context("Executing test: %s" % _fce) + try: + fce = locals()[_fce] + except KeyError: + raise error.TestNAError("Test %s doesn't exist. 
Check 'cgroup_test' " + "variable in subtest.cfg" % _fce) + else: + return fce() diff --git a/kvm/tests/cpu_hotplug.py b/kvm/tests/cpu_hotplug.py new file mode 100644 index 00000000..d1ad66ff --- /dev/null +++ b/kvm/tests/cpu_hotplug.py @@ -0,0 +1,111 @@ +import os, logging, re +from autotest.client.shared import error +from autotest.client.virt import utils_test + + +@error.context_aware +def run_cpu_hotplug(test, params, env): + """ + Runs CPU hotplug test: + + 1) Pick up a living guest + 2) Send the monitor command cpu_set [cpu id] for each cpu we wish to have + 3) Verify if guest has the additional CPUs showing up under + /sys/devices/system/cpu + 4) Try to bring them online by writing 1 to the 'online' file inside that dir + 5) Run the CPU Hotplug test suite shipped with autotest inside guest + + @param test: KVM test object. + @param params: Dictionary with test parameters. + @param env: Dictionary with the test environment. + """ + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + timeout = int(params.get("login_timeout", 360)) + session = vm.wait_for_login(timeout=timeout) + + n_cpus_add = int(params.get("n_cpus_add", 1)) + current_cpus = int(params.get("smp", 1)) + onoff_iterations = int(params.get("onoff_iterations", 20)) + total_cpus = current_cpus + n_cpus_add + + error.context("cleaning guest dmesg before addition") + session.cmd("dmesg -c") + + error.context("Adding %d CPUs to guest" % n_cpus_add) + for i in range(total_cpus): + vm.monitor.cmd("cpu_set %s online" % i) + + output = vm.monitor.cmd("info cpus") + logging.debug("Output of info cpus:\n%s", output) + + cpu_regexp = re.compile("CPU #(\d+)") + total_cpus_monitor = len(cpu_regexp.findall(output)) + if total_cpus_monitor != total_cpus: + raise error.TestFail("Monitor reports %s CPUs, when VM should have %s" % + (total_cpus_monitor, total_cpus)) + + dmesg_after = session.cmd("dmesg") + logging.debug("Guest dmesg output after CPU add:\n%s" % dmesg_after) + + # Verify whether the new cpus are showing up on /sys + error.context("verifying if new CPUs are showing on guest's /sys dir") + n_cmd = 'find /sys/devices/system/cpu/cpu[0-99] -maxdepth 0 -type d | wc -l' + output = session.cmd(n_cmd) + logging.debug("List of cpus on /sys:\n%s" % output) + try: + cpus_after_addition = int(output) + except ValueError: + logging.error("Output of '%s': %s", n_cmd, output) + raise error.TestFail("Unable to get CPU count after CPU addition") + + if cpus_after_addition != total_cpus: + raise error.TestFail("%s CPUs are showing up under " + "/sys/devices/system/cpu, was expecting %s" % + (cpus_after_addition, total_cpus)) + + error.context("locating online files for guest's new CPUs") + r_cmd = 'find /sys/devices/system/cpu/cpu[1-99]/online -maxdepth 0 -type f' + online_files = session.cmd(r_cmd) + logging.debug("CPU online files detected: %s", online_files) + online_files = online_files.split().sort() + + if not online_files: + raise error.TestFail("Could not find CPUs that can be " + "enabled/disabled on guest") + + for online_file in online_files: + cpu_regexp = re.compile("cpu(\d+)", re.IGNORECASE) + cpu_id = cpu_regexp.findall(online_file)[0] + error.context("changing online status for CPU %s" % cpu_id) + check_online_status = session.cmd("cat %s" % online_file) + try: + check_online_status = int(check_online_status) + except ValueError: + raise error.TestFail("Unable to get online status from CPU %s" % + cpu_id) + assert(check_online_status in [0, 1]) + if check_online_status == 0: + error.context("Bringing CPU %s online" % 
cpu_id) + session.cmd("echo 1 > %s" % online_file) + + # Now that all CPUs were onlined, let's execute the + # autotest CPU Hotplug test + control_path = os.path.join(test.virtdir, "autotest_control", + "cpu_hotplug.control") + + timeout = int(params.get("cpu_hotplug_timeout"), 300) + error.context("running cpu_hotplug autotest after cpu addition") + utils_test.run_autotest(vm, session, control_path, timeout, + test.outputdir, params) + + # Last, but not least, let's offline/online the CPUs in the guest + # several times + irq = 15 + irq_mask = "f0" + for i in xrange(onoff_iterations): + session.cmd("echo %s > /proc/irq/%s/smp_affinity" % (irq_mask, irq)) + for online_file in online_files: + session.cmd("echo 0 > %s" % online_file) + for online_file in online_files: + session.cmd("echo 1 > %s" % online_file) diff --git a/kvm/tests/cpuflags.py b/kvm/tests/cpuflags.py new file mode 100644 index 00000000..a59fbd97 --- /dev/null +++ b/kvm/tests/cpuflags.py @@ -0,0 +1,801 @@ +import logging, re, random, os, time, pickle, sys, traceback +from autotest.client.shared import error, utils +from autotest.client.virt import kvm_vm +from autotest.client.virt import utils_misc, utils_test, aexpect + + +def run_cpuflags(test, params, env): + """ + Boot guest with different cpu flags and check if guest works correctly. + + @param test: kvm test object. + @param params: Dictionary with the test parameters. + @param env: Dictionary with test environment. + """ + utils_misc.Flag.aliases = utils_misc.kvm_map_flags_aliases + qemu_binary = utils_misc.get_path('.', params.get("qemu_binary", "qemu")) + + cpuflags_src = os.path.join(test.virtdir, "deps", "test_cpu_flags") + smp = int(params.get("smp", 1)) + + all_host_supported_flags = params.get("all_host_supported_flags", "no") + + mig_timeout = float(params.get("mig_timeout", "3600")) + mig_protocol = params.get("migration_protocol", "tcp") + mig_speed = params.get("mig_speed", "100M") + + cpu_model_black_list = params.get("cpu_model_blacklist", "").split(" ") + + multi_host_migration = params.get("multi_host_migration", "no") + + class HgFlags(object): + def __init__(self, cpu_model, extra_flags=set([])): + virtual_flags = set(map(utils_misc.Flag, + params.get("guest_spec_flags", "").split())) + self.hw_flags = set(map(utils_misc.Flag, + params.get("host_spec_flags", "").split())) + self.qemu_support_flags = get_all_qemu_flags() + self.host_support_flags = set(map(utils_misc.Flag, + utils_misc.get_cpu_flags())) + self.quest_cpu_model_flags = (get_guest_host_cpuflags(cpu_model) - + virtual_flags) + + self.supported_flags = (self.qemu_support_flags & + self.host_support_flags) + self.cpumodel_unsupport_flags = (self.supported_flags - + self.quest_cpu_model_flags) + + self.host_unsupported_flags = (self.quest_cpu_model_flags - + self.host_support_flags) + + self.all_possible_guest_flags = (self.quest_cpu_model_flags - + self.host_unsupported_flags) + self.all_possible_guest_flags |= self.cpumodel_unsupport_flags + + self.guest_flags = (self.quest_cpu_model_flags - + self.host_unsupported_flags) + self.guest_flags |= extra_flags + + self.host_all_unsupported_flags = set([]) + self.host_all_unsupported_flags |= self.qemu_support_flags + self.host_all_unsupported_flags -= (self.host_support_flags | + virtual_flags) + + def start_guest_with_cpuflags(cpuflags, smp=None, migration=False, + wait=True): + """ + Try to boot guest with special cpu flags and try login in to them. 
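+
+ @param cpuflags: string used as the cpu_model (qemu -cpu) value.
+ @param smp: optional number of guest CPUs.
+ @param migration: if True, create the VM in incoming migration mode.
+ @param wait: if True, wait for a login session.
+ @return: tuple (vm, session); session is None when wait is False.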
+ """ + params_b = params.copy() + params_b["cpu_model"] = cpuflags + if smp is not None: + params_b["smp"] = smp + + vm_name = "vm1-cpuflags" + vm = kvm_vm.VM(vm_name, params_b, test.bindir, env['address_cache']) + env.register_vm(vm_name, vm) + if (migration is True): + vm.create(migration_mode=mig_protocol) + else: + vm.create() + vm.verify_alive() + + session = None + if wait: + session = vm.wait_for_login() + + return (vm, session) + + def get_guest_system_cpuflags(vm_session): + """ + Get guest system cpuflags. + + @param vm_session: session to checked vm. + @return: [corespond flags] + """ + flags_re = re.compile(r'^flags\s*:(.*)$', re.MULTILINE) + out = vm_session.cmd_output("cat /proc/cpuinfo") + + flags = flags_re.search(out).groups()[0].split() + return set(map(utils_misc.Flag, flags)) + + def get_guest_host_cpuflags(cpumodel): + """ + Get cpu flags correspond with cpumodel parameters. + + @param cpumodel: Cpumodel parameter sended to . + @return: [corespond flags] + """ + cmd = qemu_binary + " -cpu ?dump" + output = utils.run(cmd).stdout + re.escape(cpumodel) + pattern = (".+%s.*\n.*\n +feature_edx .+ \((.*)\)\n +feature_" + "ecx .+ \((.*)\)\n +extfeature_edx .+ \((.*)\)\n +" + "extfeature_ecx .+ \((.*)\)\n" % (cpumodel)) + flags = [] + model = re.search(pattern, output) + if model == None: + raise error.TestFail("Cannot find %s cpu model." % (cpumodel)) + for flag_group in model.groups(): + flags += flag_group.split() + return set(map(utils_misc.Flag, flags)) + + def get_all_qemu_flags(): + cmd = qemu_binary + " -cpu ?cpuid" + output = utils.run(cmd).stdout + + flags_re = re.compile(r".*\n.*f_edx:(.*)\n.*f_ecx:(.*)\n.*extf_edx:" + "(.*)\n.*extf_ecx:(.*)") + m = flags_re.search(output) + flags = [] + for a in m.groups(): + flags += a.split() + + return set(map(utils_misc.Flag, flags)) + + def get_flags_full_name(cpu_flag): + """ + Get all name of Flag. + + @param cpu_flag: Flag + @return: all name of Flag. + """ + cpu_flag = utils_misc.Flag(cpu_flag) + for f in get_all_qemu_flags(): + if f == cpu_flag: + return utils_misc.Flag(f) + return [] + + def parse_qemu_cpucommand(cpumodel): + """ + Parse qemu cpu params. + + @param cpumodel: Cpu model command. + @return: All flags which guest must have. + """ + flags = cpumodel.split(",") + cpumodel = flags[0] + + qemu_model_flag = get_guest_host_cpuflags(cpumodel) + host_support_flag = set(map(utils_misc.Flag, + utils_misc.get_cpu_flags())) + real_flags = qemu_model_flag & host_support_flag + + for f in flags[1:]: + if f[0].startswith("+"): + real_flags |= set([get_flags_full_name(f[1:])]) + if f[0].startswith("-"): + real_flags -= set([get_flags_full_name(f[1:])]) + + return real_flags + + def get_cpu_models(): + """ + Get all cpu models from qemu. + + @return: cpu models. + """ + cmd = qemu_binary + " -cpu ?" + output = utils.run(cmd).stdout + + cpu_re = re.compile("\w+\s+\[?(\w+)\]?") + return cpu_re.findall(output) + + def check_cpuflags(cpumodel, vm_session): + """ + Check if vm flags are same like flags select by cpumodel. + + @param cpumodel: params for -cpu param in qemu-kvm + @param vm_session: session to vm to check flags. + + @return: ([excess], [missing]) flags + """ + gf = get_guest_system_cpuflags(vm_session) + rf = parse_qemu_cpucommand(cpumodel) + + logging.debug("Guest flags: %s", gf) + logging.debug("Host flags: %s", rf) + logging.debug("Flags on guest not defined by host: %s", (gf - rf)) + return rf - gf + + def get_cpu_models_supported_by_host(): + """ + Get all cpumodels which set of flags is subset of hosts flags. 
+ + @return: [cpumodels] + """ + cpumodels = [] + for cpumodel in get_cpu_models(): + flags = HgFlags(cpumodel) + if flags.host_unsupported_flags == set([]): + cpumodels.append(cpumodel) + return cpumodels + + def disable_cpu(vm_session, cpu, disable=True): + """ + Disable cpu in guest system. + + @param cpu: CPU id to disable. + @param disable: if True disable cpu else enable cpu. + """ + system_cpu_dir = "/sys/devices/system/cpu/" + cpu_online = system_cpu_dir + "cpu%d/online" % (cpu) + cpu_state = vm_session.cmd_output("cat %s" % cpu_online).strip() + if disable and cpu_state == "1": + vm_session.cmd("echo 0 > %s" % cpu_online) + logging.debug("Guest cpu %d is disabled.", cpu) + elif cpu_state == "0": + vm_session.cmd("echo 1 > %s" % cpu_online) + logging.debug("Guest cpu %d is enabled.", cpu) + + def install_cpuflags_test_on_vm(vm, dst_dir): + """ + Install stress to vm. + + @param vm: virtual machine. + @param dst_dir: Installation path. + """ + session = vm.wait_for_login() + vm.copy_files_to(cpuflags_src, dst_dir) + session.cmd("sync") + session.cmd("cd %s; make EXTRA_FLAGS='';" % + os.path.join(dst_dir, "test_cpu_flags")) + session.cmd("sync") + session.close() + + def check_cpuflags_work(vm, path, flags): + """ + Check which flags work. + + @param vm: Virtual machine. + @param path: Path of cpuflags_test + @param flags: Flags to test. + @return: Tuple (Working, not working, not tested) flags. + """ + pass_Flags = [] + not_tested = [] + not_working = [] + session = vm.wait_for_login() + for f in flags: + try: + for tc in utils_misc.kvm_map_flags_to_test[f]: + session.cmd("%s/cpuflags-test --%s" % + (os.path.join(path, "test_cpu_flags"), tc)) + pass_Flags.append(f) + except aexpect.ShellCmdError: + not_working.append(f) + except KeyError: + not_tested.append(f) + return (set(map(utils_misc.Flag, pass_Flags)), + set(map(utils_misc.Flag, not_working)), + set(map(utils_misc.Flag, not_tested))) + + def run_stress(vm, timeout, guest_flags): + """ + Run stress on vm for timeout time. + """ + ret = False + install_path = "/tmp" + install_cpuflags_test_on_vm(vm, install_path) + flags = check_cpuflags_work(vm, install_path, guest_flags) + dd_session = vm.wait_for_login() + stress_session = vm.wait_for_login() + dd_session.sendline("dd if=/dev/[svh]da of=/tmp/stressblock" + " bs=10MB count=100 &") + try: + stress_session.cmd("%s/cpuflags-test --stress %s%s" % + (os.path.join(install_path, "test_cpu_flags"), smp, + utils_misc.kvm_flags_to_stresstests(flags[0])), + timeout=timeout) + except aexpect.ShellTimeoutError: + ret = True + stress_session.close() + dd_session.close() + return ret + + def separe_cpu_model(cpu_model): + try: + (cpu_model, _) = cpu_model.split(":") + except ValueError: + cpu_model = cpu_model + return cpu_model + + def parse_cpu_model(): + """ + Parse cpu_models from config file. 
+ + @return: [(cpumodel, extra_flags)] + """ + cpu_model = params.get("cpu_model", "") + logging.debug("CPU model found: %s", str(cpu_model)) + + try: + (cpu_model, extra_flags) = cpu_model.split(":") + extra_flags = set(map(utils_misc.Flag, extra_flags.split(","))) + except ValueError: + cpu_model = cpu_model + extra_flags = set([]) + return (cpu_model, extra_flags) + + class MiniSubtest(object): + def __new__(cls, *args, **kargs): + self = super(MiniSubtest, cls).__new__(cls) + ret = None + if args is None: + args = [] + try: + ret = self.test(*args, **kargs) + finally: + if hasattr(self, "clean"): + self.clean() + return ret + + def print_exception(called_object): + exc_type, exc_value, exc_traceback = sys.exc_info() + logging.error("In function (" + called_object.__name__ + "):") + logging.error("Call from:\n" + + traceback.format_stack()[-2][:-1]) + logging.error("Exception from:\n" + + "".join(traceback.format_exception( + exc_type, exc_value, + exc_traceback.tb_next))) + + class Test_temp(MiniSubtest): + def clean(self): + logging.info("cleanup") + if (hasattr(self, "vm")): + self.vm.destroy(gracefully=False) + + # 1) -cpu ?model + class test_qemu_cpu_model(MiniSubtest): + def test(self): + cpu_models = params.get("cpu_models", "core2duo").split() + cmd = qemu_binary + " -cpu ?model" + result = utils.run(cmd) + missing = [] + cpu_models = map(separe_cpu_model, cpu_models) + for cpu_model in cpu_models: + if not cpu_model in result.stdout: + missing.append(cpu_model) + if missing: + raise error.TestFail("CPU models %s are not in output " + "'%s' of command \n%s" % + (missing, cmd, result.stdout)) + + # 2) -cpu ?dump + class test_qemu_dump(MiniSubtest): + def test(self): + cpu_models = params.get("cpu_models", "core2duo").split() + cmd = qemu_binary + " -cpu ?dump" + result = utils.run(cmd) + cpu_models = map(separe_cpu_model, cpu_models) + missing = [] + for cpu_model in cpu_models: + if not cpu_model in result.stdout: + missing.append(cpu_model) + if missing: + raise error.TestFail("CPU models %s are not in output " + "'%s' of command \n%s" % + (missing, cmd, result.stdout)) + + # 3) -cpu ?cpuid + class test_qemu_cpuid(MiniSubtest): + def test(self): + cmd = qemu_binary + " -cpu ?cpuid" + result = utils.run(cmd) + if result.stdout is "": + raise error.TestFail("There aren't any cpu Flag in output" + " '%s' of command \n%s" % + (cmd, result.stdout)) + + # 1) boot with cpu_model + class test_boot_cpu_model(Test_temp): + def test(self): + cpu_model, _ = parse_cpu_model() + logging.debug("Run tests with cpu model %s", cpu_model) + flags = HgFlags(cpu_model) + (self.vm, session) = start_guest_with_cpuflags(cpu_model) + not_enable_flags = (check_cpuflags(cpu_model, session) - + flags.hw_flags) + if not_enable_flags != set([]): + raise error.TestFail("Flags defined on host but not found " + "on guest: %s" % (not_enable_flags)) + + # 2) success boot with supported flags + class test_boot_cpu_model_and_additional_flags(Test_temp): + def test(self): + cpu_model, extra_flags = parse_cpu_model() + + flags = HgFlags(cpu_model, extra_flags) + + logging.debug("Cpu mode flags %s.", + str(flags.quest_cpu_model_flags)) + cpuf_model = cpu_model + + if all_host_supported_flags == "yes": + for fadd in flags.cpumodel_unsupport_flags: + cpuf_model += ",+" + str(fadd) + else: + for fadd in extra_flags: + cpuf_model += ",+" + str(fadd) + + for fdel in flags.host_unsupported_flags: + cpuf_model += ",-" + str(fdel) + + if all_host_supported_flags == "yes": + guest_flags = flags.all_possible_guest_flags + 
else: + guest_flags = flags.guest_flags + + (self.vm, session) = start_guest_with_cpuflags(cpuf_model) + + not_enable_flags = (check_cpuflags(cpuf_model, session) - + flags.hw_flags) + if not_enable_flags != set([]): + logging.info("Model unsupported flags: %s", + str(flags.cpumodel_unsupport_flags)) + logging.error("Flags defined on host but not on found " + "on guest: %s", str(not_enable_flags)) + logging.info("Check main instruction sets.") + + install_path = "/tmp" + install_cpuflags_test_on_vm(self.vm, install_path) + + Flags = check_cpuflags_work(self.vm, install_path, + flags.all_possible_guest_flags) + logging.info("Woking CPU flags: %s", str(Flags[0])) + logging.info("Not working CPU flags: %s", str(Flags[1])) + logging.warning("Flags works even if not deffined on guest cpu " + "flags: %s", str(Flags[0] - guest_flags)) + logging.warning("Not tested CPU flags: %s", str(Flags[2])) + + if Flags[1] & guest_flags: + raise error.TestFail("Some flags do not work: %s" % + (str(Flags[1]))) + + # 3) fail boot unsupported flags + class test_boot_warn_with_host_unsupported_flags(MiniSubtest): + def test(self): + #This is virtual cpu flags which are supported by + #qemu but no with host cpu. + cpu_model, extra_flags = parse_cpu_model() + + flags = HgFlags(cpu_model, extra_flags) + + logging.debug("Unsupported flags %s.", + str(flags.host_all_unsupported_flags)) + cpuf_model = cpu_model + ",check" + + # Add unsupported flags. + for fadd in flags.host_all_unsupported_flags: + cpuf_model += ",+" + str(fadd) + + vnc_port = utils_misc.find_free_port(5900, 6100) - 5900 + cmd = "%s -cpu %s -vnc :%d" % (qemu_binary, cpuf_model, vnc_port) + out = None + + try: + try: + out = utils.run(cmd, timeout=5, ignore_status=True).stderr + raise error.TestFail("Guest not boot with unsupported " + "flags.") + except error.CmdError, e: + out = e.result_obj.stderr + finally: + uns_re = re.compile("^warning:.*flag '(.+)'", re.MULTILINE) + warn_flags = set(map(utils_misc.Flag, uns_re.findall(out))) + fwarn_flags = flags.host_all_unsupported_flags - warn_flags + if fwarn_flags: + raise error.TestFail("Qemu did not warn the use of " + "flags %s" % str(fwarn_flags)) + + # 3) fail boot unsupported flags + class test_fail_boot_with_host_unsupported_flags(MiniSubtest): + def test(self): + #This is virtual cpu flags which are supported by + #qemu but no with host cpu. + cpu_model, extra_flags = parse_cpu_model() + + flags = HgFlags(cpu_model, extra_flags) + cpuf_model = cpu_model + ",enforce" + + logging.debug("Unsupported flags %s.", + str(flags.host_all_unsupported_flags)) + + # Add unsupported flags. 
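+ # ",enforce" was appended to the model above, so qemu is expected to
+ # reject these host-unsupported flags; the warnings it prints are
+ # verified below.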
+ for fadd in flags.host_all_unsupported_flags: + cpuf_model += ",+" + str(fadd) + + vnc_port = utils_misc.find_free_port(5900, 6100) - 5900 + cmd = "%s -cpu %s -vnc :%d" % (qemu_binary, cpuf_model, vnc_port) + out = None + try: + try: + out = utils.run(cmd, timeout=5, ignore_status=True).stderr + except error.CmdError: + logging.error("Host boot with unsupported flag") + finally: + uns_re = re.compile("^warning:.*flag '(.+)'", re.MULTILINE) + warn_flags = set(map(utils_misc.Flag, uns_re.findall(out))) + fwarn_flags = flags.host_all_unsupported_flags - warn_flags + if fwarn_flags: + raise error.TestFail("Qemu did not warn the use of " + "flags %s" % str(fwarn_flags)) + + # 4) check guest flags under load cpu, stress and system (dd) + class test_boot_guest_and_try_flags_under_load(Test_temp): + def test(self): + logging.info("Check guest working cpuflags under load " + "cpu and stress and system (dd)") + cpu_model, extra_flags = parse_cpu_model() + + flags = HgFlags(cpu_model, extra_flags) + + cpuf_model = cpu_model + + logging.debug("Cpu mode flags %s.", + str(flags.quest_cpu_model_flags)) + + if all_host_supported_flags == "yes": + logging.debug("Added flags %s.", + str(flags.cpumodel_unsupport_flags)) + + # Add unsupported flags. + for fadd in flags.cpumodel_unsupport_flags: + cpuf_model += ",+" + str(fadd) + + for fdel in flags.host_unsupported_flags: + cpuf_model += ",-" + str(fdel) + + (self.vm, _) = start_guest_with_cpuflags(cpuf_model, smp) + + if (not run_stress(self.vm, 60, flags.guest_flags)): + raise error.TestFail("Stress test ended before" + " end of test.") + + def clean(self): + logging.info("cleanup") + self.vm.destroy(gracefully=False) + + # 5) Online/offline CPU + class test_online_offline_guest_CPUs(Test_temp): + def test(self): + cpu_model, extra_flags = parse_cpu_model() + + logging.debug("Run tests with cpu model %s.", (cpu_model)) + flags = HgFlags(cpu_model, extra_flags) + + (self.vm, session) = start_guest_with_cpuflags(cpu_model, smp) + + def encap(timeout): + random.seed() + begin = time.time() + end = begin + if smp > 1: + while end - begin < 60: + cpu = random.randint(1, smp - 1) + if random.randint(0, 1): + disable_cpu(session, cpu, True) + else: + disable_cpu(session, cpu, False) + end = time.time() + return True + else: + logging.warning("For this test is necessary smp > 1.") + return False + timeout = 60 + + test_flags = flags.guest_flags + if all_host_supported_flags == "yes": + test_flags = flags.all_possible_guest_flags + + result = utils_misc.parallel([(encap, [timeout]), + (run_stress, [self.vm, timeout, + test_flags])]) + if not (result[0] and result[1]): + raise error.TestFail("Stress tests failed before" + " end of testing.") + + # 6) migration test + class test_migration_with_additional_flags(Test_temp): + def test(self): + cpu_model, extra_flags = parse_cpu_model() + + flags = HgFlags(cpu_model, extra_flags) + + logging.debug("Cpu mode flags %s.", + str(flags.quest_cpu_model_flags)) + logging.debug("Added flags %s.", + str(flags.cpumodel_unsupport_flags)) + cpuf_model = cpu_model + + # Add unsupported flags. 
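+ # Request every qemu-supported flag the cpu model lacks and drop the
+ # flags the host cannot provide, then migrate the guest under stress.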
+ for fadd in flags.cpumodel_unsupport_flags: + cpuf_model += ",+" + str(fadd) + + for fdel in flags.host_unsupported_flags: + cpuf_model += ",-" + str(fdel) + + (self.vm, _) = start_guest_with_cpuflags(cpuf_model, smp) + + install_path = "/tmp" + install_cpuflags_test_on_vm(self.vm, install_path) + flags = check_cpuflags_work(self.vm, install_path, + flags.guest_flags) + dd_session = self.vm.wait_for_login() + stress_session = self.vm.wait_for_login() + + dd_session.sendline("nohup dd if=/dev/[svh]da of=/tmp/" + "stressblock bs=10MB count=100 &") + cmd = ("nohup %s/cpuflags-test --stress %s%s &" % + (os.path.join(install_path, "test_cpu_flags"), smp, + utils_misc.kvm_flags_to_stresstests(flags[0]))) + stress_session.sendline(cmd) + + time.sleep(5) + + self.vm.monitor.migrate_set_speed(mig_speed) + self.vm.migrate(mig_timeout, mig_protocol, offline=False) + + time.sleep(5) + + #If cpuflags-test hang up during migration test raise exception + try: + stress_session.cmd('killall cpuflags-test') + except aexpect.ShellCmdError: + raise error.TestFail("Cpuflags-test should work after" + " migration.") + + def net_send_object(socket, obj): + """ + Send python object over network. + + @param ip_addr: ipaddres of waiter for data. + @param obj: object to send + """ + data = pickle.dumps(obj, pickle.HIGHEST_PROTOCOL) + socket.sendall("%6d" % len(data)) + socket.sendall(data) + + def net_recv_object(socket, timeout=60): + """ + Receive python object over network. + + @param ip_addr: ipaddres of waiter for data. + @param obj: object to send + @return: object from network + """ + try: + time_start = time.time() + data = "" + d_len = int(socket.recv(6)) + + while (len(data) < d_len and (time.time() - time_start) < timeout): + data += socket.recv(d_len - len(data)) + + data = pickle.loads(data) + return data + except: + error.TestFail("Failed to receive python object over the network") + raise + + class test_multi_host_migration(Test_temp): + def test(self): + """ + Test migration between multiple hosts. 
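+ The guest runs cpuflags-test and a dd stream as stress during the
+ migration; the working flags are checked again on the destination.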
+ """ + cpu_model, extra_flags = parse_cpu_model() + + flags = HgFlags(cpu_model, extra_flags) + + logging.debug("Cpu mode flags %s.", + str(flags.quest_cpu_model_flags)) + logging.debug("Added flags %s.", + str(flags.cpumodel_unsupport_flags)) + cpuf_model = cpu_model + + for fadd in extra_flags: + cpuf_model += ",+" + str(fadd) + + for fdel in flags.host_unsupported_flags: + cpuf_model += ",-" + str(fdel) + + install_path = "/tmp" + + class testMultihostMigration(utils_test.MultihostMigration): + def __init__(self, test, params, env): + super(testMultihostMigration, self).__init__(test, + params, + env) + + def migration_scenario(self): + srchost = self.params.get("hosts")[0] + dsthost = self.params.get("hosts")[1] + + def worker(mig_data): + vm = env.get_vm("vm1") + session = vm.wait_for_login(timeout=self.login_timeout) + + install_cpuflags_test_on_vm(vm, install_path) + + Flags = check_cpuflags_work(vm, install_path, + flags.all_possible_guest_flags) + logging.info("Woking CPU flags: %s", str(Flags[0])) + logging.info("Not working CPU flags: %s", + str(Flags[1])) + logging.warning("Flags works even if not deffined on" + " guest cpu flags: %s", + str(Flags[0] - flags.guest_flags)) + logging.warning("Not tested CPU flags: %s", + str(Flags[2])) + session.sendline("nohup dd if=/dev/[svh]da of=/tmp/" + "stressblock bs=10MB count=100 &") + + cmd = ("nohup %s/cpuflags-test --stress %s%s &" % + (os.path.join(install_path, "test_cpu_flags"), + smp, + utils_misc.kvm_flags_to_stresstests(Flags[0] & + flags.guest_flags))) + logging.debug("Guest_flags: %s", + str(flags.guest_flags)) + logging.debug("Working_flags: %s", str(Flags[0])) + logging.debug("Start stress on guest: %s", cmd) + session.sendline(cmd) + + def check_worker(mig_data): + vm = env.get_vm("vm1") + + vm.verify_illegal_instruction() + + session = vm.wait_for_login(timeout=self.login_timeout) + + try: + session.cmd('killall cpuflags-test') + except aexpect.ShellCmdError: + raise error.TestFail("The cpuflags-test program" + " should be active after" + " migration and it's not.") + + Flags = check_cpuflags_work(vm, install_path, + flags.all_possible_guest_flags) + logging.info("Woking CPU flags: %s", + str(Flags[0])) + logging.info("Not working CPU flags: %s", + str(Flags[1])) + logging.warning("Flags works even if not deffined on" + " guest cpu flags: %s", + str(Flags[0] - flags.guest_flags)) + logging.warning("Not tested CPU flags: %s", + str(Flags[2])) + + self.migrate_wait(["vm1"], srchost, dsthost, + worker, check_worker) + + params_b = params.copy() + params_b["cpu_model"] = cpu_model + mig = testMultihostMigration(test, params_b, env) + mig.run() + + test_type = params.get("test_type") + if (test_type in locals()): + tests_group = locals()[test_type] + if params.get("cpu_model"): + tests_group() + else: + cpu_models = (set(get_cpu_models_supported_by_host()) - + set(cpu_model_black_list)) + logging.info("Start test with cpu models %s" % (str(cpu_models))) + failed = [] + for cpumodel in cpu_models: + params["cpu_model"] = cpumodel + try: + tests_group() + except: + print_exception(tests_group) + failed.append(cpumodel) + if failed != []: + raise error.TestFail("Test of cpu models %s failed." 
% + (str(failed))) + else: + raise error.TestFail("Test group '%s' is not defined in" + " cpuflags test" % test_type) diff --git a/kvm/tests/enospc.py b/kvm/tests/enospc.py new file mode 100644 index 00000000..76d61dc6 --- /dev/null +++ b/kvm/tests/enospc.py @@ -0,0 +1,172 @@ +import logging, time, re, os +from autotest.client.shared import error +from autotest.client import utils +from autotest.client.virt import virt_vm, utils_misc, kvm_storage + + +class EnospcConfig(object): + """ + Performs setup for the test enospc. This is a borg class, similar to a + singleton. The idea is to keep state in memory for when we call cleanup() + on postprocessing. + """ + __shared_state = {} + def __init__(self, test, params): + self.__dict__ = self.__shared_state + root_dir = test.bindir + self.tmpdir = test.tmpdir + self.qemu_img_binary = params.get('qemu_img_binary') + if not os.path.isfile(self.qemu_img_binary): + self.qemu_img_binary = os.path.join(root_dir, + self.qemu_img_binary) + self.raw_file_path = os.path.join(self.tmpdir, 'enospc.raw') + # Here we're trying to choose fairly explanatory names so it's less + # likely that we run in conflict with other devices in the system + self.vgtest_name = params.get("vgtest_name") + self.lvtest_name = params.get("lvtest_name") + self.lvtest_device = "/dev/%s/%s" % (self.vgtest_name, self.lvtest_name) + image_dir = os.path.dirname(params.get("image_name")) + self.qcow_file_path = os.path.join(image_dir, 'enospc.qcow2') + try: + getattr(self, 'loopback') + except AttributeError: + self.loopback = '' + + @error.context_aware + def setup(self): + logging.debug("Starting enospc setup") + error.context("performing enospc setup") + utils_misc.display_attributes(self) + # Double check if there aren't any leftovers + self.cleanup() + try: + utils.run("%s create -f raw %s 10G" % + (self.qemu_img_binary, self.raw_file_path)) + # Associate a loopback device with the raw file. 
+ # Subject to race conditions, that's why try here to associate + # it with the raw file as quickly as possible + l_result = utils.run("losetup -f") + utils.run("losetup -f %s" % self.raw_file_path) + self.loopback = l_result.stdout.strip() + # Add the loopback device configured to the list of pvs + # recognized by LVM + utils.run("pvcreate %s" % self.loopback) + utils.run("vgcreate %s %s" % (self.vgtest_name, self.loopback)) + # Create an lv inside the vg with starting size of 200M + utils.run("lvcreate -L 200M -n %s %s" % + (self.lvtest_name, self.vgtest_name)) + # Create a 10GB qcow2 image in the logical volume + utils.run("%s create -f qcow2 %s 10G" % + (self.qemu_img_binary, self.lvtest_device)) + # Let's symlink the logical volume with the image name that autotest + # expects this device to have + os.symlink(self.lvtest_device, self.qcow_file_path) + except Exception: + self.cleanup() + raise + + @error.context_aware + def cleanup(self): + error.context("performing enospc cleanup") + if os.path.isfile(self.lvtest_device): + utils.run("fuser -k %s" % self.lvtest_device) + time.sleep(2) + l_result = utils.run("lvdisplay") + # Let's remove all volumes inside the volume group created + if self.lvtest_name in l_result.stdout: + utils.run("lvremove -f %s" % self.lvtest_device) + # Now, removing the volume group itself + v_result = utils.run("vgdisplay") + if self.vgtest_name in v_result.stdout: + utils.run("vgremove -f %s" % self.vgtest_name) + # Now, if we can, let's remove the physical volume from lvm list + if self.loopback: + p_result = utils.run("pvdisplay") + if self.loopback in p_result.stdout: + utils.run("pvremove -f %s" % self.loopback) + l_result = utils.run('losetup -a') + if self.loopback and (self.loopback in l_result.stdout): + try: + utils.run("losetup -d %s" % self.loopback) + except error.CmdError: + logging.error("Failed to liberate loopback %s", self.loopback) + if os.path.islink(self.qcow_file_path): + os.remove(self.qcow_file_path) + if os.path.isfile(self.raw_file_path): + os.remove(self.raw_file_path) + + +def run_enospc(test, params, env): + """ + ENOSPC test + + 1) Create a virtual disk on lvm + 2) Boot up guest with two disks + 3) Continually write data to second disk + 4) Check images and extend second disk when no space + 5) Continue paused guest + 6) Repeat step 3~5 several times + + @param test: KVM test object. + @param params: Dictionary with the test parameters. + @param env: Dictionary with test environment. 
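+ @param cfg: background_cmd - command that keeps writing to the second
+ disk (the detected device name is substituted into it).
+ @param cfg: repeat_time - number of check iterations (default 40).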
+ """ + enospc_config = EnospcConfig(test, params) + enospc_config.setup() + vm = env.get_vm(params["main_vm"]) + vm.create() + login_timeout = int(params.get("login_timeout", 360)) + session_serial = vm.wait_for_serial_login(timeout=login_timeout) + + vgtest_name = params.get("vgtest_name") + lvtest_name = params.get("lvtest_name") + logical_volume = "/dev/%s/%s" % (vgtest_name, lvtest_name) + + drive_format = params.get("drive_format") + if drive_format == "virtio": + devname = "/dev/vdb" + elif drive_format == "ide": + output = session_serial.cmd_output("dir /dev") + devname = "/dev/" + re.findall("([sh]db)\s", output)[0] + elif drive_format == "scsi": + devname = "/dev/sdb" + cmd = params.get("background_cmd") + cmd %= devname + logging.info("Sending background cmd '%s'", cmd) + session_serial.sendline(cmd) + + iterations = int(params.get("repeat_time", 40)) + i = 0 + pause_n = 0 + while i < iterations: + if vm.monitor.verify_status("paused"): + pause_n += 1 + logging.info("Checking all images in use by the VM") + for image_name in vm.params.objects("images"): + image_params = vm.params.object_params(image_name) + try: + image = kvm_storage.QemuImg(image_params, test.bindir, + image_name) + image.check_image(image_params, test.bindir) + except (virt_vm.VMError, error.TestWarn), e: + logging.error(e) + logging.info("Guest paused, extending Logical Volume size") + try: + utils.run("lvextend -L +200M %s" % logical_volume) + except error.CmdError, e: + logging.debug(e.result_obj.stdout) + vm.resume() + elif not vm.monitor.verify_status("running"): + status = str(vm.monitor.info("status")) + raise error.TestError("Unexpected guest status: %s" % status) + time.sleep(10) + i += 1 + + if pause_n == 0: + raise error.TestFail("Guest didn't pause during loop") + else: + logging.info("Guest paused %s times from %s iterations", + pause_n, iterations) + + logging.info("Final %s", str(vm.monitor.info("status"))) + enospc_config.cleanup() diff --git a/kvm/tests/floppy.py b/kvm/tests/floppy.py new file mode 100644 index 00000000..730be700 --- /dev/null +++ b/kvm/tests/floppy.py @@ -0,0 +1,71 @@ +import logging, time, os +from autotest.client.shared import error +from autotest.client import utils + + +@error.context_aware +def run_floppy(test, params, env): + """ + Test virtual floppy of guest: + + 1) Create a floppy disk image on host + 2) Start the guest with this floppy image. + 3) Make a file system on guest virtual floppy. + 4) Calculate md5sum value of a file and copy it into floppy. + 5) Verify whether the md5sum does match. + + @param test: KVM test object. + @param params: Dictionary with the test parameters. + @param env: Dictionary with test environment. 
+ """ + def master_floppy(params): + error.context("creating test floppy") + floppy = os.path.abspath(params.get("floppy_name")) + utils.run("dd if=/dev/zero of=%s bs=512 count=2880" % floppy) + + + master_floppy(params) + vm = env.get_vm(params["main_vm"]) + vm.create() + + timeout = int(params.get("login_timeout", 360)) + session = vm.wait_for_login(timeout=timeout) + + dest_dir = params.get("mount_dir") + # If mount_dir specified, treat guest as a Linux OS + # Some Linux distribution does not load floppy at boot and Windows + # needs time to load and init floppy driver + if dest_dir: + session.cmd("modprobe floppy") + else: + time.sleep(20) + + error.context("Formating floppy disk before using it") + format_cmd = params.get("format_floppy_cmd") + session.cmd(format_cmd, timeout=120) + logging.info("Floppy disk formatted successfully") + + source_file = params.get("source_file") + dest_file = params.get("dest_file") + + if dest_dir: + error.context("Mounting floppy") + session.cmd("mount /dev/fd0 %s" % dest_dir) + error.context("Testing floppy") + session.cmd(params.get("test_floppy_cmd")) + + try: + error.context("Copying file to the floppy") + session.cmd("%s %s %s" % (params.get("copy_cmd"), source_file, + dest_file)) + logging.info("Succeed to copy file '%s' into floppy disk" % source_file) + + error.context("Checking if the file is unchanged after copy") + session.cmd("%s %s %s" % (params.get("diff_file_cmd"), source_file, + dest_file)) + finally: + clean_cmd = "%s %s" % (params.get("clean_cmd"), dest_file) + session.cmd(clean_cmd) + if dest_dir: + session.cmd("umount %s" % dest_dir) + session.close() diff --git a/kvm/tests/getfd.py b/kvm/tests/getfd.py new file mode 100644 index 00000000..0f327ee4 --- /dev/null +++ b/kvm/tests/getfd.py @@ -0,0 +1,71 @@ +import os +from autotest.client.shared import error + + +def run_getfd(test, params, env): + """ + Test QEMU's getfd command + + 1) Boot up a guest + 2) Pass file descriptors via getfd + 3) Check if qemu process has a copy of the file descriptor + + @param test: KVM test object. + @param params: Dictionary with the test parameters. + @param env: Dictionary with test environment. 
+ """ + def has_fd(pid, filepath): + """ + Returns true if process has a file descriptor pointing to filepath + + @param pid: the process id + @param filepath: the full path for the file + """ + pathlist = [] + dirname = "/proc/%s/fd" % pid + dirlist = [os.path.join(dirname, f) for f in os.listdir(dirname)] + for f in dirlist: + if os.path.islink(f): + pathlist.append(os.readlink(f)) + + if filepath in pathlist: + return True + else: + return False + + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + + pid = vm.get_pid() + if pid is None: + raise error.TestError("Fail to get process id for VM") + + # directory for storing temporary files + fdfiles_dir = os.path.join(test.tmpdir, 'fdfiles') + if not os.path.isdir(fdfiles_dir): + os.mkdir(fdfiles_dir) + + # number of files + nofiles = int(params.get("number_of_files", "900")) + for n in range(nofiles): + name = "fdfile-%s" % n + path = os.path.join(fdfiles_dir, name) + fd = os.open(path, os.O_RDWR | os.O_CREAT) + response = vm.monitor.getfd(fd, name) + os.close(fd) + # getfd is supposed to generate no output + if response: + raise error.TestError("getfd returned error: %s" % response) + # check if qemu process has a copy of the fd + if not has_fd(pid, path): + raise error.TestError("QEMU process does not seem to have a file " + "descriptor pointing to file %s" % path) + + # clean up files + for n in range(nofiles): + name = "fdfile-%s" % n + path = os.path.join(fdfiles_dir, name) + try: + os.unlink(path) + except OSError: + pass diff --git a/kvm/tests/hdparm.py b/kvm/tests/hdparm.py new file mode 100644 index 00000000..c7edcd29 --- /dev/null +++ b/kvm/tests/hdparm.py @@ -0,0 +1,89 @@ +import re, logging +from autotest.client.shared import error + + +@error.context_aware +def run_hdparm(test, params, env): + """ + Test hdparm setting on linux guest os. This case will: + 1) Set/record parameters value of hard disk to low performance status. + 2) Perform device/cache read timings then record the results. + 3) Set/record parameters value of hard disk to high performance status. + 4) Perform device/cache read timings then compare two results. + + @param test: KVM test object. + @param params: Dictionary with the test parameters. + @param env: Dictionary with test environment. + """ + def check_setting_result(set_cmd, timeout): + params = re.findall("(-[a-zA-Z])([0-9]*)", set_cmd) + disk = re.findall("(\/+[a-z]*\/[a-z]*$)", set_cmd)[0] + for (param, value) in params: + cmd = "hdparm %s %s" % (param, disk) + (s, output) = session.cmd_status_output(cmd, timeout) + if s != 0: + raise error.TestError("Fail to get %s parameter value. 
" + "Output is:\n%s" % (param, output.strip())) + if value not in output: + raise error.TestFail("Fail to set %s parameter to value: %s" + % (param, value)) + + + def perform_read_timing(disk, timeout, num=5): + results = 0 + for i in range(num): + cmd = params.get("device_cache_read_cmd") % disk + (s, output) = session.cmd_status_output(cmd, timeout) + if s != 0: + raise error.TestFail("Fail to perform device/cache read" + " timings \nOutput is: %s\n" % output) + logging.info("Output of device/cache read timing check (%s of %s):" + % (i + 1, num)) + for line in output.strip().splitlines(): + logging.info(line) + (result, unit) = re.findall("= *([0-9]*.+[0-9]*) ([a-zA-Z]*)", + output)[1] + if unit == "kB": + result = float(result)/1024.0 + results += float(result) + return results/num + + + vm = env.get_vm(params["main_vm"]) + vm.create() + session = vm.wait_for_login(timeout=int(params.get("login_timeout", 360))) + try: + timeout = float(params.get("cmd_timeout", 60)) + cmd = params.get("get_disk_cmd") + output = session.cmd(cmd) + disk = output.strip() + + error.context("Setting hard disk to lower performance") + cmd = params.get("low_status_cmd") % disk + session.cmd(cmd, timeout) + + error.context("Checking hard disk keyval under lower performance " + "settings") + check_setting_result(cmd, timeout) + low_result = perform_read_timing(disk, timeout) + logging.info("Average buffered disk read speed under low performance " + "settings: %.2f MB/sec" % low_result) + + error.context("Setting hard disk to higher performance") + cmd = params.get("high_status_cmd") % disk + session.cmd(cmd, timeout) + + error.context("Checking hard disk keyval under higher performance " + "settings") + check_setting_result(cmd, timeout) + high_result = perform_read_timing(disk, timeout) + logging.info("Average buffered disk read speed under high performance " + "settings: %.2f MB/sec" % high_result) + + if not float(high_result) > float(low_result): + raise error.TestFail("High performance setting does not " + "increase read speed\n") + + finally: + if session: + session.close() diff --git a/kvm/tests/kernel_install.py b/kvm/tests/kernel_install.py new file mode 100644 index 00000000..b185e263 --- /dev/null +++ b/kvm/tests/kernel_install.py @@ -0,0 +1,200 @@ +import logging, os +from autotest.client.shared import error +from autotest.client import utils +from autotest.client.virt import utils_test +from autotest.client.virt import utils_misc + +CLIENT_TEST = "kernelinstall" + +@error.context_aware +def run_kernel_install(test, params, env): + """ + KVM kernel install test: + 1) Log into a guest + 2) Save current default kernel information + 3) Fetch necessary files for guest kernel installation + 4) Generate contol file for kernelinstall test + 5) Launch kernel installation (kernelinstall) test in guest + 6) Reboot guest after kernel is installed (optional) + 7) Do sub tests in guest with new kernel (optional) + 8) Restore grub and reboot guest (optional) + + @param test: kvm test object + @param params: Dictionary with the test parameters + @param env: Dictionary with test environment. 
+ """ + sub_test_path = os.path.join(test.bindir, "../%s" % CLIENT_TEST) + _tmp_file_list = [] + _tmp_params_dict = {} + + def _copy_file_to_test_dir(file_path): + if utils.is_url(file_path): + return file_path + file_abs_path = os.path.join(test.bindir, file_path) + dest = os.path.join(sub_test_path, os.path.basename(file_abs_path)) + return os.path.basename(utils.get_file(file_path, dest)) + + + def _save_bootloader_config(session): + """ + Save bootloader's config, in most case, it's grub + """ + default_kernel = "" + try: + default_kernel = session.cmd_output("grubby --default-kernel") + except Exception, e: + logging.warn("Save grub config failed: '%s'" % e) + + return default_kernel + + + def _restore_bootloader_config(session, default_kernel): + error.context("Restore the grub to old version") + + if not default_kernel: + logging.warn("Could not get previous grub config, do noting.") + return + + cmd = "grubby --set-default=%s" % default_kernel.strip() + try: + session.cmd(cmd) + except Exception, e: + raise error.TestWarn("Restore grub failed: '%s'" % e) + + + def _clean_up_tmp_files(file_list): + for f in file_list: + try: + os.unlink(f) + except Exception, e: + logging.warn("Could remove tmp file '%s', error message: '%s'", + f, e) + + + def _build_params(param_str, default_value=""): + param = _tmp_params_dict.get(param_str) + if param: + return {param_str: param} + param = params.get(param_str) + if param: + return {param_str: param} + return {param_str: default_value} + + + error.context("Log into a guest") + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + timeout = float(params.get("login_timeout", 240)) + session = vm.wait_for_login(timeout=timeout) + + error.context("Save current default kernel information") + default_kernel = _save_bootloader_config(session) + + # Check if there is local file in params, move local file to + # client test (kernelinstall) directory. + file_checklist = params.get("file_checklist", "") + for i in file_checklist.split(): + var_list = map(_copy_file_to_test_dir, params.get(i, "").split()) + _tmp_params_dict[i] = " ".join(var_list) + + # Env preparation for test. 
+ install_type = params.get("install_type", "brew") + sub_test_params = {} + # rpm + sub_test_params.update(_build_params("kernel_rpm_path")) + sub_test_params.update(_build_params("kernel_deps_rpms")) + + # koji + if params.get("kernel_koji_tag"): + koji_tag = "kernel_koji_tag" + else: + # Try to get brew tag if not set "kernel_koji_tag" parameter + koji_tag = "brew_tag" + + sub_test_params.update(_build_params(koji_tag)) + sub_test_params.update(_build_params("kernel_dep_pkgs")) + + # git + sub_test_params.update(_build_params('kernel_git_repo')) + sub_test_params.update(_build_params('kernel_git_repo_base')) + sub_test_params.update(_build_params('kernel_git_branch')) + sub_test_params.update(_build_params('kernel_git_commit')) + sub_test_params.update(_build_params('kernel_patch_list')) + sub_test_params.update(_build_params('kernel_config')) + sub_test_params.update(_build_params("kernel_config_list")) + + # src + sub_test_params.update(_build_params("kernel_src_pkg")) + sub_test_params.update(_build_params("kernel_config", "tests_rsc/config")) + sub_test_params.update(_build_params("kernel_patch_list")) + + tag = params.get("kernel_tag") + + error.context("Generate contol file for kernelinstall test") + #Generate control file from parameters + control_base = "params = %s\n" + control_base += "job.run_test('kernelinstall'" + control_base += ", install_type='%s'" % install_type + control_base += ", params=params" + if install_type == "tar" and tag: + control_base += ", tag='%s'" % tag + control_base += ")" + + virt_dir = os.path.dirname(utils_misc.__file__) + test_control_file = "kernel_install.control" + test_control_path = os.path.join(virt_dir, "autotest_control", + test_control_file) + + control_str = control_base % sub_test_params + try: + fd = open(test_control_path, "w") + fd.write(control_str) + fd.close() + _tmp_file_list.append(os.path.abspath(test_control_path)) + except IOError, e: + _clean_up_tmp_files(_tmp_file_list) + raise error.TestError("Fail to Generate control file," + " error message:\n '%s'" % e) + + params["test_control_file_install"] = test_control_file + + error.context("Launch kernel installation test in guest") + utils_test.run_virt_sub_test(test, params, env, sub_type="autotest", + tag="install") + + if params.get("need_reboot", "yes") == "yes": + error.context("Reboot guest after kernel is installed") + session.close() + try: + vm.reboot() + except Exception: + _clean_up_tmp_files(_tmp_file_list) + raise error.TestFail("Could not login guest after install kernel") + + # Run Subtest in guest with new kernel + if params.has_key("sub_test"): + error.context("Run sub test in guest with new kernel") + sub_test = params.get("sub_test") + tag = params.get("sub_test_tag", "run") + try: + utils_test.run_virt_sub_test(test, params, env, + sub_type=sub_test, tag=tag) + except Exception, e: + logging.error("Fail to run sub_test '%s', error message: '%s'", + sub_test, e) + + if params.get("restore_defaut_kernel", "no") == "yes": + # Restore grub + error.context("Restore grub and reboot guest") + try: + session = vm.wait_for_login(timeout=timeout) + _restore_bootloader_config(session, default_kernel) + except Exception, e: + _clean_up_tmp_files(_tmp_file_list) + session.close() + raise error.TestFail("Fail to restore to default kernel," + " error message:\n '%s'" % e) + vm.reboot() + + # Finally, let me clean up the tmp files. 
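+ # The generated control file was appended to _tmp_file_list above, so it is
+ # removed together with any other temporary files.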
+ _clean_up_tmp_files(_tmp_file_list) diff --git a/kvm/tests/ksm_overcommit.py b/kvm/tests/ksm_overcommit.py new file mode 100644 index 00000000..4ee80ed5 --- /dev/null +++ b/kvm/tests/ksm_overcommit.py @@ -0,0 +1,615 @@ +import logging, time, random, math, os +from autotest.client.shared import error +from autotest.client import utils +from autotest.client.virt import utils_misc, utils_test, aexpect +from autotest.client.virt import env_process + + +def run_ksm_overcommit(test, params, env): + """ + Test how KSM (Kernel Shared Memory) act when more than physical memory is + used. In second part we also test how KVM handles a situation when the host + runs out of memory (it is expected to pause the guest system, wait until + some process returns memory and bring the guest back to life) + + @param test: kvm test object. + @param params: Dictionary with test parameters. + @param env: Dictionary with the test wnvironment. + """ + + def _start_allocator(vm, session, timeout): + """ + Execute ksm_overcommit_guest.py on a guest, wait until it is initialized. + + @param vm: VM object. + @param session: Remote session to a VM object. + @param timeout: Timeout that will be used to verify if + ksm_overcommit_guest.py started properly. + """ + logging.debug("Starting ksm_overcommit_guest.py on guest %s", vm.name) + session.sendline("python /tmp/ksm_overcommit_guest.py") + try: + session.read_until_last_line_matches(["PASS:", "FAIL:"], timeout) + except aexpect.ExpectProcessTerminatedError, e: + e_msg = ("Command ksm_overcommit_guest.py on vm '%s' failed: %s" % + (vm.name, str(e))) + raise error.TestFail(e_msg) + + + def _execute_allocator(command, vm, session, timeout): + """ + Execute a given command on ksm_overcommit_guest.py main loop, + indicating the vm the command was executed on. + + @param command: Command that will be executed. + @param vm: VM object. + @param session: Remote session to VM object. + @param timeout: Timeout used to verify expected output. + + @return: Tuple (match index, data) + """ + logging.debug("Executing '%s' on ksm_overcommit_guest.py loop, " + "vm: %s, timeout: %s", command, vm.name, timeout) + session.sendline(command) + try: + (match, data) = session.read_until_last_line_matches( + ["PASS:","FAIL:"], + timeout) + except aexpect.ExpectProcessTerminatedError, e: + e_msg = ("Failed to execute command '%s' on " + "ksm_overcommit_guest.py, vm '%s': %s" % + (command, vm.name, str(e))) + raise error.TestFail(e_msg) + return (match, data) + + + def get_ksmstat(): + """ + Return sharing memory by ksm in MB + + @return: memory in MB + """ + f = open('/sys/kernel/mm/ksm/pages_sharing') + ksm_pages = int(f.read()) + f.close() + return ((ksm_pages*4096)/1e6) + + + def initialize_guests(): + """ + Initialize guests (fill their memories with specified patterns). 
+ """ + logging.info("Phase 1: filling guest memory pages") + for session in lsessions: + vm = lvms[lsessions.index(session)] + + logging.debug("Turning off swap on vm %s", vm.name) + session.cmd("swapoff -a", timeout=300) + + # Start the allocator + _start_allocator(vm, session, 60 * perf_ratio) + + # Execute allocator on guests + for i in range(0, vmsc): + vm = lvms[i] + + a_cmd = "mem = MemFill(%d, %s, %s)" % (ksm_size, skeys[i], dkeys[i]) + _execute_allocator(a_cmd, vm, lsessions[i], 60 * perf_ratio) + + a_cmd = "mem.value_fill(%d)" % skeys[0] + _execute_allocator(a_cmd, vm, lsessions[i], 120 * perf_ratio) + + # Let ksm_overcommit_guest.py do its job + # (until shared mem reaches expected value) + shm = 0 + j = 0 + logging.debug("Target shared meminfo for guest %s: %s", vm.name, + ksm_size) + while ((new_ksm and (shm < (ksm_size*(i+1)))) or + (not new_ksm and (shm < (ksm_size)))): + if j > 64: + logging.debug(utils_test.get_memory_info(lvms)) + raise error.TestError("SHM didn't merge the memory until " + "the DL on guest: %s" % vm.name) + st = ksm_size / 200 * perf_ratio + logging.debug("Waiting %ds before proceeding...", st) + time.sleep(st) + if (new_ksm): + shm = get_ksmstat() + else: + shm = vm.get_shared_meminfo() + logging.debug("Shared meminfo for guest %s after " + "iteration %s: %s", vm.name, j, shm) + j += 1 + + # Keep some reserve + rt = ksm_size / 200 * perf_ratio + logging.debug("Waiting %ds before proceeding...", rt) + time.sleep(rt) + + logging.debug(utils_test.get_memory_info(lvms)) + logging.info("Phase 1: PASS") + + + def separate_first_guest(): + """ + Separate memory of the first guest by generating special random series + """ + logging.info("Phase 2: Split the pages on the first guest") + + a_cmd = "mem.static_random_fill()" + data = _execute_allocator(a_cmd, lvms[0], lsessions[0], + 120 * perf_ratio)[1] + + r_msg = data.splitlines()[-1] + logging.debug("Return message of static_random_fill: %s", r_msg) + out = int(r_msg.split()[4]) + logging.debug("Performance: %dMB * 1000 / %dms = %dMB/s", ksm_size, out, + (ksm_size * 1000 / out)) + logging.debug(utils_test.get_memory_info(lvms)) + logging.debug("Phase 2: PASS") + + + def split_guest(): + """ + Sequential split of pages on guests up to memory limit + """ + logging.info("Phase 3a: Sequential split of pages on guests up to " + "memory limit") + last_vm = 0 + session = None + vm = None + for i in range(1, vmsc): + # Check VMs + for j in range(0, vmsc): + if not lvms[j].is_alive: + e_msg = ("VM %d died while executing static_random_fill in " + "VM %d on allocator loop" % (j, i)) + raise error.TestFail(e_msg) + vm = lvms[i] + session = lsessions[i] + a_cmd = "mem.static_random_fill()" + logging.debug("Executing %s on ksm_overcommit_guest.py loop, " + "vm: %s", a_cmd, vm.name) + session.sendline(a_cmd) + + out = "" + try: + logging.debug("Watching host memory while filling vm %s memory", + vm.name) + while not out.startswith("PASS") and not out.startswith("FAIL"): + if not vm.is_alive(): + e_msg = ("VM %d died while executing static_random_fill" + " on allocator loop" % i) + raise error.TestFail(e_msg) + free_mem = int(utils.read_from_meminfo("MemFree")) + if (ksm_swap): + free_mem = (free_mem + + int(utils.read_from_meminfo("SwapFree"))) + logging.debug("Free memory on host: %d", free_mem) + + # We need to keep some memory for python to run. 
+ if (free_mem < 64000) or (ksm_swap and + free_mem < (450000 * perf_ratio)): + vm.pause() + for j in range(0, i): + lvms[j].destroy(gracefully = False) + time.sleep(20) + vm.resume() + logging.debug("Only %s free memory, killing %d guests", + free_mem, (i - 1)) + last_vm = i + break + out = session.read_nonblocking(0.1, 1) + time.sleep(2) + except OSError: + logging.debug("Only %s host free memory, killing %d guests", + free_mem, (i - 1)) + logging.debug("Stopping %s", vm.name) + vm.pause() + for j in range(0, i): + logging.debug("Destroying %s", lvms[j].name) + lvms[j].destroy(gracefully = False) + time.sleep(20) + vm.resume() + last_vm = i + + if last_vm != 0: + break + logging.debug("Memory filled for guest %s", vm.name) + + logging.info("Phase 3a: PASS") + + logging.info("Phase 3b: Check if memory in max loading guest is right") + for i in range(last_vm + 1, vmsc): + lsessions[i].close() + if i == (vmsc - 1): + logging.debug(utils_test.get_memory_info([lvms[i]])) + logging.debug("Destroying guest %s", lvms[i].name) + lvms[i].destroy(gracefully = False) + + # Verify last machine with randomly generated memory + a_cmd = "mem.static_random_verify()" + _execute_allocator(a_cmd, lvms[last_vm], lsessions[last_vm], + (mem / 200 * 50 * perf_ratio)) + logging.debug(utils_test.get_memory_info([lvms[last_vm]])) + + lsessions[i].cmd_output("die()", 20) + lvms[last_vm].destroy(gracefully = False) + logging.info("Phase 3b: PASS") + + + def split_parallel(): + """ + Parallel page spliting + """ + logging.info("Phase 1: parallel page spliting") + # We have to wait until allocator is finished (it waits 5 seconds to + # clean the socket + + session = lsessions[0] + vm = lvms[0] + for i in range(1, max_alloc): + lsessions.append(vm.wait_for_login(timeout=360)) + + session.cmd("swapoff -a", timeout=300) + + for i in range(0, max_alloc): + # Start the allocator + _start_allocator(vm, lsessions[i], 60 * perf_ratio) + + logging.info("Phase 1: PASS") + + logging.info("Phase 2a: Simultaneous merging") + logging.debug("Memory used by allocator on guests = %dMB", + (ksm_size / max_alloc)) + + for i in range(0, max_alloc): + a_cmd = "mem = MemFill(%d, %s, %s)" % ((ksm_size / max_alloc), + skeys[i], dkeys[i]) + _execute_allocator(a_cmd, vm, lsessions[i], 60 * perf_ratio) + + a_cmd = "mem.value_fill(%d)" % (skeys[0]) + _execute_allocator(a_cmd, vm, lsessions[i], 90 * perf_ratio) + + # Wait until ksm_overcommit_guest.py merges the pages (3 * ksm_size / 3) + shm = 0 + i = 0 + logging.debug("Target shared memory size: %s", ksm_size) + while (shm < ksm_size): + if i > 64: + logging.debug(utils_test.get_memory_info(lvms)) + raise error.TestError("SHM didn't merge the memory until DL") + wt = ksm_size / 200 * perf_ratio + logging.debug("Waiting %ds before proceed...", wt) + time.sleep(wt) + if (new_ksm): + shm = get_ksmstat() + else: + shm = vm.get_shared_meminfo() + logging.debug("Shared meminfo after attempt %s: %s", i, shm) + i += 1 + + logging.debug(utils_test.get_memory_info([vm])) + logging.info("Phase 2a: PASS") + + logging.info("Phase 2b: Simultaneous spliting") + # Actual splitting + for i in range(0, max_alloc): + a_cmd = "mem.static_random_fill()" + data = _execute_allocator(a_cmd, vm, lsessions[i], + 90 * perf_ratio)[1] + + data = data.splitlines()[-1] + logging.debug(data) + out = int(data.split()[4]) + logging.debug("Performance: %dMB * 1000 / %dms = %dMB/s", + (ksm_size / max_alloc), out, + (ksm_size * 1000 / out / max_alloc)) + logging.debug(utils_test.get_memory_info([vm])) + logging.info("Phase 2b: 
PASS") + + logging.info("Phase 2c: Simultaneous verification") + for i in range(0, max_alloc): + a_cmd = "mem.static_random_verify()" + data = _execute_allocator(a_cmd, vm, lsessions[i], + (mem / 200 * 50 * perf_ratio))[1] + logging.info("Phase 2c: PASS") + + logging.info("Phase 2d: Simultaneous merging") + # Actual splitting + for i in range(0, max_alloc): + a_cmd = "mem.value_fill(%d)" % skeys[0] + data = _execute_allocator(a_cmd, vm, lsessions[i], + 120 * perf_ratio)[1] + logging.debug(utils_test.get_memory_info([vm])) + logging.info("Phase 2d: PASS") + + logging.info("Phase 2e: Simultaneous verification") + for i in range(0, max_alloc): + a_cmd = "mem.value_check(%d)" % skeys[0] + data = _execute_allocator(a_cmd, vm, lsessions[i], + (mem / 200 * 50 * perf_ratio))[1] + logging.info("Phase 2e: PASS") + + logging.info("Phase 2f: Simultaneous spliting last 96B") + for i in range(0, max_alloc): + a_cmd = "mem.static_random_fill(96)" + data = _execute_allocator(a_cmd, vm, lsessions[i], + 60 * perf_ratio)[1] + + data = data.splitlines()[-1] + out = int(data.split()[4]) + logging.debug("Performance: %dMB * 1000 / %dms = %dMB/s", + ksm_size/max_alloc, out, + (ksm_size * 1000 / out / max_alloc)) + + logging.debug(utils_test.get_memory_info([vm])) + logging.info("Phase 2f: PASS") + + logging.info("Phase 2g: Simultaneous verification last 96B") + for i in range(0, max_alloc): + a_cmd = "mem.static_random_verify(96)" + _, data = _execute_allocator(a_cmd, vm, lsessions[i], + (mem / 200 * 50 * perf_ratio)) + logging.debug(utils_test.get_memory_info([vm])) + logging.info("Phase 2g: PASS") + + logging.debug("Cleaning up...") + for i in range(0, max_alloc): + lsessions[i].cmd_output("die()", 20) + session.close() + vm.destroy(gracefully = False) + + + # Main test code + logging.info("Starting phase 0: Initialization") + new_ksm = False + if (os.path.exists("/sys/kernel/mm/ksm/run")): + utils.run("echo 50 > /sys/kernel/mm/ksm/sleep_millisecs") + utils.run("echo 5000 > /sys/kernel/mm/ksm/pages_to_scan") + utils.run("echo 1 > /sys/kernel/mm/ksm/run") + + e_up = "/sys/kernel/mm/transparent_hugepage/enabled" + e_rh = "/sys/kernel/mm/redhat_transparent_hugepage/enabled" + if os.path.exists(e_up): + utils.run("echo 'never' > %s" % e_up) + if os.path.exists(e_rh): + utils.run("echo 'never' > %s" % e_rh) + new_ksm = True + else: + try: + utils.run("modprobe ksm") + utils.run("ksmctl start 5000 100") + except error.CmdError, e: + raise error.TestFail("Failed to load KSM: %s" % e) + + # host_reserve: mem reserve kept for the host system to run + host_reserve = int(params.get("ksm_host_reserve", -1)) + if (host_reserve == -1): + # default host_reserve = MemAvailable + one_minimal_guest(128MB) + # later we add 64MB per additional guest + host_reserve = ((utils.memtotal() - utils.read_from_meminfo("MemFree")) + / 1024 + 128) + # using default reserve + _host_reserve = True + else: + _host_reserve = False + + # guest_reserve: mem reserve kept to avoid guest OS to kill processes + guest_reserve = int(params.get("ksm_guest_reserve", -1)) + if (guest_reserve == -1): + # default guest_reserve = minimal_system_mem(256MB) + # later we add tmpfs overhead + guest_reserve = 256 + # using default reserve + _guest_reserve = True + else: + _guest_reserve = False + + max_vms = int(params.get("max_vms", 2)) + overcommit = float(params.get("ksm_overcommit_ratio", 2.0)) + max_alloc = int(params.get("ksm_parallel_ratio", 1)) + + # vmsc: count of all used VMs + vmsc = int(overcommit) + 1 + vmsc = max(vmsc, max_vms) + + if 
(params['ksm_mode'] == "serial"): + max_alloc = vmsc + if _host_reserve: + # First round of additional guest reserves + host_reserve += vmsc * 64 + _host_reserve = vmsc + + host_mem = (int(utils.memtotal()) / 1024 - host_reserve) + + ksm_swap = False + if params.get("ksm_swap") == "yes": + ksm_swap = True + + # Performance ratio + perf_ratio = params.get("ksm_perf_ratio") + if perf_ratio: + perf_ratio = float(perf_ratio) + else: + perf_ratio = 1 + + if (params['ksm_mode'] == "parallel"): + vmsc = 1 + overcommit = 1 + mem = host_mem + # 32bit system adjustment + if not params['image_name'].endswith("64"): + logging.debug("Probably i386 guest architecture, " + "max allocator mem = 2G") + # Guest can have more than 2G but + # kvm mem + 1MB (allocator itself) can't + if (host_mem > 3100): + mem = 3100 + + if os.popen("uname -i").readline().startswith("i386"): + logging.debug("Host is i386 architecture, max guest mem is 2G") + # Guest system with qemu overhead (64M) can't have more than 2G + if mem > 3100 - 64: + mem = 3100 - 64 + + else: + # mem: Memory of the guest systems. Maximum must be less than + # host's physical ram + mem = int(overcommit * host_mem / vmsc) + + # 32bit system adjustment + if not params['image_name'].endswith("64"): + logging.debug("Probably i386 guest architecture, " + "max allocator mem = 2G") + # Guest can have more than 2G but + # kvm mem + 1MB (allocator itself) can't + if mem - guest_reserve - 1 > 3100: + vmsc = int(math.ceil((host_mem * overcommit) / + (3100 + guest_reserve))) + if _host_reserve: + host_reserve += (vmsc - _host_reserve) * 64 + host_mem -= (vmsc - _host_reserve) * 64 + _host_reserve = vmsc + mem = int(math.floor(host_mem * overcommit / vmsc)) + + if os.popen("uname -i").readline().startswith("i386"): + logging.debug("Host is i386 architecture, max guest mem is 2G") + # Guest system with qemu overhead (64M) can't have more than 2G + if mem > 3100 - 64: + vmsc = int(math.ceil((host_mem * overcommit) / + (3100 - 64.0))) + if _host_reserve: + host_reserve += (vmsc - _host_reserve) * 64 + host_mem -= (vmsc - _host_reserve) * 64 + _host_reserve = vmsc + mem = int(math.floor(host_mem * overcommit / vmsc)) + + # 0.055 represents OS + TMPFS additional reserve per guest ram MB + if _guest_reserve: + guest_reserve += math.ceil(mem * 0.055) + + swap = int(utils.read_from_meminfo("SwapTotal")) / 1024 + + logging.debug("Overcommit = %f", overcommit) + logging.debug("True overcommit = %f ", (float(vmsc * mem) / + float(host_mem))) + logging.debug("Host memory = %dM", host_mem) + logging.debug("Guest memory = %dM", mem) + logging.debug("Using swap = %s", ksm_swap) + logging.debug("Swap = %dM", swap) + logging.debug("max_vms = %d", max_vms) + logging.debug("Count of all used VMs = %d", vmsc) + logging.debug("Performance_ratio = %f", perf_ratio) + + # Generate unique keys for random series + skeys = [] + dkeys = [] + for i in range(0, max(vmsc, max_alloc)): + key = random.randrange(0, 255) + while key in skeys: + key = random.randrange(0, 255) + skeys.append(key) + + key = random.randrange(0, 999) + while key in dkeys: + key = random.randrange(0, 999) + dkeys.append(key) + + logging.debug("skeys: %s", skeys) + logging.debug("dkeys: %s", dkeys) + + lvms = [] + lsessions = [] + + # As we don't know the number and memory amount of VMs in advance, + # we need to specify and create them here + vm_name = params.get("main_vm") + params['mem'] = mem + params['vms'] = vm_name + # Associate pidfile name + params['pid_' + vm_name] = utils_misc.generate_tmp_file_name(vm_name, 
+ 'pid') + if not params.get('extra_params'): + params['extra_params'] = ' ' + params['extra_params_' + vm_name] = params.get('extra_params') + params['extra_params_' + vm_name] += (" -pidfile %s" % + (params.get('pid_' + vm_name))) + params['extra_params'] = params.get('extra_params_'+vm_name) + + # ksm_size: amount of memory used by allocator + ksm_size = mem - guest_reserve + logging.debug("Memory used by allocator on guests = %dM", ksm_size) + + # Creating the first guest + env_process.preprocess_vm(test, params, env, vm_name) + lvms.append(env.get_vm(vm_name)) + if not lvms[0]: + raise error.TestError("VM object not found in environment") + if not lvms[0].is_alive(): + raise error.TestError("VM seems to be dead; Test requires a living " + "VM") + + logging.debug("Booting first guest %s", lvms[0].name) + + lsessions.append(lvms[0].wait_for_login(timeout=360)) + # Associate vm PID + try: + tmp = open(params.get('pid_' + vm_name), 'r') + params['pid_' + vm_name] = int(tmp.readline()) + except Exception: + raise error.TestFail("Could not get PID of %s" % (vm_name)) + + # Creating other guest systems + for i in range(1, vmsc): + vm_name = "vm" + str(i + 1) + params['pid_' + vm_name] = utils_misc.generate_tmp_file_name(vm_name, + 'pid') + params['extra_params_' + vm_name] = params.get('extra_params') + params['extra_params_' + vm_name] += (" -pidfile %s" % + (params.get('pid_' + vm_name))) + params['extra_params'] = params.get('extra_params_' + vm_name) + + # Last VM is later used to run more allocators simultaneously + lvms.append(lvms[0].clone(vm_name, params)) + env.register_vm(vm_name, lvms[i]) + params['vms'] += " " + vm_name + + logging.debug("Booting guest %s", lvms[i].name) + lvms[i].create() + if not lvms[i].is_alive(): + raise error.TestError("VM %s seems to be dead; Test requires a" + "living VM" % lvms[i].name) + + lsessions.append(lvms[i].wait_for_login(timeout=360)) + try: + tmp = open(params.get('pid_' + vm_name), 'r') + params['pid_' + vm_name] = int(tmp.readline()) + except Exception: + raise error.TestFail("Could not get PID of %s" % (vm_name)) + + # Let guests rest a little bit :-) + st = vmsc * 2 * perf_ratio + logging.debug("Waiting %ds before proceed", st) + time.sleep(vmsc * 2 * perf_ratio) + logging.debug(utils_test.get_memory_info(lvms)) + + # Copy ksm_overcommit_guest.py into guests + virt_dir = os.path.join(os.environ['AUTODIR'], 'virt') + vksmd_src = os.path.join(virt_dir, "scripts", "ksm_overcommit_guest.py") + dst_dir = "/tmp" + for vm in lvms: + vm.copy_files_to(vksmd_src, dst_dir) + logging.info("Phase 0: PASS") + + if params['ksm_mode'] == "parallel": + logging.info("Starting KSM test parallel mode") + split_parallel() + logging.info("KSM test parallel mode: PASS") + elif params['ksm_mode'] == "serial": + logging.info("Starting KSM test serial mode") + initialize_guests() + separate_first_guest() + split_guest() + logging.info("KSM test serial mode: PASS") diff --git a/kvm/tests/migration.py b/kvm/tests/migration.py new file mode 100644 index 00000000..f164727f --- /dev/null +++ b/kvm/tests/migration.py @@ -0,0 +1,117 @@ +import logging, time, types +from autotest.client.shared import error +from autotest.client.virt import utils_misc + + +def run_migration(test, params, env): + """ + KVM migration test: + 1) Get a live VM and clone it. + 2) Verify that the source VM supports migration. If it does, proceed with + the test. + 3) Send a migration command to the source VM and wait until it's finished. + 4) Kill off the source VM. 
+ 3) Log into the destination VM after the migration is finished. + 4) Compare the output of a reference command executed on the source with + the output of the same command on the destination machine. + + @param test: kvm test object. + @param params: Dictionary with test parameters. + @param env: Dictionary with the test environment. + """ + def get_functions(func_names, locals_dict): + """ + Find sub function(s) in this function with the given name(s). + """ + if not func_names: + return [] + funcs = [] + for f in func_names.split(): + f = locals_dict.get(f) + if isinstance(f, types.FunctionType): + funcs.append(f) + return funcs + + def mig_set_speed(): + mig_speed = params.get("mig_speed", "1G") + return vm.monitor.migrate_set_speed(mig_speed) + + mig_timeout = float(params.get("mig_timeout", "3600")) + mig_protocol = params.get("migration_protocol", "tcp") + mig_cancel_delay = int(params.get("mig_cancel") == "yes") * 2 + offline = params.get("offline", "no") == "yes" + check = params.get("vmstate_check", "no") == "yes" + living_guest_os = params.get("migration_living_guest", "yes") == "yes" + + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + + if living_guest_os: + timeout = int(params.get("login_timeout", 360)) + session = vm.wait_for_login(timeout=timeout) + + # Get the output of migration_test_command + test_command = params.get("migration_test_command") + reference_output = session.cmd_output(test_command) + + # Start some process in the background (and leave the session open) + background_command = params.get("migration_bg_command", "") + session.sendline(background_command) + time.sleep(5) + + # Start another session with the guest and make sure the background + # process is running + session2 = vm.wait_for_login(timeout=timeout) + + try: + check_command = params.get("migration_bg_check_command", "") + session2.cmd(check_command, timeout=30) + session2.close() + + # run some functions before migrate start. + pre_migrate = get_functions(params.get("pre_migrate"), locals()) + for func in pre_migrate: + func() + + # Migrate the VM + vm.migrate(mig_timeout, mig_protocol, mig_cancel_delay, offline, + check) + + # run some functions after migrate finish. 
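+ # post_migrate names are resolved by get_functions(), the same way
+ # pre_migrate was handled before the migration above.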
+ post_migrate = get_functions(params.get("post_migrate"), locals()) + for func in post_migrate: + func() + + # Log into the guest again + logging.info("Logging into guest after migration...") + session2 = vm.wait_for_login(timeout=30) + logging.info("Logged in after migration") + + # Make sure the background process is still running + session2.cmd(check_command, timeout=30) + + # Get the output of migration_test_command + output = session2.cmd_output(test_command) + + # Compare output to reference output + if output != reference_output: + logging.info("Command output before migration differs from " + "command output after migration") + logging.info("Command: %s", test_command) + logging.info("Output before:" + + utils_misc.format_str_for_message(reference_output)) + logging.info("Output after:" + + utils_misc.format_str_for_message(output)) + raise error.TestFail("Command '%s' produced different output " + "before and after migration" % test_command) + + finally: + # Kill the background process + if session2 and session2.is_alive(): + session2.cmd_output(params.get("migration_bg_kill_command", "")) + + session2.close() + session.close() + else: + # Just migrate without depending on a living guest OS + vm.migrate(mig_timeout, mig_protocol, mig_cancel_delay, offline, check) diff --git a/kvm/tests/migration_multi_host.py b/kvm/tests/migration_multi_host.py new file mode 100644 index 00000000..d0767b41 --- /dev/null +++ b/kvm/tests/migration_multi_host.py @@ -0,0 +1,27 @@ +from autotest.client.virt import utils_test + + +def run_migration_multi_host(test, params, env): + """ + KVM multi-host migration test: + + Migration execution progress is described in documentation + for migrate method in class MultihostMigration. + + @param test: kvm test object. + @param params: Dictionary with test parameters. + @param env: Dictionary with the test environment. + """ + class TestMultihostMigration(utils_test.MultihostMigration): + def __init__(self, test, params, env): + super(TestMultihostMigration, self).__init__(test, params, env) + + def migration_scenario(self): + srchost = self.params.get("hosts")[0] + dsthost = self.params.get("hosts")[1] + vms = params.get("vms").split() + + self.migrate_wait(vms, srchost, dsthost) + + mig = TestMultihostMigration(test, params, env) + mig.run() diff --git a/kvm/tests/migration_multi_host_fd.py b/kvm/tests/migration_multi_host_fd.py new file mode 100644 index 00000000..7bfc8283 --- /dev/null +++ b/kvm/tests/migration_multi_host_fd.py @@ -0,0 +1,124 @@ +import logging, socket, time, errno, os, fcntl +from autotest.client.virt import utils_test, utils_misc +from autotest.client.shared.syncdata import SyncData + +def run_migration_multi_host_fd(test, params, env): + """ + KVM multi-host migration over fd test: + + Migrate machine over socket's fd. Migration execution progress is + described in documentation for migrate method in class MultihostMigration. + This test allows migrate only one machine at once. + + @param test: kvm test object. + @param params: Dictionary with test parameters. + @param env: Dictionary with the test environment. + """ + class TestMultihostMigrationFd(utils_test.MultihostMigration): + def __init__(self, test, params, env): + super(TestMultihostMigrationFd, self).__init__(test, params, env) + + def migrate_vms_src(self, mig_data): + """ + Migrate vms source. + + @param mig_Data: Data for migration. + + For change way how machine migrates is necessary + re implement this method. 
+ """ + logging.info("Start migrating now...") + vm = mig_data.vms[0] + vm.migrate(dest_host=mig_data.dst, + protocol="fd", + fd_src=mig_data.params['migration_fd']) + + def _check_vms_source(self, mig_data): + for vm in mig_data.vms: + vm.wait_for_login(timeout=self.login_timeout) + self._hosts_barrier(mig_data.hosts, mig_data.mig_id, + 'prepare_VMS', 60) + + def _check_vms_dest(self, mig_data): + self._hosts_barrier(mig_data.hosts, mig_data.mig_id, + 'prepare_VMS', 120) + os.close(mig_data.params['migration_fd']) + + def _connect_to_server(self, host, port, timeout=60): + """ + Connect to network server. + """ + endtime = time.time() + timeout + sock = None + while endtime > time.time(): + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + try: + sock.connect((host, port)) + break + except socket.error, err: + (code, _) = err + if (code != errno.ECONNREFUSED): + raise + time.sleep(1) + + return sock + + def _create_server(self, port, timeout=60): + """ + Create network server. + """ + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + sock.settimeout(timeout) + sock.bind(('', port)) + sock.listen(1) + return sock + + def migration_scenario(self): + srchost = self.params.get("hosts")[0] + dsthost = self.params.get("hosts")[1] + mig_port = None + + if params.get("hostid") == self.master_id(): + mig_port = utils_misc.find_free_port(5200, 6000) + + sync = SyncData(self.master_id(), self.hostid, + self.params.get("hosts"), + {'src': srchost, 'dst': dsthost, + 'port': "ports"}, self.sync_server) + mig_port = sync.sync(mig_port, timeout=120) + mig_port = mig_port[srchost] + logging.debug("Migration port %d" % (mig_port)) + + if params.get("hostid") != self.master_id(): + s = self._connect_to_server(srchost, mig_port) + try: + fd = s.fileno() + logging.debug("File descrtiptor %d used for" + " migration." % (fd)) + + self.migrate_wait(["vm1"], srchost, dsthost, mig_mode="fd", + params_append={"migration_fd": fd}) + finally: + s.close() + else: + s = self._create_server(mig_port) + try: + conn, _ = s.accept() + fd = conn.fileno() + logging.debug("File descrtiptor %d used for" + " migration." % (fd)) + + #Prohibits descriptor inheritance. + flags = fcntl.fcntl(fd, fcntl.F_GETFD) + flags |= fcntl.FD_CLOEXEC + fcntl.fcntl(fd, fcntl.F_SETFD, flags) + + self.migrate_wait(["vm1"], srchost, dsthost, mig_mode="fd", + params_append={"migration_fd": fd}) + conn.close() + finally: + s.close() + + mig = TestMultihostMigrationFd(test, params, env) + mig.run() diff --git a/kvm/tests/migration_multi_host_with_file_transfer.py b/kvm/tests/migration_multi_host_with_file_transfer.py new file mode 100644 index 00000000..29ffb719 --- /dev/null +++ b/kvm/tests/migration_multi_host_with_file_transfer.py @@ -0,0 +1,243 @@ +import logging, threading +from autotest.client import utils as client_utils +from autotest.client.shared import utils, error +from autotest.client.shared.syncdata import SyncData +from autotest.client.virt import env_process, utils_test, remote +from autotest.client.virt import utils_misc + + +@error.context_aware +def run_migration_multi_host_with_file_transfer(test, params, env): + """ + KVM multi-host migration test: + + Migration execution progress is described in documentation + for migrate method in class MultihostMigration. + + This test starts vm on master host. 
When vm is started then it starts file + transfer between vm and master host: + work: migration: + host1->vm mig_1(host1->host2) + vm->host1 + checksum file + host1->vm + vm->host1 mig_2(host2<-host1) + checksum file + host1->vm + vm->host1 + checksum file mig_3(host1<-host2) + ... ... + ... ... + ... ... + host1->vm ... + vm->host1 ... + checksum file mig_migrate_count(host2<-host1) + + end: + check all checksum with orig_file checksum + + @param test: Kvm test object. + @param params: Dictionary with test parameters. + @param env: Dictionary with the test environment. + @param cfg: + file_size: Size of generated file. + transfer_timeout: Timeout for file transfer. + transfer_speed: File transfer speed limit. + guest_path: Path where file is stored on guest. + """ + guest_root = params.get("guest_root", "root") + guest_pass = params.get("password", "123456") + + shell_client = params.get("shell_client", "ssh") + shell_port = int(params.get("shell_port", "22")) + shell_prompt = params.get("shell_prompt") + + #Path where file is stored on guest. + guest_path = params.get("guest_path", "/tmp/file") + #Path where file is generated. + host_path = "/tmp/file-%s" % utils_misc.generate_random_string(6) + #Path on host for file copied from vm. + host_path_returned = "%s-returned" % host_path + file_size = params.get("file_size", "500") + transfer_timeout = int(params.get("transfer_timeout", "240")) + transfer_speed = int(params.get("transfer_speed", "100")) * 1000 + d_transfer_timeout = 2 * transfer_timeout + + #Count of migration during file transfer. + migrate_count = int(params.get("migrate_count", "3")) + + class TestMultihostMigration(utils_test.MultihostMigration): + def __init__(self, test, params, env): + super(TestMultihostMigration, self).__init__(test, params, env) + self.vm = None + self.vm_addr = None + self.srchost = self.params.get("hosts")[0] + self.dsthost = self.params.get("hosts")[1] + self.slave = self.dsthost + self.id = {'src': self.srchost, + 'dst': self.dsthost, + "type": "file_trasfer"} + self.file_check_sums = [] + + def check_vms(self, mig_data): + """ + Check vms after migrate. + + @param mig_data: object with migration data. + """ + for vm in mig_data.vms: + if not utils_test.guest_active(vm): + raise error.TestFail("Guest not active after migration") + + logging.info("Migrated guest appears to be running") + + logging.info("Logging into migrated guest after migration...") + for vm in mig_data.vms: + vm.wait_for_login(timeout=self.login_timeout) + + def _prepare_vm(self, vm_name): + """ + Prepare, start vm and return vm. + + @param vm_name: Class with data necessary for migration. + + @return: Started VM. + """ + new_params = self.params.copy() + + new_params['migration_mode'] = None + new_params['start_vm'] = 'yes' + self.vm_lock.acquire() + env_process.process(self.test, new_params, self.env, + env_process.preprocess_image, + env_process.preprocess_vm) + self.vm_lock.release() + vm = self.env.get_vm(vm_name) + vm.wait_for_login(timeout=self.login_timeout) + return vm + + def _copy_until_end(self, end_event): + #Copy until migration not end. 
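+ # Repeatedly copy the generated file host -> guest -> host and record the
+ # checksum of every returned copy; the loop ends once the migration side
+ # sets end_event after migrate_count migrations.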
+ while not end_event.isSet(): + logging.info("Copy file to guest %s.", self.vm_addr) + remote.copy_files_to(self.vm_addr, "scp", guest_root, + guest_pass, 22, host_path, + guest_path, limit=transfer_speed, + verbose=True, + timeout=transfer_timeout) + logging.info("Copy file to guests %s done.", self.vm_addr) + + logging.info("Copy file from guest %s.", self.vm_addr) + remote.copy_files_from(self.vm_addr, "scp", guest_root, + guest_pass, 22, guest_path, + host_path_returned, + limit=transfer_speed, verbose=True, + timeout=transfer_timeout) + logging.info("Copy file from guests %s done.", self.vm_addr) + check_sum = client_utils.hash_file(host_path_returned) + #store checksum for later check. + self.file_check_sums.append(check_sum) + + def _run_and_migrate(self, bg, end_event, sync, migrate_count): + bg.start() + try: + while bg.isAlive(): + logging.info("File transfer not ended, starting" + " a round of migration...") + sync.sync(True, timeout=d_transfer_timeout) + self.migrate_wait([self.vm], + self.srchost, + self.dsthost) + tmp = self.dsthost + self.dsthost = self.srchost + self.srchost = tmp + migrate_count -= 1 + if (migrate_count <= 0): + end_event.set() + bg.join() + + sync.sync(False, timeout=d_transfer_timeout) + except Exception: + # If something bad happened in the main thread, ignore + # exceptions raised in the background thread + bg.join(suppress_exception=True) + raise + else: + bg.join() + + def _slave_migrate(self, sync): + while True: + done = sync.sync(timeout=d_transfer_timeout)[self.master_id()] + if not done: + break + logging.info("File transfer not ended, starting" + " a round of migration...") + self.migrate_wait([self.vm], + self.srchost, + self.dsthost) + + tmp = self.dsthost + self.dsthost = self.srchost + self.srchost = tmp + + def migration_scenario(self): + sync = SyncData(self.master_id(), self.hostid, self.hosts, + self.id, self.sync_server) + self.vm = params.get("vms").split()[0] + address_cache = env.get("address_cache") + + if (self.hostid == self.master_id()): + utils.run("dd if=/dev/urandom of=%s bs=1M" + " count=%s" % (host_path, file_size)) + + self.vm_addr = self._prepare_vm(self.vm).get_address() + + end_event = threading.Event() + bg = utils.InterruptedThread(self._copy_until_end, + (end_event,)) + + self._hosts_barrier(self.hosts, self.id, "befor_mig", 120) + sync.sync(address_cache, timeout=120) + error.context("ping-pong between host and guest while" + " migrating", logging.info) + self._run_and_migrate(bg, end_event, sync, migrate_count) + + # Check if guest lives. + remote.wait_for_login(shell_client, self.vm_addr, + shell_port, guest_root, + guest_pass, shell_prompt) + self._hosts_barrier(self.hosts, self.id, "After_check", 120) + + error.context("comparing hashes", logging.info) + orig_hash = client_utils.hash_file(host_path) + returned_hash = client_utils.hash_file(host_path_returned) + + #Check all check sum + wrong_check_sum = False + for i in range(len(self.file_check_sums)): + check_sum = self.file_check_sums[i] + if check_sum != orig_hash: + wrong_check_sum = True + logging.error("Checksum in transfer number" + " %d if wrong." 
% (i)) + if wrong_check_sum: + raise error.TestFail("Returned file hash (%s) differs from" + " original one (%s)" % (returned_hash, + orig_hash)) + else: + #clean temp + utils.run("rm -rf %s" % (host_path)) + utils.run("rm -rf %s" % (returned_hash)) + + error.context() + else: + self._hosts_barrier(self.hosts, self.id, "befor_mig", 260) + address_cache.update(sync.sync(timeout=120)[self.master_id()]) + logging.debug("Address cache updated to %s" % address_cache) + self._slave_migrate(sync) + + #Wait for check if guest lives. + self._hosts_barrier(self.hosts, self.id, "After_check", 120) + + mig = TestMultihostMigration(test, params, env) + mig.run() diff --git a/kvm/tests/migration_multi_host_with_speed_measurement.py b/kvm/tests/migration_multi_host_with_speed_measurement.py new file mode 100644 index 00000000..4a1f4dd6 --- /dev/null +++ b/kvm/tests/migration_multi_host_with_speed_measurement.py @@ -0,0 +1,197 @@ +import os, re, logging, time, socket +from autotest.client.shared import error, utils +from autotest.client.shared.barrier import listen_server +from autotest.client.shared.syncdata import SyncData +from autotest.client.virt import utils_test, utils_misc + + +def run_migration_multi_host_with_speed_measurement(test, params, env): + """ + KVM migration test: + 1) Get a live VM and clone it. + 2) Verify that the source VM supports migration. If it does, proceed with + the test. + 3) Start memory load in vm. + 4) Set defined migration speed. + 5) Send a migration command to the source VM and collecting statistic + of migration speed. + !) Checks that migration utilisation didn't slow down in guest stresser + which would lead to less page-changes than required for this test. + (migration speed is set too high for current CPU) + 6) Kill both VMs. + 7) Print statistic of migration. + + @param test: kvm test object. + @param params: Dictionary with test parameters. + @param env: Dictionary with the test environment. + """ + install_path = params.get("cpuflags_install_path", "/tmp") + + vm_mem = int(params.get("mem", "512")) + + get_mig_speed = re.compile("^transferred ram: (\d+) kbytes$", + re.MULTILINE) + + mig_speed = params.get("mig_speed", "1G") + mig_speed_accuracy = float(params.get("mig_speed_accuracy", "0.2")) + + def get_migration_statistic(vm): + last_transfer_mem = 0 + transfered_mem = 0 + mig_stat = utils.Statistic() + for _ in range(30): + o = vm.monitor.info("migrate") + warning_msg = ("Migration already ended. 
Migration speed is" + " probably too high and will block vm while" + " filling its memory.") + fail_msg = ("Could not determine the transferred memory from" + " monitor data: %s" % o) + if isinstance(o, str): + if not "status: active" in o: + raise error.TestWarn(warning_msg) + try: + transfered_mem = int(get_mig_speed.search(o).groups()[0]) + except (IndexError, ValueError): + raise error.TestFail(fail_msg) + else: + if o.get("status") != "active": + raise error.TestWarn(warning_msg) + try: + transfered_mem = o.get("ram").get("transferred") / (1024) + except (IndexError, ValueError): + raise error.TestFail(fail_msg) + + real_mig_speed = (transfered_mem - last_transfer_mem) / 1024 + + last_transfer_mem = transfered_mem + + logging.debug("Migration speed: %s MB/s" % (real_mig_speed)) + mig_stat.record(real_mig_speed) + time.sleep(1) + + return mig_stat + + class TestMultihostMigration(utils_test.MultihostMigration): + def __init__(self, test, params, env): + super(TestMultihostMigration, self).__init__(test, params, env) + self.mig_stat = None + self.srchost = self.params.get("hosts")[0] + self.dsthost = self.params.get("hosts")[1] + self.id = {'src': self.srchost, + 'dst': self.dsthost, + "type": "speed_measurement"} + self.link_speed = 0 + + def check_vms(self, mig_data): + """ + Check vms after migrate. + + @param mig_data: object with migration data. + """ + pass + + def migrate_vms_src(self, mig_data): + """ + Migrate vms source. + + @param mig_Data: Data for migration. + + For change way how machine migrates is necessary + re implement this method. + """ + vm = mig_data.vms[0] + vm.migrate(dest_host=mig_data.dst, + remote_port=mig_data.vm_ports[vm.name], + not_wait_for_migration=True) + self.mig_stat = get_migration_statistic(vm) + + def migration_scenario(self): + sync = SyncData(self.master_id(), self.hostid, self.hosts, + self.id, self.sync_server) + srchost = self.params.get("hosts")[0] + dsthost = self.params.get("hosts")[1] + vms = [params.get("vms").split()[0]] + + def worker(mig_data): + vm = mig_data.vms[0] + session = vm.wait_for_login(timeout=self.login_timeout) + + utils_misc.install_cpuflags_util_on_vm(test, vm, install_path, + extra_flags="-msse3 -msse2") + + cmd = ("%s/cpuflags-test --stressmem %d" % + (os.path.join(install_path, "test_cpu_flags"), vm_mem / 2)) + logging.debug("Sending command: %s" % (cmd)) + session.sendline(cmd) + + if self.master_id() == self.hostid: + server_port = utils_misc.find_free_port(5200, 6000) + server = listen_server(port=server_port) + data_len = 0 + sync.sync(server_port, timeout=120) + client = server.socket.accept()[0] + endtime = time.time() + 30 + while endtime > time.time(): + data_len += len(client.recv(2048)) + client.close() + server.close() + self.link_speed = data_len / (30 * 1024 * 1024) + logging.info("Link speed %d MB/s" % (self.link_speed)) + ms = utils.convert_data_size(mig_speed, 'M') + if (ms > data_len / 30): + logging.warn("Migration speed %s MB/s is set faster than " + "real link speed %d MB/s" % (mig_speed, + self.link_speed)) + else: + self.link_speed = ms / (1024 * 1024) + else: + data = "" + for _ in range(10000): + data += "i" + server_port = sync.sync(timeout=120)[self.master_id()] + sock = socket.socket(socket.AF_INET, + socket.SOCK_STREAM) + sock.connect((self.master_id(), server_port)) + try: + endtime = time.time() + 10 + while endtime > time.time(): + sock.sendall(data) + sock.close() + except: + pass + self.migrate_wait(vms, srchost, dsthost, worker) + + mig = TestMultihostMigration(test, params, env) + 
#Start migration + mig.run() + + #If machine is migration master check migration statistic. + if mig.master_id() == mig.hostid: + mig_speed = utils.convert_data_size(mig_speed, "M") + + mig_stat = mig.mig_stat + + mig_speed = mig_speed / (1024 * 1024) + real_speed = mig_stat.get_average() + ack_speed = mig.link_speed * mig_speed_accuracy + + logging.info("Target migration speed: %d MB/s", mig_speed) + logging.info("Real Link speed: %d MB/s", mig.link_speed) + logging.info("Average migration speed: %d MB/s", mig_stat.get_average()) + logging.info("Minimum migration speed: %d MB/s", mig_stat.get_min()) + logging.info("Maximum migration speed: %d MB/s", mig_stat.get_max()) + + logging.info("Maximum tolerable divergence: %3.1f%%", + mig_speed_accuracy*100) + + if real_speed < mig_speed - ack_speed: + divergence = (1 - float(real_speed)/float(mig_speed)) * 100 + raise error.TestWarn("Average migration speed (%s MB/s) " + "is %3.1f%% lower than target (%s MB/s)" % + (real_speed, divergence, mig_speed)) + + if real_speed > mig_speed + ack_speed: + divergence = (1 - float(mig_speed)/float(real_speed)) * 100 + raise error.TestWarn("Average migration speed (%s MB/s) " + "is %3.1f %% higher than target (%s MB/s)" % + (real_speed, divergence, mig_speed)) diff --git a/kvm/tests/migration_with_file_transfer.py b/kvm/tests/migration_with_file_transfer.py new file mode 100644 index 00000000..ff284a47 --- /dev/null +++ b/kvm/tests/migration_with_file_transfer.py @@ -0,0 +1,85 @@ +import logging, os +from autotest.client.shared import utils, error +from autotest.client import utils as client_utils +from autotest.client.virt import utils_misc + + +@error.context_aware +def run_migration_with_file_transfer(test, params, env): + """ + KVM migration test: + 1) Get a live VM and clone it. + 2) Verify that the source VM supports migration. If it does, proceed with + the test. + 3) Transfer file from host to guest. + 4) Repeatedly migrate VM and wait until transfer's finished. + 5) Transfer file from guest back to host. + 6) Repeatedly migrate VM and wait until transfer's finished. + + @param test: kvm test object. + @param params: Dictionary with test parameters. + @param env: Dictionary with the test environment. 
+ """ + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + login_timeout = int(params.get("login_timeout", 360)) + session = vm.wait_for_login(timeout=login_timeout) + + mig_timeout = float(params.get("mig_timeout", "3600")) + mig_protocol = params.get("migration_protocol", "tcp") + mig_cancel_delay = int(params.get("mig_cancel") == "yes") * 2 + + host_path = "/tmp/file-%s" % utils_misc.generate_random_string(6) + host_path_returned = "%s-returned" % host_path + guest_path = params.get("guest_path", "/tmp/file") + file_size = params.get("file_size", "500") + transfer_timeout = int(params.get("transfer_timeout", "240")) + + try: + utils.run("dd if=/dev/urandom of=%s bs=1M count=%s" % (host_path, + file_size)) + + def run_and_migrate(bg): + bg.start() + try: + while bg.isAlive(): + logging.info("File transfer not ended, starting a round of " + "migration...") + vm.migrate(mig_timeout, mig_protocol, mig_cancel_delay) + except Exception: + # If something bad happened in the main thread, ignore + # exceptions raised in the background thread + bg.join(suppress_exception=True) + raise + else: + bg.join() + + error.context("transferring file to guest while migrating", + logging.info) + bg = utils.InterruptedThread(vm.copy_files_to, (host_path, guest_path), + dict(verbose=True, timeout=transfer_timeout)) + run_and_migrate(bg) + + error.context("transferring file back to host while migrating", + logging.info) + bg = utils.InterruptedThread(vm.copy_files_from, + (guest_path, host_path_returned), + dict(verbose=True, timeout=transfer_timeout)) + run_and_migrate(bg) + + # Make sure the returned file is identical to the original one + error.context("comparing hashes", logging.info) + orig_hash = client_utils.hash_file(host_path) + returned_hash = client_utils.hash_file(host_path_returned) + if orig_hash != returned_hash: + raise error.TestFail("Returned file hash (%s) differs from " + "original one (%s)" % (returned_hash, + orig_hash)) + error.context() + + finally: + session.close() + if os.path.isfile(host_path): + os.remove(host_path) + if os.path.isfile(host_path_returned): + os.remove(host_path_returned) diff --git a/kvm/tests/migration_with_reboot.py b/kvm/tests/migration_with_reboot.py new file mode 100644 index 00000000..040d7d26 --- /dev/null +++ b/kvm/tests/migration_with_reboot.py @@ -0,0 +1,43 @@ +from autotest.client.shared import utils + + +def run_migration_with_reboot(test, params, env): + """ + KVM migration test: + 1) Get a live VM and clone it. + 2) Verify that the source VM supports migration. If it does, proceed with + the test. + 3) Reboot the VM + 4) Send a migration command to the source VM and wait until it's finished. + 5) Kill off the source VM. + 6) Log into the destination VM after the migration is finished. + + @param test: kvm test object. + @param params: Dictionary with test parameters. + @param env: Dictionary with the test environment. 
+ """ + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + login_timeout = int(params.get("login_timeout", 360)) + session = vm.wait_for_login(timeout=login_timeout) + + mig_timeout = float(params.get("mig_timeout", "3600")) + mig_protocol = params.get("migration_protocol", "tcp") + mig_cancel_delay = int(params.get("mig_cancel") == "yes") * 2 + + try: + # Reboot the VM in the background + bg = utils.InterruptedThread(vm.reboot, (session,)) + bg.start() + try: + while bg.isAlive(): + vm.migrate(mig_timeout, mig_protocol, mig_cancel_delay) + except Exception: + # If something bad happened in the main thread, ignore exceptions + # raised in the background thread + bg.join(suppress_exception=True) + raise + else: + session = bg.join() + finally: + session.close() diff --git a/kvm/tests/migration_with_speed_measurement.py b/kvm/tests/migration_with_speed_measurement.py new file mode 100644 index 00000000..2705dd5e --- /dev/null +++ b/kvm/tests/migration_with_speed_measurement.py @@ -0,0 +1,129 @@ +import os, re, logging, time +from autotest.client.virt import utils_misc +from autotest.client.shared import error, utils + + +def run_migration_with_speed_measurement(test, params, env): + """ + KVM migration test: + 1) Get a live VM and clone it. + 2) Verify that the source VM supports migration. If it does, proceed with + the test. + 3) Start memory load on vm. + 4) Send a migration command to the source VM and collecting statistic + of migration speed. + !) If migration speed is too high migration could be successful and then + test ends with warning. + 5) Kill off both VMs. + 6) Print statistic of migration. + + @param test: kvm test object. + @param params: Dictionary with test parameters. + @param env: Dictionary with the test environment. + """ + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + login_timeout = int(params.get("login_timeout", 360)) + session = vm.wait_for_login(timeout=login_timeout) + + mig_timeout = float(params.get("mig_timeout", "10")) + mig_protocol = params.get("migration_protocol", "tcp") + + install_path = params.get("cpuflags_install_path", "/tmp") + + vm_mem = int(params.get("mem", "512")) + + get_mig_speed = re.compile("^transferred ram: (\d+) kbytes$", + re.MULTILINE) + + mig_speed = params.get("mig_speed", "1G") + mig_speed_accuracy = float(params.get("mig_speed_accuracy", "0.2")) + clonevm = None + + def get_migration_statistic(vm): + last_transfer_mem = 0 + transfered_mem = 0 + mig_stat = utils.Statistic() + for _ in range(30): + o = vm.monitor.info("migrate") + warning_msg = ("Migration already ended. 
Migration speed is" + " probably too high and will block vm while" + " filling its memory.") + fail_msg = ("Could not determine the transferred memory from" + " monitor data: %s" % o) + if isinstance(o, str): + if not "status: active" in o: + raise error.TestWarn(warning_msg) + try: + transfered_mem = int(get_mig_speed.search(o).groups()[0]) + except (IndexError, ValueError): + raise error.TestFail(fail_msg) + else: + if o.get("status") != "active": + raise error.TestWarn(warning_msg) + try: + transfered_mem = o.get("ram").get("transferred") / (1024) + except (IndexError, ValueError): + raise error.TestFail(fail_msg) + + real_mig_speed = (transfered_mem - last_transfer_mem) / 1024 + + last_transfer_mem = transfered_mem + + logging.debug("Migration speed: %s MB/s" % (real_mig_speed)) + mig_stat.record(real_mig_speed) + time.sleep(1) + + return mig_stat + + try: + # Reboot the VM in the background + utils_misc.install_cpuflags_util_on_vm(test, vm, install_path, + extra_flags="-msse3 -msse2") + + vm.monitor.migrate_set_speed(mig_speed) + + cmd = ("%s/cpuflags-test --stressmem %d" % + (os.path.join(install_path, "test_cpu_flags"), vm_mem / 2)) + logging.debug("Sending command: %s" % (cmd)) + session.sendline(cmd) + + time.sleep(2) + + clonevm = vm.migrate(mig_timeout, mig_protocol, + not_wait_for_migration=True) + + mig_speed = utils.convert_data_size(mig_speed, "M") + + mig_stat = get_migration_statistic(vm) + + mig_speed = mig_speed / (1024 * 1024) + real_speed = mig_stat.get_average() + ack_speed = mig_speed * mig_speed_accuracy + + logging.info("Target migration speed: %d MB/s.", mig_speed) + logging.info("Average migration speed: %d MB/s", mig_stat.get_average()) + logging.info("Minimum migration speed: %d MB/s", mig_stat.get_min()) + logging.info("Maximum migration speed: %d MB/s", mig_stat.get_max()) + + logging.info("Maximum tolerable divergence: %3.1f%%", + mig_speed_accuracy*100) + + if real_speed < mig_speed - ack_speed: + divergence = (1 - float(real_speed)/float(mig_speed)) * 100 + raise error.TestWarn("Average migration speed (%s MB/s) " + "is %3.1f%% lower than target (%s MB/s)" % + (real_speed, divergence, mig_speed)) + + if real_speed > mig_speed + ack_speed: + divergence = (1 - float(mig_speed)/float(real_speed)) * 100 + raise error.TestWarn("Average migration speed (%s MB/s) " + "is %3.1f %% higher than target (%s MB/s)" % + (real_speed, divergence, mig_speed)) + + finally: + session.close() + if clonevm: + clonevm.destroy(gracefully=False) + if vm: + vm.destroy(gracefully=False) diff --git a/kvm/tests/multi_disk.py b/kvm/tests/multi_disk.py new file mode 100644 index 00000000..41ab42de --- /dev/null +++ b/kvm/tests/multi_disk.py @@ -0,0 +1,292 @@ +""" +multi_disk test for Autotest framework. + +@copyright: 2011-2012 Red Hat Inc. +""" +import logging, re, random, string +from autotest.client.shared import error, utils +from autotest.client.virt import kvm_qtree, env_process + +_RE_RANGE1 = re.compile(r'range\([ ]*([-]?\d+|n).*\)') +_RE_RANGE2 = re.compile(r',[ ]*([-]?\d+|n)') +_RE_BLANKS = re.compile(r'^([ ]*)') + + +@error.context_aware +def _range(buf, n=None): + """ + Converts 'range(..)' string to range. It supports 1-4 args. It supports + 'n' as correct input, which is substituted to return the correct range. + range1-3 ... ordinary python range() + range4 ... multiplies the occurrence of each value + (range(0,4,1,2) => [0,0,1,1,2,2,3,3]) + @raise ValueError: In case incorrect values are given. + @return: List of int values. 
In case it can't substitute 'n' + it returns the original string. + """ + out = _RE_RANGE1.match(buf) + if not out: + return False + out = [out.groups()[0]] + out.extend(_RE_RANGE2.findall(buf)) + if 'n' in out: + if n is None: + # Don't know what to substitute, return the original + return buf + else: + # Doesn't cover all cases and also it works it's way... + n = int(n) + if out[0] == 'n': + out[0] = int(n) + if len(out) > 1 and out[1] == 'n': + out[1] = int(out[0]) + n + if len(out) > 2 and out[2] == 'n': + out[2] = (int(out[1]) - int(out[0])) / n + if len(out) > 3 and out[3] == 'n': + _len = len(range(int(out[0]), int(out[1]), int(out[2]))) + out[3] = n / _len + if n % _len: + out[3] += 1 + for i in range(len(out)): + out[i] = int(out[i]) + if len(out) == 1: + out = range(out[0]) + elif len(out) == 2: + out = range(out[0], out[1]) + elif len(out) == 3: + out = range(out[0], out[1], out[2]) + elif len(out) == 4: + # arg4 * range + _out = [] + for _ in range(out[0], out[1], out[2]): + _out.extend([_] * out[3]) + out = _out + else: + raise ValueError("More than 4 parameters in _range()") + return out + + +@error.context_aware +def run_multi_disk(test, params, env): + """ + Test multi disk suport of guest, this case will: + 1) Create disks image in configuration file. + 2) Start the guest with those disks. + 3) Checks qtree vs. test params. + 4) Format those disks. + 5) Copy file into / out of those disks. + 6) Compare the original file and the copied file using md5 or fc comand. + 7) Repeat steps 3-5 if needed. + + @param test: kvm test object + @param params: Dictionary with the test parameters + @param env: Dictionary with test environment. + """ + def _add_param(name, value): + """ Converts name+value to stg_params string """ + if value: + value = re.sub(' ', '\\ ', value) + return " %s:%s " % (name, value) + else: + return '' + + stg_image_num = 0 + stg_params = params.get("stg_params", "") + # Compatibility + stg_params += _add_param("image_size", params.get("stg_image_size")) + stg_params += _add_param("image_format", params.get("stg_image_format")) + stg_params += _add_param("image_boot", params.get("stg_image_boot")) + stg_params += _add_param("drive_format", params.get("stg_drive_format")) + if params.get("stg_assign_index") != "no": + # Assume 0 and 1 are already occupied (hd0 and cdrom) + stg_params += _add_param("drive_index", 'range(2,n)') + param_matrix = {} + + stg_params = stg_params.split(' ') + i = 0 + while i < len(stg_params) - 1: + if not stg_params[i].strip(): + i += 1 + continue + if stg_params[i][-1] == '\\': + stg_params[i] = '%s %s' % (stg_params[i][:-1], + stg_params.pop(i + 1)) + i += 1 + + rerange = [] + has_name = False + for i in xrange(len(stg_params)): + if not stg_params[i].strip(): + continue + (cmd, parm) = stg_params[i].split(':', 1) + if cmd == "image_name": + has_name = True + if _RE_RANGE1.match(parm): + parm = _range(parm) + if parm == False: + raise error.TestError("Incorrect cfg: stg_params %s looks " + "like range(..) but doesn't contain " + "numbers." % cmd) + param_matrix[cmd] = parm + if type(parm) is str: + # When we know the stg_image_num, substitute it. 
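+ # The value is still a plain 'range(..)' string containing 'n'; remember
+ # the key so it can be expanded by _range() once stg_image_num is known.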
+ rerange.append(cmd) + continue + else: + # ',' separated list of values + parm = parm.split(',') + j = 0 + while j < len(parm) - 1: + if parm[j][-1] == '\\': + parm[j] = '%s,%s' % (parm[j][:-1], parm.pop(j + 1)) + j += 1 + param_matrix[cmd] = parm + stg_image_num = max(stg_image_num, len(parm)) + + stg_image_num = int(params.get('stg_image_num', stg_image_num)) + for cmd in rerange: + param_matrix[cmd] = _range(param_matrix[cmd], stg_image_num) + # param_table* are for pretty print of param_matrix + param_table = [] + param_table_header = ['name'] + if not has_name: + param_table_header.append('image_name') + for _ in param_matrix: + param_table_header.append(_) + + stg_image_name = params.get('stg_image_name', '%s') + for i in xrange(stg_image_num): + name = "stg%d" % i + params['images'] += " %s" % name + param_table.append([]) + param_table[-1].append(name) + if not has_name: + params["image_name_%s" % name] = stg_image_name % name + param_table[-1].append(params.get("image_name_%s" % name)) + for parm in param_matrix.iteritems(): + params['%s_%s' % (parm[0], name)] = str(parm[1][i % len(parm[1])]) + param_table[-1].append(params.get('%s_%s' % (parm[0], name))) + + if params.get("multi_disk_params_only") == 'yes': + # Only print the test param_matrix and finish + logging.info('Newly added disks:\n%s', + utils.matrix_to_string(param_table, param_table_header)) + return + + # Always recreate VM (disks are already marked for deletion + env_process.preprocess(test, params, env) + vm = env.get_vm(params["main_vm"]) + vm.create(timeout=max(10, stg_image_num)) + session = vm.wait_for_login(timeout=int(params.get("login_timeout", 360))) + + images = params.get("images").split() + n_repeat = int(params.get("n_repeat", "1")) + image_num = len(images) + file_system = params.get("file_system").split() + fs_num = len(file_system) + cmd_timeout = float(params.get("cmd_timeout", 360)) + re_str = params.get("re_str") + black_list = params.get("black_list").split() + + error.context("verifying qtree vs. test params") + err = 0 + qtree = kvm_qtree.QtreeContainer() + qtree.parse_info_qtree(vm.monitor.info('qtree')) + disks = kvm_qtree.QtreeDisksContainer(qtree.get_nodes()) + (tmp1, tmp2) = disks.parse_info_block(vm.monitor.info('block')) + err += tmp1 + tmp2 + err += disks.generate_params() + err += disks.check_disk_params(params, vm.root_dir) + (tmp1, tmp2, _, _) = disks.check_guests_proc_scsi( + session.cmd_output('cat /proc/scsi/scsi')) + err += tmp1 + tmp2 + + if err: + raise error.TestFail("%s errors occurred while verifying qtree vs. 
" + "params" % err) + if params.get('multi_disk_only_qtree') == 'yes': + return + + try: + if params.get("clean_cmd"): + cmd = params.get("clean_cmd") + session.cmd_status_output(cmd) + if params.get("pre_cmd"): + cmd = params.get("pre_cmd") + error.context("creating partition on test disk") + session.cmd(cmd, timeout=cmd_timeout) + cmd = params.get("list_volume_command") + output = session.cmd_output(cmd, timeout=cmd_timeout) + disks = re.findall(re_str, output) + disks = map(string.strip, disks) + disks.sort() + logging.debug("Volume list that meets regular expressions: '%s'", disks) + if len(disks) < image_num: + raise error.TestFail("Fail to list all the volumes!") + + if params.get("os_type") == "linux": + df_output = session.cmd_output("df") + li = re.findall("^/dev/(.*?)[ \d]", df_output, re.M) + if li: + black_list.extend(li) + + exclude_list = [d for d in disks if d in black_list] + f = lambda d: logging.info("No need to check volume '%s'", d) + map(f, exclude_list) + + disks = [d for d in disks if d not in exclude_list] + + for i in range(n_repeat): + logging.info("iterations: %s", (i + 1)) + for disk in disks: + disk = disk.strip() + + logging.info("Format disk: %s...", disk) + index = random.randint(0, fs_num - 1) + + # Random select one file system from file_system + fs = file_system[index].strip() + cmd = params.get("format_command") % (fs, disk) + error.context("formatting test disk") + session.cmd(cmd, timeout=cmd_timeout) + if params.get("mount_command"): + cmd = params.get("mount_command") % (disk, disk, disk) + session.cmd(cmd, timeout=cmd_timeout) + + for disk in disks: + disk = disk.strip() + + logging.info("Performing I/O on disk: %s...", disk) + cmd_list = params.get("cmd_list").split() + for cmd_l in cmd_list: + if params.get(cmd_l): + cmd = params.get(cmd_l) % disk + session.cmd(cmd, timeout=cmd_timeout) + + cmd = params.get("compare_command") + output = session.cmd_output(cmd) + key_word = params.get("check_result_key_word") + if key_word and key_word in output: + logging.debug("Guest's virtual disk %s works fine", disk) + elif key_word: + raise error.TestFail("Files on guest os root fs and disk " + "differ") + else: + raise error.TestError("Param check_result_key_word was not " + "specified! Please check your config") + + if params.get("umount_command"): + cmd = params.get("show_mount_cmd") + output = session.cmd_output(cmd) + disks = re.findall(re_str, output) + disks.sort() + for disk in disks: + disk = disk.strip() + cmd = params.get("umount_command") % (disk, disk) + error.context("unmounting test disk") + session.cmd(cmd) + finally: + if params.get("post_cmd"): + cmd = params.get("post_cmd") + session.cmd(cmd) + session.close() diff --git a/kvm/tests/nic_bonding.py b/kvm/tests/nic_bonding.py new file mode 100644 index 00000000..995ec8ce --- /dev/null +++ b/kvm/tests/nic_bonding.py @@ -0,0 +1,77 @@ +import logging, time +from autotest.client.virt import utils_test, aexpect +from autotest.client.shared import error, utils + + +def run_nic_bonding(test, params, env): + """ + Nic bonding test in guest. + + 1) Start guest with four nic models. + 2) Setup bond0 in guest by script nic_bonding_guest.py. + 3) Execute file transfer test between guest and host. + 4) Repeatedly put down/up interfaces by set_link + 5) Execute file transfer test between guest and host. + + @param test: Kvm test object. + @param params: Dictionary with the test parameters. + @param env: Dictionary with test environment. 
+ """ + timeout = int(params.get("login_timeout", 1200)) + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + session_serial = vm.wait_for_serial_login(timeout=timeout) + + # get params of bonding + modprobe_cmd = "modprobe bonding" + bonding_params = params.get("bonding_params") + if bonding_params: + modprobe_cmd += " %s" % bonding_params + session_serial.cmd(modprobe_cmd) + + session_serial.cmd("ifconfig bond0 up") + ifnames = [utils_test.get_linux_ifname(session_serial, + vm.get_mac_address(vlan)) + for vlan, nic in enumerate(vm.virtnet)] + setup_cmd = "ifenslave bond0 " + " ".join(ifnames) + session_serial.cmd(setup_cmd) + #do a pgrep to check if dhclient has already been running + pgrep_cmd = "pgrep dhclient" + try: + session_serial.cmd(pgrep_cmd) + #if dhclient is there, killl it + except aexpect.ShellCmdError: + logging.info("it's safe to run dhclient now") + else: + logging.info("dhclient already is running,kill it") + session_serial.cmd("killall -9 dhclient") + time.sleep(1) + session_serial.cmd("dhclient bond0") + + try: + logging.info("Test file transfering:") + utils_test.run_file_transfer(test, params, env) + + logging.info("Failover test with file transfer") + transfer_thread = utils.InterruptedThread( + utils_test.run_file_transfer, + (test, params, env)) + try: + transfer_thread.start() + while transfer_thread.isAlive(): + for vlan, nic in enumerate(vm.virtnet): + device_id = vm.get_peer(vm.netdev_id[vlan]) + if not device_id: + raise error.TestError("Could not find peer device for" + " nic device %s" % nic) + vm.set_link(device_id, up=False) + time.sleep(1) + vm.set_link(device_id, up=True) + except Exception: + transfer_thread.join(suppress_exception=True) + raise + else: + transfer_thread.join() + finally: + session_serial.sendline("ifenslave -d bond0 " + " ".join(ifnames)) + session_serial.sendline("kill -9 `pgrep dhclient`") diff --git a/kvm/tests/nic_hotplug.py b/kvm/tests/nic_hotplug.py new file mode 100644 index 00000000..96342119 --- /dev/null +++ b/kvm/tests/nic_hotplug.py @@ -0,0 +1,105 @@ +import logging +from autotest.client.shared import error +from autotest.client.virt import utils_test, virt_vm, aexpect + + +def run_nic_hotplug(test, params, env): + """ + Test hotplug of NIC devices + + 1) Boot up guest with one nic + 2) Add a host network device through monitor cmd and check if it's added + 3) Add nic device through monitor cmd and check if it's added + 4) Check if new interface gets ip address + 5) Disable primary link of guest + 6) Ping guest new ip from host + 7) Delete nic device and netdev + 8) Re-enable primary link of guest + + BEWARE OF THE NETWORK BRIDGE DEVICE USED FOR THIS TEST ("nettype=bridge" + and "netdst=" param). The KVM autotest default bridge virbr0, + leveraging libvirt, works fine for the purpose of this test. When using + other bridges, the timeouts which usually happen when the bridge + topology changes (that is, devices get added and removed) may cause random + failures. + + @param test: KVM test object. + @param params: Dictionary with the test parameters. + @param env: Dictionary with test environment. 
+ """ + vm = utils_test.get_living_vm(env, params.get("main_vm")) + login_timeout = int(params.get("login_timeout", 360)) + pci_model = params.get("pci_model", "rtl8139") + run_dhclient = params.get("run_dhclient", "no") + guest_is_not_windows = "Win" not in params.get("guest_name", "") + + session = utils_test.wait_for_login(vm, timeout=login_timeout) + + udev_rules_path = "/etc/udev/rules.d/70-persistent-net.rules" + udev_rules_bkp_path = "/tmp/70-persistent-net.rules" + + def guest_path_isfile(path): + try: + session.cmd("test -f %s" % path) + except aexpect.ShellError: + return False + return True + + if guest_is_not_windows: + if guest_path_isfile(udev_rules_path): + session.cmd("mv -f %s %s" % (udev_rules_path, udev_rules_bkp_path)) + + # Modprobe the module if specified in config file + module = params.get("modprobe_module") + if module: + session.get_command_output("modprobe %s" % module) + + # hot-add the nic + nic_name = 'hotadded' + nic_info = vm.hotplug_nic(nic_model=pci_model, nic_name=nic_name) + + # Only run dhclient if explicitly set and guest is not running Windows. + # Most modern Linux guests run NetworkManager, and thus do not need this. + if run_dhclient == "yes" and guest_is_not_windows: + session_serial = vm.wait_for_serial_login(timeout=login_timeout) + ifname = utils_test.get_linux_ifname(session, nic_info['mac']) + session_serial.cmd("dhclient %s &" % ifname) + + logging.info("Shutting down the primary link(s)") + for nic in vm.virtnet: + if nic.nic_name == nic_name: + continue + else: + vm.monitor.cmd("set_link %s off" % nic.device_id) + + try: + logging.info("Waiting for new nic's ip address acquisition...") + try: + ip = vm.wait_for_get_address(nic_name) + except virt_vm.VMIPAddressMissingError: + raise error.TestFail("Could not get or verify ip address of nic") + logging.info("Got the ip address of new nic: %s", ip) + + logging.info("Ping test the new nic ...") + s, o = utils_test.ping(ip, 100) + if s != 0: + logging.error(o) + raise error.TestFail("New nic failed ping test") + + logging.info("Detaching the previously attached nic from vm") + vm.hotunplug_nic(nic_name) + + finally: + logging.info("Re-enabling the primary link(s)") + for nic in vm.virtnet: + if nic.nic_name == nic_name: + continue + else: + vm.monitor.cmd("set_link %s on" % nic.device_id) + + # Attempt to put back udev network naming rules, even if the command to + # disable the rules failed. We may be undoing what was done in a previous + # (failed) test that never reached this point. + if guest_is_not_windows: + if guest_path_isfile(udev_rules_bkp_path): + session.cmd("mv -f %s %s" % (udev_rules_bkp_path, udev_rules_path)) diff --git a/kvm/tests/nmi_watchdog.py b/kvm/tests/nmi_watchdog.py new file mode 100644 index 00000000..a711daa5 --- /dev/null +++ b/kvm/tests/nmi_watchdog.py @@ -0,0 +1,60 @@ +import time, logging +from autotest.client.shared import error + + +@error.context_aware +def run_nmi_watchdog(test, params, env): + """ + Test the function of nmi injection and verify the response of guest + + 1) Log in the guest + 2) Add 'watchdog=1' to boot option + 2) Check if guest's NMI counter augment after injecting nmi + + @param test: kvm test object + @param params: Dictionary with the test parameters. + @param env: Dictionary with test environment. 
+ """ + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + timeout=int(params.get("login_timeout", 360)) + session = vm.wait_for_login(timeout=timeout) + get_nmi_cmd= params.get("get_nmi_cmd") + kernel_version = session.get_command_output("uname -r").strip() + nmi_watchdog_type = int(params.get("nmi_watchdog_type")) + update_kernel_cmd = ("grubby --update-kernel=/boot/vmlinuz-%s " + "--args='nmi_watchdog=%d'" % + (kernel_version, nmi_watchdog_type)) + + error.context("Add 'nmi_watchdog=%d' to guest kernel cmdline and reboot" + % nmi_watchdog_type) + session.cmd(update_kernel_cmd) + time.sleep(int(params.get("sleep_before_reset", 10))) + session = vm.reboot(session, method='shell', timeout=timeout) + try: + error.context("Getting guest's number of vcpus") + guest_cpu_num = session.cmd(params.get("cpu_chk_cmd")) + + error.context("Getting guest's NMI counter") + output = session.cmd(get_nmi_cmd) + logging.debug(output.strip()) + nmi_counter1 = output.split()[1:] + + logging.info("Waiting 60 seconds to see if guest's NMI counter " + "increases") + time.sleep(60) + + error.context("Getting guest's NMI counter 2nd time") + output = session.cmd(get_nmi_cmd) + logging.debug(output.strip()) + nmi_counter2 = output.split()[1:] + + error.context("") + for i in range(int(guest_cpu_num)): + logging.info("vcpu: %s, nmi_counter1: %s, nmi_counter2: %s" % + (i, nmi_counter1[i], nmi_counter2[i])) + if int(nmi_counter2[i]) <= int(nmi_counter1[i]): + raise error.TestFail("Guest's NMI counter did not increase " + "after 60 seconds") + finally: + session.close() diff --git a/kvm/tests/pci_hotplug.py b/kvm/tests/pci_hotplug.py new file mode 100644 index 00000000..b336d3f6 --- /dev/null +++ b/kvm/tests/pci_hotplug.py @@ -0,0 +1,203 @@ +import re +from autotest.client.shared import error +from autotest.client.virt import utils_misc, aexpect, storage + + +def run_pci_hotplug(test, params, env): + """ + Test hotplug of PCI devices. + + (Elements between [] are configurable test parameters) + 1) PCI add a deivce (NIC / block) + 2) Compare output of monitor command 'info pci'. + 3) Compare output of guest command [reference_cmd]. + 4) Verify whether pci_model is shown in [pci_find_cmd]. + 5) Check whether the newly added PCI device works fine. + 6) PCI delete the device, verify whether could remove the PCI device. + + @param test: KVM test object. + @param params: Dictionary with the test parameters. + @param env: Dictionary with test environment. 
+ """ + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + timeout = int(params.get("login_timeout", 360)) + session = vm.wait_for_login(timeout=timeout) + + # Modprobe the module if specified in config file + module = params.get("modprobe_module") + if module: + session.cmd("modprobe %s" % module) + + # Get output of command 'info pci' as reference + info_pci_ref = vm.monitor.info("pci") + + # Get output of command as reference + reference = session.cmd_output(params.get("reference_cmd")) + + tested_model = params.get("pci_model") + test_type = params.get("pci_type") + image_format = params.get("image_format_stg") + + # Probe qemu to verify what is the supported syntax for PCI hotplug + cmd_output = vm.monitor.cmd("?") + if len(re.findall("\ndevice_add", cmd_output)) > 0: + cmd_type = "device_add" + elif len(re.findall("\npci_add", cmd_output)) > 0: + cmd_type = "pci_add" + else: + raise error.TestError("Unknow version of qemu") + + # Determine syntax of drive hotplug + # __com.redhat_drive_add == qemu-kvm-0.12 on RHEL 6 + if len(re.findall("\n__com.redhat_drive_add", cmd_output)) > 0: + drive_cmd_type = "__com.redhat_drive_add" + # drive_add == qemu-kvm-0.13 onwards + elif len(re.findall("\ndrive_add", cmd_output)) > 0: + drive_cmd_type = "drive_add" + else: + raise error.TestError("Unknow version of qemu") + + # Probe qemu for a list of supported devices + devices_support = vm.monitor.cmd("%s ?" % cmd_type) + + if cmd_type == "pci_add": + if test_type == "nic": + pci_add_cmd = "pci_add pci_addr=auto nic model=%s" % tested_model + elif test_type == "block": + image_params = params.object_params("stg") + image_filename = storage.get_image_filename(image_params, + test.bindir) + pci_add_cmd = ("pci_add pci_addr=auto storage file=%s,if=%s" % + (image_filename, tested_model)) + # Execute pci_add (should be replaced by a proper monitor method call) + add_output = vm.monitor.cmd(pci_add_cmd) + if not "OK domain" in add_output: + raise error.TestFail("Add PCI device failed. 
" + "Monitor command is: %s, Output: %r" % + (pci_add_cmd, add_output)) + after_add = vm.monitor.info("pci") + + elif cmd_type == "device_add": + driver_id = test_type + "-" + utils_misc.generate_random_id() + device_id = test_type + "-" + utils_misc.generate_random_id() + if test_type == "nic": + if tested_model == "virtio": + tested_model = "virtio-net-pci" + pci_add_cmd = "device_add id=%s,driver=%s" % (device_id, + tested_model) + + elif test_type == "block": + image_params = params.object_params("stg") + image_filename = storage.get_image_filename(image_params, + test.bindir) + controller_model = None + if tested_model == "virtio": + tested_model = "virtio-blk-pci" + + if tested_model == "scsi": + tested_model = "scsi-disk" + controller_model = "lsi53c895a" + if len(re.findall(controller_model, devices_support)) == 0: + raise error.TestError("scsi controller device (%s) not " + "supported by qemu" % + controller_model) + + if controller_model is not None: + controller_id = "controller-" + device_id + controller_add_cmd = ("device_add %s,id=%s" % + (controller_model, controller_id)) + vm.monitor.cmd(controller_add_cmd) + + if drive_cmd_type == "drive_add": + driver_add_cmd = ("drive_add auto " + "file=%s,if=none,id=%s,format=%s" % + (image_filename, driver_id, image_format)) + elif drive_cmd_type == "__com.redhat_drive_add": + driver_add_cmd = ("__com.redhat_drive_add " + "file=%s,format=%s,id=%s" % + (image_filename, image_format, driver_id)) + + pci_add_cmd = ("device_add id=%s,driver=%s,drive=%s" % + (device_id, tested_model, driver_id)) + vm.monitor.cmd(driver_add_cmd) + + # Check if the device is support in qemu + if len(re.findall(tested_model, devices_support)) > 0: + add_output = vm.monitor.cmd(pci_add_cmd) + else: + raise error.TestError("%s doesn't support device: %s" % + (cmd_type, tested_model)) + after_add = vm.monitor.info("pci") + + if not device_id in after_add: + raise error.TestFail("Add device failed. Monitor command is: %s" + ". Output: %r" % (pci_add_cmd, add_output)) + + # Define a helper function to delete the device + def pci_del(ignore_failure=False): + if cmd_type == "pci_add": + result_domain, bus, slot, _ = add_output.split(',') + domain = int(result_domain.split()[2]) + bus = int(bus.split()[1]) + slot = int(slot.split()[1]) + pci_addr = "%x:%x:%x" % (domain, bus, slot) + cmd = "pci_del pci_addr=%s" % pci_addr + elif cmd_type == "device_add": + cmd = "device_del %s" % device_id + # This should be replaced by a proper monitor method call + vm.monitor.cmd(cmd) + + def device_removed(): + after_del = vm.monitor.info("pci") + return after_del != after_add + + if (not utils_misc.wait_for(device_removed, 10, 0, 1) + and not ignore_failure): + raise error.TestFail("Failed to hot remove PCI device: %s. 
" + "Monitor command: %s" % + (tested_model, cmd)) + + try: + # Compare the output of 'info pci' + if after_add == info_pci_ref: + raise error.TestFail("No new PCI device shown after executing " + "monitor command: 'info pci'") + + # Define a helper function to compare the output + def new_shown(): + o = session.cmd_output(params.get("reference_cmd")) + return o != reference + + secs = int(params.get("wait_secs_for_hook_up")) + if not utils_misc.wait_for(new_shown, 30, secs, 3): + raise error.TestFail("No new device shown in output of command " + "executed inside the guest: %s" % + params.get("reference_cmd")) + + # Define a helper function to catch PCI device string + def find_pci(): + o = session.cmd_output(params.get("find_pci_cmd")) + return params.get("match_string") in o + + if not utils_misc.wait_for(find_pci, 30, 3, 3): + raise error.TestFail("PCI %s %s device not found in guest. " + "Command was: %s" % + (tested_model, test_type, + params.get("find_pci_cmd"))) + + # Test the newly added device + try: + session.cmd(params.get("pci_test_cmd")) + except aexpect.ShellError, e: + raise error.TestFail("Check for %s device failed after PCI " + "hotplug. Output: %r" % (test_type, e.output)) + + session.close() + + except Exception: + pci_del(ignore_failure=True) + raise + + else: + pci_del() diff --git a/kvm/tests/perf_kvm.py b/kvm/tests/perf_kvm.py new file mode 100644 index 00000000..cca4e3ad --- /dev/null +++ b/kvm/tests/perf_kvm.py @@ -0,0 +1,38 @@ +from autotest.client import utils + + +def run_perf_kvm(test, params, env): + """ + run perf tool to get kvm events info + + @param test: kvm test object + @param params: Dictionary with the test parameters + @param env: Dictionary with test environment. + """ + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + + login_timeout = int(params.get("login_timeout", 360)) + transfer_timeout = int(params.get("transfer_timeout", 240)) + perf_record_timeout = int(params.get("perf_record_timeout", 240)) + vm_kallsyms_path = "/tmp/guest_kallsyms" + vm_modules_path = "/tmp/guest_modules" + + # Prepare test environment in guest + session = vm.wait_for_login(timeout=login_timeout) + + session.cmd("cat /proc/kallsyms > %s" % vm_kallsyms_path) + session.cmd("cat /proc/modules > %s" % vm_modules_path) + + vm.copy_files_from("/tmp/guest_kallsyms", "/tmp", timeout=transfer_timeout) + vm.copy_files_from("/tmp/guest_modules", "/tmp", timeout=transfer_timeout) + + perf_record_cmd = "perf kvm --host --guest --guestkallsyms=%s" % vm_kallsyms_path + perf_record_cmd += " --guestmodules=%s record -a -o /tmp/perf.data sleep %s " % (vm_modules_path, perf_record_timeout) + perf_report_cmd = "perf kvm --host --guest --guestkallsyms=%s" % vm_kallsyms_path + perf_report_cmd += " --guestmodules=%s report -i /tmp/perf.data --force " % vm_modules_path + + utils.system(perf_record_cmd) + utils.system(perf_report_cmd) + + session.close() diff --git a/kvm/tests/performance.py b/kvm/tests/performance.py new file mode 100644 index 00000000..7d0f2f05 --- /dev/null +++ b/kvm/tests/performance.py @@ -0,0 +1,204 @@ +import os, re, commands, glob, shutil +from autotest.client.shared import error +from autotest.client import utils +from autotest.client.virt import utils_test + + +def run_performance(test, params, env): + """ + KVM performance test: + + The idea is similar to 'client/tests/kvm/tests/autotest.py', + but we can implement some special requests for performance + testing. 
+ + @param test: kvm test object + @param params: Dictionary with the test parameters + @param env: Dictionary with test environment. + """ + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + + test_timeout = int(params.get("test_timeout", 240)) + monitor_cmd = params.get("monitor_cmd") + login_timeout = int(params.get("login_timeout", 360)) + test_cmd = params.get("test_cmd") + guest_path = params.get("result_path", "/tmp/guest_result") + test_src = params.get("test_src") + test_patch = params.get("test_patch") + + # Prepare test environment in guest + session = vm.wait_for_login(timeout=login_timeout) + + prefix = test.outputdir.split(".performance.")[0] + summary_results = params.get("summary_results") + guest_ver = session.cmd_output("uname -r").strip() + + if summary_results: + if params.get("test") == "ffsb": + ffsb_sum(os.path.dirname(test.outputdir), prefix, params, guest_ver, + test.resultsdir) + session.close() + return + + guest_launcher = os.path.join(test.virtdir, "scripts/cmd_runner.py") + vm.copy_files_to(guest_launcher, "/tmp") + md5value = params.get("md5value") + + tarball = utils.unmap_url_cache(test.tmpdir, test_src, md5value) + test_src = re.split("/", test_src)[-1] + vm.copy_files_to(tarball, "/tmp") + + session.cmd("rm -rf /tmp/src*") + session.cmd("mkdir -p /tmp/src_tmp") + session.cmd("tar -xf /tmp/%s -C %s" % (test_src, "/tmp/src_tmp")) + + # Find the newest file in src tmp directory + cmd = "ls -rt /tmp/src_tmp" + s, o = session.cmd_status_output(cmd) + if len(o) > 0: + new_file = re.findall("(.*)\n", o)[-1] + else: + raise error.TestError("Can not decompress test file in guest") + session.cmd("mv /tmp/src_tmp/%s /tmp/src" % new_file) + + if test_patch: + test_patch_path = os.path.join(test.bindir, '../ffsb/examples', test_patch) + vm.copy_files_to(test_patch_path, "/tmp/src") + session.cmd("cd /tmp/src && patch -p1 < /tmp/src/%s" % test_patch) + + compile_cmd = params.get("compile_cmd") + if compile_cmd: + session.cmd("cd /tmp/src && %s" % compile_cmd) + + prepare_cmd = params.get("prepare_cmd") + if prepare_cmd: + s, o = session.cmd_status_output(prepare_cmd, test_timeout) + if s != 0: + raise error.TestError("Fail to prepare test env in guest") + + cmd = "cd /tmp/src && python /tmp/cmd_runner.py \"%s &> " % monitor_cmd + cmd += "/tmp/guest_result_monitor\" \"/tmp/src/%s" % test_cmd + cmd += " &> %s \" \"/tmp/guest_result\"" + cmd += " %s" % int(test_timeout) + + test_cmd = cmd + # Run guest test with monitor + tag = utils_test.cmd_runner_monitor(vm, monitor_cmd, test_cmd, + guest_path, timeout = test_timeout) + + # Result collecting + result_list = ["/tmp/guest_result_%s" % tag, + "/tmp/host_monitor_result_%s" % tag, + "/tmp/guest_monitor_result_%s" % tag] + guest_results_dir = os.path.join(test.outputdir, "guest_results") + if not os.path.exists(guest_results_dir): + os.mkdir(guest_results_dir) + ignore_pattern = params.get("ignore_pattern") + head_pattern = params.get("head_pattern") + row_pattern = params.get("row_pattern") + for i in result_list: + if re.findall("monitor_result", i): + result = utils_test.summary_up_result(i, ignore_pattern, + head_pattern, row_pattern) + fd = open("%s.sum" % i, "w") + sum_info = {} + head_line = "" + for keys in result: + head_line += "\t%s" % keys + for col in result[keys]: + col_sum = "line %s" % col + if col_sum in sum_info: + sum_info[col_sum] += "\t%s" % result[keys][col] + else: + sum_info[col_sum] = "%s\t%s" % (col, result[keys][col]) + fd.write("%s\n" % head_line) + for keys in sum_info: + fd.write("%s\n" % 
sum_info[keys]) + fd.close() + shutil.copy("%s.sum" % i, guest_results_dir) + shutil.copy(i, guest_results_dir) + + session.cmd("rm -rf /tmp/src") + session.cmd("rm -rf guest_test*") + session.cmd("rm -rf pid_file*") + session.close() + +def ffsb_sum(topdir, prefix, params, guest_ver, resultsdir): + marks = ["Transactions per Second", "Read Throughput", "Write Throughput"] + matrix = [] + sum_thro = 0 + sum_hostcpu = 0 + + cmd = 'find %s|grep "%s.*guest_results/guest_result"|grep -v prepare|sort' \ + % (topdir, prefix) + for guest_result_file in commands.getoutput(cmd).split(): + sub_dir = os.path.dirname(guest_result_file) + content = open(guest_result_file, "r").readlines() + linestr = [] + readthro = 0 + writethro = 0 + + for line in content: + if marks[0] in line: + iops = "%8s" % re.split("\s+", line)[0] + elif marks[1] in line: + substr = re.findall("\d+(?:\.\d+)*", line)[0] + readthro = utils_test.aton("%.2f" % float(substr)) + elif marks[2] in line: + substr = re.findall("\d+(?:\.\d+)*", line)[0] + writethro = utils_test.aton("%.2f" % float(substr)) + break + + throughput = readthro + writethro + linestr.append(iops) + linestr.append(throughput) + sum_thro += throughput + + filename = glob.glob(os.path.join(sub_dir, "guest_monitor_result*.sum"))[0] + sr = open(filename, "r").readlines() + linestr.append("%8.2f" % (100 - utils_test.aton(sr[1].split()[3]))) + linestr.append("%8.2f" % (100 - utils_test.aton(sr[2].split()[3]))) + + filename = glob.glob(os.path.join(sub_dir, "host_monitor_result*.sum"))[0] + sr = open(filename, "r").readlines() + hostcpu = 100 - utils_test.aton(sr[-1].split()[3]) + linestr.append(hostcpu) + sum_hostcpu += hostcpu + linestr.append("%.2f" % (throughput/hostcpu)) + matrix.append(linestr) + + headstr = "threads| IOPS| Thro(MBps)| Vcpu1| Vcpu2| Hostcpu|" \ + " MBps/Hostcpu%" + categories = params.get("categories").split('|') + threads = params.get("threads").split() + kvm_ver = commands.getoutput(params.get('ver_cmd', "rpm -q qemu-kvm")) + + fd = open("%s/ffsb-result.RHS" % resultsdir, "w") + fd.write("#ver# %s\n#ver# host kernel: %s\n#ver# guest kernel:%s\n" % ( + kvm_ver, os.uname()[2], guest_ver)) + + desc = """#desc# The Flexible Filesystem Benchmark(FFSB) is a cross-platform +#desc# filesystem performance measurement tool. It uses customizable profiles +#desc# to measure of different workloads, and it supports multiple groups of +#desc# threads across multiple filesystems. +#desc# How to read the results: +#desc# - The Throughput is measured in MBps/sec. 
+#desc# - IOPS (Input/Output Operations Per Second, pronounced eye-ops) +#desc# - Usage of Vcpu, Hostcpu are all captured +#desc# +""" + fd.write(desc) + fd.write("Category:SUM\n None| MBps| Hostcpu|MBps/Hostcpu%\n") + fd.write(" 0|%8.2f|%13.2f|%8.2f\n" % (sum_thro, sum_hostcpu, + (sum_thro/sum_hostcpu))) + fd.write("Category:ALL\n") + idx = 0 + for i in range(len(matrix)): + if i % 3 == 0: + fd.write("%s\n%s\n" % (categories[idx], headstr)) + idx += 1 + fd.write("%7s|%8s|%13s|%8s|%8s|%10s|%14s\n" % (threads[i%3], + matrix[i][0], matrix[i][1], matrix[i][2], matrix[i][3], + matrix[i][4], matrix[i][5])) + fd.close() diff --git a/kvm/tests/physical_resources_check.py b/kvm/tests/physical_resources_check.py new file mode 100644 index 00000000..b81fff5b --- /dev/null +++ b/kvm/tests/physical_resources_check.py @@ -0,0 +1,258 @@ +import re, string, logging +from autotest.client.shared import error +from autotest.client.virt import kvm_monitor, storage + + +def run_physical_resources_check(test, params, env): + """ + Check physical resources assigned to KVM virtual machines: + 1) Log into the guest + 2) Verify whether cpu counts ,memory size, nics' model, + count and drives' format & count, drive_serial, UUID + reported by the guest OS matches what has been assigned + to the VM (qemu command line) + 3) Verify all MAC addresses for guest NICs + + @param test: KVM test object. + @param params: Dictionary with the test parameters. + @param env: Dictionary with test environment. + """ + # Define a function for checking number of hard drivers & NICs + def check_num(devices, info_cmd, check_str): + f_fail = [] + expected_num = params.objects(devices).__len__() + o = "" + try: + o = vm.monitor.info(info_cmd) + except kvm_monitor.MonitorError, e: + fail_log = e + "\n" + fail_log += "info/query monitor command failed (%s)" % info_cmd + f_fail.append(fail_log) + logging.error(fail_log) + + actual_num = string.count(o, check_str) + if expected_num != actual_num: + fail_log = "%s number mismatch:\n" % str(devices) + fail_log += " Assigned to VM: %d\n" % expected_num + fail_log += " Reported by OS: %d" % actual_num + f_fail.append(fail_log) + logging.error(fail_log) + return expected_num, f_fail + + # Define a function for checking hard drives & NICs' model + def chk_fmt_model(device, fmt_model, info_cmd, regexp): + f_fail = [] + devices = params.objects(device) + for chk_device in devices: + expected = params.object_params(chk_device).get(fmt_model) + if not expected: + expected = "rtl8139" + o = "" + try: + o = vm.monitor.info(info_cmd) + except kvm_monitor.MonitorError, e: + fail_log = e + "\n" + fail_log += "info/query monitor command failed (%s)" % info_cmd + f_fail.append(fail_log) + logging.error(fail_log) + + device_found = re.findall(regexp, o) + logging.debug("Found devices: %s", device_found) + found = False + for fm in device_found: + if expected in fm: + found = True + + if not found: + fail_log = "%s model mismatch:\n" % str(device) + fail_log += " Assigned to VM: %s\n" % expected + fail_log += " Reported by OS: %s" % device_found + f_fail.append(fail_log) + logging.error(fail_log) + return f_fail + + # Define a function to verify UUID & Serial number + def verify_device(expect, name, verify_cmd): + f_fail = [] + if verify_cmd: + actual = session.cmd_output(verify_cmd) + if not string.upper(expect) in actual: + fail_log = "%s mismatch:\n" % name + fail_log += " Assigned to VM: %s\n" % string.upper(expect) + fail_log += " Reported by OS: %s" % actual + f_fail.append(fail_log) + 
logging.error(fail_log) + return f_fail + + + def check_cpu_number(chk_type, expected_n, chk_timeout): + """ + Checking cpu sockets/cores/threads number. + + @param chk_type: Should be one of 'sockets', 'cores', 'threads'. + @param expected_n: Expected number of guest cpu number. + @param chk_timeout: timeout of running chk_cmd. + + @return a list that contains fail report. + """ + f_fail = [] + chk_str = params.get("mem_chk_re_str") + chk_cmd = params.get("cpu_%s_chk_cmd" % chk_type) + if chk_cmd is None: + fail_log = "Unknown cpu number checking type: '%s'" % chk_type + logging.error(fail_log) + f_fail.append(fail_log) + return f_fail + + if chk_cmd == "": + return f_fail + + logging.info("CPU %s number check", string.capitalize(chk_type)) + s, output = session.cmd_status_output(chk_cmd, timeout=chk_timeout) + num = re.findall(chk_str, output) + if s != 0 or not num: + fail_log = "Failed to get guest %s number, " % chk_type + fail_log += "guest output: '%s'" % output + f_fail.append(fail_log) + logging.error(fail_log) + return f_fail + + actual_n = int(num[0]) + if actual_n != expected_n: + fail_log = "%s output mismatch:\n" % string.capitalize(chk_type) + fail_log += " Assigned to VM: '%s'\n" % expected_n + fail_log += " Reported by OS: '%s'" % actual_n + f_fail.append(fail_log) + logging.error(fail_log) + return f_fail + + logging.debug("%s check pass. Expected: '%s', Actual: '%s'", + string.capitalize(chk_type), expected_n, actual_n) + return f_fail + + + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + timeout = int(params.get("login_timeout", 360)) + chk_timeout = int(params.get("chk_timeout", 240)) + session = vm.wait_for_login(timeout=timeout) + + logging.info("Starting physical resources check test") + logging.info("Values assigned to VM are the values we expect " + "to see reported by the Operating System") + # Define a failure counter, as we want to check all physical + # resources to know which checks passed and which ones failed + n_fail = [] + + # We will check HDs with the image name + image_name = storage.get_image_filename(params, test.bindir) + + # Check cpu count + logging.info("CPU count check") + actual_cpu_nr = vm.get_cpu_count() + if vm.cpuinfo.smp != actual_cpu_nr: + fail_log = "CPU count mismatch:\n" + fail_log += " Assigned to VM: %s \n" % vm.cpuinfo.smp + fail_log += " Reported by OS: %s" % actual_cpu_nr + n_fail.append(fail_log) + logging.error(fail_log) + + n_fail.extend(check_cpu_number("cores", vm.cpuinfo.cores, chk_timeout)) + + n_fail.extend(check_cpu_number("threads", vm.cpuinfo.threads, chk_timeout)) + + n_fail.extend(check_cpu_number("sockets", vm.cpuinfo.sockets, chk_timeout)) + + # Check the cpu vendor_id + expected_vendor_id = params.get("cpu_model_vendor") + cpu_vendor_id_chk_cmd = params.get("cpu_vendor_id_chk_cmd") + if expected_vendor_id and cpu_vendor_id_chk_cmd: + output = session.cmd_output(cpu_vendor_id_chk_cmd) + + if not expected_vendor_id in output: + fail_log = "CPU vendor id check failed.\n" + fail_log += " Assigned to VM: '%s'\n" % expected_vendor_id + fail_log += " Reported by OS: '%s'" % output + n_fail.append(fail_log) + logging.error(fail_log) + + # Check memory size + logging.info("Memory size check") + expected_mem = int(params.get("mem")) + actual_mem = vm.get_memory_size() + if actual_mem != expected_mem: + fail_log = "Memory size mismatch:\n" + fail_log += " Assigned to VM: %s\n" % expected_mem + fail_log += " Reported by OS: %s\n" % actual_mem + n_fail.append(fail_log) + logging.error(fail_log) + + + logging.info("Hard 
drive count check") + _, f_fail = check_num("images", "block", image_name) + n_fail.extend(f_fail) + + logging.info("NIC count check") + _, f_fail = check_num("nics", "network", "model=") + n_fail.extend(f_fail) + + logging.info("NICs model check") + f_fail = chk_fmt_model("nics", "nic_model", "network", "model=(.*),") + n_fail.extend(f_fail) + + logging.info("Drive format check") + f_fail = chk_fmt_model("images", "drive_format", + "block", "(.*)\: .*%s" % image_name) + n_fail.extend(f_fail) + + logging.info("Network card MAC check") + o = "" + try: + o = vm.monitor.info("network") + except kvm_monitor.MonitorError, e: + fail_log = e + "\n" + fail_log += "info/query monitor command failed (network)" + n_fail.append(fail_log) + logging.error(fail_log) + found_mac_addresses = re.findall("macaddr=(\S+)", o) + logging.debug("Found MAC adresses: %s", found_mac_addresses) + + num_nics = len(params.objects("nics")) + for nic_index in range(num_nics): + mac = vm.get_mac_address(nic_index) + if not string.lower(mac) in found_mac_addresses: + fail_log = "MAC address mismatch:\n" + fail_log += " Assigned to VM (not found): %s" % mac + n_fail.append(fail_log) + logging.error(fail_log) + + logging.info("UUID check") + if vm.get_uuid(): + f_fail = verify_device(vm.get_uuid(), "UUID", + params.get("catch_uuid_cmd")) + n_fail.extend(f_fail) + + logging.info("Hard Disk serial number check") + catch_serial_cmd = params.get("catch_serial_cmd") + f_fail = verify_device(params.get("drive_serial"), "Serial", + catch_serial_cmd) + n_fail.extend(f_fail) + + # only check if the MS Windows VirtIO driver is digital signed. + chk_cmd = params.get("vio_driver_chk_cmd") + if chk_cmd: + logging.info("Virtio Driver Check") + chk_output = session.cmd_output(chk_cmd, timeout=chk_timeout) + if "FALSE" in chk_output: + fail_log = "VirtIO driver is not digitally signed!" + fail_log += " VirtIO driver check output: '%s'" % chk_output + n_fail.append(fail_log) + logging.error(fail_log) + + if n_fail: + session.close() + raise error.TestFail("Physical resources check test " + "reported %s failures:\n%s" % + (len(n_fail), "\n".join(n_fail))) + + session.close() diff --git a/kvm/tests/qemu_guest_agent.py b/kvm/tests/qemu_guest_agent.py new file mode 100644 index 00000000..c60a7fcd --- /dev/null +++ b/kvm/tests/qemu_guest_agent.py @@ -0,0 +1,36 @@ +import logging +from autotest.client.shared import error +from autotest.client.virt import guest_agent + + +def run_qemu_guest_agent(test, params, env): + """ + Test qemu guest agent, this case will: + 1) Start VM with virtio serial port. + 2) Install qemu-guest-agent package in guest. + 3) Create QemuAgent object and test if virt agent works. + + @param test: kvm test object + @param params: Dictionary with the test parameters + @param env: Dictionary with test environmen. + """ + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + + session = vm.wait_for_login(timeout=int(params.get("login_timeout", 360))) + + # Try to install 'qemu-guest-agent' package. + gagent_install_cmd = params.get("gagent_install_cmd") + s = session.cmd_status(gagent_install_cmd) + session.close() + if s != 0: + raise error.TestError("Could not install qemu-guest-agent package") + + gagent_name = params.get("gagent_name", "org.qemu.guest_agent.0") + gagent_file_name = vm.get_virtio_port_filename(gagent_name) + gagent = guest_agent.QemuAgent(vm, gagent_name, gagent_file_name, + get_supported_cmds=True) + + # Check if guest agent work. 
+ gagent.verify_responsive() + logging.info(gagent.cmd("guest-info")) diff --git a/kvm/tests/qemu_img.py b/kvm/tests/qemu_img.py new file mode 100644 index 00000000..8792688f --- /dev/null +++ b/kvm/tests/qemu_img.py @@ -0,0 +1,439 @@ +import re, os, logging, commands +from autotest.client.shared import utils, error +from autotest.client.virt import utils_misc, env_process, storage + + +def run_qemu_img(test, params, env): + """ + 'qemu-img' functions test: + 1) Judge what subcommand is going to be tested + 2) Run subcommand test + + @param test: kvm test object + @param params: Dictionary with the test parameters + @param env: Dictionary with test environment. + """ + cmd = utils_misc.get_path(test.bindir, params.get("qemu_img_binary")) + if not os.path.exists(cmd): + raise error.TestError("Binary of 'qemu-img' not found") + image_format = params.get("image_format") + image_size = params.get("image_size", "10G") + image_name = storage.get_image_filename(params, test.bindir) + + + def _check(cmd, img): + """ + Simple 'qemu-img check' function implementation. + + @param cmd: qemu-img base command. + @param img: image to be checked + """ + cmd += " check %s" % img + logging.info("Checking image '%s'...", img) + try: + output = utils.system_output(cmd) + except error.CmdError, e: + if "does not support checks" in str(e): + return (True, "") + else: + return (False, str(e)) + return (True, output) + + + def check_test(cmd): + """ + Subcommand 'qemu-img check' test. + + This tests will 'dd' to create a specified size file, and check it. + Then convert it to supported image_format in each loop and check again. + + @param cmd: qemu-img base command. + """ + test_image = utils_misc.get_path(test.bindir, + params.get("image_name_dd")) + print "test_image = %s" % test_image + create_image_cmd = params.get("create_image_cmd") + create_image_cmd = create_image_cmd % test_image + print "create_image_cmd = %s" % create_image_cmd + utils.system(create_image_cmd) + s, o = _check(cmd, test_image) + if not s: + raise error.TestFail("Check image '%s' failed with error: %s" % + (test_image, o)) + for fmt in params.get("supported_image_formats").split(): + output_image = test_image + ".%s" % fmt + _convert(cmd, fmt, test_image, output_image) + s, o = _check(cmd, output_image) + if not s: + raise error.TestFail("Check image '%s' got error: %s" % + (output_image, o)) + os.remove(output_image) + os.remove(test_image) + + + def _create(cmd, img_name, fmt, img_size=None, base_img=None, + base_img_fmt=None, encrypted="no"): + """ + Simple wrapper of 'qemu-img create' + + @param cmd: qemu-img base command. + @param img_name: name of the image file + @param fmt: image format + @param img_size: image size + @param base_img: base image if create a snapshot image + @param base_img_fmt: base image format if create a snapshot image + @param encrypted: indicates whether the created image is encrypted + """ + cmd += " create" + if encrypted == "yes": + cmd += " -e" + if base_img: + cmd += " -b %s" % base_img + if base_img_fmt: + cmd += " -F %s" % base_img_fmt + cmd += " -f %s" % fmt + cmd += " %s" % img_name + if img_size: + cmd += " %s" % img_size + utils.system(cmd) + + + def create_test(cmd): + """ + Subcommand 'qemu-img create' test. + + @param cmd: qemu-img base command. + """ + image_large = params.get("image_name_large") + img = utils_misc.get_path(test.bindir, image_large) + img += '.' 
+ image_format + _create(cmd, img_name=img, fmt=image_format, + img_size=params.get("image_size_large")) + os.remove(img) + + + def _convert(cmd, output_fmt, img_name, output_filename, + fmt=None, compressed="no", encrypted="no"): + """ + Simple wrapper of 'qemu-img convert' function. + + @param cmd: qemu-img base command. + @param output_fmt: the output format of converted image + @param img_name: image name that to be converted + @param output_filename: output image name that converted + @param fmt: output image format + @param compressed: whether output image is compressed + @param encrypted: whether output image is encrypted + """ + cmd += " convert" + if compressed == "yes": + cmd += " -c" + if encrypted == "yes": + cmd += " -e" + if fmt: + cmd += " -f %s" % fmt + cmd += " -O %s" % output_fmt + cmd += " %s %s" % (img_name, output_filename) + logging.info("Converting '%s' from format '%s' to '%s'", img_name, fmt, + output_fmt) + utils.system(cmd) + + + def convert_test(cmd): + """ + Subcommand 'qemu-img convert' test. + + @param cmd: qemu-img base command. + """ + dest_img_fmt = params.get("dest_image_format") + output_filename = "%s.converted_%s" % (image_name, dest_img_fmt) + + _convert(cmd, dest_img_fmt, image_name, output_filename, + image_format, params.get("compressed"), params.get("encrypted")) + + if dest_img_fmt == "qcow2": + s, o = _check(cmd, output_filename) + if s: + os.remove(output_filename) + else: + raise error.TestFail("Check image '%s' failed with error: %s" % + (output_filename, o)) + else: + os.remove(output_filename) + + + def _info(cmd, img, sub_info=None, fmt=None): + """ + Simple wrapper of 'qemu-img info'. + + @param cmd: qemu-img base command. + @param img: image file + @param sub_info: sub info, say 'backing file' + @param fmt: image format + """ + cmd += " info" + if fmt: + cmd += " -f %s" % fmt + cmd += " %s" % img + + try: + output = utils.system_output(cmd) + except error.CmdError, e: + logging.error("Get info of image '%s' failed: %s", img, str(e)) + return None + + if not sub_info: + return output + + sub_info += ": (.*)" + matches = re.findall(sub_info, output) + if matches: + return matches[0] + return None + + + def info_test(cmd): + """ + Subcommand 'qemu-img info' test. + + @param cmd: qemu-img base command. + """ + img_info = _info(cmd, image_name) + logging.info("Info of image '%s':\n%s", image_name, img_info) + if not image_format in img_info: + raise error.TestFail("Got unexpected format of image '%s'" + " in info test" % image_name) + if not image_size in img_info: + raise error.TestFail("Got unexpected size of image '%s'" + " in info test" % image_name) + + + def snapshot_test(cmd): + """ + Subcommand 'qemu-img snapshot' test. + + @param cmd: qemu-img base command. 
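+
+        The three qemu-img snapshot operations exercised below amount to
+        (illustration only):
+            qemu-img snapshot -c snapshot0 <image>
+            qemu-img snapshot -l <image>
+            qemu-img snapshot -d snapshot0 <image>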
+ """ + cmd += " snapshot" + for i in range(2): + crtcmd = cmd + sn_name = "snapshot%d" % i + crtcmd += " -c %s %s" % (sn_name, image_name) + s, o = commands.getstatusoutput(crtcmd) + if s != 0: + raise error.TestFail("Create snapshot failed via command: %s;" + "Output is: %s" % (crtcmd, o)) + logging.info("Created snapshot '%s' in '%s'", sn_name, image_name) + listcmd = cmd + listcmd += " -l %s" % image_name + s, o = commands.getstatusoutput(listcmd) + if not ("snapshot0" in o and "snapshot1" in o and s == 0): + raise error.TestFail("Snapshot created failed or missed;" + "snapshot list is: \n%s" % o) + for i in range(2): + sn_name = "snapshot%d" % i + delcmd = cmd + delcmd += " -d %s %s" % (sn_name, image_name) + s, o = commands.getstatusoutput(delcmd) + if s != 0: + raise error.TestFail("Delete snapshot '%s' failed: %s" % + (sn_name, o)) + + + def commit_test(cmd): + """ + Subcommand 'qemu-img commit' test. + 1) Create a backing file of the qemu harddisk specified by image_name. + 2) Start a VM using the backing file as its harddisk. + 3) Touch a file "commit_testfile" in the backing_file, and shutdown the + VM. + 4) Make sure touching the file does not affect the original harddisk. + 5) Commit the change to the original harddisk by executing + "qemu-img commit" command. + 6) Start the VM using the original harddisk. + 7) Check if the file "commit_testfile" exists. + + @param cmd: qemu-img base command. + """ + cmd += " commit" + + logging.info("Commit testing started!") + image_name = params.get("image_name", "image") + image_format = params.get("image_format", "qcow2") + backing_file_name = "%s_bak" % (image_name) + + try: + # Remove the existing backing file + backing_file = "%s.%s" % (backing_file_name, image_format) + if os.path.isfile(backing_file): + os.remove(backing_file) + + # Create the new backing file + create_cmd = "qemu-img create -b %s.%s -f %s %s.%s" % (image_name, + image_format, + image_format, + backing_file_name, + image_format) + try: + utils.system(create_cmd) + except error.CmdError, e: + raise error.TestFail("Could not create a backing file!") + logging.info("backing_file created!") + + # Set the qemu harddisk to the backing file + logging.info("Original image_name is: %s", params.get('image_name')) + params['image_name'] = backing_file_name + logging.info("Param image_name changed to: %s", + params.get('image_name')) + + # Start a new VM, using backing file as its harddisk + vm_name = params.get('main_vm') + env_process.preprocess_vm(test, params, env, vm_name) + vm = env.get_vm(vm_name) + vm.create() + timeout = int(params.get("login_timeout", 360)) + session = vm.wait_for_login(timeout=timeout) + + # Do some changes to the backing_file harddisk + try: + output = session.cmd("touch /commit_testfile") + logging.info("Output of touch /commit_testfile: %s", output) + output = session.cmd("ls / | grep commit_testfile") + logging.info("Output of ls / | grep commit_testfile: %s", + output) + except Exception, e: + raise error.TestFail("Could not create commit_testfile in the " + "backing file %s" % e) + vm.destroy() + + # Make sure there is no effect on the original harddisk + # First, set the harddisk back to the original one + logging.info("Current image_name is: %s", params.get('image_name')) + params['image_name'] = image_name + logging.info("Param image_name reverted to: %s", + params.get('image_name')) + + # Second, Start a new VM, using image_name as its harddisk + # Here, the commit_testfile should not exist + vm_name = params.get('main_vm') + 
env_process.preprocess_vm(test, params, env, vm_name) + vm = env.get_vm(vm_name) + vm.create() + timeout = int(params.get("login_timeout", 360)) + session = vm.wait_for_login(timeout=timeout) + try: + output = session.cmd("[ ! -e /commit_testfile ] && echo $?") + logging.info("Output of [ ! -e /commit_testfile ] && echo $?: " + "%s", output) + except Exception: + output = session.cmd("rm -f /commit_testfile") + raise error.TestFail("The commit_testfile exists on the " + "original file") + vm.destroy() + + # Excecute the commit command + logging.info("Commiting image") + cmitcmd = "%s -f %s %s.%s" % (cmd, image_format, backing_file_name, + image_format) + try: + utils.system(cmitcmd) + except error.CmdError, e: + raise error.TestFail("Could not commit the backing file") + + # Start a new VM, using image_name as its harddisk + vm_name = params.get('main_vm') + env_process.preprocess_vm(test, params, env, vm_name) + vm = env.get_vm(vm_name) + vm.create() + timeout = int(params.get("login_timeout", 360)) + session = vm.wait_for_login(timeout=timeout) + try: + output = session.cmd("[ -e /commit_testfile ] && echo $?") + logging.info("Output of [ -e /commit_testfile ] && echo $?: %s", + output) + session.cmd("rm -f /commit_testfile") + except Exception: + raise error.TestFail("Could not find commit_testfile after a " + "commit") + vm.destroy() + + finally: + # Remove the backing file + if os.path.isfile(backing_file): + os.remove(backing_file) + + + def _rebase(cmd, img_name, base_img, backing_fmt, mode="unsafe"): + """ + Simple wrapper of 'qemu-img rebase'. + + @param cmd: qemu-img base command. + @param img_name: image name to be rebased + @param base_img: indicates the base image + @param backing_fmt: the format of base image + @param mode: rebase mode: safe mode, unsafe mode + """ + cmd += " rebase" + if mode == "unsafe": + cmd += " -u" + cmd += " -b %s -F %s %s" % (base_img, backing_fmt, img_name) + logging.info("Trying to rebase '%s' to '%s'...", img_name, base_img) + s, o = commands.getstatusoutput(cmd) + if s != 0: + raise error.TestError("Failed to rebase '%s' to '%s': %s" % + (img_name, base_img, o)) + + + def rebase_test(cmd): + """ + Subcommand 'qemu-img rebase' test + + Change the backing file of a snapshot image in "unsafe mode": + Assume the previous backing file had missed and we just have to change + reference of snapshot to new one. After change the backing file of a + snapshot image in unsafe mode, the snapshot should work still. + + @param cmd: qemu-img base command. 
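+
+        The unsafe-mode rebase performed below boils down to (illustration):
+            qemu-img rebase -u -b <new_base> -F <backing_fmt> <snapshot>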
+ """ + if not 'rebase' in utils.system_output(cmd + ' --help', + ignore_status=True): + raise error.TestNAError("Current kvm user space version does not" + " support 'rebase' subcommand") + sn_fmt = params.get("snapshot_format", "qcow2") + sn1 = params.get("image_name_snapshot1") + sn1 = utils_misc.get_path(test.bindir, sn1) + ".%s" % sn_fmt + base_img = storage.get_image_filename(params, test.bindir) + _create(cmd, sn1, sn_fmt, base_img=base_img, base_img_fmt=image_format) + + # Create snapshot2 based on snapshot1 + sn2 = params.get("image_name_snapshot2") + sn2 = utils_misc.get_path(test.bindir, sn2) + ".%s" % sn_fmt + _create(cmd, sn2, sn_fmt, base_img=sn1, base_img_fmt=sn_fmt) + + rebase_mode = params.get("rebase_mode") + if rebase_mode == "unsafe": + os.remove(sn1) + + _rebase(cmd, sn2, base_img, image_format, mode=rebase_mode) + + # Check sn2's format and backing_file + actual_base_img = _info(cmd, sn2, "backing file") + base_img_name = os.path.basename(params.get("image_name")) + if not base_img_name in actual_base_img: + raise error.TestFail("After rebase the backing_file of 'sn2' is " + "'%s' which is not expected as '%s'" + % (actual_base_img, base_img_name)) + s, o = _check(cmd, sn2) + if not s: + raise error.TestFail("Check image '%s' failed after rebase;" + "got error: %s" % (sn2, o)) + try: + os.remove(sn2) + os.remove(sn1) + except Exception: + pass + + + # Here starts test + subcommand = params.get("subcommand") + eval("%s_test(cmd)" % subcommand) diff --git a/kvm/tests/qemu_io_blkdebug.py b/kvm/tests/qemu_io_blkdebug.py new file mode 100644 index 00000000..ffb8ceef --- /dev/null +++ b/kvm/tests/qemu_io_blkdebug.py @@ -0,0 +1,89 @@ +import re, logging, ConfigParser +from autotest.client.shared import error +from autotest.client.virt import qemu_io +from autotest.client.virt import utils_misc +from autotest.client.virt.kvm_storage import QemuImg +from autotest.client import utils + +@error.context_aware +def run_qemu_io_blkdebug(test, params, env): + """ + Run qemu-io blkdebug tests: + 1. Create image with given parameters + 2. Write the blkdebug config file + 3. Try to do operate in image with qemu-io and get the error message + 4. Get the error message from perror by error number set in config file + 5. Compare the error message + + @param test: kvm test object + @param params: Dictionary with the test parameters + @param env: Dictionary with test environment. 
+ """ + tmp_dir = params.get("tmp_dir", "/tmp") + blkdebug_cfg = utils_misc.get_path(tmp_dir, params.get("blkdebug_cfg", + "blkdebug.cfg")) + err_command = params.get("err_command") + err_event = params.get("err_event") + errn_list = re.split("\s+", params.get("errn_list").strip()) + re_std_msg = params.get("re_std_msg") + test_timeout = int(params.get("test_timeout", "60")) + pre_err_commands = params.get("pre_err_commands") + image = params.get("images") + blkdebug_default = params.get("blkdebug_default") + + error.context("Create image", logging.info) + image_io = QemuImg(params.object_params(image), test.bindir, image) + image_name = image_io.create(params.object_params(image)) + + template_name = utils_misc.get_path(test.virtdir, blkdebug_default) + template = ConfigParser.ConfigParser() + template.read(template_name) + + for errn in errn_list: + log_filename = utils_misc.get_path(test.outputdir, + "qemu-io-log-%s" % errn) + error.context("Write the blkdebug config file", logging.info) + template.set("inject-error", "event", '"%s"' % err_event) + template.set("inject-error", "errno", '"%s"' % errn) + + error.context("Write blkdebug config file", logging.info) + blkdebug = None + try: + blkdebug = open(blkdebug_cfg, 'w') + template.write(blkdebug) + finally: + if blkdebug is not None: + blkdebug.close() + + error.context("Operate in qemu-io to trigger the error", logging.info) + session = qemu_io.QemuIOShellSession(test, params, image_name, + blkdebug_cfg=blkdebug_cfg, + log_filename=log_filename) + if pre_err_commands: + for cmd in re.split(",", pre_err_commands.strip()): + session.cmd_output(cmd, timeout=test_timeout) + + output = session.cmd_output(err_command, timeout=test_timeout) + error.context("Get error message from command perror", logging.info) + perror_cmd = "perror %s" % errn + std_msg = utils.system_output(perror_cmd) + std_msg = re.findall(re_std_msg, std_msg) + if std_msg: + std_msg = std_msg[0] + else: + std_msg = "" + logging.warning("Can not find error message from perror") + + session.close() + error.context("Compare the error message", logging.info) + if std_msg: + if std_msg in output: + logging.info("Error message is correct in qemu-io") + else: + fail_log = "The error message is mismatch:" + fail_log += "qemu-io reports: '%s'," % output + fail_log += "perror reports: '%s'" % std_msg + raise error.TestFail(fail_log) + else: + logging.warning("Can not find error message from perror." + " The output from qemu-io is %s" % output) diff --git a/kvm/tests/qemu_iotests.py b/kvm/tests/qemu_iotests.py new file mode 100644 index 00000000..a215f382 --- /dev/null +++ b/kvm/tests/qemu_iotests.py @@ -0,0 +1,48 @@ +import os +from autotest.client.shared import git, error +from autotest.client import utils +from autotest.client.virt import utils_misc + + +@error.context_aware +def run_qemu_iotests(test, params, env): + """ + Fetch from git and run qemu-iotests using the qemu binaries under test. + + 1) Fetch qemu-io from git + 3) Run test for the file format detected + 4) Report any errors found to autotest + + @param test: KVM test object. + @param params: Dictionary with the test parameters. + @param env: Dictionary with test environment. 
+ """ + # First, let's get qemu-io + std = "git://git.kernel.org/pub/scm/linux/kernel/git/hch/qemu-iotests.git" + uri = params.get("qemu_io_uri", std) + branch = params.get("qemu_io_branch", 'master') + lbranch = params.get("qemu_io_lbranch", 'master') + commit = params.get("qemu_io_commit", None) + base_uri = params.get("qemu_io_base_uri", None) + destination_dir = os.path.join(test.srcdir, "qemu_io_tests") + git.get_repo(uri=uri, branch=branch, lbranch=lbranch, commit=commit, + destination_dir=destination_dir, base_uri=base_uri) + + # Then, set the qemu paths for the use of the testsuite + os.environ["QEMU_PROG"] = utils_misc.get_path(test.bindir, + params.get("qemu_binary", "qemu")) + os.environ["QEMU_IMG_PROG"] = utils_misc.get_path(test.bindir, + params.get("qemu_img_binary", "qemu-img")) + os.environ["QEMU_IO_PROG"] = utils_misc.get_path(test.bindir, + params.get("qemu_io_binary", "qemu-io")) + + os.chdir(destination_dir) + image_format = params.get("qemu_io_image_format") + extra_options = params.get("qemu_io_extra_options", "") + + cmd = './check' + if extra_options: + cmd += extra_options + + error.context("running qemu-iotests for image format %s" % image_format) + utils.system("%s -%s" % (cmd, image_format)) diff --git a/kvm/tests/qmp_basic.py b/kvm/tests/qmp_basic.py new file mode 100644 index 00000000..2a2db03f --- /dev/null +++ b/kvm/tests/qmp_basic.py @@ -0,0 +1,407 @@ +from autotest.client.shared import error +from autotest.client.virt import kvm_monitor + + +def run_qmp_basic(test, params, env): + """ + QMP Specification test-suite: this checks if the *basic* protocol conforms + to its specification, which is file QMP/qmp-spec.txt in QEMU's source tree. + + IMPORTANT NOTES: + + o Most tests depend heavily on QMP's error information (eg. classes), + this might have bad implications as the error interface is going to + change in QMP + + o Command testing is *not* covered in this suite. Each command has its + own specification and should be tested separately + + o We use the same terminology as used by the QMP specification, + specially with regard to JSON types (eg. a Python dict is called + a json-object) + + o This is divided in sub test-suites, please check the bottom of this + file to check the order in which they are run + + TODO: + + o Finding which test failed is not as easy as it should be + + o Are all those check_*() functions really needed? Wouldn't a + specialized class (eg. a Response class) do better? + """ + def fail_no_key(qmp_dict, key): + if not isinstance(qmp_dict, dict): + raise error.TestFail("qmp_dict is not a dict (it's '%s')" % + type(qmp_dict)) + if not key in qmp_dict: + raise error.TestFail("'%s' key doesn't exist in dict ('%s')" % + (key, str(qmp_dict))) + + + def check_dict_key(qmp_dict, key, keytype): + """ + Performs the following checks on a QMP dict key: + + 1. qmp_dict is a dict + 2. key exists in qmp_dict + 3. key is of type keytype + + If any of these checks fails, error.TestFail is raised. 
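+
+        Example (illustrative): check_dict_key(resp, "return", dict)
+        passes only if resp is a dict, contains a "return" key, and
+        resp["return"] is itself a dict.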
+ """ + fail_no_key(qmp_dict, key) + if not isinstance(qmp_dict[key], keytype): + raise error.TestFail("'%s' key is not of type '%s', it's '%s'" % + (key, keytype, type(qmp_dict[key]))) + + + def check_key_is_dict(qmp_dict, key): + check_dict_key(qmp_dict, key, dict) + + + def check_key_is_list(qmp_dict, key): + check_dict_key(qmp_dict, key, list) + + + def check_key_is_str(qmp_dict, key): + check_dict_key(qmp_dict, key, unicode) + + + def check_str_key(qmp_dict, keyname, value=None): + check_dict_key(qmp_dict, keyname, unicode) + if value and value != qmp_dict[keyname]: + raise error.TestFail("'%s' key value '%s' should be '%s'" % + (keyname, str(qmp_dict[keyname]), str(value))) + + + def check_key_is_int(qmp_dict, key): + fail_no_key(qmp_dict, key) + try: + int(qmp_dict[key]) + except Exception: + raise error.TestFail("'%s' key is not of type int, it's '%s'" % + (key, type(qmp_dict[key]))) + + + def check_bool_key(qmp_dict, keyname, value=None): + check_dict_key(qmp_dict, keyname, bool) + if value and value != qmp_dict[keyname]: + raise error.TestFail("'%s' key value '%s' should be '%s'" % + (keyname, str(qmp_dict[keyname]), str(value))) + + + def check_success_resp(resp, empty=False): + """ + Check QMP OK response. + + @param resp: QMP response + @param empty: if True, response should not contain data to return + """ + check_key_is_dict(resp, "return") + if empty and len(resp["return"]) > 0: + raise error.TestFail("success response is not empty ('%s')" % + str(resp)) + + + def check_error_resp(resp, classname=None, datadict=None): + """ + Check QMP error response. + + @param resp: QMP response + @param classname: Expected error class name + @param datadict: Expected error data dictionary + """ + check_key_is_dict(resp, "error") + check_key_is_str(resp["error"], "class") + if classname and resp["error"]["class"] != classname: + raise error.TestFail("got error class '%s' expected '%s'" % + (resp["error"]["class"], classname)) + check_key_is_dict(resp["error"], "data") + if datadict and resp["error"]["data"] != datadict: + raise error.TestFail("got data dict '%s' expected '%s'" % + (resp["error"]["data"], datadict)) + + + def test_version(version): + """ + Check the QMP greeting message version key which, according to QMP's + documentation, should be: + + { "qemu": { "major": json-int, "minor": json-int, "micro": json-int } + "package": json-string } + """ + check_key_is_dict(version, "qemu") + for key in [ "major", "minor", "micro" ]: + check_key_is_int(version["qemu"], key) + check_key_is_str(version, "package") + + + def test_greeting(greeting): + check_key_is_dict(greeting, "QMP") + check_key_is_dict(greeting["QMP"], "version") + check_key_is_list(greeting["QMP"], "capabilities") + + + def greeting_suite(monitor): + """ + Check the greeting message format, as described in the QMP + specfication section '2.2 Server Greeting'. + + { "QMP": { "version": json-object, "capabilities": json-array } } + """ + greeting = monitor.get_greeting() + test_greeting(greeting) + test_version(greeting["QMP"]["version"]) + + + def json_parsing_errors_suite(monitor): + """ + Check that QMP's parser is able to recover from parsing errors, please + check the JSON spec for more info on the JSON syntax (RFC 4627). + """ + # We're quite simple right now and the focus is on parsing errors that + # have already biten us in the past. 
+ # + # TODO: The following test-cases are missing: + # + # - JSON numbers, strings and arrays + # - More invalid characters or malformed structures + # - Valid, but not obvious syntax, like zillion of spaces or + # strings with unicode chars (different suite maybe?) + bad_json = [] + + # A JSON value MUST be an object, array, number, string, true, false, + # or null + # + # NOTE: QMP seems to ignore a number of chars, like: | and ? + bad_json.append(":") + bad_json.append(",") + + # Malformed json-objects + # + # NOTE: sending only "}" seems to break QMP + # NOTE: Duplicate keys are accepted (should it?) + bad_json.append("{ \"execute\" }") + bad_json.append("{ \"execute\": \"query-version\", }") + bad_json.append("{ 1: \"query-version\" }") + bad_json.append("{ true: \"query-version\" }") + bad_json.append("{ []: \"query-version\" }") + bad_json.append("{ {}: \"query-version\" }") + + for cmd in bad_json: + resp = monitor.cmd_raw(cmd) + check_error_resp(resp, "JSONParsing") + + + def test_id_key(monitor): + """ + Check that QMP's "id" key is correctly handled. + """ + # The "id" key must be echoed back in error responses + id_key = "kvm-autotest" + resp = monitor.cmd_qmp("eject", { "foobar": True }, id=id_key) + check_error_resp(resp) + check_str_key(resp, "id", id_key) + + # The "id" key must be echoed back in success responses + resp = monitor.cmd_qmp("query-status", id=id_key) + check_success_resp(resp) + check_str_key(resp, "id", id_key) + + # The "id" key can be any json-object + for id_key in [ True, 1234, "string again!", [1, [], {}, True, "foo"], + { "key": {} } ]: + resp = monitor.cmd_qmp("query-status", id=id_key) + check_success_resp(resp) + if resp["id"] != id_key: + raise error.TestFail("expected id '%s' but got '%s'" % + (str(id_key), str(resp["id"]))) + + + def test_invalid_arg_key(monitor): + """ + Currently, the only supported keys in the input object are: "execute", + "arguments" and "id". Although expansion is supported, invalid key + names must be detected. + """ + resp = monitor.cmd_obj({ "execute": "eject", "foobar": True }) + check_error_resp(resp, "QMPExtraInputObjectMember", + { "member": "foobar" }) + + + def test_bad_arguments_key_type(monitor): + """ + The "arguments" key must be an json-object. + + We use the eject command to perform the tests, but that's a random + choice, any command that accepts arguments will do, as the command + doesn't get called. + """ + for item in [ True, [], 1, "foo" ]: + resp = monitor.cmd_obj({ "execute": "eject", "arguments": item }) + check_error_resp(resp, "QMPBadInputObjectMember", + { "member": "arguments", "expected": "object" }) + + + def test_bad_execute_key_type(monitor): + """ + The "execute" key must be a json-string. + """ + for item in [ False, 1, {}, [] ]: + resp = monitor.cmd_obj({ "execute": item }) + check_error_resp(resp, "QMPBadInputObjectMember", + { "member": "execute", "expected": "string" }) + + + def test_no_execute_key(monitor): + """ + The "execute" key must exist, we also test for some stupid parsing + errors. + """ + for cmd in [ {}, { "execut": "qmp_capabilities" }, + { "executee": "qmp_capabilities" }, { "foo": "bar" }]: + resp = monitor.cmd_obj(cmd) + check_error_resp(resp) # XXX: check class and data dict? + + + def test_bad_input_obj_type(monitor): + """ + The input object must be... an json-object. 
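+
+        For a non-object input such as "foo", the error response is
+        expected to have roughly this shape:
+
+            { "error": { "class": "QMPBadInputObject",
+                         "data": { "expected": "object" } } }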
+ """ + for cmd in [ "foo", [], True, 1 ]: + resp = monitor.cmd_obj(cmd) + check_error_resp(resp, "QMPBadInputObject", { "expected":"object" }) + + + def test_good_input_obj(monitor): + """ + Basic success tests for issuing QMP commands. + """ + # NOTE: We don't use the cmd_qmp() method here because the command + # object is in a 'random' order + resp = monitor.cmd_obj({ "execute": "query-version" }) + check_success_resp(resp) + + resp = monitor.cmd_obj({ "arguments": {}, "execute": "query-version" }) + check_success_resp(resp) + + idd = "1234foo" + resp = monitor.cmd_obj({ "id": idd, "execute": "query-version", + "arguments": {} }) + check_success_resp(resp) + check_str_key(resp, "id", idd) + + # TODO: would be good to test simple argument usage, but we don't have + # a read-only command that accepts arguments. + + + def input_object_suite(monitor): + """ + Check the input object format, as described in the QMP specfication + section '2.3 Issuing Commands'. + + { "execute": json-string, "arguments": json-object, "id": json-value } + """ + test_good_input_obj(monitor) + test_bad_input_obj_type(monitor) + test_no_execute_key(monitor) + test_bad_execute_key_type(monitor) + test_bad_arguments_key_type(monitor) + test_id_key(monitor) + test_invalid_arg_key(monitor) + + + def argument_checker_suite(monitor): + """ + Check that QMP's argument checker is detecting all possible errors. + + We use a number of different commands to perform the checks, but the + command used doesn't matter much as QMP performs argument checking + _before_ calling the command. + """ + # stop doesn't take arguments + resp = monitor.cmd_qmp("stop", { "foo": 1 }) + check_error_resp(resp, "InvalidParameter", { "name": "foo" }) + + # required argument omitted + resp = monitor.cmd_qmp("screendump") + check_error_resp(resp, "MissingParameter", { "name": "filename" }) + + # 'bar' is not a valid argument + resp = monitor.cmd_qmp("screendump", { "filename": "outfile", + "bar": "bar" }) + check_error_resp(resp, "InvalidParameter", { "name": "bar"}) + + # test optional argument: 'force' is omitted, but it's optional, so + # the handler has to be called. Test this happens by checking an + # error that is generated by the handler itself. + resp = monitor.cmd_qmp("eject", { "device": "foobar" }) + check_error_resp(resp, "DeviceNotFound") + + # filename argument must be a json-string + for arg in [ {}, [], 1, True ]: + resp = monitor.cmd_qmp("screendump", { "filename": arg }) + check_error_resp(resp, "InvalidParameterType", + { "name": "filename", "expected": "string" }) + + # force argument must be a json-bool + for arg in [ {}, [], 1, "foo" ]: + resp = monitor.cmd_qmp("eject", { "force": arg, "device": "foo" }) + check_error_resp(resp, "InvalidParameterType", + { "name": "force", "expected": "bool" }) + + # val argument must be a json-int + for arg in [ {}, [], True, "foo" ]: + resp = monitor.cmd_qmp("memsave", { "val": arg, "filename": "foo", + "size": 10 }) + check_error_resp(resp, "InvalidParameterType", + { "name": "val", "expected": "int" }) + + # value argument must be a json-number + for arg in [ {}, [], True, "foo" ]: + resp = monitor.cmd_qmp("migrate_set_speed", { "value": arg }) + check_error_resp(resp, "InvalidParameterType", + { "name": "value", "expected": "number" }) + + # qdev-type commands have their own argument checker, all QMP does + # is to skip its checking and pass arguments through. 
Check this + # works by providing invalid options to device_add and expecting + # an error message from qdev + resp = monitor.cmd_qmp("device_add", { "driver": "e1000", + "foo": "bar" }) + check_error_resp(resp, "PropertyNotFound", + {"device": "e1000", "property": "foo"}) + + + def unknown_commands_suite(monitor): + """ + Check that QMP handles unknown commands correctly. + """ + # We also call a HMP-only command, to be sure it will fail as expected + for cmd in [ "bar", "query-", "query-foo", "q", "help" ]: + resp = monitor.cmd_qmp(cmd) + check_error_resp(resp, "CommandNotFound", { "name": cmd }) + + + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + + # Look for the first qmp monitor available, otherwise, fail the test + qmp_monitor = None + for m in vm.monitors: + if isinstance(m, kvm_monitor.QMPMonitor): + qmp_monitor = m + + if qmp_monitor is None: + raise error.TestError('Could not find a QMP monitor, aborting test') + + # Run all suites + greeting_suite(qmp_monitor) + input_object_suite(qmp_monitor) + argument_checker_suite(qmp_monitor) + unknown_commands_suite(qmp_monitor) + json_parsing_errors_suite(qmp_monitor) + + # check if QMP is still alive + if not qmp_monitor.is_responsive(): + raise error.TestFail('QMP monitor is not responsive after testing') diff --git a/kvm/tests/qmp_basic_rhel6.py b/kvm/tests/qmp_basic_rhel6.py new file mode 100644 index 00000000..9994f97a --- /dev/null +++ b/kvm/tests/qmp_basic_rhel6.py @@ -0,0 +1,389 @@ +import logging +from autotest.client.shared import error +from autotest.client.virt import kvm_monitor + + +def run_qmp_basic_rhel6(test, params, env): + """ + QMP Specification test-suite: this checks if the *basic* protocol conforms + to its specification, which is file QMP/qmp-spec.txt in QEMU's source tree. + + IMPORTANT NOTES: + + o Most tests depend heavily on QMP's error information (eg. classes), + this might have bad implications as the error interface is going to + change in QMP + + o Command testing is *not* covered in this suite. Each command has its + own specification and should be tested separately + + o We use the same terminology as used by the QMP specification, + specially with regard to JSON types (eg. a Python dict is called + a json-object) + + o This is divided in sub test-suites, please check the bottom of this + file to check the order in which they are run + + TODO: + + o Finding which test failed is not as easy as it should be + + o Are all those check_*() functions really needed? Wouldn't a + specialized class (eg. a Response class) do better? + """ + def fail_no_key(qmp_dict, key): + if not isinstance(qmp_dict, dict): + raise error.TestFail("qmp_dict is not a dict (it's '%s')" % + type(qmp_dict)) + if not key in qmp_dict: + raise error.TestFail("'%s' key doesn't exist in dict ('%s')" % + (key, str(qmp_dict))) + + + def check_dict_key(qmp_dict, key, keytype): + """ + Performs the following checks on a QMP dict key: + + 1. qmp_dict is a dict + 2. key exists in qmp_dict + 3. key is of type keytype + + If any of these checks fails, error.TestFail is raised. 
+ """ + fail_no_key(qmp_dict, key) + if not isinstance(qmp_dict[key], keytype): + raise error.TestFail("'%s' key is not of type '%s', it's '%s'" % + (key, keytype, type(qmp_dict[key]))) + + + def check_key_is_dict(qmp_dict, key): + check_dict_key(qmp_dict, key, dict) + + + def check_key_is_list(qmp_dict, key): + check_dict_key(qmp_dict, key, list) + + + def check_key_is_str(qmp_dict, key): + check_dict_key(qmp_dict, key, unicode) + + + def check_str_key(qmp_dict, keyname, value=None): + check_dict_key(qmp_dict, keyname, unicode) + if value and value != qmp_dict[keyname]: + raise error.TestFail("'%s' key value '%s' should be '%s'" % + (keyname, str(qmp_dict[keyname]), str(value))) + + + def check_key_is_int(qmp_dict, key): + fail_no_key(qmp_dict, key) + try: + int(qmp_dict[key]) + except Exception: + raise error.TestFail("'%s' key is not of type int, it's '%s'" % + (key, type(qmp_dict[key]))) + + + def check_bool_key(qmp_dict, keyname, value=None): + check_dict_key(qmp_dict, keyname, bool) + if value and value != qmp_dict[keyname]: + raise error.TestFail("'%s' key value '%s' should be '%s'" % + (keyname, str(qmp_dict[keyname]), str(value))) + + + def check_success_resp(resp, empty=False): + """ + Check QMP OK response. + + @param resp: QMP response + @param empty: if True, response should not contain data to return + """ + check_key_is_dict(resp, "return") + if empty and len(resp["return"]) > 0: + raise error.TestFail("success response is not empty ('%s')" % + str(resp)) + + + def check_error_resp(resp, classname=None, datadict=None): + """ + Check QMP error response. + + @param resp: QMP response + @param classname: Expected error class name + @param datadict: Expected error data dictionary + """ + logging.debug("resp %s", str(resp)) + check_key_is_dict(resp, "error") + check_key_is_str(resp["error"], "class") + if classname and resp["error"]["class"] != classname: + raise error.TestFail("got error class '%s' expected '%s'" % + (resp["error"]["class"], classname)) + check_key_is_dict(resp["error"], "data") + if datadict and resp["error"]["data"] != datadict: + raise error.TestFail("got data dict '%s' expected '%s'" % + (resp["error"]["data"], datadict)) + + + def test_version(version): + """ + Check the QMP greeting message version key which, according to QMP's + documentation, should be: + + { "qemu": { "major": json-int, "minor": json-int, "micro": json-int } + "package": json-string } + """ + check_key_is_str(version, "qemu") + check_key_is_str(version, "package") + + + def test_greeting(greeting): + check_key_is_dict(greeting, "QMP") + check_key_is_dict(greeting["QMP"], "version") + check_key_is_list(greeting["QMP"], "capabilities") + + + def greeting_suite(monitor): + """ + Check the greeting message format, as described in the QMP + specfication section '2.2 Server Greeting'. + + { "QMP": { "version": json-object, "capabilities": json-array } } + """ + greeting = monitor.get_greeting() + test_greeting(greeting) + test_version(greeting["QMP"]["version"]) + + + def json_parsing_errors_suite(monitor): + """ + Check that QMP's parser is able to recover from parsing errors, please + check the JSON spec for more info on the JSON syntax (RFC 4627). + """ + # We're quite simple right now and the focus is on parsing errors that + # have already biten us in the past. 
+ # + # TODO: The following test-cases are missing: + # + # - JSON numbers, strings and arrays + # - More invalid characters or malformed structures + # - Valid, but not obvious syntax, like zillion of spaces or + # strings with unicode chars (different suite maybe?) + bad_json = [] + + # A JSON value MUST be an object, array, number, string, true, false, + # or null + # + # NOTE: QMP seems to ignore a number of chars, like: | and ? + bad_json.append(":") + bad_json.append(",") + + # Malformed json-objects + # + # NOTE: sending only "}" seems to break QMP + # NOTE: Duplicate keys are accepted (should it?) + bad_json.append("{ \"execute\" }") + bad_json.append("{ \"execute\": \"query-version\", }") + bad_json.append("{ 1: \"query-version\" }") + bad_json.append("{ true: \"query-version\" }") + bad_json.append("{ []: \"query-version\" }") + bad_json.append("{ {}: \"query-version\" }") + + for cmd in bad_json: + resp = monitor.cmd_raw(cmd) + check_error_resp(resp, "JSONParsing") + + + def test_id_key(monitor): + """ + Check that QMP's "id" key is correctly handled. + """ + # The "id" key must be echoed back in error responses + id_key = "kvm-autotest" + resp = monitor.cmd_qmp("eject", { "foobar": True }, id=id_key) + check_error_resp(resp) + check_str_key(resp, "id", id_key) + + # The "id" key must be echoed back in success responses + resp = monitor.cmd_qmp("query-status", id=id_key) + check_success_resp(resp) + check_str_key(resp, "id", id_key) + + # The "id" key can be any json-object + for id_key in [ True, 1234, "string again!", [1, [], {}, True, "foo"], + { "key": {} } ]: + resp = monitor.cmd_qmp("query-status", id=id_key) + check_success_resp(resp) + if resp["id"] != id_key: + raise error.TestFail("expected id '%s' but got '%s'" % + (str(id_key), str(resp["id"]))) + + + def test_invalid_arg_key(monitor): + """ + Currently, the only supported keys in the input object are: "execute", + "arguments" and "id". Although expansion is supported, invalid key + names must be detected. + """ + resp = monitor.cmd_obj({ "execute": "eject", "foobar": True }) + expected_error = "MissingParameter" + data_dict = {"name": "device"} + check_error_resp(resp, expected_error, data_dict) + + + def test_bad_arguments_key_type(monitor): + """ + The "arguments" key must be an json-object. + + We use the eject command to perform the tests, but that's a random + choice, any command that accepts arguments will do, as the command + doesn't get called. + """ + for item in [ True, [], 1, "foo" ]: + resp = monitor.cmd_obj({ "execute": "eject", "arguments": item }) + check_error_resp(resp, "QMPBadInputObjectMember", + { "member": "arguments", "expected": "object" }) + + + def test_bad_execute_key_type(monitor): + """ + The "execute" key must be a json-string. + """ + for item in [ False, 1, {}, [] ]: + resp = monitor.cmd_obj({ "execute": item }) + check_error_resp(resp, "QMPBadInputObjectMember", + { "member": "execute", "expected": "string" }) + + + def test_no_execute_key(monitor): + """ + The "execute" key must exist, we also test for some stupid parsing + errors. + """ + for cmd in [ {}, { "execut": "qmp_capabilities" }, + { "executee": "qmp_capabilities" }, { "foo": "bar" }]: + resp = monitor.cmd_obj(cmd) + check_error_resp(resp) # XXX: check class and data dict? + + + def test_bad_input_obj_type(monitor): + """ + The input object must be... an json-object. 
+ """ + for cmd in [ "foo", [], True, 1 ]: + resp = monitor.cmd_obj(cmd) + check_error_resp(resp, "QMPBadInputObject", { "expected":"object" }) + + + def test_good_input_obj(monitor): + """ + Basic success tests for issuing QMP commands. + """ + # NOTE: We don't use the cmd_qmp() method here because the command + # object is in a 'random' order + resp = monitor.cmd_obj({ "execute": "query-version" }) + check_success_resp(resp) + + resp = monitor.cmd_obj({ "arguments": {}, "execute": "query-version" }) + check_success_resp(resp) + + id_key = "1234foo" + resp = monitor.cmd_obj({ "id": id_key, "execute": "query-version", + "arguments": {} }) + check_success_resp(resp) + check_str_key(resp, "id", id_key) + + # TODO: would be good to test simple argument usage, but we don't have + # a read-only command that accepts arguments. + + + def input_object_suite(monitor): + """ + Check the input object format, as described in the QMP specfication + section '2.3 Issuing Commands'. + + { "execute": json-string, "arguments": json-object, "id": json-value } + """ + test_good_input_obj(monitor) + test_bad_input_obj_type(monitor) + test_no_execute_key(monitor) + test_bad_execute_key_type(monitor) + test_bad_arguments_key_type(monitor) + test_id_key(monitor) + test_invalid_arg_key(monitor) + + + def argument_checker_suite(monitor): + """ + Check that QMP's argument checker is detecting all possible errors. + + We use a number of different commands to perform the checks, but the + command used doesn't matter much as QMP performs argument checking + _before_ calling the command. + """ + # qmp in RHEL6 is different from 0.13.*: + # 1. 'stop' command just return {} evenif stop have arguments. + # 2. there is no 'screendump' command. + # 3. argument isn't checked in 'device' command. + # so skip these tests in RHEL6. + + # test optional argument: 'force' is omitted, but it's optional, so + # the handler has to be called. Test this happens by checking an + # error that is generated by the handler itself. + resp = monitor.cmd_qmp("eject", { "device": "foobar" }) + check_error_resp(resp, "DeviceNotFound") + + # val argument must be a json-int + for arg in [ {}, [], True, "foo" ]: + resp = monitor.cmd_qmp("memsave", { "val": arg, "filename": "foo", + "size": 10 }) + check_error_resp(resp, "InvalidParameterType", + { "name": "val", "expected": "int" }) + + # value argument must be a json-number + for arg in [ {}, [], True, "foo" ]: + resp = monitor.cmd_qmp("migrate_set_speed", { "value": arg }) + check_error_resp(resp, "InvalidParameterType", + { "name": "value", "expected": "number" }) + + # qdev-type commands have their own argument checker, all QMP does + # is to skip its checking and pass arguments through. Check this + # works by providing invalid options to device_add and expecting + # an error message from qdev + resp = monitor.cmd_qmp("device_add", {"driver": "e1000", + "foo": "bar" }) + check_error_resp(resp, "PropertyNotFound", + {"device": "e1000", "property": "foo"}) + + + def unknown_commands_suite(monitor): + """ + Check that QMP handles unknown commands correctly. 
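+
+        For instance, sending the HMP-only "help" command should yield an
+        error response of roughly this shape:
+
+            { "error": { "class": "CommandNotFound",
+                         "data": { "name": "help" } } }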
+ """ + # We also call a HMP-only command, to be sure it will fail as expected + for cmd in [ "bar", "query-", "query-foo", "q", "help" ]: + resp = monitor.cmd_qmp(cmd) + check_error_resp(resp, "CommandNotFound", { "name": cmd }) + + + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + + # Look for the first qmp monitor available, otherwise, fail the test + qmp_monitor = None + for m in vm.monitors: + if isinstance(m, kvm_monitor.QMPMonitor): + qmp_monitor = m + + if qmp_monitor is None: + raise error.TestError('Could not find a QMP monitor, aborting test') + + # Run all suites + greeting_suite(qmp_monitor) + input_object_suite(qmp_monitor) + argument_checker_suite(qmp_monitor) + unknown_commands_suite(qmp_monitor) + json_parsing_errors_suite(qmp_monitor) + + # check if QMP is still alive + if not qmp_monitor.is_responsive(): + raise error.TestFail('QMP monitor is not responsive after testing') diff --git a/kvm/tests/seabios.py b/kvm/tests/seabios.py new file mode 100644 index 00000000..15bd6fa4 --- /dev/null +++ b/kvm/tests/seabios.py @@ -0,0 +1,59 @@ +import re, logging +from autotest.client.shared import error +from autotest.client.virt import utils_misc + + +@error.context_aware +def run_seabios(test, params, env): + """ + KVM Seabios test: + 1) Start guest with sga bios + 2) Display and check the boot menu order + 3) Start guest from the specified boot entry + 4) Log into the guest to verify it's up + + @param test: kvm test object + @param params: Dictionary with the test parameters + @param env: Dictionary with test environment. + """ + error.context("Start guest with sga bios") + vm = env.get_vm(params["main_vm"]) + # Since the seabios is displayed in the beginning of guest boot, + # booting guest here so that we can check all of sgabios/seabios + # info, especially the correct time of sending boot menu key. + vm.create() + + timeout = float(params.get("login_timeout", 240)) + boot_menu_key = params.get("boot_menu_key", 'f12') + boot_menu_hint = params.get("boot_menu_hint") + boot_device = params.get("boot_device", "") + + error.context("Display and check the boot menu order") + + f = lambda: re.search(boot_menu_hint, vm.serial_console.get_output()) + if not (boot_menu_hint and utils_misc.wait_for(f, timeout, 1)): + raise error.TestFail("Could not get boot menu message.") + + # Send boot menu key in monitor. + vm.send_key(boot_menu_key) + + _ = vm.serial_console.get_output() + boot_list = re.findall("^\d+\. 
(.*)\s", _, re.M) + + if not boot_list: + raise error.TestFail("Could not get boot entries list.") + + logging.info("Got boot menu entries: '%s'", boot_list) + for i, v in enumerate(boot_list, start=1): + if re.search(boot_device, v, re.I): + error.context("Start guest from boot entry '%s'" % v, + logging.info) + vm.send_key(str(i)) + break + else: + raise error.TestFail("Could not get any boot entry match " + "pattern '%s'" % boot_device) + + error.context("Log into the guest to verify it's up") + session = vm.wait_for_login(timeout=timeout) + session.close() diff --git a/kvm/tests/set_link.py b/kvm/tests/set_link.py new file mode 100644 index 00000000..ad242ca0 --- /dev/null +++ b/kvm/tests/set_link.py @@ -0,0 +1,53 @@ +import logging +from autotest.client.shared import error +from autotest.client.virt import utils_test + + +def run_set_link(test, params, env): + """ + KVM guest link test: + 1) Boot up guest with one nic + 2) Ping guest from host + 3) Disable guest link and ping guest from host + 4) Re-enable guest link and ping guest from host + 5) Do file transfer test + + @param test: kvm test object + @param params: Dictionary with the test parameters + @param env: Dictionary with test environment. + """ + vm = utils_test.get_living_vm(env, params.get("main_vm")) + timeout = float(params.get("login_timeout", 360)) + session = utils_test.wait_for_login(vm, 0, timeout, 0, 2) + + def set_link_test(linkid): + """ + Issue set_link commands and test its function + + @param linkid: id of netdev or devices to be tested + """ + ip = vm.get_address(0) + + vm.set_link(linkid, up=False) + _, o = utils_test.ping(ip, count=10, timeout=20) + if utils_test.get_loss_ratio(o) != 100: + raise error.TestFail("Still can ping the %s after down %s" % + (ip, linkid)) + + vm.set_link(linkid, up=True) + _, o = utils_test.ping(ip, count=10, timeout=20) + # we use 100% here as the notification of link status changed may be + # delayed in guest driver + if utils_test.get_loss_ratio(o) == 100: + raise error.TestFail("Packet loss during ping %s after up %s" % + (ip, linkid)) + + netdev_id = vm.netdev_id[0] + device_id = vm.get_peer(netdev_id) + logging.info("Issue set_link commands for netdevs") + set_link_test(netdev_id) + logging.info("Issue set_link commands for network devics") + set_link_test(device_id) + + utils_test.run_file_transfer(test, params, env) + session.close() diff --git a/kvm/tests/smbios_table.py b/kvm/tests/smbios_table.py new file mode 100644 index 00000000..24541c93 --- /dev/null +++ b/kvm/tests/smbios_table.py @@ -0,0 +1,67 @@ +import logging +from autotest.client.shared import utils, error +from autotest.client.virt import env_process + + +@error.context_aware +def run_smbios_table(test, params, env): + """ + Check smbios table : + 1) Boot a guest with smbios options + 2) verify if host bios options have been emulated + + @param test: KVM test object. + @param params: Dictionary with the test parameters. + @param env: Dictionary with test environment. 
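+
+    The guest is started with an extra qemu option built from the host
+    values, e.g. (the values below are only examples):
+
+        -smbios type=0,vendor=ExampleVendor,version=1.0,date=01/01/2012
+
+    and the same dmidecode queries are then run inside the guest, whose
+    output must match the host values.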
+ """ + vendor_cmd = "dmidecode --type 0 | grep Vendor | awk '{print $2}'" + date_cmd = "dmidecode --type 0 | grep Date | awk '{print $3}'" + version_cmd = "dmidecode --type 0 | grep Version | awk '{print $2}'" + + error.context("getting smbios table on host") + host_vendor = utils.system_output(vendor_cmd) + host_date = utils.system_output(date_cmd) + host_version = utils.system_output(version_cmd) + + smbios = (" -smbios type=0,vendor=%s,version=%s,date=%s" % + (host_vendor, host_version, host_date)) + + extra_params = params.get("extra_params", "") + params["extra_params"] = extra_params + smbios + + logging.debug("Booting guest %s", params.get("main_vm")) + env_process.preprocess_vm(test, params, env, params.get("main_vm")) + vm = env.get_vm(params["main_vm"]) + vm.create() + login_timeout = float(params.get("login_timeout", 360)) + session = vm.wait_for_login(timeout=login_timeout) + + error.context("getting smbios table on guest") + guest_vendor = session.cmd(vendor_cmd).strip() + guest_date = session.cmd(date_cmd).strip() + guest_version = session.cmd(version_cmd).strip() + + failures = [] + + if host_vendor != guest_vendor: + e_msg = ("Vendor str mismatch -> host: %s guest: %s" % + (guest_vendor, host_vendor)) + logging.error(e_msg) + failures.append(e_msg) + + if host_date != guest_date: + e_msg = ("Date str mismatch -> host: %s guest: %s" % + (guest_date, host_date)) + logging.error(e_msg) + failures.append(e_msg) + + if host_version != guest_version: + e_msg = ("Version str mismatch -> host: %s guest: %s" % + (guest_version, host_version)) + logging.error(e_msg) + failures.append(e_msg) + + error.context("") + if failures: + raise error.TestFail("smbios table test reported %s failures:\n%s" % + (len(failures), "\n".join(failures))) diff --git a/kvm/tests/stepmaker.py b/kvm/tests/stepmaker.py new file mode 100755 index 00000000..df1afdd7 --- /dev/null +++ b/kvm/tests/stepmaker.py @@ -0,0 +1,355 @@ +#!/usr/bin/python +""" +Step file creator/editor. + +@copyright: Red Hat Inc 2009 +@author: mgoldish@redhat.com (Michael Goldish) +@version: "20090401" +""" + +import pygtk, gtk, gobject, time, os, commands, logging +from autotest.client.shared import error +from autotest.client.virt import utils_misc, ppm_utils, step_editor +from autotest.client.virt import kvm_monitor +pygtk.require('2.0') + + +class StepMaker(step_editor.StepMakerWindow): + """ + Application used to create a step file. It will grab your input to the + virtual machine and record it on a 'step file', that can be played + making it possible to do unattended installs. 
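+
+    A recorded step file is replayed later by steps.py; as a rough example
+    (the exact lines are produced by get_step_lines() and the values here
+    are placeholders), one step may look like:
+
+        step 1
+        screendump step1.ppm
+        barrier_2 100 50 10 10 <md5sum> 60
+        key ret
+        var password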
+ """ + # Constructor + def __init__(self, vm, steps_filename, tempdir, params): + step_editor.StepMakerWindow.__init__(self) + + self.vm = vm + self.steps_filename = steps_filename + self.steps_data_dir = ppm_utils.get_data_dir(steps_filename) + self.tempdir = tempdir + self.screendump_filename = os.path.join(tempdir, "scrdump.ppm") + self.params = params + + if not os.path.exists(self.steps_data_dir): + os.makedirs(self.steps_data_dir) + + self.steps_file = open(self.steps_filename, "w") + self.vars_file = open(os.path.join(self.steps_data_dir, "vars"), "w") + + self.step_num = 1 + self.run_time = 0 + self.update_delay = 1000 + self.prev_x = 0 + self.prev_y = 0 + self.vars = {} + self.timer_id = None + + self.time_when_done_clicked = time.time() + self.time_when_actions_completed = time.time() + + self.steps_file.write("# Generated by Step Maker\n") + self.steps_file.write("# Generated on %s\n" % time.asctime()) + self.steps_file.write("# uname -a: %s\n" % + commands.getoutput("uname -a")) + self.steps_file.flush() + + self.vars_file.write("# This file lists the vars used during recording" + " with Step Maker\n") + self.vars_file.flush() + + # Done/Break HBox + hbox = gtk.HBox(spacing=10) + self.user_vbox.pack_start(hbox) + hbox.show() + + self.button_break = gtk.Button("Break") + self.button_break.connect("clicked", self.event_break_clicked) + hbox.pack_start(self.button_break) + self.button_break.show() + + self.button_done = gtk.Button("Done") + self.button_done.connect("clicked", self.event_done_clicked) + hbox.pack_start(self.button_done) + self.button_done.show() + + # Set window title + self.window.set_title("Step Maker") + + # Connect "capture" button + self.button_capture.connect("clicked", self.event_capture_clicked) + + # Switch to run mode + self.switch_to_run_mode() + + + def destroy(self, widget): + self.vm.resume() + self.steps_file.close() + self.vars_file.close() + step_editor.StepMakerWindow.destroy(self, widget) + + + # Utilities + def redirect_timer(self, delay=0, func=None): + if self.timer_id != None: + gobject.source_remove(self.timer_id) + self.timer_id = None + if func != None: + self.timer_id = gobject.timeout_add(delay, func, + priority=gobject.PRIORITY_LOW) + + + def switch_to_run_mode(self): + # Set all widgets to their default states + self.clear_state(clear_screendump=False) + # Enable/disable some widgets + self.button_break.set_sensitive(True) + self.button_done.set_sensitive(False) + self.data_vbox.set_sensitive(False) + # Give focus to the Break button + self.button_break.grab_focus() + # Start the screendump timer + self.redirect_timer(100, self.update) + # Resume the VM + self.vm.resume() + + + def switch_to_step_mode(self): + # Set all widgets to their default states + self.clear_state(clear_screendump=False) + # Enable/disable some widgets + self.button_break.set_sensitive(False) + self.button_done.set_sensitive(True) + self.data_vbox.set_sensitive(True) + # Give focus to the keystrokes entry widget + self.entry_keys.grab_focus() + # Start the screendump timer + self.redirect_timer() + # Stop the VM + self.vm.pause() + + + # Events in step mode + def update(self): + self.redirect_timer() + + if os.path.exists(self.screendump_filename): + os.unlink(self.screendump_filename) + + try: + self.vm.monitor.screendump(self.screendump_filename, debug=False) + except kvm_monitor.MonitorError, e: + logging.warn(e) + else: + self.set_image_from_file(self.screendump_filename) + + self.redirect_timer(self.update_delay, self.update) + return True + + + def 
event_break_clicked(self, widget): + if not self.vm.is_alive(): + self.message("The VM doesn't seem to be alive.", "Error") + return + # Switch to step mode + self.switch_to_step_mode() + # Compute time elapsed since last click on "Done" and add it + # to self.run_time + self.run_time += time.time() - self.time_when_done_clicked + # Set recording time widget + self.entry_time.set_text("%.2f" % self.run_time) + # Update screendump ID + self.update_screendump_id(self.steps_data_dir) + # By default, check the barrier checkbox + self.check_barrier.set_active(True) + # Set default sleep and barrier timeout durations + time_delta = time.time() - self.time_when_actions_completed + if time_delta < 1.0: time_delta = 1.0 + self.spin_sleep.set_value(round(time_delta)) + self.spin_barrier_timeout.set_value(round(time_delta * 5)) + # Set window title + self.window.set_title("Step Maker -- step %d at time %.2f" % + (self.step_num, self.run_time)) + + + def event_done_clicked(self, widget): + # Get step lines and screendump + lines = self.get_step_lines(self.steps_data_dir) + if lines == None: + return + + # Get var values from user and write them to vars file + var_dict = {} + for line in lines.splitlines(): + words = line.split() + if words and words[0] == "var": + varname = words[1] + if varname in self.vars.keys(): + val = self.vars[varname] + elif varname in var_dict.keys(): + val = var_dict[varname] + elif varname in self.params.keys(): + val = self.params[varname] + var_dict[varname] = val + else: + val = self.inputdialog("$%s =" % varname, "Variable") + if val == None: + return + var_dict[varname] = val + for varname in var_dict.keys(): + self.vars_file.write("%s=%s\n" % (varname, var_dict[varname])) + self.vars.update(var_dict) + + # Write step lines to file + self.steps_file.write("# " + "-" * 32 + "\n") + self.steps_file.write(lines) + + # Flush buffers of both files + self.steps_file.flush() + self.vars_file.flush() + + # Remember the current time + self.time_when_done_clicked = time.time() + + # Switch to run mode + self.switch_to_run_mode() + + # Send commands to VM + for line in lines.splitlines(): + words = line.split() + if not words: + continue + elif words[0] == "key": + self.vm.send_key(words[1]) + elif words[0] == "var": + val = self.vars.get(words[1]) + if not val: + continue + self.vm.send_string(val) + elif words[0] == "mousemove": + self.vm.monitor.mouse_move(-8000, -8000) + time.sleep(0.5) + self.vm.monitor.mouse_move(words[1], words[2]) + time.sleep(0.5) + elif words[0] == "mouseclick": + self.vm.monitor.mouse_button(words[1]) + time.sleep(0.1) + self.vm.monitor.mouse_button(0) + + # Remember the current time + self.time_when_actions_completed = time.time() + + # Move on to next step + self.step_num += 1 + + def event_capture_clicked(self, widget): + self.message("Mouse actions disabled (for now).", "Sorry") + return + + self.image_width_backup = self.image_width + self.image_height_backup = self.image_height + self.image_data_backup = self.image_data + + gtk.gdk.pointer_grab(self.event_box.window, False, + gtk.gdk.BUTTON_PRESS_MASK | + gtk.gdk.BUTTON_RELEASE_MASK) + # Create empty cursor + pix = gtk.gdk.Pixmap(self.event_box.window, 1, 1, 1) + color = gtk.gdk.Color() + cursor = gtk.gdk.Cursor(pix, pix, color, color, 0, 0) + self.event_box.window.set_cursor(cursor) + gtk.gdk.display_get_default().warp_pointer(gtk.gdk.screen_get_default(), + self.prev_x, self.prev_y) + self.redirect_event_box_input( + self.event_capture_button_press, + self.event_capture_button_release, + 
self.event_capture_scroll) + self.redirect_timer(10, self.update_capture) + self.vm.resume() + + # Events in mouse capture mode + + def update_capture(self): + self.redirect_timer() + + (_, x, y, _) = gtk.gdk.display_get_default().get_pointer() + self.mouse_click_coords[0] = int(x * self.spin_sensitivity.get_value()) + self.mouse_click_coords[1] = int(y * self.spin_sensitivity.get_value()) + + delay = self.spin_latency.get_value() / 1000 + if (x, y) != (self.prev_x, self.prev_y): + self.vm.monitor.mouse_move(-8000, -8000) + time.sleep(delay) + self.vm.monitor.mouse_move(self.mouse_click_coords[0], + self.mouse_click_coords[1]) + time.sleep(delay) + + self.prev_x = x + self.prev_y = y + + if os.path.exists(self.screendump_filename): + os.unlink(self.screendump_filename) + + try: + self.vm.monitor.screendump(self.screendump_filename, debug=False) + except kvm_monitor.MonitorError, e: + logging.warn(e) + else: + self.set_image_from_file(self.screendump_filename) + + self.redirect_timer(int(self.spin_latency.get_value()), + self.update_capture) + return True + + def event_capture_button_press(self, widget,event): + pass + + def event_capture_button_release(self, widget,event): + gtk.gdk.pointer_ungrab() + self.event_box.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.CROSSHAIR)) + self.redirect_event_box_input( + self.event_button_press, + self.event_button_release, + None, + None, + self.event_expose) + self.redirect_timer() + self.vm.pause() + self.mouse_click_captured = True + self.mouse_click_button = event.button + self.set_image(self.image_width_backup, self.image_height_backup, + self.image_data_backup) + self.check_mousemove.set_sensitive(True) + self.check_mouseclick.set_sensitive(True) + self.check_mousemove.set_active(True) + self.check_mouseclick.set_active(True) + self.update_mouse_click_info() + + def event_capture_scroll(self, widget, event): + if event.direction == gtk.gdk.SCROLL_UP: + direction = 1 + else: + direction = -1 + self.spin_sensitivity.set_value(self.spin_sensitivity.get_value() + + direction) + pass + + +def run_stepmaker(test, params, env): + vm = env.get_vm(params.get("main_vm")) + if not vm: + raise error.TestError("VM object not found in environment") + if not vm.is_alive(): + raise error.TestError("VM seems to be dead; Step Maker requires a" + " living VM") + + steps_filename = params.get("steps") + if not steps_filename: + raise error.TestError("Steps filename not specified") + steps_filename = utils_misc.get_path(test.virtdir, steps_filename) + if os.path.exists(steps_filename): + raise error.TestError("Steps file %s already exists" % steps_filename) + + StepMaker(vm, steps_filename, test.debugdir, params) + gtk.main() diff --git a/kvm/tests/steps.py b/kvm/tests/steps.py new file mode 100644 index 00000000..377c6d55 --- /dev/null +++ b/kvm/tests/steps.py @@ -0,0 +1,247 @@ +""" +Utilities to perform automatic guest installation using step files. + +@copyright: Red Hat 2008-2009 +""" + +import os, time, shutil, logging +from autotest.client.shared import error +from autotest.client.virt import utils_misc, ppm_utils, kvm_monitor + +try: + import PIL.Image +except ImportError: + logging.warning('No python imaging library installed. PPM image ' + 'conversion to JPEG disabled. 
In order to enable it, ' + 'please install python-imaging or the equivalent for your ' + 'distro.') + + +def handle_var(vm, params, varname): + var = params.get(varname) + if not var: + return False + vm.send_string(var) + return True + + +def barrier_2(vm, words, params, debug_dir, data_scrdump_filename, + current_step_num): + if len(words) < 7: + logging.error("Bad barrier_2 command line") + return False + + # Parse barrier command line + _, dx, dy, x1, y1, md5sum, timeout = words[:7] + dx, dy, x1, y1, timeout = map(int, [dx, dy, x1, y1, timeout]) + + # Define some paths + scrdump_filename = os.path.join(debug_dir, "scrdump.ppm") + cropped_scrdump_filename = os.path.join(debug_dir, "cropped_scrdump.ppm") + expected_scrdump_filename = os.path.join(debug_dir, "scrdump_expected.ppm") + expected_cropped_scrdump_filename = os.path.join(debug_dir, + "cropped_scrdump_expected.ppm") + comparison_filename = os.path.join(debug_dir, "comparison.ppm") + history_dir = os.path.join(debug_dir, "barrier_history") + + # Collect a few parameters + timeout_multiplier = float(params.get("timeout_multiplier") or 1) + fail_if_stuck_for = float(params.get("fail_if_stuck_for") or 1e308) + stuck_detection_history = int(params.get("stuck_detection_history") or 2) + keep_screendump_history = params.get("keep_screendump_history") == "yes" + keep_all_history = params.get("keep_all_history") == "yes" + + # Multiply timeout by the timeout multiplier + timeout *= timeout_multiplier + + # Timeout/5 is the time it took stepmaker to complete this step. + # Divide that number by 10 to poll 10 times, just in case + # current machine is stronger then the "stepmaker machine". + # Limit to 1 (min) and 10 (max) seconds between polls. + sleep_duration = float(timeout) / 50.0 + if sleep_duration < 1.0: sleep_duration = 1.0 + if sleep_duration > 10.0: sleep_duration = 10.0 + + end_time = time.time() + timeout + end_time_stuck = time.time() + fail_if_stuck_for + start_time = time.time() + + prev_whole_image_md5sums = [] + + failure_message = None + + # Main loop + while True: + # Check for timeouts + if time.time() > end_time: + failure_message = "regular timeout" + break + if time.time() > end_time_stuck: + failure_message = "guest is stuck" + break + + # Make sure vm is alive + if not vm.is_alive(): + failure_message = "VM is dead" + break + + # Request screendump + try: + vm.monitor.screendump(scrdump_filename, debug=False) + except kvm_monitor.MonitorError, e: + logging.warn(e) + continue + + # Read image file + (w, h, data) = ppm_utils.image_read_from_ppm_file(scrdump_filename) + + # Make sure image is valid + if not ppm_utils.image_verify_ppm_file(scrdump_filename): + logging.warn("Got invalid screendump: dimensions: %dx%d, " + "data size: %d", w, h, len(data)) + continue + + # Compute md5sum of whole image + whole_image_md5sum = ppm_utils.image_md5sum(w, h, data) + + # Write screendump to history_dir (as JPG) if requested + # and if the screendump differs from the previous one + if (keep_screendump_history and + whole_image_md5sum not in prev_whole_image_md5sums[:1]): + try: + os.makedirs(history_dir) + except Exception: + pass + history_scrdump_filename = os.path.join(history_dir, + "scrdump-step_%s-%s.jpg" % (current_step_num, + time.strftime("%Y%m%d-%H%M%S"))) + try: + image = PIL.Image.open(scrdump_filename) + image.save(history_scrdump_filename, format = 'JPEG', + quality = 30) + except NameError: + pass + + # Compare md5sum of barrier region with the expected md5sum + calced_md5sum = ppm_utils.get_region_md5sum(w, h, 
data, x1, y1, dx, dy, + cropped_scrdump_filename) + if calced_md5sum == md5sum: + # Success -- remove screendump history unless requested not to + if keep_screendump_history and not keep_all_history: + shutil.rmtree(history_dir) + # Report success + return True + + # Insert image md5sum into queue of last seen images: + # If md5sum is already in queue... + if whole_image_md5sum in prev_whole_image_md5sums: + # Remove md5sum from queue + prev_whole_image_md5sums.remove(whole_image_md5sum) + else: + # Otherwise extend 'stuck' timeout + end_time_stuck = time.time() + fail_if_stuck_for + # Insert md5sum at beginning of queue + prev_whole_image_md5sums.insert(0, whole_image_md5sum) + # Limit queue length to stuck_detection_history + prev_whole_image_md5sums = \ + prev_whole_image_md5sums[:stuck_detection_history] + + # Sleep for a while + time.sleep(sleep_duration) + + # Failure + message = ("Barrier failed at step %s after %.2f seconds (%s)" % + (current_step_num, time.time() - start_time, failure_message)) + + # What should we do with this failure? + if words[-1] == "optional": + logging.info(message) + return False + else: + # Collect information and put it in debug_dir + if data_scrdump_filename and os.path.exists(data_scrdump_filename): + # Read expected screendump image + (ew, eh, edata) = \ + ppm_utils.image_read_from_ppm_file(data_scrdump_filename) + # Write it in debug_dir + ppm_utils.image_write_to_ppm_file(expected_scrdump_filename, + ew, eh, edata) + # Write the cropped version as well + ppm_utils.get_region_md5sum(ew, eh, edata, x1, y1, dx, dy, + expected_cropped_scrdump_filename) + # Perform comparison + (w, h, data) = ppm_utils.image_read_from_ppm_file(scrdump_filename) + if w == ew and h == eh: + (w, h, data) = ppm_utils.image_comparison(w, h, data, edata) + ppm_utils.image_write_to_ppm_file(comparison_filename, w, h, + data) + # Print error messages and fail the test + long_message = message + "\n(see analysis at %s)" % debug_dir + logging.error(long_message) + raise error.TestFail, message + + +def run_steps(test, params, env): + vm = env.get_vm(params.get("main_vm")) + if not vm: + raise error.TestError("VM object not found in environment") + if not vm.is_alive(): + e_msg = "VM seems to be dead. 
Guestwizard requires a living VM" + raise error.TestError(e_msg) + + steps_filename = params.get("steps") + if not steps_filename: + raise error.TestError("Steps filename not specified") + steps_filename = utils_misc.get_path(test.virtdir, steps_filename) + if not os.path.exists(steps_filename): + raise error.TestError("Steps file not found: %s" % steps_filename) + + sf = open(steps_filename, "r") + lines = sf.readlines() + sf.close() + + vm.resume() + + current_step_num = 0 + current_screendump = None + skip_current_step = False + + # Iterate over the lines in the file + for line in lines: + line = line.strip() + if not line: + continue + logging.info(line) + + if line.startswith("#"): + continue + + words = line.split() + if words[0] == "step": + current_step_num += 1 + current_screendump = None + skip_current_step = False + elif words[0] == "screendump": + current_screendump = words[1] + elif skip_current_step: + continue + elif words[0] == "sleep": + timeout_multiplier = float(params.get("timeout_multiplier") or 1) + time.sleep(float(words[1]) * timeout_multiplier) + elif words[0] == "key": + vm.send_key(words[1]) + elif words[0] == "var": + if not handle_var(vm, params, words[1]): + logging.error("Variable not defined: %s", words[1]) + elif words[0] == "barrier_2": + if current_screendump: + scrdump_filename = os.path.join( + ppm_utils.get_data_dir(steps_filename), + current_screendump) + else: + scrdump_filename = None + if not barrier_2(vm, words, params, test.debugdir, + scrdump_filename, current_step_num): + skip_current_step = True + else: + vm.send_key(words[0]) diff --git a/kvm/tests/stop_continue.py b/kvm/tests/stop_continue.py new file mode 100644 index 00000000..c800444e --- /dev/null +++ b/kvm/tests/stop_continue.py @@ -0,0 +1,43 @@ +import logging +from autotest.client.shared import error + + +def run_stop_continue(test, params, env): + """ + Suspend a running Virtual Machine and verify its state. + + 1) Boot the vm + 2) Suspend the vm through stop command + 3) Verify the state through info status command + 4) Check is the ssh session to guest is still responsive, + if succeed, fail the test. + + @param test: Kvm test object + @param params: Dictionary with the test parameters + @param env: Dictionary with test environment. + """ + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + timeout = float(params.get("login_timeout", 240)) + session = vm.wait_for_login(timeout=timeout) + + try: + logging.info("Stop the VM") + vm.pause() + logging.info("Verifying the status of VM is 'paused'") + vm.verify_status("paused") + + logging.info("Check the session is responsive") + if session.is_responsive(): + raise error.TestFail("Session is still responsive after stop") + + logging.info("Try to resume the guest") + vm.resume() + logging.info("Verifying the status of VM is 'running'") + vm.verify_status("running") + + logging.info("Try to re-log into guest") + session = vm.wait_for_login(timeout=timeout) + + finally: + session.close() diff --git a/kvm/tests/system_reset_bootable.py b/kvm/tests/system_reset_bootable.py new file mode 100644 index 00000000..be5b2ffc --- /dev/null +++ b/kvm/tests/system_reset_bootable.py @@ -0,0 +1,31 @@ +import logging, time + + +def run_system_reset_bootable(test, params, env): + """ + KVM reset test: + 1) Boot guest. + 2) Reset system by monitor command for several times. + 3) Log into the guest to verify it could normally boot. 
+
+    @param test: kvm test object
+    @param params: Dictionary with the test parameters
+    @param env: Dictionary with test environment.
+    """
+    vm = env.get_vm(params["main_vm"])
+    vm.verify_alive()
+    timeout = float(params.get("login_timeout", 240))
+    reset_times = int(params.get("reset_times", 20))
+    interval = int(params.get("reset_interval", 10))
+    wait_time = int(params.get("wait_time_for_reset", 60))
+
+    logging.info("Wait for %d seconds before reset" % wait_time)
+    time.sleep(wait_time)
+
+    for _ in range(reset_times):
+        logging.info("Reset the system by monitor command")
+        vm.monitor.cmd("system_reset")
+        time.sleep(interval)
+
+    logging.info("Try to log into the guest after reset")
+    vm.wait_for_login(timeout=timeout)
diff --git a/kvm/tests/time_manage.py b/kvm/tests/time_manage.py
new file mode 100644
index 00000000..34dba01d
--- /dev/null
+++ b/kvm/tests/time_manage.py
@@ -0,0 +1,127 @@
+import logging, time
+from autotest.client.shared import error
+from autotest.client.virt import utils_test, aexpect
+from autotest.client.virt import env_process
+
+@error.context_aware
+def run_time_manage(test, params, env):
+    """
+    Time management test:
+
+    1) Generate stress on the host.
+    2) Run at least 15 VMs with the "driftfix=slew" option.
+    3) Reboot the guest.
+    4) Repeat step 3 for all guests and check whether each guest
+       responds properly (no watchdog events reported).
+    5) TODO: Improve the way the response is checked and
+       run some stress inside the guests too.
+    6) Repeat step 4 for 10 iterations, recording the guest/host
+       real time and calculating the time drift for each iteration.
+    7) Print the drift values for all sessions.
+    8) TODO: Validate that the drift stays within a defined limit.
+
+    @param test: KVM test object.
+    @param params: Dictionary with test parameters.
+    @param env: Dictionary with the test environment.
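+
+    The drift reported in step 6 is computed per guest and per iteration as
+
+        drift% = 100 * (host_delta - guest_delta) / host_delta
+
+    where host_delta and guest_delta are the host and guest time elapsed
+    between two consecutive iterations.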
+ """ + # Checking the main vm is alive + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + timeout = int(params.get("login_timeout", 360)) + session = vm.wait_for_login(timeout=timeout) + + # Collect test parameters + login_timeout = float(params.get("login_timeout", 240)) + host_load_command = params.get("host_load_command") + host_load_kill_command = params.get("host_load_kill_command") + time_command = params.get("time_command") + time_filter_re = params.get("time_filter_re") + time_format = params.get("time_format") + + # Intialize the variables + itr = 0 + num = 2 + host_load_sessions = [] + sessions = [session] + prev_time = [] + curr_time = [] + timedrift = [] + totaldrift = [] + vmnames =["vm1"] + + # Run some load on the host + logging.info("Starting load on host.") + host_load_sessions.append(aexpect.run_bg(host_load_command, + output_func=logging.debug, + output_prefix="host load ", + timeout=0.5)) + # Boot the VMs + try: + while num <= int(params.get("max_vms")): + # Clone vm according to the first one + vm_name = "vm%d" % num + vmnames.append(vm_name) + vm_params = vm.params.copy() + curr_vm = vm.clone(vm_name, vm_params) + env.register_vm(vm_name, curr_vm) + env_process.preprocess_vm(test, vm_params, env, vm_name) + params["vms"] += " " + vm_name + + sessions.append(curr_vm.wait_for_login(timeout=login_timeout)) + logging.info("Guest #%d booted up successfully", num) + + # Check whether all previous shell sessions are responsive + error.context("checking responsiveness of the booted guest") + for se in sessions: + se.cmd(params.get("alive_test_cmd")) + num += 1 + + while itr <= int(params.get("max_itrs")): + for vmid,se in enumerate(sessions): + # Get the respective vm object + vmname = "vm%d" % (vmid +1) + vm = env.get_vm(vmname) + # Run current iteration + logging.info("Rebooting:vm%d iteration %d " % ((vmid + 1), itr)) + se = vm.reboot(se ,timeout=timeout) + # Remember the current changed session + sessions[vmid] = se + error.context("checking responsiveness of guest") + se.cmd(params.get("alive_test_cmd")) + if itr == 0: + (ht0, gt0) = utils_test.get_time(se, time_command, + time_filter_re, time_format) + prev_time.append((ht0, gt0)) + else: + (ht1, gt1) = utils_test.get_time(se, time_command, + time_filter_re, time_format) + curr_time.append((ht1, gt1)) + if itr != 0: + for i in range(int(params.get("max_vms"))): + hdelta = curr_time[i][0] - prev_time[i][0] + gdelta = curr_time[i][1] - prev_time[i][1] + drift = format( 100.0 * (hdelta - gdelta) / hdelta, ".2f" ) + timedrift.append(drift) + totaldrift.append(timedrift) + prev_time = curr_time + timedrift = [] + curr_time = [] + # Wait for some time before next iteration + time.sleep(30) + itr += 1 + + logging.info("The time drift values for all VM sessions/iterations") + logging.info("VM-Name:%s" % vmnames) + for idx,value in enumerate(totaldrift): + logging.info("itr-%2d:%s" % (idx+1,value)) + + finally: + for se in sessions: + # Closing all the sessions. 
+ se.close() + logging.info("killing load on host.") + host_load_sessions.append(aexpect.run_bg(host_load_kill_command, + output_func=logging.debug, + output_prefix="host load kill", + timeout=0.5)) diff --git a/kvm/tests/timedrift.py b/kvm/tests/timedrift.py new file mode 100644 index 00000000..0f3afbfb --- /dev/null +++ b/kvm/tests/timedrift.py @@ -0,0 +1,181 @@ +import logging, time, commands +from autotest.client.shared import error +from autotest.client.virt import utils_test, aexpect + + +def run_timedrift(test, params, env): + """ + Time drift test (mainly for Windows guests): + + 1) Log into a guest. + 2) Take a time reading from the guest and host. + 3) Run load on the guest and host. + 4) Take a second time reading. + 5) Stop the load and rest for a while. + 6) Take a third time reading. + 7) If the drift immediately after load is higher than a user- + specified value (in %), fail. + If the drift after the rest period is higher than a user-specified value, + fail. + + @param test: KVM test object. + @param params: Dictionary with test parameters. + @param env: Dictionary with the test environment. + """ + # Helper functions + def set_cpu_affinity(pid, mask): + """ + Set the CPU affinity of all threads of the process with PID pid. + Do this recursively for all child processes as well. + + @param pid: The process ID. + @param mask: The CPU affinity mask. + @return: A dict containing the previous mask for each thread. + """ + tids = commands.getoutput("ps -L --pid=%s -o lwp=" % pid).split() + prev_masks = {} + for tid in tids: + prev_mask = commands.getoutput("taskset -p %s" % tid).split()[-1] + prev_masks[tid] = prev_mask + commands.getoutput("taskset -p %s %s" % (mask, tid)) + children = commands.getoutput("ps --ppid=%s -o pid=" % pid).split() + for child in children: + prev_masks.update(set_cpu_affinity(child, mask)) + return prev_masks + + def restore_cpu_affinity(prev_masks): + """ + Restore the CPU affinity of several threads. + + @param prev_masks: A dict containing TIDs as keys and masks as values. 
+ """ + for tid, mask in prev_masks.items(): + commands.getoutput("taskset -p %s %s" % (mask, tid)) + + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + timeout = int(params.get("login_timeout", 360)) + session = vm.wait_for_login(timeout=timeout) + + # Collect test parameters: + # Command to run to get the current time + time_command = params.get("time_command") + # Filter which should match a string to be passed to time.strptime() + time_filter_re = params.get("time_filter_re") + # Time format for time.strptime() + time_format = params.get("time_format") + guest_load_command = params.get("guest_load_command") + guest_load_stop_command = params.get("guest_load_stop_command") + host_load_command = params.get("host_load_command") + guest_load_instances = int(params.get("guest_load_instances", "1")) + host_load_instances = int(params.get("host_load_instances", "0")) + # CPU affinity mask for taskset + cpu_mask = params.get("cpu_mask", "0xFF") + load_duration = float(params.get("load_duration", "30")) + rest_duration = float(params.get("rest_duration", "10")) + drift_threshold = float(params.get("drift_threshold", "200")) + drift_threshold_after_rest = float(params.get("drift_threshold_after_rest", + "200")) + + guest_load_sessions = [] + host_load_sessions = [] + + try: + # Set the VM's CPU affinity + prev_affinity = set_cpu_affinity(vm.get_shell_pid(), cpu_mask) + + try: + # Open shell sessions with the guest + logging.info("Starting load on guest...") + for i in range(guest_load_instances): + load_session = vm.login() + # Set output func to None to stop it from being called so we + # can change the callback function and the parameters it takes + # with no problems + load_session.set_output_func(None) + load_session.set_output_params(()) + load_session.set_output_prefix("(guest load %d) " % i) + load_session.set_output_func(logging.debug) + guest_load_sessions.append(load_session) + + # Get time before load + # (ht stands for host time, gt stands for guest time) + (ht0, gt0) = utils_test.get_time(session, + time_command, + time_filter_re, + time_format) + + # Run some load on the guest + for load_session in guest_load_sessions: + load_session.sendline(guest_load_command) + + # Run some load on the host + logging.info("Starting load on host...") + for i in range(host_load_instances): + host_load_sessions.append( + aexpect.run_bg(host_load_command, + output_func=logging.debug, + output_prefix="(host load %d) " % i, + timeout=0.5)) + # Set the CPU affinity of the load process + pid = host_load_sessions[-1].get_pid() + set_cpu_affinity(pid, cpu_mask) + + # Sleep for a while (during load) + logging.info("Sleeping for %s seconds...", load_duration) + time.sleep(load_duration) + + # Get time delta after load + (ht1, gt1) = utils_test.get_time(session, + time_command, + time_filter_re, + time_format) + + # Report results + host_delta = ht1 - ht0 + guest_delta = gt1 - gt0 + drift = 100.0 * (host_delta - guest_delta) / host_delta + logging.info("Host duration: %.2f", host_delta) + logging.info("Guest duration: %.2f", guest_delta) + logging.info("Drift: %.2f%%", drift) + + finally: + logging.info("Cleaning up...") + # Restore the VM's CPU affinity + restore_cpu_affinity(prev_affinity) + # Stop the guest load + if guest_load_stop_command: + session.cmd_output(guest_load_stop_command) + # Close all load shell sessions + for load_session in guest_load_sessions: + load_session.close() + for load_session in host_load_sessions: + load_session.close() + + # Sleep again (rest) + logging.info("Sleeping 
for %s seconds...", rest_duration) + time.sleep(rest_duration) + + # Get time after rest + (ht2, gt2) = utils_test.get_time(session, + time_command, + time_filter_re, + time_format) + + finally: + session.close() + + # Report results + host_delta_total = ht2 - ht0 + guest_delta_total = gt2 - gt0 + drift_total = 100.0 * (host_delta_total - guest_delta_total) / host_delta + logging.info("Total host duration including rest: %.2f", host_delta_total) + logging.info("Total guest duration including rest: %.2f", guest_delta_total) + logging.info("Total drift after rest: %.2f%%", drift_total) + + # Fail the test if necessary + if abs(drift) > drift_threshold: + raise error.TestFail("Time drift too large: %.2f%%" % drift) + if abs(drift_total) > drift_threshold_after_rest: + raise error.TestFail("Time drift too large after rest period: %.2f%%" + % drift_total) diff --git a/kvm/tests/timedrift_with_migration.py b/kvm/tests/timedrift_with_migration.py new file mode 100644 index 00000000..8a06116f --- /dev/null +++ b/kvm/tests/timedrift_with_migration.py @@ -0,0 +1,96 @@ +import logging +from autotest.client.shared import error +from autotest.client.virt import utils_test + + +def run_timedrift_with_migration(test, params, env): + """ + Time drift test with migration: + + 1) Log into a guest. + 2) Take a time reading from the guest and host. + 3) Migrate the guest. + 4) Take a second time reading. + 5) If the drift (in seconds) is higher than a user specified value, fail. + + @param test: KVM test object. + @param params: Dictionary with test parameters. + @param env: Dictionary with the test environment. + """ + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + timeout = int(params.get("login_timeout", 360)) + session = vm.wait_for_login(timeout=timeout) + + # Collect test parameters: + # Command to run to get the current time + time_command = params.get("time_command") + # Filter which should match a string to be passed to time.strptime() + time_filter_re = params.get("time_filter_re") + # Time format for time.strptime() + time_format = params.get("time_format") + drift_threshold = float(params.get("drift_threshold", "10")) + drift_threshold_single = float(params.get("drift_threshold_single", "3")) + migration_iterations = int(params.get("migration_iterations", 1)) + + try: + # Get initial time + # (ht stands for host time, gt stands for guest time) + (ht0, gt0) = utils_test.get_time(session, time_command, + time_filter_re, time_format) + + # Migrate + for i in range(migration_iterations): + # Get time before current iteration + (ht0_, gt0_) = utils_test.get_time(session, time_command, + time_filter_re, time_format) + session.close() + # Run current iteration + logging.info("Migrating: iteration %d of %d...", + (i + 1), migration_iterations) + vm.migrate() + # Log in + logging.info("Logging in after migration...") + session = vm.wait_for_login(timeout=30) + logging.info("Logged in after migration") + # Get time after current iteration + (ht1_, gt1_) = utils_test.get_time(session, time_command, + time_filter_re, time_format) + # Report iteration results + host_delta = ht1_ - ht0_ + guest_delta = gt1_ - gt0_ + drift = abs(host_delta - guest_delta) + logging.info("Host duration (iteration %d): %.2f", + (i + 1), host_delta) + logging.info("Guest duration (iteration %d): %.2f", + (i + 1), guest_delta) + logging.info("Drift at iteration %d: %.2f seconds", + (i + 1), drift) + # Fail if necessary + if drift > drift_threshold_single: + raise error.TestFail("Time drift too large at iteration %d: " + 
"%.2f seconds" % (i + 1, drift)) + + # Get final time + (ht1, gt1) = utils_test.get_time(session, time_command, + time_filter_re, time_format) + + finally: + if session: + session.close() + + # Report results + host_delta = ht1 - ht0 + guest_delta = gt1 - gt0 + drift = abs(host_delta - guest_delta) + logging.info("Host duration (%d migrations): %.2f", + migration_iterations, host_delta) + logging.info("Guest duration (%d migrations): %.2f", + migration_iterations, guest_delta) + logging.info("Drift after %d migrations: %.2f seconds", + migration_iterations, drift) + + # Fail if necessary + if drift > drift_threshold: + raise error.TestFail("Time drift too large after %d migrations: " + "%.2f seconds" % (migration_iterations, drift)) diff --git a/kvm/tests/timedrift_with_reboot.py b/kvm/tests/timedrift_with_reboot.py new file mode 100644 index 00000000..fd3e7802 --- /dev/null +++ b/kvm/tests/timedrift_with_reboot.py @@ -0,0 +1,91 @@ +import logging +from autotest.client.shared import error +from autotest.client.virt import utils_test + + +def run_timedrift_with_reboot(test, params, env): + """ + Time drift test with reboot: + + 1) Log into a guest. + 2) Take a time reading from the guest and host. + 3) Reboot the guest. + 4) Take a second time reading. + 5) If the drift (in seconds) is higher than a user specified value, fail. + + @param test: KVM test object. + @param params: Dictionary with test parameters. + @param env: Dictionary with the test environment. + """ + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + timeout = int(params.get("login_timeout", 360)) + session = vm.wait_for_login(timeout=timeout) + + # Collect test parameters: + # Command to run to get the current time + time_command = params.get("time_command") + # Filter which should match a string to be passed to time.strptime() + time_filter_re = params.get("time_filter_re") + # Time format for time.strptime() + time_format = params.get("time_format") + drift_threshold = float(params.get("drift_threshold", "10")) + drift_threshold_single = float(params.get("drift_threshold_single", "3")) + reboot_iterations = int(params.get("reboot_iterations", 1)) + + try: + # Get initial time + # (ht stands for host time, gt stands for guest time) + (ht0, gt0) = utils_test.get_time(session, time_command, + time_filter_re, time_format) + + # Reboot + for i in range(reboot_iterations): + # Get time before current iteration + (ht0_, gt0_) = utils_test.get_time(session, time_command, + time_filter_re, time_format) + # Run current iteration + logging.info("Rebooting: iteration %d of %d...", + (i + 1), reboot_iterations) + session = vm.reboot(session) + # Get time after current iteration + (ht1_, gt1_) = utils_test.get_time(session, time_command, + time_filter_re, time_format) + # Report iteration results + host_delta = ht1_ - ht0_ + guest_delta = gt1_ - gt0_ + drift = abs(host_delta - guest_delta) + logging.info("Host duration (iteration %d): %.2f", + (i + 1), host_delta) + logging.info("Guest duration (iteration %d): %.2f", + (i + 1), guest_delta) + logging.info("Drift at iteration %d: %.2f seconds", + (i + 1), drift) + # Fail if necessary + if drift > drift_threshold_single: + raise error.TestFail("Time drift too large at iteration %d: " + "%.2f seconds" % (i + 1, drift)) + + # Get final time + (ht1, gt1) = utils_test.get_time(session, time_command, + time_filter_re, time_format) + + finally: + if session: + session.close() + + # Report results + host_delta = ht1 - ht0 + guest_delta = gt1 - gt0 + drift = abs(host_delta - guest_delta) 
+ logging.info("Host duration (%d reboots): %.2f", + reboot_iterations, host_delta) + logging.info("Guest duration (%d reboots): %.2f", + reboot_iterations, guest_delta) + logging.info("Drift after %d reboots: %.2f seconds", + reboot_iterations, drift) + + # Fail if necessary + if drift > drift_threshold: + raise error.TestFail("Time drift too large after %d reboots: " + "%.2f seconds" % (reboot_iterations, drift)) diff --git a/kvm/tests/timedrift_with_stop.py b/kvm/tests/timedrift_with_stop.py new file mode 100644 index 00000000..605a0454 --- /dev/null +++ b/kvm/tests/timedrift_with_stop.py @@ -0,0 +1,103 @@ +import logging, time +from autotest.client.shared import error +from autotest.client.virt import utils_test + + +def run_timedrift_with_stop(test, params, env): + """ + Time drift test with stop/continue the guest: + + 1) Log into a guest. + 2) Take a time reading from the guest and host. + 3) Stop the running of the guest + 4) Sleep for a while + 5) Continue the guest running + 6) Take a second time reading. + 7) If the drift (in seconds) is higher than a user specified value, fail. + + @param test: KVM test object. + @param params: Dictionary with test parameters. + @param env: Dictionary with the test environment. + """ + login_timeout = int(params.get("login_timeout", 360)) + sleep_time = int(params.get("sleep_time", 30)) + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + session = vm.wait_for_login(timeout=login_timeout) + + # Collect test parameters: + # Command to run to get the current time + time_command = params.get("time_command") + # Filter which should match a string to be passed to time.strptime() + time_filter_re = params.get("time_filter_re") + # Time format for time.strptime() + time_format = params.get("time_format") + drift_threshold = float(params.get("drift_threshold", "10")) + drift_threshold_single = float(params.get("drift_threshold_single", "3")) + stop_iterations = int(params.get("stop_iterations", 1)) + stop_time = int(params.get("stop_time", 60)) + + try: + # Get initial time + # (ht stands for host time, gt stands for guest time) + (ht0, gt0) = utils_test.get_time(session, time_command, + time_filter_re, time_format) + + # Stop the guest + for i in range(stop_iterations): + # Get time before current iteration + (ht0_, gt0_) = utils_test.get_time(session, time_command, + time_filter_re, time_format) + # Run current iteration + logging.info("Stop %s second: iteration %d of %d...", + stop_time, (i + 1), stop_iterations) + + vm.pause() + time.sleep(stop_time) + vm.resume() + + # Sleep for a while to wait the interrupt to be reinjected + logging.info("Waiting for the interrupt to be reinjected ...") + time.sleep(sleep_time) + + # Get time after current iteration + (ht1_, gt1_) = utils_test.get_time(session, time_command, + time_filter_re, time_format) + # Report iteration results + host_delta = ht1_ - ht0_ + guest_delta = gt1_ - gt0_ + drift = abs(host_delta - guest_delta) + logging.info("Host duration (iteration %d): %.2f", + (i + 1), host_delta) + logging.info("Guest duration (iteration %d): %.2f", + (i + 1), guest_delta) + logging.info("Drift at iteration %d: %.2f seconds", + (i + 1), drift) + # Fail if necessary + if drift > drift_threshold_single: + raise error.TestFail("Time drift too large at iteration %d: " + "%.2f seconds" % (i + 1, drift)) + + # Get final time + (ht1, gt1) = utils_test.get_time(session, time_command, + time_filter_re, time_format) + + finally: + if session: + session.close() + + # Report results + host_delta = ht1 - ht0 + 
guest_delta = gt1 - gt0 + drift = abs(host_delta - guest_delta) + logging.info("Host duration (%d stops): %.2f", + stop_iterations, host_delta) + logging.info("Guest duration (%d stops): %.2f", + stop_iterations, guest_delta) + logging.info("Drift after %d stops: %.2f seconds", + stop_iterations, drift) + + # Fail if necessary + if drift > drift_threshold: + raise error.TestFail("Time drift too large after %d stops: " + "%.2f seconds" % (stop_iterations, drift)) diff --git a/kvm/tests/unittest.py b/kvm/tests/unittest.py new file mode 100644 index 00000000..8fb73b9b --- /dev/null +++ b/kvm/tests/unittest.py @@ -0,0 +1,129 @@ +import logging, os, shutil, glob, ConfigParser +from autotest.client.shared import error +from autotest.client.virt import utils_misc, env_process + + +def run_unittest(test, params, env): + """ + KVM RHEL-6 style unit test: + 1) Resume a stopped VM + 2) Wait for VM to terminate + 3) If qemu exited with code = 0, the unittest passed. Otherwise, it failed + 4) Collect all logs generated + + @param test: kvm test object + @param params: Dictionary with the test parameters + @param env: Dictionary with test environment + """ + unittest_dir = os.path.join(test.bindir, 'unittests') + if not os.path.isdir(unittest_dir): + raise error.TestError("No unittest dir %s available (did you run the " + "build test first?)" % unittest_dir) + os.chdir(unittest_dir) + unittest_list = glob.glob('*.flat') + if not unittest_list: + raise error.TestError("No unittest files available (did you run the " + "build test first?)") + logging.debug('Flat file list: %s', unittest_list) + + unittest_cfg = os.path.join(unittest_dir, 'unittests.cfg') + parser = ConfigParser.ConfigParser() + parser.read(unittest_cfg) + test_list = parser.sections() + + if not test_list: + raise error.TestError("No tests listed on config file %s" % + unittest_cfg) + logging.debug('Unit test list: %s', test_list) + + if params.get('unittest_test_list'): + test_list = params.get('unittest_test_list').split() + logging.info('Original test list overriden by user') + logging.info('User defined unit test list: %s', test_list) + + black_list = params.get('unittest_test_blacklist', '').split() + if black_list: + for b in black_list: + if b in test_list: + test_list.remove(b) + logging.info('Tests blacklisted by user: %s', black_list) + logging.info('Test list after blacklist: %s', test_list) + + nfail = 0 + tests_failed = [] + + timeout = int(params.get('unittest_timeout', 600)) + + extra_params_original = params['extra_params'] + + for t in test_list: + logging.info('Running %s', t) + + flat_file = None + if parser.has_option(t, 'file'): + flat_file = parser.get(t, 'file') + + if flat_file is None: + nfail += 1 + tests_failed.append(t) + logging.error('Unittest config file %s has section %s but no ' + 'mandatory option file', unittest_cfg, t) + continue + + if flat_file not in unittest_list: + nfail += 1 + tests_failed.append(t) + logging.error('Unittest file %s referenced in config file %s but ' + 'was not find under the unittest dir', flat_file, + unittest_cfg) + continue + + smp = None + if parser.has_option(t, 'smp'): + smp = int(parser.get(t, 'smp')) + params['smp'] = smp + + extra_params = None + if parser.has_option(t, 'extra_params'): + extra_params = parser.get(t, 'extra_params') + params['extra_params'] += ' %s' % extra_params + + vm_name = params.get("main_vm") + params['kernel'] = os.path.join(unittest_dir, flat_file) + testlog_path = os.path.join(test.debugdir, "%s.log" % t) + + try: + try: + vm_name = 
params.get('main_vm') + env_process.preprocess_vm(test, params, env, vm_name) + vm = env.get_vm(vm_name) + vm.create() + vm.resume() + logging.info("Waiting for unittest %s to complete, timeout %s, " + "output in %s", t, timeout, + vm.get_testlog_filename()) + if not utils_misc.wait_for(vm.is_dead, timeout): + raise error.TestFail("Timeout elapsed (%ss)" % timeout) + # Check qemu's exit status + status = vm.process.get_status() + if status != 0: + nfail += 1 + tests_failed.append(t) + logging.error("Unit test %s failed", t) + except Exception, e: + nfail += 1 + tests_failed.append(t) + logging.error('Exception happened during %s: %s', t, str(e)) + finally: + try: + shutil.copy(vm.get_testlog_filename(), testlog_path) + logging.info("Unit test log collected and available under %s", + testlog_path) + except (NameError, IOError): + logging.error("Not possible to collect logs") + + # Restore the extra params so other tests can run normally + params['extra_params'] = extra_params_original + + if nfail != 0: + raise error.TestFail("Unit tests failed: %s" % " ".join(tests_failed)) diff --git a/kvm/tests/unittest_kvmctl.py b/kvm/tests/unittest_kvmctl.py new file mode 100644 index 00000000..f9852a5f --- /dev/null +++ b/kvm/tests/unittest_kvmctl.py @@ -0,0 +1,30 @@ +import os +from autotest.client import utils +from autotest.client.shared import error + + +def run_unittest_kvmctl(test, params, env): + """ + This is kvm userspace unit test, use kvm test harness kvmctl load binary + test case file to test various functions of the kvm kernel module. + The output of all unit tests can be found in the test result dir. + + @param test: KVM test object. + @param params: Dictionary with the test parameters. + @param env: Dictionary with test environment. + """ + case = params.get("case") + srcdir = params.get("srcdir", test.srcdir) + unit_dir = os.path.join(srcdir, "kvm_userspace", "kvm", "user") + if not os.path.isdir(unit_dir): + os.makedirs(unit_dir) + os.chdir(unit_dir) + + cmd = "./kvmctl test/x86/bootstrap test/x86/%s.flat" % case + try: + results = utils.system_output(cmd) + except error.CmdError: + raise error.TestFail("Unit test %s failed" % case) + + result_file = os.path.join(test.resultsdir, case) + utils.open_write_close(result_file, results) diff --git a/kvm/tests/usb.py b/kvm/tests/usb.py new file mode 100644 index 00000000..a4f441d0 --- /dev/null +++ b/kvm/tests/usb.py @@ -0,0 +1,291 @@ +import logging, re, uuid +from autotest.client.shared import error + + +@error.context_aware +def run_usb(test, params, env): + """ + Test usb device of guest + + 1) Create a image file by qemu-img + 2) Boot up a guest add this image as a usb device + 3) Check usb device information via monitor + 4) Check usb information by executing guest command + 5) Check usb serial option (optional) + 6) Check usb removable option (optional) + 7) Check usb min_io_size/opt_io_size option (optional) + + @param test: kvm test object + @param params: Dictionary with the test parameters + @param env: Dictionary with test environment. + """ + @error.context_aware + def _verify_string(regex_str, string, expect_result, search_opt=0): + """ + Verify USB storage device in monitor + + @param regex_str: Regex for checking command output + @param string: The string which will be checked + @param expect_result: The expected string + @param search_opt: Search option for re module. 
+ """ + def _compare_str(act, exp, ignore_case): + str_func = lambda x: x + if ignore_case: + str_func = lambda x: x.lower() + if str_func(act) != str_func(exp): + return ("Expected: '%s', Actual: '%s'" % + (str_func(exp), str_func(act))) + return "" + + ignore_case = False + if search_opt & re.I == re.I: + ignore_case = True + + error.context("Finding matched sub-string with regex pattern %s" % + regex_str) + m = re.findall(regex_str, string, search_opt) + if not m: + logging.debug(string) + raise error.TestError("Could not find matched sub-string") + + error.context("Verify matched string is same as expected") + actual_result = m[0] + fail_log = [] + if isinstance(actual_result, tuple): + for i, v in enumerate(expect_result): + ret = _compare_str(actual_result[i], v, ignore_case) + if ret: + fail_log.append(ret) + else: + ret = _compare_str(actual_result, expect_result[0], ignore_case) + if ret: + fail_log.append(ret) + + if fail_log: + logging.debug(string) + raise error.TestFail("Could not find expected string:\n %s" % + ("\n".join(fail_log))) + + + @error.context_aware + def _do_io_test_guest(session): + blksizes = [ "4K", "16K", "64K", "256K" ] + + output = session.cmd("fdisk -l") + if params.get("fdisk_string") not in output: + for line in output.splitlines(): + logging.debug(line) + raise error.TestFail("Could not detect the usb device on" + "fdisk output") + + error.context("Formatting USB disk") + devname = session.cmd("ls /dev/disk/by-path/* | grep usb").strip() + session.cmd("yes | mkfs %s" % devname, + timeout=int(params.get("format_timeout"))) + + error.context("Mounting USB disk") + session.cmd("mount %s /mnt" % devname) + + error.context("Creating comparison file") + c_file = '/tmp/usbfile' + session.cmd("dd if=/dev/urandom of=%s bs=1M count=1" % c_file) + + error.context("Copying %s to USB disk" % c_file) + for s in blksizes: + u_file = "/mnt/usbfile-%s" % s + session.cmd("dd if=%s of=%s bs=%s" % + (c_file, u_file, s)) + + error.context("Unmounting USB disk before file comparison") + session.cmd("umount %s" % devname) + + error.context("Mounting USB disk for file comparison") + session.cmd("mount %s /mnt" % devname) + + error.context("Determining md5sum for file on root fs and in USB disk") + md5_root = session.cmd("md5sum %s" % c_file).strip() + md5_root = md5_root.split()[0] + for s in blksizes: + u_file = "/mnt/usbfile-%s" % s + md5_usb = session.cmd("md5sum %s" % u_file).strip() + md5_usb = md5_usb.split()[0] + + if md5_root != md5_usb: + raise error.TestError("MD5 mismatch between file on root fs " + "and on USB disk [%s]" % u_file) + + error.context("Unmounting USB disk after file comparison") + session.cmd("umount %s" % devname) + + error.context("Checking if there are I/O error messages in dmesg") + output = session.get_command_output("dmesg -c") + io_error_msg = [] + for line in output.splitlines(): + if "Buffer I/O error" in line: + io_error_msg.append(line) + if re.search("reset \w+ speed USB device", line): + io_error_msg.append(line) + + if io_error_msg: + e_msg = "IO error found on guest's dmesg when formatting USB device" + logging.error(e_msg) + for line in io_error_msg: + logging.error(line) + raise error.TestFail(e_msg) + + + @error.context_aware + def _restart_vm(options): + if vm.is_alive(): + vm.destroy() + + new_params = params.copy() + for option, value in options.iteritems(): + new_params[option] = value + error.context("Restarting VM") + vm.create(params=new_params) + vm.verify_alive() + + + def _login(): + return 
vm.wait_for_login(timeout=int(params.get("login_timeout", 360))) + + + @error.context_aware + def _check_serial_option(serial, regex_str, expect_str): + error.context("Set serial option to '%s'" % serial, logging.info) + _restart_vm({"drive_serial_stg": serial}) + + error.context("Check serial option in monitor", logging.info) + output = str(vm.monitor.info("qtree")) + _verify_string(regex_str, output, [expect_str], re.S) + + error.context("Check serial option in guest", logging.info) + session = _login() + output = session.cmd("lsusb -v") + if not ("EMPTY_STRING" in serial or "NO_EQUAL_STRING" in serial): + # Verify in guest when serial is set to empty/null is meaningless. + _verify_string(serial, output, [serial]) + _do_io_test_guest(session) + + session.close() + + + @error.context_aware + def _check_removable_option(removable, expect_str): + error.context("Set removable option to '%s'" % removable, logging.info) + _restart_vm({"removable_stg": removable}) + + error.context("Check removable option in monitor", logging.info) + output = str(vm.monitor.info("qtree")) + regex_str = 'usb-storage.*?removable = (.*?)\n' + _verify_string(regex_str, output, [removable], re.S) + + error.context("Check removable option in guest", logging.info) + session = _login() + output = session.cmd("ls -l /dev/disk/by-path/* | grep usb").strip() + devname = re.findall("sd\w", output) + if devname: + d = devname[0] + else: + d = "sda" + cmd = "dmesg | grep %s" % d + output = session.cmd(cmd) + _verify_string(expect_str, output, [expect_str], re.I) + _do_io_test_guest(session) + + session.close() + + + @error.context_aware + def _check_io_size_option(min_io_size="512", opt_io_size="0"): + error.context("Set min_io_size to %s, opt_io_size to %s" % + (min_io_size, opt_io_size), logging.info) + opt = {} + opt["min_io_size_stg"] = min_io_size + opt["opt_io_size_stg"] = opt_io_size + + _restart_vm(opt) + + error.context("Check min/opt io_size option in monitor", logging.info) + output = str(vm.monitor.info("qtree")) + regex_str = "usb-storage.*?min_io_size = (\d+).*?opt_io_size = (\d+)" + _verify_string(regex_str, output, [min_io_size, opt_io_size], re.S) + + error.context("Check min/opt io_size option in guest", logging.info) + session = _login() + output = session.cmd("ls -l /dev/disk/by-path/* | grep usb").strip() + devname = re.findall("sd\w", output) + if devname: + d = devname[0] + else: + d = 'sda' + cmd = ("cat /sys/block/%s/queue/{minimum,optimal}_io_size" % d) + + output = session.cmd(cmd) + # Note: If set min_io_size = 0, guest min_io_size would be set to + # 512 by default. 
+ if min_io_size != "0": + expected_min_size = min_io_size + else: + expected_min_size = "512" + _verify_string("(\d+)\n(\d+)", output, [expected_min_size, opt_io_size]) + _do_io_test_guest(session) + + session.close() + + + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + + error.context("Check usb device information in monitor", logging.info) + output = str(vm.monitor.info("usb")) + if "Product QEMU USB MSD" not in output: + logging.debug(output) + raise error.TestFail("Could not find mass storage device") + + error.context("Check usb device information in guest", logging.info) + session = _login() + output = session.cmd("lsusb -v") + # No bus specified, default using "usb.0" for "usb-storage" + for i in ["Mass Storage", "SCSI", "QEMU USB HARDDRIVE"]: + _verify_string(i, output, [i]) + _do_io_test_guest(session) + session.close() + + if params.get("check_serial_option") == "yes": + error.context("Check usb serial option", logging.info) + serial = str(uuid.uuid4()) + regex_str = 'usb-storage.*?serial = "(.*?)"\n' + _check_serial_option(serial, regex_str, serial) + + logging.info("Check this option with some illegal string") + logging.info("Set usb serial to a empty string") + # An empty string, "" + serial = "EMPTY_STRING" + regex_str = 'usb-storage.*?serial = (.*?)\n' + _check_serial_option(serial, regex_str, '""') + + logging.info("Leave usb serial option blank") + serial = "NO_EQUAL_STRING" + regex_str = 'usb-storage.*?serial = (.*?)\n' + _check_serial_option(serial, regex_str, '"on"') + + if params.get("check_removable_option") == "yes": + error.context("Check usb removable option", logging.info) + removable = "on" + expect_str = "Attached SCSI removable disk" + _check_removable_option(removable, expect_str) + + removable = "off" + expect_str = "Attached SCSI disk" + _check_removable_option(removable, expect_str) + + if params.get("check_io_size_option") == "yes": + error.context("Check usb min/opt io_size option", logging.info) + _check_io_size_option("0", "0") + # Guest can't recognize correct value which we set now, + # So comment these test temporary. + #_check_io_size_option("1024", "1024") + #_check_io_size_option("4096", "4096") diff --git a/kvm/tests/virtio_console.py b/kvm/tests/virtio_console.py new file mode 100644 index 00000000..83f9d4f4 --- /dev/null +++ b/kvm/tests/virtio_console.py @@ -0,0 +1,1313 @@ +# TODO: Why VM recreation doesn't work? +""" +Collection of virtio_console and virtio_serialport tests. + +@copyright: 2010-2012 Red Hat Inc. +""" +from collections import deque +import array +import logging +import os +import random +import select +import socket +import threading +import time +from autotest.client import utils +from autotest.client.shared import error +from autotest.client.virt import kvm_virtio_port, env_process +from autotest.client.virt import utils_test + + +@error.context_aware +def run_virtio_console(test, params, env): + """ + KVM virtio_console test + + This test contain multiple tests. The name of the executed test is set + by 'virtio_console_test' cfg variable. Main function with the set name + with prefix 'test_' thus it's easy to find out which functions are + tests and which are helpers. + + Every test has it's own cfg parameters, please see the actual test's + docstring for details. 
+ + @param test: kvm test object + @param params: Dictionary with the test parameters + @param env: Dictionary with test environment + @raise error.TestNAError: if function with test_$testname is not present + """ + ###################################################################### + # General helpers + ###################################################################### + def get_vm_with_ports(no_consoles=0, no_serialports=0, spread=None, + quiet=False, strict=False): + """ + Checks whether existing 'main_vm' fits the requirements, modifies + it if needed and returns the VM object. + @param no_console: Number of desired virtconsoles. + @param no_serialport: Number of desired virtserialports. + @param spread: Spread consoles across multiple virtio-serial-pcis. + @param quiet: Notify user about VM recreation. + @param strict: Whether no_consoles have to match or just exceed. + @return: vm object matching the requirements. + """ + # check the number of running VM's consoles + vm = env.get_vm(params.get("main_vm")) + + if not vm: + _no_serialports = -1 + _no_consoles = -1 + else: + _no_serialports = 0 + _no_consoles = 0 + for port in vm.virtio_ports: + if isinstance(port, kvm_virtio_port.VirtioSerial): + _no_serialports += 1 + else: + _no_consoles += 1 + _spread = int(params.get('virtio_port_spread', 2)) + if spread is None: + spread = _spread + if strict: + if (_no_serialports != no_serialports or + _no_consoles != no_consoles): + _no_serialports = -1 + _no_consoles = -1 + # If not enough ports, modify params and recreate VM + if (_no_serialports < no_serialports or _no_consoles < no_consoles + or spread != _spread): + if not quiet: + out = "tests reqirements are different from cfg: " + if _no_serialports < no_serialports: + out += "serial_ports(%d), " % no_serialports + if _no_consoles < no_consoles: + out += "consoles(%d), " % no_consoles + if spread != _spread: + out += "spread(%s), " % spread + logging.warning(out[:-2] + ". Modify config to speedup tests.") + + params['virtio_ports'] = "" + if spread: + params['virtio_port_spread'] = spread + else: + params['virtio_port_spread'] = 0 + + for i in xrange(max(no_consoles, _no_consoles)): + name = "console-%d" % i + params['virtio_ports'] += " %s" % name + params['virtio_port_type_%s' % name] = "console" + + for i in xrange(max(no_serialports, _no_serialports)): + name = "serialport-%d" % i + params['virtio_ports'] += " %s" % name + params['virtio_port_type_%s' % name] = "serialport" + + if quiet: + logging.debug("Recreating VM with more virtio ports.") + else: + logging.warning("Recreating VM with more virtio ports.") + env_process.preprocess_vm(test, params, env, + params.get("main_vm")) + vm = env.get_vm(params.get("main_vm")) + + vm.verify_kernel_crash() + return vm + + def get_vm_with_worker(no_consoles=0, no_serialports=0, spread=None, + quiet=False): + """ + Checks whether existing 'main_vm' fits the requirements, modifies + it if needed and returns the VM object and guest_worker. + @param no_console: Number of desired virtconsoles. + @param no_serialport: Number of desired virtserialports. + @param spread: Spread consoles across multiple virtio-serial-pcis. + @param quiet: Notify user about VM recreation. + @param strict: Whether no_consoles have to match or just exceed. 
+ @return: tuple (vm object matching the requirements, + initialized GuestWorker of the vm) + """ + vm = get_vm_with_ports(no_consoles, no_serialports, spread, quiet) + guest_worker = kvm_virtio_port.GuestWorker(vm) + return vm, guest_worker + + def get_vm_with_single_port(port_type='serialport'): + """ + Wrapper which returns vm, guest_worker and virtio_ports with at lest + one port of the type specified by fction parameter. + @param port_type: type of the desired virtio port. + @return: tuple (vm object with at least 1 port of the port_type, + initialized GuestWorker of the vm, + list of virtio_ports of the port_type type) + """ + if port_type == 'serialport': + vm, guest_worker = get_vm_with_worker(no_serialports=1) + virtio_ports = get_virtio_ports(vm)[1][0] + else: + vm, guest_worker = get_vm_with_worker(no_consoles=1) + virtio_ports = get_virtio_ports(vm)[0][0] + return vm, guest_worker, virtio_ports + + def get_virtio_ports(vm): + """ + Returns separated virtconsoles and virtserialports + @param vm: VM object + @return: tuple (all virtconsoles, all virtserialports) + """ + consoles = [] + serialports = [] + for port in vm.virtio_ports: + if isinstance(port, kvm_virtio_port.VirtioSerial): + serialports.append(port) + else: + consoles.append(port) + return (consoles, serialports) + + @error.context_aware + def cleanup(vm=None, guest_worker=None): + """ + Cleanup function. + @param vm: VM whose ports should be cleaned + @param guest_worker: guest_worker which should be cleaned/exited + """ + error.context("Cleaning virtio_ports.", logging.debug) + logging.debug("Cleaning virtio_ports") + if guest_worker: + guest_worker.cleanup() + if vm: + for port in vm.virtio_ports: + port.clean_port() + port.close() + port.mark_as_clean() + + ###################################################################### + # Smoke tests + ###################################################################### + def test_open(): + """ + Try to open virtioconsole port. + @param cfg: virtio_console_params - which type of virtio port to test + @param cfg: virtio_port_spread - how many devices per virt pci (0=all) + """ + (vm, guest_worker, port) = get_vm_with_single_port( + params.get('virtio_console_params')) + guest_worker.cmd("virt.open('%s')" % (port.name)) + port.open() + cleanup(vm, guest_worker) + + def test_check_zero_sym(): + """ + Check if port /dev/vport0p0 was created. + @param cfg: virtio_console_params - which type of virtio port to test + @param cfg: virtio_port_spread - how many devices per virt pci (0=all) + """ + if params.get('virtio_console_params') == 'serialport': + vm, guest_worker = get_vm_with_worker(no_serialports=1) + else: + vm, guest_worker = get_vm_with_worker(no_consoles=1) + guest_worker.cmd("virt.check_zero_sym()", 10) + cleanup(vm, guest_worker) + + def test_multi_open(): + """ + Try to open the same port twice. + @note: It should pass with virtconsole and fail with virtserialport + @param cfg: virtio_console_params - which type of virtio port to test + @param cfg: virtio_port_spread - how many devices per virt pci (0=all) + """ + (vm, guest_worker, port) = get_vm_with_single_port( + params.get('virtio_console_params')) + guest_worker.cmd("virt.close('%s')" % (port.name), 10) + guest_worker.cmd("virt.open('%s')" % (port.name), 10) + (match, data) = guest_worker._cmd("virt.open('%s')" % (port.name), 10) + # Console is permitted to open the device multiple times + if port.is_console == "yes": # is console? 
+ if match != 0: # Multiple open didn't pass + raise error.TestFail("Unexpected fail of opening the console" + " device for the 2nd time.\n%s" % data) + else: + if match != 1: # Multiple open didn't fail: + raise error.TestFail("Unexpended pass of opening the" + " serialport device for the 2nd time.") + elif not "[Errno 24]" in data: + raise error.TestFail("Multiple opening fail but with another" + " exception %s" % data) + port.open() + cleanup(vm, guest_worker) + + def test_close(): + """ + Close the socket on the guest side + @param cfg: virtio_console_params - which type of virtio port to test + @param cfg: virtio_port_spread - how many devices per virt pci (0=all) + """ + (vm, guest_worker, port) = get_vm_with_single_port( + params.get('virtio_console_params')) + guest_worker.cmd("virt.close('%s')" % (port.name), 10) + port.close() + cleanup(vm, guest_worker) + + def test_polling(): + """ + Test correct results of poll with different cases. + @param cfg: virtio_console_params - which type of virtio port to test + @param cfg: virtio_port_spread - how many devices per virt pci (0=all) + """ + (vm, guest_worker, port) = get_vm_with_single_port( + params.get('virtio_console_params')) + # Poll (OUT) + port.open() + guest_worker.cmd("virt.poll('%s', %s)" % (port.name, select.POLLOUT), + 2) + + # Poll (IN, OUT) + port.sock.sendall("test") + for test in [select.POLLIN, select.POLLOUT]: + guest_worker.cmd("virt.poll('%s', %s)" % (port.name, test), 10) + + # Poll (IN HUP) + # I store the socket informations and close the socket + port.close() + for test in [select.POLLIN, select.POLLHUP]: + guest_worker.cmd("virt.poll('%s', %s)" % (port.name, test), 10) + + # Poll (HUP) + guest_worker.cmd("virt.recv('%s', 4, 1024, False)" % (port.name), 10) + guest_worker.cmd("virt.poll('%s', %s)" % (port.name, select.POLLHUP), + 2) + + # Reconnect the socket + port.open() + # Redefine socket in consoles + guest_worker.cmd("virt.poll('%s', %s)" % (port.name, select.POLLOUT), + 2) + cleanup(vm, guest_worker) + + def test_sigio(): + """ + Test whether port use generates sigio signals correctly. + @param cfg: virtio_console_params - which type of virtio port to test + @param cfg: virtio_port_spread - how many devices per virt pci (0=all) + """ + (vm, guest_worker, port) = get_vm_with_single_port( + params.get('virtio_console_params')) + if port.is_open(): + port.close() + + # Enable sigio on specific port + guest_worker.cmd("virt.async('%s', True, 0)" % (port.name), 10) + guest_worker.cmd("virt.get_sigio_poll_return('%s')" % (port.name), 10) + + # Test sigio when port open + guest_worker.cmd("virt.set_pool_want_return('%s', select.POLLOUT)" % + (port.name), 10) + port.open() + match = guest_worker._cmd("virt.get_sigio_poll_return('%s')" % + (port.name), 10)[0] + if match == 1: + raise error.TestFail("Problem with HUP on console port.") + + # Test sigio when port receive data + guest_worker.cmd("virt.set_pool_want_return('%s', select.POLLOUT |" + " select.POLLIN)" % (port.name), 10) + port.sock.sendall("0123456789") + guest_worker.cmd("virt.get_sigio_poll_return('%s')" % (port.name), 10) + + # Test sigio port close event + guest_worker.cmd("virt.set_pool_want_return('%s', select.POLLHUP |" + " select.POLLIN)" % (port.name), 10) + port.close() + guest_worker.cmd("virt.get_sigio_poll_return('%s')" % (port.name), 10) + + # Test sigio port open event and persistence of written data on port. 
+ guest_worker.cmd("virt.set_pool_want_return('%s', select.POLLOUT |" + " select.POLLIN)" % (port.name), 10) + port.open() + guest_worker.cmd("virt.get_sigio_poll_return('%s')" % (port.name), 10) + + # Test event when erase data. + guest_worker.cmd("virt.clean_port('%s')" % (port.name), 10) + port.close() + guest_worker.cmd("virt.set_pool_want_return('%s', select.POLLOUT)" + % (port.name), 10) + port.open() + guest_worker.cmd("virt.get_sigio_poll_return('%s')" % (port.name), 10) + + # Disable sigio on specific port + guest_worker.cmd("virt.async('%s', False, 0)" % (port.name), 10) + cleanup(vm, guest_worker) + + def test_lseek(): + """ + Tests the correct handling of lseek + @note: lseek should fail + @param cfg: virtio_console_params - which type of virtio port to test + @param cfg: virtio_port_spread - how many devices per virt pci (0=all) + """ + # The virt.lseek returns PASS when the seek fails + (vm, guest_worker, port) = get_vm_with_single_port( + params.get('virtio_console_params')) + guest_worker.cmd("virt.lseek('%s', 0, 0)" % (port.name), 10) + cleanup(vm, guest_worker) + + def test_rw_host_offline(): + """ + Try to read from/write to host on guest when host is disconnected. + @param cfg: virtio_console_params - which type of virtio port to test + @param cfg: virtio_port_spread - how many devices per virt pci (0=all) + """ + (vm, guest_worker, port) = get_vm_with_single_port( + params.get('virtio_console_params')) + if port.is_open(): + port.close() + + guest_worker.cmd("virt.recv('%s', 0, 1024, False)" % port.name, 10) + match, tmp = guest_worker._cmd("virt.send('%s', 10, True)" % port.name, + 10) + if match is not None: + raise error.TestFail("Write on guest while host disconnected " + "didn't time out.\nOutput:\n%s" + % tmp) + + port.open() + + if (port.sock.recv(1024) < 10): + raise error.TestFail("Didn't received data from guest") + # Now the cmd("virt.send('%s'... command should be finished + guest_worker.cmd("print('PASS: nothing')", 10) + cleanup(vm, guest_worker) + + def test_rw_host_offline_big_data(): + """ + Try to read from/write to host on guest when host is disconnected + @param cfg: virtio_console_params - which type of virtio port to test + @param cfg: virtio_port_spread - how many devices per virt pci (0=all) + """ + (vm, guest_worker, port) = get_vm_with_single_port( + params.get('virtio_console_params')) + if port.is_open(): + port.close() + + port.clean_port() + port.close() + guest_worker.cmd("virt.clean_port('%s'),1024" % port.name, 10) + match, tmp = guest_worker._cmd("virt.send('%s', (1024**3)*3, True, " + "is_static=True)" % port.name, 30) + if match is None: + raise error.TestFail("Write on guest while host disconnected " + "didn't time out.\nOutput:\n%s" + % tmp) + + time.sleep(20) + + port.open() + + rlen = 0 + while rlen < (1024 ** 3 * 3): + ret = select.select([port.sock], [], [], 10.0) + if (ret[0] != []): + rlen += len(port.sock.recv(((4096)))) + elif rlen != (1024 ** 3 * 3): + raise error.TestFail("Not all data was received," + "only %d from %d" % (rlen, 1024 ** 3 * 3)) + guest_worker.cmd("print('PASS: nothing')", 10) + cleanup(vm, guest_worker) + + def test_rw_blocking_mode(): + """ + Try to read/write data in blocking mode. 
+ @param cfg: virtio_console_params - which type of virtio port to test + @param cfg: virtio_port_spread - how many devices per virt pci (0=all) + """ + # Blocking mode + (vm, guest_worker, port) = get_vm_with_single_port( + params.get('virtio_console_params')) + port.open() + guest_worker.cmd("virt.blocking('%s', True)" % port.name, 10) + # Recv should timed out + match, tmp = guest_worker._cmd("virt.recv('%s', 10, 1024, False)" % + port.name, 10) + if match == 0: + raise error.TestFail("Received data even when none was sent\n" + "Data:\n%s" % tmp) + elif match is not None: + raise error.TestFail("Unexpected fail\nMatch: %s\nData:\n%s" % + (match, tmp)) + port.sock.sendall("1234567890") + # Now guest received the data end escaped from the recv() + guest_worker.cmd("print('PASS: nothing')", 10) + cleanup(vm, guest_worker) + + def test_rw_nonblocking_mode(): + """ + Try to read/write data in non-blocking mode. + @param cfg: virtio_console_params - which type of virtio port to test + @param cfg: virtio_port_spread - how many devices per virt pci (0=all) + """ + # Non-blocking mode + (vm, guest_worker, port) = get_vm_with_single_port( + params.get('virtio_console_params')) + port.open() + guest_worker.cmd("virt.blocking('%s', False)" % port.name, 10) + # Recv should return FAIL with 0 received data + match, tmp = guest_worker._cmd("virt.recv('%s', 10, 1024, False)" % + port.name, 10) + if match == 0: + raise error.TestFail("Received data even when none was sent\n" + "Data:\n%s" % tmp) + elif match is None: + raise error.TestFail("Timed out, probably in blocking mode\n" + "Data:\n%s" % tmp) + elif match != 1: + raise error.TestFail("Unexpected fail\nMatch: %s\nData:\n%s" % + (match, tmp)) + port.sock.sendall("1234567890") + guest_worker.cmd("virt.recv('%s', 10, 1024, False)" % port.name, 10) + cleanup(vm, guest_worker) + + def test_basic_loopback(): + """ + Simple loop back test with loop over two ports. + @param cfg: virtio_console_params - which type of virtio port to test + @param cfg: virtio_port_spread - how many devices per virt pci (0=all) + """ + if params.get('virtio_console_params') == 'serialport': + vm, guest_worker = get_vm_with_worker(no_serialports=2) + send_port, recv_port = get_virtio_ports(vm)[1][:2] + else: + vm, guest_worker = get_vm_with_worker(no_consoles=2) + send_port, recv_port = get_virtio_ports(vm)[0][:2] + + data = "Smoke test data" + send_port.open() + recv_port.open() + # Set nonblocking mode + send_port.sock.setblocking(0) + recv_port.sock.setblocking(0) + guest_worker.cmd("virt.loopback(['%s'], ['%s'], 1024, virt.LOOP_NONE)" + % (send_port.name, recv_port.name), 10) + send_port.sock.sendall(data) + tmp = "" + i = 0 + while i <= 10: + i += 1 + ret = select.select([recv_port.sock], [], [], 1.0) + if ret: + try: + tmp += recv_port.sock.recv(1024) + except IOError, failure_detail: + logging.warn("Got err while recv: %s", failure_detail) + if len(tmp) >= len(data): + break + if tmp != data: + raise error.TestFail("Incorrect data: '%s' != '%s'", + data, tmp) + guest_worker.safe_exit_loopback_threads([send_port], [recv_port]) + cleanup(vm, guest_worker) + + ###################################################################### + # Loopback tests + ###################################################################### + @error.context_aware + def test_loopback(): + """ + Virtio console loopback test. + + Creates loopback on the vm machine between send_pt and recv_pts + ports and sends length amount of data through this connection. 
+ It validates the correctness of the sent data. + @param cfg: virtio_console_params - semicolon separated loopback + scenarios, only $source_console_type and (multiple) + destination_console_types are mandatory. + '$source_console_type@buffer_length: + $destination_console_type1@$buffer_length:...: + $loopback_buffer_length;...' + @param cfg: virtio_console_test_time - how long to send the data + @param cfg: virtio_port_spread - how many devices per virt pci (0=all) + """ + # PREPARE + test_params = params.get('virtio_console_params') + if not test_params: + raise error.TestFail('No virtio_console_params specified') + test_time = int(params.get('virtio_console_test_time', 60)) + no_serialports = 0 + no_consoles = 0 + for param in test_params.split(';'): + no_serialports = max(no_serialports, param.count('serialport')) + no_consoles = max(no_consoles, param.count('console')) + vm, guest_worker = get_vm_with_worker(no_consoles, no_serialports) + + (consoles, serialports) = get_virtio_ports(vm) + + for param in test_params.split(';'): + if not param: + continue + error.context("test_loopback: params %s" % param, logging.info) + # Prepare + param = param.split(':') + idx_serialport = 0 + idx_console = 0 + buf_len = [] + if (param[0].startswith('console')): + send_pt = consoles[idx_console] + idx_console += 1 + else: + send_pt = serialports[idx_serialport] + idx_serialport += 1 + if (len(param[0].split('@')) == 2): + buf_len.append(int(param[0].split('@')[1])) + else: + buf_len.append(1024) + recv_pts = [] + for parm in param[1:]: + if (parm.isdigit()): + buf_len.append(int(parm)) + break # buf_len is the last portion of param + if (parm.startswith('console')): + recv_pts.append(consoles[idx_console]) + idx_console += 1 + else: + recv_pts.append(serialports[idx_serialport]) + idx_serialport += 1 + if (len(parm[0].split('@')) == 2): + buf_len.append(int(parm[0].split('@')[1])) + else: + buf_len.append(1024) + # There must be sum(idx_*) consoles + last item as loopback buf_len + if len(buf_len) == (idx_console + idx_serialport): + buf_len.append(1024) + + for port in recv_pts: + port.open() + + send_pt.open() + + if len(recv_pts) == 0: + raise error.TestFail("test_loopback: incorrect recv consoles" + "definition") + + threads = [] + queues = [] + for i in range(0, len(recv_pts)): + queues.append(deque()) + + # Start loopback + tmp = "'%s'" % recv_pts[0].name + for recv_pt in recv_pts[1:]: + tmp += ", '%s'" % (recv_pt.name) + guest_worker.cmd("virt.loopback(['%s'], [%s], %d, virt.LOOP_POLL)" + % (send_pt.name, tmp, buf_len[-1]), 10) + + exit_event = threading.Event() + + # TEST + thread = kvm_virtio_port.ThSendCheck(send_pt, exit_event, queues, + buf_len[0]) + thread.start() + threads.append(thread) + + for i in range(len(recv_pts)): + thread = kvm_virtio_port.ThRecvCheck(recv_pts[i], queues[i], + exit_event, buf_len[i + 1]) + thread.start() + threads.append(thread) + + time.sleep(test_time) + exit_event.set() + # TEST END + logging.debug('Joining th1') + threads[0].join() + tmp = "%d data sent; " % threads[0].idx + for thread in threads[1:]: + logging.debug('Joining th%s', thread) + thread.join() + tmp += "%d, " % thread.idx + logging.info("test_loopback: %s data received and verified", + tmp[:-2]) + + # Read-out all remaining data + for recv_pt in recv_pts: + while select.select([recv_pt.sock], [], [], 0.1)[0]: + recv_pt.sock.recv(1024) + + guest_worker.safe_exit_loopback_threads([send_pt], recv_pts) + + del exit_event + del threads[:] + cleanup(vm, guest_worker) + + def _process_stats(stats, 
scale=1.0): + """ + Process the stats to human readable form. + @param stats: List of measured data. + """ + if not stats: + return None + for i in range((len(stats) - 1), 0, -1): + stats[i] = stats[i] - stats[i - 1] + stats[i] /= scale + stats[0] /= scale + stats = sorted(stats) + return stats + + @error.context_aware + def test_perf(): + """ + Tests performance of the virtio_console tunnel. First it sends the data + from host to guest and than back. It provides informations about + computer utilization and statistic informations about the throughput. + + @param cfg: virtio_console_params - semicolon separated scenarios: + '$console_type@$buffer_length:$test_duration;...' + @param cfg: virtio_console_test_time - default test_duration time + @param cfg: virtio_port_spread - how many devices per virt pci (0=all) + """ + test_params = params.get('virtio_console_params') + if not test_params: + raise error.TestFail('No virtio_console_params specified') + test_time = int(params.get('virtio_console_test_time', 60)) + no_serialports = 0 + no_consoles = 0 + if test_params.count('serialport'): + no_serialports = 1 + if test_params.count('serialport'): + no_consoles = 1 + vm, guest_worker = get_vm_with_worker(no_consoles, no_serialports) + (consoles, serialports) = get_virtio_ports(vm) + consoles = [consoles, serialports] + + for param in test_params.split(';'): + if not param: + continue + error.context("test_perf: params %s" % param, logging.info) + # Prepare + param = param.split(':') + duration = test_time + if len(param) > 1: + try: + duration = float(param[1]) + except ValueError: + pass + param = param[0].split('@') + if len(param) > 1 and param[1].isdigit(): + buf_len = int(param[1]) + else: + buf_len = 1024 + param = (param[0] == 'serialport') + port = consoles[param][0] + + port.open() + + data = "" + for _ in range(buf_len): + data += "%c" % random.randrange(255) + + exit_event = threading.Event() + time_slice = float(duration) / 100 + + # HOST -> GUEST + guest_worker.cmd('virt.loopback(["%s"], [], %d, virt.LOOP_NONE)' + % (port.name, buf_len), 10) + thread = kvm_virtio_port.ThSend(port.sock, data, exit_event) + stats = array.array('f', []) + loads = utils.SystemLoad([(os.getpid(), 'autotest'), + (vm.get_pid(), 'VM'), 0]) + loads.start() + _time = time.time() + thread.start() + for _ in range(100): + stats.append(thread.idx) + time.sleep(time_slice) + _time = time.time() - _time - duration + logging.info("\n" + loads.get_cpu_status_string()[:-1]) + logging.info("\n" + loads.get_mem_status_string()[:-1]) + exit_event.set() + thread.join() + + # Let the guest read-out all the remaining data + while not guest_worker._cmd("virt.poll('%s', %s)" + % (port.name, select.POLLIN), 10)[0]: + time.sleep(1) + + guest_worker.safe_exit_loopback_threads([port], []) + + if (_time > time_slice): + logging.error("Test ran %fs longer which is more than one " + "time slice", _time) + else: + logging.debug("Test ran %fs longer", _time) + stats = _process_stats(stats[1:], time_slice * 1048576) + logging.debug("Stats = %s", stats) + logging.info("Host -> Guest [MB/s] (min/med/max) = %.3f/%.3f/%.3f", + stats[0], stats[len(stats) / 2], stats[-1]) + + del thread + + # GUEST -> HOST + exit_event.clear() + stats = array.array('f', []) + guest_worker.cmd("virt.send_loop_init('%s', %d)" + % (port.name, buf_len), 30) + thread = kvm_virtio_port.ThRecv(port.sock, exit_event, buf_len) + thread.start() + loads.start() + guest_worker.cmd("virt.send_loop()", 10) + _time = time.time() + for _ in range(100): + 
stats.append(thread.idx) + time.sleep(time_slice) + _time = time.time() - _time - duration + logging.info("\n" + loads.get_cpu_status_string()[:-1]) + logging.info("\n" + loads.get_mem_status_string()[:-1]) + guest_worker.cmd("virt.exit_threads()", 10) + exit_event.set() + thread.join() + if (_time > time_slice): # Deviation is higher than 1 time_slice + logging.error( + "Test ran %fs longer which is more than one time slice", _time) + else: + logging.debug("Test ran %fs longer", _time) + stats = _process_stats(stats[1:], time_slice * 1048576) + logging.debug("Stats = %s", stats) + logging.info("Guest -> Host [MB/s] (min/med/max) = %.3f/%.3f/%.3f", + stats[0], stats[len(stats) / 2], stats[-1]) + + del thread + del exit_event + cleanup(vm, guest_worker) + + ###################################################################### + # Migration tests + ###################################################################### + @error.context_aware + def _tmigrate(use_serialport, no_ports, no_migrations, blocklen, offline): + """ + An actual migration test. It creates loopback on guest from first port + to all remaining ports. Than it sends and validates the data. + During this it tries to migrate the vm n-times. + + @param vm: Target virtual machine [vm, session, tmp_dir, ser_session]. + @param consoles: Field of virtio ports with the minimum of 2 items. + @param parms: [media, no_migration, send-, recv-, loopback-buffer_len] + """ + # PREPARE + if use_serialport: + vm, guest_worker = get_vm_with_worker(no_serialports=no_ports) + ports = get_virtio_ports(vm)[1] + else: + vm, guest_worker = get_vm_with_worker(no_consoles=no_ports) + ports = get_virtio_ports(vm)[0] + + # TODO BUG: sendlen = max allowed data to be lost per one migration + # TODO BUG: using SMP the data loss is upto 4 buffers + # 2048 = char.dev. socket size, parms[2] = host->guest send buffer size + sendlen = 2 * 2 * max(kvm_virtio_port.SOCKET_SIZE, blocklen) + if not offline: # TODO BUG: online migration causes more loses + # TODO: Online migration lose n*buffer. n depends on the console + # troughput. FIX or analyse it's cause. + sendlen = 1000 * sendlen + for port in ports[1:]: + port.open() + + ports[0].open() + + threads = [] + queues = [] + verified = [] + for i in range(0, len(ports[1:])): + queues.append(deque()) + verified.append(0) + + tmp = "'%s'" % ports[1:][0].name + for recv_pt in ports[1:][1:]: + tmp += ", '%s'" % (recv_pt.name) + guest_worker.cmd("virt.loopback(['%s'], [%s], %d, virt.LOOP_POLL)" + % (ports[0].name, tmp, blocklen), 10) + + exit_event = threading.Event() + + # TEST + thread = kvm_virtio_port.ThSendCheck(ports[0], exit_event, queues, + blocklen, + migrate_event=threading.Event()) + thread.start() + threads.append(thread) + + for i in range(len(ports[1:])): + thread = kvm_virtio_port.ThRecvCheck(ports[1:][i], queues[i], + exit_event, blocklen, + sendlen=sendlen, + migrate_event=threading.Event()) + thread.start() + threads.append(thread) + + i = 0 + while i < 6: + tmp = "%d data sent; " % threads[0].idx + for thread in threads[1:]: + tmp += "%d, " % thread.idx + logging.debug("test_loopback: %s data received and verified", + tmp[:-2]) + i += 1 + time.sleep(2) + + for j in range(no_migrations): + error.context("Performing migration number %s/%s" + % (j, no_migrations)) + vm = utils_test.migrate(vm, env, 3600, "exec", 0, + offline) + if not vm: + raise error.TestFail("Migration failed") + + # Set new ports to Sender and Recver threads + # TODO: get ports in this function and use the right ports... 
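+            # Point the send/recv checker threads at the port objects of the
+            # migrated VM so they keep using valid sockets after migration.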
+ if use_serialport: + ports = get_virtio_ports(vm)[1] + else: + ports = get_virtio_ports(vm)[0] + for i in range(len(threads)): + threads[i].port = ports[i] + threads[i].migrate_event.set() + + # OS is sometime a bit dizzy. DL=30 + #guest_worker.reconnect(vm, timeout=30) + + i = 0 + while i < 6: + tmp = "%d data sent; " % threads[0].idx + for thread in threads[1:]: + tmp += "%d, " % thread.idx + logging.debug("test_loopback: %s data received and verified", + tmp[:-2]) + i += 1 + time.sleep(2) + if not threads[0].isAlive(): + if exit_event.isSet(): + raise error.TestFail("Exit event emited, check the log for" + "send/recv thread failure.") + else: + raise error.TestFail("Send thread died unexpectedly in " + "migration %d", (j + 1)) + for i in range(0, len(ports[1:])): + if not threads[i + 1].isAlive(): + raise error.TestFail("Recv thread %d died unexpectedly in " + "migration %d", i, (j + 1)) + if verified[i] == threads[i + 1].idx: + raise error.TestFail("No new data in %d console were " + "transfered after migration %d", + i, (j + 1)) + verified[i] = threads[i + 1].idx + logging.info("%d out of %d migration(s) passed", (j + 1), + no_migrations) + # If we get to this point let's assume all threads were reconnected + for thread in threads: + thread.migrate_event.clear() + # TODO detect recv-thread failure and throw out whole test + + # FINISH + exit_event.set() + # Send thread might fail to exit when the guest stucks + i = 30 + while threads[0].isAlive(): + if i <= 0: + raise error.TestFail("Send thread did not finish") + time.sleep(1) + i -= 1 + tmp = "%d data sent; " % threads[0].idx + for thread in threads[1:]: + thread.join() + tmp += "%d, " % thread.idx + logging.info("test_loopback: %s data received and verified during %d " + "migrations", tmp[:-2], no_migrations) + + # CLEANUP + guest_worker.safe_exit_loopback_threads([ports[0]], ports[1:]) + del exit_event + del threads[:] + cleanup(vm, guest_worker) + + def _test_migrate(offline): + """ + Migration test wrapper, see the actual test_migrate_* tests for details + """ + no_migrations = int(params.get("virtio_console_no_migrations", 5)) + no_ports = int(params.get("virtio_console_no_ports", 2)) + blocklen = int(params.get("virtio_console_blocklen", 1024)) + use_serialport = params.get('virtio_console_params') == "serialport" + _tmigrate(use_serialport, no_ports, no_migrations, blocklen, offline) + + def test_migrate_offline(): + """ + Tests whether the virtio-{console,port} are able to survive the offline + migration. + @param cfg: virtio_console_no_migrations - how many times to migrate + @param cfg: virtio_console_params - which type of virtio port to test + @param cfg: virtio_console_blocklen - send/recv block length + @param cfg: virtio_console_no_ports - minimum number of loopback ports + @param cfg: virtio_port_spread - how many devices per virt pci (0=all) + """ + _test_migrate(offline=True) + + def test_migrate_online(): + """ + Tests whether the virtio-{console,port} are able to survive the online + migration. + @param cfg: virtio_console_no_migrations - how many times to migrate + @param cfg: virtio_console_params - which type of virtio port to test + @param cfg: virtio_console_blocklen - send/recv block length + @param cfg: virtio_console_no_ports - minimum number of loopback ports + @param cfg: virtio_port_spread - how many devices per virt pci (0=all) + """ + _test_migrate(offline=False) + + def _virtio_dev_add(vm, pci_id, port_id, console="no"): + """ + Adds virtio serialport device. 
+ @param vm: Target virtual machine [vm, session, tmp_dir, ser_session]. + @param pci_id: Id of virtio-serial-pci device. + @param port_id: Id of port. + @param console: if "yes" initialize console. + """ + port = "serialport-" + port_type = "virtserialport" + if console == "yes": + port = "console-" + port_type = "virtconsole" + port += "%d-%d" % (pci_id, port_id) + ret = vm.monitors[0].cmd("device_add %s," + "bus=virtio_serial_pci%d.0," + "id=%s," + "name=%s" + % (port_type, pci_id, port, port)) + if console == "no": + vm.virtio_ports.append(kvm_virtio_port.VirtioSerial(port, None)) + else: + vm.virtio_ports.append(kvm_virtio_port.VirtioConsole(port, None)) + if ret != "": + logging.error(ret) + + def _virtio_dev_del(vm, pci_id, port_id): + """ + Removes virtio serialport device. + @param vm: Target virtual machine [vm, session, tmp_dir, ser_session]. + @param pci_id: Id of virtio-serial-pci device. + @param port_id: Id of port. + """ + for port in vm.virtio_ports: + if port.name.endswith("-%d-%d" % (pci_id, port_id)): + ret = vm.monitors[0].cmd("device_del %s" % (port.name)) + vm.virtio_ports.remove(port) + if ret != "": + logging.error(ret) + return + raise error.TestFail("Removing port which is not in vm.virtio_ports" + " ...-%d-%d" % (pci_id, port_id)) + + def test_hotplug(): + """ + Check the hotplug/unplug of virtio-console ports. + TODO: what exactly does this really test? + @param cfg: virtio_console_params - which type of virtio port to test + @param cfg: virtio_console_pause - pause between monitor commands + """ + # TODO: Rewrite this test. It was left as it was before the virtio_port + # conversion and looked too messy to repair it during conversion. + # TODO: Split this test into multiple variants + # TODO: Think about customizable params + # TODO: use qtree to detect the right virtio-serial-pci name + # TODO: QMP + if params.get("virtio_console_params") == "serialport": + console = "no" + else: + console = "yes" + pause = int(params.get("virtio_console_pause", 1)) + logging.info("Timeout between hotplug operations t=%fs", pause) + + vm = get_vm_with_ports(1, 1, spread=0, quiet=True, strict=True) + consoles = get_virtio_ports(vm) + # send/recv might block forever, set non-blocking mode + consoles[0][0].open() + consoles[1][0].open() + consoles[0][0].sock.setblocking(0) + consoles[1][0].sock.setblocking(0) + logging.info("Test correct initialization of hotplug ports") + for bus_id in range(1, 5): # count of pci device + ret = vm.monitors[0].cmd("device_add virtio-serial-pci," + "id=virtio_serial_pci%d" % (bus_id)) + if ret != "": + logging.error(ret) + for i in range(bus_id * 5 + 5): # max ports 30 + _virtio_dev_add(vm, bus_id, i, console) + time.sleep(pause) + # Test correct initialization of hotplug ports + time.sleep(10) # Timeout for port initialization + guest_worker = kvm_virtio_port.GuestWorker(vm) + + logging.info("Delete ports when ports are used") + # Delete ports when ports are used.
+ guest_worker.cmd("virt.loopback(['%s'], ['%s'], 1024," + "virt.LOOP_POLL)" % (consoles[0][0].name, + consoles[1][0].name), 10) + exit_event = threading.Event() + send = kvm_virtio_port.ThSend(consoles[0][0].sock, "Data", exit_event, + quiet=True) + recv = kvm_virtio_port.ThRecv(consoles[1][0].sock, exit_event, + quiet=True) + send.start() + time.sleep(2) + recv.start() + + # Try to delete ports under load + ret = vm.monitors[0].cmd("device_del %s" % consoles[1][0].name) + ret += vm.monitors[0].cmd("device_del %s" % consoles[0][0].name) + vm.virtio_ports = vm.virtio_ports[2:] + if ret != "": + logging.error(ret) + + exit_event.set() + send.join() + recv.join() + guest_worker.cmd("virt.exit_threads()", 10) + guest_worker.cmd('guest_exit()', 10) + + logging.info("Trying to add maximum count of ports to one pci device") + # Try to add ports + for i in range(30): # max port 30 + _virtio_dev_add(vm, 0, i, console) + time.sleep(pause) + guest_worker = kvm_virtio_port.GuestWorker(vm) + guest_worker.cmd('guest_exit()', 10) + + logging.info("Trying delete and add again part of ports") + # Try to delete ports + for i in range(25): # max port 30 + _virtio_dev_del(vm, 0, i) + time.sleep(pause) + guest_worker = kvm_virtio_port.GuestWorker(vm) + guest_worker.cmd('guest_exit()', 10) + + # Try to add ports + for i in range(5): # max port 30 + _virtio_dev_add(vm, 0, i, console) + time.sleep(pause) + guest_worker = kvm_virtio_port.GuestWorker(vm) + guest_worker.cmd('guest_exit()', 10) + + logging.info("Trying to add and delete one port 100 times") + # Try 100 times add and delete one port. + for i in range(100): + _virtio_dev_del(vm, 0, 0) + time.sleep(pause) + _virtio_dev_add(vm, 0, 0, console) + time.sleep(pause) + guest_worker = kvm_virtio_port.GuestWorker(vm) + cleanup(guest_worker=guest_worker) + # VM is broken (params mismatches actual state) + vm.destroy() + + @error.context_aware + def test_hotplug_virtio_pci(): + """ + Tests hotplug/unplug of the virtio-serial-pci bus. + @param cfg: virtio_console_pause - pause between monitor commands + @param cfg: virtio_console_loops - how many loops to run + """ + # TODO: QMP + # TODO: check qtree for device presense + pause = int(params.get("virtio_console_pause", 10)) + vm = get_vm_with_ports() + idx = 1 + for i in xrange(int(params.get("virtio_console_loops", 2))): + error.context("Hotpluging virtio_pci (iteration %d)" % i) + ret = vm.monitors[0].cmd("device_add virtio-serial-pci," + "id=virtio_serial_pci%d" % (idx)) + time.sleep(pause) + ret += vm.monitors[0].cmd("device_del virtio_serial_pci%d" + % (idx)) + time.sleep(pause) + if ret != "": + raise error.TestFail("Error occured while hotpluging virtio-" + "pci. Iteration %s, monitor output:\n%s" + % (i, ret)) + + ###################################################################### + # Destructive tests + ###################################################################### + def test_rw_notconnect_guest(): + """ + Try to send to/read from guest on host while guest not recvs/sends any + data. 
+ @param cfg: virtio_console_params - which type of virtio port to test + @param cfg: virtio_port_spread - how many devices per virt pci (0=all) + """ + vm = env.get_vm(params.get("main_vm")) + use_serialport = params.get('virtio_console_params') == "serialport" + if use_serialport: + vm = get_vm_with_ports(no_serialports=1, strict=True) + else: + vm = get_vm_with_ports(no_consoles=1, strict=True) + if use_serialport: + port = get_virtio_ports(vm)[1][0] + else: + port = get_virtio_ports(vm)[0][1] + if not port.is_open(): + port.open() + else: + port.close() + port.open() + + port.sock.settimeout(20.0) + + loads = utils.SystemLoad([(os.getpid(), 'autotest'), + (vm.get_pid(), 'VM'), 0]) + loads.start() + + try: + sent1 = 0 + for _ in range(1000000): + sent1 += port.sock.send("a") + except socket.timeout: + logging.info("Data sending to closed port timed out.") + + logging.info("Bytes sent to client: %d", sent1) + logging.info("\n" + loads.get_cpu_status_string()[:-1]) + + logging.info("Open and then close port %s", port.name) + guest_worker = kvm_virtio_port.GuestWorker(vm) + # Test of live and open and close port again + guest_worker.cleanup() + port.sock.settimeout(20.0) + + loads.start() + try: + sent2 = 0 + for _ in range(40000): + sent2 = port.sock.send("a") + except socket.timeout: + logging.info("Data sending to closed port timed out.") + + logging.info("Bytes sent to client: %d", sent2) + logging.info("\n" + loads.get_cpu_status_string()[:-1]) + loads.stop() + if (sent1 != sent2): + logging.warning("Inconsistent behavior: First sent %d bytes and " + "second sent %d bytes", sent1, sent2) + + port.sock.settimeout(None) + guest_worker = kvm_virtio_port.GuestWorker(vm) + cleanup(vm, guest_worker) + + def test_rmmod(): + """ + Remove and load virtio_console kernel module. + @param cfg: virtio_console_params - which type of virtio port to test + @param cfg: virtio_port_spread - how many devices per virt pci (0=all) + """ + (vm, guest_worker, port) = get_vm_with_single_port( + params.get('virtio_console_params')) + guest_worker.cleanup() + session = vm.wait_for_login() + if session.cmd_status('lsmod | grep virtio_console'): + raise error.TestNAError("virtio_console not loaded, probably " + " not compiled as module. Can't test it.") + session.cmd("rmmod -f virtio_console") + session.cmd("modprobe virtio_console") + guest_worker = kvm_virtio_port.GuestWorker(vm) + guest_worker.cmd("virt.clean_port('%s'),1024" % port.name, 2) + cleanup(vm, guest_worker) + + def test_max_ports(): + """ + Try to start and initialize machine with maximum supported number of + virtio ports. (30) + @param cfg: virtio_console_params - which type of virtio port to test + """ + port_count = 30 + if params.get('virtio_console_params') == "serialport": + logging.debug("Count of serialports: %d", port_count) + vm = get_vm_with_ports(0, port_count, quiet=True) + else: + logging.debug("Count of consoles: %d", port_count) + vm = get_vm_with_ports(port_count, 0, quiet=True) + guest_worker = kvm_virtio_port.GuestWorker(vm) + cleanup(vm, guest_worker) + + def test_max_serials_and_conosles(): + """ + Try to start and initialize machine with maximum supported number of + virtio ports with 15 virtconsoles and 15 virtserialports. 
+ """ + port_count = 15 + logging.debug("Count of virtports: %d %d", port_count, port_count) + vm = get_vm_with_ports(port_count, port_count, quiet=True) + guest_worker = kvm_virtio_port.GuestWorker(vm) + cleanup(vm, guest_worker) + + def test_shutdown(): + """ + Try to gently shutdown the machine while sending data through virtio + port. + @note: VM should shutdown safely. + @param cfg: virtio_console_params - which type of virtio port to test + @param cfg: virtio_port_spread - how many devices per virt pci (0=all) + """ + if params.get('virtio_console_params') == 'serialport': + vm, guest_worker = get_vm_with_worker(no_serialports=1) + else: + vm, guest_worker = get_vm_with_worker(no_consoles=1) + ports, _ports = get_virtio_ports(vm) + ports.extend(_ports) + for port in ports: + port.open() + # If more than one, send data on the other ports + for port in ports[1:]: + guest_worker.cmd("virt.close('%s')" % (port.name), 2) + guest_worker.cmd("virt.open('%s')" % (port.name), 2) + try: + os.system("dd if=/dev/random of='%s' bs=4096 &>/dev/null &" + % port.path) + except Exception: + pass + # Just start sending, it won't finish anyway... + guest_worker._cmd("virt.send('%s', 1024**3, True, is_static=True)" + % ports[0].name, 1) + + # Let the computer transfer some bytes :-) + time.sleep(2) + + # Power off the computer + vm.destroy(gracefully=True) + # close the virtio ports on the host side + for port in vm.virtio_ports: + port.close() + + ###################################################################### + # Debug and dummy tests + ###################################################################### + def test_delete_guest_script(): + """ + This dummy test only removes the guest_worker_script. Use this it + when you use the old image with a new guest_worker version. + @note: The script name might differ! + """ + vm = env.get_vm(params.get("main_vm")) + session = vm.wait_for_login() + out = session.cmd_output("echo on") + if "on" in out: # Linux + session.cmd_status("rm -f /tmp/virtio_console_guest.py*") + else: # Windows + session.cmd_status("del /F /Q C:\\virtio_console_guest.py*") + + ###################################################################### + # Main + # Executes test specified by virtio_console_test variable in cfg + ###################################################################### + fce = None + _fce = "test_" + params.get('virtio_console_test', '').strip() + error.context("Executing test: %s" % _fce, logging.info) + if _fce not in locals(): + raise error.TestNAError("Test %s doesn't exist. Check 'virtio_console_" + "test' variable in subtest.cfg" % _fce) + else: + fce = locals()[_fce] + return fce() diff --git a/kvm/tests/vmstop.py b/kvm/tests/vmstop.py new file mode 100644 index 00000000..3e4b668f --- /dev/null +++ b/kvm/tests/vmstop.py @@ -0,0 +1,85 @@ +import logging, time, os +from autotest.client.shared import error +from autotest.client import utils + + +def run_vmstop(test, params, env): + """ + KVM guest stop test: + 1) Log into a guest + 2) Copy a file into guest + 3) Stop guest + 4) Check the status through monitor + 5) Check the session + 6) Migrat the vm to a file twice and compare them. + + @param test: kvm test object + @param params: Dictionary with the test parameters + @param env: Dictionary with test environment. 
+ """ + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + timeout = float(params.get("login_timeout", 240)) + session = vm.wait_for_login(timeout=timeout) + + save_path = params.get("save_path", "/tmp") + clean_save = params.get("clean_save") == "yes" + save1 = os.path.join(save_path, "save1") + save2 = os.path.join(save_path, "save2") + + guest_path = params.get("guest_path", "/tmp") + file_size = params.get("file_size", "1000") + + try: + utils.run("dd if=/dev/zero of=/tmp/file bs=1M count=%s" % file_size) + # Transfer file from host to guest, we didn't expect the finish of + # transfer, we just let it to be a kind of stress in guest. + bg = utils.InterruptedThread(vm.copy_files_to, + ("/tmp/file", guest_path), + dict(verbose=True, timeout=60)) + logging.info("Start the background transfer") + bg.start() + + try: + # wait for the transfer start + time.sleep(5) + logging.info("Stop the VM") + vm.pause() + + # check with monitor + logging.info("Check the status through monitor") + if not vm.monitor.verify_status("paused"): + status = str(vm.monitor.info("status")) + raise error.TestFail("Guest did not pause after sending stop," + " guest status is %s" % status) + + # check through session + logging.info("Check the session") + if session.is_responsive(): + raise error.TestFail("Session still alive after sending stop") + + # Check with the migration file + logging.info("Save and check the state files") + for p in [save1, save2]: + vm.save_to_file(p) + time.sleep(1) + if not os.path.isfile(p): + raise error.TestFail("VM failed to save state file %s" % p) + + # Fail if we see deltas + md5_save1 = utils.hash_file(save1) + md5_save2 = utils.hash_file(save2) + if md5_save1 != md5_save2: + raise error.TestFail("The produced state files differ") + finally: + bg.join(suppress_exception=True) + + finally: + session.close() + if clean_save: + logging.debug("Clean the state files") + if os.path.isfile(save1): + os.remove(save1) + if os.path.isfile(save2): + os.remove(save2) + vm.resume() diff --git a/kvm/unittests.cfg.sample b/kvm/unittests.cfg.sample new file mode 100644 index 00000000..c623483c --- /dev/null +++ b/kvm/unittests.cfg.sample @@ -0,0 +1,83 @@ +# Copy this file to unittests.cfg and edit it. +# +# Define the objects we'll be using +vms = unittest_vm +vm_type = kvm + +# Choose the main VM +main_vm = unittest_vm + +# Some preprocessor/postprocessor params +start_vm = yes +kill_vm = yes +kill_vm_gracefully = no + +# Monitor +monitors = humanmonitor1 +main_monitor = humanmonitor1 +monitor_type = human + +# Screendump specific stuff +take_regular_screendumps = no + +# Some default VM params +qemu_binary = qemu +qemu_img_binary = qemu-img +mem = 512 +display = vnc + +# Default scheduler params +used_cpus = 1 +used_mem = 512 + +# NIC parameters +run_tcpdump = no + +# Misc +run_kvm_stat = yes + +# Tests +variants: + - build: + type = build + vms = '' + start_vm = no + # Load modules built/installed by the build test? + load_modules = no + # Save the results of this build on test.resultsdir? + save_results = no + # Preserve the source code directory between tests? + preserve_srcdir = yes + + ###################################################################### + # INSTALLERS DEFINITION SECTION + # Many different components can be defined. 
The ones that will + actually be run have to be defined in the 'installers' variable below. + ###################################################################### + # QEMU (KVM) installation from a GIT repo + git_repo_qemu_kvm_uri = git://git.kernel.org/pub/scm/virt/kvm/qemu-kvm.git + + # KVM unit tests from a GIT repo + git_repo_kvm_unit_tests_uri = git://git.kernel.org/pub/scm/virt/kvm/kvm-unit-tests.git + #git_repo_kvm_unit_tests_configure_options = --arch=x86_64 + + ###################################################################### + # INSTALLERS SELECTION + # Choose here what components you want to install + ###################################################################### + installers = git_repo_qemu_kvm git_repo_kvm_unit_tests + + - unittest: + type = unittest + vms = '' + start_vm = no + unittest_timeout = 600 + testdev = yes + extra_params += " -S" + # In case you want to execute only a subset of the tests defined in the + # unittests.cfg file of qemu-kvm, uncomment and edit test_list + #unittest_test_list = idt_test hypercall vmexit realmode + # In case you want to exclude just some of the tests, use a blacklist + #unittest_test_blacklist = access apic emulator + +only build unittest
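The sample above is only picked up once it has been copied to unittests.cfg, as its own header notes. Below is a minimal sketch of that preparation step, assuming the kvm/ directory layout introduced by this patch; the destination path is illustrative and may differ in your checkout.

    import shutil

    # Copy the shipped sample into place; edit the copy, not the .sample file.
    # The kvm/ paths below are an assumption based on the layout in this patch.
    shutil.copyfile("kvm/unittests.cfg.sample", "kvm/unittests.cfg")

    # Typical next step: open kvm/unittests.cfg and adjust values such as
    # qemu_binary, qemu_img_binary or mem before running the unittest job.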