Commit 3a4c24b2 authored by: Lucas Meneghel Rodrigues

Merge branch 'next' - Virt Test 2013.09.03

......@@ -35,6 +35,8 @@ libvirt/cfg/machines.cfg
libvirt/cfg/subtests.cfg
libvirt/cfg/virtio-win.cfg
libvirt/env
lvsb/cfg/subtests.cfg
lvsb/env
v2v/cfg/base.cfg
v2v/cfg/cdkeys.cfg
v2v/cfg/guest-hw.cfg
......
......@@ -27,6 +27,15 @@ Pull request maintenance - Libvirt subtests
M: Christopher Evich <cevich@redhat.com>
M: Yu Mingfei <yumingfei@cn.fujitsu.com>
M: Yang Dongsheng <yangds.fnst@cn.fujitsu.com>
M: Li Yang <liyang.fnst@cn.fujitsu.com>
Pull request maintenance - Libvirt subtests
-------------------------------------------
M: Christopher Evich <cevich@redhat.com>
Pull request maintenance - Libguestfs
-------------------------------------
......
......@@ -25,6 +25,14 @@ variants:
only Fedora.17.x86_64
cpu_model = "core2duo"
cpu_model_flags = ",+sse3"
only no_glusterfs_support
only (vhost_settings=vhost_src_no_dst_no)
only (mig_protocol=tcp)
only (booting_phase=after_login_vm)
only (migration_type=mig_online)
#only (multihost_mig_tests=default)
only migrate_multi_host
# Runs qemu, f16 64 bit guest OS, install, boot, shutdown
......
......@@ -160,6 +160,7 @@ class BlockCopy(object):
"""
reboot VM, alias of vm.reboot();
"""
error.context("reboot vm", logging.info)
params = self.parser_test_args()
timeout = params["login_timeout"]
......
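For reference, the added error.context() call follows autotest's context-aware error pattern; a minimal, illustrative sketch (the test body here is hypothetical):

    import logging
    from autotest.client.shared import error

    @error.context_aware
    def run_example(test, params, env):
        # error.context() labels the current step; if an exception escapes,
        # the framework prepends the most recent context message to the
        # failure reason, so the log shows which step failed.
        error.context("reboot vm", logging.info)
        env.get_vm(params["main_vm"]).reboot()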
import logging
from autotest.client.shared import error, utils
from qemu.tests import blk_stream
......@@ -11,20 +10,13 @@ class BlockStreamReboot(blk_stream.BlockStream):
@error.context_aware
def start_reset(self):
def reboot(self):
"""
Reset guest with system_reset in loop;
Reset guest with system_reset;
"""
error.context("reset guest in loop", logging.info)
count = 0
while True:
self.reboot(method="system_reset", boot_check=False)
count +=1
status = self.get_status()
# if block stream job really started, stop reset loop
if status.get("offset", 0) > 0:
break
logging.info("has reset %s times, when start stream job" % count)
params = self.parser_test_args()
method = params.get("reboot_method", "system_reset")
return super(BlockStreamReboot, self).reboot(method=method)
def action_before_start(self):
......@@ -53,10 +45,9 @@ class BlockStreamReboot(blk_stream.BlockStream):
def run_block_stream_reboot(test, params, env):
"""
block_stream_reboot test:
1). boot up vm and create snapshots;
2). reboot guest, then start block steam job;
3). destroy live vm and create it, then start block stream job(optonal);
4). after stream done, then reboot guest and check it's alived
1). boot guest, then reboot guest with system_reset;
2). create snapshots and start stream job immediately;
3). wait for the stream to finish and check that the guest is alive;
@param test: Kvm test object
@param params: Dictionary with the test parameters
......
......@@ -28,7 +28,7 @@ class BlockStreamStress(blk_stream.BlockStream):
return True
error.context("install stress app in guest", logging.info)
link = params.get("download_link")
md5sum = params.get("md5sum")
md5sum = params.get("pkg_md5sum")
tmp_dir = params.get("tmp_dir")
install_cmd = params.get("install_cmd")
config_cmd = params.get("config_cmd")
......
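The md5sum key is renamed to pkg_md5sum here (and in the cfg hunks below) so that package checksums cannot collide with the md5sum binary-path key, which this merge renames to md5sum_bin elsewhere. A minimal sketch of how such a key is typically consumed, assuming autotest's unmap_url_cache download-cache helper; link and tmp_dir mirror the params read above:

    from autotest.client import utils

    def fetch_pkg(params, tmp_dir):
        # download (or reuse a cached copy of) the package and verify
        # it against the expected md5 checksum
        link = params.get("download_link")
        md5sum = params.get("pkg_md5sum")
        return utils.unmap_url_cache(tmp_dir, link, md5sum)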
import os, logging
from autotest.client.shared import error
from virttest import utils_test
from virttest import storage, utils_misc, data_dir
@error.context_aware
def run_block_stream_with_stress(test, params, env):
"""
block_stream_with_stress test:
1). boot guest
2). make guest under heavyload status
3). create live snapshot file and start block stream job
4). wait for it done correctly
@param test: Kvm test object
@param params: Dictionary with the test parameters
@param env: Dictionary with test environment.
"""
vm = env.get_vm(params["main_vm"])
vm.verify_alive()
timeout = int(params.get("login_timeout", 360))
session = vm.wait_for_login(timeout=timeout)
image_filename = storage.get_image_filename(params, data_dir.get_data_dir())
device_id = vm.get_block({"file": image_filename})
snapshot_file = os.path.splitext(image_filename)[0] + "-snp"
sub_test = params.get("pre_test")
start_cmd = params.get("start_cmd")
def is_job_done():
"""
Query block job status to check whether the job is finished
"""
job = vm.monitor.query_block_job(device_id)
if job:
processed = float(job["offset"]) / job["len"] * 100
logging.debug("%s, rocessed: %.2f" % (job["type"], processed))
return False
logging.info("block stream job done")
return True
try:
utils_test.run_virt_sub_test(test, params, env, sub_type=sub_test)
error.context("Heavy load in guest ...", logging.info)
if start_cmd.startswith("stress"):
cpu = int(params.get("smp", 1))
mem = int(params.get("mem", 1024))
start_cmd = start_cmd.format(cpu=cpu,
vm=cpu * 2,
mem=(mem - 512) / cpu)
session.sendline(start_cmd)
error.context("Creating live snapshot", logging.info)
if vm.monitor.live_snapshot(device_id, snapshot_file):
raise error.TestFail("Fail to create live snapshot")
error.context("Start block device stream job", logging.info)
if vm.monitor.block_stream(device_id):
raise error.TestFail("Fail to start block stream job")
if not utils_misc.wait_for(is_job_done,
timeout=int(params.get("job_timeout", 2400)),
text="wait job done, it will take long time"):
raise error.TestFail("Wait job finish timeout")
finally:
if session:
session.close()
if os.path.isfile(snapshot_file):
os.remove(snapshot_file)
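The is_job_done() polling above relies on virttest's utils_misc.wait_for helper; a minimal sketch of its contract:

    from virttest import utils_misc

    # wait_for() calls the function every `step` seconds until it returns
    # a truthy value or `timeout` expires; it returns the function's
    # result, or None on timeout (which the test above turns into a
    # TestFail).
    result = utils_misc.wait_for(lambda: True, timeout=10, first=0, step=1.0)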
import logging, time, re, os
import logging, time
from autotest.client.shared import error
from virttest import utils_misc, utils_test, env_process, storage, data_dir
try:
from autotest.client.shared import utils_memory
......@@ -57,7 +56,7 @@ def run_boot_time(test, params, env):
vm.verify_alive()
vm.wait_for_login(timeout=timeout)
except Exception:
logging.Warn("Can not restore guest run level, "
logging.warning("Can not restore guest run level, "
"need restore the image")
params["restore_image_after_testing"] = "yes"
......
......@@ -42,7 +42,7 @@
type = block_stream_reboot
reboot_method = "system_reset"
before_start = "reboot"
after_finished = "reboot verify_alive"
after_finished = "verify_alive"
- drop_backingfile:
snapshot_chain += " images/sn2"
type = block_stream_drop_backingfile
......@@ -57,7 +57,7 @@
- with_stress:
type = block_stream_stress
download_link = http://weather.ou.edu/~apw/projects/stress/stress-1.0.4.tar.gz
md5sum = a607afa695a511765b40993a64c6e2f4
pkg_md5sum = a607afa695a511765b40993a64c6e2f4
install_cmd = "tar -xzvf ${tmp_dir}/stress-1.0.4.tar.gz -C ./ && cd stress-1.0.4 && ./configure --prefix=/usr && make && make install "
config_cmd = ""
app_check_cmd = "stress --help"
......
- cpu_add_test: install setup image_copy unattended_install.cdrom
no smp2
virt_test_type = qemu
type = cpu_add
onoff_iterations = 2
login_timeout = 240
kill_vm = yes
smp = 4
vcpu_maxcpus = 255
vcpu_need_hotplug = 1
# On a RHEL6 host, if your monitor type is qmp, you need to configure vcpu_add_cmd.
Host_RHEL.6:
vcpu_add_cmd = "human-monitor-command command-line=cpu_set %s online"
Host_Fedora:
monitor_type = qmp
# this needs at least a RHEL.6.3 host
# the smp value can be overridden (tests.cfg is a good place)
# if you want to test a guest booted with a different SMP value, please modify it.
# you might want to modify vcpu_maxcpus to suit your host
variants:
- one_cpu:
- max_cpu:
vcpu_need_hotplug = 251
run_sub_test = yes
sub_test_name = "boot"
kill_vm_on_error = yes
reboot = yes
session_need_update = yes
reboot_method = shell
- cpuid_outof_range:
id_hotplug_vcpu0 = 255
qmp_error_recheck = Unable to add CPU:.*, max allowed:.*
human_error_recheck = must be in range
- invalid_vcpuid:
id_hotplug_vcpu0 = -1
qmp_error_recheck = Invalid parameter type.*, expected:.*
human_error_recheck = integer is for 32-bit values
- cpuid_already_exist:
id_hotplug_vcpu0 = 1
qmp_error_recheck = Unable to add CPU:.*, it already exists
human_error_recheck = already exists
- add_after_stop:
stop_before_hotplug = yes
- guest_s3:
no Host_RHEL.5, Host_RHEL.6.0, Host_RHEL.6.1, Host_RHEL.6.2
run_sub_test = yes
extra_params += " -global PIIX4_PM.disable_s3=0"
sub_test_name = "guest_suspend"
guest_suspend_type = "mem"
services_up_timeout = 30
- guest_s4:
no Host_RHEL.5, Host_RHEL.6.0, Host_RHEL.6.1, Host_RHEL.6.2
run_sub_test = yes
extra_params += " -global PIIX4_PM.disable_s4=0"
sub_test_name = "guest_suspend"
guest_suspend_type = "disk"
services_up_timeout = 30
qemu_quit_after_sub_case = yes
session_need_update = yes
- guest_reboot:
run_sub_test = yes
sub_test_name = "boot"
kill_vm_on_error = yes
reboot = yes
session_need_update = yes
variants:
- reboot_shell:
reboot_method = shell
- reboot_monitor:
reboot_method = system_reset
- guest-shutdown:
run_sub_test = yes
sub_test_name = "shutdown"
kill_vm = yes
kill_vm_gracefully = no
check_img = yes
variants:
- shutdown_shell:
shutdown_method = shell
- shutdown_monitor:
shutdown_method = system_powerdown
- cpu_offline:
repeat_time = 1
vcpu_need_hotplug = 20
offline = 1-3,4,10-16,19
sub_test_name = shutdown
run_sub_test = yes
kill_vm =yes
shutdown_method = shell
- cpu_online_offline:
repeat_time = 1
vcpu_need_hotplug = 20
offline = 1-3,4,10-16,19
online = 1-3,4,10-16,19
sub_test_name = shutdown
run_sub_test = yes
kill_vm =yes
shutdown_method = shell
- onoff_repeat:
smp = 1
repeat_time = 300
vcpu_need_hotplug = 100
offline = 1-100
online = 1-100
......@@ -64,7 +64,7 @@
variants:
- heavyload:
download_link = http://weather.ou.edu/~apw/projects/stress/stress-1.0.4.tar.gz
md5sum = a607afa695a511765b40993a64c6e2f4
pkg_md5sum = a607afa695a511765b40993a64c6e2f4
install_cmd = "tar -xzvf ${tmp_dir}/stress-1.0.4.tar.gz -C ./ && cd stress-1.0.4 && ./configure --prefix=/usr && make && make install "
config_cmd = ""
app_check_cmd = "stress --help"
......@@ -103,7 +103,7 @@
type = drive_mirror_powerdown
app_check_cmd = "test -d ${tmp_dir}/linux-2.6.35.14"
download_link = "https://www.kernel.org/pub/linux/kernel/v2.6/longterm/v2.6.35/linux-2.6.35.14.tar.gz"
md5sum = "15e4021ffcb47b93c218083e1f2734a7"
pkg_md5sum = "15e4021ffcb47b93c218083e1f2734a7"
install_cmd = "tar xzvf ${tmp_dir}/linux-2.6.35.14.tar.gz -C ${tmp_dir}/"
config_cmd = "cd ${tmp_dir}/linux-2.6.35.14 && make defconfig"
start_cmd = "cd ${tmp_dir}/linux-2.6.35.14 && make clean && make -j `grep processor /proc/cpuinfo|wc -l` && make modules"
......
......@@ -3,9 +3,8 @@
type = live_snapshot
no raw vmdk qed
kill_vm = yes
create_sn_cmd = snapshot_blkdev
create_cmd = "dd if=/dev/urandom of=%s bs=1M count=1024"
file_create = /tmp/file
file_create = /var/tmp/file
clean_cmd = rm -f
snapshot_name = live_snapshot_img
variants:
......@@ -20,7 +19,7 @@
filesize = 2000
transfer_timeout = 1200
transfer_type = remote
tmp_dir = /tmp/
tmp_dir = /var/tmp/
- base:
type = live_snapshot_base
backup_image_before_testing = yes
......
......@@ -58,7 +58,7 @@
# We need to unload scsi_debug modules used by VM
kill_vm = yes
force_create_image = no
pre_command = "modprobe scsi_debug && echo 9 > /sys/bus/pseudo/drivers/scsi_debug/add_host"
pre_command = "modprobe -r scsi_debug; modprobe scsi_debug add_host=9"
post_command = "rmmod scsi_debug"
stg_params += "image_raw_device:yes "
stg_params += "image_format:raw "
......@@ -67,6 +67,7 @@
- block:
stg_params += "image_name:/dev/sd* "
- generic:
stg_params += "drive_cache:writethrough "
stg_params += "drive_format:scsi-generic "
stg_params += "image_name:/dev/sg* "
- multi_lun:
......
......@@ -8,7 +8,7 @@
force_image_clone = no
virt_test_type = qemu
no JeOS
variants:
variants multi_host_tests:
- migrate_multi_host: install setup image_copy unattended_install.cdrom
type = migration_multi_host
not_preprocess_host1 = yes
......@@ -25,20 +25,15 @@
disk_prepare_timeout = 360
comm_port = 13234
regain_ip_cmd = killall dhclient; sleep 10; dhclient;
variants:
variants mig_protocol:
# Migration protocol.
- tcp:
mig_protocol = "tcp"
- x_rdma:
mig_protocol = "x-rdma"
- x-rdma:
- rdma:
mig_protocol = "rdma"
- fd:
mig_protocol = "fd"
- exec:
mig_protocol = "exec"
variants:
variants booting_phase:
# Time when migration starts
- after_login_vm:
paused_after_start_vm = no
......@@ -52,13 +47,13 @@
- timeout_6:
start_migration_timeout = 6
variants:
variants migration_type:
- mig_online:
mig_offline = no
- mig_offline:
mig_offline = yes
variants:
variants vhost_settings:
- @vhost_src_no_dst_no:
netdev_extra_params_host1 = ""
netdev_extra_params_host2 = ""
......@@ -72,7 +67,7 @@
netdev_extra_params_host1 = ""
netdev_extra_params_host2 = ",vhost=on"
variants:
variants multihost_mig_tests:
# Migration properties
- @default:
type = migration_multi_host
......@@ -190,7 +185,7 @@
- suspend:
type = migration_multi_host
sub_test = "guest_suspend"
post_sub_test = "guest_suspend"
variants:
- guest_s3:
guest_suspend_type = "mem"
......@@ -198,6 +193,37 @@
- guest_s4:
guest_suspend_type = "disk"
- qxl:
variants:
- win_XP_VGA_mode:
# Guest has to be prepared for running with the qxl
# driver. Drivers have to be properly installed
# before the test starts. Otherwise the test
# could FAIL.
# test steps:
# 1) start a Windows XP guest on the first host
# 2) try to log in to the guest
# 3) run the pre-subtest steps with
# stepsfile = WinXP-set-VGA-mode.steps
# to enable VGA mode on the GPU
# 4) after the pre-subtest finishes, start migration
# to host2
# 5) after migration, run the post-subtests with
# stepfile = WinXP-reset-VGA-mode.steps
only WinXP
type = migration_multi_host
start_migration_timeout = 0
not_preprocess_host1 = no
start_vm_host1 = yes
login_before_pre_tests = yes
not_login_after_mig = yes
pre_sub_test = steps
pre_sub_test_timeout = 360
post_sub_test = steps
steps_host1 = steps/WinXP-set-VGA-mode.steps
steps_host2 = steps/WinXP-reset-VGA-mode.steps
del regain_ip_cmd
- cdrom_test:
only Linux
only mig_online.after_login_vm
......@@ -300,36 +326,23 @@
variants:
- @default_machine_types:
variants:
-@pc:
machine_type = "pc"
-pc-0.14:
machine_type = "pc-0.14"
-pc-0.13:
machine_type = "pc-0.13"
-pc-0.12:
machine_type = "pc-0.12"
-pc-0.11:
machine_type = "pc-0.11"
-pc-0.10:
machine_type = "pc-0.10"
-isapc:
machine_type = "isapc"
variants machine_type:
- @pc:
- pc-0.14:
- pc-0.13:
- pc-0.12:
- pc-0.11:
- pc-0.10:
- isapc:
- @rhel_machine_types:
variants:
-@pc:
machine_type = "pc"
variants machine_type:
- pc:
- rhel5.4.0:
machine_type = "rhel5.4.0"
- rhel5.4.4:
machine_type = "rhel5.4.4"
- rhel5.5.0:
machine_type = "rhel5.5.0"
- rhel6.0.0:
machine_type = "rhel6.0.0"
- rhel6.2.0:
machine_type = "rhel6.2.0"
- rhel6.3.0:
machine_type = "rhel6.3.0"
- rhel6.4.0:
- numa:
type = numa_basic
kill_vm_on_error = yes
login_timeout = 240
variants:
- numa_basic:
vms = ""
- numa_consistency:
only Linux
type = numa_consistency
start_vm = no
- numa_stress:
only Linux
type = numa_stress
test_control_file = stress_memory_heavy.control
......@@ -22,3 +22,6 @@
virtio_blk:
start_vm = no
catch_serial_cmd = cat /sys/block/vda/serial; echo
virtio_scsi, ide, ahci:
start_vm = no
catch_serial_cmd = sginfo -s /dev/sda || hdparm -i /dev/hda
......@@ -6,6 +6,8 @@
kill_vm = yes
kill_vm_gracefully = no
smp = 1
usbs =
usb_devices =
# ask autotest to not mess with the cpu_model settings
auto_cpu_model = no
......@@ -14,12 +16,14 @@
variants qemu_flavor:
- @unknown:
- rhel:
variants rhel_version:
- 6_0:
- 6_1:
- 6_2:
- 6_3:
- 6_4:
variants rhel_major_version:
- 6:
variants rhel_version:
- 6_0:
- 6_1:
- 6_2:
- 6_3:
- 6_4:
- upstream:
variants qemu_version:
- 1_0:
......@@ -29,10 +33,20 @@
- 1_4:
- 1_5:
variants host_cpu_vendor:
- @unknown:
- amd:
- intel:
# uncomment the line corresponding to the QEMU version, if you know it:
only (qemu_flavor = unknown)
#only (qemu_flavor = rhel).(rhel_version = 6_4)
#only (qemu_flavor = upstream).(qemu_version = 1_5)
#only (qemu_flavor = rhel)..(rhel_version = 6_4)
#only (qemu_flavor = upstream)..(qemu_version = 1_5)
# uncomment the line corresponding to the host CPU vendor, if you know it:
only (host_cpu_vendor = unknown)
#only (host_cpu_vendor = amd)
#only (host_cpu_vendor = intel)
# CPU model lists:
variants:
......@@ -45,6 +59,7 @@
- core2duo:
cpu_model = "core2duo"
- kvm32:
no (qemu_flavor = rhel)..(rhel_major_version = 6)
cpu_model = "kvm32"
- kvm64:
cpu_model = "kvm64"
......@@ -93,7 +108,7 @@
- cpu64_rhel6:
only (qemu_flavor = rhel)
cpu_model = "cpu64-rhel6"
- cpu64_rhel4:
- cpu64_rhel5:
only (qemu_flavor = rhel)
cpu_model = "cpu64-rhel5"
......@@ -114,10 +129,11 @@
no (qemu_flavor = unknown)
# CPUID data tests:
- cpuid:
# 486 is excluded due to not supporting cpuid
no 486
variants:
- full_dump:
# we only have dump files for KVM mode, for now:
only kvm
# machine types:
# parameter is named "machine_type_to_check" because
# somehow "machine_type" is being overwritten by another
......@@ -159,6 +175,53 @@
test_type = "check_cpuid_dump"
no cpu.unset
# Optimization: models/machine-type combinations known to not be valid:
cpu.intel.Westmere:
no machine.rhel.rhel600
cpu.intel.SandyBridge:
no machine.rhel.rhel600, machine.rhel.rhel610, machine.rhel.rhel620
cpu.intel.Haswell:
no machine.rhel.rhel600, machine.rhel.rhel610, machine.rhel.rhel620, machine.rhel.rhel630
cpu.intel.kvm32:
no machine.rhel
cpu.amd.Opteron_G4:
no machine.rhel.rhel600, machine.rhel.rhel610, machine.rhel.rhel620
cpu.amd.Opteron_G5:
no machine.rhel.rhel600, machine.rhel.rhel610, machine.rhel.rhel620, machine.rhel.rhel630
# Some CPU models require "+apic" to be forced on some hosts:
variants apic_flag:
- default:
# we know those models won't run on Intel hosts on RHEL-6:
cpu.intel.486, cpu.intel.pentium, cpu.intel.pentium2, cpu.intel.pentium3:
no (rhel_major_version = 6)..(host_cpu_vendor = intel)
- force:
only cpu.intel.486, cpu.intel.pentium, cpu.intel.pentium2, cpu.intel.pentium3
cpu_model_flags += ",+apic"
# some models require "-abm,-sse4a" to run on Intel host CPUs:
variants abm_sse4a:
- default:
- disable:
only cpu.amd.qemu64, cpu.amd.cpu64_rhel6, cpu.amd.cpu64_rhel5
cpu_model_flags += ",-abm,-sse4a"
# Some CPU models can't be easily tested with all the flags
# due to lack of host-side support, so check using
# explicit "-flag" parameters
# We have to disable the SVM flag because we are not running QEMU with
# -enable-nesting:
cpu.amd.qemu64, cpu.amd.cpu64_rhel5, cpu.amd.cpu64_rhel6, cpu.amd.phenom, cpu.amd.Opteron_G1, cpu.amd.Opteron_G2, cpu.amd.Opteron_G3, cpu.amd.Opteron_G4, cpu.amd.Opteron_G5:
cpu_model_flags += ",-svm"
kvm:
# KVM doesn't support the "monitor" flag, so we have
# to explicitly disable it:
cpu.intel.core2duo, cpu.intel.coreduo, cpu.intel.n270, cpu.amd.Opteron_G3, cpu.amd.phenom:
cpu_model_flags += ",-monitor"
#FEATURE: Ignore vendor string on KVM because it depends on host
# CPU vendor:
kvm:
......@@ -170,18 +233,12 @@
#KNOWN BUG: ignore brand string on those CPU models because
# they change depending on QEMU version:
cpu.intel.qemu32:
ignore_cpuid_leaves += " 0x80000002 0x80000003 0x80000004"
cpu.amd.qemu64:
ignore_cpuid_leaves += " 0x80000002 0x80000003 0x80000004"
cpu.amd.athlon:
cpu.intel.qemu32, cpu.amd.qemu64, cpu.amd.athlon:
ignore_cpuid_leaves += " 0x80000002 0x80000003 0x80000004"
#FIXED BUG: QEMU v1.1 and older have unstable host-dependent
# data on the KVM leaf, so we will ignore it by now:
machine.upstream.pc_1_0:
ignore_cpuid_leaves += " 0x40000001,0x00,eax"
machine.upstream.pc_1_1:
machine.upstream.pc_1_0, machine.upstream.pc_1_1:
ignore_cpuid_leaves += " 0x40000001,0x00,eax"
#FIXED BUG: QEMU v1.0 had broken feature aliases on
......@@ -195,29 +252,6 @@
# (to be fixed on QEMU 1.6.0)
ignore_cpuid_leaves += " 0xA"
#KNOWN BUG: PCLMULQDQ compatibility on pc-1.4 and older is
# known to be broken since v1.5.0, as the bit was enabled
# without machine-type compatibility code
# (see commit 41cb383f42d0cb51d8e3e25e3ecebc954dd4196f)
cpu.intel.Westmere:
machine.upstream.pc_i440fx_1_4:
ignore_cpuid_leaves += " 1,0,ecx,1"
machine.upstream.pc_q35_1_4:
ignore_cpuid_leaves += " 1,0,ecx,1"
machine.upstream.pc_1_3:
ignore_cpuid_leaves += " 1,0,ecx,1"
machine.upstream.pc_1_2:
ignore_cpuid_leaves += " 1,0,ecx,1"
machine.upstream.pc_1_1:
ignore_cpuid_leaves += " 1,0,ecx,1"
machine.upstream.pc_1_0:
ignore_cpuid_leaves += " 1,0,ecx,1"
# CPU models without the "apic" feature can't boot
# using RHEL-6 Seabios
machine.rhel:
no cpu.intel.pentium, cpu.intel.pentium2, cpu.intel.pentium3
# known RHEL-6 ABI breakage bugs:
machine.rhel:
# Max physical address depends on host:
......@@ -226,13 +260,7 @@
ignore_cpuid_leaves += " 0x40000001,0,eax"
# SEP bit depended on host kernel on 6.3 and older:
machine.rhel.rhel630:
ignore_cpuid_leaves += " 1,0,edx,11"
machine.rhel.rhel620:
ignore_cpuid_leaves += " 1,0,edx,11"
machine.rhel.rhel610:
ignore_cpuid_leaves += " 1,0,edx,11"
machine.rhel.rhel600:
machine.rhel.rhel630, machine.rhel.rhel620, machine.rhel.rhel610, machine.rhel.rhel600:
ignore_cpuid_leaves += " 1,0,edx,11"
# unsupported-bits behavior changed between some RHEL-6
......@@ -240,16 +268,30 @@
machine.rhel:
ignore_cpuid_leaves += " 0xc0000000,0x0"
# RHEL-6.0 QEMU had broken feature aliases on
# 0x80000001.EDX, so ignore it:
#FIXED BUG: RHEL-6.0 QEMU had broken feature aliases on
# 0x80000001.EDX and they were fixed without adding any
# compat code, so ignore those bits:
machine.rhel.rhel600:
cpu.intel:
# alias bits were removed on Intel CPU models
ignore_cpuid_leaves += " 0x80000001,0x00,edx"
cpu.amd.Opteron_G1, cpu.amd.Opteron_G2, cpu.amd.Opteron_G3:
# AMD alias bits that were corrected on RHEL-6.1:
# mtrr, mca, pse36
ignore_cpuid_leaves += " 0x80000001,0x0,edx,12"
ignore_cpuid_leaves += " 0x80000001,0x0,edx,14"
ignore_cpuid_leaves += " 0x80000001,0x0,edx,17"
# bz#819562: broken passthrough mode of CPUID leaf 7
machine.rhel.rhel620:
ignore_cpuid_leaves += " 7,0"
#KNOWN BUG: Intel models had SVM information available,
# even though the SVM feature isn't available.
# Guest OSes should ignore those bits, anyway.
machine.rhel..cpu.intel:
ignore_cpuid_leaves += " 0x8000000a"
- default.vendor:
test_type = "default_vendor"
kvm:
......
......@@ -10,7 +10,7 @@
start_vm = no
image_size = 20G
# md5sum binary path
md5sum = "md5sum"
md5sum_bin = "md5sum"
force_create_image = no
backup_image_before_testing = yes
restore_image_before_testing = yes
......
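This renames the cfg key for the checksum binary path; the consumer side appears in the qemu_storage hunk further below. A minimal sketch, with the file path chosen for illustration:

    # md5sum_bin holds the checksum binary path, distinct from the
    # pkg_md5sum package-checksum values renamed elsewhere in this merge
    md5bin = params.get("md5sum_bin", "md5sum")
    status, output = session.cmd_status_output("%s /var/tmp/file" % md5bin)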
- timerdevice:
only Fedora.19, RHEL.7
no Host_RHEL.5, Host_RHEL.6
restart_vm = yes
variants:
- tscwrite:
only Fedora.19, RHEL.7
type = timerdevice_tscwrite
msr_tools_install_cmd = "yum install -y msr-tools"
msr_tools_cmd = "a=$(rdmsr -d 0x00000010); echo -e ibase=16\\n $a | tail -n 1 | while read n; do wrmsr 0x00000010 $(($n+100000000000)); echo $n; done"
- clock_drift_with_sleep:
only RHEL
no up
type = timerdevice_clock_drift_with_sleep
rtc_base = utc
rtc_clock = host
rtc_drift = slew
- clock_drift_with_ntp:
only Fedora.19, RHEL.7
type = timerdevice_clock_drift_with_ntp
host_cpu_cnt_cmd = "cat /proc/cpuinfo | grep 'physical id' | wc -l"
test_run_timeout = 7200
- change_guest_clksource:
only RHEL
type = timerdevice_change_guest_clksource
rtc_base = utc
rtc_clock = host
rtc_drift = slew
- boot_test:
type = timerdevice_boot
rtc_drift = slew
timerdevice_drift_threshold = 3
variants:
- clock_host:
rtc_clock = host
- clock_vm:
rtc_clock = vm
variants:
- base_utc:
no Windows
rtc_base = utc
- base_localtime:
only Windows
rtc_base = localtime
variants:
- clksource_unchanged:
only Windows
timerdevice_clksource = ""
- clksource_kvm-clock:
only Fedora.19, RHEL
timerdevice_clksource = "kvm-clock"
- clksource_tsc:
# Fedora guests can't boot up without the '-kvmclock' option.
# And there is no way to change Windows' clocksource.
only RHEL
timerdevice_clksource = "tsc"
- clksource_pit:
only RHEL.3, RHEL.4, RHEL.5
timerdevice_clksource = "pit"
timerdevice_file_operation = "yes"
variants:
- with_boot:
- with_reboot:
timerdevice_reboot_test = yes
variants:
- without_host_load:
- with_host_load:
timerdevice_host_load_cmd = "for (( I=0; I<`grep processor /proc/cpuinfo"
timerdevice_host_load_cmd += " | wc -l`; I++ )); do taskset -c $I /bin/bash -c"
timerdevice_host_load_cmd += " 'for ((;;)); do X=1; done &'; done"
timerdevice_host_load_stop_cmd = "pkill -f 'do X=1'"
variants:
- reboot_immediately:
- reboot_after_sleep:
timerdevice_sleep_time = 3600
- tscsync:
only Fedora.19, RHEL.7
variants:
- change_host_clksource:
type = timerdevice_tscsync_change_host_clksource
......
......@@ -23,6 +23,7 @@
no RHEL.3
no RHEL.4
no RHEL.5
no Host_RHEL.6
usb_type_usbtest = nec-usb-xhci
usb_controller = xhci
usb_max_port_usbtest = 4
......@@ -231,6 +232,8 @@
create_image = yes
image_boot_image1 = yes
usbs = usb1
# Override the global config for usb1.
usb_type_usb1 = usb-ehci
usb_type = usb-ehci
usb_max_port_usb1 = 6
usb_max_port = 6
......
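The usb_type_usb1/usb_max_port_usb1 keys override the global keys only for the device named usb1; a minimal sketch of how virttest resolves such per-object keys (values illustrative):

    # object_params() folds <key>_<name> suffixed keys over the global keys
    usb_params = params.object_params("usb1")
    usb_type = usb_params.get("usb_type")      # resolves usb_type_usb1
    max_port = usb_params.get("usb_max_port")  # resolves usb_max_port_usb1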
......@@ -5,9 +5,9 @@
vms = ""
drivers_install = "balloon;block;nic"
mount_point = "/tmp/mnt"
driver_install_cmd_balloon = cmd /c WIN_UILS:\\whql\virtio_driver_install_whql.exe WIN_VIRTIO:\\Balloon\\xp\\x86 balloon
driver_install_cmd_block = cmd /c WIN_UILS:\\whql\virtio_driver_install_whql.exe WIN_VIRTIO:\\viostor\\xp\\x86 block
driver_install_cmd_nic = cmd /c WIN_UILS:\\whql\virtio_driver_install_whql.exe WIN_VIRTIO:\\\NetKVM\\xp\\x86 nic
driver_install_cmd_balloon = cmd /c WIN_UTILS:\\whql\virtio_driver_install_whql.exe WIN_VIRTIO:\\Balloon\\xp\\x86 balloon
driver_install_cmd_block = cmd /c WIN_UTILS:\\whql\virtio_driver_install_whql.exe WIN_VIRTIO:\\viostor\\xp\\x86 block
driver_install_cmd_nic = cmd /c WIN_UTILS:\\whql\virtio_driver_install_whql.exe WIN_VIRTIO:\\\NetKVM\\xp\\x86 nic
extra_params += " -balloon virtio"
images += " stg"
image_name_stg = "images/storage"
......
import logging
from autotest.client.shared import error
from tests import unattended_install
from virttest import virt_vm
from virttest import utils_test
@error.context_aware
......@@ -18,25 +17,23 @@ def run_check_block_size(test, params, env):
@param params: Dictionary with the test parameters
@param env: Dictionary with test environment.
"""
name = params["main_vm"]
if params.get("need_install") == "yes":
error.context("Install guest with a new image", logging.info)
unattended_install.run_unattended_install(test, params, env)
params["cdroms"] = ""
params["unattended_file"] = ""
params["cdrom_unattended"] = ""
params["kernel"] = ""
params["initrd"] = ""
params["kernel_params"] = ""
params["boot_once"] = "c"
vm = env.get_vm(params["main_vm"])
try:
vm.verify_alive()
except virt_vm.VMDeadError:
logging.info("VM is dead, creating...")
utils_test.run_virt_sub_test(test, params, env,
sub_type='unattended_install')
params["cdroms"] = ""
params["unattended_file"] = ""
params["cdrom_unattended"] = ""
params["kernel"] = ""
params["initrd"] = ""
params["kernel_params"] = ""
params["boot_once"] = "c"
vm = env.get_vm(name)
vm.destroy()
vm.create(params=params)
vm = env.get_vm(name)
timeout = float(params.get("login_timeout", 240))
session = vm.wait_for_login(timeout=timeout)
......
import logging, re, time
from autotest.client.shared import error
from virttest import utils_test, utils_misc
@error.context_aware
def run_cpu_add(test, params, env):
"""
Runs CPU hotplug test:
1) Boot the vm with -smp X,maxcpus=Y
2) After logging into the vm, check the CPU count
3) Stop the guest if 'stop_before_hotplug' is configured
4) Do cpu hotplug
5) Resume the guest if 'stop_before_hotplug' is configured
6) Recheck that the guest got the hot-plugged CPUs
7) Do cpu online/offline in guest if configured
8) Run sub test after CPU hotplug
9) Recheck guest cpus after sub test
@param test: QEMU test object.
@param params: Dictionary with test parameters.
@param env: Dictionary with the test environment.
"""
def qemu_guest_cpu_match(vm, vcpu_been_pluged=0, wait_time=60):
"""
Check whether the vcpu counts match
"""
total_cpus_expected = int(vm.cpuinfo.smp) + int(vcpu_been_pluged)
if utils_misc.wait_for(lambda: ((total_cpus_expected ==
vm.get_cpu_count()) and
(vm.get_cpu_count() ==
len(vm.vcpu_threads))),
wait_time, first=10, step=5.0):
logging.info("Cpu number in cmd_line, qemu and guest are match")
return True
err_msg = "Cpu mismatch! "
err_msg += "after hotplug %s vcpus, " % vcpu_been_pluged
err_msg += "there shoule be %s vcpus exist, " % total_cpus_expected
err_msg += "in qemu %s vcpus threads works, " % len(vm.vcpu_threads)
err_msg += "in guest %s cpus works." % vm.get_cpu_count()
raise error.TestFail(err_msg)
def cpu_online_offline(session, cpu_id, online=""):
"""
Do cpu online/offline in guest
"""
if online == "online":
online = 1
else:
online = 0
online_file = "/sys/devices/system/cpu/cpu%s/online" % cpu_id
if session.cmd_status("test -f %s" % online_file):
logging.info("online file %s not exist, just pass the cpu%s" %
(online_file, cpu_id))
return
session.cmd("echo %s > %s " % (online, online_file))
def onoff_para_opt(onoff_params):
"""
Parse the online/offline params.
Return the list of cpus to online/offline.
"""
onoff_list = []
offline = onoff_params.split(",")
for item in offline:
if "-" in item:
# ranges like "10-16" are inclusive on both ends
onoff_list += range(int(item.split("-")[0]),
int(item.split("-")[1]) + 1)
else:
onoff_list.append(item)
return [str(i) for i in onoff_list]
timeout = int(params.get("login_timeout", 360))
onoff_iterations = int(params.get("onoff_iterations", 2))
vcpu_need_hotplug = int(params.get("vcpu_need_hotplug", 1))
error.context("Boot the vm, with '-smp X,maxcpus=Y' option", logging.info)
vm = env.get_vm(params["main_vm"])
vm.verify_alive()
session = vm.wait_for_login(timeout=timeout)
maxcpus = vm.cpuinfo.maxcpus
error.context("Check if cpus in guest matche qemu cmd before hotplug",
logging.info)
qemu_guest_cpu_match(vm)
# do pre-operations like stop before vcpu hotplug
stop_before_hotplug = params.get("stop_before_hotplug", "no")
if stop_before_hotplug == 'yes':
error.context("Stop the guest before hotplug vcpu", logging.info)
vm.pause()
error.context("Do cpu hotplug", logging.info)
if vm.monitor.protocol == 'human':
human_check_info = params.get("human_error_recheck", None)
qmp_check_info = None
hotplug_add_cmd = ""
elif vm.monitor.protocol == 'qmp':
qmp_check_info = params.get("qmp_error_recheck", None)
hotplug_add_cmd = params.get("vcpu_add_cmd", "")
if hotplug_add_cmd:
human_check_info = params.get("human_error_recheck", None)
else:
human_check_info = None
vcpu_been_pluged = 0
for i in range(vcpu_need_hotplug):
hotplug_vcpu_params = params.object_params("hotplug_vcpu%s" % i)
plug_cpu_id = len(vm.vcpu_threads)
plug_cpu_id = hotplug_vcpu_params.get("id", plug_cpu_id)
(status, output) = vm.hotplug_vcpu(plug_cpu_id, hotplug_add_cmd)
if status:
if not qmp_check_info and not human_check_info:
vcpu_been_pluged += 1
logging.info("Cpu%s hotplug successfully" % plug_cpu_id)
logging.info("Now '%s' cpus have been hotpluged" %
vcpu_been_pluged)
continue
else:
err_msg = "Qemu should report error, but hotplug successfully"
raise error.TestFail(err_msg)
else:
if not output:
warn_msg = "Qemu should report some warning information"
raise error.TestWarn(warn_msg)
if qmp_check_info and re.findall(qmp_check_info, output, re.I):
msg = "Hotplug vcpu(id:'%s') error, qemu report the error."
logging.info(msg % plug_cpu_id)
logging.debug("QMP error info: '%s'" % output)
continue
elif (human_check_info and
re.findall(human_check_info, output, re.I)):
msg = "Hotplug vcpu(id:'%s') error, qemu report the error"
logging.info(msg % plug_cpu_id)
logging.debug("Error info: '%s'" % output)
continue
else:
err_msg = "Hotplug error! "
err_msg += "the hotplug cpu_id is: '%s', " % plug_cpu_id
err_msg += "the maxcpus allowed is: '%s', " % maxcpus
err_msg += "qemu cpu list is:'%s'" % vm.monitor.info("cpus")
logging.debug("The error info is:\n '%s'" % output)
raise error.TestFail(err_msg)
if stop_before_hotplug == "yes":
error.context("Resume the guest after cpu hotplug", logging.info)
vm.resume()
if params.get("reboot_after_hotplug", False):
error.context("Reboot guest after hotplug vcpu", logging.info)
vm.reboot()
if vcpu_been_pluged != 0:
error.context("Check whether cpus are match after hotplug",
logging.info)
qemu_guest_cpu_match(vm, vcpu_been_pluged)
error.context("Do cpu online/offline in guest", logging.info)
# Windows guests don't support the online/offline test
if params['os_type'] == "windows":
logging.info("Windows guest: skipping online/offline test")
return
online_list = []
offline_list = []
offline = params.get("offline", "")
online = params.get("online", "")
repeat_time = int(params.get("repeat_time", 0))
if offline:
offline_list = onoff_para_opt(offline)
logging.debug("Cpu offline list is %s " % offline_list)
if online:
online_list = onoff_para_opt(online)
logging.debug("Cpu online list is %s " % offline_list)
for i in range(repeat_time):
for offline_cpu in offline_list:
cpu_online_offline(session, offline_cpu)
time.sleep(onoff_iterations)
for online_cpu in online_list:
cpu_online_offline(session, online_cpu, "online")
time.sleep(onoff_iterations)
# do sub test after cpu hotplug
if (params.get("run_sub_test", "no") == "yes" and
'sub_test_name' in params):
sub_test = params['sub_test_name']
error.context("Run subtest %s after cpu hotplug" % sub_test,
logging.info)
if (sub_test == "guest_suspend" and
params["guest_suspend_type"] == "disk"):
vm.params["smp"] = int(vm.cpuinfo.smp) + vcpu_been_pluged
vcpu_been_pluged = 0
utils_test.run_virt_sub_test(test, params, env, sub_type=sub_test)
if sub_test == "shutdown" :
logging.info("Guest shutdown normally after cpu hotplug")
return
if params.get("session_need_update", "no") == "yes":
session = vm.wait_for_login(timeout=timeout)
if params.get("vcpu_num_rechek", "yes") == "yes":
error.context("Recheck cpu numbers after operation", logging.info)
qemu_guest_cpu_match(vm, vcpu_been_pluged)
if session:
session.close()
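For illustration, how the onoff_para_opt() helper above expands cfg values such as offline = 1-3,4,10-16,19:

    # ranges are inclusive on both ends (see the helper above)
    onoff_para_opt("1-3,19")   # -> ['1', '2', '3', '19']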
......@@ -66,7 +66,7 @@ def run_cpuid(test, params, env):
qemu_models = utils_misc.get_qemu_cpu_models(qemu_binary)
missing = set(cpu_models) - set(qemu_models)
if missing:
raise error.TestFail("Some CPU models not in QEMU CPU model list: %s")
raise error.TestFail("Some CPU models not in QEMU CPU model list: %r" % (missing))
added = set(qemu_models) - set(cpu_models)
if added:
logging.info("Extra CPU models in QEMU CPU listing: %s", added)
......@@ -501,10 +501,15 @@ def run_cpuid(test, params, env):
if not machine_type:
raise error.TestNAError("No machine_type_to_check defined")
cpu_model_flags = params.get('cpu_model_flags', '')
full_cpu_model_name = cpu_model
if cpu_model_flags:
full_cpu_model_name += ','
full_cpu_model_name += cpu_model_flags.lstrip(',')
ref_file = os.path.join(test.virtdir, "deps",
"cpuid_dumps",
kvm_enabled and "kvm" or "nokvm",
machine_type, '%s-dump.txt' % (cpu_model))
machine_type, '%s-dump.txt' % (full_cpu_model_name))
if not os.path.exists(ref_file):
raise error.TestNAError("no cpuid dump file: %s" % (ref_file))
reference = open(ref_file, 'r').read()
......@@ -514,13 +519,14 @@ def run_cpuid(test, params, env):
if reference is None:
raise error.TestNAError("couldn't parse reference cpuid dump from file; %s" % (ref_file))
try:
out = get_guest_cpuid(self, cpu_model, 'enforce',
out = get_guest_cpuid(self, cpu_model, cpu_model_flags + ',enforce',
extra_params=dict(machine_type=machine_type, smp=1))
except virt_vm.VMCreateError,e:
if "host doesn't support requested feature:" in e.output \
or ("host cpuid" in e.output and \
"lacks requested flag" in e.output):
raise error.TestNAError("Can't run CPU model %s on this host" % (cpu_model))
("lacks requested flag" in e.output or
"flag restricted to guest" in e.output)):
raise error.TestNAError("Can't run CPU model %s on this host" % (full_cpu_model_name))
else:
raise
dbg('ref_file: %r', ref_file)
......
......@@ -17,7 +17,7 @@ class DriveMirrorStress(drive_mirror.DriveMirror):
return True
error.context("install stress app in guest", logging.info)
link = params.get("download_link")
md5sum = params.get("md5sum")
md5sum = params.get("pkg_md5sum")
tmp_dir = params.get("tmp_dir")
install_cmd = params.get("install_cmd")
config_cmd = params.get("config_cmd")
......
import time, logging
from autotest.client.shared import error
from virttest import utils_misc, utils_test
from virttest import utils_test
from tests import file_transfer
def run_live_snapshot(test, params, env):
......@@ -16,6 +16,7 @@ def run_live_snapshot(test, params, env):
@param env: Dictionary with test environment.
"""
@error.context_aware
def create_snapshot(vm):
"""
Create live snapshot:
......@@ -23,25 +24,15 @@ def run_live_snapshot(test, params, env):
2). Get device info
3). Create snapshot
"""
cmd = params.get("create_sn_cmd")
error.context("Creating live snapshot ...", logging.info)
block_info = vm.monitor.info("block")
if vm.monitor.protocol == 'qmp':
device = block_info[0]["device"]
else:
string = ""
device = string.join(block_info).split(":")[0]
cmd += " %s" % device
device = "".join(block_info).split(":")[0]
snapshot_name = params.get("snapshot_name")
cmd += " %s" % snapshot_name
format = params.get("snapshot_format")
if format:
cmd += " %s" % format
logging.info("Creating live snapshot ...")
vm.monitor.send_args_cmd(cmd)
format = params.get("snapshot_format", "qcow2")
vm.monitor.live_snapshot(device, snapshot_name, format)
logging.info("Check snapshot is created ...")
snapshot_info = str(vm.monitor.info("block"))
......
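The raw create_sn_cmd/send_args_cmd path is replaced by the qemu_monitor live_snapshot() wrapper, the same call used by block_stream_with_stress above; a minimal usage sketch:

    # device comes from "info block"; the wrapper issues the appropriate
    # snapshot command (e.g. snapshot_blkdev) for the active monitor type
    vm.monitor.live_snapshot(device, "live_snapshot_img", "qcow2")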
......@@ -10,13 +10,21 @@ def run_migration_multi_host(test, params, env):
Migration execution progress is described in documentation
for migrate method in class MultihostMigration.
steps:
1) try log to VM if login_before_pre_tests == yes
2) before migration start pre_sub_test
3) migration
4) after migration start post_sub_test
@param test: kvm test object.
@param params: Dictionary with test parameters.
@param env: Dictionary with the test environment.
"""
login_timeout = int(params.get("login_timeout", 360))
sub_test = params.get("sub_test")
pre_sub_test = params.get("pre_sub_test")
post_sub_test = params.get("post_sub_test")
pre_sub_test_timeout = int(params.get("pre_sub_test_timeout", "240"))
login_before_pre_tests = params.get("login_before_pre_tests", "no")
mig_protocol = params.get("mig_protocol", "tcp")
mig_type = utils_test.MultihostMigration
......@@ -30,11 +38,24 @@ def run_migration_multi_host(test, params, env):
dsthost = params["hosts"][1]
is_src = params["hostid"] == srchost
if is_src: # is source
if pre_sub_test:
if login_before_pre_tests == "yes":
vm = env.get_vm(vms[0])
vm.wait_for_login(timeout=login_timeout)
error.context("Run sub test '%s' before migration on src"
% pre_sub_test, logging.info)
utils_test.run_virt_sub_test(test, params, env, pre_sub_test)
mig = mig_type(test, params, env, False)
mig._hosts_barrier([srchost, dsthost],
{'src': srchost, 'dst': dsthost, "vms": vms[0]},
"sync", pre_sub_test_timeout)
mig.migrate_wait([vms[0]], srchost, dsthost)
if not is_src: #is destination
if sub_test:
error.context("Run sub test '%s' after checking"
" clock resolution" % sub_test, logging.info)
utils_test.run_virt_sub_test(test, params, env, sub_test)
if post_sub_test:
error.context("Run sub test '%s' after migration on dst"
% post_sub_test, logging.info)
utils_test.run_virt_sub_test(test, params, env, post_sub_test)
......@@ -5,7 +5,7 @@ multi_disk test for Autotest framework.
"""
import logging, re, random, string
from autotest.client.shared import error, utils
from virttest import qemu_qtree, env_process, qemu_monitor
from virttest import qemu_qtree, env_process
_RE_RANGE1 = re.compile(r'range\([ ]*([-]?\d+|n).*\)')
_RE_RANGE2 = re.compile(r',[ ]*([-]?\d+|n)')
......@@ -107,6 +107,7 @@ def run_multi_disk(test, params, env):
stg_params += _add_param("image_format", params.get("stg_image_format"))
stg_params += _add_param("image_boot", params.get("stg_image_boot", "no"))
stg_params += _add_param("drive_format", params.get("stg_drive_format"))
stg_params += _add_param("drive_cache", params.get("stg_drive_cache"))
if params.get("stg_assign_index") != "no":
# Assume 0 and 1 are already occupied (hd0 and cdrom)
stg_params += _add_param("drive_index", 'range(2,n)')
......@@ -203,7 +204,7 @@ def run_multi_disk(test, params, env):
black_list = params["black_list"].split()
have_qtree = True
out = vm.monitor.human_monitor_cmd("qtree", debug=False)
out = vm.monitor.human_monitor_cmd("info qtree", debug=False)
if "unknown command" in str(out):
have_qtree = False
......@@ -213,7 +214,7 @@ def run_multi_disk(test, params, env):
qtree = qemu_qtree.QtreeContainer()
qtree.parse_info_qtree(vm.monitor.info('qtree'))
disks = qemu_qtree.QtreeDisksContainer(qtree.get_nodes())
(tmp1, tmp2) = disks.parse_info_block(vm.monitor.info('block'))
(tmp1, tmp2) = disks.parse_info_block(vm.monitor.info_block())
err += tmp1 + tmp2
err += disks.generate_params()
err += disks.check_disk_params(params)
......
import os, logging, re
import os, logging
from autotest.client.shared import error
from virttest import utils_test
from virttest import utils_test, utils_net
@error.context_aware
......@@ -20,29 +20,13 @@ def run_multi_nics_verify(test, params, env):
@param params: Dictionary with the test parameters
@param env: Dictionary with test environment.
"""
# A helper function for getting NICs counts from ifconfig output of guest
def get_nics_list(session):
s, o = session.get_command_status_output("ifconfig")
if s != 0:
raise error.TestError("Running command 'ifconfig' failed in guest"
" with output %s" % o)
logging.debug("The ifconfig ouput from guest is:\n%s" % o)
nics_list = re.findall(r'eth(\d+)\s+Link', o, re.M)
logging.info("NICs list: %s" % nics_list)
return nics_list
# A helper function for checking NICs number
def check_nics_num(expect_c, session):
txt = "Check whether guest NICs info match with params setting."
error.context(txt, logging.info)
nics_list = get_nics_list(session)
nics_list = utils_net.get_linux_ifname(session)
actual_c = len(nics_list)
msg = "Expect NICs nums are: %d\nPractical NICs nums are: %d\n" % \
(expect_c, actual_c)
msg = "Expected NICs count is: %d\n" % expect_c
msg += "Actual NICs count is: %d\n" % actual_c
if not expect_c == actual_c:
msg += "Nics count mismatch!\n"
......@@ -65,20 +49,21 @@ def run_multi_nics_verify(test, params, env):
logging.debug(check_nics_num(nics_num, session)[1])
txt = "Create configure file for every NIC interface in guest."
error.context(txt, logging.info)
ifcfg_prefix = "/etc/sysconfig/network-scripts/ifcfg-eth"
for num in range(nics_num):
eth_config_path = "".join([ifcfg_prefix, str(num)])
ifname_list = utils_net.get_linux_ifname(session)
ifcfg_path = "/etc/sysconfig/network-scripts/ifcfg-%s"
for ifname in ifname_list:
eth_config_path = ifcfg_path % ifname
eth_config = """DEVICE=eth%s
eth_config = """DEVICE=%s
BOOTPROTO=dhcp
ONBOOT=yes
""" % num
""" % ifname
cmd = "echo '%s' > %s" % (eth_config, eth_config_path)
s, o = session.get_command_status_output(cmd)
if s != 0:
raise error.TestError("Failed to create ether config file: %s\n"
"Reason is: %s" % (eth_config_path, o))
err_msg = "Failed to create ether config file: %s\nReason is: %s"
raise error.TestError(err_msg % (eth_config_path, o))
# Reboot and check the configurations.
new_session = vm.reboot(session)
......
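A short sketch of the helper adopted above; unlike the old eth\d+ regex, virttest's utils_net.get_linux_ifname also covers non-ethX interface naming:

    from virttest import utils_net

    # with no MAC address given, returns all guest interface names,
    # e.g. ['eth0', 'eth1'] or ['em1', 'p1p1']
    ifnames = utils_net.get_linux_ifname(session)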
......@@ -47,7 +47,7 @@ def run_nmi_bsod_catch(test, params, env):
if s:
raise error.TestFail("Fail command: %s. Output: %s" % (cmd, o))
if 'yes' in params.get("reboot_after_config"):
if params.get("reboot_after_config") == "yes":
error.context("Reboot guest", logging.info)
session = vm.reboot(timeout=timeout * 2)
......
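The switch from substring membership to equality matters: the old check fires on any value containing "yes", and it raises TypeError when the key is missing (params.get() returns None). A small demo:

    # old check: substring membership is too permissive
    'yes' in 'yes_no'     # True  -> would reboot unintentionally
    # new check: explicit comparison
    'yes_no' == 'yes'     # False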
import logging
from autotest.client.shared import error
from virttest import env_process, utils_misc, utils_test
try:
from autotest.client.shared import utils_memory
except ImportError:
from virttest.staging import utils_memory
@error.context_aware
def run_numa_basic(test, params, env):
"""
Qemu numa basic test:
1) Get host numa topological structure
2) Start a guest and bind it on the cpus of one node
3) Check the memory status of qemu process. It should mainly use the
memory in the same node.
4) Destroy the guest
5) Repeat step 2 ~ 4 on every node in host
@param test: QEMU test object
@param params: Dictionary with the test parameters
@param env: Dictionary with test environment.
"""
error.context("Get host numa topological structure", logging.info)
timeout = float(params.get("login_timeout", 240))
host_numa_node = utils_misc.NumaInfo()
node_list = host_numa_node.online_nodes
for node_id in node_list:
error.base_context("Bind qemu process to numa node %s" % node_id,
logging.info)
vm = "vm_bind_to_%s" % node_id
params['qemu_command_prefix'] = "numactl --cpunodebind=%s" % node_id
utils_memory.drop_caches()
env_process.preprocess_vm(test, params, env, vm)
vm = env.get_vm(vm)
vm.verify_alive()
session = vm.wait_for_login(timeout=timeout)
session.close()
error.context("Check the memory use status of qemu process",
logging.info)
memory_status, _ = utils_test.get_qemu_numa_status(host_numa_node,
vm.get_pid())
node_used_most = 0
memory_sz_used_most = 0
for index in range(len(node_list)):
if memory_sz_used_most < memory_status[index]:
memory_sz_used_most = memory_status[index]
node_used_most = node_list[index]
logging.debug("Qemu used %s pages in node"
" %s" % (memory_status[index], node_list[index]))
if node_used_most != node_id:
raise error.TestFail("Qemu still use memory from other node."
" Expect: %s, used: %s" % (node_id,
node_used_most))
error.context("Destroy guest.", logging.info)
vm.destroy()
import logging
from autotest.client.shared import error
from virttest import env_process, utils_misc, utils_test
try:
from autotest.client.shared import utils_memory
except ImportError:
from virttest.staging import utils_memory
@error.context_aware
def run_numa_consistency(test, params, env):
"""
Qemu numa consistency test:
1) Get host numa topological structure
2) Start a guest with the same node count as the host; each node has one cpu
3) Get the host cpu id used by each vcpu thread and the node that cpu belongs to
4) Allocate memory inside guest and bind the allocate process to one of
its vcpu.
5) The memory used in host should increase in the same node if the vcpu
thread has not switched to another node.
6) Repeat step 3~5 for each vcpu thread of the guest.
@param test: QEMU test object
@param params: Dictionary with the test parameters
@param env: Dictionary with test environment.
"""
def get_vcpu_used_node(numa_node_info, vcpu_thread):
cpu_used_host = utils_misc.get_thread_cpu(vcpu_thread)[0]
node_used_host = ([_ for _ in node_list if cpu_used_host
in numa_node_info.nodes[_].cpus][0])
return node_used_host
error.context("Get host numa topological structure", logging.info)
timeout = float(params.get("login_timeout", 240))
host_numa_node = utils_misc.NumaInfo()
node_list = host_numa_node.online_nodes
if len(node_list) < 2:
raise error.TestNAError("This host only has one NUMA node, "
"skipping test...")
node_list.sort()
params['smp'] = len(node_list)
params['vcpu_cores'] = 1
params['vcpu_threads'] = 1
params['vcpu_sockets'] = params['smp']
params['guest_numa_nodes'] = ""
for node_id in range(len(node_list)):
params['guest_numa_nodes'] += " node%d" % node_id
params['start_vm'] = 'yes'
utils_memory.drop_caches()
vm = params['main_vm']
env_process.preprocess_vm(test, params, env, vm)
vm = env.get_vm(vm)
vm.verify_alive()
vcpu_threads = vm.vcpu_threads
session = vm.wait_for_login(timeout=timeout)
dd_size = 256
if dd_size * len(vcpu_threads) > int(params['mem']):
dd_size = int(int(params['mem']) / 2 / len(vcpu_threads))
mount_size = dd_size * len(vcpu_threads)
mount_cmd = "mount -o size=%dM -t tmpfs none /tmp" % mount_size
qemu_pid = vm.get_pid()
drop = 0
for cpuid in range(len(vcpu_threads)):
error.context("Get vcpu %s used numa node." % cpuid, logging.info)
memory_status, _ = utils_test.get_qemu_numa_status(host_numa_node,
qemu_pid)
node_used_host = get_vcpu_used_node(host_numa_node,
vcpu_threads[cpuid])
memory_used_before = memory_status[node_used_host]
error.context("Allocate memory in guest", logging.info)
session.cmd(mount_cmd)
binded_dd_cmd = "taskset %s" % str(2 ** int(cpuid))
binded_dd_cmd += " dd if=/dev/urandom of=/tmp/%s" % cpuid
binded_dd_cmd += " bs=1M count=%s" % dd_size
session.cmd(binded_dd_cmd)
error.context("Check qemu process memory use status", logging.info)
node_after = get_vcpu_used_node(host_numa_node, vcpu_threads[cpuid])
if node_after != node_used_host:
logging.warn("Node used by vcpu thread changed. So drop the"
" results in this round.")
drop += 1
continue
memory_status, _ = utils_test.get_qemu_numa_status(host_numa_node,
qemu_pid)
memory_used_after = memory_status[node_used_host]
memory_allocated = (memory_used_after - memory_used_before) * 4 / 1024
if 1 - float(memory_allocated) / float(dd_size) > 0.05:
raise error.TestFail("Expect malloc %sM memory in node %s, but "
"only malloc %sM" % (dd_size, node_used_host,
memory_allocated))
session.close()
if drop == len(vcpu_threads):
raise error.TestError("All test rounds are dropped."
" Please test it again.")
import logging, os
from autotest.client.shared import error
from autotest.client import utils
from virttest import utils_misc, funcatexit, utils_test, data_dir
from tests import autotest_control
try:
from autotest.client.shared import utils_memory
except ImportError:
from virttest.staging import utils_memory
@error.context_aware
def run_numa_stress(test, params, env):
"""
Qemu numa stress test:
1) Boot up a guest and find the node it used
2) Try to allocate memory in that node
3) Run memory heavy stress inside guest
4) Check the memory use status of qemu process
5) Repeat step 2 ~ 4 several times
@param test: QEMU test object
@param params: Dictionary with the test parameters
@param env: Dictionary with test environment.
"""
host_numa_node = utils_misc.NumaInfo()
if len(host_numa_node.online_nodes) < 2:
raise error.TestNAError("Host only has one NUMA node, "
"skipping test...")
timeout = float(params.get("login_timeout", 240))
test_count = int(params.get("test_count", 4))
vm = env.get_vm(params["main_vm"])
vm.verify_alive()
session = vm.wait_for_login(timeout=timeout)
qemu_pid = vm.get_pid()
if test_count < len(host_numa_node.online_nodes):
test_count = len(host_numa_node.online_nodes)
tmpfs_size = 0
for node in host_numa_node.nodes:
node_mem = int(host_numa_node.read_from_node_meminfo(node, "MemTotal"))
if tmpfs_size < node_mem:
tmpfs_size = node_mem
tmpfs_path = params.get("tmpfs_path", "tmpfs_numa_test")
tmpfs_path = utils_misc.get_path(data_dir.get_tmp_dir(), tmpfs_path)
tmpfs_write_speed = int(params.get("tmpfs_write_speed", 10240))
dd_timeout = tmpfs_size / tmpfs_write_speed * 1.5
mount_fs_size = "size=%dK" % tmpfs_size
memory_file = utils_misc.get_path(tmpfs_path, "test")
dd_cmd = "dd if=/dev/urandom of=%s bs=1k count=%s" % (memory_file,
tmpfs_size)
if not os.path.isdir(tmpfs_path):
os.mkdir(tmpfs_path)
numa_node_malloc = -1
most_used_node, memory_used = utils_test.max_mem_map_node(host_numa_node,
qemu_pid)
for test_round in range(test_count):
if utils_memory.freememtotal() < tmpfs_size:
raise error.TestError("Don't have enough memory to execute this "
"test after %s round" % test_round)
error.context("Executing stress test round: %s" % test_round,
logging.info)
numa_node_malloc = most_used_node
numa_dd_cmd = "numactl -m %s %s" % (numa_node_malloc, dd_cmd)
error.context("Try to allocate memory in node %s" % numa_node_malloc,
logging.info)
try:
utils_misc.mount("none", tmpfs_path, "tmpfs", perm=mount_fs_size)
funcatexit.register(env, params.get("type"), utils_misc.umount,
"none", tmpfs_path, "tmpfs")
utils.system(numa_dd_cmd, timeout=dd_timeout)
except Exception, error_msg:
if "No space" in str(error_msg):
pass
else:
raise error.TestFail("Can not allocate memory in node %s."
" Error message:%s" % (numa_node_malloc,
str(error_msg)))
error.context("Run memory heavy stress in guest", logging.info)
autotest_control.run_autotest_control(test, params, env)
error.context("Get the qemu process memory use status", logging.info)
node_after, memory_after = utils_test.max_mem_map_node(host_numa_node,
qemu_pid)
if node_after == most_used_node and memory_after >= memory_used:
raise error.TestFail("Memory still stick in "
"node %s" % numa_node_malloc)
else:
most_used_node = node_after
memory_used = memory_after
utils_misc.umount("none", tmpfs_path, "tmpfs")
funcatexit.unregister(env, params.get("type"), utils_misc.umount,
"none", tmpfs_path, "tmpfs")
session.cmd("sync; echo 3 > /proc/sys/vm/drop_caches")
utils_memory.drop_caches()
session.close()
import re, string, logging, random
from autotest.client.shared import error
from virttest import qemu_monitor, storage, utils_misc, env_process, data_dir
from virttest import qemu_qtree
def run_physical_resources_check(test, params, env):
......@@ -185,6 +186,8 @@ def run_physical_resources_check(test, params, env):
timeout = int(params.get("login_timeout", 360))
chk_timeout = int(params.get("chk_timeout", 240))
session = vm.wait_for_login(timeout=timeout)
qtree = qemu_qtree.QtreeContainer()
qtree.parse_info_qtree(vm.monitor.info('qtree'))
logging.info("Starting physical resources check test")
logging.info("Values assigned to VM are the values we expect "
......@@ -249,10 +252,18 @@ def run_physical_resources_check(test, params, env):
f_fail = chk_fmt_model("nics", "nic_model", "network", "model=(.*),")
n_fail.extend(f_fail)
logging.info("Drive format check")
f_fail = chk_fmt_model("images", "drive_format",
"block", "(.*)\: .*%s" % image_name)
n_fail.extend(f_fail)
logging.info("Images params check")
logging.debug("Found devices: %s", params.objects('images'))
qdisks = qemu_qtree.QtreeDisksContainer(qtree.get_nodes())
_ = sum(qdisks.parse_info_block(
vm.monitor.info_block()))
_ += qdisks.generate_params()
_ += qdisks.check_disk_params(params)
if _:
_ = ("Images check failed with %s errors, check the log for "
"details" % _)
logging.error(_)
n_fail.append(_)
logging.info("Network card MAC check")
o = ""
......
......@@ -77,7 +77,7 @@ class QemuImgTest(qemu_storage.QemuImg):
return False
login_timeout = int(self.params.get("login_timeout", 360))
session = self.vm.wait_for_login(timeout=login_timeout)
md5bin = self.params["md5sum"]
md5bin = self.params["md5sum_bin"]
cmd = "%s %s" % (md5bin, cmd)
s, o = session.cmd_status_output(cmd)
if s != 0:
......
......@@ -424,12 +424,12 @@ class QemuGuestAgentBasicCheckWin(QemuGuestAgentBasicCheck):
def setup_gagent_in_host(self, params, vm):
error.context("Install qemu guest agent package on host", logging.info)
gagent_host_install_cmd = params["gagent_host_install_cmd"]
utils.run(gagent_host_install_cmd)
utils.run(gagent_host_install_cmd, int(params.get("login_timeout", 360)))
error.context("Install dependence packages on host", logging.info)
gagent_host_dep_install_cmd = params.get("gagent_host_dep_install_cmd",
"")
utils.run(gagent_host_dep_install_cmd)
utils.run(gagent_host_dep_install_cmd, int(params.get("login_timeout", 360)))
error.context("Copy necessary DLLs to guest", logging.info)
gagent_guest_dir = params["gagent_guest_dir"]
......
......@@ -186,7 +186,7 @@ def barrier_2(vm, words, params, debug_dir, data_scrdump_filename,
def run_steps(test, params, env):
vm = env.get_vm(params["main_vm"])
vm = env.get_vm(params.get("vms", "main_vm").split(" ")[0])
vm.verify_alive()
steps_filename = params.get("steps")
......
......@@ -86,4 +86,4 @@ def run_stop_continue(test, params, env):
if clean_op:
error.context("Do clean operation: '%s'" % clean_op, logging.info)
op_timeout = float(params.get("clean_op_timeout", 60))
session.cmd(clean_op, timeout=op_timeout)
session.cmd(clean_op, timeout=op_timeout, ignore_all_errors=True)
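session.cmd() here is aexpect's ShellSession.cmd; with ignore_all_errors=True, nonzero exit status and timeouts no longer raise, so the cleanup command cannot fail the test. A roughly equivalent hand-rolled form, for illustration:

    try:
        session.cmd(clean_op, timeout=op_timeout)
    except Exception:
        # swallow shell errors/timeouts; cleanup is best-effort
        pass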
import logging, re, time
from autotest.client.shared import error
from autotest.client import utils
from virttest import data_dir, storage, utils_disk, utils_test, env_process
from virttest import funcatexit
@error.context_aware
def run_timerdevice_boot(test, params, env):
"""
Timer device boot guest:
1) Sync the host system time with ntp server
2) Add some load on host (Optional)
3) Boot the guest with specific clock source
4) Check the clock source currently used on guest
5) Do some file operation on guest (Optional)
6) Check the system time on guest and host (Optional)
7) Check the hardware time on guest and host (Optional)
8) Sleep period of time before reboot (Optional)
9) Reboot guest (Optional)
10) Check the system time on guest and host (Optional)
11) Check the hardware time on guest and host (Optional)
@param test: QEMU test object.
@param params: Dictionary with test parameters.
@param env: Dictionary with the test environment.
"""
def verify_guest_clock_source(session, expected):
error.context("Check the current clocksource in guest", logging.info)
cmd = "cat /sys/devices/system/clocksource/"
cmd += "clocksource0/current_clocksource"
if expected not in session.cmd(cmd):
raise error.TestFail("Guest didn't use '%s' clocksource" % expected)
error.context("Sync the host system time with ntp server", logging.info)
utils.system("ntpdate clock.redhat.com")
timerdevice_host_load_cmd = params.get("timerdevice_host_load_cmd")
if timerdevice_host_load_cmd:
error.context("Add some load on host", logging.info)
utils.system(timerdevice_host_load_cmd)
host_load_stop_cmd = params["timerdevice_host_load_stop_cmd"]
funcatexit.register(env, params["type"], utils.system,
host_load_stop_cmd)
error.context("Boot a guest with kvm-clock", logging.info)
vm = env.get_vm(params["main_vm"])
vm.verify_alive()
timeout = int(params.get("login_timeout", 360))
session = vm.wait_for_login(timeout=timeout)
timerdevice_clksource = params.get("timerdevice_clksource")
if timerdevice_clksource:
try:
verify_guest_clock_source(session, timerdevice_clksource)
except Exception:
clksrc = timerdevice_clksource
error.context("Shutdown guest")
vm.destroy()
env.unregister_vm(vm.name)
error.context("Update guest kernel cli to '%s'" % clksrc,
logging.info)
image_filename = storage.get_image_filename(params,
data_dir.get_data_dir())
grub_file = params.get("grub_file", "/boot/grub2/grub.cfg")
kernel_cfg_pattern = params.get("kernel_cfg_pos_reg",
r".*vmlinuz-\d+.*")
disk_obj = utils_disk.GuestFSModiDisk(image_filename)
kernel_cfg_original = disk_obj.read_file(grub_file)
try:
logging.warn("Update the first kernel entry to"
" '%s' only" % clksrc)
kernel_cfg = re.findall(kernel_cfg_pattern,
kernel_cfg_original)[0]
except IndexError, detail:
raise error.TestError("Couldn't find the kernel config, regex"
" pattern is '%s', detail: '%s'" %
(kernel_cfg_pattern, detail))
if "clocksource=" in kernel_cfg:
kernel_cfg_new = re.sub("clocksource=.*?\s",
"clocksource=%s" % clksrc, kernel_cfg)
else:
kernel_cfg_new = "%s %s" % (kernel_cfg,
"clocksource=%s" % clksrc)
disk_obj.replace_image_file_content(grub_file, kernel_cfg,
kernel_cfg_new)
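            # Illustrative effect of the edit above (hypothetical cmdline):
            #   "ro root=/dev/vda1 clocksource=tsc quiet"
            # becomes, for clksrc "acpi_pm":
            #   "ro root=/dev/vda1 clocksource=acpi_pm quiet"
            # If no clocksource= option exists, the flag is appended instead.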
error.context("Boot the guest", logging.info)
vm_name = params["main_vm"]
cpu_model_flags = params.get("cpu_model_flags")
params["cpu_model_flags"] = cpu_model_flags + ",-kvmclock"
env_process.preprocess_vm(test, params, env, vm_name)
vm = env.get_vm(vm_name)
vm.verify_alive()
session = vm.wait_for_login(timeout=timeout)
error.context("Check the current clocksource in guest",
logging.info)
verify_guest_clock_source(session, clksrc)
error.context("Kill all ntp related processes")
session.cmd("pkill ntp; true")
if params.get("timerdevice_file_operation") == "yes":
error.context("Do some file operation on guest", logging.info)
session.cmd("dd if=/dev/zero of=/tmp/timer-test-file bs=1M count=100")
return
# Command to run to get the current time
time_command = params["time_command"]
# Filter which should match a string to be passed to time.strptime()
time_filter_re = params["time_filter_re"]
# Time format for time.strptime()
time_format = params["time_format"]
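    # Hypothetical example values for these params (for illustration only):
    #   time_command   = date +"%m/%d/%Y %H:%M:%S"
    #   time_filter_re = "(\d+/\d+/\d+ \d+:\d+:\d+)"
    #   time_format    = "%m/%d/%Y %H:%M:%S"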
    timerdevice_drift_threshold = float(params.get(
        "timerdevice_drift_threshold", 3))
error.context("Check the system time on guest and host", logging.info)
(host_time, guest_time) = utils_test.get_time(session, time_command,
time_filter_re, time_format)
drift = abs(float(host_time) - float(guest_time))
if drift > timerdevice_drift_threshold:
raise error.TestFail("The guest's system time is different with"
" host's. Host time: '%s', guest time:"
" '%s'" % (host_time, guest_time))
get_hw_time_cmd = params.get("get_hw_time_cmd")
if get_hw_time_cmd:
error.context("Check the hardware time on guest and host", logging.info)
host_time = utils.system_output(get_hw_time_cmd)
guest_time = session.cmd(get_hw_time_cmd)
drift = abs(float(host_time) - float(guest_time))
if drift > timerdevice_drift_threshold:
raise error.TestFail("The guest's hardware time is different with"
" host's. Host time: '%s', guest time:"
" '%s'" % (host_time, guest_time))
if params.get("timerdevice_reboot_test") == "yes":
sleep_time = params.get("timerdevice_sleep_time")
if sleep_time:
error.context("Sleep '%s' secs before reboot" % sleep_time,
logging.info)
sleep_time = int(sleep_time)
time.sleep(sleep_time)
session = vm.reboot()
error.context("Check the system time on guest and host", logging.info)
(host_time, guest_time) = utils_test.get_time(session, time_command,
time_filter_re, time_format)
drift = abs(float(host_time) - float(guest_time))
if drift > timerdevice_drift_threshold:
raise error.TestFail("The guest's system time is different with"
" host's. Host time: '%s', guest time:"
" '%s'" % (host_time, guest_time))
get_hw_time_cmd = params.get("get_hw_time_cmd")
if get_hw_time_cmd:
error.context("Check the hardware time on guest and host", logging.info)
host_time = utils.system_output(get_hw_time_cmd)
guest_time = session.cmd(get_hw_time_cmd)
drift = abs(float(host_time) - float(guest_time))
if drift > timerdevice_drift_threshold:
raise error.TestFail("The guest's hardware time is different with"
" host's. Host time: '%s', guest time:"
" '%s'" % (host_time, guest_time))
import logging, re
from autotest.client.shared import error
from virttest import data_dir, storage, utils_disk, env_process
@error.context_aware
def run_timerdevice_change_guest_clksource(test, params, env):
"""
Timer device check guest after update kernel line without kvmclock:
1) Boot a guest with kvm-clock
2) Check the current clocksource in guest
3) Check the available clocksource in guest
4) Update "clocksource=" parameter in guest kernel cli
5) Boot guest system
6) Check the current clocksource in guest
@param test: QEMU test object.
@param params: Dictionary with test parameters.
@param env: Dictionary with the test environment.
"""
def verify_guest_clock_source(session, expected):
error.context("Check the current clocksource in guest", logging.info)
cmd = "cat /sys/devices/system/clocksource/"
cmd += "clocksource0/current_clocksource"
        if expected not in session.cmd(cmd):
raise error.TestFail("Guest didn't use '%s' clocksource" % expected)
error.context("Boot a guest with kvm-clock", logging.info)
vm = env.get_vm(params["main_vm"])
vm.verify_alive()
timeout = int(params.get("login_timeout", 360))
session = vm.wait_for_login(timeout=timeout)
error.context("Check the current clocksource in guest", logging.info)
cmd = "cat /sys/devices/system/clocksource/"
cmd += "clocksource0/current_clocksource"
if not "kvm-clock" in session.cmd(cmd):
grub_file = params.get("grub_file", "/boot/grub2/grub.cfg")
if "clocksource=" not in session.cmd("cat %s" % grub_file):
raise error.TestFail("Guest didn't use 'kvm-clock' clocksource")
error.context("Shutdown guest")
vm.destroy()
env.unregister_vm(vm.name)
error.context("Update guest kernel cli to kvm-clock",
logging.info)
image_filename = storage.get_image_filename(params,
data_dir.get_data_dir())
kernel_cfg_pattern = params.get("kernel_cfg_pos_reg",
r".*vmlinuz-\d+.*")
disk_obj = utils_disk.GuestFSModiDisk(image_filename)
kernel_cfg_original = disk_obj.read_file(grub_file)
try:
logging.warn("Update the first kernel entry to"
" kvm-clock only")
kernel_cfg = re.findall(kernel_cfg_pattern,
kernel_cfg_original)[0]
except IndexError, detail:
raise error.TestError("Couldn't find the kernel config, regex"
" pattern is '%s', detail: '%s'" %
(kernel_cfg_pattern, detail))
if "clocksource=" in kernel_cfg:
kernel_cfg_new = re.sub("clocksource=[a-z\- ]+", " ", kernel_cfg)
disk_obj.replace_image_file_content(grub_file, kernel_cfg,
kernel_cfg_new)
error.context("Boot the guest", logging.info)
vm_name = params["main_vm"]
cpu_model_flags = params.get("cpu_model_flags")
params["cpu_model_flags"] = cpu_model_flags + ",-kvmclock"
env_process.preprocess_vm(test, params, env, vm_name)
vm = env.get_vm(vm_name)
vm.verify_alive()
session = vm.wait_for_login(timeout=timeout)
error.context("Check the available clocksource in guest", logging.info)
cmd = "cat /sys/devices/system/clocksource/"
cmd += "clocksource0/available_clocksource"
try:
available_clksrc_list = session.cmd(cmd).splitlines()[-1].split()
available_clksrc_list = [_.strip() for _ in available_clksrc_list]
except Exception, detail:
raise error.TestFail("Couldn't get guest available clock source."
" Detail: '%s'" % detail)
try:
for clksrc in available_clksrc_list:
error.context("Shutdown guest")
vm.destroy()
env.unregister_vm(vm.name)
error.context("Update guest kernel cli to '%s'" % clksrc,
logging.info)
image_filename = storage.get_image_filename(params,
data_dir.get_data_dir())
grub_file = params.get("grub_file", "/boot/grub2/grub.cfg")
kernel_cfg_pattern = params.get("kernel_cfg_pos_reg",
r".*vmlinuz-\d+.*")
disk_obj = utils_disk.GuestFSModiDisk(image_filename)
kernel_cfg_original = disk_obj.read_file(grub_file)
try:
logging.warn("Update the first kernel entry to"
" '%s' only" % clksrc)
kernel_cfg = re.findall(kernel_cfg_pattern,
kernel_cfg_original)[0]
except IndexError, detail:
raise error.TestError("Couldn't find the kernel config, regex"
" pattern is '%s', detail: '%s'" %
(kernel_cfg_pattern, detail))
if "clocksource=" in kernel_cfg:
kernel_cfg_new = re.sub("clocksource=[a-z \-_]+",
"clocksource=%s " % clksrc, kernel_cfg)
else:
kernel_cfg_new = "%s %s" % (kernel_cfg,
"clocksource=%s" % clksrc)
disk_obj.replace_image_file_content(grub_file, kernel_cfg,
kernel_cfg_new)
error.context("Boot the guest", logging.info)
if clksrc != "kvm-clock":
cpu_model_flags = params.get("cpu_model_flags")
if "-kvmclock" not in cpu_model_flags:
params["cpu_model_flags"] = cpu_model_flags + ",-kvmclock"
vm_name = params["main_vm"]
env_process.preprocess_vm(test, params, env, vm_name)
vm = env.get_vm(vm_name)
vm.verify_alive()
session = vm.wait_for_login(timeout=timeout)
error.context("Check the current clocksource in guest",
logging.info)
verify_guest_clock_source(session, clksrc)
finally:
try:
error.context("Shutdown guest")
vm.destroy()
error.context("Restore guest kernel cli", logging.info)
image_filename = storage.get_image_filename(params,
data_dir.get_data_dir())
grub_file = params.get("grub_file", "/boot/grub2/grub.cfg")
kernel_cfg_pattern = params.get("kernel_cfg_pos_reg",
r".*vmlinuz-\d+.*")
disk_obj = utils_disk.GuestFSModiDisk(image_filename)
kernel_cfg_original = disk_obj.read_file(grub_file)
try:
kernel_cfg = re.findall(kernel_cfg_pattern,
kernel_cfg_original)[0]
except IndexError, detail:
raise error.TestError("Couldn't find the kernel config, regex"
" pattern is '%s', detail: '%s'" %
(kernel_cfg_pattern, detail))
if "clocksource=" in kernel_cfg:
kernel_cfg_new = re.sub("clocksource=[a-z \-_]+", " ", kernel_cfg)
disk_obj.replace_image_file_content(grub_file, kernel_cfg,
kernel_cfg_new)
except Exception, detail:
logging.error("Failed to restore guest kernel cli."
" Detail: '%s'" % detail)
import logging, re
from autotest.client.shared import error
from autotest.client import utils
from virttest import data_dir, storage, utils_disk, env_process
@error.context_aware
def run_timerdevice_clock_drift_with_sleep(test, params, env):
"""
Timer device measure clock drift after sleep in guest with kvmclock:
1) Sync the host system time with ntp server
2) Boot a guest with multiple vcpus, using kvm-clock
3) Check the clock source currently used on guest
4) Stop auto sync service in guest (Optional)
5) Sync time from guest to ntpserver
6) Pin (only 1/none/all) vcpus to host cpu.
7) Sleep a while and check the time drift on guest
@param test: QEMU test object.
@param params: Dictionary with test parameters.
@param env: Dictionary with the test environment.
"""
def verify_elapsed_time():
        usleep_cmd = r'echo "for n in \$(seq 1000);'
        usleep_cmd += ' do usleep 10000; done" > /tmp/usleep.sh'
session.cmd(usleep_cmd)
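        # /tmp/usleep.sh sleeps 1000 * 10000us ~= 10s of wall clock time,
        # so "/usr/bin/time -f %e" should report roughly 10-11s per run.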
get_time_cmd = 'for (( i=0; i<$(grep "processor" /proc/cpuinfo'
get_time_cmd += ' | wc -l); i+=1 )); do /usr/bin/time -f"%e"'
get_time_cmd += ' taskset -c $i sh /tmp/usleep.sh; done'
output = session.cmd_output(get_time_cmd, timeout=timeout)
        times_list = output.splitlines()[1:]
        # Keep only runs outside the expected window; any survivor
        # indicates clock drift.
        times_list = [_ for _ in times_list
                      if float(_) > 11.0 or float(_) < 10.0]
if times_list:
raise error.TestFail("Unexpected time drift found:"
" Detail: '%s'" % output)
error.context("Sync the host system time with ntp server", logging.info)
utils.system("yum install -y ntpdate; ntpdate clock.redhat.com")
error.context("Boot the guest", logging.info)
vm = env.get_vm(params["main_vm"])
vm.verify_alive()
timeout = int(params.get("login_timeout", 360))
session = vm.wait_for_login(timeout=timeout)
error.context("Check the clock source currently used on guest",
logging.info)
cmd = "cat /sys/devices/system/clocksource/"
cmd += "clocksource0/current_clocksource"
if not "kvm-clock" in session.cmd(cmd):
grub_file = params.get("grub_file", "/boot/grub2/grub.cfg")
if "clocksource=" not in session.cmd("cat %s" % grub_file):
raise error.TestFail("Guest didn't use 'kvm-clock' clocksource")
error.context("Shutdown guest")
vm.destroy()
env.unregister_vm(vm.name)
error.context("Update guest kernel cli to kvm-clock",
logging.info)
image_filename = storage.get_image_filename(params,
data_dir.get_data_dir())
kernel_cfg_pattern = params.get("kernel_cfg_pos_reg",
r".*vmlinuz-\d+.*")
disk_obj = utils_disk.GuestFSModiDisk(image_filename)
kernel_cfg_original = disk_obj.read_file(grub_file)
try:
logging.warn("Update the first kernel entry to kvm-clock only")
kernel_cfg = re.findall(kernel_cfg_pattern,
kernel_cfg_original)[0]
except IndexError, detail:
raise error.TestError("Couldn't find the kernel config, regex"
" pattern is '%s', detail: '%s'" %
(kernel_cfg_pattern, detail))
if "clocksource=" in kernel_cfg:
kernel_cfg_new = re.sub(r"clocksource=[a-z\- ]+", " ", kernel_cfg)
disk_obj.replace_image_file_content(grub_file, kernel_cfg,
kernel_cfg_new)
error.context("Boot the guest", logging.info)
vm_name = params["main_vm"]
cpu_model_flags = params.get("cpu_model_flags")
params["cpu_model_flags"] = cpu_model_flags + ",-kvmclock"
env_process.preprocess_vm(test, params, env, vm_name)
vm = env.get_vm(vm_name)
vm.verify_alive()
session = vm.wait_for_login(timeout=timeout)
error.context("Stop auto sync service in guest", logging.info)
cmd = "(service chronyd status | grep 'Loaded: loaded')"
cmd += " && service chronyd stop"
session.cmd_status_output(cmd)
error.context("Sync time from guest to ntpserver", logging.info)
session.cmd("yum install -y ntpdate; ntpdate clock.redhat.com",
timeout=timeout)
error.context("Sleep a while and check the time drift on guest"
" (without any pinned vcpu)", logging.info)
verify_elapsed_time()
error.context("Pin every vcpu to physical cpu", logging.info)
host_cpu_cnt_cmd = params["host_cpu_cnt_cmd"]
host_cpu_num = utils.system_output(host_cpu_cnt_cmd).strip()
host_cpu_list = (_ for _ in range(int(host_cpu_num)))
cpu_pin_list = zip(vm.vcpu_threads, host_cpu_list)
if len(cpu_pin_list) < len(vm.vcpu_threads):
raise error.TestNAError("There isn't enough physical cpu to"
" pin all the vcpus")
check_one_cpu_pinned = False
for vcpu, pcpu in cpu_pin_list:
utils.system("taskset -p -c %s %s" % (pcpu, vcpu))
if not check_one_cpu_pinned:
error.context("Sleep a while and check the time drift on"
"guest (with one pinned vcpu)", logging.info)
verify_elapsed_time()
check_one_cpu_pinned = True
error.context("Sleep a while and check the time drift on"
"guest (with all pinned vcpus)", logging.info)
verify_elapsed_time()
......@@ -303,11 +303,6 @@ def run_virtio_console(test, params, env):
@param cfg: virtio_console_params - which type of virtio port to test
@param cfg: virtio_port_spread - how many devices per virt pci (0=all)
"""
# When the GW is already running and the thread only connects,
# every signal destroys the daemon. Fresh start solves the problem.
error.context("Reloading the GuestWorker before sigio test.",
logging.info)
test_delete_guest_script()
(vm, guest_worker, port) = get_vm_with_single_port(
params.get('virtio_console_params'))
if port.is_open():
......
......@@ -151,3 +151,6 @@
- wb_kupdate:
            test_timeout = 1800
test_control_file = wb_kupdate.control
- libhugetlbfs:
no RHEL.3 RHEL.4
test_control_file = libhugetlbfs.control
......@@ -2,7 +2,6 @@
virt_test_type = qemu libvirt
no JeOS
only Linux
only virtio_net
type = netperf
kill_vm = yes
image_snapshot = yes
......@@ -11,6 +10,7 @@
netdst_nic1 = private
nic_model_nic1 = virtio
netdst_nic2 = switch
    # Configure different types of network adapters.
nic_model_nic2 = e1000
netperf_files = netperf-2.6.0.tar.bz2
setup_cmd = "cd /tmp && rm -rf netperf-2.6.0 && tar xvfj netperf-2.6.0.tar.bz2 && cd netperf-2.6.0 && ./configure --enable-burst --enable-demo=yes && make"
......
......@@ -2,7 +2,6 @@
virt_test_type = qemu libvirt
only Linux
type = nicdriver_unload
filesize = 100
transfer_timeout = 100
transfer_type = remote
sessions_num = 10
filesize = 512
transfer_timeout = 1000
sessions_num = 5
......@@ -13,3 +13,6 @@
image_snapshot = yes
kill_vm_vm2 = yes
kill_vm_gracefully_vm2 = no
cmd_type = ip
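        # RHEL.5's iproute2 predates "ip link ... type vlan", hence the
        # vconfig fallback below.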
RHEL.5:
cmd_type = vconfig
......@@ -69,7 +69,5 @@ def run_nic_promisc(test, params, env):
raise
else:
transfer_thread.join()
if session_serial:
session_serial.close()
if session:
session.close()
import logging, os, time
import logging, os, time, random
from autotest.client import utils
from autotest.client.shared import error
from virttest import utils_test, utils_net
from virttest import utils_misc, utils_net, aexpect, data_dir
@error.context_aware
def run_nicdriver_unload(test, params, env):
......@@ -19,51 +18,138 @@ def run_nicdriver_unload(test, params, env):
@param params: Dictionary with the test parameters.
@param env: Dictionary with test environment.
"""
def send_cmd_safe(session, cmd, timeout=60):
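        # Poll for the shell prompt instead of using session.cmd(): while
        # the NIC driver is unloaded the serial console may respond slowly,
        # and a hard ExpectTimeoutError here would abort the whole test.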
logging.debug("Sending command: %s", cmd)
session.sendline(cmd)
output = ""
start_time = time.time()
# Wait for shell prompt until timeout.
while (time.time() - start_time) < timeout:
session.sendline()
try:
output += session.read_up_to_prompt(0.5)
break
except aexpect.ExpectTimeoutError:
pass
return output
    def all_threads_done(threads):
        for thread in threads:
            if thread.isAlive():
                return False
        return True

    def all_threads_alive(threads):
        for thread in threads:
            if not thread.isAlive():
                return False
        return True
timeout = int(params.get("login_timeout", 360))
transfer_timeout = int(params.get("transfer_timeout", 1000))
filesize = int(params.get("filesize", 512))
vm = env.get_vm(params["main_vm"])
vm.verify_alive()
session_serial = vm.wait_for_serial_login(timeout=timeout)
session = vm.wait_for_login(timeout=timeout)
error.base_context("Test env prepare")
error.context("Get NIC interface name in guest.", logging.info)
ethname = utils_net.get_linux_ifname(session_serial,
vm.get_mac_address(0))
ethname = utils_net.get_linux_ifname(session, vm.get_mac_address(0))
# get ethernet driver from '/sys' directory.
# ethtool can do the same thing and doesn't care about os type.
# if we make sure all guests have ethtool, we can make a change here.
sys_path = params.get("sys_path") % (ethname)
# readlink in RHEL4.8 doesn't have '-e' param, should use '-f' in RHEL4.8.
readlink_cmd = params.get("readlink_command", "readlink -e")
driver = os.path.basename(session_serial.cmd("%s %s" % (readlink_cmd,
sys_path)).strip())
driver = os.path.basename(session.cmd("%s %s" % (readlink_cmd,
sys_path)).strip())
logging.info("The guest interface %s using driver %s" % (ethname, driver))
logging.info("driver is %s", driver)
error.context("Host test file prepare, create %dMB file on host" %
filesize, logging.info)
tmp_dir = data_dir.get_tmp_dir()
host_path = os.path.join(tmp_dir, "host_file_%s" %
utils_misc.generate_random_string(8))
guest_path = os.path.join("/home", "guest_file_%s" %
utils_misc.generate_random_string(8))
cmd = "dd if=/dev/zero of=%s bs=1M count=%d" % (host_path, filesize)
utils.run(cmd)
file_checksum = utils.hash_file(host_path, "md5")
error.context("Guest test file prepare, Copy file %s from host to guest"
% host_path, logging.info)
vm.copy_files_to(host_path, guest_path, timeout=transfer_timeout)
if session.cmd_status("md5sum %s | grep %s" %
(guest_path, file_checksum)):
raise error.TestNAError("File MD5SUMs changed after copy to guest")
logging.info("Test env prepare successfully")
error.base_context("Nic driver load/unload testing", logging.info)
session_serial = vm.wait_for_serial_login(timeout=timeout)
try:
error.context("Transfer file between host and guest", logging.info)
threads = []
for i in range(int(params.get("sessions_num", "10"))):
txt = "File transfer on test interface. Thread %s" % i
error.context(txt, logging.info)
thread = utils.InterruptedThread(utils_test.run_file_transfer,
(test, params, env))
thread.start()
threads.append(thread)
time.sleep(10)
logging.info("Repeatedly unload/load NIC driver during file transfer.")
while threads[0].isAlive():
session_serial.cmd("sleep 10")
error.context("Shutdown the driver for NIC interface.", logging.info)
session_serial.cmd("ifconfig %s down" % ethname)
file_paths = []
host_file_paths = []
for sess_index in range(int(params.get("sessions_num", "10"))):
sess_path = os.path.join("/home","dst-%s" % sess_index)
host_sess_path = os.path.join(tmp_dir,"dst-%s" % sess_index)
            thread1 = utils.InterruptedThread(vm.copy_files_to,
                                              (host_path, sess_path),
                                              {"timeout": transfer_timeout})
            thread2 = utils.InterruptedThread(vm.copy_files_from,
                                              (guest_path, host_sess_path),
                                              {"timeout": transfer_timeout})
thread1.start()
threads.append(thread1)
thread2.start()
threads.append(thread2)
file_paths.append(sess_path)
host_file_paths.append(host_sess_path)
utils_misc.wait_for(lambda: all_threads_alive(threads), 60, 10, 1)
time.sleep(5)
error.context("Repeatedly unload/load NIC driver during file transfer",
logging.info)
while not all_threads_done(threads):
error.context("Shutdown the driver for NIC interface.",
logging.info)
send_cmd_safe(session_serial, "ifconfig %s down" % ethname)
error.context("Unload NIC driver.", logging.info)
session_serial.cmd("modprobe -r %s" % driver)
send_cmd_safe(session_serial, "modprobe -r %s" % driver)
error.context("Load NIC driver.", logging.info)
session_serial.cmd("modprobe %s" % driver)
send_cmd_safe(session_serial, "modprobe %s" % driver)
error.context("Activate NIC driver.", logging.info)
session_serial.cmd("ifconfig %s up" % ethname)
send_cmd_safe(session_serial, "ifconfig %s up" % ethname)
send_cmd_safe(session_serial, "sleep %s" %
random.randint(10, 60))
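            # The random 10-60s pause gives the transfer threads time to
            # make progress with the driver loaded before the next cycle.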
        # Check md5sums of all transferred files.
error.context("File transfer finished, checking files md5sums",
logging.info)
err_info = []
for copied_file in file_paths:
if session_serial.cmd_status("md5sum %s | grep %s" %
(copied_file, file_checksum)):
err_msg = "Guest file %s md5sum changed"
err_info.append(err_msg % copied_file)
for copied_file in host_file_paths:
if utils.system("md5sum %s | grep %s" %
(copied_file, file_checksum)):
err_msg = "Host file %s md5sum changed"
err_info.append(err_msg % copied_file)
if err_info:
raise error.TestError("files MD5SUMs changed after copying %s" %
err_info)
except Exception:
for thread in threads:
thread.join(suppress_exception=True)
......@@ -71,3 +157,11 @@ def run_nicdriver_unload(test, params, env):
else:
for thread in threads:
thread.join()
for copied_file in file_paths:
session_serial.cmd("rm -rf %s" % copied_file)
for copied_file in host_file_paths:
utils.system("rm -rf %s" % copied_file)
session_serial.cmd("%s %s" % ("rm -rf", guest_path))
os.remove(host_path)
session.close()
session_serial.close()
......@@ -6,12 +6,11 @@ from virttest import utils_misc, utils_test, aexpect, utils_net
@error.context_aware
def run_vlan(test, params, env):
"""
Test 802.1Q vlan of NIC, config it by vconfig command.
Test 802.1Q vlan of NIC, config it by vconfig/ip command.
1) Create two VMs.
2) load 8021q module in guest for vconfig.
3) Setup vlans by vconfig in guest and using hard-coded
ip address.
2) load 8021q module in guest.
3) Setup vlans by vconfig/ip in guest and using hard-coded ip address.
4) Enable arp_ignore for all ipv4 device in guest.
5) Repeat steps 2 - 4 in every guest.
6) Test by ping between same and different vlans of two VMs.
......@@ -24,35 +23,64 @@ def run_vlan(test, params, env):
@param params: Dictionary with the test parameters.
@param env: Dictionary with test environment.
"""
def add_vlan(session, v_id, iface="eth0"):
def add_vlan(session, v_id, iface="eth0", cmd_type="ip"):
"""
Creates a vlan-device on iface by cmd that assigned by cmd_type
now only support 'ip' and 'vconfig'
"""
txt = "Create a vlan-device on interface %s with vlan id %s" % (iface,
v_id)
error.context(txt, logging.info)
session.cmd("vconfig add %s %s" % (iface, v_id))
if cmd_type == "vconfig":
cmd = "vconfig add %s %s" % (iface, v_id)
elif cmd_type == "ip":
v_name = "%s.%s" % (iface, v_id)
cmd = "ip link add link %s %s type vlan id %s " % (iface,
v_name, v_id)
else:
err_msg = "Unexpected vlan operation command: %s" % cmd_type
err_msg += "only support 'ip' and 'vconfig' now"
raise error.TestError(err_msg)
session.cmd(cmd)
def set_ip_vlan(session, v_id, ip, iface="eth0"):
"""
Set ip address of vlan interface
"""
iface = "%s.%s" % (iface, v_id)
txt = "Set ip to '%s' for interface '%s'" % (iface, ip)
error.context(txt, logging.info)
session.cmd("ifconfig %s %s" % (iface, ip))
def set_arp_ignore(session, iface="eth0"):
error.context("Enable arp_ignore for all ipv4 device in guest",
def set_arp_ignore(session):
"""
Enable arp_ignore for all ipv4 device in guest
"""
error.context("Enable arp_ignore for all ipv4 device in guest",
logging.info)
ignore_cmd = "echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore"
session.cmd(ignore_cmd)
def rem_vlan(session, v_id, iface="eth0"):
rem_vlan_cmd = "if [[ -e /proc/net/vlan/%s ]];then vconfig rem %s;fi"
iface = "%s.%s" % (iface, v_id)
error.context("Remove the named vlan-device '%s'." % iface,
logging.info)
return session.cmd_status(rem_vlan_cmd % (iface, iface))
def rem_vlan(session, v_id, iface="eth0", cmd_type="ip"):
"""
Removes the named vlan interface(iface+v_id)
"""
v_iface = '%s.%s' % (iface, v_id)
if cmd_type == "vconfig":
rem_vlan_cmd = "vconfig rem %s" % v_iface
elif cmd_type == "ip":
rem_vlan_cmd = "ip link delete %s" % v_iface
else:
err_msg = "Unexpected vlan operation command: %s" % cmd_type
err_msg += "only support 'ip' and 'vconfig' now"
raise error.TestError(err_msg)
send_cmd = "[ -e /proc/net/vlan/%s ] && %s" % (v_iface, rem_vlan_cmd)
error.context("Remove the vlan-device '%s'." % v_iface, logging.info)
return session.cmd_status(send_cmd)
def nc_transfer(src, dst):
......@@ -81,7 +109,7 @@ def run_vlan(test, params, env):
logging.info("digest_origin is %s", digest_origin[src])
logging.info("digest_receive is %s", digest_receive)
raise error.TestFail("File transfered differ from origin")
session[dst].cmd_output("rm -f receive")
session[dst].cmd("rm -f receive")
def flood_ping(src, dst):
......@@ -104,10 +132,12 @@ def run_vlan(test, params, env):
digest_origin = []
vlan_ip = ['', '']
ip_unit = ['1', '2']
subnet = params.get("subnet")
vlan_num = int(params.get("vlan_num"))
subnet = params.get("subnet", "192.168")
vlan_num = int(params.get("vlan_num", 5))
maximal = int(params.get("maximal"))
file_size = params.get("file_size")
file_size = params.get("file_size", 4094)
cmd_type = params.get("cmd_type", "ip")
login_timeout = int(params.get("login_timeout", 360))
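    # Addressing scheme used below: vlan v on guest i gets the address
    # "<subnet>.<v>.<ip_unit[i]>", e.g. 192.168.3.1 (vm1) and
    # 192.168.3.2 (vm2) share vlan 3.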
vm.append(env.get_vm(params["main_vm"]))
vm.append(env.get_vm("vm2"))
......@@ -115,11 +145,10 @@ def run_vlan(test, params, env):
vm_.verify_alive()
for i in range(2):
session.append(vm[i].wait_for_login(
timeout=int(params.get("login_timeout", 360))))
session.append(vm[i].wait_for_login(timeout=login_timeout))
        if not session[i]:
raise error.TestError("Could not log into guest(vm%d)" % i)
logging.info("Logged in")
raise error.TestError("Could not log into guest %s" % vm[i].name)
logging.info("Logged in %s successfull" % vm[i].name)
ifname.append(utils_net.get_linux_ifname(session[i],
vm[i].get_mac_address()))
......@@ -134,22 +163,23 @@ def run_vlan(test, params, env):
digest_origin.append(re.findall(r'(\w+)', output)[0])
#stop firewall in vm
session[i].cmd_output("/etc/init.d/iptables stop")
session[i].cmd("service iptables stop; true")
error.context("load 8021q module in guest for vconfig", logging.info)
error.context("load 8021q module in guest %s" % vm[i].name,
logging.info)
session[i].cmd("modprobe 8021q")
try:
for i in range(2):
logging.info("Setup vlan environment in guest %s" % vm[i].name)
for vlan_i in range(1, vlan_num+1):
add_vlan(session[i], vlan_i, ifname[i])
set_ip_vlan(session[i], vlan_i, "%s.%s.%s" %
(subnet, vlan_i, ip_unit[i]), ifname[i])
set_arp_ignore(session[i], ifname[i])
add_vlan(session[i], vlan_i, ifname[i], cmd_type)
v_ip = "%s.%s.%s" % (subnet, vlan_i, ip_unit[i])
set_ip_vlan(session[i], vlan_i, v_ip, ifname[i])
set_arp_ignore(session[i])
for vlan in range(1, vlan_num+1):
logging.info("Test for vlan %s", vlan)
error.context("Test for vlan %s" % vlan, logging.info)
error.context("Ping test between vlans", logging.info)
interface = ifname[0] + '.' + str(vlan)
......@@ -177,8 +207,8 @@ def run_vlan(test, params, env):
finally:
for vlan in range(1, vlan_num+1):
logging.info("rem vlan: %s", vlan)
rem_vlan(session[0], vlan, ifname[0])
rem_vlan(session[1], vlan, ifname[1])
rem_vlan(session[0], vlan, ifname[0], cmd_type)
rem_vlan(session[1], vlan, ifname[1], cmd_type)
# Plumb/unplumb maximal number of vlan interfaces
i = 1
......@@ -186,10 +216,10 @@ def run_vlan(test, params, env):
try:
error.context("Testing the plumb of vlan interface", logging.info)
        for i in range(1, maximal+1):
add_vlan(session[0], i, ifname[0])
add_vlan(session[0], i, ifname[0], cmd_type)
finally:
        for j in range(1, i+1):
s = s or rem_vlan(session[0], j, ifname[0])
s = s or rem_vlan(session[0], j, ifname[0], cmd_type)
if s == 0:
logging.info("maximal interface plumb test done")
else:
......