Commit 8d707406 authored by Xu Tian

Merge pull request #430 from xutian/mem_hotplug

tp-qemu: add memory hotplug/unplug tests
# Notes:
# Before starting the test, please ensure that your guest OS
# supports memory hotplug;
- hotplug_memory:
type = hotplug_mem
slots_mem = 4
size_mem = 1G
maxmem_mem = 32G
mem_devs = mem1
login_timeout = 600
no Host_RHEL.6
no RHEL.5 RHEL.6
no Windows..i386
no WinXP Win2000 Win2003 WinVista
variants numa_nodes:
- one:
guest_numa_nodes = "node0"
del numa_mem
del numa_cpus
del numa_nodeid
only after
only reboot
- two:
guest_numa_nodes = "node0 node1"
del numa_mem
del numa_cpus
numa_nodeid = 0
node_dimm_mem2 = 0
node_dimm_mem1 = 1
numa_nodeid_node0 = 0
numa_nodeid_node1 = 1
variants:
- policy_default:
policy_mem = default
- policy_bind:
policy_mem = bind
host-nodes = 0
- policy_interleave:
policy_mem = interleave
host-nodes = 0
only reboot
only after
- policy_preferred:
policy_mem = preferred
host-nodes = 0
only reboot
only after
- no_policy:
del policy_mem
only reboot
only after
variants:
- backend_ram:
backend_mem = memory-backend-ram
- backend_file:
# Notes:
# Before starting the test, please ensure your host
# kernel supports hugepages and has enough memory
# to allocate the guest memory
backend_mem = memory-backend-file
setup_hugepages = yes
# mem path should be the hugepage mount path
mem-path = /mnt/kvm_hugepage
# default pagesize is 2M, 2G guest memory need to allocate
# in hugepages, so the page number is:
# target_hugepages = size_mem / page_size + 10
target_hugepages = 1034
pre_command = "echo 3 > /proc/sys/vm/drop_caches"
pre_command_noncritical = yes
variants operation:
- unplug:
no Windows
variants:
- buildin_memory:
mem_devs += " buildin"
target_mem = "buildin"
- pluged_memory:
plug_mem = "plug"
target_mem = "plug"
- unused_memory:
target_mem = "unused"
mem_devs += " ${target_mem}"
use_mem_unused = no
- hotplug:
target_mem = "plug"
variants sub_test:
- system_reset:
sub_type = boot
reboot_method = system_reset
sleep_before_reset = 0
- reboot:
sub_type = boot
reboot_method = shell
kill_vm_on_error = yes
reboot_count = 1
- migration:
sub_type = migration
migration_test_command = help
migration_bg_command = "cd /tmp; nohup tcpdump -q -i any -t ip host localhost"
migration_bg_check_command = pgrep tcpdump
migration_bg_kill_command = pkill -9 tcpdump
kill_vm_on_error = yes
iterations = 2
used_mem = 1024
mig_timeout = 3600
ping_pong = 1
migration_protocol = "tcp"
only after
- pause_vm:
sub_type = stop_continue
pause_time = 30
wait_resume_timeout = "${pause_time}"
- stress:
variants stage:
- before:
- after:
- during:
import logging
from virttest.utils_test import BackgroundTest
from virttest.utils_test import run_virt_sub_test
from virttest.utils_test.qemu import MemoryHotplugTest
# Make it work under both autotest-framework and avocado-framework
try:
from avocado.utils.wait import wait_for
except ImportError:
from autotest.client.shared.utils import wait_for
try:
from virttest import error_context as step_engine
except ImportError:
from autotest.client.shared.error import step_engine
class MemoryHotplugSimple(MemoryHotplugTest):
    """
    Memory hotplug/unplug test helper.

    Hotplugs or unplugs the memory device named by params["target_mem"],
    optionally running a sub test before, during, or after the operation
    (params["stage"]), and finally restores the VM's original memory
    device layout.
    """

    def run_sub_test(self):
        """
        Run the configured sub test (params["sub_type"]) synchronously.

        No-op when "sub_type" is unset.
        """
        if self.params.get("sub_type"):
            step = ("Run sub test '%s' %s %s memory device" %
                    (self.params["sub_test"],
                     self.params["stage"],
                     self.params["operation"]))
            step_engine.context(step, logging.info)
            args = (self.test, self.params, self.env, self.params["sub_type"])
            run_virt_sub_test(*args)

    def run_background_test(self):
        """
        Start the sub test in a background thread.

        :return: the running BackgroundTest instance (caller joins it).
        """
        args = (self.test, self.params, self.env, self.params["sub_type"])
        bg_test = BackgroundTest(run_virt_sub_test, args)
        bg_test.start()
        # Block until the background thread has actually started running.
        wait_for(bg_test.is_alive, timeout=60)
        return bg_test

    def restore_memory(self, origin_vm, post_vm):
        """
        Restore the original memory device layout.

        Compare the "mem_devs" lists of the original and the post-test VM;
        hotplug the missing devices (or unplug the extra ones) and verify
        guest memory afterwards. No-op when the layouts already match.

        :param origin_vm: VM object holding the original params.
        :param post_vm: VM object holding the post-test params.
        """
        mem_devs_post = post_vm.params.objects("mem_devs")
        mem_devs_origin = origin_vm.params.objects("mem_devs")
        if len(mem_devs_post) != len(mem_devs_origin):
            if len(mem_devs_origin) > len(mem_devs_post):
                # Devices were unplugged during the test: plug them back.
                mem_devs = set(mem_devs_origin) - set(mem_devs_post)
                vm, operation = post_vm, "hotplug"
            else:
                # Devices were hotplugged during the test: remove them.
                mem_devs = set(mem_devs_post) - set(mem_devs_origin)
                vm, operation = origin_vm, "unplug"
            func = getattr(self, "%s_memory" % operation)
            for mem_dev in mem_devs:
                func(vm, mem_dev)
            self.check_vm_memory(vm)

    def get_mem_by_name(self, vm, name):
        """
        Return memory object and pc-dimm devices by given name

        :param vm: VM object whose device container is searched.
        :param name: memory device name (without the type prefix).
        :return: list with the matching memory backend and/or dimm devices
                 (empty when neither exists).
        """
        devices = []
        for dtype in ["mem", "dimm"]:
            dev_qid = "%s-%s" % (dtype, name)
            devs = vm.devices.get_by_qid(dev_qid)
            if devs:
                devices.append(devs[0])
        return devices

    def start_test(self):
        """
        Execute the hotplug/unplug scenario.

        Runs the sub test at the configured stage, performs the memory
        operation, restores the original memory layout, and always closes
        the opened sessions.
        """
        try:
            target_mem = self.params["target_mem"]
            operation = self.params["operation"]
            vm = self.env.get_vm(self.params["main_vm"])
            if self.params.get("stage") == "before":
                self.run_sub_test()
            # If the memory device to unplug does not exist yet and the
            # test is not strict, hotplug it first so the unplug has a
            # target to operate on.
            if operation == "unplug":
                devs = self.get_mem_by_name(vm, target_mem)
                if not devs and self.params.get("strict") != "yes":
                    self.hotplug_memory(vm, target_mem)
            func_name = "%s_memory" % operation
            func = getattr(self, func_name)
            if self.params.get("stage") == "during":
                sub_test = self.run_background_test()
            func(vm, target_mem)
            if self.params.get("stage") == "during":
                test_timeout = float(self.params.get("sub_test_timeout", 3600))
                sub_test.join(timeout=test_timeout)
            if self.params.get("stage") == "after":
                self.run_sub_test()
            post_vm = self.env.get_vm(self.params["main_vm"])
            self.restore_memory(vm, post_vm)
        finally:
            self.close_sessions()
@step_engine.context_aware
def run(test, params, env):
    """
    Qemu memory hotplug test:
    1) Boot guest with -m option
    2) Run sub test before hotplug/unplug memory device
    3) Hotplug/unplug memory device
    4) Check hotpluged memory detect in guest OS
    5) Check no calltrace in guest/host dmesg
    6) Hotplug/unplug memory device
    7) Run sub test after plug/unplug memory device

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    # Delegate the whole scenario to the test helper class.
    MemoryHotplugSimple(test, params, env).start_test()
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册