import json
import logging
import re

from virttest import error_context
from virttest import data_dir
from virttest import cpu
from avocado.utils import process


@error_context.context_aware
def run(test, params, env):
    """
    Run kvm-unit-tests for the Hyper-V testdev device.

    1) Compile the kvm-unit-tests tools from the bundled source tarball.
    2) Run each configured unit test via the compiled ./x86/run wrapper.
    3) For each unit test, count output lines starting with the expected
       marker (e.g. "PASS") and fail if the count differs from the target.

    :param test: QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with the test environment.
    """
    tmp_dir = data_dir.get_tmp_dir()
    kvm_unit_tests_dir = data_dir.get_deps_dir("kvm_unit_tests")
    compile_cmd = params["compile_cmd"] % (tmp_dir, kvm_unit_tests_dir)
    test_cmd = params["test_cmd"]
    # Mapping of unit-test binary name -> [expected line marker, expected
    # number of matching lines], supplied as JSON in the cfg file.
    unit_tests_mapping = json.loads(params["unit_tests_mapping"])
    skip_tests = params.get("skip_tests", "").split()
    cpu_flags = params["cpu_model_flags"]
    cpu_model = cpu.get_qemu_best_cpu_model(params)
    cpu_param = cpu_model + cpu_flags

    error_context.context("Copy & compile kvm-unit-test tools", logging.info)
    process.system(compile_cmd, shell=True)

    error_context.context("Run unit tests", logging.info)
    for unit_test, (marker, expected_count) in unit_tests_mapping.items():
        if unit_test in skip_tests:
            continue
        # Lazy %-args: formatting only happens if the record is emitted.
        logging.info("Start running unit test %s", unit_test)
        unit_test_cmd = test_cmd % (tmp_dir, unit_test, cpu_param)
        # NOTE(review): system_output raises CmdError on a non-zero exit
        # status, so a crashing unit test aborts here before the count
        # check below — confirm this is the intended failure mode.
        result_output = process.system_output(unit_test_cmd, shell=True)
        result_output = result_output.decode()
        # Count lines beginning with the expected marker; re.escape guards
        # against regex metacharacters sneaking in via the config value.
        find_result = re.findall(r'^%s' % re.escape(marker),
                                 result_output, re.M)
        if len(find_result) != int(expected_count):
            test.fail("Unit test result mismatch target, "
                      "target=%s, output=%s" %
                      (expected_count, result_output))