Commit d0cba585 authored by Yiqiao Pu, committed by Lucas Meneghel Rodrigues

qemu.tests: Update the summary case for performance

Update the summary case for performance so that it fits both
iozone and ffsb, and add the part that writes results to the database.

The new function takes a key param, "marks". Its entries are
separated by blanks and follow this format:
    marks = $key_shows_in_result_table:$pattern_to_find_the_value
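
For illustration only (a minimal Python sketch, not part of this commit; the
marks value and the result text are hypothetical): each entry splits on the
":" into a column name and a regex that pulls the value out of the guest
result file, which is how result_sum() consumes the param:

    import re

    marks = r"IOPS:(\d+\.\d+)\s+Transactions\s+per\s+Second"   # one hypothetical entry
    result_context = "3412.78  Transactions per Second\n"      # hypothetical guest_result content

    for mark in marks.split():
        mark_tag, mark_key = mark.split(":", 1)    # column name, value pattern
        values = re.findall(mark_key, result_context)
        print(mark_tag, values)                    # IOPS ['3412.78']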

changes from v1:
  - Use out_loop_line instead of order_line inside the loop,
    otherwise the iozone results will always be the first
    category's data
changes from v2:
  - Update the summary so it can get results by configuration type
    and repeat count
  - Remove the desc string to fit the newest regression.py
  - Add support for qcow2perf results summary
changes from v3:
  - Update how the category key is retrieved
  - Remove leftover debug lines
changes from v4:
  - Fix the index problem when generating the dict for guest files
Signed-off-by: Yiqiao Pu <ypu@redhat.com>

As there are some prepare cases we do not want to include in the
results summary file, add an ignore_cases list to the cfg files.
Signed-off-by: Yiqiao Pu <ypu@redhat.com>
Acked-by: Feng Yang <fyang@redhat.com>
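
A minimal sketch of the effect (the directory names are made up, not part of
the commit): any result directory whose path contains an entry from
ignore_cases is skipped while collecting result files, so the prepare runs
never reach the summary:

    ignore_cases = "file_prepare".split()     # value taken from the cfg file
    result_dirs = ["ffsb.file_prepare", "ffsb.large_file_creates_256k_1"]   # hypothetical

    for result_dir in result_dirs:
        if any(case in result_dir for case in ignore_cases):
            continue                          # prepare case, left out of the summary
        print("summarizing", result_dir)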

Currently not all of the data taken from the guest test results is
covered, and values get truncated from float to int. So add up all of the
numbers taken from the results and update the pattern in standard_value
to capture float values.
Signed-off-by: Yiqiao Pu <ypu@redhat.com>
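
A minimal sketch of the summing step (the result text and pattern are made up
for the example): every match of the pattern is parsed as a float and added
up, instead of taking one value that gets truncated to an int:

    import re

    result_context = "Read Throughput: 102.5\nWrite Throughput: 98.5"   # hypothetical
    values = re.findall(r"Throughput:\s+([\d\.]+)", result_context)

    total = 0.0
    for value in values:
        total += float(value)     # keep float precision and sum every match
    print(total)                  # 201.0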

regression.py requires the summary results to have the same order across
different rounds of the same test, so keep an ordered list of the keys
in the dict.
Signed-off-by: Yiqiao Pu <ypu@redhat.com>
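
The idea as a small sketch (names and values are illustrative, not the exact
code): each column tag is appended to a list the first time it is seen, and
both the header line and the data lines are later built by walking that list
instead of iterating the dict:

    results = {"IOPS": 3412.78, "Thro-MBps": 201.10, "Hostcpu": 35.20}   # hypothetical row
    order_list = []
    for tag in ("IOPS", "Thro-MBps", "Hostcpu"):    # the order the tags were parsed in
        if tag not in order_list:
            order_list.append(tag)

    headline = "|".join("%20s" % tag for tag in order_list)
    dataline = "|".join("%20.2f" % results[tag] for tag in order_list)
    print(headline)
    print(dataline)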

In the old code, results such as read and write were written to
results_matrix, which caused an empty table in the results file. Remove
them from the matrix and only put them into the no_table_results dict.
Signed-off-by: Yiqiao Pu <ypu@redhat.com>
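
A small sketch of the change (keys and values are hypothetical): entries
named in no_table_list are diverted into a separate dict and written out as
plain "key: value" lines after the tables, so they no longer produce an
empty table:

    no_table_list = ["read", "write"]                 # keys that should not form a table
    results = {"read": 10.5, "write": 9.8, "Thro-MBps": 120.3}

    no_table_results = dict((k, v) for k, v in results.items() if k in no_table_list)
    results_matrix = dict((k, v) for k, v in results.items() if k not in no_table_list)

    print(results_matrix)                             # still rendered as a normal table
    for key in sorted(no_table_results):
        print("%s: %s" % (key, no_table_results[key]))  # appended after the result tables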

Update the version info to fit the regression.py requirement.
Signed-off-by: Yiqiao Pu <ypu@redhat.com>

Move the cache from the data line to the head line, as required when
summarizing qcow2perf results, and remove the comment lines in the scripts.
Signed-off-by: Yiqiao Pu <ypu@redhat.com>
Parent 037decc8
@@ -7,7 +7,7 @@
only Linux
md5value = "cabfc1021c2ec6c6b168fefc84210891"
images += " stg2"
image_name_stg2 = storage2
image_name_stg2 = images/storage2
image_size_stg2 = 110G
force_create_image = yes
force_create_image_image1 = no
@@ -17,11 +17,10 @@
test_src = "http://cdnetworks-kr-1.dl.sourceforge.net/project/ffsb/ffsb/ffsb-6.0-rc2/ffsb-6.0-rc2.tar.bz2"
compile_cmd = "./configure && make"
prepare_cmd = " mount /dev/[sv]db /mnt"
result_path = "/tmp/guest_result"
ignore_pattern = "Linux|^(\n)"
head_pattern = "\d+:\d+:\d+\s+[AP]M\s+(\w+)\s+"
row_pattern = "CPU"
categories = "LargeFile Creates(256KB)|LargeFile Create(8KB)|Mail Server(8KB)|Radom Reads(8KB)|Random Writes(8KB)|Sequential Reads(256KB)|Sequential Reads(8KB)"
threads = "1 8 16"
variants:
- file_prepare:
prepare_cmd = "echo y|mkfs -t ext4 /dev/[sv]db; mount /dev/[sv]db /mnt; rm -rf /mnt/ffsb1;mkdir -p /mnt/ffsb1"
@@ -112,5 +111,46 @@
test_patch = "ffsb_sequential_reads_8k_16.patch"
test_cmd = "ffsb examples/sequential_reads_8k_16.ffsb"
- summary_results:
summary_results = "yes"
test = "ffsb"
marks = "IOPS:(\d+\.\d+)\s+Transactions\s+per\s+Second "
marks += "Thro-MBps:[Read|Write]\s+Throughput.\s+([\d\.\w]+)"
sum_marks = "Thro-MBps Hostcpu"
mpstat = yes
ignore_cases = "file_prepare"
- iozone:
test_timeout = 1200
monitor_cmd = "mpstat -P ALL 1"
test_cmd = "./run_iozone.sh"
test_src = "http://www.iozone.org/src/current/iozone3_373.tar"
compile_cmd = "cd src/current && make linux"
prepare_cmd = "i=`/bin/ls /dev/[vs]db` "
prepare_cmd += "&& dd if=$i bs=64k >/dev/null && echo 1 "
prepare_cmd += "&& echo y | mkfs -t ext4 $i > /dev/null && echo 2 ;"
prepare_cmd += " partprobe && echo 3 ; umount /mnt ; mount $i /mnt "
prepare_cmd += "&& echo 3 > /proc/sys/vm/drop_caches && sleep 3"
result_path = "/tmp/guest_result"
ignore_pattern = "Linux|^(\n)"
head_pattern = "\d+:\d+:\d+\s+[AP]M\s+(\w+)\s+"
row_pattern = "CPU"
md5value = "6ce0277d3d1769f38040b84853a3472c"
images += " stg2"
image_name_stg2 = images/storage2
image_size_stg2 = 10G
force_create_image = yes
force_create_image_image1 = no
x86_64:
compile_cmd = "cd src/current && make linux-AMD64"
variants:
- incache_64k:
test_patch = "iozone_incache.patch"
- outcache_64k:
test_patch = "iozone_outcache.patch"
- dio_64k:
test_patch = "iozone_dio.patch"
- summary_results_iozone:
summary_results = yes
test = iozone
marks = "Write_Thro-KBps:\"Writer\s+report\"\n\s+\"\d+\"\n\"\d+\"\s+(\d+) "
marks += "ReWrite_Thro-KBps:\"Re-writer\s+report\"\n\s+\"\d+\"\n\"\d+\"\s+(\d+) "
marks += "Read_Thro-KBps:\"Reader\s+report\"\n\s+\"\d+\"\n\"\d+\"\s+(\d+) "
marks += "Reread_Thro-KBps:\"Re-Reader\s+report\"\n\s+\"\d+\"\n\"\d+\"\s+(\d+)"
mpstat = yes
import os, re, commands, glob, shutil
from autotest.client.shared import error
from autotest.client import utils
from virttest import utils_test
from virttest import utils_test, utils_misc, data_dir
def run_performance(test, params, env):
@@ -12,7 +12,7 @@ def run_performance(test, params, env):
but we can implement some special requests for performance
testing.
@param test: kvm test object
@param test: QEMU test object
@param params: Dictionary with the test parameters
@param env: Dictionary with test environment.
"""
@@ -35,9 +35,8 @@ def run_performance(test, params, env):
guest_ver = session.cmd_output("uname -r").strip()
if summary_results:
if params.get("test") == "ffsb":
ffsb_sum(os.path.dirname(test.outputdir), prefix, params, guest_ver,
test.resultsdir)
result_dir = params.get("result_dir", os.path.dirname(test.outputdir))
result_sum(result_dir, params, guest_ver, test.resultsdir, test)
session.close()
return
@@ -63,7 +62,8 @@ def run_performance(test, params, env):
session.cmd("mv /tmp/src_tmp/%s /tmp/src" % new_file)
if test_patch:
test_patch_path = os.path.join(test.bindir, '../ffsb/examples', test_patch)
test_patch_path = os.path.join(data_dir.get_root_dir(), 'shared',
'deps', 'performance', test_patch)
vm.copy_files_to(test_patch_path, "/tmp/src")
session.cmd("cd /tmp/src && patch -p1 < /tmp/src/%s" % test_patch)
@@ -124,81 +124,310 @@ def run_performance(test, params, env):
session.cmd("rm -rf pid_file*")
session.close()
def ffsb_sum(topdir, prefix, params, guest_ver, resultsdir):
marks = ["Transactions per Second", "Read Throughput", "Write Throughput"]
matrix = []
sum_thro = 0
sum_hostcpu = 0
cmd = 'find %s|grep "%s.*guest_results/guest_result"|grep -v prepare|sort' \
% (topdir, prefix)
for guest_result_file in commands.getoutput(cmd).split():
sub_dir = os.path.dirname(guest_result_file)
content = open(guest_result_file, "r").readlines()
linestr = []
readthro = 0
writethro = 0
for line in content:
if marks[0] in line:
iops = "%8s" % re.split("\s+", line)[0]
elif marks[1] in line:
substr = re.findall("\d+(?:\.\d+)*", line)[0]
readthro = utils_test.aton("%.2f" % float(substr))
elif marks[2] in line:
substr = re.findall("\d+(?:\.\d+)*", line)[0]
writethro = utils_test.aton("%.2f" % float(substr))
break
throughput = readthro + writethro
linestr.append(iops)
linestr.append(throughput)
sum_thro += throughput
filename = glob.glob(os.path.join(sub_dir, "guest_monitor_result*.sum"))[0]
sr = open(filename, "r").readlines()
linestr.append("%8.2f" % (100 - utils_test.aton(sr[1].split()[3])))
linestr.append("%8.2f" % (100 - utils_test.aton(sr[2].split()[3])))
filename = glob.glob(os.path.join(sub_dir, "host_monitor_result*.sum"))[0]
sr = open(filename, "r").readlines()
hostcpu = 100 - utils_test.aton(sr[-1].split()[3])
linestr.append(hostcpu)
sum_hostcpu += hostcpu
linestr.append("%.2f" % (throughput/hostcpu))
matrix.append(linestr)
headstr = "threads| IOPS| Thro(MBps)| Vcpu1| Vcpu2| Hostcpu|" \
" MBps/Hostcpu%"
categories = params["categories"].split('|')
threads = params["threads"].split()
kvm_ver = commands.getoutput(params.get('ver_cmd', "rpm -q qemu-kvm"))
fd = open("%s/ffsb-result.RHS" % resultsdir, "w")
fd.write("#ver# %s\n#ver# host kernel: %s\n#ver# guest kernel:%s\n" % (
kvm_ver, os.uname()[2], guest_ver))
desc = """#desc# The Flexible Filesystem Benchmark(FFSB) is a cross-platform
#desc# filesystem performance measurement tool. It uses customizable profiles
#desc# to measure of different workloads, and it supports multiple groups of
#desc# threads across multiple filesystems.
#desc# How to read the results:
#desc# - The Throughput is measured in MBps/sec.
#desc# - IOPS (Input/Output Operations Per Second, pronounced eye-ops)
#desc# - Usage of Vcpu, Hostcpu are all captured
#desc#
"""
fd.write(desc)
fd.write("Category:SUM\n None| MBps| Hostcpu|MBps/Hostcpu%\n")
fd.write(" 0|%8.2f|%13.2f|%8.2f\n" % (sum_thro, sum_hostcpu,
(sum_thro/sum_hostcpu)))
fd.write("Category:ALL\n")
idx = 0
for i in range(len(matrix)):
if i % 3 == 0:
fd.write("%s\n%s\n" % (categories[idx], headstr))
idx += 1
fd.write("%7s|%8s|%13s|%8s|%8s|%10s|%14s\n" % (threads[i%3],
matrix[i][0], matrix[i][1], matrix[i][2], matrix[i][3],
matrix[i][4], matrix[i][5]))
fd.close()
def mpstat_ana(filename):
"""
Get the cpu usage from the mpstat summary file
@param filename: filename of the mpstat summary file
"""
mpstat_result = open(filename, 'r')
key_value = "%idle"
index = 0
result = {}
for line in mpstat_result.readlines():
if key_value in line:
index = line.split().index(key_value) + 1
else:
data = line.split()
if data[0] == "all":
vcpu = "all"
else:
vcpu = "vcpu%s" % data[0]
cpu_use = "%20.2f" % (100 - utils_test.aton(data[index]))
result[vcpu] = cpu_use
return result
def time_ana(results_tuple):
"""
Get the time from the results when run test with time
@param results_tuple: the tuple get from results file
"""
time_unit = 1.0
time_data = 0.0
l = len(results_tuple)
while l > 0:
l -= 1
if results_tuple[l]:
time_data += float(results_tuple[l]) * time_unit
time_unit *= 60
return str(time_data)
def format_result(result, base="20", fbase="2"):
"""
Format the result to a fixed length string.
@param result: result need to convert
@param base: the length of converted string
@param fbase: the decimal digit for float
"""
if isinstance(result, str):
value = "%" + base + "s"
elif isinstance(result, int):
value = "%" + base + "d"
elif isinstance(result, float):
value = "%" + base + "." + fbase + "f"
return value % result
def get_sum_result(sum_matrix, value, tag):
"""
Calculate the summary result
@param sum_matrix: matrix to store the summary results
@param value: value to add to matrix
@param tag: the keyword for the value in matrix
"""
if tag in sum_matrix.keys():
sum_matrix[tag] += value
else:
sum_matrix[tag] = value
return sum_matrix
def result_sum(topdir, params, guest_ver, resultsdir, test):
case_type = params.get("test")
unit_std = params.get("unit_std", "M")
no_table_list = params.get("no_table_list", "").split()
ignore_cases = params.get("ignore_cases", "").split()
repeatn = ""
if "repeat" in test.outputdir:
repeatn = re.findall("repeat\d+", test.outputdir)[0]
category_key = re.split("/", test.outputdir)[-1]
category_key = re.split(case_type, category_key)[0]
category_key = re.sub("\.repeat\d+", "", category_key)
kvm_ver = utils.system_output(params.get('ver_cmd', "rpm -q qemu-kvm"))
host_ver = os.uname()[2]
test.write_test_keyval({ 'kvm-userspace-ver': kvm_ver })
test.write_test_keyval({ 'host-kernel-ver': host_ver })
test.write_test_keyval({ 'guest-kernel-ver': guest_ver })
#Find the results files
results_files = {}
file_list = ['guest_result', 'guest_monitor_result.*sum',
'host_monitor_result.*sum']
if params.get("file_list"):
file_list = params.get("file_list").split()
for files in os.walk(topdir):
if files[2]:
for file in files[2]:
jump_flag = False
for ignore_case in ignore_cases:
if ignore_case in files[0]:
jump_flag = True
if jump_flag:
continue
file_dir_norpt = re.sub("\.repeat\d+", "", files[0])
if (repeatn in files[0]
and category_key in file_dir_norpt
and case_type in files[0]):
for i, pattern in enumerate(file_list):
if re.findall(pattern, file):
prefix = re.findall("%s\.[\d\w_\.]+" % case_type,
file_dir_norpt)[0]
prefix = re.sub("\.|_", "--", prefix)
if prefix not in results_files.keys():
results_files[prefix] = []
tmp = []
for j in range(len(file_list)):
tmp.append(None)
results_files[prefix] = tmp
tmp_file = utils_misc.get_path(files[0], file)
results_files[prefix][i] = tmp_file
#Start to read results from results file and monitor file
results_matrix = {}
no_table_results = {}
thread_tag = params.get("thread_tag", "thread")
order_list = []
for prefix in results_files:
marks = params.get("marks", "").split()
case_infos = prefix.split("--")
case_type = case_infos[0]
threads = ""
refresh_order_list = True
prefix_perf = prefix
if case_type == "ffsb":
category = "-".join(case_infos[:-1])
threads = case_infos[-1]
elif case_type == "qcow2perf":
refresh_order_list = False
if len(case_infos) > 2:
category = "-".join(case_infos[:-2])
thread_tag = case_infos[-2]
threads = " "
marks[0] = re.sub("TIME", case_infos[-1], marks[0])
else:
category = case_infos[-1]
marks[0] = re.sub("TIME", case_infos[-1], marks[0])
prefix_perf = "--".join(case_infos[:-1])
else:
category = "-".join(case_infos)
if refresh_order_list:
order_list = []
if (category not in results_matrix.keys()
and category not in no_table_list):
results_matrix[category] = {}
if threads:
if threads not in results_matrix[category].keys():
results_matrix[category][threads] = {}
results_matrix["thread_tag"] = thread_tag
tmp_dic = results_matrix[category][threads]
elif category not in no_table_list:
tmp_dic = results_matrix[category]
result_context_file = open(results_files[prefix][0], 'r')
result_context = result_context_file.read()
result_context_file.close()
for mark in marks:
mark_tag, mark_key = mark.split(":")
datas = re.findall(mark_key, result_context)
if isinstance(datas[0], tuple):
data = time_ana(datas[0])
else:
tmp_data = 0.0
for data in datas:
if re.findall("[bmkg]", data, re.I):
data = utils_misc.normalize_data_size(data, unit_std)
tmp_data += float(data)
data = str(tmp_data)
if data:
if mark_tag in no_table_list:
no_table_results[mark_tag] = utils_test.aton(data)
perf_value = no_table_results[mark_tag]
else:
tmp_dic[mark_tag] = utils_test.aton(data)
perf_value = tmp_dic[mark_tag]
else:
raise error.TestError("Can not get the right data from result."
"Please check the debug file.")
if mark_tag not in no_table_list and mark_tag not in order_list:
order_list.append(mark_tag)
test.write_perf_keyval({ '%s-%s' % (prefix_perf, mark_tag) : \
perf_value })
# start analyze the mpstat results
if params.get('mpstat') == "yes":
guest_cpu_infos = mpstat_ana(results_files[prefix][1])
for vcpu in guest_cpu_infos:
if vcpu != "all":
tmp_dic[vcpu] = float(guest_cpu_infos[vcpu])
order_list.append(vcpu)
host_cpu_infos = mpstat_ana(results_files[prefix][2])
tmp_dic["Hostcpu"] = float(host_cpu_infos["all"])
order_list.append("Hostcpu")
# Add some special key for cases
if case_type == "ffsb":
tmp_dic["MBps_per_Hostcpu"] = (tmp_dic["Thro-MBps"] /
tmp_dic["Hostcpu"])
order_list.append("MBps_per_Hostcpu")
elif case_type == "iozone":
sum_kbps = 0
for mark in marks:
mark_tag, _ = mark.split(":")
sum_kbps += tmp_dic[mark_tag]
tmp_dic["SUMKbps_per_Hostcpu"] = sum_kbps / tmp_dic["Hostcpu"]
order_list.append("SUMKbps_per_Hostcpu")
sum_marks = params.get("sum_marks", "").split()
sum_matrix = {}
order_line = ""
if results_matrix.get("thread_tag"):
headline = "%20s|" % results_matrix["thread_tag"]
results_matrix.pop("thread_tag")
else:
headline = ""
for index, tag in enumerate(order_list):
headline += "%s|" % format_result(tag)
order_line += "DATA%d|" % index
headline = headline.rstrip("|")
order_line = order_line.rstrip("|")
result_path = utils_misc.get_path(resultsdir,
"%s-result.RHS" % case_type)
if os.path.isfile(result_path):
result_file = open(result_path, "r+")
else:
result_file = open(result_path, "w")
result_file.write("### kvm-userspace-version : %s\n" % kvm_ver)
result_file.write("### kvm-version : %s\n" % host_ver)
result_file.write("### guest-kernel-version :%s\n" % guest_ver)
test.write_test_keyval({ 'category': headline })
result_file.write("Category:ALL\n")
matrix_order = params.get("matrix_order", "").split()
if not matrix_order:
matrix_order = results_matrix.keys()
matrix_order.sort()
for category in matrix_order:
out_loop_line = order_line
result_file.write("%s\n" % category)
line = ""
write_out_loop = True
result_file.write("%s\n" % headline)
for item in results_matrix[category]:
if isinstance(results_matrix[category][item], dict):
tmp_dic = results_matrix[category][item]
line = "%s|" % format_result(item)
for tag in order_list:
line += "%s|" % format_result(tmp_dic[tag])
if tag in sum_marks:
sum_matrix = get_sum_result(sum_matrix, tmp_dic[tag],
tag)
result_file.write("%s\n" % line.rstrip("|"))
write_out_loop = False
else:
#line += "%s|" % format_result(results_matrix[category][item])
re_data = "DATA%s" % order_list.index(item)
out_loop_line = re.sub(re_data,
format_result(results_matrix[category][item]),
out_loop_line)
if tag in sum_marks:
sum_matrix = get_sum_result(sum_matrix, tmp_dic[tag],
tag)
if write_out_loop:
result_file.write("%s\n" % out_loop_line)
if sum_matrix:
if case_type == "ffsb":
sum_matrix["MBps_per_Hostcpu"] = (sum_matrix["Thro-MBps"] /
sum_matrix["Hostcpu"])
sum_marks.append("MBps_per_Hostcpu")
result_file.write("Category:SUM\n")
headline = ""
line = ""
if len(sum_matrix) < 4:
for i in range(4 - len(sum_matrix)):
headline += "%20s|" % "None"
line += "%20d|" % 0
for tag in sum_marks:
headline += "%20s|" % tag
line += "%s|" % format_result(sum_matrix[tag])
result_file.write("%s\n" % headline.rstrip("|"))
result_file.write("%s\n" % line.rstrip("|"))
if no_table_results:
no_table_order = params.get("no_table_order", "").split()
if not no_table_order:
no_table_order = no_table_results.keys()
no_table_order.sort()
for item in no_table_order:
result_file.write("%s: %s\n" % (item, no_table_results[item]))
result_file.close()