diff --git a/examples/calculator/test/BUILD.gn b/examples/calculator/test/BUILD.gn
index 730de036ffcefc5724da64eef9135d26d2dfc4b9..c63fcc58a6ededf8318016c9a04f68cd2923ea5b 100755
--- a/examples/calculator/test/BUILD.gn
+++ b/examples/calculator/test/BUILD.gn
@@ -31,4 +31,11 @@ group("fuzztest") {
   deps += [ "fuzztest/common/parse_fuzzer:fuzztest" ]
 }
+
+group("benchmarktest") {
+  testonly = true
+  deps = []
+
+  deps += [ "benchmarktest/common:benchmarktest" ]
+}
 ###############################################################################
diff --git a/examples/calculator/test/benchmarktest/common/BUILD.gn b/examples/calculator/test/benchmarktest/common/BUILD.gn
new file mode 100644
index 0000000000000000000000000000000000000000..fe9146b7f0b7de8bba08d7df34fba81a5f5c3113
--- /dev/null
+++ b/examples/calculator/test/benchmarktest/common/BUILD.gn
@@ -0,0 +1,31 @@
+# Copyright (c) 2021 Huawei Device Co., Ltd.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import("//build/test.gni")
+
+module_output_path = "developertest/calculator"
+
+ohos_benchmarktest("BenchmarkDemoTest") {
+  module_out_path = module_output_path
+  sources = [ "benchmark_demo_test.cpp" ]
+}
+
+group("benchmarktest") {
+  testonly = true
+  deps = []
+
+  deps += [
+    # deps file
+    ":BenchmarkDemoTest",
+  ]
+}
diff --git a/examples/calculator/test/benchmarktest/common/benchmark_demo_test.cpp b/examples/calculator/test/benchmarktest/common/benchmark_demo_test.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..02ad3fea6dbc92cfec8714e2f837b0e0e86103fc
--- /dev/null
+++ b/examples/calculator/test/benchmarktest/common/benchmark_demo_test.cpp
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2021 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <benchmark/benchmark.h>
+#include <string>
+#include <vector>
+
+using namespace std;
+
+namespace {
+    /**
+     * @tc.name: BenchmarkTestExample
+     * @tc.desc: Testcase for testing 'SimpleExample' function.
+     * @tc.type: FUNC
+     * @tc.require: Issue Number
+     */
+    size_t SimpleExample()
+    {
+        string str = "benchmark test";
+        return str.size();
+    }
+
+    static void BenchmarkTestExample(benchmark::State &state)
+    {
+        for (auto _ : state) {
+            /* @tc.steps: step1.call SimpleExample in loop */
+            SimpleExample();
+        }
+    }
+
+    /* Register the function as a benchmark */
+    BENCHMARK(BenchmarkTestExample);
+    /* Register the benchmark and explicitly set a fixed iteration count */
+    BENCHMARK(BenchmarkTestExample)->Iterations(1000);
+
+    /**
+     * @tc.name: BenchmarkTestVectorOperator
+     * @tc.desc: Testcase for testing "AccessVectorElementByOperator"
+     *           function.
+     * @tc.type: FUNC
+     * @tc.require: Issue Number
+     */
+    void AccessVectorElementByOperator()
+    {
+        constexpr int testLen = 5;
+        std::vector<int> testVec(testLen, 0);
+        for (int i = 0; i < testLen; i++) {
+            testVec[i] = i * i;
+        }
+    }
+
+    static void BenchmarkTestVectorOperator(benchmark::State &state)
+    {
+        for (auto _ : state) {
+            /* @tc.steps: step1.call AccessVectorElementByOperator in loop */
+            AccessVectorElementByOperator();
+        }
+    }
+
+    /*
+     * Register the function as a benchmark and set iterations and
+     * repetitions. "ReportAggregatesOnly" reports only the aggregate
+     * statistics (mean, median and standard deviation) of the repeated runs.
+     */
+    BENCHMARK(BenchmarkTestVectorOperator)->Iterations(1000)->Repetitions(3)->
+        ReportAggregatesOnly();
+
+    /**
+     * @tc.name: BenchmarkTestVectorAt
+     * @tc.desc: Testcase for testing "AccessVectorElementByAt"
+     *           function.
+     * @tc.type: FUNC
+     * @tc.require: Issue Number
+     */
+    void AccessVectorElementByAt()
+    {
+        constexpr int testLen = 5;
+        std::vector<int> testVec(testLen, 0);
+        for (int i = 0; i < testLen; i++) {
+            testVec.at(i) = i * i;
+        }
+    }
+
+    static void BenchmarkTestVectorAt(benchmark::State &state)
+    {
+        for (auto _ : state) {
+            /* @tc.steps: step1.call AccessVectorElementByAt in loop */
+            AccessVectorElementByAt();
+        }
+    }
+
+    BENCHMARK(BenchmarkTestVectorAt)->Iterations(1000)->Repetitions(3)->
+        ReportAggregatesOnly();
+
+    /**
+     * @tc.name: CalculatedAreaTestCase
+     * @tc.desc: Define a testcase that accesses a class member
+     *           variable.
+     * @tc.type: FUNC
+     * @tc.require: Issue Number
+     */
+    class BenchmarkDemoTest : public benchmark::Fixture {
+    public:
+        void SetUp(const ::benchmark::State &state)
+        {
+            /* @tc.setup: width and height assigned */
+            phoneWidth_ = 1080;  /* 1080 is default width */
+            phoneHeight_ = 2244; /* 2244 is default height */
+        }
+
+        void TearDown(const ::benchmark::State &state)
+        {
+        }
+
+        int phoneWidth_;
+        int phoneHeight_;
+    };
+
+    BENCHMARK_F(BenchmarkDemoTest, CalculatedAreaTestCase)(
+        benchmark::State &st)
+    {
+        long int area = 0;
+        for (auto _ : st) {
+            /* @tc.steps: step1.calculate area */
+            area = phoneWidth_ * phoneHeight_;
+        }
+    }
+
+    BENCHMARK_REGISTER_F(BenchmarkDemoTest, CalculatedAreaTestCase);
+}
+
+// Run the benchmark
+BENCHMARK_MAIN();
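Note: the demo above uses stock Google Benchmark, whose JSON output (the `--benchmark_format=json` format) is what the driver pulls off the device and what the report tooling added below consumes. As a reference, here is a minimal sketch of that record shape; the values are invented, but the field names are exactly the ones `generate_report.py` reads.

```python
import json

# Illustrative sample of the JSON a Google Benchmark binary emits with
# --benchmark_format=json (values are made up; only the field names matter,
# and they match the fields generate_report.py reads below).
SAMPLE = """
{
  "benchmarks": [
    {
      "name": "BenchmarkTestVectorOperator/iterations:1000/repeats:3_mean",
      "run_type": "aggregate",
      "aggregate_name": "mean",
      "repetitions": 3,
      "repetition_index": 0,
      "threads": 1,
      "iterations": 3,
      "real_time": 85.2,
      "cpu_time": 84.9,
      "time_unit": "ns"
    }
  ]
}
"""

for record in json.loads(SAMPLE).get("benchmarks", []):
    # generate_report.py derives its "Benchmark" key the same way:
    # '/' and ':' are folded into '_' to build a file-system-safe name.
    name = record.get("name", "").replace("/", "_").replace(":", "_")
    print(name, record.get("real_time", 0), record.get("time_unit", ""))
```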
diff --git a/libs/benchmark/__init__.py b/libs/benchmark/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a9c48070fbceebd30c9f2118e6d87cf3db623dc4
--- /dev/null
+++ b/libs/benchmark/__init__.py
@@ -0,0 +1,17 @@
+#!/usr/bin/env python3
+# coding=utf-8
+
+#
+# Copyright (c) 2020 Huawei Device Co., Ltd.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
diff --git a/libs/benchmark/report/__init__.py b/libs/benchmark/report/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a9c48070fbceebd30c9f2118e6d87cf3db623dc4
--- /dev/null
+++ b/libs/benchmark/report/__init__.py
@@ -0,0 +1,17 @@
+#!/usr/bin/env python3
+# coding=utf-8
+
+#
+# Copyright (c) 2020 Huawei Device Co., Ltd.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
diff --git a/libs/benchmark/report/benchmark_reporter.py b/libs/benchmark/report/benchmark_reporter.py
new file mode 100644
index 0000000000000000000000000000000000000000..b90bf2515ba2e86884f395bc7da2ff12c05052ca
--- /dev/null
+++ b/libs/benchmark/report/benchmark_reporter.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python3
+# coding=utf-8
+
+#
+# Copyright (c) 2020 Huawei Device Co., Ltd.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import os
+import sys
+import subprocess
+
+from xdevice import IReporter
+from xdevice import Plugin
+from xdevice import TestType
+
+from _core.logger import platform_logger
+
+__all__ = ["BenchmarkReporter"]
+LOG = platform_logger("BenchmarkReporter")
+
+
+@Plugin(type=Plugin.REPORTER, id=TestType.benchmark)
+class BenchmarkReporter(IReporter):
+
+    def __generate_reports__(self, report_path, **kwargs):
+        del kwargs
+        LOG.info("report_path = %s" % report_path)
+        self._make_benchmark_report(report_path)
+
+    def _make_benchmark_report(self, result_path):
+        result_path = os.path.join(result_path, "benchmark")
+        reports_dir = os.path.join(result_path, "benchmark", "report")
+        if not os.path.exists(reports_dir):
+            os.makedirs(reports_dir)
+        report_generate_tool = os.path.abspath(
+            os.path.join(os.path.dirname(os.path.realpath(__file__)),
+                         "generate_report.py"))
+
+        command = [sys.executable, report_generate_tool, result_path,
+                   reports_dir]
+        LOG.info(command)
+        subprocess.call(command, shell=False)
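Note: registration happens entirely through the `@Plugin` decorator: merely importing `benchmark.report.benchmark_reporter` (see the `_load_internal_plugins` change at the end of this diff) runs the decorator and makes the reporter discoverable under `TestType.benchmark`. A toy re-implementation of that pattern, since xdevice's real registry is internal to the framework and not shown anywhere in this diff:

```python
# Toy sketch of decorator-based plugin registration (illustration only;
# the real registry lives inside xdevice).
PLUGIN_REGISTRY = {}


def plugin(plugin_type, plugin_id):
    def wrapper(cls):
        # Importing the defining module is enough to execute this decorator,
        # which is why _load_internal_plugins only needs to import
        # benchmark.report.benchmark_reporter.
        PLUGIN_REGISTRY[(plugin_type, plugin_id)] = cls
        return cls
    return wrapper


@plugin("reporter", "benchmark")
class ToyBenchmarkReporter:
    def __generate_reports__(self, report_path, **kwargs):
        print("would generate benchmark report under", report_path)


# The framework can later look the reporter up by (type, id) and invoke it.
PLUGIN_REGISTRY[("reporter", "benchmark")]().__generate_reports__("/tmp/r")
```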
diff --git a/libs/benchmark/report/generate_report.py b/libs/benchmark/report/generate_report.py
new file mode 100644
index 0000000000000000000000000000000000000000..a268bf46dc65696149d56e6de0aba53f50324ff5
--- /dev/null
+++ b/libs/benchmark/report/generate_report.py
@@ -0,0 +1,375 @@
+#!/usr/bin/env python3
+# coding=utf-8
+
+#
+# Copyright (c) 2020 Huawei Device Co., Ltd.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import json
+import os
+import shutil
+import sys
+
+SETTING_RED_STYLE = """\033[33;31m%s\033[0m"""
+
+
+def load_json_data(json_file_path):
+    json_data = {}
+    if os.path.isfile(json_file_path):
+        try:
+            with open(json_file_path, 'r') as file_read:
+                json_data = json.load(file_read)
+            if not json_data:
+                print("Loading file \"%s\" error" % json_file_path)
+                return {}
+        except (IOError, ValueError) as err_msg:
+            print("Error for load_json_data: \"%s\"" %
+                  json_file_path, err_msg)
+    else:
+        print("Info: \"%s\" not exist." % json_file_path)
+    return json_data
+
+
+def get_file_list(find_path, postfix):
+    file_names = os.listdir(find_path)
+    file_list = []
+    if len(file_names) > 0:
+        for name in file_names:
+            if name.endswith(postfix):
+                file_list.append(name)
+    return file_list
+
+
+def get_file_list_by_postfix(path, postfix, filter_jar=""):
+    file_list = []
+    for dirs in os.walk(path):
+        files = get_file_list(find_path=dirs[0], postfix=postfix)
+        for file_path in files:
+            if "" != file_path and -1 == file_path.find(__file__):
+                pos = file_path.rfind(os.sep)
+                file_name = file_path[pos + 1:]
+                file_path = os.path.join(dirs[0], file_path)
+                if filter_jar != "" and file_name == filter_jar:
+                    print(SETTING_RED_STYLE % ("Skipped %s" % file_path))
+                    continue
+                file_list.append(file_path)
+    return file_list
+
+
+class BenchmarkReport(object):
+    SUBSYSTEM_SUMMARY = "OHOS_SUBSYSTEM_SUMMARY"
+    ENABLE_LINK = "OHOS_ENABLE_PASSCASE_LINK"
+    REPORT_SUMMARY = "OHOS_REPORT_SUMMARY"
+    LEGEND_DATA = "OHOS_LEGEND_DATA"
+    XAXIS_DATA = "OHOS_XAXIS_DATA"
+    SERIES_DATA = "OHOS_TITLE_DATA"
+    TITLE_TEXT = "OHOS_TITLE_TEST"
+    YAXIS_FORMATTER = "OHOS_YAXIS_FORMATTER"
+
+    def __init__(self):
+        self.index = 0
+        self.filtered = ["detail", "id", "pm", "owner",
+                         "Count", "ScoreUnit", "Variance"]
+        self.default_item = []
+        self.max_index = 1000
+        self.sbs_mdl_summary_list = []
+        self.benchmark_list = []
+        self._init_default_item()
+
+    def _init_default_item(self):
+        self.default_item.append("Subsystem")
+        self.default_item.append("Module")
+        self.default_item.append("Testsuit")
+        self.default_item.append("Benchmark")
+        self.default_item.append("Mode")
+        self.default_item.append("RunType")
+        self.default_item.append("TestTargetName")
+        self.default_item.append("TestTargetMethod")
+        self.default_item.append("Repetitions")
+        self.default_item.append("RepetitionIndex")
+        self.default_item.append("Threads")
+        self.default_item.append("Iterations")
+        self.default_item.append("Score")
+        self.default_item.append("CpuTime")
+        self.max_index = len(self.default_item) + 1000
+
+    def generate_benchmark(self, args):
+        if args is None or len(args) <= 2:
+            print(SETTING_RED_STYLE %
+                  "Error: source_dir and report_dir can't be empty")
+            return
+
+        src_path = args[1]
+        dest_path = os.path.abspath(args[2])
+
+        print("source_dir: %s" % src_path)
+        print("report_dir: %s" % dest_path)
+
+        if not os.path.exists(src_path):
+            print("%s not exists" % src_path)
+            return
+
+        if os.path.exists(dest_path):
+            shutil.rmtree(dest_path)
+
+        self._get_benchmark_result_data(src_path)
+        self._generate_benchmark_summary_report(os.path.abspath(dest_path))
+        self._generate_all_benchmark_detail(os.path.abspath(dest_path))
+
+    def _remove_iterations(self, mdl_summary_list):
+        final_mdl_summary = []
+        for item_info in mdl_summary_list:
+            copy_item = item_info.copy()
+            copy_item.pop("Iterations")
+            final_mdl_summary.append(copy_item)
+        return final_mdl_summary
+
+    def _get_benchmark_result_data(self, src_path):
+        self.benchmark_list = []
+        self.sbs_mdl_summary_list = []
+        system_summary_dic = {}
+        json_files = get_file_list_by_postfix(src_path, ".json")
+        print("json_files %s" % json_files)
+        for json_file in json_files:
+            pos = json_file.find(src_path)
+            subsystem_root = json_file[pos + len(src_path):]
+            dir_list = subsystem_root.split(os.sep)
+            sbs_name = dir_list[1]
+            module_name = dir_list[2]
+            testsuit_name = dir_list[len(dir_list) - 2]
+
+            print(SETTING_RED_STYLE % (
+                  "subsystem_root: %s \n\n"
+                  "subsystem_name: %s \n\n"
+                  "module_name: %s \n\n"
+                  "testsuit_name: %s \n\n" %
+                  (subsystem_root, str(sbs_name),
+                   str(module_name), str(testsuit_name))))
+
+            mdl_summary_list = self._get_subsystem_cxx_benchmark(
+                sbs_name, module_name, testsuit_name, json_file)
+            self.benchmark_list += mdl_summary_list
+
+            if sbs_name in system_summary_dic.keys() \
+                    and testsuit_name in system_summary_dic[sbs_name].keys():
+                subsystem_summary_dic = \
+                    system_summary_dic[sbs_name][testsuit_name]
+                subsystem_summary_dic["children"] += \
+                    self._remove_iterations(mdl_summary_list)
+            else:
+                self.index += 1
+                subsystem_summary_dic = dict()
+                subsystem_summary_dic["id"] = self.index
+                subsystem_summary_dic["Subsystem"] = sbs_name
+                subsystem_summary_dic["Testsuit"] = testsuit_name
+                subsystem_summary_dic["Module"] = "---"
+                subsystem_summary_dic["Detail"] = ""
+                subsystem_summary_dic["TestTargetName"] = "---"
+                subsystem_summary_dic["TestTargetMethod"] = "---"
+                subsystem_summary_dic["RunType"] = "---"
+                subsystem_summary_dic["Benchmark"] = "---"
+                subsystem_summary_dic["Mode"] = "---"
+                subsystem_summary_dic["Count"] = "---"
+                subsystem_summary_dic["Score"] = "---"
+                subsystem_summary_dic["ScoreUnit"] = "---"
+                subsystem_summary_dic["children"] = []
+                subsystem_summary_dic["children"] += \
+                    self._remove_iterations(mdl_summary_list)
+                self.sbs_mdl_summary_list.append(subsystem_summary_dic)
+                system_summary_dic[sbs_name] = {}
+                system_summary_dic[sbs_name][testsuit_name] = \
+                    subsystem_summary_dic
+                subsystem_summary_dic["pm"] = "unknown"
+                subsystem_summary_dic["owner"] = "unknown"
+
+    def _get_subsystem_cxx_benchmark(self, sbs_name, module_name,
+                                     testsuit_name, json_file):
+        sbs_mdl_summary_list = list()
+        json_data_dic = load_json_data(json_file)
+        for json_data in json_data_dic.get("benchmarks", []):
+            self.index += 1
+            sbs_mdl_summary = dict()
+            sbs_mdl_summary["id"] = self.index
+            sbs_mdl_summary["Subsystem"] = sbs_name
+            sbs_mdl_summary["Module"] = module_name
+            sbs_mdl_summary["Testsuit"] = testsuit_name
+            sbs_mdl_summary["pm"] = "unknown"
+            sbs_mdl_summary["owner"] = "unknown"
+
+            benchmark_name = json_data.get("name", "") \
+                .replace("/", "_").replace(":", "_")
+            test_target = benchmark_name.split("_")[0]
+            sbs_mdl_summary["TestTargetName"] = test_target
+            sbs_mdl_summary["TestTargetMethod"] = "%s()" % test_target
+            sbs_mdl_summary["RunType"] = str(json_data.get("run_type", ""))
+            sbs_mdl_summary["Mode"] = \
+                str(json_data.get("aggregate_name", "normal"))
+            sbs_mdl_summary["Benchmark"] = benchmark_name
+            sbs_mdl_summary["Repetitions"] = json_data.get("repetitions", 0)
+            sbs_mdl_summary["RepetitionIndex"] = \
+                json_data.get("repetition_index", 0)
+            sbs_mdl_summary["Threads"] = json_data.get("threads", 0)
+            sbs_mdl_summary["Iterations"] = json_data.get("iterations", 0)
+
+            score_unit = json_data.get("time_unit", "")
+            sbs_mdl_summary["ScoreUnit"] = score_unit
+            sbs_mdl_summary["CpuTime"] = "%.2e %s " % (
+                json_data.get("cpu_time", 0),
+                score_unit
+            )
+            sbs_mdl_summary["Score"] = "%.2e %s " % (
+                json_data.get("real_time", 0),
+                score_unit
+            )
+            sbs_mdl_summary["detail"] = "Link"
+            sbs_mdl_summary_list.append(sbs_mdl_summary)
+        return sbs_mdl_summary_list
+
+    def _generate_benchmark_summary_report(self, dest_dir_path):
+        tmpl_file_path = os.path.abspath(os.path.join(
+            os.path.dirname(__file__),
+            "..", "template", "benchmark_summary.html"))
+        if not os.path.exists(os.path.dirname(tmpl_file_path)):
+            print(SETTING_RED_STYLE %
+                  ("Warning: %s not exists" % tmpl_file_path))
+            return
+
+        out_report_file_path = os.path.join(dest_dir_path, "index.html")
+        if not os.path.exists(os.path.dirname(out_report_file_path)):
+            os.makedirs(os.path.dirname(out_report_file_path))
+
+        if os.path.exists(tmpl_file_path):
+            try:
+                with open(os.path.abspath(tmpl_file_path), "r+") as file_read:
+                    report_content = file_read.read()
+                    content_new = report_content
+
+                    pos = content_new.find(BenchmarkReport.SUBSYSTEM_SUMMARY)
+                    if pos >= 0:
+                        content_new = \
+                            content_new[0:pos] + \
+                            str(self.sbs_mdl_summary_list) + \
+                            content_new[pos +
+                                len(BenchmarkReport.SUBSYSTEM_SUMMARY):]
+
+                    try:
+                        with open(os.path.abspath(out_report_file_path),
+                                  "w") as output_fd:
+                            content_new = str(content_new)
+                            output_fd.write(content_new)
+                    except IOError as err_msg:
+                        print("Error5 for open %s failed, with msg %s" %
+                              (out_report_file_path, err_msg))
+            except IOError as err_msg:
+                print("Error6 for open %s failed, with msg %s" %
+                      (tmpl_file_path, err_msg))
+
+    def _generate_all_benchmark_detail(self, dest_dir_path):
+        for benchmark_info in self.benchmark_list:
+            self._generate_benchmark_detail(benchmark_info,
+                                            os.path.abspath(dest_dir_path))
+
+    def _is_filtered_id(self, item_key):
+        return item_key in self.filtered
+
+    def _get_index_id(self, item_key):
+        if item_key in self.default_item:
+            return self.default_item.index(item_key) + 1
+        self.max_index -= 1
+        return self.max_index
+
+    def _generate_benchmark_detail(self, benchmark_info, dest_dir_path):
+        report_tmpl_file_path = os.path.abspath(
+            os.path.join(os.path.dirname(__file__),
+                         "..", "template", "benchmark_detail.html"))
+        if not os.path.exists(os.path.dirname(report_tmpl_file_path)):
+            print(SETTING_RED_STYLE %
+                  ("Warning: %s not exists" % report_tmpl_file_path))
+            return
+
+        out_report_file_path = os.path.join(os.path.abspath(dest_dir_path),
+                                            str(benchmark_info["Subsystem"]),
+                                            str(benchmark_info["Module"]),
+                                            str(benchmark_info["Testsuit"]),
+                                            str(benchmark_info["Benchmark"]) +
+                                            "_" +
+                                            str(benchmark_info["Mode"]) +
+                                            "_detail.html")
+        if not os.path.exists(os.path.dirname(out_report_file_path)):
+            os.makedirs(os.path.dirname(out_report_file_path))
+
+        detail_info = self._get_detail_info(benchmark_info)
+
+        if os.path.exists(report_tmpl_file_path):
+            try:
+                with open(os.path.abspath(report_tmpl_file_path), "r+") \
+                        as file_read:
+                    report_content = file_read.read()
+                    content_new = \
+                        self._update_report_summary(report_content,
+                                                    detail_info)
+
+                    try:
+                        with open(os.path.abspath(out_report_file_path),
+                                  "w") as output_fd:
+                            output_fd.write(content_new)
+                    except IOError as err_msg:
+                        print("Error5 for open %s failed, with msg %s" %
+                              (out_report_file_path, err_msg))
+            except IOError as err_msg:
+                print("Error6 for open %s failed, with msg %s" %
+                      (report_tmpl_file_path, err_msg))
+
+    def _get_detail_info(self, benchmark_info):
+        detail_info = []
+        self.max_index = 1000
+        for item_key, item_value in benchmark_info.items():
+            if self._is_filtered_id(item_key):
+                continue
+
+            item_info = {"item": item_key,
+                         "id": self._get_index_id(item_key),
+                         "content": item_value.decode("UTF-8")
+                         if isinstance(item_value, bytes) else item_value}
+            detail_info.append(item_info)
+        detail_info = sorted(detail_info, key=lambda s: s["id"])
+        dest_detail_info = []
+        index = 1
+        for item in detail_info:
+            item["id"] = index
+            dest_detail_info.append(item)
+            index += 1
+        return dest_detail_info
+
+    def _update_report_summary(self, content_new, detail_info):
+        pos = content_new.find(BenchmarkReport.REPORT_SUMMARY)
+        if pos >= 0:
+            content_new = \
+                content_new[0:pos] + \
+                str(detail_info) + \
+                content_new[pos + len(BenchmarkReport.REPORT_SUMMARY):]
+        return content_new
+
+
+if __name__ == '__main__':
+    print("****************** Benchmark Report Starting ******************")
+    BenchmarkReport().generate_benchmark(sys.argv)
+    print("****************** Benchmark Report Finished ******************")
\ No newline at end of file
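Note: detail rows are ordered by `_get_index_id`: keys present in `default_item` keep their canonical slot (`index + 1`), while anything else gets a large, descending index so it sorts after the known columns. A compact demonstration of the same scheme (shortened column list for brevity):

```python
# Mirror of the _get_index_id ordering scheme with a shortened column list.
default_item = ["Subsystem", "Module", "Testsuit", "Benchmark"]
counters = {"max_index": len(default_item) + 1000}


def get_index_id(item_key):
    # Known columns keep their canonical 1-based slot.
    if item_key in default_item:
        return default_item.index(item_key) + 1
    # Unknown keys get large, descending ids so they sort last.
    counters["max_index"] -= 1
    return counters["max_index"]


rows = [{"item": key, "id": get_index_id(key)}
        for key in ["Benchmark", "SomeExtraKey", "Subsystem"]]
rows.sort(key=lambda row: row["id"])
print([row["item"] for row in rows])
# ['Subsystem', 'Benchmark', 'SomeExtraKey']
```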
diff --git a/libs/benchmark/template/benchmark_detail.html b/libs/benchmark/template/benchmark_detail.html
new file mode 100644
index 0000000000000000000000000000000000000000..4599395f46d9998688a2c4d19f92eb5e3d1fc0d4
--- /dev/null
+++ b/libs/benchmark/template/benchmark_detail.html
@@ -0,0 +1,138 @@
[benchmark_detail.html: a 138-line HTML template for the per-benchmark detail page, titled "OHOSReport". The markup was stripped during extraction and is not recoverable; per generate_report.py above, it carries the OHOS_REPORT_SUMMARY placeholder that _update_report_summary fills with the detail rows.]
diff --git a/libs/benchmark/template/benchmark_summary.html b/libs/benchmark/template/benchmark_summary.html
new file mode 100644
index 0000000000000000000000000000000000000000..13d829f6f007a0d92824c9e73fee545da02fe72d
--- /dev/null
+++ b/libs/benchmark/template/benchmark_summary.html
@@ -0,0 +1,189 @@
[benchmark_summary.html: a 189-line HTML template for the summary page, also titled "OHOSReport". The markup was likewise stripped; it carries the OHOS_SUBSYSTEM_SUMMARY placeholder filled by _generate_benchmark_summary_report, and presumably the chart tokens (OHOS_LEGEND_DATA, OHOS_XAXIS_DATA, ...) declared on BenchmarkReport.]
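Note: both templates are filled by plain sentinel replacement rather than a template engine: `_generate_benchmark_summary_report` splices `str(self.sbs_mdl_summary_list)` over `OHOS_SUBSYSTEM_SUMMARY`, and `_update_report_summary` does the same for `OHOS_REPORT_SUMMARY` in each detail page. Since the real HTML was stripped above, the one-line template here is only a stand-in:

```python
# Toy illustration of the sentinel-replacement scheme generate_report.py
# uses; the one-line template stands in for the real (stripped) HTML file.
template = "<script>var data = OHOS_REPORT_SUMMARY;</script>"

detail_info = [{"item": "Subsystem", "id": 1, "content": "developertest"},
               {"item": "Score", "id": 13, "content": "1.23e+02 ns "}]

token = "OHOS_REPORT_SUMMARY"
pos = template.find(token)
if pos >= 0:
    # Same splice generate_report.py performs: text before the token,
    # the str() of the data, then the text after the token.
    filled = template[:pos] + str(detail_info) + template[pos + len(token):]
    print(filled)
```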
diff --git a/ohos.build b/ohos.build
index 5779bd182cd666e906ecbe7374e7151b9808f3e8..e2efca2a7452596656071915a25254189c8c99e6 100644
--- a/ohos.build
+++ b/ohos.build
@@ -12,6 +12,7 @@
         "//test/developertest/examples/app_info/test:unittest",
         "//test/developertest/examples/calculator/test:unittest",
         "//test/developertest/examples/calculator/test:fuzztest",
+        "//test/developertest/examples/calculator/test:benchmarktest",
         "//test/developertest/examples/detector/test:unittest",
         "//test/developertest/examples/sleep/test:performance",
         "//test/developertest/examples/distributedb/test:distributedtest"
diff --git a/src/core/driver/drivers.py b/src/core/driver/drivers.py
index 6c9a9db36272befd7061039d40932d0bb8d1116b..224fb818b9344b15f204b307ee19b4e69e4a26b8 100755
--- a/src/core/driver/drivers.py
+++ b/src/core/driver/drivers.py
@@ -290,9 +290,7 @@ class ResultManager(object):
             _create_empty_result_file(filepath, self.testsuite_name,
                                       error_message)
         if "benchmark" == self.config.testtype[0]:
-            if self.device.is_directory(
-                    os.path.join(self.device_testpath, "benchmark")):
-                self._obtain_benchmark_result()
+            self._obtain_benchmark_result()
         # Get coverage data files
         if self.is_coverage:
             self.obtain_coverage_data()
@@ -310,24 +308,22 @@ class ResultManager(object):
         if not os.path.exists(benchmark_dir):
             os.makedirs(benchmark_dir)
-        print("benchmark_dir =%s", benchmark_dir)
-        if not self.device.pull_file(
-                os.path.join(self.device_testpath, "benchmark"),
-                benchmark_dir):
+        LOG.info("benchmark_dir = %s" % benchmark_dir)
+        self.device.pull_file(os.path.join(self.device_testpath,
+            "%s.json" % self.testsuite_name), benchmark_dir)
+        if not os.path.exists(os.path.join(benchmark_dir,
+            "%s.json" % self.testsuite_name)):
             os.rmdir(benchmark_dir)
         return benchmark_dir
 
     def get_result_sub_save_path(self):
-        find_key = os.sep + "tests" + os.sep
+        find_key = os.sep + "benchmark" + os.sep
         file_dir, _ = os.path.split(self.testsuite_path)
         pos = file_dir.find(find_key)
         subpath = ""
         if -1 != pos:
             subpath = file_dir[pos + len(find_key):]
-            pos1 = subpath.find(os.sep)
-            if -1 != pos1:
-                subpath = subpath[pos1 + len(os.sep):]
-        print("subpath = " + subpath)
+        LOG.info("subpath = " + subpath)
         return subpath
 
     def obtain_test_result_file(self):
diff --git a/src/main/_init_global_config.py b/src/main/_init_global_config.py
index 7929e8094fec300c411a8d855a694e9f684bc401..f01dcedf990702fef6c1dd6fb8d3dece5797612a 100755
--- a/src/main/_init_global_config.py
+++ b/src/main/_init_global_config.py
@@ -58,9 +58,7 @@ def _init_global_config():
     sys.hmh_script = os.path.abspath(os.path.join(
         sys.framework_root_dir,
-        "..",
-        "test-tools",
-        "hmh"))
+        "libs"))
     sys.path.insert(4, sys.hmh_script)
 
     sys.framework_res_dir = sys.framework_root_dir
@@ -90,7 +88,8 @@ def _iter_module_plugins(packages):
 
 def _load_internal_plugins():
     import core.driver
-    _iter_module_plugins([core.driver])
+    import benchmark.report.benchmark_reporter
+    _iter_module_plugins([core.driver, benchmark.report.benchmark_reporter])
 
     try:
         import xdevice_extension._core.environment
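Note: the `_init_global_config.py` hunk is what makes the new `benchmark` package importable: `libs/` (which now holds `benchmark/__init__.py` and `benchmark/report/__init__.py`) is inserted into `sys.path`, so the plugin import in `_load_internal_plugins` resolves and the `@Plugin` decorator registers `BenchmarkReporter` as a side effect. Roughly (the root path below is illustrative; in the real code it is `sys.framework_root_dir`, computed elsewhere in `_init_global_config.py`):

```python
import os
import sys

# Illustrative framework root; stands in for sys.framework_root_dir.
framework_root_dir = "/path/to/test/developertest"

# After this change, libs/ (containing the benchmark package) is placed
# on the import path at the same position the old hmh script dir used.
libs_dir = os.path.abspath(os.path.join(framework_root_dir, "libs"))
sys.path.insert(4, libs_dir)

# ...so the plugin import in _load_internal_plugins can succeed, and the
# @Plugin decorator registers BenchmarkReporter as a side effect:
# import benchmark.report.benchmark_reporter
```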