# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
import os
import yaml
import argparse
import benchmark_utils

# Example of the raw benchmark log (--benchmark_log) produced by a web
# service or pipeline run:
"""
{'CPU_UTILIZATION': 0.8, 'MAX_GPU_MEMORY': 0, 'GPU_UTILIZATION': '0 %',
 'DAG': {'50': 670.256, '60': 670.256, '70': 670.765, '80': 671.23,
         '90': 687.546, '95': 687.546, '99': 687.546, 'avg': 670.755625,
         'qps': 0.8, 'query_count': 8, 'succ': 1.0},
 'demo': {'midp': 669.484375, 'postp': 0.184875, 'prep': 1.001875}}
"""


class LogHandler(object):
    """Accumulates log lines in memory and prints or dumps them on demand."""

    def __init__(self):
        self.fstr = ""

    def print(self):
        print(self.fstr)

    def dump(self, filename):
        with open(filename, 'w') as fout:
            fout.write(self.fstr)

    def append(self, new_str):
        self.fstr += new_str + "\n"


def parse_args():  # pylint: disable=doc-string-missing
    parser = argparse.ArgumentParser("serve")
    parser.add_argument(
        "--benchmark_cfg",
        type=str,
        required=True,
        help="benchmark config yaml, including general info, model info, "
        "data info and conf info")
    parser.add_argument(
        "--benchmark_log",
        type=str,
        required=True,
        help="benchmark log, generated by a web service or pipeline.")
    parser.add_argument(
        "--output",
        type=str,
        default="std_benchmark.log",
        help="the output filename, default std_benchmark.log")
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()

    # Load the benchmark config and the raw benchmark log. safe_load is used
    # because plain yaml.load requires an explicit Loader in PyYAML >= 6 and
    # is unsafe on untrusted input.
    with open(args.benchmark_cfg, 'r') as f:
        benchmark_config = yaml.safe_load(f)
    with open(args.benchmark_log, 'r') as f:
        benchmark_raw = yaml.safe_load(f)

    # Model and data descriptions taken from the benchmark config.
    model_info = {
        'model_name': benchmark_config["model_name"],
        'precision': benchmark_config["precision"]
    }
    data_info = {
        'batch_size': benchmark_config["batch_size"],
        'shape': benchmark_config["input_shape"],
        'data_num': benchmark_config["num_of_samples"]
    }

    # Latency and throughput statistics from the DAG section of the raw log.
    perf_info = {
        'preprocess_time_s': "",
        'inference_time_s': benchmark_raw["DAG"]["avg"],
        'postprocess_time_s': "",
        'total_time_s': "",
        'inference_time_s_90': benchmark_raw["DAG"]["90"],
        'inference_time_s_99': benchmark_raw["DAG"]["99"],
        'succ_rate': benchmark_raw["DAG"]["succ"],
        'qps': benchmark_raw["DAG"]["qps"]
    }

    # Resource usage reported by the serving process.
    resource_info = {
        'cpu_rss_mb': "",
        'cpu_vms_mb': "",
        'cpu_shared_mb': "",
        'cpu_dirty_mb': "",
        'cpu_util': benchmark_raw["CPU_MEM"],
        'gpu_rss_mb': "",
        'gpu_util': benchmark_raw["GPU_UTIL"],
        'gpu_mem_util': benchmark_raw["GPU_MEM"]
    }

    # Emit the standardized benchmark report under the 'Serving' identifier.
    server_log = benchmark_utils.PaddleInferBenchmark(
        benchmark_config, model_info, data_info, perf_info, resource_info)
    server_log('Serving')
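
# ---------------------------------------------------------------------------
# Usage sketch. The script/file names and example yaml values below are
# hypothetical; only the keys read above (model_name, precision, batch_size,
# input_shape, num_of_samples) come from this file.
#
#   python3 parse_benchmark.py \
#       --benchmark_cfg benchmark_cfg.yaml \
#       --benchmark_log benchmark.log \
#       --output std_benchmark.log
#
# with a benchmark_cfg.yaml along the lines of:
#
#   model_name: demo_model
#   precision: fp32
#   batch_size: 1
#   input_shape: 3,224,224
#   num_of_samples: 8
# ---------------------------------------------------------------------------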