diff --git a/python/examples/pipeline/PaddleClas/DarkNet53/benchmark.py b/python/examples/pipeline/PaddleClas/DarkNet53/benchmark.py
index 2433b0132728dc96627254f9231949a74a551c28..d643b90f5b7ac6ef6892bb83e7dfb20b650df49b 100644
--- a/python/examples/pipeline/PaddleClas/DarkNet53/benchmark.py
+++ b/python/examples/pipeline/PaddleClas/DarkNet53/benchmark.py
@@ -83,7 +83,7 @@ def multithread_http(thread, batch_size):
     print("Total cost: {}s".format(total_cost))
     print("Each thread cost: {}s. ".format(avg_cost))
     print("Total count: {}. ".format(total_number))
-    print("AVG QPS: {} samples/s".format(batch_size * total_number /
+    print("AVG_QPS: {} samples/s".format(batch_size * total_number /
                                          total_cost))
     show_latency(result[1])
 
diff --git a/python/examples/pipeline/PaddleClas/DarkNet53/benchmark_cfg.yaml b/python/examples/pipeline/PaddleClas/DarkNet53/benchmark_cfg.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f4d354e3a6ad919bd1219be20bd27ac45cc85c0f
--- /dev/null
+++ b/python/examples/pipeline/PaddleClas/DarkNet53/benchmark_cfg.yaml
@@ -0,0 +1,29 @@
+cuda_version: "10.1"
+cudnn_version: "7.6"
+trt_version: "6.0"
+python_version: "3.7"
+gcc_version: "8.2"
+paddle_version: "2.0.1"
+
+cpu: "Intel(R) Xeon(R) Gold 5117 CPU @ 2.00GHz X12"
+gpu: "T4"
+xpu: "None"
+api: ""
+owner: "cuicheng01"
+
+model_name: "imagenet"
+model_type: "static"
+model_source: "PaddleClas"
+model_url: ""
+
+batch_size: 1
+num_of_samples: 1000
+input_shape: "3,224,224"
+
+runtime_device: "cpu"
+ir_optim: true
+enable_memory_optim: true
+enable_tensorrt: false
+precision: "fp32"
+enable_mkldnn: false
+cpu_math_library_num_threads: ""
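
The new benchmark_cfg.yaml records the hardware and runtime metadata for the DarkNet53 pipeline benchmark. Below is a minimal sketch (not part of this diff) of how such a config could be read next to benchmark.py; it assumes PyYAML is installed, and the helper name load_benchmark_cfg is hypothetical.

```python
# Minimal sketch: read benchmark_cfg.yaml as a plain dict.
# Assumes PyYAML is available; load_benchmark_cfg is a hypothetical helper.
import yaml


def load_benchmark_cfg(path="benchmark_cfg.yaml"):
    """Return the benchmark environment/config metadata."""
    with open(path) as f:
        return yaml.safe_load(f)


if __name__ == "__main__":
    cfg = load_benchmark_cfg()
    # e.g. batch_size=1, input_shape="3,224,224", runtime_device="cpu"
    print(cfg["batch_size"], cfg["input_shape"], cfg["runtime_device"])
```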