Unverified commit af681065, authored by Birdylx, committed by GitHub

[TIPC] Unify log and cfg name (#709)

* Rename cfg file

* Update benchmark iters in esrgan

* Standardize log name in cpp infer
Parent 1c66a2b2
@@ -52,6 +52,6 @@ null:null
===========================train_benchmark_params==========================
batch_size:32|64
fp_items:fp32
-total_iters:100
+total_iters:500
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:FLAGS_cudnn_exhaustive_search=1
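For context, each key:value line in this train_benchmark_params block is read by the TIPC shell harness through small parser helpers such as the func_parser_value calls visible in the script hunk below. A minimal sketch of that style of parser, assuming the usual common_func.sh-style split on ':' (the helper itself is not part of this diff), could look like:

# Hypothetical sketch of the key:value parsing used by the test_tipc shell scripts.
function func_parser_key() {
    strs=$1
    IFS=":"
    array=(${strs})
    echo ${array[0]}      # first ':'-separated field
}
function func_parser_value() {
    strs=$1
    IFS=":"
    array=(${strs})
    echo ${array[1]}      # second ':'-separated field
}

# Example against a line from the block above:
line="total_iters:500"
echo "$(func_parser_key "${line}") -> $(func_parser_value "${line}")"   # total_iters -> 500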
@@ -22,16 +22,21 @@ gpu_id=$(func_parser_value "${lines[9]}")
use_mkldnn=$(func_parser_value "${lines[10]}")
cpu_threads=$(func_parser_value "${lines[11]}")
LOG_PATH="./test_tipc/output/infer_cpp"
# only support fp32、bs=1, trt is not supported yet.
precision="fp32"
use_trt=false
batch_size=1
LOG_PATH="./test_tipc/output/${model_name}/${MODE}"
mkdir -p ${LOG_PATH}
status_log="${LOG_PATH}/results_cpp_infer.log"
status_log="${LOG_PATH}/results_cpp.log"
function func_cpp_inference(){
    # set log
    if [ ${device} = "GPU" ]; then
-        _save_log_path="${LOG_PATH}/infer_cpp_${device}.log"
+        _save_log_path="${LOG_PATH}/cpp_infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log"
    elif [ ${device} = "CPU" ]; then
-        _save_log_path="${LOG_PATH}/infer_cpp_${device}_usemkldnn_${usemkldnn}_threads_${cpu_threads}.log"
+        _save_log_path="${LOG_PATH}/cpp_infer_cpu_usemkldnn_${use_mkldnn}_threads_${cpu_threads}_precision_${precision}_batchsize_${batch_size}.log"
    fi
    # set params
......
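Under the unified naming above, with use_trt=false, precision=fp32 and batch_size=1 as set in this hunk, and with illustrative values for the variables this excerpt does not show (model_name=esrgan, MODE=cpp_infer, use_mkldnn=True, cpu_threads=1 are assumptions, not taken from this diff), the resulting log layout would look roughly like:

# Hypothetical resulting paths; model_name/MODE/use_mkldnn/cpu_threads values are illustrative.
LOG_PATH="./test_tipc/output/esrgan/cpp_infer"
status_log="${LOG_PATH}/results_cpp.log"
# GPU run:
#   ./test_tipc/output/esrgan/cpp_infer/cpp_infer_gpu_usetrt_false_precision_fp32_batchsize_1.log
# CPU run:
#   ./test_tipc/output/esrgan/cpp_infer/cpp_infer_cpu_usemkldnn_True_threads_1_precision_fp32_batchsize_1.log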