Unverified commit e47f4350, authored by Tingquan Gao, committed via GitHub

support ampo1 in benchmark; fp16 -> amp (#8412)

* support ampo1 in benchmark; fp16 -> amp

* benchmark: fp16 -> amp
Parent 253a5d2d
@@ -51,10 +51,10 @@ inference:./deploy/python/infer.py
 --trt_max_shape:1600
 ===========================train_benchmark_params==========================
 batch_size:2
-fp_items:fp32|fp16
+fp_items:fp32|amp
 epoch:1
 repeat:1
 --profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
 flags:null
 ===========================infer_benchmark_params===========================
-numpy_infer_input:3x640x640_2.npy
\ No newline at end of file
+numpy_infer_input:3x640x640_2.npy
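The fp_items key lists the precision modes the benchmark sweeps over, so this hunk swaps the legacy fp16 entry for amp (AMP O1); the same substitution repeats in the config hunks below. As a hedged illustration of how such a line can be consumed, the bash sketch here greps fp_items out of a config file and loops over the |-separated modes. It is a minimal sketch, not the actual test_tipc benchmark runner, and the config path argument is illustrative.

#!/usr/bin/env bash
# Minimal sketch: read fp_items from a TIPC-style config and run one pass per precision mode.
config_file=$1

# Extract "fp32|amp" from a line such as "fp_items:fp32|amp".
fp_items=$(grep "^fp_items:" "${config_file}" | cut -d ":" -f 2)

# Split on "|" and launch one benchmark run per precision mode (fp32, then amp).
for precision in ${fp_items//|/ }; do
    echo "benchmark run with precision=${precision}"
done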
@@ -51,10 +51,10 @@ inference:./deploy/python/infer.py
 null:null
 ===========================train_benchmark_params==========================
 batch_size:48
-fp_items:fp32|fp16
+fp_items:fp32|amp
 epoch:1
 repeat:1
 --profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
 flags:null
 ===========================infer_benchmark_params===========================
-numpy_infer_input:3x416x416_2.npy
\ No newline at end of file
+numpy_infer_input:3x416x416_2.npy
@@ -51,7 +51,7 @@ inference:./deploy/python/infer.py
 null:null
 ===========================train_benchmark_params==========================
 batch_size:64
-fp_items:fp32|fp16
+fp_items:fp32|amp
 epoch:1
 repeat:1
 --profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
@@ -59,4 +59,4 @@ flags:null
 ===========================infer_benchmark_params===========================
 numpy_infer_input:3x320x320_2.npy
 ===========================to_static_train_benchmark_params===========================
-to_static_train:--to_static
\ No newline at end of file
+to_static_train:--to_static
@@ -51,7 +51,7 @@ inference:./deploy/python/infer.py
 --trt_max_shape:1600
 ===========================train_benchmark_params==========================
 batch_size:16
-fp_items:fp32|fp16
+fp_items:fp32|amp
 epoch:1
 repeat:1
 --profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
......
@@ -51,8 +51,8 @@ inference:./deploy/python/infer.py
 --trt_max_shape:1600
 ===========================train_benchmark_params==========================
 batch_size:16
-fp_items:fp32|fp16
+fp_items:fp32|amp
 epoch:1
 repeat:1
 --profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
-flags:null
\ No newline at end of file
+flags:null
@@ -203,7 +203,7 @@ if [ ${MODE} = "whole_infer" ] || [ ${MODE} = "klquant_whole_infer" ]; then
 export_cmd="${python} ${run_export} ${set_export_weight} ${set_filename} ${set_save_export_dir} "
 echo $export_cmd
 eval $export_cmd
-status_check $? "${export_cmd}" "${status_log}" "${model_name}"
+status_check $? "${export_cmd}" "${status_log}" "${model_name}"
 #run inference
 save_export_model_dir="${save_export_value}/${model_name}"
@@ -279,7 +279,10 @@ else
 set_use_gpu=$(func_set_params "${train_use_gpu_key}" "${use_gpu}")
 set_train_params1=$(func_set_params "${train_param_key1}" "${train_param_value1}")
 save_log="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}"
-if [ ${autocast} = "amp" ] || [ ${autocast} = "fp16" ]; then
+if [ ${autocast} = "amp" ]; then
+    set_autocast="--amp"
+    set_amp_level="amp_level=O1"
+elif [ ${autocast} = "fp16" ]; then
     set_autocast="--amp"
     set_amp_level="amp_level=O2"
 else
......
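For reference, here is a self-contained sketch of the dispatch this hunk introduces: the value amp, now used by the benchmark configs above, maps to AMP level O1, while the legacy fp16 value keeps mapping to AMP level O2. The else branch is not visible in the hunk, so its body below is an assumption, and the script only illustrates the branching logic, not the full test_train_inference_python.sh.

#!/usr/bin/env bash
# Sketch of the new autocast handling. Usage: ./autocast_demo.sh amp|fp16|fp32
autocast=${1:-fp32}

if [ "${autocast}" = "amp" ]; then
    # "amp" -> mixed-precision training, AMP level O1
    set_autocast="--amp"
    set_amp_level="amp_level=O1"
elif [ "${autocast}" = "fp16" ]; then
    # legacy "fp16" -> AMP level O2 (pure fp16 mode in Paddle)
    set_autocast="--amp"
    set_amp_level="amp_level=O2"
else
    # assumed fp32 fallback: pass no AMP flags
    set_autocast=" "
    set_amp_level=" "
fi

echo "train flags: ${set_autocast} ${set_amp_level}"

With the updated configs (fp_items:fp32|amp) the benchmark therefore exercises fp32 and AMP O1 runs, while an fp16 entry would still be honored and run as AMP O2.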