Commit ac1c206d authored by gaotingquan, committed by Tingquan Gao

tipc: support benchmark with fp32/fp16 training

Parent 097de74d
......@@ -31,7 +31,7 @@
```
./test_tipc/
├── common_func.sh # common functions called by the test_*.sh scripts
├── config # directory of config files
├── configs # directory of config files
│   ├── MobileNetV3 # test config directory for the MobileNetV3 series models
│   │   ├── MobileNetV3_large_x1_0_train_infer_python.txt # basic training and inference test config file
│   │   ├── MobileNetV3_large_x1_0_train_linux_gpu_fleet_amp_infer_python_linux_gpu_cpu.txt # multi-machine multi-GPU training and inference test config file
......
......@@ -50,5 +50,11 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml
-o Global.benchmark:False
null:null
null:null
===========================train_benchmark_params==========================
batch_size:64
fp_items:fp32|fp16
epoch:1
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
===========================infer_benchmark_params==========================
random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
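The `train_benchmark_params` blocks added in this commit are plain `key:value` lines, with `|` separating alternative values (for example several batch sizes, or fp32 versus fp16 precision). As a rough illustration of how such a block could be consumed, the bash sketch below splits the `|`-separated fields and enumerates one benchmark run per (batch_size, precision) pair. The `get_value` helper and the loop are assumptions made for illustration; they are not the actual `test_tipc` scripts.

```bash
#!/usr/bin/env bash
# Illustrative sketch only: shows how |-separated benchmark params such as
#   batch_size:64|128
#   fp_items:fp32|fp16
# could be split and iterated. Names here are assumptions, not the real
# test_tipc implementation.

config_file=$1

# Read the value of a "key:value" line from the config file
# (assumed helper, similar in spirit to the parsers in common_func.sh).
get_value() {
    grep "^$1:" "$config_file" | head -n1 | cut -d ":" -f2-
}

batch_sizes=$(get_value "batch_size")   # e.g. "64|128"
fp_items=$(get_value "fp_items")        # e.g. "fp32|fp16"
epoch=$(get_value "epoch")

# Split on "|" and enumerate one benchmark run per (batch_size, precision) pair.
IFS='|' read -ra bs_list <<< "$batch_sizes"
IFS='|' read -ra fp_list <<< "$fp_items"
for bs in "${bs_list[@]}"; do
    for fp in "${fp_list[@]}"; do
        echo "benchmark run: batch_size=${bs} precision=${fp} epochs=${epoch}"
        # A real runner would launch training here, presumably enabling AMP
        # when the precision item is fp16.
    done
done
```

In the real framework these values would feed the actual training command rather than an `echo`; the sketch only shows how the `|`-separated items expand into separate benchmark runs.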
......@@ -52,7 +52,7 @@ null:null
null:null
===========================train_benchmark_params==========================
batch_size:64|128
fp_items:fp32
fp_items:fp32|fp16
epoch:1
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
......
......@@ -52,7 +52,7 @@ null:null
null:null
===========================train_benchmark_params==========================
batch_size:64|128
fp_items:fp32
fp_items:fp32|fp16
epoch:1
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
......
......@@ -52,7 +52,7 @@ null:null
null:null
===========================train_benchmark_params==========================
batch_size:256|640
fp_items:fp32
fp_items:fp32|fp16
epoch:1
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
......
......@@ -49,5 +49,11 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.tran
-o Global.save_log_path:null
-o Global.benchmark:False
null:null
===========================train_benchmark_params==========================
batch_size:128
fp_items:fp32|fp16
epoch:1
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
===========================infer_benchmark_params==========================
random_infer_input:[{float32,[3,224,224]}]
......@@ -49,5 +49,11 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.tran
-o Global.save_log_path:null
-o Global.benchmark:False
null:null
===========================train_benchmark_params==========================
batch_size:128
fp_items:fp32|fp16
epoch:1
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
===========================infer_benchmark_params==========================
random_infer_input:[{float32,[3,224,224]}]
......@@ -49,5 +49,11 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml
-o Global.save_log_path:null
-o Global.benchmark:False
null:null
===========================train_benchmark_params==========================
batch_size:512
fp_items:fp32|fp16
epoch:1
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
===========================infer_benchmark_params==========================
random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
......@@ -49,5 +49,11 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml
-o Global.save_log_path:null
-o Global.benchmark:False
null:null
===========================train_benchmark_params==========================
batch_size:512
fp_items:fp32|fp16
epoch:1
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
===========================infer_benchmark_params==========================
random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
......@@ -49,5 +49,11 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml
-o Global.save_log_path:null
-o Global.benchmark:False
null:null
===========================train_benchmark_params==========================
batch_size:512
fp_items:fp32|fp16
epoch:1
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
===========================infer_benchmark_params==========================
random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
......@@ -49,5 +49,11 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml
-o Global.save_log_path:null
-o Global.benchmark:False
null:null
===========================train_benchmark_params==========================
batch_size:512
fp_items:fp32|fp16
epoch:1
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
===========================infer_benchmark_params==========================
random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
......@@ -49,5 +49,11 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml
-o Global.save_log_path:null
-o Global.benchmark:False
null:null
===========================train_benchmark_params==========================
batch_size:512
fp_items:fp32|fp16
epoch:1
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
===========================infer_benchmark_params==========================
random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
......@@ -49,5 +49,11 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml
-o Global.save_log_path:null
-o Global.benchmark:False
null:null
===========================train_benchmark_params==========================
batch_size:512
fp_items:fp32|fp16
epoch:1
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
===========================infer_benchmark_params==========================
random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
......@@ -49,5 +49,11 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml
-o Global.save_log_path:null
-o Global.benchmark:False
null:null
===========================train_benchmark_params==========================
batch_size:512
fp_items:fp32|fp16
epoch:1
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
===========================infer_benchmark_params==========================
random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
......@@ -49,5 +49,11 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml
-o Global.save_log_path:null
-o Global.benchmark:False
null:null
===========================train_benchmark_params==========================
batch_size:512
fp_items:fp32|fp16
epoch:1
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
===========================infer_benchmark_params==========================
random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
......@@ -49,5 +49,11 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml
-o Global.save_log_path:null
-o Global.benchmark:False
null:null
===========================train_benchmark_params==========================
batch_size:500
fp_items:fp32|fp16
epoch:1
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
===========================infer_benchmark_params==========================
random_infer_input:[{float32,[3,224,224]}]
random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
......@@ -51,8 +51,8 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml
null:null
null:null
===========================train_benchmark_params==========================
batch_size:32
fp_items:fp32
batch_size:32|64
fp_items:fp32|fp16
epoch:1
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
......
......@@ -51,8 +51,8 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml
null:null
null:null
===========================train_benchmark_params==========================
batch_size:128
fp_items:fp32
batch_size:128|64
fp_items:fp32|fp16
epoch:1
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
......
......@@ -50,5 +50,11 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml
-o Global.benchmark:False
null:null
null:null
===========================train_benchmark_params==========================
batch_size:64
fp_items:fp32|fp16
epoch:1
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
===========================infer_benchmark_params==========================
random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
......@@ -52,9 +52,9 @@ null:null
null:null
===========================train_benchmark_params==========================
batch_size:256|1536
fp_items:fp32
fp_items:fp32|fp16
epoch:2
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
===========================infer_benchmark_params==========================
random_infer_input:[{float32,[3,224,224]}]
random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
......@@ -51,8 +51,8 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml
null:null
null:null
===========================train_benchmark_params==========================
batch_size:64|104
fp_items:fp32
batch_size:64|104|128
fp_items:fp32|fp16
epoch:1
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
......
......@@ -50,5 +50,11 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml
-o Global.benchmark:False
null:null
null:null
===========================train_benchmark_params==========================
batch_size:128
fp_items:fp32|fp16
epoch:1
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
===========================infer_benchmark_params==========================
random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file