Unverified commit 1f58e8ba, authored by shangliang Xu, committed by GitHub

[TIPC] remove trt/mkldnn mode, and fix train benchmark params (#6055)

Parent 51b7d4cf
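The hunks below apply the same edit across many TIPC config files: `--enable_mkldnn:True|False` becomes `--enable_mkldnn:False`, `--cpu_threads:1|6` becomes `--cpu_threads:4`, `--run_mode` drops the `trt_fp32|trt_fp16` (or `trt_int8`) candidates and keeps `paddle` only, and `--run_benchmark` flips from `True` to `False` in several files. As a rough illustration of what one of these `key:value1|value2` config blocks amounts to at run time, here is a minimal Python sketch; it is not the official TIPC runner, `parse_tipc_line`/`build_command` are made-up names, and the "take the first candidate value" expansion rule is an assumption made for illustration only.

# Minimal sketch, not the official TIPC runner: parse_tipc_line/build_command and the
# "take the first candidate value" rule are assumptions made for illustration only.

def parse_tipc_line(line: str):
    # Split a "key:value1|value2" config line into its key and candidate values.
    key, _, value = line.partition(":")
    return key, value.split("|") if value else [""]

def build_command(config_lines):
    # Expand one config block into a single example inference command,
    # picking the first candidate value of every flag.
    params = dict(parse_tipc_line(line) for line in config_lines)
    script = params.get("inference", ["./deploy/python/infer.py"])[0]
    flags = []
    for key, values in params.items():
        if key.startswith("--") and values[0] not in ("", "null"):
            flags.append(f"{key}={values[0]}")
    return f"python {script} " + " ".join(flags)

if __name__ == "__main__":
    new_block = [  # values as they read after this commit
        "inference:./deploy/python/infer.py",
        "--device:gpu|cpu",
        "--enable_mkldnn:False",
        "--cpu_threads:4",
        "--batch_size:1",
        "--run_mode:paddle",
        "--image_dir:./dataset/coco/test2017/",
        "--run_benchmark:False",
    ]
    print(build_command(new_block))
    # -> python ./deploy/python/infer.py --device=gpu --enable_mkldnn=False
    #    --cpu_threads=4 --batch_size=1 --run_mode=paddle
    #    --image_dir=./dataset/coco/test2017/ --run_benchmark=False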
...@@ -39,15 +39,15 @@ infer_mode:norm ...@@ -39,15 +39,15 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1 --batch_size:1
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
--run_benchmark:True --run_benchmark:False
--trt_max_shape:1600 --trt_max_shape:1600
===========================infer_benchmark_params=========================== ===========================infer_benchmark_params===========================
numpy_infer_input:3x800x1344.npy numpy_infer_input:3x800x1344.npy
\ No newline at end of file
...@@ -39,11 +39,11 @@ infer_mode:norm ...@@ -39,11 +39,11 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
......
...@@ -39,15 +39,15 @@ infer_mode:norm ...@@ -39,15 +39,15 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1 --batch_size:1
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
--run_benchmark:True --run_benchmark:False
--trt_max_shape:1600 --trt_max_shape:1600
===========================infer_benchmark_params=========================== ===========================infer_benchmark_params===========================
numpy_infer_input:3x800x1344.npy numpy_infer_input:3x800x1344.npy
\ No newline at end of file
...@@ -39,11 +39,11 @@ infer_mode:norm ...@@ -39,11 +39,11 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
......
...@@ -39,15 +39,15 @@ infer_mode:norm ...@@ -39,15 +39,15 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1 --batch_size:1
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
--run_benchmark:True --run_benchmark:False
--trt_max_shape:1600 --trt_max_shape:1600
===========================infer_benchmark_params=========================== ===========================infer_benchmark_params===========================
numpy_infer_input:3x800x1344.npy numpy_infer_input:3x800x1344.npy
\ No newline at end of file
...@@ -39,11 +39,11 @@ infer_mode:norm ...@@ -39,11 +39,11 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
......
...@@ -39,15 +39,15 @@ infer_mode:norm ...@@ -39,15 +39,15 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1 --batch_size:1
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
--run_benchmark:True --run_benchmark:False
--trt_max_shape:1600 --trt_max_shape:1600
===========================infer_benchmark_params=========================== ===========================infer_benchmark_params===========================
numpy_infer_input:3x800x1344.npy numpy_infer_input:3x800x1344.npy
\ No newline at end of file
...@@ -39,11 +39,11 @@ infer_mode:norm ...@@ -39,11 +39,11 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
......
...@@ -39,11 +39,11 @@ infer_mode:norm ...@@ -39,11 +39,11 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
......
...@@ -39,11 +39,11 @@ infer_mode:norm ...@@ -39,11 +39,11 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
......
...@@ -39,11 +39,11 @@ infer_mode:norm ...@@ -39,11 +39,11 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
......
...@@ -39,11 +39,11 @@ infer_mode:norm ...@@ -39,11 +39,11 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
......
...@@ -39,15 +39,15 @@ infer_mode:norm ...@@ -39,15 +39,15 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1 --batch_size:1
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/spine_coco/test/ --image_dir:./dataset/spine_coco/test/
--save_log_path:null --save_log_path:null
--run_benchmark:True --run_benchmark:False
null:null null:null
===========================infer_benchmark_params=========================== ===========================infer_benchmark_params===========================
numpy_infer_input:3x1024x1024.npy numpy_infer_input:3x1024x1024.npy
\ No newline at end of file
...@@ -39,11 +39,11 @@ infer_mode:norm ...@@ -39,11 +39,11 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
......
...@@ -39,15 +39,15 @@ infer_mode:norm ...@@ -39,15 +39,15 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1 --batch_size:1
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/spine_coco/test/ --image_dir:./dataset/spine_coco/test/
--save_log_path:null --save_log_path:null
--run_benchmark:True --run_benchmark:False
null:null null:null
===========================infer_benchmark_params=========================== ===========================infer_benchmark_params===========================
numpy_infer_input:3x1024x1024.npy numpy_infer_input:3x1024x1024.npy
\ No newline at end of file
...@@ -39,11 +39,11 @@ infer_mode:norm ...@@ -39,11 +39,11 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1 --batch_size:1
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/spine_coco/test/ --image_dir:./dataset/spine_coco/test/
--save_log_path:null --save_log_path:null
......
...@@ -39,15 +39,15 @@ infer_mode:norm ...@@ -39,15 +39,15 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1 --batch_size:1
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/spine_coco/test/ --image_dir:./dataset/spine_coco/test/
--save_log_path:null --save_log_path:null
--run_benchmark:True --run_benchmark:False
null:null null:null
===========================infer_benchmark_params=========================== ===========================infer_benchmark_params===========================
numpy_infer_input:3x1024x1024.npy numpy_infer_input:3x1024x1024.npy
\ No newline at end of file
...@@ -39,11 +39,11 @@ infer_mode:norm ...@@ -39,11 +39,11 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1 --batch_size:1
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/spine_coco/test/ --image_dir:./dataset/spine_coco/test/
--save_log_path:null --save_log_path:null
......
...@@ -39,15 +39,15 @@ infer_mode:norm ...@@ -39,15 +39,15 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/wider_face/WIDER_val/images/0--Parade/ --image_dir:./dataset/wider_face/WIDER_val/images/0--Parade/
--save_log_path:null --save_log_path:null
--run_benchmark:True --run_benchmark:False
null:null null:null
===========================infer_benchmark_params=========================== ===========================infer_benchmark_params===========================
numpy_infer_input:3x800x1344.npy numpy_infer_input:3x800x1344.npy
\ No newline at end of file
...@@ -39,11 +39,11 @@ infer_mode:norm ...@@ -39,11 +39,11 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/wider_face/WIDER_val/images/0--Parade/ --image_dir:./dataset/wider_face/WIDER_val/images/0--Parade/
--save_log_path:null --save_log_path:null
......
...@@ -39,15 +39,15 @@ infer_mode:norm ...@@ -39,15 +39,15 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/wider_face/WIDER_val/images/0--Parade/ --image_dir:./dataset/wider_face/WIDER_val/images/0--Parade/
--save_log_path:null --save_log_path:null
--run_benchmark:True --run_benchmark:False
null:null null:null
===========================infer_benchmark_params=========================== ===========================infer_benchmark_params===========================
numpy_infer_input:3x800x1344.npy numpy_infer_input:3x800x1344.npy
\ No newline at end of file
...@@ -39,11 +39,11 @@ infer_mode:norm ...@@ -39,11 +39,11 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/wider_face/WIDER_val/images/0--Parade/ --image_dir:./dataset/wider_face/WIDER_val/images/0--Parade/
--save_log_path:null --save_log_path:null
......
...@@ -39,15 +39,15 @@ infer_mode:norm ...@@ -39,15 +39,15 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
--run_benchmark:True --run_benchmark:False
--trt_max_shape:1600 --trt_max_shape:1600
===========================infer_benchmark_params=========================== ===========================infer_benchmark_params===========================
numpy_infer_input:3x800x1344.npy numpy_infer_input:3x800x1344.npy
\ No newline at end of file
...@@ -39,11 +39,11 @@ infer_mode:norm ...@@ -39,11 +39,11 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
......
...@@ -39,15 +39,15 @@ infer_mode:norm ...@@ -39,15 +39,15 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
--run_benchmark:True --run_benchmark:False
--trt_max_shape:1600 --trt_max_shape:1600
===========================infer_benchmark_params=========================== ===========================infer_benchmark_params===========================
numpy_infer_input:3x800x1344.npy numpy_infer_input:3x800x1344.npy
\ No newline at end of file
...@@ -39,11 +39,11 @@ infer_mode:norm ...@@ -39,11 +39,11 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
......
...@@ -39,15 +39,15 @@ infer_mode:norm ...@@ -39,15 +39,15 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
--run_benchmark:True --run_benchmark:False
--trt_max_shape:1600 --trt_max_shape:1600
===========================infer_benchmark_params=========================== ===========================infer_benchmark_params===========================
numpy_infer_input:3x800x1344.npy numpy_infer_input:3x800x1344.npy
\ No newline at end of file
...@@ -39,11 +39,11 @@ infer_mode:norm ...@@ -39,11 +39,11 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
......
...@@ -39,15 +39,15 @@ infer_mode:norm ...@@ -39,15 +39,15 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
--run_benchmark:True --run_benchmark:False
--trt_max_shape:1600 --trt_max_shape:1600
===========================infer_benchmark_params=========================== ===========================infer_benchmark_params===========================
numpy_infer_input:3x800x1344.npy numpy_infer_input:3x800x1344.npy
\ No newline at end of file
...@@ -39,11 +39,11 @@ infer_mode:norm ...@@ -39,11 +39,11 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
......
...@@ -39,15 +39,15 @@ infer_mode:norm ...@@ -39,15 +39,15 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
--run_benchmark:True --run_benchmark:False
--trt_max_shape:1600 --trt_max_shape:1600
===========================infer_benchmark_params=========================== ===========================infer_benchmark_params===========================
numpy_infer_input:3x800x1344.npy numpy_infer_input:3x800x1344.npy
\ No newline at end of file
...@@ -39,11 +39,11 @@ infer_mode:norm ...@@ -39,11 +39,11 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
......
...@@ -39,15 +39,15 @@ infer_mode:norm ...@@ -39,15 +39,15 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
--run_benchmark:True --run_benchmark:False
--trt_max_shape:1600 --trt_max_shape:1600
===========================infer_benchmark_params=========================== ===========================infer_benchmark_params===========================
numpy_infer_input:3x800x1344.npy numpy_infer_input:3x800x1344.npy
\ No newline at end of file
...@@ -39,11 +39,11 @@ infer_mode:norm ...@@ -39,11 +39,11 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
......
...@@ -39,15 +39,15 @@ infer_mode:norm ...@@ -39,15 +39,15 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
--run_benchmark:True --run_benchmark:False
--trt_max_shape:1600 --trt_max_shape:1600
===========================train_benchmark_params========================== ===========================train_benchmark_params==========================
batch_size:2|8 batch_size:2|8
......
...@@ -39,11 +39,11 @@ infer_mode:norm ...@@ -39,11 +39,11 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
......
...@@ -39,15 +39,15 @@ infer_mode:norm ...@@ -39,15 +39,15 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
--run_benchmark:True --run_benchmark:False
--trt_max_shape:1600 --trt_max_shape:1600
===========================infer_benchmark_params=========================== ===========================infer_benchmark_params===========================
numpy_infer_input:3x800x1344.npy numpy_infer_input:3x800x1344.npy
\ No newline at end of file
...@@ -39,11 +39,11 @@ infer_mode:norm ...@@ -39,11 +39,11 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
......
...@@ -39,15 +39,15 @@ infer_mode:norm ...@@ -39,15 +39,15 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
--run_benchmark:True --run_benchmark:False
--trt_max_shape:1600 --trt_max_shape:1600
===========================infer_benchmark_params=========================== ===========================infer_benchmark_params===========================
numpy_infer_input:3x800x1344.npy numpy_infer_input:3x800x1344.npy
\ No newline at end of file
...@@ -39,11 +39,11 @@ infer_mode:norm ...@@ -39,11 +39,11 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
......
...@@ -39,15 +39,15 @@ infer_mode:norm ...@@ -39,15 +39,15 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1 --batch_size:1
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
--run_benchmark:True --run_benchmark:False
--trt_max_shape:1600 --trt_max_shape:1600
===========================infer_benchmark_params=========================== ===========================infer_benchmark_params===========================
numpy_infer_input:3x800x1344.npy numpy_infer_input:3x800x1344.npy
\ No newline at end of file
...@@ -39,11 +39,11 @@ infer_mode:norm ...@@ -39,11 +39,11 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
......
...@@ -39,15 +39,15 @@ infer_mode:norm ...@@ -39,15 +39,15 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
--run_benchmark:True --run_benchmark:False
--trt_max_shape:1600 --trt_max_shape:1600
===========================infer_benchmark_params=========================== ===========================infer_benchmark_params===========================
numpy_infer_input:3x800x1344.npy numpy_infer_input:3x800x1344.npy
\ No newline at end of file
...@@ -39,11 +39,11 @@ infer_mode:norm ...@@ -39,11 +39,11 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
......
...@@ -39,15 +39,15 @@ infer_mode:norm ...@@ -39,15 +39,15 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
--run_benchmark:True --run_benchmark:False
null:null null:null
===========================infer_benchmark_params=========================== ===========================infer_benchmark_params===========================
numpy_infer_input:3x800x1344_2.npy numpy_infer_input:3x800x1344_2.npy
\ No newline at end of file
...@@ -39,11 +39,11 @@ infer_mode:norm ...@@ -39,11 +39,11 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
......
...@@ -39,15 +39,15 @@ infer_mode:norm ...@@ -39,15 +39,15 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
--run_benchmark:True --run_benchmark:False
--trt_max_shape:1600 --trt_max_shape:1600
===========================train_benchmark_params========================== ===========================train_benchmark_params==========================
batch_size:2|8 batch_size:2|8
......
...@@ -39,11 +39,11 @@ infer_mode:norm ...@@ -39,11 +39,11 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
......
...@@ -39,11 +39,11 @@ infer_mode:norm ...@@ -39,11 +39,11 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
......
...@@ -39,11 +39,11 @@ infer_mode:norm ...@@ -39,11 +39,11 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
......
...@@ -39,15 +39,15 @@ infer_mode:norm ...@@ -39,15 +39,15 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/keypoint_infer.py inference:./deploy/python/keypoint_infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
--run_benchmark:True --run_benchmark:False
null:null null:null
===========================infer_benchmark_params=========================== ===========================infer_benchmark_params===========================
random_infer_input:[{float32,[3,256,192]}] random_infer_input:[{float32,[3,256,192]}]
\ No newline at end of file
...@@ -39,11 +39,11 @@ infer_mode:norm ...@@ -39,11 +39,11 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
......
...@@ -39,15 +39,15 @@ infer_mode:norm ...@@ -39,15 +39,15 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/keypoint_infer.py inference:./deploy/python/keypoint_infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
--run_benchmark:True --run_benchmark:False
null:null null:null
===========================infer_benchmark_params=========================== ===========================infer_benchmark_params===========================
random_infer_input:[{float32,[3,256,192]}] random_infer_input:[{float32,[3,256,192]}]
\ No newline at end of file
...@@ -39,11 +39,11 @@ infer_mode:norm ...@@ -39,11 +39,11 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
......
...@@ -39,15 +39,15 @@ infer_mode:norm ...@@ -39,15 +39,15 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/keypoint_infer.py inference:./deploy/python/keypoint_infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
--run_benchmark:True --run_benchmark:False
--trt_max_shape:1600 --trt_max_shape:1600
===========================train_benchmark_params========================== ===========================train_benchmark_params==========================
batch_size:20|24 batch_size:20|24
......
...@@ -39,11 +39,11 @@ infer_mode:norm ...@@ -39,11 +39,11 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
......
...@@ -39,15 +39,15 @@ infer_mode:norm ...@@ -39,15 +39,15 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/keypoint_infer.py inference:./deploy/python/keypoint_infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
--run_benchmark:True --run_benchmark:False
null:null null:null
===========================train_benchmark_params========================== ===========================train_benchmark_params==========================
batch_size:64|160 batch_size:64|160
......
...@@ -39,11 +39,11 @@ infer_mode:norm ...@@ -39,11 +39,11 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
......
...@@ -39,19 +39,13 @@ infer_mode:kl_quant ...@@ -39,19 +39,13 @@ infer_mode:kl_quant
infer_quant:True infer_quant:True
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_int8 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
--run_benchmark:False --run_benchmark:False
--trt_max_shape:1600 --trt_max_shape:1600
===========================train_benchmark_params========================== \ No newline at end of file
batch_size:_benchmark_batch_size
fp_items:_benchmark_fp_items
epoch:_benchmark_epoch
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
\ No newline at end of file
...@@ -19,10 +19,10 @@ infer_quant:False ...@@ -19,10 +19,10 @@ infer_quant:False
inference:./deploy/cpp/build/main inference:./deploy/cpp/build/main
--device:gpu|cpu --device:gpu|cpu
--use_mkldnn:True|False --use_mkldnn:True|False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--run_benchmark:False --run_benchmark:False
......
...@@ -39,15 +39,15 @@ infer_mode:norm ...@@ -39,15 +39,15 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/keypoint_infer.py inference:./deploy/python/keypoint_infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
--run_benchmark:True --run_benchmark:False
null:null null:null
===========================infer_benchmark_params=========================== ===========================infer_benchmark_params===========================
random_infer_input:[{float32,[3,128,96]}] random_infer_input:[{float32,[3,128,96]}]
\ No newline at end of file
...@@ -39,11 +39,11 @@ infer_mode:norm ...@@ -39,11 +39,11 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
......
...@@ -39,15 +39,15 @@ infer_mode:norm ...@@ -39,15 +39,15 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1 --batch_size:1
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
--run_benchmark:True --run_benchmark:False
--trt_max_shape:1600 --trt_max_shape:1600
===========================infer_benchmark_params=========================== ===========================infer_benchmark_params===========================
numpy_infer_input:3x800x1344.npy numpy_infer_input:3x800x1344.npy
\ No newline at end of file
...@@ -39,11 +39,11 @@ infer_mode:norm ...@@ -39,11 +39,11 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
......
...@@ -39,15 +39,15 @@ infer_mode:norm ...@@ -39,15 +39,15 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1 --batch_size:1
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
--run_benchmark:True --run_benchmark:False
--trt_max_shape:1600 --trt_max_shape:1600
===========================infer_benchmark_params=========================== ===========================infer_benchmark_params===========================
numpy_infer_input:3x800x1344.npy numpy_infer_input:3x800x1344.npy
\ No newline at end of file
...@@ -39,11 +39,11 @@ infer_mode:norm ...@@ -39,11 +39,11 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
......
...@@ -39,15 +39,15 @@ infer_mode:norm ...@@ -39,15 +39,15 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1 --batch_size:1
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
--run_benchmark:True --run_benchmark:False
--trt_max_shape:1600 --trt_max_shape:1600
===========================train_benchmark_params========================== ===========================train_benchmark_params==========================
batch_size:2|4 batch_size:2|4
......
...@@ -39,11 +39,11 @@ infer_mode:norm ...@@ -39,11 +39,11 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
......
...@@ -39,19 +39,13 @@ infer_mode:kl_quant ...@@ -39,19 +39,13 @@ infer_mode:kl_quant
infer_quant:True infer_quant:True
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_int8 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
--run_benchmark:False --run_benchmark:False
--trt_max_shape:1600 --trt_max_shape:1600
===========================train_benchmark_params========================== \ No newline at end of file
batch_size:_benchmark_batch_size
fp_items:_benchmark_fp_items
epoch:_benchmark_epoch
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
\ No newline at end of file
...@@ -19,10 +19,10 @@ infer_quant:False ...@@ -19,10 +19,10 @@ infer_quant:False
inference:./deploy/cpp/build/main inference:./deploy/cpp/build/main
--device:gpu|cpu --device:gpu|cpu
--use_mkldnn:True|False --use_mkldnn:True|False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--run_benchmark:False --run_benchmark:False
......
...@@ -39,15 +39,15 @@ infer_mode:norm ...@@ -39,15 +39,15 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1 --batch_size:1
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
--run_benchmark:True --run_benchmark:False
--trt_max_shape:1600 --trt_max_shape:1600
===========================train_benchmark_params========================== ===========================train_benchmark_params==========================
batch_size:2|4 batch_size:2|4
......
...@@ -39,11 +39,11 @@ infer_mode:norm ...@@ -39,11 +39,11 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
......
...@@ -39,15 +39,15 @@ infer_mode:norm ...@@ -39,15 +39,15 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1 --batch_size:1
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
--run_benchmark:True --run_benchmark:False
--trt_max_shape:1600 --trt_max_shape:1600
===========================infer_benchmark_params=========================== ===========================infer_benchmark_params===========================
numpy_infer_input:3x800x1344.npy numpy_infer_input:3x800x1344.npy
\ No newline at end of file
...@@ -39,11 +39,11 @@ infer_mode:norm ...@@ -39,11 +39,11 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
......
...@@ -39,15 +39,15 @@ infer_mode:norm ...@@ -39,15 +39,15 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1 --batch_size:1
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
--run_benchmark:True --run_benchmark:False
--trt_max_shape:1600 --trt_max_shape:1600
===========================infer_benchmark_params=========================== ===========================infer_benchmark_params===========================
numpy_infer_input:3x800x1344.npy numpy_infer_input:3x800x1344.npy
\ No newline at end of file
...@@ -39,11 +39,11 @@ infer_mode:norm ...@@ -39,11 +39,11 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
......
...@@ -39,11 +39,11 @@ infer_mode:norm ...@@ -39,11 +39,11 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/pptracking/python/mot_jde_infer.py inference:./deploy/pptracking/python/mot_jde_infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--video_file:./dataset/mot/test.mp4 --video_file:./dataset/mot/test.mp4
--save_log_path:null --save_log_path:null
......
...@@ -39,11 +39,11 @@ infer_mode:norm ...@@ -39,11 +39,11 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/pptracking/python/mot_jde_infer.py inference:./deploy/pptracking/python/mot_jde_infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--video_file:./dataset/mot/test.mp4 --video_file:./dataset/mot/test.mp4
--save_log_path:null --save_log_path:null
......
...@@ -39,11 +39,11 @@ infer_mode:norm ...@@ -39,11 +39,11 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/pptracking/python/mot_jde_infer.py inference:./deploy/pptracking/python/mot_jde_infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--video_file:./dataset/mot/test.mp4 --video_file:./dataset/mot/test.mp4
--save_log_path:null --save_log_path:null
......
...@@ -39,11 +39,11 @@ infer_mode:norm ...@@ -39,11 +39,11 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/pptracking/python/mot_jde_infer.py inference:./deploy/pptracking/python/mot_jde_infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--video_file:./dataset/mot/test.mp4 --video_file:./dataset/mot/test.mp4
--save_log_path:null --save_log_path:null
......
...@@ -39,11 +39,11 @@ infer_mode:norm ...@@ -39,11 +39,11 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/pptracking/python/mot_jde_infer.py inference:./deploy/pptracking/python/mot_jde_infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--video_file:./dataset/mot/test.mp4 --video_file:./dataset/mot/test.mp4
--save_log_path:null --save_log_path:null
......
...@@ -39,11 +39,11 @@ infer_mode:norm ...@@ -39,11 +39,11 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/pptracking/python/mot_jde_infer.py inference:./deploy/pptracking/python/mot_jde_infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--video_file:./dataset/mot/test.mp4 --video_file:./dataset/mot/test.mp4
--save_log_path:null --save_log_path:null
......
...@@ -39,11 +39,11 @@ infer_mode:norm ...@@ -39,11 +39,11 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/pptracking/python/mot_jde_infer.py inference:./deploy/pptracking/python/mot_jde_infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--video_file:./dataset/mot/test.mp4 --video_file:./dataset/mot/test.mp4
--save_log_path:null --save_log_path:null
......
...@@ -39,11 +39,11 @@ infer_mode:norm ...@@ -39,11 +39,11 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/pptracking/python/mot_jde_infer.py inference:./deploy/pptracking/python/mot_jde_infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--video_file:./dataset/mot/test.mp4 --video_file:./dataset/mot/test.mp4
--save_log_path:null --save_log_path:null
......
...@@ -39,11 +39,11 @@ infer_mode:norm ...@@ -39,11 +39,11 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/pptracking/python/mot_jde_infer.py inference:./deploy/pptracking/python/mot_jde_infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--video_file:./dataset/mot/test.mp4 --video_file:./dataset/mot/test.mp4
--save_log_path:null --save_log_path:null
......
...@@ -39,11 +39,11 @@ infer_mode:norm ...@@ -39,11 +39,11 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/pptracking/python/mot_jde_infer.py inference:./deploy/pptracking/python/mot_jde_infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--video_file:./dataset/mot/test.mp4 --video_file:./dataset/mot/test.mp4
--save_log_path:null --save_log_path:null
......
...@@ -19,10 +19,10 @@ infer_quant:False ...@@ -19,10 +19,10 @@ infer_quant:False
inference:./deploy/cpp/build/main inference:./deploy/cpp/build/main
--device:gpu|cpu --device:gpu|cpu
--use_mkldnn:True|False --use_mkldnn:True|False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--run_benchmark:False --run_benchmark:False
......
...@@ -39,15 +39,15 @@ infer_mode:norm ...@@ -39,15 +39,15 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1 --batch_size:1
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
--run_benchmark:True --run_benchmark:False
null:null null:null
===========================infer_benchmark_params=========================== ===========================infer_benchmark_params===========================
numpy_infer_input:3x640x640_2.npy numpy_infer_input:3x640x640_2.npy
\ No newline at end of file
...@@ -39,11 +39,11 @@ infer_mode:norm ...@@ -39,11 +39,11 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
......
...@@ -19,10 +19,10 @@ infer_quant:False ...@@ -19,10 +19,10 @@ infer_quant:False
inference:./deploy/cpp/build/main inference:./deploy/cpp/build/main
--device:gpu|cpu --device:gpu|cpu
--use_mkldnn:True|False --use_mkldnn:True|False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--run_benchmark:False --run_benchmark:False
......
...@@ -39,15 +39,15 @@ infer_mode:norm ...@@ -39,15 +39,15 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1 --batch_size:1
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
--run_benchmark:True --run_benchmark:False
null:null null:null
===========================infer_benchmark_params=========================== ===========================infer_benchmark_params===========================
numpy_infer_input:3x640x640_2.npy numpy_infer_input:3x640x640_2.npy
\ No newline at end of file
...@@ -39,11 +39,11 @@ infer_mode:norm ...@@ -39,11 +39,11 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
......
...@@ -19,10 +19,10 @@ infer_quant:False ...@@ -19,10 +19,10 @@ infer_quant:False
inference:./deploy/cpp/build/main inference:./deploy/cpp/build/main
--device:gpu|cpu --device:gpu|cpu
--use_mkldnn:True|False --use_mkldnn:True|False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--run_benchmark:False --run_benchmark:False
......
...@@ -39,15 +39,15 @@ infer_mode:norm ...@@ -39,15 +39,15 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1 --batch_size:1
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
--run_benchmark:True --run_benchmark:False
null:null null:null
===========================infer_benchmark_params=========================== ===========================infer_benchmark_params===========================
numpy_infer_input:3x416x416_2.npy numpy_infer_input:3x416x416_2.npy
\ No newline at end of file
...@@ -39,11 +39,11 @@ infer_mode:norm ...@@ -39,11 +39,11 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
......
...@@ -19,10 +19,10 @@ infer_quant:False ...@@ -19,10 +19,10 @@ infer_quant:False
inference:./deploy/cpp/build/main inference:./deploy/cpp/build/main
--device:gpu|cpu --device:gpu|cpu
--use_mkldnn:True|False --use_mkldnn:True|False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--run_benchmark:False --run_benchmark:False
......
...@@ -39,15 +39,15 @@ infer_mode:norm ...@@ -39,15 +39,15 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1 --batch_size:1
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
--run_benchmark:True --run_benchmark:False
null:null null:null
===========================infer_benchmark_params=========================== ===========================infer_benchmark_params===========================
numpy_infer_input:3x416x416_2.npy numpy_infer_input:3x416x416_2.npy
\ No newline at end of file
...@@ -39,11 +39,11 @@ infer_mode:norm ...@@ -39,11 +39,11 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
......
...@@ -19,10 +19,10 @@ infer_quant:False ...@@ -19,10 +19,10 @@ infer_quant:False
inference:./deploy/cpp/build/main inference:./deploy/cpp/build/main
--device:gpu|cpu --device:gpu|cpu
--use_mkldnn:True|False --use_mkldnn:True|False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1|2 --batch_size:1|2
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--run_benchmark:False --run_benchmark:False
......
...@@ -39,15 +39,15 @@ infer_mode:norm ...@@ -39,15 +39,15 @@ infer_mode:norm
infer_quant:False infer_quant:False
inference:./deploy/python/infer.py inference:./deploy/python/infer.py
--device:gpu|cpu --device:gpu|cpu
--enable_mkldnn:True|False --enable_mkldnn:False
--cpu_threads:1|6 --cpu_threads:4
--batch_size:1 --batch_size:1
--use_tensorrt:null --use_tensorrt:null
--run_mode:paddle|trt_fp32|trt_fp16 --run_mode:paddle
--model_dir: --model_dir:
--image_dir:./dataset/coco/test2017/ --image_dir:./dataset/coco/test2017/
--save_log_path:null --save_log_path:null
--run_benchmark:True --run_benchmark:False
null:null null:null
===========================infer_benchmark_params=========================== ===========================infer_benchmark_params===========================
numpy_infer_input:3x416x416_2.npy numpy_infer_input:3x416x416_2.npy
\ No newline at end of file
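
Note (added for illustration): every hunk above edits the same kind of TIPC config line, where each entry is `flag:value` and a `|`-separated value lists alternatives that the test driver sweeps over; this change narrows the inference sweep to a single CPU setting (`--enable_mkldnn:False`, `--cpu_threads:4`, `--run_mode:paddle`) and turns `--run_benchmark` off. The Python sketch below shows how such pipe-separated lines could be expanded into concrete inference commands. It is a minimal illustration only: the parsing helpers, the config snippet, and the `--flag=value` joining are assumptions, not the actual TIPC shell driver under `test_tipc/`.

```python
# Hypothetical sketch of expanding TIPC "flag:value" lines into commands.
# The real driver is a shell script in test_tipc/ and may differ in detail.
from itertools import product

CONFIG_LINES = [
    "--device:gpu|cpu",        # '|' marks alternatives to sweep over
    "--enable_mkldnn:False",
    "--cpu_threads:4",
    "--batch_size:1|2",
    "--run_mode:paddle",
    "--run_benchmark:False",
]

def parse_options(lines):
    """Split each 'flag:value' line; '|' separates alternative values."""
    options = {}
    for line in lines:
        flag, _, value = line.partition(":")
        options[flag] = value.split("|") if value else [""]
    return options

def expand_commands(script, options):
    """Yield one command string per combination of swept values."""
    flags, choices = zip(*options.items())
    for combo in product(*choices):
        args = " ".join(f"{f}={v}" for f, v in zip(flags, combo) if v)
        yield f"python {script} {args}"

if __name__ == "__main__":
    for cmd in expand_commands("./deploy/python/infer.py", parse_options(CONFIG_LINES)):
        print(cmd)
```

With the values above, the sweep collapses to four commands (gpu/cpu crossed with batch size 1/2) instead of the larger grid produced by the old `True|False`, `1|6`, and `paddle|trt_fp32|trt_fp16` settings.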