diff --git a/tests/config/DarkNet53.txt b/tests/config/DarkNet53.txt index 589d655112283d31de41ba27a5b9adb7e590c325..9b9b95c88f1dae2ac4472eaa7d175fc844fd8984 100644 --- a/tests/config/DarkNet53.txt +++ b/tests/config/DarkNet53.txt @@ -41,7 +41,7 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o Global.use_gpu:True|False -o Global.enable_mkldnn:True|False -o Global.cpu_num_threads:1|6 --o Global.batch_size:1 +-o Global.batch_size:1|16 -o Global.use_tensorrt:True|False -o Global.use_fp16:True|False -o Global.inference_model_dir:../inference diff --git a/tests/config/GhostNet_x0_5.txt b/tests/config/GhostNet_x0_5.txt index 9505f9ad382d2a4cf4a4aab6a18025c26b20e17e..b752da46a2bd3068284166bcbc400f159f12738f 100644 --- a/tests/config/GhostNet_x0_5.txt +++ b/tests/config/GhostNet_x0_5.txt @@ -41,7 +41,7 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o Global.use_gpu:True|False -o Global.enable_mkldnn:True|False -o Global.cpu_num_threads:1|6 --o Global.batch_size:1 +-o Global.batch_size:1|16 -o Global.use_tensorrt:True|False -o Global.use_fp16:True|False -o Global.inference_model_dir:../inference diff --git a/tests/config/GhostNet_x1_0.txt b/tests/config/GhostNet_x1_0.txt index bcef875c9fea4bab3e4bf5383024c5a48cf6419a..c927b2bf3358fc952bb2067e912fa5e6f65a5925 100644 --- a/tests/config/GhostNet_x1_0.txt +++ b/tests/config/GhostNet_x1_0.txt @@ -41,7 +41,7 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o Global.use_gpu:True|False -o Global.enable_mkldnn:True|False -o Global.cpu_num_threads:1|6 --o Global.batch_size:1 +-o Global.batch_size:1|16 -o Global.use_tensorrt:True|False -o Global.use_fp16:True|False -o Global.inference_model_dir:../inference diff --git a/tests/config/GhostNet_x1_3.txt b/tests/config/GhostNet_x1_3.txt index 7501dec594d97459802a97cf5209b6bdf63fa188..5766332f3c08f94c02237c724a20e8ec24442ec8 100644 --- a/tests/config/GhostNet_x1_3.txt +++ b/tests/config/GhostNet_x1_3.txt @@ -41,7 +41,7 @@ 
inference:python/predict_cls.py -c configs/inference_cls.yaml -o Global.use_gpu:True|False -o Global.enable_mkldnn:True|False -o Global.cpu_num_threads:1|6 --o Global.batch_size:1 +-o Global.batch_size:1|16 -o Global.use_tensorrt:True|False -o Global.use_fp16:True|False -o Global.inference_model_dir:../inference diff --git a/tests/config/HRNet_W18_C.txt b/tests/config/HRNet_W18_C.txt index 0609047b834044d2edcf0874c5da00e722a4b4e4..c5334e38df3a9516f2a2e5d05bd30c704350351d 100644 --- a/tests/config/HRNet_W18_C.txt +++ b/tests/config/HRNet_W18_C.txt @@ -41,7 +41,7 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o Global.use_gpu:True|False -o Global.enable_mkldnn:True|False -o Global.cpu_num_threads:1|6 --o Global.batch_size:1 +-o Global.batch_size:1|16 -o Global.use_tensorrt:True|False -o Global.use_fp16:True|False -o Global.inference_model_dir:../inference diff --git a/tests/config/LeViT_128S.txt b/tests/config/LeViT_128S.txt index 35feb2ffb1ecd55359d19d83e60d3d683e68d846..6ddc0cd9ffd51f991c7f67a1ce9723e6e4ac2381 100644 --- a/tests/config/LeViT_128S.txt +++ b/tests/config/LeViT_128S.txt @@ -41,7 +41,7 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|Fasle +-o Global.use_gpu:True|False -o Global.enable_mkldnn:True|False -o Global.cpu_num_threads:1|6 --o Global.batch_size:1 +-o Global.batch_size:1|16 -o Global.use_tensorrt:True|False -o Global.use_fp16:True|False -o Global.inference_model_dir:../inference diff --git a/tests/config/MobileNetV1.txt b/tests/config/MobileNetV1.txt index 278906219a377a362e728cd595a025f10f2ab820..319f852dbd7731ccedc22af9ab415080e32993fe 100644 --- a/tests/config/MobileNetV1.txt +++ b/tests/config/MobileNetV1.txt @@ -41,7 +41,7 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o Global.use_gpu:True|False -o Global.enable_mkldnn:True|False -o Global.cpu_num_threads:1|6 --o Global.batch_size:1 +-o Global.batch_size:1|16 -o Global.use_tensorrt:True|False -o Global.use_fp16:True|False -o
Global.inference_model_dir:../inference diff --git a/tests/config/MobileNetV1_x0_25.txt b/tests/config/MobileNetV1_x0_25.txt index 7b8271752bdd7c73ee268ef4ea9e6a6077d4ad8b..a46a8e30facf657324dd041f5cda44af85596fa2 100644 --- a/tests/config/MobileNetV1_x0_25.txt +++ b/tests/config/MobileNetV1_x0_25.txt @@ -41,7 +41,7 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o Global.use_gpu:True|False -o Global.enable_mkldnn:True|False -o Global.cpu_num_threads:1|6 --o Global.batch_size:1 +-o Global.batch_size:1|16 -o Global.use_tensorrt:True|False -o Global.use_fp16:True|False -o Global.inference_model_dir:../inference diff --git a/tests/config/MobileNetV1_x0_5.txt b/tests/config/MobileNetV1_x0_5.txt index b7ba44dffc05b6aa006b1ad2d10c40360454368f..bca197b7900af161abf5d04335b10202801a0c26 100644 --- a/tests/config/MobileNetV1_x0_5.txt +++ b/tests/config/MobileNetV1_x0_5.txt @@ -41,7 +41,7 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o Global.use_gpu:True|False -o Global.enable_mkldnn:True|False -o Global.cpu_num_threads:1|6 --o Global.batch_size:1 +-o Global.batch_size:1|16 -o Global.use_tensorrt:True|False -o Global.use_fp16:True|False -o Global.inference_model_dir:../inference diff --git a/tests/config/MobileNetV1_x0_75.txt b/tests/config/MobileNetV1_x0_75.txt index dbab4deb1821831f4e49ab4c9ec3ee1a9a388252..ba81e35ce002b1278eacacc4e693e9932d1f195e 100644 --- a/tests/config/MobileNetV1_x0_75.txt +++ b/tests/config/MobileNetV1_x0_75.txt @@ -41,7 +41,7 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o Global.use_gpu:True|False -o Global.enable_mkldnn:True|False -o Global.cpu_num_threads:1|6 --o Global.batch_size:1 +-o Global.batch_size:1|16 -o Global.use_tensorrt:True|False -o Global.use_fp16:True|False -o Global.inference_model_dir:../inference diff --git a/tests/config/MobileNetV2.txt b/tests/config/MobileNetV2.txt index 3904cb5d6081ea6dde63bb6ff69487d103cf55ef..d4cf3fc5d48685f627b4e76cb32e7524565c8944 100644 --- 
a/tests/config/MobileNetV2.txt +++ b/tests/config/MobileNetV2.txt @@ -41,7 +41,7 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o Global.use_gpu:True|False -o Global.enable_mkldnn:True|False -o Global.cpu_num_threads:1|6 --o Global.batch_size:1 +-o Global.batch_size:1|16 -o Global.use_tensorrt:True|False -o Global.use_fp16:True|False -o Global.inference_model_dir:../inference diff --git a/tests/config/MobileNetV2_x0_25.txt b/tests/config/MobileNetV2_x0_25.txt index 7fb93555d9a932a8ee6a88075130e52fa253c732..5368aef958bfd8c4edef819b6963c55891388b84 100644 --- a/tests/config/MobileNetV2_x0_25.txt +++ b/tests/config/MobileNetV2_x0_25.txt @@ -41,7 +41,7 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o Global.use_gpu:True|False -o Global.enable_mkldnn:True|False -o Global.cpu_num_threads:1|6 --o Global.batch_size:1 +-o Global.batch_size:1|16 -o Global.use_tensorrt:True|False -o Global.use_fp16:True|False -o Global.inference_model_dir:../inference diff --git a/tests/config/MobileNetV2_x0_5.txt b/tests/config/MobileNetV2_x0_5.txt index 5a36396d0d1ef465126fdb006c89a2951ccea1ed..ba35c3affc57404382bd73215954d4de7454fce9 100644 --- a/tests/config/MobileNetV2_x0_5.txt +++ b/tests/config/MobileNetV2_x0_5.txt @@ -41,7 +41,7 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o Global.use_gpu:True|False -o Global.enable_mkldnn:True|False -o Global.cpu_num_threads:1|6 --o Global.batch_size:1 +-o Global.batch_size:1|16 -o Global.use_tensorrt:True|False -o Global.use_fp16:True|False -o Global.inference_model_dir:../inference diff --git a/tests/config/MobileNetV2_x0_75.txt b/tests/config/MobileNetV2_x0_75.txt index cf67fefd36b63f9ff351a8795642d97a8a888e62..0f5a8ab8a71da4bcd44592f7f6cf3160def6b206 100644 --- a/tests/config/MobileNetV2_x0_75.txt +++ b/tests/config/MobileNetV2_x0_75.txt @@ -41,7 +41,7 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o Global.use_gpu:True|False -o Global.enable_mkldnn:True|False -o 
Global.cpu_num_threads:1|6 --o Global.batch_size:1 +-o Global.batch_size:1|16 -o Global.use_tensorrt:True|False -o Global.use_fp16:True|False -o Global.inference_model_dir:../inference diff --git a/tests/config/MobileNetV2_x1_5.txt b/tests/config/MobileNetV2_x1_5.txt index 657bb24575a712a4725c3642419cd85e981935c7..55da65b1268b022831472eb9e8f88e8690e3a7dd 100644 --- a/tests/config/MobileNetV2_x1_5.txt +++ b/tests/config/MobileNetV2_x1_5.txt @@ -41,7 +41,7 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o Global.use_gpu:True|False -o Global.enable_mkldnn:True|False -o Global.cpu_num_threads:1|6 --o Global.batch_size:1 +-o Global.batch_size:1|16 -o Global.use_tensorrt:True|False -o Global.use_fp16:True|False -o Global.inference_model_dir:../inference diff --git a/tests/config/MobileNetV2_x2_0.txt b/tests/config/MobileNetV2_x2_0.txt index 9ef458084cbeaac39099ef2120e20bb1f6e16534..cd32244ff257a39c248e5bb65aec0ef0590356df 100644 --- a/tests/config/MobileNetV2_x2_0.txt +++ b/tests/config/MobileNetV2_x2_0.txt @@ -41,7 +41,7 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o Global.use_gpu:True|False -o Global.enable_mkldnn:True|False -o Global.cpu_num_threads:1|6 --o Global.batch_size:1 +-o Global.batch_size:1|16 -o Global.use_tensorrt:True|False -o Global.use_fp16:True|False -o Global.inference_model_dir:../inference diff --git a/tests/config/MobileNetV3_large_x0_35.txt b/tests/config/MobileNetV3_large_x0_35.txt index e139c0ae93ec00b59ea882efdcc89a1c6e8c9d95..bc6e77c0bd3fa3ca32d2a848e1f8fe9337def63e 100644 --- a/tests/config/MobileNetV3_large_x0_35.txt +++ b/tests/config/MobileNetV3_large_x0_35.txt @@ -41,7 +41,7 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o Global.use_gpu:True|False -o Global.enable_mkldnn:True|False -o Global.cpu_num_threads:1|6 --o Global.batch_size:1 +-o Global.batch_size:1|16 -o Global.use_tensorrt:True|False -o Global.use_fp16:True|False -o Global.inference_model_dir:../inference diff --git 
a/tests/config/MobileNetV3_large_x0_5.txt b/tests/config/MobileNetV3_large_x0_5.txt index e1ee2c53fbeea3f07b1cd8da90bdf8ada733943a..693423dacef1bd3029d65ba88dcfc3686ea3b13d 100644 --- a/tests/config/MobileNetV3_large_x0_5.txt +++ b/tests/config/MobileNetV3_large_x0_5.txt @@ -41,7 +41,7 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o Global.use_gpu:True|False -o Global.enable_mkldnn:True|False -o Global.cpu_num_threads:1|6 --o Global.batch_size:1 +-o Global.batch_size:1|16 -o Global.use_tensorrt:True|False -o Global.use_fp16:True|False -o Global.inference_model_dir:../inference diff --git a/tests/config/MobileNetV3_large_x0_75.txt b/tests/config/MobileNetV3_large_x0_75.txt index 685c1f6fbc8fccdbb1cff872a2ae6c693be7d4ef..24b35de56125df1770012c9111399a5a393051e0 100644 --- a/tests/config/MobileNetV3_large_x0_75.txt +++ b/tests/config/MobileNetV3_large_x0_75.txt @@ -41,7 +41,7 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o Global.use_gpu:True|False -o Global.enable_mkldnn:True|False -o Global.cpu_num_threads:1|6 --o Global.batch_size:1 +-o Global.batch_size:1|16 -o Global.use_tensorrt:True|False -o Global.use_fp16:True|False -o Global.inference_model_dir:../inference diff --git a/tests/config/MobileNetV3_large_x1_0.txt b/tests/config/MobileNetV3_large_x1_0.txt index f290db3eaf2299f61ce19f745f5fbacfc42571fe..873072f256deee6b107ae44f00c1f85eff1f13e1 100644 --- a/tests/config/MobileNetV3_large_x1_0.txt +++ b/tests/config/MobileNetV3_large_x1_0.txt @@ -41,7 +41,7 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o Global.use_gpu:True|False -o Global.enable_mkldnn:True|False -o Global.cpu_num_threads:1|6 --o Global.batch_size:1 +-o Global.batch_size:1|16 -o Global.use_tensorrt:True|False -o Global.use_fp16:True|False -o Global.inference_model_dir:../inference diff --git a/tests/config/MobileNetV3_large_x1_25.txt b/tests/config/MobileNetV3_large_x1_25.txt index 
7f35708350a90276c6278853909a9f0814fd7323..34057be52366b5c1ea81ab12c2b883c8040dd027 100644 --- a/tests/config/MobileNetV3_large_x1_25.txt +++ b/tests/config/MobileNetV3_large_x1_25.txt @@ -41,7 +41,7 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o Global.use_gpu:True|False -o Global.enable_mkldnn:True|False -o Global.cpu_num_threads:1|6 --o Global.batch_size:1 +-o Global.batch_size:1|16 -o Global.use_tensorrt:True|False -o Global.use_fp16:True|False -o Global.inference_model_dir:../inference diff --git a/tests/config/MobileNetV3_small_x0_35.txt b/tests/config/MobileNetV3_small_x0_35.txt index 40fc5987814ec3edb284a2a45adccd67f64d90bd..0f8b75e9999f2f44d41b2d46153cffba3b926b2a 100644 --- a/tests/config/MobileNetV3_small_x0_35.txt +++ b/tests/config/MobileNetV3_small_x0_35.txt @@ -41,7 +41,7 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o Global.use_gpu:True|False -o Global.enable_mkldnn:True|False -o Global.cpu_num_threads:1|6 --o Global.batch_size:1 +-o Global.batch_size:1|16 -o Global.use_tensorrt:True|False -o Global.use_fp16:True|False -o Global.inference_model_dir:../inference diff --git a/tests/config/MobileNetV3_small_x0_5.txt b/tests/config/MobileNetV3_small_x0_5.txt index d444054fb31d90c6482ab37c743e9f282bf565ea..0693c086507c1bbc74f27281507a46f0b673457d 100644 --- a/tests/config/MobileNetV3_small_x0_5.txt +++ b/tests/config/MobileNetV3_small_x0_5.txt @@ -41,7 +41,7 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o Global.use_gpu:True|False -o Global.enable_mkldnn:True|False -o Global.cpu_num_threads:1|6 --o Global.batch_size:1 +-o Global.batch_size:1|16 -o Global.use_tensorrt:True|False -o Global.use_fp16:True|False -o Global.inference_model_dir:../inference diff --git a/tests/config/MobileNetV3_small_x0_75.txt b/tests/config/MobileNetV3_small_x0_75.txt index 95cd5b65d752b1ce276914be847a5334028aa901..ce42427370af1965d8dc988ee9d8a7bc59859825 100644 --- a/tests/config/MobileNetV3_small_x0_75.txt +++ 
b/tests/config/MobileNetV3_small_x0_75.txt @@ -41,7 +41,7 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o Global.use_gpu:True|False -o Global.enable_mkldnn:True|False -o Global.cpu_num_threads:1|6 --o Global.batch_size:1 +-o Global.batch_size:1|16 -o Global.use_tensorrt:True|False -o Global.use_fp16:True|False -o Global.inference_model_dir:../inference diff --git a/tests/config/MobileNetV3_small_x1_0.txt b/tests/config/MobileNetV3_small_x1_0.txt index 7e3e7e86cb73b9e4d0fb94c5086c79ccdc605201..a1f2b8b70321974710f5fe83a66d2a3d42110652 100644 --- a/tests/config/MobileNetV3_small_x1_0.txt +++ b/tests/config/MobileNetV3_small_x1_0.txt @@ -41,7 +41,7 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o Global.use_gpu:True|False -o Global.enable_mkldnn:True|False -o Global.cpu_num_threads:1|6 --o Global.batch_size:1 +-o Global.batch_size:1|16 -o Global.use_tensorrt:True|False -o Global.use_fp16:True|False -o Global.inference_model_dir:../inference diff --git a/tests/config/MobileNetV3_small_x1_25.txt b/tests/config/MobileNetV3_small_x1_25.txt index 617b49cb2851010e5cde476135f447b3b783d460..49831b6675bd2fc6cf3a2efc81fefbdcbf2f76c7 100644 --- a/tests/config/MobileNetV3_small_x1_25.txt +++ b/tests/config/MobileNetV3_small_x1_25.txt @@ -41,7 +41,7 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o Global.use_gpu:True|False -o Global.enable_mkldnn:True|False -o Global.cpu_num_threads:1|6 --o Global.batch_size:1 +-o Global.batch_size:1|16 -o Global.use_tensorrt:True|False -o Global.use_fp16:True|False -o Global.inference_model_dir:../inference diff --git a/tests/config/PPLCNet_x0_25.txt b/tests/config/PPLCNet_x0_25.txt index c3166ec1e57e99906ad1fa019a9ee263353f2345..0777cfa4fc60e735563ee0c72ad039ce0fffb913 100644 --- a/tests/config/PPLCNet_x0_25.txt +++ b/tests/config/PPLCNet_x0_25.txt @@ -41,7 +41,7 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o Global.use_gpu:True|False -o Global.enable_mkldnn:True|False 
-o Global.cpu_num_threads:1|6 --o Global.batch_size:1 +-o Global.batch_size:1|16 -o Global.use_tensorrt:True|False -o Global.use_fp16:True|False -o Global.inference_model_dir:../inference diff --git a/tests/config/PPLCNet_x0_35.txt b/tests/config/PPLCNet_x0_35.txt index 347661c957aa8696205355d81f28d514facbe201..0ce679e4d4444d6983d68e876cd71c6aebf200fb 100644 --- a/tests/config/PPLCNet_x0_35.txt +++ b/tests/config/PPLCNet_x0_35.txt @@ -41,7 +41,7 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o Global.use_gpu:True|False -o Global.enable_mkldnn:True|False -o Global.cpu_num_threads:1|6 --o Global.batch_size:1 +-o Global.batch_size:1|16 -o Global.use_tensorrt:True|False -o Global.use_fp16:True|False -o Global.inference_model_dir:../inference diff --git a/tests/config/PPLCNet_x0_5.txt b/tests/config/PPLCNet_x0_5.txt index b57d27dd9769e95627bf589fc447c7edce75237d..fbee882a9101a090259fe9ea98d757eb8d4263cd 100644 --- a/tests/config/PPLCNet_x0_5.txt +++ b/tests/config/PPLCNet_x0_5.txt @@ -41,7 +41,7 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o Global.use_gpu:True|False -o Global.enable_mkldnn:True|False -o Global.cpu_num_threads:1|6 --o Global.batch_size:1 +-o Global.batch_size:1|16 -o Global.use_tensorrt:True|False -o Global.use_fp16:True|False -o Global.inference_model_dir:../inference diff --git a/tests/config/PPLCNet_x1_0.txt b/tests/config/PPLCNet_x1_0.txt index be0c45ac5da8a1567fb37c91ef71e19cd182b21b..1e991164903a91f89c139fdb43741e9fddcbfda7 100644 --- a/tests/config/PPLCNet_x1_0.txt +++ b/tests/config/PPLCNet_x1_0.txt @@ -41,7 +41,7 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o Global.use_gpu:True|False -o Global.enable_mkldnn:True|False -o Global.cpu_num_threads:1|6 --o Global.batch_size:1 +-o Global.batch_size:1|16 -o Global.use_tensorrt:True|False -o Global.use_fp16:True|False -o Global.inference_model_dir:../inference diff --git a/tests/config/ResNeXt101_vd_64x4d.txt 
b/tests/config/ResNeXt101_vd_64x4d.txt index 90b7965573417e35302a15d736ba54138eef8ce3..aab18d564834368742469f491bd95dca3bae66a4 100644 --- a/tests/config/ResNeXt101_vd_64x4d.txt +++ b/tests/config/ResNeXt101_vd_64x4d.txt @@ -41,7 +41,7 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o Global.use_gpu:True|False -o Global.enable_mkldnn:True|False -o Global.cpu_num_threads:1|6 --o Global.batch_size:1 +-o Global.batch_size:1|16 -o Global.use_tensorrt:True|False -o Global.use_fp16:True|False -o Global.inference_model_dir:../inference diff --git a/tests/config/ResNet50_vd.txt b/tests/config/ResNet50_vd.txt index a2c7ea256df0f278abc8a5c25e9cafd8c17b1e93..9b8f27cc6485ff61f3b3999a870af544afca485b 100644 --- a/tests/config/ResNet50_vd.txt +++ b/tests/config/ResNet50_vd.txt @@ -41,7 +41,7 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o Global.use_gpu:True|False -o Global.enable_mkldnn:True|False -o Global.cpu_num_threads:1|6 --o Global.batch_size:1 +-o Global.batch_size:1|16 -o Global.use_tensorrt:True|False -o Global.use_fp16:True|False -o Global.inference_model_dir:../inference diff --git a/tests/config/ShuffleNetV2_x0_25.txt b/tests/config/ShuffleNetV2_x0_25.txt index 9ad0ebd5bb5610e50b545fa6f2c69773d7a3d8d3..1c80e4f4aef71ed2b211b7036c63be8f86d6e5ef 100644 --- a/tests/config/ShuffleNetV2_x0_25.txt +++ b/tests/config/ShuffleNetV2_x0_25.txt @@ -41,7 +41,7 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o Global.use_gpu:True|False -o Global.enable_mkldnn:True|False -o Global.cpu_num_threads:1|6 --o Global.batch_size:1 +-o Global.batch_size:1|16 -o Global.use_tensorrt:True|False -o Global.use_fp16:True|False -o Global.inference_model_dir:../inference diff --git a/tests/config/ShuffleNetV2_x0_33.txt b/tests/config/ShuffleNetV2_x0_33.txt index 70dc11a77931e13e9a36d3fe0d30f88cf3175c4c..34e813fb4d236e657bc3902c5dd1ad3eeca4c1fc 100644 --- a/tests/config/ShuffleNetV2_x0_33.txt +++ b/tests/config/ShuffleNetV2_x0_33.txt @@ -41,7 
+41,7 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o Global.use_gpu:True|False -o Global.enable_mkldnn:True|False -o Global.cpu_num_threads:1|6 --o Global.batch_size:1 +-o Global.batch_size:1|16 -o Global.use_tensorrt:True|False -o Global.use_fp16:True|False -o Global.inference_model_dir:../inference diff --git a/tests/config/ShuffleNetV2_x0_5.txt b/tests/config/ShuffleNetV2_x0_5.txt index 84f1df60bd0f41a504cd5a79fe4637953961d2e2..b918a780718157105c7d9a99205a77fca6537c5d 100644 --- a/tests/config/ShuffleNetV2_x0_5.txt +++ b/tests/config/ShuffleNetV2_x0_5.txt @@ -41,7 +41,7 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o Global.use_gpu:True|False -o Global.enable_mkldnn:True|False -o Global.cpu_num_threads:1|6 --o Global.batch_size:1 +-o Global.batch_size:1|16 -o Global.use_tensorrt:True|False -o Global.use_fp16:True|False -o Global.inference_model_dir:../inference diff --git a/tests/config/ShuffleNetV2_x1_0.txt b/tests/config/ShuffleNetV2_x1_0.txt index a208fa14d6536adc0ebd4801678dfbdf3cc8cb86..1055910ac9c807864dc0c3dfad50a5f942b3d79c 100644 --- a/tests/config/ShuffleNetV2_x1_0.txt +++ b/tests/config/ShuffleNetV2_x1_0.txt @@ -41,7 +41,7 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o Global.use_gpu:True|False -o Global.enable_mkldnn:True|False -o Global.cpu_num_threads:1|6 --o Global.batch_size:1 +-o Global.batch_size:1|16 -o Global.use_tensorrt:True|False -o Global.use_fp16:True|False -o Global.inference_model_dir:../inference diff --git a/tests/config/SwinTransformer_tiny_patch4_window7_224.txt b/tests/config/SwinTransformer_tiny_patch4_window7_224.txt index f2937b14b4c0c7e6b7afdab63b5863bc586e27f3..94acb1503d1ad9277231f0157259154925d47fef 100644 --- a/tests/config/SwinTransformer_tiny_patch4_window7_224.txt +++ b/tests/config/SwinTransformer_tiny_patch4_window7_224.txt @@ -41,7 +41,7 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o Global.use_gpu:True|False -o 
Global.enable_mkldnn:True|False -o Global.cpu_num_threads:1|6 --o Global.batch_size:1 +-o Global.batch_size:1|16 -o Global.use_tensorrt:True|False -o Global.use_fp16:True|False -o Global.inference_model_dir:../inference