diff --git a/deploy/configs/build_cartoon.yaml b/deploy/configs/build_cartoon.yaml
index 99aa816442cc69fde0642db9e5971c1e887a5385..4c000de1f0c1b0fd41d50296f39b026745442788 100644
--- a/deploy/configs/build_cartoon.yaml
+++ b/deploy/configs/build_cartoon.yaml
@@ -2,7 +2,7 @@ Global:
   rec_inference_model_dir: "./models/cartoon_rec_ResNet50_iCartoon_v1.0_infer/"
   batch_size: 1
   use_gpu: True
-  enable_mkldnn: True
+  enable_mkldnn: False
   cpu_num_threads: 100
   enable_benchmark: True
   use_fp16: False
diff --git a/deploy/configs/build_logo.yaml b/deploy/configs/build_logo.yaml
index 2ff383b37c48a51566e80c06cc8653dfd38a982c..8bcb9be6f2808d381cd728910c78559a8a160dbe 100644
--- a/deploy/configs/build_logo.yaml
+++ b/deploy/configs/build_logo.yaml
@@ -2,7 +2,7 @@ Global:
   rec_inference_model_dir: "./models/logo_rec_ResNet50_Logo3K_v1.0_infer/"
   batch_size: 1
   use_gpu: True
-  enable_mkldnn: True
+  enable_mkldnn: False
   cpu_num_threads: 100
   enable_benchmark: True
   use_fp16: False
diff --git a/deploy/configs/build_product.yaml b/deploy/configs/build_product.yaml
index 987c5c0df4f80ebc048cb55f0620b846b994ef8f..ebe2d34c692b4a7de8c560792f8cbae9b52dffad 100644
--- a/deploy/configs/build_product.yaml
+++ b/deploy/configs/build_product.yaml
@@ -2,7 +2,7 @@ Global:
   rec_inference_model_dir: "./models/product_ResNet50_vd_aliproduct_v1.0_infer"
   batch_size: 1
   use_gpu: True
-  enable_mkldnn: True
+  enable_mkldnn: False
   cpu_num_threads: 100
   enable_benchmark: True
   use_fp16: False
diff --git a/deploy/configs/build_vehicle.yaml b/deploy/configs/build_vehicle.yaml
index c7335dd20c8a0cd4a08b1b01d5eaecbfdaafe54f..2e11a7df04fbd20c35cb9961f37f3a645bf6ea5b 100644
--- a/deploy/configs/build_vehicle.yaml
+++ b/deploy/configs/build_vehicle.yaml
@@ -2,7 +2,7 @@ Global:
   rec_inference_model_dir: "./models/vehicle_cls_ResNet50_CompCars_v1.0_infer/"
   batch_size: 1
   use_gpu: True
-  enable_mkldnn: True
+  enable_mkldnn: False
   cpu_num_threads: 100
   enable_benchmark: True
   use_fp16: False
diff --git a/deploy/configs/inference_cartoon.yaml b/deploy/configs/inference_cartoon.yaml
index e47aca31d4bf7d46e225997880e00a77e7c2b640..eb7c869baa3046173aa932607ef9790fe69b64b6 100644
--- a/deploy/configs/inference_cartoon.yaml
+++ b/deploy/configs/inference_cartoon.yaml
@@ -10,7 +10,7 @@ Global:
   - foreground
 
   use_gpu: True
-  enable_mkldnn: True
+  enable_mkldnn: False
   cpu_num_threads: 100
   enable_benchmark: True
   use_fp16: False
diff --git a/deploy/configs/inference_cls.yaml b/deploy/configs/inference_cls.yaml
index 577c77d3513b1d4b3e0061d9446996f687c1b324..09d4ba797eaf0e83a861239f509bd20aeabe729c 100644
--- a/deploy/configs/inference_cls.yaml
+++ b/deploy/configs/inference_cls.yaml
@@ -3,7 +3,7 @@ Global:
   inference_model_dir: "./models"
   batch_size: 1
   use_gpu: True
-  enable_mkldnn: True
+  enable_mkldnn: False
   cpu_num_threads: 100
   enable_benchmark: True
   use_fp16: False
diff --git a/deploy/configs/inference_det.yaml b/deploy/configs/inference_det.yaml
index d5d6d679022d2fed75fdea4d3df43cedc71908ec..5236dd7767fbb4db37f7fd720faf5e7bd995dc85 100644
--- a/deploy/configs/inference_det.yaml
+++ b/deploy/configs/inference_det.yaml
@@ -10,7 +10,7 @@ Global:
 
   # inference engine config
   use_gpu: True
-  enable_mkldnn: True
+  enable_mkldnn: False
   cpu_num_threads: 100
   enable_benchmark: True
   use_fp16: False
diff --git a/deploy/configs/inference_logo.yaml b/deploy/configs/inference_logo.yaml
index 6f183f1059fac17d077f535d08286a59c4b3ddb4..ea7fede4ff95be9642286ab90ef966487927efda 100644
--- a/deploy/configs/inference_logo.yaml
+++ b/deploy/configs/inference_logo.yaml
@@ -11,7 +11,7 @@ Global:
 
   # inference engine config
   use_gpu: True
-  enable_mkldnn: True
+  enable_mkldnn: False
   cpu_num_threads: 100
   enable_benchmark: True
   use_fp16: False
diff --git a/deploy/configs/inference_product.yaml b/deploy/configs/inference_product.yaml
index 4a6aa572c316a8d4c84bffb812f81cad6f13be18..f85933e8fbb08419399fcea5acdc576d3d7809e8 100644
--- a/deploy/configs/inference_product.yaml
+++ b/deploy/configs/inference_product.yaml
@@ -11,7 +11,7 @@ Global:
 
   # inference engine config
   use_gpu: True
-  enable_mkldnn: True
+  enable_mkldnn: False
   cpu_num_threads: 100
   enable_benchmark: True
   use_fp16: False
diff --git a/deploy/configs/inference_rec.yaml b/deploy/configs/inference_rec.yaml
index 2293752c5730fd117914a80335ea303ad9e8b851..dd906880cfb1c86800feeb042817ff0ae7ddf9a5 100644
--- a/deploy/configs/inference_rec.yaml
+++ b/deploy/configs/inference_rec.yaml
@@ -10,7 +10,7 @@ Global:
 
   # inference engine config
   use_gpu: False
-  enable_mkldnn: True
+  enable_mkldnn: False
   cpu_num_threads: 100
   enable_benchmark: True
   use_fp16: False
diff --git a/deploy/configs/inference_vehicle.yaml b/deploy/configs/inference_vehicle.yaml
index 357b722d57508c04dcefa1e287846b13a01bf958..ecf5f8cc6467087b6168573b874592645cdc9444 100644
--- a/deploy/configs/inference_vehicle.yaml
+++ b/deploy/configs/inference_vehicle.yaml
@@ -11,7 +11,7 @@ Global:
 
   # inference engine config
   use_gpu: True
-  enable_mkldnn: True
+  enable_mkldnn: False
   cpu_num_threads: 100
   enable_benchmark: True
   use_fp16: False