diff --git a/deploy/cpp/src/paddlex.cpp b/deploy/cpp/src/paddlex.cpp
index 5d33ae0e60285f41a2c24cca0ce96f51b54478bf..6d3c23094c3944b9359c701c4c3359c26313d1e3 100644
--- a/deploy/cpp/src/paddlex.cpp
+++ b/deploy/cpp/src/paddlex.cpp
@@ -66,7 +66,7 @@ void Model::create_predictor(const std::string& model_dir,
   if (key == "") {
     config.SetModel(model_file, params_file);
   }
-  if (use_mkl) {
+  if (use_mkl && !use_gpu) {
     if (name != "HRNet" && name != "DeepLabv3p" && name != "PPYOLO") {
       config.EnableMKLDNN();
       config.SetCpuMathLibraryNumThreads(mkl_thread_num);
diff --git a/paddlex/deploy.py b/paddlex/deploy.py
index 12570bec5c90b6334b41c77dac99e4b93fb144e4..b04f46ebf037af0d89e46f3d8e24efdb36a2fb47 100644
--- a/paddlex/deploy.py
+++ b/paddlex/deploy.py
@@ -108,7 +108,7 @@ class Predictor:
             config.enable_use_gpu(100, gpu_id)
         else:
             config.disable_gpu()
-        if use_mkl:
+        if use_mkl and not use_gpu:
            if self.model_name not in ["HRNet", "DeepLabv3p", "PPYOLO"]:
                config.enable_mkldnn()
                config.set_cpu_math_library_num_threads(mkl_thread_num)
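
For context, the sketch below shows the guarded, CPU-only MKL-DNN path in isolation. It is a minimal illustration, assuming the paddle.inference API of Paddle 2.x (Config, create_predictor); the repository's own deploy.py may construct the config differently, and the helper name build_config, the model directory, and the thread count used here are placeholders, not part of the codebase.

    # Minimal sketch: enable MKL-DNN only when running on CPU, mirroring the
    # use_mkl-and-not-use_gpu guard from the diff above. Assumes paddle.inference.
    from paddle.inference import Config, create_predictor

    def build_config(model_dir, use_gpu=False, gpu_id=0,
                     use_mkl=True, mkl_thread_num=4, model_name=""):
        config = Config(model_dir)
        if use_gpu:
            # 100 MB initial GPU memory pool on the selected device.
            config.enable_use_gpu(100, gpu_id)
        else:
            config.disable_gpu()
        # MKL-DNN is a CPU acceleration library, so it is skipped when the GPU
        # is in use; a few model architectures are also excluded.
        if use_mkl and not use_gpu:
            if model_name not in ["HRNet", "DeepLabv3p", "PPYOLO"]:
                config.enable_mkldnn()
                config.set_cpu_math_library_num_threads(mkl_thread_num)
        return config

    predictor = create_predictor(build_config("./inference_model", use_gpu=False))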