diff --git a/deploy/cpp/demo/classifier.cpp b/deploy/cpp/demo/classifier.cpp index f8b47cba7a6d2257c41fd0a64784a083c96544d9..548eaff411a737ea0ffcfca63d36a7f18cd9d994 100644 --- a/deploy/cpp/demo/classifier.cpp +++ b/deploy/cpp/demo/classifier.cpp @@ -61,9 +61,9 @@ int main(int argc, char** argv) { FLAGS_use_gpu, FLAGS_use_trt, FLAGS_use_mkl, + FLAGS_mkl_thread_num, FLAGS_gpu_id, - FLAGS_key, - FLAGS_mkl_thread_num); + FLAGS_key); // Predict int imgs = 1; diff --git a/deploy/cpp/demo/detector.cpp b/deploy/cpp/demo/detector.cpp index 02e4912c2459d590563963199dca0bac2ef3996b..f5fefc05d0bbc4bbd482c23f0db8c066b7d1013b 100644 --- a/deploy/cpp/demo/detector.cpp +++ b/deploy/cpp/demo/detector.cpp @@ -66,9 +66,9 @@ int main(int argc, char** argv) { FLAGS_use_gpu, FLAGS_use_trt, FLAGS_use_mkl, + FLAGS_mkl_thread_num, FLAGS_gpu_id, - FLAGS_key, - FLAGS_mkl_thread_num); + FLAGS_key); int imgs = 1; std::string save_dir = "output"; // Predict diff --git a/deploy/cpp/demo/segmenter.cpp b/deploy/cpp/demo/segmenter.cpp index b38105e68ffbba79896e7d99375ea258f5d37c84..0d888001490759f65790d51837e2e69a6f448c4b 100644 --- a/deploy/cpp/demo/segmenter.cpp +++ b/deploy/cpp/demo/segmenter.cpp @@ -63,9 +63,9 @@ int main(int argc, char** argv) { FLAGS_use_gpu, FLAGS_use_trt, FLAGS_use_mkl, + FLAGS_mkl_thread_num, FLAGS_gpu_id, - FLAGS_key, - FLAGS_mkl_thread_num); + FLAGS_key); int imgs = 1; // Predict if (FLAGS_image_list != "") { diff --git a/deploy/cpp/demo/video_classifier.cpp b/deploy/cpp/demo/video_classifier.cpp index fd29dedb0849ed0b46de334eddcaa3a12f9411e3..c0485791ccb42fc880ab384ae2cf5e1d9d48b1ae 100644 --- a/deploy/cpp/demo/video_classifier.cpp +++ b/deploy/cpp/demo/video_classifier.cpp @@ -67,9 +67,9 @@ int main(int argc, char** argv) { FLAGS_use_gpu, FLAGS_use_trt, FLAGS_use_mkl, + FLAGS_mkl_thread_num, FLAGS_gpu_id, - FLAGS_key, - FLAGS_mkl_thread_num); + FLAGS_key); // Open video cv::VideoCapture capture; diff --git a/deploy/cpp/demo/video_detector.cpp 
b/deploy/cpp/demo/video_detector.cpp index fb8b4a78c87e27b33206572a06fa580d8c5e1521..e617dbd1339b73676225a65a667a42a06abfa63e 100644 --- a/deploy/cpp/demo/video_detector.cpp +++ b/deploy/cpp/demo/video_detector.cpp @@ -69,9 +69,9 @@ int main(int argc, char** argv) { FLAGS_use_gpu, FLAGS_use_trt, FLAGS_use_mkl, + FLAGS_mkl_thread_num, FLAGS_gpu_id, - FLAGS_key, - FLAGS_mkl_thread_num); + FLAGS_key); // Open video cv::VideoCapture capture; if (FLAGS_use_camera) { diff --git a/deploy/cpp/demo/video_segmenter.cpp b/deploy/cpp/demo/video_segmenter.cpp index 54e650e402a1059021e161acc5c07dd591712754..35af64f4b00ea5983653bb135394da9389539604 100644 --- a/deploy/cpp/demo/video_segmenter.cpp +++ b/deploy/cpp/demo/video_segmenter.cpp @@ -67,9 +67,9 @@ int main(int argc, char** argv) { FLAGS_use_gpu, FLAGS_use_trt, FLAGS_use_mkl, + FLAGS_mkl_thread_num, FLAGS_gpu_id, - FLAGS_key, - FLAGS_mkl_thread_num); + FLAGS_key); // Open video cv::VideoCapture capture; if (FLAGS_use_camera) { diff --git a/deploy/cpp/include/paddlex/paddlex.h b/deploy/cpp/include/paddlex/paddlex.h index 7a8e60021cd74b402561527d9b40c3eb72a6d672..3af3dd85e290dd18fec3d7bcfb68207a0cf2d5b5 100644 --- a/deploy/cpp/include/paddlex/paddlex.h +++ b/deploy/cpp/include/paddlex/paddlex.h @@ -70,6 +70,8 @@ class Model { * @param model_dir: the directory which contains model.yml * @param use_gpu: use gpu or not when infering * @param use_trt: use Tensor RT or not when infering + * @param use_mkl: use mkl or not when infering + * @param mkl_thread_num: the threads of mkl when infering * @param gpu_id: the id of gpu when infering with using gpu * @param key: the key of encryption when using encrypted model * @param use_ir_optim: use ir optimization when infering @@ -78,28 +80,27 @@ class Model { bool use_gpu = false, bool use_trt = false, bool use_mkl = true, + int mkl_thread_num = 4, int gpu_id = 0, std::string key = "", - int mkl_thread_num = 4, bool use_ir_optim = true) { create_predictor( model_dir, use_gpu, use_trt, 
use_mkl, + mkl_thread_num, gpu_id, key, - mkl_thread_num, use_ir_optim); } - void create_predictor(const std::string& model_dir, bool use_gpu = false, bool use_trt = false, bool use_mkl = true, + int mkl_thread_num = 4, int gpu_id = 0, std::string key = "", - int mkl_thread_num = 4, bool use_ir_optim = true); /* diff --git a/deploy/cpp/src/paddlex.cpp b/deploy/cpp/src/paddlex.cpp index 68038b6bc4bbe12de24c04dac7a186c69f42ea86..57d35b89f638173aa1fdc46600d059e16f183c14 100644 --- a/deploy/cpp/src/paddlex.cpp +++ b/deploy/cpp/src/paddlex.cpp @@ -29,9 +29,9 @@ void Model::create_predictor(const std::string& model_dir, bool use_gpu, bool use_trt, bool use_mkl, + int mkl_thread_num, int gpu_id, std::string key, - int mkl_thread_num, bool use_ir_optim) { paddle::AnalysisConfig config; std::string model_file = model_dir + OS_PATH_SEP + "__model__";