diff --git a/deploy/cpp_infer/include/cls.h b/deploy/cpp_infer/include/cls.h index a8f56a3be274462e0f53633369e85ae1a0db1070..f954957321f9756d1659bdc3117be60c18981d3d 100644 --- a/deploy/cpp_infer/include/cls.h +++ b/deploy/cpp_infer/include/cls.h @@ -36,7 +36,8 @@ namespace PaddleClas { class Classifier { public: - explicit Classifier(const std::string &model_dir, const bool &use_gpu, + explicit Classifier(const std::string &model_path, + const std::string &params_path, const bool &use_gpu, const int &gpu_id, const int &gpu_mem, const int &cpu_math_library_num_threads, const bool &use_mkldnn, const int &resize_short_size, @@ -50,11 +51,11 @@ public: this->resize_short_size_ = resize_short_size; this->crop_size_ = crop_size; - LoadModel(model_dir); + LoadModel(model_path, params_path); } // Load Paddle inference model - void LoadModel(const std::string &model_dir); + void LoadModel(const std::string &model_path, const std::string &params_path); // Run predictor void Run(cv::Mat &img); @@ -82,4 +83,4 @@ private: CenterCropImg crop_op_; }; -} // namespace PaddleClas \ No newline at end of file +} // namespace PaddleClas diff --git a/deploy/cpp_infer/include/cls_config.h b/deploy/cpp_infer/include/cls_config.h index cfc566e849edac1d717c00edc05cdddf2717b59c..4d40347f5b15507df3d767af1144f9d6b675c786 100644 --- a/deploy/cpp_infer/include/cls_config.h +++ b/deploy/cpp_infer/include/cls_config.h @@ -41,7 +41,9 @@ public: this->use_mkldnn = bool(stoi(config_map_["use_mkldnn"])); - this->cls_model_dir.assign(config_map_["cls_model_dir"]); + this->cls_model_path.assign(config_map_["cls_model_path"]); + + this->cls_params_path.assign(config_map_["cls_params_path"]); this->resize_short_size = stoi(config_map_["resize_short_size"]); @@ -58,7 +60,9 @@ public: bool use_mkldnn = false; - std::string cls_model_dir; + std::string cls_model_path; + + std::string cls_params_path; int resize_short_size = 256; int crop_size = 224; diff --git a/deploy/cpp_infer/readme.md 
b/deploy/cpp_infer/readme.md index 323947e198763fe7de10386d385dfcc3ad836087..ddf549c6c97f6e96a5917743a422f8473fc89db9 100644 --- a/deploy/cpp_infer/readme.md +++ b/deploy/cpp_infer/readme.md @@ -120,13 +120,13 @@ build/fluid_inference_install_dir/ #### 1.2.2 直接下载安装 -* [Paddle预测库官网](https://www.paddlepaddle.org.cn/documentation/docs/zh/advanced_guide/inference_deployment/inference/build_and_install_lib_cn.html)上提供了不同cuda版本的Linux预测库,可以在官网查看并选择合适的预测库版本。 +* [Paddle预测库官网](https://www.paddlepaddle.org.cn/documentation/docs/zh/advanced_guide/inference_deployment/inference/build_and_install_lib_cn.html)上提供了不同cuda版本的Linux预测库,可以在官网查看并选择合适的预测库版本,注意必须选择`develop`版本。 - 以`ubuntu14.04_cuda9.0_cudnn7_avx_mkl`的`1.8.4`版本为例,使用下述命令下载并解压: + 以`ubuntu14.04_cuda9.0_cudnn7_avx_mkl`的`develop`版本为例,使用下述命令下载并解压: ```shell -wget https://paddle-inference-lib.bj.bcebos.com/1.8.4-gpu-cuda9-cudnn7-avx-mkl/fluid_inference.tgz +wget https://paddle-inference-lib.bj.bcebos.com/latest-gpu-cuda9-cudnn7-avx-mkl/fluid_inference.tgz tar -xvf fluid_inference.tgz ``` @@ -143,11 +143,10 @@ tar -xvf fluid_inference.tgz ``` inference/ -|--model -|--params +|--cls_infer.pdmodel +|--cls_infer.pdiparams ``` -**注意**:上述文件中,`model`文件存储了模型结构信息,`params`文件存储了模型参数信息。因此,在使用模型导出时,需将导出的`cls_infer.pdmodel`文件重命名为`model`,`cls_infer.pdiparams`文件重命名为`params`。 - +**注意**:上述文件中,`cls_infer.pdmodel`文件存储了模型结构信息,`cls_infer.pdiparams`文件存储了模型参数信息。注意两个文件的路径需要与配置文件`tools/config.txt`中的`cls_model_path`和`cls_params_path`参数对应一致。 ### 2.2 编译PaddleClas C++预测demo diff --git a/deploy/cpp_infer/readme_en.md b/deploy/cpp_infer/readme_en.md index b2ca1b7b0b09a4d18546588cedd49711946f25de..0afb1734502bf0522b14638ef418ddd1ed705ffa 100644 --- a/deploy/cpp_infer/readme_en.md +++ b/deploy/cpp_infer/readme_en.md @@ -131,6 +131,7 @@ Among them, `paddle` is the Paddle library required for C++ prediction later, an * Different cuda versions of the Linux inference library (based on GCC 4.8.2) are provided on the [Paddle Inference Library official 
website](https://www.paddlepaddle.org.cn/documentation/docs/en/advanced_guide/inference_deployment/inference/build_and_install_lib_en.html). You can view and select the appropriate version of the inference library on the official website. +* Please select the `develop` version. * After downloading, use the following method to uncompress. @@ -149,11 +150,11 @@ Finally you can see the following files in the folder of `fluid_inference/`. ``` inference/ -|--model -|--params +|--cls_infer.pdmodel +|--cls_infer.pdiparams ``` -**NOTICE**: Among them, `model` file stores the model structure information and the `params` file stores the model parameter information.Therefore, you could rename the files name exported by [Model inference](../../tools/export_model.py). +**NOTICE**: Among them, `cls_infer.pdmodel` file stores the model structure information and the `cls_infer.pdiparams` file stores the model parameter information.The paths of the two files need to correspond to the parameters of `cls_model_path` and `cls_params_path` in the configuration file `tools/config.txt`. 
### 2.2 Compile PaddleClas C++ inference demo diff --git a/deploy/cpp_infer/src/cls.cpp b/deploy/cpp_infer/src/cls.cpp index dbb36f84d897185d04d403892e5183fdf92346eb..0755eed15e86a31ac415a5507dbd55a042825e20 100644 --- a/deploy/cpp_infer/src/cls.cpp +++ b/deploy/cpp_infer/src/cls.cpp @@ -16,9 +16,10 @@ namespace PaddleClas { -void Classifier::LoadModel(const std::string &model_dir) { +void Classifier::LoadModel(const std::string &model_path, + const std::string &params_path) { paddle_infer::Config config; - config.SetModel(model_dir + "/model", model_dir + "/params"); + config.SetModel(model_path, params_path); if (this->use_gpu_) { config.EnableUseGpu(this->gpu_mem_, this->gpu_id_); diff --git a/deploy/cpp_infer/src/main.cpp b/deploy/cpp_infer/src/main.cpp index 07e92a54ea810dac178889b5c6d7293ac4f8f0d6..9b562adcf2b45e06f578149f0889fc483348f851 100644 --- a/deploy/cpp_infer/src/main.cpp +++ b/deploy/cpp_infer/src/main.cpp @@ -59,10 +59,10 @@ int main(int argc, char **argv) { std::cout << "img_file_list length: " << img_files_list.size() << std::endl; - Classifier classifier(config.cls_model_dir, config.use_gpu, config.gpu_id, - config.gpu_mem, config.cpu_math_library_num_threads, - config.use_mkldnn, config.resize_short_size, - config.crop_size); + Classifier classifier(config.cls_model_path, config.cls_params_path, + config.use_gpu, config.gpu_id, config.gpu_mem, + config.cpu_math_library_num_threads, config.use_mkldnn, + config.resize_short_size, config.crop_size); double elapsed_time = 0.0; int warmup_iter = img_files_list.size() > 5 ? 
5 : 0; diff --git a/deploy/cpp_infer/tools/build.sh b/deploy/cpp_infer/tools/build.sh index 0de61f04e53873e7383d3b0c244a4502c006bf78..ad6a727f0ce05ba21f536b6dbaa171ade8b5335b 100755 --- a/deploy/cpp_infer/tools/build.sh +++ b/deploy/cpp_infer/tools/build.sh @@ -1,5 +1,5 @@ -OPENCV_DIR=/PaddleClas/PaddleOCR/opencv-3.4.7/opencv3/ -LIB_DIR=/PaddleClas/PaddleOCR/fluid_inference/ +OPENCV_DIR=/PaddleClas/opencv-3.4.7/opencv3/ +LIB_DIR=/PaddleClas/fluid_inference/ CUDA_LIB_DIR=/usr/local/cuda/lib64 CUDNN_LIB_DIR=/usr/lib/x86_64-linux-gnu/ diff --git a/deploy/cpp_infer/tools/config.txt b/deploy/cpp_infer/tools/config.txt index 14096a1d3ce3420fd1c5ff40b6c96088512043b7..5277f36609b348255d0c0851f0a83f996621060f 100755 --- a/deploy/cpp_infer/tools/config.txt +++ b/deploy/cpp_infer/tools/config.txt @@ -6,6 +6,7 @@ cpu_math_library_num_threads 10 use_mkldnn 1 # cls config -cls_model_dir ./inference/ +cls_model_path /PaddleClas/inference/cls_infer.pdmodel +cls_params_path /PaddleClas/inference/cls_infer.pdiparams resize_short_size 256 crop_size 224 diff --git a/deploy/cpp_infer/tools/run.sh b/deploy/cpp_infer/tools/run.sh index 7972bc267b01b0f16ba4453a62ccc8fa4c7aeda3..1c70aaa1ce645de33673ca691fd6af630c5cd383 100755 --- a/deploy/cpp_infer/tools/run.sh +++ b/deploy/cpp_infer/tools/run.sh @@ -1,2 +1 @@ - ./build/clas_system ./tools/config.txt ./docs/imgs/ILSVRC2012_val_00000666.JPEG