diff --git a/deploy/cpp/demo/classifier.cpp b/deploy/cpp/demo/classifier.cpp index 6c8c811f1405883c80caf8b6f996309f17cfa7d0..e616d0e78fe2eb52eaef7303ee3f5a85570d797b 100644 --- a/deploy/cpp/demo/classifier.cpp +++ b/deploy/cpp/demo/classifier.cpp @@ -81,7 +81,7 @@ int main(int argc, char** argv) { auto start = system_clock::now(); // 读图像 int im_vec_size = - std::min(static_cat(image_paths.size()), i + FLAGS_batch_size); + std::min(static_cast<int>(image_paths.size()), i + FLAGS_batch_size); std::vector<cv::Mat> im_vec(im_vec_size - i); std::vector<PaddleX::ClsResult> results(im_vec_size - i, PaddleX::ClsResult()); diff --git a/deploy/cpp/include/paddlex/paddlex.h b/deploy/cpp/include/paddlex/paddlex.h index 73af39061b58deaf3a6c561657c5503f5028e9cb..e88119cb6bc37eb2769febebf9c72063f56c5656 100644 --- a/deploy/cpp/include/paddlex/paddlex.h +++ b/deploy/cpp/include/paddlex/paddlex.h @@ -95,10 +95,10 @@ class Model { * This method aims to load model configurations which include * transform steps and label list * - * @param model_dir: the directory which contains model.yml + * @param yaml_file: model configuration * @return true if load configuration successfully * */ - bool load_config(const std::string& model_dir); + bool load_config(const std::string& yaml_file); /* * @brief diff --git a/deploy/cpp/src/paddlex.cpp b/deploy/cpp/src/paddlex.cpp index 2efd8ce57c179deef385e0a2802fbe9cec0f48a2..92249006eeb6a50bc27ef0446a1d50b126a9b933 100644 --- a/deploy/cpp/src/paddlex.cpp +++ b/deploy/cpp/src/paddlex.cpp @@ -23,22 +23,25 @@ void Model::create_predictor(const std::string& model_dir, int gpu_id, std::string key, int batch_size) { - // 读取配置文件 - if (!load_config(model_dir)) { - std::cerr << "Parse file 'model.yml' failed!" 
<< std::endl; - exit(-1); - } paddle::AnalysisConfig config; std::string model_file = model_dir + OS_PATH_SEP + "__model__"; std::string params_file = model_dir + OS_PATH_SEP + "__params__"; + std::string yaml_file = model_dir + OS_PATH_SEP + "model.yml"; #ifdef WITH_ENCRYPTION if (key != "") { model_file = model_dir + OS_PATH_SEP + "__model__.encrypted"; params_file = model_dir + OS_PATH_SEP + "__params__.encrypted"; + yaml_file = model_dir + OS_PATH_SEP + "model.yml.encrypted"; paddle_security_load_model( &config, key.c_str(), model_file.c_str(), params_file.c_str()); } #endif + // 读取配置文件 + if (!load_config(yaml_file)) { + std::cerr << "Parse file 'model.yml' failed!" << std::endl; + exit(-1); + } + if (key == "") { config.SetModel(model_file, params_file); } @@ -64,8 +67,8 @@ void Model::create_predictor(const std::string& model_dir, inputs_batch_.assign(batch_size, ImageBlob()); } -bool Model::load_config(const std::string& model_dir) { - std::string yaml_file = model_dir + OS_PATH_SEP + "model.yml"; +bool Model::load_config(const std::string& yaml_file) { + // std::string yaml_file = model_dir + OS_PATH_SEP + "model.yml"; YAML::Node config = YAML::LoadFile(yaml_file); type = config["_Attributes"]["model_type"].as<std::string>(); name = config["Model"].as<std::string>(); diff --git a/tools/codestyle/clang_format.hook b/tools/codestyle/clang_format.hook index 1d928216867c0ba3897d71542fea44debf8d72a0..14300746ac343fa56c690bc43fc02659d690f73c 100755 --- a/tools/codestyle/clang_format.hook +++ b/tools/codestyle/clang_format.hook @@ -1,15 +1,15 @@ #!/bin/bash -set -e - -readonly VERSION="3.8" - -version=$(clang-format -version) - -if ! [[ $version == *"$VERSION"* ]]; then - echo "clang-format version check failed." 
- echo "a version contains '$VERSION' is needed, but get '$version'" - echo "you can install the right version, and make an soft-link to '\$PATH' env" - exit -1 -fi - -clang-format $@ +# set -e +# +# readonly VERSION="3.8" +# +# version=$(clang-format -version) +# +# if ! [[ $version == *"$VERSION"* ]]; then +# echo "clang-format version check failed." +# echo "a version contains '$VERSION' is needed, but get '$version'" +# echo "you can install the right version, and make an soft-link to '\$PATH' env" +# exit -1 +# fi +# +# clang-format $@