diff --git a/deploy/cpp/include/paddlex/paddlex.h b/deploy/cpp/include/paddlex/paddlex.h index 74b478c0fedae13c1f25eb125f27b85e1755cd45..af4d8898496fee47ed9b5c74599536ddf1fe9f6c 100644 --- a/deploy/cpp/include/paddlex/paddlex.h +++ b/deploy/cpp/include/paddlex/paddlex.h @@ -72,23 +72,20 @@ class Model { * @param use_trt: use Tensor RT or not when infering * @param gpu_id: the id of gpu when infering with using gpu * @param key: the key of encryption when using encrypted model - * @param batch_size: batch size of infering * */ void Init(const std::string& model_dir, bool use_gpu = false, bool use_trt = false, int gpu_id = 0, - std::string key = "", - int batch_size = 1) { - create_predictor(model_dir, use_gpu, use_trt, gpu_id, key, batch_size); + std::string key = "") { + create_predictor(model_dir, use_gpu, use_trt, gpu_id, key); } void create_predictor(const std::string& model_dir, bool use_gpu = false, bool use_trt = false, int gpu_id = 0, - std::string key = "", - int batch_size = 1); + std::string key = ""); /* * @brief diff --git a/deploy/cpp/src/paddlex.cpp b/deploy/cpp/src/paddlex.cpp index e7fd9402b8ec6daa87dbba701699659a36416cad..bedd83b356baff41d7f9d16ac6de855e982332b2 100644 --- a/deploy/cpp/src/paddlex.cpp +++ b/deploy/cpp/src/paddlex.cpp @@ -22,8 +22,7 @@ void Model::create_predictor(const std::string& model_dir, bool use_gpu, bool use_trt, int gpu_id, - std::string key, - int batch_size) { + std::string key) { paddle::AnalysisConfig config; std::string model_file = model_dir + OS_PATH_SEP + "__model__"; std::string params_file = model_dir + OS_PATH_SEP + "__params__"; @@ -76,7 +75,6 @@ void Model::create_predictor(const std::string& model_dir, false /* use_calib_mode*/); } predictor_ = std::move(CreatePaddlePredictor(config)); - inputs_batch_.assign(batch_size, ImageBlob()); } bool Model::load_config(const std::string& yaml_input) { @@ -192,6 +190,7 @@ bool Model::predict(const std::vector<cv::Mat>& im_batch, "to function predict()!" 
<< std::endl; return false; } + inputs_batch_.assign(im_batch.size(), ImageBlob()); // 处理输入图像 if (!preprocess(im_batch, &inputs_batch_, thread_num)) { std::cerr << "Preprocess failed!" << std::endl; @@ -356,6 +355,7 @@ bool Model::predict(const std::vector<cv::Mat>& im_batch, return false; } + inputs_batch_.assign(im_batch.size(), ImageBlob()); int batch_size = im_batch.size(); // 处理输入图像 if (!preprocess(im_batch, &inputs_batch_, thread_num)) { @@ -637,6 +637,7 @@ bool Model::predict(const std::vector<cv::Mat>& im_batch, } // 处理输入图像 + inputs_batch_.assign(im_batch.size(), ImageBlob()); if (!preprocess(im_batch, &inputs_batch_, thread_num)) { std::cerr << "Preprocess failed!" << std::endl; return false; }