From ce21025aefdfa89782ce50ebcbbd007230340b4e Mon Sep 17 00:00:00 2001
From: xiebaiyuan
Date: Tue, 23 Oct 2018 17:52:59 +0800
Subject: [PATCH] add memory load high api

---
 src/io/api_paddle_mobile.cc                       |  9 ++-
 src/io/paddle_inference_api.h                     |  9 +++
 test/CMakeLists.txt                               |  4 +
 test/framework/test_load_memory.cpp               |  4 +-
 .../test_load_memory_inference_api.cpp            | 80 +++++++++++++++++++
 5 files changed, 103 insertions(+), 3 deletions(-)
 create mode 100644 test/framework/test_load_memory_inference_api.cpp

diff --git a/src/io/api_paddle_mobile.cc b/src/io/api_paddle_mobile.cc
index b07232867c..6a7dff597a 100644
--- a/src/io/api_paddle_mobile.cc
+++ b/src/io/api_paddle_mobile.cc
@@ -29,7 +29,14 @@ PaddleMobilePredictor<Dtype, P>::PaddleMobilePredictor(
 template <typename Dtype, Precision P>
 bool PaddleMobilePredictor<Dtype, P>::Init(const PaddleMobileConfig &config) {
   paddle_mobile_.reset(new PaddleMobile<Dtype, P>());
-  if (!config.model_dir.empty()) {
+
+  if (config.memory_pack.from_memory) {
+    DLOG << "load from memory!";
+    paddle_mobile_->LoadCombinedMemory(config.memory_pack.model_size,
+                                       config.memory_pack.model_buf,
+                                       config.memory_pack.combined_params_size,
+                                       config.memory_pack.combined_params_buf);
+  } else if (!config.model_dir.empty()) {
     paddle_mobile_->Load(config.model_dir, config.optimize,
                          config.quantification, config.batch_size);
   } else if (!config.prog_file.empty() && !config.param_file.empty()) {
diff --git a/src/io/paddle_inference_api.h b/src/io/paddle_inference_api.h
index 104ba11153..16756a61bf 100644
--- a/src/io/paddle_inference_api.h
+++ b/src/io/paddle_inference_api.h
@@ -111,6 +111,14 @@ class PaddlePredictor {
   PaddlePredictor() = default;
 };
 
+struct PaddleModelMemoryPack {
+  bool from_memory = false;
+  size_t model_size = 0;
+  uint8_t* model_buf = nullptr;
+  size_t combined_params_size = 0;
+  uint8_t* combined_params_buf = nullptr;
+};
+
 struct PaddleMobileConfig : public PaddlePredictor::Config {
   enum Precision { FP32 = 0 };
   enum Device { kCPU = 0, kFPGA = 1, kGPU_MALI = 2 };
@@ -124,6 +132,7 @@ struct PaddleMobileConfig : public PaddlePredictor::Config {
   int thread_num = 1;
   std::string prog_file;
   std::string param_file;
+  struct PaddleModelMemoryPack memory_pack;
 };
 
 // A factory to help create different predictors.
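
The PaddleModelMemoryPack struct added above is the caller-facing side of the new path: set from_memory and point the buffer/size fields at the serialized model program and the combined-params blob, and Init() then routes to LoadCombinedMemory() instead of the file-based Load(). A minimal sketch of the intended call site (model_buf/model_size/params_buf/params_size are hypothetical variables the caller has already filled; error handling omitted):

    paddle_mobile::PaddleMobileConfig config;
    config.memory_pack.from_memory = true;           // select the in-memory path
    config.memory_pack.model_size = model_size;      // bytes held in model_buf
    config.memory_pack.model_buf = model_buf;        // serialized model program
    config.memory_pack.combined_params_size = params_size;
    config.memory_pack.combined_params_buf = params_buf;  // combined params blob
    auto predictor = paddle_mobile::CreatePaddlePredictor<
        paddle_mobile::PaddleMobileConfig,
        paddle_mobile::PaddleEngineKind::kPaddleMobile>(config);

Note that the config stores raw pointers, so the caller keeps ownership of both buffers.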
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index 64324b08a5..2050b34d21 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -236,6 +236,10 @@ if (NOT FOUND_MATCH)
     ADD_EXECUTABLE(test-loadmemory framework/test_load_memory.cpp)
     target_link_libraries(test-loadmemory paddle-mobile)
 
+    # gen test log
+    ADD_EXECUTABLE(test-loadmemory-inference framework/test_load_memory_inference_api.cpp)
+    target_link_libraries(test-loadmemory-inference paddle-mobile)
+
     ADD_EXECUTABLE(test-inference-api framework/test_inference_api.cpp)
     target_link_libraries(test-inference-api paddle-mobile)
 
diff --git a/test/framework/test_load_memory.cpp b/test/framework/test_load_memory.cpp
index 4be7aaa82f..162dba3727 100644
--- a/test/framework/test_load_memory.cpp
+++ b/test/framework/test_load_memory.cpp
@@ -58,9 +58,9 @@ int main() {
   size_t sizeBuf = ReadBuffer(model_path.c_str(), &bufModel);
   uint8_t *bufParams = nullptr;
 
-  DLOG << "sizeBuf: " << sizeBuf;
+  std::cout << "sizeBuf: " << sizeBuf << std::endl;
   size_t sizeParams = ReadBuffer(params_path.c_str(), &bufParams);
-  DLOG << "sizeParams: " << sizeParams;
+  std::cout << "sizeParams: " << sizeParams << std::endl;
 
   paddle_mobile.LoadCombinedMemory(sizeBuf, bufModel, sizeParams, bufParams);
   return 0;
diff --git a/test/framework/test_load_memory_inference_api.cpp b/test/framework/test_load_memory_inference_api.cpp
new file mode 100644
index 0000000000..05d5191017
--- /dev/null
+++ b/test/framework/test_load_memory_inference_api.cpp
@@ -0,0 +1,80 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include <string>
+
+#include <iostream>
+#include "../test_helper.h"
+#include "io/paddle_inference_api.h"
+
+static size_t ReadBuffer(const char *file_name, uint8_t **out) {
+  FILE *fp;
+  fp = fopen(file_name, "rb");
+  PADDLE_MOBILE_ENFORCE(fp != nullptr, " %s open failed !", file_name);
+
+  fseek(fp, 0, SEEK_END);
+  auto size = static_cast<size_t>(ftell(fp));
+  rewind(fp);
+
+  DLOG << "model size: " << size;
+
+  *out = reinterpret_cast<uint8_t *>(malloc(size));
+
+  size_t cur_len = 0;
+  size_t nread;
+  while ((nread = fread(*out + cur_len, 1, size - cur_len, fp)) != 0) {
+    cur_len += nread;
+  }
+  fclose(fp);
+  return cur_len;
+}
+
+static char *Get_binary_data(std::string filename) {
+  FILE *file = fopen(filename.c_str(), "rb");
+  PADDLE_MOBILE_ENFORCE(file != nullptr, "can't open file: %s ",
+                        filename.c_str());
+  fseek(file, 0, SEEK_END);
+  int64_t size = ftell(file);
+  PADDLE_MOBILE_ENFORCE(size > 0, "size is too small");
+  rewind(file);
+  auto *data = new char[size];
+  size_t bytes_read = fread(data, 1, size, file);
+  PADDLE_MOBILE_ENFORCE(bytes_read == size,
+                        "read binary file bytes do not match with fseek");
+  fclose(file);
+  return data;
+}
+
+paddle_mobile::PaddleMobileConfig GetConfig() {
+  paddle_mobile::PaddleMobileConfig config;
+  config.precision = paddle_mobile::PaddleMobileConfig::FP32;
+  config.device = paddle_mobile::PaddleMobileConfig::kCPU;
+  const std::shared_ptr<paddle_mobile::PaddleModelMemoryPack> &memory_pack =
+      std::make_shared<paddle_mobile::PaddleModelMemoryPack>();
+  auto model_path = std::string(g_genet_combine) + "/model";
+  auto params_path = std::string(g_genet_combine) + "/params";
+  memory_pack->model_size =
+      ReadBuffer(model_path.c_str(), &memory_pack->model_buf);
+  std::cout << "sizeBuf: " << memory_pack->model_size << std::endl;
+  memory_pack->combined_params_size =
+      ReadBuffer(params_path.c_str(), &memory_pack->combined_params_buf);
+  std::cout << "sizeParams: " << memory_pack->combined_params_size
+            << std::endl;
+  memory_pack->from_memory = true;
+  config.memory_pack = *memory_pack;
+  config.thread_num = 4;
+  return config;
+}
+
+int main() {
+  paddle_mobile::PaddleMobileConfig config = GetConfig();
+  auto predictor = paddle_mobile::CreatePaddlePredictor<
+      paddle_mobile::PaddleMobileConfig,
+      paddle_mobile::PaddleEngineKind::kPaddleMobile>(config);
+  return 0;
+}
-- 
GitLab
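
The new test stops at predictor construction, which is all the memory-load path needs for coverage. To push the loaded model end to end, the predictor's Run() interface from paddle_inference_api.h could be fed a PaddleTensor, following the pattern of the existing test_inference_api.cpp. A hedged sketch of that continuation (the {1, 3, 128, 128} input shape is an assumption, not taken from this patch; check it against the actual GeNet model):

    // Hypothetical continuation of main() above; error handling kept minimal.
    std::vector<float> input(1 * 3 * 128 * 128, 1.0f);  // dummy input data
    paddle_mobile::PaddleTensor tensor;
    tensor.shape = std::vector<int>({1, 3, 128, 128});  // assumed input shape
    tensor.data = paddle_mobile::PaddleBuf(input.data(),
                                           input.size() * sizeof(float));
    tensor.dtype = paddle_mobile::PaddleDType::FLOAT32;
    std::vector<paddle_mobile::PaddleTensor> feeds(1, tensor);

    paddle_mobile::PaddleTensor tensor_out;  // output slot for Run() to fill
    tensor_out.dtype = paddle_mobile::PaddleDType::FLOAT32;
    std::vector<paddle_mobile::PaddleTensor> outputs(1, tensor_out);

    if (predictor->Run(feeds, &outputs)) {
      auto *out_data = static_cast<float *>(outputs[0].data.data());
      std::cout << "output[0]: " << out_data[0] << std::endl;
    }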