From 8b6cc00a3b9150c1417bcbc2a3ccedf54db21e32 Mon Sep 17 00:00:00 2001
From: liuruilong
Date: Fri, 8 Jun 2018 17:00:03 +0800
Subject: [PATCH] add code comment

---
 src/io/io.cpp  | 87 --------------------------------------------------
 src/io/io.h    | 30 +++++++++++++----
 tools/build.sh |  1 -
 3 files changed, 23 insertions(+), 95 deletions(-)

diff --git a/src/io/io.cpp b/src/io/io.cpp
index b37b3e248a..9c34378d99 100644
--- a/src/io/io.cpp
+++ b/src/io/io.cpp
@@ -68,93 +68,6 @@ static size_t ReadBuffer(const char *file_name, uint8_t **out) {
   return cur_len;
 }
 
-template <typename Dtype, Precision P>
-void Loader<Dtype, P>::LoadVar(framework::Variable *variable,
-                               const framework::VarDesc &var_desc,
-                               const std::string &file_path) {
-  auto tensor = variable->GetMutable<framework::LoDTensor>();
-  char *data = Get_binary_data(file_path);
-
-  // 1. version
-  uint32_t version = *(uint32_t *)data;
-  data += sizeof(uint32_t);
-
-  // 2 Lod information
-  uint32_t lod_level = *(uint64_t *)data;
-  data += sizeof(uint64_t);
-
-  auto &lod = *tensor->mutable_lod();
-  lod.resize(lod_level);
-  for (uint64_t i = 0; i < lod_level; ++i) {
-    uint32_t size = *(uint64_t *)data;
-    data += sizeof(uint64_t);
-
-    std::vector<size_t> tmp(size / sizeof(size_t));
-
-    for (int k = 0; k < tmp.size(); ++k) {
-      tmp[k] = *(size_t *)data;
-    }
-    lod[i] = tmp;
-  }
-
-  // 3. tensor version
-  uint32_t tensor_version = *(uint32_t *)data;
-  data += sizeof(uint32_t);
-
-  // 4. tensor desc
-  uint32_t size = *(int32_t *)data;
-  data += sizeof(int32_t);
-
-  std::unique_ptr<char[]> buf(new char[size]);
-
-  for (int m = 0; m < size; ++m) {
-    buf.get()[m] = data[m];
-  }
-
-  const framework::TensorDesc &desc = var_desc.Tensor_desc();
-
-  PaddleMobile__Framework__Proto__VarType__TensorDesc *tensor_desc = NULL;
-
-  int memory_size = 1;
-  for (auto l : desc.Dims()) {
-    memory_size *= l;
-  }
-
-  tensor->Resize(framework::make_ddim(desc.Dims()));
-
-  void *memory = tensor;
-  int type_size = 0;
-  switch (desc.DataType()) {
-    case framework::VARTYPE_TYPE_FP16:
-      type_size = 2;
-      break;
-    case framework::VARTYPE_TYPE_FP32:
-      type_size = 4;
-      memory = tensor->mutable_data<float>();
-      break;
-    case framework::VARTYPE_TYPE_FP64:
-      type_size = 8;
-      break;
-    case framework::VARTYPE_TYPE_INT32:
-      type_size = 4;
-      break;
-    case framework::VARTYPE_TYPE_INT64:
-      type_size = 8;
-      break;
-    case framework::VARTYPE_TYPE_BOOL:
-      type_size = 1;
-      break;
-    default:
-      break;
-  }
-
-  for (int n = 0; n < memory_size * type_size; ++n) {
-    static_cast<char *>(memory)[n] = data[n];
-  }
-
-  delete data;
-}
-
 template <typename Dtype, Precision P>
 const framework::Program<Dtype, P> Loader<Dtype, P>::Load(
     const std::string &dirname, bool optimize) {
diff --git a/src/io/io.h b/src/io/io.h
index 2eaa94d0a6..14122b736f 100644
--- a/src/io/io.h
+++ b/src/io/io.h
@@ -20,29 +20,34 @@ limitations under the License. */
 
 #include <string>
 #include "common/types.h"
-#include "framework/lod_tensor.h"
+#include "framework/tensor.h"
 #include "framework/operator.h"
+#include "framework/lod_tensor.h"
 #include "framework/program/program.h"
-#include "framework/tensor.h"
 
 namespace paddle_mobile {
 
 template <typename Dtype, Precision P>
 class Loader {
  public:
+
+  /*
+   * @b load a separate-format fluid model
+   * @b (model structure and parameters are stored as separate files)
+   * */
   const framework::Program<Dtype, P> Load(const std::string &dirname,
                                           bool optimize = false);
 
+  /*
+   * @b load a combined-format fluid model
+   * @b (all parameters are combined into a single file next to the model file)
+   * */
   const framework::Program<Dtype, P> Load(const std::string &model_path,
                                           const std::string &para_path,
                                           bool optimize = false);
 
- private:
   const framework::Program<Dtype, P> LoadProgram(const std::string &model_path,
                                                  bool optimize = false);
-  void LoadVar(framework::Variable *variable,
-               const framework::VarDesc &var_desc,
-               const std::string &file_path);
 };
 
 template <typename Dtype, Precision P>
@@ -50,17 +50,28 @@ class Executor {
  public:
   typedef typename PrecisionTrait<P>::ptype Ptype;
 
+  /*
+   * @b init the executor with a program loaded by the Loader class
+   * @b (instantiate the executor from a Loader-produced program)
+   * */
   Executor(const framework::Program<Dtype> p, int batch_size = 1,
            bool use_optimize = true);
 
+  /*
+   * @b run prediction on an input tensor
+   * */
   std::shared_ptr<framework::Tensor> Predict(const framework::Tensor &t);
 
+  /*
+   * @b predict with raw input data and its dimensions
+   *
+   * @b (run prediction from the input values plus the input's shape information)
+   * */
   std::vector<Ptype> Predict(const std::vector<Ptype> &input,
                              const std::vector<int64_t> &dims);
 
  protected:
   Executor() = default;
-  void InitMemory();
   void LoadMemory(const framework::VarDesc var_desc,
                   framework::LoDTensor *tensor, char *&data);
 
diff --git a/tools/build.sh b/tools/build.sh
index e6ffdf155c..aa59bd3d28 100755
--- a/tools/build.sh
+++ b/tools/build.sh
@@ -70,7 +70,6 @@ build_for_android() {
             -DCMAKE_TOOLCHAIN_FILE="${TOOLCHAIN_FILE}" \
             -DANDROID_PLATFORM="${ANDROID_PLATFORM_VERSION}" \
             -DCMAKE_CXX_FLAGS="${CXX_FLAGS}" \
-            -DCMAKE_LDFLAGS="-Wl,--gc-sections --icf=safe" \
             -DANDROID_STL=c++_static \
            -DANDROID=true \
             -D"${NET}=true" \
-- 
GitLab
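
For context, below is a minimal usage sketch of the Loader/Executor API that the new comments document: load a fluid model (separate or combined format) and run a prediction. It is illustrative only and not part of the patch; the include path "io/io.h", the CPU device tag, the Precision::FP32 enum value, the assumption that Ptype is float for FP32, and the model/parameter paths are guesses for illustration rather than values taken from this commit.

    // Sketch only: device tag, precision enum, include path and file paths are assumed.
    #include <cstdint>
    #include <vector>
    #include "io/io.h"

    int main() {
      paddle_mobile::Loader<paddle_mobile::CPU, paddle_mobile::Precision::FP32> loader;

      // Separate-format model: a directory holding the model file plus one file per parameter.
      auto program = loader.Load("./model_dir/", /*optimize=*/true);

      // Combined-format model: one model file plus a single combined parameter file.
      // auto program = loader.Load("./model", "./params", /*optimize=*/true);

      paddle_mobile::Executor<paddle_mobile::CPU, paddle_mobile::Precision::FP32> executor(
          program, /*batch_size=*/1, /*use_optimize=*/true);

      // Second Predict overload: raw input values plus their dimensions.
      // std::vector<float> assumes Ptype resolves to float for FP32 precision.
      std::vector<float> input(1 * 3 * 224 * 224, 0.0f);
      std::vector<int64_t> dims = {1, 3, 224, 224};
      std::vector<float> output = executor.Predict(input, dims);
      return 0;
    }

The second Predict overload is shown because it needs no framework::Tensor setup; the first overload would be used when a tensor has already been constructed.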