From 7c181047acaf17fee6f3633e990de4a084bf35f3 Mon Sep 17 00:00:00 2001
From: wangliu
Date: Fri, 20 Jul 2018 12:01:51 +0800
Subject: [PATCH] modify code style

---
 src/io/executor.cpp      | 37 ++++++++++++++++++-------------------
 src/io/loader.cpp        | 14 ++++++++------
 src/io/paddle_mobile.cpp | 13 +++++++------
 src/io/paddle_mobile.h   |  7 ++++---
 4 files changed, 37 insertions(+), 34 deletions(-)

diff --git a/src/io/executor.cpp b/src/io/executor.cpp
index 08eb787bcb..65f019d1e3 100644
--- a/src/io/executor.cpp
+++ b/src/io/executor.cpp
@@ -179,26 +179,25 @@ void Executor<Dtype, P>::LoadMemory(const framework::VarDesc var_desc,
       default:
         break;
     }
 
-    if (program_.quantification) {
-      float min_value;
-      float max_value;
-
-      memcpy(&min_value, *data, sizeof(float));
-      memcpy(&max_value, *data + sizeof(float) , sizeof(float));
-      *data += 2 * sizeof(float);
-      const float factor = (max_value - min_value) / 255.0;
-      uint8_t *uint8_data = (uint8_t *) (*data);
-      for (int k = 0; k < memory_size; ++k) {
-        static_cast<float *>(memory)[k] = uint8_data[k] * factor + min_value;
-      }
-      *data += (memory_size * sizeof(uint8_t));
-    } else {
-      for (int n = 0; n < memory_size * type_size; ++n) {
-        static_cast<char *>(memory)[n] = (*data)[n];
-      }
-      (*data) += (sizeof(char) * memory_size * type_size);
+  if (program_.quantification) {
+    float min_value;
+    float max_value;
+
+    memcpy(&min_value, *data, sizeof(float));
+    memcpy(&max_value, *data + sizeof(float), sizeof(float));
+    *data += 2 * sizeof(float);
+    const float factor = (max_value - min_value) / 255.0;
+    uint8_t *uint8_data = (uint8_t *)(*data);
+    for (int k = 0; k < memory_size; ++k) {
+      static_cast<float *>(memory)[k] = uint8_data[k] * factor + min_value;
+    }
+    *data += (memory_size * sizeof(uint8_t));
+  } else {
+    for (int n = 0; n < memory_size * type_size; ++n) {
+      static_cast<char *>(memory)[n] = (*data)[n];
+    }
+    (*data) += (sizeof(char) * memory_size * type_size);
   }
-  }
 
 template <typename Dtype, Precision P>
diff --git a/src/io/loader.cpp b/src/io/loader.cpp
index c9c8c59749..9ed877d05d 100644
--- a/src/io/loader.cpp
+++ b/src/io/loader.cpp
@@ -44,17 +44,18 @@ static size_t ReadBuffer(const char *file_name, uint8_t **out) {
 
 template <typename Dtype, Precision P>
 const framework::Program<Dtype, P> Loader<Dtype, P>::Load(
-    const std::string &dirname, bool optimize,bool quantification, bool can_add_split) {
-  auto program =
-      this->LoadProgram(dirname + "/__model__", optimize,quantification, can_add_split);
+    const std::string &dirname, bool optimize, bool quantification,
+    bool can_add_split) {
+  auto program = this->LoadProgram(dirname + "/__model__", optimize,
+                                   quantification, can_add_split);
   program.model_path = dirname;
   return program;
 }
 
 template <typename Dtype, Precision P>
 const framework::Program<Dtype, P> Loader<Dtype, P>::Load(
-    const std::string &model_path, const std::string &para_path,
-    bool optimize, bool quantification) {
+    const std::string &model_path, const std::string &para_path, bool optimize,
+    bool quantification) {
   auto program = this->LoadProgram(model_path, optimize);
   program.para_path = para_path;
   program.combined = true;
@@ -64,7 +65,8 @@ const framework::Program<Dtype, P> Loader<Dtype, P>::Load(
 
 template <typename Dtype, Precision P>
 const framework::Program<Dtype, P> Loader<Dtype, P>::LoadProgram(
-    const std::string &model_path, bool optimize, bool quantification, bool can_add_split) {
+    const std::string &model_path, bool optimize, bool quantification,
+    bool can_add_split) {
   std::string model_filename = model_path;
   PaddleMobile__Framework__Proto__ProgramDesc *c_program;
   uint8_t *buf = NULL;
diff --git a/src/io/paddle_mobile.cpp b/src/io/paddle_mobile.cpp
index e455f9acd2..5e2e209d64 100644
--- a/src/io/paddle_mobile.cpp
+++ b/src/io/paddle_mobile.cpp
@@ -25,8 +25,8 @@ void PaddleMobile<Dtype, P>::SetThreadNum(int num) {
 };
 
 template <typename Dtype, Precision P>
-bool PaddleMobile<Dtype, P>::Load(const std::string &dirname, bool optimize, bool quantification,
-                       int batch_size) {
+bool PaddleMobile<Dtype, P>::Load(const std::string &dirname, bool optimize,
+                                  bool quantification, int batch_size) {
   if (loader_.get() == nullptr) {
     loader_ = std::make_shared<Loader<Dtype, P>>();
   } else {
@@ -35,7 +35,7 @@ bool PaddleMobile<Dtype, P>::Load(const std::string &dirname, bool optimize, boo
 
   if (executor_.get() == nullptr) {
     executor_ = std::make_shared<Executor<Dtype, P>>(
-        loader_->Load(dirname, optimize,quantification), batch_size, optimize);
+        loader_->Load(dirname, optimize, quantification), batch_size, optimize);
   } else {
     LOG(kLOG_INFO) << "executor inited";
   }
@@ -45,8 +45,8 @@ bool PaddleMobile<Dtype, P>::Load(const std::string &dirname, bool optimize, boo
 
 template <typename Dtype, Precision P>
 bool PaddleMobile<Dtype, P>::Load(const std::string &model_path,
-                       const std::string &para_path, bool optimize, bool quantification,
-                       int batch_size) {
+                                  const std::string &para_path, bool optimize,
+                                  bool quantification, int batch_size) {
   if (loader_.get() == nullptr) {
     loader_ = std::make_shared<Loader<Dtype, P>>();
   } else {
@@ -55,7 +55,8 @@ bool PaddleMobile<Dtype, P>::Load(const std::string &model_path,
 
   if (executor_.get() == nullptr) {
     executor_ = std::make_shared<Executor<Dtype, P>>(
-        loader_->Load(model_path, para_path, optimize, quantification), batch_size, optimize);
+        loader_->Load(model_path, para_path, optimize, quantification),
+        batch_size, optimize);
   } else {
     LOG(kLOG_INFO) << "executor inited";
   }
diff --git a/src/io/paddle_mobile.h b/src/io/paddle_mobile.h
index b035bc5204..31a9207902 100644
--- a/src/io/paddle_mobile.h
+++ b/src/io/paddle_mobile.h
@@ -38,15 +38,16 @@ class PaddleMobile {
    * @b load separate format fluid model
    * @b 加载分开形式的 fluid 模型
    * */
-  bool Load(const std::string &dirname, bool optimize = false, bool quantification = false,
-            int batch_size = 1);
+  bool Load(const std::string &dirname, bool optimize = false,
+            bool quantification = false, int batch_size = 1);
 
   /*
    * @b load combine format fluid mode
    * @b 加载结合在一起格式的模型
    * */
   bool Load(const std::string &model_path, const std::string &para_path,
-            bool optimize = false,bool quantification = false, int batch_size = 1);
+            bool optimize = false, bool quantification = false,
+            int batch_size = 1);
 
   void SetThreadNum(int num);
 
   /*
--
GitLab
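
Reviewer note on the quantification branch reindented in src/io/executor.cpp:
for a quantified model, each parameter blob stores a float min and max followed
by one uint8_t per element, and LoadMemory maps every byte back to float as
value = byte * (max - min) / 255 + min. A minimal standalone sketch of that
decode step follows; it is an illustration only, and DequantizeBlob and the
blob layout are hypothetical names, not paddle-mobile API:

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

// Decode one quantified blob: [float min][float max][uint8_t x n] -> n floats.
std::vector<float> DequantizeBlob(const char *blob, int n) {
  float min_value;
  float max_value;
  std::memcpy(&min_value, blob, sizeof(float));
  std::memcpy(&max_value, blob + sizeof(float), sizeof(float));
  const float factor = (max_value - min_value) / 255.0f;  // size of one level
  const uint8_t *bytes =
      reinterpret_cast<const uint8_t *>(blob + 2 * sizeof(float));
  std::vector<float> out(n);
  for (int k = 0; k < n; ++k) {
    out[k] = bytes[k] * factor + min_value;  // same formula as the patch
  }
  return out;
}

int main() {
  // Tiny example blob: min = -1, max = 1, bytes {0, 128, 255}.
  const float min_value = -1.0f;
  const float max_value = 1.0f;
  const uint8_t bytes[3] = {0, 128, 255};
  char blob[2 * sizeof(float) + sizeof(bytes)];
  std::memcpy(blob, &min_value, sizeof(float));
  std::memcpy(blob + sizeof(float), &max_value, sizeof(float));
  std::memcpy(blob + 2 * sizeof(float), bytes, sizeof(bytes));

  for (float v : DequantizeBlob(blob, 3)) {
    std::printf("%f\n", v);  // prints -1.000000, 0.003922, 1.000000
  }
  return 0;
}

The scheme is lossy by design: 256 byte values span [min, max] in 255 steps, so
the worst-case rounding error per weight is (max - min) / 510, which is why the
executor only takes this path when program_.quantification is set.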
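
Reviewer note on the declarations reformatted in src/io/paddle_mobile.h: the
two Load overloads cover the separate format (a directory holding __model__
plus per-parameter files) and the combined format (one model file plus one
parameter file). A hedged usage sketch follows; the paths are placeholders and
the CPU / Precision::FP32 template arguments are an assumption about the usual
instantiation, not something this patch shows:

#include "io/paddle_mobile.h"

int main() {
  // Template arguments assumed for illustration; check the defaults in the
  // real header before relying on them.
  paddle_mobile::PaddleMobile<paddle_mobile::CPU,
                              paddle_mobile::Precision::FP32> pm;
  pm.SetThreadNum(4);

  // Separate format: the directory contains __model__ and parameter files.
  bool ok = pm.Load("/path/to/model_dir", /*optimize=*/true,
                    /*quantification=*/true, /*batch_size=*/1);

  // The combined format would instead be:
  //   pm.Load("/path/to/model", "/path/to/params", true, true, 1);
  return ok ? 0 : 1;
}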