Commit 92435044 authored by wangliu

modify code style

Parent: 6ef17f09
@@ -179,26 +179,25 @@ void Executor<Dtype, P>::LoadMemory(const framework::VarDesc var_desc,
     default:
       break;
   }
   if (program_.quantification) {
     float min_value;
     float max_value;
     memcpy(&min_value, *data, sizeof(float));
-    memcpy(&max_value, *data + sizeof(float) , sizeof(float));
+    memcpy(&max_value, *data + sizeof(float), sizeof(float));
     *data += 2 * sizeof(float);
     const float factor = (max_value - min_value) / 255.0;
-    uint8_t *uint8_data = (uint8_t *) (*data);
+    uint8_t *uint8_data = (uint8_t *)(*data);
     for (int k = 0; k < memory_size; ++k) {
       static_cast<float *>(memory)[k] = uint8_data[k] * factor + min_value;
     }
     *data += (memory_size * sizeof(uint8_t));
   } else {
     for (int n = 0; n < memory_size * type_size; ++n) {
       static_cast<char *>(memory)[n] = (*data)[n];
     }
     (*data) += (sizeof(char) * memory_size * type_size);
   }
 }
 
 template <typename Dtype, Precision P>
...
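Note on the quantification branch above: each quantified tensor is stored as two floats (min_value, max_value) followed by memory_size uint8 codes, and is decoded with factor = (max - min) / 255. A minimal standalone sketch of that decoding; the function name, return type, and buffer layout framing are illustrative, only the arithmetic comes from the diff:

// Sketch of the dequantization scheme in Executor::LoadMemory above.
// Layout per tensor: [min_value:float][max_value:float][codes:uint8 x n].
#include <cstdint>
#include <cstring>
#include <vector>

std::vector<float> DequantizeUint8(const uint8_t *data, size_t n) {
  float min_value, max_value;
  std::memcpy(&min_value, data, sizeof(float));
  std::memcpy(&max_value, data + sizeof(float), sizeof(float));
  data += 2 * sizeof(float);
  // Linear 8-bit mapping: code 0 -> min_value, code 255 -> max_value.
  const float factor = (max_value - min_value) / 255.0f;
  std::vector<float> out(n);
  for (size_t k = 0; k < n; ++k) {
    out[k] = data[k] * factor + min_value;
  }
  return out;
}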
@@ -44,17 +44,18 @@ static size_t ReadBuffer(const char *file_name, uint8_t **out) {
 
 template <typename Dtype, Precision P>
 const framework::Program<Dtype, P> Loader<Dtype, P>::Load(
-    const std::string &dirname, bool optimize,bool quantification, bool can_add_split) {
-  auto program =
-      this->LoadProgram(dirname + "/__model__", optimize,quantification, can_add_split);
+    const std::string &dirname, bool optimize, bool quantification,
+    bool can_add_split) {
+  auto program = this->LoadProgram(dirname + "/__model__", optimize,
+                                   quantification, can_add_split);
   program.model_path = dirname;
   return program;
 }
 
 template <typename Dtype, Precision P>
 const framework::Program<Dtype, P> Loader<Dtype, P>::Load(
-    const std::string &model_path, const std::string &para_path,
-    bool optimize, bool quantification) {
+    const std::string &model_path, const std::string &para_path, bool optimize,
+    bool quantification) {
   auto program = this->LoadProgram(model_path, optimize);
   program.para_path = para_path;
   program.combined = true;
@@ -64,7 +65,8 @@ const framework::Program<Dtype, P> Loader<Dtype, P>::Load(
 
 template <typename Dtype, Precision P>
 const framework::Program<Dtype, P> Loader<Dtype, P>::LoadProgram(
-    const std::string &model_path, bool optimize, bool quantification, bool can_add_split) {
+    const std::string &model_path, bool optimize, bool quantification,
+    bool can_add_split) {
   std::string model_filename = model_path;
   PaddleMobile__Framework__Proto__ProgramDesc *c_program;
   uint8_t *buf = NULL;
...
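The two Loader::Load overloads above correspond to the two fluid model layouts: the dirname form reads the program description from dirname + "/__model__" with each parameter in its own file, while the two-path form reads one packed parameter file and sets program.combined = true. A hypothetical call site; the include path, CPU device argument, and template defaults are assumptions, only the signatures and the "/__model__" convention come from the diff:

// Hypothetical usage of the Loader overloads shown above.
#include "io/loader.h"  // assumed include path

void LoadBothFormats() {
  paddle_mobile::Loader<paddle_mobile::CPU> loader;  // CPU assumed

  // Separate format: program at model_dir/__model__, params as single files.
  auto separate = loader.Load("model_dir", /*optimize=*/false,
                              /*quantification=*/false,
                              /*can_add_split=*/false);

  // Combined format: one model file plus one packed parameter file.
  auto combined = loader.Load("model_dir/model", "model_dir/params",
                              /*optimize=*/false, /*quantification=*/false);
}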
@@ -25,8 +25,8 @@ void PaddleMobile<Dtype, P>::SetThreadNum(int num) {
 };
 
 template <typename Dtype, Precision P>
-bool PaddleMobile<Dtype, P>::Load(const std::string &dirname, bool optimize, bool quantification,
-                                  int batch_size) {
+bool PaddleMobile<Dtype, P>::Load(const std::string &dirname, bool optimize,
+                                  bool quantification, int batch_size) {
   if (loader_.get() == nullptr) {
     loader_ = std::make_shared<Loader<Dtype, P>>();
   } else {
@@ -35,7 +35,7 @@ bool PaddleMobile<Dtype, P>::Load(const std::string &dirname, bool optimize, boo
   if (executor_.get() == nullptr) {
     executor_ = std::make_shared<Executor<Dtype, P>>(
-        loader_->Load(dirname, optimize,quantification), batch_size, optimize);
+        loader_->Load(dirname, optimize, quantification), batch_size, optimize);
   } else {
     LOG(kLOG_INFO) << "executor inited";
   }
@@ -45,8 +45,8 @@ bool PaddleMobile<Dtype, P>::Load(const std::string &dirname, bool optimize, boo
 
 template <typename Dtype, Precision P>
 bool PaddleMobile<Dtype, P>::Load(const std::string &model_path,
-                                  const std::string &para_path, bool optimize, bool quantification,
-                                  int batch_size) {
+                                  const std::string &para_path, bool optimize,
+                                  bool quantification, int batch_size) {
   if (loader_.get() == nullptr) {
     loader_ = std::make_shared<Loader<Dtype, P>>();
   } else {
@@ -55,7 +55,8 @@ bool PaddleMobile<Dtype, P>::Load(const std::string &model_path,
   if (executor_.get() == nullptr) {
     executor_ = std::make_shared<Executor<Dtype, P>>(
-        loader_->Load(model_path, para_path, optimize, quantification), batch_size, optimize);
+        loader_->Load(model_path, para_path, optimize, quantification),
+        batch_size, optimize);
   } else {
     LOG(kLOG_INFO) << "executor inited";
   }
...
@@ -38,15 +38,16 @@ class PaddleMobile {
    * @b load separate format fluid model
    * @b 加载分开形式的 fluid 模型
    * */
-  bool Load(const std::string &dirname, bool optimize = false, bool quantification = false,
-            int batch_size = 1);
+  bool Load(const std::string &dirname, bool optimize = false,
+            bool quantification = false, int batch_size = 1);
 
   /*
    * @b load combine format fluid mode
    * @b 加载结合在一起格式的模型
    * */
   bool Load(const std::string &model_path, const std::string &para_path,
-            bool optimize = false,bool quantification = false, int batch_size = 1);
+            bool optimize = false, bool quantification = false,
+            int batch_size = 1);
 
   void SetThreadNum(int num);
 
   /*
...
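For reference, a sketch of how the public API declared above might be called. The device template argument and include path are assumptions; the signatures, default arguments, and the quantification flag come from the header diff:

// Hypothetical end-to-end usage of PaddleMobile::Load.
#include "io/paddle_mobile.h"  // assumed include path

int main() {
  paddle_mobile::PaddleMobile<paddle_mobile::CPU> engine;  // CPU assumed
  engine.SetThreadNum(4);

  // Separate-format model: pass the directory; when quantification = true,
  // uint8 weights are dequantized to float during loading.
  bool ok = engine.Load("model_dir", /*optimize=*/true,
                        /*quantification=*/true, /*batch_size=*/1);

  // Combined-format model: pass the model file and the packed param file.
  ok = ok && engine.Load("model_dir/model", "model_dir/params",
                         /*optimize=*/true, /*quantification=*/false);
  return ok ? 0 : 1;
}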