Commit 262bab3d authored by liuruilong

format files

Parent 51704db5
@@ -2095,4 +2095,4 @@ protobuf_c_boolean protobuf_c_message_check(const ProtobufCMessage *message) {
 /* === services === */
 typedef void (*GenericHandler)(void *service, const ProtobufCMessage *input,
-                               ProtobufCClosure closure, void *closure_data);
\ No newline at end of file
+                               ProtobufCClosure closure, void *closure_data);
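
For context, `GenericHandler` is protobuf-c's generic service-dispatch callback type: a handler receives the unpacked request message and reports its result by invoking the closure. A minimal sketch of a conforming handler follows; the handler name is hypothetical, and the include path assumes the copy of protobuf-c vendored in this repository.

```cpp
#include "protobuf-c/protobuf-c.h"  // assumed vendored header path

// Hypothetical handler matching the GenericHandler typedef above.
static void my_generic_handler(void *service, const ProtobufCMessage *input,
                               ProtobufCClosure closure, void *closure_data) {
  (void)service;  // a real handler would downcast this to its service type
  (void)input;    // ...and read request fields from the unpacked message
  // protobuf-c convention: hand the response message to the closure;
  // passing NULL signals that no response could be produced.
  closure(NULL, closure_data);
}
```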
@@ -17,9 +17,7 @@ limitations under the License. */
 namespace paddle_mobile {
 namespace framework {
-std::vector<std::shared_ptr<VarDesc>> BlockDesc::Vars() const {
-  return vars_;
-}
+std::vector<std::shared_ptr<VarDesc>> BlockDesc::Vars() const { return vars_; }
 std::vector<std::shared_ptr<OpDesc>> BlockDesc::Ops() const { return ops_; }
@@ -30,11 +28,10 @@ BlockDesc::BlockDesc(PaddleMobile__Framework__Proto__BlockDesc *desc)
     vars_.emplace_back(std::shared_ptr<VarDesc>(new VarDesc(var_desc)));
   }
-  std::sort(vars_.begin(),
-            vars_.end(),
-            [](std::shared_ptr<VarDesc> left, std::shared_ptr<VarDesc> right){
-              return left->Name() < right->Name();
-            });
+  std::sort(vars_.begin(), vars_.end(),
+            [](std::shared_ptr<VarDesc> left, std::shared_ptr<VarDesc> right) {
+              return left->Name() < right->Name();
+            });
   for (int j = 0; j < desc->n_ops; ++j) {
     PaddleMobile__Framework__Proto__OpDesc *op_desc = desc->ops[j];
......
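
The reformatted `std::sort` call above orders the block's variables by name, presumably so downstream code sees a deterministic layout. A self-contained sketch of the same comparator shape, with a stand-in `VarDesc` reduced to the one accessor the lambda needs and illustrative variable names:

```cpp
#include <algorithm>
#include <iostream>
#include <memory>
#include <string>
#include <vector>

// Stand-in for framework::VarDesc, reduced to the accessor the lambda uses.
class VarDesc {
 public:
  explicit VarDesc(std::string name) : name_(std::move(name)) {}
  const std::string &Name() const { return name_; }

 private:
  std::string name_;
};

int main() {
  std::vector<std::shared_ptr<VarDesc>> vars;
  vars.emplace_back(std::make_shared<VarDesc>("fc_0.w_0"));
  vars.emplace_back(std::make_shared<VarDesc>("batch_norm_0.b_0"));
  vars.emplace_back(std::make_shared<VarDesc>("conv2d_0.w_0"));

  // Same shape as the BlockDesc constructor above: compare the pointees'
  // names, not the shared_ptr addresses.
  std::sort(vars.begin(), vars.end(),
            [](const std::shared_ptr<VarDesc> &left,
               const std::shared_ptr<VarDesc> &right) {
              return left->Name() < right->Name();
            });

  for (const auto &v : vars) std::cout << v->Name() << "\n";
  return 0;
}
```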
@@ -36,7 +36,7 @@ class BlockDesc {
     for (int i = 0; i < block_desc.vars_.size(); ++i) {
       auto &var_desc = block_desc.vars_[i];
-      vars_.emplace_back(std::make_shared<VarDesc>(*var_desc)) ;
+      vars_.emplace_back(std::make_shared<VarDesc>(*var_desc));
     }
   }
......
@@ -91,7 +91,7 @@ int Node::Depth(int begin) {
 }
 Node &Node::Folder(
     int size, std::string type,
     std::map<std::string, std::pair<std::string, std::string>> change,
     std::vector<std::shared_ptr<Node>> *removed_nodes) {
   std::shared_ptr<framework::OpDesc> op_desc =
......
@@ -14,10 +14,10 @@ limitations under the License. */
 #pragma once
+#include <cinttypes>
 #include <map>
 #include <string>
 #include <vector>
-#include <cinttypes>
 #include "common/log.h"
 #include "framework/program/op_desc.h"
@@ -42,7 +42,7 @@ class Node {
   std::shared_ptr<Node> To(int size);
   int Depth(int begin = 0);
   Node &Folder(
       int size, std::string type,
       std::map<std::string, std::pair<std::string, std::string>> change_map,
       std::vector<std::shared_ptr<Node>> *removed_nodes);
   std::vector<std::shared_ptr<framework::OpDesc>> OpDescs(int size);
......
@@ -30,6 +30,7 @@ class Program {
   std::string model_path;
   std::string para_path;
   bool is_commbine = false;
+
  private:
 };
......
@@ -23,9 +23,9 @@ limitations under the License. */
 #include "common/enforce.h"
 #include "common/enforce.h"
+#include "framework/data_layout.h"
 #include "framework/ddim.h"
 #include "memory/t_malloc.h"
-#include "framework/data_layout.h"
 namespace paddle_mobile {
 namespace framework {
......
@@ -164,9 +164,9 @@ const framework::Program<Dtype, P> Loader<Dtype, P>::Load(
 }
 template <typename Dtype, Precision P>
-const framework::Program<Dtype, P> Loader<Dtype, P>::Load(const std::string &model_path,
-                                                          const std::string &para_path,
-                                                          bool optimize){
+const framework::Program<Dtype, P> Loader<Dtype, P>::Load(
+    const std::string &model_path, const std::string &para_path,
+    bool optimize) {
   auto program = this->LoadProgram(model_path, optimize);
   program.para_path = para_path;
   program.is_commbine = true;
@@ -174,9 +174,8 @@ const framework::Program<Dtype, P> Loader<Dtype, P>::Load(const std::string &mod
 }
 template <typename Dtype, Precision P>
-const framework::Program<Dtype, P> Loader<Dtype, P>::LoadProgram(const std::string &model_path,
-                                                                 bool optimize){
+const framework::Program<Dtype, P> Loader<Dtype, P>::LoadProgram(
+    const std::string &model_path, bool optimize) {
   std::string model_filename = model_path;
   PaddleMobile__Framework__Proto__ProgramDesc *c_program;
   uint8_t *buf = NULL;
@@ -185,7 +184,7 @@ const framework::Program<Dtype, P> Loader<Dtype, P>::LoadProgram(const std::stri
   PADDLE_MOBILE_ENFORCE(buf != NULL, "read from __model__ is null");
   c_program = paddle_mobile__framework__proto__program_desc__unpack(
       NULL, read_size, buf);
   //
   PADDLE_MOBILE_ENFORCE(c_program != NULL, "program is null");
   //
@@ -228,7 +227,7 @@ const framework::Program<Dtype, P> Loader<Dtype, P>::LoadProgram(const std::stri
   if (optimize) {
     framework::ProgramOptimize program_optimize;
     program.optimizeProgram =
         program_optimize.FushionOptimize(originProgramDesc);
   }
   if (optimize) {
     program.optimizeProgram->Description("optimize: ");
@@ -276,14 +275,12 @@ Executor<Dtype, P>::Executor(const framework::Program<Dtype> p, int batch_size,
   } else {
     InitMemory();
   }
 }
 template <typename Dtype, Precision P>
 void Executor<Dtype, P>::LoadMemory(const framework::VarDesc var_desc,
                                     framework::LoDTensor *tensor,
                                     const std::string &file_path, char *data) {
   // 1. version
   uint32_t version = *(uint32_t *)data;
   data += sizeof(uint32_t);
@@ -383,7 +380,8 @@ void Executor<Dtype, P>::InitMemory() {
         continue;
       }
-      char *origin_data = Get_binary_data(program_.model_path + "/" + var_desc->Name());
+      char *origin_data =
+          Get_binary_data(program_.model_path + "/" + var_desc->Name());
       LoadMemory(*var_desc, tensor,
                  program_.model_path + "/" + var_desc->Name(), origin_data);
       delete origin_data;
@@ -399,7 +397,7 @@ void Executor<Dtype, P>::InitMemory() {
 }
 template <typename Dtype, Precision P>
-void Executor<Dtype, P>::InitCombineMemory(){
+void Executor<Dtype, P>::InitCombineMemory() {
   char *origin_data = Get_binary_data(program_.para_path);
   for (const auto &block : to_predict_program_->Blocks()) {
......
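
The `LoadMemory` hunk above shows the start of the parameter-file layout this loader walks: a 4-byte version header, after which the cursor advances field by field. A minimal standalone sketch of that cursor pattern; the buffer contents and version value here are fabricated, only the read-then-advance idiom comes from the diff:

```cpp
#include <cstdint>
#include <cstring>
#include <iostream>
#include <vector>

// Read a fixed-width field from a raw byte buffer and advance the cursor.
// memcpy sidesteps the alignment/aliasing pitfalls of *(uint32_t *)data.
static uint32_t ReadU32(const char *&cursor) {
  uint32_t value = 0;
  std::memcpy(&value, cursor, sizeof(value));
  cursor += sizeof(value);
  return value;
}

int main() {
  // Fabricated blob: a 4-byte version header followed by payload bytes.
  std::vector<char> blob(sizeof(uint32_t) + 8, 0);
  const uint32_t version = 0;  // value is illustrative
  std::memcpy(blob.data(), &version, sizeof(version));

  const char *cursor = blob.data();
  std::cout << "version = " << ReadU32(cursor) << "\n";
  // cursor now points at the next field, as in LoadMemory's `data += ...`.
  return 0;
}
```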
@@ -63,7 +63,8 @@ class Executor {
   void InitMemory();
   void LoadMemory(const framework::VarDesc var_desc,
-                  framework::LoDTensor *tensor, const std::string &file_path, char *data);
+                  framework::LoDTensor *tensor, const std::string &file_path,
+                  char *data);
   void InitCombineMemory();
   framework::Program<Dtype> program_;
   int batch_size_ = 1;
......
@@ -20,7 +20,8 @@ int main() {
   // ../../../test/models/googlenet
   // ../../../test/models/mobilenet
   auto program = loader.Load(g_resnet, true);
-  loader.Load(g_googlenet_combine + "/model", g_googlenet_combine + "/params", true);
+  loader.Load(g_googlenet_combine + "/model", g_googlenet_combine + "/params",
+              true);
   program.originProgram->Description("program desc: ");
   return 0;
......
@@ -22,8 +22,8 @@ int main() {
   auto time1 = time();
   auto program = loader.Load(g_googlenet, optimize);
   // auto program = loader.Load(g_googlenet_combine + "/model",
   //                            g_googlenet_combine + "/params", optimize);
   auto time2 = time();
   DLOG << "load cost :" << time_diff(time1, time2) << "ms\n";
   paddle_mobile::Executor<paddle_mobile::CPU> executor(program, 1, optimize);
......
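
Taken together, the test hunks exercise the two `Loader::Load` shapes this commit reformats: the per-file layout (a directory containing `__model__` plus one file per parameter) and the combined layout (a single model file plus a single params blob, which sets `program.is_commbine` so the executor calls `InitCombineMemory()`). A hedged end-to-end sketch, assuming the test globals (`g_googlenet`, `g_googlenet_combine`), the `time()`/`time_diff()` helpers visible in these tests, and the header locations:

```cpp
#include "io.h"           // assumed location of Loader/Executor
#include "test_helper.h"  // assumed source of g_googlenet*, time(), time_diff()

int main() {
  paddle_mobile::Loader<paddle_mobile::CPU> loader;

  // Per-file layout: one parameter file per variable next to __model__.
  auto program = loader.Load(g_googlenet, /*optimize=*/true);

  // Combined layout: the overload reformatted above; it records para_path
  // and flags the program so initialization reads one contiguous blob.
  auto combined = loader.Load(g_googlenet_combine + "/model",
                              g_googlenet_combine + "/params",
                              /*optimize=*/true);

  auto time1 = time();
  paddle_mobile::Executor<paddle_mobile::CPU> executor(combined, 1, true);
  auto time2 = time();
  DLOG << "init cost :" << time_diff(time1, time2) << "ms";
  return 0;
}
```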