Commit 262bab3d authored by liuruilong

format files

Parent 51704db5
@@ -17,9 +17,7 @@ limitations under the License. */
 namespace paddle_mobile {
 namespace framework {
-std::vector<std::shared_ptr<VarDesc>> BlockDesc::Vars() const {
-return vars_;
-}
+std::vector<std::shared_ptr<VarDesc>> BlockDesc::Vars() const { return vars_; }
 std::vector<std::shared_ptr<OpDesc>> BlockDesc::Ops() const { return ops_; }
@@ -30,9 +28,8 @@ BlockDesc::BlockDesc(PaddleMobile__Framework__Proto__BlockDesc *desc)
 vars_.emplace_back(std::shared_ptr<VarDesc>(new VarDesc(var_desc)));
 }
-std::sort(vars_.begin(),
-vars_.end(),
-[](std::shared_ptr<VarDesc> left, std::shared_ptr<VarDesc> right){
+std::sort(vars_.begin(), vars_.end(),
+[](std::shared_ptr<VarDesc> left, std::shared_ptr<VarDesc> right) {
 return left->Name() < right->Name();
 });
......
@@ -36,7 +36,7 @@ class BlockDesc {
 for (int i = 0; i < block_desc.vars_.size(); ++i) {
 auto &var_desc = block_desc.vars_[i];
-vars_.emplace_back(std::make_shared<VarDesc>(*var_desc)) ;
+vars_.emplace_back(std::make_shared<VarDesc>(*var_desc));
 }
 }
......
@@ -14,10 +14,10 @@ limitations under the License. */
 #pragma once
+#include <cinttypes>
 #include <map>
 #include <string>
 #include <vector>
-#include <cinttypes>
 #include "common/log.h"
 #include "framework/program/op_desc.h"
......
@@ -30,6 +30,7 @@ class Program {
 std::string model_path;
 std::string para_path;
 bool is_commbine = false;
+ private:
 };
......
@@ -23,9 +23,9 @@ limitations under the License. */
-#include "common/enforce.h"
+#include "common/enforce.h"
+#include "framework/data_layout.h"
 #include "framework/ddim.h"
 #include "memory/t_malloc.h"
-#include "framework/data_layout.h"
 namespace paddle_mobile {
 namespace framework {
......
@@ -164,9 +164,9 @@ const framework::Program<Dtype, P> Loader<Dtype, P>::Load(
 }
 template <typename Dtype, Precision P>
-const framework::Program<Dtype, P> Loader<Dtype, P>::Load(const std::string &model_path,
-const std::string &para_path,
-bool optimize){
+const framework::Program<Dtype, P> Loader<Dtype, P>::Load(
+const std::string &model_path, const std::string &para_path,
+bool optimize) {
 auto program = this->LoadProgram(model_path, optimize);
 program.para_path = para_path;
 program.is_commbine = true;
@@ -174,9 +174,8 @@ const framework::Program<Dtype, P> Loader<Dtype, P>::Load(const std::string &mod
 }
 template <typename Dtype, Precision P>
-const framework::Program<Dtype, P> Loader<Dtype, P>::LoadProgram(const std::string &model_path,
-bool optimize){
+const framework::Program<Dtype, P> Loader<Dtype, P>::LoadProgram(
+const std::string &model_path, bool optimize) {
 std::string model_filename = model_path;
 PaddleMobile__Framework__Proto__ProgramDesc *c_program;
 uint8_t *buf = NULL;
@@ -276,14 +275,12 @@ Executor<Dtype, P>::Executor(const framework::Program<Dtype> p, int batch_size,
 } else {
 InitMemory();
 }
 }
 template <typename Dtype, Precision P>
 void Executor<Dtype, P>::LoadMemory(const framework::VarDesc var_desc,
 framework::LoDTensor *tensor,
 const std::string &file_path, char *data) {
 // 1. version
 uint32_t version = *(uint32_t *)data;
 data += sizeof(uint32_t);
@@ -383,7 +380,8 @@ void Executor<Dtype, P>::InitMemory() {
 continue;
 }
-char *origin_data = Get_binary_data(program_.model_path + "/" + var_desc->Name());
+char *origin_data =
+Get_binary_data(program_.model_path + "/" + var_desc->Name());
 LoadMemory(*var_desc, tensor,
 program_.model_path + "/" + var_desc->Name(), origin_data);
 delete origin_data;
@@ -399,7 +397,7 @@ void Executor<Dtype, P>::InitMemory() {
 }
 template <typename Dtype, Precision P>
-void Executor<Dtype, P>::InitCombineMemory(){
+void Executor<Dtype, P>::InitCombineMemory() {
 char *origin_data = Get_binary_data(program_.para_path);
 for (const auto &block : to_predict_program_->Blocks()) {
......
@@ -63,7 +63,8 @@ class Executor {
 void InitMemory();
 void LoadMemory(const framework::VarDesc var_desc,
-framework::LoDTensor *tensor, const std::string &file_path, char *data);
+framework::LoDTensor *tensor, const std::string &file_path,
+char *data);
 void InitCombineMemory();
 framework::Program<Dtype> program_;
 int batch_size_ = 1;
......
@@ -20,7 +20,8 @@ int main() {
 // ../../../test/models/googlenet
 // ../../../test/models/mobilenet
 auto program = loader.Load(g_resnet, true);
-loader.Load(g_googlenet_combine + "/model", g_googlenet_combine + "/params", true);
+loader.Load(g_googlenet_combine + "/model", g_googlenet_combine + "/params",
+true);
 program.originProgram->Description("program desc: ");
 return 0;
......
@@ -22,8 +22,8 @@ int main() {
 auto time1 = time();
 auto program = loader.Load(g_googlenet, optimize);
-// auto program = loader.Load(g_googlenet_combine + "/model",
-// g_googlenet_combine + "/params", optimize);
+// auto program = loader.Load(g_googlenet_combine + "/model",
+// g_googlenet_combine + "/params", optimize);
 auto time2 = time();
 DLOG << "load cost :" << time_diff(time1, time2) << "ms\n";
 paddle_mobile::Executor<paddle_mobile::CPU> executor(program, 1, optimize);
......