Commit 2053d26a authored by liuruilong

move loader and executor to framework

Parent 049340dd
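This commit moves Loader and Executor into the paddle_mobile::framework namespace and their headers from io/ to framework/, so callers have to update both includes and type names. Below is a minimal caller-side sketch of that change (not part of this diff); the model directory path is a hypothetical placeholder, and it mirrors the updates applied to the tests further down.

// Minimal caller-side sketch, assuming a hypothetical model directory path.
// The tests in this commit use helper constants such as g_googlenet instead.
#include <memory>
#include <string>

#include "framework/executor.h"  // was: "io/executor.h"
#include "framework/loader.h"    // was: "io/loader.h"

int main() {
  const std::string model_dir = "../models/googlenet";  // hypothetical path

  // was: paddle_mobile::Loader<paddle_mobile::CPU>
  paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
  auto program = loader.Load(model_dir, /*optimize=*/true);

  // was: paddle_mobile::Executor<paddle_mobile::CPU>
  paddle_mobile::framework::Executor<paddle_mobile::CPU> executor(
      program, /*batch_size=*/1, /*use_optimize=*/true, /*loddable=*/false);
  return 0;
}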
......@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "io/executor.h"
#include "executor.h"
#include <operators/math/gemm.h>
#include <algorithm>
#include <vector>
......@@ -38,6 +38,8 @@ limitations under the License. */
#endif
namespace paddle_mobile {
namespace framework {
using framework::Variable;
char *Get_binary_data(std::string filename) {
......@@ -57,13 +59,14 @@ char *Get_binary_data(std::string filename) {
}
#pragma mark - executor
template <typename Dtype, Precision P>
template<typename Dtype, Precision P>
Executor<Dtype, P>::Executor(const framework::Program<Dtype> p, int batch_size,
bool use_optimize, bool loddable)
: program_(p),
batch_size_(batch_size),
use_optimize_(use_optimize),
loddable_(loddable) {
: program_(p),
batch_size_(batch_size),
use_optimize_(use_optimize),
loddable_(loddable) {
if (use_optimize_) {
to_predict_program_ = program_.optimizeProgram;
} else {
......@@ -74,7 +77,7 @@ Executor<Dtype, P>::Executor(const framework::Program<Dtype> p, int batch_size,
PADDLE_MOBILE_ENFORCE(to_predict_program_ != nullptr,
"to_predict_program_ == NULL!");
const std::vector<std::shared_ptr<framework::BlockDesc>> blocks =
to_predict_program_->Blocks();
to_predict_program_->Blocks();
#ifdef PADDLE_EXECUTOR_MULTITHREAD
depManager.resize(blocks.size());
#endif
......@@ -86,8 +89,8 @@ Executor<Dtype, P>::Executor(const framework::Program<Dtype> p, int batch_size,
std::shared_ptr<framework::OpDesc> op = ops[j];
DLOG << "create op: " << j << " " << op->Type();
auto op_base = framework::OpRegistry<Dtype>::CreateOp(
op->Type(), op->GetInputs(), op->GetOutputs(), op->GetAttrMap(),
program_.scope);
op->Type(), op->GetInputs(), op->GetOutputs(), op->GetAttrMap(),
program_.scope);
// use pre_infershape to pre-resize, but if you use an LoD-mode tensor you
// need to resize at runtime
if (!loddable_) {
......@@ -106,7 +109,7 @@ Executor<Dtype, P>::Executor(const framework::Program<Dtype> p, int batch_size,
InitMemory();
}
std::shared_ptr<framework::BlockDesc> to_predict_block =
to_predict_program_->Block(0);
to_predict_program_->Block(0);
auto &ops = ops_of_block_[*to_predict_block.get()];
int i = 0;
for (const auto &op : ops) {
......@@ -115,7 +118,7 @@ Executor<Dtype, P>::Executor(const framework::Program<Dtype> p, int batch_size,
}
}
template <typename Dtype, Precision P>
template<typename Dtype, Precision P>
void Executor<Dtype, P>::LoadMemory(const framework::VarDesc var_desc,
framework::LoDTensor *tensor, char **data) {
// 1. version
......@@ -223,7 +226,7 @@ void Executor<Dtype, P>::LoadMemory(const framework::VarDesc var_desc,
}
}
template <typename Dtype, Precision P>
template<typename Dtype, Precision P>
void Executor<Dtype, P>::InitMemory() {
for (const auto &block : to_predict_program_->Blocks()) {
for (const auto &var_desc : block->Vars()) {
......@@ -235,7 +238,7 @@ void Executor<Dtype, P>::InitMemory() {
}
char *origin_data =
Get_binary_data(program_.model_path + "/" + var_desc->Name());
Get_binary_data(program_.model_path + "/" + var_desc->Name());
char *data = origin_data;
LoadMemory(*var_desc, tensor, &data);
......@@ -248,21 +251,21 @@ void Executor<Dtype, P>::InitMemory() {
is_mute_match = varInputMemory(var_desc, var, tensor);
PADDLE_MOBILE_ENFORCE(
is_mute_match,
"got unhandled var_desc->Tensor_desc().DataType(): %d",
var_desc->Tensor_desc().DataType());
is_mute_match,
"got unhandled var_desc->Tensor_desc().DataType(): %d",
var_desc->Tensor_desc().DataType());
}
}
}
}
}
template <typename Dtype, Precision P>
template<typename Dtype, Precision P>
void Executor<Dtype, P>::InitCombineMemory() {
char *origin_data;
if (program_.combined_params_buf && program_.combined_params_len) {
LOG(kLOG_INFO) << "use outter memory";
origin_data = (char *)program_.combined_params_buf;
origin_data = (char *) program_.combined_params_buf;
} else {
LOG(kLOG_INFO) << " begin init combine memory";
origin_data = Get_binary_data(program_.para_path);
......@@ -286,9 +289,9 @@ void Executor<Dtype, P>::InitCombineMemory() {
is_mute_match = varInputMemory(var_desc, var, tensor);
PADDLE_MOBILE_ENFORCE(
is_mute_match,
"got unhandled var_desc->Tensor_desc().DataType(): %d",
var_desc->Tensor_desc().DataType());
is_mute_match,
"got unhandled var_desc->Tensor_desc().DataType(): %d",
var_desc->Tensor_desc().DataType());
}
}
}
......@@ -297,10 +300,10 @@ void Executor<Dtype, P>::InitCombineMemory() {
LOG(kLOG_INFO) << " end init combine memory ";
}
template <typename Dtype, Precision P>
template<typename Dtype, Precision P>
bool Executor<Dtype, P>::varInputMemory(
const std::shared_ptr<framework::VarDesc> &var_desc, Variable *var,
framework::LoDTensor *tensor) const {
const std::shared_ptr<framework::VarDesc> &var_desc, Variable *var,
framework::LoDTensor *tensor) const {
bool is_mute_match = false;
switch (var_desc->Tensor_desc().DataType()) {
case framework::VARTYPE_TYPE_FP16: {
......@@ -335,22 +338,24 @@ bool Executor<Dtype, P>::varInputMemory(
break;
}
default: { break; }
default: {
break;
}
}
return is_mute_match;
}
template <typename Dtype, Precision P>
template<typename Dtype, Precision P>
std::shared_ptr<framework::Tensor> Executor<Dtype, P>::Predict(
const framework::Tensor &t) {
const framework::Tensor &t) {
framework::Variable *g_feed_value = program_.scope->Var("feed");
framework::Tensor *feed_tensor =
g_feed_value->GetMutable<framework::LoDTensor>();
g_feed_value->GetMutable<framework::LoDTensor>();
feed_tensor->Resize(t.dims());
feed_tensor->ShareDataWith(t);
std::shared_ptr<framework::BlockDesc> to_predict_block =
to_predict_program_->Block(0);
to_predict_program_->Block(0);
auto &ops = ops_of_block_[*to_predict_block.get()];
#ifdef PADDLE_MOBILE_PROFILE
......@@ -430,8 +435,8 @@ std::shared_ptr<framework::Tensor> Executor<Dtype, P>::Predict(
std::vector<std::string> out_keys = (*last_op)->GetOutKeys();
PADDLE_MOBILE_ENFORCE(out_keys.size() > 0, "the last op contains no output");
framework::LoDTensor *output_tensor =
framework::GetVarValue<framework::LoDTensor>(out_keys[0], output_map,
*(program_.scope));
framework::GetVarValue<framework::LoDTensor>(out_keys[0], output_map,
*(program_.scope));
#ifdef PADDLE_MOBILE_PROFILE
#ifdef PADDLE_EXECUTOR_MULTITHREAD
// TODO(haipeng): expose profile info as an interface, user can get them to
......@@ -483,18 +488,18 @@ std::shared_ptr<framework::Tensor> Executor<Dtype, P>::Predict(
return std::make_shared<framework::Tensor>(framework::Tensor(*output_tensor));
}
template <typename Dtype, Precision P>
template<typename Dtype, Precision P>
std::shared_ptr<framework::LoDTensor> Executor<Dtype, P>::PredictLod(
const framework::LoDTensor &t) {
const framework::LoDTensor &t) {
framework::Variable *g_feed_value = program_.scope->Var("feed");
framework::LoDTensor *feed_tensor =
g_feed_value->GetMutable<framework::LoDTensor>();
g_feed_value->GetMutable<framework::LoDTensor>();
feed_tensor->Resize(t.dims());
feed_tensor->ShareDataWith(t);
feed_tensor->set_lod(t.lod());
std::shared_ptr<framework::BlockDesc> to_predict_block =
to_predict_program_->Block(0);
to_predict_program_->Block(0);
auto &ops = ops_of_block_[*to_predict_block.get()];
......@@ -579,8 +584,8 @@ std::shared_ptr<framework::LoDTensor> Executor<Dtype, P>::PredictLod(
std::vector<std::string> out_keys = (*last_op)->GetOutKeys();
PADDLE_MOBILE_ENFORCE(out_keys.size() > 0, "the last op contains no output");
framework::LoDTensor *output_tensor =
framework::GetVarValue<framework::LoDTensor>(out_keys[0], output_map,
*(program_.scope));
framework::GetVarValue<framework::LoDTensor>(out_keys[0], output_map,
*(program_.scope));
#ifdef PADDLE_MOBILE_PROFILE
#ifdef PADDLE_EXECUTOR_MULTITHREAD
// TODO(haipeng): expose profile info as an interface, user can get them to
......@@ -630,22 +635,22 @@ std::shared_ptr<framework::LoDTensor> Executor<Dtype, P>::PredictLod(
printf("====================[---------]======================\n");
#endif
return std::make_shared<framework::LoDTensor>(
framework::LoDTensor(*output_tensor));
framework::LoDTensor(*output_tensor));
}
template <typename Dtype, Precision P>
template<typename Dtype, Precision P>
std::shared_ptr<framework::Tensor> Executor<Dtype, P>::Predict(
const framework::Tensor &t, int block_id) {
const framework::Tensor &t, int block_id) {
return Predict(t);
}
template <typename Dtype, Precision P>
template<typename Dtype, Precision P>
std::vector<typename Executor<Dtype, P>::Ptype> Executor<Dtype, P>::Predict(
const std::vector<Ptype> &input, const std::vector<int64_t> &dims) {
const std::vector<Ptype> &input, const std::vector<int64_t> &dims) {
framework::Tensor tensor(input, framework::make_ddim(dims));
std::shared_ptr<framework::Tensor> output_tensor = Predict(tensor, 0);
Executor<Dtype, P>::Ptype *output_ptr =
output_tensor->data<typename Executor<Dtype, P>::Ptype>();
output_tensor->data<typename Executor<Dtype, P>::Ptype>();
std::vector<typename Executor<Dtype, P>::Ptype> result_vector;
for (int j = 0; j < output_tensor->numel(); ++j) {
result_vector.push_back(output_ptr[j]);
......@@ -725,9 +730,17 @@ void Executor<Dtype, P>::Predict_To(int end) {
};
#endif
template class Executor<CPU, Precision::FP32>;
template class Executor<FPGA, Precision::FP32>;
template class Executor<GPU_CL, Precision::FP32>;
template class Executor<GPU_MALI, Precision::FP32>;
template
class Executor<CPU, Precision::FP32>;
template
class Executor<FPGA, Precision::FP32>;
template
class Executor<GPU_CL, Precision::FP32>;
template
class Executor<GPU_MALI, Precision::FP32>;
}
} // namespace paddle_mobile
......@@ -33,8 +33,9 @@ limitations under the License. */
using std::string;
namespace paddle_mobile {
namespace framework {
template <typename Dtype = CPU, Precision P = Precision::FP32>
template<typename Dtype = CPU, Precision P = Precision::FP32>
class Executor {
public:
typedef typename PrecisionTrait<P>::ptype Ptype;
......@@ -50,11 +51,13 @@ class Executor {
* @b to predict
* */
std::shared_ptr<framework::Tensor> Predict(const framework::Tensor &t);
/*
* @b to predict
* */
std::shared_ptr<framework::LoDTensor> PredictLod(
const framework::LoDTensor &t);
const framework::LoDTensor &t);
/*
* @b to predict with vector and dim
*
......@@ -65,18 +68,24 @@ class Executor {
protected:
Executor() = default;
void InitMemory();
void LoadMemory(const framework::VarDesc var_desc,
framework::LoDTensor *tensor, char **data);
void InitCombineMemory();
framework::Program<Dtype> program_;
int batch_size_ = 1;
std::shared_ptr<framework::ProgramDesc> to_predict_program_;
std::shared_ptr<framework::Tensor> Predict(const framework::Tensor &t,
int block_id);
std::map<framework::BlockDesc,
std::vector<std::shared_ptr<framework::OperatorBase<Dtype>>>>
ops_of_block_;
std::vector<std::shared_ptr<framework::OperatorBase<Dtype>>>>
ops_of_block_;
bool use_optimize_ = false;
bool loddable_ = false;
#ifdef PADDLE_EXECUTOR_MULTITHREAD
......@@ -96,14 +105,15 @@ class Executor {
#ifdef PADDLE_MOBILE_FPGA
public:
void InjectVariable(const framework::Tensor &t, string var_name);
void FeedData(const framework::Tensor &t);
std::shared_ptr<framework::Tensor> FetchResult(int id = -1);
void Predict_From_To(int start = 0, int end = -1);
void Predict_From(int start);
void Predict_To(int end);
public:
void InjectVariable(const framework::Tensor &t, string var_name);
void FeedData(const framework::Tensor &t);
std::shared_ptr<framework::Tensor> FetchResult(int id = -1);
void Predict_From_To(int start = 0, int end = -1);
void Predict_From(int start);
void Predict_To(int end);
#endif
};
}
} // namespace paddle_mobile
......@@ -12,13 +12,13 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "io/loader.h"
#include "loader.h"
#include "framework/lod_tensor.h"
#include "framework/program/program-optimize/program_optimize.h"
namespace paddle_mobile {
using framework::Variable;
namespace framework {
/**
* mute and resize tensors according to originProgramDesc and scope in loadParams
......@@ -27,22 +27,22 @@ using framework::Variable;
* @param scope
*/
void InitMemoryFromProgram(
std::shared_ptr<framework::ProgramDesc> &originProgramDesc,
std::shared_ptr<framework::Scope> &scope) {
std::shared_ptr<ProgramDesc> &originProgramDesc,
std::shared_ptr<Scope> &scope) {
for (const auto &block : originProgramDesc.get()->Blocks()) {
for (const auto &var_desc : block->Vars()) {
auto var = scope.get()->Var(var_desc->Name());
if (var_desc->Type() == framework::VARTYPE_TYPE_LOD_TENSOR) {
if (var_desc->Type() == VARTYPE_TYPE_LOD_TENSOR) {
if (var_desc->Persistable()) {
auto dim = var_desc->Tensor_desc().Dims();
auto tensor = var->GetMutable<framework::LoDTensor>();
tensor->Resize(framework::make_ddim(dim));
auto tensor = var->GetMutable<LoDTensor>();
tensor->Resize(make_ddim(dim));
} else {
auto dim = var_desc->Tensor_desc().Dims();
PADDLE_MOBILE_ENFORCE(dim.size() > 0, "dim size is 0");
dim[0] = 1;
auto tensor = var->GetMutable<framework::LoDTensor>();
tensor->Resize(framework::make_ddim(dim));
auto tensor = var->GetMutable<LoDTensor>();
tensor->Resize(make_ddim(dim));
}
} else {
// TODO(codeWorm): some.
......@@ -50,6 +50,7 @@ void InitMemoryFromProgram(
}
}
}
/**
* fusion and print some info
* @tparam Dtype
......@@ -59,14 +60,14 @@ void InitMemoryFromProgram(
* @param program
* @param originProgramDesc
*/
template <typename Dtype, Precision P>
template<typename Dtype, Precision P>
void FusionAndPrintInfos(
bool &optimize, bool &can_add_split, framework::Program<Dtype, P> &program,
const std::shared_ptr<framework::ProgramDesc> &originProgramDesc) {
bool &optimize, bool &can_add_split, Program<Dtype, P> &program,
const std::shared_ptr<ProgramDesc> &originProgramDesc) {
if (optimize) {
framework::ProgramOptimize program_optimize;
ProgramOptimize program_optimize;
program.optimizeProgram =
program_optimize.FusionOptimize(originProgramDesc, can_add_split);
program_optimize.FusionOptimize(originProgramDesc, can_add_split);
}
if (optimize) {
program.optimizeProgram->Description("optimize: ");
......@@ -74,6 +75,7 @@ void FusionAndPrintInfos(
originProgramDesc->Description("program: ");
}
}
static size_t ReadBuffer(const char *file_name, uint8_t **out) {
FILE *fp;
fp = fopen(file_name, "rb");
......@@ -96,20 +98,20 @@ static size_t ReadBuffer(const char *file_name, uint8_t **out) {
return cur_len;
}
template <typename Dtype, Precision P>
const framework::Program<Dtype, P> Loader<Dtype, P>::Load(
const std::string &dirname, bool optimize, bool quantification,
bool can_add_split) {
template<typename Dtype, Precision P>
const Program<Dtype, P> Loader<Dtype, P>::Load(
const std::string &dirname, bool optimize, bool quantification,
bool can_add_split) {
auto program = this->LoadProgram(dirname + "/__model__", optimize,
quantification, can_add_split);
program.model_path = dirname;
return program;
}
template <typename Dtype, Precision P>
const framework::Program<Dtype, P> Loader<Dtype, P>::Load(
const std::string &model_path, const std::string &para_path, bool optimize,
bool quantification) {
template<typename Dtype, Precision P>
const Program<Dtype, P> Loader<Dtype, P>::Load(
const std::string &model_path, const std::string &para_path, bool optimize,
bool quantification) {
auto program = this->LoadProgram(model_path, optimize, quantification);
program.para_path = para_path;
......@@ -118,10 +120,10 @@ const framework::Program<Dtype, P> Loader<Dtype, P>::Load(
return program;
}
template <typename Dtype, Precision P>
const framework::Program<Dtype, P> Loader<Dtype, P>::LoadProgram(
const std::string &model_path, bool optimize, bool quantification,
bool can_add_split) {
template<typename Dtype, Precision P>
const Program<Dtype, P> Loader<Dtype, P>::LoadProgram(
const std::string &model_path, bool optimize, bool quantification,
bool can_add_split) {
std::string model_filename = model_path;
PaddleMobile__Framework__Proto__ProgramDesc *c_program;
uint8_t *buf = NULL;
......@@ -130,20 +132,20 @@ const framework::Program<Dtype, P> Loader<Dtype, P>::LoadProgram(
PADDLE_MOBILE_ENFORCE(buf != NULL, "read from __model__ is null");
c_program = paddle_mobile__framework__proto__program_desc__unpack(
NULL, read_size, buf);
NULL, read_size, buf);
//
PADDLE_MOBILE_ENFORCE(c_program != NULL, "program is null");
//
DLOG << "n_ops: " << (*c_program->blocks)->n_ops;
//
auto originProgramDesc = std::make_shared<framework::ProgramDesc>(c_program);
auto originProgramDesc = std::make_shared<ProgramDesc>(c_program);
framework::Program<Dtype, P> program;
Program<Dtype, P> program;
program.originProgram = originProgramDesc;
program.quantification = quantification;
program.combined_params_len = 0;
program.combined_params_buf = nullptr;
auto scope = std::make_shared<framework::Scope>();
auto scope = std::make_shared<Scope>();
program.scope = scope;
// use originProgramDesc and scope to init tensors
......@@ -155,33 +157,33 @@ const framework::Program<Dtype, P> Loader<Dtype, P>::LoadProgram(
return program;
}
template <typename Dtype, Precision P>
const framework::Program<Dtype, P> Loader<Dtype, P>::LoadCombinedMemory(
size_t read_size, const uint8_t *buf, size_t combined_params_len,
const uint8_t *combined_params_buf, bool optimize, bool quantification) {
template<typename Dtype, Precision P>
const Program<Dtype, P> Loader<Dtype, P>::LoadCombinedMemory(
size_t read_size, const uint8_t *buf, size_t combined_params_len,
const uint8_t *combined_params_buf, bool optimize, bool quantification) {
bool can_add_split = false;
PaddleMobile__Framework__Proto__ProgramDesc *c_program;
PADDLE_MOBILE_ENFORCE(buf != nullptr, "read from __model__ is null");
c_program = paddle_mobile__framework__proto__program_desc__unpack(
nullptr, read_size, buf);
nullptr, read_size, buf);
//
PADDLE_MOBILE_ENFORCE(c_program != nullptr, "program is null");
//
DLOG << "n_ops: " << (*c_program->blocks)->n_ops;
//
auto originProgramDesc = std::make_shared<framework::ProgramDesc>(c_program);
auto originProgramDesc = std::make_shared<ProgramDesc>(c_program);
framework::Program<Dtype, P> program;
Program<Dtype, P> program;
program.combined = true;
program.originProgram = originProgramDesc;
program.quantification = quantification;
program.combined_params_len = combined_params_len;
program.combined_params_buf = combined_params_buf;
auto scope = std::make_shared<framework::Scope>();
auto scope = std::make_shared<Scope>();
program.scope = scope;
InitMemoryFromProgram(originProgramDesc, scope);
FusionAndPrintInfos(optimize, can_add_split, program, originProgramDesc);
......@@ -190,9 +192,17 @@ const framework::Program<Dtype, P> Loader<Dtype, P>::LoadCombinedMemory(
return program;
}
template class Loader<CPU, Precision::FP32>;
template class Loader<FPGA, Precision::FP32>;
template class Loader<GPU_MALI, Precision::FP32>;
template class Loader<GPU_CL, Precision::FP32>;
template
class Loader<CPU, Precision::FP32>;
template
class Loader<FPGA, Precision::FP32>;
template
class Loader<GPU_MALI, Precision::FP32>;
template
class Loader<GPU_CL, Precision::FP32>;
}
} // namespace paddle_mobile
......@@ -20,6 +20,7 @@ limitations under the License. */
#include "framework/program/program.h"
namespace paddle_mobile {
namespace framework{
template <typename Dtype = CPU, Precision P = Precision::FP32>
class Loader {
......@@ -28,7 +29,7 @@ class Loader {
* @b load separate format fluid model
* @b 加载分开形式的 fluid 模型
* */
const framework::Program<Dtype, P> Load(const std::string &dirname,
const Program<Dtype, P> Load(const std::string &dirname,
bool optimize = false,
bool quantification = false,
bool can_add_split = false);
......@@ -37,21 +38,22 @@ class Loader {
* @b load combine format fluid mode
* @b 加载结合在一起格式的模型
* */
const framework::Program<Dtype, P> Load(const std::string &model_path,
const Program<Dtype, P> Load(const std::string &model_path,
const std::string &para_path,
bool optimize = false,
bool quantification = false);
const framework::Program<Dtype, P> LoadCombinedMemory(
const Program<Dtype, P> LoadCombinedMemory(
size_t model_len, const uint8_t *model_buf, size_t combined_params_len,
const uint8_t *combined_params_buf, bool optimize = false,
bool quantification = false);
private:
const framework::Program<Dtype, P> LoadProgram(const std::string &model_path,
const Program<Dtype, P> LoadProgram(const std::string &model_path,
bool optimize = false,
bool quantification = false,
bool can_add_split = false);
};
}
} // namespace paddle_mobile
......@@ -29,13 +29,13 @@ bool PaddleMobile<Dtype, P>::Load(const std::string &dirname, bool optimize,
bool quantification, int batch_size,
bool loddable) {
if (loader_.get() == nullptr) {
loader_ = std::make_shared<Loader<Dtype, P>>();
loader_ = std::make_shared<framework::Loader<Dtype, P>>();
} else {
LOG(kLOG_INFO) << "loader inited";
}
if (executor_.get() == nullptr) {
executor_ = std::make_shared<Executor<Dtype, P>>(
executor_ = std::make_shared<framework::Executor<Dtype, P>>(
loader_->Load(dirname, optimize, quantification), batch_size, optimize,
loddable);
} else {
......@@ -51,13 +51,13 @@ bool PaddleMobile<Dtype, P>::Load(const std::string &model_path,
bool quantification, int batch_size,
bool loddable) {
if (loader_.get() == nullptr) {
loader_ = std::make_shared<Loader<Dtype, P>>();
loader_ = std::make_shared<framework::Loader<Dtype, P>>();
} else {
LOG(kLOG_INFO) << "loader inited";
}
if (executor_.get() == nullptr) {
executor_ = std::make_shared<Executor<Dtype, P>>(
executor_ = std::make_shared<framework::Executor<Dtype, P>>(
loader_->Load(model_path, para_path, optimize, quantification),
batch_size, optimize, loddable);
} else {
......@@ -76,13 +76,13 @@ bool PaddleMobile<Dtype, P>::LoadCombinedMemory(
bool quantification = false;
if (loader_.get() == nullptr) {
loader_ = std::make_shared<Loader<Dtype, P>>();
loader_ = std::make_shared<framework::Loader<Dtype, P>>();
} else {
LOG(kLOG_INFO) << "loader inited";
}
if (executor_.get() == nullptr) {
executor_ = std::make_shared<Executor<Dtype, P>>(
executor_ = std::make_shared<framework::Executor<Dtype, P>>(
loader_->LoadCombinedMemory(model_len, model_buf, combined_params_len,
combined_params_buf, optimise,
quantification),
......
......@@ -23,8 +23,8 @@ limitations under the License. */
#include "common/types.h"
#include "framework/tensor.h"
#include "io/executor.h"
#include "io/loader.h"
#include "framework/executor.h"
#include "framework/loader.h"
namespace paddle_mobile {
......@@ -90,8 +90,8 @@ class PaddleMobile {
~PaddleMobile();
private:
std::shared_ptr<Loader<Dtype, P>> loader_;
std::shared_ptr<Executor<Dtype, P>> executor_;
std::shared_ptr<framework::Loader<Dtype, P>> loader_;
std::shared_ptr<framework::Executor<Dtype, P>> executor_;
#ifdef PADDLE_MOBILE_FPGA
public:
......
......@@ -19,7 +19,7 @@ limitations under the License. */
#include "common/log.h"
#include "framework/op_registry.h"
#include "io/executor.h"
#include "framework/executor.h"
#include "operators/conv_op.h"
#include "operators/elementwise_add_op.h"
#include "operators/pool_op.h"
......@@ -29,7 +29,7 @@ limitations under the License. */
#include "operators/softmax_op.h"
#include "operators/transpose_op.h"
using paddle_mobile::Executor;
using paddle_mobile::framework::Executor;
using paddle_mobile::framework::BlockDesc;
using paddle_mobile::framework::DDim;
using paddle_mobile::framework::LoDTensor;
......
......@@ -16,7 +16,7 @@ limitations under the License. */
#include "operators/concat_op.h"
int main() {
paddle_mobile::Loader<paddle_mobile::FPGA> loader;
paddle_mobile::framework::Loader<paddle_mobile::FPGA> loader;
auto program = loader.Load(g_googlenet);
PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr,
"program file read fail");
......
......@@ -15,10 +15,10 @@ limitations under the License. */
#include <string>
#include "../test_helper.h"
#include "io/loader.h"
#include "framework/loader.h"
int main() {
paddle_mobile::Loader<paddle_mobile::CPU> loader;
paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
// ../../../test/models/googlenet
// ../../../test/models/mobilenet
// auto program = loader.Load(g_googlenet, true);
......
......@@ -15,10 +15,10 @@ limitations under the License. */
#include "../test_helper.h"
#include "framework/program/program-optimize/node.h"
#include "framework/program/program-optimize/program_optimize.h"
#include "io/loader.h"
#include "framework/loader.h"
int main() {
paddle_mobile::Loader<paddle_mobile::CPU> loader;
paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
// "../../../test/models/googlenet"
auto program = loader.Load(g_mobilenet_ssd, true);
paddle_mobile::framework::ProgramOptimize optimize;
......
......@@ -127,7 +127,7 @@ template class TestBatchNormOp<CPU>;
int main() {
DLOG << "----------**********----------";
DLOG << "begin to run BatchNormOp Test";
paddle_mobile::Loader<paddle_mobile::CPU> loader;
paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
auto program = loader.Load(std::string(g_mobilenet_ssd));
/// input x (4,10,2,2)
......
......@@ -115,7 +115,7 @@ template class TestBoxCoderOp<CPU>;
int main() {
DLOG << "----------**********----------";
DLOG << "begin to run BoxCoderOp Test";
paddle_mobile::Loader<paddle_mobile::CPU> loader;
paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
auto program = loader.Load(std::string(g_mobilenet_ssd));
paddle_mobile::framework::Tensor priorbox;
......
......@@ -16,7 +16,7 @@ limitations under the License. */
#include "operators/concat_op.h"
int main() {
paddle_mobile::Loader<paddle_mobile::CPU> loader;
paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
auto program = loader.Load(g_googlenet);
PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr,
"program file read fail");
......
......@@ -16,7 +16,7 @@ limitations under the License. */
#include "operators/fusion_conv_add_relu_op.h"
int main() {
paddle_mobile::Loader<paddle_mobile::CPU> loader;
paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
// ../models/image_classification_resnet.inference.model
auto program = loader.Load(g_googlenet, true);
......
......@@ -16,7 +16,7 @@ limitations under the License. */
#include "operators/conv_op.h"
int main() {
paddle_mobile::Loader<paddle_mobile::GPU_MALI> loader;
paddle_mobile::framework::Loader<paddle_mobile::GPU_MALI> loader;
// ../models/image_classification_resnet.inference.model
auto program = loader.Load(g_googlenet);
......
......@@ -16,7 +16,7 @@ limitations under the License. */
#include "operators/depthwise_conv_op.h"
int main() {
paddle_mobile::Loader<paddle_mobile::CPU> loader;
paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
// ../models/image_classification_resnet.inference.model
auto program = loader.Load(g_mobilenet_ssd);
......
......@@ -15,7 +15,7 @@ limitations under the License. */
#include "../test_include.h"
int main() {
paddle_mobile::Loader<paddle_mobile::CPU> loader;
paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
auto program = loader.Load(g_resnet);
PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr,
"program file read fail");
......
......@@ -17,7 +17,7 @@ limitations under the License. */
#include "operators/fusion_conv_add_bn_relu_op.h"
int main() {
paddle_mobile::Loader<paddle_mobile::CPU> loader;
paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
// ../models/image_classification_resnet.inference.model
auto program = loader.Load(g_mobilenet, true);
......
......@@ -114,7 +114,7 @@ template class TestFcOp<CPU>;
int main() {
DLOG << "----------**********----------";
DLOG << "begin to run Fc Test";
paddle_mobile::Loader<paddle_mobile::CPU> loader;
paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
// "../../../test/models/googlenet"
auto program = loader.Load(g_googlenet);
paddle_mobile::framework::ProgramOptimize optimize;
......
......@@ -16,7 +16,7 @@ limitations under the License. */
#include "operators/gru_op.h"
int main() {
paddle_mobile::Loader<paddle_mobile::CPU> loader;
paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
auto program = loader.Load(g_nlp);
PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr,
"program file read fail");
......
......@@ -17,7 +17,7 @@ limitations under the License. */
#include "operators/im2sequence_op.h"
int main() {
paddle_mobile::Loader<paddle_mobile::CPU> loader;
paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
auto program = loader.Load(g_ocr_recg);
PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr,
"program file read fail");
......
......@@ -16,7 +16,7 @@ limitations under the License. */
#include "operators/lrn_op.h"
int main() {
paddle_mobile::Loader<paddle_mobile::CPU> loader;
paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
auto program = loader.Load(g_googlenet);
PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr,
"program file read fail");
......
......@@ -16,7 +16,7 @@ limitations under the License. */
#include "operators/mul_op.h"
int main() {
paddle_mobile::Loader<paddle_mobile::CPU> loader;
paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
auto program = loader.Load(g_resnet);
PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr,
"program file read fail");
......
......@@ -126,7 +126,7 @@ template class TestMultiClassNMSOp<CPU>;
int main() {
DLOG << "----------**********----------";
DLOG << "begin to run MulticlassNMS Test";
paddle_mobile::Loader<paddle_mobile::CPU> loader;
paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
auto program = loader.Load(std::string("../../test/models/mobilenet+ssd"));
/// input x (1,3,300,300)
......
......@@ -16,7 +16,7 @@ limitations under the License. */
#include "operators/pool_op.h"
int main() {
paddle_mobile::Loader<paddle_mobile::CPU> loader;
paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
auto program = loader.Load(std::string(g_googlenet));
if (program.originProgram == nullptr) {
DLOG << "program read file";
......
......@@ -17,7 +17,7 @@ limitations under the License. */
#include "operators/prelu_op.h"
int main() {
paddle_mobile::Loader<paddle_mobile::CPU> loader;
paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
auto program = loader.Load(g_resnet);
PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr,
"program file read fail");
......
......@@ -126,7 +126,7 @@ template class TestPriorBoxOp<CPU>;
int main() {
DLOG << "----------**********----------";
DLOG << "begin to run PriorBoxOp Test";
paddle_mobile::Loader<paddle_mobile::CPU> loader;
paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
auto program = loader.Load(std::string(g_mobilenet_ssd));
/// input x (1,3,300,300)
......
......@@ -16,7 +16,7 @@ limitations under the License. */
#include "operators/relu_op.h"
int main() {
paddle_mobile::Loader<paddle_mobile::CPU> loader;
paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
auto program = loader.Load(g_resnet);
PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr,
"program file read fail");
......
......@@ -16,7 +16,7 @@ limitations under the License. */
#include "operators/reshape_op.h"
int main() {
paddle_mobile::Loader<paddle_mobile::CPU> loader;
paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
auto program = loader.Load(std::string(g_mobilenet_ssd));
if (program.originProgram == nullptr) {
DLOG << "program read file";
......
......@@ -16,7 +16,7 @@ limitations under the License. */
#include "operators/resize_op.h"
int main() {
paddle_mobile::Loader<paddle_mobile::CPU> loader;
paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
auto program = loader.Load(std::string(g_mobilenet_ssd));
if (program.originProgram == nullptr) {
DLOG << "program read file";
......
......@@ -15,7 +15,7 @@ limitations under the License. */
#include "../../src/operators/kernel/sigmoid_kernel.h"
#include "../../src/operators/kernel/central-arm-func/sigmoid_arm_func.h"
#include "../test_helper.h"
#include "io/executor.h"
#include "framework/executor.h"
int main() {
paddle_mobile::framework::Tensor input;
......
......@@ -17,7 +17,7 @@ limitations under the License. */
#include "operators/softmax_op.h"
int main() {
paddle_mobile::Loader<paddle_mobile::CPU> loader;
paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
auto program = loader.Load(std::string(g_mobilenet));
if (program.originProgram == nullptr) {
DLOG << "program read file";
......
......@@ -16,7 +16,7 @@ limitations under the License. */
#include "../test_include.h"
#include "operators/transpose_op.h"
int main() {
paddle_mobile::Loader<paddle_mobile::CPU> loader;
paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
auto program = loader.Load(std::string(g_mobilenet_ssd));
if (program.originProgram == nullptr) {
DLOG << "program read file";
......