Commit 2053d26a authored by liuruilong

move loader and executor to framework

Parent 049340dd
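This commit relocates the Loader and Executor sources from the io/ directory into framework/ and wraps both classes in the paddle_mobile::framework namespace; the public PaddleMobile facade keeps its API and only its internals are updated. For downstream code the change is mechanical, roughly as in the sketch below (not taken verbatim from the repository; g_googlenet and the constructor arguments follow the test diffs further down):

// Hypothetical call site after this commit: headers move from io/ to
// framework/ and the classes move into paddle_mobile::framework.
#include "framework/executor.h"   // was: #include "io/executor.h"
#include "framework/loader.h"     // was: #include "io/loader.h"

int main() {
  // was: paddle_mobile::Loader<paddle_mobile::CPU> loader;
  paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
  auto program = loader.Load(g_googlenet);  // g_googlenet comes from the test helpers

  // was: paddle_mobile::Executor<paddle_mobile::CPU> executor(...);
  paddle_mobile::framework::Executor<paddle_mobile::CPU> executor(
      program, /*batch_size=*/1, /*use_optimize=*/false, /*loddable=*/false);
  return 0;
}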
@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#include "io/executor.h"
+#include "executor.h"
 #include <operators/math/gemm.h>
 #include <algorithm>
 #include <vector>
@@ -38,6 +38,8 @@ limitations under the License. */
 #endif
 namespace paddle_mobile {
+namespace framework {
 using framework::Variable;
 char *Get_binary_data(std::string filename) {
@@ -57,13 +59,14 @@ char *Get_binary_data(std::string filename) {
 }
 #pragma mark - executor
-template <typename Dtype, Precision P>
+template<typename Dtype, Precision P>
 Executor<Dtype, P>::Executor(const framework::Program<Dtype> p, int batch_size,
 bool use_optimize, bool loddable)
 : program_(p),
 batch_size_(batch_size),
 use_optimize_(use_optimize),
 loddable_(loddable) {
 if (use_optimize_) {
 to_predict_program_ = program_.optimizeProgram;
 } else {
@@ -74,7 +77,7 @@ Executor<Dtype, P>::Executor(const framework::Program<Dtype> p, int batch_size,
 PADDLE_MOBILE_ENFORCE(to_predict_program_ != nullptr,
 "to_predict_program_ == NULL!");
 const std::vector<std::shared_ptr<framework::BlockDesc>> blocks =
 to_predict_program_->Blocks();
 #ifdef PADDLE_EXECUTOR_MULTITHREAD
 depManager.resize(blocks.size());
 #endif
@@ -86,8 +89,8 @@ Executor<Dtype, P>::Executor(const framework::Program<Dtype> p, int batch_size,
 std::shared_ptr<framework::OpDesc> op = ops[j];
 DLOG << "create op: " << j << " " << op->Type();
 auto op_base = framework::OpRegistry<Dtype>::CreateOp(
 op->Type(), op->GetInputs(), op->GetOutputs(), op->GetAttrMap(),
 program_.scope);
 // use pre_infershape to pre resize , but if u use an lod mode tensor u
 // need to resize in runtime
 if (!loddable_) {
@@ -106,7 +109,7 @@ Executor<Dtype, P>::Executor(const framework::Program<Dtype> p, int batch_size,
 InitMemory();
 }
 std::shared_ptr<framework::BlockDesc> to_predict_block =
 to_predict_program_->Block(0);
 auto &ops = ops_of_block_[*to_predict_block.get()];
 int i = 0;
 for (const auto &op : ops) {
@@ -115,7 +118,7 @@ Executor<Dtype, P>::Executor(const framework::Program<Dtype> p, int batch_size,
 }
 }
-template <typename Dtype, Precision P>
+template<typename Dtype, Precision P>
 void Executor<Dtype, P>::LoadMemory(const framework::VarDesc var_desc,
 framework::LoDTensor *tensor, char **data) {
 // 1. version
@@ -223,7 +226,7 @@ void Executor<Dtype, P>::LoadMemory(const framework::VarDesc var_desc,
 }
 }
-template <typename Dtype, Precision P>
+template<typename Dtype, Precision P>
 void Executor<Dtype, P>::InitMemory() {
 for (const auto &block : to_predict_program_->Blocks()) {
 for (const auto &var_desc : block->Vars()) {
@@ -235,7 +238,7 @@ void Executor<Dtype, P>::InitMemory() {
 }
 char *origin_data =
 Get_binary_data(program_.model_path + "/" + var_desc->Name());
 char *data = origin_data;
 LoadMemory(*var_desc, tensor, &data);
@@ -248,21 +251,21 @@ void Executor<Dtype, P>::InitMemory() {
 is_mute_match = varInputMemory(var_desc, var, tensor);
 PADDLE_MOBILE_ENFORCE(
 is_mute_match,
 "got unhandled var_desc->Tensor_desc().DataType(): %d",
 var_desc->Tensor_desc().DataType());
 }
 }
 }
 }
 }
-template <typename Dtype, Precision P>
+template<typename Dtype, Precision P>
 void Executor<Dtype, P>::InitCombineMemory() {
 char *origin_data;
 if (program_.combined_params_buf && program_.combined_params_len) {
 LOG(kLOG_INFO) << "use outter memory";
-origin_data = (char *)program_.combined_params_buf;
+origin_data = (char *) program_.combined_params_buf;
 } else {
 LOG(kLOG_INFO) << " begin init combine memory";
 origin_data = Get_binary_data(program_.para_path);
@@ -286,9 +289,9 @@ void Executor<Dtype, P>::InitCombineMemory() {
 is_mute_match = varInputMemory(var_desc, var, tensor);
 PADDLE_MOBILE_ENFORCE(
 is_mute_match,
 "got unhandled var_desc->Tensor_desc().DataType(): %d",
 var_desc->Tensor_desc().DataType());
 }
 }
 }
@@ -297,10 +300,10 @@ void Executor<Dtype, P>::InitCombineMemory() {
 LOG(kLOG_INFO) << " end init combine memory ";
 }
-template <typename Dtype, Precision P>
+template<typename Dtype, Precision P>
 bool Executor<Dtype, P>::varInputMemory(
 const std::shared_ptr<framework::VarDesc> &var_desc, Variable *var,
 framework::LoDTensor *tensor) const {
 bool is_mute_match = false;
 switch (var_desc->Tensor_desc().DataType()) {
 case framework::VARTYPE_TYPE_FP16: {
@@ -335,22 +338,24 @@ bool Executor<Dtype, P>::varInputMemory(
 break;
 }
-default: { break; }
+default: {
+break;
+}
 }
 return is_mute_match;
 }
-template <typename Dtype, Precision P>
+template<typename Dtype, Precision P>
 std::shared_ptr<framework::Tensor> Executor<Dtype, P>::Predict(
 const framework::Tensor &t) {
 framework::Variable *g_feed_value = program_.scope->Var("feed");
 framework::Tensor *feed_tensor =
 g_feed_value->GetMutable<framework::LoDTensor>();
 feed_tensor->Resize(t.dims());
 feed_tensor->ShareDataWith(t);
 std::shared_ptr<framework::BlockDesc> to_predict_block =
 to_predict_program_->Block(0);
 auto &ops = ops_of_block_[*to_predict_block.get()];
 #ifdef PADDLE_MOBILE_PROFILE
@@ -430,8 +435,8 @@ std::shared_ptr<framework::Tensor> Executor<Dtype, P>::Predict(
 std::vector<std::string> out_keys = (*last_op)->GetOutKeys();
 PADDLE_MOBILE_ENFORCE(out_keys.size() > 0, "the last op contains no output");
 framework::LoDTensor *output_tensor =
 framework::GetVarValue<framework::LoDTensor>(out_keys[0], output_map,
 *(program_.scope));
 #ifdef PADDLE_MOBILE_PROFILE
 #ifdef PADDLE_EXECUTOR_MULTITHREAD
 // TODO(haipeng): expose profile info as an interface, user can get them to
@@ -483,18 +488,18 @@ std::shared_ptr<framework::Tensor> Executor<Dtype, P>::Predict(
 return std::make_shared<framework::Tensor>(framework::Tensor(*output_tensor));
 }
-template <typename Dtype, Precision P>
+template<typename Dtype, Precision P>
 std::shared_ptr<framework::LoDTensor> Executor<Dtype, P>::PredictLod(
 const framework::LoDTensor &t) {
 framework::Variable *g_feed_value = program_.scope->Var("feed");
 framework::LoDTensor *feed_tensor =
 g_feed_value->GetMutable<framework::LoDTensor>();
 feed_tensor->Resize(t.dims());
 feed_tensor->ShareDataWith(t);
 feed_tensor->set_lod(t.lod());
 std::shared_ptr<framework::BlockDesc> to_predict_block =
 to_predict_program_->Block(0);
 auto &ops = ops_of_block_[*to_predict_block.get()];
@@ -579,8 +584,8 @@ std::shared_ptr<framework::LoDTensor> Executor<Dtype, P>::PredictLod(
 std::vector<std::string> out_keys = (*last_op)->GetOutKeys();
 PADDLE_MOBILE_ENFORCE(out_keys.size() > 0, "the last op contains no output");
 framework::LoDTensor *output_tensor =
 framework::GetVarValue<framework::LoDTensor>(out_keys[0], output_map,
 *(program_.scope));
 #ifdef PADDLE_MOBILE_PROFILE
 #ifdef PADDLE_EXECUTOR_MULTITHREAD
 // TODO(haipeng): expose profile info as an interface, user can get them to
@@ -630,22 +635,22 @@ std::shared_ptr<framework::LoDTensor> Executor<Dtype, P>::PredictLod(
 printf("====================[---------]======================\n");
 #endif
 return std::make_shared<framework::LoDTensor>(
 framework::LoDTensor(*output_tensor));
 }
-template <typename Dtype, Precision P>
+template<typename Dtype, Precision P>
 std::shared_ptr<framework::Tensor> Executor<Dtype, P>::Predict(
 const framework::Tensor &t, int block_id) {
 return Predict(t);
 }
-template <typename Dtype, Precision P>
+template<typename Dtype, Precision P>
 std::vector<typename Executor<Dtype, P>::Ptype> Executor<Dtype, P>::Predict(
 const std::vector<Ptype> &input, const std::vector<int64_t> &dims) {
 framework::Tensor tensor(input, framework::make_ddim(dims));
 std::shared_ptr<framework::Tensor> output_tensor = Predict(tensor, 0);
 Executor<Dtype, P>::Ptype *output_ptr =
 output_tensor->data<typename Executor<Dtype, P>::Ptype>();
 std::vector<typename Executor<Dtype, P>::Ptype> result_vector;
 for (int j = 0; j < output_tensor->numel(); ++j) {
 result_vector.push_back(output_ptr[j]);
@@ -725,9 +730,17 @@ void Executor<Dtype, P>::Predict_To(int end) {
 };
 #endif
-template class Executor<CPU, Precision::FP32>;
-template class Executor<FPGA, Precision::FP32>;
-template class Executor<GPU_CL, Precision::FP32>;
-template class Executor<GPU_MALI, Precision::FP32>;
+template
+class Executor<CPU, Precision::FP32>;
+template
+class Executor<FPGA, Precision::FP32>;
+template
+class Executor<GPU_CL, Precision::FP32>;
+template
+class Executor<GPU_MALI, Precision::FP32>;
+}
 } // namespace paddle_mobile
@@ -33,8 +33,9 @@ limitations under the License. */
 using std::string;
 namespace paddle_mobile {
+namespace framework {
-template <typename Dtype = CPU, Precision P = Precision::FP32>
+template<typename Dtype = CPU, Precision P = Precision::FP32>
 class Executor {
 public:
 typedef typename PrecisionTrait<P>::ptype Ptype;
@@ -50,11 +51,13 @@ class Executor {
 * @b to predict
 * */
 std::shared_ptr<framework::Tensor> Predict(const framework::Tensor &t);
 /*
 * @b to predict
 * */
 std::shared_ptr<framework::LoDTensor> PredictLod(
 const framework::LoDTensor &t);
 /*
 * @b to predict with vector and dim
 *
@@ -65,18 +68,24 @@ class Executor {
 protected:
 Executor() = default;
 void InitMemory();
 void LoadMemory(const framework::VarDesc var_desc,
 framework::LoDTensor *tensor, char **data);
 void InitCombineMemory();
 framework::Program<Dtype> program_;
 int batch_size_ = 1;
 std::shared_ptr<framework::ProgramDesc> to_predict_program_;
 std::shared_ptr<framework::Tensor> Predict(const framework::Tensor &t,
 int block_id);
 std::map<framework::BlockDesc,
 std::vector<std::shared_ptr<framework::OperatorBase<Dtype>>>>
 ops_of_block_;
 bool use_optimize_ = false;
 bool loddable_ = false;
 #ifdef PADDLE_EXECUTOR_MULTITHREAD
@@ -96,14 +105,15 @@ class Executor {
 #ifdef PADDLE_MOBILE_FPGA
 public:
 void InjectVariable(const framework::Tensor &t, string var_name);
 void FeedData(const framework::Tensor &t);
 std::shared_ptr<framework::Tensor> FetchResult(int id = -1);
 void Predict_From_To(int start = 0, int end = -1);
 void Predict_From(int start);
 void Predict_To(int end);
 #endif
 };
+}
 } // namespace paddle_mobile
@@ -12,13 +12,13 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#include "io/loader.h"
+#include "loader.h"
 #include "framework/lod_tensor.h"
 #include "framework/program/program-optimize/program_optimize.h"
 namespace paddle_mobile {
-using framework::Variable;
+namespace framework {
 /**
 * muteandresize tensor as originProgramDesc and scope in loadParams
@@ -27,22 +27,22 @@ using framework::Variable;
 * @param scope
 */
 void InitMemoryFromProgram(
-std::shared_ptr<framework::ProgramDesc> &originProgramDesc,
-std::shared_ptr<framework::Scope> &scope) {
+std::shared_ptr<ProgramDesc> &originProgramDesc,
+std::shared_ptr<Scope> &scope) {
 for (const auto &block : originProgramDesc.get()->Blocks()) {
 for (const auto &var_desc : block->Vars()) {
 auto var = scope.get()->Var(var_desc->Name());
-if (var_desc->Type() == framework::VARTYPE_TYPE_LOD_TENSOR) {
+if (var_desc->Type() == VARTYPE_TYPE_LOD_TENSOR) {
 if (var_desc->Persistable()) {
 auto dim = var_desc->Tensor_desc().Dims();
-auto tensor = var->GetMutable<framework::LoDTensor>();
-tensor->Resize(framework::make_ddim(dim));
+auto tensor = var->GetMutable<LoDTensor>();
+tensor->Resize(make_ddim(dim));
 } else {
 auto dim = var_desc->Tensor_desc().Dims();
 PADDLE_MOBILE_ENFORCE(dim.size() > 0, "dim size is 0");
 dim[0] = 1;
-auto tensor = var->GetMutable<framework::LoDTensor>();
-tensor->Resize(framework::make_ddim(dim));
+auto tensor = var->GetMutable<LoDTensor>();
+tensor->Resize(make_ddim(dim));
 }
 } else {
 // TODO(codeWorm): some.
@@ -50,6 +50,7 @@ void InitMemoryFromProgram(
 }
 }
 }
 /**
 * fusion and print someinfos
 * @tparam Dtype
@@ -59,14 +60,14 @@ void InitMemoryFromProgram(
 * @param program
 * @param originProgramDesc
 */
-template <typename Dtype, Precision P>
+template<typename Dtype, Precision P>
 void FusionAndPrintInfos(
-bool &optimize, bool &can_add_split, framework::Program<Dtype, P> &program,
-const std::shared_ptr<framework::ProgramDesc> &originProgramDesc) {
+bool &optimize, bool &can_add_split, Program<Dtype, P> &program,
+const std::shared_ptr<ProgramDesc> &originProgramDesc) {
 if (optimize) {
-framework::ProgramOptimize program_optimize;
+ProgramOptimize program_optimize;
 program.optimizeProgram =
 program_optimize.FusionOptimize(originProgramDesc, can_add_split);
 }
 if (optimize) {
 program.optimizeProgram->Description("optimize: ");
@@ -74,6 +75,7 @@ void FusionAndPrintInfos(
 originProgramDesc->Description("program: ");
 }
 }
 static size_t ReadBuffer(const char *file_name, uint8_t **out) {
 FILE *fp;
 fp = fopen(file_name, "rb");
@@ -96,20 +98,20 @@ static size_t ReadBuffer(const char *file_name, uint8_t **out) {
 return cur_len;
 }
-template <typename Dtype, Precision P>
-const framework::Program<Dtype, P> Loader<Dtype, P>::Load(
+template<typename Dtype, Precision P>
+const Program<Dtype, P> Loader<Dtype, P>::Load(
 const std::string &dirname, bool optimize, bool quantification,
 bool can_add_split) {
 auto program = this->LoadProgram(dirname + "/__model__", optimize,
 quantification, can_add_split);
 program.model_path = dirname;
 return program;
 }
-template <typename Dtype, Precision P>
-const framework::Program<Dtype, P> Loader<Dtype, P>::Load(
+template<typename Dtype, Precision P>
+const Program<Dtype, P> Loader<Dtype, P>::Load(
 const std::string &model_path, const std::string &para_path, bool optimize,
 bool quantification) {
 auto program = this->LoadProgram(model_path, optimize, quantification);
 program.para_path = para_path;
@@ -118,10 +120,10 @@ const framework::Program<Dtype, P> Loader<Dtype, P>::Load(
 return program;
 }
-template <typename Dtype, Precision P>
-const framework::Program<Dtype, P> Loader<Dtype, P>::LoadProgram(
+template<typename Dtype, Precision P>
+const Program<Dtype, P> Loader<Dtype, P>::LoadProgram(
 const std::string &model_path, bool optimize, bool quantification,
 bool can_add_split) {
 std::string model_filename = model_path;
 PaddleMobile__Framework__Proto__ProgramDesc *c_program;
 uint8_t *buf = NULL;
@@ -130,20 +132,20 @@ const framework::Program<Dtype, P> Loader<Dtype, P>::LoadProgram(
 PADDLE_MOBILE_ENFORCE(buf != NULL, "read from __model__ is null");
 c_program = paddle_mobile__framework__proto__program_desc__unpack(
 NULL, read_size, buf);
 //
 PADDLE_MOBILE_ENFORCE(c_program != NULL, "program is null");
 //
 DLOG << "n_ops: " << (*c_program->blocks)->n_ops;
 //
-auto originProgramDesc = std::make_shared<framework::ProgramDesc>(c_program);
-framework::Program<Dtype, P> program;
+auto originProgramDesc = std::make_shared<ProgramDesc>(c_program);
+Program<Dtype, P> program;
 program.originProgram = originProgramDesc;
 program.quantification = quantification;
 program.combined_params_len = 0;
 program.combined_params_buf = nullptr;
-auto scope = std::make_shared<framework::Scope>();
+auto scope = std::make_shared<Scope>();
 program.scope = scope;
 // use originProgramDesc and scope to init tensors
@@ -155,33 +157,33 @@ const framework::Program<Dtype, P> Loader<Dtype, P>::LoadProgram(
 return program;
 }
-template <typename Dtype, Precision P>
-const framework::Program<Dtype, P> Loader<Dtype, P>::LoadCombinedMemory(
+template<typename Dtype, Precision P>
+const Program<Dtype, P> Loader<Dtype, P>::LoadCombinedMemory(
 size_t read_size, const uint8_t *buf, size_t combined_params_len,
 const uint8_t *combined_params_buf, bool optimize, bool quantification) {
 bool can_add_split = false;
 PaddleMobile__Framework__Proto__ProgramDesc *c_program;
 PADDLE_MOBILE_ENFORCE(buf != nullptr, "read from __model__ is null");
 c_program = paddle_mobile__framework__proto__program_desc__unpack(
 nullptr, read_size, buf);
 //
 PADDLE_MOBILE_ENFORCE(c_program != nullptr, "program is null");
 //
 DLOG << "n_ops: " << (*c_program->blocks)->n_ops;
 //
-auto originProgramDesc = std::make_shared<framework::ProgramDesc>(c_program);
-framework::Program<Dtype, P> program;
+auto originProgramDesc = std::make_shared<ProgramDesc>(c_program);
+Program<Dtype, P> program;
 program.combined = true;
 program.originProgram = originProgramDesc;
 program.quantification = quantification;
 program.combined_params_len = combined_params_len;
 program.combined_params_buf = combined_params_buf;
-auto scope = std::make_shared<framework::Scope>();
+auto scope = std::make_shared<Scope>();
 program.scope = scope;
 InitMemoryFromProgram(originProgramDesc, scope);
 FusionAndPrintInfos(optimize, can_add_split, program, originProgramDesc);
@@ -190,9 +192,17 @@ const framework::Program<Dtype, P> Loader<Dtype, P>::LoadCombinedMemory(
 return program;
 }
-template class Loader<CPU, Precision::FP32>;
-template class Loader<FPGA, Precision::FP32>;
-template class Loader<GPU_MALI, Precision::FP32>;
-template class Loader<GPU_CL, Precision::FP32>;
+template
+class Loader<CPU, Precision::FP32>;
+template
+class Loader<FPGA, Precision::FP32>;
+template
+class Loader<GPU_MALI, Precision::FP32>;
+template
+class Loader<GPU_CL, Precision::FP32>;
+}
 } // namespace paddle_mobile
@@ -20,6 +20,7 @@ limitations under the License. */
 #include "framework/program/program.h"
 namespace paddle_mobile {
+namespace framework{
 template <typename Dtype = CPU, Precision P = Precision::FP32>
 class Loader {
@@ -28,7 +29,7 @@ class Loader {
 * @b load separate format fluid model
 * @b 加载分开形式的 fluid 模型
 * */
-const framework::Program<Dtype, P> Load(const std::string &dirname,
+const Program<Dtype, P> Load(const std::string &dirname,
 bool optimize = false,
 bool quantification = false,
 bool can_add_split = false);
@@ -37,21 +38,22 @@ class Loader {
 * @b load combine format fluid mode
 * @b 加载结合在一起格式的模型
 * */
-const framework::Program<Dtype, P> Load(const std::string &model_path,
+const Program<Dtype, P> Load(const std::string &model_path,
 const std::string &para_path,
 bool optimize = false,
 bool quantification = false);
-const framework::Program<Dtype, P> LoadCombinedMemory(
+const Program<Dtype, P> LoadCombinedMemory(
 size_t model_len, const uint8_t *model_buf, size_t combined_params_len,
 const uint8_t *combined_params_buf, bool optimize = false,
 bool quantification = false);
 private:
-const framework::Program<Dtype, P> LoadProgram(const std::string &model_path,
+const Program<Dtype, P> LoadProgram(const std::string &model_path,
 bool optimize = false,
 bool quantification = false,
 bool can_add_split = false);
 };
+}
 } // namespace paddle_mobile
@@ -29,13 +29,13 @@ bool PaddleMobile<Dtype, P>::Load(const std::string &dirname, bool optimize,
 bool quantification, int batch_size,
 bool loddable) {
 if (loader_.get() == nullptr) {
-loader_ = std::make_shared<Loader<Dtype, P>>();
+loader_ = std::make_shared<framework::Loader<Dtype, P>>();
 } else {
 LOG(kLOG_INFO) << "loader inited";
 }
 if (executor_.get() == nullptr) {
-executor_ = std::make_shared<Executor<Dtype, P>>(
+executor_ = std::make_shared<framework::Executor<Dtype, P>>(
 loader_->Load(dirname, optimize, quantification), batch_size, optimize,
 loddable);
 } else {
@@ -51,13 +51,13 @@ bool PaddleMobile<Dtype, P>::Load(const std::string &model_path,
 bool quantification, int batch_size,
 bool loddable) {
 if (loader_.get() == nullptr) {
-loader_ = std::make_shared<Loader<Dtype, P>>();
+loader_ = std::make_shared<framework::Loader<Dtype, P>>();
 } else {
 LOG(kLOG_INFO) << "loader inited";
 }
 if (executor_.get() == nullptr) {
-executor_ = std::make_shared<Executor<Dtype, P>>(
+executor_ = std::make_shared<framework::Executor<Dtype, P>>(
 loader_->Load(model_path, para_path, optimize, quantification),
 batch_size, optimize, loddable);
 } else {
@@ -76,13 +76,13 @@ bool PaddleMobile<Dtype, P>::LoadCombinedMemory(
 bool quantification = false;
 if (loader_.get() == nullptr) {
-loader_ = std::make_shared<Loader<Dtype, P>>();
+loader_ = std::make_shared<framework::Loader<Dtype, P>>();
 } else {
 LOG(kLOG_INFO) << "loader inited";
 }
 if (executor_.get() == nullptr) {
-executor_ = std::make_shared<Executor<Dtype, P>>(
+executor_ = std::make_shared<framework::Executor<Dtype, P>>(
 loader_->LoadCombinedMemory(model_len, model_buf, combined_params_len,
 combined_params_buf, optimise,
 quantification),
...
@@ -23,8 +23,8 @@ limitations under the License. */
 #include "common/types.h"
 #include "framework/tensor.h"
-#include "io/executor.h"
-#include "io/loader.h"
+#include "framework/executor.h"
+#include "framework/loader.h"
 namespace paddle_mobile {
@@ -90,8 +90,8 @@ class PaddleMobile {
 ~PaddleMobile();
 private:
-std::shared_ptr<Loader<Dtype, P>> loader_;
-std::shared_ptr<Executor<Dtype, P>> executor_;
+std::shared_ptr<framework::Loader<Dtype, P>> loader_;
+std::shared_ptr<framework::Executor<Dtype, P>> executor_;
 #ifdef PADDLE_MOBILE_FPGA
 public:
...
@@ -19,7 +19,7 @@ limitations under the License. */
 #include "common/log.h"
 #include "framework/op_registry.h"
-#include "io/executor.h"
+#include "framework/executor.h"
 #include "operators/conv_op.h"
 #include "operators/elementwise_add_op.h"
 #include "operators/pool_op.h"
@@ -29,7 +29,7 @@ limitations under the License. */
 #include "operators/softmax_op.h"
 #include "operators/transpose_op.h"
-using paddle_mobile::Executor;
+using paddle_mobile::framework::Executor;
 using paddle_mobile::framework::BlockDesc;
 using paddle_mobile::framework::DDim;
 using paddle_mobile::framework::LoDTensor;
...
@@ -16,7 +16,7 @@ limitations under the License. */
 #include "operators/concat_op.h"
 int main() {
-paddle_mobile::Loader<paddle_mobile::FPGA> loader;
+paddle_mobile::framework::Loader<paddle_mobile::FPGA> loader;
 auto program = loader.Load(g_googlenet);
 PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr,
 "program file read fail");
...
@@ -15,10 +15,10 @@ limitations under the License. */
 #include <string>
 #include "../test_helper.h"
-#include "io/loader.h"
+#include "framework/loader.h"
 int main() {
-paddle_mobile::Loader<paddle_mobile::CPU> loader;
+paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
 // ../../../test/models/googlenet
 // ../../../test/models/mobilenet
 // auto program = loader.Load(g_googlenet, true);
...
@@ -15,10 +15,10 @@ limitations under the License. */
 #include "../test_helper.h"
 #include "framework/program/program-optimize/node.h"
 #include "framework/program/program-optimize/program_optimize.h"
-#include "io/loader.h"
+#include "framework/loader.h"
 int main() {
-paddle_mobile::Loader<paddle_mobile::CPU> loader;
+paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
 // "../../../test/models/googlenet"
 auto program = loader.Load(g_mobilenet_ssd, true);
 paddle_mobile::framework::ProgramOptimize optimize;
...
@@ -127,7 +127,7 @@ template class TestBatchNormOp<CPU>;
 int main() {
 DLOG << "----------**********----------";
 DLOG << "begin to run BatchNormOp Test";
-paddle_mobile::Loader<paddle_mobile::CPU> loader;
+paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
 auto program = loader.Load(std::string(g_mobilenet_ssd));
 /// input x (4,10,2,2)
...
@@ -115,7 +115,7 @@ template class TestBoxCoderOp<CPU>;
 int main() {
 DLOG << "----------**********----------";
 DLOG << "begin to run BoxCoderOp Test";
-paddle_mobile::Loader<paddle_mobile::CPU> loader;
+paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
 auto program = loader.Load(std::string(g_mobilenet_ssd));
 paddle_mobile::framework::Tensor priorbox;
...
@@ -16,7 +16,7 @@ limitations under the License. */
 #include "operators/concat_op.h"
 int main() {
-paddle_mobile::Loader<paddle_mobile::CPU> loader;
+paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
 auto program = loader.Load(g_googlenet);
 PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr,
 "program file read fail");
...
@@ -16,7 +16,7 @@ limitations under the License. */
 #include "operators/fusion_conv_add_relu_op.h"
 int main() {
-paddle_mobile::Loader<paddle_mobile::CPU> loader;
+paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
 // ../models/image_classification_resnet.inference.model
 auto program = loader.Load(g_googlenet, true);
...
@@ -16,7 +16,7 @@ limitations under the License. */
 #include "operators/conv_op.h"
 int main() {
-paddle_mobile::Loader<paddle_mobile::GPU_MALI> loader;
+paddle_mobile::framework::Loader<paddle_mobile::GPU_MALI> loader;
 // ../models/image_classification_resnet.inference.model
 auto program = loader.Load(g_googlenet);
...
@@ -16,7 +16,7 @@ limitations under the License. */
 #include "operators/depthwise_conv_op.h"
 int main() {
-paddle_mobile::Loader<paddle_mobile::CPU> loader;
+paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
 // ../models/image_classification_resnet.inference.model
 auto program = loader.Load(g_mobilenet_ssd);
...
@@ -15,7 +15,7 @@ limitations under the License. */
 #include "../test_include.h"
 int main() {
-paddle_mobile::Loader<paddle_mobile::CPU> loader;
+paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
 auto program = loader.Load(g_resnet);
 PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr,
 "program file read fail");
...
@@ -17,7 +17,7 @@ limitations under the License. */
 #include "operators/fusion_conv_add_bn_relu_op.h"
 int main() {
-paddle_mobile::Loader<paddle_mobile::CPU> loader;
+paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
 // ../models/image_classification_resnet.inference.model
 auto program = loader.Load(g_mobilenet, true);
...
@@ -114,7 +114,7 @@ template class TestFcOp<CPU>;
 int main() {
 DLOG << "----------**********----------";
 DLOG << "begin to run Fc Test";
-paddle_mobile::Loader<paddle_mobile::CPU> loader;
+paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
 // "../../../test/models/googlenet"
 auto program = loader.Load(g_googlenet);
 paddle_mobile::framework::ProgramOptimize optimize;
...
@@ -16,7 +16,7 @@ limitations under the License. */
 #include "operators/gru_op.h"
 int main() {
-paddle_mobile::Loader<paddle_mobile::CPU> loader;
+paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
 auto program = loader.Load(g_nlp);
 PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr,
 "program file read fail");
...
@@ -17,7 +17,7 @@ limitations under the License. */
 #include "operators/im2sequence_op.h"
 int main() {
-paddle_mobile::Loader<paddle_mobile::CPU> loader;
+paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
 auto program = loader.Load(g_ocr_recg);
 PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr,
 "program file read fail");
...
@@ -16,7 +16,7 @@ limitations under the License. */
 #include "operators/lrn_op.h"
 int main() {
-paddle_mobile::Loader<paddle_mobile::CPU> loader;
+paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
 auto program = loader.Load(g_googlenet);
 PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr,
 "program file read fail");
...
@@ -16,7 +16,7 @@ limitations under the License. */
 #include "operators/mul_op.h"
 int main() {
-paddle_mobile::Loader<paddle_mobile::CPU> loader;
+paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
 auto program = loader.Load(g_resnet);
 PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr,
 "program file read fail");
...
@@ -126,7 +126,7 @@ template class TestMultiClassNMSOp<CPU>;
 int main() {
 DLOG << "----------**********----------";
 DLOG << "begin to run MulticlassNMS Test";
-paddle_mobile::Loader<paddle_mobile::CPU> loader;
+paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
 auto program = loader.Load(std::string("../../test/models/mobilenet+ssd"));
 /// input x (1,3,300,300)
...
@@ -16,7 +16,7 @@ limitations under the License. */
 #include "operators/pool_op.h"
 int main() {
-paddle_mobile::Loader<paddle_mobile::CPU> loader;
+paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
 auto program = loader.Load(std::string(g_googlenet));
 if (program.originProgram == nullptr) {
 DLOG << "program read file";
...
@@ -17,7 +17,7 @@ limitations under the License. */
 #include "operators/prelu_op.h"
 int main() {
-paddle_mobile::Loader<paddle_mobile::CPU> loader;
+paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
 auto program = loader.Load(g_resnet);
 PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr,
 "program file read fail");
...
@@ -126,7 +126,7 @@ template class TestPriorBoxOp<CPU>;
 int main() {
 DLOG << "----------**********----------";
 DLOG << "begin to run PriorBoxOp Test";
-paddle_mobile::Loader<paddle_mobile::CPU> loader;
+paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
 auto program = loader.Load(std::string(g_mobilenet_ssd));
 /// input x (1,3,300,300)
...
@@ -16,7 +16,7 @@ limitations under the License. */
 #include "operators/relu_op.h"
 int main() {
-paddle_mobile::Loader<paddle_mobile::CPU> loader;
+paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
 auto program = loader.Load(g_resnet);
 PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr,
 "program file read fail");
...
@@ -16,7 +16,7 @@ limitations under the License. */
 #include "operators/reshape_op.h"
 int main() {
-paddle_mobile::Loader<paddle_mobile::CPU> loader;
+paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
 auto program = loader.Load(std::string(g_mobilenet_ssd));
 if (program.originProgram == nullptr) {
 DLOG << "program read file";
...
@@ -16,7 +16,7 @@ limitations under the License. */
 #include "operators/resize_op.h"
 int main() {
-paddle_mobile::Loader<paddle_mobile::CPU> loader;
+paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
 auto program = loader.Load(std::string(g_mobilenet_ssd));
 if (program.originProgram == nullptr) {
 DLOG << "program read file";
...
@@ -15,7 +15,7 @@ limitations under the License. */
 #include "../../src/operators/kernel/sigmoid_kernel.h"
 #include "../../src/operators/kernel/central-arm-func/sigmoid_arm_func.h"
 #include "../test_helper.h"
-#include "io/executor.h"
+#include "framework/executor.h"
 int main() {
 paddle_mobile::framework::Tensor input;
...
@@ -17,7 +17,7 @@ limitations under the License. */
 #include "operators/softmax_op.h"
 int main() {
-paddle_mobile::Loader<paddle_mobile::CPU> loader;
+paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
 auto program = loader.Load(std::string(g_mobilenet));
 if (program.originProgram == nullptr) {
 DLOG << "program read file";
...
@@ -16,7 +16,7 @@ limitations under the License. */
 #include "../test_include.h"
 #include "operators/transpose_op.h"
 int main() {
-paddle_mobile::Loader<paddle_mobile::CPU> loader;
+paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
 auto program = loader.Load(std::string(g_mobilenet_ssd));
 if (program.originProgram == nullptr) {
 DLOG << "program read file";
...
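The test diffs above all follow the same mechanical pattern. As a closing reference, a minimal caller of the unchanged PaddleMobile facade might look like the sketch below; the include path and the model path are assumptions, and only the Load parameter list (dirname, optimize, quantification, batch_size, loddable) is taken from the PaddleMobile::Load definition shown above.

// Sketch only: the facade API is untouched by this commit, the namespace
// move is internal. The include path and model path are assumed, not
// confirmed by the diff.
#include "paddle_mobile.h"

int main() {
  paddle_mobile::PaddleMobile<paddle_mobile::CPU, paddle_mobile::Precision::FP32> paddle_mobile;
  // Parameters follow PaddleMobile::Load above: dirname, optimize,
  // quantification, batch_size, loddable.
  bool ok = paddle_mobile.Load("./models/googlenet", true, false, 1, false);
  return ok ? 0 : 1;
}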