Commit 2053d26a authored by liuruilong

move loader and executor to framework

Parent 049340dd
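For downstream code, the practical effect of this commit is that the Loader and Executor headers move from io/ to framework/ and both classes now live in the paddle_mobile::framework namespace. A minimal call-site sketch under those assumptions; the model path and flag values are placeholders, not part of the commit:

#include "framework/executor.h"
#include "framework/loader.h"

int main() {
  // Previously: paddle_mobile::Loader<paddle_mobile::CPU> loader;
  paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
  auto program = loader.Load("../models/mobilenet", /*optimize=*/true);
  paddle_mobile::framework::Executor<paddle_mobile::CPU> executor(
      program, /*batch_size=*/1, /*use_optimize=*/true, /*loddable=*/false);
  return 0;
}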
@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#include "io/executor.h"
+#include "executor.h"
 #include <operators/math/gemm.h>
 #include <algorithm>
 #include <vector>
@@ -38,6 +38,8 @@ limitations under the License. */
 #endif
 namespace paddle_mobile {
+namespace framework {
+
 using framework::Variable;
 char *Get_binary_data(std::string filename) {
@@ -57,7 +59,8 @@ char *Get_binary_data(std::string filename) {
 }
 #pragma mark - executor
-template <typename Dtype, Precision P>
+
+template<typename Dtype, Precision P>
 Executor<Dtype, P>::Executor(const framework::Program<Dtype> p, int batch_size,
                              bool use_optimize, bool loddable)
     : program_(p),
@@ -115,7 +118,7 @@ Executor<Dtype, P>::Executor(const framework::Program<Dtype> p, int batch_size,
   }
 }
-template <typename Dtype, Precision P>
+template<typename Dtype, Precision P>
 void Executor<Dtype, P>::LoadMemory(const framework::VarDesc var_desc,
                                     framework::LoDTensor *tensor, char **data) {
   // 1. version
@@ -223,7 +226,7 @@ void Executor<Dtype, P>::LoadMemory(const framework::VarDesc var_desc,
   }
 }
-template <typename Dtype, Precision P>
+template<typename Dtype, Precision P>
 void Executor<Dtype, P>::InitMemory() {
   for (const auto &block : to_predict_program_->Blocks()) {
     for (const auto &var_desc : block->Vars()) {
@@ -257,12 +260,12 @@ void Executor<Dtype, P>::InitMemory() {
   }
 }
-template <typename Dtype, Precision P>
+template<typename Dtype, Precision P>
 void Executor<Dtype, P>::InitCombineMemory() {
   char *origin_data;
   if (program_.combined_params_buf && program_.combined_params_len) {
     LOG(kLOG_INFO) << "use outter memory";
-    origin_data = (char *)program_.combined_params_buf;
+    origin_data = (char *) program_.combined_params_buf;
   } else {
     LOG(kLOG_INFO) << " begin init combine memory";
     origin_data = Get_binary_data(program_.para_path);
@@ -297,7 +300,7 @@ void Executor<Dtype, P>::InitCombineMemory() {
   LOG(kLOG_INFO) << " end init combine memory ";
 }
-template <typename Dtype, Precision P>
+template<typename Dtype, Precision P>
 bool Executor<Dtype, P>::varInputMemory(
     const std::shared_ptr<framework::VarDesc> &var_desc, Variable *var,
     framework::LoDTensor *tensor) const {
@@ -335,13 +338,15 @@ bool Executor<Dtype, P>::varInputMemory(
       break;
     }
-    default: { break; }
+    default: {
+      break;
+    }
   }
   return is_mute_match;
 }
-template <typename Dtype, Precision P>
+template<typename Dtype, Precision P>
 std::shared_ptr<framework::Tensor> Executor<Dtype, P>::Predict(
     const framework::Tensor &t) {
   framework::Variable *g_feed_value = program_.scope->Var("feed");
@@ -483,7 +488,7 @@ std::shared_ptr<framework::Tensor> Executor<Dtype, P>::Predict(
   return std::make_shared<framework::Tensor>(framework::Tensor(*output_tensor));
 }
-template <typename Dtype, Precision P>
+template<typename Dtype, Precision P>
 std::shared_ptr<framework::LoDTensor> Executor<Dtype, P>::PredictLod(
     const framework::LoDTensor &t) {
   framework::Variable *g_feed_value = program_.scope->Var("feed");
@@ -633,13 +638,13 @@ std::shared_ptr<framework::LoDTensor> Executor<Dtype, P>::PredictLod(
       framework::LoDTensor(*output_tensor));
 }
-template <typename Dtype, Precision P>
+template<typename Dtype, Precision P>
 std::shared_ptr<framework::Tensor> Executor<Dtype, P>::Predict(
     const framework::Tensor &t, int block_id) {
   return Predict(t);
 }
-template <typename Dtype, Precision P>
+template<typename Dtype, Precision P>
 std::vector<typename Executor<Dtype, P>::Ptype> Executor<Dtype, P>::Predict(
     const std::vector<Ptype> &input, const std::vector<int64_t> &dims) {
   framework::Tensor tensor(input, framework::make_ddim(dims));
@@ -725,9 +730,17 @@ void Executor<Dtype, P>::Predict_To(int end) {
 };
 #endif
-template class Executor<CPU, Precision::FP32>;
-template class Executor<FPGA, Precision::FP32>;
-template class Executor<GPU_CL, Precision::FP32>;
-template class Executor<GPU_MALI, Precision::FP32>;
+template
+class Executor<CPU, Precision::FP32>;
+
+template
+class Executor<FPGA, Precision::FP32>;
+
+template
+class Executor<GPU_CL, Precision::FP32>;
+
+template
+class Executor<GPU_MALI, Precision::FP32>;
+
+}
 } // namespace paddle_mobile
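The hunk above also reformats the explicit instantiations (template class Executor<CPU, Precision::FP32>; and friends) into two-line form. These lines exist because the template's member definitions live in this .cpp file rather than the header, so the file must instantiate the class once per supported (device, precision) pair or other translation units fail to link. A stripped-down sketch of the pattern, with illustrative types that are not from this commit:

// impl.cpp: definitions are hidden from header-only consumers.
template <typename Device>
struct Runner {
  int Run() { return 0; }
};

struct CPU {};
struct FPGA {};

// Force code generation here so callers in other translation
// units can link against Runner<CPU>::Run and Runner<FPGA>::Run.
template struct Runner<CPU>;
template struct Runner<FPGA>;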
@@ -33,8 +33,9 @@ limitations under the License. */
 using std::string;
 namespace paddle_mobile {
+namespace framework {
-template <typename Dtype = CPU, Precision P = Precision::FP32>
+template<typename Dtype = CPU, Precision P = Precision::FP32>
 class Executor {
  public:
   typedef typename PrecisionTrait<P>::ptype Ptype;
@@ -50,11 +51,13 @@ class Executor {
    * @b to predict
    * */
   std::shared_ptr<framework::Tensor> Predict(const framework::Tensor &t);
+
   /*
    * @b to predict
    * */
   std::shared_ptr<framework::LoDTensor> PredictLod(
       const framework::LoDTensor &t);
+
   /*
    * @b to predict with vector and dim
    *
@@ -65,15 +68,21 @@ class Executor {
  protected:
   Executor() = default;
+
   void InitMemory();
+
   void LoadMemory(const framework::VarDesc var_desc,
                   framework::LoDTensor *tensor, char **data);
+
   void InitCombineMemory();
+
   framework::Program<Dtype> program_;
   int batch_size_ = 1;
   std::shared_ptr<framework::ProgramDesc> to_predict_program_;
+
   std::shared_ptr<framework::Tensor> Predict(const framework::Tensor &t,
                                              int block_id);
+
   std::map<framework::BlockDesc,
            std::vector<std::shared_ptr<framework::OperatorBase<Dtype>>>>
       ops_of_block_;
@@ -106,4 +115,5 @@ class Executor {
 #endif
 };
+}
 } // namespace paddle_mobile
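Given the declarations above, Ptype comes from PrecisionTrait<P>, so for Precision::FP32 the vector-based Predict overload consumes and returns plain floats. A usage sketch, assuming an already-loaded program and a placeholder input shape:

paddle_mobile::framework::Executor<paddle_mobile::CPU> executor(
    program, /*batch_size=*/1, /*use_optimize=*/true, /*loddable=*/false);
std::vector<float> input(1 * 3 * 224 * 224, 0.0f);  // dummy image data
auto output = executor.Predict(input, {1, 3, 224, 224});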
@@ -12,13 +12,13 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#include "io/loader.h"
+#include "loader.h"
 #include "framework/lod_tensor.h"
 #include "framework/program/program-optimize/program_optimize.h"
 namespace paddle_mobile {
-using framework::Variable;
+namespace framework {
 /**
  * muteandresize tensor as originProgramDesc and scope in loadParams
@@ -27,22 +27,22 @@ using framework::Variable;
  * @param scope
  */
 void InitMemoryFromProgram(
-    std::shared_ptr<framework::ProgramDesc> &originProgramDesc,
-    std::shared_ptr<framework::Scope> &scope) {
+    std::shared_ptr<ProgramDesc> &originProgramDesc,
+    std::shared_ptr<Scope> &scope) {
   for (const auto &block : originProgramDesc.get()->Blocks()) {
     for (const auto &var_desc : block->Vars()) {
       auto var = scope.get()->Var(var_desc->Name());
-      if (var_desc->Type() == framework::VARTYPE_TYPE_LOD_TENSOR) {
+      if (var_desc->Type() == VARTYPE_TYPE_LOD_TENSOR) {
         if (var_desc->Persistable()) {
           auto dim = var_desc->Tensor_desc().Dims();
-          auto tensor = var->GetMutable<framework::LoDTensor>();
-          tensor->Resize(framework::make_ddim(dim));
+          auto tensor = var->GetMutable<LoDTensor>();
+          tensor->Resize(make_ddim(dim));
         } else {
           auto dim = var_desc->Tensor_desc().Dims();
           PADDLE_MOBILE_ENFORCE(dim.size() > 0, "dim size is 0");
           dim[0] = 1;
-          auto tensor = var->GetMutable<framework::LoDTensor>();
-          tensor->Resize(framework::make_ddim(dim));
+          auto tensor = var->GetMutable<LoDTensor>();
+          tensor->Resize(make_ddim(dim));
         }
       } else {
         // TODO(codeWorm): some.
@@ -50,6 +50,7 @@ void InitMemoryFromProgram(
     }
   }
 }
+
 /**
  * fusion and print someinfos
  * @tparam Dtype
@@ -59,12 +60,12 @@ void InitMemoryFromProgram(
  * @param program
  * @param originProgramDesc
  */
-template <typename Dtype, Precision P>
+template<typename Dtype, Precision P>
 void FusionAndPrintInfos(
-    bool &optimize, bool &can_add_split, framework::Program<Dtype, P> &program,
-    const std::shared_ptr<framework::ProgramDesc> &originProgramDesc) {
+    bool &optimize, bool &can_add_split, Program<Dtype, P> &program,
+    const std::shared_ptr<ProgramDesc> &originProgramDesc) {
   if (optimize) {
-    framework::ProgramOptimize program_optimize;
+    ProgramOptimize program_optimize;
     program.optimizeProgram =
         program_optimize.FusionOptimize(originProgramDesc, can_add_split);
   }
@@ -74,6 +75,7 @@ void FusionAndPrintInfos(
     originProgramDesc->Description("program: ");
   }
 }
+
 static size_t ReadBuffer(const char *file_name, uint8_t **out) {
   FILE *fp;
   fp = fopen(file_name, "rb");
@@ -96,8 +98,8 @@ static size_t ReadBuffer(const char *file_name, uint8_t **out) {
   return cur_len;
 }
-template <typename Dtype, Precision P>
-const framework::Program<Dtype, P> Loader<Dtype, P>::Load(
+template<typename Dtype, Precision P>
+const Program<Dtype, P> Loader<Dtype, P>::Load(
     const std::string &dirname, bool optimize, bool quantification,
     bool can_add_split) {
   auto program = this->LoadProgram(dirname + "/__model__", optimize,
@@ -106,8 +108,8 @@ const framework::Program<Dtype, P> Loader<Dtype, P>::Load(
   return program;
 }
-template <typename Dtype, Precision P>
-const framework::Program<Dtype, P> Loader<Dtype, P>::Load(
+template<typename Dtype, Precision P>
+const Program<Dtype, P> Loader<Dtype, P>::Load(
     const std::string &model_path, const std::string &para_path, bool optimize,
     bool quantification) {
   auto program = this->LoadProgram(model_path, optimize, quantification);
@@ -118,8 +120,8 @@ const framework::Program<Dtype, P> Loader<Dtype, P>::Load(
   return program;
 }
-template <typename Dtype, Precision P>
-const framework::Program<Dtype, P> Loader<Dtype, P>::LoadProgram(
+template<typename Dtype, Precision P>
+const Program<Dtype, P> Loader<Dtype, P>::LoadProgram(
     const std::string &model_path, bool optimize, bool quantification,
     bool can_add_split) {
   std::string model_filename = model_path;
@@ -136,14 +138,14 @@ const framework::Program<Dtype, P> Loader<Dtype, P>::LoadProgram(
   //
   DLOG << "n_ops: " << (*c_program->blocks)->n_ops;
   //
-  auto originProgramDesc = std::make_shared<framework::ProgramDesc>(c_program);
-  framework::Program<Dtype, P> program;
+  auto originProgramDesc = std::make_shared<ProgramDesc>(c_program);
+  Program<Dtype, P> program;
   program.originProgram = originProgramDesc;
   program.quantification = quantification;
   program.combined_params_len = 0;
   program.combined_params_buf = nullptr;
-  auto scope = std::make_shared<framework::Scope>();
+  auto scope = std::make_shared<Scope>();
   program.scope = scope;
   // use originProgramDesc and scope to init tensors
@@ -155,8 +157,8 @@ const framework::Program<Dtype, P> Loader<Dtype, P>::LoadProgram(
   return program;
 }
-template <typename Dtype, Precision P>
-const framework::Program<Dtype, P> Loader<Dtype, P>::LoadCombinedMemory(
+template<typename Dtype, Precision P>
+const Program<Dtype, P> Loader<Dtype, P>::LoadCombinedMemory(
     size_t read_size, const uint8_t *buf, size_t combined_params_len,
    const uint8_t *combined_params_buf, bool optimize, bool quantification) {
   bool can_add_split = false;
@@ -172,16 +174,16 @@ const framework::Program<Dtype, P> Loader<Dtype, P>::LoadCombinedMemory(
   DLOG << "n_ops: " << (*c_program->blocks)->n_ops;
   //
-  auto originProgramDesc = std::make_shared<framework::ProgramDesc>(c_program);
-  framework::Program<Dtype, P> program;
+  auto originProgramDesc = std::make_shared<ProgramDesc>(c_program);
+  Program<Dtype, P> program;
   program.combined = true;
   program.originProgram = originProgramDesc;
   program.quantification = quantification;
   program.combined_params_len = combined_params_len;
   program.combined_params_buf = combined_params_buf;
-  auto scope = std::make_shared<framework::Scope>();
+  auto scope = std::make_shared<Scope>();
   program.scope = scope;
   InitMemoryFromProgram(originProgramDesc, scope);
   FusionAndPrintInfos(optimize, can_add_split, program, originProgramDesc);
@@ -190,9 +192,17 @@ const framework::Program<Dtype, P> Loader<Dtype, P>::LoadCombinedMemory(
   return program;
 }
-template class Loader<CPU, Precision::FP32>;
-template class Loader<FPGA, Precision::FP32>;
-template class Loader<GPU_MALI, Precision::FP32>;
-template class Loader<GPU_CL, Precision::FP32>;
+template
+class Loader<CPU, Precision::FP32>;
+
+template
+class Loader<FPGA, Precision::FP32>;
+
+template
+class Loader<GPU_MALI, Precision::FP32>;
+
+template
+class Loader<GPU_CL, Precision::FP32>;
+
+}
 } // namespace paddle_mobile
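LoadCombinedMemory is the in-memory counterpart of the path-based Load overloads: it takes raw buffers for the combined-format model and its parameters and, as above, wires up program.scope and program.originProgram before returning. A minimal sketch, assuming the buffers were filled elsewhere (for example from mmap or an asset manager); the function and buffer names here are placeholders:

void LoadFromMemory(const uint8_t *model_buf, size_t model_len,
                    const uint8_t *params_buf, size_t params_len) {
  paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
  auto program = loader.LoadCombinedMemory(
      model_len, model_buf, params_len, params_buf,
      /*optimize=*/false, /*quantification=*/false);
  // program.scope and program.originProgram are now initialized.
}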
@@ -20,6 +20,7 @@ limitations under the License. */
 #include "framework/program/program.h"
 namespace paddle_mobile {
+namespace framework{
 template <typename Dtype = CPU, Precision P = Precision::FP32>
 class Loader {
@@ -28,7 +29,7 @@ class Loader {
    * @b load separate format fluid model
    * @b 加载分开形式的 fluid 模型
    * */
-  const framework::Program<Dtype, P> Load(const std::string &dirname,
+  const Program<Dtype, P> Load(const std::string &dirname,
                                bool optimize = false,
                                bool quantification = false,
                                bool can_add_split = false);
@@ -37,21 +38,22 @@ class Loader {
    * @b load combine format fluid mode
    * @b 加载结合在一起格式的模型
    * */
-  const framework::Program<Dtype, P> Load(const std::string &model_path,
+  const Program<Dtype, P> Load(const std::string &model_path,
                                const std::string &para_path,
                                bool optimize = false,
                                bool quantification = false);
+
-  const framework::Program<Dtype, P> LoadCombinedMemory(
+  const Program<Dtype, P> LoadCombinedMemory(
      size_t model_len, const uint8_t *model_buf, size_t combined_params_len,
      const uint8_t *combined_params_buf, bool optimize = false,
      bool quantification = false);
  private:
-  const framework::Program<Dtype, P> LoadProgram(const std::string &model_path,
+  const Program<Dtype, P> LoadProgram(const std::string &model_path,
                                bool optimize = false,
                                bool quantification = false,
                                bool can_add_split = false);
 };
+}
 } // namespace paddle_mobile
@@ -29,13 +29,13 @@ bool PaddleMobile<Dtype, P>::Load(const std::string &dirname, bool optimize,
                                   bool quantification, int batch_size,
                                   bool loddable) {
   if (loader_.get() == nullptr) {
-    loader_ = std::make_shared<Loader<Dtype, P>>();
+    loader_ = std::make_shared<framework::Loader<Dtype, P>>();
   } else {
     LOG(kLOG_INFO) << "loader inited";
   }
   if (executor_.get() == nullptr) {
-    executor_ = std::make_shared<Executor<Dtype, P>>(
+    executor_ = std::make_shared<framework::Executor<Dtype, P>>(
         loader_->Load(dirname, optimize, quantification), batch_size, optimize,
         loddable);
   } else {
@@ -51,13 +51,13 @@ bool PaddleMobile<Dtype, P>::Load(const std::string &model_path,
                                   bool quantification, int batch_size,
                                   bool loddable) {
   if (loader_.get() == nullptr) {
-    loader_ = std::make_shared<Loader<Dtype, P>>();
+    loader_ = std::make_shared<framework::Loader<Dtype, P>>();
   } else {
     LOG(kLOG_INFO) << "loader inited";
   }
   if (executor_.get() == nullptr) {
-    executor_ = std::make_shared<Executor<Dtype, P>>(
+    executor_ = std::make_shared<framework::Executor<Dtype, P>>(
         loader_->Load(model_path, para_path, optimize, quantification),
         batch_size, optimize, loddable);
   } else {
@@ -76,13 +76,13 @@ bool PaddleMobile<Dtype, P>::LoadCombinedMemory(
   bool quantification = false;
   if (loader_.get() == nullptr) {
-    loader_ = std::make_shared<Loader<Dtype, P>>();
+    loader_ = std::make_shared<framework::Loader<Dtype, P>>();
   } else {
     LOG(kLOG_INFO) << "loader inited";
   }
   if (executor_.get() == nullptr) {
-    executor_ = std::make_shared<Executor<Dtype, P>>(
+    executor_ = std::make_shared<framework::Executor<Dtype, P>>(
         loader_->LoadCombinedMemory(model_len, model_buf, combined_params_len,
                                     combined_params_buf, optimise,
                                     quantification),
......
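As the hunks above show, PaddleMobile creates its framework::Loader and framework::Executor lazily on the first Load call and only logs "loader inited" on repeat calls; the public facade is otherwise untouched by this commit. A hedged usage sketch with a placeholder model directory; whether Load applies defaults for the trailing arguments is not shown in this diff, so all five are passed explicitly:

paddle_mobile::PaddleMobile<paddle_mobile::CPU> mobile;
// First call constructs both framework::Loader and framework::Executor.
bool ok = mobile.Load("../models/mobilenet", /*optimize=*/true,
                      /*quantification=*/false, /*batch_size=*/1,
                      /*loddable=*/false);
// A second Load would reuse them and log "loader inited".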
@@ -23,8 +23,8 @@ limitations under the License. */
 #include "common/types.h"
 #include "framework/tensor.h"
-#include "io/executor.h"
-#include "io/loader.h"
+#include "framework/executor.h"
+#include "framework/loader.h"
 namespace paddle_mobile {
@@ -90,8 +90,8 @@ class PaddleMobile {
   ~PaddleMobile();
  private:
-  std::shared_ptr<Loader<Dtype, P>> loader_;
-  std::shared_ptr<Executor<Dtype, P>> executor_;
+  std::shared_ptr<framework::Loader<Dtype, P>> loader_;
+  std::shared_ptr<framework::Executor<Dtype, P>> executor_;
 #ifdef PADDLE_MOBILE_FPGA
  public:
......
@@ -19,7 +19,7 @@ limitations under the License. */
 #include "common/log.h"
 #include "framework/op_registry.h"
-#include "io/executor.h"
+#include "framework/executor.h"
 #include "operators/conv_op.h"
 #include "operators/elementwise_add_op.h"
 #include "operators/pool_op.h"
@@ -29,7 +29,7 @@ limitations under the License. */
 #include "operators/softmax_op.h"
 #include "operators/transpose_op.h"
-using paddle_mobile::Executor;
+using paddle_mobile::framework::Executor;
 using paddle_mobile::framework::BlockDesc;
 using paddle_mobile::framework::DDim;
 using paddle_mobile::framework::LoDTensor;
......
@@ -16,7 +16,7 @@ limitations under the License. */
 #include "operators/concat_op.h"
 int main() {
-  paddle_mobile::Loader<paddle_mobile::FPGA> loader;
+  paddle_mobile::framework::Loader<paddle_mobile::FPGA> loader;
   auto program = loader.Load(g_googlenet);
   PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr,
                         "program file read fail");
......
@@ -15,10 +15,10 @@ limitations under the License. */
 #include <string>
 #include "../test_helper.h"
-#include "io/loader.h"
+#include "framework/loader.h"
 int main() {
-  paddle_mobile::Loader<paddle_mobile::CPU> loader;
+  paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
   // ../../../test/models/googlenet
   // ../../../test/models/mobilenet
   // auto program = loader.Load(g_googlenet, true);
......
@@ -15,10 +15,10 @@ limitations under the License. */
 #include "../test_helper.h"
 #include "framework/program/program-optimize/node.h"
 #include "framework/program/program-optimize/program_optimize.h"
-#include "io/loader.h"
+#include "framework/loader.h"
 int main() {
-  paddle_mobile::Loader<paddle_mobile::CPU> loader;
+  paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
   // "../../../test/models/googlenet"
   auto program = loader.Load(g_mobilenet_ssd, true);
   paddle_mobile::framework::ProgramOptimize optimize;
......
@@ -127,7 +127,7 @@ template class TestBatchNormOp<CPU>;
 int main() {
   DLOG << "----------**********----------";
   DLOG << "begin to run BatchNormOp Test";
-  paddle_mobile::Loader<paddle_mobile::CPU> loader;
+  paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
   auto program = loader.Load(std::string(g_mobilenet_ssd));
   /// input x (4,10,2,2)
......
@@ -115,7 +115,7 @@ template class TestBoxCoderOp<CPU>;
 int main() {
   DLOG << "----------**********----------";
   DLOG << "begin to run BoxCoderOp Test";
-  paddle_mobile::Loader<paddle_mobile::CPU> loader;
+  paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
   auto program = loader.Load(std::string(g_mobilenet_ssd));
   paddle_mobile::framework::Tensor priorbox;
......
@@ -16,7 +16,7 @@ limitations under the License. */
 #include "operators/concat_op.h"
 int main() {
-  paddle_mobile::Loader<paddle_mobile::CPU> loader;
+  paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
   auto program = loader.Load(g_googlenet);
   PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr,
                         "program file read fail");
......
@@ -16,7 +16,7 @@ limitations under the License. */
 #include "operators/fusion_conv_add_relu_op.h"
 int main() {
-  paddle_mobile::Loader<paddle_mobile::CPU> loader;
+  paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
   // ../models/image_classification_resnet.inference.model
   auto program = loader.Load(g_googlenet, true);
......
@@ -16,7 +16,7 @@ limitations under the License. */
 #include "operators/conv_op.h"
 int main() {
-  paddle_mobile::Loader<paddle_mobile::GPU_MALI> loader;
+  paddle_mobile::framework::Loader<paddle_mobile::GPU_MALI> loader;
   // ../models/image_classification_resnet.inference.model
   auto program = loader.Load(g_googlenet);
......
@@ -16,7 +16,7 @@ limitations under the License. */
 #include "operators/depthwise_conv_op.h"
 int main() {
-  paddle_mobile::Loader<paddle_mobile::CPU> loader;
+  paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
   // ../models/image_classification_resnet.inference.model
   auto program = loader.Load(g_mobilenet_ssd);
......
@@ -15,7 +15,7 @@ limitations under the License. */
 #include "../test_include.h"
 int main() {
-  paddle_mobile::Loader<paddle_mobile::CPU> loader;
+  paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
   auto program = loader.Load(g_resnet);
   PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr,
                         "program file read fail");
......
@@ -17,7 +17,7 @@ limitations under the License. */
 #include "operators/fusion_conv_add_bn_relu_op.h"
 int main() {
-  paddle_mobile::Loader<paddle_mobile::CPU> loader;
+  paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
   // ../models/image_classification_resnet.inference.model
   auto program = loader.Load(g_mobilenet, true);
......
@@ -114,7 +114,7 @@ template class TestFcOp<CPU>;
 int main() {
   DLOG << "----------**********----------";
   DLOG << "begin to run Fc Test";
-  paddle_mobile::Loader<paddle_mobile::CPU> loader;
+  paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
   // "../../../test/models/googlenet"
   auto program = loader.Load(g_googlenet);
   paddle_mobile::framework::ProgramOptimize optimize;
......
@@ -16,7 +16,7 @@ limitations under the License. */
 #include "operators/gru_op.h"
 int main() {
-  paddle_mobile::Loader<paddle_mobile::CPU> loader;
+  paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
   auto program = loader.Load(g_nlp);
   PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr,
                         "program file read fail");
......
@@ -17,7 +17,7 @@ limitations under the License. */
 #include "operators/im2sequence_op.h"
 int main() {
-  paddle_mobile::Loader<paddle_mobile::CPU> loader;
+  paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
   auto program = loader.Load(g_ocr_recg);
   PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr,
                         "program file read fail");
......
@@ -16,7 +16,7 @@ limitations under the License. */
 #include "operators/lrn_op.h"
 int main() {
-  paddle_mobile::Loader<paddle_mobile::CPU> loader;
+  paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
   auto program = loader.Load(g_googlenet);
   PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr,
                         "program file read fail");
......
@@ -16,7 +16,7 @@ limitations under the License. */
 #include "operators/mul_op.h"
 int main() {
-  paddle_mobile::Loader<paddle_mobile::CPU> loader;
+  paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
   auto program = loader.Load(g_resnet);
   PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr,
                         "program file read fail");
......
@@ -126,7 +126,7 @@ template class TestMultiClassNMSOp<CPU>;
 int main() {
   DLOG << "----------**********----------";
   DLOG << "begin to run MulticlassNMS Test";
-  paddle_mobile::Loader<paddle_mobile::CPU> loader;
+  paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
   auto program = loader.Load(std::string("../../test/models/mobilenet+ssd"));
   /// input x (1,3,300,300)
......
@@ -16,7 +16,7 @@ limitations under the License. */
 #include "operators/pool_op.h"
 int main() {
-  paddle_mobile::Loader<paddle_mobile::CPU> loader;
+  paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
   auto program = loader.Load(std::string(g_googlenet));
   if (program.originProgram == nullptr) {
     DLOG << "program read file";
......
@@ -17,7 +17,7 @@ limitations under the License. */
 #include "operators/prelu_op.h"
 int main() {
-  paddle_mobile::Loader<paddle_mobile::CPU> loader;
+  paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
   auto program = loader.Load(g_resnet);
   PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr,
                         "program file read fail");
......
@@ -126,7 +126,7 @@ template class TestPriorBoxOp<CPU>;
 int main() {
   DLOG << "----------**********----------";
   DLOG << "begin to run PriorBoxOp Test";
-  paddle_mobile::Loader<paddle_mobile::CPU> loader;
+  paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
   auto program = loader.Load(std::string(g_mobilenet_ssd));
   /// input x (1,3,300,300)
......
@@ -16,7 +16,7 @@ limitations under the License. */
 #include "operators/relu_op.h"
 int main() {
-  paddle_mobile::Loader<paddle_mobile::CPU> loader;
+  paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
   auto program = loader.Load(g_resnet);
   PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr,
                         "program file read fail");
......
@@ -16,7 +16,7 @@ limitations under the License. */
 #include "operators/reshape_op.h"
 int main() {
-  paddle_mobile::Loader<paddle_mobile::CPU> loader;
+  paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
   auto program = loader.Load(std::string(g_mobilenet_ssd));
   if (program.originProgram == nullptr) {
     DLOG << "program read file";
......
@@ -16,7 +16,7 @@ limitations under the License. */
 #include "operators/resize_op.h"
 int main() {
-  paddle_mobile::Loader<paddle_mobile::CPU> loader;
+  paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
   auto program = loader.Load(std::string(g_mobilenet_ssd));
   if (program.originProgram == nullptr) {
     DLOG << "program read file";
......
@@ -15,7 +15,7 @@ limitations under the License. */
 #include "../../src/operators/kernel/sigmoid_kernel.h"
 #include "../../src/operators/kernel/central-arm-func/sigmoid_arm_func.h"
 #include "../test_helper.h"
-#include "io/executor.h"
+#include "framework/executor.h"
 int main() {
   paddle_mobile::framework::Tensor input;
......
@@ -17,7 +17,7 @@ limitations under the License. */
 #include "operators/softmax_op.h"
 int main() {
-  paddle_mobile::Loader<paddle_mobile::CPU> loader;
+  paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
   auto program = loader.Load(std::string(g_mobilenet));
   if (program.originProgram == nullptr) {
     DLOG << "program read file";
......
@@ -16,7 +16,7 @@ limitations under the License. */
 #include "../test_include.h"
 #include "operators/transpose_op.h"
 int main() {
-  paddle_mobile::Loader<paddle_mobile::CPU> loader;
+  paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
   auto program = loader.Load(std::string(g_mobilenet_ssd));
   if (program.originProgram == nullptr) {
     DLOG << "program read file";
......