diff --git a/src/io/executor.cpp b/src/framework/executor.cpp
similarity index 90%
rename from src/io/executor.cpp
rename to src/framework/executor.cpp
index 0c8fcb2f5ea8663a6deec815adfa82faf676fb6e..26cfa4ecba8abb629fb9f3216c0f9b7620872dc7 100644
--- a/src/io/executor.cpp
+++ b/src/framework/executor.cpp
@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#include "io/executor.h"
+#include "executor.h"
 #include
 #include
 #include
@@ -38,6 +38,8 @@ limitations under the License. */
 #endif

 namespace paddle_mobile {
+namespace framework {
+
 using framework::Variable;

 char *Get_binary_data(std::string filename) {
@@ -57,13 +59,14 @@ char *Get_binary_data(std::string filename) {
 }

 #pragma mark - executor
-template <typename Dtype, Precision P>
+
+template<typename Dtype, Precision P>
 Executor<Dtype, P>::Executor(const framework::Program<Dtype> p, int batch_size,
                              bool use_optimize, bool loddable)
-    : program_(p),
-      batch_size_(batch_size),
-      use_optimize_(use_optimize),
-      loddable_(loddable) {
+        : program_(p),
+          batch_size_(batch_size),
+          use_optimize_(use_optimize),
+          loddable_(loddable) {
   if (use_optimize_) {
     to_predict_program_ = program_.optimizeProgram;
   } else {
@@ -74,7 +77,7 @@ Executor<Dtype, P>::Executor(const framework::Program<Dtype> p, int batch_size,
   PADDLE_MOBILE_ENFORCE(to_predict_program_ != nullptr,
                         "to_predict_program_ == NULL!");
   const std::vector<std::shared_ptr<framework::BlockDesc>> blocks =
-      to_predict_program_->Blocks();
+          to_predict_program_->Blocks();
 #ifdef PADDLE_EXECUTOR_MULTITHREAD
   depManager.resize(blocks.size());
 #endif
@@ -86,8 +89,8 @@ Executor<Dtype, P>::Executor(const framework::Program<Dtype> p, int batch_size,
       std::shared_ptr<framework::OpDesc> op = ops[j];
       DLOG << "create op: " << j << " " << op->Type();
       auto op_base = framework::OpRegistry<Dtype>::CreateOp(
-          op->Type(), op->GetInputs(), op->GetOutputs(), op->GetAttrMap(),
-          program_.scope);
+              op->Type(), op->GetInputs(), op->GetOutputs(), op->GetAttrMap(),
+              program_.scope);
       // use pre_infershape to pre resize , but if u use an lod mode tensor u
       // need to resize in runtime
       if (!loddable_) {
@@ -106,7 +109,7 @@ Executor<Dtype, P>::Executor(const framework::Program<Dtype> p, int batch_size,
     InitMemory();
   }
   std::shared_ptr<framework::BlockDesc> to_predict_block =
-      to_predict_program_->Block(0);
+          to_predict_program_->Block(0);
   auto &ops = ops_of_block_[*to_predict_block.get()];
   int i = 0;
   for (const auto &op : ops) {
@@ -115,7 +118,7 @@
   }
 }

-template <typename Dtype, Precision P>
+template<typename Dtype, Precision P>
 void Executor<Dtype, P>::LoadMemory(const framework::VarDesc var_desc,
                                     framework::LoDTensor *tensor, char **data) {
   // 1. version
@@ -223,7 +226,7 @@ void Executor<Dtype, P>::LoadMemory(const framework::VarDesc var_desc,
   }
 }

-template <typename Dtype, Precision P>
+template<typename Dtype, Precision P>
 void Executor<Dtype, P>::InitMemory() {
   for (const auto &block : to_predict_program_->Blocks()) {
     for (const auto &var_desc : block->Vars()) {
@@ -235,7 +238,7 @@
        }

        char *origin_data =
-            Get_binary_data(program_.model_path + "/" + var_desc->Name());
+                Get_binary_data(program_.model_path + "/" + var_desc->Name());
        char *data = origin_data;

        LoadMemory(*var_desc, tensor, &data);
@@ -248,21 +251,21 @@
          is_mute_match = varInputMemory(var_desc, var, tensor);

          PADDLE_MOBILE_ENFORCE(
-             is_mute_match,
-             "got unhandled var_desc->Tensor_desc().DataType(): %d",
-             var_desc->Tensor_desc().DataType());
+                 is_mute_match,
+                 "got unhandled var_desc->Tensor_desc().DataType(): %d",
+                 var_desc->Tensor_desc().DataType());
        }
      }
    }
  }
 }

-template <typename Dtype, Precision P>
+template<typename Dtype, Precision P>
 void Executor<Dtype, P>::InitCombineMemory() {
   char *origin_data;
   if (program_.combined_params_buf && program_.combined_params_len) {
     LOG(kLOG_INFO) << "use outter memory";
-    origin_data = (char *)program_.combined_params_buf;
+    origin_data = (char *) program_.combined_params_buf;
   } else {
     LOG(kLOG_INFO) << " begin init combine memory";
     origin_data = Get_binary_data(program_.para_path);
   }
@@ -286,9 +289,9 @@
        is_mute_match = varInputMemory(var_desc, var, tensor);

        PADDLE_MOBILE_ENFORCE(
-           is_mute_match,
-           "got unhandled var_desc->Tensor_desc().DataType(): %d",
-           var_desc->Tensor_desc().DataType());
+               is_mute_match,
+               "got unhandled var_desc->Tensor_desc().DataType(): %d",
+               var_desc->Tensor_desc().DataType());
      }
    }
  }
@@ -297,10 +300,10 @@
   LOG(kLOG_INFO) << " end init combine memory ";
 }

-template <typename Dtype, Precision P>
+template<typename Dtype, Precision P>
 bool Executor<Dtype, P>::varInputMemory(
-    const std::shared_ptr<framework::VarDesc> &var_desc, Variable *var,
-    framework::LoDTensor *tensor) const {
+        const std::shared_ptr<framework::VarDesc> &var_desc, Variable *var,
+        framework::LoDTensor *tensor) const {
   bool is_mute_match = false;
   switch (var_desc->Tensor_desc().DataType()) {
     case framework::VARTYPE_TYPE_FP16: {
@@ -335,22 +338,24 @@
       break;
     }

-    default: { break; }
+    default: {
+      break;
+    }
   }

   return is_mute_match;
 }

-template <typename Dtype, Precision P>
+template<typename Dtype, Precision P>
 std::shared_ptr<framework::Tensor> Executor<Dtype, P>::Predict(
-    const framework::Tensor &t) {
+        const framework::Tensor &t) {
   framework::Variable *g_feed_value = program_.scope->Var("feed");
   framework::Tensor *feed_tensor =
-      g_feed_value->GetMutable<framework::LoDTensor>();
+          g_feed_value->GetMutable<framework::LoDTensor>();
   feed_tensor->Resize(t.dims());
   feed_tensor->ShareDataWith(t);
   std::shared_ptr<framework::BlockDesc> to_predict_block =
-      to_predict_program_->Block(0);
+          to_predict_program_->Block(0);
   auto &ops = ops_of_block_[*to_predict_block.get()];

 #ifdef PADDLE_MOBILE_PROFILE
@@ -430,8 +435,8 @@ std::shared_ptr<framework::Tensor> Executor<Dtype, P>::Predict(
   std::vector<std::string> out_keys = (*last_op)->GetOutKeys();
   PADDLE_MOBILE_ENFORCE(out_keys.size() > 0, "the last op contains no output");
   framework::LoDTensor *output_tensor =
-      framework::GetVarValue<framework::LoDTensor>(out_keys[0], output_map,
-                                                   *(program_.scope));
+          framework::GetVarValue<framework::LoDTensor>(out_keys[0], output_map,
+                                                       *(program_.scope));
 #ifdef PADDLE_MOBILE_PROFILE
 #ifdef PADDLE_EXECUTOR_MULTITHREAD
   // TODO(haipeng): expose profile info as an interface, user can get them to
@@ -483,18 +488,18 @@ std::shared_ptr<framework::Tensor> Executor<Dtype, P>::Predict(
   return std::make_shared<framework::Tensor>(framework::Tensor(*output_tensor));
 }

-template <typename Dtype, Precision P>
+template<typename Dtype, Precision P>
 std::shared_ptr<framework::LoDTensor> Executor<Dtype, P>::PredictLod(
-    const framework::LoDTensor &t) {
+        const framework::LoDTensor &t) {
   framework::Variable *g_feed_value = program_.scope->Var("feed");
   framework::LoDTensor *feed_tensor =
-      g_feed_value->GetMutable<framework::LoDTensor>();
+          g_feed_value->GetMutable<framework::LoDTensor>();
   feed_tensor->Resize(t.dims());
   feed_tensor->ShareDataWith(t);
   feed_tensor->set_lod(t.lod());
   std::shared_ptr<framework::BlockDesc> to_predict_block =
-      to_predict_program_->Block(0);
+          to_predict_program_->Block(0);

   auto &ops = ops_of_block_[*to_predict_block.get()];

@@ -579,8 +584,8 @@ std::shared_ptr<framework::LoDTensor> Executor<Dtype, P>::PredictLod(
   std::vector<std::string> out_keys = (*last_op)->GetOutKeys();
   PADDLE_MOBILE_ENFORCE(out_keys.size() > 0, "the last op contains no output");
   framework::LoDTensor *output_tensor =
-      framework::GetVarValue<framework::LoDTensor>(out_keys[0], output_map,
-                                                   *(program_.scope));
+          framework::GetVarValue<framework::LoDTensor>(out_keys[0], output_map,
+                                                       *(program_.scope));
 #ifdef PADDLE_MOBILE_PROFILE
 #ifdef PADDLE_EXECUTOR_MULTITHREAD
   // TODO(haipeng): expose profile info as an interface, user can get them to
@@ -630,22 +635,22 @@ std::shared_ptr<framework::LoDTensor> Executor<Dtype, P>::PredictLod(
   printf("====================[---------]======================\n");
 #endif
   return std::make_shared<framework::LoDTensor>(
-      framework::LoDTensor(*output_tensor));
+          framework::LoDTensor(*output_tensor));
 }

-template <typename Dtype, Precision P>
+template<typename Dtype, Precision P>
 std::shared_ptr<framework::Tensor> Executor<Dtype, P>::Predict(
-    const framework::Tensor &t, int block_id) {
+        const framework::Tensor &t, int block_id) {
   return Predict(t);
 }

-template <typename Dtype, Precision P>
+template<typename Dtype, Precision P>
 std::vector<typename Executor<Dtype, P>::Ptype> Executor<Dtype, P>::Predict(
-    const std::vector<Ptype> &input, const std::vector<int64_t> &dims) {
+        const std::vector<Ptype> &input, const std::vector<int64_t> &dims) {
   framework::Tensor tensor(input, framework::make_ddim(dims));
   std::shared_ptr<framework::Tensor> output_tensor = Predict(tensor, 0);
   Executor<Dtype, P>::Ptype *output_ptr =
-      output_tensor->data<typename Executor<Dtype, P>::Ptype>();
+          output_tensor->data<typename Executor<Dtype, P>::Ptype>();
   std::vector<typename Executor<Dtype, P>::Ptype> result_vector;
   for (int j = 0; j < output_tensor->numel(); ++j) {
     result_vector.push_back(output_ptr[j]);
@@ -725,9 +730,17 @@ void Executor<Dtype, P>::Predict_To(int end) {
 };
 #endif

-template class Executor<CPU, Precision::FP32>;
-template class Executor<GPU_MALI, Precision::FP32>;
-template class Executor<GPU_CL, Precision::FP32>;
-template class Executor<FPGA, Precision::FP32>;
+template
+class Executor<CPU, Precision::FP32>;
+
+template
+class Executor<GPU_MALI, Precision::FP32>;
+
+template
+class Executor<GPU_CL, Precision::FP32>;
+template
+class Executor<FPGA, Precision::FP32>;
+
+}
 }  // namespace paddle_mobile
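Note: the executor.cpp change above is mechanical -- the file moves from src/io/ to src/framework/, the code gains an inner namespace framework, and everything else is re-indentation. For code outside the library, the only visible difference is the extra namespace qualifier on Executor. A minimal sketch of the call-site migration (the CPU type argument and the already-loaded program are illustrative assumptions, not part of this diff):

    #include "framework/executor.h"  // was: #include "io/executor.h"

    void BuildExecutor(
        const paddle_mobile::framework::Program<paddle_mobile::CPU> &program) {
      // before the move this was spelled paddle_mobile::Executor<...>
      paddle_mobile::framework::Executor<paddle_mobile::CPU> executor(
          program, /*batch_size=*/1, /*use_optimize=*/true, /*loddable=*/false);
    }
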
diff --git a/src/io/executor.h b/src/framework/executor.h
similarity index 83%
rename from src/io/executor.h
rename to src/framework/executor.h
index 67d3f02ac37c4203950a2679d30d7aa9072c70ba..f43cd14c29b909e9f666b098824d1bb444998add 100644
--- a/src/io/executor.h
+++ b/src/framework/executor.h
@@ -33,8 +33,9 @@ limitations under the License. */
 using std::string;

 namespace paddle_mobile {
+namespace framework {

-template <typename Dtype = CPU, Precision P = Precision::FP32>
+template<typename Dtype = CPU, Precision P = Precision::FP32>
 class Executor {
  public:
   typedef typename PrecisionTrait<P>::ptype Ptype;
@@ -50,11 +51,13 @@ class Executor {
    * @b to predict
    * */
   std::shared_ptr<framework::Tensor> Predict(const framework::Tensor &t);
+
   /*
    * @b to predict
    * */
   std::shared_ptr<framework::LoDTensor> PredictLod(
-      const framework::LoDTensor &t);
+          const framework::LoDTensor &t);
+
   /*
    * @b to predict with vector and dim
    *
@@ -65,18 +68,24 @@ class Executor {

  protected:
   Executor() = default;
+
   void InitMemory();
+
   void LoadMemory(const framework::VarDesc var_desc,
                   framework::LoDTensor *tensor, char **data);
+
   void InitCombineMemory();
+
   framework::Program<Dtype> program_;
   int batch_size_ = 1;
   std::shared_ptr<framework::ProgramDesc> to_predict_program_;
+
   std::shared_ptr<framework::Tensor> Predict(const framework::Tensor &t,
                                              int block_id);
+
   std::map<framework::BlockDesc,
-           std::vector<std::shared_ptr<framework::OperatorBase<Dtype>>>>
-      ops_of_block_;
+          std::vector<std::shared_ptr<framework::OperatorBase<Dtype>>>>
+      ops_of_block_;
   bool use_optimize_ = false;
   bool loddable_ = false;
 #ifdef PADDLE_EXECUTOR_MULTITHREAD
@@ -96,14 +105,15 @@

 #ifdef PADDLE_MOBILE_FPGA

- public:
-  void InjectVariable(const framework::Tensor &t, string var_name);
-  void FeedData(const framework::Tensor &t);
-  std::shared_ptr<framework::Tensor> FetchResult(int id = -1);
-  void Predict_From_To(int start = 0, int end = -1);
-  void Predict_From(int start);
-  void Predict_To(int end);
+  public:
+    void InjectVariable(const framework::Tensor &t, string var_name);
+    void FeedData(const framework::Tensor &t);
+    std::shared_ptr<framework::Tensor> FetchResult(int id = -1);
+    void Predict_From_To(int start = 0, int end = -1);
+    void Predict_From(int start);
+    void Predict_To(int end);
 #endif
 };

+}
 }  // namespace paddle_mobile
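Note: executor.h still carries declarations only; the member definitions stay in executor.cpp, so the explicit instantiations at the bottom of that file are what make the template linkable from other translation units -- the reformatting above merely splits each "template class ...;" across two lines. A self-contained sketch of the same pattern (file and type names are illustrative):

    // runner.h -- declarations only, like executor.h
    template <typename T>
    class Runner {
     public:
      T Run(T x);
    };

    // runner.cpp -- definitions plus explicit instantiation, like executor.cpp
    template <typename T>
    T Runner<T>::Run(T x) {
      return x;
    }
    template class Runner<float>;  // counterpart of
                                   // template class Executor<CPU, Precision::FP32>;
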
diff --git a/src/io/loader.cpp b/src/framework/loader.cpp
similarity index 67%
rename from src/io/loader.cpp
rename to src/framework/loader.cpp
index 1faeb9d3ea2b29756a49c7ed676e431f9b0b6b4e..7b95f1d1bf3186583cb586c6c4f3cb5862fa7978 100644
--- a/src/io/loader.cpp
+++ b/src/framework/loader.cpp
@@ -12,13 +12,13 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */

-#include "io/loader.h"
+#include "loader.h"

 #include "framework/lod_tensor.h"
 #include "framework/program/program-optimize/program_optimize.h"

 namespace paddle_mobile {
-using framework::Variable;
+namespace framework {

 /**
  * muteandresize tensor as originProgramDesc and scope in loadParams
  * @param originProgramDesc
  * @param scope
  */
 void InitMemoryFromProgram(
-    std::shared_ptr<framework::ProgramDesc> &originProgramDesc,
-    std::shared_ptr<framework::Scope> &scope) {
+        std::shared_ptr<ProgramDesc> &originProgramDesc,
+        std::shared_ptr<Scope> &scope) {
   for (const auto &block : originProgramDesc.get()->Blocks()) {
     for (const auto &var_desc : block->Vars()) {
       auto var = scope.get()->Var(var_desc->Name());
-      if (var_desc->Type() == framework::VARTYPE_TYPE_LOD_TENSOR) {
+      if (var_desc->Type() == VARTYPE_TYPE_LOD_TENSOR) {
         if (var_desc->Persistable()) {
           auto dim = var_desc->Tensor_desc().Dims();
-          auto tensor = var->GetMutable<framework::LoDTensor>();
-          tensor->Resize(framework::make_ddim(dim));
+          auto tensor = var->GetMutable<LoDTensor>();
+          tensor->Resize(make_ddim(dim));
         } else {
           auto dim = var_desc->Tensor_desc().Dims();
           PADDLE_MOBILE_ENFORCE(dim.size() > 0, "dim size is 0");
           dim[0] = 1;
-          auto tensor = var->GetMutable<framework::LoDTensor>();
-          tensor->Resize(framework::make_ddim(dim));
+          auto tensor = var->GetMutable<LoDTensor>();
+          tensor->Resize(make_ddim(dim));
         }
       } else {
         // TODO(codeWorm): some.
@@ -50,6 +50,7 @@ void InitMemoryFromProgram(
     }
   }
 }
+
 /**
  * fusion and print someinfos
  * @tparam Dtype
  * @tparam P
  * @param optimize
  * @param can_add_split
  * @param program
  * @param originProgramDesc
  */
-template <typename Dtype, Precision P>
+template<typename Dtype, Precision P>
 void FusionAndPrintInfos(
-    bool &optimize, bool &can_add_split, framework::Program<Dtype, P> &program,
-    const std::shared_ptr<framework::ProgramDesc> &originProgramDesc) {
+        bool &optimize, bool &can_add_split, Program<Dtype, P> &program,
+        const std::shared_ptr<ProgramDesc> &originProgramDesc) {
   if (optimize) {
-    framework::ProgramOptimize program_optimize;
+    ProgramOptimize program_optimize;
     program.optimizeProgram =
-        program_optimize.FusionOptimize(originProgramDesc, can_add_split);
+            program_optimize.FusionOptimize(originProgramDesc, can_add_split);
   }
   if (optimize) {
     program.optimizeProgram->Description("optimize: ");
@@ -74,6 +75,7 @@ void FusionAndPrintInfos(
     originProgramDesc->Description("program: ");
   }
 }
+
 static size_t ReadBuffer(const char *file_name, uint8_t **out) {
   FILE *fp;
   fp = fopen(file_name, "rb");
@@ -96,20 +98,20 @@ static size_t ReadBuffer(const char *file_name, uint8_t **out) {
   return cur_len;
 }

-template <typename Dtype, Precision P>
-const framework::Program<Dtype, P> Loader<Dtype, P>::Load(
-    const std::string &dirname, bool optimize, bool quantification,
-    bool can_add_split) {
+template<typename Dtype, Precision P>
+const Program<Dtype, P> Loader<Dtype, P>::Load(
+        const std::string &dirname, bool optimize, bool quantification,
+        bool can_add_split) {
   auto program = this->LoadProgram(dirname + "/__model__", optimize,
                                    quantification, can_add_split);
   program.model_path = dirname;
   return program;
 }

-template <typename Dtype, Precision P>
-const framework::Program<Dtype, P> Loader<Dtype, P>::Load(
-    const std::string &model_path, const std::string &para_path, bool optimize,
-    bool quantification) {
+template<typename Dtype, Precision P>
+const Program<Dtype, P> Loader<Dtype, P>::Load(
+        const std::string &model_path, const std::string &para_path, bool optimize,
+        bool quantification) {
   auto program = this->LoadProgram(model_path, optimize, quantification);

   program.para_path = para_path;
@@ -118,10 +120,10 @@ const framework::Program<Dtype, P> Loader<Dtype, P>::Load(
   return program;
 }

-template <typename Dtype, Precision P>
-const framework::Program<Dtype, P> Loader<Dtype, P>::LoadProgram(
-    const std::string &model_path, bool optimize, bool quantification,
-    bool can_add_split) {
+template<typename Dtype, Precision P>
+const Program<Dtype, P> Loader<Dtype, P>::LoadProgram(
+        const std::string &model_path, bool optimize, bool quantification,
+        bool can_add_split) {
   std::string model_filename = model_path;
   PaddleMobile__Framework__Proto__ProgramDesc *c_program;
   uint8_t *buf = NULL;
@@ -130,20 +132,20 @@ const framework::Program<Dtype, P> Loader<Dtype, P>::LoadProgram(
   PADDLE_MOBILE_ENFORCE(buf != NULL, "read from __model__ is null");

   c_program = paddle_mobile__framework__proto__program_desc__unpack(
-      NULL, read_size, buf);
+          NULL, read_size, buf);
   //  PADDLE_MOBILE_ENFORCE(c_program != NULL, "program is null");
   //  DLOG << "n_ops: " << (*c_program->blocks)->n_ops;
   //
-  auto originProgramDesc = std::make_shared<framework::ProgramDesc>(c_program);
+  auto originProgramDesc = std::make_shared<ProgramDesc>(c_program);

-  framework::Program<Dtype, P> program;
+  Program<Dtype, P> program;
   program.originProgram = originProgramDesc;
   program.quantification = quantification;
   program.combined_params_len = 0;
   program.combined_params_buf = nullptr;

-  auto scope = std::make_shared<framework::Scope>();
+  auto scope = std::make_shared<Scope>();
   program.scope = scope;

   // use originProgramDesc and scope to init tensors
@@ -155,33 +157,33 @@ const framework::Program<Dtype, P> Loader<Dtype, P>::LoadProgram(
   return program;
 }

-template <typename Dtype, Precision P>
-const framework::Program<Dtype, P> Loader<Dtype, P>::LoadCombinedMemory(
-    size_t read_size, const uint8_t *buf, size_t combined_params_len,
-    const uint8_t *combined_params_buf, bool optimize, bool quantification) {
+template<typename Dtype, Precision P>
+const Program<Dtype, P>
+Loader<Dtype, P>::LoadCombinedMemory(
+        size_t read_size, const uint8_t *buf, size_t combined_params_len,
+        const uint8_t *combined_params_buf, bool optimize, bool quantification) {
   bool can_add_split = false;

   PaddleMobile__Framework__Proto__ProgramDesc *c_program;
   PADDLE_MOBILE_ENFORCE(buf != nullptr, "read from __model__ is null");

   c_program = paddle_mobile__framework__proto__program_desc__unpack(
-      nullptr, read_size, buf);
+          nullptr, read_size, buf);
   //  PADDLE_MOBILE_ENFORCE(c_program != nullptr, "program is null");
   //  DLOG << "n_ops: " << (*c_program->blocks)->n_ops;
   //
-  auto originProgramDesc = std::make_shared<framework::ProgramDesc>(c_program);
+  auto originProgramDesc = std::make_shared<ProgramDesc>(c_program);

-  framework::Program<Dtype, P> program;
+  Program<Dtype, P> program;
   program.combined = true;
   program.originProgram = originProgramDesc;
   program.quantification = quantification;
   program.combined_params_len = combined_params_len;
   program.combined_params_buf = combined_params_buf;

-  auto scope = std::make_shared<framework::Scope>();
+  auto scope = std::make_shared<Scope>();
   program.scope = scope;
   InitMemoryFromProgram(originProgramDesc, scope);
   FusionAndPrintInfos(optimize, can_add_split, program, originProgramDesc);
@@ -190,9 +192,17 @@ const framework::Program<Dtype, P> Loader<Dtype, P>::LoadCombinedMemory(
   return program;
 }

-template class Loader<CPU, Precision::FP32>;
-template class Loader<GPU_MALI, Precision::FP32>;
-template class Loader<GPU_CL, Precision::FP32>;
-template class Loader<FPGA, Precision::FP32>;
+template
+class Loader<CPU, Precision::FP32>;
+
+template
+class Loader<GPU_MALI, Precision::FP32>;
+template
+class Loader<GPU_CL, Precision::FP32>;
+
+template
+class Loader<FPGA, Precision::FP32>;
+
+}
 }  // namespace paddle_mobile
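Note: loader.cpp keeps all three loading paths intact -- Load(dirname, ...) for the separate-files format, Load(model_path, para_path, ...) for the combined format, and LoadCombinedMemory(...) for buffers already in memory; only the namespace and formatting change. A usage sketch under the new spelling (the directory and file paths are placeholders):

    #include "framework/loader.h"

    void LoadBothFormats() {
      paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
      // separate format: a __model__ file plus one file per parameter
      auto separate = loader.Load("some_model_dir", /*optimize=*/true);
      // combined format: one model file and one params file
      auto combined = loader.Load("some_model_dir/model", "some_model_dir/params");
    }
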
*/ #include "framework/program/program.h" namespace paddle_mobile { +namespace framework{ template class Loader { @@ -28,7 +29,7 @@ class Loader { * @b load separate format fluid model * @b 加载分开形式的 fluid 模型 * */ - const framework::Program Load(const std::string &dirname, + const Program Load(const std::string &dirname, bool optimize = false, bool quantification = false, bool can_add_split = false); @@ -37,21 +38,22 @@ class Loader { * @b load combine format fluid mode * @b 加载结合在一起格式的模型 * */ - const framework::Program Load(const std::string &model_path, + const Program Load(const std::string &model_path, const std::string ¶_path, bool optimize = false, bool quantification = false); - const framework::Program LoadCombinedMemory( + const Program LoadCombinedMemory( size_t model_len, const uint8_t *model_buf, size_t combined_params_len, const uint8_t *combined_params_buf, bool optimize = false, bool quantification = false); private: - const framework::Program LoadProgram(const std::string &model_path, + const Program LoadProgram(const std::string &model_path, bool optimize = false, bool quantification = false, bool can_add_split = false); }; +} } // namespace paddle_mobile diff --git a/src/io/paddle_mobile.cpp b/src/io/paddle_mobile.cpp index 0b84f1ff45e519dbbc244863db481f2364907a89..c98e7ac684c3eff9417db9b102f6c77fbd38932c 100644 --- a/src/io/paddle_mobile.cpp +++ b/src/io/paddle_mobile.cpp @@ -29,13 +29,13 @@ bool PaddleMobile::Load(const std::string &dirname, bool optimize, bool quantification, int batch_size, bool loddable) { if (loader_.get() == nullptr) { - loader_ = std::make_shared>(); + loader_ = std::make_shared>(); } else { LOG(kLOG_INFO) << "loader inited"; } if (executor_.get() == nullptr) { - executor_ = std::make_shared>( + executor_ = std::make_shared>( loader_->Load(dirname, optimize, quantification), batch_size, optimize, loddable); } else { @@ -51,13 +51,13 @@ bool PaddleMobile::Load(const std::string &model_path, bool quantification, int batch_size, bool loddable) { if (loader_.get() == nullptr) { - loader_ = std::make_shared>(); + loader_ = std::make_shared>(); } else { LOG(kLOG_INFO) << "loader inited"; } if (executor_.get() == nullptr) { - executor_ = std::make_shared>( + executor_ = std::make_shared>( loader_->Load(model_path, para_path, optimize, quantification), batch_size, optimize, loddable); } else { @@ -76,13 +76,13 @@ bool PaddleMobile::LoadCombinedMemory( bool quantification = false; if (loader_.get() == nullptr) { - loader_ = std::make_shared>(); + loader_ = std::make_shared>(); } else { LOG(kLOG_INFO) << "loader inited"; } if (executor_.get() == nullptr) { - executor_ = std::make_shared>( + executor_ = std::make_shared>( loader_->LoadCombinedMemory(model_len, model_buf, combined_params_len, combined_params_buf, optimise, quantification), diff --git a/src/io/paddle_mobile.h b/src/io/paddle_mobile.h index 73c5553d91c1b4781718265aba8b7fa8dd5e2777..cf753fa7a64c08ded2ec756731780769f35531d3 100644 --- a/src/io/paddle_mobile.h +++ b/src/io/paddle_mobile.h @@ -23,8 +23,8 @@ limitations under the License. 
*/ #include "common/types.h" #include "framework/tensor.h" -#include "io/executor.h" -#include "io/loader.h" +#include "framework/executor.h" +#include "framework/loader.h" namespace paddle_mobile { @@ -90,8 +90,8 @@ class PaddleMobile { ~PaddleMobile(); private: - std::shared_ptr> loader_; - std::shared_ptr> executor_; + std::shared_ptr> loader_; + std::shared_ptr> executor_; #ifdef PADDLE_MOBILE_FPGA public: diff --git a/test/executor_for_test.h b/test/executor_for_test.h index 93847af20a6d48a6df33dc50f6c6a1db76facf51..fbef578dfca461681c4dd07688eb650a0b91cb8f 100644 --- a/test/executor_for_test.h +++ b/test/executor_for_test.h @@ -19,7 +19,7 @@ limitations under the License. */ #include "common/log.h" #include "framework/op_registry.h" -#include "io/executor.h" +#include "framework/executor.h" #include "operators/conv_op.h" #include "operators/elementwise_add_op.h" #include "operators/pool_op.h" @@ -29,7 +29,7 @@ limitations under the License. */ #include "operators/softmax_op.h" #include "operators/transpose_op.h" -using paddle_mobile::Executor; +using paddle_mobile::framework::Executor; using paddle_mobile::framework::BlockDesc; using paddle_mobile::framework::DDim; using paddle_mobile::framework::LoDTensor; diff --git a/test/fpga/test_concat_op.cpp b/test/fpga/test_concat_op.cpp index 5d1a5828b36b3d9ed371a271af6db82657ff1596..44b9f4971bbd5cc69e1f663ae71e27e69c31a04b 100644 --- a/test/fpga/test_concat_op.cpp +++ b/test/fpga/test_concat_op.cpp @@ -16,7 +16,7 @@ limitations under the License. */ #include "operators/concat_op.h" int main() { - paddle_mobile::Loader loader; + paddle_mobile::framework::Loader loader; auto program = loader.Load(g_googlenet); PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr, "program file read fail"); diff --git a/test/framework/test_load.cpp b/test/framework/test_load.cpp index 25cad4feaa706899122902dee2a8f0c915e78975..202d6608d50bdc9691e3739b2e721d427847e723 100644 --- a/test/framework/test_load.cpp +++ b/test/framework/test_load.cpp @@ -15,10 +15,10 @@ limitations under the License. */ #include #include "../test_helper.h" -#include "io/loader.h" +#include "framework/loader.h" int main() { - paddle_mobile::Loader loader; + paddle_mobile::framework::Loader loader; // ../../../test/models/googlenet // ../../../test/models/mobilenet // auto program = loader.Load(g_googlenet, true); diff --git a/test/framework/test_optimize.cpp b/test/framework/test_optimize.cpp index 3cae963eca048da221d69c4c336dd4fdfecbb584..2f187eb583bed5bd2bcf1787beb03e521dfcf1b4 100644 --- a/test/framework/test_optimize.cpp +++ b/test/framework/test_optimize.cpp @@ -15,10 +15,10 @@ limitations under the License. 
*/ #include "../test_helper.h" #include "framework/program/program-optimize/node.h" #include "framework/program/program-optimize/program_optimize.h" -#include "io/loader.h" +#include "framework/loader.h" int main() { - paddle_mobile::Loader loader; + paddle_mobile::framework::Loader loader; // "../../../test/models/googlenet" auto program = loader.Load(g_mobilenet_ssd, true); paddle_mobile::framework::ProgramOptimize optimize; diff --git a/test/operators/test_batchnorm_op.cpp b/test/operators/test_batchnorm_op.cpp index 4ccad8c1512036c2400a09575b3775e75b26acce..e66c2977bda20d5b40c0d9ae6d3dccab9d35d599 100644 --- a/test/operators/test_batchnorm_op.cpp +++ b/test/operators/test_batchnorm_op.cpp @@ -127,7 +127,7 @@ template class TestBatchNormOp; int main() { DLOG << "----------**********----------"; DLOG << "begin to run BatchNormOp Test"; - paddle_mobile::Loader loader; + paddle_mobile::framework::Loader loader; auto program = loader.Load(std::string(g_mobilenet_ssd)); /// input x (4,10,2,2) diff --git a/test/operators/test_box_coder_op.cpp b/test/operators/test_box_coder_op.cpp index dac0d0b8051ec1790d6982a13ea31ef3f4a64242..8e598221b3889391531d30d9c3bc2a6c4392eb09 100644 --- a/test/operators/test_box_coder_op.cpp +++ b/test/operators/test_box_coder_op.cpp @@ -115,7 +115,7 @@ template class TestBoxCoderOp; int main() { DLOG << "----------**********----------"; DLOG << "begin to run BoxCoderOp Test"; - paddle_mobile::Loader loader; + paddle_mobile::framework::Loader loader; auto program = loader.Load(std::string(g_mobilenet_ssd)); paddle_mobile::framework::Tensor priorbox; diff --git a/test/operators/test_concat_op.cpp b/test/operators/test_concat_op.cpp index edaa4ce1ddba251886c90262895333b0a56c3a07..1a347a9c37a96f3c31506d0b45f95e05b64292ff 100644 --- a/test/operators/test_concat_op.cpp +++ b/test/operators/test_concat_op.cpp @@ -16,7 +16,7 @@ limitations under the License. */ #include "operators/concat_op.h" int main() { - paddle_mobile::Loader loader; + paddle_mobile::framework::Loader loader; auto program = loader.Load(g_googlenet); PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr, "program file read fail"); diff --git a/test/operators/test_conv_add_relu_op.cpp b/test/operators/test_conv_add_relu_op.cpp index 987f52cd62f91b3bc00cc1ef49bd21913e288d75..f170719218b98d341985a61ca6160884afe4ad3b 100644 --- a/test/operators/test_conv_add_relu_op.cpp +++ b/test/operators/test_conv_add_relu_op.cpp @@ -16,7 +16,7 @@ limitations under the License. */ #include "operators/fusion_conv_add_relu_op.h" int main() { - paddle_mobile::Loader loader; + paddle_mobile::framework::Loader loader; // ../models/image_classification_resnet.inference.model auto program = loader.Load(g_googlenet, true); diff --git a/test/operators/test_cov_op.cpp b/test/operators/test_cov_op.cpp index a85ad9edba5d3e2256b8d7ee7d7d3c5b7200888d..535d82c4be6cedcc77e9e9cf97a9a813f4ca518d 100644 --- a/test/operators/test_cov_op.cpp +++ b/test/operators/test_cov_op.cpp @@ -16,7 +16,7 @@ limitations under the License. 
*/ #include "operators/conv_op.h" int main() { - paddle_mobile::Loader loader; + paddle_mobile::framework::Loader loader; // ../models/image_classification_resnet.inference.model auto program = loader.Load(g_googlenet); diff --git a/test/operators/test_depthwise_conv_op.cpp b/test/operators/test_depthwise_conv_op.cpp index bd2aad19eda896bad3da8a47f5b70b1a923dc1a7..77c76eedc5690412dfee95dd11e8a3fe9ed6ecbe 100644 --- a/test/operators/test_depthwise_conv_op.cpp +++ b/test/operators/test_depthwise_conv_op.cpp @@ -16,7 +16,7 @@ limitations under the License. */ #include "operators/depthwise_conv_op.h" int main() { - paddle_mobile::Loader loader; + paddle_mobile::framework::Loader loader; // ../models/image_classification_resnet.inference.model auto program = loader.Load(g_mobilenet_ssd); diff --git a/test/operators/test_elementwise_add_op.cpp b/test/operators/test_elementwise_add_op.cpp index 0a5e9f7e92701e748df51078b21eb46eec90599d..3922b216cfc6ecf55be251ded02c0c064e2c3ffc 100644 --- a/test/operators/test_elementwise_add_op.cpp +++ b/test/operators/test_elementwise_add_op.cpp @@ -15,7 +15,7 @@ limitations under the License. */ #include "../test_include.h" int main() { - paddle_mobile::Loader loader; + paddle_mobile::framework::Loader loader; auto program = loader.Load(g_resnet); PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr, "program file read fail"); diff --git a/test/operators/test_fusion_conv_add_bn_relu_op.cpp b/test/operators/test_fusion_conv_add_bn_relu_op.cpp index 7764d95ed72da613459233bd55ddcffdc444318f..347bcb40a6156a576842af34920bde838dd83cd8 100644 --- a/test/operators/test_fusion_conv_add_bn_relu_op.cpp +++ b/test/operators/test_fusion_conv_add_bn_relu_op.cpp @@ -17,7 +17,7 @@ limitations under the License. */ #include "operators/fusion_conv_add_bn_relu_op.h" int main() { - paddle_mobile::Loader loader; + paddle_mobile::framework::Loader loader; // ../models/image_classification_resnet.inference.model auto program = loader.Load(g_mobilenet, true); diff --git a/test/operators/test_fusion_fc_op.cpp b/test/operators/test_fusion_fc_op.cpp index a23bde45cb74f0f75e655821b15e66b1cef4c081..4fc290660e062745512fc5c2ecc29c9b7bc5d545 100644 --- a/test/operators/test_fusion_fc_op.cpp +++ b/test/operators/test_fusion_fc_op.cpp @@ -114,7 +114,7 @@ template class TestFcOp; int main() { DLOG << "----------**********----------"; DLOG << "begin to run Fc Test"; - paddle_mobile::Loader loader; + paddle_mobile::framework::Loader loader; // "../../../test/models/googlenet" auto program = loader.Load(g_googlenet); paddle_mobile::framework::ProgramOptimize optimize; diff --git a/test/operators/test_gru_op.cpp b/test/operators/test_gru_op.cpp index 52ab8b54d709391ea263b74a395a635ce50a18af..f2ce833661bfd1b3d751a7ac2d54cfb70114a6c6 100644 --- a/test/operators/test_gru_op.cpp +++ b/test/operators/test_gru_op.cpp @@ -16,7 +16,7 @@ limitations under the License. */ #include "operators/gru_op.h" int main() { - paddle_mobile::Loader loader; + paddle_mobile::framework::Loader loader; auto program = loader.Load(g_nlp); PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr, "program file read fail"); diff --git a/test/operators/test_im2sequence_op.cpp b/test/operators/test_im2sequence_op.cpp index a7512d3bf3cffcb100fe292e50fc7b7b23fa0aa0..4296da2348e7ba165f66a369c8b05e187498b762 100644 --- a/test/operators/test_im2sequence_op.cpp +++ b/test/operators/test_im2sequence_op.cpp @@ -17,7 +17,7 @@ limitations under the License. 
*/ #include "operators/im2sequence_op.h" int main() { - paddle_mobile::Loader loader; + paddle_mobile::framework::Loader loader; auto program = loader.Load(g_ocr_recg); PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr, "program file read fail"); diff --git a/test/operators/test_lrn_op.cpp b/test/operators/test_lrn_op.cpp index d4d9f8da802fc0f5f885a3b2e81cba695776c29e..5d1ac9b4dd7225112ace8bfbb13f926502c77b94 100644 --- a/test/operators/test_lrn_op.cpp +++ b/test/operators/test_lrn_op.cpp @@ -16,7 +16,7 @@ limitations under the License. */ #include "operators/lrn_op.h" int main() { - paddle_mobile::Loader loader; + paddle_mobile::framework::Loader loader; auto program = loader.Load(g_googlenet); PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr, "program file read fail"); diff --git a/test/operators/test_mul_op.cpp b/test/operators/test_mul_op.cpp index 8ebf0926890497c0ed622b69f163a9f6f5c8612b..e3f67b3a9979cfefe054ae2398f4d6a6ed0f9f9e 100644 --- a/test/operators/test_mul_op.cpp +++ b/test/operators/test_mul_op.cpp @@ -16,7 +16,7 @@ limitations under the License. */ #include "operators/mul_op.h" int main() { - paddle_mobile::Loader loader; + paddle_mobile::framework::Loader loader; auto program = loader.Load(g_resnet); PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr, "program file read fail"); diff --git a/test/operators/test_multiclass_nms_op.cpp b/test/operators/test_multiclass_nms_op.cpp index e6c41bd4b3bb241964a23accf4633e65818465be..b19808851a8150e17dfa92f6492dbb5c37df2265 100644 --- a/test/operators/test_multiclass_nms_op.cpp +++ b/test/operators/test_multiclass_nms_op.cpp @@ -126,7 +126,7 @@ template class TestMultiClassNMSOp; int main() { DLOG << "----------**********----------"; DLOG << "begin to run MulticlassNMS Test"; - paddle_mobile::Loader loader; + paddle_mobile::framework::Loader loader; auto program = loader.Load(std::string("../../test/models/mobilenet+ssd")); /// input x (1,3,300,300) diff --git a/test/operators/test_pool_op.cpp b/test/operators/test_pool_op.cpp index 2daecd7b4c1a50c612bc784c801208d2e6f31482..09470caf82eb90df56f7aa79b6873c2a6b94fbef 100644 --- a/test/operators/test_pool_op.cpp +++ b/test/operators/test_pool_op.cpp @@ -16,7 +16,7 @@ limitations under the License. */ #include "operators/pool_op.h" int main() { - paddle_mobile::Loader loader; + paddle_mobile::framework::Loader loader; auto program = loader.Load(std::string(g_googlenet)); if (program.originProgram == nullptr) { DLOG << "program read file"; diff --git a/test/operators/test_prelu_op.cpp b/test/operators/test_prelu_op.cpp index e93d8732d18496721b24cfba1df296250169f8b2..f98c9904ae3799cb863142b0fcb332c74c91ba98 100644 --- a/test/operators/test_prelu_op.cpp +++ b/test/operators/test_prelu_op.cpp @@ -17,7 +17,7 @@ limitations under the License. 
*/ #include "operators/prelu_op.h" int main() { - paddle_mobile::Loader loader; + paddle_mobile::framework::Loader loader; auto program = loader.Load(g_resnet); PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr, "program file read fail"); diff --git a/test/operators/test_prior_box_op.cpp b/test/operators/test_prior_box_op.cpp index 8c697a9a7982f05b71caa5bb5f4d12e50dc9d418..8b9b25a0dab704c6118d8f8e8cd7d0ecff49443f 100644 --- a/test/operators/test_prior_box_op.cpp +++ b/test/operators/test_prior_box_op.cpp @@ -126,7 +126,7 @@ template class TestPriorBoxOp; int main() { DLOG << "----------**********----------"; DLOG << "begin to run PriorBoxOp Test"; - paddle_mobile::Loader loader; + paddle_mobile::framework::Loader loader; auto program = loader.Load(std::string(g_mobilenet_ssd)); /// input x (1,3,300,300) diff --git a/test/operators/test_relu_op.cpp b/test/operators/test_relu_op.cpp index fad0d0c30a126cc2730e4aa8b87364eee9fc8209..542d3d18f6a383c1e03962ba845b39c04a51631b 100644 --- a/test/operators/test_relu_op.cpp +++ b/test/operators/test_relu_op.cpp @@ -16,7 +16,7 @@ limitations under the License. */ #include "operators/relu_op.h" int main() { - paddle_mobile::Loader loader; + paddle_mobile::framework::Loader loader; auto program = loader.Load(g_resnet); PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr, "program file read fail"); diff --git a/test/operators/test_reshape_op.cpp b/test/operators/test_reshape_op.cpp index 3541151d8a1a286527e715f402df381d2efc094c..ff3299f5e818d8169a356323213707417d747dba 100644 --- a/test/operators/test_reshape_op.cpp +++ b/test/operators/test_reshape_op.cpp @@ -16,7 +16,7 @@ limitations under the License. */ #include "operators/reshape_op.h" int main() { - paddle_mobile::Loader loader; + paddle_mobile::framework::Loader loader; auto program = loader.Load(std::string(g_mobilenet_ssd)); if (program.originProgram == nullptr) { DLOG << "program read file"; diff --git a/test/operators/test_resize_op.cpp b/test/operators/test_resize_op.cpp index f4dcaa6885d92a727e8c97d5106c3b6913a4ab33..c452ef8d850f97f6988688c4e47d5041220cb828 100644 --- a/test/operators/test_resize_op.cpp +++ b/test/operators/test_resize_op.cpp @@ -16,7 +16,7 @@ limitations under the License. */ #include "operators/resize_op.h" int main() { - paddle_mobile::Loader loader; + paddle_mobile::framework::Loader loader; auto program = loader.Load(std::string(g_mobilenet_ssd)); if (program.originProgram == nullptr) { DLOG << "program read file"; diff --git a/test/operators/test_sigmoid_op.cpp b/test/operators/test_sigmoid_op.cpp index 37d05a44b5b66f2428eedd8e8719cd127747ee08..4f466845b97b124b971b76e776f177b3c06ef937 100644 --- a/test/operators/test_sigmoid_op.cpp +++ b/test/operators/test_sigmoid_op.cpp @@ -15,7 +15,7 @@ limitations under the License. */ #include "../../src/operators/kernel/sigmoid_kernel.h" #include "../../src/operators/kernel/central-arm-func/sigmoid_arm_func.h" #include "../test_helper.h" -#include "io/executor.h" +#include "framework/executor.h" int main() { paddle_mobile::framework::Tensor input; diff --git a/test/operators/test_softmax_op.cpp b/test/operators/test_softmax_op.cpp index a0184729a8bc5e6b0ba952923eecd5242cfe36d4..f31bcb4e455a6b9699cf96271310681e51d4c6a7 100644 --- a/test/operators/test_softmax_op.cpp +++ b/test/operators/test_softmax_op.cpp @@ -17,7 +17,7 @@ limitations under the License. 
*/ #include "operators/softmax_op.h" int main() { - paddle_mobile::Loader loader; + paddle_mobile::framework::Loader loader; auto program = loader.Load(std::string(g_mobilenet)); if (program.originProgram == nullptr) { DLOG << "program read file"; diff --git a/test/operators/test_transpose_op.cpp b/test/operators/test_transpose_op.cpp index f83ee23c25d8f2588e0fe40d5fabc6114129b995..263fdcfa0ed448b126f4b9cb01ace889318eeddb 100644 --- a/test/operators/test_transpose_op.cpp +++ b/test/operators/test_transpose_op.cpp @@ -16,7 +16,7 @@ limitations under the License. */ #include "../test_include.h" #include "operators/transpose_op.h" int main() { - paddle_mobile::Loader loader; + paddle_mobile::framework::Loader loader; auto program = loader.Load(std::string(g_mobilenet_ssd)); if (program.originProgram == nullptr) { DLOG << "program read file";