Commit c9bcd3c5 authored by Ray Liu, committed by GitHub

Merge pull request #1034 from codeWorm2015/opencl

move loader and executor to framework
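For code that instantiated these classes directly, the practical effect of this move is a header-path and namespace change: `io/loader.h` and `io/executor.h` become `framework/loader.h` and `framework/executor.h`, and `Loader`/`Executor` now live in `paddle_mobile::framework`. A minimal migration sketch (the model path is illustrative, not part of this PR):

```cpp
// Before: #include "io/loader.h" and paddle_mobile::Loader<...>
// After this PR (sketch; the googlenet model path is illustrative):
#include "framework/executor.h"
#include "framework/loader.h"

int main() {
  paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
  // Load() keeps its signature: (dirname, optimize, quantification, can_add_split)
  auto program = loader.Load("../models/googlenet", /*optimize=*/true);
  // Executor ctor: (program, batch_size, use_optimize, loddable)
  paddle_mobile::framework::Executor<paddle_mobile::CPU> executor(
      program, /*batch_size=*/1, /*use_optimize=*/true, /*loddable=*/false);
  return 0;
}
```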
@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
-#include "io/executor.h"
+#include "executor.h"
#include <operators/math/gemm.h>
#include <algorithm>
#include <vector>
@@ -38,6 +38,8 @@ limitations under the License. */
#endif
namespace paddle_mobile {
+namespace framework {
using framework::Variable;
char *Get_binary_data(std::string filename) {
@@ -57,7 +59,8 @@ char *Get_binary_data(std::string filename) {
}
#pragma mark - executor
-template <typename Dtype, Precision P>
+template<typename Dtype, Precision P>
Executor<Dtype, P>::Executor(const framework::Program<Dtype> p, int batch_size,
bool use_optimize, bool loddable)
: program_(p),
@@ -115,7 +118,7 @@ Executor<Dtype, P>::Executor(const framework::Program<Dtype> p, int batch_size,
}
}
-template <typename Dtype, Precision P>
+template<typename Dtype, Precision P>
void Executor<Dtype, P>::LoadMemory(const framework::VarDesc var_desc,
framework::LoDTensor *tensor, char **data) {
// 1. version
@@ -223,7 +226,7 @@ void Executor<Dtype, P>::LoadMemory(const framework::VarDesc var_desc,
}
}
-template <typename Dtype, Precision P>
+template<typename Dtype, Precision P>
void Executor<Dtype, P>::InitMemory() {
for (const auto &block : to_predict_program_->Blocks()) {
for (const auto &var_desc : block->Vars()) {
@@ -257,12 +260,12 @@ void Executor<Dtype, P>::InitMemory() {
}
}
-template <typename Dtype, Precision P>
+template<typename Dtype, Precision P>
void Executor<Dtype, P>::InitCombineMemory() {
char *origin_data;
if (program_.combined_params_buf && program_.combined_params_len) {
LOG(kLOG_INFO) << "use outer memory";
-origin_data = (char *)program_.combined_params_buf;
+origin_data = (char *) program_.combined_params_buf;
} else {
LOG(kLOG_INFO) << " begin init combine memory";
origin_data = Get_binary_data(program_.para_path);
@@ -297,7 +300,7 @@ void Executor<Dtype, P>::InitCombineMemory() {
LOG(kLOG_INFO) << " end init combine memory ";
}
-template <typename Dtype, Precision P>
+template<typename Dtype, Precision P>
bool Executor<Dtype, P>::varInputMemory(
const std::shared_ptr<framework::VarDesc> &var_desc, Variable *var,
framework::LoDTensor *tensor) const {
@@ -335,13 +338,15 @@ bool Executor<Dtype, P>::varInputMemory(
break;
}
-default: { break; }
+default: {
+break;
+}
}
return is_mute_match;
}
-template <typename Dtype, Precision P>
+template<typename Dtype, Precision P>
std::shared_ptr<framework::Tensor> Executor<Dtype, P>::Predict(
const framework::Tensor &t) {
framework::Variable *g_feed_value = program_.scope->Var("feed");
@@ -483,7 +488,7 @@ std::shared_ptr<framework::Tensor> Executor<Dtype, P>::Predict(
return std::make_shared<framework::Tensor>(framework::Tensor(*output_tensor));
}
-template <typename Dtype, Precision P>
+template<typename Dtype, Precision P>
std::shared_ptr<framework::LoDTensor> Executor<Dtype, P>::PredictLod(
const framework::LoDTensor &t) {
framework::Variable *g_feed_value = program_.scope->Var("feed");
@@ -633,13 +638,13 @@ std::shared_ptr<framework::LoDTensor> Executor<Dtype, P>::PredictLod(
framework::LoDTensor(*output_tensor));
}
-template <typename Dtype, Precision P>
+template<typename Dtype, Precision P>
std::shared_ptr<framework::Tensor> Executor<Dtype, P>::Predict(
const framework::Tensor &t, int block_id) {
return Predict(t);
}
-template <typename Dtype, Precision P>
+template<typename Dtype, Precision P>
std::vector<typename Executor<Dtype, P>::Ptype> Executor<Dtype, P>::Predict(
const std::vector<Ptype> &input, const std::vector<int64_t> &dims) {
framework::Tensor tensor(input, framework::make_ddim(dims));
@@ -725,9 +730,17 @@ void Executor<Dtype, P>::Predict_To(int end) {
};
#endif
-template class Executor<CPU, Precision::FP32>;
-template class Executor<FPGA, Precision::FP32>;
-template class Executor<GPU_CL, Precision::FP32>;
-template class Executor<GPU_MALI, Precision::FP32>;
+template
+class Executor<CPU, Precision::FP32>;
+template
+class Executor<FPGA, Precision::FP32>;
+template
+class Executor<GPU_CL, Precision::FP32>;
+template
+class Executor<GPU_MALI, Precision::FP32>;
+}
} // namespace paddle_mobile
@@ -33,8 +33,9 @@ limitations under the License. */
using std::string;
namespace paddle_mobile {
+namespace framework {
-template <typename Dtype = CPU, Precision P = Precision::FP32>
+template<typename Dtype = CPU, Precision P = Precision::FP32>
class Executor {
public:
typedef typename PrecisionTrait<P>::ptype Ptype;
@@ -50,11 +51,13 @@ class Executor {
* @b to predict
* */
std::shared_ptr<framework::Tensor> Predict(const framework::Tensor &t);
/*
* @b to predict
* */
std::shared_ptr<framework::LoDTensor> PredictLod(
const framework::LoDTensor &t);
/*
* @b to predict with vector and dim
*
@@ -65,15 +68,21 @@
protected:
Executor() = default;
void InitMemory();
void LoadMemory(const framework::VarDesc var_desc,
framework::LoDTensor *tensor, char **data);
void InitCombineMemory();
framework::Program<Dtype> program_;
int batch_size_ = 1;
std::shared_ptr<framework::ProgramDesc> to_predict_program_;
std::shared_ptr<framework::Tensor> Predict(const framework::Tensor &t,
int block_id);
std::map<framework::BlockDesc,
std::vector<std::shared_ptr<framework::OperatorBase<Dtype>>>>
ops_of_block_;
@@ -106,4 +115,5 @@
#endif
};
+}
} // namespace paddle_mobile
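As the header above shows, `Executor` exposes `Predict` overloads for a `framework::Tensor`, a LoD tensor (`PredictLod`), and a flat value vector plus dims. A hedged usage sketch of the vector overload (the shape and fill value are illustrative, not required by the API):

```cpp
// Sketch only: assumes an executor constructed as in the first sketch above.
std::vector<float> input(1 * 3 * 224 * 224, 0.5f);   // illustrative 1x3x224x224 input
std::vector<int64_t> dims = {1, 3, 224, 224};
// Returns std::vector<Ptype>, where Ptype = PrecisionTrait<P>::ptype (float for FP32).
auto output = executor.Predict(input, dims);
```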
@@ -12,13 +12,13 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
-#include "io/loader.h"
+#include "loader.h"
#include "framework/lod_tensor.h"
#include "framework/program/program-optimize/program_optimize.h"
namespace paddle_mobile {
-using framework::Variable;
+namespace framework {
/**
* mute and resize tensors according to originProgramDesc and scope in loadParams
@@ -27,22 +27,22 @@
* @param scope
*/
void InitMemoryFromProgram(
-std::shared_ptr<framework::ProgramDesc> &originProgramDesc,
-std::shared_ptr<framework::Scope> &scope) {
+std::shared_ptr<ProgramDesc> &originProgramDesc,
+std::shared_ptr<Scope> &scope) {
for (const auto &block : originProgramDesc.get()->Blocks()) {
for (const auto &var_desc : block->Vars()) {
auto var = scope.get()->Var(var_desc->Name());
-if (var_desc->Type() == framework::VARTYPE_TYPE_LOD_TENSOR) {
+if (var_desc->Type() == VARTYPE_TYPE_LOD_TENSOR) {
if (var_desc->Persistable()) {
auto dim = var_desc->Tensor_desc().Dims();
-auto tensor = var->GetMutable<framework::LoDTensor>();
-tensor->Resize(framework::make_ddim(dim));
+auto tensor = var->GetMutable<LoDTensor>();
+tensor->Resize(make_ddim(dim));
} else {
auto dim = var_desc->Tensor_desc().Dims();
PADDLE_MOBILE_ENFORCE(dim.size() > 0, "dim size is 0");
dim[0] = 1;
-auto tensor = var->GetMutable<framework::LoDTensor>();
-tensor->Resize(framework::make_ddim(dim));
+auto tensor = var->GetMutable<LoDTensor>();
+tensor->Resize(make_ddim(dim));
}
} else {
// TODO(codeWorm): some.
@@ -50,6 +50,7 @@ void InitMemoryFromProgram(
}
}
}
/**
 * fusion and print some info
* @tparam Dtype
@@ -59,12 +60,12 @@
* @param program
* @param originProgramDesc
*/
-template <typename Dtype, Precision P>
+template<typename Dtype, Precision P>
void FusionAndPrintInfos(
-bool &optimize, bool &can_add_split, framework::Program<Dtype, P> &program,
-const std::shared_ptr<framework::ProgramDesc> &originProgramDesc) {
+bool &optimize, bool &can_add_split, Program<Dtype, P> &program,
+const std::shared_ptr<ProgramDesc> &originProgramDesc) {
if (optimize) {
-framework::ProgramOptimize program_optimize;
+ProgramOptimize program_optimize;
program.optimizeProgram =
program_optimize.FusionOptimize(originProgramDesc, can_add_split);
}
@@ -74,6 +75,7 @@
originProgramDesc->Description("program: ");
}
}
static size_t ReadBuffer(const char *file_name, uint8_t **out) {
FILE *fp;
fp = fopen(file_name, "rb");
@@ -96,8 +98,8 @@ static size_t ReadBuffer(const char *file_name, uint8_t **out) {
return cur_len;
}
-template <typename Dtype, Precision P>
-const framework::Program<Dtype, P> Loader<Dtype, P>::Load(
+template<typename Dtype, Precision P>
+const Program<Dtype, P> Loader<Dtype, P>::Load(
const std::string &dirname, bool optimize, bool quantification,
bool can_add_split) {
auto program = this->LoadProgram(dirname + "/__model__", optimize,
@@ -106,8 +108,8 @@ const framework::Program<Dtype, P> Loader<Dtype, P>::Load(
return program;
}
-template <typename Dtype, Precision P>
-const framework::Program<Dtype, P> Loader<Dtype, P>::Load(
+template<typename Dtype, Precision P>
+const Program<Dtype, P> Loader<Dtype, P>::Load(
const std::string &model_path, const std::string &para_path, bool optimize,
bool quantification) {
auto program = this->LoadProgram(model_path, optimize, quantification);
@@ -118,8 +120,8 @@ const framework::Program<Dtype, P> Loader<Dtype, P>::Load(
auto program = this->LoadProgram(model_path, optimize, quantification);
}
-template <typename Dtype, Precision P>
-const framework::Program<Dtype, P> Loader<Dtype, P>::LoadProgram(
+template<typename Dtype, Precision P>
+const Program<Dtype, P> Loader<Dtype, P>::LoadProgram(
const std::string &model_path, bool optimize, bool quantification,
bool can_add_split) {
std::string model_filename = model_path;
@@ -136,14 +138,14 @@ const framework::Program<Dtype, P> Loader<Dtype, P>::LoadProgram(
//
DLOG << "n_ops: " << (*c_program->blocks)->n_ops;
//
-auto originProgramDesc = std::make_shared<framework::ProgramDesc>(c_program);
+auto originProgramDesc = std::make_shared<ProgramDesc>(c_program);
-framework::Program<Dtype, P> program;
+Program<Dtype, P> program;
program.originProgram = originProgramDesc;
program.quantification = quantification;
program.combined_params_len = 0;
program.combined_params_buf = nullptr;
-auto scope = std::make_shared<framework::Scope>();
+auto scope = std::make_shared<Scope>();
program.scope = scope;
// use originProgramDesc and scope to init tensors
@@ -155,8 +157,8 @@ const framework::Program<Dtype, P> Loader<Dtype, P>::LoadProgram(
return program;
}
-template <typename Dtype, Precision P>
-const framework::Program<Dtype, P> Loader<Dtype, P>::LoadCombinedMemory(
+template<typename Dtype, Precision P>
+const Program<Dtype, P> Loader<Dtype, P>::LoadCombinedMemory(
size_t read_size, const uint8_t *buf, size_t combined_params_len,
const uint8_t *combined_params_buf, bool optimize, bool quantification) {
bool can_add_split = false;
@@ -172,16 +174,16 @@ const framework::Program<Dtype, P> Loader<Dtype, P>::LoadCombinedMemory(
DLOG << "n_ops: " << (*c_program->blocks)->n_ops;
//
-auto originProgramDesc = std::make_shared<framework::ProgramDesc>(c_program);
+auto originProgramDesc = std::make_shared<ProgramDesc>(c_program);
-framework::Program<Dtype, P> program;
+Program<Dtype, P> program;
program.combined = true;
program.originProgram = originProgramDesc;
program.quantification = quantification;
program.combined_params_len = combined_params_len;
program.combined_params_buf = combined_params_buf;
-auto scope = std::make_shared<framework::Scope>();
+auto scope = std::make_shared<Scope>();
program.scope = scope;
InitMemoryFromProgram(originProgramDesc, scope);
FusionAndPrintInfos(optimize, can_add_split, program, originProgramDesc);
@@ -190,9 +192,17 @@ const framework::Program<Dtype, P> Loader<Dtype, P>::LoadCombinedMemory(
return program;
}
-template class Loader<CPU, Precision::FP32>;
-template class Loader<FPGA, Precision::FP32>;
-template class Loader<GPU_MALI, Precision::FP32>;
-template class Loader<GPU_CL, Precision::FP32>;
+template
+class Loader<CPU, Precision::FP32>;
+template
+class Loader<FPGA, Precision::FP32>;
+template
+class Loader<GPU_MALI, Precision::FP32>;
+template
+class Loader<GPU_CL, Precision::FP32>;
+}
} // namespace paddle_mobile
@@ -20,6 +20,7 @@ limitations under the License. */
#include "framework/program/program.h"
namespace paddle_mobile {
+namespace framework {
template <typename Dtype = CPU, Precision P = Precision::FP32>
class Loader {
@@ -28,7 +29,7 @@ class Loader {
 * @b load separate format fluid model
 * @b load the fluid model saved as separate files
 * */
-const framework::Program<Dtype, P> Load(const std::string &dirname,
+const Program<Dtype, P> Load(const std::string &dirname,
bool optimize = false,
bool quantification = false,
bool can_add_split = false);
@@ -37,21 +38,22 @@ class Loader {
 * @b load combine format fluid model
 * @b load the fluid model saved in combined format
 * */
-const framework::Program<Dtype, P> Load(const std::string &model_path,
+const Program<Dtype, P> Load(const std::string &model_path,
const std::string &para_path,
bool optimize = false,
bool quantification = false);
-const framework::Program<Dtype, P> LoadCombinedMemory(
+const Program<Dtype, P> LoadCombinedMemory(
size_t model_len, const uint8_t *model_buf, size_t combined_params_len,
const uint8_t *combined_params_buf, bool optimize = false,
bool quantification = false);
private:
-const framework::Program<Dtype, P> LoadProgram(const std::string &model_path,
+const Program<Dtype, P> LoadProgram(const std::string &model_path,
bool optimize = false,
bool quantification = false,
bool can_add_split = false);
};
+}
} // namespace paddle_mobile
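The header above declares three entry points: `Load(dirname)` for separate-file models, `Load(model_path, para_path)` for combined-file models, and `LoadCombinedMemory` for models already resident in memory. A sketch of the in-memory path (buffer acquisition is the caller's job; the variables here are placeholders):

```cpp
// Sketch: model_buf/params_buf would come from the application (file read,
// mmap, embedded asset, ...); nullptr/0 are placeholders. Note the returned
// program retains combined_params_buf, so that buffer must outlive it.
const uint8_t *model_buf = nullptr;   // model bytes (placeholder)
const uint8_t *params_buf = nullptr;  // parameter bytes (placeholder)
size_t model_len = 0, params_len = 0;

paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
auto program = loader.LoadCombinedMemory(model_len, model_buf,
                                         params_len, params_buf,
                                         /*optimize=*/false,
                                         /*quantification=*/false);
```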
@@ -29,13 +29,13 @@ bool PaddleMobile<Dtype, P>::Load(const std::string &dirname, bool optimize,
bool quantification, int batch_size,
bool loddable) {
if (loader_.get() == nullptr) {
-loader_ = std::make_shared<Loader<Dtype, P>>();
+loader_ = std::make_shared<framework::Loader<Dtype, P>>();
} else {
LOG(kLOG_INFO) << "loader inited";
}
if (executor_.get() == nullptr) {
-executor_ = std::make_shared<Executor<Dtype, P>>(
+executor_ = std::make_shared<framework::Executor<Dtype, P>>(
loader_->Load(dirname, optimize, quantification), batch_size, optimize,
loddable);
} else {
@@ -51,13 +51,13 @@ bool PaddleMobile<Dtype, P>::Load(const std::string &model_path,
bool quantification, int batch_size,
bool loddable) {
if (loader_.get() == nullptr) {
-loader_ = std::make_shared<Loader<Dtype, P>>();
+loader_ = std::make_shared<framework::Loader<Dtype, P>>();
} else {
LOG(kLOG_INFO) << "loader inited";
}
if (executor_.get() == nullptr) {
-executor_ = std::make_shared<Executor<Dtype, P>>(
+executor_ = std::make_shared<framework::Executor<Dtype, P>>(
loader_->Load(model_path, para_path, optimize, quantification),
batch_size, optimize, loddable);
} else {
@@ -76,13 +76,13 @@ bool PaddleMobile<Dtype, P>::LoadCombinedMemory(
bool quantification = false;
if (loader_.get() == nullptr) {
-loader_ = std::make_shared<Loader<Dtype, P>>();
+loader_ = std::make_shared<framework::Loader<Dtype, P>>();
} else {
LOG(kLOG_INFO) << "loader inited";
}
if (executor_.get() == nullptr) {
-executor_ = std::make_shared<Executor<Dtype, P>>(
+executor_ = std::make_shared<framework::Executor<Dtype, P>>(
loader_->LoadCombinedMemory(model_len, model_buf, combined_params_len,
combined_params_buf, optimise,
quantification),
......
@@ -23,8 +23,8 @@ limitations under the License. */
#include "common/types.h"
#include "framework/tensor.h"
-#include "io/executor.h"
-#include "io/loader.h"
+#include "framework/executor.h"
+#include "framework/loader.h"
namespace paddle_mobile {
@@ -90,8 +90,8 @@
~PaddleMobile();
private:
-std::shared_ptr<Loader<Dtype, P>> loader_;
-std::shared_ptr<Executor<Dtype, P>> executor_;
+std::shared_ptr<framework::Loader<Dtype, P>> loader_;
+std::shared_ptr<framework::Executor<Dtype, P>> executor_;
#ifdef PADDLE_MOBILE_FPGA
public:
......
@@ -19,7 +19,7 @@ limitations under the License. */
#include "common/log.h"
#include "framework/op_registry.h"
-#include "io/executor.h"
+#include "framework/executor.h"
#include "operators/conv_op.h"
#include "operators/elementwise_add_op.h"
#include "operators/pool_op.h"
@@ -29,7 +29,7 @@ limitations under the License. */
#include "operators/softmax_op.h"
#include "operators/transpose_op.h"
-using paddle_mobile::Executor;
+using paddle_mobile::framework::Executor;
using paddle_mobile::framework::BlockDesc;
using paddle_mobile::framework::DDim;
using paddle_mobile::framework::LoDTensor;
......
@@ -16,7 +16,7 @@ limitations under the License. */
#include "operators/concat_op.h"
int main() {
-paddle_mobile::Loader<paddle_mobile::FPGA> loader;
+paddle_mobile::framework::Loader<paddle_mobile::FPGA> loader;
auto program = loader.Load(g_googlenet);
PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr,
"program file read fail");
......
@@ -15,10 +15,10 @@ limitations under the License. */
#include <string>
#include "../test_helper.h"
-#include "io/loader.h"
+#include "framework/loader.h"
int main() {
-paddle_mobile::Loader<paddle_mobile::CPU> loader;
+paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
// ../../../test/models/googlenet
// ../../../test/models/mobilenet
// auto program = loader.Load(g_googlenet, true);
......
@@ -15,10 +15,10 @@ limitations under the License. */
#include "../test_helper.h"
#include "framework/program/program-optimize/node.h"
#include "framework/program/program-optimize/program_optimize.h"
-#include "io/loader.h"
+#include "framework/loader.h"
int main() {
-paddle_mobile::Loader<paddle_mobile::CPU> loader;
+paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
// "../../../test/models/googlenet"
auto program = loader.Load(g_mobilenet_ssd, true);
paddle_mobile::framework::ProgramOptimize optimize;
......
@@ -127,7 +127,7 @@ template class TestBatchNormOp<CPU>;
int main() {
DLOG << "----------**********----------";
DLOG << "begin to run BatchNormOp Test";
-paddle_mobile::Loader<paddle_mobile::CPU> loader;
+paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
auto program = loader.Load(std::string(g_mobilenet_ssd));
/// input x (4,10,2,2)
......
@@ -115,7 +115,7 @@ template class TestBoxCoderOp<CPU>;
int main() {
DLOG << "----------**********----------";
DLOG << "begin to run BoxCoderOp Test";
-paddle_mobile::Loader<paddle_mobile::CPU> loader;
+paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
auto program = loader.Load(std::string(g_mobilenet_ssd));
paddle_mobile::framework::Tensor priorbox;
......
@@ -16,7 +16,7 @@ limitations under the License. */
#include "operators/concat_op.h"
int main() {
-paddle_mobile::Loader<paddle_mobile::CPU> loader;
+paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
auto program = loader.Load(g_googlenet);
PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr,
"program file read fail");
......
@@ -16,7 +16,7 @@ limitations under the License. */
#include "operators/fusion_conv_add_relu_op.h"
int main() {
-paddle_mobile::Loader<paddle_mobile::CPU> loader;
+paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
// ../models/image_classification_resnet.inference.model
auto program = loader.Load(g_googlenet, true);
......
@@ -16,7 +16,7 @@ limitations under the License. */
#include "operators/conv_op.h"
int main() {
-paddle_mobile::Loader<paddle_mobile::GPU_MALI> loader;
+paddle_mobile::framework::Loader<paddle_mobile::GPU_MALI> loader;
// ../models/image_classification_resnet.inference.model
auto program = loader.Load(g_googlenet);
......
@@ -16,7 +16,7 @@ limitations under the License. */
#include "operators/depthwise_conv_op.h"
int main() {
-paddle_mobile::Loader<paddle_mobile::CPU> loader;
+paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
// ../models/image_classification_resnet.inference.model
auto program = loader.Load(g_mobilenet_ssd);
......
@@ -15,7 +15,7 @@ limitations under the License. */
#include "../test_include.h"
int main() {
-paddle_mobile::Loader<paddle_mobile::CPU> loader;
+paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
auto program = loader.Load(g_resnet);
PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr,
"program file read fail");
......
@@ -17,7 +17,7 @@ limitations under the License. */
#include "operators/fusion_conv_add_bn_relu_op.h"
int main() {
-paddle_mobile::Loader<paddle_mobile::CPU> loader;
+paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
// ../models/image_classification_resnet.inference.model
auto program = loader.Load(g_mobilenet, true);
......
@@ -114,7 +114,7 @@ template class TestFcOp<CPU>;
int main() {
DLOG << "----------**********----------";
DLOG << "begin to run Fc Test";
-paddle_mobile::Loader<paddle_mobile::CPU> loader;
+paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
// "../../../test/models/googlenet"
auto program = loader.Load(g_googlenet);
paddle_mobile::framework::ProgramOptimize optimize;
......
@@ -16,7 +16,7 @@ limitations under the License. */
#include "operators/gru_op.h"
int main() {
-paddle_mobile::Loader<paddle_mobile::CPU> loader;
+paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
auto program = loader.Load(g_nlp);
PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr,
"program file read fail");
......
@@ -17,7 +17,7 @@ limitations under the License. */
#include "operators/im2sequence_op.h"
int main() {
-paddle_mobile::Loader<paddle_mobile::CPU> loader;
+paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
auto program = loader.Load(g_ocr_recg);
PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr,
"program file read fail");
......
@@ -16,7 +16,7 @@ limitations under the License. */
#include "operators/lrn_op.h"
int main() {
-paddle_mobile::Loader<paddle_mobile::CPU> loader;
+paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
auto program = loader.Load(g_googlenet);
PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr,
"program file read fail");
......
@@ -16,7 +16,7 @@ limitations under the License. */
#include "operators/mul_op.h"
int main() {
-paddle_mobile::Loader<paddle_mobile::CPU> loader;
+paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
auto program = loader.Load(g_resnet);
PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr,
"program file read fail");
......
@@ -126,7 +126,7 @@ template class TestMultiClassNMSOp<CPU>;
int main() {
DLOG << "----------**********----------";
DLOG << "begin to run MulticlassNMS Test";
-paddle_mobile::Loader<paddle_mobile::CPU> loader;
+paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
auto program = loader.Load(std::string("../../test/models/mobilenet+ssd"));
/// input x (1,3,300,300)
......
@@ -16,7 +16,7 @@ limitations under the License. */
#include "operators/pool_op.h"
int main() {
-paddle_mobile::Loader<paddle_mobile::CPU> loader;
+paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
auto program = loader.Load(std::string(g_googlenet));
if (program.originProgram == nullptr) {
DLOG << "program read file";
......
@@ -17,7 +17,7 @@ limitations under the License. */
#include "operators/prelu_op.h"
int main() {
-paddle_mobile::Loader<paddle_mobile::CPU> loader;
+paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
auto program = loader.Load(g_resnet);
PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr,
"program file read fail");
......
@@ -126,7 +126,7 @@ template class TestPriorBoxOp<CPU>;
int main() {
DLOG << "----------**********----------";
DLOG << "begin to run PriorBoxOp Test";
-paddle_mobile::Loader<paddle_mobile::CPU> loader;
+paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
auto program = loader.Load(std::string(g_mobilenet_ssd));
/// input x (1,3,300,300)
......
@@ -16,7 +16,7 @@ limitations under the License. */
#include "operators/relu_op.h"
int main() {
-paddle_mobile::Loader<paddle_mobile::CPU> loader;
+paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
auto program = loader.Load(g_resnet);
PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr,
"program file read fail");
......
@@ -16,7 +16,7 @@ limitations under the License. */
#include "operators/reshape_op.h"
int main() {
-paddle_mobile::Loader<paddle_mobile::CPU> loader;
+paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
auto program = loader.Load(std::string(g_mobilenet_ssd));
if (program.originProgram == nullptr) {
DLOG << "program read file";
......
@@ -16,7 +16,7 @@ limitations under the License. */
#include "operators/resize_op.h"
int main() {
-paddle_mobile::Loader<paddle_mobile::CPU> loader;
+paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
auto program = loader.Load(std::string(g_mobilenet_ssd));
if (program.originProgram == nullptr) {
DLOG << "program read file";
......
@@ -15,7 +15,7 @@ limitations under the License. */
#include "../../src/operators/kernel/sigmoid_kernel.h"
#include "../../src/operators/kernel/central-arm-func/sigmoid_arm_func.h"
#include "../test_helper.h"
-#include "io/executor.h"
+#include "framework/executor.h"
int main() {
paddle_mobile::framework::Tensor input;
......
@@ -17,7 +17,7 @@ limitations under the License. */
#include "operators/softmax_op.h"
int main() {
-paddle_mobile::Loader<paddle_mobile::CPU> loader;
+paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
auto program = loader.Load(std::string(g_mobilenet));
if (program.originProgram == nullptr) {
DLOG << "program read file";
......
@@ -16,7 +16,7 @@ limitations under the License. */
#include "../test_include.h"
#include "operators/transpose_op.h"
int main() {
-paddle_mobile::Loader<paddle_mobile::CPU> loader;
+paddle_mobile::framework::Loader<paddle_mobile::CPU> loader;
auto program = loader.Load(std::string(g_mobilenet_ssd));
if (program.originProgram == nullptr) {
DLOG << "program read file";
......