diff --git a/paddle/fluid/lite/api/cxx_api_bin.cc b/paddle/fluid/lite/api/cxx_api_bin.cc
index 0cc786c024f6d7447ec57bb4a539ddf8bcdb1c25..96cad7cbe07bb690464c92e2ac6a087412787580 100644
--- a/paddle/fluid/lite/api/cxx_api_bin.cc
+++ b/paddle/fluid/lite/api/cxx_api_bin.cc
@@ -13,17 +13,16 @@
 // limitations under the License.
 
 #include "paddle/fluid/lite/api/cxx_api.h"
-
-#ifndef LITE_WITH_LIGHT_WEIGHT_FRAMEWORK
 #include "paddle/fluid/lite/core/mir/passes.h"
-#endif
-
 #include "paddle/fluid/lite/core/op_registry.h"
 
 namespace paddle {
 namespace lite {
 
 void Run(const char* model_dir) {
+#ifdef LITE_WITH_ARM
+  DeviceInfo::Init();
+#endif
   lite::ExecutorLite predictor;
   std::vector<Place> valid_places({Place{TARGET(kHost), PRECISION(kFloat)},
                                    Place{TARGET(kARM), PRECISION(kFloat)}});
@@ -32,9 +31,9 @@ void Run(const char* model_dir) {
             valid_places);
 
   auto* input_tensor = predictor.GetInput(0);
-  input_tensor->Resize(DDim(std::vector<int64_t>({3, 224, 224})));
+  input_tensor->Resize(DDim(std::vector<int64_t>({1, 3, 224, 224})));
   auto* data = input_tensor->mutable_data<float>();
-  for (int i = 0; i < 3 * 224 * 224; i++) {
+  for (int i = 0; i < input_tensor->dims().production(); i++) {
     data[i] = i;
   }
 
@@ -65,8 +64,8 @@ USE_LITE_OP(feed);
 USE_LITE_OP(fetch);
 USE_LITE_OP(io_copy);
 
-USE_LITE_OP(con2d);
-// USE_LITE_OP(batch_norm);
+USE_LITE_OP(conv2d);
+USE_LITE_OP(batch_norm);
 USE_LITE_OP(relu);
 USE_LITE_OP(depthwise_conv2d);
 USE_LITE_OP(pool2d);
@@ -81,10 +80,10 @@ USE_LITE_KERNEL(fc, kARM, kFloat, kNCHW, def);
 USE_LITE_KERNEL(mul, kARM, kFloat, kNCHW, def);
 USE_LITE_KERNEL(scale, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(con2d, kARM, kFloat, kNCHW, def);
+USE_LITE_KERNEL(conv2d, kARM, kFloat, kNCHW, def);
 USE_LITE_KERNEL(batch_norm, kARM, kFloat, kNCHW, def);
 USE_LITE_KERNEL(relu, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(depthwise_con2d, kARM, kFloat, kNCHW, def);
+USE_LITE_KERNEL(depthwise_conv2d, kARM, kFloat, kNCHW, def);
 USE_LITE_KERNEL(pool2d, kARM, kFloat, kNCHW, def);
 USE_LITE_KERNEL(elementwise_add, kARM, kFloat, kNCHW, def);
 USE_LITE_KERNEL(softmax, kARM, kFloat, kNCHW, def);
diff --git a/paddle/fluid/lite/core/mir/passes.h b/paddle/fluid/lite/core/mir/passes.h
index 60e53257ba01006e71095faa62b083d47e894c60..ac7a19bdfc0b791d92b2c694363bda1b9d0bb9be 100644
--- a/paddle/fluid/lite/core/mir/passes.h
+++ b/paddle/fluid/lite/core/mir/passes.h
@@ -21,6 +21,7 @@ namespace mir {}  // namespace mir
 }  // namespace lite
 }  // namespace paddle
 
+#ifndef LITE_WITH_LIGHT_WEIGHT_FRAMEWORK
 USE_MIR_PASS(demo);
 USE_MIR_PASS(static_kernel_pick_pass);
 USE_MIR_PASS(variable_place_inference_pass);
@@ -28,4 +29,5 @@ USE_MIR_PASS(type_target_transform_pass);
 USE_MIR_PASS(generate_program_pass);
 USE_MIR_PASS(io_copy_kernel_pick_pass);
 USE_MIR_PASS(argument_type_display_pass);
+#endif
 USE_MIR_PASS(runtime_context_assign_pass);
diff --git a/paddle/fluid/lite/core/optimizer.h b/paddle/fluid/lite/core/optimizer.h
index 161e765a98ba54bfaee11fb7b6f3ae1b4bde23d4..c6e22a6f5881569a6e0fe8fcb02635bc560d74c2 100644
--- a/paddle/fluid/lite/core/optimizer.h
+++ b/paddle/fluid/lite/core/optimizer.h
@@ -46,9 +46,9 @@ class Optimizer {
     SpecifyKernelPickTactic(kernel_pick_factor);
     InitTargetTypeTransformPass();
 
-#ifndef LITE_WITH_LIGHT_WEIGHT_FRAMEWORK
     if (passes.empty()) {
       RunPasses(std::vector<std::string>{{
+#ifndef LITE_WITH_LIGHT_WEIGHT_FRAMEWORK
           "static_kernel_pick_pass",        //
           "variable_place_inference_pass",  //
           "argument_type_display_pass",     //
@@ -58,12 +58,12 @@ class Optimizer {
           "argument_type_display_pass",     //
           "io_copy_kernel_pick_pass",       //
           "variable_place_inference_pass",  //
-          "runtime_context_assign_pass",    //
+#endif
+          "runtime_context_assign_pass",    //
       }});
     } else {
       RunPasses(passes);
     }
-#endif
 
     exec_scope_ = program.exec_scope();
   }
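A note on the optimizer.h change above: moving the `#ifndef` guard inside the braced initializer means a light-weight-framework build still makes the `RunPasses` call, but compiles a pass list containing only `runtime_context_assign_pass`. A minimal standalone sketch of the same conditional-initializer pattern; the `RunPasses` stub below is illustrative, not the real `Optimizer::RunPasses`:

```cpp
#include <iostream>
#include <string>
#include <vector>

// Illustrative stand-in for the real Optimizer::RunPasses().
void RunPasses(const std::vector<std::string>& passes) {
  for (const auto& p : passes) std::cout << "running pass: " << p << "\n";
}

int main() {
  // Preprocessor guards inside a braced initializer simply drop the
  // guarded elements, leaving a one-element list on the light framework.
  RunPasses(std::vector<std::string>{{
#ifndef LITE_WITH_LIGHT_WEIGHT_FRAMEWORK
      "static_kernel_pick_pass",
      "variable_place_inference_pass",
      "type_target_transform_pass",
      "io_copy_kernel_pick_pass",
#endif
      "runtime_context_assign_pass",
  }});
}
```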
"variable_place_inference_pass", // - "runtime_context_assign_pass", // +#endif + "runtime_context_assign_pass", // }}); } else { RunPasses(passes); } -#endif exec_scope_ = program.exec_scope(); } diff --git a/paddle/fluid/lite/kernels/arm/conv_compute_test.cc b/paddle/fluid/lite/kernels/arm/conv_compute_test.cc index e4d80265d7728fa0eeea97fd070a982a8888ec7e..f25a5cf07452f128681bb4367b7dfc8f7fb09c0d 100644 --- a/paddle/fluid/lite/kernels/arm/conv_compute_test.cc +++ b/paddle/fluid/lite/kernels/arm/conv_compute_test.cc @@ -45,7 +45,7 @@ void conv_compute_ref(const operators::ConvParam& param) { bias_data = param.bias->mutable_data(); } bool flag_bias = bias_data != nullptr; - bool flag_relu = false; // TODO(hong19860320) param.relu + bool flag_relu = param.fuse_relu; int num = input_dims[0]; int chout = output_dims[1]; @@ -183,7 +183,8 @@ TEST(conv_arm, compute) { auto* filter_data = filter.mutable_data(); auto* output_data = output.mutable_data(); for (int i = 0; i < input.dims().production(); i++) { - input_data[i] = static_cast(i % 128); + float sign = i % 3 == 0 ? -1.0f : 1.0f; + input_data[i] = sign * static_cast(i % 128); } for (int i = 0; i < filter.dims().production(); i++) { filter_data[i] = @@ -208,7 +209,7 @@ TEST(conv_arm, compute) { } param.bias = &bias; } - // TODO(hong19860320) param.relu = flag_relu; + param.fuse_relu = flag_relu; param.paddings = std::vector({padding, padding}); param.strides = std::vector({stride, stride}); param.dilations = diff --git a/paddle/fluid/lite/kernels/arm/pool_compute.cc b/paddle/fluid/lite/kernels/arm/pool_compute.cc index 6a7716fae6bfc3aa52dad7c8b8192191e986b6f3..168b0e50c98bcf8eab324b627478a7790e665b82 100644 --- a/paddle/fluid/lite/kernels/arm/pool_compute.cc +++ b/paddle/fluid/lite/kernels/arm/pool_compute.cc @@ -163,7 +163,7 @@ PrecisionType PoolCompute::precision() const { return PRECISION(kFloat); } } // namespace lite } // namespace paddle -REGISTER_LITE_KERNEL(pool, kARM, kFloat, kNCHW, +REGISTER_LITE_KERNEL(pool2d, kARM, kFloat, kNCHW, paddle::lite::kernels::arm::PoolCompute, def) .BindInput("X", {LiteType::GetTensorTy(TARGET(kARM))}) .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kARM))}) diff --git a/paddle/fluid/lite/kernels/arm/pool_compute_test.cc b/paddle/fluid/lite/kernels/arm/pool_compute_test.cc index b024ccef9d526d56bcf52c1600940ff0804eaf1f..70995d10bd8c5de5bd0a594a66ea326e6cff6f55 100644 --- a/paddle/fluid/lite/kernels/arm/pool_compute_test.cc +++ b/paddle/fluid/lite/kernels/arm/pool_compute_test.cc @@ -261,8 +261,8 @@ TEST(pool_arm, compute) { } TEST(pool, retrive_op) { - auto pool = - KernelRegistry::Global().Create("pool"); + auto pool = KernelRegistry::Global().Create( + "pool2d"); ASSERT_FALSE(pool.empty()); ASSERT_TRUE(pool.front()); } @@ -272,4 +272,4 @@ TEST(pool, retrive_op) { } // namespace lite } // namespace paddle -USE_LITE_KERNEL(pool, kARM, kFloat, kNCHW, def); +USE_LITE_KERNEL(pool2d, kARM, kFloat, kNCHW, def); diff --git a/paddle/fluid/lite/kernels/arm/relu_compute.h b/paddle/fluid/lite/kernels/arm/relu_compute.h index 29d17bf5918e112dfd065c9cc11910703ab5e92d..def3f02c5046c8f60fb5c6d518361ae8456253a4 100644 --- a/paddle/fluid/lite/kernels/arm/relu_compute.h +++ b/paddle/fluid/lite/kernels/arm/relu_compute.h @@ -45,4 +45,6 @@ class ReluCompute : public KernelLite { REGISTER_LITE_KERNEL(relu, kARM, kFloat, kNCHW, paddle::lite::kernels::arm::ReluCompute, def) + .BindInput("X", {LiteType::GetTensorTy(TARGET(kARM))}) + .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kARM))}) .Finalize(); diff --git 
diff --git a/paddle/fluid/lite/operators/batch_norm_op.cc b/paddle/fluid/lite/operators/batch_norm_op.cc
index e974d0134dad93a2241c265687a190b10d5ff85d..b6ef87732defcb879f655c3177c00725bf57cfed 100644
--- a/paddle/fluid/lite/operators/batch_norm_op.cc
+++ b/paddle/fluid/lite/operators/batch_norm_op.cc
@@ -82,7 +82,7 @@ bool BatchNormOp::AttachImpl(const cpp::OpDesc &op_desc, lite::Scope *scope) {
   param_.variance =
       scope->FindVar(op_desc.Input("Variance").front())->GetMutable<lite::Tensor>();
   param_.y = scope->FindVar(op_desc.Output("Y").front())->GetMutable<lite::Tensor>();
-  param_.is_test = op_desc.GetAttr<bool>("is_test");
+  param_.is_test = op_desc.GetAttr<int>("is_test");
   param_.use_global_stats = op_desc.GetAttr<bool>("use_global_stats");
   if (!param_.is_test) {
     param_.mean_out =
diff --git a/paddle/fluid/lite/operators/batch_norm_op_test.cc b/paddle/fluid/lite/operators/batch_norm_op_test.cc
index b91c367d92b721c1f96fd5fc92ec0b4f877408e4..9fb02759722e21dcd18276359edf3d84da766d04 100644
--- a/paddle/fluid/lite/operators/batch_norm_op_test.cc
+++ b/paddle/fluid/lite/operators/batch_norm_op_test.cc
@@ -46,7 +46,7 @@ TEST(batch_norm_op_lite, test) {
   desc.SetInput("Mean", {"mean"});
   desc.SetInput("Variance", {"variance"});
   desc.SetOutput("Y", {"y"});
-  desc.SetAttr("is_test", true);
+  desc.SetAttr("is_test", static_cast<int>(1));
   desc.SetAttr("use_global_stats", false);
   desc.SetAttr("epsilon", 1e-5f);
   desc.SetAttr("momentum", 0.9f);
@@ -101,7 +101,7 @@ TEST(batch_norm_op_lite, test_enable_is_test) {
   desc.SetOutput("VarianceOut", {"variance_out"});
   desc.SetOutput("SavedMean", {"saved_mean"});
   desc.SetOutput("SavedVariance", {"saved_variance"});
-  desc.SetAttr("is_test", false);
+  desc.SetAttr("is_test", static_cast<int>(0));
   desc.SetAttr("use_global_stats", false);
   desc.SetAttr("epsilon", 1e-5f);
   desc.SetAttr("momentum", 0.9f);
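The batch_norm hunks above switch the `is_test` attribute from bool to int: the op now reads it with `GetAttr<int>`, so the tests must store it as an int via `static_cast<int>(1)`, because a typed attribute store rejects a read at the wrong type. The sketch below reproduces that failure mode with a `std::variant`-backed map; `AttrMap` and this `GetAttr` are illustrative, not Paddle's implementation:

```cpp
#include <cassert>
#include <map>
#include <string>
#include <variant>

// Illustrative typed attribute store: reading with the wrong type throws,
// which is why SetAttr("is_test", true) breaks once the op reads an int.
using AttrMap = std::map<std::string, std::variant<bool, int, float>>;

template <typename T>
T GetAttr(const AttrMap& attrs, const std::string& name) {
  return std::get<T>(attrs.at(name));  // throws bad_variant_access on mismatch
}

int main() {
  AttrMap attrs;
  attrs["is_test"] = static_cast<int>(1);  // stored as int...
  assert(GetAttr<int>(attrs, "is_test"));  // ...so it must be read as int
  bool threw = false;
  try {
    GetAttr<bool>(attrs, "is_test");       // a bool read now fails
  } catch (const std::bad_variant_access&) {
    threw = true;
  }
  assert(threw);
}
```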
diff --git a/paddle/fluid/lite/operators/conv_op.h b/paddle/fluid/lite/operators/conv_op.h
index 393b5dc2a8e5e9aa8d94784bc4f5a8d041414200..3d09d42241c7cbfcc6dd6893d50196550469d28c 100644
--- a/paddle/fluid/lite/operators/conv_op.h
+++ b/paddle/fluid/lite/operators/conv_op.h
@@ -40,11 +40,11 @@ class ConvOpLite : public OpLite {
   bool AttachImpl(const cpp::OpDesc &op_desc, lite::Scope *scope) override {
     auto input = op_desc.Input("Input").front();
     auto filter = op_desc.Input("Filter").front();
-    auto out = op_desc.Output("Out").front();
+    auto output = op_desc.Output("Output").front();
     param_.x = scope->FindVar(input)->GetMutable<lite::Tensor>();
     param_.filter = scope->FindVar(filter)->GetMutable<lite::Tensor>();
-    CHECK(scope->FindVar(out));
-    param_.output = scope->FindVar(out)->GetMutable<lite::Tensor>();
+    CHECK(scope->FindVar(output));
+    param_.output = scope->FindVar(output)->GetMutable<lite::Tensor>();
     param_.strides = op_desc.GetAttr<std::vector<int>>("strides");
     param_.paddings = op_desc.GetAttr<std::vector<int>>("paddings");
     param_.groups = op_desc.GetAttr<int>("groups");
@@ -53,21 +53,27 @@ class ConvOpLite : public OpLite {
     std::vector<std::string> input_arg_names = op_desc.InputArgumentNames();
     if (std::find(input_arg_names.begin(), input_arg_names.end(), "Bias") !=
         input_arg_names.end()) {
-      auto bias_var = scope->FindVar(op_desc.Input("Bias").front());
-      if (bias_var != nullptr) {
-        param_.bias =
-            const_cast<lite::Tensor *>(&(bias_var->Get<lite::Tensor>()));
+      auto bias_arguments = op_desc.Input("Bias");
+      if (bias_arguments.size() > 0) {
+        auto bias_var = scope->FindVar(bias_arguments.front());
+        if (bias_var != nullptr) {
+          param_.bias =
+              const_cast<lite::Tensor *>(&(bias_var->Get<lite::Tensor>()));
+        }
       }
     }
     if (std::find(input_arg_names.begin(), input_arg_names.end(),
                   "ResidualData") != input_arg_names.end()) {
-      auto residual_data_var =
-          scope->FindVar(op_desc.Input("ResidualData").front());
-      if (residual_data_var != nullptr) {
-        param_.residualData = const_cast<lite::Tensor *>(
-            &(residual_data_var->Get<lite::Tensor>()));
+      auto res_data_arguments = op_desc.Input("ResidualData");
+      if (res_data_arguments.size() > 0) {
+        auto residual_data_var = scope->FindVar(res_data_arguments.front());
+        if (residual_data_var != nullptr) {
+          param_.residualData = const_cast<lite::Tensor *>(
+              &(residual_data_var->Get<lite::Tensor>()));
+        }
       }
     }
+    param_.fuse_relu = op_desc.GetAttr<bool>("fuse_relu");
     return true;
   }
diff --git a/paddle/fluid/lite/operators/pool_op.h b/paddle/fluid/lite/operators/pool_op.h
index 2e9a02eec189599ba2fc23da8e7bcc9ebd0ea8b3..29946ed92a445dd7f43ae3f45362780f2912f17a 100644
--- a/paddle/fluid/lite/operators/pool_op.h
+++ b/paddle/fluid/lite/operators/pool_op.h
@@ -53,17 +53,25 @@ class PoolOpLite : public OpLite {
     param_.strides = op_desc.GetAttr<std::vector<int>>("strides");
     param_.paddings = op_desc.GetAttr<std::vector<int>>("paddings");
 
-    param_.exclusive = op_desc.GetAttr<bool>("exclusive");
-    param_.adaptive = op_desc.GetAttr<bool>("adaptive");
-    param_.ceil_mode = op_desc.GetAttr<bool>("ceil_mode");
-    param_.use_quantizer = op_desc.GetAttr<bool>("use_quantizer");
+    if (op_desc.HasAttr("exclusive")) {
+      param_.exclusive = op_desc.GetAttr<bool>("exclusive");
+    }
+    if (op_desc.HasAttr("adaptive")) {
+      param_.adaptive = op_desc.GetAttr<bool>("adaptive");
+    }
+    if (op_desc.HasAttr("ceil_mode")) {
+      param_.ceil_mode = op_desc.GetAttr<bool>("ceil_mode");
+    }
+    if (op_desc.HasAttr("use_quantizer")) {
+      param_.use_quantizer = op_desc.GetAttr<bool>("use_quantizer");
+    }
     // param_.data_format = op_desc.GetAttr<std::string>("data_format");
     return true;
   }
 
   void AttachKernel(KernelBase *kernel) override { kernel->SetParam(param_); }
 
-  std::string DebugString() const override { return "pool"; }
+  std::string DebugString() const override { return "pool2d"; }
 
  private:
  mutable PoolParam param_;
diff --git a/paddle/fluid/lite/operators/pool_op_test.cc b/paddle/fluid/lite/operators/pool_op_test.cc
index 9ab2865f1d04f2ca173b9d2f5f7d9e457f6754e8..e9616ede5a49671d70094edc45224fb4a5a7a927 100644
--- a/paddle/fluid/lite/operators/pool_op_test.cc
+++ b/paddle/fluid/lite/operators/pool_op_test.cc
@@ -38,7 +38,7 @@ TEST(pool_op_lite, test) {
 
   // prepare op desc
   cpp::OpDesc desc;
-  desc.SetType("pool");
+  desc.SetType("pool2d");
   desc.SetInput("X", {"x"});
   desc.SetOutput("Out", {"output"});
 
@@ -69,7 +69,7 @@ TEST(pool_op_lite, test) {
   bool use_quantizer{false};
   desc.SetAttr("use_quantizer", use_quantizer);
 
-  PoolOpLite pool("pool");
+  PoolOpLite pool("pool2d");
   pool.SetValidPlaces({Place{TARGET(kARM), PRECISION(kFloat)}});
   pool.Attach(desc, &scope);
   auto kernels = pool.CreateKernels({Place{TARGET(kARM), PRECISION(kFloat)}});
@@ -86,5 +86,5 @@ TEST(pool_op_lite, test) {
 }  // namespace paddle
 
 #ifdef LITE_WITH_ARM
-USE_LITE_KERNEL(pool, kARM, kFloat, kNCHW, def);
+USE_LITE_KERNEL(pool2d, kARM, kFloat, kNCHW, def);
 #endif
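pool_op.h (and conv_op.h above) now treat optional attributes and inputs defensively, checking `HasAttr()` or the argument-list size before reading. If that guard pattern keeps recurring, it could be folded into a small helper along these lines. This is a sketch only: `GetAttrOr` is hypothetical and not part of the cpp::OpDesc API, and `MockOpDesc` merely mirrors the two calls (`HasAttr`/`GetAttr`) the diff actually uses:

```cpp
#include <cassert>
#include <map>
#include <string>

// Mock of the two cpp::OpDesc calls used by the guards in the diff;
// stands in for the real class so the example is self-contained.
struct MockOpDesc {
  std::map<std::string, bool> bool_attrs;
  bool HasAttr(const std::string& name) const {
    return bool_attrs.count(name) > 0;
  }
  template <typename T>
  T GetAttr(const std::string& name) const {
    return bool_attrs.at(name);
  }
};

// Hypothetical helper packaging the HasAttr()-then-GetAttr() pattern
// added to PoolOpLite::AttachImpl; the default is used when absent.
template <typename T, typename Desc>
T GetAttrOr(const Desc& op_desc, const std::string& name, T default_value) {
  return op_desc.HasAttr(name) ? op_desc.template GetAttr<T>(name)
                               : default_value;
}

int main() {
  MockOpDesc desc;
  desc.bool_attrs["exclusive"] = false;
  assert(GetAttrOr<bool>(desc, "exclusive", true) == false);   // present
  assert(GetAttrOr<bool>(desc, "ceil_mode", false) == false);  // absent
}
```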
diff --git a/paddle/fluid/lite/operators/relu_op.cc b/paddle/fluid/lite/operators/relu_op.cc
index b073e2db43a4891defeb95750424941969323ba0..47251c72dfa5183e19ace3e36a1d3a9dd27a6bb0 100644
--- a/paddle/fluid/lite/operators/relu_op.cc
+++ b/paddle/fluid/lite/operators/relu_op.cc
@@ -32,12 +32,11 @@ bool ReluOp::InferShape() const {
 bool ReluOp::AttachImpl(const cpp::OpDesc &opdesc, lite::Scope *scope) {
   param_.input = const_cast<lite::Tensor *>(
-      &scope->FindVar(opdesc.Input("Input").front())->Get<lite::Tensor>());
+      &scope->FindVar(opdesc.Input("X").front())->Get<lite::Tensor>());
   param_.output =
       scope->FindVar(opdesc.Output("Out").front())->GetMutable<lite::Tensor>();
   CHECK(param_.input);
   CHECK(param_.output);
-
   kernel_->SetParam(param_);
   return true;
 }
diff --git a/paddle/fluid/lite/operators/split_op.cc b/paddle/fluid/lite/operators/split_op.cc
index 0d5075b0971e4bd98de8aac9810bbe7514c1a562..58768276377edd9ea92356a808a6f46c3b5c6a80 100644
--- a/paddle/fluid/lite/operators/split_op.cc
+++ b/paddle/fluid/lite/operators/split_op.cc
@@ -37,7 +37,7 @@ bool SplitOp::InferShape() const {
   const auto &sections = param_.sections;
 
   const int outs_number = outs.size();
-  std::vector<DDim> outs_dims;
+  std::vector<lite::DDim> outs_dims;
   outs_dims.reserve(outs_number);
 
   if (num > 0) {
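Finally, several hunks replace hard-coded element counts (`3 * 224 * 224`) with `dims().production()`, which stays correct when the shape gains a batch dimension as in the cxx_api_bin.cc change. A mocked sketch of what `production()` computes, assuming it is simply the product of the dimensions (the `DDim` below is a stand-in for `lite::DDim`):

```cpp
#include <cassert>
#include <cstdint>
#include <functional>
#include <numeric>
#include <vector>

// Minimal stand-in for lite::DDim, enough to show what production() means.
struct DDim {
  std::vector<int64_t> dims;
  int64_t production() const {
    return std::accumulate(dims.begin(), dims.end(), int64_t{1},
                           std::multiplies<int64_t>());
  }
};

int main() {
  DDim old_shape{{3, 224, 224}};
  DDim new_shape{{1, 3, 224, 224}};  // batch dimension added by the diff
  assert(old_shape.production() == 3 * 224 * 224);
  assert(new_shape.production() == old_shape.production());
  // Loops written against production() need no edit when the rank changes:
  //   for (int i = 0; i < input_tensor->dims().production(); i++) { ... }
}
```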