From 2776922c1fd0b9a895fa4e41b9069ebced41958f Mon Sep 17 00:00:00 2001
From: hjchen2
Date: Tue, 12 Mar 2019 23:18:03 +0800
Subject: [PATCH] update

---
 src/common/types.cpp                          |   2 -
 src/framework/operator.h                      |  24 +--
 src/operators/fill_constant_op.h              |   2 +-
 src/operators/kernel/arm/conv_kernel.cpp      | 137 ------------------
 .../convolution/conv_add_bn_relu_kernel.cpp   |   4 +-
 .../convolution/conv_bn_add_relu_kernel.cpp   |   5 +-
 .../arm/convolution/conv_bn_relu_kernel.cpp   |   4 +-
 .../kernel/beam_search_decode_kernel.h        |  11 +-
 src/operators/kernel/beam_search_kernel.h     |  20 +--
 src/operators/kernel/one_hot_kernel.h         |   7 +-
 src/operators/kernel/pad2d_kernel.h           |   8 +-
 src/operators/kernel/while_kernel.h           |   7 +-
 src/operators/op_param.h                      |  88 +++++------
 src/operators/pad2d.cpp                       |  42 ------
 src/operators/pad2d.h                         |  32 ----
 src/operators/pad2d_op.cpp                    |  24 ++-
 src/operators/pad2d_op.h                      |  25 +---
 test/net/test_ocr.cpp                         |  40 ++---
 tools/op.cmake                                |   1 +
 19 files changed, 120 insertions(+), 363 deletions(-)
 mode change 100755 => 100644 src/common/types.cpp
 delete mode 100644 src/operators/kernel/arm/conv_kernel.cpp
 delete mode 100644 src/operators/pad2d.cpp
 delete mode 100644 src/operators/pad2d.h

diff --git a/src/common/types.cpp b/src/common/types.cpp
old mode 100755
new mode 100644
index 19bc6ea595..20656acb20
--- a/src/common/types.cpp
+++ b/src/common/types.cpp
@@ -116,8 +116,6 @@ const char *G_OP_TYPE_FUSION_DECONV_ADD_BN_RELU = "fusion_deconv_add_bn_relu";
 const char *G_OP_TYPE_FUSION_DECONV_ADD_BN = "fusion_deconv_add_bn";
 const char *G_OP_TYPE_FUSION_DECONV_BN_RELU = "fusion_deconv_bn_relu";
 
-const char *G_OP_TYPE_PAD2D = "pad2d";
-
 std::unordered_map<
     std::string, std::pair<std::vector<std::string>, std::vector<std::string>>>
     op_input_output_key = {
diff --git a/src/framework/operator.h b/src/framework/operator.h
index d58168017f..aaddb9c564 100644
--- a/src/framework/operator.h
+++ b/src/framework/operator.h
@@ -15,7 +15,6 @@ limitations under the License. */
 
 #pragma once
 
 #include <map>
-#include <memory>
 #include <string>
 #include <utility>
@@ -81,7 +80,6 @@ class OperatorBase {
   }
 #ifdef PADDLE_MOBILE_FPGA
   void InsertTensors();
-  void ChangeNameMap(string key, std::vector<string> value);
 #endif
 
  protected:
@@ -98,35 +96,15 @@ class OperatorBase {
 template <typename Dtype, typename ParamType, typename KernelType>
 class OperatorWithKernel : public OperatorBase<Dtype> {
  public:
-#ifndef PADDLE_MOBILE_FPGA1
   OperatorWithKernel(const std::string &type, const VariableNameMap &inputs,
                      const VariableNameMap &outputs, const AttributeMap &attrs,
                      framework::Scope *scope)
       : OperatorBase<Dtype>(type, inputs, outputs, attrs, scope),
-        param_(inputs, outputs, attrs, scope.get()) {
+        param_(inputs, outputs, attrs, scope) {
 #ifdef PADDLE_MOBILE_CL
     kernel_.InitCLHelper(scope->GetCLScpoe());
 #endif
   }
-#else
-  OperatorWithKernel(const std::string &type, const VariableNameMap inputs,
-                     const VariableNameMap &outputs, const AttributeMap &attrs,
-                     std::shared_ptr<Scope> scope)
-      : OperatorBase<Dtype>(type, inputs, outputs, attrs, scope) {
-    static int feed_num = 0;
-    static int fetch_num = 0;
-    if (type == "feed") {
-      auto new_name = string("feed") + std::to_string(feed_num++);
-      auto var = scope->Var(new_name);
-      (const_cast<VariableNameMap &>(inputs)).at("X") = {string(new_name)};
-    } else if (type == "fetch") {
-      auto new_name = string("fetch") + std::to_string(fetch_num++);
-      auto var = scope->Var(new_name);
-      (const_cast<VariableNameMap &>(outputs)).at("Out") = {string(new_name)};
-    }
-    param_ = ParamType(inputs, outputs, attrs, *scope);
-  }
-#endif
 
   virtual void RunImpl() { this->kernel_.Compute(this->param_); }
 
   virtual void InferShape() const = 0;
diff --git a/src/operators/fill_constant_op.h b/src/operators/fill_constant_op.h
index 3d078d0e59..0a51f8494d 100644
--- a/src/operators/fill_constant_op.h
+++ b/src/operators/fill_constant_op.h
@@ -34,7 +34,7 @@ class FillConstantOp : public framework::OperatorBase<DeviceType> {
                   const framework::AttributeMap attrs,
                   framework::Scope *scope)
       : framework::OperatorBase<DeviceType>(type, inputs, outputs, attrs,
                                             scope),
-        param_(inputs, outputs, attrs, scope.get()) {}
+        param_(inputs, outputs, attrs, scope) {}
 
   void RunImpl() {
     auto data_type =
         static_cast<_PaddleMobile__Framework__Proto__VarType__Type>(
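
The change this patch applies across every operator param below is mechanical: each param class now receives the raw `Scope *` held by `OperatorBase` and forwards it to the `OpParam` base constructor, dereferencing it only for the lookup helpers, which still take a `Scope` reference. A minimal sketch of the resulting shape — the `FooParam` class and its "X"/"Out" keys are illustrative only, not part of this patch:

template <typename Dtype>
class FooParam : public OpParam {
 public:
  FooParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
           const AttributeMap &attrs, Scope *scope)
      : OpParam(inputs, outputs, attrs, scope) {  // forward the raw pointer
    // Lookup helpers still expect a Scope reference, hence the dereference.
    input_ = OpParam::GetVarValue<framework::LoDTensor>("X", inputs, *scope);
    output_ = OpParam::GetVarValue<framework::LoDTensor>("Out", outputs, *scope);
  }

 private:
  framework::LoDTensor *input_ = nullptr;
  framework::LoDTensor *output_ = nullptr;
};

Keeping the pointer in the base once also removes the need for subclasses to hold their own copies of inputs/outputs/scope, which is exactly what the while_kernel.h hunk further down deletes.
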
diff --git a/src/operators/kernel/arm/conv_kernel.cpp b/src/operators/kernel/arm/conv_kernel.cpp
deleted file mode 100644
index de19127e68..0000000000
--- a/src/operators/kernel/arm/conv_kernel.cpp
+++ /dev/null
@@ -1,137 +0,0 @@
-/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#ifdef CONV_OP
-
-#include "operators/kernel/conv_kernel.h"
-#include "operators/kernel/central-arm-func/conv_arm_func.h"
-
-namespace paddle_mobile {
-namespace operators {
-
-template <>
-bool ConvKernel<CPU, float>::Init(ConvParam<CPU> *param) {
-  bool conv3x3 = param->Filter()->dims()[2] == param->Filter()->dims()[3] &&
-                 param->Filter()->dims()[2] == 3;
-  bool conv5x5 = param->Filter()->dims()[2] == param->Filter()->dims()[3] &&
-                 param->Filter()->dims()[2] == 5;
-  bool depth3x3 = conv3x3 && param->Groups() == param->Input()->dims()[1] &&
-                  param->Input()->dims()[1] == param->Output()->dims()[1];
-  bool depth5x5 = conv5x5 && param->Groups() == param->Input()->dims()[1] &&
-                  param->Input()->dims()[1] == param->Output()->dims()[1];
-  if (param->Filter()->type() == typeid(int8_t)) {
-#ifndef __aarch64__
-    if (depth3x3 && param->Strides()[0] < 3 &&
-        param->Strides()[0] == param->Strides()[1]) {
-      param->ExecMode() = ConvParam<CPU>::EXEC_DEPTHWISE3x3_INT8;
-    } else if (depth5x5 && param->Strides()[0] < 2 &&
-               param->Strides()[0] == param->Strides()[1]) {
-      param->ExecMode() = ConvParam<CPU>::EXEC_DEPTHWISE5x5_INT8;
-    } else {
-#endif  // __aarch64__
-      param->ExecMode() = ConvParam<CPU>::EXEC_GEMM_INT8;
-#ifndef __aarch64__
-    }
-#endif  // __aarch64__
-  } else {
-    if (depth3x3 && param->Strides()[0] == param->Strides()[1] &&
-        param->Strides()[0] == 1 && param->Paddings()[0] == 1 &&
-        param->Paddings()[0] == param->Paddings()[1]) {
-      param->ExecMode() = ConvParam<CPU>::EXEC_DEPTHWISE3x3S1P1_FLOAT;
-    } else if (depth3x3 && param->Strides()[0] == param->Strides()[1] &&
-               param->Strides()[0] == 2 && param->Paddings()[0] == 0 &&
-               param->Paddings()[0] == param->Paddings()[1]) {
-      param->ExecMode() = ConvParam<CPU>::EXEC_DEPTHWISE3x3S2P0_FLOAT;
-    } else if (depth3x3 && param->Strides()[0] == param->Strides()[1] &&
-               param->Strides()[0] == 2 && param->Paddings()[0] == 1 &&
-               param->Paddings()[0] == param->Paddings()[1]) {
-      param->ExecMode() = ConvParam<CPU>::EXEC_DEPTHWISE3x3S2P1_FLOAT;
-    } else if (depth3x3) {
-      param->ExecMode() = ConvParam<CPU>::EXEC_DEPTHWISE3x3_FLOAT;
-#ifndef __aarch64__
-    } else if (depth5x5 && param->Strides()[0] == param->Strides()[1] &&
-               param->Strides()[0] == 1) {
-      param->ExecMode() = ConvParam<CPU>::EXEC_DEPTHWISE5x5_FLOAT;
-    } else if (conv3x3 && param->Strides()[0] == param->Strides()[1] &&
-               param->Dilations()[0] == param->Dilations()[1] &&
-               param->Strides()[0] == 1 && param->Dilations()[0] == 1 &&
-               param->Output()->dims()[1] >= 16 &&
-               param->Input()->dims()[1] >= 16 &&
-               param->Input()->dims()[2] <= 140 /* refered from ncnn */) {
-      param->ExecMode() = ConvParam<CPU>::EXEC_WINOGRAD3X3_FLOAT;
-      // transform weight
-      param->transformed_filter_ = new framework::Tensor;
-      operators::math::winograd_transform_weight<8, 3>(
-          *param->Filter(), param->transformed_filter_);
-#endif
-    } else {
-      param->ExecMode() = ConvParam<CPU>::EXEC_GEMM_FLOAT;
-    }
-  }
-  return true;
-}
-
-template <>
-void ConvKernel<CPU, float>::Compute(const ConvParam<CPU> &param) {
-  switch (param.ExecMode()) {
-    case ConvParam<CPU>::EXEC_GEMM_INT8:
-      GemmConv<int8_t, int32_t>(param);
-      break;
-#ifndef __aarch64__
-    case ConvParam<CPU>::EXEC_DEPTHWISE3x3_INT8:
-      DepthwiseConv3x3<int8_t, int32_t>(param);
-      break;
-    case ConvParam<CPU>::EXEC_DEPTHWISE5x5_INT8:
-      DepthwiseConv5x5<int8_t, int32_t>(param);
-      break;
-#endif  // __aarch64__
-    case ConvParam<CPU>::EXEC_DEPTHWISE3x3S1P1_FLOAT:
-      math::DepthwiseConv3x3s1p1(param.Input(), param.Filter(), param.Output(),
-                                 nullptr, false, false);
-      break;
-    case ConvParam<CPU>::EXEC_DEPTHWISE3x3S2P1_FLOAT:
-      math::DepthwiseConv3x3s2p1v2(param.Input(), param.Filter(),
-                                   param.Output(), nullptr, false, false);
-      break;
-    case ConvParam<CPU>::EXEC_DEPTHWISE3x3S2P0_FLOAT:
-      math::DepthwiseConv3x3s2p0(param.Input(), param.Filter(), param.Output(),
-                                 nullptr, false, false);
-      break;
-    case ConvParam<CPU>::EXEC_DEPTHWISE3x3_FLOAT:
-      math::DepthwiseConv3x3(param.Input(), param.Strides(), param.Paddings(),
-                             param.Filter(), nullptr, param.Output(), false);
-      break;
-#ifndef __aarch64__
-    case ConvParam<CPU>::EXEC_DEPTHWISE5x5_FLOAT:
-      DepthwiseConv5x5<float, float>(param);
-      break;
-    case ConvParam<CPU>::EXEC_WINOGRAD3X3_FLOAT:
-      WinogradConv3x3<8, 3>(param);
-      break;
-#endif  // __aarch64__
-    case ConvParam<CPU>::EXEC_GEMM_FLOAT:
-      GemmConv<float, float>(param);
-      break;
-    default:
-      PADDLE_MOBILE_THROW_EXCEPTION("Invalid convolution execute mode %d",
-                                    param.ExecMode());
-  }
-}
-
-template class ConvKernel<CPU, float>;
-
-}  // namespace operators
-}  // namespace paddle_mobile
-
-#endif
diff --git a/src/operators/kernel/arm/convolution/conv_add_bn_relu_kernel.cpp b/src/operators/kernel/arm/convolution/conv_add_bn_relu_kernel.cpp
index 0cbb55acb9..9c70d1e2c8 100644
--- a/src/operators/kernel/arm/convolution/conv_add_bn_relu_kernel.cpp
+++ b/src/operators/kernel/arm/convolution/conv_add_bn_relu_kernel.cpp
@@ -45,8 +45,8 @@ bool ConvAddBNReluKernel<CPU, float>::Init(
   }
   //  Tensor *new_scale = new Tensor();
   //  Tensor *new_bias = new Tensor();
-  Tensor *new_scale = param->CreateNewScale();
-  Tensor *new_bias = param->CreateNewBiase();
+  auto *new_scale = param->CreateNewScale();
+  auto *new_bias = param->CreateNewBiase();
   auto new_scale_ptr = new_scale->mutable_data<float>({C});
   auto new_bias_ptr = new_bias->mutable_data<float>({C});
diff --git a/src/operators/kernel/arm/convolution/conv_bn_add_relu_kernel.cpp b/src/operators/kernel/arm/convolution/conv_bn_add_relu_kernel.cpp
index 26e0e34367..0f52df8b18 100644
--- a/src/operators/kernel/arm/convolution/conv_bn_add_relu_kernel.cpp
+++ b/src/operators/kernel/arm/convolution/conv_bn_add_relu_kernel.cpp
@@ -43,8 +43,9 @@ bool ConvBNAddReluKernel<CPU, float>::Init(
     inv_std_ptr[i] =
         1 / static_cast<float>(pow((variance_ptr[i] + epsilon), 0.5));
   }
-  LoDTensor *new_scale = new LoDTensor();
-  LoDTensor *new_bias = new LoDTensor();
+
+  auto *new_scale = param->CreateNewScale();
+  auto *new_bias = param->CreateNewBiase();
   auto new_scale_ptr = new_scale->mutable_data<float>({C});
   auto new_bias_ptr = new_bias->mutable_data<float>({C});
   for (int i = 0; i < C; i++) {
diff --git a/src/operators/kernel/arm/convolution/conv_bn_relu_kernel.cpp b/src/operators/kernel/arm/convolution/conv_bn_relu_kernel.cpp
index 4de479a1b9..1be0c94397 100644
--- a/src/operators/kernel/arm/convolution/conv_bn_relu_kernel.cpp
+++ b/src/operators/kernel/arm/convolution/conv_bn_relu_kernel.cpp
@@ -43,8 +43,8 @@ bool ConvBNReluKernel<CPU, float>::Init(FusionConvBNReluParam<CPU> *param) {
     inv_std_ptr[i] =
         1 / static_cast<float>(pow((variance_ptr[i] + epsilon), 0.5));
   }
 
-  Tensor *new_scale = param->CreateNewScale();
-  Tensor *new_bias = param->CreateNewBiase();
+  auto *new_scale = param->CreateNewScale();
+  auto *new_bias = param->CreateNewBiase();
   auto new_scale_ptr = new_scale->mutable_data<float>({C});
   auto new_bias_ptr = new_bias->mutable_data<float>({C});
   for (int i = 0; i < C; i++) {
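
The three fused kernels above share the same Init() preparation: they fold the batch-norm parameters into one per-channel scale and bias so that Compute() can apply BN as a single multiply-add after the convolution. A sketch of the arithmetic — the new_scale/new_bias loop itself falls outside the hunks shown, so the variable names here follow the visible inv_std computation but are partly assumed:

// BN:     y = scale * (x - mean) / sqrt(variance + epsilon) + bias
// folded: y = new_scale * x + new_bias
for (int i = 0; i < C; i++) {
  float inv_std = 1.f / std::sqrt(variance_ptr[i] + epsilon);
  new_scale_ptr[i] = inv_std * scale_ptr[i];
  new_bias_ptr[i] = bias_ptr[i] - mean_ptr[i] * inv_std * scale_ptr[i];
}

The switch in conv_bn_add_relu_kernel.cpp from `new LoDTensor()` to `param->CreateNewScale()` / `param->CreateNewBiase()` ties the folded tensors' lifetime to the param object rather than a raw `new` that nothing frees, matching what the other two kernels already do.
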
diff --git a/src/operators/kernel/beam_search_decode_kernel.h b/src/operators/kernel/beam_search_decode_kernel.h
index fa40436115..36cc7f9f2d 100644
--- a/src/operators/kernel/beam_search_decode_kernel.h
+++ b/src/operators/kernel/beam_search_decode_kernel.h
@@ -27,15 +27,16 @@ class BeamSearchDecodeParam : public OpParam {
  public:
   BeamSearchDecodeParam(const VariableNameMap &inputs,
                         const VariableNameMap &outputs,
-                        const AttributeMap &attrs, const Scope &scope) {
+                        const AttributeMap &attrs, Scope *scope)
+      : OpParam(inputs, outputs, attrs, scope) {
     ids_ =
-        OpParam::GetVarValue<framework::LoDTensorArray>("Ids", inputs, scope);
+        OpParam::GetVarValue<framework::LoDTensorArray>("Ids", inputs, *scope);
     scores_ = OpParam::GetVarValue<framework::LoDTensorArray>("Scores", inputs,
-                                                              scope);
+                                                              *scope);
     sentence_ids_ = OpParam::GetVarValue<framework::LoDTensor>("SentenceIds",
-                                                               outputs, scope);
+                                                               outputs, *scope);
     sentence_scores_ = OpParam::GetVarValue<framework::LoDTensor>(
-        "SentenceScores", outputs, scope);
+        "SentenceScores", outputs, *scope);
     beam_size_ = OpParam::GetAttr<int>("beam_size", attrs);
     end_id_ = OpParam::GetAttr<int>("end_id", attrs);
   }
diff --git a/src/operators/kernel/beam_search_kernel.h b/src/operators/kernel/beam_search_kernel.h
index 4e3640d905..38fe162b24 100644
--- a/src/operators/kernel/beam_search_kernel.h
+++ b/src/operators/kernel/beam_search_kernel.h
@@ -29,16 +29,18 @@ template <typename Dtype>
 class BeamSearchParam : public OpParam {
  public:
   BeamSearchParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
-                  const AttributeMap &attrs, const Scope &scope) {
-    pre_ids_ = GET_VAR_AS_LOD_TENSOR("pre_ids", inputs, scope);
-    pre_scores_ = GET_VAR_AS_LOD_TENSOR("pre_scores", inputs, scope);
-    ids_ = GET_VAR_AS_LOD_TENSOR("ids", inputs, scope);
-    scores_ = GET_VAR_AS_LOD_TENSOR("scores", inputs, scope);
-
-    selected_ids_ = GET_VAR_AS_LOD_TENSOR("selected_ids", outputs, scope);
-    selected_scores_ = GET_VAR_AS_LOD_TENSOR("selected_scores", outputs, scope);
+                  const AttributeMap &attrs, Scope *scope)
+      : OpParam(inputs, outputs, attrs, scope) {
+    pre_ids_ = GET_VAR_AS_LOD_TENSOR("pre_ids", inputs, *scope);
+    pre_scores_ = GET_VAR_AS_LOD_TENSOR("pre_scores", inputs, *scope);
+    ids_ = GET_VAR_AS_LOD_TENSOR("ids", inputs, *scope);
+    scores_ = GET_VAR_AS_LOD_TENSOR("scores", inputs, *scope);
+
+    selected_ids_ = GET_VAR_AS_LOD_TENSOR("selected_ids", outputs, *scope);
+    selected_scores_ =
+        GET_VAR_AS_LOD_TENSOR("selected_scores", outputs, *scope);
     if (outputs.count("parent_idx")) {
-      parent_idx_ = GET_VAR_AS_LOD_TENSOR("parent_idx", outputs, scope);
+      parent_idx_ = GET_VAR_AS_LOD_TENSOR("parent_idx", outputs, *scope);
     } else {
       parent_idx_ = new framework::Tensor();
     }
diff --git a/src/operators/kernel/one_hot_kernel.h b/src/operators/kernel/one_hot_kernel.h
index 0a57158823..fd883cabee 100644
--- a/src/operators/kernel/one_hot_kernel.h
+++ b/src/operators/kernel/one_hot_kernel.h
@@ -29,9 +29,10 @@ template <typename Dtype>
 class OnehotParam : public OpParam {
  public:
   OnehotParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
-              const AttributeMap &attrs, const Scope &scope) {
-    input_ = GET_VAR_AS_LOD_TENSOR("X", inputs, scope);
-    output_ = GET_VAR_AS_LOD_TENSOR("Out", outputs, scope);
+              const AttributeMap &attrs, Scope *scope)
+      : OpParam(inputs, outputs, attrs, scope) {
+    input_ = GET_VAR_AS_LOD_TENSOR("X", inputs, *scope);
+    output_ = GET_VAR_AS_LOD_TENSOR("Out", outputs, *scope);
 
     depth_ = OpParam::GetAttr<int>("depth", attrs);
     dtype_ = OpParam::GetAttr<int>("dtype", attrs);
diff --git a/src/operators/kernel/pad2d_kernel.h b/src/operators/kernel/pad2d_kernel.h
index 4959346b56..0834cbc0cf 100644
--- a/src/operators/kernel/pad2d_kernel.h
+++ b/src/operators/kernel/pad2d_kernel.h
@@ -28,9 +28,11 @@ template <typename Dtype>
 class Pad2DParam : public OpParam {
  public:
   Pad2DParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
-             const AttributeMap &attrs, const Scope &scope) {
-    input_ = OpParam::GetVarValue<framework::LoDTensor>("X", inputs, scope);
-    output_ = OpParam::GetVarValue<framework::LoDTensor>("Out", outputs, scope);
+             const AttributeMap &attrs, Scope *scope)
+      : OpParam(inputs, outputs, attrs, scope) {
+    input_ = OpParam::GetVarValue<framework::LoDTensor>("X", inputs, *scope);
+    output_ =
+        OpParam::GetVarValue<framework::LoDTensor>("Out", outputs, *scope);
     paddings_ = OpParam::GetAttr<std::vector<int>>("paddings", attrs);
     pad_value_ = OpParam::GetAttr<float>("pad_value", attrs);
     mode_ = OpParam::GetStringAttr("mode", attrs);
diff --git a/src/operators/kernel/while_kernel.h b/src/operators/kernel/while_kernel.h
index 149a2e9829..ba014a9079 100644
--- a/src/operators/kernel/while_kernel.h
+++ b/src/operators/kernel/while_kernel.h
@@ -26,13 +26,10 @@ class WhileParam : public OpParam {
  public:
   WhileParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
              const AttributeMap &attrs, Scope *scope)
-      : inputs_(inputs),
-        outputs_(outputs),
-        scope_(*scope),
-        OpParam(inputs, outputs, attrs, scope) {
+      : scope_(scope), OpParam(inputs, outputs, attrs, scope) {
     cond_ =
         OpParam::GetVarValue<framework::LoDTensor>("Condition", inputs, *scope);
-    sub_block_ = OpParam::GetAttr<framework::BlockDesc>("sub_block", attrs);
+    sub_block_ = OpParam::GetAttr<framework::BlockDesc *>("sub_block", attrs);
   }
 
  public:
diff --git a/src/operators/op_param.h b/src/operators/op_param.h
index bc45c69d1d..95cb7d6753 100644
--- a/src/operators/op_param.h
+++ b/src/operators/op_param.h
@@ -1222,12 +1222,12 @@ class FeedParam : public OpParam {
  public:
   FeedParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
-            const AttributeMap &attrs, const Scope &scope)
+            const AttributeMap &attrs, Scope *scope)
       : OpParam(inputs, outputs, attrs, scope) {
-    input_x_ = InputXFrom<framework::LoDTensorArray>(inputs, scope);
-    out_ = OutFrom<GType>(outputs, scope);
+    input_x_ = InputXFrom<framework::LoDTensorArray>(inputs, *scope);
+    out_ = OutFrom<GType>(outputs, *scope);
     col_ = GetAttr<int>("col", attrs);
-    auto var = scope.FindVar("batch_size");
+    auto var = scope->FindVar("batch_size");
     batch_size = var->GetValue<int>();
   }
   const framework::LoDTensorArray *InputX() const { return input_x_; }
@@ -1249,10 +1249,10 @@ class FetchParam : public OpParam {
  public:
   FetchParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
-             const AttributeMap &attrs, const Scope &scope)
+             const AttributeMap &attrs, Scope *scope)
       : OpParam(inputs, outputs, attrs, scope) {
-    input_x_ = InputXFrom<GType>(inputs, scope);
-    out_ = OutFrom<framework::LoDTensorArray>(outputs, scope);
+    input_x_ = InputXFrom<GType>(inputs, *scope);
+    out_ = OutFrom<framework::LoDTensorArray>(outputs, *scope);
     col_ = GetAttr<int>("col", attrs);
   }
@@ -1821,7 +1821,7 @@ class FusionConvAddParam : public ConvParam<Dtype> {
       : ConvParam<Dtype>(inputs, outputs, attrs, scope) {
     bias_ = OpParam::InputYFrom<GType>(inputs, *scope);
     axis_ = OpParam::GetAttr<int>("axis", attrs);
-    this->output_ = OpParam::OutFrom<GType>(outputs, scope);
+    this->output_ = OpParam::OutFrom<GType>(outputs, *scope);
   }
 
   GType *Bias() const { return bias_; }
@@ -1862,7 +1862,7 @@ class FusionConvAddPReluParam : public ConvParam<Dtype> {
     framework::DDim dims = alpha_->dims();
     bias_ = OpParam::InputYFrom<GType>(inputs, *scope);
     axis_ = OpParam::GetAttr<int>("axis", attrs);
-    this->output_ = OpParam::OutFrom<GType>(outputs, scope);
+    this->output_ = OpParam::OutFrom<GType>(outputs, *scope);
   }
   const GType *InputAlpha() const { return alpha_; }
   const std::string &Mode() const { return mode_; }
@@ -1892,7 +1892,7 @@ class FusionConvAddAddPReluParam : public ConvParam<Dtype> {
     alpha_ = OpParam::InputAlphaFrom<GType>(inputs, *scope);
     mode_ = OpParam::GetStringAttr("mode", attrs);
     framework::DDim dims = alpha_->dims();
-    bias_ = OpParam::InputYFrom<GType>(inputs, scope);
+    bias_ = OpParam::InputYFrom<GType>(inputs, *scope);
     axis_ = OpParam::GetAttr<int>("axis", attrs);
     keyOutput_ = OpParam::Getkey("addOut", inputs, 0);
     keyX1_ = OpParam::Getkey("addX", inputs, 1);
@@ -1902,7 +1902,7 @@
     } else if (keyY1_ == keyOutput_) {
       bias1_ = OpParam::InputXFrom1<GType>(inputs, *scope);
     }
-    this->output_ = OpParam::OutFrom<GType>(outputs, scope);
+    this->output_ = OpParam::OutFrom<GType>(outputs, *scope);
   }
   const GType *InputAlpha() const { return alpha_; }
   const std::string &Mode() const { return mode_; }
@@ -1937,13 +1937,13 @@ class FusionConvAddBNReluParam : public ConvParam<Dtype> {
       : ConvParam<Dtype>(inputs, outputs, attrs, scope) {
     bias_ = OpParam::InputYFrom<GType>(inputs, *scope);
     axis_ = OpParam::GetAttr<int>("axis", attrs);
-    input_bias_ = OpParam::InputBiasFrom<GType>(inputs, scope);
-    input_mean_ = OpParam::InputMeanFrom<GType>(inputs, scope);
-    input_scale_ = OpParam::InputScaleFrom<GType>(inputs, scope);
-    input_variance_ = OpParam::InputVarianceFrom<GType>(inputs, scope);
+    input_bias_ = OpParam::InputBiasFrom<GType>(inputs, *scope);
+    input_mean_ = OpParam::InputMeanFrom<GType>(inputs, *scope);
+    input_scale_ = OpParam::InputScaleFrom<GType>(inputs, *scope);
+    input_variance_ = OpParam::InputVarianceFrom<GType>(inputs, *scope);
     epsilon_ = OpParam::GetAttr<float>("epsilon", attrs);
     momentum_ = OpParam::GetAttr<float>("momentum", attrs);
-    this->output_ = OpParam::OutFrom<GType>(outputs, scope);
+    this->output_ = OpParam::OutFrom<GType>(outputs, *scope);
   }
 
   GType *Bias() const { return bias_; }
@@ -1996,10 +1996,10 @@ class FusionConvBNAddReluParam : public ConvParam<Dtype> {
       : ConvParam<Dtype>(inputs, outputs, attrs, scope) {
     bias_ = OpParam::InputYFrom<GType>(inputs, *scope);
     axis_ = OpParam::GetAttr<int>("axis", attrs);
-    input_bias_ = OpParam::InputBiasFrom<GType>(inputs, scope);
-    input_mean_ = OpParam::InputMeanFrom<GType>(inputs, scope);
-    input_scale_ = OpParam::InputScaleFrom<GType>(inputs, scope);
-    input_variance_ = OpParam::InputVarianceFrom<GType>(inputs, scope);
+    input_bias_ = OpParam::InputBiasFrom<GType>(inputs, *scope);
+    input_mean_ = OpParam::InputMeanFrom<GType>(inputs, *scope);
+    input_scale_ = OpParam::InputScaleFrom<GType>(inputs, *scope);
+    input_variance_ = OpParam::InputVarianceFrom<GType>(inputs, *scope);
     epsilon_ = OpParam::GetAttr<float>("epsilon", attrs);
     momentum_ = OpParam::GetAttr<float>("momentum", attrs);
     keyBNY_ = OpParam::Getkey("BNY", inputs, 0);
@@ -2010,7 +2010,7 @@
     } else if (keyY_ == keyBNY_) {
       bias_ = OpParam::InputXFrom<GType>(inputs, *scope);
     }
-    this->output_ = OpParam::OutFrom<GType>(outputs, scope);
+    this->output_ = OpParam::OutFrom<GType>(outputs, *scope);
   }
 
   GType *Bias() const { return bias_; }
@@ -2064,13 +2064,13 @@ class FusionConvBNParam : public ConvParam<Dtype> {
                     const VariableNameMap &outputs, const AttributeMap &attrs,
                     Scope *scope)
       : ConvParam<Dtype>(inputs, outputs, attrs, scope) {
-    input_bias_ = OpParam::InputBiasFrom<GType>(inputs, scope);
-    input_mean_ = OpParam::InputMeanFrom<GType>(inputs, scope);
-    input_scale_ = OpParam::InputScaleFrom<GType>(inputs, scope);
-    input_variance_ = OpParam::InputVarianceFrom<GType>(inputs, scope);
+    input_bias_ = OpParam::InputBiasFrom<GType>(inputs, *scope);
+    input_mean_ = OpParam::InputMeanFrom<GType>(inputs, *scope);
+    input_scale_ = OpParam::InputScaleFrom<GType>(inputs, *scope);
+    input_variance_ = OpParam::InputVarianceFrom<GType>(inputs, *scope);
     epsilon_ = OpParam::GetAttr<float>("epsilon", attrs);
     momentum_ = OpParam::GetAttr<float>("momentum", attrs);
-    this->output_ = OpParam::OutputYFrom<GType>(outputs, scope);
+    this->output_ = OpParam::OutputYFrom<GType>(outputs, *scope);
   }
 
   const GType *InputBias() const { return input_bias_; }
@@ -2118,13 +2118,13 @@ class FusionConvAddBNParam : public ConvParam<Dtype> {
       : ConvParam<Dtype>(inputs, outputs, attrs, scope) {
     bias_ = OpParam::InputYFrom<GType>(inputs, *scope);
     axis_ = OpParam::GetAttr<int>("axis", attrs);
-    input_bias_ = OpParam::InputBiasFrom<GType>(inputs, scope);
-    input_mean_ = OpParam::InputMeanFrom<GType>(inputs, scope);
-    input_scale_ = OpParam::InputScaleFrom<GType>(inputs, scope);
-    input_variance_ = OpParam::InputVarianceFrom<GType>(inputs, scope);
+    input_bias_ = OpParam::InputBiasFrom<GType>(inputs, *scope);
+    input_mean_ = OpParam::InputMeanFrom<GType>(inputs, *scope);
+    input_scale_ = OpParam::InputScaleFrom<GType>(inputs, *scope);
+    input_variance_ = OpParam::InputVarianceFrom<GType>(inputs, *scope);
     epsilon_ = OpParam::GetAttr<float>("epsilon", attrs);
     momentum_ = OpParam::GetAttr<float>("momentum", attrs);
-    this->output_ = OpParam::OutputYFrom<GType>(outputs, scope);
+    this->output_ = OpParam::OutputYFrom<GType>(outputs, *scope);
   }
 
   GType *Bias() const { return bias_; }
@@ -2175,13 +2175,13 @@ class FusionDWConvBNReluParam : public ConvParam<Dtype> {
                           const VariableNameMap &outputs,
                           const AttributeMap &attrs, Scope *scope)
       : ConvParam<Dtype>(inputs, outputs, attrs, scope) {
-    input_bias_ = OpParam::InputBiasFrom<GType>(inputs, scope);
-    input_mean_ = OpParam::InputMeanFrom<GType>(inputs, scope);
-    input_scale_ = OpParam::InputScaleFrom<GType>(inputs, scope);
-    input_variance_ = OpParam::InputVarianceFrom<GType>(inputs, scope);
+    input_bias_ = OpParam::InputBiasFrom<GType>(inputs, *scope);
+    input_mean_ = OpParam::InputMeanFrom<GType>(inputs, *scope);
+    input_scale_ = OpParam::InputScaleFrom<GType>(inputs, *scope);
+    input_variance_ = OpParam::InputVarianceFrom<GType>(inputs, *scope);
     epsilon_ = OpParam::GetAttr<float>("epsilon", attrs);
     momentum_ = OpParam::GetAttr<float>("momentum", attrs);
-    this->output_ = OpParam::OutFrom<GType>(outputs, scope);
+    this->output_ = OpParam::OutFrom<GType>(outputs, *scope);
   }
 
   const GType *InputBias() const { return input_bias_; }
@@ -2228,13 +2228,13 @@ class FusionConvBNReluParam : public ConvParam<Dtype> {
                         const VariableNameMap &outputs,
                         const AttributeMap &attrs, Scope *scope)
       : ConvParam<Dtype>(inputs, outputs, attrs, scope) {
-    input_bias_ = OpParam::InputBiasFrom<GType>(inputs, scope);
-    input_mean_ = OpParam::InputMeanFrom<GType>(inputs, scope);
-    input_scale_ = OpParam::InputScaleFrom<GType>(inputs, scope);
-    input_variance_ = OpParam::InputVarianceFrom<GType>(inputs, scope);
+    input_bias_ = OpParam::InputBiasFrom<GType>(inputs, *scope);
+    input_mean_ = OpParam::InputMeanFrom<GType>(inputs, *scope);
+    input_scale_ = OpParam::InputScaleFrom<GType>(inputs, *scope);
+    input_variance_ = OpParam::InputVarianceFrom<GType>(inputs, *scope);
     epsilon_ = OpParam::GetAttr<float>("epsilon", attrs);
     momentum_ = OpParam::GetAttr<float>("momentum", attrs);
-    this->output_ = OpParam::OutFrom<GType>(outputs, scope);
+    this->output_ = OpParam::OutFrom<GType>(outputs, *scope);
   }
 
   const GType *InputBias() const { return input_bias_; }
@@ -3285,10 +3285,10 @@ class IncrementParam : public OpParam {
  public:
   IncrementParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
-                 const AttributeMap &attrs, const Scope &scope)
+                 const AttributeMap &attrs, Scope *scope)
       : OpParam(inputs, outputs, attrs, scope) {
-    input_x_ = InputXFrom<GType>(inputs, scope);
-    output_ = OutFrom<GType>(outputs, scope);
+    input_x_ = InputXFrom<GType>(inputs, *scope);
+    output_ = OutFrom<GType>(outputs, *scope);
     step_ = OpParam::GetAttr<int>("step", attrs);
   }
diff --git a/src/operators/pad2d.cpp b/src/operators/pad2d.cpp
deleted file mode 100644
index 886c53612d..0000000000
--- a/src/operators/pad2d.cpp
+++ /dev/null
@@ -1,42 +0,0 @@
-/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#ifdef PAD2D_OP
-
-#include "operators/pad2d.h"
-
-namespace paddle_mobile {
-namespace operators {
-
-template <typename Dtype, typename T>
-void Pad2DOp<Dtype, T>::InferShape() const {
-  auto input_dims = this->param_.input_->dims();
-  const auto &paddings = this->param_.paddings_;
-  PADDLE_MOBILE_ENFORCE(paddings.size() == 4,
-                        "Size of paddings should be equal to 4.");
-
-  input_dims[2] += paddings[0] + paddings[1];
-  input_dims[3] += paddings[2] + paddings[3];
-  this->param_.output_->Resize(input_dims);
-}
-
-}  // namespace operators
-}  // namespace paddle_mobile
-
-namespace ops = paddle_mobile::operators;
-#ifdef PADDLE_MOBILE_CPU
-REGISTER_OPERATOR_CPU(pad2d, ops::Pad2DOp);
-#endif
-
-#endif  // PAD2D_OP
diff --git a/src/operators/pad2d.h b/src/operators/pad2d.h
deleted file mode 100644
index 1a80cbac40..0000000000
--- a/src/operators/pad2d.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#ifdef PAD2D_OP
-
-#pragma once
-
-#include <string>
-#include "framework/operator.h"
-#include "operators/kernel/pad2d_kernel.h"
-#include "operators/op_param.h"
-
-namespace paddle_mobile {
-namespace operators {
-
-DECLARE_OPERATOR(Pad2D, Pad2DParam, Pad2DKernel);
-
-}  // namespace operators
-}  // namespace paddle_mobile
-
-#endif  // PAD2D_OP
diff --git a/src/operators/pad2d_op.cpp b/src/operators/pad2d_op.cpp
index e7eda00d08..3d0fdf44d5 100644
--- a/src/operators/pad2d_op.cpp
+++ b/src/operators/pad2d_op.cpp
@@ -19,14 +19,15 @@ namespace paddle_mobile {
 namespace operators {
 
 template <typename Dtype, typename T>
-void Pad2dOp<Dtype, T>::InferShape() const {
-  auto input_dims = this->param_.InputX()->dims();
-  auto input_n = input_dims[0];
-  auto input_c = input_dims[1];
-  auto input_h = input_dims[2];
-  auto input_w = input_dims[3];
-
-  this->param_.Out()->Resize({input_n, input_c, input_h + 1, input_w + 1});
+void Pad2DOp<Dtype, T>::InferShape() const {
+  auto input_dims = this->param_.input_->dims();
+  const auto &paddings = this->param_.paddings_;
+  PADDLE_MOBILE_ENFORCE(paddings.size() == 4,
+                        "Size of paddings should be equal to 4.");
+
+  input_dims[2] += paddings[0] + paddings[1];
+  input_dims[3] += paddings[2] + paddings[3];
+  this->param_.output_->Resize(input_dims);
 }
 
 }  // namespace operators
@@ -34,10 +35,7 @@ void Pad2DOp<Dtype, T>::InferShape() const {
 
 namespace ops = paddle_mobile::operators;
 #ifdef PADDLE_MOBILE_CPU
-REGISTER_OPERATOR_CPU(pad2d, ops::Pad2dOp);
-#endif
-#ifdef PADDLE_MOBILE_FPGA
-REGISTER_OPERATOR_FPGA(pad2d, ops::Pad2dOp);
+REGISTER_OPERATOR_CPU(pad2d, ops::Pad2DOp);
 #endif
 
-#endif
+#endif  // PAD2D_OP
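
The rewritten InferShape (moved here from the deleted pad2d.cpp) derives the output extent from the four-element `paddings` attribute instead of the previous hard-coded +1 per axis. A worked check with illustrative values:

// paddings = {top, bottom, left, right}: the first pair grows H, the second grows W.
// input dims {1, 3, 48, 512}, paddings {1, 1, 2, 2}
//   H: 48 + 1 + 1  = 50
//   W: 512 + 2 + 2 = 516
// => output dims {1, 3, 50, 516}
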
diff --git a/src/operators/pad2d_op.h b/src/operators/pad2d_op.h
index 761e2b837d..1a80cbac40 100644
--- a/src/operators/pad2d_op.h
+++ b/src/operators/pad2d_op.h
@@ -17,33 +17,16 @@ limitations under the License. */
 
 #pragma once
 
 #include <string>
-
 #include "framework/operator.h"
 #include "operators/kernel/pad2d_kernel.h"
 #include "operators/op_param.h"
 
 namespace paddle_mobile {
 namespace operators {
-using framework::AttributeMap;
-using framework::OperatorWithKernel;
-using framework::Scope;
-using std::string;
-template <typename DeviceType, typename T>
-class Pad2dOp
-    : public OperatorWithKernel<DeviceType, Pad2dParam<DeviceType>,
-                                operators::Pad2dKernel<DeviceType, T>> {
- public:
-  Pad2dOp(const string &type, const VariableNameMap &inputs,
-          const VariableNameMap &outputs, const AttributeMap &attrs,
-          std::shared_ptr<Scope> scope)
-      : OperatorWithKernel<DeviceType, Pad2dParam<DeviceType>,
-                           operators::Pad2dKernel<DeviceType, T>>(
-            type, inputs, outputs, attrs, scope) {}
-  void InferShape() const override;
-
- private:
-};
+
+DECLARE_OPERATOR(Pad2D, Pad2DParam, Pad2DKernel);
+
 }  // namespace operators
 }  // namespace paddle_mobile
 
-#endif
+#endif  // PAD2D_OP
diff --git a/test/net/test_ocr.cpp b/test/net/test_ocr.cpp
index 5b8b245e9c..661b6e5cbf 100644
--- a/test/net/test_ocr.cpp
+++ b/test/net/test_ocr.cpp
@@ -20,11 +20,11 @@ limitations under the License. */
 void load_images(const char *image_dir, const char *images_list,
                  std::vector<std::string> *image_names,
                  std::vector<std::pair<int, int>> *image_shapes) {
-  int height, width;
+  int channel, height, width;
   std::string filename;
   std::ifstream if_list(images_list, std::ios::in);
   while (!if_list.eof()) {
-    if_list >> height >> width >> filename;
+    if_list >> channel >> height >> width >> filename;
     image_shapes->push_back(std::make_pair(height, width));
     image_names->push_back(filename);
   }
@@ -32,20 +32,25 @@ void load_images(const char *image_dir, const char *images_list,
 }
 
 int main(int argc, char **argv) {
-  if (argc < 4) {
-    std::cerr << "Usage: ./test_ocr model_dir image_dir images_list."
-              << std::endl;
+  if (argc < 5) {
+    std::cerr
+        << "Usage: ./test_ocr model_dir image_dir images_list output_name."
+        << std::endl;
     return 1;
   }
   char *model_dir = argv[1];
   char *image_dir = argv[2];
   char *images_list = argv[3];
+  char *output_name = argv[4];
 
   paddle_mobile::PaddleMobile<paddle_mobile::CPU> paddle_mobile;
-  paddle_mobile.SetThreadNum(8);
+  paddle_mobile.SetThreadNum(1);
   auto isok = paddle_mobile.Load(std::string(model_dir) + "/model",
                                  std::string(model_dir) + "/params", true,
                                  false, 1, true);
+  //  auto isok = paddle_mobile.Load(std::string(model_dir), false,
+  //                                 false, 1, true);
+
   DLOG << "pass init model";
   std::vector<std::string> image_names;
   std::vector<std::pair<int, int>> image_shapes;
@@ -55,7 +60,7 @@ int main(int argc, char **argv) {
   for (int i = 0; i < image_names.size(); i++) {
     std::string file_name = image_names[i];
     std::vector<float> input_vec;
-    std::vector<int64_t> dims{1, 1, 48, 512};
+    std::vector<int64_t> dims{1, 3, 48, 512};
     dims[2] = image_shapes[i].first;
     dims[3] = image_shapes[i].second;
     // load input image
@@ -64,23 +69,24 @@ int main(int argc, char **argv) {
     std::cerr << "shape = [" << dims[0] << ", " << dims[1] << ", " << dims[2]
               << ", " << dims[3] << "]" << std::endl;
     GetInput<float>(img_path, &input_vec, dims);
-    framework::Tensor input(input_vec, framework::make_ddim(dims));
+    // framework::Tensor input(input_vec, framework::make_ddim(dims));
     // predict
-    paddle_mobile.Predict(input);
-    auto output_topk = paddle_mobile.Fetch("top_k_1.tmp_0");
-    auto output_indices = paddle_mobile.Fetch("cast_68.tmp_0");
+    // for (int j = 0; j < 10000; ++j) {
+    auto time3 = paddle_mobile::time();
+    paddle_mobile.Predict(input_vec, dims);
+    auto output_topk = paddle_mobile.Fetch(output_name);
+    auto time4 = paddle_mobile::time();
+    std::cerr << "average predict elapsed: "
+              << paddle_mobile::time_diff(time3, time4) << "ms" << std::endl;
+    // }
+
     // print result
-    std::cerr << file_name << std::endl;
+    std::cerr << output_name << std::endl;
     std::cerr << output_topk->data<float>()[0];
     for (int j = 1; j < output_topk->numel(); ++j) {
       std::cerr << " " << output_topk->data<float>()[j];
     }
     std::cerr << std::endl;
-    std::cerr << output_indices->data<float>()[0];
-    for (int j = 1; j < output_indices->numel(); ++j) {
-      std::cerr << " " << output_indices->data<float>()[j];
-    }
-    std::cerr << std::endl;
   }
   return 0;
 }
diff --git a/tools/op.cmake b/tools/op.cmake
index 3b5a98023f..84b5bb6ef0 100755
--- a/tools/op.cmake
+++ b/tools/op.cmake
@@ -624,6 +624,7 @@ if (BEAM_SEARCH_OP)
 endif()
 if (BEAM_SEARCH_DECODE_OP)
     add_definitions(-DBEAM_SEARCH_DECODE_OP)
+endif()
 if (FUSION_DECONVADDBNRELU_OP)
     add_definitions(-DFUSION_DECONVADDBNRELU_OP)
 endif()
-- 
GitLab