From 61125ce121ed04ba17f77ee01c98e07e60f60d10 Mon Sep 17 00:00:00 2001
From: liuruilong
Date: Fri, 15 Jun 2018 19:37:51 +0800
Subject: [PATCH] adjust the operator structure

---
 src/framework/operator.cpp                    |  4 ----
 src/framework/operator.h                      | 17 ++++++++++---
 src/operators/batchnorm_op.cpp                |  4 ++--
 src/operators/batchnorm_op.h                  | 13 +++-------
 src/operators/box_coder_op.cpp                | 10 ++++----
 src/operators/box_coder_op.h                  | 15 ++++--------
 src/operators/concat_op.cpp                   |  6 ++---
 src/operators/concat_op.h                     | 15 ++++--------
 src/operators/conv_op.cpp                     | 14 +++++------
 src/operators/conv_op.h                       | 16 ++++---------
 src/operators/depthwise_conv_op.cpp           | 14 +++++------
 src/operators/depthwise_conv_op.h             | 16 ++++---------
 src/operators/elementwise_add_op.cpp          |  4 ++--
 src/operators/elementwise_add_op.h            | 15 ++++--------
 src/operators/fusion_conv_add.cpp             | 14 +++++------
 src/operators/fusion_conv_add.h               | 16 ++++---------
 src/operators/fusion_fc_op.cpp                | 10 ++++----
 src/operators/fusion_fc_op.h                  | 15 ++++--------
 src/operators/kernel/arm/batchnorm_kernel.cpp | 24 ++++++++++---------
 src/operators/kernel/mali/conv_kernel.cpp     |  5 +++-
 src/operators/lrn_op.cpp                      |  4 ++--
 src/operators/lrn_op.h                        | 16 ++++---------
 src/operators/mul_op.cpp                      | 10 ++++----
 src/operators/mul_op.h                        | 15 ++++--------
 src/operators/multiclass_nms_op.cpp           |  6 ++---
 src/operators/multiclass_nms_op.h             | 15 ++++--------
 src/operators/pool_op.cpp                     | 14 +++++------
 src/operators/pool_op.h                       | 14 +++-------
 src/operators/prior_box_op.cpp                | 18 +++++++-------
 src/operators/prior_box_op.h                  | 15 ++++--------
 src/operators/relu_op.cpp                     |  4 ++--
 src/operators/relu_op.h                       | 22 ++++-------------
 src/operators/reshape_op.cpp                  |  6 ++---
 src/operators/reshape_op.h                    | 15 ++++--------
 src/operators/sigmoid_op.cpp                  |  2 +-
 src/operators/sigmoid_op.h                    | 18 ++++----------
 src/operators/softmax_op.cpp                  |  2 +-
 src/operators/softmax_op.h                    | 16 ++++---------
 src/operators/transpose_op.cpp                |  6 ++---
 src/operators/transpose_op.h                  | 17 ++++---------
 test/CMakeLists.txt                           |  4 ++++
 test/net/test_mobilenet.cpp                   |  8 +++----
 42 files changed, 183 insertions(+), 311 deletions(-)

diff --git a/src/framework/operator.cpp b/src/framework/operator.cpp
index 0e8d6f9780..36b4663cb6 100644
--- a/src/framework/operator.cpp
+++ b/src/framework/operator.cpp
@@ -61,9 +61,5 @@ template class OperatorBase;
 template class OperatorBase;
 template class OperatorBase;
 
-template class OperatorWithKernel;
-template class OperatorWithKernel;
-template class OperatorWithKernel;
-
 }  // namespace framework
 }  // namespace paddle_mobile
diff --git a/src/framework/operator.h b/src/framework/operator.h
index 2ea7626711..2b26264ddb 100644
--- a/src/framework/operator.h
+++ b/src/framework/operator.h
@@ -103,16 +103,24 @@ class OperatorBase {
 /*
  * @b 这个类为所有带有运算的 op 的父类, 这个 op 继承与 OperatorBase
  * */
-template <typename Dtype>
+template <typename Dtype, typename ParamType, typename KernelType>
 class OperatorWithKernel : public OperatorBase<Dtype> {
  public:
   OperatorWithKernel(const std::string &type, const VariableNameMap &inputs,
                      const VariableNameMap &outputs, const AttributeMap &attrs,
                      std::shared_ptr<Scope> scope)
-      : OperatorBase<Dtype>(type, inputs, outputs, attrs, scope) {}
+      : OperatorBase<Dtype>(type, inputs, outputs, attrs, scope), param_(inputs, outputs, attrs, *scope){
+    kernel_.Init(param_);
+  }
+
+  virtual void RunImpl() const {
+    this->kernel_.Compute(this->param_);
+  }
 
-  virtual void RunImpl() const = 0;
   virtual void InferShape() const = 0;
+ protected:
+  KernelType kernel_;
+  ParamType param_;
 };
 
 /*
@@ -127,6 +135,9 @@ class OpKernelBase {
    *    所有结构体存在与: paddle-mobile/src/operators/op_param.h
    * */
   virtual void Compute(const 
P ¶) const = 0; + virtual bool Init(const P ¶) const { + return true; + }; virtual ~OpKernelBase() = default; }; diff --git a/src/operators/batchnorm_op.cpp b/src/operators/batchnorm_op.cpp index e36cb24b7c..672e990be4 100644 --- a/src/operators/batchnorm_op.cpp +++ b/src/operators/batchnorm_op.cpp @@ -23,8 +23,8 @@ namespace operators { template void BatchNormOp::InferShape() const { - auto x_dims = param_.InputX()->dims(); - param_.OutputY()->Resize(x_dims); + auto x_dims = this->param_.InputX()->dims(); + this->param_.OutputY()->Resize(x_dims); } template class BatchNormOp; } // namespace operators diff --git a/src/operators/batchnorm_op.h b/src/operators/batchnorm_op.h index ea774662ab..876103a689 100644 --- a/src/operators/batchnorm_op.h +++ b/src/operators/batchnorm_op.h @@ -25,26 +25,19 @@ namespace paddle_mobile { namespace operators { using std::string; template -class BatchNormOp : public framework::OperatorWithKernel { +class BatchNormOp : public framework::OperatorWithKernel> { public: BatchNormOp(const string &type, const VariableNameMap &inputs, const VariableNameMap &outputs, const framework::AttributeMap &attrs, std::shared_ptr scope) - : framework::OperatorWithKernel(type, inputs, outputs, attrs, - scope), - param_(inputs, outputs, attrs, *scope) {} - - void RunImpl() const { - operators::BatchNormKernel kernel; - kernel.Compute(param_); + : framework::OperatorWithKernel>(type, inputs, outputs, attrs, + scope) { } - using framework::OperatorWithKernel::OperatorWithKernel; void InferShape() const override; protected: - BatchNormParam param_; }; } // namespace operators diff --git a/src/operators/box_coder_op.cpp b/src/operators/box_coder_op.cpp index 8dc5da43d0..31891ed742 100644 --- a/src/operators/box_coder_op.cpp +++ b/src/operators/box_coder_op.cpp @@ -21,11 +21,11 @@ namespace operators { template void BoxCoderOp::InferShape() const { - auto input_priorbox_dims = param_.InputPriorBox()->dims(); - auto input_priorboxvar_dims = param_.InputPriorBoxVar()->dims(); - auto input_targetbox_dims = param_.InputTargetBox()->dims(); + auto input_priorbox_dims = this->param_.InputPriorBox()->dims(); + auto input_priorboxvar_dims = this->param_.InputPriorBoxVar()->dims(); + auto input_targetbox_dims = this->param_.InputTargetBox()->dims(); - auto code_type = param_.CodeType(); + auto code_type = this->param_.CodeType(); if (code_type == "encode_center_size") { if (input_targetbox_dims.size() != 2) { @@ -44,7 +44,7 @@ void BoxCoderOp::InferShape() const { LOG(kLOG_ERROR) << " dimension not match"; } } - param_.OutputBox()->Resize(framework::make_ddim( + this->param_.OutputBox()->Resize(framework::make_ddim( {input_targetbox_dims[0], input_priorbox_dims[0], 4})); } template class BoxCoderOp; diff --git a/src/operators/box_coder_op.h b/src/operators/box_coder_op.h index 001ef20023..1c6f706d77 100644 --- a/src/operators/box_coder_op.h +++ b/src/operators/box_coder_op.h @@ -28,26 +28,19 @@ namespace operators { using paddle_mobile::framework::Tensor; template -class BoxCoderOp : public framework::OperatorWithKernel { +class BoxCoderOp : public framework::OperatorWithKernel> { public: BoxCoderOp(const std::string &type, const VariableNameMap &inputs, const VariableNameMap &outputs, const framework::AttributeMap &attrs, std::shared_ptr scope) - : framework::OperatorWithKernel(type, inputs, outputs, attrs, - scope), - param_(inputs, outputs, attrs, *scope) {} + : framework::OperatorWithKernel>(type, inputs, outputs, attrs, + scope) {} - void RunImpl() const { - operators::BoxCoderKernel 
kernel; - kernel.Compute(param_); - } - - using framework::OperatorWithKernel::OperatorWithKernel; + using framework::OperatorWithKernel>::OperatorWithKernel; void InferShape() const override; protected: - BoxCoderParam param_; }; } // namespace operators diff --git a/src/operators/concat_op.cpp b/src/operators/concat_op.cpp index b9eacde60e..f5a9c3d81e 100644 --- a/src/operators/concat_op.cpp +++ b/src/operators/concat_op.cpp @@ -21,7 +21,7 @@ namespace operators { template void ConcatOp::InferShape() const { - auto inputs = param_.Inputs(); + auto inputs = this->param_.Inputs(); const size_t n = inputs.size(); std::vector inputs_dims; @@ -30,7 +30,7 @@ void ConcatOp::InferShape() const { inputs_dims.push_back(inputs[i]->dims()); } - auto axis = static_cast(param_.Axis()); + auto axis = static_cast(this->param_.Axis()); if (n == 1) { DLOG << "Warning: concat op have only one input, " @@ -54,7 +54,7 @@ void ConcatOp::InferShape() const { out_dims[axis] = -1; } - param_.Out()->Resize(out_dims); + this->param_.Out()->Resize(out_dims); } template class ConcatOp; diff --git a/src/operators/concat_op.h b/src/operators/concat_op.h index fff704e4d8..974924b1f0 100644 --- a/src/operators/concat_op.h +++ b/src/operators/concat_op.h @@ -24,25 +24,18 @@ namespace paddle_mobile { namespace operators { using std::string; template -class ConcatOp : public framework::OperatorWithKernel { +class ConcatOp : public framework::OperatorWithKernel> { public: ConcatOp(const string &type, const VariableNameMap &inputs, const VariableNameMap &outputs, const framework::AttributeMap &attrs, std::shared_ptr scope) - : framework::OperatorWithKernel(type, inputs, outputs, attrs, - scope), - param_(inputs, outputs, attrs, *scope) {} - - void RunImpl() const { - operators::ConcatKernel kernel; - kernel.Compute(param_); + : framework::OperatorWithKernel>(type, inputs, outputs, attrs, + scope) { } - using framework::OperatorWithKernel::OperatorWithKernel; + using framework::OperatorWithKernel>::OperatorWithKernel; void InferShape() const override; - protected: - ConcatParam param_; }; } // namespace operators diff --git a/src/operators/conv_op.cpp b/src/operators/conv_op.cpp index 4be442f616..b4910eb26e 100644 --- a/src/operators/conv_op.cpp +++ b/src/operators/conv_op.cpp @@ -24,12 +24,12 @@ namespace operators { template void ConvOp::InferShape() const { - auto in_dims = param_.Input()->dims(); - auto filter_dims = param_.Filter()->dims(); - const std::vector &strides = param_.Strides(); - std::vector paddings = param_.Paddings(); - int groups = param_.Groups(); - std::vector dilations = param_.Dilations(); + auto in_dims = this->param_.Input()->dims(); + auto filter_dims = this->param_.Filter()->dims(); + const std::vector &strides = this->param_.Strides(); + std::vector paddings = this->param_.Paddings(); + int groups = this->param_.Groups(); + std::vector dilations = this->param_.Dilations(); PADDLE_MOBILE_ENFORCE((in_dims.size() == filter_dims.size() && dilations.size() == paddings.size() && @@ -44,7 +44,7 @@ void ConvOp::InferShape() const { } framework::DDim ddim = framework::make_ddim(output_shape); - param_.Output()->Resize(ddim); + this->param_.Output()->Resize(ddim); } template class ConvOp; diff --git a/src/operators/conv_op.h b/src/operators/conv_op.h index 0a26ce6c3f..406d54b86e 100644 --- a/src/operators/conv_op.h +++ b/src/operators/conv_op.h @@ -24,26 +24,18 @@ namespace paddle_mobile { namespace operators { using std::string; template -class ConvOp : public framework::OperatorWithKernel { +class ConvOp 
: public framework::OperatorWithKernel> { public: ConvOp(const std::string &type, const VariableNameMap &inputs, const VariableNameMap &outputs, const framework::AttributeMap &attrs, std::shared_ptr scope) - : framework::OperatorWithKernel(type, inputs, outputs, attrs, - scope), - param_(inputs, outputs, attrs, *scope) {} + : framework::OperatorWithKernel>(type, inputs, outputs, attrs, + scope){} - using framework::OperatorWithKernel::OperatorWithKernel; + using framework::OperatorWithKernel>::OperatorWithKernel; void InferShape() const override; - void RunImpl() const { - operators::ConvKernel kernel; - kernel.Compute(param_); - this->ClearVariables({"Filter", "Input"}); - } - private: - ConvParam param_; }; inline int ConvOutputSize(int input_size, int filter_size, int dilation, diff --git a/src/operators/depthwise_conv_op.cpp b/src/operators/depthwise_conv_op.cpp index 55198fd116..be3b9a0ca8 100644 --- a/src/operators/depthwise_conv_op.cpp +++ b/src/operators/depthwise_conv_op.cpp @@ -25,12 +25,12 @@ namespace operators { template void DepthwiseConvOp::InferShape() const { - auto in_dims = param_.Input()->dims(); - auto filter_dims = param_.Filter()->dims(); - const std::vector &strides = param_.Strides(); - std::vector paddings = param_.Paddings(); - int groups = param_.Groups(); - std::vector dilations = param_.Dilations(); + auto in_dims = this->param_.Input()->dims(); + auto filter_dims = this->param_.Filter()->dims(); + const std::vector &strides = this->param_.Strides(); + std::vector paddings = this->param_.Paddings(); + int groups = this->param_.Groups(); + std::vector dilations = this->param_.Dilations(); PADDLE_MOBILE_ENFORCE((in_dims.size() == filter_dims.size() && dilations.size() == paddings.size() && @@ -45,7 +45,7 @@ void DepthwiseConvOp::InferShape() const { } framework::DDim ddim = framework::make_ddim(output_shape); - param_.Output()->Resize(ddim); + this->param_.Output()->Resize(ddim); } template class DepthwiseConvOp; diff --git a/src/operators/depthwise_conv_op.h b/src/operators/depthwise_conv_op.h index 37ba1b9ada..6534aba525 100644 --- a/src/operators/depthwise_conv_op.h +++ b/src/operators/depthwise_conv_op.h @@ -24,27 +24,19 @@ namespace paddle_mobile { namespace operators { template -class DepthwiseConvOp : public framework::OperatorWithKernel { +class DepthwiseConvOp : public framework::OperatorWithKernel> { public: DepthwiseConvOp(const std::string &type, const VariableNameMap &inputs, const VariableNameMap &outputs, const framework::AttributeMap &attrs, std::shared_ptr scope) - : framework::OperatorWithKernel(type, inputs, outputs, attrs, - scope), - param_(inputs, outputs, attrs, *scope) {} + : framework::OperatorWithKernel>(type, inputs, outputs, attrs, + scope) {} - using framework::OperatorWithKernel::OperatorWithKernel; + using framework::OperatorWithKernel>::OperatorWithKernel; void InferShape() const override; - void RunImpl() const { - operators::DepthwiseConvKernel kernel; - kernel.Compute(param_); - this->ClearVariables({"Filter", "Input"}); - } - private: - ConvParam param_; }; } // namespace operators diff --git a/src/operators/elementwise_add_op.cpp b/src/operators/elementwise_add_op.cpp index 5333dcfdb6..966bc9c1e7 100644 --- a/src/operators/elementwise_add_op.cpp +++ b/src/operators/elementwise_add_op.cpp @@ -21,8 +21,8 @@ namespace operators { template void ElementwiseAddOp::InferShape() const { - auto x_dim = param_.InputX()->dims(); - param_.Out()->Resize(x_dim); + auto x_dim = this->param_.InputX()->dims(); + 
this->param_.Out()->Resize(x_dim); } template class ElementwiseAddOp; } // namespace operators diff --git a/src/operators/elementwise_add_op.h b/src/operators/elementwise_add_op.h index 62034b14ed..f3820db12c 100644 --- a/src/operators/elementwise_add_op.h +++ b/src/operators/elementwise_add_op.h @@ -25,26 +25,19 @@ namespace paddle_mobile { namespace operators { using std::string; template -class ElementwiseAddOp : public framework::OperatorWithKernel { +class ElementwiseAddOp : public framework::OperatorWithKernel> { public: ElementwiseAddOp(const string &type, const VariableNameMap &inputs, const VariableNameMap &outputs, const framework::AttributeMap &attrs, std::shared_ptr scope) - : framework::OperatorWithKernel(type, inputs, outputs, attrs, - scope), - param_(inputs, outputs, attrs, *scope) {} + : framework::OperatorWithKernel>(type, inputs, outputs, attrs, + scope) {} - void RunImpl() const { - operators::ElementwiseAddKernel kernel; - kernel.Compute(param_); - } - - using framework::OperatorWithKernel::OperatorWithKernel; + using framework::OperatorWithKernel>::OperatorWithKernel; void InferShape() const override; protected: - ElementwiseAddParam param_; }; } // namespace operators } // namespace paddle_mobile diff --git a/src/operators/fusion_conv_add.cpp b/src/operators/fusion_conv_add.cpp index 21e13d87af..1f98cbdd8e 100644 --- a/src/operators/fusion_conv_add.cpp +++ b/src/operators/fusion_conv_add.cpp @@ -21,12 +21,12 @@ namespace operators { template void FushionConvAddOp::InferShape() const { - auto in_dims = param_.Input()->dims(); - auto filter_dims = param_.Filter()->dims(); - const std::vector &strides = param_.Strides(); - std::vector paddings = param_.Paddings(); - int groups = param_.Groups(); - std::vector dilations = param_.Dilations(); + auto in_dims = this->param_.Input()->dims(); + auto filter_dims = this->param_.Filter()->dims(); + const std::vector &strides = this->param_.Strides(); + std::vector paddings = this->param_.Paddings(); + int groups = this->param_.Groups(); + std::vector dilations = this->param_.Dilations(); PADDLE_MOBILE_ENFORCE((in_dims.size() == filter_dims.size() && dilations.size() == paddings.size() && @@ -41,7 +41,7 @@ void FushionConvAddOp::InferShape() const { } framework::DDim ddim = framework::make_ddim(output_shape); - param_.Output()->Resize(ddim); + this->param_.Output()->Resize(ddim); } template class FushionConvAddOp; } // namespace operators diff --git a/src/operators/fusion_conv_add.h b/src/operators/fusion_conv_add.h index dc35409b46..1b9b85c9c9 100644 --- a/src/operators/fusion_conv_add.h +++ b/src/operators/fusion_conv_add.h @@ -47,27 +47,19 @@ class FusionConvAddMatcher : public framework::FusionOpMatcher { }; template -class FushionConvAddOp : public framework::OperatorWithKernel { +class FushionConvAddOp : public framework::OperatorWithKernel> { public: FushionConvAddOp(const string &type, const VariableNameMap &inputs, const VariableNameMap &outputs, const framework::AttributeMap &attrs, std::shared_ptr scope) - : framework::OperatorWithKernel(type, inputs, outputs, attrs, - scope), - param_(inputs, outputs, attrs, *scope) {} + : framework::OperatorWithKernel>(type, inputs, outputs, attrs, + scope) {} - void RunImpl() const { - operators::ConvAddKernel kernel; - kernel.Compute(param_); - this->ClearVariables({"Filter", "Input", "Y"}); - } - - using framework::OperatorWithKernel::OperatorWithKernel; + using framework::OperatorWithKernel>::OperatorWithKernel; void InferShape() const override; protected: - FushionConvAddParam 
param_; }; inline int ConvOutputSize(int input_size, int filter_size, int dilation, diff --git a/src/operators/fusion_fc_op.cpp b/src/operators/fusion_fc_op.cpp index c85de86202..65c1605a5d 100644 --- a/src/operators/fusion_fc_op.cpp +++ b/src/operators/fusion_fc_op.cpp @@ -20,10 +20,10 @@ namespace operators { template void FushionFcOp::InferShape() const { - auto x_dims = param_.InputX()->dims(); - auto y_dims = param_.InputY()->dims(); - int x_num_col_dims = param_.XNumColDims(); - int y_num_col_dims = param_.YNumColDims(); + auto x_dims = this->param_.InputX()->dims(); + auto y_dims = this->param_.InputY()->dims(); + int x_num_col_dims = this->param_.XNumColDims(); + int y_num_col_dims = this->param_.YNumColDims(); assert(x_dims.size() > x_num_col_dims); assert(y_dims.size() > y_num_col_dims); @@ -47,7 +47,7 @@ void FushionFcOp::InferShape() const { } framework::DDim ddim = framework::make_ddim(output_dims); - param_.Out()->Resize(ddim); + this->param_.Out()->Resize(ddim); } template class FushionFcOp; } // namespace operators diff --git a/src/operators/fusion_fc_op.h b/src/operators/fusion_fc_op.h index 839ef07b24..cf2efa3f60 100644 --- a/src/operators/fusion_fc_op.h +++ b/src/operators/fusion_fc_op.h @@ -45,26 +45,19 @@ class FusionFcMatcher : public framework::FusionOpMatcher { }; template -class FushionFcOp : public framework::OperatorWithKernel { +class FushionFcOp : public framework::OperatorWithKernel> { public: FushionFcOp(const string &type, const VariableNameMap &inputs, const VariableNameMap &outputs, const framework::AttributeMap &attrs, std::shared_ptr scope) - : framework::OperatorWithKernel(type, inputs, outputs, attrs, - scope), - param_(inputs, outputs, attrs, *scope) {} + : framework::OperatorWithKernel>(type, inputs, outputs, attrs, + scope) {} - void RunImpl() const { - operators::FushionFcKernel kernel; - kernel.Compute(param_); - } - - using framework::OperatorWithKernel::OperatorWithKernel; + using framework::OperatorWithKernel>::OperatorWithKernel; void InferShape() const override; protected: - FushionFcParam param_; }; #ifdef PADDLE_MOBILE_CPU diff --git a/src/operators/kernel/arm/batchnorm_kernel.cpp b/src/operators/kernel/arm/batchnorm_kernel.cpp index 30d922a777..4103ef6dcd 100644 --- a/src/operators/kernel/arm/batchnorm_kernel.cpp +++ b/src/operators/kernel/arm/batchnorm_kernel.cpp @@ -61,19 +61,20 @@ void BatchNormKernel::Compute(const BatchNormParam ¶m) const { /// std = (var + epsilon).sqrt(); /// inv_std = 1 / std; for (int i = 0; i < C * 4; i += 4) { + int index = i/4; inv_std_ptr[i] = - 1 / static_cast(pow((variance_ptr[i / 4] + epsilon), 0.5)); + 1 / static_cast(pow((variance_ptr[index] + epsilon), 0.5)); inv_std_ptr[i + 1] = inv_std_ptr[i]; inv_std_ptr[i + 2] = inv_std_ptr[i]; inv_std_ptr[i + 3] = inv_std_ptr[i]; - new_scale_ptr[i] = inv_std_ptr[i] * scale_ptr[i / 4]; + new_scale_ptr[i] = inv_std_ptr[i] * scale_ptr[index]; new_scale_ptr[i + 1] = new_scale_ptr[i]; new_scale_ptr[i + 2] = new_scale_ptr[i]; new_scale_ptr[i + 3] = new_scale_ptr[i]; new_bias_ptr[i] = - bias_ptr[i / 4] - mean_ptr[i / 4] * inv_std_ptr[i] * scale_ptr[i / 4]; + bias_ptr[index] - mean_ptr[index] * inv_std_ptr[i] * scale_ptr[index]; new_bias_ptr[i + 1] = new_bias_ptr[i]; new_bias_ptr[i + 2] = new_bias_ptr[i]; @@ -164,21 +165,21 @@ void BatchNormKernel::Compute(const BatchNormParam ¶m) const { "vadd.f32 q7, q7, q10 \n\t" "vadd.f32 q8, q8, q10 \n\t" - "add %[out_ptr], %[out_ptr], r6 \n\t" + "add %[out_ptr], %[out_ptr], r6 \n\t" "vst1.32 {q1, q2}, [%[out_ptr]]! 
\n\t" - "vst1.32 {q3, q4}, [%[out_ptr]]! \n\t" - "vst1.32 {q5, q6}, [%[out_ptr]]! \n\t" - "vst1.32 {q7, q8}, [%[out_ptr]]! \n\t" + "vst1.32 {q3, q4}, [%[out_ptr]]! \n\t" + "vst1.32 {q5, q6}, [%[out_ptr]]! \n\t" + "vst1.32 {q7, q8}, [%[out_ptr]]! \n\t" - "end_remainder_%=: \n\t" + "end_remainder_%=: \n\t" "subs %[C], %[C], #1 \n\t" "bge loop_c_%= \n\t" "end_c_%=: \n\t" - "subs %[N], %[N], #1 \n\t" - "bge loop_n_%= \n\t" - "end_n_%=: \n\t" + "subs %[N], %[N], #1 \n\t" + "bge loop_n_%= \n\t" + "end_n_%=: \n\t" : : [input_x_ptr] "r"(input_x_ptr), [out_ptr] "r"(out_ptr), [new_scale_ptr] "r"(new_scale_ptr), [new_bias_ptr] "r"(new_bias_ptr), @@ -232,6 +233,7 @@ void BatchNormKernel::Compute(const BatchNormParam ¶m) const { // DLOG << "out_ptr : " << out_ptr[102]; } } + } // namespace operators } // namespace paddle_mobile diff --git a/src/operators/kernel/mali/conv_kernel.cpp b/src/operators/kernel/mali/conv_kernel.cpp index 695f937880..266fde69d8 100644 --- a/src/operators/kernel/mali/conv_kernel.cpp +++ b/src/operators/kernel/mali/conv_kernel.cpp @@ -20,7 +20,10 @@ namespace paddle_mobile { namespace operators { template <> -void ConvKernel::Compute(const ConvParam ¶m) const {} +void ConvKernel::Compute(const ConvParam ¶m) const { +// ArmConvImplement imp; +// imp.Compute(param); +} template class ConvKernel; } // namespace operators diff --git a/src/operators/lrn_op.cpp b/src/operators/lrn_op.cpp index d159cdf21b..2533ab19a5 100644 --- a/src/operators/lrn_op.cpp +++ b/src/operators/lrn_op.cpp @@ -21,8 +21,8 @@ namespace operators { template void LrnOp::InferShape() const { - auto x_dims = param_.InputX()->dims(); - param_.Out()->Resize(x_dims); + auto x_dims = this->param_.InputX()->dims(); + this->param_.Out()->Resize(x_dims); } template class LrnOp; } // namespace operators diff --git a/src/operators/lrn_op.h b/src/operators/lrn_op.h index c0f7abba0b..3e84bebb93 100644 --- a/src/operators/lrn_op.h +++ b/src/operators/lrn_op.h @@ -25,25 +25,17 @@ namespace paddle_mobile { namespace operators { using std::string; template -class LrnOp : public framework::OperatorWithKernel { +class LrnOp : public framework::OperatorWithKernel> { public: LrnOp(const string &type, const VariableNameMap &inputs, const VariableNameMap &outputs, const framework::AttributeMap &attrs, std::shared_ptr scope) - : framework::OperatorWithKernel(type, inputs, outputs, attrs, - scope), - param_(inputs, outputs, attrs, *scope) {} + : framework::OperatorWithKernel>(type, inputs, outputs, attrs, + scope) {} - void RunImpl() const { - operators::LrnKernel kernel; - kernel.Compute(param_); - } - - using framework::OperatorWithKernel::OperatorWithKernel; + using framework::OperatorWithKernel>::OperatorWithKernel; void InferShape() const override; - protected: - LrnParam param_; }; } // namespace operators diff --git a/src/operators/mul_op.cpp b/src/operators/mul_op.cpp index d33bcbfddd..d97c6ec3e4 100644 --- a/src/operators/mul_op.cpp +++ b/src/operators/mul_op.cpp @@ -21,10 +21,10 @@ namespace operators { template void MulOp::InferShape() const { - auto x_dims = param_.InputX()->dims(); - auto y_dims = param_.InputY()->dims(); - int x_num_col_dims = param_.XNumColDims(); - int y_num_col_dims = param_.YNumColDims(); + auto x_dims = this->param_.InputX()->dims(); + auto y_dims = this->param_.InputY()->dims(); + int x_num_col_dims = this->param_.XNumColDims(); + int y_num_col_dims = this->param_.YNumColDims(); assert(x_dims.size() > x_num_col_dims); assert(y_dims.size() > y_num_col_dims); @@ -48,7 +48,7 @@ void MulOp::InferShape() 
const { } framework::DDim ddim = framework::make_ddim(output_dims); - param_.Out()->Resize(ddim); + this->param_.Out()->Resize(ddim); } template class MulOp; } // namespace operators diff --git a/src/operators/mul_op.h b/src/operators/mul_op.h index 5ecf6571ae..08bda2a74b 100644 --- a/src/operators/mul_op.h +++ b/src/operators/mul_op.h @@ -25,25 +25,18 @@ namespace paddle_mobile { namespace operators { template -class MulOp : public framework::OperatorWithKernel { +class MulOp : public framework::OperatorWithKernel> { public: MulOp(const std::string &type, const VariableNameMap &inputs, const VariableNameMap &outputs, const framework::AttributeMap &attrs, std::shared_ptr scope) - : framework::OperatorWithKernel(type, inputs, outputs, attrs, - scope), - param_(inputs, outputs, attrs, *scope) {} + : framework::OperatorWithKernel>(type, inputs, outputs, attrs, + scope) {} - void RunImpl() const { - operators::MulKernel kernel; - kernel.Compute(param_); - } - - using framework::OperatorWithKernel::OperatorWithKernel; + using framework::OperatorWithKernel>::OperatorWithKernel; void InferShape() const override; protected: - MulParam param_; }; } // namespace operators diff --git a/src/operators/multiclass_nms_op.cpp b/src/operators/multiclass_nms_op.cpp index e8b5f195fe..52adf6cc62 100644 --- a/src/operators/multiclass_nms_op.cpp +++ b/src/operators/multiclass_nms_op.cpp @@ -20,8 +20,8 @@ namespace operators { template void MultiClassNMSOp::InferShape() const { - auto input_bboxes_dims = param_.InputBBoxes()->dims(); - auto input_scores_dims = param_.InputScores()->dims(); + auto input_bboxes_dims = this->param_.InputBBoxes()->dims(); + auto input_scores_dims = this->param_.InputScores()->dims(); if (input_scores_dims.size() != 3) { LOG(kLOG_ERROR) << "Input Scores size must be 3"; } @@ -32,7 +32,7 @@ void MultiClassNMSOp::InferShape() const { LOG(kLOG_ERROR) << "Predict bboxes must be equal"; } // pre size, will change in Compute. 
- param_.Out()->Resize(framework::make_ddim({input_bboxes_dims[1], 6})); + this->param_.Out()->Resize(framework::make_ddim({input_bboxes_dims[1], 6})); } template class MultiClassNMSOp; } // namespace operators diff --git a/src/operators/multiclass_nms_op.h b/src/operators/multiclass_nms_op.h index 37f3742524..2d65657c8d 100644 --- a/src/operators/multiclass_nms_op.h +++ b/src/operators/multiclass_nms_op.h @@ -28,26 +28,19 @@ namespace operators { using paddle_mobile::framework::Tensor; template -class MultiClassNMSOp : public framework::OperatorWithKernel { +class MultiClassNMSOp : public framework::OperatorWithKernel> { public: MultiClassNMSOp(const std::string &type, const VariableNameMap &inputs, const VariableNameMap &outputs, const framework::AttributeMap &attrs, std::shared_ptr scope) - : framework::OperatorWithKernel(type, inputs, outputs, attrs, - scope), - param_(inputs, outputs, attrs, *scope) {} + : framework::OperatorWithKernel>(type, inputs, outputs, attrs, + scope) {} - void RunImpl() const { - operators::MultiClassNMSKernel kernel; - kernel.Compute(param_); - } - - using framework::OperatorWithKernel::OperatorWithKernel; + using framework::OperatorWithKernel>::OperatorWithKernel; void InferShape() const override; protected: - MultiClassNMSParam param_; }; } // namespace operators diff --git a/src/operators/pool_op.cpp b/src/operators/pool_op.cpp index 79b06174af..e8a469d431 100644 --- a/src/operators/pool_op.cpp +++ b/src/operators/pool_op.cpp @@ -34,13 +34,13 @@ int PoolOutputSize(int input_size, int filter_size, int padding, int stride, } template void PoolOp::InferShape() const { - auto in_x_dims = param_.Input()->dims(); - std::vector ksize = param_.Ksize(); - std::vector paddings = param_.Paddings(); - std::vector strides = param_.Strides(); - bool ceil_mode = param_.isCeilMode(); + auto in_x_dims = this->param_.Input()->dims(); + std::vector ksize = this->param_.Ksize(); + std::vector paddings = this->param_.Paddings(); + std::vector strides = this->param_.Strides(); + bool ceil_mode = this->param_.isCeilMode(); - if (param_.isGlobalPooling()) { + if (this->param_.isGlobalPooling()) { ksize.resize(static_cast(in_x_dims.size()) - 2); for (size_t i = 0; i < ksize.size(); ++i) { paddings[i] = 0; @@ -52,7 +52,7 @@ void PoolOp::InferShape() const { output_shape.push_back(PoolOutputSize(in_x_dims[i + 2], ksize[i], paddings[i], strides[i], ceil_mode)); } - param_.Output()->Resize(framework::make_ddim(output_shape)); + this->param_.Output()->Resize(framework::make_ddim(output_shape)); } template class PoolOp; } // namespace operators diff --git a/src/operators/pool_op.h b/src/operators/pool_op.h index 8dc99ae686..b9d679963c 100644 --- a/src/operators/pool_op.h +++ b/src/operators/pool_op.h @@ -29,24 +29,16 @@ using framework::OperatorWithKernel; using framework::Scope; using std::string; template -class PoolOp : public OperatorWithKernel { +class PoolOp : public OperatorWithKernel> { public: PoolOp(const string &type, const VariableNameMap &inputs, const VariableNameMap &outputs, const AttributeMap &attrs, std::shared_ptr scope) - : OperatorWithKernel(type, inputs, outputs, attrs, scope), - param_(inputs, outputs, attrs, *scope) {} - using OperatorWithKernel::OperatorWithKernel; + : OperatorWithKernel>(type, inputs, outputs, attrs, scope) {} + using OperatorWithKernel>::OperatorWithKernel; void InferShape() const override; - void RunImpl() const { - operators::PoolKernel kernel; - kernel.Compute(param_); - this->ClearVariables({"X"}); - } - private: - PoolParam param_; }; } 
// namespace operators } // namespace paddle_mobile diff --git a/src/operators/prior_box_op.cpp b/src/operators/prior_box_op.cpp index f3ae6e5231..44e1741b66 100644 --- a/src/operators/prior_box_op.cpp +++ b/src/operators/prior_box_op.cpp @@ -21,13 +21,13 @@ namespace operators { template void PriorBoxOp::InferShape() const { - auto input_dims = param_.Input()->dims(); - auto input_image_dims = param_.InputImage()->dims(); - auto min_sizes = param_.MinSizes(); - auto max_sizes = param_.MaxSizes(); - auto variances = param_.Variances(); - auto aspect_ratios = param_.AspectRatios(); - bool flip = param_.Flip(); + auto input_dims = this->param_.Input()->dims(); + auto input_image_dims = this->param_.InputImage()->dims(); + auto min_sizes = this->param_.MinSizes(); + auto max_sizes = this->param_.MaxSizes(); + auto variances = this->param_.Variances(); + auto aspect_ratios = this->param_.AspectRatios(); + bool flip = this->param_.Flip(); std::vector aspect_ratios_vec; ExpandAspectRatios(aspect_ratios, flip, &aspect_ratios_vec); @@ -41,8 +41,8 @@ void PriorBoxOp::InferShape() const { dim_vec[1] = input_dims[3]; dim_vec[2] = num_priors; dim_vec[3] = 4; - param_.OutputBoxes()->Resize(framework::make_ddim(dim_vec)); - param_.OutputVariances()->Resize(framework::make_ddim(dim_vec)); + this->param_.OutputBoxes()->Resize(framework::make_ddim(dim_vec)); + this->param_.OutputVariances()->Resize(framework::make_ddim(dim_vec)); } template class PriorBoxOp; } // namespace operators diff --git a/src/operators/prior_box_op.h b/src/operators/prior_box_op.h index e3de58b372..ad6b901041 100644 --- a/src/operators/prior_box_op.h +++ b/src/operators/prior_box_op.h @@ -28,26 +28,19 @@ namespace operators { using paddle_mobile::framework::Tensor; template -class PriorBoxOp : public framework::OperatorWithKernel { +class PriorBoxOp : public framework::OperatorWithKernel> { public: PriorBoxOp(const std::string &type, const VariableNameMap &inputs, const VariableNameMap &outputs, const framework::AttributeMap &attrs, std::shared_ptr scope) - : framework::OperatorWithKernel(type, inputs, outputs, attrs, - scope), - param_(inputs, outputs, attrs, *scope) {} + : framework::OperatorWithKernel>(type, inputs, outputs, attrs, + scope) {} - void RunImpl() const { - operators::PriorBoxKernel kernel; - kernel.Compute(param_); - } - - using framework::OperatorWithKernel::OperatorWithKernel; + using framework::OperatorWithKernel>::OperatorWithKernel; void InferShape() const override; protected: - PriorBoxParam param_; }; } // namespace operators diff --git a/src/operators/relu_op.cpp b/src/operators/relu_op.cpp index 066772e3be..cf495d8bda 100644 --- a/src/operators/relu_op.cpp +++ b/src/operators/relu_op.cpp @@ -20,8 +20,8 @@ namespace operators { template void ReluOp::InferShape() const { - auto input_dims = param_.InputX()->dims(); - param_.Out()->Resize(input_dims); + auto input_dims = this->param_.InputX()->dims(); + this->param_.Out()->Resize(input_dims); } template class ReluOp; } // namespace operators diff --git a/src/operators/relu_op.h b/src/operators/relu_op.h index f032546c82..d8bfc8a5ec 100644 --- a/src/operators/relu_op.h +++ b/src/operators/relu_op.h @@ -28,7 +28,7 @@ namespace operators { using paddle_mobile::framework::Tensor; template -class ReluOp : public framework::OperatorWithKernel { +class ReluOp : public framework::OperatorWithKernel> { public: /* * @b op 的实例化方法, 需要调用父类的实例化方法, 以及实例化自己的参数结构体 @@ -36,27 +36,13 @@ class ReluOp : public framework::OperatorWithKernel { ReluOp(const std::string &type, const 
VariableNameMap &inputs, const VariableNameMap &outputs, const framework::AttributeMap &attrs, std::shared_ptr scope) - : framework::OperatorWithKernel(type, inputs, outputs, attrs, - scope), - param_(inputs, outputs, attrs, *scope) {} + : framework::OperatorWithKernel>(type, inputs, outputs, attrs, + scope) {} - /* - * @b op 进行运算, 调用相应的 kernel 进行运算 - * */ - void RunImpl() const { - operators::ReluKernel kernel; - kernel.Compute(param_); - } - - using framework::OperatorWithKernel::OperatorWithKernel; + using framework::OperatorWithKernel>::OperatorWithKernel; void InferShape() const override; protected: - /* - * @b Relu kernel 进行运算时所需要用到参数的结构体, - * 结构体定义在: paddle-mobile/src/operators/op_param.h - * */ - ReluParam param_; }; } // namespace operators diff --git a/src/operators/reshape_op.cpp b/src/operators/reshape_op.cpp index 5d0aa49a26..0fdcaf4d1a 100644 --- a/src/operators/reshape_op.cpp +++ b/src/operators/reshape_op.cpp @@ -22,10 +22,10 @@ namespace operators { template void ReshapeOp::InferShape() const { /// todo: add InputShape() detection. - auto &shape = param_.Shape(); - auto input_x_dims = param_.InputX()->dims(); + auto &shape = this->param_.Shape(); + auto input_x_dims = this->param_.InputX()->dims(); auto out_dims = ValidateShape(shape, input_x_dims); - param_.Out()->Resize(out_dims); + this->param_.Out()->Resize(out_dims); } template class ReshapeOp; } // namespace operators diff --git a/src/operators/reshape_op.h b/src/operators/reshape_op.h index a14c84b6be..46451fbda3 100644 --- a/src/operators/reshape_op.h +++ b/src/operators/reshape_op.h @@ -28,26 +28,19 @@ namespace operators { using paddle_mobile::framework::Tensor; template -class ReshapeOp : public framework::OperatorWithKernel { +class ReshapeOp : public framework::OperatorWithKernel> { public: ReshapeOp(const std::string &type, const VariableNameMap &inputs, const VariableNameMap &outputs, const framework::AttributeMap &attrs, std::shared_ptr scope) - : framework::OperatorWithKernel(type, inputs, outputs, attrs, - scope), - param_(inputs, outputs, attrs, *scope) {} + : framework::OperatorWithKernel>(type, inputs, outputs, attrs, + scope) {} - void RunImpl() const { - operators::ReshapeKernel kernel; - kernel.Compute(param_); - } - - using framework::OperatorWithKernel::OperatorWithKernel; + using framework::OperatorWithKernel>::OperatorWithKernel; void InferShape() const override; protected: - ReshapeParam param_; }; } // namespace operators diff --git a/src/operators/sigmoid_op.cpp b/src/operators/sigmoid_op.cpp index 641b6f29f2..79190e6c33 100644 --- a/src/operators/sigmoid_op.cpp +++ b/src/operators/sigmoid_op.cpp @@ -20,7 +20,7 @@ namespace paddle_mobile { namespace operators { template void SigmoidOp::InferShape() const { - param_.Out()->Resize(param_.InputX()->dims()); + this->param_.Out()->Resize(this->param_.InputX()->dims()); } template class SigmoidOp; } // namespace operators diff --git a/src/operators/sigmoid_op.h b/src/operators/sigmoid_op.h index 7cdeb41af1..77aff2efaf 100644 --- a/src/operators/sigmoid_op.h +++ b/src/operators/sigmoid_op.h @@ -25,28 +25,18 @@ limitations under the License. 
*/ namespace paddle_mobile { namespace operators { template -class SigmoidOp : public framework::OperatorWithKernel { +class SigmoidOp : public framework::OperatorWithKernel> { public: SigmoidOp(const std::string &type, const VariableNameMap &inputs, const VariableNameMap &outputs, const framework::AttributeMap &attrs, std::shared_ptr scope) - : framework::OperatorWithKernel(type, inputs, outputs, attrs, - scope), - param_(inputs, outputs, attrs, *scope) {} + : framework::OperatorWithKernel>(type, inputs, outputs, attrs, + scope) {} - using framework::OperatorWithKernel::OperatorWithKernel; + using framework::OperatorWithKernel>::OperatorWithKernel; void InferShape() const override; - - void RunImpl() const { - operators::SigmoidKernel kernel; - kernel.Compute(param_); - this->ClearVariables({"X"}); - } - - private: - SigmoidParam param_; }; } // namespace operators } // namespace paddle_mobile diff --git a/src/operators/softmax_op.cpp b/src/operators/softmax_op.cpp index 8d5f669466..e25b59198f 100644 --- a/src/operators/softmax_op.cpp +++ b/src/operators/softmax_op.cpp @@ -20,7 +20,7 @@ namespace paddle_mobile { namespace operators { template void SoftmaxOp::InferShape() const { - param_.Out()->Resize(param_.InputX()->dims()); + this->param_.Out()->Resize(this->param_.InputX()->dims()); } template class SoftmaxOp; } // namespace operators diff --git a/src/operators/softmax_op.h b/src/operators/softmax_op.h index 5cac4d8a33..7bf0bdb4ca 100644 --- a/src/operators/softmax_op.h +++ b/src/operators/softmax_op.h @@ -25,28 +25,20 @@ limitations under the License. */ namespace paddle_mobile { namespace operators { template -class SoftmaxOp : public framework::OperatorWithKernel { +class SoftmaxOp : public framework::OperatorWithKernel> { public: SoftmaxOp(const std::string &type, const VariableNameMap &inputs, const VariableNameMap &outputs, const framework::AttributeMap &attrs, std::shared_ptr scope) - : framework::OperatorWithKernel(type, inputs, outputs, attrs, - scope), - param_(inputs, outputs, attrs, *scope) {} + : framework::OperatorWithKernel>(type, inputs, outputs, attrs, + scope) {} - using framework::OperatorWithKernel::OperatorWithKernel; + using framework::OperatorWithKernel>::OperatorWithKernel; void InferShape() const override; - void RunImpl() const { - operators::SoftmaxKernel kernel; - kernel.Compute(param_); - this->ClearVariables({"X"}); - } - private: - SoftmaxParam param_; }; } // namespace operators } // namespace paddle_mobile diff --git a/src/operators/transpose_op.cpp b/src/operators/transpose_op.cpp index 02a3b16e8d..989b277b9d 100644 --- a/src/operators/transpose_op.cpp +++ b/src/operators/transpose_op.cpp @@ -23,8 +23,8 @@ namespace operators { template void TransposeOp::InferShape() const { - auto input_x_dims = param_.InputX()->dims(); - auto axis = param_.Axis(); + auto input_x_dims = this->param_.InputX()->dims(); + auto axis = this->param_.Axis(); size_t x_dims_size = input_x_dims.size(); size_t axis_size = axis.size(); @@ -45,7 +45,7 @@ void TransposeOp::InferShape() const { for (size_t i = 0; i < axis_size; i++) { out_dims[i] = input_x_dims[axis[i]]; } - param_.Out()->Resize(out_dims); + this->param_.Out()->Resize(out_dims); } template class TransposeOp; } // namespace operators diff --git a/src/operators/transpose_op.h b/src/operators/transpose_op.h index f65a725756..bfed556522 100644 --- a/src/operators/transpose_op.h +++ b/src/operators/transpose_op.h @@ -28,26 +28,17 @@ namespace operators { using paddle_mobile::framework::Tensor; template -class 
TransposeOp : public framework::OperatorWithKernel<DeviceType, T> {
+class TransposeOp : public framework::OperatorWithKernel<DeviceType, TransposeParam, operators::TransposeKernel<DeviceType, T>> {
  public:
   TransposeOp(const std::string &type, const VariableNameMap &inputs,
               const VariableNameMap &outputs,
               const framework::AttributeMap &attrs,
               std::shared_ptr<Scope> scope)
-      : framework::OperatorWithKernel<DeviceType, T>(type, inputs, outputs, attrs,
-                                                     scope),
-        param_(inputs, outputs, attrs, *scope) {}
+      : framework::OperatorWithKernel<DeviceType, TransposeParam, operators::TransposeKernel<DeviceType, T>>(type, inputs, outputs, attrs,
+            scope) {}
 
-  void RunImpl() const {
-    operators::TransposeKernel<DeviceType, T> kernel;
-    kernel.Compute(param_);
-  }
-
-  using framework::OperatorWithKernel<DeviceType, T>::OperatorWithKernel;
+  using framework::OperatorWithKernel<DeviceType, TransposeParam, operators::TransposeKernel<DeviceType, T>>::OperatorWithKernel;
   void InferShape() const override;
-
- protected:
-  TransposeParam param_;
 };
 
 }  // namespace operators
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index 0cf3537ad6..cc707ded7f 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -137,6 +137,10 @@ else ()
     ADD_EXECUTABLE(test-depthwise-conv-op operators/test_depthwise_conv_op.cpp test_helper.h test_include.h executor_for_test.h)
     target_link_libraries(test-depthwise-conv-op paddle-mobile)
 
+    # gen test
+    ADD_EXECUTABLE(test-mobilenet net/test_mobilenet.cpp test_helper.h test_include.h executor_for_test.h)
+    target_link_libraries(test-mobilenet paddle-mobile)
+
     #add_library(test-lib-size SHARED common/test_lib_size.h common/test_lib_size.cpp)
 endif()
diff --git a/test/net/test_mobilenet.cpp b/test/net/test_mobilenet.cpp
index 7ed9a3566e..8400b08f22 100644
--- a/test/net/test_mobilenet.cpp
+++ b/test/net/test_mobilenet.cpp
@@ -19,14 +19,14 @@ limitations under the License. */
 int main() {
   paddle_mobile::Loader<paddle_mobile::CPU> loader;
   auto time1 = time();
-  auto program = loader.Load(g_mobilenet, false);
+  auto program = loader.Load(g_mobilenet, true);
   auto time2 = time();
   DLOG << "load cost :" << time_diff(time1, time1) << "ms";
 
-  paddle_mobile::Executor<paddle_mobile::CPU> executor(program, 2, false);
+  paddle_mobile::Executor<paddle_mobile::CPU> executor(program, 1, true);
 
-  std::vector<int64_t> dims{2, 3, 224, 224};
+  std::vector<int64_t> dims{1, 3, 224, 224};
   Tensor input_tensor;
-  SetupTensor<float>(&input_tensor, {2, 3, 224, 224}, static_cast<float>(0),
+  SetupTensor<float>(&input_tensor, {1, 3, 224, 224}, static_cast<float>(0),
                      static_cast<float>(1));
   std::vector<float> input(input_tensor.data<float>(),
-- 
GitLab
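
Note on the resulting structure: the net effect of this patch is that every operator's hand-written RunImpl() goes away. OperatorWithKernel now takes the parameter struct and the kernel as template arguments, owns one instance of each, calls kernel_.Init(param_) once in its constructor, and forwards RunImpl() to kernel_.Compute(param_). The declaration-level sketch below illustrates the pattern; it leans on the repository's existing types (OperatorBase, Scope, VariableNameMap, AttributeMap, ReluParam, ReluKernel), and the exact template-argument spelling shown for the sample ReluOp follows the convention used elsewhere in the repo rather than being quoted verbatim from the tree.

// Declaration-level sketch of the structure this patch introduces; it is not
// a standalone translation unit and relies on the existing paddle-mobile
// headers for the referenced types.
namespace paddle_mobile {
namespace framework {

// The base class now owns the kernel and its parameter struct.
template <typename Dtype, typename ParamType, typename KernelType>
class OperatorWithKernel : public OperatorBase<Dtype> {
 public:
  OperatorWithKernel(const std::string &type, const VariableNameMap &inputs,
                     const VariableNameMap &outputs, const AttributeMap &attrs,
                     std::shared_ptr<Scope> scope)
      : OperatorBase<Dtype>(type, inputs, outputs, attrs, scope),
        param_(inputs, outputs, attrs, *scope) {
    kernel_.Init(param_);  // one-time kernel setup, added by this patch
  }

  // RunImpl() is no longer pure virtual: the base dispatches to the kernel it
  // owns instead of each operator constructing a kernel on every Run() call.
  virtual void RunImpl() const { this->kernel_.Compute(this->param_); }

  virtual void InferShape() const = 0;

 protected:
  KernelType kernel_;
  ParamType param_;
};

// Kernels gain an optional Init() hook next to Compute().
template <typename Dtype, typename P>
class OpKernelBase {
 public:
  virtual void Compute(const P &para) const = 0;
  virtual bool Init(const P &para) const { return true; }
  virtual ~OpKernelBase() = default;
};

}  // namespace framework
}  // namespace paddle_mobile

// A concrete operator then shrinks to roughly this (template-argument
// spelling assumed, following the pattern used by the other op headers):
namespace paddle_mobile {
namespace operators {

template <typename DeviceType, typename T>
class ReluOp
    : public framework::OperatorWithKernel<DeviceType, ReluParam,
                                           operators::ReluKernel<DeviceType, T>> {
 public:
  using framework::OperatorWithKernel<
      DeviceType, ReluParam,
      operators::ReluKernel<DeviceType, T>>::OperatorWithKernel;

  void InferShape() const override;
};

}  // namespace operators
}  // namespace paddle_mobile

Because the kernel is built once per operator rather than once per Run() call inside each RunImpl(), any one-time setup can now live in the Init() hook instead of being repeated in Compute().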