diff --git a/src/framework/operator.cpp b/src/framework/operator.cpp
index 0e8d6f97809f7a204acc74a8c5427d88a63949aa..36b4663cb603d29bb60cfc297899d1c300e8ca91 100644
--- a/src/framework/operator.cpp
+++ b/src/framework/operator.cpp
@@ -61,9 +61,5 @@
 template class OperatorBase;
 template class OperatorBase;
 template class OperatorBase;
-template class OperatorWithKernel;
-template class OperatorWithKernel;
-template class OperatorWithKernel;
-
 } // namespace framework
 } // namespace paddle_mobile
diff --git a/src/framework/operator.h b/src/framework/operator.h
index 2ea7626711c4161bbbedd5e26cdc895c27cdcd83..2b26264ddbbe8e53d0aa7e4d6c3d5a155dfd1eb2 100644
--- a/src/framework/operator.h
+++ b/src/framework/operator.h
@@ -103,16 +103,24 @@ class OperatorBase {
 /*
  * @b 这个类为所有带有运算的 op 的父类, 这个 op 继承与 OperatorBase
  * */
-template
+template
 class OperatorWithKernel : public OperatorBase {
  public:
   OperatorWithKernel(const std::string &type, const VariableNameMap &inputs,
                      const VariableNameMap &outputs, const AttributeMap &attrs,
                      std::shared_ptr scope)
-      : OperatorBase(type, inputs, outputs, attrs, scope) {}
+      : OperatorBase(type, inputs, outputs, attrs, scope), param_(inputs, outputs, attrs, *scope){
+    kernel_.Init(param_);
+  }
+
+  virtual void RunImpl() const {
+    this->kernel_.Compute(this->param_);
+  }
 
-  virtual void RunImpl() const = 0;
   virtual void InferShape() const = 0;
+ protected:
+  KernelType kernel_;
+  ParamType param_;
 };
 /*
@@ -127,6 +135,9 @@ class OpKernelBase {
   * 所有结构体存在与: paddle-mobile/src/operators/op_param.h
   * */
   virtual void Compute(const P &para) const = 0;
+  virtual bool Init(const P &para) const {
+    return true;
+  };
   virtual ~OpKernelBase() = default;
 };
diff --git a/src/operators/batchnorm_op.cpp b/src/operators/batchnorm_op.cpp
index e36cb24b7c46039463b76635536f0af4c6407824..672e990be44c11df0795b9c6f301803f8ad02285 100644
--- a/src/operators/batchnorm_op.cpp
+++ b/src/operators/batchnorm_op.cpp
@@ -23,8 +23,8 @@ namespace operators {
 template
 void BatchNormOp::InferShape() const {
-  auto x_dims = param_.InputX()->dims();
-  param_.OutputY()->Resize(x_dims);
+  auto x_dims = this->param_.InputX()->dims();
+  this->param_.OutputY()->Resize(x_dims);
 }
 template class BatchNormOp;
 } // namespace operators
diff --git a/src/operators/batchnorm_op.h b/src/operators/batchnorm_op.h
index ea774662abc093c36f75ad693aff579323becb23..876103a6890f97b292cf80fcc76784778c89c648 100644
--- a/src/operators/batchnorm_op.h
+++ b/src/operators/batchnorm_op.h
@@ -25,26 +25,19 @@ namespace paddle_mobile {
 namespace operators {
 using std::string;
 template
-class BatchNormOp : public framework::OperatorWithKernel {
+class BatchNormOp : public framework::OperatorWithKernel> {
  public:
   BatchNormOp(const string &type, const VariableNameMap &inputs,
               const VariableNameMap &outputs,
               const framework::AttributeMap &attrs,
               std::shared_ptr scope)
-      : framework::OperatorWithKernel(type, inputs, outputs, attrs,
-                                      scope),
-        param_(inputs, outputs, attrs, *scope) {}
-
-  void RunImpl() const {
-    operators::BatchNormKernel kernel;
-    kernel.Compute(param_);
+      : framework::OperatorWithKernel>(type, inputs, outputs, attrs,
+                                       scope) {
   }
-  using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape() const override;
 
  protected:
-  BatchNormParam param_;
 };
 } // namespace operators
diff --git a/src/operators/box_coder_op.cpp b/src/operators/box_coder_op.cpp
index 8dc5da43d0e5594830c48ab2934b0df1875c6a54..31891ed74266d599898dd7426eed5cd28f320ab6 100644
--- a/src/operators/box_coder_op.cpp
+++ b/src/operators/box_coder_op.cpp
@@
-21,11 +21,11 @@ namespace operators { template void BoxCoderOp::InferShape() const { - auto input_priorbox_dims = param_.InputPriorBox()->dims(); - auto input_priorboxvar_dims = param_.InputPriorBoxVar()->dims(); - auto input_targetbox_dims = param_.InputTargetBox()->dims(); + auto input_priorbox_dims = this->param_.InputPriorBox()->dims(); + auto input_priorboxvar_dims = this->param_.InputPriorBoxVar()->dims(); + auto input_targetbox_dims = this->param_.InputTargetBox()->dims(); - auto code_type = param_.CodeType(); + auto code_type = this->param_.CodeType(); if (code_type == "encode_center_size") { if (input_targetbox_dims.size() != 2) { @@ -44,7 +44,7 @@ void BoxCoderOp::InferShape() const { LOG(kLOG_ERROR) << " dimension not match"; } } - param_.OutputBox()->Resize(framework::make_ddim( + this->param_.OutputBox()->Resize(framework::make_ddim( {input_targetbox_dims[0], input_priorbox_dims[0], 4})); } template class BoxCoderOp; diff --git a/src/operators/box_coder_op.h b/src/operators/box_coder_op.h index 001ef20023a4500adec558e6f0bddb16a3c65551..1c6f706d77dd380593940f1456e531cc28f8f68a 100644 --- a/src/operators/box_coder_op.h +++ b/src/operators/box_coder_op.h @@ -28,26 +28,19 @@ namespace operators { using paddle_mobile::framework::Tensor; template -class BoxCoderOp : public framework::OperatorWithKernel { +class BoxCoderOp : public framework::OperatorWithKernel> { public: BoxCoderOp(const std::string &type, const VariableNameMap &inputs, const VariableNameMap &outputs, const framework::AttributeMap &attrs, std::shared_ptr scope) - : framework::OperatorWithKernel(type, inputs, outputs, attrs, - scope), - param_(inputs, outputs, attrs, *scope) {} + : framework::OperatorWithKernel>(type, inputs, outputs, attrs, + scope) {} - void RunImpl() const { - operators::BoxCoderKernel kernel; - kernel.Compute(param_); - } - - using framework::OperatorWithKernel::OperatorWithKernel; + using framework::OperatorWithKernel>::OperatorWithKernel; void InferShape() const override; protected: - BoxCoderParam param_; }; } // namespace operators diff --git a/src/operators/concat_op.cpp b/src/operators/concat_op.cpp index b9eacde60ee25a91769317bd441058b4afb3f296..f5a9c3d81ef34ac9ff643dd174741e083c879cbc 100644 --- a/src/operators/concat_op.cpp +++ b/src/operators/concat_op.cpp @@ -21,7 +21,7 @@ namespace operators { template void ConcatOp::InferShape() const { - auto inputs = param_.Inputs(); + auto inputs = this->param_.Inputs(); const size_t n = inputs.size(); std::vector inputs_dims; @@ -30,7 +30,7 @@ void ConcatOp::InferShape() const { inputs_dims.push_back(inputs[i]->dims()); } - auto axis = static_cast(param_.Axis()); + auto axis = static_cast(this->param_.Axis()); if (n == 1) { DLOG << "Warning: concat op have only one input, " @@ -54,7 +54,7 @@ void ConcatOp::InferShape() const { out_dims[axis] = -1; } - param_.Out()->Resize(out_dims); + this->param_.Out()->Resize(out_dims); } template class ConcatOp; diff --git a/src/operators/concat_op.h b/src/operators/concat_op.h index fff704e4d858a7c67a0e8331089d8e8d5d4639fb..974924b1f075a0df525a0167fe1cbbfe2b2e5ce0 100644 --- a/src/operators/concat_op.h +++ b/src/operators/concat_op.h @@ -24,25 +24,18 @@ namespace paddle_mobile { namespace operators { using std::string; template -class ConcatOp : public framework::OperatorWithKernel { +class ConcatOp : public framework::OperatorWithKernel> { public: ConcatOp(const string &type, const VariableNameMap &inputs, const VariableNameMap &outputs, const framework::AttributeMap &attrs, std::shared_ptr scope) - : 
framework::OperatorWithKernel(type, inputs, outputs, attrs, - scope), - param_(inputs, outputs, attrs, *scope) {} - - void RunImpl() const { - operators::ConcatKernel kernel; - kernel.Compute(param_); + : framework::OperatorWithKernel>(type, inputs, outputs, attrs, + scope) { } - using framework::OperatorWithKernel::OperatorWithKernel; + using framework::OperatorWithKernel>::OperatorWithKernel; void InferShape() const override; - protected: - ConcatParam param_; }; } // namespace operators diff --git a/src/operators/conv_op.cpp b/src/operators/conv_op.cpp index 4be442f6169f65be9cffb2710f4f3aae40e9c905..b4910eb26eda0afc353c57a68eabece07c3d0f50 100644 --- a/src/operators/conv_op.cpp +++ b/src/operators/conv_op.cpp @@ -24,12 +24,12 @@ namespace operators { template void ConvOp::InferShape() const { - auto in_dims = param_.Input()->dims(); - auto filter_dims = param_.Filter()->dims(); - const std::vector &strides = param_.Strides(); - std::vector paddings = param_.Paddings(); - int groups = param_.Groups(); - std::vector dilations = param_.Dilations(); + auto in_dims = this->param_.Input()->dims(); + auto filter_dims = this->param_.Filter()->dims(); + const std::vector &strides = this->param_.Strides(); + std::vector paddings = this->param_.Paddings(); + int groups = this->param_.Groups(); + std::vector dilations = this->param_.Dilations(); PADDLE_MOBILE_ENFORCE((in_dims.size() == filter_dims.size() && dilations.size() == paddings.size() && @@ -44,7 +44,7 @@ void ConvOp::InferShape() const { } framework::DDim ddim = framework::make_ddim(output_shape); - param_.Output()->Resize(ddim); + this->param_.Output()->Resize(ddim); } template class ConvOp; diff --git a/src/operators/conv_op.h b/src/operators/conv_op.h index 0a26ce6c3f1ee005e982f10dcc3b38853124bdfb..406d54b86e39aa4ed45ed04cb6a7523e552487a8 100644 --- a/src/operators/conv_op.h +++ b/src/operators/conv_op.h @@ -24,26 +24,18 @@ namespace paddle_mobile { namespace operators { using std::string; template -class ConvOp : public framework::OperatorWithKernel { +class ConvOp : public framework::OperatorWithKernel> { public: ConvOp(const std::string &type, const VariableNameMap &inputs, const VariableNameMap &outputs, const framework::AttributeMap &attrs, std::shared_ptr scope) - : framework::OperatorWithKernel(type, inputs, outputs, attrs, - scope), - param_(inputs, outputs, attrs, *scope) {} + : framework::OperatorWithKernel>(type, inputs, outputs, attrs, + scope){} - using framework::OperatorWithKernel::OperatorWithKernel; + using framework::OperatorWithKernel>::OperatorWithKernel; void InferShape() const override; - void RunImpl() const { - operators::ConvKernel kernel; - kernel.Compute(param_); - this->ClearVariables({"Filter", "Input"}); - } - private: - ConvParam param_; }; inline int ConvOutputSize(int input_size, int filter_size, int dilation, diff --git a/src/operators/depthwise_conv_op.cpp b/src/operators/depthwise_conv_op.cpp index 55198fd1160dd1c62cea12879c80539b71690822..be3b9a0ca851e82064cbab866b4b25341018729e 100644 --- a/src/operators/depthwise_conv_op.cpp +++ b/src/operators/depthwise_conv_op.cpp @@ -25,12 +25,12 @@ namespace operators { template void DepthwiseConvOp::InferShape() const { - auto in_dims = param_.Input()->dims(); - auto filter_dims = param_.Filter()->dims(); - const std::vector &strides = param_.Strides(); - std::vector paddings = param_.Paddings(); - int groups = param_.Groups(); - std::vector dilations = param_.Dilations(); + auto in_dims = this->param_.Input()->dims(); + auto filter_dims = 
this->param_.Filter()->dims(); + const std::vector &strides = this->param_.Strides(); + std::vector paddings = this->param_.Paddings(); + int groups = this->param_.Groups(); + std::vector dilations = this->param_.Dilations(); PADDLE_MOBILE_ENFORCE((in_dims.size() == filter_dims.size() && dilations.size() == paddings.size() && @@ -45,7 +45,7 @@ void DepthwiseConvOp::InferShape() const { } framework::DDim ddim = framework::make_ddim(output_shape); - param_.Output()->Resize(ddim); + this->param_.Output()->Resize(ddim); } template class DepthwiseConvOp; diff --git a/src/operators/depthwise_conv_op.h b/src/operators/depthwise_conv_op.h index 37ba1b9ada32d75cb715dd86221758c71c6b1929..6534aba5255d7552bed01b72aa9072f7b8a6e472 100644 --- a/src/operators/depthwise_conv_op.h +++ b/src/operators/depthwise_conv_op.h @@ -24,27 +24,19 @@ namespace paddle_mobile { namespace operators { template -class DepthwiseConvOp : public framework::OperatorWithKernel { +class DepthwiseConvOp : public framework::OperatorWithKernel> { public: DepthwiseConvOp(const std::string &type, const VariableNameMap &inputs, const VariableNameMap &outputs, const framework::AttributeMap &attrs, std::shared_ptr scope) - : framework::OperatorWithKernel(type, inputs, outputs, attrs, - scope), - param_(inputs, outputs, attrs, *scope) {} + : framework::OperatorWithKernel>(type, inputs, outputs, attrs, + scope) {} - using framework::OperatorWithKernel::OperatorWithKernel; + using framework::OperatorWithKernel>::OperatorWithKernel; void InferShape() const override; - void RunImpl() const { - operators::DepthwiseConvKernel kernel; - kernel.Compute(param_); - this->ClearVariables({"Filter", "Input"}); - } - private: - ConvParam param_; }; } // namespace operators diff --git a/src/operators/elementwise_add_op.cpp b/src/operators/elementwise_add_op.cpp index 5333dcfdb6602e7be235c4faa3651a86502bc8a4..966bc9c1e77a4ae6e33bc830c06ba7593c7ba3e0 100644 --- a/src/operators/elementwise_add_op.cpp +++ b/src/operators/elementwise_add_op.cpp @@ -21,8 +21,8 @@ namespace operators { template void ElementwiseAddOp::InferShape() const { - auto x_dim = param_.InputX()->dims(); - param_.Out()->Resize(x_dim); + auto x_dim = this->param_.InputX()->dims(); + this->param_.Out()->Resize(x_dim); } template class ElementwiseAddOp; } // namespace operators diff --git a/src/operators/elementwise_add_op.h b/src/operators/elementwise_add_op.h index 62034b14edcbc9ec6ad44af59f6927b8cfa38aa2..f3820db12c8b9243f4c9cce69ac2179708161780 100644 --- a/src/operators/elementwise_add_op.h +++ b/src/operators/elementwise_add_op.h @@ -25,26 +25,19 @@ namespace paddle_mobile { namespace operators { using std::string; template -class ElementwiseAddOp : public framework::OperatorWithKernel { +class ElementwiseAddOp : public framework::OperatorWithKernel> { public: ElementwiseAddOp(const string &type, const VariableNameMap &inputs, const VariableNameMap &outputs, const framework::AttributeMap &attrs, std::shared_ptr scope) - : framework::OperatorWithKernel(type, inputs, outputs, attrs, - scope), - param_(inputs, outputs, attrs, *scope) {} + : framework::OperatorWithKernel>(type, inputs, outputs, attrs, + scope) {} - void RunImpl() const { - operators::ElementwiseAddKernel kernel; - kernel.Compute(param_); - } - - using framework::OperatorWithKernel::OperatorWithKernel; + using framework::OperatorWithKernel>::OperatorWithKernel; void InferShape() const override; protected: - ElementwiseAddParam param_; }; } // namespace operators } // namespace paddle_mobile diff --git 
a/src/operators/fusion_conv_add.cpp b/src/operators/fusion_conv_add.cpp index 21e13d87aff263db39e5a6105d49b9e6bdb9e97b..1f98cbdd8eb2ef6a386e0aa3d3106d69179f64d1 100644 --- a/src/operators/fusion_conv_add.cpp +++ b/src/operators/fusion_conv_add.cpp @@ -21,12 +21,12 @@ namespace operators { template void FushionConvAddOp::InferShape() const { - auto in_dims = param_.Input()->dims(); - auto filter_dims = param_.Filter()->dims(); - const std::vector &strides = param_.Strides(); - std::vector paddings = param_.Paddings(); - int groups = param_.Groups(); - std::vector dilations = param_.Dilations(); + auto in_dims = this->param_.Input()->dims(); + auto filter_dims = this->param_.Filter()->dims(); + const std::vector &strides = this->param_.Strides(); + std::vector paddings = this->param_.Paddings(); + int groups = this->param_.Groups(); + std::vector dilations = this->param_.Dilations(); PADDLE_MOBILE_ENFORCE((in_dims.size() == filter_dims.size() && dilations.size() == paddings.size() && @@ -41,7 +41,7 @@ void FushionConvAddOp::InferShape() const { } framework::DDim ddim = framework::make_ddim(output_shape); - param_.Output()->Resize(ddim); + this->param_.Output()->Resize(ddim); } template class FushionConvAddOp; } // namespace operators diff --git a/src/operators/fusion_conv_add.h b/src/operators/fusion_conv_add.h index dc35409b4666aafc7b19c23c02cf6003acdd7dc7..1b9b85c9c95bbb6165e27ed2ec6fd6465654ee09 100644 --- a/src/operators/fusion_conv_add.h +++ b/src/operators/fusion_conv_add.h @@ -47,27 +47,19 @@ class FusionConvAddMatcher : public framework::FusionOpMatcher { }; template -class FushionConvAddOp : public framework::OperatorWithKernel { +class FushionConvAddOp : public framework::OperatorWithKernel> { public: FushionConvAddOp(const string &type, const VariableNameMap &inputs, const VariableNameMap &outputs, const framework::AttributeMap &attrs, std::shared_ptr scope) - : framework::OperatorWithKernel(type, inputs, outputs, attrs, - scope), - param_(inputs, outputs, attrs, *scope) {} + : framework::OperatorWithKernel>(type, inputs, outputs, attrs, + scope) {} - void RunImpl() const { - operators::ConvAddKernel kernel; - kernel.Compute(param_); - this->ClearVariables({"Filter", "Input", "Y"}); - } - - using framework::OperatorWithKernel::OperatorWithKernel; + using framework::OperatorWithKernel>::OperatorWithKernel; void InferShape() const override; protected: - FushionConvAddParam param_; }; inline int ConvOutputSize(int input_size, int filter_size, int dilation, diff --git a/src/operators/fusion_fc_op.cpp b/src/operators/fusion_fc_op.cpp index c85de862027fb21bcbde77dbc9cba22e7e2811d9..65c1605a5df521b64571d279e3aa9c2d31aacfb6 100644 --- a/src/operators/fusion_fc_op.cpp +++ b/src/operators/fusion_fc_op.cpp @@ -20,10 +20,10 @@ namespace operators { template void FushionFcOp::InferShape() const { - auto x_dims = param_.InputX()->dims(); - auto y_dims = param_.InputY()->dims(); - int x_num_col_dims = param_.XNumColDims(); - int y_num_col_dims = param_.YNumColDims(); + auto x_dims = this->param_.InputX()->dims(); + auto y_dims = this->param_.InputY()->dims(); + int x_num_col_dims = this->param_.XNumColDims(); + int y_num_col_dims = this->param_.YNumColDims(); assert(x_dims.size() > x_num_col_dims); assert(y_dims.size() > y_num_col_dims); @@ -47,7 +47,7 @@ void FushionFcOp::InferShape() const { } framework::DDim ddim = framework::make_ddim(output_dims); - param_.Out()->Resize(ddim); + this->param_.Out()->Resize(ddim); } template class FushionFcOp; } // namespace operators diff --git 
a/src/operators/fusion_fc_op.h b/src/operators/fusion_fc_op.h index 839ef07b244e84675cd186f267493eb29095d7e8..cf2efa3f604840dfad4b85a84c3077ff57d7e06b 100644 --- a/src/operators/fusion_fc_op.h +++ b/src/operators/fusion_fc_op.h @@ -45,26 +45,19 @@ class FusionFcMatcher : public framework::FusionOpMatcher { }; template -class FushionFcOp : public framework::OperatorWithKernel { +class FushionFcOp : public framework::OperatorWithKernel> { public: FushionFcOp(const string &type, const VariableNameMap &inputs, const VariableNameMap &outputs, const framework::AttributeMap &attrs, std::shared_ptr scope) - : framework::OperatorWithKernel(type, inputs, outputs, attrs, - scope), - param_(inputs, outputs, attrs, *scope) {} + : framework::OperatorWithKernel>(type, inputs, outputs, attrs, + scope) {} - void RunImpl() const { - operators::FushionFcKernel kernel; - kernel.Compute(param_); - } - - using framework::OperatorWithKernel::OperatorWithKernel; + using framework::OperatorWithKernel>::OperatorWithKernel; void InferShape() const override; protected: - FushionFcParam param_; }; #ifdef PADDLE_MOBILE_CPU diff --git a/src/operators/kernel/arm/batchnorm_kernel.cpp b/src/operators/kernel/arm/batchnorm_kernel.cpp index 30d922a777b67a55a7d0dfa98a55144bcb569d49..4103ef6dcde8dd61ffb2ac89746634f49eafcb68 100644 --- a/src/operators/kernel/arm/batchnorm_kernel.cpp +++ b/src/operators/kernel/arm/batchnorm_kernel.cpp @@ -61,19 +61,20 @@ void BatchNormKernel::Compute(const BatchNormParam ¶m) const { /// std = (var + epsilon).sqrt(); /// inv_std = 1 / std; for (int i = 0; i < C * 4; i += 4) { + int index = i/4; inv_std_ptr[i] = - 1 / static_cast(pow((variance_ptr[i / 4] + epsilon), 0.5)); + 1 / static_cast(pow((variance_ptr[index] + epsilon), 0.5)); inv_std_ptr[i + 1] = inv_std_ptr[i]; inv_std_ptr[i + 2] = inv_std_ptr[i]; inv_std_ptr[i + 3] = inv_std_ptr[i]; - new_scale_ptr[i] = inv_std_ptr[i] * scale_ptr[i / 4]; + new_scale_ptr[i] = inv_std_ptr[i] * scale_ptr[index]; new_scale_ptr[i + 1] = new_scale_ptr[i]; new_scale_ptr[i + 2] = new_scale_ptr[i]; new_scale_ptr[i + 3] = new_scale_ptr[i]; new_bias_ptr[i] = - bias_ptr[i / 4] - mean_ptr[i / 4] * inv_std_ptr[i] * scale_ptr[i / 4]; + bias_ptr[index] - mean_ptr[index] * inv_std_ptr[i] * scale_ptr[index]; new_bias_ptr[i + 1] = new_bias_ptr[i]; new_bias_ptr[i + 2] = new_bias_ptr[i]; @@ -164,21 +165,21 @@ void BatchNormKernel::Compute(const BatchNormParam ¶m) const { "vadd.f32 q7, q7, q10 \n\t" "vadd.f32 q8, q8, q10 \n\t" - "add %[out_ptr], %[out_ptr], r6 \n\t" + "add %[out_ptr], %[out_ptr], r6 \n\t" "vst1.32 {q1, q2}, [%[out_ptr]]! \n\t" - "vst1.32 {q3, q4}, [%[out_ptr]]! \n\t" - "vst1.32 {q5, q6}, [%[out_ptr]]! \n\t" - "vst1.32 {q7, q8}, [%[out_ptr]]! \n\t" + "vst1.32 {q3, q4}, [%[out_ptr]]! \n\t" + "vst1.32 {q5, q6}, [%[out_ptr]]! \n\t" + "vst1.32 {q7, q8}, [%[out_ptr]]! 
\n\t" - "end_remainder_%=: \n\t" + "end_remainder_%=: \n\t" "subs %[C], %[C], #1 \n\t" "bge loop_c_%= \n\t" "end_c_%=: \n\t" - "subs %[N], %[N], #1 \n\t" - "bge loop_n_%= \n\t" - "end_n_%=: \n\t" + "subs %[N], %[N], #1 \n\t" + "bge loop_n_%= \n\t" + "end_n_%=: \n\t" : : [input_x_ptr] "r"(input_x_ptr), [out_ptr] "r"(out_ptr), [new_scale_ptr] "r"(new_scale_ptr), [new_bias_ptr] "r"(new_bias_ptr), @@ -232,6 +233,7 @@ void BatchNormKernel::Compute(const BatchNormParam ¶m) const { // DLOG << "out_ptr : " << out_ptr[102]; } } + } // namespace operators } // namespace paddle_mobile diff --git a/src/operators/kernel/mali/conv_kernel.cpp b/src/operators/kernel/mali/conv_kernel.cpp index 695f937880328e8c2ffed91a8beee23e9a72899a..266fde69d824e1173a862774dc13785241d4e682 100644 --- a/src/operators/kernel/mali/conv_kernel.cpp +++ b/src/operators/kernel/mali/conv_kernel.cpp @@ -20,7 +20,10 @@ namespace paddle_mobile { namespace operators { template <> -void ConvKernel::Compute(const ConvParam ¶m) const {} +void ConvKernel::Compute(const ConvParam ¶m) const { +// ArmConvImplement imp; +// imp.Compute(param); +} template class ConvKernel; } // namespace operators diff --git a/src/operators/lrn_op.cpp b/src/operators/lrn_op.cpp index d159cdf21b4d0f9ab85d24112ecb2a66729a0236..2533ab19a5084513a991082f148d546cb0059657 100644 --- a/src/operators/lrn_op.cpp +++ b/src/operators/lrn_op.cpp @@ -21,8 +21,8 @@ namespace operators { template void LrnOp::InferShape() const { - auto x_dims = param_.InputX()->dims(); - param_.Out()->Resize(x_dims); + auto x_dims = this->param_.InputX()->dims(); + this->param_.Out()->Resize(x_dims); } template class LrnOp; } // namespace operators diff --git a/src/operators/lrn_op.h b/src/operators/lrn_op.h index c0f7abba0bd095c7408787eda3b819a81fa2227e..3e84bebb934a2a770c16ff1c385efe2d2bb627bc 100644 --- a/src/operators/lrn_op.h +++ b/src/operators/lrn_op.h @@ -25,25 +25,17 @@ namespace paddle_mobile { namespace operators { using std::string; template -class LrnOp : public framework::OperatorWithKernel { +class LrnOp : public framework::OperatorWithKernel> { public: LrnOp(const string &type, const VariableNameMap &inputs, const VariableNameMap &outputs, const framework::AttributeMap &attrs, std::shared_ptr scope) - : framework::OperatorWithKernel(type, inputs, outputs, attrs, - scope), - param_(inputs, outputs, attrs, *scope) {} + : framework::OperatorWithKernel>(type, inputs, outputs, attrs, + scope) {} - void RunImpl() const { - operators::LrnKernel kernel; - kernel.Compute(param_); - } - - using framework::OperatorWithKernel::OperatorWithKernel; + using framework::OperatorWithKernel>::OperatorWithKernel; void InferShape() const override; - protected: - LrnParam param_; }; } // namespace operators diff --git a/src/operators/mul_op.cpp b/src/operators/mul_op.cpp index d33bcbfdddba06947c9d04711c39cb619ada536e..d97c6ec3e470bb2b083ef7e5234168c6fdfc34c1 100644 --- a/src/operators/mul_op.cpp +++ b/src/operators/mul_op.cpp @@ -21,10 +21,10 @@ namespace operators { template void MulOp::InferShape() const { - auto x_dims = param_.InputX()->dims(); - auto y_dims = param_.InputY()->dims(); - int x_num_col_dims = param_.XNumColDims(); - int y_num_col_dims = param_.YNumColDims(); + auto x_dims = this->param_.InputX()->dims(); + auto y_dims = this->param_.InputY()->dims(); + int x_num_col_dims = this->param_.XNumColDims(); + int y_num_col_dims = this->param_.YNumColDims(); assert(x_dims.size() > x_num_col_dims); assert(y_dims.size() > y_num_col_dims); @@ -48,7 +48,7 @@ void MulOp::InferShape() const 
{ } framework::DDim ddim = framework::make_ddim(output_dims); - param_.Out()->Resize(ddim); + this->param_.Out()->Resize(ddim); } template class MulOp; } // namespace operators diff --git a/src/operators/mul_op.h b/src/operators/mul_op.h index 5ecf6571ae2725975271d5b0e7212380caa47578..08bda2a74b8c51e59799bb583674e2d87f84ab4d 100644 --- a/src/operators/mul_op.h +++ b/src/operators/mul_op.h @@ -25,25 +25,18 @@ namespace paddle_mobile { namespace operators { template -class MulOp : public framework::OperatorWithKernel { +class MulOp : public framework::OperatorWithKernel> { public: MulOp(const std::string &type, const VariableNameMap &inputs, const VariableNameMap &outputs, const framework::AttributeMap &attrs, std::shared_ptr scope) - : framework::OperatorWithKernel(type, inputs, outputs, attrs, - scope), - param_(inputs, outputs, attrs, *scope) {} + : framework::OperatorWithKernel>(type, inputs, outputs, attrs, + scope) {} - void RunImpl() const { - operators::MulKernel kernel; - kernel.Compute(param_); - } - - using framework::OperatorWithKernel::OperatorWithKernel; + using framework::OperatorWithKernel>::OperatorWithKernel; void InferShape() const override; protected: - MulParam param_; }; } // namespace operators diff --git a/src/operators/multiclass_nms_op.cpp b/src/operators/multiclass_nms_op.cpp index e8b5f195feff617a5de55fe1a0b12a0e0cd70ce9..52adf6cc627d76b18b3b48928c344545327ca99e 100644 --- a/src/operators/multiclass_nms_op.cpp +++ b/src/operators/multiclass_nms_op.cpp @@ -20,8 +20,8 @@ namespace operators { template void MultiClassNMSOp::InferShape() const { - auto input_bboxes_dims = param_.InputBBoxes()->dims(); - auto input_scores_dims = param_.InputScores()->dims(); + auto input_bboxes_dims = this->param_.InputBBoxes()->dims(); + auto input_scores_dims = this->param_.InputScores()->dims(); if (input_scores_dims.size() != 3) { LOG(kLOG_ERROR) << "Input Scores size must be 3"; } @@ -32,7 +32,7 @@ void MultiClassNMSOp::InferShape() const { LOG(kLOG_ERROR) << "Predict bboxes must be equal"; } // pre size, will change in Compute. 
- param_.Out()->Resize(framework::make_ddim({input_bboxes_dims[1], 6})); + this->param_.Out()->Resize(framework::make_ddim({input_bboxes_dims[1], 6})); } template class MultiClassNMSOp; } // namespace operators diff --git a/src/operators/multiclass_nms_op.h b/src/operators/multiclass_nms_op.h index 37f3742524f64b5bfa97f78a55f86b4264489dd5..2d65657c8d70ef4310f8394e98854b571fc40d7a 100644 --- a/src/operators/multiclass_nms_op.h +++ b/src/operators/multiclass_nms_op.h @@ -28,26 +28,19 @@ namespace operators { using paddle_mobile::framework::Tensor; template -class MultiClassNMSOp : public framework::OperatorWithKernel { +class MultiClassNMSOp : public framework::OperatorWithKernel> { public: MultiClassNMSOp(const std::string &type, const VariableNameMap &inputs, const VariableNameMap &outputs, const framework::AttributeMap &attrs, std::shared_ptr scope) - : framework::OperatorWithKernel(type, inputs, outputs, attrs, - scope), - param_(inputs, outputs, attrs, *scope) {} + : framework::OperatorWithKernel>(type, inputs, outputs, attrs, + scope) {} - void RunImpl() const { - operators::MultiClassNMSKernel kernel; - kernel.Compute(param_); - } - - using framework::OperatorWithKernel::OperatorWithKernel; + using framework::OperatorWithKernel>::OperatorWithKernel; void InferShape() const override; protected: - MultiClassNMSParam param_; }; } // namespace operators diff --git a/src/operators/pool_op.cpp b/src/operators/pool_op.cpp index 79b06174af736f693e05fe87985bef1a02886435..e8a469d43141f0b880605b52216094c292ca50fb 100644 --- a/src/operators/pool_op.cpp +++ b/src/operators/pool_op.cpp @@ -34,13 +34,13 @@ int PoolOutputSize(int input_size, int filter_size, int padding, int stride, } template void PoolOp::InferShape() const { - auto in_x_dims = param_.Input()->dims(); - std::vector ksize = param_.Ksize(); - std::vector paddings = param_.Paddings(); - std::vector strides = param_.Strides(); - bool ceil_mode = param_.isCeilMode(); + auto in_x_dims = this->param_.Input()->dims(); + std::vector ksize = this->param_.Ksize(); + std::vector paddings = this->param_.Paddings(); + std::vector strides = this->param_.Strides(); + bool ceil_mode = this->param_.isCeilMode(); - if (param_.isGlobalPooling()) { + if (this->param_.isGlobalPooling()) { ksize.resize(static_cast(in_x_dims.size()) - 2); for (size_t i = 0; i < ksize.size(); ++i) { paddings[i] = 0; @@ -52,7 +52,7 @@ void PoolOp::InferShape() const { output_shape.push_back(PoolOutputSize(in_x_dims[i + 2], ksize[i], paddings[i], strides[i], ceil_mode)); } - param_.Output()->Resize(framework::make_ddim(output_shape)); + this->param_.Output()->Resize(framework::make_ddim(output_shape)); } template class PoolOp; } // namespace operators diff --git a/src/operators/pool_op.h b/src/operators/pool_op.h index 8dc99ae686390041b3c99c2df71d91ae9801a1f2..b9d679963c09638797560c2365b7cf7f5bb7f04b 100644 --- a/src/operators/pool_op.h +++ b/src/operators/pool_op.h @@ -29,24 +29,16 @@ using framework::OperatorWithKernel; using framework::Scope; using std::string; template -class PoolOp : public OperatorWithKernel { +class PoolOp : public OperatorWithKernel> { public: PoolOp(const string &type, const VariableNameMap &inputs, const VariableNameMap &outputs, const AttributeMap &attrs, std::shared_ptr scope) - : OperatorWithKernel(type, inputs, outputs, attrs, scope), - param_(inputs, outputs, attrs, *scope) {} - using OperatorWithKernel::OperatorWithKernel; + : OperatorWithKernel>(type, inputs, outputs, attrs, scope) {} + using OperatorWithKernel>::OperatorWithKernel; void 
InferShape() const override; - void RunImpl() const { - operators::PoolKernel kernel; - kernel.Compute(param_); - this->ClearVariables({"X"}); - } - private: - PoolParam param_; }; } // namespace operators } // namespace paddle_mobile diff --git a/src/operators/prior_box_op.cpp b/src/operators/prior_box_op.cpp index f3ae6e5231efd604862e9c7b4dbafd71c71d6f54..44e1741b66f301aee55f1f4d33b9bb1173e6004d 100644 --- a/src/operators/prior_box_op.cpp +++ b/src/operators/prior_box_op.cpp @@ -21,13 +21,13 @@ namespace operators { template void PriorBoxOp::InferShape() const { - auto input_dims = param_.Input()->dims(); - auto input_image_dims = param_.InputImage()->dims(); - auto min_sizes = param_.MinSizes(); - auto max_sizes = param_.MaxSizes(); - auto variances = param_.Variances(); - auto aspect_ratios = param_.AspectRatios(); - bool flip = param_.Flip(); + auto input_dims = this->param_.Input()->dims(); + auto input_image_dims = this->param_.InputImage()->dims(); + auto min_sizes = this->param_.MinSizes(); + auto max_sizes = this->param_.MaxSizes(); + auto variances = this->param_.Variances(); + auto aspect_ratios = this->param_.AspectRatios(); + bool flip = this->param_.Flip(); std::vector aspect_ratios_vec; ExpandAspectRatios(aspect_ratios, flip, &aspect_ratios_vec); @@ -41,8 +41,8 @@ void PriorBoxOp::InferShape() const { dim_vec[1] = input_dims[3]; dim_vec[2] = num_priors; dim_vec[3] = 4; - param_.OutputBoxes()->Resize(framework::make_ddim(dim_vec)); - param_.OutputVariances()->Resize(framework::make_ddim(dim_vec)); + this->param_.OutputBoxes()->Resize(framework::make_ddim(dim_vec)); + this->param_.OutputVariances()->Resize(framework::make_ddim(dim_vec)); } template class PriorBoxOp; } // namespace operators diff --git a/src/operators/prior_box_op.h b/src/operators/prior_box_op.h index e3de58b372cc101956d83ff39b02e172c990b254..ad6b90104148bcfa53935ad3758ed4c234aef4fb 100644 --- a/src/operators/prior_box_op.h +++ b/src/operators/prior_box_op.h @@ -28,26 +28,19 @@ namespace operators { using paddle_mobile::framework::Tensor; template -class PriorBoxOp : public framework::OperatorWithKernel { +class PriorBoxOp : public framework::OperatorWithKernel> { public: PriorBoxOp(const std::string &type, const VariableNameMap &inputs, const VariableNameMap &outputs, const framework::AttributeMap &attrs, std::shared_ptr scope) - : framework::OperatorWithKernel(type, inputs, outputs, attrs, - scope), - param_(inputs, outputs, attrs, *scope) {} + : framework::OperatorWithKernel>(type, inputs, outputs, attrs, + scope) {} - void RunImpl() const { - operators::PriorBoxKernel kernel; - kernel.Compute(param_); - } - - using framework::OperatorWithKernel::OperatorWithKernel; + using framework::OperatorWithKernel>::OperatorWithKernel; void InferShape() const override; protected: - PriorBoxParam param_; }; } // namespace operators diff --git a/src/operators/relu_op.cpp b/src/operators/relu_op.cpp index 066772e3bee32b3296d7fb9bebf615cc57702871..cf495d8bdace83f5dd7f86d372d07b3241867af9 100644 --- a/src/operators/relu_op.cpp +++ b/src/operators/relu_op.cpp @@ -20,8 +20,8 @@ namespace operators { template void ReluOp::InferShape() const { - auto input_dims = param_.InputX()->dims(); - param_.Out()->Resize(input_dims); + auto input_dims = this->param_.InputX()->dims(); + this->param_.Out()->Resize(input_dims); } template class ReluOp; } // namespace operators diff --git a/src/operators/relu_op.h b/src/operators/relu_op.h index f032546c82d740c179385434b5d72082e4bd5a9d..d8bfc8a5ec0c2517da384b63eb6b5e9791127eb6 100644 --- 
a/src/operators/relu_op.h +++ b/src/operators/relu_op.h @@ -28,7 +28,7 @@ namespace operators { using paddle_mobile::framework::Tensor; template -class ReluOp : public framework::OperatorWithKernel { +class ReluOp : public framework::OperatorWithKernel> { public: /* * @b op 的实例化方法, 需要调用父类的实例化方法, 以及实例化自己的参数结构体 @@ -36,27 +36,13 @@ class ReluOp : public framework::OperatorWithKernel { ReluOp(const std::string &type, const VariableNameMap &inputs, const VariableNameMap &outputs, const framework::AttributeMap &attrs, std::shared_ptr scope) - : framework::OperatorWithKernel(type, inputs, outputs, attrs, - scope), - param_(inputs, outputs, attrs, *scope) {} + : framework::OperatorWithKernel>(type, inputs, outputs, attrs, + scope) {} - /* - * @b op 进行运算, 调用相应的 kernel 进行运算 - * */ - void RunImpl() const { - operators::ReluKernel kernel; - kernel.Compute(param_); - } - - using framework::OperatorWithKernel::OperatorWithKernel; + using framework::OperatorWithKernel>::OperatorWithKernel; void InferShape() const override; protected: - /* - * @b Relu kernel 进行运算时所需要用到参数的结构体, - * 结构体定义在: paddle-mobile/src/operators/op_param.h - * */ - ReluParam param_; }; } // namespace operators diff --git a/src/operators/reshape_op.cpp b/src/operators/reshape_op.cpp index 5d0aa49a26b6c0b2f78b5fcb4b3bd144edaa313c..0fdcaf4d1a95ccd2a0ceccdc6d890b30a1d66368 100644 --- a/src/operators/reshape_op.cpp +++ b/src/operators/reshape_op.cpp @@ -22,10 +22,10 @@ namespace operators { template void ReshapeOp::InferShape() const { /// todo: add InputShape() detection. - auto &shape = param_.Shape(); - auto input_x_dims = param_.InputX()->dims(); + auto &shape = this->param_.Shape(); + auto input_x_dims = this->param_.InputX()->dims(); auto out_dims = ValidateShape(shape, input_x_dims); - param_.Out()->Resize(out_dims); + this->param_.Out()->Resize(out_dims); } template class ReshapeOp; } // namespace operators diff --git a/src/operators/reshape_op.h b/src/operators/reshape_op.h index a14c84b6be95a1d86ac645563dc21c21a51ca6d4..46451fbda357589fb49bbc3f00772242e6cffa05 100644 --- a/src/operators/reshape_op.h +++ b/src/operators/reshape_op.h @@ -28,26 +28,19 @@ namespace operators { using paddle_mobile::framework::Tensor; template -class ReshapeOp : public framework::OperatorWithKernel { +class ReshapeOp : public framework::OperatorWithKernel> { public: ReshapeOp(const std::string &type, const VariableNameMap &inputs, const VariableNameMap &outputs, const framework::AttributeMap &attrs, std::shared_ptr scope) - : framework::OperatorWithKernel(type, inputs, outputs, attrs, - scope), - param_(inputs, outputs, attrs, *scope) {} + : framework::OperatorWithKernel>(type, inputs, outputs, attrs, + scope) {} - void RunImpl() const { - operators::ReshapeKernel kernel; - kernel.Compute(param_); - } - - using framework::OperatorWithKernel::OperatorWithKernel; + using framework::OperatorWithKernel>::OperatorWithKernel; void InferShape() const override; protected: - ReshapeParam param_; }; } // namespace operators diff --git a/src/operators/sigmoid_op.cpp b/src/operators/sigmoid_op.cpp index 641b6f29f2f1eaff7304b8e70b12284575a2e246..79190e6c3368b9d375770062d948580779393f04 100644 --- a/src/operators/sigmoid_op.cpp +++ b/src/operators/sigmoid_op.cpp @@ -20,7 +20,7 @@ namespace paddle_mobile { namespace operators { template void SigmoidOp::InferShape() const { - param_.Out()->Resize(param_.InputX()->dims()); + this->param_.Out()->Resize(this->param_.InputX()->dims()); } template class SigmoidOp; } // namespace operators diff --git 
a/src/operators/sigmoid_op.h b/src/operators/sigmoid_op.h index 7cdeb41af1b20ddf05ac80d7de0962c4bfe8dff4..77aff2efaf40269fa0b0e4bf3855d90a6522077c 100644 --- a/src/operators/sigmoid_op.h +++ b/src/operators/sigmoid_op.h @@ -25,28 +25,18 @@ limitations under the License. */ namespace paddle_mobile { namespace operators { template -class SigmoidOp : public framework::OperatorWithKernel { +class SigmoidOp : public framework::OperatorWithKernel> { public: SigmoidOp(const std::string &type, const VariableNameMap &inputs, const VariableNameMap &outputs, const framework::AttributeMap &attrs, std::shared_ptr scope) - : framework::OperatorWithKernel(type, inputs, outputs, attrs, - scope), - param_(inputs, outputs, attrs, *scope) {} + : framework::OperatorWithKernel>(type, inputs, outputs, attrs, + scope) {} - using framework::OperatorWithKernel::OperatorWithKernel; + using framework::OperatorWithKernel>::OperatorWithKernel; void InferShape() const override; - - void RunImpl() const { - operators::SigmoidKernel kernel; - kernel.Compute(param_); - this->ClearVariables({"X"}); - } - - private: - SigmoidParam param_; }; } // namespace operators } // namespace paddle_mobile diff --git a/src/operators/softmax_op.cpp b/src/operators/softmax_op.cpp index 8d5f669466d5725d877afe9db2adb9441fe488ca..e25b59198f3206357a770a104080f99bafa84dc5 100644 --- a/src/operators/softmax_op.cpp +++ b/src/operators/softmax_op.cpp @@ -20,7 +20,7 @@ namespace paddle_mobile { namespace operators { template void SoftmaxOp::InferShape() const { - param_.Out()->Resize(param_.InputX()->dims()); + this->param_.Out()->Resize(this->param_.InputX()->dims()); } template class SoftmaxOp; } // namespace operators diff --git a/src/operators/softmax_op.h b/src/operators/softmax_op.h index 5cac4d8a3394b07e978ba41e18fd7fbb7f4756d7..7bf0bdb4ca823801f39078c42f6277fd31e5a3d2 100644 --- a/src/operators/softmax_op.h +++ b/src/operators/softmax_op.h @@ -25,28 +25,20 @@ limitations under the License. 
*/ namespace paddle_mobile { namespace operators { template -class SoftmaxOp : public framework::OperatorWithKernel { +class SoftmaxOp : public framework::OperatorWithKernel> { public: SoftmaxOp(const std::string &type, const VariableNameMap &inputs, const VariableNameMap &outputs, const framework::AttributeMap &attrs, std::shared_ptr scope) - : framework::OperatorWithKernel(type, inputs, outputs, attrs, - scope), - param_(inputs, outputs, attrs, *scope) {} + : framework::OperatorWithKernel>(type, inputs, outputs, attrs, + scope) {} - using framework::OperatorWithKernel::OperatorWithKernel; + using framework::OperatorWithKernel>::OperatorWithKernel; void InferShape() const override; - void RunImpl() const { - operators::SoftmaxKernel kernel; - kernel.Compute(param_); - this->ClearVariables({"X"}); - } - private: - SoftmaxParam param_; }; } // namespace operators } // namespace paddle_mobile diff --git a/src/operators/transpose_op.cpp b/src/operators/transpose_op.cpp index 02a3b16e8d62d094a83c329f52957b4b8b87d805..989b277b9d58a8c029e041a89a1982f8994bae44 100644 --- a/src/operators/transpose_op.cpp +++ b/src/operators/transpose_op.cpp @@ -23,8 +23,8 @@ namespace operators { template void TransposeOp::InferShape() const { - auto input_x_dims = param_.InputX()->dims(); - auto axis = param_.Axis(); + auto input_x_dims = this->param_.InputX()->dims(); + auto axis = this->param_.Axis(); size_t x_dims_size = input_x_dims.size(); size_t axis_size = axis.size(); @@ -45,7 +45,7 @@ void TransposeOp::InferShape() const { for (size_t i = 0; i < axis_size; i++) { out_dims[i] = input_x_dims[axis[i]]; } - param_.Out()->Resize(out_dims); + this->param_.Out()->Resize(out_dims); } template class TransposeOp; } // namespace operators diff --git a/src/operators/transpose_op.h b/src/operators/transpose_op.h index f65a725756c858b8e2e304906ed8236b00046fc9..bfed55652240b6eddf4138e0ef5978b4fdd62a72 100644 --- a/src/operators/transpose_op.h +++ b/src/operators/transpose_op.h @@ -28,26 +28,17 @@ namespace operators { using paddle_mobile::framework::Tensor; template -class TransposeOp : public framework::OperatorWithKernel { +class TransposeOp : public framework::OperatorWithKernel> { public: TransposeOp(const std::string &type, const VariableNameMap &inputs, const VariableNameMap &outputs, const framework::AttributeMap &attrs, std::shared_ptr scope) - : framework::OperatorWithKernel(type, inputs, outputs, attrs, - scope), - param_(inputs, outputs, attrs, *scope) {} + : framework::OperatorWithKernel>(type, inputs, outputs, attrs, + scope) {} - void RunImpl() const { - operators::TransposeKernel kernel; - kernel.Compute(param_); - } - - using framework::OperatorWithKernel::OperatorWithKernel; + using framework::OperatorWithKernel>::OperatorWithKernel; void InferShape() const override; - - protected: - TransposeParam param_; }; } // namespace operators diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index 0cf3537ad64fb0dfb23d0514cf4d068b9c9c6199..cc707ded7f6682136974b4fb16e82f112bc5b24f 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -137,6 +137,10 @@ else () ADD_EXECUTABLE(test-depthwise-conv-op operators/test_depthwise_conv_op.cpp test_helper.h test_include.h executor_for_test.h) target_link_libraries(test-depthwise-conv-op paddle-mobile) + # gen test + ADD_EXECUTABLE(test-mobilenet net/test_mobilenet.cpp test_helper.h test_include.h executor_for_test.h) + target_link_libraries(test-mobilenet paddle-mobile) + #add_library(test-lib-size SHARED common/test_lib_size.h common/test_lib_size.cpp) endif() 
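The same mechanical change repeats across every operator header in this diff: each op stops declaring its own RunImpl() and param_ member and instead inherits both from the kernel-aware OperatorWithKernel base added in src/framework/operator.h. A minimal, self-contained sketch of that pattern follows; the type names used here (DeviceType stand-in CPU, ReluParam, ReluKernel, the simplified Scope and name maps) are illustrative, not the exact paddle-mobile declarations, and the real template arguments may differ.

// Sketch of the refactor applied throughout this diff (illustrative names;
// Scope, the name maps, and the Relu types below are simplified stand-ins,
// not the real paddle-mobile declarations).
#include <iostream>
#include <memory>
#include <string>
#include <utility>

struct Scope {};
using VariableNameMap = std::string;  // stands in for the real map type
using AttributeMap = std::string;     // stands in for the real map type

template <typename Dtype>
class OperatorBase {
 public:
  OperatorBase(std::string type, std::shared_ptr<Scope> scope)
      : type_(std::move(type)), scope_(std::move(scope)) {}
  virtual ~OperatorBase() = default;
  virtual void RunImpl() const = 0;
  virtual void InferShape() const = 0;

 protected:
  std::string type_;
  std::shared_ptr<Scope> scope_;
};

// Kernel base: Compute() stays pure virtual, Init() becomes a default no-op
// hook, mirroring the change to framework::OpKernelBase above.
template <typename Dtype, typename P>
class OpKernelBase {
 public:
  virtual void Compute(const P &para) const = 0;
  virtual bool Init(const P &para) const { return true; }
  virtual ~OpKernelBase() = default;
};

// After the diff, the base operator owns both the param struct and the
// kernel: it builds the param from the op's inputs/outputs/attrs, calls
// kernel_.Init(param_) once, and supplies a generic RunImpl().
template <typename Dtype, typename ParamType, typename KernelType>
class OperatorWithKernel : public OperatorBase<Dtype> {
 public:
  OperatorWithKernel(const std::string &type, const VariableNameMap &inputs,
                     const VariableNameMap &outputs, const AttributeMap &attrs,
                     std::shared_ptr<Scope> scope)
      : OperatorBase<Dtype>(type, scope),
        param_(inputs, outputs, attrs, *scope) {
    kernel_.Init(param_);
  }

  void RunImpl() const override { kernel_.Compute(param_); }

 protected:
  KernelType kernel_;
  ParamType param_;
};

// A concrete op written against the new base.
struct CPU {};

struct ReluParam {  // stand-in for the structs in src/operators/op_param.h
  ReluParam(const VariableNameMap &in, const VariableNameMap &out,
            const AttributeMap &, const Scope &)
      : input(in), output(out) {}
  VariableNameMap input, output;
};

template <typename Dtype, typename T>
class ReluKernel : public OpKernelBase<Dtype, ReluParam> {
 public:
  void Compute(const ReluParam &param) const override {
    std::cout << "relu: " << param.input << " -> " << param.output << "\n";
  }
};

// The op no longer declares RunImpl() or its own param_ member; it only
// names its param/kernel types and implements InferShape().
template <typename Dtype, typename T>
class ReluOp
    : public OperatorWithKernel<Dtype, ReluParam, ReluKernel<Dtype, T>> {
 public:
  using OperatorWithKernel<Dtype, ReluParam,
                           ReluKernel<Dtype, T>>::OperatorWithKernel;
  void InferShape() const override { /* Out keeps the shape of X */ }
};

int main() {
  auto scope = std::make_shared<Scope>();
  ReluOp<CPU, float> relu("relu", "X", "Out", /*attrs=*/"", scope);
  relu.InferShape();
  relu.RunImpl();  // dispatches to ReluKernel::Compute through the base class
  return 0;
}

The default Init() just returns true, so existing kernels compile unchanged; the hook presumably exists so kernels can later hoist one-time setup out of Compute(), which is otherwise run on every inference.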
diff --git a/test/net/test_mobilenet.cpp b/test/net/test_mobilenet.cpp
index 7ed9a3566e3be8d5baa7e47611fc713772e94327..8400b08f2292bb5655e2d85298acce603e1ce603 100644
--- a/test/net/test_mobilenet.cpp
+++ b/test/net/test_mobilenet.cpp
@@ -19,14 +19,14 @@ limitations under the License. */
 int main() {
   paddle_mobile::Loader loader;
   auto time1 = time();
-  auto program = loader.Load(g_mobilenet, false);
+  auto program = loader.Load(g_mobilenet, true);
   auto time2 = time();
   DLOG << "load cost :" << time_diff(time1, time1) << "ms";
-  paddle_mobile::Executor executor(program, 2, false);
+  paddle_mobile::Executor executor(program, 1, true);
 
-  std::vector dims{2, 3, 224, 224};
+  std::vector dims{1, 3, 224, 224};
   Tensor input_tensor;
-  SetupTensor(&input_tensor, {2, 3, 224, 224}, static_cast(0),
+  SetupTensor(&input_tensor, {1, 3, 224, 224}, static_cast(0),
               static_cast(1));
 
   std::vector input(input_tensor.data(),
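As a closing note on the timing code in the test above, here is a hypothetical, self-contained sketch of the load-timing pattern; time() and time_diff() are local stand-ins, not the helpers from test_helper.h.

// Hypothetical sketch of the timing pattern used by the test; the helpers
// below are stand-ins for the ones declared in test_helper.h.
#include <chrono>
#include <iostream>

static std::chrono::steady_clock::time_point time() {
  return std::chrono::steady_clock::now();
}

static double time_diff(std::chrono::steady_clock::time_point a,
                        std::chrono::steady_clock::time_point b) {
  return std::chrono::duration<double, std::milli>(b - a).count();
}

int main() {
  auto time1 = time();
  // ... load the model here ...
  auto time2 = time();
  // Note: the diff above logs time_diff(time1, time1), which is always ~0 ms;
  // measuring the actual load cost needs time_diff(time1, time2).
  std::cout << "load cost :" << time_diff(time1, time2) << "ms\n";
  return 0;
}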