From 4a493ead2509bade60a487ddbec468c3c362b500 Mon Sep 17 00:00:00 2001
From: liuruilong
Date: Wed, 20 Jun 2018 17:17:55 +0800
Subject: [PATCH] format files

---
 src/operators/conv_op.cpp                     |  8 ++---
 src/operators/depthwise_conv_op.cpp           |  8 ++---
 src/operators/fusion_conv_add.cpp             |  8 ++---
 src/operators/fusion_conv_add.h               |  4 +--
 src/operators/fusion_conv_add_relu_op.cpp     | 10 +++---
 src/operators/fusion_conv_add_relu_op.h       | 31 ++++++++++---------
 src/operators/fusion_fc_op.h                  |  2 +-
 src/operators/kernel/arm/conv_add_kernel.cpp  |  3 +-
 .../kernel/arm/conv_add_relu_kernel.cpp       | 18 +++++------
 src/operators/kernel/conv_add_kernel.h        |  7 ++---
 src/operators/kernel/conv_add_relu_kernel.h   |  9 +++---
 .../kernel/mali/batchnorm_kernel.cpp          |  4 +--
 src/operators/math/conv_func.h                |  8 ++---
 src/operators/math/gemm.cpp                   | 30 +++++++++---------
 src/operators/math/gemm.h                     |  5 +--
 src/operators/math/math_function.cpp          | 10 +++---
 src/operators/op_param.h                      |  8 ++---
 test/executor_for_test.h                      | 13 ++++----
 test/framework/test_load.cpp                  |  7 +++--
 test/operators/test_conv_add_relu_op.cpp      |  7 +++--
 test/operators/test_cov_op.cpp                |  4 +--
 21 files changed, 104 insertions(+), 100 deletions(-)

diff --git a/src/operators/conv_op.cpp b/src/operators/conv_op.cpp
index 824ab9ee31..01d284a06e 100644
--- a/src/operators/conv_op.cpp
+++ b/src/operators/conv_op.cpp
@@ -17,8 +17,8 @@ limitations under the License. */
 #include "operators/conv_op.h"
 #include <vector>
 #include "framework/op_proto_maker.h"
-#include "operators/math/conv_func.h"
 #include "framework/op_registry.h"
+#include "operators/math/conv_func.h"
 
 namespace paddle_mobile {
 namespace operators {
@@ -39,9 +39,9 @@ void ConvOp<Dtype, T>::InferShape() const {
 
   std::vector<int64_t> output_shape({in_dims[0], filter_dims[0]});
   for (size_t i = 0; i < strides.size(); ++i) {
-    output_shape.push_back(math::ConvOutputSize(in_dims[i + 2], filter_dims[i + 2],
-                                                dilations[i], paddings[i],
-                                                strides[i]));
+    output_shape.push_back(
+        math::ConvOutputSize(in_dims[i + 2], filter_dims[i + 2], dilations[i],
+                             paddings[i], strides[i]));
   }
 
   framework::DDim ddim = framework::make_ddim(output_shape);
diff --git a/src/operators/depthwise_conv_op.cpp b/src/operators/depthwise_conv_op.cpp
index b127424bce..46f2db30ba 100644
--- a/src/operators/depthwise_conv_op.cpp
+++ b/src/operators/depthwise_conv_op.cpp
@@ -17,9 +17,9 @@ limitations under the License. */
 #include "operators/depthwise_conv_op.h"
 #include <vector>
 #include "framework/op_proto_maker.h"
-#include "operators/math/conv_func.h"
 #include "framework/op_registry.h"
 #include "operators/conv_op.h"
+#include "operators/math/conv_func.h"
 
 namespace paddle_mobile {
 namespace operators {
@@ -40,9 +40,9 @@ void DepthwiseConvOp<Dtype, T>::InferShape() const {
 
   std::vector<int64_t> output_shape({in_dims[0], filter_dims[0]});
   for (size_t i = 0; i < strides.size(); ++i) {
-    output_shape.push_back(math::ConvOutputSize(in_dims[i + 2], filter_dims[i + 2],
-                                                dilations[i], paddings[i],
-                                                strides[i]));
+    output_shape.push_back(
+        math::ConvOutputSize(in_dims[i + 2], filter_dims[i + 2], dilations[i],
+                             paddings[i], strides[i]));
   }
 
   framework::DDim ddim = framework::make_ddim(output_shape);
diff --git a/src/operators/fusion_conv_add.cpp b/src/operators/fusion_conv_add.cpp
index c8519a7e00..80682324f7 100644
--- a/src/operators/fusion_conv_add.cpp
+++ b/src/operators/fusion_conv_add.cpp
@@ -14,8 +14,8 @@ limitations under the License. */
 
 #ifdef FUSION_CONVADD_OP
 
-#include "operators/math/conv_func.h"
 #include "operators/fusion_conv_add.h"
+#include "operators/math/conv_func.h"
 
 namespace paddle_mobile {
 namespace operators {
@@ -36,9 +36,9 @@ void FushionConvAddOp<Dtype, T>::InferShape() const {
 
   std::vector<int64_t> output_shape({in_dims[0], filter_dims[0]});
   for (size_t i = 0; i < strides.size(); ++i) {
-    output_shape.push_back(math::ConvOutputSize(in_dims[i + 2], filter_dims[i + 2],
-                                                dilations[i], paddings[i],
-                                                strides[i]));
+    output_shape.push_back(
+        math::ConvOutputSize(in_dims[i + 2], filter_dims[i + 2], dilations[i],
+                             paddings[i], strides[i]));
   }
 
   framework::DDim ddim = framework::make_ddim(output_shape);
diff --git a/src/operators/fusion_conv_add.h b/src/operators/fusion_conv_add.h
index 1c4c898a8e..88ddd055b9 100644
--- a/src/operators/fusion_conv_add.h
+++ b/src/operators/fusion_conv_add.h
@@ -18,10 +18,10 @@ limitations under the License. */
 
 #include <string>
 #include <vector>
-#include "op_param.h"
 #include "framework/operator.h"
-#include "operators/kernel/conv_add_kernel.h"
 #include "framework/program/program-optimize/fusion_op_register.h"
+#include "op_param.h"
+#include "operators/kernel/conv_add_kernel.h"
 
 namespace paddle_mobile {
 namespace operators {
diff --git a/src/operators/fusion_conv_add_relu_op.cpp b/src/operators/fusion_conv_add_relu_op.cpp
index e7f18a7199..694e46af1f 100644
--- a/src/operators/fusion_conv_add_relu_op.cpp
+++ b/src/operators/fusion_conv_add_relu_op.cpp
@@ -36,16 +36,16 @@ void FusionConvAddReluOp<DeviceType, T>::InferShape() const {
 
   std::vector<int64_t> output_shape({in_dims[0], filter_dims[0]});
   for (size_t i = 0; i < strides.size(); ++i) {
-    output_shape.push_back(math::ConvOutputSize(in_dims[i + 2], filter_dims[i + 2],
-                                                dilations[i], paddings[i],
-                                                strides[i]));
+    output_shape.push_back(
+        math::ConvOutputSize(in_dims[i + 2], filter_dims[i + 2], dilations[i],
+                             paddings[i], strides[i]));
   }
 
   framework::DDim ddim = framework::make_ddim(output_shape);
   this->param_.Output()->Resize(ddim);
 }
 
-}
-}
+}  // namespace operators
+}  // namespace paddle_mobile
 
 namespace ops = paddle_mobile::operators;
 #ifdef PADDLE_MOBILE_CPU
diff --git a/src/operators/fusion_conv_add_relu_op.h b/src/operators/fusion_conv_add_relu_op.h
index c5b13d23fb..c19c92ef78 100644
--- a/src/operators/fusion_conv_add_relu_op.h
+++ b/src/operators/fusion_conv_add_relu_op.h
@@ -17,9 +17,9 @@ limitations under the License. */
 #pragma once
 
 #include "framework/operator.h"
-#include "operators/op_param.h"
-#include "operators/kernel/conv_add_relu_kernel.h"
 #include "framework/program/program-optimize/fusion_op_register.h"
+#include "operators/kernel/conv_add_relu_kernel.h"
+#include "operators/op_param.h"
 
 namespace paddle_mobile {
 namespace operators {
@@ -42,27 +42,30 @@ class FushionConvAddReluOpMatcher : public framework::FusionOpMatcher {
 };
 
 template <typename DeviceType, typename T>
-class FusionConvAddReluOp: public framework::OperatorWithKernel<
-    DeviceType, FushionConvAddReluParam,
-    operators::ConvAddReluKernel<DeviceType, T>> {
+class FusionConvAddReluOp : public framework::OperatorWithKernel<
+                                DeviceType, FushionConvAddReluParam,
+                                operators::ConvAddReluKernel<DeviceType, T>> {
  public:
   FusionConvAddReluOp(const string &type, const VariableNameMap &inputs,
-                const VariableNameMap &outputs,
-                const framework::AttributeMap &attrs,
-                std::shared_ptr<Scope> scope)
-      : framework::OperatorWithKernel<DeviceType, FushionConvAddReluParam, operators::ConvAddReluKernel<DeviceType, T>>(
-            type, inputs, outputs, attrs, scope) {}
+                      const VariableNameMap &outputs,
+                      const framework::AttributeMap &attrs,
+                      std::shared_ptr<Scope> scope)
+      : framework::OperatorWithKernel<
+            DeviceType, FushionConvAddReluParam,
+            operators::ConvAddReluKernel<DeviceType, T>>(type, inputs, outputs,
+                                                         attrs, scope) {}
 
   using framework::OperatorWithKernel<
-      DeviceType, FushionConvAddReluParam,
-      operators::ConvAddReluKernel<DeviceType, T>>::OperatorWithKernel;
+      DeviceType, FushionConvAddReluParam,
+      operators::ConvAddReluKernel<DeviceType, T>>::OperatorWithKernel;
   void InferShape() const override;
-
+
  protected:
 };
 
 #ifdef PADDLE_MOBILE_CPU
-//static framework::FusionOpRegistrar fusion_conv_add_relu_registrar(new FushionConvAddReluOpMatcher());
+// static framework::FusionOpRegistrar fusion_conv_add_relu_registrar(new
+// FushionConvAddReluOpMatcher());
 #endif
 #ifdef PADDLE_MOBILE_MALI_GPU
 #endif
diff --git a/src/operators/fusion_fc_op.h b/src/operators/fusion_fc_op.h
index b1c255d7ab..5e8ddc1425 100644
--- a/src/operators/fusion_fc_op.h
+++ b/src/operators/fusion_fc_op.h
@@ -70,7 +70,7 @@ class FushionFcOp : public framework::OperatorWithKernel<
 static framework::FusionOpRegistrar fc_registrar(new FusionFcMatcher());
 #endif
 #ifdef PADDLE_MOBILE_MALI_GPU
-//static framework::FusionOpRegistrar fc_registrar(new FusionFcMatcher());
+// static framework::FusionOpRegistrar fc_registrar(new FusionFcMatcher());
 #endif
 #ifdef PADDLE_MOBILE_FPGA
 #endif
diff --git a/src/operators/kernel/arm/conv_add_kernel.cpp b/src/operators/kernel/arm/conv_add_kernel.cpp
index 879cc4a266..4a669dc111 100644
--- a/src/operators/kernel/arm/conv_add_kernel.cpp
+++ b/src/operators/kernel/arm/conv_add_kernel.cpp
@@ -50,7 +50,8 @@ void ConvAddKernel<CPU, float>::Compute(
   framework::DDim col_matrix_shape =
       framework::flatten_to_2d(col_shape, data_dim + 1);
 
-  bool is_expand = math::IsExpand(filter_shape_vec, strides, paddings, dilations);
+  bool is_expand =
+      math::IsExpand(filter_shape_vec, strides, paddings, dilations);
   Tensor col;
   Tensor col_matrix;
   if (is_expand) {
diff --git a/src/operators/kernel/arm/conv_add_relu_kernel.cpp b/src/operators/kernel/arm/conv_add_relu_kernel.cpp
index 4843c9aa5d..a7141db7ec 100644
--- a/src/operators/kernel/arm/conv_add_relu_kernel.cpp
+++ b/src/operators/kernel/arm/conv_add_relu_kernel.cpp
@@ -21,7 +21,7 @@ namespace operators {
 
 template <>
 void ConvAddReluKernel<CPU, float>::Compute(
-        const FushionConvAddReluParam &param) const {
+    const FushionConvAddReluParam &param) const {
   const Tensor *input = param.Input();
   Tensor filter = *param.Filter();
   Tensor bias = *param.Bias();
@@ -49,9 +49,10 @@ void ConvAddReluKernel<CPU, float>::Compute(
 
   framework::DDim col_shape(framework::make_ddim(col_shape_vec));
   framework::DDim col_matrix_shape =
-          framework::flatten_to_2d(col_shape, data_dim + 1);
+      framework::flatten_to_2d(col_shape, data_dim + 1);
 
-  bool is_expand = math::IsExpand(filter_shape_vec, strides, paddings, dilations);
+  bool is_expand =
+      math::IsExpand(filter_shape_vec, strides, paddings, dilations);
   Tensor col;
   Tensor col_matrix;
   if (is_expand) {
@@ -61,14 +62,14 @@ void ConvAddReluKernel<CPU, float>::Compute(
   }
 
   framework::DDim input_shape = framework::slice_ddim(
-        input->dims(), 1, static_cast<int>(input->dims().size()));
+      input->dims(), 1, static_cast<int>(input->dims().size()));
 
   framework::DDim filter_matrix_shape = {filter.dims()[0],
                                          filter.numel() / filter.dims()[0]};
   filter.Resize(filter_matrix_shape);
   framework::DDim output_matrix_shape = {
-          output->dims()[1],
-          output->numel() / (output->dims()[0] * output->dims()[1])};
+      output->dims()[1],
+      output->numel() / (output->dims()[0] * output->dims()[1])};
 
   // convolution operator: im2col(or vol2col) + gemm
   int in_step = static_cast<int>(input->dims()[1]) / groups;
@@ -105,13 +106,12 @@ void ConvAddReluKernel<CPU, float>::Compute(
       math::matmul<float>(filter_slice, false, col_matrix, false,
                           static_cast<float>(1), &out_slice,
                           static_cast<float>(1), true);
-
     }
   }
 }
 
 template class ConvAddReluKernel<CPU, float>;
-}
-}
+}  // namespace operators
+}  // namespace paddle_mobile
 
 #endif
diff --git a/src/operators/kernel/conv_add_kernel.h b/src/operators/kernel/conv_add_kernel.h
index 2eea496602..ab119e80c3 100644
--- a/src/operators/kernel/conv_add_kernel.h
+++ b/src/operators/kernel/conv_add_kernel.h
@@ -21,12 +21,12 @@ limitations under the License. */
 #include <arm_neon.h>
 #endif
 #include "framework/ddim.h"
-#include "operators/op_param.h"
 #include "framework/operator.h"
-#include "operators/math/im2col.h"
-#include "operators/math/vol2col.h"
 #include "operators/math/conv_func.h"
+#include "operators/math/im2col.h"
 #include "operators/math/math_function.h"
+#include "operators/math/vol2col.h"
+#include "operators/op_param.h"
 
 namespace paddle_mobile {
 namespace operators {
@@ -34,7 +34,6 @@ namespace operators {
 
 using framework::DDim;
 using framework::OpKernelBase;
-
 template <typename DeviceType, typename T>
 class ConvAddKernel : public OpKernelBase<DeviceType, FushionConvAddParam> {
  public:
diff --git a/src/operators/kernel/conv_add_relu_kernel.h b/src/operators/kernel/conv_add_relu_kernel.h
index d99f90def1..d2da6d30d2 100644
--- a/src/operators/kernel/conv_add_relu_kernel.h
+++ b/src/operators/kernel/conv_add_relu_kernel.h
@@ -19,11 +19,11 @@ limitations under the License. */
 #include <vector>
 #include "framework/ddim.h"
 #include "framework/operator.h"
-#include "operators/op_param.h"
-#include "operators/math/im2col.h"
-#include "operators/math/vol2col.h"
 #include "operators/math/conv_func.h"
+#include "operators/math/im2col.h"
 #include "operators/math/math_function.h"
+#include "operators/math/vol2col.h"
+#include "operators/op_param.h"
 
 namespace paddle_mobile {
 namespace operators {
@@ -32,7 +32,8 @@ using framework::DDim;
 using framework::OpKernelBase;
 
 template <typename DeviceType, typename T>
-class ConvAddReluKernel : public OpKernelBase<DeviceType, FushionConvAddReluParam> {
+class ConvAddReluKernel
+    : public OpKernelBase<DeviceType, FushionConvAddReluParam> {
  public:
   void Compute(const FushionConvAddReluParam &param) const;
 };
diff --git a/src/operators/kernel/mali/batchnorm_kernel.cpp b/src/operators/kernel/mali/batchnorm_kernel.cpp
index f1bb29575a..5ad6d6f015 100644
--- a/src/operators/kernel/mali/batchnorm_kernel.cpp
+++ b/src/operators/kernel/mali/batchnorm_kernel.cpp
@@ -22,8 +22,8 @@ namespace paddle_mobile {
 namespace operators {
 
 template <>
-void BatchNormKernel<GPU_MALI, float>::Compute(const BatchNormParam &param) const {
-}
+void BatchNormKernel<GPU_MALI, float>::Compute(
+    const BatchNormParam &param) const {}
 
 }  // namespace operators
 }  // namespace paddle_mobile
diff --git a/src/operators/math/conv_func.h b/src/operators/math/conv_func.h
index e6af2172fc..3d23f6c8a2 100644
--- a/src/operators/math/conv_func.h
+++ b/src/operators/math/conv_func.h
@@ -42,7 +42,7 @@ inline void expand_bias(Tensor &bias, int axis, const DDim &dDim) {
                         "the bias tensor's dims size != 1")
   DDim outer_ddim = paddle_mobile::framework::slice_ddim(dDim, 0, axis + 1);
   DDim inner_ddim =
-          paddle_mobile::framework::slice_ddim(dDim, axis + 1, dDim.size());
+      paddle_mobile::framework::slice_ddim(dDim, axis + 1, dDim.size());
   int outer_size = paddle_mobile::framework::product(outer_ddim);
   int inner_size = paddle_mobile::framework::product(inner_ddim);
   bias.Resize(dDim);
@@ -98,6 +98,6 @@ inline bool IsExpand(const std::vector<int64_t> &filter_dim,
   return !(filter_1 && strides_1 && padding_0 && dilation_1);
 }
 
-}
-}
-}
+}  // namespace math
+}  // namespace operators
+}  // namespace paddle_mobile
diff --git a/src/operators/math/gemm.cpp b/src/operators/math/gemm.cpp
index c19fdfd57b..669c7bff62 100644
--- a/src/operators/math/gemm.cpp
+++ b/src/operators/math/gemm.cpp
@@ -177,8 +177,8 @@ void InnerKernel(int m, int n, int k, float alpha, const float *A, int lda,
 
 // 分块矩阵乘法
 void InnerKernel_relu(int m, int n, int k, float alpha, const float *A, int lda,
-                  const float *B, int ldb, float beta, float *C, int ldc,
-                  int first_time, bool relu = false) {
+                      const float *B, int ldb, float beta, float *C, int ldc,
+                      int first_time, bool relu = false) {
   int Buff_A_M = m;
   int Buff_B_N = n;
 
@@ -210,12 +210,11 @@ void InnerKernel_relu(int m, int n, int k, float alpha, const float *A, int lda,
     for (i = 0; i < Buff_A_M; i += MR) {
       mc = (m - i) < MR ? _mc : MR;
       AddDot4x4_relu(k, alpha, &packedA[i * k], 4, &packedB[j * k], k, beta,
-                 &C(i, j), ldc, mc, nc, relu);
+                     &C(i, j), ldc, mc, nc, relu);
     }
   }
 }
-
 
 //计算一个更小的 4 * 4 的 C 矩阵分块
 #if defined(IOS)
 void AddDot4x4(int k, float alpha, const float *a, int lda, const float *b,
@@ -269,8 +268,9 @@ void AddDot4x4(int k, float alpha, const float *a, int lda, const float *b,
 }
 
 void AddDot4x4_relu(int k, float alpha, const float *a, int lda, const float *b,
-               int ldb, float beta, float *C, int ldc, int mc, int nc, bool relu = false) {
- // init C
+                    int ldb, float beta, float *C, int ldc, int mc, int nc,
+                    bool relu = false) {
+  // init C
   float32x4_t cv0 = vdupq_n_f32(0.0);
   float32x4_t cv1 = vdupq_n_f32(0.0);
   float32x4_t cv2 = vdupq_n_f32(0.0);
@@ -458,7 +458,8 @@ void AddDot4x4(int k, float alpha, const float *a, int lda, const float *b,
 }
 
 void AddDot4x4_relu(int k, float alpha, const float *a, int lda, const float *b,
-               int ldb, float beta, float *C, int ldc, int mc, int nc, bool relu = false) {
+                    int ldb, float beta, float *C, int ldc, int mc, int nc,
+                    bool relu = false) {
   int kc1 = k / 2, kc2 = k % 2;
   int bytes_ldc = 4 * ldc;
   int flag_alpha = (alpha == 1.0) ? 1 : 2;
@@ -571,8 +572,8 @@ void AddDot4x4_relu(int k, float alpha, const float *a, int lda, const float *b,
         [kc2] "r"(kc2), [mc] "r"(mc), [nc] "r"(nc), [alpha] "r"(alpha),
         [beta] "r"(beta), [bytes_ldc] "r"(bytes_ldc),
         [flag_alpha] "r"(flag_alpha), [flag_beta] "r"(flag_beta)
-      : "memory", "q0", "q1", "q2", "q3", "q4", "q10", "q11", "q12", "q13", "q14");
-
+      : "memory", "q0", "q1", "q2", "q3", "q4", "q10", "q11", "q12", "q13",
+        "q14");
 
   if (mc != MR || nc != NR) {
     int i, j;
@@ -599,7 +600,6 @@ void AddDot4x4_relu(int k, float alpha, const float *a, int lda, const float *b,
           C(i, j) = 0;
         }
       }
-
     }
   }
 }
@@ -664,7 +664,8 @@ void AddDot4x4(int k, float alpha, const float *a, int lda, const float *b,
 }
 
 void AddDot4x4_relu(int k, float alpha, const float *a, int lda, const float *b,
-               int ldb, float beta, float *C, int ldc, int mc, int nc, bool relu) {
+                    int ldb, float beta, float *C, int ldc, int mc, int nc,
+                    bool relu) {
   float c[16] = {0};
   float reg_a0, reg_a1, reg_a2, reg_a3, reg_b0, reg_b1, reg_b2, reg_b3;
 
@@ -725,7 +726,6 @@ void AddDot4x4_relu(int k, float alpha, const float *a, int lda, const float *b,
     }
   }
 }
-
 #endif
 
 // 32位 float 矩阵乘法
@@ -768,11 +768,11 @@ void sgemm_relu(int m, int n, int k, float alpha, const float *A, int lda,
       }
 
       if (p + KC >= k) {
-        InnerKernel_relu(mc, nc, kc, alpha, &A(i, p), lda, &B(p, j), ldb, beta_,
-                     &C(i, j), ldc, i == 0, true);
+        InnerKernel_relu(mc, nc, kc, alpha, &A(i, p), lda, &B(p, j), ldb,
+                         beta_, &C(i, j), ldc, i == 0, true);
       } else {
         InnerKernel(mc, nc, kc, alpha, &A(i, p), lda, &B(p, j), ldb, beta_,
-                  &C(i, j), ldc, i == 0);
+                    &C(i, j), ldc, i == 0);
       }
     }
   }
diff --git a/src/operators/math/gemm.h b/src/operators/math/gemm.h
index 3ac51765bf..00285aed94 100644
--- a/src/operators/math/gemm.h
+++ b/src/operators/math/gemm.h
@@ -58,14 +58,15 @@ void AddDot4x4(int k, float alpha, const float *A, int lda, const float *B,
                int ldb, float beta, float *C, int ldc, int mc, int nc);
 
 void AddDot4x4_relu(int k, float alpha, const float *a, int lda, const float *b,
-               int ldb, float beta, float *C, int ldc, int mc, int nc, bool relu);
+                    int ldb, float beta, float *C, int ldc, int mc, int nc,
+                    bool relu);
 
 // 32位 float 矩阵乘法
 void sgemm(int m, int n, int k, float alpha, const float *A, int lda,
            const float *B, int ldb, float beta, float *C, int ldc);
 
 void sgemm_relu(int m, int n, int k, float alpha, const float *A, int lda,
-                const  float *B, int ldb, float beta, float *C, int ldc);
+                const float *B, int ldb, float beta, float *C, int ldc);
 
 // 64位 double 矩阵乘法
 void dgemm(int m, int n, int k, float alpha, const double *A, int lda,
diff --git a/src/operators/math/math_function.cpp b/src/operators/math/math_function.cpp
index 89faf217e9..fd4106038c 100644
--- a/src/operators/math/math_function.cpp
+++ b/src/operators/math/math_function.cpp
@@ -42,19 +42,19 @@ void matmul<float>(const framework::Tensor &matrix_a, bool trans_a,
   int K = (trans_a == false) ? dim_a[1] : dim_a[0];
 
   if (relu) {
-    sgemm_relu(M, N, K, alpha, matrix_a.data<float>(), K, matrix_b.data<float>(), N,
-               beta, matrix_out->data<float>(), N);
+    sgemm_relu(M, N, K, alpha, matrix_a.data<float>(), K,
+               matrix_b.data<float>(), N, beta, matrix_out->data<float>(), N);
   } else {
     sgemm(M, N, K, alpha, matrix_a.data<float>(), K, matrix_b.data<float>(),
           N, beta, matrix_out->data<float>(), N);
   }
-
 }
 
 template <>
 void matmul<double>(const framework::Tensor &matrix_a, bool trans_a,
                     const framework::Tensor &matrix_b, bool trans_b,
-                    double alpha, framework::Tensor *matrix_out, double beta, bool relu) {
+                    double alpha, framework::Tensor *matrix_out, double beta,
+                    bool relu) {
   auto dim_a = matrix_a.dims();
   auto dim_b = matrix_b.dims();
   auto dim_out = matrix_out->dims();
@@ -74,8 +74,6 @@ void matmul<double>(const framework::Tensor &matrix_a, bool trans_a,
   int K = (trans_a == false) ? dim_a[1] : dim_a[0];
 }
 
-
-
 }  // namespace math
 }  // namespace operators
 }  // namespace paddle_mobile
diff --git a/src/operators/op_param.h b/src/operators/op_param.h
index 6bf2b41a09..8d69ea1a2e 100644
--- a/src/operators/op_param.h
+++ b/src/operators/op_param.h
@@ -839,12 +839,12 @@ Print &operator<<(Print &printer, const FushionConvAddParam &conv_param);
 #endif
 
 #ifdef FUSION_CONVADD_RELU_OP
-class FushionConvAddReluParam: public FushionConvAddParam {
+class FushionConvAddReluParam : public FushionConvAddParam {
  public:
   FushionConvAddReluParam(const VariableNameMap &inputs,
-                          const VariableNameMap &outputs, const AttributeMap &attrs,
-                          const Scope &scope): FushionConvAddParam(inputs, outputs, attrs, scope) {
-  }
+                          const VariableNameMap &outputs,
+                          const AttributeMap &attrs, const Scope &scope)
+      : FushionConvAddParam(inputs, outputs, attrs, scope) {}
 };
 #endif
 
diff --git a/test/executor_for_test.h b/test/executor_for_test.h
index 1c47410a0b..0d3051327a 100644
--- a/test/executor_for_test.h
+++ b/test/executor_for_test.h
@@ -42,7 +42,8 @@ using std::vector;
 template <typename DeviceType, typename OpType>
 class Executor4Test : public Executor<DeviceType> {
  public:
-  Executor4Test(Program<DeviceType> p, string op_type, bool use_optimize = false)
+  Executor4Test(Program<DeviceType> p, string op_type,
+                bool use_optimize = false)
       : Executor<DeviceType>() {
     this->use_optimize_ = use_optimize;
     this->program_ = p;
@@ -62,16 +63,14 @@ class Executor4Test : public Executor<DeviceType> {
     std::vector<std::shared_ptr<OpDesc>> ops = block_desc->Ops();
     for (std::shared_ptr<OpDesc> op : ops) {
       if (op->Type() == op_type) {
-        DLOG << "匹配到: " << op->Type();
 
         /// test first meeting op in program
         std::shared_ptr<paddle_mobile::framework::OperatorBase<DeviceType>>
-            op_ptr = paddle_mobile::framework::OpRegistry<
-                DeviceType>::CreateOp(op->Type(), op->GetInputs(),
-                                      op->GetOutputs(),
-                                      op->GetAttrMap(),
-                                      this->program_.scope);
+            op_ptr =
+                paddle_mobile::framework::OpRegistry<DeviceType>::CreateOp(
+                    op->Type(), op->GetInputs(), op->GetOutputs(),
+                    op->GetAttrMap(), this->program_.scope);
         this->ops_of_block_[*block_desc.get()].push_back(op_ptr);
         break;
       }
     }
diff --git a/test/framework/test_load.cpp b/test/framework/test_load.cpp
index 3128fd41ba..8c76eb1dde 100644
--- a/test/framework/test_load.cpp
+++ b/test/framework/test_load.cpp
@@ -20,9 +20,10 @@ int main() {
   // ../../../test/models/googlenet
   // ../../../test/models/mobilenet
   auto program = loader.Load(g_mobilenet_ssd, false, false);
-// auto program = loader.Load(g_googlenet_combine + "/model", g_googlenet_combine +
-// "/params", true);
-
+  // auto program = loader.Load(g_googlenet_combine + "/model",
+  // g_googlenet_combine +
+  // "/params", true);
+
   // program.originProgram->Description("program desc: ");
   return 0;
 }
diff --git a/test/operators/test_conv_add_relu_op.cpp b/test/operators/test_conv_add_relu_op.cpp
index 6563d7a0b5..987f52cd62 100644
--- a/test/operators/test_conv_add_relu_op.cpp
+++ b/test/operators/test_conv_add_relu_op.cpp
@@ -23,9 +23,10 @@ int main() {
   PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr,
                         "program file read fail");
 
-  Executor4Test<paddle_mobile::CPU,
-                paddle_mobile::operators::FusionConvAddReluOp<paddle_mobile::CPU, float>>
-      executor(program, "fusion_conv_add_relu", true);
+  Executor4Test<
+      paddle_mobile::CPU,
+      paddle_mobile::operators::FusionConvAddReluOp<paddle_mobile::CPU, float>>
+      executor(program, "fusion_conv_add_relu", true);
 
   paddle_mobile::framework::Tensor input;
   GetInput<float>(g_test_image_1x3x224x224, &input, {1, 3, 224, 224});
diff --git a/test/operators/test_cov_op.cpp b/test/operators/test_cov_op.cpp
index 3b53a3951a..a85ad9edba 100644
--- a/test/operators/test_cov_op.cpp
+++ b/test/operators/test_cov_op.cpp
@@ -23,8 +23,8 @@ int main() {
   PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr,
                         "program file read fail");
 
-  Executor4Test<paddle_mobile::CPU, paddle_mobile::operators::ConvOp<
-      paddle_mobile::CPU, float>>
+  Executor4Test<paddle_mobile::CPU,
+                paddle_mobile::operators::ConvOp<paddle_mobile::CPU, float>>
       executor(program, "conv2d");
 
   paddle_mobile::framework::Tensor input;
--
GitLab
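
Editor's appendix (reference material, not part of the patch above):

1. The InferShape() hunks in conv_op.cpp, depthwise_conv_op.cpp,
fusion_conv_add.cpp, and fusion_conv_add_relu_op.cpp all reflow the same
math::ConvOutputSize call. For readers who want the arithmetic behind that
helper, here is a minimal C++ sketch of the standard convolution output-size
formula it implements (reconstructed from the usual definition; the body below
is a sketch, not quoted from this patch):

    // The effective (dilated) kernel extent is dilation * (filter - 1) + 1;
    // padding is applied on both sides, and integer division floors by stride.
    inline int ConvOutputSize(int input_size, int filter_size, int dilation,
                              int padding, int stride) {
      const int dkernel = dilation * (filter_size - 1) + 1;
      return (input_size + 2 * padding - dkernel) / stride + 1;
    }
    // Example: input 224, filter 3, dilation 1, padding 1, stride 2
    //   -> (224 + 2 - 3) / 2 + 1 = 112.

2. The Chinese source comments are left verbatim in the diff context so the
hunks stay faithful; they translate as: 分块矩阵乘法 "blocked (tiled) matrix
multiplication"; 计算一个更小的 4 * 4 的 C 矩阵分块 "compute a smaller 4 x 4
block of the C matrix"; 32位 float 矩阵乘法 / 64位 double 矩阵乘法 "32-bit
float / 64-bit double matrix multiplication"; and the deleted DLOG string
匹配到 "matched".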
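
3. The gemm.cpp hunks only re-indent the blocked SGEMM, but the shape of the
computation is easier to see in scalar form. Below is a sketch of the 4 x 4
micro-kernel role that the AddDot4x4_relu variants play; the panel packing
layout and the alpha/beta handling are simplifying assumptions here, not the
library's exact code:

    // One MR x NR (4 x 4) tile: C_tile += A_panel * B_panel, with an optional
    // fused ReLU, mirroring AddDot4x4_relu inside InnerKernel_relu/sgemm_relu.
    void AddDot4x4_sketch(int k, const float *a, const float *b, float *c,
                          int ldc, bool relu) {
      float acc[4][4] = {{0.0f}};
      for (int p = 0; p < k; ++p) {    // walk the shared (k) dimension
        for (int i = 0; i < 4; ++i) {
          for (int j = 0; j < 4; ++j) {
            // a and b are assumed packed as 4-wide panels
            acc[i][j] += a[p * 4 + i] * b[p * 4 + j];
          }
        }
      }
      for (int i = 0; i < 4; ++i) {
        for (int j = 0; j < 4; ++j) {
          float v = c[i * ldc + j] + acc[i][j];
          c[i * ldc + j] = (relu && v < 0.0f) ? 0.0f : v;  // ReLU epilogue
        }
      }
    }

The NEON-intrinsic and inline-assembly versions touched above vectorize this
same loop nest; the mc/nc parameters handle the partial tiles at matrix edges.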