diff --git a/src/operators/conv_op.cpp b/src/operators/conv_op.cpp
index 824ab9ee311f45b3db4efba756f6b36bc7ff97b3..01d284a06ed33142a8d16cdc32f304c3d1a75e28 100644
--- a/src/operators/conv_op.cpp
+++ b/src/operators/conv_op.cpp
@@ -17,8 +17,8 @@ limitations under the License. */
 #include "operators/conv_op.h"
 #include <vector>
 #include "framework/op_proto_maker.h"
-#include "operators/math/conv_func.h"
 #include "framework/op_registry.h"
+#include "operators/math/conv_func.h"
 
 namespace paddle_mobile {
 namespace operators {
@@ -39,9 +39,9 @@ void ConvOp<Dtype, T>::InferShape() const {
   std::vector<int64_t> output_shape({in_dims[0], filter_dims[0]});
   for (size_t i = 0; i < strides.size(); ++i) {
-    output_shape.push_back(math::ConvOutputSize(in_dims[i + 2], filter_dims[i + 2],
-                                                dilations[i], paddings[i],
-                                                strides[i]));
+    output_shape.push_back(
+        math::ConvOutputSize(in_dims[i + 2], filter_dims[i + 2], dilations[i],
+                             paddings[i], strides[i]));
   }
 
   framework::DDim ddim = framework::make_ddim(output_shape);
diff --git a/src/operators/depthwise_conv_op.cpp b/src/operators/depthwise_conv_op.cpp
index b127424bce2122f3d89b7adaf7936409700d9f01..46f2db30ba2fbff5839d6a737dda12fa6cd10b43 100644
--- a/src/operators/depthwise_conv_op.cpp
+++ b/src/operators/depthwise_conv_op.cpp
@@ -17,9 +17,9 @@ limitations under the License. */
 #include "operators/depthwise_conv_op.h"
 #include <vector>
 #include "framework/op_proto_maker.h"
-#include "operators/math/conv_func.h"
 #include "framework/op_registry.h"
 #include "operators/conv_op.h"
+#include "operators/math/conv_func.h"
 
 namespace paddle_mobile {
 namespace operators {
@@ -40,9 +40,9 @@ void DepthwiseConvOp<Dtype, T>::InferShape() const {
   std::vector<int64_t> output_shape({in_dims[0], filter_dims[0]});
   for (size_t i = 0; i < strides.size(); ++i) {
-    output_shape.push_back(math::ConvOutputSize(in_dims[i + 2], filter_dims[i + 2],
-                                                dilations[i], paddings[i],
-                                                strides[i]));
+    output_shape.push_back(
+        math::ConvOutputSize(in_dims[i + 2], filter_dims[i + 2], dilations[i],
+                             paddings[i], strides[i]));
   }
 
   framework::DDim ddim = framework::make_ddim(output_shape);
diff --git a/src/operators/fusion_conv_add.cpp b/src/operators/fusion_conv_add.cpp
index c8519a7e0001eb5f8c77d47e21df4bb176c31222..80682324f75e83df524dc038c7e0cf3608763e7d 100644
--- a/src/operators/fusion_conv_add.cpp
+++ b/src/operators/fusion_conv_add.cpp
@@ -14,8 +14,8 @@ limitations under the License. */
 
 #ifdef FUSION_CONVADD_OP
 
-#include "operators/math/conv_func.h"
 #include "operators/fusion_conv_add.h"
+#include "operators/math/conv_func.h"
 
 namespace paddle_mobile {
 namespace operators {
@@ -36,9 +36,9 @@ void FushionConvAddOp<Dtype, T>::InferShape() const {
   std::vector<int64_t> output_shape({in_dims[0], filter_dims[0]});
   for (size_t i = 0; i < strides.size(); ++i) {
-    output_shape.push_back(math::ConvOutputSize(in_dims[i + 2], filter_dims[i + 2],
-                                                dilations[i], paddings[i],
-                                                strides[i]));
+    output_shape.push_back(
+        math::ConvOutputSize(in_dims[i + 2], filter_dims[i + 2], dilations[i],
+                             paddings[i], strides[i]));
   }
 
   framework::DDim ddim = framework::make_ddim(output_shape);
diff --git a/src/operators/fusion_conv_add.h b/src/operators/fusion_conv_add.h
index 1c4c898a8ef753298128a587e44d5ee2edb36ec0..88ddd055b962d83c2e486da69083e5b77bbbcf96 100644
--- a/src/operators/fusion_conv_add.h
+++ b/src/operators/fusion_conv_add.h
@@ -18,10 +18,10 @@ limitations under the License. */
 #include <string>
 #include <vector>
-#include "op_param.h"
 #include "framework/operator.h"
-#include "operators/kernel/conv_add_kernel.h"
 #include "framework/program/program-optimize/fusion_op_register.h"
+#include "op_param.h"
+#include "operators/kernel/conv_add_kernel.h"
 
 namespace paddle_mobile {
 namespace operators {
diff --git a/src/operators/fusion_conv_add_relu_op.cpp b/src/operators/fusion_conv_add_relu_op.cpp
index e7f18a7199fb5a19eeaf3c4b1d5335f7da12ddcd..694e46af1f8dec3513c5a6d2ff26e3676e9204e4 100644
--- a/src/operators/fusion_conv_add_relu_op.cpp
+++ b/src/operators/fusion_conv_add_relu_op.cpp
@@ -36,16 +36,16 @@ void FusionConvAddReluOp<DeviceType, T>::InferShape() const {
   std::vector<int64_t> output_shape({in_dims[0], filter_dims[0]});
   for (size_t i = 0; i < strides.size(); ++i) {
-    output_shape.push_back(math::ConvOutputSize(in_dims[i + 2], filter_dims[i + 2],
-                                                dilations[i], paddings[i],
-                                                strides[i]));
+    output_shape.push_back(
+        math::ConvOutputSize(in_dims[i + 2], filter_dims[i + 2], dilations[i],
+                             paddings[i], strides[i]));
   }
 
   framework::DDim ddim = framework::make_ddim(output_shape);
   this->param_.Output()->Resize(ddim);
 }
 
-}
-}
+}  // namespace operators
+}  // namespace paddle_mobile
 
 namespace ops = paddle_mobile::operators;
 #ifdef PADDLE_MOBILE_CPU
diff --git a/src/operators/fusion_conv_add_relu_op.h b/src/operators/fusion_conv_add_relu_op.h
index c5b13d23fbfb8fbcdbcb5885eaa688cafcc47497..c19c92ef78d8d60e70b07558a1dfea84b0b4707c 100644
--- a/src/operators/fusion_conv_add_relu_op.h
+++ b/src/operators/fusion_conv_add_relu_op.h
@@ -17,9 +17,9 @@ limitations under the License. */
 #pragma once
 
 #include "framework/operator.h"
-#include "operators/op_param.h"
-#include "operators/kernel/conv_add_relu_kernel.h"
 #include "framework/program/program-optimize/fusion_op_register.h"
+#include "operators/kernel/conv_add_relu_kernel.h"
+#include "operators/op_param.h"
 
 namespace paddle_mobile {
 namespace operators {
@@ -42,27 +42,30 @@ class FushionConvAddReluOpMatcher : public framework::FusionOpMatcher {
 };
 
 template <typename DeviceType, typename T>
-class FusionConvAddReluOp: public framework::OperatorWithKernel<
-    DeviceType, FushionConvAddReluParam,
-    operators::ConvAddReluKernel<DeviceType, T>> {
+class FusionConvAddReluOp : public framework::OperatorWithKernel<
+                                DeviceType, FushionConvAddReluParam,
+                                operators::ConvAddReluKernel<DeviceType, T>> {
  public:
   FusionConvAddReluOp(const string &type, const VariableNameMap &inputs,
-                  const VariableNameMap &outputs,
-                  const framework::AttributeMap &attrs,
-                  std::shared_ptr<framework::Scope> scope)
-      : framework::OperatorWithKernel<DeviceType, FushionConvAddReluParam,
-            operators::ConvAddReluKernel<DeviceType, T>>(
-            type, inputs, outputs, attrs, scope) {}
+                      const VariableNameMap &outputs,
+                      const framework::AttributeMap &attrs,
+                      std::shared_ptr<framework::Scope> scope)
+      : framework::OperatorWithKernel<
+            DeviceType, FushionConvAddReluParam,
+            operators::ConvAddReluKernel<DeviceType, T>>(type, inputs, outputs,
+                                                         attrs, scope) {}
 
   using framework::OperatorWithKernel<
-        DeviceType, FushionConvAddReluParam,
-        operators::ConvAddReluKernel<DeviceType, T>>::OperatorWithKernel;
+      DeviceType, FushionConvAddReluParam,
+      operators::ConvAddReluKernel<DeviceType, T>>::OperatorWithKernel;
   void InferShape() const override;
+ protected:
 };
 
 #ifdef PADDLE_MOBILE_CPU
-//static framework::FusionOpRegistrar fusion_conv_add_relu_registrar(new FushionConvAddReluOpMatcher());
+// static framework::FusionOpRegistrar fusion_conv_add_relu_registrar(new
+// FushionConvAddReluOpMatcher());
 #endif
 #ifdef PADDLE_MOBILE_MALI_GPU
 #endif
diff --git a/src/operators/fusion_fc_op.h b/src/operators/fusion_fc_op.h
index b1c255d7abb6c403b395d7e3ed5bccfaf3ee3815..5e8ddc142552451ad1dbe360d9a19a95ced5e2c1 100644
--- a/src/operators/fusion_fc_op.h
+++ b/src/operators/fusion_fc_op.h
@@ -70,7 +70,7 @@ class FushionFcOp : public framework::OperatorWithKernel<
 static framework::FusionOpRegistrar fc_registrar(new FusionFcMatcher());
 #endif
 #ifdef PADDLE_MOBILE_MALI_GPU
-//static framework::FusionOpRegistrar fc_registrar(new FusionFcMatcher());
+// static framework::FusionOpRegistrar fc_registrar(new FusionFcMatcher());
 #endif
 #ifdef PADDLE_MOBILE_FPGA
 #endif
diff --git a/src/operators/kernel/arm/conv_add_kernel.cpp b/src/operators/kernel/arm/conv_add_kernel.cpp
index 879cc4a2664bac26f9f1f5ceb661cbe1fa6f8b7f..4a669dc111675a4a5376b9585a0645a8bf4c4659 100644
--- a/src/operators/kernel/arm/conv_add_kernel.cpp
+++ b/src/operators/kernel/arm/conv_add_kernel.cpp
@@ -50,7 +50,8 @@ void ConvAddKernel<CPU, float>::Compute(
   framework::DDim col_matrix_shape =
       framework::flatten_to_2d(col_shape, data_dim + 1);
 
-  bool is_expand = math::IsExpand(filter_shape_vec, strides, paddings, dilations);
+  bool is_expand =
+      math::IsExpand(filter_shape_vec, strides, paddings, dilations);
   Tensor col;
   Tensor col_matrix;
   if (is_expand) {
diff --git a/src/operators/kernel/arm/conv_add_relu_kernel.cpp b/src/operators/kernel/arm/conv_add_relu_kernel.cpp
index 4843c9aa5d20e4c548f9c49b7900721379d899ba..a7141db7ec75eb78e5f09fd009fb94a1804c5d3b 100644
--- a/src/operators/kernel/arm/conv_add_relu_kernel.cpp
+++ b/src/operators/kernel/arm/conv_add_relu_kernel.cpp
@@ -21,7 +21,7 @@ namespace operators {
 
 template <>
 void ConvAddReluKernel<CPU, float>::Compute(
-  const FushionConvAddReluParam &param) const {
+    const FushionConvAddReluParam &param) const {
   const Tensor *input = param.Input();
   Tensor filter = *param.Filter();
   Tensor bias = *param.Bias();
@@ -49,9 +49,10 @@ void ConvAddReluKernel<CPU, float>::Compute(
   framework::DDim col_shape(framework::make_ddim(col_shape_vec));
 
   framework::DDim col_matrix_shape =
-    framework::flatten_to_2d(col_shape, data_dim + 1);
+      framework::flatten_to_2d(col_shape, data_dim + 1);
 
-  bool is_expand = math::IsExpand(filter_shape_vec, strides, paddings, dilations);
+  bool is_expand =
+      math::IsExpand(filter_shape_vec, strides, paddings, dilations);
   Tensor col;
   Tensor col_matrix;
   if (is_expand) {
@@ -61,14 +62,14 @@ void ConvAddReluKernel<CPU, float>::Compute(
   }
 
   framework::DDim input_shape = framework::slice_ddim(
-    input->dims(), 1, static_cast<int>(input->dims().size()));
+      input->dims(), 1, static_cast<int>(input->dims().size()));
 
   framework::DDim filter_matrix_shape = {filter.dims()[0],
                                          filter.numel() / filter.dims()[0]};
   filter.Resize(filter_matrix_shape);
 
   framework::DDim output_matrix_shape = {
-    output->dims()[1],
-    output->numel() / (output->dims()[0] * output->dims()[1])};
+      output->dims()[1],
+      output->numel() / (output->dims()[0] * output->dims()[1])};
 
   // convolution operator: im2col(or vol2col) + gemm
   int in_step = static_cast<int>(input->dims()[1]) / groups;
@@ -105,13 +106,12 @@ void ConvAddReluKernel<CPU, float>::Compute(
       math::matmul<float>(filter_slice, false, col_matrix, false,
                           static_cast<float>(1), &out_slice,
                           static_cast<float>(1), true);
-
     }
   }
 }
 
 template class ConvAddReluKernel<CPU, float>;
 
-}
-}
+}  // namespace operators
+}  // namespace paddle_mobile
 
 #endif
diff --git a/src/operators/kernel/conv_add_kernel.h b/src/operators/kernel/conv_add_kernel.h
index 2eea4966028a8a298ea9384ef0647a68600ee20f..ab119e80c356453d18461e1d67361896057b16e9 100644
--- a/src/operators/kernel/conv_add_kernel.h
+++ b/src/operators/kernel/conv_add_kernel.h
@@ -21,12 +21,12 @@ limitations under the License. */
 #include <arm_neon.h>
 #endif
 #include "framework/ddim.h"
-#include "operators/op_param.h"
 #include "framework/operator.h"
-#include "operators/math/im2col.h"
-#include "operators/math/vol2col.h"
 #include "operators/math/conv_func.h"
+#include "operators/math/im2col.h"
 #include "operators/math/math_function.h"
+#include "operators/math/vol2col.h"
+#include "operators/op_param.h"
 
 namespace paddle_mobile {
 namespace operators {
@@ -34,7 +34,6 @@ namespace operators {
 using framework::DDim;
 using framework::OpKernelBase;
 
-
 template <typename DeviceType, typename T>
 class ConvAddKernel : public OpKernelBase<DeviceType, FushionConvAddParam> {
  public:
diff --git a/src/operators/kernel/conv_add_relu_kernel.h b/src/operators/kernel/conv_add_relu_kernel.h
index d99f90def10910403c382f77ed389cc10c113639..d2da6d30d2c30a28c0b383a0bd3b3820a1977ea2 100644
--- a/src/operators/kernel/conv_add_relu_kernel.h
+++ b/src/operators/kernel/conv_add_relu_kernel.h
@@ -19,11 +19,11 @@ limitations under the License. */
 #include <vector>
 #include "framework/ddim.h"
 #include "framework/operator.h"
-#include "operators/op_param.h"
-#include "operators/math/im2col.h"
-#include "operators/math/vol2col.h"
 #include "operators/math/conv_func.h"
+#include "operators/math/im2col.h"
 #include "operators/math/math_function.h"
+#include "operators/math/vol2col.h"
+#include "operators/op_param.h"
 
 namespace paddle_mobile {
 namespace operators {
@@ -32,7 +32,8 @@ using framework::DDim;
 using framework::OpKernelBase;
 
 template <typename DeviceType, typename T>
-class ConvAddReluKernel : public OpKernelBase<DeviceType, FushionConvAddReluParam> {
+class ConvAddReluKernel
+    : public OpKernelBase<DeviceType, FushionConvAddReluParam> {
  public:
   void Compute(const FushionConvAddReluParam &param) const;
 };
diff --git a/src/operators/kernel/mali/batchnorm_kernel.cpp b/src/operators/kernel/mali/batchnorm_kernel.cpp
index f1bb29575a8c481c7205776d667b64cf21c0c780..5ad6d6f015c9d8ec095f8269642dd72f4d0a56a1 100644
--- a/src/operators/kernel/mali/batchnorm_kernel.cpp
+++ b/src/operators/kernel/mali/batchnorm_kernel.cpp
@@ -22,8 +22,8 @@ namespace paddle_mobile {
 namespace operators {
 
 template <>
-void BatchNormKernel<GPU_MALI, float>::Compute(const BatchNormParam &param) const {
-}
+void BatchNormKernel<GPU_MALI, float>::Compute(
+    const BatchNormParam &param) const {}
 
 }  // namespace operators
 }  // namespace paddle_mobile
diff --git a/src/operators/math/conv_func.h b/src/operators/math/conv_func.h
index e6af2172fc106853e7657cf2fbe58466ce7228d3..3d23f6c8a24be7f52e1b322e07addb47ccd8b056 100644
--- a/src/operators/math/conv_func.h
+++ b/src/operators/math/conv_func.h
@@ -42,7 +42,7 @@ inline void expand_bias(Tensor &bias, int axis, const DDim &dDim) {
                         "the bias tensor's dims size != 1")
   DDim outer_ddim = paddle_mobile::framework::slice_ddim(dDim, 0, axis + 1);
   DDim inner_ddim =
-    paddle_mobile::framework::slice_ddim(dDim, axis + 1, dDim.size());
+      paddle_mobile::framework::slice_ddim(dDim, axis + 1, dDim.size());
   int outer_size = paddle_mobile::framework::product(outer_ddim);
   int inner_size = paddle_mobile::framework::product(inner_ddim);
   bias.Resize(dDim);
@@ -98,6 +98,6 @@ inline bool IsExpand(const std::vector<int64_t> &filter_dim,
   return !(filter_1 && strides_1 && padding_0 && dilation_1);
 }
 
-}
-}
-}
+}  // namespace math
+}  // namespace operators
+}  // namespace paddle_mobile
diff --git a/src/operators/math/gemm.cpp b/src/operators/math/gemm.cpp
index c19fdfd57b48e2cfeedd6bae69bdf90a9f7e7a11..669c7bff62986277603878698eecfb73819c0d13 100644
--- a/src/operators/math/gemm.cpp
+++ b/src/operators/math/gemm.cpp
@@ -177,8 +177,8 @@ void InnerKernel(int m, int n, int k, float alpha, const float *A, int lda,
 
 // Blocked (tiled) matrix multiplication
 void InnerKernel_relu(int m, int n, int k, float alpha, const float *A, int lda,
-                  const float *B, int ldb, float beta, float *C, int ldc,
-                  int first_time, bool relu = false) {
+                      const float *B, int ldb, float beta, float *C, int ldc,
+                      int first_time, bool relu = false) {
   int Buff_A_M = m;
   int Buff_B_N = n;
@@ -210,12 +210,11 @@ void InnerKernel_relu(int m, int n, int k, float alpha, const float *A, int lda,
     for (i = 0; i < Buff_A_M; i += MR) {
       mc = (m - i) < MR ? _mc : MR;
       AddDot4x4_relu(k, alpha, &packedA[i * k], 4, &packedB[j * k], k, beta,
-                    &C(i, j), ldc, mc, nc, relu);
+                     &C(i, j), ldc, mc, nc, relu);
     }
   }
 }
-
 // Compute a smaller 4 * 4 block of the C matrix
 #if defined(IOS)
 void AddDot4x4(int k, float alpha, const float *a, int lda, const float *b,
@@ -269,8 +268,9 @@ void AddDot4x4(int k, float alpha, const float *a, int lda, const float *b,
 }
 
 void AddDot4x4_relu(int k, float alpha, const float *a, int lda, const float *b,
-                    int ldb, float beta, float *C, int ldc, int mc, int nc, bool relu = false) {
-   // init C
+                    int ldb, float beta, float *C, int ldc, int mc, int nc,
+                    bool relu = false) {
+  // init C
   float32x4_t cv0 = vdupq_n_f32(0.0);
   float32x4_t cv1 = vdupq_n_f32(0.0);
   float32x4_t cv2 = vdupq_n_f32(0.0);
@@ -458,7 +458,8 @@ void AddDot4x4(int k, float alpha, const float *a, int lda, const float *b,
 }
 
 void AddDot4x4_relu(int k, float alpha, const float *a, int lda, const float *b,
-                    int ldb, float beta, float *C, int ldc, int mc, int nc, bool relu = false) {
+                    int ldb, float beta, float *C, int ldc, int mc, int nc,
+                    bool relu = false) {
   int kc1 = k / 2, kc2 = k % 2;
   int bytes_ldc = 4 * ldc;
   int flag_alpha = (alpha == 1.0) ? 1 : 2;
@@ -571,8 +572,8 @@ void AddDot4x4_relu(int k, float alpha, const float *a, int lda, const float *b,
         [kc2] "r"(kc2), [mc] "r"(mc), [nc] "r"(nc), [alpha] "r"(alpha),
         [beta] "r"(beta), [bytes_ldc] "r"(bytes_ldc),
         [flag_alpha] "r"(flag_alpha), [flag_beta] "r"(flag_beta)
-      : "memory", "q0", "q1", "q2", "q3", "q4", "q10", "q11", "q12", "q13", "q14");
-
+      : "memory", "q0", "q1", "q2", "q3", "q4", "q10", "q11", "q12", "q13",
+        "q14");
 
   if (mc != MR || nc != NR) {
     int i, j;
@@ -599,7 +600,6 @@ void AddDot4x4_relu(int k, float alpha, const float *a, int lda, const float *b,
           C(i, j) = 0;
         }
       }
-
     }
   }
 }
@@ -664,7 +664,8 @@ void AddDot4x4(int k, float alpha, const float *a, int lda, const float *b,
 }
 
 void AddDot4x4_relu(int k, float alpha, const float *a, int lda, const float *b,
-                    int ldb, float beta, float *C, int ldc, int mc, int nc, bool relu) {
+                    int ldb, float beta, float *C, int ldc, int mc, int nc,
+                    bool relu) {
   float c[16] = {0};
   float reg_a0, reg_a1, reg_a2, reg_a3, reg_b0, reg_b1, reg_b2, reg_b3;
@@ -725,7 +726,6 @@ void AddDot4x4_relu(int k, float alpha, const float *a, int lda, const float *b,
     }
   }
 }
-
 #endif
 
 // 32-bit float matrix multiplication
@@ -768,11 +768,11 @@
       }
 
       if (p + KC >= k) {
-        InnerKernel_relu(mc, nc, kc, alpha, &A(i, p), lda, &B(p, j), ldb, beta_,
-                    &C(i, j), ldc, i == 0, true);
+        InnerKernel_relu(mc, nc, kc, alpha, &A(i, p), lda, &B(p, j), ldb,
+                         beta_, &C(i, j), ldc, i == 0, true);
       } else {
         InnerKernel(mc, nc, kc, alpha, &A(i, p), lda, &B(p, j), ldb, beta_,
-               &C(i, j), ldc, i == 0);
+                    &C(i, j), ldc, i == 0);
       }
     }
   }
diff --git a/src/operators/math/gemm.h b/src/operators/math/gemm.h
index 3ac51765bfa571add3801499f7faf0f7e9132b71..00285aed94613ac7666c6c68df7b3208b09a777a 100644
--- a/src/operators/math/gemm.h
+++ b/src/operators/math/gemm.h
@@ -58,14 +58,15 @@ void AddDot4x4(int k, float alpha, const float *A, int lda, const float *B,
                int ldb, float beta, float *C, int ldc, int mc, int nc);
 
 void AddDot4x4_relu(int k, float alpha, const float *a, int lda, const float *b,
-                    int ldb, float beta, float *C, int ldc, int mc, int nc, bool relu);
+                    int ldb, float beta, float *C, int ldc, int mc, int nc,
+                    bool relu);
 
 // 32-bit float matrix multiplication
 void sgemm(int m, int n, int k, float alpha, const float *A, int lda,
            const float *B, int ldb, float beta, float *C, int ldc);
 
 void sgemm_relu(int m, int n, int k, float alpha, const float *A, int lda,
-           const float *B, int ldb, float beta, float *C, int ldc);
+                const float *B, int ldb, float beta, float *C, int ldc);
 
 // 64-bit double matrix multiplication
 void dgemm(int m, int n, int k, float alpha, const double *A, int lda,
diff --git a/src/operators/math/math_function.cpp b/src/operators/math/math_function.cpp
index 89faf217e990c4b84f4312e95f25c82b2dd3978f..fd4106038c7446e659736c6b3c61b5aa05127e72 100644
--- a/src/operators/math/math_function.cpp
+++ b/src/operators/math/math_function.cpp
@@ -42,19 +42,19 @@ void matmul<float>(const framework::Tensor &matrix_a, bool trans_a,
   int K = (trans_a == false) ? dim_a[1] : dim_a[0];
 
   if (relu) {
-    sgemm_relu(M, N, K, alpha, matrix_a.data<float>(), K, matrix_b.data<float>(), N,
-          beta, matrix_out->data<float>(), N);
+    sgemm_relu(M, N, K, alpha, matrix_a.data<float>(), K,
+               matrix_b.data<float>(), N, beta, matrix_out->data<float>(), N);
   } else {
     sgemm(M, N, K, alpha, matrix_a.data<float>(), K, matrix_b.data<float>(), N,
           beta, matrix_out->data<float>(), N);
   }
-
 }
 
 template <>
 void matmul<double>(const framework::Tensor &matrix_a, bool trans_a,
                     const framework::Tensor &matrix_b, bool trans_b,
-                    double alpha, framework::Tensor *matrix_out, double beta, bool relu) {
+                    double alpha, framework::Tensor *matrix_out, double beta,
+                    bool relu) {
   auto dim_a = matrix_a.dims();
   auto dim_b = matrix_b.dims();
   auto dim_out = matrix_out->dims();
@@ -74,8 +74,6 @@ void matmul<double>(const framework::Tensor &matrix_a, bool trans_a,
   int K = (trans_a == false) ? dim_a[1] : dim_a[0];
 }
 
-
-
 }  // namespace math
 }  // namespace operators
 }  // namespace paddle_mobile
diff --git a/src/operators/op_param.h b/src/operators/op_param.h
index 6bf2b41a092eee1dd1ee807ac37bdfe31a6e9779..8d69ea1a2ef39a49cc6e2cdc2c23eeef820e748e 100644
--- a/src/operators/op_param.h
+++ b/src/operators/op_param.h
@@ -839,12 +839,12 @@ Print &operator<<(Print &printer, const FushionConvAddParam &conv_param);
 #endif
 
 #ifdef FUSION_CONVADD_RELU_OP
-class FushionConvAddReluParam: public FushionConvAddParam {
+class FushionConvAddReluParam : public FushionConvAddParam {
  public:
   FushionConvAddReluParam(const VariableNameMap &inputs,
-                          const VariableNameMap &outputs, const AttributeMap &attrs,
-                          const Scope &scope): FushionConvAddParam(inputs, outputs, attrs, scope) {
-  }
+                          const VariableNameMap &outputs,
+                          const AttributeMap &attrs, const Scope &scope)
+      : FushionConvAddParam(inputs, outputs, attrs, scope) {}
 };
 #endif
diff --git a/test/executor_for_test.h b/test/executor_for_test.h
index 1c47410a0b2b3c476b27ed162a742e369ecbe356..0d3051327a57202e2b8d1dcbdda571fd244de108 100644
--- a/test/executor_for_test.h
+++ b/test/executor_for_test.h
@@ -42,7 +42,8 @@ using std::vector;
 template <typename DeviceType, typename OpType>
 class Executor4Test : public Executor<DeviceType> {
  public:
-  Executor4Test(Program<DeviceType> p, string op_type, bool use_optimize = false)
+  Executor4Test(Program<DeviceType> p, string op_type,
+                bool use_optimize = false)
       : Executor<DeviceType>() {
     this->use_optimize_ = use_optimize;
     this->program_ = p;
@@ -62,16 +63,14 @@
     std::vector<std::shared_ptr<OpDesc>> ops = block_desc->Ops();
     for (std::shared_ptr<OpDesc> op : ops) {
       if (op->Type() == op_type) {
-        DLOG << "matched: " << op->Type();
 
         /// test first meeting op in program
         std::shared_ptr<paddle_mobile::framework::OperatorBase<DeviceType>>
-        op_ptr = paddle_mobile::framework::OpRegistry<
-            DeviceType>::CreateOp(op->Type(), op->GetInputs(),
-                                  op->GetOutputs(),
-                                  op->GetAttrMap(),
-                                  this->program_.scope);
+            op_ptr =
+                paddle_mobile::framework::OpRegistry<DeviceType>::CreateOp(
+                    op->Type(), op->GetInputs(), op->GetOutputs(),
+                    op->GetAttrMap(), this->program_.scope);
 
         this->ops_of_block_[*block_desc.get()].push_back(op_ptr);
         break;
       }
diff --git a/test/framework/test_load.cpp b/test/framework/test_load.cpp
index 3128fd41ba66b952bfe507e8962f6dbdb6c61b6c..8c76eb1dde3ef39a342d19e7f3d4e26fc1be2b2f 100644
--- a/test/framework/test_load.cpp
+++ b/test/framework/test_load.cpp
@@ -20,9 +20,10 @@ int main() {
   // ../../../test/models/googlenet
   // ../../../test/models/mobilenet
   auto program = loader.Load(g_mobilenet_ssd, false, false);
-//  auto program = loader.Load(g_googlenet_combine + "/model", g_googlenet_combine +
-//  "/params", true);
-
+  //  auto program = loader.Load(g_googlenet_combine + "/model",
+  //  g_googlenet_combine +
+  //  "/params", true);
+
   // program.originProgram->Description("program desc: ");
   return 0;
 }
diff --git a/test/operators/test_conv_add_relu_op.cpp b/test/operators/test_conv_add_relu_op.cpp
index 6563d7a0b56979b90ed7a0f4d958f0fcf7c71eb9..987f52cd62f91b3bc00cc1ef49bd21913e288d75 100644
--- a/test/operators/test_conv_add_relu_op.cpp
+++ b/test/operators/test_conv_add_relu_op.cpp
@@ -23,9 +23,10 @@ int main() {
   PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr,
                         "program file read fail");
 
-  Executor4Test<paddle_mobile::CPU,
-                paddle_mobile::operators::FusionConvAddReluOp<paddle_mobile::CPU, float>>
-      executor(program, "fusion_conv_add_relu", true);
+  Executor4Test<
+      paddle_mobile::CPU,
+      paddle_mobile::operators::FusionConvAddReluOp<paddle_mobile::CPU, float>>
+      executor(program, "fusion_conv_add_relu", true);
 
   paddle_mobile::framework::Tensor input;
   GetInput<float>(g_test_image_1x3x224x224, &input, {1, 3, 224, 224});
diff --git a/test/operators/test_cov_op.cpp b/test/operators/test_cov_op.cpp
index 3b53a3951abcdfc39d0fdcdaf1e8be4cadb333a6..a85ad9edba5d3e2256b8d7ee7d7d3c5b7200888d 100644
--- a/test/operators/test_cov_op.cpp
+++ b/test/operators/test_cov_op.cpp
@@ -23,8 +23,8 @@ int main() {
   PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr,
                         "program file read fail");
 
-  Executor4Test<paddle_mobile::CPU, paddle_mobile::operators::ConvOp<
-      paddle_mobile::CPU, float>>
+  Executor4Test<paddle_mobile::CPU,
+                paddle_mobile::operators::ConvOp<paddle_mobile::CPU, float>>
       executor(program, "conv2d");
 
   paddle_mobile::framework::Tensor input;