From 69a0ecad989278f009042c7dda9986357cdbfb2c Mon Sep 17 00:00:00 2001 From: hjchen2 Date: Thu, 11 Oct 2018 08:03:57 +0000 Subject: [PATCH] Refine: solve static link problem --- CMakeLists.txt | 32 ++- src/framework/load_ops.h | 210 ++++++++++++++++++ src/framework/op_registry.h | 6 +- .../program-optimize/fusion_op_register.h | 9 + src/framework/tensor.h | 3 +- src/io/paddle_mobile.h | 1 + src/ios_io/PaddleMobileCPU.mm | 6 +- src/operators/conv_transpose_op.cpp | 2 +- src/operators/conv_transpose_op.h | 2 +- .../fusion_conv_add_add_prelu_op.cpp | 8 +- src/operators/fusion_conv_add_bn_op.cpp | 5 +- src/operators/fusion_conv_add_bn_relu_op.cpp | 6 +- src/operators/fusion_conv_add_op.cpp | 5 +- src/operators/fusion_conv_add_prelu_op.cpp | 8 +- src/operators/fusion_conv_add_relu_op.cpp | 7 +- src/operators/fusion_conv_bn_add_relu_op.cpp | 6 +- src/operators/fusion_conv_bn_op.cpp | 5 +- src/operators/fusion_conv_bn_relu_op.cpp | 5 +- src/operators/fusion_dwconv_bn_relu_op.cpp | 5 +- .../fusion_elementwise_add_relu_op.cpp | 8 +- src/operators/fusion_fc_op.cpp | 3 +- src/operators/fusion_fc_relu_op.cpp | 6 +- .../kernel/arm/conv_transpose_kernel.cpp | 2 +- .../conv_transpose_arm_func.h | 7 +- src/operators/kernel/conv_transpose_kernel.h | 2 +- src/operators/op_param.h | 2 +- test/CMakeLists.txt | 2 - tools/op.cmake | 4 +- 28 files changed, 288 insertions(+), 79 deletions(-) create mode 100644 src/framework/load_ops.h diff --git a/CMakeLists.txt b/CMakeLists.txt index 7c693afd04..8f22c349db 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,42 +1,38 @@ cmake_minimum_required(VERSION 3.0) -option(USE_OPENMP "openmp support" OFF) - project(paddle-mobile) +# select the platform to build +option(CPU "armv7 with neon support" ON) +option(MALI_GPU "mali gpu support" OFF) +option(FPGA "fpga support" OFF) +option(USE_OPENMP "openmp support" OFF) option(DEBUGING "enable debug mode" ON) option(USE_EXCEPTION "use std exception" OFF) option(LOG_PROFILE "log profile" OFF) -# select the platform to build -option(CPU "armv7 with neon" ON) -option(MALI_GPU "mali gpu" OFF) -option(FPGA "fpga" OFF) file(GLOB_RECURSE PADDLE_MOBILE_CC src/*.cc src/*.cpp src/*.c src/*.mm) file(GLOB_RECURSE PADDLE_MOBILE_H src/*.h) include_directories(src/) +set(CMAKE_BUILD_TYPE Release) +add_definitions(-O3 -s -DNDEBUG) if(IS_IOS) - set(CMAKE_CXX_FLAGS "-mfpu=neon -marm -fobjc-abi-version=2 -fobjc-arc -std=gnu++11 -stdlib=libc++ -O3 -s -isysroot ${CMAKE_OSX_SYSROOT} ${CMAKE_CXX_FLAGS}") + set(CMAKE_CXX_FLAGS "-mfpu=neon -marm -fobjc-abi-version=2 -fobjc-arc \ + -std=gnu++11 -stdlib=libc++ -isysroot ${CMAKE_OSX_SYSROOT} ${CMAKE_CXX_FLAGS}") else() set(CMAKE_CXX_FLAGS "-std=c++11 ${CMAKE_CXX_FLAGS}") endif() if(DEBUGING) - message(STATUS "debug") - set(CMAKE_BUILD_TYPE debug) - set(CMAKE_CXX_FLAGS "-O0 -g ${CMAKE_CXX_FLAGS}") + message(STATUS "debugging mode") add_definitions(-DPADDLE_MOBILE_DEBUG) else() - set(CMAKE_BUILD_TYPE Release) - set(CMAKE_CXX_FLAGS "-Os ${CMAKE_CXX_FLAGS}") - set(CMAKE_CXX_FLAGS_RELEASE "-DNDEBUG") add_definitions(-fvisibility=hidden -fvisibility-inlines-hidden) endif() if(USE_EXCEPTION) message(STATUS "use exception") - add_definitions(-DENABLE_EXCEPTION) - add_definitions(-fexceptions) + add_definitions(-DENABLE_EXCEPTION -fexceptions) else() add_definitions(-fno-exceptions) endif() @@ -45,7 +41,7 @@ if(LOG_PROFILE) add_definitions(-DPADDLE_MOBILE_PROFILE) endif() -if(USE_OPENMP AND NOT IS_IOS) +if(USE_OPENMP) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fopenmp") add_definitions(-DPADDLE_MOBILE_USE_OPENMP) 
endif() @@ -156,7 +152,7 @@ include("${CMAKE_CURRENT_LIST_DIR}/tools/op.cmake") # build library if(ANDROID_NDK_TOOLCHAIN_INCLUDED) list(REMOVE_DUPLICATES CMAKE_CXX_FLAGS) - add_library(paddle-mobile SHARED ${PADDLE_MOBILE_CC} ${PADDLE_MOBILE_H}) + add_library(paddle-mobile STATIC ${PADDLE_MOBILE_CC} ${PADDLE_MOBILE_H}) elseif(IS_IOS) if(USE_OPENMP) add_library(paddle-mobile-stage0 STATIC ${PADDLE_MOBILE_CC} ${PADDLE_MOBILE_H}) @@ -170,7 +166,7 @@ elseif(IS_IOS) add_library(paddle-mobile STATIC ${PADDLE_MOBILE_CC} ${PADDLE_MOBILE_H}) endif() else() - add_library(paddle-mobile SHARED ${PADDLE_MOBILE_CC} ${PADDLE_MOBILE_H}) + add_library(paddle-mobile STATIC ${PADDLE_MOBILE_CC} ${PADDLE_MOBILE_H}) endif() # unit test diff --git a/src/framework/load_ops.h b/src/framework/load_ops.h new file mode 100644 index 0000000000..8487e2e052 --- /dev/null +++ b/src/framework/load_ops.h @@ -0,0 +1,210 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#ifdef PADDLE_MOBILE_CPU +#define LOAD_CPU_OP(op_type) \ + extern int TouchOpRegistrar_##op_type##_##cpu(); \ + static int use_op_itself_##op_type##_##cpu __attribute__((unused)) = \ + TouchOpRegistrar_##op_type##_##cpu() +#else +#define LOAD_CPU_OP(op_type) +#endif + +#ifdef PADDLE_MOBILE_MALI_GPU +#define LOAD_MALI_GPU_OP(op_type) \ + extern int TouchOpRegistrar_##op_type##_##mali_gpu(); \ + static int use_op_itself_##op_type##_##mali_gpu __attribute__((unused)) = \ + TouchOpRegistrar_##op_type##_##mali_gpu() +#else +#define LOAD_MALI_GPU_OP(op_type) +#endif + +#ifdef PADDLE_MOBILE_FPGA +#define LOAD_FPGA_OP(op_type) \ + extern int TouchOpRegistrar_##op_type##_##fpga(); \ + static int use_op_itself_##op_type##_##fpga __attribute__((unused)) = \ + TouchOpRegistrar_##op_type##_##fpga() +#else +#define LOAD_FPGA_OP(op_type) +#endif + +#define LOAD_FUSION_MATCHER(op_type) \ + extern int TouchFusionMatcherRegistrar_##op_type(); \ + static int use_fusion_matcher_itself_##op_type __attribute__((unused)) = \ + TouchFusionMatcherRegistrar_##op_type(); + +#define LOAD_OP(op_type) \ + LOAD_CPU_OP(op_type); \ + LOAD_MALI_GPU_OP(op_type); \ + LOAD_FPGA_OP(op_type); + +#define LOAD_OP1(op_type, device_type) LOAD_##device_type##_OP(op_type); + +#define LOAD_OP2(op_type, device_type1, device_type2) \ + LOAD_OP1(op_type, device_type1) \ + LOAD_OP1(op_type, device_type2) + +#define LOAD_OP3(op_type, device_type1, device_type2, device_type3) \ + LOAD_OP2(op_type, device_type1, device_type2) \ + LOAD_OP1(op_type, device_type3) + +// load required ops +LOAD_OP(feed) +LOAD_OP(fetch) +#ifdef BATCHNORM_OP +LOAD_OP2(batch_norm, CPU, MALI_GPU) // NOLINT +#endif +#ifdef BILINEAR_INTERP_OP +LOAD_OP1(bilinear_interp, CPU) // NOLINT +#endif +#ifdef BOXCODER_OP +LOAD_OP1(box_coder, CPU) // NOLINT +#endif +#ifdef CONCAT_OP +LOAD_OP3(concat, CPU, MALI_GPU, FPGA) // NOLINT +#endif +#ifdef CONV_OP +LOAD_OP3(conv2d, CPU, MALI_GPU, FPGA) // NOLINT +#endif +#ifdef LRN_OP +LOAD_OP2(lrn, CPU, MALI_GPU) // NOLINT +#endif +#ifdef 
SIGMOID_OP +LOAD_OP1(sigmoid, CPU) // NOLINT +#endif +#ifdef FUSION_FC_RELU_OP +LOAD_OP3(fusion_fc_relu, CPU, MALI_GPU, FPGA) // NOLINT +LOAD_FUSION_MATCHER(fusion_fc_relu) // NOLINT +#endif +#ifdef FUSION_ELEMENTWISEADDRELU_OP +LOAD_OP3(fusion_elementwise_add_relu, CPU, MALI_GPU, FPGA) // NOLINT +LOAD_FUSION_MATCHER(fusion_elementwise_add_relu) // NOLINT +#endif +#ifdef SPLIT_OP +LOAD_OP1(split, CPU) // NOLINT +#endif +#ifdef RESIZE_OP +LOAD_OP2(resize, CPU, MALI_GPU) // NOLINT +#endif +#ifdef FUSION_CONVADDBNRELU_OP +LOAD_OP2(fusion_conv_add_bn_relu, CPU, FPGA) // NOLINT +LOAD_FUSION_MATCHER(fusion_conv_add_bn_relu) // NOLINT +#endif +#ifdef RESHAPE_OP +LOAD_OP2(reshape, CPU, MALI_GPU) // NOLINT +#endif +#ifdef TRANSPOSE_OP +LOAD_OP1(transpose, CPU) // NOLINT +#endif +#ifdef PRIORBOX_OP +LOAD_OP1(prior_box, CPU) // NOLINT +#endif +#ifdef FUSION_CONVADDRELU_OP +LOAD_OP2(fusion_conv_add_relu, CPU, FPGA) // NOLINT +LOAD_FUSION_MATCHER(fusion_conv_add_relu) // NOLINT +#endif +#ifdef FUSION_CONVADDADDPRELU_OP +LOAD_OP2(fusion_conv_add_add_prelu, CPU, FPGA) // NOLINT +LOAD_FUSION_MATCHER(fusion_conv_add_add_prelu) // NOLINT +#endif +#ifdef FUSION_CONVADD_OP +LOAD_OP2(fusion_conv_add, CPU, MALI_GPU) // NOLINT +LOAD_FUSION_MATCHER(fusion_conv_add) // NOLINT +#endif +#ifdef SOFTMAX_OP +LOAD_OP2(softmax, CPU, MALI_GPU) // NOLINT +#endif +#ifdef SHAPE_OP +LOAD_OP1(shape, CPU) // NOLINT +#endif +#ifdef DEPTHWISECONV_OP +LOAD_OP1(depthwise_conv2d, CPU) // NOLINT +#endif +#ifdef CONV_TRANSPOSE_OP +LOAD_OP1(conv2d_transpose, CPU) // NOLINT +#endif +#ifdef SCALE_OP +LOAD_OP2(scale, CPU, MALI_GPU) // NOLINT +#endif +#ifdef ELEMENTWISEADD_OP +LOAD_OP2(elementwise_add, CPU, MALI_GPU) // NOLINT +#endif +#ifdef PRELU_OP +LOAD_OP2(prelu, CPU, MALI_GPU) // NOLINT +#endif +#ifdef FLATTEN_OP +LOAD_OP1(flatten, CPU) // NOLINT +#endif +#ifdef FUSION_CONVBNADDRELU_OP +LOAD_OP2(fusion_conv_bn_add_relu, CPU, FPGA) // NOLINT +LOAD_FUSION_MATCHER(fusion_conv_bn_add_relu) // NOLINT +#endif +#ifdef FUSION_CONVBNRELU_OP +LOAD_OP2(fusion_conv_bn_relu, CPU, FPGA) // NOLINT +LOAD_FUSION_MATCHER(fusion_conv_bn_relu) // NOLINT +#endif +#ifdef GRU_OP +LOAD_OP1(gru, CPU) // NOLINT +#endif +#ifdef FUSION_CONVADDBN_OP +LOAD_OP2(fusion_conv_add_bn, CPU, FPGA) // NOLINT +LOAD_FUSION_MATCHER(fusion_conv_add_bn) // NOLINT +#endif +#ifdef DROPOUT_OP +LOAD_OP2(dropout, CPU, FPGA) // NOLINT +#endif +#ifdef FUSION_CONVADDPRELU_OP +LOAD_OP2(fusion_conv_add_prelu, CPU, FPGA) // NOLINT +LOAD_FUSION_MATCHER(fusion_conv_add_prelu) // NOLINT +#endif +#ifdef FUSION_DWCONVBNRELU_OP +LOAD_OP1(fusion_dwconv_bn_relu, CPU) // NOLINT +LOAD_FUSION_MATCHER(fusion_dwconv_bn_relu) // NOLINT +#endif +#ifdef CRF_OP +LOAD_OP1(crf_decoding, CPU) // NOLINT +#endif +#ifdef MUL_OP +LOAD_OP2(mul, CPU, MALI_GPU) // NOLINT +#endif +#ifdef RELU_OP +LOAD_OP2(relu, CPU, MALI_GPU) // NOLINT +#endif +#ifdef IM2SEQUENCE_OP +LOAD_OP1(im2sequence, CPU) // NOLINT +#endif +#ifdef LOOKUP_OP +LOAD_OP1(lookup_table, CPU) // NOLINT +#endif +#ifdef FUSION_FC_OP +LOAD_OP3(fusion_fc, CPU, MALI_GPU, FPGA) // NOLINT +LOAD_FUSION_MATCHER(fusion_fc) // NOLINT +#endif +#ifdef POOL_OP +LOAD_OP3(pool2d, CPU, MALI_GPU, FPGA) // NOLINT +#endif +#ifdef MULTICLASSNMS_OP +LOAD_OP1(multiclass_nms, CPU) // NOLINT +#endif +#ifdef SLICE_OP +LOAD_OP2(slice, CPU, MALI_GPU) // NOLINT +#endif +#ifdef FUSION_CONVBN_OP +LOAD_OP2(fusion_conv_bn, CPU, FPGA) // NOLINT +LOAD_FUSION_MATCHER(fusion_conv_bn) // NOLINT +#endif +LOAD_OP1(quantize, CPU) // NOLINT +LOAD_OP1(dequantize, CPU) // NOLINT diff --git 
a/src/framework/op_registry.h b/src/framework/op_registry.h index 25fc6652a4..32954531d0 100644 --- a/src/framework/op_registry.h +++ b/src/framework/op_registry.h @@ -105,7 +105,11 @@ class OpRegistry { }; \ static paddle_mobile::framework::OperatorRegistrar< \ device_type, _OpClass_##op_type##_##device_name> \ - __op_registrar_##op_type##_##device_name(#op_type); + __op_registrar_##op_type##_##device_name(#op_type); \ + int TouchOpRegistrar_##op_type##_##device_name() { \ + __op_registrar_##op_type##_##device_name.Touch(); \ + return 0; \ + } #define REGISTER_OPERATOR_CPU(op_type, op_class) \ REGISTER_OPERATOR(op_type, op_class, cpu, paddle_mobile::CPU); diff --git a/src/framework/program/program-optimize/fusion_op_register.h b/src/framework/program/program-optimize/fusion_op_register.h index a5890d34c6..1bf04bd6ec 100644 --- a/src/framework/program/program-optimize/fusion_op_register.h +++ b/src/framework/program/program-optimize/fusion_op_register.h @@ -67,7 +67,16 @@ class FusionOpRegistrar { explicit FusionOpRegistrar(FusionOpMatcher* matcher) { FusionOpRegister::Instance()->regist(matcher); } + void Touch() {} }; } // namespace framework } // namespace paddle_mobile + +#define REGISTER_FUSION_MATCHER(op_type, matcher) \ + static paddle_mobile::framework::FusionOpRegistrar \ + __fusion_matcher_registrar_##op_type(new matcher()); \ + int TouchFusionMatcherRegistrar_##op_type() { \ + __fusion_matcher_registrar_##op_type.Touch(); \ + return 0; \ + } diff --git a/src/framework/tensor.h b/src/framework/tensor.h index 86dad5cdd2..909819c145 100644 --- a/src/framework/tensor.h +++ b/src/framework/tensor.h @@ -336,11 +336,12 @@ inline Print &operator<<(Print &printer, const Tensor &tensor) { stride = stride > 0 ? stride : 1; #ifndef PADDLE_MOBILE_FPGA for (int i = 0; i < tensor.numel(); i += stride) { - // 这不一定是float的 if (tensor.type() == typeid(float)) { printer << tensor.data<float>()[i] << " "; } else if (tensor.type() == typeid(int64_t)) { printer << tensor.data<int64_t>()[i] << " "; + } else if (tensor.type() == typeid(int8_t)) { + printer << tensor.data<int8_t>()[i] << " "; } } #endif diff --git a/src/io/paddle_mobile.h b/src/io/paddle_mobile.h index 7ed23adbf9..bfd6da909e 100644 --- a/src/io/paddle_mobile.h +++ b/src/io/paddle_mobile.h @@ -22,6 +22,7 @@ limitations under the License. */ #endif // _OPENMP #include "common/types.h" +#include "framework/load_ops.h" #include "framework/tensor.h" #include "io/executor.h" #include "io/loader.h" diff --git a/src/ios_io/PaddleMobileCPU.mm b/src/ios_io/PaddleMobileCPU.mm index 5a21418ef5..937bdb7946 100644 --- a/src/ios_io/PaddleMobileCPU.mm +++ b/src/ios_io/PaddleMobileCPU.mm @@ -13,15 +13,13 @@ limitations under the License. */ #import "PaddleMobileCPU.h" - #import "op_symbols.h" -#include "framework/tensor.h" +#import "framework/load_ops.h" +#import "framework/tensor.h" #import "io/paddle_mobile.h" - #import #import - @interface PaddleMobileCPUResult() -(void)toSetOutput:(float *)output; diff --git a/src/operators/conv_transpose_op.cpp b/src/operators/conv_transpose_op.cpp index 34de4cbb10..4d9eefaa85 100644 --- a/src/operators/conv_transpose_op.cpp +++ b/src/operators/conv_transpose_op.cpp @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -#ifdef CONV_TRANSPOSE +#ifdef CONV_TRANSPOSE_OP #include "operators/conv_transpose_op.h" diff --git a/src/operators/conv_transpose_op.h b/src/operators/conv_transpose_op.h index e28cee2d74..4e6464b3a4 100644 --- a/src/operators/conv_transpose_op.h +++ b/src/operators/conv_transpose_op.h @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#ifdef CONV_TRANSPOSE +#ifdef CONV_TRANSPOSE_OP #pragma once diff --git a/src/operators/fusion_conv_add_add_prelu_op.cpp b/src/operators/fusion_conv_add_add_prelu_op.cpp index dd2514be45..2f3d29dc74 100644 --- a/src/operators/fusion_conv_add_add_prelu_op.cpp +++ b/src/operators/fusion_conv_add_add_prelu_op.cpp @@ -14,7 +14,7 @@ limitations under the License. */ #ifdef FUSION_CONVADDADDPRELU_OP -#include "fusion_conv_add_add_prelu_op.h" +#include "operators/fusion_conv_add_add_prelu_op.h" #include "operators/math/conv_func.h" namespace paddle_mobile { @@ -44,13 +44,13 @@ void FusionConvAddAddPReluOp::InferShape() const { this->param_.Output()->Resize(ddim); } -static framework::FusionOpRegistrar fusion_conv_add_add_prelu_registrar( - new FusionConvAddAddPReluOpMatcher()); - } // namespace operators } // namespace paddle_mobile namespace ops = paddle_mobile::operators; +REGISTER_FUSION_MATCHER(fusion_conv_add_add_prelu, + ops::FusionConvAddAddPReluOpMatcher); + #ifdef PADDLE_MOBILE_CPU REGISTER_OPERATOR_CPU(fusion_conv_add_add_prelu, ops::FusionConvAddAddPReluOp); #endif diff --git a/src/operators/fusion_conv_add_bn_op.cpp b/src/operators/fusion_conv_add_bn_op.cpp index c8acc19ab8..e8daba7e9b 100644 --- a/src/operators/fusion_conv_add_bn_op.cpp +++ b/src/operators/fusion_conv_add_bn_op.cpp @@ -45,13 +45,12 @@ void FusionConvAddBNOp::InferShape() const { this->param_.Output()->Resize(ddim); } -static framework::FusionOpRegistrar fusion_conv_add_bn_registrar( - new FusionConvAddBNMatcher()); - } // namespace operators } // namespace paddle_mobile namespace ops = paddle_mobile::operators; +REGISTER_FUSION_MATCHER(fusion_conv_add_bn, ops::FusionConvAddBNMatcher); + #ifdef PADDLE_MOBILE_CPU REGISTER_OPERATOR_CPU(fusion_conv_add_bn, ops::FusionConvAddBNOp); #endif diff --git a/src/operators/fusion_conv_add_bn_relu_op.cpp b/src/operators/fusion_conv_add_bn_relu_op.cpp index 5c00ce95ef..e7d6ee59f2 100644 --- a/src/operators/fusion_conv_add_bn_relu_op.cpp +++ b/src/operators/fusion_conv_add_bn_relu_op.cpp @@ -45,13 +45,13 @@ void FusionConvAddBNReluOp::InferShape() const { this->param_.Output()->Resize(ddim); } -static framework::FusionOpRegistrar fusion_conv_add_bn_relu_registrar( - new FusionConvAddBNReluMatcher()); - } // namespace operators } // namespace paddle_mobile namespace ops = paddle_mobile::operators; +REGISTER_FUSION_MATCHER(fusion_conv_add_bn_relu, + ops::FusionConvAddBNReluMatcher); + #ifdef PADDLE_MOBILE_CPU REGISTER_OPERATOR_CPU(fusion_conv_add_bn_relu, ops::FusionConvAddBNReluOp); #endif diff --git a/src/operators/fusion_conv_add_op.cpp b/src/operators/fusion_conv_add_op.cpp index 8c191859eb..485ba1be9b 100644 --- a/src/operators/fusion_conv_add_op.cpp +++ b/src/operators/fusion_conv_add_op.cpp @@ -45,13 +45,12 @@ void FusionConvAddOp::InferShape() const { this->param_.Output()->Resize(ddim); } -static framework::FusionOpRegistrar convadd_registrar( - new FusionConvAddMatcher()); - } // namespace operators } // namespace paddle_mobile namespace ops = paddle_mobile::operators; 
+REGISTER_FUSION_MATCHER(fusion_conv_add, ops::FusionConvAddMatcher); + #ifdef PADDLE_MOBILE_CPU REGISTER_OPERATOR_CPU(fusion_conv_add, ops::FusionConvAddOp); #endif diff --git a/src/operators/fusion_conv_add_prelu_op.cpp b/src/operators/fusion_conv_add_prelu_op.cpp index 9784353dbc..9273af388c 100644 --- a/src/operators/fusion_conv_add_prelu_op.cpp +++ b/src/operators/fusion_conv_add_prelu_op.cpp @@ -14,7 +14,7 @@ limitations under the License. */ #ifdef FUSION_CONVADDPRELU_OP -#include "fusion_conv_add_prelu_op.h" +#include "operators/fusion_conv_add_prelu_op.h" #include "operators/math/conv_func.h" namespace paddle_mobile { @@ -44,13 +44,13 @@ void FusionConvAddPReluOp::InferShape() const { this->param_.Output()->Resize(ddim); } -static framework::FusionOpRegistrar fusion_conv_add_prelu_registrar( - new FusionConvAddPReluOpMatcher()); - } // namespace operators } // namespace paddle_mobile namespace ops = paddle_mobile::operators; +REGISTER_FUSION_MATCHER(fusion_conv_add_prelu, + ops::FusionConvAddPReluOpMatcher); + #ifdef PADDLE_MOBILE_CPU REGISTER_OPERATOR_CPU(fusion_conv_add_prelu, ops::FusionConvAddPReluOp); #endif diff --git a/src/operators/fusion_conv_add_relu_op.cpp b/src/operators/fusion_conv_add_relu_op.cpp index 3ea417e3f7..486221f0f6 100644 --- a/src/operators/fusion_conv_add_relu_op.cpp +++ b/src/operators/fusion_conv_add_relu_op.cpp @@ -14,7 +14,7 @@ limitations under the License. */ #ifdef FUSION_CONVADDRELU_OP -#include "fusion_conv_add_relu_op.h" +#include "operators/fusion_conv_add_relu_op.h" #include "operators/math/conv_func.h" namespace paddle_mobile { @@ -44,13 +44,12 @@ void FusionConvAddReluOp::InferShape() const { this->param_.Output()->Resize(ddim); } -static framework::FusionOpRegistrar fusion_conv_add_relu_registrar( - new FusionConvAddReluOpMatcher()); - } // namespace operators } // namespace paddle_mobile namespace ops = paddle_mobile::operators; +REGISTER_FUSION_MATCHER(fusion_conv_add_relu, ops::FusionConvAddReluOpMatcher); + #ifdef PADDLE_MOBILE_CPU REGISTER_OPERATOR_CPU(fusion_conv_add_relu, ops::FusionConvAddReluOp); #endif diff --git a/src/operators/fusion_conv_bn_add_relu_op.cpp b/src/operators/fusion_conv_bn_add_relu_op.cpp index 1b78cd1563..1c03e29ea0 100644 --- a/src/operators/fusion_conv_bn_add_relu_op.cpp +++ b/src/operators/fusion_conv_bn_add_relu_op.cpp @@ -45,13 +45,13 @@ void FusionConvBNAddReluOp::InferShape() const { this->param_.Output()->Resize(ddim); } -static framework::FusionOpRegistrar fusion_conv_bn_add_relu_registrar( - new FusionConvBNAddReluMatcher()); - } // namespace operators } // namespace paddle_mobile namespace ops = paddle_mobile::operators; +REGISTER_FUSION_MATCHER(fusion_conv_bn_add_relu, + ops::FusionConvBNAddReluMatcher); + #ifdef PADDLE_MOBILE_CPU REGISTER_OPERATOR_CPU(fusion_conv_bn_add_relu, ops::FusionConvBNAddReluOp); #endif diff --git a/src/operators/fusion_conv_bn_op.cpp b/src/operators/fusion_conv_bn_op.cpp index 85bbccb2f2..7786cd713b 100644 --- a/src/operators/fusion_conv_bn_op.cpp +++ b/src/operators/fusion_conv_bn_op.cpp @@ -44,13 +44,12 @@ void FusionConvBNOp::InferShape() const { this->param_.Output()->Resize(ddim); } -static framework::FusionOpRegistrar fusion_conv_bn_registrar( - new FusionConvBNMatcher()); - } // namespace operators } // namespace paddle_mobile namespace ops = paddle_mobile::operators; +REGISTER_FUSION_MATCHER(fusion_conv_bn, ops::FusionConvBNMatcher); + #ifdef PADDLE_MOBILE_CPU REGISTER_OPERATOR_CPU(fusion_conv_bn, ops::FusionConvBNOp); #endif diff --git 
a/src/operators/fusion_conv_bn_relu_op.cpp b/src/operators/fusion_conv_bn_relu_op.cpp index 63e194a4c9..2ec72dafc0 100644 --- a/src/operators/fusion_conv_bn_relu_op.cpp +++ b/src/operators/fusion_conv_bn_relu_op.cpp @@ -45,13 +45,12 @@ void FusionConvBNReluOp::InferShape() const { this->param_.Output()->Resize(ddim); } -static framework::FusionOpRegistrar fusion_conv_bn_relu_registrar( - new FusionConvBNReluMatcher()); - } // namespace operators } // namespace paddle_mobile namespace ops = paddle_mobile::operators; +REGISTER_FUSION_MATCHER(fusion_conv_bn_relu, ops::FusionConvBNReluMatcher); + #ifdef PADDLE_MOBILE_CPU REGISTER_OPERATOR_CPU(fusion_conv_bn_relu, ops::FusionConvBNReluOp); #endif diff --git a/src/operators/fusion_dwconv_bn_relu_op.cpp b/src/operators/fusion_dwconv_bn_relu_op.cpp index ff8d829e26..060d8b8956 100644 --- a/src/operators/fusion_dwconv_bn_relu_op.cpp +++ b/src/operators/fusion_dwconv_bn_relu_op.cpp @@ -45,13 +45,12 @@ void FusionDWConvBNReluOp::InferShape() const { this->param_.Output()->Resize(ddim); } -static framework::FusionOpRegistrar fusion_dwconv_bn_relu_registrar( - new FusionDWConvBNReluMatcher()); - } // namespace operators } // namespace paddle_mobile namespace ops = paddle_mobile::operators; +REGISTER_FUSION_MATCHER(fusion_dwconv_bn_relu, ops::FusionDWConvBNReluMatcher); + #ifdef PADDLE_MOBILE_CPU REGISTER_OPERATOR_CPU(fusion_dwconv_bn_relu, ops::FusionDWConvBNReluOp); #endif diff --git a/src/operators/fusion_elementwise_add_relu_op.cpp b/src/operators/fusion_elementwise_add_relu_op.cpp index 82c2957ec8..0297fb01f5 100644 --- a/src/operators/fusion_elementwise_add_relu_op.cpp +++ b/src/operators/fusion_elementwise_add_relu_op.cpp @@ -14,7 +14,7 @@ limitations under the License. */ #ifdef FUSION_ELEMENTWISEADDRELU_OP -#include "fusion_elementwise_add_relu_op.h" +#include "operators/fusion_elementwise_add_relu_op.h" namespace paddle_mobile { namespace operators { @@ -25,13 +25,13 @@ void FusionElementwiseAddReluOp::InferShape() const { this->param_.Out()->Resize(x_dim); } -static framework::FusionOpRegistrar fusion_elementwise_relu_registrar( - new FusioneElementwiseAddReluMatcher()); - } // namespace operators } // namespace paddle_mobile namespace ops = paddle_mobile::operators; +REGISTER_FUSION_MATCHER(fusion_elementwise_add_relu, + ops::FusioneElementwiseAddReluMatcher); + #ifdef PADDLE_MOBILE_CPU // REGISTER_OPERATOR_CPU(fusion_elementwise_add_relu, // ops::FusionElementwiseAddReluOp); diff --git a/src/operators/fusion_fc_op.cpp b/src/operators/fusion_fc_op.cpp index 5d6a60ca1b..928a4d8541 100644 --- a/src/operators/fusion_fc_op.cpp +++ b/src/operators/fusion_fc_op.cpp @@ -19,8 +19,6 @@ limitations under the License. 
*/ namespace paddle_mobile { namespace operators { -static framework::FusionOpRegistrar fc_registrar(new FusionFcMatcher()); - template void FusionFcOp::InferShape() const { auto x_dims = this->param_.InputX()->dims(); @@ -57,6 +55,7 @@ void FusionFcOp::InferShape() const { } // namespace paddle_mobile namespace ops = paddle_mobile::operators; +REGISTER_FUSION_MATCHER(fusion_fc, ops::FusionFcMatcher); #ifdef PADDLE_MOBILE_CPU REGISTER_OPERATOR_CPU(fusion_fc, ops::FusionFcOp); diff --git a/src/operators/fusion_fc_relu_op.cpp b/src/operators/fusion_fc_relu_op.cpp index 870af45656..b19e94cf9a 100644 --- a/src/operators/fusion_fc_relu_op.cpp +++ b/src/operators/fusion_fc_relu_op.cpp @@ -50,13 +50,13 @@ void FusionFcReluOp::InferShape() const { this->param_.Out()->Resize(ddim); } -static framework::FusionOpRegistrar fc_relu_registrar( - new FusionFcReluMatcher()); - } // namespace operators } // namespace paddle_mobile namespace ops = paddle_mobile::operators; + +REGISTER_FUSION_MATCHER(fusion_fc_relu, ops::FusionFcReluMatcher); + #ifdef PADDLE_MOBILE_CPU REGISTER_OPERATOR_CPU(fusion_fc_relu, ops::FusionFcReluOp); #endif diff --git a/src/operators/kernel/arm/conv_transpose_kernel.cpp b/src/operators/kernel/arm/conv_transpose_kernel.cpp index d695e6144b..94f8a79101 100644 --- a/src/operators/kernel/arm/conv_transpose_kernel.cpp +++ b/src/operators/kernel/arm/conv_transpose_kernel.cpp @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#ifdef CONV_TRANSPOSE +#ifdef CONV_TRANSPOSE_OP #include "operators/kernel/conv_transpose_kernel.h" #include "operators/kernel/central-arm-func/conv_transpose_arm_func.h" diff --git a/src/operators/kernel/central-arm-func/conv_transpose_arm_func.h b/src/operators/kernel/central-arm-func/conv_transpose_arm_func.h index 343e5f1476..1bb3aac3e9 100644 --- a/src/operators/kernel/central-arm-func/conv_transpose_arm_func.h +++ b/src/operators/kernel/central-arm-func/conv_transpose_arm_func.h @@ -12,18 +12,17 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#ifdef CONV_TRANSPOSE +#pragma once -#include +#ifdef CONV_TRANSPOSE_OP +#include #include "framework/ddim.h" #include "operators/math/im2col.h" #include "operators/math/math_function.h" #include "operators/math/vol2col.h" #include "operators/op_param.h" -#pragma once - namespace paddle_mobile { namespace operators { diff --git a/src/operators/kernel/conv_transpose_kernel.h b/src/operators/kernel/conv_transpose_kernel.h index 9cbd7c8c3b..761370095c 100644 --- a/src/operators/kernel/conv_transpose_kernel.h +++ b/src/operators/kernel/conv_transpose_kernel.h @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -#ifdef CONV_TRANSPOSE +#ifdef CONV_TRANSPOSE_OP #pragma once diff --git a/src/operators/op_param.h b/src/operators/op_param.h index 2207c001e9..f1c9f09b65 100644 --- a/src/operators/op_param.h +++ b/src/operators/op_param.h @@ -1911,7 +1911,7 @@ class DropoutParam : public OpParam { }; #endif -#ifdef CONV_TRANSPOSE +#ifdef CONV_TRANSPOSE_OP template class ConvTransposeParam : public OpParam { typedef typename DtypeTensorTrait::gtype GType; diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index e82cd9e248..ce958822c7 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -324,6 +324,4 @@ if (NOT FOUND_MATCH) #add_library(test-lib-size SHARED common/test_lib_size.h common/test_lib_size.cpp) - - endif () diff --git a/tools/op.cmake b/tools/op.cmake index 6158a31814..9a6ec0a147 100644 --- a/tools/op.cmake +++ b/tools/op.cmake @@ -356,7 +356,7 @@ if (FUSION_CONVBN_OP) endif() if (CONV_TRANSPOSE_OP) - add_definitions(-DCONV_TRANSPOSE) + add_definitions(-DCONV_TRANSPOSE_OP) endif() if (LOOKUP_OP) @@ -386,4 +386,4 @@ endif() if (SHAPE_OP) add_definitions(-DSHAPE_OP) -endif() \ No newline at end of file +endif() -- GitLab
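Background on the mechanism (illustrative note, not part of the commit): once paddle-mobile is built with add_library(... STATIC ...), the linker only keeps object files whose symbols are actually referenced, so the file-local registrar objects that used to self-register each operator were silently dropped and operators went missing at runtime. load_ops.h works around this by forcing a symbolic reference into every enabled op's translation unit. A rough sketch of what LOAD_OP1(sigmoid, CPU) expands to, assembled by hand from the macros in this patch (hypothetical preprocessor output, sigmoid/CPU chosen only as an example):

// Sketch of LOAD_OP1(sigmoid, CPU) with PADDLE_MOBILE_CPU defined.
// TouchOpRegistrar_sigmoid_cpu() is the function that the extended
// REGISTER_OPERATOR_CPU(sigmoid, ...) macro now emits in the op's .cpp file.
extern int TouchOpRegistrar_sigmoid_cpu();
static int use_op_itself_sigmoid_cpu __attribute__((unused)) =
    TouchOpRegistrar_sigmoid_cpu();  // the call keeps the op's object file in the link

Because the static initializer calls a function defined next to the registrar, any binary that includes framework/load_ops.h (now pulled in via io/paddle_mobile.h) references every enabled operator's Touch function, which keeps the corresponding object files and their registrars alive under static linking.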