提交 69a0ecad 编写于 作者: H hjchen2

Refine: solve static link problem

上级 946afe3f
cmake_minimum_required(VERSION 3.0)
project(paddle-mobile)

# Build switches — each option is declared exactly once (the scraped diff
# carried duplicate old/new declarations of USE_OPENMP/CPU/MALI_GPU/FPGA).
option(USE_OPENMP "openmp support" OFF)
option(DEBUGING "enable debug mode" ON)
option(USE_EXCEPTION "use std exception" OFF)
option(LOG_PROFILE "log profile" OFF)
# select the platform to build
option(CPU "armv7 with neon" ON)
option(MALI_GPU "mali gpu" OFF)
option(FPGA "fpga" OFF)

# NOTE(review): file(GLOB_RECURSE) only picks up newly added sources on the
# next re-configure; an explicit source list would be more reliable, but is
# kept here to preserve the project's existing convention.
file(GLOB_RECURSE PADDLE_MOBILE_CC src/*.cc src/*.cpp src/*.c src/*.mm)
file(GLOB_RECURSE PADDLE_MOBILE_H src/*.h)
include_directories(src/)

if(IS_IOS)
  # Optimization level is left to the DEBUGING branch below; the iOS flags
  # only pin ABI/ObjC/stdlib settings and the sysroot.
  set(CMAKE_CXX_FLAGS "-mfpu=neon -marm -fobjc-abi-version=2 -fobjc-arc \
    -std=gnu++11 -stdlib=libc++ -isysroot ${CMAKE_OSX_SYSROOT} ${CMAKE_CXX_FLAGS}")
else()
  set(CMAKE_CXX_FLAGS "-std=c++11 ${CMAKE_CXX_FLAGS}")
endif()

if(DEBUGING)
  message(STATUS "debugging mode")
  # "Debug" is the canonical CMake build-type spelling (was lowercase "debug").
  set(CMAKE_BUILD_TYPE Debug)
  set(CMAKE_CXX_FLAGS "-O0 -g ${CMAKE_CXX_FLAGS}")
  add_definitions(-DPADDLE_MOBILE_DEBUG)
else()
  set(CMAKE_BUILD_TYPE Release)
  set(CMAKE_CXX_FLAGS "-Os ${CMAKE_CXX_FLAGS}")
  set(CMAKE_CXX_FLAGS_RELEASE "-DNDEBUG")
  # Hide symbols by default in release builds to shrink the shipped binary.
  add_definitions(-fvisibility=hidden -fvisibility-inlines-hidden)
endif()

if(USE_EXCEPTION)
  message(STATUS "use exception")
  add_definitions(-DENABLE_EXCEPTION -fexceptions)
else()
  add_definitions(-fno-exceptions)
endif()
if(LOG_PROFILE)
  add_definitions(-DPADDLE_MOBILE_PROFILE)
endif()

# Post-commit form: OpenMP is no longer excluded on iOS (the iOS static-lib
# branch builds an OpenMP-enabled stage library when USE_OPENMP is ON).
if(USE_OPENMP)
  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fopenmp")
  add_definitions(-DPADDLE_MOBILE_USE_OPENMP)
endif()
......@@ -156,7 +152,7 @@ include("${CMAKE_CURRENT_LIST_DIR}/tools/op.cmake")
# build library
if(ANDROID_NDK_TOOLCHAIN_INCLUDED)
list(REMOVE_DUPLICATES CMAKE_CXX_FLAGS)
add_library(paddle-mobile SHARED ${PADDLE_MOBILE_CC} ${PADDLE_MOBILE_H})
add_library(paddle-mobile STATIC ${PADDLE_MOBILE_CC} ${PADDLE_MOBILE_H})
elseif(IS_IOS)
if(USE_OPENMP)
add_library(paddle-mobile-stage0 STATIC ${PADDLE_MOBILE_CC} ${PADDLE_MOBILE_H})
......@@ -170,7 +166,7 @@ elseif(IS_IOS)
add_library(paddle-mobile STATIC ${PADDLE_MOBILE_CC} ${PADDLE_MOBILE_H})
endif()
else()
add_library(paddle-mobile SHARED ${PADDLE_MOBILE_CC} ${PADDLE_MOBILE_H})
add_library(paddle-mobile STATIC ${PADDLE_MOBILE_CC} ${PADDLE_MOBILE_H})
endif()
# unit test
......
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once

// Ops register themselves via static objects in their own translation units.
// When linking the library statically, the linker drops those objects unless
// something references them. Each LOAD_*_OP macro declares the op's
// TouchOpRegistrar_* function and forces a call to it through an unused
// static int, pulling the op's object file into the final binary.

#ifdef PADDLE_MOBILE_CPU
#define LOAD_CPU_OP(op_type)                                           \
  extern int TouchOpRegistrar_##op_type##_##cpu();                     \
  static int use_op_itself_##op_type##_##cpu __attribute__((unused)) = \
      TouchOpRegistrar_##op_type##_##cpu()
#else
#define LOAD_CPU_OP(op_type)
#endif

#ifdef PADDLE_MOBILE_MALI_GPU
#define LOAD_MALI_GPU_OP(op_type)                                           \
  extern int TouchOpRegistrar_##op_type##_##mali_gpu();                     \
  static int use_op_itself_##op_type##_##mali_gpu __attribute__((unused)) = \
      TouchOpRegistrar_##op_type##_##mali_gpu()
#else
#define LOAD_MALI_GPU_OP(op_type)
#endif

#ifdef PADDLE_MOBILE_FPGA
// BUGFIX: this previously called TouchOpRegistrar_##op_type##_##mali_gpu(),
// which touches the wrong registrar and is not even declared in this branch
// (a compile error when FPGA is enabled without MALI_GPU).
#define LOAD_FPGA_OP(op_type)                                           \
  extern int TouchOpRegistrar_##op_type##_##fpga();                     \
  static int use_op_itself_##op_type##_##fpga __attribute__((unused)) = \
      TouchOpRegistrar_##op_type##_##fpga()
#else
#define LOAD_FPGA_OP(op_type)
#endif

// Force-link a fusion matcher's registrar. Note: unlike the LOAD_*_OP macros,
// this one expands with its own trailing ';'.
#define LOAD_FUSION_MATCHER(op_type)                                       \
  extern int TouchFusionMatcherRegistrar_##op_type();                      \
  static int use_fusion_matcher_itself_##op_type __attribute__((unused)) = \
      TouchFusionMatcherRegistrar_##op_type();

// Load an op for every backend compiled into this build.
#define LOAD_OP(op_type)     \
  LOAD_CPU_OP(op_type);      \
  LOAD_MALI_GPU_OP(op_type); \
  LOAD_FPGA_OP(op_type);

// Load an op for exactly one / two / three named backends; device_type is one
// of CPU, MALI_GPU, FPGA (token-pasted into the macro name).
#define LOAD_OP1(op_type, device_type) LOAD_##device_type##_OP(op_type);
#define LOAD_OP2(op_type, device_type1, device_type2) \
  LOAD_OP1(op_type, device_type1)                     \
  LOAD_OP1(op_type, device_type2)
#define LOAD_OP3(op_type, device_type1, device_type2, device_type3) \
  LOAD_OP2(op_type, device_type1, device_type2)                     \
  LOAD_OP1(op_type, device_type3)
// Load required ops. Each invocation below forces the corresponding op's
// registrar into the binary (see the LOAD_* macro definitions above), gated
// on the per-op compile switch defined by tools/op.cmake.
LOAD_OP(feed)
LOAD_OP(fetch)
#ifdef BATCHNORM_OP
LOAD_OP2(batch_norm, CPU, MALI_GPU) // NOLINT
#endif
#ifdef BILINEAR_INTERP_OP
LOAD_OP1(bilinear_interp, CPU) // NOLINT
#endif
#ifdef BOXCODER_OP
LOAD_OP1(box_coder, CPU) // NOLINT
#endif
#ifdef CONCAT_OP
LOAD_OP3(concat, CPU, MALI_GPU, FPGA) // NOLINT
#endif
#ifdef CONV_OP
LOAD_OP3(conv2d, CPU, MALI_GPU, FPGA) // NOLINT
#endif
#ifdef LRN_OP
LOAD_OP2(lrn, CPU, MALI_GPU) // NOLINT
#endif
#ifdef SIGMOID_OP
LOAD_OP1(sigmoid, CPU) // NOLINT
#endif
#ifdef FUSION_FC_RELU_OP
LOAD_OP3(fusion_fc_relu, CPU, MALI_GPU, FPGA) // NOLINT
LOAD_FUSION_MATCHER(fusion_fc_relu) // NOLINT
#endif
#ifdef FUSION_ELEMENTWISEADDRELU_OP
LOAD_OP3(fusion_elementwise_add_relu, CPU, MALI_GPU, FPGA) // NOLINT
LOAD_FUSION_MATCHER(fusion_elementwise_add_relu) // NOLINT
#endif
#ifdef SPLIT_OP
LOAD_OP1(split, CPU) // NOLINT
#endif
#ifdef RESIZE_OP
LOAD_OP2(resize, CPU, MALI_GPU) // NOLINT
#endif
#ifdef FUSION_CONVADDBNRELU_OP
LOAD_OP2(fusion_conv_add_bn_relu, CPU, FPGA) // NOLINT
LOAD_FUSION_MATCHER(fusion_conv_add_bn_relu) // NOLINT
#endif
#ifdef RESHAPE_OP
LOAD_OP2(reshape, CPU, MALI_GPU) // NOLINT
#endif
#ifdef TRANSPOSE_OP
LOAD_OP1(transpose, CPU) // NOLINT
#endif
#ifdef PRIORBOX_OP
LOAD_OP1(prior_box, CPU) // NOLINT
#endif
#ifdef FUSION_CONVADDRELU_OP
LOAD_OP2(fusion_conv_add_relu, CPU, FPGA) // NOLINT
LOAD_FUSION_MATCHER(fusion_conv_add_relu) // NOLINT
#endif
#ifdef FUSION_CONVADDADDPRELU_OP
LOAD_OP2(fusion_conv_add_add_prelu, CPU, FPGA) // NOLINT
LOAD_FUSION_MATCHER(fusion_conv_add_add_prelu) // NOLINT
#endif
#ifdef FUSION_CONVADD_OP
LOAD_OP2(fusion_conv_add, CPU, MALI_GPU) // NOLINT
LOAD_FUSION_MATCHER(fusion_conv_add) // NOLINT
#endif
#ifdef SOFTMAX_OP
LOAD_OP2(softmax, CPU, MALI_GPU) // NOLINT
#endif
#ifdef SHAPE_OP
LOAD_OP1(shape, CPU) // NOLINT
#endif
#ifdef DEPTHWISECONV_OP
LOAD_OP1(depthwise_conv2d, CPU) // NOLINT
#endif
#ifdef CONV_TRANSPOSE_OP
LOAD_OP1(conv2d_transpose, CPU) // NOLINT
#endif
#ifdef SCALE_OP
LOAD_OP2(scale, CPU, MALI_GPU) // NOLINT
#endif
#ifdef ELEMENTWISEADD_OP
LOAD_OP2(elementwise_add, CPU, MALI_GPU) // NOLINT
#endif
#ifdef PRELU_OP
LOAD_OP2(prelu, CPU, MALI_GPU) // NOLINT
#endif
#ifdef FLATTEN_OP
LOAD_OP1(flatten, CPU) // NOLINT
#endif
#ifdef FUSION_CONVBNADDRELU_OP
LOAD_OP2(fusion_conv_bn_add_relu, CPU, FPGA) // NOLINT
LOAD_FUSION_MATCHER(fusion_conv_bn_add_relu) // NOLINT
#endif
#ifdef FUSION_CONVBNRELU_OP
LOAD_OP2(fusion_conv_bn_relu, CPU, FPGA) // NOLINT
LOAD_FUSION_MATCHER(fusion_conv_bn_relu) // NOLINT
#endif
#ifdef GRU_OP
LOAD_OP1(gru, CPU) // NOLINT
#endif
#ifdef FUSION_CONVADDBN_OP
LOAD_OP2(fusion_conv_add_bn, CPU, FPGA) // NOLINT
LOAD_FUSION_MATCHER(fusion_conv_add_bn) // NOLINT
#endif
#ifdef DROPOUT_OP
LOAD_OP2(dropout, CPU, FPGA) // NOLINT
#endif
#ifdef FUSION_CONVADDPRELU_OP
LOAD_OP2(fusion_conv_add_prelu, CPU, FPGA) // NOLINT
LOAD_FUSION_MATCHER(fusion_conv_add_prelu) // NOLINT
#endif
#ifdef FUSION_DWCONVBNRELU_OP
LOAD_OP1(fusion_dwconv_bn_relu, CPU) // NOLINT
LOAD_FUSION_MATCHER(fusion_dwconv_bn_relu) // NOLINT
#endif
#ifdef CRF_OP
LOAD_OP1(crf_decoding, CPU) // NOLINT
#endif
#ifdef MUL_OP
LOAD_OP2(mul, CPU, MALI_GPU) // NOLINT
#endif
#ifdef RELU_OP
LOAD_OP2(relu, CPU, MALI_GPU) // NOLINT
#endif
#ifdef IM2SEQUENCE_OP
LOAD_OP1(im2sequence, CPU) // NOLINT
#endif
#ifdef LOOKUP_OP
LOAD_OP1(lookup_table, CPU) // NOLINT
#endif
#ifdef FUSION_FC_OP
LOAD_OP3(fusion_fc, CPU, MALI_GPU, FPGA) // NOLINT
LOAD_FUSION_MATCHER(fusion_fc) // NOLINT
#endif
#ifdef POOL_OP
LOAD_OP3(pool2d, CPU, MALI_GPU, FPGA) // NOLINT
#endif
#ifdef MULTICLASSNMS_OP
LOAD_OP1(multiclass_nms, CPU) // NOLINT
#endif
#ifdef SLICE_OP
LOAD_OP2(slice, CPU, MALI_GPU) // NOLINT
#endif
#ifdef FUSION_CONVBN_OP
LOAD_OP2(fusion_conv_bn, CPU, FPGA) // NOLINT
LOAD_FUSION_MATCHER(fusion_conv_bn) // NOLINT
#endif
// quantize/dequantize are always linked in (no per-op compile switch).
LOAD_OP1(quantize, CPU) // NOLINT
LOAD_OP1(dequantize, CPU) // NOLINT
......@@ -105,7 +105,11 @@ class OpRegistry {
}; \
static paddle_mobile::framework::OperatorRegistrar< \
device_type, _OpClass_##op_type##_##device_name<device_type, float>> \
__op_registrar_##op_type##_##device_name(#op_type);
__op_registrar_##op_type##_##device_name(#op_type); \
int TouchOpRegistrar_##op_type##_##device_name() { \
__op_registrar_##op_type##_##device_name.Touch(); \
return 0; \
}
#define REGISTER_OPERATOR_CPU(op_type, op_class) \
REGISTER_OPERATOR(op_type, op_class, cpu, paddle_mobile::CPU);
......
......@@ -67,7 +67,16 @@ class FusionOpRegistrar {
explicit FusionOpRegistrar(FusionOpMatcher* matcher) {
FusionOpRegister::Instance()->regist(matcher);
}
void Touch() {}
};
} // namespace framework
} // namespace paddle_mobile
#define REGISTER_FUSION_MATCHER(op_type, matcher) \
static paddle_mobile::framework::FusionOpRegistrar \
__fusion_matcher_registrar_##op_type(new matcher()); \
int TouchFusionMatcherRegistrar_##op_type() { \
__fusion_matcher_registrar_##op_type.Touch(); \
return 0; \
}
......@@ -336,11 +336,12 @@ inline Print &operator<<(Print &printer, const Tensor &tensor) {
stride = stride > 0 ? stride : 1;
#ifndef PADDLE_MOBILE_FPGA
for (int i = 0; i < tensor.numel(); i += stride) {
// NOTE(review): the tensor's element type is not necessarily float, so
// dispatch on the runtime type below.
if (tensor.type() == typeid(float)) {
printer << tensor.data<float>()[i] << " ";
} else if (tensor.type() == typeid(int64_t)) {
printer << tensor.data<int64_t>()[i] << " ";
} else if (tensor.type() == typeid(int8_t)) {
printer << tensor.data<int8_t>()[i] << " ";
}
}
#endif
......
......@@ -22,6 +22,7 @@ limitations under the License. */
#endif // _OPENMP
#include "common/types.h"
#include "framework/load_ops.h"
#include "framework/tensor.h"
#include "io/executor.h"
#include "io/loader.h"
......
......@@ -13,15 +13,13 @@
limitations under the License. */
#import "PaddleMobileCPU.h"
#import "op_symbols.h"
#include "framework/tensor.h"
#import "framework/load_ops.h"
#import "framework/tensor.h"
#import "io/paddle_mobile.h"
#import <memory>
#import <vector>
@interface PaddleMobileCPUResult()
-(void)toSetOutput:(float *)output;
......
......@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef CONV_TRANSPOSE
#ifdef CONV_TRANSPOSE_OP
#include "operators/conv_transpose_op.h"
......
......@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef CONV_TRANSPOSE
#ifdef CONV_TRANSPOSE_OP
#pragma once
......
......@@ -14,7 +14,7 @@ limitations under the License. */
#ifdef FUSION_CONVADDADDPRELU_OP
#include "fusion_conv_add_add_prelu_op.h"
#include "operators/fusion_conv_add_add_prelu_op.h"
#include "operators/math/conv_func.h"
namespace paddle_mobile {
......@@ -44,13 +44,13 @@ void FusionConvAddAddPReluOp<Dtype, T>::InferShape() const {
this->param_.Output()->Resize(ddim);
}
static framework::FusionOpRegistrar fusion_conv_add_add_prelu_registrar(
new FusionConvAddAddPReluOpMatcher());
} // namespace operators
} // namespace paddle_mobile
namespace ops = paddle_mobile::operators;
REGISTER_FUSION_MATCHER(fusion_conv_add_add_prelu,
ops::FusionConvAddAddPReluOpMatcher);
#ifdef PADDLE_MOBILE_CPU
REGISTER_OPERATOR_CPU(fusion_conv_add_add_prelu, ops::FusionConvAddAddPReluOp);
#endif
......
......@@ -45,13 +45,12 @@ void FusionConvAddBNOp<Dtype, T>::InferShape() const {
this->param_.Output()->Resize(ddim);
}
static framework::FusionOpRegistrar fusion_conv_add_bn_registrar(
new FusionConvAddBNMatcher());
} // namespace operators
} // namespace paddle_mobile
namespace ops = paddle_mobile::operators;
REGISTER_FUSION_MATCHER(fusion_conv_add_bn, ops::FusionConvAddBNMatcher);
#ifdef PADDLE_MOBILE_CPU
REGISTER_OPERATOR_CPU(fusion_conv_add_bn, ops::FusionConvAddBNOp);
#endif
......
......@@ -45,13 +45,13 @@ void FusionConvAddBNReluOp<Dtype, T>::InferShape() const {
this->param_.Output()->Resize(ddim);
}
static framework::FusionOpRegistrar fusion_conv_add_bn_relu_registrar(
new FusionConvAddBNReluMatcher());
} // namespace operators
} // namespace paddle_mobile
namespace ops = paddle_mobile::operators;
REGISTER_FUSION_MATCHER(fusion_conv_add_bn_relu,
ops::FusionConvAddBNReluMatcher);
#ifdef PADDLE_MOBILE_CPU
REGISTER_OPERATOR_CPU(fusion_conv_add_bn_relu, ops::FusionConvAddBNReluOp);
#endif
......
......@@ -45,13 +45,12 @@ void FusionConvAddOp<Dtype, T>::InferShape() const {
this->param_.Output()->Resize(ddim);
}
static framework::FusionOpRegistrar convadd_registrar(
new FusionConvAddMatcher());
} // namespace operators
} // namespace paddle_mobile
namespace ops = paddle_mobile::operators;
REGISTER_FUSION_MATCHER(fusion_conv_add, ops::FusionConvAddMatcher);
#ifdef PADDLE_MOBILE_CPU
REGISTER_OPERATOR_CPU(fusion_conv_add, ops::FusionConvAddOp);
#endif
......
......@@ -14,7 +14,7 @@ limitations under the License. */
#ifdef FUSION_CONVADDPRELU_OP
#include "fusion_conv_add_prelu_op.h"
#include "operators/fusion_conv_add_prelu_op.h"
#include "operators/math/conv_func.h"
namespace paddle_mobile {
......@@ -44,13 +44,13 @@ void FusionConvAddPReluOp<Dtype, T>::InferShape() const {
this->param_.Output()->Resize(ddim);
}
static framework::FusionOpRegistrar fusion_conv_add_prelu_registrar(
new FusionConvAddPReluOpMatcher());
} // namespace operators
} // namespace paddle_mobile
namespace ops = paddle_mobile::operators;
REGISTER_FUSION_MATCHER(fusion_conv_add_prelu,
ops::FusionConvAddPReluOpMatcher);
#ifdef PADDLE_MOBILE_CPU
REGISTER_OPERATOR_CPU(fusion_conv_add_prelu, ops::FusionConvAddPReluOp);
#endif
......
......@@ -14,7 +14,7 @@ limitations under the License. */
#ifdef FUSION_CONVADDRELU_OP
#include "fusion_conv_add_relu_op.h"
#include "operators/fusion_conv_add_relu_op.h"
#include "operators/math/conv_func.h"
namespace paddle_mobile {
......@@ -44,13 +44,12 @@ void FusionConvAddReluOp<Dtype, T>::InferShape() const {
this->param_.Output()->Resize(ddim);
}
static framework::FusionOpRegistrar fusion_conv_add_relu_registrar(
new FusionConvAddReluOpMatcher());
} // namespace operators
} // namespace paddle_mobile
namespace ops = paddle_mobile::operators;
REGISTER_FUSION_MATCHER(fusion_conv_add_relu, ops::FusionConvAddReluOpMatcher);
#ifdef PADDLE_MOBILE_CPU
REGISTER_OPERATOR_CPU(fusion_conv_add_relu, ops::FusionConvAddReluOp);
#endif
......
......@@ -45,13 +45,13 @@ void FusionConvBNAddReluOp<Dtype, T>::InferShape() const {
this->param_.Output()->Resize(ddim);
}
static framework::FusionOpRegistrar fusion_conv_bn_add_relu_registrar(
new FusionConvBNAddReluMatcher());
} // namespace operators
} // namespace paddle_mobile
namespace ops = paddle_mobile::operators;
REGISTER_FUSION_MATCHER(fusion_conv_bn_add_relu,
ops::FusionConvBNAddReluMatcher);
#ifdef PADDLE_MOBILE_CPU
REGISTER_OPERATOR_CPU(fusion_conv_bn_add_relu, ops::FusionConvBNAddReluOp);
#endif
......
......@@ -44,13 +44,12 @@ void FusionConvBNOp<Dtype, T>::InferShape() const {
this->param_.Output()->Resize(ddim);
}
static framework::FusionOpRegistrar fusion_conv_bn_registrar(
new FusionConvBNMatcher());
} // namespace operators
} // namespace paddle_mobile
namespace ops = paddle_mobile::operators;
REGISTER_FUSION_MATCHER(fusion_conv_bn, ops::FusionConvBNMatcher);
#ifdef PADDLE_MOBILE_CPU
REGISTER_OPERATOR_CPU(fusion_conv_bn, ops::FusionConvBNOp);
#endif
......
......@@ -45,13 +45,12 @@ void FusionConvBNReluOp<Dtype, T>::InferShape() const {
this->param_.Output()->Resize(ddim);
}
static framework::FusionOpRegistrar fusion_conv_bn_relu_registrar(
new FusionConvBNReluMatcher());
} // namespace operators
} // namespace paddle_mobile
namespace ops = paddle_mobile::operators;
REGISTER_FUSION_MATCHER(fusion_conv_bn_relu, ops::FusionConvBNReluMatcher);
#ifdef PADDLE_MOBILE_CPU
REGISTER_OPERATOR_CPU(fusion_conv_bn_relu, ops::FusionConvBNReluOp);
#endif
......
......@@ -45,13 +45,12 @@ void FusionDWConvBNReluOp<Dtype, T>::InferShape() const {
this->param_.Output()->Resize(ddim);
}
static framework::FusionOpRegistrar fusion_dwconv_bn_relu_registrar(
new FusionDWConvBNReluMatcher());
} // namespace operators
} // namespace paddle_mobile
namespace ops = paddle_mobile::operators;
REGISTER_FUSION_MATCHER(fusion_dwconv_bn_relu, ops::FusionDWConvBNReluMatcher);
#ifdef PADDLE_MOBILE_CPU
REGISTER_OPERATOR_CPU(fusion_dwconv_bn_relu, ops::FusionDWConvBNReluOp);
#endif
......
......@@ -14,7 +14,7 @@ limitations under the License. */
#ifdef FUSION_ELEMENTWISEADDRELU_OP
#include "fusion_elementwise_add_relu_op.h"
#include "operators/fusion_elementwise_add_relu_op.h"
namespace paddle_mobile {
namespace operators {
......@@ -25,13 +25,13 @@ void FusionElementwiseAddReluOp<Dtype, T>::InferShape() const {
this->param_.Out()->Resize(x_dim);
}
static framework::FusionOpRegistrar fusion_elementwise_relu_registrar(
new FusioneElementwiseAddReluMatcher());
} // namespace operators
} // namespace paddle_mobile
namespace ops = paddle_mobile::operators;
REGISTER_FUSION_MATCHER(fusion_elementwise_add_relu,
ops::FusioneElementwiseAddReluMatcher);
#ifdef PADDLE_MOBILE_CPU
// REGISTER_OPERATOR_CPU(fusion_elementwise_add_relu,
// ops::FusionElementwiseAddReluOp);
......
......@@ -19,8 +19,6 @@ limitations under the License. */
namespace paddle_mobile {
namespace operators {
static framework::FusionOpRegistrar fc_registrar(new FusionFcMatcher());
template <typename Dtype, typename T>
void FusionFcOp<Dtype, T>::InferShape() const {
auto x_dims = this->param_.InputX()->dims();
......@@ -57,6 +55,7 @@ void FusionFcOp<Dtype, T>::InferShape() const {
} // namespace paddle_mobile
namespace ops = paddle_mobile::operators;
REGISTER_FUSION_MATCHER(fusion_fc, ops::FusionFcMatcher);
#ifdef PADDLE_MOBILE_CPU
REGISTER_OPERATOR_CPU(fusion_fc, ops::FusionFcOp);
......
......@@ -50,13 +50,13 @@ void FusionFcReluOp<Dtype, T>::InferShape() const {
this->param_.Out()->Resize(ddim);
}
static framework::FusionOpRegistrar fc_relu_registrar(
new FusionFcReluMatcher());
} // namespace operators
} // namespace paddle_mobile
namespace ops = paddle_mobile::operators;
REGISTER_FUSION_MATCHER(fusion_fc_relu, ops::FusionFcReluMatcher);
#ifdef PADDLE_MOBILE_CPU
REGISTER_OPERATOR_CPU(fusion_fc_relu, ops::FusionFcReluOp);
#endif
......
......@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef CONV_TRANSPOSE
#ifdef CONV_TRANSPOSE_OP
#include "operators/kernel/conv_transpose_kernel.h"
#include "operators/kernel/central-arm-func/conv_transpose_arm_func.h"
......
......@@ -12,18 +12,17 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef CONV_TRANSPOSE
#pragma once
#include <vector>
#ifdef CONV_TRANSPOSE_OP
#include <vector>
#include "framework/ddim.h"
#include "operators/math/im2col.h"
#include "operators/math/math_function.h"
#include "operators/math/vol2col.h"
#include "operators/op_param.h"
#pragma once
namespace paddle_mobile {
namespace operators {
......
......@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef CONV_TRANSPOSE
#ifdef CONV_TRANSPOSE_OP
#pragma once
......
......@@ -1911,7 +1911,7 @@ class DropoutParam : public OpParam {
};
#endif
#ifdef CONV_TRANSPOSE
#ifdef CONV_TRANSPOSE_OP
template <typename Dtype>
class ConvTransposeParam : public OpParam {
typedef typename DtypeTensorTrait<Dtype>::gtype GType;
......
......@@ -324,6 +324,4 @@ if (NOT FOUND_MATCH)
#add_library(test-lib-size SHARED common/test_lib_size.h common/test_lib_size.cpp)
endif ()
......@@ -356,7 +356,7 @@ if (FUSION_CONVBN_OP)
endif()
if (CONV_TRANSPOSE_OP)
add_definitions(-DCONV_TRANSPOSE)
add_definitions(-DCONV_TRANSPOSE_OP)
endif()
if (LOOKUP_OP)
......@@ -386,4 +386,4 @@ endif()
if (SHAPE_OP)
add_definitions(-DSHAPE_OP)
endif()
\ No newline at end of file
endif()
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册