Commit 9b791af6 authored by Ruilong Liu, committed by GitHub

Merge pull request #398 from codeWorm2015/develop

#397 Modify the iOS CMake toolchain and add macro options for multi-platform builds
cmake_minimum_required(VERSION 3.0)
project(paddle-mobile)
option(DEBUGING "enable debug mode" ON)
option(USE_OPENMP "openmp support" OFF)
option(USE_EXCEPTION "use std exception" ON)
option(CPU "cpu" ON)
option(MALI_GPU "mali gpu" OFF)
option(FPGA "fpga" OFF)
if (CPU)
add_definitions(-DPADDLE_MOBILE_CPU)
elseif (MALI_GPU)
add_definitions(-DPADDLE_MOBILE_MALI_GPU)
elseif(FPGA)
add_definitions(-DPADDLE_MOBILE_FPGA)
endif()
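Note that the if/elseif chain above defines at most one of PADDLE_MOBILE_CPU, PADDLE_MOBILE_MALI_GPU, and PADDLE_MOBILE_FPGA per configuration, with CPU (ON by default) taking precedence. The operator sources changed below select their registrations with guards of this shape; the comments in the sketch are placeholders, not code from the patch:

#ifdef PADDLE_MOBILE_CPU
// CPU-only registration / kernel code.
#endif
#ifdef PADDLE_MOBILE_MALI_GPU
// Mali GPU-only registration / kernel code.
#endif
#ifdef PADDLE_MOBILE_FPGA
// FPGA-only registration / kernel code.
#endif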
if (DEBUGING)
set(CMAKE_BUILD_TYPE Debug)
@@ -127,8 +139,13 @@ else ()
add_definitions(-DTRANSPOSE_OP)
endif()
if (IS_IOS)
add_library(paddle-mobile STATIC ${PADDLE_MOBILE_CC} ${PADDLE_MOBILE_H})
elseif(ANDROID)
add_library(paddle-mobile SHARED ${PADDLE_MOBILE_CC} ${PADDLE_MOBILE_H})
else()
add_library(paddle-mobile SHARED ${PADDLE_MOBILE_CC} ${PADDLE_MOBILE_H})
endif ()
if(DEBUGING)
add_subdirectory(test)
@@ -96,24 +96,74 @@ class OpRegistry {
}
};
#ifdef PADDLE_MOBILE_CPU
#define REGISTER_OPERATOR_CPU(op_type, op_class) \
template <typename Dtype, typename T> \
class _OpClass_##op_type##_cpu : public op_class<Dtype, T> { \
public: \
DEFINE_OP_CONSTRUCTOR(_OpClass_##op_type##_cpu, op_class); \
}; \
static paddle_mobile::framework::OperatorRegistrar< \
paddle_mobile::CPU, _OpClass_##op_type##_cpu<paddle_mobile::CPU, float>> \
__op_registrar_##op_type##__cpu(#op_type); \
int TouchOpRegistrar_##op_type##_cpu() { \
__op_registrar_##op_type##__cpu.Touch(); \
return 0; \
}
#define USE_OP_CPU(op_type) \
extern int TouchOpRegistrar_##op_type##_cpu(); \
static int use_op_itself_##op_type##_ __attribute__((unused)) = \
TouchOpRegistrar_##op_type##_cpu()
#endif
#ifdef PADDLE_MOBILE_MALI_GPU
#define REGISTER_OPERATOR_MALI_GPU(op_type, op_class) \
template <typename Dtype, typename T> \
class _OpClass_##op_type##_mali_gpu : public op_class<Dtype, T> { \
public: \
DEFINE_OP_CONSTRUCTOR(_OpClass_##op_type##_mali_gpu, op_class); \
}; \
static paddle_mobile::framework::OperatorRegistrar< \
paddle_mobile::CPU, \
_OpClass_##op_type##_mali_gpu<paddle_mobile::CPU, float>> \
__op_registrar_##op_type##__mali_gpu(#op_type); \
int TouchOpRegistrar_##op_type##_mali_gpu() { \
__op_registrar_##op_type##__mali_gpu.Touch(); \
return 0; \
}
#define USE_OP_MALI_GPU(op_type) \
extern int TouchOpRegistrar_##op_type##_mali_gpu(); \
static int use_op_itself_##op_type##_ __attribute__((unused)) = \
TouchOpRegistrar_##op_type##_mali_gpu()
#endif
#ifdef PADDLE_MOBILE_FPGA
#define REGISTER_OPERATOR_FPGA(op_type, op_class) \
template <typename Dtype, typename T> \
class _OpClass_##op_type##_fpga : public op_class<Dtype, T> { \
public: \
DEFINE_OP_CONSTRUCTOR(_OpClass_##op_type##_fpga, op_class); \
}; \
static paddle_mobile::framework::OperatorRegistrar< \
paddle_mobile::CPU, \
_OpClass_##op_type##_fpga<paddle_mobile::CPU, float>> \
__op_registrar_##op_type##__fpga(#op_type); \
int TouchOpRegistrar_##op_type##_fpga() { \
__op_registrar_##op_type##__fpga.Touch(); \
return 0; \
}
#define USE_OP_FPGA(op_type) \
extern int TouchOpRegistrar_##op_type##_fpga(); \
static int use_op_itself_##op_type##_ __attribute__((unused)) = \
TouchOpRegistrar_##op_type##_fpga()
#endif
} // namespace framework
} // namespace paddle_mobile
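For orientation, here is roughly what the new CPU registration pair expands to. The op name relu and the class ops::ReluOp are only borrowed from the registrations further down for illustration, and the framework types are assumed to be in scope as in the header above. The static OperatorRegistrar instance performs the registration at program start; the Touch function referenced by USE_OP_CPU only forces the object file containing that registrar to be linked in:

// Sketch: approximate expansion of REGISTER_OPERATOR_CPU(relu, ops::ReluOp)
// in the operator's .cpp file.
template <typename Dtype, typename T>
class _OpClass_relu_cpu : public ops::ReluOp<Dtype, T> {
 public:
  DEFINE_OP_CONSTRUCTOR(_OpClass_relu_cpu, ops::ReluOp);
};
static paddle_mobile::framework::OperatorRegistrar<
    paddle_mobile::CPU, _OpClass_relu_cpu<paddle_mobile::CPU, float>>
    __op_registrar_relu__cpu("relu");
int TouchOpRegistrar_relu_cpu() {
  __op_registrar_relu__cpu.Touch();
  return 0;
}

// Sketch: approximate expansion of USE_OP_CPU(relu) in the code that uses
// the operator; the unused static keeps the registrar from being stripped
// by the linker.
extern int TouchOpRegistrar_relu_cpu();
static int use_op_itself_relu_ __attribute__((unused)) =
    TouchOpRegistrar_relu_cpu();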
@@ -58,7 +58,12 @@ void OperatorBase<Dtype>::Run() const {
}
template class OperatorBase<CPU>;
template class OperatorBase<FPGA>;
template class OperatorBase<GPU_MALI>;
template class OperatorWithKernel<CPU>;
template class OperatorWithKernel<FPGA>;
template class OperatorWithKernel<GPU_MALI>;
} // namespace framework
} // namespace paddle_mobile
@@ -153,6 +153,7 @@ class FusionOpMatcher {
std::string BeginType() { return node_.Type(); }
// virtual bool Fusion();
protected:
Node node_;
std::string type_;
@@ -31,7 +31,13 @@ template class BatchNormOp<CPU, float>;
} // namespace paddle_mobile
namespace ops = paddle_mobile::operators;
#ifdef PADDLE_MOBILE_CPU
USE_OP_CPU(batch_norm);
REGISTER_OPERATOR_CPU(batch_norm, ops::BatchNormOp);
#endif
#ifdef PADDLE_MOBILE_MALI_GPU
#endif
#ifdef PADDLE_MOBILE_FPGA
#endif
#endif
@@ -52,7 +52,13 @@ template class BoxCoderOp<CPU, float>;
} // namespace paddle_mobile
namespace ops = paddle_mobile::operators;
#ifdef PADDLE_MOBILE_CPU
USE_OP_CPU(box_coder);
REGISTER_OPERATOR_CPU(box_coder, ops::BoxCoderOp);
#endif
#ifdef PADDLE_MOBILE_MALI_GPU
#endif
#ifdef PADDLE_MOBILE_FPGA
#endif
#endif
@@ -62,7 +62,13 @@ template class ConcatOp<CPU, float>;
} // namespace paddle_mobile
namespace ops = paddle_mobile::operators;
#ifdef PADDLE_MOBILE_CPU
USE_OP_CPU(concat);
REGISTER_OPERATOR_CPU(concat, ops::ConcatOp);
#endif
#ifdef PADDLE_MOBILE_MALI_GPU
#endif
#ifdef PADDLE_MOBILE_FPGA
#endif
#endif
@@ -53,7 +53,17 @@ template class ConvOp<CPU, float>;
} // namespace paddle_mobile
namespace ops = paddle_mobile::operators;
#ifdef PADDLE_MOBILE_CPU
USE_OP_CPU(conv2d);
REGISTER_OPERATOR_CPU(conv2d, ops::ConvOp);
#endif
#ifdef PADDLE_MOBILE_MALI_GPU
USE_OP_MALI_GPU(conv2d);
REGISTER_OPERATOR_MALI_GPU(conv2d, ops::ConvOp);
#endif
#ifdef PADDLE_MOBILE_FPGA
USE_OP_FPGA(conv2d);
REGISTER_OPERATOR_FPGA(conv2d, ops::ConvOp);
#endif
#endif
@@ -54,7 +54,13 @@ template class DepthwiseConvOp<CPU, float>;
} // namespace paddle_mobile
namespace ops = paddle_mobile::operators;
#ifdef PADDLE_MOBILE_CPU
USE_OP_CPU(depthwise_conv2d);
REGISTER_OPERATOR_CPU(depthwise_conv2d, ops::DepthwiseConvOp);
#endif
#ifdef PADDLE_MOBILE_MALI_GPU
#endif
#ifdef PADDLE_MOBILE_FPGA
#endif
#endif
@@ -29,7 +29,13 @@ template class ElementwiseAddOp<CPU, float>;
} // namespace paddle_mobile
namespace ops = paddle_mobile::operators;
#ifdef PADDLE_MOBILE_CPU
USE_OP_CPU(elementwise_add);
REGISTER_OPERATOR_CPU(elementwise_add, ops::ElementwiseAddOp);
#endif
#ifdef PADDLE_MOBILE_MALI_GPU
#endif
#ifdef PADDLE_MOBILE_FPGA
#endif
#endif
@@ -43,8 +43,14 @@ class FeedOp : public framework::OperatorBase<DeviceType> {
};
namespace ops = paddle_mobile::operators;
#ifdef PADDLE_MOBILE_CPU
USE_OP_CPU(feed);
REGISTER_OPERATOR_CPU(feed, ops::FeedOp);
#endif
#ifdef PADDLE_MOBILE_MALI_GPU
#endif
#ifdef PADDLE_MOBILE_FPGA
#endif
} // namespace operators
} // namespace paddle_mobile
@@ -43,8 +43,14 @@ class FetchOp : public framework::OperatorBase<DeviceType> {
};
namespace ops = paddle_mobile::operators;
#ifdef PADDLE_MOBILE_CPU
USE_OP_CPU(fetch);
REGISTER_OPERATOR_CPU(fetch, ops::FetchOp);
#endif
#ifdef PADDLE_MOBILE_MALI_GPU
#endif
#ifdef PADDLE_MOBILE_FPGA
#endif
} // namespace operators
} // namespace paddle_mobile
@@ -25,7 +25,13 @@ template class FushionConvAddOp<CPU, float>;
} // namespace paddle_mobile
namespace ops = paddle_mobile::operators;
#ifdef PADDLE_MOBILE_CPU
USE_OP_CPU(conv_add);
REGISTER_OPERATOR_CPU(conv_add, ops::FushionConvAddOp);
#endif
#ifdef PADDLE_MOBILE_MALI_GPU
#endif
#ifdef PADDLE_MOBILE_FPGA
#endif
#endif
@@ -64,7 +64,13 @@ class FushionConvAddOp : public framework::OperatorWithKernel<DeviceType> {
// FushionFcParam param_;
};
#ifdef PADDLE_MOBILE_CPU
static framework::FusionOpRegistrar fc_registrar(new FusionConvAddMatcher());
#endif
#ifdef PADDLE_MOBILE_MALI_GPU
#endif
#ifdef PADDLE_MOBILE_FPGA
#endif
} // namespace operators
} // namespace paddle_mobile
@@ -46,8 +46,14 @@ class ConvAddReluOp {
private:
};
#ifdef PADDLE_MOBILE_CPU
// static framework::FusionOpRegistrar fusion_conv_add_relu_registrar(
// new FushionConvAddReluOpMatcher());
#endif
#ifdef PADDLE_MOBILE_MALI_GPU
#endif
#ifdef PADDLE_MOBILE_FPGA
#endif
} // namespace operators
} // namespace paddle_mobile
@@ -54,7 +54,13 @@ template class FushionFcOp<CPU, float>;
} // namespace paddle_mobile
namespace ops = paddle_mobile::operators;
#ifdef PADDLE_MOBILE_CPU
USE_OP_CPU(fc);
REGISTER_OPERATOR_CPU(fc, ops::FushionFcOp);
#endif
#ifdef PADDLE_MOBILE_MALI_GPU
#endif
#ifdef PADDLE_MOBILE_FPGA
#endif
#endif
@@ -37,8 +37,6 @@ class FusionFcMatcher : public framework::FusionOpMatcher {
void FolderNodes(
framework::Node *node,
std::vector<std::shared_ptr<framework::Node>> *removed_nodes) {
vector<std::shared_ptr<framework::OpDesc>> origin_descs =
node->OpDescs(node_.Depth());
node->Folder(node_.Depth(), Type(),
{{G_OP_TYPE_ELEMENTWISE_ADD, {"Y", "Z"}}}, removed_nodes);
}
@@ -69,7 +67,14 @@ class FushionFcOp : public framework::OperatorWithKernel<DeviceType> {
FushionFcParam param_;
};
#ifdef PADDLE_MOBILE_CPU
static framework::FusionOpRegistrar fc_registrar(new FusionFcMatcher());
#endif
#ifdef PADDLE_MOBILE_MALI_GPU
static framework::FusionOpRegistrar fc_registrar(new FusionFcMatcher());
#endif
#ifdef PADDLE_MOBILE_FPGA
#endif
} // namespace operators
} // namespace paddle_mobile
@@ -14,15 +14,16 @@ limitations under the License. */
#ifdef CONV_OP
#include "operators/kernel/conv_kernel.h"
namespace paddle_mobile {
namespace operators {
template <>
void ConvKernel<FPGA, float>::Compute(const ConvParam &param) const {}
template class ConvKernel<FPGA, float>;
} // namespace operators
} // namespace paddle_mobile
#endif
@@ -29,7 +29,13 @@ template class LrnOp<CPU, float>;
} // namespace paddle_mobile
namespace ops = paddle_mobile::operators;
#ifdef PADDLE_MOBILE_CPU
USE_OP_CPU(lrn);
REGISTER_OPERATOR_CPU(lrn, ops::LrnOp);
#endif
#ifdef PADDLE_MOBILE_MALI_GPU
#endif
#ifdef PADDLE_MOBILE_FPGA
#endif
#endif
@@ -55,7 +55,13 @@ template class MulOp<CPU, float>;
} // namespace paddle_mobile
namespace ops = paddle_mobile::operators;
#ifdef PADDLE_MOBILE_CPU
USE_OP_CPU(mul);
REGISTER_OPERATOR_CPU(mul, ops::MulOp);
#endif
#ifdef PADDLE_MOBILE_MALI_GPU
#endif
#ifdef PADDLE_MOBILE_FPGA
#endif
#endif
@@ -39,7 +39,13 @@ template class MultiClassNMSOp<CPU, float>;
} // namespace paddle_mobile
namespace ops = paddle_mobile::operators;
#ifdef PADDLE_MOBILE_CPU
USE_OP_CPU(multiclass_nms);
REGISTER_OPERATOR_CPU(multiclass_nms, ops::MultiClassNMSOp);
#endif
#ifdef PADDLE_MOBILE_MALI_GPU
#endif
#ifdef PADDLE_MOBILE_FPGA
#endif
#endif
@@ -59,7 +59,13 @@ template class PoolOp<CPU, float>;
} // namespace paddle_mobile
namespace ops = paddle_mobile::operators;
#ifdef PADDLE_MOBILE_CPU
USE_OP_CPU(pool2d);
REGISTER_OPERATOR_CPU(pool2d, ops::PoolOp);
#endif
#ifdef PADDLE_MOBILE_MALI_GPU
#endif
#ifdef PADDLE_MOBILE_FPGA
#endif
#endif
@@ -49,7 +49,13 @@ template class PriorBoxOp<CPU, float>;
} // namespace paddle_mobile
namespace ops = paddle_mobile::operators;
#ifdef PADDLE_MOBILE_CPU
USE_OP_CPU(prior_box);
REGISTER_OPERATOR_CPU(prior_box, ops::PriorBoxOp);
#endif
#ifdef PADDLE_MOBILE_MALI_GPU
#endif
#ifdef PADDLE_MOBILE_FPGA
#endif
#endif
@@ -33,7 +33,13 @@ template class ReluOp<CPU, float>;
* all of these need to correspond to the op types used in the model
* */
namespace ops = paddle_mobile::operators;
#ifdef PADDLE_MOBILE_CPU
USE_OP_CPU(relu);
REGISTER_OPERATOR_CPU(relu, ops::ReluOp);
#endif
#ifdef PADDLE_MOBILE_MALI_GPU
#endif
#ifdef PADDLE_MOBILE_FPGA
#endif
#endif
@@ -32,7 +32,13 @@ template class ReshapeOp<CPU, float>;
} // namespace paddle_mobile
namespace ops = paddle_mobile::operators;
#ifdef PADDLE_MOBILE_CPU
USE_OP_CPU(reshape);
REGISTER_OPERATOR_CPU(reshape, ops::ReshapeOp);
#endif
#ifdef PADDLE_MOBILE_MALI_GPU
#endif
#ifdef PADDLE_MOBILE_FPGA
#endif
#endif
@@ -27,7 +27,13 @@ template class SigmoidOp<CPU, float>;
} // namespace paddle_mobile
namespace ops = paddle_mobile::operators;
#ifdef PADDLE_MOBILE_CPU
USE_OP_CPU(sigmoid);
REGISTER_OPERATOR_CPU(sigmoid, ops::SigmoidOp);
#endif
#ifdef PADDLE_MOBILE_MALI_GPU
#endif
#ifdef PADDLE_MOBILE_FPGA
#endif
#endif
@@ -27,7 +27,13 @@ template class SoftmaxOp<CPU, float>;
} // namespace paddle_mobile
namespace ops = paddle_mobile::operators;
#ifdef PADDLE_MOBILE_CPU
USE_OP_CPU(softmax);
REGISTER_OPERATOR_CPU(softmax, ops::SoftmaxOp);
#endif
#ifdef PADDLE_MOBILE_MALI_GPU
#endif
#ifdef PADDLE_MOBILE_FPGA
#endif
#endif
@@ -52,7 +52,13 @@ template class TransposeOp<CPU, float>;
} // namespace paddle_mobile
namespace ops = paddle_mobile::operators;
#ifdef PADDLE_MOBILE_CPU
USE_OP_CPU(transpose);
REGISTER_OPERATOR_CPU(transpose, ops::TransposeOp);
#endif
#ifdef PADDLE_MOBILE_MALI_GPU
#endif
#ifdef PADDLE_MOBILE_FPGA
#endif
#endif
@@ -98,7 +98,7 @@ build_for_ios() {
BUILD_DIR=../build/release/"${PLATFORM}"
TOOLCHAIN_FILE="./tools/ios-cmake/ios.toolchain.cmake"
C_FLAGS="-fobjc-abi-version=2 -fobjc-arc -isysroot ${CMAKE_OSX_SYSROOT}"
CXX_FLAGS="-fobjc-abi-version=2 -fobjc-arc -std=gnu++14 -stdlib=libc++ -isysroot ${CMAKE_OSX_SYSROOT}"
mkdir -p "${BUILD_DIR}"
if [ $# -eq 1 ]; then
NET=$1