diff --git a/CMakeLists.txt b/CMakeLists.txt
index 7d06636b587e9c001990fe9a2aae13117fd899da..dd44f8041cfe0c27094c554bd65c9f6703289c88 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -7,7 +7,6 @@ option(DEBUGING "enable debug mode" ON)
 option(USE_EXCEPTION "use std exception" OFF)
 option(LOG_PROFILE "log profile" OFF)
 # select the platform to build
-option(X86 "x86" OFF)
 option(CPU "armv7 with neon" ON)
 option(MALI_GPU "mali gpu" OFF)
 option(FPGA "fpga" OFF)
@@ -57,8 +56,6 @@ endif()
 
 if(CPU)
     add_definitions(-DPADDLE_MOBILE_CPU)
-elseif(X86)
-    add_definitions(-DPADDLE_MOBILE_X86)
 else()
     file(GLOB_RECURSE _tmp_list src/operators/kernel/arm/*.cpp src/operators/kernel/arm/*.cc)
     foreach(f ${_tmp_list})
diff --git a/src/common/types.h b/src/common/types.h
index 81f1b7dda68cd30700d686d86ad0cb2a206b95c6..ec2e3ea2f2c818ca6ea7634ac1c564bbca492a34 100644
--- a/src/common/types.h
+++ b/src/common/types.h
@@ -39,7 +39,7 @@ struct PrecisionTrait {
 };
 
 //! device type
-enum DeviceTypeEnum { kINVALID = -1, kCPU = 0, kFPGA = 1, kGPU_MALI = 2, kX86 = 3 };
+enum DeviceTypeEnum { kINVALID = -1, kCPU = 0, kFPGA = 1, kGPU_MALI = 2 };
 
 template <DeviceTypeEnum T>
 struct DeviceType {};
@@ -47,7 +47,6 @@ struct DeviceType {};
 typedef DeviceType<kCPU> CPU;
 typedef DeviceType<kFPGA> FPGA;
 typedef DeviceType<kGPU_MALI> GPU_MALI;
-typedef DeviceType<kX86> X86;
 
 //! data type
 enum DataType {
diff --git a/src/framework/op_registry.h b/src/framework/op_registry.h
index 1d625e00f61e8eb7f02b72920c6e42672b83d739..25fc6652a466ecec7cb3fd5fe8014019db2794de 100644
--- a/src/framework/op_registry.h
+++ b/src/framework/op_registry.h
@@ -116,8 +116,5 @@ class OpRegistry {
 #define REGISTER_OPERATOR_FPGA(op_type, op_class) \
   REGISTER_OPERATOR(op_type, op_class, fpga, paddle_mobile::FPGA);
 
-#define REGISTER_OPERATOR_X86(op_type, op_class) \
-  REGISTER_OPERATOR(op_type, op_class, x86, paddle_mobile::X86);
-
 }  // namespace framework
 }  // namespace paddle_mobile
diff --git a/src/framework/operator.cpp b/src/framework/operator.cpp
index d95becc51cc4ccdf07b26d3b185b7ded12492a14..7c66f932df3df9793f116c8e62fea704e346b146 100644
--- a/src/framework/operator.cpp
+++ b/src/framework/operator.cpp
@@ -76,7 +76,6 @@ void OperatorBase<Dtype>::Run() const {
 template class OperatorBase<CPU>;
 template class OperatorBase<FPGA>;
 template class OperatorBase<GPU_MALI>;
-template class OperatorBase<X86>;
 
 }  // namespace framework
 }  // namespace paddle_mobile
diff --git a/src/io/executor.cpp b/src/io/executor.cpp
index 5d3d3525812bfb5ad1646f7354bbd68193c9a78c..edec033162c9b1679192ec983592609700bf8780 100644
--- a/src/io/executor.cpp
+++ b/src/io/executor.cpp
@@ -396,6 +396,5 @@ std::vector<typename Executor<Dtype, P>::Ptype> Executor<Dtype, P>::Predict(
 template class Executor<CPU, Precision::FP32>;
 template class Executor<FPGA, Precision::FP32>;
 template class Executor<GPU_MALI, Precision::FP32>;
-template class Executor<X86, Precision::FP32>;
 
 }  // namespace paddle_mobile
diff --git a/src/io/loader.cpp b/src/io/loader.cpp
index f736372c460160c6433be1a1140d814f5fbf76ab..1cef0ad2fdd6bc9f1e0351ed02778f3a1c322677 100644
--- a/src/io/loader.cpp
+++ b/src/io/loader.cpp
@@ -197,6 +197,5 @@ const framework::Program<Dtype, P> Loader<Dtype, P>::LoadCombinedMemory(
 template class Loader<CPU, Precision::FP32>;
 template class Loader<FPGA, Precision::FP32>;
 template class Loader<GPU_MALI, Precision::FP32>;
-template class Loader<X86, Precision::FP32>;
 
 }  // namespace paddle_mobile
diff --git a/src/io/paddle_mobile.cpp b/src/io/paddle_mobile.cpp
index f436c00adb22e826cdce4f5af61f0d85acc25450..275e850caa2fb8da494cdfde5acf24b45e1b40ec 100644
--- a/src/io/paddle_mobile.cpp
+++ b/src/io/paddle_mobile.cpp
@@ -125,6 +125,5 @@ PaddleMobile<Dtype, P>::~PaddleMobile() {
 template class PaddleMobile<CPU, Precision::FP32>;
 template class PaddleMobile<FPGA, Precision::FP32>;
 template class PaddleMobile<GPU_MALI, Precision::FP32>;
-template class PaddleMobile<X86, Precision::FP32>;
 
 }  // namespace paddle_mobile
diff --git a/src/operators/batchnorm_op.cpp b/src/operators/batchnorm_op.cpp
index a36f6dd39c0a9d75250e64cd80443d946a28a755..f820908404ea637d9680c32d5c4b5568e191dd7e 100644
--- a/src/operators/batchnorm_op.cpp
+++ b/src/operators/batchnorm_op.cpp
@@ -40,5 +40,4 @@ REGISTER_OPERATOR_MALI_GPU(batch_norm, ops::BatchNormOp);
 #ifdef PADDLE_MOBILE_FPGA
 #endif
 
-REGISTER_OPERATOR_X86(batch_norm, ops::BatchNormOp);
 #endif
diff --git a/src/operators/bilinear_interp_op.cpp b/src/operators/bilinear_interp_op.cpp
index 608e2ab3a6ae8db428d4dd3a0294cafd81ed682d..b3388c38ec6050faff1cb7bbe49e8dd042291fc9 100644
--- a/src/operators/bilinear_interp_op.cpp
+++ b/src/operators/bilinear_interp_op.cpp
@@ -53,6 +53,4 @@ REGISTER_OPERATOR_CPU(bilinear_interp, ops::BilinearOp);
 #ifdef PADDLE_MOBILE_FPGA
 #endif
 
-REGISTER_OPERATOR_X86(bilinear_interp, ops::BilinearOp);
-
 #endif
diff --git a/src/operators/box_coder_op.cpp b/src/operators/box_coder_op.cpp
index 9c2f53a3576d48b2ab233fc385dd07549eee949c..9e57c9021dac1b6857752989727c1c86051e33f7 100644
--- a/src/operators/box_coder_op.cpp
+++ b/src/operators/box_coder_op.cpp
@@ -60,6 +60,4 @@ REGISTER_OPERATOR_CPU(box_coder, ops::BoxCoderOp);
 #ifdef PADDLE_MOBILE_FPGA
 #endif
 
-REGISTER_OPERATOR_X86(box_coder, ops::BoxCoderOp);
-
 #endif
diff --git a/src/operators/concat_op.cpp b/src/operators/concat_op.cpp
index 2e26d2764b0e1b0a98a8429b97b4901910b8e955..f767f3481c999a16da46e75e314e8ebcb54193fa 100644
--- a/src/operators/concat_op.cpp
+++ b/src/operators/concat_op.cpp
@@ -73,6 +73,4 @@ REGISTER_OPERATOR_MALI_GPU(concat, ops::ConcatOp);
 REGISTER_OPERATOR_FPGA(concat, ops::ConcatOp);
 #endif
 
-REGISTER_OPERATOR_X86(concat, ops::ConcatOp);
-
 #endif
diff --git a/src/operators/conv_op.cpp b/src/operators/conv_op.cpp
index 15702cb8ff02370546251b40c4ced9ba25b6c8f3..c4601995219b32db75f22c7c2ed959e18af85f36 100644
--- a/src/operators/conv_op.cpp
+++ b/src/operators/conv_op.cpp
@@ -62,6 +62,4 @@ REGISTER_OPERATOR_MALI_GPU(conv2d, ops::ConvOp);
 REGISTER_OPERATOR_FPGA(conv2d, ops::ConvOp);
 #endif
 
-REGISTER_OPERATOR_X86(conv2d, ops::ConvOp);
-
 #endif
diff --git a/src/operators/conv_transpose_op.cpp b/src/operators/conv_transpose_op.cpp
index 870b82f75a04f8d65b1b238fa1b985b133e20099..34de4cbb10d3689f0be95f1277cfdd76b4c2c141 100644
--- a/src/operators/conv_transpose_op.cpp
+++ b/src/operators/conv_transpose_op.cpp
@@ -29,6 +29,4 @@ REGISTER_OPERATOR_CPU(conv2d_transpose, ops::ConvOpTranspose);
 #ifdef PADDLE_MOBILE_FPGA
 #endif
 
-REGISTER_OPERATOR_X86(conv2d_transpose, ops::ConvOpTranspose);
-
 #endif
diff --git a/src/operators/crf_op.cpp b/src/operators/crf_op.cpp
index 3411811f3a80cb014431979f6104879db1389a89..61f9a54352e236a7fcb7b2765ab11055fbec95ab 100644
--- a/src/operators/crf_op.cpp
+++ b/src/operators/crf_op.cpp
@@ -52,6 +52,5 @@ REGISTER_OPERATOR_CPU(crf_decoding, ops::CrfOp);
 #endif
 #ifdef PADDLE_MOBILE_FPGA
 #endif
 
-REGISTER_OPERATOR_X86(crf_decoding, ops::CrfOp);
 #endif
diff --git a/src/operators/depthwise_conv_op.cpp b/src/operators/depthwise_conv_op.cpp
index 0fc8f29b81f8fcdcc683fe780efc0fdea10df418..2e7f193c5c9f66668411bb115da9d3cd980f8a6b 100644
--- a/src/operators/depthwise_conv_op.cpp
+++ b/src/operators/depthwise_conv_op.cpp
@@ -56,7 +56,5 @@ namespace ops = paddle_mobile::operators;
 #ifdef PADDLE_MOBILE_CPU
 REGISTER_OPERATOR_CPU(depthwise_conv2d, ops::DepthwiseConvOp);
 #endif
-#ifdef PADDLE_MOBILE_X86
-REGISTER_OPERATOR_X86(depthwise_conv2d, ops::DepthwiseConvOp);
-#endif
+
 #endif
diff --git a/src/operators/dequantize_op.cpp b/src/operators/dequantize_op.cpp
index 6936660a393d6c17a90bd59a67c632d21eba9a8a..8dd35a2e42a220f75e66b31f5474ee4325afffca 100644 --- a/src/operators/dequantize_op.cpp +++ b/src/operators/dequantize_op.cpp @@ -30,7 +30,4 @@ namespace ops = paddle_mobile::operators; #ifdef PADDLE_MOBILE_CPU REGISTER_OPERATOR_CPU(dequantize, ops::DequantizeOp); #endif -#ifdef PADDLE_MOBILE_X86 -REGISTER_OPERATOR_X86(dequantize, ops::DequantizeOp); -#endif diff --git a/src/operators/dropout_op.cpp b/src/operators/dropout_op.cpp index f1cf92b4cc2315ca232d218f9f63667fc705938f..5a0d7cec07b5b7654b4e67dcd899dd425667be27 100644 --- a/src/operators/dropout_op.cpp +++ b/src/operators/dropout_op.cpp @@ -30,9 +30,6 @@ namespace ops = paddle_mobile::operators; #ifdef PADDLE_MOBILE_CPU REGISTER_OPERATOR_CPU(dropout, ops::DropoutOp); #endif -#ifdef PADDLE_MOBILE_X86 -REGISTER_OPERATOR_X86(dropout, ops::DropoutOp); -#endif #ifdef PADDLE_MOBILE_FPGA REGISTER_OPERATOR_FPGA(dropout, ops::DropoutOp); #endif diff --git a/src/operators/elementwise_add_op.cpp b/src/operators/elementwise_add_op.cpp index 0835f3f74928e44e5233889a87a8059564d490be..93e447d51f0e9ce2fdf75c60332ad52950d68c3d 100644 --- a/src/operators/elementwise_add_op.cpp +++ b/src/operators/elementwise_add_op.cpp @@ -35,8 +35,5 @@ REGISTER_OPERATOR_CPU(elementwise_add, ops::ElementwiseAddOp); #ifdef PADDLE_MOBILE_MALI_GPU REGISTER_OPERATOR_MALI_GPU(elementwise_add, ops::ElementwiseAddOp); #endif -#ifdef PADDLE_MOBILE_X86 -REGISTER_OPERATOR_X86(elementwise_add, ops::ElementwiseAddOp); -#endif #endif diff --git a/src/operators/feed_op.cpp b/src/operators/feed_op.cpp index 373239203620ef51858b51e9a93a79fbbb957886..30c794c5d1b5cd717021cd18fa52faf3caa38c79 100644 --- a/src/operators/feed_op.cpp +++ b/src/operators/feed_op.cpp @@ -26,6 +26,4 @@ REGISTER_OPERATOR_MALI_GPU(feed, ops::FeedOp); #ifdef PADDLE_MOBILE_FPGA REGISTER_OPERATOR_FPGA(feed, ops::FeedOp); #endif -#ifdef PADDLE_MOBILE_X86 -REGISTER_OPERATOR_X86(feed, ops::FeedOp); -#endif + diff --git a/src/operators/fetch_op.cpp b/src/operators/fetch_op.cpp index 0a4872089414f68f6a801536053744c1becf9eb8..8bb37638b493d432a5bf69d173c5d980e831c903 100644 --- a/src/operators/fetch_op.cpp +++ b/src/operators/fetch_op.cpp @@ -27,6 +27,4 @@ REGISTER_OPERATOR_MALI_GPU(fetch, ops::FetchOp); #ifdef PADDLE_MOBILE_FPGA REGISTER_OPERATOR_FPGA(fetch, ops::FetchOp); #endif -#ifdef PADDLE_MOBILE_X86 -REGISTER_OPERATOR_X86(fetch, ops::FetchOp); -#endif + diff --git a/src/operators/flatten_op.cpp b/src/operators/flatten_op.cpp index 7f941509e24fdf60545914f33235047c601848e0..932f780d03868b1bbd7c6ee4a84cc5ee92a3fb59 100644 --- a/src/operators/flatten_op.cpp +++ b/src/operators/flatten_op.cpp @@ -53,9 +53,6 @@ namespace ops = paddle_mobile::operators; #ifdef PADDLE_MOBILE_CPU REGISTER_OPERATOR_CPU(flatten, ops::FlattenOp); #endif -#ifdef PADDLE_MOBILE_X86 -REGISTER_OPERATOR_X86(flatten, ops::FlattenOp); -#endif #ifdef PADDLE_MOBILE_FPGA #endif diff --git a/src/operators/fusion_conv_add_add_prelu_op.cpp b/src/operators/fusion_conv_add_add_prelu_op.cpp index 23049265e85add40ed850affe46a492f6b3044e2..dd2514be45c6bbd41e70da237b92689f5e3f322a 100644 --- a/src/operators/fusion_conv_add_add_prelu_op.cpp +++ b/src/operators/fusion_conv_add_add_prelu_op.cpp @@ -51,13 +51,11 @@ static framework::FusionOpRegistrar fusion_conv_add_add_prelu_registrar( } // namespace paddle_mobile namespace ops = paddle_mobile::operators; -#if defined(PADDLE_MOBILE_CPU) +#ifdef PADDLE_MOBILE_CPU REGISTER_OPERATOR_CPU(fusion_conv_add_add_prelu, ops::FusionConvAddAddPReluOp); -#elif 
defined(PADDLE_MOBILE_MALI_GPU) -#elif defined(PADDLE_MOBILE_FPGA) +#endif +#ifdef PADDLE_MOBILE_FPGA REGISTER_OPERATOR_FPGA(fusion_conv_add_add_prelu, ops::FusionConvAddAddPReluOp); -#else -REGISTER_OPERATOR_X86(fusion_conv_add_add_prelu, ops::FusionConvAddAddPReluOp); #endif #endif // FUSION_CONVADDADDPRELU_OP diff --git a/src/operators/fusion_conv_add_bn_op.cpp b/src/operators/fusion_conv_add_bn_op.cpp index 99f942b42d34031796da3cc8cc8e6a08c8cc0208..c8acc19ab83f4f6db09447bd7ee386bb743d1212 100644 --- a/src/operators/fusion_conv_add_bn_op.cpp +++ b/src/operators/fusion_conv_add_bn_op.cpp @@ -55,9 +55,6 @@ namespace ops = paddle_mobile::operators; #ifdef PADDLE_MOBILE_CPU REGISTER_OPERATOR_CPU(fusion_conv_add_bn, ops::FusionConvAddBNOp); #endif -#ifdef PADDLE_MOBILE_X86 -REGISTER_OPERATOR_X86(fusion_conv_add_bn, ops::FusionConvAddBNOp); -#endif #ifdef PADDLE_MOBILE_FPGA REGISTER_OPERATOR_FPGA(fusion_conv_add_bn, ops::FusionConvAddBNOp); #endif diff --git a/src/operators/fusion_conv_add_bn_relu_op.cpp b/src/operators/fusion_conv_add_bn_relu_op.cpp index c4cd211d1cd7acfa4bfa6b9806fae6304d08769e..5c00ce95efe19b26816e10f7d38aad5d7654658f 100644 --- a/src/operators/fusion_conv_add_bn_relu_op.cpp +++ b/src/operators/fusion_conv_add_bn_relu_op.cpp @@ -55,9 +55,6 @@ namespace ops = paddle_mobile::operators; #ifdef PADDLE_MOBILE_CPU REGISTER_OPERATOR_CPU(fusion_conv_add_bn_relu, ops::FusionConvAddBNReluOp); #endif -#ifdef PADDLE_MOBILE_X86 -REGISTER_OPERATOR_X86(fusion_conv_add_bn_relu, ops::FusionConvAddBNReluOp); -#endif #ifdef PADDLE_MOBILE_FPGA REGISTER_OPERATOR_FPGA(fusion_conv_add_bn_relu, ops::FusionConvAddBNReluOp); #endif diff --git a/src/operators/fusion_conv_add_op.cpp b/src/operators/fusion_conv_add_op.cpp index 8cb9cdf22c1b94f1b9c6992ecf2f4b7b3c42105b..8c191859ebe669e50e704fb05c3602b2c3a03812 100644 --- a/src/operators/fusion_conv_add_op.cpp +++ b/src/operators/fusion_conv_add_op.cpp @@ -58,8 +58,5 @@ REGISTER_OPERATOR_CPU(fusion_conv_add, ops::FusionConvAddOp); #ifdef PADDLE_MOBILE_MALI_GPU REGISTER_OPERATOR_MALI_GPU(fusion_conv_add, ops::FusionConvAddOp); #endif -#ifdef PADDLE_MOBILE_X86 -REGISTER_OPERATOR_X86(fusion_conv_add, ops::FusionConvAddOp); -#endif #endif diff --git a/src/operators/fusion_conv_add_prelu_op.cpp b/src/operators/fusion_conv_add_prelu_op.cpp index 7d17292c2e8c047aa913c89eea11611d28fe1084..9784353dbc5016058edc5f1b029785f01f5212dd 100644 --- a/src/operators/fusion_conv_add_prelu_op.cpp +++ b/src/operators/fusion_conv_add_prelu_op.cpp @@ -54,9 +54,6 @@ namespace ops = paddle_mobile::operators; #ifdef PADDLE_MOBILE_CPU REGISTER_OPERATOR_CPU(fusion_conv_add_prelu, ops::FusionConvAddPReluOp); #endif -#ifdef PADDLE_MOBILE_X86 -REGISTER_OPERATOR_X86(fusion_conv_add_prelu, ops::FusionConvAddPReluOp); -#endif #ifdef PADDLE_MOBILE_FPGA REGISTER_OPERATOR_FPGA(fusion_conv_add_prelu, ops::FusionConvAddPReluOp); #endif diff --git a/src/operators/fusion_conv_add_relu_op.cpp b/src/operators/fusion_conv_add_relu_op.cpp index 7cee23c6b77a45dd1ae4bdfaa6e2e57ccdf10d89..3ea417e3f74db8d5c242bb4386e32d922ce91e17 100644 --- a/src/operators/fusion_conv_add_relu_op.cpp +++ b/src/operators/fusion_conv_add_relu_op.cpp @@ -54,9 +54,6 @@ namespace ops = paddle_mobile::operators; #ifdef PADDLE_MOBILE_CPU REGISTER_OPERATOR_CPU(fusion_conv_add_relu, ops::FusionConvAddReluOp); #endif -#ifdef PADDLE_MOBILE_X86 -REGISTER_OPERATOR_X86(fusion_conv_add_relu, ops::FusionConvAddReluOp); -#endif #ifdef PADDLE_MOBILE_FPGA REGISTER_OPERATOR_FPGA(fusion_conv_add_relu, ops::FusionConvAddReluOp); #endif diff 
--git a/src/operators/fusion_conv_bn_add_relu_op.cpp b/src/operators/fusion_conv_bn_add_relu_op.cpp index 693eac81fb3617cddbdab9574135a13b30aa0a32..1b78cd1563f533160d252c7cb62fedc0003c069e 100644 --- a/src/operators/fusion_conv_bn_add_relu_op.cpp +++ b/src/operators/fusion_conv_bn_add_relu_op.cpp @@ -55,9 +55,6 @@ namespace ops = paddle_mobile::operators; #ifdef PADDLE_MOBILE_CPU REGISTER_OPERATOR_CPU(fusion_conv_bn_add_relu, ops::FusionConvBNAddReluOp); #endif -#ifdef PADDLE_MOBILE_X86 -REGISTER_OPERATOR_X86(fusion_conv_bn_add_relu, ops::FusionConvBNAddReluOp); -#endif #ifdef PADDLE_MOBILE_FPGA REGISTER_OPERATOR_FPGA(fusion_conv_bn_add_relu, ops::FusionConvBNAddReluOp); #endif diff --git a/src/operators/fusion_conv_bn_op.cpp b/src/operators/fusion_conv_bn_op.cpp index 7e736092721cc08252c78d3848fe9962d8933a24..85bbccb2f24517a4e9dccafee37de0a0e120727a 100644 --- a/src/operators/fusion_conv_bn_op.cpp +++ b/src/operators/fusion_conv_bn_op.cpp @@ -54,9 +54,6 @@ namespace ops = paddle_mobile::operators; #ifdef PADDLE_MOBILE_CPU REGISTER_OPERATOR_CPU(fusion_conv_bn, ops::FusionConvBNOp); #endif -#ifdef PADDLE_MOBILE_X86 -REGISTER_OPERATOR_X86(fusion_conv_bn, ops::FusionConvBNOp); -#endif #ifdef PADDLE_MOBILE_FPGA REGISTER_OPERATOR_FPGA(fusion_conv_bn, ops::FusionConvBNOp); #endif diff --git a/src/operators/fusion_conv_bn_relu_op.cpp b/src/operators/fusion_conv_bn_relu_op.cpp index c5c403c1942e03dd9ef1cb04477a671374577859..63e194a4c9d86aa7d747f7e0055deb53050b663d 100644 --- a/src/operators/fusion_conv_bn_relu_op.cpp +++ b/src/operators/fusion_conv_bn_relu_op.cpp @@ -55,9 +55,6 @@ namespace ops = paddle_mobile::operators; #ifdef PADDLE_MOBILE_CPU REGISTER_OPERATOR_CPU(fusion_conv_bn_relu, ops::FusionConvBNReluOp); #endif -#ifdef PADDLE_MOBILE_X86 -REGISTER_OPERATOR_X86(fusion_conv_bn_relu, ops::FusionConvBNReluOp); -#endif #ifdef PADDLE_MOBILE_FPGA REGISTER_OPERATOR_FPGA(fusion_conv_bn_relu, ops::FusionConvBNReluOp); #endif diff --git a/src/operators/fusion_dwconv_bn_relu_op.cpp b/src/operators/fusion_dwconv_bn_relu_op.cpp index 4a53e183df19313569dcdfdaea9b9650a58b9633..ff8d829e26f1f7879b5e4831bd832304a13e799b 100644 --- a/src/operators/fusion_dwconv_bn_relu_op.cpp +++ b/src/operators/fusion_dwconv_bn_relu_op.cpp @@ -55,9 +55,6 @@ namespace ops = paddle_mobile::operators; #ifdef PADDLE_MOBILE_CPU REGISTER_OPERATOR_CPU(fusion_dwconv_bn_relu, ops::FusionDWConvBNReluOp); #endif -#ifdef PADDLE_MOBILE_X86 -REGISTER_OPERATOR_X86(fusion_dwconv_bn_relu, ops::FusionDWConvBNReluOp); -#endif #ifdef PADDLE_MOBILE_FPGA #endif diff --git a/src/operators/fusion_fc_op.cpp b/src/operators/fusion_fc_op.cpp index 7ec8150b600e0ee21a7c40fcf266a1a8c79db164..5d6a60ca1b73c83676022f292b9b69714b41b9c0 100644 --- a/src/operators/fusion_fc_op.cpp +++ b/src/operators/fusion_fc_op.cpp @@ -58,14 +58,14 @@ void FusionFcOp::InferShape() const { namespace ops = paddle_mobile::operators; -#if defined(PADDLE_MOBILE_CPU) +#ifdef PADDLE_MOBILE_CPU REGISTER_OPERATOR_CPU(fusion_fc, ops::FusionFcOp); -#elif defined(PADDLE_MOBILE_MALI_GPU) +#endif +#ifdef PADDLE_MOBILE_MALI_GPU REGISTER_OPERATOR_MALI_GPU(fusion_fc, ops::FusionFcOp); -#elif defined(PADDLE_MOBILE_FPGA) +#endif +#ifdef PADDLE_MOBILE_FPGA REGISTER_OPERATOR_FPGA(fusion_fc, ops::FusionFcOp); -#else -REGISTER_OPERATOR_X86(fusion_fc, ops::FusionFcOp); #endif #endif // FUSION_FC_OP diff --git a/src/operators/fusion_fc_relu_op.cpp b/src/operators/fusion_fc_relu_op.cpp index 520372c6fb8e2c621aa6857ccaed2a9094f00dca..870af45656a57e37607beb76205538af16848506 100644 --- 
a/src/operators/fusion_fc_relu_op.cpp +++ b/src/operators/fusion_fc_relu_op.cpp @@ -66,8 +66,5 @@ REGISTER_OPERATOR_MALI_GPU(fusion_fc_relu, ops::FusionFcReluOp); #ifdef PADDLE_MOBILE_FPGA REGISTER_OPERATOR_FPGA(fusion_fc_relu, ops::FusionFcReluOp); #endif -#ifdef PADDLE_MOBILE_X86 -REGISTER_OPERATOR_X86(fusion_fc_relu, ops::FusionFcReluOp); -#endif #endif diff --git a/src/operators/gru_op.cpp b/src/operators/gru_op.cpp index ac64b5f541c436a160fd1f6713931237e7c0239b..cdeb1334cd13df484cbb8517ae3eb87a06d43847 100644 --- a/src/operators/gru_op.cpp +++ b/src/operators/gru_op.cpp @@ -64,9 +64,6 @@ namespace ops = paddle_mobile::operators; #ifdef PADDLE_MOBILE_CPU REGISTER_OPERATOR_CPU(gru, ops::GruOp); #endif -#ifdef PADDLE_MOBILE_X86 -REGISTER_OPERATOR_X86(gru, ops::GruOp); -#endif #ifdef PADDLE_MOBILE_FPGA #endif diff --git a/src/operators/kernel/x86/batchnorm_kernel.cpp b/src/operators/kernel/x86/batchnorm_kernel.cpp deleted file mode 100644 index f896e51cf846104b60b3b919671265cf3741ce6e..0000000000000000000000000000000000000000 --- a/src/operators/kernel/x86/batchnorm_kernel.cpp +++ /dev/null @@ -1,36 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#ifdef BATCHNORM_OP - -#include "operators/kernel/batchnorm_kernel.h" - -namespace paddle_mobile { -namespace operators { - -template <> -bool BatchNormKernel::Init(BatchNormParam *param) { - return true; -} - -template <> -void BatchNormKernel::Compute( - const BatchNormParam ¶m) const { - // TODO -} - -} // namespace operators -} // namespace paddle_mobile - -#endif diff --git a/src/operators/kernel/x86/bilinear_interp_kernel.cpp b/src/operators/kernel/x86/bilinear_interp_kernel.cpp deleted file mode 100644 index 63dd5781aca4659755bd7844977c2af714526178..0000000000000000000000000000000000000000 --- a/src/operators/kernel/x86/bilinear_interp_kernel.cpp +++ /dev/null @@ -1,36 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
*/ - -#ifdef BILINEAR_INTERP_OP - -#include "operators/kernel/bilinear_interp_kernel.h" - -namespace paddle_mobile { -namespace operators { - -template <> -bool BilinearInterpKernel::Init(BilinearInterpParam *param) { - return true; -} - -template <> -void BilinearInterpKernel::Compute( - const BilinearInterpParam ¶m) const { - // TODO -} - -} // namespace operators -} // namespace paddle_mobile - -#endif diff --git a/src/operators/kernel/x86/box_coder_kernel.cpp b/src/operators/kernel/x86/box_coder_kernel.cpp deleted file mode 100644 index a63a23646d39949c4248714cc6d4f4954ca82fc2..0000000000000000000000000000000000000000 --- a/src/operators/kernel/x86/box_coder_kernel.cpp +++ /dev/null @@ -1,36 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#ifdef BOXCODER_OP - -#include "operators/kernel/box_coder_kernel.h" - -namespace paddle_mobile { -namespace operators { - -template <> -bool BoxCoderKernel::Init(BoxCoderParam *param) { - return true; -} - -template <> -void BoxCoderKernel::Compute( - const BoxCoderParam ¶m) const { - // TODO -} - -} // namespace operators -} // namespace paddle_mobile - -#endif diff --git a/src/operators/kernel/x86/concat_kernel.cpp b/src/operators/kernel/x86/concat_kernel.cpp deleted file mode 100644 index 88e0cc99f46cccb578582f686bd5f854241ac73f..0000000000000000000000000000000000000000 --- a/src/operators/kernel/x86/concat_kernel.cpp +++ /dev/null @@ -1,35 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#ifdef CONCAT_OP - -#include "operators/kernel/concat_kernel.h" - -namespace paddle_mobile { -namespace operators { - -template <> -bool ConcatKernel::Init(ConcatParam *param) { - return true; -} - -template <> -void ConcatKernel::Compute(const ConcatParam ¶m) const { - // TODO -} - -} // namespace operators -} // namespace paddle_mobile - -#endif diff --git a/src/operators/kernel/x86/conv_add_add_prelu_kernel.cpp b/src/operators/kernel/x86/conv_add_add_prelu_kernel.cpp deleted file mode 100644 index ad327b09016d2e494b847f4efe849f13d1bffc86..0000000000000000000000000000000000000000 --- a/src/operators/kernel/x86/conv_add_add_prelu_kernel.cpp +++ /dev/null @@ -1,38 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#ifdef FUSION_CONVADDADDPRELU_OP - -#include "operators/kernel/conv_add_add_prelu_kernel.h" - -namespace paddle_mobile { -namespace operators { - -template <> -bool ConvAddAddPReluKernel::Init( - FusionConvAddAddPReluParam *param) { - return true; -} - -template <> -void ConvAddAddPReluKernel::Compute( - const FusionConvAddAddPReluParam ¶m) const { - // TODO -} -template class ConvAddAddPReluKernel; - -} // namespace operators -} // namespace paddle_mobile - -#endif diff --git a/src/operators/kernel/x86/conv_add_bn_relu_kernel.cpp b/src/operators/kernel/x86/conv_add_bn_relu_kernel.cpp deleted file mode 100644 index 3139b8a30fb9fc55eb1ca2ff43ce35c49e8e258d..0000000000000000000000000000000000000000 --- a/src/operators/kernel/x86/conv_add_bn_relu_kernel.cpp +++ /dev/null @@ -1,65 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#ifdef FUSION_CONVADDBNRELU_OP - -#include "operators/kernel/conv_add_bn_relu_kernel.h" - -namespace paddle_mobile { -namespace operators { - -template <> -bool ConvAddBNReluKernel::Init( - FusionConvAddBNReluParam *param) { - const Tensor *mean = param->InputMean(); - const Tensor *variance = param->InputVariance(); - const Tensor *scale = param->InputScale(); - const Tensor *bias = param->InputBias(); - const float epsilon = param->Epsilon(); - - auto mean_ptr = mean->data(); - auto variance_ptr = variance->data(); - auto scale_ptr = scale->data(); - auto bias_ptr = bias->data(); - - const int C = mean->numel(); - float inv_std_ptr[C]; - for (int i = 0; i < C; i++) { - inv_std_ptr[i] = - 1 / static_cast(pow((variance_ptr[i] + epsilon), 0.5)); - } - Tensor *new_scale = new Tensor(); - Tensor *new_bias = new Tensor(); - auto new_scale_ptr = new_scale->mutable_data({C}); - auto new_bias_ptr = new_bias->mutable_data({C}); - for (int i = 0; i < C; i++) { - new_scale_ptr[i] = inv_std_ptr[i] * scale_ptr[i]; - new_bias_ptr[i] = bias_ptr[i] - mean_ptr[i] * inv_std_ptr[i] * scale_ptr[i]; - } - param->SetNewScale(new_scale); - param->SetNewBias(new_bias); - return true; -} - -template <> -void ConvAddBNReluKernel::Compute( - const FusionConvAddBNReluParam ¶m) const { - // TODO -} -template class ConvAddBNReluKernel; - -} // namespace operators -} // namespace paddle_mobile - -#endif diff --git a/src/operators/kernel/x86/conv_add_kernel.cpp b/src/operators/kernel/x86/conv_add_kernel.cpp deleted file mode 100644 index f051daf7961767c32f601b89d532c77ba61ca9bb..0000000000000000000000000000000000000000 --- a/src/operators/kernel/x86/conv_add_kernel.cpp +++ /dev/null @@ -1,38 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. 
All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#ifdef FUSION_CONVADD_OP - -#include "operators/kernel/conv_add_kernel.h" - -namespace paddle_mobile { -namespace operators { - -template <> -bool ConvAddKernel::Init(FusionConvAddParam *param) { - return true; -} - -template <> -void ConvAddKernel::Compute( - const FusionConvAddParam ¶m) const { - // TODO -} - -template class ConvAddKernel; - -} // namespace operators -} // namespace paddle_mobile - -#endif diff --git a/src/operators/kernel/x86/conv_add_prelu_kernel.cpp b/src/operators/kernel/x86/conv_add_prelu_kernel.cpp deleted file mode 100644 index 126b5e2079ea1f376a786c1b409afc0d43765f15..0000000000000000000000000000000000000000 --- a/src/operators/kernel/x86/conv_add_prelu_kernel.cpp +++ /dev/null @@ -1,38 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#ifdef FUSION_CONVADDPRELU_OP - -#include "operators/kernel/conv_add_prelu_kernel.h" - -namespace paddle_mobile { -namespace operators { - -template <> -bool ConvAddPReluKernel::Init(FusionConvAddPReluParam *param) { - return true; -} - -template <> -void ConvAddPReluKernel::Compute( - const FusionConvAddPReluParam ¶m) const { - // TODO -} - -template class ConvAddPReluKernel; - -} // namespace operators -} // namespace paddle_mobile - -#endif diff --git a/src/operators/kernel/x86/conv_add_relu_kernel.cpp b/src/operators/kernel/x86/conv_add_relu_kernel.cpp deleted file mode 100644 index f01f3a0feff2f2eff141363d3901f5e5749fda1b..0000000000000000000000000000000000000000 --- a/src/operators/kernel/x86/conv_add_relu_kernel.cpp +++ /dev/null @@ -1,39 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
*/ - -#ifdef FUSION_CONVADDRELU_OP - -#include "operators/kernel/conv_add_relu_kernel.h" - - -namespace paddle_mobile { -namespace operators { - -template <> -bool ConvAddReluKernel::Init(FusionConvAddReluParam *param) { - return true; -} - -template <> -void ConvAddReluKernel::Compute( - const FusionConvAddReluParam ¶m) const { - // TODO -} - -template class ConvAddReluKernel; - -} // namespace operators -} // namespace paddle_mobile - -#endif diff --git a/src/operators/kernel/x86/conv_bn_add_relu_kernel.cpp b/src/operators/kernel/x86/conv_bn_add_relu_kernel.cpp deleted file mode 100644 index 1fed6538b4ce511997b6e37a780ba3d32f6b818b..0000000000000000000000000000000000000000 --- a/src/operators/kernel/x86/conv_bn_add_relu_kernel.cpp +++ /dev/null @@ -1,65 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#ifdef FUSION_CONVBNADDRELU_OP - -#include "operators/kernel/conv_bn_add_relu_kernel.h" - -namespace paddle_mobile { -namespace operators { - -template <> -bool ConvBNAddReluKernel::Init( - FusionConvBNAddReluParam *param) { - const Tensor *mean = param->InputMean(); - const Tensor *variance = param->InputVariance(); - const Tensor *scale = param->InputScale(); - const Tensor *bias = param->InputBias(); - const float epsilon = param->Epsilon(); - - auto mean_ptr = mean->data(); - auto variance_ptr = variance->data(); - auto scale_ptr = scale->data(); - auto bias_ptr = bias->data(); - - const int C = mean->numel(); - float inv_std_ptr[C]; - for (int i = 0; i < C; i++) { - inv_std_ptr[i] = - 1 / static_cast(pow((variance_ptr[i] + epsilon), 0.5)); - } - Tensor *new_scale = new Tensor(); - Tensor *new_bias = new Tensor(); - auto new_scale_ptr = new_scale->mutable_data({C}); - auto new_bias_ptr = new_bias->mutable_data({C}); - for (int i = 0; i < C; i++) { - new_scale_ptr[i] = inv_std_ptr[i] * scale_ptr[i]; - new_bias_ptr[i] = bias_ptr[i] - mean_ptr[i] * inv_std_ptr[i] * scale_ptr[i]; - } - param->SetNewScale(new_scale); - param->SetNewBias(new_bias); - return true; -} - -template <> -void ConvBNAddReluKernel::Compute( - const FusionConvBNAddReluParam ¶m) const { - // TODO -} -template class ConvBNAddReluKernel; - -} // namespace operators -} // namespace paddle_mobile - -#endif diff --git a/src/operators/kernel/x86/conv_bn_relu_kernel.cpp b/src/operators/kernel/x86/conv_bn_relu_kernel.cpp deleted file mode 100644 index 8eeb2109c58b96e078e483947ff15a6dcfc61298..0000000000000000000000000000000000000000 --- a/src/operators/kernel/x86/conv_bn_relu_kernel.cpp +++ /dev/null @@ -1,68 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#ifdef FUSION_CONVBNRELU_OP - -#include "operators/kernel/conv_bn_relu_kernel.h" - -namespace paddle_mobile { -namespace operators { - -template <> -bool ConvBNReluKernel::Init(FusionConvBNReluParam *param) { - const Tensor *mean = param->InputMean(); - const Tensor *variance = param->InputVariance(); - const Tensor *scale = param->InputScale(); - const Tensor *bias = param->InputBias(); - const float epsilon = param->Epsilon(); - - // DLOG << "variance: " << *variance; - - auto mean_ptr = mean->data(); - auto variance_ptr = variance->data(); - auto scale_ptr = scale->data(); - auto bias_ptr = bias->data(); - - const int C = mean->numel(); - float inv_std_ptr[C]; - for (int i = 0; i < C; i++) { - inv_std_ptr[i] = - 1 / static_cast(pow((variance_ptr[i] + epsilon), 0.5)); - } - Tensor *new_scale = new Tensor(); - Tensor *new_bias = new Tensor(); - auto new_scale_ptr = new_scale->mutable_data({C}); - auto new_bias_ptr = new_bias->mutable_data({C}); - for (int i = 0; i < C; i++) { - new_scale_ptr[i] = inv_std_ptr[i] * scale_ptr[i]; - new_bias_ptr[i] = bias_ptr[i] - mean_ptr[i] * inv_std_ptr[i] * scale_ptr[i]; - } - - param->SetNewScale(new_scale); - param->SetNewBias(new_bias); - return true; -} - -template <> -void ConvBNReluKernel::Compute( - const FusionConvBNReluParam ¶m) const { - // TODO -} - -template class ConvBNReluKernel; - -} // namespace operators -} // namespace paddle_mobile - -#endif diff --git a/src/operators/kernel/x86/conv_kernel.cpp b/src/operators/kernel/x86/conv_kernel.cpp deleted file mode 100644 index 75674ef979398124c3572f9e51ef9fb269c3d74b..0000000000000000000000000000000000000000 --- a/src/operators/kernel/x86/conv_kernel.cpp +++ /dev/null @@ -1,37 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#ifdef CONV_OP - -#include "operators/kernel/conv_kernel.h" - -namespace paddle_mobile { -namespace operators { - -template <> -bool ConvKernel::Init(ConvParam *param) { - return true; -} - -template <> -void ConvKernel::Compute(const ConvParam ¶m) const { - // TODO -} - -template class ConvKernel; - -} // namespace operators -} // namespace paddle_mobile - -#endif diff --git a/src/operators/kernel/x86/conv_transpose_kernel.cpp b/src/operators/kernel/x86/conv_transpose_kernel.cpp deleted file mode 100644 index ce0f84736c377f459e9898692b41d47e808cb5d1..0000000000000000000000000000000000000000 --- a/src/operators/kernel/x86/conv_transpose_kernel.cpp +++ /dev/null @@ -1,38 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#ifdef CONV_TRANSPOSE - -#include "operators/kernel/conv_transpose_kernel.h" - -namespace paddle_mobile { -namespace operators { - -template <> -bool ConvTransposeKernel::Init(ConvTransposeParam *param) { - return true; -} - -template <> -void ConvTransposeKernel::Compute( - const ConvTransposeParam ¶m) const { - // TODO -} - -template class ConvTransposeKernel; - -} // namespace operators -} // namespace paddle_mobile - -#endif diff --git a/src/operators/kernel/x86/crf_kernel.cpp b/src/operators/kernel/x86/crf_kernel.cpp deleted file mode 100644 index 9ba0f13cf964e6b6023b261d89519ccd0f662612..0000000000000000000000000000000000000000 --- a/src/operators/kernel/x86/crf_kernel.cpp +++ /dev/null @@ -1,38 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#ifdef CRF_OP - -#include "operators/kernel/crf_kernel.h" -#include "common/types.h" - -namespace paddle_mobile { -namespace operators { - -template <> -bool CrfKernel::Init(CrfParam *param) { - return true; -} - -template <> -void CrfKernel::Compute(const CrfParam ¶m) const { - // TODO -} - -template class CrfKernel; - -} // namespace operators -} // namespace paddle_mobile - -#endif diff --git a/src/operators/kernel/x86/depthwise_conv_kernel.cpp b/src/operators/kernel/x86/depthwise_conv_kernel.cpp deleted file mode 100644 index 42ccceee8ec4d7ad2ec9e464894c7ef33852d660..0000000000000000000000000000000000000000 --- a/src/operators/kernel/x86/depthwise_conv_kernel.cpp +++ /dev/null @@ -1,38 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
*/ - -#ifdef DEPTHWISECONV_OP - -#include "operators/kernel/depthwise_conv_kernel.h" - -namespace paddle_mobile { -namespace operators { - -template <> -bool DepthwiseConvKernel::Init(ConvParam *param) { - return true; -} - -template <> -void DepthwiseConvKernel::Compute( - const ConvParam ¶m) const { - // TODO -} - -template class DepthwiseConvKernel; - -} // namespace operators -} // namespace paddle_mobile - -#endif diff --git a/src/operators/kernel/x86/dequantize_kernel.cpp b/src/operators/kernel/x86/dequantize_kernel.cpp deleted file mode 100644 index f706777bab8274201c7774ae419236fb4d9ea618..0000000000000000000000000000000000000000 --- a/src/operators/kernel/x86/dequantize_kernel.cpp +++ /dev/null @@ -1,41 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#include "operators/kernel/dequantize_kernel.h" - -namespace paddle_mobile { -namespace operators { - -template<> -bool DequantizeKernel::Init(DequantizeParam *param) { - return true; -} - -template<> -void DequantizeKernel::Compute( - const DequantizeParam ¶m) const { - // TODO - const Tensor *input = param.input_; - Tensor *output = param.out_; - float activation_scale = param.activation_scale_->data()[0]; - float weight_scale = param.weight_scale_; - const int32_t *x = input->data(); - float *y = output->mutable_data(); - for (size_t i = 0; i < output->numel(); ++i) { - y[i] = x[i] / activation_scale / weight_scale; - } -} - -} // namespace paddle_mobile -} // namespace operators diff --git a/src/operators/kernel/x86/dropout_kernel.cpp b/src/operators/kernel/x86/dropout_kernel.cpp deleted file mode 100644 index 487ecdc3c8f66633045abb6e1ca7fbfb71a9f13d..0000000000000000000000000000000000000000 --- a/src/operators/kernel/x86/dropout_kernel.cpp +++ /dev/null @@ -1,36 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
*/ - -#ifdef DROPOUT_OP - -#include "operators/kernel/dropout_kernel.h" -#include - -namespace paddle_mobile { -namespace operators { - -template <> -bool DropoutKernel::Init(DropoutParam *para) { - return true; -} - -template <> -void DropoutKernel::Compute(const DropoutParam ¶m) const { - // TODO -} - -} // namespace operators -} // namespace paddle_mobile - -#endif diff --git a/src/operators/kernel/x86/dwconv_bn_relu_kernel.cpp b/src/operators/kernel/x86/dwconv_bn_relu_kernel.cpp deleted file mode 100644 index 87839eca76429aa943e03d760fc831ec043163cf..0000000000000000000000000000000000000000 --- a/src/operators/kernel/x86/dwconv_bn_relu_kernel.cpp +++ /dev/null @@ -1,65 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#ifdef FUSION_DWCONVBNRELU_OP - -#include "operators/kernel/dwconv_bn_relu_kernel.h" - -namespace paddle_mobile { -namespace operators { - -template <> -bool DWConvBNReluKernel::Init(FusionDWConvBNReluParam *param) { - const Tensor *mean = param->InputMean(); - const Tensor *variance = param->InputVariance(); - const Tensor *scale = param->InputScale(); - const Tensor *bias = param->InputBias(); - const float epsilon = param->Epsilon(); - - auto mean_ptr = mean->data(); - auto variance_ptr = variance->data(); - auto scale_ptr = scale->data(); - auto bias_ptr = bias->data(); - - const int C = mean->numel(); - float inv_std_ptr[C]; - for (int i = 0; i < C; i++) { - inv_std_ptr[i] = - 1 / static_cast(pow((variance_ptr[i] + epsilon), 0.5)); - } - Tensor *new_scale = new Tensor(); - Tensor *new_bias = new Tensor(); - auto new_scale_ptr = new_scale->mutable_data({C}); - auto new_bias_ptr = new_bias->mutable_data({C}); - for (int i = 0; i < C; i++) { - new_scale_ptr[i] = inv_std_ptr[i] * scale_ptr[i]; - new_bias_ptr[i] = bias_ptr[i] - mean_ptr[i] * inv_std_ptr[i] * scale_ptr[i]; - } - param->SetNewScale(new_scale); - param->SetNewBias(new_bias); - return true; -} - -template <> -void DWConvBNReluKernel::Compute( - const FusionDWConvBNReluParam ¶m) const { - // TODO -} - -template class DWConvBNReluKernel; - -} // namespace operators -} // namespace paddle_mobile - -#endif diff --git a/src/operators/kernel/x86/elementwise_add_kernel.cpp b/src/operators/kernel/x86/elementwise_add_kernel.cpp deleted file mode 100644 index 142fddbb46b43ead9559c789cc63b30206b9582f..0000000000000000000000000000000000000000 --- a/src/operators/kernel/x86/elementwise_add_kernel.cpp +++ /dev/null @@ -1,36 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. */ - -#ifdef ELEMENTWISEADD_OP - -#include "operators/kernel/elementwise_add_kernel.h" - -namespace paddle_mobile { -namespace operators { - -template <> -bool ElementwiseAddKernel::Init(ElementwiseAddParam *param) { - return true; -} - -template <> -void ElementwiseAddKernel::Compute( - const ElementwiseAddParam ¶m) const { - // TODO -} - -} // namespace operators -} // namespace paddle_mobile - -#endif diff --git a/src/operators/kernel/x86/flatten_kernel.cpp b/src/operators/kernel/x86/flatten_kernel.cpp deleted file mode 100644 index 3d488e580951f3f74235e2903afd4b833d97d70a..0000000000000000000000000000000000000000 --- a/src/operators/kernel/x86/flatten_kernel.cpp +++ /dev/null @@ -1,35 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#ifdef FLATTEN_OP - -#include "operators/kernel/flatten_kernel.h" - -namespace paddle_mobile { -namespace operators { - -template <> -bool FlattenKernel::Init(FlattenParam *param) { - return true; -} - -template <> -void FlattenKernel::Compute(const FlattenParam ¶m) const { - // TODO -} - -} // namespace operators -} // namespace paddle_mobile - -#endif diff --git a/src/operators/kernel/x86/fusion_fc_kernel.cpp b/src/operators/kernel/x86/fusion_fc_kernel.cpp deleted file mode 100644 index 29f6e3896f355632567799b27f772564bb2c6bad..0000000000000000000000000000000000000000 --- a/src/operators/kernel/x86/fusion_fc_kernel.cpp +++ /dev/null @@ -1,36 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#ifdef FUSION_FC_OP - -#include "operators/kernel/fusion_fc_kernel.h" - -namespace paddle_mobile { -namespace operators { - -template<> -bool FusionFcKernel::Init(FusionFcParam *param) { - return true; -} - -template<> -void FusionFcKernel::Compute( - const FusionFcParam ¶m) const { - // TODO -} - -} // namespace operators -} // namespace paddle_mobile - -#endif diff --git a/src/operators/kernel/x86/gru_kernel.cpp b/src/operators/kernel/x86/gru_kernel.cpp deleted file mode 100644 index 9f7e1d0aecbae1846cc7196a780219d54fe9a05d..0000000000000000000000000000000000000000 --- a/src/operators/kernel/x86/gru_kernel.cpp +++ /dev/null @@ -1,37 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#ifdef GRU_OP - -#include "operators/kernel/gru_kernel.h" - -namespace paddle_mobile { -namespace operators { - -template <> -bool GruKernel::Init(GruParam *param) { - return true; -} - -template <> -void GruKernel::Compute(const GruParam ¶m) const { - // TODO -} - -template class GruKernel; - -} // namespace operators -} // namespace paddle_mobile - -#endif diff --git a/src/operators/kernel/x86/lrn_kernel.cpp b/src/operators/kernel/x86/lrn_kernel.cpp deleted file mode 100644 index e48ea4644210abdd27fbf473c217d2d6b8a44ec0..0000000000000000000000000000000000000000 --- a/src/operators/kernel/x86/lrn_kernel.cpp +++ /dev/null @@ -1,35 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#ifdef LRN_OP - -#include "operators/kernel/lrn_kernel.h" - -namespace paddle_mobile { -namespace operators { - -template <> -bool LrnKernel::Init(LrnParam *param) { - return true; -} - -template <> -void LrnKernel::Compute(const LrnParam ¶m) const { - // TODO -} - -} // namespace operators -} // namespace paddle_mobile - -#endif diff --git a/src/operators/kernel/x86/mul_kernel.cpp b/src/operators/kernel/x86/mul_kernel.cpp deleted file mode 100644 index 6abaddb419e0fc36dcf953338d43ad3d6649f069..0000000000000000000000000000000000000000 --- a/src/operators/kernel/x86/mul_kernel.cpp +++ /dev/null @@ -1,35 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
*/ - -#ifdef MUL_OP - -#include "operators/kernel/mul_kernel.h" - -namespace paddle_mobile { -namespace operators { - -template <> -bool MulKernel::Init(MulParam *param) { - return true; -} - -template <> -void MulKernel::Compute(const MulParam ¶m) const { - // TODO -} - -} // namespace operators -} // namespace paddle_mobile - -#endif diff --git a/src/operators/kernel/x86/multiclass_nms_kernel.cpp b/src/operators/kernel/x86/multiclass_nms_kernel.cpp deleted file mode 100644 index 6f3d5139a7c62a2e0cbff0cf7b275bb2c770ee38..0000000000000000000000000000000000000000 --- a/src/operators/kernel/x86/multiclass_nms_kernel.cpp +++ /dev/null @@ -1,36 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#ifdef MULTICLASSNMS_OP - -#include "operators/kernel/multiclass_nms_kernel.h" - -namespace paddle_mobile { -namespace operators { - -template <> -bool MultiClassNMSKernel::Init(MultiClassNMSParam *param) { - return true; -} - -template <> -void MultiClassNMSKernel::Compute( - const MultiClassNMSParam ¶m) const { - // TODO -} - -} // namespace operators -} // namespace paddle_mobile - -#endif diff --git a/src/operators/kernel/x86/pool_kernel.cpp b/src/operators/kernel/x86/pool_kernel.cpp deleted file mode 100644 index 07c444d1596b887daec6436774f05f4cfaaff70e..0000000000000000000000000000000000000000 --- a/src/operators/kernel/x86/pool_kernel.cpp +++ /dev/null @@ -1,34 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#ifdef POOL_OP - -#include "operators/kernel/pool_kernel.h" - -namespace paddle_mobile { -namespace operators { - -template <> -bool PoolKernel::Init(PoolParam *param) { - return true; -} - -template <> -void PoolKernel::Compute(const PoolParam ¶m) const { - // TODO -} -} // namespace operators -} // namespace paddle_mobile - -#endif diff --git a/src/operators/kernel/x86/prelu_kernel.cpp b/src/operators/kernel/x86/prelu_kernel.cpp deleted file mode 100644 index d885a35335e9678fc5dbae50d2fb4f52056e7a37..0000000000000000000000000000000000000000 --- a/src/operators/kernel/x86/prelu_kernel.cpp +++ /dev/null @@ -1,30 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#ifdef PRELU_OP - -#include "operators/kernel/prelu_kernel.h" - -namespace paddle_mobile { -namespace operators { - -template <> -void PReluKernel::Compute(const PReluParam ¶m) const { - // TODO -} - -} // namespace operators -} // namespace paddle_mobile - -#endif diff --git a/src/operators/kernel/x86/prior_box_kernel.cpp b/src/operators/kernel/x86/prior_box_kernel.cpp deleted file mode 100644 index f4ca30821af3d44a61b323fd92de2825f9b3644a..0000000000000000000000000000000000000000 --- a/src/operators/kernel/x86/prior_box_kernel.cpp +++ /dev/null @@ -1,36 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#ifdef PRIORBOX_OP - -#include "operators/kernel/prior_box_kernel.h" - -namespace paddle_mobile { -namespace operators { - -template <> -bool PriorBoxKernel::Init(PriorBoxParam *param) { - return true; -} - -template <> -void PriorBoxKernel::Compute( - const PriorBoxParam ¶m) const { - // TODO -} - -} // namespace operators -} // namespace paddle_mobile - -#endif diff --git a/src/operators/kernel/x86/quantize_kernel.cpp b/src/operators/kernel/x86/quantize_kernel.cpp deleted file mode 100644 index 0e6ff424a71326d1fe08311d97e275bf646f9d72..0000000000000000000000000000000000000000 --- a/src/operators/kernel/x86/quantize_kernel.cpp +++ /dev/null @@ -1,118 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
*/ -#ifdef PADDLE_MOBILE_X86 - -#include "operators/kernel/quantize_kernel.h" -#include <cmath> -#include <limits> - -namespace paddle_mobile { -namespace operators { - -static float find_abs_max(const Tensor *input) { - float max_abs = float(0); - const float *x = input->data<float>(); - for (size_t i = 0; i < input->numel(); ++i) { - float value = std::abs(x[i]); - if (value > max_abs) { - max_abs = value; - } - } - return max_abs; -} - -static void quantize_round_to_even(const Tensor *input, - const float scale, - Tensor *output) { - const float *x = input->data<float>(); - int8_t *y = output->data<int8_t>(); - for (size_t i = 0; i < input->numel(); ++i) { - float value = x[i] * scale; - long long quant = llround(value); - if (abs(abs(round(value) - value) - 0.5) > 0) { - y[i] = quant; - } else { - if (abs(quant) % 2 == 0) { - y[i] = quant; - } else { - y[i] = quant + (quant > 0) ? -1 : 1; - } - } - } -} - -static void quantize_round_to_zero(const Tensor *input, - const float scale, - Tensor *output) { - const float *x = input->data<float>(); - int8_t *y = output->data<int8_t>(); - for (size_t i = 0; i < input->numel(); ++i) { - y[i] = trunc(x[i] * scale); - } -} - -static void quantize_round_to_nearest(const Tensor *input, - const float scale, - Tensor *output) { - const float *x = input->data<float>(); - int8_t *y = output->data<int8_t>(); - for (size_t i = 0; i < input->numel(); ++i) { - y[i] = round(x[i] * scale); - } -} - -template<> -bool QuantizeKernel<X86, float>::Init(QuantizeParam *param) { - return true; -} - -template<> -void QuantizeKernel<X86, float>::Compute( - const QuantizeParam &param) const { - // TODO - float max_abs = 0.f; - const Tensor *input = param.input_; - Tensor *output = param.out_; - Tensor *output_scale = param.online_scale_; - if (param.is_static_) { - max_abs = param.static_scale_; - } else { - max_abs = find_abs_max(input); - } - if (max_abs < std::numeric_limits<float>::min()) { - max_abs = std::numeric_limits<float>::min(); - } - // only support int8 currently - float online_scale = 127 / max_abs; - param.online_scale_->mutable_data<float>()[0] = online_scale; - switch (param.round_type_) { - case ROUND_NEAREST_TO_EVEN: - quantize_round_to_even(input, online_scale, output); - break; - case ROUND_NEAREST_TOWARDS_ZERO: - quantize_round_to_zero(input, online_scale, output); - break; - case ROUND_NEAREST_AWAY_ZERO: - quantize_round_to_nearest(input, online_scale, output); - default: - LOG(kLOG_ERROR) << "round type is not supported."; - break; - } -} - -} // namespace paddle_mobile -} // namespace operators - -#endif diff --git a/src/operators/kernel/x86/relu_kernel.cpp b/src/operators/kernel/x86/relu_kernel.cpp deleted file mode 100644 index ae353aa18678414bc7a2865491b46bb86b03aa23..0000000000000000000000000000000000000000 --- a/src/operators/kernel/x86/relu_kernel.cpp +++ /dev/null @@ -1,35 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License.
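The x86 quantize kernel removed above is the one deleted file with real logic: it finds max_abs over the input (or takes a static scale), clamps it away from zero, scales by 127 / max_abs, and then applies one of three rounding modes. Note that in its round-to-even branch the expression quant + (quant > 0) ? -1 : 1 parses as (quant + (quant > 0)) ? -1 : 1, since ?: binds more loosely than +. A self-contained sketch of the same int8 round-half-to-even scheme, using plain vectors instead of the framework Tensor type (all names here are illustrative, not part of the library API):

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <limits>
#include <vector>

// Round-half-to-even quantization of one float to int8, assuming |value| <= 127.
static int8_t RoundHalfToEven(float value) {
  long long quant = std::llround(value);  // nearest integer, halfway cases away from zero
  float dist = std::fabs(value - std::round(value));
  if (std::fabs(dist - 0.5f) > 1e-6f) {
    return static_cast<int8_t>(quant);  // not a halfway case
  }
  if (quant % 2 == 0) {
    return static_cast<int8_t>(quant);  // halfway case, already even
  }
  return static_cast<int8_t>(quant + (quant > 0 ? -1 : 1));  // step to the even neighbour
}

// Quantize a whole buffer with scale = 127 / max_abs, as the deleted kernel does.
static std::vector<int8_t> QuantizeToInt8(const std::vector<float> &x) {
  float max_abs = std::numeric_limits<float>::min();  // floor keeps the divide finite
  for (float v : x) max_abs = std::max(max_abs, std::fabs(v));
  const float scale = 127.0f / max_abs;
  std::vector<int8_t> y(x.size());
  for (size_t i = 0; i < x.size(); ++i) {
    y[i] = RoundHalfToEven(x[i] * scale);
  }
  return y;
}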
*/ - -#ifdef RELU_OP - -#include "operators/kernel/relu_kernel.h" - -namespace paddle_mobile { -namespace operators { - -template <> -bool ReluKernel::Init(ReluParam *param) { - return true; -} - -template <> -void ReluKernel::Compute(const ReluParam ¶m) const { - // TODO -} - -} // namespace operators -} // namespace paddle_mobile - -#endif diff --git a/src/operators/kernel/x86/reshape_kernel.cpp b/src/operators/kernel/x86/reshape_kernel.cpp deleted file mode 100644 index d938a9dd23f04e42f99960d882d7445925a9a83d..0000000000000000000000000000000000000000 --- a/src/operators/kernel/x86/reshape_kernel.cpp +++ /dev/null @@ -1,35 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#ifdef RESHAPE_OP - -#include "operators/kernel/reshape_kernel.h" - -namespace paddle_mobile { -namespace operators { - -template <> -bool ReshapeKernel::Init(ReshapeParam *param) { - return true; -} - -template <> -void ReshapeKernel::Compute(const ReshapeParam ¶m) const { - // TODO -} - -} // namespace operators -} // namespace paddle_mobile - -#endif diff --git a/src/operators/kernel/x86/resize_kernel.cpp b/src/operators/kernel/x86/resize_kernel.cpp deleted file mode 100644 index 553d28f233bec2873583edb7eaccdd61b683d954..0000000000000000000000000000000000000000 --- a/src/operators/kernel/x86/resize_kernel.cpp +++ /dev/null @@ -1,31 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#ifdef RESIZE_OP - -#include "operators/kernel/resize_kernel.h" -#include - -namespace paddle_mobile { -namespace operators { - -template <> -void ResizeKernel::Compute(const ResizeParam& param) const { - // TODO -} - -} // namespace operators -} // namespace paddle_mobile - -#endif diff --git a/src/operators/kernel/x86/scale_kernel.cpp b/src/operators/kernel/x86/scale_kernel.cpp deleted file mode 100644 index c1ab15dafa42fc850f5613469309222a06cd27dc..0000000000000000000000000000000000000000 --- a/src/operators/kernel/x86/scale_kernel.cpp +++ /dev/null @@ -1,143 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#ifdef SCALE_OP - -#include "operators/kernel/scale_kernel.h" - -namespace paddle_mobile { -namespace operators { - -template <> -void ScaleKernel::Compute(const ScaleParam ¶m) const { - const auto *input_x = param.InputX(); - auto *input_x_ptr = input_x->data(); - auto *out = param.Out(); - auto *out_ptr = out->mutable_data(); - - const vector scales = param.Scales(); - bool has_bias = param.HasBias(); - - const int dim_size = input_x->dims().size(); - switch (dim_size) { - case 1: { - const int input_width = input_x->dims()[0]; - if (has_bias) { - const vector biases = param.Biases(); - #pragma omp parallel for - for (int w = 0; w < input_width; w++) { - out_ptr[w] = input_x_ptr[w] * scales[w] + biases[w]; - } - } else { - #pragma omp parallel for - for (int w = 0; w < input_width; w++) { - out_ptr[w] = input_x_ptr[w] * scales[w]; - } - } - } break; - case 2: { - const int input_height = input_x->dims()[0]; - const int input_width = input_x->dims()[1]; - - if (has_bias) { - const vector biases = param.Biases(); - #pragma omp parallel for - for (int h = 0; h < input_height; ++h) { - const float *iptr = input_x_ptr + h * input_width; - float *optr = out_ptr + h * input_width; - for (int w = 0; w < input_width; ++w) { - optr[w] = iptr[w] * scales[w] + biases[w]; - } - } - } else { - #pragma omp parallel for - for (int h = 0; h < input_height; ++h) { - const float *iptr = input_x_ptr + h * input_width; - float *optr = out_ptr + h * input_width; - for (int w = 0; w < input_width; ++w) { - optr[w] = iptr[w] * scales[w]; - } - } - } - } break; - case 3: { - const int chan_size = input_x->dims()[0]; - const int input_height = input_x->dims()[1]; - const int input_width = input_x->dims()[2]; - int size = input_width * input_height; - - if (has_bias) { - const vector biases = param.Biases(); - - #pragma omp parallel for - for (int c = 0; c < chan_size; ++c) { - const float *iptr = input_x_ptr + c * size; - float *optr = out_ptr + c * size; - for (int i = 0; i < size; ++i) { - optr[i] = iptr[i] * scales[c] + biases[c]; - } - } - } else { - #pragma omp parallel for - for (int c = 0; c < chan_size; ++c) { - const float *iptr = input_x_ptr + c * size; - float *optr = out_ptr + c * size; - for (int i = 0; i < size; ++i) { - optr[i] = iptr[i] * scales[c]; - } - } - } - } break; - - case 4: { - const int batch_size = input_x->dims()[0]; - const int chan_size = input_x->dims()[0]; - const int input_height = input_x->dims()[1]; - const int input_width = input_x->dims()[2]; - int size = input_width * input_height; - - if (has_bias) { - const vector biases = param.Biases(); - - #pragma omp parallel for - for (int b = 0; b < batch_size; ++b) { - for (int c = 0; c < chan_size; ++c) { - const float *iptr = input_x_ptr + b * c * size; - float *optr = out_ptr + b * c * size; - for (int i = 0; i < size; ++i) { - optr[i] = iptr[i] * scales[c] + biases[c]; - } - } - } - } else { - #pragma omp parallel for - for (int b = 0; b < batch_size; ++b) { - for (int c = 0; c < chan_size; ++c) { - const float *iptr = input_x_ptr + b * c * size; - float *optr = out_ptr + b * c * size; - for (int i = 0; i < size; ++i) { - optr[i] 
= iptr[i] * scales[c]; - } - } - } - } - } break; - default: - break; - } -} -} // namespace operators -} // namespace paddle_mobile - -#endif diff --git a/src/operators/kernel/x86/shape_kernel.cpp b/src/operators/kernel/x86/shape_kernel.cpp deleted file mode 100644 index 3ede2c0d9afaaf3de37e7520fd6a9a37ac876d27..0000000000000000000000000000000000000000 --- a/src/operators/kernel/x86/shape_kernel.cpp +++ /dev/null @@ -1,35 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#ifdef SHAPE_OP - -#include "operators/kernel/shape_kernel.h" - -namespace paddle_mobile { -namespace operators { - -template <> -bool ShapeKernel::Init(ShapeParam *param) { - return true; -} - -template <> -void ShapeKernel::Compute(const ShapeParam ¶m) const { - // TODO -} - -} // namespace operators -} // namespace paddle_mobile - -#endif diff --git a/src/operators/kernel/x86/sigmoid_kernel.cpp b/src/operators/kernel/x86/sigmoid_kernel.cpp deleted file mode 100644 index c97fd94100b6e73f0045bec9504e1985f8d79507..0000000000000000000000000000000000000000 --- a/src/operators/kernel/x86/sigmoid_kernel.cpp +++ /dev/null @@ -1,40 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#ifdef SIGMOID_OP - -#include "operators/kernel/sigmoid_kernel.h" -#include - -namespace paddle_mobile { -namespace operators { - -using framework::DDim; -using framework::Tensor; - -template <> -bool SigmoidKernel::Init(SigmoidParam *param) { - return true; -} - -template <> -void SigmoidKernel::Compute(const SigmoidParam ¶m) const { - // TODO -} - -template class SigmoidKernel; -} // namespace operators -} // namespace paddle_mobile - -#endif diff --git a/src/operators/kernel/x86/softmax_kernel.cpp b/src/operators/kernel/x86/softmax_kernel.cpp deleted file mode 100644 index 7bef5c1605a56a50a6bede52ff32931e331eb9dd..0000000000000000000000000000000000000000 --- a/src/operators/kernel/x86/softmax_kernel.cpp +++ /dev/null @@ -1,38 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
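The scale kernel deleted above (just before shape_kernel.cpp) broadcasts a per-channel scale and optional bias over the spatial plane for 1-D to 4-D inputs. Its 4-D branch takes the channel count from dims()[0] and offsets planes with b * c * size, which looks inconsistent with NCHW layout. A minimal sketch of the intended per-channel affine transform, with plain pointers and hypothetical names in place of the framework Tensor:

#include <vector>

// out[b][c][h][w] = in[b][c][h][w] * scales[c] + biases[c] for an NCHW buffer.
void ScaleNCHW(const float *in, float *out,
               int batch, int channels, int height, int width,
               const std::vector<float> &scales,
               const std::vector<float> &biases) {
  const int plane = height * width;
  for (int b = 0; b < batch; ++b) {
    for (int c = 0; c < channels; ++c) {
      // offset of plane (b, c); the removed code used b * c * plane, which
      // maps different (b, c) pairs onto the same offset
      const float *iptr = in + (b * channels + c) * plane;
      float *optr = out + (b * channels + c) * plane;
      for (int i = 0; i < plane; ++i) {
        optr[i] = iptr[i] * scales[c] + biases[c];
      }
    }
  }
}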
-See the License for the specific language governing permissions and -limitations under the License. */ - -#ifdef SOFTMAX_OP - -#include "../softmax_kernel.h" -#include "operators/math/softmax.h" - -namespace paddle_mobile { -namespace operators { - -template <> -bool SoftmaxKernel::Init(SoftmaxParam *param) { - return true; -} - -template <> -void SoftmaxKernel::Compute(const SoftmaxParam ¶m) const { - // TODO -} - -template class SoftmaxKernel; - -} // namespace operators -} // namespace paddle_mobile - -#endif diff --git a/src/operators/kernel/x86/split_kernel.cpp b/src/operators/kernel/x86/split_kernel.cpp deleted file mode 100644 index 63fb597aeb0d626c9630c1a90d58fff3d4516cb7..0000000000000000000000000000000000000000 --- a/src/operators/kernel/x86/split_kernel.cpp +++ /dev/null @@ -1,35 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#ifdef SPLIT_OP - -#include "operators/kernel/split_kernel.h" - -namespace paddle_mobile { -namespace operators { - -template <> -bool SplitKernel::Init(SplitParam *param) { - return true; -} - -template <> -void SplitKernel::Compute(const SplitParam ¶m) const { - // TODO -} - -} // namespace operators -} // namespace paddle_mobile - -#endif diff --git a/src/operators/kernel/x86/transpose_kernel.cpp b/src/operators/kernel/x86/transpose_kernel.cpp deleted file mode 100644 index 11dd599c56d9fe22f2f9e31a96b61b5200abc18c..0000000000000000000000000000000000000000 --- a/src/operators/kernel/x86/transpose_kernel.cpp +++ /dev/null @@ -1,35 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
*/ -#ifdef TRANSPOSE_OP - -#include "operators/kernel/transpose_kernel.h" - -namespace paddle_mobile { -namespace operators { - -template <> -bool TransposeKernel::Init(TransposeParam *param) { - return true; -} - -template <> -void TransposeKernel::Compute( - const TransposeParam ¶m) const { - // TODO -} - -} // namespace operators -} // namespace paddle_mobile - -#endif diff --git a/src/operators/lrn_op.cpp b/src/operators/lrn_op.cpp index e19813b842651664bd3a06da5afbbe1bca9c1813..faa9ccb6132e70e01e5c076554455d9424c68086 100644 --- a/src/operators/lrn_op.cpp +++ b/src/operators/lrn_op.cpp @@ -35,8 +35,5 @@ REGISTER_OPERATOR_CPU(lrn, ops::LrnOp); #ifdef PADDLE_MOBILE_MALI_GPU REGISTER_OPERATOR_MALI_GPU(lrn, ops::LrnOp); #endif -#ifdef PADDLE_MOBILE_X86 -REGISTER_OPERATOR_X86(lrn, ops::LrnOp); -#endif #endif diff --git a/src/operators/mul_op.cpp b/src/operators/mul_op.cpp index e386a803f03670b2df0d1b8527f8e3b70425da2a..a6b055b62fa25fbca2a85dfa386fa406e207b2e9 100644 --- a/src/operators/mul_op.cpp +++ b/src/operators/mul_op.cpp @@ -55,13 +55,11 @@ void MulOp::InferShape() const { } // namespace paddle_mobile namespace ops = paddle_mobile::operators; -#if defined(PADDLE_MOBILE_CPU) +#ifdef PADDLE_MOBILE_CPU REGISTER_OPERATOR_CPU(mul, ops::MulOp); -#elif defined(PADDLE_MOBILE_MALI_GPU) +#endif +#ifdef PADDLE_MOBILE_MALI_GPU REGISTER_OPERATOR_MALI_GPU(mul, ops::MulOp); -#elif defined(PADDLE_MOBILE_FPGA) -#else -REGISTER_OPERATOR_X86(mul, ops::MulOp); #endif #endif diff --git a/src/operators/multiclass_nms_op.cpp b/src/operators/multiclass_nms_op.cpp index f97170e27cbbcb62e734b580ab9ae39128665cba..97f4f1a1c650e2810b99a2938962ee7f8371dd2f 100644 --- a/src/operators/multiclass_nms_op.cpp +++ b/src/operators/multiclass_nms_op.cpp @@ -39,12 +39,8 @@ void MultiClassNMSOp::InferShape() const { } // namespace paddle_mobile namespace ops = paddle_mobile::operators; -#if defined(PADDLE_MOBILE_CPU) +#ifdef PADDLE_MOBILE_CPU REGISTER_OPERATOR_CPU(multiclass_nms, ops::MultiClassNMSOp); -#elif defined(PADDLE_MOBILE_MALI_GPU) -#elif defined(PADDLE_MOBILE_FPGA) -#else -REGISTER_OPERATOR_X86(multiclass_nms, ops::MultiClassNMSOp); #endif #endif diff --git a/src/operators/op_param.cpp b/src/operators/op_param.cpp index 54d76a3654403cf473a5db15f5cb38adb17495a0..4d1689911686198612eb4df4dfe8f99450ba503d 100644 --- a/src/operators/op_param.cpp +++ b/src/operators/op_param.cpp @@ -42,33 +42,28 @@ Print &operator<<(Print &printer, const ConvParam &conv_param) { template class ConvParam; template class ConvParam; template class ConvParam; -template class ConvParam; #endif template class ElementwiseAddParam; template class ElementwiseAddParam; template class ElementwiseAddParam; -template class ElementwiseAddParam; #ifdef MUL_OP template class MulParam; template class MulParam; template class MulParam; -template class MulParam; #endif #ifdef CONCAT_OP template class ConcatParam; template class ConcatParam; template class ConcatParam; -template class ConcatParam; #endif #ifdef LRN_OP template class LrnParam; template class LrnParam; template class LrnParam; -template class LrnParam; #endif #ifdef FUSION_CONVADD_OP diff --git a/src/operators/pool_op.cpp b/src/operators/pool_op.cpp index e439cfb97b8d7d5b4d3876a29ccd951f9a6c12f1..dd23059ea01a332aff45137b7f7ed4c9f6c2e1bb 100644 --- a/src/operators/pool_op.cpp +++ b/src/operators/pool_op.cpp @@ -68,8 +68,5 @@ REGISTER_OPERATOR_MALI_GPU(pool2d, ops::PoolOp); #ifdef PADDLE_MOBILE_FPGA REGISTER_OPERATOR_FPGA(pool2d, ops::PoolOp); #endif -#ifdef PADDLE_MOBILE_X86 
-REGISTER_OPERATOR_X86(pool2d, ops::PoolOp); -#endif #endif diff --git a/src/operators/prelu_op.cpp b/src/operators/prelu_op.cpp index d0bc85b709620fa542f526b18bf3c1c05324e7ce..2e79c2acd20fd00a8c17627196a385e69cc3c94d 100644 --- a/src/operators/prelu_op.cpp +++ b/src/operators/prelu_op.cpp @@ -39,8 +39,5 @@ REGISTER_OPERATOR_CPU(prelu, ops::PReluOp); #ifdef PADDLE_MOBILE_MALI_GPU REGISTER_OPERATOR_MALI_GPU(prelu, ops::PReluOp); #endif -#ifdef PADDLE_MOBILE_X86 -REGISTER_OPERATOR_X86(prelu, ops::PReluOp); -#endif #endif diff --git a/src/operators/prior_box_op.cpp b/src/operators/prior_box_op.cpp index 59da95ef8bf0428c2d872c89be1b78d5f7bf60c4..bd48013b52f9e4b8651e61afc4c280be3f96b2ac 100644 --- a/src/operators/prior_box_op.cpp +++ b/src/operators/prior_box_op.cpp @@ -54,8 +54,5 @@ REGISTER_OPERATOR_CPU(prior_box, ops::PriorBoxOp); #endif #ifdef PADDLE_MOBILE_MALI_GPU #endif -#ifdef PADDLE_MOBILE_X86 -REGISTER_OPERATOR_X86(prior_box, ops::PriorBoxOp); -#endif #endif diff --git a/src/operators/quantize_op.cpp b/src/operators/quantize_op.cpp index f264c211ec4d00a40da2496caa5c616f559a2b6a..1c6f1049a6a100eac1f1baac01909bd5897dddc7 100644 --- a/src/operators/quantize_op.cpp +++ b/src/operators/quantize_op.cpp @@ -32,7 +32,4 @@ namespace ops = paddle_mobile::operators; #ifdef PADDLE_MOBILE_CPU REGISTER_OPERATOR_CPU(quantize, ops::QuantizeOp); #endif -#ifdef PADDLE_MOBILE_X86 -REGISTER_OPERATOR_X86(quantize, ops::QuantizeOp); -#endif diff --git a/src/operators/relu_op.cpp b/src/operators/relu_op.cpp index 3275fa499ddc13a31ffb7cfac6121e72c7fc9f6c..933e1cfce064d63664ebc35b7ac331d4f32b74b9 100644 --- a/src/operators/relu_op.cpp +++ b/src/operators/relu_op.cpp @@ -39,8 +39,5 @@ REGISTER_OPERATOR_CPU(relu, ops::ReluOp); #ifdef PADDLE_MOBILE_MALI_GPU REGISTER_OPERATOR_MALI_GPU(relu, ops::ReluOp); #endif -#ifdef PADDLE_MOBILE_X86 -REGISTER_OPERATOR_X86(relu, ops::ReluOp); -#endif #endif diff --git a/src/operators/reshape_op.cpp b/src/operators/reshape_op.cpp index 4426149be11ead0da63d85351cc143c89b11cbc9..214007545844e19cf698c6294416a6501a595b58 100644 --- a/src/operators/reshape_op.cpp +++ b/src/operators/reshape_op.cpp @@ -38,8 +38,5 @@ REGISTER_OPERATOR_CPU(reshape, ops::ReshapeOp); #ifdef PADDLE_MOBILE_MALI_GPU REGISTER_OPERATOR_MALI_GPU(reshape, ops::ReshapeOp); #endif -#ifdef PADDLE_MOBILE_X86 -REGISTER_OPERATOR_X86(reshape, ops::ReshapeOp); -#endif #endif diff --git a/src/operators/resize_op.cpp b/src/operators/resize_op.cpp index 1dc52cb7b1399fd247e1651271f79d330c8d9542..dc7a532e7912416738679f5c06eca253be4c3eff 100644 --- a/src/operators/resize_op.cpp +++ b/src/operators/resize_op.cpp @@ -35,8 +35,5 @@ REGISTER_OPERATOR_CPU(resize, ops::ResizeOp); #ifdef PADDLE_MOBILE_MALI_GPU REGISTER_OPERATOR_MALI_GPU(resize, ops::ResizeOp); #endif -#ifdef PADDLE_MOBILE_X86 -REGISTER_OPERATOR_X86(resize, ops::ResizeOp); -#endif #endif diff --git a/src/operators/scale_op.cpp b/src/operators/scale_op.cpp index e55f696f730dd4e9f38a530ef857cf93ce1af436..ceabbaf7a4a94d49c34cbd7e6a38fda8292b8828 100644 --- a/src/operators/scale_op.cpp +++ b/src/operators/scale_op.cpp @@ -35,8 +35,5 @@ REGISTER_OPERATOR_CPU(scale, ops::ScaleOp); #ifdef PADDLE_MOBILE_MALI_GPU REGISTER_OPERATOR_MALI_GPU(scale, ops::ScaleOp); #endif -#ifdef PADDLE_MOBILE_X86 -REGISTER_OPERATOR_X86(scale, ops::ScaleOp); -#endif #endif diff --git a/src/operators/shape_op.cpp b/src/operators/shape_op.cpp index ac654356f1e2fbc3d7d450e46df588055e26c514..6b7754f93c238b0687395194f17bf1df8737dc52 100644 --- a/src/operators/shape_op.cpp +++ 
b/src/operators/shape_op.cpp @@ -36,8 +36,5 @@ REGISTER_OPERATOR_CPU(shape, ops::ShapeOp); #endif #ifdef PADDLE_MOBILE_MALI_GPU #endif -#ifdef PADDLE_MOBILE_X86 -REGISTER_OPERATOR_X86(shape, ops::ShapeOp); -#endif #endif diff --git a/src/operators/sigmoid_op.cpp b/src/operators/sigmoid_op.cpp index 2219f302d36d5c723034daaaca7084858ab6c87a..1bb42792e760cf02c16a0ea38f759fbb52827fcf 100644 --- a/src/operators/sigmoid_op.cpp +++ b/src/operators/sigmoid_op.cpp @@ -27,12 +27,8 @@ void SigmoidOp::InferShape() const { } // namespace paddle_mobile namespace ops = paddle_mobile::operators; -#if defined(PADDLE_MOBILE_CPU) +#ifdef PADDLE_MOBILE_CPU REGISTER_OPERATOR_CPU(sigmoid, ops::SigmoidOp); -#elif defined(PADDLE_MOBILE_MALI_GPU) -#elif defined(PADDLE_MOBILE_FPGA) -#else -REGISTER_OPERATOR_X86(sigmoid, ops::SigmoidOp); #endif #endif diff --git a/src/operators/slice_op.cpp b/src/operators/slice_op.cpp index 975dd4dbd6a6d6ea05b32e370858beabc3142670..ac6c434c9450905931abeb395b294bed64c036b0 100644 --- a/src/operators/slice_op.cpp +++ b/src/operators/slice_op.cpp @@ -34,8 +34,5 @@ REGISTER_OPERATOR_CPU(slice, ops::SliceOp); #ifdef PADDLE_MOBILE_MALI_GPU REGISTER_OPERATOR_MALI_GPU(slice, ops::SliceOp); #endif -#ifdef PADDLE_MOBILE_X86 -REGISTER_OPERATOR_X86(slice, ops::SliceOp); -#endif #endif diff --git a/src/operators/softmax_op.cpp b/src/operators/softmax_op.cpp index 14e0ffeec0c7a5c9c134b753db74107358fe062a..e85edc69c3291c794f2eeb8119b91b2926c4d870 100644 --- a/src/operators/softmax_op.cpp +++ b/src/operators/softmax_op.cpp @@ -36,8 +36,5 @@ REGISTER_OPERATOR_MALI_GPU(softmax, ops::SoftmaxOp); #ifdef PADDLE_MOBILE_FPGA REGISTER_OPERATOR_FPGA(softmax, ops::SoftmaxOp); #endif -#ifdef PADDLE_MOBILE_X86 -REGISTER_OPERATOR_X86(softmax, ops::SoftmaxOp); -#endif #endif diff --git a/src/operators/split_op.cpp b/src/operators/split_op.cpp index c807df05d1f70be0250ae2dd80f853d339031d39..52732b41288fdc94a7dfc07ef6cfc8d12a969b7b 100644 --- a/src/operators/split_op.cpp +++ b/src/operators/split_op.cpp @@ -80,12 +80,8 @@ void SplitOp::InferShape() const { } // namespace paddle_mobile namespace ops = paddle_mobile::operators; -#if defined(PADDLE_MOBILE_CPU) +#ifdef PADDLE_MOBILE_CPU REGISTER_OPERATOR_CPU(split, ops::SplitOp); -#elif defined(PADDLE_MOBILE_MALI_GPU) -#elif defined(PADDLE_MOBILE_FPGA) -#else -REGISTER_OPERATOR_X86(split, ops::SplitOp); #endif #endif // SPLIT_OP diff --git a/src/operators/transpose_op.cpp b/src/operators/transpose_op.cpp index 1a2fdc802c90a873c3927f9e8b82926a6afa0705..74e0c022f7d80b57235f1b3b3dac704728bda780 100644 --- a/src/operators/transpose_op.cpp +++ b/src/operators/transpose_op.cpp @@ -52,12 +52,8 @@ void TransposeOp::InferShape() const { } // namespace paddle_mobile namespace ops = paddle_mobile::operators; -#if defined(PADDLE_MOBILE_CPU) +#ifdef PADDLE_MOBILE_CPU REGISTER_OPERATOR_CPU(transpose, ops::TransposeOp); -#elif defined(PADDLE_MOBILE_MALI_GPU) -#elif defined(PADDLE_MOBILE_FPGA) -#else -REGISTER_OPERATOR_X86(transpose, ops::TransposeOp); #endif #endif // TRANSPOSE_OP diff --git a/test/net/test_googlenet.cpp b/test/net/test_googlenet.cpp index ec772a4b5219937686797db7a65133ae888c3a38..0d21f3032c58302cf8bf655c406e8ee8a5b0e077 100644 --- a/test/net/test_googlenet.cpp +++ b/test/net/test_googlenet.cpp @@ -17,11 +17,7 @@ limitations under the License. 
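The registration hunks above (mul_op, multiclass_nms_op, sigmoid_op, split_op, transpose_op) also replace #if defined(...)/#elif chains with independent #ifdef blocks, so a build that enables more than one backend registers the operator for each of them instead of only the first matching branch. The resulting pattern, sketched here for a hypothetical some_op (the REGISTER_OPERATOR_* macros are the ones this patch already uses for lrn, pool2d and softmax):

namespace ops = paddle_mobile::operators;
#ifdef PADDLE_MOBILE_CPU
REGISTER_OPERATOR_CPU(some_op, ops::SomeOp);
#endif
#ifdef PADDLE_MOBILE_MALI_GPU
REGISTER_OPERATOR_MALI_GPU(some_op, ops::SomeOp);
#endif
#ifdef PADDLE_MOBILE_FPGA
REGISTER_OPERATOR_FPGA(some_op, ops::SomeOp);
#endif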
*/ #include "../test_include.h" int main() { -#if defined(PADDLE_MOBILE_CPU) paddle_mobile::PaddleMobile<paddle_mobile::CPU> paddle_mobile; -#elif defined(PADDLE_MOBILE_X86) - paddle_mobile::PaddleMobile<paddle_mobile::X86> paddle_mobile; -#endif paddle_mobile.SetThreadNum(4); bool optimize = true; auto time1 = time();
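Earlier, the op_param.cpp hunk drops the per-device explicit instantiations of the parameter classes (the X86 lines) while keeping the remaining backends. Explicit instantiation is what lets those templates keep their member definitions in a .cpp file while still emitting code for a fixed set of device tags; a self-contained sketch of the mechanism, with placeholder tag types and a placeholder ExampleParam rather than the framework's own:

// Placeholder device tags and parameter template, illustrative only.
struct CPU {};
struct FPGA {};
struct GPU_MALI {};

template <typename Device>
class ExampleParam {
 public:
  int value() const;
};

// Member definition lives in this .cpp-style file ...
template <typename Device>
int ExampleParam<Device>::value() const {
  return 42;
}

// ... and explicit instantiation definitions emit it for exactly these device
// tags, so other translation units can use ExampleParam<CPU> and friends
// without seeing the member definition. Dropping a line (as the patch does
// for the X86 tag) stops emitting code for that backend.
template class ExampleParam<CPU>;
template class ExampleParam<FPGA>;
template class ExampleParam<GPU_MALI>;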