Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
PaddlePaddle
Paddle-Lite
提交
3c7cde0c
P
Paddle-Lite
项目概览
PaddlePaddle
/
Paddle-Lite
通知
331
Star
4
Fork
1
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
271
列表
看板
标记
里程碑
合并请求
78
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
Paddle-Lite
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
271
Issue
271
列表
看板
标记
里程碑
合并请求
78
合并请求
78
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
提交
3c7cde0c
编写于
10月 09, 2018
作者:
H
hjchen2
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
Remove x86 code
上级
244297e8
变更
91
隐藏空白更改
内联
并排
Showing
91 changed file
with
19 addition
and
1840 deletion
+19
-1840
CMakeLists.txt
CMakeLists.txt
+0
-3
src/common/types.h
src/common/types.h
+1
-2
src/framework/op_registry.h
src/framework/op_registry.h
+0
-3
src/framework/operator.cpp
src/framework/operator.cpp
+0
-1
src/io/executor.cpp
src/io/executor.cpp
+0
-1
src/io/loader.cpp
src/io/loader.cpp
+0
-1
src/io/paddle_mobile.cpp
src/io/paddle_mobile.cpp
+0
-1
src/operators/batchnorm_op.cpp
src/operators/batchnorm_op.cpp
+0
-1
src/operators/bilinear_interp_op.cpp
src/operators/bilinear_interp_op.cpp
+0
-2
src/operators/box_coder_op.cpp
src/operators/box_coder_op.cpp
+0
-2
src/operators/concat_op.cpp
src/operators/concat_op.cpp
+0
-2
src/operators/conv_op.cpp
src/operators/conv_op.cpp
+0
-2
src/operators/conv_transpose_op.cpp
src/operators/conv_transpose_op.cpp
+0
-2
src/operators/crf_op.cpp
src/operators/crf_op.cpp
+0
-1
src/operators/depthwise_conv_op.cpp
src/operators/depthwise_conv_op.cpp
+1
-3
src/operators/dequantize_op.cpp
src/operators/dequantize_op.cpp
+0
-3
src/operators/dropout_op.cpp
src/operators/dropout_op.cpp
+0
-3
src/operators/elementwise_add_op.cpp
src/operators/elementwise_add_op.cpp
+0
-3
src/operators/feed_op.cpp
src/operators/feed_op.cpp
+1
-3
src/operators/fetch_op.cpp
src/operators/fetch_op.cpp
+1
-3
src/operators/flatten_op.cpp
src/operators/flatten_op.cpp
+0
-3
src/operators/fusion_conv_add_add_prelu_op.cpp
src/operators/fusion_conv_add_add_prelu_op.cpp
+3
-5
src/operators/fusion_conv_add_bn_op.cpp
src/operators/fusion_conv_add_bn_op.cpp
+0
-3
src/operators/fusion_conv_add_bn_relu_op.cpp
src/operators/fusion_conv_add_bn_relu_op.cpp
+0
-3
src/operators/fusion_conv_add_op.cpp
src/operators/fusion_conv_add_op.cpp
+0
-3
src/operators/fusion_conv_add_prelu_op.cpp
src/operators/fusion_conv_add_prelu_op.cpp
+0
-3
src/operators/fusion_conv_add_relu_op.cpp
src/operators/fusion_conv_add_relu_op.cpp
+0
-3
src/operators/fusion_conv_bn_add_relu_op.cpp
src/operators/fusion_conv_bn_add_relu_op.cpp
+0
-3
src/operators/fusion_conv_bn_op.cpp
src/operators/fusion_conv_bn_op.cpp
+0
-3
src/operators/fusion_conv_bn_relu_op.cpp
src/operators/fusion_conv_bn_relu_op.cpp
+0
-3
src/operators/fusion_dwconv_bn_relu_op.cpp
src/operators/fusion_dwconv_bn_relu_op.cpp
+0
-3
src/operators/fusion_fc_op.cpp
src/operators/fusion_fc_op.cpp
+5
-5
src/operators/fusion_fc_relu_op.cpp
src/operators/fusion_fc_relu_op.cpp
+0
-3
src/operators/gru_op.cpp
src/operators/gru_op.cpp
+0
-3
src/operators/kernel/x86/batchnorm_kernel.cpp
src/operators/kernel/x86/batchnorm_kernel.cpp
+0
-36
src/operators/kernel/x86/bilinear_interp_kernel.cpp
src/operators/kernel/x86/bilinear_interp_kernel.cpp
+0
-36
src/operators/kernel/x86/box_coder_kernel.cpp
src/operators/kernel/x86/box_coder_kernel.cpp
+0
-36
src/operators/kernel/x86/concat_kernel.cpp
src/operators/kernel/x86/concat_kernel.cpp
+0
-35
src/operators/kernel/x86/conv_add_add_prelu_kernel.cpp
src/operators/kernel/x86/conv_add_add_prelu_kernel.cpp
+0
-38
src/operators/kernel/x86/conv_add_bn_relu_kernel.cpp
src/operators/kernel/x86/conv_add_bn_relu_kernel.cpp
+0
-65
src/operators/kernel/x86/conv_add_kernel.cpp
src/operators/kernel/x86/conv_add_kernel.cpp
+0
-38
src/operators/kernel/x86/conv_add_prelu_kernel.cpp
src/operators/kernel/x86/conv_add_prelu_kernel.cpp
+0
-38
src/operators/kernel/x86/conv_add_relu_kernel.cpp
src/operators/kernel/x86/conv_add_relu_kernel.cpp
+0
-39
src/operators/kernel/x86/conv_bn_add_relu_kernel.cpp
src/operators/kernel/x86/conv_bn_add_relu_kernel.cpp
+0
-65
src/operators/kernel/x86/conv_bn_relu_kernel.cpp
src/operators/kernel/x86/conv_bn_relu_kernel.cpp
+0
-68
src/operators/kernel/x86/conv_kernel.cpp
src/operators/kernel/x86/conv_kernel.cpp
+0
-37
src/operators/kernel/x86/conv_transpose_kernel.cpp
src/operators/kernel/x86/conv_transpose_kernel.cpp
+0
-38
src/operators/kernel/x86/crf_kernel.cpp
src/operators/kernel/x86/crf_kernel.cpp
+0
-38
src/operators/kernel/x86/depthwise_conv_kernel.cpp
src/operators/kernel/x86/depthwise_conv_kernel.cpp
+0
-38
src/operators/kernel/x86/dequantize_kernel.cpp
src/operators/kernel/x86/dequantize_kernel.cpp
+0
-41
src/operators/kernel/x86/dropout_kernel.cpp
src/operators/kernel/x86/dropout_kernel.cpp
+0
-36
src/operators/kernel/x86/dwconv_bn_relu_kernel.cpp
src/operators/kernel/x86/dwconv_bn_relu_kernel.cpp
+0
-65
src/operators/kernel/x86/elementwise_add_kernel.cpp
src/operators/kernel/x86/elementwise_add_kernel.cpp
+0
-36
src/operators/kernel/x86/flatten_kernel.cpp
src/operators/kernel/x86/flatten_kernel.cpp
+0
-35
src/operators/kernel/x86/fusion_fc_kernel.cpp
src/operators/kernel/x86/fusion_fc_kernel.cpp
+0
-36
src/operators/kernel/x86/gru_kernel.cpp
src/operators/kernel/x86/gru_kernel.cpp
+0
-37
src/operators/kernel/x86/lrn_kernel.cpp
src/operators/kernel/x86/lrn_kernel.cpp
+0
-35
src/operators/kernel/x86/mul_kernel.cpp
src/operators/kernel/x86/mul_kernel.cpp
+0
-35
src/operators/kernel/x86/multiclass_nms_kernel.cpp
src/operators/kernel/x86/multiclass_nms_kernel.cpp
+0
-36
src/operators/kernel/x86/pool_kernel.cpp
src/operators/kernel/x86/pool_kernel.cpp
+0
-34
src/operators/kernel/x86/prelu_kernel.cpp
src/operators/kernel/x86/prelu_kernel.cpp
+0
-30
src/operators/kernel/x86/prior_box_kernel.cpp
src/operators/kernel/x86/prior_box_kernel.cpp
+0
-36
src/operators/kernel/x86/quantize_kernel.cpp
src/operators/kernel/x86/quantize_kernel.cpp
+0
-118
src/operators/kernel/x86/relu_kernel.cpp
src/operators/kernel/x86/relu_kernel.cpp
+0
-35
src/operators/kernel/x86/reshape_kernel.cpp
src/operators/kernel/x86/reshape_kernel.cpp
+0
-35
src/operators/kernel/x86/resize_kernel.cpp
src/operators/kernel/x86/resize_kernel.cpp
+0
-31
src/operators/kernel/x86/scale_kernel.cpp
src/operators/kernel/x86/scale_kernel.cpp
+0
-143
src/operators/kernel/x86/shape_kernel.cpp
src/operators/kernel/x86/shape_kernel.cpp
+0
-35
src/operators/kernel/x86/sigmoid_kernel.cpp
src/operators/kernel/x86/sigmoid_kernel.cpp
+0
-40
src/operators/kernel/x86/softmax_kernel.cpp
src/operators/kernel/x86/softmax_kernel.cpp
+0
-38
src/operators/kernel/x86/split_kernel.cpp
src/operators/kernel/x86/split_kernel.cpp
+0
-35
src/operators/kernel/x86/transpose_kernel.cpp
src/operators/kernel/x86/transpose_kernel.cpp
+0
-35
src/operators/lrn_op.cpp
src/operators/lrn_op.cpp
+0
-3
src/operators/mul_op.cpp
src/operators/mul_op.cpp
+3
-5
src/operators/multiclass_nms_op.cpp
src/operators/multiclass_nms_op.cpp
+1
-5
src/operators/op_param.cpp
src/operators/op_param.cpp
+0
-5
src/operators/pool_op.cpp
src/operators/pool_op.cpp
+0
-3
src/operators/prelu_op.cpp
src/operators/prelu_op.cpp
+0
-3
src/operators/prior_box_op.cpp
src/operators/prior_box_op.cpp
+0
-3
src/operators/quantize_op.cpp
src/operators/quantize_op.cpp
+0
-3
src/operators/relu_op.cpp
src/operators/relu_op.cpp
+0
-3
src/operators/reshape_op.cpp
src/operators/reshape_op.cpp
+0
-3
src/operators/resize_op.cpp
src/operators/resize_op.cpp
+0
-3
src/operators/scale_op.cpp
src/operators/scale_op.cpp
+0
-3
src/operators/shape_op.cpp
src/operators/shape_op.cpp
+0
-3
src/operators/sigmoid_op.cpp
src/operators/sigmoid_op.cpp
+1
-5
src/operators/slice_op.cpp
src/operators/slice_op.cpp
+0
-3
src/operators/softmax_op.cpp
src/operators/softmax_op.cpp
+0
-3
src/operators/split_op.cpp
src/operators/split_op.cpp
+1
-5
src/operators/transpose_op.cpp
src/operators/transpose_op.cpp
+1
-5
test/net/test_googlenet.cpp
test/net/test_googlenet.cpp
+0
-4
未找到文件。
CMakeLists.txt
浏览文件 @
3c7cde0c
...
...
@@ -7,7 +7,6 @@ option(DEBUGING "enable debug mode" ON)
option
(
USE_EXCEPTION
"use std exception"
OFF
)
option
(
LOG_PROFILE
"log profile"
OFF
)
# select the platform to build
option
(
X86
"x86"
OFF
)
option
(
CPU
"armv7 with neon"
ON
)
option
(
MALI_GPU
"mali gpu"
OFF
)
option
(
FPGA
"fpga"
OFF
)
...
...
@@ -57,8 +56,6 @@ endif()
if
(
CPU
)
add_definitions
(
-DPADDLE_MOBILE_CPU
)
elseif
(
X86
)
add_definitions
(
-DPADDLE_MOBILE_X86
)
else
()
file
(
GLOB_RECURSE _tmp_list src/operators/kernel/arm/*.cpp src/operators/kernel/arm/*.cc
)
foreach
(
f
${
_tmp_list
}
)
...
...
src/common/types.h
浏览文件 @
3c7cde0c
...
...
@@ -39,7 +39,7 @@ struct PrecisionTrait<Precision::FP16> {
};
//! device type
enum
DeviceTypeEnum
{
kINVALID
=
-
1
,
kCPU
=
0
,
kFPGA
=
1
,
kGPU_MALI
=
2
,
kX86
=
3
};
enum
DeviceTypeEnum
{
kINVALID
=
-
1
,
kCPU
=
0
,
kFPGA
=
1
,
kGPU_MALI
=
2
};
template
<
DeviceTypeEnum
T
>
struct
DeviceType
{};
...
...
@@ -47,7 +47,6 @@ struct DeviceType {};
typedef
DeviceType
<
kCPU
>
CPU
;
typedef
DeviceType
<
kFPGA
>
FPGA
;
typedef
DeviceType
<
kGPU_MALI
>
GPU_MALI
;
typedef
DeviceType
<
kX86
>
X86
;
//! data type
enum
DataType
{
...
...
src/framework/op_registry.h
浏览文件 @
3c7cde0c
...
...
@@ -116,8 +116,5 @@ class OpRegistry {
#define REGISTER_OPERATOR_FPGA(op_type, op_class) \
REGISTER_OPERATOR(op_type, op_class, fpga, paddle_mobile::FPGA);
#define REGISTER_OPERATOR_X86(op_type, op_class) \
REGISTER_OPERATOR(op_type, op_class, x86, paddle_mobile::X86);
}
// namespace framework
}
// namespace paddle_mobile
src/framework/operator.cpp
浏览文件 @
3c7cde0c
...
...
@@ -76,7 +76,6 @@ void OperatorBase<Dtype>::Run() const {
template
class
OperatorBase
<
CPU
>;
template
class
OperatorBase
<
FPGA
>;
template
class
OperatorBase
<
GPU_MALI
>;
template
class
OperatorBase
<
X86
>;
}
// namespace framework
}
// namespace paddle_mobile
src/io/executor.cpp
浏览文件 @
3c7cde0c
...
...
@@ -396,6 +396,5 @@ std::vector<typename Executor<Dtype, P>::Ptype> Executor<Dtype, P>::Predict(
template
class
Executor
<
CPU
,
Precision
::
FP32
>;
template
class
Executor
<
GPU_MALI
,
Precision
::
FP32
>;
template
class
Executor
<
FPGA
,
Precision
::
FP32
>;
template
class
Executor
<
X86
,
Precision
::
FP32
>;
}
// namespace paddle_mobile
src/io/loader.cpp
浏览文件 @
3c7cde0c
...
...
@@ -197,6 +197,5 @@ const framework::Program<Dtype, P> Loader<Dtype, P>::LoadCombinedMemory(
template
class
Loader
<
CPU
,
Precision
::
FP32
>;
template
class
Loader
<
FPGA
,
Precision
::
FP32
>;
template
class
Loader
<
GPU_MALI
,
Precision
::
FP32
>;
template
class
Loader
<
X86
,
Precision
::
FP32
>;
}
// namespace paddle_mobile
src/io/paddle_mobile.cpp
浏览文件 @
3c7cde0c
...
...
@@ -125,6 +125,5 @@ PaddleMobile<Dtype, P>::~PaddleMobile() {
template
class
PaddleMobile
<
CPU
,
Precision
::
FP32
>;
template
class
PaddleMobile
<
FPGA
,
Precision
::
FP32
>;
template
class
PaddleMobile
<
GPU_MALI
,
Precision
::
FP32
>;
template
class
PaddleMobile
<
X86
,
Precision
::
FP32
>;
}
// namespace paddle_mobile
src/operators/batchnorm_op.cpp
浏览文件 @
3c7cde0c
...
...
@@ -40,5 +40,4 @@ REGISTER_OPERATOR_MALI_GPU(batch_norm, ops::BatchNormOp);
#ifdef PADDLE_MOBILE_FPGA
#endif
REGISTER_OPERATOR_X86
(
batch_norm
,
ops
::
BatchNormOp
);
#endif
src/operators/bilinear_interp_op.cpp
浏览文件 @
3c7cde0c
...
...
@@ -53,6 +53,4 @@ REGISTER_OPERATOR_CPU(bilinear_interp, ops::BilinearOp);
#ifdef PADDLE_MOBILE_FPGA
#endif
REGISTER_OPERATOR_X86
(
bilinear_interp
,
ops
::
BilinearOp
);
#endif
src/operators/box_coder_op.cpp
浏览文件 @
3c7cde0c
...
...
@@ -60,6 +60,4 @@ REGISTER_OPERATOR_CPU(box_coder, ops::BoxCoderOp);
#ifdef PADDLE_MOBILE_FPGA
#endif
REGISTER_OPERATOR_X86
(
box_coder
,
ops
::
BoxCoderOp
);
#endif
src/operators/concat_op.cpp
浏览文件 @
3c7cde0c
...
...
@@ -73,6 +73,4 @@ REGISTER_OPERATOR_MALI_GPU(concat, ops::ConcatOp);
REGISTER_OPERATOR_FPGA
(
concat
,
ops
::
ConcatOp
);
#endif
REGISTER_OPERATOR_X86
(
concat
,
ops
::
ConcatOp
);
#endif
src/operators/conv_op.cpp
浏览文件 @
3c7cde0c
...
...
@@ -62,6 +62,4 @@ REGISTER_OPERATOR_MALI_GPU(conv2d, ops::ConvOp);
REGISTER_OPERATOR_FPGA
(
conv2d
,
ops
::
ConvOp
);
#endif
REGISTER_OPERATOR_X86
(
conv2d
,
ops
::
ConvOp
);
#endif
src/operators/conv_transpose_op.cpp
浏览文件 @
3c7cde0c
...
...
@@ -29,6 +29,4 @@ REGISTER_OPERATOR_CPU(conv2d_transpose, ops::ConvOpTranspose);
#ifdef PADDLE_MOBILE_FPGA
#endif
REGISTER_OPERATOR_X86
(
conv2d_transpose
,
ops
::
ConvOpTranspose
);
#endif
src/operators/crf_op.cpp
浏览文件 @
3c7cde0c
...
...
@@ -52,6 +52,5 @@ REGISTER_OPERATOR_CPU(crf_decoding, ops::CrfOp);
#endif
#ifdef PADDLE_MOBILE_FPGA
#endif
REGISTER_OPERATOR_X86
(
crf_decoding
,
ops
::
CrfOp
);
#endif
src/operators/depthwise_conv_op.cpp
浏览文件 @
3c7cde0c
...
...
@@ -56,7 +56,5 @@ namespace ops = paddle_mobile::operators;
#ifdef PADDLE_MOBILE_CPU
REGISTER_OPERATOR_CPU
(
depthwise_conv2d
,
ops
::
DepthwiseConvOp
);
#endif
#ifdef PADDLE_MOBILE_X86
REGISTER_OPERATOR_X86
(
depthwise_conv2d
,
ops
::
DepthwiseConvOp
);
#endif
#endif
src/operators/dequantize_op.cpp
浏览文件 @
3c7cde0c
...
...
@@ -30,7 +30,4 @@ namespace ops = paddle_mobile::operators;
#ifdef PADDLE_MOBILE_CPU
REGISTER_OPERATOR_CPU
(
dequantize
,
ops
::
DequantizeOp
);
#endif
#ifdef PADDLE_MOBILE_X86
REGISTER_OPERATOR_X86
(
dequantize
,
ops
::
DequantizeOp
);
#endif
src/operators/dropout_op.cpp
浏览文件 @
3c7cde0c
...
...
@@ -30,9 +30,6 @@ namespace ops = paddle_mobile::operators;
#ifdef PADDLE_MOBILE_CPU
REGISTER_OPERATOR_CPU
(
dropout
,
ops
::
DropoutOp
);
#endif
#ifdef PADDLE_MOBILE_X86
REGISTER_OPERATOR_X86
(
dropout
,
ops
::
DropoutOp
);
#endif
#ifdef PADDLE_MOBILE_FPGA
REGISTER_OPERATOR_FPGA
(
dropout
,
ops
::
DropoutOp
);
#endif
...
...
src/operators/elementwise_add_op.cpp
浏览文件 @
3c7cde0c
...
...
@@ -35,8 +35,5 @@ REGISTER_OPERATOR_CPU(elementwise_add, ops::ElementwiseAddOp);
#ifdef PADDLE_MOBILE_MALI_GPU
REGISTER_OPERATOR_MALI_GPU
(
elementwise_add
,
ops
::
ElementwiseAddOp
);
#endif
#ifdef PADDLE_MOBILE_X86
REGISTER_OPERATOR_X86
(
elementwise_add
,
ops
::
ElementwiseAddOp
);
#endif
#endif
src/operators/feed_op.cpp
浏览文件 @
3c7cde0c
...
...
@@ -26,6 +26,4 @@ REGISTER_OPERATOR_MALI_GPU(feed, ops::FeedOp);
#ifdef PADDLE_MOBILE_FPGA
REGISTER_OPERATOR_FPGA
(
feed
,
ops
::
FeedOp
);
#endif
#ifdef PADDLE_MOBILE_X86
REGISTER_OPERATOR_X86
(
feed
,
ops
::
FeedOp
);
#endif
src/operators/fetch_op.cpp
浏览文件 @
3c7cde0c
...
...
@@ -27,6 +27,4 @@ REGISTER_OPERATOR_MALI_GPU(fetch, ops::FetchOp);
#ifdef PADDLE_MOBILE_FPGA
REGISTER_OPERATOR_FPGA
(
fetch
,
ops
::
FetchOp
);
#endif
#ifdef PADDLE_MOBILE_X86
REGISTER_OPERATOR_X86
(
fetch
,
ops
::
FetchOp
);
#endif
src/operators/flatten_op.cpp
浏览文件 @
3c7cde0c
...
...
@@ -53,9 +53,6 @@ namespace ops = paddle_mobile::operators;
#ifdef PADDLE_MOBILE_CPU
REGISTER_OPERATOR_CPU
(
flatten
,
ops
::
FlattenOp
);
#endif
#ifdef PADDLE_MOBILE_X86
REGISTER_OPERATOR_X86
(
flatten
,
ops
::
FlattenOp
);
#endif
#ifdef PADDLE_MOBILE_FPGA
#endif
...
...
src/operators/fusion_conv_add_add_prelu_op.cpp
浏览文件 @
3c7cde0c
...
...
@@ -51,13 +51,11 @@ static framework::FusionOpRegistrar fusion_conv_add_add_prelu_registrar(
}
// namespace paddle_mobile
namespace
ops
=
paddle_mobile
::
operators
;
#if
defined(PADDLE_MOBILE_CPU)
#if
def PADDLE_MOBILE_CPU
REGISTER_OPERATOR_CPU
(
fusion_conv_add_add_prelu
,
ops
::
FusionConvAddAddPReluOp
);
#e
lif defined(PADDLE_MOBILE_MALI_GPU)
#
elif defined(PADDLE_MOBILE_FPGA)
#e
ndif
#
ifdef PADDLE_MOBILE_FPGA
REGISTER_OPERATOR_FPGA
(
fusion_conv_add_add_prelu
,
ops
::
FusionConvAddAddPReluOp
);
#else
REGISTER_OPERATOR_X86
(
fusion_conv_add_add_prelu
,
ops
::
FusionConvAddAddPReluOp
);
#endif
#endif // FUSION_CONVADDADDPRELU_OP
src/operators/fusion_conv_add_bn_op.cpp
浏览文件 @
3c7cde0c
...
...
@@ -55,9 +55,6 @@ namespace ops = paddle_mobile::operators;
#ifdef PADDLE_MOBILE_CPU
REGISTER_OPERATOR_CPU
(
fusion_conv_add_bn
,
ops
::
FusionConvAddBNOp
);
#endif
#ifdef PADDLE_MOBILE_X86
REGISTER_OPERATOR_X86
(
fusion_conv_add_bn
,
ops
::
FusionConvAddBNOp
);
#endif
#ifdef PADDLE_MOBILE_FPGA
REGISTER_OPERATOR_FPGA
(
fusion_conv_add_bn
,
ops
::
FusionConvAddBNOp
);
#endif
...
...
src/operators/fusion_conv_add_bn_relu_op.cpp
浏览文件 @
3c7cde0c
...
...
@@ -55,9 +55,6 @@ namespace ops = paddle_mobile::operators;
#ifdef PADDLE_MOBILE_CPU
REGISTER_OPERATOR_CPU
(
fusion_conv_add_bn_relu
,
ops
::
FusionConvAddBNReluOp
);
#endif
#ifdef PADDLE_MOBILE_X86
REGISTER_OPERATOR_X86
(
fusion_conv_add_bn_relu
,
ops
::
FusionConvAddBNReluOp
);
#endif
#ifdef PADDLE_MOBILE_FPGA
REGISTER_OPERATOR_FPGA
(
fusion_conv_add_bn_relu
,
ops
::
FusionConvAddBNReluOp
);
#endif
...
...
src/operators/fusion_conv_add_op.cpp
浏览文件 @
3c7cde0c
...
...
@@ -58,8 +58,5 @@ REGISTER_OPERATOR_CPU(fusion_conv_add, ops::FusionConvAddOp);
#ifdef PADDLE_MOBILE_MALI_GPU
REGISTER_OPERATOR_MALI_GPU
(
fusion_conv_add
,
ops
::
FusionConvAddOp
);
#endif
#ifdef PADDLE_MOBILE_X86
REGISTER_OPERATOR_X86
(
fusion_conv_add
,
ops
::
FusionConvAddOp
);
#endif
#endif
src/operators/fusion_conv_add_prelu_op.cpp
浏览文件 @
3c7cde0c
...
...
@@ -54,9 +54,6 @@ namespace ops = paddle_mobile::operators;
#ifdef PADDLE_MOBILE_CPU
REGISTER_OPERATOR_CPU
(
fusion_conv_add_prelu
,
ops
::
FusionConvAddPReluOp
);
#endif
#ifdef PADDLE_MOBILE_X86
REGISTER_OPERATOR_X86
(
fusion_conv_add_prelu
,
ops
::
FusionConvAddPReluOp
);
#endif
#ifdef PADDLE_MOBILE_FPGA
REGISTER_OPERATOR_FPGA
(
fusion_conv_add_prelu
,
ops
::
FusionConvAddPReluOp
);
#endif
...
...
src/operators/fusion_conv_add_relu_op.cpp
浏览文件 @
3c7cde0c
...
...
@@ -54,9 +54,6 @@ namespace ops = paddle_mobile::operators;
#ifdef PADDLE_MOBILE_CPU
REGISTER_OPERATOR_CPU
(
fusion_conv_add_relu
,
ops
::
FusionConvAddReluOp
);
#endif
#ifdef PADDLE_MOBILE_X86
REGISTER_OPERATOR_X86
(
fusion_conv_add_relu
,
ops
::
FusionConvAddReluOp
);
#endif
#ifdef PADDLE_MOBILE_FPGA
REGISTER_OPERATOR_FPGA
(
fusion_conv_add_relu
,
ops
::
FusionConvAddReluOp
);
#endif
...
...
src/operators/fusion_conv_bn_add_relu_op.cpp
浏览文件 @
3c7cde0c
...
...
@@ -55,9 +55,6 @@ namespace ops = paddle_mobile::operators;
#ifdef PADDLE_MOBILE_CPU
REGISTER_OPERATOR_CPU
(
fusion_conv_bn_add_relu
,
ops
::
FusionConvBNAddReluOp
);
#endif
#ifdef PADDLE_MOBILE_X86
REGISTER_OPERATOR_X86
(
fusion_conv_bn_add_relu
,
ops
::
FusionConvBNAddReluOp
);
#endif
#ifdef PADDLE_MOBILE_FPGA
REGISTER_OPERATOR_FPGA
(
fusion_conv_bn_add_relu
,
ops
::
FusionConvBNAddReluOp
);
#endif
...
...
src/operators/fusion_conv_bn_op.cpp
浏览文件 @
3c7cde0c
...
...
@@ -54,9 +54,6 @@ namespace ops = paddle_mobile::operators;
#ifdef PADDLE_MOBILE_CPU
REGISTER_OPERATOR_CPU
(
fusion_conv_bn
,
ops
::
FusionConvBNOp
);
#endif
#ifdef PADDLE_MOBILE_X86
REGISTER_OPERATOR_X86
(
fusion_conv_bn
,
ops
::
FusionConvBNOp
);
#endif
#ifdef PADDLE_MOBILE_FPGA
REGISTER_OPERATOR_FPGA
(
fusion_conv_bn
,
ops
::
FusionConvBNOp
);
#endif
...
...
src/operators/fusion_conv_bn_relu_op.cpp
浏览文件 @
3c7cde0c
...
...
@@ -55,9 +55,6 @@ namespace ops = paddle_mobile::operators;
#ifdef PADDLE_MOBILE_CPU
REGISTER_OPERATOR_CPU
(
fusion_conv_bn_relu
,
ops
::
FusionConvBNReluOp
);
#endif
#ifdef PADDLE_MOBILE_X86
REGISTER_OPERATOR_X86
(
fusion_conv_bn_relu
,
ops
::
FusionConvBNReluOp
);
#endif
#ifdef PADDLE_MOBILE_FPGA
REGISTER_OPERATOR_FPGA
(
fusion_conv_bn_relu
,
ops
::
FusionConvBNReluOp
);
#endif
...
...
src/operators/fusion_dwconv_bn_relu_op.cpp
浏览文件 @
3c7cde0c
...
...
@@ -55,9 +55,6 @@ namespace ops = paddle_mobile::operators;
#ifdef PADDLE_MOBILE_CPU
REGISTER_OPERATOR_CPU
(
fusion_dwconv_bn_relu
,
ops
::
FusionDWConvBNReluOp
);
#endif
#ifdef PADDLE_MOBILE_X86
REGISTER_OPERATOR_X86
(
fusion_dwconv_bn_relu
,
ops
::
FusionDWConvBNReluOp
);
#endif
#ifdef PADDLE_MOBILE_FPGA
#endif
...
...
src/operators/fusion_fc_op.cpp
浏览文件 @
3c7cde0c
...
...
@@ -58,14 +58,14 @@ void FusionFcOp<Dtype, T>::InferShape() const {
namespace
ops
=
paddle_mobile
::
operators
;
#if
defined(PADDLE_MOBILE_CPU)
#if
def PADDLE_MOBILE_CPU
REGISTER_OPERATOR_CPU
(
fusion_fc
,
ops
::
FusionFcOp
);
#elif defined(PADDLE_MOBILE_MALI_GPU)
#endif
#ifdef PADDLE_MOBILE_MALI_GPU
REGISTER_OPERATOR_MALI_GPU
(
fusion_fc
,
ops
::
FusionFcOp
);
#elif defined(PADDLE_MOBILE_FPGA)
#endif
#ifdef PADDLE_MOBILE_FPGA
REGISTER_OPERATOR_FPGA
(
fusion_fc
,
ops
::
FusionFcOp
);
#else
REGISTER_OPERATOR_X86
(
fusion_fc
,
ops
::
FusionFcOp
);
#endif
#endif // FUSION_FC_OP
src/operators/fusion_fc_relu_op.cpp
浏览文件 @
3c7cde0c
...
...
@@ -66,8 +66,5 @@ REGISTER_OPERATOR_MALI_GPU(fusion_fc_relu, ops::FusionFcReluOp);
#ifdef PADDLE_MOBILE_FPGA
REGISTER_OPERATOR_FPGA
(
fusion_fc_relu
,
ops
::
FusionFcReluOp
);
#endif
#ifdef PADDLE_MOBILE_X86
REGISTER_OPERATOR_X86
(
fusion_fc_relu
,
ops
::
FusionFcReluOp
);
#endif
#endif
src/operators/gru_op.cpp
浏览文件 @
3c7cde0c
...
...
@@ -64,9 +64,6 @@ namespace ops = paddle_mobile::operators;
#ifdef PADDLE_MOBILE_CPU
REGISTER_OPERATOR_CPU
(
gru
,
ops
::
GruOp
);
#endif
#ifdef PADDLE_MOBILE_X86
REGISTER_OPERATOR_X86
(
gru
,
ops
::
GruOp
);
#endif
#ifdef PADDLE_MOBILE_FPGA
#endif
...
...
src/operators/kernel/x86/batchnorm_kernel.cpp
已删除
100644 → 0
浏览文件 @
244297e8
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef BATCHNORM_OP
#include "operators/kernel/batchnorm_kernel.h"
namespace
paddle_mobile
{
namespace
operators
{
template
<
>
bool
BatchNormKernel
<
X86
,
float
>::
Init
(
BatchNormParam
<
X86
>
*
param
)
{
return
true
;
}
template
<
>
void
BatchNormKernel
<
X86
,
float
>::
Compute
(
const
BatchNormParam
<
X86
>
&
param
)
const
{
// TODO
}
}
// namespace operators
}
// namespace paddle_mobile
#endif
src/operators/kernel/x86/bilinear_interp_kernel.cpp
已删除
100644 → 0
浏览文件 @
244297e8
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef BILINEAR_INTERP_OP
#include "operators/kernel/bilinear_interp_kernel.h"
namespace
paddle_mobile
{
namespace
operators
{
template
<
>
bool
BilinearInterpKernel
<
X86
,
float
>::
Init
(
BilinearInterpParam
<
X86
>
*
param
)
{
return
true
;
}
template
<
>
void
BilinearInterpKernel
<
X86
,
float
>::
Compute
(
const
BilinearInterpParam
<
X86
>
&
param
)
const
{
// TODO
}
}
// namespace operators
}
// namespace paddle_mobile
#endif
src/operators/kernel/x86/box_coder_kernel.cpp
已删除
100644 → 0
浏览文件 @
244297e8
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef BOXCODER_OP
#include "operators/kernel/box_coder_kernel.h"
namespace
paddle_mobile
{
namespace
operators
{
template
<
>
bool
BoxCoderKernel
<
X86
,
float
>::
Init
(
BoxCoderParam
<
X86
>
*
param
)
{
return
true
;
}
template
<
>
void
BoxCoderKernel
<
X86
,
float
>::
Compute
(
const
BoxCoderParam
<
X86
>
&
param
)
const
{
// TODO
}
}
// namespace operators
}
// namespace paddle_mobile
#endif
src/operators/kernel/x86/concat_kernel.cpp
已删除
100644 → 0
浏览文件 @
244297e8
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef CONCAT_OP
#include "operators/kernel/concat_kernel.h"
namespace
paddle_mobile
{
namespace
operators
{
template
<
>
bool
ConcatKernel
<
X86
,
float
>::
Init
(
ConcatParam
<
X86
>
*
param
)
{
return
true
;
}
template
<
>
void
ConcatKernel
<
X86
,
float
>::
Compute
(
const
ConcatParam
<
X86
>
&
param
)
const
{
// TODO
}
}
// namespace operators
}
// namespace paddle_mobile
#endif
src/operators/kernel/x86/conv_add_add_prelu_kernel.cpp
已删除
100644 → 0
浏览文件 @
244297e8
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef FUSION_CONVADDADDPRELU_OP
#include "operators/kernel/conv_add_add_prelu_kernel.h"
namespace
paddle_mobile
{
namespace
operators
{
template
<
>
bool
ConvAddAddPReluKernel
<
X86
,
float
>::
Init
(
FusionConvAddAddPReluParam
<
X86
>
*
param
)
{
return
true
;
}
template
<
>
void
ConvAddAddPReluKernel
<
X86
,
float
>::
Compute
(
const
FusionConvAddAddPReluParam
<
X86
>
&
param
)
const
{
// TODO
}
template
class
ConvAddAddPReluKernel
<
X86
,
float
>;
}
// namespace operators
}
// namespace paddle_mobile
#endif
src/operators/kernel/x86/conv_add_bn_relu_kernel.cpp
已删除
100644 → 0
浏览文件 @
244297e8
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef FUSION_CONVADDBNRELU_OP
#include "operators/kernel/conv_add_bn_relu_kernel.h"
namespace
paddle_mobile
{
namespace
operators
{
template
<
>
bool
ConvAddBNReluKernel
<
X86
,
float
>::
Init
(
FusionConvAddBNReluParam
<
X86
>
*
param
)
{
const
Tensor
*
mean
=
param
->
InputMean
();
const
Tensor
*
variance
=
param
->
InputVariance
();
const
Tensor
*
scale
=
param
->
InputScale
();
const
Tensor
*
bias
=
param
->
InputBias
();
const
float
epsilon
=
param
->
Epsilon
();
auto
mean_ptr
=
mean
->
data
<
float
>
();
auto
variance_ptr
=
variance
->
data
<
float
>
();
auto
scale_ptr
=
scale
->
data
<
float
>
();
auto
bias_ptr
=
bias
->
data
<
float
>
();
const
int
C
=
mean
->
numel
();
float
inv_std_ptr
[
C
];
for
(
int
i
=
0
;
i
<
C
;
i
++
)
{
inv_std_ptr
[
i
]
=
1
/
static_cast
<
float
>
(
pow
((
variance_ptr
[
i
]
+
epsilon
),
0.5
));
}
Tensor
*
new_scale
=
new
Tensor
();
Tensor
*
new_bias
=
new
Tensor
();
auto
new_scale_ptr
=
new_scale
->
mutable_data
<
float
>
({
C
});
auto
new_bias_ptr
=
new_bias
->
mutable_data
<
float
>
({
C
});
for
(
int
i
=
0
;
i
<
C
;
i
++
)
{
new_scale_ptr
[
i
]
=
inv_std_ptr
[
i
]
*
scale_ptr
[
i
];
new_bias_ptr
[
i
]
=
bias_ptr
[
i
]
-
mean_ptr
[
i
]
*
inv_std_ptr
[
i
]
*
scale_ptr
[
i
];
}
param
->
SetNewScale
(
new_scale
);
param
->
SetNewBias
(
new_bias
);
return
true
;
}
template
<
>
void
ConvAddBNReluKernel
<
X86
,
float
>::
Compute
(
const
FusionConvAddBNReluParam
<
X86
>
&
param
)
const
{
// TODO
}
template
class
ConvAddBNReluKernel
<
X86
,
float
>;
}
// namespace operators
}
// namespace paddle_mobile
#endif
src/operators/kernel/x86/conv_add_kernel.cpp
已删除
100644 → 0
浏览文件 @
244297e8
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef FUSION_CONVADD_OP
#include "operators/kernel/conv_add_kernel.h"
namespace
paddle_mobile
{
namespace
operators
{
template
<
>
bool
ConvAddKernel
<
X86
,
float
>::
Init
(
FusionConvAddParam
<
X86
>
*
param
)
{
return
true
;
}
template
<
>
void
ConvAddKernel
<
X86
,
float
>::
Compute
(
const
FusionConvAddParam
<
X86
>
&
param
)
const
{
// TODO
}
template
class
ConvAddKernel
<
X86
,
float
>;
}
// namespace operators
}
// namespace paddle_mobile
#endif
src/operators/kernel/x86/conv_add_prelu_kernel.cpp
已删除
100644 → 0
浏览文件 @
244297e8
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef FUSION_CONVADDPRELU_OP
#include "operators/kernel/conv_add_prelu_kernel.h"
namespace
paddle_mobile
{
namespace
operators
{
template
<
>
bool
ConvAddPReluKernel
<
X86
,
float
>::
Init
(
FusionConvAddPReluParam
<
X86
>
*
param
)
{
return
true
;
}
template
<
>
void
ConvAddPReluKernel
<
X86
,
float
>::
Compute
(
const
FusionConvAddPReluParam
<
X86
>
&
param
)
const
{
// TODO
}
template
class
ConvAddPReluKernel
<
X86
,
float
>;
}
// namespace operators
}
// namespace paddle_mobile
#endif
src/operators/kernel/x86/conv_add_relu_kernel.cpp
已删除
100644 → 0
浏览文件 @
244297e8
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifdef FUSION_CONVADDRELU_OP

#include "operators/kernel/conv_add_relu_kernel.h"

namespace paddle_mobile {
namespace operators {

// Backend setup hook for the fused conv+add+relu kernel on x86.
// No precomputation is required.
template <>
bool ConvAddReluKernel<X86, float>::Init(FusionConvAddReluParam<X86> *param) {
  return true;
}

// Executes the fused op. The x86 path is an unimplemented stub.
template <>
void ConvAddReluKernel<X86, float>::Compute(
    const FusionConvAddReluParam<X86> &param) const {
  // TODO
}

template class ConvAddReluKernel<X86, float>;

}  // namespace operators
}  // namespace paddle_mobile

#endif  // FUSION_CONVADDRELU_OP
src/operators/kernel/x86/conv_bn_add_relu_kernel.cpp
已删除
100644 → 0
浏览文件 @
244297e8
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifdef FUSION_CONVBNADDRELU_OP

#include "operators/kernel/conv_bn_add_relu_kernel.h"

namespace paddle_mobile {
namespace operators {

// Folds the batch-norm statistics into a per-channel scale/shift pair so
// Compute only has to apply  y = new_scale * x + new_bias:
//   new_scale[c] = scale[c] / sqrt(var[c] + eps)
//   new_bias[c]  = bias[c] - mean[c] * new_scale[c]
// Fix: the original buffered 1/sqrt(var+eps) in a C-style VLA
// `float inv_std_ptr[C]` (not legal C++, stack-overflow risk for large C);
// the two loops are fused here so no temporary buffer is needed.
template <>
bool ConvBNAddReluKernel<X86, float>::Init(
    FusionConvBNAddReluParam<X86> *param) {
  const Tensor *mean = param->InputMean();
  const Tensor *variance = param->InputVariance();
  const Tensor *scale = param->InputScale();
  const Tensor *bias = param->InputBias();
  const float epsilon = param->Epsilon();

  auto mean_ptr = mean->data<float>();
  auto variance_ptr = variance->data<float>();
  auto scale_ptr = scale->data<float>();
  auto bias_ptr = bias->data<float>();

  const int C = mean->numel();
  Tensor *new_scale = new Tensor();
  Tensor *new_bias = new Tensor();
  auto new_scale_ptr = new_scale->mutable_data<float>({C});
  auto new_bias_ptr = new_bias->mutable_data<float>({C});
  for (int i = 0; i < C; i++) {
    const float inv_std =
        1 / static_cast<float>(pow((variance_ptr[i] + epsilon), 0.5));
    new_scale_ptr[i] = inv_std * scale_ptr[i];
    new_bias_ptr[i] = bias_ptr[i] - mean_ptr[i] * inv_std * scale_ptr[i];
  }
  // NOTE(review): new_scale/new_bias appear to transfer ownership to the
  // param here — confirm SetNewScale/SetNewBias take ownership, otherwise
  // these heap allocations leak.
  param->SetNewScale(new_scale);
  param->SetNewBias(new_bias);
  return true;
}

// Executes the fused op. The x86 path is an unimplemented stub.
template <>
void ConvBNAddReluKernel<X86, float>::Compute(
    const FusionConvBNAddReluParam<X86> &param) const {
  // TODO
}

template class ConvBNAddReluKernel<X86, float>;

}  // namespace operators
}  // namespace paddle_mobile

#endif  // FUSION_CONVBNADDRELU_OP
src/operators/kernel/x86/conv_bn_relu_kernel.cpp
已删除
100644 → 0
浏览文件 @
244297e8
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifdef FUSION_CONVBNRELU_OP

#include "operators/kernel/conv_bn_relu_kernel.h"

namespace paddle_mobile {
namespace operators {

// Folds the batch-norm statistics into a per-channel scale/shift pair:
//   new_scale[c] = scale[c] / sqrt(var[c] + eps)
//   new_bias[c]  = bias[c] - mean[c] * new_scale[c]
// Fix: the original buffered the inverse std-dev in a C-style VLA
// `float inv_std_ptr[C]` (not legal C++, stack-overflow risk for large C);
// the two loops are fused here so no temporary buffer is needed.
template <>
bool ConvBNReluKernel<X86, float>::Init(FusionConvBNReluParam<X86> *param) {
  const Tensor *mean = param->InputMean();
  const Tensor *variance = param->InputVariance();
  const Tensor *scale = param->InputScale();
  const Tensor *bias = param->InputBias();
  const float epsilon = param->Epsilon();
  // DLOG << "variance: " << *variance;

  auto mean_ptr = mean->data<float>();
  auto variance_ptr = variance->data<float>();
  auto scale_ptr = scale->data<float>();
  auto bias_ptr = bias->data<float>();

  const int C = mean->numel();
  Tensor *new_scale = new Tensor();
  Tensor *new_bias = new Tensor();
  auto new_scale_ptr = new_scale->mutable_data<float>({C});
  auto new_bias_ptr = new_bias->mutable_data<float>({C});
  for (int i = 0; i < C; i++) {
    const float inv_std =
        1 / static_cast<float>(pow((variance_ptr[i] + epsilon), 0.5));
    new_scale_ptr[i] = inv_std * scale_ptr[i];
    new_bias_ptr[i] = bias_ptr[i] - mean_ptr[i] * inv_std * scale_ptr[i];
  }
  // NOTE(review): new_scale/new_bias appear to transfer ownership to the
  // param here — confirm SetNewScale/SetNewBias take ownership, otherwise
  // these heap allocations leak.
  param->SetNewScale(new_scale);
  param->SetNewBias(new_bias);
  return true;
}

// Executes the fused op. The x86 path is an unimplemented stub.
template <>
void ConvBNReluKernel<X86, float>::Compute(
    const FusionConvBNReluParam<X86> &param) const {
  // TODO
}

template class ConvBNReluKernel<X86, float>;

}  // namespace operators
}  // namespace paddle_mobile

#endif  // FUSION_CONVBNRELU_OP
src/operators/kernel/x86/conv_kernel.cpp
已删除
100644 → 0
浏览文件 @
244297e8
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifdef CONV_OP

#include "operators/kernel/conv_kernel.h"

namespace paddle_mobile {
namespace operators {

// Backend setup hook for the x86 float convolution kernel.
// No precomputation is required.
template <>
bool ConvKernel<X86, float>::Init(ConvParam<X86> *param) {
  return true;
}

// Runs the convolution. The x86 path is an unimplemented stub.
template <>
void ConvKernel<X86, float>::Compute(const ConvParam<X86> &param) const {
  // TODO
}

template class ConvKernel<X86, float>;

}  // namespace operators
}  // namespace paddle_mobile

#endif  // CONV_OP
src/operators/kernel/x86/conv_transpose_kernel.cpp
已删除
100644 → 0
浏览文件 @
244297e8
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifdef CONV_TRANSPOSE

#include "operators/kernel/conv_transpose_kernel.h"

namespace paddle_mobile {
namespace operators {

// Backend setup hook for the x86 transposed-convolution kernel.
// No precomputation is required.
template <>
bool ConvTransposeKernel<X86, float>::Init(ConvTransposeParam<X86> *param) {
  return true;
}

// Runs the transposed convolution. The x86 path is an unimplemented stub.
template <>
void ConvTransposeKernel<X86, float>::Compute(
    const ConvTransposeParam<X86> &param) const {
  // TODO
}

template class ConvTransposeKernel<X86, float>;

}  // namespace operators
}  // namespace paddle_mobile

#endif  // CONV_TRANSPOSE
src/operators/kernel/x86/crf_kernel.cpp
已删除
100644 → 0
浏览文件 @
244297e8
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifdef CRF_OP

#include "operators/kernel/crf_kernel.h"
#include "common/types.h"

namespace paddle_mobile {
namespace operators {

// Backend setup hook for the x86 CRF decoding kernel.
// No precomputation is required.
template <>
bool CrfKernel<X86, float>::Init(CrfParam<X86> *param) {
  return true;
}

// Runs CRF decoding. The x86 path is an unimplemented stub.
template <>
void CrfKernel<X86, float>::Compute(const CrfParam<X86> &param) const {
  // TODO
}

template class CrfKernel<X86, float>;

}  // namespace operators
}  // namespace paddle_mobile

#endif  // CRF_OP
src/operators/kernel/x86/depthwise_conv_kernel.cpp
已删除
100644 → 0
浏览文件 @
244297e8
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifdef DEPTHWISECONV_OP

#include "operators/kernel/depthwise_conv_kernel.h"

namespace paddle_mobile {
namespace operators {

// Backend setup hook for the x86 depthwise convolution kernel.
// No precomputation is required.
template <>
bool DepthwiseConvKernel<X86, float>::Init(ConvParam<X86> *param) {
  return true;
}

// Runs the depthwise convolution. The x86 path is an unimplemented stub.
template <>
void DepthwiseConvKernel<X86, float>::Compute(
    const ConvParam<X86> &param) const {
  // TODO
}

template class DepthwiseConvKernel<X86, float>;

}  // namespace operators
}  // namespace paddle_mobile

#endif  // DEPTHWISECONV_OP
src/operators/kernel/x86/dequantize_kernel.cpp
已删除
100644 → 0
浏览文件 @
244297e8
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "operators/kernel/dequantize_kernel.h"

namespace paddle_mobile {
namespace operators {

// Backend setup hook for the x86 dequantize kernel; nothing to precompute.
template <>
bool DequantizeKernel<X86, float>::Init(DequantizeParam<X86> *param) {
  return true;
}

// Converts int32 accumulator values back to float by undoing both
// quantization scales:  y[i] = x[i] / (activation_scale * weight_scale).
template <>
void DequantizeKernel<X86, float>::Compute(
    const DequantizeParam<X86> &param) const {
  const Tensor *input = param.input_;
  Tensor *output = param.out_;
  // The activation scale was recorded online during quantization; the
  // weight scale is a fixed per-model constant.
  float activation_scale = param.activation_scale_->data<float>()[0];
  float weight_scale = param.weight_scale_;
  const int32_t *x = input->data<const int32_t>();
  float *y = output->mutable_data<float>();
  for (size_t i = 0; i < output->numel(); ++i) {
    y[i] = x[i] / activation_scale / weight_scale;
  }
}

// Fix: the original closed the namespaces with the comments swapped
// (`// namespace paddle_mobile` on the inner `operators` close).
}  // namespace operators
}  // namespace paddle_mobile
src/operators/kernel/x86/dropout_kernel.cpp
已删除
100644 → 0
浏览文件 @
244297e8
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifdef DROPOUT_OP

#include "operators/kernel/dropout_kernel.h"
#include <operators/math/transform.h>

namespace paddle_mobile {
namespace operators {

// Backend setup hook for the x86 dropout kernel.
// No precomputation is required.
template <>
bool DropoutKernel<X86, float>::Init(DropoutParam<X86> *para) {
  return true;
}

// Runs dropout. The x86 path is an unimplemented stub.
template <>
void DropoutKernel<X86, float>::Compute(const DropoutParam<X86> &param) const {
  // TODO
}

}  // namespace operators
}  // namespace paddle_mobile

#endif  // DROPOUT_OP
src/operators/kernel/x86/dwconv_bn_relu_kernel.cpp
已删除
100644 → 0
浏览文件 @
244297e8
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifdef FUSION_DWCONVBNRELU_OP

#include "operators/kernel/dwconv_bn_relu_kernel.h"

namespace paddle_mobile {
namespace operators {

// Folds the batch-norm statistics into a per-channel scale/shift pair:
//   new_scale[c] = scale[c] / sqrt(var[c] + eps)
//   new_bias[c]  = bias[c] - mean[c] * new_scale[c]
// Fix: the original buffered the inverse std-dev in a C-style VLA
// `float inv_std_ptr[C]` (not legal C++, stack-overflow risk for large C);
// the two loops are fused here so no temporary buffer is needed.
template <>
bool DWConvBNReluKernel<X86, float>::Init(
    FusionDWConvBNReluParam<X86> *param) {
  const Tensor *mean = param->InputMean();
  const Tensor *variance = param->InputVariance();
  const Tensor *scale = param->InputScale();
  const Tensor *bias = param->InputBias();
  const float epsilon = param->Epsilon();

  auto mean_ptr = mean->data<float>();
  auto variance_ptr = variance->data<float>();
  auto scale_ptr = scale->data<float>();
  auto bias_ptr = bias->data<float>();

  const int C = mean->numel();
  Tensor *new_scale = new Tensor();
  Tensor *new_bias = new Tensor();
  auto new_scale_ptr = new_scale->mutable_data<float>({C});
  auto new_bias_ptr = new_bias->mutable_data<float>({C});
  for (int i = 0; i < C; i++) {
    const float inv_std =
        1 / static_cast<float>(pow((variance_ptr[i] + epsilon), 0.5));
    new_scale_ptr[i] = inv_std * scale_ptr[i];
    new_bias_ptr[i] = bias_ptr[i] - mean_ptr[i] * inv_std * scale_ptr[i];
  }
  // NOTE(review): new_scale/new_bias appear to transfer ownership to the
  // param here — confirm SetNewScale/SetNewBias take ownership, otherwise
  // these heap allocations leak.
  param->SetNewScale(new_scale);
  param->SetNewBias(new_bias);
  return true;
}

// Executes the fused op. The x86 path is an unimplemented stub.
template <>
void DWConvBNReluKernel<X86, float>::Compute(
    const FusionDWConvBNReluParam<X86> &param) const {
  // TODO
}

template class DWConvBNReluKernel<X86, float>;

}  // namespace operators
}  // namespace paddle_mobile

#endif  // FUSION_DWCONVBNRELU_OP
src/operators/kernel/x86/elementwise_add_kernel.cpp
已删除
100644 → 0
浏览文件 @
244297e8
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifdef ELEMENTWISEADD_OP

#include "operators/kernel/elementwise_add_kernel.h"

namespace paddle_mobile {
namespace operators {

// Backend setup hook for the x86 elementwise-add kernel.
// No precomputation is required.
template <>
bool ElementwiseAddKernel<X86, float>::Init(ElementwiseAddParam<X86> *param) {
  return true;
}

// Runs elementwise addition. The x86 path is an unimplemented stub.
template <>
void ElementwiseAddKernel<X86, float>::Compute(
    const ElementwiseAddParam<X86> &param) const {
  // TODO
}

}  // namespace operators
}  // namespace paddle_mobile

#endif  // ELEMENTWISEADD_OP
src/operators/kernel/x86/flatten_kernel.cpp
已删除
100644 → 0
浏览文件 @
244297e8
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifdef FLATTEN_OP

#include "operators/kernel/flatten_kernel.h"

namespace paddle_mobile {
namespace operators {

// Backend setup hook for the x86 flatten kernel.
// No precomputation is required.
template <>
bool FlattenKernel<X86, float>::Init(FlattenParam<X86> *param) {
  return true;
}

// Runs flatten. The x86 path is an unimplemented stub.
template <>
void FlattenKernel<X86, float>::Compute(const FlattenParam<X86> &param) const {
  // TODO
}

}  // namespace operators
}  // namespace paddle_mobile

#endif  // FLATTEN_OP
src/operators/kernel/x86/fusion_fc_kernel.cpp
已删除
100644 → 0
浏览文件 @
244297e8
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifdef FUSION_FC_OP

#include "operators/kernel/fusion_fc_kernel.h"

namespace paddle_mobile {
namespace operators {

// Backend setup hook for the x86 fused fully-connected kernel.
// No precomputation is required.
template <>
bool FusionFcKernel<X86, float>::Init(FusionFcParam<X86> *param) {
  return true;
}

// Runs the fused FC op. The x86 path is an unimplemented stub.
template <>
void FusionFcKernel<X86, float>::Compute(
    const FusionFcParam<X86> &param) const {
  // TODO
}

}  // namespace operators
}  // namespace paddle_mobile

#endif  // FUSION_FC_OP
src/operators/kernel/x86/gru_kernel.cpp
已删除
100644 → 0
浏览文件 @
244297e8
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifdef GRU_OP

#include "operators/kernel/gru_kernel.h"

namespace paddle_mobile {
namespace operators {

// Backend setup hook for the x86 GRU kernel.
// No precomputation is required.
template <>
bool GruKernel<X86, float>::Init(GruParam<X86> *param) {
  return true;
}

// Runs the GRU op. The x86 path is an unimplemented stub.
template <>
void GruKernel<X86, float>::Compute(const GruParam<X86> &param) const {
  // TODO
}

template class GruKernel<X86, float>;

}  // namespace operators
}  // namespace paddle_mobile

#endif  // GRU_OP
src/operators/kernel/x86/lrn_kernel.cpp
已删除
100644 → 0
浏览文件 @
244297e8
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifdef LRN_OP

#include "operators/kernel/lrn_kernel.h"

namespace paddle_mobile {
namespace operators {

// Backend setup hook for the x86 LRN kernel.
// No precomputation is required.
template <>
bool LrnKernel<X86, float>::Init(LrnParam<X86> *param) {
  return true;
}

// Runs local response normalization. The x86 path is an unimplemented stub.
template <>
void LrnKernel<X86, float>::Compute(const LrnParam<X86> &param) const {
  // TODO
}

}  // namespace operators
}  // namespace paddle_mobile

#endif  // LRN_OP
src/operators/kernel/x86/mul_kernel.cpp
已删除
100644 → 0
浏览文件 @
244297e8
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifdef MUL_OP

#include "operators/kernel/mul_kernel.h"

namespace paddle_mobile {
namespace operators {

// Backend setup hook for the x86 matrix-multiply kernel.
// No precomputation is required.
template <>
bool MulKernel<X86, float>::Init(MulParam<X86> *param) {
  return true;
}

// Runs the mul op. The x86 path is an unimplemented stub.
template <>
void MulKernel<X86, float>::Compute(const MulParam<X86> &param) const {
  // TODO
}

}  // namespace operators
}  // namespace paddle_mobile

#endif  // MUL_OP
src/operators/kernel/x86/multiclass_nms_kernel.cpp
已删除
100644 → 0
浏览文件 @
244297e8
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifdef MULTICLASSNMS_OP

#include "operators/kernel/multiclass_nms_kernel.h"

namespace paddle_mobile {
namespace operators {

// Backend setup hook for the x86 multiclass-NMS kernel.
// No precomputation is required.
template <>
bool MultiClassNMSKernel<X86, float>::Init(MultiClassNMSParam<X86> *param) {
  return true;
}

// Runs multiclass NMS. The x86 path is an unimplemented stub.
template <>
void MultiClassNMSKernel<X86, float>::Compute(
    const MultiClassNMSParam<X86> &param) const {
  // TODO
}

}  // namespace operators
}  // namespace paddle_mobile

#endif  // MULTICLASSNMS_OP
src/operators/kernel/x86/pool_kernel.cpp
已删除
100644 → 0
浏览文件 @
244297e8
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifdef POOL_OP

#include "operators/kernel/pool_kernel.h"

namespace paddle_mobile {
namespace operators {

// Backend setup hook for the x86 pooling kernel.
// No precomputation is required.
template <>
bool PoolKernel<X86, float>::Init(PoolParam<X86> *param) {
  return true;
}

// Runs pooling. The x86 path is an unimplemented stub.
template <>
void PoolKernel<X86, float>::Compute(const PoolParam<X86> &param) const {
  // TODO
}

}  // namespace operators
}  // namespace paddle_mobile

#endif  // POOL_OP
src/operators/kernel/x86/prelu_kernel.cpp
已删除
100644 → 0
浏览文件 @
244297e8
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifdef PRELU_OP

#include "operators/kernel/prelu_kernel.h"

namespace paddle_mobile {
namespace operators {

// Runs PReLU. The x86 path is an unimplemented stub.
template <>
void PReluKernel<X86, float>::Compute(const PReluParam<X86> &param) const {
  // TODO
}

}  // namespace operators
}  // namespace paddle_mobile

#endif  // PRELU_OP
src/operators/kernel/x86/prior_box_kernel.cpp
已删除
100644 → 0
浏览文件 @
244297e8
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifdef PRIORBOX_OP

#include "operators/kernel/prior_box_kernel.h"

namespace paddle_mobile {
namespace operators {

// Backend setup hook for the x86 prior-box kernel.
// No precomputation is required.
template <>
bool PriorBoxKernel<X86, float>::Init(PriorBoxParam<X86> *param) {
  return true;
}

// Generates prior boxes. The x86 path is an unimplemented stub.
template <>
void PriorBoxKernel<X86, float>::Compute(
    const PriorBoxParam<X86> &param) const {
  // TODO
}

}  // namespace operators
}  // namespace paddle_mobile

#endif  // PRIORBOX_OP
src/operators/kernel/x86/quantize_kernel.cpp
已删除
100644 → 0
浏览文件 @
244297e8
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifdef PADDLE_MOBILE_X86

#include "operators/kernel/quantize_kernel.h"
#include <cmath>
#include <limits>

namespace paddle_mobile {
namespace operators {

// Returns the largest absolute value in |input|; used to derive the online
// quantization scale.
static float find_abs_max(const Tensor *input) {
  float max_abs = float(0);
  const float *x = input->data<const float>();
  for (size_t i = 0; i < input->numel(); ++i) {
    // std::abs picks the float overload; unqualified abs could bind to the
    // integer version and truncate.
    float value = std::abs(x[i]);
    if (value > max_abs) {
      max_abs = value;
    }
  }
  return max_abs;
}

// Quantizes x*scale to int8 with round-half-to-even (banker's rounding):
// values exactly halfway between two integers round to the even neighbour.
static void quantize_round_to_even(const Tensor *input, const float scale,
                                   Tensor *output) {
  const float *x = input->data<const float>();
  int8_t *y = output->data<int8_t>();
  for (size_t i = 0; i < input->numel(); ++i) {
    float value = x[i] * scale;
    long long quant = llround(value);
    if (std::abs(std::abs(std::round(value) - value) - 0.5) > 0) {
      // Not exactly halfway: llround already gave the nearest integer.
      y[i] = quant;
    } else if (std::abs(quant) % 2 == 0) {
      // Halfway and llround landed on an even value: keep it.
      y[i] = quant;
    } else {
      // Halfway on an odd value: step toward zero to reach the even
      // neighbour. Fix: the original `quant + (quant > 0) ? -1 : 1`
      // parses as `(quant + (quant > 0)) ? -1 : 1` because ?: binds
      // looser than +, so it always stored +/-1 instead of the
      // adjusted quantized value.
      y[i] = quant + ((quant > 0) ? -1 : 1);
    }
  }
}

// Quantizes x*scale to int8 by truncating toward zero.
static void quantize_round_to_zero(const Tensor *input, const float scale,
                                   Tensor *output) {
  const float *x = input->data<const float>();
  int8_t *y = output->data<int8_t>();
  for (size_t i = 0; i < input->numel(); ++i) {
    y[i] = trunc(x[i] * scale);
  }
}

// Quantizes x*scale to int8 with ordinary round-half-away-from-zero.
static void quantize_round_to_nearest(const Tensor *input, const float scale,
                                      Tensor *output) {
  const float *x = input->data<const float>();
  int8_t *y = output->data<int8_t>();
  for (size_t i = 0; i < input->numel(); ++i) {
    y[i] = round(x[i] * scale);
  }
}

// Backend setup hook; nothing to precompute.
template <>
bool QuantizeKernel<X86, float>::Init(QuantizeParam<X86> *param) {
  return true;
}

// Quantizes the input to int8: picks the scale (static, or online from the
// max-abs of the input), records it in online_scale_, then dispatches on
// the configured rounding mode.
template <>
void QuantizeKernel<X86, float>::Compute(
    const QuantizeParam<X86> &param) const {
  float max_abs = 0.f;
  const Tensor *input = param.input_;
  Tensor *output = param.out_;
  if (param.is_static_) {
    max_abs = param.static_scale_;
  } else {
    max_abs = find_abs_max(input);
  }
  // Guard against a zero scale (all-zero input) before dividing.
  if (max_abs < std::numeric_limits<float>::min()) {
    max_abs = std::numeric_limits<float>::min();
  }
  // only support int8 currently
  float online_scale = 127 / max_abs;
  param.online_scale_->mutable_data<float>()[0] = online_scale;
  switch (param.round_type_) {
    case ROUND_NEAREST_TO_EVEN:
      quantize_round_to_even(input, online_scale, output);
      break;
    case ROUND_NEAREST_TOWARDS_ZERO:
      quantize_round_to_zero(input, online_scale, output);
      break;
    case ROUND_NEAREST_AWAY_ZERO:
      quantize_round_to_nearest(input, online_scale, output);
      // Fix: the original omitted this break, falling through to the
      // error-logging default on every AWAY_ZERO call.
      break;
    default:
      LOG(kLOG_ERROR) << "round type is not supported.";
      break;
  }
}

// Fix: the original closed the namespaces with the comments swapped.
}  // namespace operators
}  // namespace paddle_mobile

#endif  // PADDLE_MOBILE_X86
src/operators/kernel/x86/relu_kernel.cpp
已删除
100644 → 0
浏览文件 @
244297e8
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifdef RELU_OP

#include "operators/kernel/relu_kernel.h"

namespace paddle_mobile {
namespace operators {

// Backend setup hook for the x86 ReLU kernel.
// No precomputation is required.
template <>
bool ReluKernel<X86, float>::Init(ReluParam<X86> *param) {
  return true;
}

// Runs ReLU. The x86 path is an unimplemented stub.
template <>
void ReluKernel<X86, float>::Compute(const ReluParam<X86> &param) const {
  // TODO
}

}  // namespace operators
}  // namespace paddle_mobile

#endif  // RELU_OP
src/operators/kernel/x86/reshape_kernel.cpp
已删除
100644 → 0
浏览文件 @
244297e8
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifdef RESHAPE_OP

#include "operators/kernel/reshape_kernel.h"

namespace paddle_mobile {
namespace operators {

// Backend setup hook for the x86 reshape kernel.
// No precomputation is required.
template <>
bool ReshapeKernel<X86, float>::Init(ReshapeParam<X86> *param) {
  return true;
}

// Runs reshape. The x86 path is an unimplemented stub.
template <>
void ReshapeKernel<X86, float>::Compute(const ReshapeParam<X86> &param) const {
  // TODO
}

}  // namespace operators
}  // namespace paddle_mobile

#endif  // RESHAPE_OP
src/operators/kernel/x86/resize_kernel.cpp
已删除
100644 → 0
浏览文件 @
244297e8
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifdef RESIZE_OP

#include "operators/kernel/resize_kernel.h"
#include <cmath>

namespace paddle_mobile {
namespace operators {

// Runs resize. The x86 path is an unimplemented stub.
template <>
void ResizeKernel<X86, float>::Compute(const ResizeParam<X86> &param) const {
  // TODO
}

}  // namespace operators
}  // namespace paddle_mobile

#endif  // RESIZE_OP
src/operators/kernel/x86/scale_kernel.cpp
已删除
100644 → 0
浏览文件 @
244297e8
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef SCALE_OP

#include "operators/kernel/scale_kernel.h"

namespace paddle_mobile {
namespace operators {

/// X86 scale kernel: out[i] = in[i] * scale (+ bias), element-wise.
///
/// Scale/bias indexing depends on the input rank:
///   - 1-D (W) and 2-D (H, W): indexed by the innermost (width) position.
///   - 3-D (C, H, W) and 4-D (N, C, H, W): indexed by channel.
/// The output tensor is assumed to already have the input's shape
/// (mutable_data<float>() is called on it without a Resize here).
/// Unsupported ranks (> 4) fall through to the default case and leave the
/// output untouched.
template <>
void ScaleKernel<X86, float>::Compute(const ScaleParam<X86> &param) const {
  const auto *input_x = param.InputX();
  auto *input_x_ptr = input_x->data<float>();
  auto *out = param.Out();
  auto *out_ptr = out->mutable_data<float>();
  const vector<float> scales = param.Scales();
  bool has_bias = param.HasBias();
  const int dim_size = input_x->dims().size();
  switch (dim_size) {
    case 1: {
      const int input_width = input_x->dims()[0];
      if (has_bias) {
        const vector<float> biases = param.Biases();
#pragma omp parallel for
        for (int w = 0; w < input_width; w++) {
          out_ptr[w] = input_x_ptr[w] * scales[w] + biases[w];
        }
      } else {
#pragma omp parallel for
        for (int w = 0; w < input_width; w++) {
          out_ptr[w] = input_x_ptr[w] * scales[w];
        }
      }
    } break;
    case 2: {
      const int input_height = input_x->dims()[0];
      const int input_width = input_x->dims()[1];
      if (has_bias) {
        const vector<float> biases = param.Biases();
#pragma omp parallel for
        for (int h = 0; h < input_height; ++h) {
          const float *iptr = input_x_ptr + h * input_width;
          float *optr = out_ptr + h * input_width;
          // Scale/bias are per-column (innermost dimension).
          for (int w = 0; w < input_width; ++w) {
            optr[w] = iptr[w] * scales[w] + biases[w];
          }
        }
      } else {
#pragma omp parallel for
        for (int h = 0; h < input_height; ++h) {
          const float *iptr = input_x_ptr + h * input_width;
          float *optr = out_ptr + h * input_width;
          for (int w = 0; w < input_width; ++w) {
            optr[w] = iptr[w] * scales[w];
          }
        }
      }
    } break;
    case 3: {
      const int chan_size = input_x->dims()[0];
      const int input_height = input_x->dims()[1];
      const int input_width = input_x->dims()[2];
      int size = input_width * input_height;
      if (has_bias) {
        const vector<float> biases = param.Biases();
#pragma omp parallel for
        for (int c = 0; c < chan_size; ++c) {
          const float *iptr = input_x_ptr + c * size;
          float *optr = out_ptr + c * size;
          // Scale/bias are per-channel; one value applied to the whole plane.
          for (int i = 0; i < size; ++i) {
            optr[i] = iptr[i] * scales[c] + biases[c];
          }
        }
      } else {
#pragma omp parallel for
        for (int c = 0; c < chan_size; ++c) {
          const float *iptr = input_x_ptr + c * size;
          float *optr = out_ptr + c * size;
          for (int i = 0; i < size; ++i) {
            optr[i] = iptr[i] * scales[c];
          }
        }
      }
    } break;
    case 4: {
      const int batch_size = input_x->dims()[0];
      // Fixed: channel/height/width previously read dims()[0], dims()[1] and
      // dims()[2], which confused N with C (and shifted H/W) for an NCHW
      // 4-D tensor.
      const int chan_size = input_x->dims()[1];
      const int input_height = input_x->dims()[2];
      const int input_width = input_x->dims()[3];
      int size = input_width * input_height;
      if (has_bias) {
        const vector<float> biases = param.Biases();
#pragma omp parallel for
        for (int b = 0; b < batch_size; ++b) {
          for (int c = 0; c < chan_size; ++c) {
            // Fixed: the plane offset was `b * c * size`, which collapses to
            // 0 for c == 0 in every batch and is not a valid NCHW offset.
            // The correct linear offset of plane (b, c) is
            // (b * chan_size + c) * size.
            const float *iptr = input_x_ptr + (b * chan_size + c) * size;
            float *optr = out_ptr + (b * chan_size + c) * size;
            for (int i = 0; i < size; ++i) {
              optr[i] = iptr[i] * scales[c] + biases[c];
            }
          }
        }
      } else {
#pragma omp parallel for
        for (int b = 0; b < batch_size; ++b) {
          for (int c = 0; c < chan_size; ++c) {
            const float *iptr = input_x_ptr + (b * chan_size + c) * size;
            float *optr = out_ptr + (b * chan_size + c) * size;
            for (int i = 0; i < size; ++i) {
              optr[i] = iptr[i] * scales[c];
            }
          }
        }
      }
    } break;
    default:
      // Ranks other than 1-4 are not supported; output is left unmodified.
      break;
  }
}

}  // namespace operators
}  // namespace paddle_mobile

#endif
src/operators/kernel/x86/shape_kernel.cpp
已删除
100644 → 0
浏览文件 @
244297e8
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef SHAPE_OP
#include "operators/kernel/shape_kernel.h"
namespace
paddle_mobile
{
namespace
operators
{
template
<
>
bool
ShapeKernel
<
X86
,
float
>::
Init
(
ShapeParam
<
X86
>
*
param
)
{
return
true
;
}
template
<
>
void
ShapeKernel
<
X86
,
float
>::
Compute
(
const
ShapeParam
<
X86
>
&
param
)
const
{
// TODO
}
}
// namespace operators
}
// namespace paddle_mobile
#endif
src/operators/kernel/x86/sigmoid_kernel.cpp
已删除
100644 → 0
浏览文件 @
244297e8
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef SIGMOID_OP
#include "operators/kernel/sigmoid_kernel.h"
#include <cmath>
namespace
paddle_mobile
{
namespace
operators
{
using
framework
::
DDim
;
using
framework
::
Tensor
;
template
<
>
bool
SigmoidKernel
<
X86
,
float
>::
Init
(
SigmoidParam
<
X86
>
*
param
)
{
return
true
;
}
template
<
>
void
SigmoidKernel
<
X86
,
float
>::
Compute
(
const
SigmoidParam
<
X86
>
&
param
)
const
{
// TODO
}
template
class
SigmoidKernel
<
X86
,
float
>;
}
// namespace operators
}
// namespace paddle_mobile
#endif
src/operators/kernel/x86/softmax_kernel.cpp
已删除
100644 → 0
浏览文件 @
244297e8
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef SOFTMAX_OP
#include "../softmax_kernel.h"
#include "operators/math/softmax.h"
namespace
paddle_mobile
{
namespace
operators
{
template
<
>
bool
SoftmaxKernel
<
X86
,
float
>::
Init
(
SoftmaxParam
<
X86
>
*
param
)
{
return
true
;
}
template
<
>
void
SoftmaxKernel
<
X86
,
float
>::
Compute
(
const
SoftmaxParam
<
X86
>
&
param
)
const
{
// TODO
}
template
class
SoftmaxKernel
<
X86
,
float
>;
}
// namespace operators
}
// namespace paddle_mobile
#endif
src/operators/kernel/x86/split_kernel.cpp
已删除
100644 → 0
浏览文件 @
244297e8
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef SPLIT_OP
#include "operators/kernel/split_kernel.h"
namespace
paddle_mobile
{
namespace
operators
{
template
<
>
bool
SplitKernel
<
X86
,
float
>::
Init
(
SplitParam
<
X86
>
*
param
)
{
return
true
;
}
template
<
>
void
SplitKernel
<
X86
,
float
>::
Compute
(
const
SplitParam
<
X86
>
&
param
)
const
{
// TODO
}
}
// namespace operators
}
// namespace paddle_mobile
#endif
src/operators/kernel/x86/transpose_kernel.cpp
已删除
100644 → 0
浏览文件 @
244297e8
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef TRANSPOSE_OP
#include "operators/kernel/transpose_kernel.h"
namespace
paddle_mobile
{
namespace
operators
{
template
<
>
bool
TransposeKernel
<
X86
,
float
>::
Init
(
TransposeParam
<
X86
>
*
param
)
{
return
true
;
}
template
<
>
void
TransposeKernel
<
X86
,
float
>::
Compute
(
const
TransposeParam
<
X86
>
&
param
)
const
{
// TODO
}
}
// namespace operators
}
// namespace paddle_mobile
#endif
src/operators/lrn_op.cpp
浏览文件 @
3c7cde0c
...
...
@@ -35,8 +35,5 @@ REGISTER_OPERATOR_CPU(lrn, ops::LrnOp);
#ifdef PADDLE_MOBILE_MALI_GPU
REGISTER_OPERATOR_MALI_GPU
(
lrn
,
ops
::
LrnOp
);
#endif
#ifdef PADDLE_MOBILE_X86
REGISTER_OPERATOR_X86
(
lrn
,
ops
::
LrnOp
);
#endif
#endif
src/operators/mul_op.cpp
浏览文件 @
3c7cde0c
...
...
@@ -55,13 +55,11 @@ void MulOp<Dtype, T>::InferShape() const {
}
// namespace paddle_mobile
namespace
ops
=
paddle_mobile
::
operators
;
#if
defined(PADDLE_MOBILE_CPU)
#if
def PADDLE_MOBILE_CPU
REGISTER_OPERATOR_CPU
(
mul
,
ops
::
MulOp
);
#elif defined(PADDLE_MOBILE_MALI_GPU)
#endif
#ifdef PADDLE_MOBILE_MALI_GPU
REGISTER_OPERATOR_MALI_GPU
(
mul
,
ops
::
MulOp
);
#elif defined(PADDLE_MOBILE_FPGA)
#else
REGISTER_OPERATOR_X86
(
mul
,
ops
::
MulOp
);
#endif
#endif
src/operators/multiclass_nms_op.cpp
浏览文件 @
3c7cde0c
...
...
@@ -39,12 +39,8 @@ void MultiClassNMSOp<Dtype, T>::InferShape() const {
}
// namespace paddle_mobile
namespace
ops
=
paddle_mobile
::
operators
;
#if
defined(PADDLE_MOBILE_CPU)
#if
def PADDLE_MOBILE_CPU
REGISTER_OPERATOR_CPU
(
multiclass_nms
,
ops
::
MultiClassNMSOp
);
#elif defined(PADDLE_MOBILE_MALI_GPU)
#elif defined(PADDLE_MOBILE_FPGA)
#else
REGISTER_OPERATOR_X86
(
multiclass_nms
,
ops
::
MultiClassNMSOp
);
#endif
#endif
src/operators/op_param.cpp
浏览文件 @
3c7cde0c
...
...
@@ -42,33 +42,28 @@ Print &operator<<(Print &printer, const ConvParam<CPU> &conv_param) {
template
class
ConvParam
<
CPU
>;
template
class
ConvParam
<
FPGA
>;
template
class
ConvParam
<
GPU_MALI
>;
template
class
ConvParam
<
X86
>;
#endif
template
class
ElementwiseAddParam
<
CPU
>;
template
class
ElementwiseAddParam
<
FPGA
>;
template
class
ElementwiseAddParam
<
GPU_MALI
>;
template
class
ElementwiseAddParam
<
X86
>;
#ifdef MUL_OP
template
class
MulParam
<
CPU
>;
template
class
MulParam
<
FPGA
>;
template
class
MulParam
<
GPU_MALI
>;
template
class
MulParam
<
X86
>;
#endif
#ifdef CONCAT_OP
template
class
ConcatParam
<
CPU
>;
template
class
ConcatParam
<
FPGA
>;
template
class
ConcatParam
<
GPU_MALI
>;
template
class
ConcatParam
<
X86
>;
#endif
#ifdef LRN_OP
template
class
LrnParam
<
CPU
>;
template
class
LrnParam
<
FPGA
>;
template
class
LrnParam
<
GPU_MALI
>;
template
class
LrnParam
<
X86
>;
#endif
#ifdef FUSION_CONVADD_OP
...
...
src/operators/pool_op.cpp
浏览文件 @
3c7cde0c
...
...
@@ -68,8 +68,5 @@ REGISTER_OPERATOR_MALI_GPU(pool2d, ops::PoolOp);
#ifdef PADDLE_MOBILE_FPGA
REGISTER_OPERATOR_FPGA
(
pool2d
,
ops
::
PoolOp
);
#endif
#ifdef PADDLE_MOBILE_X86
REGISTER_OPERATOR_X86
(
pool2d
,
ops
::
PoolOp
);
#endif
#endif
src/operators/prelu_op.cpp
浏览文件 @
3c7cde0c
...
...
@@ -39,8 +39,5 @@ REGISTER_OPERATOR_CPU(prelu, ops::PReluOp);
#ifdef PADDLE_MOBILE_MALI_GPU
REGISTER_OPERATOR_MALI_GPU
(
prelu
,
ops
::
PReluOp
);
#endif
#ifdef PADDLE_MOBILE_X86
REGISTER_OPERATOR_X86
(
prelu
,
ops
::
PReluOp
);
#endif
#endif
src/operators/prior_box_op.cpp
浏览文件 @
3c7cde0c
...
...
@@ -54,8 +54,5 @@ REGISTER_OPERATOR_CPU(prior_box, ops::PriorBoxOp);
#endif
#ifdef PADDLE_MOBILE_MALI_GPU
#endif
#ifdef PADDLE_MOBILE_X86
REGISTER_OPERATOR_X86
(
prior_box
,
ops
::
PriorBoxOp
);
#endif
#endif
src/operators/quantize_op.cpp
浏览文件 @
3c7cde0c
...
...
@@ -32,7 +32,4 @@ namespace ops = paddle_mobile::operators;
#ifdef PADDLE_MOBILE_CPU
REGISTER_OPERATOR_CPU
(
quantize
,
ops
::
QuantizeOp
);
#endif
#ifdef PADDLE_MOBILE_X86
REGISTER_OPERATOR_X86
(
quantize
,
ops
::
QuantizeOp
);
#endif
src/operators/relu_op.cpp
浏览文件 @
3c7cde0c
...
...
@@ -39,8 +39,5 @@ REGISTER_OPERATOR_CPU(relu, ops::ReluOp);
#ifdef PADDLE_MOBILE_MALI_GPU
REGISTER_OPERATOR_MALI_GPU
(
relu
,
ops
::
ReluOp
);
#endif
#ifdef PADDLE_MOBILE_X86
REGISTER_OPERATOR_X86
(
relu
,
ops
::
ReluOp
);
#endif
#endif
src/operators/reshape_op.cpp
浏览文件 @
3c7cde0c
...
...
@@ -38,8 +38,5 @@ REGISTER_OPERATOR_CPU(reshape, ops::ReshapeOp);
#ifdef PADDLE_MOBILE_MALI_GPU
REGISTER_OPERATOR_MALI_GPU
(
reshape
,
ops
::
ReshapeOp
);
#endif
#ifdef PADDLE_MOBILE_X86
REGISTER_OPERATOR_X86
(
reshape
,
ops
::
ReshapeOp
);
#endif
#endif
src/operators/resize_op.cpp
浏览文件 @
3c7cde0c
...
...
@@ -35,8 +35,5 @@ REGISTER_OPERATOR_CPU(resize, ops::ResizeOp);
#ifdef PADDLE_MOBILE_MALI_GPU
REGISTER_OPERATOR_MALI_GPU
(
resize
,
ops
::
ResizeOp
);
#endif
#ifdef PADDLE_MOBILE_X86
REGISTER_OPERATOR_X86
(
resize
,
ops
::
ResizeOp
);
#endif
#endif
src/operators/scale_op.cpp
浏览文件 @
3c7cde0c
...
...
@@ -35,8 +35,5 @@ REGISTER_OPERATOR_CPU(scale, ops::ScaleOp);
#ifdef PADDLE_MOBILE_MALI_GPU
REGISTER_OPERATOR_MALI_GPU
(
scale
,
ops
::
ScaleOp
);
#endif
#ifdef PADDLE_MOBILE_X86
REGISTER_OPERATOR_X86
(
scale
,
ops
::
ScaleOp
);
#endif
#endif
src/operators/shape_op.cpp
浏览文件 @
3c7cde0c
...
...
@@ -36,8 +36,5 @@ REGISTER_OPERATOR_CPU(shape, ops::ShapeOp);
#endif
#ifdef PADDLE_MOBILE_MALI_GPU
#endif
#ifdef PADDLE_MOBILE_X86
REGISTER_OPERATOR_X86
(
shape
,
ops
::
ShapeOp
);
#endif
#endif
src/operators/sigmoid_op.cpp
浏览文件 @
3c7cde0c
...
...
@@ -27,12 +27,8 @@ void SigmoidOp<DeviceType, T>::InferShape() const {
}
// namespace paddle_mobile
namespace
ops
=
paddle_mobile
::
operators
;
#if
defined(PADDLE_MOBILE_CPU)
#if
def PADDLE_MOBILE_CPU
REGISTER_OPERATOR_CPU
(
sigmoid
,
ops
::
SigmoidOp
);
#elif defined(PADDLE_MOBILE_MALI_GPU)
#elif defined(PADDLE_MOBILE_FPGA)
#else
REGISTER_OPERATOR_X86
(
sigmoid
,
ops
::
SigmoidOp
);
#endif
#endif
src/operators/slice_op.cpp
浏览文件 @
3c7cde0c
...
...
@@ -34,8 +34,5 @@ REGISTER_OPERATOR_CPU(slice, ops::SliceOp);
#ifdef PADDLE_MOBILE_MALI_GPU
REGISTER_OPERATOR_MALI_GPU
(
slice
,
ops
::
SliceOp
);
#endif
#ifdef PADDLE_MOBILE_X86
REGISTER_OPERATOR_X86
(
slice
,
ops
::
SliceOp
);
#endif
#endif
src/operators/softmax_op.cpp
浏览文件 @
3c7cde0c
...
...
@@ -36,8 +36,5 @@ REGISTER_OPERATOR_MALI_GPU(softmax, ops::SoftmaxOp);
#ifdef PADDLE_MOBILE_FPGA
REGISTER_OPERATOR_FPGA
(
softmax
,
ops
::
SoftmaxOp
);
#endif
#ifdef PADDLE_MOBILE_X86
REGISTER_OPERATOR_X86
(
softmax
,
ops
::
SoftmaxOp
);
#endif
#endif
src/operators/split_op.cpp
浏览文件 @
3c7cde0c
...
...
@@ -80,12 +80,8 @@ void SplitOp<DeviceType, T>::InferShape() const {
}
// namespace paddle_mobile
namespace
ops
=
paddle_mobile
::
operators
;
#if
defined(PADDLE_MOBILE_CPU)
#if
def PADDLE_MOBILE_CPU
REGISTER_OPERATOR_CPU
(
split
,
ops
::
SplitOp
);
#elif defined(PADDLE_MOBILE_MALI_GPU)
#elif defined(PADDLE_MOBILE_FPGA)
#else
REGISTER_OPERATOR_X86
(
split
,
ops
::
SplitOp
);
#endif
#endif // SPLIT_OP
src/operators/transpose_op.cpp
浏览文件 @
3c7cde0c
...
...
@@ -52,12 +52,8 @@ void TransposeOp<Dtype, T>::InferShape() const {
}
// namespace paddle_mobile
namespace
ops
=
paddle_mobile
::
operators
;
#if
defined(PADDLE_MOBILE_CPU)
#if
def PADDLE_MOBILE_CPU
REGISTER_OPERATOR_CPU
(
transpose
,
ops
::
TransposeOp
);
#elif defined(PADDLE_MOBILE_MALI_GPU)
#elif defined(PADDLE_MOBILE_FPGA)
#else
REGISTER_OPERATOR_X86
(
transpose
,
ops
::
TransposeOp
);
#endif
#endif // TRANSPOSE_OP
test/net/test_googlenet.cpp
浏览文件 @
3c7cde0c
...
...
@@ -17,11 +17,7 @@ limitations under the License. */
#include "../test_include.h"
int
main
()
{
#if defined(PADDLE_MOBILE_CPU)
paddle_mobile
::
PaddleMobile
<
paddle_mobile
::
CPU
>
paddle_mobile
;
#elif defined(PADDLE_MOBILE_X86)
paddle_mobile
::
PaddleMobile
<
paddle_mobile
::
X86
>
paddle_mobile
;
#endif
paddle_mobile
.
SetThreadNum
(
4
);
bool
optimize
=
true
;
auto
time1
=
time
();
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录