Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
PaddlePaddle
Paddle-Lite
提交
01c39a9f
P
Paddle-Lite
项目概览
PaddlePaddle
/
Paddle-Lite
通知
332
Star
4
Fork
1
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
271
列表
看板
标记
里程碑
合并请求
78
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
Paddle-Lite
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
271
Issue
271
列表
看板
标记
里程碑
合并请求
78
合并请求
78
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
提交
01c39a9f
编写于
7月 03, 2019
作者:
Z
zp7
提交者:
Yanzhan Yang
7月 03, 2019
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
add relu6 threshold param (#1724)
上级
b6dc8773
变更
8
隐藏空白更改
内联
并排
Showing
8 changed files
with
41 additions
and
9 deletions
+41
-9
src/operators/activation_op.h
src/operators/activation_op.h
+1
-1
src/operators/kernel/activation_kernel.h
src/operators/kernel/activation_kernel.h
+1
-1
src/operators/kernel/arm/activation_kernel.cpp
src/operators/kernel/arm/activation_kernel.cpp
+4
-3
src/operators/kernel/cl/cl_kernel/relu6.cl
src/operators/kernel/cl/cl_kernel/relu6.cl
+3
-2
src/operators/kernel/cl/relu6_kernel.cpp
src/operators/kernel/cl/relu6_kernel.cpp
+4
-2
src/operators/math/activation.h
src/operators/math/activation.h
+13
-0
src/operators/op_param.h
src/operators/op_param.h
+14
-0
test/operators/test_relu6_op.cpp
test/operators/test_relu6_op.cpp
+1
-0
未找到文件。
src/operators/activation_op.h
浏览文件 @
01c39a9f
...
...
@@ -24,7 +24,7 @@ namespace operators {
#ifdef RELU_OP
// ReLU family operator declarations. Relu6 uses its own parameter type
// (Relu6Param) so the clipping ceiling is read from the op's "threshold"
// attribute instead of being hard-coded to 6.
DECLARE_OPERATOR(Relu, ReluParam, ReluKernel);
DECLARE_OPERATOR(Relu6, Relu6Param, Relu6Kernel);
#endif
#ifdef SIGMOID_OP
...
...
src/operators/kernel/activation_kernel.h
浏览文件 @
01c39a9f
...
...
@@ -22,7 +22,7 @@ namespace operators {
#ifdef RELU_OP
// Kernel declarations for the ReLU family. Relu6 is bound to Relu6Param,
// which carries the configurable "threshold" attribute.
DECLARE_KERNEL(Relu, ReluParam);
DECLARE_KERNEL(Relu6, Relu6Param);
#endif
#ifdef SIGMOID_OP
...
...
src/operators/kernel/arm/activation_kernel.cpp
浏览文件 @
01c39a9f
...
...
@@ -38,15 +38,16 @@ void ReluKernel<CPU, float>::Compute(const ReluParam<CPU> ¶m) {
}
template <>
bool Relu6Kernel<CPU, float>::Init(Relu6Param<CPU> *param) {
  // Nothing to prepare for the CPU path; all work happens in Compute().
  return true;
}

template <>
void Relu6Kernel<CPU, float>::Compute(const Relu6Param<CPU> &param) {
  const LoDTensor *input = param.InputX();
  LoDTensor *output = param.Out();
  // Clip to [0, threshold]. The ceiling comes from the op's "threshold"
  // attribute (see Relu6Param) rather than a hard-coded 6.0f.
  float threshold = param.getThreshold();
  ActivationCompute<float, RELU6>()(input, output, threshold);
  // Propagate the LoD so downstream ops see the same sequence structure.
  output->set_lod(input->lod());
}
#endif
...
...
src/operators/kernel/cl/cl_kernel/relu6.cl
浏览文件 @
01c39a9f
...
...
@@ -15,7 +15,8 @@ limitations under the License. */
#
pragma
OPENCL
EXTENSION
cl_khr_fp16
:
enable
__kernel
void
relu6
(
__read_only
image2d_t
input,
__write_only
image2d_t
output
)
{
__write_only
image2d_t
output,
__private
const
float
threshold
)
{
const
int
x
=
get_global_id
(
0
)
;
const
int
y
=
get_global_id
(
1
)
;
...
...
@@ -26,6 +27,6 @@ __kernel void relu6(__read_only image2d_t input,
half4
in
=
read_imageh
(
input,
sampler,
(
int2
)(
x,
y
))
;
in
=
max
((
half4
)(
0.0f,
0.0f,
0.0f,
0.0f
)
,
in
)
;
in
=
min
((
half4
)(
6.0f,
6.0f,
6.0f,
6.0f
)
,
in
)
;
in
=
min
((
half4
)(
threshold,
threshold,
threshold,
threshold
)
,
in
)
;
write_imageh
(
output,
(
int2
)(
x,
y
)
,
in
)
;
}
src/operators/kernel/cl/relu6_kernel.cpp
浏览文件 @
01c39a9f
...
...
@@ -19,21 +19,23 @@ namespace paddle_mobile {
namespace
operators
{
template <>
bool Relu6Kernel<GPU_CL, float>::Init(Relu6Param<GPU_CL>* param) {
  // Compile/load the "relu6" OpenCL kernel once at init time.
  this->cl_helper_.AddKernel("relu6", "relu6.cl");
  return true;
}
template <>
void Relu6Kernel<GPU_CL, float>::Compute(const Relu6Param<GPU_CL>& param) {
  auto kernel = this->cl_helper_.KernelAt(0);
  const auto* input = param.InputX();
  auto* output = param.Out();
  // Clipping ceiling, read from the op's "threshold" attribute.
  float threshold = param.getThreshold();
  // NOTE(review): default_work_size is computed but the enqueue below uses
  // the image dimensions directly — confirm whether it can be removed.
  auto default_work_size = this->cl_helper_.DefaultWorkSize(*output);
  auto inputImage = input->GetCLImage();
  auto outputImage = output->GetCLImage();
  clSetKernelArg(kernel, 0, sizeof(cl_mem), &inputImage);
  clSetKernelArg(kernel, 1, sizeof(cl_mem), &outputImage);
  // BUG FIX: the third kernel argument is a scalar float, not a cl_mem.
  // Passing sizeof(cl_mem) makes clSetKernelArg fail with
  // CL_INVALID_ARG_SIZE (or feed garbage) wherever
  // sizeof(cl_mem) != sizeof(float).
  clSetKernelArg(kernel, 2, sizeof(float), &threshold);
  const size_t work_size[2] = {input->ImageWidth(), input->ImageHeight()};
  clEnqueueNDRangeKernel(this->cl_helper_.CLCommandQueue(), kernel, 2, NULL,
                         work_size, NULL, 0, NULL, NULL);
}
...
...
src/operators/math/activation.h
浏览文件 @
01c39a9f
...
...
@@ -116,6 +116,14 @@ inline float32x4_t vActiveq_f32<LEAKY_RELU>(const float32x4_t &x,
const
float32x4_t
&
alpha
)
{
return
vmaxq_f32
(
x
,
vmulq_f32
(
x
,
alpha
));
}
template <>
inline float32x4_t vActiveq_f32<RELU6>(const float32x4_t &x,
                                       const float32x4_t &alpha) {
  // Vectorized relu6 with a configurable ceiling: clamp each lane of x to
  // [0, threshold], where the threshold is broadcast from lane 0 of alpha.
  // Locals renamed from __zero/__threshold: identifiers containing a double
  // underscore are reserved to the implementation in C++ ([lex.name]).
  float32x4_t zero = vdupq_n_f32(0.f);
  float32x4_t threshold = vdupq_n_f32(vgetq_lane_f32(alpha, 0));
  return vminq_f32(vmaxq_f32(x, zero), threshold);
}
#endif
template
<
ActivationType
Act
=
IDENTITY
>
...
...
@@ -164,6 +172,11 @@ inline float Active<LEAKY_RELU>(const float &x, const float &alpha) {
return
std
::
max
(
x
,
alpha
*
x
);
}
// Scalar relu6 with a configurable ceiling: clamps x into [0, alpha],
// where alpha carries the op's "threshold" attribute value.
template <>
inline float Active<RELU6>(const float &x, const float &alpha) {
  const float non_negative = std::max(x, 0.f);
  return std::min(non_negative, alpha);
}
}
// namespace math
}
// namespace operators
}
// namespace paddle_mobile
src/operators/op_param.h
浏览文件 @
01c39a9f
...
...
@@ -1675,6 +1675,20 @@ class ReluParam : public ReluParamBase<Dtype> {
using
ReluParamBase
<
Dtype
>::
ReluParamBase
;
};
// Parameter holder for the Relu6 operator. Extends ReluParamBase with the
// "threshold" attribute so the clipping ceiling is configurable per op
// instead of hard-coded to 6.
template <typename Dtype>
class Relu6Param : public ReluParamBase<Dtype> {
 public:
  Relu6Param(const VariableNameMap &inputs, const VariableNameMap &outputs,
             const AttributeMap &attrs, Scope *scope)
      : ReluParamBase<Dtype>(inputs, outputs, attrs, scope),
        // Initialize in the member-init list rather than by assignment in
        // the ctor body; GetAttr reads the op's "threshold" attribute.
        threshold(OpParam::GetAttr<float>("threshold", attrs)) {}

  // Upper clipping bound used by the relu6 kernels.
  float getThreshold() const { return threshold; }

 private:
  float threshold;
};
#ifdef PADDLE_MOBILE_CL
template
<
>
class
ReluParam
<
GPU_CL
>
:
public
ReluParamBase
<
GPU_CL
>
{
...
...
test/operators/test_relu6_op.cpp
浏览文件 @
01c39a9f
...
...
@@ -44,6 +44,7 @@ int TestRelu6Op(const std::vector<int> input_shape) {
auto
output_var
=
scope
.
get
()
->
Var
(
"output"
);
framework
::
AttributeMap
attrs
;
attrs
[
"threshold"
].
Set
<
float
>
(
6.
f
);
auto
*
op
=
new
operators
::
Relu6Op
<
CPU
,
float
>
(
"relu6"
,
inputs
,
outputs
,
attrs
,
scope
.
get
());
op
->
InferShape
();
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录