Unverified commit 8e400754
Authored by Qi Li on Jun 15, 2020; committed by GitHub on Jun 15, 2020

[NPU] add thresholded_relu for arm and npu, test=develop (#3786)

Parent commit: b081fcb0

Showing 13 changed files with 110 additions and 3 deletions (+110 -3)
lite/api/paddle_place.cc                         +2  -1
lite/api/paddle_place.h                          +2  -1
lite/backends/arm/math/activation.cc             +10 -0
lite/backends/arm/math/activation.h              +4  -0
lite/kernels/arm/activation_compute.cc           +20 -0
lite/kernels/arm/activation_compute.h            +10 -0
lite/kernels/npu/bridges/act_op.cc               +7  -0
lite/kernels/npu/bridges/paddle_use_bridges.h    +1  -0
lite/kernels/npu/bridges/utility.cc              +2  -0
lite/operators/activation_ops.cc                 +5  -0
lite/operators/activation_ops.h                  +3  -0
lite/operators/op_params.h                       +2  -0
lite/tests/kernels/activation_compute_test.cc    +42 -1
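For context: thresholded ReLU passes an element through unchanged only when it is strictly greater than a threshold, out = x > threshold ? x : 0, with the threshold read from the op's "threshold" attribute (default 1.0f, see op_params.h below). A minimal standalone sketch of that rule; the main() driver is illustrative only and not part of this commit:

#include <cstdio>

// Element-wise rule implemented by act_thresholded_relu<float> in this commit.
static void thresholded_relu_ref(const float* din, float* dout, int size,
                                 float threshold) {
  for (int i = 0; i < size; ++i) {
    dout[i] = (din[i] > threshold) ? din[i] : 0.f;
  }
}

int main() {
  const float x[5] = {-2.f, 0.5f, 1.f, 1.5f, 3.f};
  float y[5];
  thresholded_relu_ref(x, y, 5, 1.0f);  // 1.0f mirrors the default threshold
  for (int i = 0; i < 5; ++i) printf("%g -> %g\n", x[i], y[i]);
  // The comparison is strict, so x == 1.0 maps to 0, not 1.0.
  return 0;
}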
lite/api/paddle_place.cc

@@ -54,7 +54,8 @@ const std::string& ActivationTypeToStr(ActivationType act) {
                                           "Sigmoid",
                                           "Tanh",
                                           "Swish",
-                                          "Exp"};
+                                          "Exp",
+                                          "ThresholdedRelu"};
   auto x = static_cast<int>(act);
   CHECK_LT(x, static_cast<int>(ActivationType::NUM));
   return act2string[x];
lite/api/paddle_place.h

@@ -106,7 +106,8 @@ enum class ActivationType : int {
   kAbs = 9,
   kHardSwish = 10,
   kReciprocal = 11,
-  NUM = 12,
+  kThresholdedRelu = 12,
+  NUM = 13,
 };

 static size_t PrecisionTypeLength(PrecisionType type) {
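The new enumerator slots in just before NUM, so NUM stays one past the last valid value; the string table in paddle_place.cc above and the CHECK_LT bound must grow in lockstep with it. A cut-down, hypothetical two-entry mock of that enum/table invariant (not the repo's actual code):

#include <cstdlib>
#include <iostream>
#include <string>

// Mock of the enum/table pair kept in sync across paddle_place.{h,cc}.
enum class ActivationType : int { kExp = 0, kThresholdedRelu = 1, NUM = 2 };

const std::string& ActivationTypeToStr(ActivationType act) {
  static const std::string act2string[] = {"Exp", "ThresholdedRelu"};
  auto x = static_cast<int>(act);
  if (x >= static_cast<int>(ActivationType::NUM)) std::abort();  // CHECK_LT in the real code
  return act2string[x];
}

int main() {
  std::cout << ActivationTypeToStr(ActivationType::kThresholdedRelu) << "\n";
  return 0;
}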
lite/backends/arm/math/activation.cc

@@ -753,6 +753,16 @@ void act_abs<float>(const float* din, float* dout, int size, int threads) {
   }
 }

+template <>
+void act_thresholded_relu<float>(
+    const float* din, float* dout, int size, float threshold, int threads) {
+  for (int i = 0; i < size; ++i) {
+    dout[0] = (din[0] > threshold ? din[0] : 0.f);
+    din++;
+    dout++;
+  }
+}
+
 #ifdef LITE_WITH_TRAIN
 template <>
 void act_square_grad(const float* din,
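Unlike the NEON-vectorized activations elsewhere in this file, the new specialization is a plain scalar loop: it writes through dout[0] and bumps both pointers each iteration (equivalent to indexing with i), and it does not parallelize, so the threads argument goes unused.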
lite/backends/arm/math/activation.h

@@ -86,6 +86,10 @@ void act_reciprocal(const T* din, T* dout, int size, int threads);
 template <typename T>
 void act_abs(const T* din, T* dout, int size, int threads);

+template <typename T>
+void act_thresholded_relu(
+    const T* din, T* dout, int size, float threshold, int threads);
+
 #ifdef LITE_WITH_TRAIN
 template <typename T>
 void act_square_grad(
lite/kernels/arm/activation_compute.cc

@@ -217,6 +217,17 @@ void AbsCompute::Run() {
       x_data, output_data, x_dims.production(), ctx.threads());
 }

+void ThresholdedReluCompute::Run() {
+  auto& param = this->Param<param_t>();
+  auto& ctx = this->ctx_->template As<ARMContext>();
+  auto x_dims = param.X->dims();
+  auto x_data = param.X->data<float>();
+  auto output_data = param.Out->mutable_data<float>();
+  float threshold = param.relu_threshold;
+  lite::arm::math::act_thresholded_relu<float>(
+      x_data, output_data, x_dims.production(), threshold, ctx.threads());
+}
+
 }  // namespace arm
 }  // namespace kernels
 }  // namespace lite

@@ -336,3 +347,12 @@ REGISTER_LITE_KERNEL(
     .BindInput("X", {LiteType::GetTensorTy(TARGET(kARM))})
     .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kARM))})
     .Finalize();
+REGISTER_LITE_KERNEL(thresholded_relu,
+                     kARM,
+                     kFloat,
+                     kNCHW,
+                     paddle::lite::kernels::arm::ThresholdedReluCompute,
+                     def)
+    .BindInput("X", {LiteType::GetTensorTy(TARGET(kARM))})
+    .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kARM))})
+    .Finalize();
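This follows the pattern of the other activation kernels in the file: Run() reads the threshold out of ActivationParam and forwards to the ARM math routine, while REGISTER_LITE_KERNEL binds the thresholded_relu op name to ThresholdedReluCompute for the (kARM, kFloat, kNCHW) slot and declares X and Out as ARM tensors so kernel selection can match the op at runtime.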
lite/kernels/arm/activation_compute.h

@@ -175,6 +175,16 @@ class AbsCompute : public KernelLite<TARGET(kARM), PRECISION(kFloat)> {
   virtual ~AbsCompute() = default;
 };

+class ThresholdedReluCompute
+    : public KernelLite<TARGET(kARM), PRECISION(kFloat)> {
+ public:
+  using param_t = operators::ActivationParam;
+
+  void Run() override;
+
+  virtual ~ThresholdedReluCompute() = default;
+};
+
 }  // namespace arm
 }  // namespace kernels
 }  // namespace lite
lite/kernels/npu/bridges/act_op.cc

@@ -100,6 +100,9 @@ int ActConverter<ge::op::Activation>(void* ctx,
     auto offset = op_info->GetAttr<float>("offset");
     act_op->set_attr_negative_slope(slope);
     act_op->set_attr_coef(offset);
+  } else if (op_type == "thresholded_relu") {
+    auto threshold = op_info->GetAttr<float>("threshold");
+    act_op->set_attr_coef(threshold);
   }
   return SUCCESS;
 }

@@ -141,6 +144,10 @@ REGISTER_SUBGRAPH_BRIDGE(
     hard_sigmoid,
     kNPU,
     paddle::lite::subgraph::npu::ActConverter<ge::op::Activation>);
+REGISTER_SUBGRAPH_BRIDGE(
+    thresholded_relu,
+    kNPU,
+    paddle::lite::subgraph::npu::ActConverter<ge::op::Activation>);
 REGISTER_SUBGRAPH_BRIDGE(
     log, kNPU, paddle::lite::subgraph::npu::ActConverter<ge::op::Log>);
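On the NPU side no new converter is needed: thresholded_relu reuses the generic ActConverter<ge::op::Activation> bridge and carries its threshold in the activation op's coef attribute, the same slot hard_sigmoid uses for its offset.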
lite/kernels/npu/bridges/paddle_use_bridges.h

@@ -25,6 +25,7 @@ USE_SUBGRAPH_BRIDGE(hard_sigmoid, kNPU);
 USE_SUBGRAPH_BRIDGE(log, kNPU);
 USE_SUBGRAPH_BRIDGE(sqrt, kNPU);
 USE_SUBGRAPH_BRIDGE(square, kNPU);
+USE_SUBGRAPH_BRIDGE(thresholded_relu, kNPU);
 USE_SUBGRAPH_BRIDGE(batch_norm, kNPU);
 USE_SUBGRAPH_BRIDGE(less_than, kNPU);
lite/kernels/npu/bridges/utility.cc

@@ -144,6 +144,8 @@ int CvtActMode(std::string act_type) {
     act_mode = 9;
   } else if (act_type == "hard_sigmoid") {
     act_mode = 10;
+  } else if (act_type == "thresholded_relu") {
+    act_mode = 11;
   } else {
     // TODO(hong19860320) support more activation mode
     LOG(FATAL) << "[NPU] Unsupported activation type " << act_type;
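CvtActMode translates a Paddle activation name into the integer mode id carried by ge::op::Activation; this commit maps thresholded_relu to mode 11, presumably the id the HiAI DDK assigns to thresholded ReLU.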
lite/operators/activation_ops.cc

@@ -82,7 +82,11 @@ bool ActivationOp::AttachImpl(const cpp::OpDesc& opdesc, lite::Scope* scope) {
     param_.hard_swish_offset = opdesc.GetAttr<float>("offset");
   } else if (opdesc.Type() == "reciprocal") {
     param_.active_type = lite_api::ActivationType::kReciprocal;
+  } else if (opdesc.Type() == "thresholded_relu") {
+    param_.active_type = lite_api::ActivationType::kThresholdedRelu;
+    param_.relu_threshold = opdesc.GetAttr<float>("threshold");
   }
   VLOG(4) << "opdesc.Type():" << opdesc.Type();
   param_.Out = scope->FindVar(out_name)->GetMutable<lite::Tensor>();

@@ -100,3 +104,4 @@ REGISTER_LITE_OP(relu, paddle::lite::operators::ActivationOp);
 REGISTER_LITE_OP(leaky_relu, paddle::lite::operators::ActivationOp);
 REGISTER_LITE_OP(relu6, paddle::lite::operators::ActivationOp);
 REGISTER_LITE_OP(prelu, paddle::lite::operators::ActivationOp);
+REGISTER_LITE_OP(thresholded_relu, paddle::lite::operators::ActivationOp);
lite/operators/activation_ops.h

@@ -80,6 +80,9 @@ class ActivationOp : public OpLite {
       break;
     case lite_api::ActivationType::kIndentity:
       break;
+    case lite_api::ActivationType::kThresholdedRelu:
+      ch->macs = param_.X->numel();
+      break;
     default:
       LOG(FATAL) << "This Type of Activation:"
                  << static_cast<int>(param_.active_type)
lite/operators/op_params.h

@@ -358,6 +358,8 @@ struct ActivationParam : ParamBase {
   float hard_swish_threshold{6.0};
   float hard_swish_scale{6.0};
   float hard_swish_offset{3.0};
+  // thresholded_relu
+  float relu_threshold{1.0f};
 };

 struct ActivationGradParam : ParamBase {
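The brace initializer gives relu_threshold a default of 1.0f when the attribute is absent, which matches the conventional default threshold of the thresholded_relu operator.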
lite/tests/kernels/activation_compute_test.cc

@@ -38,7 +38,8 @@ enum activation_type_test {
   GELU,
   SQUARE,
   HARD_SWISH,
-  RECIPROCAL
+  RECIPROCAL,
+  THRESHOLDED_RELU
 };

 class ActivationComputeTester : public arena::TestCase {

@@ -54,6 +55,7 @@ class ActivationComputeTester : public arena::TestCase {
   float hard_swish_threshold = 6.0;
   float hard_swish_scale = 6.0;
   float hard_swish_offset = 3.0;
+  float relu_threshold_ = 1.0;
   DDim dims_{{1}};
   std::string type_ = "";
   activation_type_test act_type_ = RELU;

@@ -218,6 +220,12 @@ class ActivationComputeTester : public arena::TestCase {
       }
       break;
     }
+    case THRESHOLDED_RELU: {
+      for (int i = 0; i < dims_.production(); i++) {
+        output_data[i] = x_data[i] > relu_threshold_ ? x_data[i] : 0.f;
+      }
+      break;
+    }
     default:
       LOG(INFO) << "the type of activation is unknow.";
   }

@@ -245,6 +253,9 @@ class ActivationComputeTester : public arena::TestCase {
       op_desc->SetAttr("scale", hard_swish_scale);
       op_desc->SetAttr("offset", hard_swish_offset);
     }
+    if (act_type_ == THRESHOLDED_RELU) {
+      op_desc->SetAttr("threshold", relu_threshold_);
+    }
   }

   void PrepareData() override {

@@ -632,5 +643,35 @@ TEST(activation_reciprocal, precision) {
   }
 }

+TEST(Activation_thresholded_relu, precision) {
+  LOG(INFO) << "test thresholded_relu op";
+  Place place;
+  float abs_error = 2e-5;
+#if defined(LITE_WITH_NPU)
+  place = TARGET(kNPU);
+  abs_error = 1e-2;  // Using fp16 in NPU
+#elif defined(LITE_WITH_ARM)
+  place = TARGET(kARM);
+#else
+  return;
+#endif
+
+  for (auto dims : std::vector<std::vector<int64_t>>{
+           {1, 3, 2, 4}, {2, 3, 4}, {5, 4}, {8}}) {
+    std::unique_ptr<arena::TestCase> tester(new ActivationComputeTester(
+        place,
+        "def",
+        0.01,
+        6.,
+        "all",
+        0.,
+        DDim(dims),
+        "thresholded_relu",
+        THRESHOLDED_RELU));
+    arena::Arena arena(std::move(tester), place, abs_error);
+    arena.TestPrecision();
+  }
+}
+
 }  // namespace lite
 }  // namespace paddle
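The tester computes its reference output with the same strict comparison as the ARM kernel and checks 4-D, 3-D, 2-D, and 1-D shapes; on NPU the tolerance is relaxed to 1e-2 because, per the comment above, the device computes in fp16.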