Crayon鑫 / Paddle (forked from PaddlePaddle / Paddle)
Unverified commit bef6f2e1, authored March 16, 2022 by Lijunhui, committed by GitHub on March 16, 2022.

[KP] Fix registry and add UT for thresholded_relu & softshrink (#40524)

* init commit
* correct namespace

Parent: 9fc89b34
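For context, both operators exercised by the new unit tests are exposed through paddle.nn.functional, so the KP kernels registered in this change are what back those calls on an XPU device. A minimal usage sketch, not part of the commit; it assumes an XPU-enabled PaddlePaddle build (on a CPU/GPU build, drop the set_device call and the same API still runs):

import paddle
import paddle.nn.functional as F

paddle.set_device('xpu')  # assumption: an XPU runtime is available

x = paddle.uniform([10, 12], min=-20.0, max=20.0, dtype='float32')
y1 = F.softshrink(x, threshold=0.5)        # zeroes values in [-0.5, 0.5], shifts the rest toward 0
y2 = F.thresholded_relu(x, threshold=1.0)  # keeps values above 1.0, zeroes the rest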
Showing 2 changed files with 281 additions and 58 deletions (+281 -58):
paddle/fluid/operators/activation_op.kps (+218 -58)
python/paddle/fluid/tests/unittests/xpu/test_activation_op_xpu.py (+63 -0)
paddle/fluid/operators/activation_op.kps
@@ -15,6 +15,8 @@ limitations under the License. */
#include "paddle/fluid/platform/bfloat16.h"
#include "paddle/fluid/platform/device/gpu/gpu_device_function.h"
#include "paddle/phi/kernels/funcs/activation_functor.h"
namespace paddle {
namespace operators {
@@ -1148,63 +1150,221 @@ REGISTER_OP_CUDA_KERNEL(
FOR_EACH_ACTIVATION_CUDA_OP(REGISTER_ACTIVATION_CUDA_KERNEL)
#ifdef PADDLE_WITH_XPU_KP
#define REGISTER_ACTIVATION_XPU_KERNEL(act_type, op_name, functor,            \
                                       grad_functor)                          \
  REGISTER_OP_KERNEL(                                                         \
      act_type, KP, plat::XPUPlace,                                           \
      ops::ActivationCudaKernel<plat::XPUDeviceContext, ops::functor<float>>); \
  REGISTER_OP_KERNEL(act_type##_grad, KP, plat::XPUPlace,                     \
                     ops::ActivationGradCudaKernel<plat::XPUDeviceContext,    \
                                                   ops::grad_functor<float>>);
REGISTER_ACTIVATION_XPU_KERNEL(leaky_relu, LeakyRelu, CudaLeakyReluFunctor,
CudaLeakyReluGradFunctor);
REGISTER_ACTIVATION_XPU_KERNEL(sigmoid, Sigmoid, CudaSigmoidFunctor,
CudaSigmoidGradFunctor);
REGISTER_ACTIVATION_XPU_KERNEL(exp, Exp, CudaExpFunctor, CudaExpGradFunctor);
REGISTER_ACTIVATION_XPU_KERNEL(log, Log, CudaLogFunctor, CudaLogGradFunctor);
REGISTER_ACTIVATION_XPU_KERNEL(reciprocal, Reciprocal, CudaReciprocalFunctor,
CudaReciprocalGradFunctor);
REGISTER_ACTIVATION_XPU_KERNEL(softplus, Softplus, CudaSoftplusFunctor,
CudaSoftplusGradFunctor);
REGISTER_ACTIVATION_XPU_KERNEL(hard_swish, HardSwish, CudaHardSwishFunctor,
CudaHardSwishGradFunctor);
REGISTER_ACTIVATION_XPU_KERNEL(elu, Elu, CudaELUFunctor, CudaELUGradFunctor);
REGISTER_ACTIVATION_XPU_KERNEL(celu, Celu, CudaCELUFunctor,
CudaCELUGradFunctor);
REGISTER_ACTIVATION_XPU_KERNEL(sqrt, Sqrt, CudaSqrtFunctor,
CudaSqrtGradFunctor);
REGISTER_ACTIVATION_XPU_KERNEL(square, Square, CudaSquareFunctor,
CudaSquareGradFunctor);
REGISTER_ACTIVATION_XPU_KERNEL(silu, Silu, CudaSiluFunctor,
CudaSiluGradFunctor);
REGISTER_ACTIVATION_XPU_KERNEL(logsigmoid, LogSigmoid, CudaLogSigmoidFunctor,
CudaLogSigmoidGradFunctor);
REGISTER_ACTIVATION_XPU_KERNEL(softshrink, SoftShrink, CudaSoftShrinkFunctor,
CudaSoftShrinkGradFunctor);
REGISTER_ACTIVATION_XPU_KERNEL(ceil, Ceil, CudaCeilFunctor,
CudaZeroGradFunctor);
REGISTER_ACTIVATION_XPU_KERNEL(floor, Floor, CudaFloorFunctor,
CudaZeroGradFunctor);
REGISTER_ACTIVATION_XPU_KERNEL(log1p, Log1p, CudaLog1pFunctor,
CudaLog1pGradFunctor);
REGISTER_ACTIVATION_XPU_KERNEL(brelu, BRelu, CudaBReluFunctor,
CudaBReluGradFunctor);
REGISTER_ACTIVATION_XPU_KERNEL(soft_relu, SoftRelu, CudaSoftReluFunctor,
CudaSoftReluGradFunctor);
REGISTER_ACTIVATION_XPU_KERNEL(softsign, Softsign, CudaSoftsignFunctor,
CudaSoftsignGradFunctor);
REGISTER_ACTIVATION_XPU_KERNEL(relu6, Relu6, CudaRelu6Functor,
CudaRelu6GradFunctor);
REGISTER_ACTIVATION_XPU_KERNEL(hard_shrink, HardShrink, CudaHardShrinkFunctor,
CudaHardShrinkGradFunctor);
REGISTER_ACTIVATION_XPU_KERNEL(hard_sigmoid, HardSigmoid,
CudaHardSigmoidFunctor,
CudaHardSigmoidGradFunctor);
REGISTER_ACTIVATION_XPU_KERNEL(swish, Swish, CudaSwishFunctor,
CudaSwishGradFunctor);
REGISTER_ACTIVATION_XPU_KERNEL(thresholded_relu, ThresholdedRelu,
CudaThresholdedReluFunctor,
CudaThresholdedReluGradFunctor);
REGISTER_OP_KERNEL(
brelu, KP, plat::XPUPlace,
ops::ActivationCudaKernel<paddle::platform::XPUDeviceContext,
phi::funcs::CudaBReluFunctor<float>>);
REGISTER_OP_KERNEL(
brelu_grad, KP, plat::XPUPlace,
ops::ActivationGradCudaKernel<paddle::platform::XPUDeviceContext,
phi::funcs::CudaBReluGradFunctor<float>>);
REGISTER_OP_KERNEL(ceil, KP, plat::XPUPlace,
ops::ActivationCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaCeilFunctor<float>>);
REGISTER_OP_KERNEL(
ceil_grad, KP, plat::XPUPlace,
ops::ActivationGradCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaZeroGradFunctor<float>>);
REGISTER_OP_KERNEL(celu, KP, plat::XPUPlace,
ops::ActivationCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaCELUFunctor<float>>);
REGISTER_OP_KERNEL(
celu_grad, KP, plat::XPUPlace,
ops::ActivationGradCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaCELUGradFunctor<float>>);
REGISTER_OP_KERNEL(elu, KP, plat::XPUPlace,
ops::ActivationCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaELUFunctor<float>>);
REGISTER_OP_KERNEL(
elu_grad, KP, plat::XPUPlace,
ops::ActivationGradCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaELUGradFunctor<float>>);
REGISTER_OP_KERNEL(exp, KP, plat::XPUPlace,
ops::ActivationCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaExpFunctor<float>>);
REGISTER_OP_KERNEL(
exp_grad, KP, plat::XPUPlace,
ops::ActivationGradCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaExpGradFunctor<float>>);
REGISTER_OP_KERNEL(floor, KP, plat::XPUPlace,
ops::ActivationCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaFloorFunctor<float>>);
REGISTER_OP_KERNEL(
floor_grad, KP, plat::XPUPlace,
ops::ActivationGradCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaZeroGradFunctor<float>>);
REGISTER_OP_KERNEL(
hard_shrink, KP, plat::XPUPlace,
ops::ActivationCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaHardShrinkFunctor<float>>);
REGISTER_OP_KERNEL(
hard_shrink_grad, KP, plat::XPUPlace,
ops::ActivationGradCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaHardShrinkGradFunctor<float>>);
REGISTER_OP_KERNEL(
hard_sigmoid, KP, plat::XPUPlace,
ops::ActivationCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaHardSigmoidFunctor<float>>);
REGISTER_OP_KERNEL(
hard_sigmoid_grad, KP, plat::XPUPlace,
ops::ActivationGradCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaHardSigmoidGradFunctor<float>>);
REGISTER_OP_KERNEL(hard_swish, KP, plat::XPUPlace,
ops::ActivationCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaHardSwishFunctor<float>>);
REGISTER_OP_KERNEL(
hard_swish_grad, KP, plat::XPUPlace,
ops::ActivationGradCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaHardSwishGradFunctor<float>>);
REGISTER_OP_KERNEL(
leaky_relu, KP, plat::XPUPlace,
ops::ActivationCudaKernel<paddle::platform::XPUDeviceContext,
phi::funcs::CudaLeakyReluFunctor<float>>);
REGISTER_OP_KERNEL(
leaky_relu_grad, KP, plat::XPUPlace,
ops::ActivationGradCudaKernel<paddle::platform::XPUDeviceContext,
phi::funcs::CudaLeakyReluGradFunctor<float>>);
REGISTER_OP_KERNEL(log, KP, plat::XPUPlace,
ops::ActivationCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaLogFunctor<float>>);
REGISTER_OP_KERNEL(
log_grad, KP, plat::XPUPlace,
ops::ActivationGradCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaLogGradFunctor<float>>);
REGISTER_OP_KERNEL(log1p, KP, plat::XPUPlace,
ops::ActivationCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaLog1pFunctor<float>>);
REGISTER_OP_KERNEL(
log1p_grad, KP, plat::XPUPlace,
ops::ActivationGradCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaLog1pGradFunctor<float>>);
REGISTER_OP_KERNEL(
logsigmoid, KP, plat::XPUPlace,
ops::ActivationCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaLogSigmoidFunctor<float>>);
REGISTER_OP_KERNEL(
logsigmoid_grad, KP, plat::XPUPlace,
ops::ActivationGradCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaLogSigmoidGradFunctor<float>>);
REGISTER_OP_KERNEL(
reciprocal, KP, plat::XPUPlace,
ops::ActivationCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaReciprocalFunctor<float>>);
REGISTER_OP_KERNEL(
reciprocal_grad, KP, plat::XPUPlace,
ops::ActivationGradCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaReciprocalGradFunctor<float>>);
REGISTER_OP_KERNEL(
relu, KP, plat::XPUPlace,
ops::ActivationCudaKernel<paddle::platform::XPUDeviceContext,
phi::funcs::CudaReluFunctor<float>>);
REGISTER_OP_KERNEL(
relu_grad, KP, plat::XPUPlace,
ops::ActivationGradCudaKernel<paddle::platform::XPUDeviceContext,
phi::funcs::CudaReluGradFunctor<float>>);
REGISTER_OP_KERNEL(relu6, KP, plat::XPUPlace,
ops::ActivationCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaRelu6Functor<float>>);
REGISTER_OP_KERNEL(
relu6_grad, KP, plat::XPUPlace,
ops::ActivationGradCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaRelu6GradFunctor<float>>);
REGISTER_OP_KERNEL(sigmoid, KP, plat::XPUPlace,
ops::ActivationCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaSigmoidFunctor<float>>);
REGISTER_OP_KERNEL(
sigmoid_grad, KP, plat::XPUPlace,
ops::ActivationGradCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaSigmoidGradFunctor<float>>);
REGISTER_OP_KERNEL(silu, KP, plat::XPUPlace,
ops::ActivationCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaSiluFunctor<float>>);
REGISTER_OP_KERNEL(
silu_grad, KP, plat::XPUPlace,
ops::ActivationGradCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaSiluGradFunctor<float>>);
REGISTER_OP_KERNEL(soft_relu, KP, plat::XPUPlace,
ops::ActivationCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaSoftReluFunctor<float>>);
REGISTER_OP_KERNEL(
soft_relu_grad, KP, plat::XPUPlace,
ops::ActivationGradCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaSoftReluGradFunctor<float>>);
REGISTER_OP_KERNEL(softplus, KP, plat::XPUPlace,
ops::ActivationCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaSoftplusFunctor<float>>);
REGISTER_OP_KERNEL(
softplus_grad, KP, plat::XPUPlace,
ops::ActivationGradCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaSoftplusGradFunctor<float>>);
REGISTER_OP_KERNEL(
softshrink, KP, plat::XPUPlace,
ops::ActivationCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaSoftShrinkFunctor<float>>);
REGISTER_OP_KERNEL(
softshrink_grad, KP, plat::XPUPlace,
ops::ActivationGradCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaSoftShrinkGradFunctor<float>>);
REGISTER_OP_KERNEL(softsign, KP, plat::XPUPlace,
ops::ActivationCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaSoftsignFunctor<float>>);
REGISTER_OP_KERNEL(
softsign_grad, KP, plat::XPUPlace,
ops::ActivationGradCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaSoftsignGradFunctor<float>>);
REGISTER_OP_KERNEL(sqrt, KP, plat::XPUPlace,
ops::ActivationCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaSqrtFunctor<float>>);
REGISTER_OP_KERNEL(
sqrt_grad, KP, plat::XPUPlace,
ops::ActivationGradCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaSqrtGradFunctor<float>>);
REGISTER_OP_KERNEL(square, KP, plat::XPUPlace,
ops::ActivationCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaSquareFunctor<float>>);
REGISTER_OP_KERNEL(
square_grad, KP, plat::XPUPlace,
ops::ActivationGradCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaSquareGradFunctor<float>>);
REGISTER_OP_KERNEL(swish, KP, plat::XPUPlace,
ops::ActivationCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaSwishFunctor<float>>);
REGISTER_OP_KERNEL(
swish_grad, KP, plat::XPUPlace,
ops::ActivationGradCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaSwishGradFunctor<float>>);
REGISTER_OP_KERNEL(
thresholded_relu, KP, plat::XPUPlace,
ops::ActivationCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaThresholdedReluFunctor<float>>);
REGISTER_OP_KERNEL(
thresholded_relu_grad, KP, plat::XPUPlace,
ops::ActivationGradCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaThresholdedReluGradFunctor<float>>);
#endif // PADDLE_WITH_XPU_KP
python/paddle/fluid/tests/unittests/xpu/test_activation_op_xpu.py
@@ -849,6 +849,38 @@ def ref_softsign(x):
    return out


class XPUTestSoftshrinkOP(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'softshrink'
        self.use_dynamic_create_class = False

    class XPUTestSoftshrink(TestActivationOPBase):
        def set_case(self):
            self.op_type = "softshrink"
            self.dtype = self.in_type

            threshold = 0.5
            np.random.seed(1023)
            x = np.random.uniform(0.25, 10, [10, 12]).astype(self.dtype)
            out = ref_softshrink(x, threshold)

            self.inputs = {'X': x}
            self.outputs = {'Out': out}
            self.attrs = {'use_xpu': True}


support_types = get_xpu_op_support_types('softshrink')
for stype in support_types:
    create_test_class(globals(), XPUTestSoftshrinkOP, stype)


def ref_softshrink(x, threshold=0.5):
    out = np.copy(x)
    out = (out < -threshold) * (out + threshold) + (out > threshold) * (
        out - threshold)
    return out


class XPUTestSwishOP(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'swish'
@@ -879,5 +911,36 @@ def ref_swish(x):
    return out


class XPUTestThresholdedReluOP(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'thresholded_relu'
        self.use_dynamic_create_class = False

    class XPUTestThresholdedRelu(TestActivationOPBase):
        def set_case(self):
            self.op_type = "thresholded_relu"
            self.dtype = self.in_type

            threshold = 1.0
            np.random.seed(1024)
            x = np.random.uniform(-20, 20, [10, 12]).astype(self.dtype)
            x[np.abs(x) < 0.005] = 0.02
            out = ref_thresholded_relu(x, threshold)

            self.inputs = {'X': x}
            self.outputs = {'Out': out}
            self.attrs = {'use_xpu': True}


support_types = get_xpu_op_support_types('thresholded_relu')
for stype in support_types:
    create_test_class(globals(), XPUTestThresholdedReluOP, stype)


def ref_thresholded_relu(x, threshold=1.0):
    out = (x > threshold) * x
    return out


if __name__ == "__main__":
    unittest.main()
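As a quick standalone sanity check (not part of this commit), the formulas used by ref_softshrink and ref_thresholded_relu above can be compared against the functional API on any available device; the seed and tolerance below are arbitrary assumptions:

import numpy as np
import paddle
import paddle.nn.functional as F

np.random.seed(0)
x = np.random.uniform(-20, 20, [10, 12]).astype('float32')
xt = paddle.to_tensor(x)

ref_ss = (x < -0.5) * (x + 0.5) + (x > 0.5) * (x - 0.5)  # same formula as ref_softshrink
ref_tr = (x > 1.0) * x                                   # same formula as ref_thresholded_relu

np.testing.assert_allclose(F.softshrink(xt, 0.5).numpy(), ref_ss, atol=1e-6)
np.testing.assert_allclose(F.thresholded_relu(xt, 1.0).numpy(), ref_tr, atol=1e-6)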