BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit d2f87d96 (unverified)
Authored on Nov 24, 2022 by zhangyikun02; committed by GitHub on Nov 24, 2022

add exp_grad, hard_sigmoid and hard_sigmoid_grad for xpu, test=kunlun (#48307)

Parent: 22555e96
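For context, the math behind the three new kernels (the slope/offset attributes come from the op definition elsewhere in Paddle; the saturation rule and the 0.2/0.5 defaults used in the sketches below are assumptions based on the CPU implementation of hard_sigmoid):

\[
y = \mathrm{hard\_sigmoid}(x) = \min\!\bigl(1,\ \max(0,\ \mathrm{slope}\cdot x + \mathrm{offset})\bigr)
\]
\[
\frac{\partial L}{\partial x} = \frac{\partial L}{\partial y}\cdot \mathrm{slope}\cdot \mathbf{1}\,[\,0 < y < 1\,]
\]
\[
\frac{d}{dx}\,e^{x} = e^{x} = y \quad\Rightarrow\quad dx = dout \cdot y
\]

Both gradients are computable from the forward output y alone, which is why the diff registers them through the _DEPOUT macro family that hands out and dout, but not x, to the functor.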
Showing 3 changed files with 84 additions and 0 deletions (+84, -0)
paddle/fluid/platform/device/xpu/xpu2_op_list.h (+5, -0)
paddle/phi/kernels/xpu/activation_grad_kernel.cc (+55, -0)
paddle/phi/kernels/xpu/activation_kernel.cc (+24, -0)
paddle/fluid/platform/device/xpu/xpu2_op_list.h
@@ -220,6 +220,7 @@ XPUOpMap& get_kl2_ops() {
     XPUKernelSet({pOpKernelType(vartype::INT64, XPUPlace()),
                   pOpKernelType(vartype::INT32, XPUPlace()),
                   pOpKernelType(vartype::FP32, XPUPlace())})},
    {"exp_grad", XPUKernelSet({pOpKernelType(vartype::FP32, XPUPlace())})},
    {"exp", XPUKernelSet({pOpKernelType(vartype::FP32, XPUPlace())})},
    {"expand_as_v2",
     XPUKernelSet({pOpKernelType(vartype::INT32, XPUPlace()),
@@ -314,6 +315,10 @@ XPUOpMap& get_kl2_ops() {
     pOpKernelType(vartype::FP32, XPUPlace())})},
    {"grid_sampler", XPUKernelSet({pOpKernelType(vartype::FP32, XPUPlace())})},
    {"hard_sigmoid_grad",
     XPUKernelSet({pOpKernelType(vartype::FP32, XPUPlace())})},
    {"hard_sigmoid", XPUKernelSet({pOpKernelType(vartype::FP32, XPUPlace())})},
    {"hard_swish_grad",
     XPUKernelSet({pOpKernelType(vartype::FP32, XPUPlace()),
                   pOpKernelType(vartype::FP16, XPUPlace())})},
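This header only advertises which (op, dtype) combinations have kernels on Kunlun2-class devices; the framework consults the map when deciding whether an op may be placed on the XPU. A minimal self-contained sketch of that lookup pattern, using simplified stand-in types (the real XPUOpMap/XPUKernelSet key on pOpKernelType rather than strings):

#include <iostream>
#include <map>
#include <set>
#include <string>

// Simplified stand-ins for Paddle's XPUOpMap / XPUKernelSet: each op name
// maps to the set of element types its XPU2 kernel supports.
using KernelSet = std::set<std::string>;
using OpMap = std::map<std::string, KernelSet>;

int main() {
  OpMap kl2_ops = {
      {"exp", {"FP32"}},
      {"exp_grad", {"FP32"}},           // added by this commit
      {"hard_sigmoid", {"FP32"}},       // added by this commit
      {"hard_sigmoid_grad", {"FP32"}},  // added by this commit
      {"hard_swish_grad", {"FP32", "FP16"}},
  };
  // Placement check: does hard_sigmoid_grad have an FP32 kernel on XPU2?
  auto it = kl2_ops.find("hard_sigmoid_grad");
  bool supported = it != kl2_ops.end() && it->second.count("FP32") > 0;
  std::cout << std::boolalpha << supported << "\n";  // prints: true
}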
paddle/phi/kernels/xpu/activation_grad_kernel.cc
@@ -160,6 +160,21 @@ int xpu_activation_backward(const Context& dev_ctx,
  return r;
}

template <typename T>
struct XPUExpGradFunctor : public funcs::BaseActivationFunctor<T> {
  using XPUType = typename XPUTypeTrait<T>::Type;
  template <typename Context>
  void operator()(const Context& dev_ctx,
                  const DenseTensor* x,
                  const DenseTensor* out,
                  const DenseTensor* dout,
                  DenseTensor* dx) const {
    int r = xpu_activation_backward<Context, T, XPUType>(
        dev_ctx, x, out, dout, dx, xpu::exp_grad<XPUType>);
    PADDLE_ENFORCE_XDNN_SUCCESS(r, "exp_grad");
  }
};

template <typename T>
struct XPULogGradFunctor : public funcs::BaseActivationFunctor<T> {
  template <typename Context>
@@ -238,6 +253,39 @@ struct XPULeakyReluGradFunctor : public funcs::BaseActivationFunctor<T> {
  }
};

template <typename T>
struct XPUHardSigmoidGradFunctor : public funcs::BaseActivationFunctor<T> {
  using XPUType = typename XPUTypeTrait<T>::Type;
  float slope;
  float offset;
  typename funcs::BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"slope", &slope}, {"offset", &offset}};
  }
  template <typename Context>
  void operator()(const Context& dev_ctx,
                  const DenseTensor* x,
                  const DenseTensor* out,
                  const DenseTensor* dout,
                  DenseTensor* dx) const {
    const T* y_data = out->data<T>();
    const T* y_grad = dout->data<T>();
    T* x_grad = dx->data<T>();
    auto xpu_context = dev_ctx.x_context();
    int r = xpu::hard_sigmoid_grad(
        xpu_context,
        reinterpret_cast<const XPUType*>(y_data),  // hard_sigmoid_grad does not need x_data
        reinterpret_cast<const XPUType*>(y_data),
        reinterpret_cast<const XPUType*>(y_grad),
        reinterpret_cast<XPUType*>(x_grad),
        dx->numel(),
        slope);
    PADDLE_ENFORCE_XDNN_SUCCESS(r, "hard_sigmoid_grad");
  }
};

template <typename T>
struct XPUHardSwishGradFunctor : public funcs::BaseActivationFunctor<T> {
  float threshold;
@@ -497,6 +545,7 @@ struct XPUSoftPlusGradFunctor : public funcs::BaseActivationFunctor<T> {
  }
};

DEFINE_XPU_ACTIVATION_GRAD_KERNEL_DEPOUT(Exp, XPUExpGradFunctor);
DEFINE_XPU_ACTIVATION_GRAD_KERNEL_DEPOUT(Reciprocal, XPUReciprocalGradFunctor);
DEFINE_XPU_ACTIVATION_GRAD_KERNEL_DEPOUT(Sigmoid, XPUSigmoidGradFunctor);
DEFINE_XPU_ACTIVATION_GRAD_KERNEL_DEPOUT(Sqrt, XPUSqrtGradFunctor);
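The _DEPOUT suffix marks gradients that depend on the forward output rather than the input, which holds for exp, reciprocal, sigmoid, and sqrt. A self-contained analogue of this macro pattern (the real macro takes dev_ctx and DenseTensors; everything here is simplified for illustration):

#include <cmath>
#include <cstddef>
#include <cstdio>
#include <vector>

// Simplified analogue of DEFINE_XPU_ACTIVATION_GRAD_KERNEL_DEPOUT: stamp out a
// GradKernel that hands only (out, dout) to the functor, because for these
// activations the gradient is expressible via the forward output alone.
#define DEFINE_GRAD_KERNEL_DEPOUT(name, functor_t)      \
  void name##GradKernel(const std::vector<float>& out,  \
                        const std::vector<float>& dout, \
                        std::vector<float>* dx) {       \
    functor_t functor;                                  \
    functor(out, dout, dx);                             \
  }

struct ExpGradFunctor {
  // d(exp(x))/dx = exp(x) = out, hence dx = dout * out.
  void operator()(const std::vector<float>& out,
                  const std::vector<float>& dout,
                  std::vector<float>* dx) const {
    dx->resize(out.size());
    for (std::size_t i = 0; i < out.size(); ++i) (*dx)[i] = dout[i] * out[i];
  }
};

DEFINE_GRAD_KERNEL_DEPOUT(Exp, ExpGradFunctor)

int main() {
  std::vector<float> out = {1.0f, std::exp(1.0f)};  // exp(0), exp(1)
  std::vector<float> dout = {1.0f, 1.0f};
  std::vector<float> dx;
  ExpGradKernel(out, dout, &dx);
  std::printf("%f %f\n", dx[0], dx[1]);  // 1.000000 2.718282
}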
@@ -524,6 +573,10 @@ DEFINE_XPU_ACT_GRAD_KERNEL_WITH_TWO_ATTRS_DEPX(Softplus,
                                               XPUSoftPlusGradFunctor,
                                               beta,
                                               threshold)
DEFINE_XPU_ACT_GRAD_KERNEL_WITH_TWO_ATTRS_DEPOUT(HardSigmoid,
                                                 XPUHardSigmoidGradFunctor,
                                                 slope,
                                                 offset)

template <typename T, typename Context>
void HardSwishGradKernel(const Context& dev_ctx,
@@ -560,8 +613,10 @@ PD_REGISTER_KERNEL(tanh_grad,
                   phi::TanhGradKernel,
                   float,
                   phi::dtype::float16) {}

PD_REGISTER_ACTIVATION_GRAD_KERNEL(exp_grad, ExpGradKernel)
PD_REGISTER_ACTIVATION_GRAD_KERNEL(log_grad, LogGradKernel)
PD_REGISTER_ACTIVATION_GRAD_KERNEL(leaky_relu_grad, LeakyReluGradKernel)
PD_REGISTER_ACTIVATION_GRAD_KERNEL(hard_sigmoid_grad, HardSigmoidGradKernel)
PD_REGISTER_ACTIVATION_GRAD_KERNEL(hard_swish_grad, HardSwishGradKernel)
PD_REGISTER_ACTIVATION_GRAD_KERNEL(reciprocal_grad, ReciprocalGradKernel)
PD_REGISTER_ACTIVATION_GRAD_KERNEL(relu6_grad, Relu6GradKernel)
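A host-side reference for the backward kernel registered above; it needs only the forward output y, which is why the functor passes y_data where the XDNN API expects the x argument. The masking rule (gradient equals slope strictly inside the unsaturated region, zero otherwise) mirrors Paddle's CPU functor and is an assumption about the XDNN behavior:

#include <cstddef>
#include <cstdio>
#include <vector>

// Host-side reference for hard_sigmoid_grad: the gradient is slope wherever
// the forward output y is strictly inside (0, 1), i.e. not saturated, else 0.
std::vector<float> hard_sigmoid_grad_ref(const std::vector<float>& y,
                                         const std::vector<float>& dy,
                                         float slope = 0.2f) {
  std::vector<float> dx(y.size());
  for (std::size_t i = 0; i < y.size(); ++i)
    dx[i] = (y[i] > 0.0f && y[i] < 1.0f) ? dy[i] * slope : 0.0f;
  return dx;
}

int main() {
  // y = 0 and y = 1 are saturated regions, so their gradient is zero.
  auto dx = hard_sigmoid_grad_ref({0.0f, 0.5f, 1.0f}, {1.0f, 1.0f, 1.0f});
  std::printf("%g %g %g\n", dx[0], dx[1], dx[2]);  // 0 0.2 0
}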
paddle/phi/kernels/xpu/activation_kernel.cc
@@ -226,6 +226,25 @@ void PowKernel(const Context& dev_ctx,
  PADDLE_ENFORCE_XDNN_SUCCESS(r, "broadcast_pow");
}

template <typename T>
struct XPUHardSigmoidFunctor : public funcs::BaseActivationFunctor<T> {
  float slope;
  float offset;
  typename funcs::BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"slope", &slope}, {"offset", &offset}};
  }
  template <typename Context>
  void operator()(const Context& dev_ctx,
                  const DenseTensor& x,
                  DenseTensor* out) const {
    using XPUType = typename XPUTypeTrait<T>::Type;
    int r = xpu_activation_1attr_func<Context, T, XPUType>(
        dev_ctx, x, out, slope, xpu::hard_sigmoid<XPUType>);
    PADDLE_ENFORCE_XDNN_SUCCESS(r, "hard_sigmoid");
  }
};

template <typename T>
struct XPUHardSwishFunctor : public funcs::BaseActivationFunctor<T> {
  float threshold;
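A matching host-side reference for the forward pass, handy for validating XPU output on the host; the slope/offset defaults of 0.2/0.5 are assumptions taken from the op's usual attribute defaults:

#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <vector>

// Host-side reference for hard_sigmoid: out = clip(slope * x + offset, 0, 1).
std::vector<float> hard_sigmoid_ref(const std::vector<float>& x,
                                    float slope = 0.2f, float offset = 0.5f) {
  std::vector<float> out(x.size());
  for (std::size_t i = 0; i < x.size(); ++i)
    out[i] = std::min(1.0f, std::max(0.0f, slope * x[i] + offset));
  return out;
}

int main() {
  auto y = hard_sigmoid_ref({-4.0f, 0.0f, 4.0f});
  std::printf("%g %g %g\n", y[0], y[1], y[2]);  // 0 0.5 1 (saturates beyond +-2.5)
}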
@@ -428,6 +447,10 @@ DEFINE_XPU_ACTIVATION_KERNEL_WITH_TWO_ATTRS(Softplus,
                                            XPUSoftplusFunctor,
                                            beta,
                                            threshold)
DEFINE_XPU_ACTIVATION_KERNEL_WITH_TWO_ATTRS(HardSigmoid,
                                            XPUHardSigmoidFunctor,
                                            slope,
                                            offset)

template <typename T, typename Context>
void HardSwishRawKernel(const Context& dev_ctx,
@@ -459,6 +482,7 @@ PD_REGISTER_KERNEL(
PD_REGISTER_ACTIVATION_KERNEL(exp, ExpKernel)  // no grad
PD_REGISTER_ACTIVATION_KERNEL(log, LogKernel)
PD_REGISTER_ACTIVATION_KERNEL(leaky_relu, LeakyReluKernel)
PD_REGISTER_ACTIVATION_KERNEL(hard_sigmoid, HardSigmoidKernel)
PD_REGISTER_ACTIVATION_KERNEL(hard_swish_raw, HardSwishRawKernel)
PD_REGISTER_ACTIVATION_KERNEL(mish, MishKernel)
PD_REGISTER_ACTIVATION_KERNEL(pow, PowKernel)