机器未来 / Paddle (forked from PaddlePaddle / Paddle)

Commit 6c7a03bd
Authored March 17, 2022 by phlrain
Parent: c7c81fe0

update; test=develop
Showing 6 changed files with 26 additions and 149 deletions (+26 −149)
paddle/fluid/operators/activation_op.h              +0   -93
paddle/phi/kernels/activation_kernel.h              +1   -0
paddle/phi/kernels/cpu/activation_grad_kernel.cc    +1   -1
paddle/phi/kernels/cpu/activation_kernel.cc         +16  -25
paddle/phi/kernels/gpu/activation_grad_kernel.cu    +1   -4
paddle/phi/kernels/gpu/activation_kernel.cu         +7   -26
paddle/fluid/operators/activation_op.h
@@ -466,99 +466,6 @@ using ReluGradGradFunctor = phi::funcs::ReluGradGradFunctor<T>;
-template <typename T>
-using ReluCUDAFunctor = phi::funcs::ReluCUDAFunctor<T>;
-
-// tanhshrink(x) = x - tanh(x)
-// where tanh(x) = (exp(x) - exp(-x)) / (exp(x) + exp(-x))
-template <typename T>
-struct TanhShrinkFunctor : public BaseActivationFunctor<T> {
-  template <typename Device, typename X, typename Out>
-  void operator()(Device d, X x, Out out) const {
-    out.device(d) = x - x.tanh();
-  }
-};
-
-template <typename T>
-struct TanhShrinkGradFunctor : public BaseActivationFunctor<T> {
-  template <typename Device,
-            typename X,
-            typename Out,
-            typename dOut,
-            typename dX>
-  void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
-    dx.device(d) = dout * (x.tanh() * x.tanh());
-  }
-
-  static constexpr ActBwdOpFwdDeps FwdDeps() { return ActBwdOpFwdDeps::kDepX; }
-};
-
-// tanhshrink(x) = x - tanh(x)
-// where tanh(x) = (exp(x) - exp(-x)) / (exp(x) + exp(-x))
-template <typename T>
-struct HardShrinkFunctor : public BaseActivationFunctor<T> {
-  float threshold;
-
-  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
-    return {{"threshold", &threshold}};
-  }
-
-  template <typename Device, typename X, typename Out>
-  void operator()(Device d, X x, Out out) const {
-    auto temp1 = x < static_cast<T>(threshold * -1.f);
-    auto temp2 = x > static_cast<T>(threshold);
-    out.device(d) = x * (temp1 || temp2).template cast<T>();
-  }
-};
-
-template <typename T>
-struct HardShrinkGradFunctor : public BaseActivationFunctor<T> {
-  float threshold;
-
-  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
-    return {{"threshold", &threshold}};
-  }
-
-  template <typename Device,
-            typename X,
-            typename Out,
-            typename dOut,
-            typename dX>
-  void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
-    auto temp1 = x < static_cast<T>(threshold * -1.f);
-    auto temp2 = x > static_cast<T>(threshold);
-    dx.device(d) = dout * (temp1 || temp2).template cast<T>();
-  }
-
-  static constexpr ActBwdOpFwdDeps FwdDeps() { return ActBwdOpFwdDeps::kDepX; }
-};
-
-// softshrink(x) = x - lambda, if x > lambda; x + lambda, if x < -lambda; 0
-// otherwise
-template <typename T>
-struct SoftShrinkFunctor : public BaseActivationFunctor<T> {
-  float lambda;
-
-  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
-    return {{"lambda", &lambda}};
-  }
-
-  template <typename Device, typename X, typename Out>
-  void operator()(Device d, X x, Out out) const {
-    auto lambdaT = static_cast<T>(lambda);
-    auto temp1 = (x > lambdaT).template cast<T>();
-    auto temp2 = (x < -lambdaT).template cast<T>();
-    out.device(d) = temp1 * (x - lambdaT) + temp2 * (x + lambdaT);
-  }
-};
-
-template <typename T>
-struct SoftShrinkGradFunctor : public BaseActivationFunctor<T> {
-  float lambda;
-
-  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
-    return {{"lambda", &lambda}};
-  }
-
-  template <typename Device,
-            typename X,
-            typename Out,
-            typename dOut,
-            typename dX>
-  void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
-    auto lambdaT = static_cast<T>(lambda);
-    auto temp1 = (x > lambdaT).template cast<T>();
-    auto temp2 = (x < -lambdaT).template cast<T>();
-    dx.device(d) = dout * (temp1 + temp2).template cast<T>();
-  }
-
-  static constexpr ActBwdOpFwdDeps FwdDeps() { return ActBwdOpFwdDeps::kDepX; }
-};
-
 template <typename T>
 struct SqrtGradFunctor : public BaseActivationFunctor<T> {
   template <typename Device,
             typename X,
             typename Out,
             typename dOut,
...
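For reference, the TanhShrink functors deleted above implement tanhshrink(x) = x - tanh(x); since d/dx [x - tanh(x)] = 1 - sech^2(x) = tanh^2(x), the grad functor multiplies dout by x.tanh() * x.tanh(). A self-contained sketch of the same math in plain Eigen, outside Paddle (the test values are illustrative only):

// Standalone sketch (not Paddle code): the math implemented by the
// deleted TanhShrink functors, written against plain Eigen tensors.
// Forward:  tanhshrink(x) = x - tanh(x)
// Backward: d/dx [x - tanh(x)] = 1 - sech^2(x) = tanh^2(x)
#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  Eigen::Tensor<float, 1> x(4);
  x.setValues({-2.0f, -0.5f, 0.5f, 2.0f});

  Eigen::Tensor<float, 1> out = x - x.tanh();                 // forward pass
  Eigen::Tensor<float, 1> dout(4);
  dout.setConstant(1.0f);                                     // upstream gradient
  Eigen::Tensor<float, 1> dx = dout * (x.tanh() * x.tanh());  // backward pass

  for (int i = 0; i < 4; ++i) {
    std::cout << x(i) << " -> " << out(i) << " (grad " << dx(i) << ")\n";
  }
  return 0;
}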
paddle/phi/kernels/activation_kernel.h
@@ -65,6 +65,7 @@ DECLARE_ACTIVATION_KERNEL_WITH_ONE_ATTRS(HardShrink, threshold)
 DECLARE_ACTIVATION_KERNEL_WITH_ONE_ATTRS(Elu, alpha)
 DECLARE_ACTIVATION_KERNEL_WITH_TWO_ATTRS(BRelu, t_min, t_max)
+DECLARE_ACTIVATION_KERNEL_WITH_TWO_ATTRS(STanh, scale_a, scale_b)
 
 template <typename T, typename Context>
 void LogitKernel(const Context& dev_ctx,
...
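The added STanh line declares a plain phi kernel through the DECLARE_ACTIVATION_KERNEL_WITH_TWO_ATTRS helper. The macro defined earlier in this header is authoritative; as a hedged sketch, the declaration it produces plausibly has this shape:

// Plausible shape of the declaration produced for STanh (a sketch;
// the real expansion of DECLARE_ACTIVATION_KERNEL_WITH_TWO_ATTRS in
// this header, including parameter order, is authoritative).
template <typename T, typename Context>
void STanhKernel(const Context& dev_ctx,
                 const DenseTensor& x,
                 float scale_a,
                 float scale_b,
                 DenseTensor* out);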
paddle/phi/kernels/cpu/activation_grad_kernel.cc
@@ -103,7 +103,7 @@ DEFINE_CPU_ACTIVATION_GRAD_KERNEL_DEPX(Acosh, AcoshGradFunctor);
 DEFINE_CPU_ACTIVATION_GRAD_KERNEL_DEPX(Atanh, AtanhGradFunctor);
 DEFINE_CPU_ACTIVATION_GRAD_KERNEL_DEPX(TanhShrink, TanhShrinkGradFunctor);
 DEFINE_CPU_ACTIVATION_GRAD_KERNEL_DEPX(Silu, SiluGradFunctor);
-DEFINE_CPU_ACTIVATION_GRAD_KERNEL_DepOut(Exp, ExpGradFunctor);
+DEFINE_CPU_ACTIVATION_GRAD_KERNEL_DEPOUT(Exp, ExpGradFunctor);
 DEFINE_CPU_ACTIVATION_GRAD_KERNEL_DEPOUT(Relu, ReluGradFunctor);
 DEFINE_CPU_ACTIVATION_GRAD_KERNEL_DEPOUT(Tanh, TanhGradFunctor);
...
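The macro suffix fixed here also encodes a real distinction: the _DEPOUT variants are for activations whose backward pass can be written purely in terms of the forward output (exp, relu, tanh), while the _DEPX variants need the forward input (tanhshrink, silu), matching the kDepX returned by FwdDeps() in the functors shown earlier. A minimal standalone illustration of the two gradient styles (plain C++, not the Paddle macros):

#include <cmath>

// DepOut style: for out = exp(x), the gradient reuses the forward
// output directly, dx = dout * out, so x need not be kept alive.
float exp_grad(float out, float dout) { return dout * out; }

// DepX style: for out = tanhshrink(x) = x - tanh(x), the gradient
// needs the forward input, dx = dout * tanh(x)^2.
float tanhshrink_grad(float x, float dout) {
  const float t = std::tanh(x);
  return dout * t * t;
}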
paddle/phi/kernels/cpu/activation_kernel.cc
@@ -73,36 +73,27 @@ DEFINE_CPU_ACTIVATION_KERNEL(Relu, ReluCPUFunctor)
 DEFINE_CPU_ACTIVATION_KERNEL(Tanh, TanhFunctor)
 DEFINE_CPU_ACTIVATION_KERNEL(TanhShrink, TanhShrinkFunctor)
 DEFINE_CPU_ACTIVATION_KERNEL(Silu, SiluFunctor)
-DEFINE_CPU_ACTIVATION_KERNEL(Exp, funcs::ExpFunctor)
-DEFINE_CPU_ACTIVATION_KERNEL(Expm1, funcs::Expm1Functor)
-DEFINE_CPU_ACTIVATION_KERNEL(Reciprocal, funcs::ReciprocalFunctor)
-DEFINE_CPU_ACTIVATION_KERNEL(Square, funcs::SquareFunctor)
-DEFINE_CPU_ACTIVATION_KERNEL(Sqrt, funcs::SqrtFunctor)
-DEFINE_CPU_ACTIVATION_KERNEL(Rsqrt, funcs::RsqrtFunctor)
-DEFINE_CPU_ACTIVATION_KERNEL(Softsign, funcs::SoftsignFunctor)
+DEFINE_CPU_ACTIVATION_KERNEL(Exp, ExpFunctor)
+DEFINE_CPU_ACTIVATION_KERNEL(Expm1, Expm1Functor)
+DEFINE_CPU_ACTIVATION_KERNEL(Reciprocal, ReciprocalFunctor)
+DEFINE_CPU_ACTIVATION_KERNEL(Square, SquareFunctor)
+DEFINE_CPU_ACTIVATION_KERNEL(Sqrt, SqrtFunctor)
+DEFINE_CPU_ACTIVATION_KERNEL(Softsign, SoftsignFunctor)
+DEFINE_CPU_ACTIVATION_KERNEL(Rsqrt, RsqrtFunctor)
 DEFINE_CPU_ACT_KERNEL_WITH_ONE_ATTRS(LeakyRelu, LeakyReluFunctor, alpha)
 DEFINE_CPU_ACT_KERNEL_WITH_ONE_ATTRS(ThresholdedRelu,
                                      ThresholdedReluFunctor,
                                      threshold)
-DEFINE_CPU_ACT_KERNEL_WITH_ONE_ATTRS(Mish, funcs::MishFunctor, threshold)
-DEFINE_CPU_ACT_KERNEL_WITH_TWO_ATTRS(BRelu, funcs::BReluFunctor, t_min, t_max)
-DEFINE_CPU_ACT_KERNEL_WITH_TWO_ATTRS(STanh, funcs::STanhFunctor, scale_a, scale_b)
-DEFINE_CPU_ACT_KERNEL_WITH_TWO_ATTRS(Softplus, funcs::SoftplusFunctor, beta, threshold)
+DEFINE_CPU_ACT_KERNEL_WITH_ONE_ATTRS(Mish, MishFunctor, threshold)
+DEFINE_CPU_ACT_KERNEL_WITH_TWO_ATTRS(BRelu, BReluFunctor, t_min, t_max)
+DEFINE_CPU_ACT_KERNEL_WITH_TWO_ATTRS(STanh, STanhFunctor, scale_a, scale_b)
+DEFINE_CPU_ACT_KERNEL_WITH_TWO_ATTRS(Softplus, SoftplusFunctor, beta, threshold)
 DEFINE_CPU_ACT_KERNEL_WITH_ONE_ATTRS(HardShrink, HardShrinkFunctor, threshold)
 DEFINE_CPU_ACT_KERNEL_WITH_ONE_ATTRS(SoftShrink, SoftShrinkFunctor, lambda)
 DEFINE_CPU_ACT_KERNEL_WITH_ONE_ATTRS(Elu, ELUFunctor, alpha)
 DEFINE_CPU_ACT_KERNEL_WITH_TWO_ATTRS(BRelu, BReluFunctor, t_min, t_max)
 
 }  // namespace phi
 
 PD_REGISTER_KERNEL(relu, CPU, ALL_LAYOUT, phi::ReluKernel, float, double) {}
...
@@ -130,12 +121,12 @@ PD_REGISTER_ACTIVATION_KERNEL(tanh_shrink, TanhShrinkKernel)
 PD_REGISTER_ACTIVATION_KERNEL(elu, EluKernel)
 PD_REGISTER_ACTIVATION_KERNEL(silu, SiluKernel)
 // PD_REGISTER_ACTIVATION_KERNEL(mish, Mish)
-PD_REGISTER_ACTIVATION_KERNEL(stanh, STanh)
-PD_REGISTER_ACTIVATION_KERNEL(reciprocal, Reciprocal)
-PD_REGISTER_ACTIVATION_KERNEL(sqrt, Sqrt)
-PD_REGISTER_ACTIVATION_KERNEL(rsqrt, Rsqrt)
+PD_REGISTER_ACTIVATION_KERNEL(stanh, STanhKernel)
+PD_REGISTER_ACTIVATION_KERNEL(reciprocal, ReciprocalKernel)
+PD_REGISTER_ACTIVATION_KERNEL(sqrt, SqrtKernel)
+PD_REGISTER_ACTIVATION_KERNEL(rsqrt, RsqrtKernel)
 // PD_REGISTER_ACTIVATION_KERNEL(softplus, Softplus)
-PD_REGISTER_ACTIVATION_KERNEL(softsign, Softsign)
+PD_REGISTER_ACTIVATION_KERNEL(softsign, SoftsignKernel)
 PD_REGISTER_KERNEL(
     exp, CPU, ALL_LAYOUT, phi::ExpKernel, float, double, int, int64_t) {}
...
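Note the two registration forms in the hunk above: most activations go through the PD_REGISTER_ACTIVATION_KERNEL shorthand, while exp uses the full PD_REGISTER_KERNEL so it can also register the int and int64_t dtypes. A hedged sketch of what the shorthand plausibly wraps (the real macro defined elsewhere in this file is authoritative):

// Sketch (assumption, not the verbatim Paddle macro): the shorthand
// likely fixes the CPU backend, ALL_LAYOUT, and the float/double
// dtype list shared by most activation kernels.
#define PD_REGISTER_ACTIVATION_KERNEL(name, func) \
  PD_REGISTER_KERNEL(name, CPU, ALL_LAYOUT, phi::func, float, double) {}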
paddle/phi/kernels/gpu/activation_grad_kernel.cu
@@ -157,7 +157,7 @@ DEFINE_GPU_ACTIVATION_GRAD_KERNEL_DEPX(Acosh, CudaAcoshGradFunctor);
 DEFINE_GPU_ACTIVATION_GRAD_KERNEL_DEPX(Atanh, CudaAtanhGradFunctor);
 DEFINE_GPU_ACTIVATION_GRAD_KERNEL_DEPX(TanhShrink, CudaTanhShrinkGradFunctor);
 DEFINE_GPU_ACTIVATION_GRAD_KERNEL_DEPX(Silu, CudaSiluGradFunctor);
-DEFINE_GPU_ACTIVATION_GRAD_KERNEL_DepOut(Exp, CudaExpGradFunctor);
+DEFINE_GPU_ACTIVATION_GRAD_KERNEL_DEPOUT(Exp, CudaExpGradFunctor);
 DEFINE_GPU_ACT_GRAD_KERNEL_WITH_ONE_ATTRS_DEPX(LeakyRelu,
                                                CudaLeakyReluGradFunctor,
...
@@ -265,7 +265,6 @@ PD_REGISTER_ACTIVATION_GRAD_KERNEL(leaky_relu_double_grad,
                                    LeakyReluDoubleGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(thresholded_relu_grad,
                                    ThresholdedReluGradKernel)
-<<<<<<< HEAD
 PD_REGISTER_KERNEL(exp_grad,
                    GPU,
...
@@ -275,11 +274,9 @@ PD_REGISTER_KERNEL(exp_grad,
                    double,
                    int,
                    int64_t) {}
-=======
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(soft_shrink_grad, SoftShrinkGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(hard_shrink_grad, HardShrinkGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(tanh_shrink_grad, TanhShrinkGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(silu_grad, SiluGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(elu_grad, EluGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(elu_double_grad, EluDoubleGradKernel)
->>>>>>> 6849d33b62cacccb27797375a212e37a47ca9484
paddle/phi/kernels/gpu/activation_kernel.cu
@@ -77,28 +77,6 @@ void ActivationGPUImpl(const Context& dev_ctx,
         dev_ctx, x, out, functor); \
   }
-<<<<<<< HEAD
-DEFINE_GPU_ACTIVATION_KERNEL(Cos, funcs::CudaCosFunctor<T>)
-DEFINE_GPU_ACTIVATION_KERNEL(Tan, funcs::CudaTanFunctor<T>)
-DEFINE_GPU_ACTIVATION_KERNEL(Acos, funcs::CudaAcosFunctor<T>)
-DEFINE_GPU_ACTIVATION_KERNEL(Sin, funcs::CudaSinFunctor<T>)
-DEFINE_GPU_ACTIVATION_KERNEL(Asin, funcs::CudaAsinFunctor<T>)
-DEFINE_GPU_ACTIVATION_KERNEL(Atan, funcs::CudaAtanFunctor<T>)
-DEFINE_GPU_ACTIVATION_KERNEL(Sinh, funcs::CudaSinhFunctor<T>)
-DEFINE_GPU_ACTIVATION_KERNEL(Cosh, funcs::CudaCoshFunctor<T>)
-DEFINE_GPU_ACTIVATION_KERNEL(Asinh, funcs::CudaAsinhFunctor<T>)
-DEFINE_GPU_ACTIVATION_KERNEL(Acosh, funcs::CudaAcoshFunctor<T>)
-DEFINE_GPU_ACTIVATION_KERNEL(Atanh, funcs::CudaAtanhFunctor<T>)
-DEFINE_GPU_ACTIVATION_KERNEL(Relu, funcs::CudaReluFunctor<T>)
-DEFINE_GPU_ACTIVATION_KERNEL(Tanh, funcs::CudaTanhFunctor<T>)
-DEFINE_GPU_ACTIVATION_KERNEL(Exp, funcs::CudaExpFunctor<T>)
-DEFINE_GPU_ACTIVATION_KERNEL(Expm1, funcs::CudaExpm1Functor<T>)
-DEFINE_GPU_ACTIVATION_KERNEL(Reciprocal, funcs::CudaReciprocalFunctor<T>)
-DEFINE_GPU_ACTIVATION_KERNEL(Square, funcs::CudaSquareFunctor<T>)
-DEFINE_GPU_ACTIVATION_KERNEL(Sqrt, funcs::CudaSqrtFunctor<T>)
-DEFINE_GPU_ACTIVATION_KERNEL(Rsqrt, funcs::CudaRsqrtFunctor<T>)
-DEFINE_GPU_ACTIVATION_KERNEL(Softsign, funcs::CudaSoftsignFunctor<T>)
-=======
 DEFINE_GPU_ACTIVATION_KERNEL(Cos, CudaCosFunctor)
 DEFINE_GPU_ACTIVATION_KERNEL(Tan, CudaTanFunctor)
 DEFINE_GPU_ACTIVATION_KERNEL(Acos, CudaAcosFunctor)
...
@@ -114,7 +92,13 @@ DEFINE_GPU_ACTIVATION_KERNEL(Relu, CudaReluFunctor)
 DEFINE_GPU_ACTIVATION_KERNEL(Tanh, CudaTanhFunctor)
 DEFINE_GPU_ACTIVATION_KERNEL(TanhShrink, CudaTanhShrinkFunctor)
 DEFINE_GPU_ACTIVATION_KERNEL(Silu, CudaSiluFunctor)
->>>>>>> 6849d33b62cacccb27797375a212e37a47ca9484
+DEFINE_GPU_ACTIVATION_KERNEL(Exp, CudaExpFunctor)
+DEFINE_GPU_ACTIVATION_KERNEL(Expm1, CudaExpm1Functor)
+DEFINE_GPU_ACTIVATION_KERNEL(Reciprocal, CudaReciprocalFunctor)
+DEFINE_GPU_ACTIVATION_KERNEL(Square, CudaSquareFunctor)
+DEFINE_GPU_ACTIVATION_KERNEL(Sqrt, CudaSqrtFunctor)
+DEFINE_GPU_ACTIVATION_KERNEL(Rsqrt, CudaRsqrtFunctor)
+DEFINE_GPU_ACTIVATION_KERNEL(Softsign, CudaSoftsignFunctor)
 DEFINE_GPU_ACT_KERNEL_WITH_ONE_ATTRS(LeakyRelu, CudaLeakyReluFunctor, alpha)
 DEFINE_GPU_ACT_KERNEL_WITH_ONE_ATTRS(ThresholdedRelu,
...
@@ -181,7 +165,6 @@ PD_REGISTER_ACTIVATION_KERNEL(tanh, TanhKernel)
 PD_REGISTER_ACTIVATION_KERNEL(brelu, BReluKernel)
 PD_REGISTER_ACTIVATION_KERNEL(thresholded_relu, ThresholdedReluKernel)
 PD_REGISTER_ACTIVATION_KERNEL(leaky_relu, LeakyReluKernel)
-<<<<<<< HEAD
 PD_REGISTER_ACTIVATION_KERNEL(mish, MishKernel)
 PD_REGISTER_ACTIVATION_KERNEL(stanh, StanhKernel)
 PD_REGISTER_ACTIVATION_KERNEL(reciprocal, ReciprocalKernel)
...
@@ -202,10 +185,8 @@ PD_REGISTER_KERNEL(expm1,
 PD_REGISTER_KERNEL(logit, GPU, ALL_LAYOUT, phi::LogitKernel, float, double) {}
 PD_REGISTER_KERNEL(
     square, GPU, ALL_LAYOUT, phi::SquareKernel, float, double, int, int64_t) {}
-=======
 PD_REGISTER_ACTIVATION_KERNEL(hard_shrink, HardShrinkKernel)
 PD_REGISTER_ACTIVATION_KERNEL(soft_shrink, SoftShrinkKernel)
 PD_REGISTER_ACTIVATION_KERNEL(tanh_shrink, TanhShrinkKernel)
 PD_REGISTER_ACTIVATION_KERNEL(elu, EluKernel)
 PD_REGISTER_ACTIVATION_KERNEL(silu, SiluKernel)
->>>>>>> 6849d33b62cacccb27797375a212e37a47ca9484
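The hunk headers above expose the pieces this file builds on: ActivationGPUImpl(const Context& dev_ctx, ...) and a trailing "dev_ctx, x, out, functor); \" line from the macro body. A hedged sketch of how DEFINE_GPU_ACTIVATION_KERNEL plausibly ties them together (only the names come from the hunks; the body is an assumption, not the verbatim Paddle macro):

// Sketch of the define-by-functor pattern (assumption; only
// ActivationGPUImpl and the macro name appear in the hunks above).
#define DEFINE_GPU_ACTIVATION_KERNEL(name, functor_class)      \
  template <typename T, typename Context>                      \
  void name##Kernel(const Context& dev_ctx,                    \
                    const DenseTensor& x,                      \
                    DenseTensor* out) {                        \
    funcs::functor_class<T> functor;                           \
    ActivationGPUImpl<T, Context, funcs::functor_class<T>>(    \
        dev_ctx, x, out, functor);                             \
  }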