BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit 95113cb2
Authored Mar 17, 2022 by phlrain
fix error; test=develop
Parent: c72cf5fa
1 changed file with 10 additions and 112 deletions (+10 −112): paddle/fluid/operators/activation_op.h. The diff still shows merge-conflict markers (<<<<<<< HEAD / ======= / >>>>>>>) inside the touched region; the commit message and the −112 line count indicate this change removes that accidentally committed conflict block.
paddle/fluid/operators/activation_op.h @ 95113cb2
@@ -296,8 +296,6 @@ USE_PHI_FUNCTOR(Softsign)
template <typename T>
using ELUGradNegativeAlphaFunctor = phi::funcs::ELUGradNegativeAlphaFunctor<T>;

template <typename T>
using ReluCPUFunctor = phi::funcs::ReluCPUFunctor<T>;

template <typename T>
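The surviving lines in this hunk follow the pattern used throughout the phi migration: the legacy operator-side functor name is kept as a thin template alias onto the phi::funcs implementation, so existing call sites keep compiling. A minimal, self-contained sketch of that aliasing pattern (the toy functor body and main() are illustrative stand-ins, not Paddle's actual code):

#include <iostream>

// Stand-in for the phi::funcs implementation namespace.
namespace phi {
namespace funcs {
template <typename T>
struct ReluCPUFunctor {
  // Toy element-wise body; the real functor operates on Eigen expressions.
  T operator()(T x) const {
    return x > static_cast<T>(0) ? x : static_cast<T>(0);
  }
};
}  // namespace funcs
}  // namespace phi

// Stand-in for the operators side: the old name forwards to the phi functor.
namespace operators {
template <typename T>
using ReluCPUFunctor = phi::funcs::ReluCPUFunctor<T>;
}  // namespace operators

int main() {
  operators::ReluCPUFunctor<float> relu;
  std::cout << relu(-2.0f) << " " << relu(3.0f) << "\n";  // prints: 0 3
  return 0;
}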
@@ -717,106 +715,6 @@ struct PowGradFunctor : public BaseActivationFunctor<T> {
  static constexpr ActBwdOpFwdDeps FwdDeps() { return ActBwdOpFwdDeps::kDepX; }
};
template <typename T>
<<<<<<< HEAD
struct HardSigmoidFunctor : public BaseActivationFunctor<T> {
  float slope;
  float offset;
  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"slope", &slope}, {"offset", &offset}};
=======
struct LogitFunctor {
  template <typename Device, typename X, typename Out, typename P>
  void operator()(Device d, X x, Out out, P p, float eps) const {
    // logit(x) = ln(x/(1-x))
    auto tmp_x =
        (x.cwiseMin(static_cast<T>(1.0 - eps))).cwiseMax(static_cast<T>(eps));
    if (!eps) {
      out.device(d) = (x < static_cast<T>(0.0) || x > static_cast<T>(1.0))
                          .select(p.constant(static_cast<T>(NAN)),
                                  (tmp_x / (static_cast<T>(1) - tmp_x)).log());
    } else {
      out.device(d) = (tmp_x / (static_cast<T>(1) - tmp_x)).log();
    }
  }
};

template <typename T>
struct LogitGradFunctor {
  template <typename Device, typename X, typename dOut, typename dX,
            typename P>
  void operator()(Device d, X x, dOut dout, dX dx, P p, float eps) const {
    // logit(x)' = 1/(x*(1-x))
    dx.device(d) =
        (x < static_cast<T>(eps) || x > static_cast<T>(1.0 - eps))
            .select(p.constant(static_cast<T>(0)),
                    dout * (static_cast<T>(1) / ((static_cast<T>(1) - x) * x)));
  }
};

template <typename T>
struct STanhFunctor : public BaseActivationFunctor<T> {
  float scale_a;
  float scale_b;
  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"scale_a", &scale_a}, {"scale_b", &scale_b}};
>>>>>>> 1904572ac8edb57dfb528e711588758002a168dd
  }

  template <typename Device, typename X, typename Out>
  void operator()(Device d, X x, Out out) const {
<<<<<<< HEAD
    auto temp = x * static_cast<T>(slope) + static_cast<T>(offset);
    out.device(d) =
        temp.cwiseMax(static_cast<T>(0)).cwiseMin(static_cast<T>(1));
=======
    out.device(d) =
        static_cast<T>(scale_b) * (static_cast<T>(scale_a) * x).tanh();
>>>>>>> 1904572ac8edb57dfb528e711588758002a168dd
  }
};

template <typename T>
<<<<<<< HEAD
struct HardSigmoidGradFunctor : public BaseActivationFunctor<T> {
  float slope;
  float offset;
  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"slope", &slope}, {"offset", &offset}};
  }
  template <typename Device, typename X, typename Out, typename dOut,
            typename dX>
  void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
    dx.device(d) = dout *
                   ((out > static_cast<T>(0)) * (out < static_cast<T>(1)))
                       .template cast<T>() *
                   static_cast<T>(slope);
  }
  static constexpr ActBwdOpFwdDeps FwdDeps() {
    return ActBwdOpFwdDeps::kDepOut;
  }
=======
struct STanhGradFunctor : public BaseActivationFunctor<T> {
  float scale_a;
  float scale_b;
  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"scale_a", &scale_a}, {"scale_b", &scale_b}};
  }
  template <typename Device, typename X, typename Out, typename dOut,
            typename dX>
  void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
    auto a = static_cast<T>(scale_a);
    auto b = static_cast<T>(scale_b);
    auto temp = (a * x).tanh() * (a * x).tanh();
    dx.device(d) = dout * a * b * (static_cast<T>(1) - temp);
  }
  static constexpr ActBwdOpFwdDeps FwdDeps() { return ActBwdOpFwdDeps::kDepX; }
>>>>>>> 1904572ac8edb57dfb528e711588758002a168dd
};
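For reference, the formulas implemented by the functors in the conflicted block above, written out from the code (a summary added here, not part of the diff):

% HardSigmoid forward and backward (HEAD side)
\mathrm{hard\_sigmoid}(x) = \min\bigl(\max(\mathrm{slope}\cdot x + \mathrm{offset},\,0),\,1\bigr), \qquad
\frac{\partial\,\mathrm{out}}{\partial x} = \mathrm{slope}\cdot\mathbf{1}\{0 < \mathrm{out} < 1\}

% Logit forward and backward (incoming side)
\mathrm{logit}(x) = \ln\frac{x}{1-x}, \qquad
\mathrm{logit}'(x) = \frac{1}{x} + \frac{1}{1-x} = \frac{1}{x(1-x)}

% Scaled tanh (STanh) forward and backward (incoming side)
\mathrm{stanh}(x) = b\,\tanh(a x), \qquad
\frac{\partial}{\partial x}\,\mathrm{stanh}(x) = a\,b\,\bigl(1 - \tanh^{2}(a x)\bigr)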
template <typename T>
struct SwishFunctor : public BaseActivationFunctor<T> {
  float beta;
@@ -1326,14 +1224,14 @@ struct LogGradGradFunctor : public BaseActivationFunctor<T> {
}  // namespace operators
}  // namespace paddle
#define FOR_EACH_ACTIVATION_OP(__macro)                                      \
  __macro(ceil, Ceil, CeilFunctor, ZeroGradFunctor);                          \
  __macro(floor, Floor, FloorFunctor, ZeroGradFunctor);                       \
  __macro(round, Round, RoundFunctor, ZeroGradFunctor);                       \
  __macro(log1p, Log1p, Log1pFunctor, Log1pGradFunctor);                      \
  __macro(log2, Log2, Log2Functor, Log2GradFunctor);                          \
  __macro(log10, Log10, Log10Functor, Log10GradFunctor);                      \
  __macro(soft_relu, SoftRelu, SoftReluFunctor, SoftReluGradFunctor);         \
  __macro(relu6, Relu6, Relu6Functor, Relu6GradFunctor);                      \
  __macro(swish, Swish, SwishFunctor, SwishGradFunctor);                      \
  __macro(hard_swish, HardSwish, HardSwishFunctor, HardSwishGradFunctor);
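FOR_EACH_ACTIVATION_OP is an X-macro: the caller passes in a macro that is stamped out once per (op name, class prefix, forward functor, backward functor) entry, typically to register kernels for every listed activation in one place. A generic, self-contained sketch of how such a list macro is consumed (the FOR_EACH_DEMO_OP list and REGISTER_ACTIVATION consumer below are hypothetical, not Paddle's actual registration code):

#include <iostream>

// Trimmed-down list macro in the same shape as FOR_EACH_ACTIVATION_OP.
#define FOR_EACH_DEMO_OP(__macro) \
  __macro(ceil, Ceil);            \
  __macro(floor, Floor);          \
  __macro(round, Round);

// Hypothetical consumer: print one line per entry. A real consumer would
// instantiate kernels or register operators instead of printing.
#define REGISTER_ACTIVATION(op_name, prefix) \
  std::cout << "register op: " #op_name " (" #prefix ")\n"

int main() {
  FOR_EACH_DEMO_OP(REGISTER_ACTIVATION);
  return 0;
}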