机器未来 / Paddle (forked from PaddlePaddle / Paddle)
Unverified commit 8622f8c2
Authored Sep 28, 2020 by Jack Zhou; committed via GitHub, Sep 28, 2020

[Cherry-Pick] Add Log double grad kernel (#27604)

Parent: 5634d2ca
Showing 4 changed files with 122 additions and 1 deletion (+122 −1)
paddle/fluid/operators/activation_op.cc (+51 −0)
paddle/fluid/operators/activation_op.cu (+12 −0)
paddle/fluid/operators/activation_op.h (+35 −1)
python/paddle/fluid/tests/unittests/test_activation_nn_grad.py (+24 −0)
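For context, the math this patch implements (a short calculus derivation; the dout / ddx / ddout names follow the comments in the patch): the first-order backward of y = log(x) is dx = dout / x. The double-grad op differentiates that expression with respect to both of its inputs, x and dout, and contracts each partial with the incoming grad-of-grad ddx:

\[
dx = \frac{dout}{x}, \qquad
ddout = \frac{\partial\, dx}{\partial\, dout}\, ddx = \frac{ddx}{x}, \qquad
dx_{\mathrm{new}} = \frac{\partial\, dx}{\partial x}\, ddx
  = -\frac{dout \cdot ddx}{x^{2}}
  = -\Bigl(\frac{dout}{x}\Bigr)\Bigl(\frac{ddx}{x}\Bigr).
\]

These two formulas are exactly what LogGradGradFunctor computes below.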
paddle/fluid/operators/activation_op.cc
@@ -839,6 +839,28 @@ class SquareDoubleGradMaker : public ::paddle::framework::SingleGradOpMaker<T> {
  }
};

// log Grad: dx = dout / x
// log Grad Grad: ddout = ddx / x; dx = -(dout / x) * (ddx / x)
template <typename T>
class LogDoubleGradMaker : public ::paddle::framework::SingleGradOpMaker<T> {
 public:
  using ::paddle::framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> op) const override {
    op->SetType("log_grad_grad");
    op->SetInput("X", this->Input("X"));
    // X@GRAD@GRAD: ddx
    op->SetInput("DDX", this->OutputGrad(framework::GradVarName("X")));
    op->SetInput("DOut", this->Input(framework::GradVarName("Out")));
    op->SetAttrMap(this->Attrs());
    // X@GRAD: dx
    op->SetOutput("DX", this->InputGrad("X"));
    // Out@GRAD@GRAD: ddy
    op->SetOutput("DDOut", this->InputGrad(framework::GradVarName("Out")));
  }
};

DECLARE_INPLACE_OP_INFERER(ActivationGradOpInplaceInference,
                           {framework::GradVarName("Out"),
                            framework::GradVarName("X")});
@@ -1219,3 +1241,32 @@ REGISTER_OP_CPU_KERNEL(
    ops::ActivationDoubleGradKernel<plat::CPUDeviceContext,
                                    ops::AbsGradGradFunctor<int64_t>>);
/* ========================================================================== */

/* ==========================   Log register  ==================================*/
REGISTER_OPERATOR(
    log, ops::ActivationOp, ops::LogOpMaker, ops::ActivationOpInferVarType,
    ops::ActivationGradOpMaker<ops::LogGradFunctor<float>::FwdDeps(),
                               paddle::framework::OpDesc>,
    ops::ActivationGradOpMaker<ops::LogGradFunctor<float>::FwdDeps(),
                               paddle::imperative::OpBase>,
    ops::ActFwdInplaceInferer);
REGISTER_OPERATOR(log_grad, ops::ActivationOpGrad,
                  ops::ActivationGradOpInplaceInference,
                  ops::LogDoubleGradMaker<paddle::framework::OpDesc>,
                  ops::LogDoubleGradMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(
    log_grad_grad,
    ops::ActivationOpDoubleGrad<ops::LogGradGradFunctor<float>::FwdDeps()>,
    ops::ActivationDoubleGradOpInplaceInference);

REGISTER_ACTIVATION_CPU_KERNEL(log, Log, LogFunctor, LogGradFunctor);

REGISTER_OP_CPU_KERNEL(
    log_grad_grad,
    ops::LogDoubleGradKernel<plat::CPUDeviceContext,
                             ops::LogGradGradFunctor<float>>,
    ops::LogDoubleGradKernel<plat::CPUDeviceContext,
                             ops::LogGradGradFunctor<double>>,
    ops::LogDoubleGradKernel<plat::CPUDeviceContext,
                             ops::LogGradGradFunctor<plat::float16>>);
/* ========================================================================== */
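Taken together, these registrations let autograd differentiate log's gradient a second time. A minimal dygraph sketch of the end-to-end effect, assuming a Paddle 2.x-style imperative API (paddle.to_tensor and paddle.grad are assumptions here, not part of this patch):

import paddle

# Assumes a Paddle build containing this patch, running in dygraph mode.
x = paddle.to_tensor([0.5, 1.0, 2.0])
x.stop_gradient = False
y = paddle.log(x)

# First-order gradient: dy/dx = 1 / x. create_graph=True keeps the gradient
# itself differentiable, which is what routes through log_grad_grad.
(dx,) = paddle.grad(y, x, create_graph=True)

# Second-order gradient, served by the kernels registered above:
# d2y/dx2 = -1 / x**2.
(d2x,) = paddle.grad(dx, x)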
paddle/fluid/operators/activation_op.cu
@@ -193,3 +193,15 @@ REGISTER_OP_CUDA_KERNEL(
    ops::ActivationDoubleGradKernel<paddle::platform::CUDADeviceContext,
                                    ops::AbsGradGradFunctor<int64_t>>);
/* ========================================================================== */

/* ==========================   Log register  ==================================*/
REGISTER_ACTIVATION_CUDA_KERNEL(log, Log, LogFunctor, LogGradFunctor);

REGISTER_OP_CUDA_KERNEL(
    log_grad_grad,
    ops::LogDoubleGradKernel<plat::CUDADeviceContext,
                             ops::LogGradGradFunctor<float>>,
    ops::LogDoubleGradKernel<plat::CUDADeviceContext,
                             ops::LogGradGradFunctor<double>>,
    ops::LogDoubleGradKernel<plat::CUDADeviceContext,
                             ops::LogGradGradFunctor<plat::float16>>);
/* ========================================================================== */
paddle/fluid/operators/activation_op.h
@@ -1551,6 +1551,10 @@ class SquareDoubleGradKernel
  }
};

template <typename DeviceContext, typename Functor>
class LogDoubleGradKernel
    : public SquareDoubleGradKernel<DeviceContext, Functor> {};

template <typename DeviceContext, typename Functor>
class ELUDoubleGradKernel
    : public framework::OpKernel<typename Functor::ELEMENT_TYPE> {
@@ -1740,6 +1744,37 @@ class PowGradKernel
    functor(*place, x, out, dout, dx);
  }
};

template <typename T>
struct LogGradGradFunctor : public BaseActivationFunctor<T> {
  template <typename Device>
  void operator()(const Device& dev, const framework::Tensor* X,
                  const framework::Tensor* ddX, framework::Tensor* ddOut,
                  const framework::Tensor* dOut, framework::Tensor* dX) const {
    auto* d = dev.eigen_device();
    auto ddx = framework::EigenVector<T>::Flatten(
        GET_DATA_SAFELY(ddX, "Input", "DDX", "LogGradGrad"));
    auto x = framework::EigenVector<T>::Flatten(
        GET_DATA_SAFELY(X, "Input", "X", "LogGradGrad"));
    // ddout = ddx / x; dx = -(dout / x) * (ddx / x)
    // calculate dx first, so ddout can inplace ddx
    if (dX) {
      auto dout = framework::EigenVector<T>::Flatten(
          GET_DATA_SAFELY(dOut, "Output", "DOut", "LogGradGrad"));
      auto dx = framework::EigenVector<T>::Flatten(
          GET_DATA_SAFELY(dX, "Output", "DX", "LogGradGrad"));
      dx.device(*d) = dout * static_cast<T>(-1) * ddx / (x * x);
    }
    if (ddOut) {
      auto ddout = framework::EigenVector<T>::Flatten(
          GET_DATA_SAFELY(ddOut, "Output", "DDOut", "LogGradGrad"));
      ddout.device(*d) = ddx * static_cast<T>(1) / x;
    }
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

}  // namespace operators
}  // namespace paddle
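The functor's two branches are easy to sanity-check numerically. A standalone sketch, where log_grad_grad is a hypothetical NumPy mirror of the formulas above (not Paddle code):

import numpy as np

def log_grad_grad(x, dout, ddx):
    ddout = ddx / x               # ddout = ddx / x
    dx = -(dout / x) * (ddx / x)  # dx = -(dout / x) * (ddx / x)
    return ddout, dx

rng = np.random.default_rng(0)
x = rng.uniform(0.1, 1.0, size=8)  # keep x away from 0, as the unit test does
dout = rng.normal(size=8)
ddx = rng.normal(size=8)

# dx should match a central finite difference of the first-order backward
# dout / x with respect to x, contracted with ddx.
eps = 1e-6
fd = (dout / (x + eps) - dout / (x - eps)) / (2 * eps) * ddx
_, dx = log_grad_grad(x, dout, ddx)
assert np.allclose(fd, dx, rtol=1e-4)

The "calculate dx first" ordering in the functor matters because DDOut is allowed to reuse DDX's buffer (see ActivationDoubleGradOpInplaceInference registered on log_grad_grad in activation_op.cc); writing ddout first would clobber the ddx values that dx still needs.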
@@ -1758,7 +1793,6 @@ class PowGradKernel
  __macro(asin, Asin, AsinFunctor, AsinGradFunctor);                          \
  __macro(round, Round, RoundFunctor, ZeroGradFunctor);                       \
  __macro(reciprocal, Reciprocal, ReciprocalFunctor, ReciprocalGradFunctor);  \
- __macro(log, Log, LogFunctor, LogGradFunctor);                              \
  __macro(log1p, Log1p, Log1pFunctor, Log1pGradFunctor);                      \
  __macro(brelu, BRelu, BReluFunctor, BReluGradFunctor);                      \
  __macro(soft_relu, SoftRelu, SoftReluFunctor, SoftReluGradFunctor);         \
python/paddle/fluid/tests/unittests/test_activation_nn_grad.py
@@ -123,6 +123,30 @@ class TestSqrtDoubleGradCheck(unittest.TestCase):
        self.func(p)


class TestLogDoubleGradCheck(unittest.TestCase):
    @prog_scope()
    def func(self, place):
        shape = [2, 3, 7, 9]
        eps = 1e-6
        dtype = np.float64

        x = layers.data('x', shape, False, dtype)
        x.persistable = True
        y = layers.log(x)
        x_arr = np.random.uniform(0.1, 1, shape).astype(dtype)

        gradient_checker.double_grad_check(
            [x], y, x_init=x_arr, place=place, eps=eps)

    def test_grad(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestSquareDoubleGradCheck(unittest.TestCase):
    @prog_scope()
    def func(self, place):
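The new TestLogDoubleGradCheck samples x uniformly from [0.1, 1) so that log(x) and its derivatives 1/x and -1/x^2 stay well-conditioned for the finite-difference step eps = 1e-6; gradient_checker.double_grad_check then compares the analytic double grad, which exercises the new log_grad_grad kernels, against numeric differences on CPUPlace and, when CUDA is compiled in, CUDAPlace(0).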