BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit 44703329
Authored Sep 04, 2017 by dangqingqing
Make some operators correctly handle gradients for multiple inputs.
Parent: 35420274
Showing 8 changed files with 72 additions and 40 deletions.
paddle/operators/mul_op.cc (+2, -2)
paddle/operators/mul_op.h (+20, -16)
paddle/operators/rowwise_add_op.cc (+4, -2)
paddle/operators/rowwise_add_op.h (+14, -10)
paddle/operators/scatter_op.cc (+2, -2)
paddle/operators/scatter_op.h (+6, -4)
python/paddle/v2/framework/tests/gradient_checker.py (+22, -3)
python/paddle/v2/framework/tests/test_mul_op.py (+2, -1)
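All six C++ changes apply the same fix: a gradient operator must not assume that every gradient output exists, because a caller may mark some inputs as not needing gradients, in which case the corresponding output variable is null. Before touching a gradient output, the operator now checks the pointer. Below is a minimal, self-contained sketch of that guard pattern; the `Tensor` struct and `ComputeGrad` function are hypothetical stand-ins for illustration, not Paddle APIs.

```cpp
#include <iostream>
#include <vector>

// Hypothetical stand-in for a tensor; illustrative only.
struct Tensor {
  std::vector<float> data;
};

// The guard pattern this commit introduces: each gradient output may be
// null when the caller does not need that gradient, so every access is
// wrapped in a null check instead of being performed unconditionally.
void ComputeGrad(const Tensor& dout, Tensor* dx, Tensor* dy) {
  if (dx) {
    dx->data.assign(dout.data.begin(), dout.data.end());  // e.g. dx = dout
  }
  if (dy) {
    dy->data.assign(dout.data.size(), 0.0f);  // placeholder gradient math
  }
}

int main() {
  Tensor dout{{1.f, 2.f, 3.f}};
  Tensor dx;
  // dy is not requested: pass nullptr and the kernel safely skips it.
  ComputeGrad(dout, &dx, nullptr);
  std::cout << dx.data[1] << "\n";  // prints 2
}
```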
paddle/operators/mul_op.cc

@@ -75,8 +75,8 @@ class MulOpGrad : public framework::OperatorWithKernel {
     PADDLE_ENFORCE(y_dims[1] == out_dims[1],
                    "Out@GRAD M X N must equal to Y dims 1, N ");
-    x_grad->Resize(x_dims);
-    y_grad->Resize(y_dims);
+    if (x_grad) x_grad->Resize(x_dims);
+    if (y_grad) y_grad->Resize(y_dims);
   }
 };
paddle/operators/mul_op.h

@@ -31,13 +31,13 @@ template <typename Place, typename T>
 class MulKernel : public framework::OpKernel {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    auto* X = context.Input<Tensor>("X");
-    auto* Y = context.Input<Tensor>("Y");
-    auto* Z = context.Output<Tensor>("Out");
-    Z->mutable_data<T>(context.GetPlace());
+    auto* x = context.Input<Tensor>("X");
+    auto* y = context.Input<Tensor>("Y");
+    auto* z = context.Output<Tensor>("Out");
+    z->mutable_data<T>(context.GetPlace());
     auto* device_context =
         const_cast<platform::DeviceContext*>(context.device_context_);
-    math::matmul<Place, T>(*X, false, *Y, false, 1, Z, 0, device_context);
+    math::matmul<Place, T>(*x, false, *y, false, 1, z, 0, device_context);
   }
 };
@@ -45,20 +45,24 @@ template <typename Place, typename T>
 class MulGradKernel : public framework::OpKernel {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
-    auto* X = ctx.Input<Tensor>("X");
-    auto* Y = ctx.Input<Tensor>("Y");
-    auto* dOut = ctx.Input<Tensor>(framework::GradVarName("Out"));
-    auto* dX = ctx.Output<Tensor>(framework::GradVarName("X"));
-    auto* dY = ctx.Output<Tensor>(framework::GradVarName("Y"));
-    dX->mutable_data<T>(ctx.GetPlace());
-    dY->mutable_data<T>(ctx.GetPlace());
+    auto* x = ctx.Input<Tensor>("X");
+    auto* y = ctx.Input<Tensor>("Y");
+    auto* dout = ctx.Input<Tensor>(framework::GradVarName("Out"));
+    auto* dx = ctx.Output<Tensor>(framework::GradVarName("X"));
+    auto* dy = ctx.Output<Tensor>(framework::GradVarName("Y"));
     auto* device_context =
         const_cast<platform::DeviceContext*>(ctx.device_context_);
-    // dX = dOut * Y'. dX: M x K, dOut: M x N, Y: K x N
-    math::matmul<Place, T>(*dOut, false, *Y, true, 1, dX, 0, device_context);
-    // dY = X' * dOut. dY: K x N, dOut: M x N, X: M x K
-    math::matmul<Place, T>(*X, true, *dOut, false, 1, dY, 0, device_context);
+    if (dx) {
+      // dx = dout * y'. dx: M x K, dout: M x N, y: K x N
+      dx->mutable_data<T>(ctx.GetPlace());
+      math::matmul<Place, T>(*dout, false, *y, true, 1, dx, 0, device_context);
+    }
+    if (dy) {
+      dy->mutable_data<T>(ctx.GetPlace());
+      // dy = x' * dout. dy: K x N, dout: M x N, x: M x K
+      math::matmul<Place, T>(*x, true, *dout, false, 1, dy, 0, device_context);
+    }
   }
 };
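For reference, the two matmul calls in MulGradKernel implement the standard gradients of a matrix product. A short derivation sketch (standard calculus, not part of the commit):

```latex
% For Z = XY with X \in \mathbb{R}^{M \times K}, Y \in \mathbb{R}^{K \times N},
% and upstream gradient \partial L/\partial Z \in \mathbb{R}^{M \times N}:
\frac{\partial L}{\partial X} = \frac{\partial L}{\partial Z}\, Y^{\top}
  \in \mathbb{R}^{M \times K},
\qquad
\frac{\partial L}{\partial Y} = X^{\top} \frac{\partial L}{\partial Z}
  \in \mathbb{R}^{K \times N}
```

These match the shape comments in the diff: dx = dout * y' and dy = x' * dout.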
paddle/operators/rowwise_add_op.cc

@@ -64,8 +64,10 @@ class RowwiseAddGradOp : public framework::OperatorWithKernel {
     auto dims0 = ctx.Input<Tensor>("X")->dims();
     auto dims1 = ctx.Input<Tensor>("b")->dims();
     PADDLE_ENFORCE_EQ(1, dims1.size(), "b dims should be 1")
-    ctx.Output<Tensor>(framework::GradVarName("X"))->Resize(dims0);
-    ctx.Output<Tensor>(framework::GradVarName("b"))->Resize(dims1);
+    auto* dx = ctx.Output<Tensor>(framework::GradVarName("X"));
+    auto* db = ctx.Output<Tensor>(framework::GradVarName("b"));
+    if (dx) dx->Resize(dims0);
+    if (db) db->Resize(dims1);
   }
 };
paddle/operators/rowwise_add_op.h

@@ -51,20 +51,24 @@ template <typename Place, typename T>
 class RowwiseAddGradKernel : public framework::OpKernel {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    auto* dOut = context.Input<Tensor>(framework::GradVarName("Out"));
-    auto* dX = context.Output<Tensor>(framework::GradVarName("X"));
+    auto* dout = context.Input<Tensor>(framework::GradVarName("Out"));
+    auto* dx = context.Output<Tensor>(framework::GradVarName("X"));
     auto* db = context.Output<Tensor>(framework::GradVarName("b"));
-    dX->mutable_data<T>(context.GetPlace());
-    db->mutable_data<T>(context.GetPlace());
-    auto OutGrad = EigenMatrix<T>::From(*dOut);
+    auto out_grad = EigenMatrix<T>::From(*dout);
     auto place = context.GetEigenDevice<Place>();
-    EigenMatrix<T>::From(*dX).device(place) = OutGrad;
+    if (dx) {
+      dx->mutable_data<T>(context.GetPlace());
+      EigenMatrix<T>::From(*dx).device(place) = out_grad;
+    }
-    // https://eigen.tuxfamily.org/dox/unsupported/TensorBase_8h_source.html
-    // colwise add
-    Eigen::array<int, 1> dims{{0}}; /* dimension to reduce */
-    EigenVector<T>::Flatten(*db).device(place) = OutGrad.sum(dims);
+    if (db) {
+      db->mutable_data<T>(context.GetPlace());
+      // https://eigen.tuxfamily.org/dox/unsupported/TensorBase_8h_source.html
+      // colwise add
+      Eigen::array<int, 1> dims{{0}}; /* dimension to reduce */
+      EigenVector<T>::Flatten(*db).device(place) = out_grad.sum(dims);
+    }
   }
 };
 }  // namespace operators
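The db branch reduces out_grad over dimension 0, a column-wise sum: every row of the output received the same bias, so each bias element accumulates gradient from all rows. A standalone illustration using Eigen's unsupported Tensor module (the concrete values are invented for the example):

```cpp
#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  // out_grad: 2 rows (batch) x 3 columns (bias width).
  Eigen::Tensor<float, 2> out_grad(2, 3);
  out_grad.setValues({{1.f, 2.f, 3.f}, {4.f, 5.f, 6.f}});

  // Reducing dimension 0 sums over rows (a column-wise sum), the same
  // reduction RowwiseAddGradKernel uses to form db.
  Eigen::array<int, 1> dims{{0}};
  Eigen::Tensor<float, 1> db = out_grad.sum(dims);

  std::cout << db << "\n";  // prints 5 7 9
}
```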
paddle/operators/scatter_op.cc

@@ -50,8 +50,8 @@ class ScatterGradOp : public framework::OperatorWithKernel {
     auto* dRef = ctx.Output<Tensor>(framework::GradVarName("Ref"));
     auto* Ref = ctx.Input<Tensor>("Ref");
-    dRef->Resize(Ref->dims());
-    dUpdates->Resize(Updates->dims());
+    if (dRef) dRef->Resize(Ref->dims());
+    if (dUpdates) dUpdates->Resize(Updates->dims());
   }
 };
paddle/operators/scatter_op.h

@@ -49,10 +49,12 @@ class ScatterGradientOpKernel : public framework::OpKernel {
     auto* dOut = ctx.Input<Tensor>(framework::GradVarName("Out"));
     // In place gradient: dRef = dO
-    dRef->ShareDataWith<T>(*dOut);
-    dUpdates->mutable_data<T>(ctx.GetPlace());
-    // Gradient by Gather: dUpdates += dO[Index]
-    Gather<T>(ctx.GetPlace(), dOut, Index, dUpdates);
+    if (dRef) dRef->ShareDataWith<T>(*dOut);
+    if (dUpdates) {
+      dUpdates->mutable_data<T>(ctx.GetPlace());
+      // Gradient by Gather: dUpdates += dO[Index]
+      Gather<T>(ctx.GetPlace(), dOut, Index, dUpdates);
+    }
   }
 };
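ScatterGradientOpKernel forms dUpdates by gathering the rows of dOut selected by Index. A framework-free sketch of that row gather follows; GatherRows is a hypothetical helper for illustration, not Paddle's Gather:

```cpp
#include <cstddef>
#include <iostream>
#include <vector>

// Hypothetical row gather: out[i] = src[index[i]]; illustrative only.
std::vector<std::vector<float>> GatherRows(
    const std::vector<std::vector<float>>& src,
    const std::vector<std::size_t>& index) {
  std::vector<std::vector<float>> out;
  out.reserve(index.size());
  for (std::size_t i : index) out.push_back(src[i]);
  return out;
}

int main() {
  // dout plays the role of the upstream gradient, one row per output slot.
  std::vector<std::vector<float>> dout = {{1.f, 1.f}, {2.f, 2.f}, {3.f, 3.f}};
  std::vector<std::size_t> index = {2, 0};
  auto dupdates = GatherRows(dout, index);
  std::cout << dupdates[0][0] << " " << dupdates[1][0] << "\n";  // prints 3 2
}
```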
python/paddle/v2/framework/tests/gradient_checker.py

@@ -286,6 +286,9 @@ class GradientChecker(unittest.TestCase):
         for no_grad in no_grad_set:
             if no_grad not in in_names:
                 raise ValueError("no_grad should be in in_names")
+            if name in inputs_to_check:
+                raise ValueError(
+                    "no_grad should not be in inputs_to_check")
         backward_op = core.Operator.backward(forward_op, no_grad_set)
         places = [core.CPUPlace()]

@@ -301,9 +304,25 @@ class GradientChecker(unittest.TestCase):
         check_names = [grad_var_name(name) for name in inputs_to_check]
         for place in places:
-            # get analytical gradients according to different device
-            analytic_grads = self.__get_gradient(forward_op, backward_op,
-                                                 input_vars, check_names,
-                                                 place)
+            # analytic_grads = self.__get_gradient(forward_op, backward_op,
+            #                                      input_vars, check_names,
+            #                                      place)
+            # In fact, the commented-out call above could replace the code
+            # below. But most gradient operators need to handle the case where
+            # the gradient of one or more inputs is not needed, so we change
+            # the unit test framework to explicitly test that an operator
+            # handles this correctly. In addition, if none of the inputs needs
+            # a gradient, core.Operator.backward() returns the NOP operator;
+            # the following code does not test that case.
+            analytic_grads = []
+            for name in inputs_to_check:
+                no_grads = [name for name in no_grad_set]
+                no_grads.extend(filter(lambda x: x != name, inputs_to_check))
+                backward_op = core.Operator.backward(forward_op, set(no_grads))
+                # get analytical gradients according to different device
+                analytic_grads.extend(
+                    self.__get_gradient(forward_op, backward_op, input_vars,
                                        [grad_var_name(name)], place))
             self.__assert_is_close(numeric_grads, analytic_grads, check_names,
                                    max_relative_error,
                                    "Gradient Check On %s" % str(place))
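The rewritten loop verifies each input's gradient in isolation: for every input under check, all other inputs are pushed into the no-grad set before the backward op is built, which forces the operator down its null-guard paths. For consistency with the other sketches, the selection logic is mirrored in C++ below; none of these names are Paddle APIs, and the authoritative implementation is the Python diff above:

```cpp
#include <iostream>
#include <set>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> inputs_to_check = {"X", "Y"};
  std::set<std::string> no_grad_set;  // inputs the user already excluded

  // For each input, mark every *other* input as no-grad, so the backward
  // op must produce exactly one gradient output per iteration.
  for (const auto& name : inputs_to_check) {
    std::set<std::string> no_grads(no_grad_set);
    for (const auto& other : inputs_to_check) {
      if (other != name) no_grads.insert(other);
    }
    std::cout << "checking d" << name << " with " << no_grads.size()
              << " input(s) marked no-grad\n";
  }
}
```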
python/paddle/v2/framework/tests/test_mul_op.py

@@ -16,13 +16,14 @@ class TestMulOp(unittest.TestCase):
         self.outputs = {'Out': np.dot(self.inputs['X'], self.inputs['Y'])}

-class MulGradOpTest(GradientChecker):
+class TestMulGradOp(GradientChecker):
     def test_mul(self):
         op = create_op("mul")
         inputs = {
             'X': np.random.random((32, 84)).astype("float32"),
             'Y': np.random.random((84, 100)).astype("float32")
         }
+        self.compare_grad(op, inputs)
         # mul op will enlarge the relative error
         self.check_grad(
             op, inputs, set(["X", "Y"]), "Out", max_relative_error=0.5)