Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
BaiXuePrincess
Paddle
提交
78cd3dd5
P
Paddle
项目概览
BaiXuePrincess
/
Paddle
与 Fork 源项目一致
Fork自
PaddlePaddle / Paddle
通知
1
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
Paddle
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
未验证
提交
78cd3dd5
编写于
4月 30, 2020
作者:
F
Feiyu Chan
提交者:
GitHub
4月 30, 2020
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
fix kron_op: when only one input needs gradient, test=develop (#24269)
fix kron_op: when only one input needs gradient
上级
5dc069d0
变更
4
隐藏空白更改
内联
并排
Showing
4 changed file
with
74 addition
and
36 deletion
+74
-36
paddle/fluid/operators/kron_op.cc
paddle/fluid/operators/kron_op.cc
+6
-8
paddle/fluid/operators/kron_op.h
paddle/fluid/operators/kron_op.h
+61
-27
python/paddle/fluid/tests/unittests/test_kron_op.py
python/paddle/fluid/tests/unittests/test_kron_op.py
+6
-0
python/paddle/fluid/tests/unittests/white_list/no_grad_set_white_list.py
...luid/tests/unittests/white_list/no_grad_set_white_list.py
+1
-1
未找到文件。
paddle/fluid/operators/kron_op.cc
浏览文件 @
78cd3dd5
...
@@ -99,17 +99,15 @@ class KronGradOp : public framework::OperatorWithKernel {
...
@@ -99,17 +99,15 @@ class KronGradOp : public framework::OperatorWithKernel {
OP_INOUT_CHECK
(
ctx
->
HasInput
(
"Y"
),
"Input"
,
"Y"
,
"kron_grad"
);
OP_INOUT_CHECK
(
ctx
->
HasInput
(
"Y"
),
"Input"
,
"Y"
,
"kron_grad"
);
OP_INOUT_CHECK
(
ctx
->
HasInput
(
framework
::
GradVarName
(
"Out"
)),
"Input"
,
OP_INOUT_CHECK
(
ctx
->
HasInput
(
framework
::
GradVarName
(
"Out"
)),
"Input"
,
framework
::
GradVarName
(
"Out"
),
"kron_grad"
);
framework
::
GradVarName
(
"Out"
),
"kron_grad"
);
OP_INOUT_CHECK
(
ctx
->
HasOutput
(
framework
::
GradVarName
(
"X"
)),
"Output"
,
framework
::
GradVarName
(
"X"
),
"kron_grad"
);
OP_INOUT_CHECK
(
ctx
->
HasOutput
(
framework
::
GradVarName
(
"Y"
)),
"Output"
,
framework
::
GradVarName
(
"Y"
),
"kron_grad"
);
auto
x_grad_name
=
framework
::
GradVarName
(
"X"
);
auto
x_grad_name
=
framework
::
GradVarName
(
"X"
);
auto
y_grad_name
=
framework
::
GradVarName
(
"Y"
);
auto
y_grad_name
=
framework
::
GradVarName
(
"Y"
);
ctx
->
SetOutputDim
(
x_grad_name
,
ctx
->
GetInputDim
(
"X"
));
if
(
ctx
->
HasOutput
(
x_grad_name
))
{
ctx
->
ShareLoD
(
"X"
,
/*->*/
x_grad_name
);
ctx
->
SetOutputDim
(
x_grad_name
,
ctx
->
GetInputDim
(
"X"
));
ctx
->
SetOutputDim
(
y_grad_name
,
ctx
->
GetInputDim
(
"Y"
));
}
ctx
->
ShareLoD
(
"Y"
,
/*->*/
y_grad_name
);
if
(
ctx
->
HasOutput
(
y_grad_name
))
{
ctx
->
SetOutputDim
(
y_grad_name
,
ctx
->
GetInputDim
(
"Y"
));
}
}
}
protected:
protected:
...
...
paddle/fluid/operators/kron_op.h
浏览文件 @
78cd3dd5
...
@@ -147,11 +147,14 @@ struct KronGradElemFunctor {
...
@@ -147,11 +147,14 @@ struct KronGradElemFunctor {
index_b
+=
stride_b_
[
i
]
*
pos_bi
;
index_b
+=
stride_b_
[
i
]
*
pos_bi
;
}
}
size_t
index_out_a
=
index_a
*
numel_b_
+
index_b
;
if
(
dout_a_
)
{
size_t
index_out_b
=
index_b
*
numel_a_
+
index_a
;
size_t
index_out_a
=
index_a
*
numel_b_
+
index_b
;
dout_a_
[
index_out_a
]
=
dout_
[
idx
]
*
B_
[
index_b
];
dout_a_
[
index_out_a
]
=
dout_
[
idx
]
*
B_
[
index_b
];
}
dout_b_
[
index_out_b
]
=
dout_
[
idx
]
*
A_
[
index_a
];
if
(
dout_b_
)
{
size_t
index_out_b
=
index_b
*
numel_a_
+
index_a
;
dout_b_
[
index_out_b
]
=
dout_
[
idx
]
*
A_
[
index_a
];
}
}
}
private:
private:
...
@@ -222,35 +225,50 @@ struct KronGradOpFunctor {
...
@@ -222,35 +225,50 @@ struct KronGradOpFunctor {
// dout_x: dout * kron(ones(X), Y) re-arranged in shape (numel_x, numel_y)
// dout_x: dout * kron(ones(X), Y) re-arranged in shape (numel_x, numel_y)
// dout_y: dout * kron(X, ones(Y)) re-arranged in shape (numel_y, numel_x)
// dout_y: dout * kron(X, ones(Y)) re-arranged in shape (numel_y, numel_x)
framework
::
Tensor
dout_x
;
framework
::
Tensor
dout_x
;
dout_x
.
mutable_data
<
T
>
({
numel_x
,
numel_y
},
dev_ctx
.
GetPlace
());
T
*
p_dout_x
=
nullptr
;
if
(
dx
)
{
dout_x
.
mutable_data
<
T
>
({
numel_x
,
numel_y
},
dev_ctx
.
GetPlace
());
p_dout_x
=
dout_x
.
data
<
T
>
();
}
framework
::
Tensor
dout_y
;
framework
::
Tensor
dout_y
;
dout_y
.
mutable_data
<
T
>
({
numel_y
,
numel_x
},
dev_ctx
.
GetPlace
());
T
*
p_dout_y
=
nullptr
;
if
(
dy
)
{
dout_y
.
mutable_data
<
T
>
({
numel_y
,
numel_x
},
dev_ctx
.
GetPlace
());
p_dout_y
=
dout_y
.
data
<
T
>
();
}
platform
::
ForRange
<
DeviceContext
>
for_range
(
dev_ctx
,
numel
);
platform
::
ForRange
<
DeviceContext
>
for_range
(
dev_ctx
,
numel
);
KronGradElemFunctor
<
T
>
func
(
dout
.
data
<
T
>
(),
x
.
data
<
T
>
(),
y
.
data
<
T
>
(),
KronGradElemFunctor
<
T
>
func
(
dout
.
data
<
T
>
(),
x
.
data
<
T
>
(),
y
.
data
<
T
>
(),
dout_x
.
data
<
T
>
(),
dout_y
.
data
<
T
>
(),
p_dout_x
,
p_dout_y
,
p_stride_dout
,
p_stride_x
,
p_stride_dout
,
p_stride_x
,
p_stride_y
,
p_stride_y
,
p_shape_y
,
numel_x
,
numel_y
,
ndims
);
p_shape_y
,
numel_x
,
numel_y
,
ndims
);
for_range
(
func
);
for_range
(
func
);
// reduce_sum along axis 1
// reduce_sum along axis 1
#if __NVCC__
#if __NVCC__
auto
stream
=
dev_ctx
.
stream
();
// it is a cuda device_context
auto
stream
=
dev_ctx
.
stream
();
// it is a cuda device_context
TensorReduce
<
T
,
T
,
cub
::
Sum
,
IdentityFunctor
<
T
>>
(
if
(
dx
)
{
dout_x
,
dx
,
{
1
},
static_cast
<
T
>
(
0
),
cub
::
Sum
(),
IdentityFunctor
<
T
>
(),
TensorReduce
<
T
,
T
,
cub
::
Sum
,
IdentityFunctor
<
T
>>
(
stream
);
dout_x
,
dx
,
{
1
},
static_cast
<
T
>
(
0
),
cub
::
Sum
(),
IdentityFunctor
<
T
>
(),
TensorReduce
<
T
,
T
,
cub
::
Sum
,
IdentityFunctor
<
T
>>
(
stream
);
dout_y
,
dy
,
{
1
},
static_cast
<
T
>
(
0
),
cub
::
Sum
(),
IdentityFunctor
<
T
>
(),
}
stream
);
if
(
dy
)
{
TensorReduce
<
T
,
T
,
cub
::
Sum
,
IdentityFunctor
<
T
>>
(
dout_y
,
dy
,
{
1
},
static_cast
<
T
>
(
0
),
cub
::
Sum
(),
IdentityFunctor
<
T
>
(),
stream
);
}
#else
#else
auto
eigen_dout_x
=
framework
::
EigenMatrix
<
T
>::
Reshape
(
dout_x
,
1
);
auto
eigen_dout_y
=
framework
::
EigenMatrix
<
T
>::
Reshape
(
dout_y
,
1
);
auto
eigen_vec_dx
=
framework
::
EigenVector
<
T
>::
Flatten
(
*
dx
);
auto
eigen_vec_dy
=
framework
::
EigenVector
<
T
>::
Flatten
(
*
dy
);
auto
*
place
=
dev_ctx
.
eigen_device
();
auto
*
place
=
dev_ctx
.
eigen_device
();
Eigen
::
array
<
int
,
1
>
reduce_dim
=
{
1
};
Eigen
::
array
<
int
,
1
>
reduce_dim
=
{
1
};
eigen_vec_dx
.
device
(
*
place
)
=
eigen_dout_x
.
sum
(
reduce_dim
);
if
(
dx
)
{
eigen_vec_dy
.
device
(
*
place
)
=
eigen_dout_y
.
sum
(
reduce_dim
);
auto
eigen_dout_x
=
framework
::
EigenMatrix
<
T
>::
Reshape
(
dout_x
,
1
);
auto
eigen_vec_dx
=
framework
::
EigenVector
<
T
>::
Flatten
(
*
dx
);
eigen_vec_dx
.
device
(
*
place
)
=
eigen_dout_x
.
sum
(
reduce_dim
);
}
if
(
dy
)
{
auto
eigen_dout_y
=
framework
::
EigenMatrix
<
T
>::
Reshape
(
dout_y
,
1
);
auto
eigen_vec_dy
=
framework
::
EigenVector
<
T
>::
Flatten
(
*
dy
);
eigen_vec_dy
.
device
(
*
place
)
=
eigen_dout_y
.
sum
(
reduce_dim
);
}
#endif
#endif
}
}
};
};
...
@@ -307,17 +325,33 @@ class KronGradKernel : public framework::OpKernel<T> {
...
@@ -307,17 +325,33 @@ class KronGradKernel : public framework::OpKernel<T> {
auto
*
dx
=
ctx
.
Output
<
framework
::
Tensor
>
(
framework
::
GradVarName
(
"X"
));
auto
*
dx
=
ctx
.
Output
<
framework
::
Tensor
>
(
framework
::
GradVarName
(
"X"
));
auto
*
dy
=
ctx
.
Output
<
framework
::
Tensor
>
(
framework
::
GradVarName
(
"Y"
));
auto
*
dy
=
ctx
.
Output
<
framework
::
Tensor
>
(
framework
::
GradVarName
(
"Y"
));
dx
->
mutable_data
<
T
>
(
ctx
.
GetPlace
());
if
(
dx
)
{
dy
->
mutable_data
<
T
>
(
ctx
.
GetPlace
());
dx
->
mutable_data
<
T
>
(
ctx
.
GetPlace
());
}
if
(
dy
)
{
dy
->
mutable_data
<
T
>
(
ctx
.
GetPlace
());
}
int
ndims
=
dout
->
dims
().
size
();
int
ndims
=
dout
->
dims
().
size
();
framework
::
Tensor
xx
=
UnsqueezeTo
(
*
x
,
ndims
);
framework
::
Tensor
xx
=
UnsqueezeTo
(
*
x
,
ndims
);
framework
::
Tensor
dxx
=
UnsqueezeTo
(
*
dx
,
ndims
);
framework
::
Tensor
yy
=
UnsqueezeTo
(
*
y
,
ndims
);
framework
::
Tensor
yy
=
UnsqueezeTo
(
*
y
,
ndims
);
framework
::
Tensor
dyy
=
UnsqueezeTo
(
*
dy
,
ndims
);
framework
::
Tensor
*
pdxx
=
nullptr
;
framework
::
Tensor
*
pdyy
=
nullptr
;
framework
::
Tensor
dxx
;
framework
::
Tensor
dyy
;
if
(
dx
)
{
dxx
=
UnsqueezeTo
(
*
dx
,
ndims
);
pdxx
=
&
dxx
;
}
if
(
dy
)
{
dyy
=
UnsqueezeTo
(
*
dy
,
ndims
);
pdyy
=
&
dyy
;
}
KronGradOpFunctor
<
DeviceContext
,
T
>
func
;
KronGradOpFunctor
<
DeviceContext
,
T
>
func
;
func
(
dev_ctx
,
*
dout
,
xx
,
yy
,
&
dxx
,
&
dyy
);
func
(
dev_ctx
,
*
dout
,
xx
,
yy
,
pdxx
,
p
dyy
);
}
}
};
};
...
...
python/paddle/fluid/tests/unittests/test_kron_op.py
浏览文件 @
78cd3dd5
...
@@ -42,6 +42,12 @@ class TestKronOp(OpTest):
...
@@ -42,6 +42,12 @@ class TestKronOp(OpTest):
def
test_check_grad
(
self
):
def
test_check_grad
(
self
):
self
.
check_grad
([
'X'
,
'Y'
],
'Out'
)
self
.
check_grad
([
'X'
,
'Y'
],
'Out'
)
def
test_check_grad_ignore_x
(
self
):
self
.
check_grad
([
'Y'
],
'Out'
,
no_grad_set
=
set
(
'X'
))
def
test_check_grad_ignore_y
(
self
):
self
.
check_grad
([
'X'
],
'Out'
,
no_grad_set
=
set
(
'Y'
))
class
TestKronOp2
(
TestKronOp
):
class
TestKronOp2
(
TestKronOp
):
def
setUp
(
self
):
def
setUp
(
self
):
...
...
python/paddle/fluid/tests/unittests/white_list/no_grad_set_white_list.py
浏览文件 @
78cd3dd5
...
@@ -13,7 +13,7 @@
...
@@ -13,7 +13,7 @@
# limitations under the License.
# limitations under the License.
# check no_grad_set is None
# check no_grad_set is None
NOT_CHECK_OP_LIST
=
[
'deformable_conv'
,
'row_conv'
]
NOT_CHECK_OP_LIST
=
[
'deformable_conv'
,
'row_conv'
,
'kron'
]
# TODO(Shixiaowei02): Check if the items do not need fix.
# TODO(Shixiaowei02): Check if the items do not need fix.
# no_grad_set has value in NEED_TO_FIX_OP_LIST
# no_grad_set has value in NEED_TO_FIX_OP_LIST
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录