Commit 12d29f4d (unverified)
机器未来 / Paddle, forked from PaddlePaddle / Paddle
Authored Aug 26, 2019 by Huihuang Zheng; committed by GitHub on Aug 26, 2019
Change TensorCopy in recurrent_op to ShareDataWith (#19319)
Parent: da127d11
Showing 1 changed file with 65 additions and 32 deletions (+65 -32)
paddle/fluid/operators/recurrent_op.cc (+65 -32)
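The change replaces per-step framework::TensorCopy calls with Tensor::ShareDataWith wherever the destination can simply alias the source buffer. TensorCopy allocates destination storage if needed and duplicates every element, possibly as a device memcpy; ShareDataWith only repoints the destination at the source's allocation, so it is O(1) and both tensors then observe the same writes. A minimal sketch of the two semantics, using a toy MiniTensor stand-in rather than Paddle's real Tensor class:

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <memory>

// Toy stand-in, NOT Paddle's Tensor: it only illustrates the difference
// between TensorCopy (new buffer, element-wise duplicate) and
// ShareDataWith (two tensors aliasing one buffer).
struct MiniTensor {
  std::shared_ptr<float[]> holder;  // shared ownership of the allocation
  std::size_t size = 0;

  void Alloc(std::size_t n) {
    holder = std::shared_ptr<float[]>(new float[n]());
    size = n;
  }
  // TensorCopy-like semantics: allocate and copy every element.
  void CopyFrom(const MiniTensor &src) {
    Alloc(src.size);
    std::copy(src.holder.get(), src.holder.get() + src.size, holder.get());
  }
  // ShareDataWith-like semantics: alias the same buffer, O(1), no data moved.
  void ShareWith(const MiniTensor &src) {
    holder = src.holder;
    size = src.size;
  }
};

int main() {
  MiniTensor a, copied, shared;
  a.Alloc(4);
  a.holder[0] = 1.0f;

  copied.CopyFrom(a);   // independent storage
  shared.ShareWith(a);  // same storage as a

  a.holder[0] = 7.0f;                     // write through the source
  std::cout << copied.holder[0] << "\n";  // 1: the copy is unaffected
  std::cout << shared.holder[0] << "\n";  // 7: the alias sees the write
}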
@@ -220,27 +220,39 @@ void RecurrentOp::RunImpl(const framework::Scope &scope,
       }
     }
-    // Every inputs are linked now, execute!
+
+    // Link inside::output -> outside::output
+    //    outside::output[seq_offset: seq_offset + 1] = inside::output
+    executor.CreateVariables(ctx->prog_, &cur_scope, ctx->block_id_);
+    if (i > 0) {
+      LinkTensorWithCallback(scope, Outputs(kOutputs), cur_scope,
+                             Outputs(kOutputs),
+                             [&](const framework::LoDTensor &src_tensor,
+                                 framework::LoDTensor *dst_tensor) {
+                               framework::Tensor src_slice =
+                                   src_tensor.Slice(seq_offset, seq_offset + 1);
+                               dst_tensor->ShareDataWith(src_slice);
+                             });
+    }
+
+    // Linked now, execute!
     executor.RunPreparedContext(ctx.get(), &cur_scope,
                                 false /*create_local_scope*/,
-                                true /*create_vars*/, true /* keep_kids */);
+                                false /*create_vars*/, true /* keep_kids */);
 
-    // Copy inside::output -> outside::output
-    //    outside::output[seq_offset: seq_offset + 1] = inside::output
-    LinkTensorWithCallback(
-        cur_scope, Outputs(kOutputs), scope, Outputs(kOutputs),
-        [&](const framework::LoDTensor &src_tensor,
-            framework::LoDTensor *dst_tensor) {
-          if (i == 0) {  // create output tensor at begin
-            dst_tensor->Resize(PrependDims(seq_len, src_tensor.dims()));
-            dst_tensor->mutable_data(place, src_tensor.type());
-          }
-
-          auto dst_out = dst_tensor->Slice(seq_offset, seq_offset + 1);
-          // Explicit copy output since the local RNN scope can be destroyed
-          // early.
-          framework::TensorCopy(src_tensor, place, dev_ctx, &dst_out);
-        });
+    if (i == 0) {
+      // Copy inside::output -> outside::output
+      //    outside::output[seq_offset: seq_offset + 1] = inside::output
+      this->LinkTensorWithCallback(
+          cur_scope, Outputs(kOutputs), scope, Outputs(kOutputs),
+          [&](const framework::LoDTensor &src_tensor,
+              framework::LoDTensor *dst_tensor) {
+            // create output tensor at begin
+            dst_tensor->Resize(PrependDims(seq_len, src_tensor.dims()));
+            dst_tensor->mutable_data(place, src_tensor.type());
+
+            auto dst_out = dst_tensor->Slice(seq_offset, seq_offset + 1);
+            // Explicit copy output since the local RNN scope can be destroyed
+            // early.
+            framework::TensorCopy(src_tensor, place, dev_ctx, &dst_out);
+          });
+    }
 
     scopes.Next();
   }
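Net effect of this hunk: output linking now happens in two phases. For steps i > 0, the step scope's variables are created up front (executor.CreateVariables) and the inside output is aliased onto the matching outside::output slice before the step block runs, so results land directly in the outside buffer and no post-step copy is needed; RunPreparedContext is accordingly called with create_vars=false. Only step 0 still goes through TensorCopy, because the outside tensor is first allocated there from the first step's shape. A plain-C++ toy of the write-through-a-view idea (names are illustrative, not the Paddle API):

#include <iostream>
#include <vector>

// Toy of the new forward flow: the outside output is one [seq_len, dim]
// buffer, and each step writes its [1, dim] result through a view of row i,
// so nothing has to be copied back afterwards.
int main() {
  const int seq_len = 3, dim = 4;
  // In the real op this buffer is allocated at step 0; here it is up front.
  std::vector<float> outside_output(seq_len * dim);

  for (int i = 0; i < seq_len; ++i) {
    float *step_view = outside_output.data() + i * dim;  // alias of row i
    for (int d = 0; d < dim; ++d) {
      step_view[d] = static_cast<float>(i * 10 + d);  // "run the step block"
    }
    // Nothing to copy: the result already lives in outside_output.
  }

  for (float v : outside_output) std::cout << v << ' ';
  std::cout << '\n';
}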
@@ -322,23 +334,42 @@ void RecurrentGradOp::RunImpl(const framework::Scope &scope,
       for (size_t i = 0; i < ex_state_grads.size(); ++i) {
         auto &cur_grad = cur_state_grads[i];
         auto &ex_grad = ex_state_grads[i];
-        auto &ex_tensor =
+        auto &ex_grad_tensor =
             ex_scope.FindVar(ex_grad)->Get<framework::LoDTensor>();
 
         VLOG(10) << " RNN link " << cur_grad << " from " << ex_grad;
         auto *cur_grad_var = cur_scope.Var(cur_grad);
-        auto cur_grad_tensor =
+        framework::LoDTensor *cur_grad_tensor =
             cur_grad_var->GetMutable<framework::LoDTensor>();
-        framework::TensorCopy(ex_tensor, place, dev_ctx, cur_grad_tensor);
+        cur_grad_tensor->ShareDataWith(ex_grad_tensor);
       }
     }
 
+    // Link inside::output -> outside::output
+    //    outside::output[seq_offset: seq_offset + 1] = inside::output
+    executor.CreateVariables(ctx->prog_, &cur_scope, ctx->block_id_);
+    if (step_id > 0) {
+      LinkTensorWithCallback(
+          scope, Outputs(kInputGrads), cur_scope,
+          GradVarLists(Inputs(kInputs)),
+          [&](const framework::LoDTensor &src_tensor,
+              framework::LoDTensor *dst_tensor) {
+            if (src_tensor.memory_size() == 0) {
+              // Inside Gradient is not created.
+              return;
+            }
+            framework::Tensor src_slice =
+                src_tensor.Slice(seq_offset, seq_offset + 1);
+            dst_tensor->ShareDataWith(src_slice);
+          },
+          true /*is_backward*/);
+    }
 
     VLOG(5) << "Recurrent memory linking finished ";
     // Run step block with cur_scope
     executor.RunPreparedContext(ctx.get(), &cur_scope,
                                 false /*create_local_scope*/,
-                                true /*create_vars*/, true /* keep_kids */);
+                                false /*create_vars*/, true /* keep_kids */);
 
     VLOG(5) << "executor.Run finished ";
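In the backward pass the previous step's state gradient is now shared into the current step's scope (cur_grad_tensor->ShareDataWith(ex_grad_tensor)) instead of copied, and for step_id > 0 the outside input-grad slices are pre-linked into the step scope the same way. Sharing a tensor created in another step's scope is safe here because the step scopes are retained (keep_kids is true) and, as far as I can tell, fluid tensors hold their allocation through a shared, reference-counted holder, so the buffer outlives any single alias. A general C++ illustration of that lifetime property (not Paddle code):

#include <iostream>
#include <memory>
#include <vector>

// A buffer owned through a shared_ptr stays valid after its creating scope
// ends, as long as any other owner remains. This is the property that makes
// cross-scope buffer sharing safe.
int main() {
  std::shared_ptr<std::vector<float>> alias;
  {
    auto owner = std::make_shared<std::vector<float>>(4, 1.5f);
    alias = owner;  // second owner of the same buffer
  }  // the creating scope's handle is gone here

  std::cout << (*alias)[0] << "\n";  // prints 1.5: kept alive via alias
}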
@@ -393,21 +424,23 @@ void RecurrentGradOp::RunImpl(const framework::Scope &scope,
     // Copy input gradient from inside to outside
     //    outside::input_grad[seq_offset: seq_offset + 1] = inside::input_grad
-    LinkTensorWithCallback(
-        cur_scope, GradVarLists(Inputs(kInputs)), scope, Outputs(kInputGrads),
-        [&](const framework::LoDTensor &inside,
-            framework::LoDTensor *outside) {
-          if (inside.memory_size() == 0) {  // IG is not created.
-            return;
-          }
-          if (step_id == 0) {  // alloc memory
-            outside->Resize(PrependDims(seq_len, inside.dims()));
-            outside->mutable_data(place, inside.type());
-          }
-
-          auto dst = outside->Slice(seq_offset, seq_offset + 1);
-          framework::TensorCopy(inside, place, dev_ctx, &dst);
-        },
-        true /*is_backward*/);
+    if (step_id == 0) {
+      LinkTensorWithCallback(
+          cur_scope, GradVarLists(Inputs(kInputs)), scope, Outputs(kInputGrads),
+          [&](const framework::LoDTensor &inside,
+              framework::LoDTensor *outside) {
+            if (inside.memory_size() == 0) {  // IG is not created.
+              return;
+            }
+            // Alloc outside memory
+            outside->Resize(PrependDims(seq_len, inside.dims()));
+            outside->mutable_data(place, inside.type());
+
+            auto dst = outside->Slice(seq_offset, seq_offset + 1);
+            framework::TensorCopy(inside, place, dev_ctx, &dst);
+          },
+          true /*is_backward*/);
+    }
 
     VLOG(5) << "Link outside gradient finished ";
 
     if (has_state) {
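The inside-to-outside copy of input gradients now runs only at step_id == 0: later steps already wrote their gradients in place through the links set up in the previous hunk, and step 0 is also where the outside tensor is first sized as [seq_len, inside dims...] via PrependDims before its slice is filled. A small sketch of that shape and offset arithmetic (PrependDim and the other names here are illustrative, not Paddle's helpers):

#include <cstdint>
#include <functional>
#include <iostream>
#include <numeric>
#include <vector>

// Illustrative helper showing how a per-step shape becomes the
// whole-sequence shape, and which elements step k's slice occupies.
std::vector<int64_t> PrependDim(int64_t seq_len,
                                const std::vector<int64_t> &dims) {
  std::vector<int64_t> out{seq_len};
  out.insert(out.end(), dims.begin(), dims.end());
  return out;
}

int main() {
  const int64_t seq_len = 5;
  const std::vector<int64_t> inside_dims{2, 3};  // one step's grad shape
  const auto outside_dims = PrependDim(seq_len, inside_dims);  // {5, 2, 3}

  // Elements per step, and the element range of step k's slice.
  const int64_t step_numel =
      std::accumulate(inside_dims.begin(), inside_dims.end(), int64_t{1},
                      std::multiplies<int64_t>());
  const int64_t k = 3;
  std::cout << "outside dims: [" << outside_dims[0] << ", " << outside_dims[1]
            << ", " << outside_dims[2] << "]\n";
  std::cout << "step " << k << " slice covers elements [" << k * step_numel
            << ", " << (k + 1) * step_numel << ")\n";
}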