PaddlePaddle / Paddle
Unverified commit b106c424, authored Sep 27, 2022 by wanghuancoder, committed by GitHub on Sep 27, 2022.
[Eager] refine gil use (#46452)
* refine gil use
Parent: a02eb143
Showing 4 changed files with 487 additions and 428 deletions (+487 -428).
paddle/fluid/eager/pylayer/py_layer_node.cc   +5    -0
paddle/fluid/eager/pylayer/py_layer_node.h    +1    -1
paddle/fluid/pybind/eager_functions.cc      +420  -391
paddle/fluid/pybind/eager_method.cc          +61   -36
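The whole commit revolves around two RAII guards: pybind11::gil_scoped_acquire, which takes the GIL before touching Python reference counts, and Paddle's eager_gil_scoped_release, which drops the GIL around blocking C++ work so other Python threads can make progress. As a rough mental model, a GIL-release guard of this kind can be built from CPython's PyEval_SaveThread/PyEval_RestoreThread; the sketch below is illustrative only, not Paddle's actual definition:

```cpp
#include <Python.h>

// Minimal sketch of an RAII GIL-release guard in the spirit of
// eager_gil_scoped_release (illustrative; Paddle's real class may differ).
// Constructing it releases the GIL; leaving scope restores it, including
// during exception unwinding.
class gil_scoped_release_sketch {
 public:
  gil_scoped_release_sketch() : tstate_(PyEval_SaveThread()) {}
  ~gil_scoped_release_sketch() {
    if (tstate_ != nullptr) PyEval_RestoreThread(tstate_);
  }
  // Non-copyable: the saved thread state must be restored exactly once.
  gil_scoped_release_sketch(const gil_scoped_release_sketch&) = delete;
  gil_scoped_release_sketch& operator=(const gil_scoped_release_sketch&) = delete;

 private:
  PyThreadState* tstate_;
};
```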
paddle/fluid/eager/pylayer/py_layer_node.cc
@@ -27,6 +27,11 @@
 #include "pybind11/pytypes.h"

 namespace egr {

+GradNodePyLayer::~GradNodePyLayer() {
+  pybind11::gil_scoped_acquire gil;
+  Py_XDECREF(ctx_);
+}
+
 paddle::small_vector<std::vector<paddle::experimental::Tensor>,
                      kSlotSmallVectorSize>
 GradNodePyLayer::operator()(
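The destructor now acquires the GIL before decrementing the refcount of the stored context object: a grad node can be destroyed from a C++ thread (for example while the autograd graph is torn down), and Py_XDECREF is only safe while the GIL is held. A self-contained pybind11 sketch of the same idiom (the holder class and its names are hypothetical, not Paddle's):

```cpp
#include <Python.h>
#include <pybind11/pybind11.h>

// Hypothetical holder that owns a strong reference to a Python object but
// may be destroyed on a thread that does not currently hold the GIL.
// The constructor is assumed to run with the GIL held (as when the object
// is created from Python code).
class PyCtxHolder {
 public:
  explicit PyCtxHolder(PyObject* ctx) : ctx_(ctx) { Py_XINCREF(ctx_); }
  ~PyCtxHolder() {
    // Same pattern as GradNodePyLayer::~GradNodePyLayer(): take the GIL
    // first, because refcount manipulation without it is undefined behavior.
    pybind11::gil_scoped_acquire gil;
    Py_XDECREF(ctx_);
  }

 private:
  PyObject* ctx_ = nullptr;
};
```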
paddle/fluid/eager/pylayer/py_layer_node.h
@@ -34,7 +34,7 @@ class GradNodePyLayer : public GradNodeBase {
     Py_INCREF(ctx_);
   }

-  ~GradNodePyLayer() override { Py_XDECREF(ctx_); };
+  ~GradNodePyLayer() override;

   virtual paddle::small_vector<std::vector<paddle::experimental::Tensor>,
                                kSlotSmallVectorSize>

The destructor is moved out of line so that its definition in py_layer_node.cc can acquire the GIL before the Py_XDECREF; the old inline body dropped the reference with no GIL guarantee.
paddle/fluid/pybind/eager_functions.cc
(This diff is collapsed in the page capture; its +420 -391 changes are not shown.)
paddle/fluid/pybind/eager_method.cc
@@ -156,6 +156,7 @@ static PyObject* tensor_method_numpy(TensorObject* self,
   }
   if (self->tensor.is_cpu() || self->tensor.is_gpu_pinned()) {
+    eager_gil_scoped_release guard;
     platform::CPUPlace place;
     if (self->tensor.is_selected_rows()) {
       VLOG(6) << "Getting SelectedRows's numpy value";
@@ -186,6 +187,7 @@ static PyObject* tensor_method_numpy(TensorObject* self,
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
   } else if (self->tensor.is_gpu()) {
+    eager_gil_scoped_release guard;
 #if defined(PADDLE_WITH_CUDA)
     gpuMemcpyKind kind = cudaMemcpyDeviceToHost;
 #elif defined(PADDLE_WITH_HIP)
@@ -244,6 +246,7 @@ static PyObject* tensor_method_numpy(TensorObject* self,
 #endif
 #ifdef PADDLE_WITH_CUSTOM_DEVICE
   } else if (self->tensor.is_custom_device()) {
+    eager_gil_scoped_release guard;
     if (self->tensor.is_selected_rows()) {
       VLOG(6) << "Getting SelectedRows's numpy value";
       auto* selected_rows =
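In tensor_method_numpy, the new guard covers the branches that copy tensor memory back to the host: a device-to-host copy can take milliseconds and never touches Python state, so holding the GIL across it would stall every other Python thread. A runnable toy binding with the same shape (module and function names are made up; the sleep stands in for the blocking copy):

```cpp
#include <chrono>
#include <thread>

#include <pybind11/pybind11.h>

namespace py = pybind11;

// Stand-in for a blocking device-to-host copy: pure C++ work with no
// Python calls, so the GIL can be dropped for its whole duration.
void blocking_copy_stub() {
  py::gil_scoped_release release;  // plays the role of eager_gil_scoped_release
  std::this_thread::sleep_for(std::chrono::milliseconds(50));
}  // the GIL is re-acquired here, when `release` is destroyed

PYBIND11_MODULE(gil_demo, m) {
  m.def("blocking_copy_stub", &blocking_copy_stub,
        "Sleeps 50 ms with the GIL released.");
}
```

Calling gil_demo.blocking_copy_stub() from two Python threads now overlaps; without the guard the calls would serialize on the GIL.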
@@ -311,8 +314,8 @@ static PyObject* tensor_method_numpy_for_string_tensor(TensorObject* self,
   const auto* st_ptr = string_tensor->data();
   auto numel = self->tensor.numel();
   auto tensor_dims = self->tensor.shape();
-  // Get the max unicode length of StringTensor to create numpy unicode string
-  // array.
+  // Get the max unicode length of StringTensor to create numpy unicode
+  // string array.
   auto* longest_pstring = std::max_element(
       st_ptr, st_ptr + numel, [](const auto& a, const auto& b) {
         auto a_unicode_len =
@@ -394,14 +397,18 @@ static PyObject* tensor_method__copy_to(TensorObject* self,
   EAGER_TRY
   auto place = CastPyArg2Place(PyTuple_GET_ITEM(args, 0), 0);
   bool blocking = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 1), 1);
-  auto cp_tensor = self->tensor.copy_to(place, blocking);
-  if (!blocking) {
-    IncreaseTensorReferenceCountUntilCopyComplete(self->tensor, place);
+  paddle::experimental::Tensor cp_tensor;
+  {
+    eager_gil_scoped_release guard;
+    cp_tensor = self->tensor.copy_to(place, blocking);
+    if (!blocking) {
+      IncreaseTensorReferenceCountUntilCopyComplete(self->tensor, place);
+    }
+    egr::EagerUtils::autograd_meta(&cp_tensor)->SetStopGradient(true);
+    egr::EagerUtils::autograd_meta(&cp_tensor)->SetPersistable(
+        egr::EagerUtils::autograd_meta(&(self->tensor))->Persistable());
   }
-  egr::EagerUtils::autograd_meta(&cp_tensor)->SetStopGradient(true);
-  egr::EagerUtils::autograd_meta(&cp_tensor)->SetPersistable(
-      egr::EagerUtils::autograd_meta(&(self->tensor))->Persistable());
   return ToPyObject(cp_tensor);
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
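tensor_method__copy_to shows the second recurring shape in this commit: the result tensor is declared before the released block, because ToPyObject, which builds the returned PyObject*, must run only after the GIL is re-held. A runnable sketch of that ordering (names are illustrative, not Paddle's):

```cpp
#include <chrono>
#include <string>
#include <thread>

#include <pybind11/pybind11.h>

namespace py = pybind11;

// Illustrative version of the commit's scoping pattern: compute in C++
// with the GIL released, convert to a Python object only after the
// release guard has gone out of scope.
py::str compute_then_convert() {
  std::string result;  // declared outside so it outlives the released scope
  {
    py::gil_scoped_release release;  // analogous to eager_gil_scoped_release
    std::this_thread::sleep_for(std::chrono::milliseconds(10));  // the "copy_to"
    result = "done";  // plain C++ assignment, no Python involved
  }  // GIL re-acquired here
  return py::str(result);  // creating a PyObject requires the GIL
}

PYBIND11_MODULE(scope_demo, m) {
  m.def("compute_then_convert", &compute_then_convert);
}
```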
@@ -410,11 +417,15 @@ static PyObject* tensor_method_cpu(TensorObject* self,
                                    PyObject* args,
                                    PyObject* kwargs) {
   EAGER_TRY
-  auto cp_tensor = self->tensor.copy_to(phi::CPUPlace(), true);
-  egr::EagerUtils::autograd_meta(&cp_tensor)->SetStopGradient(true);
-  egr::EagerUtils::autograd_meta(&cp_tensor)->SetPersistable(
-      egr::EagerUtils::autograd_meta(&(self->tensor))->Persistable());
+  paddle::experimental::Tensor cp_tensor;
+  {
+    eager_gil_scoped_release guard;
+    cp_tensor = self->tensor.copy_to(phi::CPUPlace(), true);
+    egr::EagerUtils::autograd_meta(&cp_tensor)->SetStopGradient(true);
+    egr::EagerUtils::autograd_meta(&cp_tensor)->SetPersistable(
+        egr::EagerUtils::autograd_meta(&(self->tensor))->Persistable());
+  }
   return ToPyObject(cp_tensor);
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
@@ -450,6 +461,7 @@ static PyObject* tensor_method_copy_(TensorObject* self,
   VLOG(6) << "Start Copy Tensor " << src_tensor.name() << " to "
           << self->tensor.name();
   if (!self->tensor.initialized()) {
+    eager_gil_scoped_release guard;
     egr::EagerUtils::autograd_meta(&(self->tensor))
         ->SetStopGradient(
             egr::EagerUtils::autograd_meta(&(src_tensor))->StopGradient());
@@ -461,6 +473,7 @@ static PyObject* tensor_method_copy_(TensorObject* self,
}
}
else
{
if
(
src_tensor
.
initialized
())
{
eager_gil_scoped_release
guard
;
self
->
tensor
.
copy_
(
src_tensor
,
self
->
tensor
.
place
(),
blocking
);
}
}
...
...
@@ -476,16 +489,19 @@ static PyObject* tensor_method_clone(TensorObject* self,
                                      PyObject* args,
                                      PyObject* kwargs) {
   EAGER_TRY
-  PADDLE_ENFORCE_EQ(
-      self->tensor.initialized(),
-      true,
-      paddle::platform::errors::InvalidArgument(
-          "We can only support initialized tensor in clone, however we got "
-          "uninitialized tensor %s, please check your code.",
-          self->tensor.name()));
-
-  auto out = assign_ad_func(self->tensor);
+  paddle::experimental::Tensor out;
+  {
+    eager_gil_scoped_release guard;
+    PADDLE_ENFORCE_EQ(
+        self->tensor.initialized(),
+        true,
+        paddle::platform::errors::InvalidArgument(
+            "We can only support initialized tensor in clone, however we got "
+            "uninitialized tensor %s, please check your code.",
+            self->tensor.name()));
+
+    out = assign_ad_func(self->tensor);
+  }
   return ToPyObject(out);
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
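Note that PADDLE_ENFORCE_EQ, which throws on failure, now sits inside the released scope. That is safe only because the guard is an RAII object: stack unwinding restores the GIL before EAGER_CATCH_AND_THROW_RETURN_NULL turns the C++ exception into a Python one. A small runnable sketch of why RAII matters here (hypothetical names, pybind11 standing in for Paddle's macros):

```cpp
#include <stdexcept>

#include <pybind11/pybind11.h>

namespace py = pybind11;

// If validation inside the released region throws, the guard's destructor
// runs during unwinding and re-takes the GIL, so the binding layer can
// safely build a Python exception afterwards.
void validated_work(bool ok) {
  py::gil_scoped_release release;
  if (!ok) {
    throw std::runtime_error("uninitialized tensor");  // unwinds `release`
  }
  // ... heavy C++ work ...
}

PYBIND11_MODULE(unwind_demo, m) {
  // pybind11 catches the C++ exception (with the GIL held again) and
  // re-raises it in Python as a RuntimeError.
  m.def("validated_work", &validated_work);
}
```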
@@ -495,6 +511,7 @@ static PyObject* tensor_retain_grads(TensorObject* self,
                                      PyObject* kwargs) {
   EAGER_TRY
   if (egr::Controller::Instance().HasGrad()) {
+    eager_gil_scoped_release guard;
     auto meta = egr::EagerUtils::autograd_meta(&(self->tensor));
     if (!meta->GetMutableGradNode()) {
       VLOG(6) << "Make grad node of tensor: " << self->tensor.name()
@@ -535,6 +552,7 @@ static PyObject* tensor_clear_gradient(TensorObject* self,
}
if
(
grad
->
impl
())
{
eager_gil_scoped_release
guard
;
if
(
grad
->
is_selected_rows
())
{
auto
selected_rows
=
std
::
dynamic_pointer_cast
<
phi
::
SelectedRows
>
(
grad
->
impl
());
...
...
@@ -577,6 +595,7 @@ static PyObject* tensor__zero_grads(TensorObject* self,
   VLOG(4) << "ZeroGrads " << self->tensor.name();
   if (egr::egr_utils_api::IsLeafTensor(self->tensor)) {
+    eager_gil_scoped_release guard;
     // Add RetainGrad as PostHook to AccumulationNode
     paddle::experimental::Tensor* grad =
         egr::EagerUtils::mutable_grad(self->tensor);
@@ -595,6 +614,7 @@ static PyObject* tensor__zero_grads(TensorObject* self,
       }
     }
   } else {
+    eager_gil_scoped_release guard;
     auto meta = egr::EagerUtils::unsafe_autograd_meta(self->tensor);
     if (meta->MutableGrad()->initialized()) {
       if (meta->MutableGrad()->is_dense_tensor()) {
@@ -855,6 +875,7 @@ static PyObject* tensor__getitem_index_not_tensor(TensorObject* self,
                        decrease_axis.end());
     if (op_type == "slice") {
+      eager_gil_scoped_release guard;
       out = slice_ad_func(self->tensor,
                           slice_axes_tmp,
                           slice_starts,
@@ -862,6 +883,7 @@ static PyObject* tensor__getitem_index_not_tensor(TensorObject* self,
                           infer_flags_tmp,
                           decrease_axis_tmp);
     } else if (op_type == "strided_slice") {
+      eager_gil_scoped_release guard;
       out = strided_slice_ad_func(
           self->tensor, slice_axes, slice_starts, slice_ends, slice_strides);
     } else {
@@ -886,28 +908,31 @@ static PyObject* tensor__getitem_index_not_tensor(TensorObject* self,
       none_axes.pop_back();
     }
     if (!none_axes.empty()) {
-      // Deal with cases that decrease_axes is not empty
-      // For example:
-      // # x.shape: (2,3,4)
-      // out = x[0, 0:2, None] # out.shape : (2, 1, 4)
-      for (auto& axis : none_axes) {
-        int len = 0;
-        for (int da : decrease_axis) {
-          if (da < axis) {
-            len++;
+      paddle::experimental::Tensor new_out;
+      {
+        eager_gil_scoped_release guard;
+        // Deal with cases that decrease_axes is not empty
+        // For example:
+        // # x.shape: (2,3,4)
+        // out = x[0, 0:2, None] # out.shape : (2, 1, 4)
+        for (auto& axis : none_axes) {
+          int len = 0;
+          for (int da : decrease_axis) {
+            if (da < axis) {
+              len++;
+            }
           }
+          axis -= len;
         }
-        axis -= len;
+        new_out = unsqueeze_ad_func(out, none_axes);
       }
-      paddle::experimental::Tensor new_out;
-      new_out = unsqueeze_ad_func(out, none_axes);
       return ToPyObject(new_out);
     }
   }

   // the index is a list
   if (list_select_flag) {
+    eager_gil_scoped_release guard;
     auto select_index = paddle::experimental::Tensor(
         egr::Controller::Instance().GenerateUniqueName());
     auto idx_tensor = std::make_shared<phi::DenseTensor>();
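The common constraint behind every hunk above: nothing inside a released scope may call the Python C API. That is why new_out is converted via ToPyObject only after the closing brace, and why the ad_func calls inside the guards are pure C++. A final sketch contrasting the bug with the safe shape (illustrative, runnable as a pybind11 module):

```cpp
#include <pybind11/pybind11.h>

namespace py = pybind11;

// BUG: touching Python while the GIL is released is undefined behavior.
void wrong() {
  py::gil_scoped_release release;
  py::print("boom");  // Python C-API call without the GIL held
}

// OK: keep the released region pure C++, and briefly re-acquire the GIL
// for any unavoidable Python interaction (the destructor fix uses the
// same nested-acquire idiom).
void right() {
  py::gil_scoped_release release;
  volatile long sum = 0;
  for (long i = 0; i < 1000000; ++i) sum += i;  // pure C++ work
  {
    py::gil_scoped_acquire gil;  // re-take the GIL for the Python call
    py::print("done");           // safe: GIL held again
  }
}

PYBIND11_MODULE(invariant_demo, m) {
  m.def("wrong", &wrong);
  m.def("right", &right);
}
```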