magicwindyyd / mindspore (forked from MindSpore / mindspore)
Commit e7936ded
Authored by mindspore-ci-bot on May 27, 2020; committed via Gitee on May 27, 2020

!1480 gpu iterator weak ref support

Merge pull request !1480 from panfengfeng/iterator_gpu_weak_ref

Parents: 4e8e82f2, 636d419a
Showing 4 changed files with 15 additions and 47 deletions (+15 −47)
mindspore/ccsrc/dataset/engine/datasetops/device_queue_op.cc   +9   -7
mindspore/ccsrc/device/gpu/gpu_buffer_mgr.cc                   +2   -14
mindspore/ccsrc/device/gpu/gpu_buffer_mgr.h                    +0   -1
mindspore/dataset/engine/iterators.py                          +4   -25
mindspore/ccsrc/dataset/engine/datasetops/device_queue_op.cc

@@ -26,10 +26,6 @@
 #include "dataset/util/task_manager.h"
 #include "dataset/engine/opt/pass.h"
 #ifdef ENABLE_TDTQUE
 #include "tdt/tsd_client.h"
 #endif

 namespace mindspore {
 namespace dataset {
 DeviceQueueOp::DeviceQueueOp(std::string channel_name, DeviceType device_type, int32_t device_id, int32_t prefetch_size,

@@ -167,9 +163,15 @@ Status DeviceQueueOp::SendDataToGPU() {
         is_break_loop = true;
       }
     }
-    RETURN_IF_NOT_OK(GetNextInput(&current_buffer));
+    if (!TaskManager::FindMe()->Interrupted())
+      RETURN_IF_NOT_OK(GetNextInput(&current_buffer));
+    else
+      is_break_loop = true;
   }
-  RETURN_IF_NOT_OK(GetNextInput(&current_buffer));
+  if (!TaskManager::FindMe()->Interrupted())
+    RETURN_IF_NOT_OK(GetNextInput(&current_buffer));
+  else
+    is_break_loop = true;
 }

  MS_LOG(INFO) << "Device queue total batch is " << total_batch << ", number of batches is " << num_batch_ << ".";

@@ -191,7 +193,7 @@ Status DeviceQueueOp::RetryPushGPUData(const std::vector<size_t> &data_size, con
     items.push_back(data_item);
   }
-  while (!GpuBufferMgr::GetInstance().IsClosed()) {
+  while (!GpuBufferMgr::GetInstance().IsClosed() && !TaskManager::FindMe()->Interrupted()) {
     RETURN_IF_NOT_OK(MallocForGPUData(&items, curr_row));
     auto ret = GpuBufferMgr::GetInstance().Push(handle, items, WAIT_TIME);
     if (ret) {
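The two hunks above guard the blocking calls with TaskManager::FindMe()->Interrupted() (and, in RetryPushGPUData, with IsClosed()), so the GPU send path bails out during teardown instead of waiting on input that will never arrive. A rough, self-contained Python sketch of that guard pattern; the Event/Queue machinery and names below are illustrative, not MindSpore's API:

import queue
import threading

stop_event = threading.Event()   # plays the role of the task-interruption flag
source = queue.Queue()

def send_loop():
    """Keep fetching batches, but re-check the stop flag instead of blocking forever."""
    while True:
        if stop_event.is_set():      # analogous to checking Interrupted() before GetNextInput
            break                    # analogous to is_break_loop = true
        try:
            batch = source.get(timeout=0.1)
        except queue.Empty:
            continue
        if batch is None:            # sentinel: no more data
            break
        # ... push `batch` to the device here ...

worker = threading.Thread(target=send_loop)
worker.start()
stop_event.set()   # shutdown no longer hangs on a blocked get
worker.join()

The point is only the ordering: check the stop condition before each potentially blocking step, and turn "interrupted" into a normal loop exit.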
mindspore/ccsrc/device/gpu/gpu_buffer_mgr.cc

@@ -172,9 +172,7 @@ bool GpuBufferMgr::CloseNotify() {
   {
     std::lock_guard<std::mutex> lk(close_mutex_);
     // set closed_ to be true, all the dataset retry can be jumped out of the while
-    closed_ = true;
     // notify all the waiting dataset threads
-    close_confirm_cond_.notify_all();
+    closed_ = true;
   }
   // wati for the dataset threads' ack

@@ -188,16 +186,6 @@ bool GpuBufferMgr::CloseNotify() {
   return result;
 }

-void GpuBufferMgr::CloseConfirm() {
-  // lock scope
-  {
-    std::unique_lock<std::mutex> lk(close_mutex_);
-    // dataset threads wait for the closed_ flag from false to true
-    close_confirm_cond_.wait(lk, [this] { return closed_; });
-  }
-  sema.Signal();
-}
+void GpuBufferMgr::CloseConfirm() {
+  sema.Signal();
+}
 }  // namespace device
 }  // namespace mindspore
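After this change CloseNotify still sets closed_ under close_mutex_ and then, per the comment, waits for the dataset threads' ack, while CloseConfirm is reduced to signalling the semaphore instead of first waiting on close_confirm_cond_. A rough Python analogue of the handshake that remains; all names below are illustrative, not the GpuBufferMgr API:

import threading

closed = False
close_mutex = threading.Lock()
ack_sema = threading.Semaphore(0)    # counts acks from dataset threads

def close_notify(num_dataset_threads):
    """Owner side: publish the closed flag, then wait for one ack per dataset thread."""
    global closed
    with close_mutex:
        closed = True
    for _ in range(num_dataset_threads):
        ack_sema.acquire()

def close_confirm():
    """Dataset-thread side: acknowledge right away, as in the simplified CloseConfirm."""
    ack_sema.release()

t = threading.Thread(target=close_confirm)
t.start()
close_notify(1)    # returns once the dataset thread has acknowledged
t.join()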
mindspore/ccsrc/device/gpu/gpu_buffer_mgr.h

@@ -119,7 +119,6 @@ class GpuBufferMgr {
   bool closed_;
   std::mutex mutex_;
   std::mutex close_mutex_;
-  std::condition_variable close_confirm_cond_;
   // how many queues opened by dataset
   int open_by_dataset_;
   Semaphore sema;
mindspore/dataset/engine/iterators.py

@@ -17,7 +17,6 @@
 from abc import abstractmethod
 import copy
 import weakref
-from importlib import import_module
 from mindspore._c_dataengine import DEPipeline
 from mindspore._c_dataengine import OpName

@@ -25,10 +24,6 @@ from mindspore._c_dataengine import OpName
 from mindspore import log as logger
 from . import datasets as de

-try:
-    context = import_module("mindspore.context")
-except ModuleNotFoundError:
-    context = None

 ITERATORS_LIST = list()

@@ -36,18 +31,9 @@ ITERATORS_LIST = list()
 def _cleanup():
     """Release all the Iterator."""
     for itr_ref in ITERATORS_LIST:
-        if context:
-            device_type = context.get_context("device_target")
-            if device_type == "GPU":
-                itr_ref.release()
-            else:
-                itr = itr_ref()
-                if itr is not None:
-                    itr.release()
-        else:
-            itr = itr_ref()
-            if itr is not None:
-                itr.release()
+        itr = itr_ref()
+        if itr is not None:
+            itr.release()


 def alter_tree(node):

@@ -101,14 +87,7 @@ class Iterator:
     """

     def __init__(self, dataset):
-        if context:
-            device_type = context.get_context("device_target")
-            if device_type == "GPU":
-                ITERATORS_LIST.append(self)
-            else:
-                ITERATORS_LIST.append(weakref.ref(self))
-        else:
-            ITERATORS_LIST.append(weakref.ref(self))
+        ITERATORS_LIST.append(weakref.ref(self))
         # create a copy of tree and work on it.
         self.dataset = copy.deepcopy(dataset)
         self.dataset = alter_tree(self.dataset)
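With the device_target special case removed, every Iterator is tracked the same way: ITERATORS_LIST holds weakref.ref(self), and _cleanup only releases iterators that are still alive. A minimal, runnable sketch of that weak-reference registry pattern; the class and names below are illustrative stand-ins, not the MindSpore classes:

import weakref

_REGISTRY = []    # list of weak references, mirroring ITERATORS_LIST

class _FakeIterator:
    """Stand-in for a dataset iterator that owns a native pipeline."""
    def __init__(self):
        _REGISTRY.append(weakref.ref(self))   # register without keeping the object alive
    def release(self):
        print("native pipeline released")

def _cleanup():
    """Release every iterator that is still alive; dead weakrefs dereference to None."""
    for ref in _REGISTRY:
        itr = ref()
        if itr is not None:
            itr.release()

it = _FakeIterator()
_cleanup()    # prints once: the iterator is still reachable
del it
_cleanup()    # prints nothing: the weak reference now returns None

Because the registry no longer holds strong references, a GPU iterator that goes out of scope can be garbage-collected normally, and cleanup only touches the iterators that still exist.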