Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
s920243400
PaddleDetection
提交
47d7e276
P
PaddleDetection
项目概览
s920243400
/
PaddleDetection
与 Fork 源项目一致
Fork自
PaddlePaddle / PaddleDetection
通知
2
Star
0
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
PaddleDetection
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
体验新版 GitCode,发现更多精彩内容 >>
未验证
提交
47d7e276
编写于
3月 17, 2021
作者:
K
Kaipeng Deng
提交者:
GitHub
3月 17, 2021
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
enable shared memory (#2356)
* enable shared memory
上级
4f6ffb40
变更
6
隐藏空白更改
内联
并排
Showing
6 changed files
with
121 additions
and
19 deletions
+121
-19
dygraph/.gitignore
dygraph/.gitignore
+3
-0
dygraph/configs/ppyolo/_base_/ppyolo_reader.yml
dygraph/configs/ppyolo/_base_/ppyolo_reader.yml
+1
-1
dygraph/configs/ttfnet/_base_/ttfnet_reader.yml
dygraph/configs/ttfnet/_base_/ttfnet_reader.yml
+1
-0
dygraph/configs/yolov3/_base_/yolov3_reader.yml
dygraph/configs/yolov3/_base_/yolov3_reader.yml
+1
-1
dygraph/ppdet/data/reader.py
dygraph/ppdet/data/reader.py
+48
-17
dygraph/ppdet/data/shm_utils.py
dygraph/ppdet/data/shm_utils.py
+67
-0
未找到文件。
dygraph/.gitignore
浏览文件 @
47d7e276
...
...
@@ -74,3 +74,6 @@ dataset/wider_face/WIDER_test
dataset/wider_face/WIDER_train
dataset/wider_face/WIDER_val
dataset/wider_face/wider_face_split
# distribute launch log
log*
dygraph/configs/ppyolo/_base_/ppyolo_reader.yml
浏览文件 @
47d7e276
...
...
@@ -21,7 +21,7 @@ TrainReader:
shuffle
:
true
drop_last
:
true
mixup_epoch
:
25000
use_shared_memory
:
true
EvalReader
:
sample_transforms
:
...
...
dygraph/configs/ttfnet/_base_/ttfnet_reader.yml
浏览文件 @
47d7e276
...
...
@@ -12,6 +12,7 @@ TrainReader:
batch_size
:
12
shuffle
:
true
drop_last
:
true
use_shared_memory
:
true
EvalReader
:
sample_transforms
:
...
...
dygraph/configs/yolov3/_base_/yolov3_reader.yml
浏览文件 @
47d7e276
...
...
@@ -21,7 +21,7 @@ TrainReader:
shuffle
:
true
drop_last
:
true
mixup_epoch
:
250
use_shared_memory
:
true
EvalReader
:
inputs_def
:
...
...
dygraph/ppdet/data/reader.py
浏览文件 @
47d7e276
...
...
@@ -29,6 +29,7 @@ from paddle.io import DistributedBatchSampler
from
ppdet.core.workspace
import
register
,
serializable
,
create
from
.
import
transform
from
.shm_utils
import
_get_shared_memory_size_in_M
from
ppdet.utils.logger
import
setup_logger
logger
=
setup_logger
(
'reader'
)
...
...
@@ -111,8 +112,32 @@ class BatchCompose(Compose):
class
BaseDataLoader
(
object
):
"""
Base DataLoader implementation for detection models
Args:
sample_transforms (list): a list of transforms to perform
on each sample
batch_transforms (list): a list of transforms to perform
on batch
batch_size (int): batch size for batch collating, default 1.
shuffle (bool): whether to shuffle samples
drop_last (bool): whether to drop the last incomplete,
default False
drop_empty (bool): whether to drop samples with no ground
truth labels, default True
num_classes (int): class number of dataset, default 80
use_shared_memory (bool): whether to use shared memory to
accelerate data loading, enable this only if you
are sure that the shared memory size of your OS
is larger than memory cost of input datas of model.
Note that shared memory will be automatically
disabled if the shared memory of OS is less than
1G, which is not enough for detection models.
Default False.
"""
def
__init__
(
self
,
inputs_def
=
None
,
sample_transforms
=
[],
batch_transforms
=
[],
batch_size
=
1
,
...
...
@@ -120,6 +145,7 @@ class BaseDataLoader(object):
drop_last
=
False
,
drop_empty
=
True
,
num_classes
=
80
,
use_shared_memory
=
False
,
**
kwargs
):
# sample transform
self
.
_sample_transforms
=
Compose
(
...
...
@@ -131,14 +157,14 @@ class BaseDataLoader(object):
self
.
batch_size
=
batch_size
self
.
shuffle
=
shuffle
self
.
drop_last
=
drop_last
self
.
use_shared_memory
=
use_shared_memory
self
.
kwargs
=
kwargs
def
__call__
(
self
,
dataset
,
worker_num
,
batch_sampler
=
None
,
return_list
=
False
,
use_prefetch
=
True
):
return_list
=
False
):
self
.
dataset
=
dataset
self
.
dataset
.
parse_dataset
()
# get data
...
...
@@ -155,14 +181,22 @@ class BaseDataLoader(object):
else
:
self
.
_batch_sampler
=
batch_sampler
use_shared_memory
=
self
.
use_shared_memory
# check whether shared memory size is bigger than 1G(1024M)
if
use_shared_memory
:
shm_size
=
_get_shared_memory_size_in_M
()
if
shm_size
is
not
None
and
shm_size
<
1024.
:
logger
.
warn
(
"Shared memory size is less than 1G, "
"disable shared_memory in DataLoader"
)
use_shared_memory
=
False
self
.
dataloader
=
DataLoader
(
dataset
=
self
.
dataset
,
batch_sampler
=
self
.
_batch_sampler
,
collate_fn
=
self
.
_batch_transforms
,
num_workers
=
worker_num
,
return_list
=
return_list
,
use_buffer_reader
=
use_prefetch
,
use_shared_memory
=
False
)
use_shared_memory
=
use_shared_memory
)
self
.
loader
=
iter
(
self
.
dataloader
)
return
self
...
...
@@ -197,7 +231,6 @@ class TrainReader(BaseDataLoader):
__shared__
=
[
'num_classes'
]
def
__init__
(
self
,
inputs_def
=
None
,
sample_transforms
=
[],
batch_transforms
=
[],
batch_size
=
1
,
...
...
@@ -206,9 +239,9 @@ class TrainReader(BaseDataLoader):
drop_empty
=
True
,
num_classes
=
80
,
**
kwargs
):
super
(
TrainReader
,
self
).
__init__
(
inputs_def
,
sample_transforms
,
batch_transforms
,
batch_size
,
shuffle
,
drop_last
,
drop_empty
,
num_classes
,
**
kwargs
)
super
(
TrainReader
,
self
).
__init__
(
sample_transforms
,
batch_transforms
,
batch_size
,
shuffle
,
drop_last
,
drop_empty
,
num_classes
,
**
kwargs
)
@
register
...
...
@@ -216,7 +249,6 @@ class EvalReader(BaseDataLoader):
__shared__
=
[
'num_classes'
]
def
__init__
(
self
,
inputs_def
=
None
,
sample_transforms
=
[],
batch_transforms
=
[],
batch_size
=
1
,
...
...
@@ -225,9 +257,9 @@ class EvalReader(BaseDataLoader):
drop_empty
=
True
,
num_classes
=
80
,
**
kwargs
):
super
(
EvalReader
,
self
).
__init__
(
inputs_def
,
sample_transforms
,
batch_transforms
,
batch_size
,
shuffle
,
drop_last
,
drop_empty
,
num_classes
,
**
kwargs
)
super
(
EvalReader
,
self
).
__init__
(
sample_transforms
,
batch_transforms
,
batch_size
,
shuffle
,
drop_last
,
drop_empty
,
num_classes
,
**
kwargs
)
@
register
...
...
@@ -235,7 +267,6 @@ class TestReader(BaseDataLoader):
__shared__
=
[
'num_classes'
]
def
__init__
(
self
,
inputs_def
=
None
,
sample_transforms
=
[],
batch_transforms
=
[],
batch_size
=
1
,
...
...
@@ -244,6 +275,6 @@ class TestReader(BaseDataLoader):
drop_empty
=
True
,
num_classes
=
80
,
**
kwargs
):
super
(
TestReader
,
self
).
__init__
(
inputs_def
,
sample_transforms
,
batch_transforms
,
batch_size
,
shuffle
,
drop_last
,
drop_empty
,
num_classes
,
**
kwargs
)
super
(
TestReader
,
self
).
__init__
(
sample_transforms
,
batch_transforms
,
batch_size
,
shuffle
,
drop_last
,
drop_empty
,
num_classes
,
**
kwargs
)
dygraph/ppdet/data/shm_utils.py
0 → 100644
浏览文件 @
47d7e276
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import
os
SIZE_UNIT
=
[
'K'
,
'M'
,
'G'
,
'T'
]
SHM_QUERY_CMD
=
'df -h'
SHM_KEY
=
'shm'
SHM_DEFAULT_MOUNT
=
'/dev/shm'
# [ shared memory size check ]
# In detection models, image/target data occupies a lot of memory, and
# will occupy lots of shared memory in multi-process DataLoader, we use
# following code to get shared memory size and perform a size check to
# disable shared memory use if shared memory size is not enough.
# Shared memory getting process as follows:
# 1. use `df -h` get all mount info
# 2. pick up spaces whose mount info contains 'shm'
# 3. if 'shm' space number is only 1, return its size
# 4. if there are multiple 'shm' space, try to find the default mount
# directory '/dev/shm' is Linux-like system, otherwise return the
# biggest space size.
def
_parse_size_in_M
(
size_str
):
num
,
unit
=
size_str
[:
-
1
],
size_str
[
-
1
]
assert
unit
in
SIZE_UNIT
,
\
"unknown shm size unit {}"
.
format
(
unit
)
return
float
(
num
)
*
\
(
1024
**
(
SIZE_UNIT
.
index
(
unit
)
-
1
))
def _get_shared_memory_size_in_M():
    """Return the available shared-memory size in MB, or None if unknown.

    Parses `df -h` output: collects every mount line whose text contains
    'shm'; with a single match returns its size; with several matches,
    prefers the default Linux mount '/dev/shm' and otherwise falls back
    to the largest shm space found.

    Returns:
        float | None: available shared memory in megabytes, or None when
            `df` cannot be run or no shm mount is listed.
    """
    try:
        df_infos = os.popen(SHM_QUERY_CMD).readlines()
    except Exception:  # `df` unavailable (e.g. non-POSIX system)
        return None
    else:
        shm_infos = []
        for df_info in df_infos:
            info = df_info.strip()
            if info.find(SHM_KEY) >= 0:
                shm_infos.append(info.split())

        if len(shm_infos) == 0:
            return None
        elif len(shm_infos) == 1:
            # `df -h` columns: filesystem, size, used, avail, use%, mount;
            # column 3 is the available space.
            return _parse_size_in_M(shm_infos[0][3])
        else:
            # Prefer the default Linux shm mount; fall back to the biggest
            # shm space when '/dev/shm' is not among the mounts.
            # Fix: the original indexed the filtered list when it was
            # EMPTY (len == 0), raising IndexError whenever several shm
            # mounts existed but none was '/dev/shm', and took `max` over
            # the filtered list instead of over all shm mounts.
            default_infos = [
                si for si in shm_infos if si[-1] == SHM_DEFAULT_MOUNT
            ]
            if default_infos:
                return _parse_size_in_M(default_infos[0][3])
            return max([_parse_size_in_M(si[3]) for si in shm_infos])
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录