Commit 24522fe0
Authored on Feb 18, 2020 by ShawnXuan

consistent mode ok

Parent: 5dda431f
Showing 1 changed file with 22 additions and 21 deletions

cnn_benchmark/dali_consistent.py (+22, -21)
@@ -119,7 +119,7 @@ class HybridValPipe(Pipeline):
         return [output, self.cast(self.labels)]
 
-def feed_ndarray(dali_tensor, arr):
+def feed_ndarray(dali_tensor, arr, offset):
     """
     Copy contents of DALI tensor to numpy's NDArray.
@@ -131,11 +131,11 @@ def feed_ndarray(dali_tensor, arr):
             Destination of the copy
     """
     # Wait until arr is no longer used by the engine
-    assert dali_tensor.shape() == list(arr.shape), \
-        ("Shapes do not match: DALI tensor has shape {0}"
-        ", but NDArray has shape {1}".format(dali_tensor.shape(), list(arr.shape)))
+    # assert dali_tensor.shape() == list(arr.shape), \
+    #     ("Shapes do not match: DALI tensor has shape {0}"
+    #     ", but NDArray has shape {1}".format(dali_tensor.shape(), list(arr.shape)))
     # Get CTypes void pointer to the underlying memory held by arr
-    c_type_pointer = ctypes.c_void_p(arr.ctypes.data)
+    c_type_pointer = ctypes.c_void_p(arr.ctypes.data + offset)
     # Copy data from DALI tensor to ptr
     dali_tensor.copy_to_external(c_type_pointer)
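Note on the hunk above: feed_ndarray now takes a byte offset, so each GPU's output can land in the middle of one shared host array instead of its own array. Below is a minimal sketch of that pointer arithmetic in plain NumPy + ctypes with no DALI dependency; ctypes.memmove stands in for dali_tensor.copy_to_external, and all shapes and sizes are illustrative.

import ctypes
import numpy as np

num_gpus, per_gpu_batch = 2, 4
# Stand-ins for the per-GPU DALI outputs (GPU 0 yields zeros, GPU 1 ones).
sub_batches = [np.full((per_gpu_batch, 3), gpu_id, dtype=np.float32)
               for gpu_id in range(num_gpus)]

# One contiguous destination holding every GPU's output back to back.
dst = np.zeros((num_gpus * per_gpu_batch, 3), dtype=np.float32)
nbytes = sub_batches[0].nbytes  # bytes per sub-batch, cf. category_nbytes below

for gpu_id, src in enumerate(sub_batches):
    offset = gpu_id * nbytes  # same arithmetic the patch adds
    # dst.ctypes.data + offset is the pointer feed_ndarray builds from arr.
    ctypes.memmove(ctypes.c_void_p(dst.ctypes.data + offset),
                   ctypes.c_void_p(src.ctypes.data),
                   nbytes)

assert (dst[:per_gpu_batch] == 0).all() and (dst[per_gpu_batch:] == 1).all()

Because the offset is in bytes, callers scale it by a per-category byte count rather than an element count.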
@@ -208,13 +208,15 @@ class DALIGenericIterator(object):
         self._last_batch_padded = last_batch_padded
         self._auto_reset = auto_reset
         self._squeeze_labels = squeeze_labels
+        assert dynamic_shape == False, "support fixed shape only."
         self._dynamic_shape = dynamic_shape
         # Build all pipelines
         for p in self._pipes:
             with p._check_api_type_scope(types.PipelineAPIType.ITERATOR):
                 p.build()
         # Use double-buffering of data batches
-        self._data_batches = [[None] for i in range(self._num_gpus)]
+        #self._data_batches = [[None] for i in range(self._num_gpus)]
+        self._data_batches = [None for i in range(2)]
         self._counter = 0
         self._current_data_batch = 0
         self.output_map = output_map
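Note on the hunk above: the per-GPU batch lists ([[None]] per GPU) are replaced by two whole-cluster buffers, i.e. plain double buffering. A minimal sketch of that layout, assuming _current_data_batch is toggled between 0 and 1 elsewhere in the iterator (the toggle itself is not part of this hunk):

data_batches = [None for _ in range(2)]   # two slots, lazily allocated
current = 0                               # plays the role of _current_data_batch
for step in range(4):
    if data_batches[current] is None:     # first use of this slot: allocate
        data_batches[current] = ["arrays allocated at step %d" % step]
    # ... copy this step's outputs into data_batches[current] here ...
    current = (current + 1) % 2           # assumed toggle to the other slot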
@@ -242,32 +244,35 @@ class DALIGenericIterator(object):
         for p in self._pipes:
             with p._check_api_type_scope(types.PipelineAPIType.ITERATOR):
                 outputs.append(p.share_outputs())
-        for i in range(self._num_gpus):
+        for gpu_id in range(self._num_gpus):
             # MXNet wants batches with clear distinction between
             # data and label entries, so segregate outputs into
             # 2 categories
             # Change DALI TensorLists into Tensors
             category_tensors = dict()
-            category_info = dict()
-            for j, out in enumerate(outputs[i]):
+            category_info = []
+            for j, out in enumerate(outputs[gpu_id]):
                 x = out.as_tensor()
                 category_tensors[self.output_map[j]] = x #.as_tensor()
                 if self._squeeze_labels and self.output_map[j] == 'label':
                     category_tensors[self.output_map[j]].squeeze()
-                category_info[self.output_map[j]] = (x.shape(), np.dtype(x.dtype()))
+                category_info.append((x.shape(), np.dtype(x.dtype())))
             # If we did not yet allocate memory for that batch, do it now
-            if self._data_batches[i][self._current_data_batch] is None:
+            if self._data_batches[self._current_data_batch] is None:
                 for category in self.output_map:
                     t = category_tensors[category]
                     assert type(t) is not TensorGPU, "CPU data only" #TODO
                 d = []
-                for (shape, dtype) in category_info.values():
+                self.category_nbytes = []
+                for j, (shape, dtype) in enumerate(category_info):
+                    self.category_nbytes.append(np.zeros(shape, dtype=dtype).nbytes)
+                    shape[0] = self._num_gpus * shape[0]
                     d.append(np.zeros(shape, dtype=dtype))
-                self._data_batches[i][self._current_data_batch] = d
+                self._data_batches[self._current_data_batch] = d
-            d = self._data_batches[i][self._current_data_batch]
+            d = self._data_batches[self._current_data_batch]
             # Copy data from DALI Tensors to NDArrays
             if self._dynamic_shape:
                 for j, (shape, dtype) in enumerate(category_info):
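Note on the allocation logic added above: each category's per-GPU byte size is recorded before the leading dimension is widened, so later copies can be placed at per-GPU offsets. A minimal self-contained sketch with illustrative shapes; the assert removed in an earlier hunk compared dali_tensor.shape() against a list, so shape here is a mutable Python list and the in-place shape[0] update works:

import numpy as np

num_gpus = 2
# Illustrative (shape, dtype) pairs, e.g. a 'data' and a 'label' category.
category_info = [([4, 3, 224, 224], np.dtype(np.float32)),
                 ([4], np.dtype(np.int32))]

category_nbytes, d = [], []
for shape, dtype in category_info:
    category_nbytes.append(np.zeros(shape, dtype=dtype).nbytes)  # per-GPU bytes
    shape[0] = num_gpus * shape[0]         # widen to the whole-cluster batch
    d.append(np.zeros(shape, dtype=dtype))

assert d[0].shape[0] == 8
assert category_nbytes[0] == 4 * 3 * 224 * 224 * 4  # float32 = 4 bytes/element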
@@ -275,7 +280,8 @@ class DALIGenericIterator(object):
                         d[j] = np.zeros(shape, dtype=dtype)
             for j, d_arr in enumerate(d):
-                feed_ndarray(category_tensors[self.output_map[j]], d_arr)
+                offset = gpu_id * self.category_nbytes[j]
+                feed_ndarray(category_tensors[self.output_map[j]], d_arr, offset)
         for p in self._pipes:
             with p._check_api_type_scope(types.PipelineAPIType.ITERATOR):
@@ -303,12 +309,7 @@ class DALIGenericIterator(object):
             # for db in self._data_batches:
             #     db[copy_db_index].pad = 0
-        #_data_batches[gpu_id][_current_data_batch][images, labels]
-        images = [db[copy_db_index][0] for db in self._data_batches]
-        labels = [db[copy_db_index][1] for db in self._data_batches]
-        #return images, labels
-        return np.concatenate(images), np.concatenate(labels)
+        #return [db[copy_db_index] for db in self._data_batches]
+        return self._data_batches[copy_db_index]
 
     def next(self):
         """
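Note on the final hunk: the old return path concatenated per-GPU arrays on every batch, an extra full copy; in the new layout each GPU's slice has already been written at its byte offset into one preallocated array, so the buffer is returned as-is. A minimal sketch of the difference (shapes illustrative):

import numpy as np

per_gpu = [np.full((4, 3), i, dtype=np.float32) for i in range(2)]
old_style = np.concatenate(per_gpu)             # allocates and copies each call
new_style = np.zeros((8, 3), dtype=np.float32)  # preallocated once, then reused
for i, a in enumerate(per_gpu):
    new_style[i * 4:(i + 1) * 4] = a            # written in place at its offset
assert (old_style == new_style).all()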