magicwindyyd / mindspore — commit fdafc690 (forked from MindSpore / mindspore)
Commit fdafc690
Authored Jun 16, 2020 by ms_yan

repair problem in NumpySlicesDataset

Parent: 2865436f
Showing 3 changed files with 48 additions and 70 deletions (+48 −70):
mindspore/dataset/engine/datasets.py  (+22 −42)
mindspore/dataset/engine/validators.py  (+12 −12)
tests/ut/python/dataset/test_dataset_numpy_slices.py  (+14 −16)
mindspore/dataset/engine/datasets.py

```diff
@@ -2209,7 +2209,7 @@ class ConcatDataset(DatasetOp):
             Number, number of batches.
         """
         children_sizes = [c.get_dataset_size() for c in self.input]
-        dataset_size = np.sum(children_sizes)
+        dataset_size = sum(children_sizes)
         return dataset_size
@@ -2219,8 +2219,8 @@ class RenameDataset(DatasetOp):
     Args:
         input_dataset (Dataset): Input Dataset to be Renamed.
-        input_column_names (list[str]): list of names of the input columns.
-        output_column_names (list[str]): list of names of the output columns.
+        input_columns (list[str]): list of names of the input columns.
+        output_columns (list[str]): list of names of the output columns.
     """

     def __init__(self, input_dataset, input_columns, output_columns):
@@ -4737,58 +4737,39 @@ class _NumpySlicesDataset:
     def __init__(self, data, column_list=None):
         self.column_list = None
         # Convert dict data into tuple
-        if isinstance(data, dict) or isinstance(data[0], dict):
+        if isinstance(data, dict):
             data = self.process_dict(data)

-        if isinstance(data[0], tuple) or isinstance(data, tuple):
-            self.is_tuple = True
-            self.data = data
-            if isinstance(data[0], tuple):
-                for i in range(len(self.data)):
-                    self.data[i] = np.array(self.data[i])
+        if isinstance(data, tuple):
+            self.data = ()
+            data_len = len(data)
+            for i in range(data_len):
+                self.data = self.data + (np.array(data[i]),)
         else:
-            self.is_tuple = False
-            self.data = np.array(data)
+            self.data = (np.array(data),)

         # Init column_name
         if column_list is not None:
             self.column_list = column_list
         elif self.column_list is None:
             self.column_list = []
-            column_num = len(self.data) if self.is_tuple else 1
+            column_num = len(self.data)
             for i in range(column_num):
                 self.column_list.append("column_" + str(i))

     def __getitem__(self, index):
-        if self.is_tuple:
-            data_row = [d[index, ...] for d in self.data]
-            data_res = tuple(data_row)
-        else:
-            data_row = self.data[index, ...]
-            data_row = [data_row]
-            data_res = tuple(data_row)
+        data_row = []
+        for i in range(len(self.data)):
+            data_row.append(self.data[i][index, ...])
+        data_res = tuple(data_row)

         return data_res

     def __len__(self):
-        if self.is_tuple:
-            return len(self.data[0])
-        return len(self.data)
+        return len(self.data[0])

     def process_dict(self, input_data):
         """
         Convert the dict like data into tuple format, when input is a tuple of dict then compose it into a dict first.
         """
-        # When input is a tuple of dict, composing it
-        if isinstance(input_data, tuple) and isinstance(input_data[0], dict):
-            data_dict = {}
-            for d in input_data:
-                data_dict.update(d)
-            input_data = data_dict
-
-        # convert pandas like dict(has "values" column) into General dict
+        # Convert pandas like dict(has "values" column) into General dict
         data_keys = list(input_data.keys())
         data_col = input_data[data_keys[0]]
         if hasattr(data_col, "values"):
@@ -4799,13 +4780,12 @@ class _NumpySlicesDataset:
             input_data = new_dict

         # Convert the data in dict into tuple
-        data = []
-        self.column_list = []
-        keys = list(input_data.keys())
+        data = ()
+        keys = input_data.keys()
+        self.column_list = keys
         for key in keys:
-            self.column_list.append(key)
             value = input_data[key]
-            data.append(tuple(value))
+            data = data + (list(value),)

         return data
@@ -4844,7 +4824,7 @@ class NumpySlicesDataset(GeneratorDataset):
         - not allowed

     Args:
-        data (list, tuple or dict) Input of Given data, supported data type includes list, tuple, dict and other numpy
+        data (list, tuple or dict) Input of Given data, supported data type includes list, tuple, dict and other numpy
             format. Input data will be sliced in first dimension and generate many rows, large data is not recommend to
             load in this way as data is loading into memory.
         column_names (list[str], optional): List of column names of the dataset (default=None). If column_names not
@@ -4868,8 +4848,8 @@ class NumpySlicesDataset(GeneratorDataset):
         >>> # 2) Input data can be a dict, and column_names will be its key
         >>> data = {"a": [1, 2], "b": [3, 4]}
         >>> dataset2 = ds.NumpySlicesDataset(data)
-        >>> # 3) Input data can be a tuple (or list of tuple), and each tuple element refers to data in each column
-        >>> data = ((1, 2), (3, 4), (5, 6))
+        >>> # 3) Input data can be a tuple of lists (or numpy arrays), each tuple element refers to data in each column
+        >>> data = ([1, 2], [3, 4], [5, 6])
         >>> dataset3 = ds.NumpySlicesDataset(data, column_names=["column_1", "column_2", "column_3"])
         >>> # 4) Load data from csv file
         >>> import pandas as pd
```
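In short, the reworked `_NumpySlicesDataset` drops the `is_tuple` flag and always normalizes its input into a tuple of NumPy arrays, one array per column; `__getitem__` then builds a row by indexing every column, and `__len__` reports the length of the first column. Below is a minimal standalone sketch of that normalization for orientation only; the helper name `normalize_columns` is illustrative and is not part of the module.

```python
import numpy as np


def normalize_columns(data):
    """Sketch of the new __init__ logic: turn any supported input
    (dict, tuple of lists/arrays, or plain list) into a tuple of
    NumPy arrays, one array per output column."""
    if isinstance(data, dict):
        # Mirrors the new process_dict(): one column per dict key.
        data = tuple(list(value) for value in data.values())
    if isinstance(data, tuple):
        return tuple(np.array(column) for column in data)
    return (np.array(data),)


columns = normalize_columns(([1, 2], [3, 4], [5, 6]))
row_0 = tuple(column[0, ...] for column in columns)  # one row = one slice per column
assert len(columns[0]) == 2                          # two rows, as in the new __len__
assert [int(x) for x in row_0] == [1, 3, 5]
```

Storing every column as an array up front is what lets the list, tuple and dict branches share a single `__getitem__` path instead of branching on `is_tuple`.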
mindspore/dataset/engine/validators.py

```diff
@@ -1484,8 +1484,11 @@ def check_numpyslicesdataset(method):
         # check data; required argument
         data = param_dict.get('data')
         if not isinstance(data, (list, tuple, dict, np.ndarray)):
-            raise TypeError("Unsupported data type: {}, only support some common python data type, \
-                like list, tuple, dict, and numpy array.".format(type(data)))
+            raise TypeError("Unsupported data type: {}, only support some common python data type, "
+                            "like list, tuple, dict, and numpy array.".format(type(data)))
+        if isinstance(data, tuple) and not isinstance(data[0], (list, np.ndarray)):
+            raise TypeError("Unsupported data type: when input is tuple, only support some common python "
+                            "data type, like tuple of lists and tuple of numpy arrays.")
         if not data:
             raise ValueError("Input data is empty.")
@@ -1499,20 +1502,17 @@ def check_numpyslicesdataset(method):
         if isinstance(data, dict):
             data_column = len(list(data.keys()))
             if column_num != data_column:
-                raise ValueError("Num of column is {0}, but required is {1}.".format(column_num, data_column))
+                raise ValueError("Num of input column names is {0}, but required is {1}.".format(column_num, data_column))

-        # Consider input is a tuple of dict
-        elif isinstance(data[0], dict):
-            data_column = sum(len(list(data[i].keys())) for i in range(len(data)))
-            if column_num != data_column:
-                raise ValueError("Num of column is {0}, but required is {1}.".format(column_num, data_column))
-
-        elif isinstance(data[0], tuple) or isinstance(data, tuple):
+        elif isinstance(data, tuple):
             if column_num != len(data):
-                raise ValueError("Num of column is {0}, but required is {1}.".format(column_num, len(data)))
+                raise ValueError("Num of input column names is {0}, but required is {1}.".format(column_num, len(data)))

         else:
             if column_num != 1:
-                raise ValueError("Num of column is {0}, but required is {1} as data is list.".format(column_num, 1))
+                raise ValueError("Num of input column names is {0}, but required is {1} as data is list.".format(column_num, 1))

         return method(*args, **kwargs)
```
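The net effect of the tightened validator is that a tuple input must now be a tuple of lists or NumPy arrays; tuple-of-dict and tuple-of-scalar inputs are rejected at construction time instead of failing later. A hedged illustration of the resulting behaviour, assuming MindSpore at this commit is installed (error wording taken from the check added above):

```python
import mindspore.dataset as ds

# Accepted: a tuple of lists, one list per named column.
good = ds.NumpySlicesDataset(([1, 2], [3, 4], [5, 6]),
                             column_names=["c1", "c2", "c3"], shuffle=False)

# Rejected by the new check: tuple elements that are not list / np.ndarray.
try:
    ds.NumpySlicesDataset(((1, 2), (3, 4)), column_names=["c1", "c2"])
except TypeError as err:
    print(err)  # "Unsupported data type: when input is tuple, ..."
```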
tests/ut/python/dataset/test_dataset_numpy_slices.py

```diff
@@ -81,34 +81,32 @@ def test_numpy_slices_dict_1():
         assert data[1] == res[i][1]


-def test_numpy_slices_dict_2():
-    logger.info("Test input data is a tuple of Dictionary structure data.")
-    data1, data2 = {"a": [1, 2]}, {"b": [3, 4]}
-    ds = de.NumpySlicesDataset((data1, data2), column_names=["col1", "col2"], shuffle=False)
-    res = [[1, 3], [2, 4]]
+def test_numpy_slices_tuple_1():
+    logger.info("Test slicing a list of tuple.")
+    np_data = [([1, 2], [3, 4]), ([11, 12], [13, 14]), ([21, 22], [23, 24])]
+    ds = de.NumpySlicesDataset(np_data, shuffle=False)

     for i, data in enumerate(ds):
-        assert data[0] == res[i][0]
-        assert data[1] == res[i][1]
+        assert np.equal(data, np_data[i]).all()

+    assert sum([1 for _ in ds]) == 3

-def test_numpy_slices_tuple_1():
-    logger.info("Test slicing a list of tuple.")
-    np_data = [([1, 2], [3, 4]), ([11, 12], [13, 14]), ([21, 22], [23, 24])]
-    res = [[[1, 2], [11, 12], [21, 22]], [[3, 4], [13, 14], [23, 24]]]
-    ds = de.NumpySlicesDataset(np_data, shuffle=False)
+
+def test_numpy_slices_tuple_2():
+    logger.info("Test slicing a tuple of list.")
+    np_data = ([1, 2], [3, 4], [5, 6])
+    expected = [[1, 3, 5], [2, 4, 6]]
+    ds = de.NumpySlicesDataset(np_data, shuffle=False)

     for i, data in enumerate(ds):
-        assert np.equal(data[0], res[i][0]).all()
-        assert np.equal(data[1], res[i][1]).all()
-        assert np.equal(data[2], res[i][2]).all()
+        assert np.equal(data, expected[i]).all()

     assert sum([1 for _ in ds]) == 2


-def test_numpy_slices_tuple_2():
+def test_numpy_slices_tuple_3():
     logger.info("Test reading different dimension of tuple data.")
     features, labels = np.random.sample((5, 2)), np.random.sample((5, 1))
     data = (features, labels)
@@ -191,9 +189,9 @@ if __name__ == "__main__":
     test_numpy_slices_list_3()
     test_numpy_slices_list_append()
     test_numpy_slices_dict_1()
-    test_numpy_slices_dict_2()
-    test_numpy_slices_tuple_1()
-    test_numpy_slices_tuple_2()
+    test_numpy_slices_tuple_1()
+    test_numpy_slices_tuple_2()
+    test_numpy_slices_tuple_3()
     test_numpy_slices_csv_value()
     test_numpy_slices_csv_dict()
     test_numpy_slices_num_samplers()
```
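The renamed `test_numpy_slices_tuple_1` pins down the repaired behaviour for a list of tuples: each outer list element becomes one row, so three elements yield three rows. A small usage sketch mirroring that test (assumes MindSpore is installed; `de` stands for `mindspore.dataset`, as in the test file):

```python
import numpy as np
import mindspore.dataset as de

# A list of 3 tuples now yields 3 rows, each matching the source element.
np_data = [([1, 2], [3, 4]), ([11, 12], [13, 14]), ([21, 22], [23, 24])]
data_set = de.NumpySlicesDataset(np_data, shuffle=False)

for i, row in enumerate(data_set):
    assert np.equal(row, np_data[i]).all()
assert sum(1 for _ in data_set) == 3
```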