magicwindyyd / mindspore (forked from MindSpore / mindspore, in sync with the upstream fork source)
Commit 6d1ea7af

Authored on Apr 07, 2020 by Alexey Shevlyakov
Committed on Apr 10, 2020 by 高东海

remove make_unique.h

Parent: 71b81c8f
Showing 60 changed files with 241 additions and 291 deletions (+241 -291)
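The whole commit is one mechanical substitution: every call to the project-local helper (spelled either make_unique or mindspore::make_unique and declared in dataset/util/make_unique.h) is replaced by the C++14 standard std::make_unique, and the local header is deleted. The snippet below is a minimal, self-contained sketch of that before/after pattern; Widget is an illustrative stand-in for the dataset classes actually touched (DataSchema, TensorQTable, DataBuffer, ...), not a type from the repository.

#include <memory>
#include <string>

// Illustrative type standing in for the classes constructed in the diff.
struct Widget {
  explicit Widget(std::string name) : name_(std::move(name)) {}
  std::string name_;
};

int main() {
  // Before this commit the code called a project-local helper, e.g.
  //   std::unique_ptr<Widget> w = make_unique<Widget>("schema");
  //   std::unique_ptr<Widget> w = mindspore::make_unique<Widget>("schema");
  //
  // After the commit the same construction uses the standard facility directly,
  // so dataset/util/make_unique.h is no longer needed.
  std::unique_ptr<Widget> w = std::make_unique<Widget>("schema");
  return w == nullptr;  // 0 on success, mirroring the nullptr checks in the diff
}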
mindspore/ccsrc/dataset/api/de_pipeline.cc  +3 -4
mindspore/ccsrc/dataset/core/global_context.cc  +3 -3
mindspore/ccsrc/dataset/core/tensor.cc  +2 -3
mindspore/ccsrc/dataset/engine/data_buffer.cc  +1 -1
mindspore/ccsrc/dataset/engine/data_schema.cc  +4 -4
mindspore/ccsrc/dataset/engine/datasetops/batch_op.cc  +8 -8
mindspore/ccsrc/dataset/engine/datasetops/dataset_op.cc  +5 -5
mindspore/ccsrc/dataset/engine/datasetops/device_queue_op.cc  +1 -1
mindspore/ccsrc/dataset/engine/datasetops/map_op.cc  +1 -1
mindspore/ccsrc/dataset/engine/datasetops/parallel_op.cc  +1 -1
mindspore/ccsrc/dataset/engine/datasetops/project_op.cc  +1 -1
mindspore/ccsrc/dataset/engine/datasetops/rename_op.cc  +2 -2
mindspore/ccsrc/dataset/engine/datasetops/shuffle_op.cc  +6 -6
mindspore/ccsrc/dataset/engine/datasetops/source/celeba_op.cc  +12 -12
mindspore/ccsrc/dataset/engine/datasetops/source/cifar_op.cc  +12 -12
mindspore/ccsrc/dataset/engine/datasetops/source/generator_op.cc  +4 -4
mindspore/ccsrc/dataset/engine/datasetops/source/image_folder_op.cc  +13 -13
mindspore/ccsrc/dataset/engine/datasetops/source/manifest_op.cc  +11 -11
mindspore/ccsrc/dataset/engine/datasetops/source/mindrecord_op.cc  +22 -21
mindspore/ccsrc/dataset/engine/datasetops/source/mnist_op.cc  +13 -13
mindspore/ccsrc/dataset/engine/datasetops/source/sampler/distributed_sampler.cc  +3 -3
mindspore/ccsrc/dataset/engine/datasetops/source/sampler/pk_sampler.cc  +3 -3
mindspore/ccsrc/dataset/engine/datasetops/source/sampler/random_sampler.cc  +4 -4
mindspore/ccsrc/dataset/engine/datasetops/source/sampler/sampler.cc  +1 -1
mindspore/ccsrc/dataset/engine/datasetops/source/sampler/sampler.h  +0 -1
mindspore/ccsrc/dataset/engine/datasetops/source/sampler/sequential_sampler.cc  +3 -3
mindspore/ccsrc/dataset/engine/datasetops/source/sampler/subset_random_sampler.cc  +3 -3
mindspore/ccsrc/dataset/engine/datasetops/source/sampler/weighted_random_sampler.cc  +5 -5
mindspore/ccsrc/dataset/engine/datasetops/source/storage_client.cc  +4 -5
mindspore/ccsrc/dataset/engine/datasetops/source/storage_op.cc  +4 -4
mindspore/ccsrc/dataset/engine/datasetops/source/tf_buffer.cc  +3 -4
mindspore/ccsrc/dataset/engine/datasetops/source/tf_reader_op.cc  +18 -19
mindspore/ccsrc/dataset/engine/datasetops/source/voc_op.cc  +11 -11
mindspore/ccsrc/dataset/engine/datasetops/zip_op.cc  +6 -8
mindspore/ccsrc/dataset/engine/db_connector.h  +1 -1
mindspore/ccsrc/dataset/engine/execution_tree.cc  +1 -1
mindspore/ccsrc/dataset/kernels/image/image_utils.cc  +1 -2
mindspore/ccsrc/dataset/kernels/py_func_op.cc  +0 -1
mindspore/ccsrc/dataset/util/arena.cc  +0 -1
mindspore/ccsrc/dataset/util/circular_pool.cc  +1 -3
mindspore/ccsrc/dataset/util/de_error.h  +7 -0
mindspore/ccsrc/dataset/util/list.h  +1 -2
mindspore/ccsrc/dataset/util/lock.cc  +1 -0
mindspore/ccsrc/dataset/util/lock.h  +0 -1
mindspore/ccsrc/dataset/util/make_unique.h  +0 -37
mindspore/ccsrc/dataset/util/queue.h  +1 -1
mindspore/ccsrc/dataset/util/task.h  +0 -1
mindspore/ccsrc/device/gpu/blocking_queue.cc  +1 -2
mindspore/ccsrc/kernel/gpu/math/bias_add_gpu_kernel.h  +2 -3
mindspore/ccsrc/kernel/gpu/nn/bias_add_grad_gpu_kenel.h  +2 -3
mindspore/ccsrc/kernel/gpu/nn/lstm_gpu_kernel.h  +2 -3
mindspore/ccsrc/kernel/gpu/nn/lstm_grad_data_gpu_kernel.h  +3 -4
mindspore/ccsrc/kernel/gpu/nn/lstm_grad_weight_gpu_kernel.h  +2 -3
tests/ut/cpp/dataset/celeba_op_test.cc  +1 -1
tests/ut/cpp/dataset/cifar_op_test.cc  +1 -1
tests/ut/cpp/dataset/image_folder_op_test.cc  +7 -7
tests/ut/cpp/dataset/manifest_op_test.cc  +1 -1
tests/ut/cpp/dataset/project_op_test.cc  +1 -1
tests/ut/cpp/dataset/stand_alone_samplers_test.cc  +1 -1
tests/ut/cpp/dataset/tfReader_op_test.cc  +10 -10
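The deleted header mindspore/ccsrc/dataset/util/make_unique.h (+0 -37 above) is not rendered on this page, so its exact contents are unknown. As a rough guess, a pre-C++14 backport that such a header typically provides looks like the sketch below; since the diffs also use the array form make_unique<T[]>(n), the real header presumably had an array overload as well. This is a hypothetical reconstruction, not the actual file.

// Hypothetical reconstruction only; not the actual contents of the deleted header.
#include <memory>
#include <utility>

namespace mindspore {

// Forwarding constructor arguments into a heap allocation owned by unique_ptr,
// the same shape as the C++14 std::make_unique this commit switches to.
template <typename T, typename... Args>
std::unique_ptr<T> make_unique(Args &&... args) {
  return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
}

}  // namespace mindspore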
mindspore/ccsrc/dataset/api/de_pipeline.cc
@@ -23,7 +23,6 @@
 #include "dataset/engine/datasetops/source/image_folder_op.h"
 #include "dataset/engine/datasetops/source/mnist_op.h"
 #include "dataset/engine/datasetops/source/voc_op.h"
-#include "dataset/util/make_unique.h"
 #include "dataset/core/tensor.h"
 #include "dataset/engine/dataset_iterator.h"
 #include "dataset/engine/datasetops/source/manifest_op.h"
@@ -119,7 +118,7 @@ Status DEPipeline::AssignRootNode(const DsOpPtr &dataset_op) { return (tree_->As
 Status DEPipeline::LaunchTreeExec() {
   RETURN_IF_NOT_OK(tree_->Prepare());
   RETURN_IF_NOT_OK(tree_->Launch());
-  iterator_ = make_unique<DatasetIterator>(tree_);
+  iterator_ = std::make_unique<DatasetIterator>(tree_);
   if (iterator_ == nullptr) RETURN_STATUS_UNEXPECTED("Cannot create an Iterator.");
   return Status::OK();
 }
@@ -307,7 +306,7 @@ Status DEPipeline::ParseStorageOp(const py::dict &args, std::shared_ptr<DatasetO
   if (!args["schema"].is_none()) {
     (void)builder->SetSchemaFile(ToString(args["schema"]));
   } else if (!args["schema_json_string"].is_none()) {
-    std::unique_ptr<DataSchema> schema = make_unique<DataSchema>();
+    std::unique_ptr<DataSchema> schema = std::make_unique<DataSchema>();
     std::string s = ToString(args["schema_json_string"]);
     RETURN_IF_NOT_OK(schema->LoadSchemaString(s, std::vector<std::string>()));
     (void)builder->SetNumRows(schema->num_rows());
@@ -683,7 +682,7 @@ Status DEPipeline::ParseTFReaderOp(const py::dict &args, std::shared_ptr<Dataset
     }
   }
   if (schema_exists) {
-    std::unique_ptr<DataSchema> schema = make_unique<DataSchema>();
+    std::unique_ptr<DataSchema> schema = std::make_unique<DataSchema>();
     if (args.contains("schema_file_path")) {
       RETURN_IF_NOT_OK(schema->LoadSchemaFile(ToString(args["schema_file_path"]), columns_to_load));
     } else {
mindspore/ccsrc/dataset/core/global_context.cc
@@ -55,9 +55,9 @@ Status GlobalContext::Init() {
   // For testing we can use Dummy pool instead
   // Create some tensor allocators for the different types and hook them into the pool.
-  tensor_allocator_ = mindspore::make_unique<Allocator<Tensor>>(mem_pool_);
-  cv_tensor_allocator_ = mindspore::make_unique<Allocator<CVTensor>>(mem_pool_);
-  int_allocator_ = mindspore::make_unique<IntAlloc>(mem_pool_);
+  tensor_allocator_ = std::make_unique<Allocator<Tensor>>(mem_pool_);
+  cv_tensor_allocator_ = std::make_unique<Allocator<CVTensor>>(mem_pool_);
+  int_allocator_ = std::make_unique<IntAlloc>(mem_pool_);
   return Status::OK();
 }
mindspore/ccsrc/dataset/core/tensor.cc
@@ -28,7 +28,6 @@
 #include "dataset/core/global_context.h"
 #include "dataset/core/pybind_support.h"
 #include "dataset/core/tensor_shape.h"
-#include "dataset/util/make_unique.h"
 namespace py = pybind11;
 namespace mindspore {
@@ -53,7 +52,7 @@ namespace dataset {
 Tensor::Tensor(const TensorShape &shape, const DataType &type) : shape_(shape), type_(type), data_(nullptr) {
   // grab the mem pool from global context and create the allocator for char data area
   std::shared_ptr<MemoryPool> global_pool = GlobalContext::Instance()->mem_pool();
-  data_allocator_ = mindspore::make_unique<Allocator<unsigned char>>(global_pool);
+  data_allocator_ = std::make_unique<Allocator<unsigned char>>(global_pool);
 }
 Tensor::Tensor(const TensorShape &shape, const DataType &type, const unsigned char *data) : Tensor(shape, type) {
@@ -137,7 +136,7 @@ Status Tensor::CreateTensor(std::shared_ptr<Tensor> *ptr, py::array arr) {
   if ((*ptr)->type_ == DataType::DE_UNKNOWN) RETURN_STATUS_UNEXPECTED("Invalid data type.");
   std::shared_ptr<MemoryPool> global_pool = GlobalContext::Instance()->mem_pool();
-  (*ptr)->data_allocator_ = mindspore::make_unique<Allocator<unsigned char>>(global_pool);
+  (*ptr)->data_allocator_ = std::make_unique<Allocator<unsigned char>>(global_pool);
   static_cast<void>((*ptr)->StartAddr());
   int64_t byte_size = (*ptr)->SizeInBytes();
   unsigned char *data = static_cast<unsigned char *>(arr.request().ptr);
mindspore/ccsrc/dataset/engine/data_buffer.cc
@@ -40,7 +40,7 @@ Status DataBuffer::CreateDataBuffer(
     case DatasetType::kTf: {
       // This type of buffer is for TF record data.
       // Allocate derived class version for a TF buffers
-      new_data_buffer = mindspore::make_unique<TFBuffer>(id, kDeBFlagNone, storage_client);
+      new_data_buffer = std::make_unique<TFBuffer>(id, kDeBFlagNone, storage_client);
       break;
     }
     default: {
mindspore/ccsrc/dataset/engine/data_schema.cc
@@ -26,8 +26,8 @@
 #include "common/utils.h"
 #include "dataset/util/status.h"
 #include "dataset/core/tensor_shape.h"
-#include "dataset/util/make_unique.h"
 #include "utils/log_adapter.h"
+#include "dataset/util/de_error.h"
 namespace mindspore {
 namespace dataset {
@@ -58,7 +58,7 @@ ColDescriptor::ColDescriptor(const std::string &col_name, DataType col_type, Ten
   // our shape. Otherwise, set our shape to be empty.
   if (in_shape != nullptr) {
     // Create a shape and copy construct it into our column's shape.
-    tensor_shape_ = mindspore::make_unique<TensorShape>(*in_shape);
+    tensor_shape_ = std::make_unique<TensorShape>(*in_shape);
   } else {
     tensor_shape_ = nullptr;
   }
@@ -75,7 +75,7 @@ ColDescriptor::ColDescriptor(const std::string &col_name, DataType col_type, Ten
 ColDescriptor::ColDescriptor(const ColDescriptor &in_cd)
     : type_(in_cd.type_), rank_(in_cd.rank_), tensor_impl_(in_cd.tensor_impl_), col_name_(in_cd.col_name_) {
   // If it has a tensor shape, make a copy of it with our own unique_ptr.
-  tensor_shape_ = in_cd.hasShape() ? mindspore::make_unique<TensorShape>(in_cd.shape()) : nullptr;
+  tensor_shape_ = in_cd.hasShape() ? std::make_unique<TensorShape>(in_cd.shape()) : nullptr;
 }
 // Assignment overload
@@ -86,7 +86,7 @@ ColDescriptor &ColDescriptor::operator=(const ColDescriptor &in_cd) {
   tensor_impl_ = in_cd.tensor_impl_;
   col_name_ = in_cd.col_name_;
   // If it has a tensor shape, make a copy of it with our own unique_ptr.
-  tensor_shape_ = in_cd.hasShape() ? mindspore::make_unique<TensorShape>(in_cd.shape()) : nullptr;
+  tensor_shape_ = in_cd.hasShape() ? std::make_unique<TensorShape>(in_cd.shape()) : nullptr;
  }
  return *this;
 }
mindspore/ccsrc/dataset/engine/datasetops/batch_op.cc
@@ -59,8 +59,8 @@ Status BatchOp::operator()() {
   TaskManager::FindMe()->Post();
   int32_t epoch_num = 0, batch_num = 0, cnt = 0;
   TensorRow new_row;
-  std::unique_ptr<TensorQTable> table = make_unique<TensorQTable>();
-  child_iterator_ = mindspore::make_unique<ChildIterator>(this, 0, 0);
+  std::unique_ptr<TensorQTable> table = std::make_unique<TensorQTable>();
+  child_iterator_ = std::make_unique<ChildIterator>(this, 0, 0);
   RETURN_IF_NOT_OK(child_iterator_->FetchNextTensorRow(&new_row));
   column_name_map_ = child_iterator_->col_name_id_map();
   int32_t cur_batch_size = 0;
@@ -72,7 +72,7 @@ Status BatchOp::operator()() {
     if (table->size() == static_cast<size_t>(cur_batch_size)) {
       RETURN_IF_NOT_OK(worker_queues_[cnt++ % num_workers_]->EmplaceBack(
         std::make_pair(std::move(table), CBatchInfo(epoch_num, batch_num++, cnt - epoch_num))));
-      table = make_unique<TensorQTable>();
+      table = std::make_unique<TensorQTable>();
       RETURN_IF_NOT_OK(GetBatchSize(&cur_batch_size, CBatchInfo(epoch_num, batch_num, cnt - epoch_num)));
     }
     RETURN_IF_NOT_OK(child_iterator_->FetchNextTensorRow(&new_row));
@@ -82,7 +82,7 @@ Status BatchOp::operator()() {
       RETURN_IF_NOT_OK(worker_queues_[cnt++ % num_workers_]->EmplaceBack(
         std::make_pair(std::move(table), CBatchInfo(epoch_num, batch_num++, cnt - epoch_num))));
     }
-    table = make_unique<TensorQTable>();  // this drops when drop == true
+    table = std::make_unique<TensorQTable>();  // this drops when drop == true
     // end of the current epoch, batch_num should start from 0 again
     batch_num = 0;
     epoch_num++;
@@ -153,9 +153,9 @@ Status BatchOp::WorkerEntry(int32_t workerId) {
   RETURN_IF_NOT_OK(worker_queues_[workerId]->PopFront(&table_pair));
   while (table_pair.second.ctrl_ != batchCtrl::kQuit) {
     if (table_pair.second.ctrl_ == batchCtrl::kEOE) {
-      RETURN_IF_NOT_OK(out_connector_->Add(workerId, make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOE)));
+      RETURN_IF_NOT_OK(out_connector_->Add(workerId, std::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOE)));
     } else if (table_pair.second.ctrl_ == batchCtrl::kEOF) {
-      RETURN_IF_NOT_OK(out_connector_->Add(workerId, make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOF)));
+      RETURN_IF_NOT_OK(out_connector_->Add(workerId, std::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOF)));
     } else if (table_pair.second.ctrl_ == batchCtrl::kNoCtrl) {
       std::unique_ptr<DataBuffer> db = nullptr;
       RETURN_IF_NOT_OK(MakeBatchedBuffer(std::move(table_pair), &db));
@@ -170,8 +170,8 @@ Status BatchOp::MakeBatchedBuffer(std::pair<std::unique_ptr<TensorQTable>, CBatc
                                   std::unique_ptr<DataBuffer> *db) {
   RETURN_UNEXPECTED_IF_NULL(table_pair.first);
   if (!input_column_names_.empty()) RETURN_IF_NOT_OK(MapColumns(&table_pair));  // pass it through pyfunc
-  (*db) = make_unique<DataBuffer>(table_pair.second.batch_num_, DataBuffer::kDeBFlagNone);
-  std::unique_ptr<TensorQTable> dest_table = make_unique<TensorQTable>();
+  (*db) = std::make_unique<DataBuffer>(table_pair.second.batch_num_, DataBuffer::kDeBFlagNone);
+  std::unique_ptr<TensorQTable> dest_table = std::make_unique<TensorQTable>();
   RETURN_IF_NOT_OK(BatchRows(&table_pair.first, &dest_table, table_pair.first->size()));
   (*db)->set_tensor_table(std::move(dest_table));
   (*db)->set_column_name_map(column_name_map_);
mindspore/ccsrc/dataset/engine/datasetops/dataset_op.cc
@@ -80,9 +80,9 @@ void DatasetOp::CreateConnector(int32_t num_producers, int32_t num_consumers) {
   MS_LOG(INFO) << "Creating connector in tree operator: " << operator_id_ << ". Producer: " << num_producers
                << ". Consumer: " << num_consumers << ".";
   if (oc_queue_size_ > 0) {
-    out_connector_ = mindspore::make_unique<DbConnector>(num_producers,  // The number of producers
-                                                         num_consumers,  // Only one consumer (the training App)
-                                                         oc_queue_size_);
+    out_connector_ = std::make_unique<DbConnector>(num_producers,  // The number of producers
+                                                   num_consumers,  // Only one consumer (the training App)
+                                                   oc_queue_size_);
   } else {
     // Some op's may choose not to have an output connector
     MS_LOG(INFO) << "Bypassed connector creation for tree operator: " << operator_id_ << ".";
@@ -149,7 +149,7 @@ Status DatasetOp::GetNextInput(std::unique_ptr<DataBuffer> *p_buffer, int32_t wo
 // The base class implementation simply flows the eoe message to output. Derived classes
 // may override if they need to perform special eoe handling.
 Status DatasetOp::EoeReceived(int32_t worker_id) {
-  std::unique_ptr<DataBuffer> eoe_buffer = mindspore::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOE);
+  std::unique_ptr<DataBuffer> eoe_buffer = std::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOE);
   return (out_connector_->Add(static_cast<int>(worker_id), std::move(eoe_buffer)));
 }
@@ -157,7 +157,7 @@ Status DatasetOp::EoeReceived(int32_t worker_id) {
 // The base class implementation simply flows the eof message to output. Derived classes
 // may override if they need to perform special eof handling.
 Status DatasetOp::EofReceived(int32_t worker_id) {
-  std::unique_ptr<DataBuffer> eof_buffer = mindspore::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOF);
+  std::unique_ptr<DataBuffer> eof_buffer = std::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOF);
   return (out_connector_->Add(static_cast<int>(worker_id), std::move(eof_buffer)));
 }
mindspore/ccsrc/dataset/engine/datasetops/device_queue_op.cc
@@ -225,7 +225,7 @@ Status DeviceQueueOp::SendDataToCPU() {
   MS_LOG(INFO) << "Device queue, sending data to CPU.";
   int64_t total_batch = 0;
-  std::unique_ptr<ChildIterator> child_iterator = mindspore::make_unique<ChildIterator>(this, 0, 0);
+  std::unique_ptr<ChildIterator> child_iterator = std::make_unique<ChildIterator>(this, 0, 0);
   while (!(child_iterator->eof_handled())) {
     TensorRow curr_row;
     RETURN_IF_NOT_OK(child_iterator->FetchNextTensorRow(&curr_row));
mindspore/ccsrc/dataset/engine/datasetops/map_op.cc
@@ -179,7 +179,7 @@ Status MapOp::WorkerEntry(int32_t worker_id) {
   RETURN_IF_NOT_OK(WorkerEntryInit(in_buffer.get(), &keep_input_columns, &to_process_indices, &final_col_name_id_map,
                                    &input_columns, &output_columns));
-  std::unique_ptr<TensorQTable> new_tensor_table(mindspore::make_unique<TensorQTable>());
+  std::unique_ptr<TensorQTable> new_tensor_table(std::make_unique<TensorQTable>());
   // Perform the compute function of TensorOp(s) and store the result in new_tensor_table.
   RETURN_IF_NOT_OK(WorkerCompute(in_buffer.get(), to_process_indices, new_tensor_table.get(), keep_input_columns,
                                  &input_columns, &output_columns));
mindspore/ccsrc/dataset/engine/datasetops/parallel_op.cc
@@ -48,7 +48,7 @@ Status ParallelOp::CreateWorkerConnector(int32_t worker_connector_size) {
   // Instantiate the worker connector. This is the internal connector, not the operators
   // output connector. It has single master consuming from it (num producers is 1), and the number
   // of workers is the defined count from the op.
-  worker_connector_ = mindspore::make_unique<DbConnector>(num_workers_, num_producers_, worker_connector_size);
+  worker_connector_ = std::make_unique<DbConnector>(num_workers_, num_producers_, worker_connector_size);
   return Status::OK();
 }
mindspore/ccsrc/dataset/engine/datasetops/project_op.cc
@@ -79,7 +79,7 @@ Status ProjectOp::Project(std::unique_ptr<DataBuffer> *data_buffer) {
     new_column_name_mapping[current_column] = i;
     projected_column_indices.push_back(column_name_mapping[current_column]);
   }
-  std::unique_ptr<TensorQTable> new_tensor_table = mindspore::make_unique<TensorQTable>();
+  std::unique_ptr<TensorQTable> new_tensor_table = std::make_unique<TensorQTable>();
   while ((*data_buffer)->NumRows() > 0) {
     TensorRow current_row;
     RETURN_IF_NOT_OK((*data_buffer)->PopRow(&current_row));
mindspore/ccsrc/dataset/engine/datasetops/rename_op.cc
@@ -84,13 +84,13 @@ Status RenameOp::operator()() {
     // we got eoe, now try again until we get eof
     MS_LOG(INFO) << "Rename operator EOE Received.";
-    RETURN_IF_NOT_OK(out_connector_->Add(0, std::move(mindspore::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOE))));
+    RETURN_IF_NOT_OK(out_connector_->Add(0, std::move(std::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOE))));
     MS_LOG(DEBUG) << "Rename operator fetching buffer after EOE.";
     RETURN_IF_NOT_OK(GetNextInput(&curr_buffer));
   }  // end of while eof loop
   MS_LOG(INFO) << "Rename opeerator EOF Received.";
-  RETURN_IF_NOT_OK(out_connector_->Add(0, std::move(mindspore::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOF))));
+  RETURN_IF_NOT_OK(out_connector_->Add(0, std::move(std::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOF))));
   return Status::OK();
 }
mindspore/ccsrc/dataset/engine/datasetops/shuffle_op.cc
@@ -70,7 +70,7 @@ ShuffleOp::ShuffleOp(int32_t shuffle_size, uint32_t shuffle_seed, int32_t op_con
       rng_(shuffle_seed),
       buffer_counter_(0),
       rows_per_buffer_(rows_per_buffer),
-      shuffle_buffer_(mindspore::make_unique<TensorTable>()),
+      shuffle_buffer_(std::make_unique<TensorTable>()),
       shuffle_last_row_idx_(0),
       shuffle_buffer_state_(kShuffleStateInit) {}
@@ -90,7 +90,7 @@ Status ShuffleOp::SelfReset() {
     shuffle_seed_ = distribution(random_device);
     rng_ = std::mt19937_64(shuffle_seed_);
   }
-  shuffle_buffer_ = mindspore::make_unique<TensorTable>();
+  shuffle_buffer_ = std::make_unique<TensorTable>();
   buffer_counter_ = 0;
   shuffle_last_row_idx_ = 0;
   shuffle_buffer_state_ = kShuffleStateInit;
@@ -142,7 +142,7 @@ Status ShuffleOp::operator()() {
   // Create the child iterator to fetch our data from.
   int32_t worker_id = 0;
   int32_t child_idx = 0;
-  child_iterator_ = mindspore::make_unique<ChildIterator>(this, worker_id, child_idx);
+  child_iterator_ = std::make_unique<ChildIterator>(this, worker_id, child_idx);
   // Main operator loop
   while (true) {
@@ -161,7 +161,7 @@ Status ShuffleOp::operator()() {
       // Step 1)
       // Create an output tensor table if one is not created yet.
       if (!new_buffer_table) {
-        new_buffer_table = mindspore::make_unique<TensorQTable>();
+        new_buffer_table = std::make_unique<TensorQTable>();
       }
       // Step 2)
@@ -176,7 +176,7 @@ Status ShuffleOp::operator()() {
       // and send this buffer on it's way up the pipeline. Special case is if this is the
       // last row then we also send it.
       if (new_buffer_table->size() == rows_per_buffer_ || shuffle_last_row_idx_ == 0) {
-        auto new_buffer = mindspore::make_unique<DataBuffer>(buffer_counter_, DataBuffer::kDeBFlagNone);
+        auto new_buffer = std::make_unique<DataBuffer>(buffer_counter_, DataBuffer::kDeBFlagNone);
         new_buffer->set_tensor_table(std::move(new_buffer_table));
         new_buffer->set_column_name_map(column_name_map_);
         buffer_counter_++;
@@ -218,7 +218,7 @@ Status ShuffleOp::operator()() {
     // Since we overloaded eoeReceived function, we are responsible to flow the EOE up the
     // pipepline manually now that we are done draining the shuffle buffer
     MS_LOG(INFO) << "Shuffle operator sending EOE.";
-    auto eoe_buffer = mindspore::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOE);
+    auto eoe_buffer = std::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOE);
     RETURN_IF_NOT_OK(out_connector_->Add(0, std::move(eoe_buffer)));
     // Do not wait for any reset to be flown down from operators above us.
mindspore/ccsrc/dataset/engine/datasetops/source/celeba_op.cc
@@ -40,7 +40,7 @@ Status CelebAOp::Builder::Build(std::shared_ptr<CelebAOp> *op) {
     builder_sampler_ = std::make_shared<SequentialSampler>();
   }
-  builder_schema_ = make_unique<DataSchema>();
+  builder_schema_ = std::make_unique<DataSchema>();
   RETURN_IF_NOT_OK(
     builder_schema_->AddColumn(ColDescriptor("image", DataType(DataType::DE_UINT8), TensorImpl::kFlexible, 1)));
   // label is like this:0 1 0 0 1......
@@ -83,7 +83,7 @@ CelebAOp::CelebAOp(int32_t num_workers, int32_t rows_per_buffer, const std::stri
     col_name_map_[data_schema_->column(index).name()] = index;
   }
-  attr_info_queue_ = make_unique<Queue<std::vector<std::string>>>(queue_size);
+  attr_info_queue_ = std::make_unique<Queue<std::vector<std::string>>>(queue_size);
   io_block_queues_.Init(num_workers_, queue_size);
 }
@@ -311,7 +311,7 @@ Status CelebAOp::AddIOBlock(std::unique_ptr<DataBuffer> *data_buffer) {
       row_count++;
       if (row_count % rows_per_buffer_ == 0) {
         RETURN_IF_NOT_OK(io_block_queues_[buff_count++ % num_workers_]->Add(
-          make_unique<IOBlock>(IOBlock(keys, IOBlock::kDeIoBlockNone))));
+          std::make_unique<IOBlock>(IOBlock(keys, IOBlock::kDeIoBlockNone))));
         keys.clear();
       }
     }
@@ -320,21 +320,21 @@ Status CelebAOp::AddIOBlock(std::unique_ptr<DataBuffer> *data_buffer) {
   if (!keys.empty()) {
     RETURN_IF_NOT_OK(io_block_queues_[(buff_count++) % num_workers_]->Add(
-      make_unique<IOBlock>(IOBlock(keys, IOBlock::kDeIoBlockNone))));
+      std::make_unique<IOBlock>(IOBlock(keys, IOBlock::kDeIoBlockNone))));
   }
   if (!BitTest(op_ctrl_flags_, kDeOpRepeated) || BitTest(op_ctrl_flags_, kDeOpLastRepeat)) {
-    RETURN_IF_NOT_OK(io_block_queues_[(buff_count++) % num_workers_]->Add(make_unique<IOBlock>(IOBlock::kDeIoBlockFlagEoe)));
+    RETURN_IF_NOT_OK(io_block_queues_[(buff_count++) % num_workers_]->Add(std::make_unique<IOBlock>(IOBlock::kDeIoBlockFlagEoe)));
-    RETURN_IF_NOT_OK(io_block_queues_[(buff_count++) % num_workers_]->Add(make_unique<IOBlock>(IOBlock::kDeIoBlockFlagEof)));
+    RETURN_IF_NOT_OK(io_block_queues_[(buff_count++) % num_workers_]->Add(std::make_unique<IOBlock>(IOBlock::kDeIoBlockFlagEof)));
     for (int32_t i = 0; i < num_workers_; i++) {
-      RETURN_IF_NOT_OK(io_block_queues_[i]->Add(std::move(make_unique<IOBlock>(std::vector<int64_t>(), IOBlock::kDeIoBlockNone))));
+      RETURN_IF_NOT_OK(io_block_queues_[i]->Add(std::make_unique<IOBlock>(std::vector<int64_t>(), IOBlock::kDeIoBlockNone)));
     }
     return Status::OK();
   } else {
     // not the last repeat. Acquire lock, sleeps master thread, wait for the wake-up from reset
-    RETURN_IF_NOT_OK(io_block_queues_[(buff_count++) % num_workers_]->Add(make_unique<IOBlock>(IOBlock::kDeIoBlockFlagEoe)));
+    RETURN_IF_NOT_OK(io_block_queues_[(buff_count++) % num_workers_]->Add(std::make_unique<IOBlock>(IOBlock::kDeIoBlockFlagEoe)));
     RETURN_IF_NOT_OK(wp_.Wait());  // Master thread goes to sleep after it has made all the IOBlocks
     wp_.Clear();
     RETURN_IF_NOT_OK(sampler_->GetNextBuffer(data_buffer));
@@ -349,17 +349,17 @@ Status CelebAOp::WorkerEntry(int32_t worker_id) {
   RETURN_IF_NOT_OK(io_block_queues_[worker_id]->PopFront(&io_block));
   while (io_block != nullptr) {
     if (io_block->eoe() == true) {
-      RETURN_IF_NOT_OK(out_connector_->Add(worker_id, std::move(make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOE))));
+      RETURN_IF_NOT_OK(out_connector_->Add(worker_id, std::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOE)));
      buffer_id = worker_id;
     } else if (io_block->eof() == true) {
-      RETURN_IF_NOT_OK(out_connector_->Add(worker_id, std::move(make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOF))));
+      RETURN_IF_NOT_OK(out_connector_->Add(worker_id, std::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOF)));
     } else {
       std::vector<int64_t> keys;
       RETURN_IF_NOT_OK(io_block->GetKeys(&keys));
       if (keys.empty()) {
         return Status::OK();  // empty key is a quit signal for workers
       }
-      std::unique_ptr<DataBuffer> db = make_unique<DataBuffer>(buffer_id, DataBuffer::kDeBFlagNone);
+      std::unique_ptr<DataBuffer> db = std::make_unique<DataBuffer>(buffer_id, DataBuffer::kDeBFlagNone);
       RETURN_IF_NOT_OK(LoadBuffer(keys, &db));
       RETURN_IF_NOT_OK(out_connector_->Add(worker_id, std::move(db)));
       buffer_id += num_workers_;
@@ -370,7 +370,7 @@ Status CelebAOp::WorkerEntry(int32_t worker_id) {
 }
 Status CelebAOp::LoadBuffer(const std::vector<int64_t> &keys, std::unique_ptr<DataBuffer> *db) {
-  std::unique_ptr<TensorQTable> deq = make_unique<TensorQTable>();
+  std::unique_ptr<TensorQTable> deq = std::make_unique<TensorQTable>();
   for (const auto &key : keys) {
     TensorRow row;
     RETURN_IF_NOT_OK(LoadTensorRow(image_labels_vec_[key], &row));
mindspore/ccsrc/dataset/engine/datasetops/source/cifar_op.cc
@@ -47,7 +47,7 @@ Status CifarOp::Builder::Build(std::shared_ptr<CifarOp> *ptr) {
   if (sampler_ == nullptr) {
     sampler_ = std::make_shared<SequentialSampler>();
   }
-  schema_ = make_unique<DataSchema>();
+  schema_ = std::make_unique<DataSchema>();
   TensorShape scalar = TensorShape::CreateScalar();
   RETURN_IF_NOT_OK(schema_->AddColumn(ColDescriptor("image", DataType(DataType::DE_UINT8), TensorImpl::kFlexible, 1)));
   if (cifar_type_ == kCifar10) {
@@ -91,7 +91,7 @@ CifarOp::CifarOp(CifarType type, int32_t num_works, int32_t rows_per_buf, const
     col_name_map_[data_schema_->column(i).name()] = i;
   }
   constexpr uint64_t kUtilQueueSize = 512;
-  cifar_raw_data_block_ = make_unique<Queue<std::vector<unsigned char>>>(kUtilQueueSize);
+  cifar_raw_data_block_ = std::make_unique<Queue<std::vector<unsigned char>>>(kUtilQueueSize);
   io_block_queues_.Init(num_workers_, queue_size);
 }
@@ -114,7 +114,7 @@ Status CifarOp::operator()() {
       if (row_cnt_ >= num_samples_) break;  // enough row read, break for loop
       if (row_cnt_ % rows_per_buffer_ == 0) {
         RETURN_IF_NOT_OK(io_block_queues_[buf_cnt_++ % num_workers_]->Add(
-          make_unique<IOBlock>(IOBlock(keys, IOBlock::kDeIoBlockNone))));
+          std::make_unique<IOBlock>(IOBlock(keys, IOBlock::kDeIoBlockNone))));
         keys.clear();
       }
     }
@@ -122,21 +122,21 @@ Status CifarOp::operator()() {
   }
   if (keys.empty() == false) {
     RETURN_IF_NOT_OK(io_block_queues_[(buf_cnt_++) % num_workers_]->Add(
-      make_unique<IOBlock>(IOBlock(keys, IOBlock::kDeIoBlockNone))));
+      std::make_unique<IOBlock>(IOBlock(keys, IOBlock::kDeIoBlockNone))));
   }
   if (!BitTest(op_ctrl_flags_, kDeOpRepeated) || BitTest(op_ctrl_flags_, kDeOpLastRepeat)) {
-    RETURN_IF_NOT_OK(io_block_queues_[(buf_cnt_++) % num_workers_]->Add(make_unique<IOBlock>(IOBlock::kDeIoBlockFlagEoe)));
+    RETURN_IF_NOT_OK(io_block_queues_[(buf_cnt_++) % num_workers_]->Add(std::make_unique<IOBlock>(IOBlock::kDeIoBlockFlagEoe)));
-    RETURN_IF_NOT_OK(io_block_queues_[(buf_cnt_++) % num_workers_]->Add(make_unique<IOBlock>(IOBlock::kDeIoBlockFlagEof)));
+    RETURN_IF_NOT_OK(io_block_queues_[(buf_cnt_++) % num_workers_]->Add(std::make_unique<IOBlock>(IOBlock::kDeIoBlockFlagEof)));
     for (int32_t i = 0; i < num_workers_; i++) {
-      RETURN_IF_NOT_OK(io_block_queues_[i]->Add(make_unique<IOBlock>(std::vector<int64_t>(), IOBlock::kDeIoBlockNone)));
+      RETURN_IF_NOT_OK(io_block_queues_[i]->Add(std::make_unique<IOBlock>(std::vector<int64_t>(), IOBlock::kDeIoBlockNone)));
     }
     return Status::OK();
   } else {
     // not the last repeat. Acquire lock, sleeps master thread, wait for the wake-up from reset
-    RETURN_IF_NOT_OK(io_block_queues_[(buf_cnt_++) % num_workers_]->Add(make_unique<IOBlock>(IOBlock::kDeIoBlockFlagEoe)));
+    RETURN_IF_NOT_OK(io_block_queues_[(buf_cnt_++) % num_workers_]->Add(std::make_unique<IOBlock>(IOBlock::kDeIoBlockFlagEoe)));
     RETURN_IF_NOT_OK(wp_.Wait());  // Master thread goes to sleep after it has made all the IOBlocks
     wp_.Clear();
     RETURN_IF_NOT_OK(sampler_->GetNextBuffer(&sampler_buffer));
@@ -169,17 +169,17 @@ Status CifarOp::WorkerEntry(int32_t worker_id) {
   RETURN_IF_NOT_OK(io_block_queues_[worker_id]->PopFront(&io_block));
   while (io_block != nullptr) {
     if (io_block->eoe() == true) {
-      RETURN_IF_NOT_OK(out_connector_->Add(worker_id, make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOE)));
+      RETURN_IF_NOT_OK(out_connector_->Add(worker_id, std::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOE)));
       buffer_id = worker_id;
     } else if (io_block->eof() == true) {
-      RETURN_IF_NOT_OK(out_connector_->Add(worker_id, make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOF)));
+      RETURN_IF_NOT_OK(out_connector_->Add(worker_id, std::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOF)));
    } else {
       std::vector<int64_t> keys;
       RETURN_IF_NOT_OK(io_block->GetKeys(&keys));
       if (keys.empty() == true) {
         return Status::OK();  // empty key is a quit signal for workers
       }
-      std::unique_ptr<DataBuffer> db = make_unique<DataBuffer>(buffer_id, DataBuffer::kDeBFlagNone);
+      std::unique_ptr<DataBuffer> db = std::make_unique<DataBuffer>(buffer_id, DataBuffer::kDeBFlagNone);
       RETURN_IF_NOT_OK(LoadBuffer(keys, &db));
       RETURN_IF_NOT_OK(out_connector_->Add(worker_id, std::move(db)));
       buffer_id += num_workers_;
@@ -213,7 +213,7 @@ Status CifarOp::LoadTensorRow(uint64_t index, TensorRow *trow) {
 // Looping over LoadTensorRow to make 1 DataBuffer. 1 function call produces 1 buffer
 Status CifarOp::LoadBuffer(const std::vector<int64_t> &keys, std::unique_ptr<DataBuffer> *db) {
-  std::unique_ptr<TensorQTable> deq = make_unique<TensorQTable>();
+  std::unique_ptr<TensorQTable> deq = std::make_unique<TensorQTable>();
   for (const int64_t &key : keys) {
     TensorRow trow;
     RETURN_IF_NOT_OK(LoadTensorRow(key, &trow));
mindspore/ccsrc/dataset/engine/datasetops/source/generator_op.cc
@@ -173,9 +173,9 @@ Status GeneratorOp::operator()() {
   bool eof = false;
   while (!eof) {
     // Create new buffer each iteration
-    fetched_buffer = mindspore::make_unique<DataBuffer>(buffer_id_++, DataBuffer::kDeBFlagNone);
+    fetched_buffer = std::make_unique<DataBuffer>(buffer_id_++, DataBuffer::kDeBFlagNone);
     fetched_buffer->set_column_name_map(column_names_map_);
-    std::unique_ptr<TensorQTable> fetched_table = mindspore::make_unique<TensorQTable>();
+    std::unique_ptr<TensorQTable> fetched_table = std::make_unique<TensorQTable>();
     bool eoe = false;
     {
       py::gil_scoped_acquire gil_acquire;
@@ -201,12 +201,12 @@ Status GeneratorOp::operator()() {
     if (eoe) {
       // Push out EOE upon StopIteration exception from generator
       MS_LOG(INFO) << "Generator operator sends out EOE.";
-      std::unique_ptr<DataBuffer> eoe_buffer = mindspore::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOE);
+      std::unique_ptr<DataBuffer> eoe_buffer = std::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOE);
       RETURN_IF_NOT_OK(out_connector_->Add(0, std::move(eoe_buffer)));
       if (!BitTest(op_ctrl_flags_, kDeOpRepeated) || BitTest(op_ctrl_flags_, kDeOpLastRepeat)) {
         // If last repeat or not repeated, push out EOF and exit master loop
         MS_LOG(INFO) << "Generator operator sends out EOF.";
-        std::unique_ptr<DataBuffer> eof_buffer = mindspore::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOF);
+        std::unique_ptr<DataBuffer> eof_buffer = std::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOF);
         RETURN_IF_NOT_OK(out_connector_->Add(0, std::move(eof_buffer)));
         MS_LOG(INFO) << "Generator operator main execution loop complete.";
         eof = true;
mindspore/ccsrc/dataset/engine/datasetops/source/image_folder_op.cc
@@ -39,7 +39,7 @@ Status ImageFolderOp::Builder::Build(std::shared_ptr<ImageFolderOp> *ptr) {
   if (builder_sampler_ == nullptr) {
     builder_sampler_ = std::make_shared<SequentialSampler>();
   }
-  builder_schema_ = make_unique<DataSchema>();
+  builder_schema_ = std::make_unique<DataSchema>();
   TensorShape scalar = TensorShape::CreateScalar();
   RETURN_IF_NOT_OK(builder_schema_->AddColumn(ColDescriptor("image", DataType(DataType::DE_UINT8), TensorImpl::kFlexible, 1)));
@@ -82,8 +82,8 @@ ImageFolderOp::ImageFolderOp(int32_t num_wkrs, int32_t rows_per_buffer, std::str
   for (int32_t i = 0; i < data_schema_->NumColumns(); ++i) {
     col_name_map_[data_schema_->column(i).name()] = i;
   }
-  folder_name_queue_ = make_unique<Queue<std::string>>(num_wkrs * queue_size);
-  image_name_queue_ = make_unique<Queue<FolderImagesPair>>(num_wkrs * queue_size);
+  folder_name_queue_ = std::make_unique<Queue<std::string>>(num_wkrs * queue_size);
+  image_name_queue_ = std::make_unique<Queue<FolderImagesPair>>(num_wkrs * queue_size);
   io_block_queues_.Init(num_workers_, queue_size);
 }
@@ -143,7 +143,7 @@ Status ImageFolderOp::operator()() {
       row_cnt_++;
       if (row_cnt_ % rows_per_buffer_ == 0) {
-        RETURN_IF_NOT_OK(io_block_queues_[buf_cnt_++ % num_workers_]->Add(make_unique<IOBlock>(keys, IOBlock::kDeIoBlockNone)));
+        RETURN_IF_NOT_OK(io_block_queues_[buf_cnt_++ % num_workers_]->Add(std::make_unique<IOBlock>(keys, IOBlock::kDeIoBlockNone)));
         keys.clear();
       }
     }
@@ -151,21 +151,21 @@ Status ImageFolderOp::operator()() {
   }
   if (keys.empty() == false) {
-    RETURN_IF_NOT_OK(io_block_queues_[(buf_cnt_++) % num_workers_]->Add(make_unique<IOBlock>(keys, IOBlock::kDeIoBlockNone)));
+    RETURN_IF_NOT_OK(io_block_queues_[(buf_cnt_++) % num_workers_]->Add(std::make_unique<IOBlock>(keys, IOBlock::kDeIoBlockNone)));
   }
   if (!BitTest(op_ctrl_flags_, kDeOpRepeated) || BitTest(op_ctrl_flags_, kDeOpLastRepeat)) {
-    std::unique_ptr<IOBlock> eoe_block = make_unique<IOBlock>(IOBlock::kDeIoBlockFlagEoe);
-    std::unique_ptr<IOBlock> eof_block = make_unique<IOBlock>(IOBlock::kDeIoBlockFlagEof);
+    std::unique_ptr<IOBlock> eoe_block = std::make_unique<IOBlock>(IOBlock::kDeIoBlockFlagEoe);
+    std::unique_ptr<IOBlock> eof_block = std::make_unique<IOBlock>(IOBlock::kDeIoBlockFlagEof);
     RETURN_IF_NOT_OK(io_block_queues_[(buf_cnt_++) % num_workers_]->Add(std::move(eoe_block)));
     RETURN_IF_NOT_OK(io_block_queues_[(buf_cnt_++) % num_workers_]->Add(std::move(eof_block)));
     for (int32_t i = 0; i < num_workers_; ++i) {
-      RETURN_IF_NOT_OK(io_block_queues_[i]->Add(make_unique<IOBlock>(std::vector<int64_t>(), IOBlock::kDeIoBlockNone)));
+      RETURN_IF_NOT_OK(io_block_queues_[i]->Add(std::make_unique<IOBlock>(std::vector<int64_t>(), IOBlock::kDeIoBlockNone)));
     }
     return Status::OK();
   } else {
     // not the last repeat. Sleep master thread, wait for the wake-up from reset
-    RETURN_IF_NOT_OK(io_block_queues_[(buf_cnt_++) % num_workers_]->Add(make_unique<IOBlock>(IOBlock::kDeIoBlockFlagEoe)));
+    RETURN_IF_NOT_OK(io_block_queues_[(buf_cnt_++) % num_workers_]->Add(std::make_unique<IOBlock>(IOBlock::kDeIoBlockFlagEoe)));
     RETURN_IF_NOT_OK(wp_.Wait());  // Master thread goes to sleep after it has made all the IOBlocks
     wp_.Clear();
     RETURN_IF_NOT_OK(sampler_->GetNextBuffer(&sampler_buffer));
@@ -182,15 +182,15 @@ Status ImageFolderOp::WorkerEntry(int32_t worker_id) {
   RETURN_IF_NOT_OK(io_block_queues_[worker_id]->PopFront(&io_block));
   while (io_block != nullptr) {
     if (io_block->eoe() == true) {
-      RETURN_IF_NOT_OK(out_connector_->Add(worker_id, make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOE)));
+      RETURN_IF_NOT_OK(out_connector_->Add(worker_id, std::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOE)));
       buffer_id = worker_id;
     } else if (io_block->eof() == true) {
-      RETURN_IF_NOT_OK(out_connector_->Add(worker_id, make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOF)));
+      RETURN_IF_NOT_OK(out_connector_->Add(worker_id, std::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOF)));
     } else {
       std::vector<int64_t> keys;
       RETURN_IF_NOT_OK(io_block->GetKeys(&keys));
       if (keys.empty() == true) return Status::OK();  // empty key is a quit signal for workers
-      std::unique_ptr<DataBuffer> db = make_unique<DataBuffer>(buffer_id, DataBuffer::kDeBFlagNone);
+      std::unique_ptr<DataBuffer> db = std::make_unique<DataBuffer>(buffer_id, DataBuffer::kDeBFlagNone);
       RETURN_IF_NOT_OK(LoadBuffer(keys, &db));
       RETURN_IF_NOT_OK(out_connector_->Add(worker_id, std::move(db)));
       buffer_id += num_workers_;
@@ -231,7 +231,7 @@ Status ImageFolderOp::LoadTensorRow(ImageLabelPair pairPtr, TensorRow *trow) {
 // Looping over LoadTensorRow to make 1 DataBuffer. 1 function call produces 1 buffer
 Status ImageFolderOp::LoadBuffer(const std::vector<int64_t> &keys, std::unique_ptr<DataBuffer> *db) {
-  std::unique_ptr<TensorQTable> deq = make_unique<TensorQTable>();
+  std::unique_ptr<TensorQTable> deq = std::make_unique<TensorQTable>();
   TensorRow trow;
   for (const int64_t &key : keys) {
     RETURN_IF_NOT_OK(this->LoadTensorRow(image_label_pairs_[key], &trow));
mindspore/ccsrc/dataset/engine/datasetops/source/manifest_op.cc
@@ -40,7 +40,7 @@ Status ManifestOp::Builder::Build(std::shared_ptr<ManifestOp> *ptr) {
   if (builder_sampler_ == nullptr) {
     builder_sampler_ = std::make_shared<SequentialSampler>();
   }
-  builder_schema_ = make_unique<DataSchema>();
+  builder_schema_ = std::make_unique<DataSchema>();
   RETURN_IF_NOT_OK(
     builder_schema_->AddColumn(ColDescriptor("image", DataType(DataType::DE_UINT8), TensorImpl::kFlexible, 1)));
   RETURN_IF_NOT_OK(
@@ -105,7 +105,7 @@ Status ManifestOp::AddIoBlock(std::unique_ptr<DataBuffer> *sampler_buffer) {
       row_cnt_++;
       if (row_cnt_ % rows_per_buffer_ == 0) {
         RETURN_IF_NOT_OK(io_block_queues_[buf_cnt_++ % num_workers_]->Add(
-          make_unique<IOBlock>(IOBlock(keys, IOBlock::kDeIoBlockNone))));
+          std::make_unique<IOBlock>(IOBlock(keys, IOBlock::kDeIoBlockNone))));
         keys.clear();
       }
     }
@@ -113,21 +113,21 @@ Status ManifestOp::AddIoBlock(std::unique_ptr<DataBuffer> *sampler_buffer) {
   }
   if (keys.empty() == false) {
     RETURN_IF_NOT_OK(io_block_queues_[(buf_cnt_++) % num_workers_]->Add(
-      make_unique<IOBlock>(IOBlock(keys, IOBlock::kDeIoBlockNone))));
+      std::make_unique<IOBlock>(IOBlock(keys, IOBlock::kDeIoBlockNone))));
   }
   if (!BitTest(op_ctrl_flags_, kDeOpRepeated) || BitTest(op_ctrl_flags_, kDeOpLastRepeat)) {
-    RETURN_IF_NOT_OK(io_block_queues_[(buf_cnt_++) % num_workers_]->Add(make_unique<IOBlock>(IOBlock::kDeIoBlockFlagEoe)));
+    RETURN_IF_NOT_OK(io_block_queues_[(buf_cnt_++) % num_workers_]->Add(std::make_unique<IOBlock>(IOBlock::kDeIoBlockFlagEoe)));
-    RETURN_IF_NOT_OK(io_block_queues_[(buf_cnt_++) % num_workers_]->Add(make_unique<IOBlock>(IOBlock::kDeIoBlockFlagEof)));
+    RETURN_IF_NOT_OK(io_block_queues_[(buf_cnt_++) % num_workers_]->Add(std::make_unique<IOBlock>(IOBlock::kDeIoBlockFlagEof)));
     for (int32_t i = 0; i < num_workers_; i++) {
-      RETURN_IF_NOT_OK(io_block_queues_[i]->Add(make_unique<IOBlock>(std::vector<int64_t>(), IOBlock::kDeIoBlockNone)));
+      RETURN_IF_NOT_OK(io_block_queues_[i]->Add(std::make_unique<IOBlock>(std::vector<int64_t>(), IOBlock::kDeIoBlockNone)));
     }
     return Status::OK();
   } else {
-    RETURN_IF_NOT_OK(io_block_queues_[(buf_cnt_++) % num_workers_]->Add(make_unique<IOBlock>(IOBlock::kDeIoBlockFlagEoe)));
+    RETURN_IF_NOT_OK(io_block_queues_[(buf_cnt_++) % num_workers_]->Add(std::make_unique<IOBlock>(IOBlock::kDeIoBlockFlagEoe)));
     RETURN_IF_NOT_OK(wp_.Wait());  // Master thread goes to sleep after it has made all the IOBlocks
     wp_.Clear();
     RETURN_IF_NOT_OK(sampler_->GetNextBuffer(sampler_buffer));
@@ -160,17 +160,17 @@ Status ManifestOp::WorkerEntry(int32_t worker_id) {
   RETURN_IF_NOT_OK(io_block_queues_[worker_id]->PopFront(&io_block));
   while (io_block != nullptr) {
     if (io_block->eoe() == true) {
-      RETURN_IF_NOT_OK(out_connector_->Add(worker_id, make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOE)));
+      RETURN_IF_NOT_OK(out_connector_->Add(worker_id, std::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOE)));
      buffer_id = worker_id;
     } else if (io_block->eof() == true) {
-      RETURN_IF_NOT_OK(out_connector_->Add(worker_id, make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOF)));
+      RETURN_IF_NOT_OK(out_connector_->Add(worker_id, std::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOF)));
     } else {
       std::vector<int64_t> keys;
       RETURN_IF_NOT_OK(io_block->GetKeys(&keys));
       if (keys.empty()) {
         return Status::OK();  // empty key is a quit signal for workers
       }
-      std::unique_ptr<DataBuffer> db = make_unique<DataBuffer>(buffer_id, DataBuffer::kDeBFlagNone);
+      std::unique_ptr<DataBuffer> db = std::make_unique<DataBuffer>(buffer_id, DataBuffer::kDeBFlagNone);
       RETURN_IF_NOT_OK(LoadBuffer(keys, &db));
       RETURN_IF_NOT_OK(out_connector_->Add(worker_id, std::move(db)));
       buffer_id += num_workers_;
@@ -227,7 +227,7 @@ Status ManifestOp::LoadTensorRow(const std::pair<std::string, std::vector<std::s
 // Looping over LoadTensorRow to make 1 DataBuffer. 1 function call produces 1 buffer
 Status ManifestOp::LoadBuffer(const std::vector<int64_t> &keys, std::unique_ptr<DataBuffer> *db) {
-  std::unique_ptr<TensorQTable> deq = make_unique<TensorQTable>();
+  std::unique_ptr<TensorQTable> deq = std::make_unique<TensorQTable>();
   for (const auto &key : keys) {
     TensorRow trow;
     RETURN_IF_NOT_OK(LoadTensorRow(image_labelname_[static_cast<size_t>(key)], &trow));
mindspore/ccsrc/dataset/engine/datasetops/source/mindrecord_op.cc
@@ -28,7 +28,6 @@
 #include "dataset/engine/datasetops/dataset_op.h"
 #include "dataset/engine/db_connector.h"
 #include "dataset/engine/execution_tree.h"
-#include "dataset/util/make_unique.h"
 #include "utils/log_adapter.h"
 namespace mindspore {
@@ -94,19 +93,19 @@ MindRecordOp::MindRecordOp(int32_t num_mind_record_workers, int32_t rows_per_buf
   io_blk_queues_.Init(num_workers_, op_connector_queue_size);
   if (!block_reader_) return;
   for (int32_t i = 0; i < num_workers_; ++i) {
-    block_buffer_.emplace_back(make_unique<std::vector<ShardTuple>>(std::vector<ShardTuple>{}));
+    block_buffer_.emplace_back(std::make_unique<std::vector<ShardTuple>>(std::vector<ShardTuple>{}));
   }
 }
 // Private helper method to encapsulate some common construction/reset tasks
 Status MindRecordOp::Init() {
-  shard_reader_ = mindspore::make_unique<ShardReader>();
+  shard_reader_ = std::make_unique<ShardReader>();
   auto rc = shard_reader_->Open(dataset_file_, num_mind_record_workers_, columns_to_load_, operators_, block_reader_);
   CHECK_FAIL_RETURN_UNEXPECTED(rc != MSRStatus::FAILED, "MindRecordOp init failed. Error message: " + ErrnoToMessage(rc));
-  data_schema_ = mindspore::make_unique<DataSchema>();
+  data_schema_ = std::make_unique<DataSchema>();
   std::vector<std::shared_ptr<Schema>> schema_vec = shard_reader_->get_shard_header()->get_schemas();
   // check whether schema exists, if so use the first one
@@ -143,7 +142,7 @@ Status MindRecordOp::Init() {
   }
   if (!load_all_cols) {
-    std::unique_ptr<DataSchema> tmp_schema = make_unique<DataSchema>();
+    std::unique_ptr<DataSchema> tmp_schema = std::make_unique<DataSchema>();
     for (std::string colname : columns_to_load_) {
       CHECK_FAIL_RETURN_UNEXPECTED(colname_to_ind.find(colname) != colname_to_ind.end(), colname + ": doesn't exist");
       RETURN_IF_NOT_OK(tmp_schema->AddColumn(data_schema_->column(colname_to_ind[colname])));
@@ -297,7 +296,7 @@ Status MindRecordOp::LoadFloat(TensorShape *new_shape, std::unique_ptr<T[]> *arr
     RETURN_IF_NOT_OK(GetFloat(&value, columns_json[column_name], use_double));
     *new_shape = TensorShape::CreateScalar();
-    *array_data = mindspore::make_unique<T[]>(1);
+    *array_data = std::make_unique<T[]>(1);
     (*array_data)[0] = value;
   } else {
     if (column.hasShape()) {
@@ -308,7 +307,7 @@ Status MindRecordOp::LoadFloat(TensorShape *new_shape, std::unique_ptr<T[]> *arr
     }
     int idx = 0;
-    *array_data = mindspore::make_unique<T[]>(new_shape->NumOfElements());
+    *array_data = std::make_unique<T[]>(new_shape->NumOfElements());
     for (auto &element : columns_json[column_name]) {
       T value = 0;
       RETURN_IF_NOT_OK(GetFloat(&value, element, use_double));
@@ -349,7 +348,7 @@ Status MindRecordOp::LoadInt(TensorShape *new_shape, std::unique_ptr<T[]> *array
     RETURN_IF_NOT_OK(GetInt(&value, columns_json[column_name]));
     *new_shape = TensorShape::CreateScalar();
-    *array_data = mindspore::make_unique<T[]>(1);
+    *array_data = std::make_unique<T[]>(1);
     (*array_data)[0] = value;
   } else {
     if (column.hasShape()) {
@@ -360,7 +359,7 @@ Status MindRecordOp::LoadInt(TensorShape *new_shape, std::unique_ptr<T[]> *array
     }
     int idx = 0;
-    *array_data = mindspore::make_unique<T[]>(new_shape->NumOfElements());
+    *array_data = std::make_unique<T[]>(new_shape->NumOfElements());
     for (auto &element : columns_json[column_name]) {
       T value = 0;
       RETURN_IF_NOT_OK(GetInt(&value, element));
@@ -430,12 +429,14 @@ Status MindRecordOp::WorkerEntry(int32_t worker_id) {
   RETURN_IF_NOT_OK(io_blk_queues_[worker_id]->PopFront(&io_block));
   while (io_block != nullptr) {
     if (io_block->eoe() == true) {
-      RETURN_IF_NOT_OK(out_connector_->Add(worker_id, std::move(make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOE))));
+      RETURN_IF_NOT_OK(out_connector_->Add(worker_id, std::move(std::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOE))));
       RETURN_IF_NOT_OK(io_blk_queues_[worker_id]->PopFront(&io_block));
       continue;
     }
     if (io_block->eof() == true) {
-      RETURN_IF_NOT_OK(out_connector_->Add(worker_id, std::move(make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOF))));
+      RETURN_IF_NOT_OK(out_connector_->Add(worker_id, std::move(std::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOF))));
       RETURN_IF_NOT_OK(io_blk_queues_[worker_id]->PopFront(&io_block));
       continue;
     }
@@ -485,9 +486,9 @@ Status MindRecordOp::WorkerEntry(int32_t worker_id) {
 Status MindRecordOp::GetBufferFromReader(std::unique_ptr<DataBuffer> *fetched_buffer, int64_t buffer_id,
                                          int32_t worker_id) {
-  *fetched_buffer = mindspore::make_unique<DataBuffer>(buffer_id, DataBuffer::kDeBFlagNone);
+  *fetched_buffer = std::make_unique<DataBuffer>(buffer_id, DataBuffer::kDeBFlagNone);
   (*fetched_buffer)->set_column_name_map(column_name_mapping_);
-  std::unique_ptr<TensorQTable> tensor_table = mindspore::make_unique<TensorQTable>();
+  std::unique_ptr<TensorQTable> tensor_table = std::make_unique<TensorQTable>();
   for (int32_t i = 0; i < rows_per_buffer_; ++i) {
     ShardTuple tupled_buffer;
     if (block_reader_) {
@@ -596,22 +597,22 @@ Status MindRecordOp::operator()() {
   for (int32_t i = 0; i < buffers_needed_; ++i) {
     if (block_reader_) RETURN_IF_NOT_OK(FetchBlockBuffer(i));
     std::vector<int64_t> keys(1, i);
-    RETURN_IF_NOT_OK(io_blk_queues_[buf_cnt_++ % num_workers_]->Add(make_unique<IOBlock>(IOBlock(keys, IOBlock::kDeIoBlockNone))));
+    RETURN_IF_NOT_OK(io_blk_queues_[buf_cnt_++ % num_workers_]->Add(std::make_unique<IOBlock>(IOBlock(keys, IOBlock::kDeIoBlockNone))));
   }
   if (!BitTest(op_ctrl_flags_, kDeOpRepeated) || BitTest(op_ctrl_flags_, kDeOpLastRepeat)) {
-    RETURN_IF_NOT_OK(io_blk_queues_[(buf_cnt_++) % num_workers_]->Add(make_unique<IOBlock>(IOBlock::kDeIoBlockFlagEoe)));
+    RETURN_IF_NOT_OK(io_blk_queues_[(buf_cnt_++) % num_workers_]->Add(std::make_unique<IOBlock>(IOBlock::kDeIoBlockFlagEoe)));
-    RETURN_IF_NOT_OK(io_blk_queues_[(buf_cnt_++) % num_workers_]->Add(make_unique<IOBlock>(IOBlock::kDeIoBlockFlagEof)));
+    RETURN_IF_NOT_OK(io_blk_queues_[(buf_cnt_++) % num_workers_]->Add(std::make_unique<IOBlock>(IOBlock::kDeIoBlockFlagEof)));
     for (int32_t i = 0; i < num_workers_; i++) {
-      RETURN_IF_NOT_OK(io_blk_queues_[i]->Add(std::move(make_unique<IOBlock>(std::vector<int64_t>(), IOBlock::kDeIoBlockNone))));
+      RETURN_IF_NOT_OK(io_blk_queues_[i]->Add(std::move(std::make_unique<IOBlock>(std::vector<int64_t>(), IOBlock::kDeIoBlockNone))));
     }
     return Status::OK();
   } else {
     // not the last repeat. Acquire lock, sleeps master thread, wait for the wake-up from reset
-    RETURN_IF_NOT_OK(io_blk_queues_[(buf_cnt_++) % num_workers_]->Add(make_unique<IOBlock>(IOBlock::kDeIoBlockFlagEoe)));
+    RETURN_IF_NOT_OK(io_blk_queues_[(buf_cnt_++) % num_workers_]->Add(std::make_unique<IOBlock>(IOBlock::kDeIoBlockFlagEoe)));
     // reset our buffer count and go to loop again.
     RETURN_IF_NOT_OK(shard_reader_wait_post_.Wait());
@@ -655,7 +656,7 @@ Status MindRecordOp::LaunchThreadAndInitOp() {
 }
 Status MindRecordOp::CountTotalRows(const std::string dataset_path, int64_t *count) {
-  std::unique_ptr<ShardReader> shard_reader = mindspore::make_unique<ShardReader>();
+  std::unique_ptr<ShardReader> shard_reader = std::make_unique<ShardReader>();
   MSRStatus rc = shard_reader->CountTotalRows(dataset_path, count);
   if (rc == MSRStatus::FAILED) {
     RETURN_STATUS_UNEXPECTED("MindRecordOp count total rows failed.");
mindspore/ccsrc/dataset/engine/datasetops/source/mnist_op.cc
@@ -43,7 +43,7 @@ Status MnistOp::Builder::Build(std::shared_ptr<MnistOp> *ptr) {
   if (builder_sampler_ == nullptr) {
     builder_sampler_ = std::make_shared<SequentialSampler>();
   }
-  builder_schema_ = make_unique<DataSchema>();
+  builder_schema_ = std::make_unique<DataSchema>();
   RETURN_IF_NOT_OK(builder_schema_->AddColumn(ColDescriptor("image", DataType(DataType::DE_UINT8), TensorImpl::kCv, 1)));
   TensorShape scalar = TensorShape::CreateScalar();
@@ -89,7 +89,7 @@ Status MnistOp::TraversalSampleIds(const std::shared_ptr<Tensor> &sample_ids, st
     row_cnt_++;
     if (row_cnt_ % rows_per_buffer_ == 0) {
-      RETURN_IF_NOT_OK(io_block_queues_[buf_cnt_++ % num_workers_]->Add(make_unique<IOBlock>(IOBlock(*keys, IOBlock::kDeIoBlockNone))));
+      RETURN_IF_NOT_OK(io_block_queues_[buf_cnt_++ % num_workers_]->Add(std::make_unique<IOBlock>(IOBlock(*keys, IOBlock::kDeIoBlockNone))));
       keys->clear();
     }
   }
@@ -115,21 +115,21 @@ Status MnistOp::operator()() {
   }
   if (keys.empty() == false) {
-    RETURN_IF_NOT_OK(io_block_queues_[(buf_cnt_++) % num_workers_]->Add(make_unique<IOBlock>(IOBlock(keys, IOBlock::kDeIoBlockNone))));
+    RETURN_IF_NOT_OK(io_block_queues_[(buf_cnt_++) % num_workers_]->Add(std::make_unique<IOBlock>(IOBlock(keys, IOBlock::kDeIoBlockNone))));
   }
   if (!BitTest(op_ctrl_flags_, kDeOpRepeated) || BitTest(op_ctrl_flags_, kDeOpLastRepeat)) {
-    RETURN_IF_NOT_OK(io_block_queues_[(buf_cnt_++) % num_workers_]->Add(make_unique<IOBlock>(IOBlock::kDeIoBlockFlagEoe)));
-    RETURN_IF_NOT_OK(io_block_queues_[(buf_cnt_++) % num_workers_]->Add(make_unique<IOBlock>(IOBlock::kDeIoBlockFlagEof)));
+    RETURN_IF_NOT_OK(io_block_queues_[(buf_cnt_++) % num_workers_]->Add(std::make_unique<IOBlock>(IOBlock::kDeIoBlockFlagEoe)));
+    RETURN_IF_NOT_OK(io_block_queues_[(buf_cnt_++) % num_workers_]->Add(std::make_unique<IOBlock>(IOBlock::kDeIoBlockFlagEof)));
     for (int32_t i = 0; i < num_workers_; ++i) {
-      RETURN_IF_NOT_OK(io_block_queues_[i]->Add(make_unique<IOBlock>(std::vector<int64_t>(), IOBlock::kDeIoBlockNone)));
+      RETURN_IF_NOT_OK(io_block_queues_[i]->Add(std::make_unique<IOBlock>(std::vector<int64_t>(), IOBlock::kDeIoBlockNone)));
     }
     return Status::OK();
   } else {
-    RETURN_IF_NOT_OK(io_block_queues_[(buf_cnt_++) % num_workers_]->Add(make_unique<IOBlock>(IOBlock::kDeIoBlockFlagEoe)));
+    RETURN_IF_NOT_OK(io_block_queues_[(buf_cnt_++) % num_workers_]->Add(std::make_unique<IOBlock>(IOBlock::kDeIoBlockFlagEoe)));
     RETURN_IF_NOT_OK(wp_.Wait());  // Master thread goes to sleep after it has made all the IOBlocks
     wp_.Clear();
     RETURN_IF_NOT_OK(sampler_->GetNextBuffer(&sampler_buffer));
@@ -145,15 +145,15 @@ Status MnistOp::WorkerEntry(int32_t worker_id) {
   RETURN_IF_NOT_OK(io_block_queues_[worker_id]->PopFront(&iOBlock));
   while (iOBlock != nullptr) {
     if (iOBlock->eoe() == true) {
-      RETURN_IF_NOT_OK(out_connector_->Add(worker_id, make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOE)));
+      RETURN_IF_NOT_OK(out_connector_->Add(worker_id, std::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOE)));
       buffer_id = worker_id;
     } else if (iOBlock->eof() == true) {
-      RETURN_IF_NOT_OK(out_connector_->Add(worker_id, make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOF)));
+      RETURN_IF_NOT_OK(out_connector_->Add(worker_id, std::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOF)));
     } else {
       std::vector<int64_t> keys;
       RETURN_IF_NOT_OK(iOBlock->GetKeys(&keys));
       if (keys.empty() == true) return Status::OK();  // empty key is a quit signal for workers
-      std::unique_ptr<DataBuffer> db = make_unique<DataBuffer>(buffer_id, DataBuffer::kDeBFlagNone);
+      std::unique_ptr<DataBuffer> db = std::make_unique<DataBuffer>(buffer_id, DataBuffer::kDeBFlagNone);
       RETURN_IF_NOT_OK(LoadBuffer(keys, &db));
      RETURN_IF_NOT_OK(out_connector_->Add(worker_id, std::move(db)));
      buffer_id += num_workers_;
@@ -178,7 +178,7 @@ Status MnistOp::LoadTensorRow(const MnistLabelPair &mnist_pair, TensorRow *trow)
 // Looping over LoadTensorRow to make 1 DataBuffer. 1 function call produces 1 buffer
 Status MnistOp::LoadBuffer(const std::vector<int64_t> &keys, std::unique_ptr<DataBuffer> *db) {
-  std::unique_ptr<TensorQTable> deq = make_unique<TensorQTable>();
+  std::unique_ptr<TensorQTable> deq = std::make_unique<TensorQTable>();
   TensorRow trow;
   for (const int64_t &key : keys) {
     RETURN_IF_NOT_OK(this->LoadTensorRow(image_label_pairs_[key], &trow));
@@ -309,8 +309,8 @@ Status MnistOp::ReadImageAndLabel(std::ifstream *image_reader, std::ifstream *la
   CHECK_FAIL_RETURN_UNEXPECTED((num_images == num_labels), "num_images != num_labels");
   // The image size of the Mnist dataset is fixed at [28,28]
   int64_t size = kMnistImageRows * kMnistImageCols;
-  auto images_buf = mindspore::make_unique<char[]>(size * num_images);
-  auto labels_buf = mindspore::make_unique<char[]>(num_images);
+  auto images_buf = std::make_unique<char[]>(size * num_images);
+  auto labels_buf = std::make_unique<char[]>(num_images);
   if (images_buf == nullptr || labels_buf == nullptr) {
     std::string err_msg = "Fail to allocate memory for MNIST Buffer.";
     MS_LOG(ERROR) << err_msg.c_str();
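One behavioural detail worth noting about the ReadImageAndLabel hunk above: `std::make_unique<char[]>(n)` value-initializes the buffer and reports allocation failure by throwing `std::bad_alloc` rather than returning a null pointer, so the `images_buf == nullptr` check kept after it is effectively defensive only. Below is a minimal, self-contained sketch of the array form; the sizes and names are illustrative stand-ins, not the dataset code.

#include <cstdint>
#include <iostream>
#include <memory>
#include <new>

int main() {
  const int64_t kRows = 28, kCols = 28, kNumImages = 4;  // illustrative sizes only
  try {
    // Array form: allocates kRows * kCols * kNumImages chars, value-initialized to 0.
    auto images_buf = std::make_unique<char[]>(kRows * kCols * kNumImages);
    auto labels_buf = std::make_unique<char[]>(kNumImages);
    images_buf[0] = 1;  // element access works like a plain array
    labels_buf[0] = 0;
    std::cout << "allocated " << kRows * kCols * kNumImages << " bytes\n";
  } catch (const std::bad_alloc &) {
    // make_unique signals failure with an exception, not a null pointer.
    std::cerr << "allocation failed\n";
    return 1;
  }
  return 0;
}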
mindspore/ccsrc/dataset/engine/datasetops/source/sampler/distributed_sampler.cc
@@ -52,9 +52,9 @@ Status DistributedSampler::GetNextBuffer(std::unique_ptr<DataBuffer> *out_buffer
   if (cnt_ > samples_per_buffer_) {
     RETURN_STATUS_UNEXPECTED("Distributed Sampler Error");
   } else if (cnt_ == samples_per_buffer_) {
-    (*out_buffer) = mindspore::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOE);
+    (*out_buffer) = std::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOE);
   } else {
-    (*out_buffer) = mindspore::make_unique<DataBuffer>(cnt_, DataBuffer::kDeBFlagNone);
+    (*out_buffer) = std::make_unique<DataBuffer>(cnt_, DataBuffer::kDeBFlagNone);
     std::shared_ptr<Tensor> sample_ids;
     RETURN_IF_NOT_OK(CreateSamplerTensor(&sample_ids, samples_per_buffer_));
     int64_t *id_ptr = reinterpret_cast<int64_t *>(sample_ids->StartAddr());
@@ -63,7 +63,7 @@ Status DistributedSampler::GetNextBuffer(std::unique_ptr<DataBuffer> *out_buffer
       *(id_ptr++) = shuffle_ ? shuffle_vec_[static_cast<size_t>(next_id)] : next_id;
     }
     TensorRow row(1, sample_ids);
-    (*out_buffer)->set_tensor_table(make_unique<TensorQTable>(1, row));
+    (*out_buffer)->set_tensor_table(std::make_unique<TensorQTable>(1, row));
   }
   return Status::OK();
 }
mindspore/ccsrc/dataset/engine/datasetops/source/sampler/pk_sampler.cc
@@ -53,9 +53,9 @@ Status PKSampler::GetNextBuffer(std::unique_ptr<DataBuffer> *out_buffer) {
   if (next_id_ > num_pk_samples_ || num_pk_samples_ == 0) {
     RETURN_STATUS_UNEXPECTED("Index out of bound in PKSampler");
   } else if (next_id_ == num_pk_samples_) {
-    (*out_buffer) = mindspore::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOE);
+    (*out_buffer) = std::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOE);
   } else {
-    (*out_buffer) = mindspore::make_unique<DataBuffer>(next_id_, DataBuffer::kDeBFlagNone);
+    (*out_buffer) = std::make_unique<DataBuffer>(next_id_, DataBuffer::kDeBFlagNone);
     std::shared_ptr<Tensor> sample_ids;
     int64_t last_id = (samples_per_buffer_ + next_id_ > num_pk_samples_) ? num_pk_samples_ : samples_per_buffer_ + next_id_;
@@ -68,7 +68,7 @@ Status PKSampler::GetNextBuffer(std::unique_ptr<DataBuffer> *out_buffer) {
       *(id_ptr++) = samples[rnd_ind];
     }
     TensorRow row(1, sample_ids);
-    (*out_buffer)->set_tensor_table(make_unique<TensorQTable>(1, row));
+    (*out_buffer)->set_tensor_table(std::make_unique<TensorQTable>(1, row));
   }
   return Status::OK();
 }
mindspore/ccsrc/dataset/engine/datasetops/source/sampler/random_sampler.cc
@@ -32,9 +32,9 @@ Status RandomSampler::GetNextBuffer(std::unique_ptr<DataBuffer> *out_buffer) {
   if (next_id_ > num_samples_) {
     RETURN_STATUS_UNEXPECTED("RandomSampler Internal Error");
   } else if (next_id_ == num_samples_) {
-    (*out_buffer) = make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOE);
+    (*out_buffer) = std::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOE);
   } else {
-    (*out_buffer) = make_unique<DataBuffer>(next_id_, DataBuffer::kDeBFlagNone);
+    (*out_buffer) = std::make_unique<DataBuffer>(next_id_, DataBuffer::kDeBFlagNone);
     std::shared_ptr<Tensor> sampleIds;
     int64_t last_id = samples_per_buffer_ + next_id_ > num_samples_ ? num_samples_ : samples_per_buffer_ + next_id_;
     RETURN_IF_NOT_OK(CreateSamplerTensor(&sampleIds, last_id - next_id_));
@@ -44,7 +44,7 @@ Status RandomSampler::GetNextBuffer(std::unique_ptr<DataBuffer> *out_buffer) {
     }
     next_id_ = last_id;
     TensorRow row(1, sampleIds);
-    (*out_buffer)->set_tensor_table(make_unique<TensorQTable>(1, row));
+    (*out_buffer)->set_tensor_table(std::make_unique<TensorQTable>(1, row));
   }
   return Status::OK();
 }
@@ -61,7 +61,7 @@ Status RandomSampler::Init(const RandomAccessOp *op) {
     }
     std::shuffle(shuffled_ids_.begin(), shuffled_ids_.end(), rnd_);
   } else {
-    dist = make_unique<std::uniform_int_distribution<int64_t>>(0, num_rows_ - 1);
+    dist = std::make_unique<std::uniform_int_distribution<int64_t>>(0, num_rows_ - 1);
   }
   rnd_.seed(seed_++);
   return Status::OK();
mindspore/ccsrc/dataset/engine/datasetops/source/sampler/sampler.cc
@@ -35,7 +35,7 @@ Status Sampler::CreateSamplerTensor(std::shared_ptr<Tensor> *sample_ids, int64_t
   }
   if (col_desc_ == nullptr) {
     // a ColDescriptor for Tensor that holds SampleIds
-    col_desc_ = make_unique<ColDescriptor>("sampleIds", DataType(DataType::DE_INT64), TensorImpl::kFlexible, 1);
+    col_desc_ = std::make_unique<ColDescriptor>("sampleIds", DataType(DataType::DE_INT64), TensorImpl::kFlexible, 1);
   }
   TensorShape shape(std::vector<dsize_t>(1, num_elements));
   RETURN_IF_NOT_OK(Tensor::CreateTensor(sample_ids, col_desc_->tensorImpl(), shape, col_desc_->type()));
mindspore/ccsrc/dataset/engine/datasetops/source/sampler/sampler.h
@@ -27,7 +27,6 @@
 #include "dataset/engine/data_buffer.h"
 #include "dataset/engine/data_schema.h"
 #include "dataset/engine/datasetops/dataset_op.h"
-#include "dataset/util/make_unique.h"
 namespace mindspore {
 namespace dataset {
mindspore/ccsrc/dataset/engine/datasetops/source/sampler/sequential_sampler.cc
@@ -25,9 +25,9 @@ Status SequentialSampler::GetNextBuffer(std::unique_ptr<DataBuffer> *out_buffer)
   if (next_id_ > num_samples_) {
     RETURN_STATUS_UNEXPECTED("Sequential Sampler Internal Error");
   } else if (next_id_ == num_samples_) {
-    (*out_buffer) = make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOE);
+    (*out_buffer) = std::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOE);
   } else {
-    (*out_buffer) = make_unique<DataBuffer>(next_id_, DataBuffer::kDeBFlagNone);
+    (*out_buffer) = std::make_unique<DataBuffer>(next_id_, DataBuffer::kDeBFlagNone);
     std::shared_ptr<Tensor> sampleIds;
     int64_t lastId = (samples_per_buffer_ + next_id_ > num_samples_) ? num_samples_ : samples_per_buffer_ + next_id_;
     RETURN_IF_NOT_OK(CreateSamplerTensor(&sampleIds, lastId - next_id_));
@@ -36,7 +36,7 @@ Status SequentialSampler::GetNextBuffer(std::unique_ptr<DataBuffer> *out_buffer)
       *(idPtr++) = next_id_++;
     }
     TensorRow row(1, sampleIds);
-    (*out_buffer)->set_tensor_table(make_unique<TensorQTable>(1, row));
+    (*out_buffer)->set_tensor_table(std::make_unique<TensorQTable>(1, row));
   }
   return Status::OK();
 }
mindspore/ccsrc/dataset/engine/datasetops/source/sampler/subset_random_sampler.cc
@@ -64,9 +64,9 @@ Status SubsetRandomSampler::Reset() {
 Status SubsetRandomSampler::GetNextBuffer(std::unique_ptr<DataBuffer> *out_buffer) {
   // All samples have been drawn
   if (sample_id_ == indices_.size()) {
-    (*out_buffer) = make_unique<DataBuffer>(buffer_id_++, DataBuffer::kDeBFlagEOE);
+    (*out_buffer) = std::make_unique<DataBuffer>(buffer_id_++, DataBuffer::kDeBFlagEOE);
   } else {
-    (*out_buffer) = make_unique<DataBuffer>(buffer_id_++, DataBuffer::kDeBFlagNone);
+    (*out_buffer) = std::make_unique<DataBuffer>(buffer_id_++, DataBuffer::kDeBFlagNone);
     std::shared_ptr<Tensor> outputIds;
     int64_t last_id = sample_id_ + samples_per_buffer_;
@@ -92,7 +92,7 @@ Status SubsetRandomSampler::GetNextBuffer(std::unique_ptr<DataBuffer> *out_buffe
     }
     // Create a TensorTable from that single tensor and push into DataBuffer
-    (*out_buffer)->set_tensor_table(make_unique<TensorQTable>(1, TensorRow(1, outputIds)));
+    (*out_buffer)->set_tensor_table(std::make_unique<TensorQTable>(1, TensorRow(1, outputIds)));
   }
   return Status::OK();
mindspore/ccsrc/dataset/engine/datasetops/source/sampler/weighted_random_sampler.cc
@@ -46,10 +46,10 @@ Status WeightedRandomSampler::Init(const RandomAccessOp *op) {
   CHECK_FAIL_RETURN_UNEXPECTED(num_samples_ > 0 && samples_per_buffer_ > 0, "Fail to init WeightedRandomSampler");
   if (!replacement_) {
-    exp_dist_ = mindspore::make_unique<std::exponential_distribution<>>(1);
+    exp_dist_ = std::make_unique<std::exponential_distribution<>>(1);
     InitOnePassSampling();
   } else {
-    discrete_dist_ = mindspore::make_unique<std::discrete_distribution<int64_t>>(weights_.begin(), weights_.end());
+    discrete_dist_ = std::make_unique<std::discrete_distribution<int64_t>>(weights_.begin(), weights_.end());
   }
   return Status::OK();
@@ -96,9 +96,9 @@ Status WeightedRandomSampler::GetNextBuffer(std::unique_ptr<DataBuffer> *out_buf
   }
   if (sample_id_ == num_samples_) {
-    (*out_buffer) = make_unique<DataBuffer>(buffer_id_++, DataBuffer::kDeBFlagEOE);
+    (*out_buffer) = std::make_unique<DataBuffer>(buffer_id_++, DataBuffer::kDeBFlagEOE);
   } else {
-    (*out_buffer) = make_unique<DataBuffer>(buffer_id_++, DataBuffer::kDeBFlagNone);
+    (*out_buffer) = std::make_unique<DataBuffer>(buffer_id_++, DataBuffer::kDeBFlagNone);
     std::shared_ptr<Tensor> outputIds;
     int64_t last_id = sample_id_ + samples_per_buffer_;
@@ -132,7 +132,7 @@ Status WeightedRandomSampler::GetNextBuffer(std::unique_ptr<DataBuffer> *out_buf
     }
     // Create a TensorTable from that single tensor and push into DataBuffer
-    (*out_buffer)->set_tensor_table(make_unique<TensorQTable>(1, TensorRow(1, outputIds)));
+    (*out_buffer)->set_tensor_table(std::make_unique<TensorQTable>(1, TensorRow(1, outputIds)));
   }
   return Status::OK();
mindspore/ccsrc/dataset/engine/datasetops/source/storage_client.cc
@@ -24,7 +24,6 @@
 #include "dataset/engine/datasetops/source/storage_client.h"
 #include "dataset/engine/datasetops/source/storage_op.h"
 #include "dataset/engine/datasetops/source/tf_client.h"
-#include "dataset/util/make_unique.h"
 #include "dataset/util/status.h"
 namespace mindspore {
@@ -57,7 +56,7 @@ static Status CreateStorageClientSwitch(
     case DatasetType::kTf: {
       // Construct the derived class TFClient, stored as base class StorageClient
       store_op->set_rows_per_buffer(32);
-      *out_client = mindspore::make_unique<TFClient>(std::move(schema), store_op);
+      *out_client = std::make_unique<TFClient>(std::move(schema), store_op);
       break;
     }
     case DatasetType::kUnknown:
@@ -83,7 +82,7 @@ Status StorageClient::CreateStorageClient(
     std::shared_ptr<StorageClient> *out_client) {  // Out: the created storage client
   // Make a new schema first. This only assigns the dataset type. It does not
   // create the columns yet.
-  auto new_schema = mindspore::make_unique<DataSchema>();
+  auto new_schema = std::make_unique<DataSchema>();
   RETURN_IF_NOT_OK(new_schema->LoadDatasetType(dataset_schema_path));
   RETURN_IF_NOT_OK(CreateStorageClientSwitch(std::move(new_schema), store_op, out_client));
   return Status::OK();
@@ -99,7 +98,7 @@ Status StorageClient::CreateStorageClient(
     std::shared_ptr<StorageClient> *out_client) {  // Out: the created storage client
   // The dataset type is passed in by the user. Create an empty schema with only
   // only the dataset type filled in and then create the client with it.
-  auto new_schema = mindspore::make_unique<DataSchema>();
+  auto new_schema = std::make_unique<DataSchema>();
   new_schema->set_dataset_type(in_type);
   RETURN_IF_NOT_OK(CreateStorageClientSwitch(std::move(new_schema), store_op, out_client));
   return Status::OK();
@@ -147,7 +146,7 @@ Status StorageClient::AssignDatasetLayout(uint32_t num_rows, // In: Th
   // The current schema was just an empty one with only the dataset field populated.
   // Let's copy construct a new one that will be a copy of the input schema (releasing the old
   // one) and then set the number of rows that the user requested.
-  data_schema_ = mindspore::make_unique<DataSchema>(schema);
+  data_schema_ = std::make_unique<DataSchema>(schema);
   CHECK_FAIL_RETURN_UNEXPECTED(num_rows <= MAX_INTEGER_INT32, "numRows exceeds the boundary numRows>2147483647");
   num_rows_in_dataset_ = num_rows;
mindspore/ccsrc/dataset/engine/datasetops/source/storage_op.cc
@@ -303,7 +303,7 @@ Status StorageOp::init() {
     // For simplicity, we'll make both of them 3 so they are the same size.
     int32_t action_queue_size = (buffers_needed / num_workers_) + 1;
     for (int32_t i = 0; i < num_workers_; ++i) {
-      auto new_queue = mindspore::make_unique<Queue<int32_t>>(action_queue_size);
+      auto new_queue = std::make_unique<Queue<int32_t>>(action_queue_size);
       action_queue_.push_back(std::move(new_queue));
     }
   }
@@ -483,10 +483,10 @@ Status StorageOp::operator()() {
       // Post the control message to tell the workers to stop waiting on action queue
       // because we are done!
       RETURN_IF_NOT_OK(this->PostEndOfData());
-      std::unique_ptr<DataBuffer> eoeBuffer = mindspore::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOE);
+      std::unique_ptr<DataBuffer> eoeBuffer = std::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOE);
       RETURN_IF_NOT_OK(out_connector_->Add(0, std::move(eoeBuffer)));
       MS_LOG(INFO) << "StorageOp master: Flow end-of-data eof message.";
-      std::unique_ptr<DataBuffer> eofBuffer = mindspore::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOF);
+      std::unique_ptr<DataBuffer> eofBuffer = std::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOF);
       RETURN_IF_NOT_OK(out_connector_->Add(0, std::move(eofBuffer)));
       MS_LOG(INFO) << "StorageOp master: Main execution loop complete.";
       done = true;  // while loop exit
@@ -496,7 +496,7 @@ Status StorageOp::operator()() {
       // RepeatOp above us somewhere in the tree will re-init us with the data to fetch again
      // once it gets the end-of-epoch message.
      MS_LOG(INFO) << "StorageOp master: Flow end-of-epoch eoe message.";
-      std::unique_ptr<DataBuffer> eoe_buffer = mindspore::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOE);
+      std::unique_ptr<DataBuffer> eoe_buffer = std::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOE);
      RETURN_IF_NOT_OK(out_connector_->Add(0, std::move(eoe_buffer)));
      // reset our buffer count and go to loop again.
mindspore/ccsrc/dataset/engine/datasetops/source/tf_buffer.cc
@@ -27,7 +27,6 @@
 #include "dataset/core/data_type.h"
 #include "dataset/engine/datasetops/source/storage_client.h"
 #include "dataset/engine/data_schema.h"
-#include "dataset/util/make_unique.h"
 namespace mindspore {
 namespace dataset {
@@ -72,7 +71,7 @@ Status TFBuffer::Load() {
   }
   // Construct the Tensor table for this buffer.
-  tensor_table_ = mindspore::make_unique<TensorQTable>();
+  tensor_table_ = std::make_unique<TensorQTable>();
   // At each position in the tensor table, instantiate the shared pointer to it's Tensor.
   uint32_t row = 0;
@@ -272,7 +271,7 @@ Status TFBuffer::LoadFloatList(const ColDescriptor &current_col, const dataengin
   // Identify how many values we have and then create a local array of these
   // to deserialize into
   *num_elements = float_list.value_size();
-  *float_array = mindspore::make_unique<float[]>(*num_elements);
+  *float_array = std::make_unique<float[]>(*num_elements);
   for (int i = 0; i < float_list.value_size(); i++) {
     (*float_array)[i] = float_list.value(i);
   }
@@ -294,7 +293,7 @@ Status TFBuffer::LoadIntList(const ColDescriptor &current_col, const dataengine:
   // Identify how many values we have and then create a local array of these
   // to deserialize into
   *num_elements = int64_list.value_size();
-  *int_array = mindspore::make_unique<int64_t[]>(*num_elements);
+  *int_array = std::make_unique<int64_t[]>(*num_elements);
   for (int i = 0; i < int64_list.value_size(); i++) {
     (*int_array)[i] = int64_list.value(i);
   }
mindspore/ccsrc/dataset/engine/datasetops/source/tf_reader_op.cc
@@ -36,7 +36,6 @@
 #include "dataset/engine/db_connector.h"
 #include "dataset/engine/execution_tree.h"
 #include "dataset/engine/jagged_connector.h"
-#include "dataset/util/make_unique.h"
 #include "dataset/util/path.h"
 #include "dataset/util/queue.h"
 #include "dataset/util/random.h"
@@ -54,7 +53,7 @@ TFReaderOp::Builder::Builder()
   builder_op_connector_size_ = config_manager->op_connector_size();
   builder_rows_per_buffer_ = config_manager->rows_per_buffer();
   builder_shuffle_files_ = false;
-  builder_data_schema_ = make_unique<DataSchema>();
+  builder_data_schema_ = std::make_unique<DataSchema>();
 }
 Status TFReaderOp::Builder::ValidateInputs() const {
@@ -103,7 +102,7 @@ TFReaderOp::TFReaderOp(int32_t num_workers, int32_t worker_connector_size, int64
       finished_reading_dataset_(false),
       shuffle_files_(shuffle_files),
       data_schema_(std::move(data_schema)),
-      filename_index_(make_unique<StringIndex>()),
+      filename_index_(std::make_unique<StringIndex>()),
       load_io_block_queue_(true),
       load_jagged_connector_(true),
       num_rows_(0),
@@ -129,7 +128,7 @@ Status TFReaderOp::Init() {
   // parallel op base.
   RETURN_IF_NOT_OK(ParallelOp::CreateWorkerConnector(worker_connector_size_));
-  jagged_buffer_connector_ = mindspore::make_unique<JaggedConnector>(num_workers_, 1, worker_connector_size_);
+  jagged_buffer_connector_ = std::make_unique<JaggedConnector>(num_workers_, 1, worker_connector_size_);
   // temporary: make size large enough to hold all files + EOE to avoid hangs
   int32_t safe_queue_size = static_cast<int32_t>(std::ceil(dataset_files_list_.size() / num_workers_)) + 1;
@@ -229,7 +228,7 @@ Status TFReaderOp::operator()() {
     }
     // all workers finished reading for this epoch, and we have read all the data from all workers
-    std::unique_ptr<DataBuffer> eoe_buffer = mindspore::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOE);
+    std::unique_ptr<DataBuffer> eoe_buffer = std::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOE);
     RETURN_IF_NOT_OK(out_connector_->Add(0, std::move(eoe_buffer)));
     if (!BitTest(op_ctrl_flags_, kDeOpRepeated) || BitTest(op_ctrl_flags_, kDeOpLastRepeat)) {
@@ -241,7 +240,7 @@ Status TFReaderOp::operator()() {
     }
   }
-  std::unique_ptr<DataBuffer> eof_buffer = mindspore::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOF);
+  std::unique_ptr<DataBuffer> eof_buffer = std::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOF);
   RETURN_IF_NOT_OK(out_connector_->Add(0, std::move(eof_buffer)));
   RETURN_IF_NOT_OK(PostEndOfData());
@@ -274,7 +273,7 @@ Status TFReaderOp::WorkerEntry(int32_t worker_id) {
         MS_LOG(INFO) << "TFReader operator worker " << worker_id << " loaded file " << filename << ".";
       }
     } else {
-      std::unique_ptr<DataBuffer> eoe_buffer = mindspore::make_unique<DataBuffer>(1, DataBuffer::kDeBFlagEOE);
+      std::unique_ptr<DataBuffer> eoe_buffer = std::make_unique<DataBuffer>(1, DataBuffer::kDeBFlagEOE);
      RETURN_IF_NOT_OK(jagged_buffer_connector_->Add(worker_id, std::move(eoe_buffer)));
     }
@@ -288,7 +287,7 @@ Status TFReaderOp::WorkerEntry(int32_t worker_id) {
 // When the worker pops this control indicator, it will shut itself down gracefully.
 Status TFReaderOp::PostEndOfData() {
   for (int i = 0; i < num_workers_; ++i) {
-    std::unique_ptr<FilenameBlock> eof = mindspore::make_unique<FilenameBlock>(IOBlock::kDeIoBlockFlagEof);
+    std::unique_ptr<FilenameBlock> eof = std::make_unique<FilenameBlock>(IOBlock::kDeIoBlockFlagEof);
     RETURN_IF_NOT_OK(PushIoBlockQueue(i, std::move(eof)));
   }
@@ -299,7 +298,7 @@ Status TFReaderOp::PostEndOfData() {
 // pops this control indicator, it will wait until the next epoch starts and then resume execution.
 Status TFReaderOp::PostEndOfEpoch(int32_t queue_index) {
   for (int i = 0; i < num_workers_; ++i) {
-    std::unique_ptr<FilenameBlock> eoe = mindspore::make_unique<FilenameBlock>(IOBlock::kDeIoBlockFlagEoe);
+    std::unique_ptr<FilenameBlock> eoe = std::make_unique<FilenameBlock>(IOBlock::kDeIoBlockFlagEoe);
     RETURN_IF_NOT_OK(PushIoBlockQueue((queue_index + i) % num_workers_, std::move(eoe)));
   }
@@ -358,7 +357,7 @@ Status TFReaderOp::FillIOBlockShuffle(const std::vector<int64_t> &i_keys) {
     }
     if (!equal_rows_per_shard_) {
       if (key_index++ % num_devices_ == device_id_) {
-        auto ioBlock = make_unique<FilenameBlock>(*it, kInvalidOffset, kInvalidOffset, IOBlock::kDeIoBlockNone);
+        auto ioBlock = std::make_unique<FilenameBlock>(*it, kInvalidOffset, kInvalidOffset, IOBlock::kDeIoBlockNone);
         RETURN_IF_NOT_OK(PushIoBlockQueue(queue_index, std::move(ioBlock)));
         queue_index = (queue_index + 1) % num_workers_;
       }
@@ -367,7 +366,7 @@ Status TFReaderOp::FillIOBlockShuffle(const std::vector<int64_t> &i_keys) {
       auto file_it = filename_index_->Search(*it);
       std::string file_name = file_it.value();
       if (NeedPushFileToblockQueue(file_name, &start_offset, &end_offset, pre_count)) {
-        auto ioBlock = make_unique<FilenameBlock>(*it, start_offset, end_offset, IOBlock::kDeIoBlockNone);
+        auto ioBlock = std::make_unique<FilenameBlock>(*it, start_offset, end_offset, IOBlock::kDeIoBlockNone);
        RETURN_IF_NOT_OK(PushIoBlockQueue(queue_index, std::move(ioBlock)));
        MS_LOG(DEBUG) << "File name " << *it << " start offset " << start_offset << " end_offset " << end_offset;
        queue_index = (queue_index + 1) % num_workers_;
@@ -404,14 +403,15 @@ Status TFReaderOp::FillIOBlockNoShuffle() {
     }
     if (!equal_rows_per_shard_) {
       if (key_index++ % num_devices_ == device_id_) {
-        auto ioBlock = make_unique<FilenameBlock>(it.key(), kInvalidOffset, kInvalidOffset, IOBlock::kDeIoBlockNone);
+        auto ioBlock = std::make_unique<FilenameBlock>(it.key(), kInvalidOffset, kInvalidOffset, IOBlock::kDeIoBlockNone);
         RETURN_IF_NOT_OK(PushIoBlockQueue(queue_index, std::move(ioBlock)));
         queue_index = (queue_index + 1) % num_workers_;
       }
     } else {
       std::string file_name = it.value();
       if (NeedPushFileToblockQueue(file_name, &start_offset, &end_offset, pre_count)) {
-        auto ioBlock = make_unique<FilenameBlock>(it.key(), start_offset, end_offset, IOBlock::kDeIoBlockNone);
+        auto ioBlock = std::make_unique<FilenameBlock>(it.key(), start_offset, end_offset, IOBlock::kDeIoBlockNone);
         RETURN_IF_NOT_OK(PushIoBlockQueue(queue_index, std::move(ioBlock)));
         queue_index = (queue_index + 1) % num_workers_;
       }
@@ -490,14 +490,13 @@ Status TFReaderOp::LoadFile(const std::string &filename, const int64_t start_off
   int64_t rows_read = 0;
   int64_t rows_total = 0;
-  std::unique_ptr<DataBuffer> current_buffer = mindspore::make_unique<DataBuffer>(0, DataBuffer::BufferFlags::kDeBFlagNone);
+  std::unique_ptr<DataBuffer> current_buffer = std::make_unique<DataBuffer>(0, DataBuffer::BufferFlags::kDeBFlagNone);
   std::unordered_map<std::string, int32_t> column_name_map;
   for (int32_t i = 0; i < data_schema_->NumColumns(); ++i) {
     column_name_map[data_schema_->column(i).name()] = i;
   }
   current_buffer->set_column_name_map(column_name_map);
-  std::unique_ptr<TensorQTable> new_tensor_table = make_unique<TensorQTable>();
+  std::unique_ptr<TensorQTable> new_tensor_table = std::make_unique<TensorQTable>();
   while (reader.peek() != EOF) {
     if (!load_jagged_connector_) {
@@ -532,9 +531,9 @@ Status TFReaderOp::LoadFile(const std::string &filename, const int64_t start_off
       current_buffer->set_tensor_table(std::move(new_tensor_table));
       RETURN_IF_NOT_OK(jagged_buffer_connector_->Add(worker_id, std::move(current_buffer)));
-      current_buffer = make_unique<DataBuffer>(0, DataBuffer::BufferFlags::kDeBFlagNone);
+      current_buffer = std::make_unique<DataBuffer>(0, DataBuffer::BufferFlags::kDeBFlagNone);
       current_buffer->set_column_name_map(column_name_map);
-      new_tensor_table = make_unique<TensorQTable>();
+      new_tensor_table = std::make_unique<TensorQTable>();
       rows_read = 0;
     }
   }
@@ -742,7 +741,7 @@ Status TFReaderOp::LoadFloatList(const ColDescriptor &current_col, const dataeng
   // Identify how many values we have and then create a local array of these
   // to deserialize into
   *num_elements = float_list.value_size();
-  *float_array = mindspore::make_unique<float[]>(*num_elements);
+  *float_array = std::make_unique<float[]>(*num_elements);
   for (int i = 0; i < float_list.value_size(); ++i) {
     (*float_array)[i] = float_list.value(i);
   }
mindspore/ccsrc/dataset/engine/datasetops/source/voc_op.cc
@@ -38,7 +38,7 @@ Status VOCOp::Builder::Build(std::shared_ptr<VOCOp> *ptr) {
   if (builder_sampler_ == nullptr) {
     builder_sampler_ = std::make_shared<SequentialSampler>();
   }
-  builder_schema_ = make_unique<DataSchema>();
+  builder_schema_ = std::make_unique<DataSchema>();
   RETURN_IF_NOT_OK(builder_schema_->AddColumn(ColDescriptor("image", DataType(DataType::DE_UINT8), TensorImpl::kFlexible, 1)));
   RETURN_IF_NOT_OK(
@@ -85,7 +85,7 @@ Status VOCOp::TraverseSampleIds(const std::shared_ptr<Tensor> &sample_ids, std::
     row_cnt_++;
     if (row_cnt_ % rows_per_buffer_ == 0) {
-      RETURN_IF_NOT_OK(io_block_queues_[buf_cnt_++ % num_workers_]->Add(make_unique<IOBlock>(IOBlock(*keys, IOBlock::kDeIoBlockNone))));
+      RETURN_IF_NOT_OK(io_block_queues_[buf_cnt_++ % num_workers_]->Add(std::make_unique<IOBlock>(IOBlock(*keys, IOBlock::kDeIoBlockNone))));
       keys->clear();
     }
   }
@@ -110,21 +110,21 @@ Status VOCOp::operator()() {
   }
   if (keys.empty() == false) {
-    RETURN_IF_NOT_OK(io_block_queues_[(buf_cnt_++) % num_workers_]->Add(make_unique<IOBlock>(IOBlock(keys, IOBlock::kDeIoBlockNone))));
+    RETURN_IF_NOT_OK(io_block_queues_[(buf_cnt_++) % num_workers_]->Add(std::make_unique<IOBlock>(IOBlock(keys, IOBlock::kDeIoBlockNone))));
   }
   if (!BitTest(op_ctrl_flags_, kDeOpRepeated) || BitTest(op_ctrl_flags_, kDeOpLastRepeat)) {
-    std::unique_ptr<IOBlock> eoe_block = make_unique<IOBlock>(IOBlock::kDeIoBlockFlagEoe);
-    std::unique_ptr<IOBlock> eof_block = make_unique<IOBlock>(IOBlock::kDeIoBlockFlagEof);
+    std::unique_ptr<IOBlock> eoe_block = std::make_unique<IOBlock>(IOBlock::kDeIoBlockFlagEoe);
+    std::unique_ptr<IOBlock> eof_block = std::make_unique<IOBlock>(IOBlock::kDeIoBlockFlagEof);
     RETURN_IF_NOT_OK(io_block_queues_[(buf_cnt_++) % num_workers_]->Add(std::move(eoe_block)));
     RETURN_IF_NOT_OK(io_block_queues_[(buf_cnt_++) % num_workers_]->Add(std::move(eof_block)));
     for (int32_t i = 0; i < num_workers_; i++) {
-      RETURN_IF_NOT_OK(io_block_queues_[i]->Add(make_unique<IOBlock>(std::vector<int64_t>(), IOBlock::kDeIoBlockNone)));
+      RETURN_IF_NOT_OK(io_block_queues_[i]->Add(std::make_unique<IOBlock>(std::vector<int64_t>(), IOBlock::kDeIoBlockNone)));
     }
     return Status::OK();
   } else {
-    RETURN_IF_NOT_OK(io_block_queues_[(buf_cnt_++) % num_workers_]->Add(make_unique<IOBlock>(IOBlock::kDeIoBlockFlagEoe)));
+    RETURN_IF_NOT_OK(io_block_queues_[(buf_cnt_++) % num_workers_]->Add(std::make_unique<IOBlock>(IOBlock::kDeIoBlockFlagEoe)));
     RETURN_IF_NOT_OK(wp_.Wait());
     wp_.Clear();
     RETURN_IF_NOT_OK(sampler_->GetNextBuffer(&sampler_buffer));
@@ -164,7 +164,7 @@ Status VOCOp::LoadTensorRow(const std::string &image_id, TensorRow *trow) {
 }
 Status VOCOp::LoadBuffer(const std::vector<int64_t> &keys, std::unique_ptr<DataBuffer> *db) {
-  std::unique_ptr<TensorQTable> deq = make_unique<TensorQTable>();
+  std::unique_ptr<TensorQTable> deq = std::make_unique<TensorQTable>();
   TensorRow trow;
   for (const uint64_t &key : keys) {
     RETURN_IF_NOT_OK(this->LoadTensorRow(image_ids_[key], &trow));
@@ -182,15 +182,15 @@ Status VOCOp::WorkerEntry(int32_t worker_id) {
   RETURN_IF_NOT_OK(io_block_queues_[worker_id]->PopFront(&io_block));
   while (io_block != nullptr) {
     if (io_block->eoe() == true) {
-      RETURN_IF_NOT_OK(out_connector_->Add(worker_id, make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOE)));
+      RETURN_IF_NOT_OK(out_connector_->Add(worker_id, std::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOE)));
       buffer_id = worker_id;
     } else if (io_block->eof() == true) {
-      RETURN_IF_NOT_OK(out_connector_->Add(worker_id, (make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOF))));
+      RETURN_IF_NOT_OK(out_connector_->Add(worker_id, (std::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOF))));
     } else {
       std::vector<int64_t> keys;
       RETURN_IF_NOT_OK(io_block->GetKeys(&keys));
       if (keys.empty() == true) return Status::OK();
-      std::unique_ptr<DataBuffer> db = make_unique<DataBuffer>(buffer_id, DataBuffer::kDeBFlagNone);
+      std::unique_ptr<DataBuffer> db = std::make_unique<DataBuffer>(buffer_id, DataBuffer::kDeBFlagNone);
       RETURN_IF_NOT_OK(LoadBuffer(keys, &db));
       RETURN_IF_NOT_OK(out_connector_->Add(worker_id, std::move(db)));
       buffer_id += num_workers_;
mindspore/ccsrc/dataset/engine/datasetops/zip_op.cc
@@ -65,13 +65,13 @@ Status ZipOp::operator()() {
   // initialize the iterators
   for (int32_t i = 0; i < children_num_; ++i) {
     // magic number 0 since Zip is not a parallel Op
-    child_iterators_.push_back(mindspore::make_unique<ChildIterator>(this, 0, i));
+    child_iterators_.push_back(std::make_unique<ChildIterator>(this, 0, i));
   }
   // Loop until eof is true
   while (!eof_) {
     // Create tensor table and prepare it by fetching and packing the first zipped row into it.
-    std::unique_ptr<TensorQTable> curr_table = mindspore::make_unique<TensorQTable>();
+    std::unique_ptr<TensorQTable> curr_table = std::make_unique<TensorQTable>();
     RETURN_IF_NOT_OK(prepare(curr_table.get()));
     // If an eof got picked up during the above prepare, then we're done
@@ -81,7 +81,7 @@ Status ZipOp::operator()() {
     while (!draining_) {
       // 1. If a previous loop iteration sent the current table out, then create a new one.
       if (curr_table == nullptr) {
-        curr_table = mindspore::make_unique<TensorQTable>();
+        curr_table = std::make_unique<TensorQTable>();
       }
       // 2 fill the table. Note: draining mode might get turned on if any of the child inputs were done
@@ -89,8 +89,7 @@ Status ZipOp::operator()() {
       // 3 create and update buffer and send it to the out connector
       if (!curr_table->empty()) {
-        std::unique_ptr<DataBuffer> curr_buffer = mindspore::make_unique<DataBuffer>(buffer_id_, DataBuffer::kDeBFlagNone);
+        std::unique_ptr<DataBuffer> curr_buffer = std::make_unique<DataBuffer>(buffer_id_, DataBuffer::kDeBFlagNone);
         curr_buffer->set_tensor_table(std::move(curr_table));
         curr_buffer->set_column_name_map(col_name_id_map_);
         MS_LOG(DEBUG) << "Zip operator finished one buffer, pushing, rows " << curr_buffer->NumRows() << ", cols "
@@ -105,15 +104,14 @@ Status ZipOp::operator()() {
       MS_LOG(DEBUG) << "Zip operator is now draining child inputs.";
       RETURN_IF_NOT_OK(drainPipeline());
       // Now that we have drained child inputs, send the eoe up.
-      RETURN_IF_NOT_OK(out_connector_->Add(0, std::move(mindspore::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOE))));
+      RETURN_IF_NOT_OK(out_connector_->Add(0, std::move(std::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOE))));
     }
   }
   // 5 handle eof
   // propagate eof here.
   MS_LOG(INFO) << "Zip operator got EOF, propagating.";
-  RETURN_IF_NOT_OK(out_connector_->Add(0, std::move(mindspore::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOF))));
+  RETURN_IF_NOT_OK(out_connector_->Add(0, std::move(std::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOF))));
   return Status::OK();
 }
mindspore/ccsrc/dataset/engine/db_connector.h
@@ -65,7 +65,7 @@ class DbConnector : public Connector<std::unique_ptr<DataBuffer>> {
     RETURN_IF_NOT_OK(cv_.Wait(&lk, [this, worker_id]() { return expect_consumer_ == worker_id; }));
     // Once an EOF message is encountered this flag will be set and we can return early.
     if (end_of_file_) {
-      *result = mindspore::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOF);
+      *result = std::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOF);
     } else {
       RETURN_IF_NOT_OK(queues_[pop_from_]->PopFront(result));
       if (*result == nullptr) {
mindspore/ccsrc/dataset/engine/execution_tree.cc
@@ -24,7 +24,7 @@ namespace mindspore {
 namespace dataset {
 // Constructor
 ExecutionTree::ExecutionTree() : id_count_(0) {
-  tg_ = mindspore::make_unique<TaskGroup>();
+  tg_ = std::make_unique<TaskGroup>();
   tree_state_ = kDeTStateInit;
   prepare_flags_ = kDePrepNone;
 }
mindspore/ccsrc/dataset/kernels/image/image_utils.cc
@@ -24,7 +24,6 @@
 #include "dataset/core/cv_tensor.h"
 #include "dataset/core/tensor.h"
 #include "dataset/core/tensor_shape.h"
-#include "dataset/util/make_unique.h"
 #include "dataset/util/random.h"
 #define MAX_INT_PRECISION 16777216  // float int precision is 16777216
@@ -376,7 +375,7 @@ Status HwcToChw(std::shared_ptr<Tensor> input, std::shared_ptr<Tensor> *output)
   int width = input_cv->shape()[1];
   int num_channels = input_cv->shape()[2];
-  auto output_cv = mindspore::make_unique<CVTensor>(TensorShape{num_channels, height, width}, input_cv->type());
+  auto output_cv = std::make_unique<CVTensor>(TensorShape{num_channels, height, width}, input_cv->type());
   for (int i = 0; i < num_channels; ++i) {
     cv::Mat mat;
     RETURN_IF_NOT_OK(output_cv->Mat({i}, &mat));
mindspore/ccsrc/dataset/kernels/py_func_op.cc
@@ -20,7 +20,6 @@
 #include "dataset/core/tensor.h"
 #include "dataset/kernels/tensor_op.h"
-#include "dataset/util/make_unique.h"
 #include "dataset/util/status.h"
 namespace mindspore {
mindspore/ccsrc/dataset/util/arena.cc
@@ -16,7 +16,6 @@
 #include "dataset/util/arena.h"
 #include <unistd.h>
 #include <utility>
-#include "dataset/util/make_unique.h"
 #include "dataset/util/system_pool.h"
 #include "dataset/util/de_error.h"
 #include "./securec.h"
mindspore/ccsrc/dataset/util/circular_pool.cc
@@ -18,10 +18,8 @@
 #include <algorithm>
 #include <limits>
 #include <utility>
 #include "./securec.h"
-#include "dataset/util/make_unique.h"
 #include "dataset/util/de_error.h"
 #include "dataset/util/system_pool.h"
 namespace mindspore {
mindspore/ccsrc/dataset/util/de_error.h
@@ -16,6 +16,13 @@
 #ifndef DATASET_UTIL_DE_ERROR_H_
 #define DATASET_UTIL_DE_ERROR_H_
+#ifdef DEBUG
+#include <cassert>
+#define DS_ASSERT(f) assert(f)
+#else
+#define DS_ASSERT(f) ((void)0)
+#endif
 #include <map>
 #include "utils/error_code.h"
mindspore/ccsrc/dataset/util/list.h
@@ -18,8 +18,7 @@
 #include <iostream>
 #include <iterator>
-#include "dataset/util/make_unique.h"
 #include "dataset/util/de_error.h"
 namespace mindspore {
 namespace dataset {
mindspore/ccsrc/dataset/util/lock.cc
@@ -14,6 +14,7 @@
  * limitations under the License.
  */
 #include "dataset/util/lock.h"
+#include "dataset/util/de_error.h"
 namespace mindspore {
 namespace dataset {
mindspore/ccsrc/dataset/util/lock.h
@@ -19,7 +19,6 @@
 #include <atomic>
 #include <condition_variable>
 #include <mutex>
-#include "dataset/util/make_unique.h"
 namespace mindspore {
 namespace dataset {
mindspore/ccsrc/dataset/util/make_unique.h (deleted, mode 100644 → 0)
-/**
- * Copyright 2019 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef DATASET_UTIL_MAKE_UNIQUE_H_
-#define DATASET_UTIL_MAKE_UNIQUE_H_
-#ifdef DEBUG
-#include <cassert>
-#define DS_ASSERT(f) assert(f)
-#else
-#define DS_ASSERT(f) ((void)0)
-#endif
-#include <memory>
-#include <type_traits>
-#include <utility>
-#include "dataset/util/de_error.h"
-#include "utils/log_adapter.h"
-namespace mindspore {
-using std::make_unique;
-}  // namespace mindspore
-#endif  // DATASET_UTIL_MAKE_UNIQUE_H_
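The deleted header above was only a thin shim: it pulled `std::make_unique` into namespace `mindspore` (plus a `DS_ASSERT` macro, which this commit moves into de_error.h), so `mindspore::make_unique<T>(...)` and unqualified `make_unique<T>(...)` already resolved to the standard C++14 function. With the shim gone, every call site spells the standard name directly. A minimal before/after sketch of the pattern, using a placeholder `Widget` type rather than the dataset classes:

#include <memory>
#include <vector>

struct Widget {  // placeholder type, not part of MindSpore
  explicit Widget(int id) : id_(id) {}
  int id_;
};

int main() {
  // Before this commit (with the shim header): mindspore::make_unique<Widget>(42).
  // After: call the C++14 standard facility directly.
  std::unique_ptr<Widget> w = std::make_unique<Widget>(42);

  // The same applies to the array form used for raw buffers in the hunks above.
  std::unique_ptr<int[]> dims = std::make_unique<int[]>(4);  // four zero-initialized ints
  dims[0] = w->id_;

  // And to containers of unique_ptr built in loops, as in the Queue/IOBlock call sites.
  std::vector<std::unique_ptr<Widget>> queue_list;
  for (int i = 0; i < 3; ++i) {
    queue_list.emplace_back(std::make_unique<Widget>(i));
  }
  return 0;
}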
mindspore/ccsrc/dataset/util/queue.h
@@ -212,7 +212,7 @@ class QueueList {
   void Init(int num_queues, int capacity) {
     queue_list_.reserve(num_queues);
     for (int i = 0; i < num_queues; i++) {
-      queue_list_.emplace_back(mindspore::make_unique<Queue<T>>(capacity));
+      queue_list_.emplace_back(std::make_unique<Queue<T>>(capacity));
     }
   }
mindspore/ccsrc/dataset/util/task.h
@@ -27,7 +27,6 @@
 #include <string>
 #include <thread>
 #include "dataset/util/de_error.h"
-#include "dataset/util/make_unique.h"
 #include "dataset/util/intrp_resource.h"
 #include "dataset/util/list.h"
 #include "dataset/util/memory_pool.h"
mindspore/ccsrc/device/gpu/blocking_queue.cc
@@ -17,7 +17,6 @@
 #include "device/gpu/blocking_queue.h"
 #include <chrono>
 #include "device/gpu/gpu_common.h"
-#include "dataset/util/make_unique.h"
 #include "common/utils.h"
 namespace mindspore {
@@ -32,7 +31,7 @@ GpuQueue::GpuQueue(void *addr, size_t feature_size, size_t label_size, size_t ca
       stream_(0),
       node_info_(nullptr) {
   CHECK_CUDA_RET_WITH_ERROR(cudaStreamCreate(&stream_), "Cuda Create Stream Failed");
-  node_info_ = mindspore::make_unique<NodeInfo[]>(capacity);
+  node_info_ = std::make_unique<NodeInfo[]>(capacity);
 }
 GpuQueue::~GpuQueue() { buffer_ = nullptr; }
mindspore/ccsrc/kernel/gpu/math/bias_add_gpu_kernel.h
@@ -23,7 +23,6 @@
 #include <vector>
 #include "kernel/gpu/gpu_kernel.h"
 #include "kernel/gpu/gpu_kernel_factory.h"
-#include "dataset/util/make_unique.h"
 #include "kernel/gpu/kernel_constants.h"
 namespace mindspore {
@@ -74,8 +73,8 @@ class BiasAddGpuKernel : public GpuKernel {
     // Expand to 4 dims for cudnnSetTensorNdDescriptorEx.
     auto cudnn_dims = std::max(num_dims, 4UL);
-    std::unique_ptr<int[]> x_dims = mindspore::make_unique<int[]>(cudnn_dims);
-    std::unique_ptr<int[]> b_dims = mindspore::make_unique<int[]>(cudnn_dims);
+    std::unique_ptr<int[]> x_dims = std::make_unique<int[]>(cudnn_dims);
+    std::unique_ptr<int[]> b_dims = std::make_unique<int[]>(cudnn_dims);
     for (size_t i = 0; i < cudnn_dims; i++) {
       x_dims[i] = (i < num_dims) ? SizeToInt(x_shape[i]) : 1;
       b_dims[i] = (i == pos) ? SizeToInt(x_shape[i]) : 1;
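The BiasAdd hunk above pads the x/b dimension arrays out to at least four entries before they are handed to cuDNN's Nd descriptor setup. The sketch below reproduces only that padding step in plain C++ under stated assumptions: the shape, the bias axis `pos`, and the `SizeToInt` stand-in are illustrative, and no cuDNN call is made.

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <memory>
#include <vector>

// Illustrative stand-in for the SizeToInt helper used in the kernel code.
static int SizeToInt(size_t v) { return static_cast<int>(v); }

int main() {
  std::vector<size_t> x_shape = {32, 10};  // hypothetical 2-D input shape
  size_t num_dims = x_shape.size();
  size_t pos = 1;                          // hypothetical bias axis

  // Expand to at least 4 dims, padding the trailing axes with 1,
  // mirroring the loop in the BiasAdd kernel hunk above.
  auto cudnn_dims = std::max(num_dims, static_cast<size_t>(4));
  std::unique_ptr<int[]> x_dims = std::make_unique<int[]>(cudnn_dims);
  std::unique_ptr<int[]> b_dims = std::make_unique<int[]>(cudnn_dims);
  for (size_t i = 0; i < cudnn_dims; i++) {
    x_dims[i] = (i < num_dims) ? SizeToInt(x_shape[i]) : 1;
    b_dims[i] = (i == pos) ? SizeToInt(x_shape[i]) : 1;
  }
  for (size_t i = 0; i < cudnn_dims; i++) {
    std::cout << x_dims[i] << " " << b_dims[i] << "\n";  // prints 32 1 / 10 10 / 1 1 / 1 1
  }
  return 0;
}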
mindspore/ccsrc/kernel/gpu/nn/bias_add_grad_gpu_kenel.h
@@ -26,7 +26,6 @@
 #include "kernel/gpu/gpu_kernel.h"
 #include "kernel/gpu/gpu_kernel_factory.h"
 #include "kernel/gpu/kernel_constants.h"
-#include "dataset/util/make_unique.h"
 namespace mindspore {
 namespace kernel {
@@ -84,8 +83,8 @@ class BiasAddGradGpuKernel : public GpuKernel {
     // Expand to 4 dims for cudnnSetTensorNdDescriptorEx.
     auto cudnn_dims = std::max(num_dims, 4UL);
-    std::unique_ptr<int[]> dy_dims = mindspore::make_unique<int[]>(cudnn_dims);
-    std::unique_ptr<int[]> db_dims = mindspore::make_unique<int[]>(cudnn_dims);
+    std::unique_ptr<int[]> dy_dims = std::make_unique<int[]>(cudnn_dims);
+    std::unique_ptr<int[]> db_dims = std::make_unique<int[]>(cudnn_dims);
     for (size_t i = 0; i < cudnn_dims; i++) {
       dy_dims[i] = (i < num_dims) ? SizeToInt(dy_shape[i]) : 1;
       db_dims[i] = (i == pos) ? SizeToInt(dy_shape[i]) : 1;
mindspore/ccsrc/kernel/gpu/nn/lstm_gpu_kernel.h
@@ -22,7 +22,6 @@
 #include <memory>
 #include "kernel/gpu/gpu_kernel.h"
 #include "kernel/gpu/gpu_kernel_factory.h"
-#include "dataset/util/make_unique.h"
 #include "kernel/gpu/kernel_constants.h"
 namespace mindspore {
@@ -144,8 +143,8 @@ class LstmGpuKernel : public GpuKernel {
     int x_dims[3]{batch_size_, input_size_, 1};
     int y_dims[3]{batch_size_, hidden_size_ * (bidirectional_ ? 2 : 1), 1};
-    x_desc_ = mindspore::make_unique<cudnnTensorDescriptor_t[]>(seq_len_);
-    y_desc_ = mindspore::make_unique<cudnnTensorDescriptor_t[]>(seq_len_);
+    x_desc_ = std::make_unique<cudnnTensorDescriptor_t[]>(seq_len_);
+    y_desc_ = std::make_unique<cudnnTensorDescriptor_t[]>(seq_len_);
     for (size_t i = 0; i < IntToSize(seq_len_); ++i) {
       CHECK_CUDNN_RET_WITH_EXCEPT(cudnnCreateTensorDescriptor(&x_desc_[i]), "create x_desc failed");
mindspore/ccsrc/kernel/gpu/nn/lstm_grad_data_gpu_kernel.h
@@ -23,7 +23,6 @@
 #include "kernel/gpu/gpu_kernel.h"
 #include "kernel/gpu/gpu_kernel_factory.h"
 #include "kernel/gpu/kernel_constants.h"
-#include "dataset/util/make_unique.h"
 namespace mindspore {
 namespace kernel {
@@ -212,9 +211,9 @@ class LstmGradDataGpuKernel : public GpuKernel {
     int x_dims[3]{batch_size_, input_size_, 1};
     int y_dims[3]{batch_size_, hidden_size_ * (bidirectional_ ? 2 : 1), 1};
-    dx_desc_ = mindspore::make_unique<cudnnTensorDescriptor_t[]>(seq_len_);
-    y_desc_ = mindspore::make_unique<cudnnTensorDescriptor_t[]>(seq_len_);
-    dy_desc_ = mindspore::make_unique<cudnnTensorDescriptor_t[]>(seq_len_);
+    dx_desc_ = std::make_unique<cudnnTensorDescriptor_t[]>(seq_len_);
+    y_desc_ = std::make_unique<cudnnTensorDescriptor_t[]>(seq_len_);
+    dy_desc_ = std::make_unique<cudnnTensorDescriptor_t[]>(seq_len_);
     for (size_t i = 0; i < IntToSize(seq_len_); ++i) {
       CHECK_CUDNN_RET_WITH_EXCEPT(cudnnCreateTensorDescriptor(&dx_desc_[i]), "create x_desc failed");
mindspore/ccsrc/kernel/gpu/nn/lstm_grad_weight_gpu_kernel.h
@@ -22,7 +22,6 @@
 #include <memory>
 #include "kernel/gpu/gpu_kernel.h"
 #include "kernel/gpu/gpu_kernel_factory.h"
-#include "dataset/util/make_unique.h"
 #include "kernel/gpu/kernel_constants.h"
 namespace mindspore {
 namespace kernel {
@@ -169,8 +168,8 @@ class LstmGradWeightGpuKernel : public GpuKernel {
     int x_dims[3]{batch_size_, input_size_, 1};
     int y_dims[3]{batch_size_, hidden_size_ * (bidirectional_ ? 2 : 1), 1};
-    x_desc_ = mindspore::make_unique<cudnnTensorDescriptor_t[]>(seq_len_);
-    y_desc_ = mindspore::make_unique<cudnnTensorDescriptor_t[]>(seq_len_);
+    x_desc_ = std::make_unique<cudnnTensorDescriptor_t[]>(seq_len_);
+    y_desc_ = std::make_unique<cudnnTensorDescriptor_t[]>(seq_len_);
     for (size_t i = 0; i < IntToSize(seq_len_); ++i) {
       CHECK_CUDNN_RET_WITH_EXCEPT(cudnnCreateTensorDescriptor(&x_desc_[i]), "create x_desc failed");
tests/ut/cpp/dataset/celeba_op_test.cc
@@ -116,7 +116,7 @@ TEST_F(MindDataTestCelebaDataset, TestCelebaRepeat) {
 TEST_F(MindDataTestCelebaDataset, TestSubsetRandomSamplerCeleba) {
   std::vector<int64_t> indices({1});
-  std::unique_ptr<Sampler> sampler = mindspore::make_unique<SubsetRandomSampler>(indices);
+  std::unique_ptr<Sampler> sampler = std::make_unique<SubsetRandomSampler>(indices);
   uint32_t expect_labels[1][40] = {{0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1,
                                     0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1}};
   std::string dir = datasets_root_path_ + "/testCelebAData/";
   uint32_t count = 0;
tests/ut/cpp/dataset/cifar_op_test.cc
@@ -92,7 +92,7 @@ TEST_F(MindDataTestCifarOp, TestSequentialSamplerCifar10) {
 TEST_F(MindDataTestCifarOp, TestRandomSamplerCifar10) {
   uint32_t original_seed = GlobalContext::config_manager()->seed();
   GlobalContext::config_manager()->set_seed(0);
-  std::unique_ptr<Sampler> sampler = mindspore::make_unique<RandomSampler>(true, 12);
+  std::unique_ptr<Sampler> sampler = std::make_unique<RandomSampler>(true, 12);
   std::string folder_path = datasets_root_path_ + "/testCifar10Data/";
   auto tree = Build({Cifarop(16, 2, 32, folder_path, std::move(sampler), 100)});
   tree->Prepare();
tests/ut/cpp/dataset/image_folder_op_test.cc
@@ -138,7 +138,7 @@ TEST_F(MindDataTestImageFolderSampler, TestRandomImageFolder) {
 TEST_F(MindDataTestImageFolderSampler, TestRandomSamplerImageFolder) {
   int32_t original_seed = GlobalContext::config_manager()->seed();
   GlobalContext::config_manager()->set_seed(0);
-  std::unique_ptr<Sampler> sampler = mindspore::make_unique<RandomSampler>(true, 12);
+  std::unique_ptr<Sampler> sampler = std::make_unique<RandomSampler>(true, 12);
   int32_t res[] = {2, 2, 2, 3, 2, 3, 2, 3, 1, 2, 2, 1};  // ground truth label
   std::string folder_path = datasets_root_path_ + "/testPK/data";
   auto tree = Build({ImageFolder(16, 2, 32, folder_path, false, std::move(sampler))});
@@ -200,7 +200,7 @@ TEST_F(MindDataTestImageFolderSampler, TestSequentialImageFolderWithRepeatBatch)
 TEST_F(MindDataTestImageFolderSampler, TestSubsetRandomSamplerImageFolder) {
   // id range 0 - 10 is label 0, and id range 11 - 21 is label 1
   std::vector<int64_t> indices({0, 1, 2, 3, 4, 5, 12, 13, 14, 15, 16, 11});
-  std::unique_ptr<Sampler> sampler = mindspore::make_unique<SubsetRandomSampler>(indices);
+  std::unique_ptr<Sampler> sampler = std::make_unique<SubsetRandomSampler>(indices);
   std::string folder_path = datasets_root_path_ + "/testPK/data";
   // Expect 6 samples for label 0 and 1
   int res[2] = {6, 6};
@@ -238,7 +238,7 @@ TEST_F(MindDataTestImageFolderSampler, TestWeightedRandomSamplerImageFolder) {
   // create sampler with replacement = replacement
-  std::unique_ptr<Sampler> sampler = mindspore::make_unique<WeightedRandomSampler>(weights, num_samples, true, samples_per_buffer);
+  std::unique_ptr<Sampler> sampler = std::make_unique<WeightedRandomSampler>(weights, num_samples, true, samples_per_buffer);
   std::string folder_path = datasets_root_path_ + "/testPK/data";
   auto tree = Build({ImageFolder(16, 2, 32, folder_path, false, std::move(sampler))});
@@ -295,7 +295,7 @@ TEST_F(MindDataTestImageFolderSampler, TestImageFolderClassIndex) {
 }
 TEST_F(MindDataTestImageFolderSampler, TestDistributedSampler) {
-  std::unique_ptr<Sampler> sampler = mindspore::make_unique<DistributedSampler>(11, 10, false);
+  std::unique_ptr<Sampler> sampler = std::make_unique<DistributedSampler>(11, 10, false);
   std::string folder_path = datasets_root_path_ + "/testPK/data";
   auto tree = Build({ImageFolder(16, 2, 32, folder_path, false, std::move(sampler)), Repeat(4)});
   tree->Prepare();
@@ -322,7 +322,7 @@ TEST_F(MindDataTestImageFolderSampler, TestDistributedSampler) {
 }
 TEST_F(MindDataTestImageFolderSampler, TestPKSamplerImageFolder) {
-  std::unique_ptr<Sampler> sampler = mindspore::make_unique<PKSampler>(3, false, 4);
+  std::unique_ptr<Sampler> sampler = std::make_unique<PKSampler>(3, false, 4);
   int32_t res[] = {0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3};  // ground truth label
   std::string folder_path = datasets_root_path_ + "/testPK/data";
   auto tree = Build({ImageFolder(16, 2, 32, folder_path, false, std::move(sampler))});
@@ -431,7 +431,7 @@ TEST_F(MindDataTestImageFolderSampler, TestImageFolderDatasetSize) {
 }
 TEST_F(MindDataTestImageFolderSampler, TestImageFolderSharding1) {
-  std::unique_ptr<Sampler> sampler = mindspore::make_unique<DistributedSampler>(4, 0, false);
+  std::unique_ptr<Sampler> sampler = std::make_unique<DistributedSampler>(4, 0, false);
   std::string folder_path = datasets_root_path_ + "/testPK/data";
   // numWrks, rows, conns, path, shuffle, sampler, map, numSamples, decode
   auto tree = Build({ImageFolder(16, 2, 32, folder_path, false, std::move(sampler), {}, 5)});
@@ -460,7 +460,7 @@ TEST_F(MindDataTestImageFolderSampler, TestImageFolderSharding1) {
 }
 TEST_F(MindDataTestImageFolderSampler, TestImageFolderSharding2) {
-  std::unique_ptr<Sampler> sampler = mindspore::make_unique<DistributedSampler>(4, 3, false);
+  std::unique_ptr<Sampler> sampler = std::make_unique<DistributedSampler>(4, 3, false);
   std::string folder_path = datasets_root_path_ + "/testPK/data";
   // numWrks, rows, conns, path, shuffle, sampler, map, numSamples, decode
   auto tree = Build({ImageFolder(16, 16, 32, folder_path, false, std::move(sampler), {}, 12)});
tests/ut/cpp/dataset/manifest_op_test.cc
@@ -86,7 +86,7 @@ TEST_F(MindDataTestManifest, TestSequentialManifestWithRepeat) {
 TEST_F(MindDataTestManifest, TestSubsetRandomSamplerManifest) {
   std::vector<int64_t> indices({1});
-  std::unique_ptr<Sampler> sampler = mindspore::make_unique<SubsetRandomSampler>(indices);
+  std::unique_ptr<Sampler> sampler = std::make_unique<SubsetRandomSampler>(indices);
   std::string file = datasets_root_path_ + "/testManifestData/cpp.json";
   // Expect 6 samples for label 0 and 1
   auto tree = Build({Manifest(16, 2, 32, file, "train", std::move(sampler))});
tests/ut/cpp/dataset/project_op_test.cc
浏览文件 @
6d1ea7af
...
...
@@ -45,7 +45,7 @@ TEST_F(MindDataTestProjectOp, TestProjectProject) {
    .SetRowsPerBuffer(16).SetWorkerConnectorSize(16).SetNumWorkers(16);
-  std::unique_ptr<DataSchema> schema = mindspore::make_unique<DataSchema>();
+  std::unique_ptr<DataSchema> schema = std::make_unique<DataSchema>();
  schema->LoadSchemaFile(datasets_root_path_ + "/testTFTestAllTypes/datasetSchema.json", {});
  builder.SetDataSchema(std::move(schema));
  Status rc = builder.Build(&my_tfreader_op);
  ASSERT_TRUE(rc.IsOk());
...
...
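The TFReader-based tests above and below all follow the same ownership pattern: build the schema as a std::unique_ptr, then transfer it into the builder with std::move. The sketch below uses hypothetical Schema and Builder classes (not the real DataSchema or TFReaderOp::Builder API) purely to illustrate that pattern; after SetSchema the local pointer is empty and the builder owns the object.

// Hedged sketch of the ownership-transfer pattern; Schema/Builder are hypothetical.
#include <memory>
#include <string>
#include <utility>

struct Schema {
  void Load(const std::string &path) { path_ = path; }
  std::string path_;
};

class Builder {
 public:
  Builder &SetSchema(std::unique_ptr<Schema> schema) {
    schema_ = std::move(schema);  // builder takes ownership
    return *this;
  }
  bool HasSchema() const { return schema_ != nullptr; }

 private:
  std::unique_ptr<Schema> schema_;
};

int main() {
  std::unique_ptr<Schema> schema = std::make_unique<Schema>();
  schema->Load("/testTFTestAllTypes/datasetSchema.json");  // illustrative path only
  Builder builder;
  builder.SetSchema(std::move(schema));  // local pointer is now null; builder owns it
  return builder.HasSchema() ? 0 : 1;
}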
tests/ut/cpp/dataset/stand_alone_samplers_test.cc
...
...
@@ -74,7 +74,7 @@ TEST_F(MindDataTestStandAloneSampler, TestDistributedSampler) {
  std::unique_ptr<DataBuffer> db;
  std::shared_ptr<Tensor> tensor;
  for (int i = 0; i < 6; i++) {
-    std::unique_ptr<Sampler> sampler = mindspore::make_unique<DistributedSampler>(3, i % 3, (i < 3 ? false : true));
+    std::unique_ptr<Sampler> sampler = std::make_unique<DistributedSampler>(3, i % 3, (i < 3 ? false : true));
    sampler->Init(&mock);
    sampler->GetNextBuffer(&db);
    db->GetTensor(&tensor, 0, 0);
...
...
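The loop above constructs one DistributedSampler per shard with (num_shards = 3, shard_id = i % 3). As a rough illustration only, the sketch below shows a generic round-robin way a distributed sampler of this shape can hand each shard a disjoint slice of the row indices; it is an assumption about the general technique, not MindSpore's actual DistributedSampler logic.

// Generic sharding sketch; not the MindSpore implementation.
#include <cstdint>
#include <iostream>
#include <vector>

std::vector<int64_t> ShardIndices(int64_t num_rows, int64_t num_shards, int64_t shard_id) {
  std::vector<int64_t> out;
  for (int64_t i = shard_id; i < num_rows; i += num_shards) {
    out.push_back(i);  // shard k takes rows k, k + num_shards, k + 2 * num_shards, ...
  }
  return out;
}

int main() {
  for (int64_t shard = 0; shard < 3; ++shard) {
    std::cout << "shard " << shard << ":";
    for (int64_t idx : ShardIndices(10, 3, shard)) std::cout << ' ' << idx;
    std::cout << '\n';
  }
  return 0;
}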
tests/ut/cpp/dataset/tfReader_op_test.cc
...
...
@@ -48,7 +48,7 @@ TEST_F(MindDataTestTFReaderOp, TestTFReaderBasic1) {
  builder.SetDatasetFilesList({dataset_path}).SetRowsPerBuffer(16).SetNumWorkers(16);
-  std::unique_ptr<DataSchema> schema = mindspore::make_unique<DataSchema>();
+  std::unique_ptr<DataSchema> schema = std::make_unique<DataSchema>();
  schema->LoadSchemaFile(datasets_root_path_ + "/testTFTestAllTypes/datasetSchema.json", {});
  builder.SetDataSchema(std::move(schema));
  Status rc = builder.Build(&my_tfreader_op);
...
...
@@ -102,7 +102,7 @@ TEST_F(MindDataTestTFReaderOp, TestTFReaderLargeRowsPerBuffer) {
  builder.SetDatasetFilesList({dataset_path}).SetRowsPerBuffer(500).SetNumWorkers(16);
-  std::unique_ptr<DataSchema> schema = mindspore::make_unique<DataSchema>();
+  std::unique_ptr<DataSchema> schema = std::make_unique<DataSchema>();
  schema->LoadSchemaFile(datasets_root_path_ + "/testTFTestAllTypes/datasetSchema.json", {});
  builder.SetDataSchema(std::move(schema));
  Status rc = builder.Build(&my_tfreader_op);
...
...
@@ -156,7 +156,7 @@ TEST_F(MindDataTestTFReaderOp, TestTFReaderSmallRowsPerBuffer) {
  builder.SetDatasetFilesList({dataset_path}).SetRowsPerBuffer(1).SetNumWorkers(16);
-  std::unique_ptr<DataSchema> schema = mindspore::make_unique<DataSchema>();
+  std::unique_ptr<DataSchema> schema = std::make_unique<DataSchema>();
  schema->LoadSchemaFile(datasets_root_path_ + "/testTFTestAllTypes/datasetSchema.json", {});
  builder.SetDataSchema(std::move(schema));
  Status rc = builder.Build(&my_tfreader_op);
...
...
@@ -211,7 +211,7 @@ TEST_F(MindDataTestTFReaderOp, TestTFReaderLargeQueueSize) {
    .SetWorkerConnectorSize(1).SetRowsPerBuffer(16).SetNumWorkers(16);
-  std::unique_ptr<DataSchema> schema = mindspore::make_unique<DataSchema>();
+  std::unique_ptr<DataSchema> schema = std::make_unique<DataSchema>();
  schema->LoadSchemaFile(datasets_root_path_ + "/testTFTestAllTypes/datasetSchema.json", {});
  builder.SetDataSchema(std::move(schema));
  Status rc = builder.Build(&my_tfreader_op);
...
...
@@ -265,7 +265,7 @@ TEST_F(MindDataTestTFReaderOp, TestTFReaderOneThread) {
  builder.SetDatasetFilesList({dataset_path}).SetRowsPerBuffer(16).SetNumWorkers(1);
-  std::unique_ptr<DataSchema> schema = mindspore::make_unique<DataSchema>();
+  std::unique_ptr<DataSchema> schema = std::make_unique<DataSchema>();
  schema->LoadSchemaFile(datasets_root_path_ + "/testTFTestAllTypes/datasetSchema.json", {});
  builder.SetDataSchema(std::move(schema));
  Status rc = builder.Build(&my_tfreader_op);
...
...
@@ -321,7 +321,7 @@ TEST_F(MindDataTestTFReaderOp, TestTFReaderRepeat) {
    .SetRowsPerBuffer(16).SetWorkerConnectorSize(16).SetNumWorkers(16);
-  std::unique_ptr<DataSchema> schema = mindspore::make_unique<DataSchema>();
+  std::unique_ptr<DataSchema> schema = std::make_unique<DataSchema>();
  schema->LoadSchemaFile(datasets_root_path_ + "/testTFTestAllTypes/datasetSchema.json", {});
  builder.SetDataSchema(std::move(schema));
  Status rc = builder.Build(&my_tfreader_op);
...
...
@@ -379,7 +379,7 @@ TEST_F(MindDataTestTFReaderOp, TestTFReaderSchemaConstructor) {
  std::string dataset_path;
  dataset_path = datasets_root_path_ + "/testTFTestAllTypes";
-  std::unique_ptr<DataSchema> data_schema = mindspore::make_unique<DataSchema>();
+  std::unique_ptr<DataSchema> data_schema = std::make_unique<DataSchema>();
  std::vector<std::string> columns_to_load;
  columns_to_load.push_back("col_sint32");
  columns_to_load.push_back("col_binary");
...
...
@@ -445,7 +445,7 @@ TEST_F(MindDataTestTFReaderOp, TestTFReaderTake1Row) {
  std::shared_ptr<TFReaderOp> my_tfreader_op;
  TFReaderOp::Builder builder;
  builder.SetDatasetFilesList({dataset_path + "/test.data"}).SetRowsPerBuffer(5).SetNumWorkers(16);
-  std::unique_ptr<DataSchema> schema = mindspore::make_unique<DataSchema>();
+  std::unique_ptr<DataSchema> schema = std::make_unique<DataSchema>();
  schema->LoadSchemaFile(datasets_root_path_ + "/testTFTestAllTypes/datasetSchema1Row.json", {});
  builder.SetDataSchema(std::move(schema));
...
...
@@ -503,7 +503,7 @@ TEST_F(MindDataTestTFReaderOp, TestTFReaderTake1Buffer) {
  std::shared_ptr<TFReaderOp> my_tfreader_op;
  TFReaderOp::Builder builder;
  builder.SetDatasetFilesList({dataset_path + "/test.data"}).SetRowsPerBuffer(5).SetNumWorkers(16);
-  std::unique_ptr<DataSchema> schema = mindspore::make_unique<DataSchema>();
+  std::unique_ptr<DataSchema> schema = std::make_unique<DataSchema>();
  schema->LoadSchemaFile(datasets_root_path_ + "/testTFTestAllTypes/datasetSchema5Rows.json", {});
  builder.SetDataSchema(std::move(schema));
...
...
@@ -561,7 +561,7 @@ TEST_F(MindDataTestTFReaderOp, TestTFReaderTake7Rows) {
  std::shared_ptr<TFReaderOp> my_tfreader_op;
  TFReaderOp::Builder builder;
  builder.SetDatasetFilesList({dataset_path + "/test.data"}).SetRowsPerBuffer(5).SetNumWorkers(16);
-  std::unique_ptr<DataSchema> schema = mindspore::make_unique<DataSchema>();
+  std::unique_ptr<DataSchema> schema = std::make_unique<DataSchema>();
  schema->LoadSchemaFile(datasets_root_path_ + "/testTFTestAllTypes/datasetSchema7Rows.json", {});
  builder.SetDataSchema(std::move(schema));
...
...
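For context on the commit title, a pre-C++14 codebase usually carries a small header like the removed make_unique.h so that make_unique is available before the standard provides it. The sketch below is a generic version of such a backport helper; it is an assumption about the typical shape of this kind of header, not the actual contents of the deleted MindSpore file. Once the toolchain guarantees C++14, every call site can switch to std::make_unique and the local header can be dropped, which is what this commit does across the test files above.

// Generic pre-C++14 make_unique backport sketch; not the removed MindSpore header.
#include <memory>
#include <utility>

namespace compat {
// Forward the constructor arguments and wrap the result in a std::unique_ptr,
// mirroring what std::make_unique does in C++14.
template <typename T, typename... Args>
std::unique_ptr<T> make_unique(Args &&... args) {
  return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
}
}  // namespace compat

int main() {
  // Both calls are interchangeable once C++14 is available, which is why the
  // project-local helper can be deleted and call sites rewritten in place.
  std::unique_ptr<int> a = compat::make_unique<int>(42);
  std::unique_ptr<int> b = std::make_unique<int>(42);
  return (*a == *b) ? 0 : 1;
}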