Commit b789a3a4 (unverified)
Authored Jul 18, 2018 by yuyang18
Change code
Parent: 401e92f6
Showing 4 changed files with 28 additions and 11 deletions (+28 -11):

paddle/fluid/operators/reader/buffered_reader.cc  +15 -6
paddle/fluid/operators/reader/buffered_reader.h  +9 -2
python/paddle/fluid/tests/unittests/test_py_reader_push_pop.py  +2 -2
python/paddle/fluid/tests/unittests/test_py_reader_using_executor.py  +2 -1
paddle/fluid/operators/reader/buffered_reader.cc

@@ -28,15 +28,15 @@ BufferedReader::BufferedReader(
       buffer_size_(buffer_size) {
   cpu_buffer_.resize(buffer_size);
   gpu_buffer_.resize(buffer_size);
-  AppendFutureToBatchSize();
+  ReadTillBufferFullAsync();
 }
 
-void BufferedReader::AppendFutureToBatchSize() {
+void BufferedReader::ReadTillBufferFullAsync() {
   PADDLE_ENFORCE_EQ(position_.size(), 0U);
   for (size_t i = 0; i < buffer_size_; ++i) {
-    AppendFuture(i);
+    ReadAsync(i);
   }
 }
 
-void BufferedReader::AppendFuture(size_t i) {
+void BufferedReader::ReadAsync(size_t i) {
   position_.emplace(thread_pool_.enqueue([this, i]() -> size_t {
     TensorVec &cpu = cpu_buffer_[i];
     reader_->ReadNext(&cpu);
@@ -50,6 +50,7 @@ void BufferedReader::AppendFuture(size_t i) {
       gpu.resize(cpu.size());
       for (size_t i = 0; i < cpu.size(); ++i) {
         framework::TensorCopySync(cpu[i], place_, &gpu[i]);
+        gpu[i].set_lod(cpu[i].lod());
       }
     }
     return i;
@@ -60,10 +61,11 @@ void BufferedReader::ShutdownImpl() {
   while (!position_.empty()) {
     position_.pop();
   }
+  prev_pos_ = -1UL;
 }
 
 void BufferedReader::StartImpl() {
   reader_->Start();
-  AppendFutureToBatchSize();
+  ReadTillBufferFullAsync();
 }
 
 void BufferedReader::ReadNextImpl(std::vector<framework::LoDTensor> *out) {
   if (position_.empty()) {
@@ -79,7 +81,14 @@ void BufferedReader::ReadNextImpl(std::vector<framework::LoDTensor> *out) {
   }
 
   *out = platform::is_gpu_place(place_) ? gpu_buffer_[i] : cpu_buffer_[i];
-  AppendFuture(i);
+
+  // Do not push current position into ReadAsync. Push the previous position
+  // Since all computation in fluid are async, change the data of
+  // current position may cause data error.
+  if (prev_pos_ != -1Ul) {
+    ReadAsync(prev_pos_);
+  }
+  prev_pos_ = i;
 }
 
 }  // namespace reader
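The comment added to ReadNextImpl is the core of this change: the slot just handed to the caller is not refilled immediately, because downstream Fluid computation consumes it asynchronously; instead the slot from the previous call, tracked by prev_pos_, is re-queued. The standalone Python sketch below (not Paddle code; names such as PrefetchBuffer and read_next are hypothetical) mirrors that scheduling with a single-worker pool standing in for thread_pool_ and a queue of futures standing in for position_.

# A minimal sketch (not Paddle code) of the pattern above: pre-allocated slots,
# a queue of futures recording pending reads, and deferred refill of the slot
# returned on the previous call.
import queue
from concurrent.futures import ThreadPoolExecutor


class PrefetchBuffer(object):
    def __init__(self, read_fn, buffer_size):
        self._read_fn = read_fn                          # stands in for reader_->ReadNext
        self._slots = [[] for _ in range(buffer_size)]   # like cpu_buffer_: allocated once
        self._pool = ThreadPoolExecutor(max_workers=1)   # stands in for thread_pool_
        self._pending = queue.Queue()                    # like std::queue<std::future<size_t>>
        self._prev_pos = None                            # like prev_pos_ == -1UL
        for i in range(buffer_size):                     # like ReadTillBufferFullAsync()
            self._read_async(i)

    def _read_async(self, i):                            # like ReadAsync(i)
        def work():
            self._slots[i][:] = self._read_fn()          # overwrite the slot in place
            return i
        self._pending.put(self._pool.submit(work))

    def read_next(self):                                 # like ReadNextImpl
        i = self._pending.get().result()                 # wait for the oldest pending read
        out = self._slots[i]
        # Refill the *previous* slot, not the one we are handing out, so the
        # previous consumer has had a full step to finish with its data.
        if self._prev_pos is not None:
            self._read_async(self._prev_pos)
        self._prev_pos = i
        return out


if __name__ == '__main__':
    counter = iter(range(100))
    buf = PrefetchBuffer(lambda: [next(counter)], buffer_size=2)
    print([buf.read_next()[0] for _ in range(5)])        # prints [0, 1, 2, 3, 4]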
paddle/fluid/operators/reader/buffered_reader.h

@@ -35,9 +35,9 @@ class BufferedReader : public framework::DecoratedReader {
   ~BufferedReader() override;
 
  private:
-  void AppendFutureToBatchSize();
-  void AppendFuture(size_t i);
+  void ReadTillBufferFullAsync();
+  void ReadAsync(size_t i);
 
  protected:
   void ShutdownImpl() override;
@@ -50,8 +50,15 @@ class BufferedReader : public framework::DecoratedReader {
   const size_t buffer_size_;
 
   std::queue<std::future<size_t>> position_;
+
+  // The buffer for reading data.
+  // NOTE: the simplest way to implement buffered reader is do not use any
+  // buffer, just async read and create futures as buffer size. However, to
+  // malloc Tensor every time is extremely slow. Here we store all data in
+  // buffers and prevent alloc every time.
   std::vector<TensorVec> cpu_buffer_;
   std::vector<TensorVec> gpu_buffer_;
+  size_t prev_pos_{-1UL};
 };
 
 }  // namespace reader
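The NOTE added to the header records the design choice behind cpu_buffer_ and gpu_buffer_: rather than allocating a fresh tensor for every asynchronous read, the reader keeps buffer_size pre-allocated slots and overwrites them. A tiny hypothetical illustration of that reuse (plain Python, not Paddle code):

# A fixed ring of buffers is allocated once and then overwritten in place,
# so no per-read allocation happens.
buffer_size = 3
slots = [bytearray(16) for _ in range(buffer_size)]  # allocated once, like cpu_buffer_
original_ids = [id(s) for s in slots]

def read_into(slot, step):
    # Overwrite the existing storage; nothing new is allocated for this read.
    slot[:] = bytes([step % 256]) * len(slot)

for step in range(10):
    read_into(slots[step % buffer_size], step)

# Every pass through the ring reuses exactly the same underlying buffers.
assert [id(s) for s in slots] == original_ids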
python/paddle/fluid/tests/unittests/test_py_reader_push_pop.py

@@ -45,12 +45,12 @@ class TestPyReader(unittest.TestCase):
         ) else fluid.CPUPlace()
         executor = fluid.Executor(place)
 
-        data_file, feed_queue = fluid.layers.py_reader(
+        data_file = fluid.layers.py_reader(
             capacity=self.capacity,
             dtypes=self.dtypes,
             lod_levels=self.lod_levels,
             shapes=self.shapes)
+        feed_queue = data_file.queue
         read_out_data = fluid.layers.read_file(data_file)
         self.inputs = []
python/paddle/fluid/tests/unittests/test_py_reader_using_executor.py

@@ -52,11 +52,12 @@ def simple_fc_net(in_size,
                   batch_size,
                   queue_capacity,
                   use_double_buffer=False):
-    reader, feed_queue = fluid.layers.py_reader(
+    reader = fluid.layers.py_reader(
         capacity=queue_capacity,
         shapes=[[-1, in_size], [-1, 1]],
         lod_levels=[0, 0],
         dtypes=['float32', 'int64'])
+    feed_queue = reader.queue
     reader = fluid.layers.batch(reader, batch_size=batch_size)
     if use_double_buffer:
         reader = fluid.layers.double_buffer(reader)
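Both test updates reflect the same API adjustment: fluid.layers.py_reader now returns only the reader variable, and the feed queue is taken from its queue attribute instead of from a returned tuple. Assembled only from the calls visible in the two diffs above, the updated usage looks roughly like this (capacity and shapes are placeholder values):

# Rough usage sketch based on the tests above; treat it as illustrative.
import paddle.fluid as fluid

place = fluid.CPUPlace()
executor = fluid.Executor(place)

data_file = fluid.layers.py_reader(
    capacity=8,
    shapes=[[-1, 32], [-1, 1]],
    lod_levels=[0, 0],
    dtypes=['float32', 'int64'])
# py_reader no longer returns (reader, queue); the feed queue is an attribute.
feed_queue = data_file.queue

read_out_data = fluid.layers.read_file(data_file)
# As in test_py_reader_using_executor, the reader can still be wrapped afterwards,
# e.g. with fluid.layers.batch(...) and fluid.layers.double_buffer(...).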