BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit 4a172611, authored Jan 28, 2019 by Qiao Longfei

complete test_async_ssa_graph_executor_mnist test=develop

Parent: 02dab46a
Showing 1 changed file with 91 additions and 71 deletions.

python/paddle/fluid/tests/unittests/test_async_ssa_graph_executor_mnist.py  (+91, −71)
...
@@ -18,60 +18,61 @@ import os
 import unittest
 import numpy
+import time
 import paddle
 import paddle.fluid as fluid
 
 BATCH_SIZE = 64
 
 
-def loss_net(hidden, label):
-    prediction = fluid.layers.fc(input=hidden, size=10, act='softmax')
-    loss = fluid.layers.cross_entropy(input=prediction, label=label)
-    avg_loss = fluid.layers.mean(loss)
-    acc = fluid.layers.accuracy(input=prediction, label=label)
-    return prediction, avg_loss, acc
-
-
-def convolutional_neural_network(img, label):
-    conv_pool_1 = fluid.nets.simple_img_conv_pool(
-        input=img,
-        filter_size=5,
-        num_filters=20,
-        pool_size=2,
-        pool_stride=2,
-        act="relu")
-    conv_pool_1 = fluid.layers.batch_norm(conv_pool_1)
-    conv_pool_2 = fluid.nets.simple_img_conv_pool(
-        input=conv_pool_1,
-        filter_size=5,
-        num_filters=50,
-        pool_size=2,
-        pool_stride=2,
-        act="relu")
-    return loss_net(conv_pool_2, label)
-
-
-def train(use_cuda, thread_num, cpu_num):
-    if use_cuda and not fluid.core.is_compiled_with_cuda():
-        print("paddle is not compiled with cuda, exit!")
-        return
-
-    img = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32')
-    label = fluid.layers.data(name='label', shape=[1], dtype='int64')
-    py_reader = fluid.layers.create_py_reader_by_data(
-        capacity=64,
-        feed_list=[img, label],
-        name='py_reader',
-        use_double_buffer=True)
-    img, label = fluid.layers.read_file(py_reader)
-
-    prediction, avg_loss, acc = convolutional_neural_network(img, label)
-
-    test_program = fluid.default_main_program().clone(for_test=True)
-
-    optimizer = fluid.optimizer.Adam(learning_rate=0.001)
-    optimizer.minimize(avg_loss)
+def convolutional_neural_network(use_py_reader):
+    with fluid.unique_name.guard():
+        img = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32')
+        label = fluid.layers.data(name='label', shape=[1], dtype='int64')
+
+        py_reader = None
+        if use_py_reader:
+            py_reader = fluid.layers.create_py_reader_by_data(
+                capacity=64,
+                feed_list=[img, label],
+                name='py_reader',
+                use_double_buffer=True)
+            img, label = fluid.layers.read_file(py_reader)
+
+        conv_pool_1 = fluid.nets.simple_img_conv_pool(
+            input=img,
+            filter_size=5,
+            num_filters=20,
+            pool_size=2,
+            pool_stride=2,
+            act="relu")
+        conv_pool_1 = fluid.layers.batch_norm(conv_pool_1)
+        conv_pool_2 = fluid.nets.simple_img_conv_pool(
+            input=conv_pool_1,
+            filter_size=5,
+            num_filters=50,
+            pool_size=2,
+            pool_stride=2,
+            act="relu")
+
+        prediction = fluid.layers.fc(input=conv_pool_2, size=10, act='softmax')
+        loss = fluid.layers.cross_entropy(input=prediction, label=label)
+        avg_loss = fluid.layers.mean(loss)
+        acc = fluid.layers.accuracy(input=prediction, label=label)
+
+        return img, label, prediction, avg_loss, acc, py_reader
+
+
+def test():
+    place = fluid.CPUPlace()
+    exe = fluid.Executor(place)
+
+    test_reader = paddle.batch(
+        paddle.dataset.mnist.test(), batch_size=BATCH_SIZE)
+
+    img, label, prediction, avg_loss, acc, py_reader = convolutional_neural_network(
+        use_py_reader=False)
+
+    feeder = fluid.DataFeeder(feed_list=[img, label], place=place)
 
     def train_test(train_test_program, train_test_feed, train_test_reader):
         acc_set = []
...
@@ -87,16 +88,33 @@ def train(use_cuda, thread_num, cpu_num):
         avg_loss_val_mean = numpy.array(avg_loss_set).mean()
         return avg_loss_val_mean, acc_val_mean
 
-    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
+    # test for epoch
+    avg_loss_val, acc_val = train_test(
+        train_test_program=fluid.default_main_program(),
+        train_test_reader=test_reader,
+        train_test_feed=feeder)
+
+    print("Test: avg_cost: %s, acc: %s" % (avg_loss_val, acc_val))
+    assert acc_val > 0.96
+
+
+def train(use_cuda, thread_num, cpu_num):
+    if use_cuda and not fluid.core.is_compiled_with_cuda():
+        print("paddle is not compiled with cuda, exit!")
+        return
+
+    img, label, prediction, avg_loss, acc, py_reader = convolutional_neural_network(
+        use_py_reader=True)
+
+    optimizer = fluid.optimizer.Adam(learning_rate=0.001)
+    optimizer.minimize(avg_loss)
 
     train_reader = paddle.batch(
         paddle.reader.shuffle(
             paddle.dataset.mnist.train(), buf_size=500),
         batch_size=BATCH_SIZE)
-    test_reader = paddle.batch(
-        paddle.dataset.mnist.test(), batch_size=BATCH_SIZE)
 
-    feeder = fluid.DataFeeder(feed_list=[img, label], place=place)
-
+    place = fluid.CPUPlace()
     exe = fluid.Executor(place)
     exe.run(fluid.default_startup_program())
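The hunk above rebuilds train() around the py_reader returned by convolutional_neural_network(use_py_reader=True), but the lines that hand train_reader to that reader and start it sit in collapsed context. Below is a minimal, self-contained sketch of the usual Paddle 1.x feeding pattern, with synthetic data standing in for paddle.dataset.mnist; the decorate_paddle_reader()/start() calls are the standard py_reader API and are an assumption here, not a quotation of the hidden lines.

# Sketch only (not lines from this commit): drive a program from a py_reader
# built with create_py_reader_by_data, drain it until EOFException, then call
# reset() -- the same reset()/queue object visible in the hunks above.
import numpy
import paddle
import paddle.fluid as fluid

img = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
py_reader = fluid.layers.create_py_reader_by_data(
    capacity=64, feed_list=[img, label], name='demo_reader',
    use_double_buffer=True)
img, label = fluid.layers.read_file(py_reader)
loss = fluid.layers.mean(fluid.layers.fc(input=img, size=10))

place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())


def synthetic_mnist():
    # Eight random samples shaped like MNIST: a [1, 28, 28] image and a label.
    for _ in range(8):
        yield (numpy.random.random([1, 28, 28]).astype('float32'),
               numpy.random.randint(0, 10, [1]).astype('int64'))


# Assumed standard py_reader API: attach a batched reader, then start it
# before the run loop; the test itself wires paddle.dataset.mnist.train() in
# the same way (in context not shown by this diff).
py_reader.decorate_paddle_reader(paddle.batch(synthetic_mnist, batch_size=4))
py_reader.start()
try:
    while True:
        loss_val, = exe.run(fetch_list=[loss.name])
except fluid.core.EOFException:
    py_reader.reset()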
...
@@ -106,11 +124,11 @@ def train(use_cuda, thread_num, cpu_num):
     print("thread_num:" + str(thread_num))
 
     build_strategy = fluid.BuildStrategy()
-    build_strategy.async_mode = True  # enable async mode
+    build_strategy.async_mode = True
 
     exec_strategy = fluid.ExecutionStrategy()
     exec_strategy.num_threads = thread_num
-    exec_strategy.num_iteration_per_run = 1
+    exec_strategy.num_iteration_per_run = 10
 
     main_program = fluid.default_main_program()
     pe = fluid.ParallelExecutor(
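For reference, the two strategy objects touched in this hunk control how the async SSA graph executor under test runs; a small runnable sketch with the knobs spelled out follows (the arguments actually passed to fluid.ParallelExecutor are in collapsed context and are not reproduced here):

# Sketch only, assuming the Paddle 1.x fluid API used in the diff above.
import paddle.fluid as fluid

build_strategy = fluid.BuildStrategy()
build_strategy.async_mode = True           # switch on the async SSA graph executor under test

exec_strategy = fluid.ExecutionStrategy()
exec_strategy.num_threads = 2              # execution threads per trainer (thread_num in the test)
exec_strategy.num_iteration_per_run = 10   # mini-batches consumed by each pe.run() call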
...
@@ -126,37 +144,39 @@ def train(use_cuda, thread_num, cpu_num):
     step = 0
     try:
         while True:
-            print("step %d in" % step)
             loss_val = pe.run(fetch_list=[avg_loss.name])
             loss_val = numpy.mean(loss_val)
-            if step % 1 == 0:
+            if step % 100 == 0:
                 print("Batch %d, Cost %f, queue size %d" %
                       (step, loss_val, py_reader.queue.size()))
             step += 1
     except fluid.core.EOFException:
+        print("train end")
         py_reader.reset()
 
-    # test for epoch
-    avg_loss_val, acc_val = train_test(
-        train_test_program=test_program,
-        train_test_reader=test_reader,
-        train_test_feed=feeder)
-
-    print("Test: avg_cost: %s, acc: %s" % (avg_loss_val, acc_val))
+    """
+    step = 0
+    for step_id, data in enumerate(train_reader()):
+        loss_val = pe.run(feed=feeder.feed(data), fetch_list=[avg_loss.name])
+        loss_val = numpy.mean(loss_val)
+        if step % 100 == 0:
+            print("Batch %d, Cost %f" % (step, loss_val))
+        step += 1
+    """
+
+    return step
 
 
 class TestAsyncSSAGraphExecutor(unittest.TestCase):
     def test_check_async_ssa_exe_train(self):
-        train(use_cuda=False, thread_num=2, cpu_num=2)
+        step_list = []
+        for cpu_num in [1, 2, 4]:
+            scope = fluid.core.Scope()
+            with fluid.scope_guard(scope):
+                with fluid.program_guard(
+                        fluid.Program(), startup_program=fluid.Program()):
+                    start_time = time.time()
+                    step = train(
+                        use_cuda=False, thread_num=cpu_num, cpu_num=cpu_num)
+                    end_time = time.time()
+                step_list.append(step)
+                print("cpu_num -> " + str(cpu_num) + " step -> " + str(step) +
+                      " time -> " + str(end_time - start_time))
+                with fluid.program_guard(
+                        fluid.Program(), startup_program=fluid.Program()):
+                    test()
+        assert step_list[0] / 2 == step_list[1]
+        assert step_list[1] / 2 == step_list[2]
 
 
 if __name__ == "__main__":
...
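With the file updated, the test case can be exercised locally through the standard unittest machinery; a minimal sketch, assuming a CPU build of Paddle 1.x and that the test file is on the import path (MNIST is downloaded by paddle.dataset.mnist on first use):

# Minimal sketch: load and run the updated test case with the stock unittest runner.
import unittest

from test_async_ssa_graph_executor_mnist import TestAsyncSSAGraphExecutor

if __name__ == "__main__":
    suite = unittest.TestLoader().loadTestsFromTestCase(TestAsyncSSAGraphExecutor)
    unittest.TextTestRunner(verbosity=2).run(suite)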