BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit ab86fb11
Authored Apr 17, 2018 by JiayiFeng

complete parallel accuracy test

Parent: 415460b5
Changes: 2 changed files with 39 additions and 13 deletions (+39 -13)

python/paddle/fluid/parallel_executor.py (+2 -1)
python/paddle/fluid/tests/unittests/test_parallel_executor.py (+37 -12)
python/paddle/fluid/parallel_executor.py

@@ -130,7 +130,8 @@ class ParallelExecutor(object):
                 or numpy array.
         :return: fetched value list.
         """
-        feed = feed_dict
+        if feed == {}:
+            feed = feed_dict
         if not isinstance(feed, dict):
             raise TypeError("feed should be a dict")
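The change above keeps the legacy `feed_dict` keyword working while letting an explicitly passed `feed` take precedence: when `feed` is left at its empty default, the value of `feed_dict` is used instead. A minimal standalone sketch of that fallback rule (the helper name `resolve_feed` is illustrative, not part of the Paddle API):

def resolve_feed(feed={}, feed_dict={}):
    # Mirror of the compatibility check above: an empty `feed` falls back to
    # the legacy `feed_dict`, and the result must be a dict.
    if feed == {}:
        feed = feed_dict
    if not isinstance(feed, dict):
        raise TypeError("feed should be a dict")
    return feed

# Legacy call style keeps working; an explicit `feed` wins when both are given.
assert resolve_feed(feed_dict={"image": 1}) == {"image": 1}
assert resolve_feed(feed={"label": 2}, feed_dict={"image": 1}) == {"label": 2}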
python/paddle/fluid/tests/unittests/test_parallel_executor.py

@@ -200,17 +200,29 @@ class TestParallelExecutorBase(unittest.TestCase):
     def check_network_convergence(self,
                                   method,
                                   memory_opt=True,
-                                  iter=10,
+                                  iter=50,
                                   batch_size=None,
                                   allow_op_delay=False,
-                                  feed_dict={}):
+                                  feed_dict={},
+                                  seed=None,
+                                  use_parallel_executor=True):
+        def run_executor(exe, feed, fetch_list, program=None):
+            if isinstance(exe, fluid.ParallelExecutor):
+                res = exe.run(fetch_list=fetch_list, feed=feed)
+            elif isinstance(exe, fluid.Executor):
+                if program is None:
+                    program = fluid.default_main_program()
+                res = exe.run(program=program,
+                              feed=feed,
+                              fetch_list=fetch_list)
+            else:
+                raise ValueError('Unkown type exe')
+            return res
+
         main = fluid.Program()
         startup = fluid.Program()
         with fluid.program_guard(main, startup):
+            if seed is not None:
+                startup.random_seed = seed
+                main.random_seed = seed
             loss = method(use_feed=len(feed_dict) > 0)
             adam = fluid.optimizer.Adam()
             adam.minimize(loss)
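The new `seed` argument is what makes the single-device and multi-device runs comparable: seeding the startup program fixes the parameter initializers, and seeding the main program fixes any in-graph randomness, so both runs start from identical weights. An illustrative-only sketch of that pattern (not the test code itself):

import paddle.fluid as fluid

main = fluid.Program()
startup = fluid.Program()
with fluid.program_guard(main, startup):
    # Seed both programs before any layers are added, so every later run
    # (plain executor or parallel executor) initializes the same weights.
    startup.random_seed = 1000
    main.random_seed = 1000
    # ... build the network here, e.g. loss = some_net(use_feed=True) ...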
@@ -229,13 +241,15 @@ class TestParallelExecutorBase(unittest.TestCase):
         if batch_size is not None:
             batch_size *= fluid.core.get_cuda_device_count()
         begin = time.time()
-        first_loss, = exe.run([loss.name], feed=feed_dict)
+        first_loss, = run_executor(
+            exe=exe, feed=feed_dict, fetch_list=[loss.name])
         first_loss = numpy.array(first_loss)

         for i in xrange(iter):
-            exe.run([], feed=feed_dict)
+            run_executor(exe=exe, feed=feed_dict, fetch_list=[])

-        last_loss, = exe.run([loss.name], feed=feed_dict)
+        last_loss, = run_executor(
+            exe=exe, feed=feed_dict, fetch_list=[loss.name])
         end = time.time()

         if batch_size is not None:
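In the timing block above, `batch_size` is multiplied by the CUDA device count so that it reflects the total number of examples processed per iteration across all devices, presumably for throughput bookkeeping further down. A dummy-number illustration of that scaling:

per_device_batch = 32
num_devices = 4                                # stand-in for fluid.core.get_cuda_device_count()
batch_size = per_device_batch * num_devices    # 128 examples per iteration in total
elapsed = 2.0                                  # stand-in for end - begin, in seconds
iters = 50
print("examples per second:", batch_size * iters / elapsed)   # 3200.0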
@@ -277,14 +291,25 @@ class TestMNIST(TestParallelExecutorBase):
                 "label": label})

     def test_simple_fc_parallel_accuracy(self):
-        #single_first_loss, single_last_loss = self.check_network_convergence(
-        #    simple_fc_net, seed=0, use_parallel_executor=False)
-        #parallel_first_loss, parallel_last_loss = self.check_network_convergence(
-        #    simple_fc_net, seed=0, use_parallel_executor=True)
-        print('single_first_loss=', single_first_loss)
-        print('single_last_loss=', single_last_loss)
-        print('parallel_first_loss=', parallel_first_loss)
-        print('parallel_last_loss=', parallel_last_loss)
+        img = numpy.zeros(shape=[32, 784], dtype='float32')
+        label = numpy.ones(shape=[32, 1], dtype='int64')
+        single_first_loss, single_last_loss = self.check_network_convergence(
+            method=simple_fc_net,
+            seed=1000,
+            feed_dict={"image": img,
+                       "label": label},
+            use_parallel_executor=False)
+        parallel_first_loss, parallel_last_loss = self.check_network_convergence(
+            method=simple_fc_net,
+            seed=1000,
+            feed_dict={"image": img,
+                       "label": label},
+            use_parallel_executor=True)
+
+        for p_f in parallel_first_loss:
+            self.assertAlmostEquals(p_f, single_first_loss[0], delta=1e-6)
+        for p_l in parallel_last_loss:
+            self.assertAlmostEquals(p_l, single_last_loss[0], delta=1e-6)

     def test_batchnorm_fc(self):
         self.check_network_convergence(fc_with_batchnorm)
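The completed test runs the same seeded network once with a plain executor and once with a parallel executor, then compares losses element-wise: the parallel fetch can hold one loss value per device, while the single-device fetch has a single element, hence the indexing with `[0]`. A dummy-number sketch of that comparison (the loss values are made up):

import numpy

single_first_loss = numpy.array([2.3025])             # single-device run
parallel_first_loss = numpy.array([2.3025, 2.3025])   # hypothetical per-device losses

# Every per-device loss must match the single-device loss to within 1e-6.
for p_f in parallel_first_loss:
    assert abs(p_f - single_first_loss[0]) < 1e-6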