Commit c42c4a67
Authored on Mar 27, 2018 by Yu Yang

Add performance tests

Parent: 5b92dd40
Showing 1 changed file with 46 additions and 27 deletions
python/paddle/fluid/tests/unittests/test_parallel_executor.py (+46, -27)
@@ -135,14 +135,11 @@ def bottleneck_block(input, num_filters, stride, cardinality, reduction_ratio):
     return fluid.layers.elementwise_add(x=short, y=scale, act='relu')
 
 
-def SE_ResNeXt152():
-    reader = fluid.layers.open_recordio_file(
-        filename='./flowers.recordio',
-        shapes=[[-1, 3, 224, 224], [-1, 1]],
-        lod_levels=[0, 0],
-        dtypes=['float32', 'int64'])
-
-    img, label = fluid.layers.read_file(reader)
+def SE_ResNeXt152(batch_size=4):
+    img = fluid.layers.fill_constant(
+        shape=[batch_size, 3, 224, 224], dtype='float32', value=0.0)
+    label = fluid.layers.fill_constant(
+        shape=[batch_size, 1], dtype='int64', value=0.0)
 
     conv = conv_bn_layer(
         input=img, num_filters=64, filter_size=3, stride=2, act='relu')
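This hunk replaces the RecordIO reader with constant tensors, so the performance run no longer depends on './flowers.recordio' or the data pipeline. As a rough standalone illustration (plain NumPy rather than Fluid; the helper name is made up for this sketch), the synthetic inputs have the same shapes and dtypes the old reader produced:

import numpy

def make_synthetic_batch(batch_size=4):
    # Same shapes/dtypes as the fill_constant tensors in the test:
    # images are [N, 3, 224, 224] float32, labels are [N, 1] int64,
    # both filled with zeros so no file I/O or decoding is involved.
    img = numpy.zeros((batch_size, 3, 224, 224), dtype='float32')
    label = numpy.zeros((batch_size, 1), dtype='int64')
    return img, label

img, label = make_synthetic_batch()
print(img.shape, label.shape)  # (4, 3, 224, 224) (4, 1)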
@@ -179,8 +176,15 @@ def SE_ResNeXt152():
     return loss
 
 
+import time
+
+
 class TestParallelExecutorBase(unittest.TestCase):
-    def check_network_convergence(self, method, memory_opt=True, iter=10):
+    def check_network_convergence(self,
+                                  method,
+                                  memory_opt=True,
+                                  iter=10,
+                                  batch_size=None):
         main = fluid.Program()
         startup = fluid.Program()
         with fluid.program_guard(main, startup):
@@ -191,6 +195,9 @@ class TestParallelExecutorBase(unittest.TestCase):
                 fluid.memory_optimize(main)
 
             exe = fluid.ParallelExecutor(loss_name=loss.name, use_cuda=True)
+            if batch_size is not None:
+                batch_size *= fluid.core.get_cuda_device_count()
+            begin = time.time()
             first_loss, = exe.run([loss.name])
             first_loss = numpy.array(first_loss)
@@ -198,6 +205,12 @@ class TestParallelExecutorBase(unittest.TestCase):
                 exe.run([])
 
             last_loss, = exe.run([loss.name])
+            end = time.time()
+
+            if batch_size is not None:
+                print "%.4f Instance per second" % (
+                    (batch_size * iter + 2) / (end - begin))
+
             last_loss = numpy.array(last_loss)
 
             print first_loss, last_loss
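For reference, the throughput figure printed above scales the per-device batch size by the number of CUDA devices and divides by the wall time between the first and last run; the "+ 2" appears to account for the first and last exe.run calls around the timed loop. A minimal sketch of that arithmetic (plain Python 3 with made-up numbers; num_devices stands in for fluid.core.get_cuda_device_count()):

def instances_per_second(per_device_batch, num_devices, iters, elapsed_seconds):
    # Mirrors the arithmetic in check_network_convergence:
    #   batch_size *= fluid.core.get_cuda_device_count()
    #   (batch_size * iter + 2) / (end - begin)
    effective_batch = per_device_batch * num_devices
    return (effective_batch * iters + 2) / elapsed_seconds

# Hypothetical numbers: batch 4 per device, 8 GPUs, 20 iterations, 16 s elapsed.
print("%.4f Instance per second" % instances_per_second(4, 8, 20, 16.0))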
@@ -229,26 +242,32 @@ class TestMNIST(TestParallelExecutorBase):
 class TestResnet(TestParallelExecutorBase):
-    @classmethod
-    def setUpClass(cls):
-        import os
-        if os.path.exists('./flowers.recordio'):
-            return
-        with fluid.program_guard(fluid.Program(), fluid.Program()):
-            reader = paddle.batch(flowers.train(), batch_size=4)
-            feeder = fluid.DataFeeder(
-                feed_list=[
-                    fluid.layers.data(
-                        name='image', shape=[3, 224, 224]),
-                    fluid.layers.data(
-                        name='label', shape=[1], dtype='int64'),
-                ],
-                place=fluid.CPUPlace())
-            fluid.recordio_writer.convert_reader_to_recordio_file(
-                "./flowers.recordio", reader, feeder)
+    # @classmethod
+    # def setUpClass(cls):
+    #     import os
+    #     if os.path.exists('./flowers.recordio'):
+    #         return
+    #     with fluid.program_guard(fluid.Program(), fluid.Program()):
+    #         reader = paddle.batch(flowers.train(), batch_size=4)
+    #         feeder = fluid.DataFeeder(
+    #             feed_list=[
+    #                 fluid.layers.data(
+    #                     name='image', shape=[3, 224, 224]),
+    #                 fluid.layers.data(
+    #                     name='label', shape=[1], dtype='int64'),
+    #             ],
+    #             place=fluid.CPUPlace())
+    #         fluid.recordio_writer.convert_reader_to_recordio_file(
+    #             "./flowers.recordio", reader, feeder, compressor=fluid.core.RecordIOWriter.Compressor.NoCompress
+    #         )
 
     def test_resnet(self):
-        self.check_network_convergence(SE_ResNeXt152, iter=200)
+        import functools
+        batch_size = 4
+        self.check_network_convergence(
+            functools.partial(
+                SE_ResNeXt152, batch_size=batch_size),
+            iter=20,
+            batch_size=batch_size)
 
 
 class ModelHyperParams(object):
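The rewritten test_resnet binds batch_size with functools.partial before handing the model builder to check_network_convergence, which then invokes it with no arguments. A minimal standalone sketch of that pattern (the build_model and run_benchmark names are illustrative, not from the test file):

import functools

def build_model(batch_size=4):
    # Stand-in for SE_ResNeXt152(batch_size=...): returns something loss-like.
    return "loss for batch_size=%d" % batch_size

def run_benchmark(method, iter=10, batch_size=None):
    # Stand-in for check_network_convergence: calls `method` with no arguments.
    loss = None
    for _ in range(iter):
        loss = method()
    return loss

batch_size = 4
print(run_benchmark(functools.partial(build_model, batch_size=batch_size),
                    iter=20, batch_size=batch_size))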