BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)

Commit 000c1f7c (unverified)
Authored on Dec 01, 2017 by Tao Luo; committed via GitHub on Dec 01, 2017.
Merge pull request #5933 from tensor-tang/inference
enable inference benchmark
Parents: ade6c832, 79b17097
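This commit adds an MKL-DNN inference benchmark next to the existing training benchmark: run_mkldnn_infer.sh trains a one-pass model for each topology if none is saved yet, then times paddle train --job=test and appends an FPS figure to each run's log. A minimal way to try it, assuming a CPU build of Paddle with MKL-DNN and that the commands are run from benchmark/paddle/image (neither assumption is stated in the commit itself):

cd benchmark/paddle/image
bash run_mkldnn_train.sh             # optional: the renamed training benchmark
bash run_mkldnn_infer.sh             # googlenet/resnet/vgg inference at batch sizes 1, 2, 4, 8, 16
tail logs/infer-vgg-19-mkldnn-1.log  # the FPS line is appended at the end of each inference log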
Showing 6 changed files with 165 additions and 29 deletions (+165 / -29).
benchmark/paddle/image/googlenet.py         +20  -6
benchmark/paddle/image/provider.py          +11  -5
benchmark/paddle/image/resnet.py            +24  -9
benchmark/paddle/image/run_mkldnn_infer.sh  +86  -0
benchmark/paddle/image/run_mkldnn_train.sh   +5  -4
benchmark/paddle/image/vgg.py               +19  -5
benchmark/paddle/image/googlenet.py

@@ -6,10 +6,21 @@ width = 224
 num_class = 1000
 batch_size = get_config_arg('batch_size', int, 128)
 use_gpu = get_config_arg('use_gpu', bool, True)
+is_infer = get_config_arg("is_infer", bool, False)
 
-args = {'height': height, 'width': width, 'color': True, 'num_class': num_class}
+args = {
+    'height': height,
+    'width': width,
+    'color': True,
+    'num_class': num_class,
+    'is_infer': is_infer
+}
 define_py_data_sources2(
-    "train.list", None, module="provider", obj="process", args=args)
+    "train.list" if not is_infer else None,
+    "test.list" if is_infer else None,
+    module="provider",
+    obj="process",
+    args=args)
 
 settings(
     batch_size=batch_size,

@@ -146,7 +157,6 @@ def inception(name, input, channels, \
     return cat
 
 
-lab = data_layer(name="label", size=1000)
 data = data_layer(name="input", size=3 * height * width)
 
 # stage 1

@@ -224,6 +234,10 @@ pool5 = img_pool_layer(
 dropout = dropout_layer(name="dropout", input=pool5, dropout_rate=0.4)
 out3 = fc_layer(
     name="output3", input=dropout, size=1000, act=SoftmaxActivation())
-loss3 = cross_entropy(name='loss3', input=out3, label=lab)
-
-outputs(loss3)
+
+if is_infer:
+    outputs(out3)
+else:
+    lab = data_layer(name="label", size=num_class)
+    loss3 = cross_entropy(name='loss3', input=out3, label=lab)
+    outputs(loss3)
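For context, is_infer is read with get_config_arg, so it is toggled from the paddle train command line rather than by editing the config. The sketch below mirrors, in abridged form, the invocation that run_mkldnn_infer.sh (added later in this commit) issues for GoogleNet; the model path assumes a one-pass model has already been saved:

paddle train --job=test \
  --config="googlenet.py" \
  --use_mkldnn=True \
  --use_gpu=False \
  --trainer_count=1 \
  --config_args="batch_size=1,layer_num=v1,is_infer=True" \
  --init_model_path="models/googlenet-v1/pass-00000/"

With is_infer=True the config above reads test.list instead of train.list, skips the label and cross_entropy layers, and outputs out3 directly.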
benchmark/paddle/image/provider.py

@@ -13,14 +13,20 @@ def initHook(settings, height, width, color, num_class, **kwargs):
         settings.data_size = settings.height * settings.width * 3
     else:
         settings.data_size = settings.height * settings.width
-
-    settings.slots = [dense_vector(settings.data_size), integer_value(1)]
+    settings.is_infer = kwargs.get('is_infer', False)
+    if settings.is_infer:
+        settings.slots = [dense_vector(settings.data_size)]
+    else:
+        settings.slots = [dense_vector(settings.data_size), integer_value(1)]
 
 
 @provider(
     init_hook=initHook, min_pool_size=-1, cache=CacheType.CACHE_PASS_IN_MEM)
 def process(settings, file_list):
-    for i in xrange(1024):
+    for i in xrange(2560 if settings.is_infer else 1024):
         img = np.random.rand(1, settings.data_size).reshape(-1, 1).flatten()
-        lab = random.randint(0, settings.num_class - 1)
-        yield img.astype('float32'), int(lab)
+        if settings.is_infer:
+            yield img.astype('float32')
+        else:
+            lab = random.randint(0, settings.num_class - 1)
+            yield img.astype('float32'), int(lab)
benchmark/paddle/image/resnet.py

@@ -6,11 +6,21 @@ width = 224
 num_class = 1000
 batch_size = get_config_arg('batch_size', int, 64)
 layer_num = get_config_arg("layer_num", int, 50)
-is_test = get_config_arg("is_test", bool, False)
+is_infer = get_config_arg("is_infer", bool, False)
 
-args = {'height': height, 'width': width, 'color': True, 'num_class': num_class}
+args = {
+    'height': height,
+    'width': width,
+    'color': True,
+    'num_class': num_class,
+    'is_infer': is_infer
+}
 define_py_data_sources2(
-    "train.list", None, module="provider", obj="process", args=args)
+    "train.list" if not is_infer else None,
+    "test.list" if is_infer else None,
+    module="provider",
+    obj="process",
+    args=args)
 
 settings(
     batch_size=batch_size,

@@ -45,7 +55,10 @@ def conv_bn_layer(name,
         act=LinearActivation(),
         bias_attr=False)
     return batch_norm_layer(
-        name=name + "_bn", input=tmp, act=active_type, use_global_stats=is_test)
+        name=name + "_bn",
+        input=tmp,
+        act=active_type,
+        use_global_stats=is_infer)
 
 
 def bottleneck_block(name, input, num_filters1, num_filters2):

@@ -207,7 +220,9 @@ elif layer_num == 152:
 else:
     print("Wrong layer number.")
 
-lbl = data_layer(name="label", size=num_class)
-loss = cross_entropy(name='loss', input=resnet, label=lbl)
-inputs(img, lbl)
-outputs(loss)
+if is_infer:
+    outputs(resnet)
+else:
+    lbl = data_layer(name="label", size=num_class)
+    loss = cross_entropy(name='loss', input=resnet, label=lbl)
+    outputs(loss)
benchmark/paddle/image/run_mkldnn_infer.sh (new file, mode 100755)

set -e

function clock_to_seconds() {
  hours=`echo $1 | awk -F ':' '{print $1}'`
  mins=`echo $1 | awk -F ':' '{print $2}'`
  secs=`echo $1 | awk -F ':' '{print $3}'`
  echo `bc -l <<< "$secs + $mins * 60 + $hours * 3600"`
}

function infer() {
  unset OMP_NUM_THREADS MKL_NUM_THREADS OMP_DYNAMIC KMP_AFFINITY
  topology=$1
  layer_num=$2
  bs=$3
  use_mkldnn=$4
  if [ $4 == "True" ]; then
    thread=1
    log="logs/infer-${topology}-${layer_num}-mkldnn-${bs}.log"
  elif [ $4 == "False" ]; then
    thread=`nproc`
    if [ $thread -gt $bs ]; then
      thread=$bs
    fi
    log="logs/infer-${topology}-${layer_num}-${thread}mklml-${bs}.log"
  else
    echo "Wrong input $4, use True or False."
    exit 0
  fi

  models_in="models/${topology}-${layer_num}/pass-00000/"
  if [ ! -d $models_in ]; then
    echo "Training model ${topology}_${layer_num}"
    paddle train --job=train \
      --config="${topology}.py" \
      --use_mkldnn=True \
      --use_gpu=False \
      --trainer_count=1 \
      --num_passes=1 \
      --save_dir="models/${topology}-${layer_num}" \
      --config_args="batch_size=128,layer_num=${layer_num}" \
      > /dev/null 2>&1
    echo "Done"
  fi

  log_period=$((256 / bs))
  paddle train --job=test \
    --config="${topology}.py" \
    --use_mkldnn=$use_mkldnn \
    --use_gpu=False \
    --trainer_count=$thread \
    --log_period=$log_period \
    --config_args="batch_size=${bs},layer_num=${layer_num},is_infer=True" \
    --init_model_path=$models_in \
    2>&1 | tee ${log}

  # calculate the last 5 logs period time of 1280 samples,
  # the time before are burning time.
  start=`tail ${log} -n 7 | head -n 1 | awk -F ' ' '{print $2}' | xargs`
  end=`tail ${log} -n 2 | head -n 1 | awk -F ' ' '{print $2}' | xargs`
  start_sec=`clock_to_seconds $start`
  end_sec=`clock_to_seconds $end`
  fps=`bc <<< "scale = 2; 1280 / ($end_sec - $start_sec)"`
  echo "Last 1280 samples start: ${start}(${start_sec} sec), end: ${end}(${end_sec} sec;" >> ${log}
  echo "FPS: $fps images/sec" >> ${log}
}

if [ ! -f "train.list" ]; then
  echo " " > train.list
fi
if [ ! -f "test.list" ]; then
  echo " " > test.list
fi
if [ ! -d "logs" ]; then
  mkdir logs
fi
if [ ! -d "models" ]; then
  mkdir -p models
fi

# inference benchmark
for use_mkldnn in True False; do
  for batchsize in 1 2 4 8 16; do
    infer googlenet v1 $batchsize $use_mkldnn
    infer resnet 50 $batchsize $use_mkldnn
    infer vgg 19 $batchsize $use_mkldnn
  done
done
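The FPS figure appended by infer() covers the last five log periods, i.e. 1280 samples, divided by the wall-clock span between the two timestamps pulled out of the log. A worked example with made-up timestamps (no real run is implied), reusing the clock_to_seconds helper above:

start="10:15:02.123"
end="10:15:42.123"
start_sec=`clock_to_seconds $start`                       # 36902.123
end_sec=`clock_to_seconds $end`                           # 36942.123
fps=`bc <<< "scale = 2; 1280 / ($end_sec - $start_sec)"`  # 1280 samples / 40 seconds
echo "FPS: $fps images/sec"                               # FPS: 32.00 images/sec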
benchmark/paddle/image/run_mkldnn.sh → benchmark/paddle/image/run_mkldnn_train.sh

@@ -8,13 +8,13 @@ function train() {
   use_mkldnn=$4
   if [ $4 == "True" ]; then
     thread=1
-    log="logs/${topology}-${layer_num}-mkldnn-${bs}.log"
+    log="logs/train-${topology}-${layer_num}-mkldnn-${bs}.log"
   elif [ $4 == "False" ]; then
     thread=`nproc`
     # each trainer_count use only 1 core to avoid conflict
-    log="logs/${topology}-${layer_num}-${thread}mklml-${bs}.log"
+    log="logs/train-${topology}-${layer_num}-${thread}mklml-${bs}.log"
   else
-    echo "Wrong input $3, use True or False."
+    echo "Wrong input $4, use True or False."
    exit 0
  fi
  args="batch_size=${bs},layer_num=${layer_num}"

@@ -30,13 +30,14 @@ function train() {
     2>&1 | tee ${log}
 }
 
-if [ ! -d "train.list" ]; then
+if [ ! -f "train.list" ]; then
   echo " " > train.list
 fi
 if [ ! -d "logs" ]; then
   mkdir logs
 fi
 
+# training benchmark
 for use_mkldnn in True False; do
   for batchsize in 64 128 256; do
     train vgg 19 $batchsize $use_mkldnn
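With the rename to run_mkldnn_train.sh and the added train- prefix, training logs (logs/train-*.log) and the new inference logs (logs/infer-*.log) no longer collide in the shared logs/ directory. A small, hypothetical follow-up to collect the throughput lines that run_mkldnn_infer.sh appends to each inference log:

# not part of this commit; assumes run_mkldnn_infer.sh has completed
grep -H "FPS:" logs/infer-*.log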
benchmark/paddle/image/vgg.py

@@ -6,10 +6,21 @@ width = 224
 num_class = 1000
 batch_size = get_config_arg('batch_size', int, 64)
 layer_num = get_config_arg('layer_num', int, 19)
+is_infer = get_config_arg("is_infer", bool, False)
 
-args = {'height': height, 'width': width, 'color': True, 'num_class': num_class}
+args = {
+    'height': height,
+    'width': width,
+    'color': True,
+    'num_class': num_class,
+    'is_infer': is_infer
+}
 define_py_data_sources2(
-    "train.list", None, module="provider", obj="process", args=args)
+    "train.list" if not is_infer else None,
+    "test.list" if is_infer else None,
+    module="provider",
+    obj="process",
+    args=args)
 
 settings(
     batch_size=batch_size,

@@ -98,6 +109,9 @@ elif layer_num == 19:
 else:
     print("Wrong layer number.")
 
-lab = data_layer('label', num_class)
-loss = cross_entropy(input=vgg, label=lab)
-outputs(loss)
+if is_infer:
+    outputs(vgg)
+else:
+    lab = data_layer('label', num_class)
+    loss = cross_entropy(input=vgg, label=lab)
+    outputs(loss)