Commit 54bd17fe: Complete Flowers
Repository: s920243400 / PaddleDetection (fork of PaddlePaddle / PaddleDetection)
Author: Yu Yang
Date: March 26, 2018
Parent: 50e7e25d
Showing 4 changed files with 144 additions and 4 deletions (+144, -4):

  paddle/fluid/framework/details/op_handle_base.cc                +7   -1
  paddle/fluid/framework/details/ssa_graph_builder.cc             +1   -1
  python/paddle/fluid/tests/unittests/.gitignore                  +1   -0
  python/paddle/fluid/tests/unittests/test_parallel_executor.py   +135 -2
paddle/fluid/framework/details/op_handle_base.cc

@@ -31,7 +31,13 @@ std::string OpHandleBase::DebugString() const {
   return ss.str();
 }
 
-OpHandleBase::~OpHandleBase() {}
+OpHandleBase::~OpHandleBase() {
+#ifdef PADDLE_WITH_CUDA
+  for (auto &ev : events_) {
+    cudaEventDestroy(ev.second);
+  }
+#endif
+}
 
 void OpHandleBase::Run(bool use_event) {
 #ifdef PADDLE_WITH_CUDA
paddle/fluid/framework/details/ssa_graph_builder.cc

@@ -21,7 +21,7 @@ void SSAGraphBuilder::PolishGraphToSupportDataHazards(SSAGraph *graph) {
   for (auto &var_map : graph->vars_) {
     for (auto &name_pair : var_map) {
       if (name_pair.second.size() <= 1) {
-        return;
+        continue;
       }
       auto it_new = name_pair.second.rbegin();
       auto it_old = name_pair.second.rbegin();
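For readers skimming the hunk above, here is a minimal illustration of the control-flow difference it fixes; this sketch is not from the commit, the names only loosely mirror graph->vars_, and none of it is PaddlePaddle API. An early return inside the nested loop abandons every remaining variable in the graph, while continue skips only the variable that has a single version.

import collections

# Hypothetical sketch of the return-vs-continue fix in PolishGraphToSupportDataHazards.
def process_with_return(var_maps):
    handled = []
    for var_map in var_maps:
        for name, versions in var_map.items():
            if len(versions) <= 1:
                return handled  # old behaviour: stops the whole traversal
            handled.append(name)
    return handled

def process_with_continue(var_maps):
    handled = []
    for var_map in var_maps:
        for name, versions in var_map.items():
            if len(versions) <= 1:
                continue  # new behaviour: skip only this variable
            handled.append(name)
    return handled

var_maps = [collections.OrderedDict(a=[1], b=[1, 2]), {"c": [1, 2, 3]}]
print(process_with_return(var_maps))    # [] -- "b" and "c" never get processed
print(process_with_continue(var_maps))  # ['b', 'c']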
python/paddle/fluid/tests/unittests/.gitignore

@@ -2,3 +2,4 @@ mnist.recordio
 mnist_0.recordio
 mnist_1.recordio
 mnist_2.recordio
+flowers.recordio
python/paddle/fluid/tests/unittests/test_parallel_executor.py

@@ -16,6 +16,7 @@ import unittest
 import paddle.fluid as fluid
 import paddle.v2 as paddle
 import paddle.v2.dataset.mnist as mnist
+import paddle.v2.dataset.flowers as flowers
 import numpy
@@ -64,6 +65,119 @@ def fc_with_batchnorm():
     return loss
 
 
+def squeeze_excitation(input, num_channels, reduction_ratio):
+    # pool = fluid.layers.pool2d(
+    #    input=input, pool_size=0, pool_type='avg', global_pooling=True)
+    conv = input
+    shape = conv.shape
+    reshape = fluid.layers.reshape(
+        x=conv, shape=[-1, shape[1], shape[2] * shape[3]])
+    pool = fluid.layers.reduce_mean(input=reshape, dim=2)
+    squeeze = fluid.layers.fc(input=pool,
+                              size=num_channels / reduction_ratio,
+                              act='relu')
+    excitation = fluid.layers.fc(input=squeeze,
+                                 size=num_channels,
+                                 act='sigmoid')
+    scale = fluid.layers.elementwise_mul(x=input, y=excitation, axis=0)
+    return scale
+
+
+def conv_bn_layer(input, num_filters, filter_size, stride=1, groups=1,
+                  act=None):
+    conv = fluid.layers.conv2d(
+        input=input,
+        num_filters=num_filters,
+        filter_size=filter_size,
+        stride=stride,
+        padding=(filter_size - 1) / 2,
+        groups=groups,
+        act=None,
+        bias_attr=False)
+    return fluid.layers.batch_norm(input=conv, act=act, momentum=0.1)
+
+
+def shortcut(input, ch_out, stride):
+    ch_in = input.shape[1]
+    if ch_in != ch_out:
+        if stride == 1:
+            filter_size = 1
+        else:
+            filter_size = 3
+        return conv_bn_layer(input, ch_out, filter_size, stride)
+    else:
+        return input
+
+
+def bottleneck_block(input, num_filters, stride, cardinality, reduction_ratio):
+    # The number of first 1x1 convolutional channels for each bottleneck build
+    # block was halved to reduce the computation cost.
+    conv0 = conv_bn_layer(
+        input=input, num_filters=num_filters, filter_size=1, act='relu')
+    conv1 = conv_bn_layer(
+        input=conv0,
+        num_filters=num_filters * 2,
+        filter_size=3,
+        stride=stride,
+        groups=cardinality,
+        act='relu')
+    conv2 = conv_bn_layer(
+        input=conv1, num_filters=num_filters * 2, filter_size=1, act=None)
+    scale = squeeze_excitation(
+        input=conv2,
+        num_channels=num_filters * 2,
+        reduction_ratio=reduction_ratio)
+
+    short = shortcut(input, num_filters * 2, stride)
+
+    return fluid.layers.elementwise_add(x=short, y=scale, act='relu')
+
+
+def SE_ResNeXt152():
+    reader = fluid.layers.open_recordio_file(
+        filename='./flowers.recordio',
+        shapes=[[-1, 3, 224, 224], [-1, 1]],
+        lod_levels=[0, 0],
+        dtypes=['float32', 'int64'])
+
+    img, label = fluid.layers.read_file(reader)
+    conv = conv_bn_layer(
+        input=img, num_filters=64, filter_size=3, stride=2, act='relu')
+    conv = conv_bn_layer(
+        input=conv, num_filters=64, filter_size=3, stride=1, act='relu')
+    conv = conv_bn_layer(
+        input=conv, num_filters=128, filter_size=3, stride=1, act='relu')
+    conv = fluid.layers.pool2d(
+        input=conv, pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')
+
+    cardinality = 64
+    reduction_ratio = 16
+    depth = [3, 8, 36, 3]
+    num_filters = [128, 256, 512, 1024]
+
+    for block in range(len(depth)):
+        for i in range(depth[block]):
+            conv = bottleneck_block(
+                input=conv,
+                num_filters=num_filters[block],
+                stride=2 if i == 0 and block != 0 else 1,
+                cardinality=cardinality,
+                reduction_ratio=reduction_ratio)
+
+    shape = conv.shape
+    reshape = fluid.layers.reshape(
+        x=conv, shape=[-1, shape[1], shape[2] * shape[3]])
+    pool = fluid.layers.reduce_mean(input=reshape, dim=2)
+    dropout = fluid.layers.dropout(x=pool, dropout_prob=0.2)
+    # Classifier layer:
+    prediction = fluid.layers.fc(input=dropout, size=1000, act='softmax')
+    loss = fluid.layers.cross_entropy(input=prediction, label=label)
+    loss = fluid.layers.mean(loss)
+    return loss
+
+
 class ParallelExecutor(unittest.TestCase):
     @classmethod
     def setUpClass(cls):
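One non-obvious detail in the new squeeze_excitation and SE_ResNeXt152 code above is that global average pooling is emulated by a reshape followed by reduce_mean over the flattened spatial axis (the direct pool2d call is left commented out). A small NumPy check of why the two are equivalent; this snippet is not part of the commit and the array sizes are made up for illustration:

import numpy as np

# Hypothetical sanity check: global average pooling over an NCHW feature map
# equals reshaping to (N, C, H*W) and averaging over the last axis, which is
# what the reshape + reduce_mean(dim=2) pair in the diff computes.
x = np.random.rand(4, 16, 7, 7).astype('float32')

gap = x.mean(axis=(2, 3))                      # global average pooling
flat = x.reshape(4, 16, 7 * 7).mean(axis=2)    # reshape + mean over dim 2

assert np.allclose(gap, flat)
print(gap.shape)  # (4, 16)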
@@ -81,24 +195,40 @@ class ParallelExecutor(unittest.TestCase):
         fluid.recordio_writer.convert_reader_to_recordio_file(
             './mnist.recordio', reader, feeder)
 
+        with fluid.program_guard(fluid.Program(), fluid.Program()):
+            reader = paddle.batch(flowers.train(), batch_size=4)
+            feeder = fluid.DataFeeder(
+                feed_list=[
+                    fluid.layers.data(
+                        name='image', shape=[3, 224, 224]),
+                    fluid.layers.data(
+                        name='label', shape=[1], dtype='int64'),
+                ],
+                place=fluid.CPUPlace())
+            fluid.recordio_writer.convert_reader_to_recordio_file(
+                "./flowers.recordio", reader, feeder)
+
     def test_simple_fc(self):
         self.check_network_convergence(simple_fc_net)
 
     def test_batchnorm_fc(self):
         self.check_network_convergence(fc_with_batchnorm)
 
-    def check_network_convergence(self, method):
+    def check_network_convergence(self, method, memory_opt=True, iter=10):
         main = fluid.Program()
         startup = fluid.Program()
         with fluid.program_guard(main, startup):
             loss = method()
             adam = fluid.optimizer.Adam()
             adam.minimize(loss)
+            if memory_opt:
+                fluid.memory_optimize(main)
+
             exe = fluid.ParallelExecutor(loss_name=loss.name, use_cuda=True)
             first_loss, = exe.run([loss.name])
             first_loss = numpy.array(first_loss)
 
-            for i in xrange(10):
+            for i in xrange(iter):
                 exe.run([])
 
             last_loss, = exe.run([loss.name])
@@ -106,3 +236,6 @@ class ParallelExecutor(unittest.TestCase):
             print first_loss, last_loss
 
             self.assertGreater(first_loss[0], last_loss[0])
+
+    def test_resnet(self):
+        self.check_network_convergence(SE_ResNeXt152, iter=20)
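Usage note, hedged: the new test_resnet case (and the widened check_network_convergence signature it relies on) can be exercised on its own through the standard unittest machinery. The snippet below is a sketch, not part of the commit; it assumes the changed module is importable as test_parallel_executor and that a CUDA device is available, since the helper builds a ParallelExecutor with use_cuda=True and reads the ./flowers.recordio file written in setUpClass.

import unittest

import test_parallel_executor  # the module modified by this commit

# Hypothetical runner: select only the new SE-ResNeXt convergence test.
suite = unittest.TestSuite()
suite.addTest(test_parallel_executor.ParallelExecutor('test_resnet'))
unittest.TextTestRunner(verbosity=2).run(suite)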