BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit dd6b59da
Authored on Feb 08, 2018 by chengduoZH
Parent: ae0740ce

add Python interface of prior_boxes
Showing 1 changed file with 151 additions and 1 deletion:

python/paddle/v2/fluid/layers/nn.py (+151, -1)
```diff
@@ -14,7 +14,7 @@
 """
 All layers just related to the neural network.
 """
-
+import math
 from ..layer_helper import LayerHelper
 from ..initializer import Normal, Constant
 from ..framework import Variable
```
```diff
@@ -65,6 +65,7 @@ __all__ = [
     'beam_search',
     'row_conv',
     'multiplex',
+    'prior_boxes',
 ]
```
```diff
@@ -2993,3 +2994,152 @@ def multiplex(inputs, index):
                 'Ids': index},
         outputs={'Out': [out]})
     return out
+
+
+def prior_box(input,
+              image,
+              min_sizes,
+              max_sizes,
+              aspect_ratios,
+              variance,
+              flip,
+              clip,
+              step_w,
+              step_h,
+              offset,
+              name=None):
+    """
+    **Prior_box**
+    """
+    helper = LayerHelper("prior_box", **locals())
+    dtype = helper.input_dtype()
+
+    box = helper.create_tmp_variable(dtype)
+    var = helper.create_tmp_variable(dtype)
+    helper.append_op(
+        type="prior_box",
+        inputs={"Input": input,
+                "Image": image},
+        outputs={"Boxes": box,
+                 "Variances": var},
+        attrs={
+            'min_sizes': min_sizes,
+            'max_sizes': max_sizes,
+            'aspect_ratios': aspect_ratios,
+            'variances': variance,
+            'flip': flip,
+            'clip': clip,
+            'step_w': step_w,
+            'step_h': step_h,
+            'offset': offset
+        })
+    return box, var
```
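The wrapper above simply forwards its arguments to the prior_box operator and returns the newly created box and variance tensors. As a rough, hypothetical usage sketch (not part of the commit), a single-layer call might look like the following; the `image` and `feature_map` variables, the data/conv2d setup around them, and all of the size and step values are placeholder assumptions:

```python
# Hypothetical usage sketch, not from the commit: the data/conv2d setup and
# every numeric value below are placeholder assumptions for illustration.
import paddle.v2.fluid as fluid
from paddle.v2.fluid.layers.nn import prior_box

image = fluid.layers.data(name='image', shape=[3, 300, 300], dtype='float32')
feature_map = fluid.layers.conv2d(input=image, num_filters=64, filter_size=3)

# One call produces prior boxes and their variances for this feature map.
box, var = prior_box(
    input=feature_map,
    image=image,
    min_sizes=[30.0],
    max_sizes=[60.0],
    aspect_ratios=[2.0],
    variance=[0.1, 0.1, 0.2, 0.2],
    flip=True,
    clip=True,
    step_w=8.0,
    step_h=8.0,
    offset=0.5)
```

The commit's second, higher-level function, prior_boxes, builds on this per-layer call: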
```diff
+
+
+def prior_boxes(input_layers,
+                image,
+                min_ratio,
+                max_ratio,
+                steps,
+                aspect_ratios,
+                min_dim,
+                step_w=None,
+                step_h=None,
+                offset=0.5,
+                variance=[0.1],
+                flip=True,
+                clip=True,
+                name=None):
+    """
+    **Prior_boxes**
+
+    e.g.
+    prior_boxes(
+        input_layers = [conv1, conv2, conv3, conv4, conv5, conv6],
+        image = data,
+        min_ratio = 0.2,
+        max_ratio = 0.9,
+        steps = [8, 16, 32, 64, 100, 300],
+        aspect_ratios = [[2], [2, 3], [2, 3], [2, 3], [2], [2]],
+        min_dim = 300,
+        offset = 0.5,
+        variance = [0.1],
+        flip=True,
+        clip=True)
+    """
+    assert isinstance(input_layers, list), 'input_layer should be a list.'
+    assert not step_h and not steps, ''
+    assert not step_w and not steps, ''
+
+    num_layer = len(input_layers)
+    assert num_layer > 2  # TODO(zcd): currently, num_layer must be bigger than two.
+
+    min_sizes = []
+    max_sizes = []
+    if num_layer > 2:
+        step = int(math.floor((max_ratio - min_ratio) / (num_layer - 2)))
+        for ratio in xrange(min_ratio, max_ratio + 1, step):
+            min_sizes.append(min_dim * ratio)
+            max_sizes.append(min_dim * (ratio + step))
+        min_sizes = [min_dim * .10] + min_sizes
+        max_sizes = [min_dim * .20] + max_sizes
+
+    if step_h:
+        assert isinstance(step_h, list) and len(step_h) == num_layer, \
+            'step_h should be list and input_layers and step_h should have same length'
+    if step_w:
+        assert isinstance(step_w, list) and len(step_w) == num_layer, \
+            'step_w should be list and input_layers and step_w should have same length'
+    if steps:
+        assert isinstance(steps, list) and len(step_w) == num_layer, \
+            'steps should be list and input_layers and step_w should have same length'
+        step_w = steps
+        step_h = steps
+    if aspect_ratios:
+        assert isinstance(aspect_ratios, list) and len(aspect_ratios) == num_layer, \
+            'aspect_ratios should be list and input_layers and aspect_ratios should ' \
+            'have same length'
+
+    helper = LayerHelper("prior_box", **locals())
+    dtype = helper.input_dtype()
+
+    box_results = []
+    var_results = []
+    for i, input in enumerate(input_layers):
+        min_size = min_sizes[i]
+        max_size = max_sizes[i]
+        if isinstance(min_size, list):
+            min_size = [min_size]
+        if isinstance(max_size, list):
+            max_size = [max_size]
+        if aspect_ratios:
+            aspect_ratio = aspect_ratios[i]
+            if isinstance(aspect_ratio, list):
+                aspect_ratio = [aspect_ratio]
+
+        box, var = prior_box(input, image, min_size, max_size, aspect_ratios,
+                             variance, flip, clip, step_w[i], step_h[i], offset)
+
+        box_results.append(box)
+        var_results.append(var)
+
+    if len(box_results) == 1:
+        box = box_results[0]
+        var = var_results[0]
+    else:
+        axis = 1
+        box = helper.create_tmp_variable(dtype)
+        helper.append_op(
+            type="concat",
+            inputs={"X": box_results},
+            outputs={"Out": box},
+            attrs={'axis': axis})
+
+        var = helper.create_tmp_variable(dtype)
+        helper.append_op(
+            type="concat",
+            inputs={"X": var_results},
+            outputs={"Out": var},
+            attrs={'axis': axis})
+
+    return box, var
```
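Before it calls prior_box for each layer, prior_boxes derives a ladder of (min_size, max_size) pairs from min_ratio, max_ratio, and min_dim. The standalone sketch below isolates that arithmetic so the resulting values can be inspected; it assumes percentage-style integer ratios (20 and 90) because the loop uses xrange, which expects integers, and it mirrors the committed loop literally, including multiplying min_dim by the raw ratio.

```python
# Standalone sketch, not part of the commit: the size-ladder arithmetic from
# prior_boxes, with percentage-style integer ratios assumed (e.g. 20 and 90).
import math


def build_size_ladder(min_ratio, max_ratio, min_dim, num_layer):
    min_sizes = []
    max_sizes = []
    # Evenly spaced ratios cover all but the first feature map ...
    step = int(math.floor((max_ratio - min_ratio) / (num_layer - 2)))
    for ratio in range(min_ratio, max_ratio + 1, step):
        min_sizes.append(min_dim * ratio)
        max_sizes.append(min_dim * (ratio + step))
    # ... and a fixed, smaller pair is prepended for the first feature map.
    min_sizes = [min_dim * .10] + min_sizes
    max_sizes = [min_dim * .20] + max_sizes
    return min_sizes, max_sizes


# Six input layers and min_dim = 300, as in the docstring example:
# step = floor((90 - 20) / 4) = 17, giving ratios 20, 37, 54, 71, 88.
min_sizes, max_sizes = build_size_ladder(20, 90, 300, 6)
print(min_sizes)  # [30.0, 6000, 11100, 16200, 21300, 26400]
print(max_sizes)  # [60.0, 11100, 16200, 21300, 26400, 31500]
```

Each list ends up with one entry per input layer, which is what the per-layer loop in prior_boxes indexes with min_sizes[i] and max_sizes[i].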
登录