机器未来 / Paddle (forked from PaddlePaddle / Paddle)
Commit 707a9c9b

Fix variable name and add the annotation

Authored on Dec 15, 2016 by gaoyuan
Parent: c0076084

Showing 3 changed files with 63 additions and 79 deletions:
  paddle/gserver/layers/PriorBox.cpp              +58  -72
  python/paddle/trainer/config_parser.py           +0   -2
  python/paddle/trainer_config_helpers/layers.py   +5   -5
paddle/gserver/layers/PriorBox.cpp  (view file @ 707a9c9b)

@@ -17,6 +17,15 @@ limitations under the License. */
 #include "paddle/math/BaseMatrix.h"

 namespace paddle {

+/**
+ * @brief A layer for generate prior box locations and variances.
+ * - Input: Two and only two input layer are accepted. The input layer must be
+ *          be a data output layer and a convolution output layer.
+ * - Output: The prior box locations and variances of the input data.
+ * Reference:
+ *    Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed,
+ *    Cheng-Yang Fu, Alexander C. Berg. SSD: Single Shot MultiBox Detector
+ */
 class PriorBoxLayer : public Layer {
 public:
@@ -24,106 +33,84 @@ public:
   bool init(const LayerMap& layerMap, const ParameterMap& parameterMap);

   void forward(PassType passType);
   void backward(const UpdateCallback& callback) {}
-  void forwardImp(const Argument& featureMap, const Argument& imageShape);

   int numPriors_;
   std::vector<int> minSize_;
   std::vector<int> maxSize_;
   std::vector<float> aspectRatio_;
   std::vector<float> variance_;
-  std::vector<Argument> tmpCpuInput_;
   MatrixPtr buffer_;
 };

 bool PriorBoxLayer::init(const LayerMap& layerMap,
                          const ParameterMap& parameterMap) {
   Layer::init(layerMap, parameterMap);
-  auto pb_conf = config_.inputs(0).priorbox_conf();
+  auto pbConf = config_.inputs(0).priorbox_conf();
-  std::copy(pb_conf.min_size().begin(),
-            pb_conf.min_size().end(),
+  std::copy(pbConf.min_size().begin(),
+            pbConf.min_size().end(),
             std::back_inserter(minSize_));
-  std::copy(pb_conf.max_size().begin(),
-            pb_conf.max_size().end(),
+  std::copy(pbConf.max_size().begin(),
+            pbConf.max_size().end(),
             std::back_inserter(maxSize_));
-  std::copy(pb_conf.aspect_ratio().begin(),
-            pb_conf.aspect_ratio().end(),
+  std::copy(pbConf.aspect_ratio().begin(),
+            pbConf.aspect_ratio().end(),
             std::back_inserter(aspectRatio_));
-  std::copy(pb_conf.variance().begin(),
-            pb_conf.variance().end(),
+  std::copy(pbConf.variance().begin(),
+            pbConf.variance().end(),
             std::back_inserter(variance_));
   // flip
-  int input_ratio_length = aspectRatio_.size();
-  for (int index = 0; index < input_ratio_length; index++)
+  int inputRatioLength = aspectRatio_.size();
+  for (int index = 0; index < inputRatioLength; index++)
     aspectRatio_.push_back(1 / aspectRatio_[index]);
   aspectRatio_.push_back(1.);
   numPriors_ = aspectRatio_.size();
   if (maxSize_.size() > 0) numPriors_++;
-  buffer_ = Matrix::create(1, 1, false, false);
-  if (useGpu_) {
-    tmpCpuInput_.reserve(inputLayers_.size());
-    for (size_t i = 0; i < inputLayers_.size(); i++) {
-      tmpCpuInput_.push_back(Argument());
-    }
-  }
   return true;
 }

 void PriorBoxLayer::forward(PassType passType) {
   Layer::forward(passType);
-  if (useGpu_) {
-    for (size_t i = 0; i < inputLayers_.size(); i++) {
-      tmpCpuInput_[i].resizeAndCopyFrom(
-          getInput(i), false, HPPL_STREAM_DEFAULT);
-      hl_stream_synchronize(HPPL_STREAM_DEFAULT);
-      forwardImp(tmpCpuInput_[0], tmpCpuInput_[1]);
-    }
-  } else {
-    forwardImp(getInput(0), getInput(1));
-  }
-}
-
-void PriorBoxLayer::forwardImp(const Argument& featureMap,
-                               const Argument& imageShape) {
-  int layer_width = featureMap.getFrameWidth();
-  int layer_height = featureMap.getFrameHeight();
-  MatrixPtr inV1 = imageShape.value;
-  int image_width = inV1->getElement(0, 0);
-  int image_height = inV1->getElement(0, 1);
-  float step_w = static_cast<float>(image_width) / layer_width;
-  float step_h = static_cast<float>(image_height) / layer_height;
-  int dim = layer_height * layer_width * numPriors_ * 4;
+  auto input = getInput(0);
+  int layerWidth = input.getFrameWidth();
+  int layerHeight = input.getFrameHeight();
+
+  auto image = getInput(1);
+  int imageWidth = image.getFrameWidth();
+  int imageHeight = image.getFrameHeight();
+
+  float stepW = static_cast<float>(imageWidth) / layerWidth;
+  float stepH = static_cast<float>(imageHeight) / layerHeight;
+  int dim = layerHeight * layerWidth * numPriors_ * 4;
   reserveOutput(1, dim * 2);
   // use a cpu buffer to compute
   Matrix::resizeOrCreate(buffer_, 1, dim * 2, false, false);
-  auto* tmp_ptr = buffer_->getData();
+  auto* tmpPtr = buffer_->getData();

   int idx = 0;
-  for (int h = 0; h < layer_height; ++h) {
-    for (int w = 0; w < layer_width; ++w) {
-      float center_x = (w + 0.5) * step_w;
-      float center_y = (h + 0.5) * step_h;
-      int min_size = 0;
+  for (int h = 0; h < layerHeight; ++h) {
+    for (int w = 0; w < layerWidth; ++w) {
+      float centerX = (w + 0.5) * stepW;
+      float centerY = (h + 0.5) * stepH;
+      int minSize = 0;
       for (size_t s = 0; s < minSize_.size(); s++) {
         // first prior.
-        min_size = minSize_[s];
-        int box_width = min_size;
-        int box_height = min_size;
+        minSize = minSize_[s];
+        int boxWidth = minSize;
+        int boxHeight = minSize;
         // xmin, ymin, xmax, ymax.
-        tmp_ptr[idx++] = (center_x - box_width / 2.) / image_width;
-        tmp_ptr[idx++] = (center_y - box_height / 2.) / image_height;
-        tmp_ptr[idx++] = (center_x + box_width / 2.) / image_width;
-        tmp_ptr[idx++] = (center_y + box_height / 2.) / image_height;
+        tmpPtr[idx++] = (centerX - boxWidth / 2.) / imageWidth;
+        tmpPtr[idx++] = (centerY - boxHeight / 2.) / imageHeight;
+        tmpPtr[idx++] = (centerX + boxWidth / 2.) / imageWidth;
+        tmpPtr[idx++] = (centerY + boxHeight / 2.) / imageHeight;

         if (maxSize_.size() > 0) {
           CHECK_EQ(minSize_.size(), maxSize_.size());
           // second prior.
           for (size_t s = 0; s < maxSize_.size(); s++) {
-            int max_size = maxSize_[s];
-            box_width = box_height = sqrt(min_size * max_size);
-            tmp_ptr[idx++] = (center_x - box_width / 2.) / image_width;
-            tmp_ptr[idx++] = (center_y - box_height / 2.) / image_height;
-            tmp_ptr[idx++] = (center_x + box_width / 2.) / image_width;
-            tmp_ptr[idx++] = (center_y + box_height / 2.) / image_height;
+            int maxSize = maxSize_[s];
+            boxWidth = boxHeight = sqrt(minSize * maxSize);
+            tmpPtr[idx++] = (centerX - boxWidth / 2.) / imageWidth;
+            tmpPtr[idx++] = (centerY - boxHeight / 2.) / imageHeight;
+            tmpPtr[idx++] = (centerX + boxWidth / 2.) / imageWidth;
+            tmpPtr[idx++] = (centerY + boxHeight / 2.) / imageHeight;
           }
         }
       }
@@ -131,27 +118,26 @@ void PriorBoxLayer::forwardImp(const Argument& featureMap,
       for (size_t r = 0; r < aspectRatio_.size(); r++) {
         float ar = aspectRatio_[r];
         if (fabs(ar - 1.) < 1e-6) continue;
-        float box_width = min_size * sqrt(ar);
-        float box_height = min_size / sqrt(ar);
-        tmp_ptr[idx++] = (center_x - box_width / 2.) / image_width;
-        tmp_ptr[idx++] = (center_y - box_height / 2.) / image_height;
-        tmp_ptr[idx++] = (center_x + box_width / 2.) / image_width;
-        tmp_ptr[idx++] = (center_y + box_height / 2.) / image_height;
+        float boxWidth = minSize * sqrt(ar);
+        float boxHeight = minSize / sqrt(ar);
+        tmpPtr[idx++] = (centerX - boxWidth / 2.) / imageWidth;
+        tmpPtr[idx++] = (centerY - boxHeight / 2.) / imageHeight;
+        tmpPtr[idx++] = (centerX + boxWidth / 2.) / imageWidth;
+        tmpPtr[idx++] = (centerY + boxHeight / 2.) / imageHeight;
       }
     }
   }
   // clip the prior's coordidate such that it is within [0, 1]
   for (int d = 0; d < dim; ++d)
-    tmp_ptr[d] = std::min(std::max(tmp_ptr[d], (float)0.), (float)1.);
+    tmpPtr[d] = std::min(std::max(tmpPtr[d], (float)0.), (float)1.);
   // set the variance.
-  for (int h = 0; h < layer_height; h++)
-    for (int w = 0; w < layer_width; w++)
+  for (int h = 0; h < layerHeight; h++)
+    for (int w = 0; w < layerWidth; w++)
       for (int i = 0; i < numPriors_; i++)
-        for (int j = 0; j < 4; j++) tmp_ptr[idx++] = variance_[j];
+        for (int j = 0; j < 4; j++) tmpPtr[idx++] = variance_[j];
   MatrixPtr outV = getOutputValue();
   outV->copyFrom(buffer_->data_, dim * 2);
 }
 REGISTER_LAYER(priorbox, PriorBoxLayer);

 }  // namespace paddle
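Taken together, init() collects min_size, max_size, aspect_ratio and variance from the layer config, flips each aspect ratio and appends 1.0, and the merged forward() then walks every cell of the input feature map, emitting numPriors_ boxes per cell as image-normalized xmin/ymin/xmax/ymax, clipping them to [0, 1], and appending a 4-element variance vector per box. The snippet below is a minimal Python sketch of that math, for illustration only; the function name and the plain-list inputs and outputs are assumptions of the sketch, not Paddle API.

import math


def prior_boxes(layer_w, layer_h, image_w, image_h,
                min_sizes, max_sizes, aspect_ratios, variances):
    # init(): flip every aspect ratio (r -> 1/r) and append 1.0.
    ratios = list(aspect_ratios) + [1.0 / r for r in aspect_ratios] + [1.0]
    num_priors = len(ratios) + (1 if max_sizes else 0)

    step_w = float(image_w) / layer_w   # stepW in the C++ code
    step_h = float(image_h) / layer_h   # stepH in the C++ code
    boxes = []                          # xmin, ymin, xmax, ymax per prior

    def emit(cx, cy, bw, bh):
        boxes.extend([(cx - bw / 2.0) / image_w, (cy - bh / 2.0) / image_h,
                      (cx + bw / 2.0) / image_w, (cy + bh / 2.0) / image_h])

    for h in range(layer_h):
        for w in range(layer_w):
            cx = (w + 0.5) * step_w
            cy = (h + 0.5) * step_h
            min_size = 0
            for min_size in min_sizes:
                # first prior: a square box with side min_size.
                emit(cx, cy, min_size, min_size)
                # second prior: a square box with side sqrt(min * max).
                for max_size in max_sizes:
                    side = math.sqrt(min_size * max_size)
                    emit(cx, cy, side, side)
            # remaining priors: one box per aspect ratio != 1, using the
            # last min_size, exactly as the C++ loop above does.
            for ar in ratios:
                if abs(ar - 1.0) < 1e-6:
                    continue
                emit(cx, cy, min_size * math.sqrt(ar), min_size / math.sqrt(ar))

    # clip box coordinates into [0, 1], then append one 4-element variance
    # vector per prior; the layer output is the concatenation of the two.
    boxes = [min(max(v, 0.0), 1.0) for v in boxes]
    variance_tail = [variances[j]
                     for _ in range(layer_h * layer_w * num_priors)
                     for j in range(4)]
    return boxes + variance_tail

For example, a 3x3 feature map on a 300x300 image with min_size=[30], max_size=[60] and aspect_ratio=[2.] gives numPriors_ = 4, i.e. 3*3*4 = 36 priors and an output row of dim*2 = 3*3*4*4*2 = 288 values.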
python/paddle/trainer/config_parser.py  (view file @ 707a9c9b)

@@ -1589,8 +1589,6 @@ class PriorBoxLayer(LayerBase):
         self.config.inputs[0].priorbox_conf.aspect_ratio.extend(aspect_ratio)
         self.config.inputs[0].priorbox_conf.variance.extend(variance)
         self.config.size = size
-        input_layer0 = self.get_input_layer(0)
-        input_layer1 = self.get_input_layer(1)

 @config_layer('data')
python/paddle/trainer_config_helpers/layers.py  (view file @ 707a9c9b)

@@ -938,7 +938,7 @@ def print_layer(input, name=None):
 @wrap_name_default("priorbox")
 def priorbox_layer(input,
-                   img_shape,
+                   image,
                    aspect_ratio,
                    variance,
                    min_size,
@@ -951,8 +951,8 @@ def priorbox_layer(input,
     :type name: basestring
     :param input: The input layer.
     :type input: LayerOutput
-    :param img_shape: The width and height of the network input image.
-    :type img_shape: LayerOutput
+    :param image: The network input image.
+    :type image: LayerOutput
     :param aspect_ratio: The aspect ratio.
     :type aspect_ratio: list
     :param variance: The bounding box variance.
@@ -968,7 +968,7 @@ def priorbox_layer(input,
     Layer(
         name=name,
         type=LayerType.PRIORBOX_LAYER,
-        inputs=[input.name, img_shape.name],
+        inputs=[input.name, image.name],
         size=size,
         min_size=min_size,
         max_size=max_size,
@@ -977,7 +977,7 @@ def priorbox_layer(input,
     return LayerOutput(
         name,
         LayerType.PRIORBOX_LAYER,
-        parents=[input, img_shape],
+        parents=[input, image],
         num_filters=num_filters,
         size=size)
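With this rename, priorbox_layer takes the network input image layer itself (image) rather than a separate width-and-height data layer (img_shape). A hypothetical call could look like the sketch below; conv_feat and img stand for an already-defined convolution output layer and the input image data layer, and passing max_size as a keyword is an assumption inferred from the Layer(..., max_size=max_size, ...) body, since the rest of the parameter list is elided in this diff.

from paddle.trainer_config_helpers.layers import priorbox_layer

# Hypothetical usage sketch after this commit. conv_feat and img are assumed
# pre-existing LayerOutput objects; parameters not shown in the diff are
# omitted, and max_size as a keyword is assumed from the Layer(...) call.
prior = priorbox_layer(
    input=conv_feat,                  # convolution output layer (feature map)
    image=img,                        # was img_shape before this commit
    aspect_ratio=[2.0],
    variance=[0.1, 0.1, 0.2, 0.2],
    min_size=[30],
    max_size=[60])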