PaddlePaddle / X2Paddle

Commit 1d526a23
Authored Aug 01, 2019 by SunAhong1993
Parent: e3b4b14d

    add custom layer v2

Showing 12 changed files with 363 additions and 436 deletions (+363 -436)
x2paddle/op_mapper/caffe_custom_layer/__init__.py                +22   -6
x2paddle/op_mapper/caffe_custom_layer/axpy.py                    +29   -0
x2paddle/op_mapper/caffe_custom_layer/convolutiondepthwise.py     +7   -7
x2paddle/op_mapper/caffe_custom_layer/detectionoutput.py         +57   -0
x2paddle/op_mapper/caffe_custom_layer/normalize.py               +35   -0
x2paddle/op_mapper/caffe_custom_layer/permute.py                 +29   -0
x2paddle/op_mapper/caffe_custom_layer/priorbox.py                +58   -0
x2paddle/op_mapper/caffe_custom_layer/roipooling.py              +40   -0
x2paddle/op_mapper/caffe_custom_layer/select.py                  +51   -0
x2paddle/op_mapper/caffe_custom_layer/shufflechannel.py          +23   -0
x2paddle/op_mapper/caffe_op_mapper.py                            +12   -332
x2paddle/op_mapper/caffe_shape.py                                 +0   -91
x2paddle/op_mapper/caffe_custom_layer/__init__.py

from .register import get_registered_layers
#custom layer import begins
# from . import roipooling
# from . import priorbox
# from . import permute
# from . import detection_out
# from . import normalize
# from . import select
from . import roipooling
from . import priorbox
from . import permute
from . import detectionoutput
from . import normalize
from . import select
from . import shufflechannel
from . import convolutiondepthwise
from . import axpy
#custom layer import ends

custom_layers = get_registered_layers()

...
@@ -38,11 +40,25 @@ def has_layer(layer_type):


def get_params(layer, layer_type):
    import re
    if layer_type.lower() == "deconvolution" or layer_type.lower(
    ) == "convolutiondepthwise":
        param_name = '_'.join(('convolution', 'param'))
    elif layer_type.lower() == "normalize":
        param_name = '_'.join(('norm', 'param'))
    elif len(layer_type) - len(re.sub("[A-Z]", "", layer_type)) >= 2:
        s = ''
        tmp_name = ''
        for i, ch in enumerate(layer_type):
            if i == 0:
                s += ch.lower()
                continue
            elif ch.isupper() and layer_type[i - 1].islower():
                tmp_name += (s + '_')
                s = ''
            s += ch
        tmp_name += s
        param_name = '_'.join((tmp_name, 'param'))
    else:
        param_name = '_'.join((layer_type.lower(), 'param'))
    return getattr(layer, param_name, None)

...
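The imports above work by side effect: each submodule calls register() from .register at import time, and custom_layers collects the result through get_registered_layers(). The register module itself is not part of this commit, so the following is only a minimal sketch of the registry it is assumed to provide; every name below other than register and get_registered_layers is a guess.

# Hypothetical sketch of the .register module imported above (not in this diff).
g_custom_layers = {}


def register(kind, shape, layer, weights):
    # Record one custom layer under its Caffe type name, together with its
    # shape-inference function, its code-emitting layer function and its
    # weight-naming function.
    kinds = kind if isinstance(kind, list) else [kind]
    for k in kinds:
        g_custom_layers[k] = {"shape": shape, "layer": layer, "weights": weights}


def get_registered_layers():
    return g_custom_layers

With something like this in place, importing the caffe_custom_layer package is enough to make types such as 'Axpy' or 'ShuffleChannel' visible to has_layer and get_params.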
x2paddle/op_mapper/caffe_custom_layer/axpy.py  (new file, 0 → 100644)

from .register import register
from x2paddle.core.util import *


def axpy_shape(input_shape):
    assert len(input_shapes) == 3, "not valid input shape for axpy layer"
    assert len(input_shapes[0]) == len(input_shapes[1]), 'should have same dims'
    output_shape = input_shapes[1]
    assert (input_shapes[2] == output_shape), \
        "shape not consistent for axpy[%s <--> %s]" \
        % (str(output_shape), str(input_shapes[2]))
    return [output_shape]


def axpy_layer(inputs, input_shape=None, name=None):
    alpha = inputs[0]
    x = inputs[1]
    y = inputs[2]
    out = fluid.layers.elementwise_mul(x, alpha, axis=0)
    out = fluid.layers.elementwise_add(out, y, name=name)
    print(out)


def axpy_weights(name, data=None):
    weights_name = []
    return weights_name


register(kind='Axpy', shape=axpy_shape, layer=axpy_layer, weights=axpy_weights)
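The two fluid calls above implement out = alpha * x + y, with alpha broadcast from the leading dimensions of x. A standalone numpy sketch of that intended semantics follows; the shapes are illustrative assumptions (the usual Caffe Axpy convention of alpha being [N, C, 1, 1]) and the snippet is not part of the commit.

import numpy as np

# Axpy semantics sketch: alpha assumed [N, C, 1, 1], broadcast over the spatial
# dimensions of x and y, which share shape [N, C, H, W].
alpha = np.random.rand(2, 3, 1, 1).astype("float32")
x = np.random.rand(2, 3, 8, 8).astype("float32")
y = np.random.rand(2, 3, 8, 8).astype("float32")
out = alpha * x + y  # what elementwise_mul(axis=0) followed by elementwise_add computes
print(out.shape)     # (2, 3, 8, 8) -- matches input_shapes[1] in axpy_shape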
x2paddle/op_mapper/caffe_custom_layer/convolutiondepthwise.py

...

@@ -18,19 +18,19 @@ def convolutiondepthwise_shape(input_shape,
    [k_h, k_w] = [1, 1]
    if isinstance(kernel_size, numbers.Number):
        [k_h, k_w] = [kernel_size] * 2
    elif isinstance(kernel_size, list):
    elif isinstance(kernel_size, list) and len(kernel_size) > 0:
        k_h = kernel_h if kernel_h else kernel_size[0]
        k_w = kernel_w if kernel_w else kernel_size[len(kernel_size) - 1]
    [s_h, s_w] = [1, 1]
    if isinstance(stride, numbers.Number):
        [s_h, s_w] = [stride] * 2
    elif isinstance(stride, list):
    elif isinstance(stride, list) and len(stride) > 0:
        s_h = stride_h if stride_h else stride[0]
        s_w = stride_w if stride_w else stride[len(stride) - 1]
    [p_h, p_w] = [0, 0]
    if isinstance(pad, numbers.Number):
        [p_h, p_w] = [pad] * 2
    elif isinstance(pad, list):
    elif isinstance(pad, list) and len(pad) > 0:
        p_h = pad_h if pad_h else pad[0]
        p_w = pad_w if pad_w else pad[len(pad) - 1]
    dila_len = len(dilation)

...
@@ -67,24 +67,24 @@ def convolutiondepthwise_layer(inputs,
                              kernel_w=None,
                              stride_h=None,
                              stride_w=None,
                              input_shape=[],
                              input_shape=None,
                              name=None):
    [k_h, k_w] = [1, 1]
    if isinstance(kernel_size, numbers.Number):
        [k_h, k_w] = [kernel_size] * 2
    elif isinstance(kernel_size, list):
    elif isinstance(kernel_size, list) and len(kernel_size) > 0:
        k_h = kernel_h if kernel_h else kernel_size[0]
        k_w = kernel_w if kernel_w else kernel_size[len(kernel_size) - 1]
    [s_h, s_w] = [1, 1]
    if isinstance(stride, numbers.Number):
        [s_h, s_w] = [stride] * 2
    elif isinstance(stride, list):
    elif isinstance(stride, list) and len(stride) > 0:
        s_h = stride_h if stride_h else stride[0]
        s_w = stride_w if stride_w else stride[len(stride) - 1]
    [p_h, p_w] = [0, 0]
    if isinstance(pad, numbers.Number):
        [p_h, p_w] = [pad] * 2
    elif isinstance(pad, list):
    elif isinstance(pad, list) and len(pad) > 0:
        p_h = pad_h if pad_h else pad[0]
        p_w = pad_w if pad_w else pad[len(pad) - 1]
    input = inputs[0]

...
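Both hunks make the same change six times: the list branch for kernel_size, stride and pad now also requires a non-empty list, so an empty repeated field falls back to the [1, 1] / [0, 0] default instead of indexing into an empty list. A standalone sketch of that normalization rule follows; the helper name hw_pair is made up for illustration and is not part of the commit.

import numbers


def hw_pair(value, default):
    # A bare number means "use it for both H and W"; a non-empty list supplies
    # [h, ..., w]; anything else (e.g. an empty repeated field) keeps the default.
    if isinstance(value, numbers.Number):
        return [value] * 2
    elif isinstance(value, list) and len(value) > 0:
        return [value[0], value[-1]]
    return list(default)


print(hw_pair(3, [1, 1]))       # [3, 3]
print(hw_pair([3, 5], [1, 1]))  # [3, 5]
print(hw_pair([], [1, 1]))      # [1, 1] -- the case the new len(...) > 0 check guards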
x2paddle/op_mapper/caffe_custom_layer/detectionoutput.py  (new file, 0 → 100644)

from .register import register
from x2paddle.core.util import *


def detectionoutput_shape(input_shape):
    return [[-1, 6]]


def detectionoutput_layer(inputs,
                          nms_param=None,
                          background_label_id=0,
                          share_location=True,
                          keep_top_k=100,
                          confidence_threshold=0.1,
                          input_shape=None,
                          name=None):
    if nms_param is None:
        nms_param = {"nms_threshold": 0.3, "top_k": 10, "eta": 1.0}
    mbox_conf_flatten = inputs[1]
    mbox_priorbox = inputs[2]
    mbox_priorbox_list = fluid.layers.split(mbox_priorbox, 2, dim=1)
    pb = mbox_priorbox_list[0]
    pbv = mbox_priorbox_list[1]
    pb = fluid.layers.reshape(x=pb, shape=[-1, 4])
    pbv = fluid.layers.reshape(x=pbv, shape=[-1, 4])
    mbox_loc = inputs[0]
    mbox_loc = fluid.layers.reshape(
        x=mbox_loc, shape=[-1, mbox_conf_flatten.shape[1], 4])

    default = {"nms_threshold": 0.3, "top_k": 10, "eta": 1.0}
    fields = ['eta', 'top_k', 'nms_threshold']
    for f in default.keys():
        if not nms_param.has_key(f):
            nms_param[f] = default[f]
    out = fluid.layers.detection_output(
        scores=mbox_conf_flatten,
        loc=mbox_loc,
        prior_box=pb,
        prior_box_var=pbv,
        background_label=background_label,
        nms_threshold=nms_param["nms_threshold"],
        nms_top_k=nms_param["top_k"],
        keep_top_k=keep_top_k,
        score_threshold=confidence_threshold,
        nms_eta=nms_param["eta"])
    return out


def detectionoutput_weights(name, data=None):
    weights_name = []
    return weights_name


register(kind='DetectionOutput',
         shape=detectionoutput_shape,
         layer=detectionoutput_layer,
         weights=detectionoutput_weights)
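Note that the default-filling loop above relies on dict.has_key, which only exists in Python 2. A sketch of the same merge written so it also runs on Python 3 (the values are illustrative and the snippet is not part of the commit):

# Merge user-supplied NMS parameters with the defaults without dict.has_key.
nms_param = {"nms_threshold": 0.45}  # example of a partially specified nms_param
default = {"nms_threshold": 0.3, "top_k": 10, "eta": 1.0}
for f, v in default.items():
    nms_param.setdefault(f, v)
print(nms_param)  # {'nms_threshold': 0.45, 'top_k': 10, 'eta': 1.0}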
x2paddle/op_mapper/caffe_custom_layer/normalize.py  (new file, 0 → 100644)

from .register import register
from x2paddle.core.util import *


def normalize_shape(input_shape):
    return input_shape


def normalize_layer(inputs,
                    across_spatial=None,
                    channel_shared=None,
                    input_shape=None,
                    name=None):
    assert across_spatial == False, "Only support across_spatial == False for Normalize"
    input = inputs[0]
    l2_norm = fluid.layers.l2_normalize(input, axis=1, name=name + '_l2')
    scale_param = fluid.layers.create_parameter(
        shape=[1] if channel_shared else [input_shape[0][1]],
        dtype=input.dtype,
        attr=name + '_scale')
    out = fluid.layers.elementwise_mul(
        x=l2_norm, y=scale_param, axis=-1 if channel_shared else 1)
    return out


def normalize_weights(name, data=None):
    weights_name = [name + '_scale']
    return weights_name


register(kind='Normalize',
         shape=normalize_shape,
         layer=normalize_layer,
         weights=normalize_weights)
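For the supported across_spatial == False case, the layer L2-normalizes each position's channel vector and then multiplies by a learned scale (one value per channel when channel_shared is false). A standalone numpy sketch of that computation, with assumed shapes and ignoring the epsilon that l2_normalize adds; not part of the commit:

import numpy as np

# Normalize semantics sketch for across_spatial=False, channel_shared=False.
x = np.random.rand(1, 4, 5, 5).astype("float32")      # [N, C, H, W]
scale = np.random.rand(4).astype("float32")           # one learned scale per channel
l2 = x / np.sqrt((x * x).sum(axis=1, keepdims=True))  # l2_normalize along axis=1
out = l2 * scale.reshape(1, 4, 1, 1)                  # elementwise_mul with axis=1
print(out.shape)                                      # (1, 4, 5, 5)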
x2paddle/op_mapper/caffe_custom_layer/permute.py  (new file, 0 → 100644)

from .register import register
from x2paddle.core.util import *


def permute_shape(input_shape, order=None):
    inshape = input_shape[0]
    output_shape = []
    for ii in order:
        assert ii < len(inshape), "invalid order for permute[%s]" % (name)
        output_shape.append(inshape[ii])
    return [output_shape]


def permute_layer(inputs, order=None, input_shape=None, name=None):
    input = inputs[0]
    order = list(order)
    out = fluid.layers.transpose(input, perm=order, name=name)
    return out


def permute_weights(name, data=None):
    weights_name = []
    return weights_name


register(kind='Permute',
         shape=permute_shape,
         layer=permute_layer,
         weights=permute_weights)
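permute_shape simply reorders the input dimensions according to order. A standalone check of that logic with illustrative SSD-style values (not part of the commit):

# Sketch of permute_shape's reordering; order [0, 2, 3, 1] moves the channel
# dimension last, as SSD prototxts typically do before flattening.
inshape = [1, 126, 19, 19]
order = [0, 2, 3, 1]
output_shape = [inshape[ii] for ii in order]
print([output_shape])  # [[1, 19, 19, 126]]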
x2paddle/op_mapper/caffe_custom_layer/priorbox.py  (new file, 0 → 100644)

from .register import register
from x2paddle.core.util import *


def priorbox_shape(input_shape, max_size=None, aspect_ratio=None):
    fc_shape = input_shapes[0]
    N = 1
    if not max_size == None:
        N += 1
    if not aspect_ratio == None:
        N += 2 * len(aspect_ratio)
    N_bbx = fc_shape[2] * fc_shape[3] * N
    output_shape = [1, 2, 4 * N_bbx]
    return [output_shape]


def priorbox_layer(inputs,
                   step=0.0,
                   offset=0.5,
                   min_size=None,
                   max_size=None,
                   aspect_ratio=[1.0],
                   flip=False,
                   clip=False,
                   variance=[0.1, 0.1, 0.2, 0.2],
                   input_shape=None,
                   name=None):
    input = input_shape[0]
    image = input_shape[1]
    steps = tuple(step) if type(step) is list or type(step) is tuple else (step, step)

    box, variance_ = fluid.layers.prior_box(input,
                                            image,
                                            min_sizes=list(min_size),
                                            max_sizes=list(max_size),
                                            aspect_ratios=list(aspect_ratio),
                                            variance=list(variance),
                                            flip=flip,
                                            clip=clip,
                                            steps=step,
                                            offset=offset,
                                            name=name,
                                            min_max_aspect_ratios_order=True)
    box = fluid.layers.reshape(box, [1, 1, -1])
    variance_ = fluid.layers.reshape(variance_, [1, 1, -1])
    out = fluid.layers.concat([box, variance_], axis=1)
    return out


def priorbox_weights(name, data=None):
    weights_name = []
    return weights_name


register(kind='PriorBox',
         shape=priorbox_shape,
         layer=priorbox_layer,
         weights=priorbox_weights)
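The shape function counts N prior boxes per feature-map cell: one for min_size, one more if max_size is set, and two extra per aspect ratio. A worked example of that arithmetic with assumed SSD-like settings (not part of the commit):

# Worked example of priorbox_shape's box count (illustrative numbers):
# a 19x19 feature map, one min_size, one max_size, aspect_ratio = [2.0, 3.0].
H, W = 19, 19
N = 1 + 1 + 2 * 2         # min_size + max_size + 2 per aspect ratio = 6
N_bbx = H * W * N         # 2166 prior boxes for this feature map
print([1, 2, 4 * N_bbx])  # [1, 2, 8664] -- boxes and variances, 4 values each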
x2paddle/op_mapper/caffe_custom_layer/roipooling.py  (new file, 0 → 100644)

from .register import register
from x2paddle.core.util import *


def roipooling_shape(input_shape, pooled_w=None, pooled_h=None):
    base_fea_shape = input_shapes[0]
    rois_shape = input_shapes[1]
    output_shape = base_fea_shape
    output_shape[0] = rois_shape[0]
    output_shape[2] = pooled_h
    output_shape[3] = pooled_w
    return [output_shape]


def roipooling_layer(inputs,
                     pooled_w=None,
                     pooled_h=None,
                     spatial_scale=None,
                     input_shape=None,
                     name=None):
    input = inputs[0]
    roi = inputs[1]
    roi = fluid.layers.slice(roi, axes=[1], starts=[1], ends=[5])
    out = fluid.layers.roi_pool(input,
                                roi,
                                pooled_height=pooled_h,
                                pooled_width=pooled_w,
                                spatial_scale=spatial_scale)
    return out


def roipooling_weights(name, data=None):
    weights_name = []
    return weights_name


register(kind='ROIPooling',
         shape=roipooling_shape,
         layer=roipooling_layer,
         weights=roipooling_weights)
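roipooling_shape replaces the batch dimension of the feature-map shape with the ROI count and the spatial dimensions with the pooled size, while the slice in roipooling_layer drops the leading batch-index column of Caffe-style ROIs [batch_idx, x1, y1, x2, y2] before roi_pool. A standalone sketch of the shape computation with illustrative values (it assumes the input_shapes name in the function body refers to the list passed in as input_shape; not part of the commit):

# ROI pooling output-shape sketch: feature map [1, 256, 38, 50], 300 ROIs,
# pooled to 7x7.
base_fea_shape = [1, 256, 38, 50]
rois_shape = [300, 5]
pooled_h, pooled_w = 7, 7
output_shape = list(base_fea_shape)
output_shape[0] = rois_shape[0]
output_shape[2] = pooled_h
output_shape[3] = pooled_w
print([output_shape])  # [[300, 256, 7, 7]]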
x2paddle/op_mapper/caffe_custom_layer/select.py  (new file, 0 → 100644)

from .register import register
from x2paddle.core.util import *


def select_shape(input_shape, axis=None, slice_point=None):
    inshape = input_shape[0]
    slice_point = slice_point
    start = slice_point[0]
    if len(slice_point) == 2:
        end = slice_point[1]
    else:
        end = input_shape[axis]
    assert end > start, "invalid slice_point with [start:%d, end:%d]" % (start, end)
    output_shape = input_shape
    output_shape[axis] = end - start
    return [output_shape]


def select_layer(inputs, axis=None, slice_point=None, input_shape=None, name=None):
    input = inputs[0]
    maxint32 = 2147483647
    slice_point = [0] + slice_point
    slice_point.append(maxint32)
    i = 0
    out = []
    for i in range(len(slice_point)):
        out.append(
            fluid.layers.slice(input,
                               axes=[axis],
                               starts=[slice_point[i]],
                               ends=[slice_point[i + 1]],
                               name=name + '_' + str(i)))
        if i == len(slice_point) - 2:
            break
    return out


def select_weights(name, data=None):
    weights_name = []
    return weights_name


register(kind='Select',
         shape=select_shape,
         layer=select_layer,
         weights=select_weights)
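select_layer pads slice_point with a leading 0 and a trailing INT32 max, then takes consecutive [start:end) slices along the chosen axis, stopping after the last real boundary. A standalone sketch of the boundaries this produces (not part of the commit):

# Boundaries derived from a single Caffe slice_point of [3]: two slices,
# [0:3] and [3:end], along the selected axis.
maxint32 = 2147483647
slice_point = [0] + [3]
slice_point.append(maxint32)
bounds = list(zip(slice_point[:-1], slice_point[1:]))
print(bounds)  # [(0, 3), (3, 2147483647)]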
x2paddle/op_mapper/caffe_custom_layer/shufflechannel.py  (new file, 0 → 100644)

from .register import register
from x2paddle.core.util import *


def shufflechannel_shape(input_shape):
    return input_shape


def shufflechannel_layer(inputs, group=None, input_shape=None, name=None):
    input = inputs[0]
    out = fluid.layers.shuffle_channel(input, group=group, name=name)
    return out


def shufflechannel_weights(name, data=None):
    weights_name = []
    return weights_name


register(kind='ShuffleChannel',
         shape=shufflechannel_shape,
         layer=shufflechannel_layer,
         weights=shufflechannel_weights)
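fluid.layers.shuffle_channel performs the ShuffleNet-style channel shuffle. A standalone numpy sketch of what that does for group = 2, with illustrative shapes (not part of the commit):

import numpy as np

# Channel shuffle semantics sketch: view C as (group, C // group), swap the two
# axes, then flatten back to C channels.
x = np.arange(6).reshape(1, 6, 1, 1)
group = 2
n, c, h, w = x.shape
out = x.reshape(n, group, c // group, h, w).transpose(0, 2, 1, 3, 4).reshape(n, c, h, w)
print(out[0, :, 0, 0])  # [0 3 1 4 2 5] -- channels interleaved across the two groups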
x2paddle/op_mapper/caffe_op_mapper.py

...

@@ -27,6 +27,8 @@ class CaffeOpMapper(OpMapper):
        self.weights = dict()
        resolver = decoder.resolver
        self.mylayers = {}
        self.inputs = self.graph.input_nodes
        self.outputs = self.graph.output_nodes
        if resolver.has_pycaffe():
            self.did_use_pb = False
        else:

...
@@ -124,36 +126,32 @@ class CaffeOpMapper(OpMapper):
                data[idx] = np.squeeze(d, axis=sq_axis)
                shape_new = data[idx].shape
            print('shape-old' + str(shape_old))
            print('shape-new' + str(shape_new))
            if len(shape_old) != shape_new:
                print('squeeze idx:%d, with kind:%s,name:%s' % \
                    (idx, node.layer_type, node.layer.name))
        return data

    def get_kernel_parameters(self, kind, params):
        assert kind in ['Convolution', 'Pooling', 'Deconvolution', 'ConvolutionDepthwise']
        assert kind in ['Convolution', 'Pooling', 'Deconvolution']
        [k_h, k_w] = [1, 1]
        print(params.kernel_size)
        if isinstance(params.kernel_size, numbers.Number):
            [k_h, k_w] = [params.kernel_size] * 2
        else:
        elif isinstance(params.kernel_size, list) and len(params.kernel_size) > 0:
            k_h = params.kernel_h if params.kernel_h else params.kernel_size[0]
            k_w = params.kernel_w if params.kernel_w else params.kernel_size[len(params.kernel_size) - 1]
        [s_h, s_w] = [1, 1]
        if isinstance(params.stride, numbers.Number):
            [s_h, s_w] = [params.stride] * 2
        else:
        elif isinstance(params.stride, list) and len(params.stride) > 0:
            s_h = params.stride_h if params.stride_h else params.stride[0]
            s_w = params.stride_w if params.stride_w else params.stride[len(params.stride) - 1]
        [p_h, p_w] = [0, 0]
        if isinstance(params.pad, numbers.Number):
            [p_h, p_w] = [params.pad] * 2
        else:
        elif isinstance(params.pad, list) and len(params.pad) > 0:
            p_h = params.pad_h if params.pad_h else params.pad[0]
            p_w = params.pad_w if params.pad_w else params.pad[len(params.pad) - 1]

...
@@ -875,41 +873,6 @@ class CaffeOpMapper(OpMapper):
                                  output='_, {}'.format(node.layer_name),
                                  param_attr=attr)

    def Axpy(self, node):
        assert len(node.inputs) == 3, 'The count of Axpy node\'s input is not 3.'
        alpha = self.graph.get_bottom_node(node, idx=0, copy=True)
        if self.is_Scale(alpha):
            tmp = self.graph.get_bottom_node(alpha, idx=0, copy=True)
            if self.is_BN(tmp):
                alpha = tmp
        x = self.graph.get_bottom_node(node, idx=1, copy=True)
        if self.is_Scale(x):
            tmp = self.graph.get_bottom_node(x, idx=0, copy=True)
            if self.is_BN(tmp):
                x = tmp
        y = self.graph.get_bottom_node(node, idx=2, copy=True)
        if self.is_Scale(y):
            tmp = self.graph.get_bottom_node(y, idx=0, copy=True)
            if self.is_BN(tmp):
                y = tmp
        attr = {'axis': 0, 'name': string(node.layer_name + '_mul')}
        node.fluid_code.add_layer("elementwise_mul",
                                  inputs={'x': alpha, 'y': x},
                                  output=node,
                                  param_attr=attr)
        attr = {'name': string(node.layer_name + '_add')}
        node.fluid_code.add_layer("elementwise_add",
                                  inputs={'x': node, 'y': y},
                                  output=node,
                                  param_attr=attr)

    def Crop(self, node):
        assert len(node.inputs) == 2, 'The count of Crop node\'s input is not 2.'

...
@@ -943,83 +906,6 @@ class CaffeOpMapper(OpMapper):
                                  output=node,
                                  param_attr=attr)

    def DetectionOutput(self, node):
        assert len(node.inputs) == 3, 'The count of DetectionOutput node\'s input is not 3.'
        mbox_loc = self.graph.get_bottom_node(node, idx=0, copy=True)
        if self.is_Scale(mbox_loc):
            tmp = self.graph.get_bottom_node(mbox_loc, idx=0, copy=True)
            if self.is_BN(tmp):
                mbox_loc = tmp
        mbox_conf_flatten = self.graph.get_bottom_node(node, idx=1, copy=True)
        if self.is_Scale(mbox_conf_flatten):
            tmp = self.graph.get_bottom_node(mbox_conf_flatten, idx=0, copy=True)
            if self.is_BN(tmp):
                mbox_conf_flatten = tmp
        mbox_priorbox = self.graph.get_bottom_node(node, idx=2, copy=True)
        if self.is_Scale(mbox_priorbox):
            tmp = self.graph.get_bottom_node(mbox_priorbox, idx=0, copy=True)
            if self.is_BN(tmp):
                mbox_priorbox = tmp
        params = node.layer.detection_output_param
        nms_threshold = 0.3
        top_k = 10
        eta = 1.0
        if hasattr(params, 'nms_param'):
            nms_threshold = getattr(params.nms_param, 'nms_threshold', 0.3)
            top_k = getattr(params.nms_param, 'top_k', 10)
            eta = getattr(params.nms_param, 'eta', 1.0)
        background_label = getattr(params, 'background_label_id', 0)
        share_location = getattr(params, 'share_location', True)
        keep_top_k = getattr(params, 'keep_top_k', 100)
        confidence_threshold = getattr(params, 'confidence_threshold', 0.1)
        attr = {
            'num_or_sections': 2,
            'dim': 1,
            'name': string(node.layer_name + '_split')
        }
        node.fluid_code.add_layer("split",
                                  inputs=mbox_priorbox,
                                  output='mbox_priorbox_list',
                                  param_attr=attr)
        node.fluid_code.add_note('pb = mbox_priorbox_list[0]')
        node.fluid_code.add_note('pbv = mbox_priorbox_list[1]')
        attr = {'shape': [-1, 4], 'name': string(node.layer_name + '_reshape1')}
        node.fluid_code.add_layer("reshape",
                                  inputs='pb',
                                  output='pb',
                                  param_attr=attr)
        attr = {'shape': [-1, 4], 'name': string(node.layer_name + '_reshape2')}
        node.fluid_code.add_layer("reshape",
                                  inputs='pbv',
                                  output='pbv',
                                  param_attr=attr)
        # TODO(syf): need chaeck
        attr = {
            'shape': [-1, node.input_shape[1][1], 4],
            'name': string(node.layer_name + '_reshape3')
        }
        node.fluid_code.add_layer("reshape",
                                  inputs=mbox_loc,
                                  output='mbox_loc',
                                  param_attr=attr)
        attr = {
            'background_label': background_label,
            'nms_threshold': nms_threshold,
            'nms_top_k': top_k,
            'keep_top_k': keep_top_k,
            'score_threshold': confidence_threshold,
            'nms_eta': eta
        }
        inputs_str = get_input_name(mbox_conf_flatten) + ', mbox_loc, pb, pbv'
        node.fluid_code.add_layer("detection_output",
                                  inputs=inputs_str,
                                  output=node,
                                  param_attr=attr)

    def Flatten(self, noed):
        assert len(node.inputs

...
@@ -1036,68 +922,6 @@ class CaffeOpMapper(OpMapper):
                                  output=node,
                                  param_attr=attr)

    def Normalize(self, node):
        assert len(node.inputs) == 1, 'The count of Normalize node\'s input is not 1.'
        input = self.graph.get_bottom_node(node, idx=0, copy=True)
        if self.is_Scale(input):
            tmp = self.graph.get_bottom_node(input, idx=0, copy=True)
            if self.is_BN(tmp):
                input = tmp
        params = node.layer.norm_param
        across_spatial = params.across_spatial
        channel_shared = params.channel_shared
        assert across_spatial == False, "Only support across_spatial == False for Normalize"
        attr = {'axis': 1, 'name': string(node.layer_name + '_l2')}
        node.fluid_code.add_layer("l2_normalize",
                                  inputs=input,
                                  output=node.layer_name + '_l2',
                                  param_attr=attr)
        input_name = self.get_input_name(input)
        data = node.data
        assert data is not None, 'The parameter of {} (type is {}) is not set. You need to use python package of caffe to set the default value.'.format(
            node.layer_name, node.layer_type)
        data = self.adjust_parameters(node)
        self.weights[node.layer_name + '_scale'] = data[0]
        node.fluid_code.add_note('{}_scale_attr = ParamAttr(name=\'{}\')'.format(
            node.layer_name, node.layer_name + '_scale'))
        attr = {
            'shape': [1] if channel_shared else [node.input_shape[0][1]],
            'dtype': '{}.dtype'.format(input_name),
            'attr': '{}_scale_attr'.format(node.layer_name),
            'name': string(node.layer_name + '_param')
        }
        node.fluid_code.add_layer("create_parameter",
                                  inputs=None,
                                  output=node.layer_name + '_scale_param',
                                  param_attr=attr)
        attr = {
            'axis': -1 if channel_shared else 1,
            'name': string(node.layer_name + '_mul')
        }
        node.fluid_code.add_layer("elementwise_mul",
                                  inputs=node.layer_name + '_l2, ' + node.layer_name + '_scale_param',
                                  output=node,
                                  param_attr=attr)

    def Permute(self, node):
        assert len(node.inputs) == 1, 'The count of Permute node\'s input is not 1.'
        input = self.graph.get_bottom_node(node, idx=0, copy=True)
        if self.is_Scale(input):
            tmp = self.graph.get_bottom_node(input, idx=0, copy=True)
            if self.is_BN(tmp):
                input = tmp
        params = node.layer.permute_param
        order = list(params.order)
        attr = {'order': order, 'name': string(node.layer_name)}
        node.fluid_code.add_layer("transpose",
                                  inputs=input,
                                  output=node,
                                  param_attr=attr)

    def Power(self, node):
        assert len(node.inputs) == 1, 'The count of Permute node\'s input is not 1.'

...
@@ -1126,69 +950,6 @@ class CaffeOpMapper(OpMapper):
                                  output=node,
                                  param_attr=attr)

    def PriorBox(self, node):
        assert len(node.inputs) == 2, 'The count of PriorBox node\'s input is not 2.'
        input1 = self.graph.get_bottom_node(node, idx=0, copy=True)
        if self.is_Scale(input1):
            tmp = self.graph.get_bottom_node(input1, idx=0, copy=True)
            if self.is_BN(tmp):
                input1 = tmp
        input2 = self.graph.get_bottom_node(node, idx=1, copy=True)
        if self.is_Scale(input2):
            tmp = self.graph.get_bottom_node(input2, idx=0, copy=True)
            if self.is_BN(tmp):
                input2 = tmp
        input_dict = {'input': input1, 'image': input2}
        params = node.layer.prior_box_param
        step = getattr(params, 'step', 0.0)
        offset = getattr(params, 'offset', 0.5)
        min_size = list(params.min_size)
        max_size = list(params.max_size)
        aspect_ratio = list(params.aspect_ratio)
        flip = getattr(params, 'flip', False)
        clip = getattr(params, 'clip', False)
        variance = list(getattr(params, 'variance', [0.1, 0.1, 0.2, 0.2]))
        steps = tuple(step) if type(step) is list or type(step) is tuple else (step, step)
        attr = {
            'min_sizes': min_size,
            'max_sizes': max_size,
            'aspect_ratios': aspect_ratio,
            'variance': variance,
            'flip': flip,
            'clip': clip,
            'step': steps,
            'offset': offset,
            'min_max_aspect_ratios_order': True,
            'name': string(node.layer_name)
        }
        node.fluid_code.add_layer("prior_box",
                                  inputs=input_dict,
                                  output='{}_box, {}_var'.format(node.layer_name, node.layer_name),
                                  param_attr=attr)
        attr = {
            'shape': [1, 1, -1],
        }
        node.fluid_code.add_layer("reshape",
                                  inputs='{}_box'.format(node.layer_name),
                                  output='{}_box'.format(node.layer_name),
                                  param_attr=attr)
        attr = {
            'shape': [1, 1, -1],
        }
        node.fluid_code.add_layer("reshape",
                                  inputs='{}_var'.format(node.layer_name),
                                  output='{}_var'.format(node.layer_name),
                                  param_attr=attr)
        attr = {'axis': 1, 'name': string(node.layer_name + '_concat')}
        node.fluid_code.add_layer("concat",
                                  inputs='[{}_box, {}_var]'.format(node.layer_name, node.layer_name),
                                  output=node,
                                  param_attr=attr)

    def Reduction(self, node):
        assert len(node.inputs) == 1, 'The count of Reduction node\'s input is not 1.'

...
@@ -1263,86 +1024,6 @@ class CaffeOpMapper(OpMapper):
                                  output=node,
                                  param_attr=attr)

    def ROIPooling(self, node):
        assert len(node.inputs) == 2, 'The count of ROIPooling node\'s input is not 2.'
        input1 = self.graph.get_bottom_node(node, idx=0, copy=True)
        if self.is_Scale(input1):
            tmp = self.graph.get_bottom_node(input1, idx=0, copy=True)
            if self.is_BN(tmp):
                input1 = tmp
        input2 = self.graph.get_bottom_node(node, idx=1, copy=True)
        if self.is_Scale(input2):
            tmp = self.graph.get_bottom_node(input2, idx=0, copy=True)
            if self.is_BN(tmp):
                input2 = tmp
        attr = {'axes': [1], 'starts': [1], 'ends': [5]}
        node.fluid_code.add_layer("slice",
                                  inputs=input2,
                                  output=input2,
                                  param_attr=attr)
        input_dict = {'input': input1, 'rois': input2}
        params = node.layer.roi_pooling_param
        attr = {
            'pooled_w': params.pooled_w,
            'pooled_h': params.pooled_h,
            'spatial_scale': params.spatial_scale,
            'name': string(node.layer_name)
        }
        node.fluid_code.add_layer("roi_pool",
                                  inputs=input_dict,
                                  output=node,
                                  param_attr=attr)

    def Select(self, node):
        assert len(node.inputs) == 1, 'The count of Select node\'s input is not 1.'
        input = self.graph.get_bottom_node(node, idx=0, copy=True)
        if self.is_Scale(input):
            tmp = self.graph.get_bottom_node(input, idx=0, copy=True)
            if self.is_BN(tmp):
                input = tmp
        params = node.layer.select_param
        slice_point = list(params.slice_point)
        axis = params.axis
        maxint32 = 2147483647
        slice_point = [0] + slice_point
        slice_point.append(maxint32)
        i = 0
        node.fluid_code.add_note('{} = []'.format(node.layer_name))
        for i in range(len(slice_point)):
            attr = {
                'axes': [axis],
                'starts': [slice_point[i]],
                'ends': [slice_point[i + 1]],
                'name': string(node.layer_name + '_' + str(i))
            }
            node.fluid_code.add_layer("slice",
                                      inputs=input,
                                      output=string(node.layer_name + '_' + str(i)),
                                      param_attr=attr)
            node.fluid_code.add_note('{}.append({})'.format(
                node.layer_name, node.layer_name + '_' + str(i)))
            if i == len(slice_point) - 2:
                break

    def ShuffleChannel(self, node):
        assert len(node.inputs) == 1, 'The count of ShuffleChannel node\'s input is not 1.'
        params = node.layer.shuffle_channel_param
        group = params.group
        input = self.graph.get_bottom_node(node, idx=0, copy=True)
        if self.is_Scale(input):
            tmp = self.graph.get_bottom_node(input, idx=0, copy=True)
            if self.is_BN(tmp):
                input = tmp
        attr = {'group': group, 'name': string(node.layer_name)}
        node.fluid_code.add_layer("shuffle_channel",
                                  inputs=input,
                                  output=node,
                                  param_attr=attr)

    def deal_custom_layer(self, node):
        op = node.layer_type
        custom_code, func = make_custom_layer(node)

...
@@ -1351,12 +1032,11 @@ class CaffeOpMapper(OpMapper):
        kwargs['name'] = string(node.layer_name)
        kwargs['input_shape'] = node.input_shape
        data = node.data
        assert data is not None, 'The parameter of {} (type is {}) is not set. You need to use python package of caffe to set the default value.'.format(
            node.layer_name, node.layer_type)
        data = self.adjust_parameters(node)
        weights_name = deal_weights(node)
        for i in range(len(data)):
            self.weights[weights_name[i]] = data[i]
        if data is not None:
            data = self.adjust_parameters(node)
            weights_name = deal_weights(node)
            for i in range(len(data)):
                self.weights[weights_name[i]] = data[i]
        inputs_node = []
        for i in range(len(node.inputs)):
            input = self.graph.get_bottom_node(node, idx=i, copy=True)

...
x2paddle/op_mapper/caffe_shape.py

...

@@ -338,26 +338,11 @@ def shape_argmax(layer, input_shape):
    return [outshape]


def shape_axpy(layer, input_shape):
    assert len(input_shapes) == 3, "not valid input shape for axpy layer"
    assert len(input_shapes[0]) == len(input_shapes[1]), 'should have same dims'
    output_shape = input_shapes[1]
    assert (input_shapes[2] == output_shape), \
        "shape not consistent for axpy[%s <--> %s]" \
        % (str(output_shape), str(input_shapes[2]))
    return [output_shape]


def shape_crop(layer, input_shape):
    assert len(input_shape) == 2, "the number of crop's inputs must be 2"
    return [input_shape[1]]


def shape_detectionoutput(layer, input_shape):
    return [[-1, 6]]


def shape_flatten(layer, input_shape):
    assert len(input_shape) == 1, "the number of flatten's inputs must be 1"
    params = layer.flatten_param

...
@@ -375,43 +360,10 @@ def shape_flatten(layer, input_shape):
    return [output_shape]


def shape_normalize(layer, input_shape):
    return input_shape


def shape_permute(layer, input_shape):
    params = layer.permute_param
    order = list(params.order)
    inshape = input_shape[0]
    output_shape = []
    for ii in order:
        assert ii < len(inshape), "invalid order for permute[%s]" % (name)
        output_shape.append(inshape[ii])
    return [output_shape]


def shape_power(layer, input_shape):
    return input_shape


def shape_priorbox(layer, input_shape):
    params = layer.prior_box_param
    min_size = list(params.min_size)
    max_size = list(params.max_size)
    aspect_ratio = list(params.aspect_ratio)
    assert len(input_shapes[0]) == 2, "invalid inputs for Priorbox[%s]" % (name)
    fc_shape = input_shapes[0][0]
    N = 1
    if not max_size == None:
        N += 1
    if not aspect_ratio == None:
        N += 2 * len(aspect_ratio)
    N_bbx = fc_shape[2] * fc_shape[3] * N
    output_shape = [[1, 2, 4 * N_bbx]]
    return output_shape


def shape_reduction(layer, input_shape):
    params = layer.reduction_param
    axis = params.axis

...
@@ -419,46 +371,3 @@ def shape_reduction(layer, input_shape):
        axis += len(input_shape[0]) + 1
    assert axis <= len(input_shape[0]), 'invalid axis[%d] error' % (axis)
    return [input_shape[0:axis]]


def shape_roipooling(layer, input_shape):
    params = layer.roi_pooling_param
    pooled_w = params.pooled_w
    pooled_h = params.pooled_h
    spatial_scale = params.spatial_scale
    assert len(input_shapes[0]) == 2, "not valid input shape for roipooling layer"
    base_fea_shape = input_shapes[0][0]
    rois_shape = input_shapes[0][1]
    output_shape = base_fea_shape
    output_shape[0] = rois_shape[0]
    output_shape[2] = pooled_h
    output_shape[3] = pooled_w
    return [output_shape]


def shape_select(layer, input_shape):
    input_shape = list(input_shape[0])
    params = layer.select_param
    axis = params.axis
    slice_point = list(params.slice_point)
    start = slice_point[0]
    if len(slice_point) == 2:
        end = slice_point[1]
    else:
        end = input_shape[axis]
    assert end > start, "invalid slice_point with [start:%d, end:%d]" \
        % (start, end)
    output_shape = input_shape
    output_shape[axis] = end - start
    return [output_shape]


def shape_shufflechannel(layer, input_shape):
    return input_shape


# def shape_convolutiondepthwise(layer, input_shape):
#     params = layer.convolution_param
#     return get_strided_kernel_output_shape(params, input_shape[0], math.floor)