PaddlePaddle / X2Paddle
Commit 52fdd6c5 (unverified)
Authored by Jason on Sep 25, 2020; committed via GitHub on Sep 25, 2020.

Merge pull request #411 from Channingss/rm_pd2onnx

remove code for paddle2onnx

Parents: 56697812, 423ceb0d
Showing 20 changed files with 14 additions and 4690 deletions (+14 / -4690).
Changed files (20):
  x2paddle/convert.py (+14 / -8)
  x2paddle/decoder/paddle_decoder.py (+0 / -28)
  x2paddle/op_mapper/paddle2onnx/__init__.py (+0 / -0)
  x2paddle/op_mapper/paddle2onnx/opset10/__init__.py (+0 / -0)
  x2paddle/op_mapper/paddle2onnx/opset10/opset.py (+0 / -49)
  x2paddle/op_mapper/paddle2onnx/opset10/paddle_custom_layer/__init__.py (+0 / -0)
  x2paddle/op_mapper/paddle2onnx/opset11/__init__.py (+0 / -0)
  x2paddle/op_mapper/paddle2onnx/opset11/opset.py (+0 / -277)
  x2paddle/op_mapper/paddle2onnx/opset11/paddle_custom_layer/__init__.py (+0 / -0)
  x2paddle/op_mapper/paddle2onnx/opset11/paddle_custom_layer/multiclass_nms.py (+0 / -442)
  x2paddle/op_mapper/paddle2onnx/opset11/paddle_custom_layer/yolo_box.py (+0 / -844)
  x2paddle/op_mapper/paddle2onnx/opset9/__init__.py (+0 / -0)
  x2paddle/op_mapper/paddle2onnx/opset9/opset.py (+0 / -969)
  x2paddle/op_mapper/paddle2onnx/opset9/paddle_custom_layer/__init__.py (+0 / -0)
  x2paddle/op_mapper/paddle2onnx/opset9/paddle_custom_layer/box_coder.py (+0 / -401)
  x2paddle/op_mapper/paddle2onnx/opset9/paddle_custom_layer/im2sequence.py (+0 / -94)
  x2paddle/op_mapper/paddle2onnx/opset9/paddle_custom_layer/multiclass_nms.py (+0 / -449)
  x2paddle/op_mapper/paddle2onnx/opset9/paddle_custom_layer/prior_box.py (+0 / -174)
  x2paddle/op_mapper/paddle2onnx/opset9/paddle_custom_layer/yolo_box.py (+0 / -847)
  x2paddle/op_mapper/paddle2onnx/paddle_op_mapper.py (+0 / -108)
x2paddle/convert.py
View file @ 52fdd6c5

@@ -95,6 +95,7 @@ def arg_parser():
         help="define the inputs' shape")
     return parser
 
 
 def tf2paddle(model_path, save_dir, without_data_format_optimization,

@@ -236,11 +237,16 @@ def pytorch2paddle(model_path, save_dir, input_shapes):
 def paddle2onnx(model_path, save_dir, opset_version=10):
-    from x2paddle.decoder.paddle_decoder import PaddleDecoder
-    from x2paddle.op_mapper.paddle2onnx.paddle_op_mapper import PaddleOpMapper
     import paddle.fluid as fluid
-    model = PaddleDecoder(model_path, '__model__', '__params__')
-    mapper = PaddleOpMapper()
+    try:
+        import paddle2onnx
+    except:
+        print("[ERROR] paddle2onnx not installed, use \"pip install paddle2onnx\"")
+    import paddle2onnx as p2o
+    model = p2o.PaddleDecoder(model_path, '__model__', '__params__')
+    mapper = p2o.PaddleOpMapper()
     mapper.convert(model.program, save_dir,
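For orientation, a minimal sketch of driving the rewritten paddle2onnx entry point above from Python; the model and output directories are illustrative, and the external paddle2onnx package must be installed first (pip install paddle2onnx, as the error message in the hunk suggests).

from x2paddle.convert import paddle2onnx

# Delegates to the external paddle2onnx package (p2o.PaddleDecoder / p2o.PaddleOpMapper),
# as the new code path in the hunk above does. Paths below are illustrative.
paddle2onnx(model_path="./paddle_model",   # directory holding __model__ / __params__
            save_dir="./onnx_model",       # where the exported ONNX model is written
            opset_version=11)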
x2paddle/decoder/paddle_decoder.py
deleted 100644 → 0 · view file @ 56697812
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle.fluid as fluid


class PaddleDecoder(object):
    def __init__(self,
                 model_dir,
                 model_filename='__model__',
                 params_filename=None):
        exe = fluid.Executor(fluid.CPUPlace())
        [self.program, feed, fetchs] = fluid.io.load_inference_model(
            model_dir,
            exe,
            model_filename=model_filename,
            params_filename=params_filename)
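A minimal usage sketch of the class removed here, as the old code path in x2paddle/convert.py above used it; the model directory is illustrative.

from x2paddle.decoder.paddle_decoder import PaddleDecoder  # module deleted by this commit

# Loads a fluid inference model saved as __model__ / __params__ (path is illustrative).
model = PaddleDecoder("./paddle_model", '__model__', '__params__')
# model.program is the fluid.Program that the old PaddleOpMapper.convert() consumed.
print(type(model.program))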
x2paddle/op_mapper/paddle2onnx/__init__.py
deleted 100644 → 0 · view file @ 56697812
x2paddle/op_mapper/paddle2onnx/opset10/__init__.py
deleted 100644 → 0 · view file @ 56697812
x2paddle/op_mapper/paddle2onnx/opset10/opset.py
deleted 100644 → 0 · view file @ 56697812
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
import sys
import x2paddle
import os
import numpy as np
import paddle.fluid.core as core
import paddle.fluid as fluid
import onnx
from onnx import helper, onnx_pb
from x2paddle.op_mapper.paddle2onnx.opset9.opset import OpSet9


class OpSet10(OpSet9):
    def __init__(self):
        super(OpSet10, self).__init__()

    def slice(self, op, block):
        axes = op.attr('axes')
        starts = op.attr('starts')
        ends = op.attr('ends')
        axes_name = self.get_name(op.type, 'axes')
        starts_name = self.get_name(op.type, 'starts')
        ends_name = self.get_name(op.type, 'ends')
        axes_node = self.make_constant_node(axes_name,
                                            onnx_pb.TensorProto.INT64, axes)
        starts_node = self.make_constant_node(starts_name,
                                              onnx_pb.TensorProto.INT64, starts)
        ends_node = self.make_constant_node(ends_name,
                                            onnx_pb.TensorProto.INT64, ends)
        node = helper.make_node(
            "Slice",
            inputs=[op.input('Input')[0], starts_name, ends_name, axes_name],
            outputs=op.output('Out'), )
        return [starts_node, ends_node, axes_node, node]
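As background on why OpSet10 overrides slice(): from ONNX opset 10 onward, Slice takes starts/ends/axes as input tensors rather than attributes, which is why the mapper above emits three Constant nodes next to the Slice node. A small self-contained sketch of the same pattern built directly with onnx.helper; tensor names and shapes here are illustrative, not taken from the diff.

import numpy as np
import onnx
from onnx import helper, numpy_helper, TensorProto

# Slice parameters become initializer tensors (inputs), not attributes.
starts = numpy_helper.from_array(np.array([1], dtype=np.int64), name="starts")
ends = numpy_helper.from_array(np.array([3], dtype=np.int64), name="ends")
axes = numpy_helper.from_array(np.array([0], dtype=np.int64), name="axes")

node = helper.make_node("Slice", inputs=["x", "starts", "ends", "axes"], outputs=["y"])
graph = helper.make_graph(
    [node], "slice_opset10_example",
    inputs=[helper.make_tensor_value_info("x", TensorProto.FLOAT, [4])],
    outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, [2])],
    initializer=[starts, ends, axes])
model = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 10)])
onnx.checker.check_model(model)  # validates the opset-10 Slice-with-inputs form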
x2paddle/op_mapper/paddle2onnx/opset10/paddle_custom_layer/__init__.py
deleted 100644 → 0 · view file @ 56697812
x2paddle/op_mapper/paddle2onnx/opset11/__init__.py
deleted 100644 → 0 · view file @ 56697812
x2paddle/op_mapper/paddle2onnx/opset11/opset.py
deleted 100644 → 0 · view file @ 56697812
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
import sys
import x2paddle
import os
import numpy as np
import paddle.fluid.core as core
import paddle.fluid as fluid
import onnx
from onnx import helper, onnx_pb
from x2paddle.op_mapper.paddle2onnx.opset10.opset import OpSet10


class OpSet11(OpSet10):
    def __init__(self):
        super(OpSet11, self).__init__()

    def relu6(self, op, block):
        min_name = self.get_name(op.type, 'min')
        max_name = self.get_name(op.type, 'max')
        min_node = self.make_constant_node(min_name, onnx_pb.TensorProto.FLOAT, 0)
        max_node = self.make_constant_node(max_name, onnx_pb.TensorProto.FLOAT,
                                           op.attr('threshold'))
        node = helper.make_node(
            'Clip',
            inputs=[op.input('X')[0], min_name, max_name],
            outputs=op.output('Out'), )
        return [min_node, max_node, node]

    def pad2d(self, op, block):
        x_shape = block.var(op.input('X')[0]).shape
        paddings = op.attr('paddings')
        onnx_pads = []
        #TODO support pads is Variable
        if op.attr('data_format') == 'NCHW':
            pads = [
                0, 0, paddings[0], paddings[2], 0, 0, paddings[1], paddings[3]
            ]
        else:
            pads = [
                0, paddings[0], paddings[2], 0, 0, paddings[1], paddings[3], 0
            ]
        pads_name = self.get_name(op.type, 'pads')
        pads_node = self.make_constant_node(pads_name,
                                            onnx_pb.TensorProto.INT64, pads)
        constant_value_name = self.get_name(op.type, 'constant_value')
        constant_value_node = self.make_constant_node(constant_value_name,
                                                      onnx_pb.TensorProto.FLOAT,
                                                      op.attr('pad_value'))
        node = helper.make_node(
            'Pad',
            inputs=op.input('X') + [pads_name, constant_value_name],
            outputs=op.output('Out'),
            mode=op.attr('mode'))
        return [pads_node, constant_value_node, node]

    def clip(self, op, block):
        min_name = self.get_name(op.type, 'min')
        max_name = self.get_name(op.type, 'max')
        min_node = self.make_constant_node(min_name, onnx_pb.TensorProto.FLOAT,
                                           op.attr('min'))
        max_node = self.make_constant_node(max_name, onnx_pb.TensorProto.FLOAT,
                                           op.attr('max'))
        node = helper.make_node(
            'Clip',
            inputs=[op.input('X')[0], min_name, max_name],
            outputs=op.output('Out'))
        return [min_node, max_node, node]

    def bilinear_interp(self, op, block):
        input_names = op.input_names
        coordinate_transformation_mode = ''
        align_corners = op.attr('align_corners')
        align_mode = op.attr('align_mode')
        if align_corners:
            coordinate_transformation_mode = 'align_corners'
        elif align_mode == 1:
            coordinate_transformation_mode = 'asymmetric'
        else:
            coordinate_transformation_mode = 'half_pixel'
        roi_name = self.get_name(op.type, 'roi')
        roi_node = self.make_constant_node(roi_name, onnx_pb.TensorProto.FLOAT,
                                           [1, 1, 1, 1, 1, 1, 1, 1])
        if ('OutSize' in input_names and len(op.input('OutSize')) > 0) or (
                'SizeTensor' in input_names and
                len(op.input('SizeTensor')) > 0):
            node_list = list()
            empty_name = self.get_name(op.type, 'empty')
            empty_tensor = helper.make_tensor(
                empty_name,
                onnx_pb.TensorProto.FLOAT, (0, ),
                np.array([]).astype('float32'),
                raw=False)
            empty_node = helper.make_node(
                'Constant', [], outputs=[empty_name], value=empty_tensor)
            shape_name0 = self.get_name(op.type, 'shape')
            shape_node0 = helper.make_node(
                'Shape', inputs=op.input('X'), outputs=[shape_name0])
            starts_name = self.get_name(op.type, 'slice.starts')
            starts_node = self.make_constant_node(
                starts_name, onnx_pb.TensorProto.INT64, [0])
            ends_name = self.get_name(op.type, 'slice.ends')
            ends_node = self.make_constant_node(
                ends_name, onnx_pb.TensorProto.INT64, [2])
            shape_name1 = self.get_name(op.type, 'shape')
            shape_node1 = helper.make_node(
                'Slice',
                inputs=[shape_name0, starts_name, ends_name],
                outputs=[shape_name1])
            node_list.extend([
                roi_node, empty_node, shape_node0, starts_node, ends_node,
                shape_node1
            ])
            if 'OutSize' in input_names and len(op.input('OutSize')) > 0:
                cast_shape_name = self.get_name(op.type, "shape.cast")
                cast_shape_node = helper.make_node(
                    'Cast',
                    inputs=op.input('OutSize'),
                    outputs=[cast_shape_name],
                    to=onnx_pb.TensorProto.INT64)
                node_list.append(cast_shape_node)
            else:
                concat_shape_name = self.get_name(op.type, "shape.concat")
                concat_shape_node = helper.make_node(
                    "Concat",
                    inputs=op.input('SizeTensor'),
                    outputs=[concat_shape_name],
                    axis=0)
                cast_shape_name = self.get_name(op.type, "shape.cast")
                cast_shape_node = helper.make_node(
                    'Cast',
                    inputs=[concat_shape_name],
                    outputs=[cast_shape_name],
                    to=onnx_pb.TensorProto.INT64)
                node_list.extend([concat_shape_node, cast_shape_node])
            shape_name3 = self.get_name(op.type, "shape.concat")
            shape_node3 = helper.make_node(
                'Concat',
                inputs=[shape_name1, cast_shape_name],
                outputs=[shape_name3],
                axis=0)
            result_node = helper.make_node(
                'Resize',
                inputs=[op.input('X')[0], roi_name, empty_name, shape_name3],
                outputs=op.output('Out'),
                mode='linear',
                coordinate_transformation_mode=coordinate_transformation_mode)
            node_list.extend([shape_node3, result_node])
            return node_list
        elif 'Scale' in input_names and len(op.input('Scale')) > 0:
            node = helper.make_node(
                'Resize',
                inputs=[op.input('X')[0], roi_name, op.input('Scale')[0]],
                outputs=op.output('Out'),
                mode='linear',
                coordinate_transformation_mode=coordinate_transformation_mode)
        else:
            out_shape = [op.attr('out_h'), op.attr('out_w')]
            scale = op.attr('scale')
            if out_shape.count(-1) > 0:
                scale_name = self.get_name(op.type, 'scale')
                scale_node = self.make_constant_node(
                    scale_name, onnx_pb.TensorProto.FLOAT, [1, 1, scale, scale])
                node = helper.make_node(
                    'Resize',
                    inputs=[op.input('X')[0], roi_name, scale_name],
                    outputs=op.output('Out'),
                    mode='nearest',
                    coordinate_transformation_mode=coordinate_transformation_mode)
                return [scale_node, roi_node, node]
            else:
                raise Exception("Unexpected situation happend")
        return [roi_node, node]

    def nearest_interp(self, op, block):
        input_names = op.input_names
        coordinate_transformation_mode = ''
        align_corners = op.attr('align_corners')
        if align_corners:
            coordinate_transformation_mode = 'align_corners'
        else:
            coordinate_transformation_mode = 'half_pixel'
        roi_name = self.get_name(op.type, 'roi')
        roi_node = self.make_constant_node(roi_name, onnx_pb.TensorProto.FLOAT,
                                           [1, 1, 1, 1, 1, 1, 1, 1])
        if 'OutSize' in input_names and len(op.input('OutSize')) > 0:
            node = helper.make_node(
                'Resize',
                inputs=[op.input('X')[0], roi_name, op.input('OutSize')[0]],
                outputs=op.output('Out'),
                mode='nearest',
                coordinate_transformation_mode=coordinate_transformation_mode)
        elif 'Scale' in input_names and len(op.input('Scale')) > 0:
            node = helper.make_node(
                'Resize',
                inputs=[op.input('X')[0], roi_name, op.input('Scale')[0]],
                outputs=op.output('Out'),
                mode='nearest',
                coordinate_transformation_mode=coordinate_transformation_mode)
        else:
            out_shape = [op.attr('out_h'), op.attr('out_w')]
            scale = op.attr('scale')
            if out_shape.count(-1) > 0:
                scale_name = self.get_name(op.type, 'scale')
                scale_node = self.make_constant_node(
                    scale_name, onnx_pb.TensorProto.FLOAT, [1, 1, scale, scale])
                node = helper.make_node(
                    'Resize',
                    inputs=[op.input('X')[0], roi_name, scale_name],
                    outputs=op.output('Out'),
                    mode='nearest',
                    coordinate_transformation_mode=coordinate_transformation_mode)
                return [scale_node, roi_node, node]
            else:
                raise Exception("Unexpected situation happend")
        return [roi_node, node]

    def hard_swish(self, op, block):
        min_name = self.get_name(op.type, 'min')
        max_name = self.get_name(op.type, 'max')
        scale_name = self.get_name(op.type, 'scale')
        offset_name = self.get_name(op.type, 'offset')
        min_node = self.make_constant_node(min_name, onnx_pb.TensorProto.FLOAT, 0)
        max_node = self.make_constant_node(max_name, onnx_pb.TensorProto.FLOAT,
                                           op.attr('threshold'))
        scale_node = self.make_constant_node(scale_name,
                                             onnx_pb.TensorProto.FLOAT,
                                             op.attr('scale'))
        offset_node = self.make_constant_node(offset_name,
                                              onnx_pb.TensorProto.FLOAT,
                                              op.attr('offset'))
        name0 = self.get_name(op.type, 'add')
        node0 = helper.make_node(
            'Add', inputs=[op.input('X')[0], offset_name], outputs=[name0])
        name1 = self.get_name(op.type, 'relu')
        node1 = helper.make_node(
            'Clip',
            inputs=[name0, min_name, max_name],
            outputs=[name1], )
        name2 = self.get_name(op.type, 'mul')
        node2 = helper.make_node(
            'Mul', inputs=[op.input('X')[0], name1], outputs=[name2])
        node3 = helper.make_node(
            'Div', inputs=[name2, scale_name], outputs=op.output('Out'))
        return [
            min_node, max_node, scale_node, offset_node, node0, node1, node2,
            node3
        ]

    def yolo_box(self, op, block):
        from .paddle_custom_layer.yolo_box import yolo_box
        return yolo_box(op, block)

    def multiclass_nms(self, op, block):
        from .paddle_custom_layer.multiclass_nms import multiclass_nms
        return multiclass_nms(op, block)
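Likewise, the relu6 and clip mappings above depend on opset 11 Clip reading its min/max bounds from inputs instead of attributes, so the Paddle threshold attribute is materialized as a Constant or initializer. A small self-contained sketch of that pattern with onnx.helper; the tensor names and the 6.0 threshold are illustrative, not taken from the diff.

import numpy as np
import onnx
from onnx import helper, numpy_helper, TensorProto

# Scalar (0-D) bounds supplied as inputs, as required from opset 11 onward.
min_t = numpy_helper.from_array(np.array(0.0, dtype=np.float32), name="min")
max_t = numpy_helper.from_array(np.array(6.0, dtype=np.float32), name="max")  # relu6 threshold

clip = helper.make_node("Clip", inputs=["x", "min", "max"], outputs=["y"])
graph = helper.make_graph(
    [clip], "relu6_as_clip",
    inputs=[helper.make_tensor_value_info("x", TensorProto.FLOAT, [3])],
    outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, [3])],
    initializer=[min_t, max_t])
model = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 11)])
onnx.checker.check_model(model)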
x2paddle/op_mapper/paddle2onnx/opset11/paddle_custom_layer/__init__.py
deleted 100644 → 0 · view file @ 56697812
x2paddle/op_mapper/paddle2onnx/opset11/paddle_custom_layer/multiclass_nms.py
deleted 100644 → 0 · view file @ 56697812
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
import sys
import os
import numpy as np
import paddle.fluid.core as core
import paddle.fluid as fluid
import onnx
import logging
from onnx import helper, onnx_pb


def multiclass_nms(op, block):
    """
    Convert the paddle multiclass_nms to onnx op.
    This op is get the select boxes from origin boxes.
    """
    inputs = dict()
    outputs = dict()
    attrs = dict()
    for name in op.input_names:
        inputs[name] = op.input(name)
    for name in op.output_names:
        outputs[name] = op.output(name)
    for name in op.attr_names:
        attrs[name] = op.attr(name)

    result_name = outputs['Out'][0]
    background = attrs['background_label']
    normalized = attrs['normalized']
    if normalized == False:
        logging.warn(
            "The parameter normalized of multiclass_nms OP of Paddle is False, which has diff with ONNX."
            " Please set normalized=True in multiclass_nms of Paddle, see doc Q4 in https://github.com/PaddlePaddle/X2Paddle/blob/develop/FAQ.md"
        )

    #convert the paddle attribute to onnx tensor
    name_score_threshold = [outputs['Out'][0] + "@score_threshold"]
    name_iou_threshold = [outputs['Out'][0] + "@iou_threshold"]
    name_keep_top_k = [outputs['Out'][0] + '@keep_top_k']
    name_keep_top_k_2D = [outputs['Out'][0] + '@keep_top_k_1D']

    node_score_threshold = onnx.helper.make_node(
        'Constant', inputs=[], outputs=name_score_threshold,
        value=onnx.helper.make_tensor(
            name=name_score_threshold[0] + "@const",
            data_type=onnx.TensorProto.FLOAT, dims=(),
            vals=[float(attrs['score_threshold'])]))

    node_iou_threshold = onnx.helper.make_node(
        'Constant', inputs=[], outputs=name_iou_threshold,
        value=onnx.helper.make_tensor(
            name=name_iou_threshold[0] + "@const",
            data_type=onnx.TensorProto.FLOAT, dims=(),
            vals=[float(attrs['nms_threshold'])]))

    boxes_num = block.var(outputs['Out'][0]).shape[0]
    top_k_value = np.int64(boxes_num if attrs['keep_top_k'] == -1 else attrs['keep_top_k'])
    node_keep_top_k = onnx.helper.make_node(
        'Constant', inputs=[], outputs=name_keep_top_k,
        value=onnx.helper.make_tensor(
            name=name_keep_top_k[0] + "@const",
            data_type=onnx.TensorProto.INT64, dims=(), vals=[top_k_value]))

    node_keep_top_k_2D = onnx.helper.make_node(
        'Constant', inputs=[], outputs=name_keep_top_k_2D,
        value=onnx.helper.make_tensor(
            name=name_keep_top_k_2D[0] + "@const",
            data_type=onnx.TensorProto.INT64, dims=[1, 1], vals=[top_k_value]))

    # the paddle data format is x1,y1,x2,y2
    kwargs = {'center_point_box': 0}

    name_select_nms = [outputs['Out'][0] + "@select_index"]
    node_select_nms = onnx.helper.make_node(
        'NonMaxSuppression',
        inputs=inputs['BBoxes'] + inputs['Scores'] + name_keep_top_k +
        name_iou_threshold + name_score_threshold,
        outputs=name_select_nms)
    # step 1 nodes select the nms class
    node_list = [
        node_score_threshold, node_iou_threshold, node_keep_top_k,
        node_keep_top_k_2D, node_select_nms
    ]

    # create some const value to use
    name_const_value = [
        result_name + "@const_0", result_name + "@const_1",
        result_name + "@const_2", result_name + "@const_-1"
    ]
    value_const_value = [0, 1, 2, -1]
    for name, value in zip(name_const_value, value_const_value):
        node = onnx.helper.make_node(
            'Constant', inputs=[], outputs=[name],
            value=onnx.helper.make_tensor(
                name=name + "@const", data_type=onnx.TensorProto.INT64,
                dims=[1], vals=[value]))
        node_list.append(node)

    # In this code block, we will deocde the raw score data, reshape N * C * M to 1 * N*C*M
    # and the same time, decode the select indices to 1 * D, gather the select_indices
    outputs_gather_1 = [result_name + "@gather_1"]
    node_gather_1 = onnx.helper.make_node(
        'Gather', inputs=name_select_nms + [result_name + "@const_1"],
        outputs=outputs_gather_1, axis=1)
    node_list.append(node_gather_1)

    outputs_squeeze_gather_1 = [result_name + "@sequeeze_gather_1"]
    node_squeeze_gather_1 = onnx.helper.make_node(
        'Squeeze', inputs=outputs_gather_1,
        outputs=outputs_squeeze_gather_1, axes=[1])
    node_list.append(node_squeeze_gather_1)

    outputs_gather_2 = [result_name + "@gather_2"]
    node_gather_2 = onnx.helper.make_node(
        'Gather', inputs=name_select_nms + [result_name + "@const_2"],
        outputs=outputs_gather_2, axis=1)
    node_list.append(node_gather_2)

    #slice the class is not 0
    if background == 0:
        outputs_nonzero = [result_name + "@nonzero"]
        node_nonzero = onnx.helper.make_node(
            'NonZero', inputs=outputs_squeeze_gather_1, outputs=outputs_nonzero)
        node_list.append(node_nonzero)
    else:
        name_thresh = [result_name + "@thresh"]
        node_thresh = onnx.helper.make_node(
            'Constant', inputs=[], outputs=name_thresh,
            value=onnx.helper.make_tensor(
                name=name_thresh[0] + "@const",
                data_type=onnx.TensorProto.INT32, dims=[1], vals=[-1]))
        node_list.append(node_thresh)

        outputs_cast = [result_name + "@cast"]
        node_cast = onnx.helper.make_node(
            'Cast', inputs=outputs_squeeze_gather_1, outputs=outputs_cast, to=6)
        node_list.append(node_cast)

        outputs_greater = [result_name + "@greater"]
        node_greater = onnx.helper.make_node(
            'Greater', inputs=outputs_cast + name_thresh,
            outputs=outputs_greater)
        node_list.append(node_greater)

        outputs_nonzero = [result_name + "@nonzero"]
        node_nonzero = onnx.helper.make_node(
            'NonZero', inputs=outputs_greater, outputs=outputs_nonzero)
        node_list.append(node_nonzero)

    outputs_gather_1_nonzero = [result_name + "@gather_1_nonzero"]
    node_gather_1_nonzero = onnx.helper.make_node(
        'Gather', inputs=outputs_gather_1 + outputs_nonzero,
        outputs=outputs_gather_1_nonzero, axis=0)
    node_list.append(node_gather_1_nonzero)

    outputs_gather_2_nonzero = [result_name + "@gather_2_nonzero"]
    node_gather_2_nonzero = onnx.helper.make_node(
        'Gather', inputs=outputs_gather_2 + outputs_nonzero,
        outputs=outputs_gather_2_nonzero, axis=0)
    node_list.append(node_gather_2_nonzero)

    # reshape scores N * C * M to (N*C*M) * 1
    outputs_reshape_scores_rank1 = [result_name + "@reshape_scores_rank1"]
    node_reshape_scores_rank1 = onnx.helper.make_node(
        "Reshape", inputs=inputs['Scores'] + [result_name + "@const_-1"],
        outputs=outputs_reshape_scores_rank1)
    node_list.append(node_reshape_scores_rank1)

    # get the shape of scores
    outputs_shape_scores = [result_name + "@shape_scores"]
    node_shape_scores = onnx.helper.make_node(
        'Shape', inputs=inputs['Scores'], outputs=outputs_shape_scores)
    node_list.append(node_shape_scores)

    # gather the index: 2 shape of scores
    outputs_gather_scores_dim1 = [result_name + "@gather_scores_dim1"]
    node_gather_scores_dim1 = onnx.helper.make_node(
        'Gather', inputs=outputs_shape_scores + [result_name + "@const_2"],
        outputs=outputs_gather_scores_dim1, axis=0)
    node_list.append(node_gather_scores_dim1)

    # mul class * M
    outputs_mul_classnum_boxnum = [result_name + "@mul_classnum_boxnum"]
    node_mul_classnum_boxnum = onnx.helper.make_node(
        'Mul', inputs=outputs_gather_1_nonzero + outputs_gather_scores_dim1,
        outputs=outputs_mul_classnum_boxnum)
    node_list.append(node_mul_classnum_boxnum)

    # add class * M * index
    outputs_add_class_M_index = [result_name + "@add_class_M_index"]
    node_add_class_M_index = onnx.helper.make_node(
        'Add', inputs=outputs_mul_classnum_boxnum + outputs_gather_2_nonzero,
        outputs=outputs_add_class_M_index)
    node_list.append(node_add_class_M_index)

    # Squeeze the indices to 1 dim
    outputs_squeeze_select_index = [result_name + "@squeeze_select_index"]
    node_squeeze_select_index = onnx.helper.make_node(
        'Squeeze', inputs=outputs_add_class_M_index,
        outputs=outputs_squeeze_select_index, axes=[0, 2])
    node_list.append(node_squeeze_select_index)

    # gather the data from flatten scores
    outputs_gather_select_scores = [result_name + "@gather_select_scores"]
    node_gather_select_scores = onnx.helper.make_node(
        'Gather',
        inputs=outputs_reshape_scores_rank1 + outputs_squeeze_select_index,
        outputs=outputs_gather_select_scores, axis=0)
    node_list.append(node_gather_select_scores)

    # get nums to input TopK
    outputs_shape_select_num = [result_name + "@shape_select_num"]
    node_shape_select_num = onnx.helper.make_node(
        'Shape', inputs=outputs_gather_select_scores,
        outputs=outputs_shape_select_num)
    node_list.append(node_shape_select_num)

    outputs_gather_select_num = [result_name + "@gather_select_num"]
    node_gather_select_num = onnx.helper.make_node(
        'Gather', inputs=outputs_shape_select_num + [result_name + "@const_0"],
        outputs=outputs_gather_select_num, axis=0)
    node_list.append(node_gather_select_num)

    outputs_unsqueeze_select_num = [result_name + "@unsqueeze_select_num"]
    node_unsqueeze_select_num = onnx.helper.make_node(
        'Unsqueeze', inputs=outputs_gather_select_num,
        outputs=outputs_unsqueeze_select_num, axes=[0])
    node_list.append(node_unsqueeze_select_num)

    outputs_concat_topK_select_num = [result_name + "@conat_topK_select_num"]
    node_conat_topK_select_num = onnx.helper.make_node(
        'Concat', inputs=outputs_unsqueeze_select_num + name_keep_top_k_2D,
        outputs=outputs_concat_topK_select_num, axis=0)
    node_list.append(node_conat_topK_select_num)

    outputs_cast_concat_topK_select_num = [result_name + "@concat_topK_select_num"]
    node_outputs_cast_concat_topK_select_num = onnx.helper.make_node(
        'Cast', inputs=outputs_concat_topK_select_num,
        outputs=outputs_cast_concat_topK_select_num, to=6)
    node_list.append(node_outputs_cast_concat_topK_select_num)

    # get min(topK, num_select)
    outputs_compare_topk_num_select = [result_name + "@compare_topk_num_select"]
    node_compare_topk_num_select = onnx.helper.make_node(
        'ReduceMin', inputs=outputs_cast_concat_topK_select_num,
        outputs=outputs_compare_topk_num_select, keepdims=0)
    node_list.append(node_compare_topk_num_select)

    # unsqueeze the indices to 1D tensor
    outputs_unsqueeze_topk_select_indices = [result_name + "@unsqueeze_topk_select_indices"]
    node_unsqueeze_topk_select_indices = onnx.helper.make_node(
        'Unsqueeze', inputs=outputs_compare_topk_num_select,
        outputs=outputs_unsqueeze_topk_select_indices, axes=[0])
    node_list.append(node_unsqueeze_topk_select_indices)

    # cast the indices to INT64
    outputs_cast_topk_indices = [result_name + "@cast_topk_indices"]
    node_cast_topk_indices = onnx.helper.make_node(
        'Cast', inputs=outputs_unsqueeze_topk_select_indices,
        outputs=outputs_cast_topk_indices, to=7)
    node_list.append(node_cast_topk_indices)

    # select topk scores indices
    outputs_topk_select_topk_indices = [
        result_name + "@topk_select_topk_values",
        result_name + "@topk_select_topk_indices"
    ]
    node_topk_select_topk_indices = onnx.helper.make_node(
        'TopK', inputs=outputs_gather_select_scores + outputs_cast_topk_indices,
        outputs=outputs_topk_select_topk_indices)
    node_list.append(node_topk_select_topk_indices)

    # gather topk label, scores, boxes
    outputs_gather_topk_scores = [result_name + "@gather_topk_scores"]
    node_gather_topk_scores = onnx.helper.make_node(
        'Gather',
        inputs=outputs_gather_select_scores + [outputs_topk_select_topk_indices[1]],
        outputs=outputs_gather_topk_scores, axis=0)
    node_list.append(node_gather_topk_scores)

    outputs_gather_topk_class = [result_name + "@gather_topk_class"]
    node_gather_topk_class = onnx.helper.make_node(
        'Gather',
        inputs=outputs_gather_1_nonzero + [outputs_topk_select_topk_indices[1]],
        outputs=outputs_gather_topk_class, axis=1)
    node_list.append(node_gather_topk_class)

    # gather the boxes need to gather the boxes id, then get boxes
    outputs_gather_topk_boxes_id = [result_name + "@gather_topk_boxes_id"]
    node_gather_topk_boxes_id = onnx.helper.make_node(
        'Gather',
        inputs=outputs_gather_2_nonzero + [outputs_topk_select_topk_indices[1]],
        outputs=outputs_gather_topk_boxes_id, axis=1)
    node_list.append(node_gather_topk_boxes_id)

    # squeeze the gather_topk_boxes_id to 1 dim
    outputs_squeeze_topk_boxes_id = [result_name + "@squeeze_topk_boxes_id"]
    node_squeeze_topk_boxes_id = onnx.helper.make_node(
        'Squeeze', inputs=outputs_gather_topk_boxes_id,
        outputs=outputs_squeeze_topk_boxes_id, axes=[0, 2])
    node_list.append(node_squeeze_topk_boxes_id)

    outputs_gather_select_boxes = [result_name + "@gather_select_boxes"]
    node_gather_select_boxes = onnx.helper.make_node(
        'Gather', inputs=inputs['BBoxes'] + outputs_squeeze_topk_boxes_id,
        outputs=outputs_gather_select_boxes, axis=1)
    node_list.append(node_gather_select_boxes)

    # concat the final result
    # before concat need to cast the class to float
    outputs_cast_topk_class = [result_name + "@cast_topk_class"]
    node_cast_topk_class = onnx.helper.make_node(
        'Cast', inputs=outputs_gather_topk_class,
        outputs=outputs_cast_topk_class, to=1)
    node_list.append(node_cast_topk_class)

    outputs_unsqueeze_topk_scores = [result_name + "@unsqueeze_topk_scores"]
    node_unsqueeze_topk_scores = onnx.helper.make_node(
        'Unsqueeze', inputs=outputs_gather_topk_scores,
        outputs=outputs_unsqueeze_topk_scores, axes=[0, 2])
    node_list.append(node_unsqueeze_topk_scores)

    inputs_concat_final_results = outputs_cast_topk_class + \
        outputs_unsqueeze_topk_scores + outputs_gather_select_boxes
    outputs_sort_by_socre_results = [result_name + "@concat_topk_scores"]
    node_sort_by_socre_results = onnx.helper.make_node(
        'Concat', inputs=inputs_concat_final_results,
        outputs=outputs_sort_by_socre_results, axis=2)
    node_list.append(node_sort_by_socre_results)

    # select topk classes indices
    outputs_squeeze_cast_topk_class = [result_name + "@squeeze_cast_topk_class"]
    node_squeeze_cast_topk_class = onnx.helper.make_node(
        'Squeeze', inputs=outputs_cast_topk_class,
        outputs=outputs_squeeze_cast_topk_class, axes=[0, 2])
    node_list.append(node_squeeze_cast_topk_class)

    outputs_topk_select_classes_indices = [
        result_name + "@topk_select_topk_classes_scores",
        result_name + "@topk_select_topk_classes_indices"
    ]
    node_topk_select_topk_indices = onnx.helper.make_node(
        'TopK',
        inputs=outputs_squeeze_cast_topk_class + outputs_cast_topk_indices,
        outputs=outputs_topk_select_classes_indices, largest=0)
    node_list.append(node_topk_select_topk_indices)

    outputs_concat_final_results = outputs['Out']
    node_concat_final_results = onnx.helper.make_node(
        'Gather',
        inputs=outputs_sort_by_socre_results + [outputs_topk_select_classes_indices[1]],
        outputs=outputs_concat_final_results, axis=1)
    node_list.append(node_concat_final_results)

    return node_list
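The long Gather/Mul/Add chain above essentially post-processes the output of ONNX NonMaxSuppression, whose selected_indices tensor has shape [num_selected, 3] holding (batch, class, box) triples. A short numpy sketch of the flat-index arithmetic it performs on the reshaped scores; the values below are illustrative and not taken from the diff.

import numpy as np

scores = np.random.rand(1, 3, 5).astype(np.float32)          # N=1, C=3 classes, M=5 boxes
selected = np.array([[0, 1, 4], [0, 2, 0]], dtype=np.int64)   # example NMS selected_indices

classes = selected[:, 1]                                      # column 1: class id (the "@gather_1" path)
boxes = selected[:, 2]                                        # column 2: box index (the "@gather_2" path)
flat_idx = classes * scores.shape[2] + boxes                  # class * M + box, as the Mul/Add nodes compute
picked_scores = scores.reshape(-1)[flat_idx]                  # gather from the rank-1 reshaped scores
print(classes, boxes, picked_scores)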
x2paddle/op_mapper/paddle2onnx/opset11/paddle_custom_layer/yolo_box.py
deleted 100644 → 0 · view file @ 56697812
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import onnx
import numpy as np
from onnx import onnx_pb, helper
from x2paddle.op_mapper.paddle2onnx.opset9.paddle_custom_layer.yolo_box import is_static_shape
from x2paddle.op_mapper.paddle2onnx.opset9.paddle_custom_layer.yolo_box import get_old_name
from x2paddle.op_mapper.paddle2onnx.opset9.paddle_custom_layer.yolo_box import MAX_FLOAT32


def yolo_box(op, block):
    inputs = dict()
    outputs = dict()
    attrs = dict()
    for name in op.input_names:
        inputs[name] = op.input(name)
    for name in op.output_names:
        outputs[name] = op.output(name)
    for name in op.attr_names:
        attrs[name] = op.attr(name)
    model_name = outputs['Boxes'][0]
    input_shape = block.vars[get_old_name(inputs['X'][0])].shape
    is_static_shape(input_shape)
    image_size = inputs['ImgSize']
    input_height = input_shape[2]
    input_width = input_shape[3]
    class_num = attrs['class_num']
    anchors = attrs['anchors']
    num_anchors = int(len(anchors)) // 2
    downsample_ratio = attrs['downsample_ratio']
    input_size = input_height * downsample_ratio
    conf_thresh = attrs['conf_thresh']
    conf_thresh_mat = np.ones([num_anchors * input_height * input_width]) * conf_thresh
    node_list = []
    im_outputs = []

    x_shape = [1, num_anchors, 5 + class_num, input_height, input_width]
    name_x_shape = [model_name + "@x_shape"]
    node_x_shape = onnx.helper.make_node(
        'Constant', inputs=[], outputs=name_x_shape,
        value=onnx.helper.make_tensor(
            name=name_x_shape[0] + "@const",
            data_type=onnx.TensorProto.INT64, dims=[5], vals=x_shape))
    node_list.append(node_x_shape)

    outputs_x_reshape = [model_name + "@reshape"]
    node_x_reshape = onnx.helper.make_node(
        'Reshape', inputs=inputs['X'] + name_x_shape, outputs=outputs_x_reshape)
    node_list.append(node_x_reshape)

    outputs_x_transpose = [model_name + "@x_transpose"]
    node_x_transpose = onnx.helper.make_node(
        'Transpose', inputs=outputs_x_reshape, outputs=outputs_x_transpose,
        perm=[0, 1, 3, 4, 2])
    node_list.append(node_x_transpose)

    range_x = []
    range_y = []
    for i in range(0, input_width):
        range_x.append(i)
    for j in range(0, input_height):
        range_y.append(j)

    name_range_x = [model_name + "@range_x"]
    node_range_x = onnx.helper.make_node(
        'Constant', inputs=[], outputs=name_range_x,
        value=onnx.helper.make_tensor(
            name=name_range_x[0] + "@const",
            data_type=onnx.TensorProto.FLOAT, dims=[input_width], vals=range_x))
    node_list.append(node_range_x)

    name_range_y = [model_name + "@range_y"]
    node_range_y = onnx.helper.make_node(
        'Constant', inputs=[], outputs=name_range_y,
        value=onnx.helper.make_tensor(
            name=name_range_y[0] + "@const",
            data_type=onnx.TensorProto.FLOAT, dims=[input_height], vals=range_y))
    node_list.append(node_range_y)

    range_x_new_shape = [1, input_width]
    range_y_new_shape = [input_height, 1]
    name_range_x_new_shape = [model_name + "@range_x_new_shape"]
    node_range_x_new_shape = onnx.helper.make_node(
        'Constant', inputs=[], outputs=name_range_x_new_shape,
        value=onnx.helper.make_tensor(
            name=name_range_x_new_shape[0] + "@const",
            data_type=onnx.TensorProto.INT64,
            dims=[len(range_x_new_shape)], vals=range_x_new_shape))
    node_list.append(node_range_x_new_shape)

    name_range_y_new_shape = [model_name + "@range_y_new_shape"]
    node_range_y_new_shape = onnx.helper.make_node(
        'Constant', inputs=[], outputs=name_range_y_new_shape,
        value=onnx.helper.make_tensor(
            name=name_range_y_new_shape[0] + "@const",
            data_type=onnx.TensorProto.INT64,
            dims=[len(range_y_new_shape)], vals=range_y_new_shape))
    node_list.append(node_range_y_new_shape)

    outputs_range_x_reshape = [model_name + "@range_x_reshape"]
    node_range_x_reshape = onnx.helper.make_node(
        'Reshape', inputs=name_range_x + name_range_x_new_shape,
        outputs=outputs_range_x_reshape)
    node_list.append(node_range_x_reshape)

    outputs_range_y_reshape = [model_name + "@range_y_reshape"]
    node_range_y_reshape = onnx.helper.make_node(
        'Reshape', inputs=name_range_y + name_range_y_new_shape,
        outputs=outputs_range_y_reshape)
    node_list.append(node_range_y_reshape)

    outputs_grid_x = [model_name + "@grid_x"]
    node_grid_x = onnx.helper.make_node(
        "Tile", inputs=outputs_range_x_reshape + name_range_y_new_shape,
        outputs=outputs_grid_x)
    node_list.append(node_grid_x)

    outputs_grid_y = [model_name + "@grid_y"]
    node_grid_y = onnx.helper.make_node(
        "Tile", inputs=outputs_range_y_reshape + name_range_x_new_shape,
        outputs=outputs_grid_y)
    node_list.append(node_grid_y)

    outputs_box_x = [model_name + "@box_x"]
    outputs_box_y = [model_name + "@box_y"]
    outputs_box_w = [model_name + "@box_w"]
    outputs_box_h = [model_name + "@box_h"]
    outputs_conf = [model_name + "@conf"]
    outputs_prob = [model_name + "@prob"]
    node_split_input = onnx.helper.make_node(
        "Split", inputs=outputs_x_transpose,
        outputs=outputs_box_x + outputs_box_y + outputs_box_w +
        outputs_box_h + outputs_conf + outputs_prob,
        axis=-1, split=[1, 1, 1, 1, 1, class_num])
    node_list.append(node_split_input)

    outputs_box_x_sigmoid = [model_name + "@box_x_sigmoid"]
    outputs_box_y_sigmoid = [model_name + "@box_y_sigmoid"]
    node_box_x_sigmoid = onnx.helper.make_node(
        "Sigmoid", inputs=outputs_box_x, outputs=outputs_box_x_sigmoid)
    node_list.append(node_box_x_sigmoid)
    node_box_y_sigmoid = onnx.helper.make_node(
        "Sigmoid", inputs=outputs_box_y, outputs=outputs_box_y_sigmoid)
    node_list.append(node_box_y_sigmoid)

    outputs_box_x_squeeze = [model_name + "@box_x_squeeze"]
    outputs_box_y_squeeze = [model_name + "@box_y_squeeze"]
    node_box_x_squeeze = onnx.helper.make_node(
        'Squeeze', inputs=outputs_box_x_sigmoid,
        outputs=outputs_box_x_squeeze, axes=[4])
    node_list.append(node_box_x_squeeze)
    node_box_y_squeeze = onnx.helper.make_node(
        'Squeeze', inputs=outputs_box_y_sigmoid,
        outputs=outputs_box_y_squeeze, axes=[4])
    node_list.append(node_box_y_squeeze)

    outputs_box_x_add_grid = [model_name + "@box_x_add_grid"]
    outputs_box_y_add_grid = [model_name + "@box_y_add_grid"]
    node_box_x_add_grid = onnx.helper.make_node(
        "Add", inputs=outputs_grid_x + outputs_box_x_squeeze,
        outputs=outputs_box_x_add_grid)
    node_list.append(node_box_x_add_grid)
    node_box_y_add_grid = onnx.helper.make_node(
        "Add", inputs=outputs_grid_y + outputs_box_y_squeeze,
        outputs=outputs_box_y_add_grid)
    node_list.append(node_box_y_add_grid)

    name_input_h = [model_name + "@input_h"]
    name_input_w = [model_name + "@input_w"]
    node_input_h = onnx.helper.make_node(
        'Constant', inputs=[], outputs=name_input_h,
        value=onnx.helper.make_tensor(
            name=name_input_w[0] + "@const",
            data_type=onnx.TensorProto.FLOAT, dims=(), vals=[input_height]))
    node_list.append(node_input_h)
    node_input_w = onnx.helper.make_node(
        'Constant', inputs=[], outputs=name_input_w,
        value=onnx.helper.make_tensor(
            name=name_input_w[0] + "@const",
            data_type=onnx.TensorProto.FLOAT, dims=(), vals=[input_width]))
    node_list.append(node_input_w)

    outputs_box_x_encode = [model_name + "@box_x_encode"]
    outputs_box_y_encode = [model_name + "@box_y_encode"]
    node_box_x_encode = onnx.helper.make_node(
        'Div', inputs=outputs_box_x_add_grid + name_input_w,
        outputs=outputs_box_x_encode)
    node_list.append(node_box_x_encode)
    node_box_y_encode = onnx.helper.make_node(
        'Div', inputs=outputs_box_y_add_grid + name_input_h,
        outputs=outputs_box_y_encode)
    node_list.append(node_box_y_encode)

    name_anchor_tensor = [model_name + "@anchor_tensor"]
    node_anchor_tensor = onnx.helper.make_node(
        "Constant", inputs=[], outputs=name_anchor_tensor,
        value=onnx.helper.make_tensor(
            name=name_anchor_tensor[0] + "@const",
            data_type=onnx.TensorProto.FLOAT, dims=[len(anchors)], vals=anchors))
    node_list.append(node_anchor_tensor)

    anchor_shape = [int(num_anchors), 2]
    name_anchor_shape = [model_name + "@anchor_shape"]
    node_anchor_shape = onnx.helper.make_node(
        "Constant", inputs=[], outputs=name_anchor_shape,
        value=onnx.helper.make_tensor(
            name=name_anchor_shape[0] + "@const",
            data_type=onnx.TensorProto.INT64, dims=[2], vals=anchor_shape))
    node_list.append(node_anchor_shape)

    outputs_anchor_tensor_reshape = [model_name + "@anchor_tensor_reshape"]
    node_anchor_tensor_reshape = onnx.helper.make_node(
        "Reshape", inputs=name_anchor_tensor + name_anchor_shape,
        outputs=outputs_anchor_tensor_reshape)
    node_list.append(node_anchor_tensor_reshape)

    name_input_size = [model_name + "@input_size"]
    node_input_size = onnx.helper.make_node(
        "Constant", inputs=[], outputs=name_input_size,
        value=onnx.helper.make_tensor(
            name=name_input_size[0] + "@const",
            data_type=onnx.TensorProto.FLOAT, dims=(), vals=[input_size]))
    node_list.append(node_input_size)

    outputs_anchors_div_input_size = [model_name + "@anchors_div_input_size"]
    node_anchors_div_input_size = onnx.helper.make_node(
        "Div", inputs=outputs_anchor_tensor_reshape + name_input_size,
        outputs=outputs_anchors_div_input_size)
    node_list.append(node_anchors_div_input_size)

    outputs_anchor_w = [model_name + "@anchor_w"]
    outputs_anchor_h = [model_name + "@anchor_h"]
    node_anchor_split = onnx.helper.make_node(
        'Split', inputs=outputs_anchors_div_input_size,
        outputs=outputs_anchor_w + outputs_anchor_h, axis=1, split=[1, 1])
    node_list.append(node_anchor_split)

    new_anchor_shape = [1, int(num_anchors), 1, 1]
    name_new_anchor_shape = [model_name + "@new_anchor_shape"]
    node_new_anchor_shape = onnx.helper.make_node(
        'Constant', inputs=[], outputs=name_new_anchor_shape,
        value=onnx.helper.make_tensor(
            name=name_new_anchor_shape[0] + "@const",
            data_type=onnx.TensorProto.INT64,
            dims=[len(new_anchor_shape)], vals=new_anchor_shape))
    node_list.append(node_new_anchor_shape)

    outputs_anchor_w_reshape = [model_name + "@anchor_w_reshape"]
    outputs_anchor_h_reshape = [model_name + "@anchor_h_reshape"]
    node_anchor_w_reshape = onnx.helper.make_node(
        'Reshape', inputs=outputs_anchor_w + name_new_anchor_shape,
        outputs=outputs_anchor_w_reshape)
    node_list.append(node_anchor_w_reshape)
    node_anchor_h_reshape = onnx.helper.make_node(
        'Reshape', inputs=outputs_anchor_h + name_new_anchor_shape,
        outputs=outputs_anchor_h_reshape)
    node_list.append(node_anchor_h_reshape)

    outputs_box_w_squeeze = [model_name + "@box_w_squeeze"]
    node_box_w_squeeze = onnx.helper.make_node(
        'Squeeze', inputs=outputs_box_w, outputs=outputs_box_w_squeeze, axes=[4])
    node_list.append(node_box_w_squeeze)

    outputs_box_h_squeeze = [model_name + "@box_h_squeeze"]
    node_box_h_squeeze = onnx.helper.make_node(
        'Squeeze', inputs=outputs_box_h, outputs=outputs_box_h_squeeze, axes=[4])
    node_list.append(node_box_h_squeeze)

    outputs_box_w_exp = [model_name + "@box_w_exp"]
    node_box_w_exp = onnx.helper.make_node(
        "Exp", inputs=outputs_box_w_squeeze, outputs=outputs_box_w_exp)
    node_list.append(node_box_w_exp)

    outputs_box_h_exp = [model_name + "@box_h_exp"]
    node_box_h_exp = onnx.helper.make_node(
        "Exp", inputs=outputs_box_h_squeeze, outputs=outputs_box_h_exp)
    node_list.append(node_box_h_exp)

    outputs_box_w_encode = [model_name + "box_w_encode"]
    outputs_box_h_encode = [model_name + "box_h_encode"]
    node_box_w_encode = onnx.helper.make_node(
        'Mul', inputs=outputs_box_w_exp + outputs_anchor_w_reshape,
        outputs=outputs_box_w_encode)
    node_list.append(node_box_w_encode)
    node_box_h_encode = onnx.helper.make_node(
        'Mul', inputs=outputs_box_h_exp + outputs_anchor_h_reshape,
        outputs=outputs_box_h_encode)
    node_list.append(node_box_h_encode)

    outputs_conf_sigmoid = [model_name + "@conf_sigmoid"]
    node_conf_sigmoid = onnx.helper.make_node(
        'Sigmoid', inputs=outputs_conf, outputs=outputs_conf_sigmoid)
    node_list.append(node_conf_sigmoid)

    name_conf_thresh = [model_name + "@conf_thresh"]
    node_conf_thresh = onnx.helper.make_node(
        'Constant', inputs=[], outputs=name_conf_thresh,
        value=onnx.helper.make_tensor(
            name=name_conf_thresh[0] + "@const",
            data_type=onnx.TensorProto.FLOAT,
            dims=[num_anchors * input_height * input_width],
            vals=conf_thresh_mat))
    node_list.append(node_conf_thresh)

    conf_shape = [1, int(num_anchors), input_height, input_width, 1]
    name_conf_shape = [model_name + "@conf_shape"]
    node_conf_shape = onnx.helper.make_node(
        'Constant', inputs=[], outputs=name_conf_shape,
        value=onnx.helper.make_tensor(
            name=name_conf_shape[0] + "@const",
            data_type=onnx.TensorProto.INT64,
            dims=[len(conf_shape)], vals=conf_shape))
    node_list.append(node_conf_shape)

    outputs_conf_thresh_reshape = [model_name + "@conf_thresh_reshape"]
    node_conf_thresh_reshape = onnx.helper.make_node(
        'Reshape', inputs=name_conf_thresh + name_conf_shape,
        outputs=outputs_conf_thresh_reshape)
    node_list.append(node_conf_thresh_reshape)

    outputs_conf_sub = [model_name + "@conf_sub"]
    node_conf_sub = onnx.helper.make_node(
        'Sub', inputs=outputs_conf_sigmoid + outputs_conf_thresh_reshape,
        outputs=outputs_conf_sub)
    node_list.append(node_conf_sub)

    outputs_conf_clip = [model_name + "@conf_clip"]
    node_conf_clip = onnx.helper.make_node(
        'Clip', inputs=outputs_conf_sub, outputs=outputs_conf_clip)
    node_list.append(node_conf_clip)

    zeros = [0]
    name_zeros = [model_name + "@zeros"]
    node_zeros = onnx.helper.make_node(
        'Constant', inputs=[], outputs=name_zeros,
        value=onnx.helper.make_tensor(
            name=name_zeros[0] + "@const",
            data_type=onnx.TensorProto.FLOAT, dims=(), vals=zeros))
    node_list.append(node_zeros)

    outputs_conf_clip_bool = [model_name + "@conf_clip_bool"]
    node_conf_clip_bool = onnx.helper.make_node(
        'Greater', inputs=outputs_conf_clip + name_zeros,
        outputs=outputs_conf_clip_bool)
    node_list.append(node_conf_clip_bool)

    outputs_conf_clip_cast = [model_name + "@conf_clip_cast"]
    node_conf_clip_cast = onnx.helper.make_node(
        'Cast', inputs=outputs_conf_clip_bool,
        outputs=outputs_conf_clip_cast, to=1)
    node_list.append(node_conf_clip_cast)

    outputs_conf_set_zero = [model_name + "@conf_set_zero"]
    node_conf_set_zero = onnx.helper.make_node(
        'Mul', inputs=outputs_conf_sigmoid + outputs_conf_clip_cast,
        outputs=outputs_conf_set_zero)
    node_list.append(node_conf_set_zero)

    outputs_prob_sigmoid = [model_name + "@prob_sigmoid"]
    node_prob_sigmoid = onnx.helper.make_node(
        'Sigmoid', inputs=outputs_prob, outputs=outputs_prob_sigmoid)
    node_list.append(node_prob_sigmoid)

    new_shape = [1, int(num_anchors), input_height, input_width, 1]
    name_new_shape = [model_name + "@new_shape"]
    node_new_shape = onnx.helper.make_node(
        'Constant', inputs=[], outputs=name_new_shape,
        value=onnx.helper.make_tensor(
            name=name_new_shape[0] + "@const",
            data_type=onnx.TensorProto.INT64,
            dims=[len(new_shape)], vals=new_shape))
    node_list.append(node_new_shape)

    outputs_conf_new_shape = [model_name + "@_conf_new_shape"]
    node_conf_new_shape = onnx.helper.make_node(
        'Reshape', inputs=outputs_conf_set_zero + name_new_shape,
        outputs=outputs_conf_new_shape)
    node_list.append(node_conf_new_shape)

    outputs_score = [model_name + "@score"]
    node_score = onnx.helper.make_node(
        'Mul', inputs=outputs_prob_sigmoid + outputs_conf_new_shape,
        outputs=outputs_score)
    node_list.append(node_score)

    outputs_conf_bool = [model_name + "@conf_bool"]
    node_conf_bool = onnx.helper.make_node(
        'Greater', inputs=outputs_conf_new_shape + name_zeros,
        outputs=outputs_conf_bool)
    node_list.append(node_conf_bool)

    outputs_box_x_new_shape = [model_name + "@box_x_new_shape"]
    node_box_x_new_shape = onnx.helper.make_node(
        'Reshape', inputs=outputs_box_x_encode + name_new_shape,
        outputs=outputs_box_x_new_shape)
    node_list.append(node_box_x_new_shape)

    outputs_box_y_new_shape = [model_name + "@box_y_new_shape"]
    node_box_y_new_shape = onnx.helper.make_node(
        'Reshape', inputs=outputs_box_y_encode + name_new_shape,
        outputs=outputs_box_y_new_shape)
    node_list.append(node_box_y_new_shape)

    outputs_box_w_new_shape = [model_name + "@box_w_new_shape"]
    node_box_w_new_shape = onnx.helper.make_node(
        'Reshape', inputs=outputs_box_w_encode + name_new_shape,
        outputs=outputs_box_w_new_shape)
    node_list.append(node_box_w_new_shape)

    outputs_box_h_new_shape = [model_name + "@box_h_new_shape"]
    node_box_h_new_shape = onnx.helper.make_node(
        'Reshape', inputs=outputs_box_h_encode + name_new_shape,
        outputs=outputs_box_h_new_shape)
    node_list.append(node_box_h_new_shape)

    outputs_pred_box = [model_name + "@pred_box"]
    node_pred_box = onnx.helper.make_node(
        'Concat',
        inputs=outputs_box_x_new_shape + outputs_box_y_new_shape +
        outputs_box_w_new_shape + outputs_box_h_new_shape,
        outputs=outputs_pred_box, axis=4)
    node_list.append(node_pred_box)

    outputs_conf_cast = [model_name + "conf_cast"]
    node_conf_cast = onnx.helper.make_node(
        'Cast', inputs=outputs_conf_bool, outputs=outputs_conf_cast, to=1)
    node_list.append(node_conf_cast)

    outputs_pred_box_mul_conf = [model_name + "@pred_box_mul_conf"]
    node_pred_box_mul_conf = onnx.helper.make_node(
        'Mul', inputs=outputs_pred_box + outputs_conf_cast,
        outputs=outputs_pred_box_mul_conf)
    node_list.append(node_pred_box_mul_conf)

    box_shape = [1, int(num_anchors) * input_height * input_width, 4]
    name_box_shape = [model_name + "@box_shape"]
    node_box_shape = onnx.helper.make_node(
        'Constant', inputs=[], outputs=name_box_shape,
        value=onnx.helper.make_tensor(
            name=name_box_shape[0] + "@const",
            data_type=onnx.TensorProto.INT64,
            dims=[len(box_shape)], vals=box_shape))
    node_list.append(node_box_shape)

    outputs_pred_box_new_shape = [model_name + "@pred_box_new_shape"]
    node_pred_box_new_shape = onnx.helper.make_node(
        'Reshape', inputs=outputs_pred_box_mul_conf + name_box_shape,
        outputs=outputs_pred_box_new_shape)
    node_list.append(node_pred_box_new_shape)

    outputs_pred_box_x = [model_name + "@_pred_box_x"]
    outputs_pred_box_y = [model_name + "@_pred_box_y"]
    outputs_pred_box_w = [model_name + "@_pred_box_w"]
    outputs_pred_box_h = [model_name + "@_pred_box_h"]
    node_pred_box_split = onnx.helper.make_node(
        'Split', inputs=outputs_pred_box_new_shape,
        outputs=outputs_pred_box_x + outputs_pred_box_y + outputs_pred_box_w +
        outputs_pred_box_h, axis=2)
    node_list.append(node_pred_box_split)

    name_number_two = [model_name + "@number_two"]
    node_number_two = onnx.helper.make_node(
        "Constant", inputs=[], outputs=name_number_two,
        value=onnx.helper.make_tensor(
            name=name_number_two[0] + "@const",
            data_type=onnx.TensorProto.FLOAT, dims=(), vals=[2]))
    node_list.append(node_number_two)

    outputs_half_w = [model_name + "@half_w"]
    node_half_w = onnx.helper.make_node(
        "Div", inputs=outputs_pred_box_w + name_number_two,
        outputs=outputs_half_w)
    node_list.append(node_half_w)

    outputs_half_h = [model_name + "@half_h"]
    node_half_h = onnx.helper.make_node(
        "Div", inputs=outputs_pred_box_h + name_number_two,
        outputs=outputs_half_h)
    node_list.append(node_half_h)

    outputs_pred_box_x1 = [model_name + "@pred_box_x1"]
    node_pred_box_x1 = onnx.helper.make_node(
        'Sub', inputs=outputs_pred_box_x + outputs_half_w,
        outputs=outputs_pred_box_x1)
    node_list.append(node_pred_box_x1)

    outputs_pred_box_y1 = [model_name + "@pred_box_y1"]
    node_pred_box_y1 = onnx.helper.make_node(
        'Sub', inputs=outputs_pred_box_y + outputs_half_h,
        outputs=outputs_pred_box_y1)
    node_list.append(node_pred_box_y1)

    outputs_pred_box_x2 = [model_name + "@pred_box_x2"]
    node_pred_box_x2 = onnx.helper.make_node(
        'Add', inputs=outputs_pred_box_x + outputs_half_w,
        outputs=outputs_pred_box_x2)
    node_list.append(node_pred_box_x2)

    outputs_pred_box_y2 = [model_name + "@pred_box_y2"]
    node_pred_box_y2 = onnx.helper.make_node(
        'Add', inputs=outputs_pred_box_y + outputs_half_h,
        outputs=outputs_pred_box_y2)
    node_list.append(node_pred_box_y2)

    outputs_sqeeze_image_size = [model_name + "@sqeeze_image_size"]
    node_sqeeze_image_size = onnx.helper.make_node(
        "Squeeze", axes=[0], inputs=image_size,
        outputs=outputs_sqeeze_image_size)
    node_list.append(node_sqeeze_image_size)

    output_img_height = [model_name + "@img_height"]
    output_img_width = [model_name + "@img_width"]
    node_image_size_split = onnx.helper.make_node(
        "Split", inputs=outputs_sqeeze_image_size,
        outputs=output_img_height + output_img_width, axis=-1, split=[1, 1])
    node_list.append(node_image_size_split)

    output_img_width_cast = [model_name + "@img_width_cast"]
    node_img_width_cast = onnx.helper.make_node(
        'Cast', inputs=output_img_width, outputs=output_img_width_cast, to=1)
    node_list.append(node_img_width_cast)

    output_img_height_cast = [model_name + "@img_height_cast"]
    node_img_height_cast = onnx.helper.make_node(
        'Cast', inputs=output_img_height, outputs=output_img_height_cast, to=1)
    node_list.append(node_img_height_cast)

    outputs_pred_box_x1_decode = [model_name + "@pred_box_x1_decode"]
    outputs_pred_box_y1_decode = [model_name + "@pred_box_y1_decode"]
    outputs_pred_box_x2_decode = [model_name + "@pred_box_x2_decode"]
    outputs_pred_box_y2_decode = [model_name + "@pred_box_y2_decode"]
    node_pred_box_x1_decode = onnx.helper.make_node(
        'Mul', inputs=outputs_pred_box_x1 + output_img_width_cast,
        outputs=outputs_pred_box_x1_decode)
    node_list.append(node_pred_box_x1_decode)
    node_pred_box_y1_decode = onnx.helper.make_node(
        'Mul', inputs=outputs_pred_box_y1 + output_img_height_cast,
        outputs=outputs_pred_box_y1_decode)
    node_list.append(node_pred_box_y1_decode)
    node_pred_box_x2_decode = onnx.helper.make_node(
        'Mul', inputs=outputs_pred_box_x2 + output_img_width_cast,
        outputs=outputs_pred_box_x2_decode)
    node_list.append(node_pred_box_x2_decode)
    node_pred_box_y2_decode = onnx.helper.make_node(
        'Mul', inputs=outputs_pred_box_y2 + output_img_height_cast,
        outputs=outputs_pred_box_y2_decode)
    node_list.append(node_pred_box_y2_decode)

    name_number_one = [model_name + "@one"]
    node_number_one = onnx.helper.make_node(
        'Constant', inputs=[], outputs=name_number_one,
        value=onnx.helper.make_tensor(
            name=name_number_one[0] + "@const",
            data_type=onnx.TensorProto.FLOAT, dims=(), vals=[1]))
    node_list.append(node_number_one)

    output_new_img_height = [model_name + "@new_img_height"]
    node_new_img_height = onnx.helper.make_node(
        'Sub', inputs=output_img_height_cast + name_number_one,
        outputs=output_new_img_height)
    node_list.append(node_new_img_height)

    output_new_img_width = [model_name + "@new_img_width"]
    node_new_img_width = onnx.helper.make_node(
        'Sub', inputs=output_img_width_cast + name_number_one,
        outputs=output_new_img_width)
    node_list.append(node_new_img_width)

    outputs_pred_box_x2_sub_w = [model_name + "@pred_box_x2_sub_w"]
    node_pred_box_x2_sub_w = onnx.helper.make_node(
        'Sub', inputs=outputs_pred_box_x2_decode + output_new_img_width,
        outputs=outputs_pred_box_x2_sub_w)
    node_list.append(node_pred_box_x2_sub_w)

    outputs_pred_box_y2_sub_h = [model_name + "@pred_box_y2_sub_h"]
    node_pred_box_y2_sub_h = onnx.helper.make_node(
        'Sub', inputs=outputs_pred_box_y2_decode + output_new_img_height,
        outputs=outputs_pred_box_y2_sub_h)
    node_list.append(node_pred_box_y2_sub_h)

    outputs_pred_box_x1_clip = [model_name + "@pred_box_x1_clip"]
    outputs_pred_box_y1_clip = [model_name + "@pred_box_y1_clip"]
    outputs_pred_box_x2_clip = [model_name + "@pred_box_x2_clip"]
    outputs_pred_box_y2_clip = [model_name + "@pred_box_y2_clip"]
    min_const_name = model_name + "@pred_box_min_const"
    max_const_name = model_name + "@pred_box_max_const"
    min_const = onnx.helper.make_node(
        'Constant', inputs=[], outputs=[min_const_name],
        value=onnx.helper.make_tensor(
            name=min_const_name, data_type=onnx.TensorProto.FLOAT,
            dims=(), vals=[0.0]))
    node_list.append(min_const)
    max_const = onnx.helper.make_node(
        'Constant', inputs=[], outputs=[max_const_name],
        value=onnx.helper.make_tensor(
            name=max_const_name, data_type=onnx.TensorProto.FLOAT,
            dims=(), vals=[MAX_FLOAT32]))
    node_list.append(max_const)
    node_pred_box_x1_clip = onnx.helper.make_node(
        'Clip',
        inputs=outputs_pred_box_x1_decode + [min_const_name, max_const_name],
        outputs=outputs_pred_box_x1_clip)
    node_list.append(node_pred_box_x1_clip)
    node_pred_box_y1_clip = onnx.helper.make_node(
        'Clip',
        inputs=outputs_pred_box_y1_decode + [min_const_name, max_const_name],
        outputs=outputs_pred_box_y1_clip)
    node_list.append(node_pred_box_y1_clip)
    node_pred_box_x2_clip = onnx.helper.make_node(
        'Clip',
        inputs=outputs_pred_box_x2_sub_w + [min_const_name, max_const_name],
        outputs=outputs_pred_box_x2_clip)
    node_list.append(node_pred_box_x2_clip)
    node_pred_box_y2_clip = onnx.helper.make_node(
        'Clip',
        inputs=outputs_pred_box_y2_sub_h + [min_const_name, max_const_name],
        outputs=outputs_pred_box_y2_clip)
    node_list.append(node_pred_box_y2_clip)

    outputs_pred_box_x2_res = [model_name + "@box_x2_res"]
    node_pred_box_x2_res = onnx.helper.make_node(
        'Sub', inputs=outputs_pred_box_x2_decode + outputs_pred_box_x2_clip,
        outputs=outputs_pred_box_x2_res)
    node_list.append(node_pred_box_x2_res)

    outputs_pred_box_y2_res = [model_name + "@box_y2_res"]
    node_pred_box_y2_res = onnx.helper.make_node(
        'Sub', inputs=outputs_pred_box_y2_decode + outputs_pred_box_y2_clip,
        outputs=outputs_pred_box_y2_res)
)
node_list
.
append
(
node_pred_box_y2_res
)
node_pred_box_result
=
onnx
.
helper
.
make_node
(
'Concat'
,
inputs
=
outputs_pred_box_x1_clip
+
outputs_pred_box_y1_clip
+
outputs_pred_box_x2_res
+
outputs_pred_box_y2_res
,
outputs
=
outputs
[
'Boxes'
],
axis
=-
1
)
node_list
.
append
(
node_pred_box_result
)
score_shape
=
[
1
,
input_height
*
input_width
*
int
(
num_anchors
),
class_num
]
name_score_shape
=
[
model_name
+
"@score_shape"
]
node_score_shape
=
onnx
.
helper
.
make_node
(
"Constant"
,
inputs
=
[],
outputs
=
name_score_shape
,
value
=
onnx
.
helper
.
make_tensor
(
name
=
name_score_shape
[
0
]
+
"@const"
,
data_type
=
onnx
.
TensorProto
.
INT64
,
dims
=
[
len
(
score_shape
)],
vals
=
score_shape
))
node_list
.
append
(
node_score_shape
)
node_score_new_shape
=
onnx
.
helper
.
make_node
(
'Reshape'
,
inputs
=
outputs_score
+
name_score_shape
,
outputs
=
outputs
[
'Scores'
])
node_list
.
append
(
node_score_new_shape
)
return
node_list
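
The block above builds the corner-format boxes entirely from elementwise ONNX ops (Div, Sub, Add, Mul, Clip, Concat). As a minimal NumPy sketch of the same clip-and-concat arithmetic (box and image values below are made up, not taken from this diff):

import numpy as np

MAX_FLOAT32 = np.finfo(np.float32).max

# Hypothetical decoded boxes in (cx, cy, w, h) form, already scaled to image size.
pred = np.array([[120.0, 80.0, 60.0, 40.0]], dtype=np.float32)
img_h, img_w = 200, 300

x1 = pred[:, 0] - pred[:, 2] / 2
y1 = pred[:, 1] - pred[:, 3] / 2
x2 = pred[:, 0] + pred[:, 2] / 2
y2 = pred[:, 1] + pred[:, 3] / 2

# Mirror the Clip/Sub nodes emitted above: clamp x1/y1 at 0 and pull x2/y2
# back inside the (img_w - 1, img_h - 1) range.
x1 = np.clip(x1, 0.0, MAX_FLOAT32)
y1 = np.clip(y1, 0.0, MAX_FLOAT32)
x2 = x2 - np.clip(x2 - (img_w - 1), 0.0, MAX_FLOAT32)
y2 = y2 - np.clip(y2 - (img_h - 1), 0.0, MAX_FLOAT32)

boxes = np.stack([x1, y1, x2, y2], axis=-1)
print(boxes)  # [[ 90.  60. 150. 100.]]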
x2paddle/op_mapper/paddle2onnx/opset9/__init__.py
deleted (100644 → 0)
x2paddle/op_mapper/paddle2onnx/opset9/opset.py
deleted (100644 → 0)
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import sys
import x2paddle
import os
import numpy as np
import paddle.fluid.core as core
import paddle.fluid as fluid
import onnx
from onnx import helper, onnx_pb


class OpSet9(object):
    def __init__(self):
        self.paddle_onnx_dtype_map = {
            core.VarDesc.VarType.FP32: onnx_pb.TensorProto.FLOAT,
            core.VarDesc.VarType.FP64: onnx_pb.TensorProto.DOUBLE,
            core.VarDesc.VarType.INT32: onnx_pb.TensorProto.INT32,
            core.VarDesc.VarType.INT16: onnx_pb.TensorProto.INT16,
            core.VarDesc.VarType.INT16: onnx_pb.TensorProto.UINT16,
            core.VarDesc.VarType.INT64: onnx_pb.TensorProto.INT64,
            core.VarDesc.VarType.BOOL: onnx_pb.TensorProto.BOOL
        }
        self.name_counter = dict()

    def get_name(self, op_name, var_name):
        name = 'p2o.{}.{}'.format(op_name, var_name)
        if name not in self.name_counter:
            self.name_counter[name] = 0
        else:
            self.name_counter[name] += 1
        return name + '.{}'.format(self.name_counter[name])

    def make_constant_node(self, name, dtype, value=None):
        if isinstance(value, list):
            dims = (len(value), )
        elif value is None:
            dims = ()
            value = []
        else:
            dims = ()
            value = [value]
        tensor = helper.make_tensor(name=name, data_type=dtype, dims=dims, vals=value)
        node = helper.make_node('Constant', inputs=[], outputs=[name], value=tensor)
        return node

    def convert_weights(self, program, scope=None):
        var_names = program.global_block().vars
        nodes = list()
        for name in var_names:
            var = program.global_block().var(name)
            if name.endswith('feed') or name.endswith('fetch'):
                continue
            if not var.persistable:
                continue
            weight = np.array(scope.find_var(name).get_tensor())
            tensor = helper.make_tensor(
                name=name,
                dims=var.shape,
                data_type=self.paddle_onnx_dtype_map[var.dtype],
                vals=weight.flatten().tolist())
            node = helper.make_node('Constant', inputs=[], outputs=[name], value=tensor)
            nodes.append(node)
        return nodes

    def conv2d(self, op, block):
        kernel_shape = block.var(op.input('Filter')[0]).shape
        node = helper.make_node(
            'Conv',
            inputs=op.input('Input') + op.input('Filter'),
            outputs=op.output('Output'),
            dilations=op.attr('dilations'),
            kernel_shape=kernel_shape[-2:],
            strides=op.attr('strides'),
            group=op.attr('groups'),
            pads=op.attr('paddings') + op.attr('paddings'))
        return node

    def conv2d_transpose(self, op, block):
        kernel_shape = block.var(op.input('Filter')[0]).shape
        node = helper.make_node(
            'ConvTranspose',
            inputs=op.input('Input') + op.input('Filter'),
            outputs=op.output('Output'),
            dilations=op.attr('dilations'),
            kernel_shape=kernel_shape[-2:],
            strides=op.attr('strides'),
            group=1,
            pads=op.attr('paddings') + op.attr('paddings'))
        return node

    def relu(self, op, block):
        return helper.make_node('Relu', inputs=op.input('X'), outputs=op.output('Out'))

    def prelu(self, op, block):
        return helper.make_node(
            'PRelu',
            inputs=[op.input('X')[0], op.input('Alpha')[0]],
            outputs=op.output('Out'))

    def tanh(self, op, block):
        return helper.make_node('Tanh', inputs=op.input('X'), outputs=op.output('Out'))

    def log(self, op, block):
        return helper.make_node('Log', inputs=op.input('X'), outputs=op.output('Out'))

    def sigmoid(self, op, block):
        return helper.make_node('Sigmoid', inputs=op.input('X'), outputs=op.output('Out'))

    def clip(self, op, block):
        min_value = op.attr('min')
        max_value = op.attr('max')
        node = helper.make_node(
            'Clip',
            inputs=[op.input('X')[0]],
            outputs=op.output('Out'),
            max=max_value,
            min=min_value)
        return node

    def exp(self, op, block):
        return helper.make_node('Exp', inputs=op.input('X'), outputs=op.output('Out'))

    def abs(self, op, block):
        return helper.make_node('Abs', inputs=op.input('X'), outputs=op.output('Out'))

    def leaky_relu(self, op, block):
        return helper.make_node(
            'LeakyRelu',
            inputs=op.input('X'),
            outputs=op.output('Out'),
            alpha=op.attr('alpha'))

    def elementwise_add(self, op, block):
        axis = op.attr('axis')
        x_shape = block.var(op.input('X')[0]).shape
        y_shape = block.var(op.input('Y')[0]).shape
        if len(y_shape) == 1 and axis == 1:
            shape_name = self.get_name(op.type, 'shape')
            shape_value = [1] * len(x_shape)
            shape_value[axis] = y_shape[0]
            shape_node = self.make_constant_node(
                shape_name, onnx_pb.TensorProto.INT64, shape_value)
            temp_value = self.get_name(op.type, 'temp')
            y_node = helper.make_node(
                'Reshape', inputs=[op.input('Y')[0], shape_name], outputs=[temp_value])
            node = helper.make_node(
                'Add', inputs=[op.input('X')[0], temp_value], outputs=op.output('Out'))
            return [shape_node, y_node, node]
        elif axis == -1 or axis == (len(x_shape) - 1) or len(x_shape) == len(y_shape):
            node = helper.make_node(
                'Add',
                inputs=[op.input('X')[0], op.input('Y')[0]],
                outputs=op.output('Out'))
            return node
        else:
            raise Exception("Unexpected situation happened in elementwise_add")

    def elementwise_sub(self, op, block):
        axis = op.attr('axis')
        x_shape = block.var(op.input('X')[0]).shape
        y_shape = block.var(op.input('Y')[0]).shape
        if len(y_shape) == 1 and axis == 1:
            shape_name = self.get_name(op.type, 'shape')
            shape_value = [1] * len(x_shape)
            shape_value[axis] = y_shape[0]
            shape_node = self.make_constant_node(
                shape_name, onnx_pb.TensorProto.INT64, shape_value)
            temp_value = self.get_name(op.type, 'temp')
            y_node = helper.make_node(
                'Reshape', inputs=[op.input('Y')[0], shape_name], outputs=[temp_value])
            node = helper.make_node(
                'Sub', inputs=[op.input('X')[0], temp_value], outputs=op.output('Out'))
            return [shape_node, y_node, node]
        elif axis == -1 or axis == (len(x_shape) - 1) or len(x_shape) == len(y_shape):
            node = helper.make_node(
                'Sub',
                inputs=[op.input('X')[0], op.input('Y')[0]],
                outputs=op.output('Out'))
            return node
        else:
            raise Exception("Unexpected situation happened in elementwise_sub")

    def pool2d(self, op, block):
        pool_type = {
            'max': ('MaxPool', 'GlobalMaxPool'),
            'avg': ('AveragePool', 'GlobalAveragePool')
        }
        if op.attr('global_pooling'):
            node = helper.make_node(
                pool_type[op.attr('pooling_type')][1],
                inputs=op.input('X'),
                outputs=op.output('Out'))
        elif op.attr('adaptive'):
            raise Exception("ONNX cannot support adaptive pool")
        else:
            input_shape = block.var(op.input('X')[0]).shape
            k_size = op.attr('ksize')
            paddings = op.attr('paddings')
            if input_shape[2] > 0 and input_shape[2] + paddings[0] < k_size[0]:
                k_size[0] = input_shape[2] + paddings[0]
            if input_shape[3] > 0 and input_shape[3] + paddings[1] < k_size[1]:
                k_size[1] = input_shape[3] + paddings[1]
            node = helper.make_node(
                pool_type[op.attr('pooling_type')][0],
                inputs=op.input('X'),
                outputs=op.output('Out'),
                kernel_shape=k_size,
                strides=op.attr('strides'),
                pads=op.attr('paddings') + op.attr('paddings'))
        return node

    def pad2d(self, op, block):
        x_shape = block.var(op.input('X')[0]).shape
        paddings = op.attr('paddings')
        onnx_pads = []
        if op.attr('data_format') == 'NCHW':
            pads = [0, 0, paddings[0], paddings[2], 0, 0, paddings[1], paddings[3]]
        else:
            pads = [0, paddings[0], paddings[2], 0, 0, paddings[1], paddings[3], 0]
        # TODO: support pads given as a Variable
        node = helper.make_node(
            'Pad',
            inputs=op.input('X'),
            outputs=op.output('Out'),
            mode=op.attr('mode'),
            value=op.attr('pad_value'),
            pads=pads)
        return node

    def softmax(self, op, block):
        axis = op.attr('axis')
        shape = block.var(op.output('Out')[0]).shape
        if axis < 0:
            axis += len(shape)
        if axis == len(shape) - 1:
            node = helper.make_node(
                'Softmax',
                inputs=op.input('X'),
                outputs=op.output('Out'),
                axis=op.attr('axis'))
            return node
        else:
            perm = [i for i in range(len(shape))]
            perm[-1] = axis
            perm[axis] = len(shape) - 1
            transpose_name0 = self.get_name(op.type, 'transpose')
            transpose_node0 = helper.make_node(
                'Transpose', inputs=op.input('X'), outputs=[transpose_name0], perm=perm)
            softmax_name = self.get_name(op.type, 'softmax')
            softmax_node = helper.make_node(
                'Softmax', inputs=[transpose_name0], outputs=[softmax_name], axis=-1)
            transpose_name1 = self.get_name(op.type, 'transpose')
            transpose_node1 = helper.make_node(
                'Transpose', inputs=[softmax_name], outputs=op.output('Out'), perm=perm)
            return [transpose_node0, softmax_node, transpose_node1]

    def scale(self, op, block):
        scale = op.attr('scale')
        bias = op.attr('bias')
        if math.fabs(scale - 1.0) < 1e-06 and math.fabs(bias - 0.0) < 1e-06:
            node = helper.make_node(
                'Identity', inputs=op.input('X'), outputs=op.output('Out'))
            return node
        else:
            scale_name = self.get_name(op.type, 'scale')
            bias_name = self.get_name(op.type, 'bias')
            scale_node = self.make_constant_node(
                scale_name, onnx_pb.TensorProto.FLOAT, scale)
            bias_node = self.make_constant_node(
                bias_name, onnx_pb.TensorProto.FLOAT, bias)
            temp_tensor_name = self.get_name(op.type, 'temporary')
            if op.attr('bias_after_scale'):
                node1 = helper.make_node(
                    'Mul',
                    inputs=[scale_name, op.input('X')[0]],
                    outputs=[temp_tensor_name])
                node2 = helper.make_node(
                    'Add',
                    inputs=[bias_name, temp_tensor_name],
                    outputs=op.output('Out'))
            else:
                node1 = helper.make_node(
                    'Add',
                    inputs=[bias_name, op.input('X')[0]],
                    outputs=temp_tensor_name)
                node2 = helper.make_node(
                    'Mul',
                    inputs=[scale_name, temp_tensor_name],
                    outputs=[op.output('Out')])
            return [scale_node, bias_node, node1, node2]

    def mul(self, op, block):
        x_shape = block.var(op.input('X')[0]).shape
        y_shape = block.var(op.input('Y')[0]).shape
        out_shape = list(block.var(op.output('Out')[0]).shape)
        x_num_col_dims = op.attr('x_num_col_dims')
        y_num_col_dims = op.attr('y_num_col_dims')
        flatten_x_name = 'flatten_{}'.format(op.input('X')[0])
        flatten_y_name = 'flatten_{}'.format(op.input('Y')[0])
        shape_name = 'temp_shape_{}'.format(op.output('Out')[0])
        temp_out_name = 'temp_{}'.format(op.output('Out')[0])
        flatten_x = helper.make_node(
            'Flatten', inputs=op.input('X'), outputs=[flatten_x_name], axis=x_num_col_dims)
        flatten_y = helper.make_node(
            'Flatten', inputs=op.input('Y'), outputs=[flatten_y_name], axis=y_num_col_dims)
        shape_node = self.make_constant_node(
            shape_name, onnx_pb.TensorProto.INT64, out_shape)
        node = helper.make_node(
            'MatMul', inputs=[flatten_x_name, flatten_y_name], outputs=[temp_out_name])
        reshape_out = helper.make_node(
            'Reshape', inputs=[temp_out_name, shape_name], outputs=op.output('Out'))
        return [flatten_x, flatten_y, shape_node, node, reshape_out]

    def batch_norm(self, op, block):
        kwargs = {'epsilon': op.attr('epsilon'), 'momentum': op.attr('momentum')}
        inputs = op.input('X') + op.input('Scale') + op.input('Bias') + op.input(
            'Mean') + op.input('Variance')
        node = helper.make_node(
            'BatchNormalization', inputs=inputs, outputs=op.output('Y'), **kwargs)
        return node

    def instance_norm(self, op, block):
        kwargs = {'epsilon': op.attr('epsilon'), }
        inputs = op.input('X') + op.input('Scale') + op.input('Bias')
        node = helper.make_node(
            'InstanceNormalization', inputs=inputs, outputs=op.output('Y'), **kwargs)
        return node

    def concat(self, op, block):
        return helper.make_node(
            'Concat',
            inputs=op.input('X'),
            outputs=op.output('Out'),
            axis=op.attr('axis'))

    def sum(self, op, block):
        return helper.make_node('Sum', inputs=op.input('X'), outputs=op.output('Out'))

    def floor(self, op, block):
        return helper.make_node('Floor', inputs=op.input('X'), outputs=op.output('Out'))

    def uniform_random_batch_size_like(self, op, block):
        node = helper.make_node(
            'RandomUniformLike',
            inputs=op.input('Input'),
            outputs=op.output('Out'),
            high=op.attr('max'),
            dtype=self.paddle_onnx_dtype_map[op.attr('dtype')],
            low=op.attr('min'),
            seed=float(op.attr('seed')))
        return node

    def depthwise_conv2d(self, op, block):
        return self.conv2d(op, block)

    def relu6(self, op, block):
        threshold = op.attr('threshold')
        node = helper.make_node(
            'Clip',
            inputs=[op.input('X')[0]],
            outputs=op.output('Out'),
            max=threshold,
            min=0.0)
        return [node]

    def shape(self, op, block):
        return helper.make_node(
            'Shape', inputs=op.input('Input'), outputs=op.output('Out'))

    def split(self, op, block):
        sections = op.attr('sections')
        if len(sections) > 0:
            node = helper.make_node(
                'Split',
                inputs=op.input('X'),
                outputs=op.output('Out'),
                axis=op.attr('axis'),
                split=sections)
        else:
            node = helper.make_node(
                'Split',
                inputs=op.input('X'),
                outputs=op.output('Out'),
                axis=op.attr('axis'))
        return node

    def slice(self, op, block):
        axes = op.attr('axes')
        starts = op.attr('starts')
        ends = op.attr('ends')
        node = helper.make_node(
            "Slice",
            inputs=[op.input('Input')[0]],
            outputs=op.output('Out'),
            axes=axes,
            starts=starts,
            ends=ends)
        return [node]

    def fill_constant(self, op, block):
        value = op.attr('value')
        dtype = op.attr('dtype')
        shape = op.attr('shape')
        value = np.ones(shape) * value
        if dtype == 2:
            value = value.astype('int32')
        node = helper.make_node(
            'Constant',
            inputs=[],
            outputs=op.output('Out'),
            value=helper.make_tensor(
                name=op.output('Out')[0],
                data_type=self.paddle_onnx_dtype_map[dtype],
                dims=shape,
                vals=value.tolist()))
        return node

    def transpose2(self, op, block):
        return helper.make_node(
            'Transpose',
            inputs=op.input('X'),
            outputs=op.output('Out'),
            perm=op.attr('axis'))

    def flatten2(self, op, block):
        return helper.make_node(
            'Flatten',
            inputs=op.input('X'),
            outputs=op.output('Out'),
            axis=op.attr('axis'))

    def reshape2(self, op, block):
        input_names = op.input_names
        if len(op.input('ShapeTensor')) > 1:
            cast_shape_nodes = list()
            cast_shape_names = list()
            for i in range(len(op.input('ShapeTensor'))):
                dim = op.input('ShapeTensor')[i]
                temp_name = self.get_name(op.type, 'shape.cast')
                node = helper.make_node(
                    'Cast', inputs=[dim], outputs=[temp_name],
                    to=onnx_pb.TensorProto.INT64)
                cast_shape_nodes.append(node)
                cast_shape_names.append(temp_name)
            temp_name = self.get_name(op.type, 'shape.concat')
            shape_node = helper.make_node(
                'Concat', inputs=cast_shape_names, outputs=[temp_name], axis=-1)
            node = helper.make_node(
                'Reshape', inputs=[op.input('X')[0], temp_name], outputs=op.output('Out'))
            return cast_shape_nodes + [shape_node, node]
        elif len(op.input('ShapeTensor')) == 1:
            temp_name = self.get_name(op.type, 'shape.cast')
            cast_shape_node = helper.make_node(
                'Cast',
                inputs=op.input('ShapeTensor'),
                outputs=[temp_name],
                to=onnx_pb.TensorProto.INT64)
            node = helper.make_node(
                'Reshape', inputs=[op.input('X')[0], temp_name], outputs=op.output('Out'))
            return [cast_shape_node, node]
        elif op.attr('shape') is not None and len(op.attr('shape')) > 0:
            shape_name = self.get_name(op.type, 'shape')
            shape_node = self.make_constant_node(
                shape_name, onnx_pb.TensorProto.INT64, op.attr('shape'))
            reshape_node = helper.make_node(
                'Reshape', inputs=[op.input('X')[0], shape_name], outputs=op.output('Out'))
            return [shape_node, reshape_node]

    def dropout(self, op, block):
        dropout_mode = op.attr('dropout_implementation')
        dropout_prob = op.attr('dropout_prob')
        if dropout_mode == 'upscale_in_train':
            node = helper.make_node(
                'Identity', inputs=op.input('X'), outputs=op.output('Out'))
            return node
        elif dropout_mode == 'downgrade_in_infer':
            scale_name = self.get_name(op.type, 'scale')
            scale_node = self.make_constant_node(
                scale_name, onnx_pb.TensorProto.FLOAT, 1 - dropout_prob)
            node = helper.make_node(
                "Mul", inputs=[op.input('X')[0], scale_name], outputs=op.output('Out'))
            return [scale_node, node]
        else:
            raise Exception("Unexpected situation happened")

    def reduce_mean(self, op, block):
        return helper.make_node(
            'ReduceMean',
            inputs=op.input('X'),
            outputs=op.output('Out'),
            axes=op.attr('dim'),
            keepdims=op.attr('keep_dim'))

    def bilinear_interp(self, op, block):
        input_names = op.input_names
        input_shape = block.vars[op.input('X')[0]].shape
        if op.attr('align_corners') or op.attr('align_mode') == 0:
            raise Exception(
                "Resize in onnx(opset<=10) only support coordinate_transformation_mode:"
                " 'asymmetric', Try converting with --onnx_opset 11")
        if ('OutSize' in input_names and len(op.input('OutSize')) > 0) or (
                'SizeTensor' in input_names and len(op.input('SizeTensor')) > 0):
            node_list = list()
            shape_name0 = self.get_name(op.type, 'shape')
            shape_node0 = helper.make_node(
                'Shape', inputs=op.input('X'), outputs=[shape_name0])
            starts_name = self.get_name(op.type, 'slice.starts')
            starts_node = self.make_constant_node(
                starts_name, onnx_pb.TensorProto.INT64, [0])
            ends_name = self.get_name(op.type, 'slice.ends')
            ends_node = self.make_constant_node(
                ends_name, onnx_pb.TensorProto.INT64, [2])
            shape_name1 = self.get_name(op.type, 'shape')
            shape_node1 = helper.make_node(
                'Slice',
                inputs=[shape_name0, starts_name, ends_name],
                outputs=[shape_name1])
            node_list.extend([shape_node0, starts_node, ends_node, shape_node1])
            if 'OutSize' in input_names and len(op.input('OutSize')) > 0:
                cast_shape_name = self.get_name(op.type, "shape.cast")
                cast_shape_node = helper.make_node(
                    'Cast',
                    inputs=op.input('OutSize'),
                    outputs=[cast_shape_name],
                    to=onnx_pb.TensorProto.INT64)
                node_list.append(cast_shape_node)
            else:
                concat_shape_name = self.get_name(
                    op.type, op.output('Out')[0] + "shape.concat")
                concat_shape_node = helper.make_node(
                    "Concat",
                    inputs=op.input('SizeTensor'),
                    outputs=[concat_shape_name],
                    axis=0)
                cast_shape_name = self.get_name(op.type, "shape.cast")
                cast_shape_node = helper.make_node(
                    'Cast',
                    inputs=[concat_shape_name],
                    outputs=[cast_shape_name],
                    to=onnx_pb.TensorProto.INT64)
                node_list.extend([concat_shape_node, cast_shape_node])
            shape_name2 = self.get_name(op.type, "shape.concat")
            shape_node2 = helper.make_node(
                'Concat',
                inputs=[shape_name1, cast_shape_name],
                outputs=[shape_name2],
                axis=0)
            node_list.append(shape_node2)
            cast_shape_name2 = self.get_name(op.type, "shape.cast")
            cast_shape_node2 = helper.make_node(
                'Cast',
                inputs=[shape_name2],
                outputs=[cast_shape_name2],
                to=onnx_pb.TensorProto.FLOAT)
            node_list.append(cast_shape_node2)
            cast_shape_name0 = self.get_name(op.type, "shape.cast")
            cast_shape_node0 = helper.make_node(
                'Cast',
                inputs=[shape_name0],
                outputs=[cast_shape_name0],
                to=onnx_pb.TensorProto.FLOAT)
            node_list.append(cast_shape_node0)
            outputs_h_w_scales = op.output('Out')[0] + "@out_hw_scales"
            node_h_w_scales = helper.make_node(
                'Div',
                inputs=[cast_shape_name2, cast_shape_name0],
                outputs=[outputs_h_w_scales])
            node_list.append(node_h_w_scales)
            result_node = helper.make_node(
                'Resize',
                inputs=[op.input('X')[0], outputs_h_w_scales],
                outputs=op.output('Out'),
                mode='linear')
            node_list.extend([result_node])
            return node_list
        elif 'Scale' in input_names and len(op.input('Scale')) > 0:
            node = helper.make_node(
                'Resize',
                inputs=[op.input('X')[0], op.input('Scale')[0]],
                outputs=op.output('Out'),
                mode='linear')
        else:
            out_shape = [op.attr('out_h'), op.attr('out_w')]
            scale = op.attr('scale')
            if out_shape.count(-1) > 0:
                scale_name = self.get_name(op.type, 'scale')
                scale_node = self.make_constant_node(
                    scale_name, onnx_pb.TensorProto.FLOAT, [1, 1, scale, scale])
                node = helper.make_node(
                    'Resize',
                    inputs=[op.input('X')[0], scale_name],
                    outputs=op.output('Out'),
                    mode='linear')
                return [scale_node, node]
            else:
                raise Exception("Unexpected situation happened")
        return node

    def nearest_interp(self, op, block):
        input_names = op.input_names
        if op.attr('align_corners'):
            raise Exception(
                "Resize in onnx(opset<=10) only support coordinate_transformation_mode:"
                " 'asymmetric', Try converting with --onnx_opset 11")
        if 'OutSize' in input_names and len(op.input('OutSize')) > 0:
            node_list = list()
            shape_name0 = self.get_name(op.type, 'shape')
            shape_node0 = helper.make_node(
                'Shape', inputs=op.input('X'), outputs=[shape_name0])
            starts_name = self.get_name(op.type, 'slice.starts')
            starts_node = self.make_constant_node(
                starts_name, onnx_pb.TensorProto.INT64, [0])
            ends_name = self.get_name(op.type, 'slice.ends')
            ends_node = self.make_constant_node(
                ends_name, onnx_pb.TensorProto.INT64, [2])
            shape_name1 = self.get_name(op.type, 'shape')
            shape_node1 = helper.make_node(
                'Slice',
                inputs=[shape_name0, starts_name, ends_name],
                outputs=[shape_name1])
            node_list.extend([shape_node0, starts_node, ends_node, shape_node1])
            if 'OutSize' in input_names and len(op.input('OutSize')) > 0:
                cast_shape_name = self.get_name(op.type, "shape.cast")
                cast_shape_node = helper.make_node(
                    'Cast',
                    inputs=op.input('OutSize'),
                    outputs=[cast_shape_name],
                    to=onnx_pb.TensorProto.INT64)
                node_list.append(cast_shape_node)
            else:
                concat_shape_name = self.get_name(
                    op.type, op.output('Out')[0] + "shape.concat")
                concat_shape_node = helper.make_node(
                    "Concat",
                    inputs=op.input('SizeTensor'),
                    outputs=[concat_shape_name],
                    axis=0)
                cast_shape_name = self.get_name(op.type, "shape.cast")
                cast_shape_node = helper.make_node(
                    'Cast',
                    inputs=[concat_shape_name],
                    outputs=[cast_shape_name],
                    to=onnx_pb.TensorProto.INT64)
                node_list.extend([concat_shape_node, cast_shape_node])
            shape_name2 = self.get_name(op.type, "shape.concat")
            shape_node2 = helper.make_node(
                'Concat',
                inputs=[shape_name1, cast_shape_name],
                outputs=[shape_name2],
                axis=0)
            node_list.append(shape_node2)
            cast_shape_name2 = self.get_name(op.type, "shape.cast")
            cast_shape_node2 = helper.make_node(
                'Cast',
                inputs=[shape_name2],
                outputs=[cast_shape_name2],
                to=onnx_pb.TensorProto.FLOAT)
            node_list.append(cast_shape_node2)
            cast_shape_name0 = self.get_name(op.type, "shape.cast")
            cast_shape_node0 = helper.make_node(
                'Cast',
                inputs=[shape_name0],
                outputs=[cast_shape_name0],
                to=onnx_pb.TensorProto.FLOAT)
            node_list.append(cast_shape_node0)
            outputs_h_w_scales = op.output('Out')[0] + "@out_hw_scales"
            node_h_w_scales = helper.make_node(
                'Div',
                inputs=[cast_shape_name2, cast_shape_name0],
                outputs=[outputs_h_w_scales])
            node_list.append(node_h_w_scales)
            result_node = helper.make_node(
                'Resize',
                inputs=[op.input('X')[0], outputs_h_w_scales],
                outputs=op.output('Out'),
                mode='linear')
            node_list.extend([result_node])
            return node_list
        elif 'Scale' in input_names and len(op.input('Scale')) > 0:
            node = helper.make_node(
                'Resize',
                inputs=[op.input('X')[0], op.input('Scale')[0]],
                outputs=op.output('Out'),
                mode='nearest')
        else:
            out_shape = [op.attr('out_h'), op.attr('out_w')]
            scale = op.attr('scale')
            if out_shape.count(-1) > 0:
                scale_name = self.get_name(op.type, 'scale')
                scale_node = self.make_constant_node(
                    scale_name, onnx_pb.TensorProto.FLOAT, [1, 1, scale, scale])
                node = helper.make_node(
                    'Resize',
                    inputs=[op.input('X')[0], scale_name],
                    outputs=op.output('Out'),
                    mode='nearest')
                return [scale_node, node]
            else:
                raise Exception("Unexpected situation happened")
        return node

    def hard_sigmoid(self, op, block):
        slope = op.attr('slope')
        offset = op.attr('offset')
        node = helper.make_node(
            'HardSigmoid',
            inputs=op.input('X'),
            outputs=op.output('Out'),
            alpha=slope,
            beta=offset)
        return node

    def swish(self, op, block):
        beta = op.attr('beta')
        beta_name = self.get_name(op.type, 'beta')
        beta_node = onnx.helper.make_node(
            'Constant',
            name=beta_name,
            inputs=[],
            outputs=[beta_name],
            value=onnx.helper.make_tensor(
                name=beta_name,
                data_type=onnx.TensorProto.FLOAT,
                dims=(),
                vals=[beta]))
        beta_x_name = self.get_name(op.type, 'beta_x')
        beta_x_node = onnx.helper.make_node(
            'Mul',
            name=beta_x_name,
            inputs=[op.input('X')[0], beta_name],
            outputs=[beta_x_name])
        sigmoid_name = self.get_name(op.type, 'sigmoid')
        sigmoid_node = onnx.helper.make_node(
            'Sigmoid', name=sigmoid_name, inputs=[beta_x_name], outputs=[sigmoid_name])
        swish_node = onnx.helper.make_node(
            'Mul', inputs=[op.input('X')[0], sigmoid_name], outputs=op.output('Out'))
        return [beta_node, beta_x_node, sigmoid_node, swish_node]

    def hard_swish(self, op, block):
        scale_name = self.get_name(op.type, 'scale')
        offset_name = self.get_name(op.type, 'offset')
        scale_node = self.make_constant_node(
            scale_name, onnx_pb.TensorProto.FLOAT, op.attr('scale'))
        offset_node = self.make_constant_node(
            offset_name, onnx_pb.TensorProto.FLOAT, op.attr('offset'))
        name0 = self.get_name(op.type, 'add')
        node0 = helper.make_node(
            'Add', inputs=[op.input('X')[0], offset_name], outputs=[name0])
        name1 = self.get_name(op.type, 'relu')
        min_value = 0.0
        max_value = op.attr('threshold')
        node1 = helper.make_node(
            'Clip', inputs=[name0], outputs=[name1], max=max_value, min=min_value)
        name2 = self.get_name(op.type, 'mul')
        node2 = helper.make_node(
            'Mul', inputs=[op.input('X')[0], name1], outputs=[name2])
        node3 = helper.make_node(
            'Div', inputs=[name2, scale_name], outputs=op.output('Out'))
        return [scale_node, offset_node, node0, node1, node2, node3]

    def elementwise_mul(self, op, block):
        axis = op.attr('axis')
        x_shape = block.var(op.input('X')[0]).shape
        y_shape = block.var(op.input('Y')[0]).shape
        if len(y_shape) == 1 and axis == 1:
            shape_name = self.get_name(op.type, 'shape')
            shape_value = [1] * len(x_shape)
            shape_value[axis] = y_shape[0]
            shape_node = self.make_constant_node(
                shape_name, onnx_pb.TensorProto.INT64, shape_value)
            temp_value = self.get_name(op.type, 'temp')
            y_node = helper.make_node(
                'Reshape', inputs=[op.input('Y')[0], shape_name], outputs=[temp_value])
            node = helper.make_node(
                'Mul', inputs=[op.input('X')[0], temp_value], outputs=op.output('Out'))
            return [shape_node, y_node, node]
        elif axis == -1 or axis == (len(x_shape) - 1) or len(x_shape) == len(y_shape):
            node = helper.make_node(
                'Mul',
                inputs=[op.input('X')[0], op.input('Y')[0]],
                outputs=op.output('Out'))
            return node
        else:
            raise Exception("Unexpected situation happened in elementwise_mul")
        return node

    def feed(self, op, block):
        name = op.output('Out')[0]
        var = block.var(name)
        tensor_info = helper.make_tensor_value_info(
            name=name,
            shape=var.shape,
            elem_type=self.paddle_onnx_dtype_map[var.dtype])
        return tensor_info

    def fetch(self, op, block):
        name = op.input('X')[0]
        var = block.var(name)
        tensor_info = helper.make_tensor_value_info(
            name=name,
            shape=var.shape,
            elem_type=self.paddle_onnx_dtype_map[var.dtype])
        return tensor_info

    def unsqueeze2(self, op, block):
        return helper.make_node(
            'Unsqueeze',
            inputs=op.input('X'),
            outputs=op.output('Out'),
            axes=op.attr('axes'))

    def cast(self, op, block):
        return helper.make_node(
            'Cast',
            inputs=op.input('X'),
            outputs=op.output('Out'),
            to=self.paddle_onnx_dtype_map[op.attr('out_dtype')])

    def arg_max(self, op, block):
        return helper.make_node(
            'ArgMax',
            inputs=op.input('X'),
            outputs=op.output('Out'),
            axis=op.attr('axis'),
            keepdims=0)

    def reciprocal(self, op, block):
        inputs = op.input(op.input_names[0])
        outputs = op.output(op.output_names[0])
        return helper.make_node('Reciprocal', inputs=inputs, outputs=outputs)

    def im2sequence(self, op, block):
        from .paddle_custom_layer.im2sequence import im2sequence
        return im2sequence(op, block)

    def yolo_box(self, op, block):
        from .paddle_custom_layer.yolo_box import yolo_box
        return yolo_box(op, block)

    def multiclass_nms(self, op, block):
        from .paddle_custom_layer.multiclass_nms import multiclass_nms
        return multiclass_nms(op, block)

    def box_coder(self, op, block):
        from .paddle_custom_layer.box_coder import box_coder
        return box_coder(op, block)

    def prior_box(self, op, block):
        from .paddle_custom_layer.prior_box import prior_box
        return prior_box(op, block)
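
Every `OpSet9` method returns one ONNX `NodeProto` (or a list of them) for a single Paddle operator. The driver that dispatches on `op.type` lives in `paddle_op_mapper.py`, which this commit also removes but which is not shown in this hunk; the loop below is only an illustrative sketch of that dispatch pattern, not the actual mapper code:

# Illustrative driver only; the function and variable names here are assumptions.
def convert_block(opset, block):
    onnx_nodes = []
    for op in block.ops:
        if op.type in ('feed', 'fetch'):
            continue  # handled separately as graph inputs/outputs
        mapper = getattr(opset, op.type, None)
        if mapper is None:
            raise NotImplementedError("No OpSet9 mapping for op '{}'".format(op.type))
        result = mapper(op, block)
        # Mapper methods return either a single NodeProto or a list of them.
        onnx_nodes.extend(result if isinstance(result, list) else [result])
    return onnx_nodes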
x2paddle/op_mapper/paddle2onnx/opset9/paddle_custom_layer/__init__.py
deleted (100644 → 0)
x2paddle/op_mapper/paddle2onnx/opset9/paddle_custom_layer/box_coder.py
deleted (100644 → 0)
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import math
import onnx
import warnings
import numpy as np
from functools import partial
from onnx import TensorProto
from onnx.helper import make_node, make_tensor
from onnx import onnx_pb
from paddle.fluid.executor import _fetch_var as fetch_var
from onnx import helper
import paddle.fluid as fluid
import paddle.fluid.core as core


def box_coder(op, block):
    """
    Decode prior boxes to target boxes; only the decode mode of this op is
    translated here.
    """
    node_list = []
    input_names = op.input_names

    prior_var = block.var(op.input('PriorBox')[0])
    t_size = block.var(op.input('TargetBox')[0]).shape
    p_size = prior_var.shape

    # get the output name
    result_name = op.output('OutputBox')[0]
    # n is the batch size, m is the number of target boxes
    n = t_size[0]
    m = t_size[0]

    axis = int(op.attr('axis'))

    # norm
    norm = bool(op.attr('box_normalized'))

    name_slice_x1 = op.output('OutputBox')[0] + "@x1"
    name_slice_y1 = op.output('OutputBox')[0] + "@y1"
    name_slice_x2 = op.output('OutputBox')[0] + "@x2"
    name_slice_y2 = op.output('OutputBox')[0] + "@y2"

    # make onnx tensors to hold the intermediate results
    name_slice_indices = [[op.output('OutputBox')[0] + "@slice_" + str(i)]
                          for i in range(1, 3)]
    node_slice_indices = [None for i in range(1, 3)]

    # create the constant data used for slicing
    for i in range(1, 3):
        node = onnx.helper.make_node(
            'Constant',
            inputs=[],
            outputs=name_slice_indices[i - 1],
            value=onnx.helper.make_tensor(
                name=name_slice_indices[i - 1][0] + "@const",
                data_type=onnx.TensorProto.FLOAT,
                dims=(),
                vals=[i]))
        node_list.append(node)

    # make the node that splits the prior box data
    name_box_split = [name_slice_x1, name_slice_y1, name_slice_x2, name_slice_y2]
    split_shape = list(p_size)
    split_shape[-1] = 1

    node_split_prior_node = onnx.helper.make_node(
        'Split', inputs=op.input('PriorBox'), outputs=name_box_split, axis=1)
    node_list.append(node_split_prior_node)

    # make nodes that compute the prior-box centre for decoding
    final_outputs_vars = []
    if not norm:
        name_centor_w_tmp = [op.output('OutputBox')[0] + "@centor_w_tmp"]
        name_centor_h_tmp = [op.output('OutputBox')[0] + "@centor_h_tmp"]
        node_centor_w_tmp = None
        node_centor_h_tmp = None
        name_centor_tmp_list = [name_centor_w_tmp, name_centor_h_tmp]
        node_centor_tmp_list = [node_centor_w_tmp, node_centor_h_tmp]

        count = 2
        for (name, node) in zip(name_centor_tmp_list, node_centor_tmp_list):
            node = onnx.helper.make_node(
                'Add',
                inputs=[op.output('OutputBox')[0] + "@slice_" + str(1)] +
                [name_box_split[count]],
                outputs=name)
            node_list.append(node)
            count = count + 1
    if not norm:
        inputs_sub = [[name_centor_w_tmp[0], name_box_split[0]],
                      [name_centor_h_tmp[0], name_box_split[1]]]
    else:
        inputs_sub = [[name_box_split[2], name_box_split[0]],
                      [name_box_split[3], name_box_split[1]]]
    outputs_sub = [result_name + "@pb_w", result_name + "@pb_h"]
    for i in range(0, 2):
        node = onnx.helper.make_node(
            'Sub', inputs=inputs_sub[i], outputs=[outputs_sub[i]])
        node_list.append(node)

    # derive the centre x, y from the prior-box height and width
    name_half_value = [result_name + "@half_value"]
    node_half_value = onnx.helper.make_node(
        'Constant',
        inputs=[],
        outputs=name_half_value,
        value=onnx.helper.make_tensor(
            name=name_slice_indices[i][0] + "@const",
            data_type=onnx.TensorProto.FLOAT,
            dims=(),
            vals=[0.5]))
    node_list.append(node_half_value)
    outputs_half_wh = [[result_name + "@pb_w_half"], [result_name + "@pb_h_half"]]
    inputs_half_wh = [[result_name + "@pb_w", name_half_value[0]],
                      [result_name + "@pb_h", name_half_value[0]]]

    for i in range(0, 2):
        node = onnx.helper.make_node(
            'Mul', inputs=inputs_half_wh[i], outputs=outputs_half_wh[i])
        node_list.append(node)

    inputs_centor_xy = [[outputs_half_wh[0][0], name_slice_x1],
                        [outputs_half_wh[1][0], name_slice_y1]]
    outputs_centor_xy = [[result_name + "@pb_x"], [result_name + "@pb_y"]]

    # finally compute the centre x, y
    for i in range(0, 2):
        node = onnx.helper.make_node(
            'Add', inputs=inputs_centor_xy[i], outputs=outputs_centor_xy[i])
        node_list.append(node)

    # reshape the data
    shape = (1, split_shape[0]) if axis == 0 else (split_shape[0], 1)

    # the data needs to be reshaped
    inputs_transpose_pb = [
        [result_name + "@pb_w"],
        [result_name + "@pb_h"],
        [result_name + "@pb_x"],
        [result_name + "@pb_y"],
    ]
    outputs_transpose_pb = [
        [result_name + "@pb_w_transpose"],
        [result_name + "@pb_h_transpose"],
        [result_name + "@pb_x_transpose"],
        [result_name + "@pb_y_transpose"],
    ]
    if axis == 0:
        name_reshape_pb = [result_name + "@pb_transpose"]
        # reshape the data
        for i in range(0, 4):
            node = onnx.helper.make_node(
                'Transpose',
                inputs=inputs_transpose_pb[i],
                outputs=outputs_transpose_pb[i])
            node_list.append(node)

    # decode the box according to the target box and the variance
    name_variance_raw = [result_name + "@variance_raw"]
    name_variance_unsqueeze = [result_name + "@variance_unsqueeze"]
    shape = []
    # make the node that extends the data
    var_split_axis = 0
    var_split_inputs_name = []
    if 'PriorBoxVar' in input_names and len(op.input('PriorBoxVar')) > 0:
        if axis == 1:
            raise Exception(
                "The op box_coder with a PriorBoxVar variable does not support axis broadcast")
        prior_variance_var = block.var(op.input('PriorBoxVar')[0])
        axes = []
        var_split_inputs_name = [result_name + "@variance_split"]
        node = onnx.helper.make_node(
            'Transpose',
            inputs=op.input('PriorBoxVar'),
            outputs=var_split_inputs_name)
        node_list.append(node)
        var_split_axis = 0
    else:
        variances = [1.0, 1.0, 1.0, 1.0]
        if 'variance' in op.attr_names and len(op.attr('variance')) > 0:
            variances = [float(var) for var in op.attr('variance')]
        node_variance_create = onnx.helper.make_node(
            'Constant',
            inputs=[],
            outputs=name_variance_raw,
            value=onnx.helper.make_tensor(
                name=name_variance_raw[0] + "@const",
                data_type=onnx.TensorProto.FLOAT,
                dims=[len(variances)],
                vals=variances))
        node_list.append(node_variance_create)
        var_split_axis = 0
        var_split_inputs_name = name_variance_raw

    # decode the result
    outputs_split_variance = [
        result_name + "@variance_split" + str(i) for i in range(0, 4)
    ]
    outputs_split_targebox = [
        result_name + "@targebox_split" + str(i) for i in range(0, 4)
    ]
    node_split_var = onnx.helper.make_node(
        'Split',
        inputs=var_split_inputs_name,
        outputs=outputs_split_variance,
        axis=var_split_axis)
    node_split_target = onnx.helper.make_node(
        'Split',
        inputs=op.input('TargetBox'),
        outputs=outputs_split_targebox,
        axis=2)
    node_list.extend([node_split_var, node_split_target])

    outputs_squeeze_targebox = [
        result_name + "@targebox_squeeze" + str(i) for i in range(0, 4)
    ]
    for (input_name, output_name) in zip(outputs_split_targebox,
                                         outputs_squeeze_targebox):
        node = onnx.helper.make_node(
            'Squeeze', inputs=[input_name], outputs=[output_name], axes=[2])
        node_list.append(node)

    output_shape_step1 = list(t_size)[:-1]
    inputs_tb_step1 = [
        [outputs_squeeze_targebox[0], outputs_split_variance[0]],
        [outputs_squeeze_targebox[1], outputs_split_variance[1]],
        [outputs_squeeze_targebox[2], outputs_split_variance[2]],
        [outputs_squeeze_targebox[3], outputs_split_variance[3]]
    ]
    outputs_tb_step1 = [[result_name + "@decode_x_step1"],
                        [result_name + "@decode_y_step1"],
                        [result_name + "@decode_w_step1"],
                        [result_name + "@decode_h_step1"]]

    for input_step1, output_step_1 in zip(inputs_tb_step1, outputs_tb_step1):
        node = onnx.helper.make_node(
            'Mul', inputs=input_step1, outputs=output_step_1)
        node_list.append(node)
    if axis == 0:
        inputs_tbxy_step2 = [
            [outputs_tb_step1[0][0], outputs_transpose_pb[0][0]],
            [outputs_tb_step1[1][0], outputs_transpose_pb[1][0]]
        ]
    else:
        inputs_tbxy_step2 = [
            [outputs_tb_step1[0][0], inputs_transpose_pb[0][0]],
            [outputs_tb_step1[1][0], inputs_transpose_pb[1][0]]
        ]
    outputs_tbxy_step2 = [[result_name + "@decode_x_step2"],
                          [result_name + "@decode_y_step2"]]

    for input_step2, output_step_2 in zip(inputs_tbxy_step2, outputs_tbxy_step2):
        node = onnx.helper.make_node(
            'Mul', inputs=input_step2, outputs=output_step_2)
        node_list.append(node)
    if axis == 0:
        inputs_tbxy_step3 = [
            [outputs_tbxy_step2[0][0], outputs_transpose_pb[2][0]],
            [outputs_tbxy_step2[1][0], outputs_transpose_pb[3][0]]
        ]
    else:
        inputs_tbxy_step3 = [
            [outputs_tbxy_step2[0][0], inputs_transpose_pb[2][0]],
            [outputs_tbxy_step2[1][0], inputs_transpose_pb[3][0]]
        ]
    outputs_tbxy_step3 = [[result_name + "@decode_x_step3"],
                          [result_name + "@decode_y_step3"]]

    for input_step3, output_step_3 in zip(inputs_tbxy_step3, outputs_tbxy_step3):
        node = onnx.helper.make_node(
            'Add', inputs=input_step3, outputs=output_step_3)
        node_list.append(node)

    # deal with width & height
    inputs_tbwh_step2 = [outputs_tb_step1[2], outputs_tb_step1[3]]
    outputs_tbwh_step2 = [[result_name + "@decode_w_step2"],
                          [result_name + "@decode_h_step2"]]

    for input_name, output_name in zip(inputs_tbwh_step2, outputs_tbwh_step2):
        node = onnx.helper.make_node('Exp', inputs=input_name, outputs=output_name)
        node_list.append(node)

    if axis == 0:
        inputs_tbwh_step3 = [
            [outputs_tbwh_step2[0][0], outputs_transpose_pb[0][0]],
            [outputs_tbwh_step2[1][0], outputs_transpose_pb[1][0]]
        ]
    else:
        inputs_tbwh_step3 = [
            [outputs_tbwh_step2[0][0], inputs_transpose_pb[0][0]],
            [outputs_tbwh_step2[1][0], inputs_transpose_pb[1][0]]
        ]
    outputs_tbwh_step3 = [[result_name + "@decode_w_step3"],
                          [result_name + "@decode_h_step3"]]

    for input_name, output_name in zip(inputs_tbwh_step3, outputs_tbwh_step3):
        node = onnx.helper.make_node('Mul', inputs=input_name, outputs=output_name)
        node_list.append(node)

    # final step: compute the result and concat it into the output
    # returns the output boxes as [(x1, y1), (x2, y2)]
    inputs_half_tbwh_step4 = [
        [outputs_tbwh_step3[0][0], result_name + "@slice_2"],
        [outputs_tbwh_step3[1][0], result_name + "@slice_2"]
    ]
    outputs_half_tbwh_step4 = [[result_name + "@decode_half_w_step4"],
                               [result_name + "@decode_half_h_step4"]]
    for inputs_name, outputs_name in zip(inputs_half_tbwh_step4,
                                         outputs_half_tbwh_step4):
        node = onnx.helper.make_node('Div', inputs=inputs_name, outputs=outputs_name)
        node_list.append(node)

    inputs_output_point1 = [
        [outputs_tbxy_step3[0][0], outputs_half_tbwh_step4[0][0]],
        [outputs_tbxy_step3[1][0], outputs_half_tbwh_step4[1][0]]
    ]
    outputs_output_point1 = [[result_name + "@ouput_x1"],
                             [result_name + "@output_y1"]]
    for input_name, output_name in zip(inputs_output_point1, outputs_output_point1):
        node = onnx.helper.make_node('Sub', inputs=input_name, outputs=output_name)
        node_list.append(node)

    inputs_output_point2 = [
        [outputs_tbxy_step3[0][0], outputs_half_tbwh_step4[0][0]],
        [outputs_tbxy_step3[1][0], outputs_half_tbwh_step4[1][0]]
    ]
    outputs_output_point2 = [[result_name + "@ouput_x2"],
                             [result_name + "@output_y2"]]
    for input_name, output_name in zip(inputs_output_point2, outputs_output_point2):
        node = onnx.helper.make_node('Add', inputs=input_name, outputs=output_name)
        node_list.append(node)

    if not norm:
        inputs_unnorm_point2 = [
            [outputs_output_point2[0][0], result_name + "@slice_1"],
            [outputs_output_point2[1][0], result_name + "@slice_1"]
        ]
        outputs_unnorm_point2 = [[result_name + "@ouput_unnorm_x2"],
                                 [result_name + "@ouput_unnorm_y2"]]
        for input_name, output_name in zip(inputs_unnorm_point2,
                                           outputs_unnorm_point2):
            node = onnx.helper.make_node('Sub', inputs=input_name, outputs=output_name)
            node_list.append(node)
        outputs_output_point2 = outputs_unnorm_point2

    outputs_output_point1.extend(outputs_output_point2)
    ouputs_points_unsqueeze = [[result_name + "@points_unsqueeze_x1"],
                               [result_name + "points_unsqueeze_y1"],
                               [result_name + "points_unsqueeze_x2"],
                               [result_name + "points_unsqueeze_y2"]]
    for input_name, output_name in zip(outputs_output_point1,
                                       ouputs_points_unsqueeze):
        node = onnx.helper.make_node(
            'Unsqueeze',
            inputs=input_name,
            outputs=output_name,
            axes=[len(output_shape_step1)])
        node_list.append(node)
    outputs_points_unsqueeze_list = [
        output[0] for output in ouputs_points_unsqueeze
    ]
    node_point_final = onnx.helper.make_node(
        'Concat',
        inputs=outputs_points_unsqueeze_list,
        outputs=op.output('OutputBox'),
        axis=len(output_shape_step1))
    node_list.append(node_point_final)
    return node_list
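
The node graph above is the decode branch of Paddle's box_coder: scale the encoded offsets by the variances, shift by the prior-box centre, exponentiate the width/height terms, and emit corner coordinates. A rough NumPy equivalent of that arithmetic (the helper name and default variance below are illustrative assumptions, not part of the original file):

import numpy as np

def decode_boxes(prior_box, target_box, variance=(1.0, 1.0, 1.0, 1.0), normalized=True):
    # prior_box, target_box: (N, 4) arrays; the offset follows box_normalized.
    offset = 0.0 if normalized else 1.0
    pb_w = prior_box[:, 2] - prior_box[:, 0] + offset
    pb_h = prior_box[:, 3] - prior_box[:, 1] + offset
    pb_x = prior_box[:, 0] + 0.5 * pb_w
    pb_y = prior_box[:, 1] + 0.5 * pb_h

    # Encoded offsets (dx, dy, dw, dh) scaled by the per-coordinate variances.
    out_x = variance[0] * target_box[:, 0] * pb_w + pb_x
    out_y = variance[1] * target_box[:, 1] * pb_h + pb_y
    out_w = np.exp(variance[2] * target_box[:, 2]) * pb_w
    out_h = np.exp(variance[3] * target_box[:, 3]) * pb_h

    x1 = out_x - out_w / 2
    y1 = out_y - out_h / 2
    x2 = out_x + out_w / 2 - offset
    y2 = out_y + out_h / 2 - offset
    return np.stack([x1, y1, x2, y2], axis=-1)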
x2paddle/op_mapper/paddle2onnx/opset9/paddle_custom_layer/im2sequence.py
deleted (100644 → 0)
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import onnx
import numpy as np
from onnx import onnx_pb, helper

im2seq_counter = 0


def im2sequence(op, block):
    global im2sequence_counter
    n, c, h, w = block.var(op.input('X')[0]).shape
    assert h > 0 and w > 0, "Only supported fixed input shape for im2sequence operator."
    stride_h, stride_w = op.attr('strides')
    paddings = op.attr('paddings')
    assert op.attr(
        'out_stride') != 1, "Only out_stride==1 is supported for im2sequence operator."
    h = h + paddings[0] + paddings[1]
    w = w + paddings[1] + paddings[2]
    kernel_h, kernel_w = op.attr('kernels')
    out_h = 1 + (h - kernel_h + stride_h - 1) // stride_h
    out_w = 1 + (w - kernel_w + stride_w - 1) // stride_w
    h_steps = list()
    for i in range(out_h):
        h_steps.append([i * stride_h, i * stride_h + kernel_h])
    w_steps = list()
    for i in range(out_w):
        w_steps.append([i * stride_w, i * stride_w + kernel_w])

    nodes = list()
    slice_blocks = list()
    for i in range(out_h):
        for j in range(out_w):
            starts_name = "im2sequence.starts.{}.{}.{}".format(im2seq_counter, i, j)
            starts_tensor = helper.make_tensor(
                name=starts_name,
                data_type=onnx_pb.TensorProto.INT64,
                dims=[4],
                vals=[0, 0, h_steps[i][0], w_steps[j][0]])
            ends_name = "im2sequence.ends.{}.{}.{}".format(im2seq_counter, i, j)
            ends_tensor = helper.make_tensor(
                name=ends_name,
                data_type=onnx_pb.TensorProto.INT64,
                dims=[4],
                vals=[999999, 999999, h_steps[i][1], w_steps[j][1]])
            starts_node = helper.make_node(
                'Constant', inputs=[], outputs=[starts_name], value=starts_tensor)
            ends_node = helper.make_node(
                'Constant', inputs=[], outputs=[ends_name], value=ends_tensor)
            nodes.extend([starts_node, ends_node])

            slice_block_name = "im2sequence.slice.{}.{}.{}".format(im2seq_counter, i, j)
            slice_block_node = helper.make_node(
                'Slice',
                inputs=[op.input('X')[0], starts_name, ends_name],
                outputs=[slice_block_name])
            flatten_block_name = "im2sequence.flatten.{}.{}.{}".format(
                im2seq_counter, i, j)
            flatten_block_node = helper.make_node(
                "Flatten",
                inputs=[slice_block_name],
                outputs=[flatten_block_name],
                axis=0)
            nodes.extend([slice_block_node, flatten_block_node])
            slice_blocks.append(flatten_block_name)
    concat_block_name = "im2sequence.concat_block.{}".format(im2seq_counter)
    # concat_block_node = helper.make_node("Concat", inputs=slice_blocks, outputs=[concat_block_name], axis=0)
    concat_block_node = helper.make_node(
        "Concat", inputs=slice_blocks, outputs=op.output('Out'), axis=0)
    nodes.append(concat_block_node)
    print("\n\n==========Importance Notice===========")
    print(
        "Since im2sequence operator is used in your paddlepaddle model, the translated onnx model only support input data with batch_size=1."
    )
    print("======================================\n")
    return nodes
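
As a quick sanity check of the window arithmetic above, the number of emitted Slice + Flatten pairs equals out_h * out_w (the values below are assumed, not taken from the diff):

# Assumed example: 8x8 feature map, 4x4 kernel, stride 2, no padding.
h, w, kernel_h, kernel_w, stride_h, stride_w = 8, 8, 4, 4, 2, 2
out_h = 1 + (h - kernel_h + stride_h - 1) // stride_h
out_w = 1 + (w - kernel_w + stride_w - 1) // stride_w
print(out_h, out_w, out_h * out_w)  # 3 3 9 -> nine Slice + Flatten pairs are emitted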
x2paddle/op_mapper/paddle2onnx/opset9/paddle_custom_layer/multiclass_nms.py
deleted (100644 → 0)
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import sys
import os
import numpy as np
import paddle.fluid.core as core
import paddle.fluid as fluid
import onnx
import logging
from onnx import helper, onnx_pb


def multiclass_nms(op, block):
    """
    Convert the paddle multiclass_nms to onnx op.
    This op selects boxes from the original boxes.
    """
    inputs = dict()
    outputs = dict()
    attrs = dict()
    for name in op.input_names:
        inputs[name] = op.input(name)
    for name in op.output_names:
        outputs[name] = op.output(name)
    for name in op.attr_names:
        attrs[name] = op.attr(name)

    result_name = outputs['Out'][0]
    background = attrs['background_label']
    normalized = attrs['normalized']
    if normalized == False:
        logging.warn(
            "The parameter normalized of multiclass_nms OP of Paddle is False, which has diff with ONNX."
            " Please set normalized=True in multiclass_nms of Paddle, see doc Q4 in https://github.com/PaddlePaddle/X2Paddle/blob/develop/FAQ.md"
        )

    # convert the paddle attributes to onnx tensors
    name_score_threshold = [outputs['Out'][0] + "@score_threshold"]
    name_iou_threshold = [outputs['Out'][0] + "@iou_threshold"]
    name_keep_top_k = [outputs['Out'][0] + '@keep_top_k']
    name_keep_top_k_2D = [outputs['Out'][0] + '@keep_top_k_1D']

    node_score_threshold = onnx.helper.make_node(
        'Constant',
        inputs=[],
        outputs=name_score_threshold,
        value=onnx.helper.make_tensor(
            name=name_score_threshold[0] + "@const",
            data_type=onnx.TensorProto.FLOAT,
            dims=(),
            vals=[float(attrs['score_threshold'])]))

    node_iou_threshold = onnx.helper.make_node(
        'Constant',
        inputs=[],
        outputs=name_iou_threshold,
        value=onnx.helper.make_tensor(
            name=name_iou_threshold[0] + "@const",
            data_type=onnx.TensorProto.FLOAT,
            dims=(),
            vals=[float(attrs['nms_threshold'])]))

    boxes_num = block.var(outputs['Out'][0]).shape[0]
    top_k_value = np.int64(boxes_num if attrs['keep_top_k'] == -1 else attrs['keep_top_k'])
    node_keep_top_k = onnx.helper.make_node(
        'Constant',
        inputs=[],
        outputs=name_keep_top_k,
        value=onnx.helper.make_tensor(
            name=name_keep_top_k[0] + "@const",
            data_type=onnx.TensorProto.INT64,
            dims=(),
            vals=[top_k_value]))

    node_keep_top_k_2D = onnx.helper.make_node(
        'Constant',
        inputs=[],
        outputs=name_keep_top_k_2D,
        value=onnx.helper.make_tensor(
            name=name_keep_top_k_2D[0] + "@const",
            data_type=onnx.TensorProto.INT64,
            dims=[1, 1],
            vals=[top_k_value]))

    # the paddle data format is x1,y1,x2,y2
    kwargs = {'center_point_box': 0}

    name_select_nms = [outputs['Out'][0] + "@select_index"]
    node_select_nms = onnx.helper.make_node(
        'NonMaxSuppression',
        inputs=inputs['BBoxes'] + inputs['Scores'] + name_keep_top_k +
        name_iou_threshold + name_score_threshold,
        outputs=name_select_nms)
    # step 1 nodes: select the nms class
    node_list = [
        node_score_threshold, node_iou_threshold, node_keep_top_k,
        node_keep_top_k_2D, node_select_nms
    ]

    # create some const values to use
    name_const_value = [
        result_name + "@const_0", result_name + "@const_1",
        result_name + "@const_2", result_name + "@const_-1"
    ]
    value_const_value = [0, 1, 2, -1]
    for name, value in zip(name_const_value, value_const_value):
        node = onnx.helper.make_node(
            'Constant',
            inputs=[],
            outputs=[name],
            value=onnx.helper.make_tensor(
                name=name + "@const",
                data_type=onnx.TensorProto.INT64,
                dims=[1],
                vals=[value]))
        node_list.append(node)

    # In this code block, we decode the raw score data, reshape N * C * M to 1 * N*C*M
    # and, at the same time, decode the select indices to 1 * D and gather them.
    outputs_gather_1 = [result_name + "@gather_1"]
    node_gather_1 = onnx.helper.make_node(
        'Gather',
        inputs=name_select_nms + [result_name + "@const_1"],
        outputs=outputs_gather_1,
        axis=1)
    node_list.append(node_gather_1)
    outputs_squeeze_gather_1 = [result_name + "@sequeeze_gather_1"]
    node_squeeze_gather_1 = onnx.helper.make_node(
        'Squeeze',
        inputs=outputs_gather_1,
        outputs=outputs_squeeze_gather_1,
        axes=[1])
    node_list.append(node_squeeze_gather_1)
    outputs_gather_2 = [result_name + "@gather_2"]
    node_gather_2 = onnx.helper.make_node(
        'Gather',
        inputs=name_select_nms + [result_name + "@const_2"],
        outputs=outputs_gather_2,
        axis=1)
    node_list.append(node_gather_2)

    # slice out the entries whose class is not the background
    if background == 0:
        outputs_nonzero = [result_name + "@nonzero"]
        node_nonzero = onnx.helper.make_node(
            'NonZero', inputs=outputs_squeeze_gather_1, outputs=outputs_nonzero)
        node_list.append(node_nonzero)
    else:
        name_thresh = [result_name + "@thresh"]
        node_thresh = onnx.helper.make_node(
            'Constant',
            inputs=[],
            outputs=name_thresh,
            value=onnx.helper.make_tensor(
                name=name_thresh[0] + "@const",
                data_type=onnx.TensorProto.INT32,
                dims=[1],
                vals=[-1]))
        node_list.append(node_thresh)

        outputs_cast = [result_name + "@cast"]
        node_cast = onnx.helper.make_node(
            'Cast', inputs=outputs_squeeze_gather_1, outputs=outputs_cast, to=6)
        node_list.append(node_cast)

        outputs_greater = [result_name + "@greater"]
        node_greater = onnx.helper.make_node(
            'Greater', inputs=outputs_cast + name_thresh, outputs=outputs_greater)
        node_list.append(node_greater)

        outputs_nonzero = [result_name + "@nonzero"]
        node_nonzero = onnx.helper.make_node(
            'NonZero', inputs=outputs_greater, outputs=outputs_nonzero)
        node_list.append(node_nonzero)

    outputs_gather_1_nonzero = [result_name + "@gather_1_nonzero"]
    node_gather_1_nonzero = onnx.helper.make_node(
        'Gather',
        inputs=outputs_gather_1 + outputs_nonzero,
        outputs=outputs_gather_1_nonzero,
        axis=0)
    node_list.append(node_gather_1_nonzero)

    outputs_gather_2_nonzero = [result_name + "@gather_2_nonzero"]
    node_gather_2_nonzero
=
onnx
.
helper
.
make_node
(
'Gather'
,
inputs
=
outputs_gather_2
+
outputs_nonzero
,
outputs
=
outputs_gather_2_nonzero
,
axis
=
0
)
node_list
.
append
(
node_gather_2_nonzero
)
# reshape scores N * C * M to (N*C*M) * 1
outputs_reshape_scores_rank1
=
[
result_name
+
"@reshape_scores_rank1"
]
node_reshape_scores_rank1
=
onnx
.
helper
.
make_node
(
"Reshape"
,
inputs
=
inputs
[
'Scores'
]
+
[
result_name
+
"@const_-1"
],
outputs
=
outputs_reshape_scores_rank1
)
node_list
.
append
(
node_reshape_scores_rank1
)
# get the shape of scores
outputs_shape_scores
=
[
result_name
+
"@shape_scores"
]
node_shape_scores
=
onnx
.
helper
.
make_node
(
'Shape'
,
inputs
=
inputs
[
'Scores'
],
outputs
=
outputs_shape_scores
)
node_list
.
append
(
node_shape_scores
)
# gather the index: 2 shape of scores
outputs_gather_scores_dim1
=
[
result_name
+
"@gather_scores_dim1"
]
node_gather_scores_dim1
=
onnx
.
helper
.
make_node
(
'Gather'
,
inputs
=
outputs_shape_scores
+
[
result_name
+
"@const_2"
],
outputs
=
outputs_gather_scores_dim1
,
axis
=
0
)
node_list
.
append
(
node_gather_scores_dim1
)
# mul class * M
outputs_mul_classnum_boxnum
=
[
result_name
+
"@mul_classnum_boxnum"
]
node_mul_classnum_boxnum
=
onnx
.
helper
.
make_node
(
'Mul'
,
inputs
=
outputs_gather_1_nonzero
+
outputs_gather_scores_dim1
,
outputs
=
outputs_mul_classnum_boxnum
)
node_list
.
append
(
node_mul_classnum_boxnum
)
# add class * M * index
outputs_add_class_M_index
=
[
result_name
+
"@add_class_M_index"
]
node_add_class_M_index
=
onnx
.
helper
.
make_node
(
'Add'
,
inputs
=
outputs_mul_classnum_boxnum
+
outputs_gather_2_nonzero
,
outputs
=
outputs_add_class_M_index
)
node_list
.
append
(
node_add_class_M_index
)
# Squeeze the indices to 1 dim
outputs_squeeze_select_index
=
[
result_name
+
"@squeeze_select_index"
]
node_squeeze_select_index
=
onnx
.
helper
.
make_node
(
'Squeeze'
,
inputs
=
outputs_add_class_M_index
,
outputs
=
outputs_squeeze_select_index
,
axes
=
[
0
,
2
])
node_list
.
append
(
node_squeeze_select_index
)
# gather the data from flatten scores
outputs_gather_select_scores
=
[
result_name
+
"@gather_select_scores"
]
node_gather_select_scores
=
onnx
.
helper
.
make_node
(
'Gather'
,
inputs
=
outputs_reshape_scores_rank1
+
\
outputs_squeeze_select_index
,
outputs
=
outputs_gather_select_scores
,
axis
=
0
)
node_list
.
append
(
node_gather_select_scores
)
# get nums to input TopK
outputs_shape_select_num
=
[
result_name
+
"@shape_select_num"
]
node_shape_select_num
=
onnx
.
helper
.
make_node
(
'Shape'
,
inputs
=
outputs_gather_select_scores
,
outputs
=
outputs_shape_select_num
)
node_list
.
append
(
node_shape_select_num
)
outputs_gather_select_num
=
[
result_name
+
"@gather_select_num"
]
node_gather_select_num
=
onnx
.
helper
.
make_node
(
'Gather'
,
inputs
=
outputs_shape_select_num
+
[
result_name
+
"@const_0"
],
outputs
=
outputs_gather_select_num
,
axis
=
0
)
node_list
.
append
(
node_gather_select_num
)
outputs_unsqueeze_select_num
=
[
result_name
+
"@unsqueeze_select_num"
]
node_unsqueeze_select_num
=
onnx
.
helper
.
make_node
(
'Unsqueeze'
,
inputs
=
outputs_gather_select_num
,
outputs
=
outputs_unsqueeze_select_num
,
axes
=
[
0
])
node_list
.
append
(
node_unsqueeze_select_num
)
outputs_concat_topK_select_num
=
[
result_name
+
"@conat_topK_select_num"
]
node_conat_topK_select_num
=
onnx
.
helper
.
make_node
(
'Concat'
,
inputs
=
outputs_unsqueeze_select_num
+
name_keep_top_k_2D
,
outputs
=
outputs_concat_topK_select_num
,
axis
=
0
)
node_list
.
append
(
node_conat_topK_select_num
)
outputs_cast_concat_topK_select_num
=
[
result_name
+
"@concat_topK_select_num"
]
node_outputs_cast_concat_topK_select_num
=
onnx
.
helper
.
make_node
(
'Cast'
,
inputs
=
outputs_concat_topK_select_num
,
outputs
=
outputs_cast_concat_topK_select_num
,
to
=
6
)
node_list
.
append
(
node_outputs_cast_concat_topK_select_num
)
# get min(topK, num_select)
outputs_compare_topk_num_select
=
[
result_name
+
"@compare_topk_num_select"
]
node_compare_topk_num_select
=
onnx
.
helper
.
make_node
(
'ReduceMin'
,
inputs
=
outputs_cast_concat_topK_select_num
,
outputs
=
outputs_compare_topk_num_select
,
keepdims
=
0
)
node_list
.
append
(
node_compare_topk_num_select
)
# unsqueeze the indices to 1D tensor
outputs_unsqueeze_topk_select_indices
=
[
result_name
+
"@unsqueeze_topk_select_indices"
]
node_unsqueeze_topk_select_indices
=
onnx
.
helper
.
make_node
(
'Unsqueeze'
,
inputs
=
outputs_compare_topk_num_select
,
outputs
=
outputs_unsqueeze_topk_select_indices
,
axes
=
[
0
])
node_list
.
append
(
node_unsqueeze_topk_select_indices
)
# cast the indices to INT64
outputs_cast_topk_indices
=
[
result_name
+
"@cast_topk_indices"
]
node_cast_topk_indices
=
onnx
.
helper
.
make_node
(
'Cast'
,
inputs
=
outputs_unsqueeze_topk_select_indices
,
outputs
=
outputs_cast_topk_indices
,
to
=
7
)
node_list
.
append
(
node_cast_topk_indices
)
# select topk scores indices
outputs_topk_select_topk_indices
=
[
result_name
+
"@topk_select_topk_values"
,
\
result_name
+
"@topk_select_topk_indices"
]
node_topk_select_topk_indices
=
onnx
.
helper
.
make_node
(
'TopK'
,
inputs
=
outputs_gather_select_scores
+
outputs_cast_topk_indices
,
outputs
=
outputs_topk_select_topk_indices
)
node_list
.
append
(
node_topk_select_topk_indices
)
# gather topk label, scores, boxes
outputs_gather_topk_scores
=
[
result_name
+
"@gather_topk_scores"
]
node_gather_topk_scores
=
onnx
.
helper
.
make_node
(
'Gather'
,
inputs
=
outputs_gather_select_scores
+
[
outputs_topk_select_topk_indices
[
1
]],
outputs
=
outputs_gather_topk_scores
,
axis
=
0
)
node_list
.
append
(
node_gather_topk_scores
)
outputs_gather_topk_class
=
[
result_name
+
"@gather_topk_class"
]
node_gather_topk_class
=
onnx
.
helper
.
make_node
(
'Gather'
,
inputs
=
outputs_gather_1_nonzero
+
[
outputs_topk_select_topk_indices
[
1
]],
outputs
=
outputs_gather_topk_class
,
axis
=
1
)
node_list
.
append
(
node_gather_topk_class
)
# gather the boxes need to gather the boxes id, then get boxes
outputs_gather_topk_boxes_id
=
[
result_name
+
"@gather_topk_boxes_id"
]
node_gather_topk_boxes_id
=
onnx
.
helper
.
make_node
(
'Gather'
,
inputs
=
outputs_gather_2_nonzero
+
[
outputs_topk_select_topk_indices
[
1
]],
outputs
=
outputs_gather_topk_boxes_id
,
axis
=
1
)
node_list
.
append
(
node_gather_topk_boxes_id
)
# squeeze the gather_topk_boxes_id to 1 dim
outputs_squeeze_topk_boxes_id
=
[
result_name
+
"@squeeze_topk_boxes_id"
]
node_squeeze_topk_boxes_id
=
onnx
.
helper
.
make_node
(
'Squeeze'
,
inputs
=
outputs_gather_topk_boxes_id
,
outputs
=
outputs_squeeze_topk_boxes_id
,
axes
=
[
0
,
2
])
node_list
.
append
(
node_squeeze_topk_boxes_id
)
outputs_gather_select_boxes
=
[
result_name
+
"@gather_select_boxes"
]
node_gather_select_boxes
=
onnx
.
helper
.
make_node
(
'Gather'
,
inputs
=
inputs
[
'BBoxes'
]
+
outputs_squeeze_topk_boxes_id
,
outputs
=
outputs_gather_select_boxes
,
axis
=
1
)
node_list
.
append
(
node_gather_select_boxes
)
# concat the final result
# before concat need to cast the class to float
outputs_cast_topk_class
=
[
result_name
+
"@cast_topk_class"
]
node_cast_topk_class
=
onnx
.
helper
.
make_node
(
'Cast'
,
inputs
=
outputs_gather_topk_class
,
outputs
=
outputs_cast_topk_class
,
to
=
1
)
node_list
.
append
(
node_cast_topk_class
)
outputs_unsqueeze_topk_scores
=
[
result_name
+
"@unsqueeze_topk_scores"
]
node_unsqueeze_topk_scores
=
onnx
.
helper
.
make_node
(
'Unsqueeze'
,
inputs
=
outputs_gather_topk_scores
,
outputs
=
outputs_unsqueeze_topk_scores
,
axes
=
[
0
,
2
])
node_list
.
append
(
node_unsqueeze_topk_scores
)
inputs_concat_final_results
=
outputs_cast_topk_class
+
outputs_unsqueeze_topk_scores
+
\
outputs_gather_select_boxes
outputs_sort_by_socre_results
=
[
result_name
+
"@concat_topk_scores"
]
node_sort_by_socre_results
=
onnx
.
helper
.
make_node
(
'Concat'
,
inputs
=
inputs_concat_final_results
,
outputs
=
outputs_sort_by_socre_results
,
axis
=
2
)
node_list
.
append
(
node_sort_by_socre_results
)
# select topk classes indices
outputs_squeeze_cast_topk_class
=
[
result_name
+
"@squeeze_cast_topk_class"
]
node_squeeze_cast_topk_class
=
onnx
.
helper
.
make_node
(
'Squeeze'
,
inputs
=
outputs_cast_topk_class
,
outputs
=
outputs_squeeze_cast_topk_class
,
axes
=
[
0
,
2
])
node_list
.
append
(
node_squeeze_cast_topk_class
)
outputs_neg_squeeze_cast_topk_class
=
[
result_name
+
"@neg_squeeze_cast_topk_class"
]
node_neg_squeeze_cast_topk_class
=
onnx
.
helper
.
make_node
(
'Neg'
,
inputs
=
outputs_squeeze_cast_topk_class
,
outputs
=
outputs_neg_squeeze_cast_topk_class
)
node_list
.
append
(
node_neg_squeeze_cast_topk_class
)
outputs_topk_select_classes_indices
=
[
result_name
+
"@topk_select_topk_classes_scores"
,
\
result_name
+
"@topk_select_topk_classes_indices"
]
node_topk_select_topk_indices
=
onnx
.
helper
.
make_node
(
'TopK'
,
inputs
=
outputs_neg_squeeze_cast_topk_class
+
outputs_cast_topk_indices
,
outputs
=
outputs_topk_select_classes_indices
)
node_list
.
append
(
node_topk_select_topk_indices
)
outputs_concat_final_results
=
outputs
[
'Out'
]
node_concat_final_results
=
onnx
.
helper
.
make_node
(
'Gather'
,
inputs
=
outputs_sort_by_socre_results
+
[
outputs_topk_select_classes_indices
[
1
]],
outputs
=
outputs_concat_final_results
,
axis
=
1
)
node_list
.
append
(
node_concat_final_results
)
return
node_list
x2paddle/op_mapper/paddle2onnx/opset9/paddle_custom_layer/prior_box.py
已删除
100644 → 0
浏览文件 @
56697812
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import
sys
import
math
import
onnx
import
warnings
import
numpy
as
np
from
functools
import
partial
from
onnx
import
TensorProto
from
onnx.helper
import
make_node
,
make_tensor
from
onnx
import
onnx_pb
from
paddle.fluid.executor
import
_fetch_var
as
fetch_var
from
onnx
import
helper
import
paddle.fluid
as
fluid
import
paddle.fluid.core
as
core
def
ExpandAspectRations
(
input_aspect_ratior
,
flip
):
expsilon
=
1e-6
output_ratios
=
[
1.0
]
for
input_ratio
in
input_aspect_ratior
:
already_exis
=
False
for
output_ratio
in
output_ratios
:
if
abs
(
input_ratio
-
output_ratio
)
<
expsilon
:
already_exis
=
True
break
if
already_exis
==
False
:
output_ratios
.
append
(
input_ratio
)
if
flip
:
output_ratios
.
append
(
1.0
/
input_ratio
)
return
output_ratios
def
prior_box
(
op
,
block
):
"""
In this function, use the attribute to get the prior box, because we do not use
the image data and feature map, wo could the python code to create the varaible,
and to create the onnx tensor as output.
"""
flip
=
bool
(
op
.
attr
(
'flip'
))
clip
=
bool
(
op
.
attr
(
'clip'
))
min_max_aspect_ratios_order
=
bool
(
op
.
attr
(
'min_max_aspect_ratios_order'
))
min_sizes
=
[
float
(
size
)
for
size
in
op
.
attr
(
'min_sizes'
)]
max_sizes
=
[
float
(
size
)
for
size
in
op
.
attr
(
'max_sizes'
)]
if
isinstance
(
op
.
attr
(
'aspect_ratios'
),
list
):
aspect_ratios
=
[
float
(
ratio
)
for
ratio
in
op
.
attr
(
'aspect_ratios'
)]
else
:
aspect_ratios
=
[
float
(
op
.
attr
(
'aspect_ratios'
))]
variances
=
[
float
(
var
)
for
var
in
op
.
attr
(
'variances'
)]
# set min_max_aspect_ratios_order = false
output_ratios
=
ExpandAspectRations
(
aspect_ratios
,
flip
)
step_w
=
float
(
op
.
attr
(
'step_w'
))
step_h
=
float
(
op
.
attr
(
'step_h'
))
offset
=
float
(
op
.
attr
(
'offset'
))
input_shape
=
block
.
var
(
op
.
input
(
'Input'
)[
0
]).
shape
image_shape
=
block
.
var
(
op
.
input
(
'Image'
)[
0
]).
shape
img_width
=
image_shape
[
3
]
img_height
=
image_shape
[
2
]
feature_width
=
input_shape
[
3
]
feature_height
=
input_shape
[
2
]
step_width
=
1.0
step_height
=
1.0
if
step_w
==
0.0
or
step_h
==
0.0
:
step_w
=
float
(
img_width
/
feature_width
)
step_h
=
float
(
img_height
/
feature_height
)
num_priors
=
len
(
output_ratios
)
*
len
(
min_sizes
)
if
len
(
max_sizes
)
>
0
:
num_priors
+=
len
(
max_sizes
)
out_dim
=
(
feature_height
,
feature_width
,
num_priors
,
4
)
out_boxes
=
np
.
zeros
(
out_dim
).
astype
(
'float32'
)
out_var
=
np
.
zeros
(
out_dim
).
astype
(
'float32'
)
idx
=
0
for
h
in
range
(
feature_height
):
for
w
in
range
(
feature_width
):
c_x
=
(
w
+
offset
)
*
step_w
c_y
=
(
h
+
offset
)
*
step_h
idx
=
0
for
s
in
range
(
len
(
min_sizes
)):
min_size
=
min_sizes
[
s
]
if
not
min_max_aspect_ratios_order
:
# rest of priors
for
r
in
range
(
len
(
output_ratios
)):
ar
=
output_ratios
[
r
]
c_w
=
min_size
*
math
.
sqrt
(
ar
)
/
2
c_h
=
(
min_size
/
math
.
sqrt
(
ar
))
/
2
out_boxes
[
h
,
w
,
idx
,
:]
=
[
(
c_x
-
c_w
)
/
img_width
,
(
c_y
-
c_h
)
/
img_height
,
(
c_x
+
c_w
)
/
img_width
,
(
c_y
+
c_h
)
/
img_height
]
idx
+=
1
if
len
(
max_sizes
)
>
0
:
max_size
=
max_sizes
[
s
]
# second prior: aspect_ratio = 1,
c_w
=
c_h
=
math
.
sqrt
(
min_size
*
max_size
)
/
2
out_boxes
[
h
,
w
,
idx
,
:]
=
[
(
c_x
-
c_w
)
/
img_width
,
(
c_y
-
c_h
)
/
img_height
,
(
c_x
+
c_w
)
/
img_width
,
(
c_y
+
c_h
)
/
img_height
]
idx
+=
1
else
:
c_w
=
c_h
=
min_size
/
2.
out_boxes
[
h
,
w
,
idx
,
:]
=
[
(
c_x
-
c_w
)
/
img_width
,
(
c_y
-
c_h
)
/
img_height
,
(
c_x
+
c_w
)
/
img_width
,
(
c_y
+
c_h
)
/
img_height
]
idx
+=
1
if
len
(
max_sizes
)
>
0
:
max_size
=
max_sizes
[
s
]
# second prior: aspect_ratio = 1,
c_w
=
c_h
=
math
.
sqrt
(
min_size
*
max_size
)
/
2
out_boxes
[
h
,
w
,
idx
,
:]
=
[
(
c_x
-
c_w
)
/
img_width
,
(
c_y
-
c_h
)
/
img_height
,
(
c_x
+
c_w
)
/
img_width
,
(
c_y
+
c_h
)
/
img_height
]
idx
+=
1
# rest of priors
for
r
in
range
(
len
(
output_ratios
)):
ar
=
output_ratios
[
r
]
if
abs
(
ar
-
1.
)
<
1e-6
:
continue
c_w
=
min_size
*
math
.
sqrt
(
ar
)
/
2
c_h
=
(
min_size
/
math
.
sqrt
(
ar
))
/
2
out_boxes
[
h
,
w
,
idx
,
:]
=
[
(
c_x
-
c_w
)
/
img_width
,
(
c_y
-
c_h
)
/
img_height
,
(
c_x
+
c_w
)
/
img_width
,
(
c_y
+
c_h
)
/
img_height
]
idx
+=
1
if
clip
:
out_boxes
=
np
.
clip
(
out_boxes
,
0.0
,
1.0
)
# set the variance.
out_var
=
np
.
tile
(
variances
,
(
feature_height
,
feature_width
,
num_priors
,
1
))
#make node that
node_boxes
=
onnx
.
helper
.
make_node
(
'Constant'
,
inputs
=
[],
outputs
=
op
.
output
(
'Boxes'
),
value
=
onnx
.
helper
.
make_tensor
(
name
=
op
.
output
(
'Boxes'
)[
0
]
+
"@const"
,
data_type
=
onnx
.
TensorProto
.
FLOAT
,
dims
=
out_boxes
.
shape
,
vals
=
out_boxes
.
flatten
()))
node_vars
=
onnx
.
helper
.
make_node
(
'Constant'
,
inputs
=
[],
outputs
=
op
.
output
(
'Variances'
),
value
=
onnx
.
helper
.
make_tensor
(
name
=
op
.
output
(
'Variances'
)[
0
]
+
"@const"
,
data_type
=
onnx
.
TensorProto
.
FLOAT
,
dims
=
out_var
.
shape
,
vals
=
out_var
.
flatten
()))
return
[
node_boxes
,
node_vars
]
x2paddle/op_mapper/paddle2onnx/opset9/paddle_custom_layer/yolo_box.py
已删除
100644 → 0
浏览文件 @
56697812
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import
onnx
import
numpy
as
np
from
onnx
import
onnx_pb
,
helper
MAX_FLOAT32
=
np
.
asarray
(
[
255
,
255
,
127
,
127
],
dtype
=
np
.
uint8
).
view
(
np
.
float32
)[
0
]
def
get_old_name
(
arg
,
name_prefix
=
''
):
prefix_index
=
arg
.
find
(
name_prefix
)
if
prefix_index
!=
-
1
:
last_prefix
=
arg
[
len
(
name_prefix
):]
else
:
last_prefix
=
arg
idx
=
last_prefix
.
find
(
'@'
)
if
idx
!=
-
1
:
last_prefix
=
last_prefix
[:
idx
]
return
name_prefix
+
last_prefix
def
is_static_shape
(
shape
):
if
len
(
shape
)
>
1
and
shape
.
count
(
-
1
)
>
1
:
raise
Exception
(
"Converting this model to ONNX need with static input shape, please fix input shape of this model, see doc Q5 in https://github.com/PaddlePaddle/X2Paddle/blob/develop/FAQ.md."
)
def
yolo_box
(
op
,
block
):
inputs
=
dict
()
outputs
=
dict
()
attrs
=
dict
()
for
name
in
op
.
input_names
:
inputs
[
name
]
=
op
.
input
(
name
)
for
name
in
op
.
output_names
:
outputs
[
name
]
=
op
.
output
(
name
)
for
name
in
op
.
attr_names
:
attrs
[
name
]
=
op
.
attr
(
name
)
model_name
=
outputs
[
'Boxes'
][
0
]
input_shape
=
block
.
vars
[
get_old_name
(
inputs
[
'X'
][
0
])].
shape
is_static_shape
(
input_shape
)
image_size
=
inputs
[
'ImgSize'
]
input_height
=
input_shape
[
2
]
input_width
=
input_shape
[
3
]
class_num
=
attrs
[
'class_num'
]
anchors
=
attrs
[
'anchors'
]
num_anchors
=
int
(
len
(
anchors
))
//
2
downsample_ratio
=
attrs
[
'downsample_ratio'
]
input_size
=
input_height
*
downsample_ratio
conf_thresh
=
attrs
[
'conf_thresh'
]
conf_thresh_mat
=
np
.
ones
([
num_anchors
*
input_height
*
input_width
])
*
conf_thresh
node_list
=
[]
im_outputs
=
[]
x_shape
=
[
1
,
num_anchors
,
5
+
class_num
,
input_height
,
input_width
]
name_x_shape
=
[
model_name
+
"@x_shape"
]
node_x_shape
=
onnx
.
helper
.
make_node
(
'Constant'
,
inputs
=
[],
outputs
=
name_x_shape
,
value
=
onnx
.
helper
.
make_tensor
(
name
=
name_x_shape
[
0
]
+
"@const"
,
data_type
=
onnx
.
TensorProto
.
INT64
,
dims
=
[
5
],
vals
=
x_shape
))
node_list
.
append
(
node_x_shape
)
outputs_x_reshape
=
[
model_name
+
"@reshape"
]
node_x_reshape
=
onnx
.
helper
.
make_node
(
'Reshape'
,
inputs
=
inputs
[
'X'
]
+
name_x_shape
,
outputs
=
outputs_x_reshape
)
node_list
.
append
(
node_x_reshape
)
outputs_x_transpose
=
[
model_name
+
"@x_transpose"
]
node_x_transpose
=
onnx
.
helper
.
make_node
(
'Transpose'
,
inputs
=
outputs_x_reshape
,
outputs
=
outputs_x_transpose
,
perm
=
[
0
,
1
,
3
,
4
,
2
])
node_list
.
append
(
node_x_transpose
)
range_x
=
[]
range_y
=
[]
for
i
in
range
(
0
,
input_width
):
range_x
.
append
(
i
)
for
j
in
range
(
0
,
input_height
):
range_y
.
append
(
j
)
name_range_x
=
[
model_name
+
"@range_x"
]
node_range_x
=
onnx
.
helper
.
make_node
(
'Constant'
,
inputs
=
[],
outputs
=
name_range_x
,
value
=
onnx
.
helper
.
make_tensor
(
name
=
name_range_x
[
0
]
+
"@const"
,
data_type
=
onnx
.
TensorProto
.
FLOAT
,
dims
=
[
input_width
],
vals
=
range_x
))
node_list
.
append
(
node_range_x
)
name_range_y
=
[
model_name
+
"@range_y"
]
node_range_y
=
onnx
.
helper
.
make_node
(
'Constant'
,
inputs
=
[],
outputs
=
name_range_y
,
value
=
onnx
.
helper
.
make_tensor
(
name
=
name_range_y
[
0
]
+
"@const"
,
data_type
=
onnx
.
TensorProto
.
FLOAT
,
dims
=
[
input_height
],
vals
=
range_y
))
node_list
.
append
(
node_range_y
)
range_x_new_shape
=
[
1
,
input_width
]
range_y_new_shape
=
[
input_height
,
1
]
name_range_x_new_shape
=
[
model_name
+
"@range_x_new_shape"
]
node_range_x_new_shape
=
onnx
.
helper
.
make_node
(
'Constant'
,
inputs
=
[],
outputs
=
name_range_x_new_shape
,
value
=
onnx
.
helper
.
make_tensor
(
name
=
name_range_x_new_shape
[
0
]
+
"@const"
,
data_type
=
onnx
.
TensorProto
.
INT64
,
dims
=
[
len
(
range_x_new_shape
)],
vals
=
range_x_new_shape
))
node_list
.
append
(
node_range_x_new_shape
)
name_range_y_new_shape
=
[
model_name
+
"@range_y_new_shape"
]
node_range_y_new_shape
=
onnx
.
helper
.
make_node
(
'Constant'
,
inputs
=
[],
outputs
=
name_range_y_new_shape
,
value
=
onnx
.
helper
.
make_tensor
(
name
=
name_range_y_new_shape
[
0
]
+
"@const"
,
data_type
=
onnx
.
TensorProto
.
INT64
,
dims
=
[
len
(
range_y_new_shape
)],
vals
=
range_y_new_shape
))
node_list
.
append
(
node_range_y_new_shape
)
outputs_range_x_reshape
=
[
model_name
+
"@range_x_reshape"
]
node_range_x_reshape
=
onnx
.
helper
.
make_node
(
'Reshape'
,
inputs
=
name_range_x
+
name_range_x_new_shape
,
outputs
=
outputs_range_x_reshape
)
node_list
.
append
(
node_range_x_reshape
)
outputs_range_y_reshape
=
[
model_name
+
"@range_y_reshape"
]
node_range_y_reshape
=
onnx
.
helper
.
make_node
(
'Reshape'
,
inputs
=
name_range_y
+
name_range_y_new_shape
,
outputs
=
outputs_range_y_reshape
)
node_list
.
append
(
node_range_y_reshape
)
outputs_grid_x
=
[
model_name
+
"@grid_x"
]
node_grid_x
=
onnx
.
helper
.
make_node
(
"Tile"
,
inputs
=
outputs_range_x_reshape
+
name_range_y_new_shape
,
outputs
=
outputs_grid_x
)
node_list
.
append
(
node_grid_x
)
outputs_grid_y
=
[
model_name
+
"@grid_y"
]
node_grid_y
=
onnx
.
helper
.
make_node
(
"Tile"
,
inputs
=
outputs_range_y_reshape
+
name_range_x_new_shape
,
outputs
=
outputs_grid_y
)
node_list
.
append
(
node_grid_y
)
outputs_box_x
=
[
model_name
+
"@box_x"
]
outputs_box_y
=
[
model_name
+
"@box_y"
]
outputs_box_w
=
[
model_name
+
"@box_w"
]
outputs_box_h
=
[
model_name
+
"@box_h"
]
outputs_conf
=
[
model_name
+
"@conf"
]
outputs_prob
=
[
model_name
+
"@prob"
]
node_split_input
=
onnx
.
helper
.
make_node
(
"Split"
,
inputs
=
outputs_x_transpose
,
outputs
=
outputs_box_x
+
outputs_box_y
+
outputs_box_w
\
+
outputs_box_h
+
outputs_conf
+
outputs_prob
,
axis
=-
1
,
split
=
[
1
,
1
,
1
,
1
,
1
,
class_num
])
node_list
.
append
(
node_split_input
)
outputs_box_x_sigmoid
=
[
model_name
+
"@box_x_sigmoid"
]
outputs_box_y_sigmoid
=
[
model_name
+
"@box_y_sigmoid"
]
node_box_x_sigmoid
=
onnx
.
helper
.
make_node
(
"Sigmoid"
,
inputs
=
outputs_box_x
,
outputs
=
outputs_box_x_sigmoid
)
node_list
.
append
(
node_box_x_sigmoid
)
node_box_y_sigmoid
=
onnx
.
helper
.
make_node
(
"Sigmoid"
,
inputs
=
outputs_box_y
,
outputs
=
outputs_box_y_sigmoid
)
node_list
.
append
(
node_box_y_sigmoid
)
outputs_box_x_squeeze
=
[
model_name
+
"@box_x_squeeze"
]
outputs_box_y_squeeze
=
[
model_name
+
"@box_y_squeeze"
]
node_box_x_squeeze
=
onnx
.
helper
.
make_node
(
'Squeeze'
,
inputs
=
outputs_box_x_sigmoid
,
outputs
=
outputs_box_x_squeeze
,
axes
=
[
4
])
node_list
.
append
(
node_box_x_squeeze
)
node_box_y_squeeze
=
onnx
.
helper
.
make_node
(
'Squeeze'
,
inputs
=
outputs_box_y_sigmoid
,
outputs
=
outputs_box_y_squeeze
,
axes
=
[
4
])
node_list
.
append
(
node_box_y_squeeze
)
outputs_box_x_add_grid
=
[
model_name
+
"@box_x_add_grid"
]
outputs_box_y_add_grid
=
[
model_name
+
"@box_y_add_grid"
]
node_box_x_add_grid
=
onnx
.
helper
.
make_node
(
"Add"
,
inputs
=
outputs_grid_x
+
outputs_box_x_squeeze
,
outputs
=
outputs_box_x_add_grid
)
node_list
.
append
(
node_box_x_add_grid
)
node_box_y_add_grid
=
onnx
.
helper
.
make_node
(
"Add"
,
inputs
=
outputs_grid_y
+
outputs_box_y_squeeze
,
outputs
=
outputs_box_y_add_grid
)
node_list
.
append
(
node_box_y_add_grid
)
name_input_h
=
[
model_name
+
"@input_h"
]
name_input_w
=
[
model_name
+
"@input_w"
]
node_input_h
=
onnx
.
helper
.
make_node
(
'Constant'
,
inputs
=
[],
outputs
=
name_input_h
,
value
=
onnx
.
helper
.
make_tensor
(
name
=
name_input_w
[
0
]
+
"@const"
,
data_type
=
onnx
.
TensorProto
.
FLOAT
,
dims
=
(),
vals
=
[
input_height
]))
node_list
.
append
(
node_input_h
)
node_input_w
=
onnx
.
helper
.
make_node
(
'Constant'
,
inputs
=
[],
outputs
=
name_input_w
,
value
=
onnx
.
helper
.
make_tensor
(
name
=
name_input_w
[
0
]
+
"@const"
,
data_type
=
onnx
.
TensorProto
.
FLOAT
,
dims
=
(),
vals
=
[
input_width
]))
node_list
.
append
(
node_input_w
)
outputs_box_x_encode
=
[
model_name
+
"@box_x_encode"
]
outputs_box_y_encode
=
[
model_name
+
"@box_y_encode"
]
node_box_x_encode
=
onnx
.
helper
.
make_node
(
'Div'
,
inputs
=
outputs_box_x_add_grid
+
name_input_w
,
outputs
=
outputs_box_x_encode
)
node_list
.
append
(
node_box_x_encode
)
node_box_y_encode
=
onnx
.
helper
.
make_node
(
'Div'
,
inputs
=
outputs_box_y_add_grid
+
name_input_h
,
outputs
=
outputs_box_y_encode
)
node_list
.
append
(
node_box_y_encode
)
name_anchor_tensor
=
[
model_name
+
"@anchor_tensor"
]
node_anchor_tensor
=
onnx
.
helper
.
make_node
(
"Constant"
,
inputs
=
[],
outputs
=
name_anchor_tensor
,
value
=
onnx
.
helper
.
make_tensor
(
name
=
name_anchor_tensor
[
0
]
+
"@const"
,
data_type
=
onnx
.
TensorProto
.
FLOAT
,
dims
=
[
len
(
anchors
)],
vals
=
anchors
))
node_list
.
append
(
node_anchor_tensor
)
anchor_shape
=
[
int
(
num_anchors
),
2
]
name_anchor_shape
=
[
model_name
+
"@anchor_shape"
]
node_anchor_shape
=
onnx
.
helper
.
make_node
(
"Constant"
,
inputs
=
[],
outputs
=
name_anchor_shape
,
value
=
onnx
.
helper
.
make_tensor
(
name
=
name_anchor_shape
[
0
]
+
"@const"
,
data_type
=
onnx
.
TensorProto
.
INT64
,
dims
=
[
2
],
vals
=
anchor_shape
))
node_list
.
append
(
node_anchor_shape
)
outputs_anchor_tensor_reshape
=
[
model_name
+
"@anchor_tensor_reshape"
]
node_anchor_tensor_reshape
=
onnx
.
helper
.
make_node
(
"Reshape"
,
inputs
=
name_anchor_tensor
+
name_anchor_shape
,
outputs
=
outputs_anchor_tensor_reshape
)
node_list
.
append
(
node_anchor_tensor_reshape
)
name_input_size
=
[
model_name
+
"@input_size"
]
node_input_size
=
onnx
.
helper
.
make_node
(
"Constant"
,
inputs
=
[],
outputs
=
name_input_size
,
value
=
onnx
.
helper
.
make_tensor
(
name
=
name_input_size
[
0
]
+
"@const"
,
data_type
=
onnx
.
TensorProto
.
FLOAT
,
dims
=
(),
vals
=
[
input_size
]))
node_list
.
append
(
node_input_size
)
outputs_anchors_div_input_size
=
[
model_name
+
"@anchors_div_input_size"
]
node_anchors_div_input_size
=
onnx
.
helper
.
make_node
(
"Div"
,
inputs
=
outputs_anchor_tensor_reshape
+
name_input_size
,
outputs
=
outputs_anchors_div_input_size
)
node_list
.
append
(
node_anchors_div_input_size
)
outputs_anchor_w
=
[
model_name
+
"@anchor_w"
]
outputs_anchor_h
=
[
model_name
+
"@anchor_h"
]
node_anchor_split
=
onnx
.
helper
.
make_node
(
'Split'
,
inputs
=
outputs_anchors_div_input_size
,
outputs
=
outputs_anchor_w
+
outputs_anchor_h
,
axis
=
1
,
split
=
[
1
,
1
])
node_list
.
append
(
node_anchor_split
)
new_anchor_shape
=
[
1
,
int
(
num_anchors
),
1
,
1
]
name_new_anchor_shape
=
[
model_name
+
"@new_anchor_shape"
]
node_new_anchor_shape
=
onnx
.
helper
.
make_node
(
'Constant'
,
inputs
=
[],
outputs
=
name_new_anchor_shape
,
value
=
onnx
.
helper
.
make_tensor
(
name
=
name_new_anchor_shape
[
0
]
+
"@const"
,
data_type
=
onnx
.
TensorProto
.
INT64
,
dims
=
[
len
(
new_anchor_shape
)],
vals
=
new_anchor_shape
))
node_list
.
append
(
node_new_anchor_shape
)
outputs_anchor_w_reshape
=
[
model_name
+
"@anchor_w_reshape"
]
outputs_anchor_h_reshape
=
[
model_name
+
"@anchor_h_reshape"
]
node_anchor_w_reshape
=
onnx
.
helper
.
make_node
(
'Reshape'
,
inputs
=
outputs_anchor_w
+
name_new_anchor_shape
,
outputs
=
outputs_anchor_w_reshape
)
node_list
.
append
(
node_anchor_w_reshape
)
node_anchor_h_reshape
=
onnx
.
helper
.
make_node
(
'Reshape'
,
inputs
=
outputs_anchor_h
+
name_new_anchor_shape
,
outputs
=
outputs_anchor_h_reshape
)
node_list
.
append
(
node_anchor_h_reshape
)
outputs_box_w_squeeze
=
[
model_name
+
"@box_w_squeeze"
]
node_box_w_squeeze
=
onnx
.
helper
.
make_node
(
'Squeeze'
,
inputs
=
outputs_box_w
,
outputs
=
outputs_box_w_squeeze
,
axes
=
[
4
])
node_list
.
append
(
node_box_w_squeeze
)
outputs_box_h_squeeze
=
[
model_name
+
"@box_h_squeeze"
]
node_box_h_squeeze
=
onnx
.
helper
.
make_node
(
'Squeeze'
,
inputs
=
outputs_box_h
,
outputs
=
outputs_box_h_squeeze
,
axes
=
[
4
])
node_list
.
append
(
node_box_h_squeeze
)
outputs_box_w_exp
=
[
model_name
+
"@box_w_exp"
]
node_box_w_exp
=
onnx
.
helper
.
make_node
(
"Exp"
,
inputs
=
outputs_box_w_squeeze
,
outputs
=
outputs_box_w_exp
)
node_list
.
append
(
node_box_w_exp
)
outputs_box_h_exp
=
[
model_name
+
"@box_h_exp"
]
node_box_h_exp
=
onnx
.
helper
.
make_node
(
"Exp"
,
inputs
=
outputs_box_h_squeeze
,
outputs
=
outputs_box_h_exp
)
node_list
.
append
(
node_box_h_exp
)
outputs_box_w_encode
=
[
model_name
+
"box_w_encode"
]
outputs_box_h_encode
=
[
model_name
+
"box_h_encode"
]
node_box_w_encode
=
onnx
.
helper
.
make_node
(
'Mul'
,
inputs
=
outputs_box_w_exp
+
outputs_anchor_w_reshape
,
outputs
=
outputs_box_w_encode
)
node_list
.
append
(
node_box_w_encode
)
node_box_h_encode
=
onnx
.
helper
.
make_node
(
'Mul'
,
inputs
=
outputs_box_h_exp
+
outputs_anchor_h_reshape
,
outputs
=
outputs_box_h_encode
)
node_list
.
append
(
node_box_h_encode
)
outputs_conf_sigmoid
=
[
model_name
+
"@conf_sigmoid"
]
node_conf_sigmoid
=
onnx
.
helper
.
make_node
(
'Sigmoid'
,
inputs
=
outputs_conf
,
outputs
=
outputs_conf_sigmoid
)
node_list
.
append
(
node_conf_sigmoid
)
name_conf_thresh
=
[
model_name
+
"@conf_thresh"
]
node_conf_thresh
=
onnx
.
helper
.
make_node
(
'Constant'
,
inputs
=
[],
outputs
=
name_conf_thresh
,
value
=
onnx
.
helper
.
make_tensor
(
name
=
name_conf_thresh
[
0
]
+
"@const"
,
data_type
=
onnx
.
TensorProto
.
FLOAT
,
dims
=
[
num_anchors
*
input_height
*
input_width
],
vals
=
conf_thresh_mat
))
node_list
.
append
(
node_conf_thresh
)
conf_shape
=
[
1
,
int
(
num_anchors
),
input_height
,
input_width
,
1
]
name_conf_shape
=
[
model_name
+
"@conf_shape"
]
node_conf_shape
=
onnx
.
helper
.
make_node
(
'Constant'
,
inputs
=
[],
outputs
=
name_conf_shape
,
value
=
onnx
.
helper
.
make_tensor
(
name
=
name_conf_shape
[
0
]
+
"@const"
,
data_type
=
onnx
.
TensorProto
.
INT64
,
dims
=
[
len
(
conf_shape
)],
vals
=
conf_shape
))
node_list
.
append
(
node_conf_shape
)
outputs_conf_thresh_reshape
=
[
model_name
+
"@conf_thresh_reshape"
]
node_conf_thresh_reshape
=
onnx
.
helper
.
make_node
(
'Reshape'
,
inputs
=
name_conf_thresh
+
name_conf_shape
,
outputs
=
outputs_conf_thresh_reshape
)
node_list
.
append
(
node_conf_thresh_reshape
)
outputs_conf_sub
=
[
model_name
+
"@conf_sub"
]
node_conf_sub
=
onnx
.
helper
.
make_node
(
'Sub'
,
inputs
=
outputs_conf_sigmoid
+
outputs_conf_thresh_reshape
,
outputs
=
outputs_conf_sub
)
node_list
.
append
(
node_conf_sub
)
outputs_conf_clip
=
[
model_name
+
"@conf_clip"
]
node_conf_clip
=
onnx
.
helper
.
make_node
(
'Clip'
,
inputs
=
outputs_conf_sub
,
outputs
=
outputs_conf_clip
)
node_list
.
append
(
node_conf_clip
)
zeros
=
[
0
]
name_zeros
=
[
model_name
+
"@zeros"
]
node_zeros
=
onnx
.
helper
.
make_node
(
'Constant'
,
inputs
=
[],
outputs
=
name_zeros
,
value
=
onnx
.
helper
.
make_tensor
(
name
=
name_zeros
[
0
]
+
"@const"
,
data_type
=
onnx
.
TensorProto
.
FLOAT
,
dims
=
(),
vals
=
zeros
))
node_list
.
append
(
node_zeros
)
outputs_conf_clip_bool
=
[
model_name
+
"@conf_clip_bool"
]
node_conf_clip_bool
=
onnx
.
helper
.
make_node
(
'Greater'
,
inputs
=
outputs_conf_clip
+
name_zeros
,
outputs
=
outputs_conf_clip_bool
)
node_list
.
append
(
node_conf_clip_bool
)
outputs_conf_clip_cast
=
[
model_name
+
"@conf_clip_cast"
]
node_conf_clip_cast
=
onnx
.
helper
.
make_node
(
'Cast'
,
inputs
=
outputs_conf_clip_bool
,
outputs
=
outputs_conf_clip_cast
,
to
=
1
)
node_list
.
append
(
node_conf_clip_cast
)
outputs_conf_set_zero
=
[
model_name
+
"@conf_set_zero"
]
node_conf_set_zero
=
onnx
.
helper
.
make_node
(
'Mul'
,
inputs
=
outputs_conf_sigmoid
+
outputs_conf_clip_cast
,
outputs
=
outputs_conf_set_zero
)
node_list
.
append
(
node_conf_set_zero
)
outputs_prob_sigmoid
=
[
model_name
+
"@prob_sigmoid"
]
node_prob_sigmoid
=
onnx
.
helper
.
make_node
(
'Sigmoid'
,
inputs
=
outputs_prob
,
outputs
=
outputs_prob_sigmoid
)
node_list
.
append
(
node_prob_sigmoid
)
new_shape
=
[
1
,
int
(
num_anchors
),
input_height
,
input_width
,
1
]
name_new_shape
=
[
model_name
+
"@new_shape"
]
node_new_shape
=
onnx
.
helper
.
make_node
(
'Constant'
,
inputs
=
[],
outputs
=
name_new_shape
,
value
=
onnx
.
helper
.
make_tensor
(
name
=
name_new_shape
[
0
]
+
"@const"
,
data_type
=
onnx
.
TensorProto
.
INT64
,
dims
=
[
len
(
new_shape
)],
vals
=
new_shape
))
node_list
.
append
(
node_new_shape
)
outputs_conf_new_shape
=
[
model_name
+
"@_conf_new_shape"
]
node_conf_new_shape
=
onnx
.
helper
.
make_node
(
'Reshape'
,
inputs
=
outputs_conf_set_zero
+
name_new_shape
,
outputs
=
outputs_conf_new_shape
)
node_list
.
append
(
node_conf_new_shape
)
outputs_score
=
[
model_name
+
"@score"
]
node_score
=
onnx
.
helper
.
make_node
(
'Mul'
,
inputs
=
outputs_prob_sigmoid
+
outputs_conf_new_shape
,
outputs
=
outputs_score
)
node_list
.
append
(
node_score
)
outputs_conf_bool
=
[
model_name
+
"@conf_bool"
]
node_conf_bool
=
onnx
.
helper
.
make_node
(
'Greater'
,
inputs
=
outputs_conf_new_shape
+
name_zeros
,
outputs
=
outputs_conf_bool
)
node_list
.
append
(
node_conf_bool
)
outputs_box_x_new_shape
=
[
model_name
+
"@box_x_new_shape"
]
node_box_x_new_shape
=
onnx
.
helper
.
make_node
(
'Reshape'
,
inputs
=
outputs_box_x_encode
+
name_new_shape
,
outputs
=
outputs_box_x_new_shape
)
node_list
.
append
(
node_box_x_new_shape
)
outputs_box_y_new_shape
=
[
model_name
+
"@box_y_new_shape"
]
node_box_y_new_shape
=
onnx
.
helper
.
make_node
(
'Reshape'
,
inputs
=
outputs_box_y_encode
+
name_new_shape
,
outputs
=
outputs_box_y_new_shape
)
node_list
.
append
(
node_box_y_new_shape
)
outputs_box_w_new_shape
=
[
model_name
+
"@box_w_new_shape"
]
node_box_w_new_shape
=
onnx
.
helper
.
make_node
(
'Reshape'
,
inputs
=
outputs_box_w_encode
+
name_new_shape
,
outputs
=
outputs_box_w_new_shape
)
node_list
.
append
(
node_box_w_new_shape
)
outputs_box_h_new_shape
=
[
model_name
+
"@box_h_new_shape"
]
node_box_h_new_shape
=
onnx
.
helper
.
make_node
(
'Reshape'
,
inputs
=
outputs_box_h_encode
+
name_new_shape
,
outputs
=
outputs_box_h_new_shape
)
node_list
.
append
(
node_box_h_new_shape
)
outputs_pred_box
=
[
model_name
+
"@pred_box"
]
node_pred_box
=
onnx
.
helper
.
make_node
(
'Concat'
,
inputs
=
outputs_box_x_new_shape
+
outputs_box_y_new_shape
+
\
outputs_box_w_new_shape
+
outputs_box_h_new_shape
,
outputs
=
outputs_pred_box
,
axis
=
4
)
node_list
.
append
(
node_pred_box
)
outputs_conf_cast
=
[
model_name
+
"conf_cast"
]
node_conf_cast
=
onnx
.
helper
.
make_node
(
'Cast'
,
inputs
=
outputs_conf_bool
,
outputs
=
outputs_conf_cast
,
to
=
1
)
node_list
.
append
(
node_conf_cast
)
outputs_pred_box_mul_conf
=
[
model_name
+
"@pred_box_mul_conf"
]
node_pred_box_mul_conf
=
onnx
.
helper
.
make_node
(
'Mul'
,
inputs
=
outputs_pred_box
+
outputs_conf_cast
,
outputs
=
outputs_pred_box_mul_conf
)
node_list
.
append
(
node_pred_box_mul_conf
)
box_shape
=
[
1
,
int
(
num_anchors
)
*
input_height
*
input_width
,
4
]
name_box_shape
=
[
model_name
+
"@box_shape"
]
node_box_shape
=
onnx
.
helper
.
make_node
(
'Constant'
,
inputs
=
[],
outputs
=
name_box_shape
,
value
=
onnx
.
helper
.
make_tensor
(
name
=
name_box_shape
[
0
]
+
"@const"
,
data_type
=
onnx
.
TensorProto
.
INT64
,
dims
=
[
len
(
box_shape
)],
vals
=
box_shape
))
node_list
.
append
(
node_box_shape
)
outputs_pred_box_new_shape
=
[
model_name
+
"@pred_box_new_shape"
]
node_pred_box_new_shape
=
onnx
.
helper
.
make_node
(
'Reshape'
,
inputs
=
outputs_pred_box_mul_conf
+
name_box_shape
,
outputs
=
outputs_pred_box_new_shape
)
node_list
.
append
(
node_pred_box_new_shape
)
outputs_pred_box_x
=
[
model_name
+
"@_pred_box_x"
]
outputs_pred_box_y
=
[
model_name
+
"@_pred_box_y"
]
outputs_pred_box_w
=
[
model_name
+
"@_pred_box_w"
]
outputs_pred_box_h
=
[
model_name
+
"@_pred_box_h"
]
node_pred_box_split
=
onnx
.
helper
.
make_node
(
'Split'
,
inputs
=
outputs_pred_box_new_shape
,
outputs
=
outputs_pred_box_x
+
outputs_pred_box_y
+
outputs_pred_box_w
+
outputs_pred_box_h
,
axis
=
2
)
node_list
.
append
(
node_pred_box_split
)
name_number_two
=
[
model_name
+
"@number_two"
]
node_number_two
=
onnx
.
helper
.
make_node
(
"Constant"
,
inputs
=
[],
outputs
=
name_number_two
,
value
=
onnx
.
helper
.
make_tensor
(
name
=
name_number_two
[
0
]
+
"@const"
,
data_type
=
onnx
.
TensorProto
.
FLOAT
,
dims
=
(),
vals
=
[
2
]))
node_list
.
append
(
node_number_two
)
outputs_half_w
=
[
model_name
+
"@half_w"
]
node_half_w
=
onnx
.
helper
.
make_node
(
"Div"
,
inputs
=
outputs_pred_box_w
+
name_number_two
,
outputs
=
outputs_half_w
)
node_list
.
append
(
node_half_w
)
outputs_half_h
=
[
model_name
+
"@half_h"
]
node_half_h
=
onnx
.
helper
.
make_node
(
"Div"
,
inputs
=
outputs_pred_box_h
+
name_number_two
,
outputs
=
outputs_half_h
)
node_list
.
append
(
node_half_h
)
outputs_pred_box_x1
=
[
model_name
+
"@pred_box_x1"
]
node_pred_box_x1
=
onnx
.
helper
.
make_node
(
'Sub'
,
inputs
=
outputs_pred_box_x
+
outputs_half_w
,
outputs
=
outputs_pred_box_x1
)
node_list
.
append
(
node_pred_box_x1
)
outputs_pred_box_y1
=
[
model_name
+
"@pred_box_y1"
]
node_pred_box_y1
=
onnx
.
helper
.
make_node
(
'Sub'
,
inputs
=
outputs_pred_box_y
+
outputs_half_h
,
outputs
=
outputs_pred_box_y1
)
node_list
.
append
(
node_pred_box_y1
)
outputs_pred_box_x2
=
[
model_name
+
"@pred_box_x2"
]
node_pred_box_x2
=
onnx
.
helper
.
make_node
(
'Add'
,
inputs
=
outputs_pred_box_x
+
outputs_half_w
,
outputs
=
outputs_pred_box_x2
)
node_list
.
append
(
node_pred_box_x2
)
outputs_pred_box_y2
=
[
model_name
+
"@pred_box_y2"
]
node_pred_box_y2
=
onnx
.
helper
.
make_node
(
'Add'
,
inputs
=
outputs_pred_box_y
+
outputs_half_h
,
outputs
=
outputs_pred_box_y2
)
node_list
.
append
(
node_pred_box_y2
)
outputs_sqeeze_image_size
=
[
model_name
+
"@sqeeze_image_size"
]
node_sqeeze_image_size
=
onnx
.
helper
.
make_node
(
"Squeeze"
,
axes
=
[
0
],
inputs
=
image_size
,
outputs
=
outputs_sqeeze_image_size
)
node_list
.
append
(
node_sqeeze_image_size
)
output_img_height
=
[
model_name
+
"@img_height"
]
output_img_width
=
[
model_name
+
"@img_width"
]
node_image_size_split
=
onnx
.
helper
.
make_node
(
"Split"
,
inputs
=
outputs_sqeeze_image_size
,
outputs
=
output_img_height
+
output_img_width
,
axis
=-
1
,
split
=
[
1
,
1
])
node_list
.
append
(
node_image_size_split
)
output_img_width_cast
=
[
model_name
+
"@img_width_cast"
]
node_img_width_cast
=
onnx
.
helper
.
make_node
(
'Cast'
,
inputs
=
output_img_width
,
outputs
=
output_img_width_cast
,
to
=
1
)
node_list
.
append
(
node_img_width_cast
)
output_img_height_cast
=
[
model_name
+
"@img_height_cast"
]
node_img_height_cast
=
onnx
.
helper
.
make_node
(
'Cast'
,
inputs
=
output_img_height
,
outputs
=
output_img_height_cast
,
to
=
1
)
node_list
.
append
(
node_img_height_cast
)
outputs_pred_box_x1_decode
=
[
model_name
+
"@pred_box_x1_decode"
]
outputs_pred_box_y1_decode
=
[
model_name
+
"@pred_box_y1_decode"
]
outputs_pred_box_x2_decode
=
[
model_name
+
"@pred_box_x2_decode"
]
outputs_pred_box_y2_decode
=
[
model_name
+
"@pred_box_y2_decode"
]
node_pred_box_x1_decode
=
onnx
.
helper
.
make_node
(
'Mul'
,
inputs
=
outputs_pred_box_x1
+
output_img_width_cast
,
outputs
=
outputs_pred_box_x1_decode
)
node_list
.
append
(
node_pred_box_x1_decode
)
node_pred_box_y1_decode
=
onnx
.
helper
.
make_node
(
'Mul'
,
inputs
=
outputs_pred_box_y1
+
output_img_height_cast
,
outputs
=
outputs_pred_box_y1_decode
)
node_list
.
append
(
node_pred_box_y1_decode
)
node_pred_box_x2_decode
=
onnx
.
helper
.
make_node
(
'Mul'
,
inputs
=
outputs_pred_box_x2
+
output_img_width_cast
,
outputs
=
outputs_pred_box_x2_decode
)
node_list
.
append
(
node_pred_box_x2_decode
)
node_pred_box_y2_decode
=
onnx
.
helper
.
make_node
(
'Mul'
,
inputs
=
outputs_pred_box_y2
+
output_img_height_cast
,
outputs
=
outputs_pred_box_y2_decode
)
node_list
.
append
(
node_pred_box_y2_decode
)
name_number_one
=
[
model_name
+
"@one"
]
node_number_one
=
onnx
.
helper
.
make_node
(
'Constant'
,
inputs
=
[],
outputs
=
name_number_one
,
value
=
onnx
.
helper
.
make_tensor
(
name
=
name_number_one
[
0
]
+
"@const"
,
data_type
=
onnx
.
TensorProto
.
FLOAT
,
dims
=
(),
vals
=
[
1
]))
node_list
.
append
(
node_number_one
)
output_new_img_height
=
[
model_name
+
"@new_img_height"
]
node_new_img_height
=
onnx
.
helper
.
make_node
(
'Sub'
,
inputs
=
output_img_height_cast
+
name_number_one
,
outputs
=
output_new_img_height
)
node_list
.
append
(
node_new_img_height
)
output_new_img_width
=
[
model_name
+
"@new_img_width"
]
node_new_img_width
=
onnx
.
helper
.
make_node
(
'Sub'
,
inputs
=
output_img_width_cast
+
name_number_one
,
outputs
=
output_new_img_width
)
node_list
.
append
(
node_new_img_width
)
outputs_pred_box_x2_sub_w
=
[
model_name
+
"@pred_box_x2_sub_w"
]
node_pred_box_x2_sub_w
=
onnx
.
helper
.
make_node
(
'Sub'
,
inputs
=
outputs_pred_box_x2_decode
+
output_new_img_width
,
outputs
=
outputs_pred_box_x2_sub_w
)
node_list
.
append
(
node_pred_box_x2_sub_w
)
outputs_pred_box_y2_sub_h
=
[
model_name
+
"@pred_box_y2_sub_h"
]
node_pred_box_y2_sub_h
=
onnx
.
helper
.
make_node
(
'Sub'
,
inputs
=
outputs_pred_box_y2_decode
+
output_new_img_height
,
outputs
=
outputs_pred_box_y2_sub_h
)
node_list
.
append
(
node_pred_box_y2_sub_h
)
outputs_pred_box_x1_clip
=
[
model_name
+
"@pred_box_x1_clip"
]
outputs_pred_box_y1_clip
=
[
model_name
+
"@pred_box_y1_clip"
]
outputs_pred_box_x2_clip
=
[
model_name
+
"@pred_box_x2_clip"
]
outputs_pred_box_y2_clip
=
[
model_name
+
"@pred_box_y2_clip"
]
node_pred_box_x1_clip
=
onnx
.
helper
.
make_node
(
'Clip'
,
inputs
=
outputs_pred_box_x1_decode
,
outputs
=
outputs_pred_box_x1_clip
,
min
=
0.0
,
max
=
float
(
MAX_FLOAT32
))
node_list
.
append
(
node_pred_box_x1_clip
)
node_pred_box_y1_clip
=
onnx
.
helper
.
make_node
(
'Clip'
,
inputs
=
outputs_pred_box_y1_decode
,
outputs
=
outputs_pred_box_y1_clip
,
min
=
0.0
,
max
=
float
(
MAX_FLOAT32
))
node_list
.
append
(
node_pred_box_y1_clip
)
node_pred_box_x2_clip
=
onnx
.
helper
.
make_node
(
'Clip'
,
inputs
=
outputs_pred_box_x2_sub_w
,
outputs
=
outputs_pred_box_x2_clip
,
min
=
0.0
,
max
=
float
(
MAX_FLOAT32
))
node_list
.
append
(
node_pred_box_x2_clip
)
node_pred_box_y2_clip
=
onnx
.
helper
.
make_node
(
'Clip'
,
inputs
=
outputs_pred_box_y2_sub_h
,
outputs
=
outputs_pred_box_y2_clip
,
min
=
0.0
,
max
=
float
(
MAX_FLOAT32
))
node_list
.
append
(
node_pred_box_y2_clip
)
outputs_pred_box_x2_res
=
[
model_name
+
"@box_x2_res"
]
node_pred_box_x2_res
=
onnx
.
helper
.
make_node
(
'Sub'
,
inputs
=
outputs_pred_box_x2_decode
+
outputs_pred_box_x2_clip
,
outputs
=
outputs_pred_box_x2_res
)
node_list
.
append
(
node_pred_box_x2_res
)
outputs_pred_box_y2_res
=
[
model_name
+
"@box_y2_res"
]
node_pred_box_y2_res
=
onnx
.
helper
.
make_node
(
'Sub'
,
inputs
=
outputs_pred_box_y2_decode
+
outputs_pred_box_y2_clip
,
outputs
=
outputs_pred_box_y2_res
)
node_list
.
append
(
node_pred_box_y2_res
)
node_pred_box_result
=
onnx
.
helper
.
make_node
(
'Concat'
,
inputs
=
outputs_pred_box_x1_clip
+
outputs_pred_box_y1_clip
+
outputs_pred_box_x2_res
+
outputs_pred_box_y2_res
,
outputs
=
outputs
[
'Boxes'
],
axis
=-
1
)
node_list
.
append
(
node_pred_box_result
)
score_shape
=
[
1
,
input_height
*
input_width
*
int
(
num_anchors
),
class_num
]
name_score_shape
=
[
model_name
+
"@score_shape"
]
node_score_shape
=
onnx
.
helper
.
make_node
(
"Constant"
,
inputs
=
[],
outputs
=
name_score_shape
,
value
=
onnx
.
helper
.
make_tensor
(
name
=
name_score_shape
[
0
]
+
"@const"
,
data_type
=
onnx
.
TensorProto
.
INT64
,
dims
=
[
len
(
score_shape
)],
vals
=
score_shape
))
node_list
.
append
(
node_score_shape
)
node_score_new_shape
=
onnx
.
helper
.
make_node
(
'Reshape'
,
inputs
=
outputs_score
+
name_score_shape
,
outputs
=
outputs
[
'Scores'
])
node_list
.
append
(
node_score_new_shape
)
return
node_list
x2paddle/op_mapper/paddle2onnx/paddle_op_mapper.py
已删除
100644 → 0
浏览文件 @
56697812
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import
math
import
sys
import
x2paddle
import
os
import
numpy
as
np
import
paddle.fluid.core
as
core
import
paddle.fluid
as
fluid
import
onnx
from
onnx
import
helper
,
onnx_pb
from
x2paddle.op_mapper.paddle2onnx.opset9.opset
import
OpSet9
from
x2paddle.op_mapper.paddle2onnx.opset10.opset
import
OpSet10
from
x2paddle.op_mapper.paddle2onnx.opset11.opset
import
OpSet11
class
PaddleOpMapper
(
object
):
def
__init__
(
self
):
self
.
support_opsets
=
[
9
,
10
,
11
]
self
.
default_opset
=
10
self
.
name_counter
=
dict
()
self
.
op_set
=
None
def
convert
(
self
,
program
,
save_dir
,
scope
=
None
,
opset_version
=
10
):
self
.
op_set
=
self
.
create_opset
(
opset_version
)
weight_nodes
=
self
.
op_set
.
convert_weights
(
program
,
scope
=
scope
)
op_nodes
=
list
()
input_nodes
=
list
()
output_nodes
=
list
()
unsupported_ops
=
set
()
print
(
"Translating PaddlePaddle to ONNX...
\n
"
)
for
block
in
program
.
blocks
:
for
i
,
op
in
enumerate
(
block
.
ops
):
sys
.
stdout
.
write
(
"
\r
Total:{}, Current:{} : {} "
.
format
(
len
(
block
.
ops
),
i
+
1
,
op
.
type
))
sys
.
stdout
.
flush
()
if
not
hasattr
(
self
.
op_set
,
op
.
type
):
unsupported_ops
.
add
(
op
.
type
)
continue
if
len
(
unsupported_ops
)
>
0
:
continue
node
=
getattr
(
self
.
op_set
,
op
.
type
)(
op
,
block
)
if
op
.
type
==
'feed'
:
print
(
node
.
name
)
input_nodes
.
append
(
node
)
elif
op
.
type
==
'fetch'
:
output_nodes
.
append
(
node
)
else
:
if
isinstance
(
node
,
list
):
op_nodes
=
op_nodes
+
node
else
:
op_nodes
.
append
(
node
)
if
len
(
unsupported_ops
)
>
0
:
print
(
"
\n
There's {} ops are not supported yet"
.
format
(
len
(
unsupported_ops
)))
for
op
in
unsupported_ops
:
print
(
"=========== {} ==========="
.
format
(
op
))
return
graph
=
helper
.
make_graph
(
nodes
=
weight_nodes
+
op_nodes
,
name
=
'onnx_model_from_paddle'
,
initializer
=
[],
inputs
=
input_nodes
,
outputs
=
output_nodes
)
opset_imports
=
[
helper
.
make_opsetid
(
""
,
opset_version
)]
model
=
helper
.
make_model
(
graph
,
producer_name
=
'X2Paddle'
,
opset_imports
=
opset_imports
)
onnx
.
checker
.
check_model
(
model
)
if
not
os
.
path
.
isdir
(
save_dir
):
os
.
makedirs
(
save_dir
)
with
open
(
os
.
path
.
join
(
save_dir
,
'x2paddle_model.onnx'
),
'wb'
)
as
f
:
f
.
write
(
model
.
SerializeToString
())
print
(
"
\n
Translated model saved in {}"
.
format
(
os
.
path
.
join
(
save_dir
,
'x2paddle_model.onnx'
)))
def
create_opset
(
self
,
opset_version
=
10
):
run_opset
=
self
.
default_opset
opset
=
''
if
opset_version
in
self
.
support_opsets
:
run_opset
=
opset_version
else
:
for
support_opset_version
in
self
.
support_opsets
:
if
support_opset_version
<
opset_version
:
run_opset
=
support_opset_version
else
:
break
print
(
'Now, onnx2paddle support convert onnx model opset_verison {},'
'opset_verison of your onnx model is {}, automatically treated as op_set: {}.'
.
format
(
self
.
support_opsets
,
opset_version
,
run_opset
))
opset
=
'OpSet'
+
str
(
run_opset
)
return
eval
(
opset
)()
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录