PaddlePaddle / X2Paddle
Commit 22c18dd4 (unverified)
Authored Jul 17, 2020 by Jason; committed via GitHub on Jul 17, 2020

Merge pull request #309 from Channingss/paddle_onnx

Paddle2ONNX support export ONNX opset9,10,11

Parents: b7ba7dc7, 00674c14

Showing 29 changed files with 2476 additions and 215 deletions (+2476 -215)
README.md  +1 -0
x2paddle/convert.py  +11 -5
x2paddle/op_mapper/onnx2paddle/__init__.py  +0 -0
x2paddle/op_mapper/onnx2paddle/onnx_op_mapper.py  +1 -1
x2paddle/op_mapper/onnx2paddle/opsets/__init__.py  +0 -0
x2paddle/op_mapper/onnx2paddle/opsets/_shape_inference.py  +599 -0
x2paddle/op_mapper/onnx2paddle/opsets/custom_layer/__init__.py  +0 -4
x2paddle/op_mapper/onnx2paddle/opsets/custom_layer/register.py  +0 -0
x2paddle/op_mapper/onnx2paddle/opsets/opset9.py  +0 -0
x2paddle/op_mapper/paddle2onnx/__init__.py  +0 -0
x2paddle/op_mapper/paddle2onnx/opset10/__init__.py  +0 -0
x2paddle/op_mapper/paddle2onnx/opset10/opset.py  +61 -0
x2paddle/op_mapper/paddle2onnx/opset10/paddle_custom_layer/__init__.py  +0 -0
x2paddle/op_mapper/paddle2onnx/opset10/paddle_custom_layer/im2sequence.py  +8 -0
x2paddle/op_mapper/paddle2onnx/opset10/paddle_custom_layer/multiclass_nms.py  +32 -0
x2paddle/op_mapper/paddle2onnx/opset10/paddle_custom_layer/yolo_box.py  +8 -0
x2paddle/op_mapper/paddle2onnx/opset11/__init__.py  +0 -0
x2paddle/op_mapper/paddle2onnx/opset11/opset.py  +249 -0
x2paddle/op_mapper/paddle2onnx/opset11/paddle_custom_layer/__init__.py  +0 -0
x2paddle/op_mapper/paddle2onnx/opset11/paddle_custom_layer/im2sequence.py  +8 -0
x2paddle/op_mapper/paddle2onnx/opset11/paddle_custom_layer/multiclass_nms.py  +29 -5
x2paddle/op_mapper/paddle2onnx/opset11/paddle_custom_layer/yolo_box.py  +0 -0
x2paddle/op_mapper/paddle2onnx/opset9/__init__.py  +0 -0
x2paddle/op_mapper/paddle2onnx/opset9/opset.py  +92 -200
x2paddle/op_mapper/paddle2onnx/opset9/paddle_custom_layer/__init__.py  +0 -0
x2paddle/op_mapper/paddle2onnx/opset9/paddle_custom_layer/im2sequence.py  +0 -0
x2paddle/op_mapper/paddle2onnx/opset9/paddle_custom_layer/multiclass_nms.py  +447 -0
x2paddle/op_mapper/paddle2onnx/opset9/paddle_custom_layer/yolo_box.py  +822 -0
x2paddle/op_mapper/paddle2onnx/paddle_op_mapper.py  +108 -0
README.md

@@ -61,6 +61,7 @@ x2paddle --framework=paddle2onnx --model=paddle_infer_model_dir --save_dir=onnx_model
 |--without_data_format_optimization | **[optional]** For TensorFlow: when set, disables the NHWC->NCHW optimization; see [Q2 in FAQ.md](FAQ.md) |
 |--define_input_shape | **[optional]** For TensorFlow: when set, the user must supply the shape of every Placeholder; see [Q2 in FAQ.md](FAQ.md) |
 |--params_merge | **[optional]** When set, all model parameters of the converted inference_model are merged and saved as a single file, __params__ |
+|--onnx_opset | **[optional]** When framework is paddle2onnx, selects the ONNX OpSet version to export; 9, 10 and 11 are currently supported, default 10 |
...
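The new --onnx_opset row documents a flag that is consumed by convert.py in the next section. A hedged example of driving the documented command from Python; the model and output directory names are placeholders, not part of this change:

    import subprocess

    # Placeholder paths; substitute a real exported Paddle inference model directory.
    subprocess.run([
        "x2paddle",
        "--framework=paddle2onnx",
        "--model=paddle_infer_model_dir",
        "--save_dir=onnx_model",
        "--onnx_opset=11",   # 9, 10 or 11, per the README row added above
    ], check=True)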
x2paddle/convert.py

@@ -75,6 +75,12 @@ def arg_parser():
         action="store_true",
         default=False,
         help="define input shape for tf model")
+    parser.add_argument(
+        "--onnx_opset",
+        "-oo",
+        type=int,
+        default=10,
+        help="when paddle2onnx set onnx opset version to export")
     parser.add_argument(
         "--params_merge",
         "-pm",
...
@@ -172,7 +178,7 @@ def onnx2paddle(model_path, save_dir, params_merge=False):
         return
     print("Now translating model from onnx to paddle.")
-    from x2paddle.op_mapper.onnx_op_mapper import ONNXOpMapper
+    from x2paddle.op_mapper.onnx2paddle.onnx_op_mapper import ONNXOpMapper
     from x2paddle.decoder.onnx_decoder import ONNXDecoder
     from x2paddle.optimizer.onnx_optimizer import ONNXOptimizer
     model = ONNXDecoder(model_path)
...
@@ -186,12 +192,12 @@ def onnx2paddle(model_path, save_dir, params_merge=False):
     print("Paddle model and code generated.")

-def paddle2onnx(model_path, save_dir):
+def paddle2onnx(model_path, save_dir, opset_number):
     from x2paddle.decoder.paddle_decoder import PaddleDecoder
-    from x2paddle.op_mapper.paddle_op_mapper import PaddleOpMapper
+    from x2paddle.op_mapper.paddle2onnx.paddle_op_mapper import PaddleOpMapper
     model = PaddleDecoder(model_path, '__model__', '__params__')
     mapper = PaddleOpMapper()
-    mapper.convert(model.program, save_dir)
+    mapper.convert(model.program, save_dir, opset_number=opset_number)

 def main():
...
@@ -258,7 +264,7 @@ def main():
     elif args.framework == "paddle2onnx":
         assert args.model is not None, "--model should be defined while translating paddle model to onnx"
-        paddle2onnx(args.model, args.save_dir)
+        paddle2onnx(args.model, args.save_dir, args.onnx_opset)
     else:
         raise Exception(
...
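The change above threads the new --onnx_opset value from the argument parser into paddle2onnx(). A minimal sketch of the resulting call path, assuming an exported Paddle inference model directory; the paths below are illustrative only, not part of the patch:

    # Illustrative only: mirrors how main() dispatches after this patch.
    from x2paddle.convert import paddle2onnx

    model_dir = "paddle_infer_model_dir"   # assumed example input directory
    save_dir = "onnx_model"                # assumed example output directory
    paddle2onnx(model_dir, save_dir, 11)   # opset_number selects OpSet9/10/11 below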
x2paddle/op_mapper/onnx_opsets/__init__.py → x2paddle/op_mapper/onnx2paddle/__init__.py
File moved.
x2paddle/op_mapper/onnx_op_mapper.py → x2paddle/op_mapper/onnx2paddle/onnx_op_mapper.py

@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from x2paddle.op_mapper.onnx_opsets.opset9 import OpSet9
+from x2paddle.op_mapper.onnx2paddle.opsets.opset9 import OpSet9
 from x2paddle.core.op_mapper import OpMapper
 from x2paddle.op_mapper.onnx_opsets.custom_layer import *
 from x2paddle.decoder.onnx_decoder import ONNXGraph, ONNXGraphNode, ONNXGraphDataNode
...
x2paddle/op_mapper/paddle_custom_layer/__init__.py → x2paddle/op_mapper/onnx2paddle/opsets/__init__.py
File moved.
x2paddle/op_mapper/onnx2paddle/opsets/_shape_inference.py  (new file, mode 100644)

# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from x2paddle.decoder.onnx_decoder import ONNXGraph, ONNXGraphNode, ONNXGraphDataNode
import numpy as np
import sympy


def handle_negative_axis(axis, rank):
    return axis if axis >= 0 else axis + rank


class ShapeInference():
    def __init__(self, decoder, auto_merge=False):
        self.decoder = decoder
        self.fluid_data = {}
        self.suggested_merge_ = {}
        self.symbolic_dims_ = {}
        self.auto_merge_ = auto_merge
        self.dispatcher = {
            # activation ops
            'Relu': self.activation_ops, 'LeakyRelu': self.activation_ops, 'Elu': self.activation_ops,
            'ThresholdRelu': self.activation_ops, 'Prelu': self.activation_ops, 'Tanh': self.activation_ops,
            'Sigmoid': self.activation_ops, 'Softplus': self.activation_ops, 'Softsign': self.activation_ops,
            'HardSigmoid': self.activation_ops, 'Shrink': self.activation_ops, 'Exp': self.activation_ops,
            'Clip': self.activation_ops,
            # elementwise ops
            'Add': self.elementwise_ops, 'Div': self.elementwise_ops, 'Sub': self.elementwise_ops,
            'Mul': self.elementwise_ops, 'Pow': self.elementwise_ops, 'Sqrt': self.elementwise_ops,
            'Softmax': self.elementwise_ops,
            'Constant': self.constant, 'AveragePool': self.pool, 'MaxPool': self.pool, 'Cast': self.cast,
            'Conv': self.conv, 'BatchNormalization': self.batch_norm, 'Pad': self.pad, 'Gather': self.gather,
            'Split': self.split, 'Transpose': self.transpose, 'Reshape': self.reshape, 'MatMul': self.matmul,
            'Squeeze': self.squeeze, 'Unsqueeze': self.unsqueeze, 'Concat': self.concat,
        }
        self.run_ = True
        self.suggested_merge_ = {}
        self.symbolic_dims_ = {}
        self.input_symbols_ = {}

    def __call__(self):
        """
        run shape inference
        """
        nodes = self.decoder.model.graph.node
        node_map = self.decoder.onnx_graph.node_map
        value_infos = self.decoder.onnx_graph.value_infos
        onnx_model = self.decoder.model
        #self._apply_suggested_merge(graph_input_only=True)
        for layer in nodes:
            node = node_map[layer.name]
            for opt in layer.output:
                if opt in value_infos:
                    value_info = value_infos[opt]
                    #if len(value_info['shape']) == 0 or value_info[
                    #        'dtype'] is None or 0 in value_info['shape']:
                    #    #TODO add node shape inference
                    #    if self.is_support_inference(node):
                    #        op_infer = self.dispatcher[node.layer_type]
                    #        #shapes = op_infer(node)
                    #        print(node.layer_name + ': ')
                    #        print(node.layer_type + ': ')
                    #else:
                    #    print(node.layer_name)
                    node.dtype = value_info['dtype']
                    node.out_shapes.append(value_info['shape'])
                else:
                    #TODO add node shape inference
                    if self.is_support_inference(node):
                        op_infer = self.dispatcher[node.layer_type]
                        #shapes = op_infer(node)
                        #print(node.layer_name + ': ')
                        #print(node.layer_type + ': ')

    def get_input_node(self, node, idx, copy=False):
        return self.decoder.onnx_graph.get_input_node(node, idx=idx, copy=copy)

    def get_fluid_data(self, node, return_ndarray=False):
        data = None
        if node.layer_name in self.fluid_data:
            data = self.fluid_data[node.layer_name]
        elif isinstance(node, ONNXGraphDataNode):
            data = node.weight
        elif isinstance(node, ONNXGraphNode):
            data = node.value
        if return_ndarray:
            return data
        else:
            return data.tolist()

    def is_support_inference(self, node):
        if node.layer_type not in self.dispatcher:
            print("[WARNNING] Shape inference not support Node[{}](op type: {}) ".format(
                node.layer_name, node.layer_type))
            return False
        return True

    def _try_get_value(self, node, idx):
        if idx >= len(node.inputs):
            return None
        return self.get_input_node(node, idx=idx, return_ndarray=True)

    def _get_int_values(self, node, broadcast=False):
        values = [self._try_get_value(node, i) for i in range(len(node.input))]
        if all([v is not None for v in values]):
            # some shape compute is in floating point, cast to int for sympy
            for i, v in enumerate(values):
                if type(v) != np.ndarray:
                    continue
                if len(v.shape) > 1:
                    new_v = None  # ignore value for rank > 1
                elif len(v.shape) == 0:
                    new_v = int(np.asscalar(v))
                else:
                    assert len(v.shape) == 1
                    new_v = [int(vv) for vv in v]
                values[i] = new_v
            values_len = [len(v) if type(v) == list else 0 for v in values]
            max_len = max(values_len)
            if max_len >= 1 and broadcast:
                # broadcast
                for i, v in enumerate(values):
                    if v is None:
                        continue  # don't broadcast if value is unknown
                    if type(v) == list:
                        if len(v) < max_len:
                            values[i] = v * max_len
                        else:
                            assert len(v) == max_len
                    else:
                        values[i] = [v] * max_len
        return values

    def _compute_on_sympy_data(self, node, op_func):
        assert len(node.outputs) == 1
        values = self._get_int_values(node, broadcast=True)
        if all([v is not None for v in values]):
            is_list = [type(v) == list for v in values]
            as_list = any(is_list)
            if as_list:
                data = [op_func(vs) for vs in zip(*values)]
                self.fluid_data[node.layer_name] = data
                node.out_shapes.append(data.shape)
                print('*' * 10, data)
            else:
                data = op_func(values)
                self.fluid_data[node.layer_name] = data
                print('*' * 10, data)
                node.out_shapes.append(data.shape)

    def _pass_on_sympy_data(self, node):
        assert len(node.inputs) == 1 or node.layer_type == 'Reshape'
        self._compute_on_sympy_data(node, lambda x: x[0])

    def _get_sympy_shape(self, node, idx):
        sympy_shape = []
        for d in self._get_shape(node, idx):
            if type(d) == str:
                sympy_shape.append(self.symbolic_dims_[d] if d in self.symbolic_dims_
                                   else sympy.Symbol(d, integer=True))
            else:
                assert None != d
                sympy_shape.append(d)
        return sympy_shape

    def _check_merged_dims(self, dims, allow_broadcast=True):
        if allow_broadcast:
            dims = [d for d in dims if not (is_literal(d) and int(d) <= 1)]
        if not all([d == dims[0] for d in dims]):
            self._add_suggested_merge(dims, apply=True)

    def check_specific_shape(self, input_node, output_node, shape):
        if -1 in input_node.out_shapes[0]:
            assert "Shape inference failed, when calculate output_node[{}]'s \
                shape need specific shape, but got input_node[{}]'s shape: {}".format(
                output_node.layer_name, input_node.layer_name, input_node.out_shapes[0])

    def _add_suggested_merge(self, symbols, apply=False):
        assert all([(type(s) == str and s in self.symbolic_dims_) or is_literal(s) for s in symbols])
        symbols = set(symbols)
        for k, v in self.suggested_merge_.items():
            if k in symbols:
                symbols.remove(k)
                symbols.add(v)
        map_to = None
        # if there is literal, map to it first
        for s in symbols:
            if is_literal(s):
                map_to = s
                break
        # when no literals, map to input symbolic dims, then existing symbolic dims
        if map_to is None:
            for s in symbols:
                if s in self.input_symbols_:
                    map_to = s
                    break
        if map_to is None:
            for s in symbols:
                if type(self.symbolic_dims_[s]) == sympy.Symbol:
                    map_to = s
                    break
        # when nothing to map to, use the shorter one
        if map_to is None:
            if self.verbose_ > 0:
                print('Potential unsafe merge between symbolic expressions: ({})'.format(','.join(symbols)))
            symbols_list = list(symbols)
            lens = [len(s) for s in symbols_list]
            map_to = symbols_list[lens.index(min(lens))]
        symbols.remove(map_to)

    def _merge_symbols(self, dims):
        if not all([type(d) == str for d in dims]):
            if self.auto_merge_:
                assert len(dims) == 2
                # only allow symbol->int merge in binary ops for now
                is_int = [is_literal(d) for d in dims]
                if sum(is_int) == 1:
                    int_dim = is_int.index(1)
                    if self.verbose_ > 0:
                        print('dim {} has been merged with value {}'.format(
                            dims[1 - int_dim], dims[int_dim]))
                    self._check_merged_dims(dims, allow_broadcast=False)
                    return dims[int_dim]
                else:
                    if self.verbose_ > 0:
                        print('dim {} has been mergd with dim {}'.format(dims[0], dims[1]))
                    return dims[0]
            else:
                return None
        if all([d == dims[0] for d in dims]):
            return dims[0]
        merged = [self.suggested_merge_[d] if d in self.suggested_merge_ else d for d in dims]
        if all([d == merged[0] for d in merged]):
            assert merged[0] in self.symbolic_dims_
            return merged[0]
        else:
            return None

    # broadcast from right to left, and merge symbolic dims if needed
    def _broadcast_shapes(self, shape1, shape2):
        new_shape = []
        rank1 = len(shape1)
        rank2 = len(shape2)
        new_rank = max(rank1, rank2)
        for i in range(new_rank):
            dim1 = shape1[rank1 - 1 - i] if i < rank1 else 1
            dim2 = shape2[rank2 - 1 - i] if i < rank2 else 1
            if dim1 == 1 or dim1 == dim2:
                new_dim = dim2
            elif dim2 == 1:
                new_dim = dim1
            else:
                new_dim = self._merge_symbols([dim1, dim2])
                if not new_dim:
                    # warning about unsupported broadcast when not auto merge
                    # note that auto merge has the risk of incorrectly merge symbols while one of them being 1
                    # for example, 'a' = 1, 'b' = 5 at runtime is valid broadcasting, but with auto merge 'a' == 'b'
                    if self.auto_merge_:
                        self._add_suggested_merge([dim1, dim2], apply=True)
                    else:
                        print('unsupported broadcast between ' + str(dim1) + ' ' + str(dim2))
            new_shape = [new_dim] + new_shape
        return new_shape

    def _apply_suggested_merge(self, graph_input_only=False):
        if not self.suggested_merge_:
            return
        for i in list(self.decoder.model.graph.input) + (
            [] if graph_input_only else list(self.decoder.model.graph.value_info)):
            for d in i.type.tensor_type.shape.dim:
                if d.dim_param in self.suggested_merge_:
                    v = self.suggested_merge_[d.dim_param]
                    if is_literal(v):
                        d.dim_value = int(v)
                    else:
                        d.dim_param = v

    def _add_suggested_merge(self, symbols, apply=False):
        assert all([(type(s) == str and s in self.symbolic_dims_) or is_literal(s) for s in symbols])
        symbols = set(symbols)
        for k, v in self.suggested_merge_.items():
            if k in symbols:
                symbols.remove(k)
                symbols.add(v)
        map_to = None
        # if there is literal, map to it first
        for s in symbols:
            if is_literal(s):
                map_to = s
                break
        # when no literals, map to input symbolic dims, then existing symbolic dims
        if map_to is None:
            for s in symbols:
                if s in self.input_symbols_:
                    map_to = s
                    break
        if map_to is None:
            for s in symbols:
                if type(self.symbolic_dims_[s]) == sympy.Symbol:
                    map_to = s
                    break
        # when nothing to map to, use the shorter one
        if map_to is None:
            if self.verbose_ > 0:
                print('Potential unsafe merge between symbolic expressions: ({})'.format(','.join(symbols)))
            symbols_list = list(symbols)
            lens = [len(s) for s in symbols_list]
            map_to = symbols_list[lens.index(min(lens))]
        symbols.remove(map_to)
        for s in symbols:
            if s == map_to:
                continue
            if is_literal(map_to) and is_literal(s):
                assert int(map_to) == int(s)
            self.suggested_merge_[s] = int(map_to) if is_literal(map_to) else map_to
            for k, v in self.suggested_merge_.items():
                if v == s:
                    self.suggested_merge_[k] = map_to
        if apply and self.auto_merge_:
            self._apply_suggested_merge()

    def pool_conv_ops(self, node):
        fluid_shape = self.get_input_node(node, idx=0).out_shapes[0]
        if len(node.inputs) > 1:
            W_shape = self.get_input_node(node, idx=1).out_shapes[0]
            rank = len(W_shape) - 2  # number of spatial axes
            kernel_shape = W_shape[-rank:]
            sympy_shape[1] = W_shape[0]
        else:
            W_shape = None
            kernel_shape = node.get_attr('kernel_shape')
            rank = len(kernel_shape)
        dilations = node.get_attr('dilations', [1] * rank)
        strides = node.get_attr('strides', [1] * rank)
        pads = node.get_attr('pads')
        effective_kernel_shape = [(k - 1) * d + 1 for k, d in zip(kernel_shape, dilations)]
        if pads is None:
            pads = [0] * (2 * rank)
            auto_pad = node.get_attr('auto_pad', b'NOTSET').decode('utf-8')
            if auto_pad != 'VALID' and auto_pad != 'NOTSET':
                try:
                    residual = [sympy.Mod(d, s) for d, s in zip(fluid_shape[-rank:], strides)]
                    total_pads = [
                        max(0, (k - s) if r == 0 else (k - r))
                        for k, s, r in zip(effective_kernel_shape, strides, residual)
                    ]
                except TypeError:
                    # sympy may throw TypeError: cannot determine truth value of Relational
                    total_pads = [
                        max(0, (k - s)) for k, s in zip(effective_kernel_shape, strides)
                    ]  # assuming no residual if sympy throws error
            elif auto_pad == 'VALID':
                total_pads = []
            else:
                total_pads = [0] * rank
        else:
            assert len(pads) == 2 * rank
            total_pads = [p1 + p2 for p1, p2 in zip(pads[:rank], pads[rank:])]
        ceil_mode = node.get_attr('ceil_mode', 0)
        for i in range(rank):
            effective_input_size = fluid_shape[-rank + i]
            if len(total_pads) > 0:
                effective_input_size = effective_input_size + total_pads[i]
            if ceil_mode:
                strided_kernel_positions = sympy.ceiling(
                    (effective_input_size - effective_kernel_shape[i]) / strides[i])
            else:
                strided_kernel_positions = (
                    effective_input_size - effective_kernel_shape[i]) // strides[i]
            fluid_shape[-rank + i] = strided_kernel_positions + 1
        node.out_shapes.append(fluid_shape)
        return fluid_shape

    def cast(self, node):
        fluid_shape = self.get_input_node(node, idx=0).out_shape[0]
        node.out_shapes.append(fluid_shape)
        return fluid_shape

    def pool(self, node):
        return self.conv_pool_ops(node)

    def conv(self, node):
        return self.conv_pool_ops(node)

    def batch_norm(self, node):
        fluid_shape = self.get_input_node(node, idx=0).out_shapes[0]
        node.out_shapes.append(fluid_shape)
        return fluid_shape

    def activation_ops(self, node):
        fluid_shape = self.get_input_node(node, idx=0).out_shapes[0]
        node.out_shapes.append(fluid_shape)
        return fluid_shape

    def elementwise_ops(self, node):
        fluid_shape = self.get_input_node(node, idx=0).out_shapes[0]
        node.out_shapes.append(fluid_shape)
        return fluid_shape

    def pad(self, node):
        fluid_shape = self.get_input_node(node, idx=0).out_shapes[0]
        # op_set <= 10
        pads = node.get_attr('pads')
        rank = len(fluid_shape)
        fluid_shape = [
            d + pad_up + pad_down
            for d, pad_up, pad_down in zip(fluid_shape, pads[:rank], pads[rank:])
        ]
        node.out_shapes.append(fluid_shape)
        return fluid_shape

    def gather(self, node):
        fluid_shape = self.get_input_node(node, idx=0).out_shapes[0]
        axis = handle_negative_axis(node.get_attr('axis', 0), len(fluid_shape))
        indices_shape = self.get_input_node(node, idx=1).out_shapes[0]
        fluid_shape = fluid_shape[:axis] + list(indices_shape) + fluid_shape[axis + 1:]
        input = self.get_input_node(node, 0)
        if input.layer_name in self.fluid_data:
            assert 0 == axis  # only handle 1D sympy compute
            idx = self.get_fluid_date(indices_shape)
            data = self.fluid_data[input.layer_name]
            if type(data) == list:
                if type(idx) == np.ndarray and len(idx.shape) == 1:
                    self.fluid_data[node.layer_name] = [data[int(i)] for i in idx]
                else:
                    self.fluid_data[node.layer_name] = data[int(idx)]
            else:
                assert idx == 0
                self.fluid_data[node.layer_name] = data
        node.out_shapes.append(fluid_shape)
        return fluid_shape

    def constant(self, node):
        if isinstance(node, ONNXGraphNode):
            fluid_shape = node.value.shape
        else:
            fluid_shape = node.weight.shape
        node.out_shapes.append(fluid_shape)
        return fluid_shape

    def split(self, node):
        fluid_shape = self.get_input_node(node, idx=0).out_shapes[0]
        axis = handle_negative_axis(node.get_attr('axis', 0), len(fluid_shape))
        split = node.get_attr('split')
        if not split:
            num_outputs = len(node.outputs)
            split = [fluid_shape[axis] / sympy.Integer(num_outputs)] * num_outputs
        else:
            split = [sympy.Integer(s) for s in split]
        shapes = []
        for i_o in range(len(split)):
            shape = fluid_shape[:axis] + [split[i_o]] + fluid_shape[axis + 1:]
            shapes.append(shape)
        node.out_shapes += shapes
        return shapes

    def shape(self, node):
        fluid_shape = self.get_input_node(node, idx=0).out_shapes[0]
        fluid_shape = [len(fluid_shape), ]
        node.out_shapes.append(fluid_shape)
        self.fluid_data[node.layer_name] = np.array(fluid_shape)
        return fluid_shape

    def transpose(self, node):
        fluid_shape = self.get_input_node(node, idx=0).out_shapes[0]
        perm = node.get_attr('perm')
        fulid_shape = np.array(fluid_shape)[perm].tolist()
        node.out_shapes.append(fluid_shape)
        return fluid_shape

    def reshape(self, node):
        shape = self.get_input_node(node, idx=1)
        shape_data = self.get_fluid_data(shape)
        if shape_data is not None:
            if -1 in shape_data:
                fluid_shape = self.get_input_node(node, idx=0).out_shapes[0]
                print(fluid_shape)
                index = shape_data.index(-1)
                total_elements = 1
                for dim in fluid_shape:
                    total_elements *= dim
                part_elements = 1
                for dim in shape_data:
                    if dim != -1:
                        part_elements *= dim
                shape_data[index] = total_elements // part_elements
            node.out_shapes.append(shape_data)
        else:
            pass
        return shape_data

    def matmul(self, node):
        x_shape = self.get_input_node(node, idx=0).out_shapes[0]
        y_shape = self.get_input_node(node, idx=1).out_shapes[0]
        x_rank = len(x_shape)
        y_rank = len(y_shape)
        if x_rank == 1 and y_rank == 1:
            new_shape = []
        elif x_rank == 1:
            y_reduce_dim = -2
            new_shape = x_shape[:y_reduce_dim] + [x_shape[-1]]
        elif y_rank == 1:
            x_reduce_dim = -1
            new_shape = x_shape[:x_reduce_dim]
        else:
            x_reduce_dim = -1
            y_reduce_dim = -2
            new_shape = self._broadcast_shapes(
                x_shape[:-2], y_shape[:-2]) + [x_shape[-2]] + [y_shape[-1]]
        node.out_shapes.append(new_shape)
        return new_shape

    def squeeze(self, node):
        self._pass_on_sympy_data(node)

    def unsqueeze(self, node):
        self._pass_on_sympy_data(node)

    def concat(self, node):
        if any([i in self.fluid_data for i in node.inputs]):
            values = self._get_int_values(node)
            if all([v is not None for v in values]):
                assert 0 == get_attribute(node, 'axis')
                self.fluid_data[node.layer_name] = []
                for i in range(len(node.input)):
                    value = values[i]
                    if type(value) == list:
                        self.fluid_data[node.layer_name].extend(value)
                    else:
                        self.fluid_data[node.layer_name].append(value)
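For orientation, _broadcast_shapes above follows the usual right-aligned broadcasting rule: dimensions are compared from the right, a dimension of 1 stretches to match the other operand, and symbolic dims are merged when they cannot be compared directly. A minimal illustration of that rule with a plain sympy symbol, independent of the X2Paddle classes and assuming broadcast-compatible inputs:

    import sympy

    # 'N' stands for a symbolic batch dimension; everything here is illustrative only.
    N = sympy.Symbol('N', integer=True)

    def broadcast(shape1, shape2):
        # Left-pad the shorter shape with 1s, then take the non-1 dimension per position.
        s1 = [1] * (len(shape2) - len(shape1)) + list(shape1)
        s2 = [1] * (len(shape1) - len(shape2)) + list(shape2)
        return [d2 if d1 == 1 else d1 for d1, d2 in zip(s1, s2)]

    print(broadcast([N, 1, 4], [3, 4]))   # [N, 3, 4]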
x2paddle/op_mapper/onnx_opsets/custom_layer/__init__.py → x2paddle/op_mapper/onnx2paddle/opsets/custom_layer/__init__.py

@@ -13,10 +13,6 @@
 # limitations under the License.

 from .register import get_registered_layers
-#custom layer import begins
-
-from . import InstanceNormalization
-#custom layer import ends

 custom_layers = get_registered_layers()
...
x2paddle/op_mapper/onnx_opsets/custom_layer/register.py → x2paddle/op_mapper/onnx2paddle/opsets/custom_layer/register.py
File moved.
x2paddle/op_mapper/onnx_opsets/opset9.py → x2paddle/op_mapper/onnx2paddle/opsets/opset9.py
File moved.
x2paddle/op_mapper/paddle2onnx/__init__.py  (new empty file, mode 100644)

x2paddle/op_mapper/paddle2onnx/opset10/__init__.py  (new empty file, mode 100644)
x2paddle/op_mapper/paddle2onnx/opset10/opset.py  (new file, mode 100644)

# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
import sys
import x2paddle
import os
import numpy as np
import paddle.fluid.core as core
import paddle.fluid as fluid
import onnx
from onnx import helper, onnx_pb
from x2paddle.op_mapper.paddle2onnx.opset9.opset import OpSet9


class OpSet10(OpSet9):
    def __init__(self):
        super(OpSet10, self).__init__()

    def slice(self, op, block):
        axes = op.attr('axes')
        starts = op.attr('starts')
        ends = op.attr('ends')
        axes_name = self.get_name(op.type, 'axes')
        starts_name = self.get_name(op.type, 'starts')
        ends_name = self.get_name(op.type, 'ends')
        axes_node = self.make_constant_node(axes_name, onnx_pb.TensorProto.INT64, axes)
        starts_node = self.make_constant_node(starts_name, onnx_pb.TensorProto.INT64, starts)
        ends_node = self.make_constant_node(ends_name, onnx_pb.TensorProto.INT64, ends)
        node = helper.make_node(
            "Slice",
            inputs=[op.input('Input')[0], starts_name, ends_name, axes_name],
            outputs=op.output('Out'), )
        return [starts_node, ends_node, axes_node, node]

    def im2sequence(self, op, block):
        from .paddle_custom_layer.im2sequence import im2sequence
        return im2sequence(op, block)

    def yolo_box(self, op, block):
        from .paddle_custom_layer.yolo_box import yolo_box
        return yolo_box(op, block)

    def multiclass_nms(self, op, block):
        from .paddle_custom_layer.multiclass_nms import multiclass_nms
        return multiclass_nms(op, block)
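OpSet10 only re-implements ops whose ONNX definition changed between opset 9 and 10 (here Slice, whose starts/ends/axes move from attributes to inputs) and inherits everything else from OpSet9. A minimal sketch of that inheritance pattern; the class and return values below are placeholders, not code from the patch:

    class OpSet9Base:
        # pretend mapper for ONNX opset 9; names and return values are placeholders
        def relu(self, op):
            return "Relu-v9 node"

        def slice(self, op):
            return "Slice-v9 node (starts/ends/axes as attributes)"

    class OpSet10Base(OpSet9Base):
        # only the ops whose spec changed in opset 10 are overridden
        def slice(self, op):
            return "Slice-v10 node (starts/ends/axes as constant inputs)"

    mapper = OpSet10Base()
    print(mapper.relu(None))    # inherited from OpSet9Base
    print(mapper.slice(None))   # opset-10-specific override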
x2paddle/op_mapper/paddle2onnx/opset10/paddle_custom_layer/__init__.py  (new empty file, mode 100644)

x2paddle/op_mapper/paddle2onnx/opset10/paddle_custom_layer/im2sequence.py  (new file, mode 100644)

import onnx
import numpy as np
from onnx import onnx_pb, helper
from x2paddle.op_mapper.paddle2onnx.opset9.paddle_custom_layer.im2sequence import im2sequence as im2sequence9


def im2sequence(op, block):
    return im2sequence9(op, block)
x2paddle/op_mapper/onnx_opsets/custom_layer/InstanceNormalization.py → x2paddle/op_mapper/paddle2onnx/opset10/paddle_custom_layer/multiclass_nms.py

@@ -12,45 +12,21 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from .register import register
-
-
-def InstanceNormalization_shape(input_shape):
-    return input_shape
-
-
-def InstanceNormalization_layer(inputs, name=None):
-    # TODO(lvmengsi@baidu.com): Check the accuracy when using fluid.layers.layer_norm.
-    epsilon = 1e-5
-    input_ = inputs[0]
-    mean = fluid.layers.reduce_mean(input_, dim=[2, 3], keep_dim=True)
-    var = fluid.layers.reduce_mean(
-        fluid.layers.square(input_ - mean), dim=[2, 3], keep_dim=True)
-    if name is not None:
-        scale_name = name + "_scale"
-        offset_name = name + "_offset"
-    scale_param = inputs[1]
-    offset_param = inputs[2]
-    scale = fluid.layers.create_parameter(
-        name=scale_param.name, shape=input_.shape[1:2], dtype="float32")
-    offset = fluid.layers.create_parameter(
-        name=offset_param.name, shape=input_.shape[1:2], dtype="float32")
-
-    tmp = fluid.layers.elementwise_mul(x=(input_ - mean), y=scale, axis=1)
-    tmp = tmp / fluid.layers.sqrt(var + epsilon)
-    tmp = fluid.layers.elementwise_add(tmp, offset, axis=1)
-    return tmp
-
-
-def InstanceNormalization_weights(name, data=None):
-    weights_name = [name + '_scale']
-    return weights_name
-
-
-register(
-    kind='InstanceNormalization',
-    shape=InstanceNormalization_shape,
-    layer=InstanceNormalization_layer,
-    child_func=None,
-    weights=InstanceNormalization_weights)
+import math
+import sys
+import os
+import numpy as np
+import paddle.fluid.core as core
+import paddle.fluid as fluid
+import onnx
+import warnings
+from onnx import helper, onnx_pb
+from x2paddle.op_mapper.paddle2onnx.opset9.paddle_custom_layer.multiclass_nms import multiclass_nms as multiclass_nms9
+
+
+def multiclass_nms(op, block):
+    """
+    Convert the paddle multiclass_nms to onnx op.
+    This op is get the select boxes from origin boxes.
+    """
+    return multiclass_nms9(op, block)
x2paddle/op_mapper/paddle2onnx/opset10/paddle_custom_layer/yolo_box.py  (new file, mode 100644)

import onnx
import numpy as np
from onnx import onnx_pb, helper
from x2paddle.op_mapper.paddle2onnx.opset9.paddle_custom_layer.yolo_box import yolo_box as yolo_box9


def yolo_box(op, block):
    return yolo_box9(op, block)
x2paddle/op_mapper/paddle2onnx/opset11/__init__.py  (new empty file, mode 100644)
x2paddle/op_mapper/paddle2onnx/opset11/opset.py  (new file, mode 100644)

# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
import sys
import x2paddle
import os
import numpy as np
import paddle.fluid.core as core
import paddle.fluid as fluid
import onnx
from onnx import helper, onnx_pb
from x2paddle.op_mapper.paddle2onnx.opset10.opset import OpSet10


class OpSet11(OpSet10):
    def __init__(self):
        super(OpSet11, self).__init__()

    def relu6(self, op, block):
        min_name = self.get_name(op.type, 'min')
        max_name = self.get_name(op.type, 'max')
        min_node = self.make_constant_node(min_name, onnx_pb.TensorProto.FLOAT, 0)
        max_node = self.make_constant_node(max_name, onnx_pb.TensorProto.FLOAT, op.attr('threshold'))
        node = helper.make_node(
            'Clip',
            inputs=[op.input('X')[0], min_name, max_name],
            outputs=op.output('Out'), )
        return [min_node, max_node, node]

    def bilinear_interp(self, op, block):
        input_names = op.input_names
        coordinate_transformation_mode = ''
        align_corners = op.attr('align_corners')
        align_mode = op.attr('align_mode')
        if align_corners:
            coordinate_transformation_mode = 'align_corners'
        elif align_mode == 1:
            coordinate_transformation_mode = 'asymmetric'
        else:
            coordinate_transformation_mode = 'half_pixel'
        if ('OutSize' in input_names and len(op.input('OutSize')) > 0) or (
                'SizeTensor' in input_names and len(op.input('SizeTensor')) > 0):
            node_list = list()
            roi_node = self.make_constant_node(
                self.get_name(op.type, 'roi'), onnx_pb.TensorProto.FLOAT,
                [1, 1, 1, 1, 1, 1, 1, 1])
            roi_name = self.get_name(op.type, 'roi')
            roi_node = self.make_constant_node(
                roi_name, onnx_pb.TensorProto.FLOAT, [1, 1, 1, 1, 1, 1, 1, 1])
            empty_name = self.get_name(op.type, 'empty')
            empty_tensor = helper.make_tensor(
                empty_name,
                onnx_pb.TensorProto.FLOAT, (0, ),
                np.array([]).astype('float32'),
                raw=False)
            empty_node = helper.make_node(
                'Constant', [], outputs=[empty_name], value=empty_tensor)
            shape_name0 = self.get_name(op.type, 'shape')
            shape_node0 = helper.make_node(
                'Shape', inputs=op.input('X'), outputs=[shape_name0])
            starts_name = self.get_name(op.type, 'slice.starts')
            starts_node = self.make_constant_node(
                starts_name, onnx_pb.TensorProto.INT64, [0])
            ends_name = self.get_name(op.type, 'slice.ends')
            ends_node = self.make_constant_node(ends_name, onnx_pb.TensorProto.INT64, [2])
            shape_name1 = self.get_name(op.type, 'shape')
            shape_node1 = helper.make_node(
                'Slice',
                inputs=[shape_name0, starts_name, ends_name],
                outputs=[shape_name1])
            node_list.extend([
                roi_node, empty_node, shape_node0, starts_node, ends_node, shape_node1
            ])
            if 'OutSize' in input_names and len(op.input('OutSize')) > 0:
                cast_shape_name = self.get_name(op.type, "shape.cast")
                cast_shape_node = helper.make_node(
                    'Cast',
                    inputs=op.input('OutSize'),
                    outputs=[cast_shape_name],
                    to=onnx_pb.TensorProto.INT64)
                node_list.append(cast_shape_node)
            else:
                concat_shape_name = self.get_name(op.type, "shape.concat")
                concat_shape_node = helper.make_node(
                    "Concat",
                    inputs=op.input('SizeTensor'),
                    outputs=[concat_shape_name],
                    axis=0)
                cast_shape_name = self.get_name(op.type, "shape.cast")
                cast_shape_node = helper.make_node(
                    'Cast',
                    inputs=[concat_shape_name],
                    outputs=[cast_shape_name],
                    to=onnx_pb.TensorProto.INT64)
                node_list.extend([concat_shape_node, cast_shape_node])
            shape_name3 = self.get_name(op.type, "shape.concat")
            shape_node3 = helper.make_node(
                'Concat',
                inputs=[shape_name1, cast_shape_name],
                outputs=[shape_name3],
                axis=0)
            result_node = helper.make_node(
                'Resize',
                inputs=[op.input('X')[0], roi_name, empty_name, shape_name3],
                outputs=op.output('Out'),
                mode='linear',
                coordinate_transformation_mode=coordinate_transformation_mode)
            node_list.extend([shape_node3, result_node])
            return node_list
        elif 'Scale' in input_names and len(op.input('Scale')) > 0:
            node = helper.make_node(
                'Resize',
                inputs=[op.input('X')[0], op.input('Scale')[0]],
                outputs=op.output('Out'),
                mode='linear',
                coordinate_transformation_mode=coordinate_transformation_mode)
        else:
            out_shape = [op.attr('out_h'), op.attr('out_w')]
            scale = op.attr('scale')
            if out_shape.count(-1) > 0:
                scale_name = self.get_name(op.type, 'scale')
                scale_node = self.make_constant_node(
                    scale_name, onnx_pb.TensorProto.FLOAT, [1, 1, scale, scale])
                roi_name = self.get_name(op.type, 'roi')
                roi_node = self.make_constant_node(
                    roi_name, onnx_pb.TensorProto.FLOAT, [1, 1, 1, 1, 1, 1, 1, 1])
                node = helper.make_node(
                    'Resize',
                    inputs=[op.input('X')[0], roi_name, scale_name],
                    outputs=op.output('Out'),
                    mode='nearest',
                    coordinate_transformation_mode=coordinate_transformation_mode)
                return [scale_node, roi_node, node]
            else:
                raise Exception("Unexpected situation happend")
        return node

    def nearest_interp(self, op, block):
        input_names = op.input_names
        coordinate_transformation_mode = ''
        align_corners = op.attr('align_corners')
        if align_corners:
            coordinate_transformation_mode = 'align_corners'
        else:
            coordinate_transformation_mode = 'asymmetric'
        if 'OutSize' in input_names and len(op.input('OutSize')) > 0:
            node = helper.make_node(
                'Resize',
                inputs=[op.input('X')[0], '', op.input('OutSize')[0]],
                outputs=op.output('Out'),
                mode='nearest',
                coordinate_transformation_mode=coordinate_transformation_mode)
        elif 'Scale' in input_names and len(op.input('Scale')) > 0:
            node = helper.make_node(
                'Resize',
                inputs=[op.input('X')[0], op.input('Scale')[0]],
                outputs=op.output('Out'),
                mode='nearest',
                coordinate_transformation_mode=coordinate_transformation_mode)
        else:
            out_shape = [op.attr('out_h'), op.attr('out_w')]
            scale = op.attr('scale')
            if out_shape.count(-1) > 0:
                scale_name = self.get_name(op.type, 'scale')
                scale_node = self.make_constant_node(
                    scale_name, onnx_pb.TensorProto.FLOAT, [1, 1, scale, scale])
                roi_name = self.get_name(op.type, 'roi')
                roi_node = self.make_constant_node(
                    roi_name, onnx_pb.TensorProto.FLOAT, [1, 1, 1, 1, 1, 1, 1, 1])
                node = helper.make_node(
                    'Resize',
                    inputs=[op.input('X')[0], roi_name, scale_name],
                    outputs=op.output('Out'),
                    mode='nearest',
                    coordinate_transformation_mode=coordinate_transformation_mode)
                return [scale_node, roi_node, node]
            else:
                raise Exception("Unexpected situation happend")
        return node

    def hard_swish(self, op, block):
        min_name = self.get_name(op.type, 'min')
        max_name = self.get_name(op.type, 'max')
        scale_name = self.get_name(op.type, 'scale')
        offset_name = self.get_name(op.type, 'offset')
        min_node = self.make_constant_node(min_name, onnx_pb.TensorProto.FLOAT, 0)
        max_node = self.make_constant_node(max_name, onnx_pb.TensorProto.FLOAT, op.attr('threshold'))
        scale_node = self.make_constant_node(scale_name, onnx_pb.TensorProto.FLOAT, op.attr('scale'))
        offset_node = self.make_constant_node(offset_name, onnx_pb.TensorProto.FLOAT, op.attr('offset'))

        name0 = self.get_name(op.type, 'add')
        node0 = helper.make_node(
            'Add', inputs=[op.input('X')[0], offset_name], outputs=[name0])
        name1 = self.get_name(op.type, 'relu')
        node1 = helper.make_node(
            'Clip',
            inputs=[name0, min_name, max_name],
            outputs=[name1], )
        name2 = self.get_name(op.type, 'mul')
        node2 = helper.make_node(
            'Mul', inputs=[op.input('X')[0], name1], outputs=[name2])
        node3 = helper.make_node(
            'Div', inputs=[name2, scale_name], outputs=op.output('Out'))
        return [min_node, max_node, scale_node, offset_node, node0, node1, node2, node3]

    def im2sequence(self, op, block):
        from .paddle_custom_layer.im2sequence import im2sequence
        return im2sequence(op, block)

    def yolo_box(self, op, block):
        from .paddle_custom_layer.yolo_box import yolo_box
        return yolo_box(op, block)

    def multiclass_nms(self, op, block):
        from .paddle_custom_layer.multiclass_nms import multiclass_nms
        return multiclass_nms(op, block)
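The hard_swish mapping above decomposes the activation into Add -> Clip -> Mul -> Div. As a quick sanity check of that decomposition (not code from the patch), the same computation in NumPy, assuming the common defaults threshold=6, scale=6, offset=3:

    import numpy as np

    def hard_swish_reference(x, threshold=6.0, scale=6.0, offset=3.0):
        # Mirrors the exported graph: x * Clip(x + offset, 0, threshold) / scale
        return x * np.clip(x + offset, 0.0, threshold) / scale

    x = np.array([-4.0, -1.0, 0.0, 2.0, 5.0])
    print(hard_swish_reference(x))   # [-0., -0.333..., 0., 1.666..., 5.]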
x2paddle/op_mapper/paddle2onnx/opset11/paddle_custom_layer/__init__.py  (new empty file, mode 100644)

x2paddle/op_mapper/paddle2onnx/opset11/paddle_custom_layer/im2sequence.py  (new file, mode 100644)

import onnx
import numpy as np
from onnx import onnx_pb, helper
from x2paddle.op_mapper.paddle2onnx.opset10.paddle_custom_layer.im2sequence import im2sequence as im2sequence10


def im2sequence(op, block):
    return im2sequence10(op, block)
x2paddle/op_mapper/paddle_custom_layer/multiclass_nms.py → x2paddle/op_mapper/paddle2onnx/opset11/paddle_custom_layer/multiclass_nms.py

@@ -125,7 +125,7 @@ def multiclass_nms(op, block):
             vals=[value]))
     node_list.append(node)

-    # Ine this code block, we will deocde the raw score data, reshape N * C * M to 1 * N*C*M
+    # In this code block, we will deocde the raw score data, reshape N * C * M to 1 * N*C*M
     # and the same time, decode the select indices to 1 * D, gather the select_indices
     outputs_gather_1 = [result_name + "@gather_1"]
     node_gather_1 = onnx.helper.make_node(
...
@@ -405,12 +405,36 @@ def multiclass_nms(op, block):
     inputs_concat_final_results = outputs_cast_topk_class + outputs_unsqueeze_topk_scores + \
         outputs_gather_select_boxes
-    outputs_concat_final_results = outputs['Out']
-    node_concat_final_results = onnx.helper.make_node(
+    outputs_sort_by_socre_results = [result_name + "@concat_topk_scores"]
+    node_sort_by_socre_results = onnx.helper.make_node(
         'Concat',
         inputs=inputs_concat_final_results,
-        outputs=outputs_concat_final_results,
+        outputs=outputs_sort_by_socre_results,
         axis=2)
-    node_list.append(node_concat_final_results)
+    node_list.append(node_sort_by_socre_results)
+
+    # select topk classes indices
+    outputs_squeeze_cast_topk_class = [result_name + "@squeeze_cast_topk_class"]
+    node_squeeze_cast_topk_class = onnx.helper.make_node(
+        'Squeeze',
+        inputs=outputs_cast_topk_class,
+        outputs=outputs_squeeze_cast_topk_class,
+        axes=[0, 2])
+    node_list.append(node_squeeze_cast_topk_class)
+    outputs_topk_select_classes_indices = [result_name + "@topk_select_topk_classes_scores", \
+        result_name + "@topk_select_topk_classes_indices"]
+    node_topk_select_topk_indices = onnx.helper.make_node(
+        'TopK',
+        inputs=outputs_squeeze_cast_topk_class + outputs_cast_topk_indices,
+        outputs=outputs_topk_select_classes_indices,
+        largest=0)
+    node_list.append(node_topk_select_topk_indices)
+    outputs_concat_final_results = outputs['Out']
+    node_concat_final_results = onnx.helper.make_node(
+        'Gather',
+        inputs=outputs_sort_by_socre_results + [outputs_topk_select_classes_indices[1]],
+        outputs=outputs_concat_final_results,
+        axis=1)
+    node_list.append(node_concat_final_results)
     return node_list
x2paddle/op_mapper/paddle_custom_layer/yolo_box.py → x2paddle/op_mapper/paddle2onnx/opset11/paddle_custom_layer/yolo_box.py
File moved.

x2paddle/op_mapper/paddle2onnx/opset9/__init__.py  (new empty file, mode 100644)
x2paddle/op_mapper/paddle_op_mapper.py → x2paddle/op_mapper/paddle2onnx/opset9/opset.py

@@ -23,7 +23,7 @@ import onnx
 from onnx import helper, onnx_pb


-class PaddleOpMapper(object):
+class OpSet9(object):
     def __init__(self):
         self.paddle_onnx_dtype_map = {
             core.VarDesc.VarType.FP32: onnx_pb.TensorProto.FLOAT,
...
@@ -34,62 +34,8 @@
             core.VarDesc.VarType.INT64: onnx_pb.TensorProto.INT64,
             core.VarDesc.VarType.BOOL: onnx_pb.TensorProto.BOOL
         }
         self.name_counter = dict()

-    def convert(self, program, save_dir):
-        weight_nodes = self.convert_weights(program)
-        op_nodes = list()
-        input_nodes = list()
-        output_nodes = list()
-        unsupported_ops = set()
-
-        print("Translating PaddlePaddle to ONNX...\n")
-        for block in program.blocks:
-            for i, op in enumerate(block.ops):
-                sys.stdout.write("\rTotal:{}, Current:{} : {} ".format(
-                    len(block.ops), i + 1, op.type))
-                sys.stdout.flush()
-                if not hasattr(self, op.type):
-                    unsupported_ops.add(op.type)
-                    continue
-                if len(unsupported_ops) > 0:
-                    continue
-                node = getattr(self, op.type)(op, block)
-                if op.type == 'feed':
-                    input_nodes.append(node)
-                elif op.type == 'fetch':
-                    output_nodes.append(node)
-                else:
-                    if isinstance(node, list):
-                        op_nodes = op_nodes + node
-                    else:
-                        op_nodes.append(node)
-
-        if len(unsupported_ops) > 0:
-            print("\nThere's {} ops are not supported yet".format(
-                len(unsupported_ops)))
-            for op in unsupported_ops:
-                print("=========== {} ===========".format(op))
-            return
-
-        graph = helper.make_graph(
-            nodes=weight_nodes + op_nodes,
-            name='onnx_model_from_paddle',
-            initializer=[],
-            inputs=input_nodes,
-            outputs=output_nodes)
-        model = helper.make_model(graph, producer_name='X2Paddle')
-        onnx.checker.check_model(model)
-
-        if not os.path.isdir(save_dir):
-            os.makedirs(save_dir)
-        with open(os.path.join(save_dir, 'x2paddle_model.onnx'), 'wb') as f:
-            f.write(model.SerializeToString())
-        print("\nTranslated model saved in {}".format(
-            os.path.join(save_dir, 'x2paddle_model.onnx')))
-
     def get_name(self, op_name, var_name):
         name = 'p2o.{}.{}'.format(op_name, var_name)
         if name not in self.name_counter:
...
@@ -98,6 +44,21 @@
             self.name_counter[name] += 1
         return name + '.{}'.format(self.name_counter[name])

+    def make_constant_node(self, name, dtype, value=None):
+        if isinstance(value, list):
+            dims = (len(value), )
+        elif value is None:
+            dims = ()
+            value = []
+        else:
+            dims = ()
+            value = [value]
+        tensor = helper.make_tensor(
+            name=name, data_type=dtype, dims=dims, vals=value)
+        node = helper.make_node(
+            'Constant', inputs=[], outputs=[name], value=tensor)
+        return node
+
     def convert_weights(self, program):
         var_names = program.global_block().vars
         nodes = list()
...
@@ -118,21 +79,6 @@
             nodes.append(node)
         return nodes

-    def make_constant_node(self, name, dtype, value=None):
-        if isinstance(value, list):
-            dims = (len(value), )
-        elif value is None:
-            dims = ()
-            value = []
-        else:
-            dims = ()
-            value = [value]
-        tensor = helper.make_tensor(
-            name=name, data_type=dtype, dims=dims, vals=value)
-        node = helper.make_node(
-            'Constant', inputs=[], outputs=[name], value=tensor)
-        return node
-
     def conv2d(self, op, block):
         kernel_shape = block.var(op.input('Filter')[0]).shape
         node = helper.make_node(
...
@@ -182,41 +128,6 @@
             alpha=op.attr('alpha'))
         return node

-    def swish(self, op, block):
-        """
-        The activation swish, y = x / (1 + exp(-beta * x))
-        """
-        beta = op.attr('beta')
-        beta_name = self.get_name(op.type, 'beta')
-        beta_node = onnx.helper.make_node(
-            'Constant',
-            name=beta_name,
-            inputs=[],
-            outputs=[beta_name],
-            value=onnx.helper.make_tensor(
-                name=beta_name,
-                data_type=onnx.TensorProto.FLOAT,
-                dims=(),
-                vals=[beta]))
-        beta_x_name = self.get_name(op.type, 'beta_x')
-        beta_x_node = onnx.helper.make_node(
-            'Mul',
-            name=beta_x_name,
-            inputs=[op.input('X')[0], beta_name],
-            outputs=[beta_x_name])
-        sigmoid_name = self.get_name(op.type, 'sigmoid')
-        sigmoid_node = onnx.helper.make_node(
-            'Sigmoid',
-            name=sigmoid_name,
-            inputs=[beta_x_name],
-            outputs=[sigmoid_name])
-        swish_node = onnx.helper.make_node(
-            'Mul',
-            inputs=[op.input('X')[0], sigmoid_name],
-            outputs=op.output('Out'))
-        return [beta_node, beta_x_node, sigmoid_node, swish_node]
-
     def elementwise_add(self, op, block):
         axis = op.attr('axis')
         x_shape = block.var(op.input('X')[0]).shape
...
@@ -285,6 +196,8 @@
                 pool_type[op.attr('pooling_type')][1],
                 inputs=op.input('X'),
                 outputs=op.output('Out'), )
+        elif op.attr('adaptive'):
+            raise Excpetion("ONNX cannot support adaptive pool")
         else:
             input_shape = block.var(op.input('X')[0]).shape
             k_size = op.attr('ksize')
...
@@ -431,17 +344,14 @@
         return self.conv2d(op, block)

     def relu6(self, op, block):
-        min_name = self.get_name(op.type, 'min')
-        max_name = self.get_name(op.type, 'max')
-        min_node = self.make_constant_node(min_name, onnx_pb.TensorProto.FLOAT, 0)
-        max_node = self.make_constant_node(max_name, onnx_pb.TensorProto.FLOAT,
-                                           op.attr('threshold'))
+        threshold = op.attr('threshold')
         node = helper.make_node(
             'Clip',
-            inputs=[op.input('X')[0], min_name, max_name],
-            outputs=op.output('Out'), )
-        return [min_node, max_node, node]
+            inputs=[op.input('X')[0]],
+            outputs=op.output('Out'),
+            max=threshold,
+            min=0.0)
+        return [node]

     def shape(self, op, block):
         node = helper.make_node(
...
@@ -469,21 +379,14 @@
         axes = op.attr('axes')
         starts = op.attr('starts')
         ends = op.attr('ends')
-        axes_name = self.get_name(op.type, 'axes')
-        starts_name = self.get_name(op.type, 'starts')
-        ends_name = self.get_name(op.type, 'ends')
-        axes_node = self.make_constant_node(axes_name, onnx_pb.TensorProto.INT64, axes)
-        starts_node = self.make_constant_node(starts_name, onnx_pb.TensorProto.INT64, starts)
-        ends_node = self.make_constant_node(ends_name, onnx_pb.TensorProto.INT64, ends)
         node = helper.make_node(
             "Slice",
-            inputs=[op.input('Input')[0], starts_name, ends_name, axes_name],
-            outputs=op.output('Out'), )
-        return [starts_node, ends_node, axes_node, node]
+            inputs=[op.input('Input')[0]],
+            outputs=op.output('Out'),
+            axes=axes,
+            starts=starts,
+            ends=ends)
+        return [node]

     def fill_constant(self, op, block):
         value = op.attr('value')
...
@@ -578,27 +481,15 @@
     def bilinear_interp(self, op, block):
         input_names = op.input_names
-        coordinate_transformation_mode = 'half_pixel'
-        if op.attr('align_corners'):
-            coordinate_transformation_mode = 'align_corners'
+        input_shape = block.vars[op.input('X')[0]].shape
+        if op.attr('align_corners') or op.attr('align_mode') == 0:
+            raise Exception(
+                "Resize in onnx(opset<=10) only support coordinate_transformation_mode: 'asymmetric'."
+            )
         if ('OutSize' in input_names and len(op.input('OutSize')) > 0) or (
                 'SizeTensor' in input_names and
                 len(op.input('SizeTensor')) > 0):
             node_list = list()
-            roi_node = self.make_constant_node(
-                self.get_name(op.type, 'roi'), onnx_pb.TensorProto.FLOAT,
-                [1, 1, 1, 1, 1, 1, 1, 1])
-            roi_name = self.get_name(op.type, 'roi')
-            roi_node = self.make_constant_node(
-                roi_name, onnx_pb.TensorProto.FLOAT, [1, 1, 1, 1, 1, 1, 1, 1])
-            empty_name = self.get_name(op.type, 'empty')
-            empty_tensor = helper.make_tensor(
-                empty_name,
-                onnx_pb.TensorProto.FLOAT, (0, ),
-                np.array([]).astype('float32'),
-                raw=False)
-            empty_node = helper.make_node(
-                'Constant', [], outputs=[empty_name], value=empty_tensor)
             shape_name0 = self.get_name(op.type, 'shape')
             shape_node0 = helper.make_node(
                 'Shape', inputs=op.input('X'), outputs=[shape_name0])
...
@@ -613,16 +504,7 @@
                 'Slice',
                 inputs=[shape_name0, starts_name, ends_name],
                 outputs=[shape_name1])
-            node_list.extend([
-                roi_node, empty_node, shape_node0, starts_node, ends_node,
-                shape_node1
-            ])
-            # shape_name2 = self.get_name(op.type, "shape.cast")
-            # shape_node2 = helper.make_node(
-            #     'Cast',
-            #     inputs=op.input('OutSize'),
-            #     outputs=[shape_name2],
-            #     to=onnx_pb.TensorProto.INT64)
+            node_list.extend([shape_node0, starts_node, ends_node, shape_node1])
             if 'OutSize' in input_names and len(op.input('OutSize')) > 0:
                 cast_shape_name = self.get_name(op.type, "shape.cast")
                 cast_shape_node = helper.make_node(
...
@@ -632,7 +514,8 @@
                     to=onnx_pb.TensorProto.INT64)
                 node_list.append(cast_shape_node)
             else:
-                concat_shape_name = self.get_name(op.type, "shape.concat")
+                concat_shape_name = self.get_name(
+                    op.type, op.output('Out')[0] + "shape.concat")
                 concat_shape_node = helper.make_node(
                     "Concat",
                     inputs=op.input('SizeTensor'),
...
@@ -645,27 +528,46 @@
                     outputs=[cast_shape_name],
                     to=onnx_pb.TensorProto.INT64)
                 node_list.extend([concat_shape_node, cast_shape_node])
-            shape_name3 = self.get_name(op.type, "shape.concat")
-            shape_node3 = helper.make_node(
+            shape_name2 = self.get_name(op.type, "shape.concat")
+            shape_node2 = helper.make_node(
                 'Concat',
                 inputs=[shape_name1, cast_shape_name],
-                outputs=[shape_name3],
+                outputs=[shape_name2],
                 axis=0)
+            node_list.append(shape_node2)
+            cast_shape_name2 = self.get_name(op.type, "shape.cast")
+            cast_shape_node2 = helper.make_node(
+                'Cast',
+                inputs=[shape_name2],
+                outputs=[cast_shape_name2],
+                to=onnx_pb.TensorProto.FLOAT)
+            node_list.append(cast_shape_node2)
+            cast_shape_name0 = self.get_name(op.type, "shape.cast")
+            cast_shape_node0 = helper.make_node(
+                'Cast',
+                inputs=[shape_name0],
+                outputs=[cast_shape_name0],
+                to=onnx_pb.TensorProto.FLOAT)
+            node_list.append(cast_shape_node0)
+            outputs_h_w_scales = op.output('Out')[0] + "@out_hw_scales"
+            node_h_w_scales = helper.make_node(
+                'Div',
+                inputs=[cast_shape_name2, cast_shape_name0],
+                outputs=[outputs_h_w_scales])
+            node_list.append(node_h_w_scales)
             result_node = helper.make_node(
                 'Resize',
-                inputs=[op.input('X')[0], roi_name, empty_name, shape_name3],
+                inputs=[op.input('X')[0], outputs_h_w_scales],
                 outputs=op.output('Out'),
-                mode='linear',
-                coordinate_transformation_mode=coordinate_transformation_mode)
-            node_list.extend([shape_node3, result_node])
+                mode='linear')
+            node_list.extend([result_node])
             return node_list
         elif 'Scale' in input_names and len(op.input('Scale')) > 0:
             node = helper.make_node(
                 'Resize',
                 inputs=[op.input('X')[0], op.input('Scale')[0]],
                 outputs=op.output('Out'),
-                mode='linear',
-                coordinate_transformation_mode=coordinate_transformation_mode)
+                mode='linear')
         else:
             out_shape = [op.attr('out_h'), op.attr('out_w')]
             scale = op.attr('scale')
...
@@ -674,41 +576,34 @@
                 scale_node = self.make_constant_node(scale_name,
                                                      onnx_pb.TensorProto.FLOAT,
                                                      [1, 1, scale, scale])
-                roi_name = self.get_name(op.type, 'roi')
-                roi_node = self.make_constant_node(
-                    roi_name, onnx_pb.TensorProto.FLOAT,
-                    [1, 1, 1, 1, 1, 1, 1, 1])
                 node = helper.make_node(
                     'Resize',
-                    inputs=[op.input('X')[0], roi_name, scale_name],
+                    inputs=[op.input('X')[0], scale_name],
                     outputs=op.output('Out'),
-                    mode='nearest',
-                    coordinate_transformation_mode=coordinate_transformation_mode)
-                return [scale_node, roi_node, node]
+                    mode='linear')
+                return [scale_node, node]
             else:
                 raise Exception("Unexpected situation happend")
         return node

     def nearest_interp(self, op, block):
         input_names = op.input_names
-        coordinate_transformation_mode = 'half_pixel'
         if op.attr('align_corners'):
-            coordinate_transformation_mode = 'align_corners'
+            raise Exception(
+                "Resize in onnx(opset<=10) only support coordinate_transformation_mode: 'asymmetric'."
+            )
         if 'OutSize' in input_names and len(op.input('OutSize')) > 0:
             node = helper.make_node(
                 'Resize',
-                inputs=[op.input('X')[0], '', op.input('OutSize')[0]],
+                inputs=[op.input('X')[0], op.input('OutSize')[0]],
                 outputs=op.output('Out'),
-                mode='nearest',
-                coordinate_transformation_mode=coordinate_transformation_mode)
+                mode='nearest')
         elif 'Scale' in input_names and len(op.input('Scale')) > 0:
             node = helper.make_node(
                 'Resize',
                 inputs=[op.input('X')[0], op.input('Scale')[0]],
                 outputs=op.output('Out'),
-                mode='nearest',
-                coordinate_transformation_mode=coordinate_transformation_mode)
+                mode='nearest')
         else:
             out_shape = [op.attr('out_h'), op.attr('out_w')]
             scale = op.attr('scale')
...
@@ -717,18 +612,12 @@
             scale_node = self.make_constant_node(scale_name,
                                                  onnx_pb.TensorProto.FLOAT,
                                                  [1, 1, scale, scale])
-                roi_name = self.get_name(op.type, 'roi')
-                roi_node = self.make_constant_node(
-                    roi_name, onnx_pb.TensorProto.FLOAT,
-                    [1, 1, 1, 1, 1, 1, 1, 1])
                 node = helper.make_node(
                     'Resize',
-                    inputs=[op.input('X')[0], roi_name, scale_name],
+                    inputs=[op.input('X')[0], scale_name],
                     outputs=op.output('Out'),
-                    mode='nearest',
-                    coordinate_transformation_mode=coordinate_transformation_mode)
-                return [scale_node, roi_node, node]
+                    mode='nearest')
+                return [scale_node, node]
             else:
                 raise Exception("Unexpected situation happend")
         return node
...
@@ -745,14 +634,8 @@
         return node

     def hard_swish(self, op, block):
-        min_name = self.get_name(op.type, 'min')
-        max_name = self.get_name(op.type, 'max')
         scale_name = self.get_name(op.type, 'scale')
         offset_name = self.get_name(op.type, 'offset')
-        min_node = self.make_constant_node(min_name, onnx_pb.TensorProto.FLOAT, 0)
-        max_node = self.make_constant_node(max_name, onnx_pb.TensorProto.FLOAT,
-                                           op.attr('threshold'))
         scale_node = self.make_constant_node(scale_name,
                                              onnx_pb.TensorProto.FLOAT,
                                              op.attr('scale'))
...
@@ -764,19 +647,20 @@
         node0 = helper.make_node(
             'Add', inputs=[op.input('X')[0], offset_name], outputs=[name0])
         name1 = self.get_name(op.type, 'relu')
+        min_value = op.attr('min')
+        max_value = op.attr('max')
         node1 = helper.make_node(
             'Clip',
-            inputs=[name0, min_name, max_name],
-            outputs=[name1], )
+            inputs=[name0],
+            outputs=[name1],
+            max=max_value,
+            min=min_value)
         name2 = self.get_name(op.type, 'mul')
         node2 = helper.make_node(
             'Mul', inputs=[op.input('X')[0], name1], outputs=[name2])
         node3 = helper.make_node(
             'Div', inputs=[name2, scale_name], outputs=op.output('Out'))
-        return [
-            min_node, max_node, scale_node, offset_node, node0, node1, node2,
-            node3
-        ]
+        return [scale_node, offset_node, node0, node1, node2, node3]

     def elementwise_mul(self, op, block):
         axis = op.attr('axis')
...
@@ -852,3 +736,11 @@
     def im2sequence(self, op, block):
         from .paddle_custom_layer.im2sequence import im2sequence
         return im2sequence(op, block)
+
+    def yolo_box(self, op, block):
+        from .paddle_custom_layer.yolo_box
import
yolo_box
return
yolo_box
(
op
,
block
)
def
multiclass_nms
(
self
,
op
,
block
):
from
.paddle_custom_layer.multiclass_nms
import
multiclass_nms
return
multiclass_nms
(
op
,
block
)
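For reference, a minimal sketch (not part of the diff) of the two Resize forms the interleaved lines above build: the simpler form passes only (X, scales) with a mode attribute, while the opset-11 form adds roi/sizes inputs and coordinate_transformation_mode. The tensor names 'x', 'roi', 'scales', 'sizes', 'y' are placeholders.

from onnx import helper

# opset 9/10 style, as emitted by opset9/opset.py
resize_simple = helper.make_node(
    'Resize', inputs=['x', 'scales'], outputs=['y'], mode='linear')

# opset 11 style, with roi/sizes and coordinate_transformation_mode
resize_opset11 = helper.make_node(
    'Resize', inputs=['x', 'roi', 'scales', 'sizes'], outputs=['y'],
    mode='linear', coordinate_transformation_mode='align_corners')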
x2paddle/op_mapper/paddle2onnx/opset9/paddle_custom_layer/__init__.py (new file, mode 100644)
x2paddle/op_mapper/paddle_custom_layer/im2sequence.py → x2paddle/op_mapper/paddle2onnx/opset9/paddle_custom_layer/im2sequence.py (file moved)
x2paddle/op_mapper/paddle2onnx/opset9/paddle_custom_layer/multiclass_nms.py (new file, mode 100644)
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
import sys
import os
import numpy as np
import paddle.fluid.core as core
import paddle.fluid as fluid
import onnx
import warnings
from onnx import helper, onnx_pb


def multiclass_nms(op, block):
    """
    Convert the paddle multiclass_nms to onnx op.
    This op is get the select boxes from origin boxes.
    """
    inputs = dict()
    outputs = dict()
    attrs = dict()
    for name in op.input_names:
        inputs[name] = op.input(name)
    for name in op.output_names:
        outputs[name] = op.output(name)
    for name in op.attr_names:
        attrs[name] = op.attr(name)

    result_name = outputs['Out'][0]
    background = attrs['background_label']
    normalized = attrs['normalized']
    if normalized == False:
        warnings.warn(
            'The parameter normalized of multiclass_nms OP of Paddle is False, which has diff with ONNX. \
            Please set normalized=True in multiclass_nms of Paddle')

    #convert the paddle attribute to onnx tensor
    name_score_threshold = [outputs['Out'][0] + "@score_threshold"]
    name_iou_threshold = [outputs['Out'][0] + "@iou_threshold"]
    name_keep_top_k = [outputs['Out'][0] + '@keep_top_k']
    name_keep_top_k_2D = [outputs['Out'][0] + '@keep_top_k_1D']

    node_score_threshold = onnx.helper.make_node(
        'Constant', inputs=[], outputs=name_score_threshold,
        value=onnx.helper.make_tensor(
            name=name_score_threshold[0] + "@const",
            data_type=onnx.TensorProto.FLOAT,
            dims=(),
            vals=[float(attrs['score_threshold'])]))

    node_iou_threshold = onnx.helper.make_node(
        'Constant', inputs=[], outputs=name_iou_threshold,
        value=onnx.helper.make_tensor(
            name=name_iou_threshold[0] + "@const",
            data_type=onnx.TensorProto.FLOAT,
            dims=(),
            vals=[float(attrs['nms_threshold'])]))

    node_keep_top_k = onnx.helper.make_node(
        'Constant', inputs=[], outputs=name_keep_top_k,
        value=onnx.helper.make_tensor(
            name=name_keep_top_k[0] + "@const",
            data_type=onnx.TensorProto.INT64,
            dims=(),
            vals=[np.int64(attrs['keep_top_k'])]))

    node_keep_top_k_2D = onnx.helper.make_node(
        'Constant', inputs=[], outputs=name_keep_top_k_2D,
        value=onnx.helper.make_tensor(
            name=name_keep_top_k_2D[0] + "@const",
            data_type=onnx.TensorProto.INT64,
            dims=[1, 1],
            vals=[np.int64(attrs['keep_top_k'])]))

    # the paddle data format is x1,y1,x2,y2
    kwargs = {'center_point_box': 0}

    name_select_nms = [outputs['Out'][0] + "@select_index"]
    node_select_nms = onnx.helper.make_node(
        'NonMaxSuppression',
        inputs=inputs['BBoxes'] + inputs['Scores'] + name_keep_top_k + \
            name_iou_threshold + name_score_threshold,
        outputs=name_select_nms)
    # step 1 nodes select the nms class
    node_list = [
        node_score_threshold, node_iou_threshold, node_keep_top_k,
        node_keep_top_k_2D, node_select_nms
    ]

    # create some const value to use
    name_const_value = [result_name + "@const_0",
                        result_name + "@const_1", \
                        result_name + "@const_2", \
                        result_name + "@const_-1"]
    value_const_value = [0, 1, 2, -1]
    for name, value in zip(name_const_value, value_const_value):
        node = onnx.helper.make_node(
            'Constant', inputs=[], outputs=[name],
            value=onnx.helper.make_tensor(
                name=name + "@const",
                data_type=onnx.TensorProto.INT64,
                dims=[1],
                vals=[value]))
        node_list.append(node)

    # In this code block, we will deocde the raw score data, reshape N * C * M to 1 * N*C*M
    # and the same time, decode the select indices to 1 * D, gather the select_indices
    outputs_gather_1 = [result_name + "@gather_1"]
    node_gather_1 = onnx.helper.make_node(
        'Gather',
        inputs=name_select_nms + [result_name + "@const_1"],
        outputs=outputs_gather_1,
        axis=1)
    node_list.append(node_gather_1)

    outputs_squeeze_gather_1 = [result_name + "@sequeeze_gather_1"]
    node_squeeze_gather_1 = onnx.helper.make_node(
        'Squeeze',
        inputs=outputs_gather_1,
        outputs=outputs_squeeze_gather_1,
        axes=[1])
    node_list.append(node_squeeze_gather_1)

    outputs_gather_2 = [result_name + "@gather_2"]
    node_gather_2 = onnx.helper.make_node(
        'Gather',
        inputs=name_select_nms + [result_name + "@const_2"],
        outputs=outputs_gather_2,
        axis=1)
    node_list.append(node_gather_2)

    #slice the class is not 0
    if background == 0:
        outputs_nonzero = [result_name + "@nonzero"]
        node_nonzero = onnx.helper.make_node(
            'NonZero', inputs=outputs_squeeze_gather_1, outputs=outputs_nonzero)
        node_list.append(node_nonzero)
    else:
        name_thresh = [result_name + "@thresh"]
        node_thresh = onnx.helper.make_node(
            'Constant', inputs=[], outputs=name_thresh,
            value=onnx.helper.make_tensor(
                name=name_thresh[0] + "@const",
                data_type=onnx.TensorProto.INT32,
                dims=[1],
                vals=[-1]))
        node_list.append(node_thresh)

        outputs_cast = [result_name + "@cast"]
        node_cast = onnx.helper.make_node(
            'Cast', inputs=outputs_squeeze_gather_1, outputs=outputs_cast, to=6)
        node_list.append(node_cast)

        outputs_greater = [result_name + "@greater"]
        node_greater = onnx.helper.make_node(
            'Greater',
            inputs=outputs_cast + name_thresh,
            outputs=outputs_greater)
        node_list.append(node_greater)

        outputs_nonzero = [result_name + "@nonzero"]
        node_nonzero = onnx.helper.make_node(
            'NonZero', inputs=outputs_greater, outputs=outputs_nonzero)
        node_list.append(node_nonzero)

    outputs_gather_1_nonzero = [result_name + "@gather_1_nonzero"]
    node_gather_1_nonzero = onnx.helper.make_node(
        'Gather',
        inputs=outputs_gather_1 + outputs_nonzero,
        outputs=outputs_gather_1_nonzero,
        axis=0)
    node_list.append(node_gather_1_nonzero)

    outputs_gather_2_nonzero = [result_name + "@gather_2_nonzero"]
    node_gather_2_nonzero = onnx.helper.make_node(
        'Gather',
        inputs=outputs_gather_2 + outputs_nonzero,
        outputs=outputs_gather_2_nonzero,
        axis=0)
    node_list.append(node_gather_2_nonzero)

    # reshape scores N * C * M to (N*C*M) * 1
    outputs_reshape_scores_rank1 = [result_name + "@reshape_scores_rank1"]
    node_reshape_scores_rank1 = onnx.helper.make_node(
        "Reshape",
        inputs=inputs['Scores'] + [result_name + "@const_-1"],
        outputs=outputs_reshape_scores_rank1)
    node_list.append(node_reshape_scores_rank1)

    # get the shape of scores
    outputs_shape_scores = [result_name + "@shape_scores"]
    node_shape_scores = onnx.helper.make_node(
        'Shape', inputs=inputs['Scores'], outputs=outputs_shape_scores)
    node_list.append(node_shape_scores)

    # gather the index: 2 shape of scores
    outputs_gather_scores_dim1 = [result_name + "@gather_scores_dim1"]
    node_gather_scores_dim1 = onnx.helper.make_node(
        'Gather',
        inputs=outputs_shape_scores + [result_name + "@const_2"],
        outputs=outputs_gather_scores_dim1,
        axis=0)
    node_list.append(node_gather_scores_dim1)

    # mul class * M
    outputs_mul_classnum_boxnum = [result_name + "@mul_classnum_boxnum"]
    node_mul_classnum_boxnum = onnx.helper.make_node(
        'Mul',
        inputs=outputs_gather_1_nonzero + outputs_gather_scores_dim1,
        outputs=outputs_mul_classnum_boxnum)
    node_list.append(node_mul_classnum_boxnum)

    # add class * M * index
    outputs_add_class_M_index = [result_name + "@add_class_M_index"]
    node_add_class_M_index = onnx.helper.make_node(
        'Add',
        inputs=outputs_mul_classnum_boxnum + outputs_gather_2_nonzero,
        outputs=outputs_add_class_M_index)
    node_list.append(node_add_class_M_index)

    # Squeeze the indices to 1 dim
    outputs_squeeze_select_index = [result_name + "@squeeze_select_index"]
    node_squeeze_select_index = onnx.helper.make_node(
        'Squeeze',
        inputs=outputs_add_class_M_index,
        outputs=outputs_squeeze_select_index,
        axes=[0, 2])
    node_list.append(node_squeeze_select_index)

    # gather the data from flatten scores
    outputs_gather_select_scores = [result_name + "@gather_select_scores"]
    node_gather_select_scores = onnx.helper.make_node(
        'Gather',
        inputs=outputs_reshape_scores_rank1 + \
            outputs_squeeze_select_index,
        outputs=outputs_gather_select_scores,
        axis=0)
    node_list.append(node_gather_select_scores)

    # get nums to input TopK
    outputs_shape_select_num = [result_name + "@shape_select_num"]
    node_shape_select_num = onnx.helper.make_node(
        'Shape',
        inputs=outputs_gather_select_scores,
        outputs=outputs_shape_select_num)
    node_list.append(node_shape_select_num)

    outputs_gather_select_num = [result_name + "@gather_select_num"]
    node_gather_select_num = onnx.helper.make_node(
        'Gather',
        inputs=outputs_shape_select_num + [result_name + "@const_0"],
        outputs=outputs_gather_select_num,
        axis=0)
    node_list.append(node_gather_select_num)

    outputs_unsqueeze_select_num = [result_name + "@unsqueeze_select_num"]
    node_unsqueeze_select_num = onnx.helper.make_node(
        'Unsqueeze',
        inputs=outputs_gather_select_num,
        outputs=outputs_unsqueeze_select_num,
        axes=[0])
    node_list.append(node_unsqueeze_select_num)

    outputs_concat_topK_select_num = [result_name + "@conat_topK_select_num"]
    node_conat_topK_select_num = onnx.helper.make_node(
        'Concat',
        inputs=outputs_unsqueeze_select_num + name_keep_top_k_2D,
        outputs=outputs_concat_topK_select_num,
        axis=0)
    node_list.append(node_conat_topK_select_num)

    outputs_cast_concat_topK_select_num = [
        result_name + "@concat_topK_select_num"
    ]
    node_outputs_cast_concat_topK_select_num = onnx.helper.make_node(
        'Cast',
        inputs=outputs_concat_topK_select_num,
        outputs=outputs_cast_concat_topK_select_num,
        to=6)
    node_list.append(node_outputs_cast_concat_topK_select_num)
    # get min(topK, num_select)
    outputs_compare_topk_num_select = [result_name + "@compare_topk_num_select"]
    node_compare_topk_num_select = onnx.helper.make_node(
        'ReduceMin',
        inputs=outputs_cast_concat_topK_select_num,
        outputs=outputs_compare_topk_num_select,
        keepdims=0)
    node_list.append(node_compare_topk_num_select)

    # unsqueeze the indices to 1D tensor
    outputs_unsqueeze_topk_select_indices = [
        result_name + "@unsqueeze_topk_select_indices"
    ]
    node_unsqueeze_topk_select_indices = onnx.helper.make_node(
        'Unsqueeze',
        inputs=outputs_compare_topk_num_select,
        outputs=outputs_unsqueeze_topk_select_indices,
        axes=[0])
    node_list.append(node_unsqueeze_topk_select_indices)

    # cast the indices to INT64
    outputs_cast_topk_indices = [result_name + "@cast_topk_indices"]
    node_cast_topk_indices = onnx.helper.make_node(
        'Cast',
        inputs=outputs_unsqueeze_topk_select_indices,
        outputs=outputs_cast_topk_indices,
        to=7)
    node_list.append(node_cast_topk_indices)

    # select topk scores indices
    outputs_topk_select_topk_indices = [result_name + "@topk_select_topk_values", \
        result_name + "@topk_select_topk_indices"]
    node_topk_select_topk_indices = onnx.helper.make_node(
        'TopK',
        inputs=outputs_gather_select_scores + outputs_cast_topk_indices,
        outputs=outputs_topk_select_topk_indices)
    node_list.append(node_topk_select_topk_indices)

    # gather topk label, scores, boxes
    outputs_gather_topk_scores = [result_name + "@gather_topk_scores"]
    node_gather_topk_scores = onnx.helper.make_node(
        'Gather',
        inputs=outputs_gather_select_scores +
        [outputs_topk_select_topk_indices[1]],
        outputs=outputs_gather_topk_scores,
        axis=0)
    node_list.append(node_gather_topk_scores)

    outputs_gather_topk_class = [result_name + "@gather_topk_class"]
    node_gather_topk_class = onnx.helper.make_node(
        'Gather',
        inputs=outputs_gather_1_nonzero +
        [outputs_topk_select_topk_indices[1]],
        outputs=outputs_gather_topk_class,
        axis=1)
    node_list.append(node_gather_topk_class)

    # gather the boxes need to gather the boxes id, then get boxes
    outputs_gather_topk_boxes_id = [result_name + "@gather_topk_boxes_id"]
    node_gather_topk_boxes_id = onnx.helper.make_node(
        'Gather',
        inputs=outputs_gather_2_nonzero +
        [outputs_topk_select_topk_indices[1]],
        outputs=outputs_gather_topk_boxes_id,
        axis=1)
    node_list.append(node_gather_topk_boxes_id)

    # squeeze the gather_topk_boxes_id to 1 dim
    outputs_squeeze_topk_boxes_id = [result_name + "@squeeze_topk_boxes_id"]
    node_squeeze_topk_boxes_id = onnx.helper.make_node(
        'Squeeze',
        inputs=outputs_gather_topk_boxes_id,
        outputs=outputs_squeeze_topk_boxes_id,
        axes=[0, 2])
    node_list.append(node_squeeze_topk_boxes_id)

    outputs_gather_select_boxes = [result_name + "@gather_select_boxes"]
    node_gather_select_boxes = onnx.helper.make_node(
        'Gather',
        inputs=inputs['BBoxes'] + outputs_squeeze_topk_boxes_id,
        outputs=outputs_gather_select_boxes,
        axis=1)
    node_list.append(node_gather_select_boxes)

    # concat the final result
    # before concat need to cast the class to float
    outputs_cast_topk_class = [result_name + "@cast_topk_class"]
    node_cast_topk_class = onnx.helper.make_node(
        'Cast',
        inputs=outputs_gather_topk_class,
        outputs=outputs_cast_topk_class,
        to=1)
    node_list.append(node_cast_topk_class)

    outputs_unsqueeze_topk_scores = [result_name + "@unsqueeze_topk_scores"]
    node_unsqueeze_topk_scores = onnx.helper.make_node(
        'Unsqueeze',
        inputs=outputs_gather_topk_scores,
        outputs=outputs_unsqueeze_topk_scores,
        axes=[0, 2])
    node_list.append(node_unsqueeze_topk_scores)

    inputs_concat_final_results = outputs_cast_topk_class + outputs_unsqueeze_topk_scores + \
        outputs_gather_select_boxes
    outputs_sort_by_socre_results = [result_name + "@concat_topk_scores"]
    node_sort_by_socre_results = onnx.helper.make_node(
        'Concat',
        inputs=inputs_concat_final_results,
        outputs=outputs_sort_by_socre_results,
        axis=2)
    node_list.append(node_sort_by_socre_results)

    # select topk classes indices
    outputs_squeeze_cast_topk_class = [result_name + "@squeeze_cast_topk_class"]
    node_squeeze_cast_topk_class = onnx.helper.make_node(
        'Squeeze',
        inputs=outputs_cast_topk_class,
        outputs=outputs_squeeze_cast_topk_class,
        axes=[0, 2])
    node_list.append(node_squeeze_cast_topk_class)

    outputs_neg_squeeze_cast_topk_class = [
        result_name + "@neg_squeeze_cast_topk_class"
    ]
    node_neg_squeeze_cast_topk_class = onnx.helper.make_node(
        'Neg',
        inputs=outputs_squeeze_cast_topk_class,
        outputs=outputs_neg_squeeze_cast_topk_class)
    node_list.append(node_neg_squeeze_cast_topk_class)

    outputs_topk_select_classes_indices = [result_name + "@topk_select_topk_classes_scores", \
        result_name + "@topk_select_topk_classes_indices"]
    node_topk_select_topk_indices = onnx.helper.make_node(
        'TopK',
        inputs=outputs_neg_squeeze_cast_topk_class + outputs_cast_topk_indices,
        outputs=outputs_topk_select_classes_indices)
    node_list.append(node_topk_select_topk_indices)

    outputs_concat_final_results = outputs['Out']
    node_concat_final_results = onnx.helper.make_node(
        'Gather',
        inputs=outputs_sort_by_socre_results +
        [outputs_topk_select_classes_indices[1]],
        outputs=outputs_concat_final_results,
        axis=1)
    node_list.append(node_concat_final_results)

    return node_list
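Illustrative only, not part of the diff: a small NumPy analogue of the trickiest step above, flattening the NonMaxSuppression output (rows of [batch, class, box]) into indices over the reshaped N*C*M score tensor via class * M + box. The array values here are hypothetical placeholders.

import numpy as np

scores = np.random.rand(1, 3, 10).astype('float32')       # N x C x M, like inputs['Scores']
selected = np.array([[0, 1, 4], [0, 2, 7]])                # NMS output: batch, class, box

flat_scores = scores.reshape(-1)                           # the Reshape to rank 1
num_boxes = scores.shape[2]                                # Gather of shape index 2
flat_index = selected[:, 1] * num_boxes + selected[:, 2]   # "mul class * M" then "add" box index
picked = flat_scores[flat_index]                           # the Gather on the flattened scores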
x2paddle/op_mapper/paddle2onnx/opset9/paddle_custom_layer/yolo_box.py (new file, mode 100644)
import onnx
import numpy as np
from onnx import onnx_pb, helper


def get_old_name(arg, name_prefix=''):
    prefix_index = arg.find(name_prefix)

    if prefix_index != -1:
        last_prefix = arg[len(name_prefix):]
    else:
        last_prefix = arg
    idx = last_prefix.find('@')
    if idx != -1:
        last_prefix = last_prefix[:idx]
    return name_prefix + last_prefix


def yolo_box(op, block):
    inputs = dict()
    outputs = dict()
    attrs = dict()
    for name in op.input_names:
        inputs[name] = op.input(name)
    for name in op.output_names:
        outputs[name] = op.output(name)
    for name in op.attr_names:
        attrs[name] = op.attr(name)
    model_name = outputs['Boxes'][0]
    input_shape = block.vars[get_old_name(inputs['X'][0])].shape
    image_size = inputs['ImgSize']
    input_height = input_shape[2]
    input_width = input_shape[3]

    class_num = attrs['class_num']
    anchors = attrs['anchors']
    num_anchors = int(len(anchors)) // 2
    downsample_ratio = attrs['downsample_ratio']
    input_size = input_height * downsample_ratio
    conf_thresh = attrs['conf_thresh']
    conf_thresh_mat = np.ones([num_anchors * input_height * input_width]) * conf_thresh

    node_list = []
    im_outputs = []

    x_shape = [1, num_anchors, 5 + class_num, input_height, input_width]
    name_x_shape = [model_name + "@x_shape"]
    node_x_shape = onnx.helper.make_node(
        'Constant', inputs=[], outputs=name_x_shape,
        value=onnx.helper.make_tensor(
            name=name_x_shape[0] + "@const",
            data_type=onnx.TensorProto.INT64,
            dims=[5],
            vals=x_shape))
    node_list.append(node_x_shape)

    outputs_x_reshape = [model_name + "@reshape"]
    node_x_reshape = onnx.helper.make_node(
        'Reshape', inputs=inputs['X'] + name_x_shape, outputs=outputs_x_reshape)
    node_list.append(node_x_reshape)

    outputs_x_transpose = [model_name + "@x_transpose"]
    node_x_transpose = onnx.helper.make_node(
        'Transpose',
        inputs=outputs_x_reshape,
        outputs=outputs_x_transpose,
        perm=[0, 1, 3, 4, 2])
    node_list.append(node_x_transpose)

    range_x = []
    range_y = []
    for i in range(0, input_width):
        range_x.append(i)
    for j in range(0, input_height):
        range_y.append(j)

    name_range_x = [model_name + "@range_x"]
    node_range_x = onnx.helper.make_node(
        'Constant', inputs=[], outputs=name_range_x,
        value=onnx.helper.make_tensor(
            name=name_range_x[0] + "@const",
            data_type=onnx.TensorProto.FLOAT,
            dims=[input_width],
            vals=range_x))
    node_list.append(node_range_x)

    name_range_y = [model_name + "@range_y"]
    node_range_y = onnx.helper.make_node(
        'Constant', inputs=[], outputs=name_range_y,
        value=onnx.helper.make_tensor(
            name=name_range_y[0] + "@const",
            data_type=onnx.TensorProto.FLOAT,
            dims=[input_height],
            vals=range_y))
    node_list.append(node_range_y)

    range_x_new_shape = [1, input_width]
    range_y_new_shape = [input_height, 1]

    name_range_x_new_shape = [model_name + "@range_x_new_shape"]
    node_range_x_new_shape = onnx.helper.make_node(
        'Constant', inputs=[], outputs=name_range_x_new_shape,
        value=onnx.helper.make_tensor(
            name=name_range_x_new_shape[0] + "@const",
            data_type=onnx.TensorProto.INT64,
            dims=[len(range_x_new_shape)],
            vals=range_x_new_shape))
    node_list.append(node_range_x_new_shape)

    name_range_y_new_shape = [model_name + "@range_y_new_shape"]
    node_range_y_new_shape = onnx.helper.make_node(
        'Constant', inputs=[], outputs=name_range_y_new_shape,
        value=onnx.helper.make_tensor(
            name=name_range_y_new_shape[0] + "@const",
            data_type=onnx.TensorProto.INT64,
            dims=[len(range_y_new_shape)],
            vals=range_y_new_shape))
    node_list.append(node_range_y_new_shape)

    outputs_range_x_reshape = [model_name + "@range_x_reshape"]
    node_range_x_reshape = onnx.helper.make_node(
        'Reshape',
        inputs=name_range_x + name_range_x_new_shape,
        outputs=outputs_range_x_reshape)
    node_list.append(node_range_x_reshape)

    outputs_range_y_reshape = [model_name + "@range_y_reshape"]
    node_range_y_reshape = onnx.helper.make_node(
        'Reshape',
        inputs=name_range_y + name_range_y_new_shape,
        outputs=outputs_range_y_reshape)
    node_list.append(node_range_y_reshape)

    outputs_grid_x = [model_name + "@grid_x"]
    node_grid_x = onnx.helper.make_node(
        "Tile",
        inputs=outputs_range_x_reshape + name_range_y_new_shape,
        outputs=outputs_grid_x)
    node_list.append(node_grid_x)

    outputs_grid_y = [model_name + "@grid_y"]
    node_grid_y = onnx.helper.make_node(
        "Tile",
        inputs=outputs_range_y_reshape + name_range_x_new_shape,
        outputs=outputs_grid_y)
    node_list.append(node_grid_y)

    outputs_box_x = [model_name + "@box_x"]
    outputs_box_y = [model_name + "@box_y"]
    outputs_box_w = [model_name + "@box_w"]
    outputs_box_h = [model_name + "@box_h"]
    outputs_conf = [model_name + "@conf"]
    outputs_prob = [model_name + "@prob"]

    node_split_input = onnx.helper.make_node(
        "Split",
        inputs=outputs_x_transpose,
        outputs=outputs_box_x + outputs_box_y + outputs_box_w \
                + outputs_box_h + outputs_conf + outputs_prob,
        axis=-1,
        split=[1, 1, 1, 1, 1, class_num])
    node_list.append(node_split_input)

    outputs_box_x_sigmoid = [model_name + "@box_x_sigmoid"]
    outputs_box_y_sigmoid = [model_name + "@box_y_sigmoid"]

    node_box_x_sigmoid = onnx.helper.make_node(
        "Sigmoid", inputs=outputs_box_x, outputs=outputs_box_x_sigmoid)
    node_list.append(node_box_x_sigmoid)

    node_box_y_sigmoid = onnx.helper.make_node(
        "Sigmoid", inputs=outputs_box_y, outputs=outputs_box_y_sigmoid)
    node_list.append(node_box_y_sigmoid)

    outputs_box_x_squeeze = [model_name + "@box_x_squeeze"]
    outputs_box_y_squeeze = [model_name + "@box_y_squeeze"]

    node_box_x_squeeze = onnx.helper.make_node(
        'Squeeze',
        inputs=outputs_box_x_sigmoid,
        outputs=outputs_box_x_squeeze,
        axes=[4])
    node_list.append(node_box_x_squeeze)

    node_box_y_squeeze = onnx.helper.make_node(
        'Squeeze',
        inputs=outputs_box_y_sigmoid,
        outputs=outputs_box_y_squeeze,
        axes=[4])
    node_list.append(node_box_y_squeeze)

    outputs_box_x_add_grid = [model_name + "@box_x_add_grid"]
    outputs_box_y_add_grid = [model_name + "@box_y_add_grid"]

    node_box_x_add_grid = onnx.helper.make_node(
        "Add",
        inputs=outputs_grid_x + outputs_box_x_squeeze,
        outputs=outputs_box_x_add_grid)
    node_list.append(node_box_x_add_grid)

    node_box_y_add_grid = onnx.helper.make_node(
        "Add",
        inputs=outputs_grid_y + outputs_box_y_squeeze,
        outputs=outputs_box_y_add_grid)
    node_list.append(node_box_y_add_grid)

    name_input_h = [model_name + "@input_h"]
    name_input_w = [model_name + "@input_w"]

    node_input_h = onnx.helper.make_node(
        'Constant', inputs=[], outputs=name_input_h,
        value=onnx.helper.make_tensor(
            name=name_input_w[0] + "@const",
            data_type=onnx.TensorProto.FLOAT,
            dims=(),
            vals=[input_height]))
    node_list.append(node_input_h)

    node_input_w = onnx.helper.make_node(
        'Constant', inputs=[], outputs=name_input_w,
        value=onnx.helper.make_tensor(
            name=name_input_w[0] + "@const",
            data_type=onnx.TensorProto.FLOAT,
            dims=(),
            vals=[input_width]))
    node_list.append(node_input_w)

    outputs_box_x_encode = [model_name + "@box_x_encode"]
    outputs_box_y_encode = [model_name + "@box_y_encode"]

    node_box_x_encode = onnx.helper.make_node(
        'Div',
        inputs=outputs_box_x_add_grid + name_input_w,
        outputs=outputs_box_x_encode)
    node_list.append(node_box_x_encode)

    node_box_y_encode = onnx.helper.make_node(
        'Div',
        inputs=outputs_box_y_add_grid + name_input_h,
        outputs=outputs_box_y_encode)
    node_list.append(node_box_y_encode)

    name_anchor_tensor = [model_name + "@anchor_tensor"]
    node_anchor_tensor = onnx.helper.make_node(
        "Constant", inputs=[], outputs=name_anchor_tensor,
        value=onnx.helper.make_tensor(
            name=name_anchor_tensor[0] + "@const",
            data_type=onnx.TensorProto.FLOAT,
            dims=[len(anchors)],
            vals=anchors))
    node_list.append(node_anchor_tensor)

    anchor_shape = [int(num_anchors), 2]
    name_anchor_shape = [model_name + "@anchor_shape"]
    node_anchor_shape = onnx.helper.make_node(
        "Constant", inputs=[], outputs=name_anchor_shape,
        value=onnx.helper.make_tensor(
            name=name_anchor_shape[0] + "@const",
            data_type=onnx.TensorProto.INT64,
            dims=[2],
            vals=anchor_shape))
    node_list.append(node_anchor_shape)

    outputs_anchor_tensor_reshape = [model_name + "@anchor_tensor_reshape"]
    node_anchor_tensor_reshape = onnx.helper.make_node(
        "Reshape",
        inputs=name_anchor_tensor + name_anchor_shape,
        outputs=outputs_anchor_tensor_reshape)
    node_list.append(node_anchor_tensor_reshape)

    name_input_size = [model_name + "@input_size"]
    node_input_size = onnx.helper.make_node(
        "Constant", inputs=[], outputs=name_input_size,
        value=onnx.helper.make_tensor(
            name=name_input_size[0] + "@const",
            data_type=onnx.TensorProto.FLOAT,
            dims=(),
            vals=[input_size]))
    node_list.append(node_input_size)

    outputs_anchors_div_input_size = [model_name + "@anchors_div_input_size"]
    node_anchors_div_input_size = onnx.helper.make_node(
        "Div",
        inputs=outputs_anchor_tensor_reshape + name_input_size,
        outputs=outputs_anchors_div_input_size)
    node_list.append(node_anchors_div_input_size)

    outputs_anchor_w = [model_name + "@anchor_w"]
    outputs_anchor_h = [model_name + "@anchor_h"]

    node_anchor_split = onnx.helper.make_node(
        'Split',
        inputs=outputs_anchors_div_input_size,
        outputs=outputs_anchor_w + outputs_anchor_h,
        axis=1,
        split=[1, 1])
    node_list.append(node_anchor_split)

    new_anchor_shape = [1, int(num_anchors), 1, 1]
    name_new_anchor_shape = [model_name + "@new_anchor_shape"]
    node_new_anchor_shape = onnx.helper.make_node(
        'Constant', inputs=[], outputs=name_new_anchor_shape,
        value=onnx.helper.make_tensor(
            name=name_new_anchor_shape[0] + "@const",
            data_type=onnx.TensorProto.INT64,
            dims=[len(new_anchor_shape)],
            vals=new_anchor_shape))
    node_list.append(node_new_anchor_shape)

    outputs_anchor_w_reshape = [model_name + "@anchor_w_reshape"]
    outputs_anchor_h_reshape = [model_name + "@anchor_h_reshape"]

    node_anchor_w_reshape = onnx.helper.make_node(
        'Reshape',
        inputs=outputs_anchor_w + name_new_anchor_shape,
        outputs=outputs_anchor_w_reshape)
    node_list.append(node_anchor_w_reshape)

    node_anchor_h_reshape = onnx.helper.make_node(
        'Reshape',
        inputs=outputs_anchor_h + name_new_anchor_shape,
        outputs=outputs_anchor_h_reshape)
    node_list.append(node_anchor_h_reshape)

    outputs_box_w_squeeze = [model_name + "@box_w_squeeze"]
    node_box_w_squeeze = onnx.helper.make_node(
        'Squeeze',
        inputs=outputs_box_w,
        outputs=outputs_box_w_squeeze,
        axes=[4])
    node_list.append(node_box_w_squeeze)

    outputs_box_h_squeeze = [model_name + "@box_h_squeeze"]
    node_box_h_squeeze = onnx.helper.make_node(
        'Squeeze',
        inputs=outputs_box_h,
        outputs=outputs_box_h_squeeze,
        axes=[4])
    node_list.append(node_box_h_squeeze)

    outputs_box_w_exp = [model_name + "@box_w_exp"]
    node_box_w_exp = onnx.helper.make_node(
        "Exp", inputs=outputs_box_w_squeeze, outputs=outputs_box_w_exp)
    node_list.append(node_box_w_exp)

    outputs_box_h_exp = [model_name + "@box_h_exp"]
    node_box_h_exp = onnx.helper.make_node(
        "Exp", inputs=outputs_box_h_squeeze, outputs=outputs_box_h_exp)
    node_list.append(node_box_h_exp)

    outputs_box_w_encode = [model_name + "box_w_encode"]
    outputs_box_h_encode = [model_name + "box_h_encode"]

    node_box_w_encode = onnx.helper.make_node(
        'Mul',
        inputs=outputs_box_w_exp + outputs_anchor_w_reshape,
        outputs=outputs_box_w_encode)
    node_list.append(node_box_w_encode)

    node_box_h_encode = onnx.helper.make_node(
        'Mul',
        inputs=outputs_box_h_exp + outputs_anchor_h_reshape,
        outputs=outputs_box_h_encode)
    node_list.append(node_box_h_encode)

    outputs_conf_sigmoid = [model_name + "@conf_sigmoid"]
    node_conf_sigmoid = onnx.helper.make_node(
        'Sigmoid', inputs=outputs_conf, outputs=outputs_conf_sigmoid)
    node_list.append(node_conf_sigmoid)

    name_conf_thresh = [model_name + "@conf_thresh"]
    node_conf_thresh = onnx.helper.make_node(
        'Constant', inputs=[], outputs=name_conf_thresh,
        value=onnx.helper.make_tensor(
            name=name_conf_thresh[0] + "@const",
            data_type=onnx.TensorProto.FLOAT,
            dims=[num_anchors * input_height * input_width],
            vals=conf_thresh_mat))
    node_list.append(node_conf_thresh)

    conf_shape = [1, int(num_anchors), input_height, input_width, 1]
    name_conf_shape = [model_name + "@conf_shape"]
    node_conf_shape = onnx.helper.make_node(
        'Constant', inputs=[], outputs=name_conf_shape,
        value=onnx.helper.make_tensor(
            name=name_conf_shape[0] + "@const",
            data_type=onnx.TensorProto.INT64,
            dims=[len(conf_shape)],
            vals=conf_shape))
    node_list.append(node_conf_shape)

    outputs_conf_thresh_reshape = [model_name + "@conf_thresh_reshape"]
    node_conf_thresh_reshape = onnx.helper.make_node(
        'Reshape',
        inputs=name_conf_thresh + name_conf_shape,
        outputs=outputs_conf_thresh_reshape)
    node_list.append(node_conf_thresh_reshape)

    outputs_conf_sub = [model_name + "@conf_sub"]
    node_conf_sub = onnx.helper.make_node(
        'Sub',
        inputs=outputs_conf_sigmoid + outputs_conf_thresh_reshape,
        outputs=outputs_conf_sub)
    node_list.append(node_conf_sub)

    outputs_conf_clip = [model_name + "@conf_clip"]
    node_conf_clip = onnx.helper.make_node(
        'Clip', inputs=outputs_conf_sub, outputs=outputs_conf_clip)
    node_list.append(node_conf_clip)

    zeros = [0]
    name_zeros = [model_name + "@zeros"]
    node_zeros = onnx.helper.make_node(
        'Constant', inputs=[], outputs=name_zeros,
        value=onnx.helper.make_tensor(
            name=name_zeros[0] + "@const",
            data_type=onnx.TensorProto.FLOAT,
            dims=(),
            vals=zeros))
    node_list.append(node_zeros)

    outputs_conf_clip_bool = [model_name + "@conf_clip_bool"]
    node_conf_clip_bool = onnx.helper.make_node(
        'Greater',
        inputs=outputs_conf_clip + name_zeros,
        outputs=outputs_conf_clip_bool)
    node_list.append(node_conf_clip_bool)

    outputs_conf_clip_cast = [model_name + "@conf_clip_cast"]
    node_conf_clip_cast = onnx.helper.make_node(
        'Cast', inputs=outputs_conf_clip_bool, outputs=outputs_conf_clip_cast, to=1)
    node_list.append(node_conf_clip_cast)

    outputs_conf_set_zero = [model_name + "@conf_set_zero"]
    node_conf_set_zero = onnx.helper.make_node(
        'Mul',
        inputs=outputs_conf_sigmoid + outputs_conf_clip_cast,
        outputs=outputs_conf_set_zero)
    node_list.append(node_conf_set_zero)

    outputs_prob_sigmoid = [model_name + "@prob_sigmoid"]
    node_prob_sigmoid = onnx.helper.make_node(
        'Sigmoid', inputs=outputs_prob, outputs=outputs_prob_sigmoid)
    node_list.append(node_prob_sigmoid)

    new_shape = [1, int(num_anchors), input_height, input_width, 1]
    name_new_shape = [model_name + "@new_shape"]
    node_new_shape = onnx.helper.make_node(
        'Constant', inputs=[], outputs=name_new_shape,
        value=onnx.helper.make_tensor(
            name=name_new_shape[0] + "@const",
            data_type=onnx.TensorProto.INT64,
            dims=[len(new_shape)],
            vals=new_shape))
    node_list.append(node_new_shape)

    outputs_conf_new_shape = [model_name + "@_conf_new_shape"]
    node_conf_new_shape = onnx.helper.make_node(
        'Reshape',
        inputs=outputs_conf_set_zero + name_new_shape,
        outputs=outputs_conf_new_shape)
    node_list.append(node_conf_new_shape)

    outputs_score = [model_name + "@score"]
    node_score = onnx.helper.make_node(
        'Mul',
        inputs=outputs_prob_sigmoid + outputs_conf_new_shape,
        outputs=outputs_score)
    node_list.append(node_score)

    outputs_conf_bool = [model_name + "@conf_bool"]
    node_conf_bool = onnx.helper.make_node(
        'Greater',
        inputs=outputs_conf_new_shape + name_zeros,
        outputs=outputs_conf_bool)
    node_list.append(node_conf_bool)

    outputs_box_x_new_shape = [model_name + "@box_x_new_shape"]
    node_box_x_new_shape = onnx.helper.make_node(
        'Reshape',
        inputs=outputs_box_x_encode + name_new_shape,
        outputs=outputs_box_x_new_shape)
    node_list.append(node_box_x_new_shape)

    outputs_box_y_new_shape = [model_name + "@box_y_new_shape"]
    node_box_y_new_shape = onnx.helper.make_node(
        'Reshape',
        inputs=outputs_box_y_encode + name_new_shape,
        outputs=outputs_box_y_new_shape)
    node_list.append(node_box_y_new_shape)

    outputs_box_w_new_shape = [model_name + "@box_w_new_shape"]
    node_box_w_new_shape = onnx.helper.make_node(
        'Reshape',
        inputs=outputs_box_w_encode + name_new_shape,
        outputs=outputs_box_w_new_shape)
    node_list.append(node_box_w_new_shape)

    outputs_box_h_new_shape = [model_name + "@box_h_new_shape"]
    node_box_h_new_shape = onnx.helper.make_node(
        'Reshape',
        inputs=outputs_box_h_encode + name_new_shape,
        outputs=outputs_box_h_new_shape)
    node_list.append(node_box_h_new_shape)

    outputs_pred_box = [model_name + "@pred_box"]
    node_pred_box = onnx.helper.make_node(
        'Concat',
        inputs=outputs_box_x_new_shape + outputs_box_y_new_shape + \
               outputs_box_w_new_shape + outputs_box_h_new_shape,
        outputs=outputs_pred_box,
        axis=4)
    node_list.append(node_pred_box)

    outputs_conf_cast = [model_name + "conf_cast"]
    node_conf_cast = onnx.helper.make_node(
        'Cast', inputs=outputs_conf_bool, outputs=outputs_conf_cast, to=1)
    node_list.append(node_conf_cast)

    outputs_pred_box_mul_conf = [model_name + "@pred_box_mul_conf"]
    node_pred_box_mul_conf = onnx.helper.make_node(
        'Mul',
        inputs=outputs_pred_box + outputs_conf_cast,
        outputs=outputs_pred_box_mul_conf)
    node_list.append(node_pred_box_mul_conf)

    box_shape = [1, int(num_anchors) * input_height * input_width, 4]
    name_box_shape = [model_name + "@box_shape"]
    node_box_shape = onnx.helper.make_node(
        'Constant', inputs=[], outputs=name_box_shape,
        value=onnx.helper.make_tensor(
            name=name_box_shape[0] + "@const",
            data_type=onnx.TensorProto.INT64,
            dims=[len(box_shape)],
            vals=box_shape))
    node_list.append(node_box_shape)

    outputs_pred_box_new_shape = [model_name + "@pred_box_new_shape"]
    node_pred_box_new_shape = onnx.helper.make_node(
        'Reshape',
        inputs=outputs_pred_box_mul_conf + name_box_shape,
        outputs=outputs_pred_box_new_shape)
    node_list.append(node_pred_box_new_shape)

    outputs_pred_box_x = [model_name + "@_pred_box_x"]
    outputs_pred_box_y = [model_name + "@_pred_box_y"]
    outputs_pred_box_w = [model_name + "@_pred_box_w"]
    outputs_pred_box_h = [model_name + "@_pred_box_h"]

    node_pred_box_split = onnx.helper.make_node(
        'Split',
        inputs=outputs_pred_box_new_shape,
        outputs=outputs_pred_box_x + outputs_pred_box_y + outputs_pred_box_w +
        outputs_pred_box_h,
        axis=2)
    node_list.append(node_pred_box_split)

    name_number_two = [model_name + "@number_two"]
    node_number_two = onnx.helper.make_node(
        "Constant", inputs=[], outputs=name_number_two,
        value=onnx.helper.make_tensor(
            name=name_number_two[0] + "@const",
            data_type=onnx.TensorProto.FLOAT,
            dims=(),
            vals=[2]))
    node_list.append(node_number_two)

    outputs_half_w = [model_name + "@half_w"]
    node_half_w = onnx.helper.make_node(
        "Div",
        inputs=outputs_pred_box_w + name_number_two,
        outputs=outputs_half_w)
    node_list.append(node_half_w)

    outputs_half_h = [model_name + "@half_h"]
    node_half_h = onnx.helper.make_node(
        "Div",
        inputs=outputs_pred_box_h + name_number_two,
        outputs=outputs_half_h)
    node_list.append(node_half_h)

    outputs_pred_box_x1 = [model_name + "@pred_box_x1"]
    node_pred_box_x1 = onnx.helper.make_node(
        'Sub',
        inputs=outputs_pred_box_x + outputs_half_w,
        outputs=outputs_pred_box_x1)
    node_list.append(node_pred_box_x1)

    outputs_pred_box_y1 = [model_name + "@pred_box_y1"]
    node_pred_box_y1 = onnx.helper.make_node(
        'Sub',
        inputs=outputs_pred_box_y + outputs_half_h,
        outputs=outputs_pred_box_y1)
    node_list.append(node_pred_box_y1)

    outputs_pred_box_x2 = [model_name + "@pred_box_x2"]
    node_pred_box_x2 = onnx.helper.make_node(
        'Add',
        inputs=outputs_pred_box_x + outputs_half_w,
        outputs=outputs_pred_box_x2)
    node_list.append(node_pred_box_x2)

    outputs_pred_box_y2 = [model_name + "@pred_box_y2"]
    node_pred_box_y2 = onnx.helper.make_node(
        'Add',
        inputs=outputs_pred_box_y + outputs_half_h,
        outputs=outputs_pred_box_y2)
    node_list.append(node_pred_box_y2)

    outputs_sqeeze_image_size = [model_name + "@sqeeze_image_size"]
    node_sqeeze_image_size = onnx.helper.make_node(
        "Squeeze",
        axes=[0],
        inputs=image_size,
        outputs=outputs_sqeeze_image_size)
    node_list.append(node_sqeeze_image_size)

    output_img_height = [model_name + "@img_height"]
    output_img_width = [model_name + "@img_width"]
    node_image_size_split = onnx.helper.make_node(
        "Split",
        inputs=outputs_sqeeze_image_size,
        outputs=output_img_height + output_img_width,
        axis=-1,
        split=[1, 1])
    node_list.append(node_image_size_split)

    output_img_width_cast = [model_name + "@img_width_cast"]
    node_img_width_cast = onnx.helper.make_node(
        'Cast', inputs=output_img_width, outputs=output_img_width_cast, to=1)
    node_list.append(node_img_width_cast)

    output_img_height_cast = [model_name + "@img_height_cast"]
    node_img_height_cast = onnx.helper.make_node(
        'Cast', inputs=output_img_height, outputs=output_img_height_cast, to=1)
    node_list.append(node_img_height_cast)

    outputs_pred_box_x1_decode = [model_name + "@pred_box_x1_decode"]
    outputs_pred_box_y1_decode = [model_name + "@pred_box_y1_decode"]
    outputs_pred_box_x2_decode = [model_name + "@pred_box_x2_decode"]
    outputs_pred_box_y2_decode = [model_name + "@pred_box_y2_decode"]

    node_pred_box_x1_decode = onnx.helper.make_node(
        'Mul',
        inputs=outputs_pred_box_x1 + output_img_width_cast,
        outputs=outputs_pred_box_x1_decode)
    node_list.append(node_pred_box_x1_decode)

    node_pred_box_y1_decode = onnx.helper.make_node(
        'Mul',
        inputs=outputs_pred_box_y1 + output_img_height_cast,
        outputs=outputs_pred_box_y1_decode)
    node_list.append(node_pred_box_y1_decode)

    node_pred_box_x2_decode = onnx.helper.make_node(
        'Mul',
        inputs=outputs_pred_box_x2 + output_img_width_cast,
        outputs=outputs_pred_box_x2_decode)
    node_list.append(node_pred_box_x2_decode)

    node_pred_box_y2_decode = onnx.helper.make_node(
        'Mul',
        inputs=outputs_pred_box_y2 + output_img_height_cast,
        outputs=outputs_pred_box_y2_decode)
    node_list.append(node_pred_box_y2_decode)

    name_number_one = [model_name + "@one"]
    node_number_one = onnx.helper.make_node(
        'Constant', inputs=[], outputs=name_number_one,
        value=onnx.helper.make_tensor(
            name=name_number_one[0] + "@const",
            data_type=onnx.TensorProto.FLOAT,
            dims=(),
            vals=[1]))
    node_list.append(node_number_one)

    output_new_img_height = [model_name + "@new_img_height"]
    node_new_img_height = onnx.helper.make_node(
        'Sub',
        inputs=output_img_height_cast + name_number_one,
        outputs=output_new_img_height)
    node_list.append(node_new_img_height)

    output_new_img_width = [model_name + "@new_img_width"]
    node_new_img_width = onnx.helper.make_node(
        'Sub',
        inputs=output_img_width_cast + name_number_one,
        outputs=output_new_img_width)
    node_list.append(node_new_img_width)

    outputs_pred_box_x2_sub_w = [model_name + "@pred_box_x2_sub_w"]
    node_pred_box_x2_sub_w = onnx.helper.make_node(
        'Sub',
        inputs=outputs_pred_box_x2_decode + output_new_img_width,
        outputs=outputs_pred_box_x2_sub_w)
    node_list.append(node_pred_box_x2_sub_w)

    outputs_pred_box_y2_sub_h = [model_name + "@pred_box_y2_sub_h"]
    node_pred_box_y2_sub_h = onnx.helper.make_node(
        'Sub',
        inputs=outputs_pred_box_y2_decode + output_new_img_height,
        outputs=outputs_pred_box_y2_sub_h)
    node_list.append(node_pred_box_y2_sub_h)

    outputs_pred_box_x1_clip = [model_name + "@pred_box_x1_clip"]
    outputs_pred_box_y1_clip = [model_name + "@pred_box_y1_clip"]
    outputs_pred_box_x2_clip = [model_name + "@pred_box_x2_clip"]
    outputs_pred_box_y2_clip = [model_name + "@pred_box_y2_clip"]

    node_pred_box_x1_clip = onnx.helper.make_node(
        'Clip',
        inputs=outputs_pred_box_x1_decode,
        outputs=outputs_pred_box_x1_clip,
        min=0.0,
        max=float(np.inf))
    node_list.append(node_pred_box_x1_clip)

    node_pred_box_y1_clip = onnx.helper.make_node(
        'Clip',
        inputs=outputs_pred_box_y1_decode,
        outputs=outputs_pred_box_y1_clip,
        min=0.0,
        max=float(np.inf))
    node_list.append(node_pred_box_y1_clip)

    node_pred_box_x2_clip = onnx.helper.make_node(
        'Clip',
        inputs=outputs_pred_box_x2_sub_w,
        outputs=outputs_pred_box_x2_clip,
        min=0.0,
        max=float(np.inf))
    node_list.append(node_pred_box_x2_clip)

    node_pred_box_y2_clip = onnx.helper.make_node(
        'Clip',
        inputs=outputs_pred_box_y2_sub_h,
        outputs=outputs_pred_box_y2_clip,
        min=0.0,
        max=float(np.inf))
    node_list.append(node_pred_box_y2_clip)

    outputs_pred_box_x2_res = [model_name + "@box_x2_res"]
    node_pred_box_x2_res = onnx.helper.make_node(
        'Sub',
        inputs=outputs_pred_box_x2_decode + outputs_pred_box_x2_clip,
        outputs=outputs_pred_box_x2_res)
    node_list.append(node_pred_box_x2_res)

    outputs_pred_box_y2_res = [model_name + "@box_y2_res"]
    node_pred_box_y2_res = onnx.helper.make_node(
        'Sub',
        inputs=outputs_pred_box_y2_decode + outputs_pred_box_y2_clip,
        outputs=outputs_pred_box_y2_res)
    node_list.append(node_pred_box_y2_res)

    node_pred_box_result = onnx.helper.make_node(
        'Concat',
        inputs=outputs_pred_box_x1_clip + outputs_pred_box_y1_clip +
        outputs_pred_box_x2_res + outputs_pred_box_y2_res,
        outputs=outputs['Boxes'],
        axis=-1)
    node_list.append(node_pred_box_result)

    score_shape = [1, input_height * input_width * int(num_anchors), class_num]
    name_score_shape = [model_name + "@score_shape"]
    node_score_shape = onnx.helper.make_node(
        "Constant", inputs=[], outputs=name_score_shape,
        value=onnx.helper.make_tensor(
            name=name_score_shape[0] + "@const",
            data_type=onnx.TensorProto.INT64,
            dims=[len(score_shape)],
            vals=score_shape))
    node_list.append(node_score_shape)

    node_score_new_shape = onnx.helper.make_node(
        'Reshape',
        inputs=outputs_score + name_score_shape,
        outputs=outputs['Scores'])
    node_list.append(node_score_new_shape)
    return node_list
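Illustrative only, not part of the diff: a NumPy sketch of the YOLO box decode that the node graph above expresses for a single anchor at grid cell (cx, cy). The function name and argument names are hypothetical; anchor_w/anchor_h, grid size W x H, input_size and the image size follow the variables used in the code above.

import numpy as np

def sigmoid(v):
    return 1.0 / (1.0 + np.exp(-v))

def decode_box(tx, ty, tw, th, cx, cy, anchor_w, anchor_h, W, H, input_size, img_w, img_h):
    bx = (sigmoid(tx) + cx) / W                       # box_x_add_grid divided by input_w
    by = (sigmoid(ty) + cy) / H
    bw = np.exp(tw) * anchor_w / input_size           # box_w_exp times anchor / input_size
    bh = np.exp(th) * anchor_h / input_size
    x1 = np.maximum((bx - bw / 2) * img_w, 0.0)       # Clip with min=0
    y1 = np.maximum((by - bh / 2) * img_h, 0.0)
    x2 = np.minimum((bx + bw / 2) * img_w, img_w - 1) # x2_decode minus clipped overflow past img_w - 1
    y2 = np.minimum((by + bh / 2) * img_h, img_h - 1)
    return x1, y1, x2, y2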
x2paddle/op_mapper/paddle2onnx/paddle_op_mapper.py (new file, mode 100644)
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
import sys
import x2paddle
import os
import numpy as np
import paddle.fluid.core as core
import paddle.fluid as fluid
import onnx
from onnx import helper, onnx_pb
from x2paddle.op_mapper.paddle2onnx.opset9.opset import OpSet9
from x2paddle.op_mapper.paddle2onnx.opset10.opset import OpSet10
from x2paddle.op_mapper.paddle2onnx.opset11.opset import OpSet11


class PaddleOpMapper(object):
    def __init__(self):
        self.support_opsets = [9, 10, 11]
        self.default_opset = 10
        self.name_counter = dict()
        self.op_set = None

    def convert(self, program, save_dir, opset_number=10):
        self.op_set = self.create_opset(opset_number)
        weight_nodes = self.op_set.convert_weights(program)
        op_nodes = list()
        input_nodes = list()
        output_nodes = list()
        unsupported_ops = set()

        print("Translating PaddlePaddle to ONNX...\n")
        for block in program.blocks:
            for i, op in enumerate(block.ops):
                sys.stdout.write("\rTotal:{}, Current:{} : {} ".format(
                    len(block.ops), i + 1, op.type))
                sys.stdout.flush()
                if not hasattr(self.op_set, op.type):
                    unsupported_ops.add(op.type)
                    continue
                if len(unsupported_ops) > 0:
                    continue
                node = getattr(self.op_set, op.type)(op, block)
                if op.type == 'feed':
                    print(node.name)
                    input_nodes.append(node)
                elif op.type == 'fetch':
                    output_nodes.append(node)
                else:
                    if isinstance(node, list):
                        op_nodes = op_nodes + node
                    else:
                        op_nodes.append(node)

        if len(unsupported_ops) > 0:
            print("\nThere's {} ops are not supported yet".format(
                len(unsupported_ops)))
            for op in unsupported_ops:
                print("=========== {} ===========".format(op))
            return

        graph = helper.make_graph(
            nodes=weight_nodes + op_nodes,
            name='onnx_model_from_paddle',
            initializer=[],
            inputs=input_nodes,
            outputs=output_nodes)
        opset_imports = [helper.make_opsetid("", opset_number)]
        model = helper.make_model(
            graph, producer_name='X2Paddle', opset_imports=opset_imports)
        onnx.checker.check_model(model)

        if not os.path.isdir(save_dir):
            os.makedirs(save_dir)
        with open(os.path.join(save_dir, 'x2paddle_model.onnx'), 'wb') as f:
            f.write(model.SerializeToString())
        print("\nTranslated model saved in {}".format(
            os.path.join(save_dir, 'x2paddle_model.onnx')))

    def create_opset(self, opset_number):
        run_opset = self.default_opset
        opset = ''
        if opset_number in self.support_opsets:
            run_opset = opset_number
        else:
            for support_opset_number in self.support_opsets:
                if support_opset_number < opset_number:
                    run_opset = support_opset_number
                else:
                    break
        print(
            'Now, onnx2paddle support convert onnx model opset_verison {},'
            'opset_verison of your onnx model is {}, automatically treated as op_set: {}.'
            .format(self.support_opsets, opset_number, run_opset))
        opset = 'OpSet' + str(run_opset)
        return eval(opset)()
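A minimal usage sketch (not part of the diff) of driving PaddleOpMapper.convert directly on a fluid inference program; the model directory and output directory names here are placeholders.

import paddle.fluid as fluid
from x2paddle.op_mapper.paddle2onnx.paddle_op_mapper import PaddleOpMapper

exe = fluid.Executor(fluid.CPUPlace())
# load_inference_model returns (program, feed_target_names, fetch_targets)
program, feed_names, fetch_targets = fluid.io.load_inference_model(
    dirname='./inference_model', executor=exe)

mapper = PaddleOpMapper()
mapper.convert(program, './onnx_model', opset_number=10)  # writes x2paddle_model.onnx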