PaddlePaddle / X2Paddle
Commit c1f65a10
Authored Jul 15, 2020 by Channingss
Parent: 027bfe06

paddle2onnx support opset:9,10,11
Showing 30 changed files with 7,651 additions and 0 deletions (+7651 −0)
x2paddle/op_mapper/onnx2paddle/__init__.py  +0 −0
x2paddle/op_mapper/onnx2paddle/onnx_op_mapper.py  +92 −0
x2paddle/op_mapper/onnx2paddle/opsets/__init__.py  +0 −0
x2paddle/op_mapper/onnx2paddle/opsets/_shape_inference.py  +599 −0
x2paddle/op_mapper/onnx2paddle/opsets/custom_layer/InstanceNormalization.py  +56 −0
x2paddle/op_mapper/onnx2paddle/opsets/custom_layer/__init__.py  +115 −0
x2paddle/op_mapper/onnx2paddle/opsets/custom_layer/register.py  +55 −0
x2paddle/op_mapper/onnx2paddle/opsets/opset10.py  +37 −0
x2paddle/op_mapper/onnx2paddle/opsets/opset11.py  +37 −0
x2paddle/op_mapper/onnx2paddle/opsets/opset9.py  +1523 −0
x2paddle/op_mapper/paddle2onnx/__init__.py  +0 −0
x2paddle/op_mapper/paddle2onnx/opset10/__init__.py  +0 −0
x2paddle/op_mapper/paddle2onnx/opset10/opset.py  +61 −0
x2paddle/op_mapper/paddle2onnx/opset10/paddle_custom_layer/__init__.py  +0 −0
x2paddle/op_mapper/paddle2onnx/opset10/paddle_custom_layer/im2sequence.py  +80 −0
x2paddle/op_mapper/paddle2onnx/opset10/paddle_custom_layer/multiclass_nms.py  +416 −0
x2paddle/op_mapper/paddle2onnx/opset10/paddle_custom_layer/yolo_box.py  +822 −0
x2paddle/op_mapper/paddle2onnx/opset11/__init__.py  +0 −0
x2paddle/op_mapper/paddle2onnx/opset11/opset.py  +249 −0
x2paddle/op_mapper/paddle2onnx/opset11/paddle_custom_layer/__init__.py  +0 −0
x2paddle/op_mapper/paddle2onnx/opset11/paddle_custom_layer/im2sequence.py  +80 −0
x2paddle/op_mapper/paddle2onnx/opset11/paddle_custom_layer/multiclass_nms.py  +416 −0
x2paddle/op_mapper/paddle2onnx/opset11/paddle_custom_layer/yolo_box.py  +841 −0
x2paddle/op_mapper/paddle2onnx/opset9/__init__.py  +0 −0
x2paddle/op_mapper/paddle2onnx/opset9/opset.py  +746 −0
x2paddle/op_mapper/paddle2onnx/opset9/paddle_custom_layer/__init__.py  +0 −0
x2paddle/op_mapper/paddle2onnx/opset9/paddle_custom_layer/im2sequence.py  +80 −0
x2paddle/op_mapper/paddle2onnx/opset9/paddle_custom_layer/multiclass_nms.py  +416 −0
x2paddle/op_mapper/paddle2onnx/opset9/paddle_custom_layer/yolo_box.py  +822 −0
x2paddle/op_mapper/paddle2onnx/paddle_op_mapper.py  +108 −0
x2paddle/op_mapper/onnx2paddle/__init__.py  (new file, mode 100644, empty)
x2paddle/op_mapper/onnx2paddle/onnx_op_mapper.py  (new file, mode 100644)

# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from x2paddle.op_mapper.onnx2paddle.opsets.opset9 import OpSet9
from x2paddle.core.op_mapper import OpMapper
from x2paddle.op_mapper.onnx_opsets.custom_layer import *
from x2paddle.decoder.onnx_decoder import ONNXGraph, ONNXGraphNode, ONNXGraphDataNode


class ONNXOpMapper(OpMapper):
    def __init__(self, decoder):
        super(ONNXOpMapper, self).__init__()
        self.support_op_sets = [9, ]
        self.default_op_set = 9
        self.graph = decoder.graph
        self.opset = self.create_opset(decoder)
        if not self.op_checker():
            raise Exception("Model is not supported yet.")

        # map ops
        print("Total nodes: {}".format(
            sum([
                isinstance(node, ONNXGraphNode)
                for name, node in self.graph.node_map.items()
            ])))
        print("Nodes converting ...")
        for node_name in self.graph.topo_sort:
            node = self.graph.get_node(node_name)
            op = node.layer_type
            if hasattr(self.opset, op):
                func = getattr(self.opset, op)
                func(node)
            elif op in self.opset.default_op_mapping:
                self.opset.directly_map(node)
            elif op in custom_layers:
                self.opset.deal_custom_layer(node)
            elif op in self.opset.elementwise_ops:
                self.opset.elementwise_map(node)
        print("Nodes converted.")
        self.weights = self.opset.weights
        self.omit_nodes = self.opset.omit_nodes
        self.used_custom_layers = self.opset.used_custom_layers

    def op_checker(self):
        unsupported_ops = set()
        for node_name in self.graph.topo_sort:
            node = self.graph.get_node(node_name)
            op = node.layer_type
            if not hasattr(self.opset, op) and \
                    op not in self.opset.default_op_mapping and \
                    op not in custom_layers and \
                    op not in self.opset.elementwise_ops:
                unsupported_ops.add(op)
        if len(unsupported_ops) == 0:
            return True
        else:
            print("There are {} ops not supported yet, listed below:".format(
                len(unsupported_ops)))
            for op in unsupported_ops:
                print(op)
            return False

    def create_opset(self, decoder):
        run_op_set = self.default_op_set
        opset = ''
        if decoder.op_set in self.support_op_sets:
            opset = 'OpSet' + str(decoder.op_set)
        elif decoder.op_set < self.default_op_set:
            opset = 'OpSet' + str(self.default_op_set)
        else:
            for op_set in self.support_op_sets:
                if decoder.op_set > op_set:
                    run_op_set = op_set
                else:
                    break
            opset = 'OpSet' + str(run_op_set)
        print('Currently, onnx2paddle supports converting ONNX models with opset_version {}; '
              'the opset_version of your ONNX model is {}, so it is automatically treated as opset {}.'
              .format(self.support_op_sets, decoder.op_set, run_op_set))
        return eval(opset)(decoder)
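The fallback rule in create_opset is easy to miss inside the class, so here is a minimal standalone sketch of the same logic (the function name pick_opset and the example value are made up for illustration; support_op_sets mirrors the [9] used in this commit):

def pick_opset(model_opset, support_op_sets=(9, ), default_op_set=9):
    # mirrors ONNXOpMapper.create_opset: exact match, floor at the default,
    # otherwise fall back to the largest supported opset below the model's opset
    run_op_set = default_op_set
    if model_opset in support_op_sets:
        run_op_set = model_opset
    elif model_opset < default_op_set:
        run_op_set = default_op_set
    else:
        for op_set in support_op_sets:
            if model_opset > op_set:
                run_op_set = op_set
            else:
                break
    return 'OpSet' + str(run_op_set)

print(pick_opset(11))  # -> 'OpSet9': an opset-11 model is handled by the opset-9 mapper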
x2paddle/op_mapper/onnx2paddle/opsets/__init__.py  (new file, mode 100644, empty)
x2paddle/op_mapper/onnx2paddle/opsets/_shape_inference.py  (new file, mode 100644; diff collapsed, not shown)
x2paddle/op_mapper/onnx2paddle/opsets/custom_layer/InstanceNormalization.py  (new file, mode 100644)

# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .register import register
import paddle.fluid as fluid  # fluid is referenced inside InstanceNormalization_layer below


def InstanceNormalization_shape(input_shape):
    return input_shape


def InstanceNormalization_layer(inputs, name=None):
    # TODO(lvmengsi@baidu.com): Check the accuracy when using fluid.layers.layer_norm.
    epsilon = 1e-5
    input_ = inputs[0]
    mean = fluid.layers.reduce_mean(input_, dim=[2, 3], keep_dim=True)
    var = fluid.layers.reduce_mean(
        fluid.layers.square(input_ - mean), dim=[2, 3], keep_dim=True)
    if name is not None:
        scale_name = name + "_scale"
        offset_name = name + "_offset"
    scale_param = inputs[1]
    offset_param = inputs[2]
    scale = fluid.layers.create_parameter(
        name=scale_param.name, shape=input_.shape[1:2], dtype="float32")
    offset = fluid.layers.create_parameter(
        name=offset_param.name, shape=input_.shape[1:2], dtype="float32")

    tmp = fluid.layers.elementwise_mul(x=(input_ - mean), y=scale, axis=1)
    tmp = tmp / fluid.layers.sqrt(var + epsilon)
    tmp = fluid.layers.elementwise_add(tmp, offset, axis=1)
    return tmp


def InstanceNormalization_weights(name, data=None):
    weights_name = [name + '_scale']
    return weights_name


register(
    kind='InstanceNormalization',
    shape=InstanceNormalization_shape,
    layer=InstanceNormalization_layer,
    child_func=None,
    weights=InstanceNormalization_weights)
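As a sanity reference, this is what InstanceNormalization_layer computes, written as a small NumPy sketch (not part of the commit): per-sample, per-channel statistics over the spatial dimensions, followed by scale and offset.

import numpy as np

def instance_norm_ref(x, scale, offset, epsilon=1e-5):
    # x: (N, C, H, W); scale, offset: (C,)
    mean = x.mean(axis=(2, 3), keepdims=True)
    var = ((x - mean) ** 2).mean(axis=(2, 3), keepdims=True)
    return scale[None, :, None, None] * (x - mean) / np.sqrt(var + epsilon) \
        + offset[None, :, None, None]

x = np.random.rand(1, 3, 4, 4).astype('float32')
y = instance_norm_ref(x, np.ones(3, 'float32'), np.zeros(3, 'float32'))
print(y.shape)  # (1, 3, 4, 4), matching InstanceNormalization_shape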
x2paddle/op_mapper/onnx2paddle/opsets/custom_layer/__init__.py  (new file, mode 100644)

# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .register import get_registered_layers

#custom layer import begins
from . import InstanceNormalization
#custom layer import ends

custom_layers = get_registered_layers()


def set_args(f, params):
    """ set args for function 'f' using the parameters in node.layer.param

    Args:
        f (function): a python function object
        params (object): an object that contains the attributes needed by f's arguments

    Returns:
        arg_names (list): a list of argument names
        kwargs (dict): a dict of the needed arguments
    """
    argc = f.__code__.co_argcount
    arg_list = f.__code__.co_varnames[0:argc]
    kwargs = {}
    for arg_name in arg_list:
        if hasattr(params, arg_name) and params is not None:
            kwargs[arg_name] = getattr(params, arg_name)
    return arg_list, kwargs


def has_layer(layer_type):
    """ test whether this layer exists in the custom layers
    """
    return layer_type in custom_layers


def get_params(layer, layer_type):
    import re
    if layer_type.lower() == "deconvolution" or layer_type.lower(
    ) == "convolutiondepthwise":
        param_name = '_'.join(('convolution', 'param'))
    elif layer_type.lower() == "normalize":
        param_name = '_'.join(('norm', 'param'))
    elif len(layer_type) - len(re.sub("[A-Z]", "", layer_type)) >= 2:
        s = ''
        tmp_name = ''
        for i, ch in enumerate(layer_type):
            if i == 0:
                s += ch.lower()
                continue
            elif ch.isupper() and layer_type[i - 1].islower():
                tmp_name += (s + '_')
                s = ''
            s += ch.lower()
        tmp_name += s
        param_name = '_'.join((tmp_name, 'param'))
    else:
        param_name = '_'.join((layer_type.lower(), 'param'))
    return getattr(layer, param_name, None)


def compute_output_shape(node):
    """ compute the output shape of the custom layer
    """
    layer_type = node.layer_type
    assert layer_type in custom_layers, "layer[%s] not exist in custom layers" % (
        layer_type)
    shape_func = custom_layers[layer_type]['shape']
    layer = node.layer
    params = get_params(layer, layer_type)
    arg_names, kwargs = set_args(shape_func, params)
    input_shape = node.input_shape
    return shape_func(input_shape, **kwargs)


def make_custom_layer(node):
    """ get the code which implements the custom layer function
    """
    layer_type = node.layer_type
    assert layer_type in custom_layers, "layer[%s] not exist in custom layers" % (
        layer_type)
    layer_func = custom_layers[layer_type]['layer']
    import inspect
    return inspect.getsource(layer_func), layer_func


def make_custom_child_func(node):
    """ get the code which implements the custom layer's child function
    """
    layer_type = node.layer_type
    child_func = custom_layers[layer_type]['child_func']
    if child_func is None:
        return None, child_func
    import inspect
    return inspect.getsource(child_func), child_func


def deal_weights(node, data=None):
    """ deal with the weights of the custom layer
    """
    layer_type = node.layer_type
    weights_func = custom_layers[layer_type]['weights']
    name = node.layer_name
    return weights_func(name, data)
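The CamelCase-to-snake_case branch of get_params is the least obvious part; the sketch below (a made-up helper, with the deconvolution/normalize special cases omitted) isolates that naming rule.

import re

def camel_to_param_name(layer_type):
    # same rule as the third branch of get_params: types with two or more
    # capital letters are converted to snake_case before appending '_param'
    if len(layer_type) - len(re.sub("[A-Z]", "", layer_type)) >= 2:
        s, tmp_name = '', ''
        for i, ch in enumerate(layer_type):
            if i == 0:
                s += ch.lower()
                continue
            elif ch.isupper() and layer_type[i - 1].islower():
                tmp_name += (s + '_')
                s = ''
            s += ch.lower()
        tmp_name += s
        return tmp_name + '_param'
    return layer_type.lower() + '_param'

print(camel_to_param_name('PriorBox'))        # prior_box_param
print(camel_to_param_name('ShuffleChannel'))  # shuffle_channel_param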
x2paddle/op_mapper/onnx2paddle/opsets/custom_layer/register.py  (new file, mode 100644)

# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" this module provides 'register' for registering customized layers
"""

g_custom_layers = {}


def register(kind, shape, layer, child_func, weights):
    """ register a custom layer or a list of custom layers

    Args:
        @kind (str or list): type name of the layer
        @shape (function): a function to generate the shape of the layer's output
        @layer (function): a function to generate the paddle code of the layer
        @weights (function): a function to deal with the weights data

    Returns:
        None
    """
    assert type(shape).__name__ == 'function', 'shape should be a function'
    assert type(layer).__name__ == 'function', 'layer should be a function'

    if type(kind) is str:
        kind = [kind]
    else:
        assert type(
            kind) is list, 'invalid param "kind" for register, not a list or str'

    for k in kind:
        assert type(
            k) is str, 'invalid param "kind" for register, not a list of str'
        assert k not in g_custom_layers, 'this type[%s] has already been registered' % (
            k)
        g_custom_layers[k] = {
            'shape': shape,
            'layer': layer,
            'child_func': child_func,
            'weights': weights
        }


def get_registered_layers():
    return g_custom_layers
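A hypothetical usage sketch of register(): the layer name MyLayer and its three functions are invented here purely to show the expected call shape, and the import path assumes this commit's module layout.

from x2paddle.op_mapper.onnx2paddle.opsets.custom_layer.register import (
    register, get_registered_layers)

def MyLayer_shape(input_shape):
    # shape function: output shape given the input shape(s)
    return input_shape

def MyLayer_layer(inputs, name=None):
    # layer function: returns the fluid code / variable for the layer
    return inputs[0]

def MyLayer_weights(name, data=None):
    # weights function: names of the parameters this layer contributes
    return []

register(kind='MyLayer', shape=MyLayer_shape, layer=MyLayer_layer,
         child_func=None, weights=MyLayer_weights)
print('MyLayer' in get_registered_layers())  # True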
x2paddle/op_mapper/onnx2paddle/opsets/opset10.py  (new file, mode 100644)

# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from x2paddle.core.graph import GraphNode
from x2paddle.core.op_mapper import OpMapper
from x2paddle.core.fluid_code import Layer
from x2paddle.core.fluid_code import FluidCode
from x2paddle.decoder.onnx_decoder import ONNXGraph, ONNXGraphNode, ONNXGraphDataNode
from x2paddle.op_mapper.onnx.custom_layer import *
from x2paddle.op_mapper.onnx.opset9 import ONNXOpMapperOpSet9
from x2paddle.core.util import string
import numpy as np
import onnx
import onnx.numpy_helper as numpy_helper
from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
import logging as _logging
from collections import OrderedDict
import math
import os
import shutil
from functools import reduce


class ONNXOpMapperOpSet10(ONNXOpMapperOpSet9):
    def __init__(self, decoder):
        super(ONNXOpMapperOpSet10, self).__init__(decoder)
x2paddle/op_mapper/onnx2paddle/opsets/opset11.py  (new file, mode 100644)

# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from x2paddle.core.graph import GraphNode
from x2paddle.core.op_mapper import OpMapper
from x2paddle.core.fluid_code import Layer
from x2paddle.core.fluid_code import FluidCode
from x2paddle.decoder.onnx_decoder import ONNXGraph, ONNXGraphNode, ONNXGraphDataNode
from x2paddle.op_mapper.onnx.custom_layer import *
from x2paddle.op_mapper.onnx.opset10 import ONNXOpMapperOpSet10
from x2paddle.core.util import string
import numpy as np
import onnx
import onnx.numpy_helper as numpy_helper
from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
import logging as _logging
from collections import OrderedDict
import math
import os
import shutil
from functools import reduce


class ONNXOpMapperOpSet11(ONNXOpMapperOpSet10):
    def __init__(self, decoder):
        super(ONNXOpMapperOpSet11, self).__init__(decoder)
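These opset-10/11 mapper classes add no methods of their own; a small made-up sketch shows why that still works: the hasattr-based dispatch in ONNXOpMapper walks the inheritance chain, so everything implemented for opset 9 is reused automatically and only ops whose behaviour changed need overriding.

class OpSet9Demo:
    def Relu(self, node):
        return "mapped by OpSet9"

class OpSet10Demo(OpSet9Demo):
    pass  # nothing changed for this op between opset 9 and 10

class OpSet11Demo(OpSet10Demo):
    pass

mapper = OpSet11Demo()
op = "Relu"
if hasattr(mapper, op):          # same check as in ONNXOpMapper.__init__
    print(getattr(mapper, op)(None))  # "mapped by OpSet9"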
x2paddle/op_mapper/onnx2paddle/opsets/opset9.py  (new file, mode 100644; diff collapsed, not shown)
x2paddle/op_mapper/paddle2onnx/__init__.py  (new file, mode 100644, empty)
x2paddle/op_mapper/paddle2onnx/opset10/__init__.py  (new file, mode 100644, empty)
x2paddle/op_mapper/paddle2onnx/opset10/opset.py  (new file, mode 100644)

# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
import sys
import x2paddle
import os
import numpy as np
import paddle.fluid.core as core
import paddle.fluid as fluid
import onnx
from onnx import helper, onnx_pb
from x2paddle.op_mapper.paddle2onnx.opset9.opset import OpSet9


class OpSet10(OpSet9):
    def __init__(self):
        super(OpSet10, self).__init__()

    def slice(self, op, block):
        axes = op.attr('axes')
        starts = op.attr('starts')
        ends = op.attr('ends')
        axes_name = self.get_name(op.type, 'axes')
        starts_name = self.get_name(op.type, 'starts')
        ends_name = self.get_name(op.type, 'ends')
        axes_node = self.make_constant_node(
            axes_name, onnx_pb.TensorProto.INT64, axes)
        starts_node = self.make_constant_node(
            starts_name, onnx_pb.TensorProto.INT64, starts)
        ends_node = self.make_constant_node(
            ends_name, onnx_pb.TensorProto.INT64, ends)
        node = helper.make_node(
            "Slice",
            inputs=[op.input('Input')[0], starts_name, ends_name, axes_name],
            outputs=op.output('Out'), )
        return [starts_node, ends_node, axes_node, node]

    def im2sequence(self, op, block):
        from .paddle_custom_layer.im2sequence import im2sequence
        return im2sequence(op, block)

    def yolo_box(self, op, block):
        from .paddle_custom_layer.yolo_box import yolo_box
        return yolo_box(op, block)

    def multiclass_nms(self, op, block):
        from .paddle_custom_layer.multiclass_nms import multiclass_nms
        return multiclass_nms(op, block)
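The reason OpSet10 overrides slice is that, starting with ONNX opset 10, Slice takes starts/ends/axes as tensor inputs rather than attributes; each attribute therefore has to be materialised as a Constant node first. A standalone sketch of that pattern with onnx.helper (tensor names made up) follows.

from onnx import helper, TensorProto

def make_const(name, vals):
    # a Constant node holding an INT64 1-D tensor
    return helper.make_node(
        'Constant', inputs=[], outputs=[name],
        value=helper.make_tensor(name + '@const', TensorProto.INT64,
                                 dims=[len(vals)], vals=vals))

starts_node = make_const('slice.starts', [0])
ends_node = make_const('slice.ends', [3])
axes_node = make_const('slice.axes', [1])
slice_node = helper.make_node(
    'Slice', inputs=['x', 'slice.starts', 'slice.ends', 'slice.axes'],
    outputs=['y'])
print(slice_node.op_type, list(slice_node.input))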
x2paddle/op_mapper/paddle2onnx/opset10/paddle_custom_layer/__init__.py  (new file, mode 100644, empty)
x2paddle/op_mapper/paddle2onnx/opset10/paddle_custom_layer/im2sequence.py  (new file, mode 100644)

import onnx
import numpy as np
from onnx import onnx_pb, helper

im2seq_counter = 0


def im2sequence(op, block):
    global im2seq_counter
    n, c, h, w = block.var(op.input('X')[0]).shape
    assert h > 0 and w > 0, "Only fixed input shapes are supported for the im2sequence operator."
    stride_h, stride_w = op.attr('strides')
    paddings = op.attr('paddings')
    assert op.attr('out_stride') != 1, "Only out_stride==1 is supported for the im2sequence operator."
    h = h + paddings[0] + paddings[1]
    w = w + paddings[1] + paddings[2]
    kernel_h, kernel_w = op.attr('kernels')
    out_h = 1 + (h - kernel_h + stride_h - 1) // stride_h
    out_w = 1 + (w - kernel_w + stride_w - 1) // stride_w
    h_steps = list()
    for i in range(out_h):
        h_steps.append([i * stride_h, i * stride_h + kernel_h])
    w_steps = list()
    for i in range(out_w):
        w_steps.append([i * stride_w, i * stride_w + kernel_w])

    nodes = list()
    slice_blocks = list()
    for i in range(out_h):
        for j in range(out_w):
            starts_name = "im2sequence.starts.{}.{}.{}".format(im2seq_counter, i, j)
            starts_tensor = helper.make_tensor(
                name=starts_name,
                data_type=onnx_pb.TensorProto.INT64,
                dims=[4],
                vals=[0, 0, h_steps[i][0], w_steps[j][0]])
            ends_name = "im2sequence.ends.{}.{}.{}".format(im2seq_counter, i, j)
            ends_tensor = helper.make_tensor(
                name=ends_name,
                data_type=onnx_pb.TensorProto.INT64,
                dims=[4],
                vals=[999999, 999999, h_steps[i][1], w_steps[j][1]])
            starts_node = helper.make_node(
                'Constant', inputs=[], outputs=[starts_name], value=starts_tensor)
            ends_node = helper.make_node(
                'Constant', inputs=[], outputs=[ends_name], value=ends_tensor)
            nodes.extend([starts_node, ends_node])

            slice_block_name = "im2sequence.slice.{}.{}.{}".format(im2seq_counter, i, j)
            slice_block_node = helper.make_node(
                'Slice',
                inputs=[op.input('X')[0], starts_name, ends_name],
                outputs=[slice_block_name])
            flatten_block_name = "im2sequence.flatten.{}.{}.{}".format(im2seq_counter, i, j)
            flatten_block_node = helper.make_node(
                "Flatten",
                inputs=[slice_block_name],
                outputs=[flatten_block_name],
                axis=0)
            nodes.extend([slice_block_node, flatten_block_node])
            slice_blocks.append(flatten_block_name)

    concat_block_name = "im2sequence.concat_block.{}".format(im2seq_counter)
    # concat_block_node = helper.make_node("Concat", inputs=slice_blocks, outputs=[concat_block_name], axis=0)
    concat_block_node = helper.make_node(
        "Concat", inputs=slice_blocks, outputs=op.output('Out'), axis=0)
    nodes.append(concat_block_node)
    print("\n\n==========Important Notice===========")
    print("Since the im2sequence operator is used in your PaddlePaddle model, "
          "the translated ONNX model only supports input data with batch_size=1.")
    print("======================================\n")
    return nodes
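A quick worked example of the window arithmetic above (shape and attribute values made up): each output position becomes one Slice + Flatten pair, so the exported graph contains out_h * out_w such blocks before the final Concat.

# hypothetical 8x8 feature map, 4x4 kernel, stride 4, no padding
h, w = 8, 8
paddings = [0, 0, 0, 0]
kernel_h, kernel_w = 4, 4
stride_h, stride_w = 4, 4
h = h + paddings[0] + paddings[1]
w = w + paddings[1] + paddings[2]
out_h = 1 + (h - kernel_h + stride_h - 1) // stride_h
out_w = 1 + (w - kernel_w + stride_w - 1) // stride_w
print(out_h, out_w, out_h * out_w)  # 2 2 4 -> 4 Slice/Flatten pairs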
x2paddle/op_mapper/paddle2onnx/opset10/paddle_custom_layer/multiclass_nms.py  (new file, mode 100644)

# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
import sys
import os
import numpy as np
import paddle.fluid.core as core
import paddle.fluid as fluid
import onnx
import warnings
from onnx import helper, onnx_pb


def multiclass_nms(op, block):
    """
    Convert the paddle multiclass_nms op to ONNX ops.
    This op selects boxes from the original boxes.
    """
    inputs = dict()
    outputs = dict()
    attrs = dict()
    for name in op.input_names:
        inputs[name] = op.input(name)
    for name in op.output_names:
        outputs[name] = op.output(name)
    for name in op.attr_names:
        attrs[name] = op.attr(name)

    result_name = outputs['Out'][0]
    background = attrs['background_label']
    normalized = attrs['normalized']
    if normalized == False:
        warnings.warn(
            'The parameter normalized of the multiclass_nms OP of Paddle is False, '
            'which differs from ONNX. Please set normalized=True in multiclass_nms of Paddle.')

    # convert the paddle attributes to onnx tensors
    name_score_threshold = [outputs['Out'][0] + "@score_threshold"]
    name_iou_threshold = [outputs['Out'][0] + "@iou_threshold"]
    name_keep_top_k = [outputs['Out'][0] + '@keep_top_k']
    name_keep_top_k_2D = [outputs['Out'][0] + '@keep_top_k_1D']

    node_score_threshold = onnx.helper.make_node(
        'Constant', inputs=[], outputs=name_score_threshold,
        value=onnx.helper.make_tensor(
            name=name_score_threshold[0] + "@const",
            data_type=onnx.TensorProto.FLOAT, dims=(),
            vals=[float(attrs['score_threshold'])]))

    node_iou_threshold = onnx.helper.make_node(
        'Constant', inputs=[], outputs=name_iou_threshold,
        value=onnx.helper.make_tensor(
            name=name_iou_threshold[0] + "@const",
            data_type=onnx.TensorProto.FLOAT, dims=(),
            vals=[float(attrs['nms_threshold'])]))

    node_keep_top_k = onnx.helper.make_node(
        'Constant', inputs=[], outputs=name_keep_top_k,
        value=onnx.helper.make_tensor(
            name=name_keep_top_k[0] + "@const",
            data_type=onnx.TensorProto.INT64, dims=(),
            vals=[np.int64(attrs['keep_top_k'])]))

    node_keep_top_k_2D = onnx.helper.make_node(
        'Constant', inputs=[], outputs=name_keep_top_k_2D,
        value=onnx.helper.make_tensor(
            name=name_keep_top_k_2D[0] + "@const",
            data_type=onnx.TensorProto.INT64, dims=[1, 1],
            vals=[np.int64(attrs['keep_top_k'])]))

    # the paddle data format is x1,y1,x2,y2
    kwargs = {'center_point_box': 0}

    name_select_nms = [outputs['Out'][0] + "@select_index"]
    node_select_nms = onnx.helper.make_node(
        'NonMaxSuppression',
        inputs=inputs['BBoxes'] + inputs['Scores'] + name_keep_top_k +
        name_iou_threshold + name_score_threshold,
        outputs=name_select_nms)
    # step 1 nodes: select the nms class
    node_list = [
        node_score_threshold, node_iou_threshold, node_keep_top_k,
        node_keep_top_k_2D, node_select_nms
    ]

    # create some const values to use
    name_const_value = [
        result_name + "@const_0", result_name + "@const_1",
        result_name + "@const_2", result_name + "@const_-1"
    ]
    value_const_value = [0, 1, 2, -1]
    for name, value in zip(name_const_value, value_const_value):
        node = onnx.helper.make_node(
            'Constant', inputs=[], outputs=[name],
            value=onnx.helper.make_tensor(
                name=name + "@const",
                data_type=onnx.TensorProto.INT64, dims=[1], vals=[value]))
        node_list.append(node)

    # In this code block we decode the raw score data, reshaping N * C * M to 1 * N*C*M,
    # and at the same time decode the selected indices to 1 * D, then gather the select_indices.
    outputs_gather_1 = [result_name + "@gather_1"]
    node_gather_1 = onnx.helper.make_node(
        'Gather', inputs=name_select_nms + [result_name + "@const_1"],
        outputs=outputs_gather_1, axis=1)
    node_list.append(node_gather_1)

    outputs_squeeze_gather_1 = [result_name + "@sequeeze_gather_1"]
    node_squeeze_gather_1 = onnx.helper.make_node(
        'Squeeze', inputs=outputs_gather_1,
        outputs=outputs_squeeze_gather_1, axes=[1])
    node_list.append(node_squeeze_gather_1)

    outputs_gather_2 = [result_name + "@gather_2"]
    node_gather_2 = onnx.helper.make_node(
        'Gather', inputs=name_select_nms + [result_name + "@const_2"],
        outputs=outputs_gather_2, axis=1)
    node_list.append(node_gather_2)

    # keep only the entries whose class is not the background class
    if background == 0:
        outputs_nonzero = [result_name + "@nonzero"]
        node_nonzero = onnx.helper.make_node(
            'NonZero', inputs=outputs_squeeze_gather_1, outputs=outputs_nonzero)
        node_list.append(node_nonzero)
    else:
        name_thresh = [result_name + "@thresh"]
        node_thresh = onnx.helper.make_node(
            'Constant', inputs=[], outputs=name_thresh,
            value=onnx.helper.make_tensor(
                name=name_thresh[0] + "@const",
                data_type=onnx.TensorProto.INT32, dims=[1], vals=[-1]))
        node_list.append(node_thresh)

        outputs_cast = [result_name + "@cast"]
        node_cast = onnx.helper.make_node(
            'Cast', inputs=outputs_squeeze_gather_1, outputs=outputs_cast, to=6)
        node_list.append(node_cast)

        outputs_greater = [result_name + "@greater"]
        node_greater = onnx.helper.make_node(
            'Greater', inputs=outputs_cast + name_thresh, outputs=outputs_greater)
        node_list.append(node_greater)

        outputs_nonzero = [result_name + "@nonzero"]
        node_nonzero = onnx.helper.make_node(
            'NonZero', inputs=outputs_greater, outputs=outputs_nonzero)
        node_list.append(node_nonzero)

    outputs_gather_1_nonzero = [result_name + "@gather_1_nonzero"]
    node_gather_1_nonzero = onnx.helper.make_node(
        'Gather', inputs=outputs_gather_1 + outputs_nonzero,
        outputs=outputs_gather_1_nonzero, axis=0)
    node_list.append(node_gather_1_nonzero)

    outputs_gather_2_nonzero = [result_name + "@gather_2_nonzero"]
    node_gather_2_nonzero = onnx.helper.make_node(
        'Gather', inputs=outputs_gather_2 + outputs_nonzero,
        outputs=outputs_gather_2_nonzero, axis=0)
    node_list.append(node_gather_2_nonzero)

    # reshape scores from N * C * M to (N*C*M) * 1
    outputs_reshape_scores_rank1 = [result_name + "@reshape_scores_rank1"]
    node_reshape_scores_rank1 = onnx.helper.make_node(
        "Reshape", inputs=inputs['Scores'] + [result_name + "@const_-1"],
        outputs=outputs_reshape_scores_rank1)
    node_list.append(node_reshape_scores_rank1)

    # get the shape of the scores
    outputs_shape_scores = [result_name + "@shape_scores"]
    node_shape_scores = onnx.helper.make_node(
        'Shape', inputs=inputs['Scores'], outputs=outputs_shape_scores)
    node_list.append(node_shape_scores)

    # gather index 2 of the scores' shape
    outputs_gather_scores_dim1 = [result_name + "@gather_scores_dim1"]
    node_gather_scores_dim1 = onnx.helper.make_node(
        'Gather', inputs=outputs_shape_scores + [result_name + "@const_2"],
        outputs=outputs_gather_scores_dim1, axis=0)
    node_list.append(node_gather_scores_dim1)

    # mul: class * M
    outputs_mul_classnum_boxnum = [result_name + "@mul_classnum_boxnum"]
    node_mul_classnum_boxnum = onnx.helper.make_node(
        'Mul', inputs=outputs_gather_1_nonzero + outputs_gather_scores_dim1,
        outputs=outputs_mul_classnum_boxnum)
    node_list.append(node_mul_classnum_boxnum)

    # add: class * M + index
    outputs_add_class_M_index = [result_name + "@add_class_M_index"]
    node_add_class_M_index = onnx.helper.make_node(
        'Add', inputs=outputs_mul_classnum_boxnum + outputs_gather_2_nonzero,
        outputs=outputs_add_class_M_index)
    node_list.append(node_add_class_M_index)

    # squeeze the indices to 1 dim
    outputs_squeeze_select_index = [result_name + "@squeeze_select_index"]
    node_squeeze_select_index = onnx.helper.make_node(
        'Squeeze', inputs=outputs_add_class_M_index,
        outputs=outputs_squeeze_select_index, axes=[0, 2])
    node_list.append(node_squeeze_select_index)

    # gather the data from the flattened scores
    outputs_gather_select_scores = [result_name + "@gather_select_scores"]
    node_gather_select_scores = onnx.helper.make_node(
        'Gather',
        inputs=outputs_reshape_scores_rank1 + outputs_squeeze_select_index,
        outputs=outputs_gather_select_scores, axis=0)
    node_list.append(node_gather_select_scores)

    # get the number of selections to feed into TopK
    outputs_shape_select_num = [result_name + "@shape_select_num"]
    node_shape_select_num = onnx.helper.make_node(
        'Shape', inputs=outputs_gather_select_scores,
        outputs=outputs_shape_select_num)
    node_list.append(node_shape_select_num)

    outputs_gather_select_num = [result_name + "@gather_select_num"]
    node_gather_select_num = onnx.helper.make_node(
        'Gather', inputs=outputs_shape_select_num + [result_name + "@const_0"],
        outputs=outputs_gather_select_num, axis=0)
    node_list.append(node_gather_select_num)

    outputs_unsqueeze_select_num = [result_name + "@unsqueeze_select_num"]
    node_unsqueeze_select_num = onnx.helper.make_node(
        'Unsqueeze', inputs=outputs_gather_select_num,
        outputs=outputs_unsqueeze_select_num, axes=[0])
    node_list.append(node_unsqueeze_select_num)

    outputs_concat_topK_select_num = [result_name + "@conat_topK_select_num"]
    node_conat_topK_select_num = onnx.helper.make_node(
        'Concat', inputs=outputs_unsqueeze_select_num + name_keep_top_k_2D,
        outputs=outputs_concat_topK_select_num, axis=0)
    node_list.append(node_conat_topK_select_num)

    outputs_cast_concat_topK_select_num = [result_name + "@concat_topK_select_num"]
    node_outputs_cast_concat_topK_select_num = onnx.helper.make_node(
        'Cast', inputs=outputs_concat_topK_select_num,
        outputs=outputs_cast_concat_topK_select_num, to=6)
    node_list.append(node_outputs_cast_concat_topK_select_num)

    # get min(topK, num_select)
    outputs_compare_topk_num_select = [result_name + "@compare_topk_num_select"]
    node_compare_topk_num_select = onnx.helper.make_node(
        'ReduceMin', inputs=outputs_cast_concat_topK_select_num,
        outputs=outputs_compare_topk_num_select, keepdims=0)
    node_list.append(node_compare_topk_num_select)

    # unsqueeze the indices to a 1-D tensor
    outputs_unsqueeze_topk_select_indices = [result_name + "@unsqueeze_topk_select_indices"]
    node_unsqueeze_topk_select_indices = onnx.helper.make_node(
        'Unsqueeze', inputs=outputs_compare_topk_num_select,
        outputs=outputs_unsqueeze_topk_select_indices, axes=[0])
    node_list.append(node_unsqueeze_topk_select_indices)

    # cast the indices to INT64
    outputs_cast_topk_indices = [result_name + "@cast_topk_indices"]
    node_cast_topk_indices = onnx.helper.make_node(
        'Cast', inputs=outputs_unsqueeze_topk_select_indices,
        outputs=outputs_cast_topk_indices, to=7)
    node_list.append(node_cast_topk_indices)

    # select topk scores and indices
    outputs_topk_select_topk_indices = [
        result_name + "@topk_select_topk_values",
        result_name + "@topk_select_topk_indices"
    ]
    node_topk_select_topk_indices = onnx.helper.make_node(
        'TopK', inputs=outputs_gather_select_scores + outputs_cast_topk_indices,
        outputs=outputs_topk_select_topk_indices)
    node_list.append(node_topk_select_topk_indices)

    # gather topk label, scores, boxes
    outputs_gather_topk_scores = [result_name + "@gather_topk_scores"]
    node_gather_topk_scores = onnx.helper.make_node(
        'Gather',
        inputs=outputs_gather_select_scores + [outputs_topk_select_topk_indices[1]],
        outputs=outputs_gather_topk_scores, axis=0)
    node_list.append(node_gather_topk_scores)

    outputs_gather_topk_class = [result_name + "@gather_topk_class"]
    node_gather_topk_class = onnx.helper.make_node(
        'Gather',
        inputs=outputs_gather_1_nonzero + [outputs_topk_select_topk_indices[1]],
        outputs=outputs_gather_topk_class, axis=1)
    node_list.append(node_gather_topk_class)

    # to gather the boxes, first gather the box ids, then gather the boxes
    outputs_gather_topk_boxes_id = [result_name + "@gather_topk_boxes_id"]
    node_gather_topk_boxes_id = onnx.helper.make_node(
        'Gather',
        inputs=outputs_gather_2_nonzero + [outputs_topk_select_topk_indices[1]],
        outputs=outputs_gather_topk_boxes_id, axis=1)
    node_list.append(node_gather_topk_boxes_id)

    # squeeze gather_topk_boxes_id to 1 dim
    outputs_squeeze_topk_boxes_id = [result_name + "@squeeze_topk_boxes_id"]
    node_squeeze_topk_boxes_id = onnx.helper.make_node(
        'Squeeze', inputs=outputs_gather_topk_boxes_id,
        outputs=outputs_squeeze_topk_boxes_id, axes=[0, 2])
    node_list.append(node_squeeze_topk_boxes_id)

    outputs_gather_select_boxes = [result_name + "@gather_select_boxes"]
    node_gather_select_boxes = onnx.helper.make_node(
        'Gather', inputs=inputs['BBoxes'] + outputs_squeeze_topk_boxes_id,
        outputs=outputs_gather_select_boxes, axis=1)
    node_list.append(node_gather_select_boxes)

    # concat the final result; the class must be cast to float first
    outputs_cast_topk_class = [result_name + "@cast_topk_class"]
    node_cast_topk_class = onnx.helper.make_node(
        'Cast', inputs=outputs_gather_topk_class,
        outputs=outputs_cast_topk_class, to=1)
    node_list.append(node_cast_topk_class)

    outputs_unsqueeze_topk_scores = [result_name + "@unsqueeze_topk_scores"]
    node_unsqueeze_topk_scores = onnx.helper.make_node(
        'Unsqueeze', inputs=outputs_gather_topk_scores,
        outputs=outputs_unsqueeze_topk_scores, axes=[0, 2])
    node_list.append(node_unsqueeze_topk_scores)

    inputs_concat_final_results = outputs_cast_topk_class + \
        outputs_unsqueeze_topk_scores + outputs_gather_select_boxes
    outputs_concat_final_results = outputs['Out']
    node_concat_final_results = onnx.helper.make_node(
        'Concat', inputs=inputs_concat_final_results,
        outputs=outputs_concat_final_results, axis=2)
    node_list.append(node_concat_final_results)

    return node_list
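The Mul/Add/Gather sequence above implements a flat-index lookup into the reshaped score tensor. The NumPy sketch below (not part of the commit) shows the same arithmetic: NonMaxSuppression yields (batch, class, box_index) triples, and the score of each selection sits at class * M + box_index in the flattened scores.

import numpy as np

N, C, M = 1, 3, 5
scores = np.random.rand(N, C, M).astype('float32')
selected = np.array([[0, 1, 2], [0, 2, 4]])   # (batch, class, box_index) rows
flat_scores = scores.reshape(-1)              # the "Reshape to rank 1" node
flat_index = selected[:, 1] * M + selected[:, 2]   # the Mul + Add nodes
print(np.allclose(flat_scores[flat_index],
                  scores[0, selected[:, 1], selected[:, 2]]))  # True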
x2paddle/op_mapper/paddle2onnx/opset10/paddle_custom_layer/yolo_box.py  (new file, mode 100644; diff collapsed, not shown)
x2paddle/op_mapper/paddle2onnx/opset11/__init__.py  (new file, mode 100644, empty)
x2paddle/op_mapper/paddle2onnx/opset11/opset.py  (new file, mode 100644)

# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
import sys
import x2paddle
import os
import numpy as np
import paddle.fluid.core as core
import paddle.fluid as fluid
import onnx
from onnx import helper, onnx_pb
from x2paddle.op_mapper.paddle2onnx.opset10.opset import OpSet10


class OpSet11(OpSet10):
    def __init__(self):
        super(OpSet11, self).__init__()

    def relu6(self, op, block):
        min_name = self.get_name(op.type, 'min')
        max_name = self.get_name(op.type, 'max')
        min_node = self.make_constant_node(min_name, onnx_pb.TensorProto.FLOAT, 0)
        max_node = self.make_constant_node(max_name, onnx_pb.TensorProto.FLOAT,
                                           op.attr('threshold'))
        node = helper.make_node(
            'Clip',
            inputs=[op.input('X')[0], min_name, max_name],
            outputs=op.output('Out'), )
        return [min_node, max_node, node]

    def bilinear_interp(self, op, block):
        input_names = op.input_names
        coordinate_transformation_mode = ''
        align_corners = op.attr('align_corners')
        align_mode = op.attr('align_mode')
        if align_corners:
            coordinate_transformation_mode = 'align_corners'
        elif align_mode == 1:
            coordinate_transformation_mode = 'asymmetric'
        else:
            coordinate_transformation_mode = 'half_pixel'
        if ('OutSize' in input_names and len(op.input('OutSize')) > 0) or (
                'SizeTensor' in input_names and
                len(op.input('SizeTensor')) > 0):
            node_list = list()
            roi_node = self.make_constant_node(
                self.get_name(op.type, 'roi'), onnx_pb.TensorProto.FLOAT,
                [1, 1, 1, 1, 1, 1, 1, 1])
            roi_name = self.get_name(op.type, 'roi')
            roi_node = self.make_constant_node(
                roi_name, onnx_pb.TensorProto.FLOAT, [1, 1, 1, 1, 1, 1, 1, 1])
            empty_name = self.get_name(op.type, 'empty')
            empty_tensor = helper.make_tensor(
                empty_name,
                onnx_pb.TensorProto.FLOAT, (0, ),
                np.array([]).astype('float32'),
                raw=False)
            empty_node = helper.make_node(
                'Constant', [], outputs=[empty_name], value=empty_tensor)
            shape_name0 = self.get_name(op.type, 'shape')
            shape_node0 = helper.make_node(
                'Shape', inputs=op.input('X'), outputs=[shape_name0])
            starts_name = self.get_name(op.type, 'slice.starts')
            starts_node = self.make_constant_node(
                starts_name, onnx_pb.TensorProto.INT64, [0])
            ends_name = self.get_name(op.type, 'slice.ends')
            ends_node = self.make_constant_node(
                ends_name, onnx_pb.TensorProto.INT64, [2])
            shape_name1 = self.get_name(op.type, 'shape')
            shape_node1 = helper.make_node(
                'Slice',
                inputs=[shape_name0, starts_name, ends_name],
                outputs=[shape_name1])
            node_list.extend([roi_node, empty_node, shape_node0, starts_node,
                              ends_node, shape_node1])
            if 'OutSize' in input_names and len(op.input('OutSize')) > 0:
                cast_shape_name = self.get_name(op.type, "shape.cast")
                cast_shape_node = helper.make_node(
                    'Cast',
                    inputs=op.input('OutSize'),
                    outputs=[cast_shape_name],
                    to=onnx_pb.TensorProto.INT64)
                node_list.append(cast_shape_node)
            else:
                concat_shape_name = self.get_name(op.type, "shape.concat")
                concat_shape_node = helper.make_node(
                    "Concat",
                    inputs=op.input('SizeTensor'),
                    outputs=[concat_shape_name],
                    axis=0)
                cast_shape_name = self.get_name(op.type, "shape.cast")
                cast_shape_node = helper.make_node(
                    'Cast',
                    inputs=[concat_shape_name],
                    outputs=[cast_shape_name],
                    to=onnx_pb.TensorProto.INT64)
                node_list.extend([concat_shape_node, cast_shape_node])
            shape_name3 = self.get_name(op.type, "shape.concat")
            shape_node3 = helper.make_node(
                'Concat',
                inputs=[shape_name1, cast_shape_name],
                outputs=[shape_name3],
                axis=0)
            result_node = helper.make_node(
                'Resize',
                inputs=[op.input('X')[0], roi_name, empty_name, shape_name3],
                outputs=op.output('Out'),
                mode='linear',
                coordinate_transformation_mode=coordinate_transformation_mode)
            node_list.extend([shape_node3, result_node])
            return node_list
        elif 'Scale' in input_names and len(op.input('Scale')) > 0:
            node = helper.make_node(
                'Resize',
                inputs=[op.input('X')[0], op.input('Scale')[0]],
                outputs=op.output('Out'),
                mode='linear',
                coordinate_transformation_mode=coordinate_transformation_mode)
        else:
            out_shape = [op.attr('out_h'), op.attr('out_w')]
            scale = op.attr('scale')
            if out_shape.count(-1) > 0:
                scale_name = self.get_name(op.type, 'scale')
                scale_node = self.make_constant_node(
                    scale_name, onnx_pb.TensorProto.FLOAT, [1, 1, scale, scale])
                roi_name = self.get_name(op.type, 'roi')
                roi_node = self.make_constant_node(
                    roi_name, onnx_pb.TensorProto.FLOAT, [1, 1, 1, 1, 1, 1, 1, 1])
                node = helper.make_node(
                    'Resize',
                    inputs=[op.input('X')[0], roi_name, scale_name],
                    outputs=op.output('Out'),
                    mode='nearest',
                    coordinate_transformation_mode=coordinate_transformation_mode)
                return [scale_node, roi_node, node]
            else:
                raise Exception("Unexpected situation happened")
        return node

    def nearest_interp(self, op, block):
        input_names = op.input_names
        coordinate_transformation_mode = ''
        align_corners = op.attr('align_corners')
        if align_corners:
            coordinate_transformation_mode = 'align_corners'
        else:
            coordinate_transformation_mode = 'asymmetric'
        if 'OutSize' in input_names and len(op.input('OutSize')) > 0:
            node = helper.make_node(
                'Resize',
                inputs=[op.input('X')[0], '', op.input('OutSize')[0]],
                outputs=op.output('Out'),
                mode='nearest',
                coordinate_transformation_mode=coordinate_transformation_mode)
        elif 'Scale' in input_names and len(op.input('Scale')) > 0:
            node = helper.make_node(
                'Resize',
                inputs=[op.input('X')[0], op.input('Scale')[0]],
                outputs=op.output('Out'),
                mode='nearest',
                coordinate_transformation_mode=coordinate_transformation_mode)
        else:
            out_shape = [op.attr('out_h'), op.attr('out_w')]
            scale = op.attr('scale')
            if out_shape.count(-1) > 0:
                scale_name = self.get_name(op.type, 'scale')
                scale_node = self.make_constant_node(
                    scale_name, onnx_pb.TensorProto.FLOAT, [1, 1, scale, scale])
                roi_name = self.get_name(op.type, 'roi')
                roi_node = self.make_constant_node(
                    roi_name, onnx_pb.TensorProto.FLOAT, [1, 1, 1, 1, 1, 1, 1, 1])
                node = helper.make_node(
                    'Resize',
                    inputs=[op.input('X')[0], roi_name, scale_name],
                    outputs=op.output('Out'),
                    mode='nearest',
                    coordinate_transformation_mode=coordinate_transformation_mode)
                return [scale_node, roi_node, node]
            else:
                raise Exception("Unexpected situation happened")
        return node

    def hard_swish(self, op, block):
        min_name = self.get_name(op.type, 'min')
        max_name = self.get_name(op.type, 'max')
        scale_name = self.get_name(op.type, 'scale')
        offset_name = self.get_name(op.type, 'offset')
        min_node = self.make_constant_node(min_name, onnx_pb.TensorProto.FLOAT, 0)
        max_node = self.make_constant_node(max_name, onnx_pb.TensorProto.FLOAT,
                                           op.attr('threshold'))
        scale_node = self.make_constant_node(scale_name, onnx_pb.TensorProto.FLOAT,
                                             op.attr('scale'))
        offset_node = self.make_constant_node(offset_name, onnx_pb.TensorProto.FLOAT,
                                              op.attr('offset'))

        name0 = self.get_name(op.type, 'add')
        node0 = helper.make_node(
            'Add', inputs=[op.input('X')[0], offset_name], outputs=[name0])
        name1 = self.get_name(op.type, 'relu')
        node1 = helper.make_node(
            'Clip', inputs=[name0, min_name, max_name], outputs=[name1], )
        name2 = self.get_name(op.type, 'mul')
        node2 = helper.make_node(
            'Mul', inputs=[op.input('X')[0], name1], outputs=[name2])
        node3 = helper.make_node(
            'Div', inputs=[name2, scale_name], outputs=op.output('Out'))
        return [min_node, max_node, scale_node, offset_node,
                node0, node1, node2, node3]

    def im2sequence(self, op, block):
        from .paddle_custom_layer.im2sequence import im2sequence
        return im2sequence(op, block)

    def yolo_box(self, op, block):
        from .paddle_custom_layer.yolo_box import yolo_box
        return yolo_box(op, block)

    def multiclass_nms(self, op, block):
        from .paddle_custom_layer.multiclass_nms import multiclass_nms
        return multiclass_nms(op, block)
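For reference, the arithmetic that OpSet11.relu6 and OpSet11.hard_swish assemble out of Clip/Add/Mul/Div nodes reduces to the following NumPy sketch (not part of the commit; threshold=6.0, scale=6.0, offset=3.0 are assumed to be Paddle's usual defaults for hard_swish).

import numpy as np

def relu6_ref(x, threshold=6.0):
    # the single Clip node built by relu6
    return np.clip(x, 0.0, threshold)

def hard_swish_ref(x, threshold=6.0, scale=6.0, offset=3.0):
    # Add -> Clip -> Mul -> Div, as in hard_swish above
    return x * np.clip(x + offset, 0.0, threshold) / scale

x = np.linspace(-5, 5, 5).astype('float32')
print(relu6_ref(x))
print(hard_swish_ref(x))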
x2paddle/op_mapper/paddle2onnx/opset11/paddle_custom_layer/__init__.py  (new file, mode 100644, empty)
x2paddle/op_mapper/paddle2onnx/opset11/paddle_custom_layer/im2sequence.py  (new file, mode 100644)

Content identical to x2paddle/op_mapper/paddle2onnx/opset10/paddle_custom_layer/im2sequence.py above.
x2paddle/op_mapper/paddle2onnx/opset11/paddle_custom_layer/multiclass_nms.py
0 → 100644
浏览文件 @
c1f65a10
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import
math
import
sys
import
os
import
numpy
as
np
import
paddle.fluid.core
as
core
import
paddle.fluid
as
fluid
import
onnx
import
warnings
from
onnx
import
helper
,
onnx_pb
def
multiclass_nms
(
op
,
block
):
"""
Convert the paddle multiclass_nms to onnx op.
This op is get the select boxes from origin boxes.
"""
inputs
=
dict
()
outputs
=
dict
()
attrs
=
dict
()
for
name
in
op
.
input_names
:
inputs
[
name
]
=
op
.
input
(
name
)
for
name
in
op
.
output_names
:
outputs
[
name
]
=
op
.
output
(
name
)
for
name
in
op
.
attr_names
:
attrs
[
name
]
=
op
.
attr
(
name
)
result_name
=
outputs
[
'Out'
][
0
]
background
=
attrs
[
'background_label'
]
normalized
=
attrs
[
'normalized'
]
if
normalized
==
False
:
warnings
.
warn
(
'The parameter normalized of multiclass_nms OP of Paddle is False, which has diff with ONNX.
\
Please set normalized=True in multiclass_nms of Paddle'
)
#convert the paddle attribute to onnx tensor
name_score_threshold
=
[
outputs
[
'Out'
][
0
]
+
"@score_threshold"
]
name_iou_threshold
=
[
outputs
[
'Out'
][
0
]
+
"@iou_threshold"
]
name_keep_top_k
=
[
outputs
[
'Out'
][
0
]
+
'@keep_top_k'
]
name_keep_top_k_2D
=
[
outputs
[
'Out'
][
0
]
+
'@keep_top_k_1D'
]
node_score_threshold
=
onnx
.
helper
.
make_node
(
'Constant'
,
inputs
=
[],
outputs
=
name_score_threshold
,
value
=
onnx
.
helper
.
make_tensor
(
name
=
name_score_threshold
[
0
]
+
"@const"
,
data_type
=
onnx
.
TensorProto
.
FLOAT
,
dims
=
(),
vals
=
[
float
(
attrs
[
'score_threshold'
])]))
node_iou_threshold
=
onnx
.
helper
.
make_node
(
'Constant'
,
inputs
=
[],
outputs
=
name_iou_threshold
,
value
=
onnx
.
helper
.
make_tensor
(
name
=
name_iou_threshold
[
0
]
+
"@const"
,
data_type
=
onnx
.
TensorProto
.
FLOAT
,
dims
=
(),
vals
=
[
float
(
attrs
[
'nms_threshold'
])]))
node_keep_top_k
=
onnx
.
helper
.
make_node
(
'Constant'
,
inputs
=
[],
outputs
=
name_keep_top_k
,
value
=
onnx
.
helper
.
make_tensor
(
name
=
name_keep_top_k
[
0
]
+
"@const"
,
data_type
=
onnx
.
TensorProto
.
INT64
,
dims
=
(),
vals
=
[
np
.
int64
(
attrs
[
'keep_top_k'
])]))
node_keep_top_k_2D
=
onnx
.
helper
.
make_node
(
'Constant'
,
inputs
=
[],
outputs
=
name_keep_top_k_2D
,
value
=
onnx
.
helper
.
make_tensor
(
name
=
name_keep_top_k_2D
[
0
]
+
"@const"
,
data_type
=
onnx
.
TensorProto
.
INT64
,
dims
=
[
1
,
1
],
vals
=
[
np
.
int64
(
attrs
[
'keep_top_k'
])]))
# the paddle data format is x1,y1,x2,y2
kwargs
=
{
'center_point_box'
:
0
}
name_select_nms
=
[
outputs
[
'Out'
][
0
]
+
"@select_index"
]
node_select_nms
=
onnx
.
helper
.
make_node
(
'NonMaxSuppression'
,
inputs
=
inputs
[
'BBoxes'
]
+
inputs
[
'Scores'
]
+
name_keep_top_k
+
\
name_iou_threshold
+
name_score_threshold
,
outputs
=
name_select_nms
)
# step 1 nodes select the nms class
node_list
=
[
node_score_threshold
,
node_iou_threshold
,
node_keep_top_k
,
node_keep_top_k_2D
,
node_select_nms
]
# create some const value to use
name_const_value
=
[
result_name
+
"@const_0"
,
result_name
+
"@const_1"
,
\
result_name
+
"@const_2"
,
\
result_name
+
"@const_-1"
]
value_const_value
=
[
0
,
1
,
2
,
-
1
]
for
name
,
value
in
zip
(
name_const_value
,
value_const_value
):
node
=
onnx
.
helper
.
make_node
(
'Constant'
,
inputs
=
[],
outputs
=
[
name
],
value
=
onnx
.
helper
.
make_tensor
(
name
=
name
+
"@const"
,
data_type
=
onnx
.
TensorProto
.
INT64
,
dims
=
[
1
],
vals
=
[
value
]))
node_list
.
append
(
node
)
# Ine this code block, we will deocde the raw score data, reshape N * C * M to 1 * N*C*M
# and the same time, decode the select indices to 1 * D, gather the select_indices
outputs_gather_1
=
[
result_name
+
"@gather_1"
]
node_gather_1
=
onnx
.
helper
.
make_node
(
'Gather'
,
inputs
=
name_select_nms
+
[
result_name
+
"@const_1"
],
outputs
=
outputs_gather_1
,
axis
=
1
)
node_list
.
append
(
node_gather_1
)
outputs_squeeze_gather_1
=
[
result_name
+
"@sequeeze_gather_1"
]
node_squeeze_gather_1
=
onnx
.
helper
.
make_node
(
'Squeeze'
,
inputs
=
outputs_gather_1
,
outputs
=
outputs_squeeze_gather_1
,
axes
=
[
1
])
node_list
.
append
(
node_squeeze_gather_1
)
outputs_gather_2
=
[
result_name
+
"@gather_2"
]
node_gather_2
=
onnx
.
helper
.
make_node
(
'Gather'
,
inputs
=
name_select_nms
+
[
result_name
+
"@const_2"
],
outputs
=
outputs_gather_2
,
axis
=
1
)
node_list
.
append
(
node_gather_2
)
    # when the background label is 0, drop the background entries; otherwise keep every class
    if background == 0:
        outputs_nonzero = [result_name + "@nonzero"]
        node_nonzero = onnx.helper.make_node(
            'NonZero',
            inputs=outputs_squeeze_gather_1,
            outputs=outputs_nonzero)
        node_list.append(node_nonzero)
    else:
        name_thresh = [result_name + "@thresh"]
        node_thresh = onnx.helper.make_node(
            'Constant',
            inputs=[],
            outputs=name_thresh,
            value=onnx.helper.make_tensor(
                name=name_thresh[0] + "@const",
                data_type=onnx.TensorProto.INT32,
                dims=[1],
                vals=[-1]))
        node_list.append(node_thresh)

        outputs_cast = [result_name + "@cast"]
        node_cast = onnx.helper.make_node(
            'Cast',
            inputs=outputs_squeeze_gather_1,
            outputs=outputs_cast,
            to=6)
        node_list.append(node_cast)

        outputs_greater = [result_name + "@greater"]
        node_greater = onnx.helper.make_node(
            'Greater',
            inputs=outputs_cast + name_thresh,
            outputs=outputs_greater)
        node_list.append(node_greater)

        outputs_nonzero = [result_name + "@nonzero"]
        node_nonzero = onnx.helper.make_node(
            'NonZero',
            inputs=outputs_greater,
            outputs=outputs_nonzero)
        node_list.append(node_nonzero)

    outputs_gather_1_nonzero = [result_name + "@gather_1_nonzero"]
    node_gather_1_nonzero = onnx.helper.make_node(
        'Gather',
        inputs=outputs_gather_1 + outputs_nonzero,
        outputs=outputs_gather_1_nonzero,
        axis=0)
    node_list.append(node_gather_1_nonzero)

    outputs_gather_2_nonzero = [result_name + "@gather_2_nonzero"]
    node_gather_2_nonzero = onnx.helper.make_node(
        'Gather',
        inputs=outputs_gather_2 + outputs_nonzero,
        outputs=outputs_gather_2_nonzero,
        axis=0)
    node_list.append(node_gather_2_nonzero)
    # reshape scores from N * C * M to a rank-1 tensor of length N*C*M
    outputs_reshape_scores_rank1 = [result_name + "@reshape_scores_rank1"]
    node_reshape_scores_rank1 = onnx.helper.make_node(
        "Reshape",
        inputs=inputs['Scores'] + [result_name + "@const_-1"],
        outputs=outputs_reshape_scores_rank1)
    node_list.append(node_reshape_scores_rank1)

    # get the shape of scores
    outputs_shape_scores = [result_name + "@shape_scores"]
    node_shape_scores = onnx.helper.make_node(
        'Shape',
        inputs=inputs['Scores'],
        outputs=outputs_shape_scores)
    node_list.append(node_shape_scores)

    # gather index 2 of the scores shape (M, the number of boxes)
    outputs_gather_scores_dim1 = [result_name + "@gather_scores_dim1"]
    node_gather_scores_dim1 = onnx.helper.make_node(
        'Gather',
        inputs=outputs_shape_scores + [result_name + "@const_2"],
        outputs=outputs_gather_scores_dim1,
        axis=0)
    node_list.append(node_gather_scores_dim1)

    # multiply: class_id * M
    outputs_mul_classnum_boxnum = [result_name + "@mul_classnum_boxnum"]
    node_mul_classnum_boxnum = onnx.helper.make_node(
        'Mul',
        inputs=outputs_gather_1_nonzero + outputs_gather_scores_dim1,
        outputs=outputs_mul_classnum_boxnum)
    node_list.append(node_mul_classnum_boxnum)

    # add: class_id * M + box_id
    outputs_add_class_M_index = [result_name + "@add_class_M_index"]
    node_add_class_M_index = onnx.helper.make_node(
        'Add',
        inputs=outputs_mul_classnum_boxnum + outputs_gather_2_nonzero,
        outputs=outputs_add_class_M_index)
    node_list.append(node_add_class_M_index)

    # squeeze the indices to 1 dim
    outputs_squeeze_select_index = [result_name + "@squeeze_select_index"]
    node_squeeze_select_index = onnx.helper.make_node(
        'Squeeze',
        inputs=outputs_add_class_M_index,
        outputs=outputs_squeeze_select_index,
        axes=[0, 2])
    node_list.append(node_squeeze_select_index)

    # gather the selected scores from the flattened scores
    outputs_gather_select_scores = [result_name + "@gather_select_scores"]
    node_gather_select_scores = onnx.helper.make_node(
        'Gather',
        inputs=outputs_reshape_scores_rank1 + \
            outputs_squeeze_select_index,
        outputs=outputs_gather_select_scores,
        axis=0)
    node_list.append(node_gather_select_scores)
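    # Worked example of the index arithmetic above (editor's sketch): for a single
    # image (N = 1) with C classes and M boxes per class, the scores tensor of shape
    # N x C x M is flattened to length N*C*M, so the score of (class c, box m) sits
    # at flat index c * M + m, e.g. C = 3, M = 100, c = 2, m = 5 -> 2 * 100 + 5 = 205.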
    # get the number of selected entries, which feeds TopK
    outputs_shape_select_num = [result_name + "@shape_select_num"]
    node_shape_select_num = onnx.helper.make_node(
        'Shape',
        inputs=outputs_gather_select_scores,
        outputs=outputs_shape_select_num)
    node_list.append(node_shape_select_num)

    outputs_gather_select_num = [result_name + "@gather_select_num"]
    node_gather_select_num = onnx.helper.make_node(
        'Gather',
        inputs=outputs_shape_select_num + [result_name + "@const_0"],
        outputs=outputs_gather_select_num,
        axis=0)
    node_list.append(node_gather_select_num)

    outputs_unsqueeze_select_num = [result_name + "@unsqueeze_select_num"]
    node_unsqueeze_select_num = onnx.helper.make_node(
        'Unsqueeze',
        inputs=outputs_gather_select_num,
        outputs=outputs_unsqueeze_select_num,
        axes=[0])
    node_list.append(node_unsqueeze_select_num)

    outputs_concat_topK_select_num = [result_name + "@conat_topK_select_num"]
    node_conat_topK_select_num = onnx.helper.make_node(
        'Concat',
        inputs=outputs_unsqueeze_select_num + name_keep_top_k_2D,
        outputs=outputs_concat_topK_select_num,
        axis=0)
    node_list.append(node_conat_topK_select_num)

    outputs_cast_concat_topK_select_num = [
        result_name + "@concat_topK_select_num"
    ]
    node_outputs_cast_concat_topK_select_num = onnx.helper.make_node(
        'Cast',
        inputs=outputs_concat_topK_select_num,
        outputs=outputs_cast_concat_topK_select_num,
        to=6)
    node_list.append(node_outputs_cast_concat_topK_select_num)

    # get min(keep_top_k, num_select)
    outputs_compare_topk_num_select = [result_name + "@compare_topk_num_select"]
    node_compare_topk_num_select = onnx.helper.make_node(
        'ReduceMin',
        inputs=outputs_cast_concat_topK_select_num,
        outputs=outputs_compare_topk_num_select,
        keepdims=0)
    node_list.append(node_compare_topk_num_select)

    # unsqueeze the k value to a 1-D tensor
    outputs_unsqueeze_topk_select_indices = [
        result_name + "@unsqueeze_topk_select_indices"
    ]
    node_unsqueeze_topk_select_indices = onnx.helper.make_node(
        'Unsqueeze',
        inputs=outputs_compare_topk_num_select,
        outputs=outputs_unsqueeze_topk_select_indices,
        axes=[0])
    node_list.append(node_unsqueeze_topk_select_indices)

    # cast the k value to INT64
    outputs_cast_topk_indices = [result_name + "@cast_topk_indices"]
    node_cast_topk_indices = onnx.helper.make_node(
        'Cast',
        inputs=outputs_unsqueeze_topk_select_indices,
        outputs=outputs_cast_topk_indices,
        to=7)
    node_list.append(node_cast_topk_indices)

    # run TopK over the selected scores
    outputs_topk_select_topk_indices = [result_name + "@topk_select_topk_values",\
        result_name + "@topk_select_topk_indices"]
    node_topk_select_topk_indices = onnx.helper.make_node(
        'TopK',
        inputs=outputs_gather_select_scores + outputs_cast_topk_indices,
        outputs=outputs_topk_select_topk_indices)
    node_list.append(node_topk_select_topk_indices)
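    # Note on the Cast nodes in this graph: the "to" attribute is an onnx.TensorProto
    # data type enum value, i.e. to=1 -> FLOAT, to=6 -> INT32, to=7 -> INT64.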
    # gather the top-k labels, scores and boxes
    outputs_gather_topk_scores = [result_name + "@gather_topk_scores"]
    node_gather_topk_scores = onnx.helper.make_node(
        'Gather',
        inputs=outputs_gather_select_scores +
        [outputs_topk_select_topk_indices[1]],
        outputs=outputs_gather_topk_scores,
        axis=0)
    node_list.append(node_gather_topk_scores)

    outputs_gather_topk_class = [result_name + "@gather_topk_class"]
    node_gather_topk_class = onnx.helper.make_node(
        'Gather',
        inputs=outputs_gather_1_nonzero +
        [outputs_topk_select_topk_indices[1]],
        outputs=outputs_gather_topk_class,
        axis=1)
    node_list.append(node_gather_topk_class)

    # to gather the boxes, first gather the box ids, then gather the boxes themselves
    outputs_gather_topk_boxes_id = [result_name + "@gather_topk_boxes_id"]
    node_gather_topk_boxes_id = onnx.helper.make_node(
        'Gather',
        inputs=outputs_gather_2_nonzero +
        [outputs_topk_select_topk_indices[1]],
        outputs=outputs_gather_topk_boxes_id,
        axis=1)
    node_list.append(node_gather_topk_boxes_id)

    # squeeze gather_topk_boxes_id to 1 dim
    outputs_squeeze_topk_boxes_id = [result_name + "@squeeze_topk_boxes_id"]
    node_squeeze_topk_boxes_id = onnx.helper.make_node(
        'Squeeze',
        inputs=outputs_gather_topk_boxes_id,
        outputs=outputs_squeeze_topk_boxes_id,
        axes=[0, 2])
    node_list.append(node_squeeze_topk_boxes_id)

    outputs_gather_select_boxes = [result_name + "@gather_select_boxes"]
    node_gather_select_boxes = onnx.helper.make_node(
        'Gather',
        inputs=inputs['BBoxes'] + outputs_squeeze_topk_boxes_id,
        outputs=outputs_gather_select_boxes,
        axis=1)
    node_list.append(node_gather_select_boxes)

    # concat the final result; the class ids must be cast to float first
    outputs_cast_topk_class = [result_name + "@cast_topk_class"]
    node_cast_topk_class = onnx.helper.make_node(
        'Cast',
        inputs=outputs_gather_topk_class,
        outputs=outputs_cast_topk_class,
        to=1)
    node_list.append(node_cast_topk_class)

    outputs_unsqueeze_topk_scores = [result_name + "@unsqueeze_topk_scores"]
    node_unsqueeze_topk_scores = onnx.helper.make_node(
        'Unsqueeze',
        inputs=outputs_gather_topk_scores,
        outputs=outputs_unsqueeze_topk_scores,
        axes=[0, 2])
    node_list.append(node_unsqueeze_topk_scores)

    inputs_concat_final_results = outputs_cast_topk_class + outputs_unsqueeze_topk_scores +\
        outputs_gather_select_boxes
    outputs_concat_final_results = outputs['Out']
    node_concat_final_results = onnx.helper.make_node(
        'Concat',
        inputs=inputs_concat_final_results,
        outputs=outputs_concat_final_results,
        axis=2)
    node_list.append(node_concat_final_results)

    return node_list
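The final Concat reproduces Paddle's multiclass_nms output layout of one row per kept detection: [label, score, x1, y1, x2, y2]. A minimal numpy sketch of that concatenation, with made-up values purely for illustration (not part of this commit):

import numpy as np

labels = np.array([[[0.], [1.]]])                      # 1 x keep x 1, class ids cast to float
scores = np.array([[[0.9], [0.7]]])                    # 1 x keep x 1
boxes = np.array([[[1., 2., 3., 4.],
                   [5., 6., 7., 8.]]])                 # 1 x keep x 4, x1,y1,x2,y2
out = np.concatenate([labels, scores, boxes], axis=2)  # same effect as the Concat node with axis=2
print(out.shape)                                       # (1, 2, 6)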
x2paddle/op_mapper/paddle2onnx/opset11/paddle_custom_layer/yolo_box.py
0 → 100644
x2paddle/op_mapper/paddle2onnx/opset9/__init__.py
0 → 100644
x2paddle/op_mapper/paddle2onnx/opset9/opset.py
0 → 100644
x2paddle/op_mapper/paddle2onnx/opset9/paddle_custom_layer/__init__.py
0 → 100644
x2paddle/op_mapper/paddle2onnx/opset9/paddle_custom_layer/im2sequence.py
0 → 100644
import onnx
import numpy as np
from onnx import onnx_pb, helper

im2seq_counter = 0


def im2sequence(op, block):
    global im2seq_counter
    n, c, h, w = block.var(op.input('X')[0]).shape
    assert h > 0 and w > 0, "Only fixed input shapes are supported for the im2sequence operator."
    stride_h, stride_w = op.attr('strides')
    paddings = op.attr('paddings')
    assert op.attr(
        'out_stride'
    ) != 1, "Only out_stride==1 is supported for the im2sequence operator."
    h = h + paddings[0] + paddings[1]
    w = w + paddings[1] + paddings[2]
    kernel_h, kernel_w = op.attr('kernels')
    out_h = 1 + (h - kernel_h + stride_h - 1) // stride_h
    out_w = 1 + (w - kernel_w + stride_w - 1) // stride_w
    h_steps = list()
    for i in range(out_h):
        h_steps.append([i * stride_h, i * stride_h + kernel_h])
    w_steps = list()
    for i in range(out_w):
        w_steps.append([i * stride_w, i * stride_w + kernel_w])

    nodes = list()
    slice_blocks = list()
    for i in range(out_h):
        for j in range(out_w):
            starts_name = "im2sequence.starts.{}.{}.{}".format(im2seq_counter,
                                                               i, j)
            starts_tensor = helper.make_tensor(
                name=starts_name,
                data_type=onnx_pb.TensorProto.INT64,
                dims=[4],
                vals=[0, 0, h_steps[i][0], w_steps[j][0]])
            ends_name = "im2sequence.ends.{}.{}.{}".format(im2seq_counter, i, j)
            ends_tensor = helper.make_tensor(
                name=ends_name,
                data_type=onnx_pb.TensorProto.INT64,
                dims=[4],
                vals=[999999, 999999, h_steps[i][1], w_steps[j][1]])
            starts_node = helper.make_node(
                'Constant', inputs=[], outputs=[starts_name],
                value=starts_tensor)
            ends_node = helper.make_node(
                'Constant', inputs=[], outputs=[ends_name], value=ends_tensor)
            nodes.extend([starts_node, ends_node])

            slice_block_name = "im2sequence.slice.{}.{}.{}".format(
                im2seq_counter, i, j)
            slice_block_node = helper.make_node(
                'Slice',
                inputs=[op.input('X')[0], starts_name, ends_name],
                outputs=[slice_block_name])
            flatten_block_name = "im2sequence.flatten.{}.{}.{}".format(
                im2seq_counter, i, j)
            flatten_block_node = helper.make_node(
                "Flatten",
                inputs=[slice_block_name],
                outputs=[flatten_block_name],
                axis=0)
            nodes.extend([slice_block_node, flatten_block_node])
            slice_blocks.append(flatten_block_name)

    concat_block_name = "im2sequence.concat_block.{}".format(im2seq_counter)
    # concat_block_node = helper.make_node("Concat", inputs=slice_blocks, outputs=[concat_block_name], axis=0)
    concat_block_node = helper.make_node(
        "Concat", inputs=slice_blocks, outputs=op.output('Out'), axis=0)
    nodes.append(concat_block_node)
    print("\n\n========== Important Notice ===========")
    print(
        "Since the im2sequence operator is used in your PaddlePaddle model, the translated ONNX model only supports input data with batch_size=1."
    )
    print("======================================\n")
    return nodes
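As a sanity check on the sliding-window arithmetic above, here is a small numpy sketch (sizes are made up for illustration, not part of this commit) that reproduces out_h/out_w and the per-window slice-then-flatten behaviour for a batch of one:

import numpy as np

h, w = 12, 8
kernel_h, kernel_w = 4, 4
stride_h, stride_w = 2, 4
out_h = 1 + (h - kernel_h + stride_h - 1) // stride_h   # 5
out_w = 1 + (w - kernel_w + stride_w - 1) // stride_w   # 2

x = np.arange(1 * 1 * h * w).reshape(1, 1, h, w)
blocks = []
for i in range(out_h):
    for j in range(out_w):
        hs, ws = i * stride_h, j * stride_w
        blocks.append(x[:, :, hs:hs + kernel_h, ws:ws + kernel_w].reshape(1, -1))
seq = np.concatenate(blocks, axis=0)  # (out_h * out_w, c * kernel_h * kernel_w)
print(out_h, out_w, seq.shape)        # 5 2 (10, 16)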
x2paddle/op_mapper/paddle2onnx/opset9/paddle_custom_layer/multiclass_nms.py
0 → 100644
x2paddle/op_mapper/paddle2onnx/opset9/paddle_custom_layer/yolo_box.py
0 → 100644
x2paddle/op_mapper/paddle2onnx/paddle_op_mapper.py
0 → 100644
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import sys
import x2paddle
import os
import numpy as np
import paddle.fluid.core as core
import paddle.fluid as fluid
import onnx
from onnx import helper, onnx_pb
from x2paddle.op_mapper.paddle2onnx.opset9.opset import OpSet9
from x2paddle.op_mapper.paddle2onnx.opset10.opset import OpSet10
from x2paddle.op_mapper.paddle2onnx.opset11.opset import OpSet11


class PaddleOpMapper(object):
    def __init__(self):
        self.support_opsets = [9, 10, 11]
        self.default_opset = 10
        self.name_counter = dict()
        self.op_set = None

    def convert(self, program, save_dir, opset_number=10):
        self.op_set = self.create_opset(opset_number)
        weight_nodes = self.op_set.convert_weights(program)
        op_nodes = list()
        input_nodes = list()
        output_nodes = list()
        unsupported_ops = set()

        print("Translating PaddlePaddle to ONNX...\n")
        for block in program.blocks:
            for i, op in enumerate(block.ops):
                sys.stdout.write("\rTotal:{}, Current:{} : {} ".format(
                    len(block.ops), i + 1, op.type))
                sys.stdout.flush()
                if not hasattr(self.op_set, op.type):
                    unsupported_ops.add(op.type)
                    continue
                if len(unsupported_ops) > 0:
                    continue
                node = getattr(self.op_set, op.type)(op, block)
                if op.type == 'feed':
                    print(node.name)
                    input_nodes.append(node)
                elif op.type == 'fetch':
                    output_nodes.append(node)
                else:
                    if isinstance(node, list):
                        op_nodes = op_nodes + node
                    else:
                        op_nodes.append(node)

        if len(unsupported_ops) > 0:
            print("\nThere are {} ops that are not supported yet".format(
                len(unsupported_ops)))
            for op in unsupported_ops:
                print("=========== {} ===========".format(op))
            return

        graph = helper.make_graph(
            nodes=weight_nodes + op_nodes,
            name='onnx_model_from_paddle',
            initializer=[],
            inputs=input_nodes,
            outputs=output_nodes)
        opset_imports = [helper.make_opsetid("", opset_number)]
        model = helper.make_model(
            graph, producer_name='X2Paddle', opset_imports=opset_imports)
        onnx.checker.check_model(model)

        if not os.path.isdir(save_dir):
            os.makedirs(save_dir)
        with open(os.path.join(save_dir, 'x2paddle_model.onnx'), 'wb') as f:
            f.write(model.SerializeToString())
        print("\nTranslated model saved in {}".format(
            os.path.join(save_dir, 'x2paddle_model.onnx')))

    def create_opset(self, opset_number):
        run_opset = self.default_opset
        opset = ''
        if opset_number in self.support_opsets:
            run_opset = opset_number
        else:
            for support_opset_number in self.support_opsets:
                if support_opset_number > opset_number:
                    run_opset = support_opset_number
                else:
                    break
        print('paddle2onnx supports exporting to ONNX opset versions {}; '
              'the requested opset version is {}, so opset {} will be used.'.
              format(self.support_opsets, opset_number, run_opset))
        opset = 'OpSet' + str(run_opset)
        return eval(opset)()
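A minimal usage sketch of this mapper (the model directory and output path below are illustrative, not part of this commit):

import paddle.fluid as fluid
from x2paddle.op_mapper.paddle2onnx.paddle_op_mapper import PaddleOpMapper

exe = fluid.Executor(fluid.CPUPlace())
# load_inference_model returns (program, feed_target_names, fetch_targets)
program, _, _ = fluid.io.load_inference_model("./inference_model", exe)

mapper = PaddleOpMapper()
mapper.convert(program, "./onnx_model", opset_number=11)  # writes ./onnx_model/x2paddle_model.onnx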