Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
PaddlePaddle
X2Paddle
提交
208a2821
X
X2Paddle
项目概览
PaddlePaddle
/
X2Paddle
大约 1 年 前同步成功
通知
328
Star
698
Fork
167
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
26
列表
看板
标记
里程碑
合并请求
4
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
X
X2Paddle
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
26
Issue
26
列表
看板
标记
里程碑
合并请求
4
合并请求
4
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
未验证
提交
208a2821
编写于
7月 29, 2022
作者:
J
Jason
提交者:
GitHub
7月 29, 2022
浏览文件
操作
浏览文件
下载
差异文件
Merge pull request #836 from wjj19950828/add_autoscan_test
Add autoscan test
上级
55bd2869
2d3ca5f5
变更
18
隐藏空白更改
内联
并排
Showing
18 changed file
with
984 addition
and
17 deletion
+984
-17
tests/onnx/auto_scan_test.py
tests/onnx/auto_scan_test.py
+222
-0
tests/onnx/onnxbase.py
tests/onnx/onnxbase.py
+280
-0
tests/onnx/test_auto_scan_conv2d.py
tests/onnx/test_auto_scan_conv2d.py
+135
-0
tests/onnx/test_auto_scan_unsqueeze_13.py
tests/onnx/test_auto_scan_unsqueeze_13.py
+76
-0
tests/onnx/test_auto_scan_unsqueeze_7.py
tests/onnx/test_auto_scan_unsqueeze_7.py
+75
-0
x2paddle/convert.py
x2paddle/convert.py
+10
-3
x2paddle/decoder/onnx_decoder.py
x2paddle/decoder/onnx_decoder.py
+3
-2
x2paddle/op_mapper/onnx2paddle/opset10.py
x2paddle/op_mapper/onnx2paddle/opset10.py
+14
-0
x2paddle/op_mapper/onnx2paddle/opset11.py
x2paddle/op_mapper/onnx2paddle/opset11.py
+14
-0
x2paddle/op_mapper/onnx2paddle/opset12.py
x2paddle/op_mapper/onnx2paddle/opset12.py
+14
-0
x2paddle/op_mapper/onnx2paddle/opset13.py
x2paddle/op_mapper/onnx2paddle/opset13.py
+32
-0
x2paddle/op_mapper/onnx2paddle/opset14.py
x2paddle/op_mapper/onnx2paddle/opset14.py
+14
-0
x2paddle/op_mapper/onnx2paddle/opset15.py
x2paddle/op_mapper/onnx2paddle/opset15.py
+14
-0
x2paddle/op_mapper/onnx2paddle/opset7.py
x2paddle/op_mapper/onnx2paddle/opset7.py
+32
-0
x2paddle/op_mapper/onnx2paddle/opset8.py
x2paddle/op_mapper/onnx2paddle/opset8.py
+14
-0
x2paddle/op_mapper/onnx2paddle/opset9.py
x2paddle/op_mapper/onnx2paddle/opset9.py
+14
-0
x2paddle/op_mapper/onnx2paddle/opset_legacy.py
x2paddle/op_mapper/onnx2paddle/opset_legacy.py
+20
-11
x2paddle/optimizer/optimizer.py
x2paddle/optimizer/optimizer.py
+1
-1
未找到文件。
tests/onnx/auto_scan_test.py
0 → 100644
浏览文件 @
208a2821
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import
numpy
as
np
import
unittest
import
os
import
time
import
logging
import
paddle
import
hypothesis
from
hypothesis
import
given
,
settings
,
seed
,
reproduce_failure
import
hypothesis.strategies
as
st
from
onnxbase
import
ONNXConverter
,
randtool
from
itertools
import
product
import
copy
from
inspect
import
isfunction
# Run everything on CPU so results are reproducible across machines,
# and log plain messages (no timestamps) for readable CI output.
paddle.set_device("cpu")
logging.basicConfig(level=logging.INFO, format="%(message)s")

# Two hypothesis profiles: "ci" (fast, 100 examples) and "dev" (thorough,
# 1000 examples). Both disable health checks and deadlines and derandomize
# so runs are deterministic.
settings.register_profile(
    "ci",
    max_examples=100,
    suppress_health_check=hypothesis.HealthCheck.all(),
    deadline=None,
    print_blob=True,
    derandomize=True,
    report_multiple_bugs=False)
settings.register_profile(
    "dev",
    max_examples=1000,
    suppress_health_check=hypothesis.HealthCheck.all(),
    deadline=None,
    print_blob=True,
    derandomize=True,
    report_multiple_bugs=False)
# Select "ci" when only a fraction of cases should run or when the
# HYPOTHESIS_TEST_PROFILE env var explicitly requests it; otherwise "dev".
if float(os.getenv('TEST_NUM_PERCENT_CASES', default='1.0')) < 1 or \
        os.getenv('HYPOTHESIS_TEST_PROFILE', 'dev') == 'ci':
    settings.load_profile("ci")
else:
    settings.load_profile("dev")
class OPConvertAutoScanTest(unittest.TestCase):
    """Base class for auto-scan ONNX-to-Paddle op conversion tests.

    Subclasses implement ``sample_convert_config(draw)`` — a hypothesis draw
    function returning ``(config, attrs)`` — and may override
    ``add_ignore_test_case`` to skip known-unsupported configurations.
    ``run_and_statis`` drives hypothesis over the sampled configs and
    ``run_test`` converts/executes one sampled single-op model.
    """

    def __init__(self, *args, **kwargs):
        super(OPConvertAutoScanTest, self).__init__(*args, **kwargs)
        np.random.seed(1024)
        paddle.enable_static()
        # Counters reported by run_and_statis at the end of a run.
        self.num_ran_tests = 0
        self.num_ignore_tests = 0

    def add_ignore_test_case(self, configs):
        # Hook for subclasses: return a truthy value to skip this config.
        # The base implementation ignores nothing (returns None -> falsy).
        return

    def run_and_statis(self,
                       max_examples=100,
                       min_opset_version=7,
                       reproduce=None,
                       min_success_num=25,
                       max_duration=-1):
        """Run hypothesis over sample_convert_config and report statistics.

        max_examples: number of hypothesis examples to generate.
        min_opset_version: kept for subclass signature compatibility
            (not read here; the per-config value is used in run_test).
        reproduce: optional hypothesis reproduce_failure decorator.
        min_success_num: minimum count of non-ignored programs required.
        max_duration: wall-clock budget in seconds; <= 0 means unlimited.
        """
        if os.getenv("CE_STAGE", "OFF") == "ON":
            max_examples *= 10
            min_success_num *= 10
            # while at ce phase, there's no limit on time
            max_duration = -1
        start_time = time.time()
        # Re-register "ci" with the caller-supplied example budget, then load it.
        settings.register_profile(
            "ci",
            max_examples=max_examples,
            suppress_health_check=hypothesis.HealthCheck.all(),
            deadline=None,
            print_blob=True,
            derandomize=True,
            report_multiple_bugs=False, )
        settings.load_profile("ci")

        def sample_convert_generator(draw):
            return self.sample_convert_config(draw)

        def run_test(configs):
            # NOTE(review): this local function intentionally shadows the
            # run_test *method* name; it forwards each drawn config to it.
            return self.run_test(configs=configs)

        generator = st.composite(sample_convert_generator)
        loop_func = given(generator())(run_test)
        if reproduce is not None:
            loop_func = reproduce(loop_func)
        logging.info("Start to running test of {}".format(type(self)))
        paddle.disable_static()
        loop_func()
        logging.info(
            "===================Statistical Information===================")
        logging.info("Number of Generated Programs: {}".format(
            self.num_ran_tests))
        logging.info("Number of Ignore Programs: {}".format(
            self.num_ignore_tests))
        successful_ran_programs = int(self.num_ran_tests -
                                      self.num_ignore_tests)
        if successful_ran_programs < min_success_num:
            logging.warning("satisfied_programs = ran_programs")
            logging.error(
                "At least {} programs need to ran successfully, but now only about {} programs satisfied."
                .format(min_success_num, successful_ran_programs))
            assert False
        used_time = time.time() - start_time
        logging.info("Used time: {} s".format(round(used_time, 2)))
        if max_duration > 0 and used_time > max_duration:
            logging.error(
                "The duration exceeds {} seconds, if this is neccessary, try to set a larger number for parameter `max_duration`."
                .format(max_duration))
            assert False

    def run_test(self, configs):
        """Build, convert and execute one sampled single-op ONNX model.

        configs is the (config, attrs) pair produced by
        sample_convert_config; config describes the op/test data and attrs
        holds the ONNX node attributes.
        """
        config, attrs = configs
        logging.info("Run configs: {}".format(config))
        logging.info("Run attrs: {}".format(attrs))
        # Validate that every mandatory config key is present.
        assert "op_names" in config.keys(
        ), "config must include op_names in dict keys"
        assert "test_data_shapes" in config.keys(
        ), "config must include test_data_shapes in dict keys"
        assert "test_data_types" in config.keys(
        ), "config must include test_data_types in dict keys"
        assert "min_opset_version" in config.keys(
        ), "config must include min_opset_version in dict keys"
        assert "inputs_name" in config.keys(
        ), "config must include inputs_name in dict keys"
        assert "outputs_name" in config.keys(
        ), "config must include outputs_name in dict keys"
        assert "inputs_shape" in config.keys(
        ), "config must include inputs_shape in dict keys"
        op_names = config["op_names"]
        test_data_shapes = config["test_data_shapes"]
        test_data_types = config["test_data_types"]
        min_opset_version = config["min_opset_version"]
        inputs_name = config["inputs_name"]
        outputs_name = config["outputs_name"]
        inputs_shape = config["inputs_shape"]
        # max_opset_version is a fixed value
        max_opset_version = 15
        enable_onnx_checker = True
        self.num_ran_tests += 1
        # add ignore testcases
        if self.add_ignore_test_case(configs):
            self.num_ignore_tests += 1
            return
        if not isinstance(op_names, (tuple, list)):
            op_names = [op_names]
        input_type_list = None
        if len(test_data_types) > 1:
            # Cartesian product over the per-input dtype lists.
            input_type_list = list(product(*test_data_types))
        elif len(test_data_types) == 1:
            if isinstance(test_data_types[0], str):
                input_type_list = [test_data_types[0]]
            else:
                input_type_list = test_data_types
        elif len(test_data_types) == 0:
            # Default every input to float32 when no dtypes were given.
            input_type_list = [["float32"] * len(test_data_shapes)]
        delta = 1e-5
        rtol = 1e-5
        if "delta" in config.keys():
            delta = config["delta"]
        if "rtol" in config.keys():
            rtol = config["rtol"]
        if "max_opset_version" in config.keys():
            max_opset_version = config["max_opset_version"]
        if "enable_onnx_checker" in config.keys():
            enable_onnx_checker = config["enable_onnx_checker"]
        for i in range(len(op_names)):
            obj = ONNXConverter(op_names[i], min_opset_version,
                                max_opset_version, op_names[i], inputs_name,
                                outputs_name, inputs_shape, delta, rtol, attrs,
                                enable_onnx_checker)
            for input_type in input_type_list:
                input_data = list()
                for j, shape in enumerate(test_data_shapes):
                    # Determine whether it is a user-defined data generation function
                    if isfunction(shape):
                        data = shape()
                        data = data.astype(input_type[j])
                        input_data.append(data)
                        continue
                    if input_type[j].count('int') > 0:
                        input_data.append(
                            randtool("int", -20, 20, shape).astype(input_type[
                                j]))
                    elif input_type[j].count('bool') > 0:
                        input_data.append(
                            randtool("bool", -2, 2, shape).astype(input_type[
                                j]))
                    else:
                        input_data.append(
                            randtool("float", -2, 2, shape).astype(input_type[
                                j]))
                obj.set_input_data("input_data", tuple(input_data))
                logging.info("Now Run >>> dtype: {}, op_name: {}".format(
                    input_type, op_names[i]))
                obj.run()
            if len(input_type_list) == 0:
                obj.run()
        logging.info("Run Successfully!")
tests/onnx/onnxbase.py
0 → 100644
浏览文件 @
208a2821
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import
os
import
numpy
as
np
import
logging
import
paddle
import
onnx
from
onnx
import
helper
from
onnx
import
TensorProto
from
onnxruntime
import
InferenceSession
# Mapping from numpy dtype strings to ONNX TensorProto element types,
# used when declaring graph input tensor value infos.
DTYPE_ONNX_STR_MAP = {
    'float32': TensorProto.FLOAT,
    'float64': TensorProto.DOUBLE,
    'int16': TensorProto.INT16,
    'int32': TensorProto.INT32,
    'int64': TensorProto.INT64,
    'bool': TensorProto.BOOL,
}
def compare(result, expect, delta=1e-10, rtol=1e-10):
    """
    param meaning:
    result: onnx result
    expect: paddle result
    delta: absolute error
    rtol: relative error
    """
    if type(result) == np.ndarray:
        # Single-array case: normalize expect to an ndarray, then compare
        # element-wise within the given tolerances (NaNs compare equal).
        if type(expect) == list:
            expect = expect[0]
        expect = np.array(expect)
        res = np.allclose(result, expect, atol=delta, rtol=rtol, equal_nan=True)
        # print the offending data when the comparison fails
        if res is False:
            if result.dtype == np.bool_:
                # bool arrays cannot be subtracted directly; diff as int32
                diff = abs(result.astype("int32") - expect.astype("int32"))
            else:
                diff = abs(result - expect)
            logging.error("Output has diff! max diff: {}".format(np.amax(diff)))
        if result.dtype != expect.dtype:
            logging.error(
                "Different output data types! res type is: {}, and expect type is: {}".
                format(result.dtype, expect.dtype))
        assert res
        assert result.shape == expect.shape, "result.shape: {} != expect.shape: {}".format(
            result.shape, expect.shape)
        assert result.dtype == expect.dtype, "result.dtype: {} != expect.dtype: {}".format(
            result.dtype, expect.dtype)
    elif isinstance(result, (list, tuple)) and len(result) > 1:
        # Multi-output case: compare outputs pairwise; anything that is not
        # already a numpy value is converted via its .numpy() method first.
        for i in range(len(result)):
            if isinstance(result[i], (np.generic, np.ndarray)):
                compare(result[i], expect[i], delta, rtol)
            else:
                compare(result[i].numpy(), expect[i], delta, rtol)
    # deal with scalar tensor
    elif len(expect) == 1:
        # NOTE(review): when `result` is a one-element list/tuple, this
        # recursion re-enters with the same list; if `len(expect[0])` is then
        # neither the ndarray branch nor 1, no assertion fires — confirm that
        # callers cannot hit that silent fall-through.
        compare(result, expect[0], delta, rtol)
def randtool(dtype, low, high, shape):
    """Generate a random numpy array for test inputs.

    Args:
        dtype (str): one of "int", "float" or "bool".
        low: inclusive lower bound of the sampled values.
        high: exclusive upper bound of the sampled values.
        shape: output array shape.

    Returns:
        np.ndarray of the requested kind and shape.

    Raises:
        ValueError: if ``dtype`` is not one of the supported kind strings.
    """
    if dtype == "int":
        return np.random.randint(low, high, shape)
    elif dtype == "float":
        # Uniform in [low, high): scale the unit-interval samples.
        return low + (high - low) * np.random.random(shape)
    elif dtype == "bool":
        # Nonzero integers become True, zeros become False.
        return np.random.randint(low, high, shape).astype("bool")
    else:
        # Previously an unknown dtype silently returned None, which surfaced
        # later as a confusing AttributeError at the .astype() call site.
        raise ValueError(
            "unsupported dtype '{}', expected 'int', 'float' or 'bool'".format(
                dtype))
class ONNXConverter(object):
    """
    onnx model transfer to paddle

    Builds a single-op ONNX model per opset version, converts it to Paddle
    via x2paddle, runs both with ONNXRuntime / Paddle inference, and
    compares the outputs with `compare`.
    """

    def __init__(self,
                 file_name,
                 min_opset_version,
                 max_opset_version,
                 op_type=None,
                 inputs_name=None,
                 outputs_name=None,
                 inputs_shape=None,
                 delta=1e-5,
                 rtol=1e-5,
                 attrs=None,
                 enable_onnx_checker=True):
        """Create a converter for one ONNX op.

        Fix: the original defaults were shared mutable lists (`[]`);
        set_input_data appends to self.inputs_shape, so the shared default
        leaked state across instances. `attrs` now defaults to a dict since
        it is expanded with ** in _mk_onnx_graph.
        """
        self.op_type = op_type
        assert isinstance(self.op_type,
                          str), "The dtype of op_type must be string!"
        self.seed = 33
        np.random.seed(self.seed)
        paddle.seed(self.seed)
        self.places = ['cpu']
        self.name = file_name
        self.min_opset_version = min_opset_version
        self.max_opset_version = max_opset_version
        self.pwd = os.getcwd()
        self.delta = delta
        self.rtol = rtol
        self.static = False
        self.kwargs_dict = {"input_data": ()}
        self.input_feed = {}
        self.inputs_dtype = []
        self.inputs_name = [] if inputs_name is None else inputs_name
        self.outputs_name = [] if outputs_name is None else outputs_name
        self.inputs_shape = [] if inputs_shape is None else inputs_shape
        self.attrs = {} if attrs is None else attrs
        self.enable_onnx_checker = enable_onnx_checker

    def set_input_data(self, group_name, *args):
        """
        set input data

        Records dtypes and shapes of the provided arrays and fills
        self.input_feed keyed by self.inputs_name in order.
        """
        self.kwargs_dict[group_name] = args
        # A single tuple argument means the caller pre-packed the inputs.
        if isinstance(self.kwargs_dict[group_name][0], tuple):
            self.kwargs_dict[group_name] = self.kwargs_dict[group_name][0]
        i = 0
        # Only record shapes when the caller did not supply inputs_shape.
        add_inputs_shape = False
        if len(self.inputs_shape) == 0:
            add_inputs_shape = True
        for in_data in self.kwargs_dict[group_name]:
            if isinstance(in_data, list):
                for data in in_data:
                    self.inputs_dtype.append(str(data.dtype))
                    self.input_feed[self.inputs_name[i]] = data
                    if add_inputs_shape:
                        self.inputs_shape.append(data.shape)
                    i += 1
            else:
                if isinstance(in_data, tuple):
                    in_data = in_data[0]
                self.inputs_dtype.append(str(in_data.dtype))
                self.input_feed[self.inputs_name[i]] = in_data
                if add_inputs_shape:
                    self.inputs_shape.append(in_data.shape)
                i += 1

    def _mkdir(self):
        """
        make dir to save all
        """
        save_path = os.path.join(self.pwd, self.name)
        if not os.path.exists(save_path):
            os.mkdir(save_path)

    def _onnx_to_paddle(self, ver):
        """
        convert onnx to paddle
        """
        from x2paddle.convert import onnx2paddle
        onnx_path = os.path.join(self.pwd, self.name,
                                 self.name + '_' + str(ver) + '.onnx')
        paddle_path = os.path.join(self.pwd, self.name,
                                   self.name + '_' + str(ver) + '_paddle')
        onnx2paddle(
            onnx_path,
            paddle_path,
            convert_to_lite=False,
            enable_onnx_checker=self.enable_onnx_checker)

    def _mk_paddle_res(self, ver):
        """
        make paddle res

        Loads the converted inference model and runs it on the recorded
        input feed; always returns a tuple of numpy arrays.
        """
        paddle_path = os.path.join(
            self.pwd, self.name,
            self.name + '_' + str(ver) + '_paddle/inference_model/model')
        paddle.disable_static()
        # run
        model = paddle.jit.load(paddle_path)
        paddle_feed = list()
        for i in range(len(self.input_feed)):
            paddle_feed.append(self.input_feed[self.inputs_name[i]])
        result = model(*paddle_feed)
        # get paddle outputs
        if isinstance(result, (tuple, list)):
            result = tuple(out.numpy() for out in result)
        else:
            result = (result.numpy(), )
        return result

    def _mk_onnx_res(self, ver):
        """
        make onnx res
        """
        sess = InferenceSession(
            os.path.join(self.pwd, self.name,
                         self.name + '_' + str(ver) + '.onnx'))
        ort_outs = sess.run(output_names=None, input_feed=self.input_feed)
        return ort_outs

    def set_onnx_inputs(self):
        # Build one tensor value info per graph input from the recorded
        # names, dtypes and shapes.
        graph_inputs = list()
        for i in range(len(self.inputs_name)):
            graph_inputs.append(
                helper.make_tensor_value_info(self.inputs_name[
                    i], DTYPE_ONNX_STR_MAP[self.inputs_dtype[i]],
                                              self.inputs_shape[i]))
        return graph_inputs

    def set_onnx_outputs(self):
        # Outputs are declared by name only; shape/type are left to
        # ONNX shape inference.
        graph_outputs = list()
        for i in range(len(self.outputs_name)):
            graph_outputs.append(onnx.ValueInfoProto(name=self.outputs_name[
                i]))
        return graph_outputs

    def _mk_onnx_graph(self, ver):
        """
        make onnx graph

        Builds a one-node model for self.op_type at opset `ver`, runs shape
        inference, saves it, and (optionally) checks it.
        """
        node = onnx.helper.make_node(
            self.op_type,
            inputs=self.inputs_name,
            outputs=self.outputs_name,
            **self.attrs, )
        graph_inputs = self.set_onnx_inputs()
        graph_outputs = self.set_onnx_outputs()
        graph = helper.make_graph(
            [node],
            self.name,
            graph_inputs,  # graph inputs
            graph_outputs,  # graph outputs
        )
        opset_imports = [helper.make_opsetid("", ver)]
        model = helper.make_model(
            graph, producer_name='onnx-example', opset_imports=opset_imports)
        model = onnx.shape_inference.infer_shapes(model)
        onnx.save(model,
                  os.path.join(self.pwd, self.name,
                               self.name + '_' + str(ver) + '.onnx'))
        if self.enable_onnx_checker:
            onnx.checker.check_model(model)

    def run(self):
        """
        1. make onnx model
        2. convert onnx to paddle
        3. use onnx to make res
        4. compare diff
        """
        self._mkdir()
        for place in self.places:
            paddle.set_device(place)
            onnx_res = {}
            paddle_res = {}
            # export onnx models and make onnx res
            for v in range(self.min_opset_version, self.max_opset_version + 1):
                self._mk_onnx_graph(ver=v)
                self._onnx_to_paddle(ver=v)
                onnx_res[str(v)] = self._mk_onnx_res(ver=v)
                paddle_res[str(v)] = self._mk_paddle_res(ver=v)
            for v in range(self.min_opset_version, self.max_opset_version + 1):
                compare(
                    onnx_res[str(v)],
                    paddle_res[str(v)],
                    delta=self.delta,
                    rtol=self.rtol)
tests/onnx/test_auto_scan_conv2d.py
0 → 100644
浏览文件 @
208a2821
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from
auto_scan_test
import
OPConvertAutoScanTest
from
hypothesis
import
reproduce_failure
import
hypothesis.strategies
as
st
import
onnx
from
onnx
import
helper
from
onnx
import
TensorProto
import
numpy
as
np
import
unittest
class TestConv2dConvert(OPConvertAutoScanTest):
    """
    ONNX op: Conv
    OPset version: 7~15
    """

    def add_ignore_test_case(self, configs):
        # Skip configs that x2paddle cannot convert yet.
        config, attrs = configs
        # Warning: SAME_UPPER and SAME_LOWER does not yet support dynamic shapes
        if "SAME" in attrs["auto_pad"] and -1 in config["inputs_shape"][0]:
            return True
        else:
            return False

    def sample_convert_config(self, draw):
        """Draw one random (config, attrs) pair for the Conv op."""
        # NCHW input and OIHW kernel, both rank 4.
        input_shape = draw(
            st.lists(
                st.integers(
                    min_value=20, max_value=30), min_size=4, max_size=4))
        kernel_size = draw(
            st.lists(
                st.integers(
                    min_value=1, max_value=7), min_size=4, max_size=4))
        data_format = "NCHW"
        groups = draw(st.integers(min_value=1, max_value=4))
        muti1 = draw(st.integers(min_value=1, max_value=4))
        # Keep channel counts consistent with the group convolution:
        # out_channels is a multiple of groups, in_channels = C_per_group * groups.
        kernel_size[0] = groups * muti1
        input_shape[1] = kernel_size[1] * groups
        strides = draw(
            st.lists(
                st.integers(
                    min_value=1, max_value=5), min_size=1, max_size=2))
        # Clamp strides so they never exceed the spatial kernel size.
        if len(strides) == 1:
            strides = strides[0]
            if strides > kernel_size[2]:
                strides = kernel_size[2]
            if strides > kernel_size[3]:
                strides = kernel_size[3]
            strides = [strides, strides]
        else:
            if strides[0] > kernel_size[2]:
                strides[0] = kernel_size[2]
            if strides[1] > kernel_size[3]:
                strides[1] = kernel_size[3]
        if draw(st.booleans()):
            # Explicit padding path: either symmetric 2-value padding
            # (expanded with leading zeros) or a full 4-value pads list.
            auto_pad = "NOTSET"
            padding = None
            if draw(st.booleans()):
                padding = draw(
                    st.lists(
                        st.integers(
                            min_value=1, max_value=5),
                        min_size=2,
                        max_size=2))
                padding = [0, 0] + padding
            else:
                padding = draw(
                    st.lists(
                        st.integers(
                            min_value=1, max_value=5),
                        min_size=4,
                        max_size=4))
        else:
            # auto_pad path: no explicit pads attribute.
            auto_pad = draw(
                st.sampled_from(
                    ["SAME_LOWER", "SAME_UPPER", "VALID", "NOTSET"]))
            padding = None
        dilations = draw(
            st.lists(
                st.integers(
                    min_value=1, max_value=3), min_size=2, max_size=2))
        config = {
            "op_names": ["Conv"],
            "test_data_shapes": [input_shape, kernel_size],
            "test_data_types": [['float32'], ['float32']],
            "inputs_shape": [[-1, input_shape[1], -1, -1], kernel_size],
            "min_opset_version": 7,
            "inputs_name": ["x", "W"],
            "outputs_name": ["y"],
            "delta": 1e-4,
            "rtol": 1e-4
        }
        attrs = {
            "auto_pad": auto_pad,
            "dilations": dilations,
            "group": groups,
            "kernel_shape": kernel_size[2:],
            "pads": padding,
            "strides": strides,
        }
        # if autopad equal SAME_UPPER and SAME_LOWER, dilations only support 1
        if "SAME" in auto_pad:
            attrs["dilations"] = [1, 1]
        return (config, attrs)

    def test(self):
        self.run_and_statis(max_examples=50)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
tests/onnx/test_auto_scan_unsqueeze_13.py
0 → 100644
浏览文件 @
208a2821
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from
auto_scan_test
import
OPConvertAutoScanTest
from
hypothesis
import
reproduce_failure
import
hypothesis.strategies
as
st
import
numpy
as
np
import
unittest
class TestUnsqueezeConvert(OPConvertAutoScanTest):
    """
    ONNX op: Unsqueeze
    OPset version: 13~15
    """

    def sample_convert_config(self, draw):
        """Draw one random (config, attrs) pair for opset-13+ Unsqueeze,
        where `axes` is a second graph input rather than an attribute."""
        input_shape = draw(
            st.lists(
                st.integers(
                    min_value=2, max_value=6), min_size=2, max_size=5))
        input_dtype = draw(
            st.sampled_from(["int32", "int64", "float32", "float64"]))

        def generator_axis():
            # Custom data generator for the `axes` input: pick axes that keep
            # the unsqueezed result within the rank limit for each input rank.
            axis = list()
            if len(input_shape) == 5:
                axis = [0]
            if len(input_shape) == 4:
                axis = [0, 1]
            if len(input_shape) == 3:
                axis = [1, 2, 3]
            if len(input_shape) == 2:
                if draw(st.booleans()):
                    axis = [0, 1, 2]
                else:
                    axis = [1, 3]
            axis = np.array(axis)
            # NOTE(review): leftover debug print — kept to preserve behavior,
            # but consider removing or switching to logging.
            print("axis:", axis)
            return axis

        config = {
            "op_names": ["Unsqueeze"],
            "test_data_shapes": [input_shape, generator_axis],
            "test_data_types": [[input_dtype], ["int64"]],
            "inputs_shape": [],
            "min_opset_version": 13,
            "max_opset_version": 15,
            "inputs_name": ["x", "axes"],
            "outputs_name": ["y"],
            "delta": 1e-4,
            "rtol": 1e-4,
            # checker disabled: the axes input is generated data, not an initializer
            # — TODO confirm this is why the ONNX checker is skipped here.
            "enable_onnx_checker": False,
        }
        attrs = {}
        return (config, attrs)

    def test(self):
        self.run_and_statis(max_examples=30)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
tests/onnx/test_auto_scan_unsqueeze_7.py
0 → 100644
浏览文件 @
208a2821
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from
auto_scan_test
import
OPConvertAutoScanTest
from
hypothesis
import
reproduce_failure
import
hypothesis.strategies
as
st
import
numpy
as
np
import
unittest
class TestUnsqueezeConvert(OPConvertAutoScanTest):
    """
    ONNX op: Unsqueeze
    OPset version: 7~12
    """

    def sample_convert_config(self, draw):
        """Draw one random (config, attrs) pair for pre-opset-13 Unsqueeze,
        where `axes` is a node attribute rather than an input."""
        input_shape = draw(
            st.lists(
                st.integers(
                    min_value=2, max_value=6), min_size=2, max_size=5))
        input_dtype = draw(
            st.sampled_from(["int32", "int64", "float32", "float64"]))
        # NOTE(review): this drawn scalar is unconditionally overwritten by
        # the rank-based lists below — it only consumes hypothesis entropy.
        axis = draw(
            st.integers(
                min_value=-len(input_shape), max_value=len(input_shape) - 1))
        # Pick axes that keep the unsqueezed result within the rank limit
        # for each input rank.
        if len(input_shape) == 5:
            axis = [0]
        if len(input_shape) == 4:
            axis = [0, 1]
        if len(input_shape) == 3:
            axis = [1, 2, 3]
        if len(input_shape) == 2:
            if draw(st.booleans()):
                axis = [0, 1, 2]
            else:
                axis = [1, 3]
        config = {
            "op_names": ["Unsqueeze"],
            "test_data_shapes": [input_shape],
            "test_data_types": [[input_dtype]],
            "inputs_shape": [input_shape],
            "min_opset_version": 7,
            "max_opset_version": 12,
            "inputs_name": ["x"],
            "outputs_name": ["y"],
            "delta": 1e-4,
            "rtol": 1e-4
        }
        attrs = {
            "axes": axis,
        }
        return (config, attrs)

    def test(self):
        self.run_and_statis(max_examples=30)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
x2paddle/convert.py
浏览文件 @
208a2821
...
@@ -95,6 +95,11 @@ def arg_parser():
...
@@ -95,6 +95,11 @@ def arg_parser():
"-co"
,
"-co"
,
default
=
True
,
default
=
True
,
help
=
"Turn on code optimization"
)
help
=
"Turn on code optimization"
)
parser
.
add_argument
(
"--enable_onnx_checker"
,
"-oc"
,
default
=
True
,
help
=
"Turn on onnx model checker"
)
parser
.
add_argument
(
parser
.
add_argument
(
"--disable_feedback"
,
"--disable_feedback"
,
"-df"
,
"-df"
,
...
@@ -263,7 +268,8 @@ def onnx2paddle(model_path,
...
@@ -263,7 +268,8 @@ def onnx2paddle(model_path,
convert_to_lite
=
False
,
convert_to_lite
=
False
,
lite_valid_places
=
"arm"
,
lite_valid_places
=
"arm"
,
lite_model_type
=
"naive_buffer"
,
lite_model_type
=
"naive_buffer"
,
disable_feedback
=
False
):
disable_feedback
=
False
,
enable_onnx_checker
=
True
):
# for convert_id
# for convert_id
time_info
=
int
(
time
.
time
())
time_info
=
int
(
time
.
time
())
if
not
disable_feedback
:
if
not
disable_feedback
:
...
@@ -286,7 +292,7 @@ def onnx2paddle(model_path,
...
@@ -286,7 +292,7 @@ def onnx2paddle(model_path,
from
x2paddle.decoder.onnx_decoder
import
ONNXDecoder
from
x2paddle.decoder.onnx_decoder
import
ONNXDecoder
from
x2paddle.op_mapper.onnx2paddle.onnx_op_mapper
import
ONNXOpMapper
from
x2paddle.op_mapper.onnx2paddle.onnx_op_mapper
import
ONNXOpMapper
model
=
ONNXDecoder
(
model_path
)
model
=
ONNXDecoder
(
model_path
,
enable_onnx_checker
)
mapper
=
ONNXOpMapper
(
model
)
mapper
=
ONNXOpMapper
(
model
)
mapper
.
paddle_graph
.
build
()
mapper
.
paddle_graph
.
build
()
logging
.
info
(
"Model optimizing ..."
)
logging
.
info
(
"Model optimizing ..."
)
...
@@ -478,7 +484,8 @@ def main():
...
@@ -478,7 +484,8 @@ def main():
convert_to_lite
=
args
.
to_lite
,
convert_to_lite
=
args
.
to_lite
,
lite_valid_places
=
args
.
lite_valid_places
,
lite_valid_places
=
args
.
lite_valid_places
,
lite_model_type
=
args
.
lite_model_type
,
lite_model_type
=
args
.
lite_model_type
,
disable_feedback
=
args
.
disable_feedback
)
disable_feedback
=
args
.
disable_feedback
,
enable_onnx_checker
=
args
.
enable_onnx_checker
)
elif
args
.
framework
==
"paddle2onnx"
:
elif
args
.
framework
==
"paddle2onnx"
:
logging
.
info
(
logging
.
info
(
"Paddle to ONNX tool has been migrated to the new github: https://github.com/PaddlePaddle/paddle2onnx"
"Paddle to ONNX tool has been migrated to the new github: https://github.com/PaddlePaddle/paddle2onnx"
...
...
x2paddle/decoder/onnx_decoder.py
浏览文件 @
208a2821
...
@@ -416,13 +416,14 @@ class ONNXGraph(Graph):
...
@@ -416,13 +416,14 @@ class ONNXGraph(Graph):
class
ONNXDecoder
(
object
):
class
ONNXDecoder
(
object
):
def
__init__
(
self
,
onnx_model
):
def
__init__
(
self
,
onnx_model
,
enable_onnx_checker
):
onnx_model
=
onnx
.
load
(
onnx_model
)
onnx_model
=
onnx
.
load
(
onnx_model
)
print
(
'model ir_version: {}, op version: {}'
.
format
(
print
(
'model ir_version: {}, op version: {}'
.
format
(
onnx_model
.
ir_version
,
onnx_model
.
opset_import
[
0
].
version
))
onnx_model
.
ir_version
,
onnx_model
.
opset_import
[
0
].
version
))
self
.
op_set
=
onnx_model
.
opset_import
[
0
].
version
self
.
op_set
=
onnx_model
.
opset_import
[
0
].
version
check_model
(
onnx_model
)
if
enable_onnx_checker
:
check_model
(
onnx_model
)
onnx_model
=
self
.
optimize_model_skip_op
(
onnx_model
)
onnx_model
=
self
.
optimize_model_skip_op
(
onnx_model
)
onnx_model
=
self
.
optimize_node_name
(
onnx_model
)
onnx_model
=
self
.
optimize_node_name
(
onnx_model
)
...
...
x2paddle/op_mapper/onnx2paddle/opset10.py
浏览文件 @
208a2821
...
@@ -15,6 +15,20 @@
...
@@ -15,6 +15,20 @@
from
.opset9
import
OpSet9
from
.opset9
import
OpSet9
def
print_mapping_info
(
func
):
def
run_mapping
(
*
args
,
**
kwargs
):
node
=
args
[
1
]
try
:
res
=
func
(
*
args
,
**
kwargs
)
except
:
raise
Exception
(
"convert failed node:{}, op_type is {}"
.
format
(
node
.
name
[
9
:],
node
.
layer_type
))
else
:
return
res
return
run_mapping
class
OpSet10
(
OpSet9
):
class
OpSet10
(
OpSet9
):
def
__init__
(
self
,
decoder
,
paddle_graph
):
def
__init__
(
self
,
decoder
,
paddle_graph
):
super
(
OpSet10
,
self
).
__init__
(
decoder
,
paddle_graph
)
super
(
OpSet10
,
self
).
__init__
(
decoder
,
paddle_graph
)
x2paddle/op_mapper/onnx2paddle/opset11.py
浏览文件 @
208a2821
...
@@ -15,6 +15,20 @@
...
@@ -15,6 +15,20 @@
from
.opset10
import
OpSet10
from
.opset10
import
OpSet10
def
print_mapping_info
(
func
):
def
run_mapping
(
*
args
,
**
kwargs
):
node
=
args
[
1
]
try
:
res
=
func
(
*
args
,
**
kwargs
)
except
:
raise
Exception
(
"convert failed node:{}, op_type is {}"
.
format
(
node
.
name
[
9
:],
node
.
layer_type
))
else
:
return
res
return
run_mapping
class
OpSet11
(
OpSet10
):
class
OpSet11
(
OpSet10
):
def
__init__
(
self
,
decoder
,
paddle_graph
):
def
__init__
(
self
,
decoder
,
paddle_graph
):
super
(
OpSet11
,
self
).
__init__
(
decoder
,
paddle_graph
)
super
(
OpSet11
,
self
).
__init__
(
decoder
,
paddle_graph
)
x2paddle/op_mapper/onnx2paddle/opset12.py
浏览文件 @
208a2821
...
@@ -15,6 +15,20 @@
...
@@ -15,6 +15,20 @@
from
.opset11
import
OpSet11
from
.opset11
import
OpSet11
def
print_mapping_info
(
func
):
def
run_mapping
(
*
args
,
**
kwargs
):
node
=
args
[
1
]
try
:
res
=
func
(
*
args
,
**
kwargs
)
except
:
raise
Exception
(
"convert failed node:{}, op_type is {}"
.
format
(
node
.
name
[
9
:],
node
.
layer_type
))
else
:
return
res
return
run_mapping
class
OpSet12
(
OpSet11
):
class
OpSet12
(
OpSet11
):
def
__init__
(
self
,
decoder
,
paddle_graph
):
def
__init__
(
self
,
decoder
,
paddle_graph
):
super
(
OpSet12
,
self
).
__init__
(
decoder
,
paddle_graph
)
super
(
OpSet12
,
self
).
__init__
(
decoder
,
paddle_graph
)
x2paddle/op_mapper/onnx2paddle/opset13.py
浏览文件 @
208a2821
...
@@ -15,6 +15,38 @@
...
@@ -15,6 +15,38 @@
from
.opset12
import
OpSet12
from
.opset12
import
OpSet12
def
print_mapping_info
(
func
):
def
run_mapping
(
*
args
,
**
kwargs
):
node
=
args
[
1
]
try
:
res
=
func
(
*
args
,
**
kwargs
)
except
:
raise
Exception
(
"convert failed node:{}, op_type is {}"
.
format
(
node
.
name
[
9
:],
node
.
layer_type
))
else
:
return
res
return
run_mapping
class
OpSet13
(
OpSet12
):
class
OpSet13
(
OpSet12
):
def
__init__
(
self
,
decoder
,
paddle_graph
):
def
__init__
(
self
,
decoder
,
paddle_graph
):
super
(
OpSet13
,
self
).
__init__
(
decoder
,
paddle_graph
)
super
(
OpSet13
,
self
).
__init__
(
decoder
,
paddle_graph
)
@
print_mapping_info
def
Unsqueeze
(
self
,
node
):
val_x
=
self
.
graph
.
get_input_node
(
node
,
idx
=
0
,
copy
=
True
)
axes
=
self
.
graph
.
get_input_node
(
node
,
idx
=
1
,
copy
=
True
)
# deal with scalar(0D) tensor
if
len
(
val_x
.
out_shapes
[
0
])
==
0
and
len
(
axes
.
out_shapes
[
0
])
==
1
:
self
.
paddle_graph
.
add_layer
(
'paddle.reshape'
,
inputs
=
{
"x"
:
val_x
.
name
},
outputs
=
[
node
.
name
],
shape
=
[
1
])
else
:
self
.
paddle_graph
.
add_layer
(
'paddle.unsqueeze'
,
inputs
=
{
"x"
:
val_x
.
name
,
"axis"
:
axes
.
name
},
outputs
=
[
node
.
name
])
x2paddle/op_mapper/onnx2paddle/opset14.py
浏览文件 @
208a2821
...
@@ -15,6 +15,20 @@
...
@@ -15,6 +15,20 @@
from
.opset13
import
OpSet13
from
.opset13
import
OpSet13
def print_mapping_info(func):
    """Decorator for OpSet mapper methods (called as func(self, node, ...)).

    Wraps conversion failures in a single, uniform error message that names
    the failing graph node and its ONNX op type.

    Args:
        func: the mapping method to wrap; ``args[1]`` must be the node.

    Raises:
        Exception: when the wrapped mapper raises; the original exception is
            kept as the cause (``raise ... from``) so its traceback survives.
    """
    import functools

    @functools.wraps(func)
    def run_mapping(*args, **kwargs):
        node = args[1]
        try:
            res = func(*args, **kwargs)
        except Exception as e:
            # Narrow from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate, and chain the cause so the real error is kept.
            # node.name[9:] drops a fixed 9-char name prefix — presumably
            # "x2paddle_" added by the decoder; TODO confirm.
            raise Exception("convert failed node:{}, op_type is {}".format(
                node.name[9:], node.layer_type)) from e
        else:
            return res

    return run_mapping
class OpSet14(OpSet13):
    """ONNX opset 14 mapper.

    Inherits every op conversion from OpSet13; opset-14-specific
    overrides are added here as needed.
    """

    def __init__(self, decoder, paddle_graph):
        super().__init__(decoder, paddle_graph)
x2paddle/op_mapper/onnx2paddle/opset15.py
浏览文件 @
208a2821
...
@@ -15,6 +15,20 @@
...
@@ -15,6 +15,20 @@
from
.opset14
import
OpSet14
from
.opset14
import
OpSet14
def print_mapping_info(func):
    """Decorator for OpSet mapper methods (called as func(self, node, ...)).

    Wraps conversion failures in a single, uniform error message that names
    the failing graph node and its ONNX op type.

    Args:
        func: the mapping method to wrap; ``args[1]`` must be the node.

    Raises:
        Exception: when the wrapped mapper raises; the original exception is
            kept as the cause (``raise ... from``) so its traceback survives.
    """
    import functools

    @functools.wraps(func)
    def run_mapping(*args, **kwargs):
        node = args[1]
        try:
            res = func(*args, **kwargs)
        except Exception as e:
            # Narrow from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate, and chain the cause so the real error is kept.
            # node.name[9:] drops a fixed 9-char name prefix — presumably
            # "x2paddle_" added by the decoder; TODO confirm.
            raise Exception("convert failed node:{}, op_type is {}".format(
                node.name[9:], node.layer_type)) from e
        else:
            return res

    return run_mapping
class OpSet15(OpSet14):
    """ONNX opset 15 mapper.

    Inherits every op conversion from OpSet14; opset-15-specific
    overrides are added here as needed.
    """

    def __init__(self, decoder, paddle_graph):
        super().__init__(decoder, paddle_graph)
x2paddle/op_mapper/onnx2paddle/opset7.py
浏览文件 @
208a2821
...
@@ -15,6 +15,38 @@
...
@@ -15,6 +15,38 @@
from
.opset_legacy
import
OpSet
from
.opset_legacy
import
OpSet
def print_mapping_info(func):
    """Decorator for OpSet mapper methods (called as func(self, node, ...)).

    Wraps conversion failures in a single, uniform error message that names
    the failing graph node and its ONNX op type.

    Args:
        func: the mapping method to wrap; ``args[1]`` must be the node.

    Raises:
        Exception: when the wrapped mapper raises; the original exception is
            kept as the cause (``raise ... from``) so its traceback survives.
    """
    import functools

    @functools.wraps(func)
    def run_mapping(*args, **kwargs):
        node = args[1]
        try:
            res = func(*args, **kwargs)
        except Exception as e:
            # Narrow from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate, and chain the cause so the real error is kept.
            # node.name[9:] drops a fixed 9-char name prefix — presumably
            # "x2paddle_" added by the decoder; TODO confirm.
            raise Exception("convert failed node:{}, op_type is {}".format(
                node.name[9:], node.layer_type)) from e
        else:
            return res

    return run_mapping
class OpSet7(OpSet):
    """ONNX opset 7 mapper.

    Inherits the legacy base OpSet and overrides the ops whose opset-7
    signature differs (e.g. Unsqueeze carries axes as an attribute).
    """

    def __init__(self, decoder, paddle_graph):
        super().__init__(decoder, paddle_graph)

    @print_mapping_info
    def Unsqueeze(self, node):
        """Map ONNX Unsqueeze (opset 7 form: axes is a node attribute)."""
        data = self.graph.get_input_node(node, idx=0, copy=True)
        axes = node.get_attr('axes')
        # A 0-D (scalar) tensor unsqueezed along axis 0 is turned into
        # shape [1] via reshape instead of unsqueeze.
        scalar_to_1d = (len(data.out_shapes[0]) == 0 and len(axes) == 1
                        and axes[0] == 0)
        if scalar_to_1d:
            self.paddle_graph.add_layer(
                'paddle.reshape',
                inputs={"x": data.name},
                outputs=[node.name],
                shape=[1])
            return
        self.paddle_graph.add_layer(
            'paddle.unsqueeze',
            inputs={"x": data.name},
            axis=axes,
            outputs=[node.name])
x2paddle/op_mapper/onnx2paddle/opset8.py
浏览文件 @
208a2821
...
@@ -15,6 +15,20 @@
...
@@ -15,6 +15,20 @@
from
.opset7
import
OpSet7
from
.opset7
import
OpSet7
def print_mapping_info(func):
    """Decorator for OpSet mapper methods (called as func(self, node, ...)).

    Wraps conversion failures in a single, uniform error message that names
    the failing graph node and its ONNX op type.

    Args:
        func: the mapping method to wrap; ``args[1]`` must be the node.

    Raises:
        Exception: when the wrapped mapper raises; the original exception is
            kept as the cause (``raise ... from``) so its traceback survives.
    """
    import functools

    @functools.wraps(func)
    def run_mapping(*args, **kwargs):
        node = args[1]
        try:
            res = func(*args, **kwargs)
        except Exception as e:
            # Narrow from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate, and chain the cause so the real error is kept.
            # node.name[9:] drops a fixed 9-char name prefix — presumably
            # "x2paddle_" added by the decoder; TODO confirm.
            raise Exception("convert failed node:{}, op_type is {}".format(
                node.name[9:], node.layer_type)) from e
        else:
            return res

    return run_mapping
class OpSet8(OpSet7):
    """ONNX opset 8 mapper.

    Inherits every op conversion from OpSet7; opset-8-specific
    overrides are added here as needed.
    """

    def __init__(self, decoder, paddle_graph):
        super().__init__(decoder, paddle_graph)
x2paddle/op_mapper/onnx2paddle/opset9.py
浏览文件 @
208a2821
...
@@ -15,6 +15,20 @@
...
@@ -15,6 +15,20 @@
from
.opset8
import
OpSet8
from
.opset8
import
OpSet8
def print_mapping_info(func):
    """Decorator for OpSet mapper methods (called as func(self, node, ...)).

    Wraps conversion failures in a single, uniform error message that names
    the failing graph node and its ONNX op type.

    Args:
        func: the mapping method to wrap; ``args[1]`` must be the node.

    Raises:
        Exception: when the wrapped mapper raises; the original exception is
            kept as the cause (``raise ... from``) so its traceback survives.
    """
    import functools

    @functools.wraps(func)
    def run_mapping(*args, **kwargs):
        node = args[1]
        try:
            res = func(*args, **kwargs)
        except Exception as e:
            # Narrow from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate, and chain the cause so the real error is kept.
            # node.name[9:] drops a fixed 9-char name prefix — presumably
            # "x2paddle_" added by the decoder; TODO confirm.
            raise Exception("convert failed node:{}, op_type is {}".format(
                node.name[9:], node.layer_type)) from e
        else:
            return res

    return run_mapping
class OpSet9(OpSet8):
    """ONNX opset 9 mapper.

    Inherits every op conversion from OpSet8; opset-9-specific
    overrides are added here as needed.
    """

    def __init__(self, decoder, paddle_graph):
        super().__init__(decoder, paddle_graph)
x2paddle/op_mapper/onnx2paddle/opset_legacy.py
浏览文件 @
208a2821
...
@@ -28,7 +28,7 @@ import copy
...
@@ -28,7 +28,7 @@ import copy
import
sys
import
sys
import
shutil
import
shutil
_logger
=
_logging
.
getLogger
(
__name__
)
_logger
=
_logging
.
getLogger
()
def
_const_weight_or_none
(
node
,
necessary
=
False
):
def
_const_weight_or_none
(
node
,
necessary
=
False
):
...
@@ -92,12 +92,15 @@ def _is_static_shape(shape):
...
@@ -92,12 +92,15 @@ def _is_static_shape(shape):
return
True
return
True
def
_get_same_padding
(
in_size
,
kernel_size
,
stride
):
def
_get_same_padding
(
in_size
,
kernel_size
,
stride
,
autopad
):
new_size
=
int
(
math
.
ceil
(
in_size
*
1.0
/
stride
))
new_size
=
int
(
math
.
ceil
(
in_size
*
1.0
/
stride
))
pad_size
=
(
new_size
-
1
)
*
stride
+
kernel_size
-
in_size
pad_size
=
(
new_size
-
1
)
*
stride
+
kernel_size
-
in_size
pad0
=
int
(
pad_size
/
2
)
pad0
=
int
(
pad_size
/
2
)
pad1
=
pad_size
-
pad0
pad1
=
pad_size
-
pad0
return
[
pad0
,
pad1
]
if
autopad
==
"SAME_UPPER"
:
return
[
pad0
,
pad1
]
if
autopad
==
"SAME_LOWER"
:
return
[
pad1
,
pad0
]
def
print_mapping_info
(
func
):
def
print_mapping_info
(
func
):
...
@@ -1541,9 +1544,9 @@ class OpSet():
...
@@ -1541,9 +1544,9 @@ class OpSet():
if
auto_pad
==
"SAME_UPPER"
or
auto_pad
==
"SAME_LOWER"
:
if
auto_pad
==
"SAME_UPPER"
or
auto_pad
==
"SAME_LOWER"
:
input_shape
=
val_x
.
out_shapes
[
0
]
input_shape
=
val_x
.
out_shapes
[
0
]
pad_h
=
_get_same_padding
(
input_shape
[
2
],
kernel_shape
[
0
],
pad_h
=
_get_same_padding
(
input_shape
[
2
],
kernel_shape
[
0
],
strides
[
0
])
strides
[
0
]
,
auto_pad
)
pad_w
=
_get_same_padding
(
input_shape
[
3
],
kernel_shape
[
1
],
pad_w
=
_get_same_padding
(
input_shape
[
3
],
kernel_shape
[
1
],
strides
[
1
])
strides
[
1
]
,
auto_pad
)
paddings
=
pad_h
+
pad_w
paddings
=
pad_h
+
pad_w
op_name
=
name_generator
(
"pool"
,
self
.
nn_name2id
)
op_name
=
name_generator
(
"pool"
,
self
.
nn_name2id
)
...
@@ -1968,9 +1971,9 @@ class OpSet():
...
@@ -1968,9 +1971,9 @@ class OpSet():
if
auto_pad
==
"SAME_UPPER"
or
auto_pad
==
"SAME_LOWER"
:
if
auto_pad
==
"SAME_UPPER"
or
auto_pad
==
"SAME_LOWER"
:
input_shape
=
val_x
.
out_shapes
[
0
]
input_shape
=
val_x
.
out_shapes
[
0
]
pad_h
=
_get_same_padding
(
input_shape
[
2
],
kernel_shape
[
0
],
pad_h
=
_get_same_padding
(
input_shape
[
2
],
kernel_shape
[
0
],
strides
[
0
])
strides
[
0
]
,
auto_pad
)
pad_w
=
_get_same_padding
(
input_shape
[
3
],
kernel_shape
[
1
],
pad_w
=
_get_same_padding
(
input_shape
[
3
],
kernel_shape
[
1
],
strides
[
1
])
strides
[
1
]
,
auto_pad
)
paddings
=
pad_h
+
pad_w
paddings
=
pad_h
+
pad_w
layer_attrs
=
{
layer_attrs
=
{
...
@@ -2197,13 +2200,19 @@ class OpSet():
...
@@ -2197,13 +2200,19 @@ class OpSet():
pads
=
node
.
get_attr
(
'pads'
,
[
0
]
*
(
convnd
*
2
))
pads
=
node
.
get_attr
(
'pads'
,
[
0
]
*
(
convnd
*
2
))
input_shape
=
val_x
.
out_shapes
[
0
]
input_shape
=
val_x
.
out_shapes
[
0
]
paddings
,
val_x
=
self
.
_pad_if_asymmetric
(
node
,
pads
,
val_x
)
paddings
=
np
.
array
(
pads
).
reshape
((
2
,
-
1
)).
transpose
().
astype
(
"int32"
)
paddings
=
paddings
.
flatten
().
tolist
()
if
auto_pad
==
"SAME_UPPER"
or
auto_pad
==
"SAME_LOWER"
:
if
auto_pad
in
[
"SAME_UPPER"
,
"SAME_LOWER"
]:
# Warning: SAME_UPPER and SAME_LOWER does not yet support dynamic shapes
if
input_shape
[
2
]
==
-
1
or
input_shape
[
3
]
==
-
1
:
_logger
.
warning
(
'SAME_UPPER and SAME_LOWER does not yet support dynamic shapes, the conversion result may have a diff!!!'
)
pad_h
=
_get_same_padding
(
input_shape
[
2
],
kernel_shape
[
0
],
pad_h
=
_get_same_padding
(
input_shape
[
2
],
kernel_shape
[
0
],
strides
[
0
])
strides
[
0
]
,
auto_pad
)
pad_w
=
_get_same_padding
(
input_shape
[
3
],
kernel_shape
[
1
],
pad_w
=
_get_same_padding
(
input_shape
[
3
],
kernel_shape
[
1
],
strides
[
1
])
strides
[
1
]
,
auto_pad
)
paddings
=
pad_h
+
pad_w
paddings
=
pad_h
+
pad_w
layer_inputs
=
{
'x'
:
val_x
if
isinstance
(
val_x
,
str
)
else
val_x
.
name
}
layer_inputs
=
{
'x'
:
val_x
if
isinstance
(
val_x
,
str
)
else
val_x
.
name
}
...
...
x2paddle/optimizer/optimizer.py
浏览文件 @
208a2821
...
@@ -58,7 +58,7 @@ class GraphOptimizer(object):
...
@@ -58,7 +58,7 @@ class GraphOptimizer(object):
before_len
=
len
(
graph
.
layers
)
before_len
=
len
(
graph
.
layers
)
pass_
.
apply
(
graph
)
pass_
.
apply
(
graph
)
after_len
=
len
(
graph
.
layers
)
after_len
=
len
(
graph
.
layers
)
if
after_len
<
=
before_len
:
if
after_len
<
before_len
:
show_pass_log
=
True
show_pass_log
=
True
if
before_len
==
after_len
:
if
before_len
==
after_len
:
break
break
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录