PaddlePaddle / X2Paddle
Commit 4ac94c64 (unverified)
Authored Sep 11, 2019 by Jason; committed via GitHub on Sep 11, 2019
Merge pull request #136 from Channingss/develop
support transformer & fix some bug
Parents: 75c9a63b, b0a7761a
Showing 10 changed files with 398 additions and 236 deletions (+398 −236)
README.md                                                         +1    -1
setup.py                                                          +6    -1
x2paddle/convert.py                                               +1    -1
x2paddle/decoder/onnx_decoder.py                                  +61   -127
x2paddle/onnx_infer.py                                            +44   -0
x2paddle/op_mapper/onnx_custom_layer/InstanceNormalization.py     +7    -5
x2paddle/op_mapper/onnx_custom_layer/__init__.py                  +11   -0
x2paddle/op_mapper/onnx_custom_layer/register.py                  +2    -1
x2paddle/op_mapper/onnx_directly_map.py                           +8    -9
x2paddle/op_mapper/onnx_op_mapper.py                              +257  -91
README.md
@@ -15,7 +15,7 @@ paddlepaddle >= 1.5.0
 **按需安装以下依赖**
 tensorflow : tensorflow == 1.14.0
 caffe : 无
-onnx : onnx == 1.5.0  pytorch == 1.1.0
+onnx : onnx == 1.5.0  onnxruntime == 0.4.0
 ## 安装
 ### 安装方式一(推荐)
setup.py
@@ -23,4 +23,9 @@ setuptools.setup(
         "Operating System :: OS Independent",
     ],
     license='Apache 2.0',
-    entry_points={'console_scripts': ['x2paddle=x2paddle.convert:main']})
+    entry_points={
+        'console_scripts': [
+            'x2paddle=x2paddle.convert:main',
+            'onnx_infer=x2paddle.onnx_infer:main'
+        ]
+    })
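The new entry point registers a second console script, onnx_infer, backed by the new x2paddle/onnx_infer.py module shown further down. A rough sketch of what the script resolves to once the package is installed (the save_dir path here is hypothetical, and the directory must already contain the files onnx_infer.py expects):

    # approximately what running `onnx_infer -s <dir>` does: call main() from x2paddle.onnx_infer
    import sys
    from x2paddle.onnx_infer import main as onnx_infer_main

    sys.argv = ["onnx_infer", "--save_dir", "/tmp/x2paddle_onnx"]  # hypothetical directory
    onnx_infer_main()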
x2paddle/convert.py
@@ -154,7 +154,7 @@ def onnx2paddle(model_path, save_dir):
     model = ONNXDecoder(model_path)
     from x2paddle.op_mapper.onnx_op_mapper import ONNXOpMapper
-    mapper = ONNXOpMapper(model)
+    mapper = ONNXOpMapper(model, save_dir)
     from x2paddle.optimizer.onnx_optimizer import ONNXOptimizer
     optimizer = ONNXOptimizer(mapper)
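ONNXOpMapper now also receives save_dir, so the mapper can place its onnxruntime artifacts next to the conversion output. A condensed sketch of the resulting onnx2paddle flow, pieced together from the hunk above (the real function contains additional steps):

    # sketch of the ONNX conversion path after this change
    def onnx2paddle(model_path, save_dir):
        from x2paddle.decoder.onnx_decoder import ONNXDecoder
        model = ONNXDecoder(model_path)

        from x2paddle.op_mapper.onnx_op_mapper import ONNXOpMapper
        mapper = ONNXOpMapper(model, save_dir)   # save_dir is now forwarded to the mapper

        from x2paddle.optimizer.onnx_optimizer import ONNXOptimizer
        optimizer = ONNXOptimizer(mapper)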
x2paddle/decoder/onnx_decoder.py
@@ -17,7 +17,6 @@ from x2paddle.core.fluid_code import FluidCode
 from onnx.checker import ValidationError
 from onnx.checker import check_model
 from onnx.utils import polish_model
-from onnx.version_converter import convert_version
 from onnx import helper
 from onnx.helper import get_attribute_value, make_attribute
 from onnx.shape_inference import infer_shapes
@@ -26,9 +25,11 @@ from onnx.numpy_helper import to_array
 from onnx import AttributeProto, TensorProto, GraphProto
 from collections import OrderedDict as Dict
 import onnx
+from onnx.helper import ValueInfoProto
 import numpy as np
 from copy import deepcopy
 import logging as _logging
+import os
 
 default_op_domain = 'ai.onnx'
 _logger = _logging.getLogger(__name__)
@@ -47,6 +48,7 @@ class ONNXGraphNode(GraphNode):
         self.weight_inputs = list()
         self.out_shapes = list()
         self.dtype = None
+        self.which_child = {}
 
     def get_attr_map(self):
         """
@@ -60,10 +62,9 @@ class ONNXGraphNode(GraphNode):
     @property
     def value(self):
         assert 'Constant' in self.layer_type, "Only Constant | ConstantOfShape node has value."
-        attr = self.layer.attribute['value']
         if 'value' not in self.attr_map:
             return None
-        return self.attr_map[name]
+        return self.attr_map['value']
 
     def get_attribute_value2(self, attr):
         """
@@ -105,29 +106,39 @@ class ONNXGraphDataNode(GraphNode):
         self.fluid_code = FluidCode()
         self.weight = None
         self.embeded_as = None
+        self.which_child = {}
 
     @property
     def out_shapes(self):
         if isinstance(self.layer, ValueInfoProto):
             values = self.layer.type.tensor_type.shape.dim
             out_shapes = list()
             out_shapes.append([dim.dim_value for dim in values])
             return out_shapes
         else:
             values = self.layer.dims
             out_shapes = list()
             out_shapes.append(values)
             return out_shapes
 
     @property
     def dtype(self):
         if isinstance(self.layer, ValueInfoProto):
             dtype = self.layer.type.tensor_type.elem_type
             return TENSOR_TYPE_TO_NP_TYPE[dtype]
         else:
             dtype = self.layer.data_type
             return TENSOR_TYPE_TO_NP_TYPE[dtype]
 
 
 class ONNXGraph(Graph):
-    def __init__(self, graph, onnx_model):
-        super(ONNXGraph, self).__init__(graph)
+    def __init__(self, onnx_model):
+        super(ONNXGraph, self).__init__(onnx_model.graph)
         self.onnx_model = onnx_model
         self.initializer = {}
         self.place_holder_nodes = list()
         self.get_place_holder_nodes()
-        self.value_infos = self.inferred_model_value_info(graph)
+        self.value_infos = self.inferred_model_value_info(self.model)
         self.results_of_inference = dict()
 
     def get_inner_nodes(self):
@@ -165,22 +176,9 @@ class ONNXGraph(Graph):
         """
         build topo_sort of ONNX model
         """
-        data_node = self.place_holder_nodes[0]
-        value_info = self.value_infos[data_node]
-        input_shape = value_info['shape']
-        self.get_results_of_inference(self.onnx_model, input_shape)
         for layer in self.model.node:
             node = ONNXGraphNode(layer)
             self.node_map[layer.name] = node
-            for opt in layer.output:
-                if opt in self.value_infos:
-                    value_info = self.value_infos[opt]
-                    node.dtype = value_info['dtype']
-                    node.out_shapes.append(value_info['shape'])
-                else:
-                    _, dtype, shape = self.get_dynamic_shape(opt)
-                    node.dtype = dtype
-                    node.out_shapes.append(shape)
 
         for layer in self.model.input:
             if layer.name not in self.node_map:
@@ -191,20 +189,40 @@ class ONNXGraph(Graph):
                     is_global_input=is_place_holder)
 
         #set data node's weight
-        for name, weight in self.graph_weights(self.model):
+        for initializer in self.model.initializer:
+            name = initializer.name
+            weight = to_array(initializer)
             if name in self.node_map:
                 if isinstance(self.node_map[name], ONNXGraphDataNode):
                     self.node_map[name].weight = weight
                     self.node_map[name].embeded_as = []
+            else:
+                self.node_map[name] = ONNXGraphDataNode(initializer,
                                                         layer_name=name,
                                                         is_global_input=False)
+                self.node_map[name].weight = weight
+                self.node_map[name].embeded_as = []
 
         #generate connection between nodes for topo
         for layer_name, node in self.node_map.items():
             if isinstance(node, ONNXGraphNode):
                 for idx, in_node in enumerate(node.layer.input):
                     if in_node not in self.node_map:
-                        raise Exception(
-                            'input[{}] of node[{}] does not exist in node_map'.format(
-                                in_node, layer_name))
+                        flag = 0
+                        for nd in self.model.node:
+                            for idx, opt in enumerate(nd.output):
+                                if opt == in_node:
+                                    self.connect(nd.name, layer_name)
+                                    flag = 1
+                                    node.which_child[nd.name] = idx
+                                    self.node_map[nd.name].index = 0
+                                    break
+                            if flag == 1:
+                                break
+                        if flag == 0:
+                            raise Exception(
+                                'input[{}] of node[{}] does not exist in node_map'.format(
+                                    in_node, layer_name))
                     else:
                         self.connect(in_node, layer_name)
 
         #generate topo
@@ -212,13 +230,16 @@
         self.input_nodes = self.place_holder_nodes
 
-    def get_nodes(self, names, copy=False):
-        """
-        get nodes by more than one name
-        """
-        nodes = []
-        for name in names:
-            nodes.add(self.get_node(name, copy=copy))
+    def get_input_node(self, node, idx=0, copy=False):
+        if len(node.which_child) == 0:
+            ipt_node = super(ONNXGraph, self).get_node(node.inputs[idx], copy)
+            return ipt_node
+        else:
+            ipt_node = super(ONNXGraph, self).get_node(node.inputs[idx], copy)
+            if ipt_node.layer_name in node.which_child:
+                ipt_node.index = node.which_child[ipt_node.layer_name]
+            return ipt_node
 
     def graph_weights(self, graph):
         """
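The two hunks above carry the multi-output handling: while building the graph, any input produced by another node's non-first output is recorded in the consuming node's which_child map (producer name -> output index), and the new get_input_node() uses that map to set index on the returned producer node. An illustrative sketch of the lookup, with hypothetical node names, assuming a graph already built by ONNXGraph.build():

    # illustrative only: resolving the first input of a node that consumes output #1 of 'lstm_0'
    ipt_node = graph.get_input_node(node, idx=0)          # graph is an ONNXGraph instance
    if ipt_node.layer_name in node.which_child:           # e.g. {'lstm_0': 1}
        print(ipt_node.layer_name, ipt_node.index)        # -> lstm_0 1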
@@ -270,50 +291,6 @@ class ONNXGraph(Graph):
         }
         return value_info
 
-    def get_results_of_inference(self, model, shape):
-        try:
-            import torch
-            version = torch.__version__
-            if '1.1.0' not in version:
-                print("your model have dynamic graph, torch==1.1.0 is required")
-                return
-        except:
-            print("your model have dynamic graph, we use caff2 to inference graph, please use \"pip install torch==1.1.0\".")
-            return
-        from x2paddle.decoder.onnx_backend import prepare
-
-        np_images = np.random.rand(shape[0], shape[1], shape[2], shape[3]).astype('float32')
-        outputs = []
-        for node in model.graph.node:
-            value_info = helper.make_tensor_value_info(node.name, TensorProto.UNDEFINED, [])
-            outputs.append(value_info)
-
-        while len(outputs) > 0:
-            tmp_outputs = outputs[:254]
-            model.graph.ClearField('output')
-            model.graph.output.MergeFrom(tmp_outputs)
-            prepared_backend = prepare(model, device='CPU', no_check_UNSAFE=True)
-            res = prepared_backend.run(inputs=np_images)
-            for idx, info in enumerate(tmp_outputs):
-                self.results_of_inference[info.name] = res[idx]
-            outputs = outputs[254:]
-        return
-
-    def get_dynamic_shape(self, layer):
-        """
-        get dynamic shape from caffe2.backend
-        """
-        output = self.results_of_inference[layer]
-        return output.tolist(), output.dtype, output.shape
-
 
 class ONNXDecoder(object):
     def __init__(self, onnx_model):
@@ -334,8 +311,8 @@ class ONNXDecoder(object):
         self.standardize_variable_name(model.graph)
         self.model = model
-        graph_def = model.graph
-        self.onnx_graph = ONNXGraph(graph_def, model)
+        graph = model.graph
+        self.onnx_graph = ONNXGraph(model)
         self.onnx_graph.build()
 
     def build_value_refs(self, nodes):
@@ -476,7 +453,7 @@ class ONNXDecoder(object):
         if name == '':
             raise ValueError('name should not be empty')
-        for s in ' .*?\\/-:':  #
+        for s in ' .*?\\/-:':
             name = name.replace(s, '_')
         return '_' + name
@@ -499,46 +476,3 @@ class ONNXDecoder(object):
             node.input[i] = self.make_variable_name(node.input[i])
         for i in range(len(node.output)):
             node.output[i] = self.make_variable_name(node.output[i])
-
-    def split_model(self, model, outputs=None):
-        """
-        Takes a model and changes its outputs.
-        """
-        if outputs is None:
-            raise RuntimeError("outputs is None")
-        if outputs == model.graph.output[0].name:
-            return model
-        nodes = model.graph.node
-        keep_nodes = []
-
-        # all the nodes we need to keep.
-        for node in nodes:
-            if outputs in node.output:
-                keep_nodes.append(node)
-                break
-            keep_nodes.append(node)
-
-        infer_shapes = onnx.shape_inference.infer_shapes(model)
-        var_out = []
-        for value_info in infer_shapes.graph.value_info:
-            if value_info.name == outputs:
-                var_out.append(value_info)
-                break
-
-        graph = helper.make_graph(keep_nodes, model.graph.name, model.graph.input,
-                                  var_out, model.graph.initializer)
-        onnx_model = helper.make_model(graph)
-        onnx_model.ir_version = model.ir_version
-        onnx_model.producer_name = model.producer_name
-        onnx_model.producer_version = model.producer_version
-        onnx_model.domain = model.domain
-        onnx_model.model_version = model.model_version
-        onnx_model.doc_string = model.doc_string
-
-        if len(onnx_model.graph.input) != len(model.graph.input):
-            raise RuntimeError("Input mismatch {} != {}".format(
-                len(onnx_model.input), len(model.input)))
-        return onnx_model
x2paddle/onnx_infer.py  (new file, mode 100644)
+import onnxruntime as rt
+import os
+import sys
+import numpy as np
+import onnx
+import json
+import argparse
+from six import text_type as _text_type
+
+
+def arg_parser():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--save_dir",
+                        "-s",
+                        type=_text_type,
+                        default=None,
+                        help="define save_dir")
+    return parser
+
+
+def main():
+    parser = arg_parser()
+    args = parser.parse_args()
+    save_dir = args.save_dir
+    model_dir = os.path.join(save_dir, 'onnx_model_infer.onnx')
+    data_dir = os.path.join(save_dir, 'input_data.npy')
+    model = onnx.load(model_dir)
+    sess = rt.InferenceSession(model_dir)
+    inputs = np.load(data_dir, allow_pickle=True)
+    inputs_dict = {}
+    for i, ipt in enumerate(inputs):
+        inputs_dict[sess.get_inputs()[i].name] = ipt
+    res = sess.run(None, input_feed=inputs_dict)
+    for idx, value_info in enumerate(model.graph.output):
+        np.save(os.path.join(save_dir, value_info.name), res[idx])
+
+
+if __name__ == "__main__":
+    main()
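onnx_infer.py is a small standalone runner: it loads <save_dir>/onnx_model_infer.onnx and <save_dir>/input_data.npy, feeds the saved arrays to an onnxruntime InferenceSession in input order, and writes one .npy file per graph output back into save_dir. Presumably the ONNX op mapper (whose diff is collapsed at the bottom of this page) prepares those two files to recover dynamic shapes, replacing the removed caffe2/torch-based get_results_of_inference path. A usage sketch, assuming save_dir already contains the two expected files (the path is hypothetical):

    # run the new console script and collect its per-output .npy results
    import os
    import subprocess
    import numpy as np

    save_dir = "/tmp/x2paddle_onnx"   # hypothetical; must hold onnx_model_infer.onnx and input_data.npy
    subprocess.check_call(["onnx_infer", "--save_dir", save_dir])

    outputs = {f: np.load(os.path.join(save_dir, f), allow_pickle=True)
               for f in os.listdir(save_dir)
               if f.endswith(".npy") and f != "input_data.npy"}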
x2paddle/op_mapper/onnx_custom_layer/InstanceNormalization.py
@@ -22,8 +22,9 @@ def InstanceNormalization_shape(input_shape):
 def InstanceNormalization_layer(inputs, name=None):
     # TODO(lvmengsi@baidu.com): Check the accuracy when using fluid.layers.layer_norm.
     epsilon = 1e-5
-    mean = fluid.layers.reduce_mean(inputs, dim=[2, 3], keep_dim=True)
-    var = fluid.layers.reduce_mean(fluid.layers.square(inputs - mean),
+    input_ = inputs[0]
+    mean = fluid.layers.reduce_mean(input_, dim=[2, 3], keep_dim=True)
+    var = fluid.layers.reduce_mean(fluid.layers.square(input_ - mean),
                                    dim=[2, 3],
                                    keep_dim=True)
     if name is not None:
@@ -36,13 +37,13 @@ def InstanceNormalization_layer(inputs, name=None):
         initializer=fluid.initializer.Constant(0.0),
         trainable=True)
-    scale = fluid.layers.create_parameter(attr=scale_param,
-                                          shape=inputs.shape[1:2],
-                                          dtype="float32")
-    offset = fluid.layers.create_parameter(attr=offset_param,
-                                           shape=inputs.shape[1:2],
-                                           dtype="float32")
-    tmp = fluid.layers.elementwise_mul(x=(inputs - mean), y=scale, axis=1)
+    scale = fluid.layers.create_parameter(attr=scale_param,
+                                          shape=input_.shape[1:2],
+                                          dtype="float32")
+    offset = fluid.layers.create_parameter(attr=offset_param,
+                                           shape=input_.shape[1:2],
+                                           dtype="float32")
+    tmp = fluid.layers.elementwise_mul(x=(input_ - mean), y=scale, axis=1)
     tmp = tmp / fluid.layers.sqrt(var + epsilon)
     tmp = fluid.layers.elementwise_add(tmp, offset, axis=1)
     return tmp
@@ -56,4 +57,5 @@ def InstanceNormalization_weights(name, data=None):
 register(kind='InstanceNormalization',
          shape=InstanceNormalization_shape,
          layer=InstanceNormalization_layer,
+         child_func=None,
          weights=InstanceNormalization_weights)
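The change only switches the layer to operate on inputs[0] (the node's single tensor input); the math is unchanged instance normalization over the spatial dims with epsilon 1e-5. A NumPy sketch of the same computation the fluid code above builds:

    import numpy as np

    def instance_norm_reference(x, scale, offset, epsilon=1e-5):
        # x: (N, C, H, W); scale, offset: (C,) -- broadcast over the channel dim, like axis=1 above
        mean = x.mean(axis=(2, 3), keepdims=True)
        var = ((x - mean) ** 2).mean(axis=(2, 3), keepdims=True)
        normed = (x - mean) / np.sqrt(var + epsilon)
        return normed * scale[None, :, None, None] + offset[None, :, None, None]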
x2paddle/op_mapper/onnx_custom_layer/__init__.py
@@ -95,6 +95,17 @@ def make_custom_layer(node):
     return inspect.getsource(layer_func), layer_func
 
 
+def make_custom_child_func(node):
+    """ get the code which implement the custom layer function
+    """
+    layer_type = node.layer_type
+    child_func = custom_layers[layer_type]['child_func']
+    if child_func is None:
+        return None, child_func
+    import inspect
+    return inspect.getsource(child_func), child_func
+
+
 def deal_weights(node, data=None):
     """ deal the weights of the custom layer
     """
x2paddle/op_mapper/onnx_custom_layer/register.py
@@ -17,7 +17,7 @@
 g_custom_layers = {}
 
 
-def register(kind, shape, layer, weights):
+def register(kind, shape, layer, child_func, weights):
     """ register a custom layer or a list of custom layers
 
     Args:
@@ -48,6 +48,7 @@ def register(kind, shape, layer, weights):
     g_custom_layers[k] = {
         'shape': shape,
         'layer': layer,
+        'child_func': child_func,
         'weights': weights
     }
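Every registration must now pass child_func explicitly (None when the custom layer needs no auxiliary function), as the InstanceNormalization registration above does. A minimal sketch of registering a hypothetical custom layer against the new signature; the three stub functions are placeholders, not part of the repository:

    from x2paddle.op_mapper.onnx_custom_layer.register import register

    def MyOp_shape(input_shape):            # hypothetical shape inference
        return input_shape

    def MyOp_layer(inputs, name=None):      # hypothetical fluid code builder
        return inputs[0]

    def MyOp_weights(name, data=None):      # hypothetical weight handler
        return []

    register(kind='MyOp', shape=MyOp_shape, layer=MyOp_layer,
             child_func=None, weights=MyOp_weights)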
x2paddle/op_mapper/onnx_directly_map.py
@@ -29,9 +29,6 @@ default_op_mapping = {
-    'Gather': ['gather', ['X'], ['Out'], dict(axis='')],
-    'Shape': ['shape', ['X'], ['Out']],
-    'Mul': ['elementwise_mul', ['X', 'Y'], ['Out'], dict(), dict(axis=-1)],
     'Clip': ['clip', ['X'], ['Out'],
              dict(),
@@ -42,6 +39,7 @@ default_op_mapping = {
                 dtype=_np.uint8).view(_np.float32)),
         )
     ],
+    'Ceil': ['ceil', ['X'], ['Out']],
     'ReduceMean': [
         'reduce_mean', ['X'], ['Out'],
         dict(axes='dim', keepdims='keep_dim'),
@@ -52,7 +50,11 @@ default_op_mapping = {
         dict(axes='dim', keepdims='keep_dim'),
         dict(keep_dim=1)
     ],
+    'ReduceMin': [
+        'reduce_min', ['X'], ['Out'],
+        dict(axes='dim', keepdims='keep_dim'),
+        dict(keep_dim=1)
+    ],
     #active function
     'Relu': ['relu', ['X'], ['Out']],
     'LeakyRelu': ['leaky_relu', ['X'], ['Out'],
@@ -66,9 +68,6 @@ default_op_mapping = {
     ],
     'Tanh': ['tanh', ['X'], ['Out']],
     'Sigmoid': ['sigmoid', ['X'], ['Out']],
-    'Pow': ['elementwise_pow', ['X', 'Y'], ['Out'],
-            dict(),
-            dict(axis=-1)],  # TODO: pow for scalar exponent
     'HardSigmoid': ['hard_sigmoid', ['X'], ['Out'],
                     dict(alpha='slope', beta='offset'),
@@ -78,8 +77,8 @@ default_op_mapping = {
     'Softplus': ['softplus', ['X'], ['Out']],
     'Exp': ['exp', ['X'], ['Out']],
-    'Softmax': ['softmax', ['X'], ['Out'], dict(axis=''), dict(axis=1)],
+    'Softmax': ['softmax', ['X'], ['Out'], dict(), dict(axis=1)],
+    'Sqrt': ['sqrt', ['X'], ['Out']],
 }
 
 activefunc_op_mapping = {
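Each default_op_mapping entry appears to follow the pattern [fluid_op, input_keys, output_keys, attribute-name renames, default attribute values], with the last two dicts optional; the consuming logic lives in onnx_op_mapper.py, whose diff is collapsed below, so the unpacking here is an assumption. A sketch of how the new ReduceMin entry would be read:

    # illustrative unpacking of a default_op_mapping entry (padding the optional dicts)
    info = ['reduce_min', ['X'], ['Out'], dict(axes='dim', keepdims='keep_dim'), dict(keep_dim=1)]
    fluid_op, input_keys, output_keys, attr_renames, attr_defaults = (info + [dict(), dict()])[:5]
    assert fluid_op == 'reduce_min' and attr_renames['axes'] == 'dim' and attr_defaults['keep_dim'] == 1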
x2paddle/op_mapper/onnx_op_mapper.py
(This diff is collapsed on the original page and its contents are not shown; per the summary above it accounts for +257 −91.)