PaddlePaddle / X2Paddle
Unverified commit 2d558996, authored on Sep 23, 2019 by Jason; committed via GitHub on Sep 23, 2019.
Merge pull request #152 from Channingss/develop
support for expand, fix bug of inference by onnxruntime
Parents: 31140398, 2ec20d64
Showing 4 changed files with 117 additions and 122 deletions (+117, -122).
x2paddle/decoder/onnx_decoder.py         +32  -23
x2paddle/onnx_infer.py                    +3   -5
x2paddle/op_mapper/onnx_directly_map.py   +0   -7
x2paddle/op_mapper/onnx_op_mapper.py     +82  -87
x2paddle/decoder/onnx_decoder.py

@@ -44,8 +44,6 @@ class ONNXGraphNode(GraphNode):
        self.layer_type = layer.op_type
        self.fluid_code = FluidCode()
        self.attr_map = self.get_attr_map()
        self.dtype_map = {1: "float32", 3: "int32", 9: "int64"}
        self.weight_inputs = list()
        self.out_shapes = list()
        self.dtype = None
        self.which_child = {}

@@ -206,7 +204,20 @@ class ONNXGraph(Graph):
        #generate connection between nodes for topo
        for layer_name, node in self.node_map.items():
            if isinstance(node, ONNXGraphNode):
                self.build_connection(layer_name, node)
        #generate topo
        super(ONNXGraph, self).build()
        self.input_nodes = self.place_holder_nodes

    def build_connection(self, layer_name, node):
        """
        find connection for nodes
        """
        for idx, in_node in enumerate(node.layer.input):
            if in_node == '':
                continue
            if in_node not in self.node_map:
                flag = 0
                for nd in self.model.node:

@@ -221,14 +232,10 @@ class ONNXGraph(Graph):
                        break
                if flag == 0:
                    raise Exception('input[{}] of node[{}] does not exist in node_map'.format(
                        in_node, layer_name))
            else:
                self.connect(in_node, layer_name)
        #generate topo
        super(ONNXGraph, self).build()
        self.input_nodes = self.place_holder_nodes

    def get_input_node(self, node, idx=0, copy=False):
        if len(node.which_child) == 0:

@@ -450,7 +457,6 @@ class ONNXDecoder(object):
        """
        make a valid code name for ParamAttr
        """
        if name == '':
            raise ValueError('name should not be empty')
        for s in ' .*?\\/-:':

@@ -473,6 +479,9 @@ class ONNXDecoder(object):
            node.name = node.output[0]
            node.name = self.make_variable_name(node.name)
            for i in range(len(node.input)):
                if node.input[i] == '':
                    continue
                else:
                    node.input[i] = self.make_variable_name(node.input[i])
            for i in range(len(node.output)):
                node.output[i] = self.make_variable_name(node.output[i])
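In the last hunk above, the renaming pass now skips empty input names: in ONNX, an empty string in node.input marks an omitted optional input, so it has to be left untouched rather than sanitized. A minimal standalone sketch of that guard follows; the make_variable_name below is a simplified stand-in for the decoder's method, shown only for illustration.

# Sketch: rename ONNX node inputs while skipping empty (omitted optional) inputs.
# make_variable_name here is a simplified stand-in, not the actual X2Paddle method.
def make_variable_name(name):
    if name == '':
        raise ValueError('name should not be empty')
    for s in ' .*?\\/-:':
        name = name.replace(s, '_')
    return 'x2paddle_' + name

inputs = ['conv1.weight', '', 'data-0']  # '' marks an omitted optional input
renamed = [make_variable_name(n) if n != '' else '' for n in inputs]
print(renamed)  # ['x2paddle_conv1_weight', '', 'x2paddle_data_0']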
x2paddle/onnx_infer.py

@@ -34,16 +34,14 @@ def main():
    save_dir = args.save_dir
    model_dir = os.path.join(save_dir, 'onnx_model_infer.onnx')
    data_dir = os.path.join(save_dir, 'input_data.npy')
    model = onnx.load(model_dir)
    sess = rt.InferenceSession(model_dir)
    inputs = np.load(data_dir, allow_pickle=True)
    inputs_dict = {}
    for i, ipt in enumerate(inputs):
        inputs_dict[sess.get_inputs()[i].name] = ipt
    for ipt in sess.get_inputs():
        data_dir = os.path.join(save_dir, ipt.name + '.npy')
        inputs_dict[ipt.name] = np.load(data_dir, allow_pickle=True)
    res = sess.run(None, input_feed=inputs_dict)
    for idx, value_info in enumerate(model.graph.output):
        np.save(os.path.join(save_dir, value_info.name), res[idx])
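The onnx_infer.py change above is the onnxruntime fix named in the commit message: instead of loading one stacked input_data.npy and pairing arrays with graph inputs by position, each input is read from its own <name>.npy file and fed by name, which is the form onnxruntime's input_feed expects. A minimal sketch of that feeding pattern, assuming the model file and per-input .npy files were already written to save_dir beforehand (paths here are illustrative):

# Sketch of per-input-name feeding; save_dir and the file names are assumptions,
# matching what the mapper is expected to have written out earlier.
import os
import numpy as np
import onnxruntime as rt

save_dir = '/tmp/onnx_infer'
sess = rt.InferenceSession(os.path.join(save_dir, 'onnx_model_infer.onnx'))

inputs_dict = {}
for ipt in sess.get_inputs():
    # name-based lookup cannot mismatch inputs, unlike a single positional array
    inputs_dict[ipt.name] = np.load(
        os.path.join(save_dir, ipt.name + '.npy'), allow_pickle=True)

res = sess.run(None, input_feed=inputs_dict)  # None -> fetch all outputs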
x2paddle/op_mapper/onnx_directly_map.py

@@ -26,8 +26,6 @@ default_op_mapping_field_values['OUTPUT_PERM'] = None
default_op_mapping_field_values['FILL_NAME_FIELD'] = True

default_op_mapping = {
    'Gather': ['gather', ['X'], ['Out'], dict(axis='')],
    'Shape': ['shape', ['X'], ['Out']],
    'Clip': ['clip', ['X'], ['Out'],

@@ -81,11 +79,6 @@ default_op_mapping = {
    'Sqrt': ['sqrt', ['X'], ['Out']],
}

activefunc_op_mapping = {
    'LeakyRelu': ['leaky_relu', ['X'], ['Out'], dict(), dict(alpha=.01)],
}

default_ioa_constraint = {
    'Gather': [(lambda i, o, a: a.get('axis', 0) == 0, 'only axis = 0 is supported')],
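For context, each default_op_mapping entry above pairs an ONNX op type with the target fluid op name, its input/output slot names, and optional default attributes. A hypothetical consumer might expand an entry roughly as follows; this is a sketch only, and the emit helper is not part of the repository.

# Sketch: interpreting a default_op_mapping entry; `emit` is a hypothetical helper.
default_op_mapping = {
    'Sqrt': ['sqrt', ['X'], ['Out']],
    'Gather': ['gather', ['X'], ['Out'], dict(axis='')],
}

def emit(onnx_op, input_names, output_names):
    entry = default_op_mapping[onnx_op]
    fluid_op, in_slots, out_slots = entry[0], entry[1], entry[2]
    attrs = entry[3] if len(entry) > 3 else {}
    inputs = dict(zip(in_slots, input_names))
    outputs = dict(zip(out_slots, output_names))
    return fluid_op, inputs, outputs, attrs

print(emit('Sqrt', ['x'], ['y']))  # ('sqrt', {'X': 'x'}, {'Out': 'y'}, {})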
x2paddle/op_mapper/onnx_op_mapper.py

@@ -116,12 +116,14 @@ class ONNXOpMapper(OpMapper):
        return False

    def get_results_of_inference(self, model, value_infos, data_nodes):
        inputs = []
        if not os.path.exists(self.tmp_data_dir):
            os.makedirs(self.tmp_data_dir)
        for data_node in data_nodes:
            value_info = value_infos[data_node]
            ipt = np.random.random(value_info['shape']).astype(value_info['dtype'])
            inputs.append(ipt)
            np.save(os.path.join(self.tmp_data_dir, data_node), ipt)

        model = onnx.shape_inference.infer_shapes(model)
        outputs = []

@@ -130,11 +132,8 @@ class ONNXOpMapper(OpMapper):
        model.graph.ClearField('output')
        model.graph.output.MergeFrom(outputs)
        if not os.path.exists(self.tmp_data_dir):
            os.makedirs(self.tmp_data_dir)
        onnx.save(model, os.path.join(self.tmp_data_dir, 'onnx_model_infer.onnx'))
        np.save(os.path.join(self.tmp_data_dir, 'input_data.npy'), inputs)
        os.system('onnx_infer --save_dir=' + self.tmp_data_dir)
        return

@@ -263,16 +262,19 @@ class ONNXOpMapper(OpMapper):
    def elementwise_map(self, node):
        assert node.layer_type in self.elementwise_ops
        op_type = self.elementwise_ops[node.layer_type]
        val_x = self.graph.get_input_node(node, idx=0, copy=True)
        val_y = self.graph.get_input_node(node, idx=1, copy=True)
        if len(val_x.out_shapes[0]) < len(val_y.out_shapes[0]):
            val_x, val_y = val_y, val_x
        val_y_shape = val_y.out_shapes[0]
        val_x_shape = val_x.out_shapes[0]
        if len(val_x_shape) < len(val_y_shape):
            val_x, val_y = val_y, val_x
        str_y_shape = ','.join(str(e) for e in val_y_shape)
        str_x_shape = ','.join(str(e) for e in val_x_shape)
        slice_idx = 0
        if str_y_shape not in str_x_shape:
            for dim in val_y_shape:
                if dim == 1:
                    slice_idx += 1

@@ -353,47 +355,52 @@ class ONNXOpMapper(OpMapper):
        val_scales = self.graph.get_input_node(node, idx=1, copy=True)
        val_y = self.graph.get_node(node.layer.output[0], copy=True)
        out_shape_ = val_y.out_shapes[0]
        if out_shape_ is not None:
            assert len(out_shape_) == 4, 'only 4-D Tensor as X and Y supported'
            out_shape_ = out_shape_[2:]
        out_shape = val_y.out_shapes[0]
        if out_shape is not None:
            assert len(out_shape) == 4, 'only 4-D Tensor as X and Y supported'
            out_shape = out_shape[2:]
        scales = _const_weight_or_none(val_scales)
        if isinstance(val_scales, ONNXGraphNode):
            scales, _, _ = self.get_dynamic_shape(val_scales.layer_name)
        attr = {'name': string(node.layer_name)}
        use_scales = True
        if scales is not None:
            try:
                assert len(scales) == 4, 'only 4-D Tensor as X and Y supported'
                assert scales[0] == 1 and scales[1] == 1, 'only scale on (NC)HW supported'
                assert scales[2] == scales[3], 'only aspect-ratio-invariant scale supported'
            except:
                use_scales = False
        scale = scales[2] if scales else None
        if scale is None:
            assert out_shape_, 'neither scales nor output shape is available'
            out_shape = out_shape_
            assert out_shape, 'neither scales nor output shape is available'
        else:
            out_shape = None
            if out_shape_ is None:
            if out_shape is None:
                in_shape = val_x.out_shapes[0]
                assert in_shape is not None, 'out_shape required but not inferrable'
                assert len(in_shape) == 4, 'only 4-D Tensor as X and Y supported'
                out_shape_ = [in_shape[2] * scale, in_shape[3] * scale]
                out_shape = [in_shape[2] * scale, in_shape[3] * scale]
        mode = node.get_attr('mode', 'nearest')
        fluid_op = 'resize_{}'.format(mode)
        if 'linear' in mode:
            print('Warnning: paddle not support resize wiht mode: linear, we use bilinear replace linear')
            print('Warnning: paddle not support op: resize wiht mode: linear, we use bilinear replace linear')
            fluid_op = 'resize_bilinear'
        if isinstance(val_scales, ONNXGraphNode):
            scale, _, _ = self.get_dynamic_shape(val_scales.layer_name)
        if use_scales and scale is not None:
            attr['scale'] = scale
        else:
            attr['out_shape'] = out_shape
        attr = {
            'scale': scale,
            'out_shape': out_shape,
            'name': string(node.layer_name)
        }
        node.fluid_code.add_layer(fluid_op,
                                  inputs=val_x,
                                  output=node,

@@ -449,7 +456,6 @@ class ONNXOpMapper(OpMapper):
    def Unsqueeze(self, node):
        val_x = self.graph.get_input_node(node, idx=0, copy=True)
        axes = node.get_attr('axes')
        if len(val_x.out_shapes[0]) == 0:
            node.fluid_code.add_layer('assign',
                                      inputs=val_x,

@@ -483,6 +489,7 @@ class ONNXOpMapper(OpMapper):
        assert dtype == output_dtype, 'tensor dtype unmatches storage dtype'
        shape = node.get_attr('shape', None)
        if shape is None:
            shape = val_output.out_shapes[0]
        if shape is None:

@@ -493,7 +500,7 @@ class ONNXOpMapper(OpMapper):
                'using value as 1-D tensor may lead to fails',
                val_output.layer_name, val_output.layer_name)
        if len(value) == 1:  # scalar
        if len(value) == 1:
            value = value.tolist()
            shape = [1]
            value = value[0]

@@ -520,48 +527,34 @@ class ONNXOpMapper(OpMapper):
                                  param_attr=attr)

    def Resize(self, node):
        self._interpolate(node)

    def Upsample(self, node):
        self._interpolate(node)

    def Expand(self, node):
        val_x = self.graph.get_input_node(node, idx=0, copy=True)
        val_scales = self.graph.get_input_node(node, idx=1, copy=True)
        val_y = self.graph.get_node(node.layer.output[0], copy=True)
        val_shape = self.graph.get_input_node(node, idx=1, copy=True)
        out_shape_ = val_y.out_shapes[0]
        if out_shape_ is not None:
            assert len(out_shape_) == 4, 'only 4-D Tensor as X and Y supported'
            out_shape_ = out_shape_[2:]
        scales = _const_weight_or_none(val_scales)
        if scales is not None:
            assert len(scales) == 4, 'only 4-D Tensor as X and Y supported'
            assert scales[0] == 1 and scales[1] == 1, 'only scale on (NC)HW supported'
            assert scales[2] == scales[3], 'only aspect-ratio-invariant scale supported'
        scale = scales[2] if scales else None
        if scale is None:
            assert out_shape_, 'neither scales nor output shape is available'
            out_shape = out_shape_
        else:
            out_shape = None
            if out_shape_ is None:
                in_shape = val_x.out_shapes[0]
                assert in_shape is not None, 'out_shape required but not inferrable'
                assert len(in_shape) == 4, 'only 4-D Tensor as X and Y supported'
                out_shape_ = [in_shape[2] * scale, in_shape[3] * scale]
        if len(val_shape.outputs) == 1:
            self.omit_nodes.append(val_shape.layer_name)
        mode = node.get_attr('mode', 'nearest')
        fluid_op = 'resize_{}'.format(mode)
        attr = {
            'scale': scale,
            'out_shape': out_shape,
            'name': string(node.layer_name)
        }
        node.fluid_code.add_layer(fluid_op,
                                  inputs=val_x,
                                  output=node,
                                  param_attr=attr)
        val_y = self.graph.get_node(node.layer.output[0], copy=True)
        out_shape = node.out_shapes[0]
        val_x_dtype = val_x.dtype

    def Upsample(self, node):
        self._interpolate(node)

        name_ones = node.layer_name + '_ones'
        attr_ones = {'shape': out_shape, 'dtype': string(val_x_dtype)}
        node.fluid_code.add_layer('ones',
                                  inputs=None,
                                  output=name_ones,
                                  param_attr=attr_ones)
        inputs = {'x': name_ones, 'y': val_x}
        attr = {'name': string(node.layer_name)}
        node.fluid_code.add_layer('elementwise_mul',
                                  inputs=inputs,
                                  output=node.layer_name,
                                  param_attr=attr)

    def Gather(self, node):
        val_x = self.graph.get_input_node(node, idx=0, copy=True)

@@ -716,7 +709,7 @@ class ONNXOpMapper(OpMapper):
            'dim': axis,
            'name': string(node.layer_name)
        }
        # generation
        node.fluid_code.add_layer('split',
                                  inputs=val_x,
                                  output=val_y,

@@ -731,21 +724,23 @@ class ONNXOpMapper(OpMapper):
        if isinstance(val_shape, ONNXGraphDataNode):
            self.omit_nodes.append(val_shape.layer_name)
        attr = {'name': string(node.layer_name)}
        # catch dynamic graph shape
        if isinstance(val_shape, ONNXGraphNode):
            shape, _, _ = self.get_dynamic_shape(val_shape.layer_name)
            if val_shape.dtype == 'int64':
                val_shape_cast = val_shape.layer_name + '_cast'
                node.fluid_code.add_layer('cast',
                                          inputs=val_shape,
                                          output=val_shape_cast,
                                          param_attr={'dtype': string('int32')})
                attr['actual_shape'] = val_shape_cast
            else:
                attr['actual_shape'] = val_shape
        if shape is None:
            shape = val_reshaped.out_shapes[0]
        shape_dtype = val_shape.dtype
        if shape_dtype is None:
            _logger.warning('in op %s(%s -> Reshape -> %s): '
                            'dtype of input "shape" not inferred, int32 assumed',
                            node.layer_name, val_x.layer_name, val_reshaped.layer_name)
            shape_dtype = _np.dtype('int32')
        if shape is None:
            shape = [1, -1]
            _logger.warning(

@@ -753,8 +748,8 @@ class ONNXOpMapper(OpMapper):
                'input "shape" not inferred, use [1, -1] as dummy value, '
                'the behavior of Paddle fluid maybe undefined',
                node.layer_name, val_x.layer_name, val_reshaped.layer_name)
        attr = {'shape': shape, 'name': string(node.layer_name)}
        attr['shape'] = shape
        node.fluid_code.add_layer('reshape',
                                  inputs=val_x,
                                  output=node,
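The new Expand mapper above emulates ONNX Expand with existing fluid layers: it builds a ones tensor of the target output shape and multiplies the input against it with elementwise_mul, letting broadcasting do the expansion. A small NumPy sketch of the same trick, assuming the target shape is known statically:

# Sketch of Expand-as-broadcast-multiply, the trick used by the mapper above.
import numpy as np

x = np.array([[1.0], [2.0], [3.0]])  # shape (3, 1)
target_shape = (3, 4)                # assumed known from the ONNX graph

ones = np.ones(target_shape, dtype=x.dtype)
expanded = ones * x                  # broadcasting expands x to (3, 4)

assert expanded.shape == target_shape
assert np.array_equal(expanded, np.broadcast_to(x, target_shape))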