PaddlePaddle / X2Paddle

Commit 2d558996
Authored by Jason on Sep 23, 2019; committed via GitHub on Sep 23, 2019.

Merge pull request #152 from Channingss/develop

support for expand, fix bug of inference by onnxruntime

Parents: 31140398, 2ec20d64
Showing 4 changed files with 117 additions and 122 deletions.
x2paddle/decoder/onnx_decoder.py        +32  -23
x2paddle/onnx_infer.py                  +3   -5
x2paddle/op_mapper/onnx_directly_map.py +0   -7
x2paddle/op_mapper/onnx_op_mapper.py    +82  -87
x2paddle/decoder/onnx_decoder.py

@@ -44,8 +44,6 @@ class ONNXGraphNode(GraphNode):
         self.layer_type = layer.op_type
         self.fluid_code = FluidCode()
         self.attr_map = self.get_attr_map()
-        self.dtype_map = {1: "float32", 3: "int32", 9: "int64"}
-        self.weight_inputs = list()
         self.out_shapes = list()
         self.dtype = None
         self.which_child = {}

@@ -206,7 +204,20 @@ class ONNXGraph(Graph):
         #generate connection between nodes for topo
         for layer_name, node in self.node_map.items():
             if isinstance(node, ONNXGraphNode):
-                for idx, in_node in enumerate(node.layer.input):
-                    if in_node not in self.node_map:
-                        flag = 0
-                        for nd in self.model.node:
+                self.build_connection(layer_name, node)
+        #generate topo
+        super(ONNXGraph, self).build()
+        self.input_nodes = self.place_holder_nodes
+
+    def build_connection(self, layer_name, node):
+        """
+        find connection for nodes
+        """
+        for idx, in_node in enumerate(node.layer.input):
+            if in_node == '':
+                continue
+            if in_node not in self.node_map:
+                flag = 0
+                for nd in self.model.node:

@@ -221,14 +232,10 @@ class ONNXGraph(Graph):
                         break
                 if flag == 0:
                     raise Exception(
                         'input[{}] of node[{}] does not exist in node_map'.
                         format(in_node, layer_name))
                 else:
                     self.connect(in_node, layer_name)
-        #generate topo
-        super(ONNXGraph, self).build()
-        self.input_nodes = self.place_holder_nodes

     def get_input_node(self, node, idx=0, copy=False):
         if len(node.which_child) == 0:

@@ -450,7 +457,6 @@ class ONNXDecoder(object):
         """
         make a valid code name for ParamAttr
         """
         if name == '':
             raise ValueError('name should not be empty')
         for s in ' .*?\\/-:':

@@ -473,6 +479,9 @@ class ONNXDecoder(object):
             node.name = node.output[0]
             node.name = self.make_variable_name(node.name)
             for i in range(len(node.input)):
-                node.input[i] = self.make_variable_name(node.input[i])
+                if node.input[i] == '':
+                    continue
+                else:
+                    node.input[i] = self.make_variable_name(node.input[i])
             for i in range(len(node.output)):
                 node.output[i] = self.make_variable_name(node.output[i])
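Note: the new build_connection path skips empty input names before consulting node_map, because optional ONNX inputs are encoded as ''. A minimal standalone sketch of that lookup rule (resolve_inputs, node_map and graph_nodes are illustrative stand-ins, not the decoder's real members):

    def resolve_inputs(node_inputs, node_map, graph_nodes):
        # Skip optional inputs the exporter left unset (empty string), then
        # require every remaining input to be a known node or to be produced
        # as some node's output, mirroring the check in the hunk above.
        resolved = []
        for in_name in node_inputs:
            if in_name == '':
                continue
            if in_name in node_map:
                resolved.append(in_name)
                continue
            producers = [nd.name for nd in graph_nodes if in_name in nd.output]
            if not producers:
                raise Exception(
                    'input[{}] does not exist in node_map'.format(in_name))
            resolved.extend(producers)
        return resolved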
x2paddle/onnx_infer.py

@@ -34,16 +34,14 @@ def main():
     save_dir = args.save_dir
     model_dir = os.path.join(save_dir, 'onnx_model_infer.onnx')
-    data_dir = os.path.join(save_dir, 'input_data.npy')
     model = onnx.load(model_dir)
     sess = rt.InferenceSession(model_dir)
-    inputs = np.load(data_dir, allow_pickle=True)
     inputs_dict = {}
-    for i, ipt in enumerate(inputs):
-        inputs_dict[sess.get_inputs()[i].name] = ipt
+    for ipt in sess.get_inputs():
+        data_dir = os.path.join(save_dir, ipt.name + '.npy')
+        inputs_dict[ipt.name] = np.load(data_dir, allow_pickle=True)
     res = sess.run(None, input_feed=inputs_dict)
     for idx, value_info in enumerate(model.graph.output):
         np.save(os.path.join(save_dir, value_info.name), res[idx])
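Note: the fix keys every feed on the session's own input names instead of relying on the positional order of a single pickled array. A minimal, self-contained sketch of the same pattern (the function name and paths are illustrative, not part of the repo):

    import os
    import numpy as np
    import onnxruntime as rt

    def run_onnx(model_path, save_dir):
        # Feed one <input_name>.npy file per graph input, matched by name.
        sess = rt.InferenceSession(model_path)
        feed = {}
        for ipt in sess.get_inputs():
            feed[ipt.name] = np.load(os.path.join(save_dir, ipt.name + '.npy'),
                                     allow_pickle=True)
        return sess.run(None, input_feed=feed)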
x2paddle/op_mapper/onnx_directly_map.py

@@ -26,8 +26,6 @@ default_op_mapping_field_values['OUTPUT_PERM'] = None
 default_op_mapping_field_values['FILL_NAME_FIELD'] = True

 default_op_mapping = {
-    'Gather': ['gather', ['X'], ['Out'],
-               dict(axis='')],
     'Shape': ['shape', ['X'], ['Out']],
     'Clip': [
         'clip', ['X'], ['Out'],

@@ -81,11 +79,6 @@ default_op_mapping = {
     'Sqrt': ['sqrt', ['X'], ['Out']],
 }

-activefunc_op_mapping = {
-    'LeakyRelu': ['leaky_relu', ['X'], ['Out'],
-                  dict(), dict(alpha=.01)],
-}
-
 default_ioa_constraint = {
     'Gather':
     [(lambda i, o, a: a.get('axis', 0) == 0, 'only axis = 0 is supported')],
x2paddle/op_mapper/onnx_op_mapper.py

@@ -116,12 +116,14 @@ class ONNXOpMapper(OpMapper):
         return False

     def get_results_of_inference(self, model, value_infos, data_nodes):
-        inputs = []
+        if not os.path.exists(self.tmp_data_dir):
+            os.makedirs(self.tmp_data_dir)
         for data_node in data_nodes:
             value_info = value_infos[data_node]
             ipt = np.random.random(value_info['shape']).astype(
                 value_info['dtype'])
-            inputs.append(ipt)
+            np.save(os.path.join(self.tmp_data_dir, data_node), ipt)
         model = onnx.shape_inference.infer_shapes(model)
         outputs = []

@@ -130,11 +132,8 @@ class ONNXOpMapper(OpMapper):
         model.graph.ClearField('output')
         model.graph.output.MergeFrom(outputs)
-        if not os.path.exists(self.tmp_data_dir):
-            os.makedirs(self.tmp_data_dir)
         onnx.save(model,
                   os.path.join(self.tmp_data_dir, 'onnx_model_infer.onnx'))
-        np.save(os.path.join(self.tmp_data_dir, 'input_data.npy'), inputs)
         os.system('onnx_infer --save_dir=' + self.tmp_data_dir)
         return
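Note: together with the onnx_infer.py change above, the mapper now writes one <input_name>.npy per graph input into tmp_data_dir before shelling out to onnx_infer, and the subprocess loads them back by name. A hedged sketch of that producer side (dump_random_inputs and its arguments are illustrative, not the mapper's real API):

    import os
    import numpy as np

    def dump_random_inputs(input_specs, tmp_data_dir):
        # input_specs maps input name -> (shape, dtype); one .npy file per input.
        if not os.path.exists(tmp_data_dir):
            os.makedirs(tmp_data_dir)
        for name, (shape, dtype) in input_specs.items():
            np.save(os.path.join(tmp_data_dir, name),
                    np.random.random(shape).astype(dtype))

    # e.g. dump_random_inputs({'image': ((1, 3, 224, 224), 'float32')}, '/tmp/onnx_infer')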
@@ -263,16 +262,19 @@ class ONNXOpMapper(OpMapper):
     def elementwise_map(self, node):
         assert node.layer_type in self.elementwise_ops
         op_type = self.elementwise_ops[node.layer_type]

         val_x = self.graph.get_input_node(node, idx=0, copy=True)
         val_y = self.graph.get_input_node(node, idx=1, copy=True)
-        if len(val_x.out_shapes[0]) < len(val_y.out_shapes[0]):
-            val_x, val_y = val_y, val_x

         val_y_shape = val_y.out_shapes[0]
         val_x_shape = val_x.out_shapes[0]
+
+        if len(val_x_shape) < len(val_y_shape):
+            val_x, val_y = val_y, val_x
+
+        str_y_shape = ','.join(str(e) for e in val_y_shape)
+        str_x_shape = ','.join(str(e) for e in val_x_shape)
         slice_idx = 0
-        for dim in val_y_shape:
-            if dim == 1:
-                slice_idx += 1
+        if str_y_shape not in str_x_shape:
+            for dim in val_y_shape:
+                if dim == 1:
+                    slice_idx += 1
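Note: the reworked shape handling only counts size-1 dimensions of the smaller operand when its shape string is not contained in the larger operand's shape string. A rough standalone illustration of just that comparison (how slice_idx is consumed lies outside this hunk):

    def count_squeezable_dims(x_shape, y_shape):
        # Assumes plain lists of ints; mirrors the comparison added above.
        if len(x_shape) < len(y_shape):
            x_shape, y_shape = y_shape, x_shape
        str_x = ','.join(str(e) for e in x_shape)
        str_y = ','.join(str(e) for e in y_shape)
        slice_idx = 0
        if str_y not in str_x:
            for dim in y_shape:
                if dim == 1:
                    slice_idx += 1
        return slice_idx

    # count_squeezable_dims([2, 3, 4, 5], [1, 1, 4, 5]) -> 2
    # count_squeezable_dims([2, 3, 4, 5], [4, 5])       -> 0  ('4,5' is a substring of '2,3,4,5')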
@@ -353,47 +355,52 @@ class ONNXOpMapper(OpMapper):
         val_scales = self.graph.get_input_node(node, idx=1, copy=True)
         val_y = self.graph.get_node(node.layer.output[0], copy=True)

-        out_shape_ = val_y.out_shapes[0]
-        if out_shape_ is not None:
-            assert len(out_shape_) == 4, 'only 4-D Tensor as X and Y supported'
-            out_shape_ = out_shape_[2:]
+        out_shape = val_y.out_shapes[0]
+        if out_shape is not None:
+            assert len(out_shape) == 4, 'only 4-D Tensor as X and Y supported'
+            out_shape = out_shape[2:]
         scales = _const_weight_or_none(val_scales)
+
+        if isinstance(val_scales, ONNXGraphNode):
+            scales, _, _ = self.get_dynamic_shape(val_scales.layer_name)
+
+        attr = {'name': string(node.layer_name)}
+        use_scales = True
         if scales is not None:
-            assert len(scales) == 4, 'only 4-D Tensor as X and Y supported'
-            assert scales[0] == 1 and scales[1] == 1, 'only scale on (NC)HW supported'
-            assert scales[2] == scales[3], 'only aspect-ratio-invariant scale supported'
+            try:
+                assert len(scales) == 4, 'only 4-D Tensor as X and Y supported'
+                assert scales[0] == 1 and scales[1] == 1, 'only scale on (NC)HW supported'
+                assert scales[2] == scales[3], 'only aspect-ratio-invariant scale supported'
+            except:
+                use_scales = False
         scale = scales[2] if scales else None
         if scale is None:
-            assert out_shape_, 'neither scales nor output shape is available'
-            out_shape = out_shape_
+            assert out_shape, 'neither scales nor output shape is available'
         else:
-            out_shape = None
-            if out_shape_ is None:
+            if out_shape is None:
                 in_shape = val_x.out_shapes[0]
                 assert in_shape is not None, 'out_shape required but not inferrable'
                 assert len(in_shape) == 4, 'only 4-D Tensor as X and Y supported'
-                out_shape_ = [in_shape[2] * scale, in_shape[3] * scale]
+                out_shape = [in_shape[2] * scale, in_shape[3] * scale]

         mode = node.get_attr('mode', 'nearest')
         fluid_op = 'resize_{}'.format(mode)
         if 'linear' in mode:
             print(
-                'Warnning: paddle not support resize wiht mode: linear, we use bilinear replace linear'
+                'Warnning: paddle not support op: resize wiht mode: linear, we use bilinear replace linear'
             )
             fluid_op = 'resize_bilinear'
-        if isinstance(val_scales, ONNXGraphNode):
-            scale, _, _ = self.get_dynamic_shape(val_scales.layer_name)
-        attr = {
-            'scale': scale,
-            'out_shape': out_shape,
-            'name': string(node.layer_name)
-        }
+
+        if use_scales and scale is not None:
+            attr['scale'] = scale
+        else:
+            attr['out_shape'] = out_shape
+
         node.fluid_code.add_layer(fluid_op,
                                   inputs=val_x,
                                   output=node,
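Note: _interpolate now uses the ONNX scales input only when it passes the 4-D / (NC)HW / aspect-ratio checks, and otherwise falls back to an explicit output shape. A hedged sketch of that decision in isolation (the function and argument names are stand-ins; the real method reads these values from the node and its inferred shapes):

    def choose_resize_attr(scales, out_shape_hw, in_shape):
        use_scales = True
        if scales is not None:
            try:
                assert len(scales) == 4
                assert scales[0] == 1 and scales[1] == 1   # no scaling on N or C
                assert scales[2] == scales[3]              # aspect ratio preserved
            except AssertionError:
                use_scales = False
        scale = scales[2] if scales else None
        if scale is None:
            assert out_shape_hw, 'neither scales nor output shape is available'
        elif out_shape_hw is None:
            out_shape_hw = [in_shape[2] * scale, in_shape[3] * scale]
        if use_scales and scale is not None:
            return {'scale': scale}
        return {'out_shape': out_shape_hw}

    # choose_resize_attr([1, 1, 2, 2], None, [1, 3, 8, 8]) -> {'scale': 2}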
@@ -449,7 +456,6 @@ class ONNXOpMapper(OpMapper):
     def Unsqueeze(self, node):
         val_x = self.graph.get_input_node(node, idx=0, copy=True)
         axes = node.get_attr('axes')
         if len(val_x.out_shapes[0]) == 0:
             node.fluid_code.add_layer('assign',
                                       inputs=val_x,

@@ -483,6 +489,7 @@ class ONNXOpMapper(OpMapper):
         assert dtype == output_dtype, 'tensor dtype unmatches storage dtype'

         shape = node.get_attr('shape', None)
         if shape is None:
             shape = val_output.out_shapes[0]
         if shape is None:

@@ -493,7 +500,7 @@ class ONNXOpMapper(OpMapper):
             'using value as 1-D tensor may lead to fails',
             val_output.layer_name, val_output.layer_name)
-        if len(value) == 1:  # scalar
+        if len(value) == 1:
             value = value.tolist()
             shape = [1]
             value = value[0]

@@ -520,48 +527,34 @@ class ONNXOpMapper(OpMapper):
                                   param_attr=attr)

     def Resize(self, node):
-        val_x = self.graph.get_input_node(node, idx=0, copy=True)
-        val_scales = self.graph.get_input_node(node, idx=1, copy=True)
-        val_y = self.graph.get_node(node.layer.output[0], copy=True)
-
-        out_shape_ = val_y.out_shapes[0]
-        if out_shape_ is not None:
-            assert len(out_shape_) == 4, 'only 4-D Tensor as X and Y supported'
-            out_shape_ = out_shape_[2:]
-
-        scales = _const_weight_or_none(val_scales)
-        if scales is not None:
-            assert len(scales) == 4, 'only 4-D Tensor as X and Y supported'
-            assert scales[0] == 1 and scales[1] == 1, 'only scale on (NC)HW supported'
-            assert scales[2] == scales[3], 'only aspect-ratio-invariant scale supported'
-        scale = scales[2] if scales else None
-        if scale is None:
-            assert out_shape_, 'neither scales nor output shape is available'
-            out_shape = out_shape_
-        else:
-            out_shape = None
-            if out_shape_ is None:
-                in_shape = val_x.out_shapes[0]
-                assert in_shape is not None, 'out_shape required but not inferrable'
-                assert len(in_shape) == 4, 'only 4-D Tensor as X and Y supported'
-                out_shape_ = [in_shape[2] * scale, in_shape[3] * scale]
-
-        mode = node.get_attr('mode', 'nearest')
-        fluid_op = 'resize_{}'.format(mode)
-        attr = {
-            'scale': scale,
-            'out_shape': out_shape,
-            'name': string(node.layer_name)
-        }
-        node.fluid_code.add_layer(fluid_op,
-                                  inputs=val_x,
-                                  output=node,
-                                  param_attr=attr)
+        self._interpolate(node)

     def Upsample(self, node):
         self._interpolate(node)

+    def Expand(self, node):
+        val_x = self.graph.get_input_node(node, idx=0, copy=True)
+        val_shape = self.graph.get_input_node(node, idx=1, copy=True)
+
+        if len(val_shape.outputs) == 1:
+            self.omit_nodes.append(val_shape.layer_name)
+
+        val_y = self.graph.get_node(node.layer.output[0], copy=True)
+        out_shape = node.out_shapes[0]
+        val_x_dtype = val_x.dtype
+        name_ones = node.layer_name + '_ones'
+        attr_ones = {'shape': out_shape, 'dtype': string(val_x_dtype)}
+        node.fluid_code.add_layer('ones',
+                                  inputs=None,
+                                  output=name_ones,
+                                  param_attr=attr_ones)
+        inputs = {'x': name_ones, 'y': val_x}
+        attr = {'name': string(node.layer_name)}
+        node.fluid_code.add_layer('elementwise_mul',
+                                  inputs=inputs,
+                                  output=node.layer_name,
+                                  param_attr=attr)
+
     def Gather(self, node):
         val_x = self.graph.get_input_node(node, idx=0, copy=True)
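Note: the new Expand mapper emulates ONNX Expand by building a ones tensor of the target shape and multiplying it with the input, so broadcasting performs the actual expansion. A NumPy sketch of the semantics (this is not the generated Paddle code, just the idea):

    import numpy as np

    def expand_like_onnx(x, target_shape):
        # ones(target_shape) * x broadcasts x up to target_shape,
        # which is what the generated ones + elementwise_mul pair does.
        ones = np.ones(target_shape, dtype=x.dtype)
        return ones * x

    # expand_like_onnx(np.array([[1.], [2.], [3.]]), (3, 4)).shape == (3, 4)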
@@ -716,7 +709,7 @@ class ONNXOpMapper(OpMapper):
             'dim': axis,
             'name': string(node.layer_name)
         }
-        # generation
         node.fluid_code.add_layer('split',
                                   inputs=val_x,
                                   output=val_y,

@@ -731,21 +724,23 @@ class ONNXOpMapper(OpMapper):
         if isinstance(val_shape, ONNXGraphDataNode):
             self.omit_nodes.append(val_shape.layer_name)

+        attr = {'name': string(node.layer_name)}
         # catch dynamic graph shape
         if isinstance(val_shape, ONNXGraphNode):
             shape, _, _ = self.get_dynamic_shape(val_shape.layer_name)
+            if val_shape.dtype == 'int64':
+                val_shape_cast = val_shape.layer_name + '_cast'
+                node.fluid_code.add_layer('cast',
+                                          inputs=val_shape,
+                                          output=val_shape_cast,
+                                          param_attr={'dtype': string('int32')})
+                attr['actual_shape'] = val_shape_cast
+            else:
+                attr['actual_shape'] = val_shape
         if shape is None:
             shape = val_reshaped.out_shapes[0]
-        shape_dtype = val_shape.dtype
-        if shape_dtype is None:
-            _logger.warning(
-                'in op %s(%s -> Reshape -> %s): '
-                'dtype of input "shape" not inferred, int32 assumed',
-                node.layer_name, val_x.layer_name, val_reshaped.layer_name)
-            shape_dtype = _np.dtype('int32')
         if shape is None:
             shape = [1, -1]
             _logger.warning(

@@ -753,8 +748,8 @@ class ONNXOpMapper(OpMapper):
                 'input "shape" not inferred, use [1, -1] as dummy value, '
                 'the behavior of Paddle fluid maybe undefined',
                 node.layer_name, val_x.layer_name, val_reshaped.layer_name)
-        attr = {'shape': shape, 'name': string(node.layer_name)}
+        attr['shape'] = shape
         node.fluid_code.add_layer('reshape',
                                   inputs=val_x,
                                   output=node,
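Note: dynamic Reshape targets coming from ONNX are usually int64 tensors, and the cast added above suggests the fluid reshape's actual_shape input is consumed as int32. A rough sketch of the equivalent dtype check (as_actual_shape is a hypothetical helper, not part of the repo):

    import numpy as np

    def as_actual_shape(shape_tensor):
        # Cast an int64 shape tensor to int32 before use, mirroring the added cast.
        if shape_tensor.dtype == np.int64:
            return shape_tensor.astype(np.int32)
        return shape_tensor

    # as_actual_shape(np.array([1, -1, 224, 224], dtype=np.int64)).dtype -> dtype('int32')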