Commit c66a5423
Authored Aug 13, 2019 by channingss

support model Face_cyclegan

Parent: 339aedd2

Showing 3 changed files with 205 additions and 66 deletions:

    x2paddle/decoder/onnx_decoder.py          +73  -40
    x2paddle/op_mapper/onnx_directly_map.py    +9   -1
    x2paddle/op_mapper/onnx_op_mapper.py      +123  -25
x2paddle/decoder/onnx_decoder.py

@@ -44,7 +44,7 @@ class ONNXGraphNode(GraphNode):
         self.attr_map = self.get_attr_map()
         self.dtype_map = {1: "float32", 3: "int32", 9: "int64"}
         self.weight_inputs = list()
-        self.out_shapes = None
+        self.out_shapes = list()
         self.dtype = None

     def get_attr_map(self):
@@ -58,11 +58,11 @@ class ONNXGraphNode(GraphNode):
     @property
     def value(self):
-        assert 'Constant' in self.layer_type, "Only Constant node has value."
-        attr = self.layer.attr['value']
-        if 'value' in self.attr_map:
-            return default
+        assert 'Constant' in self.layer_type, "Only Constant | ConstantOfShape node has value."
+        print(self.layer)
+        attr = self.layer.attribute['value']
+        if 'value' not in self.attr_map:
+            return None
         return self.attr_map[name]

     def get_attribute_value2(self, attr):
@@ -110,13 +110,12 @@ class ONNXGraphDataNode(GraphNode):
     def out_shapes(self):
         values = self.layer.type.tensor_type.shape.dim
         out_shapes = list()
-        out_shapes = [dim.dim_value for dim in values]
+        out_shapes.append([dim.dim_value for dim in values])
         return out_shapes

     @property
     def dtype(self):
         dtype = self.layer.type.tensor_type.elem_type
         return TENSOR_TYPE_TO_NP_TYPE[dtype]
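Note: with this change out_shapes becomes a list of shapes (one entry per output) rather than a single shape, which is why the op mapper below rewrites every node.out_shapes read to node.out_shapes[0]. A minimal sketch of the shape extraction itself, assuming a loaded ONNX model (the file name is hypothetical):

import onnx

model = onnx.load('face_cyclegan.onnx')  # hypothetical path
vi = model.graph.input[0]
# TensorShapeProto dims -> Python list; symbolic dims come back as 0
shape = [dim.dim_value for dim in vi.type.tensor_type.shape.dim]
print(shape)  # e.g. [1, 3, 256, 256]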
@@ -126,6 +125,7 @@ class ONNXGraph(Graph):
         self.initializer = {}
         self.place_holder_nodes = list()
         self.get_place_holder_nodes()
+        self.value_infos = self.inferred_model_value_info(model)

     def get_inner_nodes(self):
         """
@@ -163,16 +163,12 @@ class ONNXGraph(Graph):
         build topo_sort of ONNX model
         """
         for layer in self.model.node:
-            self.node_map[layer.name] = ONNXGraphNode(layer)
-            for item in self.model.value_info:
-                if item.name in self.node_map:
-                    self.node_map[item.name].dtype = TENSOR_TYPE_TO_NP_TYPE[
-                        item.type.tensor_type.elem_type]
-                    self.node_map[item.name].out_shapes = [
-                        dim.dim_value for dim in item.type.tensor_type.shape.dim
-                    ]
+            node = ONNXGraphNode(layer)
+            self.node_map[layer.name] = node
+            #set op node's dtype and out_shapes
+            for opt in layer.output:
+                value_info = self.value_infos[opt]
+                node.dtype = value_info['dtype']
+                node.out_shapes.append(value_info['shape'])

         for layer in self.model.input:
             if layer.name not in self.node_map:
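Note: the new lookup assumes every op output name is present in self.value_infos, i.e. that shape inference typed every intermediate tensor; a missing entry raises KeyError. A guarded variant of the same lookup, as a self-contained sketch with hypothetical names:

value_infos = {'conv_0': {'dtype': 'float32', 'shape': [1, 64, 128, 128]}}
for opt in ['conv_0', 'untyped_out']:
    info = value_infos.get(opt)  # skip outputs shape inference missed
    if info is not None:
        print(opt, info['dtype'], info['shape'])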
@@ -200,7 +196,9 @@ class ONNXGraph(Graph):
             else:
                 self.connect(in_node, layer_name)
+
+        # print([layer_name for layer_name, node in self.node_map.items()])
         #generate topo
         super(ONNXGraph, self).build()

         self.input_nodes = self.place_holder_nodes
@@ -227,6 +225,42 @@ class ONNXGraph(Graph):
             weight = to_array(initializer)
             yield name, weight

+    def inferred_model_value_info(self, graph):
+        """
+        collect value/type info for an ONNX model
+        """
+        assert isinstance(graph, onnx.GraphProto), 'model is not a ModelProto instance'
+        value_info = Dict()
+        for item in graph.value_info:
+            value_info[item.name] = {
+                'dtype': TENSOR_TYPE_TO_NP_TYPE[item.type.tensor_type.elem_type],
+                'shape': [dim.dim_value for dim in item.type.tensor_type.shape.dim],
+                'external': False
+            }
+        for item in graph.input:
+            assert item.name not in value_info
+            value_info[item.name] = {
+                'dtype': TENSOR_TYPE_TO_NP_TYPE[item.type.tensor_type.elem_type],
+                'shape': [dim.dim_value for dim in item.type.tensor_type.shape.dim],
+                'external': True
+            }
+        for item in graph.output:
+            value_info[item.name] = {
+                'dtype': TENSOR_TYPE_TO_NP_TYPE[item.type.tensor_type.elem_type],
+                'shape': [dim.dim_value for dim in item.type.tensor_type.shape.dim],
+                'external': True
+            }
+        return value_info
+
 class ONNXDecoder(object):
     def __init__(self, onnx_model):
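Note: graph.value_info only lists intermediate tensors that ONNX shape inference has already typed, so inferred_model_value_info is meant to run on a shape-inferred model (polish_model below takes care of that). The standalone pattern, with a hypothetical model path:

import onnx

model = onnx.load('face_cyclegan.onnx')  # hypothetical path
inferred = onnx.shape_inference.infer_shapes(model)
# before inference value_info is often empty; after, it covers intermediates
print(len(model.graph.value_info), len(inferred.graph.value_info))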
@@ -241,7 +275,6 @@ class ONNXDecoder(object):
                 'some operator may cannot convert.',
                 model.opset_import[0].version)
-
         check_model(model)
         model = polish_model(model)
         model = self.optimize_model_skip_op_for_inference(model)
@@ -254,6 +287,8 @@ class ONNXDecoder(object):
         self.onnx_graph = ONNXGraph(graph_def)
         self.onnx_graph.build()

+        self.results_of_inference = dict()
+
     def build_value_refs(self, nodes):
         """
         build op reference of inputs and outputs
@@ -456,10 +491,7 @@ class ONNXDecoder(object):
...
@@ -456,10 +491,7 @@ class ONNXDecoder(object):
len
(
onnx_model
.
input
),
len
(
model
.
input
)))
len
(
onnx_model
.
input
),
len
(
model
.
input
)))
return
onnx_model
return
onnx_model
def
get_dynamic_shape_from_caffe2
(
self
,
layer
,
input_shapes
):
def
get_results_of_inference
(
self
,
model
,
input_shapes
):
"""
get dynamic shape from caffe2.backend
"""
try
:
try
:
import
torch
import
torch
version
=
torch
.
__version__
version
=
torch
.
__version__
@@ -472,26 +504,27 @@ class ONNXDecoder(object):
                 )
                 return
         from caffe2.python.onnx.backend import prepare
         shape = input_shapes[0]
         np_images = np.random.rand(shape[0], shape[1], shape[2],
                                    shape[3]).astype('float32')
-        num_onnx = self.split_model(self.model, layer)
-        prepared_backend = prepare(num_onnx, device='CPU')
+        infer_shapes = onnx.shape_inference.infer_shapes(model)
+        model.graph.ClearField('output')
+        model.graph.output.MergeFrom(infer_shapes.graph.value_info)
+        prepared_backend = prepare(model, device='CPU')
         output = prepared_backend.run(inputs=np_images)
-        return output[0].tolist()
+        for idx, value_info in enumerate(infer_shapes.graph.value_info):
+            self.results_of_inference[value_info.name] = output[idx]
+        return

-    def get_dynamic_shape_from_onnx(self, layer, input_shapes):
+    def get_dynamic_shape_from_caffe2(self, layer, input_shapes):
         """
-        get dynamic shape from onnxruntime
+        get dynamic shape from caffe2.backend
         """
-        import onnxruntime as rt
-        from onnxruntime.backend import prepare
-        import numpy as np
-        num_onnx = self.split_model(self.model, layer)
-        sess = prepare(num_onnx)
-        shape = input_shapes[0]
-        print(shape)
-        np_images = np.random.rand(shape[0], shape[1], shape[2],
-                                   shape[3]).astype('float32')
-        output = sess.run(model=sess, inputs=np_images)
-        return output[0].tolist()
+        if len(self.results_of_inference) == 0:
+            self.get_results_of_inference(self.model, input_shapes)
+        output = self.results_of_inference[layer]
+        return output.tolist()
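Note: get_results_of_inference now runs the whole shape-inferred model once through the caffe2 ONNX backend and caches every intermediate tensor, instead of splitting the model per layer. Roughly this standalone pattern (caffe2 ships with older PyTorch builds; the path and input shape are assumptions):

import numpy as np
import onnx
from caffe2.python.onnx.backend import prepare

model = onnx.load('face_cyclegan.onnx')  # hypothetical path
infer_shapes = onnx.shape_inference.infer_shapes(model)
model.graph.ClearField('output')
model.graph.output.MergeFrom(infer_shapes.graph.value_info)  # expose intermediates as outputs
np_images = np.random.rand(1, 3, 256, 256).astype('float32')
outputs = prepare(model, device='CPU').run(inputs=np_images)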
x2paddle/op_mapper/onnx_directly_map.py

@@ -24,6 +24,7 @@ default_op_mapping_field_values['DEFAULTS'] = dict()
 default_op_mapping_field_values['INPUT_PERM'] = None
 default_op_mapping_field_values['OUTPUT_PERM'] = None
 default_op_mapping_field_values['FILL_NAME_FIELD'] = True
+
 default_op_mapping = {
     'Gather': ['gather', ['X'], ['Out'],
                dict(axis='')],
@@ -47,7 +48,14 @@ default_op_mapping = {
         dict(keep_dim=1)
     ],
     'LeakyRelu': ['leaky_relu', ['X'], ['Out'],
-                  dict(), dict(alpha=.01)]
+                  dict(), dict(alpha=.01)],
+    'Tanh': ['tanh', ['X'], ['Out']],
+}
+
+activefunc_op_mapping = {
+    'Relu': ['relu', ['X'], ['Out']],
+    'LeakyRelu': ['leaky_relu', ['X'], ['Out'],
+                  dict(), dict(alpha=.01)],
 }

 default_ioa_constraint = {
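Note: each mapping entry appears to follow the field order declared through default_op_mapping_field_values: the fluid op name, its input argument names, its output argument names, then optional attribute mapping/default dicts; trailing fields fall back to the declared defaults. Reading the new Tanh entry under that assumption:

entry = ['tanh', ['X'], ['Out']]  # default_op_mapping['Tanh']
fluid_op, fluid_inputs, fluid_outputs = entry[0], entry[1], entry[2]
print(fluid_op, fluid_inputs, fluid_outputs)  # tanh ['X'] ['Out']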
x2paddle/op_mapper/onnx_op_mapper.py

@@ -22,6 +22,7 @@ from x2paddle.op_mapper.onnx_directly_map import default_op_mapping_field_values
 from x2paddle.op_mapper.onnx_directly_map import default_op_mapping
 from x2paddle.op_mapper.onnx_directly_map import default_ioa_constraint
 import numpy as np
+import onnx.numpy_helper as numpy_helper
 import logging as _logging
 from collections import OrderedDict as _dict
@@ -66,6 +67,7 @@ class ONNXOpMapper(OpMapper):
         for node_name in self.graph.topo_sort:
             node = self.graph.get_node(node_name)
             op = node.layer_type
+            # print('translate{} layer_type is {}'.format(node_name, op))
             if hasattr(self, op):
                 func = getattr(self, op)
                 func(node)
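Note: the mapper dispatches on the ONNX op type string, so supporting InstanceNormalization and ConvTranspose below is just a matter of defining methods with those names. The pattern in isolation:

class Mapper(object):
    def Tanh(self, node):
        print('mapping node', node)

mapper = Mapper()
op = 'Tanh'  # node.layer_type from the ONNX graph
if hasattr(mapper, op):
    getattr(mapper, op)('tanh_0')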
@@ -134,10 +136,10 @@ class ONNXOpMapper(OpMapper):
                               param_attr=attr)

     def place_holder(self, node):
-        self.input_shapes.append(node.out_shapes)
+        self.input_shapes.append(node.out_shapes[0])
         attr = {
             "dtype": string(node.dtype),
-            "shape": node.out_shapes,
+            "shape": node.out_shapes[0],
             "name": string(node.layer_name),
             "append_batch_size": 'False'
         }
@@ -151,7 +153,7 @@ class ONNXOpMapper(OpMapper):
         if parameter is not None:
             node = parameter
         dtype = node.dtype
-        shape = node.out_shapes
+        shape = node.out_shapes[0]
         self.weights[node.layer_name] = node.weight
         attr = {
@@ -184,8 +186,8 @@ class ONNXOpMapper(OpMapper):
         pads = node.get_attr('pads')
         mode = node.get_attr('mode', 'constant')
         value = node.get_attr('value', 0.)
-        data_shape = val_x.out_shapes
-        output_shape = node.out_shapes
+        data_shape = val_x.out_shapes[0]
+        output_shape = node.out_shapes[0]
         assume_pad2d = False
         attr = {}
         if len(pads) == 4:
@@ -200,8 +202,6 @@ class ONNXOpMapper(OpMapper):
             attr['mode'] = string(mode)
         else:
             attr = {'pad_value': value}
-            assert mode == 'constant', 'mode {} is supported only in pad2d'.format(
-                mode)
         fluid_op = 'pad'
         if len(pads) == 4:
             paddings = np.array(pads).reshape(
@@ -209,6 +209,10 @@ class ONNXOpMapper(OpMapper):
         elif len(pads) == 8:
             paddings = np.array(pads).reshape(
                 (-1, 4)).transpose().flatten().tolist()  # SSEE -> SESE
+            if sum(paddings[:4]) == 0:
+                fluid_op = 'pad2d'
+                paddings = paddings[4:]
+                attr['mode'] = string(mode)
         attr['paddings'] = paddings
         if op_independent:
             attr['name'] = string(node.layer_name)
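Note: ONNX Pad stores pads as all begins then all ends (SSEE); reshape((-1, 4)).transpose() interleaves them per axis (SESE), and when the first four entries (the batch and channel pairs) are all zero the op can lower to pad2d on the spatial paddings alone. Worked example with assumed pad values:

import numpy as np

pads = [0, 0, 1, 2, 0, 0, 3, 4]  # begins N,C,H,W then ends N,C,H,W
paddings = np.array(pads).reshape((-1, 4)).transpose().flatten().tolist()
print(paddings)           # [0, 0, 0, 0, 1, 3, 2, 4]
print(sum(paddings[:4]))  # 0 -> only H/W are padded, pad2d applies
print(paddings[4:])       # [1, 3, 2, 4]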
@@ -244,7 +248,7 @@ class ONNXOpMapper(OpMapper):
         shape = node.get_attr('shape', None)
         if shape is None:
-            shape = val_output.out_shapes
+            shape = val_output.out_shapes[0]
         if shape is None:
             shape = list(value.shape)
             _logger.warning(
@@ -271,7 +275,7 @@ class ONNXOpMapper(OpMapper):
         val_scales = self.graph.get_node(node.layer.input[1], copy=True)
         val_y, = self.graph.get_node(node.layer.output[0], copy=True)

-        out_shape_ = val_y.out_shapes
+        out_shape_ = val_y.out_shapes[0]
         if out_shape_ is not None:
             assert len(out_shape_) == 4, 'only 4-D Tensor as X and Y supported'
             out_shape_ = out_shape_[2:]
@@ -289,7 +293,7 @@ class ONNXOpMapper(OpMapper):
         else:
             out_shape = None
         if out_shape_ is None:
-            in_shape = val_x.out_shapes
+            in_shape = val_x.out_shapes[0]
             assert in_shape is not None, 'out_shape required but not inferrable'
             assert len(
                 in_shape) == 4, 'only 4-D Tensor as X and Y supported'
@@ -311,11 +315,11 @@ class ONNXOpMapper(OpMapper):
     def ConstantOfShape(self, node):
         val_shape = self.graph.get_node(node.layer.input[0], copy=True)
+        val_y = self.graph.get_node(node.layer.output[0], copy=True)

         shape = _const_weight_or_none(val_shape)

         if shape is None:
-            shape = node.out_shapes
+            shape = node.out_shapes[0]
         assert shape is not None, (
             'given shape is neither const value nor deductible from output, '
@@ -362,7 +366,7 @@ class ONNXOpMapper(OpMapper):
             shape = self.decoder.get_dynamic_shape_from_caffe2(
                 val_shape.layer_name, self.input_shapes)
         if shape is None:
-            shape = val_reshaped.out_shapes
+            shape = val_reshaped.out_shapes[0]

         shape_dtype = val_shape.dtype
@@ -417,7 +421,7 @@ class ONNXOpMapper(OpMapper):
         assert 2 <= poolnd <= 3, 'only pool2d and pool3d is supported'

         paddings, val_x = self._pad_if_asymmetric(node, pads, val_x)
-        input_shape = val_x.out_shapes
+        input_shape = val_x.out_shapes[0]
         if auto_pad == "SAME_UPPER" or auto_pad == "SAME_LOWER":
             pad_h = get_same_padding(input_shape[2], kernel_shape[0],
                                      strides[0])
@@ -572,6 +576,42 @@ class ONNXOpMapper(OpMapper):
                                   output=node,
                                   param_attr=attr)

+    def InstanceNormalization(self, node):
+        '''
+        y = scale * (x - mean) / sqrt(variance + epsilon) + B
+        '''
+        val_x = self.graph.get_node(node.layer.input[0], copy=True)
+        val_scale = self.graph.get_node(node.layer.input[1], copy=True)
+        val_b = self.graph.get_node(node.layer.input[2], copy=True)
+        epsilon = node.get_attr('epsilon', 1e-5)
+        num_out_channels = val_scale.out_shapes[0][0]
+        attr = {
+            "groups": num_out_channels,
+            "epsilon": epsilon,
+            "param_attr": string(val_scale.layer_name),
+            "bias_attr": string(val_b.layer_name),
+            "name": string(node.layer_name)
+        }
+        if val_scale.layer_type == 'Constant':
+            self.weights[val_scale.layer_name] = val_scale.get_attr('value')
+        if val_b.layer_type == 'Constant':
+            self.weights[val_b.layer_name] = val_b.get_attr('value')
+        # node_data_norm = node.layer_name +'data_norm'
+        node.fluid_code.add_layer("group_norm",
+                                  inputs=val_x,
+                                  output=node,
+                                  param_attr=attr)
+        # node.fluid_code.add_layer("elementwise_add",
+        #                           val_x.layer_name +','+ node_data_norm,
+        #                           output=node,
+        #                           param_attr=attr)
+
     def Softmax(self, node):
         val_x = self.graph.get_node(node.layer.input[0], copy=True)
         attr = {"name": string(node.layer_name)}
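Note: the new handler lowers InstanceNormalization to group_norm with groups equal to the channel count; with one channel per group the statistics are computed per (N, C) over H x W, which is exactly the y = scale * (x - mean) / sqrt(variance + epsilon) + B formula in the docstring. A NumPy check of that equivalence (shapes assumed):

import numpy as np

x = np.random.rand(2, 4, 8, 8).astype('float32')
mean = x.mean(axis=(2, 3), keepdims=True)  # per-sample, per-channel stats
var = x.var(axis=(2, 3), keepdims=True)
y = (x - mean) / np.sqrt(var + 1e-5)       # scale = 1, B = 0 for the check
print(abs(y.mean(axis=(2, 3))).max())      # ~0: normalized per (N, C)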
@@ -610,12 +650,17 @@ class ONNXOpMapper(OpMapper):
     def PRelu(self, node):
         val_x = self.graph.get_node(node.layer.input[0], copy=True)
         val_slope = self.graph.get_node(node.layer.input[1], copy=True)

-        attr = {"name": string(node.layer_name), "mode": string('channel')}
-        if isinstance(val_slope, str):
-            attr["param_attr"] = string(val_slope.layer_name)
-        else:
-            attr["param_attr"] = string(val_slope.layer_name)
+        mode = 'channel'
+        shape_slope = val_slope.out_shapes[0]
+        if len(shape_slope) == 1:
+            mode = 'all'
+        elif len(shape_slope) > 2:
+            mode = 'element'
+        attr = {
+            "param_attr": string(val_slope.layer_name),
+            'mode': string(mode)
+        }
         node.fluid_code.add_layer("prelu",
                                   inputs=val_x,
                                   output=node,
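Note: the prelu mode now follows the slope tensor's rank instead of being hard-coded to 'channel'. The selection logic in isolation, with assumed slope shapes:

def prelu_mode(shape_slope):
    if len(shape_slope) == 1:
        return 'all'       # one shared slope
    elif len(shape_slope) > 2:
        return 'element'   # per-element slopes
    return 'channel'       # per-channel slopes

print(prelu_mode([1]), prelu_mode([64, 1]), prelu_mode([1, 64, 1, 1]))
# all channel element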
@@ -651,7 +696,7 @@ class ONNXOpMapper(OpMapper):
         assert 2 <= poolnd <= 3, 'only pool2d and pool3d is supported'

         paddings, val_x = self._pad_if_asymmetric(node, pads, val_x)
-        input_shape = val_x.out_shapes
+        input_shape = val_x.out_shapes[0]
         if auto_pad == "SAME_UPPER" or auto_pad == "SAME_LOWER":
             pad_h = get_same_padding(input_shape[2], kernel_shape[0],
                                      strides[0])
@@ -676,8 +721,8 @@ class ONNXOpMapper(OpMapper):
     def GlobalAveragePool(self, node):
         val_x = self.graph.get_node(node.layer.input[0], copy=True)
         val_y = self.graph.get_node(node.layer.output[0], copy=True)
-        input_shape = val_x.out_shapes
-        output_shape = val_y.out_shapes
+        input_shape = val_x.out_shapes[0]
+        output_shape = val_y.out_shapes[0]
         assert input_shape is not None or output_shape is not None, 'poolnd not inferred'  # N
         if input_shape:
             poolnd = len(input_shape) - 2  # NC...
@@ -701,7 +746,7 @@ class ONNXOpMapper(OpMapper):
         val_y = self.graph.get_node(node.layer.output[0], copy=True)

         self.omit_nodes.append(val_w.layer_name)

-        input_shape = val_x.out_shapes
+        input_shape = val_x.out_shapes[0]
         has_bias = len(node.layer.input) == 3
         if has_bias:
@@ -709,12 +754,12 @@ class ONNXOpMapper(OpMapper):
             self.omit_nodes.append(val_b.layer_name)

         auto_pad = node.get_attr('auto_pad', 'NOTSET')
-        kernel_shape = val_w.out_shapes[2:]  # OI...
+        kernel_shape = val_w.out_shapes[0][2:]  # OI...
         assert kernel_shape == node.get_attr(
             'kernel_shape'), 'kernel_shape in attr unmatches value_info'  # HW
         convnd = len(kernel_shape)
         assert 2 <= convnd <= 3, 'only conv2d and conv3d is supported'
-        num_out_channels = val_w.out_shapes[0]  # OI...
+        num_out_channels = val_w.out_shapes[0][0]  # OI...
         fluid_op = 'conv{}d'.format(convnd)

         num_groups = node.get_attr('group', 1)
@@ -749,3 +794,56 @@ class ONNXOpMapper(OpMapper):
                                   inputs=val_x,
                                   output=node,
                                   param_attr=attr)
+
+    def ConvTranspose(self, node):
+        val_x = self.graph.get_node(node.layer.input[0], copy=True)
+        val_w = self.graph.get_node(node.layer.input[1], copy=True)
+        val_b = self.graph.get_node(node.layer.input[2], copy=True)
+        self.omit_nodes.append(val_w.layer_name)
+        self.omit_nodes.append(val_b.layer_name)
+        val_y = self.graph.get_node(node.layer.output[0], copy=True)
+
+        auto_pad = node.get_attr('auto_pad', 'NOTSET')
+        out_padding = node.get_attr('output_padding', [0, 0])
+        kernel_shape = node.get_attr('kernel_shape', val_w.out_shapes[0][2:])
+        assert kernel_shape, 'kernel_shape not inferred'
+        convnd = len(kernel_shape)
+        assert 2 <= convnd <= 3, 'only conv2d_transpose and conv3d_transpose supported'
+        num_out_channels = val_w.out_shapes[0][1]  # IO...
+        fluid_op = 'conv{}d_transpose'.format(convnd)
+
+        num_groups = node.get_attr('group', 1)  # optional
+        strides = node.get_attr('strides', [1] * convnd)  # optional
+        dilations = node.get_attr('dilations', [1] * convnd)  # optional
+        output_size = node.get_attr('output_shape', [])  # optional
+        pads = node.get_attr('pads', [0] * (convnd * 2))  # optional
+
+        paddings, var_x = self._pad_if_asymmetric(node, pads, val_x)
+
+        output_size = [0, 0]
+        print(val_x.out_shapes[0])
+        output_size[0] = (val_x.out_shapes[0][2] - 1) * strides[0] - 2 * paddings[
+            0] + dilations[0] * (kernel_shape[0] - 1) + 1 + out_padding[0]
+        output_size[1] = (val_x.out_shapes[0][3] - 1) * strides[1] - 2 * paddings[
+            1] + dilations[1] * (kernel_shape[1] - 1) + 1 + out_padding[1]
+        print(output_size)
+        attr = {
+            'num_filters': num_out_channels,
+            'output_size': output_size or None,
+            'filter_size': kernel_shape,
+            'padding': paddings,
+            'stride': strides,
+            'dilation': dilations,
+            'groups': num_groups,
+            'param_attr': string(val_w.layer_name),
+            'bias_attr': string(val_b.layer_name),
+            'name': string(node.layer_name),
+        }
+        node.fluid_code.add_layer(fluid_op,
+                                  inputs=val_x,
+                                  output=node,
+                                  param_attr=attr)
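Note: the two output_size assignments are the standard transposed-convolution size relation, out = (in - 1) * stride - 2 * pad + dilation * (kernel - 1) + 1 + output_padding, applied to H and W. Worked example with numbers typical of a CycleGAN decoder stage (values assumed):

in_hw, stride, pad, dilation, kernel, out_pad = 64, 2, 1, 1, 4, 0
out_hw = (in_hw - 1) * stride - 2 * pad + dilation * (kernel - 1) + 1 + out_pad
print(out_hw)  # 128: the layer doubles the spatial size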