PaddlePaddle / X2Paddle
Commit 5bcd803c
Authored on Aug 13, 2020 by SunAhong1993

add models

Parent: 4c85cdff
Changes: 5

Showing 5 changed files with 543 additions and 125 deletions (+543 −125)
x2paddle/core/convert_prim.py                             +28   -6
x2paddle/core/program.py                                   +1   -0
x2paddle/op_mapper/pytorch2paddle/aten.py                +397  -90
x2paddle/op_mapper/pytorch2paddle/prim.py                 +79  -17
x2paddle/op_mapper/pytorch2paddle/pytorch_op_mapper.py    +38  -12
x2paddle/core/convert_prim.py  (view file @ 5bcd803c)

@@ -73,7 +73,7 @@ def convert_prim(layer, indent=1, init_func=[], forward_func=[]):
     elif layer.kernel == "prim.min":
         line = "{} = min({})".format(layer.outputs[0],
                                      list(layer.inputs.values())[0])
-    elif layer.kernel == "prim.add":
+    elif layer.kernel == "prim.add_":
         line = "{} = {} + {} * {}".format(layer.outputs[0],
                                           list(layer.inputs.values())[0],
                                           layer.attrs["alpha"],
@@ -124,11 +124,33 @@ def convert_prim(layer, indent=1, init_func=[], forward_func=[]):
         if list(layer.inputs.values())[1] is None:
             item1 = str(layer.attrs[list(layer.inputs.keys())[1]])
         line = "{} = {} < {}".format(layer.outputs[0], item0, item1)
+    elif layer.kernel == "prim.ne":
+        item0 = list(layer.inputs.values())[0]
+        item1 = list(layer.inputs.values())[1]
+        line = "{} = {} < {}".format(layer.outputs[0], item0, item1)
     elif layer.kernel == "prim.slice":
-        attrs_str = ""
-        for k, v in layer.attrs.items():
-            attrs_str += "{}:".format(v)
-        attrs_str = attrs_str[:-1]
+        inputs_str = ""
+        for v in list(layer.inputs.values())[1:]:
+            inputs_str += "{}:".format(v)
+        inputs_str = inputs_str[:-1]
         line = "{} = {}[{}]".format(layer.outputs[0],
-                                    list(layer.inputs.values())[0], attrs_str)
+                                    list(layer.inputs.values())[0], inputs_str)
+    elif layer.kernel == "prim.add":
+        line = "{} = {} + {}".format(layer.outputs[0],
+                                     list(layer.inputs.values())[0],
+                                     list(layer.inputs.values())[1])
+    elif layer.kernel == "prim.sub":
+        line = "{} = {} - {}".format(layer.outputs[0],
+                                     list(layer.inputs.values())[0],
+                                     list(layer.inputs.values())[1])
+    elif layer.kernel == "prim.mul":
+        line = "{} = {} * {}".format(layer.outputs[0],
+                                     list(layer.inputs.values())[0],
+                                     list(layer.inputs.values())[1])
+    elif layer.kernel == "prim.neg":
+        line = "{} = -{}".format(layer.outputs[0],
+                                 list(layer.inputs.values())[0])
+    else:
+        print(layer.kernel)
+        line = ""
     forward_func.extend(gen_codes([line], indent=indent))
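For context, a minimal, self-contained sketch of what these string templates produce when convert_prim renders a "prim.add_" layer into one line of the generated forward function. The FakeLayer container and the variable names are made up for illustration, and the fourth format argument (the second input) is an assumption, since the hunk above cuts off before it.

# Illustrative sketch only: FakeLayer is a stand-in for the real PaddleLayer object.
class FakeLayer:
    def __init__(self, kernel, inputs, outputs, attrs):
        self.kernel = kernel      # e.g. "prim.add_"
        self.inputs = inputs      # ordered mapping of input variable names
        self.outputs = outputs    # list of output variable names
        self.attrs = attrs        # constant attributes such as alpha

layer = FakeLayer(
    kernel="prim.add_",
    inputs={"x": "x2300", "y": "x2301"},   # hypothetical variable names
    outputs=["x2302"],
    attrs={"alpha": 1})

# Same template as the "prim.add_" branch above: out = x + alpha * y
line = "{} = {} + {} * {}".format(layer.outputs[0],
                                  list(layer.inputs.values())[0],
                                  layer.attrs["alpha"],
                                  list(layer.inputs.values())[1])
print(line)  # -> x2302 = x2300 + 1 * x2301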
x2paddle/core/program.py  (view file @ 5bcd803c)

@@ -297,6 +297,7 @@ class PaddleGraph(object):
             for output_name in layer.outputs:
                 if not output_name.startswith("x"):
                     continue
+                print(layer.kernel)
                 self.outputs.append(output_name)
         self.outputs = list(set(self.outputs))
x2paddle/op_mapper/pytorch2paddle/aten.py  (view file @ 5bcd803c)

@@ -30,16 +30,19 @@ def aten_adaptive_avg_pool2d(mapper, graph, node):
     layer_inputs = {}
     layer_attrs = {}
     inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
     # Process input 0, i.e. %x.3
-    mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
     layer_inputs["input"] = inputs_name[0]
-    # Get the lists of inputs and outputs of the current node
+    # Get the list of inputs of the current node
     current_inputs = list(layer_inputs.values())
-    current_outputs = layer_outputs
     # Process input 1, i.e. %_output_size.1
     if inputs_name[1] in mapper.attrs:
         layer_attrs["pool_size"] = mapper.attrs[inputs_name[1]]
     else:
+        mapper._check_input(graph, inputs_node[1], inputs_name[1],
+                            current_outputs)
         layer_attrs["pool_size"] = inputs_name[1]
         current_inputs.append(inputs_name[1])
     layer_attrs["pool_type"] = string("avg")
@@ -70,29 +73,34 @@ def aten_addmm(mapper, graph, node):
     layer_inputs = {}
     layer_attrs = {}
     inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
     # Process input 0, i.e. %150
     mapper._check_input(
-        graph, inputs_node[0], inputs_name[0], layer_outputs, add_dim=True)
+        graph, inputs_node[0], inputs_name[0], current_outputs, add_dim=True)
     layer_inputs["input"] = inputs_name[0]
     # Process input 1, i.e. %input.3
-    mapper._check_input(graph, inputs_node[1], inputs_name[1], layer_outputs)
+    mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs)
     layer_inputs["x"] = inputs_name[1]
     # Process input 2, i.e. %156
-    mapper._check_input(graph, inputs_node[2], inputs_name[2], layer_outputs)
+    mapper._check_input(graph, inputs_node[2], inputs_name[2], current_outputs)
     layer_inputs["y"] = inputs_name[2]
-    # Get the lists of inputs and outputs of the current node
+    # Get the list of inputs of the current node
     current_inputs = list(layer_inputs.values())
-    current_outputs = layer_outputs
     # Process input 3, i.e. %152
     if inputs_name[3] in mapper.attrs:
         layer_attrs["beta"] = mapper.attrs[inputs_name[3]]
     else:
+        mapper._check_input(graph, inputs_node[3], inputs_name[3],
+                            current_outputs)
         layer_attrs["beta"] = inputs_name[3]
         current_inputs.append(inputs_name[3])
     # Process input 4, i.e. %151
     if inputs_name[4] in mapper.attrs:
         layer_attrs["alpha"] = mapper.attrs[inputs_name[4]]
     else:
+        mapper._check_input(graph, inputs_node[4], inputs_name[4],
+                            current_outputs)
         layer_attrs["alpha"] = inputs_name[4]
         current_inputs.append(inputs_name[4])
@@ -104,11 +112,41 @@ def aten_addmm(mapper, graph, node):
     return current_inputs, current_outputs


+def aten_add(mapper, graph, node):
+    """ Construct a PaddleLayer for numeric addition; the node implements out = x + y.
+    TorchScript example:
+        %296 : int = aten::add(%i.12, %288)
+    Parameter meanings:
+        %296 (-): result of the addition.
+        %i.12 (-): input value x.
+        %288 (-): input value y.
+    """
+    output_name = mapper._get_outputs_name(node)[0]
+    layer_outputs = [output_name]
+    layer_inputs = {}
+    inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
+    # Process input 0, i.e. %i.12
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
+    layer_inputs["x"] = inputs_name[0]
+    # Process input 1, i.e. %288
+    mapper._check_input(
+        graph, inputs_node[1], inputs_name[1], current_outputs, add_dim=True)
+    layer_inputs["y"] = inputs_name[1]
+    # Get the list of inputs of the current node
+    current_inputs = list(layer_inputs.values())
+
+    graph.add_layer("prim.add", inputs=layer_inputs, outputs=layer_outputs)
+    return current_inputs, current_outputs
+
+
 def aten_add_(mapper, graph, node):
-    """ Construct a PaddleLayer for add; the node implements out = x + alpha * y.
+    """ Construct a PaddleLayer for numeric addition; the node implements out = x + alpha * y.
     TorchScript example:
-        %output.5 : Tensor = aten::add_(%output.2, %150, %151)
+        %137 : Tensor = aten::add(%136, %130, %130)
     Parameter meanings:
         %output.5 (Tensor): the result Tensor of add.
         %output.2 (Tensor): input Tensor x.
@@ -120,25 +158,28 @@ def aten_add_(mapper, graph, node):
     layer_inputs = {}
     layer_attrs = {}
     inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
     # Process input 0, i.e. %output.2
-    mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
     layer_inputs["x"] = inputs_name[0]
     # Process input 1, i.e. %150
     mapper._check_input(
-        graph, inputs_node[1], inputs_name[1], layer_outputs, add_dim=True)
+        graph, inputs_node[1], inputs_name[1], current_outputs, add_dim=True)
     layer_inputs["y"] = inputs_name[1]
-    # Get the lists of inputs and outputs of the current node
+    # Get the list of inputs of the current node
     current_inputs = list(layer_inputs.values())
-    current_outputs = layer_outputs
     # Process input 2, i.e. %151
     if inputs_name[2] in mapper.attrs:
         layer_attrs["alpha"] = mapper.attrs[inputs_name[2]]
     else:
+        mapper._check_input(graph, inputs_node[2], inputs_name[2],
+                            current_outputs)
         layer_attrs["alpha"] = inputs_name[2]
         current_inputs.append(inputs_name[2])

     graph.add_layer(
-        "prim.add", inputs=layer_inputs, outputs=layer_outputs, **layer_attrs)
+        "prim.add_", inputs=layer_inputs, outputs=layer_outputs, **layer_attrs)
     return current_inputs, current_outputs
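As a reference for where the aten::add / aten::add_ nodes handled above come from, here is a minimal sketch (assuming PyTorch is installed; the scripted function is made up for illustration) that produces such nodes in a TorchScript graph:

import torch

@torch.jit.script
def f(x: torch.Tensor, y: torch.Tensor):
    z = x + y           # scripted as an aten::add node
    z.add_(y, alpha=2)  # scripted as an aten::add_ node carrying an alpha argument
    return z

# Printing the graph shows the aten::add / aten::add_ nodes that the
# aten_add / aten_add_ mappers above translate into prim.add / prim.add_ layers.
print(f.graph)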
@@ -156,20 +197,87 @@ def aten_append(mapper, graph, node):
     layer_outputs = [output_name]
     layer_inputs = {}
     inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
     # Process input 0, i.e. _output_size.1
-    mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
     layer_inputs["list"] = inputs_name[0]
     # Process input 1, i.e. v.1
-    mapper._check_input(graph, inputs_node[1], inputs_name[1], layer_outputs)
+    mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs)
     layer_inputs["element"] = inputs_name[1]
-    # Get the lists of inputs and outputs of the current node
+    # Get the list of inputs of the current node
     current_inputs = list(layer_inputs.values())
-    current_outputs = layer_outputs

     graph.add_layer("prim.append", inputs=layer_inputs, outputs=layer_outputs)
     return current_inputs, current_outputs


+def aten_batch_norm(mapper, graph, node):
+    """ Construct a PaddleLayer for BatchNorm.
+    TorchScript example:
+        %input.81 : Tensor = aten::batch_norm(%input.80, %778, %779, %776, %777, %780,
+                                              %exponential_average_factor.23, %766, %781)
+    Parameter meanings:
+        %input.81 (Tensor): output, the result after batch normalization.
+        %input.80 (Tensor): the feature map to be batch-normalized.
+        %778 (Tensor): weights.
+        %779 (Tensor): bias.
+        %776 (Tensor): global mean.
+        %777 (Tensor): global variance.
+        %780 (bool): whether in training mode.
+        %exponential_average_factor.23 (float): factor used to compute the mean and variance.
+        %766 (float): value added to the denominator for numerical stability.
+        %781 (bool): whether cudnn is enabled.
+    """
+    if "batchnorm" in mapper.dygraph_name_id:
+        mapper.dygraph_name_id["batchnorm"] += 1
+    else:
+        mapper.dygraph_name_id["batchnorm"] = 0
+    batchnorm_name = "batchnorm" + str(mapper.dygraph_name_id["batchnorm"])
+    output_name = mapper._get_outputs_name(node)[0]
+    layer_outputs = [batchnorm_name, output_name]
+    layer_inputs = {}
+    layer_attrs = {}
+    layer_attrs["is_test"] = True
+    inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
+    # Process input 0, i.e. %input.80
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
+    layer_inputs["input"] = inputs_name[0]
+    # Get the lists of inputs and outputs of the current node
+    current_inputs = list(layer_inputs.values())
+    # Process input 1, i.e. %778
+    weights = mapper.pytorch_params[inputs_name[1]]
+    mapper.paddle_params[batchnorm_name + ".weight"] = weights
+    layer_attrs['num_channels'] = weights.shape[0]
+    # Process input 2, i.e. %779
+    if inputs_name[2] in mapper.pytorch_params:
+        bias = mapper.pytorch_params[inputs_name[2]]
+        if bias is not None:
+            mapper.paddle_params[batchnorm_name + ".bias"] = bias
+    else:
+        mapper.paddle_params[batchnorm_name + ".bias"] = False
+    # Process input 3, i.e. %776
+    mean = mapper.pytorch_params[inputs_name[3]]
+    mapper.paddle_params[batchnorm_name + "._mean"] = mean
+    # Process input 4, i.e. %777
+    var = mapper.pytorch_params[inputs_name[4]]
+    mapper.paddle_params[batchnorm_name + "._variance"] = var
+    # Process input 6, i.e. %exponential_average_factor.23
+    layer_attrs["momentum"] = mapper.attrs[inputs_name[6]]
+    # Process input 7, i.e. %766
+    layer_attrs["epsilon"] = mapper.attrs[inputs_name[7]]
+
+    graph.add_layer(
+        "fluid.dygraph.BatchNorm",
+        inputs=layer_inputs,
+        outputs=layer_outputs,
+        **layer_attrs)
+    return current_inputs, current_outputs
+
+
 def aten_conv2d(mapper, graph, node):
     """ Construct a PaddleLayer for conv2d.
@@ -195,12 +303,13 @@ def aten_conv2d(mapper, graph, node):
     layer_inputs = {}
     layer_attrs = {}
     inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
     # Process input 0, i.e. %input.8
-    mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
     layer_inputs["input"] = inputs_name[0]
-    # Get the lists of inputs and outputs of the current node
+    # Get the list of inputs of the current node
     current_inputs = list(layer_inputs.values())
-    current_outputs = layer_outputs[1:]
     # Process input 1, i.e. %25
     weights = mapper.pytorch_params[inputs_name[1]]
     mapper.paddle_params[conv2d_name + ".weight"] = weights
@@ -209,9 +318,12 @@ def aten_conv2d(mapper, graph, node):
     # Process input 2, i.e. %27
     if inputs_name[2] in mapper.pytorch_params:
         bias = mapper.pytorch_params[inputs_name[2]]
-        mapper.paddle_params[conv2d_name + ".bias"] = bias
+        if bias is not None:
+            mapper.paddle_params[conv2d_name + ".bias"] = bias
+        else:
+            layer_attrs["bias_attr"] = False
     else:
-        mapper.paddle_params[conv2d_name + ".bias"] = False
+        layer_attrs["bias_attr"] = False
     # Process input 3, i.e. %28
     layer_attrs["stride"] = mapper.attrs[inputs_name[3]]
     # Process input 4, i.e. %29
@@ -244,12 +356,13 @@ def aten_dim(mapper, graph, node):
     layer_outputs = [output_name]
     layer_inputs = {}
     inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
     # Process input 0, i.e. %input.8
-    mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
     layer_inputs["input"] = inputs_name[0]
-    # Get the lists of inputs and outputs of the current node
+    # Get the list of inputs of the current node
     current_inputs = list(layer_inputs.values())
-    current_outputs = layer_outputs

     graph.add_layer("prim.shape", inputs=layer_inputs, outputs=layer_outputs)
     graph.add_layer(
@@ -276,12 +389,13 @@ def aten_dropout(mapper, graph, node):
     layer_outputs = [dropout_name, output_name]
     layer_inputs = {}
     inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
     # Process input 0, i.e. %119
-    mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
     layer_inputs["input"] = inputs_name[0]
     # Get the lists of inputs and outputs of the current node
     current_inputs = list(layer_inputs.values())
-    current_outputs = layer_outputs[1:]

     graph.add_layer(
         "fluid.dygraph.Dropout",
@@ -305,15 +419,16 @@ def aten_eq(mapper, graph, node):
     layer_outputs = [output_name]
     layer_inputs = {}
     inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
     # Process input 0, i.e. %124
-    mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
-    layer_inputs["eq0"] = inputs_name[0]
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
+    layer_inputs["x"] = inputs_name[0]
     # Process input 1, i.e. %123
-    mapper._check_input(graph, inputs_node[1], inputs_name[1], layer_outputs)
-    layer_inputs["eq1"] = inputs_name[1]
-    # Get the lists of inputs and outputs of the current node
+    mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs)
+    layer_inputs["y"] = inputs_name[1]
+    # Get the list of inputs of the current node
     current_inputs = list(layer_inputs.values())
-    current_outputs = layer_outputs

     graph.add_layer("prim.eq", inputs=layer_inputs, outputs=layer_outputs)
     return current_inputs, current_outputs
@@ -336,6 +451,8 @@ def aten_flatten(mapper, graph, node):
     layer_outputs = [output_name]
     layer_inputs = {}
     inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
     # Process input 1, i.e. %4
     graph.add_layer(
         "prim.assert",
@@ -353,11 +470,10 @@ def aten_flatten(mapper, graph, node):
         key=mapper.attrs[inputs_name[2]],
         value=-1)
     # Process input 0, i.e. %x
-    mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
     layer_inputs["x"] = inputs_name[0]
-    # Get the lists of inputs and outputs of the current node
+    # Get the list of inputs of the current node
     current_inputs = list(layer_inputs.values())
-    current_outputs = layer_outputs

     graph.add_layer(
         "fluid.layers.flatten",
@@ -381,20 +497,68 @@ def aten___getitem__(mapper, graph, node):
     layer_outputs = [output_name]
     layer_inputs = {}
     inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
     # Process input 0, i.e. %72
-    mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
     layer_inputs["list"] = inputs_name[0]
     # Process input 1, i.e. %88
-    mapper._check_input(graph, inputs_node[1], inputs_name[1], layer_outputs)
+    mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs)
     layer_inputs["index"] = inputs_name[1]
-    # Get the lists of inputs and outputs of the current node
+    # Get the list of inputs of the current node
     current_inputs = list(layer_inputs.values())
-    current_outputs = layer_outputs

     graph.add_layer("prim.getitem", inputs=layer_inputs, outputs=layer_outputs)
     return current_inputs, current_outputs


+def aten_hardtanh_(mapper, graph, node):
+    """ Construct a PaddleLayer for hardtanh activation.
+    TorchScript example:
+        %result.9 : Tensor = aten::hardtanh_(%input.20, %67, %66)
+    Parameter meanings:
+        %result.9 (Tensor): output, the Tensor after hardtanh activation.
+        %input.20 (Tensor): the Tensor to apply hardtanh to.
+        %67 (float): minimum threshold of hardtanh.
+        %66 (float): maximum threshold of hardtanh.
+    """
+    output_name = mapper._get_outputs_name(node)[0]
+    layer_outputs = [output_name]
+    layer_inputs = {}
+    inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
+    # Process input 1, i.e. %67
+    graph.add_layer(
+        "prim.assert",
+        inputs={},
+        outputs=[inputs_name[1]],
+        type='eq',
+        key=mapper.attrs[inputs_name[1]],
+        value=0.0)
+    # Process input 2, i.e. %66
+    graph.add_layer(
+        "prim.assert",
+        inputs={},
+        outputs=[inputs_name[2]],
+        type='eq',
+        key=mapper.attrs[inputs_name[2]],
+        value=6.0)
+    # Process input 0, i.e. %input.20
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
+    layer_inputs["x"] = inputs_name[0]
+    # Get the list of inputs of the current node
+    current_inputs = list(layer_inputs.values())
+
+    graph.add_layer(
+        'fluid.layers.relu6',
+        inputs=layer_inputs,
+        outputs=layer_outputs,
+        threshold=6.0)
+    return current_inputs, current_outputs
+
+
 def aten_le(mapper, graph, node):
     """ Construct a PaddleLayer for comparison.
@@ -409,15 +573,16 @@ def aten_le(mapper, graph, node):
     layer_outputs = [output_name]
     layer_inputs = {}
     inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
     # Process input 0, i.e. %78
-    mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
     layer_inputs["input0"] = inputs_name[0]
     # Process input 1, i.e. %79
-    mapper._check_input(graph, inputs_node[1], inputs_name[1], layer_outputs)
+    mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs)
     layer_inputs["input1"] = inputs_name[1]
-    # Get the lists of inputs and outputs of the current node
+    # Get the list of inputs of the current node
     current_inputs = list(layer_inputs.values())
-    current_outputs = layer_outputs

     graph.add_layer("prim.le", inputs=layer_inputs, outputs=layer_outputs)
     return current_inputs, current_outputs
@@ -436,12 +601,13 @@ def aten_len(mapper, graph, node):
     layer_outputs = [output_name]
     layer_inputs = {}
     inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
     # Process input 0, i.e. %72
-    mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
     layer_inputs["input"] = inputs_name[0]
-    # Get the lists of inputs and outputs of the current node
+    # Get the list of inputs of the current node
     current_inputs = list(layer_inputs.values())
-    current_outputs = layer_outputs

     graph.add_layer("prim.len", inputs=layer_inputs, outputs=layer_outputs)
     return current_inputs, current_outputs
@@ -471,12 +637,13 @@ def aten_max_pool2d(mapper, graph, node):
     layer_inputs = {}
     layer_attrs = {}
     inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
     # Process input 0, i.e. %result.11
-    mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
     layer_inputs["input"] = inputs_name[0]
-    # Get the lists of inputs and outputs of the current node
+    # Get the list of inputs of the current node
     current_inputs = list(layer_inputs.values())
-    current_outputs = layer_outputs[1:]
     # Process input 1, i.e. %20
     layer_attrs["pool_size"] = mapper.attrs[inputs_name[1]]
     # Process input 2, i.e. %23
@@ -517,21 +684,106 @@ def aten_matmul(mapper, graph, node):
     layer_outputs = [output_name]
     layer_inputs = {}
     inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
     # Process input 0, i.e. %101
-    mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
     layer_inputs["x"] = inputs_name[0]
     # Process input 1, i.e. %102
-    mapper._check_input(graph, inputs_node[1], inputs_name[1], layer_outputs)
+    mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs)
     layer_inputs["y"] = inputs_name[1]
-    # Get the lists of inputs and outputs of the current node
+    # Get the list of inputs of the current node
     current_inputs = list(layer_inputs.values())
-    current_outputs = layer_outputs

     graph.add_layer(
         "fluid.layers.matmul", inputs=layer_inputs, outputs=layer_outputs)
     return current_inputs, current_outputs


+def aten_mul(mapper, graph, node):
+    """ Construct a PaddleLayer for numeric multiplication.
+    TorchScript example:
+        %size_prods.39 : int = aten::mul(%size_prods.38, %114)
+    Parameter meanings:
+        %size_prods.39 (Tensor): output, the result of the multiplication.
+        %size_prods.38 (-): value 1.
+        %114 (-): value 2.
+    """
+    output_name = mapper._get_outputs_name(node)[0]
+    layer_outputs = [output_name]
+    layer_inputs = {}
+    inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
+    # Process input 0, i.e. %size_prods.38
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
+    layer_inputs["x"] = inputs_name[0]
+    # Process input 1, i.e. %114
+    mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs)
+    layer_inputs["y"] = inputs_name[1]
+    # Get the list of inputs of the current node
+    current_inputs = list(layer_inputs.values())
+    current_outputs = layer_outputs
+
+    graph.add_layer("prim.mul", inputs=layer_inputs, outputs=layer_outputs)
+    return current_inputs, current_outputs
+
+
+def aten_ne(mapper, graph, node):
+    """ Construct a PaddleLayer that checks whether two values are not equal.
+    TorchScript example:
+        %134 : bool = aten::ne(%133, %132)
+    Parameter meanings:
+        %134 (bool): the comparison result.
+        %133 (-): input 1 to compare.
+        %132 (-): input 2 to compare.
+    """
+    output_name = mapper._get_outputs_name(node)[0]
+    layer_outputs = [output_name]
+    layer_inputs = {}
+    inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
+    # Process input 0, i.e. %124
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
+    layer_inputs["x"] = inputs_name[0]
+    # Process input 1, i.e. %123
+    mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs)
+    layer_inputs["y"] = inputs_name[1]
+    # Get the list of inputs of the current node
+    current_inputs = list(layer_inputs.values())
+
+    graph.add_layer("prim.ne", inputs=layer_inputs, outputs=layer_outputs)
+    return current_inputs, current_outputs
+
+
+def aten_neg(mapper, graph, node):
+    """ Construct a PaddleLayer that negates a value.
+    TorchScript example:
+        %909 : int = aten::neg(%908)
+    Parameter meanings:
+        %909 (int): the negated result.
+        %908 (int): the input to negate.
+    """
+    output_name = mapper._get_outputs_name(node)[0]
+    layer_outputs = [output_name]
+    layer_inputs = {}
+    inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
+    # Process input 0, i.e. %124
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
+    layer_inputs["input"] = inputs_name[0]
+    # Get the list of inputs of the current node
+    current_inputs = list(layer_inputs.values())
+
+    graph.add_layer("prim.neg", inputs=layer_inputs, outputs=layer_outputs)
+    return current_inputs, current_outputs
+
+
 def aten_relu_(mapper, graph, node):
     """ Construct a PaddleLayer for ReLU activation.
@@ -547,12 +799,13 @@ def aten_relu_(mapper, graph, node):
     layer_outputs = [output_name]
     layer_inputs = {}
     inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
     # Process input 0, i.e. %result.5
-    mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
     layer_inputs["x"] = inputs_name[0]
-    # Get the lists of inputs and outputs of the current node
+    # Get the list of inputs of the current node
     current_inputs = list(layer_inputs.values())
-    current_outputs = layer_outputs

     graph.add_layer(
         "fluid.layers.relu", inputs=layer_inputs, outputs=layer_outputs)
@@ -574,12 +827,13 @@ def aten_relu6(mapper, graph, node):
     layer_outputs = [output_name]
     layer_inputs = {}
     inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
     # Process input 0, i.e. %result.5
-    mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
     layer_inputs["x"] = inputs_name[0]
     # Get the lists of inputs and outputs of the current node
     current_inputs = list(layer_inputs.values())
-    current_outputs = layer_outputs

     graph.add_layer(
         "fluid.layers.relu6",
@@ -589,6 +843,37 @@ def aten_relu6(mapper, graph, node):
     return current_inputs, current_outputs


+def aten_reshape(mapper, graph, node):
+    """ Construct a PaddleLayer that reshapes a Tensor.
+    TorchScript example:
+        %x.6 : Tensor = aten::reshape(%4700, %4703)
+    Parameter meanings:
+        %x.6 (Tensor): output, the reshaped Tensor.
+        %4700 (Tensor): the Tensor to reshape.
+        %4703 (list): the target shape as a list.
+    """
+    output_name = mapper._get_outputs_name(node)[0]
+    layer_outputs = [output_name]
+    layer_inputs = {}
+    layer_attrs = {}
+    inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
+    # Process input 0, i.e. %4700
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
+    layer_inputs["x"] = inputs_name[0]
+    # Process input 1, i.e. %4703
+    mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs)
+    layer_inputs["shape"] = inputs_name[1]
+    # Get the lists of inputs and outputs of the current node
+    current_inputs = list(layer_inputs.values())
+
+    graph.add_layer(
+        "fluid.layers.reshape", inputs=layer_inputs, outputs=layer_outputs)
+    return current_inputs, current_outputs
+
+
 def aten_size(mapper, graph, node):
     """ Construct a PaddleLayer that gets the shape.
@@ -602,12 +887,13 @@ def aten_size(mapper, graph, node):
     layer_outputs = [output_name]
     layer_inputs = {}
     inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
     # Process input 0, i.e. %x.12
-    mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
     layer_inputs["input"] = inputs_name[0]
-    # Get the lists of inputs and outputs of the current node
+    # Get the list of inputs of the current node
     current_inputs = list(layer_inputs.values())
-    current_outputs = layer_outputs

     graph.add_layer("prim.shape", inputs=layer_inputs, outputs=layer_outputs)
     return current_inputs, current_outputs
@@ -628,35 +914,55 @@ def aten_slice(mapper, graph, node):
     output_name = mapper._get_outputs_name(node)[0]
     layer_outputs = [output_name]
     layer_inputs = {}
+    layer_attrs = {}
     inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
     # Process input 0, i.e. %73
-    mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
     layer_inputs["input"] = inputs_name[0]
-    # Get the lists of inputs and outputs of the current node
+    # Get the list of inputs of the current node
     current_inputs = list(layer_inputs.values())
-    current_outputs = layer_outputs
     # Process input 1, i.e. %82
-    layer_inputs["start"] = inputs_name[1]
+    if inputs_name[1] in mapper.attrs:
+        layer_attrs["start"] = mapper.attrs[inputs_name[1]]
+    else:
+        mapper._check_input(graph, inputs_node[1], inputs_name[1],
+                            current_outputs)
+        layer_attrs["start"] = inputs_name[1]
+        current_inputs.append(inputs_name[1])
     # Process input 2, i.e. %75
-    layer_inputs["end"] = inputs_name[2]
+    if inputs_name[2] in mapper.attrs:
+        layer_attrs["end"] = mapper.attrs[inputs_name[2]]
+    else:
+        mapper._check_input(graph, inputs_node[2], inputs_name[2],
+                            current_outputs)
+        layer_attrs["end"] = inputs_name[2]
+        current_inputs.append(inputs_name[2])
     # Process input 3, i.e. %77
-    layer_inputs["step"] = inputs_name[3]
+    if inputs_name[3] in mapper.attrs:
+        layer_attrs["step"] = mapper.attrs[inputs_name[3]]
+    else:
+        mapper._check_input(graph, inputs_node[3], inputs_name[3],
+                            current_outputs)
+        layer_attrs["step"] = inputs_name[3]
+        current_inputs.append(inputs_name[3])

     graph.add_layer(
-        "prim.slice", inputs=layer_inputs, outputs=current_outputs)
+        "prim.slice", inputs=layer_inputs, outputs=layer_outputs, **layer_attrs)
     return current_inputs, current_outputs


+def aten_sub(mapper, graph, node):
+    """ Construct a PaddleLayer for numeric subtraction.
+    TorchScript example:
+        %840 : int = aten::sub(%839, %836)
+    Parameter meanings:
+        %840 (-): result of the subtraction.
+        %839 (-): input value x.
+        %836 (-): input value y.
+    """
+    output_name = mapper._get_outputs_name(node)[0]
+    layer_outputs = [output_name]
+    layer_inputs = {}
+    inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
+    # Process input 0, i.e. %839
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
+    layer_inputs["x"] = inputs_name[0]
+    # Process input 1, i.e. %836
+    mapper._check_input(
+        graph, inputs_node[1], inputs_name[1], current_outputs, add_dim=True)
+    layer_inputs["y"] = inputs_name[1]
+    # Get the list of inputs of the current node
+    current_inputs = list(layer_inputs.values())
+
+    graph.add_layer("prim.sub", inputs=layer_inputs, outputs=layer_outputs)
+    return current_inputs, current_outputs
+
+
     return current_inputs, current_outputs
@@ -664,7 +970,7 @@ def aten_t(mapper, graph, node):
     """ Construct a PaddleLayer for matrix transpose.
     TorchScript example:
-        %109 : Tensor = aten::t(%102)
+        %840 : int = aten::sub(%839, %836)
     Parameter meanings:
         %109 (Tensor): output, the transposed matrix.
         %102 (Tensor): the Tensor to transpose.
@@ -673,12 +979,13 @@ def aten_t(mapper, graph, node):
     layer_outputs = [output_name]
     layer_inputs = {}
     inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
     # Process input 0, i.e. %x.12
-    mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
     layer_inputs["x"] = inputs_name[0]
-    # Get the lists of inputs and outputs of the current node
+    # Get the list of inputs of the current node
     current_inputs = list(layer_inputs.values())
-    current_outputs = layer_outputs

     graph.add_layer(
         "fluid.layers.transpose",
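Taken together, the aten.py changes repeat one pattern. The sketch below only re-states that pattern for a made-up unary op ("aten_example_op" emitting a hypothetical "prim.example" kernel); all helper names come from the diff above, nothing here is an actual new API.

# Illustrative sketch of the common mapper shape used throughout aten.py:
# collect output names, register inputs via _check_input, then emit one layer.
def aten_example_op(mapper, graph, node):
    output_name = mapper._get_outputs_name(node)[0]
    layer_outputs = [output_name]
    layer_inputs = {}
    inputs_name, inputs_node = mapper._get_inputs_name(node)
    # Outputs of the current node
    current_outputs = [output_name]
    # Register input 0 and expose it as the Paddle layer's "x" argument
    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
    layer_inputs["x"] = inputs_name[0]
    # Inputs of the current node
    current_inputs = list(layer_inputs.values())

    graph.add_layer("prim.example", inputs=layer_inputs, outputs=layer_outputs)
    return current_inputs, current_outputs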
x2paddle/op_mapper/pytorch2paddle/prim.py  (view file @ 5bcd803c)

@@ -53,6 +53,10 @@ def prim_GetAttr(mapper, graph, node):
             node = input_node
         except Exception:
             break
+    if ".".join(field_name_list) in mapper.pytorch_params:
+        mapper.pytorch_params[output_name] = mapper.pytorch_params[".".join(
+            field_name_list)]
+    else:
         part_script = mapper.script
         for field_name in field_name_list:
             if hasattr(part_script, field_name):
@@ -78,12 +82,13 @@ def prim_ListConstruct(mapper, graph, node):
     layer_outputs = [output_name]
     layer_inputs = {}
     inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
     # Process each input
     for i, input_name in enumerate(inputs_name):
         layer_inputs["input{}".format(i)] = input_name
-    # Get the lists of inputs and outputs of the current node
+    # Get the list of inputs of the current node
     current_inputs = list(layer_inputs.values())
-    current_outputs = layer_outputs

     graph.add_layer("prim.list", inputs=layer_inputs, outputs=layer_outputs)
     return current_inputs, current_outputs
@@ -101,12 +106,13 @@ def prim_RaiseException(mapper, graph, node):
     layer_outputs = [output_name]
     layer_inputs = {}
     inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
     # Process input 0, i.e. %76
-    mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
     layer_inputs["input"] = inputs_name[0]
-    # Get the lists of inputs and outputs of the current node
+    # Get the list of inputs of the current node
     current_inputs = list(layer_inputs.values())
-    current_outputs = layer_outputs

     graph.add_layer(
         "prim.exception", inputs=layer_inputs, outputs=layer_outputs)
@@ -134,6 +140,9 @@ def prim_Loop(mapper, graph, node):
     block = list(node.blocks())[0]
     loop_outputs = node_outputs
     for i, block_input_ivalue in enumerate(block.inputs()):
+        if i == 0:
+            block_input_node_name = '_x' + str(mapper.output_index)
+        else:
             block_input_node_name = 'x' + str(mapper.output_index)
         unique_id = block_input_ivalue.unique()
         if unique_id not in mapper.outputs_info:
@@ -226,12 +235,65 @@ def prim_min(mapper, graph, node):
     layer_outputs = [output_name]
     layer_inputs = {}
     inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
     # Process input 0, i.e. %86
-    mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
     layer_inputs["input"] = inputs_name[0]
-    # Get the lists of inputs and outputs of the current node
+    # Get the list of inputs of the current node
     current_inputs = list(layer_inputs.values())
-    current_outputs = layer_outputs

     graph.add_layer("prim.min", inputs=layer_inputs, outputs=layer_outputs)
     return current_inputs, current_outputs


+def prim_SetAttr(mapper, graph, node):
+    """ Set attribute information.
+    TorchScript example:
+         = prim::SetAttr[name="num_batches_tracked"](%260, %277)
+    Parameter meanings:
+        %260 (-): prefix of the attribute name.
+        %277 (-): the value to set.
+    """
+    output_name = mapper._get_outputs_name(node)[0]
+    field_name_list = []
+    tmp_node = node
+    while True:
+        input_node = list(tmp_node.inputs())[0].node()
+        try:
+            field_name_list.insert(0, input_node.s('name'))
+            tmp_node = input_node
+        except Exception:
+            break
+    field_name_list.append(node.s('name'))
+
+    inputs_name, inputs_node = mapper._get_inputs_name(node)
+    param = {"Tensor": inputs_name[1]}
+    mapper.pytorch_params[".".join(field_name_list)] = param
+    return [], [output_name]
+
+
+def prim_shape(mapper, graph, node):
+    """ Construct a PaddleLayer that gets the shape.
+    TorchScript example:
+        %4701 : int[] = prim::shape(%result.1)
+    Parameter meanings:
+        %4701 (list): output, the shape information.
+        %result.1 (Tensor): the value whose shape is needed.
+    """
+    output_name = mapper._get_outputs_name(node)[0]
+    layer_outputs = [output_name]
+    layer_inputs = {}
+    inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
+    # Process input 0, i.e. %input.8
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
+    layer_inputs["input"] = inputs_name[0]
+    # Get the list of inputs of the current node
+    current_inputs = list(layer_inputs.values())
+
+    graph.add_layer("prim.shape", inputs=layer_inputs, outputs=layer_outputs)
+    return current_inputs, current_outputs
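The prim_GetAttr and prim_SetAttr changes both hinge on joining nested attribute names into a single parameter key. A tiny sketch of that bookkeeping, with made-up names for illustration:

# Minimal sketch: nested prim::GetAttr / prim::SetAttr names are joined with "."
# to form the key under which a parameter is stored in pytorch_params.
field_name_list = []
for name in ["bn1", "num_batches_tracked"]:   # as collected from nested GetAttr nodes
    field_name_list.append(name)

pytorch_params = {}
pytorch_params[".".join(field_name_list)] = {"Tensor": "x277"}  # hypothetical value name
print(pytorch_params)  # {'bn1.num_batches_tracked': {'Tensor': 'x277'}}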
x2paddle/op_mapper/pytorch2paddle/pytorch_op_mapper.py  (view file @ 5bcd803c)

@@ -32,8 +32,28 @@ class PyTorchOpMapper(OpMapper):
         self.output_index = 0
         self.dygraph_name_id = {}  # id in the dygraph __init__ output names; key is the kernel type, value is the id
         # Conversion
+        self.check_op(decoder.graph)
         self.graph, _ = self.traverse(decoder.graph)

+    def check_op(self, script_graph):
+        def _update_op_list(graph):
+            for node in graph.nodes():
+                op_list.append(node.kind())
+                for block in node.blocks():
+                    _update_op_list(block)
+
+        op_list = list()
+        _update_op_list(script_graph)
+        op_list = list(set(op_list))
+        unsupported_op_list = []
+        for op in op_list:
+            func_name = op.replace('::', '_')
+            if not (hasattr(prim, func_name) or hasattr(aten, func_name)):
+                unsupported_op_list.append(op)
+        if len(unsupported_op_list) > 0:
+            raise Exception("The kind {} in model is not supported yet.".format(
+                unsupported_op_list))
+
     def traverse(self, script_graph, parent_layer=None):
         # Used to obtain the inputs of the graph
         def _update_graph_inputs(inputs, outputs):
@@ -65,9 +85,7 @@ class PyTorchOpMapper(OpMapper):
                     func = getattr(aten, func_name)
                     inputs, outputs = func(self, graph, node)
                     _update_graph_inputs(inputs, outputs)
-                else:
-                    raise Exception("The kind {} in model is not supported yet.".
-                                    format(node.kind()))
         # Convert output nodes
         if hasattr(script_graph, 'returnNode'):
             for i, ivalue in enumerate(script_graph.returnNode().inputs()):
@@ -97,9 +115,9 @@ class PyTorchOpMapper(OpMapper):
                 self.outputs_info[script_unique_id] = output_name
                 self.output_index += 1
             outputs_name.append(output_name)
-        # Case where an if node has no outputs
+        # Case where an if or loop node has no outputs
         if len(list(node.outputs())) == 0:
-            output_name = 'x' + str(self.output_index)
+            output_name = '_x' + str(self.output_index)
             self.output_index += 1
             outputs_name.append(output_name)
         return outputs_name
@@ -121,12 +139,20 @@ class PyTorchOpMapper(OpMapper):
                     inputs={},
                     outputs=[output_name],
                     value="params[{}]".format(string(output_name)))
             else:
+                if isinstance(param, dict) and "Tensor" in param:
+                    graph.add_layer(
+                        "prim.constant",
+                        inputs={},
+                        outputs=[output_name],
+                        value=param["Tensor"])
+                else:
                     graph.add_layer(
                         "prim.constant",
                         inputs={},
                         outputs=[output_name],
                         value=string(param) if isinstance(param, str) else param)
             node_outputs.append(output_name)
@@ -135,9 +161,9 @@ class PyTorchOpMapper(OpMapper):
         for script_input_ivalue in node.inputs():
             script_input_node = script_input_ivalue.node()
             script_input_unique_id = script_input_ivalue.unique()
-            input_node_name = self.outputs_info[script_input_unique_id]
+            input_name = self.outputs_info[script_input_unique_id]
             inputs_node.append(script_input_node)
-            inputs_name.append(input_node_name)
+            inputs_name.append(input_name)
         return inputs_name, inputs_node

     def data(self, graph, node, uid):
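The new check_op method relies on a simple naming convention: a TorchScript node kind such as "aten::relu_" becomes the handler name "aten_relu_", looked up with hasattr on the prim and aten modules. A small self-contained sketch of that dispatch check, with stub modules standing in for the real ones and made-up op kinds:

import types

# Stand-ins for the real x2paddle prim / aten modules, holding a few handlers.
aten = types.SimpleNamespace(aten_relu_=lambda *a: None, aten_conv2d=lambda *a: None)
prim = types.SimpleNamespace(prim_Loop=lambda *a: None)

op_list = ["aten::relu_", "aten::conv2d", "prim::Loop", "aten::erf"]
unsupported = [op for op in op_list
               if not (hasattr(prim, op.replace('::', '_')) or
                       hasattr(aten, op.replace('::', '_')))]
print(unsupported)  # ['aten::erf'] -> check_op would raise "not supported yet"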