PaddlePaddle / X2Paddle

Commit b3e5fdc7
Authored on Nov 29, 2021 by wjj19950828
Handling the case of underscores
Parent: 5404f9e7
Showing 2 changed files with 32 additions and 364 deletions (+32 -364)
x2paddle/op_mapper/pytorch2paddle/aten.py  (+24 -364)
x2paddle/op_mapper/pytorch2paddle/pytorch_op_mapper.py  (+8 -0)
x2paddle/op_mapper/pytorch2paddle/aten.py
...
...
@@ -263,41 +263,6 @@ def aten_addmm(mapper, graph, node):
-def aten_add(mapper, graph, node):
-    """ Construct a PaddleLayer for numeric addition; the node implements out = x + y.
-    TorchScript example:
-        %296 : int = aten::add(%i.12, %288)
-    Parameter meanings:
-        %296 (-): result of the addition.
-        %i.12 (-): input value x.
-        %288 (-): input value y.
-    """
-    scope_name = mapper.normalize_scope_name(node)
-    output_name = mapper._get_outputs_name(node)[0]
-    layer_outputs = [output_name]
-    layer_inputs = {}
-    inputs_name, inputs_node = mapper._get_inputs_name(node)
-    # Get the list of the current node's outputs
-    current_outputs = [output_name]
-    # Handle input 0, i.e. %i.12
-    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs, scope_name)
-    layer_inputs["x"] = inputs_name[0]
-    # Handle input 1, i.e. %288
-    mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs, scope_name)
-    layer_inputs["y"] = inputs_name[1]
-    # Get the list of the current node's inputs
-    current_inputs = list(layer_inputs.values())
-    graph.add_layer("prim.add", inputs=layer_inputs, outputs=layer_outputs, scope_name=scope_name)
-    return current_inputs, current_outputs
-
-
 def aten_add_(mapper, graph, node):
     """ Construct a PaddleLayer for numeric addition; the node implements out = x + alpha * y.
     TorchScript example:
         %137 : Tensor = aten::add(%136, %130, %130)
...
...
@@ -325,6 +290,7 @@ def aten_add_(mapper, graph, node):
     layer_inputs["y"] = inputs_name[1]
     # Get the list of the current node's inputs
     current_inputs = list(layer_inputs.values())
-    # Handle input 2, i.e. %151
-    if inputs_name[2] in mapper.attrs:
-        layer_attrs["alpha"] = mapper.attrs[inputs_name[2]]
+    if len(inputs_name) > 2:
+        # Handle input 2, i.e. %151
+        if inputs_name[2] in mapper.attrs:
+            layer_attrs["alpha"] = mapper.attrs[inputs_name[2]]
...
...
@@ -340,6 +306,13 @@ def aten_add_(mapper, graph, node):
             outputs=layer_outputs,
             scope_name=scope_name,
             **layer_attrs)
+    else:
+        graph.add_layer(
+            "prim.add",
+            inputs=layer_inputs,
+            outputs=layer_outputs,
+            scope_name=scope_name,
+            **layer_attrs)
     return current_inputs, current_outputs
...
...
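Together, the two hunks above make the surviving addition handler tolerate both call forms of the op: the new guard reads the optional alpha attribute only when the TorchScript node carries a third input, and the new else branch emits a plain prim.add otherwise. For reference, what the two forms compute, as a self-contained Python sketch (the helper name is illustrative, not X2Paddle API):

    def add(x, y, alpha=1):
        # aten::add(x, y)        -> out = x + y
        # aten::add(x, y, alpha) -> out = x + alpha * y
        return x + alpha * y

    assert add(2, 3) == 5             # two-input form
    assert add(2, 3, alpha=10) == 32  # three-input form with alpha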
@@ -1634,41 +1607,6 @@ def aten_dim(mapper, graph, node):
     return current_inputs, current_outputs
 
 
-def aten_div_(mapper, graph, node):
-    """ Construct a PaddleLayer for division.
-    TorchScript example:
-        %bx_bw0.3 : Tensor = aten::div_(%bx_bw.3, %2678)
-    Parameter meanings:
-        %bx_bw0.3 (-): result of the division.
-        %bx_bw.3 (-): the dividend.
-        %2678 (int): the divisor.
-    """
-    scope_name = mapper.normalize_scope_name(node)
-    output_name = mapper._get_outputs_name(node)[0]
-    layer_outputs = [output_name]
-    layer_inputs = {}
-    inputs_name, inputs_node = mapper._get_inputs_name(node)
-    # Get the list of the current node's outputs
-    current_outputs = [output_name]
-    # Handle input 0, i.e. %124
-    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs, scope_name)
-    layer_inputs["x"] = inputs_name[0]
-    # Handle input 1, i.e. %123
-    mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs, scope_name)
-    layer_inputs["y"] = inputs_name[1]
-    # Get the list of the current node's inputs
-    current_inputs = list(layer_inputs.values())
-    graph.add_layer("prim.div", inputs=layer_inputs, outputs=layer_outputs, scope_name=scope_name)
-    return current_inputs, current_outputs
-
-
 def aten_div(mapper, graph, node):
     """ Construct a PaddleLayer for division.
     TorchScript example:
...
...
@@ -1737,39 +1675,6 @@ def aten_dropout(mapper, graph, node):
     return current_inputs, current_outputs
 
 
-def aten_dropout_(mapper, graph, node):
-    """ Construct a PaddleLayer for Dropout.
-    TorchScript example:
-        %119 : Tensor = aten::dropout_(%result.3, %117, %118)
-    Parameter meanings:
-        %119 (Tensor): Tensor after Dropout.
-        %result.3 (Tensor): input Tensor.
-        %118 (bool): whether this is the training phase.
-    """
-    scope_name = mapper.normalize_scope_name(node)
-    op_name = name_generator("dropout", mapper.nn_name2id)
-    output_name = mapper._get_outputs_name(node)[0]
-    layer_outputs = [op_name, output_name]
-    layer_inputs = {}
-    inputs_name, inputs_node = mapper._get_inputs_name(node)
-    # Get the list of the current node's outputs
-    current_outputs = [output_name]
-    # Handle input 0, i.e. %119
-    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs, scope_name)
-    layer_inputs["input"] = inputs_name[0]
-    # Get the lists of the current node's inputs and outputs
-    current_inputs = list(layer_inputs.values())
-    graph.add_layer("paddle.nn.Dropout", inputs=layer_inputs, outputs=layer_outputs, scope_name=scope_name, p=0.0)
-    return current_inputs, current_outputs
-
-
 def aten_embedding(mapper, graph, node):
     """ Construct a PaddleLayer for embedding.
     TorchScript example:
...
...
@@ -2607,10 +2512,10 @@ def aten_gru(mapper, graph, node):
     return current_inputs, current_outputs
 
 
-def aten_hardtanh_(mapper, graph, node):
+def aten_hardtanh(mapper, graph, node):
     """ Construct a PaddleLayer for hardtanh activation.
     TorchScript example:
-        %result.9 : Tensor = aten::hardtanh_(%input.20, %67, %66)
+        %result.9 : Tensor = aten::hardtanh(%input.20, %67, %66)
     Parameter meanings:
         %result.9 (Tensor): output Tensor after hardtanh activation.
         %input.20 (Tensor): Tensor to apply hardtanh to.
...
...
@@ -2990,42 +2895,6 @@ def aten_le(mapper, graph, node):
     return current_inputs, current_outputs
 
 
-def aten_leaky_relu_(mapper, graph, node):
-    """ Construct a PaddleLayer for leaky relu activation.
-    TorchScript example:
-        %input.117 : Tensor = aten::leaky_relu_(%input.114, %1570)
-    Parameter meanings:
-        %input.117 (Tensor): output, result after leaky relu.
-        %input.114 (Tensor): Tensor to apply leaky relu to.
-        %1570 (float): slope used for input elements smaller than 0.
-    """
-    scope_name = mapper.normalize_scope_name(node)
-    op_name = name_generator("leakly_relu", mapper.nn_name2id)
-    output_name = mapper._get_outputs_name(node)[0]
-    layer_outputs = [op_name, output_name]
-    layer_inputs = {}
-    layer_attrs = {}
-    inputs_name, inputs_node = mapper._get_inputs_name(node)
-    # Get the list of the current node's outputs
-    current_outputs = [output_name]
-    # Handle input 0, i.e. %result.5
-    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs, scope_name)
-    layer_inputs["x"] = inputs_name[0]
-    # Get the lists of the current node's inputs and outputs
-    current_inputs = list(layer_inputs.values())
-    # Handle input 1, i.e. %1570
-    layer_attrs["negative_slope"] = mapper.attrs[inputs_name[1]]
-    graph.add_layer("paddle.nn.LeakyReLU", inputs=layer_inputs, outputs=layer_outputs, scope_name=scope_name, **layer_attrs)
-    return current_inputs, current_outputs
-
-
 def aten_leaky_relu(mapper, graph, node):
     """ Construct a PaddleLayer for leaky relu activation.
     TorchScript example:
...
...
@@ -3293,115 +3162,6 @@ def aten_lt(mapper, graph, node):
     return current_inputs, current_outputs
 
 
-def aten_masked_fill_(mapper, graph, node):
-    """ Construct a PaddleLayer that fills by mask.
-    TorchScript example:
-        %input.4 : Tensor = aten::masked_fill_(%scores.2, %mask.2, %46)
-    Parameter meanings:
-        %input.4 (Tensor): output, the filled result.
-        %scores.2 (Tensor): Tensor to be filled.
-        %mask.2 (Tensor): bool Tensor marking the positions to fill.
-        %46 (-): the fill value.
-    """
-    scope_name = mapper.normalize_scope_name(node)
-    output_name = mapper._get_outputs_name(node)[0]
-    layer_outputs = [output_name]
-    inputs_name, inputs_node = mapper._get_inputs_name(node)
-    # Get the list of the current node's inputs
-    current_inputs = []
-    # Get the list of the current node's outputs
-    current_outputs = [output_name]
-    # Handle input 0, i.e. %input.4
-    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs, scope_name)
-    current_inputs.append(inputs_name[0])
-    graph.add_layer("prim.type",
-                    inputs={"input": inputs_name[0]},
-                    outputs=[inputs_name[0] + "_type"],
-                    scope_name=scope_name)
-    # Handle input 1, i.e. %scores.2
-    mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs, scope_name)
-    current_inputs.append(inputs_name[1])
-    graph.add_layer("paddle.logical_not",
-                    inputs={"x": inputs_name[1]},
-                    outputs=[inputs_name[1] + "_not"],
-                    scope_name=scope_name)
-    graph.add_layer("paddle.cast",
-                    inputs={"x": inputs_name[1]},
-                    outputs=[inputs_name[1] + "_mask"],
-                    scope_name=scope_name,
-                    dtype=inputs_name[0] + "_type")
-    graph.add_layer("paddle.cast",
-                    inputs={"x": inputs_name[1] + "_not"},
-                    outputs=[inputs_name[1] + "_not_mask"],
-                    scope_name=scope_name,
-                    dtype=inputs_name[0] + "_type")
-    graph.add_layer("paddle.multiply",
-                    inputs={"x": inputs_name[0], "y": inputs_name[1] + "_not_mask"},
-                    outputs=[inputs_name[0] + "_not_mask"],
-                    scope_name=scope_name)
-    # Handle input 2, i.e. %46
-    mapper._check_input(graph, inputs_node[2], inputs_name[2], current_outputs, scope_name)
-    graph.add_layer("prim.eq",
-                    inputs={"x": inputs_name[2]},
-                    outputs=[inputs_name[2] + "_cond1"],
-                    scope_name=scope_name,
-                    y="-float('inf')")
-    graph.add_layer("prim.eq",
-                    inputs={"x": inputs_name[2]},
-                    outputs=[inputs_name[2] + "_cond2"],
-                    scope_name=scope_name,
-                    y="float('inf')")
-    graph.add_layer("prim.or",
-                    inputs={"x": inputs_name[2] + "_cond1", "y": inputs_name[2] + "_cond2"},
-                    outputs=[inputs_name[2] + "_cond"],
-                    scope_name=scope_name)
-    graph.add_layer("prim.if", {'input': inputs_name[2] + "_cond"},
-                    outputs=[inputs_name[2] + "_if"],
-                    scope_name=scope_name)
-    if_layer = graph.layers[list(graph.layers.keys())[-1]]
-    block = PaddleGraph(source_type="pytorch", parent_layer=if_layer)
-    block.add_layer("prim.equal",
-                    inputs={"input": inputs_name[1] + "_mask"},
-                    outputs=[inputs_name[2] + "_1"],
-                    scope_name=scope_name)
-    if_layer.add_block(block)
-    block = PaddleGraph(source_type="pytorch", parent_layer=if_layer)
-    block.add_layer("prim.mul",
-                    inputs={"x": inputs_name[1] + "_mask", "y": inputs_name[2]},
-                    outputs=[inputs_name[2] + "_1"],
-                    scope_name=scope_name)
-    if_layer.add_block(block)
-    if_layer.inputs["input-0"] = inputs_name[1] + "_mask"
-    if_layer.inputs["input-1"] = inputs_name[2]
-    if_layer.outputs.append(inputs_name[2] + "_1")
-    graph.add_layer("paddle.add",
-                    inputs={"x": inputs_name[2] + "_1", "y": inputs_name[0] + "_not_mask"},
-                    outputs=layer_outputs,
-                    scope_name=scope_name)
-    return current_inputs, current_outputs
-
-
 def aten_masked_fill(mapper, graph, node):
     """ Construct a PaddleLayer that fills by mask.
     TorchScript example:
...
...
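The block deleted above open-coded masked fill as cast/multiply/add layers, with a prim.if to special-case ±inf fill values; the retained aten_masked_fill covers the same operation. For reference, the underlying semantics as a self-contained Python sketch (illustrative only, not the mapper's code):

    def masked_fill(xs, mask, value):
        # Fill positions where the bool mask is True with `value`;
        # keep the original element elsewhere.
        return [value if m else x for x, m in zip(xs, mask)]

    assert masked_fill([1, 2, 3], [False, True, False], -9) == [1, -9, 3]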
@@ -3799,42 +3559,6 @@ def aten_mul(mapper, graph, node):
     return current_inputs, current_outputs
 
 
-def aten_mul_(mapper, graph, node):
-    """ Construct a PaddleLayer for numeric multiplication.
-    TorchScript example:
-        %size_prods.39 : int = aten::mul_(%size_prods.38, %114)
-    Parameter meanings:
-        %size_prods.39 (Tensor): output, result of the multiplication.
-        %size_prods.38 (-): value 1.
-        %114 (-): value 2.
-    """
-    scope_name = mapper.normalize_scope_name(node)
-    output_name = mapper._get_outputs_name(node)[0]
-    layer_outputs = [output_name]
-    layer_inputs = {}
-    inputs_name, inputs_node = mapper._get_inputs_name(node)
-    # Get the list of the current node's outputs
-    current_outputs = [output_name]
-    # Handle input 0, i.e. %size_prods.38
-    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs, scope_name)
-    layer_inputs["x"] = inputs_name[0]
-    # Handle input 1, i.e. %114
-    mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs, scope_name)
-    layer_inputs["y"] = inputs_name[1]
-    # Get the list of the current node's inputs
-    current_inputs = list(layer_inputs.values())
-    current_outputs = layer_outputs
-    graph.add_layer("prim.mul", inputs=layer_inputs, outputs=layer_outputs, scope_name=scope_name)
-    return current_inputs, current_outputs
-
-
 def aten_ne(mapper, graph, node):
     """ Construct a PaddleLayer that checks whether values are not equal.
     TorchScript example:
...
...
@@ -4304,38 +4028,6 @@ def aten_relu(mapper, graph, node):
     return current_inputs, current_outputs
 
 
-def aten_relu_(mapper, graph, node):
-    """ Construct a PaddleLayer for ReLU activation.
-    TorchScript example:
-        %result.3 : Tensor = aten::relu_(%input.5)
-    Parameter meanings:
-        %result.3 (Tensor): output, result after ReLU.
-        %result.5 (Tensor): Tensor to apply ReLU to.
-    Note: the inplace parameter is not implemented in paddle.
-    """
-    scope_name = mapper.normalize_scope_name(node)
-    op_name = name_generator("relu", mapper.nn_name2id)
-    output_name = mapper._get_outputs_name(node)[0]
-    layer_outputs = [op_name, output_name]
-    layer_inputs = {}
-    inputs_name, inputs_node = mapper._get_inputs_name(node)
-    # Get the list of the current node's outputs
-    current_outputs = [output_name]
-    # Handle input 0, i.e. %result.5
-    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs, scope_name)
-    layer_inputs["x"] = inputs_name[0]
-    # Get the list of the current node's inputs
-    current_inputs = list(layer_inputs.values())
-    graph.add_layer("paddle.nn.ReLU", inputs=layer_inputs, outputs=layer_outputs, scope_name=scope_name)
-    return current_inputs, current_outputs
-
-
 def aten_relu6(mapper, graph, node):
     """ Construct a PaddleLayer for ReLU6 activation.
     TorchScript example:
...
...
@@ -4748,38 +4440,6 @@ def aten_silu(mapper, graph, node):
     return current_inputs, current_outputs
 
 
-def aten_silu_(mapper, graph, node):
-    """ Construct a PaddleLayer for Silu activation.
-    TorchScript example:
-        %result.3 : Tensor = aten::silu_(%input.5)
-    Parameter meanings:
-        %result.3 (Tensor): output, result after Silu.
-        %input.5 (Tensor): Tensor to apply Silu to.
-    Note: the inplace parameter is not implemented in paddle.
-    """
-    scope_name = mapper.normalize_scope_name(node)
-    op_name = name_generator("silu", mapper.nn_name2id)
-    output_name = mapper._get_outputs_name(node)[0]
-    layer_outputs = [op_name, output_name]
-    layer_inputs = {}
-    inputs_name, inputs_node = mapper._get_inputs_name(node)
-    # Get the list of the current node's outputs
-    current_outputs = [output_name]
-    # Handle input 0, i.e. %input.5
-    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs, scope_name)
-    layer_inputs["x"] = inputs_name[0]
-    # Get the list of the current node's inputs
-    current_inputs = list(layer_inputs.values())
-    graph.add_layer("paddle.nn.Silu", inputs=layer_inputs, outputs=layer_outputs, scope_name=scope_name)
-    return current_inputs, current_outputs
-
-
 def aten_sin(mapper, graph, node):
     """ Construct a PaddleLayer for the sin math operation.
     TorchScript example:
...
...
x2paddle/op_mapper/pytorch2paddle/pytorch_op_mapper.py
...
...
@@ -55,6 +55,10 @@ class PyTorchOpMapper():
         unsupported_ops = []
         for op in op_list:
             func_name = op.replace('::', '_')
+            # Handle the trailing "_" suffix, e.g. aten_relu_ to aten_relu,
+            # while avoiding the aten::__isnot__ situation
+            if func_name[-1] == "_" and func_name[-2] != "_":
+                func_name = func_name[:-1]
             if not (hasattr(prim, func_name) or hasattr(aten, func_name)):
                 unsupported_ops.append(op)
         if len(unsupported_ops) == 0:
...
...
@@ -104,6 +108,10 @@ class PyTorchOpMapper():
         for node in script_graph.nodes():
             kind = node.kind()
             func_name = kind.replace('::', '_')
+            # Handle the trailing "_" suffix, e.g. aten_relu_ to aten_relu,
+            # while avoiding the aten::__isnot__ situation
+            if func_name[-1] == "_" and func_name[-2] != "_":
+                func_name = func_name[:-1]
             if hasattr(prim, func_name):
                 func = getattr(prim, func_name)
                 inputs, outputs = func(self, graph, node)
...
...
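The same four lines are added in both places: once when checking which ops are supported, and once when dispatching TorchScript nodes to mapper functions. A minimal, self-contained sketch of the normalization rule (the helper name is illustrative, not X2Paddle API):

    def normalize_func_name(kind):
        # "aten::relu_" -> "aten_relu_" -> "aten_relu"
        func_name = kind.replace('::', '_')
        # Strip a single trailing underscore (the in-place marker), but leave
        # dunder ops such as aten::__isnot__ untouched.
        if func_name[-1] == "_" and func_name[-2] != "_":
            func_name = func_name[:-1]
        return func_name

    assert normalize_func_name("aten::relu_") == "aten_relu"
    assert normalize_func_name("aten::add") == "aten_add"
    assert normalize_func_name("aten::__isnot__") == "aten___isnot__"

This rule is what lets the commit delete the dedicated in-place handlers (aten_relu_, aten_silu_, aten_div_, aten_mul_, ...) from aten.py: their normalized names resolve to the existing non-inplace mappers.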