Commit 094329bd
Authored May 10, 2022 by wjj19950828
resolve conflict
Parents: e754c856, 6ae7d631
Showing 2 changed files with 66 additions and 20 deletions (+66 −20)
docs/inference_model_convertor/op_list.md (+1 −1)
x2paddle/op_mapper/pytorch2paddle/aten.py (+65 −19)
docs/inference_model_convertor/op_list.md

@@ -115,7 +115,7 @@ Aten:
 | 121 | aten::repeat\_interleave | 122 | aten::maxpool1d | 123 | aten::frobenius\_norm | 124 | aten::format |
 | 125 | aten::complex | 126 | aten::real | 127 | aten::imag | 128 | aten::fft\_rfftn |
 | 129 | aten::fft\_irfftn | 130 | aten::hardsigmoid | 131 | aten::hardswish | 132 | aten::linear |
-| 133 | aten::rsqrt | 134 | aten::replication\_pad1d | 135 | aten::full | 136 | aten::argmax |
+| 133 | aten::rsqrt | 134 | aten::replication\_pad1d | 135 | aten::full | 136 | aten::group\_norm |
 | 137 | aten::argmax | 138 | aten::copy | | | | |
 
 Prim:
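The documentation change above records aten::group_norm as op 136 (the previous entry duplicated aten::argmax, which is already listed as op 137). As a quick illustration of what this enables, here is a minimal sketch, assuming a toy module and input shape of my own choosing, of converting a PyTorch model that uses torch.nn.GroupNorm through X2Paddle's trace-based pytorch2paddle entry point:

import torch
from x2paddle.convert import pytorch2paddle

class GNBlock(torch.nn.Module):
    """Toy module whose traced graph contains aten::group_norm."""

    def __init__(self):
        super().__init__()
        self.conv = torch.nn.Conv2d(3, 8, kernel_size=3, padding=1)
        # torch.nn.GroupNorm lowers to aten::group_norm when traced
        self.gn = torch.nn.GroupNorm(num_groups=4, num_channels=8, eps=1e-5)

    def forward(self, x):
        return torch.relu(self.gn(self.conv(x)))

model = GNBlock().eval()
example_input = torch.randn(1, 3, 32, 32)
# Hypothetical output directory; jit_type="trace" follows the documented API
pytorch2paddle(model, save_dir="pd_gn_model", jit_type="trace",
               input_examples=[example_input])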
x2paddle/op_mapper/pytorch2paddle/aten.py

@@ -2732,6 +2732,59 @@ def aten_gt(mapper, graph, node):
     return current_inputs, current_outputs
 
 
+def aten_group_norm(mapper, graph, node):
+    """
+    TorchScript Code:
+        %input.81 : Tensor = aten::group_norm(%input.2, %25, %60, %59, %26, %30)
+    Parameter meaning:
+        %input.81 (Tensor): Output Tensor
+        %input.2 (Tensor): Input Tensor
+        %25 (Tensor): num_groups
+        %60 (Tensor): weight
+        %59 (Tensor): bias
+        %26 (Tensor): eps
+        %30 (bool): enabled cudnn
+    """
+    scope_name = mapper.normalize_scope_name(node)
+    op_name = name_generator("groupnorm", mapper.nn_name2id)
+    output_name = mapper._get_outputs_name(node)[0]
+    layer_outputs = [op_name, output_name]
+    layer_inputs = {}
+    layer_attrs = {}
+    inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # output list
+    current_outputs = [output_name]
+    # process Input Tensor
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs,
+                        scope_name)
+    layer_inputs["input"] = inputs_name[0]
+    # input list
+    current_inputs = list(layer_inputs.values())
+    # process num_groups
+    layer_attrs['num_groups'] = mapper.attrs[inputs_name[1]]
+    # process weight
+    weights = mapper.pytorch_params[inputs_name[2]]
+    mapper.paddle_params[op_name + ".weight"] = weights
+    layer_attrs['num_channels'] = weights.shape[0]
+    # process bias
+    if inputs_name[2] in mapper.pytorch_params:
+        bias = mapper.pytorch_params[inputs_name[3]]
+        if bias is not None:
+            mapper.paddle_params[op_name + ".bias"] = bias
+    else:
+        mapper.paddle_params[op_name + ".bias"] = False
+    # process eps
+    layer_attrs["epsilon"] = mapper.attrs[inputs_name[4]]
+
+    graph.add_layer(
+        "paddle.nn.GroupNorm",
+        inputs=layer_inputs,
+        outputs=layer_outputs,
+        scope_name=scope_name,
+        **layer_attrs)
+
+    return current_inputs, current_outputs
+
+
 def aten_gru(mapper, graph, node):
     """ 构造门控循环单元网络(GRU)的PaddleLayer。
     TorchScript示例:
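The new mapper reads the aten::group_norm operands positionally: num_groups from the graph attrs, weight and bias into paddle_params under the generated layer name, num_channels from the weight shape, and eps as the GroupNorm epsilon, then emits a single paddle.nn.GroupNorm layer. A minimal numerical sketch of that correspondence, with shapes, data, and tolerance chosen purely for illustration, is:

import numpy as np
import paddle
import torch

# Random data standing in for the traced input and the stored parameters
x = np.random.rand(2, 8, 4, 4).astype("float32")
weight = np.random.rand(8).astype("float32")
bias = np.random.rand(8).astype("float32")

# PyTorch side: what aten::group_norm computes
torch_out = torch.nn.functional.group_norm(
    torch.from_numpy(x), num_groups=4,
    weight=torch.from_numpy(weight), bias=torch.from_numpy(bias), eps=1e-5)

# Paddle side: the layer the mapper emits, with the same parameters loaded
gn = paddle.nn.GroupNorm(num_groups=4, num_channels=8, epsilon=1e-5)
gn.weight.set_value(paddle.to_tensor(weight))
gn.bias.set_value(paddle.to_tensor(bias))
paddle_out = gn(paddle.to_tensor(x))

print(np.allclose(torch_out.numpy(), paddle_out.numpy(), atol=1e-5))  # expect True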
@@ -2931,45 +2984,38 @@ def aten_hardswish(mapper, graph, node):
 
 def aten_index(mapper, graph, node):
-    """ 构造选择元素的PaddleLayer。
-    TorchScript示例:
+    """
+    TorchScript Code:
         %1681 : Float = aten::index(%1653, %1680)
-    参数含义:
-        %1681 (Tensor): 输出,选择后的Tensor。
-        %1653 (Tensor): 需要选择的Tensor。
-        %1680 (int): 选择的索引。
+    Parameter meaning:
+        %1681 (Tensor): Output Tensor
+        %1653 (Tensor): Input Tensor
+        %1680 (int): Index
     """
     scope_name = mapper.normalize_scope_name(node)
     output_name = mapper._get_outputs_name(node)[0]
     layer_outputs = [output_name]
     layer_inputs = {}
-    layer_attrs = {}
     inputs_name, inputs_node = mapper._get_inputs_name(node)
-    # 获取当前节点输出的list
+    # output list
     current_outputs = [output_name]
-    # 处理输入0,即%1653
+    # process Input Tensor
     mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs,
                         scope_name)
     layer_inputs["x"] = inputs_name[0]
-    # 处理输入1,即%1680
+    # process Index
     mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs,
                         scope_name)
     layer_inputs["index"] = inputs_name[1]
-    # 获取当前节点输入的list
     current_inputs = list(layer_inputs.values())
 
     graph.add_layer(
         "prim.getitem",
-        inputs={"list": layer_inputs["index"]},
-        outputs=[layer_inputs["index"]],
+        inputs={"list": layer_inputs["x"]},
+        outputs=layer_outputs,
         scope_name=scope_name,
-        index=0)
-    graph.add_layer(
-        "paddle.index_select",
-        inputs=layer_inputs,
-        outputs=layer_outputs,
-        scope_name=scope_name,
-        **layer_attrs)
+        index=layer_inputs["index"])
 
     return current_inputs, current_outputs
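Where the old aten_index mapper first pulled element 0 out of the index operand and then emitted paddle.index_select, the rewritten version emits a single prim.getitem whose index is the whole index operand, so the generated code reduces to plain subscripting. A rough before/after sketch of the two lowerings, assuming a 1-D integer index tensor and variable names of my own choosing, is:

import paddle

x = paddle.arange(12, dtype="float32").reshape([4, 3])
idx = paddle.to_tensor([0, 2])

# old lowering: explicit index_select along axis 0
old_out = paddle.index_select(x, idx)

# new lowering: plain subscripting, roughly what the emitted prim.getitem becomes
new_out = x[idx]

print(bool(paddle.all(old_out == new_out)))  # expect True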