Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
PaddlePaddle
X2Paddle
提交
71be4aaa
X
X2Paddle
项目概览
PaddlePaddle
/
X2Paddle
大约 1 年 前同步成功
通知
328
Star
698
Fork
167
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
26
列表
看板
标记
里程碑
合并请求
4
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
X
X2Paddle
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
26
Issue
26
列表
看板
标记
里程碑
合并请求
4
合并请求
4
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
未验证
提交
71be4aaa
编写于
5月 10, 2022
作者:
W
WJJ1995
提交者:
GitHub
5月 10, 2022
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
Support bigbird model (#789)
上级
3da0e06f
变更
2
隐藏空白更改
内联
并排
Showing
2 changed files
with
79 additions
and
0 deletions
+79
-0
docs/inference_model_convertor/op_list.md
docs/inference_model_convertor/op_list.md
+1
-0
x2paddle/op_mapper/pytorch2paddle/aten.py
x2paddle/op_mapper/pytorch2paddle/aten.py
+78
-0
未找到文件。
docs/inference_model_convertor/op_list.md
浏览文件 @
71be4aaa
...
@@ -116,6 +116,7 @@ Aten:
...
@@ -116,6 +116,7 @@ Aten:
| 125 | aten::complex | 126 | aten::real | 127 | aten::imag | 128 | aten::fft
\_
rfftn |
| 125 | aten::complex | 126 | aten::real | 127 | aten::imag | 128 | aten::fft
\_
rfftn |
| 129 | aten::fft
\_
irfftn | 130 | aten::hardsigmoid | 131 | aten::hardswish | 132 | aten::linear |
| 129 | aten::fft
\_
irfftn | 130 | aten::hardsigmoid | 131 | aten::hardswish | 132 | aten::linear |
| 133 | aten::rsqrt | 134 | aten::replication
\_
pad1d | 135 | aten::full | 136 | aten::group
\_
norm |
| 133 | aten::rsqrt | 134 | aten::replication
\_
pad1d | 135 | aten::full | 136 | aten::group
\_
norm |
| 137 | aten::argmax | 138 | aten::copy | | | | |
Prim:
Prim:
| 序号 | OP | 序号 | OP | 序号 | OP | 序号 | OP |
| 序号 | OP | 序号 | OP | 序号 | OP | 序号 | OP |
...
...
x2paddle/op_mapper/pytorch2paddle/aten.py
浏览文件 @
71be4aaa
...
@@ -484,6 +484,55 @@ def aten_arange(mapper, graph, node):
...
@@ -484,6 +484,55 @@ def aten_arange(mapper, graph, node):
return
current_inputs
,
current_outputs
return
current_inputs
,
current_outputs
def aten_argmax(mapper, graph, node):
    """Map a TorchScript ``aten::argmax`` node onto ``paddle.argmax``.

    TorchScript:
        %x.28 : Tensor = aten::argmax(%result.1, %4967, %3, %2)
    Parameter meaning:
        %x.28 (Tensor): Output Tensor
        %result.1 (Tensor): Input Tensor
        %4967 (int/list): Axis
        %3 (bool): Keepdim

    Returns the (current_inputs, current_outputs) name lists used by the
    graph builder to track data flow for this node.
    """
    scope_name = mapper.normalize_scope_name(node)
    result_name = mapper._get_outputs_name(node)[0]
    layer_outputs = [result_name]
    layer_inputs = {}
    layer_attrs = {}
    inputs_name, inputs_node = mapper._get_inputs_name(node)
    # Names produced by this node.
    current_outputs = [result_name]
    # Input tensor: always wired as a dynamic graph input.
    mapper._check_input(graph, inputs_node[0], inputs_name[0],
                        current_outputs, scope_name)
    layer_inputs["x"] = inputs_name[0]
    current_inputs = list(layer_inputs.values())
    # Axis and keepdim: each is either a compile-time constant (folded into
    # layer_attrs) or a runtime value (wired as an extra graph input).
    for idx, attr_key in ((1, "axis"), (2, "keepdim")):
        name = inputs_name[idx]
        if name in mapper.attrs:
            layer_attrs[attr_key] = mapper.attrs[name]
        else:
            mapper._check_input(graph, inputs_node[idx], name,
                                current_outputs, scope_name)
            layer_inputs[attr_key] = name
            current_inputs.append(name)
    graph.add_layer(
        "paddle.argmax",
        inputs=layer_inputs,
        outputs=layer_outputs,
        scope_name=scope_name,
        **layer_attrs)
    return current_inputs, current_outputs
def
aten_avg_pool2d
(
mapper
,
graph
,
node
):
def
aten_avg_pool2d
(
mapper
,
graph
,
node
):
""" 构造最大池化的PaddleLayer。
""" 构造最大池化的PaddleLayer。
TorchScript示例:
TorchScript示例:
...
@@ -1075,6 +1124,35 @@ def aten_complex(mapper, graph, node):
...
@@ -1075,6 +1124,35 @@ def aten_complex(mapper, graph, node):
return
current_inputs
,
current_outputs
return
current_inputs
,
current_outputs
def aten_copy(mapper, graph, node):
    """Map a TorchScript ``aten::copy`` node onto a ``prim.equal`` layer
    (a plain aliasing assignment in the generated Paddle code).

    TorchScript Code:
        %107 : Tensor = aten::copy(%new_mem.1)
    Parameter meaning:
        %107 (Tensor): Output Tensor
        %new_mem.1 (Tensor): Input Tensor

    Returns the (current_inputs, current_outputs) name lists used by the
    graph builder to track data flow for this node.
    """
    scope_name = mapper.normalize_scope_name(node)
    result_name = mapper._get_outputs_name(node)[0]
    layer_outputs = [result_name]
    # Names produced by this node.
    current_outputs = [result_name]
    inputs_name, inputs_node = mapper._get_inputs_name(node)
    # Single tensor operand: wire it as the layer's "input".
    mapper._check_input(graph, inputs_node[0], inputs_name[0],
                        current_outputs, scope_name)
    layer_inputs = {"input": inputs_name[0]}
    current_inputs = list(layer_inputs.values())
    graph.add_layer(
        "prim.equal",
        inputs=layer_inputs,
        outputs=layer_outputs,
        scope_name=scope_name)
    return current_inputs, current_outputs
def
aten___contains__
(
mapper
,
graph
,
node
):
def
aten___contains__
(
mapper
,
graph
,
node
):
""" 构造in的PaddleLayer。
""" 构造in的PaddleLayer。
TorchScript示例:
TorchScript示例:
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录