Commit 1c5c7985 (unverified)
Authored May 07, 2022 by WJJ1995; committed by GitHub on May 07, 2022.
Add replication pad (#783)
* add replication pad
* update op_list.md
* re-lint
Parent: b9c2c898
Showing 2 changed files with 49 additions and 16 deletions (+49 −16).
docs/inference_model_convertor/op_list.md (+1 −2)
x2paddle/op_mapper/pytorch2paddle/aten.py (+48 −14)
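In practical terms, the commit lets X2Paddle convert TorchScript graphs that contain aten::replication_pad1d. Below is a minimal sketch of the trace-based conversion workflow from the X2Paddle README; the toy module and input shape are illustrative, not part of this commit:

import torch
from x2paddle.convert import pytorch2paddle

class Net(torch.nn.Module):
    def __init__(self):
        super().__init__()
        # Traces to aten::replication_pad1d, the op this commit maps.
        self.pad = torch.nn.ReplicationPad1d((2, 1))

    def forward(self, x):
        return self.pad(x)

# Trace the module and emit an equivalent Paddle program under pd_model/.
pytorch2paddle(
    module=Net().eval(),
    save_dir="pd_model",
    jit_type="trace",
    input_examples=[torch.randn(1, 3, 8)])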
docs/inference_model_convertor/op_list.md @ 1c5c7985

@@ -115,8 +115,7 @@ Aten:
 | 121 | aten::repeat\_interleave | 122 | aten::maxpool1d | 123 | aten::frobenius\_norm | 124 | aten::format |
 | 125 | aten::complex | 126 | aten::real | 127 | aten::imag | 128 | aten::fft\_rfftn |
 | 129 | aten::fft\_irfftn | 130 | aten::hardsigmoid | 131 | aten::hardswish | 132 | aten::linear |
-| 133 | aten::rsqrt | | | | | | |
+| 133 | aten::rsqrt | 134 | aten::replication\_pad1d | | | | |
 Prim:
 | No. | OP | No. | OP | No. | OP | No. | OP |
x2paddle/op_mapper/pytorch2paddle/aten.py @ 1c5c7985
@@ -3263,27 +3263,25 @@ def aten_linear(mapper, graph, node):
     # transpose weight
     mapper._check_input(graph, inputs_node[1], inputs_name[1],
                         current_outputs, scope_name)
-    layer_attrs_transpose = {}
-    layer_attrs_transpose["perm"] = [1, 0]
+    layer_inputs["y"] = inputs_name[1]
+    layer_attrs["transpose_y"] = True
     graph.add_layer(
-        "paddle.transpose",
-        inputs={"x": inputs_name[1]},
-        outputs=[inputs_name[1] + "_transpose"],
+        "paddle.matmul",
+        inputs=layer_inputs,
+        outputs=layer_outputs,
         scope_name=scope_name,
-        **layer_attrs_transpose)
-    layer_inputs["weight"] = inputs_name[1] + "_transpose"
+        **layer_attrs)
     if len(inputs_name) == 3:
         mapper._check_input(graph, inputs_node[2], inputs_name[2],
                             current_outputs, scope_name)
-        layer_inputs["bias"] = inputs_name[2]
-    current_inputs = list(layer_inputs.values())
-    graph.add_layer(
-        "paddle.nn.functional.linear",
-        inputs=layer_inputs,
-        outputs=layer_outputs,
-        scope_name=scope_name,
-        **layer_attrs)
+        graph.add_layer(
+            "paddle.add",
+            inputs={"x": output_name,
+                    "y": inputs_name[2]},
+            outputs=layer_outputs,
+            scope_name=scope_name)
+    current_inputs = list(layer_inputs.values())
     return current_inputs, current_outputs
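The rewrite drops the explicit paddle.transpose: aten::linear stores its weight as [out_features, in_features] and computes x @ W^T + b, which paddle.matmul with transpose_y=True plus a paddle.add reproduces directly. A quick PyTorch-side check of that identity (illustrative, not part of the commit):

import torch

x = torch.randn(4, 3)  # batch of 4, in_features = 3
W = torch.randn(5, 3)  # [out_features, in_features], as aten::linear stores it
b = torch.randn(5)

# Old mapping: transpose W, then a linear kernel.
# New mapping: matmul with the y operand transposed, then an elementwise add.
ref = torch.nn.functional.linear(x, W, b)
out = torch.matmul(x, W.t()) + b
assert torch.allclose(ref, out)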
@@ -4658,6 +4656,42 @@ def aten_repeat_interleave(mapper, graph, node):
     return current_inputs, current_outputs


+def aten_replication_pad1d(mapper, graph, node):
+    """
+    TorchScript Code:
+        %58 : Tensor = aten::replication_pad1d(%input.1, %152)
+    Parameter meaning:
+        %58 (Tensor): Output Tensor
+        %input.1 (Tensor): Input Tensor
+        %152 (list): Padding size
+    """
+    scope_name = mapper.normalize_scope_name(node)
+    op_name = name_generator("pad", mapper.nn_name2id)
+    output_name = mapper._get_outputs_name(node)[0]
+    layer_outputs = [op_name, output_name]
+    layer_inputs = {}
+    layer_attrs = {}
+    inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # output list
+    current_outputs = [output_name]
+    # input list
+    mapper._check_input(graph, inputs_node[0], inputs_name[0],
+                        current_outputs, scope_name)
+    layer_inputs["input"] = inputs_name[0]
+    layer_attrs["padding"] = mapper.attrs[inputs_name[1]]
+    layer_attrs["mode"] = string("replicate")
+    current_inputs = list(layer_inputs.values())
+    graph.add_layer(
+        "paddle.nn.Pad1D",
+        inputs=layer_inputs,
+        outputs=layer_outputs,
+        scope_name=scope_name,
+        **layer_attrs)
+    return current_inputs, current_outputs
+
+
 def aten_reshape(mapper, graph, node):
     """ Builds a PaddleLayer that reshapes the input.
     TorchScript example:
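The emitted layer is a plain paddle.nn.Pad1D in replicate mode, so each padded position repeats the nearest edge value. A minimal sketch of what the generated layer computes (padding values illustrative):

import paddle

x = paddle.arange(6, dtype="float32").reshape([1, 1, 6])  # NCL layout
# padding=[2, 1]: repeat the left edge twice and the right edge once,
# mirroring aten::replication_pad1d's padding-size list.
pad = paddle.nn.Pad1D(padding=[2, 1], mode="replicate")
print(pad(x))
# [[[0., 0., 0., 1., 2., 3., 4., 5., 5.]]]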