PaddlePaddle / X2Paddle: commit 80439dab
Commit 80439dab (unverified), authored by Jason on Nov 07, 2022 and committed via GitHub on Nov 07, 2022.

Merge pull request #906 from wjj19950828/support_GFPGAN

Support GFPGAN

Parents: 23fa4be9, 0423b067
Showing 2 changed files with 284 additions and 103 deletions:

- docs/inference_model_convertor/op_list.md (+2, -1)
- x2paddle/op_mapper/pytorch2paddle/aten.py (+282, -102)
docs/inference_model_convertor/op_list.md

```diff
@@ -117,7 +117,8 @@ Aten:
 | 125 | aten::complex | 126 | aten::real | 127 | aten::imag | 128 | aten::fft\_rfftn |
 | 129 | aten::fft\_irfftn | 130 | aten::hardsigmoid | 131 | aten::hardswish | 132 | aten::linear |
 | 133 | aten::rsqrt | 134 | aten::replication\_pad1d | 135 | aten::full | 136 | aten::group\_norm |
-| 137 | aten::argmax | 138 | aten::copy | 139 | aten::upsample\_trilinear3d | | |
+| 137 | aten::argmax | 138 | aten::copy | 139 | aten::upsample\_trilinear3d | 140 | aten::clone |
+| 141 | aten::rand | 142 | aten::randn | | | | |
 Prim:
 | 序号 | OP | 序号 | OP | 序号 | OP | 序号 | OP |
```
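The two added rows register aten::clone, aten::rand, and aten::randn, the operators GFPGAN's traced graph needs beyond the previous table. As a quick way to exercise them end to end, here is a minimal sketch (the Noisy module and save path are illustrative; pytorch2paddle is X2Paddle's documented trace-based entry point):

```python
# Sketch only: a toy module touching the three newly supported ops.
import torch
from x2paddle.convert import pytorch2paddle

class Noisy(torch.nn.Module):
    def forward(self, x):
        base = x.clone()                 # traced as aten::clone
        noise = torch.randn(1, 3, 8, 8)  # traced as aten::randn
        gate = torch.rand(1, 3, 8, 8)    # traced as aten::rand
        return base + gate * noise

# Trace-convert to a Paddle model under ./pd_model
pytorch2paddle(module=Noisy().eval(),
               save_dir="pd_model",
               jit_type="trace",
               input_examples=[torch.randn(1, 3, 8, 8)])
```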
x2paddle/op_mapper/pytorch2paddle/aten.py

@@ -1087,6 +1087,36 @@ def aten_clamp_min(mapper, graph, node):

```python
    return current_inputs, current_outputs


def aten_clone(mapper, graph, node):
    """
    TorchScript Code:
        %55 : Tensor = aten::clone(%54)
    Parameter meaning:
        %55 (Tensor): output tensor
        %54 (Tensor): input tensor
    """
    scope_name = mapper.normalize_scope_name(node)
    output_name = mapper._get_outputs_name(node)[0]
    layer_outputs = [output_name]
    layer_inputs = {}
    inputs_name, inputs_node = mapper._get_inputs_name(node)
    # outputs list
    current_outputs = [output_name]
    # inputs list
    mapper._check_input(graph, inputs_node[0], inputs_name[0],
                        current_outputs, scope_name)
    layer_inputs["x"] = inputs_name[0]
    current_inputs = list(layer_inputs.values())

    graph.add_layer(
        "paddle.clone",
        inputs=layer_inputs,
        outputs=layer_outputs,
        scope_name=scope_name)

    return current_inputs, current_outputs


def aten_complex(mapper, graph, node):
    """
    TorchScript example:
    ...
```
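paddle.clone, the op the new mapper emits, mirrors torch.Tensor.clone: an independent copy that still participates in autograd. A minimal sanity check, assuming Paddle 2.x dygraph:

```python
import paddle

x = paddle.to_tensor([1.0, 2.0, 3.0], stop_gradient=False)
y = paddle.clone(x)        # new storage, same values
(y * y).sum().backward()   # gradient flows through the clone back to x
print(x.grad.numpy())      # [2. 4. 6.]
```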
@@ -1347,23 +1377,102 @@ def aten_conv2d(mapper, graph, node):

```python
def aten__convolution(mapper, graph, node):
    """
    TorchScript Code:
        %input.10 : Tensor = aten::_convolution(%input.1, %18, %10, %19, %20, %21, %13, %22, %12, %13, %13, %15)
    Parameter meaning:
        %input.10 (Tensor): Output Tensor
        %input.8 (Tensor): Input Tensor
        %18 (Tensor): weights
        %10 (Tensor): bias
        %19 (list): stride
        %20 (list): padding
        %21 (list): dilation
        %13 (bool): whether transpose
        %22 (list): output_padding
        %12 (int): groups
    """
    scope_name = mapper.normalize_scope_name(node)
    inputs_name, inputs_node = mapper._get_inputs_name(node)
    layer_attrs = {}
    # deal with stride
    layer_attrs["stride"] = mapper.attrs[inputs_name[3]]
    # deal with padding
    layer_attrs["padding"] = mapper.attrs[inputs_name[4]]
    # deal with dilation
    layer_attrs["dilation"] = mapper.attrs[inputs_name[5]]
    # deal with groups
    layer_attrs["groups"] = mapper.attrs[inputs_name[8]]
    # for weight is nn.functional.conv2d input
    if inputs_name[1] not in mapper.pytorch_params:
        output_name = mapper._get_outputs_name(node)[0]
        layer_outputs = [output_name]
        layer_inputs = {}
        current_outputs = [output_name]
        # input
        mapper._check_input(graph, inputs_node[0], inputs_name[0],
                            current_outputs, scope_name)
        layer_inputs["x"] = inputs_name[0]
        # weight
        mapper._check_input(graph, inputs_node[1], inputs_name[1],
                            current_outputs, scope_name)
        layer_inputs["weight"] = inputs_name[1]
        layer_attrs["bias"] = mapper.attrs[inputs_name[2]]
        current_inputs = list(layer_inputs.values())
        # Determine whether it is conv or convtranspose according to the attribute
        if len(layer_attrs["stride"]) == 1:
            if mapper.attrs[inputs_name[6]]:
                # only convtranspose have output_padding attr
                layer_attrs["output_padding"] = mapper.attrs[inputs_name[7]]
                graph.add_layer(
                    "paddle.nn.functional.conv1d_transpose",
                    inputs=layer_inputs,
                    outputs=layer_outputs,
                    scope_name=scope_name,
                    **layer_attrs)
            else:
                graph.add_layer(
                    "paddle.nn.functional.conv1d",
                    inputs=layer_inputs,
                    outputs=layer_outputs,
                    scope_name=scope_name,
                    **layer_attrs)
        elif len(layer_attrs["stride"]) == 2:
            if mapper.attrs[inputs_name[6]]:
                # only convtranspose have output_padding attr
                layer_attrs["output_padding"] = mapper.attrs[inputs_name[7]]
                graph.add_layer(
                    "paddle.nn.functional.conv2d_transpose",
                    inputs=layer_inputs,
                    outputs=layer_outputs,
                    scope_name=scope_name,
                    **layer_attrs)
            else:
                graph.add_layer(
                    "paddle.nn.functional.conv2d",
                    inputs=layer_inputs,
                    outputs=layer_outputs,
                    scope_name=scope_name,
                    **layer_attrs)
        elif len(layer_attrs["stride"]) == 3:
            if mapper.attrs[inputs_name[6]]:
                # only convtranspose have output_padding attr
                layer_attrs["output_padding"] = mapper.attrs[inputs_name[7]]
                graph.add_layer(
                    "paddle.nn.functional.conv3d_transpose",
                    inputs=layer_inputs,
                    outputs=layer_outputs,
                    scope_name=scope_name,
                    **layer_attrs)
            else:
                graph.add_layer(
                    "paddle.nn.functional.conv3d",
                    inputs=layer_inputs,
                    outputs=layer_outputs,
                    scope_name=scope_name,
                    **layer_attrs)
        return current_inputs, current_outputs
    else:
        weights = mapper.pytorch_params[inputs_name[1]]
        if len(weights.shape) == 3:
            op_name = name_generator("conv1d", mapper.nn_name2id)
            ...
```
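The first branch (weight absent from mapper.pytorch_params) is the part GFPGAN needs: in StyleGAN-style modulated convolutions the kernel is computed inside forward(), so the traced aten::_convolution receives a graph tensor rather than a stored parameter, and the mapper must fall back to the functional API. A minimal sketch of such a module (illustrative, not taken from GFPGAN):

```python
import torch
import torch.nn.functional as F

class ModulatedConv(torch.nn.Module):
    """Conv whose effective weight is computed per call."""

    def __init__(self, in_ch=4, out_ch=8, k=3):
        super().__init__()
        self.weight = torch.nn.Parameter(torch.randn(out_ch, in_ch, k, k))

    def forward(self, x, style):
        # The rescaled kernel is a computed tensor, so tracing feeds it
        # into aten::_convolution as a graph input, not a stored parameter.
        w = self.weight * style.view(1, -1, 1, 1)
        return F.conv2d(x, w, stride=1, padding=1)

traced = torch.jit.trace(ModulatedConv(),
                         (torch.randn(1, 4, 16, 16), torch.randn(4)))
```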
@@ -1374,16 +1483,11 @@ def aten__convolution(mapper, graph, node):

```python
        output_name = mapper._get_outputs_name(node)[0]
        layer_outputs = [op_name, output_name]
        layer_inputs = {}
        # get the list of outputs of the current node
        current_outputs = [output_name]
        # deal with input 0, i.e. %input.8
        mapper._check_input(graph, inputs_node[0], inputs_name[0],
                            current_outputs, scope_name)
        layer_inputs["input"] = inputs_name[0]
        # get the list of inputs of the current node
        current_inputs = list(layer_inputs.values())
        # deal with input 1, i.e. %18
        mapper.paddle_params[op_name + ".weight"] = weights  #np.swapaxes(weights, 0, 1)
        if mapper.attrs[inputs_name[6]]:
            ...
```
@@ -1391,7 +1495,7 @@ def aten__convolution(mapper, graph, node):

```python
        else:
            layer_attrs["out_channels"] = weights.shape[0]
        layer_attrs["kernel_size"] = weights.shape[2:]
        # deal with bias
        if inputs_name[2] in mapper.pytorch_params:
            bias = mapper.pytorch_params[inputs_name[2]]
            if bias is not None:
                ...
```
@@ -1400,18 +1504,6 @@ def aten__convolution(mapper, graph, node):

```diff
                 layer_attrs["bias_attr"] = False
         else:
             layer_attrs["bias_attr"] = False
-        # deal with input 3, i.e. %19
-        layer_attrs["stride"] = mapper.attrs[inputs_name[3]]
-        # deal with input 4, i.e. %20
-        layer_attrs["padding"] = mapper.attrs[inputs_name[4]]
-        # deal with input 5, i.e. %21
-        layer_attrs["dilation"] = mapper.attrs[inputs_name[5]]
-        # deal with input 6, i.e. %13
-        if mapper.attrs[inputs_name[6]]:
-            # deal with input 7, i.e. %22
-            layer_attrs["output_padding"] = mapper.attrs[inputs_name[7]]
-        # deal with input 8, i.e. %12
-        layer_attrs["groups"] = mapper.attrs[inputs_name[8]]
         if mapper.attrs[inputs_name[6]]:
             layer_attrs['in_channels'] = weights.shape[0] * mapper.attrs[
                 inputs_name[8]]
```
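For this parameter-carrying path, bias_attr=False is how the mapper tells Paddle to build the layer without a bias variable when the traced conv had none. A one-line check of that convention, assuming Paddle 2.x:

```python
import paddle

conv = paddle.nn.Conv2D(4, 8, kernel_size=3, bias_attr=False)
print(conv.bias)  # None: the layer was created without a bias parameter
```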
@@ -1420,6 +1512,8 @@ def aten__convolution(mapper, graph, node):

```python
                inputs_name[8]]
        if len(weights.shape) == 3:
            if mapper.attrs[inputs_name[6]]:
                # only convtranspose have output_padding attr
                layer_attrs["output_padding"] = mapper.attrs[inputs_name[7]]
                graph.add_layer(
                    "paddle.nn.Conv1DTranspose",
                    inputs=layer_inputs,
                    ...
```
@@ -1435,6 +1529,8 @@ def aten__convolution(mapper, graph, node):

```python
                    **layer_attrs)
        elif len(weights.shape) == 4:
            if mapper.attrs[inputs_name[6]]:
                # only convtranspose have output_padding attr
                layer_attrs["output_padding"] = mapper.attrs[inputs_name[7]]
                graph.add_layer(
                    "paddle.nn.Conv2DTranspose",
                    inputs=layer_inputs,
                    ...
```
@@ -1450,6 +1546,8 @@ def aten__convolution(mapper, graph, node):

```python
                    **layer_attrs)
        else:
            if mapper.attrs[inputs_name[6]]:
                # only convtranspose have output_padding attr
                layer_attrs["output_padding"] = mapper.attrs[inputs_name[7]]
                graph.add_layer(
                    "paddle.nn.Conv3DTranspose",
                    inputs=layer_inputs,
                    ...
```
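All three nn branches gate output_padding on the transpose flag (inputs_name[6]) because only transposed convolution accepts it: with stride > 1, several input sizes collapse to one conv output size, and output_padding selects among the candidate transposed shapes. A small illustration with Paddle (output height follows (H-1)*stride - 2*padding + kernel + output_padding):

```python
import paddle

x = paddle.randn([1, 4, 5, 5])
t0 = paddle.nn.Conv2DTranspose(4, 4, kernel_size=3, stride=2, output_padding=0)
t1 = paddle.nn.Conv2DTranspose(4, 4, kernel_size=3, stride=2, output_padding=1)
print(t0(x).shape)  # [1, 4, 11, 11]
print(t1(x).shape)  # [1, 4, 12, 12]
```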
@@ -4385,6 +4483,88 @@ def aten_prelu(mapper, graph, node):

```python
    return current_inputs, current_outputs


def aten_rand(mapper, graph, node):
    """
    TorchScript Code:
        %input.49 : Tensor = aten::rand(%23, %8, %6, %24, %5)
    Parameter meaning:
        %input.49 (Tensor): output tensor
        %23 (list): input shape list
        %8 (int): dtype
        %6 (int): layout
        %24 (int): device
        %5 (bool): requires_grad
    """
    scope_name = mapper.normalize_scope_name(node)
    output_name = mapper._get_outputs_name(node)[0]
    layer_outputs = [output_name]
    layer_inputs = {}
    layer_attrs = {}
    inputs_name, inputs_node = mapper._get_inputs_name(node)
    # outputs list
    current_outputs = [output_name]
    current_inputs = []
    # deal with shape
    if inputs_name[0] in mapper.attrs:
        layer_attrs["shape"] = mapper.attrs[inputs_name[0]]
    else:
        mapper._check_input(graph, inputs_node[0], inputs_name[0],
                            current_outputs, scope_name)
        layer_inputs["shape"] = inputs_name[0]
        current_inputs.append(inputs_name[0])
    # deal with dtype
    layer_attrs["dtype"] = dtype_dict[mapper.attrs[inputs_name[1]]]

    graph.add_layer(
        "paddle.rand",
        inputs=layer_inputs,
        outputs=layer_outputs,
        scope_name=scope_name,
        **layer_attrs)

    return current_inputs, current_outputs


def aten_randn(mapper, graph, node):
    """
    TorchScript Code:
        %input.49 : Tensor = aten::randn(%23, %8, %6, %24, %5)
    Parameter meaning:
        %input.49 (Tensor): output tensor
        %23 (list): input shape list
        %8 (int): dtype
        %6 (int): layout
        %24 (int): device
        %5 (bool): requires_grad
    """
    scope_name = mapper.normalize_scope_name(node)
    output_name = mapper._get_outputs_name(node)[0]
    layer_outputs = [output_name]
    layer_inputs = {}
    layer_attrs = {}
    inputs_name, inputs_node = mapper._get_inputs_name(node)
    # outputs list
    current_outputs = [output_name]
    current_inputs = []
    # deal with shape
    if inputs_name[0] in mapper.attrs:
        layer_attrs["shape"] = mapper.attrs[inputs_name[0]]
    else:
        mapper._check_input(graph, inputs_node[0], inputs_name[0],
                            current_outputs, scope_name)
        layer_inputs["shape"] = inputs_name[0]
        current_inputs.append(inputs_name[0])
    # deal with dtype
    layer_attrs["dtype"] = dtype_dict[mapper.attrs[inputs_name[1]]]

    graph.add_layer(
        "paddle.randn",
        inputs=layer_inputs,
        outputs=layer_outputs,
        scope_name=scope_name,
        **layer_attrs)

    return current_inputs, current_outputs


def aten_real(mapper, graph, node):
    """
    TorchScript example:
    ...
```
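Both mappers split on whether the shape list is a compile-time constant: a literal torch.rand(2, 3) lands in mapper.attrs and becomes the shape attribute, while a shape derived from another tensor stays a graph input wired through layer_inputs. A sketch of how both paths can arise under tracing (module name illustrative; tracing records tensor.shape accesses symbolically):

```python
import torch

class RandPaths(torch.nn.Module):
    def forward(self, x):
        fixed = torch.rand(2, 3)               # constant shape -> attrs branch
        dynamic = torch.randn(x.shape[0], 3)   # symbolic shape -> inputs branch
        return fixed.sum() + dynamic.sum()

traced = torch.jit.trace(RandPaths(), torch.zeros(4, 3))
print(traced.graph)  # contains aten::rand and aten::randn nodes
```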