Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
PaddlePaddle
X2Paddle
提交
2721567b
X
X2Paddle
项目概览
PaddlePaddle
/
X2Paddle
大约 1 年 前同步成功
通知
328
Star
698
Fork
167
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
26
列表
看板
标记
里程碑
合并请求
4
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
X
X2Paddle
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
26
Issue
26
列表
看板
标记
里程碑
合并请求
4
合并请求
4
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
未验证
提交
2721567b
编写于
6月 23, 2022
作者:
J
Jason
提交者:
GitHub
6月 23, 2022
浏览文件
操作
浏览文件
下载
差异文件
Merge pull request #817 from wjj19950828/Add_upsample_trilinear3d
Add aten::upsample_trilinear3d
上级
21e7b473
192a17f9
变更
2
隐藏空白更改
内联
并排
Showing
2 changed files
with
92 additions
and
95 deletions
+92
-95
docs/inference_model_convertor/op_list.md
docs/inference_model_convertor/op_list.md
+1
-1
x2paddle/op_mapper/pytorch2paddle/aten.py
x2paddle/op_mapper/pytorch2paddle/aten.py
+91
-94
未找到文件。
docs/inference_model_convertor/op_list.md
浏览文件 @
2721567b
...
...
@@ -117,7 +117,7 @@ Aten:
| 125 | aten::complex | 126 | aten::real | 127 | aten::imag | 128 | aten::fft
\_
rfftn |
| 129 | aten::fft
\_
irfftn | 130 | aten::hardsigmoid | 131 | aten::hardswish | 132 | aten::linear |
| 133 | aten::rsqrt | 134 | aten::replication
\_
pad1d | 135 | aten::full | 136 | aten::group
\_
norm |
| 137 | aten::argmax | 138 | aten::copy |
|
| | |
| 137 | aten::argmax | 138 | aten::copy |
139 | aten::upsample
\_
trilinear3d
| | |
Prim:
| 序号 | OP | 序号 | OP | 序号 | OP | 序号 | OP |
...
...
x2paddle/op_mapper/pytorch2paddle/aten.py
浏览文件 @
2721567b
...
...
@@ -1189,129 +1189,66 @@ def aten___contains__(mapper, graph, node):
def aten_constant_pad_nd(mapper, graph, node):
    """Map aten::constant_pad_nd to a Paddle pad layer.

    TorchScript Code:
        %58 : Tensor = aten::constant_pad_nd(%input1.24, %4876, %42)
    Parameter meaning:
        %58 (Tensor): Output Tensor
        %input1.24 (Tensor): Input Tensor
        %4876 (list): pad
        %42 (-): value

    NOTE(review): reconstructed from a diff-mangled scrape; the original page
    interleaved two revisions of this function — verify against upstream.
    """
    scope_name = mapper.normalize_scope_name(node)
    op_name = name_generator("pad", mapper.nn_name2id)
    output_name = mapper._get_outputs_name(node)[0]
    layer_outputs = [op_name, output_name]
    layer_inputs = {}
    layer_attrs = {}
    inputs_name, inputs_node = mapper._get_inputs_name(node)
    # Output list
    current_outputs = [output_name]
    # process Input Tensor
    mapper._check_input(graph, inputs_node[0], inputs_name[0],
                        current_outputs, scope_name)
    layer_inputs["input"] = inputs_name[0]
    # process pad: a compile-time constant goes through the attrs table,
    # otherwise the pad sizes arrive as a graph input.
    padding_attr = None
    if inputs_name[1] in mapper.attrs:
        padding_attr = mapper.attrs[inputs_name[1]]
    else:
        mapper._check_input(graph, inputs_node[1], inputs_name[1],
                            current_outputs, scope_name)
        layer_inputs["pad"] = inputs_name[1]
    # process value
    layer_attrs["value"] = mapper.attrs[inputs_name[2]]

    if padding_attr is not None:
        # Constant padding: emit paddle.nn.functional.pad directly, padding
        # the pad list with leading zeros so it always covers 4 dims
        # (paddle expects pairs for every spatial dim in NCDHW order).
        layer_inputs["x"] = inputs_name[0]
        kernel_name = "paddle.nn.functional.pad"
        if len(padding_attr) == 2:
            layer_attrs["pad"] = [0, 0, 0, 0, 0, 0] + padding_attr
        elif len(padding_attr) == 4:
            layer_attrs["pad"] = [0, 0, 0, 0] + padding_attr
        elif len(padding_attr) == 6:
            layer_attrs["pad"] = [0, 0] + padding_attr
        else:
            layer_attrs["pad"] = padding_attr
        graph.add_layer(
            kernel_name,
            inputs=layer_inputs,
            outputs=[output_name],
            scope_name=scope_name,
            **layer_attrs)
    else:
        # Dynamic pad sizes: fall back to the custom Pad layer which resolves
        # the pad list at runtime.
        layer_inputs["input"] = inputs_name[0]
        graph.add_layer(
            "custom_layer:Pad",
            inputs=layer_inputs,
            outputs=[output_name],
            scope_name=scope_name,
            **layer_attrs)
    current_inputs = list(layer_inputs.values())
    return current_inputs, current_outputs
...
...
@@ -6065,6 +6002,66 @@ def aten_upsample_bilinear2d(mapper, graph, node):
return
current_inputs
,
current_outputs
def aten_upsample_trilinear3d(mapper, graph, node):
    """Map aten::upsample_trilinear3d to paddle.nn.functional.interpolate.

    TorchScript Code:
        %4997 : Tensor = aten::upsample_trilinear3d(%x.13, %4963, %5421, %4995)
    Parameter meaning:
        %4997 (Tensor): Output Tensor
        %x.13 (Tensor): Input Tensor
        %4963 (list): output_size
        %5421 (bool): align_corners
        %4995 (float): scale_factors
    """
    scope_name = mapper.normalize_scope_name(node)
    output_name = mapper._get_outputs_name(node)[0]
    layer_outputs = [output_name]
    layer_inputs = {}
    layer_attrs = {}
    inputs_name, inputs_node = mapper._get_inputs_name(node)
    # Output list
    current_outputs = [output_name]
    # process Input Tensor
    mapper._check_input(graph, inputs_node[0], inputs_name[0],
                        current_outputs, scope_name)
    layer_inputs["x"] = inputs_name[0]
    current_inputs = list(layer_inputs.values())
    # process output_size: constant -> attr, otherwise wire it as an input
    if inputs_name[1] in mapper.attrs:
        layer_attrs["size"] = mapper.attrs[inputs_name[1]]
    else:
        mapper._check_input(graph, inputs_node[1], inputs_name[1],
                            current_outputs, scope_name)
        layer_inputs["size"] = inputs_name[1]
        current_inputs.append(inputs_name[1])
    # process align_corners
    if inputs_name[2] in mapper.attrs:
        layer_attrs["align_corners"] = mapper.attrs[inputs_name[2]]
    else:
        mapper._check_input(graph, inputs_node[2], inputs_name[2],
                            current_outputs, scope_name)
        layer_inputs["align_corners"] = inputs_name[2]
        current_inputs.append(inputs_name[2])
    # process scale_factor
    if inputs_name[3] in mapper.attrs:
        layer_attrs["scale_factor"] = mapper.attrs[inputs_name[3]]
    else:
        mapper._check_input(graph, inputs_node[3], inputs_name[3],
                            current_outputs, scope_name)
        layer_inputs["scale_factor"] = inputs_name[3]
        current_inputs.append(inputs_name[3])
    # align_mode=0 matches PyTorch's trilinear sampling convention;
    # 3D volumetric data is laid out NCDHW.
    layer_attrs["align_mode"] = 0
    layer_attrs["mode"] = string("trilinear")
    layer_attrs["data_format"] = string("NCDHW")
    graph.add_layer(
        "paddle.nn.functional.interpolate",
        inputs=layer_inputs,
        outputs=layer_outputs,
        scope_name=scope_name,
        **layer_attrs)
    return current_inputs, current_outputs
def
aten_upsample_nearest2d
(
mapper
,
graph
,
node
):
""" 构造使用nearest上采样的PaddleLayer。
TorchScript示例:
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录