PaddlePaddle / X2Paddle

Commit a02ad500 (unverified)
Authored on Jan 06, 2021 by SunAhong1993; committed via GitHub on Jan 06, 2021.

Merge pull request #22 from PaddlePaddle/develop

Merge

Parents: d6e28406, 65b3b7cb
Showing 5 changed files with 77 additions and 5 deletions (+77, -5).
x2paddle/decoder/tf_decoder.py                                   +2  -0
x2paddle/op_mapper/dygraph/pytorch2paddle/aten.py                +66 -0
x2paddle/op_mapper/dygraph/tf2paddle/tf_op_mapper.py             +3  -3
x2paddle/op_mapper/static/tf2paddle/tf_op_mapper.py              +2  -2
x2paddle/optimizer/pytorch_code_optimizer/hierachical_tree.py    +4  -0
x2paddle/decoder/tf_decoder.py
@@ -361,6 +361,8 @@ class TFDecoder(object):
                continue
            graph_node = TFGraphNode(layer)
            dtype = graph_node.layer.attr['dtype'].type
            if dtype == 10:
                continue
            need_define_shape = 0
            if self.define_input_shape:
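For context on the literal 10 in this hunk: in TensorFlow's DataType proto that enum value is DT_BOOL, so the decoder appears to skip boolean placeholders here. A minimal check of the enum value, assuming a TensorFlow install (this snippet is illustrative, not part of the commit):

    # Verify what enum value 10 maps to in TensorFlow's DataType proto.
    # Illustrative only; assumes the tensorflow package is installed.
    from tensorflow.core.framework import types_pb2

    print(types_pb2.DT_BOOL)            # 10
    print(types_pb2.DataType.Name(10))  # 'DT_BOOL'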
x2paddle/op_mapper/dygraph/pytorch2paddle/aten.py
@@ -4537,6 +4537,72 @@ def aten_upsample_bilinear2d(mapper, graph, node):
        **layer_attrs)
    return current_inputs, current_outputs


def aten_upsample_nearest2d(mapper, graph, node):
    """ Construct a PaddleLayer that performs nearest-neighbor upsampling.
    TorchScript example:
        %4997 : Tensor = aten::upsample_nearest2d(%x.13, %4963, %5421, %4995)
        Argument meanings:
        %4997 (Tensor): the output, i.e. the upsampled Tensor.
        %x.13 (Tensor): the Tensor to upsample.
        %4963 (list): the output size after upsampling.
        %5421 (float): the scale factor for the height.
        %4995 (float): the scale factor for the width.
    """
    scope_name = mapper.normalize_scope_name(node)
    output_name = mapper._get_outputs_name(node)[0]
    layer_outputs = [output_name]
    layer_inputs = {}
    layer_attrs = {}
    inputs_name, inputs_node = mapper._get_inputs_name(node)
    # Collect the list of outputs of the current node.
    current_outputs = [output_name]
    # Handle input 0, i.e. %x.13.
    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs,
                        scope_name)
    layer_inputs["x"] = inputs_name[0]
    # Collect the list of inputs of the current node.
    current_inputs = list(layer_inputs.values())
    # Handle input 1, i.e. %4963.
    if inputs_name[1] in mapper.attrs:
        layer_attrs["size"] = mapper.attrs[inputs_name[1]]
    else:
        mapper._check_input(graph, inputs_node[1], inputs_name[1],
                            current_outputs, scope_name)
        layer_inputs["size"] = inputs_name[1]
        current_inputs.append(inputs_name[1])
        graph.add_layer(
            "prim.isinstance",
            inputs={"input": inputs_name[1]},
            outputs=[inputs_name[1] + "_isinstance"],
            scope_name=scope_name,
            cls="paddle.fluid.Variable")
        # TODO(syf): paddle.Variable
        graph.add_layer(
            "prim.if", {"input": inputs_name[1] + "_isinstance"},
            outputs=[inputs_name[0] + "_if1"],
            scope_name=scope_name)
        if_layer = graph.layers[list(graph.layers.keys())[-1]]
        block = PaddleGraph(
            source_type="pytorch", parent_layer=if_layer, graph_type="dygraph")
        block.add_layer(
            "prim.var2list",
            inputs={"input": inputs_name[1]},
            outputs=[inputs_name[1]],
            scope_name=scope_name)
        if_layer.add_block(block)
        block = PaddleGraph(
            source_type="pytorch", parent_layer=if_layer, graph_type="dygraph")
        if_layer.add_block(block)
        if_layer.inputs["input-0"] = inputs_name[1]
    layer_inputs["scale_factor"] = inputs_name[3]
    layer_attrs["align_mode"] = 0
    layer_attrs["mode"] = string("nearest")
    graph.add_layer(
        "paddle.nn.functional.interpolate",
        inputs=layer_inputs,
        outputs=layer_outputs,
        scope_name=scope_name,
        **layer_attrs)
    return current_inputs, current_outputs


def aten_values(mapper, graph, node):
    """ Construct a PaddleLayer that compares sizes.
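To show what the new mapping targets on the Paddle side, here is a hedged, self-contained sketch of the paddle.nn.functional.interpolate call that aten::upsample_nearest2d is translated into. The tensor shape and scale factor are invented for illustration; this is not the converter's generated output.

    # Hedged sketch of the Paddle call behind the new aten_upsample_nearest2d mapping.
    # Shapes and the scale factor are illustrative.
    import paddle
    import paddle.nn.functional as F

    x = paddle.rand([1, 3, 8, 8])  # NCHW input
    y = F.interpolate(x, scale_factor=2.0, mode="nearest", align_mode=0)
    print(y.shape)                 # [1, 3, 16, 16]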
x2paddle/op_mapper/dygraph/tf2paddle/tf_op_mapper.py
@@ -676,8 +676,8 @@ class TFOpMapper(OpMapper):
         input = self.graph.get_input_node(node, 0)
         paddings = self.graph.get_input_node(node, 1)
         assert paddings.layer_type == "Const", "Padding should be Const"
-        paddings = np.flip(paddings.value, 0).flatten().tolist()
-        dim = int(len(paddings) / 2)
+        new_paddings = numpy.flip(paddings.value, 0).flatten().tolist()
+        dim = int(len(new_paddings) / 2)
         transpose_name = gen_name("pad", "transpose")
         self.paddle_graph.add_layer(
             kernel="paddle.transpose",
@@ -688,7 +688,7 @@ class TFOpMapper(OpMapper):
             kernel="paddle.nn.Pad{}D".format(dim),
             inputs={"x": transpose_name},
             outputs=layer_outputs,
-            pad=new_padding)
+            pad=new_paddings)
         self.paddle_graph.add_layer(
             kernel="paddle.transpose",
             inputs={"x": node.name},
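The two hunks above read as a naming fix: the flipped padding list is now stored in new_paddings and that same name is passed as pad=, whereas the old code passed new_padding, which is not defined in the context shown. A standalone illustration of what the renamed line computes, using an invented NHWC paddings constant (not the mapper code itself):

    # Standalone illustration of the flip/flatten step; the paddings value is invented.
    import numpy as np

    paddings_value = np.array([[0, 0],   # batch
                               [1, 1],   # height
                               [2, 2],   # width
                               [0, 0]])  # channel
    new_paddings = np.flip(paddings_value, 0).flatten().tolist()
    dim = int(len(new_paddings) / 2)
    print(new_paddings)  # [0, 0, 2, 2, 1, 1, 0, 0]
    print(dim)           # 4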
x2paddle/op_mapper/static/tf2paddle/tf_op_mapper.py
@@ -661,7 +661,7 @@ class TFOpMapper(OpMapper):
         input = self.graph.get_input_node(node, 0)
         paddings = self.graph.get_input_node(node, 1)
         assert paddings.layer_type == "Const", "Padding should be Const"
-        paddings = np.flip(paddings.value, 0).flatten().tolist()
+        new_paddings = numpy.flip(paddings.value, 0).flatten().tolist()
         transpose_name = gen_name("pad", "transpose")
         self.paddle_graph.add_layer(
             kernel="paddle.transpose",
@@ -672,7 +672,7 @@ class TFOpMapper(OpMapper):
             kernel="paddle.nn.functional.pad".format(dim),
             inputs={"x": transpose_name},
             outputs=[node.name],
-            pad=new_padding)
+            pad=new_paddings)
         self.paddle_graph.add_layer(
             kernel="paddle.transpose",
             inputs={"x": node.name},
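The static mapper gets the same rename. For reference, a minimal, hedged usage sketch of the target kernel in this file, paddle.nn.functional.pad, on a 4-D NCHW tensor; the shapes and pad amounts are invented and this is not the mapper's generated code.

    # Hedged usage sketch of paddle.nn.functional.pad on an NCHW tensor.
    import paddle
    import paddle.nn.functional as F

    x = paddle.rand([1, 3, 8, 8])
    y = F.pad(x, pad=[1, 1, 2, 2])  # [left, right, top, bottom] on the last two dims
    print(y.shape)                  # [1, 3, 12, 10]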
x2paddle/optimizer/pytorch_code_optimizer/hierachical_tree.py
@@ -300,6 +300,7 @@ class HierarchicalTree(Tree):
         """
         depths = sorted(list(self._hierarchical_order.keys()), reverse=True)
         all_name_old2new = dict()
+        current_module_name_list = list()
         for depth in depths[1:]:
             # Mapping from each Module's name to its sub-graph layers.
             module_name2sub_layers = dict()
@@ -352,6 +353,9 @@ class HierarchicalTree(Tree):
                     module_name = None
                 else:
                     module_name = name
+                while module_name in current_module_name_list:
+                    module_name += "__0"
+                current_module_name_list.append(module_name)
                 self.merge_node(module_name2sub_layers[name],
                                 sequentials2attrs_table[name],
                                 node_name2sub_layers,
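The second hunk adds a de-duplication rule for generated module names. A standalone, hedged sketch of that rule; the helper name and the example names are mine, not from the repository.

    # Sketch of the name de-duplication the hunk introduces: if a candidate module
    # name is already taken, keep appending "__0" until it is unique.
    def dedup_module_name(name, current_module_name_list):
        module_name = name
        while module_name in current_module_name_list:
            module_name += "__0"
        current_module_name_list.append(module_name)
        return module_name

    taken = []
    print(dedup_module_name("Bottleneck", taken))  # Bottleneck
    print(dedup_module_name("Bottleneck", taken))  # Bottleneck__0
    print(dedup_module_name("Bottleneck", taken))  # Bottleneck__0__0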