PaddlePaddle / X2Paddle
Commit 35fe7f49 (unverified)
Authored by Jason on Apr 05, 2021; committed via GitHub on Apr 05, 2021

Merge pull request #524 from SunAhong1993/develop

fix the parameter path string

Parents: 5fc5a8ed, 90a29f43
Changes: 9 changed files with 165 additions and 16 deletions (+165 −16)
x2paddle/core/program.py                                        +2   −2
x2paddle/decoder/pytorch_decoder.py                             +1   −2
x2paddle/op_mapper/dygraph/onnx2paddle/opset9/opset.py          +47  −0
x2paddle/op_mapper/dygraph/pytorch2paddle/aten.py               +10  −6
x2paddle/op_mapper/dygraph/tf2paddle/tf_op_mapper.py            +18  −0
x2paddle/op_mapper/static/onnx2paddle/opset9/opset.py           +47  −0
x2paddle/op_mapper/static/tf2paddle/tf_op_mapper.py             +19  −0
x2paddle/optimizer/pytorch_code_optimizer/hierachical_tree.py   +19  −4
x2paddle/optimizer/pytorch_code_optimizer/module_graph.py       +2   −2
x2paddle/core/program.py

@@ -510,7 +510,7 @@ class PaddleGraph(object):
         comment_list = list()
         comment_list.append("# There are {} inputs.".format(len(self.inputs_info)))
         for k, v in self.inputs_info.items():
-            comment_list.append("# {}: shape-{},type-{}。".format(k, v[0], v[1]))
+            comment_list.append("# {}: shape-{}, type-{}.".format(k, v[0], v[1]))
         self.run_func.extend(gen_codes(comment_list, ...
@@ -518,7 +518,7 @@ class PaddleGraph(object):
         use_structured_name = False if self.source_type in ["tf"] else True
         self.run_func.extend(
             gen_codes([
                 "paddle.disable_static()",
-                "params = paddle.load('{}/model.pdparams')".format(osp.abspath(code_dir)),
+                "params = paddle.load('{}')".format(osp.join(osp.abspath(code_dir), "model.pdparams")),
                 "model = {}()".format(self.name),
                 "model.set_dict(params, use_structured_name={})".format(use_structured_name),
                 "model.eval()",
                 ...
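
Both hunks in this file change strings that end up in the generated inference script: the input comment now uses ASCII punctuation, and the model.pdparams path is built with osp.join instead of a hard-coded '/'. A minimal standalone sketch of the path difference follows (plain os.path, no Paddle needed; "pd_model" is a hypothetical output directory, not from the commit):

import os.path as osp

code_dir = "pd_model"  # hypothetical output directory

# Old generated string: hard-codes '/' as the separator, which yields
# mixed separators on Windows once osp.abspath returns a backslash path.
old_path = "{}/model.pdparams".format(osp.abspath(code_dir))

# New generated string: osp.join always uses the platform separator (os.sep),
# so the generated load path stays consistent across operating systems.
new_path = osp.join(osp.abspath(code_dir), "model.pdparams")

print(old_path)
print(new_path)
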
x2paddle/decoder/pytorch_decoder.py

@@ -64,6 +64,5 @@ class TraceDecoder(Decoder):
             print(e)
             exit(0)
         self.graph = self._optimize_graph(self.script.inlined_graph)
-        self.input_examples = input_examples
+        self.input_examples = input_examples
x2paddle/op_mapper/dygraph/onnx2paddle/opset9/opset.py

@@ -2043,3 +2043,50 @@ class OpSet9():
             inputs={"x": val_x.name},
             outputs=layer_outputs,
             **layer_attrs)
+
+    @print_mapping_info
+    def DepthToSpace(self, node):
+        val_x = self.graph.get_input_node(node, idx=0, copy=True)
+        blocksize = node.get_attr('blocksize')
+        mode = node.get_attr('mode', "DCR")
+        val_x_shape = val_x.out_shapes[0]
+        b, c, h, w = val_x_shape
+
+        if mode == "DCR":
+            self.paddle_graph.add_layer(
+                'paddle.reshape',
+                inputs={"x": val_x.name},
+                outputs=[node.name],
+                shape=[b, blocksize, blocksize, c // (blocksize**2), h, w])
+            self.paddle_graph.add_layer(
+                'paddle.transpose',
+                inputs={"x": node.name},
+                outputs=[node.name],
+                perm=[0, 3, 4, 1, 5, 2])
+            self.paddle_graph.add_layer(
+                'paddle.reshape',
+                inputs={"x": node.name},
+                outputs=[node.name],
+                shape=[b, c // (blocksize**2), h * blocksize, w * blocksize])
+        else:
+            self.paddle_graph.add_layer(
+                'paddle.reshape',
+                inputs={"x": val_x.name},
+                outputs=[node.name],
+                shape=[b, c // (blocksize**2), blocksize, blocksize, h, w])
+            self.paddle_graph.add_layer(
+                'paddle.transpose',
+                inputs={"x": node.name},
+                outputs=[node.name],
+                perm=[0, 1, 4, 2, 5, 3])
+            self.paddle_graph.add_layer(
+                'paddle.reshape',
+                inputs={"x": node.name},
+                outputs=[node.name],
+                shape=[b, c // (blocksize**2), h * blocksize, w * blocksize])
\ No newline at end of file
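
The reshape/transpose/reshape sequences added above follow the standard decomposition of ONNX DepthToSpace for the DCR and CRD modes. A small NumPy sketch, independent of X2Paddle and Paddle, that applies the same permutations to NCHW data and checks the output shape:

import numpy as np

def depth_to_space(x, blocksize, mode="DCR"):
    # Reference decomposition of DepthToSpace via reshape/transpose/reshape.
    b, c, h, w = x.shape
    if mode == "DCR":  # depth-column-row ordering
        t = x.reshape(b, blocksize, blocksize, c // (blocksize ** 2), h, w)
        t = t.transpose(0, 3, 4, 1, 5, 2)
    else:  # "CRD": column-row-depth ordering
        t = x.reshape(b, c // (blocksize ** 2), blocksize, blocksize, h, w)
        t = t.transpose(0, 1, 4, 2, 5, 3)
    return t.reshape(b, c // (blocksize ** 2), h * blocksize, w * blocksize)

x = np.random.rand(2, 8, 3, 5).astype("float32")
for mode in ("DCR", "CRD"):
    y = depth_to_space(x, blocksize=2, mode=mode)
    assert y.shape == (2, 2, 6, 10)
    print(mode, y.shape)
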
x2paddle/op_mapper/dygraph/pytorch2paddle/aten.py

@@ -2174,12 +2174,16 @@ def aten_hardtanh_(mapper, graph, node):
     # 处理输入2，即%66
     layer_attrs["max"] = mapper.attrs[inputs_name[2]]

-    graph.add_layer(
-        'paddle.nn.Hardtanh',
-        inputs=layer_inputs,
-        outputs=layer_outputs,
-        scope_name=scope_name,
-        **layer_attrs)
+    if layer_attrs["min"] == 0 and layer_attrs["max"] == 6:
+        graph.add_layer(
+            "paddle.nn.ReLU6",
+            inputs=layer_inputs,
+            outputs=layer_outputs,
+            scope_name=scope_name)
+    else:
+        graph.add_layer(
+            'paddle.nn.Hardtanh',
+            inputs=layer_inputs,
+            outputs=layer_outputs,
+            scope_name=scope_name,
+            **layer_attrs)

     return current_inputs, current_outputs
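
The special case added here relies on Hardtanh with min=0 and max=6 being numerically identical to ReLU6, so mapping that configuration to paddle.nn.ReLU6 only makes the exported code more idiomatic. A quick NumPy check of the equivalence (independent of Paddle):

import numpy as np

x = np.linspace(-10.0, 10.0, 9)

# hardtanh(x, min=0, max=6) clips to [0, 6]; relu6(x) = min(max(x, 0), 6).
hardtanh_0_6 = np.clip(x, 0.0, 6.0)
relu6 = np.minimum(np.maximum(x, 0.0), 6.0)

assert np.allclose(hardtanh_0_6, relu6)
print(hardtanh_0_6)
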
x2paddle/op_mapper/dygraph/tf2paddle/tf_op_mapper.py

@@ -1526,3 +1526,21 @@ class TFOpMapper(OpMapper):
             inputs=inputs,
             outputs=[node.name],
             **attr)
+
+    def ReverseV2(self, node):
+        x = self.graph.get_input_node(node, 0)
+        axis = self.graph.get_input_node(node, 1)
+        inputs = {"x": x.name}
+        attr = dict()
+        if axis.layer_type == 'Const':
+            axis = axis.value.tolist()
+            if not isinstance(axis, list):
+                axis = [axis]
+            attr['axis'] = axis
+        else:
+            inputs['axis'] = axis.name
+        self.paddle_graph.add_layer(
+            "paddle.flip",
+            inputs=inputs,
+            outputs=[node.name],
+            **attr)
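
TensorFlow's ReverseV2 reverses a tensor along the given axes, which the new mapping expresses with paddle.flip; when the axis input is a constant it is folded into an attribute (and a scalar is normalized to a list), otherwise it stays a graph input. A NumPy sketch of the underlying semantics, with np.flip standing in for paddle.flip:

import numpy as np

x = np.arange(12).reshape(3, 4)

# ReverseV2(x, axis=[1]) reverses along the last axis; np.flip mirrors that.
print(np.flip(x, axis=1))

# A scalar/constant axis is normalized to a list, as in the mapper above.
axis = 0
if not isinstance(axis, list):
    axis = [axis]
print(np.flip(x, axis=axis))
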
x2paddle/op_mapper/static/onnx2paddle/opset9/opset.py

The static-graph ONNX mapper gains the same DepthToSpace implementation as the dygraph one above.

@@ -1810,3 +1810,50 @@ class OpSet9():
             inputs={"x": val_x.name},
             outputs=[node.name],
             **layer_attrs)
+
+    @print_mapping_info
+    def DepthToSpace(self, node):
+        val_x = self.graph.get_input_node(node, idx=0, copy=True)
+        blocksize = node.get_attr('blocksize')
+        mode = node.get_attr('mode', "DCR")
+        val_x_shape = val_x.out_shapes[0]
+        b, c, h, w = val_x_shape
+
+        if mode == "DCR":
+            self.paddle_graph.add_layer(
+                'paddle.reshape',
+                inputs={"x": val_x.name},
+                outputs=[node.name],
+                shape=[b, blocksize, blocksize, c // (blocksize**2), h, w])
+            self.paddle_graph.add_layer(
+                'paddle.transpose',
+                inputs={"x": node.name},
+                outputs=[node.name],
+                perm=[0, 3, 4, 1, 5, 2])
+            self.paddle_graph.add_layer(
+                'paddle.reshape',
+                inputs={"x": node.name},
+                outputs=[node.name],
+                shape=[b, c // (blocksize**2), h * blocksize, w * blocksize])
+        else:
+            self.paddle_graph.add_layer(
+                'paddle.reshape',
+                inputs={"x": val_x.name},
+                outputs=[node.name],
+                shape=[b, c // (blocksize**2), blocksize, blocksize, h, w])
+            self.paddle_graph.add_layer(
+                'paddle.transpose',
+                inputs={"x": node.name},
+                outputs=[node.name],
+                perm=[0, 1, 4, 2, 5, 3])
+            self.paddle_graph.add_layer(
+                'paddle.reshape',
+                inputs={"x": node.name},
+                outputs=[node.name],
+                shape=[b, c // (blocksize**2), h * blocksize, w * blocksize])
\ No newline at end of file
x2paddle/op_mapper/static/tf2paddle/tf_op_mapper.py

The static-graph TF mapper gains the same ReverseV2 mapping as the dygraph one above.

@@ -1509,3 +1509,22 @@ class TFOpMapper(OpMapper):
             inputs=inputs,
             outputs=[node.name],
             **attr)
+
+    def ReverseV2(self, node):
+        x = self.graph.get_input_node(node, 0)
+        axis = self.graph.get_input_node(node, 1)
+        inputs = {"x": x.name}
+        attr = dict()
+        if axis.layer_type == 'Const':
+            axis = axis.value.tolist()
+            if not isinstance(axis, list):
+                axis = [axis]
+            attr['axis'] = axis
+        else:
+            inputs['axis'] = axis.name
+        self.paddle_graph.add_layer(
+            "paddle.flip",
+            inputs=inputs,
+            outputs=[node.name],
+            **attr)
x2paddle/optimizer/pytorch_code_optimizer/hierachical_tree.py

@@ -336,8 +336,23 @@ class HierarchicalTree(Tree):
             else:
                 module_name = module._get_name()
             if module_name in module_name2sub_layers:
-                module_name2sub_layers[module_name].append(sub_layers)
-                module_name2sub_identifiers[module_name].append(sub_identifiers)
+                if len(sub_layers[list(sub_layers.keys())[-1]].outputs) != \
+                        len(module_name2sub_layers[module_name][0][list(module_name2sub_layers[module_name][0].keys())[-1]].outputs):
+                    while module_name in module_name2sub_layers:
+                        module_name = module_name + "__tmp"
+                        if module_name in module_name2sub_layers and \
+                                len(sub_layers[list(sub_layers.keys())[-1]].outputs) == \
+                                len(module_name2sub_layers[module_name][0][list(module_name2sub_layers[module_name][0].keys())[-1]].outputs):
+                            break
+                    if module_name not in module_name2sub_layers:
+                        module_name2sub_layers[module_name] = [sub_layers]
+                        module_name2sub_identifiers[module_name] = [sub_identifiers]
+                    else:
+                        module_name2sub_layers[module_name].append(sub_layers)
+                        module_name2sub_identifiers[module_name].append(sub_identifiers)
+                else:
+                    module_name2sub_layers[module_name].append(sub_layers)
+                    module_name2sub_identifiers[module_name].append(sub_identifiers)
             else:
                 module_name2sub_layers[module_name] = [sub_layers]
                 module_name2sub_identifiers[module_name] = [sub_identifiers]
@@ -385,10 +400,10 @@ class HierarchicalTree(Tree):
         run_func_list.append("def main({}):".format(input_data_name))
         run_func_list.append("    # There are {} inputs.".format(len(self.pd_graph.inputs_info)))
         for k, v in self.pd_graph.inputs_info.items():
-            run_func_list.append("    # {}: shape-{},type-{}。".format(k, v[0], v[1]))
+            run_func_list.append("    # {}: shape-{}, type-{}.".format(k, v[0], v[1]))
         run_func_list.extend([
             "    paddle.disable_static()",
-            "    params = paddle.load('{}/model.pdparams')".format(osp.abspath(save_dir)),
+            "    params = paddle.load('{}')".format(osp.join(osp.abspath(save_dir), "model.pdparams")),
             "    model = {}()".format(self.pd_graph.name),
             "    model.set_dict(params)",
             "    model.eval()",
             ...
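
The first hunk disambiguates module names: when a new group of sub-layers does not have the same output count as the group already stored under module_name, the name gets a "__tmp" suffix, repeatedly, until it either lands on a bucket with a matching output count or on an unused key. A simplified, self-contained sketch of that bucketing rule (buckets keyed by name, with a plain output count standing in for the real sub-layer objects; the put helper is illustrative only, not X2Paddle code):

def put(buckets, module_name, n_outputs, group):
    # Disambiguate module_name when output counts differ between groups.
    if module_name in buckets and buckets[module_name][0] != n_outputs:
        # Keep appending "__tmp" until we hit a bucket with a matching
        # output count or an unused name.
        while module_name in buckets:
            module_name = module_name + "__tmp"
            if module_name in buckets and buckets[module_name][0] == n_outputs:
                break
    if module_name not in buckets:
        buckets[module_name] = (n_outputs, [group])
    else:
        buckets[module_name][1].append(group)
    return module_name

buckets = {}
print(put(buckets, "Block", 1, "g1"))  # Block
print(put(buckets, "Block", 1, "g2"))  # Block (same output count, appended)
print(put(buckets, "Block", 2, "g3"))  # Block__tmp (different output count)
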
x2paddle/optimizer/pytorch_code_optimizer/module_graph.py

@@ -351,10 +351,10 @@ class ModuleGraph(object):
         run_func_list.append("def main({}):".format(input_data_name))
         run_func_list.append("    # There are {} inputs.".format(len(self.pd_graph.inputs_info)))
         for k, v in self.pd_graph.inputs_info.items():
-            run_func_list.append("    # {}: shape-{}, type-{}.".format(k, v[0], v[1]))
+            run_func_list.append("    # {}: shape-{}, type-{}.".format(k, v[0], v[1]))
         run_func_list.extend([
             "    paddle.disable_static()",
-            "    params = paddle.load('{}/model.pdparams')".format(osp.abspath(save_dir)),
+            "    params = paddle.load('{}')".format(osp.join(osp.abspath(save_dir), "model.pdparams")),
             "    model = {}()".format(self.pd_graph.name),
             "    model.set_dict(params)",
             "    model.eval()",
             ...
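
Both optimizer files emit the same main() prologue for the converted model's generated script; this hunk only changes how the input comment and the parameter path are formatted. A standalone sketch that rebuilds those generated lines after the change and prints them (save_dir, inputs_info, and the model class name are made-up placeholders; nothing here imports Paddle):

import os.path as osp

save_dir = "pd_model"                                  # hypothetical output directory
inputs_info = {"x0": [[1, 3, 224, 224], "float32"]}    # hypothetical inputs_info
model_name = "ONNXModel"                               # hypothetical generated class name

run_func_list = []
run_func_list.append("def main(x0):")
run_func_list.append("    # There are {} inputs.".format(len(inputs_info)))
for k, v in inputs_info.items():
    run_func_list.append("    # {}: shape-{}, type-{}.".format(k, v[0], v[1]))
run_func_list.extend([
    "    paddle.disable_static()",
    "    params = paddle.load('{}')".format(
        osp.join(osp.abspath(save_dir), "model.pdparams")),
    "    model = {}()".format(model_name),
    "    model.set_dict(params)",
    "    model.eval()",
])
print("\n".join(run_func_list))
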