PaddlePaddle / X2Paddle
Commit 573b7b14
Authored Nov 30, 2021 by wjj19950828

    add aten::format and remove to_tensor

Parent: b3e5fdc7
Showing 5 changed files with 76 additions and 8 deletions (+76 -8)

    x2paddle/convert.py                                                  +8  -1
    x2paddle/core/program.py                                             +3  -3
    x2paddle/op_mapper/pytorch2paddle/aten.py                            +32 -0
    x2paddle/op_mapper/pytorch2paddle/prim2code.py                       +25 -4
    x2paddle/optimizer/pytorch_code_optimizer/layer_code_generator.py    +8  -0
x2paddle/convert.py

@@ -87,6 +87,11 @@ def arg_parser():
         type=_text_type,
         default=None,
         help="pretrain model file of pytorch model")
+    parser.add_argument(
+        "--code_optimizer",
+        "-co",
+        default=True,
+        help="Turn on code optimization")
     parser.add_argument(
         "--to_lite", "-tl", default=False, help="convert to Paddle-Lite format")
     parser.add_argument(

@@ -220,6 +225,7 @@ def pytorch2paddle(module,
                    save_dir,
                    jit_type="trace",
                    input_examples=None,
+                   code_optimizer=True,
                    convert_to_lite=False,
                    lite_valid_places="arm",
                    lite_model_type="naive_buffer"):

@@ -253,7 +259,8 @@ def pytorch2paddle(module,
     graph_opt = GraphOptimizer(source_frame="pytorch", jit_type=jit_type)
     graph_opt.optimize(mapper.paddle_graph)
     print("Model optimized.")
-    mapper.paddle_graph.gen_model(save_dir, jit_type=jit_type)
+    mapper.paddle_graph.gen_model(
+        save_dir, jit_type=jit_type, code_optimizer=code_optimizer)
     if convert_to_lite:
         convert2lite(save_dir, lite_valid_places, lite_model_type)
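
As a usage note, the new switch threads through the Python API: pytorch2paddle() forwards code_optimizer to PaddleGraph.gen_model() (see core/program.py below). A minimal sketch of a call, assuming a toy torch module (the model and input names here are illustrative, not part of the commit):

    import torch
    from x2paddle.convert import pytorch2paddle

    # Toy traceable model, used only to show the call signature.
    net = torch.nn.Sequential(torch.nn.Linear(4, 2), torch.nn.ReLU()).eval()
    dummy_input = torch.randn(1, 4)

    # code_optimizer=True (the default) keeps the optimized, module-structured
    # code generation; setting it to False falls back to plain generated code.
    pytorch2paddle(
        net,
        save_dir="pd_model",
        jit_type="trace",
        input_examples=[dummy_input],
        code_optimizer=True)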
x2paddle/core/program.py

@@ -237,11 +237,11 @@ class PaddleGraph(object):
         return update(self.layers)

-    def gen_model(self, save_dir, jit_type=None):
+    def gen_model(self, save_dir, jit_type=None, code_optimizer=True):
         if not osp.exists(save_dir):
             os.makedirs(save_dir)
         if jit_type == "trace":
-            if not self.has_unpack:
+            if not self.has_unpack and code_optimizer:
                 from x2paddle.optimizer.pytorch_code_optimizer import HierarchicalTree
                 hierarchical_tree = HierarchicalTree(self)
                 for layer_id, layer in self.layers.items():

@@ -252,7 +252,7 @@ class PaddleGraph(object):
                 self.gen_code(save_dir)
                 self.dump_parameter(save_dir)
         else:
-            if self.source_type == "pytorch":
+            if self.source_type == "pytorch" and code_optimizer:
                 from x2paddle.optimizer.pytorch_code_optimizer import ModuleGraph
                 module_graph = ModuleGraph(self)
                 module_graph.save_source_files(save_dir)
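
To make the gating above easier to follow, here is a small standalone sketch (plain Python, not X2Paddle code; the returned labels are simplified, and the non-optimized fallback is collapsed into one label) of which writer gen_model() selects once code_optimizer is considered:

    def choose_writer(jit_type, has_unpack, source_type, code_optimizer=True):
        # Mirrors the conditions in the hunks above: the HierarchicalTree and
        # ModuleGraph writers only run when code_optimizer is True.
        if jit_type == "trace":
            if not has_unpack and code_optimizer:
                return "HierarchicalTree"
            return "gen_code + dump_parameter"
        if source_type == "pytorch" and code_optimizer:
            return "ModuleGraph"
        return "gen_code + dump_parameter"

    print(choose_writer("trace", has_unpack=False,
                        source_type="pytorch", code_optimizer=False))
    # -> gen_code + dump_parameter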
x2paddle/op_mapper/pytorch2paddle/aten.py

@@ -2231,6 +2231,38 @@ def aten_floor_divide(mapper, graph, node):
     return current_inputs, current_outputs


+def aten_format(mapper, graph, node):
+    """ Construct a PaddleLayer for string formatting (aten::format).
+    TorchScript example:
+        %628 : str = aten::format(%8, %627)
+    Parameter meaning:
+        %628 (str): output, a formatted string
+        %8 (str): input format string
+        %627 (-): argument(s) passed to format
+    """
+    scope_name = mapper.normalize_scope_name(node)
+    output_name = mapper._get_outputs_name(node)[0]
+    layer_outputs = [output_name]
+    layer_inputs = {}
+    inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # list of outputs of the current node
+    current_outputs = [output_name]
+    # process the inputs
+    for i in range(len(inputs_node)):
+        mapper._check_input(graph, inputs_node[i], inputs_name[i],
+                            current_outputs, scope_name)
+        layer_inputs["input" + str(i)] = inputs_name[i]
+    # list of inputs of the current node
+    current_inputs = list(layer_inputs.values())
+
+    graph.add_layer(
+        "prim.format",
+        inputs=layer_inputs,
+        outputs=layer_outputs,
+        scope_name=scope_name)
+    return current_inputs, current_outputs
+
+
 def aten_full_like(mapper, graph, node):
     """ Construct a PaddleLayer that creates a Tensor with the same shape as
     the input and a fixed data type.
     TorchScript example:
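
For reference, a short self-contained sketch of the TorchScript pattern this mapper targets (FormatNet is a hypothetical example class, not from the repo): calling str.format inside a scripted forward lowers to an aten::format node, which aten_format() now turns into a prim.format layer.

    import torch

    class FormatNet(torch.nn.Module):
        def forward(self, x):
            # str.format in TorchScript becomes aten::format in the graph.
            return "shape {} x {}".format(x.size(0), x.size(1))

    scripted = torch.jit.script(FormatNet())
    print(scripted.graph)  # the printed graph contains an aten::format node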
x2paddle/op_mapper/pytorch2paddle/prim2code.py

@@ -304,6 +304,27 @@ def prim_floordiv(layer,
     forward_func.extend(gen_codes([line], indent=indent))


+def prim_format(layer,
+                indent=1,
+                init_func=[],
+                forward_func=[],
+                layer_id=None,
+                different_attrs=None):
+    line = ""
+    if len(layer.inputs) == 3:
+        line = "{} = {}.format({}, {})".format(
+            layer.outputs[0],
+            get_value(layer, "input0", different_attrs),
+            get_value(layer, "input1", different_attrs),
+            get_value(layer, "input2", different_attrs))
+    elif len(layer.inputs) == 2:
+        line = "{} = {}.format({})".format(
+            layer.outputs[0],
+            get_value(layer, "input0", different_attrs),
+            get_value(layer, "input1", different_attrs))
+    forward_func.extend(gen_codes([line], indent=indent))
+
+
 def prim_getitem(layer,
                  indent=1,
                  init_func=[],

@@ -609,8 +630,8 @@ def prim_or(layer,
     if is_return_line:
         return line.split(" = ")[1]
     forward_func.extend(gen_codes([line], indent=indent))


 def prim_remainder(layer,
                    indent=1,
                    init_func=[],

@@ -619,8 +640,8 @@ def prim_remainder(layer,
                    different_attrs=None,
                    is_return_line=False):
     line = "{} = {} % {}".format(layer.outputs[0],
-                                 get_value(layer, "x", different_attrs),
-                                 get_value(layer, "y", different_attrs))
+        get_value(layer, "x", different_attrs),
+        get_value(layer, "y", different_attrs))
     if is_return_line:
         return line.split(" = ")[1]
     forward_func.extend(gen_codes([line], indent=indent))
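
As a rough illustration of the code prim_format emits, a self-contained sketch with stand-in objects (FakeLayer and the simplified get_value below are stubs, not the real X2Paddle helpers): for a node with three inputs the generated Paddle line is an ordinary str.format call.

    class FakeLayer:
        # minimal stand-in for an X2Paddle layer object
        def __init__(self, outputs, inputs):
            self.outputs = outputs
            self.inputs = inputs

    def get_value(layer, key, different_attrs=None):
        # simplified: the real helper also consults layer attrs and different_attrs
        return layer.inputs[key]

    layer = FakeLayer(
        outputs=["x628"],
        inputs={"input0": "'{} vs {}'", "input1": "a", "input2": "b"})

    if len(layer.inputs) == 3:
        line = "{} = {}.format({}, {})".format(
            layer.outputs[0],
            get_value(layer, "input0"),
            get_value(layer, "input1"),
            get_value(layer, "input2"))
        print(line)  # x628 = '{} vs {}'.format(a, b)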
x2paddle/optimizer/pytorch_code_optimizer/layer_code_generator.py

@@ -257,6 +257,14 @@ def gen_layer_code(graph, sub_layers, sub_layers_name, different_attrs=dict()):
         if is_set_item:
             outputs.append(layer.outputs[0])
     no_output_count = 0
+    # remove to_tensor layer
+    invalid_list = list()
+    for layer_id, layer in sub_layers.items():
+        if layer.kernel == "paddle.to_tensor":
+            invalid_list.append(layer_id)
+            break
+    for layer_id in invalid_list:
+        sub_layers.pop(layer_id)
     for i, (layer_id, layer) in enumerate(sub_layers.items()):
         _update_attrs(layer, different_attrs)
         if ("paddle.nn" in layer.kernel and "functional" not in layer.kernel) or \
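
The to_tensor removal itself is a simple prune of the ordered layer dict before code generation. A standalone sketch of the same pattern (SimpleLayer is a hypothetical stand-in for X2Paddle's layer objects):

    from collections import OrderedDict

    class SimpleLayer:
        def __init__(self, kernel):
            self.kernel = kernel

    sub_layers = OrderedDict([
        ("10", SimpleLayer("paddle.to_tensor")),
        ("11", SimpleLayer("paddle.nn.Linear")),
    ])

    # Collect the first paddle.to_tensor layer, then drop it, mirroring the
    # loop-with-break added above.
    invalid_list = list()
    for layer_id, layer in sub_layers.items():
        if layer.kernel == "paddle.to_tensor":
            invalid_list.append(layer_id)
            break
    for layer_id in invalid_list:
        sub_layers.pop(layer_id)

    print(list(sub_layers.keys()))  # ['11']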