PaddlePaddle / X2Paddle
Commit b4da60c6
Authored May 10, 2022 by wjj19950828

resolve conflict

Parents: 2c34d762, b39b9ccf

Showing 12 changed files with 442 additions and 128 deletions (+442 −128)
- README.md (+1 −0)
- docs/inference_model_convertor/op_list.md (+2 −2)
- requirements.txt (+0 −4)
- setup.py (+4 −0)
- x2paddle/__init__.py (+1 −1)
- x2paddle/convert.py (+83 −7)
- x2paddle/op_mapper/pytorch2paddle/aten.py (+271 −103)
- x2paddle/optimizer/fusion/onnx_layernorm_fuser.py (+6 −6)
- x2paddle/optimizer/fusion/trace_fc_fuser.py (+7 −4)
- x2paddle/optimizer/optimizer.py (+6 −1)
- x2paddle/optimizer/pattern_matcher.py (+3 −0)
- x2paddle/utils.py (+58 −0)
README.md (view file @ b4da60c6)

```diff
@@ -121,6 +121,7 @@ x2paddle --framework=caffe --prototxt=deploy.prototxt --weight=deploy.caffemodel
 | --to_lite | **[Optional]** Whether to use the opt tool to convert the result into a Paddle-Lite-supported format; defaults to False |
 | --lite_valid_places | **[Optional]** Conversion targets; several backends may be listed at once (comma-separated), and opt automatically picks the best one; defaults to arm |
 | --lite_model_type | **[Optional]** Output model format; protobuf and naive_buffer are currently supported; defaults to naive_buffer |
+| --disable_feedback | **[Optional]** Whether to disable X2Paddle usage feedback. By default X2Paddle records the success rate of model conversions and the source framework, to help steer future development; user model files are never uploaded. To opt out of feedback, set this parameter to True |

 #### X2Paddle API
 X2Paddle can also convert models through a Python API; see [X2PaddleAPI](docs/inference_model_convertor/x2paddle_api.md)
```
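For the API route mentioned above, a minimal sketch of opting out of feedback from Python; the signature follows the onnx2paddle changes in x2paddle/convert.py below, and both file paths are placeholders:

```python
from x2paddle.convert import onnx2paddle

onnx2paddle(
    "model.onnx",            # placeholder: source ONNX model
    "pd_model",              # placeholder: output directory
    convert_to_lite=False,
    disable_feedback=True)   # opt out of the usage statistics described above
```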
docs/inference_model_convertor/op_list.md (view file @ b4da60c6)

```diff
@@ -114,8 +114,8 @@ Aten:
 | 117 | aten::bitwise\_not | 118 | aten::bitwise\_xor | 119 | aten::bitwise\_and | 120 | aten::silu |
 | 121 | aten::repeat\_interleave | 122 | aten::maxpool1d | 123 | aten::frobenius\_norm | 124 | aten::format |
 | 125 | aten::complex | 126 | aten::real | 127 | aten::imag | 128 | aten::fft\_rfftn |
-| 129 | aten::fft\_irfftn | | | | | | |
+| 129 | aten::fft\_irfftn | 130 | aten::hardsigmoid | 131 | aten::hardswish | 132 | aten::linear |
+| 133 | aten::rsqrt | 134 | aten::replication\_pad1d | 135 | aten::full | | |

 Prim:
 | No. | OP | No. | OP | No. | OP | No. | OP |
```
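As a quick way to see where such entries come from, a TorchScript trace can be printed and inspected for aten:: nodes; a small sketch, assuming a recent PyTorch install:

```python
import torch

def f(x):
    # both ops appear in the trace as aten::hardsigmoid / aten::rsqrt,
    # two of the newly listed entries above
    return torch.nn.functional.hardsigmoid(x) * torch.rsqrt(x + 2.0)

traced = torch.jit.trace(f, torch.randn(2, 3))
print(traced.graph)
```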
requirements.txt (view file @ b4da60c6)

```diff
-pre-commit
-yapf == 0.28.0
-pandas
-treelib
```
setup.py (view file @ b4da60c6)

```diff
@@ -6,6 +6,9 @@ long_description += "Usage: x2paddle --framework tensorflow --model tf_model.pb
 long_description += "GitHub: https://github.com/PaddlePaddle/X2Paddle\n"
 long_description += "Email: dltp-sz@baidu.com"

+with open("requirements.txt") as fin:
+    REQUIRED_PACKAGES = fin.read()
+
 setuptools.setup(
     name="x2paddle",
     version=x2paddle.__version__,
@@ -16,6 +19,7 @@ setuptools.setup(
     long_description_content_type="text/plain",
     url="https://github.com/PaddlePaddle/x2paddle",
     packages=setuptools.find_packages(),
+    install_requires=REQUIRED_PACKAGES,
     classifiers=[
         "Programming Language :: Python :: 3",
         "License :: OSI Approved :: Apache Software License",
```
x2paddle/__init__.py (view file @ b4da60c6)

```diff
-__version__ = "1.3.5"
+__version__ = "1.3.6"

 from .core.program import PaddleGraph
```
x2paddle/convert.py (view file @ b4da60c6)

```diff
@@ -14,9 +14,11 @@
 from six import text_type as _text_type
 from x2paddle import program
+from x2paddle.utils import ConverterCheck
 import argparse
 import sys
 import logging
+import time


 def arg_parser():
@@ -93,6 +95,11 @@ def arg_parser():
         "-co",
         default=True,
         help="Turn on code optimization")
+    parser.add_argument(
+        "--disable_feedback",
+        "-df",
+        default=False,
+        help="Tune off feedback of model conversion.")
     parser.add_argument(
         "--to_lite",
         "-tl",
         default=False,
         help="convert to Paddle-Lite format")
     parser.add_argument(
@@ -130,7 +137,14 @@ def tf2paddle(model_path,
               define_input_shape=False,
               convert_to_lite=False,
               lite_valid_places="arm",
-              lite_model_type="naive_buffer"):
+              lite_model_type="naive_buffer",
+              disable_feedback=False):
+    # for convert_id
+    time_info = int(time.time())
+    if not disable_feedback:
+        ConverterCheck(
+            task="TensorFlow", time_info=time_info,
+            convert_state="Start").start()
     # check tensorflow installation and version
     try:
         import os
@@ -162,10 +176,22 @@ def tf2paddle(model_path,
     logging.info("Model optimized!")

     mapper.paddle_graph.gen_model(save_dir)
     logging.info("Successfully exported Paddle static graph model!")
+    if not disable_feedback:
+        ConverterCheck(
+            task="TensorFlow", time_info=time_info,
+            convert_state="Success").start()
     if convert_to_lite:
         logging.info("Now translating model from Paddle to Paddle Lite ...")
+        if not disable_feedback:
+            ConverterCheck(
+                task="TensorFlow", time_info=time_info,
+                lite_state="Start").start()
         convert2lite(save_dir, lite_valid_places, lite_model_type)
         logging.info("Successfully exported Paddle Lite support model!")
+        if not disable_feedback:
+            ConverterCheck(
+                task="TensorFlow", time_info=time_info,
+                lite_state="Success").start()


 def caffe2paddle(proto_file,
@@ -174,7 +200,13 @@ def caffe2paddle(proto_file,
                  caffe_proto,
                  convert_to_lite=False,
                  lite_valid_places="arm",
-                 lite_model_type="naive_buffer"):
+                 lite_model_type="naive_buffer",
+                 disable_feedback=False):
+    # for convert_id
+    time_info = int(time.time())
+    if not disable_feedback:
+        ConverterCheck(
+            task="Caffe", time_info=time_info, convert_state="Start").start()
     from x2paddle.decoder.caffe_decoder import CaffeDecoder
     from x2paddle.op_mapper.caffe2paddle.caffe_op_mapper import CaffeOpMapper
     import google.protobuf as gpb
@@ -195,17 +227,32 @@ def caffe2paddle(proto_file,
     logging.info("Model optimized!")

     mapper.paddle_graph.gen_model(save_dir)
     logging.info("Successfully exported Paddle static graph model!")
+    if not disable_feedback:
+        ConverterCheck(
+            task="Caffe", time_info=time_info, convert_state="Success").start()
     if convert_to_lite:
         logging.info("Now translating model from Paddle to Paddle Lite ...")
+        if not disable_feedback:
+            ConverterCheck(
+                task="Caffe", time_info=time_info, lite_state="Start").start()
         convert2lite(save_dir, lite_valid_places, lite_model_type)
         logging.info("Successfully exported Paddle Lite support model!")
+        if not disable_feedback:
+            ConverterCheck(
+                task="Caffe", time_info=time_info, lite_state="Success").start()


 def onnx2paddle(model_path,
                 save_dir,
                 convert_to_lite=False,
                 lite_valid_places="arm",
-                lite_model_type="naive_buffer"):
+                lite_model_type="naive_buffer",
+                disable_feedback=False):
+    # for convert_id
+    time_info = int(time.time())
+    if not disable_feedback:
+        ConverterCheck(
+            task="ONNX", time_info=time_info, convert_state="Start").start()
     # check onnx installation and version
     try:
         import onnx
@@ -233,10 +280,19 @@ def onnx2paddle(model_path,
     logging.info("Model optimized.")

     mapper.paddle_graph.gen_model(save_dir)
     logging.info("Successfully exported Paddle static graph model!")
+    if not disable_feedback:
+        ConverterCheck(
+            task="ONNX", time_info=time_info, convert_state="Success").start()
     if convert_to_lite:
         logging.info("Now translating model from Paddle to Paddle Lite ...")
+        if not disable_feedback:
+            ConverterCheck(
+                task="ONNX", time_info=time_info, lite_state="Start").start()
         convert2lite(save_dir, lite_valid_places, lite_model_type)
         logging.info("Successfully exported Paddle Lite support model!")
+        if not disable_feedback:
+            ConverterCheck(
+                task="ONNX", time_info=time_info, lite_state="Success").start()


 def pytorch2paddle(module,
@@ -246,7 +302,13 @@ def pytorch2paddle(module,
                    enable_code_optim=True,
                    convert_to_lite=False,
                    lite_valid_places="arm",
-                   lite_model_type="naive_buffer"):
+                   lite_model_type="naive_buffer",
+                   disable_feedback=False):
+    # for convert_id
+    time_info = int(time.time())
+    if not disable_feedback:
+        ConverterCheck(
+            task="PyTorch", time_info=time_info, convert_state="Start").start()
     # check pytorch installation and version
     try:
         import torch
@@ -287,10 +349,21 @@ def pytorch2paddle(module,
     mapper.paddle_graph.gen_model(
         save_dir, jit_type=jit_type, enable_code_optim=enable_code_optim)
     logging.info("Successfully exported Paddle static graph model!")
+    if not disable_feedback:
+        ConverterCheck(
+            task="PyTorch", time_info=time_info,
+            convert_state="Success").start()
     if convert_to_lite:
         logging.info("Now translating model from Paddle to Paddle Lite ...")
+        if not disable_feedback:
+            ConverterCheck(
+                task="PyTorch", time_info=time_info,
+                lite_state="Start").start()
         convert2lite(save_dir, lite_valid_places, lite_model_type)
         logging.info("Successfully exported Paddle Lite support model!")
+        if not disable_feedback:
+            ConverterCheck(
+                task="PyTorch", time_info=time_info,
+                lite_state="Success").start()


 def main():
@@ -351,7 +424,8 @@ def main():
             define_input_shape,
             convert_to_lite=args.to_lite,
             lite_valid_places=args.lite_valid_places,
-            lite_model_type=args.lite_model_type)
+            lite_model_type=args.lite_model_type,
+            disable_feedback=args.disable_feedback)
     elif args.framework == "caffe":
         assert args.prototxt is not None and args.weight is not None, "--prototxt and --weight should be defined while translating caffe model"
@@ -362,7 +436,8 @@ def main():
             args.caffe_proto,
             convert_to_lite=args.to_lite,
             lite_valid_places=args.lite_valid_places,
-            lite_model_type=args.lite_model_type)
+            lite_model_type=args.lite_model_type,
+            disable_feedback=args.disable_feedback)
     elif args.framework == "onnx":
         assert args.model is not None, "--model should be defined while translating onnx model"
         onnx2paddle(
@@ -370,7 +445,8 @@ def main():
             args.save_dir,
             convert_to_lite=args.to_lite,
             lite_valid_places=args.lite_valid_places,
-            lite_model_type=args.lite_model_type)
+            lite_model_type=args.lite_model_type,
+            disable_feedback=args.disable_feedback)
     elif args.framework == "paddle2onnx":
         logging.info(
             "Paddle to ONNX tool has been migrated to the new github: https://github.com/PaddlePaddle/paddle2onnx"
```
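A hedged usage sketch of the new keyword through the Python API, based on the pytorch2paddle signature above (the module, output directory, and example input are illustrative):

```python
import torch
from x2paddle.convert import pytorch2paddle

# illustrative module; any traceable torch.nn.Module works
module = torch.nn.Sequential(torch.nn.Linear(8, 4), torch.nn.Hardswish()).eval()
pytorch2paddle(
    module,
    save_dir="pd_model",                 # placeholder output directory
    jit_type="trace",
    input_examples=[torch.randn(1, 8)],
    disable_feedback=True)               # skips every ConverterCheck call above
```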
x2paddle/op_mapper/pytorch2paddle/aten.py (view file @ b4da60c6)

```diff
@@ -2416,6 +2416,53 @@ def aten_format(mapper, graph, node):
     return current_inputs, current_outputs


+def aten_full(mapper, graph, node):
+    """
+    TorchScript Code:
+        %159 : Tensor = aten::full(%775, %50, %49, %56, %48, %53)
+    Parameter meaning:
+        %159 (Tensor): Output Tensor
+        %775 (Tensor): size
+        %50 (int/float/bool): fill_value
+        %49 (int): dtype
+        %56 (int): layout
+        %48 (int): device
+        %53 (bool): requires_grad
+    """
+    scope_name = mapper.normalize_scope_name(node)
+    output_name = mapper._get_outputs_name(node)[0]
+    layer_outputs = [output_name]
+    layer_inputs = {}
+    layer_attrs = {}
+    inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # output list
+    current_outputs = [output_name]
+    mapper._check_input(graph, inputs_node[0], inputs_name[0],
+                        current_outputs, scope_name)
+    layer_inputs["shape"] = inputs_name[0]
+    # input list
+    current_inputs = list(layer_inputs.values())
+    if inputs_name[1] in mapper.attrs:
+        layer_attrs["fill_value"] = mapper.attrs[inputs_name[1]]
+    else:
+        mapper._check_input(graph, inputs_node[1], inputs_name[1],
+                            current_outputs, scope_name)
+        layer_inputs["fill_value"] = inputs_name[1]
+        current_inputs.append(inputs_name[1])
+    # dtype
+    if mapper.attrs[inputs_name[2]] is not None:
+        layer_attrs["dtype"] = dtype_dict[mapper.attrs[inputs_name[2]]]
+    graph.add_layer(
+        "paddle.full",
+        inputs=layer_inputs,
+        outputs=layer_outputs,
+        scope_name=scope_name,
+        **layer_attrs)
+    return current_inputs, current_outputs
+
+
 def aten_full_like(mapper, graph, node):
     """ 构造创建一个与输入具有相同的形状并且数据类型固定的Tensor的PaddleLayer。
     TorchScript示例:
```
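As an illustration of where this mapper fires, a toy module whose trace can emit aten::full (a sketch, assuming PyTorch is installed; shape and fill value are arbitrary):

```python
import torch

class FullNet(torch.nn.Module):
    def forward(self, x):
        # traced as aten::full when the size comes from tensor shapes;
        # the aten_full mapper above turns it into paddle.full
        pad = torch.full((x.shape[0], 2), 0.5)
        return torch.cat([x, pad], dim=1)
```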
```diff
@@ -2743,46 +2790,101 @@ def aten_hardtanh(mapper, graph, node):
     return current_inputs, current_outputs


+def aten_hardsigmoid(mapper, graph, node):
+    """
+    TorchScript Code:
+        %55 : Tensor = aten::hardsigmoid(%54)
+    Parameter meaning:
+        %55 (Tensor): output
+        %54 (Tensor): input tensor
+    """
+    scope_name = mapper.normalize_scope_name(node)
+    op_name = name_generator("hardsigmoid", mapper.nn_name2id)
+    output_name = mapper._get_outputs_name(node)[0]
+    layer_outputs = [op_name, output_name]
+    layer_inputs = {}
+    inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # outputs list
+    current_outputs = [output_name]
+    # inputs list
+    mapper._check_input(graph, inputs_node[0], inputs_name[0],
+                        current_outputs, scope_name)
+    layer_inputs["x"] = inputs_name[0]
+    current_inputs = list(layer_inputs.values())
+    graph.add_layer(
+        "paddle.nn.Hardsigmoid",
+        inputs=layer_inputs,
+        outputs=layer_outputs,
+        scope_name=scope_name)
+    return current_inputs, current_outputs
+
+
+def aten_hardswish(mapper, graph, node):
+    """
+    TorchScript Code:
+        %55 : Tensor = aten::hardswish(%54)
+    Parameter meaning:
+        %55 (Tensor): output
+        %54 (Tensor): input tensor
+    """
+    scope_name = mapper.normalize_scope_name(node)
+    op_name = name_generator("hardswish", mapper.nn_name2id)
+    output_name = mapper._get_outputs_name(node)[0]
+    layer_outputs = [op_name, output_name]
+    layer_inputs = {}
+    inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # outputs list
+    current_outputs = [output_name]
+    # inputs list
+    mapper._check_input(graph, inputs_node[0], inputs_name[0],
+                        current_outputs, scope_name)
+    layer_inputs["x"] = inputs_name[0]
+    current_inputs = list(layer_inputs.values())
+    graph.add_layer(
+        "paddle.nn.Hardswish",
+        inputs=layer_inputs,
+        outputs=layer_outputs,
+        scope_name=scope_name)
+    return current_inputs, current_outputs
+
+
 def aten_index(mapper, graph, node):
-    """ 构造选择元素的PaddleLayer。
-    TorchScript示例:
+    """
+    TorchScript Code:
         %1681 : Float = aten::index(%1653, %1680)
-    参数含义:
-        %1681 (Tensor): 输出,选择后的Tensor。
-        %1653 (Tensor): 需要选择的Tensor。
-        %1680 (int): 选择的索引。
+    Parameter meaning:
+        %1681 (Tensor): Output Tensor
+        %1653 (Tensor): Input Tensor
+        %1680 (int): Index
     """
     scope_name = mapper.normalize_scope_name(node)
     output_name = mapper._get_outputs_name(node)[0]
     layer_outputs = [output_name]
     layer_inputs = {}
     layer_attrs = {}
     inputs_name, inputs_node = mapper._get_inputs_name(node)
-    # 获取当前节点输出的list
+    # output list
     current_outputs = [output_name]
-    # 处理输入0,即%1653
+    # process Input Tensor
     mapper._check_input(graph, inputs_node[0], inputs_name[0],
                         current_outputs, scope_name)
     layer_inputs["x"] = inputs_name[0]
-    # 处理输入1,即%1680
+    # process Index
     mapper._check_input(graph, inputs_node[1], inputs_name[1],
                         current_outputs, scope_name)
     layer_inputs["index"] = inputs_name[1]
     # 获取当前节点输入的list
     current_inputs = list(layer_inputs.values())
     graph.add_layer(
         "prim.getitem",
         inputs={"list": layer_inputs["index"]},
         outputs=[layer_inputs["index"]],
         scope_name=scope_name,
         index=0)
     graph.add_layer(
         "paddle.index_select",
-        inputs=layer_inputs,
+        inputs={"list": layer_inputs["x"]},
         outputs=layer_outputs,
         scope_name=scope_name,
-        **layer_attrs)
+        index=layer_inputs["index"])
     return current_inputs, current_outputs
```
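Both frameworks define these activations identically (hardswish is x * relu6(x + 3) / 6), which is why the mappers above can translate them one-to-one; a small numeric check, assuming both torch and paddle are installed:

```python
import numpy as np
import paddle
import torch

x = np.linspace(-4.0, 4.0, 9, dtype="float32")
ref = torch.nn.Hardswish()(torch.from_numpy(x)).numpy()
out = paddle.nn.Hardswish()(paddle.to_tensor(x)).numpy()
# identical piecewise definition, so the results should agree
assert np.abs(ref - out).max() < 1e-6
```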
```diff
@@ -3176,6 +3278,53 @@ def aten_len(mapper, graph, node):
     return current_inputs, current_outputs


+def aten_linear(mapper, graph, node):
+    """
+    TorchScript Code:
+        %x.6 : Float(1, 128, strides=[128, 1]) = aten::linear(%input.305, %weight.629, %bias.317)
+    Parameter meaning:
+        %x.6 (Tensor): output
+        %input.305 (Tensor): input tensor
+        %weight.629 (Tensor): weight tensor
+        %bias.317 (Tensor): bias tensor
+    """
+    scope_name = mapper.normalize_scope_name(node)
+    output_name = mapper._get_outputs_name(node)[0]
+    layer_outputs = [output_name]
+    layer_inputs = {}
+    layer_attrs = {}
+    inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # outputs list
+    current_outputs = [output_name]
+    # inputs list
+    mapper._check_input(graph, inputs_node[0], inputs_name[0],
+                        current_outputs, scope_name)
+    layer_inputs["x"] = inputs_name[0]
+    # transpose weight
+    mapper._check_input(graph, inputs_node[1], inputs_name[1],
+                        current_outputs, scope_name)
+    layer_inputs["y"] = inputs_name[1]
+    layer_attrs["transpose_y"] = True
+    graph.add_layer(
+        "paddle.matmul",
+        inputs=layer_inputs,
+        outputs=layer_outputs,
+        scope_name=scope_name,
+        **layer_attrs)
+    if len(inputs_name) == 3:
+        mapper._check_input(graph, inputs_node[2], inputs_name[2],
+                            current_outputs, scope_name)
+        graph.add_layer(
+            "paddle.add",
+            inputs={"x": output_name,
+                    "y": inputs_name[2]},
+            outputs=layer_outputs,
+            scope_name=scope_name)
+    current_inputs = list(layer_inputs.values())
+    return current_inputs, current_outputs
+
+
 def aten_log(mapper, graph, node):
     """ 构构造log的PaddleLayer。
     TorchScript示例:
```
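The mapper above decomposes aten::linear into a transposed matmul plus a bias add; the same computation written directly in Paddle (shapes are illustrative):

```python
import paddle

x = paddle.randn([2, 8])
w = paddle.randn([16, 8])   # PyTorch Linear stores weight as [out, in]
b = paddle.randn([16])
# the decomposition emitted by aten_linear above
y = paddle.add(paddle.matmul(x, w, transpose_y=True), b)
print(y.shape)              # [2, 16]
```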
```diff
@@ -3378,109 +3527,62 @@ def aten_lt(mapper, graph, node):
 def aten_masked_fill(mapper, graph, node):
-    """ 构造填充mask的PaddleLayer。
-    TorchScript示例:
+    """
+    TorchScript Code:
         %input.4 : Tensor = aten::masked_fill(%scores.2, %mask.2, %46)
-    参数含义:
-        %input.4 (Tensor): 输出,填充后的结果。
-        %scores.2 (Tensor): 需要填充的Tensor。
-        %mask.2 (Tensor): bool型的Tensor,哪些位置需要填充。
-        %46 (-): 填充的值。
+    Parameter meaning:
+        %input.4 (Tensor): Output Tensor
+        %scores.2 (Tensor): Input Tensor
+        %mask.2 (Tensor): bool mask
+        %46 (-): fill value
     """
     scope_name = mapper.normalize_scope_name(node)
     output_name = mapper._get_outputs_name(node)[0]
     layer_outputs = [output_name]
     inputs_name, inputs_node = mapper._get_inputs_name(node)
-    # 获取当前节点输入的list
+    layer_full_inputs = {}
+    layer_full_attrs = {}
+    layer_where_inputs = {}
     current_inputs = []
     # 获取当前节点输出的list
     current_outputs = [output_name]
-    # 处理输入0,即%input.4
+    # input list
     mapper._check_input(graph, inputs_node[0], inputs_name[0],
                         current_outputs, scope_name)
     current_inputs.append(inputs_name[0])
+    # paddle.full
     graph.add_layer(
-        "prim.type",
+        "prim.shape",
         inputs={"input": inputs_name[0]},
-        outputs=[inputs_name[0] + "_type"],
+        outputs=[inputs_name[0] + "_shape"],
         scope_name=scope_name)
     # 处理输入1,即%scores.2
     mapper._check_input(graph, inputs_node[1], inputs_name[1],
                         current_outputs, scope_name)
     current_inputs.append(inputs_name[1])
-    graph.add_layer(
-        "paddle.logical_not",
-        inputs={"x": inputs_name[1]},
-        outputs=[inputs_name[1] + "_not"],
-        scope_name=scope_name)
-    graph.add_layer(
-        "paddle.cast",
-        inputs={"x": inputs_name[1]},
-        outputs=[inputs_name[1] + "_mask"],
-        scope_name=scope_name,
-        dtype=inputs_name[0] + "_type")
-    graph.add_layer(
-        "paddle.cast",
-        inputs={"x": inputs_name[1] + "_not"},
-        outputs=[inputs_name[1] + "_not_mask"],
-        scope_name=scope_name,
-        dtype=inputs_name[0] + "_type")
+    layer_full_inputs["shape"] = inputs_name[0] + "_shape"
+    if inputs_name[2] in mapper.attrs:
+        layer_full_attrs["fill_value"] = mapper.attrs[inputs_name[2]]
+    else:
+        mapper._check_input(graph, inputs_node[2], inputs_name[2],
+                            current_outputs, scope_name)
+        layer_full_inputs["fill_value"] = inputs_name[2]
+        current_inputs.append(inputs_name[2])
     graph.add_layer(
-        "paddle.multiply",
-        inputs={"x": inputs_name[0],
-                "y": inputs_name[1] + "_not_mask"},
-        outputs=[inputs_name[0] + "_not_mask"],
+        "prim.type",
+        inputs={"input": inputs_name[0]},
+        outputs=[inputs_name[0] + "_type"],
         scope_name=scope_name)
-    # 处理输入2,即%46
-    mapper._check_input(graph, inputs_node[2], inputs_name[2],
-                        current_outputs, scope_name)
+    layer_full_attrs["dtype"] = inputs_name[0] + "_type"
     graph.add_layer(
-        "prim.eq",
-        inputs={"x": inputs_name[2]},
-        outputs=[inputs_name[2] + "_cond1"],
-        scope_name=scope_name,
-        y="-float('inf')")
-    graph.add_layer(
-        "prim.eq",
-        inputs={"x": inputs_name[2]},
-        outputs=[inputs_name[2] + "_cond2"],
-        scope_name=scope_name,
-        y="float('inf')")
-    graph.add_layer(
-        "prim.or",
-        inputs={"x": inputs_name[2] + "_cond1",
-                "y": inputs_name[2] + "_cond2"},
-        outputs=[inputs_name[2] + "_cond"],
-        scope_name=scope_name)
-    graph.add_layer(
-        "prim.if", {'input': inputs_name[2] + "_cond"},
-        outputs=[inputs_name[2] + "_if"],
-        scope_name=scope_name)
-    if_layer = graph.layers[list(graph.layers.keys())[-1]]
-    block = PaddleGraph(source_type="pytorch", parent_layer=if_layer)
-    block.add_layer(
-        "prim.equal",
-        inputs={"input": inputs_name[1] + "_mask"},
-        outputs=[inputs_name[2] + "_1"],
-        scope_name=scope_name)
-    if_layer.add_block(block)
-    block = PaddleGraph(source_type="pytorch", parent_layer=if_layer)
-    block.add_layer(
-        "prim.mul",
-        inputs={"x": inputs_name[1] + "_mask",
-                "y": inputs_name[2]},
-        outputs=[inputs_name[2] + "_1"],
-        scope_name=scope_name)
-    if_layer.add_block(block)
-    if_layer.inputs["input-0"] = inputs_name[1] + "_mask"
-    if_layer.inputs["input-1"] = inputs_name[2]
-    if_layer.outputs.append(inputs_name[2] + "_1")
+        "paddle.full",
+        inputs=layer_full_inputs,
+        outputs=[inputs_name[0] + "_full"],
+        scope_name=scope_name,
+        **layer_full_attrs)
+    # paddle.where
+    layer_where_inputs["condition"] = inputs_name[1]
+    layer_where_inputs["x"] = inputs_name[0] + "_full"
+    layer_where_inputs["y"] = inputs_name[0]
     graph.add_layer(
-        "paddle.add",
-        inputs={"x": inputs_name[2] + "_1",
-                "y": inputs_name[0] + "_not_mask"},
+        "paddle.where",
+        inputs=layer_where_inputs,
         outputs=layer_outputs,
         scope_name=scope_name)
     return current_inputs, current_outputs
```
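The rewritten lowering replaces the old logical_not/cast/if machinery with a full + where pair; the same idea expressed directly in Paddle (mask and fill value are illustrative):

```python
import paddle

scores = paddle.randn([2, 3])
mask = scores < 0
fill = paddle.full(paddle.shape(scores), -1e9, dtype=scores.dtype)
# keep scores where the mask is False, take the filled constant elsewhere
out = paddle.where(mask, fill, scores)
```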
```diff
@@ -4547,6 +4649,42 @@ def aten_repeat_interleave(mapper, graph, node):
     return current_inputs, current_outputs


+def aten_replication_pad1d(mapper, graph, node):
+    """
+    TorchScript Code:
+        %58 : Tensor = aten::replication_pad1d(%input.1, %152)
+    Parameter meaning:
+        %58 (Tensor): Output Tensor
+        %input.1 (Tensor): Input Tensor
+        %152 (list): Padding size
+    """
+    scope_name = mapper.normalize_scope_name(node)
+    op_name = name_generator("pad", mapper.nn_name2id)
+    output_name = mapper._get_outputs_name(node)[0]
+    layer_outputs = [op_name, output_name]
+    layer_inputs = {}
+    layer_attrs = {}
+    inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # output list
+    current_outputs = [output_name]
+    # input list
+    mapper._check_input(graph, inputs_node[0], inputs_name[0],
+                        current_outputs, scope_name)
+    layer_inputs["input"] = inputs_name[0]
+    layer_attrs["padding"] = mapper.attrs[inputs_name[1]]
+    layer_attrs["mode"] = string("replicate")
+    current_inputs = list(layer_inputs.values())
+    graph.add_layer(
+        "paddle.nn.Pad1D",
+        inputs=layer_inputs,
+        outputs=layer_outputs,
+        scope_name=scope_name,
+        **layer_attrs)
+    return current_inputs, current_outputs
+
+
 def aten_reshape(mapper, graph, node):
     """ 构造调整大小的PaddleLayer。
     TorchScript示例:
```
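paddle.nn.Pad1D with mode="replicate", as emitted above, repeats the edge values of the last dimension; a tiny sketch:

```python
import paddle

x = paddle.to_tensor([[[1.0, 2.0, 3.0]]])               # [N, C, L] = [1, 1, 3]
pad = paddle.nn.Pad1D(padding=[1, 2], mode="replicate")
print(pad(x))   # edge values repeated: [[[1, 1, 2, 3, 3, 3]]]
```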
```diff
@@ -4678,6 +4816,36 @@ def aten_rsub(mapper, graph, node):
     return current_inputs, current_outputs


+def aten_rsqrt(mapper, graph, node):
+    """
+    TorchScript Code:
+        %n0.3 : Tensor = aten::rsqrt(%n.3)
+    Parameter meaning:
+        %n0.3 (Tensor): output tensor
+        %n.3 (Tensor): input tensor
+    """
+    scope_name = mapper.normalize_scope_name(node)
+    output_name = mapper._get_outputs_name(node)[0]
+    layer_outputs = [output_name]
+    layer_inputs = {}
+    inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # outputs list
+    current_outputs = [output_name]
+    # inputs list
+    mapper._check_input(graph, inputs_node[0], inputs_name[0],
+                        current_outputs, scope_name)
+    layer_inputs["x"] = inputs_name[0]
+    current_inputs = list(layer_inputs.values())
+    graph.add_layer(
+        "paddle.rsqrt",
+        inputs=layer_inputs,
+        outputs=layer_outputs,
+        scope_name=scope_name)
+    return current_inputs, current_outputs
+
+
 def aten_ScalarImplicit(mapper, graph, node):
     """ 构造获取scalar的PaddleLayer。
     TorchScript示例:
```
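aten::rsqrt maps directly onto paddle.rsqrt, the elementwise reciprocal square root:

```python
import paddle

x = paddle.to_tensor([4.0, 16.0])
print(paddle.rsqrt(x))   # [0.5, 0.25], i.e. 1 / sqrt(x)
```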
x2paddle/optimizer/fusion/onnx_layernorm_fuser.py (view file @ b4da60c6)

```diff
@@ -56,11 +56,7 @@ class LayerNormFuser(FuseBase):
             shape=[1],
             fill_value=0.5)
         self.pattern.add_layer(
-            "paddle.full",
-            inputs={},
-            outputs=[gen_name(3)],
-            shape=[1],
-            fill_value=9.999999747378752e-06)
+            "paddle.full", inputs={}, outputs=[gen_name(3)], shape=[1])
         self.pattern.add_layer(
             "paddle.mean",
             inputs={"x": "layernorm-input-0"},
@@ -122,6 +118,7 @@ class LayerNormFuser(FuseBase):
         layer_inputs = list()
         layer_inputs_ids = list()
         param_name = list()
+        fill_value_list = list()
         for layer_id, layer in matches.items():
             if layer.kernel == "paddle.mean":
                 layer_inputs.append(layer.inputs)
@@ -130,6 +127,8 @@ class LayerNormFuser(FuseBase):
                 param_name.append(layer.outputs[0])
             if layer.kernel == "paddle.add":
                 output_name = layer.outputs[0]
+            if layer.kernel == "paddle.full":
+                fill_value_list.append(layer.attrs["fill_value"])
         param = parameters[param_name[0]]
         c = param.shape[0]
         weight_param = parameters.pop(param_name[0])
@@ -141,5 +140,6 @@ class LayerNormFuser(FuseBase):
             "paddle.nn.LayerNorm",
             inputs=layer_inputs[0],
             outputs=[output_name],
-            normalized_shape=[c])
+            normalized_shape=[c],
+            epsilon=fill_value_list[-1])
         return new_layer, layer_inputs_ids[0]
```
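The fuser now matches any fill_value in the epsilon position and forwards it to the fused layer, instead of assuming ONNX's default 9.999999747378752e-06; a sketch of the resulting layer, with an illustrative epsilon:

```python
import paddle

x = paddle.randn([2, 8])
# epsilon comes from the matched pattern rather than a hard-coded default
fused = paddle.nn.LayerNorm(normalized_shape=[8], epsilon=1e-5)
print(fused(x).shape)   # [2, 8]
```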
x2paddle/optimizer/fusion/trace_fc_fuser.py (view file @ b4da60c6)

```diff
@@ -113,10 +113,13 @@ class TraceFcFuser(FuseBase):
         attrs["out_features"] = parameters[weight_name].shape[0]
         linear_name = "linear{}".format(self.linear_index)
         self.linear_index += 1
-        parameters["{}.weight".format(linear_name)] = parameters[
-            weight_name].transpose((1, 0))
-        parameters["{}.bias".format(linear_name)] = np.squeeze(parameters[
-            bias_name])
+        weight_numpy = parameters[weight_name]
+        parameters["{}.weight".format(linear_name)] = weight_numpy.transpose(
+            (1, 0))
+        self.rm_params.add(weight_name)
+        bias_numpy = parameters[bias_name]
+        parameters["{}.bias".format(linear_name)] = np.squeeze(bias_numpy)
+        self.rm_params.add(bias_name)
         new_layer = PaddleLayer(
             layers_id[0],
             "paddle.nn.Linear",
```
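The weight handling mirrors the layout difference between the two frameworks: PyTorch Linear weights are [out_features, in_features], while paddle.nn.Linear expects [in_features, out_features]; a sketch in NumPy with illustrative shapes:

```python
import numpy as np

torch_weight = np.random.rand(16, 8).astype("float32")  # [out, in]
torch_bias = np.random.rand(1, 16).astype("float32")
# what the fuser stores for the fused paddle.nn.Linear:
paddle_weight = torch_weight.transpose((1, 0))           # [in, out]
paddle_bias = np.squeeze(torch_bias)                     # [out]
```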
x2paddle/optimizer/optimizer.py (view file @ b4da60c6)

```diff
@@ -42,17 +42,22 @@ class GraphOptimizer(object):
             self.passes = []

     def optimize(self, graph):
+        show_pass_log = False
         for pass_name in self.passes:
             pass_ = PassManager.lookup(pass_name)()
             if pass_name.endswith("_eliminate_pass") or pass_name.endswith(
                     "conv2d_add_fuse_pass"):
                 pass_.apply(graph)
+                show_pass_log = True
             else:
                 while True:
                     before_len = len(graph.layers)
                     pass_.apply(graph)
                     after_len = len(graph.layers)
+                    if after_len < before_len:
+                        show_pass_log = True
                     if before_len == after_len:
                         break
-            print("{} done!".format(pass_name))
+            if show_pass_log:
+                print("{} done!".format(pass_name))
         return graph
```
x2paddle/optimizer/pattern_matcher.py (view file @ b4da60c6)

```diff
@@ -325,6 +325,7 @@ class FuseBase(object):
     def __init__(self):
         self.pattern = PaddleGraph()
         self.patterns = list()
+        self.rm_params = set()

     def operate(self, graph, match_kind="topo"):
         parameters = graph.parameters
@@ -335,6 +336,8 @@ class FuseBase(object):
             subgraph = get_subgraph("", first_layer_id, graph)
             self.insert_new_layer(subgraph, parameters, match)
         self.delete_match(graph)
+        for param_name in self.rm_params:
+            parameters.pop(param_name)
         graph.build()

     def perform_pattern_matcher(self, graph, match_kind="topo"):
```
x2paddle/utils.py (view file @ b4da60c6)

```diff
@@ -14,6 +14,14 @@
 # limitations under the License.

 import paddle
+import x2paddle
+import hashlib
+import requests
+import threading
+import uuid
+import json
+
+stats_api = "http://paddlepaddle.org.cn/paddlehub/stat"


 def string(param):
@@ -32,6 +40,56 @@ def check_version():
     return True


+def _md5(text: str):
+    '''Calculate the md5 value of the input text.'''
+    md5code = hashlib.md5(text.encode())
+    return md5code.hexdigest()
+
+
+class ConverterCheck(threading.Thread):
+    """
+    Count the number of calls to model convertion
+    """
+
+    def __init__(self,
+                 task="ONNX",
+                 time_info=None,
+                 convert_state=None,
+                 lite_state=None,
+                 extra_info=None):
+        threading.Thread.__init__(self)
+        self._task = task
+        self._version = x2paddle.__version__
+        self._convert_state = convert_state
+        self._lite_state = lite_state
+        self._extra_info = extra_info
+        self._convert_id = _md5(str(uuid.uuid1())[-12:]) + "-" + str(time_info)
+
+    def run(self):
+        params = {
+            'task': self._task,
+            'x2paddle_version': self._version,
+            'paddle_version': paddle.__version__,
+            'from': 'x2paddle'
+        }
+        extra = {
+            'convert_state': self._convert_state,
+            'convert_id': self._convert_id,
+        }
+        if self._lite_state is not None:
+            extra.update({'lite_state': self._lite_state})
+        if self._extra_info is not None:
+            extra.update(self._extra_info)
+        params.update({"extra": json.dumps(extra)})
+
+        try:
+            requests.get(stats_api, params, timeout=2)
+        except Exception:
+            pass
+
+        return
+
+
 class PaddleDtypes():
     def __init__(self, is_new_version=True):
         if is_new_version:
```
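ConverterCheck is a fire-and-forget thread: run() issues a single GET to stats_api with a 2-second timeout and swallows any exception, so a conversion never blocks on telemetry. Typical use, as wired into convert.py above:

```python
import time
from x2paddle.utils import ConverterCheck

# started at the beginning of a conversion; an analogous call with
# convert_state="Success" fires after the model is exported
ConverterCheck(
    task="ONNX", time_info=int(time.time()),
    convert_state="Start").start()
```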