Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
PaddlePaddle
X2Paddle
提交
2c34d762
X
X2Paddle
项目概览
PaddlePaddle
/
X2Paddle
接近 2 年 前同步成功
通知
329
Star
698
Fork
167
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
26
列表
看板
标记
里程碑
合并请求
4
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
X
X2Paddle
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
26
Issue
26
列表
看板
标记
里程碑
合并请求
4
合并请求
4
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
提交
2c34d762
编写于
3月 28, 2022
作者:
W
wjj19950828
浏览文件
操作
浏览文件
下载
差异文件
Merge remote-tracking branch 'upstream/develop' into fixed_lite_readme
上级
3691424b
a19264fc
变更
7
隐藏空白更改
内联
并排
Showing
7 changed files
with
231 additions
and
20 deletions
+231
-20
x2paddle/convert.py
x2paddle/convert.py
+5
-0
x2paddle/op_mapper/onnx2paddle/onnx_custom_layer/nms.py
x2paddle/op_mapper/onnx2paddle/onnx_custom_layer/nms.py
+2
-1
x2paddle/op_mapper/onnx2paddle/opset9/opset.py
x2paddle/op_mapper/onnx2paddle/opset9/opset.py
+42
-19
x2paddle/optimizer/fusion/__init__.py
x2paddle/optimizer/fusion/__init__.py
+2
-0
x2paddle/optimizer/fusion/onnx_layernorm_fuse_pass.py
x2paddle/optimizer/fusion/onnx_layernorm_fuse_pass.py
+33
-0
x2paddle/optimizer/fusion/onnx_layernorm_fuser.py
x2paddle/optimizer/fusion/onnx_layernorm_fuser.py
+145
-0
x2paddle/optimizer/optimizer.py
x2paddle/optimizer/optimizer.py
+2
-0
未找到文件。
x2paddle/convert.py
浏览文件 @
2c34d762
...
...
@@ -226,6 +226,11 @@ def onnx2paddle(model_path,
model
=
ONNXDecoder
(
model_path
)
mapper
=
ONNXOpMapper
(
model
)
mapper
.
paddle_graph
.
build
()
logging
.
info
(
"Model optimizing ..."
)
from
x2paddle.optimizer.optimizer
import
GraphOptimizer
graph_opt
=
GraphOptimizer
(
source_frame
=
"onnx"
)
graph_opt
.
optimize
(
mapper
.
paddle_graph
)
logging
.
info
(
"Model optimized."
)
mapper
.
paddle_graph
.
gen_model
(
save_dir
)
logging
.
info
(
"Successfully exported Paddle static graph model!"
)
if
convert_to_lite
:
...
...
x2paddle/op_mapper/onnx2paddle/onnx_custom_layer/nms.py
浏览文件 @
2c34d762
...
...
@@ -106,7 +106,8 @@ class NMS(object):
if
bboxes
.
shape
[
0
]
==
1
:
batch
=
paddle
.
zeros_like
(
clas
,
dtype
=
"int64"
)
else
:
bboxes_count
=
bboxes
.
shape
[
1
]
bboxes_count
=
paddle
.
shape
(
bboxes
)[
1
]
bboxes_count
=
paddle
.
cast
(
bboxes_count
,
dtype
=
"int64"
)
batch
=
paddle
.
divide
(
index
,
bboxes_count
)
index
=
paddle
.
mod
(
index
,
bboxes_count
)
res
=
paddle
.
concat
([
batch
,
clas
,
index
],
axis
=
1
)
...
...
x2paddle/op_mapper/onnx2paddle/opset9/opset.py
浏览文件 @
2c34d762
...
...
@@ -620,15 +620,23 @@ class OpSet9():
pads
)
# NCHW
if
assume_pad
:
paddle_op
=
'paddle.nn.Pad2D'
# x1_begin,x2_begin,x3_begin,x4_begin,x1_end,x2_end,x3_end,x4_end->x1_begin,x1_end,x2_begin,x2_end,x3_begin,x3_end,x4_begin,x4_end
paddings
=
np
.
array
(
pads
).
reshape
(
(
2
,
-
1
)).
transpose
().
astype
(
"int32"
)
paddings
=
np
.
flip
(
paddings
,
axis
=
0
).
flatten
().
tolist
()
if
sum
(
paddings
[:
4
])
==
0
:
paddings
=
paddings
[
4
:]
if
mode
==
'constant'
:
paddings
=
paddings
.
flatten
().
tolist
()
layer_attrs
[
'padding'
]
=
paddings
else
:
layer_attrs
[
"pad"
]
=
paddings
paddle_op
=
"custom_layer:PadAllDim4WithOneInput"
paddings
=
np
.
flip
(
paddings
,
axis
=
0
).
flatten
().
tolist
()
if
sum
(
paddings
[:
4
])
==
0
:
paddings
=
paddings
[
4
:]
layer_attrs
[
'padding'
]
=
paddings
else
:
layer_attrs
[
"pad"
]
=
paddings
paddle_op
=
"custom_layer:PadAllDim4WithOneInput"
else
:
paddle_op
=
'paddle.nn.functional.pad'
layer_attrs
[
"pad"
]
=
np
.
array
(
pads
).
tolist
()
else
:
pad_data_temp
=
pads
[
0
::
2
]
pad_data_all
=
[]
...
...
@@ -1464,11 +1472,18 @@ class OpSet9():
outputs_list
.
append
(
"{}_p{}"
.
format
(
node
.
layer_name
,
i
))
else
:
outputs_list
.
append
(
node
.
name
)
self
.
paddle_graph
.
add_layer
(
'paddle.split'
,
inputs
=
{
"x"
:
val_x
.
name
},
outputs
=
outputs_list
,
**
layer_attrs
)
if
len
(
split
)
>
1
:
self
.
paddle_graph
.
add_layer
(
'paddle.split'
,
inputs
=
{
"x"
:
val_x
.
name
},
outputs
=
outputs_list
,
**
layer_attrs
)
else
:
self
.
paddle_graph
.
add_layer
(
"paddle.cast"
,
inputs
=
{
"x"
:
val_x
.
name
},
outputs
=
outputs_list
,
dtype
=
string
(
val_x
.
dtype
))
@
print_mapping_info
def
Reshape
(
self
,
node
):
...
...
@@ -2698,28 +2713,36 @@ class OpSet9():
layer_outputs
=
[
nn_op_name
,
output_name
]
boxes
=
self
.
graph
.
get_input_node
(
node
,
idx
=
0
,
copy
=
True
)
scores
=
self
.
graph
.
get_input_node
(
node
,
idx
=
1
,
copy
=
True
)
num_classes
=
scores
.
out_shapes
[
0
][
1
]
inputs_len
=
len
(
node
.
layer
.
input
)
layer_attrs
=
dict
()
layer_attrs
[
"keep_top_k"
]
=
-
1
layer_attrs
[
"nms_threshold"
]
=
0.0
layer_attrs
[
"score_threshold"
]
=
0.0
if
inputs_len
>
2
:
max_output_boxes_per_class
=
self
.
graph
.
get_input_node
(
node
,
idx
=
2
,
copy
=
True
)
layer_attrs
[
"keep_top_k"
]
=
_const_weight_or_none
(
max_output_boxes_per_class
).
tolist
()[
0
]
*
num_classes
else
:
layer_attrs
[
"keep_top_k"
]
=
0
max_output_boxes_per_class
=
_const_weight_or_none
(
max_output_boxes_per_class
)
if
len
(
scores
.
out_shapes
[
0
])
!=
0
:
num_classes
=
scores
.
out_shapes
[
0
][
1
]
else
:
num_classes
=
1
if
max_output_boxes_per_class
is
not
None
:
max_output_boxes_per_class
=
max_output_boxes_per_class
.
tolist
()
if
isinstance
(
max_output_boxes_per_class
,
int
):
layer_attrs
[
"keep_top_k"
]
=
max_output_boxes_per_class
*
num_classes
else
:
layer_attrs
[
"keep_top_k"
]
=
max_output_boxes_per_class
[
0
]
*
num_classes
if
inputs_len
>
3
:
iou_threshold
=
self
.
graph
.
get_input_node
(
node
,
idx
=
3
,
copy
=
True
)
layer_attrs
[
"nms_threshold"
]
=
_const_weight_or_none
(
iou_threshold
).
tolist
()[
0
]
else
:
layer_attrs
[
"nms_threshold"
]
=
0.0
if
inputs_len
>
4
:
score_threshold
=
self
.
graph
.
get_input_node
(
node
,
idx
=
4
,
copy
=
True
)
layer_attrs
[
"score_threshold"
]
=
_const_weight_or_none
(
score_threshold
).
tolist
()[
0
]
else
:
layer_attrs
[
"score_threshold"
]
=
0.0
self
.
paddle_graph
.
add_layer
(
"custom_layer:NMS"
,
inputs
=
{
"bboxes"
:
boxes
.
name
,
...
...
x2paddle/optimizer/fusion/__init__.py
浏览文件 @
2c34d762
...
...
@@ -38,3 +38,5 @@ from .tf_batchnorm_fuser import TFBatchNormFuser
from
.tf_batchnorm_fuse_pass
import
TFBatchNormFusePass
from
.trace_fc_fuser
import
TraceFcFuser
from
.trace_fc_fuse_pass
import
TraceFcFusePass
from
.onnx_layernorm_fuser
import
LayerNormFuser
from
.onnx_layernorm_fuse_pass
import
LayerNormFusePass
x2paddle/optimizer/fusion/onnx_layernorm_fuse_pass.py
0 → 100644
浏览文件 @
2c34d762
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from
x2paddle.optimizer.pass_
import
Pass
from
x2paddle.optimizer.fusion
import
LayerNormFuser
from
x2paddle.optimizer.pass_manager
import
pass_register
@pass_register
class LayerNormFusePass(Pass):
    """Graph pass that fuses the ONNX-exported LayerNorm subgraph.

    All pattern matching and graph rewriting is delegated to
    ``LayerNormFuser``; this class only adapts it to the ``Pass``
    registration machinery under the name ``onnx_layernorm_fuse_pass``.
    """

    name = "onnx_layernorm_fuse_pass"

    def __init__(self):
        # No pass-local state; just initialize the Pass base machinery.
        super(LayerNormFusePass, self).__init__()

    def apply(self, graph):
        # Run the fuser over the whole graph using edge-based matching.
        norm_fuser = LayerNormFuser()
        norm_fuser.operate(graph, match_kind="edge")
# Instantiate the pass at import time so that @pass_register records it
# and the optimizer can look it up by its registered name.
onnx_layernorm_fuse_pass = LayerNormFusePass()
x2paddle/optimizer/fusion/onnx_layernorm_fuser.py
0 → 100644
浏览文件 @
2c34d762
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import
copy
import
numpy
as
np
from
collections
import
OrderedDict
from
x2paddle.optimizer.pattern_matcher
import
FuseBase
from
x2paddle.core.program
import
PaddleGraph
,
PaddleLayer
from
x2paddle.core.util
import
*
class LayerNormFuser(FuseBase):
    """Fuse an ONNX-exported LayerNorm subgraph into one paddle.nn.LayerNorm.

    ONNX export lowers LayerNorm into a chain of elementwise primitives
    (mean / subtract / pow / mean / add / sqrt / divide / multiply / add)
    plus two created parameters (weight and bias).  This fuser matches that
    chain and replaces it with a single ``paddle.nn.LayerNorm`` layer.
    """

    def __init__(self):
        super(LayerNormFuser, self).__init__()

    def build_pattern(self):
        """
        code describe:
            x2paddle_ln_pre_weight = self.x2paddle_ln_pre_weight
            x2paddle_ln_pre_bias = self.x2paddle_ln_pre_bias
            x2paddle_166 = paddle.full(dtype='float32', shape=[1], fill_value=2.0)
            x2paddle_169 = paddle.full(dtype='float32', shape=[1], fill_value=9.999999747378752e-06)
            x2paddle_164 = paddle.mean(x=x2paddle_162, axis=[-1], keepdim=True)
            x2paddle_165 = paddle.subtract(x=x2paddle_162, y=x2paddle_164)
            x2paddle_167 = paddle.pow(x=x2paddle_165, y=x2paddle_166)
            x2paddle_168 = paddle.mean(x=x2paddle_167, axis=[-1], keepdim=True)
            x2paddle_170 = paddle.add(x=x2paddle_168, y=x2paddle_169)
            x2paddle_171 = paddle.sqrt(x=x2paddle_170)
            x2paddle_172 = paddle.divide(x=x2paddle_165, y=x2paddle_171)
            x2paddle_173 = paddle.multiply(x=x2paddle_172, y=x2paddle_ln_pre_weight)
            x2paddle_174 = paddle.add(x=x2paddle_173, y=x2paddle_ln_pre_bias)
        """

        def gen_name(id):
            # Pattern-local placeholder name; matched layers are referred
            # to as "x0", "x1", ... inside this template graph.
            return "x" + str(id)

        # x0 / x1: the LayerNorm scale (weight) and shift (bias) parameters.
        self.pattern.add_layer(
            "self.create_parameter", inputs={}, outputs=[gen_name(0)])
        self.pattern.add_layer(
            "self.create_parameter", inputs={}, outputs=[gen_name(1)])
        # x2: exponent constant fed to paddle.pow.
        # NOTE(review): the docstring above shows fill_value=2.0 for this
        # constant, but the pattern matches 0.5 — confirm which value the
        # exporter actually emits; a mismatch would prevent fusion.
        self.pattern.add_layer(
            "paddle.full",
            inputs={},
            outputs=[gen_name(2)],
            shape=[1],
            fill_value=0.5)
        # x3: the numerical-stability epsilon added to the variance.
        self.pattern.add_layer(
            "paddle.full",
            inputs={},
            outputs=[gen_name(3)],
            shape=[1],
            fill_value=9.999999747378752e-06)
        # x4 = mean(input) over the last axis.
        self.pattern.add_layer(
            "paddle.mean",
            inputs={"x": "layernorm-input-0"},
            outputs=[gen_name(4)],
            axis=[-1],
            keep_dim=True)
        # x5 = input - mean  (centered input, reused later by divide).
        self.pattern.add_layer(
            "paddle.subtract",
            inputs={"x": "layernorm-input-0",
                    "y": gen_name(4)},
            outputs=[gen_name(5)])
        # x6 = (input - mean) ** x2
        self.pattern.add_layer(
            "paddle.pow",
            inputs={"x": gen_name(5),
                    "y": gen_name(2)},
            outputs=[gen_name(6)])
        # x7 = mean(x6) over the last axis  (the variance term).
        self.pattern.add_layer(
            "paddle.mean",
            inputs={"x": gen_name(6)},
            outputs=[gen_name(7)],
            axis=[-1],
            keep_dim=True)
        # x8 = variance + epsilon
        self.pattern.add_layer(
            "paddle.add",
            inputs={"x": gen_name(7),
                    "y": gen_name(3)},
            outputs=[gen_name(8)])
        # x9 = sqrt(variance + epsilon)
        self.pattern.add_layer(
            "paddle.sqrt", inputs={"x": gen_name(8)}, outputs=[gen_name(9)])
        # x10 = (input - mean) / sqrt(variance + epsilon)
        self.pattern.add_layer(
            "paddle.divide",
            inputs={"x": gen_name(5),
                    "y": gen_name(9)},
            outputs=[gen_name(10)])
        # x11 = normalized * weight
        self.pattern.add_layer(
            "paddle.multiply",
            inputs={"x": gen_name(10),
                    "y": gen_name(0)},
            outputs=[gen_name(11)])
        # x12 = x11 + bias  (the fused subgraph's final output).
        self.pattern.add_layer(
            "paddle.add",
            inputs={"x": gen_name(11),
                    "y": gen_name(1)},
            outputs=[gen_name(12)])
        self.pattern.build(inputs={"input-0": "layernorm-input-0", })

    def insert_new_layer(self, graph, parameters, matches):
        # Build the replacement LayerNorm layer and splice it into the graph
        # at the position of the first matched mean layer.
        new_layer, new_layer_id = self.gen_new_layer(parameters, matches)
        graph.layers[new_layer_id] = new_layer
        # Layers left in `matches` are deleted by the caller; keep the
        # parameter/constant layers and the layer we just replaced alive by
        # popping them from the match set.
        matches_copy = copy.deepcopy(matches)
        for layer_id, layer in matches_copy.items():
            if layer.kernel in ["self.create_parameter", "paddle.full"]:
                matches.pop(layer_id)
        matches.pop(new_layer_id)

    def gen_new_layer(self, parameters, matches):
        # Matched layer ids are string-encoded ints; sort numerically so
        # layer_id_list[0] is the earliest layer of the pattern.
        layer_id_list = list(matches.keys())
        layer_id_list.sort(key=int)
        layer_inputs = list()      # inputs of the matched mean layers
        layer_inputs_ids = list()  # their layer ids (first one is reused)
        param_name = list()        # outputs of the two create_parameter layers
        for layer_id, layer in matches.items():
            if layer.kernel == "paddle.mean":
                # The first mean consumes the original LayerNorm input.
                layer_inputs.append(layer.inputs)
                layer_inputs_ids.append(layer_id)
            if layer.kernel == "self.create_parameter":
                # Collected in match order: weight first, then bias —
                # presumably guaranteed by the pattern ordering; verify.
                param_name.append(layer.outputs[0])
            if layer.kernel == "paddle.add":
                # The last matched add's output names the fused layer.
                output_name = layer.outputs[0]
        param = parameters[param_name[0]]
        # Normalized size is taken from the weight parameter's first dim.
        c = param.shape[0]
        # Re-key the weight/bias tensors to the names paddle.nn.LayerNorm
        # expects ("<layer>.weight" / "<layer>.bias").
        weight_param = parameters.pop(param_name[0])
        parameters["{}.weight".format(output_name)] = weight_param
        bias_param = parameters.pop(param_name[1])
        parameters["{}.bias".format(output_name)] = bias_param
        new_layer = PaddleLayer(
            layer_id_list[0],
            "paddle.nn.LayerNorm",
            inputs=layer_inputs[0],
            outputs=[output_name],
            normalized_shape=[c])
        # Returned id is the first mean layer's id: the new layer replaces
        # the subgraph at that position.
        return new_layer, layer_inputs_ids[0]
x2paddle/optimizer/optimizer.py
浏览文件 @
2c34d762
...
...
@@ -36,6 +36,8 @@ class GraphOptimizer(object):
"conv2d_add_fuse_pass"
,
"tf_batchnorm_fuse_pass"
,
"prelu_fuse_pass"
,
"transpose_eliminate_pass"
]
elif
source_frame
==
"onnx"
:
self
.
passes
=
[
"onnx_layernorm_fuse_pass"
]
else
:
self
.
passes
=
[]
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录