Commit 52c6ea75
Authored on Sep 12, 2020 by SunAhong1993

modify optimizer

Parent: c5f36434
Showing 10 changed files with 1,696 additions and 1,124 deletions (+1696 −1124)
x2paddle/core/program.py                                     +24    -8
x2paddle/op_mapper/pytorch2paddle/aten.py                    +23    -5
x2paddle/op_mapper/pytorch2paddle/prim.py                     +2    -1
x2paddle/op_mapper/pytorch2paddle/prim2code.py               +16    -2
x2paddle/op_mapper/pytorch2paddle/pytorch_op_mapper.py        +1    -0
x2paddle/optimizer/fusion/batchnorm2d_fuser.py               +68  -157
x2paddle/optimizer/fusion/fc_fuser.py                         +5    -7
x2paddle/optimizer/fusion/interpolate_bilinear_fuser.py    +1498  -924
x2paddle/optimizer/optimizer.py                               +8    -3
x2paddle/optimizer/pattern_matcher.py                        +51   -17
x2paddle/core/program.py
@@ -128,10 +128,30 @@ class PaddleGraph(object):
                 for output in layer.outputs:
                     outputs_from_nodes[output] = layer_id

+            # Expose the block's outputs to the parent graph
+            if inputs is not None and outputs is not None and set(
+                    layer.outputs).issubset(outputs):
+                if layer_id not in self.edges_out:
+                    self.edges_out[layer_id] = list()
+                self.edges_out[layer_id].append(-1)
+
+            # Handle sub-blocks
+            if len(layer.blocks) > 0:
+                for block in layer.blocks:
+                    block.build(layer.inputs, layer.outputs)
+
+        # Remove unnecessary nodes
+        invalid_list = list()
+        for layer_id, layer in self.layers.items():
+            if len(self.layers) > 1:
+                if self.edges_in.get(layer_id, 0) == 0 and self.edges_out.get(
+                        layer_id, 0) == 0 and layer.kernel != "prim.assert" \
+                        and layer.kernel != "prim.exception" \
+                        and layer.kernel != "prim.warnings":
+                    invalid_list.append(layer_id)
+        for layer_id in invalid_list:
+            self.layers.pop(layer_id)
+
         if self.graph_type == "dygraph":
             self.get_dygraph_inputs()
             if len(self.outputs) == 0:
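The block added to build() above prunes isolated layers: anything with no incoming and no outgoing edges is dropped unless its kernel is prim.assert, prim.exception or prim.warnings. A minimal standalone sketch of that bookkeeping, using SimpleNamespace objects and plain dicts as stand-ins for the real PaddleGraph/PaddleLayer classes (the names below are illustrative, not the x2paddle API):

from types import SimpleNamespace

# Stand-in layers: id -> object with a `kernel` attribute.
layers = {
    "0": SimpleNamespace(kernel="paddle.nn.Conv2D"),
    "1": SimpleNamespace(kernel="prim.constant"),   # no edges -> prunable
    "2": SimpleNamespace(kernel="prim.warnings"),   # no edges, but kept
}
edges_in = {"0": ["x"]}   # layer "0" consumes an input
edges_out = {"0": ["3"]}  # and feeds another layer

KEEP_KERNELS = {"prim.assert", "prim.exception", "prim.warnings"}

invalid = [
    layer_id for layer_id, layer in layers.items()
    if len(layers) > 1
    and edges_in.get(layer_id, 0) == 0
    and edges_out.get(layer_id, 0) == 0
    and layer.kernel not in KEEP_KERNELS
]
for layer_id in invalid:
    layers.pop(layer_id)

print(sorted(layers))  # ['0', '2'] - the isolated constant is dropped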
@@ -244,7 +264,8 @@ class PaddleGraph(object):
         else:
             self.gen_dygraph_code(save_dir)
         self.dump_dygraph_parameter(save_dir)
-        self.dygraph2static(save_dir, input_shapes)  #[[None, 3, 224, 224]]
+        # self.dygraph2static(save_dir, input_shapes) #[[None, 3, 224, 224]]

     def dump_parameter(self, param_name, param, save_dir):
         if not os.path.exists(save_dir):
@@ -367,13 +388,8 @@ class PaddleGraph(object):
         gen_head()

         for layer_id, layer in self.layers.items():
-            if len(self.layers) > 1:
-                if self.edges_in.get(layer_id, 0) == 0 and self.edges_out.get(
-                        layer_id, 0) == 0 and layer.kernel != "prim.assert" \
-                        and layer.kernel != "prim.exception" \
-                        and layer.kernel != "prim.warnings":
-                    continue
-            if "paddle.nn" in layer.kernel or layer.kernel == "fluid.dygraph.base.to_variable":
+            if ("paddle.nn" in layer.kernel and
+                    "functional" not in layer.kernel
+                ) or layer.kernel == "fluid.dygraph.base.to_variable":
                 line = "{}".format(layer.outputs[0])
                 if layer.kernel == "fluid.dygraph.base.to_variable" and not layer.attrs[
x2paddle/op_mapper/pytorch2paddle/aten.py
@@ -3229,17 +3229,35 @@ def aten_upsample_bilinear2d(mapper, graph, node):
     current_outputs = [output_name]
     # Process input 0, i.e. %x.13
     mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
-    layer_inputs["input"] = inputs_name[0]
+    layer_inputs["x"] = inputs_name[0]
     # Get the list of inputs of the current node
     current_inputs = list(layer_inputs.values())
     # Process input 1, i.e. %4963
     if inputs_name[1] in mapper.attrs:
-        layer_attrs["out_shape"] = mapper.attrs[inputs_name[1]]
+        layer_attrs["size"] = mapper.attrs[inputs_name[1]]
     else:
         mapper._check_input(graph, inputs_node[1], inputs_name[1],
                             current_outputs)
-        layer_inputs["out_shape"] = inputs_name[1]
+        layer_inputs["size"] = inputs_name[1]
         current_inputs.append(inputs_name[1])
+        graph.add_layer(
+            "prim.isinstance",
+            inputs={"input": inputs_name[1]},
+            outputs=[inputs_name[1] + "_isinstance"],
+            cls="paddle.fluid.Variable")
+        graph.add_layer(
+            "prim.if", {"input": inputs_name[1] + "_isinstance"},
+            outputs=[inputs_name[0] + "_if1"])
+        if_layer = graph.layers[list(graph.layers.keys())[-1]]
+        block = PaddleGraph(if_layer, graph_type="dygraph")
+        block.add_layer(
+            "prim.var2list",
+            inputs={"input": inputs_name[1]},
+            outputs=[inputs_name[1]])
+        if_layer.add_block(block)
+        block = PaddleGraph(if_layer, graph_type="dygraph")
+        if_layer.add_block(block)
+        if_layer.inputs["input-0"] = inputs_name[1]
     # Process input 2, i.e. %5421
     if inputs_name[2] in mapper.attrs:
         layer_attrs["align_corners"] = mapper.attrs[inputs_name[2]]
@@ -3261,10 +3279,10 @@ def aten_upsample_bilinear2d(mapper, graph, node):
         inputs=list_layer_inputs,
         outputs=[output_name + "_assert"],
         type="eq")
-    layer_inputs["scale"] = inputs_name[3]
+    layer_inputs["scale_factor"] = inputs_name[3]
     layer_attrs["align_mode"] = 0
     graph.add_layer(
-        "fluid.layers.interpolate",
+        "paddle.nn.functional.interpolate",
         inputs=layer_inputs,
         outputs=layer_outputs,
         **layer_attrs)
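Taken together, the two hunks above retarget aten::upsample_bilinear2d from fluid.layers.interpolate (input / out_shape / scale) to paddle.nn.functional.interpolate (x / size / scale_factor), inserting a prim.isinstance / prim.var2list branch so that a tensor-valued size is turned into a Python list first. A hedged sketch of the kind of call the converted model ends up making (the tensor shapes, the bilinear mode argument and the literal sizes are made up for illustration):

import paddle
import paddle.nn.functional as F

x = paddle.rand([1, 3, 32, 32])

# Static target size, as when inputs_name[1] is a constant in mapper.attrs.
y = F.interpolate(x, size=[64, 64], mode="bilinear",
                  align_corners=False, align_mode=0)

# Dynamic size coming from a tensor: the prim.var2list branch added above
# converts the tensor into a plain Python list before the call.
size_tensor = paddle.to_tensor([64, 64])
y2 = F.interpolate(x, size=size_tensor.numpy().tolist(), mode="bilinear",
                   align_corners=False, align_mode=0)

print(y.shape, y2.shape)  # [1, 3, 64, 64] for both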
x2paddle/op_mapper/pytorch2paddle/prim.py
@@ -442,7 +442,8 @@ def prim_shape(mapper, graph, node):
     # Get the list of inputs of the current node
     current_inputs = list(layer_inputs.values())

-    graph.add_layer("prim.shape", inputs=layer_inputs, outputs=layer_outputs)
+    graph.add_layer(
+        "fluid.layers.shape", inputs=layer_inputs, outputs=layer_outputs)

     return current_inputs, current_outputs
x2paddle/op_mapper/pytorch2paddle/prim2code.py
@@ -172,10 +172,11 @@ def prim_if(layer, indent=1, init_func=[], forward_func=[]):
     forward_func.extend(b_forward_lines)
     block = layer.blocks[1]
     if len(block.layers) > 0:
-        line = "else:"
-        forward_func.extend(gen_codes([line], indent=indent))
         b_init_lines, b_forward_lines = block.gen_dygraph_code(indent=indent + 1)
+        if len(b_forward_lines) != 0:
+            line = "else:"
+            forward_func.extend(gen_codes([line], indent=indent))
         init_func.extend(b_init_lines)
         forward_func.extend(b_forward_lines)
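The reordering in prim_if means gen_dygraph_code() runs on the else block first, and an "else:" header is only emitted when that block actually produced forward lines, so the generated model never contains an empty else branch. A tiny self-contained sketch of the guard, with plain string lists standing in for the generated code:

forward_func = ["if x337:", "    raise RaiseException('Exception')"]
b_forward_lines = []  # the else block generated nothing

# Mirror of the new guard in prim_if: only emit "else:" when the block
# produced code.
if len(b_forward_lines) != 0:
    forward_func.append("else:")
    forward_func.extend(b_forward_lines)

print("\n".join(forward_func))  # no dangling "else:" line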
@@ -191,6 +192,13 @@ def prim_is(layer, indent=1, init_func=[], forward_func=[]):
     forward_func.extend(gen_codes([line], indent=indent))


+def prim_isinstance(layer, indent=1, init_func=[], forward_func=[]):
+    line = "{} = isinstance({}, {})".format(layer.outputs[0],
+                                            get_value(layer, "input"),
+                                            layer.attrs["cls"])
+    forward_func.extend(gen_codes([line], indent=indent))
+
+
 def prim_isnot(layer, indent=1, init_func=[], forward_func=[]):
     line = "{} = {} is not {}".format(layer.outputs[0],
                                       get_value(layer, "x"),
@@ -370,6 +378,12 @@ def prim_type(layer, indent=1, init_func=[], forward_func=[]):
     forward_func.extend(gen_codes([line], indent=indent))


+def prim_var2list(layer, indent=1, init_func=[], forward_func=[]):
+    line = "{} = {}.numpy().tolist()".format(layer.outputs[0],
+                                             get_value(layer, "input"))
+    forward_func.extend(gen_codes([line], indent=indent))
+
+
 def prim_warnings(layer, indent=1, init_func=[], forward_func=[]):
     lines = ["import warnings"]
     line = "warnings.warn({}, stacklevel={})".format(
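The two new emitters turn prim.isinstance and prim.var2list layers into single Python statements in the generated forward code. Applying the same format strings to placeholder names (x4963 is borrowed from the aten.py comments above; the real names come from layer.outputs and get_value):

# The format strings used by the new emitters, applied to example names.
isinstance_line = "{} = isinstance({}, {})".format(
    "x4963_isinstance", "x4963", "paddle.fluid.Variable")
var2list_line = "{} = {}.numpy().tolist()".format("x4963", "x4963")

print(isinstance_line)  # x4963_isinstance = isinstance(x4963, paddle.fluid.Variable)
print(var2list_line)    # x4963 = x4963.numpy().tolist()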
x2paddle/op_mapper/pytorch2paddle/pytorch_op_mapper.py
@@ -81,6 +81,7 @@ class PyTorchOpMapper(OpMapper):
             node = ivalue.node()
             if str(ivalue.type()) != "Tensor":
                 graph.set_name(str(ivalue.type()).split(".")[-1])
+                continue
             inputs, outputs = self.data(graph, node, ivalue.unique())
         # Convert intermediate nodes
         for node in script_graph.nodes():
x2paddle/optimizer/fusion/batchnorm2d_fuser.py
@@ -25,215 +25,122 @@ class BatchNorm2dFuser(FuseBase):
     def build_pattern(self):
         """ Describe the batchnorm2d graph structure that needs to be replaced.
         Example Python code for the batchnorm2d layer pattern:
-            x2214 = fluid.layers.shape(x2207)
-            x2214 = len(x2214)
-            x2215 = x2214 != x2213
-            if x2215 :
-                raise RaiseException(x2212)
-            if x2218 :
-                x2220 = self.x2220
-                x2221 = x2220 + x2209
-                self.x2220 = x2221
-            x2227 = False
-            if x2227 :
-                x2230 = fluid.layers.shape(x2207.shape)
-                x2231 = 'Exception'
-                x2236 = x2230[x2233]
-                x2237 = len(x2230)
-                x2238 = x2237 - x2234
-                x2241 = x2236
-                for _x2240 in range(x2238):
-                    x2242 = _x2240 + x2234
-                    x2243 = x2230[x2242]
-                    x2244 = x2241 * x2243
-                    x2239 = x2244
-                x2245 = x2239 == x2235
-                if x2245 :
-                    raise RaiseException(x2231)
-            x2248 = self.batchnorm41(x2207)
+            x336 = fluid.layers.shape(input=x334)
+            x336 = len(x336)
+            x337 = x336 != 4
+            if x337 :
+                raise RaiseException('Exception')
+            if False :
+                x351 = fluid.layers.shape(input=x334)
+                x352 = x351[0]
+                x353 = len(x351)
+                x354 = x353 - 2
+                x357 = x352
+                for _x356 in range(x354):
+                    x358 = _x356 + 2
+                    x359 = x351[x358]
+                    x360 = x357 * x359
+                    x355 = x360
+                x361 = x355 == 1
+                if x361 :
+                    raise RaiseException('Exception')
+            x364 = self.batchnorm7(x334)
         """

         def gen_name(id):
             return "x" + str(id)

-        # self.pattern.add_layer(
-        #     "prim.constant", inputs={}, outputs=[gen_name(0)], value=1)
-        # self.pattern.add_layer(
-        #     "prim.constant", inputs={}, outputs=[gen_name(1)], value=0.1)
-        # self.pattern.add_layer(
-        #     "prim.constant", inputs={}, outputs=[gen_name(2)], value=0.001)
-        # self.pattern.add_layer(
-        #     "prim.constant",
-        #     inputs={},
-        #     outputs=[gen_name(3)],
-        #     value="Exception")
-        # self.pattern.add_layer(
-        #     "prim.constant", inputs={}, outputs=[gen_name(4)], value=4)
         self.pattern.add_layer(
             "fluid.layers.shape",
             inputs={'input': "bn-input-0"},
-            outputs=[gen_name(5)])
+            outputs=[gen_name(0)])
         self.pattern.add_layer(
-            "prim.len", inputs={'input': gen_name(5)}, outputs=[gen_name(5)])
+            "prim.len", inputs={'input': gen_name(0)}, outputs=[gen_name(0)])
         self.pattern.add_layer(
-            "prim.ne",
-            inputs={"x": gen_name(5),
-                    "y": "bn-input-9"},
-            outputs=[gen_name(6)])
-        self.pattern.add_layer(
-            "prim.if", {'input': gen_name(6)}, [gen_name(7)])
+            "prim.ne", inputs={"x": gen_name(0)}, outputs=[gen_name(1)], y=4)
+        self.pattern.add_layer(
+            "prim.if", {'input': gen_name(1)}, [gen_name(2)])
         if_layer1 = self.pattern.layers[list(self.pattern.layers.keys())[-1]]
         pattern_block0 = PaddleGraph(if_layer1, graph_type="dygraph")
         pattern_block0.add_layer(
             "prim.exception",
-            inputs={"input": "bn-input-1"},
-            outputs=[gen_name(8)])
-        if_layer1.inputs["input-0"] = "bn-input-1"
+            inputs={},
+            outputs=[gen_name(3)],
+            input="Exception")
         if_layer1.add_block(pattern_block0)
         pattern_block1 = PaddleGraph(if_layer1, graph_type="dygraph")
         if_layer1.add_block(pattern_block1)
-        # self.pattern.add_layer(
-        #     "prim.constant", inputs={}, outputs=[gen_name(9)], value=False)
-        self.pattern.add_layer(
-            "prim.if", {'input': "bn-input-2"}, [gen_name(10)])
+        self.pattern.add_layer(
+            "prim.if", {}, [gen_name(4)], input=False)
         if_layer2 = self.pattern.layers[list(self.pattern.layers.keys())[-1]]
         pattern_block0 = PaddleGraph(if_layer2, graph_type="dygraph")
-        pattern_block0.add_layer(
-            "fluid.dygraph.base.to_variable",
-            inputs={},
-            outputs=[gen_name(11)],
-            value="params[{}]".format(string(gen_name(11))))
-        pattern_block0.add_layer(
-            "prim.add",
-            inputs={"x": gen_name(11),
-                    "y": "bn-input-3"},
-            outputs=[gen_name(12)])
-        pattern_block0.add_layer(
-            "prim.set_attr",
-            inputs={"input": gen_name(12)},
-            outputs=["self." + gen_name(11)])
-        if_layer2.inputs["input-0"] = "bn-input-3"
-        if_layer2.add_block(pattern_block0)
-        pattern_block1 = PaddleGraph(if_layer2, graph_type="dygraph")
-        if_layer2.add_block(pattern_block1)
-        # self.pattern.add_layer(
-        #     "prim.constant", inputs={}, outputs=[gen_name(13)], value=True)
-        # self.pattern.add_layer(
-        #     "prim.constant", inputs={}, outputs=[gen_name(14)], value=False)
-        self.pattern.add_layer(
-            "prim.if", {'input': "bn-input-4"}, [gen_name(15)])
-        if_layer3 = self.pattern.layers[list(self.pattern.layers.keys())[-1]]
-        pattern_block0 = PaddleGraph(if_layer3, graph_type="dygraph")
         pattern_block0.add_layer(
             "fluid.layers.shape",
             inputs={'input': "bn-input-0"},
-            outputs=[gen_name(16)])
-        # pattern_block0.add_layer(
-        #     "prim.constant",
-        #     inputs={},
-        #     outputs=[gen_name(17)],
-        #     value="Exception")
-        # pattern_block0.add_layer(
-        #     "prim.constant", inputs={}, outputs=[gen_name(18)], value=True)
-        # pattern_block0.add_layer(
-        #     "prim.constant", inputs={}, outputs=[gen_name(19)], value=0)
-        # pattern_block0.add_layer(
-        #     "prim.constant", inputs={}, outputs=[gen_name(20)], value=2)
-        # pattern_block0.add_layer(
-        #     "prim.constant", inputs={}, outputs=[gen_name(21)], value=1)
+            outputs=[gen_name(5)])
         pattern_block0.add_layer(
             "prim.getitem",
-            inputs={"list": gen_name(16),
-                    "index": "bn-input-6"},
-            outputs=[gen_name(22)])
+            inputs={"list": gen_name(5)},
+            outputs=[gen_name(6)],
+            index=0)
         pattern_block0.add_layer(
-            "prim.len", inputs={"input": gen_name(16)}, outputs=[gen_name(23)])
+            "prim.len", inputs={"input": gen_name(5)}, outputs=[gen_name(7)])
         pattern_block0.add_layer(
-            "prim.sub",
-            inputs={"x": gen_name(23),
-                    "y": "bn-input-7"},
-            outputs=[gen_name(24)])
+            "prim.sub", inputs={"x": gen_name(7)}, outputs=[gen_name(8)], y=2)
         pattern_block0.add_layer(
-            "prim.equal", inputs={"input": gen_name(22)}, outputs=[gen_name(25)])
+            "prim.equal", inputs={"input": gen_name(6)}, outputs=[gen_name(9)])
         pattern_block0.add_layer(
             "prim.loop",
-            inputs={"input": gen_name(24)},
-            outputs=[gen_name(26), gen_name(27)])
+            inputs={"input": gen_name(8)},
+            outputs=[gen_name(8.1), gen_name(10)])
         loop_layer = pattern_block0.layers[list(pattern_block0.layers.keys())[-1]]
         pattern_block0_block0 = PaddleGraph(loop_layer, graph_type="dygraph")
         pattern_block0_block0.add_layer(
-            "prim.add",
-            inputs={"x": gen_name(27),
-                    "y": "bn-input-7"},
-            outputs=[gen_name(28)])
+            "prim.add", inputs={"x": gen_name(10)}, outputs=[gen_name(11)], y=2)
         pattern_block0_block0.add_layer(
             "prim.getitem",
-            inputs={"list": gen_name(16),
-                    "index": gen_name(28)},
-            outputs=[gen_name(29)])
+            inputs={"list": gen_name(5),
+                    "index": gen_name(11)},
+            outputs=[gen_name(12)])
         pattern_block0_block0.add_layer(
             "prim.mul",
-            inputs={"x": gen_name(25),
-                    "y": gen_name(29)},
-            outputs=[gen_name(30)])
+            inputs={"x": gen_name(9),
+                    "y": gen_name(12)},
+            outputs=[gen_name(13)])
         pattern_block0_block0.add_layer(
             "prim.equal",
-            inputs={"input": gen_name(30)},
-            outputs=[gen_name(26)])
-        loop_layer.inputs["input-1"] = "bn-input-7"
-        loop_layer.inputs["input-2"] = gen_name(16)
-        loop_layer.inputs["input-3"] = gen_name(25)
+            inputs={"input": gen_name(13)},
+            outputs=[gen_name(8.1)])
+        loop_layer.inputs["input-1"] = gen_name(5)
+        loop_layer.inputs["input-2"] = gen_name(9)
         loop_layer.add_block(pattern_block0_block0)
         pattern_block0.add_layer(
-            "prim.eq",
-            inputs={"x": gen_name(26),
-                    "y": "bn-input-8"},
-            outputs=[gen_name(31)])
+            "prim.eq", inputs={"x": gen_name(8.1)}, outputs=[gen_name(14)], y=1)
         pattern_block0.add_layer(
-            "prim.if", inputs={"input": gen_name(31)}, outputs=[gen_name(32)])
-        if_layer31 = pattern_block0.layers[list(pattern_block0.layers.keys())[-1]]
-        pattern_block0_block0 = PaddleGraph(if_layer31, graph_type="dygraph")
+            "prim.if", inputs={"input": gen_name(14)}, outputs=[gen_name(15)])
+        if_layer21 = pattern_block0.layers[list(pattern_block0.layers.keys())[-1]]
+        pattern_block0_block0 = PaddleGraph(if_layer21, graph_type="dygraph")
         pattern_block0_block0.add_layer(
             "prim.exception",
-            inputs={"input": "bn-input-5"},
-            outputs=[gen_name(33)])
-        if_layer31.inputs["input-0"] = "bn-input-5"
-        if_layer31.add_block(pattern_block0_block0)
-        pattern_block0_block1 = PaddleGraph(if_layer31, graph_type="dygraph")
-        if_layer31.add_block(pattern_block0_block1)
-        if_layer3.add_block(pattern_block0)
-        pattern_block1 = PaddleGraph(if_layer3, graph_type="dygraph")
-        if_layer3.add_block(pattern_block1)
-        if_layer3.inputs["input-0"] = "bn-input-5"
-        if_layer3.inputs["input-1"] = "bn-input-6"
-        if_layer3.inputs["input-2"] = "bn-input-7"
-        if_layer3.inputs["input-3"] = "bn-input-7"
-        if_layer3.inputs["input-4"] = "bn-input-8"
-        if_layer3.inputs["input-5"] = "bn-input-0"
+            inputs={},
+            outputs=[gen_name(15)],
+            input="Exception")
+        if_layer21.add_block(pattern_block0_block0)
+        pattern_block0_block1 = PaddleGraph(if_layer21, graph_type="dygraph")
+        if_layer21.add_block(pattern_block0_block1)
+        if_layer2.add_block(pattern_block0)
+        pattern_block1 = PaddleGraph(if_layer2, graph_type="dygraph")
+        if_layer2.add_block(pattern_block1)
+        if_layer2.inputs["input-0"] = "bn-input-0"
         self.pattern.add_layer(
             "paddle.nn.BatchNorm",
             inputs={"input": "bn-input-0"},
-            outputs=[gen_name(34), gen_name(35)],
+            outputs=[gen_name(16), gen_name(17)],
             is_test=True,
             num_channels=160,
             momentum=0.1,
             epsilon=0.001)
-        self.pattern.build(inputs={
-            "input-0": "bn-input-0",
-            "input-1": "bn-input-1",
-            "input-2": "bn-input-2",
-            "input-3": "bn-input-3",
-            "input-4": "bn-input-4",
-            "input-5": "bn-input-5",
-            "input-6": "bn-input-6",
-            "input-7": "bn-input-7",
-            "input-8": "bn-input-8",
-            "input-9": "bn-input-9"
-        })
+        self.pattern.build(inputs={"input-0": "bn-input-0"})

     def insert_new_layer(self, graph, parameters, matches):
         new_layer = self.gen_new_layer(parameters, matches)
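The main shift in the rewritten pattern is that literal operands (4, 2, 1, 0, False, 'Exception') are no longer separate prim.constant layers or external bn-input-* placeholders; they ride along as attributes of the consuming layer (y=4, index=0, input="Exception", ...), and the pattern exposes a single external input. A rough sketch of why that simplifies matching, using plain tuples as stand-in layers rather than the real PaddleGraph/PaddleLayer classes:

# Stand-in representation: each layer is (kernel, inputs, attrs).
# Old-style pattern: the literal 4 lives in its own constant layer, so the
# matcher has to line up one extra node and one extra edge.
old_pattern = [
    ("prim.constant", {}, {"value": 4}),
    ("prim.ne", {"x": "shape_len", "y": "const_4"}, {}),
]

# New-style pattern: the literal is an attribute of prim.ne itself.
new_pattern = [
    ("prim.ne", {"x": "shape_len"}, {"y": 4}),
]

def matches(pattern_layer, graph_layer):
    """Kernel and attribute values must agree; input names act as wildcards."""
    p_kernel, _, p_attrs = pattern_layer
    g_kernel, _, g_attrs = graph_layer
    return p_kernel == g_kernel and all(
        g_attrs.get(k) == v for k, v in p_attrs.items())

graph_layer = ("prim.ne", {"x": "x336"}, {"y": 4})
print(matches(new_pattern[0], graph_layer))  # True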
@@ -241,6 +148,10 @@ class BatchNorm2dFuser(FuseBase):
         graph.layers[new_layer_id] = new_layer
         matches.pop(new_layer_id)

+        # for layer in matches.values():
+        #     print(layer.outputs)
+        # print("-------")
+
     def gen_new_layer(self, parameters, matches):
         layers_id = list(matches.keys())
         layer = matches[layers_id[-1]]
x2paddle/optimizer/fusion/fc_fuser.py
@@ -28,7 +28,7 @@ class FcFuser(FuseBase):
         Example Python code for the fc layer pattern:
             x133 = x128.shape
             x133 = len(x133)
-            x134 = x133 == x131
+            x134 = x133 == 2
             if x134 :
                 classifier_6_weight = self.classifier_6_weight
                 x136 = fluid.layers.transpose(x=classifier_6_weight, perm=[1, 0])
@@ -55,9 +55,9 @@ class FcFuser(FuseBase):
             "prim.len", inputs={'input': gen_name(2)}, outputs=[gen_name(2)])
         self.pattern.add_layer(
-            "prim.eq",
-            inputs={"eq0": gen_name(2),
-                    "eq1": "fc-input-1"},
-            outputs=[gen_name(3)])
+            "prim.eq", inputs={"eq0": gen_name(2)}, outputs=[gen_name(3)], eq1=2)
         self.pattern.add_layer("prim.if", {'input': gen_name(3)}, [gen_name(4)])
         self.pattern.outputs.append(gen_name(4))
         if_layer1 = self.pattern.layers[list(self.pattern.layers.keys())[-1]]
@@ -122,9 +122,7 @@ class FcFuser(FuseBase):
             "prim.equal", inputs={'input': gen_name(13)},
             outputs=[gen_name(4)])
         if_layer1.add_block(pattern_block1)
-        self.pattern.build(inputs={
-            "input-0": "fc-input-0",
-            "input-1": "fc-input-1"})
+        self.pattern.build(inputs={"input-0": "fc-input-0"})

     def insert_new_layer(self, graph, parameters, matches):
         new_layer = self.gen_new_layer(parameters, matches)
x2paddle/optimizer/fusion/interpolate_bilinear_fuser.py
(This diff is collapsed.)
x2paddle/optimizer/optimizer.py
@@ -19,9 +19,14 @@ from x2paddle.optimizer.pass_manager import PassManager
 class GraphOptimizer(object):
     def __init__(self):
         self.passes = [
-            "interpolate_bilinear_fuse_pass", "fc_fuse_pass",
-            "adaptive_pool2d_fuse_pass", "batchnorm2d_fuse_pass",
-            "constant_fuse_pass", "reshape_fuse_pass", "dropout_fuse_pass"
+            "constant_fuse_pass",
+            "batchnorm2d_fuse_pass",
+            "interpolate_bilinear_fuse_pass",
+            "fc_fuse_pass",
+            # "interpolate_bilinear_fuse_pass",
+            # "fc_fuse_pass",
+            # "adaptive_pool2d_fuse_pass", "batchnorm2d_fuse_pass",
+            # "constant_fuse_pass", "reshape_fuse_pass", "dropout_fuse_pass"
         ]

     def optimize(self, graph):
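The pass list is reordered so constant_fuse_pass runs first, then batchnorm2d_fuse_pass, interpolate_bilinear_fuse_pass and fc_fuse_pass, with the adaptive_pool2d/reshape/dropout passes left commented out. A minimal sketch of the kind of run-in-order loop such an optimizer typically drives; the registry and the convention that a pass returns True when it changed the graph are illustrative assumptions, not the actual PassManager API:

# Hypothetical registry mapping pass names to callables that return True
# when they changed the graph.
PASS_REGISTRY = {
    "constant_fuse_pass": lambda graph: False,
    "batchnorm2d_fuse_pass": lambda graph: False,
    "interpolate_bilinear_fuse_pass": lambda graph: False,
    "fc_fuse_pass": lambda graph: False,
}

def optimize(graph, passes):
    # Apply each pass repeatedly until it stops changing the graph,
    # then move on to the next one, preserving the configured order.
    for name in passes:
        run_pass = PASS_REGISTRY[name]
        while run_pass(graph):
            pass
    return graph

graph = {"layers": {}}
optimize(graph, ["constant_fuse_pass", "batchnorm2d_fuse_pass",
                 "interpolate_bilinear_fuse_pass", "fc_fuse_pass"])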
x2paddle/optimizer/pattern_matcher.py
@@ -34,7 +34,7 @@ class PatternMatcher(object):
            and store the subgraph's layer ids in subgraph_id2layers in topological order.
         """

-        def get_subgraph(pattern, graph, start_index):
+        def get_subgraph(pattern, graph, start_index, is_subblock=False):
             pattern_index = 0
             pattern_id2layers = pattern.get_global_layers()
             pattern_ids = list(pattern_id2layers.keys())
@@ -49,13 +49,19 @@ class PatternMatcher(object):
                 # Check whether the input connections match
                 if layer_id in graph.edges_in:
                     if pattern_layer_id not in pattern.edges_in:
                         print("1--")
+                        if pattern_index == 0 or is_subblock:
+                            return False
+                        else:
+                            subgraph_id2layers.pop(layer_id)
+                            continue
                     else:
                         if len(graph.edges_in[layer_id]) != len(
                                 pattern.edges_in[pattern_layer_id]):
                             print("2--")
+                            if pattern_index == 0 or is_subblock:
+                                return False
+                            else:
+                                subgraph_id2layers.pop(layer_id)
+                                continue
                     layer_in = graph.edges_in[layer_id]
                     pattern_layer_in = pattern.edges_in[pattern_layer_id]
                     for i in range(len(layer_in)):
@@ -70,16 +76,22 @@ class PatternMatcher(object):
                             # Check that the pattern input's index in pattern_ids
                             # matches the graph input's index in subgraph_ids
                             continue
                         print("3--")
+                        if pattern_index == 0 or is_subblock:
+                            return False
+                        else:
+                            subgraph_id2layers.pop(layer_id)
+                            continue
                 # Check whether nodes in the subgraph are used by the outer graph (invalid if so)
                 if layer_id in graph.edges_out:
                     if pattern_layer_id not in pattern.edges_out:
                         if not set(pattern_layer.outputs).issubset(
                                 pattern.outputs):
                             # It is still valid if the current pattern layer's outputs are outputs of the pattern
                             print("4--")
+                            if pattern_index == 0 or is_subblock:
+                                return False
+                            else:
+                                subgraph_id2layers.pop(layer_id)
+                                continue
                     else:
                         if len(graph.edges_out[layer_id]) != len(
                                 pattern.edges_out[pattern_layer_id]):
@@ -87,27 +99,49 @@ class PatternMatcher(object):
                         if not set(pattern_layer.outputs).issubset(
                                 pattern.outputs):
                             # It is still valid if the current pattern layer's outputs are outputs of the pattern
                             # print("5--")
+                            if pattern_index == 0 or is_subblock:
+                                return False
+                            else:
+                                subgraph_id2layers.pop(layer_id)
+                                continue
                 # Handling for control-flow layers
                 if layer.kernel == "prim.if" or layer.kernel == "prim.loop":
                     if len(pattern_layer.blocks) != len(layer.blocks):
                         print("6--")
+                        if pattern_index == 0 or is_subblock:
+                            return False
+                        else:
+                            subgraph_id2layers.pop(layer_id)
+                            continue
+                    is_subblock_match = True
                     for i, b in enumerate(pattern_layer.blocks):
-                        match_info = get_subgraph(pattern_layer.blocks[i],
-                                                  layer.blocks[i], 0)
+                        match_info = get_subgraph(
+                            pattern_layer.blocks[i],
+                            layer.blocks[i],
+                            0,
+                            is_subblock=True)
                         if match_info is not False:
                             subgraph_id2layers.update(match_info)
                         else:
                             print("7--")
+                            is_subblock_match = False
+                            break
+                    if not is_subblock_match:
+                        if pattern_index == 0 or is_subblock:
+                            return False
+                        else:
+                            index = list(subgraph_id2layers.keys()).index(layer_id)
+                            for key in list(subgraph_id2layers.keys())[index:]:
+                                subgraph_id2layers.pop(key)
+                            continue
                 pattern_index += 1
                 if pattern_index == len(pattern.layers):
                     return subgraph_id2layers
             else:
-                if pattern_index == 0:
+                if pattern_index == 0 or is_subblock:
                     return False
                 else:
                     continue
             if pattern_index == len(pattern.layers):
                 return subgraph_id2layers
         return False
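The recurring change throughout get_subgraph is the failure handling: on a mismatch the matcher now bails out with False only when it is still on the first pattern layer (pattern_index == 0) or when it is matching a sub-block (the new is_subblock flag); otherwise it pops the offending layer(s) out of subgraph_id2layers and keeps scanning. A compact sketch of that control flow, with a single boolean standing in for all the real edge/kernel/block comparisons:

def get_subgraph_sketch(pattern, graph, is_subblock=False):
    """pattern/graph are lists of (layer_id, ok) pairs; `ok` stands in for
    the real edge and kernel checks."""
    pattern_index = 0
    subgraph = {}
    for layer_id, ok in graph:
        if pattern_index >= len(pattern):
            break
        subgraph[layer_id] = ok
        if not ok:
            # Mismatch: give up immediately at the start or inside a
            # sub-block, otherwise discard this layer and keep scanning.
            if pattern_index == 0 or is_subblock:
                return False
            subgraph.pop(layer_id)
            continue
        pattern_index += 1
        if pattern_index == len(pattern):
            return subgraph
    return False

pattern = [("p0", True), ("p1", True)]
print(get_subgraph_sketch(pattern, [("a", True), ("b", False), ("c", True)]))
# {'a': True, 'c': True}
print(get_subgraph_sketch(pattern, [("a", False)], is_subblock=True))
# False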