PaddlePaddle / X2Paddle
Commit 28f4b2ff, authored on Sep 25, 2020 by jiangjiajun

    update tensorflow module

Parent: 52fdd6c5
Showing 11 changed files with 1,600 additions and 3,061 deletions.
- x2paddle/__init__.py (+0, -12)
- x2paddle/convert.py (+14, -27)
- x2paddle/core/program.py (+47, -0)
- x2paddle/decoder/tf_decoder.py (+6, -0)
- x2paddle/op_mapper/tf_op_mapper.py (+1019, -823)
- x2paddle/op_mapper/tf_op_mapper_nhwc.py (+0, -1115)
- x2paddle/optimizer/tensorflow/__init__.py (+0, -0)
- x2paddle/optimizer/tensorflow/batch_norm.py (+179, -0)
- x2paddle/optimizer/tensorflow/bias.py (+74, -0)
- x2paddle/optimizer/tensorflow/transpose.py (+261, -0)
- x2paddle/optimizer/tf_optimizer.py (+0, -1084)
x2paddle/__init__.py (+0, -12)

```diff
@@ -3,15 +3,3 @@ __version__ = "0.8.5"
 
-from .core.program import PaddleGraph
-
-program = PaddleGraph()
-name_counter = dict()
-
-def gen_name(op_name, var_name):
-    name = "{}.{}".format(op_name, var_name)
-    if name not in name_counter:
-        name_counter[name] = 0
-    else:
-        name_counter[name] += 1
-    name = name + "." + str(name_counter[name])
-    return name
```
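For context, the removed gen_name helper generated graph-unique tensor names by suffixing a per-key counter. A standalone sketch of its behavior (same logic, copied outside the package):

```python
# Standalone copy of the removed helper, to show the naming scheme.
name_counter = dict()

def gen_name(op_name, var_name):
    name = "{}.{}".format(op_name, var_name)
    if name not in name_counter:
        name_counter[name] = 0
    else:
        name_counter[name] += 1
    return name + "." + str(name_counter[name])

print(gen_name("conv2d", "output"))  # conv2d.output.0
print(gen_name("conv2d", "output"))  # conv2d.output.1
```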
x2paddle/convert.py (+14, -27)

```diff
@@ -98,7 +98,7 @@ def arg_parser():
 def tf2paddle(model_path,
               save_dir,
-              without_data_format_optimization,
+              without_data_format_optimization=False,
               define_input_shape=False,
               params_merge=False):
     # check tensorflow installation and version
@@ -117,37 +117,24 @@ def tf2paddle(model_path,
             "[ERROR] Tensorflow is not installed, use \"pip install tensorflow\".")
         return
 
+    from x2paddle import program
     from x2paddle.decoder.tf_decoder import TFDecoder
     from x2paddle.op_mapper.tf_op_mapper import TFOpMapper
-    from x2paddle.op_mapper.tf_op_mapper_nhwc import TFOpMapperNHWC
-    from x2paddle.optimizer.tf_optimizer import TFOptimizer
+    from x2paddle.optimizer.tensorflow.bias import BiasOpt
+    from x2paddle.optimizer.tensorflow.transpose import TransposeOpt
+    from x2paddle.optimizer.tensorflow.batch_norm import BatchNormOpt
 
     print("Now translating model from tensorflow to paddle.")
     model = TFDecoder(model_path, define_input_shape=define_input_shape)
-    if not without_data_format_optimization:
-        mapper = TFOpMapper(model)
-        optimizer = TFOptimizer(mapper)
-        # neccesary optimization
-        optimizer.delete_redundance_code()
-        # optimizer below is experimental
-        optimizer.optimize_elementwise_op()
-        optimizer.merge_activation()
-        optimizer.merge_bias()
-        optimizer.optimize_sub_graph()
-        # optimizer.merge_batch_norm()
-        # optimizer.merge_prelu()
-    else:
-        mapper = TFOpMapperNHWC(model)
-        optimizer = TFOptimizer(mapper)
-        optimizer.delete_redundance_code()
-        optimizer.strip_graph()
-        optimizer.merge_activation()
-        optimizer.merge_bias()
-        optimizer.make_nchw_input_output()
-        optimizer.remove_transpose()
-    mapper.save_inference_model(save_dir, params_merge)
+    mapper = TFOpMapper(model)
+    program.build()
+    bias_opt = BiasOpt()
+    transpose_opt = TransposeOpt()
+    batch_norm_opt = BatchNormOpt()
+    bias_opt.run(program)
+    batch_norm_opt.run(program)
+    transpose_opt.run(program)
+    program.gen_model(save_dir)
 
 
 def caffe2paddle(proto, weight, save_dir, caffe_proto, params_merge=False):
```
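With the NHWC mapper and TFOptimizer gone, the TensorFlow path is now unconditional: decode, map onto the global program, build the graph, then run the three passes in a fixed order (bias folding, then batch-norm fusion, then transpose elimination) before emitting the model. A minimal sketch of driving the rewritten entry point directly; the paths are hypothetical placeholders:

```python
# Hypothetical driver for the rewritten entry point; paths are placeholders.
from x2paddle.convert import tf2paddle

# Per this diff: TFDecoder -> TFOpMapper -> program.build()
# -> BiasOpt.run -> BatchNormOpt.run -> TransposeOpt.run -> program.gen_model
tf2paddle(model_path="frozen_model.pb", save_dir="pd_model")
```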
x2paddle/core/program.py (+47, -0)

The hunk (@@ -99,6 +99,53 @@, class PaddleGraph) adds a del_layer method between add_layer and build:

```python
    def del_layer(self, layer_id):
        layer = self.layers[layer_id]
        outputs = self.edges_out.get(layer_id, [])
        inputs = self.edges_in.get(layer_id, [])

        assert len(inputs) <= 1, "There should be 0 or 1 input for deleted layer."

        if len(inputs) == 0:
            for out in outputs:
                while layer_id in self.edges_in[out]:
                    index = self.edges_in[out].index(layer_id)
                    del self.edges_in[out][index]

                input_keys = list(self.layers[out].inputs.keys())
                for k in input_keys:
                    if self.layers[out].inputs[k] == layer.outputs[0]:
                        del self.layers[out].inputs[k]

            del self.layers[layer_id]
            if layer_id in self.edges_in:
                del self.edges_in[layer_id]
            if layer_id in self.edges_out:
                del self.edges_out[layer_id]
            return

        # Rewire every output layer's input edges to point at this
        # layer's own input layer
        for out in outputs:
            for i in range(len(self.edges_in[out])):
                if self.edges_in[out][i] == layer_id:
                    self.edges_in[out][i] = inputs[0]

        # Hand the output layers over to the input layer's fan-out
        replace_index = self.edges_out[inputs[0]].index(layer_id)
        del self.edges_out[inputs[0]][replace_index]
        for i, out in enumerate(outputs):
            self.edges_out[inputs[0]].insert(replace_index + i, out)
            for k, v in self.layers[out].inputs.items():
                if v == layer.outputs[0]:
                    self.layers[out].inputs[k] = list(layer.inputs.values())[0]

        del self.layers[layer_id]
        if layer_id in self.edges_out:
            del self.edges_out[layer_id]
        if layer_id in self.edges_in:
            del self.edges_in[layer_id]
```
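del_layer assumes at most one input edge: a zero-input layer is simply detached from its consumers and dropped, while a single-input layer is spliced out by rewiring every consumer to its producer. A toy sketch of the single-input splice on plain dict-of-list edge maps (node names hypothetical, not the real PaddleGraph):

```python
# Toy splice of node "b" out of a -> b -> c, mirroring del_layer's rewiring.
edges_in = {"b": ["a"], "c": ["b"]}
edges_out = {"a": ["b"], "b": ["c"]}

deleted, (src,) = "b", edges_in["b"]
# Point every consumer of "b" at its producer "a".
for out in edges_out[deleted]:
    edges_in[out] = [src if i == deleted else i for i in edges_in[out]]
# Replace "b" in the producer's fan-out with "b"'s consumers.
idx = edges_out[src].index(deleted)
edges_out[src][idx:idx + 1] = edges_out[deleted]
del edges_in[deleted], edges_out[deleted]

print(edges_in)   # {'c': ['a']}
print(edges_out)  # {'a': ['c']}
```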
x2paddle/decoder/tf_decoder.py (+6, -0)

The hunk (@@ -89,6 +89,12 @@, class TFGraphNode) adds a name property between the value property's `return tensor_util.MakeNdarray(field)` and the existing get_attr method:

```python
    @property
    def name(self):
        if hasattr(self, 'index'):
            return self.layer_name + "_p{}".format(self.index)
        return self.layer_name
```
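The new property disambiguates nodes that carry an output index (e.g., one output of a multi-output op) by suffixing _p{index}. A standalone sketch of the naming rule (mock class, not the real TFGraphNode):

```python
# Mock illustrating the _p{index} suffix rule added to TFGraphNode.name.
class Node:
    def __init__(self, layer_name, index=None):
        self.layer_name = layer_name
        if index is not None:
            self.index = index

    @property
    def name(self):
        if hasattr(self, 'index'):
            return self.layer_name + "_p{}".format(self.index)
        return self.layer_name

print(Node("lstm/split").name)     # lstm/split
print(Node("lstm/split", 1).name)  # lstm/split_p1
```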
x2paddle/op_mapper/tf_op_mapper.py (+1019, -823): diff collapsed, too large to display here.

x2paddle/op_mapper/tf_op_mapper_nhwc.py (deleted, 100644 → 0, -1115): diff collapsed, too large to display here.
x2paddle/optimizer/tensorflow/__init__.py (new file, 0 → 100644): empty package marker.
x2paddle/optimizer/tensorflow/batch_norm.py (new file, +179)

```python
import copy
from collections import OrderedDict
from x2paddle.core.program import PaddleLayer


class BatchNormOpt:
    def __init__(self):
        pass

    def run(self, graph):
        layers = copy.deepcopy(graph.layers)
        for layer_id, layer in layers.items():
            if layer.kernel != "fluid.layers.elementwise_add":
                continue
            axis = layer.attrs.get('axis', -1)
            if axis != -1 and axis != 3:
                continue

            input_ids0 = graph.edges_in[layer_id]
            mul_layer0 = graph.layers[input_ids0[0]]
            sub_layer0 = graph.layers[input_ids0[1]]
            if mul_layer0.kernel != "fluid.layers.elementwise_mul":
                continue
            if sub_layer0.kernel != "fluid.layers.elementwise_sub":
                continue
            axis = mul_layer0.attrs.get('axis', -1)
            if axis != -1 and axis != 3:
                continue
            axis = sub_layer0.attrs.get('axis', -1)
            if axis != -1 and axis != 0:
                continue
            if len(graph.edges_out.get(input_ids0[0], [])) != 1:
                continue
            if len(graph.edges_out.get(input_ids0[1], [])) != 1:
                continue

            input_ids1 = graph.edges_in[input_ids0[0]]
            nhwc_input = graph.layers[input_ids1[0]]
            mul_layer1 = graph.layers[input_ids1[1]]
            if mul_layer1.kernel != "fluid.layers.elementwise_mul":
                continue
            axis = mul_layer1.attrs.get('axis', -1)
            if axis != -1 and axis != 0:
                continue
            if len(graph.edges_out.get(input_ids1[1], [])) != 2:
                continue

            input_ids2 = graph.edges_in[input_ids0[1]]
            beta = graph.layers[input_ids2[0]]
            mul_layer2 = graph.layers[input_ids2[1]]
            if beta.kernel != "fluid.layers.create_parameter":
                continue
            axis = mul_layer2.attrs.get('axis', -1)
            if axis != -1 and axis != 0:
                continue
            if len(graph.edges_out.get(input_ids2[0], [])) != 1:
                continue
            if len(graph.edges_out.get(input_ids2[1], [])) != 1:
                continue
            if beta.outputs[0] not in graph.parameters:
                continue
            beta_shape = graph.parameters[beta.outputs[0]].shape
            if len(beta_shape) != 1:
                continue

            input_ids3 = graph.edges_in[input_ids2[1]]
            mean = graph.layers[input_ids3[0]]
            mul_layer3 = graph.layers[input_ids3[1]]
            if mean.kernel != "fluid.layers.create_parameter":
                continue
            axis = mul_layer3.attrs.get('axis', -1)
            if axis != -1 and axis != 0:
                continue
            if len(graph.edges_out.get(input_ids3[0], [])) != 1:
                continue
            if len(graph.edges_out.get(input_ids3[1], [])) != 2:
                continue
            if mul_layer3.id != mul_layer1.id:
                continue
            if mean.outputs[0] not in graph.parameters:
                continue
            mean_shape = graph.parameters[mean.outputs[0]].shape
            if mean_shape != beta_shape:
                continue

            input_ids4 = graph.edges_in[input_ids3[1]]
            rsqrt_layer = graph.layers[input_ids4[0]]
            gamma = graph.layers[input_ids4[1]]
            if rsqrt_layer.kernel != "fluid.layers.rsqrt":
                continue
            if gamma.kernel != "fluid.layers.create_parameter":
                continue
            if len(graph.edges_out.get(input_ids4[0], [])) != 1:
                continue
            if len(graph.edges_out.get(input_ids4[1], [])) != 1:
                continue
            if gamma.outputs[0] not in graph.parameters:
                continue
            gamma_shape = graph.parameters[gamma.outputs[0]].shape
            if gamma_shape != beta_shape:
                continue

            input_ids5 = graph.edges_in[input_ids4[0]]
            add_layer = graph.layers[input_ids5[0]]
            if add_layer.kernel != "fluid.layers.elementwise_add":
                continue
            axis = add_layer.attrs.get('axis', -1)
            if axis != -1 and axis != 0:
                continue
            if len(graph.edges_out.get(input_ids5[0], [])) != 1:
                continue

            input_ids6 = graph.edges_in[input_ids5[0]]
            variance = graph.layers[input_ids6[0]]
            other = graph.layers[input_ids6[1]]
            if variance.kernel != "fluid.layers.create_parameter":
                continue
            if other.kernel != "fluid.layers.create_parameter":
                continue
            if len(graph.edges_out.get(input_ids6[0], [])) != 1:
                continue
            if len(graph.edges_out.get(input_ids6[1], [])) != 1:
                continue
            if variance.outputs[0] not in graph.parameters:
                continue
            variance_shape = graph.parameters[variance.outputs[0]].shape
            if variance_shape != beta_shape:
                continue
            if other.outputs[0] not in graph.parameters:
                continue
            if graph.parameters[other.outputs[0]].size != 1:
                continue

            ids = set([
                layer_id, mul_layer0.id, sub_layer0.id, mul_layer1.id, beta.id,
                mul_layer2.id, mean.id, mul_layer2.id, rsqrt_layer.id,
                gamma.id, add_layer.id, variance.id, other.id
            ])
            for id in ids:
                del graph.layers[id]
                if id in graph.edges_in:
                    del graph.edges_in[id]
                if id in graph.edges_out:
                    del graph.edges_out[id]

            copy_layers = copy.deepcopy(graph.layers)
            graph.layers = OrderedDict()
            for k, v in copy_layers.items():
                if k != nhwc_input.id:
                    graph.layers[k] = v
                    continue
                graph.layers[k] = v
                transpose0 = PaddleLayer(
                    id='{}_1'.format(k),
                    kernel="fluid.layers.transpose",
                    inputs={"x": v.outputs[0]},
                    outputs=["transpose_for_bn"],
                    perm=[0, 3, 1, 2])
                bn = PaddleLayer(
                    id='{}_2'.format(k),
                    kernel="fluid.layers.batch_norm",
                    inputs={"input": "transpose_for_bn"},
                    outputs=layer.outputs,
                    epsilon=graph.parameters[other.outputs[0]],
                    param_attr="'{}'".format(gamma.outputs[0]),
                    bias_attr="'{}'".format(beta.outputs[0]),
                    moving_mean_name="'{}'".format(mean.outputs[0]),
                    moving_variance_name="'{}'".format(variance.outputs[0]))
                transpose1 = PaddleLayer(
                    id=layer_id,
                    kernel="fluid.layers.transpose",
                    inputs={"x": layer.outputs[0]},
                    outputs=layer.outputs,
                    perm=[0, 2, 3, 1])
                graph.layers[transpose0.id] = transpose0
                graph.layers[bn.id] = bn
                graph.layers[transpose1.id] = transpose1
            graph.build()
```
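The pattern this pass matches is the decomposed form of inference-mode batch normalization that TensorFlow emits as separate mul/sub/add/rsqrt layers: y = x * (gamma * rsqrt(variance + epsilon)) + (beta - mean * (gamma * rsqrt(variance + epsilon))). A quick numpy sanity check (sketch, not part of the diff) that this decomposition equals the textbook formula computed by the fused fluid.layers.batch_norm it is rewritten into:

```python
# Numerical check: the matched mul/sub/add decomposition equals
# the standard inference batch-norm formula.
import numpy as np

rng = np.random.default_rng(0)
x = rng.standard_normal((2, 4, 4, 3))           # NHWC input
gamma, beta = rng.standard_normal(3), rng.standard_normal(3)
mean, var = rng.standard_normal(3), rng.random(3) + 0.5
eps = 1e-5

scale = gamma / np.sqrt(var + eps)              # gamma * rsqrt(var + eps)
decomposed = x * scale + (beta - mean * scale)  # the pattern matched above
reference = gamma * (x - mean) / np.sqrt(var + eps) + beta

print(np.allclose(decomposed, reference))       # True
```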
x2paddle/optimizer/tensorflow/bias.py (new file, +74)

```python
import copy


class BiasOpt:
    def __init__(self):
        self.conv_layers = [
            'fluid.layers.conv2d', 'fluid.layers.conv2d_transpose'
        ]
        self.act_layers = [
            'fluid.layers.relu', 'fluid.layers.relu6', 'fluid.layers.sigmoid',
            'fluid.layers.exp', 'fluid.layers.tanh', 'fluid.layers.softplus',
            'fluid.layers.leaky_relu'
        ]

    def run(self, graph):
        layers = copy.deepcopy(graph.layers)
        for layer_id, layer in layers.items():
            if layer.kernel in self.conv_layers or layer.kernel == "fluid.layers.transpose":
                if len(graph.edges_out.get(layer_id, [])) > 1:
                    continue
                if layer.outputs[0] in graph.outputs:
                    continue

                out_layer_id = graph.edges_out[layer_id][0]
                if graph.layers[out_layer_id].kernel != "fluid.layers.elementwise_add":
                    continue
                if graph.layers[out_layer_id].attrs.get('axis', -1) != -1:
                    continue

                in_layer_id = graph.edges_in[out_layer_id]
                bias_layer_id = in_layer_id[1 - in_layer_id.index(layer_id)]
                if graph.layers[bias_layer_id].kernel != "fluid.layers.create_parameter":
                    continue

                bias_layer = graph.layers[bias_layer_id]
                if len(bias_layer.attrs['shape']) != 1:
                    continue
                if len(graph.edges_out[bias_layer_id]) != 1:
                    continue

                if layer.kernel == "fluid.layers.transpose":
                    if layer.attrs['perm'] != [0, 2, 3, 1]:
                        continue
                    in_layer_id = graph.edges_in[layer_id][0]
                    if graph.layers[in_layer_id].kernel not in self.conv_layers:
                        continue
                    if graph.layers[in_layer_id].attrs['bias_attr'] != False:
                        continue
                    if graph.layers[in_layer_id].outputs[0] in graph.outputs:
                        continue
                    if len(graph.edges_out[in_layer_id]) != 1:
                        continue
                    graph.layers[in_layer_id].attrs[
                        'bias_attr'] = bias_layer.attrs['name']
                else:
                    graph.layers[layer_id].attrs[
                        'bias_attr'] = bias_layer.attrs['name']

                bias_add_outs = graph.edges_out.get(out_layer_id, [])
                bias_add_output = graph.layers[out_layer_id].outputs[0]
                graph.del_layer(bias_layer_id)
                graph.del_layer(out_layer_id)

                for out in bias_add_outs:
                    for k, v in graph.layers[out].inputs.items():
                        if v == layer.outputs[0]:
                            graph.layers[out].inputs[k] = bias_add_output
                graph.layers[layer_id].outputs[0] = bias_add_output

                if layer.kernel == "fluid.layers.transpose":
                    in_layer_id = graph.edges_in[layer_id][0]
                    graph.layers[in_layer_id].outputs[0] = bias_add_output
                    graph.layers[layer_id].inputs['x'] = bias_add_output
```
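The pass folds a free-standing bias add back into the producing convolution: a conv2d created with bias_attr=False whose output (possibly through an NHWC transpose) feeds an elementwise_add with a rank-1 create_parameter becomes a single conv2d whose bias_attr names that parameter. A before/after sketch (layer and parameter names hypothetical):

```python
# Before (as emitted by the op mapper; names are placeholders):
#   conv = fluid.layers.conv2d(x, num_filters=64, filter_size=3,
#                              bias_attr=False)
#   out = fluid.layers.elementwise_add(conv, bias)  # bias: shape-(64,) parameter
#
# After BiasOpt.run(graph), the add and the parameter layer are deleted:
#   out = fluid.layers.conv2d(x, num_filters=64, filter_size=3,
#                             bias_attr='conv_bias')  # name of the parameter
```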
x2paddle/optimizer/tensorflow/transpose.py (new file, +261)

```python
import copy
import sys


class TransposeOpt:
    def __init__(self):
        self.image_layers = [
            'fluid.layers.conv2d', 'fluid.layers.batch_norm',
            'fluid.layers.conv2d_transpose', 'fluid.layers.resize_nearest',
            'fluid.layers.resize_bilinear', 'fluid.layers.pool2d',
            'fluid.layers.pad2d'
        ]
        self.direct_layers = [
            'fluid.layers.relu', 'fluid.layers.relu6', 'fluid.layers.abs',
            'fluid.layers.sigmoid', 'fluid.layers.exp', 'fluid.layers.rsqrt',
            'fluid.layers.swish_f32', 'fluid.layers.tanh',
            'fluid.layers.softplus', 'fluid.layers.leaky_relu',
            'fluid.layers.floor', 'fluid.layers.erf', 'fluid.layers.swish'
        ]
        self.elementwise_layers = [
            'fluid.layers.elementwise_add', 'fluid.layers.elementwise_sub',
            'fluid.layers.elementwise_mul', 'fluid.layers.elementwise_div'
        ]
        # self.reduce_layers = []
        self.reduce_layers = [
            'fluid.layers.reduce_mean', 'fluid.layers.reduce_all',
            'fluid.layers.reduce_max', 'fluid.layers.reduce_any',
            'fluid.layers.reduce_sum', 'fluid.layers.reduce_prod'
        ]

    def get_transpose_num(self, graph):
        count = 0
        for layer_id, layer in graph.layers.items():
            if layer.kernel == "fluid.layers.transpose":
                count += 1
        return count

    def run(self, graph):
        total_layer_num = len(graph.layers)
        scanned_layers = set()
        optimized_transpose_layers = list()
        optimized_reduce_layers = list()
        optimized_concat_layers = list()
        optimized_elementwise_layers = list()

        def strip_transpose(_graph):
            layers = copy.deepcopy(_graph.layers)
            for layer_id, layer in layers.items():
                if layer_id in scanned_layers:
                    continue
                scanned_layers.add(layer_id)
                percent = round(len(scanned_layers) / total_layer_num * 100, 2)
                sys.stderr.write(
                    "\rOptimize Transpose Layers...{}%".format(percent))
                if layer.kernel != "fluid.layers.transpose":
                    continue
                if layer.attrs["perm"] != [0, 2, 3, 1]:
                    continue
                transpose_layers = list()
                propagate_layers = list()
                reduce_layers = list()
                concat_layers = list()
                # elementwise_layers only stores layers of the
                # shape(4) + shape(1) form
                elementwise_layers = list()
                can_be_optimized = True

                for out in _graph.edges_out.get(layer_id, []):
                    if _graph.layers[out].kernel == "fluid.layers.transpose":
                        if _graph.layers[out].attrs["perm"] != [0, 3, 1, 2]:
                            can_be_optimized = False
                            break
                        transpose_layers.append(out)
                    elif _graph.layers[out].kernel in self.elementwise_layers:
                        propagate_layers.append(out)
                    elif _graph.layers[out].kernel in self.direct_layers:
                        if _graph.layers[out].outputs[0] in _graph.outputs:
                            can_be_optimized = False
                            break
                        propagate_layers.append(out)
                    elif _graph.layers[out].kernel in self.reduce_layers:
                        if _graph.layers[out].outputs[0] in _graph.outputs:
                            can_be_optimized = False
                            break
                        if not _graph.layers[out].attrs.get('keep_dim', False):
                            can_be_optimized = False
                            break
                        propagate_layers.append(out)
                        reduce_layers.append(out)
                    elif _graph.layers[out].kernel == "fluid.layers.concat":
                        if _graph.layers[out].outputs[0] in _graph.outputs:
                            can_be_optimized = False
                            break
                        propagate_layers.append(out)
                        concat_layers.append(out)
                    else:
                        can_be_optimized = False
                        break

                visited_layers = set()
                while len(propagate_layers) > 0 and can_be_optimized:
                    current_id = propagate_layers.pop(0)
                    visited_layers.add(current_id)
                    for out in _graph.edges_out.get(current_id, []):
                        if _graph.layers[out].kernel == "fluid.layers.transpose":
                            if _graph.layers[out].attrs["perm"] != [0, 3, 1, 2]:
                                can_be_optimized = False
                                break
                            transpose_layers.append(out)
                        elif _graph.layers[out].kernel in self.elementwise_layers:
                            if _graph.layers[out].outputs[0] in _graph.outputs:
                                can_be_optimized = False
                                break
                            if out not in visited_layers:
                                propagate_layers.append(out)
                        elif _graph.layers[out].kernel in self.direct_layers:
                            if _graph.layers[out].outputs[0] in _graph.outputs:
                                can_be_optimized = False
                                break
                            if out not in visited_layers:
                                propagate_layers.append(out)
                        elif _graph.layers[out].kernel in self.reduce_layers:
                            if _graph.layers[out].outputs[0] in _graph.outputs:
                                can_be_optimized = False
                                break
                            if not _graph.layers[out].attrs.get('keep_dim',
                                                                False):
                                can_be_optimized = False
                                break
                            if out not in visited_layers:
                                propagate_layers.append(out)
                                reduce_layers.append(out)
                        elif _graph.layers[out].kernel == "fluid.layers.concat":
                            if _graph.layers[out].outputs[0] in _graph.outputs:
                                can_be_optimized = False
                                break
                            if out not in visited_layers:
                                propagate_layers.append(out)
                                concat_layers.append(out)
                        else:
                            can_be_optimized = False
                            break
                    for ipt in _graph.edges_in.get(current_id, []):
                        if _graph.layers[current_id].kernel in self.elementwise_layers:
                            try:
                                x_shape = _graph.layers[
                                    current_id].input_shapes['x']
                                y_shape = _graph.layers[
                                    current_id].input_shapes['y']
                                if _graph.layers[ipt].outputs[0] == _graph.layers[
                                        current_id].inputs['x']:
                                    if len(x_shape) <= 1:
                                        elementwise_layers.append(current_id)
                                        continue
                                elif _graph.layers[ipt].outputs[0] == _graph.layers[
                                        current_id].inputs['y']:
                                    if len(y_shape) <= 1:
                                        elementwise_layers.append(current_id)
                                        continue
                                else:
                                    raise Exception(
                                        "Unexpected situation happened while optimizing transpose"
                                    )
                            except Exception as e:
                                can_be_optimized = False
                                break
                        if _graph.layers[ipt].kernel == "fluid.layers.transpose":
                            if _graph.layers[ipt].attrs["perm"] != [0, 2, 3, 1]:
                                can_be_optimized = False
                                break
                            if ipt not in visited_layers:
                                transpose_layers.append(ipt)
                        elif _graph.layers[ipt].kernel in self.elementwise_layers:
                            if _graph.layers[ipt].outputs[0] in _graph.outputs:
                                can_be_optimized = False
                                break
                            if ipt not in visited_layers:
                                propagate_layers.append(ipt)
                        elif _graph.layers[ipt].kernel in self.direct_layers:
                            if _graph.layers[ipt].outputs[0] in _graph.outputs:
                                can_be_optimized = False
                                break
                            if ipt not in visited_layers:
                                propagate_layers.append(ipt)
                        elif _graph.layers[ipt].kernel in self.reduce_layers:
                            if _graph.layers[ipt].outputs[0] in _graph.outputs:
                                can_be_optimized = False
                                break
                            if not _graph.layers[ipt].attrs.get('keep_dim',
                                                                False):
                                can_be_optimized = False
                                break
                            if ipt not in visited_layers:
                                propagate_layers.append(ipt)
                                reduce_layers.append(ipt)
                        elif _graph.layers[ipt].kernel == "fluid.layers.concat":
                            if _graph.layers[ipt].outputs[0] in _graph.outputs:
                                can_be_optimized = False
                                break
                            if ipt not in visited_layers:
                                propagate_layers.append(ipt)
                                concat_layers.append(ipt)
                        else:
                            can_be_optimized = False
                            break
                    if not can_be_optimized:
                        break
                if not can_be_optimized:
                    continue

                transpose_layers.append(layer_id)
                transpose_layers = list(set(transpose_layers))
                for l in transpose_layers:
                    if graph.layers[l].outputs[0] in graph.outputs:
                        can_be_optimized = False
                        break
                if not can_be_optimized:
                    continue

                for l in transpose_layers:
                    _graph.del_layer(l)

                optimized_transpose_layers.extend(transpose_layers)
                optimized_reduce_layers.extend(reduce_layers)
                optimized_concat_layers.extend(concat_layers)
                optimized_elementwise_layers.extend(elementwise_layers)
                return True
            return False

        before_transpose_num = self.get_transpose_num(graph)
        opt_graph = copy.deepcopy(graph)
        total_layer_num = len(opt_graph.layers)
        while strip_transpose(opt_graph):
            pass

        for layer_id in list(set(optimized_transpose_layers)):
            graph.del_layer(layer_id)
        for layer_id in list(set(optimized_reduce_layers)):
            dim = graph.layers[layer_id].attrs.get('dim', None)
            if dim is not None:
                for i in range(len(dim)):
                    dim[i] = [0, 2, 3, 1][dim[i]]
                graph.layers[layer_id].attrs['dim'] = dim
        for layer_id in list(set(optimized_concat_layers)):
            axis = graph.layers[layer_id].attrs.get('axis', 0)
            graph.layers[layer_id].attrs['axis'] = [0, 2, 3, 1][axis]
        for layer_id in list(set(optimized_elementwise_layers)):
            axis = graph.layers[layer_id].attrs.get('axis', -1)
            graph.layers[layer_id].attrs['axis'] = [0, 2, 3, 1][axis]

        current_transpose_num = self.get_transpose_num(graph)
        print(
            "\nTranspose layers optimized, before: transpose_num={}, "
            "after: transpose_num={}".format(before_transpose_num,
                                             current_transpose_num))
```
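The pass relies on two facts: perm=[0, 2, 3, 1] (NCHW to NHWC) and perm=[0, 3, 1, 2] (NHWC to NCHW) are mutual inverses, so a pair separated only by layout-agnostic layers can be deleted; and once the pair is gone, axis-bearing layers in between must have their axes remapped through the same permutation. A small numpy check of both facts (illustrative sketch, not part of the diff):

```python
import numpy as np

x = np.arange(24.0).reshape(1, 2, 3, 4)               # NCHW tensor
nhwc = x.transpose(0, 2, 3, 1)                        # perm=[0, 2, 3, 1]
print(np.array_equal(nhwc.transpose(0, 3, 1, 2), x))  # True: the pair cancels

# Axis remapping used for reduce/concat once the transposes are removed:
# an axis a in NHWC coordinates corresponds to axis [0, 2, 3, 1][a] in NCHW.
for a in range(4):
    lhs = nhwc.sum(axis=a, keepdims=True).transpose(0, 3, 1, 2)
    rhs = x.sum(axis=[0, 2, 3, 1][a], keepdims=True)
    print(np.array_equal(lhs, rhs))                   # True for every axis
```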
x2paddle/optimizer/tf_optimizer.py (deleted, 100644 → 0, -1084): diff collapsed, too large to display here.