PaddlePaddle / X2Paddle, commit fb07475f (unverified)

Merge pull request #451 from SunAhong1993/paddle-2.0

modify the tf static

Authored by Jason on Dec 10, 2020; committed via GitHub on Dec 10, 2020.
Parents: 58f5dc51, e748c25e

Showing 21 changed files with 1044 additions and 754 deletions (+1044 -754).
Files changed:

    x2paddle/convert.py                                                 +3    -12
    x2paddle/op_mapper/dygraph/tf2paddle/tf_op_mapper.py                +21   -1
    x2paddle/op_mapper/static/caffe2paddle/caffe_op_mapper.py           +1    -3
    x2paddle/op_mapper/static/tf2paddle/tf_op_mapper.py                 +283  -306
    x2paddle/optimizer/elimination/static/__init__.py                   +16   -0
    x2paddle/optimizer/elimination/static/transpose_eliminate_pass.py   +33   -0
    x2paddle/optimizer/elimination/static/transpose_elimination.py      +77   -47
    x2paddle/optimizer/fusion/dygraph/conv2d_add_fuser.py               +0    -4
    x2paddle/optimizer/fusion/static/__init__.py                        +8    -1
    x2paddle/optimizer/fusion/static/bn_scale_fuser.py                  +0    -1
    x2paddle/optimizer/fusion/static/conv2d_add_fuse_pass.py            +33   -0
    x2paddle/optimizer/fusion/static/conv2d_add_fuser.py                +121  -0
    x2paddle/optimizer/fusion/static/prelu_fuse_pass.py                 +33   -0
    x2paddle/optimizer/fusion/static/prelu_fuser.py                     +139  -0
    x2paddle/optimizer/fusion/static/tf_batchnorm_fuse_pass.py          +33   -0
    x2paddle/optimizer/fusion/static/tf_batchnorm_fuser.py              +227  -0
    x2paddle/optimizer/optimizer.py                                     +16   -8
    x2paddle/optimizer/tensorflow/__init__.py                           +0    -0
    x2paddle/optimizer/tensorflow/batch_norm.py                         +0    -178
    x2paddle/optimizer/tensorflow/bias.py                               +0    -70
    x2paddle/optimizer/tensorflow/prelu.py                              +0    -123
x2paddle/convert.py

@@ -132,18 +132,9 @@ def tf2paddle(model_path,
     graph_opt = GraphOptimizer(source_frame="tf", paddle_type=paddle_type)
     graph_opt.optimize(mapper.paddle_graph)
-    else:
-        from x2paddle.optimizer.tensorflow.bias import BiasOpt
-        from x2paddle.optimizer.tensorflow.transpose import TransposeOpt
-        from x2paddle.optimizer.tensorflow.batch_norm import BatchNormOpt
-        from x2paddle.optimizer.tensorflow.prelu import PReLUOpt
-        bias_opt = BiasOpt()
-        transpose_opt = TransposeOpt()
-        batch_norm_opt = BatchNormOpt()
-        prelu_opt = PReLUOpt()
-        bias_opt.run(mapper.paddle_graph)
-        batch_norm_opt.run(mapper.paddle_graph)
-        prelu_opt.run(mapper.paddle_graph)
-        transpose_opt.run(mapper.paddle_graph)
-        from x2paddle.optimizer.optimizer import GraphOptimizer
-        graph_opt = GraphOptimizer(source_frame="tf", paddle_type=paddle_type)
-        graph_opt.optimize(mapper.paddle_graph)
     mapper.paddle_graph.gen_model(save_dir)
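With this change both paddle_type values go through the same pass-based GraphOptimizer instead of the removed ad-hoc TF optimizers (BiasOpt, BatchNormOpt, PReLUOpt, TransposeOpt). A minimal sketch of the resulting flow, using only names visible in the diff (the surrounding tf2paddle() plumbing is assumed):

    from x2paddle.optimizer.optimizer import GraphOptimizer

    graph_opt = GraphOptimizer(source_frame="tf", paddle_type="static")
    graph_opt.optimize(mapper.paddle_graph)   # applies the registered fuse/eliminate passes
    mapper.paddle_graph.gen_model(save_dir)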
x2paddle/op_mapper/dygraph/tf2paddle/tf_op_mapper.py

@@ -285,7 +285,6 @@ class TFOpMapper(OpMapper):
         layer_attrs["dtype"] = string(input_value.dtype)
         layer_attrs["fill_value"] = input_value.value
         self.paddle_graph.add_layer(
             "paddle.full",
             inputs=inputs,

@@ -579,6 +578,9 @@ class TFOpMapper(OpMapper):
             outputs=[node.name],
             perm=[0, 2, 3, 1])

+    def FusedBatchNormV3(self, node):
+        self.FusedBatchNorm(node)
+
     def Mean(self, node):
         input = self.graph.get_input_node(node, 0)
         reduce_idx = self.graph.get_input_node(node, 1)

@@ -930,6 +932,23 @@ class TFOpMapper(OpMapper):
             outputs=[node.name],
             axis=axis)

+    def Concat(self, node):
+        inputs_list = list()
+        for i in range(1, len(node.inputs)):
+            inputs_list.append(self.graph.get_input_node(node, i))
+        axis = self.graph.get_input_node(node, 0)
+        assert axis.layer_type == "Const", "axis for ConcatV2 must be type Const"
+        axis = axis.value
+        if axis < 0:
+            axis += len(inputs_list[0].out_shapes[0])
+        input_names = [i.name for i in inputs_list]
+        self.paddle_graph.add_layer(
+            kernel="paddle.concat",
+            inputs={"x": input_names},
+            outputs=[node.name],
+            axis=axis)
+
     def AddN(self, node):
         inputs_list = list()
         for i in range(len(node.inputs) - 1):

@@ -1400,6 +1419,7 @@ class TFOpMapper(OpMapper):
         inputs = {"x": x.name, "y": y.name}
         x_shape = x.out_shapes[0]
         y_shape = y.out_shapes[0]
+        # TODO(syf)
         layer_id = self.paddle_graph.add_layer(
             "fluid.layers.elementwise_sub", inputs=inputs, outputs=[node.name])
         self.paddle_graph.layers[layer_id].input_shapes = {"x": x_shape, "y": y_shape}
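The added Concat handler mirrors ConcatV2 except for where the axis lives: TensorFlow's older Concat op takes the axis as its first input, while ConcatV2 takes it as the last input, which is why this method reads input 0. Both reduce to one paddle call; a standalone equivalent (paddle 2.0 assumed):

    import paddle
    a = paddle.ones([2, 3])
    b = paddle.zeros([2, 3])
    out = paddle.concat([a, b], axis=0)   # what the emitted paddle.concat layer executes
    print(out.shape)                      # [4, 3]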
x2paddle/op_mapper/static/caffe2paddle/caffe_op_mapper.py

@@ -457,7 +457,6 @@ class CaffeOpMapper(OpMapper):
     def ReLU(self, node):
         """
        :param node:
        :return:
        """

@@ -974,4 +973,3 @@ class CaffeOpMapper(OpMapper):
             kernel=op_info,
             inputs={"x": self.get_input_name(input)},
             outputs=[node.layer_name])
\ No newline at end of file
x2paddle/op_mapper/static/tf2paddle/tf_op_mapper.py

@@ -49,28 +49,29 @@ def get_same_padding(in_size, kernel_size, stride):
 class TFOpMapper(OpMapper):
     directly_map_ops = {
-        'Relu': ['relu'],
-        'Relu6': ['relu6'],
-        'Abs': ['abs'],
-        'Sigmoid': ['sigmoid'],
-        'Exp': ['exp'],
-        'Rsqrt': ['rsqrt'],
-        'Sqrt': ['sqrt'],
-        'swish_f32': ['swish'],
-        'Tanh': ['tanh'],
-        'Softplus': ['softplus'],
-        'LeakyRelu': ['leaky_relu', {'alpha': 'alpha'}],
-        'Floor': ['floor'],
-        'Erf': ['erf'],
-        'Square': ['square']
+        'Relu': ['paddle.nn.functional.relu'],
+        'Relu6': ['paddle.nn.functional.relu6'],
+        'Abs': ['paddle.abs'],
+        'Sigmoid': ['paddle.nn.functional.sigmoid'],
+        'Softmax': ['paddle.nn.functional.softmax'],
+        'Exp': ['paddle.exp'],
+        'Rsqrt': ['paddle.rsqrt'],
+        'Sqrt': ['paddle.sqrt'],
+        'swish_f32': ['paddle.nn.functional.swish'],
+        'Tanh': ['paddle.tanh'],
+        'Softplus': ['paddle.nn.functional.softplus'],
+        'LeakyRelu': ['paddle.nn.functional.leaky_relu', dict(alpha='negative_slope')],
+        'Floor': ['paddle.floor'],
+        'Erf': ['paddle.erf'],
+        'Square': ['paddle.square']
     }
     elementwise_ops = {
         'Add': 'paddle.add',
         'AddV2': 'paddle.add',
         'RealDiv': 'paddle.divide',
         'DivNoNan': 'paddle.divide',
         # TODO (syf): replace
         'Sub': 'fluid.layers.elementwise_sub',
         'Maximum': 'paddle.maximum',
         'Minimum': 'paddle.minimum',
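Each entry now stores the fully-qualified paddle 2.0 kernel plus an optional TF-to-Paddle attribute rename (e.g. LeakyRelu's alpha becomes negative_slope), instead of a bare name that was later prefixed with "fluid.layers.". A minimal sketch of how directly_map consumes an entry; the surrounding OpMapper plumbing is assumed, but the kernel/attr handling matches the hunk below at @@ -161:

    op_info = self.directly_map_ops[node.layer_type]
    input = self.graph.get_input_node(node, 0)
    attr = dict()
    if len(op_info) > 1:
        for tf_param_name, pd_param_name in op_info[1].items():
            attr[pd_param_name] = node.get_attr(tf_param_name)
    self.paddle_graph.add_layer(
        kernel=op_info[0],            # e.g. "paddle.nn.functional.leaky_relu"
        inputs={"x": input.name},
        outputs=[node.name],
        **attr)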
@@ -161,7 +162,7 @@ class TFOpMapper(OpMapper):
             attr[pd_param_name] = tf_param

         self.paddle_graph.add_layer(
-            kernel="fluid.layers.{}".format(op_info[0]),
+            kernel=op_info[0],
             inputs={"x": input.name},
             outputs=[node.name],
             **attr)

@@ -186,7 +187,7 @@ class TFOpMapper(OpMapper):
                 node.layer_name)
         dtype = node.dtype
         self.paddle_graph.add_layer(
-            kernel="fluid.data",
+            kernel="paddle.static.data",
             inputs={},
             outputs=[node.name],
             dtype=string(dtype),

@@ -197,30 +198,29 @@ class TFOpMapper(OpMapper):
         shape = node.out_shapes[0]
         dtype = node.dtype
         value = node.value
         initializer = "Constant(0.0)"
         if len(shape) == 0:
             assert value.size == 1, "Unexpected situation happend"
             shape = [1]
             if value == float('inf'):
                 value = "float('inf')"
             self.paddle_graph.add_layer(
-                kernel="fluid.layers.fill_constant",
+                kernel="paddle.full",
                 inputs={},
                 outputs=[node.name],
                 dtype=string(dtype),
                 shape=[1],
-                value=value)
+                fill_value=value)
             return

         self.params[node.name] = node.value
         self.paddle_graph.add_layer(
-            kernel="fluid.layers.create_parameter",
+            kernel="paddle.static.create_parameter",
             inputs={},
             outputs=[node.name],
             dtype=string(dtype),
             shape=shape,
             name=string(node.name),
-            default_initializer=initializer)
+            default_initializer="paddle.nn.initializer.Constant(value=0.0)")

     def Transpose(self, node):
         input = self.graph.get_node(node.layer.input[0])
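For reference, the scalar branch above now emits the paddle 2.0 call below; note the keyword rename from fluid's value= to fill_value= (standalone example, not part of the diff):

    import paddle
    scalar = paddle.full(shape=[1], fill_value=2.0, dtype="float32")
    print(float(scalar))   # 2.0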
@@ -229,7 +229,7 @@ class TFOpMapper(OpMapper):
         perm = perm.value.tolist()

         self.paddle_graph.add_layer(
-            kernel="fluid.layers.transpose",
+            kernel="paddle.transpose",
             inputs={"x": input.name},
             outputs=[node.name],
             perm=perm)

@@ -245,10 +245,10 @@ class TFOpMapper(OpMapper):
         else:
             inputs["shape"] = dims.name
         attr["dtype"] = string(input_value.dtype)
-        attr["value"] = input_value.value
+        attr["fill_value"] = input_value.value

         self.paddle_graph.add_layer(
-            "fluid.layers.fill_constant",
+            "paddle.full",
             inputs=inputs,
             outputs=[node.name],
             **attr)

@@ -273,7 +273,7 @@ class TFOpMapper(OpMapper):
         if data_format == "NHWC":
             transpose_name = gen_name("depth_to_space", "transpose")
             self.paddle_graph.add_layer(
-                kernel="fluid.layers.transpose",
+                kernel="paddle.transpose",
                 inputs={"x": input.name},
                 outputs=[transpose_name],
                 perm=[0, 3, 1, 2])

@@ -282,21 +282,21 @@ class TFOpMapper(OpMapper):
         shape = [0, block_size * block_size, -1, h, w]
         reshape_name = gen_name("depth_to_space", "reshape")
         self.paddle_graph.add_layer(
-            kernel="fluid.layers.reshape",
+            kernel="paddle.reshape",
             inputs={"x": input_name},
             outputs=[reshape_name],
             shape=shape)

         transpose_name = gen_name("depth_to_space", "transpose")
         self.paddle_graph.add_layer(
-            kernel="fluid.layers.transpose",
+            kernel="paddle.transpose",
             inputs={"x": reshape_name},
             outputs=[transpose_name],
             perm=[0, 2, 1, 3, 4])

         reshape_name = gen_name("depth_to_space", "reshape")
         self.paddle_graph.add_layer(
-            kernel="fluid.layers.reshape",
+            kernel="paddle.reshape",
             inputs={"x": transpose_name},
             outputs=[reshape_name],
             shape=[0, c, h, w])

@@ -309,7 +309,7 @@ class TFOpMapper(OpMapper):
         if data_format == "NHWC":
             self.paddle_graph.add_layer(
-                kernel="fluid.layers.transpose",
+                kernel="paddle.transpose",
                 inputs={"x": node.name},
                 outputs=[node.name],
                 perm=[0, 2, 3, 1])

@@ -353,7 +353,7 @@ class TFOpMapper(OpMapper):
         if data_format == "NHWC":
             transpose_name = gen_name("max_pool", "transpose")
             self.paddle_graph.add_layer(
-                kernel="fluid.layers.transpose",
+                kernel="paddle.transpose",
                 inputs={"x": input.name},
                 outputs=[transpose_name],
                 perm=[0, 3, 1, 2])

@@ -362,17 +362,16 @@ class TFOpMapper(OpMapper):
             input_name = transpose_name

         self.paddle_graph.add_layer(
-            kernel="fluid.layers.pool2d",
-            inputs={"input": input_name},
+            kernel="paddle.nn.functional.max_pool2d",
+            inputs={"x": input_name},
             outputs=[node.name],
-            pool_size=k_size[2:4],
-            pool_type=string("max"),
-            pool_stride=strides[2:4],
-            pool_padding=string(pad_mode))
+            kernel_size=k_size[2:4],
+            stride=strides[2:4],
+            padding=string(pad_mode))

         if data_format == "NHWC":
             self.paddle_graph.add_layer(
-                kernel="fluid.layers.transpose",
+                kernel="paddle.transpose",
                 inputs={"x": node.name},
                 outputs=[node.name],
                 perm=[0, 2, 3, 1])
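The pooling rewrite is mostly a keyword rename (pool_size/pool_stride/pool_padding become kernel_size/stride/padding) with the pool type moving into the function name. A standalone check of the 2.0 call (assumes paddle is installed):

    import paddle
    x = paddle.rand([1, 3, 8, 8])   # NCHW, i.e. after the transpose above
    y = paddle.nn.functional.max_pool2d(x, kernel_size=2, stride=2, padding=0)
    print(y.shape)                  # [1, 3, 4, 4]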
@@ -403,6 +402,13 @@ class TFOpMapper(OpMapper):
         kernel_weight_name = kernel.name.replace('/', '_')
         self.params[kernel_weight_name] = numpy.transpose(kernel_value,
                                                           (3, 2, 0, 1))
+        self.paddle_graph.add_layer(
+            kernel="paddle.static.nn.create_parameter",
+            inputs={},
+            outputs=[kernel_weight_name],
+            shape=self.params[kernel_weight_name].shape,
+            dtype=string(str(self.params[kernel_weight_name].dtype)),
+            name=string(kernel_weight_name))

         input_name = input.name
         if data_format == "NHWC":

@@ -410,7 +416,7 @@ class TFOpMapper(OpMapper):
             dilations = [dilations[i] for i in [0, 3, 1, 2]]
             transpose_name = gen_name("conv2d", "transpose")
             self.paddle_graph.add_layer(
-                kernel="fluid.layers.transpose",
+                kernel="paddle.transpose",
                 inputs={"x": input.name},
                 outputs=[transpose_name],
                 perm=[0, 3, 1, 2])

@@ -421,26 +427,23 @@ class TFOpMapper(OpMapper):
             node.fluid_code.add_layer(
                 "reshape", inputs=input, output=input, param_attr=attr)
             self.paddle_graph.add_layer(
-                kernel="fluid.layers.reshape",
+                kernel="paddle.reshape",
                 inputs={"x": input_name},
                 outputs=[input_name],
                 shape=[0, k_size[2], 0, 0])

         self.paddle_graph.add_layer(
-            kernel="fluid.layers.conv2d",
-            inputs={"input": input_name},
+            kernel="paddle.nn.functional.conv2d",
+            inputs={"x": input_name,
+                    "weight": kernel_weight_name},
             outputs=[node.name],
-            bias_attr=False,
-            param_attr=string(kernel_weight_name),
-            num_filters=k_size[3],
-            filter_size=k_size[0:2],
+            bias=None,
             stride=strides[2:4],
             dilation=dilations[2:4],
             padding=string(pad_mode))

         if data_format == "NHWC":
             self.paddle_graph.add_layer(
-                kernel="fluid.layers.transpose",
+                kernel="paddle.transpose",
                 inputs={"x": node.name},
                 outputs=[node.name],
                 perm=[0, 2, 3, 1])
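In the functional 2.0 API the convolution kernel is an explicit "weight" tensor input (created by paddle.static.nn.create_parameter above) rather than a param_attr name plus num_filters/filter_size. A standalone illustration of the call shape:

    import paddle
    x = paddle.rand([1, 3, 8, 8])
    w = paddle.rand([16, 3, 3, 3])   # OIHW, matching the (3, 2, 0, 1) transpose above
    y = paddle.nn.functional.conv2d(x, w, bias=None, stride=1, padding=1)
    print(y.shape)                   # [1, 16, 8, 8]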
@@ -462,15 +465,6 @@ class TFOpMapper(OpMapper):
         if kernel.layer_type == 'Const':
             kernel_value = kernel.value
             kernel_weight_name = kernel.name.replace('/', '_')
-            self.paddle_graph.add_layer(
-                kernel="paddle.static.nn.create_parameter",
-                inputs={},
-                outputs=[kernel_weight_name],
-                shape=self.params[kernel_weight_name].shape,
-                dtype=string(str(self.params[kernel_weight_name].dtype)),
-                name=string(kernel_weight_name))
-            self.params[kernel_weight_name] = numpy.transpose(kernel_value,
-                                                              (4, 3, 0, 1, 2))
         else:
             kernel_value = self.decoder.infer_tensor(kernel, use_diff_inputs=False)
             if kernel.layer_type == 'Split':

@@ -478,12 +472,15 @@ class TFOpMapper(OpMapper):
                 kernel.name)
             else:
                 kernel_weight_name = kernel.name.replace('/', '_')
         self.params[kernel_weight_name] = numpy.transpose(kernel_value,
                                                           (4, 3, 0, 1, 2))
         self.paddle_graph.add_layer(
-            kernel="paddle.transpose",
-            inputs={"x": kernel_weight_name},
+            kernel="paddle.static.nn.create_parameter",
+            inputs={},
             outputs=[kernel_weight_name],
-            perm=[4, 3, 0, 1, 2])
+            shape=self.params[kernel_weight_name].shape,
+            dtype=string(str(self.params[kernel_weight_name].dtype)),
+            name=string(kernel_weight_name))

         input_name = input.name
         if data_format == "NDHWC":

@@ -507,9 +504,8 @@ class TFOpMapper(OpMapper):
         self.paddle_graph.add_layer(
             kernel="paddle.nn.functional.conv3d",
-            inputs={"x": input_name},
+            inputs={"x": input_name,
+                    "weight": kernel_weight_name},
             outputs=[node.name],
-            weight=kernel_weight_name,
             bias=None,
             stride=strides[2:5],
             dilation=dilations[2:5],

@@ -526,7 +522,7 @@ class TFOpMapper(OpMapper):
         input = self.graph.get_node(node.layer.input[0])
         bias = self.graph.get_node(node.layer.input[1])
         self.paddle_graph.add_layer(
-            kernel="fluid.layers.elementwise_add",
+            kernel="paddle.add",
             inputs={"x": input.name,
                     "y": bias.name},
             outputs=[node.name])

@@ -548,30 +544,32 @@ class TFOpMapper(OpMapper):
         if data_format == "NHWC":
             transpose_name = gen_name("batch_norm", "transpose")
             self.paddle_graph.add_layer(
-                kernel="fluid.layers.transpose",
+                kernel="paddle.transpose",
                 inputs={"x": input.name},
                 outputs=[transpose_name],
                 perm=[0, 3, 1, 2])
             input_name = transpose_name

         self.paddle_graph.add_layer(
-            kernel="fluid.layers.batch_norm",
-            inputs={"input": input_name},
+            kernel="paddle.nn.functional.batch_norm",
+            inputs={"x": input_name,
+                    "running_mean": moving_mean.name,
+                    "running_var": moving_var.name,
+                    "weight": gamma.name,
+                    "bias": beta.name},
             outputs=[node.name],
-            epsilon=node.get_attr("epsilon"),
-            param_attr=string(gamma.name),
-            bias_attr=string(beta.name),
-            moving_mean_name=string(moving_mean.name),
-            moving_variance_name=string(moving_var.name),
-            is_test=True)
+            epsilon=node.get_attr("epsilon"))

         if data_format == "NHWC":
             self.paddle_graph.add_layer(
-                kernel="fluid.layers.transpose",
+                kernel="paddle.transpose",
                 inputs={"x": node.name},
                 outputs=[node.name],
                 perm=[0, 2, 3, 1])

+    def FusedBatchNormV3(self, node):
+        self.FusedBatchNorm(node)
+
     def Mean(self, node):
         input = self.graph.get_node(node.layer.input[0])
         reduce_idx = self.graph.get_node(node.layer.input[1])
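paddle.nn.functional.batch_norm likewise takes the statistics and affine parameters as tensors instead of fluid's *_name/param_attr strings, and it runs in inference mode by default, which is why is_test=True could be dropped. A standalone sketch:

    import paddle
    import paddle.nn.functional as F

    x = paddle.rand([1, 3, 4, 4])   # NCHW, after the transpose above
    mean = paddle.zeros([3])
    var = paddle.ones([3])
    weight = paddle.ones([3])       # gamma
    bias = paddle.zeros([3])        # beta
    y = F.batch_norm(x, mean, var, weight=weight, bias=bias, epsilon=1e-5)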
@@ -614,48 +612,6 @@ class TFOpMapper(OpMapper):
             inputs={"x": node.name},
             outputs=[node.name],
             shape=out_shape.tolist())
-        # input = self.graph.get_node(node.layer.input[0])
-        # param = self.graph.get_node(node.layer.input[1])
-        # input_name = input.name
-        # if input.dtype == 'bool':
-        #     cast_name = gen_name('reshape', 'cast')
-        #     self.paddle_graph.add_layer(
-        #         kernel="fluid.layers.cast",
-        #         inputs={"x": input_name},
-        #         outputs=[cast_name],
-        #         dtype="'int32'")
-        #     input_name = cast_name
-        # if param.layer_type == "Const":
-        #     shape = param.value.tolist()
-        #     self.paddle_graph.add_layer(
-        #         kernel="fluid.layers.reshape",
-        #         inputs={"x": input_name},
-        #         outputs=[node.name],
-        #         shape=shape)
-        # else:
-        #     self.paddle_graph.add_layer(
-        #         kernel="fluid.layers.reshape",
-        #         inputs={"x": input_name,
-        #                 "shape": param.name},
-        #         outputs=[node.name])
-        # if param.layer_type != "Const":
-        #     out_shape = numpy.array(node.out_shapes[0])
-        #     if (out_shape > 0).any():
-        #         out_shape[out_shape < 0] = 0
-        #         self.paddle_graph.add_layer(
-        #             kernel="fluid.layers.reshape",
-        #             inputs={"x": node.name},
-        #             outputs=[node.name],
-        #             shape=out_shape.tolist())
-        # if input.dtype == 'bool':
-        #     self.paddle_graph.add_layer(
-        #         kernel="fluid.layers.cast",
-        #         inputs={"x": node.name},
-        #         outputs=[node.name],
-        #         dtype="'bool'")

     def Pad(self, node):
         input = self.graph.get_node(node.layer.input[0])

@@ -668,37 +624,33 @@ class TFOpMapper(OpMapper):
             new_padding = paddings[2:6]
             transpose_name = gen_name("pad", "transpose")
             self.paddle_graph.add_layer(
-                kernel="fluid.layers.transpose",
+                kernel="paddle.transpose",
                 inputs={"x": input.name},
                 outputs=[transpose_name],
                 perm=[0, 3, 1, 2])
             self.paddle_graph.add_layer(
-                kernel="fluid.layers.pad2d",
-                inputs={"input": transpose_name},
+                kernel="paddle.nn.functional.pad",
+                inputs={"x": transpose_name},
                 outputs=[node.name],
-                paddings=new_padding)
+                pad=new_padding)
             self.paddle_graph.add_layer(
-                kernel="fluid.layers.transpose",
+                kernel="paddle.transpose",
                 inputs={"x": node.name},
                 outputs=[node.name],
                 perm=[0, 2, 3, 1])
             return

         self.paddle_graph.add_layer(
-            kernel="fluid.layers.pad",
+            kernel="paddle.nn.functional.pad",
             inputs={"x": input.name},
             outputs=[node.name],
-            paddings=paddings)
+            pad=paddings)

     def MirrorPad(self, node):
         op_name = name_generator("pad", self.nn_name2id)
         output_name = node.name
         layer_outputs = [op_name, output_name]
         input = self.graph.get_input_node(node, 0)
         paddings = self.graph.get_input_node(node, 1)
         assert paddings.layer_type == "Const", "Padding should be Const"
         paddings = np.flip(paddings.value, 0).flatten().tolist()
         dim = int(len(paddings) / 2)
         transpose_name = gen_name("pad", "transpose")
         self.paddle_graph.add_layer(
             kernel="paddle.transpose",
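fluid.layers.pad2d/pad took paddings=; the unified 2.0 entry point is paddle.nn.functional.pad with pad=. A standalone example (constant padding of the last two dims of an NCHW tensor):

    import paddle
    x = paddle.rand([1, 3, 4, 4])
    y = paddle.nn.functional.pad(x, pad=[1, 1, 2, 2])   # [left, right, top, bottom]
    print(y.shape)                                       # [1, 3, 8, 6]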
@@ -706,9 +658,9 @@ class TFOpMapper(OpMapper):
             outputs=[transpose_name],
             perm=[0, 3, 1, 2])
         self.paddle_graph.add_layer(
-            kernel="paddle.nn.Pad{}D".format(dim),
+            kernel="paddle.nn.functional.pad".format(dim),
             inputs={"x": transpose_name},
-            outputs=layer_outputs,
+            outputs=[node.name],
             pad=new_padding)
         self.paddle_graph.add_layer(
             kernel="paddle.transpose",

@@ -717,22 +669,13 @@ class TFOpMapper(OpMapper):
             perm=[0, 2, 3, 1])

     def Squeeze(self, node):
-        input = self.graph.get_node(node.layer.input[0])
+        input = self.graph.get_input_node(node, 0)
         squeeze_dims = node.get_attr('squeeze_dims')
         self.paddle_graph.add_layer(
-            kernel="fluid.layers.squeeze",
-            inputs={"input": input.name},
-            outputs=[node.name],
-            axes=squeeze_dims)
-
-    def Softmax(self, node):
-        input = self.graph.get_node(node.layer.input[0])
-        axis = node.get_attr("axis")
-        self.paddle_graph.add_layer(
-            kernel="fluid.layers.softmax",
-            inputs={"input": input.name},
+            kernel="paddle.squeeze",
+            inputs={"x": input.name},
             outputs=[node.name],
-            axis=axis)
+            axis=squeeze_dims)

     def Shape(self, node):
         input = self.graph.get_input_node(node, 0)

@@ -762,12 +705,12 @@ class TFOpMapper(OpMapper):
             outputs=[node.name])

     def ArgMax(self, node):
-        input = self.graph.get_node(node.layer.input[0])
-        axis = self.graph.get_node(node.layer.input[1])
+        input = self.graph.get_input_node(node, 0)
+        axis = self.graph.get_input_node(node, 1)
         assert axis.layer_type == "Const", "ArgMax only support Const parameter"
         axis = axis.value
         self.paddle_graph.add_layer(
-            kernel="fluid.layers.argmax",
+            kernel="paddle.argmax",
             inputs={"x": input.name},
             outputs=[node.name],
             axis=axis)

@@ -786,8 +729,8 @@ class TFOpMapper(OpMapper):
             sorted=sort)

     def MatMul(self, node):
-        x = self.graph.get_node(node.layer.input[0])
-        y = self.graph.get_node(node.layer.input[1])
+        x = self.graph.get_input_node(node, 0)
+        y = self.graph.get_input_node(node, 1)
         transpose_a = node.get_attr('transpose_a')
         transpose_b = node.get_attr('transpose_b')
         if transpose_a is None:

@@ -795,7 +738,7 @@ class TFOpMapper(OpMapper):
         if transpose_b is None:
             transpose_b = node.get_attr('adj_y')
         self.paddle_graph.add_layer(
-            kernel="fluid.layers.matmul",
+            kernel="paddle.matmul",
             inputs={"x": x.name,
                     "y": y.name},
             outputs=[node.name],

@@ -820,8 +763,11 @@ class TFOpMapper(OpMapper):
         data_format = node.get_attr("data_format").decode()
         pad_mode = node.get_attr("padding").decode()

-        self.params[kernel.layer_name.replace('/', '_')] = numpy.transpose(
-            kernel.value, (2, 3, 0, 1))
+        self.paddle_graph.add_layer(
+            kernel="paddle.transpose",
+            inputs={"x": kernel.name},
+            outputs=[kernel.name],
+            perm=[2, 3, 0, 1])

         input_name = input.name
         if data_format == "NHWC":

@@ -830,34 +776,32 @@ class TFOpMapper(OpMapper):
             dilations = [dilations[i] for i in [0, 3, 1, 2]]
             transpose_name = gen_name('depthwise_conv2d', 'transpose')
             self.paddle_graph.add_layer(
-                kernel="fluid.layers.transpose",
+                kernel="paddle.transpose",
                 inputs={"x": input.name},
                 outputs=[transpose_name],
                 perm=[0, 3, 1, 2])
             input_name = transpose_name

         self.paddle_graph.add_layer(
-            kernel="fluid.layers.conv2d",
-            inputs={"input": input_name},
+            kernel="paddle.nn.functional.conv2d",
+            inputs={"x": input_name,
+                    "weight": kernel.name},
             outputs=[node.name],
-            num_filters=in_shape[1],
-            filter_size=k_size[0:2],
             stride=strides[2:4],
             dilation=dilations[2:4],
             groups=k_size[3] * in_shape[1],
             padding=string(pad_mode),
-            param_attr=string(kernel.layer_name),
-            bias_attr=False)
+            bias=None)

         if data_format == "NHWC":
             self.paddle_graph.add_layer(
-                kernel="fluid.layers.transpose",
+                kernel="paddle.transpose",
                 inputs={"x": node.name},
                 outputs=[node.name],
                 perm=[0, 2, 3, 1])

     def AvgPool(self, node):
-        input = self.graph.get_node(node.layer.input[0])
+        input = self.graph.get_input_node(node, 0)
         k_size = node.get_attr("ksize")
         strides = node.get_attr("strides")

@@ -868,7 +812,7 @@ class TFOpMapper(OpMapper):
         if data_format == "NHWC":
             transpose_name = gen_name("avg_pool", "transpose")
             self.paddle_graph.add_layer(
-                kernel="fluid.layers.transpose",
+                kernel="paddle.transpose",
                 inputs={"x": input.name},
                 outputs=[transpose_name],
                 perm=[0, 3, 1, 2])

@@ -876,6 +820,8 @@ class TFOpMapper(OpMapper):
             k_size = [k_size[i] for i in [0, 3, 1, 2]]
             input_name = transpose_name

+        # TODO(syf): The op has diff.
         self.paddle_graph.add_layer(
             kernel="fluid.layers.pool2d",
             inputs={"input": input_name},

@@ -887,29 +833,31 @@ class TFOpMapper(OpMapper):
         if data_format == "NHWC":
             self.paddle_graph.add_layer(
-                kernel="fluid.layers.transpose",
+                kernel="paddle.transpose",
                 inputs={"x": node.name},
                 outputs=[node.name],
                 perm=[0, 2, 3, 1])

     def Pack(self, node):
-        inputs = [self.graph.get_node(name) for name in node.layer.input]
-        input_names = [i.name for i in inputs]
+        inputs_list = list()
+        for i in range(len(node.inputs)):
+            inputs_list.append(self.graph.get_input_node(node, i))
+        input_names = [i.name for i in inputs_list]
         axis = node.get_attr("axis")
         self.paddle_graph.add_layer(
-            kernel="fluid.layers.stack",
+            kernel="paddle.stack",
             inputs={"x": input_names},
             outputs=[node.name],
             axis=axis)
         if len(node.out_shapes[0]) == 1:
             self.paddle_graph.add_layer(
-                kernel="fluid.layers.reshape",
+                kernel="paddle.reshape",
                 inputs={"x": node.name},
                 outputs=[node.name],
                 shape=[-1])

     def Unpack(self, node):
-        input = self.graph.get_node(node.layer.input[0])
+        input = self.graph.get_input_node(node, 0)
         axis = node.get_attr("axis")
         num = node.get_attr("num")
         shape = input.out_shapes[0]

@@ -917,10 +865,10 @@ class TFOpMapper(OpMapper):
         if len(shape) == 1:
             if shape[0] > 0 and num == shape[0]:
                 self.paddle_graph.add_layer(
-                    kernel="fluid.layers.unsqueeze",
-                    inputs={"input": input.name},
+                    kernel="paddle.unsqueeze",
+                    inputs={"x": input.name},
                     outputs=[node.name],
-                    axes=[0])
+                    axis=[0])
                 input_name = node.name
                 axis = 1
             else:

@@ -929,41 +877,45 @@ class TFOpMapper(OpMapper):
         if len(layer_outputs) == 1:
             layer_outputs[0] = "[{}]".format(node.layer_name)
         self.paddle_graph.add_layer(
-            kernel="fluid.layers.unstack",
+            kernel="paddle.unstack",
             inputs={"x": input_name},
             outputs=layer_outputs,
             axis=axis,
             num=num)

     def ConcatV2(self, node):
-        inputs = [self.graph.get_node(name) for name in node.layer.input[:-1]]
-        axis = self.graph.get_node(node.layer.input[-1])
+        inputs_list = list()
+        for i in range(len(node.inputs) - 1):
+            inputs_list.append(self.graph.get_input_node(node, i))
+        axis = self.graph.get_input_node(node, -1)
         assert axis.layer_type == "Const", "axis for ConcatV2 must be type Const"
         axis = axis.value
         if axis < 0:
-            axis += len(inputs[0].out_shapes[0])
+            axis += len(inputs_list[0].out_shapes[0])

-        input_names = [i.name for i in inputs]
-        for i, ipt in enumerate(inputs):
-            if ipt.dtype == 'bool':
-                cast_name = gen_name('concat', 'cast')
-                self.paddle_graph.add_layer(
-                    kernel="fluid.layers.cast",
-                    inputs={"x": ipt.name},
-                    outputs=[cast_name],
-                    dtype="'int32'")
-                input_names[i] = cast_name
+        input_names = [i.name for i in inputs_list]
         self.paddle_graph.add_layer(
-            kernel="fluid.layers.concat",
-            inputs={"input": input_names},
+            kernel="paddle.concat",
+            inputs={"x": input_names},
             outputs=[node.name],
             axis=axis)
-        if node.dtype == 'bool':
+
+    def Concat(self, node):
+        inputs_list = list()
+        for i in range(1, len(node.inputs)):
+            inputs_list.append(self.graph.get_input_node(node, i))
+        axis = self.graph.get_input_node(node, 0)
+        assert axis.layer_type == "Const", "axis for ConcatV2 must be type Const"
+        axis = axis.value
+        if axis < 0:
+            axis += len(inputs_list[0].out_shapes[0])
+
+        input_names = [i.name for i in inputs_list]
         self.paddle_graph.add_layer(
-            kernel="fluid.layers.cast",
-            inputs={"x": node.name},
+            kernel="paddle.concat",
+            inputs={"x": input_names},
             outputs=[node.name],
-            dtype="'bool'")
+            axis=axis)

     def AddN(self, node):
         inputs_list = list()
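Pack/Unpack now land on paddle.stack and paddle.unstack. A standalone round trip (paddle 2.0 assumed):

    import paddle
    xs = [paddle.ones([2, 3]), paddle.zeros([2, 3])]
    packed = paddle.stack(xs, axis=0)        # TF Pack: shape [2, 2, 3]
    parts = paddle.unstack(packed, axis=0)   # TF Unpack: two tensors of shape [2, 3]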
@@ -977,10 +929,10 @@ class TFOpMapper(OpMapper):
             outputs=[node.name])

     def StridedSlice(self, node):
-        input = self.graph.get_node(node.layer.input[0])
-        begin = self.graph.get_node(node.layer.input[1])
-        end = self.graph.get_node(node.layer.input[2])
-        strides = self.graph.get_node(node.layer.input[3])
+        input = self.graph.get_input_node(node, 0)
+        begin = self.graph.get_input_node(node, 1)
+        end = self.graph.get_input_node(node, 2)
+        strides = self.graph.get_input_node(node, 3)
         if strides.layer_type == "Const":
             strides = strides.value.tolist()

@@ -1043,28 +995,43 @@ class TFOpMapper(OpMapper):
             else:
                 new_end.append(end[i])

+        if input.dtype == "bool":
+            self.paddle_graph.add_layer(
+                "paddle.cast",
+                inputs={"x": input.name},
+                outputs=[input.name],
+                dtype=string("int32"))
+
         self.paddle_graph.add_layer(
-            kernel="fluid.layers.slice",
+            kernel="paddle.slice",
             inputs={"input": input.name},
             outputs=[node.name],
             axes=[i for i in range(len(new_begin))],
             starts=new_begin,
             ends=new_end)
+
+        if input.dtype == "bool":
+            self.paddle_graph.add_layer(
+                "paddle.cast",
+                inputs={"x": node.name},
+                outputs=[node.name],
+                dtype=string("bool"))
+
         if len(new_axes) > 0:
             self.paddle_graph.add_layer(
-                kernel="fluid.layers.unsqueeze",
-                inputs={"input": node.name},
+                kernel="paddle.unsqueeze",
+                inputs={"x": node.name},
                 outputs=[node.name],
-                axes=new_axes)
+                axis=new_axes)
         if len(shrink_axes) > 0:
             if len(input.out_shapes[0]) + len(new_axes) <= 1:
                 pass
             else:
                 self.paddle_graph.add_layer(
-                    kernel="fluid.layers.squeeze",
-                    inputs={"input": node.name},
+                    kernel="paddle.squeeze",
+                    inputs={"x": node.name},
                     outputs=[node.name],
-                    axes=shrink_axes)
+                    axis=shrink_axes)

     def Prod(self, node):
         input = self.graph.get_input_node(node, 0)

@@ -1081,25 +1048,25 @@ class TFOpMapper(OpMapper):
             axis=axis)

     def Split(self, node):
-        dim = self.graph.get_node(node.layer.input[0])
-        input = self.graph.get_node(node.layer.input[1])
+        dim = self.graph.get_input_node(node, 0)
+        input = self.graph.get_input_node(node, 1)
         assert dim.layer_type == "Const"
         num_split = node.get_attr('num_split')
         dim = dim.value

         self.paddle_graph.add_layer(
-            kernel="fluid.layers.split",
-            inputs={"input": input.name},
+            kernel="paddle.split",
+            inputs={"x": input.name},
             outputs=["{}_p{}".format(node.layer_name, i) for i in range(num_split)],
             num_or_sections=num_split,
-            dim=dim)
+            axis=dim)

     def Slice(self, node):
-        input = self.graph.get_node(node.layer.input[0])
-        begin = self.graph.get_node(node.layer.input[1])
-        size = self.graph.get_node(node.layer.input[2])
+        input = self.graph.get_input_node(node, 0)
+        begin = self.graph.get_input_node(node, 1)
+        size = self.graph.get_input_node(node, 2)

         inputs = {"x": input.name}
         attrs = {}

@@ -1124,143 +1091,147 @@ class TFOpMapper(OpMapper):
             shape = size.out_shapes[0]
             reshape_name = gen_name("slice", "reshape")
             self.paddle_graph.add_layer(
-                kernel="fluid.layers.reshape",
+                kernel="paddle.reshape",
                 inputs={"x": size.name},
                 outputs=[reshape_name],
                 shape=shape)
             inputs['shape'] = reshape_name
         self.paddle_graph.add_layer(
-            kernel="fluid.layers.crop_tensor",
+            kernel="paddle.crop",
             inputs=inputs,
             outputs=[node.name],
             **attrs)

     def ResizeNearestNeighbor(self, node):
-        input = self.graph.get_node(node.layer.input[0])
-        resize_shape = self.graph.get_node(node.layer.input[1])
+        input = self.graph.get_input_node(node, 0)
+        resize_shape = self.graph.get_input_node(node, 1)
         data_format = "NHWC"
-        inputs = {"input": input.name}
-        attrs = {"align_corners": node.get_attr("align_corners")}
+        inputs = {"x": input.name}
+        attrs = {
+            "align_corners": node.get_attr("align_corners"),
+            "mode": string("nearest"),
+            "align_mode": 1
+        }

         if resize_shape.layer_type == "Const":
             resize_shape = resize_shape.value.tolist()
-            attrs["out_shape"] = resize_shape
+            attrs["size"] = resize_shape
         else:
             shape = resize_shape.out_shapes[0]
             reshape_name = gen_name("resize_nearest", "reshape")
             self.paddle_graph.add_layer(
-                kernel="fluid.layers.reshape",
+                kernel="paddle.reshape",
                 inputs={"x": resize_shape.name},
                 outputs=[reshape_name],
                 shape=shape)
-            inputs["out_shape"] = reshape_name
+            inputs["size"] = reshape_name

         if data_format == "NHWC":
             transpose_name = gen_name("resize_nearest", "reshape")
             self.paddle_graph.add_layer(
-                kernel="fluid.layers.transpose",
+                kernel="paddle.transpose",
                 inputs={"x": input.name},
                 outputs=[transpose_name],
                 perm=[0, 3, 1, 2])
-            inputs["input"] = transpose_name
+            inputs["x"] = transpose_name

         self.paddle_graph.add_layer(
-            kernel="fluid.layers.resize_nearest",
+            kernel="paddle.nn.functional.interpolate",
             inputs=inputs,
             outputs=[node.name],
             **attrs)

         if data_format == "NHWC":
             self.paddle_graph.add_layer(
-                kernel="fluid.layers.transpose",
+                kernel="paddle.transpose",
                 inputs={"x": node.name},
                 outputs=[node.name],
                 perm=[0, 2, 3, 1])

     def ResizeBilinear(self, node):
-        input = self.graph.get_node(node.layer.input[0])
-        resize_shape = self.graph.get_node(node.layer.input[1])
+        input = self.graph.get_input_node(node, 0)
+        resize_shape = self.graph.get_input_node(node, 1)
         data_format = "NHWC"
-        inputs = {"input": input.name}
-        attrs = {"align_corners": node.get_attr("align_corners")}
+        inputs = {"x": input.name}
+        attrs = {
+            "align_corners": node.get_attr("align_corners"),
+            "mode": string("bilinear"),
+            "align_mode": 1
+        }

         if resize_shape.layer_type == "Const":
             resize_shape = resize_shape.value.tolist()
-            attrs["out_shape"] = resize_shape
+            attrs["size"] = resize_shape
         else:
             shape = resize_shape.out_shapes[0]
             reshape_name = gen_name("resize_bilinear", "reshape")
             self.paddle_graph.add_layer(
-                kernel="fluid.layers.reshape",
+                kernel="paddle.reshape",
                 inputs={"x": resize_shape.name},
                 outputs=[reshape_name],
                 shape=shape)
-            inputs["out_shape"] = reshape_name
+            inputs["size"] = reshape_name

         if data_format == "NHWC":
             transpose_name = gen_name("resize_bilinear", "reshape")
             self.paddle_graph.add_layer(
-                kernel="fluid.layers.transpose",
+                kernel="paddle.transpose",
                 inputs={"x": input.name},
                 outputs=[transpose_name],
                 perm=[0, 3, 1, 2])
-            inputs["input"] = transpose_name
+            inputs["x"] = transpose_name

         self.paddle_graph.add_layer(
-            kernel="fluid.layers.resize_bilinear",
+            kernel="paddle.nn.functional.interpolate",
             inputs=inputs,
             outputs=[node.name],
             **attrs)

         if data_format == "NHWC":
             self.paddle_graph.add_layer(
-                kernel="fluid.layers.transpose",
+                kernel="paddle.transpose",
                 inputs={"x": node.name},
                 outputs=[node.name],
                 perm=[0, 2, 3, 1])

     def Cast(self, node):
-        input = self.graph.get_node(node.layer.input[0])
+        input = self.graph.get_input_node(node, 0)
         dtype = node.dtype
         self.paddle_graph.add_layer(
-            kernel="fluid.layers.cast",
+            kernel="paddle.cast",
             inputs={"x": input.name},
             outputs=[node.name],
             dtype=string(dtype))

     def Sum(self, node):
-        input = self.graph.get_node(node.layer.input[0])
-        reduce_idx = self.graph.get_node(node.layer.input[1])
+        input = self.graph.get_input_node(node, 0)
+        reduce_idx = self.graph.get_input_node(node, 1)
         assert reduce_idx.layer_type == "Const", "Only support Const parameter[reduce_idx]"
         keep_dims = node.get_attr("keep_dims")
         dim = reduce_idx.value.tolist()

         self.paddle_graph.add_layer(
-            kernel="fluid.layers.reduce_sum",
-            inputs={"input": input.name},
+            kernel="paddle.sum",
+            inputs={"x": input.name},
             outputs=[node.name],
-            dim=dim,
-            keep_dim=keep_dims)
+            axis=dim,
+            keepdim=keep_dims)

     def Max(self, node):
-        input = self.graph.get_node(node.layer.input[0])
-        reduce_idx = self.graph.get_node(node.layer.input[1])
+        input = self.graph.get_input_node(node, 0)
+        reduce_idx = self.graph.get_input_node(node, 1)
         assert reduce_idx.layer_type == "Const", "Only support Const parameter[reduce_idx]"
         keep_dims = node.get_attr("keep_dims")
         dim = reduce_idx.value.tolist()

         self.paddle_graph.add_layer(
-            kernel="fluid.layers.reduce_max",
-            inputs={"input": input.name},
+            kernel="paddle.max",
+            inputs={"x": input.name},
             outputs=[node.name],
-            dim=dim,
-            keep_dim=keep_dims)
+            axis=dim,
+            keepdim=keep_dims)

     def RandomUniform(self, node):
-        shape = self.graph.get_node(node.layer.input[0])
+        shape = self.graph.get_input_node(node, 0)
         if shape.layer_type == "Const":
             shape = shape.value.tolist()
             self.paddle_graph.add_layer(
-                kernel="fluid.layers.uniform_random",
+                kernel="paddle.uniform",
                 inputs={},
                 outputs=[node.name],
                 shape=shape,
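fluid's resize_nearest/resize_bilinear collapse into a single paddle.nn.functional.interpolate call, with the resize flavor carried in the new mode/align_mode attrs built above. A standalone example:

    import paddle
    x = paddle.rand([1, 3, 4, 4])
    y = paddle.nn.functional.interpolate(x, size=[8, 8], mode="nearest")
    print(y.shape)   # [1, 3, 8, 8]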
@@ -1268,16 +1239,16 @@ class TFOpMapper(OpMapper):
                 max=0.9999)
         else:
             self.paddle_graph.add_layer(
-                kernel="fluid.layers.uniform_random",
+                kernel="paddle.uniform",
                 inputs={'shape': shape.name},
                 outputs=[node.name],
                 min=0.0,
                 max=0.9999)

     def Conv2DBackpropInput(self, node):
-        out_shape = self.graph.get_node(node.layer.input[0])
-        kernel = self.graph.get_node(node.layer.input[1])
-        input = self.graph.get_node(node.layer.input[2])
+        out_shape = self.graph.get_input_node(node, 0)
+        kernel = self.graph.get_input_node(node, 1)
+        input = self.graph.get_input_node(node, 2)

         assert kernel.layer_type == "Const", "Kernel of Conv2DBackpropInput should be Const"

@@ -1292,15 +1263,15 @@ class TFOpMapper(OpMapper):
             in_shape = self.decoder.infer_tensor(input, use_diff_inputs=False).shape
         k_size = kernel.out_shapes[0]
         if k_size.count(-1) > 2:
-            k_size = self.decoder.infer_tensor(input, use_diff_inputs=False).shape
+            k_size = self.decoder.infer_tensor(kernel, use_diff_inputs=False).shape

         pad_mode = node.get_attr("padding").decode()
         strides = node.get_attr("strides")
         dilations = node.get_attr("dilations")
         data_format = node.get_attr("data_format").decode()

-        self.params[kernel.layer_name.replace('/', '_')] = numpy.transpose(
-            kernel.value, (3, 2, 0, 1))
+        kernel_name = node.name + ".weight"
+        self.params[kernel_name] = numpy.transpose(kernel.value, (3, 2, 0, 1))

         input_name = input.name
         if data_format == "NHWC":

@@ -1309,20 +1280,26 @@ class TFOpMapper(OpMapper):
             dilations = [dilations[i] for i in [0, 3, 1, 2]]
             transpose_name = gen_name("conv2dbackpropinput", "transpose")
             self.paddle_graph.add_layer(
-                kernel="fluid.layers.transpose",
+                kernel="paddle.transpose",
                 inputs={"x": input.name},
                 outputs=[transpose_name],
                 perm=[0, 3, 1, 2])
             input_name = transpose_name

         self.paddle_graph.add_layer(
-            kernel="fluid.layers.conv2d_transpose",
-            inputs={"input": input_name},
+            kernel="paddle.static.create_parameter",
+            inputs={},
+            outputs=["{}_{}".format(node.name, kernel_name).replace(".", "_")],
+            dtype=string(str(self.params[kernel_name].dtype)),
+            shape=self.params[kernel_name].shape,
+            name=string(kernel_name))
+
+        self.paddle_graph.add_layer(
+            kernel="paddle.nn.functional.conv2d_transpose",
+            inputs={"x": input_name,
+                    "weight": "{}_{}".format(node.name, kernel_name).replace(".", "_")},
             outputs=[node.name],
-            bias_attr=False,
-            param_attr=string(kernel.layer_name),
-            num_filters=k_size[2],
-            filter_size=k_size[0:2],
+            bias=None,
             stride=strides[2:4],
             dilation=dilations[2:4],
             padding=string(pad_mode),

@@ -1330,7 +1307,7 @@ class TFOpMapper(OpMapper):
         if data_format == "NHWC":
             self.paddle_graph.add_layer(
-                kernel="fluid.layers.transpose",
+                kernel="paddle.transpose",
                 inputs={"x": node.name},
                 outputs=[node.name],
                 perm=[0, 2, 3, 1])

@@ -1403,11 +1380,12 @@ class TFOpMapper(OpMapper):
             shape=node.out_shapes[0])

     def SquaredDifference(self, node):
-        x = self.graph.get_node(node.layer.input[0])
-        y = self.graph.get_node(node.layer.input[1])
+        x = self.graph.get_input_node(node, 0)
+        y = self.graph.get_input_node(node, 1)
         inputs = {"x": x.name, "y": y.name}
         x_shape = x.out_shapes[0]
         y_shape = y.out_shapes[0]
+        # TODO(syf)
         layer_id = self.paddle_graph.add_layer(
             "fluid.layers.elementwise_sub", inputs=inputs, outputs=[node.name])
         self.paddle_graph.layers[layer_id].input_shapes = {"x": x_shape, "y": y_shape}

@@ -1416,14 +1394,14 @@ class TFOpMapper(OpMapper):
         x_shape = node.out_shapes[0]
         y_shape = node.out_shapes[0]
         layer_id = self.paddle_graph.add_layer(
-            "fluid.layers.elementwise_mul", inputs=inputs, outputs=[node.name])
+            "paddle.multiply", inputs=inputs, outputs=[node.name])
         self.paddle_graph.layers[layer_id].input_shapes = {"x": x_shape, "y": y_shape}

     def OneHot(self, node):
-        input = self.graph.get_node(node.layer.input[0])
-        depth = self.graph.get_node(node.layer.input[1])
-        on_value = self.graph.get_node(node.layer.input[2])
-        off_value = self.graph.get_node(node.layer.input[3])
+        input = self.graph.get_input_node(node, 0)
+        depth = self.graph.get_input_node(node, 1)
+        on_value = self.graph.get_input_node(node, 2)
+        off_value = self.graph.get_input_node(node, 3)
         assert depth.layer_type == 'Const', 'Parameter depth should be Const in OneHot'
         assert on_value.layer_type == 'Const', 'Parameter on_value should be Const in OneHot'
         assert off_value.layer_type == 'Const', 'Parameter off_value should be Const in OneHot'

@@ -1437,73 +1415,72 @@ class TFOpMapper(OpMapper):
                 0.0) < 1e-06, "off_value should be 0 in OneHot"

         self.paddle_graph.add_layer(
-            "fluid.one_hot",
-            inputs={"input": input.name},
+            "paddle.nn.functional.one_hot",
+            inputs={"x": input.name},
             outputs=[node.name],
-            depth=depth.value)
+            num_classes=depth.value)

     def Pow(self, node):
-        x = self.graph.get_node(node.layer.input[0])
-        factor = self.graph.get_node(node.layer.input[1])
+        x = self.graph.get_input_node(node, 0)
+        factor = self.graph.get_input_node(node, 1)
         inputs = {"x": x.name}
         attr = dict()
         if factor.layer_type == 'Const':
-            attr["factor"] = factor.value.tolist()
+            attr["y"] = factor.value.tolist()
         else:
-            inputs["factor"] = factor.name
+            inputs["y"] = factor.name
         self.paddle_graph.add_layer(
-            "fluid.layers.pow", inputs=inputs, outputs=[node.name], **attr)
+            "paddle.pow", inputs=inputs, outputs=[node.name], **attr)

     def All(self, node):
-        input = self.graph.get_node(node.layer.input[0])
-        reduce_idx = self.graph.get_node(node.layer.input[1])
+        input = self.graph.get_input_node(node, 0)
+        reduce_idx = self.graph.get_input_node(node, 1)
         assert reduce_idx.layer_type == "Const", "Only support Const parameter[reduce_idx]"
         attr = dict()
-        attr["dim"] = reduce_idx.value.tolist()
-        attr["keep_dim"] = node.get_attr("keep_dims")
+        attr["axis"] = reduce_idx.value.tolist()
+        attr["keepdim"] = node.get_attr("keep_dims")

         input_name = input.name
         if input.dtype != "bool":
             input_name = gen_name("all", "cast")
             self.paddle_graph.add_layer(
-                "fluid.layers.cast",
+                "paddle.cast",
                 inputs={"x": input.name},
                 outputs=[input_name],
                 dtype=string("bool"))
         self.paddle_graph.add_layer(
-            "fluid.layers.reduce_all",
-            inputs={"input": input_name},
+            "paddle.all",
+            inputs={"x": input_name},
             outputs=[node.name],
             **attr)

         node.layer.attr['dtype'].type = 10

     def GatherV2(self, node):
-        embeddings = self.graph.get_node(node.layer.input[0])
-        index = self.graph.get_node(node.layer.input[1])
-        axis = self.graph.get_node(node.layer.input[2])
+        embeddings = self.graph.get_input_node(node, 0)
+        index = self.graph.get_input_node(node, 1)
+        axis = self.graph.get_input_node(node, 2)
         assert axis.layer_type == 'Const', "Only support Const parameter[axis]"
         axis = axis.value
-        assert axis == 0, "Only support axis=0 in GatherV2 OP"
         index_name = index.name
         if len(index.out_shapes[0]) != 1:
             reshape_name = gen_name("gather", "reshape")
             index_name = reshape_name
             self.paddle_graph.add_layer(
-                "fluid.layers.reshape",
+                "paddle.reshape",
                 inputs={"x": index.name},
                 outputs=[reshape_name],
                 shape=[-1])
-        inputs = {'input': embeddings.name, 'index': index_name}
+        inputs = {'x': embeddings.name, 'index': index_name}
         self.paddle_graph.add_layer(
-            "fluid.layers.gather",
+            "paddle.gather",
             inputs=inputs,
             outputs=[node.name],
-            overwrite=False)
+            axis=axis)
         if len(index.out_shapes[0]) != 1:
             out_shape = node.out_shapes[0]
             self.paddle_graph.add_layer(
-                kernel="fluid.layers.reshape",
+                kernel="paddle.reshape",
                 inputs={"x": node.name},
                 outputs=[node.name],
                 shape=out_shape)

@@ -1518,19 +1495,19 @@ class TFOpMapper(OpMapper):
             outputs=[node.name])

     def ExpandDims(self, node):
-        x = self.graph.get_node(node.layer.input[0], copy=True)
-        y = self.graph.get_node(node.layer.input[1], copy=True)
-        inputs = {"input": x.name}
+        x = self.graph.get_input_node(node, 0, copy=True)
+        y = self.graph.get_input_node(node, 1, copy=True)
+        inputs = {"x": x.name}
         attr = dict()
         if y.layer_type == 'Const':
             dim = y.value.tolist()
             if not isinstance(dim, list):
                 dim = [dim]
-            attr['axes'] = dim
+            attr['axis'] = dim
         else:
-            inputs['axes'] = y.name
+            inputs['axis'] = y.name
         self.paddle_graph.add_layer(
-            "fluid.layers.unsqueeze",
+            "paddle.unsqueeze",
             inputs=inputs,
             outputs=[node.name],
             **attr)
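Dropping the axis == 0 assert in GatherV2 is possible because paddle.gather gained an axis argument in the 2.0 API. A standalone example:

    import paddle
    table = paddle.arange(12, dtype="float32").reshape([4, 3])
    idx = paddle.to_tensor([0, 2])
    out = paddle.gather(table, idx, axis=0)
    print(out.shape)   # [2, 3]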
x2paddle/optimizer/elimination/static/__init__.py (new file, 0 → 100644)

# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .transpose_elimination import StaticTransposeElimination
from .transpose_eliminate_pass import StaticTransposeEliminatePass
\ No newline at end of file
x2paddle/optimizer/elimination/static/transpose_eliminate_pass.py (new file, 0 → 100644)

# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from x2paddle.optimizer.pass_ import Pass
from x2paddle.optimizer.elimination.static import StaticTransposeElimination
from x2paddle.optimizer.pass_manager import pass_register


@pass_register
class StaticTransposeEliminatePass(Pass):
    name = "static_transpose_eliminate_pass"

    def __init__(self):
        Pass.__init__(self)

    def apply(self, graph):
        fuser = StaticTransposeElimination()
        fuser.operate(graph)


# used for registration
static_transpose_eliminate_pass = StaticTransposeEliminatePass()
\ No newline at end of file
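The decorator feeds a pass registry that GraphOptimizer later looks up by name. A plausible minimal version of what pass_register does; the real implementation lives in x2paddle.optimizer.pass_manager, so treat this as an assumption-level sketch:

    PASSES = {}

    def pass_register(cls):
        PASSES[cls.name] = cls   # keyed by the Pass subclass's `name` attribute (an assumption)
        return cls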
x2paddle/optimizer/tensorflow/transpose.py → x2paddle/optimizer/elimination/static/transpose_elimination.py

# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

 import copy
 import sys
 import numpy as np
+from x2paddle.optimizer.pattern_matcher import FuseBase
+from x2paddle.core.program import PaddleGraph, PaddleLayer
+from x2paddle.core.util import *


-class TransposeOpt:
+class StaticTransposeElimination(FuseBase):
     def __init__(self):
-        self.image_layers = [
-            'fluid.layers.conv2d', 'fluid.layers.batch_norm',
-            'fluid.layers.conv2d_transpose', 'fluid.layers.resize_nearest',
-            'fluid.layers.resize_bilinear', 'fluid.layers.pool2d',
-            'fluid.layers.pad2d'
-        ]
+        super(StaticTransposeElimination, self).__init__(graph_type="static")
         self.direct_layers = [
-            'fluid.layers.relu', 'fluid.layers.relu6', 'fluid.layers.abs',
-            'fluid.layers.sigmoid', 'fluid.layers.exp', 'fluid.layers.rsqrt',
-            'fluid.layers.swish_f32', 'fluid.layers.tanh',
-            'fluid.layers.softplus', 'fluid.layers.leaky_relu',
-            'fluid.layers.floor', 'fluid.layers.erf', 'fluid.layers.swish'
+            'paddle.nn.functional.relu', 'paddle.nn.functional.relu6',
+            'paddle.abs', 'paddle.nn.functional.sigmoid', 'paddle.exp',
+            'paddle.rsqrt', 'paddle.nn.functional.swish', 'paddle.tanh',
+            'paddle.nn.functional.softplus', 'paddle.nn.functional.leaky_relu',
+            'paddle.floor', 'paddle.erf', 'paddle.square'
         ]
         self.elementwise_layers = [
-            'fluid.layers.elementwise_add', 'fluid.layers.elementwise_sub',
-            'fluid.layers.elementwise_mul', 'fluid.layers.elementwise_div'
+            'paddle.add', 'fluid.layers.elementwise_sub',
+            'paddle.multiply', 'paddle.divide'
         ]
         self.reduce_layers = [
-            'fluid.layers.reduce_mean', 'fluid.layers.reduce_all',
-            'fluid.layers.reduce_max', 'fluid.layers.reduce_any',
-            'fluid.layers.reduce_sum', 'fluid.layers.reduce_prod'
+            'paddle.mean', 'paddle.all', 'paddle.max', 'paddle.any',
+            'paddle.sum', 'paddle.prod'
         ]

     def get_transpose_num(self, graph):
         count = 0
         for layer_id, layer in graph.layers.items():
-            if layer.kernel == "fluid.layers.transpose":
+            if layer.kernel == "paddle.transpose":
                 count += 1
         return count

-    def run(self, graph):
-        print("Optimize: TransposeOpt...")
+    def operate(self, graph):
         total_layer_num = len(graph.layers)
         scanned_layers = set()
         optimized_transpose_layers = list()

@@ -43,6 +55,12 @@ class TransposeOpt:
         optimized_concat_layers = list()
         optimized_elementwise_layers = list()

+        def get_index(layer):
+            if layer.kernel.startswith("paddle.nn") and "functional" not in layer.kernel:
+                return 1
+            else:
+                return 0
+
         def strip_transpose(_graph):
             layers = copy.deepcopy(_graph.layers)
             for layer_id, layer in layers.items():
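The get_index helper compensates for an output-layout convention this optimizer appears to assume: layers emitted as paddle.nn.* modules carry [module_name, tensor_name] in their outputs list, so the tensor sits at index 1, while functional and plain paddle.* kernels keep it at index 0. Restated standalone (same logic as above; the convention itself is an assumption drawn from this diff):

    def get_index(layer):
        if layer.kernel.startswith("paddle.nn") and "functional" not in layer.kernel:
            return 1   # paddle.nn.* layer: outputs[0] is the module name
        return 0       # functional / plain kernel: outputs[0] is the tensor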
@@ -53,7 +71,7 @@ class TransposeOpt:
                 sys.stderr.write("\rOptimize Transpose Layers...{}%".format(percent))

-                if layer.kernel != "fluid.layers.transpose":
+                if layer.kernel != "paddle.transpose":
                     continue
                 if layer.attrs["perm"] != [0, 2, 3, 1]:
                     continue

@@ -65,7 +83,7 @@ class TransposeOpt:
                 elementwise_layers = list()
                 can_be_optimized = True
                 for out in _graph.edges_out.get(layer_id, []):
-                    if _graph.layers[out].kernel == "fluid.layers.transpose":
+                    if _graph.layers[out].kernel == "paddle.transpose":
                         if _graph.layers[out].attrs["perm"] != [0, 3, 1, 2]:
                             can_be_optimized = False
                             break

@@ -73,21 +91,24 @@ class TransposeOpt:
                     elif _graph.layers[out].kernel in self.elementwise_layers:
                         propagate_layers.append(out)
                     elif _graph.layers[out].kernel in self.direct_layers:
-                        if _graph.layers[out].outputs[0] in _graph.outputs:
+                        ouput_index = get_index(_graph.layers[out])
+                        if _graph.layers[out].outputs[ouput_index] in _graph.outputs:
                             can_be_optimized = False
                             break
                         propagate_layers.append(out)
                     elif _graph.layers[out].kernel in self.reduce_layers:
-                        if _graph.layers[out].outputs[0] in _graph.outputs:
+                        ouput_index = get_index(_graph.layers[out])
+                        if _graph.layers[out].outputs[ouput_index] in _graph.outputs:
                             can_be_optimized = False
                             break
-                        if not _graph.layers[out].attrs.get('keep_dim', False):
+                        if not _graph.layers[out].attrs.get('keepdim', False):
                             can_be_optimized = False
                             break
                         propagate_layers.append(out)
                         reduce_layers.append(out)
-                    elif _graph.layers[out].kernel == "fluid.layers.concat":
-                        if _graph.layers[out].outputs[0] in _graph.outputs:
+                    elif _graph.layers[out].kernel == "paddle.concat":
+                        ouput_index = get_index(_graph.layers[out])
+                        if _graph.layers[out].outputs[ouput_index] in _graph.outputs:
                             can_be_optimized = False
                             break
                         propagate_layers.append(out)

@@ -102,37 +123,41 @@ class TransposeOpt:
                     visited_layers.add(current_id)
                     for out in _graph.edges_out.get(current_id, []):
-                        if _graph.layers[out].kernel == "fluid.layers.transpose":
+                        if _graph.layers[out].kernel == "paddle.transpose":
                             if _graph.layers[out].attrs["perm"] != [0, 3, 1, 2]:
                                 can_be_optimized = False
                                 break
                             transpose_layers.append(out)
                         elif _graph.layers[out].kernel in self.elementwise_layers:
-                            if _graph.layers[out].outputs[0] in _graph.outputs:
+                            output_index = get_index(_graph.layers[out])
+                            if _graph.layers[out].outputs[output_index] in _graph.outputs:
                                 can_be_optimized = False
                                 break
                             if out not in visited_layers:
                                 propagate_layers.append(out)
                         elif _graph.layers[out].kernel in self.direct_layers:
-                            if _graph.layers[out].outputs[0] in _graph.outputs:
+                            output_index = get_index(_graph.layers[out])
+                            if _graph.layers[out].outputs[output_index] in _graph.outputs:
                                 can_be_optimized = False
                                 break
                             if out not in visited_layers:
                                 propagate_layers.append(out)
                         elif _graph.layers[out].kernel in self.reduce_layers:
-                            if _graph.layers[out].outputs[0] in _graph.outputs:
+                            output_index = get_index(_graph.layers[out])
+                            if _graph.layers[out].outputs[output_index] in _graph.outputs:
                                 can_be_optimized = False
                                 break
-                            if not _graph.layers[out].attrs.get('keep_dim', False):
+                            if not _graph.layers[out].attrs.get('keepdim', False):
                                 can_be_optimized = False
                                 break
                             if out not in visited_layers:
                                 propagate_layers.append(out)
                                 reduce_layers.append(out)
-                        elif _graph.layers[out].kernel == "fluid.layers.concat":
-                            if _graph.layers[out].outputs[0] in _graph.outputs:
+                        elif _graph.layers[out].kernel == "paddle.concat":
+                            output_index = get_index(_graph.layers[out])
+                            if _graph.layers[out].outputs[output_index] in _graph.outputs:
                                 can_be_optimized = False
                                 break
                             if out not in visited_layers:

@@ -149,14 +174,15 @@ class TransposeOpt:
                         x_shape = _graph.layers[current_id].input_shapes['x']
                         y_shape = _graph.layers[current_id].input_shapes['y']
+                        output_index = get_index(_graph.layers[ipt])
-                        if _graph.layers[ipt].outputs[0] == _graph.layers[current_id].inputs['x']:
+                        if _graph.layers[ipt].outputs[output_index] == _graph.layers[current_id].inputs['x']:
                             if len(x_shape) <= 1:
                                 elementwise_layers.append(current_id)
                                 continue
-                        elif _graph.layers[ipt].outputs[0] == _graph.layers[current_id].inputs['y']:
+                        elif _graph.layers[ipt].outputs[output_index] == _graph.layers[current_id].inputs['y']:
                             if len(y_shape) <= 1:
                                 elementwise_layers.append(current_id)

@@ -168,8 +194,9 @@ class TransposeOpt:
                         except Exception as e:
                             can_be_optimized = False
                             break
+                        output_index = get_index(_graph.layers[ipt])
-                        if _graph.layers[ipt].kernel == "fluid.layers.transpose":
+                        if _graph.layers[ipt].kernel == "paddle.transpose":
                             if _graph.layers[ipt].attrs["perm"] != [0, 2, 3, 1]:
                                 can_be_optimized = False
                                 break

@@ -177,30 +204,30 @@ class TransposeOpt:
                             transpose_layers.append(ipt)
                         elif _graph.layers[ipt].kernel in self.elementwise_layers:
-                            if _graph.layers[ipt].outputs[0] in _graph.outputs:
+                            if _graph.layers[ipt].outputs[output_index] in _graph.outputs:
                                 can_be_optimized = False
                                 break
                             if ipt not in visited_layers:
                                 propagate_layers.append(ipt)
                         elif _graph.layers[ipt].kernel in self.direct_layers:
-                            if _graph.layers[ipt].outputs[0] in _graph.outputs:
+                            if _graph.layers[ipt].outputs[output_index] in _graph.outputs:
                                 can_be_optimized = False
                                 break
                             if ipt not in visited_layers:
                                 propagate_layers.append(ipt)
                         elif _graph.layers[ipt].kernel in self.reduce_layers:
-                            if _graph.layers[ipt].outputs[0] in _graph.outputs:
+                            if _graph.layers[ipt].outputs[output_index] in _graph.outputs:
                                 can_be_optimized = False
                                 break
-                            if not _graph.layers[ipt].attrs.get('keep_dim', False):
+                            if not _graph.layers[ipt].attrs.get('keepdim', False):
                                 can_be_optimized = False
                                 break
                             if ipt not in visited_layers:
                                 propagate_layers.append(ipt)
                                 reduce_layers.append(ipt)
-                        elif _graph.layers[ipt].kernel == "fluid.layers.concat":
-                            if _graph.layers[ipt].outputs[0] in _graph.outputs:
+                        elif _graph.layers[ipt].kernel == "paddle.concat":
+                            if _graph.layers[ipt].outputs[output_index] in _graph.outputs:
                                 can_be_optimized = False
                                 break
                             if ipt not in visited_layers:

@@ -217,7 +244,8 @@ class TransposeOpt:
                 transpose_layers.append(layer_id)

             transpose_layers = list(set(transpose_layers))
             for l in transpose_layers:
                 if graph
.
layers
[
l
].
outputs
[
0
]
in
graph
.
outputs
:
output_index
=
get_index
(
graph
.
layers
[
l
])
if
graph
.
layers
[
l
].
outputs
[
output_index
]
in
graph
.
outputs
:
can_be_optimized
=
False
break
if
not
can_be_optimized
:
...
...
@@ -243,17 +271,19 @@ class TransposeOpt:
for
layer_id
in
list
(
set
(
optimized_transpose_layers
)):
graph
.
del_layer
(
layer_id
)
for
layer_id
in
list
(
set
(
optimized_reduce_layers
)):
dim
=
graph
.
layers
[
layer_id
].
attrs
.
get
(
'
dim
'
,
None
)
dim
=
graph
.
layers
[
layer_id
].
attrs
.
get
(
'
axis
'
,
None
)
if
dim
is
not
None
:
for
i
in
range
(
len
(
dim
)):
dim
[
i
]
=
[
0
,
2
,
3
,
1
][
dim
[
i
]]
graph
.
layers
[
layer_id
].
attrs
[
'
dim
'
]
=
dim
graph
.
layers
[
layer_id
].
attrs
[
'
axis
'
]
=
dim
for
layer_id
in
list
(
set
(
optimized_concat_layers
)):
axis
=
graph
.
layers
[
layer_id
].
attrs
.
get
(
'axis'
,
0
)
graph
.
layers
[
layer_id
].
attrs
[
'axis'
]
=
[
0
,
2
,
3
,
1
][
axis
]
for
layer_id
in
list
(
set
(
optimized_elementwise_layers
)):
axis
=
graph
.
layers
[
layer_id
].
attrs
.
get
(
'axis'
,
-
1
)
graph
.
layers
[
layer_id
].
attrs
[
'axis'
]
=
[
0
,
2
,
3
,
1
][
axis
]
if
graph
.
layers
[
layer_id
].
kernel
==
"paddle.add"
:
graph
.
layers
[
layer_id
].
kernel
=
"fluid.layers.elementwise_add"
current_transpose_num
=
self
.
get_transpose_num
(
graph
)
print
(
...
...
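
The hunks above migrate the transpose-elimination pass from fluid kernels to paddle 2.0 kernels and route output lookups through get_index (paddle.nn layers keep their tensor output at index 1). The optimization itself rests on a permutation identity: a transpose with perm=[0, 2, 3, 1] (NCHW to NHWC) whose downstream consumers all end in a transpose with perm=[0, 3, 1, 2] (NHWC to NCHW) composes to the identity, so both can be dropped once every layer in between is layout-insensitive and reduce/concat/elementwise axes are remapped through [0, 2, 3, 1]. A minimal numpy sketch of that identity (the array shape and values are illustrative, not taken from the code):

    import numpy as np

    x = np.random.rand(2, 3, 4, 5)           # an NCHW tensor; shape is arbitrary
    nhwc = np.transpose(x, (0, 2, 3, 1))      # perm=[0, 2, 3, 1]: NCHW -> NHWC
    back = np.transpose(nhwc, (0, 3, 1, 2))   # perm=[0, 3, 1, 2]: NHWC -> NCHW
    assert np.array_equal(x, back)            # the pair cancels out

    # An axis a of a layer that ran in NHWC corresponds to axis [0, 2, 3, 1][a]
    # once the surrounding transposes are removed, which is exactly the remap
    # applied to the optimized reduce/concat/elementwise layers above.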
x2paddle/optimizer/fusion/dygraph/conv2d_add_fuser.py (view file @ fb07475f)

@@ -105,10 +105,6 @@ class DygraphConv2DAddFuser(FuseBase):
            if layer.kernel == "paddle.nn.Conv2D":
                conv_id = layer_id
        for layer_id, layer in matches.items():
-            if layer.kernel == "paddle.nn.functional.conv2d_transpose":
-                layer.bias = bias_name
-                if not is_transpose:
-                    layer.outputs[0] = output_name
            if layer.kernel == "paddle.nn.Conv2D":
                layer.attrs["bias_attr"] = bias_name
                if not is_transpose:
...
x2paddle/optimizer/fusion/static/__init__.py (view file @ fb07475f)

@@ -14,3 +14,10 @@
from .bn_scale_fuser import Static_BNScaleFuser
from .bn_scale_fuse_pass import Static_BNScaleFusePass
+from .conv2d_add_fuser import StaticConv2DAddFuser
+from .conv2d_add_fuse_pass import StaticConv2DAddFusePass
+from .prelu_fuser import StaticPReLUFuser
+from .prelu_fuse_pass import StaticPReLUFusePass
+from .tf_batchnorm_fuser import StaticTFBatchNormFuser
+from .tf_batchnorm_fuse_pass import StaticTFBatchNormFusePass
x2paddle/optimizer/fusion/static/bn_scale_fuser.py (view file @ fb07475f)

@@ -79,7 +79,6 @@ class Static_BNScaleFuser(FuseBase):
        graph.layers[new_layer_id] = new_layer
        matches.pop(new_layer_id)

    def gen_new_layer(self, parameters, matches):
        layers_id = list(matches.keys())
        layer = matches[layers_id[0]]
...
x2paddle/optimizer/fusion/static/conv2d_add_fuse_pass.py (new file, 0 → 100644, view file @ fb07475f)

# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from x2paddle.optimizer.pass_ import Pass
from x2paddle.optimizer.fusion.static import StaticConv2DAddFuser
from x2paddle.optimizer.pass_manager import pass_register


@pass_register
class StaticConv2DAddFusePass(Pass):
    name = "static_conv2d_add_fuse_pass"

    def __init__(self):
        Pass.__init__(self)

    def apply(self, graph):
        fuser = StaticConv2DAddFuser()
        fuser.operate(graph, match_kind="edge")


# For registration.
static_conv2d_add_fuse_pass = StaticConv2DAddFusePass()
\ No newline at end of file
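
The pass class above is only a named wrapper that hands the graph to the edge-matching fuser; the module-level instantiation at the bottom is what registers it with the pass manager. Applied on its own it would look roughly like the following sketch (graph stands for whatever static-graph PaddleGraph the converter produced; in normal use GraphOptimizer drives the registered passes instead):

    from x2paddle.optimizer.fusion.static import StaticConv2DAddFusePass

    fuse_pass = StaticConv2DAddFusePass()
    fuse_pass.apply(graph)  # runs StaticConv2DAddFuser with edge matching on the graph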
x2paddle/optimizer/fusion/static/conv2d_add_fuser.py (new file, 0 → 100644, view file @ fb07475f)

# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import numpy as np
from x2paddle.optimizer.pattern_matcher import FuseBase
from x2paddle.core.program import PaddleGraph, PaddleLayer
from x2paddle.core.util import *


class StaticConv2DAddFuser(FuseBase):
    def __init__(self):
        super(StaticConv2DAddFuser, self).__init__(graph_type="static")
        self.patterns = list()

    def build_pattern(self):
        """ Describe the conv2d+add graph structure that needs to be replaced.
        Example Python code for the conv2d+add pattern:
        Pattern 1:
            MobilenetV1_Logits_Conv2d_1c_1x1_biases = paddle.static.create_parameter(dtype='float32', shape=[1001], name='MobilenetV1_Logits_Conv2d_1c_1x1_biases', default_initializer=paddle.nn.initializer.Constant(value=0.0))
            conv2d_transpose_14 = paddle.transpose(x=MobilenetV1_Logits_AvgPool_1a_AvgPool, perm=[0, 3, 1, 2])
            MobilenetV1_Logits_Conv2d_1c_1x1_Conv2D = paddle.nn.functional.conv2d(x=conv2d_transpose_14, weight=MobilenetV1_Logits_Conv2d_1c_1x1_weights, bias=None, stride=[1, 1], dilation=[1, 1], padding='SAME')
            MobilenetV1_Logits_Conv2d_1c_1x1_Conv2D = paddle.transpose(x=MobilenetV1_Logits_Conv2d_1c_1x1_Conv2D, perm=[0, 2, 3, 1])
            MobilenetV1_Logits_Conv2d_1c_1x1_BiasAdd = paddle.add(x=MobilenetV1_Logits_Conv2d_1c_1x1_Conv2D, y=MobilenetV1_Logits_Conv2d_1c_1x1_biases)
        Pattern 2:
            MobilenetV1_Logits_Conv2d_1c_1x1_biases = paddle.static.create_parameter(dtype='float32', shape=[1001], name='MobilenetV1_Logits_Conv2d_1c_1x1_biases', default_initializer=paddle.nn.initializer.Constant(value=0.0))
            MobilenetV1_Logits_Conv2d_1c_1x1_Conv2D = paddle.nn.functional.conv2d(x=conv2d_transpose_14, weight=MobilenetV1_Logits_Conv2d_1c_1x1_weights, bias=None, stride=[1, 1], dilation=[1, 1], padding='SAME')
            MobilenetV1_Logits_Conv2d_1c_1x1_BiasAdd = paddle.add(x=MobilenetV1_Logits_Conv2d_1c_1x1_Conv2D, y=MobilenetV1_Logits_Conv2d_1c_1x1_biases)
        """

        def gen_name(id):
            return "x" + str(id)

        pattern = PaddleGraph(graph_type="dygraph")
        pattern.add_layer(
            "paddle.static.create_parameter", inputs={}, outputs=[gen_name(0)])
        pattern.add_layer(
            kernel="paddle.transpose",
            inputs={"x": "conv-input-0"},
            outputs=[gen_name(1)],
            perm=[0, 3, 1, 2])
        pattern.add_layer(
            kernel="paddle.nn.functional.conv2d",
            inputs={"input": gen_name(1),
                    "weight": "conv-input-1"},
            outputs=[gen_name(2)])
        pattern.add_layer(
            kernel="paddle.transpose",
            inputs={"x": gen_name(2)},
            outputs=[gen_name(2)],
            perm=[0, 2, 3, 1])
        pattern.add_layer(
            kernel="paddle.add",
            inputs={"x": gen_name(2),
                    "y": gen_name(0)},
            outputs=[gen_name(3)])
        pattern.build(
            inputs={"input-0": "conv-input-0",
                    "input-1": "conv-input-1"})
        self.patterns.append(pattern)

        pattern = PaddleGraph(graph_type="dygraph")
        pattern.add_layer(
            "paddle.static.create_parameter", inputs={}, outputs=[gen_name(0)])
        pattern.add_layer(
            kernel="paddle.nn.functional.conv2d",
            inputs={"input": "conv-input-0",
                    "weight": "conv-input-1"},
            outputs=[gen_name(1)])
        pattern.add_layer(
            kernel="paddle.add",
            inputs={"x": gen_name(1),
                    "y": gen_name(0)},
            outputs=[gen_name(2)])
        pattern.build(
            inputs={"input-0": "conv-input-0",
                    "input-1": "conv-input-1"})
        self.patterns.append(pattern)

    def insert_new_layer(self, graph, parameters, matches):
        self.gen_new_layer(matches, graph)
        matches_copy = copy.deepcopy(matches)
        for layer_id, layer in matches_copy.items():
            if layer.kernel not in ["paddle.add"]:
                matches.pop(layer_id)

    def gen_new_layer(self, matches, graph):
        is_transpose = False
        for layer_id, layer in matches.items():
            if layer.kernel == "paddle.static.create_parameter":
                bias_name = layer.attrs["name"][1:-1]
            if layer.kernel == "paddle.transpose":
                is_transpose = True
            if layer.kernel == "paddle.add":
                output_name = layer.outputs[0]
            if layer.kernel == "paddle.nn.functional.conv2d":
                conv_id = layer_id
        for layer_id, layer in matches.items():
            if layer.kernel == "paddle.nn.functional.conv2d":
                layer.inputs["bias"] = bias_name
                layer.attrs.pop("bias")
                if not is_transpose:
                    layer.outputs[0] = output_name
            if layer.kernel == "paddle.transpose":
                if conv_id in graph.edges_in[layer_id]:
                    layer.outputs[0] = output_name
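
In effect, gen_new_layer reroutes the bias parameter into the conv2d call and retargets the conv (or, in pattern 1, the trailing NHWC transpose) to the adder's output name, so the paddle.add node left in matches is the only layer removed from the graph. A before/after sketch of the emitted code for pattern 2 (identifiers shortened from the docstring example above):

    # Before fusion:
    #   y   = paddle.nn.functional.conv2d(x=inp, weight=w, bias=None, ...)
    #   out = paddle.add(x=y, y=biases)
    # After fusion, the add is gone and the bias rides on the conv itself:
    #   out = paddle.nn.functional.conv2d(x=inp, weight=w, bias=biases, ...)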
x2paddle/optimizer/fusion/static/prelu_fuse_pass.py (new file, 0 → 100644, view file @ fb07475f)

# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from x2paddle.optimizer.pass_ import Pass
from x2paddle.optimizer.fusion.static import StaticPReLUFuser
from x2paddle.optimizer.pass_manager import pass_register


@pass_register
class StaticPReLUFusePass(Pass):
    name = "static_prelu_fuse_pass"

    def __init__(self):
        Pass.__init__(self)

    def apply(self, graph):
        fuser = StaticPReLUFuser()
        fuser.operate(graph, match_kind="edge")


# For registration.
static_prelu_fuse_pass = StaticPReLUFusePass()
\ No newline at end of file
x2paddle/optimizer/fusion/static/prelu_fuser.py (new file, 0 → 100644, view file @ fb07475f)

# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import numpy as np
from collections import OrderedDict
from x2paddle.optimizer.pattern_matcher import FuseBase
from x2paddle.core.program import PaddleGraph, PaddleLayer
from x2paddle.core.util import *


class StaticPReLUFuser(FuseBase):
    def __init__(self):
        super(StaticPReLUFuser, self).__init__(graph_type="static")

    def build_pattern(self):
        """ Describe the prelu graph structure that needs to be replaced.
        Example Python code for the prelu pattern:
            conv4_alphas = paddle.static.create_parameter(dtype='float32', shape=[128], name='conv4_alphas', default_initializer=paddle.nn.initializer.Constant(value=0.0))
            conv4_mul_1_y = paddle.full(dtype='float32', shape=[1], fill_value=0.5)
            conv4_Relu = paddle.nn.functional.relu(x=conv4_BiasAdd)
            conv4_Abs = paddle.abs(x=conv4_BiasAdd)
            conv4_sub = fluid.layers.elementwise_sub(x=conv4_BiasAdd, y=conv4_Abs)
            conv4_mul = paddle.multiply(x=conv4_alphas, y=conv4_sub)
            conv4_mul_1 = paddle.multiply(x=conv4_mul, y=conv4_mul_1_y)
            conv4_add = paddle.add(x=conv4_Relu, y=conv4_mul_1)
        """

        def gen_name(id):
            return "x" + str(id)

        self.pattern.add_layer(
            "paddle.static.create_parameter", inputs={}, outputs=[gen_name(0)])
        self.pattern.add_layer(
            "paddle.full",
            inputs={},
            outputs=[gen_name(1)],
            shape=[1],
            fill_value=0.5)
        self.pattern.add_layer(
            "paddle.nn.functional.relu",
            inputs={"x": "prelu-input-0"},
            outputs=[gen_name(2)])
        self.pattern.add_layer(
            "paddle.abs", inputs={"x": "prelu-input-0"}, outputs=[gen_name(3)])
        self.pattern.add_layer(
            "fluid.layers.elementwise_sub",
            inputs={"x": "prelu-input-0",
                    "y": gen_name(3)},
            outputs=[gen_name(4)])
        self.pattern.add_layer(
            "paddle.multiply",
            inputs={"x": gen_name(0),
                    "y": gen_name(4)},
            outputs=[gen_name(5)])
        self.pattern.add_layer(
            "paddle.multiply",
            inputs={"x": gen_name(5),
                    "y": gen_name(1)},
            outputs=[gen_name(6)])
        self.pattern.add_layer(
            "paddle.add",
            inputs={"x": gen_name(2),
                    "y": gen_name(6)},
            outputs=[gen_name(7)])
        self.pattern.build(inputs={"input-0": "prelu-input-0", })

    def insert_new_layer(self, graph, parameters, matches):
        new_layers, last_layer_id = self.gen_new_layer(matches, parameters, graph)
        matches_copy = copy.deepcopy(matches)
        for layer_id, layer in matches_copy.items():
            for i in range(4):
                if layer_id == new_layers[i].id:
                    matches.pop(new_layers[i].id)
        prefix_layers = OrderedDict()
        mid_layers = OrderedDict()
        suffix_layers = OrderedDict()
        is_need_id = False
        for layer_id, layer in graph.layers.items():
            if is_need_id:
                suffix_layers[layer_id] = layer
            else:
                if layer_id == last_layer_id:
                    for i in range(4):
                        mid_layers[new_layers[i].id] = new_layers[i]
                    is_need_id = True
                prefix_layers[layer_id] = layer
        prefix_layers.update(mid_layers)
        prefix_layers.update(suffix_layers)
        graph.layers = prefix_layers

    def gen_new_layer(self, matches, parameters, graph):
        layer_id_list = list(matches.keys())
        layer_id_list.sort(key=int)
        for layer_id, layer in matches.items():
            if layer.kernel == "paddle.nn.functional.relu":
                input_name = layer.inputs["x"]
            if layer.kernel == "paddle.static.create_parameter":
                param_layer = layer
                param_name = layer.outputs[0]
            if layer.kernel == "paddle.add":
                output_name = layer.outputs[0]
        transpose0 = PaddleLayer(
            id=layer_id_list[-1] + "_1",
            kernel="paddle.transpose",
            inputs={"x": input_name},
            outputs=["{}_transpose_for_prelu".format(input_name)],
            perm=[0, 3, 1, 2])
        param = parameters[param_name]
        c = param.shape[0]
        prelu = PaddleLayer(
            id=layer_id_list[-1] + "_2",
            kernel="paddle.nn.functional.prelu",
            inputs={"x": "{}_transpose_for_prelu".format(input_name),
                    "weight": param_name},
            outputs=["{}_prelu".format(input_name)])
        transpose1 = PaddleLayer(
            id=layer_id_list[-1] + "_3",
            kernel="paddle.transpose",
            inputs={"x": "{}_prelu".format(input_name)},
            outputs=[output_name],
            perm=[0, 2, 3, 1])
        return [param_layer, transpose0, prelu, transpose1], layer_id_list[-1]
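
The pattern works because TensorFlow lowers PReLU to relu(x) + alpha * 0.5 * (x - |x|), and 0.5 * (x - |x|) equals min(0, x), which makes the whole expression the standard PReLU. A quick numpy check of that identity (alpha and the sample points are illustrative):

    import numpy as np

    x = np.linspace(-3.0, 3.0, 7)
    alpha = 0.25
    # The matched subgraph: relu + abs + elementwise_sub + two multiplies + add.
    subgraph = np.maximum(x, 0.0) + alpha * 0.5 * (x - np.abs(x))
    # Reference PReLU: x for x >= 0, alpha * x otherwise.
    reference = np.where(x >= 0.0, x, alpha * x)
    assert np.allclose(subgraph, reference)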
x2paddle/optimizer/fusion/static/tf_batchnorm_fuse_pass.py (new file, 0 → 100644, view file @ fb07475f)

# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from x2paddle.optimizer.pass_ import Pass
from x2paddle.optimizer.fusion.static import StaticTFBatchNormFuser
from x2paddle.optimizer.pass_manager import pass_register


@pass_register
class StaticTFBatchNormFusePass(Pass):
    name = "static_tf_batchnorm_fuse_pass"

    def __init__(self):
        Pass.__init__(self)

    def apply(self, graph):
        fuser = StaticTFBatchNormFuser()
        fuser.operate(graph, match_kind="edge")


# For registration.
static_tf_batchnorm_fuse_pass = StaticTFBatchNormFusePass()
\ No newline at end of file
x2paddle/optimizer/fusion/static/tf_batchnorm_fuser.py (new file, 0 → 100644, view file @ fb07475f)

# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import numpy as np
from collections import OrderedDict
from x2paddle.optimizer.pattern_matcher import FuseBase
from x2paddle.core.program import PaddleGraph, PaddleLayer
from x2paddle.core.util import *


class StaticTFBatchNormFuser(FuseBase):
    def __init__(self):
        super(StaticTFBatchNormFuser, self).__init__(graph_type="static")
        self.patterns = list()

    def build_pattern(self):
        """ Describe the batchnorm graph structure that needs to be replaced.
        Example Python code for the batchnorm pattern:
        """

        def gen_name(id):
            return "x" + str(id)

        pattern = PaddleGraph(graph_type="dygraph")
        pattern.add_layer(
            "paddle.static.create_parameter", inputs={}, outputs=[gen_name(0)])
        pattern.add_layer(
            "paddle.full", inputs={}, outputs=[gen_name(1)], shape=[1])
        pattern.add_layer(
            "paddle.add",
            inputs={"x": gen_name(0),
                    "y": gen_name(1)},
            outputs=[gen_name(2)])
        pattern.add_layer(
            "paddle.rsqrt", inputs={"x": gen_name(2)}, outputs=[gen_name(3)])
        pattern.add_layer(
            "paddle.static.create_parameter", inputs={}, outputs=[gen_name(4)])
        pattern.add_layer(
            "paddle.multiply",
            inputs={"x": gen_name(3),
                    "y": gen_name(4)},
            outputs=[gen_name(5)])
        pattern.add_layer(
            "paddle.static.create_parameter", inputs={}, outputs=[gen_name(6)])
        pattern.add_layer(
            "paddle.multiply",
            inputs={"x": gen_name(6),
                    "y": gen_name(5)},
            outputs=[gen_name(7)])
        pattern.add_layer(
            "paddle.static.create_parameter", inputs={}, outputs=[gen_name(8)])
        pattern.add_layer(
            "fluid.layers.elementwise_sub",
            inputs={"x": gen_name(8),
                    "y": gen_name(7)},
            outputs=[gen_name(9)])
        pattern.add_layer(
            "paddle.multiply",
            inputs={"x": "bn-input-0",
                    "y": gen_name(5)},
            outputs=[gen_name(10)])
        pattern.add_layer(
            "paddle.add",
            inputs={"x": gen_name(10),
                    "y": gen_name(9)},
            outputs=[gen_name(11)])
        pattern.build(inputs={"input-0": "bn-input-0", })
        self.patterns.append(pattern)

        pattern = PaddleGraph(graph_type="dygraph")
        pattern.add_layer(
            "paddle.static.create_parameter", inputs={}, outputs=[gen_name(0)])
        pattern.add_layer(
            "paddle.full", inputs={}, outputs=[gen_name(1)], shape=[1])
        pattern.add_layer(
            "paddle.add",
            inputs={"x": gen_name(0),
                    "y": gen_name(1)},
            outputs=[gen_name(2)])
        pattern.add_layer(
            "paddle.rsqrt", inputs={"x": gen_name(2)}, outputs=[gen_name(3)])
        pattern.add_layer(
            "paddle.static.create_parameter", inputs={}, outputs=[gen_name(4)])
        pattern.add_layer(
            "paddle.multiply",
            inputs={"x": gen_name(3),
                    "y": gen_name(4)},
            outputs=[gen_name(5)])
        pattern.add_layer(
            "paddle.multiply",
            inputs={"x": "bn-input-0",
                    "y": gen_name(5)},
            outputs=[gen_name(10)])
        pattern.add_layer(
            "paddle.static.create_parameter", inputs={}, outputs=[gen_name(6)])
        pattern.add_layer(
            "paddle.multiply",
            inputs={"x": gen_name(6),
                    "y": gen_name(5)},
            outputs=[gen_name(7)])
        pattern.add_layer(
            "paddle.static.create_parameter", inputs={}, outputs=[gen_name(8)])
        pattern.add_layer(
            "fluid.layers.elementwise_sub",
            inputs={"x": gen_name(8),
                    "y": gen_name(7)},
            outputs=[gen_name(9)])
        pattern.add_layer(
            "paddle.add",
            inputs={"x": gen_name(10),
                    "y": gen_name(9)},
            outputs=[gen_name(11)])
        pattern.build(inputs={"input-0": "bn-input-0", })
        self.patterns.append(pattern)

    def insert_new_layer(self, graph, parameters, matches):
        new_layers, last_layer_id = self.gen_new_layer(matches, parameters, graph)
        matches_copy = copy.deepcopy(matches)
        for layer_id, layer in matches_copy.items():
            for i in range(7):
                if layer_id == new_layers[i].id:
                    matches.pop(new_layers[i].id)
        prefix_layers = OrderedDict()
        mid_layers = OrderedDict()
        suffix_layers = OrderedDict()
        is_need_id = False
        for layer_id, layer in graph.layers.items():
            if is_need_id:
                suffix_layers[layer_id] = layer
            else:
                if layer_id == last_layer_id:
                    for i in range(7):
                        mid_layers[new_layers[i].id] = new_layers[i]
                    is_need_id = True
                prefix_layers[layer_id] = layer
        prefix_layers.update(mid_layers)
        prefix_layers.update(suffix_layers)
        graph.layers = prefix_layers

    def gen_new_layer(self, matches, parameters, graph):
        layer_id_list = list(matches.keys())
        layer_id_list.sort(key=int)
        for layer_id, layer in matches.items():
            if layer.kernel == "paddle.full":
                full_layer = layer
                out_layer_id = graph.edges_out[layer_id][0]
                if matches[out_layer_id].kernel == "paddle.add":
                    var_layer_id = graph.edges_in[out_layer_id][0]
                    var_layer = matches[var_layer_id]
            if layer.kernel == "paddle.rsqrt":
                out_layer_id = graph.edges_out[layer_id][0]
                if matches[out_layer_id].kernel == "paddle.multiply":
                    gamma_layer_id = graph.edges_in[out_layer_id][1]
                    gamma_layer = matches[gamma_layer_id]
            if layer.kernel == "fluid.layers.elementwise_sub":
                in_layer_id = graph.edges_in[layer_id][0]
                beta_layer = matches[in_layer_id]
                in_layer_id = graph.edges_in[layer_id][1]
                in_layer_id = graph.edges_in[in_layer_id][0]
                mean_layer = matches[in_layer_id]
                out_layer_id = graph.edges_out[layer_id][0]
                add_layer = matches[out_layer_id]
            if layer.kernel == "paddle.multiply":
                in_layer_id = graph.edges_in[layer_id][1]
                mul_layer = matches[in_layer_id]
                if mul_layer.kernel == "paddle.multiply":
                    in_layer_id = graph.edges_in[layer_id][0]
                    if in_layer_id not in matches:
                        input_name = layer.inputs["x"]
        transpose0 = PaddleLayer(
            id=layer_id_list[-1] + "_1",
            kernel="paddle.transpose",
            inputs={"x": input_name},
            outputs=["{}_transpose_for_bn".format(input_name)],
            perm=[0, 3, 1, 2])
        params = parameters[gamma_layer.outputs[0]]
        c = params.shape[0]
        bn = PaddleLayer(
            id=layer_id_list[-1] + "_2",
            kernel="paddle.nn.functional.batch_norm",
            inputs={"x": "{}_transpose_for_bn".format(input_name),
                    "running_mean": mean_layer.outputs[0],
                    "running_var": var_layer.outputs[0],
                    "weight": gamma_layer.outputs[0],
                    "bias": beta_layer.outputs[0]},
            outputs=["{}_bn".format(input_name)],
            epsilon=full_layer.attrs["fill_value"])
        transpose1 = PaddleLayer(
            id=layer_id_list[-1] + "_3",
            kernel="paddle.transpose",
            inputs={"x": "{}_bn".format(input_name)},
            outputs=add_layer.outputs,
            perm=[0, 2, 3, 1])
        mean_layer.id = layer_id_list[-1] + "_01"
        var_layer.id = layer_id_list[-1] + "_02"
        gamma_layer.id = layer_id_list[-1] + "_03"
        beta_layer.id = layer_id_list[-1] + "_04"
        return [mean_layer, var_layer, gamma_layer, beta_layer,
                transpose0, bn, transpose1], layer_id_list[-1]
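
The two patterns are the unfused form TensorFlow emits for inference-time batch norm: it precomputes scale = rsqrt(variance + epsilon) * gamma and then evaluates x * scale + (beta - mean * scale), which is algebraically gamma * (x - mean) / sqrt(variance + epsilon) + beta, i.e. what paddle.nn.functional.batch_norm computes from the four recovered parameter layers. A short numpy check of that algebra (shapes and values are illustrative):

    import numpy as np

    rng = np.random.default_rng(0)
    x, gamma, beta = rng.random(6), rng.random(6), rng.random(6)
    mean, var, eps = rng.random(6), rng.random(6) + 0.1, 1e-3

    scale = (1.0 / np.sqrt(var + eps)) * gamma      # rsqrt -> multiply (gen_name 3..5)
    decomposed = x * scale + (beta - mean * scale)  # multiply -> sub -> add (6..11)
    reference = gamma * (x - mean) / np.sqrt(var + eps) + beta
    assert np.allclose(decomposed, reference)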
x2paddle/optimizer/optimizer.py (view file @ fb07475f)

@@ -16,13 +16,13 @@ from x2paddle.optimizer.pass_manager import PassManager
from x2paddle.optimizer.fusion.dygraph import *
from x2paddle.optimizer.fusion.static import *
from x2paddle.optimizer.elimination.dygraph import *
+from x2paddle.optimizer.elimination.static import *


class GraphOptimizer(object):
    def __init__(self, source_frame, paddle_type="dygraph", jit_type="trace"):
        if source_frame == "pytorch":
            if jit_type == "trace":
-                self.passes = ["dygraph_constant_fuse_pass", "trace_fc_fuse_pass"]
+                self.passes = ["trace_fc_fuse_pass"]
            else:
                self.passes = [
                    "dygraph_constant_fuse_pass",
...
@@ -39,12 +39,20 @@ class GraphOptimizer(object):
            else:
                self.passes = ["static_bn_scale_fuse_pass"]
        elif source_frame == "tf":
            if paddle_type == "dygraph":
                self.passes = [
                    "dygraph_conv2d_add_fuse_pass",
                    "dygraph_tf_batchnorm_fuse_pass",
                    "dygraph_prelu_fuse_pass",
                    "transpose_eliminate_pass"
                ]
+            else:
+                self.passes = [
+                    "static_conv2d_add_fuse_pass",
+                    "static_tf_batchnorm_fuse_pass",
+                    "static_prelu_fuse_pass",
+                    "static_transpose_eliminate_pass"
+                ]
        else:
            self.passes = []
...
x2paddle/optimizer/tensorflow/__init__.py (deleted, 100644 → 0, view file @ 58f5dc51)
x2paddle/optimizer/tensorflow/batch_norm.py (deleted, 100644 → 0, view file @ 58f5dc51)

import copy
from collections import OrderedDict
from x2paddle.core.program import PaddleLayer


class BatchNormOpt:
    def __init__(self):
        pass

    def run(self, graph):
        print("Optimize: BatchNormOpt...")
        layers = copy.deepcopy(graph.layers)
        for layer_id, layer in layers.items():
            if layer.kernel != "fluid.layers.elementwise_add":
                continue
            axis = layer.attrs.get('axis', -1)
            if axis != -1 and axis != 3:
                continue

            input_ids0 = graph.edges_in[layer_id]
            mul_layer0 = graph.layers[input_ids0[0]]
            sub_layer0 = graph.layers[input_ids0[1]]
            if mul_layer0.kernel != "fluid.layers.elementwise_mul":
                continue
            if sub_layer0.kernel != "fluid.layers.elementwise_sub":
                continue
            axis = mul_layer0.attrs.get('axis', -1)
            if axis != -1 and axis != 3:
                continue
            axis = sub_layer0.attrs.get('axis', -1)
            if axis != -1 and axis != 0:
                continue
            if len(graph.edges_out.get(input_ids0[0], [])) != 1:
                continue
            if len(graph.edges_out.get(input_ids0[1], [])) != 1:
                continue

            input_ids1 = graph.edges_in[input_ids0[0]]
            nhwc_input = graph.layers[input_ids1[0]]
            mul_layer1 = graph.layers[input_ids1[1]]
            if mul_layer1.kernel != "fluid.layers.elementwise_mul":
                continue
            axis = mul_layer1.attrs.get('axis', -1)
            if axis != -1 and axis != 0:
                continue
            if len(graph.edges_out.get(input_ids1[1], [])) != 2:
                continue

            input_ids2 = graph.edges_in[input_ids0[1]]
            beta = graph.layers[input_ids2[0]]
            mul_layer2 = graph.layers[input_ids2[1]]
            if beta.kernel != "fluid.layers.create_parameter":
                continue
            axis = mul_layer2.attrs.get('axis', -1)
            if axis != -1 and axis != 0:
                continue
            if len(graph.edges_out.get(input_ids2[0], [])) != 1:
                continue
            if len(graph.edges_out.get(input_ids2[1], [])) != 1:
                continue
            if beta.outputs[0] not in graph.parameters:
                continue
            beta_shape = graph.parameters[beta.outputs[0]].shape
            if len(beta_shape) != 1:
                continue

            input_ids3 = graph.edges_in[input_ids2[1]]
            mean = graph.layers[input_ids3[0]]
            mul_layer3 = graph.layers[input_ids3[1]]
            if mean.kernel != "fluid.layers.create_parameter":
                continue
            axis = mul_layer3.attrs.get('axis', -1)
            if axis != -1 and axis != 0:
                continue
            if len(graph.edges_out.get(input_ids3[0], [])) != 1:
                continue
            if len(graph.edges_out.get(input_ids3[1], [])) != 2:
                continue
            if mul_layer3.id != mul_layer1.id:
                continue
            if mean.outputs[0] not in graph.parameters:
                continue
            mean_shape = graph.parameters[mean.outputs[0]].shape
            if mean_shape != beta_shape:
                continue

            input_ids4 = graph.edges_in[input_ids3[1]]
            rsqrt_layer = graph.layers[input_ids4[0]]
            gamma = graph.layers[input_ids4[1]]
            if rsqrt_layer.kernel != "fluid.layers.rsqrt":
                continue
            if gamma.kernel != "fluid.layers.create_parameter":
                continue
            if len(graph.edges_out.get(input_ids4[0], [])) != 1:
                continue
            if len(graph.edges_out.get(input_ids4[1], [])) != 1:
                continue
            if gamma.outputs[0] not in graph.parameters:
                continue
            gamma_shape = graph.parameters[gamma.outputs[0]].shape
            if gamma_shape != beta_shape:
                continue

            input_ids5 = graph.edges_in[input_ids4[0]]
            add_layer = graph.layers[input_ids5[0]]
            if add_layer.kernel != "fluid.layers.elementwise_add":
                continue
            axis = add_layer.attrs.get('axis', -1)
            if axis != -1 and axis != 0:
                continue
            if len(graph.edges_out.get(input_ids5[0], [])) != 1:
                continue

            input_ids6 = graph.edges_in[input_ids5[0]]
            variance = graph.layers[input_ids6[0]]
            other = graph.layers[input_ids6[1]]
            if variance.kernel != "fluid.layers.create_parameter":
                continue
            if other.kernel != "fluid.layers.fill_constant":
                continue
            if len(graph.edges_out.get(input_ids6[0], [])) != 1:
                continue
            if len(graph.edges_out.get(input_ids6[1], [])) != 1:
                continue
            if variance.outputs[0] not in graph.parameters:
                continue
            variance_shape = graph.parameters[variance.outputs[0]].shape
            if variance_shape != beta_shape:
                continue

            ids = set([
                layer_id, mul_layer0.id, sub_layer0.id, mul_layer1.id, beta.id,
                mul_layer2.id, mean.id, mul_layer2.id, rsqrt_layer.id,
                gamma.id, add_layer.id, variance.id, other.id
            ])
            for id in ids:
                del graph.layers[id]
                if id in graph.edges_in:
                    del graph.edges_in[id]
                if id in graph.edges_out:
                    del graph.edges_out[id]

            copy_layers = copy.deepcopy(graph.layers)
            graph.layers = OrderedDict()
            for k, v in copy_layers.items():
                if k != nhwc_input.id:
                    graph.layers[k] = v
                    continue
                graph.layers[k] = v
                transpose0 = PaddleLayer(
                    id='{}_1'.format(k),
                    kernel="fluid.layers.transpose",
                    inputs={"x": v.outputs[0]},
                    outputs=["transpose_for_bn"],
                    perm=[0, 3, 1, 2])
                bn = PaddleLayer(
                    id='{}_2'.format(k),
                    kernel="fluid.layers.batch_norm",
                    inputs={"input": "transpose_for_bn"},
                    outputs=layer.outputs,
                    epsilon=other.attrs["value"],
                    param_attr="'{}'".format(gamma.outputs[0]),
                    bias_attr="'{}'".format(beta.outputs[0]),
                    moving_mean_name="'{}'".format(mean.outputs[0]),
                    moving_variance_name="'{}'".format(variance.outputs[0]))
                transpose1 = PaddleLayer(
                    id=layer_id,
                    kernel="fluid.layers.transpose",
                    inputs={"x": layer.outputs[0]},
                    outputs=layer.outputs,
                    perm=[0, 2, 3, 1])
                graph.layers[transpose0.id] = transpose0
                graph.layers[bn.id] = bn
                graph.layers[transpose1.id] = transpose1
        graph.build()
x2paddle/optimizer/tensorflow/bias.py (deleted, 100644 → 0, view file @ 58f5dc51)

import copy


class BiasOpt:
    def __init__(self):
        self.conv_layers = [
            'fluid.layers.conv2d', 'fluid.layers.conv2d_transpose'
        ]

    def run(self, graph):
        print("Optimize: BiasOpt...")
        layers = copy.deepcopy(graph.layers)
        for layer_id, layer in layers.items():
            if layer.kernel in self.conv_layers or layer.kernel == "fluid.layers.transpose":
                if len(graph.edges_out.get(layer_id, [])) > 1:
                    continue
                if layer.outputs[0] in graph.outputs:
                    continue
                out_layer_id = graph.edges_out[layer_id][0]
                if graph.layers[out_layer_id].kernel != "fluid.layers.elementwise_add":
                    continue
                if graph.layers[out_layer_id].attrs.get('axis', -1) != -1:
                    continue

                in_layer_id = graph.edges_in[out_layer_id]
                bias_layer_id = in_layer_id[1 - in_layer_id.index(layer_id)]
                if graph.layers[bias_layer_id].kernel != "fluid.layers.create_parameter":
                    continue

                bias_layer = graph.layers[bias_layer_id]
                if len(bias_layer.attrs['shape']) != 1:
                    continue
                if len(graph.edges_out[bias_layer_id]) != 1:
                    continue

                if layer.kernel == "fluid.layers.transpose":
                    if layer.attrs['perm'] != [0, 2, 3, 1]:
                        continue
                    in_layer_id = graph.edges_in[layer_id][0]
                    if graph.layers[in_layer_id].kernel not in self.conv_layers:
                        continue
                    if graph.layers[in_layer_id].attrs['bias_attr'] != False:
                        continue
                    if graph.layers[in_layer_id].outputs[0] in graph.outputs:
                        continue
                    if len(graph.edges_out[in_layer_id]) != 1:
                        continue
                    graph.layers[in_layer_id].attrs['bias_attr'] = bias_layer.attrs['name']
                else:
                    graph.layers[layer_id].attrs['bias_attr'] = bias_layer.attrs['name']
                bias_add_outs = graph.edges_out.get(out_layer_id, [])
                bias_add_output = graph.layers[out_layer_id].outputs[0]
                graph.del_layer(bias_layer_id)
                graph.del_layer(out_layer_id)

                for out in bias_add_outs:
                    for k, v in graph.layers[out].inputs.items():
                        if v == layer.outputs[0]:
                            graph.layers[out].inputs[k] = bias_add_output
                graph.layers[layer_id].outputs[0] = bias_add_output

                if layer.kernel == "fluid.layers.transpose":
                    in_layer_id = graph.edges_in[layer_id][0]
                    graph.layers[in_layer_id].outputs[0] = bias_add_output
                    graph.layers[layer_id].inputs['x'] = bias_add_output
x2paddle/optimizer/tensorflow/prelu.py (deleted, 100644 → 0, view file @ 58f5dc51)

import copy
import numpy as np
from collections import OrderedDict
from x2paddle.core.program import PaddleLayer
from x2paddle.core.util import *


class PReLUOpt:
    def __init__(self):
        pass

    def run(self, graph):
        print("Optimize: PReLUOpt...")
        layers = copy.deepcopy(graph.layers)
        for layer_id, layer in layers.items():
            if layer.kernel != "fluid.layers.elementwise_add":
                continue
            axis = layer.attrs.get('axis', -1)
            if axis != -1 and axis != 3:
                continue

            input_ids0 = graph.edges_in[layer_id]
            relu_layer0 = graph.layers[input_ids0[0]]
            mul_layer0 = graph.layers[input_ids0[1]]
            if relu_layer0.kernel != "fluid.layers.relu":
                continue
            if mul_layer0.kernel != "fluid.layers.elementwise_mul":
                continue
            axis = mul_layer0.attrs.get('axis', -1)
            if axis != -1 and axis != 3:
                continue
            if len(graph.edges_out.get(input_ids0[0], [])) != 1:
                continue
            if len(graph.edges_out.get(input_ids0[1], [])) != 1:
                continue

            input_ids1_0 = graph.edges_in[input_ids0[0]]
            input_ids1_1 = graph.edges_in[input_ids0[1]]
            fill_layer = graph.layers[input_ids1_1[1]]
            mul_layer1 = graph.layers[input_ids1_1[0]]
            if fill_layer.kernel != "fluid.layers.fill_constant":
                continue
            if mul_layer1.kernel != "fluid.layers.elementwise_mul":
                continue
            axis = mul_layer1.attrs.get('axis', -1)
            if axis != -1 and axis != 0:
                continue
            if len(graph.edges_out.get(input_ids1_1[1], [])) != 1:
                continue
            if len(graph.edges_out.get(input_ids1_0[0], [])) != 3:
                continue

            input_ids2 = graph.edges_in[input_ids1_1[0]]
            alpha = graph.layers[input_ids2[0]]
            sub_layer = graph.layers[input_ids2[1]]
            if alpha.kernel != "fluid.layers.create_parameter":
                continue
            if sub_layer.kernel != "fluid.layers.elementwise_sub":
                continue
            axis = sub_layer.attrs.get('axis', -1)
            if axis != -1 and axis != 3:
                continue
            if len(graph.edges_out.get(input_ids2[0], [])) != 1:
                continue
            if len(graph.edges_out.get(input_ids2[1], [])) != 1:
                continue
            if alpha.outputs[0] not in graph.parameters:
                continue

            input_ids3 = graph.edges_in[input_ids2[1]]
            add_layer = graph.layers[input_ids3[0]]
            abs_layer = graph.layers[input_ids3[1]]
            if abs_layer.kernel != "fluid.layers.abs":
                continue
            if len(graph.edges_out.get(input_ids3[1], [])) != 1:
                continue

            ids = set([
                layer.id, relu_layer0.id, mul_layer0.id, fill_layer.id,
                mul_layer1.id, alpha.id, sub_layer.id, abs_layer.id
            ])
            for id in ids:
                del graph.layers[id]
                if id in graph.edges_in:
                    del graph.edges_in[id]
                if id in graph.edges_out:
                    del graph.edges_out[id]

            copy_layers = copy.deepcopy(graph.layers)
            graph.layers = OrderedDict()
            for k, v in copy_layers.items():
                if k != add_layer.id:
                    graph.layers[k] = v
                    continue
                graph.layers[k] = v
                transpose0 = PaddleLayer(
                    id='{}_1'.format(k),
                    kernel="fluid.layers.transpose",
                    inputs={"x": v.outputs[0]},
                    outputs=["transpose_for_prelu"],
                    perm=[0, 3, 1, 2])
                prelu = PaddleLayer(
                    id='{}_2'.format(k),
                    kernel="fluid.layers.prelu",
                    inputs={"x": "transpose_for_prelu"},
                    outputs=layer.outputs,
                    mode=string("channel"),
                    param_attr="'{}'".format(alpha.outputs[0]))
                transpose1 = PaddleLayer(
                    id=layer_id,
                    kernel="fluid.layers.transpose",
                    inputs={"x": layer.outputs[0]},
                    outputs=layer.outputs,
                    perm=[0, 2, 3, 1])
                graph.layers[transpose0.id] = transpose0
                graph.layers[prelu.id] = prelu
                graph.layers[transpose1.id] = transpose1
            first_axis = graph.parameters[alpha.outputs[0]].shape[0]
            graph.parameters[alpha.outputs[0]] = np.reshape(
                graph.parameters[alpha.outputs[0]], (1, first_axis, 1, 1))
        graph.build()
\ No newline at end of file