PaddlePaddle / X2Paddle
Commit 82b1c12a
Authored Sep 02, 2019 by Jason; committed via GitHub on Sep 02, 2019
Merge pull request #104 from jiangjiajun/develop
modify optimizer and fix conflicts
Parents: ea9a67b5, e6e5dbb9
Showing 5 changed files with 209 additions and 45 deletions (+209, -45)
x2paddle/convert.py (+1, -0)
x2paddle/decoder/tf_decoder.py (+1, -1)
x2paddle/op_mapper/tf_op_mapper.py (+31, -0)
x2paddle/op_mapper/tf_op_mapper_nhwc.py (+5, -16)
x2paddle/optimizer/tf_optimizer.py (+171, -28)
x2paddle/convert.py
```diff
@@ -113,6 +113,7 @@ def tf2paddle(model_path,
     optimizer.strip_graph()
     optimizer.merge_activation()
     optimizer.merge_bias()
+    optimizer.make_nchw_input_output()
     optimizer.remove_transpose()
     mapper.save_inference_model(save_dir)
```
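The new make_nchw_input_output pass is inserted before remove_transpose on purpose: it rewrites each 4-D NHWC Placeholder into an NCHW data layer followed by a transpose back to NHWC, and remove_transpose (which now also handles "Placeholder", see the optimizer diff below) can then cancel that transpose against the transposes of downstream consumers. A minimal sketch of the generated input pattern, assuming a paddle.fluid 1.x environment (variable names are illustrative, not the ones the converter emits):

```python
import paddle.fluid as fluid

# The feed variable is declared in NCHW layout ...
nchw_image = fluid.layers.data(name="image_nchw",
                               shape=[-1, 3, 224, 224],
                               dtype="float32",
                               append_batch_size=False)
# ... and a transpose restores the original NHWC layout for the rest of the graph.
image = fluid.layers.transpose(nchw_image, perm=[0, 2, 3, 1])
```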
x2paddle/decoder/tf_decoder.py
```diff
@@ -51,7 +51,7 @@ class TFGraphNode(GraphNode):
     @property
     def dtype(self):
-        keys = ['dtype', 'Tidx', 'T']
+        keys = ['dtype', 'Tidx', 'T', 'DstT']
         for k in keys:
             dtype = self.layer.attr[k].type
             if dtype > 0:
```
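Probing 'DstT' matters because TensorFlow's Cast op stores its output element type in the DstT attribute rather than in dtype or T. A small standalone check of that attribute layout (illustrative only, assuming a TF 1.x graph-mode environment):

```python
import tensorflow as tf

graph = tf.Graph()
with graph.as_default():
    x = tf.placeholder(tf.float32, shape=[None, 4], name="x")
    y = tf.cast(x, tf.int32, name="cast_to_int32")

for node in graph.as_graph_def().node:
    if node.op == "Cast":
        # The Cast NodeDef carries SrcT/DstT instead of a plain 'dtype' attr.
        print(node.name, sorted(node.attr.keys()))
```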
x2paddle/op_mapper/tf_op_mapper.py
```diff
@@ -1170,6 +1170,37 @@ class TFOpMapper(OpMapper):
                                   output=node,
                                   param_attr=attr)
 
+    def GreaterEqual(self, node):
+        x = self.graph.get_node(node.layer.input[0], copy=True)
+        y = self.graph.get_node(node.layer.input[1], copy=True)
+        inputs = {"x": x, "y": y}
+        node.fluid_code.add_layer("greater_equal",
+                                  inputs=inputs,
+                                  output=node,
+                                  param_attr=None)
+
+    def RandomUniform(self, node):
+        shape = self.graph.get_node(node.layer.input[0], copy=True)
+        self.add_omit_nodes(shape.layer_name, node.layer_name)
+        if shape.layer_type == "Const":
+            shape = shape.value.tolist()
+        else:
+            shape = self.decoder.infer_shape_tensor(shape)
+        if len(shape) == 4 and node.tf_data_format == "NHWC":
+            shape = [shape[i] for i in [0, 3, 1, 2]]
+        attr = {"shape": shape, "min": 0.0, "max": 0.9999}
+        if shape[0] < 0:
+            input = self.batch_node
+            node.fluid_code.add_layer("uniform_random_batch_size_like",
+                                      inputs=input,
+                                      output=node,
+                                      param_attr=attr)
+        else:
+            node.fluid_code.add_layer("uniform_random",
+                                      inputs=None,
+                                      output=node,
+                                      param_attr=attr)
+
     def SquaredDifference(self, node):
         x = self.graph.get_node(node.layer.input[0], copy=True)
         y = self.graph.get_node(node.layer.input[1], copy=True)
```
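For orientation, the fluid code these two mappers generate corresponds roughly to the calls below (a hand-written sketch, assuming a paddle.fluid 1.x environment; tensor names and shapes are made up for illustration):

```python
import paddle.fluid as fluid

x = fluid.layers.data(name="x", shape=[-1, 16], dtype="float32", append_batch_size=False)
y = fluid.layers.data(name="y", shape=[-1, 16], dtype="float32", append_batch_size=False)

# GreaterEqual -> fluid.layers.greater_equal
ge = fluid.layers.greater_equal(x=x, y=y)

# RandomUniform with a fully known shape -> fluid.layers.uniform_random
r_static = fluid.layers.uniform_random(shape=[4, 3, 8, 8], min=0.0, max=0.9999)

# RandomUniform whose leading dimension is the unknown batch size ->
# fluid.layers.uniform_random_batch_size_like, fed from the recorded batch node
r_batched = fluid.layers.uniform_random_batch_size_like(input=x,
                                                        shape=[-1, 3, 8, 8],
                                                        min=0.0,
                                                        max=0.9999)
```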
x2paddle/op_mapper/tf_op_mapper_nhwc.py
```diff
@@ -48,7 +48,8 @@ class TFOpMapperNHWC(OpMapper):
         'RealDiv': 'elementwise_div',
         'Sub': 'elementwise_sub',
         'Maximum': 'elementwise_max',
-        'Mul': 'elementwise_mul'
+        'Mul': 'elementwise_mul',
+        'FloorDiv': 'elementwise_floordiv'
     }
 
     def __init__(self, decoder):
```

```diff
@@ -200,14 +201,15 @@ class TFOpMapperNHWC(OpMapper):
         assert len(shape) != 0, "Unknown shape of input nodes[{}].".format(
             node.layer_name)
         dtype = node.dtype
+        if shape[0] < 0:
+            self.batch_node = node
         attr = {
             'dtype': string(dtype),
             'shape': shape,
             'name': string(node.layer_name),
             'append_batch_size': False
         }
-        if shape[0] < 0:
-            self.batch_node = node
 
         node.fluid_code.add_layer("data",
                                   inputs=None,
                                   output=node,
```

```diff
@@ -988,19 +990,6 @@ class TFOpMapperNHWC(OpMapper):
                                   output=node,
                                   param_attr=attr)
 
-    def FloorDiv(self, node):
-        x = self.graph.get_node(node.layer.input[0], copy=True)
-        y = self.graph.get_node(node.layer.input[1], copy=True)
-        inputs = {'x': x, 'y': y}
-        node.fluid_code.add_layer("elementwise_div",
-                                  inputs=inputs,
-                                  output=node,
-                                  param_attr=None)
-        node.fluid_code.add_layer("floor",
-                                  inputs=node,
-                                  output=node,
-                                  param_attr=None)
-
     def Split(self, node):
         dim = self.graph.get_node(node.layer.input[0], copy=True)
         input = self.graph.get_node(node.layer.input[1], copy=True)
```
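Dropping the dedicated FloorDiv method works because 'FloorDiv' is now listed in the elementwise op table above, so it is lowered in a single step instead of a division followed by a floor. Roughly (a sketch, assuming a paddle.fluid version that already provides elementwise_floordiv):

```python
import paddle.fluid as fluid

# Old lowering: divide, then floor.
a = fluid.layers.data(name="a", shape=[4], dtype="float32", append_batch_size=False)
b = fluid.layers.data(name="b", shape=[4], dtype="float32", append_batch_size=False)
old_out = fluid.layers.floor(fluid.layers.elementwise_div(a, b))

# New lowering: a single elementwise_floordiv driven by the op table.
p = fluid.layers.data(name="p", shape=[4], dtype="int32", append_batch_size=False)
q = fluid.layers.data(name="q", shape=[4], dtype="int32", append_batch_size=False)
new_out = fluid.layers.elementwise_floordiv(p, q)
```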
x2paddle/optimizer/tf_optimizer.py
```diff
@@ -14,7 +14,9 @@
 # TODO useless node remove
 from x2paddle.op_mapper.tf_op_mapper import TFOpMapper
+from x2paddle.core.fluid_code import Layer
 from x2paddle.core.util import *
+import copy as cp
 
 
 class TFOptimizer(object):
```

```diff
@@ -92,7 +94,6 @@ class TFOptimizer(object):
                 del out_node.inputs[index]
             del self.graph.node_map[node_name]
 
-    # TODO activation merge
     def merge_activation(self):
         act_nodes = list()
         for node_name in self.graph.topo_sort:
```

```diff
@@ -126,7 +127,6 @@ class TFOptimizer(object):
                     0].output
                 self.graph.remove_node(act_node_name)
 
-    # TODO bias merge
     def merge_bias(self):
         for node_name in self.graph.topo_sort:
             node = self.graph.get_node(node_name)
```

@@ -170,41 +170,184 @@ class TFOptimizer(object): — this hunk rewrites remove_transpose around a deep copy of the graph and adds the new make_nchw_input_output pass; the code after the change:

```python
    def remove_transpose(self):
        graph_copy = cp.deepcopy(self.graph)
        nhwc_insensitive_ops = [
            'Relu', 'Relu6', 'Abs', 'Sigmoid', 'Exp', 'Rsqrt', 'swish_f32',
            'LeakyRelu', 'Cast'
        ]
        elementwise_ops = [
            'Sub', 'Add', 'RealDiv', 'Maximum', 'Mul', 'FloorDiv',
            'GreaterEqual'
        ]
        for node_name in self.graph.topo_sort:
            node = graph_copy.get_node(node_name)
            if node is None:
                continue
            if node.layer_type in nhwc_insensitive_ops:
                graph_copy.remove_node(node_name)

        optimize_ops = [
            'Conv2D', 'MaxPool', 'FusedBatchNorm', 'DepthwiseConv2dNative',
            'AvgPool', 'Pad', 'Conv2DBackpropInput', 'ResizeNearestNeighbor',
            'ResizeBilinear', "Placeholder"
        ]

        for node_name in self.graph.topo_sort:
            node = graph_copy.get_node(node_name)
            if node is None:
                continue
            if node.layer_type in elementwise_ops:
                is_nhwc = True
                for in_name in node.inputs:
                    in_node = graph_copy.get_node(in_name)
                    if hasattr(in_node, "is_nhwc"):
                        if not in_node.is_nhwc:
                            is_nhwc = False
                    else:
                        if len(in_node.fluid_code.layers) < 2:
                            is_nhwc = False
                            continue
                        if in_node.fluid_code.layers[
                                -1].op != "transpose" or in_node.fluid_code.layers[
                                    -1].param_attr["perm"] != [0, 2, 3, 1]:
                            is_nhwc = False
                            continue
                node.is_nhwc = is_nhwc

        for i in range(len(self.graph.topo_sort)):
            node_name = self.graph.topo_sort[-1 * i - 1]
            node = graph_copy.get_node(node_name)
            if node is None:
                continue
            if node.layer_type in elementwise_ops:
                can_be_removed = True
                if len(node.fluid_code.layers) > 1:
                    can_be_removed = False
                if not node.is_nhwc:
                    can_be_removed = False
                for out_name in node.outputs:
                    out_node = graph_copy.get_node(out_name)
                    if hasattr(out_node, "is_nhwc"):
                        if not out_node.is_nhwc:
                            can_be_removed = False
                    else:
                        if len(out_node.fluid_code.layers) < 2:
                            can_be_removed = False
                            break
                        if out_node.fluid_code.layers[
                                0].op != "transpose" or out_node.fluid_code.layers[
                                    0].param_attr["perm"] != [0, 3, 1, 2]:
                            can_be_removed = False
                            break
                node.can_be_removed = can_be_removed

        for node_name in self.graph.topo_sort:
            node = graph_copy.get_node(node_name)
            if node is None:
                continue
            if node.layer_type in optimize_ops:
                if node.fluid_code.layers[
                        -1].op != "transpose" or node.fluid_code.layers[
                            -1].param_attr["perm"] != [0, 2, 3, 1]:
                    continue
                can_be_removed = True
                output_names = node.outputs
                for out_name in output_names:
                    out_node = graph_copy.get_node(out_name)
                    if hasattr(out_node, "can_be_removed"):
                        if not out_node.can_be_removed:
                            can_be_removed = False
                            break
                    elif out_node.fluid_code.layers[
                            0].op != "transpose" or out_node.fluid_code.layers[
                                0].param_attr["perm"] != [0, 3, 1, 2]:
                        can_be_removed = False
                        break
                if can_be_removed and len(node.fluid_code.layers) > 1:
                    true_node = self.graph.get_node(node_name)
                    if true_node.layer_type == "Placeholder":
                        index = self.graph.input_nodes.index(
                            true_node.fluid_code.layers[-2].output)
                        if isinstance(true_node.fluid_code.layers[-1].output,
                                      str):
                            self.graph.input_nodes[
                                index] = true_node.fluid_code.layers[-1].output
                        else:
                            self.graph.input_nodes[
                                index] = true_node.fluid_code.layers[
                                    -1].output.layer_name
                    true_node.fluid_code.layers[
                        -2].output = true_node.fluid_code.layers[-1].output
                    node.removed = True
                    del true_node.fluid_code.layers[-1]

                    for out_name in output_names:
                        out_node = self.graph.get_node(out_name)
                        if out_node.layer_type in elementwise_ops:
                            continue
                        out_node.fluid_code.layers[
                            1].inputs = out_node.fluid_code.layers[0].inputs
                        del out_node.fluid_code.layers[0]

        for node_name in self.graph.topo_sort:
            node = graph_copy.get_node(node_name)
            if node is None:
                continue
            if node.layer_type in elementwise_ops:
                if not node.can_be_removed:
                    true_node = self.graph.get_node(node_name)
                    for i, in_name in enumerate(node.inputs):
                        in_node = graph_copy.get_node(in_name)
                        if hasattr(in_node, "is_nhwc") and in_node.is_nhwc:
                            if i == 0:
                                l = Layer()
                                l.op = "transpose"
                                l.inputs = true_node.fluid_code.layers[
                                    0].inputs["x"]
                                l.param_attr = {"perm": [0, 2, 3, 1]}
                                l.output = "nhwc_" + l.inputs.layer_name
                                true_node.fluid_code.layers[0].inputs[
                                    "x"] = l.output
                                true_node.fluid_code.layers.insert(0, l)
                            elif i == 1:
                                l = Layer()
                                l.op = "transpose"
                                l.inputs = true_node.fluid_code.layers[
                                    0].inputs["y"]
                                l.param_attr = {"perm": [0, 2, 3, 1]}
                                l.output = "nhwc_" + l.inputs.layer_name
                                true_node.fluid_code.layers[0].inputs[
                                    "y"] = l.output
                                true_node.fluid_code.layers.insert(0, l)
                            else:
                                raise Exception(
                                    "Unexpected situation happend")
                    continue
                else:
                    for out_name in node.outputs:
                        out_node = self.graph.get_node(out_name)
                        if out_node.layer_type not in elementwise_ops:
                            assert out_node.fluid_code.layers[
                                0].op == "transpose", "unexpected situation happend"
                            out_node.fluid_code.layers[
                                1].inputs = out_node.fluid_code.layers[0].inputs
                            del out_node.fluid_code.layers[0]

    def make_nchw_input_output(self):
        for i, name in enumerate(self.graph.input_nodes):
            node = self.graph.get_node(name)
            if len(node.out_shapes[0]) == 4 and node.tf_data_format == "NHWC":
                shape = node.fluid_code.layers[0].param_attr["shape"]
                shape = [shape[i] for i in [0, 3, 1, 2]]
                node.fluid_code.layers[0].param_attr["shape"] = shape
                node.fluid_code.layers[0].output = "nhwc_" + name
                attr = {"perm": [0, 2, 3, 1]}
                node.fluid_code.add_layer("transpose",
                                          inputs="nhwc_" + name,
                                          output=node,
                                          param_attr=attr)
                self.graph.input_nodes[i] = "nhwc_" + name
        for i, name in enumerate(self.graph.output_nodes):
            node = self.graph.get_node(name)
            if node.layer_type != "transpose":
                if node.fluid_code.layers[-1].op == "transpose":
                    node.fluid_code.layers[-2].output = name
                    del node.fluid_code.layers[-1]
```
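remove_transpose relies on the fact that the two permutations it looks for, [0, 2, 3, 1] (NCHW to NHWC) and [0, 3, 1, 2] (NHWC to NCHW), are inverses of each other, so a back-to-back pair of such transposes can simply be deleted. A standalone illustration of that identity (NumPy only, independent of the generated fluid code):

```python
import numpy as np

x = np.random.rand(2, 3, 8, 8)            # an NCHW tensor
nhwc = x.transpose(0, 2, 3, 1)            # perm [0, 2, 3, 1]: NCHW -> NHWC
nchw_again = nhwc.transpose(0, 3, 1, 2)   # perm [0, 3, 1, 2]: NHWC -> NCHW

# The two permutations cancel, which is why the optimizer can drop the pair.
assert np.array_equal(x, nchw_again)
```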