PaddlePaddle / X2Paddle
Commit 2921ad4e
Authored Feb 13, 2020 by jiangjiajun@baidu.com

support tdnn

Parent: 64233f6b
Showing 4 changed files with 106 additions and 17 deletions (+106 -17)

x2paddle/convert.py                        +13  -9
x2paddle/core/fluid_code.py                +12  -4
x2paddle/decoder/tf_decoder.py             +29  -0
x2paddle/op_mapper/tf_op_mapper_nhwc.py    +52  -4
x2paddle/convert.py

@@ -90,11 +90,13 @@ def tf2paddle(model_path,
         version = tf.__version__
         if version >= '2.0.0' or version < '1.0.0':
-            print(
-                "1.0.0<=tensorflow<2.0.0 is required, and v1.14.0 is recommended"
-            )
+            print(
+                "[ERROR] 1.0.0<=tensorflow<2.0.0 is required, and v1.14.0 is recommended"
+            )
             return
     except:
-        print("Tensorflow is not installed, use \"pip install tensorflow\".")
+        print("[ERROR] Tensorflow is not installed, use \"pip install tensorflow\".")
         return
     from x2paddle.decoder.tf_decoder import TFDecoder

@@ -140,7 +142,7 @@ def caffe2paddle(proto, weight, save_dir, caffe_proto, params_merge=False):
     if (int(ver_part[0]) == 3 and int(ver_part[1]) >= 6) \
             or (int(ver_part[0]) > 3):
         version_satisfy = True
-    assert version_satisfy, 'google.protobuf >= 3.6.0 is required'
+    assert version_satisfy, '[ERROR] google.protobuf >= 3.6.0 is required'
     print("Now translating model from caffe to paddle.")
     model = CaffeDecoder(proto, weight, caffe_proto)
     mapper = CaffeOpMapper(model)

@@ -156,10 +158,10 @@ def onnx2paddle(model_path, save_dir, params_merge=False):
         import onnx
         version = onnx.version.version
         if version != '1.6.0':
-            print("onnx==1.6.0 is required")
+            print("[ERROR] onnx==1.6.0 is required")
             return
     except:
-        print("onnx is not installed, use \"pip install onnx==1.6.0\".")
+        print("[ERROR] onnx is not installed, use \"pip install onnx==1.6.0\".")
         return
     print("Now translating model from onnx to paddle.")

@@ -199,21 +201,23 @@ def main():
             import onnxruntime as rt
             version = rt.__version__
             if version != '1.0.0':
-                print("onnxruntime==1.0.0 is required")
+                print("[ERROR] onnxruntime==1.0.0 is required")
                 return
         except:
-            print(
-                "onnxruntime is not installed, use \"pip install onnxruntime==1.0.0\"."
-            )
+            print(
+                "[ERROR] onnxruntime is not installed, use \"pip install onnxruntime==1.0.0\"."
+            )

     try:
         import paddle
         v0, v1, v2 = paddle.__version__.split('.')
         if int(v0) != 1 or int(v1) < 6:
-            print("paddlepaddle>=1.6.0 is required")
+            print("[ERROR] paddlepaddle>=1.6.0 is required")
             return
     except:
-        print("paddlepaddle not installed, use \"pip install paddlepaddle\"")
+        print("[ERROR] paddlepaddle not installed, use \"pip install paddlepaddle\"")

     if args.framework == "tensorflow":
         assert args.model is not None, "--model should be defined while translating tensorflow model"
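All of the converters gate on dependency versions before doing any work; the [ERROR] prefix added in this hunk is what the user now sees when a gate fails. Below is a minimal standalone sketch of the TensorFlow gate (the function name and boolean return are illustrative, not part of x2paddle's API):

# Minimal sketch of the dependency gate used before conversion starts.
# check_tensorflow() and its return values are invented for this example;
# the message strings match the ones introduced in this commit.
def check_tensorflow():
    try:
        import tensorflow as tf
    except ImportError:
        print("[ERROR] Tensorflow is not installed, use \"pip install tensorflow\".")
        return False
    version = tf.__version__
    if version >= '2.0.0' or version < '1.0.0':
        print("[ERROR] 1.0.0<=tensorflow<2.0.0 is required, and v1.14.0 is recommended")
        return False
    return True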
x2paddle/core/fluid_code.py

@@ -36,6 +36,8 @@ class Layer(object):
         if self.is_custom_layer:
             layer_code = layer_code + self.op + "("
+        elif self.op == "=":
+            layer_code = layer_code
         else:
             layer_code = layer_code + "fluid.layers." + self.op + "("

@@ -70,11 +72,15 @@ class Layer(object):
         elif isinstance(self.inputs, GraphNode):
             if hasattr(self.inputs, "index"):
                 layer_code += (self.inputs.layer_name +
-                               "[{}]".format(self.inputs.index) + ", ")
+                               "[{}]".format(self.inputs.index))
             else:
-                layer_code += (self.inputs.layer_name + ", ")
+                layer_code += (self.inputs.layer_name)
+            if self.op != "=":
+                layer_code += ", "
         elif isinstance(self.inputs, six.string_types):
-            layer_code += (self.inputs + ", ")
+            layer_code += (self.inputs)
+            if self.op != "=":
+                layer_code += ", "
         else:
             raise Exception("Unknown type of inputs.")

@@ -85,7 +91,9 @@ class Layer(object):
                 layer_code = layer_code + key + "={}, ".format(value)
         layer_code = layer_code.strip(", ")

-        return layer_code + ")"
+        if self.op != "=":
+            layer_code += ")"
+        return layer_code


 class FluidCode(object):
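The effect of the new "=" handling in Layer.get_code: a layer whose op is "=" renders as a bare assignment instead of a fluid.layers call, which is what lets skipped graph nodes pass their input straight through. A toy re-implementation of just that string-building rule (emit() and its plain-string inputs are invented for the example; the real class works on GraphNode objects):

# Toy version of the "=" rule: "=" copies the input, any other op becomes a
# fluid.layers call with its parameters appended.
def emit(output, op, inputs, param_attr=None):
    if op == "=":
        code = "{} = ".format(output)
    else:
        code = "{} = fluid.layers.{}(".format(output, op)
    code += inputs
    if op != "=":
        if param_attr:
            code += ", " + ", ".join(
                "{}={}".format(k, v) for k, v in param_attr.items())
        code += ")"
    return code

print(emit("conv_1", "conv2d", "inputs", {"num_filters": 64}))
# conv_1 = fluid.layers.conv2d(inputs, num_filters=64)
print(emit("batch_to_space_1", "=", "conv_1"))
# batch_to_space_1 = conv_1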
x2paddle/decoder/tf_decoder.py

@@ -136,6 +136,7 @@ class TFGraph(Graph):
         # tensorflow graph optimize
         self._remove_isolated_node()
+        self._optimize_dialiation_conv()
         self._remove_identity_node()
         self._remove_cast_node()

@@ -175,6 +176,34 @@ class TFGraph(Graph):
             idx = self.topo_sort.index(node_name)
             del self.topo_sort[idx]

+    def _optimize_dialiation_conv(self):
+        for name in list(self.node_map.keys()):
+            node = self.node_map[name]
+            if node.layer_type == "SpaceToBatchND":
+                is_dilation = True
+                out_node0 = self.node_map[node.outputs[0]]
+                if out_node0.layer_type != 'ExpandDims':
+                    is_dilation = False
+                    continue
+                out_node1 = self.node_map[out_node0.outputs[0]]
+                if out_node1.layer_type != 'Conv2D':
+                    is_dilation = False
+                    continue
+                out_node2 = self.node_map[out_node1.outputs[0]]
+                if out_node2.layer_type != 'Squeeze':
+                    is_dilation = False
+                    continue
+                out_node3 = self.node_map[out_node2.outputs[0]]
+                if out_node3.layer_type != 'BatchToSpaceND':
+                    is_dilation = False
+                    continue
+
+                if is_dilation:
+                    node.skip = True
+                    out_node3.skip = True
+                    block_shape = self.node_map[node.inputs[1]]
+                    out_node1.dilation = block_shape.value.tolist()
+
     def _remove_isolated_node(self):
         # delete isolated nodes
         isolated_nodes = list()
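TensorFlow typically lowers a dilated 1-D convolution, the building block of a TDNN layer, into the five-node chain this pass looks for: SpaceToBatchND -> ExpandDims -> Conv2D -> Squeeze -> BatchToSpaceND. The sketch below runs the same chain check over a toy node map (the Node tuple and node names are invented; x2paddle keeps real TFGraphNode objects in self.node_map):

# Toy illustration of the chain that _optimize_dialiation_conv() detects and fuses.
from collections import namedtuple

Node = namedtuple("Node", ["layer_type", "outputs"])

node_map = {
    "space_to_batch": Node("SpaceToBatchND", ["expand_dims"]),
    "expand_dims":    Node("ExpandDims",     ["conv"]),
    "conv":           Node("Conv2D",         ["squeeze"]),
    "squeeze":        Node("Squeeze",        ["batch_to_space"]),
    "batch_to_space": Node("BatchToSpaceND", ["output"]),
}

chain = ["SpaceToBatchND", "ExpandDims", "Conv2D", "Squeeze", "BatchToSpaceND"]

name, matched = "space_to_batch", []
for expected in chain:
    node = node_map.get(name)
    if node is None or node.layer_type != expected:
        matched = []          # not the dilation pattern; leave the graph alone
        break
    matched.append(name)
    name = node.outputs[0]

if matched:
    # The real pass marks the two padding nodes skip=True and copies the
    # SpaceToBatchND block shape onto the Conv2D node as its dilation.
    print("fuse", matched, "into one dilated Conv2D")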
x2paddle/op_mapper/tf_op_mapper_nhwc.py

@@ -40,6 +40,7 @@ class TFOpMapperNHWC(OpMapper):
         'Sigmoid': ['sigmoid'],
         'Exp': ['exp'],
         'Rsqrt': ['rsqrt'],
+        'Sqrt': ['sqrt'],
         'swish_f32': ['swish'],
         'Tanh': ['tanh'],
         'LeakyRelu': ['leaky_relu', {

@@ -48,6 +49,7 @@ class TFOpMapperNHWC(OpMapper):
     }
     elementwise_ops = {
         'Add': 'elementwise_add',
+        'AddV2': 'elementwise_add',
         'RealDiv': 'elementwise_div',
         'Sub': 'elementwise_sub',
         'Maximum': 'elementwise_max',

@@ -90,10 +92,12 @@ class TFOpMapperNHWC(OpMapper):
                 if len(unsupported_ops) > 0:
                     continue
                 func = getattr(self, op)
-                func(node)
+                try:
+                    func(node)
+                except:
+                    unsupported_ops.add(op)
             else:
                 unsupported_ops.add(op)
-                continue
         if len(unsupported_ops) > 0:
             print("========= {} OPs are not supported yet ===========".format(
                 len(unsupported_ops)))
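The try/except around func(node) changes the failure mode: an op whose mapper raises is recorded in unsupported_ops and reported at the end, instead of aborting the conversion on the first error. A self-contained sketch of that dispatch pattern (ToyMapper and its ops are invented for illustration, not the real TFOpMapperNHWC):

# Illustrative dispatch loop with the same failure handling as the mapper change.
class ToyMapper(object):
    def Relu(self, node):
        return "fluid.layers.relu({})".format(node)

    def Conv2D(self, node):
        raise NotImplementedError("pretend this mapper fails")

    def run(self, nodes):
        unsupported_ops = set()
        for op, node in nodes:
            if hasattr(self, op):
                if len(unsupported_ops) > 0:
                    continue                 # already failing; keep scanning for more gaps
                func = getattr(self, op)
                try:
                    func(node)
                except:
                    unsupported_ops.add(op)  # record the failure instead of crashing
            else:
                unsupported_ops.add(op)
        if len(unsupported_ops) > 0:
            print("========= {} OPs are not supported yet ===========".format(
                len(unsupported_ops)))

ToyMapper().run([("Relu", "x"), ("Conv2D", "x"), ("FancyOp", "x")])
# ========= 2 OPs are not supported yet ===========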
@@ -342,7 +346,6 @@ class TFOpMapperNHWC(OpMapper):
     def Conv2D(self, node):
         input = self.graph.get_node(node.layer.input[0], copy=True)
         kernel = self.graph.get_node(node.layer.input[1], copy=True)
-        assert kernel.layer_type == "Const", "Kernel of Conv2D should be Const"
         self.add_omit_nodes(kernel.layer_name, node.layer_name)

         in_shape = input.out_shapes[0]

@@ -358,8 +361,12 @@ class TFOpMapperNHWC(OpMapper):
         pad_mode = node.get_attr("padding").decode()
         channel_first = data_format == "NCHW"

+        if kernel.layer_type == 'Const':
+            kernel_value = kernel.value
+        else:
+            kernel_value = self.decoder.infer_tensor(kernel)
         self.weights[kernel.layer_name.replace('/', '_')] = numpy.transpose(
-            kernel.value, (3, 2, 0, 1))
+            kernel_value, (3, 2, 0, 1))
         if not channel_first:
             in_shape = [in_shape[i] for i in [0, 3, 1, 2]]

@@ -381,6 +388,11 @@ class TFOpMapperNHWC(OpMapper):
             "dilation": dilations[2:4],
             "padding": string(pad_mode)
         }
+
+        if hasattr(node, 'dilation') and attr['dilation'] == [1, 1]:
+            if len(node.dilation) == 1:
+                attr['dilation'] = [1, node.dilation[0]]
+
         node.fluid_code.add_layer("conv2d",
                                   inputs=input,
                                   output=node,
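The dilation fix-up above only fires when the decoder attached a recovered dilation to the node and the Conv2D attribute is still the default [1, 1]; a 1-D block shape such as [2] then becomes [1, 2], applying the rate along the second spatial axis (the time dimension in the TDNN case). A minimal runnable sketch of just that rewrite (ToyNode is invented for the example):

# Sketch of the dilation fix-up added to Conv2D mapping.
class ToyNode(object):
    pass

node = ToyNode()
node.dilation = [2]               # block shape of the surrounding SpaceToBatchND

attr = {"dilation": [1, 1], "padding": "SAME"}
if hasattr(node, 'dilation') and attr['dilation'] == [1, 1]:
    if len(node.dilation) == 1:
        attr['dilation'] = [1, node.dilation[0]]

print(attr)  # {'dilation': [1, 2], 'padding': 'SAME'}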
@@ -1135,3 +1147,39 @@ class TFOpMapperNHWC(OpMapper):
                                   inputs=inputs,
                                   output=node,
                                   param_attr=None)
+
+    def ExpandDims(self, node):
+        x = self.graph.get_node(node.layer.input[0], copy=True)
+        y = self.graph.get_node(node.layer.input[1], copy=True)
+        if y.layer_type == 'Const':
+            dim = y.value.tolist()
+        else:
+            dim = self.decoder.infer_tensor(y)
+        self.add_omit_nodes(y.layer_name, node.layer_name)
+        attr = {'axes': [dim]}
+        node.fluid_code.add_layer("unsqueeze",
+                                  inputs=x,
+                                  output=node,
+                                  param_attr=attr)
+
+    def BatchToSpaceND(self, node):
+        x = self.graph.get_node(node.layer.input[0], copy=True)
+        y = self.graph.get_node(node.layer.input[1], copy=True)
+        if hasattr(node, 'skip') and node.skip:
+            node.fluid_code.add_layer("=",
+                                      inputs=x,
+                                      output=node,
+                                      param_attr=None)
+        else:
+            raise Exception("BatchToSpaceND is not supported")
+
+    def SpaceToBatchND(self, node):
+        x = self.graph.get_node(node.layer.input[0], copy=True)
+        y = self.graph.get_node(node.layer.input[1], copy=True)
+        if hasattr(node, 'skip') and node.skip:
+            node.fluid_code.add_layer("=",
+                                      inputs=x,
+                                      output=node,
+                                      param_attr=None)
+        else:
+            raise Exception("SpaceToBatchND is not supported")
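Taken together with the decoder change, the skipped SpaceToBatchND/BatchToSpaceND pair collapses to plain assignments and the dilation survives on the Conv2D, so the emitted model code for a TDNN-style dilated convolution is just ordinary fluid calls. The snippet below only prints a hand-written approximation of that output; the real generated code also contains layout transposes and uses names derived from the TF graph:

# Hand-written approximation of the converter's output for the fused pattern;
# every name, axis, and size here is illustrative.
generated = [
    "space_to_batch_1 = feats                                            # SpaceToBatchND, skipped",
    "expand_dims_1 = fluid.layers.unsqueeze(space_to_batch_1, axes=[2])  # ExpandDims",
    "conv_1 = fluid.layers.conv2d(expand_dims_1, num_filters=512,",
    "                             filter_size=[1, 3], dilation=[1, 2],",
    "                             padding='SAME')                        # recovered dilation",
    "squeeze_1 = fluid.layers.squeeze(conv_1, axes=[2])                  # Squeeze",
    "batch_to_space_1 = squeeze_1                                        # BatchToSpaceND, skipped",
]
print("\n".join(generated))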