PaddlePaddle / X2Paddle
Commit b670c23e
Authored Aug 03, 2020 by jiangjiajun
Parent: 842108b5

    modify program

Showing 5 changed files with 771 additions and 800 deletions.
x2paddle/__init__.py                       +2    -2
x2paddle/convert.py                        +4   -22
x2paddle/core/program.py                 +131   -27
x2paddle/decoder/tf_decoder.py            +10    -0
x2paddle/op_mapper/tf_op_mapper_nhwc.py  +624  -749
x2paddle/__init__.py

@@ -8,10 +8,10 @@ name_counter = dict()
 
 
 def gen_name(op_name, var_name):
-    name = "{}.{}".format(op_name, var_name)
+    name = "{}_{}".format(op_name, var_name)
     if name not in name_counter:
         name_counter[name] = 0
     else:
         name_counter[name] += 1
-    name = name + "." + str(name_counter[name])
+    name = name + "_" + str(name_counter[name])
     return name
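For reference, a runnable sketch of what the renamed helper now produces (it re-implements only the two changed lines above; the module-level name_counter dict is taken from the hunk context): repeated (op_name, var_name) pairs now get an underscore-separated counter suffix instead of a dot, so the generated names stay valid Python identifiers.

    # Standalone sketch of gen_name after this commit; mirrors the diff above.
    name_counter = dict()

    def gen_name(op_name, var_name):
        name = "{}_{}".format(op_name, var_name)
        if name not in name_counter:
            name_counter[name] = 0
        else:
            name_counter[name] += 1
        name = name + "_" + str(name_counter[name])
        return name

    print(gen_name("conv", "output"))  # conv_output_0
    print(gen_name("conv", "output"))  # conv_output_1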
x2paddle/convert.py

@@ -13,6 +13,7 @@
 # limitations under the License.
 
 from six import text_type as _text_type
+from x2paddle import program
 import argparse
 import sys
 
@@ -120,29 +121,10 @@ def tf2paddle(model_path,
     print("Now translating model from tensorflow to paddle.")
     model = TFDecoder(model_path, define_input_shape=define_input_shape)
-    if not without_data_format_optimization:
-        mapper = TFOpMapper(model)
-        optimizer = TFOptimizer(mapper)
-        # neccesary optimization
-        optimizer.delete_redundance_code()
-        # optimizer below is experimental
-        optimizer.optimize_elementwise_op()
-        optimizer.merge_activation()
-        optimizer.merge_bias()
-        optimizer.optimize_sub_graph()
-        # optimizer.merge_batch_norm()
-        # optimizer.merge_prelu()
-    else:
-        mapper = TFOpMapperNHWC(model)
-        optimizer = TFOptimizer(mapper)
-        optimizer.delete_redundance_code()
-        optimizer.strip_graph()
-        optimizer.merge_activation()
-        optimizer.merge_bias()
-        optimizer.make_nchw_input_output()
-        optimizer.remove_transpose()
-    mapper.save_inference_model(save_dir, params_merge)
+    mapper = TFOpMapperNHWC(model)
+    program.build()
+    program.gen_model(save_dir)
 
 
 def caffe2paddle(proto, weight, save_dir, caffe_proto, params_merge=False):
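The TensorFlow path now skips the old optimizer passes entirely: the decoder output goes straight into TFOpMapperNHWC, and code and model generation are driven through the new program module. A minimal sketch of the assumed wiring (import paths are inferred from the file layout in this commit, and `program` is assumed to be a module-level PaddleProgram instance that the mapper fills via add_layer()):

    # Hedged sketch, not the real CLI entry point.
    from x2paddle import program
    from x2paddle.decoder.tf_decoder import TFDecoder
    from x2paddle.op_mapper.tf_op_mapper_nhwc import TFOpMapperNHWC

    def convert_tf_model(model_path, save_dir, define_input_shape=False):
        model = TFDecoder(model_path, define_input_shape=define_input_shape)
        TFOpMapperNHWC(model)        # assumed to populate the shared `program`
        program.build()              # resolve producer/consumer edges
        program.gen_model(save_dir)  # emits model_with_code/ and inference_model/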
x2paddle/core/program.py

@@ -14,8 +14,13 @@
 from __future__ import print_function
 from __future__ import division
+import paddle.fluid as fluid
+from paddle.fluid.proto import framework_pb2
+import numpy
 import collections
 import sys
 import os
 import six
 
 
 class PaddleLayer(object):
@@ -25,7 +30,21 @@ class PaddleLayer(object):
             dict), "parameter 'inputs' for PaddleLayer should be type of dict"
         assert isinstance(
             outputs,
-            list), "parameter, 'outputs' for PaddleLayer should be type of list"
+            list), "parameter 'outputs' for PaddleLayer should be type of list"
+        for k, v in inputs.items():
+            if isinstance(v, list):
+                for i in v:
+                    assert isinstance(
+                        i, six.string_types
+                    ), "value in inputs should be type of string or list of string"
+            else:
+                assert isinstance(v, six.string_types) or isinstance(
+                    v, list
+                ), "value in inputs should be type of string or list of string"
+        for v in outputs:
+            assert isinstance(
+                v, six.string_types
+            ), "elements in outputs should be type of string"
         self.kernel = kernel
         self.inputs = inputs
         self.outputs = outputs
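With the stricter checks, every input value passed through add_layer() must be a tensor name (a string) or a list of tensor names, and every output must be a string. A hypothetical call that satisfies the new asserts (the kernel string, tensor names, and attributes below are illustrative, not taken from the diff; PaddleProgram() is assumed to take no required constructor arguments):

    # Hypothetical usage that passes the new PaddleLayer validation.
    prog = PaddleProgram()
    prog.add_layer(
        "fluid.layers.conv2d",            # kernel string written verbatim into the generated code
        inputs={"input": "conv1_input"},  # each input value: a str or a list of str
        outputs=["conv1_output"],         # every output must be a str
        num_filters=64,
        filter_size=3)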
@@ -41,29 +60,40 @@ class PaddleProgram(object):
         self.outputs = list()
         self.parameters = dict()
 
+    def clear(self):
+        self.layers = list()
+        self.edges_out = dict()
+        self.edges_in = dict()
+        self.inputs = list()
+        self.outputs = list()
+        self.parameters = dict()
+
     def add_layer(self, kernel, inputs, outputs, **kwargs):
         layer = PaddleLayer(kernel, inputs, outputs, **kwargs)
         index = len(self.layers)
         self.layers.append(layer)
         return index
 
     def build(self):
-        outputs = dict()
-        for i in range(len(self.layers)):
-            layer = self.layers[i]
+        outputs_from_nodes = dict()
+        for i, layer in enumerate(self.layers):
+            for input_key, input_var in layer.inputs.items():
+                vs = input_var
+                if not isinstance(vs, list):
+                    vs = [vs]
+                for v in vs:
+                    assert v in outputs_from_nodes, "Couldn't find {} in previous layers, the layers should be make by topological sort".format(
+                        v)
+                    in_layer_index = outputs_from_nodes[v]
+                    if in_layer_index not in self.edges_out:
+                        self.edges_out[in_layer_index] = list()
+                    self.edges_out[in_layer_index].append(i)
+                    if i not in self.edges_in:
+                        self.edges_in[i] = list()
+                    self.edges_in[i].append(in_layer_index)
             for output in layer.outputs:
-                outputs[output] = i
-            for k, v in layer.inputs.items():
-                assert v in outputs, "Couldn't find {} in previous layers, the layers should be make by topological sort".format(
-                    v)
-                in_layer_index = outputs[v]
-                if in_layer_index not in self.edges_out:
-                    self.edges_out[in_layer_index] = list()
-                self.edges_out[in_layer_index].append(i)
-                if i not in self.edges_in:
-                    self.edges_in[i] = list()
-                self.edges_in[i].append(in_layer_index)
+                outputs_from_nodes[output] = i
 
     def get_layer_outputs(self, i):
         return self.edges_out[i]
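To make the rewritten build() bookkeeping concrete, here is a small self-contained illustration of the same edge-building idea (plain dicts stand in for PaddleLayer objects; the names are made up): every output name is recorded with the index of the layer that produced it, and each input name, whether a single string or a list of strings, is resolved against that table to fill edges_in and edges_out.

    # Toy illustration of the edge bookkeeping in PaddleProgram.build().
    layers = [
        {"inputs": {}, "outputs": ["x"]},                 # layer 0 produces "x"
        {"inputs": {"x": "x"}, "outputs": ["y"]},         # layer 1 consumes "x"
        {"inputs": {"x": ["x", "y"]}, "outputs": ["z"]},  # layer 2 consumes "x" and "y"
    ]

    outputs_from_nodes = {}
    edges_in, edges_out = {}, {}
    for i, layer in enumerate(layers):
        for _, input_var in layer["inputs"].items():
            vs = input_var if isinstance(input_var, list) else [input_var]
            for v in vs:
                producer = outputs_from_nodes[v]
                edges_out.setdefault(producer, []).append(i)
                edges_in.setdefault(i, []).append(producer)
        for output in layer["outputs"]:
            outputs_from_nodes[output] = i

    print(edges_in)   # {1: [0], 2: [0, 1]}
    print(edges_out)  # {0: [1, 2], 1: [2]}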
@@ -80,7 +110,9 @@ class PaddleProgram(object):
                 else:
                     f.write(indent_blank + code_line + '\n')
 
-        f = open(os.path.join(code_dir, 'model.py'), 'w')
+        if not os.path.exists(code_dir):
+            os.makedirs(code_dir)
+        f = open(os.path.join(code_dir, 'x2paddle_model.py'), 'w')
 
         write_code(f, [
@@ -90,9 +122,10 @@ class PaddleProgram(object):
             "",
             "def x2paddle_net():"
         ], indent=0)
 
         for i, layer in enumerate(self.layers):
-            if self.edges_in.get(i, 0) == 0 and self.edges_out.get(i, 0) == 0:
+            edges_in = self.edges_in.get(i, [])
+            edges_out = self.edges_out.get(i, [])
+            if len(edges_in) == 0 and len(edges_out) == 0:
                 continue
             line = ""
@@ -106,16 +139,87 @@ class PaddleProgram(object):
             line += " = {}(".format(layer.kernel)
             for k, v in layer.inputs.items():
-                line += "{}={}, ".format(k, v)
+                if isinstance(v, list):
+                    line += "{}=[{}], ".format(k, ", ".join(v))
+                else:
+                    line += "{}={}, ".format(k, v)
             for k, v in layer.attrs.items():
                 line += "{}={}, ".format(k, v)
             line = line.strip(", ")
             line += ")"
             write_code(f, [line], indent=1)
-        f.close()
 
-    def gen_parameters(self, code_dir):
-        pass
+        write_code(f, [
+            "return [{}], [{}]".format(", ".join(self.inputs), ", ".join(
+                self.outputs))
+        ], indent=1)
+        f.close()
 
-    def gen_inference_model(self, model_dir):
-        pass
+    def gen_model(self, save_dir):
+        code_dir = os.path.join(save_dir, 'model_with_code')
+        infer_dir = os.path.join(save_dir, 'inference_model')
+        self.gen_code(code_dir)
+        sys.path.append(code_dir)
+        import x2paddle_model
+        scope = fluid.Scope()
+        startup_program = fluid.Program()
+        main_program = fluid.Program()
+        with fluid.scope_guard(scope):
+            with fluid.program_guard(main_program, startup_program):
+                inputs, outputs = x2paddle_model.x2paddle_net()
+            exe = fluid.Executor(fluid.CPUPlace())
+            exe.run(startup_program)
+
+            param_dir = os.path.join(code_dir, 'weights')
+            for k, v in self.parameters.items():
+                if scope.find_var(k):
+                    self.dump_parameter(k, v, param_dir)
+
+            def if_exist(var):
+                b = os.path.exists(
+                    os.path.join(os.path.join(param_dir, var.name)))
+                return b
+
+            fluid.io.load_vars(
+                exe, param_dir, main_program, predicate=if_exist)
+            fluid.io.save_inference_model(
+                dirname=infer_dir,
+                feeded_var_names=[i.name for i in inputs],
+                target_vars=outputs,
+                executor=exe)
+
+    def dump_parameter(self, param_name, param, save_dir):
+        if not os.path.exists(save_dir):
+            os.makedirs(save_dir)
+        dtype_map = {
+            "int16": [framework_pb2.VarType.INT16, 'h'],
+            "int32": [framework_pb2.VarType.INT32, 'i'],
+            "int64": [framework_pb2.VarType.INT64, 'q'],
+            "float16": [framework_pb2.VarType.FP16, 'e'],
+            "float32": [framework_pb2.VarType.FP32, 'f'],
+            "float64": [framework_pb2.VarType.FP64, 'd'],
+            "bool": [framework_pb2.VarType.BOOL, None]
+        }
+        shape = param.shape
+        if str(param.dtype) in ['uint8', 'uint_8', 'bool']:
+            param = param.astype('int64')
+        if len(shape) == 0:
+            assert param.size == 1, "Unexpected situation happend!"
+            shape = [1]
+        assert str(param.dtype) in dtype_map, "Unknown dtype {} of params: {}.".format(
+            str(param.dtype), param_name)
+        fp = open(os.path.join(save_dir, param_name), 'wb')
+        numpy.array([0], dtype='int32').tofile(fp)
+        numpy.array([0], dtype='int64').tofile(fp)
+        numpy.array([0], dtype='int32').tofile(fp)
+        tensor_desc = framework_pb2.VarType.TensorDesc()
+        tensor_desc.data_type = dtype_map[str(param.dtype)][0]
+        tensor_desc.dims.extend(shape)
+        desc_size = tensor_desc.ByteSize()
+        numpy.array([desc_size], dtype='int32').tofile(fp)
+        fp.write(tensor_desc.SerializeToString())
+        param.tofile(fp)
+        fp.close()
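dump_parameter() writes each weight in a per-parameter binary layout: three placeholder fields written as zeros (an int32, an int64, and another int32, apparently version and LoD markers), then an int32 byte length, the serialized VarType.TensorDesc, and finally the raw tensor data. A hedged reader sketch for inspecting such a file (the field meanings are inferred from the write order above; it assumes a little-endian host and one of the float/int dtypes mapped below):

    import struct

    import numpy
    from paddle.fluid.proto import framework_pb2

    def load_dumped_parameter(path):
        # Reads back the layout produced by PaddleProgram.dump_parameter() above.
        with open(path, 'rb') as fp:
            fp.read(4)   # int32 written as 0
            fp.read(8)   # int64 written as 0
            fp.read(4)   # int32 written as 0
            desc_size = struct.unpack('<i', fp.read(4))[0]
            tensor_desc = framework_pb2.VarType.TensorDesc()
            tensor_desc.ParseFromString(fp.read(desc_size))
            dtype = {
                framework_pb2.VarType.FP32: 'float32',
                framework_pb2.VarType.FP64: 'float64',
                framework_pb2.VarType.INT32: 'int32',
                framework_pb2.VarType.INT64: 'int64',
            }[tensor_desc.data_type]
            data = numpy.fromfile(fp, dtype=dtype)
            return data.reshape(list(tensor_desc.dims))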
x2paddle/decoder/tf_decoder.py

@@ -89,6 +89,16 @@ class TFGraphNode(GraphNode):
         field = getattr(attr, attr.WhichOneof('value'))
         return tensor_util.MakeNdarray(field)
 
+    @property
+    def name(self):
+        multi_out_ops = ['Split', 'SplitV', 'IteratorV2']
+        if self.layer_type in multi_out_ops:
+            if self.layer_name.count(':') > 0:
+                return self.layer_name.replace(':', '_p')
+            else:
+                return "{}_p0".format(self.layer_name)
+        return self.layer_name
+
     def get_attr(self, name):
         if name not in self.layer.attr:
             return None
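The new name property only rewrites nodes whose op type can produce multiple outputs, replacing the ':' in a TensorFlow tensor name with '_p' so the result is usable as an identifier. A standalone illustration of the mapping (the property body re-implemented as a free function; the example node names are made up):

    # Mirrors TFGraphNode.name for multi-output ops.
    def mapped_name(layer_type, layer_name):
        multi_out_ops = ['Split', 'SplitV', 'IteratorV2']
        if layer_type in multi_out_ops:
            if layer_name.count(':') > 0:
                return layer_name.replace(':', '_p')
            return "{}_p0".format(layer_name)
        return layer_name

    print(mapped_name('Split', 'model/split:1'))   # model/split_p1
    print(mapped_name('Split', 'model/split'))     # model/split_p0
    print(mapped_name('Conv2D', 'model/conv:0'))   # model/conv:0 (unchanged)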
x2paddle/op_mapper/tf_op_mapper_nhwc.py

This diff is collapsed and not reproduced here (+624 -749).