Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
PaddlePaddle
X2Paddle
提交
13320626
X
X2Paddle
项目概览
PaddlePaddle
/
X2Paddle
1 年多 前同步成功
通知
328
Star
698
Fork
167
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
26
列表
看板
标记
里程碑
合并请求
4
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
X
X2Paddle
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
26
Issue
26
列表
看板
标记
里程碑
合并请求
4
合并请求
4
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
未验证
提交
13320626
编写于
7月 30, 2019
作者:
J
Jason
提交者:
GitHub
7月 30, 2019
浏览文件
操作
浏览文件
下载
差异文件
Merge pull request #1 from PaddlePaddle/develop
Develop
上级
2e33688a
6ce44b8e
变更
4
展开全部
隐藏空白更改
内联
并排
Showing
4 changed file
with
1325 addition
and
102 deletion
+1325
-102
README.md
README.md
+24
-18
x2paddle/decoder/caffe_decoder.py
x2paddle/decoder/caffe_decoder.py
+41
-20
x2paddle/decoder/caffe_shape.py
x2paddle/decoder/caffe_shape.py
+276
-2
x2paddle/op_mapper/caffe_op_mapper.py
x2paddle/op_mapper/caffe_op_mapper.py
+984
-62
未找到文件。
README.md
浏览文件 @
13320626
# X2Paddle
X2Paddle is a toolkit for converting trained model to PaddlePaddle from other deep learning frameworks. 支持主流深度学习框架模型转换至PaddlePaddle(飞桨)
X2Paddle支持将其余深度学习框架训练得到的模型,转换至PaddlePaddle模型。
X2Paddle is a toolkit for converting trained model to PaddlePaddle from other deep learning frameworks.
## Requirements
python >= 3.5
paddlepaddle >= 1.5.0
tensorflow == 1.x
## Installation
```
pip install git+https://github.com/PaddlePaddle/X2Paddle.git@develop
```
## How To Use
```
x2paddle --framework=tensorflow --model=tf_model.pb --save_dir=pd_model
```
## 转换tensorflow vgg_16模型
...
...
@@ -40,14 +56,9 @@ with tf.Session() as sess:
### 步骤三 模型转换
```
git clone https://github.com/PaddlePaddle/X2Paddle.git
cd X2Paddle
git checkout develop
export PYTHONPATH=${PWD}
mkdir paddle_model
python x2paddle/convert.py --framework=tensorflow \
--model=../vgg16.pb \
--save_dir=paddle_model
x2paddle --framework=tensorflow \
--model=../vgg16.pb \
--save_dir=paddle_model
```
## 转换caffe SqueezeNet模型
...
...
@@ -60,12 +71,7 @@ wget https://github.com/DeepScale/SqueezeNet/blob/master/SqueezeNet_v1.1/deploy.
### 步骤二 模型转换
```
git clone https://github.com/PaddlePaddle/X2Paddle.git
cd X2Paddle
git checkout develop
export PYTHONPATH=${PWD}:$PYTHONPATH
mkdir paddle_model
python x2paddle/convert.py --framework=caffe \
--weight=../squeezenet_v1.1.caffemodel \
--proto=../deploy.prototxt \
--save_dir=paddle_model
x2paddle --framework=caffe \
--weight=../squeezenet_v1.1.caffemodel \
--proto=../deploy.prototxt \
--save_dir=paddle_model
x2paddle/decoder/caffe_decoder.py
浏览文件 @
13320626
...
...
@@ -60,11 +60,14 @@ class CaffeResolver(object):
class CaffeGraphNode(GraphNode):
    """Graph node wrapping a single Caffe layer.

    Layer names containing '/' are normalized to '_' so they can be used
    as identifiers in generated code.
    """

    def __init__(self, layer, layer_name=None):
        # Prefer an explicit layer_name when given; otherwise fall back to
        # the name carried by the layer proto itself.
        effective_name = layer.name if layer_name is None else layer_name
        super(CaffeGraphNode, self).__init__(layer,
                                             effective_name.replace('/', '_'))
        self.layer_type = layer.type
        self.fluid_code = FluidCode()
        # Trained parameters (blobs); filled in later via set_params().
        self.data = None

    def set_params(self, params):
        """Attach the layer's trained parameters to this node."""
        self.data = params
...
@@ -117,24 +120,42 @@ class CaffeGraph(Graph):
inputs_num
=
len
(
self
.
model
.
input
)
if
inputs_num
!=
0
:
input_dims_num
=
len
(
self
.
model
.
input_dim
)
if
input_dims_num
>
0
and
input_dims_num
!=
inputs_num
*
4
:
raise
Error
(
'invalid input_dim[%d] param in prototxt'
%
(
input_dims_num
))
for
i
in
range
(
inputs_num
):
dims
=
self
.
model
.
input_dim
[
i
*
4
:(
i
+
1
)
*
4
]
data
=
self
.
model
.
layer
.
add
()
try
:
from
caffe
import
layers
as
L
data
.
CopyFrom
(
L
.
Input
(
input_param
=
dict
(
shape
=
dict
(
dim
=
[
dims
[
0
],
dims
[
1
],
dims
[
2
],
dims
[
3
]
]))).
to_proto
().
layer
[
0
])
except
:
raise
Error
(
'You must install the caffe first when you use old style prototxt.'
)
data
.
name
=
self
.
model
.
input
[
0
]
data
.
top
[
0
]
=
self
.
model
.
input
[
0
]
if
input_dims_num
!=
0
:
if
input_dims_num
>
0
and
input_dims_num
!=
inputs_num
*
4
:
raise
Error
(
'invalid input_dim[%d] param in prototxt'
%
(
input_dims_num
))
for
i
in
range
(
inputs_num
):
dims
=
self
.
model
.
input_dim
[
i
*
4
:(
i
+
1
)
*
4
]
data
=
self
.
model
.
layer
.
add
()
try
:
from
caffe
import
layers
as
L
data
.
CopyFrom
(
L
.
Input
(
input_param
=
dict
(
shape
=
dict
(
dim
=
[
dims
[
0
],
dims
[
1
],
dims
[
2
],
dims
[
3
]
]))).
to_proto
().
layer
[
0
])
except
:
raise
ImportError
(
'You must install the caffe first when you use old style prototxt.'
)
data
.
name
=
self
.
model
.
input
[
i
]
data
.
top
[
0
]
=
self
.
model
.
input
[
i
]
else
:
for
i
in
range
(
inputs_num
):
dims
=
self
.
model
.
input_shape
[
i
].
dim
[
0
:
4
]
data
=
self
.
model
.
layer
.
add
()
try
:
from
caffe
import
layers
as
L
data
.
CopyFrom
(
L
.
Input
(
input_param
=
dict
(
shape
=
dict
(
dim
=
[
dims
[
0
],
dims
[
1
],
dims
[
2
],
dims
[
3
]
]))).
to_proto
().
layer
[
0
])
except
:
raise
ImportError
(
'You must install the caffe first when you use old style prototxt.'
)
data
.
name
=
self
.
model
.
input
[
i
]
data
.
top
[
0
]
=
self
.
model
.
input
[
i
]
layers
=
[
data
]
+
layers
top_layer
=
{}
for
layer
in
layers
:
...
...
x2paddle/decoder/caffe_shape.py
浏览文件 @
13320626
...
...
@@ -110,7 +110,7 @@ def get_strided_kernel_output_shape(params, input_shape, round_func):
round_func
)
has_c_o
=
hasattr
(
params
,
'num_output'
)
c
=
params
.
num_output
if
has_c_o
else
input_shape
[
1
]
return
[[
input_shape
[
0
],
c
,
o_h
,
o_w
]]
...
...
@@ -169,6 +169,7 @@ def shape_softmax(layer, input_shape):
def shape_input(layer, input_shape):
    """Return the shape declared by an Input layer's input_param."""
    declared = layer.input_param.shape[0].dim
    return [list(declared)]
def
shape_concat
(
layer
,
input_shape
):
params
=
layer
.
concat_param
axis
=
params
.
axis
...
...
@@ -178,4 +179,277 @@ def shape_concat(layer, input_shape):
output_shape
=
shape
else
:
output_shape
[
axis
]
+=
shape
[
axis
]
return
[
output_shape
]
\ No newline at end of file
return
[
output_shape
]
def shape_slice(layer, input_shape):
    """Infer output shapes of a Slice layer.

    The input is split along ``slice_param.axis`` at the positions given in
    ``slice_param.slice_point``; one output shape is produced per segment.
    """
    inshape = input_shape[0]
    params = layer.slice_param
    axis = params.axis
    count = inshape[axis]
    points = list(params.slice_point)
    points = [0] + points + [count]
    output_shape = []
    for i in range(len(points)):
        # Fixed: the original did `shape = inshape`, aliasing one list for
        # every segment, so all returned shapes shared the last segment size.
        shape = list(inshape)
        size = points[i + 1] - points[i]
        shape[axis] = size
        output_shape.append(shape)
        # len(points) - 2 is the index of the last valid segment start.
        if i == len(points) - 2:
            break
    return output_shape
def shape_prelu(layer, input_shape):
    """PReLU is elementwise; the output shape equals the input shape."""
    return input_shape
def shape_sigmoid(layer, input_shape):
    """Sigmoid is elementwise; the output shape equals the input shape."""
    return input_shape
def shape_absval(layer, input_shape):
    """AbsVal is elementwise; the output shape equals the input shape."""
    return input_shape
def shape_accuracy(layer, input_shape):
    """Accuracy reduces to a single scalar value."""
    return [[1]]
def shape_tanh(layer, input_shape):
    """TanH is elementwise; the output shape equals the input shape."""
    return input_shape
def shape_eltwise(layer, input_shape):
    """Eltwise combines same-shaped inputs; output matches the first one."""
    return [input_shape[0]]
def shape_batchnorm(layer, input_shape):
    """BatchNorm normalizes in place shape-wise; output equals input."""
    return input_shape
def shape_scale(layer, input_shape):
    """Scale is elementwise; the output shape equals the input shape."""
    return input_shape
def shape_reshape(layer, input_shape):
    """Infer the output shape of a Reshape layer (Caffe semantics).

    ``reshape_param`` dims of 0 copy the corresponding input dim; a single
    -1 dim is inferred from the remaining element count. ``axis`` /
    ``num_axes`` select which input axes are replaced.

    Fixes vs. the original:
      * ``hasattr(params, axis)`` / ``hasattr(params, num_axes)`` passed
        undefined bare names instead of strings (NameError at runtime);
      * ``shape['dim']`` referenced an undefined local ``shape``
        (NOTE(review): assumes the shape param is dict-like with a 'dim'
        key, matching the original access pattern — confirm against the
        decoded prototxt representation);
      * the inferred dim used true division, yielding a float on Python 3;
      * the final ``if inshape[0] == -1`` was dead code because inshape[0]
        had already been overwritten with 1.
    """
    from functools import reduce

    def count(num_list):
        # Product of all entries.
        return reduce(lambda a, b: a * b, num_list)

    inshape = input_shape[0]
    params = layer.reshape_param
    axis = params.axis if hasattr(params, 'axis') else 0
    num_axes = params.num_axes if hasattr(params, 'num_axes') else -1

    # Remember whether the batch dim was the -1 placeholder so it can be
    # restored on the output (the original check at the end never fired).
    batch_is_inferred = inshape[0] == -1
    if batch_is_inferred:
        inshape[0] = 1
    input_count = count(inshape)

    input_num_axes = len(inshape)

    input_start_axis = axis
    start_axis = input_start_axis if input_start_axis >= 0 \
        else input_num_axes + input_start_axis + 1

    assert start_axis >= 0, "[Reshape]axis %d out of range" % (input_start_axis)
    assert start_axis <= input_num_axes, \
        "[Reshape]axis %d out of range for %d-D input data" \
        % (input_start_axis, input_num_axes)

    assert num_axes >= -1, "[Reshape]num_axes must be >= 0, or -1 for all"

    end_axis = input_num_axes if num_axes == -1 else start_axis + num_axes
    assert end_axis <= input_num_axes, \
        "end_axis[%d] = axis[%d] + num_axes[%d] is out of range" \
        % (end_axis, start_axis, num_axes)

    num_axes_replaced = end_axis - start_axis
    num_axes_retained = input_num_axes - num_axes_replaced
    shape = params.shape  # fixed: 'shape' was previously undefined
    num_new_axes = len(shape['dim'])
    outshape = []

    # Axes before the replaced range are copied through unchanged.
    for i in range(start_axis):
        outshape.append(inshape[i])

    # The new dims (0 and -1 entries are resolved below).
    for i in range(num_new_axes):
        outshape.append(shape['dim'][i])

    # Axes after the replaced range are copied through unchanged.
    for i in range(end_axis, input_num_axes):
        outshape.append(inshape[i])

    assert len(outshape) == num_axes_retained + num_new_axes, \
        "[Reshape]invalid dims of output shape[%s]" % (str(outshape))

    inferred_axis = -1
    copy_axes = []
    constant_count = 1
    for i in range(num_new_axes):
        top_dim = shape['dim'][i]
        if top_dim == 0:
            # 0 means "copy this dim from the input".
            copy_axes.append(i)
            copy_axis_index = start_axis + i
            outshape[copy_axis_index] = inshape[copy_axis_index]
        elif top_dim == -1:
            assert inferred_axis == -1, \
                "[Reshape]new shape contains multiple -1 dims"
            inferred_axis = i
        else:
            constant_count *= top_dim

    if inferred_axis >= 0:
        # Element count accounted for by every explicit/copied dim.
        explicit_count = constant_count
        l = inshape[0:start_axis]
        if len(l) > 0:
            explicit_count *= count(l)
        l = inshape[end_axis:]
        if len(l) > 0:
            explicit_count *= count(l)
        for i in range(len(copy_axes)):
            explicit_count *= outshape[start_axis + copy_axes[i]]
        assert input_count % explicit_count == 0, "[Reshape]botom count[%d] " \
            "must be divisible by product of the specified dimensions[%d] " \
            % (input_count, explicit_count)
        # Fixed: integer division keeps the dim an int on Python 3.
        outshape[start_axis + inferred_axis] = input_count // explicit_count

    output_count = count(outshape)
    assert output_count == input_count, \
        "[Reshape]output count[%d] must match input count[%d]" \
        % (output_count, input_count)
    if batch_is_inferred:
        outshape[0] = -1
    return [outshape]
def shape_argmax(layer, input_shape):
    """Infer the output shape of an ArgMax layer.

    Only the last axis is supported; the last dim becomes ``top_k`` (doubled
    when ``out_max_val`` is set, since values are emitted with the indices).

    Fixes vs. the original: ``parmas`` was a typo for ``params``, and the
    ``hasattr`` calls passed undefined bare names instead of strings —
    either defect raised NameError on the defaulted paths.
    """
    inshape = input_shape[0]
    params = layer.argmax_param
    out_max_val = params.out_max_val if hasattr(params, 'out_max_val') \
        else False
    top_k = params.top_k if hasattr(params, 'top_k') else 1
    axis = params.axis if hasattr(params, 'axis') else -1
    if axis < 0:
        axis += len(inshape)
    assert (axis + 1 == len(inshape)
            ), 'only can be applied on the last dimension[axis:%d, %s] now,' \
        'make sure you have set axis param in xxx.prototxt file' \
        % (axis, str(inshape))

    # Copy so the caller's input shape list is not mutated in place.
    outshape = list(inshape)
    outshape[-1] = top_k
    if out_max_val is True:
        outshape[-1] *= 2
    return [outshape]
def shape_axpy(layer, input_shape):
    """Infer the output shape of an Axpy layer (out = a*x + y).

    Expects three inputs (scale, x, y); the output shape follows x/y.

    Fixed vs. the original: the body referenced ``input_shapes`` while the
    parameter is named ``input_shape`` (NameError on every call).
    """
    assert len(input_shape) == 3, "not valid input shape for axpy layer"
    assert len(input_shape[0]) == len(input_shape[1]), 'should have same dims'
    output_shape = input_shape[1]
    assert (input_shape[2] == output_shape), \
        "shape not consistent for axpy[%s <--> %s]" \
        % (str(output_shape), str(input_shape[2]))
    return [output_shape]
def shape_crop(layer, input_shape):
    """Crop outputs the shape of its second (reference) input."""
    assert len(input_shape) == 2, "the number of crop's inputs must be 2"
    return [input_shape[1]]
def shape_detectionoutput(layer, input_shape):
    """DetectionOutput emits a variable number of rows of 6 values each."""
    return [[-1, 6]]
def shape_flatten(layer, input_shape):
    """Flatten collapses axes [axis, end_axis] into a single -1 dimension.

    Dims outside the flattened range are emitted as 0 (meaning "copy from
    input"); the flattened range itself becomes one -1 entry.
    """
    assert len(input_shape) == 1, "the number of flatten's inputs must be 1"
    params = layer.flatten_param
    rank = len(input_shape[0])
    start_axis = params.axis
    end_axis = params.end_axis
    if start_axis < 0:
        start_axis += rank
    if end_axis < 0:
        # end_axis is exclusive, hence the extra +1 when normalizing.
        end_axis += rank + 1
    assert start_axis <= end_axis, 'invalid axis[%d] or end_axis[%d] params' \
        % (start_axis, end_axis)
    output_shape = [0] * start_axis + [-1] + [0] * (rank - end_axis)
    return [output_shape]
def shape_normalize(layer, input_shape):
    """Normalize is elementwise; the output shape equals the input shape."""
    return input_shape
def shape_permute(layer, input_shape):
    """Infer the output shape of a Permute layer (axis reordering).

    Fixed vs. the original: the assert message interpolated an undefined
    name ``name``; it now reports the offending order instead.
    """
    params = layer.permute_param
    order = list(params.order)
    inshape = input_shape[0]
    output_shape = []
    for dim_index in order:
        assert dim_index < len(inshape), \
            "invalid order for permute[%s]" % (str(order))
        output_shape.append(inshape[dim_index])
    return [output_shape]
def shape_power(layer, input_shape):
    """Power is elementwise; the output shape equals the input shape."""
    return input_shape
def shape_priorbox(layer, input_shape):
    """Infer the output shape of a PriorBox layer.

    One input carrying a pair of shapes (feature map, image); the number of
    priors per location N depends on min/max sizes and aspect ratios.

    Fixes vs. the original: ``input_shapes`` and ``name`` were undefined
    names, and the emptiness checks compared lists against ``None`` (always
    true as written), which mis-counted priors when max_size/aspect_ratio
    were absent.
    """
    params = layer.prior_box_param
    min_size = list(params.min_size)
    max_size = list(params.max_size)
    aspect_ratio = list(params.aspect_ratio)
    assert len(input_shape[0]) == 2, \
        "invalid inputs for Priorbox[%s]" % (layer.name)
    fc_shape = input_shape[0][0]
    N = 1
    if len(max_size) > 0:
        N += 1
    if len(aspect_ratio) > 0:
        # Each aspect ratio contributes a box and its reciprocal.
        N += 2 * len(aspect_ratio)
    N_bbx = fc_shape[2] * fc_shape[3] * N
    output_shape = [[1, 2, 4 * N_bbx]]
    return output_shape
def shape_reduction(layer, input_shape):
    """Infer the output shape of a Reduction layer.

    Axes from ``axis`` onward are reduced away, leaving dims [0, axis).

    Fixed vs. the original: it sliced the *list of input shapes*
    (``input_shape[0:axis]``) instead of the first input shape.
    """
    params = layer.reduction_param
    axis = params.axis
    if axis < 0:
        axis += len(input_shape[0]) + 1
    assert axis <= len(input_shape[0]), 'invalid axis[%d] error' % (axis)
    return [input_shape[0][0:axis]]
def shape_roipooling(layer, input_shape):
    """Infer the output shape of an ROIPooling layer.

    One input carrying a pair of shapes (base feature map, rois); output is
    [num_rois, channels, pooled_h, pooled_w].

    Fixes vs. the original: the body referenced the undefined name
    ``input_shapes`` (the parameter is ``input_shape``), and the output
    aliased the base feature shape, mutating the caller's list in place.
    """
    params = layer.roi_pooling_param
    pooled_w = params.pooled_w
    pooled_h = params.pooled_h
    spatial_scale = params.spatial_scale
    assert len(input_shape[0]) == 2, \
        "not valid input shape for roipooling layer"
    base_fea_shape = input_shape[0][0]
    rois_shape = input_shape[0][1]
    output_shape = list(base_fea_shape)
    output_shape[0] = rois_shape[0]
    output_shape[2] = pooled_h
    output_shape[3] = pooled_w
    return [output_shape]
def shape_select(layer, input_shape):
    """Select keeps the range [start, end) along one axis.

    ``slice_point`` gives the start (and optionally the end); a missing end
    defaults to the full extent of the axis.
    """
    shape = list(input_shape[0])
    params = layer.select_param
    axis = params.axis
    slice_point = list(params.slice_point)
    start = slice_point[0]
    end = slice_point[1] if len(slice_point) == 2 else shape[axis]
    assert end > start, \
        "invalid slice_point with [start:%d, end:%d]" % (start, end)
    shape[axis] = end - start
    return [shape]
x2paddle/op_mapper/caffe_op_mapper.py
浏览文件 @
13320626
此差异已折叠。
点击以展开。
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录