PaddlePaddle / X2Paddle
Commit f0dede1f
Authored Mar 28, 2019 by Macrobull

rename onnx2paddle to onnx2fluid

Parent: b483d12e
Showing 20 changed files, with 4,323 additions and 1,582 deletions (+4323 / -1582).
onnx2fluid/.gitignore                          +1     -0
onnx2fluid/README.md                           +7     -0
onnx2fluid/examples/convert_data_npz_0.py      +1     -1
onnx2fluid/examples/convert_data_pb_0.py       +0     -0
onnx2fluid/examples/gen_some_samples.py        +26    -27
onnx2fluid/examples/onnx_model_zoo.sh          +35    -35
onnx2fluid/onnx2fluid/__init__.py              +0     -0
onnx2fluid/onnx2fluid/__main__.py              +93    -0
onnx2fluid/onnx2fluid/cmdline.py               +42    -37
onnx2fluid/onnx2fluid/conversion.py            +134   -71
onnx2fluid/onnx2fluid/framework_pb2.py         +1634  -0
onnx2fluid/onnx2fluid/onnx_utils.py            +107   -86
onnx2fluid/onnx2fluid/symbolic.py              +2041  -0
onnx2fluid/onnx2fluid/torch_export_helper.py   +19    -13
onnx2fluid/onnx2fluid/validation.py            +72    -59
onnx2fluid/onnx2fluid/writer.py                +102   -78
onnx2fluid/requirements.txt                    +1     -1
onnx2fluid/setup.cfg                           +8     -8
onnx2fluid/setup.py                            +0     -1
onnx2paddle/onnx2paddle/framework_pb2.py       +0     -1165
onnx2paddle/.gitignore → onnx2fluid/.gitignore

@@ -57,3 +57,4 @@ coverage.xml
 /examples/*.aria2
 /examples/*.onnx
 /examples/*.np?
+**/.*
onnx2paddle/README.md → onnx2fluid/README.md

-Onnx2paddle
+Onnx2Fluid
 ===
-Inference model conversion from ONNX/PyTorch to Paddle
+Inference model conversion from ONNX/PyTorch to Paddle fluid

 快速开始
 ---
onnx2paddle/examples/convert_data_npz_0.py → onnx2fluid/examples/convert_data_npz_0.py

@@ -22,4 +22,4 @@ output_data = data['outputs']
 inputs = Dict(zip(input_names, [input_data]))
 outputs = Dict(zip(output_name, [output_data]))
-np.savez(fn, inputs=inputs, outputs=outputs)  # overwrite
+np.savez(fn, inputs=inputs, outputs=outputs)  # overwrite

(the removed and added savez lines differ only in formatting)
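For context on what this script writes: np.savez stores each keyword dict as a 0-d object array, so reading the golden data back needs allow_pickle and .item(). A minimal, self-contained sketch; the tensor names and shapes below are illustrative, not taken from the commit:

    # Sketch of the golden-data .npz round trip used for validation; the
    # sample names and shapes are made up for illustration.
    from collections import OrderedDict as Dict

    import numpy as np

    input_names = ['data_0']
    output_name = ['prob_1']
    input_data = np.random.rand(1, 3, 224, 224).astype('float32')
    output_data = np.random.rand(1, 1000).astype('float32')

    inputs = Dict(zip(input_names, [input_data]))
    outputs = Dict(zip(output_name, [output_data]))
    np.savez('sample.npz', inputs=inputs, outputs=outputs)  # overwrite

    data = np.load('sample.npz', allow_pickle=True)
    golden_inputs = data['inputs'].item()    # back to an OrderedDict
    golden_outputs = data['outputs'].item()
    assert list(golden_inputs) == input_names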
onnx2paddle/examples/convert_data_pb_0.py → onnx2fluid/examples/convert_data_pb_0.py

File moved.
onnx2paddle/examples/gen_some_samples.py → onnx2fluid/examples/gen_some_samples.py

@@ -6,7 +6,7 @@ Created on Fri Mar 22 11:19:45 2019
 @author: Macrobull
 Not all ops in this file are supported by both Pytorch and ONNX
-This only demostrates the conversion/validation workflow from Pytorch to ONNX to Paddle
+This only demostrates the conversion/validation workflow from Pytorch to ONNX to Paddle fluid
 """

@@ -16,12 +16,10 @@ import torch
 import torch.nn as nn
 import torch.nn.functional as F
-from onnx2paddle.torch_export_helper import export_onnx_with_validation
+from onnx2fluid.torch_export_helper import export_onnx_with_validation

 idx = 0

 ######### example: RNN ########
-#
 #class Model(nn.Module):

@@ -44,7 +42,6 @@ idx = 0
 #        ['x'], ['y'],
 #        verbose=True, training=False)

 ######### example: random ########
-#
 #class Model(nn.Module):

The remaining hunks (@@ -66,9 +63,9 @@, @@ -85,13 +82,12 @@, @@ -110,12 +106,15 @@,
@@ -130,13 +129,15 @@, @@ -155,12 +156,12 @@, @@ -181,10 +182,8 @@,
@@ -210,6 +209,7 @@, @@ -223,6 +223,5 @@) re-wrap the export_onnx_with_validation
calls in the fc, compare, affine_grid, conv2d_transpose, conv2d, and empty
examples without changing their arguments:

    export_onnx_with_validation(model, (xb, ), 't' + str(idx), ['x'], ['y'],
                                verbose=True, training=False)
    export_onnx_with_validation(model, (xb0, xb1), 't' + str(idx),
                                ['x0', 'x1'], ['ya', 'yb', 'yc'],
                                verbose=True, training=False)
    export_onnx_with_validation(model, (theta, ), 't' + str(idx),
                                ['theta'], ['grid'],
                                verbose=True, training=False)
    export_onnx_with_validation(model, (xb, ), 't' + str(idx), ['y'], ['y'],
                                verbose=True, training=False)
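Each example in this file follows the same export-then-validate pattern; below is a condensed, runnable sketch of it. The two-layer model and the 't0' basepath are stand-ins, not content from the commit:

    # Minimal sketch of the Pytorch -> ONNX export pattern the examples use;
    # this model is a stand-in for the fc/conv2d/... examples above.
    import torch
    import torch.nn as nn

    from onnx2fluid.torch_export_helper import export_onnx_with_validation


    class Model(nn.Module):
        def __init__(self):
            super(Model, self).__init__()
            self.fc = nn.Linear(3, 8)

        def forward(self, x):
            return self.fc(x)


    model = Model()
    model.eval()
    xb = torch.rand((2, 3))
    yp = model(xb)  # sample forward pass, captured as golden output

    # writes t0.onnx plus golden I/O data for the later fluid-side validation
    export_onnx_with_validation(model, (xb, ), 't0', ['x'], ['y'],
                                verbose=True, training=False)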
onnx2paddle/examples/onnx_model_zoo.sh → onnx2fluid/examples/onnx_model_zoo.sh

 #! /usr/bin/env sh
-get_url="proxychains4 aria2c -c -s8 -x8"
+get_url="aria2c -c -s8 -x8"
 base_url="https://s3.amazonaws.com/download.onnx/models/opset_9/"
-flags="-de -o /tmp/export/"
+flags="-e -o /tmp/export/"

@@ -18,13 +18,13 @@ bvlc_alexnet()
 do
     echo "converting $npz ..."
     python convert_data_npz_0.py "$npz" "data_0" "prob_1"
-    python -m onnx2paddle $flags "$fn_model" -t $npz
+    python -m onnx2fluid $flags "$fn_model" -t $npz
 done
 for pb_dir in $bn_tar/*/
 do
     echo "converting $pb_dir ..."
     python convert_data_pb_0.py "$pb_dir" "data_0" "prob_1"
-    python -m onnx2paddle $flags "$fn_model" -t echo $(dirname "$pb_dir/x").npz
+    python -m onnx2fluid $flags "$fn_model" -t $(dirname "$pb_dir/x").npz
 done

The hunks for bvlc_googlenet, bvlc_reference_caffenet, inception_v1,
inception_v2, resnet50, shufflenet, squeezenet, tiny_yolov2 (which keeps its
trailing -x flag), vgg19, and zfnet512 apply the same
"python -m onnx2paddle" to "python -m onnx2fluid" substitution in their
conversion loops. bvlc_reference_rcnn_ilsvrc13 additionally switches the
checked output tensor:

@@ -77,8 +77,8 @@ bvlc_reference_rcnn_ilsvrc13()
-    python convert_data_pb_0.py "$pb_dir" "data_0" "softmaxout_1"
-    python -m onnx2paddle $flags "$fn_model" -t $(dirname "$pb_dir/x").npz
+    python convert_data_pb_0.py "$pb_dir" "data_0" "fc_rcnn_1"
+    python -m onnx2fluid $flags "$fn_model" -t $(dirname "$pb_dir/x").npz

The trailing call list is cleaned up, re-enabling models that previously
carried failure notes:

-bvlc_alexnet # data error
-bvlc_googlenet # desc error
+bvlc_alexnet
+bvlc_googlenet
 bvlc_reference_caffenet
 bvlc_reference_rcnn_ilsvrc13
-inception_v1 ###
-inception_v2 ###
-resnet50 # data error
-shufflenet ###
+inception_v1
+inception_v2
+resnet50
+shufflenet
 squeezenet
 tiny_yolov2 # not supported
 vgg19
-zfnet512 # data error
+zfnet512
onnx2paddle/onnx2paddle/__init__.py → onnx2fluid/onnx2fluid/__init__.py

File moved.
onnx2paddle/onnx2paddle/__main__.py → onnx2fluid/onnx2fluid/__main__.py

@@ -5,7 +5,7 @@
 """
-本文件允许模块包以python -m onnx2paddle方式直接执行。
+本文件允许模块包以python -m onnx2fluid方式直接执行。

 Authors: Macrobull
 Date:    2019/02/22 10:25:46

@@ -21,43 +21,67 @@ import argparse
 import logging
 import sys

The argument parser is rebuilt with three changes: the description becomes
'onnx2fluid', '--output-dir' is renamed '--output_dir', and the
'--embed_params' help text now reads '... trainable Paddle fluid layers'.
The rebuilt parser:

+parser = argparse.ArgumentParser(
+    description='onnx2fluid',
+    formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+)
+parser.add_argument('model', nargs=1,
+                    help='path to model.onnx')
+parser.add_argument('--debug', '-d', action='store_true',
+                    help='enable debug logging and checking')
+parser.add_argument('--output_dir', '-o', type=str, default='',
+                    help='output directory')
+parser.add_argument('--test_data', '-t', type=str, default='',
+                    help='I/O golden data for validation, e.g. test.npy, test.npz')
+parser.add_argument('--embed_params', '-e', action='store_true',
+                    help='try to embed parameters for trainable Paddle fluid layers')
+parser.add_argument('--pedantic', action='store_true', default=True,
+                    help='accept and convert only standard ONNX opset')
+parser.add_argument('--no-pedantic', '-x', action='store_false', dest='pedantic',
+                    help='process non-standard ONNX ops, this may lead to fails')
+parser.add_argument('--precision', '-p', type=int, default=4,
+                    help='assertion decimal for validation')
 args = parser.parse_args()

 logging_format = '[%(levelname)8s]%(name)s::%(funcName)s:%(lineno)04d: %(message)s'
 logging_level = logging.DEBUG if args.debug else logging.INFO
 logging.basicConfig(format=logging_format, level=logging_level)

 try:
     from . import cmdline
 except ImportError:

@@ -66,5 +90,4 @@ except ImportError:
 # imports
 main = cmdline.main

 sys.exit(main(**args.__dict__))
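The --pedantic/--no-pedantic pair shares a single dest, which is how -x overrides the True default. A small sketch of that behavior; the argv lists are illustrative:

    # Demonstrates the shared-dest flag pair from the parser above.
    import argparse

    parser = argparse.ArgumentParser(description='onnx2fluid')
    parser.add_argument('model', nargs=1, help='path to model.onnx')
    parser.add_argument('--embed_params', '-e', action='store_true')
    parser.add_argument('--pedantic', action='store_true', default=True)
    parser.add_argument('--no-pedantic', '-x', action='store_false', dest='pedantic')

    print(parser.parse_args(['model.onnx']).pedantic)        # True
    print(parser.parse_args(['model.onnx', '-x']).pedantic)  # False
    args = parser.parse_args(['model.onnx', '-e'])
    print(args.model, args.embed_params)                     # ['model.onnx'] True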
onnx2paddle/onnx2paddle/cmdline.py → onnx2fluid/onnx2fluid/cmdline.py

@@ -21,7 +21,6 @@ import logging
 import shutil
 import zipfile

 __all__ = [
     'main',
 ]

@@ -42,7 +41,7 @@ def main(**kwargs):
     # imports
     convert = conversion.convert
-    logger = logging.getLogger('onnx2paddle')
+    logger = logging.getLogger('onnx2fluid')
     debug = kwargs.get('debug', False)

     # prepare arguments

@@ -58,13 +57,15 @@ the convert(filename, save_dir, model_basename=...,
model_func_name=..., embed_params=..., onnx_opset_version=...,
onnx_opset_pedantic=..., debug=debug) call is re-wrapped with unchanged
arguments.

@@ -80,21 +81,23 @@ both validate() calls are likewise re-wrapped with
unchanged arguments:

     # in fact fluid can not fully clear the context
     # continuous validation may be inaccurate
     precision = 10 ** -kwargs.get('precision', 4)

     logger.info('starting validation on desc ...')
     passed &= validate(shutil.os.path.join(save_dir, '__model__'),
                        golden_data_filename, precision=precision)

     logger.info('starting validation on code ...')
     passed &= validate(shutil.os.path.join(save_dir, model_basename),
                        golden_data_filename, model_func_name=model_func_name,
                        precision=precision,
                        save_inference_model=debug)  # this overwrite desc file for test

     if not passed:
         logger.error('validation failed, exit')

@@ -112,20 +115,22 @@ the __main__ harness switches its test model:

 if __name__ == '__main__':
     logging.basicConfig(
         format='[%(levelname)8s]%(name)s::%(funcName)s:%(lineno)04d: %(message)s',
         level=logging.DEBUG,
     )
     # main(model=['../examples/t5.onnx'],
     #      output_dir='/tmp/export/',
     #      embed_params=False,
     #      pedantic=False,
     #      test_data='../examples/t5.npz',
     #      debug=True)
-    main(model=['../examples/shufflenet/model.onnx'],
-         output_dir='/tmp/export/',
-         embed_params=True,
-         pedantic=False,
-         test_data='../examples/shufflenet/test_data_set_0.npz',
-         debug=True)
+    main(model=['../examples/inception_v2/model.onnx'],
+         output_dir='/tmp/export/',
+         embed_params=True,
+         pedantic=False,
+         test_data='../examples/inception_v2/test_data_set_2.npz',
+         debug=True)
onnx2paddle/onnx2paddle/conversion.py → onnx2fluid/onnx2fluid/conversion.py

@@ -12,19 +12,21 @@ from __future__ import division
 import logging
 import shutil

 __all__ = [
     'convert',
 ]

 def convert(onnx_model_filename, save_dir,
             model_basename='model.py', model_func_name='inference',
             embed_params=False,
             onnx_opset_version=9, onnx_opset_pedantic=True,
             debug=False):
     """
-    convert an ONNX model to Paddle Python code and desc pb
+    convert an ONNX model to Paddle fluid Python code and desc pb
     """

     import onnx

@@ -59,10 +61,11 @@ def convert(...):
     logger.info('checking model ...')
     check_model(onnx_model)
     logger.debug('using opset version: %d', onnx_opset_version)
     if onnx_opset_pedantic:  # WORKAROUND: RuntimeError: No Adapter For OP
         onnx_model = convert_version(onnx_model, onnx_opset_version)
     else:  # TODO: add new argument for this option
         logger.warning('opset conversion skipped for onnx_opset_pedantic is OFF')
     onnx_model = polish_model(onnx_model)
 except ValidationError as e:
     if onnx_opset_pedantic:

@@ -90,13 +93,13 @@ the I/O instances are renamed from paddle_* to fluid_*:

     # I/O instances
     onnx_graph = onnx_model.graph
-    paddle_program = Program()
-    paddle_writer = Writer()
+    fluid_program = Program()
+    fluid_writer = Writer()

@@ -107,29 +110,37 @@ the weight getter becomes a proper closure, and emit_op
moves to the renamed writer/program:

     for name, weight in graph_weights(onnx_graph):
         value_info = graph_value_infos[name]
         value_info['embeded_as'] = []
-        value_info['get_weight'] = lambda: weight.tolist()  # lazy getter
+        value_info['get_weight'] = (lambda w: lambda: w.tolist())(weight)  # lazy getter

     logger.info('conversion started')
     # op set conversion
     # topo = 'backward' if embed_params else 'forward'
     topo = 'forward'
     for name, domain, op_type, inputs, outputs, attrs in graph_ops(onnx_graph, topo=topo):
         logger.debug('translating op %s %s::%s ...', name, domain, op_type)
         if domain == DEFAULT_OP_DOMAIN:
             domain = ''
         try:
-            paddle_writer.emit_op(paddle_program, name, domain, op_type,
-                                  inputs, outputs, attrs,
-                                  graph_value_infos, embed_params=embed_params)
+            fluid_writer.emit_op(fluid_program, name, domain, op_type,
+                                 inputs, outputs, attrs,
+                                 graph_value_infos, embed_params=embed_params)
         except BaseException as e:
             logger.fatal('conversion failed for:\n\t%s -> %s::%s -> %s',
                          inputs, domain, op_type, outputs)
             raise e

-    op_codes = paddle_program.codes
-    paddle_program.codes = []
-    logger.info('%d ops converted', len(paddle_program.op_descs))
+    op_codes = fluid_program.codes
+    fluid_program.codes = []
+    logger.info('%d ops converted', len(fluid_program.op_descs))
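The get_weight change above works around Python's late-binding closures: a bare lambda created in the loop resolves `weight` only when called, after the loop has finished, so every getter would return the last weight. Wrapping it in an immediately-applied outer lambda snapshots each iteration's value. A minimal demonstration:

    # Late-binding pitfall fixed by the diff: the `late` getters all see the
    # final loop value, while the wrapped getters capture each value in turn.
    late, bound = [], []
    for w in [10, 20, 30]:
        late.append(lambda: w)                    # closes over the variable w
        bound.append((lambda w_: lambda: w_)(w))  # snapshots the current value

    print([f() for f in late])   # [30, 30, 30]
    print([f() for f in bound])  # [10, 20, 30]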
The weight, input, and output writers switch to fluid_writer/fluid_program in
the same way (@@ -138,18 +149,24 @@, @@ -159,9 +176,11 @@). The code-generation
step now threads the source filename into the generated header:

@@ -171,49 +190,93 @@
     # code generation
+    header_codes = fluid_writer.header_code(
+        model_func_name, 'From: {}'.format(onnx_model_filename))
     code_filename = shutil.os.path.join(save_dir, model_basename)
-    paddle_writer.write_code_file(code_filename,
-                                  paddle_writer.header_code(model_func_name),
-                                  input_codes, param_codes, op_codes, output_codes)
+    fluid_writer.write_code_file(code_filename, header_codes,
+                                 input_codes, param_codes, op_codes, output_codes)
     logger.info('code saved to %s, factory function: %s',
                 code_filename, model_func_name)

     # desc generation
     desc_filename = shutil.os.path.join(save_dir, '__model__')
-    paddle_writer.write_desc_file(desc_filename,
-                                  op_descs=paddle_program.op_descs,
-                                  var_descs=paddle_program.var_descs)
+    fluid_writer.write_desc_file(desc_filename,
+                                 op_descs=fluid_program.op_descs,
+                                 var_descs=fluid_program.var_descs)
     logger.info('program saved to %s', desc_filename)
     logger.info('conversion finished')
     # globals().update(locals())

The module's __main__ harness, which looped convert() over a hard-coded
model_list ('../examples/t1.onnx' through '../examples/t6.onnx', with
t7/t8 commented out) in plain and '.embeded' variants, is replaced by an
argparse CLI mirroring __main__.py (description='onnx2fluid.convert';
arguments model, --debug/-d, --output_dir/-o, --embed_params/-e,
--pedantic, --no-pedantic/-x) that ends in:

+    convert(model_filename, save_dir,
+            embed_params=embed_params,
+            onnx_opset_pedantic=pedantic,
+            debug=debug)
onnx2fluid/onnx2fluid/framework_pb2.py (new file, mode 100644)

This diff is collapsed in the view.
onnx2paddle/onnx2paddle/onnx_utils.py → onnx2fluid/onnx2fluid/onnx_utils.py

Every hunk in this file is a style re-wrap (imports, __all__, function
signatures, long calls, dict literals, and trailing comments); behavior is
unchanged. Key context, as re-wrapped:

 from collections import OrderedDict as Dict  # as default dict
 from onnx.helper import get_attribute_value, make_attribute
 from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
 from onnx.numpy_helper import to_array
 from onnx.shape_inference import infer_shapes

 logger = logging.getLogger(__name__)

 __all__ = [
     'print_pb_structure',
     'build_value_refs',
     'node_attrs', 'node_topo', 'node_iter',
     'tensor_shape',
     'graph_ops', 'graph_weights',
     'inferred_model_value_info',
     'optimize_model_skip_op_for_inference',
     'optimize_model_strip_initializer',
     'optimize_model_cast', 'optimize_model_slice',
 ]

 ONNX_INT_MAX = 2 ** 63 - 1
 DEFAULT_OP_DOMAIN = 'ai.onnx'

The re-wrapped functions include print_pb_structure, get_attribute_value2,
node_attrs, node_topo, node_iter, graph_ops, inferred_model_value_info,
skip_node_forward/skip_node_backward, the optimize_model_* passes
(skip_op_for_inference, strip_initializer, cast, slice), and the module's
__main__ logging setup. In get_attribute_value2, raw TENSOR attributes are
decoded with:

     dtype = np.dtype(TENSOR_TYPE_TO_NP_TYPE[attr.t.data_type])
     data = attr.t.raw_data
     value = np.frombuffer(data, dtype=dtype, count=(len(data) // dtype.itemsize))
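A runnable sketch of that decoding path, with a hand-built tensor attribute (the attribute itself is fabricated for the demonstration):

    # Decodes a raw ONNX TENSOR attribute the way get_attribute_value2 does.
    import numpy as np
    import onnx
    from onnx.helper import make_attribute, make_tensor
    from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE

    tensor = make_tensor('t', onnx.TensorProto.FLOAT, dims=[2, 2],
                         vals=np.arange(4, dtype=np.float32).tobytes(), raw=True)
    attr = make_attribute('value', tensor)
    assert attr.type == onnx.AttributeProto.TENSOR

    dtype = np.dtype(TENSOR_TYPE_TO_NP_TYPE[attr.t.data_type])
    data = attr.t.raw_data
    value = np.frombuffer(data, dtype=dtype, count=(len(data) // dtype.itemsize))
    print(value)  # [0. 1. 2. 3.]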
onnx2paddle/onnx2paddle/symbolic.py → onnx2fluid/onnx2fluid/symbolic.py

This diff is collapsed in the view.
onnx2paddle/onnx2paddle/torch_export_helper.py → onnx2fluid/onnx2fluid/torch_export_helper.py

All four hunks (@@ -24,8 +24,7 @@, @@ -37,8 +36,7 @@, @@ -65,10 +63,14 @@,
@@ -95,12 +97,16 @@) are style re-wraps of the _flatten_list, export_data, and
export_onnx_with_validation signatures and of the torch.onnx.export call;
behavior is unchanged. Key context, as re-wrapped:

 def export_onnx_with_validation(model, inputs, export_basepath,
                                 input_names=None, output_names=None,
                                 use_npz=True,
                                 *args, **kwargs):
     """
     export PyTorch model to ONNX model and export sample inputs and outputs in a Numpy file
     """

 torch_inputs = _ensure_tuple(inputs)  # WORKAROUND: for torch.onnx
 outputs = torch.onnx.export(model, torch_inputs, export_basepath + '.onnx',
                             input_names=_flatten_list(input_names),
                             output_names=_flatten_list(output_names),
                             *args, **kwargs)
 if outputs is None:  # WORKAROUND: for torch.onnx
     outputs = model(*inputs)
 torch_outputs = _ensure_tuple(outputs)
onnx2paddle/onnx2paddle/validation.py → onnx2fluid/onnx2fluid/validation.py

The _flatten_dict signature is collapsed onto one line (@@ -13,8 +13,7 @@),
and the validate() entry point is renamed from paddle_* to fluid_* terms:

@@ -34,12 +33,13 @@ def _ensure_list(obj):
     return [obj]

-def validate(paddle_model_filename, golden_data_filename,
+def validate(fluid_model_filename, golden_data_filename,
              model_func_name='inference',
              precision=1e-4, save_inference_model=False):
     """
-    inferece the converted Paddle model, validate with given golden data
+    inferece the converted Paddle fluid model, validate with given golden data
     """

@@ -52,17 +52,17 @@
     exe.run(fluid.default_startup_program())

     # load model
-    paddle_model_dir, basename = os.path.split(paddle_model_filename)
+    fluid_model_dir, basename = os.path.split(fluid_model_filename)
     if basename == '__model__':  # is desc model
         logger.debug('using desc file %s', basename)
-        prog, in_names, var_outs = fluid.io.load_inference_model(paddle_model_dir, exe)
+        prog, _, var_outs = fluid.io.load_inference_model(fluid_model_dir, exe)
         out_names = var_outs  # HINT: pass var if fetch ops already created
         logger.info('model load passed')
     elif basename.endswith('.py'):  # is python code
         logger.debug('using python code file %s', basename)
         module_name, _ = os.path.splitext(basename)
         sys_path = sys.path.copy()
-        sys.path.append(paddle_model_dir)
+        sys.path.append(fluid_model_dir)

@@ -71,18 +71,21 @@
         out_names = [var.name for var in var_outs]  # HINT: pass string to create fetch ops
         logger.info('import passed')
         prog = fluid.default_main_program()
-        fluid.io.load_persistables(executor=exe, dirname=paddle_model_dir,
+        fluid.io.load_persistables(executor=exe, dirname=fluid_model_dir,
                                    main_program=prog)
         logger.info('weight load passed')
     else:
-        raise ValueError('unsupported Paddle model')
+        raise ValueError('unsupported Paddle fluid model')

@@ -100,10 +103,15 @@
     # DEBUG: reload test for python code
     if basename.endswith('.py') and save_inference_model:
-        fluid.io.save_inference_model(paddle_model_dir, input_data.keys(),
+        fluid.io.save_inference_model(fluid_model_dir, input_data.keys(),
                                       var_outs, exe, main_program=prog,
                                       export_for_deployment=True)
         logger.info('model re-save passed')
-        fluid.io.load_inference_model(paddle_model_dir, exe)
+        fluid.io.load_inference_model(fluid_model_dir, exe)
         logger.info('model re-load passed')

@@ -124,49 +132,54 @@ the ad-hoc __main__ harness (a random choice over
'../examples/t{}/model.py' variants, a hard-coded '/tmp/export/model.py',
and a framework_pb2 ProgramDesc debug probe against
'../examples/bvlc_alexnet/test_data_0.npz') is replaced by an argparse CLI:

+    import argparse
+
+    parser = argparse.ArgumentParser(
+        description='onnx2fluid.validate',
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+    )
+    parser.add_argument('model', nargs=1,
+                        help='path to model.py or __model__')
+    parser.add_argument('--debug', '-d', action='store_true',
+                        help='enable debug logging and checking')
+    parser.add_argument('--test_data', '-t', type=str,
+                        help='I/O golden data for validation, e.g. test.npy, test.npz')
+    parser.add_argument('--precision', '-p', type=int, default=4,
+                        help='assertion decimal for validation')
+    args = parser.parse_args()
+
+    logging_format = '[%(levelname)8s]%(name)s::%(funcName)s:%(lineno)04d: %(message)s'
+    logging_level = logging.DEBUG if args.debug else logging.INFO
+    logging.basicConfig(format=logging_format, level=logging_level)
+
+    debug = args.debug
+    fluid_model_filename = args.model[0]
+    golden_data_filename = args.test_data
+    precision = args.precision
+
+    validate(fluid_model_filename, golden_data_filename,
+             precision=precision, save_inference_model=debug)
onnx2paddle/onnx2paddle/writer.py → onnx2fluid/onnx2fluid/writer.py

The fallback-import warning, __all__, and the _irepr and _flatten_list
signatures are re-wrapped without behavior changes (@@ -34,15 +34,13 @@,
@@ -53,8 +51,7 @@):

 logger.warning('importing paddle.fluid.proto.framework_pb2d failed,'
                'using fallback framework_pb2')

 __all__ = [
     'Program', 'Writer',
 ]

 def _irepr(obj, to='_'):
     """inline repr"""

One semantic fix in make_attr_name escapes the backslash in the character
blacklist properly:

@@ -72,7 +69,7 @@ def make_attr_name(name):
     if name == '':
         raise ValueError('name should not be empty')
-    for s in ' *?\/-:':  #
+    for s in ' *?\\/-:':  #
         name = name.replace(s, '_')
     if not name.startswith('_'):
         name = '_' + name
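The fix matters because '\/' is an invalid escape sequence that Python happens to keep as a literal backslash (newer interpreters warn about it), while '\\/' spells the backslash out. A sketch of the sanitizer with the corrected blacklist; the sample name is made up, and the return statement is assumed from the surrounding function:

    # make_attr_name-style sanitizing with the fixed '\\' escape.
    def make_attr_name(name):
        if name == '':
            raise ValueError('name should not be empty')
        for s in ' *?\\/-:':  # blacklist: space * ? \ / - :
            name = name.replace(s, '_')
        if not name.startswith('_'):
            name = '_' + name
        return name

    print(make_attr_name('gpu_0/conv1-w:0'))  # _gpu_0_conv1_w_0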
...
...
@@ -85,15 +82,15 @@ class Program(object):
"""
DTYPE_TO_FRAMEWORK_DTYPE
=
{
'bool'
:
framework_pb2
.
VarType
.
BOOL
,
'int8'
:
framework_pb2
.
VarType
.
INT8
,
'uint8'
:
framework_pb2
.
VarType
.
UINT8
,
'int16'
:
framework_pb2
.
VarType
.
INT16
,
'int32'
:
framework_pb2
.
VarType
.
INT32
,
'int64'
:
framework_pb2
.
VarType
.
INT64
,
'float16'
:
framework_pb2
.
VarType
.
FP16
,
'float32'
:
framework_pb2
.
VarType
.
FP32
,
'float64'
:
framework_pb2
.
VarType
.
FP64
'bool'
:
framework_pb2
.
VarType
.
BOOL
,
'int8'
:
framework_pb2
.
VarType
.
INT8
,
'uint8'
:
framework_pb2
.
VarType
.
UINT8
,
'int16'
:
framework_pb2
.
VarType
.
INT16
,
'int32'
:
framework_pb2
.
VarType
.
INT32
,
'int64'
:
framework_pb2
.
VarType
.
INT64
,
'float16'
:
framework_pb2
.
VarType
.
FP16
,
'float32'
:
framework_pb2
.
VarType
.
FP32
,
'float64'
:
framework_pb2
.
VarType
.
FP64
}
@
staticmethod
...
...
@@ -116,7 +113,7 @@ class Program(object):
od_var
=
framework_pb2
.
OpDesc
.
Var
()
od_var
.
parameter
=
key
if
idx
<
len
(
vals
):
od_var
.
arguments
.
append
(
vals
[
idx
])
#
od_var
.
arguments
.
append
(
vals
[
idx
])
#
od_vars
.
append
(
od_var
)
return
od_vars
@@ -130,10 +127,10 @@ class Program(object):
        for key, value in attrs.items():
            od_attr = framework_pb2.OpDesc.Attr()
            od_attr.name = key
            if isinstance(value, bool): # bool.mro() = [bool, int, object]
            if isinstance(value, bool):  # bool.mro() = [bool, int, object]
                od_attr.type = framework_pb2.BOOLEAN
                od_attr.b = value
            elif isinstance(value, int): # only cast to int32
            elif isinstance(value, int):  # only cast to int32
                od_attr.type = framework_pb2.INT
                od_attr.i = value
            elif isinstance(value, float):
@@ -143,10 +140,10 @@ class Program(object):
                od_attr.type = framework_pb2.STRING
                od_attr.s = value
            elif isinstance(value, list) and len(value) > 0:
                if isinstance(value, bool): # bool.mro() = [bool, int, object]
                if isinstance(value, bool):  # bool.mro() = [bool, int, object]
                    od_attr.type = framework_pb2.BOOLEANS
                    od_attr.bools.extend(value)
                elif isinstance(value[0], int): # only cast to int32 list
                elif isinstance(value[0], int):  # only cast to int32 list
                    od_attr.type = framework_pb2.INTS
                    od_attr.ints.extend(value)
                elif isinstance(value[0], float):
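For orientation, this branch chain maps plain Python attribute values onto the typed fields of OpDesc.Attr. A sketch of where a few representative values land (the attribute names are made up; the field names follow the framework_pb2 usage above):

attrs = {
    'use_cudnn': True,   # bool    -> type=BOOLEAN, value stored in .b
    'groups': 1,         # int     -> type=INT,     value stored in .i (int32)
    'strides': [2, 2],   # [int]   -> type=INTS,    values extended into .ints
    'scales': [1., 2.],  # [float] -> handled by the float-list branch above
}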
@@ -168,11 +165,8 @@ class Program(object):
        return ('Program(code mutable: {}) with:\n'
                'codes: {}\n'
                'op_descs: {}\n'
                'var_descs: {}\n').format(self.code_mutable,
                                          self.codes,
                                          self.op_descs,
                                          self.var_descs)
                'var_descs: {}\n').format(self.code_mutable, self.codes,
                                          self.op_descs, self.var_descs)

    def __repr__(self):
        return self.__str__()
@@ -185,8 +179,11 @@ class Program(object):
        if self.code_mutable:
            self.codes.append(code)

    def OpDesc(self, name, input_val_keys=None, output_val_keys=None, attrs=None):
    def OpDesc(self,
               name,
               input_val_keys=None,
               output_val_keys=None,
               attrs=None):
        """
        add OpDesc
        """
@@ -202,10 +199,15 @@ class Program(object):
        self.op_descs.append(desc)
        return desc

    def VarDesc(self, name, persistable=False, value_info=None, remove_batch=None):
    def VarDesc(self,
                name,
                persistable=False,
                value_info=None,
                remove_batch=None,
                dummy_dtype='float32'):
        """
        add VarDesc
        add VarDesc,
        dummy_dtype: WORKAROUND for Netron viewer
        """

        var_desc = framework_pb2.VarDesc()
@@ -213,14 +215,19 @@ class Program(object):
        var_desc.persistable = persistable
        var_desc.type.type = framework_pb2.VarType.LOD_TENSOR

        # REMOVEIT: WORKAROUND: Netron: null.tensor error
        tensor_desc = var_desc.type.lod_tensor.tensor
        tensor_desc.data_type = self.Dtype(dummy_dtype)  # required

        if value_info and 'dtype' in value_info:
            tensor_desc = var_desc.type.lod_tensor.tensor
            tensor_desc.data_type = self.Dtype(value_info['dtype']) # required
            tensor_desc.data_type = self.Dtype(value_info['dtype'])  # required
            if 'shape' in value_info:
                tensor_desc.dims.extend(value_info['shape'])
                if len(value_info['shape']) > 0: # skip scalars
                if len(value_info['shape']) > 0:  # skip scalars
                    if remove_batch is None:
                        remove_batch = value_info.get('remove_batch', not persistable)
                        remove_batch = value_info.get('remove_batch',
                                                      not persistable)
                    if remove_batch:
                        tensor_desc.dims[0] = -1
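The VarDesc plumbing above drives the framework_pb2 messages directly; a self-contained sketch of the same wiring for one hypothetical non-persistable input (the name and shape are illustrative):

from onnx2fluid import framework_pb2  # fallback proto module shipped with the package

var_desc = framework_pb2.VarDesc()
var_desc.name = 'image'
var_desc.persistable = False
var_desc.type.type = framework_pb2.VarType.LOD_TENSOR
tensor_desc = var_desc.type.lod_tensor.tensor
tensor_desc.data_type = framework_pb2.VarType.FP32
tensor_desc.dims.extend([4, 3, 224, 224])
tensor_desc.dims[0] = -1  # remove_batch: leave the batch dimension unbound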
@@ -231,7 +238,7 @@ class Program(object):
        convert an ONNX op and add it to program
        """

        if domain != '': # TODO: symbolic file routing by domain
        if domain != '':  # TODO: symbolic file routing by domain
            raise ValueError('only default domain supported')

        if op_type in symbolic.DEFAULT_OP_MAPPING:
@@ -240,8 +247,8 @@ class Program(object):
            fn = getattr(symbolic, op_type)
            fn(self, *args, **kwargs)
        else:
            raise ValueError('conversion for {}::{} not supported'.format(domain, op_type))
            raise ValueError('conversion for {}::{} not supported'.format(
                domain, op_type))

    def IntermediateOp(self, domain, op_type, *args, **kwargs):
        """
@@ -267,14 +274,15 @@ class Writer(object):
    CODE_INDENT = ' ' * 4

    @staticmethod
    def header_code(func_name):
    def header_code(func_name, info=''):
        """
        Python header codes
        """

        codes = list()
        codes.append('"""')
        codes.append('This code is generated by onnx2paddle.')
        codes.append('This code is generated by onnx2fluid.')
        codes.append('{}'.format(info))
        codes.append('"""')
        codes.append('')
        codes.append('from __future__ import division')
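Rendered, the header these appends now produce for a generated model.py looks roughly like this (the info line is the addition in this patch; appends beyond the hunk are omitted):

"""
This code is generated by onnx2fluid.
<info passed by the caller>
"""

from __future__ import division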
@@ -287,16 +295,25 @@ class Writer(object):
        return codes

    @staticmethod
    def emit_op(prog, name, domain, op_type, inputs, outputs, attrs, value_infos,
                *args, **kwargs):
    def emit_op(prog, name, domain, op_type, inputs, outputs, attrs,
                value_infos, *args, **kwargs):
        """
        emit an ONNX op into program
        """

        prog.Code('# {}, {}::{}: {} -> {}, {}'.format(
            name, domain, op_type, inputs, outputs, _irepr(attrs, to=', ')))
        prog.Op(domain, op_type, inputs, outputs, attrs,
                value_infos=value_infos, name=name, *args, **kwargs)
        prog.Code('# {}, {}::{}: {} -> {}, {}'.format(
            name, domain, op_type, inputs, outputs, _irepr(attrs, to=', ')))
        prog.Op(domain,
                op_type,
                inputs,
                outputs,
                attrs,
                value_infos=value_infos,
                name=name,
                *args,
                **kwargs)

    @staticmethod
    def emit_param(prog, name, value_info):
@@ -313,18 +330,18 @@ class Writer(object):
        var_name = make_var_name(name)
        attr_name = make_attr_name(name)
        prog.Code('# parameter: {}'.format(name))
        prog.Code('{} = ParamAttr(name={})' # , trainable=True
        prog.Code('{} = ParamAttr(name={})'  # , trainable=True
                  .format(attr_name, repr(var_name)))
        prog.Code('{} = layers.create_parameter(shape={}, dtype={}, name={}, attr={}'
                  ', default_initializer=initializer.Constant(0))' #, is_bias={}
                  .format(var_name, value_info['shape'],
                          repr(value_info['dtype'].name), repr(name), attr_name))
                  #, value_info.get('is_bias', False)))
        prog.Code('{} = layers.create_parameter(shape={}, dtype={}, name={}, attr={}'
                  ', default_initializer=initializer.Constant(0))' #, is_bias={}
                  .format(var_name, value_info['shape'],
                          repr(value_info['dtype'].name), repr(name), attr_name))
                  #, value_info.get('is_bias', False)))
        prog.VarDesc(var_name, persistable=True, value_info=value_info)
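Concretely, for a hypothetical parameter named conv1_weight with shape [64, 3, 7, 7], the prog.Code calls above emit text along these lines into the generated model.py:

# parameter: conv1_weight
_conv1_weight = ParamAttr(name='conv1_weight')
conv1_weight = layers.create_parameter(shape=[64, 3, 7, 7], dtype='float32',
                                       name='conv1_weight', attr=_conv1_weight,
                                       default_initializer=initializer.Constant(0))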
    @staticmethod
    def emit_inputs(prog, names, value_infos, remove_batch=None):
    def emit_inputs(prog, names, value_infos, remove_batch=None):
        """
        emit ONNX inputs into program
        """
@@ -334,27 +351,33 @@ class Writer(object):
            value_info = value_infos[name]
            shape = value_info['shape']
            if remove_batch is None:
                remove_batch = value_info.get('remove_batch', True) # HINT: True by default ?
                remove_batch = value_info.get('remove_batch',
                                              True)  # HINT: True by default ?
            if remove_batch:
                shape = shape[1:]
            prog.Code('# input: {}'.format(name))
            prog.Code(('{} = layers.data(name={}, shape={}, dtype={}, '
                       'append_batch_size={})' # , stop_gradient=True
                       ).format(var_name, repr(name), shape,
                                repr(value_info['dtype'].name), remove_batch,
                                ))
            prog.OpDesc('feed',
                        (['feed'], 'X'),
                        ([var_name], 'Out'),
                        dict(col=idx),
                        )
            prog.VarDesc(var_name, value_info=value_info, remove_batch=remove_batch)
            prog.Code(('{} = layers.data(name={}, shape={}, dtype={}, '
                       'append_batch_size={})'  # , stop_gradient=True
                       ).format(
                           var_name,
                           repr(name),
                           shape,
                           repr(value_info['dtype'].name),
                           remove_batch,
                       ))
            prog.OpDesc(
                'feed',
                (['feed'], 'X'),
                ([var_name], 'Out'),
                dict(col=idx),
            )
            prog.VarDesc(
                var_name, value_info=value_info, remove_batch=remove_batch)
    @staticmethod
    def emit_outputs(prog, names): #, value_infos
    def emit_outputs(prog, names):  #, value_infos
        """
        emit ONNX outputs into program
        """
@@ -364,11 +387,12 @@ class Writer(object):
            var_name = make_var_name(name)
            code += var_name + ', '
            prog.OpDesc('fetch',
                        ([var_name], 'X'),
                        (['fetch'], 'Out'),
                        dict(col=idx),
                        )
            prog.OpDesc(
                'fetch',
                ([var_name], 'X'),
                (['fetch'], 'Out'),
                dict(col=idx),
            )
            # var is emitted over ops
        prog.Code(code)
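The feed/fetch OpDescs emitted by emit_inputs and emit_outputs are what lets fluid's executor bind arrays by name and column at inference time; a consuming-side sketch (the directory, variable name and shape are placeholders):

import numpy as np
import paddle.fluid as fluid

exe = fluid.Executor(fluid.CPUPlace())
program, feed_names, fetch_targets = fluid.io.load_inference_model('model_dir', exe)
outputs = exe.run(program,
                  feed={'image': np.zeros((1, 3, 224, 224), np.float32)},
                  fetch_list=fetch_targets)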
@@ -396,9 +420,9 @@ class Writer(object):
        tensor_desc.dims.extend(weight.shape)

        fp = open(filename, 'wb')
        np.array([0], dtype=np.int32).tofile(fp) # version
        np.array([0], dtype=np.int64).tofile(fp) # LOD level
        np.array([0], dtype=np.int32).tofile(fp) # tensor version
        np.array([0], dtype=np.int32).tofile(fp)  # version
        np.array([0], dtype=np.int64).tofile(fp)  # LOD level
        np.array([0], dtype=np.int32).tofile(fp)  # tensor version
        np.array([tensor_desc.ByteSize()], dtype=np.int32).tofile(fp)
        fp.write(tensor_desc.SerializeToString())
        weight.tofile(fp)
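The byte layout written above is: int32 version, int64 LoD level, int32 tensor version, the int32 proto size added in this patch, the serialized TensorDesc, then the raw data. A minimal reader sketch under that assumption (dtype decoding simplified to float32):

import numpy as np

from onnx2fluid import framework_pb2

def read_fluid_tensor(filename):
    with open(filename, 'rb') as fp:
        np.fromfile(fp, dtype=np.int32, count=1)  # version
        np.fromfile(fp, dtype=np.int64, count=1)  # LoD level
        np.fromfile(fp, dtype=np.int32, count=1)  # tensor version
        size = int(np.fromfile(fp, dtype=np.int32, count=1)[0])
        tensor_desc = framework_pb2.VarType.TensorDesc()
        tensor_desc.ParseFromString(fp.read(size))
        data = np.fromfile(fp, dtype=np.float32)  # map tensor_desc.data_type in real use
    return data.reshape(tuple(tensor_desc.dims))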
@@ -463,4 +487,4 @@ class Writer(object):
        fp = open(filename, 'wb')
        fp.write(prog_desc.SerializeToString())
        fp.close()
        logger.debug('saved descs to %s', filename)
\ No newline at end of file
        logger.debug('saved descs to %s', filename)
onnx2paddle/requirements.txt → onnx2fluid/requirements.txt View file @ f0dede1f
-e .
onnx>=1.4.0
paddlepaddle
\ No newline at end of file
paddlepaddle
onnx2paddle/setup.cfg → onnx2fluid/setup.cfg View file @ f0dede1f
@@ -2,14 +2,14 @@
# https://setuptools.readthedocs.io/en/latest/setuptools.html#configuring-setup-using-setup-cfg-files
[metadata]
# project name, used as the package name on release and install
name = onnx2paddle
name = onnx2fluid
# author name and email address
author = Macrobull
# author_email = .Github@github.com
# project version; only 1.0 and above are treated as formal releases
version = 0.1.0
# one-line project summary so users get the idea at a glance; Chinese not supported
description = Inference model conversion from ONNX/PyTorch to Paddle
description = Inference model conversion from ONNX/PyTorch to Paddle fluid
# detailed description content and format, including readme and changelog, usually md or rst
long_description = file: README.md, CHANGELOG.md
long_description_content_type = text/markdown
@@ -25,7 +25,7 @@ classifier =
    Programming Language :: Python :: 3.5
# keywords for indexing, so users can search out the project
keywords =
    onnx paddle
    onnx paddlepaddle
[options]
# package names; find: means automatic discovery, configurable in detail under options.packages.find
@@ -44,21 +44,21 @@ install_requires =
# mock
# unit test directory
#test_suite = onnx2paddle.tests
#test_suite = onnx2fluid.tests
# automatically include data files under version control
include_package_data = True
# the project is pure Python, so the zipped source package can be executed directly
zip_safe = False
# the following config turns the given functions into command-line tools that users can run directly
#[options.entry_points]
#console_scripts =
#    onnx2paddle = onnx2paddle.cmdline:main
[options.entry_points]
console_scripts =
    onnx2fluid = onnx2fluid.cmdline:main
# the following config adds non-py files such as conf or data to the package, installed along with it into site-packages
# files only, directories unsupported, but wildcards may be used
#[options.package_data]
#onnx2paddle =
#onnx2fluid =
#    conf/*
#    data/*
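Uncommenting the console_scripts entry is what gives the package an onnx2fluid command on installation; the wrapper script pip generates for it is roughly equivalent to this (illustrative):

import sys

from onnx2fluid.cmdline import main

if __name__ == '__main__':
    sys.exit(main())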
onnx2paddle/setup.py → onnx2fluid/setup.py View file @ f0dede1f
@@ -15,4 +15,3 @@ Date: 2019/02/22 10:25:46
import setuptools

setuptools.setup()
onnx2paddle/onnx2paddle/framework_pb2.py deleted 100644 → 0 View file @ b483d12e
This diff is collapsed. Click to expand it.