Commit 7c3e9379 authored by Macrobull

bugfix

Parent 816ac6e2
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 27 11:50:03 2019
@author: Macrobull
"""
import sys
import numpy as np
from collections import OrderedDict as Dict
def make_var_name(name):
"""
make a valid variable name in Python code
"""
if name == '':
return '_'
if name[0].isdigit():
return 'var_' + name
for s in ' \\|/:': #
name = name.replace(s, '_')
if name.startswith('_'):
name = 'var' + name
return name
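# Illustrative checks of the sanitization above (hypothetical names, a
# hedged sketch derived from the rules in make_var_name):
#   make_var_name('')             -> '_'
#   make_var_name('0_prob')       -> 'var_0_prob'
#   make_var_name('gpu_0/data_0') -> 'gpu_0_data_0'
#   make_var_name('_hidden')      -> 'var_hidden'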
fn = sys.argv[1]
input_names = sys.argv[2].split(',')
output_names = sys.argv[3].split(',')
squeeze_data = len(sys.argv) > 4
data = np.load(fn, encoding='bytes')
input_data = data['inputs']
output_data = data['outputs']
while squeeze_data and input_data.ndim > 4 and input_data.shape[0] == 1:
input_data = input_data.squeeze(0)
while squeeze_data and output_data.ndim > 2 and output_data.shape[0] == 1:
output_data = output_data.squeeze(0)
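# the loops above only strip leading singleton (batch-like) axes, stopping
# once inputs are down to 4 dims and outputs down to 2 dims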
inputs = Dict(zip(map(make_var_name, input_names), [input_data]))
outputs = Dict(zip(map(make_var_name, output_names), [output_data]))
np.savez(fn, inputs=inputs, outputs=outputs) # overwrite
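# Example invocation, mirroring the validation shell script further below
# (the .npz path is hypothetical):
#   python convert_data_npz.py bvlc_alexnet/test_data_0.npz data_0 prob_1 -s
# any fourth argument enables squeeze_data; the archive is rewritten in place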
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 27 11:50:03 2019
@author: Macrobull
"""
import os, sys
import numpy as np
import onnx
import onnx.numpy_helper as numpy_helper
from collections import OrderedDict as Dict
from glob import glob
def make_var_name(name):
"""
make a valid variable name in Python code
"""
if name == '':
return '_'
if name[0].isdigit():
return 'var_' + name
for s in ' \\|/:': #
name = name.replace(s, '_')
if name.startswith('_'):
name = 'var' + name
return name
data_dir = os.path.dirname(sys.argv[1])
input_names = sys.argv[2].split(',')
output_names = sys.argv[3].split(',')
squeeze_data = len(sys.argv) > 4
# Load inputs
inputs = []
for fn in glob(os.path.join(data_dir, 'input_*.pb')):
tensor = onnx.TensorProto()
with open(fn, 'rb') as f:
tensor.ParseFromString(f.read())
tensor = numpy_helper.to_array(tensor)
while squeeze_data and tensor.ndim > 4 and tensor.shape[0] == 1:
tensor = tensor.squeeze(0)
inputs.append(tensor)
# Load outputs
outputs = []
for fn in glob(os.path.join(data_dir, 'output_*.pb')):
tensor = onnx.TensorProto()
with open(fn, 'rb') as f:
tensor.ParseFromString(f.read())
tensor = numpy_helper.to_array(tensor)
while squeeze_data and tensor.ndim > 2 and tensor.shape[0] == 1:
tensor = tensor.squeeze(0)
outputs.append(tensor)
inputs = Dict(zip(map(make_var_name, input_names), inputs))
outputs = Dict(zip(map(make_var_name, output_names), outputs))
np.savez(data_dir, inputs=inputs, outputs=outputs)
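# Example invocation, mirroring the validation shell script further below
# (the directory name is hypothetical):
#   python convert_data_pb.py bvlc_alexnet/test_data_set_0/ data_0 prob_1
# np.savez appends '.npz', so the golden data lands next to the .pb directory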
......@@ -39,7 +39,7 @@ idx = 0
#yp = model(xb)
#idx += 1
#print('index: ', idx)
#export_onnx_with_validation(model, (xb, ), prefix + str(idx),
#export_onnx_with_validation(model, [xb], prefix + str(idx),
# ['x'], ['y'],
# verbose=True, training=False)
......@@ -61,7 +61,7 @@ idx = 0
#yp = model(xb)
#idx += 1
#print('index: ', idx)
#export_onnx_with_validation(model, (xb, ), prefix + str(idx),
#export_onnx_with_validation(model, [xb], prefix + str(idx),
# ['x'], ['y'],
# verbose=True, training=False)
......@@ -85,11 +85,10 @@ xb = torch.rand((2, 3))
yp = model(xb)
idx += 1
print('index: ', idx)
export_onnx_with_validation(
model, (xb, ),
prefix + str(idx), ['x'], ['y'],
verbose=True,
training=False)
export_onnx_with_validation(model, [xb],
prefix + str(idx), ['x'], ['y'],
verbose=True,
training=False)
######## example: compare ########
......@@ -113,11 +112,10 @@ xb1 = torch.rand((2, 3))
ya, yb, yc = model(xb0, xb1)
idx += 1
print('index: ', idx)
export_onnx_with_validation(
model, (xb0, xb1),
prefix + str(idx), ['x0', 'x1'], ['ya', 'yb', 'yc'],
verbose=True,
training=False)
export_onnx_with_validation(model, [xb0, xb1],
prefix + str(idx), ['x0', 'x1'], ['ya', 'yb', 'yc'],
verbose=True,
training=False)
######## example: affine_grid ########
......@@ -137,11 +135,10 @@ theta = torch.rand((2, 2, 3))
grid = model(theta)
idx += 1
print('index: ', idx)
export_onnx_with_validation(
model, (theta, ),
prefix + str(idx), ['theta'], ['grid'],
verbose=True,
training=False)
export_onnx_with_validation(model, (theta, ),
prefix + str(idx), ['theta'], ['grid'],
verbose=True,
training=False)
######## example: conv2d_transpose ########
......@@ -165,11 +162,10 @@ xb = torch.rand((2, 3, 4, 5))
yp = model(xb)
idx += 1
print('index: ', idx)
export_onnx_with_validation(
model, (xb, ),
prefix + str(idx), ['x'], ['y'],
verbose=True,
training=False)
export_onnx_with_validation(model, [xb],
prefix + str(idx), ['x'], ['y'],
verbose=True,
training=False)
######## example: conv2d ########
......@@ -195,11 +191,10 @@ xb = torch.rand((2, 3, 4, 5))
yp = model(xb)
idx += 1
print('index: ', idx)
export_onnx_with_validation(
model, (xb, ),
prefix + str(idx), ['x'], ['y'],
verbose=True,
training=False)
export_onnx_with_validation(model, [xb],
prefix + str(idx), ['x'], ['y'],
verbose=True,
training=False)
######### example: conv1d ########
#
......@@ -220,7 +215,7 @@ export_onnx_with_validation(
#yp = model(xb)
#idx += 1
#print('index: ', idx)
#export_onnx_with_validation(model, (xb, ), prefix + str(idx),
#export_onnx_with_validation(model, [xb], prefix + str(idx),
# ['x'], ['y'],
# verbose=True, training=False)
......@@ -241,8 +236,7 @@ xb = torch.rand((2, 3))
yp = model(xb)
idx += 1
print('index: ', idx)
export_onnx_with_validation(
model, (xb, ),
prefix + str(idx), ['y'], ['y'],
verbose=True,
training=False)
export_onnx_with_validation(model, [xb],
prefix + str(idx), ['y'], ['y'],
verbose=True,
training=False)
......@@ -21,10 +21,10 @@ class double_conv(nn.Module):
def __init__(self, in_ch, out_ch):
super(double_conv, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_ch, out_ch, 3, padding=1), nn.BatchNorm2d(out_ch),
nn.ReLU(inplace=True), nn.Conv2d(out_ch, out_ch, 3, padding=1),
nn.BatchNorm2d(out_ch), nn.ReLU(inplace=True))
self.conv = nn.Sequential(nn.Conv2d(in_ch, out_ch, 3, padding=1),
nn.BatchNorm2d(out_ch), nn.ReLU(inplace=True),
nn.Conv2d(out_ch, out_ch, 3, padding=1),
nn.BatchNorm2d(out_ch), nn.ReLU(inplace=True))
def forward(self, x):
x = self.conv(x)
......@@ -58,8 +58,8 @@ class up(nn.Module):
# it would be a nice idea if the upsampling could be learned too,
# but my machine does not have enough memory to handle all those weights
if bilinear:
self.up = nn.Upsample(
scale_factor=2, mode='bilinear') #, align_corners=True)
self.up = nn.Upsample(scale_factor=2,
mode='bilinear') #, align_corners=True)
else:
self.up = nn.ConvTranspose2d(in_ch // 2, in_ch // 2, 2, stride=2)
......@@ -131,8 +131,7 @@ model = UNet(3, 80)
model.eval()
xb = torch.rand((1, 3, 512, 512))
yp = model(xb)
export_onnx_with_validation(
model, (xb, ),
'sample_unet', ['image'], ['pred'],
verbose=True,
training=False)
export_onnx_with_validation(model, [xb],
'sample_unet', ['image'], ['pred'],
verbose=True,
training=False)
......@@ -20,188 +20,166 @@ class Yolov2(nn.Module):
def __init__(self):
super(Yolov2, self).__init__()
self.conv1 = nn.Conv2d(
in_channels=3,
out_channels=32,
kernel_size=3,
stride=1,
padding=1,
bias=False)
self.conv1 = nn.Conv2d(in_channels=3,
out_channels=32,
kernel_size=3,
stride=1,
padding=1,
bias=False)
self.batchnorm1 = nn.BatchNorm2d(32)
self.conv2 = nn.Conv2d(
in_channels=32,
out_channels=64,
kernel_size=3,
stride=1,
padding=1,
bias=False)
self.conv2 = nn.Conv2d(in_channels=32,
out_channels=64,
kernel_size=3,
stride=1,
padding=1,
bias=False)
self.batchnorm2 = nn.BatchNorm2d(64)
self.conv3 = nn.Conv2d(
in_channels=64,
out_channels=128,
kernel_size=3,
stride=1,
padding=1,
bias=False)
self.conv3 = nn.Conv2d(in_channels=64,
out_channels=128,
kernel_size=3,
stride=1,
padding=1,
bias=False)
self.batchnorm3 = nn.BatchNorm2d(128)
self.conv4 = nn.Conv2d(
in_channels=128,
out_channels=64,
kernel_size=1,
stride=1,
padding=0,
bias=False)
self.conv4 = nn.Conv2d(in_channels=128,
out_channels=64,
kernel_size=1,
stride=1,
padding=0,
bias=False)
self.batchnorm4 = nn.BatchNorm2d(64)
self.conv5 = nn.Conv2d(
in_channels=64,
out_channels=128,
kernel_size=3,
stride=1,
padding=1,
bias=False)
self.conv5 = nn.Conv2d(in_channels=64,
out_channels=128,
kernel_size=3,
stride=1,
padding=1,
bias=False)
self.batchnorm5 = nn.BatchNorm2d(128)
self.conv6 = nn.Conv2d(
in_channels=128,
out_channels=256,
kernel_size=3,
stride=1,
padding=1,
bias=False)
self.conv6 = nn.Conv2d(in_channels=128,
out_channels=256,
kernel_size=3,
stride=1,
padding=1,
bias=False)
self.batchnorm6 = nn.BatchNorm2d(256)
self.conv7 = nn.Conv2d(
in_channels=256,
out_channels=128,
kernel_size=1,
stride=1,
padding=0,
bias=False)
self.conv7 = nn.Conv2d(in_channels=256,
out_channels=128,
kernel_size=1,
stride=1,
padding=0,
bias=False)
self.batchnorm7 = nn.BatchNorm2d(128)
self.conv8 = nn.Conv2d(
in_channels=128,
out_channels=256,
kernel_size=3,
stride=1,
padding=1,
bias=False)
self.conv8 = nn.Conv2d(in_channels=128,
out_channels=256,
kernel_size=3,
stride=1,
padding=1,
bias=False)
self.batchnorm8 = nn.BatchNorm2d(256)
self.conv9 = nn.Conv2d(
in_channels=256,
out_channels=512,
kernel_size=3,
stride=1,
padding=1,
bias=False)
self.conv9 = nn.Conv2d(in_channels=256,
out_channels=512,
kernel_size=3,
stride=1,
padding=1,
bias=False)
self.batchnorm9 = nn.BatchNorm2d(512)
self.conv10 = nn.Conv2d(
in_channels=512,
out_channels=256,
kernel_size=1,
stride=1,
padding=0,
bias=False)
self.conv10 = nn.Conv2d(in_channels=512,
out_channels=256,
kernel_size=1,
stride=1,
padding=0,
bias=False)
self.batchnorm10 = nn.BatchNorm2d(256)
self.conv11 = nn.Conv2d(
in_channels=256,
out_channels=512,
kernel_size=3,
stride=1,
padding=1,
bias=False)
self.conv11 = nn.Conv2d(in_channels=256,
out_channels=512,
kernel_size=3,
stride=1,
padding=1,
bias=False)
self.batchnorm11 = nn.BatchNorm2d(512)
self.conv12 = nn.Conv2d(
in_channels=512,
out_channels=256,
kernel_size=1,
stride=1,
padding=0,
bias=False)
self.conv12 = nn.Conv2d(in_channels=512,
out_channels=256,
kernel_size=1,
stride=1,
padding=0,
bias=False)
self.batchnorm12 = nn.BatchNorm2d(256)
self.conv13 = nn.Conv2d(
in_channels=256,
out_channels=512,
kernel_size=3,
stride=1,
padding=1,
bias=False)
self.conv13 = nn.Conv2d(in_channels=256,
out_channels=512,
kernel_size=3,
stride=1,
padding=1,
bias=False)
self.batchnorm13 = nn.BatchNorm2d(512)
self.conv14 = nn.Conv2d(
in_channels=512,
out_channels=1024,
kernel_size=3,
stride=1,
padding=1,
bias=False)
self.conv14 = nn.Conv2d(in_channels=512,
out_channels=1024,
kernel_size=3,
stride=1,
padding=1,
bias=False)
self.batchnorm14 = nn.BatchNorm2d(1024)
self.conv15 = nn.Conv2d(
in_channels=1024,
out_channels=512,
kernel_size=1,
stride=1,
padding=0,
bias=False)
self.conv15 = nn.Conv2d(in_channels=1024,
out_channels=512,
kernel_size=1,
stride=1,
padding=0,
bias=False)
self.batchnorm15 = nn.BatchNorm2d(512)
self.conv16 = nn.Conv2d(
in_channels=512,
out_channels=1024,
kernel_size=3,
stride=1,
padding=1,
bias=False)
self.conv16 = nn.Conv2d(in_channels=512,
out_channels=1024,
kernel_size=3,
stride=1,
padding=1,
bias=False)
self.batchnorm16 = nn.BatchNorm2d(1024)
self.conv17 = nn.Conv2d(
in_channels=1024,
out_channels=512,
kernel_size=1,
stride=1,
padding=0,
bias=False)
self.conv17 = nn.Conv2d(in_channels=1024,
out_channels=512,
kernel_size=1,
stride=1,
padding=0,
bias=False)
self.batchnorm17 = nn.BatchNorm2d(512)
self.conv18 = nn.Conv2d(
in_channels=512,
out_channels=1024,
kernel_size=3,
stride=1,
padding=1,
bias=False)
self.conv18 = nn.Conv2d(in_channels=512,
out_channels=1024,
kernel_size=3,
stride=1,
padding=1,
bias=False)
self.batchnorm18 = nn.BatchNorm2d(1024)
self.conv19 = nn.Conv2d(
in_channels=1024,
out_channels=1024,
kernel_size=3,
stride=1,
padding=1,
bias=False)
self.conv19 = nn.Conv2d(in_channels=1024,
out_channels=1024,
kernel_size=3,
stride=1,
padding=1,
bias=False)
self.batchnorm19 = nn.BatchNorm2d(1024)
self.conv20 = nn.Conv2d(
in_channels=1024,
out_channels=1024,
kernel_size=3,
stride=1,
padding=1,
bias=False)
self.conv20 = nn.Conv2d(in_channels=1024,
out_channels=1024,
kernel_size=3,
stride=1,
padding=1,
bias=False)
self.batchnorm20 = nn.BatchNorm2d(1024)
self.conv21 = nn.Conv2d(
in_channels=3072,
out_channels=1024,
kernel_size=3,
stride=1,
padding=1,
bias=False)
self.conv21 = nn.Conv2d(in_channels=3072,
out_channels=1024,
kernel_size=3,
stride=1,
padding=1,
bias=False)
self.batchnorm21 = nn.BatchNorm2d(1024)
self.conv22 = nn.Conv2d(
in_channels=1024,
out_channels=125,
kernel_size=1,
stride=1,
padding=0)
self.conv22 = nn.Conv2d(in_channels=1024,
out_channels=125,
kernel_size=1,
stride=1,
padding=0)
def reorg_layer(self, x):
stride = 2
......@@ -227,14 +205,14 @@ class Yolov2(nn.Module):
return passthrough
def forward(self, x):
out = F.max_pool2d(
F.leaky_relu(self.batchnorm1(self.conv1(x)), negative_slope=0.1),
2,
stride=2)
out = F.max_pool2d(
F.leaky_relu(self.batchnorm2(self.conv2(out)), negative_slope=0.1),
2,
stride=2)
out = F.max_pool2d(F.leaky_relu(self.batchnorm1(self.conv1(x)),
negative_slope=0.1),
2,
stride=2)
out = F.max_pool2d(F.leaky_relu(self.batchnorm2(self.conv2(out)),
negative_slope=0.1),
2,
stride=2)
out = F.leaky_relu(self.batchnorm3(self.conv3(out)), negative_slope=0.1)
out = F.leaky_relu(self.batchnorm4(self.conv4(out)), negative_slope=0.1)
......@@ -247,36 +225,36 @@ class Yolov2(nn.Module):
out = F.max_pool2d(out, 2, stride=2)
out = F.leaky_relu(self.batchnorm9(self.conv9(out)), negative_slope=0.1)
out = F.leaky_relu(
self.batchnorm10(self.conv10(out)), negative_slope=0.1)
out = F.leaky_relu(
self.batchnorm11(self.conv11(out)), negative_slope=0.1)
out = F.leaky_relu(
self.batchnorm12(self.conv12(out)), negative_slope=0.1)
out = F.leaky_relu(
self.batchnorm13(self.conv13(out)), negative_slope=0.1)
out = F.leaky_relu(self.batchnorm10(self.conv10(out)),
negative_slope=0.1)
out = F.leaky_relu(self.batchnorm11(self.conv11(out)),
negative_slope=0.1)
out = F.leaky_relu(self.batchnorm12(self.conv12(out)),
negative_slope=0.1)
out = F.leaky_relu(self.batchnorm13(self.conv13(out)),
negative_slope=0.1)
passthrough = self.reorg_layer(out)
out = F.max_pool2d(out, 2, stride=2)
out = F.leaky_relu(
self.batchnorm14(self.conv14(out)), negative_slope=0.1)
out = F.leaky_relu(
self.batchnorm15(self.conv15(out)), negative_slope=0.1)
out = F.leaky_relu(
self.batchnorm16(self.conv16(out)), negative_slope=0.1)
out = F.leaky_relu(
self.batchnorm17(self.conv17(out)), negative_slope=0.1)
out = F.leaky_relu(
self.batchnorm18(self.conv18(out)), negative_slope=0.1)
out = F.leaky_relu(self.batchnorm14(self.conv14(out)),
negative_slope=0.1)
out = F.leaky_relu(self.batchnorm15(self.conv15(out)),
negative_slope=0.1)
out = F.leaky_relu(self.batchnorm16(self.conv16(out)),
negative_slope=0.1)
out = F.leaky_relu(self.batchnorm17(self.conv17(out)),
negative_slope=0.1)
out = F.leaky_relu(self.batchnorm18(self.conv18(out)),
negative_slope=0.1)
out = F.leaky_relu(
self.batchnorm19(self.conv19(out)), negative_slope=0.1)
out = F.leaky_relu(
self.batchnorm20(self.conv20(out)), negative_slope=0.1)
out = F.leaky_relu(self.batchnorm19(self.conv19(out)),
negative_slope=0.1)
out = F.leaky_relu(self.batchnorm20(self.conv20(out)),
negative_slope=0.1)
out = torch.cat([passthrough, out], 1)
out = F.leaky_relu(
self.batchnorm21(self.conv21(out)), negative_slope=0.1)
out = F.leaky_relu(self.batchnorm21(self.conv21(out)),
negative_slope=0.1)
out = self.conv22(out)
return out
......@@ -286,8 +264,7 @@ model = Yolov2()
model.eval()
xb = torch.rand((1, 3, 224, 224))
yp = model(xb)
export_onnx_with_validation(
model, (xb, ),
'sample_yolov2', ['image'], ['pred'],
verbose=True,
training=False)
export_onnx_with_validation(model, [xb],
'sample_yolov2', ['image'], ['pred'],
verbose=True,
training=False)
......@@ -11,284 +11,570 @@ validate_flags2="/tmp/export/__model__"
alias http_get="aria2c -c -s8 -x8"
# alias python="python3" # if ...
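# Each model function below follows the same recipe:
#   1. fetch the tarball with http_get and unpack it
#   2. convert the ONNX model via `python -m onnx2fluid`
#   3. repack the reference I/O with convert_data_npz.py / convert_data_pb.py
#   4. validate both the generated Python code and the exported __model__ desc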
bvlc_alexnet()
{
bn_tar="bvlc_alexnet"
fn_tar="$bn_tar.tar.gz"
fn_model="$bn_tar/model.onnx"
http_get "$base_url$fn_tar"
rm -rf "$bn_tar/"
echo "extracting ..."
tar xf "$fn_tar"
python -m onnx2fluid $convert_flags "$fn_model"
for npz in "$bn_tar"/*.npz
do
echo "converting $npz ..."
python convert_data_npz_0.py "$npz" data_0 prob_1 -s
python -m onnx2fluid.validation $validate_flags1 -t "$npz"
python -m onnx2fluid.validation $validate_flags2 -t "$npz"
done
for pb_dir in "$bn_tar"/*/
do
echo "converting $pb_dir ..."
python convert_data_pb_0.py "$pb_dir" data_0 prob_1
python -m onnx2fluid.validation $validate_flags1 -t $(dirname "$pb_dir/x").npz
python -m onnx2fluid.validation $validate_flags2 -t $(dirname "$pb_dir/x").npz
done
bn_tar="bvlc_alexnet"
fn_tar="$bn_tar.tar.gz"
fn_model="$bn_tar/model.onnx"
http_get "$base_url$fn_tar"
rm -rf "$bn_tar/"
echo "extracting ..."
tar xf "$fn_tar"
python -m onnx2fluid $convert_flags "$fn_model"
for npz in "$bn_tar"/*.npz
do
echo "converting $npz ..."
python convert_data_npz.py "$npz" data_0 prob_1 -s
python -m onnx2fluid.validation $validate_flags1 -t "$npz"
python -m onnx2fluid.validation $validate_flags2 -t "$npz"
done
for pb_dir in "$bn_tar"/*/
do
echo "converting $pb_dir ..."
python convert_data_pb.py "$pb_dir" data_0 prob_1
python -m onnx2fluid.validation $validate_flags1 -t $(dirname "$pb_dir/x").npz
python -m onnx2fluid.validation $validate_flags2 -t $(dirname "$pb_dir/x").npz
done
rm -rf "$bn_tar/"
}
bvlc_googlenet()
{
bn_tar="bvlc_googlenet"
fn_tar="$bn_tar.tar.gz"
fn_model="$bn_tar/model.onnx"
http_get "$base_url$fn_tar"
rm -rf "$bn_tar/"
echo "extracting ..."
tar xf "$fn_tar"
python -m onnx2fluid $convert_flags "$fn_model"
for pb_dir in "$bn_tar"/*/
do
echo "converting $pb_dir"
python convert_data_pb_0.py "$pb_dir" data_0 prob_1
python -m onnx2fluid.validation $validate_flags1 -t $(dirname "$pb_dir/x").npz
python -m onnx2fluid.validation $validate_flags2 -t $(dirname "$pb_dir/x").npz
done
bn_tar="bvlc_googlenet"
fn_tar="$bn_tar.tar.gz"
fn_model="$bn_tar/model.onnx"
http_get "$base_url$fn_tar"
rm -rf "$bn_tar/"
echo "extracting ..."
tar xf "$fn_tar"
python -m onnx2fluid $convert_flags "$fn_model"
for pb_dir in "$bn_tar"/*/
do
echo "converting $pb_dir"
python convert_data_pb.py "$pb_dir" data_0 prob_1
python -m onnx2fluid.validation $validate_flags1 -t $(dirname "$pb_dir/x").npz
python -m onnx2fluid.validation $validate_flags2 -t $(dirname "$pb_dir/x").npz
done
rm -rf "$bn_tar/"
}
bvlc_reference_caffenet()
{
bn_tar="bvlc_reference_caffenet"
fn_tar="$bn_tar.tar.gz"
fn_model="$bn_tar/model.onnx"
http_get "$base_url$fn_tar"
rm -rf "$bn_tar/"
echo "extracting ..."
tar xf "$fn_tar"
python -m onnx2fluid $convert_flags "$fn_model"
for pb_dir in "$bn_tar"/*/
do
echo "converting $pb_dir"
python convert_data_pb_0.py "$pb_dir" data_0 prob_1
python -m onnx2fluid.validation $validate_flags1 -t $(dirname "$pb_dir/x").npz
python -m onnx2fluid.validation $validate_flags2 -t $(dirname "$pb_dir/x").npz
done
bn_tar="bvlc_reference_caffenet"
fn_tar="$bn_tar.tar.gz"
fn_model="$bn_tar/model.onnx"
http_get "$base_url$fn_tar"
rm -rf "$bn_tar/"
echo "extracting ..."
tar xf "$fn_tar"
python -m onnx2fluid $convert_flags "$fn_model"
for pb_dir in "$bn_tar"/*/
do
echo "converting $pb_dir"
python convert_data_pb.py "$pb_dir" data_0 prob_1
python -m onnx2fluid.validation $validate_flags1 -t $(dirname "$pb_dir/x").npz
python -m onnx2fluid.validation $validate_flags2 -t $(dirname "$pb_dir/x").npz
done
rm -rf "$bn_tar/"
}
bvlc_reference_rcnn_ilsvrc13()
{
bn_tar="bvlc_reference_rcnn_ilsvrc13"
fn_tar="$bn_tar.tar.gz"
fn_model="$bn_tar/model.onnx"
http_get "$base_url$fn_tar"
rm -rf "$bn_tar/"
echo "extracting ..."
tar xf "$fn_tar"
python -m onnx2fluid $convert_flags "$fn_model"
for pb_dir in "$bn_tar"/*/
do
echo "converting $pb_dir"
python convert_data_pb_0.py "$pb_dir" data_0 fc-rcnn_1
python -m onnx2fluid.validation $validate_flags1 -t $(dirname "$pb_dir/x").npz
python -m onnx2fluid.validation $validate_flags2 -t $(dirname "$pb_dir/x").npz
done
bn_tar="bvlc_reference_rcnn_ilsvrc13"
fn_tar="$bn_tar.tar.gz"
fn_model="$bn_tar/model.onnx"
http_get "$base_url$fn_tar"
rm -rf "$bn_tar/"
echo "extracting ..."
tar xf "$fn_tar"
python -m onnx2fluid $convert_flags "$fn_model"
for pb_dir in "$bn_tar"/*/
do
echo "converting $pb_dir"
python convert_data_pb.py "$pb_dir" data_0 fc-rcnn_1
python -m onnx2fluid.validation $validate_flags1 -t $(dirname "$pb_dir/x").npz
python -m onnx2fluid.validation $validate_flags2 -t $(dirname "$pb_dir/x").npz
done
rm -rf "$bn_tar/"
}
densenet121()
{
bn_tar="densenet121"
fn_tar="$bn_tar.tar.gz"
fn_model="$bn_tar/model.onnx"
http_get "$base_url$fn_tar"
rm -rf "$bn_tar/"
echo "extracting ..."
tar xf "$fn_tar"
python -m onnx2fluid $convert_flags "$fn_model"
for npz in "$bn_tar"/*.npz
do
echo "converting $npz ..."
python convert_data_npz.py "$npz" data_0 fc6_1 -s
python -m onnx2fluid.validation $validate_flags1 -t "$npz"
python -m onnx2fluid.validation $validate_flags2 -t "$npz"
done
for pb_dir in "$bn_tar"/*/
do
echo "converting $pb_dir"
python convert_data_pb.py "$pb_dir" data_0 fc6_1
python -m onnx2fluid.validation $validate_flags1 -t $(dirname "$pb_dir/x").npz
python -m onnx2fluid.validation $validate_flags2 -t $(dirname "$pb_dir/x").npz
done
rm -rf "$bn_tar/"
}
emotion_ferplus()
{
bn_tar="emotion_ferplus"
fn_tar="$bn_tar.tar.gz"
fn_model="$bn_tar/model.onnx"
http_get "https://onnxzoo.blob.core.windows.net/models/opset_8/emotion_ferplus/$fn_tar"
rm -rf "$bn_tar/"
echo "extracting ..."
tar xf "$fn_tar"
python -m onnx2fluid $convert_flags "$fn_model" -y
for pb_dir in "$bn_tar"/*/
do
echo "converting $pb_dir ..."
python convert_data_pb.py "$pb_dir" Input3 Plus692_Output_0
python -m onnx2fluid.validation $validate_flags1 -t $(dirname "$pb_dir/x").npz
python -m onnx2fluid.validation $validate_flags2 -t $(dirname "$pb_dir/x").npz
done
rm -rf "$bn_tar/"
}
inception_v1()
{
bn_tar="inception_v1"
fn_tar="$bn_tar.tar.gz"
fn_model="$bn_tar/model.onnx"
http_get "$base_url$fn_tar"
rm -rf "$bn_tar/"
echo "extracting ..."
tar xf "$fn_tar"
python -m onnx2fluid $convert_flags "$fn_model"
for npz in "$bn_tar"/*.npz
do
echo "converting $npz ..."
python convert_data_npz_0.py "$npz" data_0 prob_1 -s
python -m onnx2fluid.validation $validate_flags1 -t "$npz"
python -m onnx2fluid.validation $validate_flags2 -t "$npz"
done
for pb_dir in "$bn_tar"/*/
do
echo "converting $pb_dir ..."
python convert_data_pb_0.py "$pb_dir" data_0 prob_1
python -m onnx2fluid.validation $validate_flags1 -t $(dirname "$pb_dir/x").npz
python -m onnx2fluid.validation $validate_flags2 -t $(dirname "$pb_dir/x").npz
done
bn_tar="inception_v1"
fn_tar="$bn_tar.tar.gz"
fn_model="$bn_tar/model.onnx"
http_get "$base_url$fn_tar"
rm -rf "$bn_tar/"
echo "extracting ..."
tar xf "$fn_tar"
python -m onnx2fluid $convert_flags "$fn_model"
for npz in "$bn_tar"/*.npz
do
echo "converting $npz ..."
python convert_data_npz.py "$npz" data_0 prob_1 -s
python -m onnx2fluid.validation $validate_flags1 -t "$npz"
python -m onnx2fluid.validation $validate_flags2 -t "$npz"
done
for pb_dir in "$bn_tar"/*/
do
echo "converting $pb_dir ..."
python convert_data_pb.py "$pb_dir" data_0 prob_1
python -m onnx2fluid.validation $validate_flags1 -t $(dirname "$pb_dir/x").npz
python -m onnx2fluid.validation $validate_flags2 -t $(dirname "$pb_dir/x").npz
done
rm -rf "$bn_tar/"
}
inception_v2()
{
bn_tar="inception_v2"
fn_tar="$bn_tar.tar.gz"
fn_model="$bn_tar/model.onnx"
http_get "$base_url$fn_tar"
rm -rf "$bn_tar/"
echo "extracting ..."
tar xf "$fn_tar"
python -m onnx2fluid $convert_flags "$fn_model"
for npz in "$bn_tar"/*.npz
do
echo "converting $npz ..."
python convert_data_npz_0.py "$npz" data_0 prob_1 -s
python -m onnx2fluid.validation $validate_flags1 -t "$npz"
python -m onnx2fluid.validation $validate_flags2 -t "$npz"
done
for pb_dir in "$bn_tar"/*/
do
echo "converting $pb_dir ..."
python convert_data_pb_0.py "$pb_dir" data_0 prob_1
python -m onnx2fluid.validation $validate_flags1 -t $(dirname "$pb_dir/x").npz
python -m onnx2fluid.validation $validate_flags2 -t $(dirname "$pb_dir/x").npz
done
bn_tar="inception_v2"
fn_tar="$bn_tar.tar.gz"
fn_model="$bn_tar/model.onnx"
http_get "$base_url$fn_tar"
rm -rf "$bn_tar/"
echo "extracting ..."
tar xf "$fn_tar"
python -m onnx2fluid $convert_flags "$fn_model"
for npz in "$bn_tar"/*.npz
do
echo "converting $npz ..."
python convert_data_npz.py "$npz" data_0 prob_1 -s
python -m onnx2fluid.validation $validate_flags1 -t "$npz"
python -m onnx2fluid.validation $validate_flags2 -t "$npz"
done
for pb_dir in "$bn_tar"/*/
do
echo "converting $pb_dir ..."
python convert_data_pb.py "$pb_dir" data_0 prob_1
python -m onnx2fluid.validation $validate_flags1 -t $(dirname "$pb_dir/x").npz
python -m onnx2fluid.validation $validate_flags2 -t $(dirname "$pb_dir/x").npz
done
rm -rf "$bn_tar/"
}
mobilenet()
{
bn_tar="mobilenetv2-1.0"
fn_tar="$bn_tar.tar.gz"
fn_model="$bn_tar/$bn_tar.onnx"
http_get "https://s3.amazonaws.com/onnx-model-zoo/mobilenet/mobilenetv2-1.0/$fn_tar"
rm -rf "$bn_tar/"
echo "extracting ..."
tar xf "$fn_tar"
python -m onnx2fluid $convert_flags "$fn_model" -y
for pb_dir in "$bn_tar"/*/
do
echo "converting $pb_dir ..."
python convert_data_pb.py "$pb_dir" data mobilenetv20_output_flatten0_reshape0
python -m onnx2fluid.validation $validate_flags1 -t $(dirname "$pb_dir/x").npz
python -m onnx2fluid.validation $validate_flags2 -t $(dirname "$pb_dir/x").npz
done
rm -rf "$bn_tar/"
}
resnet18()
{
bn_tar="resnet18v1"
fn_tar="$bn_tar.tar.gz"
fn_model="$bn_tar/$bn_tar.onnx"
http_get "https://s3.amazonaws.com/onnx-model-zoo/resnet/resnet18v1/$fn_tar"
rm -rf "$bn_tar/"
echo "extracting ..."
tar xf "$fn_tar"
python -m onnx2fluid $convert_flags "$fn_model" -y
for pb_dir in "$bn_tar"/*/
do
echo "converting $pb_dir ..."
python convert_data_pb.py "$pb_dir" data resnetv15_dense0_fwd
python -m onnx2fluid.validation $validate_flags1 -t $(dirname "$pb_dir/x").npz
python -m onnx2fluid.validation $validate_flags2 -t $(dirname "$pb_dir/x").npz
done
rm -rf "$bn_tar/"
}
resnet50()
{
bn_tar="resnet50"
fn_tar="$bn_tar.tar.gz"
fn_model="$bn_tar/model.onnx"
http_get "$base_url$fn_tar"
rm -rf "$bn_tar/"
echo "extracting ..."
tar xf "$fn_tar"
python -m onnx2fluid $convert_flags "$fn_model"
for npz in "$bn_tar"/*.npz
do
echo "converting $npz ..."
python convert_data_npz_0.py "$npz" gpu_0/data_0 gpu_0/softmaxout_1 -s
python -m onnx2fluid.validation $validate_flags1 -t "$npz"
python -m onnx2fluid.validation $validate_flags2 -t "$npz"
done
for pb_dir in "$bn_tar"/*/
do
echo "converting $pb_dir ..."
python convert_data_pb_0.py "$pb_dir" gpu_0/data_0 gpu_0/softmaxout_1
python -m onnx2fluid.validation $validate_flags1 -t $(dirname "$pb_dir/x").npz
python -m onnx2fluid.validation $validate_flags2 -t $(dirname "$pb_dir/x").npz
done
bn_tar="resnet50"
fn_tar="$bn_tar.tar.gz"
fn_model="$bn_tar/model.onnx"
http_get "$base_url$fn_tar"
rm -rf "$bn_tar/"
echo "extracting ..."
tar xf "$fn_tar"
python -m onnx2fluid $convert_flags "$fn_model"
for npz in "$bn_tar"/*.npz
do
echo "converting $npz ..."
python convert_data_npz.py "$npz" gpu_0/data_0 gpu_0/softmaxout_1 -s
python -m onnx2fluid.validation $validate_flags1 -t "$npz"
python -m onnx2fluid.validation $validate_flags2 -t "$npz"
done
for pb_dir in "$bn_tar"/*/
do
echo "converting $pb_dir ..."
python convert_data_pb.py "$pb_dir" gpu_0/data_0 gpu_0/softmaxout_1
python -m onnx2fluid.validation $validate_flags1 -t $(dirname "$pb_dir/x").npz
python -m onnx2fluid.validation $validate_flags2 -t $(dirname "$pb_dir/x").npz
done
rm -rf "$bn_tar/"
}
resnet100_arcface()
{
bn_tar="resnet100"
fn_tar="$bn_tar.tar.gz"
fn_model="$bn_tar/$bn_tar.onnx"
http_get "https://s3.amazonaws.com/onnx-model-zoo/arcface/resnet100/$fn_tar"
rm -rf "$bn_tar/"
echo "extracting ..."
tar xf "$fn_tar"
python -m onnx2fluid -o /tmp/export/ "$fn_model" -y
for pb_dir in "$bn_tar"/*/
do
echo "converting $pb_dir ..."
python convert_data_pb.py "$pb_dir" data fc1
python -m onnx2fluid.validation $validate_flags1 -t $(dirname "$pb_dir/x").npz
python -m onnx2fluid.validation $validate_flags2 -t $(dirname "$pb_dir/x").npz
done
rm -rf "$bn_tar/"
}
resnet101_duc()
{
bn_tar="ResNet101_DUC_HDC"
fn_tar="$bn_tar.tar.gz"
fn_model="$bn_tar/$bn_tar.onnx"
http_get "https://s3.amazonaws.com/onnx-model-zoo/duc/$fn_tar"
rm -rf "$bn_tar/"
echo "extracting ..."
tar xf "$fn_tar"
python -m onnx2fluid $convert_flags "$fn_model" -y
for pb_dir in "$bn_tar"/*/
do
echo "converting $pb_dir ..."
python convert_data_pb.py "$pb_dir" data seg_loss
python -m onnx2fluid.validation $validate_flags1 -t $(dirname "$pb_dir/x").npz
python -m onnx2fluid.validation $validate_flags2 -t $(dirname "$pb_dir/x").npz
done
rm -rf "$bn_tar/"
}
resnet152()
{
bn_tar="resnet152v2"
fn_tar="$bn_tar.tar.gz"
fn_model="$bn_tar/$bn_tar.onnx"
http_get "https://s3.amazonaws.com/onnx-model-zoo/resnet/resnet152v2/$fn_tar"
rm -rf "$bn_tar/"
echo "extracting ..."
tar xf "$fn_tar"
python -m onnx2fluid $convert_flags "$fn_model" -y
for pb_dir in "$bn_tar"/*/
do
echo "converting $pb_dir ..."
python convert_data_pb.py "$pb_dir" data resnetv27_dense0_fwd
python -m onnx2fluid.validation $validate_flags1 -t $(dirname "$pb_dir/x").npz
python -m onnx2fluid.validation $validate_flags2 -t $(dirname "$pb_dir/x").npz
done
rm -rf "$bn_tar/"
}
shufflenet()
{
bn_tar="shufflenet"
fn_tar="$bn_tar.tar.gz"
fn_model="$bn_tar/model.onnx"
http_get "$base_url$fn_tar"
rm -rf "$bn_tar/"
echo "extracting ..."
tar xf "$fn_tar"
python -m onnx2fluid $convert_flags "$fn_model"
for pb_dir in "$bn_tar"/*/
do
echo "converting $pb_dir ..."
python convert_data_pb_0.py "$pb_dir" gpu_0/data_0 gpu_0/softmaxout_1
python -m onnx2fluid.validation $validate_flags1 -t $(dirname "$pb_dir/x").npz
python -m onnx2fluid.validation $validate_flags2 -t $(dirname "$pb_dir/x").npz
done
bn_tar="shufflenet"
fn_tar="$bn_tar.tar.gz"
fn_model="$bn_tar/model.onnx"
http_get "$base_url$fn_tar"
rm -rf "$bn_tar/"
echo "extracting ..."
tar xf "$fn_tar"
python -m onnx2fluid $convert_flags "$fn_model"
for pb_dir in "$bn_tar"/*/
do
echo "converting $pb_dir ..."
python convert_data_pb.py "$pb_dir" gpu_0/data_0 gpu_0/softmax_1
python -m onnx2fluid.validation $validate_flags1 -t $(dirname "$pb_dir/x").npz
python -m onnx2fluid.validation $validate_flags2 -t $(dirname "$pb_dir/x").npz
done
rm -rf "$bn_tar/"
}
squeezenet()
{
bn_tar="squeezenet"
fn_tar="$bn_tar.tar.gz"
fn_model="$bn_tar/model.onnx"
http_get "$base_url$fn_tar"
rm -rf "$bn_tar/"
echo "extracting ..."
tar xf "$fn_tar"
python -m onnx2fluid $convert_flags "$fn_model"
for pb_dir in "$bn_tar"/*/
do
echo "converting $pb_dir"
python convert_data_pb_0.py "$pb_dir" data_0 softmaxout_1
python -m onnx2fluid.validation $validate_flags1 -t $(dirname "$pb_dir/x").npz
python -m onnx2fluid.validation $validate_flags2 -t $(dirname "$pb_dir/x").npz
done
bn_tar="squeezenet"
fn_tar="$bn_tar.tar.gz"
fn_model="$bn_tar/model.onnx"
http_get "$base_url$fn_tar"
rm -rf "$bn_tar/"
echo "extracting ..."
tar xf "$fn_tar"
python -m onnx2fluid $convert_flags "$fn_model"
for pb_dir in "$bn_tar"/*/
do
echo "converting $pb_dir"
python convert_data_pb.py "$pb_dir" data_0 softmaxout_1
python -m onnx2fluid.validation $validate_flags1 -t $(dirname "$pb_dir/x").npz
python -m onnx2fluid.validation $validate_flags2 -t $(dirname "$pb_dir/x").npz
done
rm -rf "$bn_tar/"
}
squeezenet1v1()
{
bn_tar="squeezenet1.1"
fn_tar="$bn_tar.tar.gz"
fn_model="$bn_tar/$bn_tar.onnx"
http_get "https://s3.amazonaws.com/onnx-model-zoo/squeezenet/squeezenet1.1/$fn_tar"
rm -rf "$bn_tar/"
echo "extracting ..."
tar xf "$fn_tar"
python -m onnx2fluid $convert_flags "$fn_model"
for pb_dir in "$bn_tar"/*/
do
echo "converting $pb_dir ..."
python convert_data_pb.py "$pb_dir" data squeezenet0_flatten0_reshape0
python -m onnx2fluid.validation $validate_flags1 -t $(dirname "$pb_dir/x").npz
python -m onnx2fluid.validation $validate_flags2 -t $(dirname "$pb_dir/x").npz
done
rm -rf "$bn_tar/"
}
ssd()
{
bn_tar="ssd"
fn_tar="$bn_tar.tar.gz"
fn_model="$bn_tar/model.onnx"
http_get "https://onnxzoo.blob.core.windows.net/models/opset_10/ssd/$fn_tar"
rm -rf "$bn_tar/"
echo "extracting ..."
mkdir "$bn_tar"
tar xf "$fn_tar" -C "$bn_tar"/
python -m onnx2fluid $convert_flags "$fn_model"
for pb_dir in "$bn_tar"/*/
do
echo "converting $pb_dir ..."
python convert_data_pb.py "$pb_dir" image bboxes,labels,scores
python -m onnx2fluid.validation $validate_flags1 -t $(dirname "$pb_dir/x").npz
python -m onnx2fluid.validation $validate_flags2 -t $(dirname "$pb_dir/x").npz
done
rm -rf "$bn_tar/"
}
tiny_yolov2()
{
bn_tar="tiny_yolov2"
fn_tar="$bn_tar.tar.gz"
fn_model="$bn_tar/model.onnx"
http_get "https://onnxzoo.blob.core.windows.net/models/opset_8/tiny_yolov2/$fn_tar"
rm -rf "$bn_tar/"
echo "extracting ..."
tar xf "$fn_tar"
python -m onnx2fluid $convert_flags "$fn_model" -xy
for pb_dir in "$bn_tar"/*/
do
echo "converting $pb_dir"
python convert_data_pb_0.py "$pb_dir" image grid
python -m onnx2fluid.validation $validate_flags1 -t $(dirname "$pb_dir/x").npz
python -m onnx2fluid.validation $validate_flags2 -t $(dirname "$pb_dir/x").npz
done
bn_tar="tiny_yolov2"
fn_tar="$bn_tar.tar.gz"
fn_model="$bn_tar/model.onnx"
http_get "https://onnxzoo.blob.core.windows.net/models/opset_8/tiny_yolov2/$fn_tar"
rm -rf "$bn_tar/"
echo "extracting ..."
tar xf "$fn_tar"
python -m onnx2fluid $convert_flags "$fn_model" -y
for pb_dir in "$bn_tar"/*/
do
echo "converting $pb_dir"
python convert_data_pb.py "$pb_dir" image grid
python -m onnx2fluid.validation $validate_flags1 -t $(dirname "$pb_dir/x").npz
python -m onnx2fluid.validation $validate_flags2 -t $(dirname "$pb_dir/x").npz
done
rm -rf "$bn_tar/"
}
vgg16bn()
{
bn_tar="vgg16-bn"
fn_tar="$bn_tar.tar.gz"
fn_model="$bn_tar/$bn_tar.onnx"
http_get "https://s3.amazonaws.com/onnx-model-zoo/vgg/vgg16-bn/$fn_tar"
rm -rf "$bn_tar/"
echo "extracting ..."
tar xf "$fn_tar"
python -m onnx2fluid $convert_flags "$fn_model" -y
for pb_dir in "$bn_tar"/*/
do
echo "converting $pb_dir ..."
python convert_data_pb.py "$pb_dir" data vgg0_dense2_fwd
python -m onnx2fluid.validation $validate_flags1 -t $(dirname "$pb_dir/x").npz
python -m onnx2fluid.validation $validate_flags2 -t $(dirname "$pb_dir/x").npz
done
rm -rf "$bn_tar/"
}
vgg19()
{
bn_tar="vgg19"
fn_tar="$bn_tar.tar.gz"
fn_model="$bn_tar/model.onnx"
http_get "$base_url$fn_tar"
rm -rf "$bn_tar/"
echo "extracting ..."
tar xf "$fn_tar"
python -m onnx2fluid $convert_flags "$fn_model"
for pb_dir in "$bn_tar"/*/
do
echo "converting $pb_dir"
python convert_data_pb_0.py "$pb_dir" data_0 prob_1
python -m onnx2fluid.validation $validate_flags1 -t $(dirname "$pb_dir/x").npz
python -m onnx2fluid.validation $validate_flags2 -t $(dirname "$pb_dir/x").npz
done
bn_tar="vgg19"
fn_tar="$bn_tar.tar.gz"
fn_model="$bn_tar/model.onnx"
http_get "$base_url$fn_tar"
rm -rf "$bn_tar/"
echo "extracting ..."
tar xf "$fn_tar"
python -m onnx2fluid $convert_flags "$fn_model"
for pb_dir in "$bn_tar"/*/
do
echo "converting $pb_dir"
python convert_data_pb.py "$pb_dir" data_0 prob_1
python -m onnx2fluid.validation $validate_flags1 -t $(dirname "$pb_dir/x").npz
python -m onnx2fluid.validation $validate_flags2 -t $(dirname "$pb_dir/x").npz
done
rm -rf "$bn_tar/"
}
yolov3()
{
bn_tar="yolov3"
fn_tar="$bn_tar.tar.gz"
fn_model="$bn_tar/model.onnx"
http_get "https://onnxzoo.blob.core.windows.net/models/opset_10/yolov3/$fn_tar"
rm -rf "$bn_tar/"
echo "extracting ..."
tar xf "$fn_tar"
python -m onnx2fluid $convert_flags "$fn_model" -x #
for pb_dir in "$bn_tar"/*/
do
echo "converting $pb_dir ..."
python convert_data_pb.py "$pb_dir" input_1:01,image_shape:01 yolonms_layer_1/ExpandDims_1:0,yolonms_layer_1/ExpandDims_3:0,yolonms_layer_1/concat_2:0
python -m onnx2fluid.validation $validate_flags1 -t $(dirname "$pb_dir/x").npz
python -m onnx2fluid.validation $validate_flags2 -t $(dirname "$pb_dir/x").npz
done
rm -rf "$bn_tar/"
}
zfnet512()
{
bn_tar="zfnet512"
fn_tar="$bn_tar.tar.gz"
fn_model="$bn_tar/model.onnx"
http_get "$base_url$fn_tar"
rm -rf "$bn_tar/"
echo "extracting ..."
tar xf "$fn_tar"
python -m onnx2fluid $convert_flags "$fn_model"
for pb_dir in "$bn_tar"/*/
do
echo "converting $pb_dir"
python convert_data_pb_0.py "$pb_dir" gpu_0/data_0 gpu_0/softmax_1
python -m onnx2fluid.validation $validate_flags1 -t $(dirname "$pb_dir/x").npz
python -m onnx2fluid.validation $validate_flags2 -t $(dirname "$pb_dir/x").npz
done
bn_tar="zfnet512"
fn_tar="$bn_tar.tar.gz"
fn_model="$bn_tar/model.onnx"
http_get "$base_url$fn_tar"
rm -rf "$bn_tar/"
echo "extracting ..."
tar xf "$fn_tar"
python -m onnx2fluid $convert_flags "$fn_model"
for pb_dir in "$bn_tar"/*/
do
echo "converting $pb_dir"
python convert_data_pb.py "$pb_dir" gpu_0/data_0 gpu_0/softmax_1
python -m onnx2fluid.validation $validate_flags1 -t $(dirname "$pb_dir/x").npz
python -m onnx2fluid.validation $validate_flags2 -t $(dirname "$pb_dir/x").npz
done
rm -rf "$bn_tar/"
}
......@@ -296,11 +582,22 @@ bvlc_alexnet
bvlc_googlenet
bvlc_reference_caffenet
bvlc_reference_rcnn_ilsvrc13
densenet121
emotion_ferplus # not supported
inception_v1
inception_v2
mobilenet
resnet18
resnet50
resnet100_arcface
resnet101_duc
resnet152
shufflenet
squeezenet # softmax bug
# tiny_yolov2 # not supported
squeezenet1v1
ssd # version not supported
tiny_yolov2 # not supported
vgg16bn
vgg19
yolov3 # malformed model ?
zfnet512
......@@ -92,7 +92,7 @@ parser.add_argument(
parser.add_argument(
'--rtol',
type=float,
default=1e-4,
default=1e-2,
help='assertion relative tolerance for validation',
)
args = parser.parse_args()
......
......@@ -22,7 +22,6 @@ __all__ = [
'main',
]
DEFAULT_ONNX_OPSET_VERSION = 9
DEFAULT_MODEL_MODULE = 'model'
DEFAULT_MODEL_FUNC = 'inference'
......@@ -30,6 +29,7 @@ DEFAULT_MODEL_FUNC = 'inference'
def main(**kwargs):
"""主程序入口"""
from .conversion import DEFAULT_ONNX_OPSET_VERSION
from .conversion import convert
logger = logging.getLogger('onnx2fluid')
......@@ -44,9 +44,9 @@ def main(**kwargs):
if save_dir else basepath) + shutil.os.sep
model_basename = DEFAULT_MODEL_MODULE + '.py'
model_func_name = DEFAULT_MODEL_FUNC
onnx_opset_version = DEFAULT_ONNX_OPSET_VERSION
onnx_opset_pedantic = kwargs.pop('pedantic', True)
onnx_skip_version_conversion = kwargs.pop('skip_version_conversion', False)
skip_version_conversion = kwargs.pop('skip_version_conversion', False)
onnx_opset_version = None if skip_version_conversion else DEFAULT_ONNX_OPSET_VERSION
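# a None opset version tells convert() to keep the model's own opset
# (see the `onnx_opset_version is None` workaround branch in conversion)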
# convert
convert(filename,
......@@ -55,7 +55,6 @@ def main(**kwargs):
model_func_name=model_func_name,
onnx_opset_version=onnx_opset_version,
onnx_opset_pedantic=onnx_opset_pedantic,
onnx_skip_version_conversion=onnx_skip_version_conversion,
**kwargs)
# validate
......@@ -69,13 +68,12 @@ def main(**kwargs):
golden_data_filename, **kwargs)
logger.info('starting validation on code ...')
passed &= validate(
shutil.os.path.join(save_dir, model_basename),
golden_data_filename,
model_func_name=model_func_name,
save_inference_model=
debug, # re-generate desc proto with python code when debug on
**kwargs)
# this re-generates the desc proto from the Python code when debug is on
passed &= validate(shutil.os.path.join(save_dir, model_basename),
golden_data_filename,
model_func_name=model_func_name,
save_inference_model=debug,
**kwargs)
if not passed:
logger.error('validation failed, exit')
......
......@@ -14,20 +14,21 @@ __all__ = [
'convert',
]
DEFAULT_ONNX_OPSET_VERSION = 9
def convert(onnx_model_filename,
save_dir,
model_basename='model.py',
model_func_name='inference',
embed_params=False,
onnx_opset_version=9,
onnx_opset_version=None,
onnx_opset_pedantic=True,
onnx_skip_version_conversion=False,
debug=False,
**kwargs):
"""
convert an ONNX model to Paddle fluid Python code and desc pb
"""
convert an ONNX model to Paddle fluid Python code and desc pb
"""
import onnx
......@@ -50,11 +51,13 @@ def convert(onnx_model_filename,
# prepare onnx model
logger.info('loading model: %s ...', onnx_model_filename)
onnx_model = onnx.load(onnx_model_filename)
try:
logger.info('checking model ...')
check_model(onnx_model)
if onnx_skip_version_conversion: # WORKAROUND: RuntimeError: No Adapter For OP
logger.debug('assumed opset version: %d', onnx_opset_version)
if onnx_opset_version is None: # WORKAROUND: RuntimeError: No Adapter For OP
logger.debug('assumed opset version: %d',
DEFAULT_ONNX_OPSET_VERSION)
logger.warning(
'opset conversion skipped because onnx_opset_pedantic is OFF')
else:
......@@ -68,6 +71,7 @@ def convert(onnx_model_filename,
logger.warning('because onnx_opset_pedantic is OFF,')
logger.warning('the ONNX model sanity-checking error is suppressed')
logger.warning('value_info inference may be incomplete')
# onnx model optimization
logger.info('model has %d ops', len(onnx_model.graph.node))
logger.info('optimizing model ...')
......@@ -87,10 +91,7 @@ def convert(onnx_model_filename,
debug_model_filename, _ = shutil.os.path.splitext(onnx_model_filename)
onnx.save(model, debug_model_filename + '.optimized_and_inferred.onnx')
# onnx.save(model, '/tmp/export/optimized_and_inffered.onnx')
# I/O instances
# I/O instances
onnx_graph = onnx_model.graph
fluid_program = Program()
fluid_writer = Writer()
......@@ -114,8 +115,8 @@ def convert(onnx_model_filename,
# op set conversion
# topo = 'backward' if embed_params else 'forward'
topo = 'forward'
for name, domain, op_type, inputs, outputs, attrs in graph_ops(
onnx_graph, topo=topo):
for name, domain, op_type, inputs, outputs, attrs in graph_ops(onnx_graph,
topo=topo):
logger.debug('translating op %s %s::%s ...', name, domain, op_type)
if domain == DEFAULT_OP_DOMAIN:
domain = ''
......@@ -140,6 +141,24 @@ def convert(onnx_model_filename,
logger.info('%d ops in, %d ops out', len(onnx_graph.node),
len(fluid_program.op_descs))
# shape-inference
for name, value_info in graph_value_infos.items():
var_name = make_var_name(name)
fluid_program.VarTypeInfo(var_name, value_info,
remove_batch=False) # shape-infer only
bad_var_names = []
for var_name, var_desc in fluid_program.var_descs.items():
if not var_desc.type.lod_tensor.HasField('tensor'):
bad_var_names.append(var_name)
if len(bad_var_names) > 0:
logger.warning('type info not inferred for var %s ...',
', '.join(bad_var_names[:5]))
logger.warning('this causes little trouble for PaddlePaddle, '
'but Paddle Mobile may not infer correctly')
logger.warning(
'please consider adding option -d to invoke PaddlePaddle shape-inference'
)
# weight writer
for name, weight in graph_weights(onnx_graph):
graph_params.append(name)
......@@ -173,9 +192,10 @@ def convert(onnx_model_filename,
value_info = graph_value_infos[name]
assert value_info['external']
external_inputs.append(name)
fluid_writer.emit_inputs(
fluid_program, external_inputs, graph_value_infos,
remove_batch=False) # TODO:
fluid_writer.emit_inputs(fluid_program,
external_inputs,
graph_value_infos,
remove_batch=False) # TODO:
input_codes = fluid_program.codes
fluid_program.codes = []
logger.info('%d inputs converted', len(external_inputs))
......@@ -206,12 +226,13 @@ def convert(onnx_model_filename,
fluid_writer.write_desc_file(
desc_filename,
op_descs=fluid_program.op_descs,
var_descs=fluid_program.var_descs,
var_descs=list(fluid_program.var_descs.values()),
)
logger.info('program saved to %s', desc_filename)
logger.info('conversion finished')
if __name__ == '__main__':
del convert
......@@ -283,10 +304,9 @@ if __name__ == '__main__':
pedantic = args.pedantic
skip_version_conversion = args.skip_version_conversion
convert(
model_filename,
save_dir,
embed_params=embed_params,
onnx_opset_pedantic=pedantic,
onnx_skip_version_conversion=skip_version_conversion,
debug=debug)
convert(model_filename,
save_dir,
embed_params=embed_params,
onnx_opset_pedantic=pedantic,
onnx_skip_version_conversion=skip_version_conversion,
debug=debug)
......@@ -28,30 +28,66 @@ _ATTRTYPE = _descriptor.EnumDescriptor(
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='INT', index=0, number=0, options=None, type=None),
_descriptor.EnumValueDescriptor(
name='FLOAT', index=1, number=1, options=None, type=None),
_descriptor.EnumValueDescriptor(
name='STRING', index=2, number=2, options=None, type=None),
_descriptor.EnumValueDescriptor(
name='INTS', index=3, number=3, options=None, type=None),
_descriptor.EnumValueDescriptor(
name='FLOATS', index=4, number=4, options=None, type=None),
_descriptor.EnumValueDescriptor(
name='STRINGS', index=5, number=5, options=None, type=None),
_descriptor.EnumValueDescriptor(
name='BOOLEAN', index=6, number=6, options=None, type=None),
_descriptor.EnumValueDescriptor(
name='BOOLEANS', index=7, number=7, options=None, type=None),
_descriptor.EnumValueDescriptor(
name='BLOCK', index=8, number=8, options=None, type=None),
_descriptor.EnumValueDescriptor(
name='LONG', index=9, number=9, options=None, type=None),
_descriptor.EnumValueDescriptor(
name='BLOCKS', index=10, number=10, options=None, type=None),
_descriptor.EnumValueDescriptor(
name='LONGS', index=11, number=11, options=None, type=None),
_descriptor.EnumValueDescriptor(name='INT',
index=0,
number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(name='FLOAT',
index=1,
number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(name='STRING',
index=2,
number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(name='INTS',
index=3,
number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(name='FLOATS',
index=4,
number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(name='STRINGS',
index=5,
number=5,
options=None,
type=None),
_descriptor.EnumValueDescriptor(name='BOOLEAN',
index=6,
number=6,
options=None,
type=None),
_descriptor.EnumValueDescriptor(name='BOOLEANS',
index=7,
number=7,
options=None,
type=None),
_descriptor.EnumValueDescriptor(name='BLOCK',
index=8,
number=8,
options=None,
type=None),
_descriptor.EnumValueDescriptor(name='LONG',
index=9,
number=9,
options=None,
type=None),
_descriptor.EnumValueDescriptor(name='BLOCKS',
index=10,
number=10,
options=None,
type=None),
_descriptor.EnumValueDescriptor(name='LONGS',
index=11,
number=11,
options=None,
type=None),
],
containing_type=None,
options=None,
......@@ -80,53 +116,111 @@ _VARTYPE_TYPE = _descriptor.EnumDescriptor(
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='BOOL', index=0, number=0, options=None, type=None),
_descriptor.EnumValueDescriptor(
name='INT16', index=1, number=1, options=None, type=None),
_descriptor.EnumValueDescriptor(
name='INT32', index=2, number=2, options=None, type=None),
_descriptor.EnumValueDescriptor(
name='INT64', index=3, number=3, options=None, type=None),
_descriptor.EnumValueDescriptor(
name='FP16', index=4, number=4, options=None, type=None),
_descriptor.EnumValueDescriptor(
name='FP32', index=5, number=5, options=None, type=None),
_descriptor.EnumValueDescriptor(
name='FP64', index=6, number=6, options=None, type=None),
_descriptor.EnumValueDescriptor(
name='SIZE_T', index=7, number=19, options=None, type=None),
_descriptor.EnumValueDescriptor(
name='UINT8', index=8, number=20, options=None, type=None),
_descriptor.EnumValueDescriptor(
name='INT8', index=9, number=21, options=None, type=None),
_descriptor.EnumValueDescriptor(
name='LOD_TENSOR', index=10, number=7, options=None, type=None),
_descriptor.EnumValueDescriptor(
name='SELECTED_ROWS', index=11, number=8, options=None, type=None),
_descriptor.EnumValueDescriptor(
name='FEED_MINIBATCH', index=12, number=9, options=None, type=None),
_descriptor.EnumValueDescriptor(
name='FETCH_LIST', index=13, number=10, options=None, type=None),
_descriptor.EnumValueDescriptor(
name='STEP_SCOPES', index=14, number=11, options=None, type=None),
_descriptor.EnumValueDescriptor(
name='LOD_RANK_TABLE', index=15, number=12, options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LOD_TENSOR_ARRAY',
index=16,
number=13,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PLACE_LIST', index=17, number=14, options=None, type=None),
_descriptor.EnumValueDescriptor(
name='READER', index=18, number=15, options=None, type=None),
_descriptor.EnumValueDescriptor(
name='RAW', index=19, number=17, options=None, type=None),
_descriptor.EnumValueDescriptor(
name='TUPLE', index=20, number=18, options=None, type=None),
_descriptor.EnumValueDescriptor(name='BOOL',
index=0,
number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(name='INT16',
index=1,
number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(name='INT32',
index=2,
number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(name='INT64',
index=3,
number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(name='FP16',
index=4,
number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(name='FP32',
index=5,
number=5,
options=None,
type=None),
_descriptor.EnumValueDescriptor(name='FP64',
index=6,
number=6,
options=None,
type=None),
_descriptor.EnumValueDescriptor(name='SIZE_T',
index=7,
number=19,
options=None,
type=None),
_descriptor.EnumValueDescriptor(name='UINT8',
index=8,
number=20,
options=None,
type=None),
_descriptor.EnumValueDescriptor(name='INT8',
index=9,
number=21,
options=None,
type=None),
_descriptor.EnumValueDescriptor(name='LOD_TENSOR',
index=10,
number=7,
options=None,
type=None),
_descriptor.EnumValueDescriptor(name='SELECTED_ROWS',
index=11,
number=8,
options=None,
type=None),
_descriptor.EnumValueDescriptor(name='FEED_MINIBATCH',
index=12,
number=9,
options=None,
type=None),
_descriptor.EnumValueDescriptor(name='FETCH_LIST',
index=13,
number=10,
options=None,
type=None),
_descriptor.EnumValueDescriptor(name='STEP_SCOPES',
index=14,
number=11,
options=None,
type=None),
_descriptor.EnumValueDescriptor(name='LOD_RANK_TABLE',
index=15,
number=12,
options=None,
type=None),
_descriptor.EnumValueDescriptor(name='LOD_TENSOR_ARRAY',
index=16,
number=13,
options=None,
type=None),
_descriptor.EnumValueDescriptor(name='PLACE_LIST',
index=17,
number=14,
options=None,
type=None),
_descriptor.EnumValueDescriptor(name='READER',
index=18,
number=15,
options=None,
type=None),
_descriptor.EnumValueDescriptor(name='RAW',
index=19,
number=17,
options=None,
type=None),
_descriptor.EnumValueDescriptor(name='TUPLE',
index=20,
number=18,
options=None,
type=None),
],
containing_type=None,
options=None,
......@@ -1480,11 +1574,10 @@ DESCRIPTOR.enum_types_by_name['AttrType'] = _ATTRTYPE
Version = _reflection.GeneratedProtocolMessageType(
'Version',
(_message.Message, ),
dict(
DESCRIPTOR=_VERSION,
__module__='framework_pb2'
# @@protoc_insertion_point(class_scope:paddle.framework.proto.Version)
))
dict(DESCRIPTOR=_VERSION,
__module__='framework_pb2'
# @@protoc_insertion_point(class_scope:paddle.framework.proto.Version)
))
_sym_db.RegisterMessage(Version)
OpDesc = _reflection.GeneratedProtocolMessageType(
......@@ -1601,11 +1694,10 @@ _sym_db.RegisterMessage(VarType.Tuple)
VarDesc = _reflection.GeneratedProtocolMessageType(
'VarDesc',
(_message.Message, ),
dict(
DESCRIPTOR=_VARDESC,
__module__='framework_pb2'
# @@protoc_insertion_point(class_scope:paddle.framework.proto.VarDesc)
))
dict(DESCRIPTOR=_VARDESC,
__module__='framework_pb2'
# @@protoc_insertion_point(class_scope:paddle.framework.proto.VarDesc)
))
_sym_db.RegisterMessage(VarDesc)
BlockDesc = _reflection.GeneratedProtocolMessageType(
......
......@@ -44,29 +44,29 @@ DEFAULT_OP_DOMAIN = 'ai.onnx'
def print_pb_structure(message, loop_iterative=False, depth=0):
"""
print pb fields in its structure
"""
print pb fields in its structure
"""
if hasattr(message, 'DESCRIPTOR') and hasattr(message.DESCRIPTOR, 'fields'):
for field in message.DESCRIPTOR.fields:
print('\t' * depth + '-', field.name)
print_pb_structure(
getattr(message, field.name),
loop_iterative=loop_iterative,
depth=(depth + 1))
print_pb_structure(getattr(message, field.name),
loop_iterative=loop_iterative,
depth=(depth + 1))
if loop_iterative and hasattr(message, 'MergeFrom') and hasattr(
message, '__len__'):
for idx, item in enumerate(message):
print('\t' * depth + '-', idx)
print_pb_structure(
item, loop_iterative=loop_iterative, depth=(depth + 1))
print_pb_structure(item,
loop_iterative=loop_iterative,
depth=(depth + 1))
def build_value_refs(nodes):
"""
build op reference of inputs and outputs
"""
build op reference of inputs and outputs
"""
input_refs = Dict()
output_refs = Dict()
......@@ -80,14 +80,15 @@ def build_value_refs(nodes):
def get_attribute_value2(attr):
"""
get_attribute_value enhanced
"""
get_attribute_value enhanced
"""
if attr.type == onnx.AttributeProto.TENSOR:
dtype = np.dtype(TENSOR_TYPE_TO_NP_TYPE[attr.t.data_type])
data = attr.t.raw_data
value = np.frombuffer(
data, dtype=dtype, count=(len(data) // dtype.itemsize))
value = np.frombuffer(data,
dtype=dtype,
count=(len(data) // dtype.itemsize))
elif attr.type == onnx.AttributeProto.STRING:
value = attr.s
value = value.decode() if isinstance(value, bytes) else value
......@@ -98,24 +99,24 @@ def get_attribute_value2(attr):
def tensor_dtype(tensor):
"""
get ONNX tensor in np.dtype
"""
get ONNX tensor in np.dtype
"""
return TENSOR_TYPE_TO_NP_TYPE[tensor.type.tensor_type.elem_type]
def tensor_shape(tensor):
"""
get ONNX tensor shape
"""
get ONNX tensor shape
"""
return [dim.dim_value for dim in tensor.type.tensor_type.shape.dim]
def node_attrs(node):
"""
convert ONNX node attributes to dict
"""
convert ONNX node attributes to dict
"""
return {attr.name: get_attribute_value2(attr)
for attr in node.attribute} # dict
......@@ -123,8 +124,8 @@ def node_attrs(node):
def node_topo(nodes, topo='default'):
"""
build node indices for an ONNX graph in the given topological order
"""
build node indices for an ONNX graph in the given topological order
"""
if topo == 'default':
return list(range(len(nodes)))
......@@ -191,8 +192,8 @@ def node_topo(nodes, topo='default'):
def node_iter(nodes, indices=None):
"""
generator for ONNX node graph with given indices
"""
generator for ONNX node graph with given indices
"""
if indices is None:
indices = range(len(nodes))
......@@ -208,6 +209,9 @@ def node_iter(nodes, indices=None):
if name == '':
name = 'op_' + str(index)
else: # make_op_name
for s in ' \\|/:': #
name = name.replace(s, '_')
if domain == '':
domain = DEFAULT_OP_DOMAIN
......@@ -216,8 +220,8 @@ def node_iter(nodes, indices=None):
def graph_ops(graph, topo='default'):
"""
generator for ONNX node graph with given topology
"""
generator for ONNX node graph with given topology
"""
if not isinstance(graph, onnx.GraphProto):
logger.error('graph is not a GraphProto instance')
......@@ -228,8 +232,8 @@ def graph_ops(graph, topo='default'):
def graph_weights(graph):
"""
generator for weights of an ONNX model
"""
generator for weights of an ONNX model
"""
if not isinstance(graph, onnx.GraphProto):
logger.error('graph is not a GraphProto instance')
......@@ -243,39 +247,39 @@ def graph_weights(graph):
def inferred_model_value_info(model):
"""
collect value/type info for an ONNX model
"""
collect value/type info for an ONNX model
"""
model = infer_shapes(model)
graph = model.graph
value_info = Dict()
for item in graph.value_info:
value_info[item.name] = dict(
dtype=tensor_dtype(item),
shape=tensor_shape(item),
external=False,
)
value_info[item.name] = {
'dtype': tensor_dtype(item),
'shape': tensor_shape(item),
'external': False,
}
for item in graph.input:
assert item.name not in value_info
value_info[item.name] = dict(
dtype=tensor_dtype(item),
shape=tensor_shape(item),
external=True,
)
value_info[item.name] = {
'dtype': tensor_dtype(item),
'shape': tensor_shape(item),
'external': True,
}
for item in graph.output:
# assert item.name not in value_info, 'bypass-model not supported'
value_info[item.name] = dict(
dtype=tensor_dtype(item),
shape=tensor_shape(item),
external=True,
)
value_info[item.name] = {
'dtype': tensor_dtype(item),
'shape': tensor_shape(item),
'external': True,
}
return value_info
def skip_node_forward(nodes, src_output_name, dst_input_name, input_refs):
"""
skip nodes between src_output_name -> dst_input_name and connect this pair
"""
skip nodes between src_output_name -> dst_input_name and connect this pair
"""
processed = 0
for next_idx in input_refs[src_output_name]:
......@@ -289,8 +293,8 @@ def skip_node_forward(nodes, src_output_name, dst_input_name, input_refs):
def skip_node_backward(nodes, src_input_name, dst_output_name, output_refs):
"""
skip nodes between dst_output_name -> src_input_name and connect this pair
"""
skip nodes between dst_output_name -> src_input_name and connect this pair
"""
processed = 0
for prev_idx in output_refs[src_input_name]:
......@@ -304,10 +308,10 @@ def skip_node_backward(nodes, src_input_name, dst_output_name, output_refs):
def optimize_model_skip_op_for_inference(model, op_list=None):
"""
skip ops that can be bypassed for inference
"""
skip ops that can be bypassed for inference
"""
if op_list is None:
op_list = ['Dropout']
op_list = ('Dropout', 'Identity')
nodes = model.graph.node
input_refs, output_refs = build_value_refs(nodes)
......@@ -325,7 +329,7 @@ def optimize_model_skip_op_for_inference(model, op_list=None):
if not (op_type in op_list):
continue
if op_type in ['Dropout']:
if op_type in ('Dropout', ):
input_name = node.input[0]
output_name = node.output[0]
elif not (len(node.input) == 1 and len(node.output) == 1):
......@@ -365,8 +369,8 @@ def optimize_model_skip_op_for_inference(model, op_list=None):
def optimize_model_strip_initializer(model, keep_input_only=True):
"""
strip weights for inference
"""
nodes = model.graph.node
input_refs, output_refs = build_value_refs(nodes)
......@@ -406,8 +410,8 @@ def optimize_model_strip_initializer(model, keep_input_only=True):
def optimize_model_cast(model):
"""
strip cascaded and unnecessary onnx::Cast-9 ops
"""
nodes = model.graph.node
input_refs, output_refs = build_value_refs(nodes)
......@@ -463,13 +467,13 @@ def optimize_model_cast(model):
def optimize_model_slice(model):
"""
strip cascaded and unnecessary onnx::Slice-1:9 ops
"""
nodes = model.graph.node
input_refs, output_refs = build_value_refs(nodes)
def build_slice_node_chain(node_idx):
chain = []
while True:
node = nodes[node_idx]
......@@ -485,7 +489,7 @@ def optimize_model_slice(model):
node_idx = list(input_refs[output_name])[0]
# axis: (start, end)
def merge_slice(slice_chain):
merged_slice = dict()
for slice_node_idx in slice_chain:
node = nodes[slice_node_idx]
......@@ -508,14 +512,14 @@ def optimize_model_slice(model):
ret_nodes = ret.graph.node
nodes_to_remove = []
for node_idx in range(len(nodes)):
slice_chain = build_slice_node_chain(node_idx)
if len(slice_chain) == 0:
continue
merged_slice = merge_slice(slice_chain)
if len(merged_slice) > 0 and len(slice_chain) == 1: # no need to merge
continue
attrs = {'axes': [], 'starts': [], 'ends': []}
for axis, (start, end) in merged_slice.items():
attrs['axes'].append(axis)
attrs['starts'].append(start)
......
......@@ -38,76 +38,77 @@ DEFAULT_OP_MAPPING_FIELD_VALUES[
DEFAULT_OP_MAPPING_FIELD_VALUES[
'OUTPUT_PERM'] = None # sampler: [idx_onnx_arg...]
DEFAULT_OP_MAPPING_FIELD_VALUES['FILL_NAME_FIELD'] = True
DEFAULT_OP_MAPPING_VALUES = list(DEFAULT_OP_MAPPING_FIELD_VALUES.values())
DEFAULT_OP_MAPPING = {
## nil ops ##
'RandomUniform':
['uniform_random', [], ['Out'], dict(high='max', low='min'),
dict(), None, None, False],
'RandomNormal':
['gaussian_random', [], ['Out'], dict(scale='std'),
dict(), None, None, False],
## unary ops ##
'Abs': ['abs', ['X'], ['Out']],
'ArgMax': ['argmax', ['X'], ['Out'], dict(keepdims='')],
'ArgMin': ['argmin', ['X'], ['Out'], dict(keepdims='')],
'Ceil': ['ceil', ['X'], ['Out']],
'Clip': ['clip', ['X'], ['Out']], # attrs bypassed
'Cos': ['cos', ['X'], ['Out']],
'Elu': ['elu', ['X'], ['Out']],
'Exp': ['exp', ['X'], ['Out']],
'Flatten': ['flatten', ['X'], ['Out']], # attrs bypassed, FIXME: emit flatten2
'Floor': ['floor', ['X'], ['Out']],
'Gather': ['gather', ['X'], ['Out'], dict(axis='')],
'LeakyRelu': ['leaky_relu', ['X'], ['Out']],
'Log': ['log', ['X'], ['Out']],
'LRN': ['lrn', ['X'], ['Out', 'MidOut'], dict(size='n', bias='k')], #
'Reciprocal': ['reciprocal', ['X'], ['Out']],
'Relu': ['relu', ['X'], ['Out']],
'Selu': ['selu', ['X'], ['Out'], dict(gamma='scale')],
'Shape': ['shape', ['X'], ['Out']], # FIXME: out is int64 vs int32
'Shrink': ['softshrink', ['X'], ['Out'], dict(bias='', lambd='')],  # ONNX attr is spelled 'lambd'
'Sigmoid': ['sigmoid', ['X'], ['Out']],
'Sin': ['sin', ['X'], ['Out']],
'Squeeze': ['squeeze', ['X'], ['Out']], # attrs bypassed, FIXME: emit squeeze2
'Softplus': ['softplus', ['X'], ['Out']],
# FIXME: default axis = -1, reshape required before and after
'Softmax': ['softmax', ['X'], ['Out'], dict(axis='')],
'Softsign': ['softsign', ['X'], ['Out']],
'Sqrt': ['sqrt', ['X'], ['Out']],
'Tanh': ['tanh', ['X'], ['Out']],
'ThresholdedRelu': ['thresholded_relu', ['X'], ['Out'], dict(alpha='threshold')],
#'Transpose': ['transpose', ['X'], ['Out']],
'Unsqueeze': ['unsqueeze', ['X'], ['Out']], # attrs bypassed, FIXME: emit unsqueeze2
## binary ops ##
'Add': ['elementwise_add', ['X', 'Y'], ['Out'], dict(), dict(axis=-1)],
#'AffineGrid': ['affine_grid', ['Theta'], ['Output'], dict(size='out_shape')],
'And': ['logical_and', ['X', 'Y'], ['Out']],
'Div': ['elementwise_div', ['X', 'Y'], ['Out'], dict(), dict(axis=-1)],
'Equal': ['equal', ['X', 'Y'], ['Out'], dict(), dict(), None, None, False],
'Greater': ['less_than', ['X', 'Y'], ['Out'], dict(), dict(), [1, 0], None, False],
'Less': ['less_than', ['X', 'Y'], ['Out'], dict(), dict(), None, None, False],
'MatMul': ['matmul', ['X', 'Y'], ['Out']], # defaults excluded for transpose_x vs transpose_X
'Max': ['elementwise_max', ['X', 'Y'], ['Out'], dict(), dict(axis=-1)],
'Min': ['elementwise_min', ['X', 'Y'], ['Out'], dict(), dict(axis=-1)],
'Mul': ['elementwise_mul', ['X', 'Y'], ['Out'], dict(), dict(axis=-1)],
'Not': ['logical_not', ['X', 'Y'], ['Out']],
'OneHot': # assuming values=[0, 1], axis=-1 and drop them
['one_hot', ['Input', 'Depth'], ['Out'], dict(axis=''), dict(),
[0, 1], None, False],
'Or': ['logical_or', ['X', 'Y'], ['Out']],
'Pow': ['elementwise_pow', ['X', 'Y'], ['Out'], dict(), dict(axis=-1)], # TODO: pow for scalar exponent
'Sub': ['elementwise_sub', ['X', 'Y'], ['Out'], dict(), dict(axis=-1)],
'Xor': ['logical_xor', ['X', 'Y'], ['Out']],
# reduce ops
'ReduceMax': ['reduce_max', ['X'], ['Out'], dict(axes='dim', keepdims='keep_dim')],
'ReduceMean': ['reduce_mean', ['X'], ['Out'], dict(axes='dim', keepdims='keep_dim')],
'ReduceMin': ['reduce_min', ['X'], ['Out'], dict(axes='dim', keepdims='keep_dim')],
'ReduceProd': ['reduce_prod', ['X'], ['Out'], dict(axes='dim', keepdims='keep_dim')],
'ReduceSum': ['reduce_sum', ['X'], ['Out'], dict(axes='dim', keepdims='keep_dim')],
# other ops
'Scatter': ['scatter', ['X', 'Index', 'Updates'], ['Out']],
'TopK': ['topk', ['X', 'K'], ['Out', 'Indices']],
}
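# An editor's sketch of how a mapping row is consumed (mirrors _default below;
# 'Abs' is only an example key, and the field order follows
# DEFAULT_OP_MAPPING_FIELD_VALUES above):
def demo_mapping_row(op_type='Abs'):
    info = list(DEFAULT_OP_MAPPING[op_type])
    info.extend(DEFAULT_OP_MAPPING_VALUES[len(info):])  # pad missing fields with defaults
    fluid_op, fluid_inputs, fluid_outputs = info[:3]
    return fluid_op, fluid_inputs, fluid_outputs  # -> ('abs', ['X'], ['Out'])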
DEFAULT_IOA_CONSTRAINTS = {
......@@ -145,24 +146,20 @@ DEFAULT_IOA_CONSTRAINTS = {
def _make_var_name(name):
"""
make a valid variable name in Python code and in filesystem
"""
if name == '':
return '_'
if name[0].isdigit():
return 'var_' + name
for s in ' \\|/:': #
name = name.replace(s, '_')
if name.startswith('_'):
name = 'var' + name
return name
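# A few behavioral examples (editor's sketch):
assert _make_var_name('') == '_'
assert _make_var_name('data/blob:0') == 'data_blob_0'
assert _make_var_name('_hidden') == 'var_hidden'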
#def _value_info_or_none(value_infos, val_name):
# return value_infos.get(val_name, None)
def _dtype(value_infos, val_name):
return _np.dtype(value_infos[val_name]['dtype'])
......@@ -204,7 +201,7 @@ def _const_weight_or_none(value_infos, val_name):
def _default(prog, op_type, inputs, outputs, attrs, *args, name='', **kwargs):
info = DEFAULT_OP_MAPPING[op_type]
info.extend(DEFAULT_OP_MAPPING_VALUES[len(info):])
(
fluid_op,
......@@ -295,7 +292,7 @@ def _zeros_like(prog, val_ref, val_out, value_infos):
'Sub',
[val_ref, val_ref],
[val_out], # val
{'axis': 0},
value_infos,
)
......@@ -317,11 +314,11 @@ def _pad_if_asymmetric(prog, pads, val_name, value_infos): # pads: SSEE
'Pad',
[val_name],
[val_padded], # val
{
'mode': 'constant',
'value': 0.,
'pads': pads,
},
value_infos=value_infos,
name=val_padded,
)
......@@ -372,14 +369,14 @@ def _adaptive_pool(prog, pool_type, inputs, outputs, attrs, name=''):
fluid_op,
([var_x], 'X'),
([var_y] + ([var_indices] if has_indices else []), 'Out', 'Indices'),
{
'global_pooling': False,
'adaptive': True,
'exclusive': True,
'require_index': has_indices,
'pooling_type': pool_type,
'ksize': pool_size,
},
)
......@@ -419,12 +416,12 @@ def _global_pool(prog, pool_type, inputs, outputs, attrs, value_infos, name=''):
fluid_op,
([var_x], 'X'),
([var_y], 'Out'),
{
'global_pooling': True,
'adaptive': False,
'pooling_type': pool_type,
'ksize': [-1, -1],
},
)
......@@ -481,17 +478,17 @@ def _pool(prog, pool_type, inputs, outputs, attrs, value_infos, name=''):
fluid_op,
([var_x], 'X'),
([var_y] + ([var_indices] if has_indices else []), 'Out', 'Indices'),
{
'global_pooling': False,
'adaptive': False,
'exclusive': True,
'require_index': has_indices,
'pooling_type': pool_type,
'ksize': pool_size,
'strides': strides,
'paddings': paddings,
'ceil_mode': ceil_mode,
},
)
......@@ -506,11 +503,11 @@ def _roi_pool(prog, fluid_op, inputs, outputs, attrs, value_infos, name):
# interpretation
spatial_scale = attrs['spatial_scale'] # required
pooled_height, pooled_width = attrs['pooled_shape'] # required
od_attrs = {
'pooled_height': pooled_height,
'pooled_width': pooled_width,
'spatial_scale': spatial_scale,
}
feature_attr = ''
is_max_pool = fluid_op == 'roi_pool'
if 'sampling_ratio' in attrs: #
......@@ -606,34 +603,34 @@ def _interpolate(prog, inputs, outputs, attrs, value_infos, name=''):
fluid_op,
([var_x], 'X'),
([var_y], 'Out'),
{
'interp_method': mode,
'out_h': out_shape_[0],
'out_w': out_shape_[1],
},
)
def AdaptiveAveragePool(prog, inputs, outputs, attrs, *args, name='', **kwargs):
"""
aten::adaptive_avg_poolnd
"""
return _adaptive_pool(prog, 'avg', inputs, outputs, attrs, name=name)
def AdaptiveMaxPool(prog, inputs, outputs, attrs, *args, name='', **kwargs):
"""
aten::adaptive_max_poolnd
"""
return _adaptive_pool(prog, 'max', inputs, outputs, attrs, name=name)
def AffineGrid(prog, inputs, outputs, attrs, *args, name='', **kwargs):
"""
aten::affine_grid
"""
# I/O
val_theta, = inputs
......@@ -662,7 +659,7 @@ def AffineGrid(prog, inputs, outputs, attrs, *args, name='', **kwargs):
fluid_op,
([var_theta], 'Theta'),
([var_grid], 'Output'),
{'output_shape': size}, # f**k you API
)
......@@ -675,8 +672,8 @@ def AveragePool(prog,
*args,
**kwargs):
"""
onnx::AveragePool-10:
"""
return _pool(prog, 'avg', inputs, outputs, attrs, value_infos, name=name)
......@@ -691,8 +688,8 @@ def BatchNormalization(prog,
*args,
**kwargs):
"""
onnx::BatchNormalization-9:
"""
# I/O
val_x, val_scale, val_b, val_mean, val_var = inputs
......@@ -747,23 +744,24 @@ def BatchNormalization(prog,
prog.VarDesc(var_saved_variance)
prog.OpDesc(
fluid_op,
([var_x, var_scale, var_b, var_mean, var_var
], 'X', 'Scale', 'Bias', 'Mean', 'Variance'),
([var_y, var_mean, var_saved_mean, var_saved_variance, var_var
], 'Y', 'MeanOut', 'SavedMean', 'SavedVariance', 'VarianceOut'),
{
'is_test': 1,
'data_layout': 'NCHW',
'use_global_stats': False,
'momentum': momentum,
'epsilon': epsilon,
},
)
def Cast(prog, inputs, outputs, attrs, value_infos, *args, **kwargs):
"""
onnx::Cast-9:
"""
# I/O
val_input, = inputs
......@@ -796,17 +794,18 @@ def Cast(prog, inputs, outputs, attrs, value_infos, *args, **kwargs):
fluid_op,
([var_input], 'X'),
([var_output], 'Out'),
{
'in_dtype': prog.Dtype(_dtype(value_infos,
val_input)), # holy, required
'out_dtype': prog.Dtype(dtype),
},
)
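# For reference: the ONNX 'to' attribute is a TensorProto dtype enum, e.g.
# to=1 means float32 and to=2 means uint8, matching the self-test at the
# bottom of this file.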
def Concat(prog, inputs, outputs, attrs, *args, name='', **kwargs):
"""
onnx::Concat-4:
"""
# I/O
val_concat_result, = outputs
......@@ -834,14 +833,14 @@ def Concat(prog, inputs, outputs, attrs, *args, name='', **kwargs):
fluid_op,
(var_inps, *(['X'] * len(var_inps))),
([var_concat_result], 'Out'),
{'axis': axis},
)
def Constant(prog, inputs, outputs, attrs, value_infos, *args, **kwargs):
"""
onnx::Constant-9:
"""
# I/O
assert len(inputs) == 0
......@@ -886,11 +885,11 @@ def Constant(prog, inputs, outputs, attrs, value_infos, *args, **kwargs):
fluid_op,
([], ),
([var_output], 'Out'),
{
'shape': shape,
'dtype': prog.Dtype(dtype),
'value': value,
},
)
else: # list parameter -> const_value
prog.Code('# {} = {} # passed directly as literal'.format(
......@@ -901,8 +900,8 @@ def Constant(prog, inputs, outputs, attrs, value_infos, *args, **kwargs):
def ConstantOfShape(prog, inputs, outputs, attrs, value_infos, *args, **kwargs):
"""
onnx::ConstantOfShape-9:
"""
# I/O
val_shape, = inputs
......@@ -917,7 +916,7 @@ def ConstantOfShape(prog, inputs, outputs, attrs, value_infos, *args, **kwargs):
'this is not supported')
dtype = attrs['value'].dtype
attrs = attrs.copy()
attrs.update({'shape': shape, 'dtype': dtype}) # pass const
prog.Code('# shape:{}={} # const as literal'.format(var_shape, shape))
prog.Op(
......@@ -940,8 +939,8 @@ def Conv(prog,
*args,
**kwargs):
"""
onnx::Conv-1:
"""
# I/O
val_x, val_w = inputs[:2]
......@@ -1015,12 +1014,13 @@ def Conv(prog,
fluid_op,
([var_x, var_w], 'Input', 'Filter'), # , 'Bias', 'ResidualData'
([var_conv if has_bias else var_y], 'Output'),
{
'strides': strides,
'paddings': paddings,
'dilations': dilations,
'groups': num_groups,
},
)
if has_bias:
prog.VarDesc(var_conv)
prog.IntermediateOp(
......@@ -1028,7 +1028,7 @@ def Conv(prog,
'Add',
[var_conv, var_b], #
[val_y],
{'axis': 1},
value_infos=value_infos,
name=(name + '.bias'),
)
......@@ -1046,8 +1046,8 @@ def ConvTranspose(prog,
*args,
**kwargs):
"""
onnx::ConvTranspose-1:
"""
# I/O
val_x, val_w = inputs[:2]
......@@ -1125,13 +1125,14 @@ def ConvTranspose(prog,
fluid_op,
([var_x, var_w], 'Input', 'Filter'), # , 'Bias', 'ResidualData'
([var_conv if has_bias else var_y], 'Output'),
{
'strides': strides,
'paddings': paddings,
'dilations': dilations,
# 'output_size': output_size,
'groups': num_groups,
},
)
if has_bias:
prog.VarDesc(var_conv)
prog.IntermediateOp(
......@@ -1139,7 +1140,7 @@ def ConvTranspose(prog,
'Add',
[var_conv, var_b], #
[val_y],
{'axis': 1},
value_infos=value_infos,
name=(name + '.bias'),
)
......@@ -1166,8 +1167,8 @@ def ConvTranspose(prog,
def Gemm(prog, inputs, outputs, attrs, value_infos, name, *args, **kwargs):
"""
onnx::Gemm-9:
"""
# due to fluid fc don't support transposed weight, we use matmul + ew_add
val_a, val_b, val_c = inputs
......@@ -1184,19 +1185,19 @@ def Gemm(prog, inputs, outputs, attrs, value_infos, name, *args, **kwargs):
'MatMul',
[val_a, val_b],
[val_mm], # val
{
'transpose_x': trans_a,
'transpose_y': trans_b,
'alpha': alpha,
},
value_infos=value_infos,
name=val_mm,
)
prog.op_descs[-1].attrs.extend(
prog.OpDescAttrs({
'transpose_X': trans_a,
'transpose_Y': trans_b,
})) # f**k you API
if beta != 0:
if beta == 1.: # exactly
prog.Op(
......@@ -1204,7 +1205,7 @@ def Gemm(prog, inputs, outputs, attrs, value_infos, name, *args, **kwargs):
'Add',
[val_mm, val_c],
[val_y], # val
{'axis': 1},
value_infos=value_infos,
name=(name + '_beta'),
)
......@@ -1226,7 +1227,7 @@ def Gemm(prog, inputs, outputs, attrs, value_infos, name, *args, **kwargs):
'Constant',
[],
[val_beta], # val
{'value': beta},
value_infos=value_infos,
name=val_beta,
)
......@@ -1244,7 +1245,7 @@ def Gemm(prog, inputs, outputs, attrs, value_infos, name, *args, **kwargs):
'Add',
[val_mm, val_vm],
[val_y], # val
{'axis': 1},
name=(name + '_bias'),
)
......@@ -1258,11 +1259,16 @@ def GlobalAveragePool(prog,
*args,
**kwargs):
"""
onnx::GlobalAveragePool-1:
"""
return _global_pool(prog,
'avg',
inputs,
outputs,
attrs,
value_infos,
name=name)
def GlobalMaxPool(prog,
......@@ -1274,70 +1280,23 @@ def GlobalMaxPool(prog,
*args,
**kwargs):
"""
onnx::GlobalMaxPool-1:
"""
return _global_pool(prog,
'max',
inputs,
outputs,
attrs,
value_infos,
name=name)
def MaxPool(prog, inputs, outputs, attrs, value_infos, name='', *args,
**kwargs):
"""
onnx::MaxPool-10:
"""
return _pool(prog, 'max', inputs, outputs, attrs, value_infos, name=name)
......@@ -1345,16 +1304,16 @@ def MaxPool(prog, inputs, outputs, attrs, value_infos, name='', *args,
def MaxRoiPool(prog, inputs, outputs, attrs, value_infos, name, *args,
**kwargs):
"""
onnx::MaxRoiPool-1:
"""
_roi_pool(prog, 'roi_pool', inputs, outputs, attrs, value_infos, name)
def Pad(prog, inputs, outputs, attrs, value_infos, name='', *args, **kwargs):
"""
onnx::Pad-2:
"""
# I/O
val_data, = inputs
......@@ -1375,7 +1334,7 @@ def Pad(prog, inputs, outputs, attrs, value_infos, name='', *args, **kwargs):
assume_pad2d |= data_shape and len(data_shape) == 4 # NCHW
if output_shape:
assume_pad2d |= output_shape and len(output_shape) == 4 # NCHW
od_attrs = {'pad_value': value}
if assume_pad2d:
fluid_op = 'pad2d'
pad2d_attr = ', mode={}, data_format="NCHW"'.format(repr(mode))
......@@ -1424,8 +1383,8 @@ def PRelu(prog,
*args,
**kwargs):
"""
onnx::PRelu-9:
"""
# I/O
val_x, val_slope = inputs
......@@ -1434,11 +1393,20 @@ def PRelu(prog,
var_y = _make_var_name(val_y)
# interpretation
mode = 'channel'
slope_shape = _shape_or_none(value_infos, val_slope)
if slope_shape is not None:
if len(slope_shape) == 0:
mode = 'all'
elif len(slope_shape) >= 2:
if slope_shape[1] != _np.product(
slope_shape): # not channel broadcasting
mode = 'element'
fluid_op = 'prelu'
name_attr = ', name={}'.format(repr(name)) if name else ''
if embed_params:
assert name != ''
var_slope = name + '.w_0'
value_infos[val_slope].setdefault('embeded_as', []).append(var_slope)
param_attr = ''
else:
......@@ -1446,36 +1414,38 @@ def PRelu(prog,
param_attr = ', param_attr={}'.format(repr(var_slope))
# generation
prog.Code('{} = layers.{}({}'
', mode={}'
'{}{})'.format(
var_y,
fluid_op,
var_x,
# attrs
repr(mode),
param_attr,
name_attr,
))
prog.VarDesc(var_y)
prog.OpDesc(
fluid_op,
([var_x, var_slope], 'X', 'Alpha'),
([var_y], 'Out'),
{'mode': mode},
)
def PsRoiPool(prog, inputs, outputs, attrs, value_infos, name, *args, **kwargs):
"""
caffe2::PsRoiPool
"""
_roi_pool(prog, 'psroi_pool', inputs, outputs, attrs, value_infos, name)
def Reshape(prog, inputs, outputs, attrs, value_infos, name, *args, **kwargs):
"""
onnx::Reshape-5:
"""
# I/O
val_data, val_shape = inputs
......@@ -1524,7 +1494,7 @@ def Reshape(prog, inputs, outputs, attrs, value_infos, name, *args, **kwargs):
'Cast',
[val_shape],
[val_shape_int32], # var
{'to': _np.dtype('int32')}, # use np.dtype
value_infos=value_infos,
name=(name + '_cast'),
)
......@@ -1549,29 +1519,29 @@ def Reshape(prog, inputs, outputs, attrs, value_infos, name, *args, **kwargs):
fluid_op,
([var_data], 'X'),
([var_reshaped, var_xshape], 'Out', 'XShape'),
{'shape': shape},
)
else:
prog.OpDesc(
fluid_op,
([var_data, var_shape_int32], 'X', 'Shape'),
([var_reshaped, var_xshape], 'Out', 'XShape'),
{'shape': shape},
)
def Resize(prog, inputs, outputs, attrs, value_infos, name='', *args, **kwargs):
"""
onnx::Resize-10:
"""
onnx::Resize-10:
"""
return _interpolate(prog, inputs, outputs, attrs, value_infos, name=name)
def RoiAlign(prog, inputs, outputs, attrs, value_infos, name, *args, **kwargs):
"""
caffe2::RoiAlign
"""
_roi_pool(prog, 'roi_align', inputs, outputs, attrs, value_infos, name)
......@@ -1610,8 +1580,8 @@ def RoiAlign(prog, inputs, outputs, attrs, value_infos, name, *args, **kwargs):
def Slice(prog, inputs, outputs, attrs, value_infos, *args, **kwargs):
"""
onnx::Slice-1:9
"""
# I/O
val_data, = inputs
......@@ -1659,18 +1629,18 @@ def Slice(prog, inputs, outputs, attrs, value_infos, *args, **kwargs):
fluid_op,
([var_data], 'Input'),
([var_output], 'Out'),
{
'axes': axes,
'starts': starts,
'ends': ends,
},
)
def Split(prog, inputs, outputs, attrs, *args, name='', **kwargs):
"""
onnx::Split-2:
"""
# I/O
val_input, = inputs
......@@ -1701,17 +1671,17 @@ def Split(prog, inputs, outputs, attrs, *args, name='', **kwargs):
fluid_op,
([var_input], 'X'),
(var_outs, *(['Out'] * len(var_outs))),
{
'axis': axis,
'sections': split,
},
)
def Sum(prog, inputs, outputs, *args, **kwargs):
"""
onnx::Sum-8:
"""
# I/O
val_sum, = outputs
......@@ -1740,8 +1710,8 @@ def Sum(prog, inputs, outputs, *args, **kwargs):
def Tile(prog, inputs, outputs, attrs, value_infos, name='', *args, **kwargs):
"""
onnx::Tile-1:
"""
# I/O
val_input, val_repeats = inputs
......@@ -1773,14 +1743,14 @@ def Tile(prog, inputs, outputs, attrs, value_infos, name='', *args, **kwargs):
fluid_op,
([var_input], 'X'),
([var_output], 'Out'),
{'expand_times': repeats},
)
def Transpose(prog, inputs, outputs, attrs, *args, name='', **kwargs):
"""
onnx::Transpose-1:
"""
# I/O
val_data, = inputs
......@@ -1812,7 +1782,7 @@ def Transpose(prog, inputs, outputs, attrs, *args, name='', **kwargs):
fluid_op,
([var_data], 'X'),
([var_transposed, var_xshape], 'Out', 'XShape'),
{'axis': perm}, # f**k you API
)
......@@ -1825,8 +1795,8 @@ def Upsample(prog,
*args,
**kwargs):
"""
onnx::Upsample-9:9
"""
return _interpolate(prog, inputs, outputs, attrs, value_infos, name=name)
......@@ -1902,9 +1872,8 @@ if __name__ == '__main__':
['input'],
['output'],
dict(to=2), # TensorProto.UINT8
dict(input=dict(shape=(2, 3), dtype=np.float32),
output=dict(shape=(2, 3), dtype=np.uint8)),
)
logger.info('Cast program:\n%s', prog)
......@@ -2101,12 +2070,11 @@ if __name__ == '__main__':
logger.info('Less program:\n%s', prog)
prog = Program()
_default(prog,
'MatMul', ['A', 'B'], ['Y'],
dict(),
dict(Y=dict(shape=(2, 8), dtype=np.float32)),
name='MatMul')
logger.info('MatMul program:\n%s', prog)
prog = Program()
......@@ -2168,11 +2136,9 @@ if __name__ == '__main__':
logger.info('PRelu program:\n%s', prog)
prog = Program()
Tile(prog, ['input', 'repeats'], ['output'],
dict(),
dict(repeats=dict(const_value=[1, 2]),
output=dict(shape=(2, 2, 4), dtype=np.float32)),
name='Tile')
logger.info('Tile program:\n%s', prog)
......@@ -12,25 +12,25 @@ import torch
from collections import OrderedDict as Dict
def ensure_list(obj):
if isinstance(obj, (list, tuple, set)):
return list(obj)
return [obj]
def ensure_tuple(obj):
if isinstance(obj, (tuple, list, set)):
return tuple(obj)
return (obj, )
def flatten_list(obj, out=None):
assert isinstance(obj, list)
if out is None:
out = type(obj)()
for item in obj:
if isinstance(item, list):
flatten_list(item, out)
else:
out.append(item)
return out
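# A few behavioral examples (editor's sketch):
assert ensure_list('x') == ['x']
assert ensure_tuple([1, 2]) == (1, 2)
assert flatten_list([1, [2, [3]], 4]) == [1, 2, 3, 4]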
......@@ -38,10 +38,10 @@ def _flatten_list(obj, out=None):
def export_data(state_dict, prefix=''):
"""
export binary data with meta text for raw C++ inference engines
"""
def str_(obj):
if isinstance(obj, (tuple, list)):
return str(obj)[1:-1].replace(' ', '')
return str(obj)
......@@ -52,14 +52,14 @@ def export_data(state_dict, prefix=''):
data = None
if torch and torch.is_tensor(value):
data = value.data.cpu().numpy()
elif isinstance(value, np.ndarray):
data = value
if data is not None:
data.tofile('{}{}.bin'.format(prefix_, key))
fp.write('{}.dtype={}\n'.format(key, str_(data.dtype.name)))
fp.write('{}.shape={}\n'.format(key, str_(data.shape)))
else:
fp.write('{}={}\n'.format(key, str_(value)))
fp.close()
......@@ -72,46 +72,45 @@ def export_onnx_with_validation(model,
*args,
**kwargs):
"""
export PyTorch model to ONNX model and export sample inputs and outputs in a Numpy file
"""
is_tuple_or_list = lambda x: isinstance(x, (tuple, list))
def tensors_to_arrays(tensors):
if torch.is_tensor(tensors):
return tensors.data.cpu().numpy()
arrays = []
for tensor in tensors:
arrays.append(tensors_to_arrays(tensor))
return arrays
def zip_dict(keys, values):
ret = Dict()
for idx, (key, value) in enumerate(zip(keys, values)):
is_key_list = is_tuple_or_list(key)
is_value_list = is_tuple_or_list(value)
assert is_key_list == is_value_list, 'keys and values mismatch'
if is_value_list:
ret[str(idx)] = zip_dict(key, value)
else:
ret[key] = value
return ret
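# e.g. (illustrative): zip_dict(['x', ('y0', 'y1')], [a, (b, c)])
# -> {'x': a, '1': {'y0': b, 'y1': c}}; a nested name group keeps its index as key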
torch_inputs = ensure_tuple(inputs) # WORKAROUND: for torch.onnx
outputs = torch.onnx.export(model,
torch_inputs,
export_basepath + '.onnx',
input_names=flatten_list(input_names),
output_names=flatten_list(output_names),
*args,
**kwargs)
if outputs is None: # WORKAROUND: for torch.onnx
outputs = model(*inputs)
torch_outputs = ensure_tuple(outputs)
inputs = zip_dict(input_names, tensors_to_arrays(torch_inputs))
outputs = zip_dict(output_names, tensors_to_arrays(torch_outputs))
if use_npz:
np.savez(export_basepath + '.npz', inputs=inputs, outputs=outputs)
else:
......
......@@ -9,23 +9,22 @@ Created on Fri Mar 22 12:17:19 2019
import importlib, logging, os, sys
def flatten_dict(obj, out=None):
assert isinstance(obj, dict)
if out is None:
out = type(obj)()
for key, value in obj.items():
if isinstance(value, dict):
flatten_dict(value, out)
else:
assert key not in out
out[key] = value
return out
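# A quick behavioral example (editor's sketch):
assert flatten_dict({'a': 1, 'b': {'c': 2}}) == {'a': 1, 'c': 2}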
def ensure_list(obj):
if isinstance(obj, (list, tuple, set)):
return list(obj)
return [obj]
......@@ -33,12 +32,12 @@ def validate(fluid_model_filename,
golden_data_filename,
model_func_name='inference',
atol=1e-3,
rtol=1e-3,
save_inference_model=False,
**kwargs):
"""
run inference on the converted Paddle fluid model and validate it against the given golden data
"""
import numpy as np
import paddle.fluid as fluid
......@@ -56,8 +55,8 @@ def validate(fluid_model_filename,
prog, _, var_outs = fluid.io.load_inference_model(fluid_model_dir, exe)
out_names = var_outs # HINT: pass var if fetch ops already created
logger.info('model load passed')
elif basename.endswith('.py'): # is Python code
logger.debug('using code file %s', basename)
module_name, _ = os.path.splitext(basename)
sys_path = sys.path.copy()
sys.path.append(fluid_model_dir)
......@@ -73,14 +72,15 @@ def validate(fluid_model_filename,
func)
var_outs = func()
var_outs = ensure_list(var_outs)
out_names = [var.name for var in var_outs
] # HINT: pass string to create fetch ops
logger.info('import passed')
prog = fluid.default_main_program()
fluid.io.load_persistables(executor=exe,
dirname=fluid_model_dir,
main_program=prog)
logger.info('weight load passed')
else:
raise ValueError('unsupported Paddle fluid model filename')
......@@ -95,20 +95,19 @@ def validate(fluid_model_filename,
test_data = np.load(golden_data_filename, encoding='bytes').tolist()
input_data = test_data['inputs']
output_data = test_data['outputs']
input_data = flatten_dict(input_data)
output_data = flatten_dict(output_data)
logger.info('found %d I/O golden data, starting test ...',
len(input_data) + len(output_data))
# DEBUG: reload test for Python code
if basename.endswith('.py') and save_inference_model:
fluid.io.save_inference_model(fluid_model_dir,
input_data.keys(),
var_outs,
exe,
main_program=prog,
export_for_deployment=True)
logger.info('model re-save passed')
fluid.io.load_inference_model(fluid_model_dir, exe)
logger.info('model re-load passed')
......@@ -122,13 +121,12 @@ def validate(fluid_model_filename,
for (name, truth), output in zip(output_data.items(), outputs):
logger.info('testing output {} ...'.format(name))
try:
np.testing.assert_allclose(output,
truth,
rtol=rtol,
atol=atol,
equal_nan=False,
verbose=True)
except AssertionError as e:
passed = False
logger.error('failed: %s\n', e)
......@@ -174,7 +172,7 @@ if __name__ == '__main__':
parser.add_argument(
'--rtol',
type=float,
default=1e-2,
help='assertion relative tolerance for validation',
)
args = parser.parse_args()
......@@ -188,9 +186,8 @@ if __name__ == '__main__':
golden_data_filename = args.test_data
atol, rtol = args.atol, args.rtol
validate(fluid_model_filename,
golden_data_filename,
atol=atol,
rtol=rtol,
save_inference_model=debug)
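# An illustrative invocation (the model and data paths are hypothetical;
# '--rtol' is defined in the argparse block above, '--test_data' is assumed
# from the args.test_data use):
#   python -m onnx2fluid.validation sample/model.py --test_data sample/data.npz --rtol 1e-2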
......@@ -11,6 +11,8 @@ from __future__ import division
import logging, os
import numpy as np
from collections import OrderedDict as Dict
logger = logging.getLogger(__name__)
from . import symbolic
......@@ -30,7 +32,7 @@ __all__ = [
]
def irepr(obj, to='_'):
"""inline repr"""
s = repr(obj)
......@@ -41,12 +43,12 @@ def _irepr(obj, to='_'):
return s
def flatten_list(obj, out=None):
if out is None:
out = type(obj)()
for item in obj:
if isinstance(item, list):
flatten_list(item, out)
else:
out.append(item)
return out
......@@ -54,12 +56,12 @@ def _flatten_list(obj, out=None):
def make_attr_name(name):
"""
make a valid code name for ParamAttr
"""
if name == '':
raise ValueError('name should not be empty')
for s in ' \\|/:': #
name = name.replace(s, '_')
if not name.startswith('_'):
name = '_' + name
......@@ -68,8 +70,8 @@ def make_attr_name(name):
class Program(object):
"""
fluid Python code and ProgramDesc wrapper
"""
DTYPE_TO_FRAMEWORK_DTYPE = {
'bool': framework_pb2.VarType.BOOL,
......@@ -86,8 +88,8 @@ class Program(object):
@staticmethod
def Dtype(dtype):
"""
convert dtype to fluid framework dtype
"""
dtype = np.dtype(dtype).name
return Program.DTYPE_TO_FRAMEWORK_DTYPE[dtype]
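# e.g. (a sketch; BOOL is shown in the table above, FP32 is assumed to be
# the float32 entry of the truncated part of the table):
#   Program.Dtype(np.dtype('bool')) -> framework_pb2.VarType.BOOL
#   Program.Dtype('float32')        -> framework_pb2.VarType.FP32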
......@@ -95,8 +97,8 @@ class Program(object):
@staticmethod
def OpDescVars(vals, *keys):
"""
make (OpDesc.Var)s
"""
od_vars = []
for idx, key in enumerate(keys):
......@@ -110,8 +112,8 @@ class Program(object):
@staticmethod
def OpDescAttrs(attrs):
"""
make (OpDesc.Attr)s
"""
od_attrs = []
for key, value in attrs.items():
......@@ -130,8 +132,8 @@ class Program(object):
od_attr.type = framework_pb2.STRING
od_attr.s = value
elif isinstance(value, list):
if len(value) > 0: # TODO: test all items
if isinstance(value[0],
bool): # bool.mro() = [bool, int, object]
od_attr.type = framework_pb2.BOOLEANS
od_attr.bools.extend(value)
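# note: bool is a subclass of int in Python (isinstance(True, int) is True),
# so this bool branch must be checked before any plain-int handling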
......@@ -164,34 +166,35 @@ class Program(object):
self.code_mutable = True
self.codes = []
self.op_descs = []
self.var_descs = Dict()
def __repr__(self):
return ('Program(code mutable: {}) with:\n'
'codes: {}\n'
'op_descs: {}\n'
'var_descs: {}\n').format(self.code_mutable, self.codes,
self.op_descs,
list(self.var_descs.values()))
def Code(self, code):
"""
add Python code
"""
if self.code_mutable:
self.codes.append(code)
def OpDesc(self,
name,
op_type,
input_val_keys=None,
output_val_keys=None,
attrs=None):
"""
add OpDesc
"""
desc = framework_pb2.OpDesc()
desc.type = op_type
if input_val_keys is not None:
desc.inputs.extend(self.OpDescVars(*input_val_keys))
if output_val_keys is not None:
......@@ -202,37 +205,28 @@ class Program(object):
return desc
def VarDesc(self,
var_name,
persistable=False,
value_info=None,
remove_batch=None):
"""
add VarDesc
"""
assert var_name not in self.var_descs, 'var naming conflicted'
var_desc = framework_pb2.VarDesc()
var_desc.name = var_name
var_desc.persistable = persistable
var_desc.type.type = framework_pb2.VarType.LOD_TENSOR
self.var_descs[var_name] = var_desc
if value_info:
self.VarTypeInfo(var_name, value_info, remove_batch=remove_batch)
def Op(self, domain, op_type, *args, **kwargs):
"""
convert an ONNX op and add it to program
"""
if domain != '': # TODO: symbolic file routing by domain
raise ValueError('only default domain supported')
......@@ -248,8 +242,8 @@ class Program(object):
def IntermediateOp(self, domain, op_type, *args, **kwargs):
"""
convert an intermediate ONNX op, declared in the desc program only
"""
code_mutable = self.code_mutable
self.code_mutable = False
......@@ -261,21 +255,48 @@ class Program(object):
else:
self.code_mutable = code_mutable
def VarTypeInfo(self, var_name, value_info, remove_batch=None):
"""
set value_info for var
"""
if var_name not in self.var_descs:
return
dtype = value_info.get('dtype', None)
if dtype is None:
return
var_desc = self.var_descs[var_name]
tensor_desc = var_desc.type.lod_tensor.tensor
tensor_desc.data_type = self.Dtype(dtype) # required
shape = value_info.get('shape', None)
if shape is not None:
tensor_desc.dims.extend(shape)
if len(shape) > 0: # skip scalars
if remove_batch is None:
remove_batch = value_info.get('remove_batch',
False) #not persistable)
if remove_batch:
tensor_desc.dims[0] = -1
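# A minimal sketch of the intended flow (variable name and shape are illustrative):
#   prog = Program()
#   prog.VarDesc('x', value_info={'dtype': 'float32', 'shape': [1, 3, 224, 224]},
#                remove_batch=True)  # VarTypeInfo then records dims as [-1, 3, 224, 224]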
class Writer(object):
"""
fluid code and desc writer
"""
# CODE_INDENT = ' ' * 4
CODE_INDENT = '\t'
@staticmethod
def header_code(func_name, info=''):
"""
Python header codes
"""
codes = []
codes.append('"""')
codes.append('This code is generated by onnx2fluid.')
codes.append('{}'.format(info))
......@@ -294,28 +315,27 @@ class Writer(object):
def emit_op(prog, name, domain, op_type, inputs, outputs, attrs,
value_infos, *args, **kwargs):
"""
emit an ONNX op into program
"""
prog.Code('# {}, {}::{}: {} -> {}, {}'.format(name, domain, op_type,
inputs, outputs,
irepr(attrs, to=', ')))
prog.Op(domain,
op_type,
inputs,
outputs,
attrs,
value_infos=value_infos,
name=name,
*args,
**kwargs)
@staticmethod
def emit_param(prog, name, value_info):
"""
emit an ONNX weight into program
"""
if value_info.get('embeded_as', []):
var_names = value_info['embeded_as']
......@@ -339,8 +359,8 @@ class Writer(object):
@staticmethod
def emit_inputs(prog, names, value_infos, remove_batch=None):
"""
emit ONNX inputs into program
"""
for idx, name in enumerate(names):
var_name = make_var_name(name)
......@@ -367,16 +387,17 @@ class Writer(object):
'feed',
(['feed'], 'X'),
([var_name], 'Out'),
{'col': idx},
)
prog.VarDesc(var_name,
value_info=value_info,
remove_batch=remove_batch)
@staticmethod
def emit_outputs(prog, names): #, value_infos
"""
emit ONNX outputs into program
"""
code = 'return '
for idx, name in enumerate(names):
......@@ -387,7 +408,7 @@ class Writer(object):
'fetch',
([var_name], 'X'),
(['fetch'], 'Out'),
{'col': idx},
)
# var is emitted over ops
prog.Code(code)
......@@ -395,18 +416,18 @@ class Writer(object):
@staticmethod
def add_codes(codes, others, indent):
"""
flatten codes in program
"""
for code in flatten_list(others):
codes.append(Writer.CODE_INDENT * indent + code)
return codes
@staticmethod
def write_weight(weight, filename):
"""
write single weight in fluid desc
"""
if not isinstance(weight, np.ndarray):
raise TypeError('weight is not an ndarray')
......@@ -427,8 +448,8 @@ class Writer(object):
@staticmethod
def write_weights(weights, save_dir):
"""
write multiple weights in each fluid desc
"""
for name, weight in weights.items():
if not isinstance(weights, dict):
......@@ -442,8 +463,8 @@ class Writer(object):
@staticmethod
def write_code_file(filename, header_code, *body_codes):
"""
write Python code to file
"""
codes = []
Writer.add_codes(codes, header_code, 0)
......@@ -451,7 +472,7 @@ class Writer(object):
Writer.add_codes(codes, body_code, 1)
fp = open(filename, 'w')
for code in flatten_list(codes):
fp.write(code)
fp.write('\n')
fp.close()
......@@ -460,8 +481,8 @@ class Writer(object):
@staticmethod
def write_desc_file(filename, op_descs, var_descs):
"""
write desc program to file
"""
prog_desc = framework_pb2.ProgramDesc()
block_desc = prog_desc.blocks.add()
......
......@@ -19,13 +19,13 @@ license = MIT
# pick matching entries from the official PyPI classifier list
# https://pypi.org/pypi?%3Aaction=list_classifiers
classifier =
Private :: Do Not Upload
Programming Language :: Python
Programming Language :: Python :: 3
Programming Language :: Python :: 3.5
# keywords for indexing, so users can find this project
keywords =
onnx paddlepaddle
[options]
# package names; "find:" enables auto-discovery, configurable in detail under options.packages.find
......@@ -34,7 +34,7 @@ packages = find:
# one dependency per line; list only direct dependencies, indirect ones usually need no mention
# keep the version constraints here loose: a minimum version plus the major version is normally enough
install_requires =
onnx >= 1.4
# test dependencies: extra libraries required to run the tests, same format as install_requires
# the built-in unittest works, as do simpler test frameworks such as pytest or nose
......@@ -53,7 +53,9 @@ zip_safe = True
# the following configuration turns the given functions into command-line tools that users can run directly
[options.entry_points]
console_scripts =
onnx2fluid = onnx2fluid.__main__
onnx2fluid_convert = onnx2fluid.conversion
onnx2fluid_validate = onnx2fluid.validation
# the following adds non-.py files such as conf or data files to the package; they are installed alongside it under site-packages
# only files are supported, not directories, though wildcards may be used
......