Unverified commit bdae1084, authored by Jason, committed by GitHub

Merge pull request #18 from MacroBull/master

Bugfixes
@@ -11,15 +11,32 @@ import numpy as np
 from collections import OrderedDict as Dict
+def _make_var_name(name):
+    """
+    make a valid variable name in Python code
+    """
+    if name == '':
+        return '_'
+    if name[0].isdigit():
+        return 'var_' + name
+    for s in ' *?\\/-:':
+        name = name.replace(s, '_')
+    if name.startswith('_'):
+        name = 'var' + name
+    return name
 fn = sys.argv[1]
 input_names = sys.argv[2].split(':')
 output_name = sys.argv[3].split(':')
-data = np.load(fn)
+data = np.load(fn, encoding='bytes')
 input_data = data['inputs']
 output_data = data['outputs']
-inputs = Dict(zip(input_names, [input_data]))
-outputs = Dict(zip(output_name, [output_data]))
+inputs = Dict(zip(map(_make_var_name, input_names), [input_data]))
+outputs = Dict(zip(map(_make_var_name, output_name), [output_data]))
 np.savez(fn, inputs=inputs, outputs=outputs)  # overwrite
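Review note: the `_make_var_name` helper added in both converter scripts maps arbitrary ONNX tensor names to valid Python identifiers. A minimal sketch of its behavior (the sample names below are hypothetical):

```python
# assumes _make_var_name as defined in the diff above
assert _make_var_name('') == '_'                         # empty name -> placeholder
assert _make_var_name('0_out') == 'var_0_out'            # leading digit -> 'var_' prefix
assert _make_var_name('gpu_0/data_0') == 'gpu_0_data_0'  # '/', ' ', '-', ':' etc. -> '_'
assert _make_var_name('_hidden') == 'var_hidden'         # leading '_' -> 'var' prepended
```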
@@ -16,6 +16,23 @@ import onnx.numpy_helper as numpy_helper
 from collections import OrderedDict as Dict
 from glob import glob
+def _make_var_name(name):
+    """
+    make a valid variable name in Python code
+    """
+    if name == '':
+        return '_'
+    if name[0].isdigit():
+        return 'var_' + name
+    for s in ' *?\\/-:':
+        name = name.replace(s, '_')
+    if name.startswith('_'):
+        name = 'var' + name
+    return name
 data_dir = os.path.dirname(sys.argv[1])
 input_names = sys.argv[2].split(':')
 output_name = sys.argv[3].split(':')
@@ -36,7 +53,7 @@ for fn in glob(os.path.join(data_dir, 'output_*.pb')):
         tensor.ParseFromString(f.read())
     outputs.append(numpy_helper.to_array(tensor))
-inputs = Dict(zip(input_names, inputs))
-outputs = Dict(zip(output_name, outputs))
+inputs = Dict(zip(map(_make_var_name, input_names), inputs))
+outputs = Dict(zip(map(_make_var_name, output_name), outputs))
 np.savez(data_dir, inputs=inputs, outputs=outputs)
 #! /usr/bin/env sh
-get_url="aria2c -c -s8 -x8"
+# setopt SH_WORD_SPLIT # if zsh
 base_url="https://s3.amazonaws.com/download.onnx/models/opset_9/"
-flags="-e -o /tmp/export/"
+convert_flags="-e -o /tmp/export/"
+validate_flags1="/tmp/export/model.py"
+validate_flags2="/tmp/export/__model__"
+# alias http_get="wget -c" # if no aria2
+alias http_get="aria2c -c -s8 -x8"
+# alias python="python3" # if ...
 bvlc_alexnet()
 {
@@ -10,21 +17,24 @@ bvlc_alexnet()
     fn_tar="$bn_tar.tar.gz"
     fn_model="$bn_tar/model.onnx"
-    $get_url "$base_url$fn_tar"
+    http_get "$base_url$fn_tar"
     echo "extracting ..."
     tar xf "$fn_tar"
-    for npz in $bn_tar/*.npz
+    python -m onnx2fluid $convert_flags "$fn_model"
+    for npz in "$bn_tar"/*.npz
     do
         echo "converting $npz ..."
-        python convert_data_npz_0.py "$npz" "data_0" "prob_1"
-        python -m onnx2fluid $flags "$fn_model" -t $npz
+        python convert_data_npz_0.py "$npz" data_0 prob_1
+        python -m onnx2fluid.validation $validate_flags1 -t "$npz"
+        python -m onnx2fluid.validation $validate_flags2 -t "$npz"
     done
-    for pb_dir in $bn_tar/*/
+    for pb_dir in "$bn_tar"/*/
     do
         echo "converting $pb_dir ..."
-        python convert_data_pb_0.py "$pb_dir" "data_0" "prob_1"
-        python -m onnx2fluid $flags "$fn_model" -t $(dirname "$pb_dir/x").npz
+        python convert_data_pb_0.py "$pb_dir" data_0 prob_1
+        python -m onnx2fluid.validation $validate_flags1 -t $(dirname "$pb_dir/x").npz
+        python -m onnx2fluid.validation $validate_flags2 -t $(dirname "$pb_dir/x").npz
     done
 }
@@ -34,15 +44,17 @@ bvlc_googlenet()
     fn_tar="$bn_tar.tar.gz"
     fn_model="$bn_tar/model.onnx"
-    $get_url "$base_url$fn_tar"
+    http_get "$base_url$fn_tar"
     echo "extracting ..."
     tar xf "$fn_tar"
-    for pb_dir in $bn_tar/*/
+    python -m onnx2fluid $convert_flags "$fn_model"
+    for pb_dir in "$bn_tar"/*/
     do
         echo "converting $pb_dir"
-        python convert_data_pb_0.py "$pb_dir" "data_0" "prob_1"
-        python -m onnx2fluid $flags "$fn_model" -t $(dirname "$pb_dir/x").npz
+        python convert_data_pb_0.py "$pb_dir" data_0 prob_1
+        python -m onnx2fluid.validation $validate_flags1 -t $(dirname "$pb_dir/x").npz
+        python -m onnx2fluid.validation $validate_flags2 -t $(dirname "$pb_dir/x").npz
    done
 }
@@ -52,15 +64,17 @@ bvlc_reference_caffenet()
     fn_tar="$bn_tar.tar.gz"
     fn_model="$bn_tar/model.onnx"
-    $get_url "$base_url$fn_tar"
+    http_get "$base_url$fn_tar"
     echo "extracting ..."
     tar xf "$fn_tar"
-    for pb_dir in $bn_tar/*/
+    python -m onnx2fluid $convert_flags "$fn_model"
+    for pb_dir in "$bn_tar"/*/
     do
         echo "converting $pb_dir"
-        python convert_data_pb_0.py "$pb_dir" "data_0" "prob_1"
-        python -m onnx2fluid $flags "$fn_model" -t $(dirname "$pb_dir/x").npz
+        python convert_data_pb_0.py "$pb_dir" data_0 prob_1
+        python -m onnx2fluid.validation $validate_flags1 -t $(dirname "$pb_dir/x").npz
+        python -m onnx2fluid.validation $validate_flags2 -t $(dirname "$pb_dir/x").npz
    done
 }
@@ -70,15 +84,17 @@ bvlc_reference_rcnn_ilsvrc13()
     fn_tar="$bn_tar.tar.gz"
     fn_model="$bn_tar/model.onnx"
-    $get_url "$base_url$fn_tar"
+    http_get "$base_url$fn_tar"
     echo "extracting ..."
     tar xf "$fn_tar"
-    for pb_dir in $bn_tar/*/
+    python -m onnx2fluid $convert_flags "$fn_model"
+    for pb_dir in "$bn_tar"/*/
     do
         echo "converting $pb_dir"
-        python convert_data_pb_0.py "$pb_dir" "data_0" "fc_rcnn_1"
-        python -m onnx2fluid $flags "$fn_model" -t $(dirname "$pb_dir/x").npz
+        python convert_data_pb_0.py "$pb_dir" data_0 fc-rcnn_1
+        python -m onnx2fluid.validation $validate_flags1 -t $(dirname "$pb_dir/x").npz
+        python -m onnx2fluid.validation $validate_flags2 -t $(dirname "$pb_dir/x").npz
    done
 }
@@ -88,22 +104,24 @@ inception_v1()
     fn_tar="$bn_tar.tar.gz"
     fn_model="$bn_tar/model.onnx"
-    $get_url "$base_url$fn_tar"
+    http_get "$base_url$fn_tar"
     echo "extracting ..."
     tar xf "$fn_tar"
-    for npz in $bn_tar/*.npz
+    python -m onnx2fluid $convert_flags "$fn_model"
+    for npz in "$bn_tar"/*.npz
     do
         echo "converting $npz ..."
-        python convert_data_npz_0.py "$npz" "data_0" "prob_1"
-        python -m onnx2fluid $flags "$fn_model" -t $npz
+        python convert_data_npz_0.py "$npz" data_0 prob_1
+        python -m onnx2fluid.validation $validate_flags1 -t "$npz"
+        python -m onnx2fluid.validation $validate_flags2 -t "$npz"
     done
-    for pb_dir in $bn_tar/*/
+    for pb_dir in "$bn_tar"/*/
     do
-        echo "converting $pb_dir"
-        python convert_data_pb_0.py "$pb_dir" "data_0" "prob_1"
-        python -m onnx2fluid $flags "$fn_model" -t $(dirname "$pb_dir/x").npz
+        echo "converting $pb_dir ..."
+        python convert_data_pb_0.py "$pb_dir" data_0 prob_1
+        python -m onnx2fluid.validation $validate_flags1 -t $(dirname "$pb_dir/x").npz
+        python -m onnx2fluid.validation $validate_flags2 -t $(dirname "$pb_dir/x").npz
    done
 }
@@ -113,22 +131,24 @@ inception_v2()
     fn_tar="$bn_tar.tar.gz"
     fn_model="$bn_tar/model.onnx"
-    $get_url "$base_url$fn_tar"
+    http_get "$base_url$fn_tar"
     echo "extracting ..."
     tar xf "$fn_tar"
-    for npz in $bn_tar/*.npz
+    python -m onnx2fluid $convert_flags "$fn_model"
+    for npz in "$bn_tar"/*.npz
     do
         echo "converting $npz ..."
-        python convert_data_npz_0.py "$npz" "data_0" "prob_1"
-        python -m onnx2fluid $flags "$fn_model" -t $npz
+        python convert_data_npz_0.py "$npz" data_0 prob_1
+        python -m onnx2fluid.validation $validate_flags1 -t "$npz"
+        python -m onnx2fluid.validation $validate_flags2 -t "$npz"
     done
-    for pb_dir in $bn_tar/*/
+    for pb_dir in "$bn_tar"/*/
     do
-        echo "converting $pb_dir"
-        python convert_data_pb_0.py "$pb_dir" "data_0" "prob_1"
-        python -m onnx2fluid $flags "$fn_model" -t $(dirname "$pb_dir/x").npz
+        echo "converting $pb_dir ..."
+        python convert_data_pb_0.py "$pb_dir" data_0 prob_1
+        python -m onnx2fluid.validation $validate_flags1 -t $(dirname "$pb_dir/x").npz
+        python -m onnx2fluid.validation $validate_flags2 -t $(dirname "$pb_dir/x").npz
    done
 }
@@ -138,22 +158,24 @@ resnet50()
     fn_tar="$bn_tar.tar.gz"
     fn_model="$bn_tar/model.onnx"
-    $get_url "$base_url$fn_tar"
+    http_get "$base_url$fn_tar"
     echo "extracting ..."
     tar xf "$fn_tar"
-    for npz in $bn_tar/*.npz
+    python -m onnx2fluid $convert_flags "$fn_model"
+    for npz in "$bn_tar"/*.npz
     do
         echo "converting $npz ..."
-        python convert_data_npz_0.py "$npz" "gpu_0/data_0" "gpu_0/softmaxout_1"
-        python -m onnx2fluid $flags "$fn_model" -t $npz
+        python convert_data_npz_0.py "$npz" gpu_0/data_0 gpu_0/softmaxout_1
+        python -m onnx2fluid.validation $validate_flags1 -t "$npz"
+        python -m onnx2fluid.validation $validate_flags2 -t "$npz"
     done
-    for pb_dir in $bn_tar/*/
+    for pb_dir in "$bn_tar"/*/
     do
-        echo "converting $pb_dir"
-        python convert_data_pb_0.py "$pb_dir" "gpu_0/data_0" "gpu_0/softmaxout_1"
-        python -m onnx2fluid $flags "$fn_model" -t $(dirname "$pb_dir/x").npz
+        echo "converting $pb_dir ..."
+        python convert_data_pb_0.py "$pb_dir" gpu_0/data_0 gpu_0/softmaxout_1
+        python -m onnx2fluid.validation $validate_flags1 -t $(dirname "$pb_dir/x").npz
+        python -m onnx2fluid.validation $validate_flags2 -t $(dirname "$pb_dir/x").npz
    done
 }
@@ -163,15 +185,17 @@ shufflenet()
     fn_tar="$bn_tar.tar.gz"
     fn_model="$bn_tar/model.onnx"
-    $get_url "$base_url$fn_tar"
+    http_get "$base_url$fn_tar"
     echo "extracting ..."
     tar xf "$fn_tar"
-    for pb_dir in $bn_tar/*/
+    python -m onnx2fluid $convert_flags "$fn_model"
+    for pb_dir in "$bn_tar"/*/
     do
-        echo "converting $pb_dir"
-        python convert_data_pb_0.py "$pb_dir" "gpu_0/data_0" "gpu_0/softmaxout_1"
-        python -m onnx2fluid $flags "$fn_model" -t $(dirname "$pb_dir/x").npz
+        echo "converting $pb_dir ..."
+        python convert_data_pb_0.py "$pb_dir" gpu_0/data_0 gpu_0/softmaxout_1
+        python -m onnx2fluid.validation $validate_flags1 -t $(dirname "$pb_dir/x").npz
+        python -m onnx2fluid.validation $validate_flags2 -t $(dirname "$pb_dir/x").npz
    done
 }
@@ -181,15 +205,17 @@ squeezenet()
     fn_tar="$bn_tar.tar.gz"
     fn_model="$bn_tar/model.onnx"
-    $get_url "$base_url$fn_tar"
+    http_get "$base_url$fn_tar"
     echo "extracting ..."
     tar xf "$fn_tar"
-    for pb_dir in $bn_tar/*/
+    python -m onnx2fluid $convert_flags "$fn_model"
+    for pb_dir in "$bn_tar"/*/
     do
         echo "converting $pb_dir"
-        python convert_data_pb_0.py "$pb_dir" "data_0" "softmaxout_1"
-        python -m onnx2fluid $flags "$fn_model" -t $(dirname "$pb_dir/x").npz
+        python convert_data_pb_0.py "$pb_dir" data_0 softmaxout_1
+        python -m onnx2fluid.validation $validate_flags1 -t $(dirname "$pb_dir/x").npz
+        python -m onnx2fluid.validation $validate_flags2 -t $(dirname "$pb_dir/x").npz
    done
 }
@@ -199,15 +225,17 @@ tiny_yolov2()
     fn_tar="$bn_tar.tar.gz"
     fn_model="$bn_tar/model.onnx"
-    $get_url "https://onnxzoo.blob.core.windows.net/models/opset_8/tiny_yolov2/$fn_tar"
+    http_get "https://onnxzoo.blob.core.windows.net/models/opset_8/tiny_yolov2/$fn_tar"
     echo "extracting ..."
     tar xf "$fn_tar"
-    for pb_dir in $bn_tar/*/
+    python -m onnx2fluid $convert_flags "$fn_model" -xy
+    for pb_dir in "$bn_tar"/*/
     do
         echo "converting $pb_dir"
-        python convert_data_pb_0.py "$pb_dir" "image" "grid"
-        python -m onnx2fluid $flags "$fn_model" -t $(dirname "$pb_dir/x").npz -x
+        python convert_data_pb_0.py "$pb_dir" image grid
+        python -m onnx2fluid.validation $validate_flags1 -t $(dirname "$pb_dir/x").npz
+        python -m onnx2fluid.validation $validate_flags2 -t $(dirname "$pb_dir/x").npz
    done
 }
@@ -217,15 +245,17 @@ vgg19()
     fn_tar="$bn_tar.tar.gz"
     fn_model="$bn_tar/model.onnx"
-    $get_url "$base_url$fn_tar"
+    http_get "$base_url$fn_tar"
     echo "extracting ..."
     tar xf "$fn_tar"
-    for pb_dir in $bn_tar/*/
+    python -m onnx2fluid $convert_flags "$fn_model"
+    for pb_dir in "$bn_tar"/*/
     do
         echo "converting $pb_dir"
-        python convert_data_pb_0.py "$pb_dir" "data_0" "prob_1"
-        python -m onnx2fluid $flags "$fn_model" -t $(dirname "$pb_dir/x").npz
+        python convert_data_pb_0.py "$pb_dir" data_0 prob_1
+        python -m onnx2fluid.validation $validate_flags1 -t $(dirname "$pb_dir/x").npz
+        python -m onnx2fluid.validation $validate_flags2 -t $(dirname "$pb_dir/x").npz
    done
 }
@@ -235,15 +265,17 @@ zfnet512()
     fn_tar="$bn_tar.tar.gz"
     fn_model="$bn_tar/model.onnx"
-    $get_url "$base_url$fn_tar"
+    http_get "$base_url$fn_tar"
     echo "extracting ..."
     tar xf "$fn_tar"
-    for pb_dir in $bn_tar/*/
+    python -m onnx2fluid $convert_flags "$fn_model"
+    for pb_dir in "$bn_tar"/*/
     do
         echo "converting $pb_dir"
-        python convert_data_pb_0.py "$pb_dir" "gpu_0/data_0" "gpu_0/softmax_1"
-        python -m onnx2fluid $flags "$fn_model" -t $(dirname "$pb_dir/x").npz
+        python convert_data_pb_0.py "$pb_dir" gpu_0/data_0 gpu_0/softmax_1
+        python -m onnx2fluid.validation $validate_flags1 -t $(dirname "$pb_dir/x").npz
+        python -m onnx2fluid.validation $validate_flags2 -t $(dirname "$pb_dir/x").npz
    done
 }
@@ -69,11 +69,27 @@ parser.add_argument(
     dest='pedantic',
     help='process non-standard ONNX ops, this may lead to fails',
 )
+parser.add_argument(
+    '--skip-version-conversion',
+    '-y',
+    action='store_true',
+    default=False,
+    help='skip ONNX op version conversion, workaround for RumtimeErrors',
+)
+parser.add_argument(
+    '--archive',
+    '-z',
+    nargs='?',
+    type=str,
+    default=None,
+    const='',
+    help='compress outputs to ZIP file if conversion successed',
+)
 parser.add_argument(
     '--precision',
     '-p',
     type=int,
-    default=4,
+    default=3,
     help='assertion decimal for validation',
 )
 args = parser.parse_args()
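Review note: the new `--archive`/`-z` option relies on argparse's `nargs='?'` plus `const`/`default` to distinguish three cases. A minimal standalone sketch of that standard argparse behavior (the toy parser below only mirrors the option from the diff):

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--archive', '-z', nargs='?', type=str, default=None, const='')

assert parser.parse_args([]).archive is None       # flag absent -> no ZIP written
assert parser.parse_args(['-z']).archive == ''     # bare flag -> '' (main() derives '<save_dir>.zip')
assert parser.parse_args(['-z', 'out.zip']).archive == 'out.zip'  # explicit path used as-is
```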
@@ -16,10 +16,10 @@ from __future__ import division
 from __future__ import print_function
 from __future__ import unicode_literals
-# import logging, shutil, zipfile
-import logging
-import shutil
-import zipfile
+import logging, shutil, zipfile
+#import logging
+#import shutil
+#import zipfile
 __all__ = [
     'main',
@@ -49,12 +49,14 @@ def main(**kwargs):
     basepath, _ = shutil.os.path.splitext(filename)
     save_dir = kwargs.get('output_dir', '')
     # model.onnx -> model/
-    save_dir = shutil.os.path.dirname(save_dir) if save_dir else basepath
+    save_dir = (save_dir.rstrip('/') if save_dir else basepath) + '/'
     model_basename = DEFAULT_MODEL_MODULE + '.py'
     model_func_name = DEFAULT_MODEL_FUNC
     embed_params = kwargs.get('embed_params', False)
     onnx_opset_version = DEFAULT_ONNX_OPSET_VERSION
     onnx_opset_pedantic = kwargs.get('pedantic', True)
+    onnx_skip_version_conversion = kwargs.get('skip_version_conversion', False)
+    archive = kwargs.get('archive', None)
     # convert
     convert(
@@ -65,6 +67,7 @@ def main(**kwargs):
         embed_params=embed_params,
         onnx_opset_version=onnx_opset_version,
         onnx_opset_pedantic=onnx_opset_pedantic,
+        onnx_skip_version_conversion=onnx_skip_version_conversion,
         debug=debug)
     # validate
@@ -81,13 +84,13 @@ def main(**kwargs):
     # in fact fluid can not fully clear the context
     # continuous validation may be inaccurate
-    precision = 10**-kwargs.get('precision', 4)
+    decimal = kwargs.get('precision', 3)
     logger.info('starting validation on desc ...')
     passed &= validate(
         shutil.os.path.join(save_dir, '__model__'),
         golden_data_filename,
-        precision=precision,
+        decimal=decimal,
     )
     logger.info('starting validation on code ...')
@@ -95,7 +98,7 @@ def main(**kwargs):
         shutil.os.path.join(save_dir, model_basename),
         golden_data_filename,
         model_func_name=model_func_name,
-        precision=precision,
+        decimal=decimal,
         save_inference_model=debug,  # this overwrite desc file for test
     )
@@ -104,13 +107,21 @@ def main(**kwargs):
         return
     # create zip file
-    fn_zip = save_dir.rstrip('/') + '.zip'
-    logger.info('compressing file to %s ...', fn_zip)
-    fz = zipfile.ZipFile(fn_zip, 'w', compression=zipfile.ZIP_LZMA)
-    for fn in shutil.os.listdir(save_dir):
-        fz.write(shutil.os.path.join(save_dir, fn), arcname=fn)
-    fz.close()
-    logger.info('compressing done')
+    if archive is not None:
+        if archive == '':
+            archive = save_dir.rstrip('/') + '.zip'
+        logger.info('compressing file to %s ...', archive)
+        shutil.sys.stderr.write('\n')
+        shutil.sys.stderr.flush()
+        file_list = shutil.os.listdir(save_dir)
+        fz = zipfile.ZipFile(archive, 'w', compression=zipfile.ZIP_LZMA)
+        for idx, fn in enumerate(file_list):
+            shutil.sys.stderr.write('\033[F\033[2K')
+            logger.info('file {}/{}: {}'.format(idx + 1, len(file_list), fn))
+            shutil.sys.stderr.flush()
+            fz.write(shutil.os.path.join(save_dir, fn), arcname=fn)
+        fz.close()
+        logger.info('compressing done')
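Review note: the `'\033[F\033[2K'` writes in the new compression loop are ANSI escapes (cursor up one line, then erase the whole line), so each per-file message overwrites the previous one instead of scrolling the terminal. A minimal standalone sketch of the same pattern (file names below are stand-ins):

```python
import sys
import time

sys.stderr.write('\n')  # reserve one line to overwrite, as the diff does
for idx, fn in enumerate(['a.bin', 'b.bin', 'c.bin']):
    sys.stderr.write('\033[F\033[2K')  # cursor up + erase line
    sys.stderr.write('file {}/3: {}\n'.format(idx + 1, fn))
    sys.stderr.flush()
    time.sleep(0.1)  # simulate per-file work
```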
 if __name__ == '__main__':
@@ -120,17 +131,19 @@ if __name__ == '__main__':
         level=logging.DEBUG,
     )
-    # main(model=['../examples/t5.onnx'],
-    #      output_dir='/tmp/export/',
-    #      embed_params=False,
-    #      pedantic=False,
-    #      test_data='../examples/t5.npz',
-    #      debug=True)
+    main(
+        model=['../examples/t1.onnx'],
+        output_dir='/tmp/export/',
+        embed_params=False,
+        pedantic=False,
+        test_data='../examples/t1.npz',
+        debug=True)
     main(
         model=['../examples/inception_v2/model.onnx'],
         output_dir='/tmp/export/',
         embed_params=True,
         pedantic=False,
+        skip_version_conversion=False,
         test_data='../examples/inception_v2/test_data_set_2.npz',
         debug=True)
@@ -8,9 +8,9 @@ Created on Mon Feb 25 09:50:35 2019
 from __future__ import division
-# import logging, shutil
-import logging
-import shutil
+import logging, shutil
+#import logging
+#import shutil
 __all__ = [
     'convert',
@@ -24,6 +24,7 @@ def convert(onnx_model_filename,
             embed_params=False,
             onnx_opset_version=9,
             onnx_opset_pedantic=True,
+            onnx_skip_version_conversion=False,
             debug=False):
     """
     convert an ONNX model to Paddle fluid Python code and desc pb
@@ -60,12 +61,13 @@ def convert(onnx_model_filename,
     try:
         logger.info('checking model ...')
         check_model(onnx_model)
-        logger.debug('using opset version: %d', onnx_opset_version)
-        if onnx_opset_pedantic:  # WORKAROUND: RuntimeError: No Adapter For OP
-            onnx_model = convert_version(onnx_model, onnx_opset_version)
-        else:  # TODO: add new argument for this option
+        if onnx_skip_version_conversion:  # WORKAROUND: RuntimeError: No Adapter For OP
+            logger.debug('assumed opset version: %d', onnx_opset_version)
             logger.warning(
                 'opset conversion skipped for onnx_opset_pedantic is OFF')
+        else:
+            logger.debug('using opset version: %d', onnx_opset_version)
+            onnx_model = convert_version(onnx_model, onnx_opset_version)
         onnx_model = polish_model(onnx_model)
     except ValidationError as e:
         if onnx_opset_pedantic:
@@ -152,16 +154,15 @@ def convert(onnx_model_filename,
             logger.info(
                 'weight %s is shared between ops, more disk space will be consumed',
                 name)
-            logger.debug(
-                'saving weight %s with size of %d, in %d bytes, as %s ...',
-                name, weight.size, weight.nbytes, var_names)
+            logger.debug('saving weight %s(%s[%d], %dB) as %s ...', name,
+                         weight.dtype, weight.size, weight.nbytes, var_names)
             for var_name in var_names:  # multiple references
                 fluid_writer.write_weight(
                     weight, shutil.os.path.join(save_dir, var_name))
         else:
-            logger.debug(
-                'saving weight %s with size of %d, in %d bytes, to %s ...',
-                name, weight.size, weight.nbytes, make_var_name(name))
+            logger.debug('saving weight %s(%s[%d], %dB) to %s ...', name,
+                         weight.dtype, weight.size, weight.nbytes,
+                         make_var_name(name))
             fluid_writer.write_weight(
                 weight, shutil.os.path.join(save_dir, make_var_name(name)))
         fluid_writer.emit_param(fluid_program, name, value_info)
@@ -262,6 +263,13 @@ if __name__ == '__main__':
         dest='pedantic',
         help='process non-standard ONNX ops, this may lead to fails',
     )
+    parser.add_argument(
+        '--skip-version-conversion',
+        '-y',
+        action='store_true',
+        default=False,
+        help='skip ONNX op version conversion, workaround for RumtimeErrors',
+    )
     args = parser.parse_args()
     logging_format = '[%(levelname)8s]%(name)s::%(funcName)s:%(lineno)04d: %(message)s'
@@ -273,10 +281,12 @@ if __name__ == '__main__':
     save_dir = args.output_dir
     embed_params = args.embed_params
     pedantic = args.pedantic
+    skip_version_conversion = args.skip_version_conversion
     convert(
         model_filename,
         save_dir,
         embed_params=embed_params,
         onnx_opset_pedantic=pedantic,
+        onnx_skip_version_conversion=skip_version_conversion,
         debug=debug)
@@ -26,6 +26,7 @@ __all__ = [
     'node_attrs',
     'node_topo',
     'node_iter',
+    'tensor_dtype',
     'tensor_shape',
     'graph_ops',
     'graph_weights',
@@ -92,13 +93,12 @@ def get_attribute_value2(attr):
     return value
-def node_attrs(node):
+def tensor_dtype(tensor):
     """
-    convert ONNX node attributes to dict
+    get ONNX tensor in np.dtype
     """
-    return {attr.name: get_attribute_value2(attr)
-            for attr in node.attribute}  # dict
+    return TENSOR_TYPE_TO_NP_TYPE[tensor.type.tensor_type.elem_type]
 def tensor_shape(tensor):
@@ -109,6 +109,15 @@ def tensor_shape(tensor):
     return [dim.dim_value for dim in tensor.type.tensor_type.shape.dim]
+def node_attrs(node):
+    """
+    convert ONNX node attributes to dict
+    """
+    return {attr.name: get_attribute_value2(attr)
+            for attr in node.attribute}  # dict
 def node_topo(nodes, topo='default'):
     """
     build indices with given topology to an ONNX node graph
@@ -237,21 +246,21 @@ def inferred_model_value_info(model):
     value_info = Dict()
     for item in graph.value_info:
         value_info[item.name] = dict(
-            dtype=TENSOR_TYPE_TO_NP_TYPE[item.type.tensor_type.elem_type],
+            dtype=tensor_dtype(item),
             shape=tensor_shape(item),
             external=False,
         )
     for item in graph.input:
         assert item.name not in value_info
         value_info[item.name] = dict(
-            dtype=TENSOR_TYPE_TO_NP_TYPE[item.type.tensor_type.elem_type],
+            dtype=tensor_dtype(item),
             shape=tensor_shape(item),
             external=True,
         )
     for item in graph.output:
         # assert item.name not in value_info, 'bypass-model not supported'
         value_info[item.name] = dict(
-            dtype=TENSOR_TYPE_TO_NP_TYPE[item.type.tensor_type.elem_type],
+            dtype=tensor_dtype(item),
             shape=tensor_shape(item),
             external=True,
         )
@@ -373,9 +382,9 @@ def optimize_model_strip_initializer(model, keep_input_only=True):
         elif not keep_input_only and name in output_refs:
             ret_initializers.add().CopyFrom(initializer)
         else:
-            logger.debug('initializer %s(%s[%d]) stripped', name,
-                         TENSOR_TYPE_TO_NP_TYPE[initializer.data_type],
-                         len(initializer.raw_data))
+            dtype = TENSOR_TYPE_TO_NP_TYPE[initializer.data_type]
+            logger.debug('initializer %s(%s[%d]) stripped', name, dtype,
+                         len(initializer.raw_data) // dtype.itemsize)
     # strip inputs
     ret.graph.ClearField('input')
@@ -385,10 +394,8 @@ def optimize_model_strip_initializer(model, keep_input_only=True):
         if name in input_refs or name in out_names:
             ret_inputs.add().CopyFrom(item)
         else:
-            logger.debug(
-                'input %s(%s%s) stripped', name,
-                TENSOR_TYPE_TO_NP_TYPE[item.type.tensor_type.elem_type],
-                tensor_shape(item))
+            logger.debug('input %s(%s%s) stripped', name, tensor_dtype(item),
+                         tensor_shape(item))
     return ret
@@ -19,7 +19,7 @@ from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
 _logger = _logging.getLogger(__name__)
 ONNX_INT_MAX = 2**63 - 1
-FLUID_INT_MAX = 2**31 - 1
+FLUID_INT_MAX = 2**31 - 1  #
 DEFAULT_ONNX_OP_DOMAIN = ''
 DEFAULT_FLUID_OP_NAMESCOPE = '/'
@@ -186,13 +186,17 @@ def _shape_or_none(value_infos, val_name):
     return list(value_info['shape'])
-#def _maybe_const_value(value_infos, val_name):
-#    var_name = _make_var_name(val_name)
-#    if val_name not in value_infos:
-#        return var_name
-#    value_info = value_infos[val_name]
-#    assert value_info.get('remove_batch', False) == False, 'const value should not have batch dim'
-#    return value_info.get('const_value', var_name)
+def _const_weight_or_none(value_infos, val_name):
+    if val_name not in value_infos:
+        return None
+    value_info = value_infos[val_name]
+    const_value = value_info.get('const_value', None)
+    if const_value:
+        return const_value
+    get_weight_func = value_info.get('get_weight', None)
+    if get_weight_func:
+        return get_weight_func()
+    return None
 def _default(prog, op_type, inputs, outputs, attrs, *args, name='', **kwargs):
@@ -246,20 +250,17 @@ def _default(prog, op_type, inputs, outputs, attrs, *args, name='', **kwargs):
                 arg_name,
             ))
-    for var_out in var_outs:
-        prog.VarDesc(var_out)
     # dummy var_out
     num_vars = len(var_outs)
     num_args = len(fluid_output_args)
     if num_vars < num_args:
-        assert fill_name_field, 'name required to naming dummy output variable'
+        assert fill_name_field, 'name required to name dummy output variables'
         for idx_out in range(num_vars, num_args):
-            var_out = _make_var_name(name + '.' +
-                                     fluid_output_args[idx_out].lower())
+            var_out = name + '.' + fluid_output_args[idx_out]  # dummy output
             var_outs.append(var_out)
-            prog.VarDesc(var_out)
+    for var_out in var_outs:
+        prog.VarDesc(var_out)
     prog.OpDesc(fluid_op, (var_inps, *fluid_input_args),
                 (var_outs, *fluid_output_args), fluid_attrs)
@@ -294,11 +295,10 @@ def _pad_if_asymmetric(prog, pads, val_name, value_infos):  # pads: SSEE
         if pads[idx_dim] != pads[ndims + idx_dim]:
             symmetric = False
             break
     if symmetric:
-        return pads[:ndims], None
+        return pads[:ndims], val_name
-    val_padded = val_name + '_padded'
+    val_padded = val_name + '_padded'  # explicit variable
     prog.Op(
         '',
         'Pad',
@@ -315,13 +315,7 @@ def _pad_if_asymmetric(prog, pads, val_name, value_infos):  # pads: SSEE
     return [0] * ndims, val_padded
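Review note: ONNX `pads` are given in SSEE order (all start pads first, then all end pads), which is what the `pads[idx_dim] != pads[ndims + idx_dim]` comparison above checks. A minimal sketch of the symmetric/asymmetric split (pad values below are made up):

```python
def is_symmetric(pads):
    # pads = [s0, s1, ..., e0, e1, ...] (SSEE); symmetric iff start == end per dim
    ndims = len(pads) // 2
    return all(pads[i] == pads[ndims + i] for i in range(ndims))

assert is_symmetric([1, 1, 1, 1])      # can be passed as paddings=[1, 1] directly
assert not is_symmetric([0, 0, 1, 1])  # needs the explicit Pad op inserted above
```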
-def _adaptive_pool(prog,
-                   pool_type,
-                   inputs,
-                   outputs,
-                   attrs,
-                   value_infos,
-                   name=''):
+def _adaptive_pool(prog, pool_type, inputs, outputs, attrs, name=''):
     # I/O
     val_x, = inputs
     val_y, = outputs[:1]
@@ -335,10 +329,6 @@ def _adaptive_pool(prog,
     # interpretation
     pool_size = attrs['output_size']  # required
-    output_shape = _shape_or_none(value_infos, val_y)
-    if output_shape is not None:
-        assert pool_size == output_shape[
-            2:], 'pool_size unmatches shape of Y'  # NC...
     poolnd = len(pool_size)
     assert 2 <= poolnd <= 3, 'only pool2d and pool3d supported'
@@ -420,6 +410,7 @@ def _global_pool(prog, pool_type, inputs, outputs, attrs, value_infos, name=''):
             global_pooling=True,
             adaptive=False,
             pooling_type=pool_type,
+            ksize=[-1, -1],
         ),
     )
@@ -445,11 +436,9 @@ def _pool(prog, pool_type, inputs, outputs, attrs, value_infos, name=''):
     fluid_op = 'pool{}d'.format(poolnd)
     strides = attrs.get('strides', [1] * poolnd)  # optional
-    pads = attrs.get('pads', [0] * len(pool_size * 2))  # optional
-    paddings, val_x_padded = _pad_if_asymmetric(prog, pads, val_x, value_infos)
-    if val_x_padded:
-        val_x = val_x_padded
     ceil_mode = bool(attrs.get('ceil_mode', 0))  # optional
+    pads = attrs.get('pads', [0] * (poolnd * 2))  # optional
+    paddings, val_x = _pad_if_asymmetric(prog, pads, val_x, value_infos)
     var_x = _make_var_name(val_x)
     name_attr = ', name={}'.format(repr(name)) if name else ''
@@ -506,17 +495,17 @@ def _roi_pool(prog, fluid_op, inputs, outputs, attrs, value_infos, name):
     spatial_scale = attrs['spatial_scale']  # required
     pooled_height, pooled_width = attrs['pooled_shape']  # required
     od_attrs = dict(
+        spatial_scale=spatial_scale,
         pooled_height=pooled_height,
         pooled_width=pooled_width,
-        spatial_scale=spatial_scale,
     )
     feature_attr = ''
     is_max_pool = fluid_op == 'roi_pool'
-    if 'sampling_ratio' in attrs:
+    if 'sampling_ratio' in attrs:  #
         sampling_ratio = attrs['sampling_ratio']
         od_attrs['sampling_ratio'] = sampling_ratio
         feature_attr += ', sampling_ratio={}'.format(sampling_ratio)
-    if 'output_channels' in attrs:
+    if 'output_channels' in attrs:  #
         output_channels = attrs['output_channels']
         od_attrs['output_channels'] = output_channels
         feature_attr += ', output_channels={}'.format(output_channels)
@@ -560,36 +549,20 @@ def _zeros_like(prog, val_ref, val_out, value_infos):
     )
-def AdaptiveAveragePool(prog,
-                        inputs,
-                        outputs,
-                        attrs,
-                        value_infos,
-                        name='',
-                        *args,
-                        **kwargs):
+def AdaptiveAveragePool(prog, inputs, outputs, attrs, *args, name='', **kwargs):
     """
     aten::adaptive_avg_poolnd
     """
-    return _adaptive_pool(
-        prog, 'avg', inputs, outputs, attrs, value_infos, name=name)
+    return _adaptive_pool(prog, 'avg', inputs, outputs, attrs, name=name)
-def AdaptiveMaxPool(prog,
-                    inputs,
-                    outputs,
-                    attrs,
-                    value_infos,
-                    name='',
-                    *args,
-                    **kwargs):
+def AdaptiveMaxPool(prog, inputs, outputs, attrs, *args, name='', **kwargs):
     """
     aten::adaptive_max_poolnd
     """
-    return _adaptive_pool(
-        prog, 'max', inputs, outputs, attrs, value_infos, name=name)
+    return _adaptive_pool(prog, 'max', inputs, outputs, attrs, name=name)
 def AveragePool(prog,
@@ -661,6 +634,8 @@ def BatchNormalization(prog,
     val_y, = outputs
     var_x = _make_var_name(val_x)
     var_y = _make_var_name(val_y)
+    var_saved_mean = name + '.saved_mean'  # dummy output
+    var_saved_variance = name + '.saved_variance'  # dummy output
     # interpretation
     fluid_op = 'batch_norm'
@@ -669,10 +644,10 @@ def BatchNormalization(prog,
     name_attr = ', name={}'.format(repr(name)) if name else ''
     if embed_params:
         assert name != ''
-        var_scale = '{}.w_0'.format(name)
-        var_b = '{}.b_0'.format(name)
-        var_mean = '{}.w_1'.format(name)
-        var_var = '{}.w_2'.format(name)
+        var_scale = name + '.w_0'
+        var_b = name + '.b_0'
+        var_mean = name + '.w_1'
+        var_var = name + '.w_2'
         value_infos[val_scale].setdefault('embeded_as', []).append(var_scale)
         value_infos[val_b].setdefault('embeded_as', []).append(var_b)
         value_infos[val_mean].setdefault('embeded_as', []).append(var_mean)
@@ -687,8 +662,6 @@ def BatchNormalization(prog,
                      ', moving_mean_name={}, moving_variance_name={}').format(
                          repr(var_scale), repr(var_b), repr(var_mean),
                          repr(var_var))
-    var_saved_mean = '{}.saved_mean'.format(name)  # dropped var
-    var_saved_variance = '{}.saved_variance'.format(name)  # dropped var
     # generation
     prog.Code('{} = layers.{}({}, is_test=True, data_layout="NCHW"'
@@ -734,9 +707,9 @@ def Cast(prog, inputs, outputs, attrs, value_infos, *args, **kwargs):
     var_output = _make_var_name(val_output)
     # interpretation
-    dtype = attrs['to']
-    if not isinstance(dtype, np.dtype):
-        dtype = TENSOR_TYPE_TO_NP_TYPE[dtype]  # required
+    dtype = attrs['to']  # required
+    if not isinstance(dtype, np.dtype):  # additional: possible np.dtype
+        dtype = TENSOR_TYPE_TO_NP_TYPE[dtype]
     output_dtype = _dtype_or_none(value_infos, val_output)
     if output_dtype:
         assert dtype == output_dtype, 'dtype of to unmatches output'
@@ -818,18 +791,20 @@ def Constant(prog, inputs, outputs, attrs, value_infos, *args, **kwargs):
         assert dtype == output_dtype, 'tensor dtype unmatches storage dtype'
-    # dtype = np.dtype('float32')  # force to float32
-    shape = attrs.get('shape', None)  # additional, maybe var_name
+    # dtype = np.dtype('float32')  # HINT: force to float32
+    shape = attrs.get('shape', None)  #
     if shape is None:
         shape = _shape_or_none(value_infos, val_output)
     if shape is None:
         shape = list(value.shape)
         _logger.warning(
-            'shape of %s not inferred, using value as 1-D tensor may lead to fails',
-            val_output)
+            'in (Constant -> %s): '
+            'shape of %s not inferred, '
+            'using value as 1-D tensor may lead to fails', outputs, val_output)
     # generation
     if value.size == 1:  # scalar
+        value = value[0]
         fluid_op = 'fill_constant'
         prog.Code('{} = layers.{}(shape={}, dtype={}, value={})'.format(
             var_output,
@@ -837,12 +812,22 @@ def Constant(prog, inputs, outputs, attrs, value_infos, *args, **kwargs):
             # attrs
             shape,
             repr(dtype.name),
-            value[0],  # shape can be list or var_name
+            value,
         ))
-        value_infos[val_output]['const_value'] = value[0]
+        value_infos[val_output]['const_value'] = value
         prog.VarDesc(var_output)
+        prog.OpDesc(
+            fluid_op,
+            ([], ),
+            ([var_output], 'Out'),
+            dict(
+                shape=shape,
+                dtype=dtype.name,
+                value=value,
+            ),
+        )
     else:  # list parameter -> const_value
-        prog.Code('{} = {}'.format(
+        prog.Code('# {} = {} # passed directly as literal'.format(
             var_output,
             value.tolist(),
         ))
@@ -855,18 +840,29 @@ def ConstantOfShape(prog, inputs, outputs, attrs, value_infos, *args, **kwargs):
     """
     # I/O
-    val_input, = inputs
+    val_shape, = inputs
+    val_output, = outputs
+    var_shape = _make_var_name(val_shape)
-    is_const_shape = 'const_value' in value_infos[val_input]
-    if is_const_shape:
-        shape = _make_var_name(val_input)
-    else:
-        shape = value_infos[val_input]['get_weight']()
+    shape = _const_weight_or_none(value_infos, val_shape)
+    if shape is None:
+        shape = _shape_or_none(value_infos, val_output)
+    assert shape is not None, (
+        'given shape is neither const value nor deductible from output, '
+        'this is not supported')
     dtype = attrs['value'].dtype
     attrs = attrs.copy()
-    attrs.update(dict(shape=shape, dtype=dtype))  # pass var_name
-    Constant(prog, [], outputs, attrs, value_infos)
+    attrs.update(dict(shape=shape, dtype=dtype))  # pass const
+    prog.Code('# shape:{}={} # const as literal'.format(var_shape, shape))
+    prog.Op(
+        '',
+        'Constant',
+        [],
+        outputs,  # val
+        attrs,
+        value_infos,
+    )
 def Conv(prog,
@@ -903,21 +899,19 @@ def Conv(prog,
     num_out_channels = _shape(value_infos, val_w)[0]  # OI...
     fluid_op = 'conv{}d'.format(convnd)
+    num_groups = attrs.get('group', 1)  # optional
     strides = attrs.get('strides', [1] * convnd)  # optional
-    pads = attrs.get('pads', [0] * convnd * 2)  # optional
-    paddings, val_x_padded = _pad_if_asymmetric(prog, pads, val_x, value_infos)
-    if val_x_padded:
-        val_x = val_x_padded
     dilations = attrs.get('dilations', [1] * convnd)  # optional
-    num_groups = attrs.get('group', 1)  # optional
+    pads = attrs.get('pads', [0] * (convnd * 2))  # optional
+    paddings, val_x = _pad_if_asymmetric(prog, pads, val_x, value_infos)
     var_x = _make_var_name(val_x)
     name_attr = ', name={}'.format(repr(name)) if name else ''
     if embed_params:
         assert name != ''
-        var_w = '{}.w_0'.format(name)
+        var_w = name + '.w_0'
         value_infos[val_w].setdefault('embeded_as', []).append(var_w)
         if has_bias:
-            var_b = '{}.b_0'.format(name)
+            var_b = name + '.b_0'
             value_infos[val_b].setdefault('embeded_as', []).append(var_b)
         param_attr = ''
     else:
@@ -951,7 +945,7 @@ def Conv(prog,
         param_attr,
         name_attr,
     ))
-    var_conv = _make_var_name(name + '.conv')  # hidden variable
+    var_conv = name + '.conv'  # hidden variable
     prog.OpDesc(
         fluid_op,
         ([var_x, var_w], 'Input', 'Filter'),  # , 'Bias', 'ResidualData'
@@ -967,8 +961,8 @@ def Conv(prog,
         prog.IntermediateOp(
             '',
             'Add',
-            [var_conv, var_b],
-            [var_y],  # var
+            [var_conv, var_b],  #
+            [val_y],
             dict(axis=1),
             value_infos=value_infos,
             name=(name + '.bias'),
@@ -1014,21 +1008,19 @@ def ConvTranspose(prog,
     num_out_channels = _shape(value_infos, val_w)[1]  # IO...
     fluid_op = 'conv{}d_transpose'.format(convnd)
+    num_groups = attrs.get('group', 1)  # optional
     strides = attrs.get('strides', [1] * convnd)  # optional
-    pads = attrs.get('pads', [0] * convnd * 2)  # optional
-    paddings, val_x_padded = _pad_if_asymmetric(prog, pads, val_x, value_infos)
-    if val_x_padded:
-        val_x = val_x_padded
     dilations = attrs.get('dilations', [1] * convnd)  # optional
-    num_groups = attrs.get('group', 1)  # optional
+    pads = attrs.get('pads', [0] * (convnd * 2))  # optional
+    paddings, val_x = _pad_if_asymmetric(prog, pads, val_x, value_infos)
     var_x = _make_var_name(val_x)
     name_attr = ', name={}'.format(repr(name)) if name else ''
     if embed_params:
         assert name != ''
-        var_w = '{}.w_0'.format(name)
+        var_w = name + '.w_0'
         value_infos[val_w].setdefault('embeded_as', []).append(var_w)
         if has_bias:
-            var_b = '{}.b_0'.format(name)
+            var_b = name + '.b_0'
             value_infos[val_b].setdefault('embeded_as', []).append(var_b)
         param_attr = ''
     else:
@@ -1063,7 +1055,7 @@ def ConvTranspose(prog,
         param_attr,
         name_attr,
    ))
-    var_conv = _make_var_name(name + '.conv')  # hidden variable
+    var_conv = name + '.conv'  # hidden variable
     prog.OpDesc(
         fluid_op,
         ([var_x, var_w], 'Input', 'Filter'),  # , 'Bias', 'ResidualData'
@@ -1080,8 +1072,8 @@ def ConvTranspose(prog,
         prog.IntermediateOp(
             '',
             'Add',
-            [var_conv, var_b],
-            [var_y],  # var
+            [var_conv, var_b],  #
+            [val_y],
             dict(axis=1),
             value_infos=value_infos,
             name=(name + '.bias'),
@@ -1090,7 +1082,7 @@ def ConvTranspose(prog,
     prog.VarDesc(var_y)
-# should not appears
+# should not appear
 #def Dropout(
 #        prog, inputs, outputs, value_infos,
 #        *args, **kwargs):
@@ -1154,10 +1146,16 @@ def Gemm(prog, inputs, outputs, attrs, value_infos, name, *args, **kwargs):
         else:
             val_beta = name + '_beta'  # explicit variable
             val_vm = name + '_vm'  # explicit variable
-            vm_dtype = _dtype_or_none(value_infos, val_c)
-            if vm_dtype is None:
-                vm_dtype = np.dtype('float32')
-            beta = np.dtype(vm_dtype).type(beta)
+            if beta.is_integer():
+                vm_dtype = _dtype_or_none(value_infos, val_c)
+                if vm_dtype is None:
+                    vm_dtype = np.dtype('float32')
+                    _logger.warning(
+                        'in %s(%s -> Gemm -> %s): '
+                        'beta seems to be an interger, '
+                        'however dtype can not be inferred, '
+                        'still use float32', name, inputs, outputs)
+                beta = np.dtype(vm_dtype).type(beta)
             prog.Op(
                 '',
                 'Constant',
@@ -1426,19 +1424,22 @@ def Reshape(prog, inputs, outputs, attrs, value_infos, name, *args, **kwargs):
     val_data, val_shape = inputs
     val_reshaped, = outputs
     var_data = _make_var_name(val_data)
+    var_shape = _make_var_name(val_shape)
     var_reshaped = _make_var_name(val_reshaped)
     # interpretation
+    shape = _const_weight_or_none(value_infos, val_shape)
+    is_const_shape = shape and 'const_value' in value_infos[val_shape]
+    if shape is None:
+        shape = _shape_or_none(value_infos, val_reshaped)
+    assert shape is not None, (
+        'given shape is neither const value nor deductible from output, '
+        'this is not supported')
     fluid_op = 'reshape'
-    is_const_shape = 'const_value' in value_infos[val_shape]
-    var_shape = _make_var_name(val_shape)  # for code
-    if is_const_shape:
-        shape = value_infos[val_shape]['const_value']  # for desc
-    else:
-        shape = value_infos[val_shape]['get_weight']()  # for desc
     name_attr = ', name={}'.format(repr(name)) if name else ''
     # generation
+    prog.Code('# shape:{}={} # const as literal'.format(var_shape, shape))
     if is_const_shape:
         prog.Code('{} = layers.{}({}'
                   ', shape={}'
@@ -1447,17 +1448,18 @@ def Reshape(prog, inputs, outputs, attrs, value_infos, name, *args, **kwargs):
                       fluid_op,
                       var_data,
                       # attrs
-                      var_shape,
+                      shape,
                       name_attr,
                   ))
     else:
-        var_shape_int32 = var_shape + '_int32'
+        val_shape_int32 = val_shape + '_int32'  # explicit variable
+        var_shape_int32 = _make_var_name(val_shape_int32)
         prog.Op(
             '',
             'Cast',
-            [var_shape],
-            [var_shape_int32],  # var
-            dict(to=np.dtype('int32')),
+            [val_shape],
+            [val_shape_int32],  # var
+            dict(to=np.dtype('int32')),  # use np.dtype
             value_infos=value_infos,
             name=(name + '_cast'),
         )
@@ -1474,7 +1476,7 @@ def Reshape(prog, inputs, outputs, attrs, value_infos, name, *args, **kwargs):
                       name_attr,
                   ))
     fluid_op = 'reshape2'
-    var_xshape = _make_var_name(name + '.xshape')
+    var_xshape = name + '.xshape'  # dummy output
     prog.VarDesc(var_reshaped)
     prog.VarDesc(var_xshape)
     if is_const_shape:
@@ -1590,29 +1592,30 @@ def Tile(prog, inputs, outputs, attrs, value_infos, name='', *args, **kwargs):
     val_input, val_repeats = inputs
     val_output, = outputs
     var_input = _make_var_name(val_input)
+    var_repeats = _make_var_name(val_repeats)
     var_output = _make_var_name(val_output)
     # interpretation
+    repeats = _const_weight_or_none(value_infos, val_repeats)
+    assert repeats is not None, 'only const repeats is supported'
     fluid_op = 'expand'
-    is_const_repeats = 'const_value' in value_infos[val_repeats]
-    if is_const_repeats:
-        code_repeats = _make_var_name(val_repeats)  # for code
-        repeats = value_infos[val_repeats]['const_value']  # for desc
-    else:
-        repeats = value_infos[val_input]['get_weight']()  # for desc
-        code_repeats = repeats  # for code
     name_attr = ', name={}'.format(repr(name)) if name else ''
     # generation
+    prog.Code('# repeats:{}={} # const as literal'.format(var_repeats, repeats))
     prog.Code('{} = layers.{}({}'
               ', expand_times={}'
-              '{})'.format(
+              '{})'
+              ' # {} = {}'.format(
                   var_output,
                   fluid_op,
                   var_input,
                   # attrs
-                  code_repeats,
+                  repeats,
                   name_attr,
+                  # comment
+                  _make_var_name(val_repeats),
+                  repeats,
              ))
     prog.VarDesc(var_output)
     prog.OpDesc(
@@ -6,11 +6,12 @@ Created on Fri Mar 22 12:17:19 2019
 @author: Macrobull
 """
-# import importlib, logging, os, sys
-import importlib
-import logging
-import os
-import sys
+import importlib, logging, os, sys
+#import importlib
+#import logging
+#import os
+#import sys
 def _flatten_dict(obj, out=None):
@@ -36,7 +37,7 @@ def _ensure_list(obj):
 def validate(fluid_model_filename,
              golden_data_filename,
             model_func_name='inference',
-             precision=1e-4,
+             decimal=3,
             save_inference_model=False):
     """
     inferece the converted Paddle fluid model, validate with given golden data
@@ -90,16 +91,17 @@ def validate(fluid_model_filename,
     # load data
     logger.info('using golden data %s', golden_data_filename)
     if golden_data_filename.endswith('.npz'):
-        test_data = np.load(golden_data_filename)
+        test_data = np.load(golden_data_filename, encoding='bytes')
         input_data = test_data['inputs'].tolist()
         output_data = test_data['outputs'].tolist()
     else:
-        test_data = np.load(golden_data_filename).tolist()
-        input_data = input_data['inputs']
-        output_data = output_data['outputs']
+        test_data = np.load(golden_data_filename, encoding='bytes').tolist()
+        input_data = test_data['inputs']
+        output_data = test_data['outputs']
     input_data = _flatten_dict(input_data)
     output_data = _flatten_dict(output_data)
-    logger.info('found %d I/O golden data, starting test ...', len(test_data))
+    logger.info('found %d I/O golden data, starting test ...',
+                len(input_data) + len(output_data))
     # DEBUG: reload test for python code
     if basename.endswith('.py') and save_inference_model:
@@ -123,7 +125,7 @@ def validate(fluid_model_filename,
     for (name, truth), output in zip(output_data.items(), outputs):
         logger.info('testing output {} ...'.format(name))
         try:
-            np.testing.assert_almost_equal(output, truth, decimal=precision)
+            np.testing.assert_almost_equal(output, truth, decimal=decimal)
         except AssertionError as e:
             passed = False
             logger.error('failed: %s\n', e)
@@ -164,7 +166,7 @@ if __name__ == '__main__':
         '--precision',
         '-p',
         type=int,
-        default=4,
+        default=3,
         help='assertion decimal for validation',
     )
     args = parser.parse_args()
@@ -176,10 +178,10 @@ if __name__ == '__main__':
     debug = args.debug
     fluid_model_filename = args.model[0]
     golden_data_filename = args.test_data
-    precision = args.precision
+    decimal = args.precision
     validate(
         fluid_model_filename,
         golden_data_filename,
-        precision=precision,
+        decimal=decimal,
         save_inference_model=debug)
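Review note: the validation knob changes meaning in this commit: `precision=1e-4` (an absolute tolerance) becomes `decimal=3`, forwarded to `np.testing.assert_almost_equal`, which requires `abs(desired - actual) < 1.5 * 10**(-decimal)`. A minimal sketch:

```python
import numpy as np

# decimal=3 tolerates absolute differences up to 1.5e-3
np.testing.assert_almost_equal(1.0000, 1.0014, decimal=3)  # passes: 1.4e-3 < 1.5e-3
try:
    np.testing.assert_almost_equal(1.0000, 1.0016, decimal=3)
except AssertionError:
    print('fails: 1.6e-3 exceeds 1.5 * 10**-3')
```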
@@ -8,9 +8,9 @@ Created on Sun Feb 24 20:44:43 2019
 from __future__ import division
-# import logging, os
-import logging
-import os
+import logging, os
+#import logging
+#import os
 import numpy as np
 logger = logging.getLogger(__name__)
@@ -215,10 +215,6 @@ class Program(object):
         var_desc.persistable = persistable
         var_desc.type.type = framework_pb2.VarType.LOD_TENSOR
-        # REMOVEIT: WORKAROUND: Netron: null.tensor error
-        tensor_desc = var_desc.type.lod_tensor.tensor
-        tensor_desc.data_type = self.Dtype(dummy_dtype)  # required
         if value_info and 'dtype' in value_info:
             tensor_desc = var_desc.type.lod_tensor.tensor
             tensor_desc.data_type = self.Dtype(value_info['dtype'])  # required
@@ -230,6 +226,9 @@ class Program(object):
                             not persistable)
             if remove_batch:
                 tensor_desc.dims[0] = -1
+        else:  # REMOVEIT: WORKAROUND: Netron: null.tensor error
+            tensor_desc = var_desc.type.lod_tensor.tensor
+            tensor_desc.data_type = self.Dtype(dummy_dtype)  # required
         self.var_descs.append(var_desc)
@@ -329,7 +328,7 @@ class Writer(object):
         else:
             var_name = make_var_name(name)
             attr_name = make_attr_name(name)
-            prog.Code('# parameter: {}'.format(name))
+            prog.Code('# parameter {}: {}'.format(name, var_name))
             prog.Code('{} = ParamAttr(name={})'  # , trainable=True
                       .format(attr_name, repr(var_name)))
             prog.Code(
@@ -356,13 +355,13 @@ class Writer(object):
         if remove_batch:
             shape = shape[1:]
-        prog.Code('# input: {}'.format(name))
+        prog.Code('# input {}: {}'.format(name, var_name))
         prog.Code((
             '{} = layers.data(name={}, shape={}, dtype={}, '
             'append_batch_size={})'  # , stop_gradient=True
         ).format(
             var_name,
-            repr(name),
+            repr(var_name),
             shape,
             repr(value_info['dtype'].name),
             remove_batch,