Unverified commit f0b954a8 authored by Jason, committed by GitHub

Merge pull request #212 from Channingss/develop

update onnxruntime & onnx packages
......
@@ -15,7 +15,7 @@ paddlepaddle >= 1.6.0
**Install the following dependencies as needed**
tensorflow : tensorflow == 1.14.0
caffe : 无
-onnx : onnx == 1.5.0 onnxruntime == 0.4.0
+onnx : onnx == 1.6.0 onnxruntime == 1.1.0
## Installation
### Installation method 1 (recommended)
......
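A quick way to confirm the updated pins are installed before converting (a minimal sketch; the version strings are the ones from the dependency list above):

```python
import onnx
import onnxruntime

# Pins taken from the requirements above; adjust if the pins change.
assert onnx.version.version == '1.6.0', 'onnx==1.6.0 is required'
assert onnxruntime.__version__ == '1.1.0', 'onnxruntime==1.1.0 is required'
```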
......
@@ -23,9 +23,6 @@ setuptools.setup(
"Operating System :: OS Independent",
],
license='Apache 2.0',
entry_points={
'console_scripts': [
'x2paddle=x2paddle.convert:main',
'onnx_infer=x2paddle.onnx_infer:main'
]
})
entry_points={'console_scripts': [
'x2paddle=x2paddle.convert:main',
]})
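The remaining console script maps straight to x2paddle.convert:main, so the CLI and a programmatic call are interchangeable; a sketch (the flag names are assumptions, not taken from this diff; check `x2paddle -h` for the authoritative list):

```python
import sys
from x2paddle import convert

# Equivalent to running the 'x2paddle' console script declared above;
# the flags below are illustrative placeholders.
sys.argv = ['x2paddle', '--framework=onnx', '--model=model.onnx', '--save_dir=pd_model']
convert.main()
```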
......
@@ -155,21 +155,20 @@ def onnx2paddle(model_path, save_dir, params_merge=False):
    try:
        import onnx
        version = onnx.version.version
-        if version != '1.5.0':
-            print("onnx==1.5.0 is required")
+        if version != '1.6.0':
+            print("onnx==1.6.0 is required")
            return
    except:
-        print("onnx is not installed, use \"pip install onnx==1.5.0\".")
+        print("onnx is not installed, use \"pip install onnx==1.6.0\".")
        return
    print("Now translating model from onnx to paddle.")
-    from x2paddle.op_mapper.onnx_op_mapper import ONNXOpMapper
    from x2paddle.decoder.onnx_decoder import ONNXDecoder
-    from x2paddle.optimizer.onnx_optimizer import ONNXOptimizer
-    import onnxruntime
    model = ONNXDecoder(model_path)
+    from x2paddle.op_mapper.onnx_op_mapper import ONNXOpMapper
    mapper = ONNXOpMapper(model, save_dir)
+    from x2paddle.optimizer.onnx_optimizer import ONNXOptimizer
    optimizer = ONNXOptimizer(mapper)
    optimizer.delete_redundance_code()
......
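For reference, the updated function is invoked the same way as before; a minimal sketch with placeholder paths (the signature comes from the hunk header above):

```python
from x2paddle.convert import onnx2paddle

# 'model.onnx' and 'pd_model' are placeholder paths.
onnx2paddle('model.onnx', save_dir='pd_model', params_merge=False)
```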
This diff is collapsed.
......
@@ -316,12 +316,14 @@ class ONNXDecoder(object):
                model.ir_version, model.opset_import[0].version))
        if model.opset_import[0].version < 9:
            _logger.warning(
-                'Now, onnx2paddle main support convert onnx model opset_verison == 9,'
+                'Now, onnx2paddle supports converting onnx models with opset_version == 9, '
                'opset_version of your onnx model is %d < 9, '
-                'some operator may cannot convert.',
+                'so some operators may fail to convert.',
                model.opset_import[0].version)
        check_model(model)
+        self.check_model_running_state(onnx_model)
+        model = onnx.shape_inference.infer_shapes(model)
        model = self.optimize_model_skip_op_for_inference(model)
        model = self.optimize_model_strip_initializer(model)
......
@@ -471,7 +473,46 @@ class ONNXDecoder(object):
            raise ValueError('name should not be empty')
        for s in ' .*?\\/-:':
            name = name.replace(s, '_')
-        return '_' + name
+        return 'x2paddle_' + name
+    def check_model_running_state(self, model_path):
+        try:
+            import onnxruntime as rt
+            version = rt.__version__
+            if version != '1.0.0':
+                print("onnxruntime==1.0.0 is required")
+                return
+        except:
+            raise Exception(
+                "onnxruntime is not installed, use \"pip install onnxruntime==1.0.0\"."
+            )
+        model = onnx.load(model_path)
+        model = onnx.shape_inference.infer_shapes(model)
+        if len(model.graph.value_info) < len(model.graph.node) - 1:
+            _logger.warning(
+                'shape inference for some operators failed, '
+                'those operators will be assigned node.out_shape == None, '
+                'refer to https://github.com/onnx/onnx/blob/master/docs/ShapeInference.md'
+            )
+        try:
+            datatype_map = {
+                'tensor(int64)': 'int',
+                'tensor(float)': 'float32',
+                'tensor(int32)': 'int32'
+            }
+            input_dict = {}
+            sess = rt.InferenceSession(model_path)
+            for ipt in sess.get_inputs():
+                datatype = datatype_map[ipt.type]
+                input_dict[ipt.name] = np.random.random(
+                    ipt.shape).astype(datatype)
+            res = sess.run(None, input_feed=input_dict)
+        except:
+            raise Exception(
+                "onnxruntime failed to run inference on this onnx model. Please verify the model with onnxruntime; if the model is correct, please file an issue on GitHub."
+            )
def standardize_variable_name(self, graph):
"""
......
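The validation added above feeds random data of each declared input's shape and dtype through onnxruntime before conversion begins. The same idea as a standalone sketch (placeholder model path, and assuming the model declares static input shapes):

```python
import numpy as np
import onnxruntime as rt

sess = rt.InferenceSession('model.onnx')  # placeholder path
feed = {}
for ipt in sess.get_inputs():
    # Same mapping idea as datatype_map above (int64 kept as 'int64' here).
    dtype = {'tensor(float)': 'float32',
             'tensor(int32)': 'int32',
             'tensor(int64)': 'int64'}[ipt.type]
    feed[ipt.name] = np.random.random(ipt.shape).astype(dtype)
outputs = sess.run(None, input_feed=feed)  # raises if the model cannot run
```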
import os
import sys
import numpy as np
import onnx
import json
import argparse
from six import text_type as _text_type


def arg_parser():
    parser = argparse.ArgumentParser()
    parser.add_argument("--save_dir",
                        "-s",
                        type=_text_type,
                        default=None,
                        help="define save_dir")
    return parser


def main():
    try:
        import onnxruntime as rt
        version = rt.__version__
        if version != '0.4.0':
            print("onnxruntime==0.4.0 is required")
            return
    except:
        print(
            "onnxruntime is not installed, use \"pip install onnxruntime==0.4.0\"."
        )
        return
    parser = arg_parser()
    args = parser.parse_args()
    save_dir = args.save_dir
    model_dir = os.path.join(save_dir, 'onnx_model_infer.onnx')
    model = onnx.load(model_dir)
    sess = rt.InferenceSession(model_dir)
    inputs_dict = {}
    for ipt in sess.get_inputs():
        data_dir = os.path.join(save_dir, ipt.name + '.npy')
        inputs_dict[ipt.name] = np.load(data_dir, allow_pickle=True)
    res = sess.run(None, input_feed=inputs_dict)
    for idx, value_info in enumerate(model.graph.output):
        np.save(os.path.join(save_dir, value_info.name), res[idx])


if __name__ == "__main__":
    main()
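This standalone helper was one half of a file-based round trip: the op mapper saved one .npy per graph input, shelled out to onnx_infer, and read one .npy back per output (the os.system call removed in the next hunk). A rough sketch of that old flow, with the directory and tensor names as placeholders:

```python
import os
import numpy as np

tmp_dir = 'tmp_data'  # placeholder for self.tmp_data_dir
# one .npy per graph input, named after the input tensor ('x' is a placeholder)
np.save(os.path.join(tmp_dir, 'x.npy'),
        np.random.random([1, 3, 224, 224]).astype('float32'))
os.system('onnx_infer --save_dir=' + tmp_dir)
# one .npy per graph output ('y' is a placeholder)
out = np.load(os.path.join(tmp_dir, 'y.npy'), allow_pickle=True)
```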
......
@@ -32,7 +32,7 @@ import math
import os
import shutil
from functools import reduce
+import onnxruntime as rt
_logger = _logging.getLogger(__name__)
......
@@ -71,6 +71,7 @@ class ONNXOpMapper(OpMapper):
        self.used_custom_layers = dict()
        self.is_inference = False
        self.tmp_data_dir = os.path.join(save_dir, 'tmp_data')
+        self.tmp_outputs_dict = {}
        self.get_output_shapes()
        if not self.op_checker():
......
@@ -119,7 +120,7 @@ class ONNXOpMapper(OpMapper):
    def get_results_of_inference(self, model, value_infos, data_nodes):
        if not os.path.exists(self.tmp_data_dir):
            os.makedirs(self.tmp_data_dir)
+        inputs_dict = {}
        for data_node in data_nodes:
            value_info = value_infos[data_node]
            shape = value_info['shape']
......
@@ -129,29 +130,32 @@ class ONNXOpMapper(OpMapper):
                if dim_shape == 0 and i != 0:
                    assert 'shape of input is not assigned'
            ipt = np.random.random(shape).astype(value_info['dtype'])
-            np.save(os.path.join(self.tmp_data_dir, data_node), ipt)
+            inputs_dict[data_node] = ipt
        model = onnx.shape_inference.infer_shapes(model)
        outputs = []
        for value_info in model.graph.value_info:
-            outputs.append(value_info)
+            outputs.append(value_info.name)
        model.graph.ClearField('output')
-        model.graph.output.MergeFrom(outputs)
+        model.graph.output.MergeFrom(model.graph.value_info)
        onnx.save(model, os.path.join(self.tmp_data_dir,
                                      'onnx_model_infer.onnx'))
-        os.system('onnx_infer --save_dir=' + self.tmp_data_dir)
+        sess = rt.InferenceSession(
+            os.path.join(self.tmp_data_dir, 'onnx_model_infer.onnx'))
+        res = sess.run(None, input_feed=inputs_dict)
+        self.tmp_outputs_dict = dict(zip(outputs, res))
        return

    def get_dynamic_shape(self, layer):
        """
        get dynamic shape from infer_result
        """
-        path = os.path.join(self.tmp_data_dir, layer + '.npy')
-        if not os.path.exists(path):
+        if layer not in self.tmp_outputs_dict:
            return [None, None, None]
-        output = np.load(path)
+        output = self.tmp_outputs_dict[layer]
        return output.tolist(), output.dtype, output.shape

    def get_output_shapes(self):
......
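With tmp_outputs_dict populated in memory, a dynamic-shape query becomes a plain dict lookup instead of an os.path.exists check plus an np.load round trip; for illustration (the mapper instance and layer name are placeholders):

```python
# mapper: an ONNXOpMapper whose get_results_of_inference() has already run;
# 'conv0_out' is a placeholder intermediate tensor name.
values, dtype, shape = mapper.get_dynamic_shape('conv0_out')
# unpacks [None, None, None] when no record exists for the layer
```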