Commit 69b3b90d authored by: S syyxsxx

fix some bugs

Parent f69c5d03
@@ -16,9 +16,9 @@ import os
from six import text_type as _text_type
import argparse
import sys
from utils import logging
import paddlex as pdx


def arg_parser():
    parser = argparse.ArgumentParser()
    parser.add_argument(
@@ -46,41 +46,45 @@ def arg_parser():
    return parser
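
# Descriptive note (added): reverse_input converts a "[W,H]" shape string
# (e.g. "[608,608]") into the NCHW shape string "[1,3,H,W]" expected by
# OpenVINO's Model Optimizer.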
def reverse_input(shape):
    shape_list = shape[1:-1].split(',')
    shape = '[1,3,' + shape_list[1] + ',' + shape_list[0] + ']'
    return shape


def export_openvino_model(model, args):
    if model.model_type == "detector" or model.__class__.__name__ == "FastSCNN":
        print(
            "Only image classifier models and semantic segmentation models(except FastSCNN) are supported to export to openvino"
        )
    try:
        import x2paddle
        if x2paddle.__version__ < '0.7.4':
            logging.error("You need to upgrade x2paddle >= 0.7.4")
    except:
        print(
            "You need to install x2paddle first, pip install x2paddle>=0.7.4")
    import x2paddle.convert as x2pc
    x2pc.paddle2onnx(args.model_dir, args.save_dir)
    import mo.main as mo
    from mo.utils.cli_parser import get_onnx_cli_parser
    onnx_parser = get_onnx_cli_parser()
    onnx_parser.add_argument("--model_dir", type=_text_type)
    onnx_parser.add_argument("--save_dir", type=_text_type)
    onnx_parser.add_argument("--fixed_input_shape")
    onnx_input = os.path.join(args.save_dir, 'x2paddle_model.onnx')
    onnx_parser.set_defaults(input_model=onnx_input)
    onnx_parser.set_defaults(output_dir=args.save_dir)
    shape_list = args.fixed_input_shape[1:-1].split(',')
    shape = '[1,3,' + shape_list[1] + ',' + shape_list[0] + ']'
    if model.__class__.__name__ == "YOLOV3":
        shape = shape + ",[1,2]"
        inputs = "image,im_size"
        onnx_parser.set_defaults(input=inputs)
    onnx_parser.set_defaults(input_shape=shape)
    mo.main(onnx_parser, 'onnx')


def main():
@@ -90,12 +94,11 @@ def main():
    assert args.save_dir is not None, "--save_dir should be defined to create openvino model"
    model = pdx.load_model(args.model_dir)
    if model.status == "Normal" or model.status == "Prune":
        print(
            "Only support inference model, try to export model first as below,")
    export_openvino_model(model, args)


if __name__ == "__main__":
    main()
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import os.path as osp
import time
import cv2
import numpy as np
import yaml
from six import text_type as _text_type
from paddlelite.lite import *


class Predictor:
    def __init__(self, model_nb, model_yaml, thread_num, shape):
        if not osp.exists(model_nb):
            print("model nb file does not exist in {}".format(model_nb))
        self.model_nb = model_nb
        self.shape = shape
        config = MobileConfig()
        config.set_model_from_file(model_nb)
        config.set_threads(thread_num)
        if not osp.exists(model_yaml):
            print("model yaml file does not exist in {}".format(model_yaml))
        with open(model_yaml) as f:
            self.info = yaml.load(f.read(), Loader=yaml.Loader)
        self.model_type = self.info['_Attributes']['model_type']
        self.model_name = self.info['Model']
        self.num_classes = self.info['_Attributes']['num_classes']
        self.labels = self.info['_Attributes']['labels']
        if self.info['Model'] == 'MaskRCNN':
            if self.info['_init_params']['with_fpn']:
                self.mask_head_resolution = 28
            else:
                self.mask_head_resolution = 14
        transforms_mode = self.info.get('TransformsMode', 'RGB')
        if transforms_mode == 'RGB':
            to_rgb = True
        else:
            to_rgb = False
        self.transforms = self.build_transforms(self.info['Transforms'],
                                                to_rgb)
        self.predictor = create_paddle_predictor(config)
        self.total_time = 0
        self.count_num = 0
    def build_transforms(self, transforms_info, to_rgb=True):
        if self.model_type == "classifier":
            import transforms.cls_transforms as transforms
        elif self.model_type == "detector":
            import transforms.det_transforms as transforms
        elif self.model_type == "segmenter":
            import transforms.seg_transforms as transforms
        op_list = list()
        for op_info in transforms_info:
            op_name = list(op_info.keys())[0]
            op_attr = op_info[op_name]
            if not hasattr(transforms, op_name):
                raise Exception(
                    "There's no operator named '{}' in transforms of {}".
                    format(op_name, self.model_type))
            op_list.append(getattr(transforms, op_name)(**op_attr))
        eval_transforms = transforms.Compose(op_list)
        if hasattr(eval_transforms, 'to_rgb'):
            eval_transforms.to_rgb = to_rgb
        self.arrange_transforms(eval_transforms)
        return eval_transforms
    def arrange_transforms(self, eval_transforms):
        if self.model_type == 'classifier':
            import transforms.cls_transforms as transforms
            arrange_transform = transforms.ArrangeClassifier
        elif self.model_type == 'segmenter':
            import transforms.seg_transforms as transforms
            arrange_transform = transforms.ArrangeSegmenter
        elif self.model_type == 'detector':
            import transforms.det_transforms as transforms
            arrange_name = 'Arrange{}'.format(self.model_name)
            arrange_transform = getattr(transforms, arrange_name)
        else:
            raise Exception("Unrecognized model type: {}".format(
                self.model_type))
        if type(eval_transforms.transforms[-1]).__name__.startswith('Arrange'):
            eval_transforms.transforms[-1] = arrange_transform(mode='test')
        else:
            eval_transforms.transforms.append(arrange_transform(mode='test'))
    def raw_predict(self, preprocessed_input):
        self.count_num += 1
        input_tensor = self.predictor.get_input(0)
        input_tensor.resize(self.shape)
        input_tensor.set_float_data(preprocessed_input['image'])
        if self.model_name == "YOLOv3":
            input_size_tensor = self.predictor.get_input(1)
            input_size_tensor.resize([1, 2])
            input_size_tensor.set_float_data(preprocessed_input['im_size'])
        # Start inference
        start_time = time.time()
        self.predictor.run()
        time_use = time.time() - start_time
        # Skip the first runs as warm-up, then accumulate latency; from the
        # 120th run on, print the approximate average latency in ms over the
        # ~100 timed runs (total_time / 100 * 1000 == total_time * 10).
        if (self.count_num >= 20):
            self.total_time += time_use
        if (self.count_num >= 120):
            print("avgtime:", self.total_time * 10)
        # Processing output blob
        print("Processing output blob")
        return
    def preprocess(self, image):
        res = dict()
        if self.model_type == "classifier":
            im, = self.transforms(image)
            im = np.expand_dims(im, axis=0).copy()
            im = im.flatten()
            res['image'] = im
        elif self.model_type == "detector":
            if self.model_name == "YOLOv3":
                im, im_shape = self.transforms(image)
                im = np.expand_dims(im, axis=0).copy()
                im_shape = np.expand_dims(im_shape, axis=0).copy()
                res['image'] = im
                res['im_size'] = im_shape
            if self.model_name.count('RCNN') > 0:
                im, im_resize_info, im_shape = self.transforms(image)
                im = np.expand_dims(im, axis=0).copy()
                im_resize_info = np.expand_dims(im_resize_info, axis=0).copy()
                im_shape = np.expand_dims(im_shape, axis=0).copy()
                res['image'] = im
                res['im_info'] = im_resize_info
                res['im_shape'] = im_shape
        elif self.model_type == "segmenter":
            im, im_info = self.transforms(image)
            im = np.expand_dims(im, axis=0).copy()
            # np.savetxt('./input_data.txt', im.flatten())
            res['image'] = im
            res['im_info'] = im_info
        return res
    def classifier_postprocess(self, topk=1):
        output_tensor = self.predictor.get_output(0)
        output_data = output_tensor.float_data()
        true_topk = min(self.num_classes, topk)
        pred_label = np.argsort(-np.array(output_data))[:true_topk]
        result = [{
            'category_id': l,
            'category': self.labels[l],
            'score': output_data[l],
        } for l in pred_label]
        print(result)
        return result
    def segmenter_postprocess(self, preprocessed_inputs):
        out_label_tensor = self.predictor.get_output(0)
        out_label = out_label_tensor.float_data()
        label_shape = tuple(out_label_tensor.shape())
        label_map = np.array(out_label).astype('uint8')
        label_map = label_map.reshape(label_shape)
        label_map = np.squeeze(label_map)
        out_score_tensor = self.predictor.get_output(1)
        out_score = out_score_tensor.float_data()
        score_shape = tuple(out_score_tensor.shape())
        score_map = np.array(out_score)
        score_map = score_map.reshape(score_shape)
        score_map = np.transpose(score_map, (1, 2, 0))
        im_info = preprocessed_inputs['im_info']
        for info in im_info[::-1]:
            if info[0] == 'resize':
                w, h = info[1][1], info[1][0]
                label_map = cv2.resize(label_map, (w, h), cv2.INTER_NEAREST)
                score_map = cv2.resize(score_map, (w, h), cv2.INTER_LINEAR)
            elif info[0] == 'padding':
                w, h = info[1][1], info[1][0]
                label_map = label_map[0:h, 0:w]
                score_map = score_map[0:h, 0:w, :]
            else:
                raise Exception("Unexpected info '{}' in im_info".format(
                    info[0]))
        return {'label_map': label_map, 'score_map': score_map}
    def detector_postprocess(self, preprocessed_inputs):
        out_tensor = self.predictor.get_output(0)
        out_data = out_tensor.float_data()
        out_shape = tuple(out_tensor.shape())
        out_data = np.array(out_data)
        outputs = out_data.reshape(out_shape)
        result = []
        for out in outputs:
            result.append(out.tolist())
        return result
    def predict(self, image, topk=1, threshold=0.5):
        preprocessed_input = self.preprocess(image)
        self.raw_predict(preprocessed_input)
        if self.model_type == "classifier":
            results = self.classifier_postprocess(topk)
        elif self.model_type == "detector":
            results = self.detector_postprocess(preprocessed_input)
        elif self.model_type == "segmenter":
            results = self.segmenter_postprocess(preprocessed_input)
        return results
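
# A minimal usage sketch (illustrative only; the .nb/.yml file names, the
# input shape, and the test image below are hypothetical, not part of this
# file):
#
#     predictor = Predictor("model.nb", "model.yml", thread_num=1,
#                           shape=[1, 3, 224, 224])
#     result = predictor.predict(cv2.imread("test.jpg"), topk=1)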
@@ -33,6 +33,7 @@ python convertor.py --model_dir /path/to/inference_model --save_dir /path/to/ope
| --save_dir | Path to save the OpenVINO model |
| --fixed_input_shape | [W,H] of the model input |
| --data_type (optional) | FP32 or FP16; defaults to FP32. The IR for VPU must be FP16 |
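
As a reference for how `--fixed_input_shape` is consumed, the sketch below mirrors the conversion done in the converter script above; the function name and the example value are illustrative only:

```python
def to_mo_input_shape(fixed_input_shape):
    # "--fixed_input_shape [W,H]" arrives as the string "[W,H]"
    w, h = fixed_input_shape[1:-1].split(',')
    # OpenVINO's Model Optimizer expects an NCHW shape, i.e. [1,3,H,W]
    return '[1,3,' + h + ',' + w + ']'


print(to_mo_input_shape('[608,608]'))  # prints [1,3,608,608]
```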
**Note**
- Because OpenVINO does not support the ONNX resize-11 OP, Paddle segmentation models are not supported yet.
- When deploying YOLOv3 with OpenVINO, due to OpenVINO's limited support for ONNX OPs, the last multiclass_nms layer receives special handling when the YOLOv3 Paddle model is exported. The box results output by the exported ONNX model therefore include the background category (which the Paddle model does not), so the OpenVINO deployment code filters out the background category in post-processing.
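
For illustration, here is a minimal Python sketch of the background-category filtering described above (the row layout `[class_id, score, xmin, ymin, xmax, ymax]` and the `background_id` value are assumptions, not taken from the actual deployment code):

```python
import numpy as np


def filter_background(detections, background_id=0):
    # detections: array of shape [N, 6], one row per box, assumed layout
    # [class_id, score, xmin, ymin, xmax, ymax]; drop the background rows.
    detections = np.asarray(detections)
    return detections[detections[:, 0] != background_id]
```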
@@ -14,8 +14,11 @@ On Windows, we tested with `Visual Studio 2019 Community`
- My Computer -> Properties -> Advanced system settings -> Environment Variables
- Under System variables, find `Path` (create it if it does not exist) and double-click to edit it
- Click New, add each of the following OpenVINO paths, and save:
  `C:\Program Files (x86)\IntelSWTools\openvino\inference_engine\bin\intel64\Release`
  `C:\Program Files (x86)\IntelSWTools\openvino\inference_engine\external\tbb\bin`
  `C:\Program Files (x86)\IntelSWTools\openvino\deployment_tools\ngraph\lib`

Make sure the basic software listed above is installed and the corresponding environment is configured. **All examples below use `D:\projects` as the working directory.**
@@ -50,13 +53,13 @@ git clone https://github.com/PaddlePaddle/PaddleX.git
### Step3: Build the CMake project directly with Visual Studio 2019
1. Open Visual Studio 2019 Community and click `Continue without code`
2. Click `File` -> `Open` -> `CMake`, select the directory containing the C++ inference code (e.g. `D:\projects\PaddleX\deploy\openvino`), and open `CMakeList.txt`
3. Click `Project` -> `CMake Settings`
4. Click `Browse` and set the build options to specify the paths of `OpenVINO`, `Gflags`, `GLOG`, `NGRAPH`, and `OPENCV`

| Parameter | Description |
| ---- | ---- |
| OPENCV_DIR | Path to the OpenCV library |
| OPENVINO_DIR | Path to the OpenVINO inference library, i.e. the deployment_tools/inference_engine directory under the OpenVINO installation directory; no change is needed if OpenVINO was installed to its default location |
| NGRAPH_LIB | Path to OpenVINO's ngraph library, i.e. the deployment_tools/ngraph/lib directory under the OpenVINO installation directory; no change is needed if OpenVINO was installed to its default location |
| GFLAGS_DIR | Path to the gflags library |
@@ -125,7 +125,8 @@ OPENCV_DIR=$(pwd)/deps/opencv/
OS: Raspbian OS
Software: paddle-lite 2.6.1

### Test results

Latency is reported in ms; num is the number of threads used by paddle-lite.

|Model|lite (num=4)|Input image size|
| ----| ---- | ----|
|mobilenet-v2|136.19|224*224|
@@ -148,6 +149,7 @@ OPENCV_DIR=$(pwd)/deps/opencv/
|Xception41|1418.29|224*224|
|Xception65|2094.7|224*224|
Based on these test results, we recommend using small networks such as MobileNetV1-V3 and ShuffleNetV2 on the Raspberry Pi.

## NCS2 deployment