# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import os.path as osp
import psutil
import cv2
import numpy as np
import yaml
import paddlex
import paddle.fluid as fluid
from paddlex.cv.transforms import build_transforms
from paddlex.cv.models import BaseClassifier
from paddlex.cv.models import PPYOLO, FasterRCNN, MaskRCNN
from paddlex.cv.models import DeepLabv3p


class Predictor:
    def __init__(self,
                 model_dir,
                 use_gpu=True,
                 gpu_id=0,
                 use_mkl=False,
                 mkl_thread_num=psutil.cpu_count(),
                 use_trt=False,
                 use_glog=False,
                 memory_optimize=True):
        """ 创建Paddle Predictor

            Args:
                model_dir: 模型路径(必须是导出的部署或量化模型)
                use_gpu: 是否使用gpu,默认True
                gpu_id: 使用gpu的id,默认0
                use_mkl: 是否使用mkldnn计算库,CPU情况下使用,默认False
45
                mkl_thread_num: mkldnn计算线程数,默认为4
J
jiangjiajun 已提交
46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77
                use_trt: 是否使用TensorRT,默认False
                use_glog: 是否启用glog日志, 默认False
                memory_optimize: 是否启动内存优化,默认True
        """
        if not osp.isdir(model_dir):
            raise Exception("[ERROR] Path {} not exist.".format(model_dir))
        if not osp.exists(osp.join(model_dir, "model.yml")):
            raise Exception("There's not model.yml in {}".format(model_dir))
        with open(osp.join(model_dir, "model.yml")) as f:
            self.info = yaml.load(f.read(), Loader=yaml.Loader)

        self.status = self.info['status']

        if self.status != "Quant" and self.status != "Infer":
            raise Exception("[ERROR] Only quantized model or exported "
                            "inference model is supported.")

        self.model_dir = model_dir
        self.model_type = self.info['_Attributes']['model_type']
        self.model_name = self.info['Model']
        self.num_classes = self.info['_Attributes']['num_classes']
        self.labels = self.info['_Attributes']['labels']
        if self.info['Model'] == 'MaskRCNN':
            if self.info['_init_params']['with_fpn']:
                self.mask_head_resolution = 28
            else:
                self.mask_head_resolution = 14
        transforms_mode = self.info.get('TransformsMode', 'RGB')
        if transforms_mode == 'RGB':
            to_rgb = True
        else:
            to_rgb = False
        self.transforms = build_transforms(self.model_type,
                                           self.info['Transforms'], to_rgb)
        self.predictor = self.create_predictor(use_gpu, gpu_id, use_mkl,
                                               mkl_thread_num, use_trt,
                                               use_glog, memory_optimize)

    def create_predictor(self,
                         use_gpu=True,
                         gpu_id=0,
                         use_mkl=False,
                         mkl_thread_num=psutil.cpu_count(),
                         use_trt=False,
                         use_glog=False,
                         memory_optimize=True):
        config = fluid.core.AnalysisConfig(
            os.path.join(self.model_dir, '__model__'),
            os.path.join(self.model_dir, '__params__'))

        if use_gpu:
            # Set the initial GPU memory pool size (in MB) and the device id
            config.enable_use_gpu(100, gpu_id)
        else:
            config.disable_gpu()
        if use_mkl:
            if self.model_name not in ["HRNet", "DeepLabv3p"]:
                config.enable_mkldnn()
                config.set_cpu_math_library_num_threads(mkl_thread_num)
        if use_glog:
            config.enable_glog_info()
        else:
            config.disable_glog_info()
        if memory_optimize:
            config.enable_memory_optim()

        # Enable IR graph optimizations, including operator fusion
        config.switch_ir_optim(True)
        # Disable feed/fetch ops; required when using the zero-copy API
        config.switch_use_feed_fetch_ops(False)
        predictor = fluid.core.create_paddle_predictor(config)
        return predictor

    def preprocess(self, image, thread_num=1):
        """ 对图像做预处理

            Args:
F
FlyingQianMM 已提交
123
                image(list|tuple): 数组中的元素可以是图像路径,也可以是解码后的排列格式为(H,W,C)
124
                    且类型为float32且为BGR格式的数组。
J
jiangjiajun 已提交
125 126 127
        """
        res = dict()
        if self.model_type == "classifier":
            im = BaseClassifier._preprocess(
                image,
                self.transforms,
                self.model_type,
                self.model_name,
                thread_num=thread_num)
            res['image'] = im
        elif self.model_type == "detector":
            if self.model_name in ["PPYOLO", "YOLOv3"]:
                im, im_size = PPYOLO._preprocess(
                    image,
                    self.transforms,
                    self.model_type,
                    self.model_name,
                    thread_num=thread_num)
                res['image'] = im
                res['im_size'] = im_size
            if 'RCNN' in self.model_name:
                im, im_resize_info, im_shape = FasterRCNN._preprocess(
                    image,
                    self.transforms,
                    self.model_type,
                    self.model_name,
                    thread_num=thread_num)
                res['image'] = im
                res['im_info'] = im_resize_info
                res['im_shape'] = im_shape
        elif self.model_type == "segmenter":
            im, im_info = DeepLabv3p._preprocess(
                image,
                self.transforms,
                self.model_type,
                self.model_name,
                thread_num=thread_num)
            res['image'] = im
            res['im_info'] = im_info
        return res

    def postprocess(self,
                    results,
                    topk=1,
                    batch_size=1,
                    im_shape=None,
                    im_info=None):
        """ 对预测结果做后处理

            Args:
                results (list): 预测结果
                topk (int): 分类预测时前k个最大值
                batch_size (int): 预测时图像批量大小
                im_shape (list): MaskRCNN的图像输入大小
                im_info (list):RCNN系列和分割网络的原图大小
        """

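        # Convert a Paddle LoD (level-of-detail) offset list, e.g. [[0, 2, 5]],
        # into per-sample lengths, e.g. [[2, 3]].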
        def offset_to_lengths(lod):
            offset = lod[0]
            lengths = [
                offset[i + 1] - offset[i] for i in range(len(offset) - 1)
            ]
            return [lengths]

        if self.model_type == "classifier":
            true_topk = min(self.num_classes, topk)
            preds = BaseClassifier._postprocess([results[0][0]], true_topk,
                                                self.labels)
        elif self.model_type == "detector":
            res = {'bbox': (results[0][0], offset_to_lengths(results[0][1])), }
            res['im_id'] = (np.array(
                [[i] for i in range(batch_size)]).astype('int32'), [[]])
            if self.model_name in ["PPYOLO", "YOLOv3"]:
                preds = PPYOLO._postprocess(res, batch_size, self.num_classes,
                                            self.labels)
            elif self.model_name == "FasterRCNN":
                preds = FasterRCNN._postprocess(res, batch_size,
                                                self.num_classes, self.labels)
            elif self.model_name == "MaskRCNN":
                res['mask'] = (results[1][0], offset_to_lengths(results[1][1]))
                res['im_shape'] = (im_shape, [])
                preds = MaskRCNN._postprocess(
                    res, batch_size, self.num_classes,
                    self.mask_head_resolution, self.labels)
        elif self.model_type == "segmenter":
            res = [results[0][0], results[1][0]]
            preds = DeepLabv3p._postprocess(res, im_info)
        return preds

    def raw_predict(self, inputs):
        """ 接受预处理过后的数据进行预测

            Args:
                inputs(tuple): 预处理过后的数据
        """
        for k, v in inputs.items():
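            # Skip keys that are not input tensors of the inference graph.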
            try:
                tensor = self.predictor.get_input_tensor(k)
            except:
                continue
            tensor.copy_from_cpu(v)
        self.predictor.zero_copy_run()
        output_names = self.predictor.get_output_names()
        output_results = list()
        for name in output_names:
            output_tensor = self.predictor.get_output_tensor(name)
            output_tensor_lod = output_tensor.lod()
            output_results.append(
                [output_tensor.copy_to_cpu(), output_tensor_lod])
        return output_results

    def predict(self, image, topk=1):
        """ 图片预测
J
jiangjiajun 已提交
238

239
            Args:
F
FlyingQianMM 已提交
240
                image(str|np.ndarray): 图像路径;或者是解码后的排列格式为(H, W, C)且类型为float32且为BGR格式的数组。
241
                topk(int): 分类预测时使用,表示预测前topk的结果
J
jiangjiajun 已提交
242
        """
        preprocessed_input = self.preprocess([image])
        model_pred = self.raw_predict(preprocessed_input)
        im_shape = preprocessed_input.get('im_shape', None)
        im_info = preprocessed_input.get('im_info', None)
        results = self.postprocess(
            model_pred,
            topk=topk,
            batch_size=1,
            im_shape=im_shape,
            im_info=im_info)

        return results[0]

    def batch_predict(self, image_list, topk=1, thread_num=2):
        """ 图片预测

            Args:
F
FlyingQianMM 已提交
262 263 264 265
                image_list(list|tuple): 对列表(或元组)中的图像同时进行预测,列表中的元素可以是图像路径
                    也可以是解码后的排列格式为(H,W,C)且类型为float32且为BGR格式的数组。
                thread_num (int): 并发执行各图像预处理时的线程数。

J
jiangjiajun 已提交
266 267
                topk(int): 分类预测时使用,表示预测前topk的结果
        """
        preprocessed_input = self.preprocess(image_list)
        model_pred = self.raw_predict(preprocessed_input)
        im_shape = preprocessed_input.get('im_shape', None)
        im_info = preprocessed_input.get('im_info', None)
        results = self.postprocess(
            model_pred,
            topk=topk,
            batch_size=len(image_list),
            im_shape=im_shape,
            im_info=im_info)

        return results
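

if __name__ == "__main__":
    # Minimal usage sketch. The model directory and image paths below are
    # placeholders (assumptions), not files shipped with this module: point
    # model_dir at a model exported for deployment and pass your own images.
    predictor = Predictor(model_dir="./inference_model", use_gpu=False)
    # Single-image prediction; `topk` only matters for classification models.
    single_result = predictor.predict("./images/test.jpg", topk=5)
    # Batch prediction with image preprocessing spread over 2 threads.
    batch_results = predictor.batch_predict(
        ["./images/a.jpg", "./images/b.jpg"], topk=5, thread_num=2)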