# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import os.path as osp
import cv2
import numpy as np
import yaml
import multiprocessing as mp
import paddlex
import paddle.fluid as fluid
from paddlex.cv.transforms import build_transforms
from paddlex.cv.models import BaseClassifier
from paddlex.cv.models import PPYOLO, FasterRCNN, MaskRCNN
from paddlex.cv.models import DeepLabv3p
import paddlex.utils.logging as logging


class Predictor:
    def __init__(self,
                 model_dir,
                 use_gpu=True,
                 gpu_id=0,
                 use_mkl=True,
                 mkl_thread_num=4,
                 use_trt=False,
                 use_glog=False,
                 memory_optimize=True):
        """ 创建Paddle Predictor

            Args:
                model_dir: 模型路径(必须是导出的部署或量化模型)
                use_gpu: 是否使用gpu,默认True
                gpu_id: 使用gpu的id,默认0
                use_mkl: 是否使用mkldnn计算库,CPU情况下使用,默认False
46
                mkl_thread_num: mkldnn计算线程数,默认为4
J
jiangjiajun 已提交
47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78
                use_trt: 是否使用TensorRT,默认False
                use_glog: 是否启用glog日志, 默认False
                memory_optimize: 是否启动内存优化,默认True
        """
        if not osp.isdir(model_dir):
            raise Exception("[ERROR] Path {} not exist.".format(model_dir))
        if not osp.exists(osp.join(model_dir, "model.yml")):
            raise Exception("There's not model.yml in {}".format(model_dir))
        with open(osp.join(model_dir, "model.yml")) as f:
            self.info = yaml.load(f.read(), Loader=yaml.Loader)

        self.status = self.info['status']

        if self.status != "Quant" and self.status != "Infer":
            raise Exception("[ERROR] Only quantized model or exported "
                            "inference model is supported.")

        self.model_dir = model_dir
        self.model_type = self.info['_Attributes']['model_type']
        self.model_name = self.info['Model']
        self.num_classes = self.info['_Attributes']['num_classes']
        self.labels = self.info['_Attributes']['labels']
        if self.info['Model'] == 'MaskRCNN':
            if self.info['_init_params']['with_fpn']:
                self.mask_head_resolution = 28
            else:
                self.mask_head_resolution = 14
        transforms_mode = self.info.get('TransformsMode', 'RGB')
        if transforms_mode == 'RGB':
            to_rgb = True
        else:
            to_rgb = False
        self.transforms = build_transforms(self.model_type,
                                           self.info['Transforms'], to_rgb)
        self.predictor = self.create_predictor(use_gpu, gpu_id, use_mkl,
                                               mkl_thread_num, use_trt,
                                               use_glog, memory_optimize)
        # Thread pool used during prediction to process the input data in
        # parallel, one image per task; mainly used by the batch_predict API.
        thread_num = mp.cpu_count() if mp.cpu_count() < 8 else 8
        self.thread_pool = mp.pool.ThreadPool(thread_num)

    def reset_thread_pool(self, thread_num):
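        """ Rebuild the preprocessing thread pool with a new number of threads. """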
        self.thread_pool.close()
        self.thread_pool.join()
        self.thread_pool = mp.pool.ThreadPool(thread_num)

    def create_predictor(self,
                         use_gpu=True,
                         gpu_id=0,
                         use_mkl=False,
                         mkl_thread_num=4,
                         use_trt=False,
                         use_glog=False,
                         memory_optimize=True):
        config = fluid.core.AnalysisConfig(
            os.path.join(self.model_dir, '__model__'),
            os.path.join(self.model_dir, '__params__'))

        if use_gpu:
            # Set the initial GPU memory (in MB) and the device ID
            config.enable_use_gpu(100, gpu_id)
        else:
            config.disable_gpu()
        if use_mkl and not use_gpu:
            if self.model_name not in ["HRNet", "DeepLabv3p", "PPYOLO"]:
                config.enable_mkldnn()
                config.set_cpu_math_library_num_threads(mkl_thread_num)
            else:
                logging.warning(
                    "HRNet/DeepLabv3p/PPYOLO are not supported for the use of mkldnn\n"
                )
        if use_glog:
            config.enable_glog_info()
        else:
            config.disable_glog_info()
        if memory_optimize:
            config.enable_memory_optim()

        # Enable IR graph optimizations, such as OP fusion
        config.switch_ir_optim(True)
        # Disable feed/fetch OPs; this is required when using the ZeroCopy API
        config.switch_use_feed_fetch_ops(False)
        predictor = fluid.core.create_paddle_predictor(config)
        return predictor

    def preprocess(self, image, thread_pool=None):
        """ 对图像做预处理

            Args:
F
FlyingQianMM 已提交
137
                image(list|tuple): 数组中的元素可以是图像路径,也可以是解码后的排列格式为(H,W,C)
138
                    且类型为float32且为BGR格式的数组。
J
jiangjiajun 已提交
139 140 141
        """
        res = dict()
        if self.model_type == "classifier":
            im = BaseClassifier._preprocess(
                image,
                self.transforms,
                self.model_type,
                self.model_name,
                thread_pool=thread_pool)
            res['image'] = im
        elif self.model_type == "detector":
            if self.model_name in ["PPYOLO", "YOLOv3"]:
                im, im_size = PPYOLO._preprocess(
                    image,
                    self.transforms,
                    self.model_type,
                    self.model_name,
                    thread_pool=thread_pool)
                res['image'] = im
                res['im_size'] = im_size
            if self.model_name.count('RCNN') > 0:
                im, im_resize_info, im_shape = FasterRCNN._preprocess(
                    image,
                    self.transforms,
                    self.model_type,
                    self.model_name,
                    thread_pool=thread_pool)
                res['image'] = im
                res['im_info'] = im_resize_info
                res['im_shape'] = im_shape
        elif self.model_type == "segmenter":
            im, im_info = DeepLabv3p._preprocess(
                image,
                self.transforms,
                self.model_type,
                self.model_name,
                thread_pool=thread_pool)
            res['image'] = im
            res['im_info'] = im_info
        return res

    def postprocess(self,
                    results,
                    topk=1,
                    batch_size=1,
                    im_shape=None,
                    im_info=None):
        """ 对预测结果做后处理

            Args:
                results (list): 预测结果
                topk (int): 分类预测时前k个最大值
                batch_size (int): 预测时图像批量大小
                im_shape (list): MaskRCNN的图像输入大小
                im_info (list):RCNN系列和分割网络的原图大小
        """

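        # Convert a LoD offset vector into per-image lengths; e.g. (illustrative
        # values) an offset of [0, 2, 5] for a batch of two images yields
        # lengths [2, 3]: two detections for the first image, three for the second.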
        def offset_to_lengths(lod):
            offset = lod[0]
            lengths = [
                offset[i + 1] - offset[i] for i in range(len(offset) - 1)
            ]
            return [lengths]

        if self.model_type == "classifier":
            true_topk = min(self.num_classes, topk)
            preds = BaseClassifier._postprocess([results[0][0]], true_topk,
                                                self.labels)
        elif self.model_type == "detector":
            res = {'bbox': (results[0][0], offset_to_lengths(results[0][1])), }
            res['im_id'] = (np.array(
                [[i] for i in range(batch_size)]).astype('int32'), [[]])
            if self.model_name in ["PPYOLO", "YOLOv3"]:
                preds = PPYOLO._postprocess(res, batch_size, self.num_classes,
                                            self.labels)
            elif self.model_name == "FasterRCNN":
                preds = FasterRCNN._postprocess(res, batch_size,
                                                self.num_classes, self.labels)
            elif self.model_name == "MaskRCNN":
                res['mask'] = (results[1][0], offset_to_lengths(results[1][1]))
                res['im_shape'] = (im_shape, [])
                preds = MaskRCNN._postprocess(
                    res, batch_size, self.num_classes,
                    self.mask_head_resolution, self.labels)
        elif self.model_type == "segmenter":
            res = [results[0][0], results[1][0]]
            preds = DeepLabv3p._postprocess(res, im_info)
        return preds

    def raw_predict(self, inputs):
        """ 接受预处理过后的数据进行预测

            Args:
                inputs(tuple): 预处理过后的数据
        """
        for k, v in inputs.items():
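            # Skip preprocessed fields that are not input tensors of the inference graph.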
            try:
                tensor = self.predictor.get_input_tensor(k)
            except:
                continue
            tensor.copy_from_cpu(v)
        self.predictor.zero_copy_run()
        output_names = self.predictor.get_output_names()
        output_results = list()
        for name in output_names:
            output_tensor = self.predictor.get_output_tensor(name)
            output_tensor_lod = output_tensor.lod()
            output_results.append(
                [output_tensor.copy_to_cpu(), output_tensor_lod])
        return output_results

    def predict(self, image, topk=1, transforms=None):
        """ Predict a single image.

            Args:
                image(str|np.ndarray): image path, or a decoded float32 array in BGR
                    format arranged as (H, W, C).
                topk(int): used for classification; return the top-k predictions.
                transforms (paddlex.cls.transforms): data preprocessing transforms.
        """
        if transforms is not None:
            self.transforms = transforms
        preprocessed_input = self.preprocess([image])
        model_pred = self.raw_predict(preprocessed_input)
        im_shape = None if 'im_shape' not in preprocessed_input else preprocessed_input[
            'im_shape']
        im_info = None if 'im_info' not in preprocessed_input else preprocessed_input[
            'im_info']
        results = self.postprocess(
            model_pred,
            topk=topk,
            batch_size=1,
            im_shape=im_shape,
            im_info=im_info)

        return results[0]

    def batch_predict(self, image_list, topk=1, transforms=None):
        """ 图片预测

            Args:
F
FlyingQianMM 已提交
279 280 281
                image_list(list|tuple): 对列表(或元组)中的图像同时进行预测,列表中的元素可以是图像路径
                    也可以是解码后的排列格式为(H,W,C)且类型为float32且为BGR格式的数组。

282 283
                topk(int): 分类预测时使用,表示预测前topk的结果。
                transforms (paddlex.cls.transforms): 数据预处理操作。
J
jiangjiajun 已提交
284
        """
        if transforms is not None:
            self.transforms = transforms
        preprocessed_input = self.preprocess(image_list, self.thread_pool)
        model_pred = self.raw_predict(preprocessed_input)
        im_shape = None if 'im_shape' not in preprocessed_input else preprocessed_input[
            'im_shape']
        im_info = None if 'im_info' not in preprocessed_input else preprocessed_input[
            'im_info']
        results = self.postprocess(
            model_pred,
            topk=topk,
            batch_size=len(image_list),
            im_shape=im_shape,
            im_info=im_info)

        return results
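

# A minimal usage sketch, not part of the original file: './inference_model' and
# 'test.jpg' are placeholder paths for an exported inference model and a test
# image. predict() handles a single image; batch_predict() handles several and
# parallelizes preprocessing over the internal thread pool.
if __name__ == '__main__':
    # Build the predictor on CPU; switch to use_gpu=True when a GPU is available.
    predictor = Predictor('./inference_model', use_gpu=False)
    # Single-image prediction; topk only affects classification models.
    print(predictor.predict('test.jpg', topk=5))
    # Batch prediction over a list of images.
    print(predictor.batch_predict(['test.jpg', 'test.jpg'], topk=5))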