# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import os.path as osp
import cv2
import numpy as np
import yaml
import multiprocessing as mp
import paddlex
import paddle.fluid as fluid
from paddlex.cv.transforms import build_transforms
from paddlex.cv.models import BaseClassifier
from paddlex.cv.models import PPYOLO, FasterRCNN, MaskRCNN
from paddlex.cv.models import DeepLabv3p


class Predictor:
    def __init__(self,
                 model_dir,
                 use_gpu=True,
                 gpu_id=0,
                 use_mkl=False,
                 mkl_thread_num=4,
                 use_trt=False,
                 use_glog=False,
                 memory_optimize=True):
        """ Create a Paddle Predictor.

            Args:
                model_dir: model directory (must be an exported deployment or quantized model)
                use_gpu: whether to use the GPU, default True
                gpu_id: id of the GPU to use, default 0
                use_mkl: whether to use the MKL-DNN library when running on CPU, default False
                mkl_thread_num: number of MKL-DNN computation threads, default 4
                use_trt: whether to use TensorRT, default False
                use_glog: whether to enable glog logging, default False
                memory_optimize: whether to enable memory optimization, default True
        """
        if not osp.isdir(model_dir):
            raise Exception("[ERROR] Path {} does not exist.".format(model_dir))
        if not osp.exists(osp.join(model_dir, "model.yml")):
            raise Exception("There is no model.yml in {}".format(model_dir))
        with open(osp.join(model_dir, "model.yml")) as f:
            self.info = yaml.load(f.read(), Loader=yaml.Loader)

        self.status = self.info['status']

        if self.status != "Quant" and self.status != "Infer":
            raise Exception("[ERROR] Only quantized model or exported "
                            "inference model is supported.")

        self.model_dir = model_dir
        self.model_type = self.info['_Attributes']['model_type']
        self.model_name = self.info['Model']
        self.num_classes = self.info['_Attributes']['num_classes']
        self.labels = self.info['_Attributes']['labels']
        if self.info['Model'] == 'MaskRCNN':
            if self.info['_init_params']['with_fpn']:
                self.mask_head_resolution = 28
            else:
                self.mask_head_resolution = 14
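        # Decide whether inputs need BGR-to-RGB conversion based on the exported TransformsMode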
        transforms_mode = self.info.get('TransformsMode', 'RGB')
        if transforms_mode == 'RGB':
            to_rgb = True
        else:
            to_rgb = False
        self.transforms = build_transforms(self.model_type,
                                           self.info['Transforms'], to_rgb)
        self.predictor = self.create_predictor(use_gpu, gpu_id, use_mkl,
                                               mkl_thread_num, use_trt,
                                               use_glog, memory_optimize)
        # Thread pool used during prediction to process the input data in
        # parallel, one image per task; mainly used by the batch_predict API
        thread_num = mp.cpu_count() if mp.cpu_count() < 8 else 8
        self.thread_pool = mp.pool.ThreadPool(thread_num)

    def reset_thread_pool(self, thread_num):
        self.thread_pool.close()
        self.thread_pool.join()
        self.thread_pool = mp.pool.ThreadPool(thread_num)

    def create_predictor(self,
                         use_gpu=True,
                         gpu_id=0,
                         use_mkl=False,
                         mkl_thread_num=mp.cpu_count(),
                         use_trt=False,
                         use_glog=False,
                         memory_optimize=True):
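        # Build the inference config from the exported __model__ / __params__ files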
        config = fluid.core.AnalysisConfig(
            os.path.join(self.model_dir, '__model__'),
            os.path.join(self.model_dir, '__params__'))

        if use_gpu:
            # Set the initial GPU memory pool (in MB) and the device ID
            config.enable_use_gpu(100, gpu_id)
        else:
            config.disable_gpu()
        if use_mkl:
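            # MKL-DNN acceleration is only turned on for models other than HRNet and DeepLabv3p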
            if self.model_name not in ["HRNet", "DeepLabv3p"]:
                config.enable_mkldnn()
                config.set_cpu_math_library_num_threads(mkl_thread_num)
        if use_glog:
            config.enable_glog_info()
        else:
            config.disable_glog_info()
        if memory_optimize:
            config.enable_memory_optim()

        # Enable IR graph optimizations such as operator fusion
        config.switch_ir_optim(True)
        # Disable feed/fetch ops; this is required when using the zero-copy API
        config.switch_use_feed_fetch_ops(False)
        predictor = fluid.core.create_paddle_predictor(config)
        return predictor

    def preprocess(self, image, thread_pool=None):
        """ Preprocess images.

            Args:
                image(list|tuple): each element can be an image path, or a decoded
                    BGR image array of shape (H, W, C) with dtype float32.
        """
        res = dict()
        if self.model_type == "classifier":
            im = BaseClassifier._preprocess(
                image,
                self.transforms,
                self.model_type,
                self.model_name,
                thread_pool=thread_pool)
            res['image'] = im
        elif self.model_type == "detector":
            if self.model_name in ["PPYOLO", "YOLOv3"]:
                im, im_size = PPYOLO._preprocess(
                    image,
                    self.transforms,
                    self.model_type,
                    self.model_name,
                    thread_pool=thread_pool)
                res['image'] = im
                res['im_size'] = im_size
            if 'RCNN' in self.model_name:
                im, im_resize_info, im_shape = FasterRCNN._preprocess(
                    image,
                    self.transforms,
                    self.model_type,
                    self.model_name,
                    thread_pool=thread_pool)
                res['image'] = im
                res['im_info'] = im_resize_info
                res['im_shape'] = im_shape
        elif self.model_type == "segmenter":
            im, im_info = DeepLabv3p._preprocess(
                image,
                self.transforms,
                self.model_type,
                self.model_name,
                thread_pool=thread_pool)
            res['image'] = im
            res['im_info'] = im_info
        return res

    def postprocess(self,
                    results,
                    topk=1,
                    batch_size=1,
                    im_shape=None,
                    im_info=None):
        """ Postprocess the prediction results.

            Args:
                results (list): raw prediction results
                topk (int): number of top classes to keep for classification
                batch_size (int): batch size used for prediction
                im_shape (list): input image size for MaskRCNN
                im_info (list): original image size for RCNN-series and segmentation models
        """

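        # Convert the LoD offsets returned by the predictor into per-sample lengths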
        def offset_to_lengths(lod):
            offset = lod[0]
            lengths = [
                offset[i + 1] - offset[i] for i in range(len(offset) - 1)
            ]
            return [lengths]

        if self.model_type == "classifier":
            true_topk = min(self.num_classes, topk)
            preds = BaseClassifier._postprocess([results[0][0]], true_topk,
                                                self.labels)
        elif self.model_type == "detector":
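            # Pair the raw bbox output with per-image lengths derived from its LoD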
            res = {'bbox': (results[0][0], offset_to_lengths(results[0][1])), }
            res['im_id'] = (np.array(
                [[i] for i in range(batch_size)]).astype('int32'), [[]])
            if self.model_name in ["PPYOLO", "YOLOv3"]:
                preds = PPYOLO._postprocess(res, batch_size, self.num_classes,
                                            self.labels)
            elif self.model_name == "FasterRCNN":
                preds = FasterRCNN._postprocess(res, batch_size,
                                                self.num_classes, self.labels)
            elif self.model_name == "MaskRCNN":
                res['mask'] = (results[1][0], offset_to_lengths(results[1][1]))
                res['im_shape'] = (im_shape, [])
                preds = MaskRCNN._postprocess(
                    res, batch_size, self.num_classes,
                    self.mask_head_resolution, self.labels)
        elif self.model_type == "segmenter":
            res = [results[0][0], results[1][0]]
            preds = DeepLabv3p._postprocess(res, im_info)
        return preds

    def raw_predict(self, inputs):
        """ Run prediction on preprocessed data.

            Args:
                inputs(dict): preprocessed data
        """
        for k, v in inputs.items():
            try:
                tensor = self.predictor.get_input_tensor(k)
            except Exception:
                continue
            tensor.copy_from_cpu(v)
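        # Run inference via the zero-copy API and collect every output tensor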
        self.predictor.zero_copy_run()
        output_names = self.predictor.get_output_names()
        output_results = list()
        for name in output_names:
            output_tensor = self.predictor.get_output_tensor(name)
            output_tensor_lod = output_tensor.lod()
            output_results.append(
                [output_tensor.copy_to_cpu(), output_tensor_lod])
        return output_results

    def predict(self, image, topk=1):
        """ Predict a single image.

            Args:
                image(str|np.ndarray): an image path, or a decoded BGR image array
                    of shape (H, W, C) with dtype float32.
                topk(int): for classification, keep the top-k predictions
        """
        preprocessed_input = self.preprocess([image])
        model_pred = self.raw_predict(preprocessed_input)
        im_shape = None if 'im_shape' not in preprocessed_input else preprocessed_input[
            'im_shape']
        im_info = None if 'im_info' not in preprocessed_input else preprocessed_input[
            'im_info']
        results = self.postprocess(
            model_pred,
            topk=topk,
            batch_size=1,
            im_shape=im_shape,
            im_info=im_info)

        return results[0]

    def batch_predict(self, image_list, topk=1):
        """ Predict a batch of images.

            Args:
                image_list(list|tuple): images in the list (or tuple) are predicted
                    together; each element can be an image path, or a decoded BGR
                    image array of shape (H, W, C) with dtype float32.
                topk(int): for classification, keep the top-k predictions
        """
        preprocessed_input = self.preprocess(image_list, self.thread_pool)
        model_pred = self.raw_predict(preprocessed_input)
        im_shape = None if 'im_shape' not in preprocessed_input else preprocessed_input[
            'im_shape']
        im_info = None if 'im_info' not in preprocessed_input else preprocessed_input[
            'im_info']
        results = self.postprocess(
            model_pred,
            topk=topk,
            batch_size=len(image_list),
            im_shape=im_shape,
            im_info=im_info)

        return results
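

# Minimal usage sketch: the model directory and image paths below are
# hypothetical placeholders for a model exported for deployment.
if __name__ == "__main__":
    predictor = Predictor(model_dir="./inference_model", use_gpu=False)
    # Single-image prediction; the input may be a path or a decoded
    # float32 BGR array of shape (H, W, C)
    result = predictor.predict("test.jpg", topk=1)
    # Batch prediction; preprocessing runs in the internal thread pool
    results = predictor.batch_predict(["test1.jpg", "test2.jpg"], topk=1)
    print(result)
    print(results)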