# coding: utf8
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
import struct
import importlib

import paddle.fluid as fluid
import numpy as np
from paddle.fluid.proto.framework_pb2 import VarType

import solver
from utils.config import cfg
from loss import multi_softmax_with_loss
from loss import multi_dice_loss
from loss import multi_bce_loss


class ModelPhase(object):
    """
    Standard name for model phase in PaddleSeg

    The following standard keys are defined:
    * `TRAIN`: training mode.
    * `EVAL`: testing/evaluation mode.
    * `PREDICT`: prediction/inference mode.
    * `VISUAL`: visualization mode.
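
    Example (illustrative):
        ModelPhase.is_train(ModelPhase.TRAIN)    # True
        ModelPhase.is_valid_phase('visual')      # True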
    """

    TRAIN = 'train'
    EVAL = 'eval'
    PREDICT = 'predict'
    VISUAL = 'visual'

    @staticmethod
    def is_train(phase):
        return phase == ModelPhase.TRAIN

    @staticmethod
    def is_predict(phase):
        return phase == ModelPhase.PREDICT

    @staticmethod
    def is_eval(phase):
        return phase == ModelPhase.EVAL

    @staticmethod
    def is_visual(phase):
        return phase == ModelPhase.VISUAL

    @staticmethod
    def is_valid_phase(phase):
        """ Check valid phase """
        if ModelPhase.is_train(phase) or ModelPhase.is_predict(phase) \
                or ModelPhase.is_eval(phase) or ModelPhase.is_visual(phase):
            return True

        return False


def map_model_name(model_name):
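    # Map a config model name (cfg.MODEL.MODEL_NAME) to "<module>.<function>";
    # build_model prepends "modeling." and resolves the result with get_func,
    # e.g. "unet" -> "unet.unet".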
    name_dict = {
        "unet": "unet.unet",
        "deeplabv3p": "deeplab.deeplabv3p",
        "icnet": "icnet.icnet",
        "pspnet": "pspnet.pspnet",
        "hrnet": "hrnet.hrnet"
    }
    if model_name in name_dict:
        return name_dict[model_name]
    else:
        raise Exception(
            "unknown model name, only support unet, deeplabv3p, icnet, pspnet, hrnet"
        )


def get_func(func_name):
    """Helper to return a function object by name. func_name must identify a
    function in this module or the path to a function relative to the base
    'modeling' module.
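
    For example, get_func('modeling.unet.unet') imports models.modeling.unet
    and returns its `unet` attribute.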
    """
    if func_name == '':
        return None
    try:
        parts = func_name.split('.')
        # Refers to a function in this module
        if len(parts) == 1:
            return globals()[parts[0]]
        # Otherwise, assume we're referencing a module under modeling
        module_name = 'models.' + '.'.join(parts[:-1])
        module = importlib.import_module(module_name)
        return getattr(module, parts[-1])
    except Exception:
        print('Failed to find function: {}'.format(func_name))
        raise


def softmax(logit):
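    # Apply softmax over the channel dimension by moving it to the innermost axis
    # (NCHW -> NHWC), where fluid.layers.softmax normalizes, then moving it back.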
    logit = fluid.layers.transpose(logit, [0, 2, 3, 1])
    logit = fluid.layers.softmax(logit)
    logit = fluid.layers.transpose(logit, [0, 3, 1, 2])
    return logit


def sigmoid_to_softmax(logit):
    """
    one channel to two channel
    """
    logit = fluid.layers.transpose(logit, [0, 2, 3, 1])
    logit = fluid.layers.sigmoid(logit)
    logit_back = 1 - logit
    logit = fluid.layers.concat([logit_back, logit], axis=-1)
    logit = fluid.layers.transpose(logit, [0, 3, 1, 2])
    return logit

def export_preprocess(image):
    """导出模型的预处理流程"""

    image = fluid.layers.transpose(image, [0, 3, 1, 2])
    origin_shape = fluid.layers.shape(image)[-2:]

    # Resize according to the configured AUG.AUG_METHOD
    if cfg.AUG.AUG_METHOD == 'unpadding':
        h_fix = cfg.AUG.FIX_RESIZE_SIZE[1]
        w_fix = cfg.AUG.FIX_RESIZE_SIZE[0]
        image = fluid.layers.resize_bilinear(
            image,
            out_shape=[h_fix, w_fix],
            align_corners=False,
            align_mode=0)
    elif cfg.AUG.AUG_METHOD == 'rangescaling':
        size = cfg.AUG.INF_RESIZE_VALUE
        value = fluid.layers.reduce_max(origin_shape)
        scale = float(size) / value.astype('float32')
        image = fluid.layers.resize_bilinear(
            image, scale=scale, align_corners=False, align_mode=0)
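    # Any other AUG_METHOD leaves the image at its original size here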

    # Record the image shape after resizing
    valid_shape = fluid.layers.shape(image)[-2:]

    # Pad the image up to EVAL_CROP_SIZE
    width = cfg.EVAL_CROP_SIZE[0]
    height = cfg.EVAL_CROP_SIZE[1]
    pad_target = fluid.layers.assign(
        np.array([height, width]).astype('float32'))
    up = fluid.layers.assign(np.array([0]).astype('float32'))
    down = pad_target[0] - valid_shape[0]
    left = up
    right = pad_target[1] - valid_shape[1]
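    # fluid.layers.pad2d takes paddings as [top, bottom, left, right]; only the
    # bottom and right sides are padded, keeping the valid region at the top-left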
    paddings = fluid.layers.concat([up, down, left, right])
    paddings = fluid.layers.cast(paddings, 'int32')
    image = fluid.layers.pad2d(
        image, paddings=paddings, pad_value=127.5)

    # normalize
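    # Per-channel mean/std are reshaped to (1, C, 1, 1) so they broadcast over the
    # NCHW image; pixel values are first scaled from [0, 255] to [0, 1]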
    mean = np.array(cfg.MEAN).reshape(1, len(cfg.MEAN), 1, 1)
    mean = fluid.layers.assign(mean.astype('float32'))
    std = np.array(cfg.STD).reshape(1, len(cfg.STD), 1, 1)
    std = fluid.layers.assign(std.astype('float32'))
    image = (image / 255 - mean) / std
    # Reshape so that downstream networks can read a static feature-map shape
    # via image.shape
    image = fluid.layers.reshape(
        image, shape=[-1, cfg.DATASET.DATA_DIM, height, width])
    return image, valid_shape, origin_shape


def build_model(main_prog, start_prog, phase=ModelPhase.TRAIN):
    if not ModelPhase.is_valid_phase(phase):
        raise ValueError("ModelPhase {} is not valid!".format(phase))
    if ModelPhase.is_train(phase):
        width = cfg.TRAIN_CROP_SIZE[0]
        height = cfg.TRAIN_CROP_SIZE[1]
    else:
        width = cfg.EVAL_CROP_SIZE[0]
        height = cfg.EVAL_CROP_SIZE[1]

    image_shape = [cfg.DATASET.DATA_DIM, height, width]
    grt_shape = [1, height, width]
    class_num = cfg.DATASET.NUM_CLASSES

    with fluid.program_guard(main_prog, start_prog):
        with fluid.unique_name.guard():
            # When exporting the model, bake the image normalization preprocessing
            # into the graph to simplify deployment: at inference time the input
            # image only needs a batch_size dimension added.
            if ModelPhase.is_predict(phase):
                origin_image = fluid.layers.data(
                    name='image',
                    shape=[-1, -1, -1, cfg.DATASET.DATA_DIM],
                    dtype='float32',
                    append_batch_size=False)
                image, valid_shape, origin_shape = export_preprocess(origin_image)

            else:
                image = fluid.layers.data(
                    name='image', shape=image_shape, dtype='float32')
            label = fluid.layers.data(
                name='label', shape=grt_shape, dtype='int32')
            mask = fluid.layers.data(
                name='mask', shape=grt_shape, dtype='int32')

            # Use PyReader for data feeding during training and evaluation
            if ModelPhase.is_train(phase) or ModelPhase.is_eval(phase):
                py_reader = fluid.io.PyReader(
                    feed_list=[image, label, mask],
                    capacity=cfg.DATALOADER.BUF_SIZE,
                    iterable=False,
                    use_double_buffer=True)
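                # The caller is expected to decorate this py_reader with a data
                # generator and call start() before iterating (done outside this
                # function).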

            model_name = map_model_name(cfg.MODEL.MODEL_NAME)
            model_func = get_func("modeling." + model_name)

            loss_type = cfg.SOLVER.LOSS
            if not isinstance(loss_type, list):
                # wrap a single loss name rather than list(), which would split a
                # string into characters
                loss_type = [loss_type]

            # dice_loss and bce_loss are only applicable to binary segmentation
            if class_num > 2 and (("dice_loss" in loss_type) or
                                  ("bce_loss" in loss_type)):
                raise Exception(
                    "dice loss and bce loss are only applicable to binary classification"
                )

            # For binary segmentation with dice_loss or bce_loss, the final logit
            # is produced with a single output channel
            if ("dice_loss" in loss_type) or ("bce_loss" in loss_type):
                class_num = 1
                if "softmax_loss" in loss_type:
                    raise Exception(
                        "softmax loss can not be combined with dice loss or bce loss"
                    )
            logits = model_func(image, class_num)

            # Compute the corresponding loss terms for the selected loss functions
            if ModelPhase.is_train(phase) or ModelPhase.is_eval(phase):
                loss_valid = False
                avg_loss_list = []
                valid_loss = []
                if "softmax_loss" in loss_type:
                    avg_loss_list.append(
                        multi_softmax_with_loss(logits, label, mask, class_num))
                    loss_valid = True
                    valid_loss.append("softmax_loss")
                if "dice_loss" in loss_type:
                    avg_loss_list.append(multi_dice_loss(logits, label, mask))
                    loss_valid = True
                    valid_loss.append("dice_loss")
                if "bce_loss" in loss_type:
                    avg_loss_list.append(multi_bce_loss(logits, label, mask))
                    loss_valid = True
                    valid_loss.append("bce_loss")
                if not loss_valid:
                    raise Exception(
                        "SOLVER.LOSS: {} is set incorrectly. It should include at "
                        "least one of (softmax_loss, bce_loss, dice_loss), e.g. "
                        "['softmax_loss'], ['dice_loss'], ['bce_loss', 'dice_loss']"
                        .format(cfg.SOLVER.LOSS))

                invalid_loss = [x for x in loss_type if x not in valid_loss]
                if len(invalid_loss) > 0:
                    print(
                        "Warning: the loss {} you set is invalid and will not be "
                        "included in the computed loss.".format(invalid_loss))

                avg_loss = 0
                for loss in avg_loss_list:
                    avg_loss += loss

            # Get the prediction result at the original input size
            if isinstance(logits, tuple):
                logit = logits[0]
            else:
                logit = logits

            if logit.shape[2:] != label.shape[2:]:
                logit = fluid.layers.resize_bilinear(logit, label.shape[2:])

            # Return the image input and logit output for inference graph pruning
            if ModelPhase.is_predict(phase):
                # In binary segmentation with dice_loss or bce_loss the logit has a
                # single channel; convert it to two channels here
                if class_num == 1:
                    logit = sigmoid_to_softmax(logit)
                else:
                    logit = softmax(logit)

                # Crop out the valid (unpadded) region
                logit = fluid.layers.slice(
                    logit, axes=[2, 3], starts=[0, 0], ends=valid_shape)

                logit = fluid.layers.resize_bilinear(
                    logit,
                    out_shape=origin_shape,
                    align_corners=False,
                    align_mode=0)
                logit = fluid.layers.argmax(logit, axis=1)
                return origin_image, logit

            if class_num == 1:
                out = sigmoid_to_softmax(logit)
                out = fluid.layers.transpose(out, [0, 2, 3, 1])
            else:
                out = fluid.layers.transpose(logit, [0, 2, 3, 1])

            pred = fluid.layers.argmax(out, axis=3)
            pred = fluid.layers.unsqueeze(pred, axes=[3])
            if ModelPhase.is_visual(phase):
                if class_num == 1:
                    logit = sigmoid_to_softmax(logit)
                else:
                    logit = softmax(logit)
                return pred, logit

            if ModelPhase.is_eval(phase):
                return py_reader, avg_loss, pred, label, mask

            if ModelPhase.is_train(phase):
                optimizer = solver.Solver(main_prog, start_prog)
                decayed_lr = optimizer.optimise(avg_loss)
                return py_reader, avg_loss, decayed_lr, pred, label, mask
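
# Illustrative usage (a sketch; the actual training/eval scripts construct the
# programs and config before calling build_model):
#   train_prog, startup_prog = fluid.Program(), fluid.Program()
#   py_reader, avg_loss, lr, pred, grts, masks = build_model(
#       train_prog, startup_prog, phase=ModelPhase.TRAIN)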


def to_int(string, dest="I"):
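    # struct format "I" unpacks a 4-byte unsigned int, "Q" an 8-byte unsigned
    # long long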
    return struct.unpack(dest, string)[0]


def parse_shape_from_file(filename):
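    # Layout inferred from the reads below: 4-byte version, 8-byte LoD level,
    # per-level LoD data, another 4-byte version, 4-byte TensorDesc size, then
    # the protobuf-encoded VarType.TensorDesc that carries the tensor dims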
    with open(filename, "rb") as file:
        version = file.read(4)
        lod_level = to_int(file.read(8), dest="Q")
        for i in range(lod_level):
            _size = to_int(file.read(8), dest="Q")
            _ = file.read(_size)
        version = file.read(4)
        tensor_desc_size = to_int(file.read(4))
        tensor_desc = VarType.TensorDesc()
        tensor_desc.ParseFromString(file.read(tensor_desc_size))
    return tuple(tensor_desc.dims)