# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import time

from collections import OrderedDict

import paddle.fluid as fluid

from ppcls.optimizer import LearningRateBuilder
from ppcls.optimizer import OptimizerBuilder
from ppcls.modeling import architectures
from ppcls.modeling.loss import CELoss
from ppcls.modeling.loss import MixCELoss
from ppcls.modeling.loss import JSDivLoss
from ppcls.modeling.loss import GoogLeNetLoss
from ppcls.utils.misc import AverageMeter
from ppcls.utils import logger

from paddle.fluid.incubate.fleet.collective import fleet
from paddle.fluid.incubate.fleet.collective import DistributedStrategy

from ema import ExponentialMovingAverage


def create_feeds(image_shape, use_mix=None):
    """
    Create feeds as model input

    Args:
        image_shape(list[int]): model input shape, such as [3, 224, 224]
        use_mix(bool): whether to use mix (including mixup, cutmix, fmix)

    Returns:
        feeds(dict): dict of model input variables
    """
    feeds = OrderedDict()
    feeds['image'] = fluid.data(
        name="feed_image", shape=[None] + image_shape, dtype="float32")
    if use_mix:
        feeds['feed_y_a'] = fluid.data(
            name="feed_y_a", shape=[None, 1], dtype="int64")
        feeds['feed_y_b'] = fluid.data(
            name="feed_y_b", shape=[None, 1], dtype="int64")
        feeds['feed_lam'] = fluid.data(
            name="feed_lam", shape=[None, 1], dtype="float32")
    else:
        feeds['label'] = fluid.data(
            name="feed_label", shape=[None, 1], dtype="int64")

    return feeds
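

# Illustrative usage (a sketch, not part of the training pipeline): with a
# standard 3x224x224 input and mix enabled, the returned dict contains the
# 'image', 'feed_y_a', 'feed_y_b' and 'feed_lam' variables.
#
#     feeds = create_feeds([3, 224, 224], use_mix=True)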


def create_dataloader(feeds):
    """
    Create a dataloader with model input variables

    Args:
        feeds(dict): dict of model input variables

    Returns:
        dataloader(fluid dataloader): a dataloader bound to the feed variables
    """
    trainer_num = int(os.environ.get('PADDLE_TRAINERS_NUM', 1))
    capacity = 64 if trainer_num <= 1 else 8
    dataloader = fluid.io.DataLoader.from_generator(
        feed_list=feeds,
        capacity=capacity,
        use_double_buffer=True,
        iterable=True)

    return dataloader
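

# Illustrative usage (a sketch): the feed variables created by create_feeds
# are passed in as a list, mirroring how build() wires them together below.
#
#     dataloader = create_dataloader(list(feeds.values()))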


def create_model(architecture, image, classes_num, is_train):
    """
    Create a model

    Args:
        architecture(dict): architecture information,
            the name (such as ResNet50) is needed
        image(variable): model input variable
        classes_num(int): num of classes
        is_train(bool): whether the model is created for training

    Returns:
        out(variable): model output variable
    """
    name = architecture["name"]
    params = architecture.get("params", {})
    params['is_test'] = not is_train
    model = architectures.__dict__[name](**params)
    out = model.net(input=image, class_dim=classes_num)
    return out
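

# Illustrative usage (a sketch; the dict mirrors the ARCHITECTURE section of a
# typical config, and assumes the named architecture accepts the generated
# params):
#
#     out = create_model({"name": "ResNet50"}, feeds['image'],
#                        classes_num=1000, is_train=True)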


def create_loss(out,
                feeds,
                architecture,
                classes_num=1000,
                epsilon=None,
                use_mix=False,
                use_distillation=False):
    """
    Create a loss for optimization, such as:
        1. CrossEntropy loss
        2. CrossEntropy loss with label smoothing
        3. CrossEntropy loss with mix(mixup, cutmix, fmix)
        4. CrossEntropy loss with label smoothing and mix(mixup, cutmix, fmix)
        5. GoogLeNet loss

    Args:
        out(variable): model output variable
        feeds(dict): dict of model input variables
        architecture(dict): architecture information,
            the name (such as ResNet50) is needed
        classes_num(int): num of classes
        epsilon(float): parameter for label smoothing, 0.0 <= epsilon <= 1.0
        use_mix(bool): whether to use mix (including mixup, cutmix, fmix)

    Returns:
        loss(variable): loss variable
    """
    if architecture["name"] == "GoogLeNet":
        assert len(out) == 3, "GoogLeNet should have 3 outputs"
        loss = GoogLeNetLoss(class_dim=classes_num, epsilon=epsilon)
        target = feeds['label']
        return loss(out[0], out[1], out[2], target)

    if use_distillation:
        assert len(out) == 2, ("distillation output length must be 2, "
                               "but got {}".format(len(out)))
        loss = JSDivLoss(class_dim=classes_num, epsilon=epsilon)
        return loss(out[1], out[0])

    if use_mix:
        loss = MixCELoss(class_dim=classes_num, epsilon=epsilon)
        feed_y_a = feeds['feed_y_a']
        feed_y_b = feeds['feed_y_b']
        feed_lam = feeds['feed_lam']
        return loss(out, feed_y_a, feed_y_b, feed_lam)
    else:
        loss = CELoss(class_dim=classes_num, epsilon=epsilon)
        target = feeds['label']
        return loss(out, target)
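

# Illustrative usage (a sketch, assuming the plain cross-entropy path with a
# single model output):
#
#     loss = create_loss(out, feeds, {"name": "ResNet50"}, classes_num=1000)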


def create_metric(out,
                  feeds,
                  architecture,
                  topk=5,
                  classes_num=1000,
                  use_distillation=False):
    """
    Create measures of model accuracy, such as top1 and top5

    Args:
        out(variable): model output variable
        feeds(dict): dict of model input variables (including label)
        topk(int): usually top5
        classes_num(int): num of classes

    Returns:
        fetchs(dict): dict of measures
    """
    if architecture["name"] == "GoogLeNet":
        assert len(out) == 3, "GoogLeNet should have 3 outputs"
        softmax_out = out[0]
    else:
        # only the student output is needed to compute metrics
        if use_distillation:
            out = out[1]
        softmax_out = fluid.layers.softmax(out, use_cudnn=False)

    fetchs = OrderedDict()
    # set top1 to fetchs
    top1 = fluid.layers.accuracy(softmax_out, label=feeds['label'], k=1)
    fetchs['top1'] = (top1, AverageMeter('top1', '.4f', need_avg=True))
    # set topk to fetchs
    k = min(topk, classes_num)
    topk = fluid.layers.accuracy(softmax_out, label=feeds['label'], k=k)
    topk_name = 'top{}'.format(k)
    fetchs[topk_name] = (topk, AverageMeter(topk_name, '.4f', need_avg=True))

    return fetchs
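

# Illustrative usage (a sketch): each entry pairs an accuracy variable with an
# AverageMeter, e.g. {'top1': (...), 'top5': (...)}.
#
#     metrics = create_metric(out, feeds, {"name": "ResNet50"}, topk=5,
#                             classes_num=1000)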


def create_fetchs(out,
                  feeds,
                  architecture,
                  topk=5,
                  classes_num=1000,
                  epsilon=None,
                  use_mix=False,
                  use_distillation=False):
    """
    Create fetchs as model outputs (including loss and measures);
    calls create_loss, and create_metric only when use_mix is not set.

    Args:
        out(variable): model output variable
        feeds(dict): dict of model input variables.
            If mix is used, it will not include the label.
        architecture(dict): architecture information,
            the name (such as ResNet50) is needed
        topk(int): usually top5
        classes_num(int): num of classes
        epsilon(float): parameter for label smoothing, 0.0 <= epsilon <= 1.0
        use_mix(bool): whether to use mix (including mixup, cutmix, fmix)

    Returns:
        fetchs(dict): dict of model outputs (including loss and measures)
    """
    fetchs = OrderedDict()
    loss = create_loss(out, feeds, architecture, classes_num, epsilon, use_mix,
                       use_distillation)
    fetchs['loss'] = (loss, AverageMeter('loss', '7.4f', need_avg=True))
    if not use_mix:
        metric = create_metric(out, feeds, architecture, topk, classes_num,
                               use_distillation)
        fetchs.update(metric)

    return fetchs
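

# Illustrative usage (a sketch mirroring the call made in build() below):
#
#     fetchs = create_fetchs(out, feeds, config.ARCHITECTURE, config.topk,
#                            config.classes_num,
#                            epsilon=config.get('ls_epsilon'),
#                            use_mix=use_mix,
#                            use_distillation=use_distillation)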


def create_optimizer(config):
    """
    Create an optimizer using config, usually including
    learning rate and regularization.

    Args:
        config(dict):  such as
        {
            'LEARNING_RATE':
                {'function': 'Cosine',
                 'params': {'lr': 0.1}
                },
            'OPTIMIZER':
                {'function': 'Momentum',
                 'params':{'momentum': 0.9},
                 'regularizer':
                    {'function': 'L2', 'factor': 0.0001}
                }
        }

    Returns:
        an optimizer instance
    """
    # create learning_rate instance
    lr_config = config['LEARNING_RATE']
    lr_config['params'].update({
        'epochs': config['epochs'],
        'step_each_epoch':
        config['total_images'] // config['TRAIN']['batch_size'],
    })
    lr = LearningRateBuilder(**lr_config)()

    # create optimizer instance
    opt_config = config['OPTIMIZER']
    opt = OptimizerBuilder(**opt_config)
    return opt(lr)
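

# Illustrative usage (a sketch; the config keys follow the example in the
# docstring above, plus 'epochs', 'total_images' and 'TRAIN.batch_size'):
#
#     optimizer = create_optimizer(config)
#     optimizer.minimize(loss)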


def dist_optimizer(config, optimizer):
    """
    Create a distributed optimizer based on a normal optimizer

    Args:
        config(dict):
        optimizer(): a normal optimizer

    Returns:
        optimizer: a distributed optimizer
    """
    exec_strategy = fluid.ExecutionStrategy()
    exec_strategy.num_threads = 3
    exec_strategy.num_iteration_per_drop_scope = 10

    dist_strategy = DistributedStrategy()
    dist_strategy.nccl_comm_num = 1
    dist_strategy.fuse_all_reduce_ops = True
    dist_strategy.exec_strategy = exec_strategy
    optimizer = fleet.distributed_optimizer(optimizer, strategy=dist_strategy)

    return optimizer
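

# Illustrative usage (a sketch; assumes the fleet collective mode has already
# been initialized by the training entry point):
#
#     optimizer = dist_optimizer(config, optimizer)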


def mixed_precision_optimizer(config, optimizer):
    use_fp16 = config.get('use_fp16', False)
    amp_scale_loss = config.get('amp_scale_loss', 1.0)
    use_dynamic_loss_scaling = config.get('use_dynamic_loss_scaling', False)
    if use_fp16:
        optimizer = fluid.contrib.mixed_precision.decorate(
            optimizer,
            init_loss_scaling=amp_scale_loss,
            use_dynamic_loss_scaling=use_dynamic_loss_scaling)

    return optimizer
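

# Illustrative usage (a sketch; 'use_fp16', 'amp_scale_loss' and
# 'use_dynamic_loss_scaling' are read from the same top-level config dict):
#
#     optimizer = mixed_precision_optimizer(config, optimizer)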


def build(config, main_prog, startup_prog, is_train=True):
    """
    Build a program using a model and an optimizer
        1. create feeds
        2. create a dataloader
        3. create a model
        4. create fetchs
        5. create an optimizer

    Args:
        config(dict): config
        main_prog(): main program
        startup_prog(): startup program
        is_train(bool): train or valid

    Returns:
        dataloader(): a bridge between the model and the data
        fetchs(dict): dict of model outputs(included loss and measures)
    """
    with fluid.program_guard(main_prog, startup_prog):
        with fluid.unique_name.guard():
            use_mix = config.get('use_mix') and is_train
            use_distillation = config.get('use_distillation')
            feeds = create_feeds(config.image_shape, use_mix=use_mix)
            dataloader = create_dataloader(feeds.values())
            out = create_model(config.ARCHITECTURE, feeds['image'],
                               config.classes_num, is_train)
            fetchs = create_fetchs(
                out,
                feeds,
                config.ARCHITECTURE,
                config.topk,
                config.classes_num,
                epsilon=config.get('ls_epsilon'),
                use_mix=use_mix,
                use_distillation=use_distillation)
            if is_train:
                optimizer = create_optimizer(config)
                lr = optimizer._global_learning_rate()
                fetchs['lr'] = (lr, AverageMeter('lr', 'f', need_avg=False))

                optimizer = mixed_precision_optimizer(config, optimizer)
                optimizer = dist_optimizer(config, optimizer)
                optimizer.minimize(fetchs['loss'][0])
                if config.get('use_ema'):

                    global_steps = fluid.layers.learning_rate_scheduler._decay_step_counter(
                    )
                    ema = ExponentialMovingAverage(
                        config.get('ema_decay'), thres_steps=global_steps)
                    ema.update()
                    return dataloader, fetchs, ema

    return dataloader, fetchs
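

# Illustrative usage (a sketch of how a training program is typically built;
# note that three values are returned when config.get('use_ema') is set):
#
#     startup_prog = fluid.Program()
#     train_prog = fluid.Program()
#     train_dataloader, train_fetchs = build(
#         config, train_prog, startup_prog, is_train=True)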


def compile(config, program, loss_name=None):
    """
    Compile the program

    Args:
        config(dict): config
        program(fluid.Program): the program to be compiled
        loss_name(str): loss name

    Returns:
        compiled_program(): a compiled program
    """
    build_strategy = fluid.compiler.BuildStrategy()
    exec_strategy = fluid.ExecutionStrategy()

    exec_strategy.num_threads = 1
    exec_strategy.num_iteration_per_drop_scope = 10

    compiled_program = fluid.CompiledProgram(program).with_data_parallel(
        loss_name=loss_name,
        build_strategy=build_strategy,
        exec_strategy=exec_strategy)

    return compiled_program
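

# Illustrative usage (a sketch; the loss variable name is taken from the
# fetchs dict returned by build()):
#
#     compiled_train_prog = compile(config, train_prog,
#                                   loss_name=train_fetchs['loss'][0].name)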


total_step = 0


def run(dataloader,
        exe,
        program,
        fetchs,
        epoch=0,
        mode='train',
        vdl_writer=None):
    """
    Feed data to the model and fetch the measures and loss

    Args:
        dataloader(fluid dataloader): dataloader that yields the input batches
        exe(fluid.Executor): executor used to run the program
        program(fluid.Program): the (compiled) program to run
        fetchs(dict): dict of measures and the loss
        epoch(int): epoch of training or validation
        mode(str): running mode, used for logging only
        vdl_writer(VisualDL writer): used to log the loss scalar, optional

    Returns:
        top1 average accuracy when mode is 'valid', otherwise None
    """
    print(fetchs)
    fetch_list = [f[0] for f in fetchs.values()]
    metric_list = [f[1] for f in fetchs.values()]
    for m in metric_list:
        m.reset()
    batch_time = AverageMeter('elapse', '.3f')
    tic = time.time()
    for idx, batch in enumerate(dataloader()):
        metrics = exe.run(program=program, feed=batch, fetch_list=fetch_list)
        batch_time.update(time.time() - tic)
        tic = time.time()
        for i, m in enumerate(metrics):
            metric_list[i].update(m[0], len(batch[0]))
        fetchs_str = ''.join([str(m.value) + ' '
                              for m in metric_list] + [batch_time.value]) + 's'
        if vdl_writer:
            global total_step
            logger.scaler('loss', metrics[0][0], total_step, vdl_writer)
            total_step += 1
        if mode == 'eval':
            logger.info("{:s} step:{:<4d} {:s}s".format(mode, idx, fetchs_str))
        else:
            epoch_str = "epoch:{:<3d}".format(epoch)
            step_str = "{:s} step:{:<4d}".format(mode, idx)

            logger.info("{:s} {:s} {:s}".format(
                logger.coloring(epoch_str, "HEADER")
                if idx == 0 else epoch_str,
                logger.coloring(step_str, "PURPLE"),
                logger.coloring(fetchs_str, 'OKGREEN')))

    end_str = ''.join([str(m.mean) + ' '
                       for m in metric_list] + [batch_time.total]) + 's'
    if mode == 'eval':
        logger.info("END {:s} {:s}s".format(mode, end_str))
    else:
        end_epoch_str = "END epoch:{:<3d}".format(epoch)

        logger.info("{:s} {:s} {:s}".format(
            logger.coloring(end_epoch_str, "RED"),
            logger.coloring(mode, "PURPLE"),
            logger.coloring(end_str, "OKGREEN")))

    # return top1_acc in order to save the best model
    if mode == 'valid':
        return fetchs["top1"][1].avg