# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import distutils.util
import numpy as np
import six
import argparse
import functools
import logging
import sys
import os
import warnings
import signal

import paddle
import paddle.fluid as fluid
from paddle.fluid.wrapped_decorator import signature_safe_contextmanager
from paddle.fluid.framework import Program, program_guard, name_scope, default_main_program
from paddle.fluid import unique_name, layers
from utils import dist_utils


def print_arguments(args):
    """Print argparse's arguments.

    Usage:

    .. code-block:: python

        parser = argparse.ArgumentParser()
        parser.add_argument("name", default="Jonh", type=str, help="User name.")
        args = parser.parse_args()
        print_arguments(args)

    :param args: Input argparse.Namespace for printing.
    :type args: argparse.Namespace
    """
    print("-------------  Configuration Arguments -------------")
    for arg, value in sorted(six.iteritems(vars(args))):
        print("%25s : %s" % (arg, value))
    print("----------------------------------------------------")


def add_arguments(argname, type, default, help, argparser, **kwargs):
    """Add argparse's argument. 

    Usage:

    .. code-block:: python

        parser = argparse.ArgumentParser()
        add_argument("name", str, "Jonh", "User name.", parser)
        args = parser.parse_args()
    """
    type = distutils.util.strtobool if type == bool else type
    argparser.add_argument(
        "--" + argname,
        default=default,
        type=type,
        help=help + ' Default: %(default)s.',
        **kwargs)


def parse_args():
    """Add arguments

    Returns: 
        all training args
R
ruri 已提交
84
    """
    parser = argparse.ArgumentParser(description=__doc__)
    add_arg = functools.partial(add_arguments, argparser=parser)
    # yapf: disable

    # ENV
    add_arg('use_gpu',                  bool,   True,                   "Whether to use GPU.")
    add_arg('model_save_dir',           str,    "./output",        "The directory path to save model.")
    add_arg('data_dir',                 str,    "./data/ILSVRC2012/",   "The ImageNet dataset root directory.")
    add_arg('pretrained_model',         str,    None,                   "Whether to load pretrained model.")
    add_arg('checkpoint',               str,    None,                   "Whether to resume checkpoint.")
    add_arg('print_step',               int,    10,                     "The step interval for printing logs.")
    add_arg('save_step',                int,    1,                      "The step interval for saving checkpoints.")

    # SOLVER AND HYPERPARAMETERS
    add_arg('model',                    str,    "ResNet50",   "The name of network.")
    add_arg('total_images',             int,    1281167,                "The number of total training images.")
    parser.add_argument('--image_shape', nargs='+', type=int, default=[3, 224, 224], help="The shape of image")
    add_arg('num_epochs',               int,    120,                    "The number of total epochs.")
    add_arg('class_dim',                int,    1000,                   "The number of total classes.")
    add_arg('batch_size',               int,    8,                      "Minibatch size on a device.")
    add_arg('test_batch_size',          int,    16,                     "Test batch size on a device.")
    add_arg('lr',                       float,  0.1,                    "The learning rate.")
    add_arg('lr_strategy',              str,    "piecewise_decay",      "The learning rate decay strategy.")
    add_arg('l2_decay',                 float,  1e-4,                   "The l2_decay parameter.")
    add_arg('momentum_rate',            float,  0.9,                    "The value of momentum_rate.")
    add_arg('warm_up_epochs',           float,  5.0,                    "The value of warm up epochs")
    add_arg('decay_epochs',             float,  2.4,                    "Decay epochs of exponential decay learning rate scheduler")
    add_arg('decay_rate',               float,  0.97,                   "Decay rate of exponential decay learning rate scheduler")
    add_arg('drop_connect_rate',        float,  0.2,                    "The value of drop connect rate")
    parser.add_argument('--step_epochs', nargs='+', type=int, default=[30, 60, 90], help="piecewise decay step")

    # READER AND PREPROCESS
    add_arg('use_dali',                 bool,   False,                  "Whether to use nvidia DALI for preprocessing")
    add_arg('lower_scale',              float,  0.08,                   "The value of lower_scale in random_crop")
    add_arg('lower_ratio',              float,  3./4.,                  "The value of lower_ratio in random_crop")
    add_arg('upper_ratio',              float,  4./3.,                  "The value of upper_ratio in random_crop")
    add_arg('resize_short_size',        int,    256,                    "The value of resize_short_size")
    add_arg('use_mixup',                bool,   False,                  "Whether to use mixup")
    add_arg('mixup_alpha',              float,  0.2,                    "The value of mixup_alpha")
    add_arg('reader_thread',            int,    8,                      "The number of multi thread reader")
    add_arg('reader_buf_size',          int,    2048,                   "The buf size of multi thread reader")
    add_arg('interpolation',            int,    None,                   "The interpolation mode")
    add_arg('use_aa',                   bool,   False,                  "Whether to use auto augment")
    parser.add_argument('--image_mean', nargs='+', type=float, default=[0.485, 0.456, 0.406], help="The mean of input image data")
    parser.add_argument('--image_std', nargs='+', type=float, default=[0.229, 0.224, 0.225], help="The std of input image data")

    # SWITCH
    #NOTE: (2019/08/08) FP16 is moving to PaddlePaddle/Fleet now
    #add_arg('use_fp16',                 bool,   False,                  "Whether to enable half precision training with fp16." )
    #add_arg('scale_loss',               float,  1.0,                    "The value of scale_loss for fp16." )
    add_arg('use_label_smoothing',      bool,   False,                  "Whether to use label_smoothing")
    add_arg('label_smoothing_epsilon',  float,  0.1,                    "The value of label_smoothing_epsilon parameter")
    #NOTE: (2019/08/08) temporary disable use_distill
    #add_arg('use_distill',              bool,   False,                  "Whether to use distill")
    add_arg("enable_ce",                bool,   False,                  "Whether to enable ce")
    add_arg('random_seed',              int,    None,                   "random seed")

    add_arg('use_ema',                  bool,   False,                  "Whether to use ExponentialMovingAverage.")
    add_arg('ema_decay',                float,  0.9999,                 "The value of ema decay rate")
    add_arg('padding_type',             str,    "SAME",                 "Padding type of convolution")
    add_arg('use_se',                   bool,   True,                   "Whether to use Squeeze-and-Excitation module for EfficientNet.")
    #NOTE: args for profiler
    add_arg('is_profiler',              int,    0,                      "The profiler switch (used for benchmark).")
    add_arg('profiler_path',            str,    './',                   "The profiler output file path (used for benchmark).")
    add_arg('max_iter',                 int,    0,                      "The max train batch num (used for benchmark).")
    add_arg('validate',                 int,    1,                      "Whether to validate (used for benchmark).")
    add_arg('same_feed',                int,    0,                      "Whether to feed the same images.")


    # yapf: enable
    args = parser.parse_args()

    return args
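

# Typical entry-point wiring (a sketch; the training script is assumed to call
# these helpers in roughly this order, they are not invoked from this module):
#
#     args = parse_args()
#     print_arguments(args)
#     check_args(args)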


def check_gpu():
    """   
R
ruri 已提交
162
    Log error and exit when set use_gpu=true in paddlepaddle
R
ruri 已提交
163
    cpu ver sion.
R
ruri 已提交
164
    """
    logger = logging.getLogger(__name__)
    err = "Config use_gpu cannot be set as true while you are " \
                "using paddlepaddle cpu version ! \nPlease try: \n" \
                "\t1. Install paddlepaddle-gpu to run model on GPU \n" \
                "\t2. Set use_gpu as false in config file to run " \
                "model on CPU"

    try:
        if args.use_gpu and not fluid.is_compiled_with_cuda():
            print(err)
            sys.exit(1)
    except Exception as e:
        pass


def check_version():
    """
    Log error and exit when the installed version of paddlepaddle is
    not satisfied.
    """
    err = "PaddlePaddle version 1.6 or higher is required, " \
          "or a suitable develop version is satisfied as well. \n" \
          "Please make sure the version is good with your code." \

    try:
        fluid.require_version('1.6.0')
    except Exception as e:
        print(err)
        sys.exit(1)


def check_args(args):
    """check arguments before running

    Args:
        args: all training arguments
    """

    # check models name
    sys.path.append("..")
    import models
    model_list = [m for m in dir(models) if "__" not in m]
    assert args.model in model_list, "{} is not in lists: {}, please check the model name".format(
        args.model, model_list)

    # check learning rate strategy
    lr_strategy_list = [
        "piecewise_decay", "cosine_decay", "linear_decay",
        "cosine_decay_warmup", "exponential_decay_warmup"
    ]
    if args.lr_strategy not in lr_strategy_list:
        warnings.warn(
            "\n{} is not in lists: {}, \nUse default learning strategy now.".
            format(args.lr_strategy, lr_strategy_list))
        args.lr_strategy = "default_decay"
    # check conflict of GoogLeNet and mixup
    if args.model == "GoogLeNet":
        assert args.use_mixup == False, "Cannot use mixup processing in GoogLeNet, please set use_mixup = False."

    if args.interpolation:
        assert args.interpolation in [
            0, 1, 2, 3, 4
        ], "Wrong interpolation, please set:\n0: cv2.INTER_NEAREST\n1: cv2.INTER_LINEAR\n2: cv2.INTER_CUBIC\n3: cv2.INTER_AREA\n4: cv2.INTER_LANCZOS4"

    if args.padding_type:
        assert args.padding_type in [
            "SAME", "VALID", "DYNAMIC"
        ], "Wrong padding_type, please set:\nSAME\nVALID\nDYNAMIC"

    assert args.checkpoint is None or args.pretrained_model is None, "Do not init model by checkpoint and pretrained_model both."

    # check pretrained_model path for loading
    if args.pretrained_model is not None:
        assert isinstance(args.pretrained_model, str)
        assert os.path.isdir(
            args.pretrained_model
        ), "please provide a valid pretrained_model path."

    #FIXME: check checkpoint path for saving
    if args.checkpoint is not None:
        assert isinstance(args.checkpoint, str)
        assert os.path.isdir(
            args.checkpoint
        ), "please support available checkpoint path for initing model."

    # check params for loading
    """
    if args.save_params:
        assert isinstance(args.save_params, str)
        assert os.path.isdir(
            args.save_params), "please support available save_params path."
    """

    # check gpu: when using gpu, the number of visible cards should divide batch size
    if args.use_gpu:
        assert args.batch_size % fluid.core.get_cuda_device_count(
        ) == 0, "please set a correct batch_size({}) that is divisible by the number of available cards({}); you can change the number of cards with: export CUDA_VISIBLE_DEVICES= ".format(
            args.batch_size, fluid.core.get_cuda_device_count())

    # check data directory
    assert os.path.isdir(
        args.data_dir
    ), "Data doesn't exist in {}, please load right path".format(args.data_dir)

    if args.enable_ce:
        args.random_seed = 0
        print("CE is running now!")

    #check gpu

    check_gpu()
    check_version()


def init_model(exe, args, program):
    if args.checkpoint:
        fluid.io.load_persistables(exe, args.checkpoint, main_program=program)
        print("Finish initing model from %s" % (args.checkpoint))

    if args.pretrained_model:

        def if_exist(var):
            return os.path.exists(os.path.join(args.pretrained_model, var.name))

        fluid.io.load_vars(
            exe,
            args.pretrained_model,
            main_program=program,
            predicate=if_exist)


def save_model(args, exe, train_prog, info):
    model_path = os.path.join(args.model_save_dir, args.model, str(info))
    if not os.path.isdir(model_path):
        os.makedirs(model_path)
    fluid.io.save_persistables(exe, model_path, main_program=train_prog)
    print("Already save model in %s" % (model_path))


def create_data_loader(is_train, args):
    """create data_loader
R
ruri 已提交
306 307

    Usage:
308
        Using mixup process in training, it will return 5 results, include data_loader, image, y_a(label), y_b(label) and lamda, or it will return 3 results, include data_loader, image, and label.
R
ruri 已提交
309 310 311 312 313 314

    Args: 
        is_train: mode
        args: arguments

    Returns:
315
        data_loader and the input data of net, 
R
ruri 已提交
316
    """
    image_shape = args.image_shape
    feed_image = fluid.data(
        name="feed_image",
        shape=[None] + image_shape,
        dtype="float32",
        lod_level=0)

    feed_label = fluid.data(
        name="feed_label", shape=[None, 1], dtype="int64", lod_level=0)
    feed_y_a = fluid.data(
        name="feed_y_a", shape=[None, 1], dtype="int64", lod_level=0)

    if is_train and args.use_mixup:
        feed_y_b = fluid.data(
            name="feed_y_b", shape=[None, 1], dtype="int64", lod_level=0)
        feed_lam = fluid.data(
            name="feed_lam", shape=[None, 1], dtype="float32", lod_level=0)

        data_loader = fluid.io.DataLoader.from_generator(
            feed_list=[feed_image, feed_y_a, feed_y_b, feed_lam],
            capacity=64,
            use_double_buffer=True,
            iterable=True)
        return data_loader, [feed_image, feed_y_a, feed_y_b, feed_lam]
    else:
        if args.use_dali:
            return None, [feed_image, feed_label]

        data_loader = fluid.io.DataLoader.from_generator(
            feed_list=[feed_image, feed_label],
            capacity=64,
            use_double_buffer=True,
            iterable=True)

        return data_loader, [feed_image, feed_label]
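

# A minimal wiring sketch for the loader above (names such as train_reader,
# compiled_prog and fetch_list are illustrative, assuming a sample-list reader
# like the one provided elsewhere in this repo):
#
#     data_loader, feeds = create_data_loader(is_train=True, args=args)
#     places = fluid.cuda_places() if args.use_gpu else fluid.cpu_places()
#     data_loader.set_sample_list_generator(train_reader, places)
#     for batch in data_loader():
#         exe.run(compiled_prog, feed=batch, fetch_list=fetch_list)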


def print_info(info_mode,
               metrics,
               time_info,
               pass_id=0,
               batch_id=0,
               print_step=1,
               device_num=1):
    """print function

    Args:
        pass_id: epoch index
        batch_id: batch index
        print_step: the print_step arguments
        metrics: message to print
        time_info: time information
        info_mode: mode
    """
    #XXX: Use specific name to choose pattern, not the length of metrics. 
    if info_mode == "batch":
        if batch_id % print_step == 0:
            #if isinstance(metrics,np.ndarray):
            # train and mixup output
            if len(metrics) == 2:
                loss, lr = metrics
                print(
                    "[Pass {0}, train batch {1}] \tloss {2}, lr {3}, elapse {4}".
                    format(pass_id, batch_id, "%.5f" % loss, "%.5f" % lr,
                           "%2.4f sec" % time_info))
            # train and no mixup output
            elif len(metrics) == 4:
                loss, acc1, acc5, lr = metrics
                print(
                    "[Pass {0}, train batch {1}] \tloss {2}, acc1 {3}, acc5 {4}, lr {5}, elapse {6}".
                    format(pass_id, batch_id, "%.5f" % loss, "%.5f" % acc1,
                           "%.5f" % acc5, "%.5f" % lr, "%2.4f sec" % time_info))
            # test output
            elif len(metrics) == 3:
                loss, acc1, acc5 = metrics
                print(
                    "[Pass {0}, test  batch {1}] \tloss {2}, acc1 {3}, acc5 {4}, elapse {5}".
                    format(pass_id, batch_id, "%.5f" % loss, "%.5f" % acc1,
                           "%.5f" % acc5, "%2.4f sec" % time_info))
            else:
                raise Exception(
                    "length of metrics {} is not implemented, It maybe caused by wrong format of build_program_output".
                    format(len(metrics)))
            sys.stdout.flush()

    elif info_mode == "epoch":
        ## TODO add time elapse
        if len(metrics) == 5:
            train_loss, _, test_loss, test_acc1, test_acc5 = metrics
            print(
                "[End pass {0}]\ttrain_loss {1}, test_loss {2}, test_acc1 {3}, test_acc5 {4}".
                format(pass_id, "%.5f" % train_loss, "%.5f" % test_loss, "%.5f"
                       % test_acc1, "%.5f" % test_acc5))
        elif len(metrics) == 7:
            train_loss, train_acc1, train_acc5, _, test_loss, test_acc1, test_acc5 = metrics
            print(
                "[End pass {0}]\ttrain_loss {1}, train_acc1 {2}, train_acc5 {3},test_loss {4}, test_acc1 {5}, test_acc5 {6}".
                format(pass_id, "%.5f" % train_loss, "%.5f" % train_acc1, "%.5f"
                       % train_acc5, "%.5f" % test_loss, "%.5f" % test_acc1,
                       "%.5f" % test_acc5))
        sys.stdout.flush()
    elif info_mode == "ce":
        assert len(
            metrics
        ) == 7, "Enable CE: The Metrics should contain train_loss, train_acc1, train_acc5, test_loss, test_acc1, test_acc5, and train_speed"
        assert len(
            time_info
        ) > 10, "The statistics of the 0~9th batches are dropped when doing benchmark or CE, because they might be mixed with startup time, so please make sure to train at least 10 batches."
        print_ce(device_num, metrics, time_info)
        #raise Warning("CE code is not ready")
    else:
        raise Exception("Illegal info_mode")


def print_ce(device_num, metrics, time_info):
    """ Print log for CE(for internal test).
    """
    train_loss, train_acc1, train_acc5, _, test_loss, test_acc1, test_acc5 = metrics

    train_speed = np.mean(np.array(time_info[10:]))

    print("kpis\ttrain_cost_card{}\t{}".format(device_num, train_loss))
    print("kpis\ttrain_acc1_card{}\t{}".format(device_num, train_acc1))
    print("kpis\ttrain_acc5_card{}\t{}".format(device_num, train_acc5))
    print("kpis\ttest_loss_card{}\t{}".format(device_num, test_loss))
    print("kpis\ttest_acc1_card{}\t{}".format(device_num, test_acc1))
    print("kpis\ttest_acc5_card{}\t{}".format(device_num, test_acc5))
    print("kpis\ttrain_speed_card{}\t{}".format(device_num, train_speed))


def best_strategy_compiled(args, program, loss, exe):
    """make a program which wrapped by a compiled program
    """

    if os.getenv('FLAGS_use_ngraph'):
        return program
    else:
        build_strategy = fluid.compiler.BuildStrategy()

        exec_strategy = fluid.ExecutionStrategy()

        if args.use_gpu:
            exec_strategy.num_threads = fluid.core.get_cuda_device_count()

        exec_strategy.num_iteration_per_drop_scope = 10

        num_trainers = int(os.environ.get('PADDLE_TRAINERS_NUM', 1))
        if num_trainers > 1 and args.use_gpu:
            dist_utils.prepare_for_multi_process(exe, build_strategy, program)
            # NOTE: the process is fast when num_threads is 1
            # for multi-process training.
            exec_strategy.num_threads = 1

        compiled_program = fluid.CompiledProgram(program).with_data_parallel(
            loss_name=loss.name,
            build_strategy=build_strategy,
            exec_strategy=exec_strategy)

        return compiled_program
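

# Typical use (a sketch; train_prog, train_cost and batch are assumed to come
# from the caller's program-building and feeding logic):
#
#     compiled_prog = best_strategy_compiled(args, train_prog, train_cost, exe)
#     loss, = exe.run(compiled_prog, feed=batch, fetch_list=[train_cost.name])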


class ExponentialMovingAverage(object):
    def __init__(self,
                 decay=0.999,
                 thres_steps=None,
                 zero_debias=False,
                 name=None):
        self._decay = decay
        self._thres_steps = thres_steps
        self._name = name if name is not None else ''
        self._decay_var = self._get_ema_decay()

        self._params_tmps = []
        for param in default_main_program().global_block().all_parameters():
            if param.do_model_average != False:
                tmp = param.block.create_var(
                    name=unique_name.generate(".".join(
                        [self._name + param.name, 'ema_tmp'])),
                    dtype=param.dtype,
                    persistable=False,
                    stop_gradient=True)
                self._params_tmps.append((param, tmp))

        self._ema_vars = {}
        for param, tmp in self._params_tmps:
            with param.block.program._optimized_guard(
                [param, tmp]), name_scope('moving_average'):
                self._ema_vars[param.name] = self._create_ema_vars(param)

        self.apply_program = Program()
        block = self.apply_program.global_block()
        with program_guard(main_program=self.apply_program):
            decay_pow = self._get_decay_pow(block)
            for param, tmp in self._params_tmps:
                param = block._clone_variable(param)
                tmp = block._clone_variable(tmp)
                ema = block._clone_variable(self._ema_vars[param.name])
                layers.assign(input=param, output=tmp)
                # bias correction
                if zero_debias:
                    ema = ema / (1.0 - decay_pow)
                layers.assign(input=ema, output=param)

        self.restore_program = Program()
        block = self.restore_program.global_block()
        with program_guard(main_program=self.restore_program):
            for param, tmp in self._params_tmps:
                tmp = block._clone_variable(tmp)
                param = block._clone_variable(param)
                layers.assign(input=tmp, output=param)

    def _get_ema_decay(self):
        with default_main_program()._lr_schedule_guard():
            decay_var = layers.tensor.create_global_var(
                shape=[1],
                value=self._decay,
                dtype='float32',
                persistable=True,
                name="scheduled_ema_decay_rate")

            if self._thres_steps is not None:
                decay_t = (self._thres_steps + 1.0) / (self._thres_steps + 10.0)
                with layers.control_flow.Switch() as switch:
                    with switch.case(decay_t < self._decay):
                        layers.tensor.assign(decay_t, decay_var)
                    with switch.default():
                        layers.tensor.assign(
                            np.array(
                                [self._decay], dtype=np.float32),
                            decay_var)
        return decay_var

    def _get_decay_pow(self, block):
        global_steps = layers.learning_rate_scheduler._decay_step_counter()
        decay_var = block._clone_variable(self._decay_var)
        decay_pow_acc = layers.elementwise_pow(decay_var, global_steps + 1)
        return decay_pow_acc

    def _create_ema_vars(self, param):
        param_ema = layers.create_global_var(
            name=unique_name.generate(self._name + param.name + '_ema'),
            shape=param.shape,
            value=0.0,
            dtype=param.dtype,
            persistable=True)

        return param_ema

    def update(self):
        """
        Update Exponential Moving Average. Should only call this method in
        train program.
        """
        param_master_emas = []
        for param, tmp in self._params_tmps:
            with param.block.program._optimized_guard(
                [param, tmp]), name_scope('moving_average'):
                param_ema = self._ema_vars[param.name]
                if param.name + '.master' in self._ema_vars:
                    master_ema = self._ema_vars[param.name + '.master']
                    param_master_emas.append([param_ema, master_ema])
                else:
                    ema_t = param_ema * self._decay_var + param * (
                        1 - self._decay_var)
                    layers.assign(input=ema_t, output=param_ema)

        # for fp16 params
        for param_ema, master_ema in param_master_emas:
            default_main_program().global_block().append_op(
                type="cast",
                inputs={"X": master_ema},
                outputs={"Out": param_ema},
                attrs={
                    "in_dtype": master_ema.dtype,
                    "out_dtype": param_ema.dtype
                })

    @signature_safe_contextmanager
    def apply(self, executor, need_restore=True):
        """
        Apply moving average to parameters for evaluation.

        Args:
            executor (Executor): The Executor to execute applying.
            need_restore (bool): Whether to restore parameters after applying.
        """
        executor.run(self.apply_program)
        try:
            yield
        finally:
            if need_restore:
                self.restore(executor)

    def restore(self, executor):
        """Restore parameters.

        Args:
            executor (Executor): The Executor to execute restoring.
        """
        executor.run(self.restore_program)
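

# Usage sketch for ExponentialMovingAverage (mirrors how the use_ema and
# ema_decay arguments are meant to be consumed by a training script; names
# below are illustrative):
#
#     global_steps = fluid.layers.learning_rate_scheduler._decay_step_counter()
#     ema = ExponentialMovingAverage(args.ema_decay, thres_steps=global_steps)
#     ema.update()             # build the EMA update ops in the train program
#     ...
#     with ema.apply(exe):     # evaluate with averaged weights
#         run_test()           # original weights are restored on exit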