# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import math
import copy
import paddle
import paddle.nn as nn

import paddle.optimizer as optimizer
from paddle.optimizer.lr import CosineAnnealingDecay
import paddle.regularizer as regularizer
from paddle import cos

from ppdet.core.workspace import register, serializable

__all__ = ['LearningRate', 'OptimizerBuilder']

from ppdet.utils.logger import setup_logger
logger = setup_logger(__name__)


@serializable
class CosineDecay(object):
    """
    Cosine learning rate decay

    Args:
        max_epochs (int): max epochs for the training process.
            If cosine decay is combined with warmup, it is recommended that
            max_iters be much larger than the number of warmup iterations.
        use_warmup (bool): whether a warmup phase precedes the decay schedule.
    """

    def __init__(self, max_epochs=1000, use_warmup=True):
        self.max_epochs = max_epochs
        self.use_warmup = use_warmup

    def __call__(self,
                 base_lr=None,
                 boundary=None,
                 value=None,
                 step_per_epoch=None):
        assert base_lr is not None, "base LR should be provided"

        max_iters = self.max_epochs * int(step_per_epoch)

        if boundary is not None and value is not None and self.use_warmup:
            for i in range(int(boundary[-1]), max_iters):
                boundary.append(i)

                decayed_lr = base_lr * 0.5 * (
                    math.cos(i * math.pi / max_iters) + 1)
                value.append(decayed_lr)
            return optimizer.lr.PiecewiseDecay(boundary, value)

        return optimizer.lr.CosineAnnealingDecay(base_lr, T_max=max_iters)
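
# A minimal illustrative sketch of the CosineDecay schedule above (the numbers
# are assumptions, not defaults): with base_lr = 0.01 and max_iters = 9000,
# iteration i uses
#     lr_i = 0.01 * 0.5 * (math.cos(i * math.pi / 9000) + 1)
# i.e. 0.01 at i = 0, 0.005 at i = 4500, and ~0 as i approaches 9000.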


@serializable
class PiecewiseDecay(object):
    """
    Multi step learning rate decay

    Args:
        gamma (float | list): decay factor, or a list of per-milestone decay
            factors
        milestones (list): epochs at which the learning rate is decayed
        values (list): learning rate values to use directly; overrides gamma
            when provided
        use_warmup (bool): whether a warmup phase precedes the decay schedule
    """

    def __init__(self,
                 gamma=[0.1, 0.01],
                 milestones=[8, 11],
                 values=None,
                 use_warmup=True):
        super(PiecewiseDecay, self).__init__()
        if type(gamma) is not list:
            self.gamma = []
            for i in range(len(milestones)):
                self.gamma.append(gamma / 10**i)
        else:
            self.gamma = gamma
        self.milestones = milestones
        self.values = values
        self.use_warmup = use_warmup

    def __call__(self,
                 base_lr=None,
                 boundary=None,
                 value=None,
                 step_per_epoch=None):
        if boundary is not None and self.use_warmup:
            boundary.extend([int(step_per_epoch) * i for i in self.milestones])
        else:
            # do not use LinearWarmup
            boundary = [int(step_per_epoch) * i for i in self.milestones]
            value = [base_lr]  # lr stays at base_lr for steps in [0, boundary[0])

        # self.values is set directly in the config
        if self.values is not None:
            assert len(self.milestones) + 1 == len(self.values)
            return optimizer.lr.PiecewiseDecay(boundary, self.values)

        # value is computed by self.gamma
        value = value if value is not None else [base_lr]
        for i in self.gamma:
            value.append(base_lr * i)

        return optimizer.lr.PiecewiseDecay(boundary, value)
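
# A minimal illustrative sketch of the PiecewiseDecay schedule above (the
# numbers are assumptions, not defaults): without warmup, base_lr = 0.01,
# gamma = [0.1, 0.01], milestones = [8, 11] and step_per_epoch = 500 give
# boundaries [4000, 5500] and values [0.01, 0.001, 0.0001], i.e. the learning
# rate drops 10x at epoch 8 and again at epoch 11.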


@serializable
class LinearWarmup(object):
    """
    Warm up learning rate linearly

    Args:
        steps (int): warm up steps
        start_factor (float): initial learning rate factor
    """

    def __init__(self, steps=500, start_factor=1. / 3):
        super(LinearWarmup, self).__init__()
        self.steps = steps
        self.start_factor = start_factor

    def __call__(self, base_lr, step_per_epoch):
        boundary = []
        value = []
        for i in range(self.steps + 1):
            if self.steps > 0:
                alpha = i / self.steps
                factor = self.start_factor * (1 - alpha) + alpha
                lr = base_lr * factor
                value.append(lr)
            if i > 0:
                boundary.append(i)
        return boundary, value
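
# A minimal illustrative sketch of the LinearWarmup schedule above (the
# numbers are assumptions): with base_lr = 0.01, steps = 500 and
# start_factor = 1/3, warmup step i in [0, 500] uses
#     lr_i = 0.01 * (1/3 * (1 - i/500) + i/500)
# so the learning rate ramps linearly from ~0.0033 at step 0 to 0.01 at step 500.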


@serializable
class BurninWarmup(object):
    """
    Warm up learning rate in burnin mode

    Args:
        steps (int): warm up steps
    """

    def __init__(self, steps=1000):
        super(BurninWarmup, self).__init__()
        self.steps = steps

    def __call__(self, base_lr, step_per_epoch):
        boundary = []
        value = []
        burnin = min(self.steps, step_per_epoch)
        for i in range(burnin + 1):
            factor = (i * 1.0 / burnin)**4
            lr = base_lr * factor
            value.append(lr)
            if i > 0:
                boundary.append(i)
        return boundary, value
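
# A minimal illustrative sketch of the BurninWarmup schedule above (the
# numbers are assumptions): with base_lr = 0.01 and burnin = 1000, warmup
# step i uses
#     lr_i = 0.01 * (i / 1000) ** 4
# a quartic ramp that stays close to 0 for most of the burnin phase and
# reaches 0.01 at step 1000.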


@register
class LearningRate(object):
    """
    Learning Rate configuration

    Args:
        base_lr (float): base learning rate
        schedulers (list): learning rate schedulers
    """
    __category__ = 'optim'

    def __init__(self,
                 base_lr=0.01,
                 schedulers=[PiecewiseDecay(), LinearWarmup()]):
        super(LearningRate, self).__init__()
        self.base_lr = base_lr
        self.schedulers = schedulers

    def __call__(self, step_per_epoch):
        assert len(self.schedulers) >= 1
        if not self.schedulers[0].use_warmup:
            return self.schedulers[0](base_lr=self.base_lr,
                                      step_per_epoch=step_per_epoch)

        # TODO: split warmup & decay 
        # warmup
        boundary, value = self.schedulers[1](self.base_lr, step_per_epoch)
        # decay
        decay_lr = self.schedulers[0](self.base_lr, boundary, value,
                                      step_per_epoch)
        return decay_lr
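
# A minimal usage sketch for LearningRate above (the variable names and
# numbers are assumptions; in PaddleDetection this object normally comes from
# the YAML config):
#
#     lr_builder = LearningRate(
#         base_lr=0.01,
#         schedulers=[PiecewiseDecay(milestones=[8, 11]),
#                     LinearWarmup(steps=500)])
#     lr_scheduler = lr_builder(step_per_epoch=500)  # a paddle LRScheduler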


@register
class OptimizerBuilder():
    """
    Build optimizer handles

    Args:
        clip_grad_by_norm (float): global-norm gradient clipping threshold;
            clipping is disabled when None
        regularizer (object): a `Regularizer` instance
        optimizer (object): an `Optimizer` instance
    """
    __category__ = 'optim'

    def __init__(self,
                 clip_grad_by_norm=None,
                 regularizer={'type': 'L2',
                              'factor': .0001},
                 optimizer={'type': 'Momentum',
                            'momentum': .9}):
        self.clip_grad_by_norm = clip_grad_by_norm
        self.regularizer = regularizer
        self.optimizer = optimizer

    def __call__(self, learning_rate, params=None):
        if self.clip_grad_by_norm is not None:
            grad_clip = nn.ClipGradByGlobalNorm(
                clip_norm=self.clip_grad_by_norm)
        else:
            grad_clip = None
        if self.regularizer:
            reg_type = self.regularizer['type'] + 'Decay'
            reg_factor = self.regularizer['factor']
            regularization = getattr(regularizer, reg_type)(reg_factor)
        else:
            regularization = None

        optim_args = self.optimizer.copy()
        optim_type = optim_args['type']
        del optim_args['type']
        op = getattr(optimizer, optim_type)
        return op(learning_rate=learning_rate,
                  parameters=params,
                  weight_decay=regularization,
                  grad_clip=grad_clip,
                  **optim_args)
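
# A minimal usage sketch for OptimizerBuilder above (the variable names are
# assumptions): the `type` strings are resolved against paddle.regularizer
# ('L2' -> L2Decay) and paddle.optimizer, so this builds a Momentum optimizer
# with L2 weight decay.
#
#     optim_builder = OptimizerBuilder(
#         regularizer={'type': 'L2', 'factor': 0.0001},
#         optimizer={'type': 'Momentum', 'momentum': 0.9})
#     optim = optim_builder(lr_scheduler, params=model.parameters())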


class ModelEMA(object):
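    """
    Exponential Moving Average (EMA) of model weights.

    A shadow copy of every parameter is kept and refreshed after each
    optimizer step as ema = decay * ema + (1 - decay) * param. Since the
    shadow weights start from zeros, apply() returns a bias-corrected copy
    (divided by 1 - decay**step) that can be loaded for evaluation.

    Args:
        decay (float): EMA decay coefficient.
        model (nn.Layer): model whose parameters are tracked.
        use_thres_step (bool): if True, use the smaller of `decay` and
            (1 + step) / (10 + step) so early updates follow the model
            more closely.
    """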
    def __init__(self, decay, model, use_thres_step=False):
        self.step = 0
        self.decay = decay
        self.state_dict = dict()
        for k, v in model.state_dict().items():
            self.state_dict[k] = paddle.zeros_like(v)
        self.use_thres_step = use_thres_step

    def update(self, model):
        if self.use_thres_step:
            decay = min(self.decay, (1 + self.step) / (10 + self.step))
        else:
            decay = self.decay
        self._decay = decay
        model_dict = model.state_dict()
        for k, v in self.state_dict.items():
            v = decay * v + (1 - decay) * model_dict[k]
            v.stop_gradient = True
            self.state_dict[k] = v
        self.step += 1

    def apply(self):
        if self.step == 0:
            return self.state_dict
        state_dict = dict()
        for k, v in self.state_dict.items():
            v = v / (1 - self._decay**self.step)
            v.stop_gradient = True
            state_dict[k] = v
        return state_dict
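

# A minimal usage sketch for ModelEMA above (the training-loop variables are
# assumptions):
#
#     ema = ModelEMA(decay=0.9998, model=model, use_thres_step=True)
#     for step_id, data in enumerate(loader):
#         ...                      # forward / backward / optimizer step
#         ema.update(model)        # refresh the shadow weights
#     model.set_dict(ema.apply())  # load bias-corrected EMA weights for eval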