# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import math
import paddle
import paddle.nn as nn

import paddle.optimizer as optimizer
import paddle.regularizer as regularizer

from ppdet.core.workspace import register, serializable

__all__ = ['LearningRate', 'OptimizerBuilder']

from ppdet.utils.logger import setup_logger
logger = setup_logger(__name__)


@serializable
class CosineDecay(object):
    """
    Cosine learning rate decay

    Args:
        max_epochs (int): max epochs for the training process.
            If you combine cosine decay with warmup, it is recommended
            that the total number of iterations (max_epochs * step_per_epoch)
            is much larger than the number of warmup iterations.
        use_warmup (bool): whether to splice the warmup ``boundary`` and
            ``value`` lists passed to ``__call__`` into the returned schedule.
    """

    def __init__(self, max_epochs=1000, use_warmup=True):
        self.max_epochs = max_epochs
        self.use_warmup = use_warmup

    def __call__(self,
                 base_lr=None,
                 boundary=None,
                 value=None,
                 step_per_epoch=None):
        assert base_lr is not None, "base LR should be provided"

        max_iters = self.max_epochs * int(step_per_epoch)

        if boundary is not None and value is not None and self.use_warmup:
            # append one cosine-decayed value per remaining iteration so that
            # warmup and decay come back as a single piecewise schedule
            for i in range(int(boundary[-1]), max_iters):
                boundary.append(i)

                decayed_lr = base_lr * 0.5 * (
                    math.cos(i * math.pi / max_iters) + 1)
                value.append(decayed_lr)
            return optimizer.lr.PiecewiseDecay(boundary, value)

        return optimizer.lr.CosineAnnealingDecay(base_lr, T_max=max_iters)
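
# Illustrative usage sketch (not part of the original module; the numbers and
# the LinearWarmup pairing below are assumptions chosen for the example):
#
#   warmup = LinearWarmup(steps=500, start_factor=1. / 3)
#   boundary, value = warmup(base_lr=0.01, step_per_epoch=1000)
#   decay = CosineDecay(max_epochs=300)
#   lr_scheduler = decay(base_lr=0.01, boundary=boundary, value=value,
#                        step_per_epoch=1000)
#   # -> paddle.optimizer.lr.PiecewiseDecay covering warmup + cosine decay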


@serializable
class PiecewiseDecay(object):
    """
    Multi step learning rate decay

    Args:
        gamma (float | list): learning rate decay factor(s). A scalar gamma
            is expanded to one factor per milestone: [gamma, gamma / 10, ...]
        milestones (list): epochs at which to decay the learning rate
        values (list): absolute learning rate values used instead of
            base_lr * gamma; must contain len(milestones) + 1 entries
        use_warmup (bool): whether to splice the warmup ``boundary`` and
            ``value`` lists passed to ``__call__`` into the returned schedule.
    """

    def __init__(self,
                 gamma=[0.1, 0.01],
                 milestones=[8, 11],
                 values=None,
                 use_warmup=True):
        super(PiecewiseDecay, self).__init__()
        if type(gamma) is not list:
            self.gamma = []
            for i in range(len(milestones)):
                self.gamma.append(gamma / 10**i)
        else:
            self.gamma = gamma
        self.milestones = milestones
        self.values = values
        self.use_warmup = use_warmup

    def __call__(self,
                 base_lr=None,
                 boundary=None,
                 value=None,
                 step_per_epoch=None):
        if boundary is not None and self.use_warmup:
            boundary.extend([int(step_per_epoch) * i for i in self.milestones])
        else:
            # do not use LinearWarmup
            boundary = [int(step_per_epoch) * i for i in self.milestones]
            value = [base_lr]  # base_lr is used for steps in [0, boundary[0])

        # self.values is set directly in the config
        if self.values is not None:
            assert len(self.milestones) + 1 == len(self.values)
            return optimizer.lr.PiecewiseDecay(boundary, self.values)

        # value is computed by self.gamma
        value = value if value is not None else [base_lr]
        for i in self.gamma:
            value.append(base_lr * i)

        return optimizer.lr.PiecewiseDecay(boundary, value)
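
# Illustrative sketch (assumed numbers, not part of the original module):
# with use_warmup=False the milestones are converted to iteration boundaries
# and the values are derived from base_lr and gamma.
#
#   decay = PiecewiseDecay(gamma=[0.1, 0.01], milestones=[8, 11],
#                          use_warmup=False)
#   lr_scheduler = decay(base_lr=0.02, step_per_epoch=500)
#   # boundaries: [4000, 5500], values: [0.02, 0.002, 0.0002]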


@serializable
class LinearWarmup(object):
    """
    Warm up learning rate linearly

    Args:
        steps (int): warm up steps
        start_factor (float): initial learning rate factor
    """

    def __init__(self, steps=500, start_factor=1. / 3):
        super(LinearWarmup, self).__init__()
        self.steps = steps
        self.start_factor = start_factor

    def __call__(self, base_lr, step_per_epoch):
        boundary = []
        value = []
        for i in range(self.steps + 1):
            if self.steps > 0:
                alpha = i / self.steps
                factor = self.start_factor * (1 - alpha) + alpha
                lr = base_lr * factor
                value.append(lr)
            if i > 0:
                boundary.append(i)
        return boundary, value
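
# Illustrative sketch (assumed numbers): LinearWarmup returns the raw
# boundary/value lists that the decay scheduler then extends.
#
#   warmup = LinearWarmup(steps=4, start_factor=0.5)
#   boundary, value = warmup(base_lr=0.1, step_per_epoch=100)
#   # boundary == [1, 2, 3, 4]
#   # value    == [0.05, 0.0625, 0.075, 0.0875, 0.1]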


@serializable
class BurninWarmup(object):
    """
    Warm up learning rate in burnin mode

    Args:
        steps (int): warm up steps
    """

    def __init__(self, steps=1000):
        super(BurninWarmup, self).__init__()
        self.steps = steps

    def __call__(self, base_lr, step_per_epoch):
        boundary = []
        value = []
        burnin = min(self.steps, step_per_epoch)
        for i in range(burnin + 1):
            factor = (i * 1.0 / burnin)**4
            lr = base_lr * factor
            value.append(lr)
            if i > 0:
                boundary.append(i)
        return boundary, value
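
# Illustrative sketch (assumed numbers): the burnin factor grows as
# (i / burnin) ** 4, with burnin capped at one epoch of steps.
#
#   warmup = BurninWarmup(steps=1000)
#   boundary, value = warmup(base_lr=0.01, step_per_epoch=4)
#   # burnin == 4, value == [0.01 * (i / 4) ** 4 for i in range(5)]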


@register
class LearningRate(object):
    """
    Learning Rate configuration

    Args:
        base_lr (float): base learning rate
        schedulers (list): learning rate schedulers
    """
    __category__ = 'optim'

    def __init__(self,
                 base_lr=0.01,
                 schedulers=[PiecewiseDecay(), LinearWarmup()]):
        super(LearningRate, self).__init__()
        self.base_lr = base_lr
        self.schedulers = schedulers

    def __call__(self, step_per_epoch):
        assert len(self.schedulers) >= 1
        if not self.schedulers[0].use_warmup:
            return self.schedulers[0](base_lr=self.base_lr,
                                      step_per_epoch=step_per_epoch)

        # TODO: split warmup & decay 
        # warmup
        boundary, value = self.schedulers[1](self.base_lr, step_per_epoch)
        # decay
        decay_lr = self.schedulers[0](self.base_lr, boundary, value,
                                      step_per_epoch)
        return decay_lr
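
# Illustrative sketch (assumed numbers): this is roughly how a config-driven
# trainer would resolve the final scheduler.
#
#   lr = LearningRate(base_lr=0.01,
#                     schedulers=[PiecewiseDecay(milestones=[8, 11]),
#                                 LinearWarmup(steps=500)])
#   lr_scheduler = lr(step_per_epoch=1000)
#   # -> paddle.optimizer.lr.PiecewiseDecay with warmup spliced in front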


@register
class OptimizerBuilder():
    """
    Build optimizer handles
    Args:
        regularizer (object): an `Regularizer` instance
        optimizer (object): an `Optimizer` instance
    """
    __category__ = 'optim'

    def __init__(self,
                 clip_grad_by_norm=None,
                 regularizer={'type': 'L2',
                              'factor': .0001},
                 optimizer={'type': 'Momentum',
                            'momentum': .9}):
        self.clip_grad_by_norm = clip_grad_by_norm
        self.regularizer = regularizer
        self.optimizer = optimizer

    def __call__(self, learning_rate, params=None):
        if self.clip_grad_by_norm is not None:
            grad_clip = nn.ClipGradByGlobalNorm(
                clip_norm=self.clip_grad_by_norm)
        else:
            grad_clip = None
        if self.regularizer and self.regularizer != 'None':
            reg_type = self.regularizer['type'] + 'Decay'
            reg_factor = self.regularizer['factor']
            regularization = getattr(regularizer, reg_type)(reg_factor)
        else:
            regularization = None

        optim_args = self.optimizer.copy()
        optim_type = optim_args['type']
        del optim_args['type']
        # the regularizer is passed through as weight_decay for every
        # optimizer except AdamW
        if optim_type != 'AdamW':
            optim_args['weight_decay'] = regularization
        op = getattr(optimizer, optim_type)
        return op(learning_rate=learning_rate,
                  parameters=params,
                  grad_clip=grad_clip,
                  **optim_args)
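
# Illustrative sketch (`model` and `lr_scheduler` are placeholders, not part
# of this module):
#
#   builder = OptimizerBuilder(regularizer={'type': 'L2', 'factor': 1e-4},
#                              optimizer={'type': 'Momentum', 'momentum': 0.9})
#   optim = builder(learning_rate=lr_scheduler, params=model.parameters())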


class ModelEMA(object):
    """
    Exponential moving average (EMA) of the model's weights.

    Args:
        decay (float): EMA decay rate
        model (nn.Layer): model whose state_dict is averaged
        use_thres_step (bool): if True, ramp the decay up with
            min(decay, (1 + step) / (10 + step)) during early updates
    """

    def __init__(self, decay, model, use_thres_step=False):
        self.step = 0
        self.step = 0
        self.decay = decay
        self.state_dict = dict()
        for k, v in model.state_dict().items():
            self.state_dict[k] = paddle.zeros_like(v)
        self.use_thres_step = use_thres_step

    def update(self, model):
        if self.use_thres_step:
            decay = min(self.decay, (1 + self.step) / (10 + self.step))
        else:
            decay = self.decay
        self._decay = decay
        model_dict = model.state_dict()
        for k, v in self.state_dict.items():
            v = decay * v + (1 - decay) * model_dict[k]
            v.stop_gradient = True
            self.state_dict[k] = v
        self.step += 1

    def apply(self):
        if self.step == 0:
            return self.state_dict
        state_dict = dict()
        for k, v in self.state_dict.items():
            # correct the bias from initializing the running average with zeros
            v = v / (1 - self._decay**self.step)
            v.stop_gradient = True
            state_dict[k] = v
        return state_dict
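

# Illustrative training-loop sketch (`model`, `loader` and the save path are
# placeholders, not part of this module):
#
#   ema = ModelEMA(decay=0.9998, model=model, use_thres_step=True)
#   for data in loader:
#       ...  # forward / backward / optimizer.step()
#       ema.update(model)
#   paddle.save(ema.apply(), 'model_ema.pdparams')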