# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import math
import logging

import paddle
import paddle.nn as nn

import paddle.optimizer as optimizer
import paddle.fluid.regularizer as regularizer
from paddle import cos

from ppdet.core.workspace import register, serializable

__all__ = ['LearningRate', 'OptimizerBuilder']

logger = logging.getLogger(__name__)


@serializable
class PiecewiseDecay(object):
    """
    Multi step learning rate decay

    Args:
        gamma (float | list): decay factor. A list gives one factor per
            milestone; a scalar is expanded to
            [gamma, gamma / 10, gamma / 100, ...], one entry per milestone.
        milestones (list): steps at which to decay learning rate
    """

    def __init__(self, gamma=[0.1, 0.01], milestones=[60000, 80000]):
        super(PiecewiseDecay, self).__init__()
        # Use isinstance (not `type(...) is list`) and copy list inputs so
        # later mutation cannot alter the caller's list or the shared
        # default argument.
        if not isinstance(gamma, list):
            self.gamma = [gamma / 10**i for i in range(len(milestones))]
        else:
            self.gamma = list(gamma)
        self.milestones = list(milestones)

    def __call__(self, base_lr=None, boundary=None, value=None):
        """Build the piecewise-decay scheduler.

        Args:
            base_lr (float): base learning rate, scaled by each gamma.
            boundary (list|None): step boundaries accumulated so far (e.g.
                from a warmup phase); extended in place with milestones.
            value (list|None): lr values accumulated so far; extended in
                place with base_lr * gamma for each decay stage.

        Returns:
            paddle scheduler built from the combined boundaries/values.
        """
        if boundary is not None:
            boundary.extend(self.milestones)

        if value is not None:
            for factor in self.gamma:
                value.append(base_lr * factor)

        return optimizer.lr.PiecewiseDecay(boundary, value)


@serializable
class LinearWarmup(object):
    """
    Warm up learning rate linearly

    Args:
        steps (int): warm up steps
        start_factor (float): initial learning rate factor
    """

    def __init__(self, steps=500, start_factor=1. / 3):
        super(LinearWarmup, self).__init__()
        self.steps = steps
        self.start_factor = start_factor

    def __call__(self, base_lr):
        """Return (boundary, value) lists for the warmup ramp.

        value has steps + 1 entries interpolating linearly from
        base_lr * start_factor up to base_lr; boundary holds the matching
        step indices 1..steps (step 0 needs no boundary).
        """
        boundary = []
        value = []
        # Guard steps <= 0: the original `i / self.steps` would raise
        # ZeroDivisionError; a zero-length warmup just starts at base_lr.
        if self.steps <= 0:
            return boundary, [base_lr]
        for i in range(self.steps + 1):
            alpha = i / self.steps
            factor = self.start_factor * (1 - alpha) + alpha
            lr = base_lr * factor
            value.append(lr)
            if i > 0:
                boundary.append(i)
        return boundary, value


@register
class LearningRate(object):
    """
    Learning Rate configuration

    Args:
        base_lr (float): base learning rate
        schedulers (list): learning rate schedulers; expected layout is
            [decay, warmup] (see __call__)
    """
    __category__ = 'optim'

    def __init__(self,
                 base_lr=0.01,
                 schedulers=None):
        super(LearningRate, self).__init__()
        self.base_lr = base_lr
        # Avoid a mutable default argument: the original default
        # [PiecewiseDecay(), LinearWarmup()] was built once at def time and
        # shared by every LearningRate instance. Build fresh instances here.
        if schedulers is None:
            schedulers = [PiecewiseDecay(), LinearWarmup()]
        self.schedulers = schedulers

    def __call__(self):
        # TODO: split warmup & decay
        # warmup: schedulers[1] produces the initial (boundary, value) ramp
        boundary, value = self.schedulers[1](self.base_lr)
        # decay: schedulers[0] extends the lists with milestone decays and
        # wraps everything into a single paddle scheduler
        decay_lr = self.schedulers[0](self.base_lr, boundary, value)
        return decay_lr


@register
class OptimizerBuilder():
    """
    Build optimizer handles

    Args:
        clip_grad_by_norm (float|None): if set, clip gradients by this
            global norm
        regularizer (object): an `Regularizer` instance
        optimizer (object): an `Optimizer` instance
    """
    __category__ = 'optim'

    def __init__(self,
                 clip_grad_by_norm=None,
                 regularizer={'type': 'L2',
                              'factor': .0001},
                 optimizer={'type': 'Momentum',
                            'momentum': .9}):
        self.clip_grad_by_norm = clip_grad_by_norm
        self.regularizer = regularizer
        self.optimizer = optimizer

    def __call__(self, learning_rate, params=None):
        """Instantiate a paddle optimizer from the stored config dicts."""
        # Optional global-norm gradient clipping.
        grad_clip = None
        if self.clip_grad_by_norm is not None:
            grad_clip = nn.GradientClipByGlobalNorm(
                clip_norm=self.clip_grad_by_norm)

        # Resolve e.g. {'type': 'L2', ...} to regularizer.L2Decay(factor).
        regularization = None
        if self.regularizer:
            reg_cls = getattr(regularizer,
                              self.regularizer['type'] + 'Decay')
            regularization = reg_cls(self.regularizer['factor'])

        # Copy the config so repeated calls never mutate the stored dict;
        # everything besides 'type' is forwarded as keyword arguments.
        kwargs = self.optimizer.copy()
        optim_cls = getattr(optimizer, kwargs.pop('type'))
        return optim_cls(
            learning_rate=learning_rate,
            parameters=params,
            weight_decay=regularization,
            grad_clip=grad_clip,
            **kwargs)