# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import math
import logging

import paddle
import paddle.nn as nn
import paddle.optimizer as optimizer
import paddle.fluid.regularizer as regularizer
from paddle import cos

from ppdet.core.workspace import register, serializable

__all__ = ['LearningRate', 'OptimizerBuilder']

logger = logging.getLogger(__name__)


@serializable
class PiecewiseDecay(object):
    """
    Multi step learning rate decay

    Args:
        gamma (float | list | tuple): decay factor(s). A list/tuple is
            used as-is, one factor per milestone; a scalar is expanded to
            ``[gamma, gamma / 10, gamma / 100, ...]``.
        milestones (list): epochs at which to decay learning rate
    """

    def __init__(self, gamma=[0.1, 0.01], milestones=[8, 11]):
        super(PiecewiseDecay, self).__init__()
        if isinstance(gamma, (list, tuple)):
            # Copy so we never alias the caller's list (or the shared
            # mutable default argument above).
            self.gamma = list(gamma)
        else:
            # Scalar: derive one factor per milestone, decaying 10x each.
            self.gamma = [gamma / 10**i for i in range(len(milestones))]
        self.milestones = milestones

    def __call__(self,
                 base_lr=None,
                 boundary=None,
                 value=None,
                 step_per_epoch=None):
        """Build a paddle PiecewiseDecay scheduler.

        Args:
            base_lr (float): base learning rate the gamma factors scale.
            boundary (list|None): existing step boundaries (e.g. produced
                by a warmup schedule); milestone steps are appended to it
                in place.
            value (list|None): existing lr values; the decayed lrs are
                appended to it in place.
            step_per_epoch (int|float): steps per epoch, used to convert
                epoch milestones into global-step boundaries.

        Returns:
            paddle.optimizer.lr.PiecewiseDecay: the configured scheduler.
        """
        if boundary is not None:
            boundary.extend([int(step_per_epoch) * i for i in self.milestones])

        if value is not None:
            for g in self.gamma:
                value.append(base_lr * g)

        return optimizer.lr.PiecewiseDecay(boundary, value)


@serializable
class LinearWarmup(object):
    """
    Warm up learning rate linearly

    Args:
        steps (int): warm up steps
        start_factor (float): initial learning rate factor
    """

    def __init__(self, steps=500, start_factor=1. / 3):
        super(LinearWarmup, self).__init__()
        self.steps = steps
        self.start_factor = start_factor

    def __call__(self, base_lr):
        """Build the warmup boundary/value lists.

        Args:
            base_lr (float): target learning rate reached after warmup.

        Returns:
            tuple(list, list): step boundaries and lr values; ``value``
            has one more entry than ``boundary`` (the lr at step 0).
        """
        boundary = []
        value = []
        # Guard: steps == 0 would divide by zero below; treat it as
        # "no warmup" and start directly at base_lr.
        if self.steps < 1:
            return boundary, [base_lr]
        for i in range(self.steps + 1):
            # alpha ramps 0 -> 1, interpolating the factor linearly from
            # start_factor up to 1.
            alpha = i / self.steps
            factor = self.start_factor * (1 - alpha) + alpha
            lr = base_lr * factor
            value.append(lr)
            if i > 0:
                boundary.append(i)
        return boundary, value


@register
class LearningRate(object):
    """
    Learning Rate configuration

    Args:
        base_lr (float): base learning rate
        schedulers (list): learning rate schedulers
    """
    __category__ = 'optim'

    def __init__(self,
                 base_lr=0.01,
                 schedulers=[PiecewiseDecay(), LinearWarmup()]):
        super(LearningRate, self).__init__()
        self.base_lr = base_lr
        self.schedulers = schedulers

    def __call__(self, step_per_epoch):
        """Compose warmup and decay into a single paddle lr scheduler."""
        # TODO: split warmup & decay
        warmup_sched = self.schedulers[1]
        decay_sched = self.schedulers[0]
        # Warmup produces the initial boundary/value ramp ...
        boundaries, values = warmup_sched(self.base_lr)
        # ... which the decay schedule extends with the milestone drops.
        return decay_sched(self.base_lr, boundaries, values, step_per_epoch)


@register
class OptimizerBuilder():
    """
    Build optimizer handles

    Args:
        clip_grad_by_norm (float|None): global-norm gradient clipping
            threshold; ``None`` disables clipping
        regularizer (dict): config with ``type`` (e.g. ``'L2'``) and
            ``factor`` keys, resolved against ``paddle.fluid.regularizer``
        optimizer (dict): config whose ``type`` names a class in
            ``paddle.optimizer``; remaining keys become constructor kwargs
    """
    __category__ = 'optim'

    def __init__(self,
                 clip_grad_by_norm=None,
                 regularizer={'type': 'L2',
                              'factor': .0001},
                 optimizer={'type': 'Momentum',
                            'momentum': .9}):
        self.clip_grad_by_norm = clip_grad_by_norm
        self.regularizer = regularizer
        self.optimizer = optimizer

    def __call__(self, learning_rate, params=None):
        """Instantiate a paddle optimizer bound to *learning_rate*."""
        # Clipping is optional: build a clipper only when configured.
        grad_clip = None
        if self.clip_grad_by_norm is not None:
            grad_clip = nn.GradientClipByGlobalNorm(
                clip_norm=self.clip_grad_by_norm)

        # Resolve e.g. {'type': 'L2', 'factor': f} -> regularizer.L2Decay(f).
        regularization = None
        if self.regularizer:
            reg_cls = getattr(regularizer, self.regularizer['type'] + 'Decay')
            regularization = reg_cls(self.regularizer['factor'])

        # Look up the optimizer class by name; every remaining config key
        # is forwarded as a constructor kwarg.
        optim_args = self.optimizer.copy()
        optim_cls = getattr(optimizer, optim_args.pop('type'))
        return optim_cls(learning_rate=learning_rate,
                         parameters=params,
                         weight_decay=regularization,
                         grad_clip=grad_clip,
                         **optim_args)