import py_paddle.swig_paddle as swig_api

import paddle.trainer_config_helpers.config_parser_utils as config_parser_utils
import paddle.trainer_config_helpers.optimizers as v1_optimizers
"""
Optimizers(update equation) for SGD method.

TODO(yuyang18): Complete comments.
"""

__all__ = [
    'Momentum', 'Adam', 'Adamax', 'AdaGrad', 'DecayedAdaGrad', 'AdaDelta',
    'RMSProp', 'ModelAverage', 'L2Regularization'
]


class Optimizer(object):
    def __init__(self, **kwargs):
        if 'batch_size' in kwargs:
            del kwargs['batch_size']  # not used by the Python-side library.

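        # The v1 settings() call must run inside the config parser; wrap it in
        # a closure so parse_optimizer_config can execute it and return the
        # optimizer configuration as a protobuf message.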
        def __impl__():
            v1_optimizers.settings(batch_size=1, **kwargs)

        self.__opt_conf_proto__ = config_parser_utils.parse_optimizer_config(
            __impl__)
        self.__opt_conf__ = swig_api.OptimizationConfig.createFromProto(
            self.__opt_conf_proto__)

    def enable_types(self):
        """
        get enable_types for each optimizer.
        enable_types = [value, gradient, momentum, etc]
        For each optimizer(SGD, Adam), GradientMachine should enable different
        buffers.
        """
        tmp = swig_api.ParameterOptimizer.create(self.__opt_conf__)
        assert isinstance(tmp, swig_api.ParameterOptimizer)
        return tmp.getParameterTypes()
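
    # Illustrative sketch (comments only, not executed): the buffer types are
    # typically handed to a GradientMachine so it allocates what the optimizer
    # needs. createFromConfigProto mirrors the v2 trainer's usage; treat the
    # exact call as an assumption and adapt it to your model-setup code.
    #
    #     optimizer = Adam()
    #     gm = swig_api.GradientMachine.createFromConfigProto(
    #         model_proto, swig_api.CREATE_MODE_NORMAL,
    #         optimizer.enable_types())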

    def __create_local_updater__(self):
        return swig_api.ParameterUpdater.createLocalUpdater(self.__opt_conf__)

    def __create_remote_updater__(self, pass_num, use_sparse_updater):
        return swig_api.ParameterUpdater.createRemoteUpdater(
            self.__opt_conf__, pass_num, use_sparse_updater)

    def create_updater(self, is_local, num_passes, use_sparse_updater):
        """
        create proper parameter_updater by configuration.
        :param is_local: create local or remote parameter updater
        :param num_passes: remote parameter updater will use this to config
        parameter server.
        :param use_sparse_updater: when use remote updater, if some parameter is
        sparse, updater should do some extra thing:

        ..  code-block:: python

            if use_sparse_remote_updater:
                        gradient_machine.prefetch(in_args)
                        parameter_updater.getParametersRemote()
        :return: parameter_updater
        """
        if is_local:
            parameter_updater = self.__create_local_updater__()
        else:
            parameter_updater = self.__create_remote_updater__(
                num_passes, use_sparse_updater)
        return parameter_updater


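# Minimal usage sketch (comments only, not executed). Assumes
# swig_api.initPaddle() has already been called; learning_rate is simply
# forwarded through **kwargs to the v1 settings() call.
#
#     optimizer = Adam(learning_rate=1e-3)
#     updater = optimizer.create_updater(
#         is_local=True, num_passes=10, use_sparse_updater=False)
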
class Momentum(Optimizer):
    def __init__(self, momentum=None, sparse=False, **kwargs):
        learning_method = v1_optimizers.MomentumOptimizer(
            momentum=momentum, sparse=sparse)
        super(Momentum, self).__init__(
            learning_method=learning_method, **kwargs)


class Adam(Optimizer):
    def __init__(self, beta1=0.9, beta2=0.999, epsilon=1e-8, **kwargs):
        learning_method = v1_optimizers.AdamOptimizer(
            beta1=beta1, beta2=beta2, epsilon=epsilon)
        super(Adam, self).__init__(learning_method=learning_method, **kwargs)


class Adamax(Optimizer):
    def __init__(self, beta1=0.9, beta2=0.999, **kwargs):
        learning_method = v1_optimizers.AdamaxOptimizer(
            beta1=beta1, beta2=beta2)
        super(Adamax, self).__init__(learning_method=learning_method, **kwargs)


class AdaGrad(Optimizer):
    def __init__(self, **kwargs):
        learning_method = v1_optimizers.AdaGradOptimizer()
        super(AdaGrad, self).__init__(learning_method=learning_method, **kwargs)


class DecayedAdaGrad(Optimizer):
    def __init__(self, rho=0.95, epsilon=1e-06, **kwargs):
        learning_method = v1_optimizers.DecayedAdaGradOptimizer(
            rho=rho, epsilon=epsilon)
        super(DecayedAdaGrad, self).__init__(
            learning_method=learning_method, **kwargs)


class AdaDelta(Optimizer):
    def __init__(self, rho=0.95, epsilon=1e-06, **kwargs):
        learning_method = v1_optimizers.AdaDeltaOptimizer(
            rho=rho, epsilon=epsilon)
        super(AdaDelta, self).__init__(
            learning_method=learning_method, **kwargs)


class RMSProp(Optimizer):
    def __init__(self, rho=0.95, epsilon=1e-6, **kwargs):
        learning_method = v1_optimizers.RMSPropOptimizer(
            rho=rho, epsilon=epsilon)
        super(RMSProp, self).__init__(learning_method=learning_method, **kwargs)


ModelAverage = v1_optimizers.ModelAverage
L2Regularization = v1_optimizers.L2Regularization
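
# Usage sketch: both helpers are forwarded through **kwargs to the v1
# settings() call, as the __main__ demo below does. Values are illustrative.
#
#     optimizer = Momentum(
#         momentum=0.9,
#         regularization=L2Regularization(rate=1e-4),
#         model_average=ModelAverage(average_window=0.5))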

if __name__ == '__main__':
    swig_api.initPaddle('--use_gpu=false')
    for opt in [
            Momentum(), Adam(), Adamax(), AdaGrad(), DecayedAdaGrad(),
            AdaDelta(), RMSProp(), Adam(
                model_average=ModelAverage(average_window=0.5),
                regularization=L2Regularization(rate=0.5),
                gradient_clipping_threshold=25)
    ]:
        print(opt, opt.enable_types())