# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest

import paddle
import paddle.fluid.framework as framework
import paddle.fluid.optimizer as optimizer
import paddle.fluid.regularizer as regularizer
import paddle.fluid.clip as clip
import paddle.compat as cpt
from paddle.fluid.backward import append_backward
paddle.enable_static()
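
# These tests exercise DGCMomentumOptimizer (Deep Gradient Compression:
# momentum SGD that sparsifies gradients before communication). They only
# inspect the static programs the optimizer builds -- its ops, accumulators,
# and init program -- not numerical results.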


class TestDGCMomentumOptimizer(unittest.TestCase):
    class MockDGCMomentum(optimizer.DGCMomentumOptimizer):
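        """Expose the optimizer's private accumulator state for assertions."""
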
        def get_accumulators(self):
            return self._accumulators

        def get_velocity_str(self):
            return self._u_velocity_acc_str

    def check_dgc_momentum_optimizer(self,
                                     dims=[5, 10, 8],
                                     name="momentum",
                                     regularization=None,
                                     use_recompute=False):
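        """Build a tiny mul->mean network, attach the (mocked) DGC momentum
        optimizer, and assert on the ops and accumulators it generates.

        With the default dims, mul.x has only 5 * 10 = 50 elements and the
        optimizer emits a plain momentum update; callers pass
        dims=[16, 1024, 8] (16 * 1024 = 16384 elements) to exercise the
        dgc_momentum path instead.
        """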
        init_program = framework.Program()
        program = framework.Program()
        block = program.global_block()
        mul_x = block.create_parameter(
            dtype="float32",
            shape=[dims[0], dims[1]],
            lod_level=0,
            name="mul.x",
            optimize_attr={'learning_rate': 1.1},
            # Attach a parameter-level L2 regularizer only when no
            # optimizer-level regularization is passed in, so the two
            # sources never overlap.
            regularizer=None if regularization is not None else
            regularizer.L2DecayRegularizer(2e-4))
        mul_y = block.create_var(
            dtype="float32",
            shape=[dims[1], dims[2]],
            lod_level=0,
            name="mul.y")
        mul_out = block.create_var(
            dtype="float32",
            shape=[dims[0], dims[2]],
            lod_level=0,
            name="mul.out")
        block.append_op(
            type="mul",
            inputs={"X": mul_x,
                    "Y": mul_y},
            outputs={"Out": mul_out},
            attrs={"x_num_col_dims": 1})
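        # Forward graph: mul.out = mul.x * mul.y; a mean op is appended
        # after the optimizer is constructed to form a scalar loss.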
        learning_rate = 0.01

        dgc_momentum_optimizer = self.MockDGCMomentum(
            learning_rate=learning_rate,
            momentum=0.2,
            rampup_begin_step=0,
            num_trainers=2,
            regularization=regularization,
            grad_clip=clip.GradientClipByNorm(1.0))

        if use_recompute:
            dgc_momentum_optimizer = optimizer.RecomputeOptimizer(
                dgc_momentum_optimizer)
            dgc_momentum_optimizer._set_checkpoints([])
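            # RecomputeOptimizer does not forward the mock's helpers, so
            # borrow them from the wrapped optimizer for the checks below.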
            dgc_momentum_optimizer.get_accumulators = \
                dgc_momentum_optimizer._optimizer.get_accumulators
            dgc_momentum_optimizer.get_velocity_str = \
                dgc_momentum_optimizer._optimizer.get_velocity_str

        mean_out = block.create_var(
            dtype="float32", shape=[1], lod_level=0, name="mean.out")
        block.append_op(
            type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out})
        # params_grads = append_backward(mean_out)
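        # mul.x is the only trainable parameter, so backward() is expected
        # to return exactly one (param, grad) pair.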
        params_grads = dgc_momentum_optimizer.backward(
            mean_out, startup_program=init_program)

        with framework.program_guard(program, init_program):
            opts = dgc_momentum_optimizer.apply_gradients(params_grads)

        accumulator_count = 1 if name == "momentum" else 2
        self.assertEqual(len(params_grads), 1)
        self.assertEqual(
            len(dgc_momentum_optimizer.get_accumulators()), accumulator_count)

        self.assertEqual(len(opts), 2)
        momentum_op = opts[-1]
        self.assertEqual([op.type for op in opts], ["scale", name])
        self.assertFalse(momentum_op.attr('use_nesterov'))

        # Check accumulators
        accumulators = dgc_momentum_optimizer.get_accumulators()
        self.assertEqual(len(accumulators), accumulator_count)
        self.assertIn(dgc_momentum_optimizer.get_velocity_str(), accumulators)
        velocity_acc = accumulators[dgc_momentum_optimizer.get_velocity_str()]
        self.assertEqual(len(velocity_acc), 1)
        self.assertIn(mul_x.name, velocity_acc)

        # Check init_program
        # No DGC applied: lr fill_constant + dgc counters (count, nranks,
        # begin_step) + velocity u -> 5 init ops.
        # DGC applied: the same 4 plus (u, v, k, encode, gather) -> 9 init ops.
        init_ops_count = 5 if name == "momentum" else 9
        init_ops = init_program.global_block().ops
        self.assertEqual(len(init_ops), init_ops_count)
        self.assertEqual(init_ops[0].type, "fill_constant")
        self.assertAlmostEqual(init_ops[0].attr('value'), learning_rate)

        # Check the regularization coeff recorded on the dgc op: the
        # parameter-level coeff (2e-4) when no optimizer-level regularization
        # was passed, otherwise the optimizer-level coeff (1e-4).
        train_ops = program.global_block().ops
        for op in train_ops:
            if op.type == "dgc":
                coeff = 2e-4 if regularization is None else 1e-4
                self.assertAlmostEqual(op.attr('regular_coeff'), coeff)
                print("dgc regular_coeff=" + str(coeff))

    def test_type_error(self):
        # grad_clip of DGCMomentumOptimizer must be a GradientClipByNorm;
        # any other clip type should raise TypeError.
        with self.assertRaises(TypeError):
            dgc_momentum_optimizer = self.MockDGCMomentum(
                learning_rate=0.01,
                momentum=0.2,
                rampup_begin_step=0,
                num_trainers=2,
                grad_clip=clip.GradientClipByGlobalNorm(1.0))

    def test_momentum_without_dgc(self):
        self.check_dgc_momentum_optimizer(
            regularization=regularizer.L1Decay(1e-4))

    def test_momentum_with_dgc(self):
        # mul.x has 16 * 1024 = 16384 elements, so the dgc_momentum path is used
        self.check_dgc_momentum_optimizer(
            dims=[16, 1024, 8],
            name="dgc_momentum",
            regularization=regularizer.L2Decay(1e-4))

        # With no optimizer-level regularization, the dgc op should pick up
        # the parameter-level regularizer set on mul.x (coeff 2e-4).
        self.check_dgc_momentum_optimizer(
            dims=[16, 1024, 8], name="dgc_momentum")

    def test_momentum_with_dgc_recompute(self):
        # mul.x has 16 * 1024 = 16384 elements, so the dgc_momentum path is used
        self.check_dgc_momentum_optimizer(
            dims=[16, 1024, 8],
            name="dgc_momentum",
            regularization=regularizer.L2Decay(1e-4),
            use_recompute=True)
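
# A minimal usage sketch (comments only, not executed here). It uses only
# the constructor arguments exercised by the tests above; `build_loss` is a
# hypothetical helper returning a scalar loss var in a static program.
#
#     loss = build_loss()
#     opt = optimizer.DGCMomentumOptimizer(
#         learning_rate=0.01,
#         momentum=0.9,
#         rampup_begin_step=0,
#         num_trainers=2,
#         grad_clip=clip.GradientClipByNorm(1.0))
#     opt.minimize(loss)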


if __name__ == '__main__':
    unittest.main()