# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest

import paddle.fluid.framework as framework
import paddle.fluid.optimizer as optimizer
import paddle.fluid.regularizer as regularizer
import paddle.fluid.clip as clip
import paddle.compat as cpt
from paddle.fluid.backward import append_backward
from paddle.fluid.transpiler.details import program_to_code
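
# These tests exercise DGCMomentumOptimizer (Deep Gradient Compression):
# small parameters fall back to plain momentum, while large ones
# (16 * 1024 elements in these tests) go through the dgc op.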


class TestDGCMomentumOptimizer(unittest.TestCase):
    class MockDGCMomentum(optimizer.DGCMomentumOptimizer):
        def get_accumulators(self):
            return self._accumulators

        def get_velocity_str(self):
            return self._u_velocity_acc_str

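    # Build a tiny program (mul -> mean), attach the (possibly recompute-
    # wrapped) DGC momentum optimizer, and verify the emitted ops,
    # accumulators, init program, and dgc regularization coefficient.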
    def check_dgc_momentum_optimizer(self,
                                     dims=[5, 10, 8],
                                     name="momentum",
                                     regularization=None,
                                     use_recompute=False):
        init_program = framework.Program()
        program = framework.Program()
        block = program.global_block()
        mul_x = block.create_parameter(
            dtype="float32",
            shape=[dims[0], dims[1]],
            lod_level=0,
            name="mul.x",
            optimize_attr={'learning_rate': 1.1},
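            # Attach a param-level regularizer only when no optimizer-level
            # regularization is given, so the dgc op falls back to
            # param.regularizer (regular_coeff == 2e-4, checked below).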
            regularizer=None if regularization is not None else
            regularizer.L2DecayRegularizer(2e-4))
        mul_y = block.create_var(
            dtype="float32",
            shape=[dims[1], dims[2]],
            lod_level=0,
            name="mul.y")
        mul_out = block.create_var(
            dtype="float32",
            shape=[dims[0], dims[2]],
            lod_level=0,
            name="mul.out")
        block.append_op(
            type="mul",
            inputs={"X": mul_x,
                    "Y": mul_y},
            outputs={"Out": mul_out},
            attrs={"x_num_col_dims": 1})
        learning_rate = 0.01

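        # grad_clip must be a GradientClipByNorm instance; other clip types
        # raise TypeError (covered by test_type_error below).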
        dgc_momentum_optimizer = self.MockDGCMomentum(
            learning_rate=learning_rate,
            momentum=0.2,
            rampup_begin_step=0,
            num_trainers=2,
            regularization=regularization,
            grad_clip=clip.GradientClipByNorm(1.0))

        if use_recompute:
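            # RecomputeOptimizer wraps the inner optimizer and does not expose
            # the mock helpers, so forward them for the assertions below.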
            dgc_momentum_optimizer = optimizer.RecomputeOptimizer(
                dgc_momentum_optimizer)
            dgc_momentum_optimizer.get_accumulators = dgc_momentum_optimizer._optimizer.get_accumulators
            dgc_momentum_optimizer.get_velocity_str = dgc_momentum_optimizer._optimizer.get_velocity_str

        mean_out = block.create_var(
            dtype="float32", shape=[1], lod_level=0, name="mean.out")
        block.append_op(
            type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out})
        # params_grads = append_backward(mean_out)
        params_grads = dgc_momentum_optimizer.backward(mean_out)
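        # Plain momentum keeps a single (velocity) accumulator; dgc_momentum
        # keeps one extra.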
        accumulator_count = 1 if name == "momentum" else 2
        self.assertEqual(len(params_grads), 1)
        self.assertEqual(
            len(dgc_momentum_optimizer.get_accumulators()), accumulator_count)
        with framework.program_guard(program, init_program):
            opts = dgc_momentum_optimizer.apply_gradients(params_grads)
        self.assertEqual(len(opts), 2)
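        # apply_gradients should emit exactly a scale op followed by the
        # momentum / dgc_momentum op.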
        momentum_op = opts[-1]
        self.assertEqual([op.type for op in opts], ["scale", name])
        self.assertFalse(momentum_op.attr('use_nesterov'))

        # Check accumulators
        accumulators = dgc_momentum_optimizer.get_accumulators()
        self.assertEqual(len(accumulators), accumulator_count)
        self.assertIn(dgc_momentum_optimizer.get_velocity_str(), accumulators)
        velocity_acc = accumulators[dgc_momentum_optimizer.get_velocity_str()]
        self.assertEqual(len(velocity_acc), 1)
        self.assertIn(mul_x.name, velocity_acc)

        # Check init_program
        init_ops = init_program.global_block().ops
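        # The single init op is a fill_constant carrying the learning-rate
        # value.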
        self.assertEqual(len(init_ops), 1)
        self.assertEqual(init_ops[0].type, "fill_constant")
        self.assertAlmostEqual(init_ops[0].attr('value'), learning_rate)

        # Check the dgc op regularization coeff: the param-level 2e-4 when no
        # optimizer-level regularization is passed, otherwise 1e-4.
        train_ops = program.global_block().ops
        for op in train_ops:
            if op.type == "dgc":
                coeff = 2e-4 if regularization is None else 1e-4
                self.assertAlmostEqual(op.attr('regular_coeff'), coeff)
                print("dgc regular_coeff=" + str(coeff))

        # for local test debug
        #with open("test_dgc_optimizer_" + name + str(use_recompute) + ".log", "w") as f:
        #    program_to_code(program, fout=f)

    def test_type_error(self):
        # DGCMomentumOptimizer only accepts grad_clip of type
        # GradientClipByNorm; any other clip type must raise TypeError.
        with self.assertRaises(TypeError):
            dgc_momentum_optimizer = self.MockDGCMomentum(
                learning_rate=0.01,
                momentum=0.2,
                rampup_begin_step=0,
                num_trainers=2,
                grad_clip=clip.GradientClipByGlobalNorm(1.0))

    def test_momentum_without_dgc(self):
        self.check_dgc_momentum_optimizer(
            regularization=regularizer.L1Decay(1e-4))

    def test_momentum_with_dgc(self):
        # mul.x has 16 * 1024 = 16384 elements, so dgc momentum is used
        self.check_dgc_momentum_optimizer(
            dims=[16, 1024, 8],
            name="dgc_momentum",
            regularization=regularizer.L2Decay(1e-4))

        # Check that dgc falls back to param.regularizer when no
        # optimizer-level regularization is passed.
        self.check_dgc_momentum_optimizer(
            dims=[16, 1024, 8], name="dgc_momentum")

    def test_momentum_with_dgc_recompute(self):
        # mul.x has 16 * 1024 = 16384 elements, so dgc momentum is used
        self.check_dgc_momentum_optimizer(
            dims=[16, 1024, 8],
            name="dgc_momentum",
            regularization=regularizer.L2Decay(1e-4),
            use_recompute=True)


if __name__ == '__main__':
    unittest.main()