#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
from op_test import OpTest
from paddle.fluid import core
from paddle.fluid.op import Operator
from paddle.fluid.dygraph.base import switch_to_static_graph
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers


class LAMBOptimizer(paddle.optimizer.Lamb):
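    # Re-implements the LAMB update rule with elementary Paddle ops instead of
    # the fused lamb kernel by overriding _append_optimize_op, so the tests
    # below can cross-check paddle.optimizer.Lamb against this reference.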

    def _append_optimize_op(self, block, param_and_grad):
        assert isinstance(block, fluid.framework.Block)
        block.program._use_lamb = True

        m = self._get_accumulator(self._moment1_acc_str, param_and_grad[0])
        v = self._get_accumulator(self._moment2_acc_str, param_and_grad[0])
        beta_1_pow_acc = self._get_accumulator(self._beta1_pow_acc_str,
                                               param_and_grad[0])
        beta_2_pow_acc = self._get_accumulator(self._beta2_pow_acc_str,
                                               param_and_grad[0])

        beta_1 = layers.fill_constant(dtype='float32',
                                      shape=[1],
                                      value=self._beta1,
                                      name='lamb_beta_1')
        beta_2 = layers.fill_constant(dtype='float32',
                                      shape=[1],
                                      value=self._beta2,
                                      name='lamb_beta_2')
        epsilon = layers.fill_constant(dtype='float32',
                                       shape=[1],
                                       value=self._epsilon,
                                       name='epsilon')

        one = paddle.ones(shape=[1]).astype('float32')
        zero = paddle.zeros(shape=[1]).astype('float32')

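        # Adam-style moment estimates with bias correction, as in the LAMB
        # paper (arXiv:1904.00962):
        #   m_t = beta1 * m_{t-1} + (1 - beta1) * g_t
        #   v_t = beta2 * v_{t-1} + (1 - beta2) * g_t**2
        #   update = m_hat / (sqrt(v_hat) + epsilon)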
        next_m = paddle.multiply(m, beta_1) + paddle.multiply(
            param_and_grad[1], one - beta_1)
        next_v = paddle.multiply(v, beta_2) + paddle.multiply(
            paddle.pow(param_and_grad[1], 2), one - beta_2)

        beta1_correction = one - beta_1_pow_acc
        beta2_correction = one - beta_2_pow_acc

        next_m_unbiased = next_m / beta1_correction
        next_v_unbiased = next_v / beta2_correction

        update = next_m_unbiased / (paddle.sqrt(next_v_unbiased) + epsilon)

        if self._exclude_from_weight_decay_fn is not None and self._exclude_from_weight_decay_fn(
                param_and_grad[0]):
            self._lamb_weight_decay = 0.0
        update += self._lamb_weight_decay * param_and_grad[0]

        # LAMB trust ratio: ||w||_2 / ||update||_2 when both norms are
        # positive, otherwise 1.
        w_norm = paddle.norm(param_and_grad[0], p=2)
        g_norm = paddle.norm(update, p=2)

        learning_rate = self._create_param_lr(param_and_grad)

        ratio = paddle.where(
            paddle.greater_than(w_norm, zero),
            paddle.where(paddle.greater_than(g_norm, zero), (w_norm / g_norm),
                         one), one)
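        # Scale the update by the trust ratio and the learning rate, apply it,
        # and advance the beta power accumulators for the next step.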
        update_with_lr = ratio * learning_rate * update
        next_param = param_and_grad[0] - update_with_lr

        beta_1_pow_acc *= beta_1
        beta_2_pow_acc *= beta_2

        paddle.assign(next_m, m)
        paddle.assign(next_v, v)
        paddle.assign(next_param, param_and_grad[0])

        return None


class TestLambOpV2(unittest.TestCase):

    def test_lamb_op(self):
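        # A single dygraph forward/backward pass through a small Conv2D layer
        # followed by one Lamb step; only checks that a loss is produced.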
        shape = [2, 4, 8, 8]
        data = paddle.to_tensor(np.random.random(size=shape).astype("float32"))
        conv = paddle.nn.Conv2D(4, 6, (3, 3))
        data = conv(data)
        loss = paddle.mean(data)
        opt = paddle.optimizer.Lamb(learning_rate=1e-5,
                                    epsilon=1e-8,
                                    parameters=conv.parameters())
        loss.backward()
        opt.minimize(loss)

        assert loss.numpy() is not None


class TestLambOpWithCombinedOp(unittest.TestCase):

    def test_lamb_op_with_multi_steps(self):
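        # For each step, build the same seeded regression model twice, run one
        # iteration with the built-in Lamb optimizer and one with the op-by-op
        # LAMBOptimizer reference, and check that the losses match.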
        paddle.enable_static()

        def _build_static_model(main, startup, seed=100):
            with fluid.program_guard(main, startup):
                main.random_seed = seed
                startup.random_seed = seed
                x = fluid.layers.data(name='X', shape=[13], dtype='float32')
                y = fluid.layers.data(name='Y', shape=[1], dtype='float32')
                prediction = fluid.layers.fc(input=x, size=1, act=None)
                loss = fluid.layers.square_error_cost(input=prediction, label=y)
                avg_loss = paddle.mean(loss)
            return avg_loss

        place = fluid.CPUPlace()
        num_steps = 10

        for i in range(num_steps):
            feed_x = np.random.random(size=(10, 13)).astype('float32')
            feed_y = np.random.random(size=(10, 1)).astype('float32')

            main_program = fluid.Program()
            startup_program = fluid.Program()
            with fluid.program_guard(main_program, startup_program):
                avg_loss = _build_static_model(main_program, startup_program)
                lamb_kernel = paddle.optimizer.Lamb(learning_rate=0.2)
                lamb_kernel.minimize(avg_loss)

            executor = fluid.Executor(place)
            executor.run(startup_program)
            output = executor.run(program=main_program,
                                  feed={
                                      'X': feed_x,
                                      'Y': feed_y
                                  },
                                  fetch_list=[avg_loss.name])

            main = fluid.Program()
            startup = fluid.Program()
            with fluid.program_guard(main, startup):
                loss = _build_static_model(main, startup)
                lamb = LAMBOptimizer(learning_rate=0.2)
                lamb.minimize(loss)

            exe = fluid.Executor(place)
            exe.run(startup)
            out = exe.run(program=main,
                          feed={
                              'X': feed_x,
                              'Y': feed_y
                          },
                          fetch_list=[loss.name])

            np.testing.assert_allclose(out, output, rtol=1e-05)


class TestLambOpV2Group(TestLambOpV2):

    def test_lamb_op(self):
        paddle.disable_static()
        value = np.arange(26).reshape(2, 13).astype("float32")
        a = paddle.to_tensor(value)
        linear_1 = paddle.nn.Linear(13, 5)
        linear_2 = paddle.nn.Linear(5, 3)
        # Lamb with parameter groups: the second group overrides
        # lamb_weight_decay, beta1 and beta2, while the first group falls back
        # to the optimizer-level defaults.
        lamb = paddle.optimizer.Lamb(learning_rate=0.01,
                                     parameters=[{
                                         'params': linear_1.parameters()
                                     }, {
                                         'params': linear_2.parameters(),
                                         'lamb_weight_decay': 0.001,
                                         'beta1': 0.9,
                                         'beta2': 0.99
                                     }],
                                     lamb_weight_decay=0.01)
        out = linear_1(a)
        out = linear_2(out)
        out.backward()
        lamb.step()
        lamb.clear_gradients()


class TestLambOpMultiPrecision(unittest.TestCase):

    def check_main(self, x_np, place, multi_precision=False, seed=10, n=10):
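        # Build a small static-graph model, optionally decorate Lamb for pure
        # fp16 AMP training, run n steps, and return the final weight and bias
        # as float32 numpy arrays.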
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        with paddle.static.program_guard(main_prog, startup_prog):
            paddle.seed(seed)
            with paddle.static.amp.fp16_guard():
                x = paddle.static.data(name='x',
                                       shape=[None, 10],
                                       dtype='float32')
                linear = paddle.nn.Linear(10, 2)
                hidden = linear(x)
                loss = paddle.mean(hidden)

            original_optimizer = paddle.optimizer.Lamb(learning_rate=1e-3)
            original_optimizer._multi_precision = multi_precision
            if multi_precision:
                optimizer = paddle.static.amp.decorate(original_optimizer,
                                                       use_pure_fp16=True,
                                                       use_fp16_guard=True)
            else:
                optimizer = original_optimizer
            optimizer.minimize(loss)

        weight, bias = linear.weight, linear.bias
        exe = paddle.static.Executor(place)
        scope = paddle.static.Scope()
        x = main_prog.global_block().var(x.name)
        if x.dtype == core.VarDesc.VarType.FP16:
            x_np = x_np.astype(np.float16)

        def get_parameter(var):
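            # _get_parameter returns (param, master_weight). With
            # multi_precision the fp16 param must match the fp32 master cast
            # to fp16; without it there is no master weight.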
            name = var if isinstance(var, (str, bytes)) else var.name
            params = original_optimizer._get_parameter(name, scope)
            assert isinstance(params, (list, tuple))
            params = list(params)
            assert len(params) == 2
            if multi_precision:
                params[0] = np.array(params[0])
                params[1] = np.array(params[1])
                np.testing.assert_array_equal(params[0],
                                              params[1].astype(np.float16))
                return params[0].astype(np.float32)
            else:
                self.assertTrue(params[0] is not None)
                self.assertTrue(params[1] is None)
                params[0] = np.array(params[0])
                return params[0]

        with paddle.static.scope_guard(scope):
            exe.run(startup_prog)
            if multi_precision:
                optimizer.amp_init(place)

            weight_np, bias_np = None, None
            for i in range(n):
                feed_dict = {x.name: x_np}
                weight_np, bias_np = exe.run(main_prog,
                                             feed=feed_dict,
                                             fetch_list=[weight, bias])
                weight_np = weight_np.astype('float32')
                bias_np = bias_np.astype('float32')
                np.testing.assert_array_equal(weight_np, get_parameter(weight))
                np.testing.assert_array_equal(bias_np, get_parameter(bias))
            return weight_np, bias_np

    @switch_to_static_graph
    def test_main(self):
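        # Requires CUDA. The fp32 and pure-fp16 runs should agree to within
        # 1e-3 on the weight and 1e-7 on the bias.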
        if not paddle.is_compiled_with_cuda():
            return

        place = paddle.CUDAPlace(0)
        x_np = np.random.random(size=[5, 10]).astype('float32')
        weight_1, bias_1 = self.check_main(x_np, place, multi_precision=False)
        weight_2, bias_2 = self.check_main(x_np, place, multi_precision=True)
        self.assertTrue(np.all(np.abs(weight_1 - weight_2) < 1e-3))
        self.assertTrue(np.all(np.abs(bias_1 - bias_2) < 1e-7))


if __name__ == "__main__":
    unittest.main()