#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
import paddle.fluid.core as core
from paddle.fluid.op import Operator
from op_test import OpTest
import paddle
import paddle.fluid as fluid
from paddle.fluid.framework import _test_eager_guard


def calculate_momentum_by_numpy(param,
                                grad,
                                mu,
                                velocity,
                                use_nesterov,
                                learning_rate,
                                regularization_method=None,
                                regularization_coeff=1.0):
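    """NumPy reference implementation of the momentum update.

        velocity_out = mu * velocity + grad
        param_out = param - lr * velocity_out                 (vanilla)
        param_out = param - lr * (grad + mu * velocity_out)   (Nesterov)

    With "l2_decay" regularization the penalty is folded into the gradient
    first: grad = grad + regularization_coeff * param.
    """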
    if regularization_method == "l2_decay":
        grad = grad + regularization_coeff * param

        velocity_out = mu * velocity + grad
        if use_nesterov:
            param_out = param - (grad + velocity_out * mu) * learning_rate
        else:
            param_out = param - learning_rate * velocity_out
    else:
        velocity_out = mu * velocity + grad
        if use_nesterov:
            param_out = param - grad * learning_rate - \
                        velocity_out * mu * learning_rate
        else:
            param_out = param - learning_rate * velocity_out

    return param_out, velocity_out


class TestMomentumOp1(OpTest):

    def setUp(self):
        self.op_type = "momentum"
        self.dtype = np.float32
        self.init_dtype()

        param = np.random.random((123, 321)).astype(self.dtype)
        grad = np.random.random((123, 321)).astype(self.dtype)
        velocity = np.zeros((123, 321)).astype(self.dtype)
        learning_rate = np.array([0.001]).astype(np.float32)
        mu = 0.0001
        use_nesterov = False

        self.inputs = {
            'Param': param,
            'Grad': grad,
            'Velocity': velocity,
            'LearningRate': learning_rate
        }

        self.attrs = {'mu': mu}

        param_out, velocity_out = calculate_momentum_by_numpy(
            param=param,
            grad=grad,
            mu=mu,
            velocity=velocity,
            use_nesterov=use_nesterov,
            learning_rate=learning_rate)

        self.outputs = {'ParamOut': param_out, 'VelocityOut': velocity_out}

    def init_dtype(self):
        pass

    def test_check_output(self):
        self.check_output()


class TestMomentumOpFp16(TestMomentumOp1):

    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        self.check_output(atol=1e-3)


class TestMomentumOp2(OpTest):
    '''Test Momentum with default values for attributes
    '''

    def setUp(self):
        self.op_type = "momentum"

        param = np.random.random((123, 321)).astype("float32")
        grad = np.random.random((123, 321)).astype("float32")
        velocity = np.zeros((123, 321)).astype("float32")
        learning_rate = np.array([0.001]).astype("float32")
        mu = 0.0001
        use_nesterov = True

        self.inputs = {
            'Param': param,
            'Grad': grad,
            'Velocity': velocity,
            'LearningRate': learning_rate
        }

        self.attrs = {'mu': mu, 'use_nesterov': use_nesterov}

        param_out, velocity_out = calculate_momentum_by_numpy(
            param=param,
            grad=grad,
            mu=mu,
            velocity=velocity,
            use_nesterov=use_nesterov,
            learning_rate=learning_rate)

        self.outputs = {'ParamOut': param_out, 'VelocityOut': velocity_out}

    def test_check_output(self):
        self.check_output()


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestLarsMomentumOpWithMP(OpTest):

    def setUp(self):
        self.config()
        self.op_type = "lars_momentum"
        mu = 0.0001
        lars_coeff = 0.001
        lars_weight_decay = 0.0005
        rescale_grad = 1.0

        params = []
        grads = []
        velocitys = []
        learning_rates = []
        master_params = []
        param_outs = []
        velocity_outs = []
        master_param_outs = []
        for i in range(self.params_num):
            master_param = np.random.random((123, 321)).astype("float32")
            param = master_param.astype("float16")
            grad = np.random.random((123, 321)).astype("float16")
            velocity = np.zeros((123, 321)).astype("float32")
            learning_rate = np.array([0.001]).astype("float32")

            fp32_grad = grad.astype("float32")
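            # LARS trust ratio, computed in FP32 against the master weights:
            # local_lr = lr * lars_coeff * ||p|| / (||g|| + weight_decay * ||p||)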
            pnorm = np.sqrt(np.square(master_param).sum())
            gnorm = np.sqrt(np.square(fp32_grad).sum())
            local_lr = learning_rate * lars_coeff * pnorm / (
                gnorm + lars_weight_decay * pnorm)
            fp32_grad = fp32_grad * rescale_grad
            velocity_out = mu * velocity + local_lr * (
                fp32_grad + lars_weight_decay * master_param)
            p_new = master_param - velocity_out
            param_out = p_new.astype("float16")
            master_param_out = p_new

            params.append(("SubParam_" + str(i), param))
            grads.append(("SubGrad_" + str(i), grad))
            velocitys.append(("SubVelocity_" + str(i), velocity))
            learning_rates.append(("SubLearning_rate_" + str(i), learning_rate))
            velocity_outs.append(("SubVelocity_out_" + str(i), velocity_out))
            param_outs.append(("SubParam_out_" + str(i), param_out))
            master_params.append(("SubMasterParam_" + str(i), master_param))
            master_param_outs.append(
                ("SubMasterParamOut_" + str(i), master_param_out))

        self.inputs = {
            'Param': params,
            'Grad': grads,
            'Velocity': velocitys,
            'LearningRate': learning_rates,
            'MasterParam': master_params,
        }

        self.attrs = {
            'mu': mu,
            'lars_coeff': lars_coeff,
            'lars_weight_decay': [lars_weight_decay],
            'multi_precision': True,
            'rescale_grad': rescale_grad
        }

        self.outputs = {
            'ParamOut': param_outs,
            'VelocityOut': velocity_outs,
            'MasterParamOut': master_param_outs
        }

    def test_check_output(self):
        paddle.enable_static()
        if core.is_compiled_with_cuda():
            place = fluid.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place)

    def config(self):
        self.params_num = 1


class TestLarsMomentumOp(OpTest):

    def setUp(self):
        self.config()
        self.op_type = "lars_momentum"
        mu = 0.0001
        lars_coeff = 0.001
        lars_weight_decay = 0.0005

        params = []
        grads = []
        velocitys = []
        param_outs = []
        velocity_outs = []
        learning_rates = []
        for i in range(self.params_num):
            param = np.random.random((123, 321)).astype("float32")
            grad = np.random.random((123, 321)).astype("float32")
            velocity = np.zeros((123, 321)).astype("float32")
            learning_rate = np.array([0.001]).astype("float32")
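            # scalar LARS trust ratio, same formula as the multi-precision
            # test above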
            pnorm = np.sqrt(np.square(param).sum())
            gnorm = np.sqrt(np.square(grad).sum())
            local_lr = learning_rate * lars_coeff * pnorm / (
                gnorm + lars_weight_decay * pnorm)
            velocity_out = mu * velocity + local_lr * (
                grad + lars_weight_decay * param)
            param_out = param - velocity_out

            params.append(("SubParam_" + str(i), param))
            grads.append(("SubGrad_" + str(i), grad))
            velocitys.append(("SubVelocity_" + str(i), velocity))
            learning_rates.append(("SubLearning_rate_" + str(i), learning_rate))
            velocity_outs.append(("SubVelocity_out_" + str(i), velocity_out))
            param_outs.append(("SubParam_out_" + str(i), param_out))

        self.inputs = {
            'Param': params,
            'Grad': grads,
            'Velocity': velocitys,
            'LearningRate': learning_rates
        }

        self.attrs = {
            'mu': mu,
            'lars_coeff': lars_coeff,
            'lars_weight_decay': [lars_weight_decay]
        }
        self.outputs = {'ParamOut': param_outs, 'VelocityOut': velocity_outs}

    def test_check_output(self):
        paddle.enable_static()
        self.check_output()

    def config(self):
        self.params_num = 1


class TestSparseMomentumOp(unittest.TestCase):

    def setUp(self):
        self.use_nesterov = False
        self.regularization_method = ""
        self.regularization_coeff = 1.0

    def check_with_place(self, place):
        self.init_kernel()
        scope = core.Scope()
        # create and initialize Grad Variable
        height = 10
        rows = [0, 4, 7]
        row_numel = 12
        mu = 1.0
        use_nesterov = self.use_nesterov
        regularization_method = self.regularization_method
        regularization_coeff = self.regularization_coeff

        # create and initialize Param Variable
        param = scope.var('Param').get_tensor()
        param_array = np.full((height, row_numel), 5.0).astype("float32")
        param.set(param_array, place)
        param_out = scope.var("ParamOut").get_tensor()
        param_out_array = np.full((height, row_numel), 0.0).astype("float32")
        param_out.set(param_out_array, place)

        grad_selected_rows = scope.var('Grad').get_selected_rows()
        grad_selected_rows.set_height(height)
        grad_selected_rows.set_rows(rows)
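        # only the listed rows carry gradient values; the momentum kernel has
        # to scatter its update over exactly these rows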
        grad_np_array = np.ones((len(rows), row_numel)).astype("float32")
        grad_np_array[0, 0] = 2.0
        grad_np_array[2, 8] = 4.0
        grad_tensor = grad_selected_rows.get_tensor()
        grad_tensor.set(grad_np_array, place)

        velocity = scope.var('Velocity').get_tensor()
        velocity_np_array = np.ones((height, row_numel)).astype("float32")
        velocity.set(velocity_np_array, place)
        velocity_out = scope.var('VelocityOut').get_tensor()
        velocity_out_np_array = np.full((height, row_numel),
                                        0.0).astype("float32")
        velocity_out.set(velocity_out_np_array, place)

        # create and initialize LearningRate Variable
        lr = scope.var('LearningRate').get_tensor()
        lr_array = np.full((1), 2.0).astype("float32")
        lr.set(lr_array, place)

        # create and run operator
        op = Operator("momentum",
                      Param='Param',
                      Grad='Grad',
                      Velocity='Velocity',
                      ParamOut='ParamOut',
                      VelocityOut='VelocityOut',
                      LearningRate='LearningRate',
                      mu=mu,
                      use_nesterov=use_nesterov,
                      regularization_method=regularization_method,
                      regularization_coeff=regularization_coeff)
        op.run(scope, place)

        # get and compare result
        param_out_np_array = np.array(param_out)
        velocity_out_np_array = np.array(velocity_out)

        # TODO(dzh): add a more suitable general numpy interface
        # for sparse update.
        _grad_np_array = np.full((height, row_numel), 0.0).astype("float32")
        for i in range(len(rows)):
            _grad_np_array[rows[i]] = grad_np_array[i]

        _param = param_array

        _param_out, _velocity_out = calculate_momentum_by_numpy(
            param=_param,
            grad=_grad_np_array,
            mu=mu,
            velocity=velocity_np_array,
            use_nesterov=use_nesterov,
            learning_rate=lr_array,
            regularization_method=regularization_method,
            regularization_coeff=regularization_coeff)

        self.assertTrue((_velocity_out == velocity_out_np_array).all())
        self.assertTrue((_param_out == param_out_np_array).all())

    def init_kernel(self):
        pass

    def test_sparse_momentum(self):
        places = [core.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(core.CUDAPlace(0))
        for place in places:
            self.check_with_place(place)


class TestSparseMomentumOp2(TestSparseMomentumOp):

    def init_kernel(self):
        self.use_nesterov = True


class TestSparseMomentumOpWithMultiPrecision(unittest.TestCase):

    def setUp(self):
        self.init_args()
        self.regularization_method = ""
        self.regularization_coeff = 1.0

    def check_with_place(self, place):
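        # same sparse check as above, but Param and Grad are FP16 while an
        # FP32 MasterParam carries the accurate accumulation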
        scope = core.Scope()
        # create and initialize Grad Variable
        height = 10
        rows = [0, 4, 7]
        row_numel = 12
        mu = 1.0
        use_nesterov = self.use_nesterov
        regularization_method = self.regularization_method
        regularization_coeff = self.regularization_coeff

        # create and initialize Param Variable
        param_array = np.full((height, row_numel), 5.0).astype("float32")
        param_out_array = np.full((height, row_numel), 0.0).astype("float32")

        param = scope.var('Param').get_tensor()
        param.set(param_array.astype("float16"), place)
        param_out = scope.var("ParamOut").get_tensor()
        param_out.set(param_out_array.astype("float16"), place)

        master_param = scope.var('MasterParam').get_tensor()
        master_param.set(param_array, place)
        master_param_out = scope.var("MasterParamOut").get_tensor()
        master_param_out.set(param_out_array, place)

        grad_selected_rows = scope.var('Grad').get_selected_rows()
        grad_selected_rows.set_height(height)
        grad_selected_rows.set_rows(rows)
        grad_np_array = np.ones((len(rows), row_numel)).astype("float32")
        grad_np_array[0, 0] = 2.0
        grad_np_array[2, 8] = 4.0
        grad_tensor = grad_selected_rows.get_tensor()
        grad_tensor.set(grad_np_array.astype("float16"), place)

        velocity = scope.var('Velocity').get_tensor()
        velocity_np_array = np.ones((height, row_numel)).astype("float32")
        velocity.set(velocity_np_array, place)
        velocity_out = scope.var('VelocityOut').get_tensor()
        velocity_out_np_array = np.full((height, row_numel),
                                        0.0).astype("float32")
        velocity_out.set(velocity_out_np_array, place)

        # create and initialize LearningRate Variable
        lr = scope.var('LearningRate').get_tensor()
        lr_array = np.full((1), 2.0).astype("float32")
        lr.set(lr_array, place)

        # create and run operator
        op = Operator("momentum",
                      Param='Param',
                      Grad='Grad',
                      Velocity='Velocity',
                      MasterParam='MasterParam',
                      ParamOut='ParamOut',
                      VelocityOut='VelocityOut',
                      MasterParamOut='MasterParamOut',
                      LearningRate='LearningRate',
                      mu=mu,
                      use_nesterov=use_nesterov,
                      regularization_method=regularization_method,
                      regularization_coeff=regularization_coeff,
                      multi_precision=True,
                      rescale_grad=1.0)
        op.run(scope, place)

        # get and compare result
        param_out_np_array = np.array(param_out)
        velocity_out_np_array = np.array(velocity_out)

        _grad_np_array = np.full((height, row_numel), 0.0).astype("float32")
        for i in range(len(rows)):
            _grad_np_array[rows[i]] = grad_np_array[i]

        _param = param_array

        _param_out, _velocity_out = calculate_momentum_by_numpy(
            param=_param,
            grad=_grad_np_array,
            mu=mu,
            velocity=velocity_np_array,
            use_nesterov=use_nesterov,
            learning_rate=lr_array,
            regularization_method=regularization_method,
            regularization_coeff=regularization_coeff)

        self.assertTrue((_velocity_out == velocity_out_np_array).all())
        self.assertTrue((_param_out == param_out_np_array).all())

    def init_args(self):
        self.use_nesterov = False

    def test_sparse_momentum(self):
        if core.is_compiled_with_cuda():
            self.check_with_place(fluid.CUDAPlace(0))


class TestSparseMomentumOpWithMultiPrecision2(
        TestSparseMomentumOpWithMultiPrecision):

    def init_args(self):
        self.use_nesterov = True


class TestMomentumV2(unittest.TestCase):

    def test_momentum_dygraph(self):
        paddle.disable_static()
        value = np.arange(26).reshape(2, 13).astype("float32")
        a = paddle.to_tensor(value)
        linear = paddle.nn.Linear(13, 5)
        # This can be any optimizer supported by dygraph.
        adam = paddle.optimizer.Momentum(learning_rate=0.01,
                                         momentum=0.9,
                                         parameters=linear.parameters())
        out = linear(a)
        out.backward()
        adam.step()
        adam.clear_gradients()

    def test_momentum(self):
        paddle.enable_static()
        place = fluid.CPUPlace()
        main = fluid.Program()
        with fluid.program_guard(main):
            x = fluid.layers.data(name='x', shape=[13], dtype='float32')
            y = fluid.layers.data(name='y', shape=[1], dtype='float32')
            y_predict = fluid.layers.fc(input=x, size=1, act=None)
            cost = fluid.layers.square_error_cost(input=y_predict, label=y)
            avg_cost = paddle.mean(cost)

            rms_optimizer = paddle.optimizer.Momentum(learning_rate=0.1,
                                                      momentum=0.9)
            rms_optimizer.minimize(avg_cost)

            fetch_list = [avg_cost]
            train_reader = paddle.batch(paddle.dataset.uci_housing.train(),
                                        batch_size=1)
            feeder = fluid.DataFeeder(place=place, feed_list=[x, y])
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())
            for data in train_reader():
                exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list)

    def test_raise_error(self):
        self.assertRaises(ValueError,
                          paddle.optimizer.Momentum,
                          learning_rate=None)
        self.assertRaises(ValueError, paddle.optimizer.Momentum, momentum=None)

    def test_api_eager_dygraph(self):
        with _test_eager_guard():
            self.test_momentum_dygraph()
            self.test_raise_error()


class TestMomentumOpWithDecay(OpTest):

    def setUp(self):
        self.op_type = "momentum"
        self.dtype = np.float32
        self.use_nesterov = True
        self.regularization_method = 'l2_decay'
        self.regularization_coeff = 0.9
        self.init_config()

        param = np.random.random((123, 321)).astype(self.dtype)
        grad = np.random.random((123, 321)).astype(self.dtype)
        velocity = np.zeros((123, 321)).astype(self.dtype)
        learning_rate = np.array([0.001]).astype(np.float32)
        mu = 0.0001
        use_nesterov = self.use_nesterov
        regularization_method = self.regularization_method
        regularization_coeff = self.regularization_coeff

        self.inputs = {
            'Param': param,
            'Grad': grad,
            'Velocity': velocity,
            'LearningRate': learning_rate
        }

        self.attrs = {
            'mu': mu,
            'use_nesterov': use_nesterov,
            'regularization_method': regularization_method,
            'regularization_coeff': regularization_coeff
        }

        grad = grad + regularization_coeff * param

        param_out, velocity_out = calculate_momentum_by_numpy(
            param=param,
            grad=grad,
            mu=mu,
            velocity=velocity,
            use_nesterov=use_nesterov,
            learning_rate=learning_rate)

        self.outputs = {'ParamOut': param_out, 'VelocityOut': velocity_out}

    def init_config(self):
        pass

    def test_check_output(self):
        paddle.enable_static()
        self.check_output()


class TestMomentumOpWithDecayFP16(TestMomentumOpWithDecay):

    def init_config(self):
        self.dtype = np.float16

    def test_check_output(self):
        paddle.enable_static()
        self.check_output(atol=1e-3)


class TestMomentumOpWithDecay2(TestMomentumOpWithDecay):

    def init_config(self):
        self.use_nesterov = False


class TestSparseMomentumOpWithDecay(TestSparseMomentumOp):

    def setUp(self):
        self.use_nesterov = False
        self.regularization_method = 'l2_decay'
        self.regularization_coeff = 0.9


class TestSparseMomentumOpWithDecay2(TestSparseMomentumOpWithDecay):

    def init_kernel(self):
        self.use_nesterov = True


class TestMomentumOpWithDecayAPI(unittest.TestCase):

    def _test_momentum_dygraph_common(self, regularization):
        paddle.disable_static()
        inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
        linear = paddle.nn.Linear(10, 10)
        inp = paddle.to_tensor(inp)
        out = linear(inp)
        loss = paddle.mean(out)
        # This can be any optimizer supported by dygraph.
        momentum = paddle.fluid.contrib.optimizer.Momentum(
            learning_rate=0.01,
            momentum=0.9,
            parameter_list=linear.parameters(),
            regularization=regularization)
        momentum.minimize(loss)

    def test_momentum_dygraph_1(self):
        self._test_momentum_dygraph_common(
            regularization=paddle.fluid.regularizer.L2Decay(
                regularization_coeff=0.1))

    def test_momentum_static(self):
        paddle.enable_static()
        place = fluid.CPUPlace()
        main = fluid.Program()
        with fluid.program_guard(main):
            x = fluid.layers.data(name='x', shape=[13], dtype='float32')
            y = fluid.layers.data(name='y', shape=[1], dtype='float32')
            y_predict = fluid.layers.fc(input=x, size=1, act=None)
            cost = fluid.layers.square_error_cost(input=y_predict, label=y)
            avg_cost = paddle.mean(cost)

            momentum_optimizer = paddle.fluid.contrib.optimizer.Momentum(
                learning_rate=0.1, momentum=0.9)
            momentum_optimizer.minimize(avg_cost)

            fetch_list = [avg_cost]
            train_reader = paddle.batch(paddle.dataset.uci_housing.train(),
                                        batch_size=1)
            feeder = fluid.DataFeeder(place=place, feed_list=[x, y])
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())
            for data in train_reader():
                exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list)


class TestFusedMomentumWithDecayAPI(unittest.TestCase):

    def get_program(self, weight_attr, bias_attr=False):
        main_program = paddle.static.Program()
        startup_program = paddle.static.Program()
        with paddle.static.program_guard(main_program=main_program,
                                         startup_program=startup_program):
            x = paddle.static.data(name='x', shape=[10, 10])
            linear = paddle.nn.Linear(10,
                                      10,
                                      weight_attr=weight_attr,
                                      bias_attr=bias_attr)
            out = linear(x)
            loss = paddle.mean(out)
            optimizer = paddle.optimizer.Momentum(
                learning_rate=0.01,
                momentum=0.9,
                weight_decay=paddle.regularizer.L2Decay(0.5))
            optimizer.minimize(loss)
        return main_program

    def test_param_has_l2decay(self):
        paddle.enable_static()
        weight_attr = paddle.ParamAttr(
            name="weight",
            initializer=paddle.nn.initializer.Constant(value=0.5),
            regularizer=paddle.regularizer.L2Decay(0.1))
        program = self.get_program(weight_attr, bias_attr=False)
        ops = program.global_block().ops

        self.assertEqual(ops[-1].attr('regularization_method'), 'l2_decay')
        self.assertEqual(ops[-1].attr('regularization_coeff'), np.float32(0.1))
        for i in range(len(ops)):
            self.assertTrue('sum' not in ops[i].type)
            self.assertTrue('scale' not in ops[i].type)

    def test_param_has_l1decay(self):
        paddle.enable_static()
        weight_attr = paddle.ParamAttr(
            name="weight",
            initializer=paddle.nn.initializer.Constant(value=0.5),
            regularizer=paddle.regularizer.L1Decay(0.1))
        bias_attr = paddle.ParamAttr(
            name="bias",
            initializer=paddle.nn.initializer.Constant(value=0.),
            regularizer=None)
        program = self.get_program(weight_attr, bias_attr)
        ops = program.global_block().ops

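        # L1Decay cannot be fused into the momentum op, so it is lowered to
        # explicit sign/scale/sum ops ahead of the two momentum updates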
        self.assertEqual(ops[-1].type, 'momentum')
        self.assertEqual(ops[-2].type, 'momentum')
        self.assertEqual(ops[-3].type, 'sum')
        self.assertEqual(ops[-4].type, 'scale')
        self.assertEqual(ops[-5].type, 'sign')
        self.assertEqual(ops[-6].type, 'matmul_v2_grad')
        if 'weight' in ops[-1].input('Param'):
            self.assertEqual(ops[-1].attr('regularization_method'), '')
            self.assertEqual(ops[-1].attr('regularization_coeff'), 0)
        if 'bias' in ops[-2].input('Param'):
            self.assertEqual(ops[-2].attr('regularization_method'), 'l2_decay')
            self.assertEqual(ops[-2].attr('regularization_coeff'),
                             np.float32(0.5))

    def test_param_has_no_regularizer(self):
        paddle.enable_static()
        program = self.get_program(weight_attr=None)
        ops = program.global_block().ops
        self.assertEqual(ops[-1].attr('regularization_method'), 'l2_decay')
        self.assertEqual(ops[-1].attr('regularization_coeff'), np.float32(0.5))
        for i in range(len(ops)):
            self.assertTrue('sum' not in ops[i].type)
            self.assertTrue('scale' not in ops[i].type)


class TestMomentumOpVsMomentumOpWithDecayAPI(unittest.TestCase):

    def __update_params(self, momentum, linear):
        for i in range(10):
            inp = paddle.full(shape=[2, 2], fill_value=i,
                              dtype='float32').astype("float32")
            inp = paddle.to_tensor(inp)
            out = linear(inp)
            loss = paddle.mean(out)
            loss.backward()
            momentum.minimize(loss)
            linear.clear_gradients()

    def __test_vs(self, place=fluid.CPUPlace()):
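        # fluid.optimizer.Momentum with a separate L2Decay regularizer and the
        # contrib Momentum (decay folded into the op) must update identically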
        paddle.disable_static(place=place)

        linear_old = paddle.nn.Linear(
            2,
            2,
            weight_attr=paddle.nn.initializer.Constant(value=2.0),
            bias_attr=paddle.nn.initializer.Constant(value=2.0))
        momentum_old = paddle.fluid.optimizer.Momentum(
            learning_rate=0.01,
            momentum=0.9,
            parameter_list=linear_old.parameters(),
            regularization=paddle.fluid.regularizer.L2Decay(
                regularization_coeff=0.1))
        self.__update_params(momentum=momentum_old, linear=linear_old)

        linear_new = paddle.nn.Linear(
            2,
            2,
            weight_attr=paddle.nn.initializer.Constant(value=2.0),
            bias_attr=paddle.nn.initializer.Constant(value=2.0))
        momentum_new = paddle.fluid.contrib.optimizer.Momentum(
            learning_rate=0.01,
            momentum=0.9,
            parameter_list=linear_new.parameters(),
            regularization=paddle.fluid.regularizer.L2Decay(
                regularization_coeff=0.1))
        self.__update_params(momentum=momentum_new, linear=linear_new)

        self.assertEqual(
            (linear_old.weight.numpy() == linear_new.weight.numpy()).all(),
            True,
            'the param weight updated by two Momentum optimizers should equal')

    def test_vs(self, place=fluid.CPUPlace()):
        places = [fluid.CPUPlace()]
        if paddle.fluid.core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))

        for place in places:
            self.__test_vs(place=place)


class TestMomentumV2Group(TestMomentumV2):

    def test_momentum_dygraph(self):
        paddle.disable_static()
        value = np.arange(26).reshape(2, 13).astype("float32")
        a = paddle.to_tensor(value)
        linear_1 = paddle.nn.Linear(13, 5)
        linear_2 = paddle.nn.Linear(5, 3)
        # This can be any optimizer supported by dygraph.
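        # the second parameter group overrides weight_decay, learning_rate
        # and momentum for linear_2 only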
        adam = paddle.optimizer.Momentum(learning_rate=0.01,
                                         parameters=[{
                                             'params':
                                             linear_1.parameters()
                                         }, {
                                             'params':
                                             linear_2.parameters(),
                                             'weight_decay':
                                             0.001,
                                             'learning_rate':
                                             0.1,
                                             'momentum':
                                             0.99
                                         }],
                                         weight_decay=0.1,
                                         momentum=0.9)
        out = linear_1(a)
        out = linear_2(out)
        out.backward()
        adam.step()
        adam.clear_gradients()


class TestMultiTensorMomentumDygraph(unittest.TestCase):

    def _momentum_optimize_dygraph(self,
                                   place,
                                   use_param_attr=False,
                                   use_param_group=False,
                                   use_amp=False,
                                   use_multi_tensor=False):
        paddle.disable_static()
        paddle.seed(10)
        paddle.set_device(place)
        input = paddle.randn((5, 5))
        weight_attr = paddle.ParamAttr(
            learning_rate=0.5,
            regularizer=paddle.regularizer.L2Decay(1.0),
            trainable=True)
        if use_param_attr:
            model = paddle.nn.Linear(5, 5, weight_attr)
        else:
            model = paddle.nn.Linear(5, 5)
        if not use_param_group:
            optimizer = paddle.optimizer.Momentum(
                parameters=model.parameters(),
                use_multi_tensor=use_multi_tensor,
                multi_precision=use_amp)
        else:
            optimizer = paddle.optimizer.Momentum(
                parameters=[{
                    'params': model.parameters(),
                    'weight_decay': 0.001,
                    'learning_rate': 0.1,
                    'momentum': 0.99
                }],
                use_multi_tensor=use_multi_tensor,
                multi_precision=use_amp)
        for idx in range(5):
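            # under AMP (O2) the forward/backward run in FP16 with loss
            # scaling; otherwise this is a plain FP32 step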
            if place == 'gpu' and use_amp:
                model = paddle.amp.decorate(models=model, level='O2')
                scaler = paddle.amp.GradScaler(init_loss_scaling=1024)
                with paddle.amp.auto_cast(level='O2'):
                    output = model(input)
                    loss = paddle.mean(output)
                scaled = scaler.scale(loss)
                scaled.backward()
                scaler.step(optimizer)
                optimizer.clear_grad(set_to_zero=False)
            else:
                output = model(input)
                loss = paddle.mean(output)
                # This can be any optimizer supported by dygraph.
                loss.backward()
                optimizer.step()
                optimizer.clear_grad(set_to_zero=False)
        return output, model.parameters()

    def _get_places(self):
        places = ['cpu']
        if paddle.is_compiled_with_cuda():
            places.append('gpu')
        return places

    def _check_with_place_amp(self, place, use_amp):
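        # run the same training twice, with and without the fused
        # multi-tensor path; outputs and final parameters must agree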
        output1, params1 = self._momentum_optimize_dygraph(
            place=place, use_amp=use_amp, use_multi_tensor=True)
        output2, params2 = self._momentum_optimize_dygraph(
            place=place, use_amp=use_amp, use_multi_tensor=False)

        np.testing.assert_allclose(output1, output2, rtol=1e-05)
        for idx in range(len(params1)):
            np.testing.assert_allclose(params1[idx], params2[idx], rtol=1e-05)

    def _check_with_param_attr(self, place, use_amp):
        output1, params1 = self._momentum_optimize_dygraph(
            place=place,
            use_amp=use_amp,
            use_param_attr=True,
            use_multi_tensor=True)
        output2, params2 = self._momentum_optimize_dygraph(
            place=place,
            use_amp=use_amp,
            use_param_attr=True,
            use_multi_tensor=False)
        np.testing.assert_allclose(output1, output2, rtol=1e-05)
        for idx in range(len(params1)):
            np.testing.assert_allclose(params1[idx], params2[idx], rtol=1e-05)

    def _check_with_param_group(self, place, use_amp):
        output1, params1 = self._momentum_optimize_dygraph(
            place=place,
            use_amp=use_amp,
            use_param_group=True,
            use_multi_tensor=True)
        output2, params2 = self._momentum_optimize_dygraph(
            place=place,
            use_amp=use_amp,
            use_param_group=True,
            use_multi_tensor=False)
        np.testing.assert_allclose(output1, output2, rtol=1e-05)
        for idx in range(len(params1)):
            np.testing.assert_allclose(params1[idx], params2[idx], rtol=1e-05)

    def test_main(self):
        for place in self._get_places():
            use_amp_list = [True, False]
            for use_amp in use_amp_list:
                self._check_with_place_amp(place, use_amp)
                self._check_with_param_attr(place, use_amp)
                self._check_with_param_group(place, use_amp)

    def test_api_eager_dygraph(self):
        with _test_eager_guard():
            self.test_main()


class TestMultiTensorMomentumStatic(unittest.TestCase):

    def _momentum_optimize_static(self,
                                  place,
                                  use_amp=False,
                                  use_multi_tensor=False):
        paddle.enable_static()
        paddle.seed(10)
        np.random.seed(10)
        if place == 'cpu':
            use_amp = False
        exe = paddle.static.Executor(place=place)
        train_program = paddle.static.Program()
        startup_program = paddle.static.Program()
        optimizer = paddle.optimizer.Momentum(multi_precision=use_amp,
                                              use_multi_tensor=use_multi_tensor)
        if use_amp:
            optimizer = paddle.static.amp.decorate(
                optimizer,
                init_loss_scaling=128.0,
                use_dynamic_loss_scaling=True,
                use_pure_fp16=True,
                use_fp16_guard=False)
        with paddle.static.program_guard(train_program, startup_program):
            if use_amp:
                data = paddle.static.data(shape=[2, 2],
                                          name='X',
                                          dtype='float16')
            else:
                data = paddle.static.data(shape=[2, 2],
                                          name='X',
                                          dtype='float32')
            hidden = paddle.static.nn.fc(x=data, size=10)
            loss = paddle.mean(hidden)
            optimizer.minimize(loss)
        exe.run(startup_program)
        if use_amp:
            optimizer.amp_init(place=place, scope=paddle.static.global_scope())
            x = np.random.random(size=(2, 2)).astype('float16')
        else:
            x = np.random.random(size=(2, 2)).astype('float32')
        out = []
        for idx in range(5):
            loss_data, = exe.run(train_program,
                                 feed={"X": x},
                                 fetch_list=[loss.name])
            out.append(loss_data)
        return out

    def _get_places(self):
        places = ['cpu']
        if paddle.is_compiled_with_cuda():
            places.append('gpu')
        return places

    def _check_with_place_amp(self, place, use_amp):
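        # static-graph counterpart: per-step losses with and without the
        # multi-tensor path must agree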
        output1 = self._momentum_optimize_static(place=place,
                                                 use_amp=use_amp,
                                                 use_multi_tensor=True)
        output2 = self._momentum_optimize_static(place=place,
                                                 use_amp=use_amp,
                                                 use_multi_tensor=False)
        for idx in range(len(output1)):
            np.testing.assert_allclose(output1[idx], output2[idx], rtol=1e-05)

    def test_main(self):
        for place in self._get_places():
            use_amp_list = [True, False]
            for use_amp in use_amp_list:
                self._check_with_place_amp(place, use_amp)


if __name__ == "__main__":
    paddle.enable_static()
    unittest.main()