#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import tempfile
import unittest

import numpy
import numpy as np

import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.fluid.framework as framework
import paddle.fluid.optimizer as optimizer
from paddle.fluid.backward import append_backward
from paddle.fluid.framework import (
    Program,
    _test_eager_guard,
    convert_np_dtype_to_dtype_,
    program_guard,
)
from paddle.io import Dataset


class TestOptimizer(unittest.TestCase):
    def test_sgd_optimizer(self):
        def check_sgd_optimizer(optimizer_attr):
            init_program = framework.Program()
            program = framework.Program()
            block = program.global_block()
            mul_x = block.create_parameter(
                dtype="float32",
                shape=[5, 10],
                lod_level=0,
                name="mul.x",
                optimize_attr=optimizer_attr,
            )
            mul_y = block.create_var(
                dtype="float32", shape=[10, 8], lod_level=0, name="mul.y"
            )
            mul_out = block.create_var(
                dtype="float32", shape=[5, 8], lod_level=0, name="mul.out"
            )
            mean_out = block.create_var(
                dtype="float32", shape=[1], lod_level=0, name="mean.out"
            )
            block.append_op(
                type="mul",
                inputs={"X": mul_x, "Y": mul_y},
                outputs={"Out": mul_out},
                attrs={"x_num_col_dims": 1},
            )
            block.append_op(
                type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out}
            )
            sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.01)
            opts, _ = sgd_optimizer.minimize(mean_out, init_program)
            return opts

        opts = check_sgd_optimizer({'learning_rate': 1.1})
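        # Note: a per-parameter learning rate other than 1.0 is expected to
        # add an extra 'scale' op in front of 'sgd' (checked below).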
        self.assertEqual(len(opts), 2)
        self.assertEqual([op.type for op in opts], ["scale", "sgd"])

        opts = check_sgd_optimizer({'learning_rate': 1.0})
        self.assertEqual(len(opts), 1)
        self.assertEqual([op.type for op in opts], ["sgd"])


class TestOptimizerBackwardApplygrad(unittest.TestCase):
    def test_sgd_optimizer(self):
        def check_sgd_optimizer(optimizer_attr):
            init_program = framework.Program()
            program = framework.Program()
            block = program.global_block()
            mul_x = block.create_parameter(
                dtype="float32",
                shape=[5, 10],
                lod_level=0,
                name="mul.x",
                optimize_attr=optimizer_attr,
            )
            mul_y = block.create_var(
                dtype="float32", shape=[10, 8], lod_level=0, name="mul.y"
            )
            mul_out = block.create_var(
                dtype="float32", shape=[5, 8], lod_level=0, name="mul.out"
            )
            mean_out = block.create_var(
                dtype="float32", shape=[1], lod_level=0, name="mean.out"
            )
            block.append_op(
                type="mul",
                inputs={"X": mul_x, "Y": mul_y},
                outputs={"Out": mul_out},
                attrs={"x_num_col_dims": 1},
            )
            block.append_op(
                type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out}
            )
            sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.01)
            with framework.program_guard(program, init_program):
                p_g = sgd_optimizer.backward(mean_out)
                opts = sgd_optimizer.apply_gradients(p_g)
            return opts

        opts = check_sgd_optimizer({'learning_rate': 1.1})
        self.assertEqual(len(opts), 2)
        self.assertEqual([op.type for op in opts], ["scale", "sgd"])

        opts = check_sgd_optimizer({'learning_rate': 1.0})
        self.assertEqual(len(opts), 1)
        self.assertEqual([op.type for op in opts], ["sgd"])


class TestMomentumOptimizer(unittest.TestCase):
    class MockMomentum(optimizer.MomentumOptimizer):
        def get_accumulators(self):
            return self._accumulators

        def get_velocity_str(self):
            return self._velocity_acc_str

    def test_vanilla_momentum_optimizer(self):
        init_program = framework.Program()
        program = framework.Program()
        block = program.global_block()
        mul_x = block.create_parameter(
            dtype="float32",
            shape=[5, 10],
            lod_level=0,
            name="mul.x",
            optimize_attr={'learning_rate': 1.1},
        )
        mul_y = block.create_var(
            dtype="float32", shape=[10, 8], lod_level=0, name="mul.y"
        )
        mul_out = block.create_var(
            dtype="float32", shape=[5, 8], lod_level=0, name="mul.out"
        )
        block.append_op(
            type="mul",
            inputs={"X": mul_x, "Y": mul_y},
            outputs={"Out": mul_out},
            attrs={"x_num_col_dims": 1},
        )
        learning_rate = 0.01
        momentum_optimizer = self.MockMomentum(
            learning_rate=learning_rate, momentum=0.2
        )
        mean_out = block.create_var(
            dtype="float32", shape=[1], lod_level=0, name="mean.out"
        )
        block.append_op(
            type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out}
        )
        params_grads = append_backward(mean_out)
        self.assertEqual(len(params_grads), 1)
        self.assertEqual(len(momentum_optimizer.get_accumulators()), 0)
        with framework.program_guard(program, init_program):
            opts = momentum_optimizer.apply_gradients(params_grads)
        self.assertEqual(len(opts), 2)
        sgd_op = opts[-1]
        self.assertEqual([op.type for op in opts], ["scale", "momentum"])
        self.assertFalse(sgd_op.attr('use_nesterov'))

        # Check accumulators
        accumulators = momentum_optimizer.get_accumulators()
        self.assertEqual(len(accumulators), 1)
        self.assertTrue(momentum_optimizer.get_velocity_str() in accumulators)
        velocity_acc = accumulators[momentum_optimizer.get_velocity_str()]
        self.assertEqual(len(velocity_acc), 1)
        self.assertTrue(mul_x.name in velocity_acc)

        # Check init_program
        init_ops = init_program.global_block().ops
        self.assertEqual(len(init_ops), 2)
        self.assertEqual(init_ops[1].type, "fill_constant")
        self.assertAlmostEqual(init_ops[1].attr('value'), learning_rate)
        self.assertEqual(init_ops[0].type, "fill_constant")
        self.assertAlmostEqual(init_ops[0].attr('value'), 0.0)

    def test_nesterov_momentum_optimizer(self):
        init_program = framework.Program()
        program = framework.Program()
        block = program.global_block()
        mul_x = block.create_parameter(
            dtype="float32",
            shape=[5, 10],
            lod_level=0,
            name="mul.x",
            optimize_attr={'learning_rate': 1.1},
        )
        mul_y = block.create_var(
            dtype="float32", shape=[10, 8], lod_level=0, name="mul.y"
        )
        mul_out = block.create_var(
            dtype="float32", shape=[5, 8], lod_level=0, name="mul.out"
        )
        block.append_op(
            type="mul",
            inputs={"X": mul_x, "Y": mul_y},
            outputs={"Out": mul_out},
            attrs={"x_num_col_dims": 1},
        )
        mean_out = block.create_var(
            dtype="float32", shape=[1], lod_level=0, name="mean.out"
        )
        block.append_op(
            type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out}
        )
        learning_rate = 0.01
        momentum_optimizer = self.MockMomentum(
            learning_rate=learning_rate, momentum=0.2, use_nesterov=True
        )
        params_grads = append_backward(mean_out)
        self.assertEqual(len(params_grads), 1)
        self.assertEqual(len(momentum_optimizer.get_accumulators()), 0)
        with framework.program_guard(program, init_program):
            opts = momentum_optimizer.apply_gradients(params_grads)
        self.assertEqual(len(opts), 2)
        sgd_op = opts[-1]
        self.assertEqual([op.type for op in opts], ["scale", "momentum"])
        self.assertTrue(sgd_op.attr('use_nesterov'))

        # Check accumulators
        accumulators = momentum_optimizer.get_accumulators()
        self.assertEqual(len(accumulators), 1)
        self.assertTrue(momentum_optimizer.get_velocity_str() in accumulators)
        velocity_acc = accumulators[momentum_optimizer.get_velocity_str()]
        self.assertEqual(len(velocity_acc), 1)
        self.assertTrue(mul_x.name in velocity_acc)

        # Check init_program
        init_ops = init_program.global_block().ops
        self.assertEqual(len(init_ops), 2)
        self.assertEqual(init_ops[1].type, "fill_constant")
        self.assertAlmostEqual(init_ops[1].attr('value'), learning_rate)
        self.assertEqual(init_ops[0].type, "fill_constant")
        self.assertAlmostEqual(init_ops[0].attr('value'), 0.0)


class TestAdagradOptimizer(unittest.TestCase):
    class MockAdagrad(optimizer.AdagradOptimizer):
        def get_accumulators(self):
            return self._accumulators

        def get_moment_str(self):
            return self._moment_acc_str

    def test_adagrad_optimizer(self):
        init_program = framework.Program()
        program = framework.Program()
        block = program.global_block()
        mul_x = block.create_parameter(
            dtype="float32",
            shape=[5, 10],
            lod_level=0,
            name="mul.x",
            optimize_attr={'learning_rate': 1.1},
        )
        mul_y = block.create_var(
            dtype="float32", shape=[10, 8], lod_level=0, name="mul.y"
        )
        mul_out = block.create_var(
            dtype="float32", shape=[5, 8], lod_level=0, name="mul.out"
        )
        block.append_op(
            type="mul",
            inputs={"X": mul_x, "Y": mul_y},
            outputs={"Out": mul_out},
            attrs={"x_num_col_dims": 1},
        )
        mean_out = block.create_var(
            dtype="float32", shape=[1], lod_level=0, name="mean.out"
        )
        block.append_op(
            type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out}
        )
        learning_rate = 0.01
        adagrad_optimizer = self.MockAdagrad(
            learning_rate=learning_rate, epsilon=1.0e-6
        )
        params_grads = append_backward(mean_out)
        self.assertEqual(len(params_grads), 1)
        self.assertEqual(len(adagrad_optimizer.get_accumulators()), 0)
        with framework.program_guard(program, init_program):
            opts = adagrad_optimizer.apply_gradients(params_grads)
        self.assertEqual(len(opts), 2)
        self.assertEqual([op.type for op in opts], ["scale", "adagrad"])

        # Check accumulators
        accumulators = adagrad_optimizer.get_accumulators()
        self.assertEqual(len(accumulators), 1)
        self.assertTrue(adagrad_optimizer.get_moment_str() in accumulators)
        moment_acc = accumulators[adagrad_optimizer.get_moment_str()]
        self.assertEqual(len(moment_acc), 1)
        self.assertTrue(mul_x.name in moment_acc)

        # Check init_program
        init_ops = init_program.global_block().ops
        self.assertEqual(len(init_ops), 2)
        self.assertEqual(init_ops[1].type, "fill_constant")
        self.assertAlmostEqual(init_ops[1].attr('value'), learning_rate)
        self.assertEqual(init_ops[0].type, "fill_constant")
        self.assertAlmostEqual(init_ops[0].attr('value'), 0.0)


class TestAdamOptimizer(unittest.TestCase):
    class MockAdam(optimizer.AdamOptimizer):
        def get_accumulators(self):
            return self._accumulators

        def get_moment1_str(self):
            return self._moment1_acc_str

        def get_moment2_str(self):
            return self._moment2_acc_str

    def test_adam_optimizer(self):
        init_program = framework.Program()
        program = framework.Program()
        block = program.global_block()
        mul_x = block.create_parameter(
            dtype="float32",
            shape=[5, 10],
            lod_level=0,
            name="mul.x",
            optimize_attr={'learning_rate': 1.1},
        )
        mul_y = block.create_var(
            dtype="float32", shape=[10, 8], lod_level=0, name="mul.y"
        )
        mul_out = block.create_var(
            dtype="float32", shape=[5, 8], lod_level=0, name="mul.out"
        )
        block.append_op(
            type="mul",
            inputs={"X": mul_x, "Y": mul_y},
            outputs={"Out": mul_out},
            attrs={"x_num_col_dims": 1},
        )
        mean_out = block.create_var(
            dtype="float32", shape=[1], lod_level=0, name="mean.out"
        )
        block.append_op(
            type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out}
        )
        learning_rate = 0.01
        adam_optimizer = self.MockAdam(
            learning_rate=learning_rate, beta1=0.9, beta2=0.999
        )
        params_grads = append_backward(mean_out)
        self.assertEqual(len(params_grads), 1)
        self.assertEqual(len(adam_optimizer.get_accumulators()), 0)
        with framework.program_guard(program, init_program):
            opts = adam_optimizer.apply_gradients(params_grads)
        self.assertEqual(len(opts), 2)
        self.assertEqual([op.type for op in opts], ["scale", "adam"])

        # Check accumulators
        accumulators = adam_optimizer.get_accumulators()
        self.assertEqual(len(accumulators), 4)
        self.assertTrue(adam_optimizer.get_moment1_str() in accumulators)
        self.assertTrue(adam_optimizer.get_moment2_str() in accumulators)
        moment1_acc = accumulators[adam_optimizer.get_moment1_str()]
        moment2_acc = accumulators[adam_optimizer.get_moment2_str()]
        self.assertEqual(len(moment1_acc), 1)
        self.assertEqual(len(moment2_acc), 1)
        self.assertTrue(mul_x.name in moment1_acc)
        self.assertTrue(mul_x.name in moment2_acc)

        # Check init_program
        init_ops = init_program.global_block().ops
        self.assertEqual(len(init_ops), 5)
        self.assertEqual(init_ops[-1].type, "fill_constant")
        self.assertAlmostEqual(init_ops[-1].attr('value'), learning_rate)


class TestAdamaxOptimizer(unittest.TestCase):
    class MockAdamax(optimizer.AdamaxOptimizer):
        def get_accumulators(self):
            return self._accumulators

        def get_moment_str(self):
            return self._moment_acc_str

        def get_inf_norm_str(self):
            return self._inf_norm_acc_str

    def test_adamax_optimizer(self):
        init_program = framework.Program()
        program = framework.Program()
        block = program.global_block()
        mul_x = block.create_parameter(
            dtype="float32",
            shape=[5, 10],
            lod_level=0,
            name="mul.x",
            optimize_attr={'learning_rate': 1.1},
        )
        mul_y = block.create_var(
            dtype="float32", shape=[10, 8], lod_level=0, name="mul.y"
        )
        mul_out = block.create_var(
            dtype="float32", shape=[5, 8], lod_level=0, name="mul.out"
        )
        block.append_op(
            type="mul",
            inputs={"X": mul_x, "Y": mul_y},
            outputs={"Out": mul_out},
            attrs={"x_num_col_dims": 1},
        )
        mean_out = block.create_var(
            dtype="float32", shape=[1], lod_level=0, name="mean.out"
        )
        block.append_op(
            type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out}
        )
        learning_rate = 0.01
        adamax_optimizer = self.MockAdamax(
            learning_rate=learning_rate, beta1=0.9, beta2=0.999
        )
        params_grads = append_backward(mean_out)
        self.assertEqual(len(params_grads), 1)
        self.assertEqual(len(adamax_optimizer.get_accumulators()), 0)
        with framework.program_guard(program, init_program):
            opts = adamax_optimizer.apply_gradients(params_grads)
        self.assertEqual(len(opts), 3)
        self.assertEqual([op.type for op in opts], ["scale", "adamax", "scale"])
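        # The trailing 'scale' op comes from the optimizer's finish-update
        # step, which scales the beta1 power accumulator after 'adamax' runs.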

        # Check accumulators
        accumulators = adamax_optimizer.get_accumulators()
        self.assertEqual(len(accumulators), 3)
        self.assertTrue(adamax_optimizer.get_moment_str() in accumulators)
        self.assertTrue(adamax_optimizer.get_inf_norm_str() in accumulators)
        moment_acc = accumulators[adamax_optimizer.get_moment_str()]
        inf_norm_acc = accumulators[adamax_optimizer.get_inf_norm_str()]
        self.assertEqual(len(moment_acc), 1)
        self.assertEqual(len(inf_norm_acc), 1)
        self.assertTrue(mul_x.name in moment_acc)
        self.assertTrue(mul_x.name in inf_norm_acc)

        # Check init_program
        init_ops = init_program.global_block().ops
        self.assertEqual(len(init_ops), 4)
        self.assertEqual(init_ops[-1].type, "fill_constant")
        self.assertAlmostEqual(init_ops[-1].attr('value'), learning_rate)


class TestDpsgdOptimizer(unittest.TestCase):
    def test_dpsgd_optimizer(self):
        def check_dpsgd_optimizer(optimizer_attr):
            init_program = framework.Program()
            program = framework.Program()
            block = program.global_block()
            mul_x = block.create_parameter(
                dtype="float32",
                shape=[5, 10],
                lod_level=0,
                name="mul.x",
                optimize_attr=optimizer_attr,
            )
            mul_y = block.create_var(
                dtype="float32", shape=[10, 8], lod_level=0, name="mul.y"
            )
            mul_out = block.create_var(
                dtype="float32", shape=[5, 8], lod_level=0, name="mul.out"
            )
            block.append_op(
                type="mul",
                inputs={"X": mul_x, "Y": mul_y},
                outputs={"Out": mul_out},
                attrs={"x_num_col_dims": 1},
            )
            mean_out = block.create_var(
                dtype="float32", shape=[1], lod_level=0, name="mean.out"
            )
            block.append_op(
                type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out}
            )
            dpsgd_optimizer = optimizer.DpsgdOptimizer(
                learning_rate=0.01, clip=100.0, batch_size=16.0, sigma=0.0
            )
            opts, _ = dpsgd_optimizer.minimize(mean_out, init_program)
            return opts

        opts = check_dpsgd_optimizer(
            {
                'learning_rate': 1.1,
                'clip': 100.0,
                'batch_size': 16.0,
                'sigma': 4.0,
            }
        )
        self.assertEqual(len(opts), 2)
        self.assertEqual([op.type for op in opts], ["scale", "dpsgd"])


class TestDecayedAdagradOptimizer(unittest.TestCase):
    class MockDecayedAdagrad(optimizer.DecayedAdagradOptimizer):
        def get_accumulators(self):
            return self._accumulators

        def get_moment_str(self):
            return self._moment_acc_str

    def test_decayed_adagrad_optimizer(self):
        init_program = framework.Program()
        program = framework.Program()
        block = program.global_block()
        mul_x = block.create_parameter(
            dtype="float32",
            shape=[5, 10],
            lod_level=0,
            name="mul.x",
            optimize_attr={'learning_rate': 1.1},
        )
        mul_y = block.create_var(
            dtype="float32", shape=[10, 8], lod_level=0, name="mul.y"
        )
        mul_out = block.create_var(
            dtype="float32", shape=[5, 8], lod_level=0, name="mul.out"
        )
        block.append_op(
            type="mul",
            inputs={"X": mul_x, "Y": mul_y},
            outputs={"Out": mul_out},
            attrs={"x_num_col_dims": 1},
        )
        mean_out = block.create_var(
            dtype="float32", shape=[1], lod_level=0, name="mean.out"
        )
        block.append_op(
            type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out}
        )
        learning_rate = 0.01
        decayed_adagrad_optimizer = self.MockDecayedAdagrad(
            learning_rate=learning_rate, decay=0.95, epsilon=1.0e-6
        )
        params_grads = append_backward(mean_out)
        self.assertEqual(len(params_grads), 1)
        self.assertEqual(len(decayed_adagrad_optimizer.get_accumulators()), 0)
        with framework.program_guard(program, init_program):
            opts = decayed_adagrad_optimizer.apply_gradients(params_grads)
        self.assertEqual(len(opts), 2)
        self.assertEqual([op.type for op in opts], ["scale", "decayed_adagrad"])

        # Check accumulators
        accumulators = decayed_adagrad_optimizer.get_accumulators()
        self.assertEqual(len(accumulators), 1)
        self.assertTrue(
            decayed_adagrad_optimizer.get_moment_str() in accumulators
        )
        moment_acc = accumulators[decayed_adagrad_optimizer.get_moment_str()]
        self.assertEqual(len(moment_acc), 1)
        self.assertTrue(mul_x.name in moment_acc)

        # Check init_program
        init_ops = init_program.global_block().ops
        self.assertEqual(len(init_ops), 2)
        self.assertEqual(init_ops[1].type, "fill_constant")
        self.assertAlmostEqual(init_ops[1].attr('value'), learning_rate)
        self.assertEqual(init_ops[0].type, "fill_constant")
        self.assertAlmostEqual(init_ops[0].attr('value'), 0.0)


class TestFtrlOptimizer(unittest.TestCase):
    class MockFtrl(optimizer.FtrlOptimizer):
        def get_accumulators(self):
            return self._accumulators

        def get_squared_str(self):
            return self._squared_acc_str

        def get_linear_str(self):
            return self._linear_acc_str

    def test_ftrl_optimizer(self):
        init_program = framework.Program()
        program = framework.Program()
        block = program.global_block()
        mul_x = block.create_parameter(
            dtype="float32",
            shape=[5, 10],
            lod_level=0,
            name="mul.x",
            optimize_attr={'learning_rate': 1.1},
        )
        mul_y = block.create_var(
            dtype="float32", shape=[10, 8], lod_level=0, name="mul.y"
        )
        mul_out = block.create_var(
            dtype="float32", shape=[5, 8], lod_level=0, name="mul.out"
        )
        block.append_op(
            type="mul",
            inputs={"X": mul_x, "Y": mul_y},
            outputs={"Out": mul_out},
            attrs={"x_num_col_dims": 1},
        )
        mean_out = block.create_var(
            dtype="float32", shape=[1], lod_level=0, name="mean.out"
        )
        block.append_op(
            type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out}
        )
        learning_rate = 0.01
        ftrl_optimizer = self.MockFtrl(
            learning_rate=learning_rate, l1=0.0, l2=0.0, lr_power=-0.5
        )
        params_grads = append_backward(mean_out)
        self.assertEqual(len(params_grads), 1)
        self.assertEqual(len(ftrl_optimizer.get_accumulators()), 0)
        with framework.program_guard(program, init_program):
            opts = ftrl_optimizer.apply_gradients(params_grads)
        self.assertEqual(len(opts), 2)
        self.assertEqual([op.type for op in opts], ["scale", "ftrl"])

        # Check accumulators
        accumulators = ftrl_optimizer.get_accumulators()
        self.assertEqual(len(accumulators), 2)
        self.assertTrue(ftrl_optimizer.get_squared_str() in accumulators)
        self.assertTrue(ftrl_optimizer.get_linear_str() in accumulators)
        squared_acc = accumulators[ftrl_optimizer.get_squared_str()]
        linear_acc = accumulators[ftrl_optimizer.get_linear_str()]
        self.assertEqual(len(squared_acc), 1)
        self.assertEqual(len(linear_acc), 1)
        self.assertTrue(mul_x.name in squared_acc)
        self.assertTrue(mul_x.name in linear_acc)

        # Check init_program
        init_ops = init_program.global_block().ops
        self.assertEqual(len(init_ops), 3)
        self.assertEqual(init_ops[-1].type, "fill_constant")
        self.assertAlmostEqual(init_ops[-1].attr('value'), learning_rate)


class TestLookaheadOptimizer(unittest.TestCase):
    def test_lookahead_optimizer(self):
        init_program = framework.Program()
        program = framework.Program()
        block = program.global_block()
        init_block = init_program.global_block()
        mul_x = block.create_parameter(
            dtype="float32",
            shape=[5, 10],
            lod_level=0,
            name="mul.x",
            optimize_attr={'learning_rate': 1.1},
        )
        init_mul_x = init_block.create_parameter(
            dtype="float32", shape=[5, 10], lod_level=0, name="mul.x"
        )
        mul_y = block.create_var(
            dtype="float32", shape=[10, 8], lod_level=0, name="mul.y"
        )
        mul_out = block.create_var(
            dtype="float32", shape=[5, 8], lod_level=0, name="mul.out"
        )
        mean_out = block.create_var(
            dtype="float32", shape=[1], lod_level=0, name="mean.out"
        )

        block.append_op(
            type="mul",
            inputs={"X": mul_x, "Y": mul_y},
            outputs={"Out": mul_out},
            attrs={"x_num_col_dims": 1},
        )
        block.append_op(
            type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out}
        )

        sgd = optimizer.SGD(learning_rate=0.01)
        lookahead = optimizer.LookaheadOptimizer(sgd, alpha=0.5, k=5)
        with framework.program_guard(program, init_program):
            opts, _ = lookahead.minimize(mean_out)
        self.assertEqual(len(opts), 2)
        self.assertEqual([op.type for op in opts], ["scale", "sgd"])


class TestRecomputeOptimizer(unittest.TestCase):
    def net(self, return_input=False, with_dropout=False, with_seed=False):
        program = framework.Program()
        block = program.global_block()
        mul_x = block.create_parameter(
            dtype="float32", shape=[5, 10], lod_level=0, name="mul.x"
        )
        mul_y = block.create_var(
            dtype="float32", shape=[10, 8], lod_level=0, name="mul.y"
        )
        mul_out = block.create_var(
            dtype="float32", shape=[5, 8], lod_level=0, name="mul.out"
        )

        if with_dropout is True:
            mul_out_drop = block.create_var(
                dtype="float32",
                shape=[5, 8],
                lod_level=0,
                name="mul.out.dropout",
            )
            mul_out_mask = block.create_var(
                dtype="uint8", shape=[5, 8], lod_level=0, name="mul.out.mask"
            )
            if with_seed is True:
                seed_out = block.create_var(
                    dtype="int32", shape=[1], name="seed.out"
                )

        b1 = block.create_parameter(
            dtype="float32", shape=[5, 8], lod_level=0, name="b1"
        )
        b1_out = block.create_var(
            dtype="float32", shape=[5, 8], lod_level=0, name="b1_out"
        )
        b2 = block.create_parameter(
            dtype="float32", shape=[5, 8], lod_level=0, name="b2"
        )
        b2_out = block.create_var(
            dtype="float32", shape=[5, 8], lod_level=0, name="b2_out"
        )
        mean_out = block.create_var(
            dtype="float32", shape=[1], lod_level=0, name="mean.out"
        )
        block.append_op(
            type="mul",
            inputs={"X": mul_x, "Y": mul_y},
            outputs={"Out": mul_out},
            attrs={"x_num_col_dims": 1},
        )

        if with_dropout is True:
            dropout_inputs = {'X': [mul_out]}
            if with_seed is True:
                block.append_op(
                    type='seed',
                    outputs={'Out': seed_out},
                    attrs={
                        'deterministic': True,
                        'rng_name': 'rng0',
                        'force_cpu': True,
                    },
                )
                dropout_inputs = {'X': [mul_out], 'Seed': [seed_out]}

            block.append_op(
                type='dropout',
                inputs=dropout_inputs,
                outputs={'Out': [mul_out_drop], 'Mask': [mul_out_mask]},
                attrs={
                    'dropout_prob': 0.5,
                },
            )
            block.append_op(
                type="elementwise_add",
                inputs={"X": mul_out_drop, "Y": b1},
                outputs={"Out": b1_out},
            )
        else:
            block.append_op(
                type="elementwise_add",
                inputs={"X": mul_out, "Y": b1},
                outputs={"Out": b1_out},
            )

        block.append_op(
            type="elementwise_add",
            inputs={"X": b1_out, "Y": b2},
            outputs={"Out": b2_out},
        )
        block.append_op(
            type="mean", inputs={"X": b2_out}, outputs={"Out": mean_out}
        )

        if return_input:
            return mul_x, mul_out, b1_out, b2_out, mean_out
        return mul_out, b1_out, b2_out, mean_out

    def test_no_checkpoint(self):
        mul_out, b1_out, b2_out, mean_out = self.net()
        self.assertEqual(len(mean_out.block.ops), 4)
        self.assertEqual(
            [op.type for op in mean_out.block.ops],
            ["mul", "elementwise_add", "elementwise_add", "mean"],
        )
        sgd_optimizer = optimizer.SGD(learning_rate=1.0)
        recompute_optimizer = optimizer.RecomputeOptimizer(sgd_optimizer)
        recompute_optimizer._set_checkpoints([])
        opts, params_grads = recompute_optimizer.minimize(mean_out)

        self.assertEqual(len(mean_out.block.ops), 12)
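        # With no checkpoints set, nothing is recomputed: the program is the
        # plain forward, backward and sgd update sequence checked below.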
        self.assertEqual(
            [op.type for op in mean_out.block.ops],
            [
                "mul",
                "elementwise_add",
                "elementwise_add",
                "mean",
                "fill_constant",
                "mean_grad",
                "elementwise_add_grad",
                "elementwise_add_grad",
                "mul_grad",
                "sgd",
                "sgd",
                "sgd",
            ],
        )

    def test_one_checkpoint(self):
        mul_out, b1_out, b2_out, mean_out = self.net()
        self.assertEqual(len(mean_out.block.ops), 4)
        self.assertEqual(
            [op.type for op in mean_out.block.ops],
            ["mul", "elementwise_add", "elementwise_add", "mean"],
        )
        sgd_optimizer = optimizer.SGD(learning_rate=1.0)
        recompute_optimizer = optimizer.RecomputeOptimizer(sgd_optimizer)
        recompute_optimizer._set_checkpoints([b1_out])
        opts, params_grads = recompute_optimizer.minimize(mean_out)

        self.assertEqual(len(mean_out.block.ops), 13)
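        # The extra forward 'mul' inside the backward section recomputes
        # mul.out, which was not kept because only b1_out is checkpointed.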
        self.assertEqual(
            [op.type for op in mean_out.block.ops],
            [
                "mul",
                "elementwise_add",
                "elementwise_add",
                "mean",
                "fill_constant",
                "mean_grad",
                "elementwise_add_grad",
                "mul",
                "elementwise_add_grad",
                "mul_grad",
                "sgd",
                "sgd",
                "sgd",
            ],
        )

    def test_str_checkpoints(self):
        mul_out, b1_out, b2_out, mean_out = self.net()
        self.assertEqual(len(mean_out.block.ops), 4)
        self.assertEqual(
            [op.type for op in mean_out.block.ops],
            ["mul", "elementwise_add", "elementwise_add", "mean"],
        )
        sgd_optimizer = optimizer.SGD(learning_rate=1.0)
        recompute_optimizer = optimizer.RecomputeOptimizer(sgd_optimizer)
        recompute_optimizer._set_checkpoints([b1_out.name])
        opts, params_grads = recompute_optimizer.minimize(mean_out)

        self.assertEqual(len(mean_out.block.ops), 13)
        self.assertEqual(
            [op.type for op in mean_out.block.ops],
            [
                "mul",
                "elementwise_add",
                "elementwise_add",
                "mean",
                "fill_constant",
                "mean_grad",
                "elementwise_add_grad",
                "mul",
                "elementwise_add_grad",
                "mul_grad",
                "sgd",
                "sgd",
                "sgd",
            ],
        )

    def test_multi_checkpoint(self):
        mul_out, b1_out, b2_out, mean_out = self.net()
        self.assertEqual(len(mean_out.block.ops), 4)
        self.assertEqual(
            [op.type for op in mean_out.block.ops],
            ["mul", "elementwise_add", "elementwise_add", "mean"],
        )
        sgd_optimizer = optimizer.SGD(learning_rate=1.0)
        recompute_optimizer = optimizer.RecomputeOptimizer(sgd_optimizer)
        recompute_optimizer._set_checkpoints([mul_out, b2_out])
        opts, params_grads = recompute_optimizer.minimize(mean_out)

        self.assertEqual(len(mean_out.block.ops), 13)
        self.assertEqual(
            [op.type for op in mean_out.block.ops],
            [
                "mul",
                "elementwise_add",
                "elementwise_add",
                "mean",
                "fill_constant",
                "mean_grad",
                "elementwise_add",
                "elementwise_add_grad",
                "elementwise_add_grad",
                "mul_grad",
                "sgd",
                "sgd",
                "sgd",
            ],
        )

    def test_adjacent_checkpoint(self):
        mul_out, b1_out, b2_out, mean_out = self.net()
        self.assertEqual(len(mean_out.block.ops), 4)
        self.assertEqual(
            [op.type for op in mean_out.block.ops],
            ["mul", "elementwise_add", "elementwise_add", "mean"],
        )
        sgd_optimizer = optimizer.SGD(learning_rate=1.0)
        recompute_optimizer = optimizer.RecomputeOptimizer(sgd_optimizer)
        recompute_optimizer._set_checkpoints([mul_out, b1_out])
        opts, params_grads = recompute_optimizer.minimize(mean_out)

        self.assertEqual(len(mean_out.block.ops), 12)
        self.assertEqual(
            [op.type for op in mean_out.block.ops],
            [
                "mul",
                "elementwise_add",
                "elementwise_add",
                "mean",
                "fill_constant",
                "mean_grad",
                "elementwise_add_grad",
                "elementwise_add_grad",
                "mul_grad",
                "sgd",
                "sgd",
                "sgd",
            ],
        )

    def test_out_of_order_checkpoint(self):
        mul_out, b1_out, b2_out, mean_out = self.net()
        self.assertEqual(len(mean_out.block.ops), 4)
        self.assertEqual(
            [op.type for op in mean_out.block.ops],
            ["mul", "elementwise_add", "elementwise_add", "mean"],
        )
        sgd_optimizer = optimizer.SGD(learning_rate=1.0)
        recompute_optimizer = optimizer.RecomputeOptimizer(sgd_optimizer)
        recompute_optimizer._set_checkpoints([b2_out, mul_out])
        opts, params_grads = recompute_optimizer.minimize(mean_out)

        self.assertEqual(len(mean_out.block.ops), 13)
        self.assertEqual(
            [op.type for op in mean_out.block.ops],
            [
                "mul",
                "elementwise_add",
                "elementwise_add",
                "mean",
                "fill_constant",
                "mean_grad",
                "elementwise_add",
                "elementwise_add_grad",
                "elementwise_add_grad",
                "mul_grad",
                "sgd",
                "sgd",
                "sgd",
            ],
        )

    def test_input_as_checkpoints(self):
        mul_x, mul_out, b1_out, b2_out, mean_out = self.net(return_input=True)
        self.assertEqual(len(mean_out.block.ops), 4)
        self.assertEqual(
            [op.type for op in mean_out.block.ops],
            ["mul", "elementwise_add", "elementwise_add", "mean"],
        )
        sgd_optimizer = optimizer.SGD(learning_rate=1.0)
        recompute_optimizer = optimizer.RecomputeOptimizer(sgd_optimizer)
        recompute_optimizer._set_checkpoints([mul_x, b2_out])
        opts, params_grads = recompute_optimizer.minimize(mean_out)

        self.assertEqual(len(mean_out.block.ops), 14)
        self.assertEqual(
            [op.type for op in mean_out.block.ops],
            [
                "mul",
                "elementwise_add",
                "elementwise_add",
                "mean",
                "fill_constant",
                "mean_grad",
                "mul",
                "elementwise_add",
                "elementwise_add_grad",
                "elementwise_add_grad",
                "mul_grad",
                "sgd",
                "sgd",
                "sgd",
            ],
        )

    def test_apply_gradients(self):
        mul_out, b1_out, b2_out, mean_out = self.net()
        sgd_optimizer = optimizer.SGD(learning_rate=1.0)
        recompute_optimizer = optimizer.RecomputeOptimizer(sgd_optimizer)
        recompute_optimizer._set_checkpoints([b1_out])
        # apply backward
        params_grads = recompute_optimizer.backward(
            mean_out,
            startup_program=None,
            parameter_list=None,
            no_grad_set=None,
        )

        # apply gradient
        program = mean_out.block.program
        with framework.program_guard(program, None):
            optimize_ops = recompute_optimizer.apply_gradients(params_grads)

        self.assertEqual(len(mean_out.block.ops), 13)
        self.assertEqual(
            [op.type for op in mean_out.block.ops],
            [
                "mul",
                "elementwise_add",
                "elementwise_add",
                "mean",
                "fill_constant",
                "mean_grad",
                "elementwise_add_grad",
                "mul",
                "elementwise_add_grad",
                "mul_grad",
                "sgd",
                "sgd",
                "sgd",
            ],
        )

    def test_load(self):
        mul_out, b1_out, b2_out, mean_out = self.net()
        sgd_optimizer = optimizer.SGD(learning_rate=1.0)
        recompute_optimizer = optimizer.RecomputeOptimizer(sgd_optimizer)
        recompute_optimizer._set_checkpoints([b1_out])
        try:
            state_dict = {}
            recompute_optimizer.load(state_dict)
        except NotImplementedError as e:
            self.assertEqual(
                "load function is not supported by Recompute Optimizer for now",
                str(e),
            )

    def test_dropout(self):
        """
        If there are dropout layers in the forward nets, we should add a
        seed op so that the recomputed dropout can reuse the same mask.
        """
        mul_out, b1_out, b2_out, mean_out = self.net(with_dropout=True)
        self.assertEqual(len(mean_out.block.ops), 5)
        self.assertEqual(
            [op.type for op in mean_out.block.ops],
            ["mul", "dropout", "elementwise_add", "elementwise_add", "mean"],
        )
        sgd_optimizer = optimizer.SGD(learning_rate=1.0)
        recompute_optimizer = optimizer.RecomputeOptimizer(sgd_optimizer)
        recompute_optimizer._set_checkpoints([b1_out])
        opts, params_grads = recompute_optimizer.minimize(mean_out)

        self.assertEqual(len(mean_out.block.ops), 17)
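        # A 'seed' op is inserted so the recomputed 'dropout' in the backward
        # section can reproduce the same mask as the forward one.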
        self.assertEqual(
            [op.type for op in mean_out.block.ops],
            [
                "mul",
                "seed",
                "dropout",
                "elementwise_add",
                "elementwise_add",
                "mean",
                "fill_constant",
                "mean_grad",
                "elementwise_add_grad",
                "mul",
                "dropout",
                "elementwise_add_grad",
                "dropout_grad",
                "mul_grad",
                "sgd",
                "sgd",
                "sgd",
            ],
        )

    def test_dropout_with_determinate_seed(self):
        mul_out, b1_out, b2_out, mean_out = self.net(
            with_dropout=True, with_seed=True
        )
        self.assertEqual(len(mean_out.block.ops), 6)
        self.assertEqual(
            [op.type for op in mean_out.block.ops],
            [
                "mul",
                "seed",
                "dropout",
                "elementwise_add",
                "elementwise_add",
                "mean",
            ],
        )
        sgd_optimizer = optimizer.SGD(learning_rate=1.0)
        recompute_optimizer = optimizer.RecomputeOptimizer(sgd_optimizer)
        recompute_optimizer._set_checkpoints([b1_out])
        opts, params_grads = recompute_optimizer.minimize(mean_out)

        self.assertEqual(len(mean_out.block.ops), 17)
        self.assertEqual(
            [op.type for op in mean_out.block.ops],
            [
                "mul",
                "seed",
                "dropout",
                "elementwise_add",
                "elementwise_add",
                "mean",
                "fill_constant",
                "mean_grad",
                "elementwise_add_grad",
                "mul",
                "dropout",
                "elementwise_add_grad",
                "dropout_grad",
                "mul_grad",
                "sgd",
                "sgd",
                "sgd",
            ],
        )

    def test_dropout_with_seed(self):
        """
        When we recompute a dropout op, make sure that the recomputed output
        is the same as the original variable.
        """

        def gen_data():
            return {
                "x": np.random.random(size=(100, 3)).astype('float32'),
1161
                "y": np.random.randint(2, size=(100, 1)).astype('int64'),
1162 1163 1164
            }

        def mlp(input_x, input_y):
            drop_res = fluid.layers.dropout(
                input_x, dropout_prob=0.5, name="dropout_with_seed_cpu"
            )
            prediction = fluid.layers.fc(
                input=[drop_res], size=2, act='softmax'
            )
            cost = fluid.layers.cross_entropy(input=prediction, label=input_y)
            sum_cost = paddle.mean(cost)
            return drop_res, prediction, sum_cost

        main_program = Program()
        startup_program = Program()
        scope = fluid.Scope()
        with fluid.scope_guard(scope):
            with program_guard(main_program, startup_program):
                input_x = fluid.layers.data(
                    name="x", shape=[3], dtype='float32'
                )
                input_y = fluid.layers.data(name="y", shape=[1], dtype='int64')
                drop_res, prediction, cost = mlp(input_x, input_y)
                sgd = fluid.optimizer.Adam(learning_rate=0.01)
                sgd = fluid.optimizer.RecomputeOptimizer(sgd)
                sgd._set_checkpoints([prediction])
                sgd.minimize(cost)

                place = fluid.CPUPlace()
                exe = fluid.Executor(place)
                exe.run(fluid.default_startup_program())
                feed_data = gen_data()
                drop_vec = exe.run(
                    feed=feed_data,
                    program=fluid.default_main_program(),
                    fetch_list=[
                        "dropout_with_seed_cpu.tmp_1",
                        "dropout_with_seed_cpu.tmp_1.subprog_0",
                    ],
                )
                self.assertEqual(drop_vec[0].tolist(), drop_vec[1].tolist())


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestRecomputeOptimizerCUDA(unittest.TestCase):
    def test_dropout_with_seed(self):
        """
        When we recompute a dropout op, make sure that the recomputed output
        is the same as the original variable.
        """

        def gen_data():
            return {
                "x": np.random.random(size=(100, 3)).astype('float32'),
                "y": np.random.randint(2, size=(100, 1)).astype('int64'),
            }

        def mlp(input_x, input_y):
            drop_res = fluid.layers.dropout(
                input_x, dropout_prob=0.5, name="dropout_with_seed_gpu"
            )
            prediction = fluid.layers.fc(
                input=[drop_res], size=2, act='softmax'
            )
            cost = fluid.layers.cross_entropy(input=prediction, label=input_y)
            sum_cost = paddle.mean(cost)
            return drop_res, prediction, sum_cost

        main_program = Program()
        startup_program = Program()
        scope = fluid.Scope()
        with fluid.scope_guard(scope):
            with program_guard(main_program, startup_program):
                input_x = fluid.layers.data(
                    name="x", shape=[3], dtype='float32'
                )
                input_y = fluid.layers.data(name="y", shape=[1], dtype='int64')
                drop_res, prediction, cost = mlp(input_x, input_y)
                sgd = fluid.optimizer.Adam(learning_rate=0.01)
                sgd = fluid.optimizer.RecomputeOptimizer(sgd)
                sgd._set_checkpoints([prediction])
                sgd.minimize(cost)

                place = fluid.CUDAPlace(0)
                exe = fluid.Executor(place)
                exe.run(fluid.default_startup_program())
                feed_data = gen_data()
                drop_vec = exe.run(
                    feed=feed_data,
                    program=fluid.default_main_program(),
                    fetch_list=[
                        "dropout_with_seed_gpu.tmp_1",
                        "dropout_with_seed_gpu.tmp_1.subprog_0",
                    ],
                )
                self.assertEqual(drop_vec[0].tolist(), drop_vec[1].tolist())


class TestGradientMergeOptimizer(unittest.TestCase):
    def net(self):
        program = framework.Program()
        block = program.global_block()
        mul_x = block.create_parameter(
            dtype="float32", shape=[5, 10], lod_level=0, name="mul.x"
        )
        mul_y = block.create_var(
            dtype="float32", shape=[10, 8], lod_level=0, name="mul.y"
        )
        mul_out = block.create_var(
            dtype="float32", shape=[5, 8], lod_level=0, name="mul.out"
        )
        b1 = block.create_parameter(
            dtype="float32", shape=[5, 8], lod_level=0, name="b1"
        )
        b1_out = block.create_var(
            dtype="float32", shape=[5, 8], lod_level=0, name="b1_out"
        )
        mean_out = block.create_var(
            dtype="float32", shape=[1], lod_level=0, name="mean.out"
        )
        block.append_op(
            type="mul",
            inputs={"X": mul_x, "Y": mul_y},
            outputs={"Out": mul_out},
            attrs={"x_num_col_dims": 1},
        )
        block.append_op(
            type="elementwise_add",
            inputs={"X": mul_out, "Y": b1},
            outputs={"Out": b1_out},
        )
        block.append_op(
            type="mean", inputs={"X": b1_out}, outputs={"Out": mean_out}
        )
        return mean_out

    def test_program_desc(self):
        cost = self.net()
        main_program = cost.block.program
        init_program = framework.Program()
        self.assertEqual(main_program.num_blocks, 1)
        self.assertEqual(len(cost.block.ops), 3)
        self.assertEqual(
            [op.type for op in cost.block.ops],
            ["mul", "elementwise_add", "mean"],
        )

        opt = optimizer.SGD(learning_rate=1.0)
        opt = optimizer.GradientMergeOptimizer(opt, k_steps=4)
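        # Gradients are accumulated for k_steps=4 iterations; the actual sgd
        # update runs in the conditional optimize block checked below.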
        with framework.program_guard(main_program, init_program):
            ops, params_grads = opt.minimize(cost)

        self.assertEqual(main_program.num_blocks, 2)

        # main block
        self.assertEqual(len(cost.block.ops), 13)
        self.assertEqual(
            [op.type for op in cost.block.ops],
            [
                'mul',
                'elementwise_add',
                'mean',
                'fill_constant',
                'mean_grad',
                'elementwise_add_grad',
                'mul_grad',
                'increment',  # step += 1
                'elementwise_mod',  # step %= k_steps
                'equal',  # cond_var == (step == 0)
                'elementwise_add',
                'elementwise_add',
                'conditional_block',
            ],
        )

        # optimize block
        self.assertEqual(len(main_program.block(1).ops), 6)
        self.assertEqual(
            [op.type for op in main_program.block(1).ops],
            ['scale', 'scale', 'sgd', 'sgd', 'fill_constant', 'fill_constant'],
        )


class TestOptimizerDtype(unittest.TestCase):
    '''
    The dtype of the optimizer should be inferred from its parameters, and the
    learning rate is created with the same dtype.
    '''

    def check_with_dtype(self, dtype):
        class MyLayer(paddle.nn.Layer):
            def __init__(self, dtype):
                super().__init__()
                self._w = self.create_parameter([2, 3], dtype=dtype)
                self._b = self.create_parameter([2, 3], dtype=dtype)

            def forward(self, x):
                return x * self._w + self._b

        with paddle.fluid.dygraph.guard():
            model = MyLayer(dtype)
            x = paddle.rand([10, 2, 3], dtype=dtype)
            loss = model(x)
            adam = paddle.optimizer.Adam(parameters=model.parameters())
            loss.backward()
            adam.step()
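            # The optimizer dtype should have been inferred from the
            # parameters' dtype.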
            self.assertEqual(adam._dtype, convert_np_dtype_to_dtype_(dtype))

    def test_float64(self):
        self.check_with_dtype('float64')

    def test_float32(self):
        self.check_with_dtype('float32')

    def test_api_eager_dygraph(self):
        with _test_eager_guard():
            self.test_float64()
            self.test_float32()


class TestMasterWeightSaveForFP16(unittest.TestCase):
    '''
    For AMP O2, some optimizers (Momentum, Adam, ...) create FP32 master
    weights for the parameters to improve accuracy.
    Master weights are saved by optimizer.state_dict().
    '''

    def setUp(self):
        self.temp_dir = tempfile.TemporaryDirectory()

    def tearDown(self):
        self.temp_dir.cleanup()

    def check_with_opt_state_dict(self, use_save_load=True):
        paddle.seed(100)
        numpy.random.seed(100)

        class SimpleNet(paddle.nn.Layer):
            def __init__(self, input_size, output_size):
                super().__init__()
                self.linears = paddle.nn.LayerList(
                    [
                        paddle.nn.Linear(input_size, output_size)
                        for i in range(1)
                    ]
                )
1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434

            def forward(self, x):
                for i, l in enumerate(self.linears):
                    x = self.linears[i](x)
                return x

        input_size = 2  # can be set to a larger value
        output_size = 2  # can be set to a larger value
        batch_size = 2  # ideally a multiple of 8
        nums_batch = 10

        class RandomDataset(Dataset):
            def __init__(self, num_samples):
                self.num_samples = num_samples

            def __getitem__(self, idx):
                data = numpy.random.random([input_size]).astype('float16')
                label = numpy.random.random([output_size]).astype('float16')
                return data, label

            def __len__(self):
                return self.num_samples

        dataset = RandomDataset(nums_batch * batch_size)
        loader = paddle.io.DataLoader(
            dataset,
            batch_size=batch_size,
            shuffle=False,
            drop_last=True,
            num_workers=0,
        )

        mse = paddle.nn.MSELoss()
        model = SimpleNet(input_size, output_size)  # define the model
        optimizer = paddle.optimizer.Momentum(
            learning_rate=0.0001,
            parameters=model.parameters(),
            multi_precision=True,
        )  # define the optimizer
        scaler = paddle.amp.GradScaler(init_loss_scaling=1024)
        model = paddle.amp.decorate(models=model, level='O2')

        for i, (data, label) in enumerate(loader):
            with paddle.amp.auto_cast(level='O2'):
                output = model(data)
                loss = mse(output, label)
            scaled = scaler.scale(loss)
            scaled.backward()
            scaler.step(optimizer)
            scaler.update()
            optimizer.clear_grad(set_to_zero=False)

            if use_save_load and i == 5:
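                # Round-trip model and optimizer state (including the FP32
                # master weights) through save/load in the middle of training.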
                model_path = os.path.join(self.temp_dir.name, "model.pdparams")
                optimizer_path = os.path.join(self.temp_dir.name, "opt.pdopt")
                paddle.save(model.state_dict(), model_path)
                paddle.save(optimizer.state_dict(), optimizer_path)
                model.set_state_dict(paddle.load(model_path))
                optimizer.set_state_dict(paddle.load(optimizer_path))

        return loss.numpy()

    def test_with_state_dict(self):
        if core.is_compiled_with_cuda():
            with fluid.dygraph.guard():
                out_use_state_dict = self.check_with_opt_state_dict(
                    use_save_load=True
                )
                out_no_state_dict = self.check_with_opt_state_dict(
                    use_save_load=False
                )
            np.testing.assert_array_equal(out_use_state_dict, out_no_state_dict)


if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()