#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import numpy as np
import math
import unittest

import paddle
import paddle.fluid as fluid
import paddle.fluid.framework as framework
import paddle.fluid.initializer as initializer
from paddle.fluid.core import VarDesc
from paddle.regularizer import L2Decay

DELTA = 0.00001


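# Helper used by the fp16/bf16 tests below: checks that the op appended
# after an initializer is the fp32 -> fp16/bf16 cast.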
def check_cast_op(op):
    return op.type == 'cast' and \
           op.attr('in_dtype') == VarDesc.VarType.FP32 and \
           op.attr('out_dtype') in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]


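# Helper for the dygraph distribution checks: a 10-bin histogram over
# [-1, 1] normalized to probabilities, plus the uniform reference
# (0.1 per bin).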
def output_hist(out):
    hist, _ = np.histogram(out, range=(-1, 1))
    hist = hist.astype("float32")
    hist /= float(out.size)
    prob = 0.1 * np.ones((10))
    return hist, prob


class TestConstantInitializer(unittest.TestCase):

    def test_calculate_gain(self):
        self.assertEqual(paddle.nn.initializer.calculate_gain('sigmoid'), 1)
        self.assertEqual(paddle.nn.initializer.calculate_gain('linear'), 1)
        self.assertEqual(paddle.nn.initializer.calculate_gain('conv2d'), 1)
        self.assertEqual(paddle.nn.initializer.calculate_gain('tanh'), 5.0 / 3)
        self.assertEqual(paddle.nn.initializer.calculate_gain('relu'),
                         math.sqrt(2.0))
        self.assertEqual(paddle.nn.initializer.calculate_gain('leaky_relu', 1),
                         1)
        self.assertEqual(paddle.nn.initializer.calculate_gain('selu'), 3.0 / 4)

    def test_constant_initializer_default_value(self, dtype="float32"):
        """Test the constant initializer with default value
        """
        program = framework.Program()
        block = program.global_block()
        for _ in range(2):
            block.create_parameter(
                dtype=dtype,
                shape=[5, 10],
                lod_level=0,
                name="param",
                initializer=initializer.ConstantInitializer())
        num_ops = 1
        self.assertEqual(len(block.ops), num_ops)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'fill_constant')
        self.assertAlmostEqual(init_op.attr('value'), 0.0, delta=DELTA)
        return block

    def test_constant_initializer(self, dtype="float32"):
        """Test constant initializer with supplied value
        """
        program = framework.Program()
        block = program.global_block()
        for _ in range(2):
            block.create_parameter(
                dtype=dtype,
                shape=[5, 10],
                lod_level=0,
                name="param",
                initializer=initializer.ConstantInitializer(2.3))
        num_ops = 1
        self.assertEqual(len(block.ops), num_ops)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'fill_constant')
        self.assertAlmostEqual(init_op.attr('value'), 2.3, delta=DELTA)
        return block

    def test_constant_initializer_fp16(self):
        """Test constant initializer with float16
        """
        self.test_constant_initializer_default_value("float16")
        self.test_constant_initializer("float16")

    def test_constant_initializer_bf16(self):
        """Test constant initializer with bfloat16
           No cast operator is added in this case
        """
        self.test_constant_initializer_default_value("uint16")
        self.test_constant_initializer("uint16")


class TestUniformInitializer(unittest.TestCase):

    def test_uniform_initializer_default_value(self, dtype="float32"):
        """Test the uniform initializer with default value
        """
        program = framework.Program()
        block = program.global_block()
        for _ in range(2):
            block.create_parameter(dtype=dtype,
                                   shape=[5, 10],
                                   lod_level=0,
                                   name="param",
                                   initializer=initializer.UniformInitializer())
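        # For fp16 a cast op follows the fp32 uniform_random
        # (see test_uniform_initializer_fp16 below).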
        num_ops = 2 if dtype == "float16" else 1
        self.assertEqual(len(block.ops), num_ops)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'uniform_random')
        self.assertAlmostEqual(init_op.attr('min'), -1.0, delta=DELTA)
        self.assertAlmostEqual(init_op.attr('max'), 1.0, delta=DELTA)
        self.assertEqual(init_op.attr('seed'), 0)
        return block

    def test_uniform_initializer_random_seed(self):
        """Test the uniform initializer with manually setting seed
        """
        program = framework.Program()
        program.random_seed = 123
        block = program.global_block()
        for _ in range(2):
            block.create_parameter(dtype="float32",
                                   shape=[5, 10],
                                   lod_level=0,
                                   name="param1",
                                   initializer=initializer.UniformInitializer())
            block.create_parameter(
                dtype="float32",
                shape=[5, 10],
                lod_level=0,
                name="param2",
                initializer=initializer.UniformInitializer(seed=456))
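        # The explicit seed=456 overrides the program-level random_seed=123,
        # which still applies to the first parameter.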
        init_op = block.ops[1]
        self.assertEqual(init_op.attr("seed"), 456)
        init_op1 = block.ops[0]
        self.assertEqual(init_op1.attr("seed"), 123)

    def test_uniform_initializer(self, dtype="float32"):
        """Test uniform initializer with supplied attributes
        """
        program = framework.Program()
        block = program.global_block()
        for _ in range(2):
            block.create_parameter(dtype=dtype,
                                   shape=[5, 10],
                                   lod_level=0,
                                   name="param",
                                   initializer=initializer.UniformInitializer(
                                       -4.2, 3.1, 123))
        num_ops = 2 if dtype == "float16" else 1
        self.assertEqual(len(block.ops), num_ops)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'uniform_random')
        self.assertAlmostEqual(init_op.attr('min'), -4.2, delta=DELTA)
        self.assertAlmostEqual(init_op.attr('max'), 3.1, delta=DELTA)
        self.assertEqual(init_op.attr('seed'), 123)
        return block

    def test_uniform_initializer_two_op(self, dtype="float32"):
        """Test uniform initializer with supplied attributes
        """
        program = framework.Program()
        block = program.global_block()
        for i in range(2):
            block.create_parameter(dtype=dtype,
                                   shape=[5, 10],
                                   lod_level=0,
                                   name="param",
                                   initializer=initializer.UniformInitializer(
                                       -4.2, float(i), 123))
        num_ops = 2 if dtype == "float16" else 1
        self.assertEqual(len(block.ops), num_ops)
        init_op0 = block.ops[0]
        self.assertEqual(init_op0.type, 'uniform_random')
        self.assertAlmostEqual(init_op0.attr('min'), -4.2, delta=DELTA)
        self.assertAlmostEqual(init_op0.attr('max'), 0.0, delta=DELTA)
        self.assertEqual(init_op0.attr('seed'), 123)
        return block

    def test_uniform_initializer_fp16(self):
        """Test uniform initializer with float16
        """
        block = self.test_uniform_initializer_default_value("float16")
        self.assertTrue(check_cast_op(block.ops[1]))
        block = self.test_uniform_initializer(dtype="float16")
        self.assertTrue(check_cast_op(block.ops[1]))
        block = self.test_uniform_initializer_two_op("float16")
        self.assertTrue(check_cast_op(block.ops[1]))

    def test_uniform_initializer_bf16(self):
        """Test uniform initializer with bfloat16
           No cast operator is added in this case
        """
        block = self.test_uniform_initializer_default_value("uint16")
        block = self.test_uniform_initializer(dtype="uint16")
        block = self.test_uniform_initializer_two_op("uint16")


class TestNormalInitializer(unittest.TestCase):

    def test_normal_initializer_default_value(self):
        """Test the normal initializer with default value
        """
        program = framework.Program()
        block = program.global_block()
        for _ in range(2):
            block.create_parameter(dtype="float32",
                                   shape=[5, 10],
                                   lod_level=0,
                                   name="param",
                                   initializer=initializer.NormalInitializer())
        self.assertEqual(len(block.ops), 1)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'gaussian_random')
        self.assertAlmostEqual(init_op.attr('mean'), 0.0, delta=DELTA)
        self.assertAlmostEqual(init_op.attr('std'), 1.0, delta=DELTA)
        self.assertEqual(init_op.attr('seed'), 0)

    def test_normal_initializer(self, dtype="float32"):
        """Test normal initializer with supplied attributes
        """
        program = framework.Program()
        block = program.global_block()
        for _ in range(2):
            block.create_parameter(dtype=dtype,
                                   shape=[5, 10],
                                   lod_level=0,
                                   name="param",
                                   initializer=initializer.NormalInitializer(
                                       2.3, 1.9, 123))
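        # Both fp16 and bf16 gaussian params get a trailing cast op.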
        num_ops = 2 if (dtype == "float16" or dtype == "uint16") else 1
        self.assertEqual(len(block.ops), num_ops)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'gaussian_random')
        self.assertAlmostEqual(init_op.attr('mean'), 2.3, delta=DELTA)
        self.assertAlmostEqual(init_op.attr('std'), 1.9, delta=DELTA)
        self.assertEqual(init_op.attr('seed'), 123)
        return block

    def test_normal_initializer_fp16(self):
        """Test normal initializer with float16
        """
        self.test_normal_initializer("float16")

    def test_normal_initializer_bf16(self):
        """Test normal initializer with bfloat16
        """
        self.test_normal_initializer("uint16")


class TestXavierInitializer(unittest.TestCase):

    def test_uniform_xavier_initializer(self):
        """Test Xavier initializer with uniform distribution on
           for matrix multiply.
        """
        program = framework.Program()
        block = program.global_block()
        for _ in range(2):
            param = block.create_parameter(
                dtype="float32",
                shape=[5, 10],
                lod_level=0,
                name="param",
                initializer=initializer.XavierInitializer())
        self.assertEqual(len(block.ops), 1)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'uniform_random')
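        # Xavier/Glorot uniform bound: sqrt(6 / (fan_in + fan_out)).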
        limit = np.sqrt(6.0 / (param.shape[0] + param.shape[1]))
        self.assertAlmostEqual(init_op.attr('min'), -limit, delta=DELTA)
        self.assertAlmostEqual(init_op.attr('max'), limit, delta=DELTA)
        self.assertEqual(init_op.attr('seed'), 0)

    def test_uniform_xavier_initializer_conv(self):
        """Test Xavier initializer with uniform distribution on
           for convolutions.
        """
        program = framework.Program()
        block = program.global_block()
        for _ in range(2):
            param = block.create_parameter(
                dtype="float32",
                shape=[5, 10, 15, 20],
                lod_level=0,
                name="param",
                initializer=initializer.XavierInitializer())
        self.assertEqual(len(block.ops), 1)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'uniform_random')
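        # For conv kernels each fan includes the receptive field
        # (kernel_h * kernel_w).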
        receptive_field_size = float(15 * 20)
        limit = np.sqrt(
            6.0 / ((param.shape[0] + param.shape[1]) * receptive_field_size))
        self.assertAlmostEqual(init_op.attr('min'), -limit, delta=DELTA)
        self.assertAlmostEqual(init_op.attr('max'), limit, delta=DELTA)
        self.assertEqual(init_op.attr('seed'), 0)

    def test_normal_xavier_initializer(self):
        """Test Xavier initializer with normal distribution on
           for matrix multiply.
        """
        program = framework.Program()
        block = program.global_block()
        for _ in range(2):
            param = block.create_parameter(
                dtype="float32",
                shape=[5, 10],
                lod_level=0,
                name="param",
                initializer=initializer.XavierInitializer(uniform=False))
        self.assertEqual(len(block.ops), 1)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'gaussian_random')
        std = np.sqrt(2.0 / (param.shape[0] + param.shape[1]))
        self.assertAlmostEqual(init_op.attr('mean'), 0.0, delta=DELTA)
        self.assertAlmostEqual(init_op.attr('std'), std, delta=DELTA)
        self.assertEqual(init_op.attr('seed'), 0)

    def test_normal_xavier_initializer_conv(self):
        """Test Xavier initializer with normal distribution on
           for convolutions.
        """
        program = framework.Program()
        block = program.global_block()
        for _ in range(2):
            param = block.create_parameter(
                dtype="float32",
                shape=[5, 10, 15, 20],
                lod_level=0,
                name="param",
                initializer=initializer.XavierInitializer(uniform=False))
        self.assertEqual(len(block.ops), 1)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'gaussian_random')
        receptive_field_size = float(15 * 20)
        std = np.sqrt(
            2.0 / ((param.shape[0] + param.shape[1]) * receptive_field_size))
        self.assertAlmostEqual(init_op.attr('mean'), 0.0, delta=DELTA)
        self.assertAlmostEqual(init_op.attr('std'), std, delta=DELTA)
        self.assertEqual(init_op.attr('seed'), 0)

    def test_xavier_initializer_supplied_arguments(self,
                                                   dtype="float32",
                                                   uniform=True):
        """Test the Xavier initializer with supplied arguments
        """
        program = framework.Program()
        block = program.global_block()
        for _ in range(2):
            block.create_parameter(dtype=dtype,
                                   shape=[5, 10],
                                   lod_level=0,
                                   name="param",
                                   initializer=initializer.XavierInitializer(
                                       uniform=uniform,
                                       fan_in=12,
                                       fan_out=23,
                                       seed=134))
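        # fp16 always needs a trailing cast op; bf16 (uint16) needs one only
        # on the gaussian path (see test_xavier_initializer_bf16 below).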
        num_ops = 2 if (dtype == "float16" or
                        (dtype == "uint16" and not uniform)) else 1
        self.assertEqual(len(block.ops), num_ops)
        init_op = block.ops[0]
        if uniform:
            self.assertEqual(init_op.type, 'uniform_random')
            limit = np.sqrt(6.0 / (12 + 23))
            self.assertAlmostEqual(init_op.attr('min'), -limit, delta=DELTA)
            self.assertAlmostEqual(init_op.attr('max'), limit, delta=DELTA)
        else:
            self.assertEqual(init_op.type, 'gaussian_random')
        self.assertEqual(init_op.attr('seed'), 134)
        return block

    def test_xavier_initializer_fp16(self):
        """Test the Xavier initializer with float16
        """
        block = self.test_xavier_initializer_supplied_arguments("float16")
        self.assertTrue(check_cast_op(block.ops[1]))

    def test_xavier_initializer_bf16(self):
        """Test the Xavier initializer with bfloat16
        """
        block_uniform = self.test_xavier_initializer_supplied_arguments(
            "uint16")
        self.assertEqual(len(block_uniform.ops), 1)
        block_gaussian = self.test_xavier_initializer_supplied_arguments(
            "uint16", False)
        self.assertTrue(check_cast_op(block_gaussian.ops[1]))


class TestMSRAInitializer(unittest.TestCase):

    def test_uniform_msra_initializer(self):
        """Test MSRA initializer with uniform distribution on
           for matrix multiply.
        """
        program = framework.Program()
        block = program.global_block()
        for _ in range(2):
            param = block.create_parameter(
                dtype="float32",
                shape=[5, 10],
                lod_level=0,
                name="param",
                initializer=initializer.MSRAInitializer())
        self.assertEqual(len(block.ops), 1)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'uniform_random')
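        # MSRA/He uniform bound: sqrt(6 / fan_in).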
        limit = np.sqrt(6.0 / param.shape[0])
        self.assertAlmostEqual(init_op.attr('min'), -limit, delta=DELTA)
        self.assertAlmostEqual(init_op.attr('max'), limit, delta=DELTA)
        self.assertEqual(init_op.attr('seed'), 0)

    def test_uniform_msra_initializer_conv(self):
        """Test MSRA initializer with uniform distribution on
           for convolutions.
        """
        program = framework.Program()
        block = program.global_block()
        for _ in range(2):
            param = block.create_parameter(
                dtype="float32",
                shape=[5, 10, 15, 20],
                lod_level=0,
                name="param",
                initializer=initializer.MSRAInitializer())
        self.assertEqual(len(block.ops), 1)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'uniform_random')
        receptive_field_size = float(15 * 20)
        limit = np.sqrt(6.0 / (param.shape[1] * receptive_field_size))
        self.assertAlmostEqual(init_op.attr('min'), -limit, delta=DELTA)
        self.assertAlmostEqual(init_op.attr('max'), limit, delta=DELTA)
        self.assertEqual(init_op.attr('seed'), 0)

    def test_normal_msra_initializer(self):
        """Test MSRA initializer with normal distribution on
           for matrix multiply.
        """
        program = framework.Program()
        block = program.global_block()
        for _ in range(2):
            param = block.create_parameter(
                dtype="float32",
                shape=[5, 10],
                lod_level=0,
                name="param",
                initializer=initializer.MSRAInitializer(uniform=False))
        self.assertEqual(len(block.ops), 1)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'gaussian_random')
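        # MSRA/He normal std: sqrt(2 / fan_in).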
        std = np.sqrt(2.0 / param.shape[0])
        self.assertAlmostEqual(init_op.attr('mean'), 0.0, delta=DELTA)
        self.assertAlmostEqual(init_op.attr('std'), std, delta=DELTA)
        self.assertEqual(init_op.attr('seed'), 0)

    def test_normal_msra_initializer_conv(self):
        """Test MSRA initializer with normal distribution on
           for convolutions.
        """
        program = framework.Program()
        block = program.global_block()
        for _ in range(2):
            param = block.create_parameter(
                dtype="float32",
                shape=[5, 10, 15, 20],
                lod_level=0,
                name="param",
                initializer=initializer.MSRAInitializer(uniform=False))
        self.assertEqual(len(block.ops), 1)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'gaussian_random')
        receptive_field_size = float(15 * 20)
        std = np.sqrt(2.0 / (param.shape[1] * receptive_field_size))
        self.assertAlmostEqual(init_op.attr('mean'), 0.0, delta=DELTA)
        self.assertAlmostEqual(init_op.attr('std'), std, delta=DELTA)
        self.assertEqual(init_op.attr('seed'), 0)

    def test_msra_initializer_supplied_arguments(self, dtype="float32"):
        """Test the MSRA initializer with supplied arguments
        """
        program = framework.Program()
        block = program.global_block()
        for _ in range(2):
            block.create_parameter(dtype=dtype,
                                   shape=[5, 10],
                                   lod_level=0,
                                   name="param",
                                   initializer=initializer.MSRAInitializer(
                                       fan_in=12, seed=134))
        num_ops = 2 if dtype == "float16" else 1
        self.assertEqual(len(block.ops), num_ops)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'uniform_random')
        limit = np.sqrt(6.0 / 12)
        self.assertAlmostEqual(init_op.attr('min'), -limit, delta=DELTA)
        self.assertAlmostEqual(init_op.attr('max'), limit, delta=DELTA)
        self.assertEqual(init_op.attr('seed'), 134)
        return block

    def test_msra_initializer_fp16(self):
        """Test the MSRA initializer with float16
        """
        block = self.test_msra_initializer_supplied_arguments("float16")
        self.assertTrue(check_cast_op(block.ops[1]))

    def test_msra_initializer_bf16(self):
        """Test the MSRA initializer with bfloat16
        """
        block = self.test_msra_initializer_supplied_arguments("uint16")


class TestBilinearInitializer(unittest.TestCase):

    def test_bilinear_initializer(self, dtype="float32"):
        """Test the bilinear initializer with supplied arguments
        """
        program = framework.Program()
        block = program.global_block()
        for _ in range(2):
            block.create_parameter(
                dtype=dtype,
                shape=[8, 1, 3, 3],
                lod_level=0,
                name="param",
                initializer=initializer.BilinearInitializer())
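        # The bilinear weights are assigned in fp32, so fp16/bf16/fp64
        # params need a second (cast) op.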
        num_ops = 2 if dtype in ["float16", "uint16", "float64"] else 1
        self.assertEqual(len(block.ops), num_ops)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'assign_value')
        return block

    def test_bilinear_initializer_fp64(self):
        self.test_bilinear_initializer(dtype='float64')

    def test_bilinear_initializer_fp16(self):
        """Test the bilinear initializer with supplied arguments
        """
        block = self.test_bilinear_initializer("float16")
        self.assertTrue(check_cast_op(block.ops[1]))

    def test_bilinear_initializer_bf16(self):
        """Test the bilinear initializer with supplied arguments
        """
        block = self.test_bilinear_initializer("uint16")
        self.assertTrue(check_cast_op(block.ops[1]))

    def test_type_error(self):
        self.assertRaises(TypeError, self.test_bilinear_initializer, 'int32')


class TestBilinearInitializerDygraphAPI(unittest.TestCase):

    def func_test_case(self):
        factor = 2
        C = 2
        B = 8
        H = W = 32
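        # Standard bilinear-upsampling deconv configuration for integer
        # upscale `factor`: kernel 2f - f % 2, padding ceil((f - 1) / 2),
        # stride f.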
        w_attr = paddle.ParamAttr(learning_rate=0.,
                                  regularizer=L2Decay(0.),
                                  initializer=initializer.BilinearInitializer())
        data = paddle.rand([B, 3, H, W], dtype='float32')
        conv_up = paddle.nn.Conv2DTranspose(3,
                                            out_channels=C,
                                            kernel_size=2 * factor - factor % 2,
                                            padding=int(
                                                math.ceil((factor - 1) / 2.)),
                                            stride=factor,
                                            weight_attr=w_attr,
                                            bias_attr=False)
        x = conv_up(data)
        return x

    def func_test_case_fp16(self):
        paddle.set_default_dtype("float16")
        paddle.seed(1234)
        w_attr = paddle.ParamAttr(learning_rate=0.,
                                  regularizer=L2Decay(0.),
                                  initializer=initializer.BilinearInitializer())
        conv2d = paddle.nn.Conv2D(1, 2, 3, weight_attr=w_attr)
        paddle.set_default_dtype("float32")
        return conv2d.weight

    def test_bilinear_initializer(self):
        paddle.disable_static()
        with framework._test_eager_guard():
            eager_x = self.func_test_case()
        legacy_x = self.func_test_case()
        self.assertEqual(eager_x.numpy().all(), legacy_x.numpy().all())
        paddle.enable_static()

    def test_bilinear_initializer_fp16(self):
        paddle.disable_static()
        with framework._test_eager_guard():
            eager_x = self.func_test_case_fp16()
        legacy_x = self.func_test_case_fp16()
        self.assertEqual(eager_x.numpy().all(), legacy_x.numpy().all())
        paddle.enable_static()


class TestNumpyArrayInitializer(unittest.TestCase):

    def test_numpy_array_initializer(self, dtype="float32"):
        """Test the numpy array initializer with supplied arguments
        """
        program = framework.Program()
        block = program.global_block()
        np_array = np.random.random(10000).astype(dtype)
        for _ in range(2):
            block.create_parameter(
                dtype=np_array.dtype,
                shape=np_array.shape,
                lod_level=0,
                name="param",
                initializer=initializer.NumpyArrayInitializer(np_array))
        num_ops = 2 if dtype in ["float16", "uint16"] else 1
        self.assertEqual(len(block.ops), num_ops)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'assign_value')
        assert (init_op.attr('fp32_values') == np_array).all()
        return block

    def test_numpy_array_initializer_fp16(self):
        """Test the numpy array initializer with float16
        """
        block = self.test_numpy_array_initializer("float16")
        self.assertTrue(block.ops[1])

    def test_numpy_array_initializer_bf16(self):
        """Test the numpy array initializer with bfloat16
        """
        block = self.test_numpy_array_initializer("uint16")
        self.assertTrue(block.ops[1])


class TestSetGlobalInitializer(unittest.TestCase):

    def test_set_global_weight_initializer(self):
        """Test setting a global parameter initializer with UniformInitializer
        """
        main_prog = framework.Program()
        startup_prog = framework.Program()
        fluid.set_global_initializer(initializer.Uniform(low=-0.5, high=0.5))
        with fluid.program_guard(main_prog, startup_prog):
            x = fluid.data(name="x", shape=[1, 3, 32, 32])
            # default initializer of param in layers.conv2d is NormalInitializer
            conv = fluid.layers.conv2d(x, 5, 3)

        block = startup_prog.global_block()
        self.assertEqual(len(block.ops), 2)

        # init weight is the first op, and bias is the second
        bias_init_op = block.ops[1]
        self.assertEqual(bias_init_op.type, 'fill_constant')
        self.assertAlmostEqual(bias_init_op.attr('value'), 0.0, delta=DELTA)

        param_init_op = block.ops[0]
        self.assertEqual(param_init_op.type, 'uniform_random')
        self.assertAlmostEqual(param_init_op.attr('min'), -0.5, delta=DELTA)
        self.assertAlmostEqual(param_init_op.attr('max'), 0.5, delta=DELTA)
        self.assertEqual(param_init_op.attr('seed'), 0)
        fluid.set_global_initializer(None)

    def test_set_global_bias_initializer(self):
        """Test setting a global bias initializer with NormalInitializer
        """
        main_prog = framework.Program()
        startup_prog = framework.Program()
        fluid.set_global_initializer(initializer.Uniform(low=-0.5, high=0.5),
                                     bias_init=initializer.Normal(loc=0.0,
                                                                  scale=2.0))
        with fluid.program_guard(main_prog, startup_prog):
            x = fluid.data(name="x", shape=[1, 3, 32, 32])
            # default initializer of bias in layers.conv2d is ConstantInitializer
            conv = fluid.layers.conv2d(x, 5, 3)

        block = startup_prog.global_block()
        self.assertEqual(len(block.ops), 2)

        # init weight is the first op, and bias is the second
        bias_init_op = block.ops[1]
        self.assertEqual(bias_init_op.type, 'gaussian_random')
        self.assertAlmostEqual(bias_init_op.attr('mean'), 0.0, delta=DELTA)
        self.assertAlmostEqual(bias_init_op.attr('std'), 2.0, delta=DELTA)
        self.assertEqual(bias_init_op.attr('seed'), 0)

        param_init_op = block.ops[0]
        self.assertEqual(param_init_op.type, 'uniform_random')
        self.assertAlmostEqual(param_init_op.attr('min'), -0.5, delta=DELTA)
        self.assertAlmostEqual(param_init_op.attr('max'), 0.5, delta=DELTA)
        self.assertEqual(param_init_op.attr('seed'), 0)
        fluid.set_global_initializer(None)


class TestUniformInitializerDygraph(unittest.TestCase):

    def func_uniform_initializer(self, dtype="float32"):
        """
        In dygraph mode, we can use initializer directly to initialize a tensor.
        """
        paddle.disable_static()

        tensor = paddle.zeros([1024, 1024, 16])
        tensor.stop_gradient = False
        np.testing.assert_allclose(np.zeros((1024, 1024, 16)),
                                   tensor.numpy(),
                                   rtol=1e-05)

        uniform_ = paddle.nn.initializer.Uniform()
        uniform_(tensor)

        self.assertEqual(tensor.stop_gradient,
                         False)  # stop_gradient is not changed

        hist, prob = output_hist(tensor.numpy())

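        # For U(-1, 1) each of the 10 bins should hold ~10% of the samples.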
        np.testing.assert_allclose(hist, prob, rtol=0, atol=0.001)

        paddle.enable_static()

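    # Run once under the eager guard and once more in legacy dygraph mode
    # so both execution paths are exercised.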
    def test_uniform_initializer(self, dtype="float32"):
        with framework._test_eager_guard():
            self.func_uniform_initializer()
        self.func_uniform_initializer()


class TestXavierInitializerDygraph(unittest.TestCase):

    def func_xavier_initializer(self, dtype="float32"):
        """
        In dygraph mode, we can use initializer directly to initialize a tensor.
        """
        paddle.disable_static()

        tensor = paddle.zeros([1024, 1024, 16])
        tensor.stop_gradient = False

        xavier_ = paddle.fluid.initializer.XavierInitializer(uniform=False,
                                                             fan_in=3,
                                                             fan_out=5)
        xavier_(tensor)

        hist, _ = output_hist(tensor.numpy())

        hist2, _ = output_hist(
            np.random.normal(0, np.sqrt(2.0 / (3 + 5)), [1024, 1024, 16]))

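        # Distribution check: the tensor's histogram should match a reference
        # sample drawn from N(0, sqrt(2 / (fan_in + fan_out))).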
        np.testing.assert_allclose(hist, hist2, rtol=0, atol=0.01)
        paddle.enable_static()

    def test_xavier_initializer(self, dtype="float32"):
        with framework._test_eager_guard():
            self.func_xavier_initializer()
        self.func_xavier_initializer()


class TestMSRAInitializerDygraph(unittest.TestCase):

    def func_msra_initializer(self, dtype="float32"):
        """
        In dygraph mode, we can use initializer directly to initialize a tensor.
        """
        paddle.disable_static()

        tensor = paddle.zeros([1024, 1024, 16])
        tensor.stop_gradient = False

        msra_ = paddle.fluid.initializer.MSRAInitializer(uniform=False,
                                                         fan_in=4)
        msra_(tensor)

        hist, _ = output_hist(tensor.numpy())

        hist2, _ = output_hist(
            np.random.normal(0, np.sqrt(2.0 / (4)), [1024, 1024, 16]))

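        # Distribution check against a reference N(0, sqrt(2 / fan_in)) sample.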
        np.testing.assert_allclose(hist, hist2, rtol=0, atol=0.01)
        paddle.enable_static()

    def test_msra_initializer(self, dtype="float32"):
        with framework._test_eager_guard():
            self.func_msra_initializer()
        self.func_msra_initializer()


class TestConsistencyOfDynamicAndStaticGraph(unittest.TestCase):

    def func_order(self):
        paddle.set_device('cpu')
        SEED = 123
        weight_attr = paddle.framework.ParamAttr(
            name="linear_weight",
            learning_rate=1.0,
            trainable=False,
            regularizer=None,
            initializer=paddle.nn.initializer.TruncatedNormal(mean=0.0,
                                                              std=2.0))
        bias_attr = paddle.framework.ParamAttr(
            name="linear_bias",
            learning_rate=1.0,
            trainable=False,
            regularizer=None,
            initializer=paddle.nn.initializer.TruncatedNormal(mean=0.0,
                                                              std=2.0))

        def run_dynamic_graph():
            paddle.disable_static()
            paddle.seed(SEED)
            linear = paddle.nn.Linear(1,
                                      1,
                                      weight_attr=weight_attr,
                                      bias_attr=bias_attr)
            weight, bias = linear.weight.numpy(), linear.bias.numpy()
            paddle.enable_static()
            return weight, bias

        def run_static_graph():
            paddle.enable_static()
            exe = paddle.static.Executor(paddle.CPUPlace())
            paddle.seed(SEED)
            linear = paddle.nn.Linear(1,
                                      1,
                                      weight_attr=weight_attr,
                                      bias_attr=bias_attr)
            res = exe.run(paddle.static.default_startup_program(),
                          fetch_list=['linear_weight', 'linear_bias'])
            return res[0], res[1]

        dynamic_res = run_dynamic_graph()
        static_res = run_static_graph()

        np.testing.assert_array_equal(dynamic_res[0], static_res[0])
        np.testing.assert_array_equal(dynamic_res[1], static_res[1])

    def test_order(self):
        with framework._test_eager_guard():
            self.func_order()
        self.func_order()


# 2-D Parameter with shape: [10, 15]
class TestOrthogonalInitializer1(unittest.TestCase):
    """
    case 1
    """

    def config(self):
        self.weight_attr = paddle.ParamAttr(
            initializer=paddle.nn.initializer.Orthogonal(gain=3.0))
        self.dtype = "float64"
        self.in_features = 10
        self.out_features = 15
        self.num_ops = 9

    def check_result(self, a, b):
        np.testing.assert_array_equal(a, b)
        np.testing.assert_allclose(np.matmul(a, a.T),
                                   9 * np.eye(10),
                                   rtol=1e-5,
                                   atol=1e-8)

    def func_orthogonal(self):
        self.config()
        paddle.set_default_dtype(self.dtype)

        paddle.disable_static()
        paddle.seed(2021)
        linear = paddle.nn.Linear(self.in_features,
                                  self.out_features,
                                  weight_attr=self.weight_attr)
        res_dygraph = linear.weight.numpy()

        paddle.enable_static()
        paddle.seed(2021)
        start_prog = paddle.static.Program()
        main_prog = paddle.static.Program()
        with paddle.static.program_guard(main_prog, start_prog):
            linear = paddle.nn.Linear(self.in_features,
                                      self.out_features,
                                      weight_attr=self.weight_attr)

            block = start_prog.global_block()
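            # Expected lowering of the orthogonal initializer: Gaussian
            # sample -> QR -> sign correction (diag/sign/mul) -> reshape ->
            # scale by gain.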
            self.assertEqual(len(block.ops), self.num_ops)
            self.assertEqual(block.ops[0].type, 'gaussian_random')
            self.assertEqual(block.ops[1].type, 'qr')
            self.assertEqual(block.ops[2].type, 'diag_v2')
            self.assertEqual(block.ops[3].type, 'sign')
            self.assertEqual(block.ops[4].type, 'elementwise_mul')
            self.assertEqual(block.ops[-3].type, 'reshape2')
            self.assertEqual(block.ops[-2].type, 'scale')

            exe = paddle.static.Executor()
            res_static = exe.run(start_prog, fetch_list=[linear.weight])[0]

        self.check_result(res_dygraph, res_static)

    def test_orthogonal(self):
        with framework._test_eager_guard():
            self.func_orthogonal()
        self.func_orthogonal()


# 2-D Parameter with shape: [15, 10]
class TestOrthogonalInitializer2(TestOrthogonalInitializer1):
    """
    case 2
    """

    def config(self):
        self.weight_attr = paddle.ParamAttr(
            initializer=paddle.nn.initializer.Orthogonal(gain=2.0))
        self.dtype = "float64"
        self.in_features = 15
        self.out_features = 10
        self.num_ops = 8

    def check_result(self, a, b):
        np.testing.assert_array_equal(a, b)
        np.testing.assert_allclose(np.matmul(a.T, a),
                                   4 * np.eye(10),
                                   rtol=1e-5,
                                   atol=1e-8)


# 2-D Parameter with shape: [10, 10]
class TestOrthogonalInitializer3(TestOrthogonalInitializer1):
    """
    case 3
    """

    def config(self):
        self.weight_attr = paddle.ParamAttr(
            initializer=paddle.nn.initializer.Orthogonal())
        self.dtype = "float32"
        self.in_features = 10
        self.out_features = 10
        self.num_ops = 8

    def check_result(self, a, b):
        np.testing.assert_array_equal(a, b)
        np.testing.assert_allclose(np.matmul(a.T, a),
                                   np.eye(10),
                                   rtol=1e-05,
                                   atol=1e-06)
        np.testing.assert_allclose(np.matmul(a, a.T),
                                   np.eye(10),
                                   rtol=1e-05,
                                   atol=1e-06)

    def test_error(self):
        self.config()
        with self.assertRaises(AssertionError):
            paddle.nn.Linear(10, 10, bias_attr=self.weight_attr)


# 4-D Parameter with shape: [6, 4, 3, 3]
class TestOrthogonalInitializer4(unittest.TestCase):
    """
    case 4
    """

    def config(self):
        self.weight_attr = paddle.ParamAttr(
            initializer=paddle.nn.initializer.Orthogonal(gain=3.0))
        self.dtype = "float64"
        self.in_features = 4
        self.out_features = 6
        self.kernel_size = (3, 3)

    def check_result(self, a, b):
        np.testing.assert_array_equal(a, b)
        a = a.reshape(6, -1)
        np.testing.assert_allclose(np.matmul(a, a.T),
                                   9 * np.eye(6),
                                   rtol=1e-5,
                                   atol=1e-8)

    def func_orthogonal(self):
        self.config()
        paddle.set_default_dtype(self.dtype)

        paddle.disable_static()
        paddle.seed(2021)
        conv2d = paddle.nn.Conv2D(self.in_features,
                                  self.out_features,
                                  self.kernel_size,
                                  weight_attr=self.weight_attr)
        res_dygraph = conv2d.weight.numpy()

        paddle.enable_static()
        paddle.seed(2021)
        start_prog = paddle.static.Program()
        main_prog = paddle.static.Program()
        with paddle.static.program_guard(main_prog, start_prog):
            conv2d = paddle.nn.Conv2D(self.in_features,
                                      self.out_features,
                                      self.kernel_size,
                                      weight_attr=self.weight_attr)
            exe = paddle.static.Executor()
            res_static = exe.run(paddle.static.default_startup_program(),
                                 fetch_list=[conv2d.weight])[0]
        self.check_result(res_dygraph, res_static)

    def test_orthogonal(self):
        with framework._test_eager_guard():
            self.func_orthogonal()
        self.func_orthogonal()


# 4-D Parameter with shape: [50, 4, 3, 3]
class TestOrthogonalInitializer5(TestOrthogonalInitializer4):
    """
    case 5
    """

    def config(self):
        self.weight_attr = paddle.ParamAttr(
            initializer=paddle.nn.initializer.Orthogonal(gain=2.0))
        self.dtype = "float64"
        self.in_features = 4
        self.out_features = 50
        self.kernel_size = (3, 3)

    def check_result(self, a, b):
        np.testing.assert_array_equal(a, b)
        a = a.reshape(50, -1)
        np.testing.assert_allclose(np.matmul(a.T, a),
                                   4 * np.eye(36),
                                   rtol=1e-5,
                                   atol=1e-8)


# 4-D Parameter with shape: [36, 4, 3, 3]
class TestOrthogonalInitializer6(TestOrthogonalInitializer4):
    """
    case 6
    """

    def config(self):
        self.weight_attr = paddle.ParamAttr(
            initializer=paddle.nn.initializer.Orthogonal())
        self.dtype = "float32"
        self.in_features = 4
        self.out_features = 36
        self.kernel_size = (3, 3)

    def check_result(self, a, b):
        np.testing.assert_array_equal(a, b)
        a = a.reshape(36, -1)
        np.testing.assert_allclose(np.matmul(a.T, a),
                                   np.eye(36),
                                   rtol=1e-05,
                                   atol=1e-06)
        np.testing.assert_allclose(np.matmul(a, a.T),
                                   np.eye(36),
                                   rtol=1e-05,
                                   atol=1e-06)


# initialize Conv1D weight
class TestDiracInitializer1(unittest.TestCase):

    def config(self):
        self.weight_attr = paddle.ParamAttr(
            initializer=paddle.nn.initializer.Dirac())
        self.dtype = "float64"
        self.in_channels = 3
        self.out_channels = 2
        self.kernel_size = 3
        self.input_shape = [8, self.in_channels, 10]
        self.conv_layer = paddle.nn.Conv1D
        self.num_ops = 8  # fill_constant*2, reshape*2, assign_value*2, scatter, cast

    def check_result(self, w_dygraph, w_static, conv_in, conv_out):
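        # Dirac init makes the conv act as identity on the first
        # min(in_channels, out_channels) channels, cropped to the kernel's
        # valid region.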
        np.testing.assert_array_equal(w_dygraph, w_static)
        np.testing.assert_array_equal(conv_out, conv_in[:, 0:2, 1:9])

    def func_dirac(self):
        self.config()
        paddle.set_default_dtype(self.dtype)

        paddle.disable_static()
1098 1099 1100 1101
        conv = self.conv_layer(self.in_channels,
                               self.out_channels,
                               self.kernel_size,
                               weight_attr=self.weight_attr)
1102 1103 1104 1105 1106 1107 1108
        weight_dygraph = conv.weight.numpy()

        paddle.enable_static()
        start_prog = paddle.static.Program()
        main_prog = paddle.static.Program()
        with paddle.static.program_guard(main_prog, start_prog):
            inp = paddle.rand(self.input_shape)
            conv = self.conv_layer(self.in_channels,
                                   self.out_channels,
                                   self.kernel_size,
                                   weight_attr=self.weight_attr)

            output = conv(inp)
            block = start_prog.global_block()
            self.assertEqual(len(block.ops), self.num_ops)
            self.assertEqual(block.ops[0].type, 'fill_constant')
            self.assertEqual(block.ops[1].type, 'reshape2')
            self.assertEqual(block.ops[2].type, 'assign_value')
            self.assertEqual(block.ops[3].type, 'assign_value')
            self.assertEqual(block.ops[4].type, 'scatter')
            self.assertEqual(block.ops[5].type, 'reshape2')

            exe = paddle.static.Executor()
            exe.run(start_prog)
            fetch = exe.run(main_prog, fetch_list=[inp, output, conv.weight])
            conv_input = fetch[0]
            conv_output = fetch[1]
            weight_static = fetch[2]

        self.check_result(weight_dygraph, weight_static, conv_input,
                          conv_output)

    def test_dirac(self):
        with framework._test_eager_guard():
            self.func_dirac()
        self.func_dirac()


# initialize Conv2D weight
class TestDiracInitializer2(TestDiracInitializer1):

    def config(self):
        self.weight_attr = paddle.ParamAttr(
            initializer=paddle.nn.initializer.Dirac(groups=1))
        self.dtype = "float64"
        self.in_channels = 4
        self.out_channels = 8
        self.kernel_size = (3, 3)
        self.input_shape = [8, self.in_channels, 10, 10]
        self.conv_layer = paddle.nn.Conv2D
        self.num_ops = 8

    def check_result(self, w_dygraph, w_static, conv_in, conv_out):
        np.testing.assert_array_equal(w_dygraph, w_static)
        np.testing.assert_array_equal(conv_out[:, 0:4, :, :], conv_in[:, :, 1:9,
                                                                      1:9])
        np.testing.assert_array_equal(conv_out[:, 4:8, :, :],
                                      np.zeros([8, 4, 8, 8]))


# initialize Conv3D weight
class TestDiracInitializer3(TestDiracInitializer1):

    def config(self):
        self.weight_attr = paddle.ParamAttr(
            initializer=paddle.nn.initializer.Dirac(groups=2))
        self.dtype = "float32"
        self.in_channels = 5
        self.out_channels = 10
        self.kernel_size = (3, 3, 3)
        self.input_shape = [8, self.in_channels, 10, 10, 10]
        self.conv_layer = paddle.nn.Conv3D
        self.num_ops = 7

    def check_result(self, w_dygraph, w_static, conv_in, conv_out):
        np.testing.assert_array_equal(w_dygraph, w_static)
        np.testing.assert_array_equal(conv_out[:, 0:5, :, :, :],
                                      conv_in[:, :, 1:9, 1:9, 1:9])
        np.testing.assert_array_equal(conv_out[:, 5:10, :, :, :],
                                      conv_in[:, :, 1:9, 1:9, 1:9])

    def test_error(self):
        self.config()
        with self.assertRaises(AssertionError):
            paddle.nn.Linear(10, 10, weight_attr=self.weight_attr)

        with self.assertRaises(AssertionError):
            paddle.nn.Conv2D(5, 9, (3, 3), weight_attr=self.weight_attr)


if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()