#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import numpy as np
import unittest

import paddle
import paddle.fluid as fluid
import paddle.fluid.framework as framework
import paddle.fluid.initializer as initializer
from paddle.fluid.core import VarDesc

DELTA = 0.00001


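# Helper for the fp16/bf16 tests: returns True if `op` is a cast from fp32 to
# fp16 or bf16, i.e. the extra op emitted after the parameter is filled in fp32.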
def check_cast_op(op):
    return op.type == 'cast' and \
           op.attr('in_dtype') == VarDesc.VarType.FP32 and \
           op.attr('out_dtype') in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]


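# Helper: 10-bin histogram of `out` over [-1, 1] normalized to frequencies,
# together with the expected per-bin probability (0.1) of a uniform sample.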
def output_hist(out):
    hist, _ = np.histogram(out, range=(-1, 1))
    hist = hist.astype("float32")
    hist /= float(out.size)
    prob = 0.1 * np.ones((10))
    return hist, prob


class TestConstantInitializer(unittest.TestCase):
    def test_constant_initializer_default_value(self, dtype="float32"):
        """Test the constant initializer with default value
        """
        program = framework.Program()
        block = program.global_block()
        for _ in range(2):
            block.create_parameter(
                dtype=dtype,
                shape=[5, 10],
                lod_level=0,
                name="param",
                initializer=initializer.ConstantInitializer())
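        # float16 adds a cast op after fill_constant, so the block holds 2 ops.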
        num_ops = 2 if dtype == "float16" else 1
        self.assertEqual(len(block.ops), num_ops)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'fill_constant')
        self.assertAlmostEqual(init_op.attr('value'), 0.0, delta=DELTA)
        return block

    def test_constant_initializer(self, dtype="float32"):
        """Test constant initializer with supplied value
        """
        program = framework.Program()
        block = program.global_block()
        for _ in range(2):
            block.create_parameter(
                dtype=dtype,
                shape=[5, 10],
                lod_level=0,
                name="param",
                initializer=initializer.ConstantInitializer(2.3))
        num_ops = 2 if dtype == "float16" else 1
        self.assertEqual(len(block.ops), num_ops)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'fill_constant')
        self.assertAlmostEqual(init_op.attr('value'), 2.3, delta=DELTA)
        return block

    def test_constant_initializer_fp16(self):
        """Test constant initializer with float16
        """
        block = self.test_constant_initializer_default_value("float16")
        self.assertTrue(check_cast_op(block.ops[1]))
        block = self.test_constant_initializer("float16")
        self.assertTrue(check_cast_op(block.ops[1]))

    def test_constant_initializer_bf16(self):
        """Test constant initializer with bfloat16
           No cast operator has been added here
        """
        self.test_constant_initializer_default_value("uint16")
        self.test_constant_initializer("uint16")


class TestUniformInitializer(unittest.TestCase):
    def test_uniform_initializer_default_value(self, dtype="float32"):
        """Test the uniform initializer with default value
        """
        program = framework.Program()
        block = program.global_block()
        for _ in range(2):
            block.create_parameter(
                dtype=dtype,
                shape=[5, 10],
                lod_level=0,
                name="param",
                initializer=initializer.UniformInitializer())
        num_ops = 2 if dtype == "float16" else 1
        self.assertEqual(len(block.ops), num_ops)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'uniform_random')
        self.assertAlmostEqual(init_op.attr('min'), -1.0, delta=DELTA)
        self.assertAlmostEqual(init_op.attr('max'), 1.0, delta=DELTA)
        self.assertEqual(init_op.attr('seed'), 0)
        return block

    def test_uniform_initializer_random_seed(self):
        """Test the uniform initializer with manually setting seed
        """
        program = framework.Program()
        program.random_seed = 123
        block = program.global_block()
        for _ in range(2):
            block.create_parameter(
                dtype="float32",
                shape=[5, 10],
                lod_level=0,
                name="param1",
                initializer=initializer.UniformInitializer())
            block.create_parameter(
                dtype="float32",
                shape=[5, 10],
                lod_level=0,
                name="param2",
                initializer=initializer.UniformInitializer(seed=456))
        init_op = block.ops[1]
        self.assertEqual(init_op.attr("seed"), 456)
        init_op1 = block.ops[0]
        self.assertEqual(init_op1.attr("seed"), 123)

    def test_uniform_initializer(self, dtype="float32"):
        """Test uniform initializer with supplied attributes
        """
        program = framework.Program()
        block = program.global_block()
        for _ in range(2):
            block.create_parameter(
                dtype=dtype,
                shape=[5, 10],
                lod_level=0,
                name="param",
                initializer=initializer.UniformInitializer(-4.2, 3.1, 123))
        num_ops = 2 if dtype == "float16" else 1
        self.assertEqual(len(block.ops), num_ops)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'uniform_random')
        self.assertAlmostEqual(init_op.attr('min'), -4.2, delta=DELTA)
        self.assertAlmostEqual(init_op.attr('max'), 3.1, delta=DELTA)
        self.assertEqual(init_op.attr('seed'), 123)
        return block

    def test_uniform_initializer_two_op(self, dtype="float32"):
        """Test uniform initializer with supplied attributes
        """
        program = framework.Program()
        block = program.global_block()
        for i in range(2):
            block.create_parameter(
                dtype=dtype,
                shape=[5, 10],
                lod_level=0,
                name="param",
                initializer=initializer.UniformInitializer(-4.2, float(i), 123))
        num_ops = 2 if dtype == "float16" else 1
        self.assertEqual(len(block.ops), num_ops)
        init_op0 = block.ops[0]
        self.assertEqual(init_op0.type, 'uniform_random')
        self.assertAlmostEqual(init_op0.attr('min'), -4.2, delta=DELTA)
        self.assertAlmostEqual(init_op0.attr('max'), 0.0, delta=DELTA)
        self.assertEqual(init_op0.attr('seed'), 123)
        return block

    def test_uniform_initializer_fp16(self):
        """Test uniform initializer with float16
        """
        block = self.test_uniform_initializer_default_value("float16")
        self.assertTrue(check_cast_op(block.ops[1]))
        block = self.test_uniform_initializer(dtype="float16")
        self.assertTrue(check_cast_op(block.ops[1]))
        block = self.test_uniform_initializer_two_op("float16")
        self.assertTrue(check_cast_op(block.ops[1]))

    def test_uniform_initializer_bf16(self):
        """Test uniform initializer with bfloat16
           No cast operator has been added here
        """
        block = self.test_uniform_initializer_default_value("uint16")
        block = self.test_uniform_initializer(dtype="uint16")
        block = self.test_uniform_initializer_two_op("uint16")


class TestNormalInitializer(unittest.TestCase):
    def test_normal_initializer_default_value(self):
        """Test the normal initializer with default value
        """
        program = framework.Program()
        block = program.global_block()
        for _ in range(2):
            block.create_parameter(
                dtype="float32",
                shape=[5, 10],
                lod_level=0,
                name="param",
                initializer=initializer.NormalInitializer())
        self.assertEqual(len(block.ops), 1)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'gaussian_random')
        self.assertAlmostEqual(init_op.attr('mean'), 0.0, delta=DELTA)
        self.assertAlmostEqual(init_op.attr('std'), 1.0, delta=DELTA)
        self.assertEqual(init_op.attr('seed'), 0)

    def test_normal_initializer(self, dtype="float32"):
        """Test normal initializer with supplied attributes
        """
        program = framework.Program()
        block = program.global_block()
        for _ in range(2):
            block.create_parameter(
                dtype=dtype,
                shape=[5, 10],
                lod_level=0,
                name="param",
                initializer=initializer.NormalInitializer(2.3, 1.9, 123))
        num_ops = 2 if dtype in ["float16", "uint16"] else 1
        self.assertEqual(len(block.ops), num_ops)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'gaussian_random')
        self.assertAlmostEqual(init_op.attr('mean'), 2.3, delta=DELTA)
        self.assertAlmostEqual(init_op.attr('std'), 1.9, delta=DELTA)
        self.assertEqual(init_op.attr('seed'), 123)
        return block

    def test_normal_initializer_fp16(self):
        """Test normal initializer with float16
        """
        block = self.test_normal_initializer("float16")
        self.assertTrue(check_cast_op(block.ops[1]))

    def test_normal_initializer_bf16(self):
        """Test normal initializer with bfloat16
        """
        block = self.test_normal_initializer("uint16")
        self.assertTrue(check_cast_op(block.ops[1]))


class TestXavierInitializer(unittest.TestCase):
    def test_uniform_xavier_initializer(self):
        """Test Xavier initializer with uniform distribution on
           for matrix multiply.
        """
        program = framework.Program()
        block = program.global_block()
        for _ in range(2):
            param = block.create_parameter(
                dtype="float32",
                shape=[5, 10],
                lod_level=0,
                name="param",
                initializer=initializer.XavierInitializer())
        self.assertEqual(len(block.ops), 1)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'uniform_random')
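        # Xavier/Glorot uniform bound: sqrt(6 / (fan_in + fan_out)).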
        limit = np.sqrt(6.0 / (param.shape[0] + param.shape[1]))
        self.assertAlmostEqual(init_op.attr('min'), -limit, delta=DELTA)
        self.assertAlmostEqual(init_op.attr('max'), limit, delta=DELTA)
        self.assertEqual(init_op.attr('seed'), 0)

    def test_uniform_xavier_initializer_conv(self):
        """Test Xavier initializer with uniform distribution on
           for convolutions.
        """
        program = framework.Program()
        block = program.global_block()
        for _ in range(2):
            param = block.create_parameter(
                dtype="float32",
                shape=[5, 10, 15, 20],
                lod_level=0,
                name="param",
                initializer=initializer.XavierInitializer())
        self.assertEqual(len(block.ops), 1)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'uniform_random')
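        # For conv weights, fan_in and fan_out are scaled by the receptive field size (15 * 20).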
        receptive_field_size = float(15 * 20)
        limit = np.sqrt(6.0 / (
            (param.shape[0] + param.shape[1]) * receptive_field_size))
        self.assertAlmostEqual(init_op.attr('min'), -limit, delta=DELTA)
        self.assertAlmostEqual(init_op.attr('max'), limit, delta=DELTA)
        self.assertEqual(init_op.attr('seed'), 0)

    def test_normal_xavier_initializer(self):
        """Test Xavier initializer with normal distribution on
           for matrix multiply.
        """
        program = framework.Program()
        block = program.global_block()
        for _ in range(2):
            param = block.create_parameter(
                dtype="float32",
                shape=[5, 10],
                lod_level=0,
                name="param",
                initializer=initializer.XavierInitializer(uniform=False))
        self.assertEqual(len(block.ops), 1)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'gaussian_random')
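        # Xavier/Glorot normal std: sqrt(2 / (fan_in + fan_out)).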
        std = np.sqrt(2.0 / (param.shape[0] + param.shape[1]))
        self.assertAlmostEqual(init_op.attr('mean'), 0.0, delta=DELTA)
        self.assertAlmostEqual(init_op.attr('std'), std, delta=DELTA)
        self.assertEqual(init_op.attr('seed'), 0)

    def test_normal_xavier_initializer_conv(self):
        """Test Xavier initializer with normal distribution on
           for convolutions.
        """
        program = framework.Program()
        block = program.global_block()
        for _ in range(2):
            param = block.create_parameter(
                dtype="float32",
                shape=[5, 10, 15, 20],
                lod_level=0,
                name="param",
                initializer=initializer.XavierInitializer(uniform=False))
        self.assertEqual(len(block.ops), 1)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'gaussian_random')
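        # Conv case: the fans are scaled by the receptive field size before computing std.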
        receptive_field_size = float(15 * 20)
        std = np.sqrt(2.0 / (
            (param.shape[0] + param.shape[1]) * receptive_field_size))
        self.assertAlmostEqual(init_op.attr('mean'), 0.0, delta=DELTA)
        self.assertAlmostEqual(init_op.attr('std'), std, delta=DELTA)
        self.assertEqual(init_op.attr('seed'), 0)

    def test_xavier_initializer_supplied_arguments(self,
                                                   dtype="float32",
                                                   uniform=True):
        """Test the Xavier initializer with supplied arguments
        """
        program = framework.Program()
        block = program.global_block()
        for _ in range(2):
            block.create_parameter(
                dtype=dtype,
                shape=[5, 10],
                lod_level=0,
                name="param",
                initializer=initializer.XavierInitializer(
                    uniform=uniform, fan_in=12, fan_out=23, seed=134))
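        # fp16 always adds a cast op; bf16 only does so for the gaussian (normal) path.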
        num_ops = 2 if (dtype == "float16" or (dtype == "uint16" and
                                               not uniform)) else 1
        self.assertEqual(len(block.ops), num_ops)
        init_op = block.ops[0]
        if uniform:
            self.assertEqual(init_op.type, 'uniform_random')
            limit = np.sqrt(6.0 / (12 + 23))
            self.assertAlmostEqual(init_op.attr('min'), -limit, delta=DELTA)
            self.assertAlmostEqual(init_op.attr('max'), limit, delta=DELTA)
        else:
            self.assertEqual(init_op.type, 'gaussian_random')
        self.assertEqual(init_op.attr('seed'), 134)
        return block

    def test_xavier_initializer_fp16(self):
        """Test the Xavier initializer with float16
        """
        block = self.test_xavier_initializer_supplied_arguments("float16")
        self.assertTrue(check_cast_op(block.ops[1]))

    def test_xavier_initializer_bf16(self):
        """Test the Xavier initializer with bfloat16
        """
        block_uniform = self.test_xavier_initializer_supplied_arguments(
            "uint16")
        self.assertEqual(len(block_uniform.ops), 1)
        block_gaussian = self.test_xavier_initializer_supplied_arguments(
            "uint16", False)
        self.assertTrue(check_cast_op(block_gaussian.ops[1]))


class TestMSRAInitializer(unittest.TestCase):
    def test_uniform_msra_initializer(self):
        """Test MSRA initializer with uniform distribution on
           for matrix multiply.
        """
        program = framework.Program()
        block = program.global_block()
        for _ in range(2):
            param = block.create_parameter(
                dtype="float32",
                shape=[5, 10],
                lod_level=0,
                name="param",
                initializer=initializer.MSRAInitializer())
        self.assertEqual(len(block.ops), 1)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'uniform_random')
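        # MSRA/Kaiming uniform bound: sqrt(6 / fan_in), with fan_in = shape[0] here.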
        limit = np.sqrt(6.0 / param.shape[0])
        self.assertAlmostEqual(init_op.attr('min'), -limit, delta=DELTA)
        self.assertAlmostEqual(init_op.attr('max'), limit, delta=DELTA)
        self.assertEqual(init_op.attr('seed'), 0)

    def test_uniform_msra_initializer_conv(self):
        """Test MSRA initializer with uniform distribution on
           for convolutions.
        """
        program = framework.Program()
        block = program.global_block()
        for _ in range(2):
            param = block.create_parameter(
                dtype="float32",
                shape=[5, 10, 15, 20],
                lod_level=0,
                name="param",
                initializer=initializer.MSRAInitializer())
        self.assertEqual(len(block.ops), 1)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'uniform_random')
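        # Conv case: fan_in = in_channels (shape[1]) * receptive field size (15 * 20).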
        receptive_field_size = float(15 * 20)
        limit = np.sqrt(6.0 / (param.shape[1] * receptive_field_size))
        self.assertAlmostEqual(init_op.attr('min'), -limit, delta=DELTA)
        self.assertAlmostEqual(init_op.attr('max'), limit, delta=DELTA)
        self.assertEqual(init_op.attr('seed'), 0)

    def test_normal_msra_initializer(self):
        """Test MSRA initializer with normal distribution on
           for matrix multiply.
        """
        program = framework.Program()
        block = program.global_block()
        for _ in range(2):
            param = block.create_parameter(
                dtype="float32",
                shape=[5, 10],
                lod_level=0,
                name="param",
                initializer=initializer.MSRAInitializer(uniform=False))
        self.assertEqual(len(block.ops), 1)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'gaussian_random')
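        # MSRA/Kaiming normal std: sqrt(2 / fan_in), with fan_in = shape[0] here.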
        std = np.sqrt(2.0 / param.shape[0])
        self.assertAlmostEqual(init_op.attr('mean'), 0.0, delta=DELTA)
        self.assertAlmostEqual(init_op.attr('std'), std, delta=DELTA)
        self.assertEqual(init_op.attr('seed'), 0)

    def test_normal_msra_initializer_conv(self):
        """Test MSRA initializer with normal distribution on
           for convolutions.
        """
        program = framework.Program()
        block = program.global_block()
        for _ in range(2):
            param = block.create_parameter(
                dtype="float32",
                shape=[5, 10, 15, 20],
                lod_level=0,
                name="param",
                initializer=initializer.MSRAInitializer(uniform=False))
        self.assertEqual(len(block.ops), 1)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'gaussian_random')
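        # Conv case: std = sqrt(2 / (in_channels * receptive field size)).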
        receptive_field_size = float(15 * 20)
        std = np.sqrt(2.0 / (param.shape[1] * receptive_field_size))
        self.assertAlmostEqual(init_op.attr('mean'), 0.0, delta=DELTA)
        self.assertAlmostEqual(init_op.attr('std'), std, delta=DELTA)
        self.assertEqual(init_op.attr('seed'), 0)

    def test_msra_initializer_supplied_arguments(self, dtype="float32"):
        """Test the MSRA initializer with supplied arguments
        """
        program = framework.Program()
        block = program.global_block()
        for _ in range(2):
            block.create_parameter(
                dtype=dtype,
                shape=[5, 10],
                lod_level=0,
                name="param",
                initializer=initializer.MSRAInitializer(
                    fan_in=12, seed=134))
        num_ops = 2 if dtype == "float16" else 1
        self.assertEqual(len(block.ops), num_ops)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'uniform_random')
        limit = np.sqrt(6.0 / 12)
        self.assertAlmostEqual(init_op.attr('min'), -limit, delta=DELTA)
        self.assertAlmostEqual(init_op.attr('max'), limit, delta=DELTA)
        self.assertEqual(init_op.attr('seed'), 134)
        return block

    def test_msra_initializer_fp16(self):
        """Test the MSRA initializer with float16
        """
        block = self.test_msra_initializer_supplied_arguments("float16")
        self.assertTrue(check_cast_op(block.ops[1]))

    def test_msra_initializer_bf16(self):
        """Test the MSRA initializer with bfloat16
        """
        block = self.test_msra_initializer_supplied_arguments("uint16")


class TestBilinearInitializer(unittest.TestCase):
    def test_bilinear_initializer(self, dtype="float32"):
        """Test the bilinear initializer with supplied arguments
        """
        program = framework.Program()
        block = program.global_block()
        for _ in range(2):
            block.create_parameter(
                dtype=dtype,
                shape=[8, 1, 3, 3],
                lod_level=0,
                name="param",
                initializer=initializer.BilinearInitializer())
        num_ops = 2 if dtype in ["float16", "uint16", "float64"] else 1
        self.assertEqual(len(block.ops), num_ops)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'assign_value')
        return block

    def test_bilinear_initializer_fp64(self):
        self.test_bilinear_initializer(dtype='float64')

    def test_bilinear_initializer_fp16(self):
        """Test the bilinear initializer with supplied arguments
        """
        block = self.test_bilinear_initializer("float16")
        self.assertTrue(check_cast_op(block.ops[1]))

    def test_bilinear_initializer_bf16(self):
        """Test the bilinear initializer with supplied arguments
        """
        block = self.test_bilinear_initializer("uint16")
        self.assertTrue(check_cast_op(block.ops[1]))

    def test_type_error(self):
        self.assertRaises(TypeError, self.test_bilinear_initializer, 'int32')


class TestNumpyArrayInitializer(unittest.TestCase):
    def test_numpy_array_initializer(self, dtype="float32"):
        """Test the numpy array initializer with supplied arguments
        """
        import numpy
        program = framework.Program()
        block = program.global_block()
        np_array = numpy.random.random((10000)).astype(dtype)
        for _ in range(2):
            block.create_parameter(
                dtype=np_array.dtype,
                shape=np_array.shape,
                lod_level=0,
                name="param",
                initializer=initializer.NumpyArrayInitializer(np_array))
        num_ops = 2 if dtype in ["float16", "uint16"] else 1
        self.assertEqual(len(block.ops), num_ops)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'assign_value')
        assert (init_op.attr('fp32_values') == np_array).all()
        return block

    def test_numpy_array_initializer_fp16(self):
        """Test the numpy array initializer with float16
        """
        block = self.test_numpy_array_initializer("float16")
        self.assertTrue(block.ops[1])

    def test_numpy_array_initializer_bf16(self):
        """Test the numpy array initializer with bfloat16
        """
        block = self.test_numpy_array_initializer("uint16")
        self.assertTrue(block.ops[1])


class TestSetGlobalInitializer(unittest.TestCase):
    def test_set_global_weight_initializer(self):
        """Test Set Global Param initializer with UniformInitializer
        """
        main_prog = framework.Program()
        startup_prog = framework.Program()
        fluid.set_global_initializer(initializer.Uniform(low=-0.5, high=0.5))
        with fluid.program_guard(main_prog, startup_prog):
            x = fluid.data(name="x", shape=[1, 3, 32, 32])
            # default initializer of param in layers.conv2d is NormalInitializer
            conv = fluid.layers.conv2d(x, 5, 3)

        block = startup_prog.global_block()
        self.assertEqual(len(block.ops), 2)

        # init weight is the first op, and bias is the second
        bias_init_op = block.ops[1]
        self.assertEqual(bias_init_op.type, 'fill_constant')
        self.assertAlmostEqual(bias_init_op.attr('value'), 0.0, delta=DELTA)

        param_init_op = block.ops[0]
        self.assertEqual(param_init_op.type, 'uniform_random')
        self.assertAlmostEqual(param_init_op.attr('min'), -0.5, delta=DELTA)
        self.assertAlmostEqual(param_init_op.attr('max'), 0.5, delta=DELTA)
        self.assertEqual(param_init_op.attr('seed'), 0)
        fluid.set_global_initializer(None)

    def test_set_global_bias_initializer(self):
        """Test Set Global Bias initializer with NormalInitializer
        """
        main_prog = framework.Program()
        startup_prog = framework.Program()
        fluid.set_global_initializer(
            initializer.Uniform(
                low=-0.5, high=0.5),
            bias_init=initializer.Normal(
                loc=0.0, scale=2.0))
        with fluid.program_guard(main_prog, startup_prog):
            x = fluid.data(name="x", shape=[1, 3, 32, 32])
            # default initializer of bias in layers.conv2d is ConstantInitializer
            conv = fluid.layers.conv2d(x, 5, 3)

        block = startup_prog.global_block()
        self.assertEqual(len(block.ops), 2)

        # init weight is the first op, and bias is the second
        bias_init_op = block.ops[1]
        self.assertEqual(bias_init_op.type, 'gaussian_random')
        self.assertAlmostEqual(bias_init_op.attr('mean'), 0.0, delta=DELTA)
        self.assertAlmostEqual(bias_init_op.attr('std'), 2.0, delta=DELTA)
        self.assertEqual(bias_init_op.attr('seed'), 0)

        param_init_op = block.ops[0]
        self.assertEqual(param_init_op.type, 'uniform_random')
        self.assertAlmostEqual(param_init_op.attr('min'), -0.5, delta=DELTA)
        self.assertAlmostEqual(param_init_op.attr('max'), 0.5, delta=DELTA)
        self.assertEqual(param_init_op.attr('seed'), 0)
        fluid.set_global_initializer(None)


class TestUniformInitializerDygraph(unittest.TestCase):
    def test_uniform_initializer(self, dtype="float32"):
        """
        In dygraph mode, we can use initializer directly to initialize a tensor.
        """
        paddle.disable_static()

        tensor = paddle.zeros([1024, 1024, 16])
        tensor.stop_gradient = False
        self.assertTrue(np.allclose(np.zeros((1024, 1024, 16)), tensor.numpy()))

        uniform_ = paddle.nn.initializer.Uniform()
        uniform_(tensor)

        self.assertEqual(tensor.stop_gradient,
                         False)  # stop_gradient is not changed

        hist, prob = output_hist(tensor.numpy())

        self.assertTrue(
            np.allclose(
                hist, prob, rtol=0, atol=1e-3), "hist: " + str(hist))

        paddle.enable_static()


class TestConsistencyOfDynamicAndStaticGraph(unittest.TestCase):
    def test_order(self):
        paddle.set_device('cpu')
        SEED = 123
        weight_attr = paddle.framework.ParamAttr(
            name="linear_weight",
            learning_rate=1.0,
            trainable=False,
            regularizer=None,
            initializer=paddle.nn.initializer.TruncatedNormal(
                mean=0.0, std=2.0))
        bias_attr = paddle.framework.ParamAttr(
            name="linear_bias",
            learning_rate=1.0,
            trainable=False,
            regularizer=None,
            initializer=paddle.nn.initializer.TruncatedNormal(
                mean=0.0, std=2.0))

        def run_dynamic_graph():
            paddle.disable_static()
            paddle.seed(SEED)
            linear = paddle.nn.Linear(
                1, 1, weight_attr=weight_attr, bias_attr=bias_attr)
            res = linear.weight.numpy(), linear.bias.numpy()
            # restore static mode before returning
            paddle.enable_static()
            return res

        def run_static_graph():
            paddle.enable_static()
            exe = paddle.static.Executor(paddle.CPUPlace())
            paddle.seed(SEED)
            linear = paddle.nn.Linear(
                1, 1, weight_attr=weight_attr, bias_attr=bias_attr)
            res = exe.run(paddle.static.default_startup_program(),
                          fetch_list=['linear_weight', 'linear_bias'])
            return res[0], res[1]

        dynamic_res = run_dynamic_graph()
        static_res = run_static_graph()

        self.assertTrue(np.array_equal(dynamic_res[0], static_res[0]))
        self.assertTrue(np.array_equal(dynamic_res[1], static_res[1]))


if __name__ == '__main__':
    unittest.main()