#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import numpy as np
import unittest

import paddle
import paddle.fluid as fluid
import paddle.fluid.framework as framework
import paddle.fluid.initializer as initializer
from paddle.fluid.core import VarDesc

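# Absolute tolerance used when comparing initializer attribute values below.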
DELTA = 0.00001


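# Helper: returns True if `op` casts an fp32 tensor to fp16/bf16. Several tests
# below expect such a cast as the second op in the block when a low-precision
# parameter is filled in fp32 first and then cast down.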
def check_cast_op(op):
    return op.type == 'cast' and \
           op.attr('in_dtype') == VarDesc.VarType.FP32 and \
           op.attr('out_dtype') in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]


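# Helper: bins samples into 10 equal buckets over [-1, 1] and returns the
# empirical per-bucket probabilities together with the expected uniform
# probability (0.1 per bucket).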
def output_hist(out):
    hist, _ = np.histogram(out, range=(-1, 1))
    hist = hist.astype("float32")
    hist /= float(out.size)
    prob = 0.1 * np.ones((10))
    return hist, prob


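# Each static-graph test below builds a fresh Program, creates the same
# parameter twice, and then checks that only the expected initialization op(s)
# were added to the block and that their attributes match the supplied
# initializer arguments.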
class TestConstantInitializer(unittest.TestCase):
    def test_constant_initializer_default_value(self, dtype="float32"):
        """Test the constant initializer with default value
        """
        program = framework.Program()
        block = program.global_block()
        for _ in range(2):
            block.create_parameter(
                dtype=dtype,
                shape=[5, 10],
                lod_level=0,
                name="param",
                initializer=initializer.ConstantInitializer())
        num_ops = 2 if dtype in ["float16"] else 1
        self.assertEqual(len(block.ops), num_ops)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'fill_constant')
        self.assertAlmostEqual(init_op.attr('value'), 0.0, delta=DELTA)
        return block

    def test_constant_initializer(self, dtype="float32"):
        """Test constant initializer with supplied value
        """
        program = framework.Program()
        block = program.global_block()
        for _ in range(2):
            block.create_parameter(
                dtype=dtype,
                shape=[5, 10],
                lod_level=0,
                name="param",
                initializer=initializer.ConstantInitializer(2.3))
        num_ops = 2 if dtype in ["float16"] else 1
        self.assertEqual(len(block.ops), num_ops)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'fill_constant')
        self.assertAlmostEqual(init_op.attr('value'), 2.3, delta=DELTA)
        return block

    def test_constant_initializer_fp16(self):
        """Test constant initializer with float16
        """
        block = self.test_constant_initializer_default_value("float16")
        self.assertTrue(check_cast_op(block.ops[1]))
        block = self.test_constant_initializer("float16")
        self.assertTrue(check_cast_op(block.ops[1]))

    def test_constant_initializer_bf16(self):
        """Test constant initializer with bfloat16
           No cast operator is added in this case
        """
        self.test_constant_initializer_default_value("uint16")
        self.test_constant_initializer("uint16")


class TestUniformInitializer(unittest.TestCase):
    def test_uniform_initializer_default_value(self, dtype="float32"):
        """Test the uniform initializer with default value
        """
        program = framework.Program()
        block = program.global_block()
        for _ in range(2):
            block.create_parameter(
                dtype=dtype,
                shape=[5, 10],
                lod_level=0,
                name="param",
                initializer=initializer.UniformInitializer())
        num_ops = 2 if dtype in ["float16", "uint16"] else 1
        self.assertEqual(len(block.ops), num_ops)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'uniform_random')
        self.assertAlmostEqual(init_op.attr('min'), -1.0, delta=DELTA)
        self.assertAlmostEqual(init_op.attr('max'), 1.0, delta=DELTA)
        self.assertEqual(init_op.attr('seed'), 0)
        return block

    def test_uniform_initializer_random_seed(self):
        """Test the uniform initializer with manually setting seed
        """
        program = framework.Program()
        program.random_seed = 123
        block = program.global_block()
        for _ in range(2):
            block.create_parameter(
                dtype="float32",
                shape=[5, 10],
                lod_level=0,
                name="param1",
                initializer=initializer.UniformInitializer())
            block.create_parameter(
                dtype="float32",
                shape=[5, 10],
                lod_level=0,
                name="param2",
                initializer=initializer.UniformInitializer(seed=456))
        init_op = block.ops[1]
        self.assertEqual(init_op.attr("seed"), 456)
        init_op1 = block.ops[0]
        self.assertEqual(init_op1.attr("seed"), 123)

    def test_uniform_initializer(self, dtype="float32"):
        """Test uniform initializer with supplied attributes
        """
        program = framework.Program()
        block = program.global_block()
        for _ in range(2):
            block.create_parameter(
                dtype=dtype,
                shape=[5, 10],
                lod_level=0,
                name="param",
                initializer=initializer.UniformInitializer(-4.2, 3.1, 123))
        num_ops = 2 if dtype in ["float16", "uint16"] else 1
        self.assertEqual(len(block.ops), num_ops)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'uniform_random')
        self.assertAlmostEqual(init_op.attr('min'), -4.2, delta=DELTA)
        self.assertAlmostEqual(init_op.attr('max'), 3.1, delta=DELTA)
        self.assertEqual(init_op.attr('seed'), 123)
        return block

    def test_uniform_initializer_two_op(self, dtype="float32"):
        """Test uniform initializer with supplied attributes
        """
        program = framework.Program()
        block = program.global_block()
        for i in range(2):
            block.create_parameter(
                dtype=dtype,
                shape=[5, 10],
                lod_level=0,
                name="param",
                initializer=initializer.UniformInitializer(-4.2, float(i), 123))
        num_ops = 2 if dtype in ["float16", "uint16"] else 1
        self.assertEqual(len(block.ops), num_ops)
        init_op0 = block.ops[0]
        self.assertEqual(init_op0.type, 'uniform_random')
        self.assertAlmostEqual(init_op0.attr('min'), -4.2, delta=DELTA)
        self.assertAlmostEqual(init_op0.attr('max'), 0.0, delta=DELTA)
        self.assertEqual(init_op0.attr('seed'), 123)
        return block

    def test_uniform_initializer_fp16(self):
        """Test uniform initializer with float16
        """
        block = self.test_uniform_initializer_default_value("float16")
        self.assertTrue(check_cast_op(block.ops[1]))
        block = self.test_uniform_initializer(dtype="float16")
        self.assertTrue(check_cast_op(block.ops[1]))
        block = self.test_uniform_initializer_two_op("float16")
        self.assertTrue(check_cast_op(block.ops[1]))

    def test_uniform_initializer_bf16(self):
        """Test uniform initializer with bfloat16
        """
        block = self.test_uniform_initializer_default_value("uint16")
        self.assertTrue(check_cast_op(block.ops[1]))
        block = self.test_uniform_initializer(dtype="uint16")
        self.assertTrue(check_cast_op(block.ops[1]))
        block = self.test_uniform_initializer_two_op("uint16")
        self.assertTrue(check_cast_op(block.ops[1]))


class TestNormalInitializer(unittest.TestCase):
    def test_normal_initializer_default_value(self):
        """Test the normal initializer with default value
        """
        program = framework.Program()
        block = program.global_block()
        for _ in range(2):
            block.create_parameter(
                dtype="float32",
                shape=[5, 10],
                lod_level=0,
                name="param",
                initializer=initializer.NormalInitializer())
        self.assertEqual(len(block.ops), 1)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'gaussian_random')
        self.assertAlmostEqual(init_op.attr('mean'), 0.0, delta=DELTA)
        self.assertAlmostEqual(init_op.attr('std'), 1.0, delta=DELTA)
        self.assertEqual(init_op.attr('seed'), 0)

    def test_normal_initializer(self, dtype="float32"):
        """Test normal initializer with supplied attributes
        """
        program = framework.Program()
        block = program.global_block()
        for _ in range(2):
            block.create_parameter(
                dtype=dtype,
                shape=[5, 10],
                lod_level=0,
                name="param",
                initializer=initializer.NormalInitializer(2.3, 1.9, 123))
        num_ops = 2 if dtype in ["float16", "uint16"] else 1
        self.assertEqual(len(block.ops), num_ops)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'gaussian_random')
        self.assertAlmostEqual(init_op.attr('mean'), 2.3, delta=DELTA)
        self.assertAlmostEqual(init_op.attr('std'), 1.9, delta=DELTA)
        self.assertEqual(init_op.attr('seed'), 123)
        return block

    def test_normal_initializer_fp16(self):
        """Test normal initializer with float16
        """
        block = self.test_normal_initializer("float16")
        self.assertTrue(check_cast_op(block.ops[1]))

    def test_normal_initializer_bf16(self):
        """Test normal initializer with bfloat16
        """
        block = self.test_normal_initializer("uint16")
        self.assertTrue(check_cast_op(block.ops[1]))


class TestXavierInitializer(unittest.TestCase):
    def test_uniform_xavier_initializer(self):
        """Test Xavier initializer with uniform distribution on
           for matrix multiply.
        """
        program = framework.Program()
        block = program.global_block()
        for _ in range(2):
            param = block.create_parameter(
                dtype="float32",
                shape=[5, 10],
                lod_level=0,
                name="param",
                initializer=initializer.XavierInitializer())
        self.assertEqual(len(block.ops), 1)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'uniform_random')
        limit = np.sqrt(6.0 / (param.shape[0] + param.shape[1]))
        self.assertAlmostEqual(init_op.attr('min'), -limit, delta=DELTA)
        self.assertAlmostEqual(init_op.attr('max'), limit, delta=DELTA)
        self.assertEqual(init_op.attr('seed'), 0)

    def test_uniform_xavier_initializer_conv(self):
        """Test Xavier initializer with uniform distribution on
           for convolutions.
        """
        program = framework.Program()
        block = program.global_block()
        for _ in range(2):
            param = block.create_parameter(
                dtype="float32",
                shape=[5, 10, 15, 20],
                lod_level=0,
                name="param",
                initializer=initializer.XavierInitializer())
        self.assertEqual(len(block.ops), 1)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'uniform_random')
        receptive_field_size = float(15 * 20)
        limit = np.sqrt(6.0 / (
            (param.shape[0] + param.shape[1]) * receptive_field_size))
        self.assertAlmostEqual(init_op.attr('min'), -limit, delta=DELTA)
        self.assertAlmostEqual(init_op.attr('max'), limit, delta=DELTA)
        self.assertEqual(init_op.attr('seed'), 0)

    def test_normal_xavier_initializer(self):
        """Test Xavier initializer with normal distribution on
           for matrix multiply.
        """
        program = framework.Program()
        block = program.global_block()
        for _ in range(2):
            param = block.create_parameter(
                dtype="float32",
                shape=[5, 10],
                lod_level=0,
                name="param",
                initializer=initializer.XavierInitializer(uniform=False))
        self.assertEqual(len(block.ops), 1)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'gaussian_random')
        std = np.sqrt(2.0 / (param.shape[0] + param.shape[1]))
        self.assertAlmostEqual(init_op.attr('mean'), 0.0, delta=DELTA)
        self.assertAlmostEqual(init_op.attr('std'), std, delta=DELTA)
        self.assertEqual(init_op.attr('seed'), 0)

    def test_normal_xavier_initializer_conv(self):
        """Test Xavier initializer with normal distribution on
           for convolutions.
        """
        program = framework.Program()
        block = program.global_block()
        for _ in range(2):
            param = block.create_parameter(
                dtype="float32",
                shape=[5, 10, 15, 20],
                lod_level=0,
                name="param",
                initializer=initializer.XavierInitializer(uniform=False))
        self.assertEqual(len(block.ops), 1)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'gaussian_random')
        receptive_field_size = float(15 * 20)
        std = np.sqrt(2.0 / (
            (param.shape[0] + param.shape[1]) * receptive_field_size))
        self.assertAlmostEqual(init_op.attr('mean'), 0.0, delta=DELTA)
        self.assertAlmostEqual(init_op.attr('std'), std, delta=DELTA)
        self.assertEqual(init_op.attr('seed'), 0)

    def test_xavier_initializer_supplied_arguments(self, dtype="float32"):
        """Test the Xavier initializer with supplied arguments
        """
        program = framework.Program()
        block = program.global_block()
        for _ in range(2):
            block.create_parameter(
                dtype=dtype,
                shape=[5, 10],
                lod_level=0,
                name="param",
                initializer=initializer.XavierInitializer(
                    fan_in=12, fan_out=23, seed=134))
        num_ops = 2 if dtype in ["float16", "uint16"] else 1
        self.assertEqual(len(block.ops), num_ops)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'uniform_random')
        limit = np.sqrt(6.0 / (12 + 23))
        self.assertAlmostEqual(init_op.attr('min'), -limit, delta=DELTA)
        self.assertAlmostEqual(init_op.attr('max'), limit, delta=DELTA)
        self.assertEqual(init_op.attr('seed'), 134)
        return block

    def test_xavier_initializer_fp16(self):
        """Test the Xavier initializer with float16
        """
        block = self.test_xavier_initializer_supplied_arguments("float16")
        self.assertTrue(check_cast_op(block.ops[1]))

    def test_xavier_initializer_bf16(self):
        """Test the Xavier initializer with bfloat16
        """
        block = self.test_xavier_initializer_supplied_arguments("uint16")
        self.assertTrue(check_cast_op(block.ops[1]))


class TestMSRAInitializer(unittest.TestCase):
    def test_uniform_msra_initializer(self):
        """Test MSRA initializer with uniform distribution on
           for matrix multiply.
        """
        program = framework.Program()
        block = program.global_block()
        for _ in range(2):
            param = block.create_parameter(
                dtype="float32",
                shape=[5, 10],
                lod_level=0,
                name="param",
                initializer=initializer.MSRAInitializer())
        self.assertEqual(len(block.ops), 1)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'uniform_random')
        limit = np.sqrt(6.0 / param.shape[0])
        self.assertAlmostEqual(init_op.attr('min'), -limit, delta=DELTA)
        self.assertAlmostEqual(init_op.attr('max'), limit, delta=DELTA)
        self.assertEqual(init_op.attr('seed'), 0)

    def test_uniform_msra_initializer_conv(self):
        """Test MSRA initializer with uniform distribution on
           for convolutions.
        """
        program = framework.Program()
        block = program.global_block()
        for _ in range(2):
            param = block.create_parameter(
                dtype="float32",
                shape=[5, 10, 15, 20],
                lod_level=0,
                name="param",
                initializer=initializer.MSRAInitializer())
        self.assertEqual(len(block.ops), 1)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'uniform_random')
        receptive_field_size = float(15 * 20)
        limit = np.sqrt(6.0 / (param.shape[1] * receptive_field_size))
        self.assertAlmostEqual(init_op.attr('min'), -limit, delta=DELTA)
        self.assertAlmostEqual(init_op.attr('max'), limit, delta=DELTA)
        self.assertEqual(init_op.attr('seed'), 0)

    def test_normal_msra_initializer(self):
        """Test MSRA initializer with normal distribution on
           for matrix multiply.
        """
        program = framework.Program()
        block = program.global_block()
        for _ in range(2):
            param = block.create_parameter(
                dtype="float32",
                shape=[5, 10],
                lod_level=0,
                name="param",
                initializer=initializer.MSRAInitializer(uniform=False))
        self.assertEqual(len(block.ops), 1)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'gaussian_random')
        std = np.sqrt(2.0 / param.shape[0])
        self.assertAlmostEqual(init_op.attr('mean'), 0.0, delta=DELTA)
        self.assertAlmostEqual(init_op.attr('std'), std, delta=DELTA)
        self.assertEqual(init_op.attr('seed'), 0)

    def test_normal_msra_initializer_conv(self):
        """Test MSRA initializer with normal distribution on
           for convolutions.
        """
        program = framework.Program()
        block = program.global_block()
        for _ in range(2):
            param = block.create_parameter(
                dtype="float32",
                shape=[5, 10, 15, 20],
                lod_level=0,
                name="param",
                initializer=initializer.MSRAInitializer(uniform=False))
        self.assertEqual(len(block.ops), 1)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'gaussian_random')
        receptive_field_size = float(15 * 20)
        std = np.sqrt(2.0 / (param.shape[1] * receptive_field_size))
        self.assertAlmostEqual(init_op.attr('mean'), 0.0, delta=DELTA)
        self.assertAlmostEqual(init_op.attr('std'), std, delta=DELTA)
        self.assertEqual(init_op.attr('seed'), 0)

    def test_msra_initializer_supplied_arguments(self, dtype="float32"):
        """Test the MSRA initializer with supplied arguments
        """
        program = framework.Program()
        block = program.global_block()
        for _ in range(2):
            block.create_parameter(
                dtype=dtype,
                shape=[5, 10],
                lod_level=0,
                name="param",
                initializer=initializer.MSRAInitializer(
                    fan_in=12, seed=134))
        num_ops = 2 if dtype in ["float16", "uint16"] else 1
        self.assertEqual(len(block.ops), num_ops)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'uniform_random')
        limit = np.sqrt(6.0 / 12)
        self.assertAlmostEqual(init_op.attr('min'), -limit, delta=DELTA)
        self.assertAlmostEqual(init_op.attr('max'), limit, delta=DELTA)
        self.assertEqual(init_op.attr('seed'), 134)
        return block

    def test_msra_initializer_fp16(self):
        """Test the MSRA initializer with float16
        """
        block = self.test_msra_initializer_supplied_arguments("float16")
        self.assertTrue(check_cast_op(block.ops[1]))

    def test_msra_initializer_bf16(self):
        """Test the MSRA initializer with bfloat16
        """
        block = self.test_msra_initializer_supplied_arguments("uint16")
        self.assertTrue(check_cast_op(block.ops[1]))


class TestBilinearInitializer(unittest.TestCase):
    def test_bilinear_initializer(self, dtype="float32"):
        """Test the bilinear initializer with supplied arguments
        """
        program = framework.Program()
        block = program.global_block()
        for _ in range(2):
            block.create_parameter(
                dtype=dtype,
                shape=[8, 1, 3, 3],
                lod_level=0,
                name="param",
                initializer=initializer.BilinearInitializer())
        num_ops = 2 if dtype in ["float16", "uint16", "float64"] else 1
        self.assertEqual(len(block.ops), num_ops)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'assign_value')
        return block

    def test_bilinear_initializer_fp64(self):
        self.test_bilinear_initializer(dtype='float64')

    def test_bilinear_initializer_fp16(self):
        """Test the bilinear initializer with supplied arguments
        """
        block = self.test_bilinear_initializer("float16")
        self.assertTrue(check_cast_op(block.ops[1]))

    def test_bilinear_initializer_bf16(self):
        """Test the bilinear initializer with supplied arguments
        """
        block = self.test_bilinear_initializer("uint16")
        self.assertTrue(check_cast_op(block.ops[1]))

    def test_type_error(self):
        self.assertRaises(TypeError, self.test_bilinear_initializer, 'int32')


class TestNumpyArrayInitializer(unittest.TestCase):
    def test_numpy_array_initializer(self, dtype="float32"):
        """Test the numpy array initializer with supplied arguments
        """
        import numpy
        program = framework.Program()
        block = program.global_block()
        np_array = numpy.random.random((10000)).astype(dtype)
        for _ in range(2):
            block.create_parameter(
                dtype=np_array.dtype,
                shape=np_array.shape,
                lod_level=0,
                name="param",
                initializer=initializer.NumpyArrayInitializer(np_array))
        num_ops = 2 if dtype in ["float16", "uint16"] else 1
        self.assertEqual(len(block.ops), num_ops)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'assign_value')
        assert (init_op.attr('fp32_values') == np_array).all()
        return block

    def test_numpy_array_initializer_fp16(self):
        """Test the numpy array initializer with float16
        """
        block = self.test_numpy_array_initializer("float16")
        self.assertTrue(block.ops[1])

    def test_numpy_array_initializer_bf16(self):
        """Test the numpy array initializer with bfloat16
        """
        block = self.test_numpy_array_initializer("uint16")
        self.assertTrue(block.ops[1])


class TestSetGlobalInitializer(unittest.TestCase):
    def test_set_global_weight_initializer(self):
        """Test Set Global Param initializer with UniformInitializer
        """
        main_prog = framework.Program()
        startup_prog = framework.Program()
        fluid.set_global_initializer(initializer.Uniform(low=-0.5, high=0.5))
        with fluid.program_guard(main_prog, startup_prog):
            x = fluid.data(name="x", shape=[1, 3, 32, 32])
            # default initializer of param in layers.conv2d is NormalInitializer
            conv = fluid.layers.conv2d(x, 5, 3)

        block = startup_prog.global_block()
        self.assertEqual(len(block.ops), 2)

        # init weight is the first op, and bias is the second
        bias_init_op = block.ops[1]
        self.assertEqual(bias_init_op.type, 'fill_constant')
        self.assertAlmostEqual(bias_init_op.attr('value'), 0.0, delta=DELTA)

        param_init_op = block.ops[0]
        self.assertEqual(param_init_op.type, 'uniform_random')
        self.assertAlmostEqual(param_init_op.attr('min'), -0.5, delta=DELTA)
        self.assertAlmostEqual(param_init_op.attr('max'), 0.5, delta=DELTA)
        self.assertEqual(param_init_op.attr('seed'), 0)
        fluid.set_global_initializer(None)

    def test_set_global_bias_initializer(self):
        """Test Set Global Bias initializer with NormalInitializer
        """
        main_prog = framework.Program()
        startup_prog = framework.Program()
        fluid.set_global_initializer(
            initializer.Uniform(
                low=-0.5, high=0.5),
            bias_init=initializer.Normal(
                loc=0.0, scale=2.0))
        with fluid.program_guard(main_prog, startup_prog):
            x = fluid.data(name="x", shape=[1, 3, 32, 32])
            # default initializer of bias in layers.conv2d is ConstantInitializer
            conv = fluid.layers.conv2d(x, 5, 3)

        block = startup_prog.global_block()
        self.assertEqual(len(block.ops), 2)

        # init weight is the first op, and bias is the second
        bias_init_op = block.ops[1]
        self.assertEqual(bias_init_op.type, 'gaussian_random')
        self.assertAlmostEqual(bias_init_op.attr('mean'), 0.0, delta=DELTA)
        self.assertAlmostEqual(bias_init_op.attr('std'), 2.0, delta=DELTA)
        self.assertEqual(bias_init_op.attr('seed'), 0)

        param_init_op = block.ops[0]
        self.assertEqual(param_init_op.type, 'uniform_random')
        self.assertAlmostEqual(param_init_op.attr('min'), -0.5, delta=DELTA)
        self.assertAlmostEqual(param_init_op.attr('max'), 0.5, delta=DELTA)
        self.assertEqual(param_init_op.attr('seed'), 0)
        fluid.set_global_initializer(None)


class TestUniformInitializerDygraph(unittest.TestCase):
    def test_uniform_initializer(self, dtype="float32"):
        """
        In dygraph mode, we can use initializer directly to initialize a tensor.
        """
        paddle.disable_static()

        tensor = paddle.zeros([1024, 1024, 16])
        tensor.stop_gradient = False
        self.assertTrue(np.allclose(np.zeros((1024, 1024, 16)), tensor.numpy()))

        uniform_ = paddle.nn.initializer.Uniform()
        uniform_(tensor)

        self.assertEqual(tensor.stop_gradient,
                         False)  # stop_gradient is not changed

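        # Values filled by Uniform() should be roughly uniform over [-1, 1].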
        hist, prob = output_hist(tensor.numpy())

        self.assertTrue(
            np.allclose(
                hist, prob, rtol=0, atol=1e-3), "hist: " + str(hist))

        paddle.enable_static()


668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711
# With the same seed and the same TruncatedNormal initializer, dygraph and
# static graph should produce identical parameter values.
class TestConsistencyOfDynamicAndStaticGraph(unittest.TestCase):
    def test_order(self):
        paddle.set_device('cpu')
        SEED = 123
        weight_attr = paddle.framework.ParamAttr(
            name="linear_weight",
            learning_rate=1.0,
            trainable=False,
            regularizer=None,
            initializer=paddle.nn.initializer.TruncatedNormal(
                mean=0.0, std=2.0))
        bias_attr = paddle.framework.ParamAttr(
            name="linear_bias",
            learning_rate=1.0,
            trainable=False,
            regularizer=None,
            initializer=paddle.nn.initializer.TruncatedNormal(
                mean=0.0, std=2.0))

        def run_dynamic_graph():
            paddle.disable_static()
            paddle.seed(SEED)
            linear = paddle.nn.Linear(
                1, 1, weight_attr=weight_attr, bias_attr=bias_attr)
            weight, bias = linear.weight.numpy(), linear.bias.numpy()
            paddle.enable_static()
            return weight, bias

        def run_static_graph():
            paddle.enable_static()
            exe = paddle.static.Executor(paddle.CPUPlace())
            paddle.seed(SEED)
            linear = paddle.nn.Linear(
                1, 1, weight_attr=weight_attr, bias_attr=bias_attr)
            res = exe.run(paddle.static.default_startup_program(),
                          fetch_list=['linear_weight', 'linear_bias'])
            return res[0], res[1]

        dynamic_res = run_dynamic_graph()
        static_res = run_static_graph()

        self.assertTrue(np.array_equal(dynamic_res[0], static_res[0]))
        self.assertTrue(np.array_equal(dynamic_res[1], static_res[1]))


if __name__ == '__main__':
    unittest.main()