#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import math
from . import framework
from . import core
from .framework import _non_static_mode, in_dygraph_mode, _in_legacy_dygraph, default_main_program, _current_expected_place
import numpy as np
from .core import VarDesc
from . import unique_name
from .data_feeder import check_variable_and_dtype, check_type, check_dtype
from paddle import _C_ops

__all__ = [
    'Constant', 'Uniform', 'Normal', 'TruncatedNormal', 'Xavier', 'Bilinear',
    'MSRA', 'ConstantInitializer', 'UniformInitializer', 'NormalInitializer',
    'TruncatedNormalInitializer', 'XavierInitializer', 'BilinearInitializer',
    'MSRAInitializer', 'NumpyArrayInitializer', 'set_global_initializer'
]

_global_weight_initializer_ = None
_global_bias_initializer_ = None


class Initializer(object):
    """Base class for variable initializers

    Defines the common interface of variable initializers.
    They add operations to the init program that are used
    to initialize variables. Users should not use this class
    directly, but need to use one of its implementations.
    """

    def __init__(self):
        pass

    def __call__(self, param, block=None):
        """Add corresponding initialization operations to the network
        """
        raise NotImplementedError()

    def _check_block(self, block):
        if block is None:
            block = default_main_program().global_block()

        return block

    def _compute_fans(self, var):
        """Compute the fan_in and the fan_out for layers

        This method computes the fan_in and the fan_out
        for neural network layers, if not specified. It is
        not possible to perfectly estimate fan_in and fan_out.
        This method will estimate it correctly for matrix multiply and
        convolutions.

        Args:
            var: variable for which fan_in and fan_out have to be computed

        Returns:
            tuple of two integers (fan_in, fan_out)
        """
        shape = var.shape
        if not shape or len(shape) == 0:
            fan_in = fan_out = 1
        elif len(shape) == 1:
            fan_in = fan_out = shape[0]
        elif len(shape) == 2:
            # This is the case for simple matrix multiply
            fan_in = shape[0]
            fan_out = shape[1]
        else:
            # Assume this to be a convolutional kernel
            # In PaddlePaddle, the shape of the kernel is like:
            # [num_filters, num_filter_channels, ...] where the remaining
            # dimensions are the filter_size
            receptive_field_size = np.prod(shape[2:])
            fan_in = shape[1] * receptive_field_size
            fan_out = shape[0] * receptive_field_size

        return (fan_in, fan_out)
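    # Worked example (illustrative only, not executed): for a conv kernel of
    # shape [64, 3, 3, 3] (num_filters=64, num_filter_channels=3, 3x3 filter),
    # receptive_field_size = 3 * 3 = 9, so fan_in = 3 * 9 = 27 and
    # fan_out = 64 * 9 = 576; for a [128, 256] matmul weight,
    # fan_in = 128 and fan_out = 256.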


class ConstantInitializer(Initializer):
    """Implements the constant initializer

    Args:
        value (float32): constant value to initialize the variable 

    Examples:
        .. code-block:: python

            import paddle
            import paddle.fluid as fluid
            paddle.enable_static()
            x = fluid.data(name="data", shape=[8, 32, 32], dtype="float32")
            fc = fluid.layers.fc(
                input=x,
                size=10,
                param_attr=fluid.initializer.Constant(value=2.0))

    """

    def __init__(self, value=0.0, force_cpu=False):
        assert value is not None
        super(ConstantInitializer, self).__init__()
        self._value = value
        self._force_cpu = force_cpu

    def __call__(self, var, block=None):
        """Initialize the input tensor with constant.

        Args:
            var(Tensor): Tensor that needs to be initialized.
            block(Block, optional): The block in which initialization ops
                   should be added. Used in static graph only, default None.

        Returns:
            The initialization op
        """
        block = self._check_block(block)

        assert (isinstance(var, framework.Variable) or
                isinstance(var, framework.EagerParamBase))
        assert isinstance(block, framework.Block)

        if framework._non_static_mode():
            _C_ops.fill_constant(var, 'value',
                                 float(self._value), 'force_cpu',
                                 self._force_cpu, 'dtype',
                                 int(var.dtype), 'str_value',
                                 str(float(self._value)), 'shape', var.shape)
            return None
        else:
            # fill constant should set the "str_value" to preserve precision
            op = block.append_op(
                type="fill_constant",
                outputs={"Out": var},
                attrs={
                    "shape": var.shape,
                    "dtype": int(var.dtype),
                    "value": float(self._value),
                    'str_value': str(float(self._value)),
                    'force_cpu': self._force_cpu
                },
                stop_gradient=True)

            var.op = op
            return op
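    # Note: as with the other initializers below, the dynamic-graph branch
    # fills the tensor eagerly through _C_ops and returns None, while the
    # static-graph branch appends an initialization op to the block and
    # returns that op.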


class UniformInitializer(Initializer):
    """Implements the random uniform distribution initializer

    Args:
        low (float): lower boundary of the uniform distribution
        high (float): upper boundary of the uniform distribution
        seed (int): random seed
        diag_num (int): the number of diagonal elements to initialize.
            If set to 0, diagonal initialization will not be performed.
        diag_step (int): Step size between two diagonal elements,
            which is generally the width of the square matrix.
        diag_val (float): the value of the diagonal element to be initialized,
            default 1.0. It takes effect only if the diag_num is greater than 0.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            x = fluid.data(name='x', shape=[None, 1], dtype='float32')
            fc = fluid.layers.fc(input=x, size=10,
                param_attr=fluid.initializer.Uniform(low=-0.5, high=0.5))
    """

    def __init__(self,
                 low=-1.0,
                 high=1.0,
                 seed=0,
                 diag_num=0,
                 diag_step=0,
                 diag_val=1.0):
        assert low is not None
        assert high is not None
        assert high >= low
        assert seed is not None
        assert diag_num is not None
        assert diag_step is not None
        assert diag_val is not None
        if diag_num > 0 or diag_step > 0:
            assert (diag_num > 0 and diag_step > 0)
        super(UniformInitializer, self).__init__()
        self._low = low
        self._high = high
        self._seed = seed
        self._diag_num = diag_num
        self._diag_step = diag_step
        self._diag_val = diag_val

    def __call__(self, var, block=None):
        """Initialize the input tensor with Uniform distribution.

        Args:
            var(Tensor): Tensor that needs to be initialized.
            block(Block, optional): The block in which initialization ops
                   should be added. Used in static graph only, default None.

        Returns:
            The initialization op
        """
        block = self._check_block(block)

        assert isinstance(block, framework.Block)
        check_variable_and_dtype(var, "Out",
                                 ["uint16", "float16", "float32", "float64"],
                                 "uniform_random")

        if self._seed == 0:
            self._seed = block.program.random_seed

        # to be compatible with fp16 initializers
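        # (the random values are generated into an FP32 temporary variable and
        # cast back to the parameter's dtype below when the parameter is FP16)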
        if var.dtype == VarDesc.VarType.FP16:
            out_dtype = VarDesc.VarType.FP32
            out_var = block.create_var(
                name=unique_name.generate(".".join(
                    ['uniform_random', var.name, 'tmp'])),
                shape=var.shape,
                dtype=out_dtype,
                type=VarDesc.VarType.LOD_TENSOR,
                persistable=False)
        else:
            out_dtype = var.dtype
            out_var = var

        if framework._non_static_mode():
            out_var = _C_ops.uniform_random(
                'shape', var.shape, 'min', self._low, 'max', self._high, 'seed',
                self._seed, 'dtype', out_dtype, 'diag_num', self._diag_num,
                'diag_step', self._diag_step, 'diag_val', self._diag_val)
            if var.dtype == VarDesc.VarType.FP16:
                var_tmp = _C_ops.cast(out_var, 'in_dtype', out_var.dtype,
                                      'out_dtype', var.dtype)
                var_tmp._share_underline_tensor_to(var)
            else:
                out_var._share_underline_tensor_to(var)
            return None
        else:
            op = block.append_op(
                type="uniform_random",
                inputs={},
                outputs={"Out": out_var},
                attrs={
                    "shape": var.shape,
                    "dtype": out_dtype,
                    "min": self._low,
                    "max": self._high,
                    "seed": self._seed,
                    "diag_num": self._diag_num,
                    "diag_step": self._diag_step,
                    "diag_val": self._diag_val
                },
                stop_gradient=True)

            if var.dtype == VarDesc.VarType.FP16:
                block.append_op(
                    type="cast",
                    inputs={"X": out_var},
                    outputs={"Out": var},
                    attrs={"in_dtype": out_var.dtype,
                           "out_dtype": var.dtype})

            var.op = op
            return op


class NormalInitializer(Initializer):
    """Implements the Random Normal(Gaussian) distribution initializer

    Args:
        loc (float): mean of the normal distribution
        scale (float): standard deviation of the normal distribution
        seed (int): random seed

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            x = fluid.data(name="data", shape=[None, 32, 32], dtype="float32")
            fc = fluid.layers.fc(input=x, size=10,
                param_attr=fluid.initializer.Normal(loc=0.0, scale=2.0))

    """

    def __init__(self, loc=0.0, scale=1.0, seed=0):
        assert loc is not None
        assert scale is not None
        assert seed is not None
        super(NormalInitializer, self).__init__()
        self._mean = loc
        self._std_dev = scale
        self._seed = seed

    def __call__(self, var, block=None):
        """Initialize the input tensor with Normal distribution.

        Args:
            var(Tensor): Tensor that needs to be initialized.
            block(Block, optional): The block in which initialization ops
                   should be added. Used in static graph only, default None.

        Returns:
            The initialization op
        """
        block = self._check_block(block)

        assert isinstance(block, framework.Block)

        check_variable_and_dtype(var, "Out",
                                 ["uint16", "float16", "float32", "float64"],
                                 "guassian_random")

        # to be compatible with fp16 initializers
        if var.dtype in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]:
            out_dtype = VarDesc.VarType.FP32
            out_var = block.create_var(
                name=unique_name.generate(".".join(
                    ['normal_init', var.name, 'tmp'])),
                shape=var.shape,
                dtype=out_dtype,
                type=VarDesc.VarType.LOD_TENSOR,
                persistable=False)
        else:
            out_dtype = var.dtype
            out_var = var

        if self._seed == 0:
            self._seed = block.program.random_seed

        if in_dygraph_mode():
            place = _current_expected_place()
            out_var = _C_ops.final_state_gaussian_random(
                var.shape, self._mean, self._std_dev, self._seed, out_dtype,
                place)

            if var.dtype in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]:
                var_tmp = _C_ops.final_state_cast(out_var, var.dtype)
                var_tmp._share_underline_tensor_to(var)
            else:
                out_var._share_underline_tensor_to(var)
            return None

        if _in_legacy_dygraph():
            out_var = _C_ops.gaussian_random(
                'shape', var.shape, 'dtype', out_dtype, 'mean', self._mean,
                'std', self._std_dev, 'seed', self._seed, 'use_mkldnn', False)

            if var.dtype in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]:
                var_tmp = _C_ops.cast(out_var, 'in_dtype', out_var.dtype,
                                      'out_dtype', var.dtype)
                var_tmp._share_underline_tensor_to(var)
            else:
                out_var._share_underline_tensor_to(var)
            return None
        else:
            op = block.append_op(
                type="gaussian_random",
                outputs={"Out": out_var},
                attrs={
                    "shape": var.shape,
                    "dtype": out_dtype,
                    "mean": self._mean,
                    "std": self._std_dev,
                    "seed": self._seed,
                    "use_mkldnn": False
                },
                stop_gradient=True)

            if var.dtype in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]:
                block.append_op(
                    type="cast",
                    inputs={"X": out_var},
                    outputs={"Out": var},
                    attrs={"in_dtype": out_var.dtype,
                           "out_dtype": var.dtype})
            var.op = op
            return op


class TruncatedNormalInitializer(Initializer):
    """Implements the Random TruncatedNormal(Gaussian) distribution initializer

    Args:
        loc (float): mean of the normal distribution
        scale (float): standard deviation of the normal distribution
        seed (int): random seed

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            x = fluid.data(name='x', shape=[None, 1], dtype='float32')
            fc = fluid.layers.fc(input=x, size=10,
                param_attr=fluid.initializer.TruncatedNormal(loc=0.0, scale=2.0))
    """

    def __init__(self, loc=0.0, scale=1.0, seed=0):
        assert loc is not None
        assert scale is not None
        assert seed is not None
        super(TruncatedNormalInitializer, self).__init__()
        self._mean = loc
        self._std_dev = scale
        self._seed = seed

    def __call__(self, var, block=None):
        """Initialize the input tensor with TruncatedNormal distribution.

        Args:
            var(Tensor): Tensor that needs to be initialized.
            block(Block, optional): The block in which initialization ops
                   should be added. Used in static graph only, default None.

        Returns:
            The initialization op
        """
        block = self._check_block(block)

        assert isinstance(var, framework.Variable)
        assert isinstance(block, framework.Block)

        if self._seed == 0:
            self._seed = block.program.random_seed

        # to be compatible with fp16 initializers
        if var.dtype in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]:
            out_dtype = VarDesc.VarType.FP32
            out_var = block.create_var(
                name=unique_name.generate(".".join(
                    ['truncated_gaussian_random', var.name, 'tmp'])),
                shape=var.shape,
                dtype=out_dtype,
                type=VarDesc.VarType.LOD_TENSOR,
                persistable=False)
        else:
            out_dtype = var.dtype
            out_var = var

        if in_dygraph_mode():
            out_var = _C_ops.final_state_truncated_gaussian_random(
                var.shape, self._mean, self._std_dev, self._seed, out_dtype,
                _current_expected_place())
            if var.dtype in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]:
                var_tmp = _C_ops.final_state_cast(out_var, var.dtype)
                var_tmp._share_underline_tensor_to(var)
            else:
                out_var._share_underline_tensor_to(var)
            return None

        if _in_legacy_dygraph():
            out_var = _C_ops.truncated_gaussian_random(
                'shape', var.shape, 'dtype', out_dtype, 'mean', self._mean,
                'std', self._std_dev, 'seed', self._seed)
            if var.dtype in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]:
                var_tmp = _C_ops.cast(out_var, 'in_dtype', out_var.dtype,
                                      'out_dtype', var.dtype)
                var_tmp._share_underline_tensor_to(var)
            else:
                out_var._share_underline_tensor_to(var)
            return None
        else:
            op = block.append_op(
                type="truncated_gaussian_random",
                outputs={"Out": out_var},
                attrs={
                    "shape": var.shape,
                    "dtype": out_dtype,
                    "mean": self._mean,
                    "std": self._std_dev,
                    "seed": self._seed
                },
                stop_gradient=True)

            if var.dtype in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]:
                block.append_op(
                    type="cast",
                    inputs={"X": out_var},
                    outputs={"Out": var},
                    attrs={"in_dtype": out_var.dtype,
                           "out_dtype": var.dtype})
            var.op = op
            return op


class XavierInitializer(Initializer):
    r"""
    This class implements the Xavier weight initializer from the paper
    `Understanding the difficulty of training deep feedforward neural
    networks <http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf>`_
    by Xavier Glorot and Yoshua Bengio.

    This initializer is designed to keep the scale of the gradients
    approximately same in all the layers. In case of Uniform distribution,
    the range is [-x, x], where

    .. math::

        x = \sqrt{\frac{6.0}{fan\_in + fan\_out}}

    In case of Normal distribution, the mean is 0 and the standard deviation
    is

    .. math::

        \sqrt{\frac{2.0}{fan\_in + fan\_out}}


    Args:
        uniform (bool, default True): whether to use the uniform distribution; if False, the normal distribution is used
        fan_in (float, default None): fan_in for Xavier initialization. If None, it is
                inferred from the variable.
        fan_out (float, default None): fan_out for Xavier initialization. If None, it is
                 inferred from the variable.
        seed (int): random seed

    Note:
        It is recommended to set fan_in and fan_out to None for most cases.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            queries = fluid.data(name='x', shape=[None,1], dtype='float32')
            fc = fluid.layers.fc(
                input=queries, size=10,
                param_attr=fluid.initializer.Xavier(uniform=False))

    """

    def __init__(self, uniform=True, fan_in=None, fan_out=None, seed=0):
        assert uniform is not None
        assert seed is not None
        super(XavierInitializer, self).__init__()
        self._uniform = uniform
        self._fan_in = fan_in
        self._fan_out = fan_out
        self._seed = seed

    def __call__(self, var, block=None):
        """Initialize the input tensor with Xavier initialization.

        Args:
            var(Tensor): Tensor that needs to be initialized.
            block(Block, optional): The block in which initialization ops
                   should be added. Used in static graph only, default None.

        Returns:
            The initialization op
        """
        block = self._check_block(block)

        assert isinstance(block, framework.Block)
        check_variable_and_dtype(var, "Out",
                                 ["uint16", "float16", "float32", "float64"],
                                 "xavier_init")

        f_in, f_out = self._compute_fans(var)

        # If fan_in and fan_out are passed, use them
        fan_in = f_in if self._fan_in is None else self._fan_in
        fan_out = f_out if self._fan_out is None else self._fan_out

        if self._seed == 0:
            self._seed = block.program.random_seed

        # to be compatible with fp16 initializers
        if var.dtype == VarDesc.VarType.FP16 or (
                var.dtype == VarDesc.VarType.BF16 and not self._uniform):
            out_dtype = VarDesc.VarType.FP32
            out_var = block.create_var(
                name=unique_name.generate(".".join(
                    ['xavier_init', var.name, 'tmp'])),
                shape=var.shape,
                dtype=out_dtype,
                type=VarDesc.VarType.LOD_TENSOR,
                persistable=False)
        else:
            out_dtype = var.dtype
            out_var = var

        if framework._non_static_mode():
            if self._uniform:
                limit = np.sqrt(6.0 / float(fan_in + fan_out))
                out_var = _C_ops.uniform_random('shape', out_var.shape, 'min',
                                                -limit, 'max', limit, 'seed',
                                                self._seed, 'dtype', out_dtype)
            else:
                std = math.sqrt(2.0 / float(fan_in + fan_out))

                if in_dygraph_mode():
                    place = _current_expected_place()
                    out_var = _C_ops.final_state_gaussian_random(
                        out_var.shape, 0.0, std, self._seed, out_dtype, place)
                else:
                    out_var = _C_ops.gaussian_random(
                        'shape', out_var.shape, 'dtype', out_dtype, 'mean', 0.0,
                        'std', std, 'seed', self._seed)

            if var.dtype == VarDesc.VarType.FP16 or (
                    var.dtype == VarDesc.VarType.BF16 and not self._uniform):
                var_tmp = _C_ops.cast(out_var, 'in_dtype', out_var.dtype,
                                      'out_dtype', var.dtype)
                var_tmp._share_underline_tensor_to(var)
            else:
                out_var._share_underline_tensor_to(var)
            return None
        else:
            if self._uniform:
                limit = np.sqrt(6.0 / float(fan_in + fan_out))
                op = block.append_op(
                    type="uniform_random",
                    inputs={},
                    outputs={"Out": out_var},
                    attrs={
                        "shape": out_var.shape,
                        "dtype": out_dtype,
                        "min": -limit,
                        "max": limit,
                        "seed": self._seed
                    },
                    stop_gradient=True)
            else:
                std = np.sqrt(2.0 / float(fan_in + fan_out))
                op = block.append_op(
                    type="gaussian_random",
                    outputs={"Out": out_var},
                    attrs={
                        "shape": out_var.shape,
                        "dtype": out_dtype,
                        "mean": 0.0,
                        "std": std,
                        "seed": self._seed
                    },
                    stop_gradient=True)

            if var.dtype == VarDesc.VarType.FP16 or (
                    var.dtype == VarDesc.VarType.BF16 and not self._uniform):
                block.append_op(
                    type="cast",
                    inputs={"X": out_var},
                    outputs={"Out": var},
                    attrs={"in_dtype": out_var.dtype,
                           "out_dtype": var.dtype})

            var.op = op
            return op
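    # Numeric sketch (illustrative only): for a [100, 200] weight, fan_in=100
    # and fan_out=200, so the uniform branch draws from
    # [-sqrt(6/300), sqrt(6/300)] ~= [-0.1414, 0.1414], while the normal
    # branch uses std = sqrt(2/300) ~= 0.0816.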


class MSRAInitializer(Initializer):
    r"""Implements the MSRA initializer a.k.a. Kaiming Initializer

    This class implements the weight initialization from the paper
    `Delving Deep into Rectifiers: Surpassing Human-Level Performance on
    ImageNet Classification <https://arxiv.org/abs/1502.01852>`_
    by Kaiming He, Xiangyu Zhang, Shaoqing Ren and Jian Sun. This is a
    robust initialization method that particularly considers the rectifier
    nonlinearities. In case of Uniform distribution, the range is [-x, x], where

    .. math::

        x = \sqrt{\frac{6.0}{fan\_in}}

    In case of Normal distribution, the mean is 0 and the standard deviation
    is

    .. math::

        \sqrt{\frac{2.0}{fan\_in}}

    Args:
        uniform (bool): whether to use uniform or normal distribution
        fan_in (float32|None): fan_in for MSRAInitializer. If None, it is\
        inferred from the variable. default is None.
        seed (int32): random seed

    Note:
        It is recommended to set fan_in to None for most cases.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.fluid as fluid
            paddle.enable_static()
            x = fluid.data(name="data", shape=[8, 32, 32], dtype="float32")
            fc = fluid.layers.fc(input=x, size=10,
                param_attr=fluid.initializer.MSRA(uniform=False))

    """

    def __init__(self, uniform=True, fan_in=None, seed=0):
        """Constructor for MSRAInitializer
        """
        assert uniform is not None
        assert seed is not None
        super(MSRAInitializer, self).__init__()
        self._uniform = uniform
        self._fan_in = fan_in
        self._seed = seed

    def __call__(self, var, block=None):
        """Initialize the input tensor with MSRA initialization.

        Args:
            var(Tensor): Tensor that needs to be initialized.
            block(Block, optional): The block in which initialization ops
                   should be added. Used in static graph only, default None.

        Returns:
            The initialization op
        """
        block = self._check_block(block)

        assert isinstance(var, framework.Variable)
        assert isinstance(block, framework.Block)
        f_in, f_out = self._compute_fans(var)

        # If fan_in is passed, use it
        fan_in = f_in if self._fan_in is None else self._fan_in

        if self._seed == 0:
            self._seed = block.program.random_seed

        # to be compatible with fp16 initializers
        if var.dtype == VarDesc.VarType.FP16 or (
                var.dtype == VarDesc.VarType.BF16 and not self._uniform):
            out_dtype = VarDesc.VarType.FP32
            out_var = block.create_var(
                name=unique_name.generate(".".join(
                    ['masra_init', var.name, 'tmp'])),
                shape=var.shape,
                dtype=out_dtype,
                type=VarDesc.VarType.LOD_TENSOR,
                persistable=False)
        else:
            out_dtype = var.dtype
            out_var = var

        if framework._non_static_mode():
            if self._uniform:
                limit = np.sqrt(6.0 / float(fan_in))
                out_var = _C_ops.uniform_random('shape', out_var.shape, 'min',
                                                -limit, 'max', limit, 'seed',
                                                self._seed, 'dtype',
                                                int(out_dtype))
            else:
                std = math.sqrt(2.0 / float(fan_in))
                if in_dygraph_mode():
                    place = _current_expected_place()
                    out_var = _C_ops.final_state_gaussian_random(
                        out_var.shape, 0.0, std, self._seed, out_dtype, place)
                else:
                    out_var = _C_ops.gaussian_random(
                        'shape', out_var.shape, 'dtype',
                        int(out_dtype), 'mean', 0.0, 'std', std, 'seed',
                        self._seed)

            if var.dtype == VarDesc.VarType.FP16 or (
                    var.dtype == VarDesc.VarType.BF16 and not self._uniform):
                var_tmp = _C_ops.cast(out_var, 'in_dtype', out_var.dtype,
                                      'out_dtype', var.dtype)
                var_tmp._share_underline_tensor_to(var)
            else:
                out_var._share_underline_tensor_to(var)
            return None
        else:
            if self._uniform:
                limit = np.sqrt(6.0 / float(fan_in))
                op = block.append_op(
                    type="uniform_random",
                    inputs={},
                    outputs={"Out": out_var},
                    attrs={
                        "shape": out_var.shape,
                        "dtype": int(out_dtype),
                        "min": -limit,
                        "max": limit,
                        "seed": self._seed
                    },
                    stop_gradient=True)

            else:
                std = np.sqrt(2.0 / float(fan_in))
                op = block.append_op(
                    type="gaussian_random",
                    outputs={"Out": out_var},
                    attrs={
                        "shape": out_var.shape,
                        "dtype": int(out_dtype),
                        "mean": 0.0,
                        "std": std,
                        "seed": self._seed
                    },
                    stop_gradient=True)

            if var.dtype == VarDesc.VarType.FP16 or (
                    var.dtype == VarDesc.VarType.BF16 and not self._uniform):
                block.append_op(
                    type="cast",
                    inputs={"X": out_var},
                    outputs={"Out": var},
                    attrs={"in_dtype": out_var.dtype,
                           "out_dtype": var.dtype})

            var.op = op
            return op
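    # Numeric sketch (illustrative only): for a conv kernel of shape
    # [64, 16, 3, 3], fan_in = 16 * 9 = 144, so the uniform branch draws from
    # [-sqrt(6/144), sqrt(6/144)] ~= [-0.2041, 0.2041] and the normal branch
    # uses std = sqrt(2/144) ~= 0.1179.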


class BilinearInitializer(Initializer):
    """
    This initializer can be used in a transposed convolution operator to
    act as upsampling. Users can upsample a feature map with shape of
    (B, C, H, W) by any integer factor. The usage is:

    Examples:

        .. code-block:: python

            import math

            import paddle
            import paddle.nn as nn
            from paddle.regularizer import L2Decay

            factor = 2
            C = 2
            B = 8
            H = W = 32
            w_attr = paddle.ParamAttr(learning_rate=0.,
                                      regularizer=L2Decay(0.),
                                      initializer=nn.initializer.Bilinear())
            data = paddle.rand([B, 3, H, W], dtype='float32')
            conv_up = nn.Conv2DTranspose(3,
                                         out_channels=C,
                                         kernel_size=2 * factor - factor % 2,
                                         padding=int(
                                             math.ceil((factor - 1) / 2.)),
                                         stride=factor,
                                         weight_attr=w_attr,
                                         bias_attr=False)
            x = conv_up(data)

    Here, `out_channels=C` and `groups=C` means this is a channel-wise transposed
    convolution. The filter shape will be (C, 1, K, K), where K is `kernel_size`.
    This initializer will set a (K, K) interpolation kernel for every channel
    of the filter identically. The resulting shape of the output feature map
    will be (B, C, factor * H, factor * W). Note that the learning rate and the
    weight decay are set to 0 in order to keep coefficient values of bilinear
    interpolation unchanged during training.

    """

    def __init__(self):
        """Constructor for BilinearInitializer.
        """
        super(BilinearInitializer, self).__init__()

    def __call__(self, var, block=None):
        """Initialize the input tensor with Bilinear initialization.

        Args:
            var(Tensor): Tensor that needs to be initialized.
            block(Block, optional): The block in which initialization ops
                   should be added. Used in static graph only, default None.

        Returns:
            The initialization op
        """
        block = self._check_block(block)

        if not isinstance(var, framework.Variable):
            raise ValueError("var must be framework.Variable.")

        if not isinstance(block, framework.Block):
            raise ValueError("block must be framework.Block.")

        shape = var.shape
        if len(shape) != 4:
            raise ValueError("the length of shape must be 4.")
        if shape[2] != shape[3]:
            raise ValueError("shape[2] must be equal to shape[3].")

        weight = np.zeros(np.prod(var.shape), dtype='float32')
        size = shape[3]
        # factor
        f = np.ceil(size / 2.)
        # center
        c = (2 * f - 1 - f % 2) / (2. * f)
        for i in range(np.prod(shape)):
            x = i % size
            y = (i // size) % size
            weight[i] = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
        weight = np.reshape(weight, shape)
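        # Worked example (illustrative only): for kernel_size K = 4, f = 2 and
        # c = 0.75, so the 1-D profile is [0.25, 0.75, 0.75, 0.25] and each
        # (K, K) kernel is the outer product of that profile with itself.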

        # to be compatible with fp16 initializers
        if var.dtype in [
                VarDesc.VarType.FP16, VarDesc.VarType.BF16, VarDesc.VarType.FP64
        ]:
            out_dtype = VarDesc.VarType.FP32
            out_var = block.create_var(
                name=unique_name.generate(".".join(
                    ['bilinear_init', var.name, 'tmp'])),
                shape=var.shape,
                dtype=out_dtype,
                type=VarDesc.VarType.LOD_TENSOR,
                persistable=False)
        else:
            out_dtype = var.dtype
            out_var = var

        if out_dtype == VarDesc.VarType.FP32:
            value_name = "fp32_values"
            values = [float(v) for v in weight.flat]
        else:
            raise TypeError("Unsupported dtype %s", var.dtype)

        if np.prod(shape) > 1024 * 1024:
            raise ValueError("The size of input is too big. ")

        if framework._non_static_mode():
            _C_ops.assign_value(out_var, 'shape',
                                list(shape), 'dtype', out_dtype, value_name,
                                values)
            if var.dtype in [
                    VarDesc.VarType.FP16, VarDesc.VarType.BF16,
                    VarDesc.VarType.FP64
            ]:
                var_tmp = _C_ops.cast(out_var, 'in_dtype', out_var.dtype,
                                      'out_dtype', var.dtype)
                var_tmp._share_underline_tensor_to(var)
            else:
                out_var._share_underline_tensor_to(var)
            return None
        else:
            op = block.append_op(
                type='assign_value',
                outputs={'Out': [out_var]},
                attrs={
                    'dtype': out_dtype,
                    'shape': list(shape),
                    value_name: values
                })

            if var.dtype in [
                    VarDesc.VarType.FP16, VarDesc.VarType.BF16,
                    VarDesc.VarType.FP64
            ]:
                block.append_op(
                    type="cast",
                    inputs={"X": out_var},
                    outputs={"Out": var},
                    attrs={"in_dtype": out_var.dtype,
                           "out_dtype": var.dtype})

            var.op = op
            return op


class NumpyArrayInitializer(Initializer):
    """Init an parameter with an numpy array
982
    This op initialize the variable by numpy array.

    Args:
        value (numpy): numpy array to initialize the variable

    Returns:
        A Tensor variable initialized by numpy.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy
            x = fluid.data(name="x", shape=[2, 1], dtype='float32')
            fc = fluid.layers.fc(input=x, size=10,
                param_attr=fluid.initializer.NumpyArrayInitializer(numpy.array([1,2])))
    """

    def __init__(self, value):
        import numpy
        assert isinstance(value, numpy.ndarray)
        super(NumpyArrayInitializer, self).__init__()
        self._value = value

    def __call__(self, var, block=None):
        """Initialize the input tensor with Numpy array.

        Args:
            var(Tensor): Tensor that needs to be initialized.
            block(Block, optional): The block in which initialization ops
                   should be added. Used in static graph only, default None.

        Returns:
            The initialization op
        """
        block = self._check_block(block)

        assert isinstance(var, framework.Variable)
        assert isinstance(block, framework.Block)

        # to be compatible with fp16 initializers
        if var.dtype in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]:
            out_dtype = VarDesc.VarType.FP32
            np_value = self._value.astype("float32")
            out_var = block.create_var(
                name=unique_name.generate(".".join(
                    ['numpy_array_init', var.name, 'tmp'])),
                shape=var.shape,
                dtype=out_dtype,
                type=VarDesc.VarType.LOD_TENSOR,
                persistable=False)
        else:
            out_var = var
            out_dtype = var.dtype
            np_value = self._value

        if out_dtype == VarDesc.VarType.FP32:
            value_name = "fp32_values"
            values = [float(v) for v in np_value.flat]
        elif out_dtype == VarDesc.VarType.INT32:
            value_name = "int32_values"
            values = [int(v) for v in np_value.flat]
        else:
            raise ValueError("Unsupported dtype %s", self._value.dtype)
        if self._value.size > 1024 * 1024 * 1024:
            raise ValueError("The size of input is too big. Please consider "
                             "saving it to file and 'load_op' to load it")

        if framework._non_static_mode():
            _C_ops.assign_value(out_var, 'shape',
                                list(self._value.shape), 'dtype', out_dtype,
                                value_name, values)
            if var.dtype in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]:
                var_tmp = _C_ops.cast(out_var, 'in_dtype', out_var.dtype,
                                      'out_dtype', var.dtype)
                var_tmp._share_underline_tensor_to(var)
            else:
                out_var._share_underline_tensor_to(var)
            return None
        else:
            op = block.append_op(
                type='assign_value',
                outputs={'Out': out_var},
                attrs={
                    'dtype': out_dtype,
                    'shape': list(self._value.shape),
                    value_name: values
                },
                stop_gradient=True)

            if var.dtype in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]:
                block.append_op(
                    type="cast",
                    inputs={"X": out_var},
                    outputs={"Out": var},
                    attrs={"in_dtype": out_var.dtype,
                           "out_dtype": var.dtype})

            var.op = op
            return op


def set_global_initializer(weight_init, bias_init=None):
    """
    This API is used to set up global model parameter initializer in framework.

    After this API is invoked, the global initializer will take effect in subsequent code.

    The model parameters include ``weight`` and ``bias`` . In the framework, they correspond 
    to ``paddle.ParamAttr`` , which is inherited from ``paddle.Tensor`` , and is a persistable Variable.
    This API only takes effect for model parameters, not for variables created through APIs such as 
    :ref:`api_fluid_layers_create_global_var` , :ref:`api_fluid_layers_create_tensor`.
    
    If the initializer is also set up by ``param_attr`` or ``bias_attr`` when creating a network layer,
    the global initializer setting here will not take effect because it has a lower priority.

    If you want to cancel the global initializer in framework, please set global initializer to ``None`` .

    Args:
        weight_init (Initializer): set the global initializer for ``weight`` of model parameters.
        bias_init (Initializer, optional): set the global initializer for ``bias`` of model parameters. 
            Default: None.

    Returns:
        None

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn as nn

            nn.initializer.set_global_initializer(nn.initializer.Uniform(), nn.initializer.Constant())
            x_var = paddle.uniform((2, 4, 8, 8), dtype='float32', min=-1., max=1.)

            # The weight of conv1 is initialized by Uniform
            # The bias of conv1 is initialized by Constant
            conv1 = nn.Conv2D(4, 6, (3, 3))
            y_var1 = conv1(x_var)

            # If set param_attr/bias_attr too, global initializer will not take effect
            # The weight of conv2 is initialized by Xavier
            # The bias of conv2 is initialized by Normal
            conv2 = nn.Conv2D(4, 6, (3, 3), 
                weight_attr=nn.initializer.XavierUniform(),
                bias_attr=nn.initializer.Normal())
            y_var2 = conv2(x_var)

            # Cancel the global initializer in the framework; it will take effect in subsequent code
            nn.initializer.set_global_initializer(None)
    """

    check_type(weight_init, 'weight_init', (Initializer, type(None)),
               'set_global_initializer')
    global _global_weight_initializer_
    _global_weight_initializer_ = weight_init

    check_type(bias_init, 'bias_init', (Initializer, type(None)),
               'set_global_initializer')
    global _global_bias_initializer_
    _global_bias_initializer_ = bias_init


def _global_weight_initializer():
    """
    Return the global weight initializer. Users do not need to call this directly.
    """
    return _global_weight_initializer_


def _global_bias_initializer():
    """
    Return the global bias initializer. Users do not need to call this directly.
    """
    return _global_bias_initializer_


def calculate_gain(nonlinearity, param=None):
    """
    Get the recommended ``gain`` value of some nonlinearity function. ``gain`` value can be used in some 
    ``paddle.nn.initializer`` api to adjust the initialization value.

    Args:
        nonlinearity(str): name of nonlinearity activation function. If it is a linear function, such as: 
            `linear/conv1d/conv2d/conv3d/conv1d_transpose/conv2d_transpose/conv3d_transpose` , 1.0 will be returned.
        param(bool|int|float, optional): optional parameter for some nonlinearity functions. Now, it only applies to 
            'leaky_relu'. Default: None, it will be calculated as 0.01 in the formula.

    Returns:
        A float value, which is the recommended gain for this nonlinearity function.

    Examples:
        .. code-block:: python

            import paddle
            gain = paddle.nn.initializer.calculate_gain('tanh') # 5.0 / 3
            gain = paddle.nn.initializer.calculate_gain('leaky_relu', param=1.0) # 1.0 = math.sqrt(2.0 / (1+param^2))

    """
    if param is None:
        param = 0.01
    else:
        assert isinstance(param, (bool, int, float))
        param = float(param)
    recommended_gain = {
        'sigmoid': 1,
        'linear': 1,
        'conv1d': 1,
        'conv2d': 1,
        'conv3d': 1,
        'conv1d_transpose': 1,
        'conv2d_transpose': 1,
        'conv3d_transpose': 1,
        'tanh': 5.0 / 3,
        'relu': math.sqrt(2.0),
        'leaky_relu': math.sqrt(2.0 / (1 + param**2)),
        'selu': 3.0 / 4
    }
    if nonlinearity in recommended_gain.keys():
        return recommended_gain[nonlinearity]
    else:
        raise ValueError("nonlinearity function {} is not suppported now.".
                         format(nonlinearity))
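# Numeric sketch (illustrative only): with the default param of 0.01,
# calculate_gain('leaky_relu') returns sqrt(2 / (1 + 0.01**2)) ~= 1.4141,
# very close to the plain 'relu' gain of sqrt(2) ~= 1.4142.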


# We shorten the class name, since users will use the initializer with the package
# name. The sample code:
#
# import paddle.fluid as fluid
#
# hidden = fluid.layers.fc(...,
#                          param_attr=ParamAttr(fluid.initializer.Xavier()))
#
# There is no need to add an `Initializer` suffix to the class name
Constant = ConstantInitializer
Uniform = UniformInitializer
Normal = NormalInitializer
TruncatedNormal = TruncatedNormalInitializer
Xavier = XavierInitializer
MSRA = MSRAInitializer
Bilinear = BilinearInitializer