#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import math
from . import framework
from . import core
from .framework import _non_static_mode, in_dygraph_mode, _in_legacy_dygraph, default_main_program, _current_expected_place
import numpy as np
from .core import VarDesc
from . import unique_name
from .data_feeder import check_variable_and_dtype, check_type, check_dtype
from paddle import _C_ops

__all__ = [
    'Constant', 'Uniform', 'Normal', 'TruncatedNormal', 'Xavier', 'Bilinear',
    'MSRA', 'ConstantInitializer', 'UniformInitializer', 'NormalInitializer',
    'TruncatedNormalInitializer', 'XavierInitializer', 'BilinearInitializer',
    'MSRAInitializer', 'NumpyArrayInitializer', 'set_global_initializer'
]

_global_weight_initializer_ = None
_global_bias_initializer_ = None


class Initializer(object):
    """Base class for variable initializers

    Defines the common interface of variable initializers.
    They add operations to the init program that are used
    to initialize variables. Users should not use this class
    directly, but need to use one of its implementations.
    """

    def __init__(self):
        pass

    def __call__(self, param, block=None):
        """Add corresponding initialization operations to the network
        """
        raise NotImplementedError()

    def _check_block(self, block):
        if block is None:
            block = default_main_program().global_block()

        return block

    def _compute_fans(self, var):
        """Compute the fan_in and the fan_out for layers

        This method computes the fan_in and the fan_out
        for neural network layers, if not specified. It is
        not possible to perfectly estimate fan_in and fan_out.
        This method will estimate it correctly for matrix multiply and
        convolutions.

        Args:
            var: variable for which fan_in and fan_out have to be computed

        Returns:
            tuple of two integers (fan_in, fan_out)
        """
        shape = var.shape
        if not shape or len(shape) == 0:
            fan_in = fan_out = 1
        elif len(shape) == 1:
            fan_in = fan_out = shape[0]
        elif len(shape) == 2:
            # This is the case for simple matrix multiply
            fan_in = shape[0]
            fan_out = shape[1]
        else:
            # Assume this to be a convolutional kernel
            # In PaddlePaddle, the shape of the kernel is like:
            # [num_filters, num_filter_channels, ...] where the remaining
            # dimensions are the filter_size
            receptive_field_size = np.prod(shape[2:])
            fan_in = shape[1] * receptive_field_size
            fan_out = shape[0] * receptive_field_size

        return (fan_in, fan_out)
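
    # A hedged worked example of the fan computation above (shapes assumed
    # purely for illustration): a Conv2D kernel shaped [64, 3, 3, 3] has
    # receptive_field_size = 3 * 3 = 9, so fan_in = 3 * 9 = 27 and
    # fan_out = 64 * 9 = 576, while a 2-D weight shaped [128, 256] gives
    # fan_in = 128 and fan_out = 256.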


class ConstantInitializer(Initializer):
    """Implements the constant initializer

    Args:
        value (float32): constant value to initialize the variable 

    Examples:
        .. code-block:: python

            import paddle
            import paddle.fluid as fluid
            paddle.enable_static()
            x = fluid.data(name="data", shape=[8, 32, 32], dtype="float32")
            fc = fluid.layers.fc(
                input=x,
                size=10,
                param_attr=fluid.initializer.Constant(value=2.0))

    """

    def __init__(self, value=0.0, force_cpu=False):
        assert value is not None
        super(ConstantInitializer, self).__init__()
        self._value = value
        self._force_cpu = force_cpu

    def __call__(self, var, block=None):
        """Initialize the input tensor with constant.

        Args:
            var(Tensor): Tensor that needs to be initialized.
            block(Block, optional): The block in which initialization ops
                   should be added. Used in static graph only, default None.

        Returns:
            The initialization op
        """
        block = self._check_block(block)

        assert (isinstance(var, framework.Variable)
                or isinstance(var, framework.EagerParamBase))
        assert isinstance(block, framework.Block)

        if framework._non_static_mode():
            _C_ops.fill_constant(var, 'value', float(self._value),
                                 'force_cpu', self._force_cpu, 'dtype',
                                 int(var.dtype), 'str_value',
                                 str(float(self._value)), 'shape', var.shape)
            return None
        else:
            # fill constant should set the "str_value" to preserve precision
            op = block.append_op(type="fill_constant",
                                 outputs={"Out": var},
                                 attrs={
                                     "shape": var.shape,
                                     "dtype": int(var.dtype),
                                     "value": float(self._value),
                                     'str_value': str(float(self._value)),
                                     'force_cpu': self._force_cpu
                                 },
                                 stop_gradient=True)

            var.op = op
            return op


class UniformInitializer(Initializer):
    """Implements the random uniform distribution initializer

    Args:
        low (float): lower boundary of the uniform distribution
        high (float): upper boundary of the uniform distribution
        seed (int): random seed
        diag_num (int): the number of diagonal elements to initialize.
            If set to 0, diagonal initialization will not be performed.
        diag_step (int): Step size between two diagonal elements,
            which is generally the width of the square matrix.
        diag_val (float): the value of the diagonal element to be initialized,
            default 1.0. It takes effect only if the diag_num is greater than 0.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            x = fluid.data(name='x', shape=[None, 1], dtype='float32')
            fc = fluid.layers.fc(input=x, size=10,
                param_attr=fluid.initializer.Uniform(low=-0.5, high=0.5))
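
            # A hedged sketch (seed value assumed): a fixed seed makes the
            # random initialization reproducible; the diag_* arguments are
            # typically combined with a square weight, with diag_step set to
            # the matrix width as described in the Args section above.
            fc2 = fluid.layers.fc(input=x, size=10,
                param_attr=fluid.initializer.Uniform(low=-0.5, high=0.5, seed=123))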
    """

    def __init__(self,
                 low=-1.0,
                 high=1.0,
                 seed=0,
                 diag_num=0,
                 diag_step=0,
                 diag_val=1.0):
        assert low is not None
        assert high is not None
        assert high >= low
        assert seed is not None
        assert diag_num is not None
        assert diag_step is not None
        assert diag_val is not None
        if diag_num > 0 or diag_step > 0:
            assert (diag_num > 0 and diag_step > 0)
        super(UniformInitializer, self).__init__()
        self._low = low
        self._high = high
        self._seed = seed
        self._diag_num = diag_num
        self._diag_step = diag_step
        self._diag_val = diag_val

    def __call__(self, var, block=None):
        """Initialize the input tensor with Uniform distribution.

        Args:
            var(Tensor): Tensor that needs to be initialized.
            block(Block, optional): The block in which initialization ops
                   should be added. Used in static graph only, default None.

        Returns:
            The initialization op
        """
        block = self._check_block(block)

        assert isinstance(block, framework.Block)
        check_variable_and_dtype(var, "Out",
                                 ["uint16", "float16", "float32", "float64"],
                                 "uniform_random")

        if self._seed == 0:
            self._seed = block.program.random_seed

        # to be compatible with fp16 initializers
        if var.dtype == VarDesc.VarType.FP16:
            out_dtype = VarDesc.VarType.FP32
            out_var = block.create_var(name=unique_name.generate(".".join(
                ['uniform_random', var.name, 'tmp'])),
                                       shape=var.shape,
                                       dtype=out_dtype,
                                       type=VarDesc.VarType.LOD_TENSOR,
                                       persistable=False)
        else:
            out_dtype = var.dtype
            out_var = var

        if framework._non_static_mode():
            out_var = _C_ops.uniform_random(
                'shape', var.shape, 'min', self._low, 'max', self._high, 'seed',
                self._seed, 'dtype', out_dtype, 'diag_num', self._diag_num,
                'diag_step', self._diag_step, 'diag_val', self._diag_val)
            if var.dtype == VarDesc.VarType.FP16:
                var_tmp = _C_ops.cast(out_var, 'in_dtype', out_var.dtype,
                                      'out_dtype', var.dtype)
                var_tmp._share_underline_tensor_to(var)
            else:
                out_var._share_underline_tensor_to(var)
            return None
        else:
            op = block.append_op(type="uniform_random",
                                 inputs={},
                                 outputs={"Out": out_var},
                                 attrs={
                                     "shape": var.shape,
                                     "dtype": out_dtype,
                                     "min": self._low,
                                     "max": self._high,
                                     "seed": self._seed,
                                     "diag_num": self._diag_num,
                                     "diag_step": self._diag_step,
                                     "diag_val": self._diag_val
                                 },
                                 stop_gradient=True)

            if var.dtype == VarDesc.VarType.FP16:
                block.append_op(type="cast",
                                inputs={"X": out_var},
                                outputs={"Out": var},
                                attrs={
                                    "in_dtype": out_var.dtype,
                                    "out_dtype": var.dtype
                                })

            var.op = op
            return op


class NormalInitializer(Initializer):
    """Implements the Random Normal(Gaussian) distribution initializer

    Args:
        loc (float): mean of the normal distribution
        scale (float): standard deviation of the normal distribution
        seed (int): random seed

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            x = fluid.data(name="data", shape=[None, 32, 32], dtype="float32")
            fc = fluid.layers.fc(input=x, size=10,
                param_attr=fluid.initializer.Normal(loc=0.0, scale=2.0))

    """

    def __init__(self, loc=0.0, scale=1.0, seed=0):
        assert loc is not None
        assert scale is not None
        assert seed is not None
        super(NormalInitializer, self).__init__()
        self._mean = loc
        self._std_dev = scale
        self._seed = seed

    def __call__(self, var, block=None):
        """Initialize the input tensor with Normal distribution.

        Args:
            var(Tensor): Tensor that needs to be initialized.
            block(Block, optional): The block in which initialization ops
                   should be added. Used in static graph only, default None.

        Returns:
            The initialization op
        """
        block = self._check_block(block)

        assert isinstance(block, framework.Block)

        check_variable_and_dtype(var, "Out",
                                 ["uint16", "float16", "float32", "float64"],
                                 "gaussian_random")

        # to be compatible with fp16 initializers
        if var.dtype in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]:
            out_dtype = VarDesc.VarType.FP32
            out_var = block.create_var(name=unique_name.generate(".".join(
                ['normal_init', var.name, 'tmp'])),
                                       shape=var.shape,
                                       dtype=out_dtype,
                                       type=VarDesc.VarType.LOD_TENSOR,
                                       persistable=False)
        else:
            out_dtype = var.dtype
            out_var = var

        if self._seed == 0:
            self._seed = block.program.random_seed

        if in_dygraph_mode():
            place = _current_expected_place()
            out_var = _C_ops.final_state_gaussian_random(
                var.shape, self._mean, self._std_dev, self._seed, out_dtype,
                place)

            if var.dtype in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]:
                var_tmp = _C_ops.final_state_cast(out_var, var.dtype)
                var_tmp._share_underline_tensor_to(var)
            else:
                out_var._share_underline_tensor_to(var)
            return None

        if _in_legacy_dygraph():
            out_var = _C_ops.gaussian_random('shape', var.shape, 'dtype',
                                             out_dtype, 'mean', self._mean,
                                             'std', self._std_dev, 'seed',
                                             self._seed, 'use_mkldnn', False)

            if var.dtype in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]:
                var_tmp = _C_ops.cast(out_var, 'in_dtype', out_var.dtype,
                                      'out_dtype', var.dtype)
                var_tmp._share_underline_tensor_to(var)
            else:
                out_var._share_underline_tensor_to(var)
            return None
        else:
            op = block.append_op(type="gaussian_random",
                                 outputs={"Out": out_var},
                                 attrs={
                                     "shape": var.shape,
                                     "dtype": out_dtype,
                                     "mean": self._mean,
                                     "std": self._std_dev,
                                     "seed": self._seed,
                                     "use_mkldnn": False
                                 },
                                 stop_gradient=True)

            if var.dtype in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]:
                block.append_op(type="cast",
                                inputs={"X": out_var},
                                outputs={"Out": var},
                                attrs={
                                    "in_dtype": out_var.dtype,
                                    "out_dtype": var.dtype
                                })
            var.op = op
            return op


class TruncatedNormalInitializer(Initializer):
    """Implements the Random TruncatedNormal(Gaussian) distribution initializer

    Args:
        loc (float): mean of the normal distribution
        scale (float): standard deviation of the normal distribution
        seed (int): random seed

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            x = fluid.data(name='x', shape=[None, 1], dtype='float32')
            fc = fluid.layers.fc(input=x, size=10,
                param_attr=fluid.initializer.TruncatedNormal(loc=0.0, scale=2.0))
    """

    def __init__(self, loc=0.0, scale=1.0, seed=0):
        assert loc is not None
        assert scale is not None
        assert seed is not None
        super(TruncatedNormalInitializer, self).__init__()
        self._mean = loc
        self._std_dev = scale
        self._seed = seed

    def __call__(self, var, block=None):
        """Initialize the input tensor with TruncatedNormal distribution.

        Args:
            var(Tensor): Tensor that needs to be initialized.
            block(Block, optional): The block in which initialization ops
                   should be added. Used in static graph only, default None.

        Returns:
            The initialization op
        """
        block = self._check_block(block)

        assert isinstance(var, framework.Variable)
        assert isinstance(block, framework.Block)

        if self._seed == 0:
            self._seed = block.program.random_seed

        # to be compatible with fp16 initializers
        if var.dtype in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]:
            out_dtype = VarDesc.VarType.FP32
            out_var = block.create_var(name=unique_name.generate(".".join(
                ['truncated_gaussian_random', var.name, 'tmp'])),
                                       shape=var.shape,
                                       dtype=out_dtype,
                                       type=VarDesc.VarType.LOD_TENSOR,
                                       persistable=False)
        else:
            out_dtype = var.dtype
            out_var = var

        if in_dygraph_mode():
            out_var = _C_ops.final_state_truncated_gaussian_random(
                var.shape, self._mean, self._std_dev, self._seed, out_dtype,
                _current_expected_place())
            if var.dtype in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]:
                var_tmp = _C_ops.final_state_cast(out_var, var.dtype)
                var_tmp._share_underline_tensor_to(var)
            else:
                out_var._share_underline_tensor_to(var)
            return None

        if _in_legacy_dygraph():
            out_var = _C_ops.truncated_gaussian_random('shape', var.shape,
                                                       'dtype', out_dtype,
                                                       'mean', self._mean,
                                                       'std', self._std_dev,
                                                       'seed', self._seed)
            if var.dtype in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]:
                var_tmp = _C_ops.cast(out_var, 'in_dtype', out_var.dtype,
                                      'out_dtype', var.dtype)
                var_tmp._share_underline_tensor_to(var)
            else:
                out_var._share_underline_tensor_to(var)
            return None
        else:
            op = block.append_op(type="truncated_gaussian_random",
                                 outputs={"Out": out_var},
                                 attrs={
                                     "shape": var.shape,
                                     "dtype": out_dtype,
                                     "mean": self._mean,
                                     "std": self._std_dev,
                                     "seed": self._seed
                                 },
                                 stop_gradient=True)

            if var.dtype in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]:
                block.append_op(type="cast",
                                inputs={"X": out_var},
                                outputs={"Out": var},
                                attrs={
                                    "in_dtype": out_var.dtype,
                                    "out_dtype": var.dtype
                                })
            var.op = op
            return op


class XavierInitializer(Initializer):
    r"""
    This class implements the Xavier weight initializer from the paper
    `Understanding the difficulty of training deep feedforward neural
    networks <http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf>`_
    by Xavier Glorot and Yoshua Bengio.

    This initializer is designed to keep the scale of the gradients
    approximately same in all the layers. In case of Uniform distribution,
    the range is [-x, x], where

    .. math::

        x = \sqrt{\\frac{6.0}{fan\_in + fan\_out}}

    In case of Normal distribution, the mean is 0 and the standard deviation
    is

    .. math::

        \sqrt{\\frac{2.0}{fan\_in + fan\_out}}
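
    For example (a hedged illustration with assumed fans), a layer with
    :math:`fan\_in = 27` and :math:`fan\_out = 576` gets a uniform range of
    about :math:`\pm 0.0998`, or a normal standard deviation of about
    :math:`0.0576`.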


    Args:
        uniform (bool, default True): whether to use the uniform distribution; if False, use the normal distribution
        fan_in (float, default None): fan_in for Xavier initialization. If None, it is
                inferred from the variable.
        fan_out (float, default None): fan_out for Xavier initialization. If None, it is
                 inferred from the variable.
        seed (int): random seed

    Note:
        It is recommended to set fan_in and fan_out to None for most cases.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            queries = fluid.data(name='x', shape=[None,1], dtype='float32')
            fc = fluid.layers.fc(
                input=queries, size=10,
                param_attr=fluid.initializer.Xavier(uniform=False))

    """

    def __init__(self, uniform=True, fan_in=None, fan_out=None, seed=0):
        assert uniform is not None
        assert seed is not None
        super(XavierInitializer, self).__init__()
        self._uniform = uniform
        self._fan_in = fan_in
        self._fan_out = fan_out
        self._seed = seed

    def __call__(self, var, block=None):
        """Initialize the input tensor with Xavier initialization.

        Args:
            var(Tensor): Tensor that needs to be initialized.
            block(Block, optional): The block in which initialization ops
                   should be added. Used in static graph only, default None.

        Returns:
            The initialization op
        """
        block = self._check_block(block)

        assert isinstance(block, framework.Block)
        check_variable_and_dtype(var, "Out",
                                 ["uint16", "float16", "float32", "float64"],
                                 "xavier_init")

        f_in, f_out = self._compute_fans(var)

        # If fan_in and fan_out are passed, use them
        fan_in = f_in if self._fan_in is None else self._fan_in
        fan_out = f_out if self._fan_out is None else self._fan_out

        if self._seed == 0:
            self._seed = block.program.random_seed

        # to be compatible with fp16 initializers
        if var.dtype == VarDesc.VarType.FP16 or (
                var.dtype == VarDesc.VarType.BF16 and not self._uniform):
            out_dtype = VarDesc.VarType.FP32
            out_var = block.create_var(name=unique_name.generate(".".join(
                ['xavier_init', var.name, 'tmp'])),
                                       shape=var.shape,
                                       dtype=out_dtype,
                                       type=VarDesc.VarType.LOD_TENSOR,
                                       persistable=False)
        else:
            out_dtype = var.dtype
            out_var = var

        if framework._non_static_mode():
            if self._uniform:
                limit = math.sqrt(6.0 / float(fan_in + fan_out))
                out_var = _C_ops.uniform_random('shape', out_var.shape, 'min',
                                                -limit, 'max', limit, 'seed',
                                                self._seed, 'dtype', out_dtype)
            else:
                std = math.sqrt(2.0 / float(fan_in + fan_out))

                if in_dygraph_mode():
                    place = _current_expected_place()
                    out_var = _C_ops.final_state_gaussian_random(
                        out_var.shape, 0.0, std, self._seed, out_dtype, place)
                else:
                    out_var = _C_ops.gaussian_random('shape', out_var.shape,
                                                     'dtype', out_dtype, 'mean',
                                                     0.0, 'std', std, 'seed',
                                                     self._seed)

            if var.dtype == VarDesc.VarType.FP16 or (
                    var.dtype == VarDesc.VarType.BF16 and not self._uniform):
                var_tmp = _C_ops.cast(out_var, 'in_dtype', out_var.dtype,
                                      'out_dtype', var.dtype)
                var_tmp._share_underline_tensor_to(var)
            else:
                out_var._share_underline_tensor_to(var)
            return None
        else:
            if self._uniform:
                limit = math.sqrt(6.0 / float(fan_in + fan_out))
                op = block.append_op(type="uniform_random",
                                     inputs={},
                                     outputs={"Out": out_var},
                                     attrs={
                                         "shape": out_var.shape,
                                         "dtype": out_dtype,
                                         "min": -limit,
                                         "max": limit,
                                         "seed": self._seed
                                     },
                                     stop_gradient=True)
            else:
                std = math.sqrt(2.0 / float(fan_in + fan_out))
                op = block.append_op(type="gaussian_random",
                                     outputs={"Out": out_var},
                                     attrs={
                                         "shape": out_var.shape,
                                         "dtype": out_dtype,
                                         "mean": 0.0,
                                         "std": std,
                                         "seed": self._seed
                                     },
                                     stop_gradient=True)

            if var.dtype == VarDesc.VarType.FP16 or (
                    var.dtype == VarDesc.VarType.BF16 and not self._uniform):
                block.append_op(type="cast",
                                inputs={"X": out_var},
                                outputs={"Out": var},
                                attrs={
                                    "in_dtype": out_var.dtype,
                                    "out_dtype": var.dtype
                                })

            var.op = op
            return op


class MSRAInitializer(Initializer):
    r"""Implements the MSRA initializer a.k.a. Kaiming Initializer

    This class implements the weight initialization from the paper
    `Delving Deep into Rectifiers: Surpassing Human-Level Performance on
    ImageNet Classification <https://arxiv.org/abs/1502.01852>`_
    by Kaiming He, Xiangyu Zhang, Shaoqing Ren and Jian Sun. This is a
    robust initialization method that particularly considers the rectifier
    nonlinearities. In case of Uniform distribution, the range is [-x, x], where

    .. math::

        x = \sqrt{\\frac{6.0}{fan\_in}}

    In case of Normal distribution, the mean is 0 and the standard deviation
    is

    .. math::

        \sqrt{\\frac{2.0}{fan\_in}}
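
    For example (a hedged illustration with an assumed :math:`fan\_in = 27`),
    the uniform range is about :math:`\pm 0.471` and the normal standard
    deviation is about :math:`0.272`.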

    Args:
        uniform (bool): whether to use uniform or normal distribution
        fan_in (float32|None): fan_in for MSRAInitializer. If None, it is\
        inferred from the variable. default is None.
        seed (int32): random seed

    Note:
        It is recommended to set fan_in to None for most cases.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.fluid as fluid
            paddle.enable_static()
            x = fluid.data(name="data", shape=[8, 32, 32], dtype="float32")
            fc = fluid.layers.fc(input=x, size=10,
                param_attr=fluid.initializer.MSRA(uniform=False))

    """

    def __init__(self, uniform=True, fan_in=None, seed=0):
        """Constructor for MSRAInitializer
        """
        assert uniform is not None
        assert seed is not None
        super(MSRAInitializer, self).__init__()
        self._uniform = uniform
        self._fan_in = fan_in
        self._seed = seed

    def __call__(self, var, block=None):
        """Initialize the input tensor with MSRA initialization.

        Args:
            var(Tensor): Tensor that needs to be initialized.
            block(Block, optional): The block in which initialization ops
                   should be added. Used in static graph only, default None.

        Returns:
            The initialization op
        """
        block = self._check_block(block)

        assert isinstance(var, framework.Variable)
        assert isinstance(block, framework.Block)
        f_in, f_out = self._compute_fans(var)

        # If fan_in is passed, use it
        fan_in = f_in if self._fan_in is None else self._fan_in

        if self._seed == 0:
            self._seed = block.program.random_seed

        # to be compatible with fp16 initializers
        if var.dtype == VarDesc.VarType.FP16 or (
                var.dtype == VarDesc.VarType.BF16 and not self._uniform):
            out_dtype = VarDesc.VarType.FP32
            out_var = block.create_var(name=unique_name.generate(".".join(
                ['masra_init', var.name, 'tmp'])),
                                       shape=var.shape,
                                       dtype=out_dtype,
                                       type=VarDesc.VarType.LOD_TENSOR,
                                       persistable=False)
        else:
            out_dtype = var.dtype
            out_var = var

        if framework._non_static_mode():
            if self._uniform:
                limit = math.sqrt(6.0 / float(fan_in))
                out_var = _C_ops.uniform_random('shape', out_var.shape, 'min',
                                                -limit, 'max', limit, 'seed',
                                                self._seed, 'dtype',
                                                int(out_dtype))
            else:
                std = math.sqrt(2.0 / float(fan_in))
                if in_dygraph_mode():
                    place = _current_expected_place()
                    out_var = _C_ops.final_state_gaussian_random(
                        out_var.shape, 0.0, std, self._seed, out_dtype, place)
                else:
                    out_var = _C_ops.gaussian_random('shape',
                                                     out_var.shape, 'dtype',
                                                     int(out_dtype), 'mean',
                                                     0.0, 'std', std, 'seed',
                                                     self._seed)

            if var.dtype == VarDesc.VarType.FP16 or (
                    var.dtype == VarDesc.VarType.BF16 and not self._uniform):
                var_tmp = _C_ops.cast(out_var, 'in_dtype', out_var.dtype,
                                      'out_dtype', var.dtype)
                var_tmp._share_underline_tensor_to(var)
            else:
                out_var._share_underline_tensor_to(var)
            return None
        else:
            if self._uniform:
                limit = math.sqrt(6.0 / float(fan_in))
                op = block.append_op(type="uniform_random",
                                     inputs={},
                                     outputs={"Out": out_var},
                                     attrs={
                                         "shape": out_var.shape,
                                         "dtype": int(out_dtype),
                                         "min": -limit,
                                         "max": limit,
                                         "seed": self._seed
                                     },
                                     stop_gradient=True)

            else:
                std = math.sqrt(2.0 / float(fan_in))
                op = block.append_op(type="gaussian_random",
                                     outputs={"Out": out_var},
                                     attrs={
                                         "shape": out_var.shape,
                                         "dtype": int(out_dtype),
                                         "mean": 0.0,
                                         "std": std,
                                         "seed": self._seed
                                     },
                                     stop_gradient=True)

            if var.dtype == VarDesc.VarType.FP16 or (
                    var.dtype == VarDesc.VarType.BF16 and not self._uniform):
                block.append_op(type="cast",
                                inputs={"X": out_var},
                                outputs={"Out": var},
                                attrs={
                                    "in_dtype": out_var.dtype,
                                    "out_dtype": var.dtype
                                })

            var.op = op
            return op


class BilinearInitializer(Initializer):
    """
    This initializer can be used in transposed convolution operator to
    act as upsampling. Users can upsample a feature map with shape of
    (B, C, H, W) by any integer factor. The usage is:

    Examples:

        .. code-block:: python

            import math

            import paddle
            import paddle.nn as nn
            from paddle.regularizer import L2Decay

X
xsrobin 已提交
842 843
            factor = 2
            C = 2
D
            H = W = 32
846 847 848 849
            w_attr = paddle.ParamAttr(learning_rate=0.,
                                      regularizer=L2Decay(0.),
                                      initializer=nn.initializer.Bilinear())
            data = paddle.rand([B, 3, H, W], dtype='float32')
            conv_up = nn.Conv2DTranspose(3,
                                         out_channels=C,
                                         kernel_size=2 * factor - factor % 2,
                                         padding=int(
                                             math.ceil((factor - 1) / 2.)),
                                         stride=factor,
                                         weight_attr=w_attr,
                                         bias_attr=False)
            x = conv_up(data)

    Where, `out_channels=C` and `groups=C` means this is channel-wise transposed
    convolution. The filter shape will be (C, 1, K, K) where K is `kernel_size`,
    This initializer will set a (K, K) interpolation kernel for every channel
    of the filter identically. The resulting shape of the output feature map
    will be (B, C, factor * H, factor * W). Note that the learning rate and the
    weight decay are set to 0 in order to keep coefficient values of bilinear
    interpolation unchanged during training.

    """

    def __init__(self):
        """Constructor for BilinearInitializer.
        """
        super(BilinearInitializer, self).__init__()

875 876
    def __call__(self, var, block=None):
        """Initialize the input tensor with Bilinear initialization.
877 878

        Args:
879 880 881
            var(Tensor): Tensor that needs to be initialized.
            block(Block, optional): The block in which initialization ops
                   should be added. Used in static graph only, default None.
882 883

        Returns:
884
            The initialization op
885
        """
886 887
        block = self._check_block(block)

888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911
        if not isinstance(var, framework.Variable):
            raise ValueError("var must be framework.Variable.")

        if not isinstance(block, framework.Block):
            raise ValueError("block must be framework.Block.")

        shape = var.shape
        if len(shape) != 4:
            raise ValueError("the length of shape must be 4.")
        if shape[2] != shape[3]:
            raise ValueError("shape[2] must be equal to shape[3].")

        weight = np.zeros(np.prod(var.shape), dtype='float32')
        size = shape[3]
        # factor
        f = np.ceil(size / 2.)
        # center
        c = (2 * f - 1 - f % 2) / (2. * f)
        for i in range(np.prod(shape)):
            x = i % size
            y = (i / size) % size
            weight[i] = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
        weight = np.reshape(weight, shape)
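        # A hedged worked example (kernel size assumed purely for
        # illustration): with size = 4, f = 2.0 and c = 0.75, the per-axis
        # factors are [0.25, 0.75, 0.75, 0.25], and every (4, 4) kernel slice
        # is their outer product (corner value 0.0625, center values 0.5625).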

        # to be compatible with fp16 initializers
        if var.dtype in [
                VarDesc.VarType.FP16, VarDesc.VarType.BF16, VarDesc.VarType.FP64
        ]:
            out_dtype = VarDesc.VarType.FP32
            out_var = block.create_var(name=unique_name.generate(".".join(
                ['bilinear_init', var.name, 'tmp'])),
                                       shape=var.shape,
                                       dtype=out_dtype,
                                       type=VarDesc.VarType.LOD_TENSOR,
                                       persistable=False)
        else:
            out_dtype = var.dtype
            out_var = var

        if out_dtype == VarDesc.VarType.FP32:
            value_name = "fp32_values"
            values = [float(v) for v in weight.flat]
        else:
            raise TypeError("Unsupported dtype %s" % var.dtype)

        if np.prod(shape) > 1024 * 1024:
            raise ValueError("The size of input is too big. ")

        if framework._non_static_mode():
            _C_ops.assign_value(out_var, 'shape', list(shape), 'dtype',
                                out_dtype, value_name, values)
            if var.dtype in [
                    VarDesc.VarType.FP16, VarDesc.VarType.BF16,
                    VarDesc.VarType.FP64
            ]:
                var_tmp = _C_ops.cast(out_var, 'in_dtype', out_var.dtype,
                                      'out_dtype', var.dtype)
                var_tmp._share_underline_tensor_to(var)
            else:
                out_var._share_underline_tensor_to(var)
            return None
        else:
            op = block.append_op(type='assign_value',
                                 outputs={'Out': [out_var]},
                                 attrs={
                                     'dtype': out_dtype,
                                     'shape': list(shape),
                                     value_name: values
                                 })

            if var.dtype in [
                    VarDesc.VarType.FP16, VarDesc.VarType.BF16,
                    VarDesc.VarType.FP64
            ]:
                block.append_op(type="cast",
                                inputs={"X": out_var},
                                outputs={"Out": var},
                                attrs={
                                    "in_dtype": out_var.dtype,
                                    "out_dtype": var.dtype
                                })

            var.op = op
            return op


class NumpyArrayInitializer(Initializer):
    """Init an parameter with an numpy array
976
    This op initialize the variable by numpy array.

    Args:
        value (numpy): numpy array to initialize the variable

    Returns:
        A Tensor variable initialized by numpy.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy
            x = fluid.data(name="x", shape=[2, 1], dtype='float32')
            fc = fluid.layers.fc(input=x, size=10,
                param_attr=fluid.initializer.NumpyArrayInitializer(numpy.array([1,2])))
    """

    def __init__(self, value):
        import numpy
        assert isinstance(value, numpy.ndarray)
        super(NumpyArrayInitializer, self).__init__()
        self._value = value

    def __call__(self, var, block=None):
        """Initialize the input tensor with Numpy array.

        Args:
            var(Tensor): Tensor that needs to be initialized.
            block(Block, optional): The block in which initialization ops
                   should be added. Used in static graph only, default None.

        Returns:
            The initialization op
        """
        block = self._check_block(block)

        assert isinstance(var, framework.Variable)
        assert isinstance(block, framework.Block)

        # to be compatible with fp16 initializers
        if var.dtype in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]:
            out_dtype = VarDesc.VarType.FP32
            np_value = self._value.astype("float32")
            out_var = block.create_var(name=unique_name.generate(".".join(
                ['numpy_array_init', var.name, 'tmp'])),
                                       shape=var.shape,
                                       dtype=out_dtype,
                                       type=VarDesc.VarType.LOD_TENSOR,
                                       persistable=False)
        else:
            out_var = var
            out_dtype = var.dtype
            np_value = self._value

        if out_dtype == VarDesc.VarType.FP32:
            value_name = "fp32_values"
            values = [float(v) for v in np_value.flat]
        elif out_dtype == VarDesc.VarType.INT32:
            value_name = "int32_values"
            values = [int(v) for v in np_value.flat]
        else:
            raise ValueError("Unsupported dtype %s" % self._value.dtype)
        if self._value.size > 1024 * 1024 * 1024:
            raise ValueError("The size of input is too big. Please consider "
                             "saving it to file and 'load_op' to load it")

        if framework._non_static_mode():
            _C_ops.assign_value(out_var, 'shape', list(self._value.shape),
                                'dtype', out_dtype, value_name, values)
            if var.dtype in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]:
                var_tmp = _C_ops.cast(out_var, 'in_dtype', out_var.dtype,
                                      'out_dtype', var.dtype)
                var_tmp._share_underline_tensor_to(var)
            else:
                out_var._share_underline_tensor_to(var)
            return None
        else:
            op = block.append_op(type='assign_value',
                                 outputs={'Out': out_var},
                                 attrs={
                                     'dtype': out_dtype,
                                     'shape': list(self._value.shape),
                                     value_name: values
                                 },
                                 stop_gradient=True)

            if var.dtype in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]:
                block.append_op(type="cast",
                                inputs={"X": out_var},
                                outputs={"Out": var},
                                attrs={
                                    "in_dtype": out_var.dtype,
                                    "out_dtype": var.dtype
                                })

            var.op = op
            return op


def set_global_initializer(weight_init, bias_init=None):
    """
    This API is used to set up global model parameter initializer in framework.

    After this API is invoked, the global initializer will take effect in subsequent code.

    The model parameters include ``weight`` and ``bias`` . In the framework, they correspond 
    to ``paddle.ParamAttr`` , which is inherited from ``paddle.Tensor`` , and is a persistable Variable.
    This API only takes effect for model parameters, not for variables created through apis such as 
    :ref:`api_fluid_layers_create_global_var` , :ref:`api_fluid_layers_create_tensor`.
    
    If the initializer is also set up by ``param_attr`` or ``bias_attr`` when creating a network layer,
    the global initializer setting here will not take effect because it has a lower priority.

    If you want to cancel the global initializer in framework, please set global initializer to ``None`` .

    Args:
        weight_init (Initializer): set the global initializer for ``weight`` of model parameters.
        bias_init (Initializer, optional): set the global initializer for ``bias`` of model parameters. 
            Default: None.

    Returns:
        None

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn as nn

            nn.initializer.set_global_initializer(nn.initializer.Uniform(), nn.initializer.Constant())
            x_var = paddle.uniform((2, 4, 8, 8), dtype='float32', min=-1., max=1.)

            # The weight of conv1 is initialized by Uniform
            # The bias of conv1 is initialized by Constant
            conv1 = nn.Conv2D(4, 6, (3, 3))
            y_var1 = conv1(x_var)

            # If set param_attr/bias_attr too, global initializer will not take effect
            # The weight of conv2 is initialized by Xavier
            # The bias of conv2 is initialized by Normal
            conv2 = nn.Conv2D(4, 6, (3, 3), 
                weight_attr=nn.initializer.XavierUniform(),
                bias_attr=nn.initializer.Normal())
            y_var2 = conv2(x_var)

            # Cancel the global initializer in framework; this will take effect in subsequent code
            nn.initializer.set_global_initializer(None)
    """

    check_type(weight_init, 'weight_init', (Initializer, type(None)),
               'set_global_initializer')
    global _global_weight_initializer_
    _global_weight_initializer_ = weight_init

    check_type(bias_init, 'bias_init', (Initializer, type(None)),
               'set_global_initializer')
    global _global_bias_initializer_
    _global_bias_initializer_ = bias_init


def _global_weight_initializer():
    """
    Return the global weight initializer. The user doesn't need to use it.
    """
    return _global_weight_initializer_


def _global_bias_initializer():
    """
    Return the global bias initializer. The user doesn't need to use it.
    """
    return _global_bias_initializer_


def calculate_gain(nonlinearity, param=None):
    """
    Get the recommended ``gain`` value of some nonlinearity function. ``gain`` value can be used in some 
    ``paddle.nn.initializer`` api to adjust the initialization value.

    Args:
        nonlinearity(str): name of nonlinearity activation function. If it is a linear function, such as: 
            `linear/conv1d/conv2d/conv3d/conv1d_transpose/conv2d_transpose/conv3d_transpose` , 1.0 will be returned.
        param(bool|int|float, optional): optional parameter for some nonlinearity functions. Now, it only applies to
            'leaky_relu'. Default: None, it will be calculated as 0.01 in the formula.

    Returns:
        A float value, which is the recommended gain for this nonlinearity function.

    Examples:
        .. code-block:: python

            import paddle
            gain = paddle.nn.initializer.calculate_gain('tanh') # 5.0 / 3
            gain = paddle.nn.initializer.calculate_gain('leaky_relu', param=1.0) # 1.0 = math.sqrt(2.0 / (1+param^2))

    """
    if param is None:
        param = 0.01
    else:
        assert isinstance(param, (bool, int, float))
        param = float(param)
    recommended_gain = {
        'sigmoid': 1,
        'linear': 1,
        'conv1d': 1,
        'conv2d': 1,
        'conv3d': 1,
        'conv1d_transpose': 1,
        'conv2d_transpose': 1,
        'conv3d_transpose': 1,
        'tanh': 5.0 / 3,
        'relu': math.sqrt(2.0),
        'leaky_relu': math.sqrt(2.0 / (1 + param**2)),
        'selu': 3.0 / 4
    }
    if nonlinearity in recommended_gain.keys():
        return recommended_gain[nonlinearity]
    else:
        raise ValueError(
            "nonlinearity function {} is not suppported now.".format(
                nonlinearity))


# We short the class name, since users will use the initializer with the package
# name. The sample code:
#
# import paddle.fluid as fluid
#
# hidden = fluid.layers.fc(...,
#                          param_attr=ParamAttr(fluid.initializer.Xavier()))
#
# It is no need to add an `Initializer` as the class suffix
Constant = ConstantInitializer
Uniform = UniformInitializer
Normal = NormalInitializer
TruncatedNormal = TruncatedNormalInitializer
Xavier = XavierInitializer
MSRA = MSRAInitializer
Bilinear = BilinearInitializer