# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import numpy as np

from paddle.fluid import framework
from paddle.fluid import unique_name
from paddle.fluid.core import VarDesc
from paddle.fluid.framework import default_main_program

__all__ = ['MSRAInitializer', 'KaimingUniform', 'MSRA']


class Initializer(object):
    """Base class for variable initializers.

    Defines the common interface of variable initializers. Initializers add
    operations to the init program that are used to initialize variables.
    Users should not use this class directly, but should use one of its
    subclasses instead.
    """

    def __init__(self):
        pass

    def __call__(self, param, block=None):
        """Add corresponding initialization operations to the network."""
        raise NotImplementedError()

    def _check_block(self, block):
        if block is None:
            block = default_main_program().global_block()
        return block

    def _compute_fans(self, var):
        """Compute the fan_in and the fan_out for layers.

        This method computes the fan_in and the fan_out for neural network
        layers when they are not specified. It is not possible to estimate
        fan_in and fan_out perfectly in general; this method estimates them
        correctly for matrix multiplies and convolutions.

        Args:
            var: variable for which fan_in and fan_out have to be computed.

        Returns:
            tuple of two integers (fan_in, fan_out).
        """
        shape = var.shape
        if not shape:
            fan_in = fan_out = 1
        elif len(shape) == 1:
            fan_in = fan_out = shape[0]
        elif len(shape) == 2:
            # This is the case for a simple matrix multiply.
            fan_in = shape[0]
            fan_out = shape[1]
        else:
            # Assume this to be a convolutional kernel. In PaddlePaddle, the
            # shape of the kernel is [num_filters, num_filter_channels, ...]
            # where the remaining dimensions are the filter_size.
            receptive_field_size = np.prod(shape[2:])
            fan_in = shape[1] * receptive_field_size
            fan_out = shape[0] * receptive_field_size

        return (fan_in, fan_out)
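
# A quick sanity check of the fan computation above (a sketch, not part of
# the library; `_DummyVar` is a hypothetical stand-in for a framework
# Variable, used here only to illustrate the expected values):
#
#     class _DummyVar(object):
#         def __init__(self, shape):
#             self.shape = shape
#
#     init = Initializer()
#     # Conv kernel [64, 3, 3, 3]: receptive field 3 * 3 = 9, so
#     # fan_in = 3 * 9 = 27 and fan_out = 64 * 9 = 576.
#     assert init._compute_fans(_DummyVar([64, 3, 3, 3])) == (27, 576)
#     # 2-D weight [128, 10]: fan_in = 128, fan_out = 10.
#     assert init._compute_fans(_DummyVar([128, 10])) == (128, 10)
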
class MSRAInitializer(Initializer):
    r"""Implements the MSRA initializer, a.k.a. Kaiming initializer.

    This class implements the weight initialization from the paper
    `Delving Deep into Rectifiers: Surpassing Human-Level Performance on
    ImageNet Classification <https://arxiv.org/abs/1502.01852>`_
    by Kaiming He, Xiangyu Zhang, Shaoqing Ren and Jian Sun. This is a
    robust initialization method that particularly considers the rectifier
    nonlinearities.

    In case of Uniform distribution, the range is [-x, x], where

    .. math::

        x = \sqrt{\frac{6.0}{fan\_in}}

    In case of Normal distribution, the mean is 0 and the standard deviation
    is

    .. math::

        \sqrt{\frac{2.0}{fan\_in}}

    Args:
        uniform (bool): whether to use uniform or normal distribution
        fan_in (float32|None): fan_in for MSRAInitializer. If None, it is
            inferred from the variable. Default is None.
        seed (int32): random seed

    Note:
        It is recommended to set fan_in to None for most cases.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.fluid as fluid

            paddle.enable_static()
            x = fluid.data(name="data", shape=[8, 32, 32], dtype="float32")
            fc = fluid.layers.fc(input=x, size=10,
                param_attr=fluid.initializer.MSRA(uniform=False))

    """

    def __init__(self, uniform=True, fan_in=None, seed=0):
        """Constructor for MSRAInitializer
        """
        assert uniform is not None
        assert seed is not None
        super(MSRAInitializer, self).__init__()
        self._uniform = uniform
        self._fan_in = fan_in
        self._seed = seed

    def __call__(self, var, block=None):
        """Initialize the input tensor with MSRA initialization.

        Args:
            var(Tensor): Tensor that needs to be initialized.
            block(Block, optional): The block in which initialization ops
                should be added. Used in static graph only, default None.

        Returns:
            The initialization op
        """
        block = self._check_block(block)

        assert isinstance(var, framework.Variable)
        assert isinstance(block, framework.Block)
        f_in, f_out = self._compute_fans(var)

        # If fan_in is passed, use it
        fan_in = f_in if self._fan_in is None else self._fan_in

        if self._seed == 0:
            self._seed = block.program.random_seed

        # to be compatible with fp16 initializers
        if var.dtype == VarDesc.VarType.FP16 or (
                var.dtype == VarDesc.VarType.BF16 and not self._uniform):
            out_dtype = VarDesc.VarType.FP32
            out_var = block.create_var(
                name=unique_name.generate(".".join(
                    ['msra_init', var.name, 'tmp'])),
                shape=var.shape,
                dtype=out_dtype,
                type=VarDesc.VarType.LOD_TENSOR,
                persistable=False)
        else:
            out_dtype = var.dtype
            out_var = var

        if self._uniform:
            limit = np.sqrt(6.0 / float(fan_in))
            op = block.append_op(
                type="uniform_random",
                inputs={},
                outputs={"Out": out_var},
                attrs={
                    "shape": out_var.shape,
                    "dtype": int(out_dtype),
                    "min": -limit,
                    "max": limit,
                    "seed": self._seed
                },
                stop_gradient=True)
        else:
            std = np.sqrt(2.0 / float(fan_in))
            op = block.append_op(
                type="gaussian_random",
                outputs={"Out": out_var},
                attrs={
                    "shape": out_var.shape,
                    "dtype": int(out_dtype),
                    "mean": 0.0,
                    "std": std,
                    "seed": self._seed
                },
                stop_gradient=True)

        if var.dtype == VarDesc.VarType.FP16 or (
                var.dtype == VarDesc.VarType.BF16 and not self._uniform):
            block.append_op(
                type="cast",
                inputs={"X": out_var},
                outputs={"Out": var},
                attrs={"in_dtype": out_var.dtype,
                       "out_dtype": var.dtype})

        if not framework.in_dygraph_mode():
            var.op = op
        return op
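
# To make the formulas above concrete (the fan_in value is illustrative
# only): with fan_in = 27, the uniform case samples from [-x, x] with
# x = sqrt(6.0 / 27) ~= 0.4714, and the normal case uses mean 0.0 with
# std = sqrt(2.0 / 27) ~= 0.2722.
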
class KaimingUniform(MSRAInitializer):
    r"""Implements the Kaiming Uniform initializer.

    This class implements the weight initialization from the paper
    `Delving Deep into Rectifiers: Surpassing Human-Level Performance on
    ImageNet Classification <https://arxiv.org/abs/1502.01852>`_
    by Kaiming He, Xiangyu Zhang, Shaoqing Ren and Jian Sun. This is a
    robust initialization method that particularly considers the rectifier
    nonlinearities.

    In case of Uniform distribution, the range is [-x, x], where

    .. math::

        x = \sqrt{\frac{6.0}{fan\_in}}

    Args:
        fan_in (float32|None): fan_in for Kaiming uniform initializer. If
            None, it is inferred from the variable. Default is None.

    Note:
        It is recommended to set fan_in to None for most cases.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn as nn

            linear = nn.Linear(2, 4, weight_attr=nn.initializer.KaimingUniform())
            data = paddle.rand([30, 10, 2], dtype='float32')
            res = linear(data)

    """

    def __init__(self, fan_in=None):
        super(KaimingUniform, self).__init__(
            uniform=True, fan_in=fan_in, seed=0)


# We shorten the class name, since users will use the initializer with the
# package name. The sample code:
#
#     import paddle.fluid as fluid
#
#     hidden = fluid.layers.fc(...,
#                              param_attr=ParamAttr(fluid.initializer.Xavier()))
#
# There is no need to add `Initializer` as the class suffix.
MSRA = MSRAInitializer
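
# A minimal end-to-end sketch of checking the uniform bound (assumes a
# Paddle >= 2.0 dygraph environment; the layer sizes are arbitrary):
#
#     import numpy as np
#     import paddle
#     import paddle.nn as nn
#
#     linear = nn.Linear(100, 10, weight_attr=nn.initializer.KaimingUniform())
#     w = linear.weight.numpy()
#     # For a [100, 10] weight, fan_in = 100, so every entry should lie in
#     # [-sqrt(6 / 100), sqrt(6 / 100)] ~= [-0.245, 0.245].
#     assert np.abs(w).max() <= np.sqrt(6.0 / 100.0)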