# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import collections
import copy
import inspect
import re
import warnings
import weakref

import numpy as np

import paddle
from paddle import nn, profiler
from paddle.base import core, framework, unique_name
from paddle.base.core import VarDesc
from paddle.base.dygraph import no_grad
from paddle.base.dygraph.base import in_declarative_mode  # noqa F401
from paddle.base.dygraph.base import (
    _convert_into_variable,
    in_to_static_mode,
    program_desc_tracing_guard,
)
from paddle.base.dygraph_utils import _append_activation_in_dygraph
from paddle.base.executor import Executor, global_scope
from paddle.base.framework import Parameter, Program
from paddle.base.framework import _current_expected_place as _get_device
from paddle.base.framework import (
    _global_flags,
    convert_np_dtype_to_dtype_,
    default_main_program,
    in_dygraph_mode,
)
from paddle.base.layer_helper_base import LayerHelperBase
from paddle.base.param_attr import ParamAttr
from paddle.profiler.utils import in_profiler_mode
from paddle.utils import deprecated

__all__ = []

_first_cap_re = re.compile('(.)([A-Z][a-z]+)')
_all_cap_re = re.compile('([a-z])([A-Z])')


def record_program_ops_pre_hook(layer, inputs):
    """
    A pre-hook that records the op count before entering layer.forward.
    """
    if not in_dygraph_mode():
        if layer._op_recorder.start < 0:
            layer._op_recorder.start = len(
                default_main_program().current_block().ops
            )
            layer._op_recorder.is_valid = True
        else:
            layer._op_recorder.is_valid = False
            warnings.warn(
                "{} has recorded the op information before. Please check whether you call this layer twice.".format(
                    layer._full_name
                )
            )

    return None


def set_op_customized_attrs_post_hook(layer, inputs, outputs):
    """
    A post-hook that appends customized attributes to all operators generated in the current layer.
    """
    if not in_dygraph_mode() and layer._op_recorder.is_valid:
        start = layer._op_recorder.start
        end = len(default_main_program().current_block().ops)
        assert start >= 0 and end >= start
        ops = default_main_program().current_block().ops[start:end]

        layer._op_recorder.end = end
        layer._op_recorder.ops = ops

        for op in ops:
            for attr_name, val in layer._customized_attrs.items():
                op._set_attr(attr_name, val)

        # remove pre-hook and post-hook
        for hook_helper in layer._op_recorder.hooks:
            hook_helper.remove()

    return None

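# Illustrative sketch (not part of the original file): the two hooks above are
# intended to be registered as a pair on a static-graph nn.Layer, so that ops
# created inside forward() can be tagged with custom attributes. The names
# `layer` and `custom_attrs` below are hypothetical.
#
#   layer._customized_attrs = custom_attrs
#   pre_hook = layer.register_forward_pre_hook(record_program_ops_pre_hook)
#   post_hook = layer.register_forward_post_hook(set_op_customized_attrs_post_hook)
#   layer._op_recorder.hooks = [pre_hook, post_hook]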

def _scope_dist2single(dist_scope):
    mapping = {
        "row_parallel_linear": "linear",
        "column_parallel_linear": "linear",
        "vocab_parallel_embedding": "embedding",
        # "parallel_cross_entropy": "cross_entropy", while mp_layer has parallel_cross_entropy,
S
Shuangchi He 已提交
107
        # but there is no parameters so the mapping of parallel_cross_entropy is not necessary.
108 109 110 111
    }
    return mapping.get(dist_scope, dist_scope)

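# Illustrative example for _scope_dist2single (hypothetical calls):
#   _scope_dist2single("row_parallel_linear")  # -> "linear"
#   _scope_dist2single("conv2_d")              # -> "conv2_d" (unchanged)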

def _convert_camel_to_snake(name):
    s1 = _first_cap_re.sub(r'\1_\2', name)
    return _all_cap_re.sub(r'\1_\2', s1).lower()

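# Illustrative example for _convert_camel_to_snake (hypothetical call):
#   _convert_camel_to_snake("MyLayer")  # -> "my_layer"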

def _addindent(string, indent):
    s1 = string.split('\n')
    if len(s1) == 1:
        return string
    s2 = []
    for idx, line in enumerate(s1):
        if idx > 0:
            s2.append(str((indent * ' ') + line))
    return s1[0] + '\n' + '\n'.join(s2)

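# Illustrative example for _addindent (hypothetical call): every line after the
# first is indented by `indent` spaces.
#   _addindent("Linear(...)\nDropout(...)", 2)  # -> "Linear(...)\n  Dropout(...)"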

def _layer_trans_dtype(layer, dtype, excluded_layers):
    if type(layer) in excluded_layers:
        return

    layer._to_impl(dtype=dtype, floating_only=True, include_sublayers=False)

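# Illustrative sketch for _layer_trans_dtype (hypothetical call): cast the
# floating-point parameters of a single layer in place, skipping excluded types.
#   _layer_trans_dtype(linear, "float16", excluded_layers=[paddle.nn.BatchNorm])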

class LayerObjectHelper(LayerHelperBase):
    def __init__(self, name):
        super().__init__(name, layer_type=name)

    def append_op(
        self,
        type=None,
        inputs=None,
        outputs=None,
        attrs=None,
        stop_gradient=None,
    ):
        """append an operator for this layer object.

           Args:
               type: operator type
               inputs: input variable of the operator
               dtype: data type of this parameter
               is_bias: if this is a bias parameter
               default_initializer: set the default initializer for this parameter

        Returns created parameter Variable.
        """
        return self.main_program.current_block().append_op(
            type=type,
            inputs=inputs,
            outputs=outputs,
            attrs=attrs,
            stop_gradient=stop_gradient,
        )

    def _multiple_input(self, inputs_in):
        inputs = inputs_in
        ret = []
        if isinstance(inputs, (list, tuple)):
            for inp in inputs:
                ret.append(self.to_variable(inp))
        else:
            ret.append(self.to_variable(inputs))
        return ret

    # TODO: make it public when we need it
    def _input(self, inputs_in):
        inputs = self._multiple_input(inputs_in)
        if len(inputs) != 1:
            raise ValueError(f"{self.layer_type} layer only takes one input")
        return inputs[0]

    def _multiple_param_attr(self, length, param_attr_in=None):
        param_attr = param_attr_in
        if isinstance(param_attr, ParamAttr):
            param_attr = [param_attr]

        if len(param_attr) != 1 and len(param_attr) != length:
            raise ValueError(f"parameter number mismatch in {self.name}")
        elif len(param_attr) == 1 and length != 1:
            tmp = [None] * length
            for i in range(length):
                tmp[i] = copy.deepcopy(param_attr[0])
            param_attr = tmp
        return param_attr

    def iter_inputs_and_params(self, inputs_in, param_attr_in=None):
        """Access all inputs and params one by one

           Args:
               inputs_in: inputs to be iter
               param_attr_in: param_attr to be iter

        Returns input, param_attr
        """
        param_attr_in = ParamAttr._to_attr(param_attr_in)
        if isinstance(param_attr_in, bool):
            raise ValueError(f'Param_attr should not be False in {self.name}')
        inputs = inputs_in if (inputs_in is not None) else []
        inputs = self._multiple_input(inputs)
        param_attrs = self._multiple_param_attr(len(inputs), param_attr_in)
        yield from zip(inputs, param_attrs)
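    # Illustrative sketch for iter_inputs_and_params (hypothetical variables x, y):
    # each input is paired with its own copy of the given ParamAttr.
    #   for inp, attr in helper.iter_inputs_and_params([x, y], ParamAttr()):
    #       ...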

    def input_dtype(self, inputs_in):
        """Get input data type

           Args:
               inputs_in: the inputs whose data type is to be inferred

        Returns dtype of the input
        """
        inputs_in = inputs_in if (inputs_in is not None) else []
        inputs = self._multiple_input(inputs_in)
        dtype = None
        for each in inputs:
            if dtype is None:
                dtype = each.dtype
            elif dtype != each.dtype:
                raise ValueError(
                    "Data Type mismatch: %d to %d in %s"
                    % (dtype, each.dtype, self.name)
                )
        return dtype

    def get_parameter(self, name):
        """Get parameter specifically

           Args:
               name: parameter's name

        Returns target parameter
        """
        param = self.main_program.global_block().var(name)
        if not isinstance(param, Parameter):
            raise ValueError(f"no Parameter name {name} found in {self.name}")
        return param

    # TODO: this should not be called anymore after all activation func move to Layers
    def append_activation(self, input_var, act=None, use_cudnn=None):
        """Append activation

            Args:
                input_var: the input variable. The len(input_var.shape) is
                larger or equal than 2.
                act: activation type
                use_cudnn: if use cudnn

        Return the Variable of after append activation
        """
        act = act
        if act is None:
            return input_var
        if isinstance(act, str):
            act = {'type': act}
        else:
            raise TypeError(
                str(act) + " should be unicode or str in %s ", self.name
            )

        if (use_cudnn is not None) and use_cudnn:
            act['use_cudnn'] = use_cudnn
        use_mkldnn = _global_flags()["FLAGS_use_mkldnn"]
        if (use_mkldnn is not None) and use_mkldnn:
            act['use_mkldnn'] = use_mkldnn
        act_type = act.pop('type')
        if in_dygraph_mode():
            res = _append_activation_in_dygraph(
                input_var, act_type, use_cudnn, use_mkldnn
            )
            return res
        else:
            tmp = self.create_variable_for_type_inference(dtype=input_var.dtype)
            self.append_op(
                type=act_type,
                inputs={"X": [input_var]},
                outputs={"Out": [tmp]},
                attrs=act,
            )
            return tmp
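    # Illustrative sketch for append_activation (hypothetical helper and variable):
    #   out = helper.append_activation(hidden, act="relu")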

    def is_instance(self, param, cls):
        """Check if the input parameter is instance of input class

            Args:
                param: parameter to be check
                cls: class of the parameter

        Return result of the check (True or False)
        """
        param = param
        if not isinstance(param, cls):
            raise TypeError(
                "The input {0} parameter of method {1} must be {2}, in layer {3}",
                param,
                self.layer_type,
                cls.__name__,
                self.name,
            )


class LayerOpsRecoder:
    """
    Records information about the operators generated in an nn.Layer.
    """

    def __init__(self, start=-1, end=-1, ops=None, is_valid=False, hooks=None):
        self.start = start
        self.end = end
        self.ops = ops
        self.is_valid = is_valid
        self.hooks = hooks


class HookRemoveHelper:
    """A HookRemoveHelper that can be used to remove hook."""

    next_hook_id = 0

    def __init__(self, hooks):
        self._hooks_ref = weakref.ref(hooks)
        self._hook_id = HookRemoveHelper.next_hook_id
        HookRemoveHelper.next_hook_id += 1

    def remove(self):
        hooks = self._hooks_ref()
        if hooks is not None and self._hook_id in hooks:
            del hooks[self._hook_id]

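# Illustrative sketch for HookRemoveHelper (hypothetical hook): the helper
# returned by a register_forward_*_hook call removes exactly that hook.
#   remove_helper = layer.register_forward_pre_hook(my_pre_hook)
#   remove_helper.remove()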

class Layer:
    """
    Dynamic graph Layer based on object-oriented design (OOD). It holds the
    parameters of the layer, the structure of the forward graph, and so on.

    Parameters:
        name_scope (str, optional): prefix name used by the layer to name parameters.
            If prefix is "my_layer", parameter name in MyLayer
            can be "my_layer_0.w_n", where "w" is the parameter
            base name and "n" is a unique suffix auto-generated.
            If None, prefix name will be snake cased class name. Default: None.
        dtype(str, optional): data type of this parameter.
                If set str, it can be "bool",  "float16", "float32", "float64",
                "int8", "int16", "int32", "int64", "uint8" or "uint16".
                Default: "float32"

    Returns:
        None

    Examples:
        .. code-block:: python

            >>> import paddle
            >>> paddle.seed(100)

            >>> class MyLayer(paddle.nn.Layer):
            ...     def __init__(self):
            ...         super().__init__()
            ...         self._linear = paddle.nn.Linear(1, 1)
            ...         self._dropout = paddle.nn.Dropout(p=0.5)
            ...
            ...     def forward(self, input):
            ...         temp = self._linear(input)
            ...         temp = self._dropout(temp)
            ...         return temp
            ...
            >>> x = paddle.randn([10, 1], 'float32')
            >>> mylayer = MyLayer()
            >>> mylayer.eval()  # set mylayer._dropout to eval mode
            >>> out = mylayer(x)
            >>> mylayer.train()  # set mylayer._dropout to train mode
            >>> out = mylayer(x)
            >>> print(out)
            Tensor(shape=[10, 1], dtype=float32, place=Place(cpu), stop_gradient=False,
            [[-3.44879317],
             [ 0.        ],
             [ 0.        ],
             [-0.73825276],
             [ 0.        ],
             [ 0.        ],
             [ 0.64444798],
             [-3.22185946],
             [ 0.        ],
             [-0.68077987]])
    """

    def __init__(self, name_scope=None, dtype="float32"):
        self.training = True
        if name_scope is None:
            name_scope = _convert_camel_to_snake(self.__class__.__name__)
            name_scope = _scope_dist2single(name_scope)
        self._full_name = unique_name.generate(name_scope)
        self._helper = LayerObjectHelper(self._full_name)
        self._built = False
        self._dtype = dtype
        self._init_in_dynamic_mode = in_dygraph_mode()

        self._parameters = collections.OrderedDict()
        # Buffers the variable (not parameter) created in layer
        self._buffers = collections.OrderedDict()
        self._non_persistable_buffer_names_set = set()
        self._sub_layers = collections.OrderedDict()
        self._loaddict_holder = collections.OrderedDict()

        # Record generated op_descs in this layer
        self._op_recorder = LayerOpsRecoder(ops=[], hooks=[])
        self._customized_attrs = {}

        self._forward_pre_hooks = collections.OrderedDict()
        self._forward_post_hooks = collections.OrderedDict()

        # only used in AMP Training
        self._cast_to_low_precison = True

        self._state_dict_hooks = collections.OrderedDict()
        # Records original functions after @to_static to support rollback
        self._original_funcs = collections.OrderedDict()

    def train(self):
        """

        Sets this Layer and all its sublayers to training mode.
        This only affects certain modules like `Dropout` and `BatchNorm`.

        Returns:
            None

        Examples:
            .. code-block:: python

                >>> import paddle
                >>> paddle.seed(100)

                >>> class MyLayer(paddle.nn.Layer):
                ...     def __init__(self):
                ...         super().__init__()
                ...         self._linear = paddle.nn.Linear(1, 1)
                ...         self._dropout = paddle.nn.Dropout(p=0.5)
                ...
                ...     def forward(self, input):
                ...         temp = self._linear(input)
                ...         temp = self._dropout(temp)
                ...         return temp
                ...
                >>> x = paddle.randn([10, 1], 'float32')
                >>> mylayer = MyLayer()
                >>> mylayer.eval()  # set mylayer._dropout to eval mode
                >>> out = mylayer(x)
                >>> mylayer.train()  # set mylayer._dropout to train mode
                >>> out = mylayer(x)
                >>> print(out)
                Tensor(shape=[10, 1], dtype=float32, place=Place(cpu), stop_gradient=False,
                [[-3.44879317],
                 [ 0.        ],
                 [ 0.        ],
                 [-0.73825276],
                 [ 0.        ],
                 [ 0.        ],
                 [ 0.64444798],
                 [-3.22185946],
                 [ 0.        ],
                 [-0.68077987]])

        """
        # global setting in dygraph
        # NOTE(chenweihang): nn.Layer also can be used in static mode,
        # but _dygraph_tracer() can not be called in static mode
        if in_dygraph_mode():
            framework._dygraph_tracer().train_mode()
        # Layer-level setting
        self.training = True
        for layer in self.sublayers():
            layer.training = True

    def eval(self):
        """
        Sets this Layer and all its sublayers to evaluation mode.
        This only affects certain modules like `Dropout` and `BatchNorm`.

        Returns:
            None

        Example::
            .. code-block:: python

                >>> import paddle
                >>> paddle.seed(100)
                >>> class MyLayer(paddle.nn.Layer):
                ...     def __init__(self):
                ...         super().__init__()
                ...         self._linear = paddle.nn.Linear(1, 1)
                ...         self._dropout = paddle.nn.Dropout(p=0.5)
                ...
                ...     def forward(self, input):
                ...         temp = self._linear(input)
                ...         temp = self._dropout(temp)
                ...         return temp
                ...
                >>> x = paddle.randn([10, 1], 'float32')
                >>> mylayer = MyLayer()
                >>> mylayer.eval()  # set mylayer._dropout to eval mode
                >>> out = mylayer(x)
                >>> print(out)
                Tensor(shape=[10, 1], dtype=float32, place=Place(cpu), stop_gradient=False,
                [[-1.72439659],
                 [ 0.31532824],
                 [ 0.01192369],
                 [-0.36912638],
                 [-1.63426113],
                 [-0.93169814],
                 [ 0.32222399],
                 [-1.61092973],
                 [ 0.77209264],
                 [-0.34038994]])

        """
        # global setting in dygraph
        # NOTE(chenweihang): nn.Layer also can be used in static mode,
        # but _dygraph_tracer() can not be called in static mode
        if in_dygraph_mode():
            framework._dygraph_tracer().eval_mode()
        # Layer-level setting
        self.training = False
        for layer in self.sublayers():
            layer.training = False

    def apply(self, fn):
        """

        Applies ``fn`` recursively to every sublayer (as returned by ``.sublayers()``)
        as well as self. Typical use includes initializing the parameters of a model.

        Parameters:
            fn (function): a function to be applied to each sublayer

        Returns:
            Layer, self

        Example::
            .. code-block:: python

                >>> import paddle
                >>> import paddle.nn as nn
                >>> paddle.seed(2023)

                >>> net = nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2))

                >>> def init_weights(layer):
                ...     if type(layer) == nn.Linear:
                ...         print('before init weight:', layer.weight.numpy())
                ...         new_weight = paddle.full(shape=layer.weight.shape, dtype=layer.weight.dtype, fill_value=0.9)
                ...         layer.weight.set_value(new_weight)
                ...         print('after init weight:', layer.weight.numpy())
                ...
                >>> net.apply(init_weights)

                >>> print(net.state_dict())
                before init weight: [[ 0.89611185  0.04935038]
                                     [-0.5888344   0.99266374]]
                after init weight: [[0.9 0.9]
                                    [0.9 0.9]]
                before init weight: [[-0.18615901 -0.22924072]
                                     [ 1.1517721   0.59859073]]
                after init weight: [[0.9 0.9]
                                    [0.9 0.9]]
                OrderedDict([('0.weight', Parameter containing:
                Tensor(shape=[2, 2], dtype=float32, place=Place(cpu), stop_gradient=False,
                [[0.89999998, 0.89999998],
                 [0.89999998, 0.89999998]])), ('0.bias', Parameter containing:
                Tensor(shape=[2], dtype=float32, place=Place(cpu), stop_gradient=False,
                [0., 0.])), ('1.weight', Parameter containing:
                Tensor(shape=[2, 2], dtype=float32, place=Place(cpu), stop_gradient=False,
                [[0.89999998, 0.89999998],
                 [0.89999998, 0.89999998]])), ('1.bias', Parameter containing:
                Tensor(shape=[2], dtype=float32, place=Place(cpu), stop_gradient=False,
                [0., 0.]))])
        """
        for layer in self.children():
            layer.apply(fn)

        fn(self)

        return self

    def full_name(self):
        """

        Full name for this layer, composed of name_scope + "/" + MyLayer.__class__.__name__

        Returns:
            str, full name of this layer.

        Example::
            .. code-block:: python

                >>> import paddle

                >>> class LinearNet(paddle.nn.Layer):
                ...     def __init__(self):
                ...         super().__init__(name_scope = "demo_linear_net")
                ...         self._linear = paddle.nn.Linear(1, 1)
                ...
                ...     def forward(self, x):
                ...         return self._linear(x)
                ...
                >>> linear_net = LinearNet()
                >>> print(linear_net.full_name())
                demo_linear_net_0

        """
        return self._full_name

    def register_forward_post_hook(self, hook):
        """

        Register a forward post-hook for Layer. The hook will be called after the `forward` function has been computed.

        It should have the following form, `input` and `output` of the `hook` are `input` and `output` of the `Layer` respectively.
        User can use forward post-hook to change the output of the Layer or perform information statistics tasks on the Layer.

        hook(Layer, input, output) -> None or modified output

        Parameters:
            hook(function): a function registered as a forward post-hook

        Returns:
            HookRemoveHelper, a HookRemoveHelper object that can be used to remove the added hook by calling `hook_remove_helper.remove()` .

        Examples:
            .. code-block:: python

                >>> import paddle
                >>> import numpy as np

                >>> # the forward_post_hook changes the output of the layer: output = output * 2
                >>> def forward_post_hook(layer, input, output):
                ...     # user can use layer, input and output for information statistics tasks
                ...
                ...     # change the output
                ...     return output * 2
                ...
                >>> linear = paddle.nn.Linear(13, 5)

                >>> # register the hook
                >>> forward_post_hook_handle = linear.register_forward_post_hook(forward_post_hook)

                >>> value1 = np.arange(26).reshape(2, 13).astype("float32")
                >>> in1 = paddle.to_tensor(value1)

                >>> out0 = linear(in1)

                >>> # remove the hook
                >>> forward_post_hook_handle.remove()

                >>> out1 = linear(in1)

                >>> # the hook changed the linear's output to output * 2, so out0 is equal to out1 * 2.
                >>> assert (out0.numpy() == (out1.numpy()) * 2).any()

        """
        hook_remove_helper = HookRemoveHelper(self._forward_post_hooks)
        self._forward_post_hooks[hook_remove_helper._hook_id] = hook
        return hook_remove_helper

    def register_forward_pre_hook(self, hook):
        """

        Register a forward pre-hook for Layer. The hook will be called before the `forward` function has been computed.

        It should have the following form, `input` of the `hook` is `input` of the `Layer`,
        hook can either return a tuple or a single modified value in the hook. We will wrap the value into a tuple if
        a single value is returned (unless that value is already a tuple).
        User can use forward pre-hook to change the input of the Layer or perform information statistics tasks on the Layer.

        hook(Layer, input) -> None or modified input

        Parameters:
            hook(function): a function registered as a forward pre-hook

        Returns:
            HookRemoveHelper, a HookRemoveHelper object that can be used to remove the added hook by calling `hook_remove_helper.remove()` .

        Examples:
            .. code-block:: python

                >>> import paddle
                >>> import numpy as np

                >>> # the forward_pre_hook changes the input of the layer: input = input * 2
                >>> def forward_pre_hook(layer, input):
                ...     # user can use layer and input for information statistics tasks
                ...
                ...     # change the input
                ...     input_return = (input[0] * 2)
                ...     return input_return
                ...
                >>> linear = paddle.nn.Linear(13, 5)

                >>> # register the hook
                >>> forward_pre_hook_handle = linear.register_forward_pre_hook(forward_pre_hook)

                >>> value0 = np.arange(26).reshape(2, 13).astype("float32")
                >>> in0 = paddle.to_tensor(value0)
                >>> out0 = linear(in0)

                >>> # remove the hook
                >>> forward_pre_hook_handle.remove()

                >>> value1 = value0 * 2
                >>> in1 = paddle.to_tensor(value1)
                >>> out1 = linear(in1)

                >>> # the hook changed the linear's input to input * 2, so out0 is equal to out1.
                >>> assert (out0.numpy() == out1.numpy()).any()
        """
        hook_remove_helper = HookRemoveHelper(self._forward_pre_hooks)
        self._forward_pre_hooks[hook_remove_helper._hook_id] = hook
        return hook_remove_helper

    def create_parameter(
        self,
        shape,
        attr=None,
        dtype=None,
        is_bias=False,
        default_initializer=None,
    ):
        """Create parameters for this layer.

        Parameters:
            shape(list): Shape of the parameter.
            attr(ParamAttr, optional): Parameter attribute of weight. Please refer to :ref:`api_paddle_ParamAttr`. Default: None.
            dtype(str, optional): Data type of this parameter.
                If set str, it can be "bool",  "float16", "float32", "float64",
                "int8", "int16", "int32", "int64", "uint8" or "uint16". Default: "float32".
            is_bias(bool, optional): if this is a bias parameter. Default: False.
            default_initializer(Initializer, optional): the default initializer for this parameter.
                If set None, default initializer will be set to paddle.nn.initializer.Xavier and paddle.nn.initializer.Constant
                for non-bias and bias parameter, respectively. Default: None.

        Returns:
            Tensor, created parameter.

        Examples:
            .. code-block:: python

                >>> import paddle
                >>> paddle.seed(2023)

                >>> class MyLayer(paddle.nn.Layer):
                ...     def __init__(self):
                ...         super().__init__()
                ...         self._linear = paddle.nn.Linear(1, 1)
                ...         w_tmp = self.create_parameter([1,1])
                ...         self.add_parameter("w_tmp", w_tmp)
                ...
                ...     def forward(self, input):
                ...         return self._linear(input)
                ...
                >>> mylayer = MyLayer()
                >>> for name, param in mylayer.named_parameters():
                ...     print(name, param)      # will print w_tmp,_linear.weight,_linear.bias
                w_tmp Parameter containing:
                Tensor(shape=[1, 1], dtype=float32, place=Place(cpu), stop_gradient=False,
                [[0.06979191]])
                _linear.weight Parameter containing:
                Tensor(shape=[1, 1], dtype=float32, place=Place(cpu), stop_gradient=False,
                [[1.26729357]])
                _linear.bias Parameter containing:
                Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=False,
                [0.])
        """
        temp_attr = copy.deepcopy(attr)
        if isinstance(temp_attr, str) and temp_attr == "":
            temp_attr = None
        return self._helper.create_parameter(
            temp_attr, shape, dtype, is_bias, default_initializer
        )

    @deprecated(
        since="2.0.0",
        update_to="paddle.nn.Layer.create_tensor",
        reason="New api in create_tensor, easier to use.",
    )
    def create_variable(self, name=None, persistable=None, dtype=None):
        """

        Create Tensor for this layer.

        Parameters:
            name(str, optional): name of the tensor. Please refer to :ref:`api_guide_Name` . Default: None

            persistable(bool, optional): whether this tensor is persistable. Default: False

            dtype(str, optional): data type of this parameter. If set str, it can be "bool", "float16", "float32", "float64","int8", "int16", "int32", "int64", "uint8" or "uint16". If set None, it will be "float32". Default: None

        Returns:
            Tensor, created Tensor.

        Examples:
            .. code-block:: python

                >>> import paddle

                >>> class MyLinear(paddle.nn.Layer):
                ...     def __init__(self,
                ...                 in_features,
                ...                 out_features):
                ...         super().__init__()
                ...         self.linear = paddle.nn.Linear( 10, 10)
                ...
                ...         self.back_var = self.create_variable(name = "linear_tmp_0", dtype=self._dtype)
                ...
                ...     def forward(self, input):
                ...         out = self.linear(input)
                ...         paddle.assign( out, self.back_var)
                ...
                ...         return out

        """
        if name is not None:
            var_name = ".".join([self._full_name, name])
        else:
            var_name = unique_name.generate(
                ".".join([self._full_name, "_generated_var"])
            )

        return self._helper.main_program.current_block().create_var(
            name=var_name,
            persistable=persistable,
            dtype=dtype,
            type=core.VarDesc.VarType.LOD_TENSOR,
        )

    # TODO: Add more parameter list when we need them
    def create_tensor(self, name=None, persistable=None, dtype=None):
        """

        Create Tensor for this layer.

        Parameters:
            name(str, optional): name of the tensor. Please refer to :ref:`api_guide_Name` . Default: None
            persistable(bool, optional): whether this tensor is persistable. Default: False
            dtype(str, optional): data type of this parameter.
                If set str, it can be "bool",  "float16", "float32", "float64",
                "int8", "int16", "int32", "int64", "uint8" or "uint16".
                If set None, it will be "float32". Default: None

        Returns:
            Tensor, created Tensor.

        Examples:
            .. code-block:: python

                >>> import paddle

                >>> class MyLinear(paddle.nn.Layer):
                ...     def __init__(self,
                ...                  in_features,
                ...                  out_features):
                ...         super().__init__()
                ...         self.linear = paddle.nn.Linear(10, 10)
                ...
                ...         self.back_var = self.create_tensor(name = "linear_tmp_0", dtype=self._dtype)
                ...
                ...     def forward(self, input):
                ...         out = self.linear(input)
                ...         paddle.assign(out, self.back_var)
                ...
                ...         return out

        """
        if name is not None:
            var_name = ".".join([self._full_name, name])
        else:
            var_name = unique_name.generate(
                ".".join([self._full_name, "_generated_var"])
            )

        return self._helper.main_program.current_block().create_var(
            name=var_name,
            persistable=persistable,
            dtype=dtype,
            type=core.VarDesc.VarType.LOD_TENSOR,
        )

    def parameters(self, include_sublayers=True):
        """

        Returns a list of all Parameters from current layer and its sub-layers.

        Returns:
            list of Tensor, a list of Parameters.

        Examples:
            .. code-block:: python

                >>> import paddle
                >>> paddle.seed(100)

                >>> linear = paddle.nn.Linear(1, 1)
                >>> print(linear.parameters())
                [Parameter containing:
                Tensor(shape=[1, 1], dtype=float32, place=Place(cpu), stop_gradient=False,
                [[0.18551230]]), Parameter containing:
                Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=False,
                [0.])]

        """
        ret = [
            param
            for _, param in self.named_parameters(
                include_sublayers=include_sublayers
            )
        ]
        return ret

    def children(self):
        """

        Returns an iterator over immediate children layers.

        Yields:
            Layer: a child layer

        Examples:
            .. code-block:: python

                >>> import paddle

                >>> linear1 = paddle.nn.Linear(10, 3)
                >>> linear2 = paddle.nn.Linear(3, 10, bias_attr=False)
                >>> model = paddle.nn.Sequential(linear1, linear2)

                >>> layer_list = list(model.children())

                >>> print(layer_list)
                [Linear(in_features=10, out_features=3, dtype=float32), Linear(in_features=3, out_features=10, dtype=float32)]

        """
        for _, layer in self.named_children():
            yield layer

    def named_children(self):
        """Returns an iterator over immediate children layers, yielding both
        the name of the layer as well as the layer itself.

        Yields:
            (string, Layer): Tuple containing a name and child layer

        Examples:
            .. code-block:: python

                >>> import paddle

                >>> linear1 = paddle.nn.Linear(10, 3)
                >>> linear2 = paddle.nn.Linear(3, 10, bias_attr=False)
                >>> model = paddle.nn.Sequential(linear1, linear2)
                >>> for prefix, layer in model.named_children():
                ...     print(prefix, layer)
                0 Linear(in_features=10, out_features=3, dtype=float32)
                1 Linear(in_features=3, out_features=10, dtype=float32)
        """
        memo = set()
        for name, layer in self._sub_layers.items():
            if layer is not None and layer not in memo:
                memo.add(layer)
                yield name, layer

    def sublayers(self, include_self=False):
        """

        Returns a list of sub layers.

        Parameters:
            include_self(bool, optional): Whether to return self as a sublayer. Default: False

        Returns:
            list of Layer, a list of sub layers.

        Examples:
            .. code-block:: python

                >>> import paddle

                >>> class MyLayer(paddle.nn.Layer):
                ...     def __init__(self):
                ...         super().__init__()
                ...         self._linear = paddle.nn.Linear(1, 1)
                ...         self._dropout = paddle.nn.Dropout(p=0.5)
                ...
                ...     def forward(self, input):
                ...         temp = self._linear(input)
                ...         temp = self._dropout(temp)
                ...         return temp
                ...
                >>> mylayer = MyLayer()
                >>> print(mylayer.sublayers())
                [Linear(in_features=1, out_features=1, dtype=float32), Dropout(p=0.5, axis=None, mode=upscale_in_train)]

        """
        ret = [
            layer
            for _, layer in self.named_sublayers(include_self=include_self)
        ]
        return ret

    def named_parameters(self, prefix='', include_sublayers=True):
        """
        Returns an iterator over all parameters in the Layer, yielding tuple of name and parameter.

        Parameters:
            prefix(str, optional): Prefix to prepend to all parameter names. Default: ''.
            include_sublayers(bool, optional): Whether include the parameters of sublayers.
                If True, also include the named parameters from sublayers. Default: True.

        Yields:
            (string, Parameter): Tuple of name and Parameter

        Examples:
            .. code-block:: python

                >>> import paddle
                >>> paddle.seed(100)

                >>> fc1 = paddle.nn.Linear(10, 3)
                >>> fc2 = paddle.nn.Linear(3, 10, bias_attr=False)
                >>> model = paddle.nn.Sequential(fc1, fc2)
                >>> for name, param in model.named_parameters():
                ...     print(name, param)
                0.weight Parameter containing:
                Tensor(shape=[10, 3], dtype=float32, place=Place(cpu), stop_gradient=False,
                [[ 0.07276392, -0.39791510, -0.66356444],
                 [ 0.02143478, -0.18519843, -0.32485050],
                 [-0.42249614,  0.08450919, -0.66838276],
                 [ 0.38208580, -0.24303678,  0.55127048],
                 [ 0.47745085,  0.62117910, -0.08336520],
                 [-0.28653207,  0.47237599, -0.05868882],
                 [-0.14385653,  0.29945642,  0.12832761],
                 [-0.21237159,  0.38539791, -0.62760031],
                 [ 0.02637231,  0.20621127,  0.43255770],
                 [-0.19984481, -0.26259184, -0.29696006]])
                0.bias Parameter containing:
                Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=False,
                [0., 0., 0.])
                1.weight Parameter containing:
                Tensor(shape=[3, 10], dtype=float32, place=Place(cpu), stop_gradient=False,
                [[ 0.01985580, -0.40268910,  0.41172385, -0.47249708, -0.09002256,
                 -0.00533628, -0.52048630,  0.62360322,  0.20848787, -0.02033746],
                 [ 0.58281910,  0.12841827,  0.12907702,  0.02325618, -0.07746267,
                 0.31950659, -0.37924835, -0.59209681, -0.11732036, -0.58378261],
                 [-0.62100595,  0.22293305,  0.28229684, -0.03687060, -0.59323978,
                 0.08411229,  0.53275704,  0.40431368,  0.03171402, -0.17922515]])
        """
        params_set = set()
        named_sublayers = (
            self.named_sublayers(prefix=prefix, include_self=True)
            if include_sublayers
            else zip([prefix], [self])
        )
        for layer_prefix, sublayer in named_sublayers:
            params = sublayer._parameters.items()
            for key, param in params:
                if param is None or param in params_set:
                    continue
                params_set.add(param)
                name = layer_prefix + ('.' if layer_prefix else '') + key
                yield name, param

    def named_sublayers(self, prefix='', include_self=False, layers_set=None):
        """
        Returns an iterator over all sublayers in the Layer, yielding tuple of name and sublayer.
        The duplicate sublayer will only be yielded once.

        Parameters:
            prefix(str, optional): Prefix to prepend to all parameter names. Default: ''.
            include_self(bool, optional): Whether include the Layer itself. Default: False.
            layers_set(set, optional): The set to record duplicate sublayers. Default: None.

        Yields:
            (string, Layer): Tuple of name and Layer

        Examples:
            .. code-block:: python

                >>> import paddle

                >>> fc1 = paddle.nn.Linear(10, 3)
                >>> fc2 = paddle.nn.Linear(3, 10, bias_attr=False)
                >>> model = paddle.nn.Sequential(fc1, fc2)
                >>> for prefix, layer in model.named_sublayers():
                ...     print(prefix, layer)
                0 Linear(in_features=10, out_features=3, dtype=float32)
                1 Linear(in_features=3, out_features=10, dtype=float32)
        """
        if layers_set is None:
            layers_set = set()
        if include_self and self not in layers_set:
            layers_set.add(self)
            yield prefix, self
        for key, layer in self._sub_layers.items():
            if layer is None:
                continue
            layer_prefix = prefix + ('.' if prefix else '') + key
            for p, l in layer.named_sublayers(
                prefix=layer_prefix, include_self=True, layers_set=layers_set
            ):
                yield p, l

    def register_buffer(self, name, tensor, persistable=True):
        """
        Registers a tensor as a buffer of the layer.

        `buffer` is a non-trainable tensor and will not be updated by optimizer,
        but is necessary for evaluation and inference. For example, the mean and variance in BatchNorm layers.
        The registered buffer is persistable by default, and will be saved into
        `state_dict` alongside parameters. If set persistable=False, it registers
        a non-persistable buffer, so that it will not be a part of `state_dict` .

        Buffers can be accessed as attributes using given names.

        Parameters:
            name (string): name of the buffer. The buffer can be accessed
                from this layer using the given name
            tensor (Tensor): the tensor to be registered as buffer.
            persistable (bool): whether the buffer is part of this layer's
                state_dict.

        Returns:
            None

        Examples:
            .. code-block:: python

                >>> import numpy as np
                >>> import paddle

                >>> linear = paddle.nn.Linear(10, 3)
                >>> value = np.array([0]).astype("float32")
                >>> buffer = paddle.to_tensor(value)
                >>> linear.register_buffer("buf_name", buffer, persistable=True)

                >>> # get the buffer by attribute.
                >>> print(linear.buf_name)
                Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
                [0.])

        """

        if '_buffers' not in self.__dict__:
            raise ValueError("super().__init__() should be called first")
        elif not isinstance(name, str):
            raise TypeError(
                "The name of buffer should be a string, but received {}.".format(
                    type(name).__name__
                )
            )
        elif '.' in name:
            raise KeyError(
                "The name of buffer can not contain `.`, "
                "because when you access the newly added buffer in the "
                "form of `self.**.**`, it will cause AttributeError."
            )
        elif name == '':
            raise KeyError("The name of buffer can not be empty.")
        elif hasattr(self, name) and name not in self._buffers:
            raise KeyError(f"attribute '{name}' already exists.")
        elif tensor is not None and not (type(tensor) == core.eager.Tensor):
            raise TypeError(
                "The registered buffer should be a Paddle.Tensor, but received {}.".format(
                    type(tensor).__name__
                )
            )
        else:
            self._buffers[name] = tensor
            if persistable:
                self._non_persistable_buffer_names_set.discard(name)
            else:
                self._non_persistable_buffer_names_set.add(name)

    def buffers(self, include_sublayers=True):
        """

        Returns a list of all buffers from current layer and its sub-layers.

        Parameters:
            include_sublayers(bool, optional): Whether include the buffers of sublayers. If True, also include the buffers from sublayers. Default: True

        Returns:
            list of Tensor, a list of buffers.

        Examples:
            .. code-block:: python

                >>> import numpy as np
                >>> import paddle

                >>> linear = paddle.nn.Linear(10, 3)
                >>> value = np.array([0]).astype("float32")
                >>> buffer = paddle.to_tensor(value)
                >>> linear.register_buffer("buf_name", buffer, persistable=True)

                >>> print(linear.buffers())
                [Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
                [0.])]

        """
        ret = [
            buffer
            for _, buffer in self.named_buffers(
                include_sublayers=include_sublayers
            )
        ]
        return ret

    def named_buffers(self, prefix='', include_sublayers=True):
        """
        Returns an iterator over all buffers in the Layer, yielding tuple of name and Tensor.

        Parameters:
            prefix(str, optional): Prefix to prepend to all buffer names. Default: ''.
            include_sublayers(bool, optional): Whether include the buffers of sublayers.
                If True, also include the named buffers from sublayers. Default: True.

        Yields:
            (string, Tensor): Tuple of name and tensor

        Examples:
            .. code-block:: python

                >>> import numpy as np
                >>> import paddle

                >>> fc1 = paddle.nn.Linear(10, 3)
                >>> buffer1 = paddle.to_tensor(np.array([0]).astype("float32"))
                >>> # register a tensor as buffer by specific `persistable`
                >>> fc1.register_buffer("buf_name_1", buffer1, persistable=True)

                >>> fc2 = paddle.nn.Linear(3, 10)
                >>> buffer2 = paddle.to_tensor(np.array([1]).astype("float32"))
                >>> # register a buffer by assigning an attribute with Tensor.
                >>> # The `persistable` can only be False by this way.
                >>> fc2.buf_name_2 = buffer2

                >>> model = paddle.nn.Sequential(fc1, fc2)

                >>> # get all named buffers
                >>> for name, buffer in model.named_buffers():
                ...     print(name, buffer)
                0.buf_name_1 Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
                [0.])
                1.buf_name_2 Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
                [1.])
        """
        buffers_set = set()
        named_sublayers = (
            self.named_sublayers(prefix=prefix, include_self=True)
            if include_sublayers
            else zip([prefix], [self])
        )
        for layer_prefix, sublayer in named_sublayers:
            buffers = sublayer._buffers.items()
            for key, buffer in buffers:
                if buffer is None or buffer in buffers_set:
                    continue
                buffers_set.add(buffer)
                name = layer_prefix + ('.' if layer_prefix else '') + key
                yield name, buffer

    def clear_gradients(self):
        """
        Clear the gradients of all parameters for this layer.

        Returns:
            None

        Examples:
            .. code-block:: python

                >>> import paddle
                >>> import numpy as np

                >>> value = np.arange(26).reshape(2, 13).astype("float32")
                >>> a = paddle.to_tensor(value)
                >>> linear = paddle.nn.Linear(13, 5)
                >>> adam = paddle.optimizer.Adam(learning_rate=0.01,
                ...                              parameters=linear.parameters())
                >>> out = linear(a)
                >>> out.backward()
                >>> adam.step()
                >>> linear.clear_gradients()

        """
        for p in self.parameters():
            if p.trainable:
                p.clear_gradient()

    def _build_once(self, *args, **kwargs):
        pass

    def _dygraph_call_func(self, *inputs, **kwargs):
        for forward_pre_hook in self._forward_pre_hooks.values():
            hook_result = forward_pre_hook(self, inputs)
            if hook_result is not None:
                if not isinstance(hook_result, tuple):
                    hook_result = (hook_result,)
                inputs = hook_result

        if not self._built:
            with program_desc_tracing_guard(False):
                self._build_once(*inputs, **kwargs)

            self._built = True

        if in_profiler_mode():
            with profiler.RecordEvent(
                self.__class__.__name__, profiler.TracerEventType.Forward
            ):
                outputs = self.forward(*inputs, **kwargs)
        else:
            outputs = self.forward(*inputs, **kwargs)

        for forward_post_hook in self._forward_post_hooks.values():
            hook_result = forward_post_hook(self, inputs, outputs)
            if hook_result is not None:
                outputs = hook_result

        return outputs

    def __call__(self, *inputs, **kwargs):
        if (
            (not in_to_static_mode())
            and (not self._forward_pre_hooks)
            and (not self._forward_post_hooks)
            and (not self._built)
            and in_dygraph_mode()
            and (not in_profiler_mode())
        ):
            self._build_once(*inputs, **kwargs)
            return self.forward(*inputs, **kwargs)
        else:
            return self._dygraph_call_func(*inputs, **kwargs)

    def forward(self, *inputs, **kwargs):
        """
        Defines the computation performed at every call.
        Should be overridden by all subclasses.

        Parameters:
            *inputs(tuple): unpacked tuple arguments
            **kwargs(dict): unpacked dict arguments
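
        Examples:
            .. code-block:: python

                >>> # A minimal sketch of a subclass overriding ``forward``;
                >>> # the layer sizes below are illustrative only.
                >>> import paddle

                >>> class MyLayer(paddle.nn.Layer):
                ...     def __init__(self):
                ...         super().__init__()
                ...         self._linear = paddle.nn.Linear(4, 2)
                ...
                ...     def forward(self, x):
                ...         return self._linear(x)
                ...
                >>> out = MyLayer()(paddle.rand([3, 4]))
                >>> print(out.shape)
                [3, 2]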
        """
        raise NotImplementedError

    def backward(self, *inputs):
        raise ValueError("Layer shouldn't implement backward")

    def add_sublayer(self, name, sublayer):
        """

        Adds a sub Layer instance.

        Added sublayer can be accessed by self.name

        Parameters:
            name(str): name of this sublayer.
            sublayer(Layer): an instance of Layer.
        Returns:
            Layer, the sublayer passed in.

        Examples:
            .. code-block:: python

                >>> import paddle

                >>> class MySequential(paddle.nn.Layer):
                ...     def __init__(self, *layers):
                ...         super().__init__()
                ...         if len(layers) > 0 and isinstance(layers[0], tuple):
                ...             for name, layer in layers:
                ...                 self.add_sublayer(name, layer)
                ...         else:
                ...             for idx, layer in enumerate(layers):
                ...                 self.add_sublayer(str(idx), layer)
                ...
                ...     def forward(self, input):
                ...         for layer in self._sub_layers.values():
                ...             input = layer(input)
                ...         return input
                ...
                >>> fc1 = paddle.nn.Linear(10, 3)
                >>> fc2 = paddle.nn.Linear(3, 10, bias_attr=False)
                >>> model = MySequential(fc1, fc2)
                >>> for prefix, layer in model.named_sublayers():
                ...     print(prefix, layer)
                0 Linear(in_features=10, out_features=3, dtype=float32)
                1 Linear(in_features=3, out_features=10, dtype=float32)
        """
        assert isinstance(sublayer, Layer) or sublayer is None

        self._sub_layers[name] = sublayer
        return sublayer

    def add_parameter(self, name, parameter):
        """Adds a Parameter instance.

        Added parameter can be accessed by self.name

        Parameters:
            name(str): name of this sublayer.
            parameter(Parameter): an instance of Parameter.
        Returns:
            Parameter, the parameter passed in.

        Examples:
            .. code-block:: python

                >>> import paddle
                >>> paddle.seed(100)

                >>> class MyLayer(paddle.nn.Layer):
                ...     def __init__(self):
                ...         super().__init__()
                ...         self._linear = paddle.nn.Linear(1, 1)
                ...         w_tmp = self.create_parameter([1,1])
                ...         self.add_parameter("w_tmp", w_tmp)
                ...
                ...     def forward(self, input):
                ...         return self._linear(input)
                ...
                >>> mylayer = MyLayer()
                >>> for name, param in mylayer.named_parameters():
                ...     print(name, param)
                w_tmp Parameter containing:
                Tensor(shape=[1, 1], dtype=float32, place=Place(cpu), stop_gradient=False,
                [[-1.01448846]])
                _linear.weight Parameter containing:
                Tensor(shape=[1, 1], dtype=float32, place=Place(cpu), stop_gradient=False,
                [[0.18551230]])
                _linear.bias Parameter containing:
                Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=False,
                [0.])
        """
        if '_parameters' not in self.__dict__:
            raise RuntimeError("super().__init__() should be called firstly.")
        elif not isinstance(name, str):
            raise TypeError(
                "The name of parameter should be a string, but received {}.".format(
                    type(name).__name__
                )
            )
        elif '.' in name:
            raise KeyError(
                "The name of parameter can not contain `.`, "
                "because when you access the newly added parameter in the "
                "form of `self.**.**`, it will cause AttributeError."
            )
        elif name == '':
            raise KeyError("The name of parameter can not be empty.")
        elif hasattr(self, name) and name not in self._parameters:
            raise KeyError(f"The parameter '{name}' already exists.")
        elif parameter is not None and not isinstance(
            parameter, framework.Parameter
        ):
            raise TypeError(
                "The parameter to be added should be a Parameter, but received {}.".format(
                    type(parameter).__name__
                )
            )
        else:
            if parameter is None:
                self._parameters[name] = None

            if len(self._loaddict_holder) > 0:
                assert (
                    parameter.name in self._loaddict_holder
                ), "Parameter not found. Can't find [ {} ] in state_dict".format(
                    parameter.name
                )

                parameter.set_value(self._loaddict_holder[parameter.name])

            self._parameters[name] = parameter
        return parameter

    def _set_op_attrs(self, attrs):
        """
        Add customized attribute while append_op. In case of quantization, we want to save
        some attributes into op_desc while exporting inference model by @to_static.

        Arguments:
            attrs(dict): customized attributes that will be added into op_descs.

        NOTE: The interface is only exposed to developers.
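
        Example (an illustrative sketch; the attribute name below is hypothetical):

            .. code-block:: python

                # Every op appended while the decorated forward of this layer runs
                # would then carry the attribute in its op_desc.
                layer._set_op_attrs({"skip_quant": True})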
        """

        def is_already_registered(is_pre_hook):
            layers_hooks = (
                self._forward_pre_hooks
                if is_pre_hook
                else self._forward_post_hooks
            )
            candidate_hook = (
                record_program_ops_pre_hook
                if is_pre_hook
                else set_op_customized_attrs_post_hook
            )

            already_registed = False
            if layers_hooks:
                last_key = next(reversed(layers_hooks))
                already_registed = layers_hooks[last_key] == candidate_hook

            return already_registed

        if not isinstance(attrs, dict):
            raise TypeError(
                "attrs should be type(dict), but received {}".format(
                    type(attrs).__name__
                )
            )

        # NOTE: Overwrite behavior for same key.
        self._customized_attrs.update(attrs)

        if not is_already_registered(is_pre_hook=True):
            pre_hook_helper = self.register_forward_pre_hook(
                record_program_ops_pre_hook
            )
            assert len(self._op_recorder.hooks) == 0
            self._op_recorder.hooks = [pre_hook_helper]

        # manually register post_hook to ensure it is inserted into the head.
        if not is_already_registered(is_pre_hook=False):
            post_hook_helper = self.register_forward_post_hook(
                set_op_customized_attrs_post_hook
            )
            if len(self._forward_post_hooks) > 1:
                self._forward_post_hooks.move_to_end(
                    post_hook_helper._hook_id, last=False
                )

            assert len(self._op_recorder.hooks) == 1

            # hooks that need to be removed once we finish executing them.
            self._op_recorder.hooks.append(post_hook_helper)

    def __getstate__(self):
        return self.__dict__

    def __setstate__(self, state):
        self.__dict__.update(state)

    def __getattr__(self, name):
        if '_parameters' in self.__dict__:
            _parameters = self.__dict__['_parameters']
            if name in self._parameters:
                if in_to_static_mode():
                    return _convert_into_variable(self._parameters[name])
                return self._parameters[name]
        if '_sub_layers' in self.__dict__:
            _sub_layers = self.__dict__['_sub_layers']
            if name in self._sub_layers:
                return self._sub_layers[name]
        if '_buffers' in self.__dict__:
            _buffers = self.__dict__['_buffers']
            if name in _buffers:
                if in_to_static_mode():
                    return _convert_into_variable(_buffers[name])
                return _buffers[name]
        return object.__getattribute__(self, name)

    def __setattr__(self, name, value):
        def _remove_if_exist(*dicts):
            for d in dicts:
                if name in d:
                    del d[name]

        if isinstance(getattr(type(self), name, None), property):
            object.__setattr__(self, name, value)
        params = self.__dict__.get('_parameters', None)
        if isinstance(value, framework.Parameter):
            if params is None:
                raise ValueError("super().__init__() should be called first")
            if len(self._loaddict_holder) > 0:
                assert (
                    value.name in self._loaddict_holder
                ), "Parameter not found. Can't find [ {} ] in state_dict".format(
                    value.name
                )

                value.set_value(self._loaddict_holder[value.name])

            _remove_if_exist(self.__dict__, self._buffers, self._sub_layers)
            params[name] = value
        elif params is not None and name in params:
            if value is not None:
                raise TypeError(
                    "assignment to parameter '{}' should be of type Parameter or None, but got '{}'".format(
                        name, type(value).__name__
                    )
                )
            params[name] = None
        else:
            layers = self.__dict__.get('_sub_layers', None)
            if isinstance(value, Layer):
                if layers is None:
                    raise ValueError(
                        "super().__init__() should be called first"
                    )

                _remove_if_exist(self.__dict__, self._parameters, self._buffers)
                layers[name] = value
            elif layers is not None and name in layers:
                if value is not None:
                    raise TypeError(
                        "assignment to sublayer '{}' should be of type Layer or None, but got '{}'".format(
                            name, type(value).__name__
                        )
                    )
                layers[name] = None
            else:
                _buffers = self.__dict__.get('_buffers', None)
                if isinstance(value, core.eager.Tensor):
                    if _buffers is None:
                        raise ValueError(
                            "super().__init__() should be called first"
                        )
                    _remove_if_exist(
                        self.__dict__, self._parameters, self._sub_layers
                    )
                    # Set persistable=False by default. Only `register_buffer` can
                    # add a persistable buffer.
                    if name not in self._buffers:
                        self._non_persistable_buffer_names_set.add(name)
                    if not value.name:
                        value.name = unique_name.generate('_buffers_' + name)
                    _buffers[name] = value
                elif _buffers is not None and name in _buffers:
                    # Note(Aurelius84): In Dy2stat, the value of the Buffer may be modified in
                    # decorated function, such as `self.buffer = new_tensor`. So we update its
                    # value via `assign`.
                    if type(value) == framework.Variable:
                        from paddle import assign

                        # Note(zhhsplendid): the condition below happens in PaddleGan model,
                        # but should all non-Variable _buffers[name] be re-assigned? We
                        # should consider it in the future. For now this is written as
                        # conservative code.
                        if in_to_static_mode() and _buffers[name] is None:
                            raise RuntimeError(
                                'In Dy2stat, self.{0} is a buffer and self.{0} is '
                                'not allowed to be set to Variable when self.{0} is None.'.format(
                                    name
                                )
                            )
                        elif (
                            _buffers[name] is None
                            or type(getattr(self, name)) == core.eager.Tensor
                        ):
                            _buffers[name] = assign(value)
                        else:
                            assign(value, getattr(self, name))
                    elif value is not None:
                        raise TypeError(
                            "assignment to buffers '{}' should be of type core.Tensor or None, but got '{}'".format(
                                name, type(value).__name__
                            )
                        )
                    else:
                        # Assigning None will remove the buffer, but if re-assign a new varBase to it,
                        # it will be remarked as a buffer with same `persistable` attribute.
                        _buffers[name] = None
                else:
                    object.__setattr__(self, name, value)

    def __delattr__(self, name):
        if name in self._parameters:
            del self._parameters[name]
        elif name in self._sub_layers:
            del self._sub_layers[name]
        elif name in self._buffers:
            del self._buffers[name]
            self._non_persistable_buffer_names_set.discard(name)
        else:
            object.__delattr__(self, name)

    def __dir__(self):
        """
        Return a list of all parameters, buffers (non-parameter tensors), sublayers, methods and attributes of the Layer.

        Examples:
            .. code-block:: python

                >>> import paddle
                >>> import numpy as np

                >>> class Mylayer(paddle.nn.Layer):
                ...     def __init__(self):
                ...         super().__init__()
                ...         self.linear1 = paddle.nn.Linear(10, 10)
                ...         self.linear2 = paddle.nn.Linear(5, 5)
                ...         self.conv2d = paddle.nn.Conv2D(3, 2, 3)
                ...         self.embedding = paddle.nn.Embedding(128, 16)
                ...         self.h_0 = paddle.to_tensor(np.zeros([10, 10]).astype('float32'))
                ...
                >>> mylayer = Mylayer()
                >>> print(dir(mylayer))
                ['__call__', '__class__', '__delattr__', '__dict__', ..., 'training']
        """
        method = dir(self.__class__)
        attrs = list(self.__dict__.keys())
        parameters = list(self._parameters.keys())
        sublayers = list(self._sub_layers.keys())
        buffers = list(self._buffers.keys())

        keys = method + attrs + parameters + sublayers + buffers

        return keys

    def extra_repr(self):
        """
        Extra representation of this layer, you can have custom implementation
        of your own layer.
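
        A minimal sketch of a custom implementation (the attribute below is illustrative):

            .. code-block:: python

                >>> import paddle

                >>> class MyLayer(paddle.nn.Layer):
                ...     def __init__(self, hidden_size=8):
                ...         super().__init__()
                ...         self._hidden_size = hidden_size
                ...
                ...     def extra_repr(self):
                ...         return f'hidden_size={self._hidden_size}'
                ...
                >>> print(MyLayer())
                MyLayer(hidden_size=8)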
        """
        return ''

    def __repr__(self):
        extra_lines = []
        extra_repr = self.extra_repr()
        extra_lines = extra_repr.split('\n')
        sublayer_lines = []
        for name, layer in self._sub_layers.items():
            sublayer_str = repr(layer)
            sublayer_str = _addindent(sublayer_str, 2)
            sublayer_lines.append('(' + name + '): ' + sublayer_str)

        final_str = self.__class__.__name__ + '('
        if extra_lines:
            if len(extra_lines) > 1:
                final_str += '\n  ' + '\n  '.join(extra_lines) + '\n'
            elif len(extra_lines) == 1:
                final_str += extra_lines[0]
        if sublayer_lines:
            final_str += '\n  ' + '\n  '.join(sublayer_lines) + '\n'

        final_str += ')'
        return final_str

    def register_state_dict_hook(self, hook):
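        """
        Register a hook for ``state_dict``. The hook is called with the collected
        state dict; if it returns a value other than None, that value replaces the
        result.

        Returns:
            HookRemoveHelper, a helper object that can remove the added hook by calling ``remove()``.

        Examples:
            .. code-block:: python

                >>> # A minimal sketch; the hook below simply reports the number
                >>> # of entries in the collected state dict.
                >>> import paddle

                >>> def report_size_hook(state_dict):
                ...     print(len(state_dict))
                ...
                >>> emb = paddle.nn.Embedding(4, 4)
                >>> handle = emb.register_state_dict_hook(report_size_hook)
                >>> state_dict = emb.state_dict()
                1
                >>> handle.remove()
        """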
        hook_remove_helper = HookRemoveHelper(self._state_dict_hooks)
        self._state_dict_hooks[hook_remove_helper._hook_id] = hook
        return hook_remove_helper

    def _obtain_parameters_buffers(
        self,
        destination=None,
        include_sublayers=True,
        structured_name_prefix="",
    ):
        """
        The difference from state_dict() is that state_dict_hook will not be called,
        but the original types of parameters and buffers will be maintained.
        """
        if destination is None:
            destination = collections.OrderedDict()
        for name, data in self._parameters.items():
            if data is not None:
                destination[structured_name_prefix + name] = data
        for name, buffer in self._buffers.items():
1777 1778 1779 1780
            if (
                buffer is not None
                and name not in self._non_persistable_buffer_names_set
            ):
                destination[structured_name_prefix + name] = buffer

        if include_sublayers:
            for layer_name, layer_item in self._sub_layers.items():
                if layer_item is not None:
                    destination_temp = destination.copy()
                    destination_temp.update(
                        layer_item._obtain_parameters_buffers(
                            destination_temp,
                            include_sublayers,
                            structured_name_prefix + layer_name + ".",
                        )
                    )
                    destination = destination_temp
        return destination

    def _state_dict_impl(
        self,
        destination=None,
        include_sublayers=True,
        structured_name_prefix="",
        include_non_persistable_buffer=False,
        use_hook=True,
    ):
        """
        Get all parameters and persistable buffers of the current layer and its sub-layers, and set them into a dict.

        Parameters:
            destination(dict, optional) : If provided, all the parameters and persistable buffers will be set to this dict. Default: None
            include_sublayers(bool, optional) : If true, also include the parameters and persistable buffers from sublayers. Default: True
            include_non_persistable_buffer(bool, optional): If true, include non persistable buffers of current layer and its sub-layers, it is used in pure fp16 and jit.save. Default: False
            use_hook(bool, optional) : If true, the operations contained in _state_dict_hooks will be appended to the destination. Default: True
        """

        if destination is None:
            destination = collections.OrderedDict()
        for name, data in self._parameters.items():
            if data is not None:
                destination[structured_name_prefix + name] = data
        for name, buffer in self._buffers.items():
            if not include_non_persistable_buffer:
                if (
                    buffer is not None
                    and name not in self._non_persistable_buffer_names_set
                ):
                    destination[structured_name_prefix + name] = buffer
            else:
                if buffer is not None:
                    destination[structured_name_prefix + name] = buffer

        if include_sublayers:
            for layer_name, layer_item in self._sub_layers.items():
                if layer_item is not None:
                    destination_temp = destination.copy()
                    destination_temp.update(
                        layer_item._state_dict_impl(
                            destination_temp,
                            include_sublayers,
                            structured_name_prefix + layer_name + ".",
                            include_non_persistable_buffer,
                            use_hook,
                        )
                    )
                    destination = destination_temp
        if use_hook:
            for state_dict_hook in self._state_dict_hooks.values():
                hook_result = state_dict_hook(destination)
                if hook_result is not None:
                    destination = hook_result

        return destination

    def to_static_state_dict(
        self,
        destination=None,
        include_sublayers=True,
        structured_name_prefix="",
        use_hook=True,
    ):
        '''

        Get all parameters and buffers of the current layer and its sub-layers, and set them into a dict.

        Parameters:
            destination(dict, optional) : If provided, all the parameters and persistable buffers will be set to this dict. Default: None
            include_sublayers(bool, optional) : If true, also include the parameters and persistable buffers from sublayers. Default: True
            use_hook(bool, optional) : If true, the operations contained in _state_dict_hooks will be appended to the destination. Default: True

        Returns:
            dict, a dict containing all the parameters and persistable buffers.

        Examples:
            .. code-block:: python

                >>> import paddle

                >>> emb = paddle.nn.Embedding(10, 10)

                >>> state_dict = emb.to_static_state_dict()
                >>> paddle.save( state_dict, "paddle_dy.pdparams")

        '''
        return self._state_dict_impl(
            destination=destination,
            include_sublayers=include_sublayers,
            structured_name_prefix=structured_name_prefix,
            include_non_persistable_buffer=True,
            use_hook=use_hook,
        )

    def state_dict(
        self,
        destination=None,
        include_sublayers=True,
        structured_name_prefix="",
        use_hook=True,
    ):
        '''
        Get all parameters and persistable buffers of the current layer and its sub-layers, and set them into a dict.

        Parameters:
            destination(dict, optional) : If provided, all the parameters and persistable buffers will be set to this dict. Default: None
            include_sublayers(bool, optional) : If true, also include the parameters and persistable buffers from sublayers. Default: True
            use_hook(bool, optional) : If true, the operations contained in _state_dict_hooks will be appended to the destination. Default: True

        Returns:
            dict: a dict containing all the parameters and persistable buffers.

        Examples:
            .. code-block:: python

                >>> import paddle

                >>> emb = paddle.nn.Embedding(10, 10)

                >>> state_dict = emb.state_dict()
                >>> paddle.save( state_dict, "paddle_dy.pdparams")

        '''
        return self._state_dict_impl(
            destination=destination,
            include_sublayers=include_sublayers,
            structured_name_prefix=structured_name_prefix,
            include_non_persistable_buffer=False,
            use_hook=use_hook,
        )

    @framework.deprecate_stat_dict
    def set_state_dict(self, state_dict, use_structured_name=True):
        '''
        Set parameters and persistable buffers from state_dict. All the parameters and buffers will be reset by the tensor in the state_dict

        Parameters:
            state_dict(dict) : Dict contains all the parameters and persistable buffers.
            use_structured_name(bool, optional) : If true, use structured name as key, otherwise, use parameter or buffer name as key.
                                                  Default: True
        Returns:
            missing_keys(list): A list of str containing the missing keys
            unexpected_keys(list): A list of str containing the unexpected keys

        Examples:
            .. code-block:: python

                >>> import paddle

                >>> emb = paddle.nn.Embedding(10, 10)

                >>> state_dict = emb.state_dict()
                >>> paddle.save(state_dict, "paddle_dy.pdparams")
                >>> para_state_dict = paddle.load("paddle_dy.pdparams")
                >>> emb.set_state_dict(para_state_dict)

H
hong 已提交
1953
        '''
1954 1955 1956
        missing_keys = []
        match_keys = set()
        unexpected_keys = []
H
hong 已提交
1957

1958 1959 1960
        def _check_match(key, param):
            state = state_dict.get(key, None)
            if state is None:
1961
                missing_keys.append(key)
1962
                raise ValueError(f"{key} is not found in the provided dict.")
1963
            if isinstance(state, (dict, list)):
1964
                if len(state) != len(param):
1965
                    missing_keys.append(key)
1966 1967 1968 1969 1970 1971
                    raise ValueError(
                        "{} receieves the length of {}, "
                        "but the expected shape is {}".format(
                            key, len(state), len(param)
                        )
                    )
S
1973
                    match_keys.add(key)
S
            else:
1976 1977 1978 1979 1980
                state_shape = (
                    state.shape()
                    if inspect.ismethod(state.shape)
                    else state.shape
                )
S
                if list(state_shape) != list(param.shape):
1983
                    missing_keys.append(key)
S
1985 1986 1987 1988
                        "{} receives a shape {}, but the expected shape is {}.".format(
                            key, list(state_shape), list(param.shape)
                        )
                    )
1989
                match_keys.add(key)
S
1991 1992

        matched_param_state = []
S
sneaxiy 已提交
1993
        for key, param in self._state_dict_impl(use_hook=False).items():
1994 1995 1996 1997 1998
            key_name = key if use_structured_name else param.name
            try:
                match_res = _check_match(key_name, param)
                matched_param_state.append(match_res)
            except ValueError as err:
1999
                warnings.warn(f"Skip loading for {key}. " + str(err))
2000 2001 2002
        for key in state_dict.keys():
            if key not in match_keys:
                unexpected_keys.append(key)
姜永久 已提交
2003
        if in_dygraph_mode():
2004 2005 2006
            for param, state in matched_param_state:
                param.set_value(state)
        else:
H
hong 已提交
2007

2008 2009 2010 2011 2012 2013 2014
            def _set_var(var, ndarray):
                t = global_scope().find_var(var.name).get_tensor()
                p = t._place()
                if p.is_cpu_place():
                    place = core.CPUPlace()
                elif p.is_cuda_pinned_place():
                    place = core.CUDAPinnedPlace()
2015 2016 2017 2018
                elif p.is_xpu_place():
                    p = core.Place()
                    p.set_place(t._place())
                    place = core.XPUPlace(p.xpu_device_id())
2019 2020 2021 2022 2023 2024 2025
                elif p.is_custom_place():
                    p = core.Place()
                    p.set_place(t._place())
                    place = core.CustomPlace(
                        paddle.device.get_device().split(':')[0],
                        p.custom_device_id(),
                    )
2026 2027 2028 2029 2030 2031
                else:
                    p = core.Place()
                    p.set_place(t._place())
                    place = core.CUDAPlace(p.gpu_device_id())
                t.set(ndarray, place)

2032 2033 2034 2035 2036
            try:
                executor = Executor(_get_device())._default_executor
                # restore parameter states
                core._create_loaded_parameter(
                    [param for param, state in matched_param_state],
2037 2038 2039
                    global_scope(),
                    executor,
                )
2040 2041 2042 2043 2044 2045
                for param, state in matched_param_state:
                    _set_var(param, state)
            except ValueError as e:
                raise ValueError(
                    "This error might happen in dy2static when calling 'set_state_dict' dynamically in 'forward', which is not supported. If you only need to call 'set_state_dict' once, move it to '__init__'."
                )

        return missing_keys, unexpected_keys

    def to(self, device=None, dtype=None, blocking=None):
        '''
        Cast the parameters and buffers of Layer by the given device, dtype and blocking.

        Parameters:
            device(str|paddle.CPUPlace()|paddle.CUDAPlace()|paddle.CUDAPinnedPlace()|paddle.XPUPlace()|None, optional): The device on which to store the Layer.
            If None, the device is the same with the original Tensor. If device is string, it can be ``cpu``, ``gpu:x`` and ``xpu:x``, where ``x`` is the
            index of the GPUs or XPUs. Default: None.

            dtype(str|numpy.dtype|paddle.dtype|None, optional): The type of the data. If None, the dtype is the same with the original Tensor. Default: None.

            blocking(bool|None, optional): If False and the source is in pinned memory, the copy will be
              asynchronous with respect to the host. Otherwise, the argument has no effect. If None, the blocking is set True. Default: None.

        Returns:
            self

        Examples:
            .. code-block:: python

                >>> import paddle
                >>> paddle.seed(2023)

                >>> linear=paddle.nn.Linear(2, 2)
                >>> linear.weight
                >>> print(linear.weight)
                Parameter containing:
                Tensor(shape=[2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=False,
                [[ 0.89611185,  0.04935038],
                 [-0.58883440,  0.99266374]])

                >>> linear.to(dtype='float64')
                >>> linear.weight
                >>> print(linear.weight)
                Parameter containing:
                Tensor(shape=[2, 2], dtype=float64, place=Place(gpu:0), stop_gradient=False,
                [[ 0.89611185,  0.04935038],
                 [-0.58883440,  0.99266374]])

                >>> linear.to(device='cpu')
                >>> linear.weight
                >>> print(linear.weight)
                Parameter containing:
                Tensor(shape=[2, 2], dtype=float64, place=Place(cpu), stop_gradient=False,
                [[ 0.89611185,  0.04935038],
                 [-0.58883440,  0.99266374]])

                >>> # doctest: +REQUIRES(env:GPU)
                >>> linear.to(device=paddle.CUDAPinnedPlace(), blocking=False)
                >>> linear.weight
                >>> print(linear.weight)
                Tensor(shape=[2, 2], dtype=float64, place=Place(gpu_pinned), stop_gradient=False,
                [[ 0.89611185,  0.04935038],
                 [-0.58883440,  0.99266374]])

        '''
        return self._to_impl(
            device=device,
            dtype=dtype,
            blocking=blocking,
            include_sublayers=True,
            floating_only=False,
        )

    def _apply(self, func, device, dtype, blocking, include_sublayers=True):
        if include_sublayers:
            for layer in self.children():
                layer._apply(func, device, dtype, blocking, include_sublayers)

        for key, param in self._parameters.items():
            if param is not None:
                with no_grad():
                    param_applied = func(param, device, dtype, blocking)

                if param.grad is not None:
                    with no_grad():
                        grad_applied = func(
                            param._grad_ivar(), device, dtype, blocking
                        )

        for key, buf in self._buffers.items():
            if buf is not None:
                self._buffers[key] = func(buf, device, dtype, blocking)

        self._dtype = dtype

    def _transform(self, t, device, dtype, blocking):
        if device is None:
            device = t.place
        if dtype is None:
            dtype = t.dtype

        if type(dtype) is not VarDesc.VarType:
            dtype = convert_np_dtype_to_dtype_(dtype)

        # 1. gpu place need to determine whether the memory is sufficient for allocation:
        if t.place.is_gpu_place():
            # for gpu, minimum memory allocation unit is 256 bytes.
            size_dtype = core.size_of_dtype(dtype)
            # Note(zhangbo): Paddle GPU minimum memory allocation unit is 256 bytes; waiting_alloc_memory computes the memory space occupied by 't'.
            # Coefficient 1.2 is used to avoid OOM that may occur in this critical state when the memory is just enough.
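            # e.g. (illustrative): a float32 Tensor of shape [1024, 1024] occupies
            # 1024 * 1024 * 4 = 4,194,304 bytes; rounding up to the next 256-byte
            # multiple gives 4,194,560 bytes, and the 1.2 factor reserves ~5,033,472 bytes.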
            waiting_alloc_memory = (
                ((np.prod(t.shape) * size_dtype) / 256 + 1) * 256 * 1.2
            )
            gpu_memory_available = core.gpu_memory_available()
            if gpu_memory_available < waiting_alloc_memory:
                # Copy param / Tensor to cpu
                t_used = t._copy_to(
                    paddle.CPUPlace(), blocking
                )  # k-v type will error
                # Release mem of t
                t.value().get_tensor()._clear()
            else:
                t_used = t
        else:
            t_used = t

        # 2. cast param / Tensor to dtype
        if dtype is not None and dtype != t_used.dtype:
            with paddle.base.framework._dygraph_place_guard(place=t_used.place):
                t_casted = t_used.cast(dtype=dtype)
        else:
            t_casted = t_used

        # 3. Copy casted cpu param / Tensor to device
        if device is not None and not t_casted.place._equals(device):
            new_t = t_casted._copy_to(device, blocking)
        else:
            new_t = t_casted

        # 4. share Tensor to origin param / Tensor
        dst_tensor = t.value().get_tensor()
        src_tensor = new_t.value().get_tensor()
        dst_tensor._share_data_with(src_tensor)

        return t

    def _to_impl(
        self,
        device=None,
        dtype=None,
        blocking=None,
        include_sublayers=True,
        floating_only=False,
    ):
        '''
        Cast the parameters and buffers of Layer by the given device, dtype and blocking.

        Parameters:
            device(str|paddle.CPUPlace()|paddle.CUDAPlace()|paddle.CUDAPinnedPlace()|paddle.XPUPlace()|None, optional): The device on which to store the Layer.
            If None, the device is the same with the original Tensor. If device is string, it can be ``cpu``, ``gpu:x`` and ``xpu:x``, where ``x`` is the
            index of the GPUs or XPUs. Default: None.

            dtype(str|numpy.dtype|paddle.dtype|None, optional): The type of the data. If None, the dtype is the same with the original Tensor. Default: None.

            blocking(bool|None, optional): If False and the source is in pinned memory, the copy will be
              asynchronous with respect to the host. Otherwise, the argument has no effect. If None, the blocking is set True. Default: None.

            include_sublayers(bool|True, optional): If True, deal with self and all sublayers parameters and buffers, if not only deal with self parameters and buffers. Default: True.

            floating_only(bool|False, optional): If True, only cast all floating point parameters and buffers of Layer by the give device, dtype and blocking.

        Returns:
            self

        '''

        if device is None and dtype is None and blocking is None:
            return self

        if device is not None:
            if isinstance(device, str):
                device = paddle.device._convert_to_place(device)
            elif isinstance(
                device,
                (
                    core.CPUPlace,
                    core.CUDAPlace,
                    core.CUDAPinnedPlace,
                    core.XPUPlace,
                ),
            ):
                pass
            else:
                raise ValueError(
                    "device value error, must be str, paddle.CPUPlace(), paddle.CUDAPlace(), paddle.CUDAPinnedPlace() or paddle.XPUPlace(), but the type of device is "
                    + type(device).__name__
                )

        if blocking is None:
            blocking = True
        else:
            assert isinstance(
                blocking, bool
            ), "blocking value error, must be the True, False or None"

        def transform(t, device, dtype, blocking):
            if floating_only and (not paddle.is_floating_point(t)):
                return t
            return self._transform(t, device, dtype, blocking)

        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=UserWarning)
            self._apply(transform, device, dtype, blocking, include_sublayers)

        self._dtype = dtype
        return self

    def _startup_program(self):
        """
        Return the startup program containing the initialization operations of all parameters.

        NOTE(dev): This is a very low level API and only for internal developers.
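
        A minimal sketch of how it might be inspected (illustrative):

            .. code-block:: python

                >>> import paddle
                >>> linear = paddle.nn.Linear(2, 2)
                >>> startup_prog = linear._startup_program()
                >>> # startup_prog.global_block() now holds the initialization ops
                >>> # for linear's weight and bias.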
        """
        startup_program = Program()
        for param in self.parameters():
            param._create_init_op(startup_program.global_block())

        return startup_program

    # [aliases] Compatible with old method names
    set_dict = set_state_dict
    load_dict = set_state_dict

    def float(self, excluded_layers=None):
        '''
        Casts all floating point parameters and buffers to ``float`` data type.

        Parameters:
            excluded_layers(nn.Layer|list|None, optional): Specify the layers that need to be kept original data type. if excluded_layers is None, casts all floating point parameters and buffers. Default: None.

        Returns:
            Layer: self

        Examples:
            .. code-block:: python

                >>> import paddle

                >>> class Model(paddle.nn.Layer):
                ...     def __init__(self):
                ...         super().__init__()
                ...         self.linear = paddle.nn.Linear(1, 1)
                ...         self.dropout = paddle.nn.Dropout(p=0.5)
                ...
                ...     def forward(self, input):
                ...         out = self.linear(input)
                ...         out = self.dropout(out)
                ...         return out
                ...
                >>> model = Model()
                >>> model.float()
                Model(
                    (linear): Linear(in_features=1, out_features=1, dtype=paddle.float32)
                    (dropout): Dropout(p=0.5, axis=None, mode=upscale_in_train)
                )
        '''

        excluded_layers = [] if excluded_layers is None else excluded_layers

        if isinstance(excluded_layers, type):
            excluded_layers = [excluded_layers]
        elif isinstance(excluded_layers, list):
            pass
        else:
            raise TypeError(
                "excluded_layers should be type nn.Layer or list, but got %s."
                % type(excluded_layers).__name__
            )

        def layer_trans(layer):
            _layer_trans_dtype(layer, paddle.float32, excluded_layers)

        return self.apply(layer_trans)

    def float16(self, excluded_layers=None):
        '''
        Casts all floating point parameters and buffers to ``float16`` data type.


        .. note::
            ``nn.BatchNorm`` does not support ``float16`` weights, so it would not be converted by default.


        Parameters:
           excluded_layers(nn.Layer|list|None, optional): Specify the layers that need to be kept original data type. if excluded_layers is None, casts all floating point parameters and buffers except ``nn.BatchNorm``. Default: None.

        Returns:
            Layer: self

        Examples:
            .. code-block:: python

                >>> # doctest: +SKIP('Paddle compiled by the user does not support float16, so keep original data type.')
                >>> import paddle

                >>> class Model(paddle.nn.Layer):
                ...     def __init__(self):
                ...         super().__init__()
                ...         self.linear = paddle.nn.Linear(1, 1)
                ...         self.dropout = paddle.nn.Dropout(p=0.5)
                ...
                ...     def forward(self, input):
                ...         out = self.linear(input)
                ...         out = self.dropout(out)
                ...         return out
                ...
                >>> model = Model()
                >>> model.float16()
                Model(
                    (linear): Linear(in_features=1, out_features=1, dtype=float32)
                    (dropout): Dropout(p=0.5, axis=None, mode=upscale_in_train)
                )
        '''

        if paddle.amp.is_float16_supported() is False:
            warnings.warn(
                "Paddle compiled by the user does not support float16, so keep original data type."
            )
            return self

        excluded_layers = (
            [nn.BatchNorm] if excluded_layers is None else excluded_layers
        )

        if isinstance(excluded_layers, type):
            excluded_layers = [excluded_layers]
        elif isinstance(excluded_layers, list):
            pass
        else:
            raise TypeError(
                "excluded_layers should be type nn.Layer or list, but got %s."
                % type(excluded_layers).__name__
            )

        def layer_trans(layer):
            _layer_trans_dtype(layer, paddle.float16, excluded_layers)

        return self.apply(layer_trans)

    def bfloat16(self, excluded_layers=None):
        '''
        Casts all floating point parameters and buffers to ``bfloat16`` data type.


        .. note::
            ``nn.BatchNorm`` does not support ``bfloat16`` weights, so it would not be converted by default.


        Parameters:
            excluded_layers(nn.Layer|list|None, optional): Specify the layers that need to be kept original data type. if excluded_layers is None, casts all floating point parameters and buffers except ``nn.BatchNorm``. Default: None.

        Returns:
            Layer: self

        Examples:
            .. code-block:: python

                >>> # doctest: +SKIP('bfloat need V100 compile')
                >>> import paddle

                >>> class Model(paddle.nn.Layer):
                ...     def __init__(self):
                ...         super().__init__()
                ...         self.linear = paddle.nn.Linear(1, 1)
                ...         self.dropout = paddle.nn.Dropout(p=0.5)
                ...
                ...     def forward(self, input):
                ...         out = self.linear(input)
                ...         out = self.dropout(out)
                ...         return out
                ...
                >>> model = Model()
                >>> model.bfloat16()
                >>> #UserWarning: Paddle compiled by the user does not support bfloat16, so keep original data type.
                Model(
                    (linear): Linear(in_features=1, out_features=1, dtype=float32)
                    (dropout): Dropout(p=0.5, axis=None, mode=upscale_in_train)
                )
        '''

        if paddle.amp.is_bfloat16_supported() is False:
            warnings.warn(
                "Paddle compiled by the user does not support bfloat16, so keep original data type."
            )
            return self

        excluded_layers = (
            [nn.BatchNorm] if excluded_layers is None else excluded_layers
        )

        if isinstance(excluded_layers, type):
            excluded_layers = [excluded_layers]
        elif isinstance(excluded_layers, list):
            pass
        else:
            raise TypeError(
                "excluded_layers should be type nn.Layer or list, but got %s."
                % type(excluded_layers).__name__
            )

        def layer_trans(layer):
            _layer_trans_dtype(layer, paddle.bfloat16, excluded_layers)

        return self.apply(layer_trans)