# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import collections
import copy
import inspect
import re
import warnings
import weakref

import numpy as np

import paddle
from paddle import nn, profiler
from paddle.base import core, framework, unique_name
from paddle.base.core import VarDesc
from paddle.base.dygraph import no_grad
from paddle.base.dygraph.base import (
    _convert_into_variable,
    in_declarative_mode,
    program_desc_tracing_guard,
)
from paddle.base.dygraph_utils import _append_activation_in_dygraph
from paddle.base.executor import Executor, global_scope
from paddle.base.framework import Parameter, Program
from paddle.base.framework import _current_expected_place as _get_device
from paddle.base.framework import (
    _global_flags,
    convert_np_dtype_to_dtype_,
    default_main_program,
    in_dygraph_mode,
)
from paddle.base.layer_helper_base import LayerHelperBase
from paddle.base.param_attr import ParamAttr
from paddle.profiler.utils import in_profiler_mode
from paddle.utils import deprecated

__all__ = []

_first_cap_re = re.compile('(.)([A-Z][a-z]+)')
_all_cap_re = re.compile('([a-z])([A-Z])')


def record_program_ops_pre_hook(layer, inputs):
    """
    A pre-hook to mark op numbers before enter layer.forward.
    """
    if not in_dygraph_mode():
        if layer._op_recorder.start < 0:
            layer._op_recorder.start = len(
                default_main_program().current_block().ops
            )
            layer._op_recorder.is_valid = True
        else:
            layer._op_recorder.is_valid = False
            warnings.warn(
                "{} has recorded the op information before. Please check whether you call this layer twice.".format(
                    layer._full_name
                )
            )

    return None


def set_op_customized_attrs_post_hook(layer, inputs, outputs):
    """
    A post-hook to append customized attributes into all operators generated in current layer.
    """
    if not in_dygraph_mode() and layer._op_recorder.is_valid:
        start = layer._op_recorder.start
        end = len(default_main_program().current_block().ops)
        assert start >= 0 and end >= start
        ops = default_main_program().current_block().ops[start:end]

        layer._op_recorder.end = end
        layer._op_recorder.ops = ops

        for op in ops:
            for attr_name, val in layer._customized_attrs.items():
                op._set_attr(attr_name, val)

        # remove pre-hook and post-hook
        for hook_helper in layer._op_recorder.hooks:
            hook_helper.remove()

    return None
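
# A hedged usage sketch for the two hooks above: they are intended to be
# registered as a forward pre-/post-hook pair on a layer running under the
# static graph, so that every op generated by its forward picks up the layer's
# customized attributes. The exact call site below is an assumption for
# illustration only:
#
#   pre_hook = layer.register_forward_pre_hook(record_program_ops_pre_hook)
#   post_hook = layer.register_forward_post_hook(set_op_customized_attrs_post_hook)
#   layer._op_recorder.hooks = [pre_hook, post_hook]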


def _scope_dist2single(dist_scope):
    mapping = {
        "row_parallel_linear": "linear",
        "column_parallel_linear": "linear",
        "vocab_parallel_embedding": "embedding",
        # "parallel_cross_entropy": "cross_entropy", while mp_layer has parallel_cross_entropy,
S
Shuangchi He 已提交
106
        # but there is no parameters so the mapping of parallel_cross_entropy is not necessary.
107 108 109 110
    }
    return mapping.get(dist_scope, dist_scope)
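
# Illustrative behaviour of _scope_dist2single (example inputs only):
#
#   _scope_dist2single("row_parallel_linear")       # -> "linear"
#   _scope_dist2single("vocab_parallel_embedding")  # -> "embedding"
#   _scope_dist2single("my_layer")                  # -> "my_layer" (unknown scopes pass through)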


def _convert_camel_to_snake(name):
    s1 = _first_cap_re.sub(r'\1_\2', name)
    return _all_cap_re.sub(r'\1_\2', s1).lower()
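
# A minimal sketch of the conversion above (example class names are assumptions):
#
#   _convert_camel_to_snake("MyLayer")    # -> "my_layer"
#   _convert_camel_to_snake("LinearNet")  # -> "linear_net"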


def _addindent(string, indent):
    s1 = string.split('\n')
    if len(s1) == 1:
        return string
    s2 = []
    for idx, line in enumerate(s1):
        if idx > 0:
            s2.append(str((indent * ' ') + line))
    return s1[0] + '\n' + '\n'.join(s2)
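
# For example (hypothetical input), _addindent("Linear(...)\nDropout(...)", 2)
# returns "Linear(...)\n  Dropout(...)": every line except the first gains two
# leading spaces, which is how nested sublayer repr strings are typically indented.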


def _layer_trans_dtype(layer, dtype, excluded_layers):
    if type(layer) in excluded_layers:
        return

    layer._to_impl(dtype=dtype, floating_only=True, include_sublayers=False)
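
# A rough, hedged sketch of how the helper above can be driven (the walk over
# sublayers and the excluded class are assumptions for illustration; only
# floating-point parameters are converted because floating_only=True is passed):
#
#   for sub in model.sublayers(include_self=True):
#       _layer_trans_dtype(sub, paddle.float16, excluded_layers=(paddle.nn.BatchNorm,))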


class LayerObjectHelper(LayerHelperBase):
    def __init__(self, name):
        super().__init__(name, layer_type=name)

    def append_op(
        self,
        type=None,
        inputs=None,
        outputs=None,
        attrs=None,
        stop_gradient=None,
    ):
        """append an operator for this layer object.

           Args:
               type: operator type
               inputs: input variable of the operator
               dtype: data type of this parameter
               is_bias: if this is a bias parameter
               default_initializer: set the default initializer for this parameter

        Returns created parameter Variable.
        """
        return self.main_program.current_block().append_op(
            type=type,
            inputs=inputs,
            outputs=outputs,
            attrs=attrs,
            stop_gradient=stop_gradient,
        )

    def _multiple_input(self, inputs_in):
        inputs = inputs_in
        ret = []
        if isinstance(inputs, (list, tuple)):
            for inp in inputs:
                ret.append(self.to_variable(inp))
        else:
            ret.append(self.to_variable(inputs))
        return ret

    # TODO: make it public when we need it
    def _input(self, inputs_in):
        inputs = self._multiple_input(inputs_in)
        if len(inputs) != 1:
            raise ValueError(f"{self.layer_type} layer only takes one input")
        return inputs[0]

    def _multiple_param_attr(self, length, param_attr_in=None):
        param_attr = param_attr_in
        if isinstance(param_attr, ParamAttr):
            param_attr = [param_attr]

        if len(param_attr) != 1 and len(param_attr) != length:
            raise ValueError(f"parameter number mismatch in {self.name}")
        elif len(param_attr) == 1 and length != 1:
            tmp = [None] * length
            for i in range(length):
                tmp[i] = copy.deepcopy(param_attr[0])
            param_attr = tmp
        return param_attr

    def iter_inputs_and_params(self, inputs_in, param_attr_in=None):
        """Access all inputs and params one by one

           Args:
               inputs_in: inputs to iterate over
               param_attr_in: param_attr to iterate over

        Returns input, param_attr
        """
        param_attr_in = ParamAttr._to_attr(param_attr_in)
        if isinstance(param_attr_in, bool):
            raise ValueError(f'Param_attr should not be False in {self.name}')
        inputs = inputs_in if (inputs_in is not None) else []
        inputs = self._multiple_input(inputs)
        param_attrs = self._multiple_param_attr(len(inputs), param_attr_in)
        yield from zip(inputs, param_attrs)

    def input_dtype(self, inputs_in):
        """Get input data type

           Args:
               inputs_in: inputs whose data type is wanted

        Returns dtype of the input
        """
        inputs_in = inputs_in if (inputs_in is not None) else []
        inputs = self._multiple_input(inputs_in)
        dtype = None
        for each in inputs:
            if dtype is None:
                dtype = each.dtype
            elif dtype != each.dtype:
                raise ValueError(
                    "Data Type mismatch: %d to %d in %s"
                    % (dtype, each.dtype, self.name)
                )
        return dtype

    def get_parameter(self, name):
        """Get parameter specifically

           Args:
               name: parameter's name

        Returns target parameter
        """
        param = self.main_program.global_block().var(name)
        if not isinstance(param, Parameter):
            raise ValueError(f"no Parameter name {name} found in {self.name}")
        return param

    # TODO: this should not be called anymore after all activation func move to Layers
    def append_activation(self, input_var, act=None, use_cudnn=None):
        """Append activation

            Args:
                input_var: the input variable; len(input_var.shape) must be
                    greater than or equal to 2.
                act: activation type
                use_cudnn: whether to use cuDNN

        Returns the Variable produced after appending the activation.
        """
        if act is None:
            return input_var
        if isinstance(act, str):
            act = {'type': act}
        else:
            raise TypeError(
                f"{act} should be a str in {self.name}"
            )

        if (use_cudnn is not None) and use_cudnn:
            act['use_cudnn'] = use_cudnn
        use_mkldnn = _global_flags()["FLAGS_use_mkldnn"]
        if (use_mkldnn is not None) and use_mkldnn:
            act['use_mkldnn'] = use_mkldnn
        act_type = act.pop('type')
        if in_dygraph_mode():
            res = _append_activation_in_dygraph(
                input_var, act_type, use_cudnn, use_mkldnn
            )
            return res
        else:
            tmp = self.create_variable_for_type_inference(dtype=input_var.dtype)
            self.append_op(
                type=act_type,
                inputs={"X": [input_var]},
                outputs={"Out": [tmp]},
                attrs=act,
            )
            return tmp

    def is_instance(self, param, cls):
        """Check if the input parameter is instance of input class

            Args:
                param: parameter to be checked
                cls: expected class of the parameter

        Raises a TypeError if ``param`` is not an instance of ``cls``.
        """
        if not isinstance(param, cls):
            raise TypeError(
                "The input {} parameter of method {} must be {}, in layer {}".format(
                    param, self.layer_type, cls.__name__, self.name
                )
            )


class LayerOpsRecoder:
    """
    Record generated operators information in nn.Layer.
    """

    def __init__(self, start=-1, end=-1, ops=None, is_valid=False, hooks=None):
        self.start = start
        self.end = end
        self.ops = ops
        self.is_valid = is_valid
        self.hooks = hooks


class HookRemoveHelper:
    """A helper that can be used to remove an added hook by calling ``remove()``."""

    next_hook_id = 0

    def __init__(self, hooks):
        self._hooks_ref = weakref.ref(hooks)
        self._hook_id = HookRemoveHelper.next_hook_id
        HookRemoveHelper.next_hook_id += 1

    def remove(self):
        hooks = self._hooks_ref()
        if hooks is not None and self._hook_id in hooks:
            del hooks[self._hook_id]
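
# A small hedged usage sketch for HookRemoveHelper: the helper only keeps a weak
# reference to the hook dict, so remove() silently becomes a no-op once the owning
# layer has been garbage collected. The names below are assumptions for illustration:
#
#   handle = layer.register_forward_post_hook(my_hook)
#   ...
#   handle.remove()   # deletes my_hook from layer._forward_post_hooks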


class Layer:
    """
    Dynamic graph Layer based on object-oriented design (OOD); it holds the parameters
    of the layer, the structure of the forward graph, and so on.

    Parameters:
        name_scope (str, optional): prefix name used by the layer to name parameters.
            If prefix is "my_layer", parameter name in MyLayer
            can be "my_layer_0.w_n", where "w" is the parameter
            base name and "n" is a unique, auto-generated suffix.
            If None, prefix name will be snake cased class name. Default: None.
        dtype(str, optional): data type of this parameter.
                If set str, it can be "bool",  "float16", "float32", "float64",
                "int8", "int16", "int32", "int64", "uint8" or "uint16".
                Default: "float32"

    Returns:
        None

    Examples:
        .. code-block:: python

            >>> import paddle
            >>> paddle.seed(100)

            >>> class MyLayer(paddle.nn.Layer):
            ...     def __init__(self):
            ...         super().__init__()
            ...         self._linear = paddle.nn.Linear(1, 1)
            ...         self._dropout = paddle.nn.Dropout(p=0.5)
            ...
            ...     def forward(self, input):
            ...         temp = self._linear(input)
            ...         temp = self._dropout(temp)
            ...         return temp
            ...
            >>> x = paddle.randn([10, 1], 'float32')
            >>> mylayer = MyLayer()
            >>> mylayer.eval()  # set mylayer._dropout to eval mode
            >>> out = mylayer(x)
            >>> mylayer.train()  # set mylayer._dropout to train mode
            >>> out = mylayer(x)
            >>> print(out)
            Tensor(shape=[10, 1], dtype=float32, place=Place(cpu), stop_gradient=False,
            [[-3.44879317],
             [ 0.        ],
             [ 0.        ],
             [-0.73825276],
             [ 0.        ],
             [ 0.        ],
             [ 0.64444798],
             [-3.22185946],
             [ 0.        ],
             [-0.68077987]])
    """

    def __init__(self, name_scope=None, dtype="float32"):
        self.training = True
        if name_scope is None:
            name_scope = _convert_camel_to_snake(self.__class__.__name__)
            name_scope = _scope_dist2single(name_scope)
        self._full_name = unique_name.generate(name_scope)
        self._helper = LayerObjectHelper(self._full_name)
        self._built = False
        self._dtype = dtype
        self._init_in_dynamic_mode = in_dygraph_mode()

        self._parameters = collections.OrderedDict()
        # Buffers the variable (not parameter) created in layer
        self._buffers = collections.OrderedDict()
        self._non_persistable_buffer_names_set = set()
        self._sub_layers = collections.OrderedDict()
        self._loaddict_holder = collections.OrderedDict()

        # Record generated op_descs in this layer
        self._op_recorder = LayerOpsRecoder(ops=[], hooks=[])
        self._customized_attrs = {}

        self._forward_pre_hooks = collections.OrderedDict()
        self._forward_post_hooks = collections.OrderedDict()

        # only used in AMP Training
        self._cast_to_low_precison = True

        self._state_dict_hooks = collections.OrderedDict()
        # Records original functions after @to_static to support rollback
        self._original_funcs = collections.OrderedDict()

    def train(self):
        """

        Sets this Layer and all its sublayers to training mode.
        This only affects certain modules like `Dropout` and `BatchNorm`.

        Returns:
            None

        Examples:
            .. code-block:: python

                >>> import paddle
                >>> paddle.seed(100)

                >>> class MyLayer(paddle.nn.Layer):
                ...     def __init__(self):
                ...         super().__init__()
                ...         self._linear = paddle.nn.Linear(1, 1)
                ...         self._dropout = paddle.nn.Dropout(p=0.5)
                ...
                ...     def forward(self, input):
                ...         temp = self._linear(input)
                ...         temp = self._dropout(temp)
                ...         return temp
                ...
                >>> x = paddle.randn([10, 1], 'float32')
                >>> mylayer = MyLayer()
                >>> mylayer.eval()  # set mylayer._dropout to eval mode
                >>> out = mylayer(x)
                >>> mylayer.train()  # set mylayer._dropout to train mode
                >>> out = mylayer(x)
                >>> print(out)
                Tensor(shape=[10, 1], dtype=float32, place=Place(cpu), stop_gradient=False,
                [[-3.44879317],
                 [ 0.        ],
                 [ 0.        ],
                 [-0.73825276],
                 [ 0.        ],
                 [ 0.        ],
                 [ 0.64444798],
                 [-3.22185946],
                 [ 0.        ],
                 [-0.68077987]])

        """
        # global setting in dygraph
        # NOTE(chenweihang): nn.Layer also can be used in static mode,
        # but _dygraph_tracer() can not be called in static mode
        if in_dygraph_mode():
            framework._dygraph_tracer().train_mode()
        # Layer-level setting
        self.training = True
        for layer in self.sublayers():
            layer.training = True

    def eval(self):
        """
        Sets this Layer and all its sublayers to evaluation mode.
        This only affects certain modules like `Dropout` and `BatchNorm`.

        Returns:
            None

        Example::
            .. code-block:: python

                >>> import paddle
                >>> paddle.seed(100)
                >>> class MyLayer(paddle.nn.Layer):
                ...     def __init__(self):
                ...         super().__init__()
                ...         self._linear = paddle.nn.Linear(1, 1)
                ...         self._dropout = paddle.nn.Dropout(p=0.5)
                ...
                ...     def forward(self, input):
                ...         temp = self._linear(input)
                ...         temp = self._dropout(temp)
                ...         return temp
                ...
                >>> x = paddle.randn([10, 1], 'float32')
                >>> mylayer = MyLayer()
                >>> mylayer.eval()  # set mylayer._dropout to eval mode
                >>> out = mylayer(x)
                >>> print(out)
                Tensor(shape=[10, 1], dtype=float32, place=Place(cpu), stop_gradient=False,
                [[-1.72439659],
                 [ 0.31532824],
                 [ 0.01192369],
                 [-0.36912638],
                 [-1.63426113],
                 [-0.93169814],
                 [ 0.32222399],
                 [-1.61092973],
                 [ 0.77209264],
                 [-0.34038994]])

        """
        # global setting in dygraph
        # NOTE(chenweihang): nn.Layer also can be used in static mode,
        # but _dygraph_tracer() can not be called in static mode
        if in_dygraph_mode():
            framework._dygraph_tracer().eval_mode()
        # Layer-level setting
        self.training = False
        for layer in self.sublayers():
            layer.training = False

    def apply(self, fn):
        """

        Applies ``fn`` recursively to every sublayer (as returned by ``.sublayers()``)
        as well as self. Typical use includes initializing the parameters of a model.

        Parameters:
            fn (function): a function to be applied to each sublayer

        Returns:
            Layer, self

        Example::
            .. code-block:: python

                >>> import paddle
                >>> import paddle.nn as nn
                >>> paddle.seed(2023)

                >>> net = nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2))

                >>> def init_weights(layer):
                ...     if type(layer) == nn.Linear:
                ...         print('before init weight:', layer.weight.numpy())
                ...         new_weight = paddle.full(shape=layer.weight.shape, dtype=layer.weight.dtype, fill_value=0.9)
                ...         layer.weight.set_value(new_weight)
                ...         print('after init weight:', layer.weight.numpy())
                ...
                >>> net.apply(init_weights)

                >>> print(net.state_dict())
                before init weight: [[ 0.89611185  0.04935038]
                                     [-0.5888344   0.99266374]]
                after init weight: [[0.9 0.9]
                                    [0.9 0.9]]
                before init weight: [[-0.18615901 -0.22924072]
                                     [ 1.1517721   0.59859073]]
                after init weight: [[0.9 0.9]
                                    [0.9 0.9]]
                OrderedDict([('0.weight', Parameter containing:
                Tensor(shape=[2, 2], dtype=float32, place=Place(cpu), stop_gradient=False,
                [[0.89999998, 0.89999998],
                 [0.89999998, 0.89999998]])), ('0.bias', Parameter containing:
                Tensor(shape=[2], dtype=float32, place=Place(cpu), stop_gradient=False,
                [0., 0.])), ('1.weight', Parameter containing:
                Tensor(shape=[2, 2], dtype=float32, place=Place(cpu), stop_gradient=False,
                [[0.89999998, 0.89999998],
                 [0.89999998, 0.89999998]])), ('1.bias', Parameter containing:
                Tensor(shape=[2], dtype=float32, place=Place(cpu), stop_gradient=False,
                [0., 0.]))])
        """
        for layer in self.children():
            layer.apply(fn)

        fn(self)

        return self

    def full_name(self):
        """

        Full name for this layer, composed of name_scope + "/" + MyLayer.__class__.__name__

        Returns:
            str, full name of this layer.

        Example::
            .. code-block:: python

                >>> import paddle

                >>> class LinearNet(paddle.nn.Layer):
                ...     def __init__(self):
                ...         super().__init__(name_scope = "demo_linear_net")
                ...         self._linear = paddle.nn.Linear(1, 1)
                ...
                ...     def forward(self, x):
                ...         return self._linear(x)
                ...
                >>> linear_net = LinearNet()
                >>> print(linear_net.full_name())
                demo_linear_net_0

        """
        return self._full_name

    def register_forward_post_hook(self, hook):
        """

        Register a forward post-hook for Layer. The hook will be called after the `forward` function has been computed.

        It should have the following form: the `input` and `output` of the `hook` are the `input` and `output` of the `Layer`, respectively.
        Users can use a forward post-hook to change the output of the Layer or perform information statistics tasks on the Layer.

        hook(Layer, input, output) -> None or modified output

        Parameters:
            hook(function): a function registered as a forward post-hook

        Returns:
            HookRemoveHelper, a HookRemoveHelper object that can be used to remove the added hook by calling `hook_remove_helper.remove()` .

        Examples:
            .. code-block:: python

                >>> import paddle
                >>> import numpy as np

                >>> # the forward_post_hook change the output of the layer: output = output * 2
                >>> def forward_post_hook(layer, input, output):
                ...     # user can use layer, input and output for information statistis tasks
                ...
                ...     # change the output
                ...     return output * 2
                ...
                >>> linear = paddle.nn.Linear(13, 5)
650

651 652
                >>> # register the hook
                >>> forward_post_hook_handle = linear.register_forward_post_hook(forward_post_hook)
653

654 655
                >>> value1 = np.arange(26).reshape(2, 13).astype("float32")
                >>> in1 = paddle.to_tensor(value1)
656

657
                >>> out0 = linear(in1)
658

659 660
                >>> # remove the hook
                >>> forward_post_hook_handle.remove()
661

662
                >>> out1 = linear(in1)
663

664 665
                >>> # hook change the linear's output to output * 2, so out0 is equal to out1 * 2.
                >>> assert (out0.numpy() == (out1.numpy()) * 2).any()
U
ustiniankw 已提交
666

667 668 669 670 671 672
        """
        hook_remove_helper = HookRemoveHelper(self._forward_post_hooks)
        self._forward_post_hooks[hook_remove_helper._hook_id] = hook
        return hook_remove_helper

    def register_forward_pre_hook(self, hook):
        """

        Register a forward pre-hook for Layer. The hook will be called before the `forward` function is computed.

        It should have the following form: the `input` of the `hook` is the `input` of the `Layer`.
        The hook can return either a tuple or a single modified value; a single returned value will be
        wrapped into a tuple (unless that value is already a tuple).
        Users can use a forward pre-hook to change the input of the Layer or perform information statistics tasks on the Layer.

        hook(Layer, input) -> None or modified input

        Parameters:
            hook(function): a function registered as a forward pre-hook

        Returns:
            HookRemoveHelper, a HookRemoveHelper object that can be used to remove the added hook by calling `hook_remove_helper.remove()` .

        Examples:
            .. code-block:: python

                >>> import paddle
                >>> import numpy as np

                >>> # the forward_pre_hook change the input of the layer: input = input * 2
                >>> def forward_pre_hook(layer, input):
                ...     # user can use layer and input for information statistis tasks
                ...
                ...     # change the input
                ...     input_return = (input[0] * 2)
                ...     return input_return
                ...
                >>> linear = paddle.nn.Linear(13, 5)
705

706 707
                >>> # register the hook
                >>> forward_pre_hook_handle = linear.register_forward_pre_hook(forward_pre_hook)
708

709 710 711
                >>> value0 = np.arange(26).reshape(2, 13).astype("float32")
                >>> in0 = paddle.to_tensor(value0)
                >>> out0 = linear(in0)
712

713 714
                >>> # remove the hook
                >>> forward_pre_hook_handle.remove()
715

716 717 718
                >>> value1 = value0 * 2
                >>> in1 = paddle.to_tensor(value1)
                >>> out1 = linear(in1)
719

720 721
                >>> # hook change the linear's input to input * 2, so out0 is equal to out1.
                >>> assert (out0.numpy() == out1.numpy()).any()
722 723 724 725 726
        """
        hook_remove_helper = HookRemoveHelper(self._forward_pre_hooks)
        self._forward_pre_hooks[hook_remove_helper._hook_id] = hook
        return hook_remove_helper

    def create_parameter(
        self,
        shape,
        attr=None,
        dtype=None,
        is_bias=False,
        default_initializer=None,
    ):
        """Create parameters for this layer.

        Parameters:
            shape(list): Shape of the parameter.
            attr(ParamAttr, optional): Parameter attribute of weight. Please refer to :ref:`api_paddle_ParamAttr`. Default: None.
            dtype(str, optional): Data type of this parameter.
                If set str, it can be "bool",  "float16", "float32", "float64",
                "int8", "int16", "int32", "int64", "uint8" or "uint16". Default: "float32".
            is_bias(bool, optional): if this is a bias parameter. Default: False.
            default_initializer(Initializer, optional): the default initializer for this parameter.
                If set None, default initializer will be set to paddle.nn.initializer.Xavier and paddle.nn.initializer.Constant
                for non-bias and bias parameter, respectively. Default: None.

        Returns:
            Tensor, created parameter.

        Examples:
            .. code-block:: python

                >>> import paddle
                >>> paddle.seed(2023)

                >>> class MyLayer(paddle.nn.Layer):
                ...     def __init__(self):
                ...         super().__init__()
                ...         self._linear = paddle.nn.Linear(1, 1)
                ...         w_tmp = self.create_parameter([1,1])
                ...         self.add_parameter("w_tmp", w_tmp)
                ...
                ...     def forward(self, input):
                ...         return self._linear(input)
                ...
                >>> mylayer = MyLayer()
                >>> for name, param in mylayer.named_parameters():
                ...     print(name, param)      # will print w_tmp,_linear.weight,_linear.bias
                w_tmp Parameter containing:
                Tensor(shape=[1, 1], dtype=float32, place=Place(cpu), stop_gradient=False,
                [[0.06979191]])
                _linear.weight Parameter containing:
                Tensor(shape=[1, 1], dtype=float32, place=Place(cpu), stop_gradient=False,
                [[1.26729357]])
                _linear.bias Parameter containing:
                Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=False,
                [0.])
        """
        temp_attr = copy.deepcopy(attr)
        if isinstance(temp_attr, str) and temp_attr == "":
            temp_attr = None
        return self._helper.create_parameter(
            temp_attr, shape, dtype, is_bias, default_initializer
        )

    @deprecated(
        since="2.0.0",
        update_to="paddle.nn.Layer.create_tensor",
        reason="New api in create_tensor, easier to use.",
    )
    def create_variable(self, name=None, persistable=None, dtype=None):
        """

        Create Tensor for this layer.

        Parameters:
            name(str, optional): name of the tensor. Please refer to :ref:`api_guide_Name` . Default: None

            persistable(bool, optional): whether to set this tensor as persistable. Default: False

            dtype(str, optional): data type of this parameter. If set str, it can be "bool", "float16", "float32", "float64","int8", "int16", "int32", "int64", "uint8" or "uint16". If set None, it will be "float32". Default: None

        Returns:
            Tensor, created Tensor.

        Examples:
            .. code-block:: python

                >>> import paddle

                >>> class MyLinear(paddle.nn.Layer):
                ...     def __init__(self,
                ...                 in_features,
                ...                 out_features):
                ...         super().__init__()
                ...         self.linear = paddle.nn.Linear(10, 10)
                ...
                ...         self.back_var = self.create_variable(name = "linear_tmp_0", dtype=self._dtype)
                ...
                ...     def forward(self, input):
                ...         out = self.linear(input)
                ...         paddle.assign( out, self.back_var)
                ...
                ...         return out

        """
        if name is not None:
            var_name = ".".join([self._full_name, name])
        else:
            var_name = unique_name.generate(
                ".".join([self._full_name, "_generated_var"])
            )

        return self._helper.main_program.current_block().create_var(
            name=var_name,
            persistable=persistable,
            dtype=dtype,
            type=core.VarDesc.VarType.LOD_TENSOR,
        )

    # TODO: Add more parameter list when we need them
    def create_tensor(self, name=None, persistable=None, dtype=None):
        """

        Create Tensor for this layer.

        Parameters:
            name(str, optional): name of the tensor. Please refer to :ref:`api_guide_Name` . Default: None
            persistable(bool, optional): whether to set this tensor as persistable. Default: False
            dtype(str, optional): data type of this parameter.
                If set str, it can be "bool",  "float16", "float32", "float64",
                "int8", "int16", "int32", "int64", "uint8" or "uint16".
                If set None, it will be "float32". Default: None

        Returns:
            Tensor, created Tensor.

        Examples:
            .. code-block:: python

                >>> import paddle

                >>> class MyLinear(paddle.nn.Layer):
                ...     def __init__(self,
                ...                  in_features,
                ...                  out_features):
                ...         super().__init__()
                ...         self.linear = paddle.nn.Linear(10, 10)
                ...
                ...         self.back_var = self.create_tensor(name = "linear_tmp_0", dtype=self._dtype)
                ...
                ...     def forward(self, input):
                ...         out = self.linear(input)
                ...         paddle.assign(out, self.back_var)
                ...
                ...         return out

        """
        if name is not None:
            var_name = ".".join([self._full_name, name])
        else:
            var_name = unique_name.generate(
                ".".join([self._full_name, "_generated_var"])
            )

        return self._helper.main_program.current_block().create_var(
            name=var_name,
            persistable=persistable,
            dtype=dtype,
            type=core.VarDesc.VarType.LOD_TENSOR,
        )

    def parameters(self, include_sublayers=True):
        """

        Returns a list of all Parameters from current layer and its sub-layers.

        Returns:
            list of Tensor, a list of Parameters.

        Examples:
            .. code-block:: python

                >>> import paddle
                >>> paddle.seed(100)

                >>> linear = paddle.nn.Linear(1, 1)
                >>> print(linear.parameters())
                [Parameter containing:
                Tensor(shape=[1, 1], dtype=float32, place=Place(cpu), stop_gradient=False,
                [[0.18551230]]), Parameter containing:
                Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=False,
                [0.])]

        """
        ret = [
            param
            for _, param in self.named_parameters(
                include_sublayers=include_sublayers
            )
        ]
        return ret

    def children(self):
        """

        Returns an iterator over immediate children layers.

        Yields:
            Layer: a child layer

        Examples:
            .. code-block:: python

                >>> import paddle

                >>> linear1 = paddle.nn.Linear(10, 3)
                >>> linear2 = paddle.nn.Linear(3, 10, bias_attr=False)
                >>> model = paddle.nn.Sequential(linear1, linear2)

                >>> layer_list = list(model.children())

                >>> print(layer_list)
                [Linear(in_features=10, out_features=3, dtype=float32), Linear(in_features=3, out_features=10, dtype=float32)]

        """
        for _, layer in self.named_children():
            yield layer

    def named_children(self):
        """Returns an iterator over immediate children layers, yielding both
        the name of the layer as well as the layer itself.

        Yields:
            (string, Layer): Tuple containing a name and child layer

        Examples:
            .. code-block:: python

                >>> import paddle

                >>> linear1 = paddle.nn.Linear(10, 3)
                >>> linear2 = paddle.nn.Linear(3, 10, bias_attr=False)
                >>> model = paddle.nn.Sequential(linear1, linear2)
                >>> for prefix, layer in model.named_children():
                ...     print(prefix, layer)
                0 Linear(in_features=10, out_features=3, dtype=float32)
                1 Linear(in_features=3, out_features=10, dtype=float32)
970 971 972 973 974 975 976
        """
        memo = set()
        for name, layer in self._sub_layers.items():
            if layer is not None and layer not in memo:
                memo.add(layer)
                yield name, layer

    def sublayers(self, include_self=False):
        """

        Returns a list of sub layers.

        Parameters:
            include_self(bool, optional): Whether return self as sublayers. Default: False

        Returns:
            list of Layer, a list of sub layers.

        Examples:
            .. code-block:: python

                >>> import paddle

                >>> class MyLayer(paddle.nn.Layer):
                ...     def __init__(self):
                ...         super().__init__()
                ...         self._linear = paddle.nn.Linear(1, 1)
                ...         self._dropout = paddle.nn.Dropout(p=0.5)
                ...
                ...     def forward(self, input):
                ...         temp = self._linear(input)
                ...         temp = self._dropout(temp)
                ...         return temp
                ...
                >>> mylayer = MyLayer()
                >>> print(mylayer.sublayers())
                [Linear(in_features=1, out_features=1, dtype=float32), Dropout(p=0.5, axis=None, mode=upscale_in_train)]

        """
        ret = [
            layer
            for _, layer in self.named_sublayers(include_self=include_self)
        ]
        return ret

    def named_parameters(self, prefix='', include_sublayers=True):
        """
        Returns an iterator over all parameters in the Layer, yielding tuple of name and parameter.

        Parameters:
            prefix(str, optional): Prefix to prepend to all parameter names. Default: ''.
            include_sublayers(bool, optional): Whether include the parameters of sublayers.
                If True, also include the named parameters from sublayers. Default: True.

        Yields:
            (string, Parameter): Tuple of name and Parameter

        Examples:
            .. code-block:: python

                >>> import paddle
                >>> paddle.seed(100)

                >>> fc1 = paddle.nn.Linear(10, 3)
                >>> fc2 = paddle.nn.Linear(3, 10, bias_attr=False)
                >>> model = paddle.nn.Sequential(fc1, fc2)
                >>> for name, param in model.named_parameters():
                ...     print(name, param)
                0.weight Parameter containing:
                Tensor(shape=[10, 3], dtype=float32, place=Place(cpu), stop_gradient=False,
                [[ 0.07276392, -0.39791510, -0.66356444],
                 [ 0.02143478, -0.18519843, -0.32485050],
                 [-0.42249614,  0.08450919, -0.66838276],
                 [ 0.38208580, -0.24303678,  0.55127048],
                 [ 0.47745085,  0.62117910, -0.08336520],
                 [-0.28653207,  0.47237599, -0.05868882],
                 [-0.14385653,  0.29945642,  0.12832761],
                 [-0.21237159,  0.38539791, -0.62760031],
                 [ 0.02637231,  0.20621127,  0.43255770],
                 [-0.19984481, -0.26259184, -0.29696006]])
                0.bias Parameter containing:
                Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=False,
                [0., 0., 0.])
                1.weight Parameter containing:
                Tensor(shape=[3, 10], dtype=float32, place=Place(cpu), stop_gradient=False,
                [[ 0.01985580, -0.40268910,  0.41172385, -0.47249708, -0.09002256,
                 -0.00533628, -0.52048630,  0.62360322,  0.20848787, -0.02033746],
                 [ 0.58281910,  0.12841827,  0.12907702,  0.02325618, -0.07746267,
                 0.31950659, -0.37924835, -0.59209681, -0.11732036, -0.58378261],
                 [-0.62100595,  0.22293305,  0.28229684, -0.03687060, -0.59323978,
                 0.08411229,  0.53275704,  0.40431368,  0.03171402, -0.17922515]])
1061 1062
        """
        params_set = set()
1063 1064 1065 1066 1067
        named_sublayers = (
            self.named_sublayers(prefix=prefix, include_self=True)
            if include_sublayers
            else zip([prefix], [self])
        )
1068 1069 1070 1071 1072 1073 1074 1075 1076
        for layer_prefix, sublayer in named_sublayers:
            params = sublayer._parameters.items()
            for key, param in params:
                if param is None or param in params_set:
                    continue
                params_set.add(param)
                name = layer_prefix + ('.' if layer_prefix else '') + key
                yield name, param

    def named_sublayers(self, prefix='', include_self=False, layers_set=None):
        """
        Returns an iterator over all sublayers in the Layer, yielding tuple of name and sublayer.
        Duplicate sublayers will only be yielded once.

        Parameters:
            prefix(str, optional): Prefix to prepend to all parameter names. Default: ''.
            include_self(bool, optional): Whether include the Layer itself. Default: False.
            layers_set(set, optional): The set to record duplicate sublayers. Default: None.

        Yields:
            (string, Layer): Tuple of name and Layer

        Examples:
            .. code-block:: python

                >>> import paddle

                >>> fc1 = paddle.nn.Linear(10, 3)
                >>> fc2 = paddle.nn.Linear(3, 10, bias_attr=False)
                >>> model = paddle.nn.Sequential(fc1, fc2)
                >>> for prefix, layer in model.named_sublayers():
                ...     print(prefix, layer)
                0 Linear(in_features=10, out_features=3, dtype=float32)
                1 Linear(in_features=3, out_features=10, dtype=float32)
        """
        if layers_set is None:
            layers_set = set()
        if include_self and self not in layers_set:
            layers_set.add(self)
            yield prefix, self
        for key, layer in self._sub_layers.items():
            if layer is None:
                continue
            layer_prefix = prefix + ('.' if prefix else '') + key
            for p, l in layer.named_sublayers(
                prefix=layer_prefix, include_self=True, layers_set=layers_set
            ):
                yield p, l

    def register_buffer(self, name, tensor, persistable=True):
        """
        Registers a tensor as buffer into the layer.

        `buffer` is a non-trainable tensor and will not be updated by the optimizer,
        but is necessary for evaluation and inference. For example, the mean and variance in BatchNorm layers.
        The registered buffer is persistable by default, and will be saved into
        `state_dict` alongside parameters. If set persistable=False, it registers
        a non-persistable buffer, so that it will not be a part of `state_dict` .

        Buffers can be accessed as attributes using given names.

        Parameters:
            name (string): name of the buffer. The buffer can be accessed
                from this layer using the given name
            tensor (Tensor): the tensor to be registered as buffer.
            persistable (bool): whether the buffer is part of this layer's
                state_dict.

        Returns:
            None

        Examples:
            .. code-block:: python

                >>> import numpy as np
                >>> import paddle

                >>> linear = paddle.nn.Linear(10, 3)
                >>> value = np.array([0]).astype("float32")
                >>> buffer = paddle.to_tensor(value)
                >>> linear.register_buffer("buf_name", buffer, persistable=True)

                >>> # get the buffer by attribute.
                >>> print(linear.buf_name)
                Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
                [0.])

        """

        if '_buffers' not in self.__dict__:
            raise ValueError("super().__init__() should be called first")
        elif not isinstance(name, str):
            raise TypeError(
                "The name of buffer should be a string, but received {}.".format(
                    type(name).__name__
                )
            )
        elif '.' in name:
            raise KeyError(
                "The name of buffer can not contain `.`, "
                "because when you access the newly added buffer in the "
                "form of `self.**.**`, it will cause AttributeError."
            )
        elif name == '':
            raise KeyError("The name of buffer can not be empty.")
        elif hasattr(self, name) and name not in self._buffers:
            raise KeyError(f"attribute '{name}' already exists.")
        elif tensor is not None and not (type(tensor) == core.eager.Tensor):
            raise TypeError(
                "The registered buffer should be a Paddle.Tensor, but received {}.".format(
                    type(tensor).__name__
                )
            )
        else:
            self._buffers[name] = tensor
            if persistable:
                self._non_persistable_buffer_names_set.discard(name)
            else:
                self._non_persistable_buffer_names_set.add(name)

    def buffers(self, include_sublayers=True):
        """

        Returns a list of all buffers from current layer and its sub-layers.

        Parameters:
            include_sublayers(bool, optional): Whether include the buffers of sublayers. If True, also include the buffers from sublayers. Default: True

        Returns:
            list of Tensor, a list of buffers.

        Examples:
            .. code-block:: python

                >>> import numpy as np
                >>> import paddle

                >>> linear = paddle.nn.Linear(10, 3)
                >>> value = np.array([0]).astype("float32")
                >>> buffer = paddle.to_tensor(value)
                >>> linear.register_buffer("buf_name", buffer, persistable=True)

                >>> print(linear.buffers())
                [Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
                [0.])]

        """
        ret = [
            buffer
            for _, buffer in self.named_buffers(
                include_sublayers=include_sublayers
            )
        ]
        return ret

    def named_buffers(self, prefix='', include_sublayers=True):
        """
        Returns an iterator over all buffers in the Layer, yielding tuple of name and Tensor.

        Parameters:
            prefix(str, optional): Prefix to prepend to all buffer names. Default: ''.
            include_sublayers(bool, optional): Whether include the buffers of sublayers.
                If True, also include the named buffers from sublayers. Default: True.

        Yields:
            (string, Tensor): Tuple of name and tensor

        Examples:
            .. code-block:: python

                >>> import numpy as np
                >>> import paddle

                >>> fc1 = paddle.nn.Linear(10, 3)
                >>> buffer1 = paddle.to_tensor(np.array([0]).astype("float32"))
                >>> # register a tensor as buffer by specific `persistable`
                >>> fc1.register_buffer("buf_name_1", buffer1, persistable=True)

                >>> fc2 = paddle.nn.Linear(3, 10)
                >>> buffer2 = paddle.to_tensor(np.array([1]).astype("float32"))
                >>> # register a buffer by assigning an attribute with Tensor.
                >>> # The `persistable` can only be False by this way.
                >>> fc2.buf_name_2 = buffer2

                >>> model = paddle.nn.Sequential(fc1, fc2)

                >>> # get all named buffers
                >>> for name, buffer in model.named_buffers():
                ...     print(name, buffer)
                0.buf_name_1 Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
                [0.])
                1.buf_name_2 Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
                [1.])
        """
        buffers_set = set()
        named_sublayers = (
            self.named_sublayers(prefix=prefix, include_self=True)
            if include_sublayers
            else zip([prefix], [self])
        )
        for layer_prefix, sublayer in named_sublayers:
            buffers = sublayer._buffers.items()
            for key, buffer in buffers:
                if buffer is None or buffer in buffers_set:
                    continue
                buffers_set.add(buffer)
                name = layer_prefix + ('.' if layer_prefix else '') + key
                yield name, buffer

    def clear_gradients(self):
        """
        Clear the gradients of all parameters for this layer.

        Returns:
            None

        Examples:
            .. code-block:: python

                >>> import paddle
                >>> import numpy as np

                >>> value = np.arange(26).reshape(2, 13).astype("float32")
                >>> a = paddle.to_tensor(value)
                >>> linear = paddle.nn.Linear(13, 5)
                >>> adam = paddle.optimizer.Adam(learning_rate=0.01,
                ...                              parameters=linear.parameters())
                >>> out = linear(a)
                >>> out.backward()
                >>> adam.step()
                >>> linear.clear_gradients()

        """
        for p in self.parameters():
            if p.trainable:
                p.clear_gradient()

    def _build_once(self, *args, **kwargs):
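        # Lazily builds state that depends on the first batch of inputs; invoked
        # from __call__ / _dygraph_call_func before the first forward pass.
        # Subclasses may override it; the base implementation is a no-op.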
        pass

    def _dygraph_call_func(self, *inputs, **kwargs):
        for forward_pre_hook in self._forward_pre_hooks.values():
            hook_result = forward_pre_hook(self, inputs)
            if hook_result is not None:
                if not isinstance(hook_result, tuple):
                    hook_result = (hook_result,)
                inputs = hook_result

        if not self._built:
            with program_desc_tracing_guard(False):
                self._build_once(*inputs, **kwargs)

            self._built = True

        if in_profiler_mode():
            with profiler.RecordEvent(
                self.__class__.__name__, profiler.TracerEventType.Forward
            ):
                outputs = self.forward(*inputs, **kwargs)
        else:
            outputs = self.forward(*inputs, **kwargs)

        for forward_post_hook in self._forward_post_hooks.values():
            hook_result = forward_post_hook(self, inputs, outputs)
            if hook_result is not None:
                outputs = hook_result

        return outputs

    def __call__(self, *inputs, **kwargs):
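        # Fast path: outside declarative (to_static) mode, with no forward hooks,
        # not yet built, running in dygraph mode and with the profiler disabled,
        # call _build_once and forward() directly instead of going through
        # _dygraph_call_func, avoiding the extra Python overhead.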
        if (
            (not in_declarative_mode())
            and (not self._forward_pre_hooks)
            and (not self._forward_post_hooks)
            and (not self._built)
            and in_dygraph_mode()
            and (not in_profiler_mode())
        ):
            self._build_once(*inputs, **kwargs)
            return self.forward(*inputs, **kwargs)
        else:
            return self._dygraph_call_func(*inputs, **kwargs)

    def forward(self, *inputs, **kwargs):
        """
        Defines the computation performed at every call.
        Should be overridden by all subclasses.

        Parameters:
            *inputs(tuple): unpacked tuple arguments
            **kwargs(dict): unpacked dict arguments
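
        A minimal sketch of a subclass overriding ``forward`` (the name
        ``MyLayer`` below is illustrative):

        .. code-block:: python

            >>> import paddle

            >>> class MyLayer(paddle.nn.Layer):
            ...     def __init__(self):
            ...         super().__init__()
            ...         self._linear = paddle.nn.Linear(4, 2)
            ...
            ...     def forward(self, x):
            ...         return self._linear(x)
            ...
            >>> out = MyLayer()(paddle.rand([3, 4]))
            >>> print(out.shape)
            [3, 2]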
        """
        raise NotImplementedError

    def backward(self, *inputs):
        raise ValueError("Layer shouldn't implement backward")

    def add_sublayer(self, name, sublayer):
        """

        Adds a sub Layer instance.

        Added sublayer can be accessed by self.name

        Parameters:
            name(str): name of this sublayer.
            sublayer(Layer): an instance of Layer.
        Returns:
            Layer, the sublayer passed in.

        Examples:
            .. code-block:: python

                >>> import paddle

                >>> class MySequential(paddle.nn.Layer):
                ...     def __init__(self, *layers):
                ...         super().__init__()
                ...         if len(layers) > 0 and isinstance(layers[0], tuple):
                ...             for name, layer in layers:
                ...                 self.add_sublayer(name, layer)
                ...         else:
                ...             for idx, layer in enumerate(layers):
                ...                 self.add_sublayer(str(idx), layer)
                ...
                ...     def forward(self, input):
                ...         for layer in self._sub_layers.values():
                ...             input = layer(input)
                ...         return input
                ...
                >>> fc1 = paddle.nn.Linear(10, 3)
                >>> fc2 = paddle.nn.Linear(3, 10, bias_attr=False)
                >>> model = MySequential(fc1, fc2)
                >>> for prefix, layer in model.named_sublayers():
                ...     print(prefix, layer)
                0 Linear(in_features=10, out_features=3, dtype=float32)
                1 Linear(in_features=3, out_features=10, dtype=float32)
        """
        assert isinstance(sublayer, Layer) or sublayer is None

        self._sub_layers[name] = sublayer
        return sublayer

    def add_parameter(self, name, parameter):
        """Adds a Parameter instance.

        Added parameter can be accessed by self.name

        Parameters:
            name(str): name of this parameter.
            parameter(Parameter): an instance of Parameter.
        Returns:
            Parameter, the parameter passed in.
        Examples:
            .. code-block:: python

                >>> import paddle
                >>> paddle.seed(100)

                >>> class MyLayer(paddle.nn.Layer):
                ...     def __init__(self):
                ...         super().__init__()
                ...         self._linear = paddle.nn.Linear(1, 1)
                ...         w_tmp = self.create_parameter([1,1])
                ...         self.add_parameter("w_tmp", w_tmp)
                ...
                ...     def forward(self, input):
                ...         return self._linear(input)
                ...
                >>> mylayer = MyLayer()
                >>> for name, param in mylayer.named_parameters():
                ...     print(name, param)
                w_tmp Parameter containing:
                Tensor(shape=[1, 1], dtype=float32, place=Place(cpu), stop_gradient=False,
                [[-1.01448846]])
                _linear.weight Parameter containing:
                Tensor(shape=[1, 1], dtype=float32, place=Place(cpu), stop_gradient=False,
                [[0.18551230]])
                _linear.bias Parameter containing:
                Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=False,
                [0.])
        """
        if '_parameters' not in self.__dict__:
            raise RuntimeError("super().__init__() should be called first.")
        elif not isinstance(name, str):
            raise TypeError(
                "The name of parameter should be a string, but received {}.".format(
                    type(name).__name__
                )
            )
        elif '.' in name:
            raise KeyError(
                "The name of parameter can not contain `.`, "
                "because when you access the newly added parameter in the "
                "form of `self.**.**`, it will cause AttributeError."
            )
        elif name == '':
            raise KeyError("The name of parameter can not be empty.")
        elif hasattr(self, name) and name not in self._parameters:
            raise KeyError(f"The parameter '{name}' already exists.")
        elif parameter is not None and not isinstance(
            parameter, framework.Parameter
        ):
            raise TypeError(
                "The parameter to be added should be a Parameter, but received {}.".format(
                    type(parameter).__name__
                )
            )
        else:
            if parameter is None:
                self._parameters[name] = None

            if len(self._loaddict_holder) > 0:
                assert (
                    parameter.name in self._loaddict_holder
                ), "Parameter not found, Can't not find [ {} ] in state_dict".format(
                    parameter.name
                )

                parameter.set_value(self._loaddict_holder[parameter.name])

            self._parameters[name] = parameter
        return parameter

    def _set_op_attrs(self, attrs):
        """
        Add customized attributes while appending ops. In the case of quantization, we want to save
        some attributes into op_desc while exporting an inference model by @to_static.

        Arguments:
            attrs(dict): customized attributes that will be added into op_descs.

        NOTE: The interface is only exposed to developers.
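
        A minimal usage sketch (the attribute name ``quant_round_type`` below is
        illustrative, not a predefined key):

        .. code-block:: python

            layer = paddle.nn.Linear(4, 4)
            layer._set_op_attrs({"quant_round_type": 0})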
        """

        def is_already_registered(is_pre_hook):
            layers_hooks = (
                self._forward_pre_hooks
                if is_pre_hook
                else self._forward_post_hooks
            )
            candidate_hook = (
                record_program_ops_pre_hook
                if is_pre_hook
                else set_op_customized_attrs_post_hook
            )

            already_registered = False
            if layers_hooks:
                last_key = next(reversed(layers_hooks))
                already_registered = layers_hooks[last_key] == candidate_hook

            return already_registered

        if not isinstance(attrs, dict):
            raise TypeError(
                "attrs should be type(dict), but received {}".format(
                    type(attrs).__name__
                )
            )

        # NOTE: Overwrite behavior for same key.
        self._customized_attrs.update(attrs)

        if not is_already_registered(is_pre_hook=True):
            pre_hook_helper = self.register_forward_pre_hook(
                record_program_ops_pre_hook
            )
            assert len(self._op_recorder.hooks) == 0
            self._op_recorder.hooks = [pre_hook_helper]

        # manually register post_hook to ensure it is inserted into the head.
        if not is_already_registered(is_pre_hook=False):
            post_hook_helper = self.register_forward_post_hook(
                set_op_customized_attrs_post_hook
            )
            if len(self._forward_post_hooks) > 1:
                self._forward_post_hooks.move_to_end(
                    post_hook_helper._hook_id, last=False
                )

            assert len(self._op_recorder.hooks) == 1

            # hooks that need to be removed once we finish executing them.
            self._op_recorder.hooks.append(post_hook_helper)

    def __getstate__(self):
        return self.__dict__

    def __setstate__(self, state):
        self.__dict__.update(state)

    def __getattr__(self, name):
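        # Attribute lookup order: _parameters, then _sub_layers, then _buffers;
        # anything else falls back to the default object attribute lookup.
        # Under dy2static, parameters and buffers are converted to variables.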
        if '_parameters' in self.__dict__:
            _parameters = self.__dict__['_parameters']
            if name in self._parameters:
                if in_declarative_mode():
                    return _convert_into_variable(self._parameters[name])
                return self._parameters[name]
        if '_sub_layers' in self.__dict__:
            _sub_layers = self.__dict__['_sub_layers']
            if name in self._sub_layers:
                return self._sub_layers[name]
        if '_buffers' in self.__dict__:
            _buffers = self.__dict__['_buffers']
            if name in _buffers:
                if in_declarative_mode():
                    return _convert_into_variable(_buffers[name])
                return _buffers[name]
        return object.__getattribute__(self, name)

    def __setattr__(self, name, value):
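        # Route the assignment into _parameters, _sub_layers or _buffers
        # according to the type of ``value`` (Parameter, Layer or Tensor);
        # any other value is stored as a plain attribute via object.__setattr__.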
        def _remove_if_exist(*dicts):
            for d in dicts:
                if name in d:
                    del d[name]

        if isinstance(getattr(type(self), name, None), property):
            object.__setattr__(self, name, value)
        params = self.__dict__.get('_parameters', None)
        if isinstance(value, framework.Parameter):
            if params is None:
                raise ValueError("super().__init__() should be called first")
            if len(self._loaddict_holder) > 0:
                assert (
                    value.name in self._loaddict_holder
                ), "Parameter not found, Can't not find [ {} ] in state_dict".format(
                    value.name
                )

                value.set_value(self._loaddict_holder[value.name])

            _remove_if_exist(self.__dict__, self._buffers, self._sub_layers)
            params[name] = value
        elif params is not None and name in params:
            if value is not None:
                raise TypeError(
                    "assignment to parameter '{}' should be of type Parameter or None, but got '{}'".format(
                        name, type(value).__name__
                    )
                )
            params[name] = None
        else:
            layers = self.__dict__.get('_sub_layers', None)
            if isinstance(value, Layer):
                if layers is None:
                    raise ValueError(
                        "super().__init__() should be called first"
                    )

                _remove_if_exist(self.__dict__, self._parameters, self._buffers)
                layers[name] = value
            elif layers is not None and name in layers:
                if value is not None:
                    raise TypeError(
                        "assignment to sublayer '{}' should be of type Layer or None, but got '{}'".format(
                            name, type(value).__name__
                        )
                    )
                layers[name] = None
            else:
                _buffers = self.__dict__.get('_buffers', None)
                if isinstance(value, core.eager.Tensor):
                    if _buffers is None:
                        raise ValueError(
                            "super().__init__() should be called first"
                        )
                    _remove_if_exist(
                        self.__dict__, self._parameters, self._sub_layers
                    )
                    # Set persistable=False by default. Only `register_buffer` can
                    # add a persistable buffer.
                    if name not in self._buffers:
                        self._non_persistable_buffer_names_set.add(name)
                    if not value.name:
                        value.name = unique_name.generate('_buffers_' + name)
                    _buffers[name] = value
                elif _buffers is not None and name in _buffers:
                    # Note(Aurelius84): In Dy2stat, the value of the Buffer may be modified in
                    # decorated function, such as `self.buffer = new_tensor`. So we update its
                    # value via `assign`.
                    if type(value) == framework.Variable:
                        from paddle import assign

                        # Note(zhhsplendid): the condition below happens in PaddleGan model,
                        # but should all non-Variable _buffers[name] be re-assigned? We
                        # should consider it in the future. I currently wrote this as
                        # conservative code.
                        if in_declarative_mode() and _buffers[name] is None:
                            raise RuntimeError(
                                'In Dy2stat, self.{0} is a buffer and self.{0} is '
                                'not allowed to be set to Variable when self.{0} is None.'.format(
                                    name
                                )
                            )
                        elif (
                            _buffers[name] is None
                            or type(getattr(self, name)) == core.eager.Tensor
                        ):
                            _buffers[name] = assign(value)
                        else:
                            assign(value, getattr(self, name))
                    elif value is not None:
                        raise TypeError(
                            "assignment to buffers '{}' should be of type core.Tensor or None, but got '{}'".format(
                                name, type(value).__name__
                            )
                        )
                    else:
                        # Assigning None will remove the buffer, but if a new Tensor is re-assigned to it,
                        # it will be marked as a buffer again with the same `persistable` attribute.
                        _buffers[name] = None
                else:
                    object.__setattr__(self, name, value)

    def __delattr__(self, name):
        if name in self._parameters:
            del self._parameters[name]
        elif name in self._sub_layers:
            del self._sub_layers[name]
        elif name in self._buffers:
            del self._buffers[name]
            self._non_persistable_buffer_names_set.discard(name)
        else:
            object.__delattr__(self, name)

    def __dir__(self):
        """
        Return a list of all parameters, buffers (non-parameter tensors), sublayers, methods and attributes of the Layer.

        Examples:
            .. code-block:: python
                >>> import paddle
                >>> import numpy as np

                >>> class Mylayer(paddle.nn.Layer):
                ...     def __init__(self):
                ...         super().__init__()
                ...         self.linear1 = paddle.nn.Linear(10, 10)
                ...         self.linear2 = paddle.nn.Linear(5, 5)
                ...         self.conv2d = paddle.nn.Conv2D(3, 2, 3)
                ...         self.embedding = paddle.nn.Embedding(128, 16)
                ...         self.h_0 = paddle.to_tensor(np.zeros([10, 10]).astype('float32'))
                ...
                >>> mylayer = Mylayer()
                >>> print(dir(mylayer))
                ['__call__', '__class__', '__delattr__', '__dict__', ..., 'training']
        """
        method = dir(self.__class__)
        attrs = list(self.__dict__.keys())
        parameters = list(self._parameters.keys())
        sublayers = list(self._sub_layers.keys())
        buffers = list(self._buffers.keys())

        keys = method + attrs + parameters + sublayers + buffers

        return keys

    def extra_repr(self):
        """
        Extra representation of this layer; you can provide a custom
        implementation in your own layer.
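
        A minimal sketch of a custom ``extra_repr`` (``Scale`` is an
        illustrative layer, not part of this module):

        .. code-block:: python

            >>> import paddle

            >>> class Scale(paddle.nn.Layer):
            ...     def __init__(self, factor):
            ...         super().__init__()
            ...         self._factor = factor
            ...
            ...     def extra_repr(self):
            ...         return f'factor={self._factor}'
            ...
            ...     def forward(self, x):
            ...         return self._factor * x
            ...
            >>> print(Scale(2.0))
            Scale(factor=2.0)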
        """
        return ''

    def __repr__(self):
        extra_repr = self.extra_repr()
        extra_lines = extra_repr.split('\n')
        sublayer_lines = []
        for name, layer in self._sub_layers.items():
            sublayer_str = repr(layer)
            sublayer_str = _addindent(sublayer_str, 2)
            sublayer_lines.append('(' + name + '): ' + sublayer_str)

        final_str = self.__class__.__name__ + '('
        if extra_lines:
            if len(extra_lines) > 1:
                final_str += '\n  ' + '\n  '.join(extra_lines) + '\n'
            elif len(extra_lines) == 1:
                final_str += extra_lines[0]
        if sublayer_lines:
            final_str += '\n  ' + '\n  '.join(sublayer_lines) + '\n'

        final_str += ')'
        return final_str

    def register_state_dict_hook(self, hook):
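        # ``hook`` is called with the dict collected by ``state_dict`` and may
        # return a new dict to replace it (see ``_state_dict_impl``). A minimal
        # usage sketch, with an illustrative hook name:
        #
        #     def _drop_private_entries(destination):
        #         return {k: v for k, v in destination.items() if not k.startswith('_')}
        #
        #     handle = layer.register_state_dict_hook(_drop_private_entries)
        #     handle.remove()  # detach the hook when it is no longer needed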
        hook_remove_helper = HookRemoveHelper(self._state_dict_hooks)
        self._state_dict_hooks[hook_remove_helper._hook_id] = hook
        return hook_remove_helper

    def _obtain_parameters_buffers(
        self,
        destination=None,
        include_sublayers=True,
        structured_name_prefix="",
    ):
        """
        The difference from state_dict() is that state_dict_hook will not be called,
        but the original types of parameters and buffers will be maintained.
        """
        if destination is None:
            destination = collections.OrderedDict()
        for name, data in self._parameters.items():
            if data is not None:
                destination[structured_name_prefix + name] = data
        for name, buffer in self._buffers.items():
            if (
                buffer is not None
                and name not in self._non_persistable_buffer_names_set
            ):
                destination[structured_name_prefix + name] = buffer

        if include_sublayers:
            for layer_name, layer_item in self._sub_layers.items():
                if layer_item is not None:
                    destination_temp = destination.copy()
                    destination_temp.update(
                        layer_item._obtain_parameters_buffers(
                            destination_temp,
                            include_sublayers,
                            structured_name_prefix + layer_name + ".",
                        )
                    )
                    destination = destination_temp
        return destination

    def _state_dict_impl(
        self,
        destination=None,
        include_sublayers=True,
        structured_name_prefix="",
        include_non_persistable_buffer=False,
        use_hook=True,
    ):
        """
        Get all parameters and persistable buffers of the current layer and its sub-layers, and set them into a dict.

        Parameters:
            destination(dict, optional) : If provided, all the parameters and persistable buffers will be set to this dict. Default: None
            include_sublayers(bool, optional) : If true, also include the parameters and persistable buffers from sublayers. Default: True
            include_non_persistable_buffer(bool, optional): If true, include non-persistable buffers of the current layer and its sub-layers; it is used in pure fp16 and jit.save. Default: False
            use_hook(bool, optional) : If true, the operations contained in _state_dict_hooks will be appended to the destination. Default: True
        """

        if destination is None:
            destination = collections.OrderedDict()
        for name, data in self._parameters.items():
            if data is not None:
                destination[structured_name_prefix + name] = data
        for name, buffer in self._buffers.items():
            if not include_non_persistable_buffer:
                if (
                    buffer is not None
                    and name not in self._non_persistable_buffer_names_set
                ):
                    destination[structured_name_prefix + name] = buffer
            else:
                if buffer is not None:
                    destination[structured_name_prefix + name] = buffer

        if include_sublayers:
            for layer_name, layer_item in self._sub_layers.items():
                if layer_item is not None:
                    destination_temp = destination.copy()
                    destination_temp.update(
                        layer_item._state_dict_impl(
                            destination_temp,
                            include_sublayers,
                            structured_name_prefix + layer_name + ".",
                            include_non_persistable_buffer,
                            use_hook,
                        )
                    )
                    destination = destination_temp
        if use_hook:
            for state_dict_hook in self._state_dict_hooks.values():
                hook_result = state_dict_hook(destination)
                if hook_result is not None:
                    destination = hook_result

        return destination

    def to_static_state_dict(
        self,
        destination=None,
        include_sublayers=True,
        structured_name_prefix="",
        use_hook=True,
    ):
        '''

        Get all parameters and buffers of the current layer and its sub-layers, and set them into a dict.

        Parameters:
            destination(dict, optional) : If provided, all the parameters and persistable buffers will be set to this dict. Default: None
            include_sublayers(bool, optional) : If true, also include the parameters and persistable buffers from sublayers. Default: True
            use_hook(bool, optional) : If true, the operations contained in _state_dict_hooks will be appended to the destination. Default: True

        Returns:
            dict, a dict containing all the parameters and persistable buffers.

        Examples:
            .. code-block:: python

                >>> import paddle

                >>> emb = paddle.nn.Embedding(10, 10)

                >>> state_dict = emb.to_static_state_dict()
                >>> paddle.save(state_dict, "paddle_dy.pdparams")

        '''
        return self._state_dict_impl(
            destination=destination,
            include_sublayers=include_sublayers,
            structured_name_prefix=structured_name_prefix,
            include_non_persistable_buffer=True,
            use_hook=use_hook,
        )

    def state_dict(
        self,
        destination=None,
        include_sublayers=True,
        structured_name_prefix="",
        use_hook=True,
    ):
        '''
        Get all parameters and persistable buffers of the current layer and its sub-layers, and set them into a dict.

        Parameters:
            destination(dict, optional) : If provided, all the parameters and persistable buffers will be set to this dict. Default: None
            include_sublayers(bool, optional) : If true, also include the parameters and persistable buffers from sublayers. Default: True
            use_hook(bool, optional) : If true, the operations contained in _state_dict_hooks will be appended to the destination. Default: True

        Returns:
            dict: a dict containing all the parameters and persistable buffers.

        Examples:
            .. code-block:: python

                >>> import paddle

                >>> emb = paddle.nn.Embedding(10, 10)

                >>> state_dict = emb.state_dict()
                >>> paddle.save(state_dict, "paddle_dy.pdparams")

        '''
        return self._state_dict_impl(
            destination=destination,
            include_sublayers=include_sublayers,
            structured_name_prefix=structured_name_prefix,
            include_non_persistable_buffer=False,
            use_hook=use_hook,
        )

    @framework.deprecate_stat_dict
    def set_state_dict(self, state_dict, use_structured_name=True):
        '''
        Set parameters and persistable buffers from state_dict. All the parameters and buffers will be reset by the tensors in the state_dict.

        Parameters:
            state_dict(dict) : Dict contains all the parameters and persistable buffers.
            use_structured_name(bool, optional) : If true, use structured name as key, otherwise, use parameter or buffer name as key.
                                                  Default: True
        Returns:
            missing_keys(list): A list of str containing the missing keys
            unexpected_keys(list): A list of str containing the unexpected keys

        Examples:
            .. code-block:: python

                >>> import paddle

                >>> emb = paddle.nn.Embedding(10, 10)

                >>> state_dict = emb.state_dict()
                >>> paddle.save(state_dict, "paddle_dy.pdparams")
                >>> para_state_dict = paddle.load("paddle_dy.pdparams")
                >>> emb.set_state_dict(para_state_dict)

        '''
        missing_keys = []
        match_keys = set()
        unexpected_keys = []

        def _check_match(key, param):
            state = state_dict.get(key, None)
            if state is None:
                missing_keys.append(key)
                raise ValueError(f"{key} is not found in the provided dict.")
            if isinstance(state, (dict, list)):
                if len(state) != len(param):
                    missing_keys.append(key)
                    raise ValueError(
                        "{} receieves the length of {}, "
                        "but the expected shape is {}".format(
                            key, len(state), len(param)
                        )
                    )
                else:
                    match_keys.add(key)
                    return param, state
            else:
                state_shape = (
                    state.shape()
                    if inspect.ismethod(state.shape)
                    else state.shape
                )

                if list(state_shape) != list(param.shape):
                    missing_keys.append(key)
                    raise ValueError(
                        "{} receives a shape {}, but the expected shape is {}.".format(
                            key, list(state_shape), list(param.shape)
                        )
                    )
                match_keys.add(key)
                return param, state

        matched_param_state = []
        for key, param in self._state_dict_impl(use_hook=False).items():
            key_name = key if use_structured_name else param.name
            try:
                match_res = _check_match(key_name, param)
                matched_param_state.append(match_res)
            except ValueError as err:
                warnings.warn(f"Skip loading for {key}. " + str(err))
        for key in state_dict.keys():
            if key not in match_keys:
                unexpected_keys.append(key)
        if in_dygraph_mode():
            for param, state in matched_param_state:
                param.set_value(state)
        else:

            def _set_var(var, ndarray):
                t = global_scope().find_var(var.name).get_tensor()
                p = t._place()
                if p.is_cpu_place():
                    place = core.CPUPlace()
                elif p.is_cuda_pinned_place():
                    place = core.CUDAPinnedPlace()
                elif p.is_xpu_place():
                    p = core.Place()
                    p.set_place(t._place())
                    place = core.XPUPlace(p.xpu_device_id())
                elif p.is_custom_place():
                    p = core.Place()
                    p.set_place(t._place())
                    place = core.CustomPlace(
                        paddle.device.get_device().split(':')[0],
                        p.custom_device_id(),
                    )
                else:
                    p = core.Place()
                    p.set_place(t._place())
                    place = core.CUDAPlace(p.gpu_device_id())
                t.set(ndarray, place)

            try:
                executor = Executor(_get_device())._default_executor
                # restore parameter states
                core._create_loaded_parameter(
                    [param for param, state in matched_param_state],
                    global_scope(),
                    executor,
                )
                for param, state in matched_param_state:
                    _set_var(param, state)
            except ValueError as e:
                raise ValueError(
                    "This error might happen in dy2static when calling 'set_state_dict' dynamically in 'forward', which is not supported. If you only need to call 'set_state_dict' once, move it to '__init__'."
                ) from e

        return missing_keys, unexpected_keys

    def to(self, device=None, dtype=None, blocking=None):
        '''
        Cast the parameters and buffers of Layer by the given device, dtype and blocking.

        Parameters:
            device(str|paddle.CPUPlace()|paddle.CUDAPlace()|paddle.CUDAPinnedPlace()|paddle.XPUPlace()|None, optional): The device of the Layer which want to be stored.
            If None, the device is the same with the original Tensor. If device is string, it can be ``cpu``, ``gpu:x`` and ``xpu:x``, where ``x`` is the
            index of the GPUs or XPUs. Default: None.

            dtype(str|numpy.dtype|paddle.dtype|None, optional): The type of the data. If None, the dtype is the same with the original Tensor. Default: None.

            blocking(bool|None, optional): If False and the source is in pinned memory, the copy will be
              asynchronous with respect to the host. Otherwise, the argument has no effect. If None, the blocking is set True. Default: None.

        Returns:
            self

        Examples:
            .. code-block:: python

                >>> import paddle
                >>> paddle.seed(2023)

                >>> linear=paddle.nn.Linear(2, 2)
                >>> linear.weight
                >>> print(linear.weight)
                Parameter containing:
                Tensor(shape=[2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=False,
                [[ 0.89611185,  0.04935038],
                 [-0.58883440,  0.99266374]])

                >>> linear.to(dtype='float64')
                >>> linear.weight
                >>> print(linear.weight)
                Parameter containing:
                Tensor(shape=[2, 2], dtype=float64, place=Place(gpu:0), stop_gradient=False,
                [[ 0.89611185,  0.04935038],
                 [-0.58883440,  0.99266374]])

                >>> linear.to(device='cpu')
                >>> linear.weight
                >>> print(linear.weight)
                Parameter containing:
                Tensor(shape=[2, 2], dtype=float64, place=Place(cpu), stop_gradient=False,
                [[ 0.89611185,  0.04935038],
                 [-0.58883440,  0.99266374]])

                >>> # doctest: +REQUIRES(env:GPU)
                >>> linear.to(device=paddle.CUDAPinnedPlace(), blocking=False)
                >>> linear.weight
                >>> print(linear.weight)
                Tensor(shape=[2, 2], dtype=float64, place=Place(gpu_pinned), stop_gradient=False,
                [[ 0.89611185,  0.04935038],
                 [-0.58883440,  0.99266374]])

        '''
        return self._to_impl(
            device=device,
            dtype=dtype,
            blocking=blocking,
            include_sublayers=True,
            floating_only=False,
        )

    def _apply(self, func, device, dtype, blocking, include_sublayers=True):
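        # Apply ``func(tensor, device, dtype, blocking)`` to every parameter
        # (and its gradient) and every buffer of this layer and, when
        # include_sublayers is True, of all sublayers recursively.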
        if include_sublayers:
            for layer in self.children():
                layer._apply(func, device, dtype, blocking, include_sublayers)

        for key, param in self._parameters.items():
            if param is not None:
                with no_grad():
                    param_applied = func(param, device, dtype, blocking)

                if param.grad is not None:
                    with no_grad():
                        grad_applied = func(
                            param._grad_ivar(), device, dtype, blocking
                        )

        for key, buf in self._buffers.items():
            if buf is not None:
                self._buffers[key] = func(buf, device, dtype, blocking)

        self._dtype = dtype

    def _transform(self, t, device, dtype, blocking):
        if device is None:
            device = t.place
        if dtype is None:
            dtype = t.dtype

        if type(dtype) is not VarDesc.VarType:
            dtype = convert_np_dtype_to_dtype_(dtype)

        # 1. gpu place needs to determine whether the memory is sufficient for allocation:
        if t.place.is_gpu_place():
            # for gpu, minimum memory allocation unit is 256 bytes.
            size_dtype = core.size_of_dtype(dtype)
            # Note(zhangbo): Paddle GPU minimum memory allocation unit is 256 bytes; waiting_alloc_memory will compute the memory space occupied by 't'.
            # Coefficient 1.2 is used to avoid OOM that may occur in this critical state when the memory is just enough.
            waiting_alloc_memory = (
                ((np.prod(t.shape) * size_dtype) / 256 + 1) * 256 * 1.2
            )
            gpu_memory_available = core.gpu_memory_available()
            if gpu_memory_available < waiting_alloc_memory:
                # Copy param / Tensor to cpu
                t_used = t._copy_to(
                    paddle.CPUPlace(), blocking
                )  # k-v type will error
                # Release mem of t
                t.value().get_tensor()._clear()
            else:
                t_used = t
        else:
            t_used = t

        # 2. cast param / Tensor to dtype
        if dtype is not None and dtype != t_used.dtype:
            with paddle.base.framework._dygraph_place_guard(place=t_used.place):
                t_casted = t_used.cast(dtype=dtype)
        else:
            t_casted = t_used

        # 3. Copy casted cpu param / Tensor to device
        if device is not None and not t_casted.place._equals(device):
            new_t = t_casted._copy_to(device, blocking)
        else:
            new_t = t_casted

        # 4. share Tensor to origin param / Tensor
        dst_tensor = t.value().get_tensor()
        src_tensor = new_t.value().get_tensor()
        dst_tensor._share_data_with(src_tensor)

        return t

    def _to_impl(
        self,
        device=None,
        dtype=None,
        blocking=None,
        include_sublayers=True,
        floating_only=False,
    ):
        '''
        Cast the parameters and buffers of Layer by the given device, dtype and blocking.

        Parameters:
            device(str|paddle.CPUPlace()|paddle.CUDAPlace()|paddle.CUDAPinnedPlace()|paddle.XPUPlace()|None, optional): The device of the Layer which want to be stored.
            If None, the device is the same with the original Tensor. If device is string, it can be ``cpu``, ``gpu:x`` and ``xpu:x``, where ``x`` is the
            index of the GPUs or XPUs. Default: None.

            dtype(str|numpy.dtype|paddle.dtype|None, optional): The type of the data. If None, the dtype is the same with the original Tensor. Default: None.

            blocking(bool|None, optional): If False and the source is in pinned memory, the copy will be
              asynchronous with respect to the host. Otherwise, the argument has no effect. If None, the blocking is set True. Default: None.

            include_sublayers(bool, optional): If True, deal with the parameters and buffers of self and all sublayers; otherwise only deal with those of self. Default: True.

            floating_only(bool, optional): If True, only cast the floating point parameters and buffers of Layer by the given device, dtype and blocking. Default: False.

        Returns:
            self

        '''

        if device is None and dtype is None and blocking is None:
            return self

        if device is not None:
            if isinstance(device, str):
                device = paddle.device._convert_to_place(device)
            elif isinstance(
                device,
                (
                    core.CPUPlace,
                    core.CUDAPlace,
                    core.CUDAPinnedPlace,
                    core.XPUPlace,
                ),
            ):
                pass
            else:
                raise ValueError(
                    "device value error, must be str, paddle.CPUPlace(), paddle.CUDAPlace(), paddle.CUDAPinnedPlace() or paddle.XPUPlace(), but the type of device is "
                    + type(device).__name__
                )

        if blocking is None:
            blocking = True
        else:
            assert isinstance(
                blocking, bool
            ), "blocking value error, must be the True, False or None"

        def transform(t, device, dtype, blocking):
            if floating_only and (not paddle.is_floating_point(t)):
                return t
            return self._transform(t, device, dtype, blocking)

        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=UserWarning)
            self._apply(transform, device, dtype, blocking, include_sublayers)

        self._dtype = dtype
        return self

    def _startup_program(self):
        """
        Return the startup program containing the initialization operations of all parameters.

        NOTE(dev): This is a very low level API and is only for inner developers.
        """
        startup_program = Program()
        for param in self.parameters():
            param._create_init_op(startup_program.global_block())

        return startup_program

    # [aliases] Compatible with old method names
    set_dict = set_state_dict
    load_dict = set_state_dict

    def float(self, excluded_layers=None):
        '''
        Casts all floating point parameters and buffers to ``float`` data type.

        Parameters:
            excluded_layers(nn.Layer|list|None, optional): Specify the layers that need to be kept original data type. if excluded_layers is None, casts all floating point parameters and buffers. Default: None.

        Returns:
            Layer: self

        Examples:
            .. code-block:: python

                >>> import paddle

                >>> class Model(paddle.nn.Layer):
                ...     def __init__(self):
                ...         super().__init__()
                ...         self.linear = paddle.nn.Linear(1, 1)
                ...         self.dropout = paddle.nn.Dropout(p=0.5)
                ...
                ...     def forward(self, input):
                ...         out = self.linear(input)
                ...         out = self.dropout(out)
                ...         return out
                ...
                >>> model = Model()
                >>> model.float()
                Model(
                    (linear): Linear(in_features=1, out_features=1, dtype=paddle.float32)
                    (dropout): Dropout(p=0.5, axis=None, mode=upscale_in_train)
                )
        '''

        excluded_layers = [] if excluded_layers is None else excluded_layers

        if isinstance(excluded_layers, type):
            excluded_layers = [excluded_layers]
        elif isinstance(excluded_layers, list):
            pass
        else:
            raise TypeError(
                "excluded_layers should be type nn.Layer or list, but got %s.",
                type(excluded_layers).__name__,
            )

        def layer_trans(layer):
            _layer_trans_dtype(layer, paddle.float32, excluded_layers)

        return self.apply(layer_trans)

    def float16(self, excluded_layers=None):
        '''
        Casts all floating point parameters and buffers to ``float16`` data type.


        .. note::
            ``nn.BatchNorm`` does not support ``bfloat16`` weights, so it would not be converted by default.


        Parameters:
           excluded_layers(nn.Layer|list|None, optional): Specify the layers that need to be kept original data type. if excluded_layers is None, casts all floating point parameters and buffers except ``nn.BatchNorm``. Default: None.

        Returns:
            Layer: self

        Examples:
            .. code-block:: python

                >>> # doctest: +SKIP('Paddle compiled by the user does not support float16, so keep original data type.')
                >>> import paddle

                >>> class Model(paddle.nn.Layer):
                ...     def __init__(self):
                ...         super().__init__()
                ...         self.linear = paddle.nn.Linear(1, 1)
                ...         self.dropout = paddle.nn.Dropout(p=0.5)
                ...
                ...     def forward(self, input):
                ...         out = self.linear(input)
                ...         out = self.dropout(out)
                ...         return out
                ...
                >>> model = Model()
                >>> model.float16()
                Model(
                    (linear): Linear(in_features=1, out_features=1, dtype=float32)
                    (dropout): Dropout(p=0.5, axis=None, mode=upscale_in_train)
                )
        '''

        if paddle.amp.is_float16_supported() is False:
            warnings.warn(
                "Paddle compiled by the user does not support float16, so keep original data type."
            )
            return self

        excluded_layers = (
            [nn.BatchNorm] if excluded_layers is None else excluded_layers
        )

        if isinstance(excluded_layers, type):
            excluded_layers = [excluded_layers]
        elif isinstance(excluded_layers, list):
            pass
        else:
            raise TypeError(
                "excluded_layers should be type nn.Layer or list, but got %s.",
                type(excluded_layers).__name__,
            )

        def layer_trans(layer):
            _layer_trans_dtype(layer, paddle.float16, excluded_layers)

        return self.apply(layer_trans)

    def bfloat16(self, excluded_layers=None):
        '''
        Casts all floating point parameters and buffers to ``bfloat16`` data type.


        .. note::
            ``nn.BatchNorm`` does not support ``bfloat16`` weights, so it would not be converted by default.


        Parameters:
            excluded_layers(nn.Layer|list|None, optional): Specify the layers that need to be kept original data type. if excluded_layers is None, casts all floating point parameters and buffers except ``nn.BatchNorm``. Default: None.

        Returns:
            Layer: self

        Examples:
            .. code-block:: python

                >>> # doctest: +SKIP('bfloat need V100 compile')
                >>> import paddle

                >>> class Model(paddle.nn.Layer):
                ...     def __init__(self):
                ...         super().__init__()
                ...         self.linear = paddle.nn.Linear(1, 1)
                ...         self.dropout = paddle.nn.Dropout(p=0.5)
                ...
                ...     def forward(self, input):
                ...         out = self.linear(input)
                ...         out = self.dropout(out)
                ...         return out
                ...
                >>> model = Model()
                >>> model.bfloat16()
                >>> #UserWarning: Paddle compiled by the user does not support bfloat16, so keep original data type.
                Model(
                    (linear): Linear(in_features=1, out_features=1, dtype=float32)
                    (dropout): Dropout(p=0.5, axis=None, mode=upscale_in_train)
                )
        '''

        if paddle.amp.is_bfloat16_supported() is False:
            warnings.warn(
                "Paddle compiled by the user does not support bfloat16, so keep original data type."
            )
            return self

        excluded_layers = (
            [nn.BatchNorm] if excluded_layers is None else excluded_layers
        )

        if isinstance(excluded_layers, type):
            excluded_layers = [excluded_layers]
        elif isinstance(excluded_layers, list):
            pass
        else:
            raise TypeError(
                "excluded_layers should be type nn.Layer or list, but got %s.",
                type(excluded_layers).__name__,
            )

        def layer_trans(layer):
            _layer_trans_dtype(layer, paddle.bfloat16, excluded_layers)

        return self.apply(layer_trans)