diff --git a/python/paddle/nn/functional/loss.py b/python/paddle/nn/functional/loss.py
index 3605d62ceeba2d5f61e2200a045f7a94f49faa4a..450d250248433cff5ee0e1f8edf67033626903a7 100755
--- a/python/paddle/nn/functional/loss.py
+++ b/python/paddle/nn/functional/loss.py
@@ -513,7 +513,7 @@ def smooth_l1_loss(input, label, reduction='mean', delta=1.0, name=None):
             label_data = np.random.rand(3,3).astype("float32")
             input = paddle.to_tensor(input_data)
             label = paddle.to_tensor(label_data)
-            output = paddle.nn.functioanl.smooth_l1_loss(input, label)
+            output = paddle.nn.functional.smooth_l1_loss(input, label)
             print(output)
     """
     fluid.data_feeder.check_variable_and_dtype(
@@ -1187,12 +1187,16 @@ def cross_entropy(input,
         .. code-block:: python

             import paddle
+            import numpy as np
+
             input_data = np.random.random([5, 100]).astype("float64")
             label_data = np.random.randint(0, 100, size=(5)).astype(np.int64)
             weight_data = np.random.random([100]).astype("float64")
+
             input = paddle.to_tensor(input_data)
             label = paddle.to_tensor(label_data)
             weight = paddle.to_tensor(weight_data)
+
             loss = paddle.nn.functional.cross_entropy(input=input, label=label, weight=weight)
             print(loss)
             # [4.28546723]
diff --git a/python/paddle/nn/functional/norm.py b/python/paddle/nn/functional/norm.py
index efde54182e5a0b2de0a13ccddbd33bff1b3f8f78..56b5068bfb4e11064fe43e3f73b675606a30c7c0 100644
--- a/python/paddle/nn/functional/norm.py
+++ b/python/paddle/nn/functional/norm.py
@@ -271,9 +271,7 @@ def layer_norm(x,
           np.random.seed(123)
           x_data = np.random.random(size=(2, 2, 2, 3)).astype('float32')
           x = paddle.to_tensor(x_data)
-          layer_norm = paddle.nn.functional.layer_norm(x, x.shape[1:])
-          layer_norm_out = layer_norm(x)
-
+          layer_norm_out = paddle.nn.functional.layer_norm(x, x.shape[1:])
           print(layer_norm_out)
     """
     input_shape = list(x.shape)
@@ -363,7 +361,7 @@ def instance_norm(x,
           np.random.seed(123)
           x_data = np.random.random(size=(2, 2, 2, 3)).astype('float32')
           x = paddle.to_tensor(x_data)
-          instance_norm_out = paddle.nn.functional.instancenorm(x)
+          instance_norm_out = paddle.nn.functional.instance_norm(x)

           print(instance_norm_out)
diff --git a/python/paddle/nn/functional/pooling.py b/python/paddle/nn/functional/pooling.py
index 1c3a035bbccea8366fcfa766c69145e4ec0d73ca..9d89b4236113526c849f403e93a1837728f10909 100755
--- a/python/paddle/nn/functional/pooling.py
+++ b/python/paddle/nn/functional/pooling.py
@@ -198,11 +198,14 @@ def avg_pool1d(x,
     Examples:
         .. code-block:: python
-          import paddle
-          import paddle.nn.functional as F
-          data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32))
-          out = F.avg_pool1d(data, kernel_size=2, stride=2, padding=0)
-          # out shape: [1, 3, 16]
+
+          import paddle
+          import paddle.nn.functional as F
+          import numpy as np
+
+          data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32))
+          out = F.avg_pool1d(data, kernel_size=2, stride=2, padding=0)
+          # out shape: [1, 3, 16]
     """
     """NCL to NCHW"""
     data_format = "NCHW"
@@ -302,23 +305,28 @@ def avg_pool2d(x,
         name(str, optional): For detailed information, please refer
             to :ref:`api_guide_Name`. Usually name is no need to set and
             None by default.
+
     Returns:
         Tensor: The output tensor of pooling result. The data type is same as input tensor.
+
     Raises:
         ValueError: If `padding` is a string, but not "SAME" or "VALID".
         ValueError: If `padding` is "VALID", but `ceil_mode` is True.
         ShapeError: If the output's shape calculated is not greater than 0.
+
     Examples:
         .. code-block:: python
-          import paddle
-          import paddle.nn.functional as F
-          import numpy as np
-          # avg pool2d
-          x = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32, 32]).astype(np.float32))
-          out = F.avg_pool2d(x,
-                            kernel_size=2,
-                            stride=2, padding=0)
-          # out.shape [1, 3, 16, 16]
+
+          import paddle
+          import paddle.nn.functional as F
+          import numpy as np
+
+          # avg pool2d
+          x = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32, 32]).astype(np.float32))
+          out = F.avg_pool2d(x,
+                            kernel_size=2,
+                            stride=2, padding=0)
+          # out.shape [1, 3, 16, 16]
     """
     check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'avg_pool2d')
     kernel_size = utils.convert_to_list(kernel_size, 2, 'pool_size')
@@ -415,16 +423,21 @@ def avg_pool3d(x,
         name(str, optional): For detailed information, please refer
             to :ref:`api_guide_Name`. Usually name is no need to set and
             None by default.
+
     Returns:
         Tensor: The output tensor of pooling result. The data type is same as input tensor.
+
     Raises:
         ValueError: If `padding` is a string, but not "SAME" or "VALID".
         ValueError: If `padding` is "VALID", but `ceil_mode` is True.
         ShapeError: If the output's shape calculated is not greater than 0.
+
     Examples:
         .. code-block:: python
-          import paddle.fluid as fluid
+          import paddle
+          import numpy as np
+
+          x = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32, 32, 32]).astype(np.float32))
           # avg pool3d
           out = paddle.nn.functional.avg_pool3d(
@@ -537,6 +550,8 @@ def max_pool1d(x,
          import paddle
          import paddle.nn.functional as F
+          import numpy as np
+
          data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32))
          pool_out = F.max_pool1d(data, kernel_size=2, stride=2, padding=0)
          # pool_out shape: [1, 3, 16]
@@ -650,29 +665,32 @@ def max_pool2d(x,
             None by default.
     Returns:
         Tensor: The output tensor of pooling result. The data type is same as input tensor.
-    Raises:
+
+    Raises:
         ValueError: If `padding` is a string, but not "SAME" or "VALID".
         ValueError: If `padding` is "VALID", but `ceil_mode` is True.
         ShapeError: If the output's shape calculated is not greater than 0.
+
     Examples:
         .. code-block:: python
-          import paddle
-          import paddle.nn.functional as F
-          import numpy as np
-          # max pool2d
-          x = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32, 32]).astype(np.float32))
-          out = F.max_pool2d(x,
-                                kernel_size=2,
-                                stride=2, padding=0)
-          # output.shape [1, 3, 16, 16]
-          # for return_mask=True
-          out, max_indices = F.max_pool2d(x,
-                                             kernel_size=2,
-                                             stride=2,
-                                             padding=0,
-                                             return_mask=True)
-          # out.shape [1, 3, 16, 16], max_indices.shape [1, 3, 16, 16],
+          import paddle
+          import paddle.nn.functional as F
+          import numpy as np
+
+          # max pool2d
+          x = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32, 32]).astype(np.float32))
+          out = F.max_pool2d(x,
+                             kernel_size=2,
+                             stride=2, padding=0)
+          # output.shape [1, 3, 16, 16]
+          # for return_mask=True
+          out, max_indices = F.max_pool2d(x,
+                                          kernel_size=2,
+                                          stride=2,
+                                          padding=0,
+                                          return_mask=True)
+          # out.shape [1, 3, 16, 16], max_indices.shape [1, 3, 16, 16],
     """
     check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'max_pool2d')
     kernel_size = utils.convert_to_list(kernel_size, 2, 'pool_size')
@@ -778,33 +796,36 @@ def max_pool3d(x,
         name(str, optional): For detailed information, please refer
             to :ref:`api_guide_Name`. Usually name is no need to set and
             None by default.
+
     Returns:
         Tensor: The output tensor of pooling result. The data type is same as input tensor.
+
     Raises:
         ValueError: If `padding` is a string, but not "SAME" or "VALID".
ValueError: If `padding` is "VALID", but `ceil_mode` is True. ShapeError: If the output's shape calculated is not greater than 0. + Examples: .. code-block:: python - import paddle - import paddle.nn.functional as F - import numpy as np + import paddle + import paddle.nn.functional as F + import numpy as np - # max pool3d - x = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32, 32, 32]).astype(np.float32)) - output = F.max_pool2d(x, - kernel_size=2, - stride=2, padding=0) - output.shape [1, 3, 16, 16, 16] - # for return_mask=True - x = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32, 32, 32]).astype(np.float32)) - output, max_indices = paddle.nn.functional.max_pool3d(x, - kernel_size = 2, - stride = 2, - padding=0, - return_mask=True) - # output.shape [None, 3, 16, 16, 16], max_indices.shape [None, 3, 16, 16, 16], + # max pool3d + x = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32, 32, 32]).astype(np.float32)) + output = F.max_pool2d(x, + kernel_size=2, + stride=2, padding=0) + output.shape [1, 3, 16, 16, 16] + # for return_mask=True + x = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32, 32, 32]).astype(np.float32)) + output, max_indices = paddle.nn.functional.max_pool3d(x, + kernel_size = 2, + stride = 2, + padding=0, + return_mask=True) + # output.shape [None, 3, 16, 16, 16], max_indices.shape [None, 3, 16, 16, 16], """ check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'max_pool3d') kernel_size = utils.convert_to_list(kernel_size, 3, 'pool_size') @@ -905,6 +926,7 @@ def adaptive_avg_pool1d(x, output_size, name=None): # import paddle import paddle.nn.functional as F + import numpy as np data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32)) pool_out = F.adaptive_average_pool1d(data, output_size=16) @@ -1187,6 +1209,7 @@ def adaptive_max_pool1d(x, output_size, return_mask=False, name=None): # import paddle import paddle.nn.functional as F + import numpy as np data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32)) pool_out = F.adaptive_max_pool1d(data, output_size=16) diff --git a/python/paddle/nn/layer/activation.py b/python/paddle/nn/layer/activation.py index 1d1c7becea0f48a90e2075fb897298b0b9a38c59..482382300a784a7eea631b9d2e1d7995fb67b6ac 100644 --- a/python/paddle/nn/layer/activation.py +++ b/python/paddle/nn/layer/activation.py @@ -515,6 +515,7 @@ class LeakyReLU(layers.Layer): .. code-block:: python import paddle + import numpy as np m = paddle.nn.LeakyReLU() x = paddle.to_tensor(np.array([-2, 0, 1], 'float32')) diff --git a/python/paddle/nn/layer/common.py b/python/paddle/nn/layer/common.py index 7d1100e34befc9375fe463243ae8ddf5df94f203..389af0b1a875787675a68d1ad28ecea7bc91a6d1 100644 --- a/python/paddle/nn/layer/common.py +++ b/python/paddle/nn/layer/common.py @@ -332,6 +332,7 @@ class Upsample(layers.Layer): Examples: .. code-block:: python + import paddle import paddle.nn as nn import numpy as np diff --git a/python/paddle/nn/layer/loss.py b/python/paddle/nn/layer/loss.py index e8687af063e5d497b351c77115b67f3155a97b7c..ac1cb5a8187720292ff5e942110b6af280f6f9d6 100644 --- a/python/paddle/nn/layer/loss.py +++ b/python/paddle/nn/layer/loss.py @@ -207,6 +207,7 @@ class CrossEntropyLoss(fluid.dygraph.Layer): Examples: .. code-block:: python + import paddle import numpy as np @@ -491,28 +492,28 @@ class L1Loss(fluid.dygraph.Layer): Examples: .. 
+
            import paddle
            import numpy as np

-           paddle.disable_static()
            input_data = np.array([[1.5, 0.8], [0.2, 1.3]]).astype("float32")
            label_data = np.array([[1.7, 1], [0.4, 0.5]]).astype("float32")
            input = paddle.to_tensor(input_data)
            label = paddle.to_tensor(label_data)

-           l1_loss = paddle.nn.loss.L1Loss()
+           l1_loss = paddle.nn.L1Loss()
            output = l1_loss(input, label)
            print(output.numpy())
            # [0.35]

-           l1_loss = paddle.nn.loss.L1Loss(reduction='sum')
+           l1_loss = paddle.nn.L1Loss(reduction='sum')
            output = l1_loss(input, label)
            print(output.numpy())
            # [1.4]

-           l1_loss = paddle.nn.loss.L1Loss(reduction='none')
+           l1_loss = paddle.nn.L1Loss(reduction='none')
            output = l1_loss(input, label)
-           print(output.numpy())
+           print(output)
            # [[0.20000005 0.19999999]
            # [0.2 0.79999995]]
    """
@@ -596,12 +597,11 @@ class BCELoss(fluid.dygraph.Layer):
            input_data = np.array([0.5, 0.6, 0.7]).astype("float32")
            label_data = np.array([1.0, 0.0, 1.0]).astype("float32")

-           paddle.disable_static()
            input = paddle.to_tensor(input_data)
            label = paddle.to_tensor(label_data)
-           bce_loss = paddle.nn.loss.BCELoss()
+           bce_loss = paddle.nn.BCELoss()
            output = bce_loss(input, label)
-           print(output.numpy()) # [0.65537095]
+           print(output) # [0.65537095]

    """
@@ -850,8 +850,8 @@ class MarginRankingLoss(fluid.dygraph.Layer):

            import paddle

-           input = paddle.to_tensor([[1, 2], [3, 4]]), dtype="float32")
-           other = paddle.to_tensor([[2, 1], [2, 4]]), dtype="float32")
+           input = paddle.to_tensor([[1, 2], [3, 4]], dtype="float32")
+           other = paddle.to_tensor([[2, 1], [2, 4]], dtype="float32")
            label = paddle.to_tensor([[1, -1], [-1, -1]], dtype="float32")
            margin_rank_loss = paddle.nn.MarginRankingLoss()
            loss = margin_rank_loss(input, other, label)
diff --git a/python/paddle/nn/layer/pooling.py b/python/paddle/nn/layer/pooling.py
index bc2121c198b7ad408711a8bd576721df33af800b..1d9875d45b40ffbf46d08573775cd8becb1c8eb6 100755
--- a/python/paddle/nn/layer/pooling.py
+++ b/python/paddle/nn/layer/pooling.py
@@ -90,6 +90,7 @@ class AvgPool1D(layers.Layer):
            import paddle
            import paddle.nn as nn
+           import numpy as np

            data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32))
            AvgPool1D = nn.AvgPool1D(kernel_size=2, stride=2, padding=0)
@@ -185,7 +186,7 @@ class AvgPool2D(layers.Layer):
            input = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32, 32]).astype(np.float32))
            AvgPool2D = nn.AvgPool2D(kernel_size=2, stride=2, padding=0)
-           output = AvgPoo2d(input)
+           output = AvgPool2D(input)
            # output.shape [1, 3, 16, 16]

    """
@@ -367,6 +368,7 @@ class MaxPool1D(layers.Layer):
            import paddle
            import paddle.nn as nn
+           import numpy as np

            data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32))
            MaxPool1D = nn.MaxPool1D(kernel_size=2, stride=2, padding=0)
@@ -646,6 +648,7 @@ class AdaptiveAvgPool1D(layers.Layer):
            #
            import paddle
            import paddle.nn as nn
+           import numpy as np

            data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32))
            AdaptiveAvgPool1D = nn.AdaptiveAvgPool1D(output_size=16)
@@ -884,8 +887,9 @@ class AdaptiveMaxPool1D(layers.Layer):
            # lend = ceil((i + 1) * L / m)
            # output[:, :, i] = max(input[:, :, lstart: lend])
            #
-           import paddle
+           import paddle
            import paddle.nn as nn
+           import numpy as np

            data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32))
            AdaptiveMaxPool1D = nn.AdaptiveMaxPool1D(output_size=16)
diff --git a/python/paddle/nn/layer/transformer.py b/python/paddle/nn/layer/transformer.py
index c0ca8350fac08c35592e8d6907fe259ca1d2a20e..4e6bb050e702980c82f9a99482c1a5978009168c 100644
--- a/python/paddle/nn/layer/transformer.py
+++ b/python/paddle/nn/layer/transformer.py
@@ -120,7 +120,7 @@ class MultiHeadAttention(Layer):
            query = paddle.rand((2, 4, 128))
            # self attention mask: [batch_size, num_heads, query_len, query_len]
            attn_mask = paddle.rand((2, 2, 4, 4))
-           multi_head_attn = paddle.MultiHeadAttention(128, 2)
+           multi_head_attn = paddle.nn.MultiHeadAttention(128, 2)
            output = multi_head_attn(query, None, None, attn_mask=attn_mask)  # [2, 4, 128]
    """
diff --git a/python/paddle/nn/utils/weight_norm_hook.py b/python/paddle/nn/utils/weight_norm_hook.py
index 59a69337f2e0ef391ef6ddf7b855e06f7381cc95..fdf7a1b5bb2e2dc7e5e729a15c76fcbbb32ca12d 100755
--- a/python/paddle/nn/utils/weight_norm_hook.py
+++ b/python/paddle/nn/utils/weight_norm_hook.py
@@ -212,6 +212,7 @@ def remove_weight_norm(layer, name='weight'):
    Examples:
        .. code-block:: python
+
           import paddle
           from paddle.nn import Conv2D
           from paddle.nn.utils import weight_norm, remove_weight_norm
diff --git a/python/paddle/optimizer/adamax.py b/python/paddle/optimizer/adamax.py
index 5d164fa76235147c2ec2f0dd72e98cb978129fad..bd65fc19c32aaf5ec75b3bd9d77f7bdefd9fdac8 100644
--- a/python/paddle/optimizer/adamax.py
+++ b/python/paddle/optimizer/adamax.py
@@ -78,10 +78,10 @@ class Adamax(Optimizer):
    Examples:
        .. code-block:: python
+
            import paddle
            import numpy as np

-           paddle.disable_static()
            inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
            linear = paddle.nn.Linear(10, 10)
            inp = paddle.to_tensor(inp)
diff --git a/python/paddle/optimizer/adamw.py b/python/paddle/optimizer/adamw.py
index 2aa7fa115ec2efdcfe3765bca5916696e830a84a..2c963e816abcd1fcab795772979888c3f5f6fe2d 100644
--- a/python/paddle/optimizer/adamw.py
+++ b/python/paddle/optimizer/adamw.py
@@ -79,6 +79,7 @@ class AdamW(Adam):
    Examples:
        .. code-block:: python
+
            import paddle

            linear = paddle.nn.Linear(10, 10)
diff --git a/python/paddle/optimizer/lamb.py b/python/paddle/optimizer/lamb.py
index c6275a823022ae3d5ed30356c62ffd66d3fd218e..f3351ce092fa6cdc0591a35278ce5d596e3fd65f 100644
--- a/python/paddle/optimizer/lamb.py
+++ b/python/paddle/optimizer/lamb.py
@@ -65,6 +65,7 @@ class Lamb(Optimizer):
            :ref:`api_guide_Name` . Usually name is no need to set and None by default.
    Examples:
        .. code-block:: python
+
            import paddle
            import numpy as np
            inp = paddle.uniform(min=-0.1, max=0.1, shape=[10, 10], dtype='float32')
diff --git a/python/paddle/static/__init__.py b/python/paddle/static/__init__.py
index d683b4772e82cb171cc9169bb0fa84909da1abbc..e67676708bc101026e5e4e72236525febffe7a45 100644
--- a/python/paddle/static/__init__.py
+++ b/python/paddle/static/__init__.py
@@ -26,6 +26,12 @@ __all__ = [
 from . import nn
 from .io import save_inference_model #DEFINE_ALIAS
 from .io import load_inference_model #DEFINE_ALIAS
+from .io import deserialize_persistables #DEFINE_ALIAS
+from .io import serialize_persistables #DEFINE_ALIAS
+from .io import deserialize_program #DEFINE_ALIAS
+from .io import serialize_program #DEFINE_ALIAS
+from .io import load_from_file #DEFINE_ALIAS
+from .io import save_to_file #DEFINE_ALIAS
 from ..fluid import Scope #DEFINE_ALIAS
 from .input import data #DEFINE_ALIAS
 from .input import InputSpec #DEFINE_ALIAS
diff --git a/python/paddle/static/io.py b/python/paddle/static/io.py
index e88a052730414192cd4bc99b3abc2c6b46377c3e..887401861784a7550814ee1f47960d36944fc78a 100644
--- a/python/paddle/static/io.py
+++ b/python/paddle/static/io.py
@@ -213,8 +213,7 @@ def serialize_program(feed_vars, fetch_vars, **kwargs):
    Args:
        feed_vars(Variable | list[Variable]): Variables needed by inference.
        fetch_vars(Variable | list[Variable]): Variables returned by inference.
-       kwargs: Supported keys including 'program'.
-           Attention please, kwargs is used for backward compatibility mainly.
+       kwargs: Supported keys including 'program'. Attention please, kwargs is used for backward compatibility mainly.

           - program(Program): specify a program if you don't want to use default main program.

    Returns:
@@ -277,8 +276,7 @@ def serialize_persistables(feed_vars, fetch_vars, executor, **kwargs):
    Args:
        feed_vars(Variable | list[Variable]): Variables needed by inference.
        fetch_vars(Variable | list[Variable]): Variables returned by inference.
-       kwargs: Supported keys including 'program'.
-           Attention please, kwargs is used for backward compatibility mainly.
+       kwargs: Supported keys including 'program'. Attention please, kwargs is used for backward compatibility mainly.

           - program(Program): specify a program if you don't want to use default main program.

    Returns:
@@ -403,8 +401,7 @@ def save_inference_model(path_prefix, feed_vars, fetch_vars, executor,
        fetch_vars(Variable | list[Variable]): Variables returned by inference.
        executor(Executor): The executor that saves the inference model. You can refer
                            to :ref:`api_guide_executor_en` for more details.
-       kwargs: Supported keys including 'program'.
-           Attention please, kwargs is used for backward compatibility mainly.
+       kwargs: Supported keys including 'program'. Attention please, kwargs is used for backward compatibility mainly.

           - program(Program): specify a program if you don't want to use default main program.

    Returns:
        None
@@ -645,8 +642,7 @@ def load_inference_model(path_prefix, executor, **kwargs):
                      - Set to None when reading the model from memory.
        executor(Executor): The executor to run for loading inference model.
                            See :ref:`api_guide_executor_en` for more details about it.
-       kwargs: Supported keys including 'model_filename', 'params_filename'.
-           Attention please, kwargs is used for backward compatibility mainly.
+       kwargs: Supported keys including 'model_filename', 'params_filename'. Attention please, kwargs is used for backward compatibility mainly.

           - model_filename(str): specify model_filename if you don't want to use default name.
           - params_filename(str): specify params_filename if you don't want to use default name.
diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py
index 40a8fdb7ef0954b1eac183fdb4b06af4b4b3dc66..5aa4e76b97fcd324ccd50ff8b26ff26048db7de2 100644
--- a/python/paddle/tensor/manipulation.py
+++ b/python/paddle/tensor/manipulation.py
@@ -284,6 +284,7 @@ def roll(x, shifts, axis=None, name=None):
    Examples:
        .. code-block:: python
+
            import paddle

            x = paddle.to_tensor([[1.0, 2.0, 3.0],
diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py
index d53ff7b766b5ee1db2fea92b10fb95a0d3962388..83ff36f88eff6d3fb476711cf44113fce1aa9c35 100755
--- a/python/paddle/tensor/math.py
+++ b/python/paddle/tensor/math.py
@@ -174,7 +174,8 @@ def pow(x, y, name=None):
            print(res) # [1 4 9]

            # example 2: y is a Tensor
-           y = paddle.full(shape=[1], fill_value=2, dtype='float32')
+           y = paddle.full(shape=[1], fill_value=2, dtype='int64')
+
            res = paddle.pow(x, y)
            print(res) # [1 4 9]
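As a quick sanity check on the corrected docstring examples, the following is a minimal smoke-test sketch. It assumes paddle >= 2.0 with dygraph mode enabled by default; the script is illustrative only and is not part of the patch.

.. code-block:: python

    import numpy as np
    import paddle
    import paddle.nn.functional as F

    # smooth_l1_loss: exercises the typo fix (paddle.nn.functioanl -> paddle.nn.functional)
    input = paddle.to_tensor(np.random.rand(3, 3).astype("float32"))
    label = paddle.to_tensor(np.random.rand(3, 3).astype("float32"))
    print(F.smooth_l1_loss(input, label))

    # max_pool3d: exercises the corrected 3-D call (max_pool3d, not max_pool2d)
    x = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32, 32, 32]).astype(np.float32))
    out, max_indices = F.max_pool3d(x, kernel_size=2, stride=2, padding=0, return_mask=True)
    print(out.shape, max_indices.shape)  # [1, 3, 16, 16, 16] [1, 3, 16, 16, 16]

    # L1Loss: exercises the public alias paddle.nn.L1Loss (was paddle.nn.loss.L1Loss)
    l1_loss = paddle.nn.L1Loss(reduction='none')
    print(l1_loss(input, label))  # element-wise |input - label|, shape [3, 3]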