Unverified commit faf2bb39, authored by Chen Long, committed by GitHub

[cherry-pick] Fix 2.0 bugs (#29992)

* fix doc bugs test=document_fix

* fix code bugs test=document_fix

* fix code bugs test=document_fix

* fix doc bugs test=document_fix

* fix doc bugs test=document_fix

* fix doc bugs test=document_fix
Parent d6a4f89a
@@ -513,7 +513,7 @@ def smooth_l1_loss(input, label, reduction='mean', delta=1.0, name=None):
             label_data = np.random.rand(3,3).astype("float32")
             input = paddle.to_tensor(input_data)
             label = paddle.to_tensor(label_data)
-            output = paddle.nn.functioanl.smooth_l1_loss(input, label)
+            output = paddle.nn.functional.smooth_l1_loss(input, label)
             print(output)
     """
     fluid.data_feeder.check_variable_and_dtype(
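The fixed call is easier to verify in isolation. Below is a self-contained version of the corrected docstring example, assuming paddle >= 2.0; the numpy import is implied by the hunk's context but not visible in it.

    import paddle
    import numpy as np

    # Random inputs only for demonstration; smooth_l1_loss expects float tensors.
    input_data = np.random.rand(3, 3).astype("float32")
    label_data = np.random.rand(3, 3).astype("float32")
    input = paddle.to_tensor(input_data)
    label = paddle.to_tensor(label_data)

    # The corrected module path: paddle.nn.functional, not "functioanl".
    output = paddle.nn.functional.smooth_l1_loss(input, label)
    print(output)  # a scalar tensor (reduction defaults to 'mean')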
@@ -1187,12 +1187,16 @@ def cross_entropy(input,
         .. code-block:: python
+
             import paddle
+            import numpy as np
+
             input_data = np.random.random([5, 100]).astype("float64")
             label_data = np.random.randint(0, 100, size=(5)).astype(np.int64)
             weight_data = np.random.random([100]).astype("float64")
             input = paddle.to_tensor(input_data)
             label = paddle.to_tensor(label_data)
             weight = paddle.to_tensor(weight_data)
             loss = paddle.nn.functional.cross_entropy(input=input, label=label, weight=weight)
             print(loss)
             # [4.28546723]
...
@@ -271,9 +271,7 @@ def layer_norm(x,
           np.random.seed(123)
           x_data = np.random.random(size=(2, 2, 2, 3)).astype('float32')
           x = paddle.to_tensor(x_data)
-          layer_norm = paddle.nn.functional.layer_norm(x, x.shape[1:])
-          layer_norm_out = layer_norm(x)
+          layer_norm_out = paddle.nn.functional.layer_norm(x, x.shape[1:])
           print(layer_norm_out)
     """
     input_shape = list(x.shape)
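The layer_norm hunk fixes a category error, not just a name: the old snippet assigned the functional result to a variable and then tried to call it like a layer object. A minimal sketch of the two equivalent styles, assuming paddle >= 2.0 (variable names here are illustrative, not from the patch):

    import paddle
    import numpy as np

    np.random.seed(123)
    x = paddle.to_tensor(np.random.random(size=(2, 2, 2, 3)).astype('float32'))

    # Functional style: one call returns the normalized tensor directly.
    out_functional = paddle.nn.functional.layer_norm(x, x.shape[1:])

    # Layer style: build the layer object first, then call it on the input.
    layer = paddle.nn.LayerNorm(x.shape[1:])
    out_layer = layer(x)

    print(out_functional.shape)  # [2, 2, 2, 3]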
@@ -363,7 +361,7 @@ def instance_norm(x,
           np.random.seed(123)
           x_data = np.random.random(size=(2, 2, 2, 3)).astype('float32')
           x = paddle.to_tensor(x_data)
-          instance_norm_out = paddle.nn.functional.instancenorm(x)
+          instance_norm_out = paddle.nn.functional.instance_norm(x)
           print(instance_norm_out)
...
@@ -198,11 +198,14 @@ def avg_pool1d(x,
     Examples:
         .. code-block:: python
+
           import paddle
           import paddle.nn.functional as F
+          import numpy as np
+
           data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32))
           out = F.avg_pool1d(data, kernel_size=2, stride=2, padding=0)
           # out shape: [1, 3, 16]
     """
     """NCL to NCHW"""
     data_format = "NCHW"
@@ -302,23 +305,28 @@ def avg_pool2d(x,
         name(str, optional): For detailed information, please refer
             to :ref:`api_guide_Name`. Usually name is no need to set and
             None by default.
+
     Returns:
         Tensor: The output tensor of pooling result. The data type is same as input tensor.
+
     Raises:
         ValueError: If `padding` is a string, but not "SAME" or "VALID".
         ValueError: If `padding` is "VALID", but `ceil_mode` is True.
         ShapeError: If the output's shape calculated is not greater than 0.
+
     Examples:
         .. code-block:: python
+
           import paddle
           import paddle.nn.functional as F
           import numpy as np
+
           # avg pool2d
           x = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32, 32]).astype(np.float32))
           out = F.avg_pool2d(x,
                              kernel_size=2,
                              stride=2, padding=0)
           # out.shape [1, 3, 16, 16]
     """
     check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'avg_pool2d')
     kernel_size = utils.convert_to_list(kernel_size, 2, 'pool_size')
@@ -415,16 +423,21 @@ def avg_pool3d(x,
         name(str, optional): For detailed information, please refer
             to :ref:`api_guide_Name`. Usually name is no need to set and
             None by default.
+
     Returns:
         Tensor: The output tensor of pooling result. The data type is same as input tensor.
+
     Raises:
         ValueError: If `padding` is a string, but not "SAME" or "VALID".
         ValueError: If `padding` is "VALID", but `ceil_mode` is True.
         ShapeError: If the output's shape calculated is not greater than 0.
+
     Examples:
         .. code-block:: python

-          import paddle.fluid as fluid
           import paddle
+          import numpy as np
+
           x = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32, 32, 32]).astype(np.float32))
           # avg pool3d
           out = paddle.nn.functional.avg_pool3d(
@@ -537,6 +550,8 @@ def max_pool1d(x,
             import paddle
             import paddle.nn.functional as F
+            import numpy as np
+
             data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32))
             pool_out = F.max_pool1d(data, kernel_size=2, stride=2, padding=0)
             # pool_out shape: [1, 3, 16]
@@ -650,29 +665,32 @@ def max_pool2d(x,
             None by default.
+
     Returns:
         Tensor: The output tensor of pooling result. The data type is same as input tensor.
+
     Raises:
         ValueError: If `padding` is a string, but not "SAME" or "VALID".
         ValueError: If `padding` is "VALID", but `ceil_mode` is True.
         ShapeError: If the output's shape calculated is not greater than 0.
+
     Examples:
         .. code-block:: python

           import paddle
           import paddle.nn.functional as F
           import numpy as np

           # max pool2d
           x = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32, 32]).astype(np.float32))
           out = F.max_pool2d(x,
                              kernel_size=2,
                              stride=2, padding=0)
           # output.shape [1, 3, 16, 16]

           # for return_mask=True
           out, max_indices = F.max_pool2d(x,
                                           kernel_size=2,
                                           stride=2,
                                           padding=0,
                                           return_mask=True)
           # out.shape [1, 3, 16, 16], max_indices.shape [1, 3, 16, 16]
     """
     check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'max_pool2d')
     kernel_size = utils.convert_to_list(kernel_size, 2, 'pool_size')
@@ -778,33 +796,36 @@ def max_pool3d(x,
         name(str, optional): For detailed information, please refer
             to :ref:`api_guide_Name`. Usually name is no need to set and
             None by default.
+
     Returns:
         Tensor: The output tensor of pooling result. The data type is same as input tensor.
+
     Raises:
         ValueError: If `padding` is a string, but not "SAME" or "VALID".
         ValueError: If `padding` is "VALID", but `ceil_mode` is True.
         ShapeError: If the output's shape calculated is not greater than 0.
+
     Examples:
         .. code-block:: python

           import paddle
           import paddle.nn.functional as F
           import numpy as np

           # max pool3d
           x = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32, 32, 32]).astype(np.float32))
           output = F.max_pool3d(x,
                                 kernel_size=2,
                                 stride=2, padding=0)
           # output.shape [1, 3, 16, 16, 16]

           # for return_mask=True
           x = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32, 32, 32]).astype(np.float32))
           output, max_indices = paddle.nn.functional.max_pool3d(x,
                                                                 kernel_size=2,
                                                                 stride=2,
                                                                 padding=0,
                                                                 return_mask=True)
           # output.shape [1, 3, 16, 16, 16], max_indices.shape [1, 3, 16, 16, 16]
     """
     check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'max_pool3d')
     kernel_size = utils.convert_to_list(kernel_size, 3, 'pool_size')
@@ -905,6 +926,7 @@ def adaptive_avg_pool1d(x, output_size, name=None):
             #
             import paddle
             import paddle.nn.functional as F
+            import numpy as np
             data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32))
             pool_out = F.adaptive_avg_pool1d(data, output_size=16)
@@ -1187,6 +1209,7 @@ def adaptive_max_pool1d(x, output_size, return_mask=False, name=None):
             #
             import paddle
             import paddle.nn.functional as F
+            import numpy as np
             data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32))
             pool_out = F.adaptive_max_pool1d(data, output_size=16)
...
@@ -515,6 +515,7 @@ class LeakyReLU(layers.Layer):
         .. code-block:: python

             import paddle
+            import numpy as np

             m = paddle.nn.LeakyReLU()
             x = paddle.to_tensor(np.array([-2, 0, 1], 'float32'))
...
@@ -332,6 +332,7 @@ class Upsample(layers.Layer):
     Examples:
         .. code-block:: python
+
             import paddle
             import paddle.nn as nn
             import numpy as np
...
@@ -207,6 +207,7 @@ class CrossEntropyLoss(fluid.dygraph.Layer):
     Examples:
         .. code-block:: python
+
             import paddle
             import numpy as np
@@ -491,28 +492,28 @@ class L1Loss(fluid.dygraph.Layer):
     Examples:
         .. code-block:: python
+
             import paddle
             import numpy as np

-            paddle.disable_static()
             input_data = np.array([[1.5, 0.8], [0.2, 1.3]]).astype("float32")
             label_data = np.array([[1.7, 1], [0.4, 0.5]]).astype("float32")
             input = paddle.to_tensor(input_data)
             label = paddle.to_tensor(label_data)

-            l1_loss = paddle.nn.loss.L1Loss()
+            l1_loss = paddle.nn.L1Loss()
             output = l1_loss(input, label)
             print(output.numpy())
             # [0.35]

-            l1_loss = paddle.nn.loss.L1Loss(reduction='sum')
+            l1_loss = paddle.nn.L1Loss(reduction='sum')
             output = l1_loss(input, label)
             print(output.numpy())
             # [1.4]

-            l1_loss = paddle.nn.loss.L1Loss(reduction='none')
+            l1_loss = paddle.nn.L1Loss(reduction='none')
             output = l1_loss(input, label)
-            print(output.numpy())
+            print(output)
             # [[0.20000005 0.19999999]
             #  [0.2        0.79999995]]
     """
@@ -596,12 +597,11 @@ class BCELoss(fluid.dygraph.Layer):
             input_data = np.array([0.5, 0.6, 0.7]).astype("float32")
             label_data = np.array([1.0, 0.0, 1.0]).astype("float32")

-            paddle.disable_static()
             input = paddle.to_tensor(input_data)
             label = paddle.to_tensor(label_data)
-            bce_loss = paddle.nn.loss.BCELoss()
+            bce_loss = paddle.nn.BCELoss()
             output = bce_loss(input, label)
-            print(output.numpy())  # [0.65537095]
+            print(output)  # [0.65537095]
     """
@@ -850,8 +850,8 @@ class MarginRankingLoss(fluid.dygraph.Layer):
             import paddle

-            input = paddle.to_tensor([[1, 2], [3, 4]]), dtype="float32")
-            other = paddle.to_tensor([[2, 1], [2, 4]]), dtype="float32")
+            input = paddle.to_tensor([[1, 2], [3, 4]], dtype="float32")
+            other = paddle.to_tensor([[2, 1], [2, 4]], dtype="float32")
             label = paddle.to_tensor([[1, -1], [-1, -1]], dtype="float32")
             margin_rank_loss = paddle.nn.MarginRankingLoss()
             loss = margin_rank_loss(input, other, label)
...
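The MarginRankingLoss fix only moves two misplaced parentheses, so the corrected snippet runs as-is. A completed version with the expected result, assuming paddle >= 2.0; the printed value is hand-computed for the defaults margin=0.0 and reduction='mean':

    import paddle

    input = paddle.to_tensor([[1, 2], [3, 4]], dtype="float32")
    other = paddle.to_tensor([[2, 1], [2, 4]], dtype="float32")
    label = paddle.to_tensor([[1, -1], [-1, -1]], dtype="float32")

    margin_rank_loss = paddle.nn.MarginRankingLoss()
    loss = margin_rank_loss(input, other, label)
    # loss = mean(max(0, -label * (input - other))) = 0.75
    print(loss)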
@@ -90,6 +90,7 @@ class AvgPool1D(layers.Layer):
             import paddle
             import paddle.nn as nn
+            import numpy as np

             data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32))
             AvgPool1D = nn.AvgPool1D(kernel_size=2, stride=2, padding=0)
@@ -185,7 +186,7 @@ class AvgPool2D(layers.Layer):
             input = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32, 32]).astype(np.float32))
             AvgPool2D = nn.AvgPool2D(kernel_size=2,
                                      stride=2, padding=0)
-            output = AvgPoo2d(input)
+            output = AvgPool2D(input)
             # output.shape [1, 3, 16, 16]
     """
@@ -367,6 +368,7 @@ class MaxPool1D(layers.Layer):
             import paddle
             import paddle.nn as nn
+            import numpy as np

             data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32))
             MaxPool1D = nn.MaxPool1D(kernel_size=2, stride=2, padding=0)
@@ -646,6 +648,7 @@ class AdaptiveAvgPool1D(layers.Layer):
             #
             import paddle
             import paddle.nn as nn
+            import numpy as np

             data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32))
             AdaptiveAvgPool1D = nn.AdaptiveAvgPool1D(output_size=16)
@@ -884,8 +887,9 @@ class AdaptiveMaxPool1D(layers.Layer):
             #         lend = ceil((i + 1) * L / m)
             #         output[:, :, i] = max(input[:, :, lstart: lend])
             #
             import paddle
             import paddle.nn as nn
+            import numpy as np

             data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32))
             AdaptiveMaxPool1D = nn.AdaptiveMaxPool1D(output_size=16)
...
@@ -120,7 +120,7 @@ class MultiHeadAttention(Layer):
             query = paddle.rand((2, 4, 128))
             # self attention mask: [batch_size, num_heads, query_len, query_len]
             attn_mask = paddle.rand((2, 2, 4, 4))
-            multi_head_attn = paddle.MultiHeadAttention(128, 2)
+            multi_head_attn = paddle.nn.MultiHeadAttention(128, 2)
             output = multi_head_attn(query, None, None, attn_mask=attn_mask)  # [2, 4, 128]
     """
...
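The MultiHeadAttention fix moves the class to its real home under paddle.nn. A self-contained version of the docstring example, assuming paddle >= 2.0:

    import paddle

    query = paddle.rand((2, 4, 128))       # [batch_size, sequence_length, embed_dim]
    # self attention mask: [batch_size, num_heads, query_len, query_len]
    attn_mask = paddle.rand((2, 2, 4, 4))

    # Correct path: paddle.nn.MultiHeadAttention, not paddle.MultiHeadAttention.
    multi_head_attn = paddle.nn.MultiHeadAttention(embed_dim=128, num_heads=2)

    # key=None and value=None make this self-attention over `query`.
    output = multi_head_attn(query, None, None, attn_mask=attn_mask)
    print(output.shape)  # [2, 4, 128]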
@@ -212,6 +212,7 @@ def remove_weight_norm(layer, name='weight'):
    Examples:
        .. code-block:: python
+
          import paddle
          from paddle.nn import Conv2D
          from paddle.nn.utils import weight_norm, remove_weight_norm
...
@@ -78,10 +78,10 @@ class Adamax(Optimizer):
     Examples:
         .. code-block:: python
+
             import paddle
             import numpy as np

-            paddle.disable_static()
             inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
             linear = paddle.nn.Linear(10, 10)
             inp = paddle.to_tensor(inp)
...
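The Adamax hunk drops paddle.disable_static() because dynamic graph mode is the default in 2.0. A complete single-step sketch of the truncated example, assuming paddle >= 2.0; the learning rate is illustrative:

    import paddle
    import numpy as np

    inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
    linear = paddle.nn.Linear(10, 10)
    inp = paddle.to_tensor(inp)
    out = linear(inp)
    loss = paddle.mean(out)

    adamax = paddle.optimizer.Adamax(learning_rate=0.1,
                                     parameters=linear.parameters())
    loss.backward()      # compute gradients in dygraph mode
    adamax.step()        # apply one Adamax update
    adamax.clear_grad()  # reset gradients for the next iteration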
@@ -79,6 +79,7 @@ class AdamW(Adam):
     Examples:
         .. code-block:: python
+
             import paddle

             linear = paddle.nn.Linear(10, 10)
...
@@ -65,6 +65,7 @@ class Lamb(Optimizer):
             :ref:`api_guide_Name` . Usually name is no need to set and None by default.
     Examples:
         .. code-block:: python
+
             import paddle
             import numpy as np

             inp = paddle.uniform(min=-0.1, max=0.1, shape=[10, 10], dtype='float32')
...
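Likewise, the truncated Lamb example can be completed into one optimization step. A sketch assuming paddle >= 2.0, with an illustrative lamb_weight_decay value:

    import paddle

    inp = paddle.uniform(min=-0.1, max=0.1, shape=[10, 10], dtype='float32')
    linear = paddle.nn.Linear(10, 10)
    out = linear(inp)
    loss = paddle.mean(out)

    lamb = paddle.optimizer.Lamb(learning_rate=0.002,
                                 lamb_weight_decay=0.01,
                                 parameters=linear.parameters())
    loss.backward()
    lamb.step()
    lamb.clear_grad()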
@@ -26,6 +26,12 @@ __all__ = [
 from . import nn
 from .io import save_inference_model #DEFINE_ALIAS
 from .io import load_inference_model #DEFINE_ALIAS
+from .io import deserialize_persistables #DEFINE_ALIAS
+from .io import serialize_persistables #DEFINE_ALIAS
+from .io import deserialize_program #DEFINE_ALIAS
+from .io import serialize_program #DEFINE_ALIAS
+from .io import load_from_file #DEFINE_ALIAS
+from .io import save_to_file #DEFINE_ALIAS
 from ..fluid import Scope #DEFINE_ALIAS
 from .input import data #DEFINE_ALIAS
 from .input import InputSpec #DEFINE_ALIAS
...
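The six new aliases surface the serialize/deserialize and file helpers alongside save_inference_model and load_inference_model. A minimal end-to-end sketch of the save/load path documented in the hunks below, assuming paddle >= 2.0 in static mode; the path and the one-layer network are illustrative:

    import paddle

    paddle.enable_static()

    # A toy static-graph program: one fully-connected layer.
    image = paddle.static.data(name='image', shape=[None, 784], dtype='float32')
    predict = paddle.static.nn.fc(image, 10, activation='softmax')

    exe = paddle.static.Executor(paddle.CPUPlace())
    exe.run(paddle.static.default_startup_program())

    # feed_vars / fetch_vars match the signatures documented below.
    paddle.static.save_inference_model('/tmp/infer_model', [image], [predict], exe)
    program, feed_names, fetch_targets = paddle.static.load_inference_model(
        '/tmp/infer_model', exe)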
@@ -213,8 +213,7 @@ def serialize_program(feed_vars, fetch_vars, **kwargs):
     Args:
         feed_vars(Variable | list[Variable]): Variables needed by inference.
         fetch_vars(Variable | list[Variable]): Variables returned by inference.
-        kwargs: Supported keys including 'program'.
-            Attention please, kwargs is used for backward compatibility mainly.
+        kwargs: Supported keys including 'program'. Attention please, kwargs is used for backward compatibility mainly.

           - program(Program): specify a program if you don't want to use default main program.

     Returns:
@@ -277,8 +276,7 @@ def serialize_persistables(feed_vars, fetch_vars, executor, **kwargs):
     Args:
         feed_vars(Variable | list[Variable]): Variables needed by inference.
         fetch_vars(Variable | list[Variable]): Variables returned by inference.
-        kwargs: Supported keys including 'program'.
-            Attention please, kwargs is used for backward compatibility mainly.
+        kwargs: Supported keys including 'program'. Attention please, kwargs is used for backward compatibility mainly.

           - program(Program): specify a program if you don't want to use default main program.

     Returns:
@@ -403,8 +401,7 @@ def save_inference_model(path_prefix, feed_vars, fetch_vars, executor,
         fetch_vars(Variable | list[Variable]): Variables returned by inference.
         executor(Executor): The executor that saves the inference model. You can refer
             to :ref:`api_guide_executor_en` for more details.
-        kwargs: Supported keys including 'program'.
-            Attention please, kwargs is used for backward compatibility mainly.
+        kwargs: Supported keys including 'program'. Attention please, kwargs is used for backward compatibility mainly.

           - program(Program): specify a program if you don't want to use default main program.

     Returns:
         None
@@ -645,8 +642,7 @@ def load_inference_model(path_prefix, executor, **kwargs):
             - Set to None when reading the model from memory.
         executor(Executor): The executor to run for loading inference model.
             See :ref:`api_guide_executor_en` for more details about it.
-        kwargs: Supported keys including 'model_filename', 'params_filename'.
-            Attention please, kwargs is used for backward compatibility mainly.
+        kwargs: Supported keys including 'model_filename', 'params_filename'. Attention please, kwargs is used for backward compatibility mainly.

           - model_filename(str): specify model_filename if you don't want to use default name.
           - params_filename(str): specify params_filename if you don't want to use default name.
...
@@ -284,6 +284,7 @@ def roll(x, shifts, axis=None, name=None):
     Examples:
         .. code-block:: python
+
             import paddle

             x = paddle.to_tensor([[1.0, 2.0, 3.0],
...
@@ -174,7 +174,8 @@ def pow(x, y, name=None):
             print(res)  # [1 4 9]

             # example 2: y is a Tensor
-            y = paddle.full(shape=[1], fill_value=2, dtype='float32')
+            y = paddle.full(shape=[1], fill_value=2, dtype='int64')
+
             res = paddle.pow(x, y)
             print(res)  # [1 4 9]
...
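The pow fix is about dtype agreement, not the printed result: x in the surrounding example is an integer tensor, and paddle.pow requires a tensor exponent whose dtype matches x's. A sketch of why the fixed line works, assuming paddle >= 2.0 (the mismatch error as the motivation is inferred from the fix, not stated in the patch):

    import paddle

    x = paddle.to_tensor([1, 2, 3], dtype='int64')

    # Tensor exponent with a matching dtype, as in the fixed docstring line.
    y = paddle.full(shape=[1], fill_value=2, dtype='int64')
    res = paddle.pow(x, y)
    print(res)  # [1, 4, 9]

    # With dtype='float32' the exponent would not match x's int64 dtype,
    # which is what the documentation fix avoids.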