Unverified · Commit 0a15b0db authored by yuchen202, committed by GitHub

[xdoctest] reformat example code with google style in No.36-43 (#56440)

Parent 71e28b12
@@ -368,24 +368,24 @@ def conv1d(
Examples:
    .. code-block:: python
- import paddle
- import paddle.nn.functional as F
- x = paddle.to_tensor([[[4, 8, 1, 9],
-                        [7, 2, 0, 9],
-                        [6, 9, 2, 6]]], dtype="float32")
- w = paddle.to_tensor([[[9, 3, 4],
-                        [0, 0, 7],
-                        [2, 5, 6]],
-                       [[0, 3, 4],
-                        [2, 9, 7],
-                        [5, 6, 8]]], dtype="float32")
- y = F.conv1d(x, w)
- print(y)
- # Tensor(shape=[1, 2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
- # [[[133., 238.],
- #   [160., 211.]]])
+ >>> import paddle
+ >>> import paddle.nn.functional as F
+ >>> x = paddle.to_tensor([[[4, 8, 1, 9],
+ ...                        [7, 2, 0, 9],
+ ...                        [6, 9, 2, 6]]], dtype="float32")
+ >>> w = paddle.to_tensor([[[9, 3, 4],
+ ...                        [0, 0, 7],
+ ...                        [2, 5, 6]],
+ ...                       [[0, 3, 4],
+ ...                        [2, 9, 7],
+ ...                        [5, 6, 8]]], dtype="float32")
+ >>> y = F.conv1d(x, w)
+ >>> print(y)
+ Tensor(shape=[1, 2, 2], dtype=float32, place=Place(cpu), stop_gradient=True,
+ [[[133., 238.],
+   [160., 211.]]])
"""
cudnn_version = get_cudnn_version()
if cudnn_version is not None:
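A quick way to sanity-check the expected shape above: the conv1d output length follows L_out = (L_in + 2*padding - K) // stride + 1. A minimal sketch, not part of this patch:

.. code-block:: python

    import paddle
    import paddle.nn.functional as F

    # (4 + 2*0 - 3) // 1 + 1 = 2, which matches the printed shape [1, 2, 2].
    x = paddle.ones((1, 3, 4), dtype="float32")  # (batch, in_channels, L_in)
    w = paddle.ones((2, 3, 3), dtype="float32")  # (out_channels, in_channels, K)
    y = F.conv1d(x, w)
    assert y.shape == [1, 2, 2]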
@@ -632,16 +632,16 @@ def conv2d(
Examples:
    .. code-block:: python
- import paddle
- import paddle.nn.functional as F
+ >>> import paddle
+ >>> import paddle.nn.functional as F
- x_var = paddle.randn((2, 3, 8, 8), dtype='float32')
- w_var = paddle.randn((6, 3, 3, 3), dtype='float32')
+ >>> x_var = paddle.randn((2, 3, 8, 8), dtype='float32')
+ >>> w_var = paddle.randn((6, 3, 3, 3), dtype='float32')
- y_var = F.conv2d(x_var, w_var)
+ >>> y_var = F.conv2d(x_var, w_var)
- print(y_var.shape)
- # [2, 6, 6, 6]
+ >>> print(y_var.shape)
+ [2, 6, 6, 6]
"""
# entry checks
if data_format not in ["NCHW", "NHWC"]:
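The same rule applies per spatial axis for conv2d; a hedged sketch (not from the patch) of how stride and padding enter it:

.. code-block:: python

    import paddle
    import paddle.nn.functional as F

    # H_out = (H_in + 2*padding - K) // stride + 1 = (8 + 2 - 3) // 2 + 1 = 4
    x_var = paddle.randn((2, 3, 8, 8), dtype='float32')
    w_var = paddle.randn((6, 3, 3, 3), dtype='float32')
    y_var = F.conv2d(x_var, w_var, stride=2, padding=1)
    assert y_var.shape == [2, 6, 4, 4]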
@@ -887,20 +887,20 @@ def conv1d_transpose(
Examples:
    .. code-block:: python
- import paddle
- import paddle.nn.functional as F
+ >>> import paddle
+ >>> import paddle.nn.functional as F
- # shape: (1, 2, 4)
- x = paddle.to_tensor([[[4, 0, 9, 7],
-                        [8, 0, 9, 2,]]], dtype="float32")
- # shape: (2, 1, 2)
- w = paddle.to_tensor([[[7, 0]],
-                       [[4, 2]]], dtype="float32")
+ >>> # shape: (1, 2, 4)
+ >>> x = paddle.to_tensor([[[4, 0, 9, 7],
+ ...                        [8, 0, 9, 2,]]], dtype="float32")
+ >>> # shape: (2, 1, 2)
+ >>> w = paddle.to_tensor([[[7, 0]],
+ ...                       [[4, 2]]], dtype="float32")
- y = F.conv1d_transpose(x, w)
- print(y)
- # Tensor(shape=[1, 1, 5], dtype=float32, place=Place(gpu:0), stop_gradient=True,
- # [[[60., 16., 99., 75., 4. ]]])
+ >>> y = F.conv1d_transpose(x, w)
+ >>> print(y)
+ Tensor(shape=[1, 1, 5], dtype=float32, place=Place(cpu), stop_gradient=True,
+ [[[60., 16., 99., 75., 4. ]]])
"""
cudnn_version = get_cudnn_version()
if cudnn_version is not None:
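Transposed convolution inverts the shape rule, which explains the length-5 output above: L_out = (L_in - 1) * stride - 2 * padding + K. A minimal sketch, not part of this patch:

.. code-block:: python

    import paddle
    import paddle.nn.functional as F

    # (4 - 1) * 1 - 2*0 + 2 = 5, matching shape=[1, 1, 5] in the example.
    x = paddle.ones((1, 2, 4), dtype="float32")  # (batch, in_channels, L_in)
    w = paddle.ones((2, 1, 2), dtype="float32")  # (in_channels, out_channels, K)
    y = F.conv1d_transpose(x, w)
    assert y.shape == [1, 1, 5]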
@@ -1183,16 +1183,16 @@ def conv2d_transpose(
Examples:
    .. code-block:: python
- import paddle
- import paddle.nn.functional as F
+ >>> import paddle
+ >>> import paddle.nn.functional as F
- x_var = paddle.randn((2, 3, 8, 8), dtype='float32')
- w_var = paddle.randn((3, 6, 3, 3), dtype='float32')
+ >>> x_var = paddle.randn((2, 3, 8, 8), dtype='float32')
+ >>> w_var = paddle.randn((3, 6, 3, 3), dtype='float32')
- y_var = F.conv2d_transpose(x_var, w_var)
+ >>> y_var = F.conv2d_transpose(x_var, w_var)
- print(y_var.shape)
- # [2, 6, 10, 10]
+ >>> print(y_var.shape)
+ [2, 6, 10, 10]
"""
if data_format not in ['NCHW', 'NHWC']:
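With a stride larger than 1, conv2d_transpose upsamples; a sketch (not from the patch) under the same shape rule:

.. code-block:: python

    import paddle
    import paddle.nn.functional as F

    # H_out = (H_in - 1) * stride - 2 * padding + K = (8 - 1) * 2 - 2 + 3 = 15
    x_var = paddle.randn((2, 3, 8, 8), dtype='float32')
    w_var = paddle.randn((3, 6, 3, 3), dtype='float32')  # (in_channels, out_channels, K, K)
    y_var = F.conv2d_transpose(x_var, w_var, stride=2, padding=1)
    assert y_var.shape == [2, 6, 15, 15]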
@@ -1476,16 +1476,16 @@ def conv3d(
Examples:
    .. code-block:: python
- import paddle
- import paddle.nn.functional as F
+ >>> import paddle
+ >>> import paddle.nn.functional as F
- x_var = paddle.randn((2, 3, 8, 8, 8), dtype='float32')
- w_var = paddle.randn((6, 3, 3, 3, 3), dtype='float32')
+ >>> x_var = paddle.randn((2, 3, 8, 8, 8), dtype='float32')
+ >>> w_var = paddle.randn((6, 3, 3, 3, 3), dtype='float32')
- y_var = F.conv3d(x_var, w_var)
+ >>> y_var = F.conv3d(x_var, w_var)
- print(y_var.shape)
- # [2, 6, 6, 6, 6]
+ >>> print(y_var.shape)
+ [2, 6, 6, 6, 6]
"""
# entry check
if data_format not in ["NCDHW", "NDHWC"]:
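conv3d also supports grouped convolution, where the weight's channel axis shrinks to in_channels // groups; a hedged sketch, not part of this patch:

.. code-block:: python

    import paddle
    import paddle.nn.functional as F

    # groups=3: each group of 2 output channels sees 3 // 3 = 1 input channel.
    x = paddle.randn((2, 3, 8, 8, 8), dtype='float32')
    w = paddle.randn((6, 1, 3, 3, 3), dtype='float32')
    y = F.conv3d(x, w, groups=3)
    assert y.shape == [2, 6, 6, 6, 6]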
@@ -1688,18 +1688,18 @@ def conv3d_transpose(
        variable storing transposed convolution and non-linearity activation result.
Examples:
    .. code-block:: python
- import paddle
- import paddle.nn.functional as F
+ >>> import paddle
+ >>> import paddle.nn.functional as F
- x_var = paddle.randn((2, 3, 8, 8, 8), dtype='float32')
- w_var = paddle.randn((3, 6, 3, 3, 3), dtype='float32')
+ >>> x_var = paddle.randn((2, 3, 8, 8, 8), dtype='float32')
+ >>> w_var = paddle.randn((3, 6, 3, 3, 3), dtype='float32')
- y_var = F.conv3d_transpose(x_var, w_var)
+ >>> y_var = F.conv3d_transpose(x_var, w_var)
- print(y_var.shape)
- # [2, 6, 10, 10, 10]
+ >>> print(y_var.shape)
+ [2, 6, 10, 10, 10]
"""
# entry checks
if data_format not in ["NCDHW", "NDHWC"]:
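A common use of conv3d_transpose is doubling the spatial size; a sketch (not from the patch) with stride=2, kernel=4, padding=1:

.. code-block:: python

    import paddle
    import paddle.nn.functional as F

    # D_out = (D_in - 1) * stride - 2 * padding + K = (8 - 1) * 2 - 2 + 4 = 16
    x = paddle.randn((2, 3, 8, 8, 8), dtype='float32')
    w = paddle.randn((3, 6, 4, 4, 4), dtype='float32')  # (in_channels, out_channels, K, K, K)
    y = F.conv3d_transpose(x, w, stride=2, padding=1)
    assert y.shape == [2, 6, 16, 16, 16]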
@@ -59,14 +59,13 @@ def pairwise_distance(x, y, p=2.0, epsilon=1e-6, keepdim=False, name=None):
Examples:
    .. code-block:: python
- import paddle
- x = paddle.to_tensor([[1., 3.], [3., 5.]], dtype=paddle.float64)
- y = paddle.to_tensor([[5., 6.], [7., 8.]], dtype=paddle.float64)
- distance = paddle.nn.functional.pairwise_distance(x, y)
- print(distance)
- # Tensor(shape=[2], dtype=float64, place=Place(gpu:0), stop_gradient=True,
- # [4.99999860, 4.99999860])
+ >>> import paddle
+ >>> x = paddle.to_tensor([[1., 3.], [3., 5.]], dtype=paddle.float64)
+ >>> y = paddle.to_tensor([[5., 6.], [7., 8.]], dtype=paddle.float64)
+ >>> distance = paddle.nn.functional.pairwise_distance(x, y)
+ >>> print(distance)
+ Tensor(shape=[2], dtype=float64, place=Place(cpu), stop_gradient=True,
+ [4.99999860, 4.99999860])
"""
if in_dynamic_mode():
    sub = _C_ops.subtract(x, y)
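The printed value 4.99999860 (rather than exactly 5) comes from the epsilon term: pairwise_distance takes the p-norm of (x - y) with a small epsilon added for numerical stability. A sketch reproducing it, assuming the default epsilon of 1e-6 (not part of this patch):

.. code-block:: python

    import paddle

    x = paddle.to_tensor([[1., 3.], [3., 5.]], dtype=paddle.float64)
    y = paddle.to_tensor([[5., 6.], [7., 8.]], dtype=paddle.float64)
    # norm([-4 + 1e-6, -3 + 1e-6]) ~= 4.99999860, as in the expected output
    manual = paddle.linalg.norm(x - y + 1e-6, p=2, axis=-1)
    builtin = paddle.nn.functional.pairwise_distance(x, y)
    assert paddle.allclose(manual, builtin)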
@@ -55,48 +55,46 @@ def diag_embed(input, offset=0, dim1=-2, dim2=-1):
Examples:
    .. code-block:: python
- import paddle
- import paddle.nn.functional as F
- diag_embed_input = paddle.arange(6)
- diag_embed_output1 = F.diag_embed(diag_embed_input)
- print(diag_embed_output1)
- # Tensor(shape=[6, 6], dtype=int64, place=Place(cpu), stop_gradient=True,
- # [[0, 0, 0, 0, 0, 0],
- #  [0, 1, 0, 0, 0, 0],
- #  [0, 0, 2, 0, 0, 0],
- #  [0, 0, 0, 3, 0, 0],
- #  [0, 0, 0, 0, 4, 0],
- #  [0, 0, 0, 0, 0, 5]])
- diag_embed_output2 = F.diag_embed(diag_embed_input, offset=-1, dim1=0,dim2=1 )
- print(diag_embed_output2)
- # Tensor(shape=[7, 7], dtype=int64, place=Place(cpu), stop_gradient=True,
- # [[0, 0, 0, 0, 0, 0, 0],
- #  [0, 0, 0, 0, 0, 0, 0],
- #  [0, 1, 0, 0, 0, 0, 0],
- #  [0, 0, 2, 0, 0, 0, 0],
- #  [0, 0, 0, 3, 0, 0, 0],
- #  [0, 0, 0, 0, 4, 0, 0],
- #  [0, 0, 0, 0, 0, 5, 0]])
- diag_embed_input_2dim = paddle.reshape(diag_embed_input,[2,3])
- print(diag_embed_input_2dim)
- # Tensor(shape=[2, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
- # [[0, 1, 2],
- #  [3, 4, 5]])
- diag_embed_output3 = F.diag_embed(diag_embed_input_2dim,offset= 0, dim1=0, dim2=2 )
- print(diag_embed_output3)
- # Tensor(shape=[3, 2, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
- # [[[0, 0, 0],
- #   [3, 0, 0]],
- #  [[0, 1, 0],
- #   [0, 4, 0]],
- #  [[0, 0, 2],
- #   [0, 0, 5]]])
+ >>> import paddle
+ >>> import paddle.nn.functional as F
+ >>> diag_embed_input = paddle.arange(6)
+ >>> diag_embed_output1 = F.diag_embed(diag_embed_input)
+ >>> print(diag_embed_output1)
+ Tensor(shape=[6, 6], dtype=int64, place=Place(cpu), stop_gradient=True,
+ [[0, 0, 0, 0, 0, 0],
+  [0, 1, 0, 0, 0, 0],
+  [0, 0, 2, 0, 0, 0],
+  [0, 0, 0, 3, 0, 0],
+  [0, 0, 0, 0, 4, 0],
+  [0, 0, 0, 0, 0, 5]])
+ >>> diag_embed_output2 = F.diag_embed(diag_embed_input, offset=-1, dim1=0, dim2=1)
+ >>> print(diag_embed_output2)
+ Tensor(shape=[7, 7], dtype=int64, place=Place(cpu), stop_gradient=True,
+ [[0, 0, 0, 0, 0, 0, 0],
+  [0, 0, 0, 0, 0, 0, 0],
+  [0, 1, 0, 0, 0, 0, 0],
+  [0, 0, 2, 0, 0, 0, 0],
+  [0, 0, 0, 3, 0, 0, 0],
+  [0, 0, 0, 0, 4, 0, 0],
+  [0, 0, 0, 0, 0, 5, 0]])
+ >>> diag_embed_input_2dim = paddle.reshape(diag_embed_input, [2, 3])
+ >>> print(diag_embed_input_2dim)
+ Tensor(shape=[2, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
+ [[0, 1, 2],
+  [3, 4, 5]])
+ >>> diag_embed_output3 = F.diag_embed(diag_embed_input_2dim, offset=0, dim1=0, dim2=2)
+ >>> print(diag_embed_output3)
+ Tensor(shape=[3, 2, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
+ [[[0, 0, 0],
+   [3, 0, 0]],
+  [[0, 1, 0],
+   [0, 4, 0]],
+  [[0, 0, 2],
+   [0, 0, 5]]])
"""
if not isinstance(input, Variable):
    input = assign(input)
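The offset/dim semantics in those examples can be checked directly: offset=k writes input[i] to position (i, i+k) of the embedded diagonal. A minimal sketch, not part of this patch:

.. code-block:: python

    import paddle
    import paddle.nn.functional as F

    # offset=1 puts a length-3 vector on the superdiagonal of a 4 x 4 matrix.
    vec = paddle.to_tensor([1, 2, 3])
    out = F.diag_embed(vec, offset=1)
    assert out.shape == [4, 4]
    assert int(out[0, 1]) == 1 and int(out[1, 2]) == 2 and int(out[2, 3]) == 3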
@@ -200,16 +198,16 @@ def sequence_mask(x, maxlen=None, dtype='int64', name=None):
Examples:
    .. code-block:: python
- import paddle
+ >>> import paddle
- lengths = paddle.to_tensor([10, 9, 8])
- mask = paddle.nn.functional.sequence_mask(lengths)
+ >>> lengths = paddle.to_tensor([10, 9, 8])
+ >>> mask = paddle.nn.functional.sequence_mask(lengths)
- print(mask)
- # Tensor(shape=[3, 10], dtype=int64, place=Place(gpu:0), stop_gradient=True,
- # [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
- #  [1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
- #  [1, 1, 1, 1, 1, 1, 1, 1, 0, 0]])
+ >>> print(mask)
+ Tensor(shape=[3, 10], dtype=int64, place=Place(cpu), stop_gradient=True,
+ [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
+  [1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
+  [1, 1, 1, 1, 1, 1, 1, 1, 0, 0]])
"""
@@ -296,14 +294,24 @@ def gather_tree(ids, parents):
Examples:
    .. code-block:: python
- import paddle
+ >>> import paddle
- ids = paddle.to_tensor([[[2, 2], [6, 1]], [[3, 9], [6, 1]], [[0, 1], [9, 0]]])
+ >>> ids = paddle.to_tensor([[[2, 2], [6, 1]], [[3, 9], [6, 1]], [[0, 1], [9, 0]]])
- parents = paddle.to_tensor([[[0, 0], [1, 1]], [[1, 0], [1, 0]], [[0, 0], [0, 1]]])
+ >>> parents = paddle.to_tensor([[[0, 0], [1, 1]], [[1, 0], [1, 0]], [[0, 0], [0, 1]]])
- final_sequences = paddle.nn.functional.gather_tree(ids, parents)
- # [[[2, 2], [1, 6]], [[3, 3], [6, 1]], [[0, 1], [9, 0]]]
+ >>> final_sequences = paddle.nn.functional.gather_tree(ids, parents)
+ >>> print(final_sequences)
+ Tensor(shape=[3, 2, 2], dtype=int64, place=Place(cpu), stop_gradient=True,
+ [[[2, 2],
+   [1, 6]],
+  [[3, 3],
+   [6, 1]],
+  [[0, 1],
+   [9, 0]]])
"""
if ids.ndim != 3:
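gather_tree keeps the last time step verbatim and re-gathers earlier steps along the parent chain, which is why [[2, 2], [6, 1]] becomes [[2, 2], [1, 6]] above. A small check, not part of this patch:

.. code-block:: python

    import paddle

    ids = paddle.to_tensor([[[2, 2], [6, 1]], [[3, 9], [6, 1]], [[0, 1], [9, 0]]])
    parents = paddle.to_tensor([[[0, 0], [1, 1]], [[1, 0], [1, 0]], [[0, 0], [0, 1]]])
    out = paddle.nn.functional.gather_tree(ids, parents)
    # The final step is returned unchanged; earlier ids are re-selected
    # by backtracking through `parents`.
    assert (out[-1] == ids[-1]).all()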
@@ -388,11 +396,11 @@ def temporal_shift(x, seg_num, shift_ratio=0.25, name=None, data_format="NCHW"):
Examples:
    .. code-block:: python
- import paddle
- import paddle.nn.functional as F
+ >>> import paddle
+ >>> import paddle.nn.functional as F
- input = paddle.randn([6, 4, 2, 2])
- out = F.temporal_shift(x=input, seg_num=2, shift_ratio=0.2)
+ >>> input = paddle.randn([6, 4, 2, 2])
+ >>> out = F.temporal_shift(x=input, seg_num=2, shift_ratio=0.2)
"""
if data_format not in ["NCHW", "NHWC"]:
    raise ValueError(
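temporal_shift never changes the shape: the N*T batch axis is viewed as (N, T) with T = seg_num, and a fraction of the channels (set by shift_ratio) is exchanged between neighbouring segments. A sketch, not part of this patch:

.. code-block:: python

    import paddle
    import paddle.nn.functional as F

    x = paddle.randn([6, 4, 2, 2])  # N*T = 6, so N = 3 when seg_num = 2
    out = F.temporal_shift(x=x, seg_num=2, shift_ratio=0.2)
    assert out.shape == x.shape  # only channel contents move, not the shape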
@@ -181,13 +181,12 @@ def flash_attention(
Examples:
    .. code-block:: python
- # required: skiptest
- import paddle
+ >>> import paddle
- q = paddle.rand((1, 128, 2, 16), dtype=paddle.float16)
+ >>> paddle.seed(1)
+ >>> q = paddle.rand((1, 128, 2, 16))
- output = paddle.nn.functional.flash_attention(q, q, q, 0.9, False, False)
- print(output)
+ >>> output = paddle.nn.functional.flash_attention.flash_attention(q, q, q, 0.9, False, False)
"""
head_dim = query.shape[3]
sdp_func_name = _select_sdp(head_dim)
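Since flash_attention itself needs a GPU build (hence the old `required: skiptest` tag), a plain reference of the same attention math for this self-attention case, ignoring dropout, may help; a sketch, not part of this patch:

.. code-block:: python

    import paddle

    q = paddle.rand((1, 8, 2, 16))          # (batch, seq_len, num_heads, head_dim)
    qt = paddle.transpose(q, [0, 2, 1, 3])  # (batch, num_heads, seq_len, head_dim)
    # softmax(Q K^T / sqrt(head_dim)) V, the quantity flash attention computes
    scores = paddle.matmul(qt, qt, transpose_y=True) / 16 ** 0.5
    out = paddle.matmul(paddle.nn.functional.softmax(scores, axis=-1), qt)
    print(out.shape)  # [1, 2, 8, 16]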
@@ -340,13 +339,12 @@ def flash_attn_unpadded(
Examples:
    .. code-block:: python
- # required: skiptest
- import paddle
- q = paddle.rand((1, 128, 2, 16), dtype=paddle.float16)
+ >>> import paddle
+ >>> paddle.seed(1)
+ >>> q = paddle.rand((1, 128, 2, 16))
- output = paddle.nn.functional.flash_attn_unpadded(q, q, q, 0.9, False, False)
- print(output)
+ >>> output = paddle.nn.functional.flash_attention.flash_attn_unpadded(q, q, q, 0.9, False, False)
+ >>> print(output)
"""
if in_dynamic_mode():
    (
@@ -158,24 +158,33 @@ class Uniform(UniformInitializer):
Examples:
    .. code-block:: python
- import paddle
- data = paddle.ones(shape=[3, 1, 2], dtype='float32')
- weight_attr = paddle.framework.ParamAttr(
-     name="linear_weight",
-     initializer=paddle.nn.initializer.Uniform(low=-0.5, high=0.5))
- bias_attr = paddle.framework.ParamAttr(
-     name="linear_bias",
-     initializer=paddle.nn.initializer.Uniform(low=-0.5, high=0.5))
- linear = paddle.nn.Linear(2, 2, weight_attr=weight_attr, bias_attr=bias_attr)
- # linear.weight: [[-0.46245047  0.05260676]
- #                 [ 0.38054508  0.29169726]]
- # linear.bias: [-0.2734719  0.23939109]
- res = linear(data)
- # res: [[[-0.3553773  0.5836951]]
- #       [[-0.3553773  0.5836951]]
- #       [[-0.3553773  0.5836951]]]
+ >>> import paddle
+ >>> paddle.seed(1)
+ >>> data = paddle.ones(shape=[3, 1, 2], dtype='float32')
+ >>> weight_attr = paddle.framework.ParamAttr(
+ ...     name="linear_weight",
+ ...     initializer=paddle.nn.initializer.Uniform(low=-0.5, high=0.5))
+ >>> bias_attr = paddle.framework.ParamAttr(
+ ...     name="linear_bias",
+ ...     initializer=paddle.nn.initializer.Uniform(low=-0.5, high=0.5))
+ >>> linear = paddle.nn.Linear(2, 2, weight_attr=weight_attr, bias_attr=bias_attr)
+ >>> print(linear.weight)
+ Parameter containing:
+ Tensor(shape=[2, 2], dtype=float32, place=Place(cpu), stop_gradient=False,
+ [[-0.48212373,  0.26492310],
+  [ 0.17605734, -0.45379421]])
+ >>> print(linear.bias)
+ Parameter containing:
+ Tensor(shape=[2], dtype=float32, place=Place(cpu), stop_gradient=False,
+ [-0.11236754,  0.46462214])
+ >>> res = linear(data)
+ >>> print(res)
+ Tensor(shape=[3, 1, 2], dtype=float32, place=Place(cpu), stop_gradient=False,
+ [[[-0.41843393,  0.27575102]],
+  [[-0.41843393,  0.27575102]],
+  [[-0.41843393,  0.27575102]]])
"""
def __init__(self, low=-1.0, high=1.0, name=None):
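Since the printed parameters depend on the seed, a seed-independent property is easier to check: every sampled value must lie in [low, high]. A sketch, not part of this patch:

.. code-block:: python

    import paddle

    paddle.seed(1)
    init = paddle.nn.initializer.Uniform(low=-0.5, high=0.5)
    linear = paddle.nn.Linear(2, 2, weight_attr=paddle.ParamAttr(initializer=init))
    w = linear.weight.numpy()
    assert (w >= -0.5).all() and (w <= 0.5).all()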
@@ -214,24 +214,33 @@ class XavierNormal(XavierInitializer):
Examples:
    .. code-block:: python
- import paddle
- data = paddle.ones(shape=[3, 1, 2], dtype='float32')
- weight_attr = paddle.framework.ParamAttr(
-     name="linear_weight",
-     initializer=paddle.nn.initializer.XavierNormal())
- bias_attr = paddle.framework.ParamAttr(
-     name="linear_bias",
-     initializer=paddle.nn.initializer.XavierNormal())
- linear = paddle.nn.Linear(2, 2, weight_attr=weight_attr, bias_attr=bias_attr)
- # inear.weight: [[ 0.06910077 -0.18103665]
- #               [-0.02546741 -1.0402188 ]]
- # linear.bias: [-0.5012929  0.12418364]
- res = linear(data)
- # res: [[[-0.4576595 -1.0970719]]
- #       [[-0.4576595 -1.0970719]]
- #       [[-0.4576595 -1.0970719]]]
+ >>> import paddle
+ >>> paddle.seed(1)
+ >>> data = paddle.ones(shape=[3, 1, 2], dtype='float32')
+ >>> weight_attr = paddle.framework.ParamAttr(
+ ...     name="linear_weight",
+ ...     initializer=paddle.nn.initializer.XavierNormal())
+ >>> bias_attr = paddle.framework.ParamAttr(
+ ...     name="linear_bias",
+ ...     initializer=paddle.nn.initializer.XavierNormal())
+ >>> linear = paddle.nn.Linear(2, 2, weight_attr=weight_attr, bias_attr=bias_attr)
+ >>> print(linear.weight)
+ Parameter containing:
+ Tensor(shape=[2, 2], dtype=float32, place=Place(cpu), stop_gradient=False,
+ [[-0.21607460,  0.08382989],
+  [ 0.29147008, -0.07049121]])
+ >>> print(linear.bias)
+ Parameter containing:
+ Tensor(shape=[2], dtype=float32, place=Place(cpu), stop_gradient=False,
+ [1.06076419, 0.87684733])
+ >>> res = linear(data)
+ >>> print(res)
+ Tensor(shape=[3, 1, 2], dtype=float32, place=Place(cpu), stop_gradient=False,
+ [[[1.13615966, 0.89018601]],
+  [[1.13615966, 0.89018601]],
+  [[1.13615966, 0.89018601]]])
"""
def __init__(self, fan_in=None, fan_out=None, name=None):
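XavierNormal draws from N(0, std^2) with std = sqrt(2 / (fan_in + fan_out)); with a larger layer the sample statistics are easy to eyeball. A sketch, not part of this patch:

.. code-block:: python

    import numpy as np
    import paddle

    paddle.seed(1)
    init = paddle.nn.initializer.XavierNormal()
    fc = paddle.nn.Linear(100, 200, weight_attr=paddle.ParamAttr(initializer=init))
    expected_std = np.sqrt(2.0 / (100 + 200))  # ~0.0816
    print(abs(fc.weight.numpy().std() - expected_std) < 0.01)  # True (statistically)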
@@ -266,24 +275,32 @@ class XavierUniform(XavierInitializer):
Examples:
    .. code-block:: python
- import paddle
- data = paddle.ones(shape=[3, 1, 2], dtype='float32')
- weight_attr = paddle.framework.ParamAttr(
-     name="linear_weight",
-     initializer=paddle.nn.initializer.XavierUniform())
- bias_attr = paddle.framework.ParamAttr(
-     name="linear_bias",
-     initializer=paddle.nn.initializer.XavierUniform())
- linear = paddle.nn.Linear(2, 2, weight_attr=weight_attr, bias_attr=bias_attr)
- # linear.weight: [[-0.04229349 -1.1248565 ]
- #                 [-0.10789523 -0.5938053 ]]
- # linear.bias: [ 1.1983747  -0.40201235]
- res = linear(data)
- # res: [[[ 1.0481861 -2.1206741]]
- #       [[ 1.0481861 -2.1206741]]
- #       [[ 1.0481861 -2.1206741]]]
+ >>> import paddle
+ >>> paddle.seed(1)
+ >>> data = paddle.ones(shape=[3, 1, 2], dtype='float32')
+ >>> weight_attr = paddle.framework.ParamAttr(
+ ...     name="linear_weight",
+ ...     initializer=paddle.nn.initializer.XavierUniform())
+ >>> bias_attr = paddle.framework.ParamAttr(
+ ...     name="linear_bias",
+ ...     initializer=paddle.nn.initializer.XavierUniform())
+ >>> linear = paddle.nn.Linear(2, 2, weight_attr=weight_attr, bias_attr=bias_attr)
+ >>> print(linear.weight)
+ Parameter containing:
+ Tensor(shape=[2, 2], dtype=float32, place=Place(cpu), stop_gradient=False,
+ [[-1.18095720,  0.64892638],
+  [ 0.43125069, -1.11156428]])
+ >>> print(linear.bias)
+ Parameter containing:
+ Tensor(shape=[2], dtype=float32, place=Place(cpu), stop_gradient=False,
+ [-0.27524316,  1.13808715])
+ >>> res = linear(data)
+ >>> print(res)
+ Tensor(shape=[3, 1, 2], dtype=float32, place=Place(cpu), stop_gradient=False,
+ [[[-1.02494967,  0.67544925]],
+  [[-1.02494967,  0.67544925]],
+  [[-1.02494967,  0.67544925]]])
"""
def __init__(self, fan_in=None, fan_out=None, name=None):
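XavierUniform instead samples U(-limit, limit) with limit = sqrt(6 / (fan_in + fan_out)), so a hard bound can be asserted. A sketch, not part of this patch:

.. code-block:: python

    import numpy as np
    import paddle

    paddle.seed(1)
    init = paddle.nn.initializer.XavierUniform()
    fc = paddle.nn.Linear(100, 200, weight_attr=paddle.ParamAttr(initializer=init))
    limit = np.sqrt(6.0 / (100 + 200))  # ~0.1414
    assert (np.abs(fc.weight.numpy()) <= limit).all()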