Unverified · Commit 98adc8f0 authored by Leo Chen, committed by GitHub

Dev/fix doc of some api (#28785)

* refine doc of bernoulli

* fix some problems

* fix unsqueeze

* fix squeeze

* fix doc
Parent f77a78cd
@@ -54,9 +54,11 @@ class GradScaler(AmpScaler):
             optimizer = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters())
             scaler = paddle.amp.GradScaler(init_loss_scaling=1024)
             data = paddle.rand([10, 3, 32, 32])
+
             with paddle.amp.auto_cast():
                 conv = model(data)
                 loss = paddle.mean(conv)
+
             scaled = scaler.scale(loss)  # scale the loss
             scaled.backward()  # do backward
             scaler.minimize(optimizer, scaled)  # update parameters
@@ -86,6 +88,7 @@ class GradScaler(AmpScaler):
             The scaled tensor or original tensor.
         Examples:
+
             .. code-block:: python
                 import paddle
@@ -94,9 +97,11 @@ class GradScaler(AmpScaler):
             optimizer = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters())
             scaler = paddle.amp.GradScaler(init_loss_scaling=1024)
             data = paddle.rand([10, 3, 32, 32])
+
             with paddle.amp.auto_cast():
                 conv = model(data)
                 loss = paddle.mean(conv)
+
             scaled = scaler.scale(loss)  # scale the loss
             scaled.backward()  # do backward
             scaler.minimize(optimizer, scaled)  # update parameters
@@ -118,6 +123,7 @@ class GradScaler(AmpScaler):
             kwargs: Keyword arguments, which will be forwarded to `optimizer.minimize()`.
         Examples:
+
             .. code-block:: python
                 import paddle
@@ -126,9 +132,11 @@ class GradScaler(AmpScaler):
             optimizer = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters())
             scaler = paddle.amp.GradScaler(init_loss_scaling=1024)
             data = paddle.rand([10, 3, 32, 32])
+
             with paddle.amp.auto_cast():
                 conv = model(data)
                 loss = paddle.mean(conv)
+
             scaled = scaler.scale(loss)  # scale the loss
             scaled.backward()  # do backward
             scaler.minimize(optimizer, scaled)  # update parameters
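For reference, the example in these hunks begins below the model definition, so it is not runnable as shown. A minimal self-contained sketch, assuming a small Conv2D model in place of the one defined above the hunk, and a device where AMP is available:

    import paddle

    # Assumption: a small conv model standing in for the one defined above the hunk.
    model = paddle.nn.Conv2D(3, 2, 3)
    optimizer = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters())
    scaler = paddle.amp.GradScaler(init_loss_scaling=1024)
    data = paddle.rand([10, 3, 32, 32])

    with paddle.amp.auto_cast():
        conv = model(data)
        loss = paddle.mean(conv)

    scaled = scaler.scale(loss)         # multiply the loss by the loss-scaling factor
    scaled.backward()                   # backprop through the scaled loss
    scaler.minimize(optimizer, scaled)  # unscale grads, apply the update, adjust the scale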
@@ -491,29 +491,27 @@ class L1Loss(fluid.dygraph.Layer):
         If `reduction` is ``'mean'`` or ``'sum'``, the shape of output loss is [1].
     Examples:
         .. code-block:: python
             import paddle
-            import numpy as np
-            paddle.disable_static()
-            input_data = np.array([[1.5, 0.8], [0.2, 1.3]]).astype("float32")
-            label_data = np.array([[1.7, 1], [0.4, 0.5]]).astype("float32")
-            input = paddle.to_tensor(input_data)
-            label = paddle.to_tensor(label_data)
+            input = paddle.to_tensor([[1.5, 0.8], [0.2, 1.3]])
+            label = paddle.to_tensor([[1.7, 1.0], [0.4, 0.5]])
             l1_loss = paddle.nn.loss.L1Loss()
             output = l1_loss(input, label)
-            print(output.numpy())
+            print(output)
             # [0.35]
             l1_loss = paddle.nn.loss.L1Loss(reduction='sum')
             output = l1_loss(input, label)
-            print(output.numpy())
+            print(output)
             # [1.4]
             l1_loss = paddle.nn.loss.L1Loss(reduction='none')
             output = l1_loss(input, label)
-            print(output.numpy())
+            print(output)
             # [[0.20000005 0.19999999]
             #  [0.2        0.79999995]]
     """
@@ -1001,7 +999,7 @@ class SmoothL1Loss(fluid.dygraph.Layer):
         is the same as the shape of input.
     Returns:
-        The tensor variable storing the smooth_l1_loss of input and label.
+        The tensor storing the smooth_l1_loss of input and label.
     Return type: Tensor.
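For comparison with L1Loss above, a minimal usage sketch of this layer (assuming the default reduction='mean' and delta=1.0 of the 2.0 API):

    import paddle

    input = paddle.to_tensor([[1.5, 0.8], [0.2, 1.3]])
    label = paddle.to_tensor([[1.7, 1.0], [0.4, 0.5]])

    loss_fn = paddle.nn.SmoothL1Loss()  # mean reduction by default
    output = loss_fn(input, label)
    print(output)  # every residual here is below delta, so the loss stays in its quadratic regime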
@@ -354,9 +354,6 @@ def roll(x, shifts, axis=None, name=None):
 def stack(x, axis=0, name=None):
     """
-    :alias_main: paddle.stack
-    :alias: paddle.stack, paddle.tensor.stack, paddle.tensor.manipulation.stack
-
     This OP stacks all the input tensors ``x`` along the ``axis`` dimension.
     All tensors must be of the same shape and same dtype.
@@ -423,13 +420,12 @@ def stack(x, axis=0, name=None):
             import paddle
-            paddle.disable_static()
             x1 = paddle.to_tensor([[1.0, 2.0]])
             x2 = paddle.to_tensor([[3.0, 4.0]])
             x3 = paddle.to_tensor([[5.0, 6.0]])
             out = paddle.stack([x1, x2, x3], axis=0)
             print(out.shape)  # [3, 1, 2]
-            print(out.numpy())
+            print(out)
             # [[[1., 2.]],
             #  [[3., 4.]],
             #  [[5., 6.]]]
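A quick way to sanity-check the stack semantics: stacking along axis is the same as unsqueezing each input at that axis and concatenating, as the sketch below shows (using only APIs that appear elsewhere in this diff):

    import paddle

    x1 = paddle.to_tensor([[1.0, 2.0]])
    x2 = paddle.to_tensor([[3.0, 4.0]])
    x3 = paddle.to_tensor([[5.0, 6.0]])

    stacked = paddle.stack([x1, x2, x3], axis=0)
    concatenated = paddle.concat([t.unsqueeze(0) for t in [x1, x2, x3]], axis=0)
    print(stacked.shape, concatenated.shape)          # both [3, 1, 2]
    print(bool(paddle.all(stacked == concatenated)))  # True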
@@ -459,34 +455,31 @@ def split(x, num_or_sections, axis=0, name=None):
     Example:
         .. code-block:: python
-            import numpy as np
             import paddle
-            # x is a Tensor which shape is [3, 9, 5]
-            x_np = np.random.random([3, 9, 5]).astype("int32")
-            x = paddle.to_tensor(x_np)
+            # x is a Tensor of shape [3, 9, 5]
+            x = paddle.rand([3, 9, 5])
-            out0, out1, out22 = paddle.split(x, num_or_sections=3, axis=1)
-            # out0.shape [3, 3, 5]
-            # out1.shape [3, 3, 5]
-            # out2.shape [3, 3, 5]
+            out0, out1, out2 = paddle.split(x, num_or_sections=3, axis=1)
+            print(out0.shape)  # [3, 3, 5]
+            print(out1.shape)  # [3, 3, 5]
+            print(out2.shape)  # [3, 3, 5]
             out0, out1, out2 = paddle.split(x, num_or_sections=[2, 3, 4], axis=1)
-            # out0.shape [3, 2, 5]
-            # out1.shape [3, 3, 5]
-            # out2.shape [3, 4, 5]
+            print(out0.shape)  # [3, 2, 5]
+            print(out1.shape)  # [3, 3, 5]
+            print(out2.shape)  # [3, 4, 5]
             out0, out1, out2 = paddle.split(x, num_or_sections=[2, 3, -1], axis=1)
-            # out0.shape [3, 2, 5]
-            # out1.shape [3, 3, 5]
-            # out2.shape [3, 4, 5]
+            print(out0.shape)  # [3, 2, 5]
+            print(out1.shape)  # [3, 3, 5]
+            print(out2.shape)  # [3, 4, 5]
-            # axis is negative, the real axis is (rank(x) + axis) which real
-            # value is 1.
+            # axis is negative, the real axis is (rank(x) + axis) = 1
             out0, out1, out2 = paddle.split(x, num_or_sections=3, axis=-2)
-            # out0.shape [3, 3, 5]
-            # out1.shape [3, 3, 5]
-            # out2.shape [3, 3, 5]
+            print(out0.shape)  # [3, 3, 5]
+            print(out1.shape)  # [3, 3, 5]
+            print(out2.shape)  # [3, 3, 5]
     """
     return paddle.fluid.layers.split(
         input=x, num_or_sections=num_or_sections, dim=axis, name=name)
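The negative-axis comment can be checked directly: for a rank-3 input, axis=-2 resolves to axis 1, so the two calls below should return identical splits:

    import paddle

    x = paddle.rand([3, 9, 5])
    a0, a1, a2 = paddle.split(x, num_or_sections=3, axis=-2)
    b0, b1, b2 = paddle.split(x, num_or_sections=3, axis=1)
    print(a0.shape, b0.shape)          # both [3, 3, 5]
    print(bool(paddle.all(a0 == b0)))  # True: same underlying slices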
@@ -494,9 +487,6 @@ def split(x, num_or_sections, axis=0, name=None):
 def squeeze(x, axis=None, name=None):
     """
-    :alias_main: paddle.squeeze
-    :alias: paddle.squeeze, paddle.tensor.squeeze, paddle.tensor.manipulation.squeeze
-
     This OP will squeeze the dimension(s) of size 1 of the input tensor x's shape.
     If axis is provided, only the dimension(s) of size 1 at the given axis will be removed.
@@ -553,11 +543,9 @@ def squeeze(x, axis=None, name=None):
             import paddle
-            paddle.disable_static()
             x = paddle.rand([5, 1, 10])
             output = paddle.squeeze(x, axis=1)
-            # output.shape [5, 10]
+            print(output.shape)  # [5, 10]
     """
     if axis is None:
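As the `if axis is None:` branch above suggests, omitting axis squeezes every size-1 dimension at once; a short sketch of both behaviors:

    import paddle

    x = paddle.rand([1, 5, 1, 10])
    print(paddle.squeeze(x, axis=0).shape)  # [5, 1, 10]: only the dim at axis 0 is removed
    print(paddle.squeeze(x).shape)          # [5, 10]: axis=None removes every size-1 dim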
@@ -695,9 +683,6 @@ def unique(x,
 def unsqueeze(x, axis, name=None):
     """
-    :alias_main: paddle.unsqueeze
-    :alias: paddle.unsqueeze, paddle.tensor.unsqueeze, paddle.tensor.manipulation.unsqueeze
-
     Insert single-dimensional entries into the shape of the input Tensor ``x``. Takes one
     required argument axis, a dimension or list of dimensions that will be inserted.
     Dimension indices in axis are as seen in the output tensor.
@@ -718,7 +703,6 @@ def unsqueeze(x, axis, name=None):
             import paddle
-            paddle.disable_static()
             x = paddle.rand([5, 10])
             print(x.shape)  # [5, 10]
@@ -728,7 +712,7 @@ def unsqueeze(x, axis, name=None):
             out2 = paddle.unsqueeze(x, axis=[0, 2])
             print(out2.shape)  # [1, 5, 1, 10]
-            axis = paddle.fluid.dygraph.to_variable([0, 1, 2])
+            axis = paddle.to_tensor([0, 1, 2])
             out3 = paddle.unsqueeze(x, axis=axis)
             print(out3.shape)  # [1, 1, 1, 5, 10]
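axis may also be negative, interpreted against the output shape; for the same [5, 10] input:

    import paddle

    x = paddle.rand([5, 10])
    out = paddle.unsqueeze(x, axis=-1)  # -1 means the last position of the output tensor
    print(out.shape)                    # [5, 10, 1]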
@@ -59,17 +59,18 @@ def bernoulli(x, name=None):
             import paddle
-            paddle.seed(100) # on CPU device
+            paddle.set_device('cpu')  # on CPU device
+            paddle.seed(100)
             x = paddle.rand([2,3])
-            print(x.numpy())
-            # [[0.5535528  0.20714243 0.01162981]
-            #  [0.51577556 0.36369765 0.2609165 ]]
+            print(x)
+            # [[0.55355281, 0.20714243, 0.01162981],
+            #  [0.51577556, 0.36369765, 0.26091650]]
             paddle.seed(200)  # on CPU device
             out = paddle.bernoulli(x)
-            print(out.numpy())
-            # [[0. 0. 0.]
-            #  [1. 1. 0.]]
+            print(out)
+            # [[1., 0., 1.],
+            #  [0., 1., 0.]]
     """