Unverified commit bd14926e, authored by cyberslack_lee and committed by GitHub

[xdoctest] reformat example code with google style in No.91-94 (#55832)

* test=docs_preview

* test=docs_preview

* test=docs_preview

* test=docs_preview
Parent 97ab6aa6
@@ -189,20 +189,20 @@ class BeamSearchDecoder(Decoder):

        .. code-block:: python

            >>> import numpy as np
            >>> import paddle
            >>> from paddle.nn import BeamSearchDecoder, dynamic_decode
            >>> from paddle.nn import GRUCell, Linear, Embedding
            >>> trg_embeder = Embedding(100, 32)
            >>> output_layer = Linear(32, 32)
            >>> decoder_cell = GRUCell(input_size=32, hidden_size=32)
            >>> decoder = BeamSearchDecoder(decoder_cell,
            ...                             start_token=0,
            ...                             end_token=1,
            ...                             beam_size=4,
            ...                             embedding_fn=trg_embeder,
            ...                             output_fn=output_layer)
    """

    def __init__(
@@ -1054,22 +1054,24 @@ def dynamic_decode(

        .. code-block:: python

            >>> import paddle
            >>> from paddle.nn import BeamSearchDecoder, dynamic_decode
            >>> from paddle.nn import GRUCell, Linear, Embedding
            >>> trg_embeder = Embedding(100, 32)
            >>> output_layer = Linear(32, 32)
            >>> decoder_cell = GRUCell(input_size=32, hidden_size=32)
            >>> decoder = BeamSearchDecoder(decoder_cell,
            ...                             start_token=0,
            ...                             end_token=1,
            ...                             beam_size=4,
            ...                             embedding_fn=trg_embeder,
            ...                             output_fn=output_layer)
            >>> encoder_output = paddle.ones((4, 8, 32), dtype=paddle.get_default_dtype())
            >>> outputs = dynamic_decode(decoder=decoder,
            ...                          inits=decoder_cell.get_initial_states(encoder_output),
            ...                          max_step_num=10)
            >>> print(outputs[0].shape)
            [4, 11, 4]
    """
    if in_dynamic_mode():
        return _dynamic_decode_imperative(
...
@@ -91,15 +91,15 @@ class BCEWithLogitsLoss(Layer):

        .. code-block:: python

            >>> import paddle
            >>> logit = paddle.to_tensor([5.0, 1.0, 3.0], dtype="float32")
            >>> label = paddle.to_tensor([1.0, 0.0, 1.0], dtype="float32")
            >>> bce_logit_loss = paddle.nn.BCEWithLogitsLoss()
            >>> output = bce_logit_loss(logit, label)
            >>> print(output)
            Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
                   0.45618808)
    """
@@ -303,50 +303,47 @@ class CrossEntropyLoss(Layer):

        .. code-block:: python

            >>> # hard labels
            >>> import paddle
            >>> paddle.seed(2023)
            >>> N=100
            >>> C=200
            >>> reduction='mean'
            >>> input = paddle.rand([N, C], dtype='float64')
            >>> label = paddle.randint(0, C, shape=[N], dtype='int64')
            >>> weight = paddle.rand([C], dtype='float64')
            >>> cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
            ...     weight=weight, reduction=reduction)
            >>> dy_ret = cross_entropy_loss(input, label)
            >>> print(dy_ret)
            Tensor(shape=[], dtype=float64, place=Place(cpu), stop_gradient=True,
                   5.33697682)

        .. code-block:: python

            >>> # soft labels
            >>> import paddle
            >>> paddle.seed(2023)
            >>> axis = -1
            >>> ignore_index = -100
            >>> N = 4
            >>> C = 3
            >>> shape = [N, C]
            >>> reduction='mean'
            >>> weight = None
            >>> logits = paddle.uniform(shape, dtype='float64', min=0.1, max=1.0)
            >>> labels = paddle.uniform(shape, dtype='float64', min=0.1, max=1.0)
            >>> labels /= paddle.sum(labels, axis=axis, keepdim=True)
            >>> paddle_loss_mean = paddle.nn.functional.cross_entropy(logits,
            ...                                                       labels,
            ...                                                       soft_label=True,
            ...                                                       axis=axis,
            ...                                                       weight=weight,
            ...                                                       reduction=reduction)
            >>> print(paddle_loss_mean)
            Tensor(shape=[], dtype=float64, place=Place(cpu), stop_gradient=True,
                   1.14554912)
    """
@@ -439,21 +436,25 @@ class HSigmoidLoss(Layer):
    Examples:
        .. code-block:: python

            >>> import paddle
            >>> paddle.set_device('cpu')
            >>> paddle.seed(2023)
            >>> input = paddle.uniform([4, 3])
            >>> print(input)
            Tensor(shape=[4, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
                   [[ 0.73167229,  0.04029441, -0.48078126],
                    [ 0.81050646, -0.15199822, -0.18717426],
                    [ 0.94041789,  0.48874724,  0.03570259],
                    [ 0.46585739,  0.95573163, -0.91368192]])
            >>> label = paddle.to_tensor([0, 1, 4, 5])
            >>> m = paddle.nn.HSigmoidLoss(3, 6)
            >>> out = m(input, label)
            >>> print(out)
            Tensor(shape=[4, 1], dtype=float32, place=Place(cpu), stop_gradient=False,
                   [[1.94512916],
                    [2.26129627],
                    [2.36135936],
                    [2.97453213]])
    """

    def __init__(
@@ -558,13 +559,14 @@ class MSELoss(Layer):

        .. code-block:: python

            >>> import paddle
            >>> mse_loss = paddle.nn.loss.MSELoss()
            >>> input = paddle.to_tensor([1.5])
            >>> label = paddle.to_tensor([1.7])
            >>> output = mse_loss(input, label)
            >>> print(output)
            Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
                   0.04000002)
    """
@@ -642,29 +644,29 @@ class L1Loss(Layer):
    Examples:
        .. code-block:: python

            >>> import paddle
            >>> input = paddle.to_tensor([[1.5, 0.8], [0.2, 1.3]])
            >>> label = paddle.to_tensor([[1.7, 1], [0.4, 0.5]])
            >>> l1_loss = paddle.nn.L1Loss()
            >>> output = l1_loss(input, label)
            >>> print(output)
            Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
                   0.34999999)

            >>> l1_loss = paddle.nn.L1Loss(reduction='sum')
            >>> output = l1_loss(input, label)
            >>> print(output)
            Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
                   1.39999998)

            >>> l1_loss = paddle.nn.L1Loss(reduction='none')
            >>> output = l1_loss(input, label)
            >>> print(output)
            Tensor(shape=[2, 2], dtype=float32, place=Place(cpu), stop_gradient=True,
                   [[0.20000005, 0.19999999],
                    [0.20000000, 0.79999995]])
    """
@@ -740,15 +742,15 @@ class BCELoss(Layer):
    Examples:
        .. code-block:: python

            >>> import paddle
            >>> input = paddle.to_tensor([0.5, 0.6, 0.7])
            >>> label = paddle.to_tensor([1.0, 0.0, 1.0])
            >>> bce_loss = paddle.nn.BCELoss()
            >>> output = bce_loss(input, label)
            >>> print(output)
            Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
                   0.65537095)
    """
@@ -840,20 +842,22 @@ class NLLLoss(Layer):
    Examples:
        .. code-block:: python

            >>> import paddle
            >>> nll_loss = paddle.nn.loss.NLLLoss()
            >>> log_softmax = paddle.nn.LogSoftmax(axis=1)
            >>> input = paddle.to_tensor([[0.88103855, 0.9908683 , 0.6226845 ],
            ...                           [0.53331435, 0.07999352, 0.8549948 ],
            ...                           [0.25879037, 0.39530203, 0.698465  ],
            ...                           [0.73427284, 0.63575995, 0.18827209],
            ...                           [0.05689114, 0.0862954 , 0.6325046 ]], "float32")
            >>> log_out = log_softmax(input)
            >>> label = paddle.to_tensor([0, 2, 1, 1, 0], "int64")
            >>> result = nll_loss(log_out, label)
            >>> print(result)
            Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
                   1.07202101)
    """
@@ -929,12 +933,15 @@ class PoissonNLLLoss(Layer):
    Examples:
        .. code-block:: python

            >>> import paddle
            >>> paddle.seed(2023)
            >>> poisson_nll_loss = paddle.nn.loss.PoissonNLLLoss()
            >>> input = paddle.randn([5, 2], dtype=paddle.float32)
            >>> label = paddle.randn([5, 2], dtype=paddle.float32)
            >>> loss = poisson_nll_loss(input, label)
            >>> print(loss)
            Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
                   1.52983975)
    """
@@ -1017,32 +1024,36 @@ class KLDivLoss(Layer):
    Examples:
        .. code-block:: python

            >>> import paddle
            >>> import paddle.nn as nn
            >>> shape = (5, 20)
            >>> x = paddle.uniform(shape, min=-10, max=10).astype('float32')
            >>> target = paddle.uniform(shape, min=-10, max=10).astype('float32')

            >>> # 'batchmean' reduction, loss shape will be []
            >>> kldiv_criterion = nn.KLDivLoss(reduction='batchmean')
            >>> pred_loss = kldiv_criterion(x, target)
            >>> print(pred_loss.shape)
            []

            >>> # 'mean' reduction, loss shape will be []
            >>> kldiv_criterion = nn.KLDivLoss(reduction='mean')
            >>> pred_loss = kldiv_criterion(x, target)
            >>> print(pred_loss.shape)
            []

            >>> # 'sum' reduction, loss shape will be []
            >>> kldiv_criterion = nn.KLDivLoss(reduction='sum')
            >>> pred_loss = kldiv_criterion(x, target)
            >>> print(pred_loss.shape)
            []

            >>> # 'none' reduction, loss shape is same with X shape
            >>> kldiv_criterion = nn.KLDivLoss(reduction='none')
            >>> pred_loss = kldiv_criterion(x, target)
            >>> print(pred_loss.shape)
            [5, 20]
    """
@@ -1099,16 +1110,16 @@ class MarginRankingLoss(Layer):

        .. code-block:: python

            >>> import paddle

            >>> input = paddle.to_tensor([[1, 2], [3, 4]], dtype="float32")
            >>> other = paddle.to_tensor([[2, 1], [2, 4]], dtype="float32")
            >>> label = paddle.to_tensor([[1, -1], [-1, -1]], dtype="float32")
            >>> margin_rank_loss = paddle.nn.MarginRankingLoss()
            >>> loss = margin_rank_loss(input, other, label)
            >>> print(loss)
            Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
                   0.75000000)
    """

    def __init__(self, margin=0.0, reduction='mean', name=None):
@@ -1155,50 +1166,41 @@ class CTCLoss(Layer):

        .. code-block:: python

            >>> # declarative mode
            >>> import paddle

            >>> # length of the longest logit sequence
            >>> max_seq_length = 4
            >>> # length of the longest label sequence
            >>> max_label_length = 3
            >>> # number of logit sequences
            >>> batch_size = 2
            >>> # class num
            >>> class_num = 3

            >>> log_probs = paddle.to_tensor([[[4.17021990e-01, 7.20324516e-01, 1.14374816e-04],
            ...                                [3.02332580e-01, 1.46755889e-01, 9.23385918e-02]],
            ...                               [[1.86260208e-01, 3.45560730e-01, 3.96767467e-01],
            ...                                [5.38816750e-01, 4.19194520e-01, 6.85219526e-01]],
            ...                               [[2.04452246e-01, 8.78117442e-01, 2.73875929e-02],
            ...                                [6.70467496e-01, 4.17304814e-01, 5.58689833e-01]],
            ...                               [[1.40386939e-01, 1.98101491e-01, 8.00744593e-01],
            ...                                [9.68261600e-01, 3.13424170e-01, 6.92322612e-01]],
            ...                               [[8.76389146e-01, 8.94606650e-01, 8.50442126e-02],
            ...                                [3.90547849e-02, 1.69830427e-01, 8.78142476e-01]]], dtype="float32")
            >>> labels = paddle.to_tensor([[1, 2, 2], [1, 2, 2]], dtype="int32")
            >>> input_lengths = paddle.to_tensor([5, 5], dtype="int64")
            >>> label_lengths = paddle.to_tensor([3, 3], dtype="int64")

            >>> loss = paddle.nn.CTCLoss(blank=0, reduction='none')(log_probs, labels, input_lengths, label_lengths)
            >>> print(loss)
            Tensor(shape=[2], dtype=float32, place=Place(cpu), stop_gradient=True,
                   [3.91798496, 2.90765214])

            >>> loss = paddle.nn.CTCLoss(blank=0, reduction='mean')(log_probs, labels, input_lengths, label_lengths)
            >>> print(loss)
            Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
                   1.13760614)
    """

    def __init__(self, blank=0, reduction='mean'):
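For the two reductions shown above: with ``reduction='mean'``, each sequence's loss is divided by its label length and the results are averaged over the batch, so the ``'mean'`` value can be reproduced from the ``'none'`` output (here ``(3.91798496/3 + 2.90765214/3) / 2 ≈ 1.13760614``). A minimal sketch of that relationship, reusing ``log_probs``, ``labels``, ``input_lengths``, and ``label_lengths`` from the example:

.. code-block:: python

    >>> # Reuses log_probs, labels, input_lengths, label_lengths defined above.
    >>> loss_none = paddle.nn.CTCLoss(blank=0, reduction='none')(
    ...     log_probs, labels, input_lengths, label_lengths)
    >>> loss_mean = paddle.nn.CTCLoss(blank=0, reduction='mean')(
    ...     log_probs, labels, input_lengths, label_lengths)
    >>> # 'mean' divides each per-sequence loss by its label length, then averages.
    >>> manual = (loss_none / label_lengths.astype('float32')).mean()
    >>> print(paddle.allclose(loss_mean, manual).item())
    True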
@@ -1247,33 +1249,33 @@ class RNNTLoss(Layer):
    Examples:
        .. code-block:: python

            >>> # declarative mode
            >>> import numpy as np
            >>> import paddle
            >>> from paddle.nn import RNNTLoss

            >>> fn = RNNTLoss(reduction='sum', fastemit_lambda=0.0)

            >>> acts = np.array([[[[0.1, 0.6, 0.1, 0.1, 0.1],
            ...                    [0.1, 0.1, 0.6, 0.1, 0.1],
            ...                    [0.1, 0.1, 0.2, 0.8, 0.1]],
            ...                   [[0.1, 0.6, 0.1, 0.1, 0.1],
            ...                    [0.1, 0.1, 0.2, 0.1, 0.1],
            ...                    [0.7, 0.1, 0.2, 0.1, 0.1]]]])
            >>> labels = [[1, 2]]

            >>> acts = paddle.to_tensor(acts, stop_gradient=False)

            >>> lengths = [acts.shape[1]] * acts.shape[0]
            >>> label_lengths = [len(l) for l in labels]
            >>> labels = paddle.to_tensor(labels, paddle.int32)
            >>> lengths = paddle.to_tensor(lengths, paddle.int32)
            >>> label_lengths = paddle.to_tensor(label_lengths, paddle.int32)

            >>> costs = fn(acts, labels, lengths, label_lengths)
            >>> print(costs)
            Tensor(shape=[], dtype=float64, place=Place(cpu), stop_gradient=False,
                   -2.85042444)
    """

    def __init__(
@@ -1346,13 +1348,15 @@ class SmoothL1Loss(Layer):
    Examples:
        .. code-block:: python

            >>> import paddle
            >>> paddle.seed(2023)
            >>> input = paddle.rand([3, 3]).astype("float32")
            >>> label = paddle.rand([3, 3]).astype("float32")
            >>> loss = paddle.nn.SmoothL1Loss()
            >>> output = loss(input, label)
            >>> print(output)
            Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
                   0.08307374)
    """

    def __init__(self, reduction='mean', delta=1.0, name=None):
@@ -1414,21 +1418,23 @@ class MultiLabelSoftMarginLoss(Layer):
    Examples:
        .. code-block:: python

            >>> import paddle
            >>> import paddle.nn as nn

            >>> input = paddle.to_tensor([[1, -2, 3], [0, -1, 2], [1, 0, 1]], dtype=paddle.float32)
            >>> label = paddle.to_tensor([[-1, 1, -1], [1, 1, 1], [1, -1, 1]], dtype=paddle.float32)

            >>> multi_label_soft_margin_loss = nn.MultiLabelSoftMarginLoss(reduction='none')
            >>> loss = multi_label_soft_margin_loss(input, label)
            >>> print(loss)
            Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True,
                   [3.49625897, 0.71111226, 0.43989015])

            >>> multi_label_soft_margin_loss = nn.MultiLabelSoftMarginLoss(reduction='mean')
            >>> loss = multi_label_soft_margin_loss(input, label)
            >>> print(loss)
            Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
                   1.54908717)
    """

    def __init__(self, weight=None, reduction="mean", name=None):
@@ -1512,24 +1518,26 @@ class HingeEmbeddingLoss(Layer):
    Examples:
        .. code-block:: python

            >>> import paddle
            >>> import paddle.nn as nn

            >>> input = paddle.to_tensor([[1, -2, 3], [0, -1, 2], [1, 0, 1]], dtype=paddle.float32)
            >>> # label elements in {1., -1.}
            >>> label = paddle.to_tensor([[-1, 1, -1], [1, 1, 1], [1, -1, 1]], dtype=paddle.float32)

            >>> hinge_embedding_loss = nn.HingeEmbeddingLoss(margin=1.0, reduction='none')
            >>> loss = hinge_embedding_loss(input, label)
            >>> print(loss)
            Tensor(shape=[3, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
                   [[ 0., -2.,  0.],
                    [ 0., -1.,  2.],
                    [ 1.,  1.,  1.]])

            >>> hinge_embedding_loss = nn.HingeEmbeddingLoss(margin=1.0, reduction='mean')
            >>> loss = hinge_embedding_loss(input, label)
            >>> print(loss)
            Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
                   0.22222222)
    """

    def __init__(self, margin=1.0, reduction="mean", name=None):
@@ -1595,23 +1603,29 @@ class CosineEmbeddingLoss(Layer):
    Examples:
        .. code-block:: python

            >>> import paddle

            >>> input1 = paddle.to_tensor([[1.6, 1.2, -0.5], [3.2, 2.6, -5.8]], 'float32')
            >>> input2 = paddle.to_tensor([[0.5, 0.5, -1.8], [2.3, -1.4, 1.1]], 'float32')
            >>> label = paddle.to_tensor([1, -1], 'int64')

            >>> cosine_embedding_loss = paddle.nn.CosineEmbeddingLoss(margin=0.5, reduction='mean')
            >>> output = cosine_embedding_loss(input1, input2, label)
            >>> print(output)
            Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
                   0.21155193)

            >>> cosine_embedding_loss = paddle.nn.CosineEmbeddingLoss(margin=0.5, reduction='sum')
            >>> output = cosine_embedding_loss(input1, input2, label)
            >>> print(output)
            Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
                   0.42310387)

            >>> cosine_embedding_loss = paddle.nn.CosineEmbeddingLoss(margin=0.5, reduction='none')
            >>> output = cosine_embedding_loss(input1, input2, label)
            >>> print(output)
            Tensor(shape=[2], dtype=float32, place=Place(cpu), stop_gradient=True,
                   [0.42310387, 0.        ])
    """
@@ -1703,21 +1717,23 @@ class TripletMarginWithDistanceLoss(Layer):
    Examples:
        .. code-block:: python

            >>> import paddle
            >>> from paddle.nn import TripletMarginWithDistanceLoss

            >>> input = paddle.to_tensor([[1, 5, 3], [0, 3, 2], [1, 4, 1]], dtype=paddle.float32)
            >>> positive = paddle.to_tensor([[5, 1, 2], [3, 2, 1], [3, -1, 1]], dtype=paddle.float32)
            >>> negative = paddle.to_tensor([[2, 1, -3], [1, 1, -1], [4, -2, 1]], dtype=paddle.float32)

            >>> triplet_margin_with_distance_loss = TripletMarginWithDistanceLoss(reduction='none')
            >>> loss = triplet_margin_with_distance_loss(input, positive, negative)
            >>> print(loss)
            Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True,
                   [0.        , 0.57496595, 0.        ])

            >>> triplet_margin_with_distance_loss = TripletMarginWithDistanceLoss(reduction='mean')
            >>> loss = triplet_margin_with_distance_loss(input, positive, negative)
            >>> print(loss)
            Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
                   0.19165532)
    """
@@ -1812,20 +1828,22 @@ class TripletMarginLoss(Layer):
    Examples:
        .. code-block:: python

            >>> import paddle

            >>> input = paddle.to_tensor([[1, 5, 3], [0, 3, 2], [1, 4, 1]], dtype=paddle.float32)
            >>> positive = paddle.to_tensor([[5, 1, 2], [3, 2, 1], [3, -1, 1]], dtype=paddle.float32)
            >>> negative = paddle.to_tensor([[2, 1, -3], [1, 1, -1], [4, -2, 1]], dtype=paddle.float32)

            >>> triplet_margin_loss = paddle.nn.TripletMarginLoss(reduction='none')
            >>> loss = triplet_margin_loss(input, positive, negative)
            >>> print(loss)
            Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True,
                   [0.        , 0.57496595, 0.        ])

            >>> triplet_margin_loss = paddle.nn.TripletMarginLoss(margin=1.0, swap=True, reduction='mean')
            >>> loss = triplet_margin_loss(input, positive, negative)
            >>> print(loss)
            Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
                   2.40039468)
    """
@@ -1924,15 +1942,17 @@ class MultiMarginLoss(Layer):
    Examples:
        .. code-block:: python

            >>> import paddle
            >>> import paddle.nn as nn

            >>> input = paddle.to_tensor([[1, -2, 3], [0, -1, 2], [1, 0, 1]], dtype=paddle.float32)
            >>> label = paddle.to_tensor([0, 1, 2], dtype=paddle.int32)

            >>> multi_margin_loss = nn.MultiMarginLoss(reduction='mean')
            >>> loss = multi_margin_loss(input, label)
            >>> print(loss)
            Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
                   1.11111104)
    """

    def __init__(
@@ -2003,31 +2023,30 @@ class SoftMarginLoss(Layer):
    Examples:
        .. code-block:: python

            >>> import paddle
            >>> paddle.seed(2023)

            >>> input = paddle.to_tensor([[0.5, 0.6, 0.7], [0.3, 0.5, 0.2]], 'float32')
            >>> label = paddle.to_tensor([[1.0, -1.0, 1.0], [-1.0, 1.0, 1.0]], 'float32')
            >>> soft_margin_loss = paddle.nn.SoftMarginLoss()
            >>> output = soft_margin_loss(input, label)
            >>> print(output)
            Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
                   0.64022040)

            >>> input_np = paddle.uniform(shape=(5, 5), min=0.1, max=0.8, dtype="float64")
            >>> label_np = paddle.randint(high=2, shape=(5, 5), dtype="int64")
            >>> label_np[label_np == 0] = -1
            >>> input = paddle.to_tensor(input_np)
            >>> label = paddle.to_tensor(label_np)
            >>> soft_margin_loss = paddle.nn.SoftMarginLoss(reduction='none')
            >>> output = soft_margin_loss(input, label)
            >>> print(output)
            Tensor(shape=[5, 5], dtype=float64, place=Place(cpu), stop_gradient=True,
                   [[1.10725628, 0.48778139, 0.56217249, 1.12581404, 0.51430043],
                    [0.90375795, 0.37761249, 0.43007557, 0.95089798, 0.43288319],
                    [1.16043599, 0.63015939, 0.51362715, 0.43617541, 0.57783301],
                    [0.81927846, 0.52558369, 0.59713908, 0.83100696, 0.50811616],
                    [0.82684205, 1.02064907, 0.50296995, 1.13461733, 0.93222519]])
    """

    def __init__(self, reduction='mean', name=None):
@@ -2100,16 +2119,23 @@ class GaussianNLLLoss(Layer):
    Examples::
        .. code-block:: python

            >>> import paddle
            >>> import paddle.nn as nn
            >>> paddle.seed(2023)

            >>> input = paddle.randn([5, 2], dtype=paddle.float32)
            >>> label = paddle.randn([5, 2], dtype=paddle.float32)
            >>> variance = paddle.ones([5, 2], dtype=paddle.float32)

            >>> gs_nll_loss = nn.GaussianNLLLoss(full=False, epsilon=1e-6, reduction='none')
            >>> loss = gs_nll_loss(input, label, variance)
            >>> print(loss)
            Tensor(shape=[5, 2], dtype=float32, place=Place(cpu), stop_gradient=True,
                   [[0.21808575, 1.43013096],
                    [1.05245590, 0.00394560],
                    [1.20861185, 0.00000062],
                    [0.56946373, 0.73300570],
                    [0.37142906, 0.12038800]])

    Note:
        The clamping of ``variance`` is ignored with respect to autograd, and so the
...
@@ -149,29 +149,31 @@ class ConvertibleQuantedLayer(Layer, metaclass=abc.ABCMeta):
    It defines some functions to convert quantizers and observers to quantize
    or dequantize operators that maintain the quantization parameters used
    during inference.

    Examples:
        .. code-block:: python

            >>> # Given codes in ./customized_quanter.py
            >>> class CustomizedQuantedLayer(ConvertibleQuantedLayer):
            ...     def __init__(self):
            ...         super().__init__()
            ...         self.weight_a = paddle.create_parameter(shape=[1], dtype='float32')
            ...         self.weight_b = paddle.create_parameter(shape=[1], dtype='float32')
            ...         self.quanter_for_weight_a = None
            ...         self.activation_weight = None
            ...
            ...     def forward(self, input):
            ...         qweight_a = self.quanter_for_weight_a(self.weight_a)
            ...         weight_b = self.weight_b
            ...         qinput = self.activation_weight(input)
            ...         # compute with qweight_a, weight_b and qinput.
            ...         return qweight_a * qinput + weight_b
            ...
            ...     def weights_to_quanters(self):
            ...         return [('weight_a', 'quanter_for_weight_a')]
            ...
            ...     def activation_quanters(self):
            ...         return ['activation_weight']
    """

    def __init__(self):
...