未验证 提交 f4019077 编写于 作者: L Li Fuchen 提交者: GitHub

modified sample code of CTCLoss & ctc_loss by removing disable_static() &...

modified sample code of CTCLoss & ctc_loss by removing disable_static() & print([.*].numpy()) & alias, test=document_fix (#28403)
上级 05114693
...@@ -1112,7 +1112,6 @@ def ctc_loss(log_probs, ...@@ -1112,7 +1112,6 @@ def ctc_loss(log_probs,
input_lengths = np.array([5, 5]).astype("int64") input_lengths = np.array([5, 5]).astype("int64")
label_lengths = np.array([3, 3]).astype("int64") label_lengths = np.array([3, 3]).astype("int64")
paddle.disable_static()
log_probs = paddle.to_tensor(log_probs) log_probs = paddle.to_tensor(log_probs)
labels = paddle.to_tensor(labels) labels = paddle.to_tensor(labels)
input_lengths = paddle.to_tensor(input_lengths) input_lengths = paddle.to_tensor(input_lengths)
...@@ -1123,14 +1122,14 @@ def ctc_loss(log_probs, ...@@ -1123,14 +1122,14 @@ def ctc_loss(log_probs,
label_lengths, label_lengths,
blank=0, blank=0,
reduction='none') reduction='none')
print(loss.numpy()) #[3.9179852 2.9076521] print(loss) #[3.9179852 2.9076521]
loss = F.ctc_loss(log_probs, labels, loss = F.ctc_loss(log_probs, labels,
input_lengths, input_lengths,
label_lengths, label_lengths,
blank=0, blank=0,
reduction='mean') reduction='mean')
print(loss.numpy()) #[1.1376063] print(loss) #[1.1376063]
""" """
......
...@@ -883,8 +883,6 @@ class MarginRankingLoss(fluid.dygraph.Layer): ...@@ -883,8 +883,6 @@ class MarginRankingLoss(fluid.dygraph.Layer):
class CTCLoss(fluid.dygraph.Layer): class CTCLoss(fluid.dygraph.Layer):
""" """
:alias_main: paddle.nn.CTCLoss
:alias: paddle.nn.CTCLoss, paddle.nn.layer.CTCLoss, paddle.nn.layer.loss.CTCLoss
An operator integrating the open source Warp-CTC library (https://github.com/baidu-research/warp-ctc) An operator integrating the open source Warp-CTC library (https://github.com/baidu-research/warp-ctc)
to compute Connectionist Temporal Classification (CTC) loss. to compute Connectionist Temporal Classification (CTC) loss.
...@@ -941,7 +939,6 @@ class CTCLoss(fluid.dygraph.Layer): ...@@ -941,7 +939,6 @@ class CTCLoss(fluid.dygraph.Layer):
input_lengths = np.array([5, 5]).astype("int64") input_lengths = np.array([5, 5]).astype("int64")
label_lengths = np.array([3, 3]).astype("int64") label_lengths = np.array([3, 3]).astype("int64")
paddle.disable_static()
log_probs = paddle.to_tensor(log_probs) log_probs = paddle.to_tensor(log_probs)
labels = paddle.to_tensor(labels) labels = paddle.to_tensor(labels)
input_lengths = paddle.to_tensor(input_lengths) input_lengths = paddle.to_tensor(input_lengths)
...@@ -950,12 +947,12 @@ class CTCLoss(fluid.dygraph.Layer): ...@@ -950,12 +947,12 @@ class CTCLoss(fluid.dygraph.Layer):
loss = paddle.nn.CTCLoss(blank=0, reduction='none')(log_probs, labels, loss = paddle.nn.CTCLoss(blank=0, reduction='none')(log_probs, labels,
input_lengths, input_lengths,
label_lengths) label_lengths)
print(loss.numpy()) #[3.9179852 2.9076521] print(loss) #[3.9179852 2.9076521]
loss = paddle.nn.CTCLoss(blank=0, reduction='mean')(log_probs, labels, loss = paddle.nn.CTCLoss(blank=0, reduction='mean')(log_probs, labels,
input_lengths, input_lengths,
label_lengths) label_lengths)
print(loss.numpy()) #[1.1376063] print(loss) #[1.1376063]
""" """
def __init__(self, blank=0, reduction='mean'): def __init__(self, blank=0, reduction='mean'):
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册