From 527d2e24fc00c5a4a24c387754662e62235a44ab Mon Sep 17 00:00:00 2001
From: smallv0221 <397551318@qq.com>
Date: Fri, 18 Dec 2020 09:23:21 +0000
Subject: [PATCH] change api back

---
 PaddleNLP/examples/language_model/rnnlm/model.py      |  4 ++--
 .../DuReader-robust/run_du.py                         | 10 ++++++----
 .../machine_reading_comprehension/SQuAD/run_squad.py  | 10 ++++++----
 3 files changed, 14 insertions(+), 10 deletions(-)

diff --git a/PaddleNLP/examples/language_model/rnnlm/model.py b/PaddleNLP/examples/language_model/rnnlm/model.py
index 35366cf1..4cffbcbb 100644
--- a/PaddleNLP/examples/language_model/rnnlm/model.py
+++ b/PaddleNLP/examples/language_model/rnnlm/model.py
@@ -77,8 +77,8 @@ class CrossEntropyLossForLm(nn.Layer):
 
     def forward(self, y, label):
         label = paddle.unsqueeze(label, axis=2)
-        loss = paddle.nn.functional.cross_entropy(
-            input=y, label=label, reduction='none')
+        loss = paddle.nn.functional.softmax_with_cross_entropy(
+            logits=y, label=label, soft_label=False)
         loss = paddle.squeeze(loss, axis=[2])
         loss = paddle.mean(loss, axis=[0])
         loss = paddle.sum(loss)
diff --git a/PaddleNLP/examples/machine_reading_comprehension/DuReader-robust/run_du.py b/PaddleNLP/examples/machine_reading_comprehension/DuReader-robust/run_du.py
index 6cb0b83d..feaed689 100644
--- a/PaddleNLP/examples/machine_reading_comprehension/DuReader-robust/run_du.py
+++ b/PaddleNLP/examples/machine_reading_comprehension/DuReader-robust/run_du.py
@@ -54,10 +54,12 @@ class CrossEntropyLossForSQuAD(paddle.nn.Layer):
         start_position, end_position = label
         start_position = paddle.unsqueeze(start_position, axis=-1)
         end_position = paddle.unsqueeze(end_position, axis=-1)
-        start_loss = paddle.nn.functional.cross_entropy(
-            input=start_logits, label=start_position)
-        end_loss = paddle.nn.functional.cross_entropy(
-            input=end_logits, label=end_position)
+        start_loss = paddle.nn.functional.softmax_with_cross_entropy(
+            logits=start_logits, label=start_position, soft_label=False)
+        start_loss = paddle.mean(start_loss)
+        end_loss = paddle.nn.functional.softmax_with_cross_entropy(
+            logits=end_logits, label=end_position, soft_label=False)
+        end_loss = paddle.mean(end_loss)
         loss = (start_loss + end_loss) / 2
         return loss
 
diff --git a/PaddleNLP/examples/machine_reading_comprehension/SQuAD/run_squad.py b/PaddleNLP/examples/machine_reading_comprehension/SQuAD/run_squad.py
index e73087e9..04a11573 100644
--- a/PaddleNLP/examples/machine_reading_comprehension/SQuAD/run_squad.py
+++ b/PaddleNLP/examples/machine_reading_comprehension/SQuAD/run_squad.py
@@ -51,10 +51,12 @@ class CrossEntropyLossForSQuAD(paddle.nn.Layer):
         start_position, end_position = label
         start_position = paddle.unsqueeze(start_position, axis=-1)
         end_position = paddle.unsqueeze(end_position, axis=-1)
-        start_loss = paddle.nn.functional.cross_entropy(
-            input=start_logits, label=start_position)
-        end_loss = paddle.nn.functional.cross_entropy(
-            input=end_logits, label=end_position)
+        start_loss = paddle.nn.functional.softmax_with_cross_entropy(
+            logits=start_logits, label=start_position, soft_label=False)
+        start_loss = paddle.mean(start_loss)
+        end_loss = paddle.nn.functional.softmax_with_cross_entropy(
+            logits=end_logits, label=end_position, soft_label=False)
+        end_loss = paddle.mean(end_loss)
         loss = (start_loss + end_loss) / 2
         return loss
 
--
GitLab
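
Not part of the patch itself, but a minimal sketch of the behavioral point behind the added paddle.mean() lines above: paddle.nn.functional.softmax_with_cross_entropy returns an unreduced per-example loss, while paddle.nn.functional.cross_entropy applies mean reduction by default, so reverting the API requires reducing explicitly to keep a scalar loss. The tensor shapes and variable names below are illustrative assumptions, not taken from the repository.

# Illustrative sketch only (assumed shapes); runs on Paddle 2.0.
import paddle
import paddle.nn.functional as F

paddle.seed(2020)
logits = paddle.randn([4, 10])                        # [batch_size, num_classes]
label = paddle.randint(0, 10, [4, 1], dtype='int64')  # [batch_size, 1]

# softmax_with_cross_entropy returns a per-example loss of shape [4, 1];
# an explicit mean is needed to get a scalar, which is what the patch adds.
loss_unreduced = F.softmax_with_cross_entropy(
    logits=logits, label=label, soft_label=False)
loss_a = paddle.mean(loss_unreduced)

# cross_entropy reduces with 'mean' by default, so it already yields a scalar.
loss_b = F.cross_entropy(input=logits, label=label)

print(loss_a.numpy(), loss_b.numpy())  # both should print the same value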