Commit 527d2e24 authored by smallv0221

change api back

Switch the example loss classes from paddle.nn.functional.cross_entropy back to paddle.nn.functional.softmax_with_cross_entropy.

Parent 34e4d191
@@ -77,8 +77,8 @@ class CrossEntropyLossForLm(nn.Layer):
     def forward(self, y, label):
         label = paddle.unsqueeze(label, axis=2)
-        loss = paddle.nn.functional.cross_entropy(
-            input=y, label=label, reduction='none')
+        loss = paddle.nn.functional.softmax_with_cross_entropy(
+            logits=y, label=label, soft_label=False)
         loss = paddle.squeeze(loss, axis=[2])
         loss = paddle.mean(loss, axis=[0])
         loss = paddle.sum(loss)
......
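The two calls agree here because `softmax_with_cross_entropy` always returns the unreduced element-wise loss, matching `cross_entropy` with `reduction='none'`, so the `squeeze`/`mean`/`sum` chain after the call needs no change. A minimal sketch of the equivalence (Paddle 2.x assumed; the tensor sizes are hypothetical):

```python
# Minimal sketch: the new call returns the same per-token loss as the
# old call with reduction='none'. Sizes below are hypothetical.
import paddle

batch_size, seq_len, vocab_size = 4, 16, 100
y = paddle.randn([batch_size, seq_len, vocab_size])           # logits
label = paddle.randint(0, vocab_size, [batch_size, seq_len])  # token ids
label = paddle.unsqueeze(label, axis=2)                       # [N, T, 1]

# Old call: element-wise loss via reduction='none'.
old = paddle.nn.functional.cross_entropy(
    input=y, label=label, reduction='none')
# New call: softmax_with_cross_entropy is always unreduced.
new = paddle.nn.functional.softmax_with_cross_entropy(
    logits=y, label=label, soft_label=False)

print(paddle.allclose(old, new))  # expected: True
```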
@@ -54,10 +54,12 @@ class CrossEntropyLossForSQuAD(paddle.nn.Layer):
         start_position, end_position = label
         start_position = paddle.unsqueeze(start_position, axis=-1)
         end_position = paddle.unsqueeze(end_position, axis=-1)
-        start_loss = paddle.nn.functional.cross_entropy(
-            input=start_logits, label=start_position)
-        end_loss = paddle.nn.functional.cross_entropy(
-            input=end_logits, label=end_position)
+        start_loss = paddle.nn.functional.softmax_with_cross_entropy(
+            logits=start_logits, label=start_position, soft_label=False)
+        start_loss = paddle.mean(start_loss)
+        end_loss = paddle.nn.functional.softmax_with_cross_entropy(
+            logits=end_logits, label=end_position, soft_label=False)
+        end_loss = paddle.mean(end_loss)
         loss = (start_loss + end_loss) / 2
         return loss
......
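In the SQuAD head the reduction changes: `cross_entropy` defaults to `reduction='mean'` and returns a scalar, while `softmax_with_cross_entropy` returns a per-example loss, which is why the diff adds the explicit `paddle.mean` calls. A minimal sketch, again assuming Paddle 2.x with hypothetical sizes:

```python
# Minimal sketch: an explicit paddle.mean over the unreduced loss matches
# cross_entropy's default reduction='mean'. Sizes below are hypothetical.
import paddle

batch_size, seq_len = 4, 384
start_logits = paddle.randn([batch_size, seq_len])
start_position = paddle.randint(0, seq_len, [batch_size, 1])

# Old call: scalar loss, mean-reduced by default.
old = paddle.nn.functional.cross_entropy(
    input=start_logits, label=start_position)
# New call: per-example loss, reduced by the mean added in the diff.
new = paddle.mean(
    paddle.nn.functional.softmax_with_cross_entropy(
        logits=start_logits, label=start_position, soft_label=False))

print(paddle.allclose(old, new))  # expected: True
```

The same change is applied verbatim to a second copy of CrossEntropyLossForSQuAD below.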
@@ -51,10 +51,12 @@ class CrossEntropyLossForSQuAD(paddle.nn.Layer):
         start_position, end_position = label
         start_position = paddle.unsqueeze(start_position, axis=-1)
         end_position = paddle.unsqueeze(end_position, axis=-1)
-        start_loss = paddle.nn.functional.cross_entropy(
-            input=start_logits, label=start_position)
-        end_loss = paddle.nn.functional.cross_entropy(
-            input=end_logits, label=end_position)
+        start_loss = paddle.nn.functional.softmax_with_cross_entropy(
+            logits=start_logits, label=start_position, soft_label=False)
+        start_loss = paddle.mean(start_loss)
+        end_loss = paddle.nn.functional.softmax_with_cross_entropy(
+            logits=end_logits, label=end_position, soft_label=False)
+        end_loss = paddle.mean(end_loss)
         loss = (start_loss + end_loss) / 2
         return loss
......