Commit 8f0a11c5 authored by chenguowei01

update cross_entropy_loss.py

Parent 4c9ad90f
@@ -107,34 +107,19 @@ class CrossEntropyLoss(nn.Layer):
         if len(label.shape) != len(logit.shape):
             label = paddle.unsqueeze(label, 1)
-        # logit = paddle.transpose(logit, [0, 2, 3, 1])
-        # label = paddle.transpose(label, [0, 2, 3, 1])
-        # loss = F.softmax_with_cross_entropy(
-        #     logit, label, ignore_index=self.ignore_index, axis=-1)
-        # loss = paddle.reduce_mean(loss)
-        # mask = label != self.ignore_index
-        # mask = paddle.cast(mask, 'float32')
-        # avg_loss = loss / (paddle.mean(mask) + self.EPS)
-        # label.stop_gradient = True
-        # mask.stop_gradient = True
-        # return avg_loss
         logit = fluid.layers.transpose(logit, [0, 2, 3, 1])
         label = fluid.layers.transpose(label, [0, 2, 3, 1])
         mask = label != self.ignore_index
         mask = fluid.layers.cast(mask, 'float32')
-        loss, probs = fluid.layers.softmax_with_cross_entropy(
-            logit,
-            label,
-            ignore_index=self.ignore_index,
-            return_softmax=True,
-            axis=-1)
+        loss = fluid.layers.softmax_with_cross_entropy(
+            logit, label, ignore_index=self.ignore_index, axis=-1)
+        # Without this mask multiplication, the loss does not converge when the label contains ignore_index.
         loss = loss * mask
-        avg_loss = fluid.layers.mean(loss) / (
-            fluid.layers.mean(mask) + self.EPS)
+        loss = fluid.layers.mean(loss)
+        avg_loss = loss / (fluid.layers.mean(mask) + self.EPS)
         label.stop_gradient = True
         mask.stop_gradient = True
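
The key idea on the new side of the diff: zero out the per-pixel loss wherever the label equals ignore_index, then renormalize by the fraction of valid pixels, so the average is taken over valid pixels only. Below is a minimal standalone sketch of that computation, assuming Paddle 2.x where paddle.nn.functional.softmax_with_cross_entropy is available; the tensor shapes, the 255 ignore value, and EPS = 1e-5 are illustrative assumptions, not values taken from this commit.

import paddle
import paddle.nn.functional as F

EPS = 1e-5    # assumed small constant; the class uses self.EPS
IGNORE = 255  # assumed ignore_index value, for illustration only

# Fake data: NCHW logits for 19 classes, int64 labels with some ignored pixels.
logit = paddle.randn([2, 19, 32, 32])
label = paddle.randint(0, 19, [2, 1, 32, 32])
label[:, :, :8, :] = IGNORE

# Move the class axis last so softmax_with_cross_entropy can use axis=-1,
# matching the transposes in the diff.
logit = paddle.transpose(logit, [0, 2, 3, 1])
label = paddle.transpose(label, [0, 2, 3, 1])

loss = F.softmax_with_cross_entropy(
    logit, label, ignore_index=IGNORE, axis=-1)

# Zero out ignored pixels and renormalize: mean(loss * mask) / mean(mask)
# equals sum(loss * mask) / sum(mask), i.e. the mean over valid pixels only.
mask = paddle.cast(label != IGNORE, 'float32')
loss = paddle.mean(loss * mask)
avg_loss = loss / (paddle.mean(mask) + EPS)

# As in the commit: labels and masks must not receive gradients.
label.stop_gradient = True
mask.stop_gradient = True
print(float(avg_loss))

The EPS term only guards against division by near-zero when every pixel in a batch is ignored; otherwise avg_loss is simply the mean loss over non-ignored pixels. The commit's own comment makes the motivation explicit: relying on ignore_index alone, without the explicit mask multiply and renormalization, left the loss unable to converge when labels contain ignored pixels.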