diff --git a/paddle/fluid/operators/softmax_with_cross_entropy_op.h b/paddle/fluid/operators/softmax_with_cross_entropy_op.h
index b9eaa9bece8a234d13acf8072e650caae4636ff5..e513b99b876240a6ce2054c1eb9dcb4c922524f6 100644
--- a/paddle/fluid/operators/softmax_with_cross_entropy_op.h
+++ b/paddle/fluid/operators/softmax_with_cross_entropy_op.h
@@ -61,7 +61,7 @@ class SoftmaxWithCrossEntropyKernel : public framework::OpKernel {
     PADDLE_ENFORCE_GT(
         n, 0,
         platform::errors::InvalidArgument(
             "The size of axis should be larger than 0, but received "
-            "axis size is %d.",
+            "SizeToAxis of softmax is %d.",
             n));
     const int d = SizeFromAxis(axis, softmax->dims());
@@ -110,7 +110,7 @@ class SoftmaxWithCrossEntropyKernel : public framework::OpKernel {
     PADDLE_ENFORCE_GT(
         n, 0,
         platform::errors::InvalidArgument(
             "The size of axis should be larger than 0, but received "
-            "axis size is %d.",
+            "SizeToAxis of logits is %d.",
             n));
     const int d = SizeFromAxis(axis, logits->dims());
@@ -162,7 +162,7 @@ class SoftmaxWithCrossEntropyGradKernel : public framework::OpKernel {
     PADDLE_ENFORCE_GT(
         n, 0,
         platform::errors::InvalidArgument(
             "The size of axis should be larger than 0, but received "
-            "axis size is %d.",
+            "SizeToAxis of logit_grad is %d.",
             n));
     const int d = SizeFromAxis(axis, logit_grad->dims());
diff --git a/python/paddle/nn/functional/loss.py b/python/paddle/nn/functional/loss.py
index fdbc5b9c47669a28405c0e12485ab71a320a6579..6fcd60ad1135b8617fc9be94f1cbe738c230de73 100755
--- a/python/paddle/nn/functional/loss.py
+++ b/python/paddle/nn/functional/loss.py
@@ -1644,6 +1644,9 @@ def cross_entropy(input,
                                  ignore_index)
 
     input_dims = len(list(input.shape))
+    if input_dims == 0:
+        raise ValueError('The dimension of input should be larger than zero!')
+
     label_dims = len(list(label.shape))
     if input_dims - 1 != label_dims and input_dims != label_dims:
         raise ValueError(
@@ -2016,3 +2019,20 @@ def sigmoid_focal_loss(logit,
         loss = paddle.sum(loss, name=name)
 
     return loss
+
+
+if __name__ == "__main__":
+    input_arr = np.array([], dtype=np.float32)
+    input = paddle.to_tensor(np.reshape(input_arr, (0, 0)), dtype='float32')
+
+    label = paddle.to_tensor([], dtype='float32')
+
+    weight = paddle.to_tensor([], dtype='float32')
+
+    result = cross_entropy(
+        input,
+        label,
+        weight=weight,
+        ignore_index=-100,
+        soft_label=False,
+        axis=-1)
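
Reviewer note: below is a minimal, hypothetical repro sketch (not part of the patch) of how the two new guards surface to a caller. It assumes a paddle build with this patch applied; the tensor names and the broad `except` are illustrative, and the exact Python exception type raised by the kernel-side `PADDLE_ENFORCE_GT` may vary by execution mode.

```python
# Hypothetical repro sketch, assuming this patch is applied.
# A (0, 0)-shaped input has len(shape) == 2, so it passes the new
# Python-side input_dims check in cross_entropy and instead trips the
# kernel-side PADDLE_ENFORCE_GT, which now reports
# "SizeToAxis of logits is 0." rather than the generic "axis size is 0."
import numpy as np
import paddle
import paddle.nn.functional as F

logits = paddle.to_tensor(np.empty((0, 0), dtype=np.float32))
labels = paddle.to_tensor(np.empty((0,), dtype=np.float32))

try:
    F.cross_entropy(logits, labels, soft_label=False, axis=-1)
except Exception as err:  # exception type from the C++ check may vary
    print(type(err).__name__, err)
```

A truly 0-D input (len(shape) == 0) would instead be rejected earlier by the new Python-side ValueError, before the operator is ever invoked.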