From fee424411a8a18fe66f8b1a4cb5db6964c717776 Mon Sep 17 00:00:00 2001
From: wawltor
Date: Mon, 11 Jan 2021 10:30:30 +0800
Subject: [PATCH] just add the op error message for the matmul xpu (#30246)

add the op error message for the matmul xpu
---
 paddle/fluid/operators/matmul_op_xpu.cc | 36 ++++++++++++++++++-------
 1 file changed, 26 insertions(+), 10 deletions(-)

diff --git a/paddle/fluid/operators/matmul_op_xpu.cc b/paddle/fluid/operators/matmul_op_xpu.cc
index 103ac9add18..14bef89a71b 100644
--- a/paddle/fluid/operators/matmul_op_xpu.cc
+++ b/paddle/fluid/operators/matmul_op_xpu.cc
@@ -127,10 +127,18 @@ class MatMulXPUKernel : public framework::OpKernel<T> {
     PADDLE_ENFORCE_EQ(
         mat_dim_a.width_, mat_dim_b.height_,
-        platform::errors::InvalidArgument("Shape mistake in matmul_op"));
-    PADDLE_ENFORCE_EQ(
-        mat_dim_a.batch_size_, mat_dim_b.batch_size_,
-        platform::errors::InvalidArgument("Shape mistake in matmul_op"));
+        platform::errors::InvalidArgument("Shape mistake in matmul_op, the "
+                                          "first tensor width must be same as "
+                                          "second tensor height, but received "
+                                          "width:%d, height:%d",
+                                          mat_dim_a.width_, mat_dim_b.height_));
+    PADDLE_ENFORCE_EQ(mat_dim_a.batch_size_, mat_dim_b.batch_size_,
+                      platform::errors::InvalidArgument(
+                          "Shape mistake in matmul_op, the two input "
+                          "tensor batch_size must be same, but received first "
+                          "tensor batch_size:%d, second "
+                          "tensor batch_size:%d",
+                          mat_dim_a.batch_size_, mat_dim_b.batch_size_));
 
     T alpha = static_cast<T>(context.Attr<float>("alpha"));
 
     auto &dev_ctx = context.template device_context<DeviceContext>();
@@ -251,12 +259,20 @@ class MatMulGradXPUKernel : public framework::OpKernel<T> {
       }
     }
 
-    PADDLE_ENFORCE_EQ(
-        mat_dim_a.width_, mat_dim_b.height_,
-        platform::errors::InvalidArgument("Shape mistake in matmul_grad_op"));
-    PADDLE_ENFORCE_EQ(
-        mat_dim_a.batch_size_, mat_dim_b.batch_size_,
-        platform::errors::InvalidArgument("Shape mistake in matmul_grad_op"));
+    PADDLE_ENFORCE_EQ(mat_dim_a.width_, mat_dim_b.height_,
+                      platform::errors::InvalidArgument(
+                          "Shape mistake in matmul_grad_op, the "
+                          "first tensor width must be same as second tensor "
+                          "height, but received "
+                          "width:%d, height:%d",
+                          mat_dim_a.width_, mat_dim_b.height_));
+    PADDLE_ENFORCE_EQ(mat_dim_a.batch_size_, mat_dim_b.batch_size_,
+                      platform::errors::InvalidArgument(
+                          "Shape mistake in matmul_grad_op, the two input "
+                          "tensor batch_size must be same, but received first "
+                          "tensor batch_size:%d, second "
+                          "tensor batch_size:%d",
+                          mat_dim_a.batch_size_, mat_dim_b.batch_size_));
 
     T alpha = static_cast<T>(context.Attr<float>("alpha"));
 
-- 
GitLab