diff --git a/paddle/fluid/operators/matmul_op_xpu.cc b/paddle/fluid/operators/matmul_op_xpu.cc
index 103ac9add18876ec078d765bfef3b3fbce3a68af..14bef89a71b8b4ea96f15b5bf4664456045ccb90 100644
--- a/paddle/fluid/operators/matmul_op_xpu.cc
+++ b/paddle/fluid/operators/matmul_op_xpu.cc
@@ -127,10 +127,18 @@ class MatMulXPUKernel : public framework::OpKernel<T> {
     PADDLE_ENFORCE_EQ(
         mat_dim_a.width_, mat_dim_b.height_,
-        platform::errors::InvalidArgument("Shape mistake in matmul_op"));
-    PADDLE_ENFORCE_EQ(
-        mat_dim_a.batch_size_, mat_dim_b.batch_size_,
-        platform::errors::InvalidArgument("Shape mistake in matmul_op"));
+        platform::errors::InvalidArgument("Shape mistake in matmul_op, the "
+                                          "first tensor width must be same as "
+                                          "second tensor height, but received "
+                                          "width:%d, height:%d",
+                                          mat_dim_a.width_, mat_dim_b.height_));
+    PADDLE_ENFORCE_EQ(mat_dim_a.batch_size_, mat_dim_b.batch_size_,
+                      platform::errors::InvalidArgument(
+                          "Shape mistake in matmul_op, the two input "
+                          "tensor batch_size must be same, but received first "
+                          "tensor batch_size:%d, second "
+                          "tensor batch_size:%d",
+                          mat_dim_a.batch_size_, mat_dim_b.batch_size_));
 
     T alpha = static_cast<T>(context.Attr<float>("alpha"));
 
     auto &dev_ctx = context.template device_context<DeviceContext>();
@@ -251,12 +259,20 @@ class MatMulGradXPUKernel : public framework::OpKernel<T> {
       }
     }
-    PADDLE_ENFORCE_EQ(
-        mat_dim_a.width_, mat_dim_b.height_,
-        platform::errors::InvalidArgument("Shape mistake in matmul_grad_op"));
-    PADDLE_ENFORCE_EQ(
-        mat_dim_a.batch_size_, mat_dim_b.batch_size_,
-        platform::errors::InvalidArgument("Shape mistake in matmul_grad_op"));
+    PADDLE_ENFORCE_EQ(mat_dim_a.width_, mat_dim_b.height_,
+                      platform::errors::InvalidArgument(
+                          "Shape mistake in matmul_grad_op, the "
+                          "first tensor width must be same as second tensor "
+                          "height, but received "
+                          "width:%d, height:%d",
+                          mat_dim_a.width_, mat_dim_b.height_));
+    PADDLE_ENFORCE_EQ(mat_dim_a.batch_size_, mat_dim_b.batch_size_,
+                      platform::errors::InvalidArgument(
+                          "Shape mistake in matmul_grad_op, the two input "
+                          "tensor batch_size must be same, but received first "
+                          "tensor batch_size:%d, second "
+                          "tensor batch_size:%d",
+                          mat_dim_a.batch_size_, mat_dim_b.batch_size_));
 
     T alpha = static_cast<T>(context.Attr<float>("alpha"));