From b6a4349dd40eee17e485e149e09af4b29caa3d66 Mon Sep 17 00:00:00 2001
From: wawltor
Date: Fri, 18 Sep 2020 10:14:47 +0800
Subject: [PATCH] fix the error message for the math dir

https://github.com/PaddlePaddle/Paddle/pull/27332
---
 paddle/fluid/operators/math/beam_search.cc |   5 +-
 paddle/fluid/operators/math/beam_search.cu |   5 +-
 paddle/fluid/operators/math/blas.cc        |   6 +-
 paddle/fluid/operators/math/blas_impl.cu.h |  26 ++--
 paddle/fluid/operators/math/blas_impl.h    | 148 ++++++++++++++++-----
 5 files changed, 146 insertions(+), 44 deletions(-)

diff --git a/paddle/fluid/operators/math/beam_search.cc b/paddle/fluid/operators/math/beam_search.cc
index 0155ef188ef..550de1aadde 100644
--- a/paddle/fluid/operators/math/beam_search.cc
+++ b/paddle/fluid/operators/math/beam_search.cc
@@ -87,7 +87,10 @@ class BeamSearchFunctor<platform::CPUDeviceContext, T> {
     lod[0].assign(high_level.begin(), high_level.end());
     lod[1].assign(low_level.begin(), low_level.end());
     if (!framework::CheckLoD(lod)) {
-      PADDLE_THROW("lod %s is not right", framework::LoDToString(lod));
+      PADDLE_THROW(platform::errors::InvalidArgument(
+          "lod %s is not right in beam_search, please check your code.",
+          framework::LoDToString(lod)));
     }
     selected_ids->set_lod(lod);
     selected_scores->set_lod(lod);
diff --git a/paddle/fluid/operators/math/beam_search.cu b/paddle/fluid/operators/math/beam_search.cu
index cf6d44c1abc..ed3ead47d17 100644
--- a/paddle/fluid/operators/math/beam_search.cu
+++ b/paddle/fluid/operators/math/beam_search.cu
@@ -400,7 +400,10 @@ class BeamSearchFunctor<platform::CUDADeviceContext, T> {
     context.Wait();
     if (!framework::CheckLoD(selected_lod)) {
-      PADDLE_THROW("lod %s is not right", framework::LoDToString(selected_lod));
+      PADDLE_THROW(platform::errors::InvalidArgument(
+          "lod %s is not right in beam_search, please check your code.",
+          framework::LoDToString(selected_lod)));
     }
 
     selected_ids->set_lod(selected_lod);
diff --git a/paddle/fluid/operators/math/blas.cc b/paddle/fluid/operators/math/blas.cc
index 6a143b3c056..2a7ce83967f 100644
--- a/paddle/fluid/operators/math/blas.cc
+++ b/paddle/fluid/operators/math/blas.cc
@@ -20,7 +20,11 @@ namespace operators {
 namespace math {
 MatDescriptor CreateMatrixDescriptor(const framework::DDim &tensor_dim,
                                      int num_flatten_cols, bool trans) {
-  PADDLE_ENFORCE_GT(tensor_dim.size(), 1);
+  PADDLE_ENFORCE_GT(
+      tensor_dim.size(), 1,
+      platform::errors::InvalidArgument("The tensor dim size should be greater "
+                                        "than 1, but received dim size is %d",
+                                        tensor_dim.size()));
   MatDescriptor retv;
   if (num_flatten_cols > 1) {
     auto flatten_dim = framework::flatten_to_2d(tensor_dim, num_flatten_cols);
diff --git a/paddle/fluid/operators/math/blas_impl.cu.h b/paddle/fluid/operators/math/blas_impl.cu.h
index d0c5f74d4ef..a0464cf70e2 100644
--- a/paddle/fluid/operators/math/blas_impl.cu.h
+++ b/paddle/fluid/operators/math/blas_impl.cu.h
@@ -60,7 +60,8 @@ struct CUBlas<float> {
     PADDLE_ENFORCE_CUDA_SUCCESS(
         platform::dynload::cublasSgemmStridedBatched(args...));
 #else
-    PADDLE_THROW("SgemmStridedBatched is not supported on cuda <= 7.5");
+    PADDLE_THROW(platform::errors::Unimplemented(
+        "SgemmStridedBatched is not supported on cuda <= 7.5"));
 #endif
   }
 
@@ -85,7 +86,8 @@ struct CUBlas<float> {
                                       beta, C, Ctype, ldc));
     });
 #else
-    PADDLE_THROW("cublasSgemmEx is supported on cuda >= 8.0");
+    PADDLE_THROW(platform::errors::Unimplemented(
+        "cublasSgemmEx is not supported on cuda <= 7.5"));
 #endif
   }
 
@@ -146,13 +148,15 @@ struct CUBlas<double> {
     PADDLE_ENFORCE_CUDA_SUCCESS(
         platform::dynload::cublasDgemmStridedBatched(args...));
 #else
-    PADDLE_THROW("DgemmStridedBatched is not supported on cuda <= 7.5");
+    PADDLE_THROW(platform::errors::Unimplemented(
+        "DgemmStridedBatched is not supported on cuda <= 7.5"));
 #endif
   }
 
   template <typename... ARGS>
   static void GEMM_EX(ARGS... args) {
-    PADDLE_THROW("Currently there are not cublasDgemmEx.");
+    PADDLE_THROW(platform::errors::Unimplemented(
+        "Currently there is no cublasDgemmEx."));
   }
 
   template <typename... ARGS>
@@ -216,7 +220,8 @@ struct CUBlas<platform::float16> {
         reinterpret_cast<const __half *>(beta),
         reinterpret_cast<__half *>(C), ldc, strideC, batchCount));
 #else
-    PADDLE_THROW("HgemmStridedBatched is not supported on cuda <= 7.5");
+    PADDLE_THROW(platform::errors::Unimplemented(
+        "HgemmStridedBatched is not supported on cuda <= 7.5"));
 #endif
   }
 
@@ -247,7 +252,8 @@ struct CUBlas<platform::float16> {
                                    beta, C, Ctype, ldc, computeType, algo));
     });
 #else
-    PADDLE_THROW("cublasGemmEx is supported on cuda >= 8.0");
+    PADDLE_THROW(platform::errors::Unimplemented(
+        "cublasGemmEx is not supported on cuda <= 7.5"));
 #endif
   }
 };
@@ -302,8 +308,12 @@ inline void Blas<platform::CUDADeviceContext>::GEMM(
       (transB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
 
   // TODO(kexinzhao): add processing code for compute capability < 53 case
-  PADDLE_ENFORCE_GE(context_.GetComputeCapability(), 53,
-                    "cublas fp16 gemm requires GPU compute capability >= 53");
+  PADDLE_ENFORCE_GE(
+      context_.GetComputeCapability(), 53,
+      platform::errors::InvalidArgument(
+          "cublas fp16 gemm requires GPU compute capability >= 53, "
+          "but received %d",
+          context_.GetComputeCapability()));
 
   float h_alpha = static_cast<float>(alpha);
   float h_beta = static_cast<float>(beta);
diff --git a/paddle/fluid/operators/math/blas_impl.h b/paddle/fluid/operators/math/blas_impl.h
index 892bf157381..515d6a2435e 100644
--- a/paddle/fluid/operators/math/blas_impl.h
+++ b/paddle/fluid/operators/math/blas_impl.h
@@ -29,7 +29,8 @@ template <>
 struct CBlas<int8_t> {
   template <typename... ARGS>
   static void VCOPY(ARGS... args) {
-    PADDLE_THROW("Blas VCOPY don't support int8_t");
+    PADDLE_THROW(platform::errors::Unimplemented(
+        "Blas VCOPY is not supported on CPU, please check your code"));
   }
 };
 
@@ -347,22 +348,47 @@ template <>
 struct CBlas<platform::float16> {
-  static void GEMM(...) { PADDLE_THROW("float16 GEMM not supported on CPU"); }
+  static void GEMM(...) {
+    PADDLE_THROW(platform::errors::Unimplemented(
+        "float16 GEMM not supported on CPU, please check your code"));
+  }
+
   static void SMM_GEMM(...) {
-    PADDLE_THROW("float16 SMM_GEMM not supported on CPU");
+    PADDLE_THROW(platform::errors::Unimplemented(
+        "float16 SMM_GEMM not supported on CPU, please check your code"));
   }
-  static void VMUL(...) { PADDLE_THROW("float16 VMUL not supported on CPU"); }
-  static void VEXP(...) { PADDLE_THROW("float16 VEXP not supported on CPU"); }
-  static void VSQUARE(...) {
-    PADDLE_THROW("float16 VSQUARE not supported on CPU");
+  static void VMUL(...) {
+    PADDLE_THROW(platform::errors::Unimplemented(
+        "float16 VMUL not supported on CPU, please check your code"));
   }
-  static void VPOW(...) { PADDLE_THROW("float16 VPOW not supported on CPU"); }
-  static void DOT(...) { PADDLE_THROW("float16 DOT not supported on CPU"); };
-  static void SCAL(...) { PADDLE_THROW("float16 SCAL not supported on CPU"); };
-  static void ASUM(...) { PADDLE_THROW("float16 ASUM not supported on CPU"); };
+  static void VEXP(...) {
+    PADDLE_THROW(platform::errors::Unimplemented(
+        "float16 VEXP not supported on CPU, please check your code"));
+  }
+  static void VSQUARE(...) {
+    PADDLE_THROW(platform::errors::Unimplemented(
+        "float16 VSQUARE not supported on CPU, please check your code"));
+  }
+  static void VPOW(...) {
+    PADDLE_THROW(platform::errors::Unimplemented(
+        "float16 VPOW not supported on CPU, please check your code"));
+  }
+  static void DOT(...) {
+    PADDLE_THROW(platform::errors::Unimplemented(
+        "float16 DOT not supported on CPU, please check your code"));
+  };
+  static void SCAL(...) {
+    PADDLE_THROW(platform::errors::Unimplemented(
+        "float16 SCAL not supported on CPU, please check your code"));
+  };
+  static void ASUM(...) {
+    PADDLE_THROW(platform::errors::Unimplemented(
+        "float16 ASUM not supported on CPU, please check your code"));
+  };
 #ifdef PADDLE_WITH_MKLML
   static void GEMM_BATCH(...) {
-    PADDLE_THROW("float16 GEMM_BATCH not supported on CPU");
+    PADDLE_THROW(platform::errors::Unimplemented(
+        "float16 GEMM_BATCH not supported on CPU, please check your code"));
   }
 #endif
 };
@@ -446,11 +472,18 @@ void Blas<DeviceContext>::MatMul(const framework::Tensor &mat_a, bool trans_a,
   auto dim_a = mat_a.dims();
   auto dim_b = mat_b.dims();
   auto dim_out = mat_out->dims();
-  PADDLE_ENFORCE(dim_a.size() == 2 && dim_b.size() == 2 && dim_out.size() == 2,
-                 "The input and output of matmul be matrix");
-  PADDLE_ENFORCE(
-      mat_a.place() == mat_b.place() && mat_a.place() == mat_out->place(),
-      "The places of matrices must be same");
+  PADDLE_ENFORCE_EQ(
+      dim_a.size() == 2 && dim_b.size() == 2 && dim_out.size() == 2, true,
+      platform::errors::InvalidArgument(
+          "The input and output of matmul should be matrices, "
+          "whose dim size must be 2, "
+          "but received dim size input_a:%d, input_b:%d, output:%d",
+          dim_a.size(), dim_b.size(), dim_out.size()));
+  PADDLE_ENFORCE_EQ(
+      mat_a.place() == mat_b.place() && mat_a.place() == mat_out->place(), true,
+      platform::errors::InvalidArgument("The places of matrices in the matmul "
+                                        "should be the same, please check "
+                                        "your code."));
 
   int M = dim_out[0];
   int N = dim_out[1];
@@ -715,7 +748,13 @@ void Blas<platform::CPUDeviceContext>::BatchedGEMMWithHead(
     }
 
   } else {
-    PADDLE_ENFORCE_EQ(W1, H2);
+    PADDLE_ENFORCE_EQ(
+        W1, H2,
+        platform::errors::InvalidArgument(
+            "The first matrix width should be the same as the second "
+            "matrix height, but received first matrix width %d, "
+            "second matrix height %d",
+            W1, H2));
     int ldc = W2 * head_number;
     int sub_width = W1 / head_number;
@@ -785,7 +824,14 @@ void Blas<DeviceContext>::MatMul(const framework::Tensor &mat_a,
                                  const framework::Tensor &mat_b,
                                  const MatDescriptor &dim_b, T alpha,
                                  framework::Tensor *mat_out, T beta) const {
-  PADDLE_ENFORCE_EQ(dim_a.width_, dim_b.height_);
+  PADDLE_ENFORCE_EQ(
+      dim_a.width_, dim_b.height_,
+      platform::errors::InvalidArgument(
+          "The first matrix width should be the same as the second "
+          "matrix height, but received first matrix width %d, "
+          "second matrix height %d",
+          dim_a.width_, dim_b.height_));
+
   CBLAS_TRANSPOSE transA = !dim_a.trans_ ? CblasNoTrans : CblasTrans;
   CBLAS_TRANSPOSE transB = !dim_b.trans_ ? CblasNoTrans : CblasTrans;
   if (dim_a.batch_size_ == 0 && dim_b.batch_size_ == 0) {
@@ -793,12 +839,14 @@ void Blas<DeviceContext>::MatMul(const framework::Tensor &mat_a,
                     dim_a.width_, alpha, mat_a.data<T>(),
                     mat_b.data<T>(), beta, mat_out->data<T>());
   } else {
-    PADDLE_ENFORCE(dim_a.batch_size_ == dim_b.batch_size_ ||
-                       dim_a.batch_size_ == 0 || dim_b.batch_size_ == 0,
-                   "dim_a.batch_size should be equal to dim_b.batch_size, or "
-                   "one of dim_a.batch_size and dim_b.batch_size should be 0. "
-                   "But got dim_a.batch_size = %d, dim_b.batch_size = %d.",
-                   dim_a.batch_size_, dim_b.batch_size_);
+    PADDLE_ENFORCE_EQ(
+        dim_a.batch_size_ == dim_b.batch_size_ || dim_a.batch_size_ == 0 ||
+            dim_b.batch_size_ == 0,
+        true, platform::errors::InvalidArgument(
+                  "dim_a.batch_size should be equal to dim_b.batch_size, or "
+                  "one of dim_a.batch_size and dim_b.batch_size should be 0. "
+                  "But got dim_a.batch_size = %d, dim_b.batch_size = %d.",
+                  dim_a.batch_size_, dim_b.batch_size_));
     this->template BatchedGEMM<T>(
         transA, transB, dim_a.height_, dim_b.width_, dim_a.width_, alpha,
         mat_a.data<T>(), mat_b.data<T>(), beta, mat_out->data<T>(),
@@ -834,15 +882,42 @@ void Blas<DeviceContext>::MatMulWithHead(const framework::Tensor &mat_a,
                                          int head_number,
                                          framework::Tensor *mat_out, T beta,
                                          bool mat_b_split_vertical) const {
-  PADDLE_ENFORCE_EQ(dim_a.width_ % head_number, 0);
-  PADDLE_ENFORCE_GE(head_number, 1);
-  PADDLE_ENFORCE_LE(head_number, dim_a.width_);
+  PADDLE_ENFORCE_EQ(
+      dim_a.width_ % head_number, 0,
+      platform::errors::InvalidArgument(
+          "The first input width must be a multiple of the head number, "
+          "but received first input width %d, head_number %d",
+          dim_a.width_, head_number));
+  PADDLE_ENFORCE_GE(head_number, 1,
+                    platform::errors::InvalidArgument(
+                        "The head number should be greater than or equal "
+                        "to 1, but received head number %d",
+                        head_number));
+  PADDLE_ENFORCE_LE(
+      head_number, dim_a.width_,
+      platform::errors::InvalidArgument(
+          "The head number should be less than or equal to the first "
+          "input width, but received first input width %d, head_number %d",
+          dim_a.width_, head_number));
   CBLAS_TRANSPOSE transA = !dim_a.trans_ ? CblasNoTrans : CblasTrans;
   CBLAS_TRANSPOSE transB = !dim_b.trans_ ? CblasNoTrans : CblasTrans;
 
   if (mat_b_split_vertical) {
-    PADDLE_ENFORCE_EQ(dim_b.height_, dim_a.width_ / head_number);
-    PADDLE_ENFORCE_EQ(dim_b.width_ % head_number, 0);
+    PADDLE_ENFORCE_EQ(
+        dim_b.height_, dim_a.width_ / head_number,
+        platform::errors::InvalidArgument(
+            "The second input height should be equal to the first input "
+            "width divided by the head number, but received second input "
+            "height %d, first input width / head_number %d",
+            dim_b.height_, dim_a.width_ / head_number));
+    PADDLE_ENFORCE_EQ(
+        dim_b.width_ % head_number, 0,
+        platform::errors::InvalidArgument(
+            "The second input width should be a multiple of the head "
+            "number, but received second input width %d, head_number %d",
+            dim_b.width_, head_number));
   }
 
   if (dim_a.batch_size_ == 0 && dim_b.batch_size_ == 0) {
@@ -888,9 +963,16 @@ void Blas<DeviceContext>::MatMulWithHead(const framework::Tensor &mat_a,
                               mat_out->data<T>() + sub_matC_offset, ldc);
     }
   } else {
-    PADDLE_ENFORCE_EQ((dim_a.batch_size_ == dim_b.batch_size_ ||
-                       dim_a.batch_size_ == 0 || dim_b.batch_size_ == 0),
-                      true);
+    PADDLE_ENFORCE_EQ(
+        (dim_a.batch_size_ == dim_b.batch_size_ || dim_a.batch_size_ == 0 ||
+         dim_b.batch_size_ == 0),
+        true,
+        platform::errors::InvalidArgument(
+            "The first input batch size should be equal to the second input "
+            "batch size, or one of the two batch sizes should be 0. "
+            "But received first input batch size %d, second input batch "
+            "size %d",
+            dim_a.batch_size_, dim_b.batch_size_));
     this->template BatchedGEMMWithHead<T>(
         transA, transB, dim_a.width_, dim_a.height_, dim_b.width_,
--