diff --git a/paddle/fluid/operators/math/concat_test.cc b/paddle/fluid/operators/math/concat_test.cc
index 411dbca25bb48c99dfd16779f54e46a3e80d0d4e..270a9d3f80a80d5ea2c8b97d4a69125355ddef61 100644
--- a/paddle/fluid/operators/math/concat_test.cc
+++ b/paddle/fluid/operators/math/concat_test.cc
@@ -79,8 +79,16 @@ void ConcatCase1(DeviceContext* context) {
   concat_functor(*context, input, 0, &out);
 
   // check the dim of input_a, input_b
-  PADDLE_ENFORCE_EQ(input_a.dims(), dim_a);
-  PADDLE_ENFORCE_EQ(input_b.dims(), dim_b);
+  PADDLE_ENFORCE_EQ(input_a.dims(), dim_a,
+                    paddle::platform::errors::InvalidArgument(
+                        "The dims of Input tensor should be the same as the "
+                        "declared dims. Tensor dims: [%s], declared dims: [%s]",
+                        input_a.dims(), dim_a));
+  PADDLE_ENFORCE_EQ(input_b.dims(), dim_b,
+                    paddle::platform::errors::InvalidArgument(
+                        "The dims of Input tensor should be the same as the "
+                        "declared dims. Tensor dims: [%s], declared dims: [%s]",
+                        input_b.dims(), dim_b));
 
   int* out_ptr = nullptr;
   if (paddle::platform::is_gpu_place(Place())) {
@@ -95,10 +103,14 @@ void ConcatCase1(DeviceContext* context) {
   int idx_a = 0, idx_b = 0;
   for (int j = 0; j < 5 * 3 * 4; ++j) {
     if (j >= cols) {
-      PADDLE_ENFORCE_EQ(out_ptr[j], b_ptr[idx_b]);
+      PADDLE_ENFORCE_EQ(out_ptr[j], b_ptr[idx_b],
+                        paddle::platform::errors::InvalidArgument(
+                            "Concat test failed, the result should be equal."));
       ++idx_b;
     } else {
-      PADDLE_ENFORCE_EQ(out_ptr[j], a_ptr[idx_a]);
+      PADDLE_ENFORCE_EQ(out_ptr[j], a_ptr[idx_a],
+                        paddle::platform::errors::InvalidArgument(
+                            "Concat test failed, the result should be equal."));
       ++idx_a;
     }
   }
@@ -166,8 +178,16 @@ void ConcatCase2(DeviceContext* context) {
   concat_functor(*context, input, 1, &out);
 
   // check the dim of input_a, input_b
-  PADDLE_ENFORCE_EQ(input_a.dims(), dim_a);
-  PADDLE_ENFORCE_EQ(input_b.dims(), dim_b);
+  PADDLE_ENFORCE_EQ(input_a.dims(), dim_a,
+                    paddle::platform::errors::InvalidArgument(
+                        "The dims of Input tensor should be the same as the "
+                        "declared dims. Tensor dims: [%s], declared dims: [%s]",
+                        input_a.dims(), dim_a));
+  PADDLE_ENFORCE_EQ(input_b.dims(), dim_b,
+                    paddle::platform::errors::InvalidArgument(
+                        "The dims of Input tensor should be the same as the "
+                        "declared dims. Tensor dims: [%s], declared dims: [%s]",
+                        input_b.dims(), dim_b));
 
   int* out_ptr = nullptr;
   if (paddle::platform::is_gpu_place(Place())) {
@@ -183,10 +203,16 @@ void ConcatCase2(DeviceContext* context) {
   for (int i = 0; i < 2; ++i) {
     for (int j = 0; j < 28; ++j) {
       if (j >= cols) {
-        PADDLE_ENFORCE_EQ(out_ptr[i * 28 + j], b_ptr[idx_b]);
+        PADDLE_ENFORCE_EQ(
+            out_ptr[i * 28 + j], b_ptr[idx_b],
+            paddle::platform::errors::InvalidArgument(
+                "Concat test failed, the result should be equal."));
         ++idx_b;
       } else {
-        PADDLE_ENFORCE_EQ(out_ptr[i * 28 + j], a_ptr[idx_a]);
+        PADDLE_ENFORCE_EQ(
+            out_ptr[i * 28 + j], a_ptr[idx_a],
+            paddle::platform::errors::InvalidArgument(
+                "Concat test failed, the result should be equal."));
         ++idx_a;
       }
     }
@@ -255,8 +281,16 @@ void ConcatCase3(DeviceContext* context) {
   concat_functor(*context, input, 2, &out);
 
   // check the dim of input_a, input_b
-  PADDLE_ENFORCE_EQ(input_a.dims(), dim_a);
-  PADDLE_ENFORCE_EQ(input_b.dims(), dim_b);
+  PADDLE_ENFORCE_EQ(input_a.dims(), dim_a,
+                    paddle::platform::errors::InvalidArgument(
+                        "The dims of Input tensor should be the same as the "
+                        "declared dims. Tensor dims: [%s], declared dims: [%s]",
+                        input_a.dims(), dim_a));
+  PADDLE_ENFORCE_EQ(input_b.dims(), dim_b,
+                    paddle::platform::errors::InvalidArgument(
+                        "The dims of Input tensor should be the same as the "
+                        "declared dims. Tensor dims: [%s], declared dims: [%s]",
+                        input_b.dims(), dim_b));
 
   int* out_ptr = nullptr;
   if (paddle::platform::is_gpu_place(Place())) {
@@ -273,10 +307,16 @@ void ConcatCase3(DeviceContext* context) {
   for (int i = 0; i < 6; ++i) {
     for (int j = 0; j < 9; ++j) {
       if (j >= cols) {
-        PADDLE_ENFORCE_EQ(out_ptr[i * 9 + j], b_ptr[idx_b]);
+        PADDLE_ENFORCE_EQ(
+            out_ptr[i * 9 + j], b_ptr[idx_b],
+            paddle::platform::errors::InvalidArgument(
+                "Concat test failed, the result should be equal."));
         ++idx_b;
       } else {
-        PADDLE_ENFORCE_EQ(out_ptr[i * 9 + j], a_ptr[idx_a]);
+        PADDLE_ENFORCE_EQ(
+            out_ptr[i * 9 + j], a_ptr[idx_a],
+            paddle::platform::errors::InvalidArgument(
+                "Concat test failed, the result should be equal."));
         ++idx_a;
       }
     }
@@ -347,8 +387,16 @@ void ConcatCase4(DeviceContext* context) {
   context->Wait();
 
   // check the dim of input_a, input_b
-  PADDLE_ENFORCE_EQ(input_a.dims(), dim_a);
-  PADDLE_ENFORCE_EQ(input_b.dims(), dim_b);
+  PADDLE_ENFORCE_EQ(input_a.dims(), dim_a,
+                    paddle::platform::errors::InvalidArgument(
+                        "The dims of Input tensor should be the same as the "
+                        "declared dims. Tensor dims: [%s], declared dims: [%s]",
+                        input_a.dims(), dim_a));
+  PADDLE_ENFORCE_EQ(input_b.dims(), dim_b,
+                    paddle::platform::errors::InvalidArgument(
+                        "The dims of Input tensor should be the same as the "
+                        "declared dims. Tensor dims: [%s], declared dims: [%s]",
+                        input_b.dims(), dim_b));
 
   int* out_ptr = nullptr;
   if (paddle::platform::is_gpu_place(Place())) {
@@ -365,10 +413,16 @@ void ConcatCase4(DeviceContext* context) {
   for (int i = 0; i < 2; ++i) {
     for (int j = 0; j < 24; ++j) {
       if (j >= cols) {
-        PADDLE_ENFORCE_EQ(out_ptr[i * 24 + j], b_ptr[idx_b]);
+        PADDLE_ENFORCE_EQ(
+            out_ptr[i * 24 + j], b_ptr[idx_b],
+            paddle::platform::errors::InvalidArgument(
+                "Concat test failed, the result should be equal."));
         ++idx_b;
       } else {
-        PADDLE_ENFORCE_EQ(out_ptr[i * 24 + j], a_ptr[idx_a]);
+        PADDLE_ENFORCE_EQ(
+            out_ptr[i * 24 + j], a_ptr[idx_a],
+            paddle::platform::errors::InvalidArgument(
+                "Concat test failed, the result should be equal."));
         ++idx_a;
       }
     }
diff --git a/paddle/fluid/operators/math/context_project.h b/paddle/fluid/operators/math/context_project.h
index e9019c6d2fe6890ee92cb5a3b047666e3c2a7e04..051c6019d74f7d2820dc0ba668da3cafe8864346 100644
--- a/paddle/fluid/operators/math/context_project.h
+++ b/paddle/fluid/operators/math/context_project.h
@@ -134,7 +134,10 @@ class ContextProjectFunctor {
       }
     }
     if (padding_trainable) {
-      PADDLE_ENFORCE_NOT_NULL(padding_data);
+      PADDLE_ENFORCE_NOT_NULL(
+          padding_data,
+          platform::errors::InvalidArgument(
+              "The input tensor 'padding_data' should not be NULL."));
       for (int i = 0; i < static_cast<int>(lod_level_0.size()) - 1; ++i) {
         if (lod_level_0[i] == lod_level_0[i + 1]) continue;
diff --git a/paddle/fluid/operators/math/cpu_vec.h b/paddle/fluid/operators/math/cpu_vec.h
index 8940a41424b01c975f1264ca309cc09fc3c7ae85..925f3b6161ae8506107f917196e77ecb2d9c5593 100644
--- a/paddle/fluid/operators/math/cpu_vec.h
+++ b/paddle/fluid/operators/math/cpu_vec.h
@@ -621,7 +621,10 @@ class VecActivations {
     } else if (type == "identity" || type == "") {
       return vec_identity;
     }
-    PADDLE_THROW("Not support type: %s", type);
+    PADDLE_THROW(platform::errors::InvalidArgument(
+        "Expected type should be one of sigmoid, relu, tanh, identity. But got "
+        "unsupported type: %s.",
+        type));
   }
 };
diff --git a/paddle/fluid/operators/math/cross_entropy.cu b/paddle/fluid/operators/math/cross_entropy.cu
index c7fac60dd3e663088813f795352e4d751059de39..84fa0d6af990e22083ec1a0e3993893cefad1ab5 100644
--- a/paddle/fluid/operators/math/cross_entropy.cu
+++ b/paddle/fluid/operators/math/cross_entropy.cu
@@ -27,8 +27,8 @@ __global__ void CrossEntropyKernel(T* Y, const T* X, const int64_t* label,
                                    const int ignore_index) {
   CUDA_KERNEL_LOOP(i, N) {
     PADDLE_ENFORCE(label[i] >= 0 && label[i] < D || label[i] == ignore_index,
-                   "label[%d] expected >= 0 and < %ld, or == %ld, but got "
-                   "%ld. Please check input value.",
+                   "The value of label[%d] expected >= 0 and < %ld, or == %ld, "
+                   "but got %ld. Please check input value.",
                    i, D, ignore_index, label[i]);
     Y[i] = ignore_index == label[i] ? static_cast<T>(0)
diff --git a/paddle/fluid/operators/math/im2col.cc b/paddle/fluid/operators/math/im2col.cc
index 094a7237826610af574061263e5b0df5eafdf239..6fb393d791cc2a077dbcd0a912bcf31b5d59ad65 100644
--- a/paddle/fluid/operators/math/im2col.cc
+++ b/paddle/fluid/operators/math/im2col.cc
@@ -34,9 +34,16 @@ class Im2ColFunctor
                   const std::vector<int>& stride,
                   const std::vector<int>& padding, framework::Tensor* col,
                   const DataLayout data_layout) {
-    PADDLE_ENFORCE_EQ(im.dims().size(), 3, "The dimension of im should be 3.");
+    PADDLE_ENFORCE_EQ(im.dims().size(), 3,
+                      platform::errors::InvalidArgument(
+                          "The dimension of tensor 'im' should be 3. But got "
+                          "the dims of tensor 'im' is [%s].",
+                          im.dims()));
     PADDLE_ENFORCE_EQ(col->dims().size(), 5,
-                      "The dimension of col should be 5.");
+                      platform::errors::InvalidArgument(
+                          "The dimension of tensor 'col' should be 5. But got "
+                          "the dims of tensor 'col' is [%s].",
+                          col->dims()));
 
     if (stride[0] == 1 && stride[1] == 1 && dilation[0] == 1 &&
         dilation[1] == 1) {
@@ -70,9 +77,16 @@ class Col2ImFunctor
                   const std::vector<int>& stride,
                   const std::vector<int>& padding, framework::Tensor* im,
                   const DataLayout data_layout) {
-    PADDLE_ENFORCE_EQ(im->dims().size(), 3, "The dimension of im should be 3.");
+    PADDLE_ENFORCE_EQ(im->dims().size(), 3,
+                      platform::errors::InvalidArgument(
+                          "The dimension of tensor 'im' should be 3. But got "
+                          "the dims of tensor 'im' is [%s].",
+                          im->dims()));
     PADDLE_ENFORCE_EQ(col.dims().size(), 5,
-                      "The dimension of col should be 5.");
+                      platform::errors::InvalidArgument(
+                          "The dimension of tensor 'col' should be 5. But got "
+                          "the dims of tensor 'col' is [%s].",
+                          col.dims()));
     int im_channels =
         (data_layout != DataLayout::kNHWC ? im->dims()[0] : im->dims()[2]);
     int im_height =
@@ -88,16 +102,16 @@ class Col2ImFunctor
                   const std::vector<int>& stride,
                   const std::vector<int>& padding, framework::Tensor* col,
                   const DataLayout data_layout) {
-    PADDLE_ENFORCE_EQ(im.dims().size(), 3, "The dimension of im should be 3.");
+    PADDLE_ENFORCE_EQ(im.dims().size(), 3,
+                      platform::errors::InvalidArgument(
+                          "The dimension of tensor 'im' should be 3. But got "
+                          "the dims of tensor 'im' is [%s].",
+                          im.dims()));
     PADDLE_ENFORCE_EQ(col->dims().size(), 5,
-                      "The dimension of col should be 5.");
+                      platform::errors::InvalidArgument(
+                          "The dimension of tensor 'col' should be 5. But got "
+                          "the dims of tensor 'col' is [%s].",
+                          col->dims()));
     int im_channels = im.dims()[0];
     int im_height = im.dims()[1];
     int im_width = im.dims()[2];
@@ -218,9 +239,16 @@ class Col2ImFunctor
                   const std::vector<int>& stride,
                   const std::vector<int>& padding, framework::Tensor* im,
                   const DataLayout data_layout) {
-    PADDLE_ENFORCE_EQ(im->dims().size(), 3, "The dimension of im should be 3.");
+    PADDLE_ENFORCE_EQ(im->dims().size(), 3,
+                      platform::errors::InvalidArgument(
+                          "The dimension of tensor 'im' should be 3. But got "
+                          "the dims of tensor 'im' is [%s].",
+                          im->dims()));
     PADDLE_ENFORCE_EQ(col.dims().size(), 5,
-                      "The dimension of col should be 5.");
+                      platform::errors::InvalidArgument(
+                          "The dimension of tensor 'col' should be 5. But got "
+                          "the dims of tensor 'col' is [%s].",
+                          col.dims()));
     int im_channels = im->dims()[0];
     int im_height = im->dims()[1];
     int im_width = im->dims()[2];
@@ -231,14 +259,14 @@ class Col2ImFunctor
     T* im_data = im->data<T>();
     const T* col_data = col.data<T>();
diff --git a/paddle/fluid/operators/math/im2col.cu b/paddle/fluid/operators/math/im2col.cu
index 97719300daed9c02a716f31d853e3a381312961c..f2a2148ba6954f50cf59ae30f4f4be6aa070739f 100644
--- a/paddle/fluid/operators/math/im2col.cu
+++ b/paddle/fluid/operators/math/im2col.cu
@@ -81,9 +81,16 @@ class Im2ColFunctor
                   const std::vector<int>& stride,
                   const std::vector<int>& padding, framework::Tensor* col,
                   const DataLayout data_layout) {
-    PADDLE_ENFORCE_EQ(im.dims().size(), 3, "The dimension of im should be 3.");
+    PADDLE_ENFORCE_EQ(im.dims().size(), 3,
+                      platform::errors::InvalidArgument(
+                          "The dimension of tensor 'im' should be 3. But got "
+                          "the dims of tensor 'im' is [%s].",
+                          im.dims()));
     PADDLE_ENFORCE_EQ(col->dims().size(), 5,
-                      "The dimension of col should be 5.");
+                      platform::errors::InvalidArgument(
+                          "The dimension of tensor 'col' should be 5. But got "
+                          "the dims of tensor 'col' is [%s].",
+                          col->dims()));
 
     int im_channels =
         (data_layout != DataLayout::kNHWC ? im.dims()[0] : im.dims()[2]);
@@ -182,9 +189,16 @@ class Col2ImFunctor
                   const std::vector<int>& stride,
                   const std::vector<int>& padding, framework::Tensor* im,
                   const DataLayout data_layout) {
-    PADDLE_ENFORCE_EQ(im->dims().size(), 3, "The dimension of im should be 3.");
+    PADDLE_ENFORCE_EQ(im->dims().size(), 3,
+                      platform::errors::InvalidArgument(
+                          "The dimension of tensor 'im' should be 3. But got "
+                          "the dims of tensor 'im' is [%s].",
+                          im->dims()));
     PADDLE_ENFORCE_EQ(col.dims().size(), 5,
-                      "The dimension of col should be 5.");
+                      platform::errors::InvalidArgument(
+                          "The dimension of tensor 'col' should be 5. But got "
+                          "the dims of tensor 'col' is [%s].",
+                          col.dims()));
 
     int im_channels =
         (data_layout != DataLayout::kNHWC ? im->dims()[0] : im->dims()[2]);
@@ -201,16 +215,16 @@ class Col2ImFunctor
                   const std::vector<int>& stride,
                   const std::vector<int>& padding, framework::Tensor* col,
                   const DataLayout data_layout) {
-    PADDLE_ENFORCE_EQ(im.dims().size(), 3, "The dimension of im should be 3.");
+    PADDLE_ENFORCE_EQ(im.dims().size(), 3,
+                      platform::errors::InvalidArgument(
+                          "The dimension of tensor 'im' should be 3. But got "
+                          "the dims of tensor 'im' is [%s].",
+                          im.dims()));
     PADDLE_ENFORCE_EQ(col->dims().size(), 5,
-                      "The dimension of col should be 5.");
+                      platform::errors::InvalidArgument(
+                          "The dimension of tensor 'col' should be 5. But got "
+                          "the dims of tensor 'col' is [%s].",
+                          col->dims()));
     int im_channels = im.dims()[0];
     int im_height = im.dims()[1];
@@ -370,9 +391,16 @@ class Col2ImFunctor
                   const std::vector<int>& stride,
                   const std::vector<int>& padding, framework::Tensor* im,
                   const DataLayout data_layout) {
-    PADDLE_ENFORCE_EQ(im->dims().size(), 3, "The dimension of im should be 3.");
+    PADDLE_ENFORCE_EQ(im->dims().size(), 3,
+                      platform::errors::InvalidArgument(
+                          "The dimension of tensor 'im' should be 3. But got "
+                          "the dims of tensor 'im' is [%s].",
+                          im->dims()));
     PADDLE_ENFORCE_EQ(col.dims().size(), 5,
-                      "The dimension of col should be 5.");
+                      platform::errors::InvalidArgument(
+                          "The dimension of tensor 'col' should be 5. But got "
+                          "the dims of tensor 'col' is [%s].",
+                          col.dims()));
     int im_channels = im->dims()[0];
     int im_height = im->dims()[1];
@@ -386,16 +414,16 @@ class Col2ImFunctor
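
All of the hunks above move bare PADDLE_ENFORCE_* checks onto the platform::errors builders, so each failure carries an explicit error class plus a formatted message that includes the offending value. Below is a minimal sketch of that pattern, not part of the diff: CheckColRank and its argument are hypothetical names invented for illustration, and it assumes the usual enforce/ddim headers provide PADDLE_ENFORCE_EQ, platform::errors::InvalidArgument, and framework::DDim as they do in the files touched above.

// Illustrative only: CheckColRank is made up; the PADDLE_ENFORCE_EQ +
// platform::errors::InvalidArgument usage mirrors the changes in the diff.
#include "paddle/fluid/framework/ddim.h"
#include "paddle/fluid/platform/enforce.h"

namespace paddle {
namespace operators {
namespace math {

inline void CheckColRank(const framework::DDim& col_dims) {
  // Report the expected rank and the actual dims instead of a bare failure.
  PADDLE_ENFORCE_EQ(col_dims.size(), 5,
                    platform::errors::InvalidArgument(
                        "The dimension of tensor 'col' should be 5. But got "
                        "the dims of tensor 'col' is [%s].",
                        col_dims));
}

}  // namespace math
}  // namespace operators
}  // namespace paddle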