Unverified commit 9d783aed, authored by Double_V, committed by GitHub

Error message opt, test=develop (#27467)

* Error message opt, test=develop

* solve comments, test=develop

* fix typo, test=develop
Parent d1c2a3bc
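
Throughout the diff one pattern repeats: bare PADDLE_ENFORCE calls carrying a terse string are replaced with typed error builders (platform::errors::InvalidArgument, platform::errors::NotFound) whose printf-style messages report the values that actually failed the check. A schematic before/after of the pattern, assuming the usual paddle/fluid/platform/enforce.h macros (illustrative only, not a line taken from this diff):

    // Before: untyped check; the message omits the limit that was violated.
    PADDLE_ENFORCE(id < N, "received id:", id);

    // After: typed error category plus a message carrying both values.
    PADDLE_ENFORCE_LT(id, N,
                      platform::errors::InvalidArgument(
                          "Id should be smaller than %d, but received %d.",
                          N, id));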
@@ -30,8 +30,10 @@ __global__ void ComputeDifferent(T *centers_diff, const T *X, const T *centers,
   while (idy < K) {
     int64_t id = ids[idy];
-    PADDLE_ENFORCE(id >= 0, "received id:", id);
-    PADDLE_ENFORCE(id < N, "received id:", id);
+    PADDLE_ENFORCE(id >= 0, "Id should be no less than 0, but received id: %d.",
+                   id);
+    PADDLE_ENFORCE(id < N, "Id should be smaller than %d, but received id: %d.",
+                   N, id);
     T *out = centers_diff + idy * D;
     const T *x = X + idy * D;
     const T *cent = centers + id * D;
@@ -52,8 +54,9 @@ __global__ void UpdateCenters(T *centers, T *centers_diff, const int64_t *ids,
   while (idy < K) {
     int count = 1;
     int64_t id = ids[idy];
-    PADDLE_ENFORCE(id >= 0, "received id:", id);
-    PADDLE_ENFORCE(id < N, "received id:", id);
+    PADDLE_ENFORCE(id >= 0, "Id should be no less than 0, but received id: %d.",
+                   id);
+    PADDLE_ENFORCE(id < N, "Id should be smaller than %d, but received id: %d.",
+                   N, id);
     for (int i = 0; i < K; i++) {
       if (ids[i] == id) {
......
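
For readers outside the codebase: the two checks above guard a gather, since each id read from ids indexes a row of the centers table before the pointer centers + id * D is formed. A minimal standalone CUDA sketch of the same pattern (hypothetical names, a plain device-side assert standing in for PADDLE_ENFORCE):

    #include <cassert>
    #include <cstdint>

    // Each thread copies one gathered row; the assert mirrors the bounds
    // checks in ComputeDifferent/UpdateCenters above.
    __global__ void GatherRows(float* out, const float* table,
                               const int64_t* ids, int64_t num_rows,
                               int64_t width, int64_t n) {
      int64_t i = blockIdx.x * blockDim.x + threadIdx.x;
      if (i >= n) return;
      int64_t id = ids[i];
      assert(id >= 0 && id < num_rows);  // reject out-of-range ids
      for (int64_t j = 0; j < width; ++j) {
        out[i * width + j] = table[id * width + j];
      }
    }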
@@ -69,8 +69,10 @@ template <typename T>
 class CTCAlignOpCUDAKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
-    PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
-                   "It must use CUDAPlace.");
+    PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true,
+                      platform::errors::InvalidArgument(
+                          "CTCAlign operator CUDA kernel must use CUDAPlace "
+                          "rather than CPUPlace."));
     auto* input = ctx.Input<LoDTensor>("Input");
     auto* output = ctx.Output<LoDTensor>("Output");
     const int blank = ctx.Attr<int>("blank");
......
@@ -72,8 +72,11 @@ class CTCAlignKernel : public framework::OpKernel<T> {
     // check input dims and lod
-    PADDLE_ENFORCE_EQ(
-        input_dims[0], static_cast<int64_t>(input_lod[level].back()),
-        "The first dimension of Input(Input) should be equal to "
-        "the sum of all sequences' lengths.");
+    PADDLE_ENFORCE_EQ(
+        input_dims[0], static_cast<int64_t>(input_lod[level].back()),
+        platform::errors::InvalidArgument(
+            "The first dimension %d of CTCAlign operator Input(Input) should "
+            "be equal to the sum of all sequences' lengths %d.",
+            input_dims[0], static_cast<int64_t>(input_lod[level].back())));
     const size_t num_sequences = input_lod[level].size() - 1;
......
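
The reworked message leans on the LoD convention: input_lod[level] holds cumulative sequence offsets, so its back() is the total length of all sequences and must equal the packed tensor's first dimension. A small host-side illustration with assumed values:

    #include <cstdio>
    #include <vector>

    int main() {
      // Three sequences of lengths 2, 3 and 1, packed along dim 0.
      std::vector<size_t> lod = {0, 2, 5, 6};  // cumulative offsets
      size_t first_dim = 6;                    // rows of the packed tensor
      // The invariant CTCAlign enforces: first_dim == lod.back().
      std::printf("invariant holds: %d\n", first_dim == lod.back());
      return 0;
    }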
@@ -45,8 +45,10 @@ template <typename T>
 class PoolCUDNNOpKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext &ctx) const override {
-    PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true,
-                      "It must use CUDAPlace.");
+    PADDLE_ENFORCE_EQ(
+        platform::is_gpu_place(ctx.GetPlace()), true,
+        platform::errors::InvalidArgument("Pool operator CUDA kernel must use "
+                                          "CUDAPlace rather than CPUPlace."));
     const Tensor *input = ctx.Input<Tensor>("X");
     Tensor *output = ctx.Output<Tensor>("Out");
@@ -175,8 +177,10 @@ template <typename T>
 class PoolCUDNNGradOpKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext &ctx) const override {
-    PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true,
-                      "It must use CUDAPlace.");
+    PADDLE_ENFORCE_EQ(
+        platform::is_gpu_place(ctx.GetPlace()), true,
+        platform::errors::InvalidArgument("Pool operator CUDA kernel must use "
+                                          "CUDAPlace rather than CPUPlace."));
     const Tensor *input = ctx.Input<Tensor>("X");
     const Tensor *output = ctx.Input<Tensor>("Out");
......
@@ -38,18 +38,22 @@ int PoolOutputSize(int input_size, int filter_size, int padding_1,
   }
-  PADDLE_ENFORCE_GT(
-      output_size, 0,
-      "ShapeError: the output size must be greater than 0. But received: "
-      "output_size = %d due to the settings of input_size(%d), padding(%d,%d), "
-      "k_size(%d) and stride(%d). Please check again!",
-      output_size, input_size, padding_1, padding_2, filter_size, stride);
+  PADDLE_ENFORCE_GT(
+      output_size, 0,
+      platform::errors::InvalidArgument(
+          "The output size must be greater than 0. But received: "
+          "output_size = %d due to the settings of input_size(%d), "
+          "padding(%d,%d), k_size(%d) and stride(%d). Please check again!",
+          output_size, input_size, padding_1, padding_2, filter_size, stride));
   return output_size;
 }

 void PoolOp::InferShape(framework::InferShapeContext* ctx) const {
-  PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
-                    "X(Input) of Pooling should not be null.");
-  PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
-                    "Out(Output) of Pooling should not be null.");
+  PADDLE_ENFORCE_EQ(
+      ctx->HasInput("X"), true,
+      platform::errors::NotFound("Input(X) of Pool operator is not found."));
+  PADDLE_ENFORCE_EQ(
+      ctx->HasOutput("Out"), true,
+      platform::errors::NotFound("Output(Out) of Pool operator is not found."));
   std::string pooling_type = ctx->Attrs().Get<std::string>("pooling_type");
   std::vector<int> ksize = ctx->Attrs().Get<std::vector<int>>("ksize");
@@ -65,28 +69,32 @@ void PoolOp::InferShape(framework::InferShapeContext* ctx) const {
   auto in_x_dims = ctx->GetInputDim("X");
-  PADDLE_ENFORCE_EQ(
-      in_x_dims.size() == 4 || in_x_dims.size() == 5, true,
-      "ShapeError: the input of Op(pool) should be 4-D or 5-D Tensor. But "
-      "received: %u-D Tensor and it's shape is [%s].",
-      in_x_dims.size(), in_x_dims);
+  PADDLE_ENFORCE_EQ(
+      in_x_dims.size() == 4 || in_x_dims.size() == 5, true,
+      platform::errors::InvalidArgument(
+          "The input of Op(pool) should be a 4-D or 5-D Tensor. But "
+          "received: %u-D Tensor and its shape is [%s].",
+          in_x_dims.size(), in_x_dims));
-  PADDLE_ENFORCE_EQ(
-      in_x_dims.size() - ksize.size(), 2U,
-      "ShapeError: the dimension of input minus the size of "
-      "Attr(ksize) must be euqal to 2 in Op(pool). "
-      "But received: the dimension of input minus the size "
-      "of Attr(ksize) is %d, the "
-      "input's dimension is %d, the shape of input "
-      "is [%s], the Attr(ksize)'s size is %d, the Attr(ksize) is [%s].",
-      in_x_dims.size() - ksize.size(), in_x_dims.size(), in_x_dims,
-      ksize.size(), framework::make_ddim(ksize));
+  PADDLE_ENFORCE_EQ(
+      in_x_dims.size() - ksize.size(), 2U,
+      platform::errors::InvalidArgument(
+          "The dimension of input minus the size of Attr(ksize) must be "
+          "equal to 2 in Op(pool). But received: the dimension of input "
+          "minus the size of Attr(ksize) is %d, the input's dimension is %d, "
+          "the shape of input is [%s], the Attr(ksize)'s size is %d, and "
+          "the Attr(ksize) is [%s].",
+          in_x_dims.size() - ksize.size(), in_x_dims.size(), in_x_dims,
+          ksize.size(), framework::make_ddim(ksize)));
-  PADDLE_ENFORCE_EQ(ksize.size(), strides.size(),
-                    "ShapeError: the size of Attr(ksize) and Attr(strides) in "
-                    "Op(pool) must be equal. "
-                    "But received: Attr(ksize)'s size is %d, Attr(strides)'s "
-                    "size is %d, Attr(ksize) is [%s], Attr(strides)is [%s].",
-                    ksize.size(), strides.size(), framework::make_ddim(ksize),
-                    framework::make_ddim(strides));
+  PADDLE_ENFORCE_EQ(
+      ksize.size(), strides.size(),
+      platform::errors::InvalidArgument(
+          "The size of Attr(ksize) and Attr(strides) in Op(pool) must be "
+          "equal. But received: Attr(ksize)'s size is %d, Attr(strides)'s "
+          "size is %d, Attr(ksize) is [%s], and Attr(strides) is [%s].",
+          ksize.size(), strides.size(), framework::make_ddim(ksize),
+          framework::make_ddim(strides)));
   // MKL-DNN Kernels are using NCHW order of dims description
   // so we ignore data_format consideration for MKL-DNN kernel
@@ -182,9 +190,12 @@ framework::OpKernelType PoolOp::GetKernelTypeForVar(
 }

 void PoolOpGrad::InferShape(framework::InferShapeContext* ctx) const {
-  PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true, "Input(X) must not be null.");
-  PADDLE_ENFORCE_EQ(ctx->HasOutput(framework::GradVarName("X")), true,
-                    "Input(X@GRAD) should not be null.");
+  PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
+                    platform::errors::NotFound(
+                        "Input(X) of Pool Grad operator is not found."));
+  PADDLE_ENFORCE_EQ(ctx->HasOutput(framework::GradVarName("X")), true,
+                    platform::errors::NotFound(
+                        "Output(X@GRAD) of Pool Grad operator is not found."));
   ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
 }
@@ -210,7 +221,8 @@ framework::OpKernelType PoolOpGrad::GetExpectedKernelType(
   auto input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X");
   if (input_data_type == framework::proto::VarType::FP16) {
     PADDLE_ENFORCE_EQ(library_, framework::LibraryType::kCUDNN,
-                      "float16 can only be used when CUDNN is used");
+                      platform::errors::InvalidArgument(
+                          "Float16 can only be used when CUDNN is used."));
   }
   return framework::OpKernelType(input_data_type, ctx.GetPlace(), layout_,
                                  library_);
......
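
The PADDLE_ENFORCE_GT in PoolOutputSize guards the standard pooling arithmetic, output = (input - k + pad_1 + pad_2) / stride + 1; a simplified sketch (ignoring the ceil-mode and adaptive branches of the real function) shows how a non-positive size arises:

    #include <cstdio>

    // Simplified form of the computation PoolOutputSize validates.
    int OutputSizeSketch(int input, int k, int pad_1, int pad_2, int stride) {
      return (input - k + pad_1 + pad_2) / stride + 1;
    }

    int main() {
      // input_size=1, k_size=3, padding=(0,0), stride=1 yields -1: exactly
      // the misconfiguration the check rejects with an InvalidArgument error.
      std::printf("%d\n", OutputSizeSketch(1, 3, 0, 0, 1));  // prints -1
      return 0;
    }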
@@ -81,9 +81,11 @@ inline void UpdatePadding(std::vector<T>* paddings, const bool global_pooling,
       paddings->insert(paddings->begin() + 2 * i + 1, copy_pad);
     }
   } else {
-    PADDLE_ENFORCE_EQ(
-        data_dims.size() * 2, paddings->size(),
-        "Paddings size should be the same or twice as the pooling size.");
+    PADDLE_ENFORCE_EQ(data_dims.size() * 2, paddings->size(),
+                      platform::errors::InvalidArgument(
+                          "Paddings size %d should be the same or twice as the "
+                          "pooling size %d.",
+                          paddings->size(), data_dims.size() * 2));
   }

   // when padding_algorithm is "VALID" or "SAME"
@@ -200,7 +202,10 @@ class PoolKernel : public framework::OpKernel<T> {
                         pool_process, exclusive, adaptive, out);
         }
       } break;
-      default: { PADDLE_THROW("Pool op only supports 2D and 3D input."); }
+      default: {
+        PADDLE_THROW(platform::errors::InvalidArgument(
+            "Pool op only supports 2D and 3D input."));
+      }
     }
   }
 };
@@ -287,7 +292,10 @@ class PoolGradKernel : public framework::OpKernel<T> {
                           adaptive, in_x_grad);
         }
       } break;
-      default: { PADDLE_THROW("Pool op only supports 2D and 3D input."); }
+      default: {
+        PADDLE_THROW(platform::errors::InvalidArgument(
+            "Pool op only supports 2D and 3D input."));
+      }
     }
   }
 }
......
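
All of the rewritten default: branches share one dispatch shape: switch on the input rank (4-D tensors take the 2-D pooling path, 5-D the 3-D path) and throw a typed error for anything else. A plain C++ analogue with hypothetical names:

    #include <stdexcept>

    void RunPoolSketch(int tensor_rank) {
      switch (tensor_rank) {
        case 4:  // 2-D pooling path
          break;
        case 5:  // 3-D pooling path
          break;
        default:
          throw std::invalid_argument("Pool op only supports 2D and 3D input.");
      }
    }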
@@ -46,8 +46,11 @@ class MaxPoolWithIndexOp : public framework::OperatorWithKernel {
     std::vector<int> paddings = ctx->Attrs().Get<std::vector<int>>("paddings");
     bool adaptive = ctx->Attrs().Get<bool>("adaptive");
-    PADDLE_ENFORCE(in_x_dims.size() == 4 || in_x_dims.size() == 5,
-                   "Pooling intput should be 4-D or 5-D tensor.");
+    PADDLE_ENFORCE(
+        in_x_dims.size() == 4 || in_x_dims.size() == 5,
+        platform::errors::InvalidArgument("Pooling input should be a 4-D or "
+                                          "5-D tensor, but received a %d-D "
+                                          "tensor.",
+                                          in_x_dims.size()));
     if (ctx->Attrs().Get<bool>("global_pooling")) {
       ksize.resize(static_cast<size_t>(in_x_dims.size()) - 2);
@@ -57,16 +60,21 @@ class MaxPoolWithIndexOp : public framework::OperatorWithKernel {
       }
     }
-    PADDLE_ENFORCE_EQ(in_x_dims.size() - ksize.size(), 2U,
-                      platform::errors::InvalidArgument(
-                          "Input size and pooling size should be consistent."));
-    PADDLE_ENFORCE_EQ(ksize.size(), strides.size(),
-                      platform::errors::InvalidArgument(
-                          "Strides size and pooling size should be the same."));
-    PADDLE_ENFORCE_EQ(
-        ksize.size(), paddings.size(),
-        platform::errors::InvalidArgument(
-            "Paddings size and pooling size should be the same."));
+    PADDLE_ENFORCE_EQ(
+        in_x_dims.size() - ksize.size(), 2U,
+        platform::errors::InvalidArgument(
+            "The input size %d minus the kernel size %d should be equal to 2.",
+            in_x_dims.size(), ksize.size()));
+    PADDLE_ENFORCE_EQ(
+        ksize.size(), strides.size(),
+        platform::errors::InvalidArgument(
+            "Strides size %d and pooling size %d should be the same.",
+            strides.size(), ksize.size()));
+    PADDLE_ENFORCE_EQ(
+        ksize.size(), paddings.size(),
+        platform::errors::InvalidArgument(
+            "Paddings size %d and pooling size %d should be the same.",
+            paddings.size(), ksize.size()));
     std::vector<int64_t> output_shape({in_x_dims[0], in_x_dims[1]});
     if (adaptive) {
......
@@ -61,7 +61,10 @@ class MaxPoolWithIndexKernel : public framework::OpKernel<T1> {
         pool3d_forward(dev_ctx, *in_x, ksize, strides, paddings, adaptive, out,
                        mask);
       } break;
-      default: { PADDLE_THROW("Pool op only supports 2D and 3D input."); }
+      default: {
+        PADDLE_THROW(platform::errors::InvalidArgument(
+            "Pool op only supports 2D and 3D input."));
+      }
     }
   }
 };
@@ -106,7 +109,10 @@ class MaxPoolWithIndexGradKernel : public framework::OpKernel<T1> {
         pool3d_backward(device_ctx, *out_grad, *mask, ksize, strides,
                         paddings, adaptive, in_x_grad);
       } break;
-      default: { PADDLE_THROW("Pool op only supports 2D and 3D input."); }
+      default: {
+        PADDLE_THROW(platform::errors::InvalidArgument(
+            "Pool op only supports 2D and 3D input."));
+      }
     }
   }
 }
......
@@ -176,22 +176,31 @@ class GPUPSROIPoolOpKernel : public framework::OpKernel<T> {
     int height = in_dims[2];
     int width = in_dims[3];
-    PADDLE_ENFORCE_EQ(input_channels,
-                      output_channels * pooled_height * pooled_width,
-                      "the channels of input X should equal the product of "
-                      "output_channels x pooled_height x pooled_width");
+    PADDLE_ENFORCE_EQ(
+        input_channels, output_channels * pooled_height * pooled_width,
+        platform::errors::InvalidArgument(
+            "The channels %d of input X should equal the product of "
+            "output_channels %d x pooled_height %d x pooled_width %d.",
+            input_channels, output_channels, pooled_height, pooled_width));
     int rois_num = rois->dims()[0];
     if (rois_num == 0) return;
     auto rois_lod = rois->lod().back();
     int rois_batch_size = rois_lod.size() - 1;
-    PADDLE_ENFORCE_EQ(
-        rois_batch_size, batch_size,
-        "The rois_batch_size and input(X) batch_size must be the same.");
+    PADDLE_ENFORCE_EQ(rois_batch_size, batch_size,
+                      platform::errors::InvalidArgument(
+                          "The batch size of input(ROIs) and input(X) must be "
+                          "the same but received batch size of input(ROIs) and "
+                          "input(X) is %d and %d respectively.",
+                          rois_batch_size, batch_size));
     int rois_num_with_lod = rois_lod[rois_batch_size];
-    PADDLE_ENFORCE_EQ(rois_num, rois_num_with_lod,
-                      "The rois_num from input and lod must be the same.");
+    PADDLE_ENFORCE_EQ(rois_num, rois_num_with_lod,
+                      platform::errors::InvalidArgument(
+                          "The number of rois from input(ROIs) and its LoD "
+                          "must be the same. Received %d rois from "
+                          "input(ROIs) but %d rois from its LoD.",
+                          rois_num, rois_num_with_lod));
     // set rois batch id
     framework::Tensor rois_batch_id_list;
......
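
The channel constraint spelled out above is plain arithmetic: position-sensitive ROI pooling dedicates one input-channel group to each output cell, so input_channels must factor exactly as output_channels * pooled_height * pooled_width. A worked check with assumed numbers:

    #include <cassert>

    int main() {
      // Assumed configuration: 8 output channels over a 7 x 7 pooled grid.
      int output_channels = 8, pooled_height = 7, pooled_width = 7;
      int input_channels = 392;  // must equal 8 * 7 * 7
      assert(input_channels == output_channels * pooled_height * pooled_width);
      return 0;
    }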
@@ -160,9 +160,14 @@ class GPUROIPoolOpKernel : public framework::OpKernel<T> {
     if (ctx.HasInput("RoisNum")) {
       auto* rois_num_t = ctx.Input<Tensor>("RoisNum");
       int rois_batch_size = rois_num_t->numel();
-      PADDLE_ENFORCE_EQ(
-          rois_batch_size, batch_size,
-          "The rois_batch_size and imgs batch_size must be the same.");
+      PADDLE_ENFORCE_EQ(
+          rois_batch_size, batch_size,
+          platform::errors::InvalidArgument(
+              "The batch size of input(ROIs) and input(X) must be the same "
+              "but received batch size of input(ROIs) and input(X) is %d "
+              "and %d respectively.",
+              rois_batch_size, batch_size));
       std::vector<int> rois_num_list(rois_batch_size);
       memory::Copy(cplace, rois_num_list.data(), gplace,
                    rois_num_t->data<int>(), sizeof(int) * rois_batch_size, 0);
@@ -178,10 +183,19 @@ class GPUROIPoolOpKernel : public framework::OpKernel<T> {
       int rois_batch_size = rois_lod.size() - 1;
-      PADDLE_ENFORCE_EQ(
-          rois_batch_size, batch_size,
-          "The rois_batch_size and imgs batch_size must be the same.");
+      PADDLE_ENFORCE_EQ(
+          rois_batch_size, batch_size,
+          platform::errors::InvalidArgument(
+              "The batch size of input(ROIs) and input(X) must be the same "
+              "but received batch size of input(ROIs) and input(X) is %d "
+              "and %d respectively.",
+              rois_batch_size, batch_size));
       int rois_num_with_lod = rois_lod[rois_batch_size];
-      PADDLE_ENFORCE_EQ(rois_num, rois_num_with_lod,
-                        "The rois_num from input and lod must be the same.");
+      PADDLE_ENFORCE_EQ(rois_num, rois_num_with_lod,
+                        platform::errors::InvalidArgument(
+                            "The number of rois from input(ROIs) and its LoD "
+                            "must be the same. Received %d rois from "
+                            "input(ROIs) but %d rois from its LoD.",
+                            rois_num, rois_num_with_lod));
       for (int n = 0; n < rois_batch_size; ++n) {
         for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) {
           roi_batch_id_data[i] = n;
......
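
Both checks in this kernel pin down the same LoD bookkeeping: rois_lod stores batch_size + 1 cumulative offsets, its last entry is the total number of rois, and the nested loop tags each roi with the image it belongs to. A small sketch with assumed offsets:

    #include <cstdio>
    #include <vector>

    int main() {
      // Assumed offsets: rois 0-1 belong to image 0, rois 2-4 to image 1.
      std::vector<size_t> rois_lod = {0, 2, 5};
      int rois_batch_size = rois_lod.size() - 1;  // must match X's batch size
      int rois_num_with_lod = rois_lod.back();    // must match rois->dims()[0]
      std::vector<int> roi_batch_id(rois_num_with_lod);
      for (int n = 0; n < rois_batch_size; ++n) {
        for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) {
          roi_batch_id[i] = n;  // tag each roi with its image index
        }
      }
      for (int id : roi_batch_id) std::printf("%d ", id);  // prints: 0 0 1 1 1
      std::printf("\n");
      return 0;
    }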