Unverified. Commit 4ba977c7 authored by Chen Weihang, committed by GitHub

Polish some error messages in operators (#27876)

* polish some error messages

* add white list

* revert shell script change
Parent 8e70b18e
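Every hunk in this commit applies the same pattern: a bare message string passed to PADDLE_THROW or PADDLE_ENFORCE_* is wrapped in a typed error from platform::errors, and the message is made specific enough to act on. A minimal before/after sketch of the pattern (the tensor names here are hypothetical, not taken from the diff):

```cpp
// Before: untyped message string, no error class, little context.
PADDLE_ENFORCE_EQ(x_dims, y_dims, "x and y must have same dims");

// After: a typed error (InvalidArgument / Unavailable / Fatal / ...) whose
// message names the inputs and reports the offending values.
PADDLE_ENFORCE_EQ(
    x_dims, y_dims,
    platform::errors::InvalidArgument(
        "Input(X) and Input(Y) must have the same dimension, but received "
        "X's dimension %s and Y's dimension %s.",
        x_dims, y_dims));
```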
@@ -41,7 +41,8 @@ struct StridedMemcpyFunctor<T, 0> {
       memory::Copy(gpu_place, dst, gpu_place, src, sizeof(T),
                    cuda_ctx.stream());
 #else
-      PADDLE_THROW("Paddle is not compiled with GPU");
+      PADDLE_THROW(
+          platform::errors::Unavailable("Paddle is not compiled with GPU."));
 #endif
     }
   }
@@ -64,7 +65,8 @@ struct StridedMemcpyFunctor<T, 1> {
       memory::Copy(gpu_place, dst, gpu_place, src, sizeof(T) * dst_dim[0],
                    cuda_ctx.stream());
 #else
-      PADDLE_THROW("Paddle is not compiled with GPU");
+      PADDLE_THROW(
+          platform::errors::Unavailable("Paddle is not compiled with GPU."));
 #endif
     }
   }
......
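The #else branches above are the CPU-only fallback of a compile-time guard; the guard itself sits in the collapsed context, but following the usual Paddle convention it looks roughly like this (PADDLE_WITH_CUDA is the standard build macro; the sketch assumes it is the guard used here):

```cpp
#ifdef PADDLE_WITH_CUDA
      // GPU build: perform the device-to-device copy on the CUDA stream.
      memory::Copy(gpu_place, dst, gpu_place, src, sizeof(T),
                   cuda_ctx.stream());
#else
      // CPU-only build: reaching this code path is a usage error.
      PADDLE_THROW(
          platform::errors::Unavailable("Paddle is not compiled with GPU."));
#endif
```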
@@ -86,8 +86,10 @@ void RecvSparseLodTensor(const CommContext &rpc_ctx,
     height += splited_var->Get<framework::LoDTensor>().dims()[0];
   }
-  PADDLE_ENFORCE_EQ(merged_var->Get<framework::LoDTensor>().dims()[0], height,
-                    "recved var must has same dims with local var");
+  PADDLE_ENFORCE_EQ(
+      merged_var->Get<framework::LoDTensor>().dims()[0], height,
+      platform::errors::InvalidArgument(
+          "Received variable must have same dimension as local variable."));
   auto *merged_t = merged_var->GetMutable<framework::LoDTensor>();
   auto *merged_d = merged_t->mutable_data<float>(cpu_place);
......
@@ -218,9 +218,11 @@ class FusedFCElementwiseLayerNormOpMaker
         .SetDefault(1e-5)
         .AddCustomChecker([](const float &epsilon) {
           PADDLE_ENFORCE_GE(epsilon, 0.0f,
-                            "'epsilon' should be between 0.0 and 0.001.");
+                            platform::errors::InvalidArgument(
+                                "'epsilon' should be between 0.0 and 0.001."));
           PADDLE_ENFORCE_LE(epsilon, 0.001f,
-                            "'epsilon' should be between 0.0 and 0.001.");
+                            platform::errors::InvalidArgument(
+                                "'epsilon' should be between 0.0 and 0.001."));
         });
     AddAttr<int>("begin_norm_axis",
                  "the axis of `begin_norm_axis ... Rank(Y) - 1` will be "
@@ -228,8 +230,10 @@ class FusedFCElementwiseLayerNormOpMaker
                  "matrix [N,H]. [default 1].")
         .SetDefault(1)
         .AddCustomChecker([](const int &begin_norm_axis) {
-          PADDLE_ENFORCE_GT(begin_norm_axis, 0,
-                            "'begin_norm_axis' should be greater than zero.");
+          PADDLE_ENFORCE_GT(
+              begin_norm_axis, 0,
+              platform::errors::InvalidArgument(
+                  "'begin_norm_axis' should be greater than zero."));
         });
     AddComment(R"DOC(
 fc_out <= fc(X, W, Bias0)
......
@@ -118,9 +118,10 @@ static void Interpolate2DInferShapeCheck(framework::InferShapeContext* ctx) {
   PADDLE_ENFORCE(
       "bilinear" == interp_method || "nearest" == interp_method ||
           "bicubic" == interp_method,
-      "Interpolation method can only be \"bilinear\" or \"nearest\" when "
-      "Input(X) dimension is 4, but got method = %s .",
-      interp_method);
+      platform::errors::InvalidArgument(
+          "Interpolation method can only be \"bilinear\" or \"nearest\" when "
+          "Input(X) dimension is 4, but got method = %s.",
+          interp_method));
   const DataLayout data_layout = framework::StringToDataLayout(
       ctx->Attrs().Get<std::string>("data_layout"));
@@ -305,12 +306,15 @@ static void Interpolate3DInferShapeCheck(framework::InferShapeContext* ctx) {
   if (ctx->HasInput("OutSize") && ctx->IsRuntime()) {
     auto out_size_dim = ctx->GetInputDim("OutSize");
-    PADDLE_ENFORCE_EQ(out_size_dim.size(), 1,
-                      "OutSize's dimension size must be 1, but got size =%d .",
-                      out_size_dim.size());
+    PADDLE_ENFORCE_EQ(
+        out_size_dim.size(), 1,
+        platform::errors::InvalidArgument(
+            "OutSize's dimension size must be 1, but received %d.",
+            out_size_dim.size()));
     PADDLE_ENFORCE_EQ(out_size_dim[0], 3,
-                      "OutSize's dim[0] must be 3, but got size = %d .",
-                      out_size_dim[0]);
+                      platform::errors::InvalidArgument(
+                          "OutSize's dim[0] must be 3, but received %d.",
+                          out_size_dim[0]));
     ctx->ShareLoD("X", "Out");
     return;
   }
@@ -330,10 +334,8 @@ class InterpolateV2Op : public framework::OperatorWithKernel {
  protected:
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of InterpolateV2Op should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of InterpolationOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "Interpolate");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "Interpolate");
     auto dim_x = ctx->GetInputDim("X");  // NCHW format
     PADDLE_ENFORCE(
@@ -576,9 +578,10 @@ class InterpolateV2OpGrad : public framework::OperatorWithKernel {
  protected:
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null");
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
-                   "Input(Out@GRAD) should not be null");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "InterpolateGrad");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
+                   "Out@GRAD", "InterpolateGrad");
     auto dim_x = ctx->GetInputDim("X");
     if (ctx->HasOutput(framework::GradVarName("X"))) {
       ctx->SetOutputDim(framework::GradVarName("X"), dim_x);
......
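The two InferShape hunks above drop hand-written null checks in favor of the OP_INOUT_CHECK macro, which emits a standardized NotFound error for a missing input or output. Its expansion is roughly the following (a sketch only; the exact message wording comes from the macro definition in the framework headers and may differ):

```cpp
// OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "Interpolate")
// behaves roughly like:
PADDLE_ENFORCE_EQ(
    ctx->HasInput("X"), true,
    platform::errors::NotFound(
        "No Input(X) found for Interpolate operator."));  // wording approximate
```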
@@ -98,7 +98,7 @@ class AccuracyXPUKernel : public framework::OpKernel<T> {
                         label_int32_device, num_samples, class_dim,
                         correct_data, total_data, accuracy_data);
     PADDLE_ENFORCE_EQ(r, xpu::Error_t::SUCCESS,
-                      platform::errors::Fatal("XPU kernel error!"));
+                      platform::errors::Fatal("XPU accuracy kernel error!"));
     dev_ctx.Wait();
     xpu_free(indices_int32_device);
     xpu_free(label_int32_device);
......
@@ -71,7 +71,8 @@ class AdadeltaOp : public framework::OperatorWithKernel {
     auto param_dim = ctx->GetInputDim("Param");
     PADDLE_ENFORCE_EQ(
         param_dim, ctx->GetInputDim("Grad"),
-        "param and grad input of AdadeltaOp should have same dimension");
+        platform::errors::InvalidArgument(
+            "Param and Grad inputs of AdadeltaOp should have same dimension."));
     PADDLE_ENFORCE_NE(
         framework::product(ctx->GetInputDim("AvgSquaredGrad")), 0,
         platform::errors::InvalidArgument(
......
@@ -50,8 +50,14 @@ class DpsgdOpKernel : public framework::OpKernel<T> {
     auto *param_out = ctx.Output<framework::Tensor>("ParamOut");
     auto sz = param_out->numel();
-    PADDLE_ENFORCE_EQ(param->numel(), sz);
-    PADDLE_ENFORCE_EQ(grad->numel(), sz);
+    PADDLE_ENFORCE_EQ(param->numel(), sz,
+                      platform::errors::InvalidArgument(
+                          "Input parameter's number of elements is wrong, "
+                          "expected %zu, but received %zu.", sz, param->numel()));
+    PADDLE_ENFORCE_EQ(grad->numel(), sz,
+                      platform::errors::InvalidArgument(
+                          "Input gradient's number of elements is wrong, "
+                          "expected %zu, but received %zu.", sz, grad->numel()));
     const T *lr = learning_rate->data<T>();
     const T *param_data = param->data<T>();
......
@@ -49,7 +49,7 @@ class ScaleXPUKernel : public framework::OpKernel<T> {
     int r = xpu::scale(dev_ctx.x_context(), in->numel(), scale, bias,
                        bias_after_scale, in->data<float>(), out->data<float>());
     PADDLE_ENFORCE_EQ(r, xpu::Error_t::SUCCESS,
-                      platform::errors::Fatal("XPU kernel error!"));
+                      platform::errors::Fatal("XPU scale kernel error!"));
   }
 };
......
@@ -30,7 +30,7 @@ class SignXPUKernel : public framework::OpKernel<T> {
     int r = xpu::activation_forward(xpu_context, xpu::Activation_t::SIGN,
                                     in->numel(), in->data<T>(), out->data<T>());
     PADDLE_ENFORCE_EQ(r, xpu::Error_t::SUCCESS,
-                      platform::errors::Fatal("XPU kernel error!"));
+                      platform::errors::Fatal("XPU sign kernel error!"));
   }
 };
......
@@ -51,7 +51,7 @@ class SumXPUKernel : public framework::OpKernel<T> {
     int r = xpu::sum_batch(dev_ctx.x_context(), ptrs.data(), out->data<T>(),
                            valid_count, out->numel());
     PADDLE_ENFORCE_EQ(r, xpu::Error_t::SUCCESS,
-                      platform::errors::Fatal("XPU kernel error!"));
+                      platform::errors::Fatal("XPU sum kernel error!"));
   }
 };
......
@@ -36,7 +36,9 @@ class TopkOp : public framework::OperatorWithKernel {
     auto input_dims = ctx->GetInputDim("X");
     const int k = static_cast<int>(ctx->Attrs().Get<int>("k"));
-    PADDLE_ENFORCE_GE(k, 1, "k must >= 1");
+    PADDLE_ENFORCE_GE(k, 1,
+                      platform::errors::InvalidArgument(
+                          "Attribute k must be >= 1, but received k = %d.", k));
     PADDLE_ENFORCE_GE(input_dims.size(), 1, platform::errors::InvalidArgument(
                                                 "input must have >= 1d shape"));
......
@@ -96,7 +96,8 @@ class TopkOpCUDAKernel : public framework::OpKernel<T> {
             output_data, k, indices_data, input_data, input_width,
             input_width, static_cast<int>(k), gridx, input_height));
       default:
-        PADDLE_THROW("Error");
+        PADDLE_THROW(platform::errors::Unavailable(
+            "Calculation error occurred in TopK Operator's CUDA Kernel."));
     }
   }
 };
......
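Read together, the hunks settle on a small taxonomy of error types; a sketch of how this commit applies them (all three snippets are lifted from the hunks above):

```cpp
// InvalidArgument: the caller passed a bad shape or attribute value.
PADDLE_ENFORCE_GE(k, 1,
                  platform::errors::InvalidArgument(
                      "Attribute k must be >= 1, but received k = %d.", k));

// Unavailable: the requested path cannot run in this build or branch.
PADDLE_THROW(
    platform::errors::Unavailable("Paddle is not compiled with GPU."));

// Fatal: an unrecoverable device-side failure (here, an XPU kernel).
PADDLE_ENFORCE_EQ(r, xpu::Error_t::SUCCESS,
                  platform::errors::Fatal("XPU sum kernel error!"));
```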