Unverified · Commit 4ba977c7 authored by Chen Weihang, committed by GitHub

Polish some error messages in operators (#27876)

* polish some error messages

* add white list

* revert shell script change
Parent 8e70b18e
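Every hunk below applies the same convention: PADDLE_ENFORCE_* and PADDLE_THROW now receive a typed error from platform::errors (InvalidArgument, Unavailable, NotFound, Fatal) instead of a bare message string. A minimal before/after sketch, taken from the top_k check in this commit:

    // Before: untyped message string, no error classification.
    PADDLE_ENFORCE_GE(k, 1, "k must >= 1");

    // After: a typed error object carrying a formatted, actionable message.
    PADDLE_ENFORCE_GE(k, 1,
                      platform::errors::InvalidArgument(
                          "Attribute k must be >= 1, but got k is %d.", k));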
@@ -41,7 +41,8 @@ struct StridedMemcpyFunctor<T, 0> {
       memory::Copy(gpu_place, dst, gpu_place, src, sizeof(T),
                    cuda_ctx.stream());
 #else
-      PADDLE_THROW("Paddle is not compiled with GPU");
+      PADDLE_THROW(
+          platform::errors::Unavailable("Paddle is not compiled with GPU."));
 #endif
     }
   }
@@ -64,7 +65,8 @@ struct StridedMemcpyFunctor<T, 1> {
       memory::Copy(gpu_place, dst, gpu_place, src, sizeof(T) * dst_dim[0],
                    cuda_ctx.stream());
 #else
-      PADDLE_THROW("Paddle is not compiled with GPU");
+      PADDLE_THROW(
+          platform::errors::Unavailable("Paddle is not compiled with GPU."));
 #endif
     }
   }
...
@@ -86,8 +86,10 @@ void RecvSparseLodTensor(const CommContext &rpc_ctx,
     height += splited_var->Get<framework::LoDTensor>().dims()[0];
   }
-  PADDLE_ENFORCE_EQ(merged_var->Get<framework::LoDTensor>().dims()[0], height,
-                    "recved var must has same dims with local var");
+  PADDLE_ENFORCE_EQ(
+      merged_var->Get<framework::LoDTensor>().dims()[0], height,
+      platform::errors::InvalidArgument(
+          "Received variable must have the same dimension as the local variable."));
   auto *merged_t = merged_var->GetMutable<framework::LoDTensor>();
   auto *merged_d = merged_t->mutable_data<float>(cpu_place);
...
@@ -218,9 +218,11 @@ class FusedFCElementwiseLayerNormOpMaker
         .SetDefault(1e-5)
         .AddCustomChecker([](const float &epsilon) {
           PADDLE_ENFORCE_GE(epsilon, 0.0f,
-                            "'epsilon' should be between 0.0 and 0.001.");
+                            platform::errors::InvalidArgument(
+                                "'epsilon' should be between 0.0 and 0.001."));
           PADDLE_ENFORCE_LE(epsilon, 0.001f,
-                            "'epsilon' should be between 0.0 and 0.001.");
+                            platform::errors::InvalidArgument(
+                                "'epsilon' should be between 0.0 and 0.001."));
         });
     AddAttr<int>("begin_norm_axis",
                  "the axis of `begin_norm_axis ... Rank(Y) - 1` will be "
@@ -228,8 +230,10 @@ class FusedFCElementwiseLayerNormOpMaker
                  "matrix [N,H]. [default 1].")
         .SetDefault(1)
         .AddCustomChecker([](const int &begin_norm_axis) {
-          PADDLE_ENFORCE_GT(begin_norm_axis, 0,
-                            "'begin_norm_axis' should be greater than zero.");
+          PADDLE_ENFORCE_GT(
+              begin_norm_axis, 0,
+              platform::errors::InvalidArgument(
+                  "'begin_norm_axis' should be greater than zero."));
         });
     AddComment(R"DOC(
 fc_out <= fc(X, W, Bias0)
...
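The epsilon and begin_norm_axis checks above run inside AddCustomChecker callbacks, which the framework invokes when the attribute is assigned, so an out-of-range value is rejected at op construction rather than deep inside the kernel. A minimal sketch of the registration pattern (the attribute doc string is abridged, not copied from this file):

    AddAttr<float>("epsilon", "Small constant added for numerical stability.")
        .SetDefault(1e-5)
        .AddCustomChecker([](const float &epsilon) {
          // Rejects values outside [0.0, 0.001] with a typed error.
          PADDLE_ENFORCE_GE(epsilon, 0.0f,
                            platform::errors::InvalidArgument(
                                "'epsilon' should be between 0.0 and 0.001."));
          PADDLE_ENFORCE_LE(epsilon, 0.001f,
                            platform::errors::InvalidArgument(
                                "'epsilon' should be between 0.0 and 0.001."));
        });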
@@ -118,9 +118,10 @@ static void Interpolate2DInferShapeCheck(framework::InferShapeContext* ctx) {
   PADDLE_ENFORCE(
       "bilinear" == interp_method || "nearest" == interp_method ||
           "bicubic" == interp_method,
-      "Interpolation method can only be \"bilinear\" or \"nearest\" when "
-      "Input(X) dimension is 4, but got method = %s .",
-      interp_method);
+      platform::errors::InvalidArgument(
+          "Interpolation method can only be \"bilinear\", \"nearest\" or "
+          "\"bicubic\" when Input(X) dimension is 4, but got method = %s.",
+          interp_method));
   const DataLayout data_layout = framework::StringToDataLayout(
       ctx->Attrs().Get<std::string>("data_layout"));
@@ -305,12 +306,15 @@ static void Interpolate3DInferShapeCheck(framework::InferShapeContext* ctx) {
   if (ctx->HasInput("OutSize") && ctx->IsRuntime()) {
     auto out_size_dim = ctx->GetInputDim("OutSize");
-    PADDLE_ENFORCE_EQ(out_size_dim.size(), 1,
-                      "OutSize's dimension size must be 1, but got size =%d .",
-                      out_size_dim.size());
-    PADDLE_ENFORCE_EQ(out_size_dim[0], 3,
-                      "OutSize's dim[0] must be 3, but got size = %d .",
-                      out_size_dim[0]);
+    PADDLE_ENFORCE_EQ(
+        out_size_dim.size(), 1,
+        platform::errors::InvalidArgument(
+            "OutSize's dimension size must be 1, but got size is %d.",
+            out_size_dim.size()));
+    PADDLE_ENFORCE_EQ(out_size_dim[0], 3,
+                      platform::errors::InvalidArgument(
+                          "OutSize's dim[0] must be 3, but got size is %d.",
+                          out_size_dim[0]));
     ctx->ShareLoD("X", "Out");
     return;
   }
@@ -330,10 +334,8 @@ class InterpolateV2Op : public framework::OperatorWithKernel {
  protected:
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of InterpolateV2Op should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of InterpolationOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "Interpolate");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "Interpolate");
     auto dim_x = ctx->GetInputDim("X");  // NCHW format
     PADDLE_ENFORCE(
@@ -576,9 +578,10 @@ class InterpolateV2OpGrad : public framework::OperatorWithKernel {
  protected:
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null");
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
-                   "Input(Out@GRAD) should not be null");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "InterpolateGrad");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
+                   "Out@GRAD", "InterpolateGrad");
     auto dim_x = ctx->GetInputDim("X");
     if (ctx->HasOutput(framework::GradVarName("X"))) {
       ctx->SetOutputDim(framework::GradVarName("X"), dim_x);
...
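The interpolate hunks replace hand-written null checks with OP_INOUT_CHECK, which standardizes the wording of missing-input errors. Roughly, the macro reduces to the following (a sketch; the exact definition lives in Paddle's framework headers):

    // OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "Interpolate")
    // enforces the condition and emits a uniform message such as
    // "No Input(X) found for Interpolate operator."
    #define OP_INOUT_CHECK(condition, type, name, op_name)      \
      PADDLE_ENFORCE_EQ(condition, true,                        \
                        platform::errors::NotFound(             \
                            "No %s(%s) found for %s operator.", \
                            type, name, op_name))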
@@ -98,7 +98,7 @@ class AccuracyXPUKernel : public framework::OpKernel<T> {
                      label_int32_device, num_samples, class_dim,
                      correct_data, total_data, accuracy_data);
     PADDLE_ENFORCE_EQ(r, xpu::Error_t::SUCCESS,
-                      platform::errors::Fatal("XPU kernel error!"));
+                      platform::errors::Fatal("XPU accuracy kernel error!"));
     dev_ctx.Wait();
     xpu_free(indices_int32_device);
     xpu_free(label_int32_device);
...
@@ -71,7 +71,8 @@ class AdadeltaOp : public framework::OperatorWithKernel {
     auto param_dim = ctx->GetInputDim("Param");
     PADDLE_ENFORCE_EQ(
         param_dim, ctx->GetInputDim("Grad"),
-        "param and grad input of AdadeltaOp should have same dimension");
+        platform::errors::InvalidArgument(
+            "Param and Grad inputs of AdadeltaOp should have the same dimension."));
     PADDLE_ENFORCE_NE(
         framework::product(ctx->GetInputDim("AvgSquaredGrad")), 0,
         platform::errors::InvalidArgument(
...
@@ -50,8 +50,14 @@ class DpsgdOpKernel : public framework::OpKernel<T> {
     auto *param_out = ctx.Output<framework::Tensor>("ParamOut");
     auto sz = param_out->numel();
-    PADDLE_ENFORCE_EQ(param->numel(), sz);
-    PADDLE_ENFORCE_EQ(grad->numel(), sz);
+    PADDLE_ENFORCE_EQ(param->numel(), sz,
+                      platform::errors::InvalidArgument(
+                          "Input parameter's number of elements is wrong, "
+                          "expected %zu, but received %zu.", sz, param->numel()));
+    PADDLE_ENFORCE_EQ(grad->numel(), sz,
+                      platform::errors::InvalidArgument(
+                          "Input gradient's number of elements is wrong, "
+                          "expected %zu, but received %zu.", sz, grad->numel()));
     const T *lr = learning_rate->data<T>();
     const T *param_data = param->data<T>();
...
@@ -49,7 +49,7 @@ class ScaleXPUKernel : public framework::OpKernel<T> {
     int r = xpu::scale(dev_ctx.x_context(), in->numel(), scale, bias,
                        bias_after_scale, in->data<float>(), out->data<float>());
     PADDLE_ENFORCE_EQ(r, xpu::Error_t::SUCCESS,
-                      platform::errors::Fatal("XPU kernel error!"));
+                      platform::errors::Fatal("XPU scale kernel error!"));
   }
 };
...
@@ -30,7 +30,7 @@ class SignXPUKernel : public framework::OpKernel<T> {
     int r = xpu::activation_forward(xpu_context, xpu::Activation_t::SIGN,
                                     in->numel(), in->data<T>(), out->data<T>());
     PADDLE_ENFORCE_EQ(r, xpu::Error_t::SUCCESS,
-                      platform::errors::Fatal("XPU kernel error!"));
+                      platform::errors::Fatal("XPU sign kernel error!"));
   }
 };
...
@@ -51,7 +51,7 @@ class SumXPUKernel : public framework::OpKernel<T> {
     int r = xpu::sum_batch(dev_ctx.x_context(), ptrs.data(), out->data<T>(),
                            valid_count, out->numel());
     PADDLE_ENFORCE_EQ(r, xpu::Error_t::SUCCESS,
-                      platform::errors::Fatal("XPU kernel error!"));
+                      platform::errors::Fatal("XPU sum kernel error!"));
   }
 };
...
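The XPU hunks in this commit (accuracy, scale, sign, sum) make the same one-word change: the kernel name is baked into the Fatal message so failures are distinguishable in logs. A hypothetical helper, not part of this commit, that would capture the repeated check once:

    // Assumed helper macro (illustrative only): wraps the XPU return-code
    // check used verbatim in the four kernels above.
    #define CHECK_XPU_SUCCESS(r, kernel)                                   \
      PADDLE_ENFORCE_EQ((r), xpu::Error_t::SUCCESS,                        \
                        platform::errors::Fatal("XPU %s kernel error!", (kernel)))

    // Usage, e.g. in ScaleXPUKernel:  CHECK_XPU_SUCCESS(r, "scale");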
@@ -36,7 +36,9 @@ class TopkOp : public framework::OperatorWithKernel {
     auto input_dims = ctx->GetInputDim("X");
     const int k = static_cast<int>(ctx->Attrs().Get<int>("k"));
-    PADDLE_ENFORCE_GE(k, 1, "k must >= 1");
+    PADDLE_ENFORCE_GE(k, 1,
+                      platform::errors::InvalidArgument(
+                          "Attribute k must be >= 1, but got k is %d.", k));
     PADDLE_ENFORCE_GE(input_dims.size(), 1, platform::errors::InvalidArgument(
                                                 "input must have >= 1d shape"));
...
@@ -96,7 +96,8 @@ class TopkOpCUDAKernel : public framework::OpKernel<T> {
             output_data, k, indices_data, input_data, input_width,
             input_width, static_cast<int>(k), gridx, input_height));
       default:
-        PADDLE_THROW("Error");
+        PADDLE_THROW(platform::errors::Unavailable(
+            "Calculation error occurred in TopK Operator's CUDA Kernel."));
     }
   }
 };
...