Unverified commit df67b317, authored by swtkiwi, committed by GitHub

[2.0 Cherry-pick] fix 2.0 error message (#30332)

* fix datanorm error msg (#30294)

* Optimize the error message of framework. (#30134)

* modify error message based on comments (#30189)

* modify error message based on comments

* edit code according to review.

* Correct spelling according to review.

* fix enforce msg of sum xpu op (#30113)

* enhance error info for py_func (#30138)

* enhance error info for py_func

* update

* fix elugradgrad test fail & error message opt (#30171)

* fix elugradgrad test fail and error message opt

* fix unittest, test=develop

* Update prroi_pool_op.h

fix error message

* opt message,test=develop

* fix ci fail,test=develop

* Refine PADDLE_ENFORCE Error Messages. test=develop (#30149)

Improve some error messages in parallel_executor.cc, conditional_block_op.cc, recurrent_op.cc

* enhance error message, test=develop (#30220)

* fix error message for distribute_fpn_proposals_op (#30116)

* enhance error msgs of fusion_seqpool_cvm_concat_op.cc, test=develop (#30240)

* just add the op error message for the matmul xpu (#30246)

 add the op error message for the matmul xpu

* enhance error message of nll_loss op test=develop (#30125)

* enhance error message of nll_loss op test=develop
Co-authored-by: yaoxuefeng <yaoxuefeng@baidu.com>
Co-authored-by: xiemoyuan <71377852+xiemoyuan@users.noreply.github.com>
Co-authored-by: WeiXin <weixin10@baidu.com>
Co-authored-by: Jack Zhou <zhoushunjie@baidu.com>
Co-authored-by: Wilber <jiweibo@baidu.com>
Co-authored-by: Double_V <liuvv0203@163.com>
Co-authored-by: Huihuang Zheng <zhhsplendid@gmail.com>
Co-authored-by: zhang wenhui <frankwhzhang@126.com>
Co-authored-by: wangguanzhong <jerrywgz@126.com>
Co-authored-by: 石晓伟 <39303645+Shixiaowei02@users.noreply.github.com>
Co-authored-by: wawltor <fangzeyang0904@hotmail.com>
Co-authored-by: lijianshe02 <48898730+lijianshe02@users.noreply.github.com>
Parent commit: e7cbc43f
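All of the hunks below follow the same convention: a bare PADDLE_ENFORCE_* check gains a typed error (platform::errors::InvalidArgument, PreconditionNotMet, etc.) whose message states what was expected and what was actually received. The sketch below is not part of the patch; it is a minimal illustration of that pattern under the assumption of the Paddle 2.0-era enforce headers, and the function name and the Input(X)/rank-2 requirement in it are hypothetical.

// Minimal sketch (not from the patch) of the error-message pattern this commit
// applies across operators: name the constraint, then report expected vs.
// received values through format arguments.
#include "paddle/fluid/framework/ddim.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/errors.h"

namespace paddle {
namespace operators {

// Hypothetical rank check; "X" and the required rank 2 are illustrative only.
void CheckInputXRank(const framework::DDim& x_dims) {
  PADDLE_ENFORCE_EQ(
      x_dims.size(), 2,
      platform::errors::InvalidArgument(
          "Expected Input(X)'s rank is 2. But received %d.", x_dims.size()));
}

}  // namespace operators
}  // namespace paddle
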
@@ -167,7 +167,9 @@ class ParallelExecutorPrivate {
       nccl_id = new ncclUniqueId();
       PADDLE_ENFORCE_EQ(
           platform::dynload::ncclGetUniqueId(nccl_id), ncclSuccess,
-          platform::errors::PreconditionNotMet("Get NCCL unique ID failed."));
+          platform::errors::PreconditionNotMet(
+              "PaddlePaddle failed to get NCCL unique ID. It may be due to "
+              "your system settings or NCCL library error, please debug on NCCL"));
       VLOG(10) << "can't find nccl_id_var:" << var_name
                << ", nccl_id:" << nccl_id;
     }
...
@@ -197,9 +197,9 @@ class Pool2dOpConverter : public OpConverter {
           engine_, Padding, *const_cast<nvinfer1::ITensor *>(input1), pre_pad,
           post_pad);
       PADDLE_ENFORCE_NOT_NULL(
-          pad_layer,
-          platform::errors::Fatal(
-              "pad layer in poolOp converter could not be created."));
+          pad_layer, platform::errors::Fatal(
+                         "Pad layer in poolOp converter could not be "
+                         "created. The pointer to pad layer is `NULL`."));
       input1 = pad_layer->getOutput(0);
     }
     auto *pool_layer = TRT_ENGINE_ADD_LAYER(
...
@@ -1565,7 +1565,7 @@ struct ELUGradGradFunctor : public BaseActivationFunctor<T> {
       auto dout = framework::EigenVector<T>::Flatten(
           GET_DATA_SAFELY(dOut, "Output", "DOut", "ELUGradGrad"));
       dx.device(*d) = ddx * dout * static_cast<T>(alpha) * x.exp() *
-                      (x < static_cast<T>(0)).template cast<T>();
+                      (x <= static_cast<T>(0)).template cast<T>();
     }
     if (ddOut) {
...
@@ -44,14 +44,18 @@ void AttentionLSTMOp::InferShape(framework::InferShapeContext* ctx) const {
   auto x_dims = ctx->GetInputDim("X");
   const int M = x_dims[1];
-  PADDLE_ENFORCE_EQ(x_dims.size(), 2, platform::errors::InvalidArgument(
-                                          "Input(X)'s rank must be 2."));
+  PADDLE_ENFORCE_EQ(x_dims.size(), 2,
+                    platform::errors::InvalidArgument(
+                        "Expected input(X)'s dimension is 2. But received %d.",
+                        x_dims.size()));
   auto w_dims = ctx->GetInputDim("LSTMWeight");
   const int D = w_dims[1] / 4;
   PADDLE_ENFORCE_EQ(
       w_dims.size(), 2,
-      platform::errors::InvalidArgument("Input(LSTMWeight)'s rank must be 2."));
+      platform::errors::InvalidArgument(
+          "Expected input(LSTMWeight)'s dimension is 2. But received %d.",
+          w_dims.size()));
   PADDLE_ENFORCE_EQ(
       w_dims[0], D + M,
       platform::errors::InvalidArgument(
@@ -77,8 +81,11 @@ void AttentionLSTMOp::InferShape(framework::InferShapeContext* ctx) const {
   if (ctx->HasInput("H0")) {
     auto h_dims = ctx->GetInputDim("H0");
-    PADDLE_ENFORCE_EQ(h_dims.size(), 2UL, platform::errors::InvalidArgument(
-                                              "Input(H0)'s rank must be 2."));
+    PADDLE_ENFORCE_EQ(
+        h_dims.size(), 2UL,
+        platform::errors::InvalidArgument(
+            "Expected input(H0)'s dimension is 2. But received %d.",
+            h_dims.size()));
     if (ctx->IsRuntime() ||
         (framework::product(c_dims) > 0 && framework::product(h_dims) > 0)) {
       PADDLE_ENFORCE_EQ(h_dims, c_dims,
@@ -94,7 +101,9 @@ void AttentionLSTMOp::InferShape(framework::InferShapeContext* ctx) const {
                         "Input(AttentionWeight)'s rank must be 2."));
   PADDLE_ENFORCE_EQ(atten_w_dims[0], M + D,
                     platform::errors::InvalidArgument(
-                        "AttentionWeight shapes must be (%d + %d) * 1.", M, D));
+                        "Expected `AttentionWeight` shape is [(%d + %d), 1]. "
+                        "But received shape = [%d, 1], shape[0] is not %d.",
+                        M, D, atten_w_dims[0], M + D));
   PADDLE_ENFORCE_EQ(atten_w_dims[1], 1,
                     platform::errors::InvalidArgument(
                         "AttentionWeight shapes must be (%d + %d) * 1.", M, D));
...
@@ -50,9 +50,11 @@ class BilinearTensorProductOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_EQ(
         y_dims.size(), 2UL,
         platform::errors::InvalidArgument("The input(Y) must be a 2D Tensor."));
-    PADDLE_ENFORCE_EQ(weight_dims.size(), 3UL,
-                      platform::errors::InvalidArgument(
-                          "The input(Weight) must be a 3D tensor."));
+    PADDLE_ENFORCE_EQ(
+        weight_dims.size(), 3UL,
+        platform::errors::InvalidArgument("Expected the input(Weight) is a 3D "
+                                          "tensor. But received %dD tensor.",
+                                          weight_dims.size()));
     if (ctx->IsRuntime() || (x_dims[0] > 0 && y_dims[0] > 0)) {
       PADDLE_ENFORCE_EQ(
           x_dims[0], y_dims[0],
...
@@ -57,8 +57,10 @@ class ConditionalBlockOp : public ConditionalOp {
     if (need_run) {
       auto *scope_var = scope.FindVar(Output(ConditionalOp::kScope));
       PADDLE_ENFORCE_NOT_NULL(
-          scope_var, platform::errors::PreconditionNotMet(
-                         "Scope must be set in conditional_block_op."));
+          scope_var,
+          platform::errors::PreconditionNotMet(
+              "Expect Scope variable to be set in conditional_block_op, but "
+              "got a null Scope variable. Please set the Scope variable."));
       auto *scopes = scope_var->GetMutable<std::vector<framework::Scope *>>();
       scopes->resize(1);
       scopes->front() = &scope.NewScope();
@@ -119,12 +121,16 @@ class ConditionalBlockGradOp : public ConditionalOp {
       auto *scope_var = scope.FindVar(Input(ConditionalOp::kScope));
       PADDLE_ENFORCE_NOT_NULL(
-          scope_var, platform::errors::PreconditionNotMet(
-                         "Scope must be set in conditional block op."));
+          scope_var,
+          platform::errors::PreconditionNotMet(
+              "Expect Scope variable to be set in conditional_block_op, but "
+              "got a null Scope variable. Please set the Scope variable."));
       auto &scopes = scope_var->Get<std::vector<framework::Scope *>>();
-      PADDLE_ENFORCE_GT(scopes.size(), 0,
-                        platform::errors::InvalidArgument(
-                            "Scope must be set in conditional block op."));
+      PADDLE_ENFORCE_GT(
+          scopes.size(), 0,
+          platform::errors::InvalidArgument(
+              "Expect Scope variable contains at least 1 scope, but got: %d",
+              scopes.size()));
       framework::Scope &cur_scope = *scopes[0];
       framework::Executor exec(dev_place);
...
@@ -30,8 +30,10 @@ class CVMOp : public framework::OperatorWithKernel {
     OP_INOUT_CHECK(ctx->HasOutput("Y"), "Output", "Y", "CVM");
     auto x_dims = ctx->GetInputDim("X");
-    PADDLE_ENFORCE_EQ(x_dims.size(), 2UL, platform::errors::InvalidArgument(
-                                              "Input(X)'s rank should be 2."));
+    PADDLE_ENFORCE_EQ(
+        x_dims.size(), 2UL,
+        platform::errors::InvalidArgument(
+            "Input(X)'s rank should be 2, but got %d", x_dims.size()));
     if (ctx->Attrs().Get<bool>("use_cvm")) {
       ctx->SetOutputDim("Y", {x_dims[0], x_dims[1]});
@@ -68,26 +70,31 @@ class CVMGradientOp : public framework::OperatorWithKernel {
     auto x_dims = ctx->GetInputDim("X");
     auto cvm_dims = ctx->GetInputDim("CVM");
     auto dy_dims = ctx->GetInputDim(framework::GradVarName("Y"));
-    PADDLE_ENFORCE_EQ(x_dims.size(), 2, platform::errors::InvalidArgument(
-                                            "Input(X)'s rank should be 2."));
+    PADDLE_ENFORCE_EQ(
+        x_dims.size(), 2,
+        platform::errors::InvalidArgument(
+            "Expect Input(X)'s rank == 2, but got %d", x_dims.size()));
     PADDLE_ENFORCE_EQ(
         dy_dims.size(), 2,
-        platform::errors::InvalidArgument("Input(Y@Grad)'s rank should be 2."));
+        platform::errors::InvalidArgument(
+            "Expect Input(Y@Grad)'s rank == 2, but got %d", dy_dims.size()));
     PADDLE_ENFORCE_EQ(
         cvm_dims.size(), 2,
-        platform::errors::InvalidArgument("Input(CVM)'s rank should be 2."));
+        platform::errors::InvalidArgument(
+            "Expect Input(CVM)'s rank == 2, but got %d", cvm_dims.size()));
     PADDLE_ENFORCE_EQ(
         x_dims[0], dy_dims[0],
         platform::errors::InvalidArgument(
             "The 1st dimension of Input(X) and Input(Y@Grad) should "
-            "be equal."));
+            "be equal, X is %d, Y@Grad is %d",
+            x_dims[0], dy_dims[0]));
     PADDLE_ENFORCE_EQ(
         cvm_dims[1], 2,
         platform::errors::InvalidArgument(
             "When Attr(soft_label) == false, the 2nd dimension of "
-            "Input(CVM) should be 2."));
+            "Input(CVM) should be 2, but got %d", cvm_dims[1]));
     ctx->SetOutputDim(framework::GradVarName("X"), x_dims);
     ctx->ShareLoD("X", framework::GradVarName("X"));
   }
...
@@ -390,7 +390,7 @@ class DataNormKernel<platform::CPUDeviceContext, T>
       }
       default:
         PADDLE_THROW(platform::errors::InvalidArgument(
-            "Unknown storage order: %d", data_layout));
+            "Unknown storage order: %d, please use NCHW or NHWC", data_layout));
     }
   }
 };
@@ -701,7 +701,8 @@ class DataNormGradKernel<platform::CPUDeviceContext, T>
       }
       default:
         PADDLE_THROW(platform::errors::InvalidArgument(
-            "Unknown storage order: %s", data_layout_str));
+            "Unknown storage order: %s, please use NCHW or NHWC",
+            data_layout_str));
     }
   }
 };
...
@@ -84,7 +84,8 @@ class DistributeFpnProposalsOpKernel : public framework::OpKernel<T> {
       PADDLE_ENFORCE_EQ(fpn_rois->lod().size(), 1UL,
                         platform::errors::InvalidArgument(
                             "DistributeFpnProposalsOp needs LoD "
-                            "with one level."));
+                            "with one level. But received level is %d",
+                            fpn_rois->lod().size()));
     }
     std::vector<size_t> fpn_rois_lod;
...
@@ -43,7 +43,9 @@ class TargetAssignOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_EQ(
         in_dims.size(), 3,
-        platform::errors::InvalidArgument("The rank of Input(X) must be 3."));
+        platform::errors::InvalidArgument(
+            "Expected the rank of Input(X) is 3. But received %d.",
+            in_dims.size()));
     PADDLE_ENFORCE_EQ(mi_dims.size(), 2,
                       platform::errors::InvalidArgument(
                           "The rank of Input(MatchIndices) must be 2."));
...
@@ -31,15 +31,15 @@ void FusionSeqPoolCVMConcatOp::InferShape(
                     paddle::platform::errors::InvalidArgument(
                         "Output(Out) of FusionSeqPoolCVMConcatOp should not be null."));
   int axis = ctx->Attrs().Get<int>("axis");
-  PADDLE_ENFORCE_EQ(
-      axis, 1,
-      paddle::platform::errors::InvalidArgument(
-          "FusionSeqPoolCVMConcatOp only supports concat axis=1 yet."));
+  PADDLE_ENFORCE_EQ(axis, 1, paddle::platform::errors::InvalidArgument(
+                                 "FusionSeqPoolCVMConcatOp only supports "
+                                 "concat axis=1 yet, but received %d.",
+                                 axis));
   bool use_cvm = ctx->Attrs().Get<bool>("use_cvm");
-  PADDLE_ENFORCE_EQ(
-      use_cvm, true,
-      paddle::platform::errors::InvalidArgument(
-          "FusionSeqPoolCVMConcatOp only supports use_cvm is true yet."));
+  PADDLE_ENFORCE_EQ(use_cvm, true, paddle::platform::errors::InvalidArgument(
+                                       "FusionSeqPoolCVMConcatOp only supports "
+                                       "use_cvm is true yet, but received %d.",
+                                       use_cvm));
   auto ins_dims = ctx->GetInputsDim("X");
   const size_t n = ins_dims.size();
...
@@ -127,10 +127,18 @@ class MatMulXPUKernel : public framework::OpKernel<T> {
     PADDLE_ENFORCE_EQ(
         mat_dim_a.width_, mat_dim_b.height_,
-        platform::errors::InvalidArgument("Shape mistake in matmul_op"));
-    PADDLE_ENFORCE_EQ(
-        mat_dim_a.batch_size_, mat_dim_b.batch_size_,
-        platform::errors::InvalidArgument("Shape mistake in matmul_op"));
+        platform::errors::InvalidArgument("Shape mistake in matmul_op, the "
+                                          "first tensor width must be same as "
+                                          "second tensor height, but received "
+                                          "width:%d, height:%d",
+                                          mat_dim_a.width_, mat_dim_b.height_));
+    PADDLE_ENFORCE_EQ(mat_dim_a.batch_size_, mat_dim_b.batch_size_,
+                      platform::errors::InvalidArgument(
+                          "Shape mistake in matmul_op, the two input "
+                          "tensor batch_size must be same, but received first "
+                          "tensor batch_size:%d, second "
+                          "tensor batch_size:%d",
+                          mat_dim_a.batch_size_, mat_dim_b.batch_size_));
     T alpha = static_cast<T>(context.Attr<float>("alpha"));
     auto &dev_ctx = context.template device_context<DeviceContext>();
@@ -251,12 +259,20 @@ class MatMulGradXPUKernel : public framework::OpKernel<T> {
       }
     }
-    PADDLE_ENFORCE_EQ(
-        mat_dim_a.width_, mat_dim_b.height_,
-        platform::errors::InvalidArgument("Shape mistake in matmul_grad_op"));
-    PADDLE_ENFORCE_EQ(
-        mat_dim_a.batch_size_, mat_dim_b.batch_size_,
-        platform::errors::InvalidArgument("Shape mistake in matmul_grad_op"));
+    PADDLE_ENFORCE_EQ(mat_dim_a.width_, mat_dim_b.height_,
+                      platform::errors::InvalidArgument(
+                          "Shape mistake in matmul_grad_op, the "
+                          "first tensor width must be same as second tensor "
+                          "height, but received "
+                          "width:%d, height:%d",
+                          mat_dim_a.width_, mat_dim_b.height_));
+    PADDLE_ENFORCE_EQ(mat_dim_a.batch_size_, mat_dim_b.batch_size_,
+                      platform::errors::InvalidArgument(
+                          "Shape mistake in matmul_grad_op, the two input "
+                          "tensor batch_size must be same, but received first "
+                          "tensor batch_size:%d, second "
+                          "tensor batch_size:%d",
+                          mat_dim_a.batch_size_, mat_dim_b.batch_size_));
     T alpha = static_cast<T>(context.Attr<float>("alpha"));
...
@@ -54,9 +54,14 @@ class BatchNormMKLDNNHandler
     const float epsilon = ctx.Attr<float>("epsilon");
     const bool fuse_with_relu = ctx.Attr<bool>("fuse_with_relu");
+    std::vector<std::string> DataLayout_error_msg = {"kNHWC", "kNCHW",
+                                                     "kAnyLayout", "kMKLDNN"};
     PADDLE_ENFORCE_EQ(
         x->layout(), DataLayout::kMKLDNN,
-        platform::errors::InvalidArgument("Wrong layout set for X tensor"));
+        platform::errors::InvalidArgument(
+            "Wrong layout set for X tensor. Expected layout is `kMKLDNN`, "
+            "But received %s.",
+            DataLayout_error_msg[static_cast<int>(x->layout())]));
     PADDLE_ENFORCE_NE(
         x->format(), MKLDNNMemoryFormat::undef,
         platform::errors::InvalidArgument("Wrong format set for X tensor"));
...
@@ -374,9 +374,12 @@ class DNNLMatMulKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
     if (ctx.HasAttr("head_number")) {
-      PADDLE_ENFORCE_EQ(ctx.Attr<int>("head_number"), 1,
-                        platform::errors::Unimplemented(
-                            "DNNL matmul doesn't support multiple heads."));
+      PADDLE_ENFORCE_EQ(
+          ctx.Attr<int>("head_number"), 1,
+          platform::errors::Unimplemented(
+              "DNNL matmul doesn't support multiple heads. Expected "
+              "head_number=1. But received `head_number` is %d",
+              ctx.Attr<int>("head_number")));
     }
     platform::MKLDNNDeviceContext::tls().log_lib_version();
     ExecuteMatMul<T, T>(ctx);
...
@@ -53,10 +53,14 @@ class NLLLossOp : public framework::OperatorWithKernel {
       PADDLE_ENFORCE_EQ(w_dims.size(), 1,
                         platform::errors::InvalidArgument(
                             "Input(Weight) should be a 1D tensor."));
-      PADDLE_ENFORCE_EQ(x_dims[1], w_dims[0],
-                        platform::errors::InvalidArgument(
-                            "Input(Weight) Tensor's size should match "
-                            "to the the total number of classes."));
+      PADDLE_ENFORCE_EQ(
+          x_dims[1], w_dims[0],
+          platform::errors::InvalidArgument(
+              "Expected input tensor Weight's size should equal "
+              "to the second dimension (number of classes) of the input "
+              "tensor X. But received Weight's size is %d, the second "
+              "dimension of input X is %d",
+              w_dims[0], x_dims[1]));
     }
   }
   if (x_dims.size() == 2) {
@@ -68,7 +72,8 @@ class NLLLossOp : public framework::OperatorWithKernel {
   } else if (x_dims.size() == 4) {
     PADDLE_ENFORCE_EQ(label_dims.size(), 3,
                       platform::errors::InvalidArgument(
-                          "The tensor rank of Input(Label) must be 3."));
+                          "Expected Input(Label) dimensions=3, received %d.",
+                          label_dims.size()));
     auto input0 = x_dims[0];
     auto input2 = x_dims[2];
     auto input3 = x_dims[3];
...
@@ -42,7 +42,9 @@ class FTRLOp : public framework::OperatorWithKernel {
     auto param_dim = ctx->GetInputDim("Param");
     PADDLE_ENFORCE_EQ(param_dim, ctx->GetInputDim("Grad"),
                       platform::errors::InvalidArgument(
-                          "Two input of FTRL Op's dimension must be same."));
+                          "Two input of FTRL Op's dimension must be same, but "
+                          "param_dim is %d, Grad is %d",
+                          param_dim, ctx->GetInputDim("Grad")));
     auto lr_dim = ctx->GetInputDim("LearningRate");
     PADDLE_ENFORCE_NE(framework::product(lr_dim), 0,
@@ -51,9 +53,10 @@ class FTRLOp : public framework::OperatorWithKernel {
                           "been initialized. You may need to confirm "
                           "if you put exe.run(startup_program) "
                           "after optimizer.minimize function."));
-    PADDLE_ENFORCE_EQ(
-        framework::product(lr_dim), 1,
-        platform::errors::InvalidArgument("Learning Rate should be a scalar."));
+    PADDLE_ENFORCE_EQ(framework::product(lr_dim), 1,
+                      platform::errors::InvalidArgument(
+                          "Learning Rate should be a scalar, but got %d",
+                          framework::product(lr_dim)));
     ctx->SetOutputDim("ParamOut", param_dim);
     ctx->SetOutputDim("SquaredAccumOut", param_dim);
...
@@ -293,19 +293,24 @@ class CPUPRROIPoolOpKernel : public framework::OpKernel<T> {
     } else {
       PADDLE_ENFORCE_EQ(rois->lod().empty(), false,
                         platform::errors::InvalidArgument(
-                            "the lod of Input ROIs should not be empty when "
+                            "The lod of Input ROIs should not be empty when "
                             "BatchRoINums is None!"));
       auto rois_lod = rois->lod().back();
       int rois_batch_size = rois_lod.size() - 1;
-      PADDLE_ENFORCE_EQ(
-          rois_batch_size, batch_size,
-          platform::errors::InvalidArgument("the rois_batch_size and input(X) "
-                                            "batch_size should be the same."));
+      PADDLE_ENFORCE_EQ(rois_batch_size, batch_size,
+                        platform::errors::InvalidArgument(
+                            "The rois_batch_size and input(X)'s "
+                            "batch_size should be the same but received "
+                            "rois_batch_size: %d and batch_size: %d",
+                            rois_batch_size, batch_size));
       int rois_num_with_lod = rois_lod[rois_batch_size];
       PADDLE_ENFORCE_EQ(
           rois_num_with_lod, rois_num,
-          platform::errors::InvalidArgument(
-              "the rois_num from input and lod must be the same"));
+          platform::errors::InvalidArgument("The rois_num from input should be "
+                                            "equal to the rois_num from lod, "
+                                            "but received rois_num from input: "
+                                            "%d and the rois_num from lod: %d.",
+                                            rois_num, rois_num_with_lod));
       // calculate batch id index for each roi according to LoD
       for (int n = 0; n < rois_batch_size; ++n) {
...
@@ -112,7 +112,9 @@ static void CallPythonFunc(py::object *callable,
       out->ShareDataWith(*py_out_tensor);
     } catch (py::cast_error &) {
       PADDLE_THROW(platform::errors::InvalidArgument(
-          "The %d-th output must be LoDTensor.", i));
+          "py::cast to LoDTensor error. The %d-th output is expected to be "
+          "LoDTensor",
+          i));
     }
   }
 }
...
@@ -54,7 +54,9 @@ class BlockingQueue {
       PADDLE_ENFORCE_LT(
           queue_.size(), capacity_,
           platform::errors::PermissionDenied(
-              "The queue size cannot exceed the set queue capacity."));
+              "The queue size cannot exceed the set queue capacity. Expected "
+              "queue size is less than %d. But received %d",
+              capacity_, queue_.size()));
       queue_.push_back(elem);
       receive_cv_.notify_one();
       return true;
@@ -73,7 +75,9 @@ class BlockingQueue {
       PADDLE_ENFORCE_LT(
           queue_.size(), capacity_,
           platform::errors::PermissionDenied(
-              "The queue size cannot exceed the set queue capacity."));
+              "The queue size cannot exceed the set queue capacity. Expected "
+              "queue size is less than %d. But received %d",
+              capacity_, queue_.size()));
       queue_.emplace_back(std::move(elem));
       receive_cv_.notify_one();
       return true;
...
@@ -122,10 +122,13 @@ class ReadOp : public framework::OperatorBase {
     const std::vector<framework::proto::VarType::Type>& var_types =
         reader->VarTypes();
     const std::vector<bool>& need_check_feed = reader->NeedCheckFeed();
-    PADDLE_ENFORCE_EQ(out_arg_names.size(), need_check_feed.size(),
-                      platform::errors::InvalidArgument(
-                          "output size of read_op and the number of fed "
-                          "variables of reader do not match"));
+    PADDLE_ENFORCE_EQ(
+        out_arg_names.size(), need_check_feed.size(),
+        platform::errors::InvalidArgument(
+            "Output size of read_op and the number of fed "
+            "variables of reader do not match. Received size of output is %d, "
+            "number of fed variables of reader is %d",
+            out_arg_names.size(), need_check_feed.size()));
     for (size_t i = 0; i < out_arg_names.size(); ++i) {
       auto* out =
...
@@ -161,7 +161,9 @@ int64_t RecurrentBase::GetSequenceLength(const framework::Scope &scope) const {
   }
   PADDLE_ENFORCE_GE(seq_len, 0,
                     platform::errors::InvalidArgument(
-                        "RecurrentOp gets invalid sequence length."));
+                        "RecurrentOp gets invalid sequence length. Expected "
+                        "seq_len >= 0. Received seq_len = %d",
+                        seq_len));
   return seq_len;
 }
...
@@ -50,8 +50,25 @@ class SumXPUKernel : public framework::OpKernel<T> {
     }
     int r = xpu::sum_batch(dev_ctx.x_context(), ptrs.data(), out->data<T>(),
                            valid_count, out->numel());
-    PADDLE_ENFORCE_EQ(r, xpu::Error_t::SUCCESS,
-                      platform::errors::Fatal("XPU sum kernel error!"));
+    if (r == xpu::Error_t::INVALID_PARAM) {
+      PADDLE_ENFORCE_EQ(
+          r, xpu::Error_t::SUCCESS,
+          platform::errors::InvalidArgument(
+              "XPU kernel error of SumOp, error message: INVALID_PARAM, "
+              "please check your input & output."));
+    } else if (r == xpu::Error_t::RUNTIME_ERROR) {
+      PADDLE_ENFORCE_EQ(r, xpu::Error_t::SUCCESS,
+                        platform::errors::Unavailable(
+                            "XPU kernel error of SumOp, error message: "
+                            "RUNTIME_ERROR, please check whether Baidu "
+                            "Kunlun Card is properly installed."));
+    } else if (r == xpu::Error_t::NO_ENOUGH_WORKSPACE) {
+      PADDLE_ENFORCE_EQ(r, xpu::Error_t::SUCCESS,
+                        platform::errors::ResourceExhausted(
+                            "XPU kernel error of SumOp, error "
+                            "message: NO_ENOUGH_WORKSPACE, XPU "
+                            "has no enough memory."));
+    }
   }
 };
...
@@ -41,7 +41,8 @@ class TraceOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_GE(
         x_dims.size(), 2,
         platform::errors::OutOfRange(
-            "trace requires an tensor of at least two dimensions"));
+            "Input's dim is out of range (expected at least 2, but got %ld).",
+            x_dims.size()));
     PADDLE_ENFORCE_LT(
         dim1_, x_dims.size(),
         platform::errors::OutOfRange(
...
@@ -78,9 +78,9 @@ class TestLeakyReluDoubleGradCheck(unittest.TestCase):
 class TestELUDoubleGradCheck(unittest.TestCase):
     @prog_scope()
     def func(self, place):
-        shape = [2, 3, 6, 6]
+        shape = [2, 4, 4, 4]
         eps = 1e-6
-        alpha = 1.1
+        alpha = 0.2
         dtype = np.float64
         SEED = 0
...