From 65207b4560f3f7a79ca2d742d8c362823cbac69b Mon Sep 17 00:00:00 2001 From: Yiqun Liu Date: Sun, 4 Oct 2020 16:49:55 +0800 Subject: [PATCH] Polish the error message of fc, fused_fc_elementwise_layernorm and fused_embedding_seq_pool. (#27692) * Polish the error message of fc_op. * Polish the error message of fused_fc_elementwise_layer_norm op. * Polish an error message in fused_embedding_seq_pool_op. --- paddle/fluid/operators/fc_op.cc | 88 +++++++----- paddle/fluid/operators/fc_op.h | 14 +- .../fused/fused_embedding_seq_pool_op.h | 4 +- .../fused_fc_elementwise_layernorm_op.cc | 132 +++++++++++++----- 4 files changed, 161 insertions(+), 77 deletions(-) diff --git a/paddle/fluid/operators/fc_op.cc b/paddle/fluid/operators/fc_op.cc index 847b24f4f0b..d791b2bcfd0 100644 --- a/paddle/fluid/operators/fc_op.cc +++ b/paddle/fluid/operators/fc_op.cc @@ -23,64 +23,80 @@ class FCOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE_EQ(ctx->HasInput("Input"), true, - "X(Input) of Fully Connected should not be null."); - PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true, - "Out(Output) of Fully Connected should not be null."); - PADDLE_ENFORCE_EQ(ctx->HasInput("W"), true, - "W(Input) of Fully Connected should not be null."); + OP_INOUT_CHECK(ctx->HasInput("Input"), "Input", "Input", "FC"); + OP_INOUT_CHECK(ctx->HasInput("W"), "Input", "W", "FC"); + OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "FC"); - auto in_dims = ctx->GetInputDim("Input"); auto w_dims = ctx->GetInputDim("W"); bool padding_weights = ctx->Attrs().Get("padding_weights"); + PADDLE_ENFORCE_EQ( + w_dims.size(), 2, + platform::errors::InvalidArgument( + "The input Weight of fc is expected to be a 2-D tensor. 
"But received the number of Weight's dimensions is %d, " + "Weight's shape is %s.", + w_dims.size(), w_dims)); if (ctx->HasInput("Bias")) { auto bias_dims = ctx->GetInputDim("Bias"); auto w_dims1 = padding_weights ? w_dims[1] - 4 : w_dims[1]; + + PADDLE_ENFORCE_LE( + bias_dims.size(), 2, + platform::errors::InvalidArgument( + "The input Bias of fc is expected to be a 1-D or 2-D tensor. But " + "received the number of Bias's dimensions is %d, " + "Bias's shape is %s.", + bias_dims.size(), bias_dims)); + + PADDLE_ENFORCE_EQ( + bias_dims[bias_dims.size() - 1], w_dims1, + platform::errors::InvalidArgument( + "The last dimension of input Bias is expected to be equal " + "to the actual width of input Weight. But received the last " + "dimension of Bias is %d, Bias's shape is %s; " + "the actual width of Weight is %d, Weight's shape is %s.", + bias_dims[bias_dims.size() - 1], bias_dims, w_dims1, w_dims)); + if (bias_dims.size() == 2) { - PADDLE_ENFORCE_EQ(bias_dims[0], 1, - platform::errors::InvalidArgument( - "The shape of Bias is invalid." - "The height of Bias should be 1." - "But received height of Bias is %d.", - bias_dims[0])); - PADDLE_ENFORCE_EQ( - bias_dims[1], w_dims1, - platform::errors::InvalidArgument( - "The shape of Bias is invalid." - "The width of Bias should be equal to width of Weight." - "But received width of Bias is %d and width of Weight is %d.", - bias_dims[1], w_dims1)); - } else if (bias_dims.size() == 1) { PADDLE_ENFORCE_EQ( - bias_dims[0], w_dims1, + bias_dims[0], 1, platform::errors::InvalidArgument( - "The shape of Bias is invalid." - "The height of Bias should be equal to the width of weight." 
- "But received height of Bias is %d and width of Weight is %d.", - bias_dims[0], w_dims1)); + "The first dimension of input Bias is expected to be 1, " + "but received %d, Bias's shape is %s.", + bias_dims[0], bias_dims)); } } + auto in_dims = ctx->GetInputDim("Input"); + int in_num_col_dims = ctx->Attrs().Get("in_num_col_dims"); + PADDLE_ENFORCE_LT( + in_num_col_dims, in_dims.size(), + platform::errors::InvalidArgument( + "The attribute in_num_col_dims used to flatten Input to " + "a 2-D tensor, is expected to be less than the number of " + "Input's dimensions. But received in_num_col_dims is %d, " + "the number of Input's dimensions is %d, Input's shape is %s.", + in_num_col_dims, in_dims.size(), in_dims)); + auto& activation_type = ctx->Attrs().Get("activation_type"); if (!activation_type.empty()) { PADDLE_ENFORCE_EQ(activation_type, "relu", - "Activation %s is not supportetd in fc now.", - activation_type.c_str()); + platform::errors::InvalidArgument( + "The attribute activation_type of fc is expected " + "to be \"relu\", but received %s.", + activation_type.c_str())); } + if (ctx->Attrs().Get("use_mkldnn")) { PADDLE_ENFORCE_EQ( in_dims.size() >= 2 && in_dims.size() <= 4, true, platform::errors::Unimplemented( - "Fully Connected input should be 2D, 3D or 4D tensor.")); + "The Input of fc is expected to be a 2-D, 3-D or 4-D tensor when " + "use_mkldnn is set. 
But received the number of Input's " + "dimensions is %d, Input's shape is %s.", + in_dims.size(), in_dims)); } - PADDLE_ENFORCE_EQ(w_dims.size(), 2, - "Fully Connected weights should be 2-D tensor."); - int in_num_col_dims = ctx->Attrs().Get("in_num_col_dims"); - PADDLE_ENFORCE_GT( - in_dims.size(), in_num_col_dims, - "The input tensor Input's rank of FCOp should be larger than " - "in_num_col_dims."); std::vector output_dims; FCOutputSize(in_dims, w_dims, output_dims, in_num_col_dims, diff --git a/paddle/fluid/operators/fc_op.h b/paddle/fluid/operators/fc_op.h index 907f61196d6..6258dd0a386 100644 --- a/paddle/fluid/operators/fc_op.h +++ b/paddle/fluid/operators/fc_op.h @@ -32,11 +32,15 @@ inline void FCOutputSize(const framework::DDim& in_dims, auto in_mat_dims = framework::flatten_to_2d(in_dims, in_num_col_dims); auto w_dims0 = padding_weights ? w_dims[0] - 4 : w_dims[0]; auto w_dims1 = padding_weights ? w_dims[1] - 4 : w_dims[1]; - PADDLE_ENFORCE_EQ(in_mat_dims[1], w_dims0, - platform::errors::InvalidArgument( - "Fully Connected input and weigth size do not match. " - "input width: %d,weight height: %d", - in_mat_dims[1], w_dims0)); + PADDLE_ENFORCE_EQ( + in_mat_dims[1], w_dims0, + platform::errors::InvalidArgument( + "The input's second dimension and weight's first dimension is " + "expected to be the same. 
But received input's second dimension is " + "%d, input's shape is %s; weight's first dimension is %d, weight's " + "shape is %s.", + in_mat_dims[1], in_mat_dims, w_dims0, + framework::make_ddim({w_dims0, w_dims1}))); out_dims.reserve(static_cast(in_num_col_dims + 1)); for (int i = 0; i < in_num_col_dims; ++i) { diff --git a/paddle/fluid/operators/fused/fused_embedding_seq_pool_op.h b/paddle/fluid/operators/fused/fused_embedding_seq_pool_op.h index aeaec84ba5c..8713d580342 100644 --- a/paddle/fluid/operators/fused/fused_embedding_seq_pool_op.h +++ b/paddle/fluid/operators/fused/fused_embedding_seq_pool_op.h @@ -204,9 +204,9 @@ class FusedEmbeddingSeqPoolGradKernel : public framework::OpKernel { auto *table_t = context.Input("W"); table_dim = table_t->value().dims(); } else { - PADDLE_THROW( + PADDLE_THROW(platform::errors::PermissionDenied( "The parameter W of a LookupTable " - "must be either LoDTensor or SelectedRows"); + "must be either LoDTensor or SelectedRows.")); } bool is_sparse = context.Attr("is_sparse"); diff --git a/paddle/fluid/operators/fused/fused_fc_elementwise_layernorm_op.cc b/paddle/fluid/operators/fused/fused_fc_elementwise_layernorm_op.cc index ea7d6a93d1b..08909bcb6fc 100644 --- a/paddle/fluid/operators/fused/fused_fc_elementwise_layernorm_op.cc +++ b/paddle/fluid/operators/fused/fused_fc_elementwise_layernorm_op.cc @@ -22,47 +22,73 @@ class FusedFCElementwiseLayerNormOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext *ctx) const override { - PADDLE_ENFORCE_EQ( - ctx->HasInput("X"), true, - "Input(X) of fused_fc_elementwise_layernorm should not be null."); - PADDLE_ENFORCE_EQ( - ctx->HasInput("W"), true, - "Input(W) of fused_fc_elementwise_layernorm should not be null."); - PADDLE_ENFORCE_EQ( - ctx->HasInput("Y"), true, - "Input(Y) of fused_fc_elementwise_layernorm should not be null."); - PADDLE_ENFORCE_EQ( - ctx->HasOutput("Out"), true, - 
"Output(Out) of fused_fc_elementwise_layernorm should not be null."); + OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", + "FusedFcElementwiseLayernorm"); + OP_INOUT_CHECK(ctx->HasInput("W"), "Input", "W", + "FusedFcElementwiseLayernorm"); + OP_INOUT_CHECK(ctx->HasInput("Y"), "Input", "Y", + "FusedFcElementwiseLayernorm"); + OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", + "FusedFcElementwiseLayernorm"); auto w_dims = ctx->GetInputDim("W"); - PADDLE_ENFORCE_EQ(w_dims.size(), 2, - "Fully Connected input should be 2-D tensor."); + PADDLE_ENFORCE_EQ( + w_dims.size(), 2, + platform::errors::InvalidArgument( + "The input Weight of fc is expected to be a 2-D tensor. " + "But received the number of Weight's dimensions is %d, " + "Weight's shape is %s.", w_dims.size(), w_dims)); if (ctx->HasInput("Bias0")) { auto bias0_dims = ctx->GetInputDim("Bias0"); + + PADDLE_ENFORCE_LE(bias0_dims.size(), 2, + platform::errors::InvalidArgument( + "The input Bias of fc is expected to be a 1-D or " + "2-D tensor. But received the number of Bias's " + "dimensions is %d, Bias's shape is %s.", + bias0_dims.size(), bias0_dims)); + + PADDLE_ENFORCE_EQ( + bias0_dims[bias0_dims.size() - 1], w_dims[1], + platform::errors::InvalidArgument( + "The last dimension of input Bias is expected to be equal " + "to the actual width of input Weight. 
But received the last " + "dimension of Bias is %d, Bias's shape is %s; " + "the actual width of Weight is %d, Weight's shape is %s.", + bias0_dims[bias0_dims.size() - 1], bias0_dims, w_dims[1], + w_dims)); + if (bias0_dims.size() == 2) { - PADDLE_ENFORCE_EQ(bias0_dims[0], 1, - "The shape of Bias must be [1, dim]."); - PADDLE_ENFORCE_EQ(bias0_dims[1], w_dims[1], - "The shape of Bias must be [1, dim]."); - } else if (bias0_dims.size() == 1) { - PADDLE_ENFORCE_EQ(bias0_dims[0], w_dims[1], - "The shape of Bias must be [1, dim]."); + PADDLE_ENFORCE_EQ( + bias0_dims[0], 1, + platform::errors::InvalidArgument( + "The first dimension of input Bias is expected to be 1, " + "but received %d, Bias's shape is %s.", + bias0_dims[0], bias0_dims)); } } auto x_dims = ctx->GetInputDim("X"); int x_num_col_dims = ctx->Attrs().Get("x_num_col_dims"); - PADDLE_ENFORCE_GT( - x_dims.size(), x_num_col_dims, - "The input tensor Input's rank of FCOp should be larger than " - "in_num_col_dims."); + PADDLE_ENFORCE_LT( + x_num_col_dims, x_dims.size(), + platform::errors::InvalidArgument( + "The attribute x_num_col_dims used to flatten input X to " + "a 2-D tensor, is expected to be less than the number of " + "input X's dimensions. But received x_num_col_dims is %d, " + "the number of input X's dimensions is %d, input X's shape is %s.", + x_num_col_dims, x_dims.size(), x_dims)); auto x_mat_dims = framework::flatten_to_2d(x_dims, x_num_col_dims); PADDLE_ENFORCE_EQ( x_mat_dims[1], w_dims[0], - "Fully Connected input and weigth size do not match. %s, %s"); + platform::errors::InvalidArgument( + "The input's second dimension and weight's first dimension is " + "expected to be the same. 
But received input's second dimension is " + "%d, input's shape is %s; weight's first dimension is %d, weight's " + "shape is %s.", + x_mat_dims[1], x_mat_dims, w_dims[0], w_dims)); std::vector fc_out_dims; for (int i = 0; i < x_num_col_dims; ++i) { @@ -71,29 +97,67 @@ class FusedFCElementwiseLayerNormOp : public framework::OperatorWithKernel { fc_out_dims.push_back(w_dims[1]); auto y_dims = ctx->GetInputDim("Y"); - PADDLE_ENFORCE_EQ(framework::make_ddim(fc_out_dims), y_dims); + PADDLE_ENFORCE_EQ(framework::make_ddim(fc_out_dims), y_dims, + platform::errors::InvalidArgument( + "The output's shape of fc is expected to be equal to " + "that of input Y. But received output's shape of fc " + "is %s, input Y's shape is %s.", + framework::make_ddim(fc_out_dims), y_dims)); auto begin_norm_axis = ctx->Attrs().Get("begin_norm_axis"); PADDLE_ENFORCE_LT( begin_norm_axis, y_dims.size(), - "'begin_norm_axis' must be less than the rank of Input(Y)."); + platform::errors::InvalidArgument( + "The attribute begin_norm_axis used to flatten input Y to a 2-D " + "tensor, is expected to be less than the number of input Y's " + "dimensions. But received begin_norm_axis is %d, the number of " + "input Y's dimensions is %d, input Y's shape is %s.", + begin_norm_axis, y_dims.size(), y_dims)); auto y_mat_dim = framework::flatten_to_2d(y_dims, begin_norm_axis); int64_t dim_0 = y_mat_dim[0]; int64_t dim_1 = y_mat_dim[1]; if (ctx->HasInput("Scale")) { - PADDLE_ENFORCE_EQ(ctx->GetInputDim("Scale").size(), 1); + auto scale_dims = ctx->GetInputDim("Scale"); + PADDLE_ENFORCE_EQ(scale_dims.size(), 1, + platform::errors::InvalidArgument( + "The input Scale is expected to be a 1-D tensor. 
" + "But received the number of input Scale's " + "dimensions is %d, input Scale's shape is %s.", + scale_dims.size(), scale_dims)); if (ctx->IsRuntime()) { - PADDLE_ENFORCE_EQ(ctx->GetInputDim("Scale")[0], dim_1, - "scale should with right"); + PADDLE_ENFORCE_EQ( + scale_dims[0], dim_1, + platform::errors::InvalidArgument( + "The first dimension of input Scale is expected to be equal to " + "the second dimension of input Y after flattened. " + "But received the first dimension of input Scale is %d, input " + "Scale's shape is %s; the second dimension of flattened input " + "Y is %d, input Y's shape is %s, flattened axis is %d.", + scale_dims[0], scale_dims, dim_1, y_dims, begin_norm_axis)); } } if (ctx->HasInput("Bias1")) { - PADDLE_ENFORCE_EQ(ctx->GetInputDim("Bias1").size(), 1); + auto bias1_dims = ctx->GetInputDim("Bias1"); + PADDLE_ENFORCE_EQ( + bias1_dims.size(), 1, + platform::errors::InvalidArgument( + "The input Bias1 is expected to be a 1-D tensor. " + "But received the number of input Bias1's dimension is %d, " + "input Bias1's shape is %s.", + bias1_dims.size(), bias1_dims)); + if (ctx->IsRuntime()) { + PADDLE_ENFORCE_EQ( + bias1_dims[0], dim_1, + platform::errors::InvalidArgument( + "The first dimension of input Bias1 is expected to be equal to " + "the second dimension of input Y after flattened. " + "But received the first dimension of input Bias1 is %d, input " + "Bias1's shape is %s; the second dimension of flattened input " + "Y is %d, input Y's shape is %s, flattened axis is %d.", + bias1_dims[0], bias1_dims, dim_1, y_dims, begin_norm_axis)); } } -- GitLab