diff --git a/paddle/fluid/operators/elementwise/elementwise_mul_op.h b/paddle/fluid/operators/elementwise/elementwise_mul_op.h
index 6a02fe2758668eb0862c4b7b951e898f910c02df..c3695cabe7f3c834c48d32cd84f805fa065b7709 100644
--- a/paddle/fluid/operators/elementwise/elementwise_mul_op.h
+++ b/paddle/fluid/operators/elementwise/elementwise_mul_op.h
@@ -94,15 +94,19 @@ class ElementwiseMulKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
     auto x_var = ctx.InputVar("X");
-    PADDLE_ENFORCE(x_var != nullptr,
-                   "Cannot get input Variable X, variable name = %s",
-                   ctx.InputName("X"));
+    PADDLE_ENFORCE_EQ(x_var != nullptr, true,
+                      platform::errors::InvalidArgument(
+                          "Cannot get input Variable X, Variable name = %s.",
+                          ctx.InputName("X")));
     auto* y = ctx.Input<framework::LoDTensor>("Y");
 
     framework::Tensor x, *z;
     if (x_var->IsType<framework::SelectedRows>()) {
-      PADDLE_ENFORCE(y->dims().size() == 1 && y->dims()[0] == 1,
-                     "For elementwise_op, if X is Sparse, Y must be scalar.");
+      PADDLE_ENFORCE_EQ(y->dims().size() == 1 && y->dims()[0] == 1, true,
+                        platform::errors::InvalidArgument(
+                            "For elementwise_op, if X is Sparse, Y must be "
+                            "scalar. But received the size of Y = %s.",
+                            y->dims().size()));
       auto& x_sele = x_var->Get<framework::SelectedRows>();
       auto out_sele = ctx.Output<framework::SelectedRows>("Out");
       x = x_sele.value();
@@ -115,8 +119,10 @@ class ElementwiseMulKernel : public framework::OpKernel<T> {
       x = x_var->Get<framework::LoDTensor>();
       z = ctx.Output<framework::LoDTensor>("Out");
     } else {
-      PADDLE_THROW("X's type[%s] is not supported by elementwise_op.",
-                   framework::ToTypeName(x_var->Type()));
+      PADDLE_THROW(platform::errors::InvalidArgument(
+          "X's type[%s] is not supported by elementwise_op. X's type should "
+          "be LoDTensor or SelectedRows.",
+          framework::ToTypeName(x_var->Type())));
     }
 
     z->mutable_data<T>(ctx.GetPlace());
diff --git a/paddle/fluid/operators/elementwise/elementwise_op.h b/paddle/fluid/operators/elementwise/elementwise_op.h
index e3542f5361d657593589f6d11b06bfb4a5b96158..c613969343eb0e3970c7e46090e2b89e9a899084 100644
--- a/paddle/fluid/operators/elementwise/elementwise_op.h
+++ b/paddle/fluid/operators/elementwise/elementwise_op.h
@@ -38,35 +38,40 @@ class ElementwiseOp : public framework::OperatorWithKernel {
   using Tensor = framework::Tensor;
 
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
-                      "Input(X) of elementwise op should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasInput("Y"), true,
-                      "Input(Y) of elementwise op should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
-                      "Output(Out) of elementwise op should not be null.");
-
-    PADDLE_ENFORCE(
-        ctx->GetInputsVarType("Y").front() ==
-            framework::proto::VarType::LOD_TENSOR,
-        "The input var's type should be LoDTensor, but the received is %s [%s]",
-        ctx->GetInputsVarType("Y").front(), ctx->Inputs("Y").front());
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "ElementwiseOp");
+    OP_INOUT_CHECK(ctx->HasInput("Y"), "Input", "Y", "ElementwiseOp");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "ElementwiseOp");
+
+    PADDLE_ENFORCE_EQ(
+        ctx->GetInputsVarType("Y").front(),
+        framework::proto::VarType::LOD_TENSOR,
+        platform::errors::InvalidArgument(
+            "The input var's type should be LoDTensor, but the "
+            "received is %s [%s].",
+            ctx->GetInputsVarType("Y").front(), ctx->Inputs("Y").front()));
 
     if (ctx->GetInputsVarType("X").front() ==
         framework::proto::VarType::SELECTED_ROWS) {
       PADDLE_ENFORCE_EQ(
           ctx->GetInputDim("Y").size(), 1u,
-          "ShapeError: For elementwise_op, if X is Sparse(VarType.SELECTED_ROWS"
-          "), Y must be scalar. But reveived the dimension of Y = %s",
-          ctx->GetInputDim("Y").size());
+          platform::errors::InvalidArgument(
+              "For elementwise_op, if X is Sparse(VarType.SELECTED_ROWS"
+              "), Y must be scalar, the size of Y should be 1. "
+              "But received the size of Y = %s.",
+              ctx->GetInputDim("Y").size()));
       PADDLE_ENFORCE_EQ(
           ctx->GetInputDim("Y")[0], 1,
-          "ShapeError: For elementwise_op, if X is Sparse(VarType.SELECTED_ROWS"
-          "), Y must be scalar. But reveived the first dimension of Y = %s",
-          ctx->GetInputDim("Y")[0]);
+          platform::errors::InvalidArgument(
+              "For elementwise_op, if X is Sparse(VarType.SELECTED_ROWS"
+              "), Y must be scalar, the first dimension of Y should be 1. "
+              "But received the first dimension of Y = %s.",
+              ctx->GetInputDim("Y")[0]));
     } else if (ctx->GetInputsVarType("X").front() !=
               framework::proto::VarType::LOD_TENSOR) {
-      PADDLE_THROW("X's type[%s] is not supported by elementwise_op.",
-                   ctx->GetInputsVarType("X").front());
+      PADDLE_THROW(platform::errors::InvalidArgument(
+          "Input X's type[%s] is not supported by elementwise_op. Please set "
+          "its type to LOD_TENSOR.",
+          ctx->GetInputsVarType("X").front()));
     }
 
     if (ctx->GetInputDim("X") == ctx->GetInputDim("Y")) {
@@ -212,9 +217,9 @@ class ElementwiseOpGrad : public framework::OperatorWithKernel {
 
   void InferShape(framework::InferShapeContext *ctx) const override {
     auto out_grad_name = framework::GradVarName("Out");
-    PADDLE_ENFORCE_EQ(ctx->HasInput("Y"), true, "Input(Y) should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasInput(out_grad_name), true,
-                      "Input(Out@GRAD) should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("Y"), "Input", "Y", "ElementwiseOpGrad");
+    OP_INOUT_CHECK(ctx->HasInput(out_grad_name), "Input", out_grad_name,
+                   "ElementwiseOpGrad");
     auto x_grad_name = framework::GradVarName("X");
     auto y_grad_name = framework::GradVarName("Y");
     if (ctx->HasOutput(x_grad_name)) {
@@ -306,12 +311,12 @@ class ElementwiseOpDoubleGradWithoutDXDY
       const framework::ExecutionContext &ctx) const override {
     framework::proto::VarType::Type input_data_type;
     if (ctx.HasInput("DDX") == false) {
-      PADDLE_ENFORCE_EQ(ctx.HasInput("DDY"), true,
-                        "Input(DDY) should not be null");
+      OP_INOUT_CHECK(ctx.HasInput("DDY"), "Input", "DDY",
+                     "ElementwiseOpDoubleGradWithoutDXDY");
       input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "DDY");
     } else if (ctx.HasInput("DDY") == false) {
-      PADDLE_ENFORCE_EQ(ctx.HasInput("DDX"), true,
-                        "Input(DDX) should not be null");
+      OP_INOUT_CHECK(ctx.HasInput("DDX"), "Input", "DDX",
+                     "ElementwiseOpDoubleGradWithoutDXDY");
       input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "DDX");
     } else {
       input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "DDX");
diff --git a/paddle/fluid/operators/elementwise/elementwise_op_function.cu.h b/paddle/fluid/operators/elementwise/elementwise_op_function.cu.h
index ad39441f3797afcddb3b0045a05428b701c7e7e8..afa87a0ad8a1f5141e135573b875d1f3b61a8e5c 100644
--- a/paddle/fluid/operators/elementwise/elementwise_op_function.cu.h
+++ b/paddle/fluid/operators/elementwise/elementwise_op_function.cu.h
@@ -32,9 +32,9 @@ limitations under the License. */
 #define __h2div h2div
 #endif
 
-#define DIV_ERROR_INFO                                                  \
-  "InvalidArgumentError: Integer division by zero encountered in "     \
-  "divide.Please check.\n"
+#define DIV_ERROR_INFO                                                       \
+  "InvalidArgumentError: Integer division by zero encountered in divide. "   \
+  "Please check.\n"
 
 namespace paddle {
 namespace operators {
diff --git a/paddle/fluid/operators/elementwise/elementwise_op_function.h b/paddle/fluid/operators/elementwise/elementwise_op_function.h
old mode 100755
new mode 100644
index d0b8e97c71fbf99140337a21031f8ccbc6824694..807b2e8faa53cc5348875385bb91cf10d70263b0
--- a/paddle/fluid/operators/elementwise/elementwise_op_function.h
+++ b/paddle/fluid/operators/elementwise/elementwise_op_function.h
@@ -76,12 +76,13 @@ inline void get_mid_dims(const framework::DDim &x_dims,
   }
   for (int i = 0; i < y_dims.size(); ++i) {
     if (x_dims[i + axis] != y_dims[i]) {
-      PADDLE_ENFORCE(y_dims[i] == 1 || x_dims[i + axis] == 1,
-                     "ShapeError: broadcast dimension mismatch. Operands "
-                     "could not be broadcast together with the shape of "
-                     "X = [%s] and the shape of Y = [%s]. Received [%d] "
-                     "in X is not equal to [%d] in Y",
-                     x_dims, y_dims, x_dims[i + axis], y_dims[i]);
+      PADDLE_ENFORCE_EQ(y_dims[i] == 1 || x_dims[i + axis] == 1, true,
+                        platform::errors::InvalidArgument(
+                            "Broadcast dimension mismatch. Operands "
+                            "could not be broadcast together with the shape of "
+                            "X = [%s] and the shape of Y = [%s]. Received [%d] "
+                            "in X is not equal to [%d] in Y.",
+                            x_dims, y_dims, x_dims[i + axis], y_dims[i]));
       *is_run_common_broadcast = 1;
       return;
     }
@@ -119,8 +120,15 @@ inline void GetBroadcastDimsArrays(const framework::DDim &x_dims,
                                    int *x_dims_array, int *y_dims_array,
                                    int *out_dims_array, const int max_dim,
                                    const int axis) {
-  PADDLE_ENFORCE_GE(axis, 0, "Axis should be in range [0, %d)", axis);
-  PADDLE_ENFORCE_LT(axis, max_dim, "Axis should be in range [0, %d)", axis);
+  PADDLE_ENFORCE_GE(
+      axis, 0,
+      platform::errors::InvalidArgument(
+          "Axis should be greater than or equal to 0, but received axis is "
+          "%d.",
+          axis));
+  PADDLE_ENFORCE_LT(axis, max_dim,
+                    platform::errors::InvalidArgument(
+                        "Axis should be less than %d, but received axis is %d.",
+                        max_dim, axis));
   if (x_dims.size() > y_dims.size()) {
     std::fill(y_dims_array, y_dims_array + axis, 1);
     if (axis + y_dims.size() < max_dim) {
@@ -138,13 +146,15 @@ inline void GetBroadcastDimsArrays(const framework::DDim &x_dims,
   }
 
   for (int i = 0; i < max_dim; i++) {
-    PADDLE_ENFORCE(x_dims_array[i] == y_dims_array[i] || x_dims_array[i] <= 1 ||
-                       y_dims_array[i] <= 1,
-                   "ShapeError: broadcast dimension mismatch. Operands could "
-                   "not be broadcast together with the shape of X = [%s] and "
-                   "the shape of Y = [%s]. Received [%d] in X is not equal to "
-                   "[%d] in Y at i:%d",
-                   x_dims, y_dims, x_dims_array[i], y_dims_array[i], i);
+    PADDLE_ENFORCE_EQ(
+        x_dims_array[i] == y_dims_array[i] || x_dims_array[i] <= 1 ||
+            y_dims_array[i] <= 1,
+        true, platform::errors::InvalidArgument(
+                  "Broadcast dimension mismatch. Operands could "
+                  "not be broadcast together with the shape of X = [%s] and "
+                  "the shape of Y = [%s]. Received [%d] in X is not equal to "
+                  "[%d] in Y at i:%d.",
+                  x_dims, y_dims, x_dims_array[i], y_dims_array[i], i));
     if ((x_dims_array[i] > 1 || y_dims_array[i] > 1) ||
         (x_dims_array[i] == 1 && y_dims_array[i] == 1)) {
       out_dims_array[i] = std::max(x_dims_array[i], y_dims_array[i]);
@@ -1690,8 +1700,15 @@ void ElemwiseGradComputeWithBroadcast(
 
   }
   axis = (axis == -1 ? std::abs(x_dims.size() - y_dims.size()) : axis);
-  PADDLE_ENFORCE_GE(axis, 0, "Axis should be in range [0, %d)", axis);
-  PADDLE_ENFORCE_LT(axis, max_dim, "Axis should be in range [0, %d)", axis);
+  PADDLE_ENFORCE_GE(
+      axis, 0,
+      platform::errors::InvalidArgument(
+          "Axis should be greater than or equal to 0, but received axis is "
+          "%d.",
+          axis));
+  PADDLE_ENFORCE_LT(axis, max_dim,
+                    platform::errors::InvalidArgument(
+                        "Axis should be less than %d, but received axis is %d.",
+                        max_dim, axis));
 
   int pre, n, post, is_run_common_broadcast, axis_trim = 0;
   if (is_xsize_larger) {
@@ -1758,8 +1775,15 @@ void CommonElementwiseBroadcastForward(
     int axis, const bool is_xsize_larger = true) {
   int max_dim = std::max(x_dims.size(), y_dims.size());
   axis = (axis == -1 ? std::abs(x_dims.size() - y_dims.size()) : axis);
-  PADDLE_ENFORCE_GE(axis, 0, "Axis should be in range [0, %d)", axis);
-  PADDLE_ENFORCE_LT(axis, max_dim, "Axis should be in range [0, %d)", axis);
+  PADDLE_ENFORCE_GE(
+      axis, 0,
+      platform::errors::InvalidArgument(
+          "Axis should be greater than or equal to 0, but received axis is "
+          "%d.",
+          axis));
+  PADDLE_ENFORCE_LT(axis, max_dim,
+                    platform::errors::InvalidArgument(
+                        "Axis should be less than %d, but received axis is %d.",
+                        max_dim, axis));
   std::vector<int> x_dims_array(max_dim);
   std::vector<int> y_dims_array(max_dim);
   std::vector<int> out_dims_array(max_dim);
@@ -1848,8 +1872,15 @@ void ElementwiseComputeEx(const framework::ExecutionContext &ctx,
 
   }
   axis = (axis == -1 ? std::abs(x_dims.size() - y_dims.size()) : axis);
-  PADDLE_ENFORCE_GE(axis, 0, "Axis should be in range [0, %d)", axis);
-  PADDLE_ENFORCE_LT(axis, max_dim, "Axis should be in range [0, %d)", axis);
+  PADDLE_ENFORCE_GE(
+      axis, 0,
+      platform::errors::InvalidArgument(
+          "Axis should be greater than or equal to 0, but received axis is "
+          "%d.",
+          axis));
+  PADDLE_ENFORCE_LT(axis, max_dim,
+                    platform::errors::InvalidArgument(
+                        "Axis should be less than %d, but received axis is %d.",
+                        max_dim, axis));
 
   int pre, n, post, is_run_common_broadcast, axis_trim = 0;
   if (is_xsize_larger) {
@@ -2723,7 +2754,9 @@ void FusedElemwiseAndActGradComputeEx(
   const framework::DDim &x_dim = x->dims();
   const framework::DDim &y_dim = y->dims();
   if (UseIntermediateOut) {
-    PADDLE_ENFORCE(intermediate_out, "intermediate_out should not be nullptr");
+    PADDLE_ENFORCE_NOT_NULL(
+        intermediate_out,
+        platform::errors::InvalidArgument(
+            "The intermediate_out should not be nullptr."));
   }
   if (x_dim == y_dim) {
     FusedElemwiseAndActGradComputeNoBroadcast<
@@ -2768,9 +2801,11 @@ void FusedElemwiseAndActComputeEx(const framework::ExecutionContext &ctx,
                                   framework::Tensor *out,
                                   framework::Tensor *intermediate_out) {
   if (KeepIntermediateOut) {
-    PADDLE_ENFORCE(intermediate_out,
-                   "The save_intermediate_out is opened, "
-                   "intermediate_out should not be nullptr.");
+    PADDLE_ENFORCE_NOT_NULL(
+        intermediate_out,
+        platform::errors::InvalidArgument(
+            "The save_intermediate_out is opened, so the "
+            "intermediate_out should not be nullptr."));
   }
 
   const framework::DDim &x_dim = x.dims();
diff --git a/paddle/fluid/operators/elementwise/elementwise_pow_op.h b/paddle/fluid/operators/elementwise/elementwise_pow_op.h
index 1e0eab493802b9f0d5825d8c1fa5f60942e80407..ff55d2f2040a17c32720df08c1ac0b00cc1d7a02 100644
--- a/paddle/fluid/operators/elementwise/elementwise_pow_op.h
+++ b/paddle/fluid/operators/elementwise/elementwise_pow_op.h
@@ -41,9 +41,10 @@ class ElementwisePowKernel : public framework::OpKernel<T> {
   void Compute(const framework::ExecutionContext& ctx) const override {
     using Tensor = framework::LoDTensor;
     auto* x = ctx.Input<Tensor>("X");
-    PADDLE_ENFORCE(x != nullptr,
-                   "Cannot get input Variable X, variable name = %s",
-                   ctx.InputName("X"));
+    PADDLE_ENFORCE_EQ(x != nullptr, true,
+                      platform::errors::NotFound(
+                          "Cannot get input Variable X, Variable name = %s.",
+                          ctx.InputName("X")));
     auto* y = ctx.Input<Tensor>("Y");
     auto* z = ctx.Output<Tensor>("Out");
     z->mutable_data<T>(ctx.GetPlace());
diff --git a/paddle/fluid/operators/elementwise/mkldnn/elementwise_mul_mkldnn_op.cc b/paddle/fluid/operators/elementwise/mkldnn/elementwise_mul_mkldnn_op.cc
index 3f91dde73f11557d20fe440372fe8e4063f7597a..695ec23dba0ba7180c71a5aff2d308f38370548a 100644
--- a/paddle/fluid/operators/elementwise/mkldnn/elementwise_mul_mkldnn_op.cc
+++ b/paddle/fluid/operators/elementwise/mkldnn/elementwise_mul_mkldnn_op.cc
@@ -76,7 +76,9 @@ class ElementwiseMulMKLDNNKernel : public framework::OpKernel<T> {
       get_mid_dims(x_dims, y_dims_untrimmed, axis, &pre, &num, &post,
                    &is_run_common_broadcast);
 
-      if (post == 1) PADDLE_THROW("Not implemented when post is 1");
+      if (post == 1)
+        PADDLE_THROW(
+            platform::errors::Unimplemented("Not implemented when post is 1."));
 
       const int64_t n = x_dims[0];
       const int64_t c = x_dims[1];
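For context, below is a minimal, self-contained sketch of the enforce pattern this patch migrates to. It is not Paddle's implementation: TOY_ENFORCE_EQ and toy_errors::InvalidArgument are hypothetical stand-ins for PADDLE_ENFORCE_EQ and platform::errors::InvalidArgument, illustrating only the shape of the check — evaluate a predicate, and on failure raise an error that carries a typed category plus a formatted, actionable message.

// toy_enforce.cc — illustrative sketch only; names are hypothetical.
#include <cstdio>
#include <stdexcept>
#include <string>

namespace toy_errors {
// Stand-in for platform::errors::InvalidArgument: prefixes a category
// name onto a printf-style formatted message.
template <typename... Args>
std::string InvalidArgument(const char* fmt, Args... args) {
  char buf[512];
  std::snprintf(buf, sizeof(buf), fmt, args...);
  return std::string("InvalidArgumentError: ") + buf;
}
}  // namespace toy_errors

// Stand-in for PADDLE_ENFORCE_EQ: compares two values and throws the
// prepared error message when they differ.
#define TOY_ENFORCE_EQ(a, b, err_msg)                   \
  do {                                                  \
    if ((a) != (b)) throw std::runtime_error(err_msg);  \
  } while (0)

int main() {
  int axis = -3;
  try {
    // Mirrors the axis checks added in elementwise_op_function.h.
    TOY_ENFORCE_EQ(axis >= 0, true,
                   toy_errors::InvalidArgument(
                       "Axis should be greater than or equal to 0, "
                       "but received axis is %d.",
                       axis));
  } catch (const std::exception& e) {
    std::printf("%s\n", e.what());  // prints the category + message
  }
  return 0;
}

The design point the patch standardizes on: the condition, the error category (InvalidArgument, NotFound, Unimplemented), and a message that states both the expectation and the received value are kept together at the check site, so failures report what went wrong without a debugger.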