diff --git a/paddle/operators/concat_op.cc b/paddle/operators/concat_op.cc
index 5f052689251bc023df635d41c1e64a660a0aa488..6134ac78b145e0c9db0146a38f525204d9f11fed 100644
--- a/paddle/operators/concat_op.cc
+++ b/paddle/operators/concat_op.cc
@@ -25,7 +25,7 @@ class ConcatOp : public framework::OperatorWithKernel {
 
   void InferShape(framework::InferShapeContext *ctx) const override {
     PADDLE_ENFORCE_GE(ctx->Inputs("X").size(), 1UL,
-                      "Inputs(X) of ConcatOp should be empty.")
+                      "Inputs(X) of ConcatOp should not be empty.");
     PADDLE_ENFORCE(ctx->HasOutput("Out"),
                    "Output(Out) of ConcatOp should not be null.");
 
@@ -45,7 +45,7 @@ class ConcatOp : public framework::OperatorWithKernel {
         }
         PADDLE_ENFORCE_EQ(out_dims[j], ins[i][j],
                           "Input tensors should have the same "
-                          "elements except the specify axis.")
+                          "shape except along the specified axis.");
       }
     }
     ctx->SetOutputDim("Out", out_dims);
diff --git a/paddle/operators/elementwise_op.h b/paddle/operators/elementwise_op.h
index 56e5eb69bc382a2c15d88b759fa6987f02c6cabb..ea533503e4916cae7e1157ed34da9629dcff3513 100644
--- a/paddle/operators/elementwise_op.h
+++ b/paddle/operators/elementwise_op.h
@@ -35,7 +35,7 @@ class ElementwiseOp : public framework::OperatorWithKernel {
     auto x_dim = ctx->GetInputDim("X");
     auto y_dim = ctx->GetInputDim("Y");
     PADDLE_ENFORCE_GE(x_dim.size(), y_dim.size(),
-                      "Rank of first input must >= rank of second input.")
+                      "Rank of first input must be >= rank of second input.");
     ctx->SetOutputDim("Out", x_dim);
     ctx->ShareLoD("X", /*->*/ "Out");
   }
@@ -120,7 +120,7 @@ class ElementwiseOpGrad : public framework::OperatorWithKernel {
     auto out_dims = ctx->GetInputDim(framework::GradVarName("Out"));
 
     PADDLE_ENFORCE_GE(x_dims.size(), y_dims.size(),
-                      "Rank of first input must >= rank of second input.")
+                      "Rank of first input must be >= rank of second input.");
 
     auto x_grad_name = framework::GradVarName("X");
     auto y_grad_name = framework::GradVarName("Y");
diff --git a/paddle/operators/elementwise_op_function.h b/paddle/operators/elementwise_op_function.h
index 488a35aafc8600bb8bb252fc3a5161c72a2f6df1..8aa35b2c466785c8749739635fcd1c2b19292f3e 100644
--- a/paddle/operators/elementwise_op_function.h
+++ b/paddle/operators/elementwise_op_function.h
@@ -106,7 +106,7 @@ void ElementwiseCompute(const framework::ExecutionContext& ctx) {
   auto x_dims = x->dims();
   auto y_dims = y->dims();
   PADDLE_ENFORCE_GE(x_dims.size(), y_dims.size(),
-                    "Rank of first input must >= rank of second input.")
+                    "Rank of first input must be >= rank of second input.");
 
   if (x_dims == y_dims) {
     functor f;
diff --git a/paddle/operators/sequence_slice_op.h b/paddle/operators/sequence_slice_op.h
index 6411e0a46630beb0a9abb6aa5e517978b25a5254..428ef556daa248a918f58dde608dc024144e773c 100644
--- a/paddle/operators/sequence_slice_op.h
+++ b/paddle/operators/sequence_slice_op.h
@@ -54,10 +54,10 @@ class SequenceSliceOpKernel : public framework::OpKernel<T> {
     PADDLE_ENFORCE_EQ(lod.size(), 1UL, "Only support one level sequence now.");
     PADDLE_ENFORCE_EQ(
        n, static_cast<size_t>(length->dims()[0]),
-        "The size of input-sequence and length-array should be the same")
+        "The size of input-sequence and length-array should be the same");
     PADDLE_ENFORCE_EQ(
        n, static_cast<size_t>(offset->dims()[0]),
-        "The size of input-sequence and offset-array should be the same")
+        "The size of input-sequence and offset-array should be the same");
 
     const int64_t* offset_data = offset->data<int64_t>();
     const int64_t* length_data = length->data<int64_t>();
@@ -78,11 +78,11 @@ class SequenceSliceOpKernel : public framework::OpKernel<T> {
 
     for (size_t i = 0; i < n; ++i) {
       PADDLE_ENFORCE_LT(0, offset_data[i],
-                        "The offset[%d] must greater than zero.", i)
+                        "The offset[%d] must be greater than zero.", i);
       PADDLE_ENFORCE_LT(0, length_data[i],
-                        "The length[%d] must greater than zero.", i)
+                        "The length[%d] must be greater than zero.", i);
       PADDLE_ENFORCE_LT(lod[0][i] + offset_data[i] + length_data[i],
-                        lod[0][i + 1], "The target tensor's length overflow.")
+                        lod[0][i + 1], "The target tensor's length overflows.");
     }
 
     out->mutable_data<T>(ctx.GetPlace());
diff --git a/paddle/operators/sum_op.h b/paddle/operators/sum_op.h
index 4afec03ecef168077c9964f5cb1da7cd61861f40..a1eb3b014edc65b7ed604c8b7f17d72f7e460f70 100644
--- a/paddle/operators/sum_op.h
+++ b/paddle/operators/sum_op.h
@@ -84,7 +84,7 @@ class SumKernel : public framework::OpKernel<T> {
       int64_t offset = 0;
       for (int i = 0; i < N; i++) {
         PADDLE_ENFORCE_EQ(out->height(),
-                          in_vars[i]->Get<SelectedRows>().height())
+                          in_vars[i]->Get<SelectedRows>().height());
         functor(context.device_context(), in_vars[i]->Get<SelectedRows>(),
                 offset, out);
         offset += in_vars[i]->Get<SelectedRows>().value().numel();
diff --git a/paddle/platform/enforce.h b/paddle/platform/enforce.h
index 415020ab965fa976c37870b7ad5794aab947fb4e..97338a4ce68da46ead7d66f7e01a3d5c8623b3c4 100644
--- a/paddle/platform/enforce.h
+++ b/paddle/platform/enforce.h
@@ -234,16 +234,24 @@ inline void throw_on_error(T e) {
   __PADDLE_BINARY_COMPARE(__VAL0, __VAL1, <, >=, __VA_ARGS__)
 #define PADDLE_ENFORCE_LE(__VAL0, __VAL1, ...) \
   __PADDLE_BINARY_COMPARE(__VAL0, __VAL1, <=, >, __VA_ARGS__)
-#define PADDLE_ENFORCE_NOT_NULL(__VAL, ...) \
-  PADDLE_ENFORCE(nullptr != (__VAL), #__VAL " should not be null\n%s", \
-                 paddle::string::Sprintf("" __VA_ARGS__));
-
-#define __PADDLE_BINARY_COMPARE(__VAL0, __VAL1, __CMP, __INV_CMP, ...) \
-  PADDLE_ENFORCE(__VAL0 __CMP __VAL1, \
-                 "enforce %s " #__CMP " %s failed, %s " #__INV_CMP " %s\n%s", \
-                 #__VAL0, #__VAL1, paddle::string::to_string(__VAL0), \
-                 paddle::string::to_string(__VAL1), \
-                 paddle::string::Sprintf("" __VA_ARGS__));
+#define PADDLE_ENFORCE_NOT_NULL(__VAL, ...)                  \
+  do {                                                       \
+    if (UNLIKELY(nullptr == (__VAL))) {                      \
+      PADDLE_THROW(#__VAL " should not be null\n%s",         \
+                   paddle::string::Sprintf("" __VA_ARGS__)); \
+    }                                                        \
+  } while (0)
+
+#define __PADDLE_BINARY_COMPARE(__VAL0, __VAL1, __CMP, __INV_CMP, ...)  \
+  do {                                                                  \
+    if (!UNLIKELY((__VAL0)__CMP(__VAL1))) {                             \
+      PADDLE_THROW("enforce %s " #__CMP " %s failed, %s " #__INV_CMP    \
+                   " %s\n%s",                                           \
+                   #__VAL0, #__VAL1, paddle::string::to_string(__VAL0), \
+                   paddle::string::to_string(__VAL1),                   \
+                   paddle::string::Sprintf("" __VA_ARGS__));            \
+    }                                                                   \
+  } while (0)
 
 }  // namespace platform
 }  // namespace paddle
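Note on the `enforce.h` change: wrapping each macro body in `do { ... } while (0)` and dropping the semicolon that used to live inside the macro is the standard way to make a multi-statement macro behave as a single statement, which is exactly why every call site touched above now needs its own trailing `;`. A minimal standalone sketch of the idiom (the `MY_ENFORCE_EQ` name and the plain `std::runtime_error` are illustrative stand-ins, not Paddle's API):

```cpp
#include <sstream>
#include <stdexcept>

// A statement-like macro wrapped in do { ... } while (0). The body ends
// without a semicolon, so the caller must supply one, and the whole
// expansion parses as exactly one statement.
#define MY_ENFORCE_EQ(a, b)                             \
  do {                                                  \
    if (!((a) == (b))) {                                \
      std::ostringstream oss;                           \
      oss << "enforce " #a " == " #b " failed, " << (a) \
          << " != " << (b);                             \
      throw std::runtime_error(oss.str());              \
    }                                                   \
  } while (0)

int main() {
  int x = 1;
  // Safe inside an unbraced if/else: each expansion is one statement.
  if (x == 1)
    MY_ENFORCE_EQ(x, 1);
  else
    MY_ENFORCE_EQ(x, 2);
  return 0;
}
```

Had the macro expanded to a bare `if (...) { ... }` block instead, the caller's `;` would become an empty statement and the `else` above would fail to compile; the `do`/`while (0)` wrapper avoids that while keeping the familiar call-with-semicolon syntax.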