diff --git a/paddle/fluid/operators/array_to_lod_tensor_op.cc b/paddle/fluid/operators/array_to_lod_tensor_op.cc index ea1a93b5826496aafc7a092d91ad4b4a4bad7215..9379ec2dd67f27299ead85c52a10cbdc4fc13932 100644 --- a/paddle/fluid/operators/array_to_lod_tensor_op.cc +++ b/paddle/fluid/operators/array_to_lod_tensor_op.cc @@ -48,7 +48,8 @@ struct ArrayToLoDFunctor : public boost::static_visitor { #ifdef PADDLE_WITH_CUDA Apply(static_cast(pool.Get(place))); #else - PADDLE_THROW("Fluid is not compiled with CUDA"); + PADDLE_THROW( + platform::errors::Unavailable("Fluid is not compiled with CUDA")); #endif } } @@ -88,7 +89,9 @@ class ArrayToLoDTensorOp : public framework::OperatorBase { // Check dims, place and data type of input's elements and infer output's // dim - PADDLE_ENFORCE(!x.empty(), "There's no element in the input array."); + PADDLE_ENFORCE_EQ(x.empty(), false, + platform::errors::PreconditionNotMet( + "There's no element in the input array.")); int rank = x[0].dims().size(); platform::Place place = x[0].place(); auto data_type = x[0].type(); @@ -99,18 +102,24 @@ class ArrayToLoDTensorOp : public framework::OperatorBase { for (size_t i = 1; i < x.size(); ++i) { auto ins_i_dims = rank > 1 ? 
framework::slice_ddim(x[i].dims(), 1, rank) : framework::make_ddim({0}); - PADDLE_ENFORCE_EQ(ins_i_dims, ins_dims, - "The dimension of the %zu'th element in LoDTensorArray " - "differs from previous ones.", - i); - PADDLE_ENFORCE(x[i].place() == place, - "The place class of the %zu'th element in LoDTensorArray " - "differs from previous ones.", - i); - PADDLE_ENFORCE(x[i].type() == data_type, - "The date type of the %zu'th element in LoDTensorArray " - "differs from previous ones.", - i); + PADDLE_ENFORCE_EQ( + ins_i_dims, ins_dims, + platform::errors::InvalidArgument( + "The dimension of the %zu'th element in LoDTensorArray " + "differs from previous ones.", + i)); + PADDLE_ENFORCE_EQ( + x[i].place(), place, + platform::errors::InvalidArgument( + "The place class of the %zu'th element in LoDTensorArray " + "differs from previous ones.", + i)); + PADDLE_ENFORCE_EQ( + x[i].type(), data_type, + platform::errors::InvalidArgument( + "The data type of the %zu'th element in LoDTensorArray " + "differs from previous ones.", + i)); batch_size += x[i].dims()[0]; } auto ins_dim_vec = framework::vectorize(ins_dims); @@ -138,7 +147,13 @@ class ArrayToLoDTensorOp : public framework::OperatorBase { ArrayToLoDFunctor functor; for (size_t idx : table_item_idx) { cur_level_lod.push_back(cur_level_lod.back() + table_items[idx].length); - PADDLE_ENFORCE_LE(table_items[idx].length, x.size()); + PADDLE_ENFORCE_LE(table_items[idx].length, x.size(), + platform::errors::InvalidArgument( + "The RankTable items length should be less than or " + "equal to Input(X) size, " + "but received RankTable items length %d, longer " + "than Input(X) size %d.", + table_items[idx].length, x.size())); for (size_t x_idx = 0; x_idx < table_items[idx].length; ++x_idx) { auto lod_and_offset = framework::GetSubLoDAndAbsoluteOffset( x[x_idx].lod(), idx, idx + 1, 0); @@ -151,7 +166,12 @@ class ArrayToLoDTensorOp : public framework::OperatorBase { VLOG(10) << "idx=" << idx << " x_idx=" << x_idx << " [" << ", " << 
end_offset << "]"; // Copy data - PADDLE_ENFORCE_GE(end_offset, start_offset); + PADDLE_ENFORCE_GE( + end_offset, start_offset, + platform::errors::InvalidArgument( + "The lod data start offset should be smaller than or equal to " + "the end offset, but the start offset is %d, larger than end offset %d.", + start_offset, end_offset)); size_t len = end_offset - start_offset; if (len == 0) { continue; } @@ -188,10 +208,12 @@ class ArrayToLoDTensorOpProtoMaker : public framework::OpProtoAndCheckerMaker { class ArrayToLoDTensorInferShape : public framework::InferShapeBase { public: void operator()(framework::InferShapeContext *context) const override { - PADDLE_ENFORCE(context->HasInput("X"), - "ArrayToLoDTensorOp must have input X."); - PADDLE_ENFORCE(context->HasInput("RankTable"), - "ArrayToLoDTensorOp must have input RankTable."); + PADDLE_ENFORCE_EQ( + context->HasInput("X"), true, + platform::errors::NotFound("Input(X) of ArrayToLoDTensorOp should not be null.")); + PADDLE_ENFORCE_EQ(context->HasInput("RankTable"), true, + platform::errors::NotFound( + "Input(RankTable) of ArrayToLoDTensorOp should not be null.")); // For compile-time, the first dim of input X and output Out should be -1. // For runtime, the first dim of output Out should be the sum of all // elements's first dim in input X. 
The output's dims will be re-computed in diff --git a/paddle/fluid/operators/lod_tensor_to_array_op.cc b/paddle/fluid/operators/lod_tensor_to_array_op.cc index b130e84933bc9a26653b5eb164ccc450fdb7b63e..df26680a9e21bc21b5ad2c55a26c2bb3788d7ac1 100644 --- a/paddle/fluid/operators/lod_tensor_to_array_op.cc +++ b/paddle/fluid/operators/lod_tensor_to_array_op.cc @@ -61,7 +61,8 @@ struct LoDTensorToArrayFunctor : public boost::static_visitor { #ifdef PADDLE_WITH_CUDA Apply(static_cast(dev_ctx)); #else - PADDLE_THROW("Not compiled with cuda"); + PADDLE_THROW( + platform::errors::Unavailable("Fluid is not compiled with CUDA")); #endif } } @@ -107,10 +108,11 @@ class LoDTensorToArrayOp : public framework::OperatorBase { auto max_seq_len = items[0].length; auto rank_level = rank_table.level(); - PADDLE_ENFORCE_LT( - rank_level, x.lod().size(), - "Input should be a LoDTensor, and its lod_level should be at least %d", - rank_level + 1); + PADDLE_ENFORCE_LT(rank_level, x.lod().size(), + platform::errors::InvalidArgument( + "Input should be a LoDTensor, and its lod_level should be at " + "least %d", + rank_level + 1)); out.resize(max_seq_len); std::vector> copy_ranges(max_seq_len); @@ -190,14 +192,19 @@ class LoDTensorToArrayOp : public framework::OperatorBase { class LoDTensorToArrayInferShape : public framework::InferShapeBase { public: void operator()(framework::InferShapeContext *context) const override { - PADDLE_ENFORCE(context->HasInput("X"), - "Input(X) of LoDTensorToArrayOp should not be null."); - PADDLE_ENFORCE( - context->HasInput("RankTable"), - "Input(RankTable) of LoDTensorToArrayOp should not be null."); - - PADDLE_ENFORCE(context->HasOutput("Out"), - "Output(Out) of LoDTensorToArrayOp should not be null."); + PADDLE_ENFORCE_EQ( + context->HasInput("X"), true, + platform::errors::NotFound( + "Input(X) of LoDTensorToArrayOp should not be null.")); + PADDLE_ENFORCE_EQ( + context->HasInput("RankTable"), true, + platform::errors::NotFound( + 
"Input(RankTable) of LoDTensorToArrayOp should not be null.")); + + PADDLE_ENFORCE_EQ( + context->HasOutput("Out"), true, + platform::errors::NotFound( + "Output(Out) of LoDTensorToArrayOp should not be null.")); auto x_dim = context->GetInputDim("X"); // For compile-time, the first dim of input X and output Out should be -1.