diff --git a/paddle/fluid/operators/controlflow/conditional_block_infer_op.cc b/paddle/fluid/operators/controlflow/conditional_block_infer_op.cc
index 78acd8beb3f97bc2db5bb1503f4791e0e3af6660..aa31b8875620d967f115a4d18b5022ca3eb21804 100644
--- a/paddle/fluid/operators/controlflow/conditional_block_infer_op.cc
+++ b/paddle/fluid/operators/controlflow/conditional_block_infer_op.cc
@@ -51,7 +51,9 @@ class ConditionalBlockInferOp : public ConditionalOp {
 
     if (need_run) {
       auto *scope_var = scope.FindVar(Output("Scope"));
-      PADDLE_ENFORCE(scope_var != nullptr, "Must set scope");
+      PADDLE_ENFORCE_NOT_NULL(
+          scope_var, platform::errors::PreconditionNotMet(
+                         "Scope must be set in ConditionalBlockInferOp."));
       auto *scopes = scope_var->GetMutable<std::vector<framework::Scope *>>();
       scopes->resize(1);
       scopes->front() = &scope.NewScope();
diff --git a/paddle/fluid/operators/controlflow/conditional_block_op.cc b/paddle/fluid/operators/controlflow/conditional_block_op.cc
index 6141a10156c34ec9b172948949dc889c532f72c7..2713b7fd59a78b2a91b76b6dddaada769a2dc86c 100644
--- a/paddle/fluid/operators/controlflow/conditional_block_op.cc
+++ b/paddle/fluid/operators/controlflow/conditional_block_op.cc
@@ -56,7 +56,9 @@ class ConditionalBlockOp : public ConditionalOp {
 
     if (need_run) {
       auto *scope_var = scope.FindVar(Output(ConditionalOp::kScope));
-      PADDLE_ENFORCE(scope_var != nullptr, "Must set scope");
+      PADDLE_ENFORCE_NOT_NULL(
+          scope_var, platform::errors::PreconditionNotMet(
+                         "Scope must be set in conditional_block_op."));
       auto *scopes = scope_var->GetMutable<std::vector<framework::Scope *>>();
       scopes->resize(1);
       scopes->front() = &scope.NewScope();
@@ -79,7 +81,7 @@ class ConditionalBlockInferShape : public framework::InferShapeBase {
   void operator()(framework::InferShapeContext *context) const override {
     PADDLE_ENFORCE_EQ(context->HasInputs(ConditionalOp::kCondition), true,
                       platform::errors::InvalidArgument(
-                          "conditional_block_op must have condition input"));
+                          "conditional_block_op must have condition input."));
   }
 };
 
@@ -116,13 +118,13 @@ class ConditionalBlockGradOp : public ConditionalOp {
     }
 
     auto *scope_var = scope.FindVar(Input(ConditionalOp::kScope));
-    PADDLE_ENFORCE_NE(scope_var, nullptr,
-                      platform::errors::InvalidArgument(
-                          "Scope must be set in conditional block op"));
+    PADDLE_ENFORCE_NOT_NULL(
+        scope_var, platform::errors::PreconditionNotMet(
+                       "Scope must be set in conditional block op."));
     auto &scopes = scope_var->Get<std::vector<framework::Scope *>>();
     PADDLE_ENFORCE_GT(scopes.size(), 0,
                       platform::errors::InvalidArgument(
-                          "Scope must be set in conditional block op"));
+                          "Scope must be set in conditional block op."));
     framework::Scope &cur_scope = *scopes[0];
 
     framework::Executor exec(dev_place);
@@ -192,7 +194,7 @@
           PADDLE_ENFORCE_EQ(outside_var->IsType<framework::LoDTensor>(), true,
                             platform::errors::InvalidArgument(
                                 "Type of outside_var %s is NOT LoDTensor, which "
-                                "doesn't match input_var %s",
+                                "doesn't match input_var %s.",
                                 outside_grad_name, input_name));
           AssignZeroToOutsideTensor(
               place, scope, input_var->Get<framework::LoDTensor>(),
@@ -202,7 +204,7 @@
                             true,
                             platform::errors::InvalidArgument(
                                 "Type of outside_var %s is NOT LoDTensorArray, "
-                                "which doesn't match input_var %s",
+                                "which doesn't match input_var %s.",
                                 outside_grad_name, input_name));
           const auto &input_tensors = input_var->Get<framework::LoDTensorArray>();
           auto *outside_tensors =
@@ -210,7 +212,7 @@
           PADDLE_ENFORCE_EQ(input_tensors.size(), outside_tensors->size(),
                             platform::errors::InvalidArgument(
                                 "LoDTensorArray outside_var %s doen't have same "
-                                "size as input_var %s",
+                                "size as input_var %s.",
                                 outside_grad_name, input_name));
           for (size_t j = 0; j < input_tensors.size(); ++j) {
             AssignZeroToOutsideTensor(place, scope, input_tensors[j],
@@ -220,7 +222,7 @@
         // TODO(huihuangzheng): add support for SelectedRows
         PADDLE_THROW(platform::errors::InvalidArgument(
             "Conditional block grad op doesn't support non-LoDTensor output "
-            "now"));
+            "now."));
       }
     }
   }
@@ -245,7 +247,10 @@
 class ConditionalBlockGradInferShape : public framework::InferShapeBase {
  public:
   void operator()(framework::InferShapeContext *context) const override {
-    PADDLE_ENFORCE(context->HasInputs(ConditionalOp::kCondition));
+    PADDLE_ENFORCE_EQ(
+        context->HasInputs(ConditionalOp::kCondition), true,
+        platform::errors::InvalidArgument(
+            "Condition must be set in conditional_block_grad_op."));
     if (context->HasInputs(ConditionalOp::kInputs) &&
         context->HasOutputs(framework::GradVarName(ConditionalOp::kInputs))) {
       context->SetOutputsDim(framework::GradVarName(ConditionalOp::kInputs),
diff --git a/paddle/fluid/operators/controlflow/conditional_block_op.h b/paddle/fluid/operators/controlflow/conditional_block_op.h
index 9d65c33c51c1226b2518225c3e8efdc5b349238b..659e098c6dd20840e0efcea5ad0c5ee37300f4f4 100644
--- a/paddle/fluid/operators/controlflow/conditional_block_op.h
+++ b/paddle/fluid/operators/controlflow/conditional_block_op.h
@@ -49,7 +49,9 @@ class ConditionalOp : public framework::OperatorBase {
         xs.begin(), xs.end(), retv.begin(),
         [&scope](const std::string &var_name) -> const framework::LoDTensor * {
           auto *var = scope.FindVar(var_name);
-          PADDLE_ENFORCE(var != nullptr, "Cannot find variable %s", var_name);
+          PADDLE_ENFORCE_NOT_NULL(
+              var, platform::errors::InvalidArgument("Cannot find variable %s",
+                                                     var_name));
           return &var->Get<framework::LoDTensor>();
         });
     return retv;
@@ -57,15 +59,17 @@
 
   bool ScalarCondition(
       const std::vector<const framework::LoDTensor *> &ips) const {
-    if (!(ips.size() == 1UL && ips[0]->IsInitialized())) {
-      PADDLE_THROW("should have one initialized input as condition");
-    }
-
-    PADDLE_ENFORCE(ips[0]->type() == framework::proto::VarType::BOOL &&
-                       ips[0]->numel() == 1,
-                   "condition input's data type should be bool, "
-                   "numel should be 1, actual numel is %d",
-                   ips[0]->numel());
+    PADDLE_ENFORCE_EQ(
+        ips.size() == 1UL && ips[0]->IsInitialized(), true,
+        platform::errors::InvalidArgument(
+            "condition should have one initialized input as condition"));
+
+    PADDLE_ENFORCE_EQ(ips[0]->type() == framework::proto::VarType::BOOL &&
+                          ips[0]->numel() == 1,
+                      true, platform::errors::InvalidArgument(
+                                "condition input's data type should be bool, "
+                                "numel should be 1, actual numel is %d",
+                                ips[0]->numel()));
     bool res = false;
     if (platform::is_gpu_place(ips[0]->place())) {
 #ifdef PADDLE_WITH_CUDA
diff --git a/paddle/fluid/operators/controlflow/conditional_block_op_helper.cc b/paddle/fluid/operators/controlflow/conditional_block_op_helper.cc
index 13a00c852a27da2b75056ffbcdc0873ee553e2a8..155a5dbfec31f3a689fe199827f8ccf1c783d4e6 100644
--- a/paddle/fluid/operators/controlflow/conditional_block_op_helper.cc
+++ b/paddle/fluid/operators/controlflow/conditional_block_op_helper.cc
@@ -31,7 +31,12 @@ static bool IsMatchedConditionalBlockOpAndConditionalBlockGradOp(
 static void FindAllConditionalBlockAndConditionalBlockGradOp(
     const framework::ProgramDesc &program, std::vector<OpVariant> *fwd_ops,
     std::vector<OpVariant> *bwd_ops) {
-  PADDLE_ENFORCE_GE(fwd_ops->size(), bwd_ops->size());
+  PADDLE_ENFORCE_GE(
+      fwd_ops->size(), bwd_ops->size(),
+      platform::errors::InvalidArgument(
+          "Size of forward ops must be greater or equal to backward ops. The "
+          "number of forward ops is %d and the number of backward ops is %d",
+          fwd_ops->size(), bwd_ops->size()));
 
   for (size_t i = 1; i < program.Size(); ++i) {
     auto &block = program.Block(i);
@@ -47,7 +52,11 @@ static void FindAllConditionalBlockAndConditionalBlockGradOp(
 
   PADDLE_ENFORCE_GE(
       fwd_ops->size(), bwd_ops->size(),
-      "There are extra conditional_block_grad ops in the graph or program");
+      platform::errors::InvalidArgument(
+          "There are more conditional_block_grad ops than "
+          "conditional_block ops in the graph or program. The number of "
+          "forward ops is %d and the number of backward ops is %d",
+          fwd_ops->size(), bwd_ops->size()));
 }
 
 static void SetSkipVarsForConditionalBlockOp(OpVariant *fwd_op,
@@ -102,14 +111,17 @@ static void PrepareSafeEagerDeletionOnConditionalOpAndConditionalGradOpImpl(
   for (auto &fwd_op : ifelse_op_set) {
     if (IsMatchedConditionalBlockOpAndConditionalBlockGradOp(fwd_op,
                                                              bwd_op)) {
-      PADDLE_ENFORCE(matched_fwd_op == nullptr,
-                     "Found multiple matched conditional_block ops");
+      PADDLE_ENFORCE_EQ(matched_fwd_op, nullptr,
+                        platform::errors::PreconditionNotMet(
+                            "Found multiple matched conditional_block ops."));
       matched_fwd_op = &fwd_op;
     }
   }
 
-  PADDLE_ENFORCE_NOT_NULL(matched_fwd_op,
-                          "Cannot find matched forward conditional_block op");
+  PADDLE_ENFORCE_NOT_NULL(
+      matched_fwd_op,
+      platform::errors::PreconditionNotMet(
+          "Cannot find matched forward conditional_block op."));
 
   SetSkipVarsForConditionalBlockOp(const_cast<OpVariant *>(matched_fwd_op),
                                    &bwd_op);
diff --git a/python/paddle/fluid/layers/control_flow.py b/python/paddle/fluid/layers/control_flow.py
index 74e4206824c9eb009b990ba12bf849332c19cb45..7aa90e271acb83ae6044bd7324354c7ba60978b6 100755
--- a/python/paddle/fluid/layers/control_flow.py
+++ b/python/paddle/fluid/layers/control_flow.py
@@ -1812,8 +1812,7 @@ class ConditionalBlockGuard(BlockGuard):
     """
 
     def __init__(self, block):
-        if not isinstance(block, ConditionalBlock):
-            raise TypeError("block should be conditional block")
+        check_type(block, "block", ConditionalBlock, "ConditionalBlockGuard")
         super(ConditionalBlockGuard, self).__init__(block.helper.main_program)
         self.block = block
 
@@ -1855,8 +1854,7 @@ class ConditionalBlock(object):
 
     def __init__(self, inputs, is_scalar_condition=False, name=None):
         for each_input in inputs:
-            if not isinstance(each_input, Variable):
-                raise TypeError("Each input should be variable")
+            check_type(each_input, "input", Variable, "ConditionalBlock")
         self.inputs = inputs
         self.is_scalar_condition = is_scalar_condition
         self.helper = LayerHelper('conditional_block', name=name)
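Note on the pattern: every check above moves from the untyped `PADDLE_ENFORCE(cond, "msg")` / `PADDLE_THROW("msg")` style to the typed `PADDLE_ENFORCE_NOT_NULL` / `PADDLE_ENFORCE_EQ` macros paired with a `platform::errors::*` builder, so a failure reports an error class (`InvalidArgument`, `PreconditionNotMet`, ...) together with a full-sentence message. For readers unfamiliar with these macros, below is a minimal, self-contained sketch of that interplay. It mocks the macro and the error builders rather than including Paddle's real headers (`MOCK_ENFORCE_NOT_NULL` and the `std::runtime_error` stand-ins are hypothetical, not Paddle's actual implementation), but the call shape matches the diff:

```cpp
#include <cstdio>
#include <stdexcept>
#include <string>

namespace platform {
namespace errors {
// Stand-ins for Paddle's typed error builders; the real ones construct
// rich error objects, here a tagged runtime_error is enough to show the idea.
inline std::runtime_error PreconditionNotMet(const std::string &msg) {
  return std::runtime_error("PreconditionNotMet: " + msg);
}
inline std::runtime_error InvalidArgument(const std::string &msg) {
  return std::runtime_error("InvalidArgument: " + msg);
}
}  // namespace errors
}  // namespace platform

// Mock of PADDLE_ENFORCE_NOT_NULL: the caller supplies a typed error object
// instead of the bare message string the old PADDLE_ENFORCE(ptr != nullptr,
// "Must set scope") form took.
#define MOCK_ENFORCE_NOT_NULL(ptr, error)  \
  do {                                     \
    if ((ptr) == nullptr) throw (error);   \
  } while (0)

int main() {
  int *scope_var = nullptr;  // stands in for scope.FindVar(...) returning null
  try {
    MOCK_ENFORCE_NOT_NULL(scope_var,
                          platform::errors::PreconditionNotMet(
                              "Scope must be set in conditional_block_op."));
  } catch (const std::runtime_error &e) {
    std::printf("%s\n", e.what());  // prints error class + message
  }
  return 0;
}
```

The Python hunks follow the same idea: `check_type(block, "block", ConditionalBlock, "ConditionalBlockGuard")` centralizes the `isinstance` check and emits a uniform `TypeError` message naming the argument and the API, instead of each call site hand-rolling its own check and message.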