From ad61e1b22c1625db7e096d207c4240fdfea9b2b8 Mon Sep 17 00:00:00 2001
From: chengduo
Date: Wed, 13 Feb 2019 05:53:43 -0600
Subject: [PATCH] fix potential bug (#15688)

test=develop
---
 paddle/fluid/framework/feed_fetch_method.cc          | 1 +
 paddle/fluid/framework/operator.cc                   | 9 ++++++---
 paddle/fluid/memory/allocation/best_fit_allocator.cc | 2 ++
 paddle/fluid/operators/conv_op.cc                    | 4 ++--
 4 files changed, 11 insertions(+), 5 deletions(-)

diff --git a/paddle/fluid/framework/feed_fetch_method.cc b/paddle/fluid/framework/feed_fetch_method.cc
index 6338be75a..96530b2a3 100644
--- a/paddle/fluid/framework/feed_fetch_method.cc
+++ b/paddle/fluid/framework/feed_fetch_method.cc
@@ -44,6 +44,7 @@ LoDTensor& GetFetchVariable(const Scope& scope, const std::string& var_name,
   // Since we want to fetch LodTensor from a variable, the variable must
   // be created alreadly.
   Variable* g_fetch_value = scope.FindVar(var_name);
+  PADDLE_ENFORCE_NOT_NULL(g_fetch_value, "%s is not found.", var_name);
   PADDLE_ENFORCE(g_fetch_value->IsType<FeedFetchList>(),
                  "Only %s can be invoked by GetFetchVariable",
                  typeid(FeedFetchList).name());
diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc
index b22523e0f..e15c838f4 100644
--- a/paddle/fluid/framework/operator.cc
+++ b/paddle/fluid/framework/operator.cc
@@ -989,11 +989,14 @@ void OperatorWithKernel::TransferInplaceVarsBack(
     const Scope& transfer_scope) const {
   for (auto& var_name : inplace_vars) {
     VLOG(3) << "share inplace var " + var_name + " back to it's original scope";
+    auto* origin_var = scope.FindVar(var_name);
+    PADDLE_ENFORCE_NOT_NULL(origin_var, "The var[%s] should not be nullptr.",
+                            var_name);
     auto* original_tensor =
-        GetMutableLoDTensorOrSelectedRowsValueFromVar(scope.FindVar(var_name));
+        GetMutableLoDTensorOrSelectedRowsValueFromVar(origin_var);
     auto* var = transfer_scope.FindVar(var_name);
-    PADDLE_ENFORCE(var != nullptr, "The var[%s] should not be nullptr",
-                   var_name);
+    PADDLE_ENFORCE_NOT_NULL(var, "The var[%s] should not be nullptr.",
+                            var_name);
     auto* transformed_tensor = GetLoDTensorOrSelectedRowsValueFromVar(*var);
     original_tensor->ShareDataWith(*transformed_tensor);
   }
diff --git a/paddle/fluid/memory/allocation/best_fit_allocator.cc b/paddle/fluid/memory/allocation/best_fit_allocator.cc
index 6f3e512fb..e3d6c2f51 100644
--- a/paddle/fluid/memory/allocation/best_fit_allocator.cc
+++ b/paddle/fluid/memory/allocation/best_fit_allocator.cc
@@ -111,6 +111,8 @@ size_t BestFitAllocator::NumFreeChunks() const {
 }
 void BestFitAllocator::Free(Allocation* allocation) {
   auto* bf_allocation = dynamic_cast<BestFitAllocation*>(allocation);
+  PADDLE_ENFORCE_NOT_NULL(bf_allocation,
+                          "The input allocation is not BestFitAllocation.");
   auto chunk_it = bf_allocation->ChunkIterator();
   PADDLE_ENFORCE(!chunk_it->is_free);
   chunk_it->is_free = true;
diff --git a/paddle/fluid/operators/conv_op.cc b/paddle/fluid/operators/conv_op.cc
index bd788f03e..fd9f156d0 100644
--- a/paddle/fluid/operators/conv_op.cc
+++ b/paddle/fluid/operators/conv_op.cc
@@ -222,7 +222,7 @@ void Conv2DOpMaker::Make() {
       .SetDefault(4096);
   AddAttr<bool>("exhaustive_search",
                 "(bool, default false) cuDNN has many algorithm to calculation "
-                "convolution, whether enable exhaustive search ",
+                "convolution, whether enable exhaustive search "
                 "for cuDNN convolution or not, defalut is False.")
       .SetDefault(false);
   AddComment(R"DOC(
@@ -341,7 +341,7 @@ void Conv3DOpMaker::Make() {
       .SetDefault(4096);
   AddAttr<bool>("exhaustive_search",
                 "(bool, default false) cuDNN has many algorithm to calculation "
-                "convolution, whether enable exhaustive search ",
+                "convolution, whether enable exhaustive search "
                 "for cuDNN convolution or not, defalut is False.")
       .SetDefault(false);
   AddComment(R"DOC(
--
GitLab