From e4791d881acfb30af563a4e7258cd6825e4cb2de Mon Sep 17 00:00:00 2001
From: xiongkun
Date: Thu, 25 Nov 2021 11:00:00 +0800
Subject: [PATCH] Fix test rnn memory helper op (#37474)

* clear LoDTensorArray

* fix bugs

* fix

* fix gpu
---
 .../fluid/framework/new_executor/interpretercore.cc |  4 +---
 .../interpretercore_garbage_collector.cc            |  1 +
 .../framework/new_executor/interpretercore_util.cc  |  1 +
 paddle/fluid/operators/rnn_memory_helper_op.cc      |  6 +++++-
 paddle/fluid/operators/sum_op.h                     | 12 +++++++++---
 5 files changed, 17 insertions(+), 7 deletions(-)

diff --git a/paddle/fluid/framework/new_executor/interpretercore.cc b/paddle/fluid/framework/new_executor/interpretercore.cc
index 71cd49bd7e..0694854544 100644
--- a/paddle/fluid/framework/new_executor/interpretercore.cc
+++ b/paddle/fluid/framework/new_executor/interpretercore.cc
@@ -343,7 +343,6 @@ void InterpreterCore::RunInstruction(const Instruction& instr_node) {
   Scope* local_scope = create_local_scope_
                            ? global_scope_->GetMutableLocalScope()
                            : global_scope_->GetMutableScope();
-
   auto op_with_kernel = dynamic_cast<const framework::OperatorWithKernel*>(op);
   {
     platform::RecordEvent infershape_event("InferShape");
@@ -354,8 +353,7 @@ void InterpreterCore::RunInstruction(const Instruction& instr_node) {
 
   if (op_with_kernel != nullptr && FLAGS_new_executor_use_inplace) {
     // TODO(xiongkun03) Does operator
-    // base support
-    // inplace ?
+    // base support inplace ?
     for (auto& pair : instr_node.InplaceInfo()) {
       const auto& in = paddle::framework::details::GetTensorFromVar(pair.first);
       auto* out =
diff --git a/paddle/fluid/framework/new_executor/interpretercore_garbage_collector.cc b/paddle/fluid/framework/new_executor/interpretercore_garbage_collector.cc
index f17f64dbca..1255ecfc9a 100644
--- a/paddle/fluid/framework/new_executor/interpretercore_garbage_collector.cc
+++ b/paddle/fluid/framework/new_executor/interpretercore_garbage_collector.cc
@@ -79,6 +79,7 @@ void InterpreterCoreGarbageCollector::Add(paddle::framework::Variable* var,
     for (auto& t : *tensor_arr) {
       Add(t.MoveMemoryHolder(), event, ctx);
     }
+    tensor_arr->clear();
   } else if (var->IsType<std::vector<Scope*>>()) {
     // NOTE(@xiongkun03) conditional_op / while_op will create a STEP_SCOPE
     // refer to executor.cc to see what old garbage collector does.
diff --git a/paddle/fluid/framework/new_executor/interpretercore_util.cc b/paddle/fluid/framework/new_executor/interpretercore_util.cc
index 98799e049d..774e4e5c9b 100644
--- a/paddle/fluid/framework/new_executor/interpretercore_util.cc
+++ b/paddle/fluid/framework/new_executor/interpretercore_util.cc
@@ -411,6 +411,7 @@ void build_op_func_list(const platform::Place& place,
       for (auto& t : *lod_tensor_arr) {
         garbages->emplace_back(t.MoveMemoryHolder());
       }
+      lod_tensor_arr->clear();
     } else {
       PADDLE_THROW(platform::errors::Unimplemented(
           "Type %s of variable %s is not supported eager deletion.",
diff --git a/paddle/fluid/operators/rnn_memory_helper_op.cc b/paddle/fluid/operators/rnn_memory_helper_op.cc
index 95b23a0b8c..5d6876465c 100644
--- a/paddle/fluid/operators/rnn_memory_helper_op.cc
+++ b/paddle/fluid/operators/rnn_memory_helper_op.cc
@@ -109,7 +109,11 @@ class RNNMemoryHelperGradOp : public framework::OperatorBase {
     platform::DeviceContextPool &pool =
         platform::DeviceContextPool::Instance();
     auto &dev_ctx = *pool.Get(dev_place);
-    if (out_grad_var == nullptr) {
+    // NOTE(xiongkun03): In standalone executor, after each run the
+    // var.tensor.holder will be deleted instead of the variable, so we need
+    // to check IsInitialized().
+    if (out_grad_var == nullptr ||
+        !out_grad_var->Get<framework::LoDTensor>().IsInitialized()) {
       VLOG(5) << "Using fill constant 0 as starting gradient";
       auto in_var_name = Input("X");
       auto *in_var = scope.FindVar(in_var_name);
diff --git a/paddle/fluid/operators/sum_op.h b/paddle/fluid/operators/sum_op.h
index 4c8f7be6ea..61a9c8b115 100644
--- a/paddle/fluid/operators/sum_op.h
+++ b/paddle/fluid/operators/sum_op.h
@@ -129,6 +129,7 @@ template <typename DeviceContext, typename T>
 class SumKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext &context) const override {
+    VLOG(10) << "start sum kernel";
     auto in_vars = context.MultiInputVar("X");
     size_t in_num = in_vars.size();
     auto out_var = context.OutputVar("Out");
@@ -138,7 +139,8 @@ class SumKernel : public framework::OpKernel<T> {
     if (out_var->IsType<framework::LoDTensor>()) {
       auto *out = out_var->GetMutable<framework::LoDTensor>();
       auto *out_ptr = out->mutable_data<T>(context.GetPlace());
-      if (in_num >= 1 && in_vars[0]->IsType<framework::LoDTensor>()) {
+      if (in_num >= 1 && in_vars[0]->IsType<framework::LoDTensor>() &&
+          in_vars[0]->Get<framework::LoDTensor>().IsInitialized()) {
         auto &in_0_tensor = in_vars[0]->Get<framework::LoDTensor>();
         if (in_0_tensor.numel() > 0) {
           in_place = (in_0_tensor.data<T>() == out_ptr);
@@ -151,7 +153,9 @@ class SumKernel : public framework::OpKernel<T> {
     int start = in_place ? 1 : 0;
     if (!in_place) {
       if ((in_num >= 2) && in_vars[0]->IsType<framework::LoDTensor>() &&
-          in_vars[1]->IsType<framework::LoDTensor>()) {
+          in_vars[1]->IsType<framework::LoDTensor>() &&
+          in_vars[0]->Get<framework::LoDTensor>().IsInitialized() &&
+          in_vars[1]->Get<framework::LoDTensor>().IsInitialized()) {
         auto &in_0 = in_vars[0]->Get<framework::LoDTensor>();
         auto &in_1 = in_vars[1]->Get<framework::LoDTensor>();
         if (in_0.numel() && in_1.numel()) {
@@ -162,6 +166,7 @@ class SumKernel : public framework::OpKernel<T> {
         }
       }
       if (start != 2) {
+        VLOG(10) << "Fill with constant = 0 in sum kernel.";
         math::SetConstant<DeviceContext, T> constant_functor;
         constant_functor(context.template device_context<DeviceContext>(), out,
                          static_cast<T>(0));
@@ -173,7 +178,7 @@ class SumKernel : public framework::OpKernel<T> {
     for (size_t i = start; i < in_num; i++) {
       if (in_vars[i]->IsType<framework::LoDTensor>()) {
         auto &in_t = in_vars[i]->Get<framework::LoDTensor>();
-        if (in_t.numel() == 0) {
+        if (!in_t.IsInitialized() || in_t.numel() == 0) {
           continue;
         }
         auto in = EigenVector<T>::Flatten(in_t);
@@ -200,6 +205,7 @@ class SumKernel : public framework::OpKernel<T> {
           "unsupport type: %s.",
           framework::ToTypeName(out_var->Type())));
     }
+    VLOG(10) << "end sum kernel";
   }
 };
 }  // namespace operators
--
GitLab