diff --git a/paddle/fluid/operators/recurrent_op.cc b/paddle/fluid/operators/recurrent_op.cc index ac432f4dd03cb38c9a56e7a0fb67f7aa44039245..b3bb1abf4da6fcd8f591971d56232490b8519300 100644 --- a/paddle/fluid/operators/recurrent_op.cc +++ b/paddle/fluid/operators/recurrent_op.cc @@ -272,6 +272,9 @@ class RecurrentOp : public RecurrentBase { auto *block = Attr<framework::BlockDesc *>(kStepBlock); auto *program = block->Program(); + auto ctx = executor.Prepare( + *program, block->ID(), std::vector<std::string>() /*skip_ref_cnt_vars*/, + true /*force_disable_gc*/); for (size_t i = 0; i < seq_len; ++i) { size_t seq_offset = reverse ? seq_len - i - 1 : i; @@ -305,10 +308,9 @@ class RecurrentOp : public RecurrentBase { } // Every inputs are linked now, execute! - executor.Run(*program, &cur_scope, block->ID(), - false /*create_local_scope*/, true /*create_vars*/, - std::vector<std::string>() /*skip_ref_cnt_vars*/, - true /*force_disable_gc*/); + executor.RunPreparedContext(ctx.get(), &cur_scope, + false /*create_local_scope*/, + true /*create_vars*/, true /* keep_kids */); // Copy inside::output -> outside::output // outside::output[seq_offset: seq_offset + 1] = inside::output @@ -366,6 +368,9 @@ class RecurrentGradOp : public RecurrentBase { framework::Executor executor(place); auto *block = Attr<framework::BlockDesc *>(kStepBlock); auto *program = block->Program(); + auto ctx = executor.Prepare( + *program, block->ID(), std::vector<std::string>() /*skip_ref_cnt_vars*/, + true /*force_disable_gc*/); for (size_t step_id = 0; step_id < seq_len; ++step_id) { size_t seq_offset = reverse ? 
step_id : seq_len - step_id - 1; @@ -423,10 +428,9 @@ class RecurrentGradOp : public RecurrentBase { VLOG(5) << "Recurrent memory linking finished "; // Run step block with cur_scope - executor.Run(*program, &cur_scope, block->ID(), - false /*create_local_scope*/, true /*create_vars*/, - std::vector<std::string>() /*skip_ref_cnt_vars*/, - true /*force_disable_gc*/); + executor.RunPreparedContext(ctx.get(), &cur_scope, + false /*create_local_scope*/, + true /*create_vars*/, true /* keep_kids */); VLOG(5) << "executor.Run finished ";