// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/framework/new_executor/interpretercore.h"

#include <unordered_set>

#include "gflags/gflags.h"

#include "paddle/fluid/framework/details/nan_inf_utils.h"
#include "paddle/fluid/framework/details/share_tensor_buffer_functor.h"
#include "paddle/fluid/framework/new_executor/interpreter/interpreter_util.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#include "paddle/fluid/platform/os_info.h"
#include "paddle/fluid/platform/profiler/event_tracing.h"
#include "paddle/fluid/platform/profiler/supplement_tracing.h"
#include "paddle/phi/common/place.h"
#include "paddle/phi/core/kernel_context.h"
#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_helper.h"
#endif
#include "paddle/phi/backends/device_manager.h"

PADDLE_DEFINE_EXPORTED_bool(new_executor_use_inplace,
                            false,
                            "Use inplace in new executor");
PADDLE_DEFINE_EXPORTED_bool(new_executor_use_local_scope,
                            true,
                            "Use local_scope in new executor(especially used "
                            "in UT), can turn off for better performance");
PADDLE_DEFINE_EXPORTED_bool(control_flow_use_new_executor,
                            false,
                            "Use new executor in control flow op");

DECLARE_bool(check_nan_inf);
DECLARE_bool(benchmark);

constexpr const char* kExceptionCaught = "ExceptionCaught";
constexpr const char* kTaskCompletion = "TaskCompletion";

namespace paddle {
namespace framework {

inline void SetDeviceId(const platform::Place& place) {
  // TODO(zhiqiu): reduce the cost
  if (platform::is_gpu_place(place)) {
#if !defined(PADDLE_WITH_CUDA) && !defined(PADDLE_WITH_HIP)
    PADDLE_THROW(platform::errors::Unavailable(
        "Cannot run operator on place %s, please recompile paddle or "
        "reinstall Paddle with CUDA support.",
        place));
#else
    auto dev_id = place.device;
    platform::SetDeviceId(dev_id);
#endif
  } else if (platform::is_xpu_place(place)) {
#ifndef PADDLE_WITH_XPU
    PADDLE_THROW(platform::errors::Unavailable(
        "Cannot run operator on place %s, please recompile paddle or "
        "reinstall Paddle with XPU support.",
        place));
#else
    auto dev_id = place.device;
    platform::SetXPUDeviceId(dev_id);
#endif
  } else if (platform::is_npu_place(place)) {
#ifndef PADDLE_WITH_ASCEND_CL
    PADDLE_THROW(platform::errors::Unavailable(
        "Cannot run operator on place %s, please recompile paddle or "
        "reinstall Paddle with NPU support.",
        place));
#else
    auto dev_id = place.device;
    platform::SetNPUDeviceId(dev_id);
#endif
  } else if (platform::is_custom_place(place)) {
#ifndef PADDLE_WITH_CUSTOM_DEVICE
    PADDLE_THROW(platform::errors::Unavailable(
        "Cannot run operator on place %s, please recompile paddle or "
        "reinstall Paddle with CustomDevice support.",
        place));
#else
    phi::DeviceManager::SetDevice(place);
#endif
  }
}

// TODO(Ruibiao): Pass skip_gc_vars, used_for_jit, and other config messages by
// constructing an interpreter::ExecutionConfig
InterpreterCore::InterpreterCore(const platform::Place& place,
                                 const BlockDesc& block,
                                 const std::set<std::string>& skip_gc_vars,
                                 framework::Scope* scope,
                                 bool used_for_jit,
                                 bool used_for_control_flow_op)
    : place_(place),
      block_(block),
      execution_config_(place, block.OpSize()),
      stream_analyzer_(place),
      var_scope_(scope) {
  VLOG(4) << "InterpreterCore(): " << this << " on " << place_;

  exception_notifier_ = main_thread_blocker_.RegisterEvent(kExceptionCaught);
  completion_notifier_ = main_thread_blocker_.RegisterEvent(kTaskCompletion);

  execution_config_.used_for_jit = used_for_jit;
  execution_config_.used_for_control_flow_op = used_for_control_flow_op;
  execution_config_.create_local_scope = !used_for_jit &&
                                         FLAGS_new_executor_use_local_scope &&
                                         !used_for_control_flow_op;
  execution_config_.skip_gc_vars = skip_gc_vars;
  execution_config_.Log(/*log_level=*/8);

  if (execution_config_.create_local_scope) {
    auto local_scope = &var_scope_.GetMutableScope()->NewScope();
    local_scope_ = local_scope;
  }
  var_scope_.SetLocalScope(local_scope_);
}

InterpreterCore::~InterpreterCore() {
  // cancel gc's thread
  gc_.reset(nullptr);
  async_work_queue_.reset();
  VLOG(4) << "~InterpreterCore(): " << this << " on " << place_;

#ifdef PADDLE_WITH_MKLDNN
  // Clear mkl-dnn cache,
  // this is needed to have mkl-dnn unit tests working
  platform::ClearMKLDNNCache(place_, this);
#endif
}

interpreter::CostInfo InterpreterCore::DryRun(
    const std::vector<std::string>& feed_names,
    const std::vector<phi::DenseTensor>& feed_tensors) {
  SetDeviceId(place_);

  Prepare(feed_names, feed_tensors, true);
  interpreter::CostInfo cost_info;
  {
    interpreter::ProfilerGuard(place_, &cost_info);

    // For a program that only runs once, there is no need to create a
    // work_queue, so the async_work_queue_ is not created until the
    // second step runs.
    async_work_queue_ = GetWorkQueue();

    // lazy initialization of gc, do not create gc if the program only runs once
    if (!gc_) {
      gc_ = CreateInterpreterCoreGarbageCollector(place_, vec_instruction_);
    }

    ExecuteInstructionList(vec_instruction_);
    platform::DeviceContextPool::Instance().Get(place_)->Wait();
  }

  if (HasLocalScope()) {
    ClearLoDTensorArrayInLocalScope();
  }

  return cost_info;
}

paddle::framework::FetchList InterpreterCore::Run(
    const std::vector<std::string>& feed_names,
    const std::vector<phi::DenseTensor>& feed_tensors) {
  SetDeviceId(place_);

#ifdef PADDLE_WITH_MKLDNN
  platform::AttachPointerHashToMKLDNNKey(this, place_);
#endif

  bool is_build = is_build_;
  Prepare(feed_names, feed_tensors, is_build);

  if (is_build) {
    // For a program that only runs once, there is no need to create a
    // work_queue, so the async_work_queue_ is not created until the
    // second step runs.
    async_work_queue_ = GetWorkQueue();

    // lazy initialization of gc, do not create gc if the program only runs once
    if (!gc_) {
      gc_ = CreateInterpreterCoreGarbageCollector(place_, vec_instruction_);
    }

    if (execution_config_.used_for_jit && (sync_op_num_ == 0)) {
      VLOG(4) << "Tracing Instruction List";
      TraceInstructionList(vec_instruction_);
    } else {
      ExecuteInstructionList(vec_instruction_);
    }
#ifdef PADDLE_WITH_ASCEND_CL
    if (platform::is_npu_place(place_)) {
      platform::DeviceContextPool::Instance().Get(place_)->Wait();
    }
#endif
#ifdef PADDLE_WITH_CUSTOM_DEVICE
    if (platform::is_custom_place(place_)) {
      platform::DeviceContextPool::Instance().Get(place_)->Wait();
    }
#endif
  }
  if (HasLocalScope()) {
    ClearLoDTensorArrayInLocalScope();
  }

  // return Fetch Tensors
  auto* fetch_var = local_scope_->FindVar(interpreter::kFetchVarName);
  if (fetch_var) {
    return std::move(*fetch_var->GetMutable<framework::FetchList>());
  } else {
    return {};
  }
}

paddle::framework::FetchList InterpreterCore::Run(
    const std::vector<std::string>& feed_names, bool need_fetch) {
  SetDeviceId(place_);

#ifdef PADDLE_WITH_MKLDNN
  platform::AttachPointerHashToMKLDNNKey(this, place_);
#endif

  if (!is_build_) {
    LOG_FIRST_N(INFO, 1) << "New Executor is Running.";
    paddle::framework::interpreter::BuildVariableScope(
        block_, &var_scope_, HasLocalScope());

    std::vector<paddle::framework::OpFuncNode> op_func_nodes;
    paddle::framework::interpreter::BuildOpFuncList(
        place_,
        block_,
        execution_config_.skip_gc_vars,
        &op_func_nodes,
        &var_scope_,
        execution_config_,
        HasLocalScope());
    SetFeedVarsInplaceSkip(feed_names);
    // convert vec func_list to graph
    Convert(&op_func_nodes);
    is_build_ = true;
    UpdateSyncOpNum();
  } else {
    // For a program that only runs once, there is no need to create a
    // work_queue, so the async_work_queue_ is not created until the
    // second step runs.
    async_work_queue_ = GetWorkQueue();

    // lazy initialization of gc, do not create gc if the program only runs once
    if (!gc_) {
      gc_ = CreateInterpreterCoreGarbageCollector(place_, vec_instruction_);
    }

    if (execution_config_.used_for_jit && (sync_op_num_ == 0)) {
      VLOG(4) << "Tracing Instruction List";
      TraceInstructionList(vec_instruction_);
    } else {
      ExecuteInstructionList(vec_instruction_);
    }
#ifdef PADDLE_WITH_ASCEND_CL
    if (platform::is_npu_place(place_)) {
      platform::DeviceContextPool::Instance().Get(place_)->Wait();
    }
#endif
#ifdef PADDLE_WITH_CUSTOM_DEVICE
    if (platform::is_custom_place(place_)) {
      platform::DeviceContextPool::Instance().Get(place_)->Wait();
    }
#endif
  }

  if (HasLocalScope()) {
    ClearLoDTensorArrayInLocalScope();
  }

  // return Fetch Tensors
  Scope* inner_scope =
      HasLocalScope() ? local_scope_ : var_scope_.GetMutableScope();
  auto* fetch_var = inner_scope->FindVar(interpreter::kFetchVarName);
  if (fetch_var && need_fetch) {
    return std::move(*fetch_var->GetMutable<framework::FetchList>());
  } else {
    return {};
  }
}

void InterpreterCore::SetCopyProgram(std::shared_ptr<ProgramDesc> prog) {
  copy_program_ = prog;
}

void InterpreterCore::SetSkipGcVars(const std::set<std::string>& skip_gc_vars) {
  PADDLE_ENFORCE_EQ(
      execution_config_.skip_gc_vars.empty(),
      true,
      platform::errors::PreconditionNotMet(
          "execution_config_.skip_gc_vars can only be initialized once, now "
          "execution_config_.skip_gc_vars is "
          "not empty, do not call SetSkipGcVars method repeatedly."));
  execution_config_.skip_gc_vars = skip_gc_vars;
}

void InterpreterCore::SetJitInputVars(
    const std::set<std::string>& jit_input_vars) {
  PADDLE_ENFORCE_EQ(
      execution_config_.jit_input_vars.empty(),
      true,
      platform::errors::PreconditionNotMet(
          "execution_config_.jit_input_vars can only be initialized once, now "
          "execution_config_.jit_input_vars is "
          "not empty, do not call SetJitInputVars method repeatedly."));
  execution_config_.jit_input_vars = jit_input_vars;
}

const std::set<std::string>& InterpreterCore::JitInputVars() const {
  return execution_config_.jit_input_vars;
}

const VariableScope* InterpreterCore::GetVariableScope() const {
  return &var_scope_;
}

void InterpreterCore::reset_scope(Scope* new_scope) {
  var_scope_.SetScope(new_scope);
  auto& var_list = var_scope_.MutableVarList();
  for (size_t i = 0; i < var_list.size(); i++) {
    const auto& var_name = var_scope_.GetNameById(i);
    var_list[i] = new_scope->FindVar(var_name);
  }
  // The index should be assured valid, because the InterpreterCore may not be
  // fully built, but is still cached and used. For example, see unit test
  // `test_assert.py`: it may exit before `InterpreterCore::Convert`, but is
  // still cached and used by later tests.
  for (size_t i = 0; i < std::min(refs_.size(), var_list.size()); i++) {
    refs_[i]->ResetVariable(var_list[i]);
  }

  for (size_t i = 0; i < vec_instruction_.size(); i++) {
    BuildAndCacheInstructionCtx(&vec_instruction_[i]);
  }
}

void InterpreterCore::ShareWorkQueueFrom(std::shared_ptr<InterpreterCore> src) {
  async_work_queue_ = src->GetWorkQueue();
  VLOG(8) << "Share AsyncWorkQueue from InterpreterCore(" << src.get()
          << ") to InterpreterCore(" << this << ")";
}

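// Returns true when the variable at `var_index` is needed as a buffered input
// by exactly one instruction, which makes it a safe candidate for the in-place
// reuse performed in BuildInplace below.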
bool InterpreterCore::BuildInplaceCheckVarIsOnlyInput(
    const std::vector<std::vector<size_t>>& input_var2op, size_t var_index) {
  if (!var_scope_.VarDesc(var_index)) {
    return input_var2op.at(var_index).size() == 1;
  } else {
    int is_input_cnt = 0;
    for (auto inst_id : input_var2op.at(var_index)) {
      OpInOutInfo info;
      info.Build(vec_instruction_.at(inst_id).OpBase());
      if (info.IsInArgBufferNeeded(var_scope_.VarDesc(var_index)->Name())) {
        is_input_cnt++;
      }
    }
    return is_input_cnt == 1;
  }
}

std::shared_ptr<interpreter::AsyncWorkQueue> InterpreterCore::GetWorkQueue() {
  if (async_work_queue_ == nullptr) {
    async_work_queue_ = std::make_shared<interpreter::AsyncWorkQueue>(
        execution_config_.host_num_threads,
        execution_config_.deivce_num_threads,
        &main_thread_blocker_);
  }
  return async_work_queue_;
}

void InterpreterCore::BuildAndCacheInstructionCtx(Instruction* instr_node) {
  Scope* inner_scope =
      HasLocalScope() ? local_scope_ : var_scope_.GetMutableScope();
  VariableValueMap ins_map;
  for (auto& var_name_item : instr_node->Inputs()) {
    std::vector<Variable*> input_vars;

    input_vars.reserve(var_name_item.second.size());
    for (auto& id : var_name_item.second) {
      input_vars.emplace_back(inner_scope->FindVar(var_scope_.GetNameById(id)));
    }
    ins_map.emplace(var_name_item.first, std::move(input_vars));
  }

  VariableValueMap outs_map;
  for (auto& var_name_item : instr_node->Outputs()) {
    std::vector<Variable*> out_vars;

    out_vars.reserve(var_name_item.second.size());
    for (auto& id : var_name_item.second) {
      out_vars.emplace_back(inner_scope->FindVar(var_scope_.GetNameById(id)));
    }
    outs_map.emplace(var_name_item.first, std::move(out_vars));
  }

  // set runtime_ctx and infershape_ctx_
  if (instr_node->OpBase()->Type() == "cinn_launch") {  // OP use scope in
                                                        // kernel
    Scope* local_scope = HasLocalScope() ? var_scope_.GetMutableLocalScope()
                                         : var_scope_.GetMutableScope();
    instr_node->ResetContextWithScope(ins_map, outs_map, *local_scope);
  } else {
    instr_node->ResetContext(ins_map, outs_map);
  }
}

void InterpreterCore::BuildInplace() {
  // NOTE(Ruibiao): coalesce_tensor_op outputs a FusedOutput phi::DenseTensor
  // and a list of Output Tensors which are sliced from the FusedOutput. These
  // outputs should not be the outvar of the in-place var-pair since memory
  // reuse between FusedOutput and Output Tensors is assumed. For the following
  // example:
  // fused_var, var1, var2, var3 = coalesce_tensor(var1, var2, var3)
  // var1 = sum(var4, var5)
  // ...
  //
  // After running coalesce_tensor_op, var1 is assumed to share the buffer
  // slices from fused_var. However, if sum_op is in-place, then var1 would
  // re-share the buffer with var4 instead of fused_var.
  std::set<std::string> skip_inplace_outvars;
  for (Instruction& instr : vec_instruction_) {
    OperatorBase* op = instr.OpBase();
    if (op->Type() == kCoalesceTensor) {
      const std::vector<std::string>& outputs =
          op->OutputVars(/*has_intermediate=*/false);
      skip_inplace_outvars.insert(outputs.begin(), outputs.end());
    }
  }

  Scope* local_scope = HasLocalScope() ? var_scope_.GetMutableLocalScope()
                                       : var_scope_.GetMutableScope();
  std::vector<std::vector<size_t>> input_var2op(var_scope_.VarSize());
  for (Instruction& instr : vec_instruction_) {
    for (auto& item : instr.Inputs()) {
      for (int var_id : item.second) {
        if (var_id != kEmptyVarIndex) {
          input_var2op.at(var_id).push_back(instr.Id());
        }
      }
    }
  }

  for (size_t i = 0; i < vec_instruction_.size(); ++i) {
    auto& instr = vec_instruction_[i];
    auto* op_base = instr.OpBase();
    if (!op_base->Info().infer_inplace_) {
      continue;
    }

    auto in_to_outs = op_base->Info().infer_inplace_(
        platform::is_gpu_place(instr.DeviceContext().GetPlace()));

    auto& inputs = instr.Inputs();
    auto& outputs = instr.Outputs();
    for (auto& pair : in_to_outs) {
      auto iter = inputs.find(pair.first);
      if (iter != inputs.end() && !iter->second.empty()) {
        auto in_var_desc = var_scope_.VarDesc(iter->second[0]);
        if (in_var_desc && in_var_desc->Persistable()) {
          continue;
        }
        if (var_scope_.GetVarSikpInplace(iter->second[0])) {
          continue;
        }
        if (BuildInplaceCheckVarIsOnlyInput(input_var2op, iter->second[0])) {
          auto iterout = outputs.find(pair.second);
          if (iterout != outputs.end() && !iterout->second.empty()) {
            const std::string& invar_name =
                var_scope_.GetNameById(iter->second[0]);
            const std::string& outvar_name =
                var_scope_.GetNameById(iterout->second[0]);
            auto invar = local_scope->FindVar(invar_name);
            auto outvar = local_scope->FindVar(outvar_name);

            if (invar && outvar && invar->IsType<phi::DenseTensor>() &&
                outvar->IsType<phi::DenseTensor>() &&
                skip_inplace_outvars.find(outvar_name) ==
                    skip_inplace_outvars.end()) {
              instr.AddInplace(invar, outvar);
              VLOG(3) << "inplace " << op_base->Type() << " " << invar_name
                      << " -> " << outvar_name;
            }
          }
        }
      }
    }
  }
}

void InterpreterCore::BuildOperatorDependences() {
  // analyze the dependencies between ops, add next_instr_list to each instr,
  // and set the dependecy_count_
  size_t instr_num = vec_instruction_.size();
  dependecy_count_.resize(instr_num);
  auto downstream_map = dependency_builder_.Build(vec_instruction_);

  for (size_t instr_id = 0; instr_id < instr_num; ++instr_id) {
    Instruction& cur_instr = vec_instruction_[instr_id];
    const std::set<size_t>& next_instr_ids = downstream_map[instr_id];

    if (cur_instr.KernelType() == OpFuncType::kGpuAsync) {
      for (size_t next_instr_id : next_instr_ids) {
        if (vec_instruction_[next_instr_id].KernelType() ==
            OpFuncType::kGpuAsync) {
          cur_instr.AddNextInstrInSameThread(next_instr_id);
        } else {
          cur_instr.AddNextInstrInDifferentThread(next_instr_id);
        }
      }
    } else {
      bool has_instr_in_same_thread = false;
      for (size_t next_instr_id : next_instr_ids) {
        if (!has_instr_in_same_thread &&
            vec_instruction_[next_instr_id].KernelType() !=
                OpFuncType::kGpuAsync) {
          cur_instr.AddNextInstrInSameThread(next_instr_id);
          has_instr_in_same_thread = true;
        } else {
          cur_instr.AddNextInstrInDifferentThread(next_instr_id);
        }
      }
    }

    for (size_t next_instr_id : next_instr_ids) {
      ++dependecy_count_[next_instr_id];
    }
  }
}

// At the end of each step, the holder of phi::DenseTensor in LoDTensorArray is
// null. Clear these Tensors and leave LoDTensorArray empty, otherwise an
// exception will occur in the next step
void InterpreterCore::ClearLoDTensorArrayInLocalScope() {
  auto vars = local_scope_->LocalVars();
  for (auto var : vars) {
    if (var->IsType<LoDTensorArray>()) {
      auto* lod_tensor_arr = var->GetMutable<LoDTensorArray>();
      lod_tensor_arr->clear();
    }
  }
}

void InterpreterCore::Convert(
    std::vector<paddle::framework::OpFuncNode>* op_func_nodes) {
  auto& vec_meta_info = var_scope_.MutableVecMetaInfo();
  auto nodes = *op_func_nodes;
  auto op_nums = nodes.size();
  vec_instruction_.reserve(op_nums);
  for (size_t op_idx = 0; op_idx < op_nums; ++op_idx) {
    auto& op_func_node = nodes[op_idx];
    auto* dev_ctx_ = stream_analyzer_.ParseDeviceContext(op_func_node);
    Priority priority =
        interpreter::IsCommunicationOp(op_func_node.operator_base_->Type())
            ? Priority::kLowest
            : Priority::kNormal;
    vec_instruction_.emplace_back(
        op_idx, std::move(op_func_node), *dev_ctx_, priority);
  }

  BuildOperatorDependences();

  // NOTE(Ruibiao): For cross-step stream synchronization, an event may be
  // recorded in the first step and waited in the second step. So, in the first
  // step, the WaitEvent may be called without RecordEvent. Considering that
  // before the first call to RecordEvent, an Event represents an empty set of
  // work and WaitEvent always returns success immediately, we omit the
  // prelude-record for the first step here.
  stream_analyzer_.ConstructEvents(&vec_instruction_);

  // add event for the input vars of the jit program, since they are
  // asynchronously copied from gpu_pinned place to gpu place on the compute
  // stream.
  for (size_t i = 0; i < dependecy_count_.size(); ++i) {
    if (dependecy_count_[i] == 0) {
      auto& inst = vec_instruction_[i];
      if (inst.OpBase()->Type() == interpreter::kMemcpyD2H &&
          platform::is_gpu_place(place_)) {
        for (auto& item : inst.Inputs()) {
          for (auto var_id : item.second) {
            auto name = var_scope_.GetNameById(var_id);
            if (JitInputVars().count(name)) {
              auto device_event = std::make_shared<platform::DeviceEvent>(
                  place_, platform::GenerateDeviceEventFlag());
              VLOG(4) << "Add input event for input: " << name << " of "
                      << inst.OpBase()->Type();
              inst.AddEventToWait(
                  i, device_event, stream_analyzer_.GetWaiterType(inst));
            }
          }
        }
      }
    }
  }

  // calculate last_live_ops_
  for (size_t op_idx = 0; op_idx < op_nums; ++op_idx) {
    Instruction& instr = vec_instruction_[op_idx];
    OpInOutInfo info;
    info.Build(instr.OpBase());

    std::set<size_t> gc_check_vars;

    const std::map<std::string, std::vector<int>>& ins = instr.Inputs();
    const std::map<std::string, std::vector<int>>& outs = instr.Outputs();
    std::multimap<std::string, std::vector<int>> ins_and_outs{ins.begin(),
                                                              ins.end()};
    ins_and_outs.insert(outs.begin(), outs.end());

    for (auto& item : ins_and_outs) {
      for (auto id : item.second) {
        if (id == kEmptyVarIndex) {
          continue;
        }
        auto* var_desc = var_scope_.VarDesc(id);
        // skip no_need_buffer input vars
        if (var_desc && ins.count(item.first) &&
            !info.IsInArgBufferNeeded(var_desc->Name())) {
          continue;
        }
        // skip when this var is not in block and not a data_transferred var,
        // which means this var is managed by another block
        const auto& var_name = var_scope_.GetNameById(id);
        bool not_owned = !block_.HasVar(var_name);
        const auto& transferred_vars = var_scope_.DataTransferAddedVars();
        bool not_transferred =
            std::all_of(transferred_vars.begin(),
                        transferred_vars.end(),
                        [&](const std::pair<std::string, int>& elem) {
                          return elem.first != var_name;
                        });
        if (not_owned && not_transferred) {
          VLOG(10) << "[gc_check_inputs] skip gc: " << var_name;
          continue;
        }
        gc_check_vars.insert(id);
      }
    }

    for (auto var_id : gc_check_vars) {
      Scope* inner_scope =
          HasLocalScope() ? local_scope_ : var_scope_.GetMutableScope();
      paddle::framework::Variable* var =
          inner_scope->FindVar(var_scope_.GetNameById(var_id));
      if (var->IsType<phi::DenseTensor>() || var->IsType<phi::SelectedRows>() ||
          var->IsType<LoDTensorArray>()) {
        last_live_ops_[var_id].insert(op_idx);
      } else {
        VLOG(4) << "not clear " << var_scope_.GetNameById(var_id) << " after "
                << instr.OpBase()->Type() << " because its type is "
                << framework::ToTypeName(var->Type());
      }
    }
  }

  // clear the last_live_ops list for all vars in skip_gc_vars
  for (const std::string& skip_gc_var : execution_config_.skip_gc_vars) {
    int var_id = var_scope_.GetIdByName(skip_gc_var);
    if (var_id != -1) {
      last_live_ops_[var_id].clear();
      VLOG(8) << "Skip gc for var: " << skip_gc_var;
    }
  }

  // shrink: keep only the ops in last_live_ops_ that do not happen before any
  // other op in the same list
  // For example,
  // b = op1(a)
  // c = op2(a, b)
  // in this case, a is the input of op1 and op2, we only need to check
  // a after op2, because op2 always uses a after op1.
  for (size_t i = 0; i < last_live_ops_.size(); ++i) {
    std::set<size_t> minumum_last_live_ops;
    for (size_t item : last_live_ops_[i]) {
      bool not_before_any = true;
      // find the op that is not executed before any
      for (size_t other_item : last_live_ops_[i]) {
        if (dependency_builder_.OpHappensBefore(item, other_item)) {
          VLOG(8) << "happens_before: " << item << "->" << other_item
                  << ", so skip " << item;
          not_before_any = false;
          break;
        }
      }
      if (not_before_any) {
        VLOG(8) << "last live op of var " << i << " "
                << var_scope_.GetNameById(i) << " : " << item << " "
                << vec_instruction_[item].OpBase()->Type();
        minumum_last_live_ops.insert(item);
        vec_instruction_[item].AddGCCheckVar(i);
      }
    }
    last_live_ops_[i] = minumum_last_live_ops;
    vec_meta_info[i].var_ref_count_ = last_live_ops_[i].size();
  }

  for (size_t i = 0; i < vec_instruction_.size(); ++i) {
    BuildAndCacheInstructionCtx(&vec_instruction_[i]);
  }

  BuildSkipShareLoDInfo();

  bool inplaced = false;
  for (const Instruction& inst : vec_instruction_) {
    if (inst.OpBase()->Type() == "share_buffer" ||
        inst.OpBase()->Type() == "share_data") {
      VLOG(4) << "Already inplaced, skip inplace now.";
      inplaced = true;
    }
  }

  if (FLAGS_new_executor_use_inplace && !inplaced) {
    BuildInplace();
  }

  for (auto& dep : dependecy_count_) {
    deps_.emplace_back(std::make_shared<interpreter::OpDepInfo>(dep));
  }
  for (size_t i = 0; i < vec_meta_info.size(); ++i) {
    refs_.emplace_back(std::make_shared<interpreter::VarRefInfo>(
        vec_meta_info[i].var_ref_count_, var_scope_.VarRef(i)));
  }

  AnalyseExecuteOrderForTrace();
}

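// Mark an instruction's InferShapeContext to skip LoD sharing when every input
// is a phi::DenseTensor with empty LoD, so the per-op LoD propagation can be
// bypassed at run time.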
void InterpreterCore::BuildSkipShareLoDInfo() {
  for (size_t i = 0; i < vec_instruction_.size(); ++i) {
    bool can_skip_lod = true;
    for (auto& input : vec_instruction_[i].InnerRuntimeContext()->inputs) {
      for (auto& var : input.second) {
        if (var->IsType<phi::DenseTensor>()) {
          if (var->Get<phi::DenseTensor>().lod().size() != 0) {
            can_skip_lod = false;
            break;
          }
        } else {
          can_skip_lod = false;
          break;
        }
      }
    }
    vec_instruction_[i].InnerInferShapeContext()->SetSkipLoD(can_skip_lod);
  }
}

void InterpreterCore::RunOperator(const Instruction& instr_node) {
  auto* op = instr_node.OpBase();
  auto place = instr_node.DeviceContext().GetPlace();
  Scope* local_scope = HasLocalScope() ? var_scope_.GetMutableLocalScope()
                                       : var_scope_.GetMutableScope();
  VLOG(4) << "Start run " << place << " " << op->DebugStringEx(local_scope);

  SetDeviceId(place);

#ifdef PADDLE_WITH_ASCEND_CL
  if (platform::is_npu_place(place)) {
    // NOTE(wangxi): nan/inf cannot be detected on NPU by checking the
    // variable values, but only through the special `float_status` which
    // checks whether the operation overflows. More about `float_status`, see:
    // https://gitee.com/ascend/modelzoo/issues/I3NF8V?from=project-issue
    if (FLAGS_check_nan_inf) {
      framework::details::NPUAllocAndClearFloatStatus(*op, *local_scope, place);
    }
  }
#endif

  auto op_with_kernel = dynamic_cast<const framework::OperatorWithKernel*>(op);
  {
    // If it is OperatorBase, InferShape do nothing.
    if (op_with_kernel != nullptr) {
      platform::RecordEvent infershape_event(
          "infer_shape",
          platform::TracerEventType::OperatorInner,
          1,
          platform::EventRole::kInnerOp);

      // see OperatorWithKernel::RunImpl in operator.cc for why
      if (!(op_with_kernel->HasAttr(kAllKernelsMustComputeRuntimeShape) &&
            op_with_kernel->Attr<bool>(kAllKernelsMustComputeRuntimeShape))) {
        op_with_kernel->Info().infer_shape_(
            instr_node.InnerInferShapeContext().get());
      }
      infershape_event.End();
      platform::RecordOpInfoSupplement(op->Type(),
                                       op->Attrs(),
                                       *(instr_node.InnerInferShapeContext()),
                                       *(instr_node.InnerRuntimeContext()),
                                       op->Id());
    }
  }
  if (op_with_kernel != nullptr && FLAGS_new_executor_use_inplace) {
    // TODO(xiongkun03) Does operator base support inplace ?
    for (auto& pair : instr_node.InplaceInfo()) {
      const auto& in = paddle::framework::details::GetTensorFromVar(pair.first);
      auto* out =
          paddle::framework::details::GetMutableTensorFromVar(pair.second);
      if (in.dims() == out->dims()) {
        out->ShareBufferWith(in);
      }
    }
  }

  {
    platform::RecordEvent compute_event(
        "compute",
        platform::TracerEventType::OperatorInner,
        1,
        platform::EventRole::kInnerOp);
    if (op_with_kernel == nullptr) {
      instr_node.OpBase()->Run(*local_scope, place_);
    } else {
      // fit for phi
      if (instr_node.PhiKernel() && instr_node.PhiKernel()->IsValid()) {
        VLOG(4) << "Run phi kernel: " << op->Type();
        VLOG(4) << instr_node.InnerRuntimeContext().get() << " "
                << &instr_node.DeviceContext();
        phi::KernelContext phi_kernel_context;
        op_with_kernel->BuildPhiKernelContext(
            *instr_node.InnerRuntimeContext().get(),
            const_cast<platform::DeviceContext*>(&instr_node.DeviceContext()),
            &phi_kernel_context);

        (*instr_node.PhiKernel())(&phi_kernel_context);

      } else {
        instr_node.KernelFunc()(*instr_node.InnerExecutionContext().get());
      }
    }
  }

  VLOG(4) << "End run " << place << " " << op->DebugStringEx(local_scope);

  if (!instr_node.InplaceBackMap().empty()) {
    platform::RecordEvent inplaceback_event(
        "InplaceVarsBack", platform::TracerEventType::UserDefined, 10);
    auto& m = instr_node.InplaceBackMap();
    // NOTE(zhiqiu): same logic as TransferInplaceVarsBack() in operator.cc
    for (auto& p : m) {
      auto* transformed_tensor = GetMutableLoDTensorOrSelectedRowsValueFromVar(
          var_scope_.VarRef(p.first));
      auto* original_tensor = GetMutableLoDTensorOrSelectedRowsValueFromVar(
          var_scope_.VarRef(p.second));
      original_tensor->ShareDataWith(*transformed_tensor);
      VLOG(4) << "Transfer inplace variable back form "
              << var_scope_.GetNameById(p.first) << " to "
              << var_scope_.GetNameById(p.second);
    }
  }

  /*For profiling/benchmark only*/
  if (FLAGS_benchmark) {
    instr_node.DeviceContext().Wait();
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    PADDLE_ENFORCE_GPU_SUCCESS(platform::GpuGetLastError());
    VLOG(4) << "Operator(" << op->Type()
            << "): context wait and get last error";
#endif
  }

  // for debug nan/inf
  if (op_with_kernel != nullptr && FLAGS_check_nan_inf) {
    VLOG(4) << "Check nan/inf";
    framework::details::CheckOpHasNanOrInf(
        *op,
        *local_scope,
        place);  // TODO(xiongkun03) change it to inner scope.
  }
}

void InterpreterCore::RunInstruction(const Instruction& instr_node) {
  VLOG(5) << __func__ << " OP id:" << instr_node.Id()
          << " name:" << instr_node.OpBase()->Type() << " type:"
          << (instr_node.KernelType() == OpFuncType::kCpuSync
                  ? "kCpuSync"
                  : (instr_node.KernelType() == OpFuncType::kGpuSync
                         ? "kGpuSync"
                         : "kGpuAsync"))
          << " runs on " << platform::GetCurrentThreadName();

  auto* op = instr_node.OpBase();
  platform::RecordEvent instruction_event(
      op->Type(), platform::TracerEventType::Operator, 1);

  try {
    instr_node.WaitEvent(place_);

    if (!instr_node.IsArtificial()) {
      RunOperator(instr_node);
      CheckGC(instr_node);
      interpreter::LogDeviceMemoryStats(place_);
    }

    instr_node.RecordEvent(place_);
  } catch (platform::EnforceNotMet& ex) {
    framework::InsertCallStackInfo(op->Type(), op->Attrs(), &ex);
    exception_holder_.Catch(std::make_exception_ptr(std::move(ex)));
  } catch (platform::EOFException&) {
    exception_holder_.Catch(std::current_exception());
  } catch (std::exception& ex) {
    LOG(WARNING) << op->Type() << " raises an exception "
                 << platform::demangle(typeid(ex).name()) << ", " << ex.what();
    exception_holder_.Catch(std::current_exception());
  } catch (...) {
    LOG(WARNING) << op->Type() << " raises an unknown exception";
    exception_holder_.Catch(std::current_exception());
  }
}

void InterpreterCore::ExecuteInstructionList(
    const std::vector<Instruction>& vec_instr) {
  interpreter::ResetAtomicGuard guard(&deps_, &refs_);
  unfinished_op_number_ = vec_instr.size();
  if (unfinished_op_number_ == 0) {
    VLOG(4) << "No op to run, return";
    return;
  }

  exception_holder_.Clear();

  for (size_t i = 0; i < dependecy_count_.size(); ++i) {
    if (dependecy_count_[i] == 0) {
      // NOTE(zhiqiu): hot fix for jit input var
      RecordMemcpyD2H(vec_instr.at(i));
      async_work_queue_->AddTask(vec_instr.at(i).KernelType(),
                                 [this, i] { RunInstructionAsync(i); });
    }
  }

  auto event_name = main_thread_blocker_.WaitEvent();
  VLOG(1) << "main_thread_blocker_(" << &main_thread_blocker_
          << ") got event_name: " << event_name;

  if (UNLIKELY(exception_holder_.IsCaught())) {
    VLOG(1) << "Exception caught " << exception_holder_.Type();
    // Graceful exit when the executor encountered a fatal error.
    // EOF is not a fatal error.
    if (exception_holder_.Type() != "EOF") {
      async_work_queue_->Cancel();
    }
    VLOG(4) << "Cancel ok";
    PADDLE_ENFORCE_EQ(
        main_thread_blocker_.Clear(),
        0,
        platform::errors::PreconditionNotMet(
            "main_thread_blocker_.Clear() return -1, clear failed"));
    VLOG(4) << "clear ok";
    exception_holder_.ReThrow();
  }
}

void InterpreterCore::RunNextInstructions(
    const Instruction& instr, std::deque<size_t>* reserved_next_ops) {
  platform::RecordEvent record(
      "RunNextInstructions", platform::TracerEventType::UserDefined, 10);

  auto IsReady = [this](size_t next_id) {
    VLOG(4) << "op_id: " << next_id
            << ", remain deps: " << deps_[next_id]->DynamicDep();
    return deps_[next_id]->CheckAndDecrease();
  };

  for (size_t next_instr_id : instr.NextInstrsInDifferenceThread()) {
    if (IsReady(next_instr_id)) {
      async_work_queue_->AddTask(
          vec_instruction_[next_instr_id].KernelType(),
          [this, next_instr_id]() { RunInstructionAsync(next_instr_id); });
    }
  }

  for (size_t next_instr_id : instr.NextInstrsInSameThread()) {
    if (IsReady(next_instr_id)) {
      if (vec_instruction_[next_instr_id].GetPriority() == Priority::kLowest) {
        reserved_next_ops->push_back(next_instr_id);
      } else {
        reserved_next_ops->push_front(next_instr_id);
      }
    }
  }
}

void InterpreterCore::RunInstructionAsync(size_t instr_id) {
  std::deque<size_t> ready_ops;
  ready_ops.push_back(instr_id);
  while (!ready_ops.empty()) {
    instr_id = ready_ops.front();
    ready_ops.pop_front();
    auto& instr_node = vec_instruction_.at(instr_id);

    RunInstruction(instr_node);

    if (UNLIKELY(exception_holder_.IsCaught())) {
      VLOG(4) << "Exception caught";
      if (exception_notifier_ != nullptr) {
        exception_notifier_->NotifyEvent();
      }
      return;
    }

    VLOG(4) << "unfinished_op_number_: " << unfinished_op_number_;
    if (UNLIKELY(unfinished_op_number_.fetch_sub(
                     1, std::memory_order_relaxed) == 1)) {
      if (completion_notifier_ != nullptr) {
        completion_notifier_->NotifyEvent();
      }
    }

    RunNextInstructions(instr_node, &ready_ops);
  }
}

void InterpreterCore::RecordStreamForGC(const Instruction& instr) {
#if !defined(PADDLE_WITH_CUDA) && !defined(PADDLE_WITH_HIP)
  PADDLE_THROW(platform::errors::Unimplemented(
      "RecordStreamForGC is only implemented when compiled with GPU."));
#else
  if (!IsInterpretercoreFastGCEnabled() ||
      instr.KernelType() != OpFuncType::kGpuAsync) {
    return;
  }
  platform::RecordEvent record(
      "RecordStreamForGC", platform::TracerEventType::UserDefined, 10);

  gpuStream_t stream =
      reinterpret_cast<const phi::GPUContext&>(instr.DeviceContext()).stream();
  auto TensorRecordStream = [&stream](phi::DenseTensor& tensor) {
    auto allocation = tensor.Holder();
    if (allocation == nullptr) {
      return;
    }

    const platform::Place& place = allocation->place();
    if (platform::is_gpu_place(place)) {
      memory::RecordStream(allocation, stream);
    } else if (platform::is_cuda_pinned_place(place)) {
      // TODO(Ruibiao): Here should do something to make sure that the tensor
      // is not freed until the H2D copies are done. However, simply launching a
      // CUDA runtime callback to the H2D stream may lead to high performance
      // overhead. As all the cases we meet in H2D are copies from CPUPlace at
      // present, we just log a WARNING here. A better design is required.
      LOG(WARNING) << "Copy data from a CUDAPinned tensor in an asynchronous "
                      "manner may lead to data inconsistency";
    } else {
      // memory copies involve CPUPlace are always synchronous, so just do
      // nothing here
    }
  };

  /* NOTE(Ruibiao):Cross-stream tensor synchronization is required only when
   * all the following conditions are satisfied:
   * 1. The tensor will be GC after running the instruction, i.e., in
   * instr.GCCheckVars.
   * 2. The stream which initializes this tensor is different from the stream
   * which the instruction run in.
   * 3. The tensor is the instruction's input, because we assume that an
   * instruction will initialize all output tensors with its running stream.
   * 4. In the OP function of this instruction, the tensor is an input of a
   * async CUDA kernel.
   *
   * Here we only process the first condition, because:
   * 1. Since the RecordStream function will directly return when the recorded
   * stream is equal to the owning stream, recording a stream same as which
   * initialized this tensor has less time overhead. Conversely, it may take
   * more time if we try to extract those cross-stream input vars from
   * instr.GCCheckVars.
   * 2. Now the instruction has no idea of which vars are involved in async
   * running in the OP function, and thus we cannot recognize condition 4. It should be
   * supported later.
   */
  for (int var_id : instr.GCCheckVars()) {
    VLOG(4) << "GC sync " << var_scope_.GetNameById(var_id) << " "
            << var_scope_.VarDesc(var_id);

    // persistable var will be ignore while GC
    if (var_scope_.VarDesc(var_id) &&
        var_scope_.VarDesc(var_id)->Persistable()) {
      continue;
    }

    paddle::framework::Variable* var = var_scope_.VarRef(var_id);
    if (var == nullptr) {
      continue;
    }

    if (var->IsType<phi::DenseTensor>()) {
      TensorRecordStream(*(var->GetMutable<phi::DenseTensor>()));
    } else if (var->IsType<
                   operators::reader::
                       OrderedMultiDeviceLoDTensorBlockingQueueHolder>()) {
      // do nothing
    } else if (var->IsType<phi::SelectedRows>()) {
      TensorRecordStream(
          *(var->GetMutable<phi::SelectedRows>()->mutable_value()));
    } else if (var->IsType<LoDTensorArray>()) {
      auto* tensor_arr = var->GetMutable<LoDTensorArray>();
      for (auto& tensor : *tensor_arr) {
        TensorRecordStream(tensor);
      }
    } else if (var->IsType<std::vector<Scope*>>()) {
      // do nothing
    } else {
      PADDLE_THROW(platform::errors::Unimplemented(
          "The variable(%s) is not supported in eager deletion.",
          framework::ToTypeName(var->Type())));
    }
  }
#endif
}

void InterpreterCore::CheckGC(const Instruction& instr) {
  platform::RecordEvent record(
      "CheckGC", platform::TracerEventType::UserDefined, 10);
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  RecordStreamForGC(instr);
#endif
  auto& var_scope = var_scope_;

  for (auto var_id : instr.GCCheckVars()) {
    VLOG(4) << "GC:" << var_scope_.GetNameById(var_id) << ", id:" << var_id
            << ", ref:" << refs_[var_id]->DynamicRef();
    bool is_ready = refs_[var_id]->CheckAndDecrease();
    // ignore all persistable var while GC
    if (var_scope.VarDesc(var_id) && var_scope.VarDesc(var_id)->Persistable()) {
      continue;
    }
    if (is_ready) {
      VLOG(6) << "Async delete variable with name : "
              << var_scope.GetNameById(var_id);
      gc_->Add(refs_[var_id]->Var(), instr);
    }
  }
}

void InterpreterCore::Prepare(const std::vector<std::string>& feed_names,
                              const std::vector<phi::DenseTensor>& feed_tensors,
                              bool prepare_feed) {
  PADDLE_ENFORCE_EQ(feed_names.size(),
                    feed_tensors.size(),
                    platform::errors::PreconditionNotMet(
                        "Required feed_names.size() == feed_tensors.size(), "
                        "but received %d != %d",
                        feed_names.size(),
                        feed_tensors.size()));
  auto FeedInput = [&] {
    VLOG(4) << "Feed inputs";
    for (size_t i = 0; i < feed_names.size(); ++i) {
      auto* feed_var = local_scope_->FindVar(feed_names[i]);
      PADDLE_ENFORCE_NOT_NULL(
          feed_var,
          platform::errors::NotFound("Variable %s should not be nullptr.",
                                     feed_names[i]));

      auto feed_tensor = feed_var->GetMutable<phi::DenseTensor>();
      feed_tensor->ShareDataWith(feed_tensors[i]);
      feed_tensor->set_lod(feed_tensors[i].lod());
    }
  };

  if (!is_build_) {
    paddle::framework::interpreter::BuildVariableScope(
        block_, &var_scope_, HasLocalScope());
    FeedInput();
    std::vector<paddle::framework::OpFuncNode> op_func_nodes;
    paddle::framework::interpreter::BuildOpFuncList(
        place_,
        block_,
        execution_config_.skip_gc_vars,
        &op_func_nodes,
        &var_scope_,
        execution_config_,
        HasLocalScope());
    SetFeedVarsInplaceSkip(feed_names);
    // convert vec func_list to graph
    Convert(&op_func_nodes);
    UpdateSyncOpNum();
    is_build_ = true;
  }
  // NOTE: Because feed_tensor will be GC'ed after
  // paddle::framework::BuildOpFuncList, we should
  // call FeedInput again.
  if (prepare_feed) {
    FeedInput();
  }
}

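// Mark feed variables so that BuildInplace will not reuse their buffers
// in-place (see the GetVarSikpInplace check in BuildInplace).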
void InterpreterCore::SetFeedVarsInplaceSkip(
    const std::vector<std::string>& feed_names) {
  for (auto& feed_name : feed_names) {
    var_scope_.SetVarSikpInplace(feed_name, true);
  }
}

bool InterpreterCore::HasLocalScope() const { return local_scope_ != nullptr; }

std::shared_ptr<InterpreterCore> CreateInterpreterCore(
    const platform::Place& place,
    const ProgramDesc& prog,
    Scope* scope,
    const std::vector<std::string>& fetch_names,
    const std::set<std::string>& skip_gc_vars) {
  std::shared_ptr<InterpreterCore> core = nullptr;
  // NOTE(Aurelius84): `AddFetch` will modify BlockDesc, so we should copy
  // a new program.
  auto new_prog = std::make_shared<framework::ProgramDesc>(prog);
  auto* block = new_prog->MutableBlock(0);
  interpreter::AddFetch(fetch_names, block);

  core = std::make_shared<InterpreterCore>(place, *block, skip_gc_vars, scope);
  core->SetCopyProgram(new_prog);
  return core;
}
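
// A minimal usage sketch for the helper above (variable names here are
// illustrative only, not taken from this file): build a core for a program,
// then drive it with Run().
//
//   std::shared_ptr<InterpreterCore> core = CreateInterpreterCore(
//       place, program, &scope, /*fetch_names=*/{"out"}, /*skip_gc_vars=*/{});
//   paddle::framework::FetchList fetches =
//       core->Run(feed_names, feed_tensors);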

// Note(zhangbo):
// (1) What is "Trace"?
// The OP execution scheduling rule adopted by InterpreterCore by default is a
// multi-threaded scheduling mode (see ExecuteInstructionList). By maintaining
// a high-performance thread pool, the OP execution is distributed to the sub
// threads maintained by the thread pool, while the main thread has no tasks.
// In Trace mode, the executor executes directly in the main thread according
// to the pre-provided OP sequence (trace_execute_order_), instead of
// distributing work to the thread pool.
// (2) When do we use "Trace"?
// In dygraph-to-static, the default scheduling makes the execution of the
// forward and backward OPs and the execution of the dygraph optimizer happen
// on different threads, and the thread switching may cause cpu cache misses.
// Besides, when a model consists only of KQueueAsync type OPs, all OPs are
// distributed to the DeviceThread for execution, so the multithreading
// scheduling brings no benefit. Therefore, in dygraph-to-static, when the
// number of synchronous (KQueueSync) OPs is 0, we choose Trace mode.
void InterpreterCore::TraceInstructionList(
    const std::vector<Instruction>& vec_instr) {
  unfinished_op_number_ = vec_instr.size();
  if (unfinished_op_number_ == 0) {
    VLOG(4) << "No op to run, return";
    return;
  }

  exception_holder_.Clear();

  for (size_t i = 0; i < dependecy_count_.size(); ++i) {
    if (dependecy_count_[i] == 0) {
      // NOTE(zhiqiu): hot fix for jit input var
      RecordMemcpyD2H(vec_instr.at(i));
    }
  }

  for (size_t idx = 0; idx < trace_execute_order_.size(); idx++) {
    auto instr_id = trace_execute_order_[idx];
    auto& instr_node = vec_instruction_.at(instr_id);

    RunInstruction(instr_node);

    if (UNLIKELY(exception_holder_.IsCaught())) {
      VLOG(4) << "Exception caught";
      break;
    }
  }

  if (UNLIKELY(exception_holder_.IsCaught())) {
    VLOG(1) << "Exception caught " << exception_holder_.Type();
    PADDLE_ENFORCE_EQ(
        main_thread_blocker_.Clear(),
        0,
        platform::errors::PreconditionNotMet(
            "main_thread_blocker_.Clear() return -1, clear failed"));
    VLOG(4) << "clear ok";
    exception_holder_.ReThrow();
  }
}

void InterpreterCore::RecordMemcpyD2H(const Instruction& instr_node) {
  // NOTE(zhiqiu): hot fix for jit input var
  if (instr_node.OpBase()->Type() == interpreter::kMemcpyD2H) {
    platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
    auto* default_dev_ctx = pool.Get(place_);
    for (auto& event : instr_node.EventsToWait()) {
      platform::RecordEvent record(
          "RecordStreamEvent", platform::TracerEventType::UserDefined, 10);
      VLOG(3) << "Record event on default stream in jit_input_var at op: "
              << instr_node.OpBase()->Type();
      event.event_->Record(default_dev_ctx);
    }
  }
}

void InterpreterCore::UpdateSyncOpNum() {
  int64_t sync_op_num = 0;
  for (size_t i = 0; i < vec_instruction_.size(); ++i) {
    if (vec_instruction_[i].KernelType() == OpFuncType::kCpuSync ||
        vec_instruction_[i].KernelType() == OpFuncType::kGpuSync) {
      sync_op_num = sync_op_num + 1;
    }
  }
  sync_op_num_ = sync_op_num;
  VLOG(4) << "Update sync op num, sync op num is: " << sync_op_num_;
}

// Note(zhangbo):
// When there is a KQueueSync type OP in the model, breadth-first traversal is
// better than depth-first traversal. For example:
//   OP(O) ->(direct_run)-> OP(A) ->(sync_run)-> OP(B)
//   OP(O) ->(direct_run)-> OP(C) ->(direct_run)-> OP(D)
// If B is run before C, B may always block waiting for A to finish executing,
// while in fact C could be executed first during this time.
void InterpreterCore::AnalyseExecuteOrderForTrace() {
  VLOG(4) << "Analyze the execution order of Trace scheduling mode.";
  interpreter::ResetAtomicGuard guard(&deps_, &refs_);

  auto op_downstream_map = dependency_builder_.OpDownstreamMap();

  auto IsReady = [this](size_t next_id) {
    VLOG(4) << "op_id: " << next_id
            << ", remain deps: " << deps_[next_id]->DynamicDep();
    return deps_[next_id]->CheckAndDecrease();
  };

  std::vector<size_t> trace_order;
  std::deque<size_t> ready_ops;

  for (size_t instr_id = 0; instr_id < dependecy_count_.size(); ++instr_id) {
    if (dependecy_count_[instr_id] == 0) {
      ready_ops.push_back(instr_id);
    }
  }

  while (!ready_ops.empty()) {
    auto now_id = ready_ops.front();
    ready_ops.pop_front();
    trace_order.push_back(now_id);

    auto next_op_set = op_downstream_map[now_id];

    for (size_t next_op_id : next_op_set) {
      if (IsReady(next_op_id)) {
        ready_ops.push_back(next_op_id);
      }
    }
  }

  PADDLE_ENFORCE_EQ(
      trace_order.size(),
      dependecy_count_.size(),
      platform::errors::PreconditionNotMet(
          "trace_order size should be equal to dependecy_count_."));

  trace_execute_order_ = trace_order;
}

}  // namespace framework
}  // namespace paddle