eager_deletion_op_handle.h
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <deque>
#include <memory>
#include <string>
#include <unordered_set>
#include <vector>
#include "paddle/fluid/framework/details/op_handle_base.h"
#include "paddle/fluid/framework/ir/memory_optimize_pass/reference_count_pass_helper.h"

namespace paddle {
namespace framework {
class Scope;

namespace ir {
class MemOptVarInfo;
}  // namespace ir

namespace details {

class EagerDeletionOpHandle : public OpHandleBase {
 public:
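  // Creates an op handle that eagerly frees the variables described by
  // `vars` in the `scope_idx`-th local scope `scope` on `place`; the
  // actual memory release is delegated to `gc` (not owned).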
  EagerDeletionOpHandle(ir::Node *node, Scope *scope, size_t scope_idx,
                        const platform::Place &place,
                        const std::unordered_set<ir::MemOptVarInfo *> &vars,
                        GarbageCollector *gc);

  ~EagerDeletionOpHandle();

  std::string Name() const override;

  /**
   * Currently, EagerDeletionOpHandle has the highest priority.
   * This priority setting speeds up GC by 15% for the Transformer
   * model on 8 V100 GPUs.
   */
  Priority GetPriority() const override { return kHighest; }

  size_t GetScopeIdx() const { return scope_idx_; }

 protected:
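  // Collects the memory held by the dead variables and hands it over to
  // the garbage collector.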
  void RunImpl() override;

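  // On CUDA places, prepares dev_ctx_ and event_ so that the deletion can
  // be synchronized with the computation stream.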
  void InitCUDA() override;

  std::vector<Scope *> GetLocalScopes() override { return {scope_}; }

 private:
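  // Passes the collected allocations to gc_ for release.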
  void ClearGarbages(
      std::deque<std::shared_ptr<memory::Allocation>> *garbages);

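  // One-time lazy initialization, e.g. caching the Variable pointers in vars_.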
  void CallOnce();

  Scope *scope_;
  size_t scope_idx_;
  platform::Place place_;
  std::vector<ir::MemOptVarInfo *> var_infos_;  // not own
  GarbageCollector *gc_;                        // not own
  std::vector<Variable *> vars_;
#ifdef PADDLE_WITH_CUDA
  platform::CUDADeviceContext *dev_ctx_{nullptr};
  cudaEvent_t event_{nullptr};
#endif
};

}  // namespace details
}  // namespace framework
}  // namespace paddle