//   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once
#include <functional>
#include <map>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>

#include "paddle/fluid/framework/details/execution_strategy.h"
#include "paddle/fluid/framework/details/var_handle.h"
#include "paddle/fluid/framework/ir/node.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/macros.h"

namespace paddle {
namespace platform {
class DeviceContext;
}  // namespace platform
}  // namespace paddle

namespace paddle {
namespace framework {

class Scope;
namespace details {
struct VarHandleBase;
}  // namespace details
namespace ir {
class Node;
}  // namespace ir

namespace details {
// Wraps ir::Node and provide helper utilities.
// It's responsible for populating necessary fields of ir::Node.
class OpHandleBase {
 public:
Z
Zeng Jinle 已提交
51 52 53 54 55 56 57
  /**
   * NOTE(zjl): Some op should have higher priority than others.
   * The higher priority op would run first without switching
   * threads in Executor.
   */
  enum Priority { kHighest = 0, kNormal = 1 };

X
Xin Pan 已提交
58
  // Owned by `node`. No need to be deleted explicitly.
X
clean1  
Xin Pan 已提交
59 60 61
  explicit OpHandleBase(ir::Node *node) : node_(node) {
    node_->WrappedBy(this);
  }
Y
Yu Yang 已提交
62

Z
Zeng Jinle 已提交
63
  virtual ~OpHandleBase() PADDLE_MAY_THROW;
X
Xin Pan 已提交
64

Y
Yu Yang 已提交
65 66
  std::string DebugString() const;

Z
Zeng Jinle 已提交
67 68
  virtual Priority GetPriority() const { return kNormal; }

69 70 71 72
  virtual bool GetSkipRunning() const { return skip_running_; }

  virtual void SetSkipRunning(bool skip_runing) { skip_running_ = skip_runing; }

Y
Yu Yang 已提交
73 74
  virtual std::string Name() const = 0;

75
  void Run(ExecutionStrategy::UseDevice use_device);
Y
Yu Yang 已提交
76

C
chengduoZH 已提交
77
  virtual void RecordWaitEventOnCtx(platform::DeviceContext *waited_ctx);
Y
Yu Yang 已提交
78

Y
Yu Yang 已提交
79 80 81 82
  void AddInput(VarHandleBase *in);

  void AddOutput(VarHandleBase *out);

83 84
  // This method adds the wait events of all the input on all the device
  // context.
85 86 87 88 89
  // NOTE: This Wait is asynchronous operation.
  // NOTE: wait_for_feed is added to wait for feed var, since it has
  // generated op, no event and cannot perform event wait. It is only
  // used in fetch_async_op_handle currently.
  virtual void WaitInputVarGenerated(bool wait_for_feed = false);
C
chengduoZH 已提交
90

91 92
  // This method adds the wait events of all the input on the specified device
  // context.
93
  // NOTE: This Wait is asynchronous operation.
C
chengduoZH 已提交
94 95 96 97
  virtual void WaitInputVarGenerated(const platform::Place &place);

  virtual bool NeedWait(VarHandleBase *in_var);

X
Polish  
Xin Pan 已提交
98 99 100
  // If the Op involves data transfer of multiple devices that
  // will likely block other computations.
  virtual bool IsMultiDeviceTransfer() { return false; }
X
Xin Pan 已提交
101

X
Xin Pan 已提交
102
  const platform::DeviceContext *DeviceContext(platform::Place place) {
S
sneaxiy 已提交
103 104
    auto it = dev_ctxes_.find(place);
    return it != dev_ctxes_.end() ? it->second : nullptr;
X
Xin Pan 已提交
105
  }
Y
Yancey1989 已提交
106 107 108
  const std::map<platform::Place, platform::DeviceContext *> &DeviceContext() {
    return dev_ctxes_;
  }
X
Xin Pan 已提交
109 110 111 112 113 114 115

  void SetDeviceContext(platform::Place place, platform::DeviceContext *ctx_) {
    dev_ctxes_[place] = ctx_;
  }

  const std::vector<VarHandleBase *> &Inputs() const { return inputs_; }

116 117 118 119 120 121 122 123
  size_t NoDupInputSize() const {
    std::unordered_set<VarHandleBase *> res;
    for (auto *var : inputs_) {
      res.emplace(var);
    }
    return res.size();
  }

Y
Stash  
yuyang18 已提交
124 125
  size_t NotReadyInputSize() const;

X
Xin Pan 已提交
126 127
  const std::vector<VarHandleBase *> &Outputs() const { return outputs_; }

C
chengduoZH 已提交
128 129
  size_t NoDummyInputSize() const;

X
Xin Pan 已提交
130 131
  ir::Node *Node() { return node_; }

132 133
  const ir::Node *Node() const { return node_; }

134 135 136
  void SetLocalExecScopes(
      const std::unordered_map<Scope *, Scope *> &scope_map);

Y
Yu Yang 已提交
137
 protected:
138 139
  virtual std::vector<Scope *> GetLocalScopes() = 0;

Y
Yu Yang 已提交
140 141
  void RunAndRecordEvent(const std::function<void()> &callback);

142 143 144
  void RunAndRecordEvent(platform::Place p,
                         const std::function<void()> &callback);

Y
Yu Yang 已提交
145
  virtual void RunImpl() = 0;
X
Xin Pan 已提交
146

147 148
  virtual void InitCUDA();

X
Xin Pan 已提交
149
  ir::Node *node_;
X
Xin Pan 已提交
150 151
  std::vector<VarHandleBase *> inputs_;
  std::vector<VarHandleBase *> outputs_;
152
  std::map<platform::Place, platform::DeviceContext *> dev_ctxes_;
X
Xin Pan 已提交
153

154
  std::vector<Scope *> local_exec_scopes_;
155
  bool skip_running_ = false;
156

X
Xin Pan 已提交
157 158 159 160 161
#ifdef PADDLE_WITH_CUDA
  std::unordered_map<int, cudaEvent_t> events_;
#endif

  DISABLE_COPY_AND_ASSIGN(OpHandleBase);
Y
Yu Yang 已提交
162 163 164 165 166
};

}  // namespace details
}  // namespace framework
}  // namespace paddle