// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <memory>

#include "paddle/fluid/eager/api/utils/global_utils.h"
#include "paddle/fluid/eager/eager_tensor.h"
#include "paddle/fluid/eager/hooks.h"
#include "paddle/phi/api/all.h"

namespace egr {
/**
 * GradNodeBase is the base class of all grad nodes; it is what eager
 * execution uses. Most of the backward autograd members are defined here,
 * and each operator should hold its own forward inputs as TensorWrapper.
 *
 * A GradNodeBase is held in autograd_meta, and it is also a member of Edge,
 * which represents an edge of the backward graph.
 *
 * TODO(yangzhanlue): GradNodeBase will also be in charge of getting the
 * correct input from GradOpDescMaker to GradNodeBase.
 *
 * NOTE: GradNodeBase has an operator() method, which should be overridden by
 * the specific derived class. It prepares the backward inputs and double
 * backward's dependencies, then calls the C++ API of the backward kernel
 * functions to finish the backward computation.
 *
 * NOTE: GradNodeBase holds its own inputs and outputs.
 *
 * Edge describes a dependency of the backward graph. An Edge links two nodes;
 * it contains a Node and the rank of this Node (the rank indicates which grad
 * input this edge belongs to).
 **/
class AutogradMeta;
class GradNodeBase;

class Edge {
 public:
  // Default constructor so that an Edge can be constructed inside AutogradMeta.
  Edge() : in_slot_id_(0), in_rank_(0), grad_node_(nullptr) {}

  // In real use cases an Edge should be created from a grad node and an input
  // rank, which indicate which edge it is.
  // Since operators use a slot design, an edge has to be located by its slot
  // and rank.
  Edge(const std::shared_ptr<GradNodeBase>& grad_node,
       size_t in_slot_id,
       size_t in_rank)
      : in_slot_id_(in_slot_id), in_rank_(in_rank), grad_node_(grad_node) {}

  Edge(const std::shared_ptr<GradNodeBase>& grad_node,
       const std::pair</* slot_id */ size_t, /* rank */ size_t>& rank_info)
      : in_slot_id_(rank_info.first),
        in_rank_(rank_info.second),
        grad_node_(grad_node) {}

  GradNodeBase* GetGradNode() const { return grad_node_.get(); }

  std::shared_ptr<GradNodeBase> GetMutableGradNode() const {
    return grad_node_;
  }

  void SetGradNode(const std::shared_ptr<GradNodeBase>& node) {
    VLOG(7) << "Resetting Edge's Grad Node";
    grad_node_ = node;
  }

  std::pair<size_t, size_t> GetEdgeRankInfo() const {
    return std::make_pair(in_slot_id_, in_rank_);
  }

  void SetEdgeRankInfo(size_t slot_id, size_t in_rank) {
    in_slot_id_ = slot_id;
    in_rank_ = in_rank;
  }

  void SetEdgeRankInfo(
      const std::pair</* slot_id */ size_t, /* rank */ size_t>& edge_rank) {
    in_slot_id_ = edge_rank.first;
    in_rank_ = edge_rank.second;
  }

  // Currently we use grad_node_ to identify whether an edge is initialized.
  bool IsInitialized() const { return grad_node_ != nullptr; }

  void Clear() {
    grad_node_.reset();
    in_slot_id_ = 0;
    in_rank_ = 0;
  }

 private:
  size_t in_slot_id_;
  size_t in_rank_;
  std::shared_ptr<GradNodeBase> grad_node_{nullptr};
};
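
// Illustrative usage sketch (not part of the original header): constructing an
// Edge from a grad node. "MyGradNode" is a hypothetical GradNodeBase subclass
// used only for this example.
//
//   std::shared_ptr<GradNodeBase> node = std::make_shared<MyGradNode>();
//   Edge edge(node, /* in_slot_id = */ 0, /* in_rank = */ 1);
//   if (edge.IsInitialized()) {
//     auto rank_info = edge.GetEdgeRankInfo();  // {0, 1}
//     edge.SetEdgeRankInfo(/* slot_id = */ 1, /* in_rank = */ 0);
//   }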

/**
 * GradSlotMeta is used to record forward Tensor info for backward, since
 * paddle has lots of operators whose backward logic depends on whether they
 * have some specific inputs or outputs. So, we need meta info to record these
 * needs.
 **/
class GradSlotMeta {
 public:
  GradSlotMeta() = default;
  bool IsStopGradient() const { return stop_gradient_; }
  void SetStopGradient(bool stop_gradient = true) {
    stop_gradient_ = stop_gradient;
  }

  void SetTensorMeta(const phi::DenseTensorMeta& meta) {
    meta_ = std::make_shared<phi::DenseTensorMeta>(meta);
  }
  bool HasTensorMeta() const { return meta_ && meta_.get(); }
  const phi::DenseTensorMeta& GetTensorMeta() const {
    if (!HasTensorMeta()) {
      PADDLE_THROW(paddle::platform::errors::Fatal(
          "meta_ of GradSlotMeta has not been initialized yet. "
          "You're expected to check availability with HasTensorMeta() "
          "before calling the GetTensorMeta() interface."));
    }
    return *meta_.get();
  }

  void SetPlace(const phi::Place& place) { place_ = place; }
  const phi::Place& GetPlace() const { return place_; }

  void SetEdge(const Edge& edge) { adj_edge_ = edge; }
  void SetEdge(
      const std::shared_ptr<GradNodeBase>& grad_node,
      const std::pair</* slot_id */ size_t, /* rank */ size_t>& rank_info) {
    adj_edge_.SetGradNode(grad_node);
    adj_edge_.SetEdgeRankInfo(rank_info);
  }
  Edge& GetMutableEdge() { return adj_edge_; }
  const Edge& GetEdge() const { return adj_edge_; }

 private:
  bool stop_gradient_{false};
  phi::Place place_;
  std::shared_ptr<phi::DenseTensorMeta> meta_ = nullptr;
  Edge adj_edge_;
};
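
// Illustrative usage sketch (not part of the original header): recording
// forward-tensor info in a GradSlotMeta. "dense_t" (a phi::DenseTensor) and
// "node" (a shared_ptr to a grad node) are hypothetical names.
//
//   GradSlotMeta slot_meta;
//   slot_meta.SetStopGradient(false);
//   slot_meta.SetTensorMeta(dense_t.meta());
//   slot_meta.SetPlace(dense_t.place());
//   slot_meta.SetEdge(node, {/* slot_id = */ 0, /* rank = */ 0});
//   if (slot_meta.HasTensorMeta()) {
//     const phi::DenseTensorMeta& meta = slot_meta.GetTensorMeta();
//   }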

class GradNodeBase {
 public:
  GradNodeBase() { VLOG(7) << "Construct GradNodeBase"; }
  GradNodeBase(size_t bwd_in_slot_num, size_t bwd_out_slot_num);
  // TODO(jiabin): Should we have other constructors here?
  virtual ~GradNodeBase() { VLOG(7) << "Destruct GradNodeBase"; }

  /**
   * operator() is designed to contain the real backward execution logic; it
   * should be overridden by the derived class defined for each operator. It
   * accepts a vector of Tensor which contains the grad inputs of the current
   * operator.
   *
   * Note: why do we construct backward inputs and outputs as a vector of
   * vectors of paddle::Tensor?
   * Since all paddle ops are composed in the form of {"Slot name", vector<Var>},
   * a vector of vectors is the better choice to fit this format.
   * **/
  virtual paddle::small_vector<std::vector<paddle::Tensor>,
                               kSlotSmallVectorSize>
  operator()(paddle::small_vector<std::vector<paddle::Tensor>,
                                  kSlotSmallVectorSize>& grads,  // NOLINT
             bool create_graph = false,
             bool is_new_grad = false) = 0;

  virtual void ClearTensorWrappers() = 0;

  /**
   * Self-copy interface designed for use in DoubleGrad
   * **/
  virtual std::shared_ptr<GradNodeBase> Copy() const = 0;

  // adj_edges were moved inside OutputMeta(), so there is no direct access
  // from GradNodeBase.
  // To access Edges, get the GradSlotMeta by calling OutputMeta(), then use
  // slot_meta.GetEdge().

  /**
   * Get Input Meta of the current Grad node**/
  const paddle::small_vector<std::vector<GradSlotMeta>, kSlotSmallVectorSize>&
  InputMeta() const;
  /**
   * Get Output Meta of the current Grad node**/
  const paddle::small_vector<std::vector<GradSlotMeta>, kSlotSmallVectorSize>&
  OutputMeta() const;

  paddle::small_vector<std::vector<GradSlotMeta>, kSlotSmallVectorSize>&
  MutableOutputMeta();
  /**
   * Set bwd ins and outs info with forward vars
   * **/

  void SetGradInMeta(const std::vector<paddle::Tensor>& fwd_out,
                     size_t slot_rank);
  void SetGradInMeta(const paddle::Tensor& fwd_out, size_t slot_rank);

  void SetGradOutMeta(const std::vector<paddle::Tensor>& fwd_in,
                      size_t slot_rank);
  void SetGradOutMeta(const std::vector<const paddle::Tensor*>& fwd_in,
                      size_t slot_rank);
  void SetGradOutMeta(const paddle::Tensor& fwd_in, size_t slot_rank);
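
  // Illustrative usage sketch (not part of the original header): how a forward
  // function typically wires grad meta after creating a grad node. "node", "x"
  // (a forward input) and "out" (a forward output) are hypothetical names.
  //
  //   node->SetGradOutMeta(x, /* slot_rank = */ 0);   // grad outputs mirror fwd inputs
  //   node->SetGradInMeta(out, /* slot_rank = */ 0);  // grad inputs mirror fwd outputs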
  /**
   * Default setters for Grad in/out meta; these should be used for some
   * special Nodes which will not be created by users.
   * **/
  void SetDefaultGradInOutMeta();
  /**
   * Register GradientHook
   * **/
  int64_t RegisterGradientHook(size_t slot_id,
                               size_t rank,
                               std::shared_ptr<egr::TensorHook>&& hook);

  /**
   * Remove GradientHook
   * **/
  bool RemoveGradientHook(const int64_t& hook_id) {
    auto remove_cnt = gradient_hooks_.erase(hook_id);
    if (remove_cnt == 0) {
      return false;
    }
    return true;
  }
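
  // Illustrative usage sketch (not part of the original header): registering
  // and removing a gradient hook. "node" is a hypothetical GradNodeBase
  // subclass instance and "my_hook" a std::shared_ptr<egr::TensorHook>.
  //
  //   int64_t id = node->RegisterGradientHook(
  //       /* slot_id = */ 0, /* rank = */ 0, std::move(my_hook));
  //   if (node->GradientHooksRegistered()) {
  //     node->RemoveGradientHook(id);
  //   }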

  /**
   * Apply GradientHook
   * **/
  inline bool GradientHooksRegistered() { return !gradient_hooks_.empty(); }

  std::map<int64_t, std::tuple<size_t, size_t, std::shared_ptr<TensorHook>>>
  GetGradientHookFuntions() {
    VLOG(7) << "GetGradientHookFuntions ";
    return gradient_hooks_;
  }

  void SetGradientHookFuntions(
      std::map<int64_t, std::tuple<size_t, size_t, std::shared_ptr<TensorHook>>>
          hooks) {
    VLOG(7) << "SetGradientHookFuntions ";
    gradient_hooks_ = hooks;
  }

  paddle::small_vector<std::vector<paddle::Tensor>, kSlotSmallVectorSize>
  ApplyGradientHooks(const paddle::small_vector<std::vector<paddle::Tensor>,
                                                kSlotSmallVectorSize>& tensors);

  /**
   * Handle Complex - Real Type Promotion
   * **/
  void HandleComplexGradToRealGrad(
      paddle::small_vector<std::vector<paddle::Tensor>, kSlotSmallVectorSize>*
          out_grads);
  bool NeedComplexToRealConversion() { return need_complex_to_real_; }

  virtual std::string name() { return "GradNodeBase"; }

  /**
   * The following interfaces are designed for no_need_buffer
   * **/
  bool IsTensorWrappersCleared() { return is_tensor_wrappers_cleared_; }

  void SetIsTensorWrappersCleared(bool is_tensor_wrappers_cleared) {
    is_tensor_wrappers_cleared_ = is_tensor_wrappers_cleared;
  }

 private:
  // bwd_out_meta_ is used to record Grad output info for backward
  paddle::small_vector<std::vector<GradSlotMeta>, kSlotSmallVectorSize>
      bwd_out_meta_;

  // bwd_in_meta_ is used to record Grad input info for backward
  paddle::small_vector<std::vector<GradSlotMeta>, kSlotSmallVectorSize>
      bwd_in_meta_;
  // Gradient Hooks
  // Users may register a list of hooks which will be called in order during
  // backward
  // Each entry consists of one pair of
  // <hook_id, <slot_id, rank, std::shared_ptr<TensorHook>>>
  std::map<int64_t,
           std::tuple<
               /* slot id */ size_t,
               /* rank */ size_t,
               /* hook */ std::shared_ptr<TensorHook>>>
      gradient_hooks_;
  int64_t next_hook_id_{0};

  // We handle complex to real conversion only if any complex GradIn is involved
  bool need_complex_to_real_ = false;

  bool is_tensor_wrappers_cleared_ = false;
};
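
// Illustrative sketch (not part of the original header) of the minimal surface
// a concrete grad node overrides: operator(), ClearTensorWrappers(), Copy()
// and name(). "IdentityGradNode" is hypothetical; real nodes also capture
// forward inputs as TensorWrapper members and fill the bwd in/out meta via
// SetGradInMeta / SetGradOutMeta.
//
//   class IdentityGradNode : public GradNodeBase {
//    public:
//     IdentityGradNode()
//         : GradNodeBase(/* bwd_in_slot_num = */ 1,
//                        /* bwd_out_slot_num = */ 1) {}
//
//     paddle::small_vector<std::vector<paddle::Tensor>, kSlotSmallVectorSize>
//     operator()(paddle::small_vector<std::vector<paddle::Tensor>,
//                                     kSlotSmallVectorSize>& grads,  // NOLINT
//                bool create_graph = false,
//                bool is_new_grad = false) override {
//       // Backward of y = x simply forwards the incoming grad.
//       paddle::small_vector<std::vector<paddle::Tensor>, kSlotSmallVectorSize>
//           outs;
//       outs.resize(1);
//       outs[0] = grads[0];
//       return outs;
//     }
//
//     void ClearTensorWrappers() override {}
//
//     std::shared_ptr<GradNodeBase> Copy() const override {
//       return std::make_shared<IdentityGradNode>(*this);
//     }
//
//     std::string name() override { return "IdentityGradNode"; }
//   };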

}  // namespace egr