// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <algorithm>
#include <iostream>
#include <map>
#include <memory>
#include <queue>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>

#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/framework/variable.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/platform/for_range.h"

namespace paddle {
namespace platform {
class DeviceContext;
}  // namespace platform

namespace imperative {
class GradOpNode;
class ParallelContext;
class VarBase;
class VariableWrapper;
}  // namespace imperative
}  // namespace paddle

namespace paddle {
namespace imperative {

#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) || \
    defined(PADDLE_WITH_XPU_BKCL)

// Element-wise functor: divides every element of `output` by nranks.
template <typename T>
struct DivNRanksFunctor {
  DivNRanksFunctor(int64_t nranks, T* output)
      : nranks_(nranks), output_(output) {}
  HOSTDEVICE void operator()(size_t idx) const {
    output_[idx] /= static_cast<T>(nranks_);
  }
  int64_t nranks_;
  T* output_;
};

// Applies DivNRanksFunctor over a whole tensor via ForRange; Dex is the
// concrete DeviceContext type (e.g. the CUDA device context).
template <typename Dex>
struct DivNRanksForAllReduce {
  framework::Tensor* in_;
  int64_t nranks_;
  const platform::DeviceContext& ctx_;
  DivNRanksForAllReduce(framework::Tensor* in, int64_t nranks,
                        const platform::DeviceContext& ctx)
      : in_(in), nranks_(nranks), ctx_(ctx) {}

  template <typename T>
  void apply() const {
    T* data = in_->mutable_data<T>(ctx_.GetPlace());
    platform::ForRange<Dex> for_range(static_cast<const Dex&>(ctx_),
                                      static_cast<size_t>(in_->numel()));
    DivNRanksFunctor<T> functor(nranks_, data);
    for_range(functor);
  }
};
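
// A minimal usage sketch (an assumption about how the functor above is
// dispatched; `dtype_`, `tensor`, `nranks` and `context` are placeholders,
// not declarations from this header):
//
//   framework::VisitDataType(
//       dtype_, DivNRanksForAllReduce<platform::CUDADeviceContext>(
//                   tensor, nranks, context));
//
// This divides every element of `tensor` by `nranks` on the stream owned by
// `context`, which is how the all-reduced gradients get averaged.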

class Group {
 public:
  // dense_contents_ and sparse_contents_ hold the fused gradient contents of
  // the group. They are mutually exclusive: when is_sparse_ is true only
  // sparse_contents_ is used; otherwise only dense_contents_ is used.
  framework::Variable dense_contents_;
  framework::Variable* sparse_contents_ = nullptr;
  bool is_sparse_ = false;

  // for concat kernel
  std::vector<framework::Tensor> dense_tensors_;

  // numel of each dense tensor fused into dense_contents_
  std::vector<size_t> length_;

  int64_t all_length_{0};

  // Global indices of participating variables in the group
  std::vector<size_t> variable_indices_;

  // Number of params whose gradients are not yet ready. When it drops to 0,
  // the group is ready.
  size_t pending_ = -1;

  // meta info of the group: the common dtype of its gradients
  framework::proto::VarType::Type dtype_;

  // context is used to select the stream for concat
  void ConcatTensors(const platform::DeviceContext& context);

  // context is used to select the stream for split
  void SplitTensors(const platform::DeviceContext& context);

  // divides `tensor` by nranks in place; used on the CUDA path
  void DivNRanks(framework::Tensor* tensor, int64_t nranks,
                 const platform::DeviceContext& context);

  void DivNRanks(const platform::DeviceContext& context, int64_t nranks);

  friend std::ostream& operator<<(std::ostream&, const Group&);
};
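
// A rough sketch of how a dense Group is typically driven during one backward
// pass (`dev_ctx`, `comm` and the all-reduce call are illustrative
// assumptions, not part of this header):
//
//   group.ConcatTensors(dev_ctx);        // fuse dense_tensors_ into dense_contents_
//   comm->AllReduce(group.dense_contents_, ...);  // exchange the fused gradient
//   group.DivNRanks(dev_ctx, nranks);    // average: divide by the number of ranks
//   group.SplitTensors(dev_ctx);         // scatter results back to the original vars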

struct VariableLocator {
  // index of the group in groups_
  size_t group_index;
  // index of the variable inside that group
  size_t inside_group_index;
};
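
// For example, the variable whose gradient is packed as the third tensor of
// groups_[1] would get the locator {/*group_index=*/1, /*inside_group_index=*/2}.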

class Reducer {
 public:
  explicit Reducer(
      const std::vector<std::shared_ptr<imperative::VarBase>>& vars,
      const std::vector<std::vector<size_t>>& group_indices,
      const std::vector<bool>& is_sparse_gradient,
      std::shared_ptr<imperative::ParallelContext> parallel_ctx,
      const std::vector<size_t>& group_size_limits, bool find_unused_vars);

  virtual ~Reducer() {}

  void InitializeGroups(const std::vector<std::vector<size_t>>& group_indices);

  void InitializeDenseGroups(const std::vector<size_t>& variable_indices_,
                             Group* p_group);

  void PrepareDeps(const std::unordered_set<GradOpNode*>& init_nodes);

  void PrepareForBackward(
      const std::vector<std::shared_ptr<imperative::VarBase>>& outputs);

  void AddDistHook(size_t var_index);

  void MarkVarReady(const size_t var_index, const bool is_used_var);

  void MarkGroupReady(size_t group_index);

  void FinalizeBackward();

  std::vector<std::vector<size_t>> RebuildGroups();

  inline bool NeedRebuildGroup() { return !has_rebuilt_group_; }

 private:
  std::vector<std::shared_ptr<imperative::VarBase>> vars_;
  std::vector<std::vector<size_t>> group_indices_;
  std::vector<Group> groups_;
  size_t next_group_ = 0;
  platform::Place place_;
  std::once_flag once_flag_;
  std::vector<bool> is_sparse_gradient_;
  std::shared_ptr<imperative::ParallelContext> parallel_ctx_;
  std::vector<VariableLocator> variable_locators_;

  int nrings_ = 1;
  int64_t nranks_ = -1;

  // The following variables help rebuild the groups.
  // TODO(shenliang03): Support rebuild in the future.
  bool has_rebuilt_group_{true};
  std::vector<std::shared_ptr<imperative::VarBase>> rebuild_vars_;
  std::vector<int64_t> rebuild_var_indices_;
  const std::vector<size_t> group_size_limits_;

  // The following variables help track unused vars.
  std::unordered_map<GradOpNode*, size_t> node_deps_;
  std::unordered_map<VariableWrapper*, size_t> var_index_map_;
  std::vector<size_t> unused_vars_;
  bool has_marked_unused_vars_{false};
  bool find_unused_vars_{false};
  bool all_group_ready_{false};
};
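
// A rough end-to-end sketch of how a data-parallel layer is expected to drive
// the Reducer (the surrounding objects such as `vars`, `parallel_ctx` and the
// hook wiring are assumptions for illustration; the real call sites live in
// the imperative/distributed layer):
//
//   std::vector<size_t> group_size_limits = {25 * 1024 * 1024};  // ~25MB/group
//   auto group_indices =
//       AssignGroupBySize(vars, is_sparse_gradient, group_size_limits);
//   auto reducer = std::make_shared<Reducer>(vars, group_indices,
//                                            is_sparse_gradient, parallel_ctx,
//                                            group_size_limits,
//                                            /*find_unused_vars=*/false);
//   // before each backward pass:
//   reducer->PrepareForBackward(outputs);
//   // gradient hooks then call AddDistHook(var_index) as grads become ready;
//   // this marks vars and groups ready, fuses and all-reduces each ready
//   // group, and finally triggers FinalizeBackward().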

std::vector<std::vector<size_t>> AssignGroupBySize(
    const std::vector<std::shared_ptr<imperative::VarBase>>& tensors,
    const std::vector<bool>& is_sparse_gradient,
    const std::vector<size_t>& group_size_limits,
    const std::vector<int64_t>& tensor_indices = {});
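
// Roughly, AssignGroupBySize packs consecutive dense variables into a group
// until the group's byte size reaches the current limit, while variables with
// sparse gradients get groups of their own. For example (an illustrative
// sketch, not taken from the tests): four dense FP32 vars of 1M elements each
// (~4MB) with group_size_limits = {8 * 1024 * 1024} would typically yield
// {{0, 1}, {2, 3}}.
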
#endif

}  // namespace imperative
}  // namespace paddle