/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <ThreadPool.h>
#include <atomic>
#include <condition_variable>  // NOLINT
#include <deque>
#include <map>
#include <memory>
#include <mutex>  // NOLINT
#include <string>
#include <thread>  // NOLINT
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
#include "gflags/gflags.h"

#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/framework/variable.h"
#include "paddle/fluid/operators/distributed/distributed.h"
#include "paddle/fluid/operators/distributed/rpc_client.h"
#include "paddle/fluid/operators/distributed/rpc_common.h"
#include "paddle/fluid/operators/distributed_ops/send_recv_util.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/operators/math/selected_rows_functor.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/place.h"

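// Declared here, defined in the corresponding .cc: when true, merged
// gradients are summed as in plain SGD; otherwise they are averaged
// (see the merge_add parameter of MergeVars below).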
DECLARE_bool(communicator_is_sgd_optimizer);

namespace paddle {
namespace operators {
namespace distributed {

using Scope = framework::Scope;
using Variable = framework::Variable;

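// A simple bounded blocking queue: Push() blocks while the queue is full and
// Pop() blocks while it is empty, so producers and consumers throttle each
// other. It is used below to buffer per-variable gradients between trainer
// threads and the communicator's send thread. A minimal usage sketch:
//
//   BlockingQueue<int> q(2);
//   q.Push(1);         // returns once there is room in the queue
//   int v = q.Pop();   // returns once an element is available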
template <typename T>
class BlockingQueue {
 public:
  explicit BlockingQueue(size_t capacity) : capacity_(capacity) {
    PADDLE_ENFORCE_GT(capacity_, 0, "The capacity must be greater than 0.");
  }

  bool Push(const T& elem) {
    {
      std::unique_lock<std::mutex> lock(mutex_);
      cv_.wait(lock, [&] { return queue_.size() < capacity_; });
      PADDLE_ENFORCE_LT(queue_.size(), capacity_);
      queue_.push_back(elem);
    }
    cv_.notify_one();
    return true;
  }

  bool Push(T&& elem) {
    {
      std::unique_lock<std::mutex> lock(mutex_);
      cv_.wait(lock, [&] { return queue_.size() < capacity_; });
      PADDLE_ENFORCE_LT(queue_.size(), capacity_);
      queue_.emplace_back(std::move(elem));
    }
    cv_.notify_one();
    return true;
  }

  T Pop() {
    std::unique_lock<std::mutex> lock(mutex_);
    cv_.wait(lock, [=] { return !queue_.empty(); });
    T rc(std::move(queue_.front()));
    queue_.pop_front();
    cv_.notify_one();
    return rc;
  }

  size_t Cap() const {
    std::lock_guard<std::mutex> lock(mutex_);
    return capacity_;
  }

  size_t Size() const {
    std::lock_guard<std::mutex> lock(mutex_);
    return queue_.size();
  }

 private:
  const size_t capacity_;
  std::deque<T> queue_;

  mutable std::mutex mutex_;
  std::condition_variable cv_;
};

template <typename T, int MajorType = Eigen::RowMajor,
          typename IndexType = Eigen::DenseIndex>
using EigenVector = framework::EigenVector<T, MajorType, IndexType>;

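// Merges the variables named `var_name` collected from several steps into a
// single output variable created in `scope`. Dense (LoDTensor) inputs are
// summed elementwise; SelectedRows inputs are merged row-wise. When
// merge_add is false, the result is divided by the number of inputs, i.e.
// averaged instead of summed.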
template <typename T>
inline void MergeVars(const std::string& var_name,
                      const std::vector<std::shared_ptr<Variable>>& vars,
                      Scope* scope, bool merge_add = true) {
  PADDLE_ENFORCE(!vars.empty(), "the variables to merge should not be empty");
  auto cpu_place = platform::CPUPlace();
  auto& var0 = vars[0];
  auto* out_var = scope->Var(var_name);
  if (var0->IsType<framework::LoDTensor>()) {
    auto dims = var0->Get<framework::LoDTensor>().dims();
    VLOG(3) << "merge " << var_name << " LoDTensor dims " << dims
            << "; merge add: " << merge_add;
    // init output tensor
    auto* out_t = out_var->GetMutable<framework::LoDTensor>();
    out_t->mutable_data<T>(dims, cpu_place);
    // check the input dims
    for (auto& var : vars) {
      auto& var_t = var->Get<framework::LoDTensor>();
      PADDLE_ENFORCE_EQ(var_t.dims(), dims,
                        "all variables to merge should have the same dims");
    }

    // set output tensor to 0.
    auto cpu_ctx = paddle::platform::CPUDeviceContext();
    math::SetConstant<paddle::platform::CPUDeviceContext, T> constant_functor;
    constant_functor(cpu_ctx, out_t, static_cast<T>(0));
    // sum all vars to out
    auto result = EigenVector<T>::Flatten(*out_t);
    for (auto& var : vars) {
      auto& in_t = var->Get<framework::LoDTensor>();
      auto in = EigenVector<T>::Flatten(in_t);
      result.device(*cpu_ctx.eigen_device()) = result + in;
    }
    if (!merge_add) {
      result.device(*cpu_ctx.eigen_device()) =
          result / static_cast<T>(vars.size());
    }
  } else if (var0->IsType<framework::SelectedRows>()) {
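    // Sparse gradients arrive as SelectedRows: merge them row-wise, either
    // accumulating rows with the same index (MergeAdd) or averaging across
    // the inputs (MergeAverage).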
    auto& slr0 = var0->Get<framework::SelectedRows>();
    auto* out_slr = out_var->GetMutable<framework::SelectedRows>();
    out_slr->mutable_rows()->clear();
    out_slr->mutable_value()->mutable_data<T>({{}}, cpu_place);
    std::vector<const paddle::framework::SelectedRows*> inputs;
    inputs.reserve(vars.size());
    for (auto& var : vars) {
      inputs.push_back(&var->Get<framework::SelectedRows>());
    }
    auto dev_ctx = paddle::platform::CPUDeviceContext();
    if (merge_add) {
      math::scatter::MergeAdd<paddle::platform::CPUDeviceContext, T>
          merge_add_functor;
      merge_add_functor(dev_ctx, inputs, out_slr);
    } else {
      math::scatter::MergeAverage<paddle::platform::CPUDeviceContext, T>
          merge_average;
      merge_average(dev_ctx, inputs, out_slr);
    }

    VLOG(3) << "merge " << var_name << " SelectedRows height: " << slr0.height()
            << " dims: " << slr0.value().dims() << "; merge add: " << merge_add;
  } else {
    PADDLE_THROW(
        "unsupported var type: only LoDTensor and SelectedRows can be merged");
  }
}

using RpcCtxMap = std::unordered_map<std::string, RpcContext>;

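// Communicator is the singleton that exchanges parameters and gradients
// between a trainer and the parameter servers. Concrete strategies
// (AsyncCommunicator and GeoSgdCommunicator below) implement Start/Stop,
// Send/Recv and the InitImpl overloads.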
class Communicator {
 public:
  Communicator() {}
  virtual ~Communicator() {}

  virtual void Start() = 0;
  virtual void Stop() = 0;
  virtual bool IsRunning() { return running_; }

  virtual void Send(const std::string& var_name,
                    const framework::Scope& scope) = 0;

  virtual void Send(const std::vector<std::string>& sparse_var_names,
                    const std::vector<std::string>& sparse_var_tables,
                    const framework::Scope& scope) = 0;

  virtual void Recv() = 0;

  virtual void InitImpl(const RpcCtxMap& send_varname_to_ctx,
                        const RpcCtxMap& recv_varname_to_ctx,
                        Scope* recv_scope) = 0;

  virtual void InitImpl(const paddle::framework::ProgramDesc& program,
                        Scope* recv_scope) = 0;

  // for geo-sgd
  virtual void InitImpl(
      const paddle::framework::ProgramDesc& program, Scope* param_scope,
      std::map<std::string, std::map<std::string, std::vector<std::string>>>&
          vars_info,
      const int& trainers, const int& geo_need_push_nums) = 0;

  static Communicator* GetInstance() { return communicator_.get(); }

  static std::shared_ptr<Communicator> GetInstantcePtr() {
    return communicator_;
  }

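  // The InitInstance overloads below create the singleton exactly once via
  // std::call_once; later calls return the existing instance and their
  // arguments are ignored.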
  template <typename T>
  static Communicator* InitInstance(const RpcCtxMap& send_varname_to_ctx,
                                    const RpcCtxMap& recv_varname_to_ctx,
                                    Scope* recv_scope) {
    std::call_once(init_flag_, &Communicator::InitWithRpcCtx<T>,
                   send_varname_to_ctx, recv_varname_to_ctx, recv_scope);
    return communicator_.get();
  }

  template <typename T>
  static Communicator* InitInstance(
      const paddle::framework::ProgramDesc& program, Scope* recv_scope) {
    std::call_once(init_flag_, &Communicator::InitWithProgram<T>, program,
                   recv_scope);
    return communicator_.get();
  }

  template <typename T>
  static Communicator* InitInstance(
      const paddle::framework::ProgramDesc& program, Scope* training_scope,
      std::map<std::string, std::map<std::string, std::vector<std::string>>>&
          vars_info,
      const int& trainers, const int& geo_need_push_nums) {
    std::call_once(init_flag_, &Communicator::InitWithTranspilerInfo<T>,
                   program, training_scope, std::ref(vars_info),
                   std::ref(trainers), std::ref(geo_need_push_nums));
    return communicator_.get();
  }

  // Init is called by InitInstance.
  template <typename T>
  static void InitWithRpcCtx(const RpcCtxMap& send_varname_to_ctx,
                             const RpcCtxMap& recv_varname_to_ctx,
                             Scope* recv_scope) {
    if (communicator_.get() == nullptr) {
      communicator_.reset(new T());
      communicator_->InitImpl(send_varname_to_ctx, recv_varname_to_ctx,
                              recv_scope);
    }
  }

  template <typename T>
  static void InitWithProgram(const paddle::framework::ProgramDesc& program,
                              Scope* recv_scope) {
    if (communicator_.get() == nullptr) {
      communicator_.reset(new T());
      communicator_->InitImpl(program, recv_scope);
    }
  }

  template <typename T>
  static void InitWithTranspilerInfo(
      const paddle::framework::ProgramDesc& program, Scope* training_scope,
      std::map<std::string, std::map<std::string, std::vector<std::string>>>&
          vars_info,
      const int& trainers, const int& geo_need_push_nums) {
    if (communicator_.get() == nullptr) {
      communicator_.reset(new T());
      communicator_->InitImpl(program, training_scope, std::ref(vars_info),
                              std::ref(trainers), std::ref(geo_need_push_nums));
    }
  }

 protected:
  bool running_ = false;
  static std::shared_ptr<Communicator> communicator_;
  static std::once_flag init_flag_;
};

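// Maps a sparse variable name to the sets of row ids touched since the last
// push, one set per split (pserver shard) of that variable.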
using SparseIdsMap =
    std::unordered_map<std::string, std::vector<std::unordered_set<int64_t>>>;

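// Fully asynchronous strategy: Send() enqueues gradients into per-variable
// blocking queues; SendThread() drains the queues, merges the queued
// gradients with MergeVars and sends them via RPC, while RecvThread() pulls
// updated parameters back into recv_scope_.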
class AsyncCommunicator : public Communicator {
 public:
  AsyncCommunicator() {}
  ~AsyncCommunicator();
  void Start() override;
  void Stop() override;

  void Send(const std::string& var_name,
            const framework::Scope& scope) override;
  void Recv() override;
  void RecvAll();

  void InitImpl(const RpcCtxMap& send_varname_to_ctx,
                const RpcCtxMap& recv_varname_to_ctx,
                Scope* recv_scope) override;

  void InitImpl(const paddle::framework::ProgramDesc& program,
                Scope* recv_scope) override;

  void SendThread();
  void RecvThread();

  void Send(const std::vector<std::string>& sparse_var_names,
            const std::vector<std::string>& sparse_var_tables,
            const framework::Scope& scope) override;

  void InitImpl(
      const paddle::framework::ProgramDesc& program, Scope* param_scope,
      std::map<std::string, std::map<std::string, std::vector<std::string>>>&
          vars_info,
      const int& trainers, const int& geo_need_push_nums) override;

 private:
  std::unordered_map<std::string,
                     std::shared_ptr<BlockingQueue<std::shared_ptr<Variable>>>>
      send_varname_to_queue_;
  RpcCtxMap send_varname_to_ctx_;
  RpcCtxMap recv_varname_to_ctx_;
  std::unique_ptr<std::thread> send_thread_{nullptr};
  std::unique_ptr<std::thread> recv_thread_{nullptr};
  Scope* recv_scope_;                  // should be global scope
  std::unique_ptr<Scope> send_scope_;  // an independent scope
  std::unique_ptr<::ThreadPool> send_threadpool_{nullptr};
  std::unique_ptr<::ThreadPool> recv_threadpool_{nullptr};
  std::atomic_uint grad_num_{0};  // number of gradients sent since last recv
};

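// Geo-SGD strategy: trainers keep training locally and only exchange
// parameter deltas with the pservers after geo_need_push_nums_ steps have
// accumulated. delta_scope_, old_scope_ and pserver_scope_ (see below) hold
// the delta, the parameters as of the last recv, and the pserver-side
// parameters, respectively.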
class GeoSgdCommunicator : public Communicator {
 public:
  GeoSgdCommunicator() {}
  ~GeoSgdCommunicator();
  void InitImpl(
      const paddle::framework::ProgramDesc& program, Scope* training_scope,
      std::map<std::string, std::map<std::string, std::vector<std::string>>>&
          vars_info,
      const int& trainers, const int& geo_need_push_nums) override;

  void Start() override;
  void Stop() override;

  void Send(const std::string& var_name,
            const framework::Scope& scope) override;

  void Send(const std::vector<std::string>& sparse_var_names,
            const std::vector<std::string>& sparse_var_tables,
            const framework::Scope& scope) override;

  void Recv() override;

  void InitImpl(const RpcCtxMap& send_varname_to_ctx,
                const RpcCtxMap& recv_varname_to_ctx,
                Scope* recv_scope) override;

  void InitImpl(const paddle::framework::ProgramDesc& program,
                Scope* recv_scope) override;

 private:
  void SendThread();
  std::unordered_set<int64_t> SparseIdsMerge(
      const std::vector<SparseIdsMap>& ids_send_vec,
      const std::string& var_name, const std::string& splited_var_name);

  void SendUpdateDenseVars(const std::string& var_name);
  void SendUpdateSparseVars(const std::string& var_name,
                            const std::string& splited_var_name,
                            const std::unordered_set<int64_t>& ids_table);

  void RecvUpdateDenseVars(const std::string& var_name);
  void RecvUpdateSparseVars(const std::string& var_name,
                            const std::string& splited_var_name);

  void GeoSgdDenseParamInit(framework::Scope* scope_x,
                            framework::Scope* scope_y,
                            const std::string var_name);

  void GeoSgdSparseParamInit(framework::Scope* scope_x,
                             framework::Scope* scope_y,
                             const std::string var_name);

  void RpcSend(const std::string& origin_var_name,
               const std::string& splited_var_name,
               const size_t& splited_var_index);

  void RpcRecv(const std::string& origin_var_name,
               const std::string& splited_var_name,
               const size_t& splited_var_index);

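  // Geo-SGD sends parameter deltas under the name "<param>.delta"; the two
  // helpers below convert between a parameter name and its delta name.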
  const std::string VarToDeltaVar(const std::string var_name) {
    std::string delta_name = var_name;
    const std::string send_name = delta_name.append(".delta");
    return send_name;
  }

  const std::string DeltaVarToVar(const std::string var_name) {
    std::string origin_name = var_name;
    origin_name.erase(origin_name.find(".delta"), 6);
    const std::string param_name = origin_name;
    return param_name;
  }

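  // Returns the index of `splited_var_name` among the split sends of
  // `var_name` (0 if it is not found).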
  size_t GetSplitedVarIndex(const std::string var_name,
                            const std::string splited_var_name) {
    size_t index = 0;
    for (size_t i = 0;
         i < send_varname_to_ctx_[var_name].splited_var_names.size(); i++) {
      if (send_varname_to_ctx_[var_name].splited_var_names[i] ==
          splited_var_name) {
        index = i;
        break;
      }
    }
    return index;
  }

 private:
  int trainer_nums_ = 1;
  size_t geo_need_push_nums_ = 100;
  bool is_geo_sgd_ = false;
  Scope* training_scope_;
  std::shared_ptr<Scope> delta_scope_;  // local parameter delta: recv - old
  std::shared_ptr<Scope>
      old_scope_;  // stores the local parameters as of the last recv
  std::shared_ptr<Scope> pserver_scope_;  // params on pserver, global scope
  RpcCtxMap send_varname_to_ctx_;
  RpcCtxMap recv_varname_to_ctx_;
  std::unordered_map<std::string, bool>
      var_list_;  // true if the var is sparse (stored as SelectedRows)

  std::shared_ptr<BlockingQueue<std::shared_ptr<SparseIdsMap>>>
      need_push_queue_;
  std::vector<SparseIdsMap> ids_send_vec_;

  std::unordered_map<std::string, std::vector<int64_t>> absolute_section_;

  std::unique_ptr<::ThreadPool> send_threadpool_{nullptr};
  std::unique_ptr<std::thread> send_thread_{nullptr};

  size_t need_thread_nums_{0};
};

}  // namespace distributed
}  // namespace operators
}  // namespace paddle