/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/distributed/communicator.h"

#include <chrono>  // NOLINT
#include <thread>  // NOLINT

#include "paddle/fluid/framework/selected_rows.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/framework/variable_helper.h"
#include "paddle/fluid/operators/distributed/parameter_recv.h"
#include "paddle/fluid/operators/distributed/parameter_send.h"
#include "paddle/fluid/operators/math/selected_rows_functor.h"

namespace paddle {
namespace operators {
namespace distributed {

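// Merge the queued copies of one gradient variable into a single variable
// named `var_name` in `scope`: LoDTensor inputs are summed elementwise, and
// SelectedRows inputs are combined with MergeAdd (rows sharing the same index
// are added together).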
static inline void MergeVars(const std::string &var_name,
                             const std::vector<std::shared_ptr<Variable>> &vars,
                             Scope *scope) {
  VLOG(3) << "merge " << vars.size() << " vars " << var_name << " to 1";
  PADDLE_ENFORCE(!vars.empty(), "should have at least one var to merge!");
  auto cpu_place = platform::CPUPlace();
  auto &var0 = vars[0];
  auto *out_var = scope->Var(var_name);
  if (var0->IsType<framework::LoDTensor>()) {
    auto *out_t = out_var->GetMutable<framework::LoDTensor>();
    auto *out_ptr = out_t->mutable_data<float>(
        var0->Get<framework::LoDTensor>().dims(), cpu_place);
    auto numel = out_t->numel();
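    // Elementwise sum: out[i] accumulates var[i] across all queued copies.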
    for (auto i = 0; i < numel; ++i) {
      out_ptr[i] = 0;
      for (auto &var : vars) {
        auto &var_t = var->Get<framework::LoDTensor>();
        PADDLE_ENFORCE_EQ(var_t.numel(), numel, "should have the same dims");
        out_ptr[i] += var_t.data<float>()[i];
      }
    }
  } else if (var0->IsType<framework::SelectedRows>()) {
    auto *out_slr = out_var->GetMutable<framework::SelectedRows>();
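    // Sparse case: reset the output SelectedRows, then merge-add every input
    // so that rows with duplicate indices are summed.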
    out_slr->mutable_rows()->clear();
    out_slr->mutable_value()->mutable_data<float>({{}}, cpu_place);
    std::vector<const paddle::framework::SelectedRows *> inputs;
    inputs.reserve(vars.size());
    for (auto &var : vars) {
      inputs.push_back(&var->Get<framework::SelectedRows>());
    }
    math::scatter::MergeAdd<paddle::platform::CPUDeviceContext, float>
        merge_add;
    auto dev_ctx = paddle::platform::CPUDeviceContext();
    merge_add(dev_ctx, inputs, out_slr, false);
  } else {
    PADDLE_THROW("unsupported var type!");
  }
}

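// The process-wide singleton instance and the flag guarding its one-time
// initialization.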
std::unique_ptr<Communicator> Communicator::communicator_(nullptr);
std::once_flag Communicator::init_flag_;

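// Main send loop: for each variable that has pending gradients, merge up to
// max_merge_var_num queued copies and dispatch a ParameterSend task to the
// send thread pool; after all tasks of this round finish, pull the latest
// parameters back with RecvAll().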
void Communicator::SendThread() {
  VLOG(3) << "SendThread start!";
  while (running_) {
    std::vector<std::future<void>> task_futures;
    task_futures.reserve(send_varname_to_ctx_.size());
    VLOG(3) << "run send graph";
    for (auto &iter : send_varname_to_queue_) {
      auto &var_name = iter.first;
      auto &var_queue = iter.second;
      if (var_queue->Size() > 0) {
        // Capture the name and queue by value (a string and a shared_ptr
        // copy): the task can outlive this loop iteration's local references.
        auto send_task = [this, var_name, var_queue] {
          VLOG(3) << "merge var " << var_name << " and send";
          std::vector<std::shared_ptr<Variable>> vars;
          // TODO(qiao): need to be configurable
          const size_t max_merge_var_num = 20;
          size_t merged_var_num = 0;
          while (var_queue->Size() > 0 && merged_var_num < max_merge_var_num) {
            vars.push_back(var_queue->Pop());
            merged_var_num++;
          }
          MergeVars(var_name, vars, send_scope_.get());
          auto send_functor = distributed::ParameterSend<float>();
          auto &ctx = send_varname_to_ctx_.at(var_name);
          send_functor(ctx, *send_scope_, true);
        };
        task_futures.emplace_back(
            send_threadpool_->enqueue(std::move(send_task)));
      } else {
        VLOG(3) << var_name << " queue empty";
      }
    }
    for (auto &task_f : task_futures) {
      task_f.wait();
    }
    VLOG(3) << "run send graph done";
    RecvAll();
  }
}

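// Enqueue one ParameterRecv task per parameter on the recv thread pool and
// block until all of them have finished.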
void Communicator::RecvAll() {
  VLOG(3) << "parallel run recv graph";
  std::vector<std::future<void>> task_futures;
  task_futures.reserve(recv_varname_to_ctx_.size());
  for (auto &iter : recv_varname_to_ctx_) {
    // Copy the key and take the context's address so the task does not rely
    // on the loop-local reference `iter` staying alive.
    auto var_name = iter.first;
    auto *recv_ctx = &iter.second;
    auto recv_task = [this, var_name, recv_ctx] {
      VLOG(3) << "recv var " << var_name;
      auto recv_functor = distributed::ParameterRecv<float>();
      recv_functor(*recv_ctx, *recv_scope_);
    };
    task_futures.emplace_back(recv_threadpool_->enqueue(std::move(recv_task)));
  }
  for (auto &task : task_futures) {
    task.wait();
  }
  VLOG(3) << "run recv graph done";
}

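// Periodic receive loop. Currently unused: Start() only spawns SendThread,
// which already calls RecvAll() at the end of each send round.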
void Communicator::RecvThread() {
  VLOG(3) << "RecvThread start!";
  while (running_) {
    RecvAll();
    // TODO(qiao): need to be configurable
    std::this_thread::sleep_for(std::chrono::milliseconds(200));
  }
}

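// Copy the gradient variable out of the caller's scope and push the copy
// onto the per-variable queue; the actual communication happens
// asynchronously in SendThread.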
void Communicator::Send(const std::string &var_name,
                        const framework::Scope &scope) {
  VLOG(3) << "communicator send " << var_name;
  // push var into send queue by var_name
  auto *grad_var = scope.FindVar(var_name);
  PADDLE_ENFORCE(grad_var->IsInitialized(), "grad var should be initialized");
  auto tmp_grad_var = std::make_shared<Variable>();
  framework::CopyVariable(*grad_var, tmp_grad_var.get());
  auto &queue = send_varname_to_queue_.at(var_name);
  VLOG(3) << "send " << var_name << " queue size " << queue->Size();
  queue->Push(tmp_grad_var);
}

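// Returns the process-wide Communicator instance. A rough usage sketch
// (assuming the instance has already been created and initialized elsewhere;
// "w@GRAD" is an illustrative variable name, not one defined in this file):
//
//   Communicator::GetInstance()->Start();                // spawn SendThread
//   Communicator::GetInstance()->Send("w@GRAD", scope);  // enqueue a gradient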
Communicator *Communicator::GetInstance() { return communicator_.get(); }

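// Start the background send thread. The dedicated recv thread is left
// disabled below because SendThread already calls RecvAll() at the end of
// every send round.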
void Communicator::Start() {
  running_ = true;
  // start send and recv thread
  send_thread_.reset(
      new std::thread(std::bind(&Communicator::SendThread, this)));
  //  recv_thread_.reset(
  //      new std::thread(std::bind(&Communicator::RecvThread, this)));
}

}  // namespace distributed
}  // namespace operators
}  // namespace paddle