communicator.cc
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/distributed/communicator.h"

#include <gflags/gflags.h>
#include <chrono>  // NOLINT
#include <thread>  // NOLINT

#include "paddle/fluid/framework/selected_rows.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/framework/variable_helper.h"
#include "paddle/fluid/operators/distributed/parameter_recv.h"
#include "paddle/fluid/operators/distributed/parameter_send.h"
#include "paddle/fluid/operators/math/selected_rows_functor.h"

DEFINE_bool(communicator_independent_recv_thread, true,
            "use an independent thread to recv vars from parameter server");
DEFINE_int32(communicator_send_queue_size, 20,
             "queue size of gradients buffered before send");
DEFINE_int32(communicator_recv_wait_ms, 200, "wait time between each recv");
DEFINE_int32(communicator_thread_pool_size, 5,
             "thread pool size used by both send and recv");

namespace paddle {
namespace operators {
namespace distributed {

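// Merge the vars in `vars` (multiple copies of the same variable, e.g. a
// gradient collected over several mini-batches) into a single var named
// `var_name` inside `scope`. Dense LoDTensors are summed element-wise;
// sparse SelectedRows are combined with math::scatter::MergeAdd.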
static inline void MergeVars(const std::string &var_name,
                             const std::vector<std::shared_ptr<Variable>> &vars,
                             Scope *scope) {
  VLOG(3) << "merge " << vars.size() << " vars " << var_name << " to 1";
  PADDLE_ENFORCE(!vars.empty(), "should have value to merge!");
  auto cpu_place = platform::CPUPlace();
  auto &var0 = vars[0];
  auto *out_var = scope->Var(var_name);
  if (var0->IsType<framework::LoDTensor>()) {
    auto *out_t = out_var->GetMutable<framework::LoDTensor>();
    auto *out_ptr = out_t->mutable_data<float>(
        var0->Get<framework::LoDTensor>().dims(), cpu_place);
    auto numel = out_t->numel();
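    // Element-wise sum: zero each output element, then accumulate the
    // matching element from every input tensor.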
    for (int64_t i = 0; i < numel; ++i) {
      out_ptr[i] = 0;
      for (auto &var : vars) {
        auto &var_t = var->Get<framework::LoDTensor>();
        PADDLE_ENFORCE_EQ(var_t.numel(), numel, "should have the same dims");
        out_ptr[i] += var_t.data<float>()[i];
      }
    }
  } else if (var0->IsType<framework::SelectedRows>()) {
    auto *out_slr = out_var->GetMutable<framework::SelectedRows>();
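    // Reset the output SelectedRows before merging into it.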
    out_slr->mutable_rows()->clear();
    out_slr->mutable_value()->mutable_data<float>({{}}, cpu_place);
    std::vector<const paddle::framework::SelectedRows *> inputs;
    inputs.reserve(vars.size());
    for (auto &var : vars) {
      inputs.push_back(&var->Get<framework::SelectedRows>());
    }
    math::scatter::MergeAdd<paddle::platform::CPUDeviceContext, float>
        merge_add;
    auto dev_ctx = paddle::platform::CPUDeviceContext();
    merge_add(dev_ctx, inputs, out_slr, false);
  } else {
    PADDLE_THROW("unsupported var type!");
  }
}

std::unique_ptr<Communicator> Communicator::communicator_(nullptr);
std::once_flag Communicator::init_flag_;

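// Construct from the send/recv RPC contexts extracted from the program:
// log the gflags in effect, create a private scope that holds the merged
// send vars, one bounded queue per send var, and the send/recv thread pools.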
Communicator::Communicator(const RpcCtxMap &send_varname_to_ctx,
                           const RpcCtxMap &recv_varname_to_ctx,
                           Scope *recv_scope)
    : send_varname_to_ctx_(send_varname_to_ctx),
      recv_varname_to_ctx_(recv_varname_to_ctx),
      recv_scope_(recv_scope) {
  // get all send information from graph, build vars_to_send
  VLOG(0) << "communicator_independent_recv_thread: "
          << FLAGS_communicator_independent_recv_thread;
  VLOG(0) << "communicator_send_queue_size: "
          << FLAGS_communicator_send_queue_size;
  VLOG(0) << "communicator_recv_wait_ms: " << FLAGS_communicator_recv_wait_ms;
  VLOG(0) << "communicator_thread_pool_size: "
          << FLAGS_communicator_thread_pool_size;
  send_scope_.reset(new Scope());
  for (auto &iter : send_varname_to_ctx_) {
    send_varname_to_queue_[iter.first] =
        std::make_shared<BlockingQueue<std::shared_ptr<Variable>>>(
            FLAGS_communicator_send_queue_size);
  }
  send_threadpool_.reset(new ::ThreadPool(FLAGS_communicator_thread_pool_size));
  recv_threadpool_.reset(new ::ThreadPool(FLAGS_communicator_thread_pool_size));
}

Communicator::~Communicator() {
  VLOG(3) << "~Communicator";
  running_ = false;
  if (send_thread_) send_thread_->join();
  if (recv_thread_) recv_thread_->join();
  VLOG(3) << "~Communicator done";
}

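// Loop of the send thread: for every var whose queue holds pending
// gradients, enqueue a task that merges and sends them, then wait for all
// tasks before the next round.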
void Communicator::SendThread() {
  VLOG(3) << "SendThread start!";
  while (running_) {
    std::vector<std::future<void>> task_futures;
    task_futures.reserve(send_varname_to_ctx_.size());
    VLOG(3) << "run send graph";
    for (auto &iter : send_varname_to_queue_) {
      auto &var_name = iter.first;
      auto &var_queue = iter.second;
      if (var_queue->Size() > 0) {
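        // Pop at most max_merge_var_num pending gradients for this var,
        // merge them into send_scope_, and send the result in one RPC.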
        auto send_task = [this, &var_name, &var_queue] {
          VLOG(3) << "merge var " << var_name << " and send";
          std::vector<std::shared_ptr<Variable>> vars;
          // TODO(qiao): need to be configurable
          const size_t max_merge_var_num = 20;
          size_t merged_var_num = 0;
          while (var_queue->Size() > 0 && merged_var_num < max_merge_var_num) {
            vars.push_back(var_queue->Pop());
            merged_var_num++;
          }
          MergeVars(var_name, vars, send_scope_.get());
          auto send_functor = distributed::ParameterSend<float>();
          auto &ctx = send_varname_to_ctx_.at(var_name);
          send_functor(ctx, *send_scope_, true);
        };
        task_futures.emplace_back(
            send_threadpool_->enqueue(std::move(send_task)));
      } else {
        VLOG(3) << var_name << " queue empty";
      }
    }
    for (auto &task_f : task_futures) {
      task_f.wait();
    }
    VLOG(3) << "run send graph done";
    if (!FLAGS_communicator_independent_recv_thread) {
      RecvAll();
    }
  }
}

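// Fetch all parameters from the parameter server in parallel, one recv task
// per variable, and block until every recv has finished.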
void Communicator::RecvAll() {
  VLOG(3) << "parallel run recv graph";
  std::vector<std::future<void>> task_futures;
  task_futures.reserve(recv_varname_to_ctx_.size());
  for (auto &iter : recv_varname_to_ctx_) {
    auto recv_task = [this, &iter] {
      auto &var_name = iter.first;
      VLOG(3) << "recv var " << var_name;
      auto recv_functor = distributed::ParameterRecv<float>();
      recv_functor(iter.second, *recv_scope_);
    };
    task_futures.emplace_back(recv_threadpool_->enqueue(std::move(recv_task)));
  }
  for (auto &task : task_futures) {
    task.wait();
  }
  VLOG(3) << "run recv graph done";
}

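// Loop of the independent recv thread: pull all parameters, then sleep for
// communicator_recv_wait_ms before the next round.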
void Communicator::RecvThread() {
  VLOG(3) << "RecvThread start!";
  while (running_) {
    RecvAll();
    std::this_thread::sleep_for(
        std::chrono::milliseconds(FLAGS_communicator_recv_wait_ms));
  }
}

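// Deep-copy the gradient var out of `scope` and push the copy into this
// var's bounded send queue (capacity: communicator_send_queue_size).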
void Communicator::Send(const std::string &var_name,
                        const framework::Scope &scope) {
  VLOG(3) << "communicator send " << var_name;
  // push var into send queue by var_name
  auto *grad_var = scope.FindVar(var_name);
  PADDLE_ENFORCE(grad_var->IsInitialized(), "grad var should be inited");
  auto tmp_grad_var = std::make_shared<Variable>();
  framework::CopyVariable(*grad_var, tmp_grad_var.get());
  auto &queue = send_varname_to_queue_.at(var_name);
  VLOG(3) << "send " << var_name << " queue size " << queue->Size();
  queue->Push(tmp_grad_var);
}

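// Return the process-wide singleton. The instance is expected to be created
// elsewhere (e.g. by an init routine guarded by init_flag_) before use;
// until then this returns nullptr.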
Communicator *Communicator::GetInstance() { return communicator_.get(); }
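
// Usage sketch (hypothetical call site; assumes the singleton has already
// been initialized and `scope` holds an initialized var named "w@GRAD"):
//   Communicator::GetInstance()->Start();                // spawn the threads
//   Communicator::GetInstance()->Send("w@GRAD", scope);  // queue a gradient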

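// Start background communication: mark the communicator as running and
// spawn the send thread, plus an independent recv thread when
// communicator_independent_recv_thread is set.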
void Communicator::Start() {
  running_ = true;
  // start send and recv thread
  send_thread_.reset(
      new std::thread(std::bind(&Communicator::SendThread, this)));
  if (FLAGS_communicator_independent_recv_thread) {
    recv_thread_.reset(
        new std::thread(std::bind(&Communicator::RecvThread, this)));
  }
}

}  // namespace distributed
}  // namespace operators
}  // namespace paddle