/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/distributed/communicator.h"

#include <gflags/gflags.h>
#include <chrono>  // NOLINT
#include <thread>  // NOLINT

#include "paddle/fluid/framework/selected_rows.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/framework/variable_helper.h"
#include "paddle/fluid/operators/distributed/parameter_recv.h"
#include "paddle/fluid/operators/distributed/parameter_send.h"
#include "paddle/fluid/operators/math/selected_rows_functor.h"

DEFINE_bool(communicator_independent_recv_thread, true,
            "use an independent thread to recv vars from parameter server");
DEFINE_int32(communicator_send_queue_size, 20,
             "queue size for gradients buffered before send");
DEFINE_int32(communicator_recv_wait_ms, 200, "wait time between each recv");
DEFINE_int32(communicator_thread_pool_size, 5, "thread num to do send or recv");
DEFINE_int32(communicator_max_merge_var_num, 20,
             "max var num to merge and send");
DEFINE_bool(communicator_fake_rpc, false,
            "fake mode does not really send anything");

namespace paddle {
namespace operators {
namespace distributed {

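// merge the queued copies of one variable into a single value in the given
// scope: LoDTensors are summed elementwise, SelectedRows are combined with
// math::scatter::MergeAdd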
static inline void MergeVars(const std::string &var_name,
                             const std::vector<std::shared_ptr<Variable>> &vars,
                             Scope *scope) {
  VLOG(3) << "merge " << vars.size() << " vars " << var_name << " to 1";
  PADDLE_ENFORCE(!vars.empty(), "should have value to merge!");
  auto cpu_place = platform::CPUPlace();
  auto &var0 = vars[0];
  auto *out_var = scope->Var(var_name);
  if (var0->IsType<framework::LoDTensor>()) {
    auto *out_t = out_var->GetMutable<framework::LoDTensor>();
    auto *out_ptr = out_t->mutable_data<float>(
        var0->Get<framework::LoDTensor>().dims(), cpu_place);
    auto numel = out_t->numel();
    for (int64_t i = 0; i < numel; ++i) {
      out_ptr[i] = 0;
      for (auto &var : vars) {
        auto &var_t = var->Get<framework::LoDTensor>();
        PADDLE_ENFORCE_EQ(var_t.numel(), numel, "should have the same dims");
        out_ptr[i] += var_t.data<float>()[i];
      }
    }
  } else if (var0->IsType<framework::SelectedRows>()) {
    auto *out_slr = out_var->GetMutable<framework::SelectedRows>();
    out_slr->mutable_rows()->clear();
    out_slr->mutable_value()->mutable_data<float>({{}}, cpu_place);
    std::vector<const paddle::framework::SelectedRows *> inputs;
    inputs.reserve(vars.size());
    for (auto &var : vars) {
      inputs.push_back(&var->Get<framework::SelectedRows>());
    }
    math::scatter::MergeAdd<paddle::platform::CPUDeviceContext, float>
        merge_add;
    auto dev_ctx = paddle::platform::CPUDeviceContext();
    merge_add(dev_ctx, inputs, out_slr, false);
  } else {
    PADDLE_THROW("unsupported var type!");
  }
}

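// singleton instance and the flag that guards its one-time initialization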
std::unique_ptr<Communicator> Communicator::communicator_(nullptr);
std::once_flag Communicator::init_flag_;

Communicator::Communicator(const RpcCtxMap &send_varname_to_ctx,
                           const RpcCtxMap &recv_varname_to_ctx,
                           Scope *recv_scope)
    : send_varname_to_ctx_(send_varname_to_ctx),
      recv_varname_to_ctx_(recv_varname_to_ctx),
      recv_scope_(recv_scope) {
  // get all send information from graph, build vars_to_send
  VLOG(0) << "communicator_independent_recv_thread: "
          << FLAGS_communicator_independent_recv_thread;
  VLOG(0) << "communicator_send_queue_size: "
          << FLAGS_communicator_send_queue_size;
  VLOG(0) << "communicator_recv_wait_ms: " << FLAGS_communicator_recv_wait_ms;
  VLOG(0) << "communicator_thread_pool_size: "
          << FLAGS_communicator_thread_pool_size;
  VLOG(0) << "communicator_max_merge_var_num: "
          << FLAGS_communicator_max_merge_var_num;
  VLOG(0) << "communicator_fake_rpc: " << FLAGS_communicator_fake_rpc;
  send_scope_.reset(new Scope());
  for (auto &iter : send_varname_to_ctx_) {
    send_varname_to_queue_[iter.first] =
        std::make_shared<BlockingQueue<std::shared_ptr<Variable>>>(
            FLAGS_communicator_send_queue_size);
  }
  send_threadpool_.reset(new ::ThreadPool(FLAGS_communicator_thread_pool_size));
  recv_threadpool_.reset(new ::ThreadPool(FLAGS_communicator_thread_pool_size));
}

Communicator::~Communicator() {
  VLOG(3) << "~Communicator";
  running_ = false;
  if (send_thread_) send_thread_->join();
  if (recv_thread_) recv_thread_->join();
  VLOG(3) << "~Communicator done";
}

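// send loop: for each var whose queue is not empty, pop up to
// FLAGS_communicator_max_merge_var_num gradients, merge them and send the
// result on the send thread pool; when no independent recv thread is used,
// a full recv pass runs at the end of every iteration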
void Communicator::SendThread() {
  VLOG(3) << "SendThread start!";
  while (running_) {
    std::vector<std::future<void>> task_futures;
    task_futures.reserve(send_varname_to_ctx_.size());
    VLOG(3) << "run send graph";
    for (auto &iter : send_varname_to_queue_) {
      auto &var_name = iter.first;
      auto &var_queue = iter.second;
      if (var_queue->Size() > 0) {
        auto send_task = [this, &var_name, &var_queue] {
          VLOG(3) << "merge var " << var_name << " and send";
          std::vector<std::shared_ptr<Variable>> vars;
          size_t merged_var_num = 0;
          while (var_queue->Size() > 0 &&
                 merged_var_num < FLAGS_communicator_max_merge_var_num) {
            vars.push_back(var_queue->Pop());
            merged_var_num++;
          }
          MergeVars(var_name, vars, send_scope_.get());
          auto send_functor = distributed::ParameterSend<float>();
          auto &ctx = send_varname_to_ctx_.at(var_name);
          if (!FLAGS_communicator_fake_rpc) {
            send_functor(ctx, *send_scope_, true);
          }
        };
        task_futures.emplace_back(
            send_threadpool_->enqueue(std::move(send_task)));
      } else {
        VLOG(3) << var_name << " queue empty";
      }
    }
    for (auto &task_f : task_futures) {
      task_f.wait();
    }
    VLOG(3) << "run send graph done";
    if (!FLAGS_communicator_independent_recv_thread) {
      RecvAll();
    }
  }
}

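// fetch all vars in recv_varname_to_ctx_ from the parameter servers in
// parallel on the recv thread pool and wait for every fetch to finish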
void Communicator::RecvAll() {
  VLOG(3) << "parallel run recv graph";
  std::vector<std::future<void>> task_futures;
  task_futures.reserve(recv_varname_to_ctx_.size());
  for (auto &iter : recv_varname_to_ctx_) {
    auto recv_task = [this, &iter] {
      auto &var_name = iter.first;
      VLOG(3) << "recv var " << var_name;
      auto recv_functor = distributed::ParameterRecv<float>();
      if (!FLAGS_communicator_fake_rpc) {
        recv_functor(iter.second, *recv_scope_);
      }
    };
    task_futures.emplace_back(recv_threadpool_->enqueue(std::move(recv_task)));
  }
  for (auto &task : task_futures) {
    task.wait();
  }
  VLOG(3) << "run recv graph done";
}

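// independent recv loop: pull all parameters repeatedly, sleeping
// FLAGS_communicator_recv_wait_ms between passes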
void Communicator::RecvThread() {
  VLOG(3) << "RecvThread start!";
  while (running_) {
    RecvAll();
    std::this_thread::sleep_for(
        std::chrono::milliseconds(FLAGS_communicator_recv_wait_ms));
  }
}

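// called after a gradient is produced: copy the var out of the caller's
// scope and enqueue it for SendThread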
void Communicator::Send(const std::string &var_name,
                        const framework::Scope &scope) {
  VLOG(3) << "communicator send " << var_name;
  // push var into send queue by var_name
  auto *grad_var = scope.FindVar(var_name);
  PADDLE_ENFORCE(grad_var->IsInitialized(), "grad var should be initialized");
  auto tmp_grad_var = std::make_shared<Variable>();
  framework::CopyVariable(*grad_var, tmp_grad_var.get());
  auto &queue = send_varname_to_queue_.at(var_name);
  VLOG(3) << "send " << var_name << " queue size " << queue->Size();
  queue->Push(tmp_grad_var);
}

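// accessor for the process-wide instance. a minimal usage sketch, assuming
// the instance has already been created elsewhere (initialization is not in
// this file) and "w@GRAD" is a hypothetical gradient var name:
//
//   auto *comm = Communicator::GetInstance();
//   comm->Start();                // spawn send (and possibly recv) threads
//   comm->Send("w@GRAD", scope);  // enqueue one gradient copy per step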
Communicator *Communicator::GetInstance() { return communicator_.get(); }

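// spawn the send thread, plus the recv thread when
// FLAGS_communicator_independent_recv_thread is set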
void Communicator::Start() {
  running_ = true;
  // start send and recv thread
  send_thread_.reset(
      new std::thread(std::bind(&Communicator::SendThread, this)));
  if (FLAGS_communicator_independent_recv_thread) {
    recv_thread_.reset(
        new std::thread(std::bind(&Communicator::RecvThread, this)));
  }
}

}  // namespace distributed
}  // namespace operators
}  // namespace paddle