/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/distributed/communicator.h"

#include <gflags/gflags.h>
#include <sys/time.h>  // for gettimeofday in GetCurrentUS

#include <chrono>  // NOLINT
#include <thread>  // NOLINT

#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/selected_rows.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/framework/variable_helper.h"
#include "paddle/fluid/operators/distributed/parameter_recv.h"
#include "paddle/fluid/operators/distributed/parameter_send.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/operators/math/selected_rows_functor.h"
#include "paddle/fluid/platform/device_context.h"

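// Runtime-tunable knobs for the communicator, defined with gflags so they
// can be overridden on the command line, e.g. --communicator_send_queue_size=40.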
DEFINE_bool(communicator_independent_recv_thread, true,
            "use an independent thread to recv vars from parameter server");
DEFINE_int32(communicator_send_queue_size, 20,
             "queue size to buffer gradients before sending");
DEFINE_int32(communicator_recv_wait_ms, 200, "wait time between each recv");
DEFINE_int32(communicator_thread_pool_size, 5, "thread num to do send or recv");
DEFINE_int32(communicator_max_merge_var_num, 20,
             "max var num to merge and send");
DEFINE_bool(communicator_fake_rpc, false,
            "fake mode does not really send anything");

namespace paddle {
namespace operators {
namespace distributed {

template <typename T, int MajorType = Eigen::RowMajor,
          typename IndexType = Eigen::DenseIndex>
using EigenVector = framework::EigenVector<T, MajorType, IndexType>;

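// Returns the current wall-clock time in microseconds; used below for the
// coarse "use time" profiling printed via VLOG.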
inline double GetCurrentUS() {
  struct timeval time;
  gettimeofday(&time, NULL);
  return 1e+6 * time.tv_sec + time.tv_usec;
}

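// Merges the gradient variables in `vars` into a variable named `var_name`
// in `scope`. Dense LoDTensor gradients are summed element-wise with Eigen;
// SelectedRows gradients are combined with math::scatter::MergeAdd. All
// inputs must have the same shape as the first variable.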
static inline void MergeVars(const std::string &var_name,
                             const std::vector<std::shared_ptr<Variable>> &vars,
                             Scope *scope) {
  PADDLE_ENFORCE(!vars.empty(), "should have at least one var to merge!");
  auto cpu_place = platform::CPUPlace();
  auto &var0 = vars[0];
  auto *out_var = scope->Var(var_name);
  if (var0->IsType<framework::LoDTensor>()) {
    VLOG(3) << "merge " << var_name << " LoDTensor"
            << var0->Get<framework::LoDTensor>().dims();

    // init output tensor
    auto *out_t = out_var->GetMutable<framework::LoDTensor>();
    auto *out_ptr = out_t->mutable_data<float>(
        var0->Get<framework::LoDTensor>().dims(), cpu_place);
    auto numel = out_t->numel();

    // check the input dims
    for (auto &var : vars) {
      auto &var_t = var->Get<framework::LoDTensor>();
      PADDLE_ENFORCE_EQ(var_t.numel(), numel, "should have the same dims");
    }

    // set output tensor to 0.
    auto cpu_ctx = paddle::platform::CPUDeviceContext();
    math::SetConstant<paddle::platform::CPUDeviceContext, float>
        constant_functor;
    constant_functor(cpu_ctx, out_t, static_cast<float>(0));

    // sum all vars to out
    auto result = EigenVector<float>::Flatten(*out_t);
    for (auto &var : vars) {
      auto &in_t = var->Get<framework::LoDTensor>();
      auto in = EigenVector<float>::Flatten(in_t);
      result.device(*cpu_ctx.eigen_device()) = result + in;
    }
  } else if (var0->IsType<framework::SelectedRows>()) {
    auto &slr0 = var0->Get<framework::SelectedRows>();
    auto *out_slr = out_var->GetMutable<framework::SelectedRows>();
    out_slr->mutable_rows()->clear();
    out_slr->mutable_value()->mutable_data<float>({{}}, cpu_place);
    std::vector<const paddle::framework::SelectedRows *> inputs;
    inputs.reserve(vars.size());
    for (auto &var : vars) {
      inputs.push_back(&var->Get<framework::SelectedRows>());
    }
    math::scatter::MergeAdd<paddle::platform::CPUDeviceContext, float>
        merge_add;
    auto dev_ctx = paddle::platform::CPUDeviceContext();
    merge_add(dev_ctx, inputs, out_slr, false);
    VLOG(3) << "merge " << var_name << " SelectedRows height: " << slr0.height()
            << " dims: " << slr0.value().dims();
  } else {
    PADDLE_THROW("unsupported var type!");
  }
}

std::unique_ptr<Communicator> Communicator::communicator_(nullptr);
std::once_flag Communicator::init_flag_;

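// The constructor only prepares data structures: one bounded blocking queue
// per variable to send, plus thread pools for send and recv work. Worker
// threads are not launched until Start() is called.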
Communicator::Communicator(const RpcCtxMap &send_varname_to_ctx,
                           const RpcCtxMap &recv_varname_to_ctx,
                           Scope *recv_scope)
    : send_varname_to_ctx_(send_varname_to_ctx),
      recv_varname_to_ctx_(recv_varname_to_ctx),
      recv_scope_(recv_scope) {
  // get all send information from graph, build vars_to_send
  VLOG(0) << "communicator_independent_recv_thread: "
          << FLAGS_communicator_independent_recv_thread;
  VLOG(0) << "communicator_send_queue_size: "
          << FLAGS_communicator_send_queue_size;
  VLOG(0) << "communicator_recv_wait_ms: " << FLAGS_communicator_recv_wait_ms;
  VLOG(0) << "communicator_thread_pool_size: "
          << FLAGS_communicator_thread_pool_size;
  VLOG(0) << "communicator_max_merge_var_num: "
          << FLAGS_communicator_max_merge_var_num;
  VLOG(0) << "communicator_fake_rpc: " << FLAGS_communicator_fake_rpc;
  send_scope_.reset(new Scope());
  for (auto &iter : send_varname_to_ctx_) {
    send_varname_to_queue_[iter.first] =
        std::make_shared<BlockingQueue<std::shared_ptr<Variable>>>(
            FLAGS_communicator_send_queue_size);
  }
  send_threadpool_.reset(new ::ThreadPool(FLAGS_communicator_thread_pool_size));
  recv_threadpool_.reset(new ::ThreadPool(FLAGS_communicator_thread_pool_size));
}

Communicator::~Communicator() {
  VLOG(3) << "~Communicator";
  running_ = false;
  if (send_thread_) send_thread_->join();
  if (recv_thread_) recv_thread_->join();
  VLOG(3) << "~Communicator done";
}

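// Send loop: for every variable whose queue is non-empty, enqueue a task that
// pops up to FLAGS_communicator_max_merge_var_num pending gradients, merges
// them with MergeVars, and ships the result with ParameterSend. After all
// tasks finish, a receive pass is run inline when no independent recv thread
// is configured.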
void Communicator::SendThread() {
  VLOG(3) << "SendThread start!";
  while (running_) {
    std::vector<std::future<void>> task_futures;
    task_futures.reserve(send_varname_to_ctx_.size());
    VLOG(3) << "run send graph";
    auto before_run_send_graph = GetCurrentUS();
    for (auto &iter : send_varname_to_queue_) {
      auto &var_name = iter.first;
      auto &var_queue = iter.second;
      if (var_queue->Size() > 0) {
        auto send_task = [this, &var_name, &var_queue] {
          VLOG(3) << var_name << " merge and send";
          std::vector<std::shared_ptr<Variable>> vars;
          size_t merged_var_num = 0;
          while (var_queue->Size() > 0 &&
                 merged_var_num < FLAGS_communicator_max_merge_var_num) {
            vars.push_back(var_queue->Pop());
            merged_var_num++;
          }
          auto before_merge = GetCurrentUS();
          MergeVars(var_name, vars, send_scope_.get());
          auto after_merge = GetCurrentUS();
          VLOG(3) << "merge " << var_name << " use time "
                  << after_merge - before_merge;
          auto send_functor = distributed::ParameterSend<float>();
          auto &ctx = send_varname_to_ctx_.at(var_name);
          if (!FLAGS_communicator_fake_rpc) {
            send_functor(ctx, *send_scope_, true);
          }
          auto after_send = GetCurrentUS();
          VLOG(3) << "send " << var_name << " use time "
                  << after_send - after_merge;
        };
        task_futures.emplace_back(
            send_threadpool_->enqueue(std::move(send_task)));
      } else {
        VLOG(3) << var_name << " queue empty";
      }
    }
    for (auto &task_f : task_futures) {
      task_f.wait();
    }
    auto after_run_send_graph = GetCurrentUS();
    VLOG(3) << "run send graph use time "
            << after_run_send_graph - before_run_send_graph;
    if (!FLAGS_communicator_independent_recv_thread) {
      RecvAll();
    }
  }
}

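// Fetches all parameters from the parameter servers in parallel, one
// threadpool task per variable in recv_varname_to_ctx_.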
void Communicator::RecvAll() {
  VLOG(3) << "parallel run recv graph";
  auto before_recv = GetCurrentUS();
  std::vector<std::future<void>> task_futures;
  task_futures.reserve(recv_varname_to_ctx_.size());
  for (auto &iter : recv_varname_to_ctx_) {
    auto recv_task = [this, &iter] {
      auto &var_name = iter.first;
      VLOG(3) << "recv var " << var_name;
      auto recv_functor = distributed::ParameterRecv<float>();
      if (!FLAGS_communicator_fake_rpc) {
        recv_functor(iter.second, *recv_scope_);
      }
    };
    task_futures.emplace_back(recv_threadpool_->enqueue(std::move(recv_task)));
  }
  for (auto &task : task_futures) {
    task.wait();
  }
  auto after_recv = GetCurrentUS();
  VLOG(3) << "run recv graph use time " << after_recv - before_send;
}

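// Independent recv loop: repeatedly pull all parameters, then back off for
// FLAGS_communicator_recv_wait_ms between passes.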
void Communicator::RecvThread() {
  VLOG(3) << "RecvThread start!";
  while (running_) {
    RecvAll();
    std::this_thread::sleep_for(
        std::chrono::milliseconds(FLAGS_communicator_recv_wait_ms));
  }
}

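// Called by the trainer to hand a gradient to the communicator. The variable
// is copied out of the caller's scope so the caller can keep reusing it, and
// the copy is pushed onto the per-variable blocking queue, which is bounded
// by FLAGS_communicator_send_queue_size.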
void Communicator::Send(const std::string &var_name,
                        const framework::Scope &scope) {
  VLOG(3) << "communicator send " << var_name;
  // push var into send queue by var_name
  auto *grad_var = scope.FindVar(var_name);
  PADDLE_ENFORCE(grad_var->IsInitialized(), "grad var should be initialized");
  auto tmp_grad_var = std::make_shared<Variable>();
  framework::CopyVariable(*grad_var, tmp_grad_var.get());
  auto &queue = send_varname_to_queue_.at(var_name);
  VLOG(3) << "send " << var_name << " queue size " << queue->Size();
  queue->Push(tmp_grad_var);
}

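// The singleton instance is expected to have been created elsewhere (see the
// initialization declared in communicator.h); this accessor only hands out
// the raw pointer.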
Communicator *Communicator::GetInstance() { return communicator_.get(); }

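// Launches the send thread, and the recv thread as well when
// FLAGS_communicator_independent_recv_thread is set.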
void Communicator::Start() {
  running_ = true;
  // start send and recv thread
  send_thread_.reset(
      new std::thread(std::bind(&Communicator::SendThread, this)));
  if (FLAGS_communicator_independent_recv_thread) {
    recv_thread_.reset(
        new std::thread(std::bind(&Communicator::RecvThread, this)));
  }
}

}  // namespace distributed
}  // namespace operators
}  // namespace paddle