heter_client.cc
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/distributed/ps/service/heter_client.h"

#include "paddle/fluid/framework/convert_utils.h"
#include "paddle/fluid/platform/profiler.h"

namespace paddle {
namespace distributed {

std::shared_ptr<HeterClient> HeterClient::s_instance_ = nullptr;
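
// Rough usage sketch (assumption: the singleton has been created and
// connected elsewhere, e.g. by a GetInstance-style factory followed by
// CreateClient2XpuConnection(); the message name below is hypothetical):
//   heter_client->SendAndRecvAsync(ctx, scope, "forward", send_vars,
//                                  recv_vars, "forward");
//   heter_client->Stop();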

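// Reads the micro-batch id from the "microbatch_id" variable in the scope.
// On CPU the first tensor element is read directly; on GPU the tensor is
// first copied back to host memory on the device context's stream.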
int GetMicroId(const platform::DeviceContext& ctx,
               const framework::Scope* scope) {
  framework::Variable* var = scope->FindVar("microbatch_id");
  PADDLE_ENFORCE_EQ(var->IsType<framework::LoDTensor>(), true,
                    platform::errors::InvalidArgument(
                        "the type of micro id should be LoDTensor."));
  auto micro_id = -1;
  auto* tensor = var->GetMutable<framework::LoDTensor>();
  if (platform::is_cpu_place(tensor->place())) {
    auto data = reinterpret_cast<const float*>(tensor->data());
    micro_id = static_cast<int>(data[0]);
  } else {
#ifdef PADDLE_WITH_CUDA
    std::vector<char> temp;
    temp.resize(tensor->numel() * framework::DataTypeSize(tensor->dtype()));
    char* temp_ptr = temp.data();
    auto stream =
        reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream();
    memory::Copy(
        platform::CPUPlace(), temp_ptr, tensor->place(), tensor->data(),
        tensor->numel() * framework::DataTypeSize(tensor->dtype()), stream);
    float* temp_ptr_float = reinterpret_cast<float*>(temp_ptr);
    micro_id = static_cast<int>(temp_ptr_float[0]);
#endif
  }
  return micro_id;
}

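// Sends a PS_STOP_SERVER command to every connected heter worker and blocks
// until the command has completed.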
void HeterClient::Stop() {
  auto status = StopHeterWorker();
  status.wait();
}

std::future<int32_t> HeterClient::StopHeterWorker() {
  return SendCmd(-1, PS_STOP_SERVER, {});
}

std::future<int32_t> HeterClient::StartProfiler() {
  return SendCmd(-1, PS_START_PROFILER, {});
}

std::future<int32_t> HeterClient::StopProfiler() {
  return SendCmd(-1, PS_STOP_PROFILER, {});
}

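// Creates brpc channels to the next-stage (xpu_list_) and previous-stage
// (previous_xpu_list_) heter workers. If a channel fails to initialize with
// the raw endpoint, the endpoint is converted to its numeric ip:port form
// and the initialization is retried once.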
void HeterClient::CreateClient2XpuConnection() {
  brpc::ChannelOptions options;
  options.protocol = "baidu_std";
  options.connection_type = "single";
  options.timeout_ms = FLAGS_pserver_timeout_ms;

  xpu_channels_.resize(xpu_list_.size());
  for (size_t i = 0; i < xpu_list_.size(); ++i) {
    xpu_channels_[i].reset(new brpc::Channel());
    if (xpu_channels_[i]->Init(xpu_list_[i].c_str(), "", &options) != 0) {
      VLOG(0) << "HeterClient channel init fail. Try Again";
      auto ip_port = paddle::string::Split(xpu_list_[i], ':');
      std::string ip = ip_port[0];
      int port = std::stoi(ip_port[1]);
      std::string int_ip_port = GetIntTypeEndpoint(ip, port);
      if (xpu_channels_[i]->Init(int_ip_port.c_str(), "", &options) != 0) {
        LOG(ERROR) << "HeterClient channel init failed, ip_port= "
                   << int_ip_port;
      }
    }
  }
  previous_xpu_channels_.resize(previous_xpu_list_.size());
  for (size_t i = 0; i < previous_xpu_list_.size(); ++i) {
    previous_xpu_channels_[i].reset(new brpc::Channel());
    if (previous_xpu_channels_[i]->Init(previous_xpu_list_[i].c_str(), "",
                                        &options) != 0) {
      VLOG(0) << "HeterClient channel init fail. Try Again";
      auto ip_port = paddle::string::Split(previous_xpu_list_[i], ':');
      std::string ip = ip_port[0];
      int port = std::stoi(ip_port[1]);
      std::string int_ip_port = GetIntTypeEndpoint(ip, port);
      if (previous_xpu_channels_[i]->Init(int_ip_port.c_str(), "", &options) !=
          0) {
        LOG(ERROR) << "HeterClient channel init failed, ip_port= "
                   << int_ip_port;
      }
    }
  }
}

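// Serializes the send variables from `scope` and issues an asynchronous
// SendAndRecvVariable RPC. The channel is chosen by mini-batch id
// (micro id / 10): "forward" requests go to xpu_channels_, "backward"
// requests to previous_xpu_channels_. The "send_to_switch" branch is
// currently a no-op.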
void HeterClient::SendAndRecvAsync(
    const platform::DeviceContext& ctx, const framework::Scope& scope,
    const std::string& message_name,
    const std::vector<std::string>& send_var_name,
    const std::vector<std::string>& recv_var_name, const std::string& mode) {
  platform::RecordEvent record_event("HeterClient->SendAndRecvAsync",
                                     platform::TracerEventType::Communication,
                                     1);
  const platform::DeviceContext* p_ctx = &ctx;
  const framework::Scope* p_scope = &scope;
  const std::vector<std::string> send_var_name_val = send_var_name;
  const std::vector<std::string> recv_var_name_val = recv_var_name;
  VLOG(3) << "BRPCClient::SendAndRecv Begin, message_name: " << message_name;
  brpc::Channel* channel = nullptr;
  distributed::MultiVarMsg request;
  OnHeterRpcDone* closure = new OnHeterRpcDone([](void* done) {
    auto* closure = reinterpret_cast<OnHeterRpcDone*>(done);
    PADDLE_ENFORCE_NE(
        closure->cntl.Failed(), true,
        platform::errors::Unimplemented(
            "HeterClient::SendAndRecv meets brpc error, error message is %s",
            closure->cntl.ErrorText()));
    VLOG(4) << "call heter_worker success";
  });
  closure->cntl.set_timeout_ms(FLAGS_pserver_timeout_ms);
  auto& request_io_buffer = closure->cntl.request_attachment();
  distributed::SerializeToMultiVarMsgAndIOBuf(
      message_name, send_var_name_val, recv_var_name_val, *p_ctx, p_scope,
      &request, &request_io_buffer);

  int micro_id = GetMicroId(ctx, p_scope);
  auto minibatch_id = micro_id / 10;
  // select channel according to micro id
  if (mode == "forward") {
    int num = minibatch_id % xpu_channels_.size();
    channel = xpu_channels_[num].get();
  } else if (mode == "backward") {
    int num = minibatch_id % previous_xpu_channels_.size();
    channel = previous_xpu_channels_[num].get();
  } else if (mode == "send_to_switch") {
    VLOG(4) << "calling switch service";
    // auto promise = std::make_shared<std::promise<int32_t>>();
    // closure->add_promise(promise);
    // std::future<int> fut = promise->get_future();
    // int idx = 1;  // for test
    // LOG(INFO) << "xpu_channels_ size: " << xpu_channels_.size();
    // channel = xpu_channels_[idx].get();  // to adapt to the send_and_recv op
    // ::paddle::distributed::PsService_Stub stub(channel);
    // stub.SendToSwitch(&closure->cntl, &request, &closure->response,
    // closure); fut.wait();
    VLOG(4) << "calling switch service done";
    return;
  }
  ::paddle::distributed::PsService_Stub stub(channel);
  stub.SendAndRecvVariable(&closure->cntl, &request, &closure->response,
                           closure);
}

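// Broadcasts a control command (stop server, start/stop profiler, ...) to
// every heter worker channel and returns a future that yields 0 when all
// responses succeed, or -1 if any response check fails.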
std::future<int32_t> HeterClient::SendCmd(
    uint32_t table_id, int cmd_id, const std::vector<std::string>& params) {
  size_t request_call_num = xpu_channels_.size();
  paddle::distributed::DownpourBrpcClosure* closure =
      new paddle::distributed::DownpourBrpcClosure(
          request_call_num, [request_call_num, cmd_id](void* done) {
            int ret = 0;
            auto* closure = (paddle::distributed::DownpourBrpcClosure*)done;
            for (size_t i = 0; i < request_call_num; ++i) {
              if (closure->check_response(i, cmd_id) != 0) {
                ret = -1;
                break;
              }
            }
            closure->set_promise_value(ret);
          });
  auto promise = std::make_shared<std::promise<int32_t>>();
  closure->add_promise(promise);
  std::future<int> fut = promise->get_future();
  for (size_t i = 0; i < request_call_num; ++i) {
    closure->request(i)->set_cmd_id(cmd_id);
    closure->request(i)->set_table_id(table_id);
    closure->request(i)->set_client_id(trainer_id_);
    for (const auto& param : params) {
      closure->request(i)->add_params(param);
    }
    ::paddle::distributed::PsService_Stub rpc_stub(xpu_channels_[i].get());
    closure->cntl(i)->set_timeout_ms(
        FLAGS_pserver_timeout_ms);  // cmd msgs don't limit the timeout for save/load
    rpc_stub.service(closure->cntl(i), closure->request(i),
                     closure->response(i), closure);
  }
  return fut;
}

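// Serializes the named variables from `scope` into a MultiVarMsg and its
// request attachment, sends them to the switch service via SendToSwitch
// (falling back to xpu_channels_[0] when no switch channel is configured),
// and blocks until the RPC finishes.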
int HeterClient::Send(const platform::DeviceContext& ctx,
                      const framework::Scope& scope,
                      const std::string& message_name,
                      const std::vector<std::string>& send_var_names) {
  const framework::Scope* p_scope = &scope;  // note: the scope is const
  OnHeterRpcDone* closure = new OnHeterRpcDone([](void* done) {
    auto* closure = reinterpret_cast<OnHeterRpcDone*>(done);
    int ret = 0;
    closure->set_promise_value(ret);
    if (closure->cntl.Failed()) {
      PADDLE_ENFORCE_NE(
          closure->cntl.Failed(), true,
          platform::errors::Unimplemented(
              "HeterClient::SendToSwitch meets brpc error, error message is %s",
              closure->cntl.ErrorText()));
    }
  });

  closure->cntl.set_timeout_ms(FLAGS_pserver_timeout_ms);
  auto& request_io_buffer = closure->cntl.request_attachment();

  distributed::MultiVarMsg request;
  // 1. set req message_name(string)
  request.set_message_name(message_name);

  // 2. set req send_var_names(<string>)
  for (auto& send_var_name : send_var_names) {
    request.add_send_var_names(send_var_name);
  }

  // 3. set req var_messages(<VarMessage>)
  for (auto& send_var_name : send_var_names) {
    auto* send_var_msg = request.add_var_messages();
    send_var_msg->set_varname(send_var_name);
    framework::Variable* var = p_scope->FindVar(send_var_name);
    butil::IOBuf temp_iobuf;
    if (var->IsType<framework::LoDTensor>()) {
      SerializeLodTensor(var, ctx, send_var_msg, &temp_iobuf);
    } else if (var->IsType<phi::SelectedRows>()) {
      SerializeSelectedRows(var, ctx, send_var_msg, &temp_iobuf);
    }
    request_io_buffer.append(temp_iobuf);
  }
  auto promise = std::make_shared<std::promise<int32_t>>();
  closure->add_promise(promise);
  std::future<int> fut = promise->get_future();
  if (send_switch_channels_.empty()) {
    LOG(ERROR) << "send_switch_channels_ is null, get xpu_channels_[0]";
    if (xpu_channels_.empty()) {
      LOG(ERROR) << "xpu_channels_ is null";
    }
    send_switch_channels_.push_back(xpu_channels_[0]);
  }
  brpc::Channel* channel = send_switch_channels_[0].get();
  // brpc::Channel* channel = xpu_channels_[0].get();
  ::paddle::distributed::PsService_Stub stub(channel);
  stub.SendToSwitch(&closure->cntl, &request, &closure->ps_response, closure);

  VLOG(4) << "waiting SendToSwitch response result......";
  fut.wait();
  VLOG(4) << "Send done";
  return 0;
}

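// Sends a raw buffer of `data_size` floats to the switch under `group_id`,
// together with the variable names and their per-variable lengths, and
// blocks until the RPC finishes.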
int HeterClient::Send(int group_id, const std::vector<std::string>& var_names,
                      const std::vector<int>& vars_len, void* data_ptr,
                      int64_t data_size) {
  OnHeterRpcDone* closure = new OnHeterRpcDone([](void* done) {
    auto* closure = reinterpret_cast<OnHeterRpcDone*>(done);
    int ret = 0;
    closure->set_promise_value(ret);
    if (closure->cntl.Failed()) {
      LOG(ERROR) << "Send meets brpc error, err msg is: "
                 << closure->cntl.ErrorText();
    }
  });
  distributed::MultiVarMsg request;
  closure->cntl.set_timeout_ms(FLAGS_pserver_timeout_ms);
  std::string message_name = "send and save";
  request.set_message_name(message_name);
  request.set_group_id(group_id);
  for (auto& send_var_name : var_names) {
    request.add_send_var_names(send_var_name);
  }
  for (auto var_len : vars_len) {
    request.add_vars_len(var_len);
  }
  auto& request_buffer = closure->cntl.request_attachment();
  request_buffer.append(reinterpret_cast<void*>(data_ptr),
                        data_size * sizeof(float));
  auto promise = std::make_shared<std::promise<int32_t>>();
  closure->add_promise(promise);
  std::future<int> fut = promise->get_future();
  if (send_switch_channels_.empty()) {
    LOG(ERROR) << "send_switch_channels_ is null, get xpu_channels_[0]";
    if (xpu_channels_.empty()) {
      LOG(ERROR) << "xpu_channels_ is null";
    }
    send_switch_channels_.push_back(xpu_channels_[0]);
  }
  brpc::Channel* channel = send_switch_channels_[0].get();
  ::paddle::distributed::PsService_Stub stub(channel);
  stub.SendToSwitch(&closure->cntl, &request, &closure->ps_response, closure);
  fut.wait();
  return 0;
}

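// Requests the named variables for `message_name` from the switch via
// RecvFromSwitch and deserializes the response attachment into `recv_scope`
// on the CPU.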
int HeterClient::Recv(const platform::DeviceContext& ctx,
                      framework::Scope& recv_scope,  // NOLINT
                      const std::string& message_name,
                      const std::vector<std::string>& recv_var_names) {
  OnHeterRpcDone* closure = new OnHeterRpcDone([](void* done) {
    auto* closure = reinterpret_cast<OnHeterRpcDone*>(done);
    VLOG(4) << "Recv service call done";
    int ret = 0;
    closure->set_promise_value(ret);
    if (closure->cntl.Failed()) {
      VLOG(4) << "HeterClient::RecvFromSwitch meets "
                 "brpc error, error message is: "
              << closure->cntl.ErrorText();
    }
  });

  closure->cntl.set_timeout_ms(FLAGS_pserver_timeout_ms);

  distributed::MultiVarMsg request;
  // 1. set req message_name(string)
  request.set_message_name(message_name);

  // 2. set req recv_var_names(<string>)
  for (auto& recv_var_name : recv_var_names) {
    request.add_recv_var_names(recv_var_name);
  }
  auto promise = std::make_shared<std::promise<int32_t>>();
  closure->add_promise(promise);
  std::future<int> fut = promise->get_future();
  if (recv_switch_channels_.empty()) {
    LOG(ERROR) << "peer_switch_channels_ is null, get xpu_channels_[1]";
    if (xpu_channels_.size() < 2) {
      LOG(ERROR) << "xpu_channels_ is null";
    }
    recv_switch_channels_.push_back(xpu_channels_[1]);
  }
  brpc::Channel* channel = recv_switch_channels_[0].get();
  ::paddle::distributed::PsService_Stub stub(channel);
  stub.RecvFromSwitch(&closure->cntl, &request, &closure->response, closure);
  fut.wait();
  VLOG(4) << "RecvFromSwitch done";
  // save in worker
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
  platform::CPUPlace cpu_place;
  auto& cpu_dev_ctx = *pool.Get(cpu_place);
  auto& res_io_buffer = closure->cntl.response_attachment();
  VLOG(4) << "entering DeserializeFromMultiVarMsgAndIOBuf";
  distributed::DeserializeFromMultiVarMsgAndIOBuf(
      closure->response, &res_io_buffer, cpu_dev_ctx, &recv_scope);
  VLOG(4) << "Recv done";
  return 0;
}

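// Requests the variables of `group_id` from the switch via RecvFromSwitch
// and copies `data_size` floats from the response attachment into
// `data_ptr`.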
int HeterClient::Recv(int group_id, const std::vector<std::string>& var_names,
                      void* data_ptr, int64_t data_size) {
  OnHeterRpcDone* closure = new OnHeterRpcDone([](void* done) {
    auto* closure = reinterpret_cast<OnHeterRpcDone*>(done);
    int ret = 0;
    closure->set_promise_value(ret);
    if (closure->cntl.Failed()) {
      LOG(ERROR) << "Recv meets brpc error, err msg is: "
                 << closure->cntl.ErrorText();
    }
  });
  closure->cntl.set_timeout_ms(FLAGS_pserver_timeout_ms);

  distributed::MultiVarMsg request;
  std::string message_name = "query and recv";
  request.set_message_name(message_name);
  request.set_group_id(group_id);

  for (auto& recv_var_name : var_names) {
    request.add_recv_var_names(recv_var_name);
  }
  auto promise = std::make_shared<std::promise<int32_t>>();
  closure->add_promise(promise);
  std::future<int> fut = promise->get_future();
  if (recv_switch_channels_.empty()) {
    LOG(ERROR) << "peer_switch_channels_ is null, get xpu_channels_[1]";
    if (xpu_channels_.size() < 2) {
      LOG(ERROR) << "xpu_channels_ is null";
    }
    recv_switch_channels_.push_back(xpu_channels_[1]);
  }
  brpc::Channel* channel = recv_switch_channels_[0].get();
  ::paddle::distributed::PsService_Stub stub(channel);
  stub.RecvFromSwitch(&closure->cntl, &request, &closure->response, closure);
  fut.wait();
  VLOG(4) << "RecvFromSwitch done";
  // save in worker
  auto& res_io_buffer = closure->cntl.response_attachment();
  butil::IOBufBytesIterator io_buffer_itr(res_io_buffer);
  io_buffer_itr.copy_and_forward(reinterpret_cast<void*>(data_ptr),
                                 data_size * sizeof(float));
  VLOG(4) << "Recv done";
  return 0;
}
}  // namespace distributed
}  // end namespace paddle