// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <memory>
#include <sstream>
#include <string>

#include "paddle/fluid/distributed/ps/service/brpc_ps_client.h"
#include "paddle/fluid/framework/archive.h"

static const int max_port = 65535;

DEFINE_int32(pserver_push_dense_merge_limit, 12,
             "limit max push_dense local merge requests");

DEFINE_int32(pserver_push_sparse_merge_limit, 12,
             "limit max push_sparse local merge requests");

DEFINE_int32(pserver_pull_dense_limit, 12,
             "limit max pull_dense local merge requests");

DEFINE_int32(pserver_async_push_dense_interval_ms, 10,
             "async push_dense to server interval");

DEFINE_int32(pserver_async_push_sparse_interval_ms, 10,
             "async push_sparse to server interval");

DEFINE_bool(pserver_scale_gradient_by_merge, false,
            "scale dense gradient when merged");

DEFINE_int32(pserver_communicate_compress_type, 0,
             "none:0 snappy:1 gzip:2 zlib:3 lz4:4");

DEFINE_int32(pserver_max_async_call_num, 13,
             "max task num in async_call_server");

DEFINE_int32(pserver_timeout_ms, 500000, "pserver request server timeout_ms");

DEFINE_int32(pserver_connect_timeout_ms, 10000,
             "pserver connect server timeout_ms");

DEFINE_int32(pserver_sparse_merge_thread, 1, "pserver sparse merge thread num");

DEFINE_int32(pserver_sparse_table_shard_num, 1000,
             "sparse table shard for save & load");

DEFINE_int32(heter_world_size, 100, "group size");  // configurable

namespace paddle {
namespace framework {
class Scope;
class Variable;
}  // namespace framework
}  // namespace paddle

namespace paddle {
namespace distributed {

inline size_t get_sparse_shard(uint32_t shard_num, uint32_t server_num,
                               uint64_t key) {
  size_t remainder = shard_num % server_num;
  size_t local_shard_num =
      remainder == 0 ? shard_num / server_num : shard_num / server_num + 1;
  return (key % shard_num) / local_shard_num;
}
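// Example: with shard_num = 1000 and server_num = 3, local_shard_num is 334,
// so keys whose global shard (key % 1000) lies in [0, 333] map to server 0,
// [334, 667] to server 1, and [668, 999] to server 2.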

void DownpourPsClientService::service(
    ::google::protobuf::RpcController *controller,
    const PsRequestMessage *request, PsResponseMessage *response,
    ::google::protobuf::Closure *done) {
  brpc::ClosureGuard done_guard(done);
  int ret = _client->HandleClient2ClientMsg(
      request->cmd_id(), request->client_id(), request->data());
  response->set_err_code(0);
  response->set_err_msg("");
  if (ret != 0) {
    response->set_err_code(-1);
    response->set_err_msg("handle_client2client_msg failed");
  }
}

// Start the client-side RPC service, used for client-to-client data exchange.
int32_t BrpcPsClient::StartClientService() {
  if (_service.Configure(this, _client_id) != 0) {
    LOG(ERROR)
        << "service initialize failed, service_name:DownpourPsClientService";
    return -1;
  }
  _server.AddService(&_service, brpc::SERVER_DOESNT_OWN_SERVICE);
  brpc::ServerOptions options;
  int start_port = 8500;
  options.num_threads = 24;

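  // Bind to the first available port in [start_port, max_port]:
  // brpc::PortRange lets Start() probe the range until a bind succeeds.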
  if (_server.Start(butil::my_ip_cstr(), brpc::PortRange(start_port, max_port),
                    &options) != 0) {
    LOG(ERROR) << "BrpcPsServer start failed";
    return -1;
  }
  _server_started = true;
  _env->RegistePsClient(butil::my_ip_cstr(), _server.listen_address().port,
                        _client_id);
  return 0;
}

int32_t BrpcPsClient::CreateClient2ClientConnection(
    int pserver_timeout_ms, int pserver_connect_timeout_ms, int max_retry) {
  brpc::ChannelOptions options;
  options.protocol = "baidu_std";
  options.timeout_ms = pserver_timeout_ms;
  options.connection_type = "pooled";
  options.connect_timeout_ms = pserver_connect_timeout_ms;
  options.max_retry = max_retry;

  std::vector<PSHost> client_list = _env->GetPsClients();
  VLOG(1) << "BrpcPsClient::create_c2c_connection client_list size: "
          << client_list.size();
  for (auto cc : client_list) {
    VLOG(1) << "BrpcPsClient::create_c2c_connection client_list: "
            << cc.ToString();
  }
  _client_channels.resize(client_list.size());
  std::ostringstream os;
  std::string server_ip_port;
  for (size_t i = 0; i < client_list.size(); ++i) {
    server_ip_port.assign(client_list[i].ip.c_str());
    server_ip_port.append(":");
    server_ip_port.append(std::to_string(client_list[i].port));
    _client_channels[i].reset(new brpc::Channel());
    if (_client_channels[i]->Init(server_ip_port.c_str(), "", &options) != 0) {
      VLOG(0) << "BrpcPSClient connect to Client:" << server_ip_port
              << " Failed! Try again.";
      std::string int_ip_port =
          GetIntTypeEndpoint(client_list[i].ip, client_list[i].port);
      if (_client_channels[i]->Init(int_ip_port.c_str(), "", &options) != 0) {
        LOG(ERROR) << "BrpcPSClient connect to Client:" << int_ip_port
                   << " Failed!";
        return -1;
      }
    }
    os << server_ip_port << ",";
  }
  LOG(INFO) << "Client connect success:" << os.str();
  return 0;
}

int32_t BrpcPsClient::Initialize() {
  _async_call_num = 0;

  brpc::ChannelOptions options;
  options.protocol = "baidu_std";
  options.timeout_ms = FLAGS_pserver_timeout_ms;
  options.connection_type = "pooled";
  options.connect_timeout_ms = FLAGS_pserver_connect_timeout_ms;
  options.max_retry = 3;

  std::ostringstream os;
  std::string server_ip_port;
  std::string client_ip(butil::my_ip_cstr());

  // Fetch the server list and connect to every server.
  std::vector<PSHost> server_list = _env->GetPsServers();
  _server_channels.resize(server_list.size());
  for (size_t i = 0; i < server_list.size(); ++i) {
    server_ip_port.assign(server_list[i].ip.c_str());
    server_ip_port.append(":");
    server_ip_port.append(std::to_string(server_list[i].port));
    for (size_t j = 0; j < _server_channels[i].size(); ++j) {
      _server_channels[i][j].reset(new brpc::Channel());
      if (_server_channels[i][j]->Init(server_ip_port.c_str(), "", &options) !=
          0) {
        VLOG(0) << "BrpcPSClient connect to Server:" << server_ip_port
                << " Failed! Try again.";
        std::string int_ip_port =
            GetIntTypeEndpoint(server_list[i].ip, server_list[i].port);
        if (_server_channels[i][j]->Init(int_ip_port.c_str(), "", &options) !=
            0) {
          LOG(ERROR) << "BrpcPSClient connect to Server:" << int_ip_port
                     << " Failed!";
          return -1;
        }
      }
    }
    os << server_ip_port << ",";
  }
  // Start the client service and build client-to-client connections.
  StartClientService();

  // Initialize the async push request queues.
  const auto &worker_param = _config.worker_param().downpour_worker_param();
  for (size_t i = 0; i < worker_param.downpour_table_param_size(); ++i) {
    auto type = worker_param.downpour_table_param(i).type();
    auto table_id = worker_param.downpour_table_param(i).table_id();
    if (type == PS_DENSE_TABLE) {
      _push_dense_task_queue_map[table_id] =
          paddle::framework::MakeChannel<DenseAsyncTask *>();
    }
    if (type == PS_SPARSE_TABLE) {
      _push_sparse_task_queue_map[table_id] =
          paddle::framework::MakeChannel<SparseAsyncTask *>();
      _push_sparse_merge_count_map[table_id] = 0;
    }
  }

  auto &profiler = CostProfiler::instance();
  profiler.register_profiler("pserver_client_pull_dense");
  profiler.register_profiler("pserver_client_pull_sparse");
  profiler.register_profiler("pserver_client_pull_sparse_param");
  profiler.register_profiler("pserver_client_pull_sparse_local");
  profiler.register_profiler("pserver_client_push_sparse");
  profiler.register_profiler("pserver_client_push_sparse_parse");
  profiler.register_profiler("client_push_sparse_put");
  profiler.register_profiler("pserver_client_push_sparse_merge");
  profiler.register_profiler("pserver_client_push_sparse_rpc");
  profiler.register_profiler("pserver_client_push_dense");
  profiler.register_profiler("pserver_client_push_dense_parse");
  profiler.register_profiler("push_dense_put");
  profiler.register_profiler("pserver_client_push_dense_merge");
  profiler.register_profiler("pserver_client_push_dense_rpc");
  profiler.register_profiler("pserver_client_push_dense_send");

  _running = true;
  _flushing = false;
  // Start the async push threads.
  _async_push_sparse_thread =
      std::thread(std::bind(&BrpcPsClient::PushSparseTaskConsume, this));
  // _async_push_sparse_thread.detach();
  _async_push_dense_thread =
      std::thread(std::bind(&BrpcPsClient::PushDenseTaskConsume, this));
  // for debug
  // _print_thread =
  //    std::thread(std::bind(&BrpcPsClient::PrintQueueSizeThread, this));

  return 0;
}

int DownpourBrpcClosure::check_response(size_t request_idx, int cmd_id) {
  if (_cntls[request_idx]->Failed()) {
    LOG(ERROR) << "request cmd_id:" << cmd_id
               << " failed, err:" << _cntls[request_idx]->ErrorText();
    return -1;
  }
  if (_responses[request_idx].err_code() != 0) {
    LOG(ERROR) << "response ret bad, server_idx:" << request_idx
               << " cmd_id:" << cmd_id
               << " err_code:" << _responses[request_idx].err_code()
               << " err_msg:" << _responses[request_idx].err_msg();
    return -1;
  }
  return 0;
}

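// check_save_response reuses the response err_code field to carry the saved
// feasign count: a negative value signals failure, anything else is the count.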
int DownpourBrpcClosure::check_save_response(size_t request_idx, int cmd_id) {
  int32_t feasign_size = 0;
  if (_cntls[request_idx]->Failed()) {
    LOG(ERROR) << "request cmd_id:" << cmd_id
               << " failed, err:" << _cntls[request_idx]->ErrorText();
    return -1;
  }
  feasign_size = _responses[request_idx].err_code();
  if (feasign_size < 0) {
    LOG(ERROR) << "response ret bad, server_idx:" << request_idx
               << " cmd_id:" << cmd_id
               << " err_code:" << _responses[request_idx].err_code()
               << " err_msg:" << _responses[request_idx].err_msg();
    return -1;
  }
  return feasign_size;
}

std::string DownpourBrpcClosure::get_response(size_t request_idx, int cmd_id) {
  std::string data = _responses[request_idx].data();
  return data;
}

std::future<int32_t> BrpcPsClient::PrintTableStat(uint32_t table_id) {
  size_t request_call_num = _server_channels.size();
  DownpourBrpcClosure *closure = new DownpourBrpcClosure(
      request_call_num, [request_call_num, table_id](void *done) {
        int ret = 0;
        uint64_t feasign_size = 0;
        uint64_t mf_size = 0;
        paddle::framework::BinaryArchive ar;
        auto *closure = reinterpret_cast<DownpourBrpcClosure *>(done);
        for (size_t i = 0; i < request_call_num; ++i) {
          if (closure->check_response(i, PS_PRINT_TABLE_STAT) != 0) {
            ret = -1;
            break;
          }
          std::string resp = closure->get_response(i, PS_PRINT_TABLE_STAT);
          ar.SetReadBuffer(const_cast<char *>(resp.c_str()), resp.length(),
                           nullptr);

          feasign_size += ar.Get<uint64_t>();
          mf_size += ar.Get<uint64_t>();
        }
        closure->set_promise_value(ret);
        std::cout << "table id: " << table_id
                  << ", feasign size: " << feasign_size
                  << ", mf size: " << mf_size << std::endl;
      });
  auto promise = std::make_shared<std::promise<int32_t>>();
  closure->add_promise(promise);
  std::future<int> fut = promise->get_future();
  for (size_t i = 0; i < request_call_num; ++i) {
    closure->request(i)->set_cmd_id(PS_PRINT_TABLE_STAT);
    closure->request(i)->set_table_id(table_id);
    closure->request(i)->set_client_id(_client_id);
    PsService_Stub rpc_stub(GetCmdChannel(i));
    closure->cntl(i)->set_timeout_ms(
        10800000);  // command messages (save/load) use a long timeout
    rpc_stub.service(closure->cntl(i), closure->request(i),
                     closure->response(i), closure);
  }
  return fut;
}
std::future<int32_t> BrpcPsClient::SendCmd(
    uint32_t table_id, int cmd_id, const std::vector<std::string> &params) {
  size_t request_call_num = _server_channels.size();
  DownpourBrpcClosure *closure = new DownpourBrpcClosure(
      request_call_num, [request_call_num, cmd_id](void *done) {
        int ret = 0;
        auto *closure = reinterpret_cast<DownpourBrpcClosure *>(done);
        for (size_t i = 0; i < request_call_num; ++i) {
          if (closure->check_response(i, cmd_id) != 0) {
            ret = -1;
            break;
          }
        }
        closure->set_promise_value(ret);
      });
  auto promise = std::make_shared<std::promise<int32_t>>();
  closure->add_promise(promise);
  std::future<int> fut = promise->get_future();
  for (size_t i = 0; i < request_call_num; ++i) {
    closure->request(i)->set_cmd_id(cmd_id);
    closure->request(i)->set_table_id(table_id);
    closure->request(i)->set_client_id(_client_id);
    for (const auto &param : params) {
      closure->request(i)->add_params(param);
    }
    PsService_Stub rpc_stub(GetCmdChannel(i));
    closure->cntl(i)->set_timeout_ms(
        10800000 * 2);  // command messages (save/load) use a long timeout
    rpc_stub.service(closure->cntl(i), closure->request(i),
                     closure->response(i), closure);
  }
  return fut;
}

std::future<int32_t> BrpcPsClient::SendSaveCmd(
    uint32_t table_id, int cmd_id, const std::vector<std::string> &params) {
  size_t request_call_num = _server_channels.size();
  DownpourBrpcClosure *closure = new DownpourBrpcClosure(
      request_call_num, [request_call_num, cmd_id](void *done) {
        int ret = 0;
        uint32_t feasign_size = 0;
        auto *closure = reinterpret_cast<DownpourBrpcClosure *>(done);
        for (size_t i = 0; i < request_call_num; ++i) {
          int32_t shard_feasign_size = closure->check_save_response(i, cmd_id);
          if (shard_feasign_size < 0) {
            ret = -1;
            break;
          }
          feasign_size += shard_feasign_size;
        }
        if (ret == 0) {
          closure->set_promise_value(feasign_size);
        } else {
          closure->set_promise_value(ret);
        }
      });
  auto promise = std::make_shared<std::promise<int32_t>>();
  closure->add_promise(promise);
  std::future<int> fut = promise->get_future();
  for (size_t i = 0; i < request_call_num; ++i) {
    closure->request(i)->set_cmd_id(cmd_id);
    closure->request(i)->set_table_id(table_id);
    closure->request(i)->set_client_id(_client_id);
    for (const auto &param : params) {
      closure->request(i)->add_params(param);
    }
    PsService_Stub rpc_stub(GetCmdChannel(i));
    closure->cntl(i)->set_timeout_ms(
        10800000);  // command messages (save/load) use a long timeout
    rpc_stub.service(closure->cntl(i), closure->request(i),
                     closure->response(i), closure);
  }
  return fut;
}

std::future<int32_t> BrpcPsClient::Shrink(uint32_t table_id,
                                          const std::string threshold) {
  return SendCmd(table_id, PS_SHRINK_TABLE, {threshold});
}

std::future<int32_t> BrpcPsClient::Load(const std::string &epoch,
                                        const std::string &mode) {
  return SendCmd(-1, PS_LOAD_ALL_TABLE, {epoch, mode});
}
std::future<int32_t> BrpcPsClient::Load(uint32_t table_id,
                                        const std::string &epoch,
                                        const std::string &mode) {
  return SendCmd(table_id, PS_LOAD_ONE_TABLE, {epoch, mode});
}

std::future<int32_t> BrpcPsClient::Save(const std::string &epoch,
                                        const std::string &mode) {
  VLOG(1) << "BrpcPsClient::save path " << epoch;
  return SendSaveCmd(-1, PS_SAVE_ALL_TABLE, {epoch, mode});
}
std::future<int32_t> BrpcPsClient::Save(uint32_t table_id,
                                        const std::string &epoch,
                                        const std::string &mode) {
  VLOG(1) << "BrpcPsClient::save one table path " << epoch << " table_id "
          << table_id;
  return SendSaveCmd(table_id, PS_SAVE_ONE_TABLE, {epoch, mode});
}

std::future<int32_t> BrpcPsClient::Clear() {
  return SendCmd(-1, PS_CLEAR_ALL_TABLE, {});
}
std::future<int32_t> BrpcPsClient::Clear(uint32_t table_id) {
  return SendCmd(table_id, PS_CLEAR_ONE_TABLE, {});
}

std::future<int32_t> BrpcPsClient::Flush() {
  VLOG(0) << "BrpcPsClient::flush begin";
  _flushing = true;
  std::promise<int> promise;
  std::future<int32_t> fut = promise.get_future();
  do {
    VLOG(3) << "wait _async_call_num:" << _async_call_num;
    usleep(100000);  // sleep 100ms wait async end
  } while (_async_call_num > 0);
  VLOG(1) << "flush _async_call_num = 0";
  promise.set_value(0);
  _flushing = false;
  VLOG(0) << "BrpcPsClient::flush done";
  PrintQueueSize();
  return fut;
}

void BrpcPsClient::PrintQueueSize() {
  for (auto &push_sparse_task_itr : _push_sparse_task_queue_map) {
    auto table_id = push_sparse_task_itr.first;
    auto queue_size = push_sparse_task_itr.second->Size();
    VLOG(0) << "BrpcPsClient::PrintQueueSize: table " << table_id
            << " size: " << queue_size;
  }

  for (auto &task_queue_itr : _push_dense_task_queue_map) {
    auto table_id = task_queue_itr.first;
    auto queue_size = task_queue_itr.second->Size();
    VLOG(0) << "BrpcPsClient::PrintQueueSize: table " << table_id
            << " size: " << queue_size;
  }
}

void BrpcPsClient::PrintQueueSizeThread() {
  while (_running) {
    usleep(1000000 * 60 * 2);
    PrintQueueSize();
  }
}

void BrpcPsClient::FinalizeWorker() {
  Flush();
  VLOG(0) << "BrpcPsClient::FinalizeWorker begin join thread";
  _running = false;
  _async_push_dense_thread.join();
  _async_push_sparse_thread.join();
  // _print_thread.join();
  VLOG(0) << "BrpcPsClient::FinalizeWorker begin join server";
  _server.Stop(1000);
  _server.Join();
  _server_started = false;
  VLOG(0) << "BrpcPsClient::FinalizeWorker done";
}

std::future<int32_t> BrpcPsClient::StopServer() {
  return SendCmd(-1, PS_STOP_SERVER, {});
}

std::future<int32_t> BrpcPsClient::StartProfiler() {
  return SendCmd(-1, PS_START_PROFILER, {});
}

std::future<int32_t> BrpcPsClient::StopProfiler() {
  return SendCmd(-1, PS_STOP_PROFILER, {});
}

std::future<int32_t> BrpcPsClient::Barrier(size_t table_id,
                                           uint32_t barrier_type) {
  return SendCmd(table_id, PS_BARRIER, {std::to_string(barrier_type)});
}

std::future<int32_t> BrpcPsClient::PullGeoParam(size_t table_id,
                                                std::vector<float> *values,
                                                std::vector<uint64_t> *keys,
                                                int pserver_idx) {
  auto *accessor = GetTableAccessor(table_id);
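  // The pull response attachment is laid out as [shard_nums:uint32] followed
  // by shard_nums uint64 keys and shard_nums * update_dim float values; the
  // callback below parses it into *keys and *values.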
  DownpourBrpcClosure *closure =
      new DownpourBrpcClosure(1, [keys, values, accessor](void *done) {
        int ret = 0;
        auto *closure = reinterpret_cast<DownpourBrpcClosure *>(done);
        uint32_t shard_nums = 0;
        if (closure->check_response(0, PS_PULL_GEO_PARAM) != 0) {
          ret = -1;
        }
        auto &res_io_buffer = closure->cntl(0)->response_attachment();
        butil::IOBufBytesIterator io_buffer_itr(res_io_buffer);
        io_buffer_itr.copy_and_forward(reinterpret_cast<void *>(&shard_nums),
                                       sizeof(uint32_t));
        keys->resize(shard_nums);
        values->resize(shard_nums * accessor->GetTableInfo(UPDATE_DIM));
        io_buffer_itr.copy_and_forward((void *)(keys->data()),  // NOLINT
                                       sizeof(uint64_t) * shard_nums);
        io_buffer_itr.copy_and_forward(
            (void *)(values->data()),  // NOLINT
            shard_nums * accessor->GetTableInfo(UPDATE_SIZE));
        closure->set_promise_value(ret);
      });
  auto promise = std::make_shared<std::promise<int32_t>>();
  closure->add_promise(promise);
  std::future<int> fut = promise->get_future();

  closure->request(0)->set_cmd_id(PS_PULL_GEO_PARAM);
  closure->request(0)->set_table_id(table_id);
  closure->request(0)->set_client_id(_client_id);
  PsService_Stub rpc_stub(GetCmdChannel(pserver_idx));
  closure->cntl(0)->set_log_id(butil::gettimeofday_ms());
  rpc_stub.service(closure->cntl(0), closure->request(0), closure->response(0),
                   closure);
  return fut;
}

// for GEO
std::future<int32_t> BrpcPsClient::PushSparseParam(size_t table_id,
                                                   const uint64_t *keys,
                                                   const float **update_values,
                                                   size_t num, void *done) {
  auto *accessor = GetTableAccessor(table_id);
  // Send the RPC requests.
  DownpourBrpcClosure *closure = reinterpret_cast<DownpourBrpcClosure *>(done);
  auto promise = std::make_shared<std::promise<int32_t>>();
  closure->add_promise(promise);
  std::future<int> fut = promise->get_future();
  size_t request_call_num = _server_channels.size();
  std::vector<std::vector<uint64_t>> ids;
  std::vector<std::vector<const float *>> value_ptrs;
  ids.resize(request_call_num);
  value_ptrs.resize(request_call_num);

  for (size_t i = 0; i < num; ++i) {
    size_t pserver_idx = keys[i] % request_call_num;
    ids[pserver_idx].push_back(keys[i]);
    value_ptrs[pserver_idx].push_back(update_values[i]);
  }
  for (size_t shard_idx = 0; shard_idx < request_call_num; ++shard_idx) {
    auto kvs = ids[shard_idx];
    auto value_ptr = value_ptrs[shard_idx];
    size_t kv_size = kvs.size();
    uint32_t value_size = accessor->GetTableInfo(UPDATE_SIZE);
    // Build and send the RPC request for this shard.
    auto *push_request = closure->request(shard_idx);
    push_request->set_cmd_id(PS_PUSH_SPARSE_PARAM);
    push_request->set_table_id(table_id);
    push_request->set_client_id(_client_id);
    push_request->add_params((char *)&kv_size, sizeof(uint32_t));  // NOLINT
    auto *push_data = push_request->mutable_data();
    push_data->resize(kv_size * (sizeof(uint64_t) + value_size));
    char *push_data_ptr = const_cast<char *>(push_data->data());
    memcpy(push_data_ptr, kvs.data(), kv_size * sizeof(uint64_t));
    push_data_ptr += kv_size * sizeof(uint64_t);
    for (size_t i = 0; i < kv_size; ++i) {
      memcpy(push_data_ptr, value_ptr[i], value_size);
      push_data_ptr += value_size;
    }
    PsService_Stub rpc_stub(GetSparseChannel(shard_idx));
    closure->cntl(shard_idx)->set_request_compress_type(
        (brpc::CompressType)FLAGS_pserver_communicate_compress_type);
    rpc_stub.service(closure->cntl(shard_idx), closure->request(shard_idx),
                     closure->response(shard_idx), closure);
  }
  return fut;
}

std::future<int32_t> BrpcPsClient::PullDense(Region *regions, size_t region_num,
                                             size_t table_id) {
  auto timer = std::make_shared<CostTimer>("pserver_client_pull_dense");
  auto *accessor = GetTableAccessor(table_id);
  auto fea_dim = accessor->GetTableInfo(FEA_DIM);
  size_t request_call_num = _server_channels.size();
  uint32_t num_per_shard = DenseDimPerShard(fea_dim, request_call_num);
  // The callback fills each shard's result into the regions, in order.
  DownpourBrpcClosure *closure = new DownpourBrpcClosure(
      request_call_num, [request_call_num, num_per_shard, regions, region_num,
                         accessor](void *done) {
        int ret = 0;
        size_t region_idx = 0;       // index of the region being filled
        size_t region_data_idx = 0;  // data offset within the current region
        auto *closure = reinterpret_cast<DownpourBrpcClosure *>(done);
        size_t shard_data_size =
            num_per_shard * accessor->GetTableInfo(SELECT_SIZE);
        for (size_t i = 0; i < request_call_num; ++i) {
          if (closure->check_response(i, PS_PULL_DENSE_TABLE) != 0) {
            ret = -1;
            break;
          }
          auto &res_io_buffer = closure->cntl(i)->response_attachment();

          butil::IOBufBytesIterator io_buffer_itr(res_io_buffer);
          size_t shard_buffer_remain = res_io_buffer.size();
          if (shard_buffer_remain != shard_data_size) {
            LOG(ERROR) << "expect res_size:" << shard_data_size
                       << ", but size:" << shard_buffer_remain
                       << ", ignore this response";
            ret = -1;
            break;
          }
          while (shard_buffer_remain > 0 && region_idx < region_num) {
            auto &region = regions[region_idx];
            if (region.size - region_data_idx >= shard_buffer_remain) {
              // remaining region space >= shard buffer data: copy it all in
              io_buffer_itr.copy_and_forward(
                  reinterpret_cast<void *>(region.data + region_data_idx),
                  shard_buffer_remain);
              region_data_idx += shard_buffer_remain;
              shard_buffer_remain = 0;
            } else if (region.size - region_data_idx == 0) {
              // region填满,切换到下一个region
              ++region_idx;
              region_data_idx = 0;
            } else {
              // region cannot hold all remaining data; copy as much as fits
              io_buffer_itr.copy_and_forward(
                  reinterpret_cast<void *>(region.data + region_data_idx),
                  region.size - region_data_idx);
              shard_buffer_remain -= (region.size - region_data_idx);
              ++region_idx;
              region_data_idx = 0;
            }
          }
        }
        closure->set_promise_value(ret);
      });
  closure->add_timer(timer);
  auto promise = std::make_shared<std::promise<int32_t>>();
  closure->add_promise(promise);
  std::future<int> fut = promise->get_future();
  for (size_t i = 0; i < request_call_num; ++i) {
    closure->request(i)->set_cmd_id(PS_PULL_DENSE_TABLE);
    closure->request(i)->set_table_id(table_id);
    closure->request(i)->set_client_id(_client_id);
    closure->request(i)->add_params((char *)&num_per_shard,  // NOLINT
                                    sizeof(num_per_shard));
    PsService_Stub rpc_stub(GetDenseChannel(i));
    rpc_stub.service(closure->cntl(i), closure->request(i),
                     closure->response(i), closure);
  }
  return fut;
}

std::future<int32_t> BrpcPsClient::PushDenseParam(const Region *regions,
                                                  size_t region_num,
                                                  size_t table_id) {
  auto *accessor = GetTableAccessor(table_id);
  size_t request_call_num = _server_channels.size();
  // 1. Split the region data across shards; the per-shard copies can then
  // run in parallel.
  std::vector<std::vector<Region>> regions_partition(request_call_num);
  uint32_t num_per_shard =
      DenseDimPerShard(accessor->GetTableInfo(FEA_DIM), request_call_num);
  size_t shard_data_size = num_per_shard * accessor->GetTableInfo(UPDATE_SIZE);
  size_t current_region_idx = 0;
  size_t current_region_data_idx = 0;
  for (size_t i = 0; i < request_call_num; ++i) {
    size_t shard_data_remain_size = shard_data_size;
    while (shard_data_remain_size > 0 && current_region_idx < region_num) {
      const auto &region = regions[current_region_idx];
      size_t region_remain_size = region.size - current_region_data_idx;
      if (shard_data_remain_size >= region_remain_size) {
        regions_partition[i].push_back(
            Region(region.data + current_region_data_idx, region_remain_size));
        ++current_region_idx;
        current_region_data_idx = 0;
        shard_data_remain_size -= region_remain_size;
      } else {
        regions_partition[i].push_back(Region(
            region.data + current_region_data_idx, shard_data_remain_size));
        current_region_data_idx += shard_data_remain_size;
        shard_data_remain_size = 0;
      }
    }
  }

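  // Example: with two regions of 1200 and 400 bytes and a shard_data_size of
  // 800 bytes, shard 0 receives region0[0, 800) and shard 1 receives
  // region0[800, 1200) plus all of region1.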
  DownpourBrpcClosure *closure =
      new DownpourBrpcClosure(request_call_num, [request_call_num](void *done) {
        int ret = 0;
        auto *closure = reinterpret_cast<DownpourBrpcClosure *>(done);
        for (size_t i = 0; i < request_call_num; ++i) {
          if (closure->check_response(i, PS_PUSH_DENSE_PARAM) != 0) {
            ret = -1;
            break;
          }
        }
        closure->set_promise_value(ret);
      });
  auto promise = std::make_shared<std::promise<int32_t>>();
  closure->add_promise(promise);
  std::future<int> fut = promise->get_future();
  static const int REGION_ASSIGN_BUFFER_SIZE = 1024 * 10;
  // Zero buffer used to pad shard data.
  static char region_assign_buffer[REGION_ASSIGN_BUFFER_SIZE];
  // Copy the data and issue the requests for every shard.
  for (size_t i = 0; i < request_call_num; ++i) {
    closure->request(i)->set_cmd_id(PS_PUSH_DENSE_PARAM);
    closure->request(i)->set_table_id(table_id);
    closure->request(i)->set_client_id(_client_id);
    auto &request_buffer = closure->cntl(i)->request_attachment();
    request_buffer.append(reinterpret_cast<void *>(&num_per_shard),
                          sizeof(uint32_t));
    auto &region_list = regions_partition[i];
    size_t fill_remain_size = shard_data_size;
    for (auto &region : region_list) {
      fill_remain_size -= region.size;
      request_buffer.append(reinterpret_cast<void *>(region.data), region.size);
    }
    // Pad so every shard sends exactly shard_data_size bytes.
    while (fill_remain_size > 0) {
      size_t fill_num = fill_remain_size > REGION_ASSIGN_BUFFER_SIZE
                            ? REGION_ASSIGN_BUFFER_SIZE
                            : fill_remain_size;
      request_buffer.append(reinterpret_cast<void *>(region_assign_buffer),
                            fill_num);
      fill_remain_size -= fill_num;
    }
    PsService_Stub rpc_stub(GetDenseChannel(i));
    rpc_stub.service(closure->cntl(i), closure->request(i),
                     closure->response(i), closure);
  }
  return fut;
}

std::future<int32_t> BrpcPsClient::PushSparseRawGradient(
    size_t table_id, const uint64_t *keys, const float **update_values,
    size_t num, void *done) {
  auto *accessor = GetTableAccessor(table_id);
  // Send the RPC requests.
  DownpourBrpcClosure *closure = reinterpret_cast<DownpourBrpcClosure *>(done);
  auto promise = std::make_shared<std::promise<int32_t>>();
  closure->add_promise(promise);
  std::future<int> fut = promise->get_future();

  size_t request_call_num = _server_channels.size();
  std::vector<std::vector<uint64_t>> ids;
  std::vector<std::vector<const float *>> value_ptrs;
  ids.resize(request_call_num);
  value_ptrs.resize(request_call_num);

  const auto &server_param = _config.server_param().downpour_server_param();
  uint64_t shard_num = FLAGS_pserver_sparse_table_shard_num;
  for (int i = 0; i < server_param.downpour_table_param_size(); ++i) {
    const auto &table_param = server_param.downpour_table_param(i);
    if (table_param.table_id() == table_id) {
      shard_num = table_param.shard_num();
      break;
    }
  }

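  // Keys are routed with get_sparse_shard() using the table's configured
  // shard_num, so updates for a given key always reach the server that owns
  // its shard.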
  for (size_t i = 0; i < num; ++i) {
    size_t pserver_idx = get_sparse_shard(shard_num, request_call_num, keys[i]);
    ids[pserver_idx].push_back(keys[i]);
    value_ptrs[pserver_idx].push_back(update_values[i]);
  }

  for (size_t shard_idx = 0; shard_idx < request_call_num; ++shard_idx) {
    auto kvs = ids[shard_idx];
    auto value_ptr = value_ptrs[shard_idx];

    size_t kv_size = kvs.size();
    uint32_t value_size = accessor->GetTableInfo(UPDATE_SIZE);

    // Build and send the RPC request for this shard.
    auto *push_request = closure->request(shard_idx);
    push_request->set_cmd_id(PS_PUSH_SPARSE_TABLE);
    push_request->set_table_id(table_id);
    push_request->set_client_id(_client_id);
    push_request->add_params((char *)&kv_size, sizeof(uint32_t));  // NOLINT
    auto *push_data = push_request->mutable_data();
    push_data->resize(kv_size * (sizeof(uint64_t) + value_size));
    char *push_data_ptr = const_cast<char *>(push_data->data());
    memcpy(push_data_ptr, kvs.data(), kv_size * sizeof(uint64_t));
    push_data_ptr += kv_size * sizeof(uint64_t);

    for (size_t i = 0; i < kv_size; ++i) {
      memcpy(push_data_ptr, value_ptr[i], value_size);
      push_data_ptr += value_size;
    }
    PsService_Stub rpc_stub(GetSparseChannel(shard_idx));
    closure->cntl(shard_idx)->set_request_compress_type(
        (brpc::CompressType)FLAGS_pserver_communicate_compress_type);
    rpc_stub.service(closure->cntl(shard_idx), closure->request(shard_idx),
                     closure->response(shard_idx), closure);
  }
  return fut;
}

std::future<int32_t> BrpcPsClient::PushDenseRawGradient(
    int table_id, float *total_send_data, size_t total_send_data_size,
    void *done) {
  size_t request_call_num = _server_channels.size();
  DownpourBrpcClosure *closure = reinterpret_cast<DownpourBrpcClosure *>(done);
  auto promise = std::make_shared<std::promise<int32_t>>();
  closure->add_promise(promise);
  std::future<int> fut = promise->get_future();
  auto *accessor = GetTableAccessor(table_id);
  uint32_t num_per_shard =
      DenseDimPerShard(accessor->GetTableInfo(FEA_DIM), request_call_num);
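  // Each request body is [num_per_shard:uint32] followed by num_per_shard
  // floats, i.e. this shard's contiguous slice of total_send_data.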
  for (size_t i = 0; i < request_call_num; ++i) {
    closure->request(i)->set_cmd_id(PS_PUSH_DENSE_TABLE);
    closure->request(i)->set_table_id(table_id);
    closure->request(i)->set_client_id(_client_id);
    auto *push_data = closure->request(i)->mutable_data();
    push_data->clear();
    push_data->resize(sizeof(uint32_t) + num_per_shard * sizeof(float));
    char *push_data_ptr = const_cast<char *>(push_data->data());
    memcpy(push_data_ptr, &num_per_shard, sizeof(uint32_t));
    memcpy(push_data_ptr + sizeof(uint32_t),
           total_send_data + i * num_per_shard, num_per_shard * sizeof(float));
    // closure->cntl(i)->set_request_compress_type(
    //     (brpc::CompressType)FLAGS_pserver_communicate_compress_type);
    PsService_Stub rpc_stub(GetDenseChannel(i));
    rpc_stub.service(closure->cntl(i), closure->request(i),
                     closure->response(i), closure);
  }
  return fut;
}

std::future<int32_t> BrpcPsClient::PushGlobalStep(int table_id,
                                                  int64_t *total_send_data,
                                                  void *done) {
  size_t request_call_num = _server_channels.size();
  DownpourBrpcClosure *closure = reinterpret_cast<DownpourBrpcClosure *>(done);
  auto promise = std::make_shared<std::promise<int32_t>>();
  closure->add_promise(promise);
  std::future<int> fut = promise->get_future();
  for (size_t i = 0; i < request_call_num; ++i) {
    closure->request(i)->set_cmd_id(PS_PUSH_GLOBAL_STEP);
    closure->request(i)->set_table_id(table_id);
    closure->request(i)->set_client_id(_client_id);
    auto *push_data = closure->request(i)->mutable_data();
    push_data->clear();
    int32_t num_per_shard = 1;
    push_data->resize(sizeof(uint32_t) + num_per_shard * sizeof(int64_t));
    char *push_data_ptr = const_cast<char *>(push_data->data());
    memcpy(push_data_ptr, &num_per_shard, sizeof(uint32_t));
    memcpy(push_data_ptr + sizeof(uint32_t), total_send_data,
           num_per_shard * sizeof(int64_t));

    PsService_Stub rpc_stub(GetDenseChannel(i));
    rpc_stub.service(closure->cntl(i), closure->request(i),
                     closure->response(i), closure);
  }
  return fut;
}

std::future<int32_t> BrpcPsClient::PullSparse(float **select_values,
                                              size_t table_id,
                                              const uint64_t *keys, size_t num,
                                              bool is_training) {
  auto timer = std::make_shared<CostTimer>("pserver_client_pull_sparse");
  auto local_timer =
      std::make_shared<CostTimer>("pserver_client_pull_sparse_local");
  size_t request_call_num = _server_channels.size();

  auto shard_sorted_kvs = std::make_shared<
      std::vector<std::vector<std::pair<uint64_t, float *>>>>();
  shard_sorted_kvs->resize(request_call_num);

  const auto &server_param = _config.server_param().downpour_server_param();
  uint64_t shard_num = FLAGS_pserver_sparse_table_shard_num;
  for (int i = 0; i < server_param.downpour_table_param_size(); ++i) {
    const auto &table_param = server_param.downpour_table_param(i);
    if (table_param.table_id() == table_id) {
      shard_num = table_param.shard_num();
      break;
    }
  }

  for (size_t i = 0; i < num; ++i) {
    size_t shard_id = get_sparse_shard(shard_num, request_call_num, keys[i]);
    shard_sorted_kvs->at(shard_id).push_back({keys[i], select_values[i]});
  }

  auto *accessor = GetTableAccessor(table_id);

  size_t value_size = accessor->GetTableInfo(SELECT_SIZE);

  DownpourBrpcClosure *closure = new DownpourBrpcClosure(
      request_call_num, [shard_sorted_kvs, value_size](void *done) {
        int ret = 0;
        auto *closure = reinterpret_cast<DownpourBrpcClosure *>(done);
        for (size_t i = 0; i < shard_sorted_kvs->size(); ++i) {
          if (closure->check_response(i, PS_PULL_SPARSE_TABLE) != 0) {
            ret = -1;
            break;
          }

          auto &request_kvs = shard_sorted_kvs->at(i);
          auto &res_io_buffer = closure->cntl(i)->response_attachment();
          butil::IOBufBytesIterator io_buffer_itr(res_io_buffer);
          uint64_t last_key = UINT64_MAX;
          float *last_value_data = nullptr;

          for (size_t kv_idx = 0; kv_idx < request_kvs.size(); ++kv_idx) {
            auto *kv_pair = &(request_kvs[kv_idx]);
            if (kv_pair->first == last_key) {
              memcpy(reinterpret_cast<void *>(kv_pair->second),
                     reinterpret_cast<void *>(last_value_data), value_size);
            } else {
              last_key = kv_pair->first;
              last_value_data = kv_pair->second;
              if (value_size !=
                  io_buffer_itr.copy_and_forward(
                      reinterpret_cast<void *>(last_value_data), value_size)) {
                LOG(WARNING) << "res data is lack or not in format";
                ret = -1;
                break;
              }
            }
          }
        }
        closure->set_promise_value(ret);
      });
  closure->add_timer(timer);
  auto promise = std::make_shared<std::promise<int32_t>>();
  closure->add_promise(promise);
  std::future<int> fut = promise->get_future();

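  // Sort each shard's keys and coalesce duplicates: every unique key is sent
  // once together with its repeat count (keys_counter); the response callback
  // then copies the single returned value into each duplicate's slot.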
  for (size_t i = 0; i < request_call_num; ++i) {
    auto &sorted_kvs = shard_sorted_kvs->at(i);
    std::sort(sorted_kvs.begin(), sorted_kvs.end(),
              [](const std::pair<uint64_t, float *> &k1,
                 const std::pair<uint64_t, float *> &k2) {
                return k1.first < k2.first;
              });

    uint64_t last_key = UINT64_MAX;
    uint32_t kv_request_count = 0;
    size_t sorted_kv_size = sorted_kvs.size();
    auto &request_buffer = closure->cntl(i)->request_attachment();

    request_buffer.append(reinterpret_cast<void *>(&is_training), sizeof(bool));
    std::vector<uint32_t> keys_counter;
    keys_counter.reserve(sorted_kv_size);

    for (size_t kv_idx = 0; kv_idx < sorted_kv_size; ++kv_idx) {
      ++kv_request_count;
      uint32_t keys = 1;
      last_key = sorted_kvs[kv_idx].first;
      request_buffer.append(reinterpret_cast<void *>(&last_key),
                            sizeof(uint64_t));
      while (kv_idx < sorted_kv_size - 1 &&
             last_key == sorted_kvs[kv_idx + 1].first) {
        ++kv_idx;
        ++keys;
      }
      keys_counter.push_back(keys);
    }

    request_buffer.append(reinterpret_cast<void *>(keys_counter.data()),
                          sizeof(uint32_t) * keys_counter.size());

    if (kv_request_count == 0) {
      closure->Run();
    } else {
      closure->request(i)->set_cmd_id(PS_PULL_SPARSE_TABLE);
      closure->request(i)->set_table_id(table_id);
      closure->request(i)->set_client_id(_client_id);
      closure->request(i)->add_params((char *)&kv_request_count,  // NOLINT
                                      sizeof(uint32_t));
      PsService_Stub rpc_stub(GetCmdChannel(i));
      closure->cntl(i)->set_log_id(butil::gettimeofday_ms());
      rpc_stub.service(closure->cntl(i), closure->request(i),
                       closure->response(i), closure);
    }
  }
  return fut;
}

// for GEO
std::future<int32_t> BrpcPsClient::PullSparseParam(float **select_values,
                                                   size_t table_id,
                                                   const uint64_t *keys,
                                                   size_t num,
                                                   bool is_training) {
  auto timer = std::make_shared<CostTimer>("pserver_client_pull_sparse_param");
  size_t request_call_num = _server_channels.size();

  auto shard_sorted_kvs = std::make_shared<
      std::vector<std::vector<std::pair<uint64_t, float *>>>>();
  shard_sorted_kvs->resize(request_call_num);

  for (size_t i = 0; i < num; ++i) {
    size_t shard_id = keys[i] % request_call_num;
    shard_sorted_kvs->at(shard_id).push_back({keys[i], select_values[i]});
  }

  auto *accessor = GetTableAccessor(table_id);
  size_t value_size = accessor->GetTableInfo(SELECT_SIZE);

  DownpourBrpcClosure *closure = new DownpourBrpcClosure(
      request_call_num, [shard_sorted_kvs, value_size](void *done) {
        int ret = 0;
        auto *closure = reinterpret_cast<DownpourBrpcClosure *>(done);
        for (size_t i = 0; i < shard_sorted_kvs->size(); ++i) {
          if (closure->check_response(i, PS_PULL_SPARSE_TABLE) != 0) {
            ret = -1;
            break;
          }

          auto &request_kvs = shard_sorted_kvs->at(i);
          auto &res_io_buffer = closure->cntl(i)->response_attachment();
          butil::IOBufBytesIterator io_buffer_itr(res_io_buffer);
          uint64_t last_key = UINT64_MAX;
          float *last_value_data = nullptr;

          // can remove sort&unique
          for (size_t kv_idx = 0; kv_idx < request_kvs.size(); ++kv_idx) {
            auto *kv_pair = &(request_kvs[kv_idx]);
            if (kv_pair->first == last_key) {
              memcpy(reinterpret_cast<void *>(kv_pair->second),
                     reinterpret_cast<void *>(last_value_data), value_size);
            } else {
              last_key = kv_pair->first;
              last_value_data = kv_pair->second;
              if (value_size !=
                  io_buffer_itr.copy_and_forward(
                      reinterpret_cast<void *>(last_value_data), value_size)) {
                LOG(WARNING) << "res data is lack or not in format";
                ret = -1;
                break;
              }
            }
          }
        }
        closure->set_promise_value(ret);
      });
  closure->add_timer(timer);
  auto promise = std::make_shared<std::promise<int32_t>>();
  closure->add_promise(promise);
  std::future<int> fut = promise->get_future();

  for (size_t i = 0; i < request_call_num; ++i) {
    auto &sorted_kvs = shard_sorted_kvs->at(i);
    std::sort(sorted_kvs.begin(), sorted_kvs.end(),
              [](const std::pair<uint64_t, float *> &k1,
                 const std::pair<uint64_t, float *> &k2) {
                return k1.first < k2.first;
              });

    uint64_t last_key = UINT64_MAX;
    uint32_t kv_request_count = 0;
    size_t sorted_kv_size = sorted_kvs.size();
    auto &request_buffer = closure->cntl(i)->request_attachment();

    request_buffer.append(reinterpret_cast<void *>(&is_training), sizeof(bool));
    std::vector<uint32_t> keys_counter;
    keys_counter.reserve(sorted_kv_size);

    for (size_t kv_idx = 0; kv_idx < sorted_kv_size; ++kv_idx) {
      ++kv_request_count;
      uint32_t keys = 1;
      last_key = sorted_kvs[kv_idx].first;
      request_buffer.append(reinterpret_cast<void *>(&last_key),
                            sizeof(uint64_t));
      while (kv_idx < sorted_kv_size - 1 &&
             last_key == sorted_kvs[kv_idx + 1].first) {
        ++kv_idx;
        ++keys;
      }
      keys_counter.push_back(keys);
    }

    request_buffer.append(reinterpret_cast<void *>(keys_counter.data()),
                          sizeof(uint32_t) * keys_counter.size());

    if (kv_request_count == 0) {
      closure->Run();
    } else {
      closure->request(i)->set_cmd_id(PS_PULL_SPARSE_TABLE);
      closure->request(i)->set_table_id(table_id);
      closure->request(i)->set_client_id(_client_id);
      closure->request(i)->add_params((char *)&kv_request_count,  // NOLINT
                                      sizeof(uint32_t));
      PsService_Stub rpc_stub(GetCmdChannel(i));
      closure->cntl(i)->set_log_id(butil::gettimeofday_ms());
      rpc_stub.service(closure->cntl(i), closure->request(i),
                       closure->response(i), closure);
    }
  }
  return fut;
}

std::future<int32_t> BrpcPsClient::SendClient2ClientMsg(
    int msg_type, int to_client_id, const std::string &msg) {
  auto promise = std::make_shared<std::promise<int32_t>>();
  std::future<int> fut = promise->get_future();
  if (to_client_id >= static_cast<int>(_client_channels.size())) {
    VLOG(0) << "to_client_id is out of range clients, which size is "
            << _client_channels.size();
    promise->set_value(-1);
    return fut;
  }
  auto *closure = new DownpourBrpcClosure(1, [msg_type](void *done) {
    auto *closure = reinterpret_cast<DownpourBrpcClosure *>(done);
    int32_t ret = closure->check_response(0, msg_type + 1000);
    closure->set_promise_value(ret);
  });
  closure->add_promise(promise);
  closure->request(0)->set_cmd_id(msg_type);
  closure->request(0)->set_client_id(_client_id);
  closure->request(0)->set_data(msg);
  PsService_Stub rpc_stub(_client_channels[to_client_id].get());
  rpc_stub.service(closure->cntl(0), closure->request(0), closure->response(0),
                   closure);
  return fut;
}

std::future<int32_t> BrpcPsClient::PushSparseRawGradientPartial(
    size_t table_id, const uint64_t *keys, const float **update_values,
    uint32_t num, void *done, int pserver_idx) {
  auto *accessor = GetTableAccessor(table_id);
  size_t value_size = accessor->GetTableInfo(UPDATE_SIZE);
  DownpourBrpcClosure *closure = reinterpret_cast<DownpourBrpcClosure *>(done);
  auto promise = std::make_shared<std::promise<int32_t>>();
  closure->add_promise(promise);
  std::future<int> fut = promise->get_future();

  // Build and send the RPC request.
  auto *push_request = closure->request(0);
  push_request->set_cmd_id(PS_PUSH_SPARSE_TABLE);
  push_request->set_table_id(table_id);
  push_request->set_client_id(_client_id);
  push_request->add_params((char *)&num, sizeof(uint32_t));  // NOLINT
  auto *push_data = push_request->mutable_data();
  push_data->resize(num * (sizeof(uint64_t) + value_size));
  char *push_data_ptr = const_cast<char *>(push_data->data());
  memcpy(push_data_ptr, keys, num * sizeof(uint64_t));
  push_data_ptr += num * sizeof(uint64_t);
  for (uint32_t i = 0; i < num; ++i) {
    memcpy(push_data_ptr, update_values[i], value_size);
    push_data_ptr += value_size;
  }
  PsService_Stub rpc_stub(GetSparseChannel(pserver_idx));
  closure->cntl(0)->set_request_compress_type(
      (brpc::CompressType)FLAGS_pserver_communicate_compress_type);
  rpc_stub.service(closure->cntl(0), closure->request(0), closure->response(0),
                   closure);
  return fut;
}

int32_t BrpcPsClient::RecvAndSaveTable(const uint64_t table_id,
                                       const std::string &path) {
  // get var information
  std::string var_name = "";
  int64_t var_num = 0;
  int64_t var_shape = 0;
  std::string table_class;
  const auto &worker_param = _config.worker_param().downpour_worker_param();
  for (size_t i = 0; i < worker_param.downpour_table_param_size(); ++i) {
    if (worker_param.downpour_table_param(i).table_id() == table_id) {
      var_name = worker_param.downpour_table_param(i).common().table_name();
      var_num = worker_param.downpour_table_param(i).common().table_num();
      var_shape = worker_param.downpour_table_param(i).common().table_dim();
      table_class = worker_param.downpour_table_param(i).table_class();
      break;
    }
  }

  PADDLE_ENFORCE_NE(
      var_name, "",
      platform::errors::InvalidArgument(
          "Cannot find table id %d to save variables.", table_id));

  std::string var_store = path;
  MkDirRecursively(var_store.c_str());

  // pull sparse from server
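  // keys 0 .. var_num-1 enumerate every row of the table, so one bulk pull
  // fetches the whole parameter matrix into save_huge_vec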
  std::vector<float> save_huge_vec(var_num * var_shape);
  std::vector<uint64_t> save_key(var_num);
  std::vector<float *> save_vec;
  for (size_t i = 0; i < save_key.size(); ++i) {
    save_key[i] = i;
    save_vec.push_back(save_huge_vec.data() + i * var_shape);
  }

  VLOG(2) << "RecvAndSaveTable: table_class: " << table_class;
  // TODO(zhaocaibei123): new GeoBrpcPSClient, move this to its
  // RecvAndSaveTable
  if (table_class == "MemorySparseGeoTable") {
    auto status =
        PullSparseParam(reinterpret_cast<float **>(save_vec.data()), table_id,
                        save_key.data(), save_key.size(), true);
    status.wait();
  } else {
    auto status = PullSparse(reinterpret_cast<float **>(save_vec.data()),
                             table_id, save_key.data(), save_key.size(), true);
    status.wait();
  }

  // create lod tensor
  std::shared_ptr<framework::Scope> scope;
  scope.reset(new framework::Scope());
  auto place = platform::CPUPlace();
  platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
  auto &dev_ctx = *pool.Get(place);

  framework::Variable *var = scope->Var(var_name);
  framework::LoDTensor *var_tensor = var->GetMutable<framework::LoDTensor>();

  std::vector<int64_t> vec_dim = {var_num, var_shape};
  var_tensor->Resize(phi::make_ddim(vec_dim));

  // copy and save
  float *tensor_data = var_tensor->mutable_data<float>(place);
  memcpy(tensor_data, save_huge_vec.data(),
         var_num * var_shape * sizeof(float));

  std::string file_name = string::Sprintf("%s/%s", var_store, var_name);
  std::ofstream fout(file_name, std::ios::binary);
  PADDLE_ENFORCE_EQ(static_cast<bool>(fout), true,
                    platform::errors::Unavailable(
                        "Cannot open %s to save variables.", file_name));

  framework::SerializeToStream(fout, *var_tensor, dev_ctx);
  fout.close();

  return 0;
}

std::future<int32_t> BrpcPsClient::PushSparse(size_t table_id,
                                              const uint64_t *keys,
                                              const float **update_values,
                                              size_t num) {
  auto push_timer = std::make_shared<CostTimer>("pserver_client_push_sparse");
  CostTimer parse_timer("pserver_client_push_sparse_parse");
  int push_sparse_async_num = _push_sparse_task_queue_map[table_id]->Size();
  while (push_sparse_async_num > FLAGS_pserver_max_async_call_num) {
    //    LOG(INFO) << "PushSparse waiting for async_call_num to drain,
    //    task_num:"
    //              << push_sparse_async_num
    //              << ", max_task_limit:" << FLAGS_pserver_max_async_call_num;
    usleep(5000);  // 5ms
    push_sparse_async_num = _push_sparse_task_queue_map[table_id]->Size();
  }
  auto put_timer = std::make_shared<CostTimer>("client_push_sparse_put");
  thread_local std::vector<std::vector<std::pair<uint64_t, const float *>>>
      shard_sorted_kv_list;
  auto *accessor = GetTableAccessor(table_id);
  size_t request_call_num = _server_channels.size();
  shard_sorted_kv_list.resize(request_call_num);
  for (auto &x : shard_sorted_kv_list) {
    x.clear();
  }
  const auto &server_param = _config.server_param().downpour_server_param();
  uint64_t shard_num = FLAGS_pserver_sparse_table_shard_num;
  for (int i = 0; i < server_param.downpour_table_param_size(); ++i) {
    const auto &table_param = server_param.downpour_table_param(i);
    if (table_param.table_id() == table_id) {
      shard_num = table_param.shard_num();
      break;
    }
  }
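  // route each key to its shard; keys that land on the same shard are
  // batched into a single request to that server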
  for (size_t i = 0; i < num; ++i) {
    size_t shard_id = get_sparse_shard(shard_num, request_call_num, keys[i]);
    shard_sorted_kv_list[shard_id].push_back({keys[i], update_values[i]});
  }
  auto sparse_task_data = _sparse_task_pool.get();
  sparse_task_data->shared_data.resize(request_call_num);
  auto async_task = new SparseAsyncTask(sparse_task_data, table_id, push_timer);

  for (size_t i = 0; i < request_call_num; ++i) {
    auto &sorted_kv_list = shard_sorted_kv_list[i];
    size_t sorted_kv_size = sorted_kv_list.size();
    auto &shard_kv_data = async_task->data()->shared_data[i];
    shard_kv_data.key_list.resize(sorted_kv_size);
    shard_kv_data.value_list.resize(sorted_kv_size);

    if (sorted_kv_size == 0) {
      shard_kv_data.kv_num = 0;
      continue;
    }
    uint32_t value_size = accessor->GetTableInfo(UPDATE_SIZE);
    for (size_t kv_idx = 0; kv_idx < sorted_kv_size; ++kv_idx) {
      shard_kv_data.key_list[kv_idx] = sorted_kv_list[kv_idx].first;
      shard_kv_data.value_list[kv_idx].assign(
          (const char *)sorted_kv_list[kv_idx].second, value_size);
    }
    shard_kv_data.kv_num = sorted_kv_size;
  }

  std::future<int> fut = async_task->get_future();
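  // hand the task to PushSparseTaskConsume, whose consumer thread merges
  // queued tasks and issues the actual RPCs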
  _push_sparse_task_queue_map[table_id]->Put(std::move(async_task));
  return fut;
}

void BrpcPsClient::PushSparseTaskConsume() {
  uint64_t merge_size = FLAGS_pserver_push_sparse_merge_limit;
  std::vector<std::shared_ptr<SparseAsyncTask>> task_list;
  size_t request_call_num = _server_channels.size();
  ::ThreadPool async_push_sparse_shard_threads(
      FLAGS_pserver_sparse_merge_thread);
  while (_running) {
    auto async_start_time_ms = butil::gettimeofday_ms();
    // process the queued push tasks of every sparse table
    for (auto &push_sparse_task_itr : _push_sparse_task_queue_map) {
      auto table_id = push_sparse_task_itr.first;
      auto *accessor = GetTableAccessor(table_id);
      auto &task_queue = push_sparse_task_itr.second;
      auto queue_size = task_queue->Size();
      if (queue_size == 0) {
        continue;
      }
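      // with merging enabled, wait until more than one task is queued
      // (unless a flush is pending) so there is something to merge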
      if (merge_size > 0 && (queue_size <= 1 && _flushing == false)) {
        continue;
      }
      ++_async_call_num;

      int merge_count = 0;
      for (size_t i = 0; i < task_list.size(); ++i) {
        if (task_list[i]->data()) {
          _sparse_task_pool.push(task_list[i]->data());
        }
      }
      auto sparse_task_data = _sparse_task_pool.get();

      task_list.clear();
      int cur_merge_size = task_queue->Size();

      // task_list[0] is an empty SparseAsyncTask; the per-shard async merge
      // results are stored into it.
      sparse_task_data->shared_data.resize(request_call_num);
      auto push_timer =
          std::make_shared<CostTimer>("pserver_client_push_sparse");

      auto async_task =
          new SparseAsyncTask(sparse_task_data, table_id, push_timer);

      task_list.reserve(cur_merge_size + 1);

      task_list.push_back(
          std::move(std::shared_ptr<SparseAsyncTask>(async_task)));

      while (!task_queue->Empty() && merge_count < cur_merge_size) {
        ++merge_count;
        SparseAsyncTask *task;
        task_queue->Get(task);
        task_list.push_back(std::shared_ptr<SparseAsyncTask>(task));
      }

      _push_sparse_merge_count_map[table_id] += merge_count;

      // send once the merged task count reaches merge_size, or on flush
      std::vector<int> request_kv_num(request_call_num, 0);

      if (_push_sparse_merge_count_map[table_id] >= merge_size ||
          _flushing == true) {
        DownpourBrpcClosure *closure = new DownpourBrpcClosure(
            request_call_num, [this, request_call_num](void *done) {
              int ret = 0;
              auto *closure = reinterpret_cast<DownpourBrpcClosure *>(done);
              for (size_t i = 0; i < request_call_num; ++i) {
                if (closure->check_response(i, PS_PUSH_SPARSE_TABLE) != 0) {
                  ret = -1;
                  break;
                }
              }
              closure->set_promise_value(ret);
              --_async_call_num;
            });

        for_each(task_list.begin() + 1, task_list.end(),
                 [&request_kv_num, request_call_num,
                  closure](std::shared_ptr<SparseAsyncTask> &task) {
                   closure->add_timer(task->timer());
                   closure->add_promise(task->promise());
                 });

        CostTimer merge_timer("pserver_client_push_sparse_merge");
        auto rpc_timer =
            std::make_shared<CostTimer>("pserver_client_push_sparse_rpc");
        closure->add_timer(rpc_timer);

        std::vector<std::future<int>> merge_status(request_call_num);
        for (size_t shard_idx = 0; shard_idx < request_call_num; ++shard_idx) {
          merge_status[shard_idx] =
              async_push_sparse_shard_threads.enqueue(std::bind(
                  &BrpcPsClient::PushSparseAsyncShardPush, this, task_list,
                  request_kv_num, table_id, shard_idx, closure, accessor));
        }
        for (size_t shard_idx = 0; shard_idx < request_call_num; ++shard_idx) {
          merge_status[shard_idx].wait();
        }
        merge_status.clear();
        std::vector<std::future<int>>().swap(merge_status);
        _push_sparse_merge_count_map[table_id] = 0;

        auto queue_size = task_queue->Size();
      } else {  // threshold not reached: only do a local multi-way merge
        std::vector<std::future<int>> merge_status(request_call_num);
        for (size_t shard_idx = 0; shard_idx < request_call_num; ++shard_idx) {
          merge_status[shard_idx] =
              async_push_sparse_shard_threads.enqueue(std::bind(
                  &BrpcPsClient::PushSparseAsyncShardMerge, this, task_list,
                  request_kv_num, table_id, shard_idx, accessor));
        }
        for (size_t shard_idx = 0; shard_idx < request_call_num; ++shard_idx) {
          merge_status[shard_idx].wait();
        }

        // merge results live in task_list[0]; re-queue a copy for a later send
        auto async_task = new SparseAsyncTask(*(task_list[0].get()));

        task_queue->Put(std::move(async_task));
        --_async_call_num;
        merge_status.clear();
        std::vector<std::future<int>>().swap(merge_status);
      }
    }
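    // pace the consumer loop to the configured async push interval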
    auto wait_ms = FLAGS_pserver_async_push_sparse_interval_ms -
                   (butil::gettimeofday_ms() - async_start_time_ms);
    if (wait_ms > 0) {
      usleep(wait_ms * 1000);
    }
  }
}

void sparse_local_merge(ValueAccessor *accessor, float *merge_data,
                        const float *another_data) {
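  // expose each float of the flat rows through a per-column pointer shell so
  // the accessor's Merge(float **, const float **, num) signature can be reused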
  size_t col_num = accessor->GetTableInfo(UPDATE_SIZE) / sizeof(float);
  float *merge_data_shell[col_num];
  const float *another_data_shell[col_num];
  for (size_t i = 0; i < col_num; ++i) {
    merge_data_shell[i] = merge_data + i;
    another_data_shell[i] = another_data + i;
  }
  accessor->Merge(merge_data_shell, another_data_shell, 1);
}

int BrpcPsClient::PushSparseAsyncShardMerge(
    std::vector<std::shared_ptr<SparseAsyncTask>> &task_list,
    std::vector<int> &request_kv_num, int table_id, int shard_idx,
    ValueAccessor *accessor) {
  size_t merged_kv_count = 0;
  uint64_t min_key = UINT64_MAX;
  uint32_t value_size = accessor->GetTableInfo(UPDATE_SIZE);

  thread_local std::vector<std::pair<uint64_t, const float *>> sorted_kv_list;
  sorted_kv_list.clear();
  for (size_t i = 1; i < task_list.size(); ++i) {
    size_t kv_num = task_list[i]->data()->shared_data[shard_idx].kv_num;
    auto &key_list = task_list[i]->data()->shared_data[shard_idx].key_list;
    auto &value_list = task_list[i]->data()->shared_data[shard_idx].value_list;

    for (size_t j = 0; j < kv_num; ++j) {
      if (value_list[j].size() < value_size) {
        LOG(WARNING) << "value_list[" << j << "]: " << value_list[j].c_str()
                     << " is invalid.";
        continue;
      }
      char *task_data_ptr = const_cast<char *>(value_list[j].data());
      sorted_kv_list.push_back(
          {key_list[j], reinterpret_cast<float *>(task_data_ptr)});
    }
  }

  // sort by key & deduplicate
  std::sort(sorted_kv_list.begin(), sorted_kv_list.end(),
            [](const std::pair<uint64_t, const float *> &k1,
               const std::pair<uint64_t, const float *> &k2) {
              return k1.first < k2.first;
            });

  auto &async_task = task_list[0];
  size_t sorted_kv_size = sorted_kv_list.size();
  auto &shard_kv_data = async_task->data()->shared_data[shard_idx];
  shard_kv_data.key_list.resize(sorted_kv_size);
  shard_kv_data.value_list.resize(sorted_kv_size);

  // write the deduplicated data into this shard's packet
  if (sorted_kv_size == 0) {
    shard_kv_data.kv_num = 0;
    return 0;
  } else if (sorted_kv_size == 1) {
    shard_kv_data.kv_num = 1;
    shard_kv_data.key_list[0] = sorted_kv_list[0].first;
    shard_kv_data.value_list[0].assign((const char *)(sorted_kv_list[0].second),
                                       value_size);
    return 0;
  }

  // deduplicate: locally merge values that share a key
  uint64_t last_key = sorted_kv_list[0].first;
  const float *last_value_data = sorted_kv_list[0].second;
  float *last_merge_data = NULL;
  std::shared_ptr<char> merger_buffer(new char[value_size],
                                      array_deleter<char>());
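  // merger_buffer holds the running merged value for the current key;
  // last_merge_data points into it only after a duplicate key is seen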
  for (size_t kv_idx = 1; kv_idx < sorted_kv_size; ++kv_idx) {
    while (kv_idx < sorted_kv_size &&
           last_key == sorted_kv_list[kv_idx].first) {
      if (last_merge_data == NULL) {
        last_merge_data = reinterpret_cast<float *>(merger_buffer.get());
        memcpy(last_merge_data, last_value_data, value_size);
      }
      sparse_local_merge(accessor, last_merge_data,
                         sorted_kv_list[kv_idx].second);
      ++kv_idx;
    }
    if (last_merge_data != NULL) {
      shard_kv_data.value_list[merged_kv_count].assign(
          (const char *)last_merge_data, value_size);
      last_merge_data = NULL;
    } else {
      shard_kv_data.value_list[merged_kv_count].assign(
          (const char *)sorted_kv_list[kv_idx - 1].second, value_size);
    }
    shard_kv_data.key_list[merged_kv_count++] = last_key;
    if (kv_idx < sorted_kv_size) {
      last_key = sorted_kv_list[kv_idx].first;
      last_value_data = sorted_kv_list[kv_idx].second;
    }
    if (kv_idx == sorted_kv_size - 1) {
      shard_kv_data.value_list[merged_kv_count].assign(
          (const char *)last_value_data, value_size);
      shard_kv_data.key_list[merged_kv_count++] = last_key;
    }
  }
  shard_kv_data.kv_num = merged_kv_count;
  return 0;
}

int BrpcPsClient::PushSparseAsyncShardPush(
    std::vector<std::shared_ptr<SparseAsyncTask>> &task_list,
    std::vector<int> &request_kv_num, int table_id, int shard_idx,
    DownpourBrpcClosure *closure, ValueAccessor *accessor) {
  PushSparseAsyncShardMerge(task_list, request_kv_num, table_id, shard_idx,
                            accessor);
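  // after the merge, task_list[0] holds the deduplicated kv data for this
  // shard; serialize it into a single PS_PUSH_SPARSE_TABLE request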
  size_t merged_kv_count = task_list[0]->data()->shared_data[shard_idx].kv_num;

  auto &merged_key_list = task_list[0]->data()->shared_data[shard_idx].key_list;
  auto &merged_value_list =
      task_list[0]->data()->shared_data[shard_idx].value_list;

  // send the RPC request
  auto *push_request = closure->request(shard_idx);
  push_request->set_cmd_id(PS_PUSH_SPARSE_TABLE);
  push_request->set_table_id(table_id);
  push_request->set_client_id(_client_id);
  // send the count as an explicit uint32_t (merged_kv_count is a size_t)
  uint32_t merged_kv_num = static_cast<uint32_t>(merged_kv_count);
  push_request->add_params(reinterpret_cast<char *>(&merged_kv_num),
                           sizeof(uint32_t));
  auto *push_data = push_request->mutable_data();
  int update_size = accessor->GetTableInfo(UPDATE_SIZE);
  push_data->resize(merged_kv_count * (sizeof(uint64_t) + update_size));
  char *push_data_ptr = const_cast<char *>(push_data->data());
  memcpy(push_data_ptr, merged_key_list.data(),
         merged_kv_count * sizeof(uint64_t));
  push_data_ptr += merged_kv_count * sizeof(uint64_t);
  for (size_t i = 0; i < merged_kv_count; ++i) {
    const char *task_data_ptr = merged_value_list[i].data();
    memcpy(push_data_ptr, task_data_ptr, update_size);
    push_data_ptr += update_size;
  }
  PsService_Stub rpc_stub(GetSparseChannel(shard_idx));
  closure->cntl(shard_idx)->set_request_compress_type(
      (brpc::CompressType)FLAGS_pserver_communicate_compress_type);
  rpc_stub.service(closure->cntl(shard_idx), closure->request(shard_idx),
                   closure->response(shard_idx), closure);
  _push_sparse_merge_count_map[table_id] = 0;
  return 0;
}

std::future<int32_t> BrpcPsClient::PushDense(const Region *regions,
                                             size_t region_num,
                                             size_t table_id) {
  auto *accessor = GetTableAccessor(table_id);
  int fea_dim = accessor->GetTableInfo(FEA_DIM);
  int update_dim = accessor->GetTableInfo(UPDATE_DIM);
  auto push_timer = std::make_shared<CostTimer>("pserver_client_push_dense");
  auto parse_timer =
      std::make_shared<CostTimer>("pserver_client_push_dense_parse");
  int push_dense_async_num = _push_dense_task_queue_map[table_id]->Size();
  while (push_dense_async_num > FLAGS_pserver_max_async_call_num) {
    //    LOG(INFO) << "PushDense waiting for async_call_num to drain,
    //    task_num:"
    //              << push_dense_async_num
    //              << ", max_task_limit:" << FLAGS_pserver_max_async_call_num;
    usleep(5000);  // 5ms
    push_dense_async_num = _push_dense_task_queue_map[table_id]->Size();
  }
  auto push_dense_timer = std::make_shared<CostTimer>("push_dense_put");
  // auto dense_data = _dense_matrix_obj_pool.get();
  auto dense_data = std::make_shared<std::vector<float>>();
  auto async_task = new DenseAsyncTask(dense_data, table_id, push_timer);
  size_t request_call_num = _server_channels.size();

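  // each server owns an equal contiguous slice of the dense table;
  // num_per_shard is the per-server parameter count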
  uint32_t num_per_shard = DenseDimPerShard(fea_dim, request_call_num);
Z
zhaocaibei123 已提交
1627 1628 1629

  // copy the region data into the transposed send matrix
  async_task->data()->resize(num_per_shard * request_call_num * update_dim);
  float *data = async_task->data()->data();
  size_t data_size = async_task->data()->size();
  uint32_t pos = 0;
  for (size_t i = 0; i < region_num; ++i) {
    uint32_t data_num = regions[i].size / sizeof(float);
    CHECK(pos + data_num <= data_size)
        << "invalid dense size, cur pos[" << pos << "]"
        << " data_num[" << data_num << "] size[" << data_size << "]";
    const float *region_data = (const float *)(regions[i].data);
    memcpy(data + pos, region_data, regions[i].size);
    pos += data_num;
  }
  std::future<int> fut = async_task->get_future();
  _push_dense_task_queue_map[table_id]->Put(std::move(async_task));
  return fut;
}

void BrpcPsClient::PushDenseTaskConsume() {
  uint64_t merge_size = FLAGS_pserver_push_dense_merge_limit;
  static bool scale_gradient = FLAGS_pserver_scale_gradient_by_merge;
  ::ThreadPool async_merge_dense_threads(10);
  while (_running) {
    auto async_start_time_ms = butil::gettimeofday_ms();
    for (auto &task_queue_itr : _push_dense_task_queue_map) {
      auto &task_queue = task_queue_itr.second;
      auto queue_size = task_queue->Size();
      if (queue_size == 0) {
        continue;
      }
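      // only drain the queue once enough tasks have piled up to merge,
      // unless a flush forces an immediate send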
      if (queue_size <= merge_size && _flushing == false) {
        continue;
      }
      ++_async_call_num;
      DenseAsyncTask *task;
      task_queue->Get(task);
      auto *accessor = GetTableAccessor(task->table_id());
      // set up the request callback
      size_t request_call_num = _server_channels.size();

      DownpourBrpcClosure *closure = new DownpourBrpcClosure(
          request_call_num, [this, request_call_num](void *done) {
            int ret = 0;
            auto *closure = reinterpret_cast<DownpourBrpcClosure *>(done);
            for (size_t i = 0; i < request_call_num; ++i) {
              if (closure->check_response(i, PS_PUSH_DENSE_TABLE) != 0) {
                ret = -1;
                break;
              }
            }
            closure->set_promise_value(ret);
            --_async_call_num;
          });

      auto &total_send_data_vec = *(task->data());
      float *total_send_data =
          reinterpret_cast<float *>(total_send_data_vec.data());
      size_t total_send_data_size = total_send_data_vec.size();
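      // the first task's buffer doubles as the accumulator; the remaining
      // queued tasks are merged into it element-wise below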
      {
        CostTimer merge_timer("pserver_client_push_dense_merge");
        uint32_t merge_count = 0;
        std::vector<std::future<int>> merge_status(merge_size);
        while (!task_queue->Empty() && merge_count < merge_size) {
          auto *async_task = new DenseAsyncTask();
          task_queue->Get(async_task);
          closure->add_timer(async_task->timer());
          closure->add_promise(async_task->promise());
          merge_status[merge_count] = async_merge_dense_threads.enqueue(
              [closure, accessor, &total_send_data, total_send_data_size,
               async_task]() -> int {
                auto &tmp_task_vec = *(async_task->data());
                const float *merge_data = tmp_task_vec.data();
                accessor->Merge(&total_send_data, &merge_data,
                                total_send_data_size);
#pragma optimize("", off)
                auto *debug_closure = closure;
                auto *debug_task = async_task;
                delete async_task;
#pragma optimize("", on)
                return 0;
              });
          ++merge_count;
        }
        for (uint32_t i = 0; i < merge_count; ++i) {
          merge_status[i].wait();
        }

        VLOG(3) << "BrpcPsClient::PushDenseTaskConsume before merge "
                   "total_send_data[0] "
                << total_send_data[0] << " total_send_data[-2] "
                << total_send_data[total_send_data_size - 2]
                << " total_send_data[-1] "
                << total_send_data[total_send_data_size - 1];

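        // average the merged gradient: merge_count tasks were merged on top
        // of the original one, hence the (merge_count + 1) divisor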
        if (scale_gradient && merge_count > 1) {
          Eigen::Map<Eigen::MatrixXf> mat(total_send_data, 1,
                                          total_send_data_size);
          mat *= (1.0 / (merge_count + 1));
        }

        VLOG(3) << "BrpcPsClient::PushDenseTaskConsume after merge "
                   "total_send_data[0]"
                << total_send_data[0] << " total_send_data[-2]"
                << total_send_data[total_send_data_size - 2]
                << " total_send_data[-1]"
                << total_send_data[total_send_data_size - 1] << " merge_count "
                << merge_count;
      }
      std::shared_ptr<DenseAsyncTask> task_ptr(task);
      PushDenseRawGradient(task_ptr, total_send_data, total_send_data_size,
                           closure);
    }
    auto wait_ms = FLAGS_pserver_async_push_dense_interval_ms -
                   (butil::gettimeofday_ms() - async_start_time_ms);
    if (wait_ms > 0) {
      usleep(wait_ms * 1000);
    }
  }
}

void BrpcPsClient::PushDenseRawGradient(std::shared_ptr<DenseAsyncTask> &task,
                                        float *total_send_data,
                                        size_t total_send_data_size,
                                        DownpourBrpcClosure *closure) {
  auto *accessor = GetTableAccessor(task->table_id());
  size_t request_call_num = _server_channels.size();
  // copy the data into the per-shard request buffers
  auto timer = std::make_shared<CostTimer>("pserver_client_push_dense_rpc");
  closure->add_timer(timer);
  uint32_t num_per_shard =
      DenseDimPerShard(accessor->GetTableInfo(FEA_DIM), request_call_num);
  auto send_timer =
      std::make_shared<CostTimer>("pserver_client_push_dense_send");
  for (size_t i = 0; i < request_call_num; ++i) {
    closure->request(i)->set_cmd_id(PS_PUSH_DENSE_TABLE);
    closure->request(i)->set_table_id(task->table_id());
    closure->request(i)->set_client_id(_client_id);
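    // per-shard payload: [num_per_shard : uint32_t][num_per_shard floats]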
    auto *push_data = closure->request(i)->mutable_data();
    push_data->clear();
    push_data->resize(sizeof(uint32_t) + num_per_shard * sizeof(float));
    char *push_data_ptr = const_cast<char *>(push_data->data());
    memcpy(push_data_ptr, &num_per_shard, sizeof(uint32_t));
    memcpy(push_data_ptr + sizeof(uint32_t),
           total_send_data + i * num_per_shard, num_per_shard * sizeof(float));
    closure->cntl(i)->set_request_compress_type(
        (brpc::CompressType)FLAGS_pserver_communicate_compress_type);
    PsService_Stub rpc_stub(GetDenseChannel(i));
    rpc_stub.service(closure->cntl(i), closure->request(i),
                     closure->response(i), closure);
  }
}

}  // namespace distributed
}  // namespace paddle