fleet.cc 32.1 KB
Newer Older
T
tangwei12 已提交
1 2 3 4 5 6 7 8 9 10 11 12 13 14
/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

  http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

15 16
#include "paddle/fluid/distributed/ps/wrapper/fleet.h"

17 18
#include <google/protobuf/text_format.h>

19 20
#include "paddle/fluid/distributed/ps/service/communicator/communicator.h"
#include "paddle/fluid/distributed/ps/table/table.h"
T
tangwei12 已提交
21 22 23 24 25 26 27 28 29 30 31 32 33

namespace paddle {
namespace distributed {

using framework::ProgramDesc;
using framework::VarDesc;
using framework::Variable;

// Upper bound used to pre-reserve feasign-key buffers (10.24M entries).
const uint32_t MAX_FEASIGN_NUM = 1024 * 100 * 100;
// Process-wide singleton state: one wrapper, one PS core (server side) and
// one PS client (worker side) per process.
std::shared_ptr<FleetWrapper> FleetWrapper::s_instance_ = NULL;
bool FleetWrapper::is_initialized_ = false;

std::shared_ptr<paddle::distributed::PSCore> FleetWrapper::pserver_ptr_ = NULL;
std::shared_ptr<paddle::distributed::PSClient> FleetWrapper::worker_ptr_ = NULL;

// Placeholder: heterogeneous-training callbacks are not wired up yet; the
// handler is accepted but ignored.
int FleetWrapper::RegisterHeterCallback(HeterCallBackFunc handler) {
  VLOG(0) << "RegisterHeterCallback support later";
  return 0;
}

// Placeholder: whole-table copy is not implemented yet; always returns 0.
int32_t FleetWrapper::CopyTable(const uint64_t src_table_id,
                                const uint64_t dest_table_id) {
  VLOG(0) << "CopyTable support later";
  return 0;
}

int32_t FleetWrapper::CopyTableByFeasign(
48 49
    const uint64_t src_table_id,
    const uint64_t dest_table_id,
50 51 52 53
    const std::vector<uint64_t>& feasign_list) {
  VLOG(0) << "CopyTableByFeasign support later";
  return 0;
}
T
tangwei12 已提交
54 55 56 57 58 59 60 61 62 63 64 65 66 67

// Record the client-to-client RPC tuning knobs; they are consumed later by
// CreateClient2ClientConnection().
void FleetWrapper::SetClient2ClientConfig(int request_timeout_ms,
                                          int connect_timeout_ms,
                                          int max_retry) {
  client2client_max_retry_ = max_retry;
  client2client_connect_timeout_ms_ = connect_timeout_ms;
  client2client_request_timeout_ms_ = request_timeout_ms;
}

void FleetWrapper::LoadSparseOnServer(const std::string& path,
                                      const std::string& meta,
                                      uint32_t table_id) {
  VLOG(3) << "load sparse table " << table_id << " with " << path << " meta "
          << meta;
Z
zhaocaibei123 已提交
68
  pserver_ptr_->_server_ptr->GetTable(table_id)->Load(path, meta);
T
tangwei12 已提交
69 70
}

71 72
void FleetWrapper::InitServer(
    const std::string& dist_desc,
73 74 75
    const std::vector<std::string>& host_sign_list,
    int index,
    int trainers,
76
    const std::vector<framework::ProgramDesc>& server_sub_program) {
T
tangwei12 已提交
77 78 79 80
  if (!is_initialized_) {
    VLOG(3) << "Going to init server";
    pserver_ptr_ = std::shared_ptr<paddle::distributed::PSCore>(
        new paddle::distributed::PSCore());
81 82 83 84 85 86
    pserver_ptr_->InitServer(dist_desc,
                             &host_sign_list,
                             host_sign_list.size(),
                             index,
                             trainers,
                             server_sub_program);
T
tangwei12 已提交
87 88 89 90 91 92
    is_initialized_ = true;
  } else {
    VLOG(3) << "Server can be initialized only once";
  }
}

93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111
// Parse a space-separated gflags string and apply it to the process.
// If the string is empty, conservative brpc defaults are applied instead.
void FleetWrapper::InitGFlag(const std::string& gflags) {
  VLOG(3) << "Init With Gflags:" << gflags;
  std::vector<std::string> flags = paddle::string::split_string(gflags);
  if (flags.empty()) {
    flags.push_back("-max_body_size=314217728");
    flags.push_back("-bthread_concurrency=40");
    flags.push_back("-socket_max_unwritten_bytes=2048000000");
    flags.push_back("-max_connection_pool_size=1950");
  }
  // gflags expects argv[0] to be the program name.
  flags.insert(flags.begin(), "exe default");
  // FIX: the original used a variable-length array (`char* flags_ptr[n]`),
  // which is a non-standard extension in ISO C++; std::vector is portable.
  std::vector<char*> flags_ptr(flags.size());
  for (size_t i = 0; i < flags.size(); ++i) {
    flags_ptr[i] = const_cast<char*>(flags[i].c_str());
  }
  int params_cnt = static_cast<int>(flags.size());
  char** params_ptr = flags_ptr.data();
  ::GFLAGS_NAMESPACE::ParseCommandLineFlags(&params_cnt, &params_ptr, true);
}
T
tangwei12 已提交
112

113 114 115 116 117 118 119 120 121 122 123 124 125 126
void FleetWrapper::InitWorker(const std::string& dist_desc,
                              const std::vector<std::string>& host_sign_list,
                              int index) {
  if (!is_initialized_) {
    // not used, just for psclient's init
    // TODO(zhaocaibei123): remove this later
    std::map<uint64_t, std::vector<paddle::distributed::Region>>
        dense_pull_regions;

    if (worker_ptr_.get() == nullptr) {
      paddle::distributed::PSParameter ps_param;
      google::protobuf::TextFormat::ParseFromString(dist_desc, &ps_param);
      InitGFlag(ps_param.init_gflags());
      int servers = host_sign_list.size();
Z
zhaocaibei123 已提交
127
      ps_env_.SetPsServers(&host_sign_list, servers);
128
      worker_ptr_ = std::shared_ptr<paddle::distributed::PSClient>(
Z
zhaocaibei123 已提交
129 130
          paddle::distributed::PSClientFactory::Create(ps_param));
      worker_ptr_->Configure(ps_param, dense_pull_regions, ps_env_, index);
131
    }
T
tangwei12 已提交
132
  } else {
133
    VLOG(3) << "Client can be initialized only once";
T
tangwei12 已提交
134 135 136
  }
}

137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164
// Register the coordinator endpoints and attach this worker as a federated
// learning client. Requires InitWorker() to have created worker_ptr_ first.
void FleetWrapper::InitFlWorker(const std::vector<std::string>& host_list,
                                int index,
                                const std::string& self_endpoint) {
  assert(worker_ptr_.get() != nullptr);
  uint32_t coordinator_num = static_cast<uint32_t>(host_list.size());
  ps_env_.SetCoordinators(&host_list, coordinator_num);
  auto* brpc_client = dynamic_cast<BrpcPsClient*>(worker_ptr_.get());
  brpc_client->InitializeFlWorker(self_endpoint);
}

// Synchronously push this FL client's serialized info to the coordinator.
void FleetWrapper::PushFLClientInfoSync(const std::string& fl_client_info) {
  // FLClientInfo fci;
  // google::protobuf::TextFormat::ParseFromString(fl_client_info, &fci);
  // InitGFlag(fci.init_gflags());
  auto* brpc_client = dynamic_cast<BrpcPsClient*>(worker_ptr_.get());
  VLOG(0) << "fl-ps > PushFLClientInfoSync: " << typeid(worker_ptr_).name()
          << ", " << typeid(brpc_client).name() << ", "
          << typeid(BrpcPsClient).name();
  brpc_client->PushFLClientInfoSync(fl_client_info);
}

// Blocking fetch of the federated-learning strategy from the coordinator.
std::string FleetWrapper::PullFlStrategy() {
  auto* brpc_client = dynamic_cast<BrpcPsClient*>(worker_ptr_.get());
  return brpc_client->PullFlStrategy();
}

T
tangwei12 已提交
165 166
void FleetWrapper::StopServer() {
  VLOG(3) << "Going to stop server";
Z
zhaocaibei123 已提交
167
  auto status = worker_ptr_->StopServer();
T
tangwei12 已提交
168 169 170 171 172
  status.wait();
}

void FleetWrapper::FinalizeWorker() {
  VLOG(3) << "Going to finalize worker";
Z
zhaocaibei123 已提交
173
  worker_ptr_->FinalizeWorker();
T
tangwei12 已提交
174 175 176 177 178 179 180 181 182 183
}

// Barrier across workers, implemented by the communicator via a barrier table.
void FleetWrapper::BarrierWithTable(uint32_t barrier_type) {
  VLOG(3) << "Going to Barrier worker";
  Communicator::GetInstance()->BarrierWithTable(barrier_type);
}

uint64_t FleetWrapper::RunServer(const std::string& ip, uint32_t port) {
  VLOG(3) << "Going to run server with ip " << ip << " port " << port;
Z
zhaocaibei123 已提交
184
  auto ret = pserver_ptr_->RunServer(ip, port);
T
tangwei12 已提交
185 186 187 188 189
  return ret;
}

std::vector<uint64_t> FleetWrapper::GetClientsInfo() {
  VLOG(3) << "Going to get client info";
Z
zhaocaibei123 已提交
190
  std::vector<uint64_t> res = ps_env_.GetClientInfo();
191 192 193
  for (auto rr : res) {
    VLOG(2) << "FleetWrapper::GetClientInfo " << rr;
  }
Z
zhaocaibei123 已提交
194
  return res;
T
tangwei12 已提交
195 196
}

197 198
int FleetWrapper::SetClients(std::vector<uint64_t>& host_sign_list) {
  int node = host_sign_list.size();
Z
zhaocaibei123 已提交
199
  return ps_env_.SetPsClients(host_sign_list.data(), node);
200 201
}

T
tangwei12 已提交
202
void FleetWrapper::CreateClient2ClientConnection() {
Z
zhaocaibei123 已提交
203
  VLOG(1) << "Going to create client2client connection";
Z
zhaocaibei123 已提交
204 205 206
  worker_ptr_->CreateClient2ClientConnection(client2client_request_timeout_ms_,
                                             client2client_connect_timeout_ms_,
                                             client2client_max_retry_);
T
tangwei12 已提交
207 208
}

209
std::future<int32_t> FleetWrapper::PullSparseVarsAsync(
210 211 212 213 214 215
    const Scope& scope,
    const uint64_t table_id,
    const std::vector<std::string>& var_names,
    std::vector<uint64_t>* fea_keys,
    std::vector<std::vector<float>>* fea_values,
    int fea_value_dim) {
216 217 218 219 220 221 222 223
  fea_keys->clear();
  fea_keys->resize(0);
  fea_keys->reserve(MAX_FEASIGN_NUM);
  for (auto name : var_names) {
    Variable* var = scope.FindVar(name);
    if (var == nullptr) {
      continue;
    }
224
    phi::DenseTensor* tensor = var->GetMutable<phi::DenseTensor>();
225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244
    CHECK(tensor != nullptr) << "tensor of var " << name << " is null";
    int64_t* ids = tensor->data<int64_t>();
    size_t len = tensor->numel();
    for (auto i = 0u; i < len; ++i) {
      if (ids[i] == 0u) {
        continue;
      }
      fea_keys->push_back(static_cast<uint64_t>(ids[i]));
    }
  }
  fea_values->resize(fea_keys->size() + 1);
  for (auto& t : *fea_values) {
    t.resize(fea_value_dim);
  }
  std::vector<float*> pull_result_ptr;
  for (auto& t : *fea_values) {
    pull_result_ptr.push_back(t.data());
  }

  bool training = true;
245 246
  return pserver_ptr_->_worker_ptr->PullSparse(pull_result_ptr.data(),
                                               table_id,
Z
zhaocaibei123 已提交
247
                                               fea_keys->data(),
248 249
                                               fea_keys->size(),
                                               training);
250 251
}

T
tangwei12 已提交
252
void FleetWrapper::PullSparseVarsSync(
253 254 255 256 257 258
    const Scope& scope,
    const uint64_t table_id,
    const std::vector<std::string>& var_names,
    std::vector<uint64_t>* fea_keys,
    std::vector<std::vector<float>>* fea_values,
    int fea_value_dim,
T
tangwei12 已提交
259 260 261 262 263 264 265 266 267 268 269 270
    const std::vector<std::string>& var_emb_names) {
  std::vector<std::future<int32_t>> pull_sparse_status;
  pull_sparse_status.resize(0);
  fea_keys->clear();
  fea_keys->resize(0);
  fea_keys->reserve(MAX_FEASIGN_NUM);
  for (size_t var_index = 0; var_index < var_names.size(); ++var_index) {
    const std::string& name = var_names[var_index];
    Variable* var = scope.FindVar(name);
    if (var == nullptr) {
      continue;
    }
271
    phi::DenseTensor* tensor = var->GetMutable<phi::DenseTensor>();
T
tangwei12 已提交
272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297
    CHECK(tensor != nullptr) << "tensor of var " << name << " is null";
    int64_t* ids = tensor->data<int64_t>();
    size_t len = tensor->numel();

    // skip slots which do not have embedding
    const std::string& emb_name = var_emb_names[var_index];
    Variable* emb_var = scope.FindVar(emb_name);
    if (emb_var == nullptr) {
      continue;
    }

    for (auto i = 0u; i < len; ++i) {
      if (ids[i] == 0u) {
        continue;
      }
      fea_keys->push_back(static_cast<uint64_t>(ids[i]));
    }
  }
  fea_values->resize(fea_keys->size() + 1);
  for (auto& t : *fea_values) {
    t.resize(fea_value_dim);
  }
  std::vector<float*> pull_result_ptr;
  for (auto& t : *fea_values) {
    pull_result_ptr.push_back(t.data());
  }
298
  bool training = true;
299 300 301 302 303
  auto status = pserver_ptr_->_worker_ptr->PullSparse(pull_result_ptr.data(),
                                                      table_id,
                                                      fea_keys->data(),
                                                      fea_keys->size(),
                                                      training);
T
tangwei12 已提交
304 305 306 307 308 309 310 311 312 313 314 315
  pull_sparse_status.push_back(std::move(status));
  for (auto& t : pull_sparse_status) {
    t.wait();
    auto status = t.get();
    if (status != 0) {
      LOG(ERROR) << "fleet pull sparse failed, status[" << status << "]";
      sleep(sleep_seconds_before_fail_exit_);
      exit(-1);
    }
  }
}

316 317 318
// is_training is true means training, false means inference, the behavior is
// different on pserver

319 320 321 322 323 324 325 326
// Pull sparse embedding rows directly into pre-allocated output tensors,
// blocking until the pull finishes. Input ids are walked in order and each
// consumes `fea_dim` floats of output; when one output tensor fills up, the
// cursor advances to the next one. Rows for `padding_id` are zero-filled
// locally instead of being pulled. `is_training` is forwarded to the pserver
// (behavior differs between training and inference there).
void FleetWrapper::PullSparseToTensorSync(
    const uint64_t table_id,
    int fea_dim,
    uint64_t padding_id,
    platform::Place place,
    bool is_training,
    std::vector<const phi::DenseTensor*>* inputs,
    std::vector<phi::DenseTensor*>* outputs) {
  std::vector<uint64_t> fea_keys;
  std::vector<float*> pull_result_ptr;
  fea_keys.reserve(MAX_FEASIGN_NUM / 100);
  pull_result_ptr.reserve(MAX_FEASIGN_NUM / 100);
  std::vector<float> init_value(fea_dim, 0);  // zero row for padding ids
  phi::DenseTensor* output = nullptr;
  float* output_data = nullptr;
  // Intentionally wraps to SIZE_MAX so the first `++output_index` yields 0.
  size_t output_index = -1;
  size_t output_len = 0;
  for (size_t index = 0; index < inputs->size(); ++index) {
    const phi::DenseTensor* tensor = inputs->at(index);
    const int64_t* ids = tensor->data<int64_t>();
    size_t len = tensor->numel();
    for (size_t i = 0; i < len; ++i, output_len += fea_dim) {
      // Advance to the next output tensor when the current one is full.
      if (!output || output_len == size_t(output->numel())) {
        ++output_index;
        CHECK(output_index < outputs->size());  // NOLINT
        output = outputs->at(output_index);
        output->set_lod(tensor->lod());
        output_data = output->mutable_data<float>(place);
        output_len = 0;
        CHECK(output->numel() % fea_dim == 0);  // NOLINT
        CHECK(output_data != nullptr);          // NOLINT
      }
      uint64_t real_id = static_cast<uint64_t>(ids[i]);
      if (real_id == padding_id) {
        // Padding rows are zeroed locally; no server round-trip needed.
        memcpy(output_data + output_len,
               init_value.data(),
               sizeof(float) * fea_dim);
        continue;
      }
      fea_keys.push_back(real_id);
      pull_result_ptr.push_back(output_data + output_len);
    }
  }

  auto status = worker_ptr_->PullSparse(pull_result_ptr.data(),
                                        table_id,
                                        fea_keys.data(),
                                        fea_keys.size(),
                                        is_training);
  status.wait();
  auto ret = status.get();
  if (ret != 0) {
    LOG(ERROR) << "fleet pull sparse failed, status[" << ret << "]";
    // NOTE(review): unlike PullSparseVarsSync, this path sleeps but does NOT
    // exit on failure — confirm whether that is intentional.
    sleep(sleep_seconds_before_fail_exit_);
  }
}

void FleetWrapper::PullDenseVarsAsync(
377 378
    const Scope& scope,
    const uint64_t tid,
T
tangwei12 已提交
379
    const std::vector<std::string>& var_names,
380 381
    std::vector<std::future<int32_t>>* pull_dense_status,
    bool in_cpu) {
Z
zhaocaibei123 已提交
382
  auto& regions = regions_[tid];
T
tangwei12 已提交
383 384 385 386 387 388 389 390
  regions.clear();
  regions.resize(var_names.size());
  for (auto i = 0u; i < var_names.size(); ++i) {
    std::string varname = var_names[i];
    if (!in_cpu) {
      varname = var_names[i] + "pin";
    }
    Variable* var = scope.FindVar(varname);
391
    phi::DenseTensor* tensor = var->GetMutable<phi::DenseTensor>();
T
tangwei12 已提交
392 393 394 395
    float* w = tensor->data<float>();
    paddle::distributed::Region reg(w, tensor->numel());
    regions[i] = std::move(reg);
  }
Z
zhaocaibei123 已提交
396 397

  auto status = worker_ptr_->PullDense(regions.data(), regions.size(), tid);
T
tangwei12 已提交
398 399 400 401
  pull_dense_status->push_back(std::move(status));
}

void FleetWrapper::PullDenseVarsSync(
402 403
    const Scope& scope,
    const uint64_t tid,
T
tangwei12 已提交
404
    const std::vector<std::string>& var_names) {
Z
zhaocaibei123 已提交
405
  auto& regions = regions_[tid];
T
tangwei12 已提交
406 407 408 409
  regions.clear();
  regions.reserve(var_names.size());
  for (auto& t : var_names) {
    Variable* var = scope.FindVar(t);
410
    phi::DenseTensor* tensor = var->GetMutable<phi::DenseTensor>();
411 412 413 414 415
    if (!platform::is_gpu_place(tensor->place())) {
      float* w = tensor->data<float>();
      paddle::distributed::Region reg(w, tensor->numel());
      regions.emplace_back(std::move(reg));
    }
T
tangwei12 已提交
416
  }
Z
zhaocaibei123 已提交
417
  auto status = worker_ptr_->PullDense(regions.data(), regions.size(), tid);
T
tangwei12 已提交
418 419 420 421
  status.wait();
}

void FleetWrapper::PushDenseParamSync(
422 423
    const Scope& scope,
    const uint64_t table_id,
T
tangwei12 已提交
424 425 426 427 428 429
    const std::vector<std::string>& var_names) {
  auto place = platform::CPUPlace();
  std::vector<paddle::distributed::Region> regions;
  for (auto& t : var_names) {
    Variable* var = scope.FindVar(t);
    CHECK(var != nullptr) << "var[" << t << "] not found";
430
    phi::DenseTensor* tensor = var->GetMutable<phi::DenseTensor>();
431 432 433 434 435
    if (!platform::is_gpu_place(tensor->place())) {
      float* g = tensor->mutable_data<float>(place);
      paddle::distributed::Region reg(g, tensor->numel());
      regions.emplace_back(std::move(reg));
    }
T
tangwei12 已提交
436
  }
437
  auto push_status =
Z
zhaocaibei123 已提交
438
      worker_ptr_->PushDenseParam(regions.data(), regions.size(), table_id);
T
tangwei12 已提交
439 440 441 442 443 444
  push_status.wait();
  auto status = push_status.get();
  CHECK(status == 0) << "push dense param failed, status[" << status << "]";
}

void FleetWrapper::PushDenseVarsSync(
445 446
    Scope* scope,
    const uint64_t table_id,
T
tangwei12 已提交
447 448 449
    const std::vector<std::string>& var_names) {}

// Asynchronously push dense gradients for `var_names` to the server. When
// scale_datanorm >= 0, data-norm statistic gradients (batch_size/batch_sum/
// batch_square_sum) are rescaled by batch_size before pushing.
// NOTE(review): the resulting future `push_status` is discarded without
// wait(), and the `push_sparse_status` out-parameter is never written —
// confirm this fire-and-forget behavior is intended.
void FleetWrapper::PushDenseVarsAsync(
    const Scope& scope,
    const uint64_t table_id,
    const std::vector<std::string>& var_names,
    std::vector<std::future<int32_t>>* push_sparse_status,
    float scale_datanorm,
    int batch_size) {
  auto place = platform::CPUPlace();
  std::vector<paddle::distributed::Region> regions;
  for (auto& t : var_names) {
    Variable* var = scope.FindVar(t);
    CHECK(var != nullptr) << "var[" << t << "] not found";
    phi::DenseTensor* tensor = var->GetMutable<phi::DenseTensor>();
    int count = tensor->numel();
    float* g = tensor->mutable_data<float>(place);
    // TODO(zhaocaibei123): how to get batch_size in op?
    if (scale_datanorm >= 0) {
      if (t.find(".batch_size@GRAD") != std::string::npos ||
          t.find(".batch_sum@GRAD") != std::string::npos) {
        // Average the gradient over the batch.
        Eigen::Map<Eigen::MatrixXf> mat(g, 1, count);
        float scale = 1.0 / batch_size;
        mat *= scale;
      } else if (t.find(".batch_square_sum@GRAD") != std::string::npos) {
        VLOG(3) << "epsilon: " << scale_datanorm;
        for (int i = 0; i < count; ++i) {
          g[i] = (g[i] - batch_size * scale_datanorm) / batch_size +
                 batch_size * scale_datanorm;
        }
      }
    }

    paddle::distributed::Region reg(g, tensor->numel());
    regions.emplace_back(std::move(reg));
    // (log message retains the original "talbe_id" spelling deliberately)
    VLOG(3) << "FleetWrapper::PushDenseVarsAsync Var " << t << " talbe_id "
            << table_id << " Temp_data[0] " << g[0] << " Temp_data[-1] "
            << g[tensor->numel() - 1];
  }

  auto push_status =
      worker_ptr_->PushDense(regions.data(), regions.size(), table_id);
}

void FleetWrapper::PushSparseVarsAsync(
492 493
    const Scope& scope,
    const uint64_t table_id,
T
tangwei12 已提交
494 495 496 497 498 499 500
    const std::string& grad_varname,
    std::vector<std::future<int32_t>>* push_sparse_status) {
  std::vector<std::string> varnames;
  varnames.push_back(grad_varname);

  auto* communicator = Communicator::GetInstance();
  PADDLE_ENFORCE_EQ(
501 502
      communicator->Check(table_id),
      true,
T
tangwei12 已提交
503 504 505 506 507 508
      platform::errors::InvalidArgument(
          "can not find table: %s, please check your config", table_id));
  communicator->Send(varnames, scope);
}

void FleetWrapper::PushSparseVarsWithLabelAsync(
509 510 511 512
    const Scope& scope,
    const uint64_t table_id,
    const std::vector<uint64_t>& fea_keys,
    const std::vector<float>& fea_labels,
T
tangwei12 已提交
513
    const std::vector<std::string>& sparse_key_names,
514 515
    const std::vector<std::string>& sparse_grad_names,
    const int emb_dim,
T
tangwei12 已提交
516
    std::vector<std::vector<float>>* push_values,
517 518 519 520 521 522
    std::vector<std::future<int32_t>>* push_sparse_status,
    const int batch_size,
    const bool use_cvm,
    const bool dump_slot,
    std::vector<uint64_t>* sparse_push_keys,
    const bool no_cvm) {
T
tangwei12 已提交
523 524 525 526 527
  // not support
  return;
}

void FleetWrapper::PushSparseFromTensorWithLabelAsync(
528 529 530 531 532 533 534 535
    const Scope& scope,
    const uint64_t table_id,
    int fea_dim,
    uint64_t padding_id,
    bool scale_sparse,
    const std::string& accesor,
    const std::string& click_name,
    platform::Place place,
T
tangwei12 已提交
536
    const std::vector<std::string>& input_names,
537 538
    std::vector<const phi::DenseTensor*>* inputs,
    std::vector<const phi::DenseTensor*>* outputs) {
T
tangwei12 已提交
539 540 541 542
  // not support
  return;
}

Z
zhaocaibei123 已提交
543
// Build sparse push requests directly from gradient tensors and send them
// asynchronously. For each non-padding id, a value row is assembled as either
// [slot, grad...] (use_cvm_op) or [slot, show, clk, grad...] (CTR layout,
// matching CtrCommonPushValue in ctr_accessor.h), then pushed to `table_id`.
// Gradients are rescaled by batch_size when all inputs agree on one batch
// size. Inputs may be LoD tensors (per-instance rows) or dense tensors.
void FleetWrapper::PushSparseFromTensorAsync(
    const uint64_t table_id,
    int fea_dim,
    uint64_t padding_id,
    platform::Place place,
    std::vector<const phi::DenseTensor*>* inputs,
    std::vector<int>& slots,
    const phi::DenseTensor* shows,
    const phi::DenseTensor* clks,
    std::vector<phi::DenseTensor*>* outputs,
    bool use_cvm_op) {
  CHECK(slots.size() == inputs->size());
  int batch_size = -1;
  bool batch_size_consist = true;
  // Determine the batch size; if inputs disagree, skip gradient rescaling.
  for (auto* input : *inputs) {
    size_t cur_batch_size =
        input->lod().size() ? input->lod()[0].size() - 1 : input->dims()[0];
    if (batch_size == -1) {
      batch_size = static_cast<int>(cur_batch_size);
    } else if (batch_size != static_cast<int>(cur_batch_size)) {
      // CHECK(batch_size == cur_batch_size);  // NOLINT
      batch_size_consist = false;
      break;
    }
  }
  CHECK(batch_size > 0);  // NOLINT

  // show/clk may be per-instance (size == batch) or broadcast (size == 1).
  size_t show_size =
      shows->lod().size() ? shows->lod()[0].size() - 1 : shows->dims()[0];
  CHECK(show_size == size_t(batch_size) || show_size == 1);
  size_t clk_size =
      clks->lod().size() ? clks->lod()[0].size() - 1 : clks->dims()[0];
  CHECK(clk_size == size_t(batch_size) || clk_size == 1);

  CHECK(outputs->size() == inputs->size());
  std::vector<uint64_t> push_keys;
  push_keys.reserve(MAX_FEASIGN_NUM / 100);
  std::vector<std::vector<float>> push_values;
  push_values.reserve(MAX_FEASIGN_NUM / 100);
  size_t output_len = 0;
  size_t input_idx = 0;

  VLOG(2) << "fleet.cc::emb_dim: " << fea_dim;

  // TODO(zhaocaibei123): check type of show/clk is int? float? uint64?
  // const long int* show_tensor = shows->data<int64_t>();
  // const long int* clk_tensor = clks->data<int64_t>();
  const float* show_tensor = shows->data<float>();
  const float* clk_tensor = clks->data<float>();

  for (size_t index = 0; index < inputs->size(); ++index) {
    phi::DenseTensor* g_tensor = outputs->at(index);
    float* g = g_tensor->data<float>();
    // no cvm
    if (batch_size_consist) {  // TODO(zhaocaibei123): add config
                               // scale_sparse_gradient_with_batch_size_
      // Undo the mean-reduction over the batch on the embedding columns.
      Eigen::Map<
          Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>>
          g_mat(g, g_tensor->numel() / fea_dim, fea_dim);
      if (use_cvm_op) {
        g_mat.rightCols(fea_dim - 2) *= batch_size;
      } else {
        g_mat.rightCols(fea_dim) *= batch_size;
      }
    }

    const phi::DenseTensor* tensor = inputs->at(index);
    const int64_t* ids = tensor->data<int64_t>();
    size_t len = tensor->numel();
    output_len = 0;

    if (tensor->lod().size() > 0) {
      // LoD path: ids grouped per instance i; show/clk indexed by instance.
      for (size_t i = 0; i < tensor->lod()[0].size() - 1; ++i) {
        for (size_t j = tensor->lod()[0][i]; j < tensor->lod()[0][i + 1];
             ++j, output_len += fea_dim) {
          uint64_t real_id = static_cast<uint64_t>(ids[j]);
          if (real_id == padding_id) {
            continue;  // padding consumes output space but is not pushed
          }
          push_keys.emplace_back(real_id);
          if (use_cvm_op) {
            push_values.emplace_back(fea_dim + 1);
            push_values.back()[0] = static_cast<float>(slots[index]);
            float* data = push_values.back().data() + 1;
            memcpy(data, g + output_len, sizeof(float) * fea_dim);
          } else {
            push_values.emplace_back(fea_dim + 3);
            // slot show clk grad... consistent with CtrCommonPushValue defined
            // in ctr_accessor.h
            push_values.back()[0] = static_cast<float>(slots[index]);
            // Broadcast show/clk when they have a single entry.
            push_values.back()[1] =
                (i >= show_size ? 1 : static_cast<float>(show_tensor[i]));
            push_values.back()[2] =
                (i >= clk_size ? 0 : static_cast<float>(clk_tensor[i]));
            float* data = push_values.back().data() + 3;
            memcpy(data, g + output_len, sizeof(float) * fea_dim);
          }
          ++input_idx;
        }
      }
    } else {
      // Dense path: one id per row i.
      for (size_t i = 0; i < len; ++i, output_len += fea_dim) {
        uint64_t real_id = static_cast<uint64_t>(ids[i]);
        if (real_id == padding_id) {
          continue;
        }
        push_keys.emplace_back(real_id);
        if (use_cvm_op) {
          push_values.emplace_back(fea_dim + 1);
          push_values.back()[0] = static_cast<float>(slots[index]);
          float* data = push_values.back().data() + 1;
          memcpy(data, g + output_len, sizeof(float) * fea_dim);
        } else {
          push_values.emplace_back(fea_dim + 3);
          // slot show clk grad... consistent with CtrCommonPushValue defined in
          // ctr_accessor.h
          push_values.back()[0] = static_cast<float>(slots[index]);
          push_values.back()[1] = (i >= show_size ? 1 : show_tensor[i]);
          push_values.back()[2] = (i >= clk_size ? 0 : clk_tensor[i]);
          float* data = push_values.back().data() + 3;
          memcpy(data, g + output_len, sizeof(float) * fea_dim);
        }
        ++input_idx;
      }
    }
    CHECK(static_cast<int64_t>(output_len) == g_tensor->numel());
  }

  std::vector<float*> push_g_vec(input_idx, nullptr);

  for (auto i = 0u; i < push_keys.size(); ++i) {
    push_g_vec[i] = push_values.at(i).data();
  }

  // NOTE(review): fire-and-forget — the returned future is not waited on.
  auto status = worker_ptr_->PushSparse(table_id,
                                        push_keys.data(),
                                        (const float**)push_g_vec.data(),
                                        push_keys.size());
}

void FleetWrapper::LoadModel(const std::string& path, const int mode) {
Z
zhaocaibei123 已提交
684
  auto ret = worker_ptr_->Load(path, std::to_string(mode));
T
tangwei12 已提交
685 686 687 688 689 690 691
  ret.wait();
  if (ret.get() != 0) {
    LOG(ERROR) << "load model from path:" << path << " failed";
  }
}

void FleetWrapper::LoadModelOneTable(const uint64_t table_id,
692 693
                                     const std::string& path,
                                     const int mode) {
Z
zhaocaibei123 已提交
694
  auto ret = worker_ptr_->Load(table_id, path, std::to_string(mode));
T
tangwei12 已提交
695 696 697 698 699 700 701 702
  ret.wait();
  if (ret.get() != 0) {
    LOG(ERROR) << "load model of table id: " << table_id
               << ", from path: " << path << " failed";
  }
}

void FleetWrapper::SaveModel(const std::string& path, const int mode) {
Z
zhaocaibei123 已提交
703
  auto ret = worker_ptr_->Save(path, std::to_string(mode));
T
tangwei12 已提交
704 705 706 707 708 709 710 711
  ret.wait();
  int32_t feasign_cnt = ret.get();
  if (feasign_cnt == -1) {
    LOG(ERROR) << "save model failed";
  }
}

void FleetWrapper::SaveModelOneTable(const uint64_t table_id,
712 713
                                     const std::string& path,
                                     const int mode) {
Z
zhaocaibei123 已提交
714
  auto ret = worker_ptr_->Save(table_id, path, std::to_string(mode));
T
tangwei12 已提交
715 716 717 718 719 720 721
  ret.wait();
  if (ret.get() != 0) {
    LOG(ERROR) << "save model of table id: " << table_id
               << ", to path: " << path << " failed";
  }
}

722 723
void FleetWrapper::RecvAndSaveTable(const uint64_t table_id,
                                    const std::string& path) {
Z
zhaocaibei123 已提交
724
  auto ret = worker_ptr_->RecvAndSaveTable(table_id, path);
725 726 727 728 729 730
  if (ret != 0) {
    LOG(ERROR) << "save model of table id: " << table_id
               << ", to path: " << path << " failed";
  }
}

T
tangwei12 已提交
731
void FleetWrapper::PrintTableStat(const uint64_t table_id) {
Z
zhaocaibei123 已提交
732
  auto ret = worker_ptr_->PrintTableStat(table_id);
T
tangwei12 已提交
733 734 735 736
  ret.wait();
  int32_t err_code = ret.get();
  if (err_code == -1) {
    LOG(ERROR) << "print table stat failed";
L
lxsbupt 已提交
737 738 739 740 741 742 743 744 745 746 747
  }
}

// Persist the cache portion of a table for the given pass; blocks for result.
void FleetWrapper::SaveCacheTable(const uint64_t table_id,
                                  uint16_t pass_id,
                                  size_t threshold) {
  auto save_status = worker_ptr_->SaveCacheTable(table_id, pass_id, threshold);
  save_status.wait();
  int32_t err_code = save_status.get();
  if (err_code == -1) {
    LOG(ERROR) << "save cache table stat failed";
  }
}

751
void FleetWrapper::ShrinkSparseTable(int table_id, int threshold) {
Z
zhaocaibei123 已提交
752
  auto ret = worker_ptr_->Shrink(table_id, std::to_string(threshold));
T
tangwei12 已提交
753
  ret.wait();
754 755 756 757
  int32_t err_code = ret.get();
  if (err_code == -1) {
    LOG(ERROR) << "shrink sparse table stat failed";
  }
T
tangwei12 已提交
758 759 760
}

void FleetWrapper::ClearModel() {
Z
zhaocaibei123 已提交
761
  auto ret = pserver_ptr_->_worker_ptr->Clear();
T
tangwei12 已提交
762 763 764 765
  ret.wait();
}

void FleetWrapper::ClearOneTable(const uint64_t table_id) {
Z
zhaocaibei123 已提交
766
  auto ret = pserver_ptr_->_worker_ptr->Clear(table_id);
T
tangwei12 已提交
767 768 769
  ret.wait();
}

770 771
void FleetWrapper::ShrinkDenseTable(int table_id,
                                    Scope* scope,
T
tangwei12 已提交
772
                                    std::vector<std::string> var_list,
773 774
                                    float decay,
                                    int emb_dim) {
T
tangwei12 已提交
775 776 777 778 779
  std::vector<paddle::distributed::Region> regions;
  for (std::string& name : var_list) {
    if (name.find("batch_sum") != std::string::npos) {
      Variable* var = scope->FindVar(name);
      CHECK(var != nullptr) << "var[" << name << "] not found";
780
      VLOG(3) << "prepare shrink dense batch_sum";
781
      phi::DenseTensor* tensor = var->GetMutable<phi::DenseTensor>();
T
tangwei12 已提交
782 783 784 785
      float* g = tensor->data<float>();

      // show_batch_sum += N * log(decay)
      std::string size_name = name;
786 787
      size_name.replace(
          size_name.find("batch_sum"), size_name.length(), "batch_size");
T
tangwei12 已提交
788 789 790
      Variable* var_size = scope->FindVar(size_name);
      CHECK(var_size != nullptr) << "var[" << size_name << "] not found";
      VLOG(3) << "shrink dense batch_sum: " << name << ", " << size_name;
791
      float* g_size = var_size->GetMutable<phi::DenseTensor>()->data<float>();
T
tangwei12 已提交
792 793 794 795 796 797 798 799 800

      for (int k = 0; k < tensor->numel(); k += emb_dim) {
        g[k] = g[k] + g_size[k] * log(decay);
      }
      paddle::distributed::Region reg(g, tensor->numel());
      regions.emplace_back(std::move(reg));
    } else {
      Variable* var = scope->FindVar(name);
      CHECK(var != nullptr) << "var[" << name << "] not found";
801
      phi::DenseTensor* tensor = var->GetMutable<phi::DenseTensor>();
T
tangwei12 已提交
802 803 804 805 806
      float* g = tensor->data<float>();
      paddle::distributed::Region reg(g, tensor->numel());
      regions.emplace_back(std::move(reg));
    }
  }
Z
zhaocaibei123 已提交
807
  auto push_status = pserver_ptr_->_worker_ptr->PushDenseParam(
T
tangwei12 已提交
808 809 810 811 812 813 814 815 816 817 818 819
      regions.data(), regions.size(), table_id);
  push_status.wait();
  auto status = push_status.get();
  if (status != 0) {
    // PADDLE_THORW(platform::errors::Fatal(
    //    "push shrink dense param failed, status is [%d].", status));
    sleep(sleep_seconds_before_fail_exit_);
    exit(-1);
  }
}

void FleetWrapper::ClientFlush() {
820 821 822 823
  if (worker_ptr_.get() == nullptr) {
    VLOG(0) << "worker_ptr null, do nothing";
    return;
  }
Z
zhaocaibei123 已提交
824
  auto ret = worker_ptr_->Flush();
T
tangwei12 已提交
825
  ret.wait();
826 827 828 829
  int32_t err_code = ret.get();
  if (err_code == -1) {
    LOG(ERROR) << "Client Flush failed";
  }
T
tangwei12 已提交
830 831 832 833
}

int FleetWrapper::RegisterClientToClientMsgHandler(int msg_type,
                                                   MsgHandlerFunc handler) {
834 835
  if (worker_ptr_.get() == nullptr) {
    VLOG(0) << "FleetWrapper::Client is null";
Z
zhaocaibei123 已提交
836 837
    return -1;
  } else {
Z
zhaocaibei123 已提交
838
    return worker_ptr_->RegisteClient2ClientMsgHandler(msg_type, handler);
Z
zhaocaibei123 已提交
839
  }
T
tangwei12 已提交
840 841 842 843
}

std::future<int32_t> FleetWrapper::SendClientToClientMsg(
    int msg_type, int to_client_id, const std::string& msg) {
Z
zhaocaibei123 已提交
844
  return worker_ptr_->SendClient2ClientMsg(msg_type, to_client_id, msg);
T
tangwei12 已提交
845 846
}

Z
zhaocaibei123 已提交
847 848 849 850 851 852 853 854 855 856 857 858 859 860
double FleetWrapper::GetCacheThreshold(int table_id) {
  double cache_threshold = 0.0;
  auto ret = worker_ptr_->Flush();
  ret.wait();
  ret = worker_ptr_->GetCacheThreshold(table_id, cache_threshold);
  ret.wait();
  if (cache_threshold < 0) {
    LOG(ERROR) << "get cache threshold failed";
    sleep(sleep_seconds_before_fail_exit_);
    exit(-1);
  }
  return cache_threshold;
}

861 862 863 864 865 866
void FleetWrapper::CacheShuffle(int table_id,
                                const std::string& path,
                                const int mode,
                                const double cache_threshold) {
  auto ret = worker_ptr_->CacheShuffle(
      table_id, path, std::to_string(mode), std::to_string(cache_threshold));
Z
zhaocaibei123 已提交
867 868 869 870 871 872 873 874 875
  ret.wait();
  int32_t feasign_cnt = ret.get();
  if (feasign_cnt == -1) {
    LOG(ERROR) << "cache shuffle failed";
    sleep(sleep_seconds_before_fail_exit_);
    exit(-1);
  }
}

876 877
int32_t FleetWrapper::SaveCache(int table_id,
                                const std::string& path,
Z
zhaocaibei123 已提交
878 879 880 881 882 883 884 885 886 887 888 889
                                const int mode) {
  auto ret = worker_ptr_->SaveCache(table_id, path, std::to_string(mode));
  ret.wait();
  int32_t feasign_cnt = ret.get();
  if (feasign_cnt == -1) {
    LOG(ERROR) << "table save cache failed";
    sleep(sleep_seconds_before_fail_exit_);
    exit(-1);
  }
  return feasign_cnt;
}

Z
zhaocaibei123 已提交
890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907
void FleetWrapper::Revert() {
  auto ret = worker_ptr_->Revert();
  ret.wait();
  if (ret.get() == -1) {
    LOG(ERROR) << "table revert failed";
    exit(-1);
  }
}

void FleetWrapper::CheckSavePrePatchDone() {
  auto ret = worker_ptr_->CheckSavePrePatchDone();
  ret.wait();
  if (ret.get() == -1) {
    LOG(ERROR) << "table revert failed";
    exit(-1);
  }
}

T
tangwei12 已提交
908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924
std::default_random_engine& FleetWrapper::LocalRandomEngine() {
  struct engine_wrapper_t {
    std::default_random_engine engine;

    engine_wrapper_t() {
      struct timespec tp;
      clock_gettime(CLOCK_REALTIME, &tp);
      double cur_time = tp.tv_sec + tp.tv_nsec * 1e-9;
      static std::atomic<uint64_t> x(0);
      std::seed_seq sseq = {x++, x++, x++, (uint64_t)(cur_time * 1000)};
      engine.seed(sseq);
    }
  };
  thread_local engine_wrapper_t r;
  return r.engine;
}

925 926 927
size_t FleetWrapper::GetAbsoluteSum(size_t start,
                                    size_t end,
                                    size_t level,
T
tangwei12 已提交
928 929 930 931 932 933 934 935 936 937 938 939 940 941 942
                                    const framework::LoD& lod) {
  if (level >= lod.size() - 1) {
    return end - start;
  }
  size_t ret = 0;
  for (size_t i = start; i < end - 1; ++i) {
    size_t pos1 = lod[level][i];
    size_t pos2 = lod[level][i + 1];
    ret += GetAbsoluteSum(pos1, pos2, level + 1, lod);
  }
  return ret;
}

}  // end namespace distributed
}  // end namespace paddle