// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

  http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/fleet/fleet_wrapper.h"
#include <algorithm>
#include <utility>
#include "paddle/fluid/framework/data_feed.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/scope.h"

namespace paddle {
namespace framework {

const uint32_t MAX_FEASIGN_NUM = 1024 * 100 * 100;
std::shared_ptr<FleetWrapper> FleetWrapper::s_instance_ = NULL;
bool FleetWrapper::is_initialized_ = false;

#ifdef PADDLE_WITH_PSLIB
template <class AR>
paddle::ps::Archive<AR>& operator<<(paddle::ps::Archive<AR>& ar,
                                    const MultiSlotType& ins) {
  ar << ins.GetType();
  ar << ins.GetOffset();
  ar << ins.GetFloatData();
  ar << ins.GetUint64Data();
  return ar;
}

template <class AR>
paddle::ps::Archive<AR>& operator>>(paddle::ps::Archive<AR>& ar,
                                    MultiSlotType& ins) {
  ar >> ins.MutableType();
  ar >> ins.MutableOffset();
  ar >> ins.MutableFloatData();
  ar >> ins.MutableUint64Data();
  return ar;
}
#endif

#ifdef PADDLE_WITH_PSLIB
std::shared_ptr<paddle::distributed::PSlib> FleetWrapper::pslib_ptr_ = NULL;
#endif

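// Records client-to-client RPC timeout and retry settings; they take effect
// when CreateClient2ClientConnection() is called later.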
void FleetWrapper::SetClient2ClientConfig(int request_timeout_ms,
                                          int connect_timeout_ms,
                                          int max_retry) {
  client2client_request_timeout_ms_ = request_timeout_ms;
  client2client_connect_timeout_ms_ = connect_timeout_ms;
  client2client_max_retry_ = max_retry;
}

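// Initializes the PSLib server identified by |index| from the distributed
// description string. Guarded by is_initialized_, so only the first call
// has any effect.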
void FleetWrapper::InitServer(const std::string& dist_desc, int index) {
#ifdef PADDLE_WITH_PSLIB
  if (!is_initialized_) {
    VLOG(3) << "Going to init server";
    pslib_ptr_ = std::shared_ptr<paddle::distributed::PSlib>(
        new paddle::distributed::PSlib());
    pslib_ptr_->init_server(dist_desc, index);
    is_initialized_ = true;
  } else {
    VLOG(3) << "Server can be initialized only once";
  }
#endif
}

void FleetWrapper::InitWorker(const std::string& dist_desc,
                              const std::vector<uint64_t>& host_sign_list,
                              int node_num, int index) {
#ifdef PADDLE_WITH_PSLIB
  if (!is_initialized_) {
    VLOG(3) << "Going to init worker";
    pslib_ptr_ = std::shared_ptr<paddle::distributed::PSlib>(
        new paddle::distributed::PSlib());
    pslib_ptr_->init_worker(dist_desc,
                            const_cast<uint64_t*>(host_sign_list.data()),
                            node_num, index);
    is_initialized_ = true;
  } else {
    VLOG(3) << "Worker can be initialized only once";
  }
#endif
}

void FleetWrapper::StopServer() {
#ifdef PADDLE_WITH_PSLIB
  VLOG(3) << "Going to stop server";
  pslib_ptr_->stop_server();
#endif
}

uint64_t FleetWrapper::RunServer() {
#ifdef PADDLE_WITH_PSLIB
  VLOG(3) << "Going to run server";
  return pslib_ptr_->run_server();
#else
  return 0;
#endif
}

void FleetWrapper::GatherServers(const std::vector<uint64_t>& host_sign_list,
                                 int node_num) {
#ifdef PADDLE_WITH_PSLIB
  VLOG(3) << "Going to gather server ips";
  pslib_ptr_->gather_servers(const_cast<uint64_t*>(host_sign_list.data()),
                             node_num);
#endif
}

void FleetWrapper::GatherClients(const std::vector<uint64_t>& host_sign_list) {
#ifdef PADDLE_WITH_PSLIB
  VLOG(3) << "Going to gather client ips";
  size_t len = host_sign_list.size();
  pslib_ptr_->gather_clients(const_cast<uint64_t*>(host_sign_list.data()), len);
#endif
}

std::vector<uint64_t> FleetWrapper::GetClientsInfo() {
#ifdef PADDLE_WITH_PSLIB
  VLOG(3) << "Going to get client info";
  return pslib_ptr_->get_client_info();
#endif
  return std::vector<uint64_t>();
}

void FleetWrapper::CreateClient2ClientConnection() {
#ifdef PADDLE_WITH_PSLIB
  VLOG(3) << "Going to create client2client connection";
  pslib_ptr_->create_client2client_connection(client2client_request_timeout_ms_,
                                              client2client_connect_timeout_ms_,
                                              client2client_max_retry_);
#endif
}

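// Collects the non-zero feature ids held in |var_names|, pulls their sparse
// values (of dimension fea_value_dim) from table |table_id| into
// |fea_values|, and blocks until the pull completes. Slots whose embedding
// variable (from var_emb_names) is missing from the scope are skipped.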
void FleetWrapper::PullSparseVarsSync(
    const Scope& scope, const uint64_t table_id,
    const std::vector<std::string>& var_names, std::vector<uint64_t>* fea_keys,
    std::vector<std::vector<float>>* fea_values, int fea_value_dim,
    const std::vector<std::string>& var_emb_names) {
#ifdef PADDLE_WITH_PSLIB
  std::vector<::std::future<int32_t>> pull_sparse_status;
  pull_sparse_status.resize(0);
  fea_keys->clear();
  fea_keys->resize(0);
  fea_keys->reserve(MAX_FEASIGN_NUM);
  for (size_t var_index = 0; var_index < var_names.size(); ++var_index) {
    const std::string& name = var_names[var_index];
    Variable* var = scope.FindVar(name);
    if (var == nullptr) {
      continue;
    }
    LoDTensor* tensor = var->GetMutable<LoDTensor>();
    CHECK(tensor != nullptr) << "tensor of var " << name << " is null";
    int64_t* ids = tensor->data<int64_t>();
    int len = tensor->numel();

    // skip slots which do not have embedding
    const std::string& emb_name = var_emb_names[var_index];
    Variable* emb_var = scope.FindVar(emb_name);
    if (emb_var == nullptr) {
      continue;
    }

    for (auto i = 0u; i < len; ++i) {
      if (ids[i] == 0u) {
        continue;
      }
      fea_keys->push_back(static_cast<uint64_t>(ids[i]));
    }
  }
  fea_values->resize(fea_keys->size() + 1);
  for (auto& t : *fea_values) {
    t.resize(fea_value_dim);
  }
  std::vector<float*> pull_result_ptr;
  for (auto& t : *fea_values) {
    pull_result_ptr.push_back(t.data());
  }
  auto status = pslib_ptr_->_worker_ptr->pull_sparse(
      pull_result_ptr.data(), table_id, fea_keys->data(), fea_keys->size());
  pull_sparse_status.push_back(std::move(status));
  for (auto& t : pull_sparse_status) {
    t.wait();
    auto status = t.get();
    if (status != 0) {
      LOG(ERROR) << "fleet pull sparse failed, status[" << status << "]";
      sleep(sleep_seconds_before_fail_exit_);
      exit(-1);
    }
  }
#endif
}

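// Issues an asynchronous pull of the dense regions backing |var_names| from
// dense table |tid|; the resulting future is appended to |pull_dense_status|
// and must be waited on by the caller.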
void FleetWrapper::PullDenseVarsAsync(
    const Scope& scope, const uint64_t tid,
    const std::vector<std::string>& var_names,
    std::vector<::std::future<int32_t>>* pull_dense_status) {
#ifdef PADDLE_WITH_PSLIB
  auto& regions = _regions[tid];
  regions.clear();
  regions.resize(var_names.size());
  for (auto i = 0u; i < var_names.size(); ++i) {
    Variable* var = scope.FindVar(var_names[i]);
    LoDTensor* tensor = var->GetMutable<LoDTensor>();
    float* w = tensor->data<float>();
    paddle::ps::Region reg(w, tensor->numel());
    regions[i] = std::move(reg);
  }
  auto status =
      pslib_ptr_->_worker_ptr->pull_dense(regions.data(), regions.size(), tid);
  pull_dense_status->push_back(std::move(status));
#endif
}

void FleetWrapper::PullDenseVarsSync(
    const Scope& scope, const uint64_t tid,
    const std::vector<std::string>& var_names) {
#ifdef PADDLE_WITH_PSLIB
  auto& regions = _regions[tid];
  regions.clear();
  regions.reserve(var_names.size());
  for (auto& t : var_names) {
    Variable* var = scope.FindVar(t);
    LoDTensor* tensor = var->GetMutable<LoDTensor>();
    float* w = tensor->data<float>();
    paddle::ps::Region reg(w, tensor->numel());
    regions.emplace_back(std::move(reg));
  }
  auto status =
      pslib_ptr_->_worker_ptr->pull_dense(regions.data(), regions.size(), tid);
  status.wait();
#endif
}

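// Pushes the current values of |var_names| to dense table |table_id| as
// parameters (not gradients) and blocks until the push is acknowledged.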
void FleetWrapper::PushDenseParamSync(
    const Scope& scope, const uint64_t table_id,
    const std::vector<std::string>& var_names) {
#ifdef PADDLE_WITH_PSLIB
  auto place = platform::CPUPlace();
  std::vector<paddle::ps::Region> regions;
  for (auto& t : var_names) {
    Variable* var = scope.FindVar(t);
    CHECK(var != nullptr) << "var[" << t << "] not found";
    LoDTensor* tensor = var->GetMutable<LoDTensor>();
    float* g = tensor->mutable_data<float>(place);
    paddle::ps::Region reg(g, tensor->numel());
    regions.emplace_back(std::move(reg));
  }
  auto push_status = pslib_ptr_->_worker_ptr->push_dense_param(
      regions.data(), regions.size(), table_id);
  push_status.wait();
  auto status = push_status.get();
  CHECK(status == 0) << "push dense param failed, status[" << status << "]";
#endif
}

void FleetWrapper::PushDenseVarsSync(
    Scope* scope, const uint64_t table_id,
    const std::vector<std::string>& var_names) {}

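// Pushes the dense gradients stored in |var_names| to table |table_id|
// asynchronously. When scale_datanorm >= 0, data-norm statistics gradients
// (*.batch_size@GRAD, *.batch_sum@GRAD, *.batch_square_sum@GRAD) are rescaled
// with |batch_size| before being pushed.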
void FleetWrapper::PushDenseVarsAsync(
    const Scope& scope, const uint64_t table_id,
    const std::vector<std::string>& var_names,
    std::vector<::std::future<int32_t>>* push_sparse_status,
    float scale_datanorm, int batch_size) {
#ifdef PADDLE_WITH_PSLIB
  std::vector<paddle::ps::Region> regions;
  for (auto& t : var_names) {
    Variable* var = scope.FindVar(t);
    LoDTensor* tensor = var->GetMutable<LoDTensor>();
    int count = tensor->numel();
    float* g = tensor->data<float>();
    if (scale_datanorm >= 0) {
      if (t.find(".batch_size@GRAD") != std::string::npos ||
          t.find(".batch_sum@GRAD") != std::string::npos) {
        Eigen::Map<Eigen::MatrixXf> mat(g, 1, count);
        float scale = 1.0 / batch_size;
        mat *= scale;
      } else if (t.find(".batch_square_sum@GRAD") != std::string::npos) {
        VLOG(3) << "epsilon: " << scale_datanorm;
        for (int i = 0; i < count; ++i) {
          g[i] = (g[i] - batch_size * scale_datanorm) / batch_size +
                 batch_size * scale_datanorm;
        }
      }
    }
    paddle::ps::Region reg(g, count);
    regions.emplace_back(std::move(reg));
  }
  auto status = pslib_ptr_->_worker_ptr->push_dense(regions.data(),
                                                    regions.size(), table_id);
  push_sparse_status->push_back(std::move(status));
#endif
}

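// Builds the sparse gradient payload for every non-zero feature id and pushes
// it asynchronously to sparse table |table_id|. With use_cvm the show/click
// statistics are assumed to be part of the gradient itself; otherwise show is
// set to 1 and click to the feature label. With dump_slot the slot id (parsed
// from the key variable name) is written into the first element of each
// pushed value.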
void FleetWrapper::PushSparseVarsWithLabelAsync(
    const Scope& scope, const uint64_t table_id,
    const std::vector<uint64_t>& fea_keys, const std::vector<float>& fea_labels,
    const std::vector<std::string>& sparse_key_names,
    const std::vector<std::string>& sparse_grad_names, const int emb_dim,
    std::vector<std::vector<float>>* push_values,
    std::vector<::std::future<int32_t>>* push_sparse_status,
    const int batch_size, const bool use_cvm, const bool dump_slot,
    std::vector<uint64_t>* sparse_push_keys) {
#ifdef PADDLE_WITH_PSLIB
  int offset = 2;
  int slot_offset = 0;
  int grad_dim = emb_dim;
  int show_index = 0;
  int click_index = 1;
  if (use_cvm) {
    offset = 0;
    grad_dim = emb_dim - 2;
  }
  if (dump_slot) {
    slot_offset = 1;
    show_index = 1;
    click_index = 2;
  }
  CHECK_GE(grad_dim, 0);

  sparse_push_keys->clear();
  sparse_push_keys->reserve(fea_keys.size() + 1);
  push_values->resize(fea_keys.size() + 1);
  for (auto& t : *push_values) {
    t.resize(emb_dim + offset + slot_offset);
  }
  uint64_t fea_idx = 0u;
  for (size_t i = 0;
       i < sparse_key_names.size() && i < sparse_grad_names.size(); ++i) {
    Variable* var = scope.FindVar(sparse_key_names[i]);
    if (var == nullptr) {
      continue;
    }
    LoDTensor* tensor = var->GetMutable<LoDTensor>();
    if (tensor == nullptr) {
      LOG(ERROR) << "tensor of var[" << sparse_key_names[i] << "] is null";
      exit(-1);
    }
    int len = tensor->numel();
    int64_t* ids = tensor->data<int64_t>();
    int slot = 0;
    if (dump_slot) {
      slot = boost::lexical_cast<int>(sparse_key_names[i]);
    }
    Variable* g_var = scope.FindVar(sparse_grad_names[i]);
    if (g_var == nullptr) {
      continue;
    }
    LoDTensor* g_tensor = g_var->GetMutable<LoDTensor>();
    if (g_tensor == nullptr) {
      LOG(ERROR) << "tensor of var[" << sparse_grad_names[i] << "] is null";
      exit(-1);
    }
    float* g = g_tensor->data<float>();

    if (scale_sparse_gradient_with_batch_size_ && grad_dim > 0) {
      int dim = emb_dim + offset;
      Eigen::Map<
          Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>>
          g_mat(g, g_tensor->numel() / dim, dim);
      g_mat.rightCols(grad_dim) *= batch_size;
    }
    for (auto id_idx = 0u; id_idx < len; ++id_idx) {
      if (ids[id_idx] == 0) {
        g += emb_dim;
        continue;
      }
      sparse_push_keys->push_back(ids[id_idx]);
      CHECK(fea_idx < (*push_values).size());
      CHECK(fea_idx < fea_labels.size());

      if (use_cvm) {
        memcpy((*push_values)[fea_idx].data() + offset + slot_offset, g,
               sizeof(float) * emb_dim);
      } else {
        memcpy((*push_values)[fea_idx].data() + offset + slot_offset, g,
               sizeof(float) * emb_dim);
        (*push_values)[fea_idx][show_index] = 1.0f;
        (*push_values)[fea_idx][click_index] =
            static_cast<float>(fea_labels[fea_idx]);
      }
      if (dump_slot) {
        (*push_values)[fea_idx][0] = static_cast<float>(slot);
      }
      g += emb_dim;
      fea_idx++;
    }
  }
  // slots whose embedding has been stop gradient or
  // not involved in forward-backward
  uint64_t no_grad_fea_num = 0u;
  for (size_t i = sparse_grad_names.size(); i < sparse_key_names.size(); ++i) {
    Variable* var = scope.FindVar(sparse_key_names[i]);
    if (var == nullptr) {
      continue;
    }
    LoDTensor* tensor = var->GetMutable<LoDTensor>();
    if (tensor == nullptr) {
      LOG(ERROR) << "tensor of var[" << sparse_key_names[i] << "] is null";
      exit(-1);
    }
    int len = tensor->numel();
    int64_t* ids = tensor->data<int64_t>();
    for (auto id_idx = 0u; id_idx < len; ++id_idx) {
      if (ids[id_idx] == 0) {
        continue;
      }
      ++no_grad_fea_num;
    }
  }
  CHECK(fea_idx + no_grad_fea_num == fea_keys.size())
      << "fea_idx: " << fea_idx << " no_grad_fea_num: " << no_grad_fea_num
      << " features size: " << fea_keys.size();
  CHECK(fea_idx == sparse_push_keys->size());
  if (fea_idx == 0) {
    return;
  }
  std::vector<float*> push_g_vec;
  for (auto i = 0u; i < sparse_push_keys->size(); ++i) {
    push_g_vec.push_back((*push_values)[i].data());
  }
  auto status = pslib_ptr_->_worker_ptr->push_sparse(
      table_id, sparse_push_keys->data(), (const float**)push_g_vec.data(),
      sparse_push_keys->size());
  push_sparse_status->push_back(std::move(status));
#endif
}

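// Loads parameters saved by a Paddle program (one file per variable, or a
// single combined file when load_combine is true) into |scope|, copying the
// loaded float data over the existing tensors, and then pushes the variables
// in table_var_list to dense table |table_id| via PushDenseParamSync.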
void FleetWrapper::LoadFromPaddleModel(Scope& scope, const uint64_t table_id,
                                       std::vector<std::string> var_list,
                                       std::string model_path,
                                       std::string model_proto_file,
                                       std::vector<std::string> table_var_list,
                                       bool load_combine) {
#ifdef PADDLE_WITH_PSLIB
  // load ProgramDesc from model file
  auto read_proto_func = [](const std::string& filename) -> ProgramDesc {
    std::string contents;
    std::ifstream fin(filename, std::ios::in | std::ios::binary);
    fin.seekg(0, std::ios::end);
    contents.resize(fin.tellg());
    fin.seekg(0, std::ios::beg);
    fin.read(&contents[0], contents.size());
    fin.close();
    ProgramDesc program_desc(contents);
    return program_desc;
  };
  const ProgramDesc old_program = read_proto_func(model_proto_file);
  Scope* old_scope = new Scope();
  auto& old_block = old_program.Block(0);
  auto place = platform::CPUPlace();
  std::vector<std::string> old_param_list;

  for (auto& t : var_list) {
    VarDesc* old_var_desc = old_block.FindVar(t);
    if (old_var_desc == nullptr) {
      continue;
    }
    // init variable in scope
    Variable* old_var = old_scope->Var(old_var_desc->Name());
    InitializeVariable(old_var, old_var_desc->GetType());
    old_param_list.push_back(t);
    if (load_combine) {
      continue;
    }
    // load variable from model
    paddle::framework::AttributeMap attrs;
    attrs.insert({"file_path", model_path + "/" + old_var_desc->Name()});
    auto load_op = paddle::framework::OpRegistry::CreateOp(
        "load", {}, {{"Out", {old_var_desc->Name()}}}, attrs);
    load_op->Run(*old_scope, place);
  }

  if (load_combine) {
    std::sort(old_param_list.begin(), old_param_list.end());
    paddle::framework::AttributeMap attrs;
    attrs.insert({"file_path", model_path});
    auto load_op = paddle::framework::OpRegistry::CreateOp(
        "load_combine", {}, {{"Out", old_param_list}}, attrs);
    load_op->Run(*old_scope, place);
  }

  for (auto& t : old_param_list) {
    Variable* old_var = old_scope->Var(t);
    // old model data, here we assume data type is float
    LoDTensor* old_tensor = old_var->GetMutable<LoDTensor>();
    float* old_data = old_tensor->data<float>();
    // new model data, here we assume data type is float
    Variable* var = scope.FindVar(t);
    CHECK(var != nullptr) << "var[" << t << "] not found";
    LoDTensor* tensor = var->GetMutable<LoDTensor>();
    float* data = tensor->data<float>();
    // copy from old data to new data
    if (old_tensor->numel() > tensor->numel()) {
      memcpy(data, old_data, tensor->numel() * sizeof(float));
    } else {
      memcpy(data, old_data, old_tensor->numel() * sizeof(float));
    }
  }
  delete old_scope;
  PushDenseParamSync(scope, table_id, table_var_list);
#endif
}

void FleetWrapper::LoadModel(const std::string& path, const int mode) {
#ifdef PADDLE_WITH_PSLIB
  auto ret = pslib_ptr_->_worker_ptr->load(path, std::to_string(mode));
  ret.wait();
  if (ret.get() != 0) {
    LOG(ERROR) << "load model from path:" << path << " failed";
536
    sleep(sleep_seconds_before_fail_exit_);
537 538 539 540 541 542 543
    exit(-1);
  }
#else
  VLOG(0) << "FleetWrapper::LoadModel does nothing when no pslib";
#endif
}

void FleetWrapper::LoadModelOneTable(const uint64_t table_id,
                                     const std::string& path, const int mode) {
#ifdef PADDLE_WITH_PSLIB
  auto ret =
      pslib_ptr_->_worker_ptr->load(table_id, path, std::to_string(mode));
  ret.wait();
  if (ret.get() != 0) {
    LOG(ERROR) << "load model of table id: " << table_id
               << ", from path: " << path << " failed";
  }
#else
  VLOG(0) << "FleetWrapper::LoadModelOneTable does nothing when no pslib";
#endif
}

void FleetWrapper::SaveModel(const std::string& path, const int mode) {
#ifdef PADDLE_WITH_PSLIB
  auto ret = pslib_ptr_->_worker_ptr->save(path, std::to_string(mode));
  ret.wait();
  int32_t feasign_cnt = ret.get();
  if (feasign_cnt == -1) {
    LOG(ERROR) << "save model failed";
    sleep(sleep_seconds_before_fail_exit_);
    exit(-1);
  }
#else
  VLOG(0) << "FleetWrapper::SaveModel does nothing when no pslib";
#endif
}

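// Cache-related helpers. GetCacheThreshold flushes pending requests and asks
// PSLib for the current cache threshold; CacheShuffle and SaveCache forward
// to the corresponding PSLib worker calls and treat a return value of -1 as
// failure.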
double FleetWrapper::GetCacheThreshold() {
#ifdef PADDLE_WITH_PSLIB
  double cache_threshold = 0.0;
  auto ret = pslib_ptr_->_worker_ptr->flush();
  ret.wait();
  ret = pslib_ptr_->_worker_ptr->get_cache_threshold(0, cache_threshold);
  ret.wait();
  if (cache_threshold < 0) {
    LOG(ERROR) << "get cache threshold failed";
    sleep(sleep_seconds_before_fail_exit_);
    exit(-1);
  }
  return cache_threshold;
#else
  VLOG(0) << "FleetWrapper::GetCacheThreshold does nothing when no pslib";
  return 0.0;
#endif
}

void FleetWrapper::CacheShuffle(int table_id, const std::string& path,
                                const int mode, const double cache_threshold) {
#ifdef PADDLE_WITH_PSLIB
  auto ret = pslib_ptr_->_worker_ptr->cache_shuffle(
      0, path, std::to_string(mode), std::to_string(cache_threshold));
  ret.wait();
  int32_t feasign_cnt = ret.get();
  if (feasign_cnt == -1) {
    LOG(ERROR) << "cache shuffle failed";
    sleep(sleep_seconds_before_fail_exit_);
    exit(-1);
  }
#else
  VLOG(0) << "FleetWrapper::CacheShuffle does nothing when no pslib";
#endif
}

int32_t FleetWrapper::SaveCache(int table_id, const std::string& path,
                                const int mode) {
#ifdef PADDLE_WITH_PSLIB
  auto ret = pslib_ptr_->_worker_ptr->save_cache(0, path, std::to_string(mode));
  ret.wait();
  int32_t feasign_cnt = ret.get();
  if (feasign_cnt == -1) {
    LOG(ERROR) << "table save cache failed";
    sleep(sleep_seconds_before_fail_exit_);
    exit(-1);
  }
  return feasign_cnt;
#else
  VLOG(0) << "FleetWrapper::SaveCache does nothing when no pslib";
  return -1;
#endif
}

void FleetWrapper::ShrinkSparseTable(int table_id) {
#ifdef PADDLE_WITH_PSLIB
  auto ret = pslib_ptr_->_worker_ptr->shrink(table_id);
  ret.wait();
#else
  VLOG(0) << "FleetWrapper::ShrinkSparseTable does nothing when no pslib";
#endif
}

void FleetWrapper::ClearModel() {
#ifdef PADDLE_WITH_PSLIB
  auto ret = pslib_ptr_->_worker_ptr->clear();
  ret.wait();
#else
  VLOG(0) << "FleetWrapper::ClearModel does nothing when no pslib";
#endif
}

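// Decays the data-norm statistics of a dense table held in |scope|: for each
// *batch_sum* variable, the matching *batch_size* value times log(decay) is
// added at stride emb_dim, and all listed variables are then pushed back to
// dense table |table_id| as parameters.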
void FleetWrapper::ShrinkDenseTable(int table_id, Scope* scope,
                                    std::vector<std::string> var_list,
                                    float decay, int emb_dim) {
#ifdef PADDLE_WITH_PSLIB
  std::vector<paddle::ps::Region> regions;
  for (std::string& name : var_list) {
    if (name.find("batch_sum") != std::string::npos) {
      Variable* var = scope->FindVar(name);
      CHECK(var != nullptr) << "var[" << name << "] not found";
      VLOG(0) << "prepare shrink dense batch_sum";
      LoDTensor* tensor = var->GetMutable<LoDTensor>();
      float* g = tensor->data<float>();

      // show_batch_sum += N * log(decay)
      std::string size_name = name;
      size_name.replace(size_name.find("batch_sum"), size_name.length(),
                        "batch_size");
      Variable* var_size = scope->FindVar(size_name);
      CHECK(var_size != nullptr) << "var[" << size_name << "] not found";
      VLOG(3) << "shrink dense batch_sum: " << name << ", " << size_name;
      float* g_size = var_size->GetMutable<LoDTensor>()->data<float>();

      for (int k = 0; k < tensor->numel(); k += emb_dim) {
        g[k] = g[k] + g_size[k] * log(decay);
      }
      paddle::ps::Region reg(g, tensor->numel());
      regions.emplace_back(std::move(reg));
    } else {
      Variable* var = scope->FindVar(name);
      CHECK(var != nullptr) << "var[" << name << "] not found";
      LoDTensor* tensor = var->GetMutable<LoDTensor>();
      float* g = tensor->data<float>();
      paddle::ps::Region reg(g, tensor->numel());
      regions.emplace_back(std::move(reg));
    }
  }
  auto push_status = pslib_ptr_->_worker_ptr->push_dense_param(
      regions.data(), regions.size(), table_id);
  push_status.wait();
  auto status = push_status.get();
  if (status != 0) {
    LOG(FATAL) << "push shrink dense param failed, status[" << status << "]";
    sleep(sleep_seconds_before_fail_exit_);
    exit(-1);
  }
#else
  VLOG(0) << "FleetWrapper::ShrinkDenseTable does nothing when no pslib";
#endif
}

void FleetWrapper::ClientFlush() {
#ifdef PADDLE_WITH_PSLIB
  auto ret = pslib_ptr_->_worker_ptr->flush();
  ret.wait();
#else
  VLOG(0) << "FleetWrapper::ClientFlush does nothing when no pslib";
#endif
}

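// Registers |handler| as the callback for client-to-client messages of type
// |msg_type|. A minimal usage sketch, assuming MsgHandlerFunc receives the
// message type, the source client id and the message payload:
//   FleetWrapper::GetInstance()->RegisterClientToClientMsgHandler(
//       0, [](int msg_type, int from_client, const std::string& msg) {
//         return 0;  // handle msg here
//       });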
int FleetWrapper::RegisterClientToClientMsgHandler(int msg_type,
                                                   MsgHandlerFunc handler) {
#ifdef PADDLE_WITH_PSLIB
  VLOG(3) << "calling FleetWrapper::RegisterClientToClientMsgHandler";
  VLOG(3) << "pslib_ptr_=" << pslib_ptr_;
  VLOG(3) << "_worker_ptr=" << pslib_ptr_->_worker_ptr;
  return pslib_ptr_->_worker_ptr->registe_client2client_msg_handler(msg_type,
                                                                    handler);
#else
  VLOG(0) << "FleetWrapper::RegisterClientToClientMsgHandler"
          << " does nothing when no pslib";
#endif
  return 0;
}

std::future<int32_t> FleetWrapper::SendClientToClientMsg(
    int msg_type, int to_client_id, const std::string& msg) {
#ifdef PADDLE_WITH_PSLIB
  return pslib_ptr_->_worker_ptr->send_client2client_msg(msg_type, to_client_id,
                                                         msg);
#else
  VLOG(0) << "FleetWrapper::SendClientToClientMsg"
          << " does nothing when no pslib";
#endif
  return std::future<int32_t>();
}

template <typename T>
void FleetWrapper::Serialize(const std::vector<T*>& t, std::string* str) {
#ifdef PADDLE_WITH_PSLIB
  paddle::ps::BinaryArchive ar;
  for (size_t i = 0; i < t.size(); ++i) {
    ar << *(t[i]);
  }
  *str = std::string(ar.buffer(), ar.length());
#else
  VLOG(0) << "FleetWrapper::Serialize does nothing when no pslib";
#endif
}

template <typename T>
void FleetWrapper::Deserialize(std::vector<T>* t, const std::string& str) {
#ifdef PADDLE_WITH_PSLIB
  if (str.length() == 0) {
    return;
  }
  paddle::ps::BinaryArchive ar;
  ar.set_read_buffer(const_cast<char*>(str.c_str()), str.length(), nullptr);
  if (ar.cursor() == ar.finish()) {
    return;
  }
  while (ar.cursor() < ar.finish()) {
    t->push_back(ar.get<T>());
  }
  CHECK(ar.cursor() == ar.finish());
  VLOG(3) << "Deserialize size " << t->size();
#else
  VLOG(0) << "FleetWrapper::Deserialize does nothing when no pslib";
#endif
}

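// Returns a thread-local random engine. With pslib enabled it is seeded from
// the wall clock combined with a global atomic counter, so threads created at
// the same time still get distinct sequences.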
std::default_random_engine& FleetWrapper::LocalRandomEngine() {
  struct engine_wrapper_t {
    std::default_random_engine engine;
#ifdef PADDLE_WITH_PSLIB
    engine_wrapper_t() {
      struct timespec tp;
      clock_gettime(CLOCK_REALTIME, &tp);
      double cur_time = tp.tv_sec + tp.tv_nsec * 1e-9;
      static std::atomic<uint64_t> x(0);
      std::seed_seq sseq = {x++, x++, x++, (uint64_t)(cur_time * 1000)};
      engine.seed(sseq);
    }
#endif
  };
  thread_local engine_wrapper_t r;
  return r.engine;
}

template void FleetWrapper::Serialize<std::vector<MultiSlotType>>(
    const std::vector<std::vector<MultiSlotType>*>&, std::string*);
template void FleetWrapper::Deserialize<std::vector<MultiSlotType>>(
    std::vector<std::vector<MultiSlotType>>*, const std::string&);

}  // end namespace framework
}  // end namespace paddle