/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/executor_thread_worker.h"
#include <algorithm>
#include "google/protobuf/io/zero_copy_stream_impl.h"
#include "google/protobuf/message.h"
#include "google/protobuf/text_format.h"

#include "gflags/gflags.h"
#include "paddle/fluid/framework/feed_fetch_method.h"
#include "paddle/fluid/framework/feed_fetch_type.h"
#include "paddle/fluid/framework/lod_rank_table.h"
#include "paddle/fluid/framework/lod_tensor_array.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/reader.h"
#include "paddle/fluid/framework/variable_helper.h"
#include "paddle/fluid/inference/io.h"
#include "paddle/fluid/platform/cpu_helper.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/fluid/pybind/pybind.h"

namespace paddle {
namespace framework {

#ifdef PADDLE_WITH_PSLIB
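// DensePullThread: background thread that periodically refreshes dense
// parameters from the parameter server once enough trainer-side updates
// have accumulated.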
int DensePullThread::start() {
  _running = true;
  _t = std::thread(&DensePullThread::run, this);
  return 0;
}

void DensePullThread::run() {
  while (_running) {
    _pull_dense_status.resize(0);
    for (auto& t : _dense_variable_name) {
      if (check_update_param(t.first)) {
        auto status = pull_dense(t.first);
        _pull_dense_status.emplace_back(std::move(status));
        reset_thread_version(t.first);
      }
    }
    if (_pull_dense_status.size() != 0) {
      wait_all();
    }

    usleep(_sleep_time_ms * 1000);
  }
}
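
// A table is due for a refresh once the slowest training thread has
// advanced _threshold versions past the last pull.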
bool DensePullThread::check_update_param(uint64_t table_id) {
  {
    std::lock_guard<std::mutex> lock(_mutex_for_version);
    auto& version = _training_versions[table_id];
    _current_version[table_id] =
        *(std::min_element(version.begin(), version.end()));
  }
  if (_current_version[table_id] - _last_versions[table_id] < _threshold) {
    return false;
  }
  return true;
}

void DensePullThread::reset_thread_version(uint64_t table_id) {
  std::lock_guard<std::mutex> lock(_mutex_for_version);
  _last_versions[table_id] = _current_version[table_id];
}
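
// Maps each dense variable's tensor buffer into a ps::Region and issues an
// asynchronous pull for the whole table, so parameters are written in place.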
std::future<int32_t> DensePullThread::pull_dense(uint64_t table_id) {
  auto& regions = _regions[table_id];
  regions.clear();
  auto& variables = _dense_variable_name[table_id];
  regions.resize(variables.size());

  for (auto i = 0u; i < variables.size(); ++i) {
    auto& t = variables[i];
    Variable* var = _root_scope->FindVar(t);
    LoDTensor* tensor = var->GetMutable<LoDTensor>();

    float* w = tensor->data<float>();
    paddle::ps::Region reg(w, tensor->numel());
    regions[i] = std::move(reg);
  }
  return _ps_client->pull_dense(regions.data(), regions.size(), table_id);
}

void DensePullThread::wait_all() {
  for (auto& t : _pull_dense_status) {
    t.wait();
    auto status = t.get();
    if (status != 0) {
      LOG(WARNING) << "pull dense failed times:" << ++_pull_dense_fail_times;
    }
  }

  if (_pull_dense_fail_times > 20) {
    LOG(FATAL) << "pull dense failed times more than 20 times";
    exit(-1);
  }

  _pull_dense_status.resize(0);
}

void DensePullThread::increase_thread_version(int thread_id,
                                              uint64_t table_id) {
  std::lock_guard<std::mutex> lock(_mutex_for_version);
  _training_versions[table_id][thread_id]++;
}
#endif

void ExecutorThreadWorker::CreateThreadOperators(const ProgramDesc& program) {
  auto& block = program.Block(0);
  op_names_.clear();
  for (auto& op_desc : block.AllOps()) {
    std::unique_ptr<OperatorBase> local_op = OpRegistry::CreateOp(*op_desc);
    op_names_.push_back(op_desc->Type());
    OperatorBase* local_op_ptr = local_op.release();
    ops_.push_back(local_op_ptr);
  }
}

void ExecutorThreadWorker::CreateThreadResource(
    const framework::ProgramDesc& program,
    const paddle::platform::Place& place) {
  CreateThreadScope(program);
  CreateThreadOperators(program);
  SetMainProgram(program);
  SetPlace(place);
}

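// Persistable variables are created in the shared root scope; all other
// variables live in this worker's private child scope.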
void ExecutorThreadWorker::CreateThreadScope(const ProgramDesc& program) {
  auto& block = program.Block(0);

  PADDLE_ENFORCE_NOT_NULL(
      root_scope_, "root_scope should be set before creating thread scope");

  thread_scope_ = &root_scope_->NewScope();
  for (auto& var : block.AllVars()) {
    if (var->Persistable()) {
      auto* ptr = root_scope_->Var(var->Name());
      InitializeVariable(ptr, var->GetType());
    } else {
      auto* ptr = thread_scope_->Var(var->Name());
      InitializeVariable(ptr, var->GetType());
    }
  }
}

void ExecutorThreadWorker::SetDataFeed(
    const std::shared_ptr<DataFeed>& datafeed) {
  thread_reader_ = datafeed;
}

void ExecutorThreadWorker::BindingDataFeedMemory() {
  const std::vector<std::string>& input_feed =
      thread_reader_->GetUseSlotAlias();
  for (auto name : input_feed) {
    thread_reader_->AddFeedVar(thread_scope_->Var(name), name);
  }
}

void ExecutorThreadWorker::SetFetchVarNames(
    const std::vector<std::string>& fetch_var_names) {
  fetch_var_names_.clear();
  fetch_var_names_.insert(fetch_var_names_.end(), fetch_var_names.begin(),
                          fetch_var_names.end());
}

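// Pins this worker thread to the CPU core matching its thread id
// (a no-op on Windows and macOS).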
void ExecutorThreadWorker::SetDevice() {
#if defined _WIN32 || defined __APPLE__
  return;
#else
  static unsigned concurrency_cap = std::thread::hardware_concurrency();
  LOG(WARNING) << "concurrency capacity " << concurrency_cap;
  int thread_id = this->thread_id_;

  if (static_cast<unsigned>(thread_id) < concurrency_cap) {
    unsigned proc = thread_id;

    cpu_set_t mask;
    CPU_ZERO(&mask);
    CPU_SET(proc, &mask);

    if (-1 == sched_setaffinity(0, sizeof(mask), &mask)) {
      VLOG(1) << "WARNING: Failed to set thread affinity for thread "
              << thread_id;
    } else {
      CPU_ZERO(&mask);
      if ((0 != sched_getaffinity(0, sizeof(mask), &mask)) ||
          (CPU_ISSET(proc, &mask) == 0)) {
        VLOG(3) << "WARNING: Failed to set thread affinity for thread "
                << thread_id;
      }
    }
  } else {
    VLOG(1) << "WARNING: Failed to set thread affinity for thread "
            << thread_id;
  }
#endif
}

template <typename T>
void print_lod_tensor(const std::string& var_name,
                      const LoDTensor& lod_tensor) {
  auto inspect = lod_tensor.data<T>();
  auto element_num = lod_tensor.numel();

  std::ostringstream sstream;
  sstream << var_name << " (element num " << element_num << "): [";
  if (element_num > 0) {
    sstream << inspect[0];
    for (int64_t j = 1; j < element_num; ++j) {
      sstream << " " << inspect[j];
    }
  }
  sstream << "]";

  std::cout << sstream.str() << std::endl;
}

static void print_fetch_var(Scope* scope, const std::string& var_name) {
  auto& tensor = scope->FindVar(var_name)->Get<LoDTensor>();

#define PrintLoDTensorCallback(cpp_type, proto_type) \
  do {                                               \
    if (tensor.type() == proto_type) {               \
      print_lod_tensor<cpp_type>(var_name, tensor);  \
      return;                                        \
    }                                                \
  } while (0)

  _ForEachDataType_(PrintLoDTensorCallback);
  VLOG(1) << "print_fetch_var: unrecognized data type:" << tensor.type();
}

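// Same loop as TrainFiles, but instrumented: accumulates per-op run time
// and read time, and dumps mean timings to stderr every 1000 batches.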
void ExecutorThreadWorker::TrainFilesWithTimer() {
  platform::SetNumThreads(1);
  SetDevice();
  thread_reader_->Start();
  std::vector<double> op_total_time;
  std::vector<std::string> op_name;
  for (auto& op : ops_) {
    op_name.push_back(op->Type());
  }
  op_total_time.resize(ops_.size());
  for (size_t i = 0; i < op_total_time.size(); ++i) {
    op_total_time[i] = 0.0;
  }
  platform::Timer timeline;
  double total_time = 0.0;
  double read_time = 0.0;
  int cur_batch;
  int batch_cnt = 0;
  timeline.Start();
  while ((cur_batch = thread_reader_->Next()) > 0) {
    timeline.Pause();
    read_time += timeline.ElapsedSec();
    total_time += timeline.ElapsedSec();
    for (size_t i = 0; i < ops_.size(); ++i) {
      timeline.Start();
      ops_[i]->Run(*thread_scope_, place_);
      timeline.Pause();
      op_total_time[i] += timeline.ElapsedSec();
      total_time += timeline.ElapsedSec();
    }
    ++batch_cnt;
    thread_scope_->DropKids();
    if (batch_cnt > 0 && batch_cnt % 1000 == 0) {
      for (size_t i = 0; i < ops_.size(); ++i) {
        fprintf(stderr, "op_name:[%zu][%s], op_mean_time:[%fs]\n", i,
                op_name[i].c_str(), op_total_time[i] / batch_cnt);
      }
      fprintf(stderr, "mean read time: %fs\n", read_time / batch_cnt);
    }
    timeline.Start();
  }
}

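// Plain training loop: runs every op of the main program on each batch;
// when debug_ is set, thread 0 prints the fetch variables after each batch.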
void ExecutorThreadWorker::TrainFiles() {
  platform::SetNumThreads(1);

  // todo: configurable
  SetDevice();

  int fetch_var_num = fetch_var_names_.size();
  fetch_values_.clear();
  fetch_values_.resize(fetch_var_num);

  thread_reader_->Start();

  int cur_batch;
  int batch_cnt = 0;
  while ((cur_batch = thread_reader_->Next()) > 0) {
    // executor run here
    for (auto& op : ops_) {
      op->Run(*thread_scope_, place_);
    }

    ++batch_cnt;
    thread_scope_->DropKids();

    if (debug_ == false || thread_id_ != 0) {
      continue;
    }

    for (int i = 0; i < fetch_var_num; ++i) {
      print_fetch_var(thread_scope_, fetch_var_names_[i]);
    }  // end for (int i = 0...)
  }    // end while ()
}

void ExecutorThreadWorker::SetThreadId(int tid) { thread_id_ = tid; }

void ExecutorThreadWorker::SetPlace(const platform::Place& place) {
  place_ = place;
}

void ExecutorThreadWorker::SetMainProgram(
    const ProgramDesc& main_program_desc) {
  main_program_.reset(new ProgramDesc(main_program_desc));
}

void ExecutorThreadWorker::SetRootScope(Scope* g_scope) {
  root_scope_ = g_scope;
}

#ifdef PADDLE_WITH_PSLIB
//  AsyncExecutor
void AsyncExecutorThreadWorker::TrainFiles() {
  SetDevice();

  int fetch_var_num = fetch_var_names_.size();
  fetch_values_.clear();
  fetch_values_.resize(fetch_var_num);

  thread_reader_->Start();

  int cur_batch;
  int batch_cnt = 0;
  while ((cur_batch = thread_reader_->Next()) > 0) {
    // executor run here
    TrainOneNetwork();

    ++batch_cnt;
    thread_scope_->DropKids();

    if (debug_ == false || thread_id_ != 0) {
      continue;
    }

    for (int i = 0; i < fetch_var_num; ++i) {
      print_fetch_var(thread_scope_, fetch_var_names_[i]);
    }  // end for (int i = 0...)
  }    // end while ()
}

void AsyncExecutorThreadWorker::SetPSlibPtr(
    std::shared_ptr<paddle::distributed::PSlib> pslib_ptr) {
  _pslib_ptr = pslib_ptr;
}
void AsyncExecutorThreadWorker::SetPullDenseThread(
    std::shared_ptr<DensePullThread> dpt) {
  _pull_dense_thread = dpt;
}
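
// One iteration: prepare (pull and fill) sparse parameters, run every op
// except "sgd" ops and the configured skip-ops (the parameter server
// applies those updates), then push gradients back.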
void AsyncExecutorThreadWorker::TrainOneNetwork() {
  PrepareParams();

  for (auto& op : ops_) {
    if (op->Type().find("sgd") != std::string::npos) {
      continue;
    }
    bool need_skip = false;
    for (auto t = 0u; t < _param_config->skip_op.size(); ++t) {
      if (op->Type().find(_param_config->skip_op[t]) != std::string::npos) {
        need_skip = true;
        break;
      }
    }
    if (!need_skip) {
      op->Run(*thread_scope_, place_);
    }
  }
  UpdateParams();
}

void AsyncExecutorThreadWorker::SetParamConfig(
    AsyncWorkerParamConfig* param_config) {
  _param_config = param_config;
}

void AsyncExecutorThreadWorker::PrepareParams() {
  for (auto table_id : _param_config->sparse_table_id) {
    PullSparse(table_id);
    for (auto& t : _pull_sparse_status) {
      t.wait();
      auto status = t.get();
      if (status != 0) {
        LOG(ERROR) << "pull sparse failed, status[" << status << "]";
        exit(-1);
      }
    }
  }
  _pull_sparse_status.resize(0);

  for (auto table_id : _param_config->sparse_table_id) {
    FillSparse(table_id);
  }
}

void AsyncExecutorThreadWorker::UpdateParams() {
  for (auto i : _param_config->sparse_table_id) {
    PushSparse(i);
  }
  for (auto i : _param_config->dense_table_id) {
    PushDense(i);
  }
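  // With the default of -1 (cast to UINT32_MAX below) the wait thresholds
  // are never reached, so pending push statuses are dropped each round
  // rather than waited on.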
  int32_t tmp_push_dense_wait_times = -1;
  int32_t tmp_push_sparse_wait_times = -1;
  static uint32_t push_dense_wait_times =
      static_cast<uint32_t>(tmp_push_dense_wait_times);
  static uint32_t push_sparse_wait_times =
      static_cast<uint32_t>(tmp_push_sparse_wait_times);

  if (_push_dense_status.size() >= push_dense_wait_times) {
    for (auto& t : _push_dense_status) {
      t.wait();
    }
    _push_dense_status.resize(0);
  }
  if (tmp_push_dense_wait_times == -1) {
    _push_dense_status.resize(0);
  }
  if (_push_sparse_status.size() >= push_sparse_wait_times) {
    for (auto& t : _push_sparse_status) {
      t.wait();
    }
    _push_sparse_status.resize(0);
  }
  if (tmp_push_sparse_wait_times == -1) {
    _push_sparse_status.resize(0);
  }
  for (auto dense_table_id : _param_config->dense_table_id) {
    _pull_dense_thread->increase_thread_version(thread_id_, dense_table_id);
  }
}

void AsyncExecutorThreadWorker::PushDense(int table_id) {
  std::vector<paddle::ps::Region> regions;
  for (auto& t : _param_config->dense_gradient_variable_name[table_id]) {
    Variable* var = thread_scope_->FindVar(t);
    CHECK(var != nullptr) << "var[" << t << "] not found";
    LoDTensor* tensor = var->GetMutable<LoDTensor>();
    int count = tensor->numel();
    float* g = tensor->data<float>();
    paddle::ps::Region reg(g, count);
    regions.emplace_back(std::move(reg));
  }

  auto status = _pslib_ptr->_worker_ptr->push_dense(regions.data(),
                                                    regions.size(), table_id);
  _push_dense_status.push_back(std::move(status));
}

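// Gathers all non-zero feasigns of the batch from the feed slots, sizes
// the pull/push buffers, and issues an asynchronous pull_sparse; feasign
// metadata is collected for the later push.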
void AsyncExecutorThreadWorker::PullSparse(int table_id) {
  auto& features = _features[table_id];
  auto& feature_value = _feature_value[table_id];
  auto fea_dim = _param_config->fea_dim;
  // slot id starts from 1
  features.clear();
  features.resize(0);
  features.reserve(MAX_FEASIGN_NUM);
  const std::vector<std::string>& feed_vec = thread_reader_->GetUseSlotAlias();
  // TODO: slot_idx = 0 is assumed to be the label slot
  for (auto slot_idx = 1u; slot_idx < feed_vec.size(); ++slot_idx) {
    Variable* var = thread_scope_->FindVar(feed_vec[slot_idx]);
    LoDTensor* tensor = var->GetMutable<LoDTensor>();
    int64_t* ids = tensor->data<int64_t>();
    int len = tensor->numel();
    for (auto i = 0; i < len; ++i) {
      // todo(colourful-tree): current trick - filter feasign==use_slot_mod
      // (bug: datafeed fills use_slot_mod for empty slots)
      if (ids[i] == 0u) {
        continue;
      }
      features.push_back(static_cast<uint64_t>(ids[i]));
    }
  }
  check_pull_push_memory(features, &feature_value, fea_dim);

  std::vector<float*> pull_feature_value;
  for (auto i = 0u; i < features.size(); ++i) {
    pull_feature_value.push_back(feature_value[i].data());
  }

  auto status = _pslib_ptr->_worker_ptr->pull_sparse(
      pull_feature_value.data(), table_id, features.data(), features.size());
  _pull_sparse_status.push_back(std::move(status));

  auto& push_g = _feature_push_value[table_id];
  check_pull_push_memory(features, &push_g, fea_dim);

  collect_feasign_info(table_id);
}

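// Scatters the pulled values into each slot's embedding input tensor.
// Each pulled value holds fea_dim floats whose first two entries appear to
// be show/click statistics, so the slot_dim embedding starts at offset 2;
// feasign 0 (padding) gets zero-initialized values instead.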
void AsyncExecutorThreadWorker::FillSparse(int table_id) {
  auto slot_dim = _param_config->slot_dim;
  auto fea_dim = _param_config->fea_dim;
  auto& features = _features[table_id];
  auto& fea_value = _feature_value[table_id];

  CHECK(features.size() > 0) << "feature size check failed";

  auto fea_idx = 0u;

  std::vector<float> init_value(fea_dim);

  const std::vector<std::string>& feed_vec = thread_reader_->GetUseSlotAlias();
  // TODO: slot_idx = 0 is assumed to be the label slot
  for (auto slot_idx = 1u; slot_idx < feed_vec.size(); ++slot_idx) {
    Variable* var = thread_scope_->FindVar(feed_vec[slot_idx]);
    LoDTensor* tensor = var->GetMutable<LoDTensor>();
    int64_t* ids = tensor->data<int64_t>();
    int len = tensor->numel();
    Variable* var_emb = thread_scope_->FindVar(
        _param_config->slot_input_vec[table_id][slot_idx - 1]);
    LoDTensor* tensor_emb = var_emb->GetMutable<LoDTensor>();
    float* ptr =
        tensor_emb->mutable_data<float>({len, slot_dim}, platform::CPUPlace());
    memset(ptr, 0, sizeof(float) * len * slot_dim);
    auto& tensor_lod = tensor->lod()[0];

    LoD data_lod{tensor_lod};
    tensor_emb->set_lod(data_lod);

    for (auto index = 0; index < len; ++index) {
      if (ids[index] == 0u) {
        memcpy(ptr + slot_dim * index, init_value.data() + 2,
               sizeof(float) * slot_dim);
        continue;
      }
      memcpy(ptr + slot_dim * index, fea_value[fea_idx].data() + 2,
             sizeof(float) * slot_dim);
      fea_idx++;
    }
  }
}

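// Builds per-feasign gradient vectors for the slots that feed this table
// and issues an asynchronous push_sparse.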
void AsyncExecutorThreadWorker::PushSparse(int table_id) {
  auto slot_dim = _param_config->slot_dim;
  auto fea_dim = _param_config->fea_dim;
  auto& features = _features[table_id];
  auto& push_g = _feature_push_value[table_id];
  check_pull_push_memory(features, &push_g, fea_dim);
  CHECK(push_g.size() == features.size() + 1)
      << "push_g size:" << push_g.size()
      << " features size:" << features.size();
  uint64_t fea_idx = 0u;
  auto& fea_info = _fea_info[table_id];
  int offset = 2;
  const std::vector<std::string>& feed_vec = thread_reader_->GetUseSlotAlias();
  // slot_idx = 0 is label
  for (auto slot_idx = 1u; slot_idx < feed_vec.size(); ++slot_idx) {
    if (_param_config->slot_alias_to_table.find(feed_vec[slot_idx]) ==
        _param_config->slot_alias_to_table.end()) {
      LOG(ERROR) << "ERROR slot_idx:" << slot_idx
                 << " name:" << feed_vec[slot_idx];
    } else if (_param_config->slot_alias_to_table[feed_vec[slot_idx]] !=
               table_id) {
      continue;
    }
    Variable* g_var = thread_scope_->FindVar(
        _param_config->gradient_var[table_id][slot_idx - 1]);
    CHECK(g_var != nullptr)
        << "var[" << _param_config->gradient_var[table_id][slot_idx - 1]
        << "] not found";
    LoDTensor* g_tensor = g_var->GetMutable<LoDTensor>();
    if (g_tensor == nullptr) {
      LOG(ERROR) << "var["
                 << _param_config->gradient_var[table_id][slot_idx - 1]
                 << "] not found";
      exit(-1);
    }
    float* g = g_tensor->data<float>();

    Variable* var = thread_scope_->FindVar(feed_vec[slot_idx]);
    CHECK(var != nullptr) << "var[" << feed_vec[slot_idx] << "] not found";
    LoDTensor* tensor = var->GetMutable<LoDTensor>();
    if (tensor == nullptr) {
      LOG(ERROR) << "var[" << feed_vec[slot_idx] << "] not found";
      exit(-1);
    }
    int len = tensor->numel();
    CHECK(slot_dim * len == g_tensor->numel())
        << "len:" << len << " g_numel:" << g_tensor->numel();
    CHECK(len == tensor->numel()) << "len:" << len
                                  << " t_numel:" << tensor->numel();
    int64_t* ids = tensor->data<int64_t>();
    for (auto id_idx = 0; id_idx < len; ++id_idx) {
      if (ids[id_idx] == 0) {
        g += slot_dim;
        continue;
      }
      memcpy(push_g[fea_idx].data() + offset, g, sizeof(float) * slot_dim);
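      // Element 0 carries the show count (1.0 per occurrence) and element
      // 1 the instance label; the slot_dim gradient starts at offset 2.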
      push_g[fea_idx][0] = 1.0f;
      CHECK(fea_idx < fea_info.size()) << "fea_idx:" << fea_idx
                                       << " size:" << fea_info.size();
      push_g[fea_idx][1] = static_cast<float>(fea_info[fea_idx].label);
      g += slot_dim;
      fea_idx++;
    }
  }
  CHECK(fea_idx == features.size()) << "fea_idx:" << fea_idx
                                    << " features size:" << features.size();
  CHECK_GT(features.size(), 0);

  std::vector<float*> push_g_vec;
  for (auto i = 0u; i < features.size(); ++i) {
    push_g_vec.push_back(push_g[i].data());
  }
  auto status = _pslib_ptr->_worker_ptr->push_sparse(
      table_id, features.data(), (const float**)push_g_vec.data(),
      features.size());
  _push_sparse_status.push_back(std::move(status));
}

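// Records, for every feasign kept by PullSparse, its slot, instance index
// and the instance label, so PushSparse can attach labels to gradients.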
void AsyncExecutorThreadWorker::collect_feasign_info(int table_id) {
  auto& fea_info = _fea_info[table_id];
  auto& feature = _features[table_id];
  fea_info.resize(feature.size());
  const std::vector<std::string>& feed_vec = thread_reader_->GetUseSlotAlias();
  Variable* var = thread_scope_->FindVar(feed_vec[0]);
  LoDTensor* tensor = var->GetMutable<LoDTensor>();
  int64_t* label = tensor->data<int64_t>();

  int global_index = 0;
  for (auto slot_idx = 1u; slot_idx < feed_vec.size(); ++slot_idx) {
    Variable* var = thread_scope_->FindVar(feed_vec[slot_idx]);
    LoDTensor* tensor = var->GetMutable<LoDTensor>();
    int64_t* ids = tensor->data<int64_t>();

    size_t fea_idx = 0;
    for (auto ins_idx = 1u; ins_idx < tensor->lod()[0].size(); ++ins_idx) {
      for (; fea_idx < tensor->lod()[0][ins_idx]; ++fea_idx) {
        if (ids[fea_idx] == 0u) {
          continue;
        }
        FeasignInfo info{slot_idx, ins_idx, label[ins_idx - 1]};

        fea_info[global_index++] = std::move(info);
      }
    }
  }
  CHECK(global_index == feature.size())
      << "expect fea info size:" << feature.size() << " real:" << global_index;
}

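// Ensures one buffer of `dim` floats per feasign, plus one spare entry.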
void AsyncExecutorThreadWorker::check_pull_push_memory(
    const std::vector<uint64_t>& features,
    std::vector<std::vector<float>>* push_g, int dim) {
  push_g->resize(features.size() + 1);
  for (auto& t : *push_g) {
    t.resize(dim);
  }
}

void AsyncExecutorThreadWorker::check_pull_push_memory(
    const std::vector<uint64_t>& features, std::vector<float*>* push_g,
    int dim) {
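  // These raw float buffers are cached in push_g and reused across
  // batches; nothing in this file ever frees them.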
  if (features.size() > push_g->size()) {
    push_g->reserve(features.size() + 1);
    auto size = features.size() - push_g->size() + 1;
    for (auto i = 0u; i < size; ++i) {
      float* ptr = new float[dim];
      push_g->push_back(ptr);
    }
  }
}
#endif

}  // end namespace framework
}  // end namespace paddle