/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/executor_thread_worker.h"
#include <algorithm>
#include "google/protobuf/io/zero_copy_stream_impl.h"
#include "google/protobuf/message.h"
#include "google/protobuf/text_format.h"

#include "gflags/gflags.h"
#include "paddle/fluid/framework/feed_fetch_method.h"
#include "paddle/fluid/framework/feed_fetch_type.h"
#include "paddle/fluid/framework/lod_rank_table.h"
#include "paddle/fluid/framework/lod_tensor_array.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/reader.h"
#include "paddle/fluid/framework/variable_helper.h"
#include "paddle/fluid/inference/io.h"
#include "paddle/fluid/platform/cpu_helper.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/fluid/platform/timer.h"
#include "paddle/fluid/pybind/pybind.h"
namespace paddle {
namespace framework {

#ifdef PADDLE_WITH_PSLIB
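// DensePullThread keeps dense parameters fresh in the background: start()
// launches run(), which pulls every dense table whose training version has
// advanced far enough and then sleeps for _sleep_time_ms between rounds.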
int DensePullThread::start() {
  _running = true;
  _t = std::thread(&DensePullThread::run, this);
  return 0;
}

void DensePullThread::run() {
  while (_running) {
    _pull_dense_status.resize(0);
    for (auto& t : _dense_variable_name) {
      if (check_update_param(t.first)) {
        auto status = pull_dense(t.first);
        _pull_dense_status.emplace_back(std::move(status));
        reset_thread_version(t.first);
      }
    }
    if (_pull_dense_status.size() != 0) {
      wait_all();
    }

    usleep(_sleep_time_ms * 1000);
  }
}
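// A table is due for a pull once the slowest trainer thread has advanced at
// least _threshold versions past the last pulled version.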
bool DensePullThread::check_update_param(uint64_t table_id) {
  {
    std::lock_guard<std::mutex> lock(_mutex_for_version);
    auto& version = _training_versions[table_id];
    _current_version[table_id] =
        *(std::min_element(version.begin(), version.end()));
  }
  if (_current_version[table_id] - _last_versions[table_id] < _threshold) {
    return false;
  }
  return true;
}

void DensePullThread::reset_thread_version(uint64_t table_id) {
  std::lock_guard<std::mutex> lock(_mutex_for_version);
  _last_versions[table_id] = _current_version[table_id];
}
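// Wraps each dense variable of the table in a ps::Region and issues an
// asynchronous pull request to the parameter server.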
std::future<int32_t> DensePullThread::pull_dense(uint64_t table_id) {
  auto& regions = _regions[table_id];
  regions.clear();
  auto& variables = _dense_variable_name[table_id];
  regions.resize(variables.size());

  for (auto i = 0u; i < variables.size(); ++i) {
    auto& t = variables[i];
    Variable* var = _root_scope->FindVar(t);
    LoDTensor* tensor = var->GetMutable<LoDTensor>();

    float* w = tensor->data<float>();
    paddle::ps::Region reg(w, tensor->numel());
    regions[i] = std::move(reg);
  }
  return _ps_client->pull_dense(regions.data(), regions.size(), table_id);
}

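// Blocks on all outstanding pull requests; aborts the process once more than
// 20 pulls have failed.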
void DensePullThread::wait_all() {
  for (auto& t : _pull_dense_status) {
    t.wait();
    auto status = t.get();
    if (status != 0) {
      LOG(WARNING) << "pull dense failed times:" << ++_pull_dense_fail_times;
    }
  }

  if (_pull_dense_fail_times > 20) {
    LOG(FATAL) << "pull dense failed times more than 20 times";
    exit(-1);
  }

  _pull_dense_status.resize(0);
}

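// Trainer threads call this after each batch so the pull thread can track
// per-table training progress.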
void DensePullThread::increase_thread_version(int thread_id,
                                              uint64_t table_id) {
  std::lock_guard<std::mutex> lock(_mutex_for_version);
  _training_versions[table_id][thread_id]++;
}
#endif

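// Gives this worker thread its own copy of every operator in block 0 so the
// threads can run the program concurrently.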
void ExecutorThreadWorker::CreateThreadOperators(const ProgramDesc& program) {
  auto& block = program.Block(0);
  op_names_.clear();
  for (auto& op_desc : block.AllOps()) {
    std::unique_ptr<OperatorBase> local_op = OpRegistry::CreateOp(*op_desc);
    op_names_.push_back(op_desc->Type());
    OperatorBase* local_op_ptr = local_op.release();
    ops_.push_back(local_op_ptr);
  }
}

void ExecutorThreadWorker::CreateThreadResource(
    const framework::ProgramDesc& program,
    const paddle::platform::Place& place) {
  CreateThreadScope(program);
  CreateThreadOperators(program);
  SetMainProgram(program);
  SetPlace(place);
}

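// Persistable variables live in the shared root scope; everything else is
// created in this thread's private child scope.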
void ExecutorThreadWorker::CreateThreadScope(const ProgramDesc& program) {
  auto& block = program.Block(0);

  PADDLE_ENFORCE_NOT_NULL(
      root_scope_, "root_scope should be set before creating thread scope");

  thread_scope_ = &root_scope_->NewScope();
  for (auto& var : block.AllVars()) {
    if (var->Persistable()) {
      auto* ptr = root_scope_->Var(var->Name());
      InitializeVariable(ptr, var->GetType());
    } else {
      auto* ptr = thread_scope_->Var(var->Name());
      InitializeVariable(ptr, var->GetType());
    }
  }
}

void ExecutorThreadWorker::SetDataFeed(
    const std::shared_ptr<DataFeed>& datafeed) {
  thread_reader_ = datafeed;
}

void ExecutorThreadWorker::BindingDataFeedMemory() {
  const std::vector<std::string>& input_feed =
      thread_reader_->GetUseSlotAlias();
  for (auto name : input_feed) {
    thread_reader_->AddFeedVar(thread_scope_->Var(name), name);
  }
}

void ExecutorThreadWorker::SetFetchVarNames(
    const std::vector<std::string>& fetch_var_names) {
  fetch_var_names_.clear();
  fetch_var_names_.insert(fetch_var_names_.end(), fetch_var_names.begin(),
                          fetch_var_names.end());
}

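// Best-effort pinning of the worker thread to the CPU core matching its
// thread id (Linux only).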
void ExecutorThreadWorker::SetDevice() {
#if defined _WIN32 || defined __APPLE__
  return;
#else
  static unsigned concurrency_cap = std::thread::hardware_concurrency();
  LOG(WARNING) << "concurrency capacity " << concurrency_cap;
  int thread_id = this->thread_id_;

  if (static_cast<unsigned>(thread_id) < concurrency_cap) {
    unsigned proc = thread_id;

    cpu_set_t mask;
    CPU_ZERO(&mask);
    CPU_SET(proc, &mask);

    if (-1 == sched_setaffinity(0, sizeof(mask), &mask)) {
      VLOG(1) << "WARNING: Failed to set thread affinity for thread "
              << thread_id;
    } else {
      CPU_ZERO(&mask);
      if ((0 != sched_getaffinity(0, sizeof(mask), &mask)) ||
          (CPU_ISSET(proc, &mask) == 0)) {
        VLOG(3) << "WARNING: Failed to set thread affinity for thread "
                << thread_id;
      }
    }
  } else {
    VLOG(1) << "WARNING: Failed to set thread affinity for thread "
            << thread_id;
  }
#endif
}

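// Prints every element of a LoDTensor to stdout; used to dump fetch
// variables in debug mode.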
template <typename T>
void print_lod_tensor(std::string var_name, const LoDTensor& lod_tensor) {
  auto inspect = lod_tensor.data<T>();
  auto element_num = lod_tensor.numel();

  std::ostringstream sstream;
  sstream << var_name << " (element num " << element_num << "): [";
  if (element_num > 0) {
    sstream << inspect[0];
  }
  for (int64_t j = 1; j < element_num; ++j) {
    sstream << " " << inspect[j];
  }
  sstream << "]";

  std::cout << sstream.str() << std::endl;
}

static void print_fetch_var(Scope* scope, const std::string& var_name) {
  auto& tensor = scope->FindVar(var_name)->Get<LoDTensor>();

#define PrintLoDTensorCallback(cpp_type, proto_type) \
  do {                                               \
    if (tensor.type() == proto_type) {               \
      print_lod_tensor<cpp_type>(var_name, tensor);  \
      return;                                        \
    }                                                \
  } while (0)

  _ForEachDataType_(PrintLoDTensorCallback);
  VLOG(1) << "print_fetch_var: unrecognized data type:" << tensor.type();
}

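// Training loop with per-operator timing: thread 0 reports mean op time,
// mean read time, and the fetch variables every 1000 batches.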
void ExecutorThreadWorker::TrainFilesWithTimer() {
  platform::SetNumThreads(1);
  SetDevice();
  thread_reader_->Start();
  std::vector<double> op_total_time;
  std::vector<std::string> op_name;
  for (auto& op : ops_) {
    op_name.push_back(op->Type());
  }
  op_total_time.resize(ops_.size());
  for (size_t i = 0; i < op_total_time.size(); ++i) {
    op_total_time[i] = 0.0;
  }
  platform::Timer timeline;
  double total_time = 0.0;
  double read_time = 0.0;
  int cur_batch;
  int batch_cnt = 0;
  timeline.Start();
  while ((cur_batch = thread_reader_->Next()) > 0) {
    timeline.Pause();
    read_time += timeline.ElapsedSec();
    total_time += timeline.ElapsedSec();
    for (size_t i = 0; i < ops_.size(); ++i) {
      timeline.Start();
      ops_[i]->Run(*thread_scope_, place_);
      timeline.Pause();
      op_total_time[i] += timeline.ElapsedSec();
      total_time += timeline.ElapsedSec();
    }
    ++batch_cnt;
    thread_scope_->DropKids();
    if (thread_id_ == 0) {
      if (batch_cnt > 0 && batch_cnt % 1000 == 0) {
        for (size_t i = 0; i < ops_.size(); ++i) {
          fprintf(stderr, "op_name:[%zu][%s], op_mean_time:[%fs]\n", i,
                  op_name[i].c_str(), op_total_time[i] / batch_cnt);
        }
        fprintf(stderr, "mean read time: %fs\n", read_time / batch_cnt);
        int fetch_var_num = fetch_var_names_.size();
        for (int i = 0; i < fetch_var_num; ++i) {
          print_fetch_var(thread_scope_, fetch_var_names_[i]);
        }
      }
    }
    timeline.Start();
  }
}

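// Plain training loop: run all ops on each batch; in debug mode thread 0
// prints the fetch variables.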
void ExecutorThreadWorker::TrainFiles() {
  platform::SetNumThreads(1);

  // todo: configurable
  SetDevice();

  int fetch_var_num = fetch_var_names_.size();
  fetch_values_.clear();
  fetch_values_.resize(fetch_var_num);

  thread_reader_->Start();

  int cur_batch;
  int batch_cnt = 0;
  while ((cur_batch = thread_reader_->Next()) > 0) {
    // executor run here
    for (auto& op : ops_) {
      op->Run(*thread_scope_, place_);
    }

    ++batch_cnt;
    thread_scope_->DropKids();

    if (debug_ == false || thread_id_ != 0) {
      continue;
    }

    for (int i = 0; i < fetch_var_num; ++i) {
      print_fetch_var(thread_scope_, fetch_var_names_[i]);
    }  // end for (int i = 0...)
  }    // end while ()
}

void ExecutorThreadWorker::SetThreadId(int tid) { thread_id_ = tid; }

void ExecutorThreadWorker::SetPlace(const platform::Place& place) {
  place_ = place;
}

void ExecutorThreadWorker::SetMainProgram(
    const ProgramDesc& main_program_desc) {
  main_program_.reset(new ProgramDesc(main_program_desc));
}

void ExecutorThreadWorker::SetRootScope(Scope* g_scope) {
  root_scope_ = g_scope;
}

#ifdef PADDLE_WITH_PSLIB
//  AsyncExecutor
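// Same loop as ExecutorThreadWorker::TrainFiles, except each batch goes
// through TrainOneNetwork so parameters stay synchronized with the
// parameter server.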
void AsyncExecutorThreadWorker::TrainFiles() {
  SetDevice();

  int fetch_var_num = fetch_var_names_.size();
  fetch_values_.clear();
  fetch_values_.resize(fetch_var_num);

  thread_reader_->Start();

  int cur_batch;
  int batch_cnt = 0;
  while ((cur_batch = thread_reader_->Next()) > 0) {
    // executor run here
    TrainOneNetwork();

    ++batch_cnt;
    thread_scope_->DropKids();

    if (debug_ == false || thread_id_ != 0) {
      continue;
    }

    for (int i = 0; i < fetch_var_num; ++i) {
      print_fetch_var(thread_scope_, fetch_var_names_[i]);
    }  // end for (int i = 0...)
  }    // end while ()
}

void AsyncExecutorThreadWorker::SetPSlibPtr(
    std::shared_ptr<paddle::distributed::PSlib> pslib_ptr) {
  _pslib_ptr = pslib_ptr;
}

void AsyncExecutorThreadWorker::SetPullDenseThread(
    std::shared_ptr<DensePullThread> dpt) {
  _pull_dense_thread = dpt;
}

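// One batch: pull and fill parameters, run every op except the sgd ops and
// those listed in skip_op, then push gradients back.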
void AsyncExecutorThreadWorker::TrainOneNetwork() {
  PrepareParams();

  for (auto& op : ops_) {
    if (op->Type().find("sgd") != std::string::npos) {
      continue;
    }
    bool need_skip = false;
    for (auto t = 0u; t < _param_config->skip_op.size(); ++t) {
      if (op->Type().find(_param_config->skip_op[t]) != std::string::npos) {
        need_skip = true;
        break;
      }
    }
    if (!need_skip) {
      op->Run(*thread_scope_, place_);
    }
  }
  UpdateParams();
}

void AsyncExecutorThreadWorker::SetParamConfig(
    AsyncWorkerParamConfig* param_config) {
  _param_config = param_config;
}

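// Pulls all configured sparse tables, waits for the pulls to finish, then
// scatters the values into the embedding inputs via FillSparse.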
void AsyncExecutorThreadWorker::PrepareParams() {
  for (auto table_id : _param_config->sparse_table_id) {
    PullSparse(table_id);
    for (auto& t : _pull_sparse_status) {
      t.wait();
      auto status = t.get();
      if (status != 0) {
        LOG(ERROR) << "pull sparse failed, status[" << status << "]";
        exit(-1);
      }
    }
  }
  _pull_sparse_status.resize(0);

  for (auto table_id : _param_config->sparse_table_id) {
    FillSparse(table_id);
  }
}

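// Pushes sparse and dense gradients for all configured tables. With the
// default wait time of -1 the push statuses are discarded without waiting
// (fire and forget), and the dense table versions are bumped at the end.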
void AsyncExecutorThreadWorker::UpdateParams() {
  for (auto i : _param_config->sparse_table_id) {
    PushSparse(i);
  }
  for (auto i : _param_config->dense_table_id) {
    PushDense(i);
  }
  int32_t tmp_push_dense_wait_times = -1;
  int32_t tmp_push_sparse_wait_times = -1;
  static uint32_t push_dense_wait_times =
      static_cast<uint32_t>(tmp_push_dense_wait_times);
  static uint32_t push_sparse_wait_times =
      static_cast<uint32_t>(tmp_push_sparse_wait_times);

  if (_push_dense_status.size() >= push_dense_wait_times) {
    for (auto& t : _push_dense_status) {
      t.wait();
    }
    _push_dense_status.resize(0);
  }
  if (tmp_push_dense_wait_times == -1) {
    _push_dense_status.resize(0);
  }
  if (_push_sparse_status.size() >= push_sparse_wait_times) {
    for (auto& t : _push_sparse_status) {
      t.wait();
    }
    _push_sparse_status.resize(0);
  }
  if (tmp_push_sparse_wait_times == -1) {
    _push_sparse_status.resize(0);
  }
  for (auto dense_table_id : _param_config->dense_table_id) {
    _pull_dense_thread->increase_thread_version(thread_id_, dense_table_id);
  }
}

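// Collects the table's dense gradient variables into regions and issues an
// asynchronous push.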
void AsyncExecutorThreadWorker::PushDense(int table_id) {
  std::vector<paddle::ps::Region> regions;
  for (auto& t : _param_config->dense_gradient_variable_name[table_id]) {
    Variable* var = thread_scope_->FindVar(t);
    CHECK(var != nullptr) << "var[" << t << "] not found";
    LoDTensor* tensor = var->GetMutable<LoDTensor>();
    int count = tensor->numel();
    float* g = tensor->data<float>();
    paddle::ps::Region reg(g, count);
    regions.emplace_back(std::move(reg));
  }

  auto status = _pslib_ptr->_worker_ptr->push_dense(regions.data(),
                                                    regions.size(), table_id);
  _push_dense_status.push_back(std::move(status));
}

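// Gathers the feature signs of all input slots (skipping zero ids, see the
// TODO inside), issues an asynchronous sparse pull, and prepares the push
// buffer and label info for the later push.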
void AsyncExecutorThreadWorker::PullSparse(int table_id) {
  auto& features = _features[table_id];
  auto& feature_value = _feature_value[table_id];
  auto fea_dim = _param_config->fea_dim;
  // slot id starts from 1
  features.clear();
  features.resize(0);
  features.reserve(MAX_FEASIGN_NUM);
  const std::vector<std::string>& feed_vec = thread_reader_->GetUseSlotAlias();
  // slot_idx = 0 is label TODO
  for (auto slot_idx = 1u; slot_idx < feed_vec.size(); ++slot_idx) {
    Variable* var = thread_scope_->FindVar(feed_vec[slot_idx]);
    LoDTensor* tensor = var->GetMutable<LoDTensor>();
    int64_t* ids = tensor->data<int64_t>();
    int len = tensor->numel();
    for (auto i = 0u; i < len; ++i) {
      // todo(colourful-tree): current trick - filter feasign=use_slot_mod(
      // bug: datafeed fill use_slot_mod for empty slot)
      if (ids[i] == 0u) {
        continue;
      }
      features.push_back(static_cast<uint64_t>(ids[i]));
    }
  }
  check_pull_push_memory(features, &feature_value, fea_dim);

  std::vector<float*> pull_feature_value;
  for (auto i = 0u; i < features.size(); ++i) {
    pull_feature_value.push_back(feature_value[i].data());
  }

  auto status = _pslib_ptr->_worker_ptr->pull_sparse(
      pull_feature_value.data(), table_id, features.data(), features.size());
  _pull_sparse_status.push_back(std::move(status));

  auto& push_g = _feature_push_value[table_id];
  check_pull_push_memory(features, &push_g, fea_dim);
  collect_feasign_info(table_id);
}

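// Copies the pulled values into the embedding input tensors; the first two
// floats of each pulled value are skipped (offset 2), and zero ids get the
// zero-initialized init_value buffer instead.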
void AsyncExecutorThreadWorker::FillSparse(int table_id) {
  auto slot_dim = _param_config->slot_dim;
  auto fea_dim = _param_config->fea_dim;
  auto& features = _features[table_id];
  auto& fea_value = _feature_value[table_id];

  CHECK(features.size() > 0) << "feature size check failed";

  auto fea_idx = 0u;

  std::vector<float> init_value(fea_dim);

  const std::vector<std::string>& feed_vec = thread_reader_->GetUseSlotAlias();
  // slot_idx = 0 is label TODO
  for (auto slot_idx = 1u; slot_idx < feed_vec.size(); ++slot_idx) {
    Variable* var = thread_scope_->FindVar(feed_vec[slot_idx]);
    LoDTensor* tensor = var->GetMutable<LoDTensor>();
    int64_t* ids = tensor->data<int64_t>();
    int len = tensor->numel();
    Variable* var_emb = thread_scope_->FindVar(
        _param_config->slot_input_vec[table_id][slot_idx - 1]);
    LoDTensor* tensor_emb = var_emb->GetMutable<LoDTensor>();
    float* ptr =
        tensor_emb->mutable_data<float>({len, slot_dim}, platform::CPUPlace());
    memset(ptr, 0, sizeof(float) * len * slot_dim);
    auto& tensor_lod = tensor->lod()[0];

    LoD data_lod{tensor_lod};
    tensor_emb->set_lod(data_lod);

    for (auto index = 0u; index < len; ++index) {
      if (ids[index] == 0u) {
        memcpy(ptr + slot_dim * index, init_value.data() + 2,
               sizeof(float) * slot_dim);
        continue;
      }
      memcpy(ptr + slot_dim * index, fea_value[fea_idx].data() + 2,
             sizeof(float) * slot_dim);
      fea_idx++;
    }
  }
}

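// Scatters the per-slot gradients into push_g, one row per feature sign; the
// first two floats of each row carry a constant 1.0 and the instance label,
// mirroring the offset skipped in FillSparse.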
void AsyncExecutorThreadWorker::PushSparse(int table_id) {
  auto slot_dim = _param_config->slot_dim;
  auto fea_dim = _param_config->fea_dim;
  auto& features = _features[table_id];
  auto& push_g = _feature_push_value[table_id];
  check_pull_push_memory(features, &push_g, fea_dim);
  CHECK(push_g.size() == features.size() + 1)
      << "push_g size:" << push_g.size()
      << " features size:" << features.size();
  uint64_t fea_idx = 0u;
  auto& fea_info = _fea_info[table_id];
  int offset = 2;
  const std::vector<std::string>& feed_vec = thread_reader_->GetUseSlotAlias();
  // slot_idx = 0 is label
  for (auto slot_idx = 1u; slot_idx < feed_vec.size(); ++slot_idx) {
    if (_param_config->slot_alias_to_table.find(feed_vec[slot_idx]) ==
        _param_config->slot_alias_to_table.end()) {
      LOG(ERROR) << "ERROR slot_idx:" << slot_idx
                 << " name:" << feed_vec[slot_idx];
    } else if (_param_config->slot_alias_to_table[feed_vec[slot_idx]] !=
               table_id) {
      continue;
    }
    Variable* g_var = thread_scope_->FindVar(
        _param_config->gradient_var[table_id][slot_idx - 1]);
    CHECK(g_var != nullptr)
        << "var[" << _param_config->gradient_var[table_id][slot_idx - 1]
        << "] not found";
    LoDTensor* g_tensor = g_var->GetMutable<LoDTensor>();
    if (g_tensor == NULL) {
      LOG(ERROR) << "var["
                 << _param_config->gradient_var[table_id][slot_idx - 1]
                 << "] not found";
      exit(-1);
    }
    float* g = g_tensor->data<float>();

    Variable* var = thread_scope_->FindVar(feed_vec[slot_idx]);
    CHECK(var != nullptr) << "var[" << feed_vec[slot_idx] << "] not found";
    LoDTensor* tensor = var->GetMutable<LoDTensor>();
    if (tensor == NULL) {
      LOG(ERROR) << "var[" << feed_vec[slot_idx] << "] not found";
      exit(-1);
    }
    int len = tensor->numel();
    CHECK(slot_dim * len == g_tensor->numel())
        << "len:" << len << " g_numel:" << g_tensor->numel();
    CHECK(len == tensor->numel()) << "len:" << len
                                  << "t_numel:" << tensor->numel();
    int64_t* ids = tensor->data<int64_t>();
    for (auto id_idx = 0u; id_idx < len; ++id_idx) {
      if (ids[id_idx] == 0) {
        g += slot_dim;
        continue;
      }
      memcpy(push_g[fea_idx].data() + offset, g, sizeof(float) * slot_dim);
      push_g[fea_idx][0] = 1.0f;
      CHECK(fea_idx < fea_info.size()) << "fea_idx:" << fea_idx
                                       << " size:" << fea_info.size();
      push_g[fea_idx][1] = static_cast<float>(fea_info[fea_idx].label);
      g += slot_dim;
      fea_idx++;
    }
  }
  CHECK(fea_idx == features.size()) << "fea_idx:" << fea_idx
                                    << " features size:" << features.size();
  CHECK_GT(features.size(), 0);

  std::vector<float*> push_g_vec;
  for (auto i = 0u; i < features.size(); ++i) {
    push_g_vec.push_back(push_g[i].data());
  }
  auto status = _pslib_ptr->_worker_ptr->push_sparse(
      table_id, features.data(), (const float**)push_g_vec.data(),
      features.size());
  _push_sparse_status.push_back(std::move(status));
}

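// Records slot index, instance index, and label for every non-zero feature
// sign, in the same order as the features gathered in PullSparse.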
void AsyncExecutorThreadWorker::collect_feasign_info(int table_id) {
  auto& fea_info = _fea_info[table_id];
  auto& feature = _features[table_id];
  fea_info.resize(feature.size());
  const std::vector<std::string>& feed_vec = thread_reader_->GetUseSlotAlias();
  Variable* var = thread_scope_->FindVar(feed_vec[0]);
  LoDTensor* tensor = var->GetMutable<LoDTensor>();
  int64_t* label = tensor->data<int64_t>();

  int global_index = 0;
  for (auto slot_idx = 1u; slot_idx < feed_vec.size(); ++slot_idx) {
    Variable* var = thread_scope_->FindVar(feed_vec[slot_idx]);
    LoDTensor* tensor = var->GetMutable<LoDTensor>();
    int64_t* ids = tensor->data<int64_t>();

    int fea_idx = 0;
    for (auto ins_idx = 1u; ins_idx < tensor->lod()[0].size(); ++ins_idx) {
      for (; fea_idx < tensor->lod()[0][ins_idx]; ++fea_idx) {
        if (ids[fea_idx] == 0u) {
          continue;
        }
        FeasignInfo info{slot_idx, ins_idx, label[ins_idx - 1]};

        fea_info[global_index++] = std::move(info);
      }
    }
  }
  CHECK(global_index == feature.size())
      << "expect fea info size:" << feature.size() << " real:" << global_index;
}

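// Ensures one dim-sized row per feature sign (plus one spare row).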
void AsyncExecutorThreadWorker::check_pull_push_memory(
    const std::vector<uint64_t>& features,
    std::vector<std::vector<float>>* push_g, int dim) {
  push_g->resize(features.size() + 1);
  for (auto& t : *push_g) {
    t.resize(dim);
  }
}

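// Raw-pointer variant: grows the buffer with heap-allocated rows that are
// reused across batches and never freed here.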
void AsyncExecutorThreadWorker::check_pull_push_memory(
    const std::vector<uint64_t>& features, std::vector<float*>* push_g,
    int dim) {
  if (features.size() > push_g->size()) {
    push_g->reserve(features.size() + 1);
    auto size = features.size() - push_g->size() + 1;
    for (auto i = 0u; i < size; ++i) {
      float* ptr = new float[dim];
      push_g->push_back(ptr);
    }
  }
}
#endif

}  // end namespace framework
}  // end namespace paddle