/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/device_worker.h"
#include "paddle/fluid/framework/device_worker_factory.h"
#include "paddle/fluid/platform/cpu_helper.h"
#include "paddle/fluid/string/string_helper.h"

#if defined _WIN32 || defined __APPLE__
#else
#define _LINUX
#endif

namespace paddle {
namespace framework {

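// Parse the downpour configuration from the trainer descriptor: key, value
// and gradient variable names for every sparse and dense table, ops to skip,
// and the push / dump / CVM / NaN-check related settings.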
void DownpourWorker::Initialize(const TrainerDesc& desc) {
  param_ = desc.downpour_param();
  for (int i = 0; i < param_.sparse_table_size(); ++i) {
    uint64_t table_id =
        static_cast<uint64_t>(param_.sparse_table(i).table_id());
    TableParameter table = param_.sparse_table(i);
    sparse_key_names_[table_id].resize(table.sparse_key_name_size());
    for (int j = 0; j < table.sparse_key_name_size(); ++j) {
      sparse_key_names_[table_id][j] = table.sparse_key_name(j);
    }
    sparse_value_names_[table_id].resize(table.sparse_value_name_size());
    for (int j = 0; j < table.sparse_value_name_size(); ++j) {
      sparse_value_names_[table_id][j] = table.sparse_value_name(j);
    }
    sparse_grad_names_[table_id].resize(table.sparse_grad_name_size());
    for (int j = 0; j < table.sparse_grad_name_size(); ++j) {
      sparse_grad_names_[table_id][j] = table.sparse_grad_name(j);
    }
    label_var_name_[table_id] = table.label_var_name();
  }

  for (int i = 0; i < param_.dense_table_size(); ++i) {
    uint64_t table_id = static_cast<uint64_t>(param_.dense_table(i).table_id());
    auto table = param_.dense_table(i);
    dense_value_names_[table_id].resize(table.dense_value_name_size());
    for (int j = 0; j < table.dense_value_name_size(); ++j) {
      dense_value_names_[table_id][j] = table.dense_value_name(j);
    }
    dense_grad_names_[table_id].resize(table.dense_grad_name_size());
    for (int j = 0; j < table.dense_grad_name_size(); ++j) {
      dense_grad_names_[table_id][j] = table.dense_grad_name(j);
    }
  }

  skip_ops_.resize(param_.skip_ops_size());
  for (int i = 0; i < param_.skip_ops_size(); ++i) {
    skip_ops_[i] = param_.skip_ops(i);
  }

  for (int i = 0; i < param_.stat_var_names_size(); ++i) {
    stat_var_name_map_[param_.stat_var_names(i)] = 1;
  }

  need_to_push_sparse_ = param_.push_sparse();
  need_to_push_dense_ = param_.push_dense();

  fleet_ptr_ = FleetWrapper::GetInstance();
  fetch_config_ = desc.fetch_config();
  use_cvm_ = desc.use_cvm();
  scale_datanorm_ = desc.scale_datanorm();
  dump_slot_ = desc.dump_slot();
  dump_fields_.resize(desc.dump_fields_size());
  for (int i = 0; i < desc.dump_fields_size(); ++i) {
    dump_fields_[i] = desc.dump_fields(i);
  }
  adjust_ins_weight_config_ = desc.adjust_ins_weight_config();
  for (int i = 0; i < desc.check_nan_var_names_size(); ++i) {
    check_nan_var_names_.push_back(desc.check_nan_var_names(i));
  }
}

void DownpourWorker::SetChannelWriter(ChannelObject<std::string>* queue) {
  writer_.Reset(queue);
}

void DownpourWorker::SetNeedDump(bool need_dump_field) {
  need_dump_field_ = need_dump_field;
}

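// Helpers used when dumping fields: serialize a [start, end) slice of a
// LoDTensor as ":"-separated values and compute the per-instance bounds.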
template <typename T>
std::string PrintLodTensorType(LoDTensor* tensor, int64_t start, int64_t end) {
  auto count = tensor->numel();
  if (start < 0 || end > count) {
    VLOG(3) << "access violation";
    return "access violation";
  }
  std::ostringstream os;
  for (int64_t i = start; i < end; i++) {
    os << ":" << tensor->data<T>()[i];
  }
  return os.str();
}

std::string PrintLodTensorIntType(LoDTensor* tensor, int64_t start,
                                  int64_t end) {
  auto count = tensor->numel();
  if (start < 0 || end > count) {
    VLOG(3) << "access violation";
    return "access violation";
  }
  std::ostringstream os;
  for (int64_t i = start; i < end; i++) {
    os << ":" << static_cast<uint64_t>(tensor->data<int64_t>()[i]);
  }
  return os.str();
}

std::string PrintLodTensor(LoDTensor* tensor, int64_t start, int64_t end) {
  std::string out_val;
  if (tensor->type() == proto::VarType::FP32) {
    out_val = PrintLodTensorType<float>(tensor, start, end);
  } else if (tensor->type() == proto::VarType::INT64) {
    out_val = PrintLodTensorIntType(tensor, start, end);
  } else if (tensor->type() == proto::VarType::FP64) {
    out_val = PrintLodTensorType<double>(tensor, start, end);
  } else {
    out_val = "unsupported type";
  }
  return out_val;
}

std::pair<int64_t, int64_t> GetTensorBound(LoDTensor* tensor, int index) {
  auto& dims = tensor->dims();
  if (tensor->lod().size() != 0) {
    auto& lod = tensor->lod()[0];
    return {lod[index] * dims[1], lod[index + 1] * dims[1]};
  } else {
    return {index * dims[1], (index + 1) * dims[1]};
  }
}

bool CheckValidOutput(LoDTensor* tensor, int batch_size) {
  auto& dims = tensor->dims();
  if (dims.size() != 2) return false;
  if (tensor->lod().size() != 0) {
    auto& lod = tensor->lod()[0];
    if (lod.size() != static_cast<size_t>(batch_size + 1)) {
      return false;
    }
  } else {
    if (dims[0] != batch_size) {
      return false;
    }
  }
  return true;
}

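// For every feasign pulled from the given sparse table, record the label of
// the instance it belongs to; feasign 0 is treated as padding and skipped.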
void DownpourWorker::CollectLabelInfo(size_t table_idx) {
  uint64_t table_id = static_cast<uint64_t>(
      param_.program_config(0).pull_sparse_table_id(table_idx));

  TableParameter table;
  for (auto i : param_.sparse_table()) {
    if (i.table_id() == table_id) {
      table = i;
      break;
    }
  }
  auto& feature = features_[table_id];
  auto& feature_label = feature_labels_[table_id];
  feature_label.resize(feature.size());
  Variable* var = thread_scope_->FindVar(label_var_name_[table_id]);
  LoDTensor* tensor = var->GetMutable<LoDTensor>();
  int64_t* label_ptr = tensor->data<int64_t>();

  size_t global_index = 0;
  for (size_t i = 0; i < sparse_key_names_[table_id].size(); ++i) {
    VLOG(3) << "sparse_key_names_[" << i
            << "]: " << sparse_key_names_[table_id][i];
    Variable* fea_var = thread_scope_->FindVar(sparse_key_names_[table_id][i]);
    if (fea_var == nullptr) {
      continue;
    }
    LoDTensor* tensor = fea_var->GetMutable<LoDTensor>();
    CHECK(tensor != nullptr) << "tensor of var "
                             << sparse_key_names_[table_id][i] << " is null";
    int64_t* ids = tensor->data<int64_t>();
    size_t fea_idx = 0;
    // tensor->lod()[0].size() == batch_size + 1
    for (auto lod_idx = 1u; lod_idx < tensor->lod()[0].size(); ++lod_idx) {
      for (; fea_idx < tensor->lod()[0][lod_idx]; ++fea_idx) {
        // feasign 0 is a padding value defined in the protobuf config; skip it
        if (ids[fea_idx] == 0u) {
          continue;
        }
        feature_label[global_index++] =
            static_cast<float>(label_ptr[lod_idx - 1]);
      }
    }
  }
  CHECK(global_index == feature.size())
      << "expect fea info size:" << feature.size() << " real:" << global_index;
}

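// Copy the values pulled from the parameter server into the embedding output
// tensor of every sparse slot; padding ids (feasign 0) are filled from a
// zero-initialized init value.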
void DownpourWorker::FillSparseValue(size_t table_idx) {
  uint64_t table_id = static_cast<uint64_t>(
      param_.program_config(0).pull_sparse_table_id(table_idx));

  TableParameter table;
  for (auto i : param_.sparse_table()) {
    if (i.table_id() == table_id) {
      table = i;
      break;
    }
  }

  auto& fea_value = feature_values_[table_id];
  auto fea_idx = 0u;

  std::vector<float> init_value(table.fea_dim());
  for (size_t i = 0; i < sparse_key_names_[table_id].size(); ++i) {
    std::string slot_name = sparse_key_names_[table_id][i];
    std::string emb_slot_name = sparse_value_names_[table_id][i];
    Variable* var = thread_scope_->FindVar(slot_name);
    if (var == nullptr) {
      continue;
    }
    LoDTensor* tensor = var->GetMutable<LoDTensor>();
    CHECK(tensor != nullptr) << "tensor of var " << slot_name << " is null";
    int64_t* ids = tensor->data<int64_t>();
    int len = tensor->numel();
    Variable* var_emb = thread_scope_->FindVar(emb_slot_name);
    LoDTensor* tensor_emb = var_emb->GetMutable<LoDTensor>();
    float* ptr = tensor_emb->mutable_data<float>({len, table.emb_dim()},
                                                 platform::CPUPlace());
    memset(ptr, 0, sizeof(float) * len * table.emb_dim());
    auto& tensor_lod = tensor->lod()[0];
    LoD data_lod{tensor_lod};
    tensor_emb->set_lod(data_lod);

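    // When instance weight adjustment is enabled, remember the leading pulled
    // value of the nid slot for each instance; AdjustInsWeight() interprets it
    // as the nid show count.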
    bool is_nid = (adjust_ins_weight_config_.need_adjust() &&
                   adjust_ins_weight_config_.nid_slot() == emb_slot_name);
    if (is_nid) {
      nid_show_.clear();
    }
    int nid_ins_index = 0;

    for (int index = 0; index < len; ++index) {
      if (use_cvm_) {
        if (ids[index] == 0u) {
          memcpy(ptr + table.emb_dim() * index, init_value.data(),
                 sizeof(float) * table.emb_dim());
          if (is_nid) {
            nid_show_.push_back(-1);
            ++nid_ins_index;
          }
          continue;
        }
        memcpy(ptr + table.emb_dim() * index, fea_value[fea_idx].data(),
               sizeof(float) * table.emb_dim());
        if (is_nid && index == tensor->lod()[0][nid_ins_index]) {
          nid_show_.push_back(fea_value[fea_idx][0]);
          ++nid_ins_index;
        }
        fea_idx++;
      } else {
        if (ids[index] == 0u) {
          memcpy(ptr + table.emb_dim() * index, init_value.data() + 2,
                 sizeof(float) * table.emb_dim());
          if (is_nid) {
            nid_show_.push_back(-1);
            ++nid_ins_index;
          }
          continue;
        }
        memcpy(ptr + table.emb_dim() * index, fea_value[fea_idx].data() + 2,
               sizeof(float) * table.emb_dim());
        if (is_nid && index == tensor->lod()[0][nid_ins_index]) {
          nid_show_.push_back(fea_value[fea_idx][0]);
          ++nid_ins_index;
        }
        fea_idx++;
      }
    }
  }
}

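// Raise the weight of instances whose nid show count is below the configured
// threshold; a weight is only written back when it exceeds the existing one.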
void DownpourWorker::AdjustInsWeight() {
#ifdef _LINUX
  // check var and tensor not null
  if (!adjust_ins_weight_config_.need_adjust()) {
    VLOG(0) << "need_adjust=false, skip adjust ins weight";
    return;
  }
  Variable* nid_var =
      thread_scope_->FindVar(adjust_ins_weight_config_.nid_slot());
  if (nid_var == nullptr) {
    VLOG(0) << "nid slot var " << adjust_ins_weight_config_.nid_slot()
            << " is nullptr, skip adjust ins weight";
    return;
  }
  LoDTensor* nid_tensor = nid_var->GetMutable<LoDTensor>();
  if (nid_tensor == nullptr) {
    VLOG(0) << "tensor of nid slot var " << adjust_ins_weight_config_.nid_slot()
            << " is nullptr, skip adjust ins weight";
    return;
  }
  Variable* ins_weight_var =
      thread_scope_->FindVar(adjust_ins_weight_config_.ins_weight_slot());
  if (ins_weight_var == nullptr) {
    VLOG(0) << "ins weight var " << adjust_ins_weight_config_.ins_weight_slot()
            << " is nullptr, skip adjust ins weight";
    return;
  }
  LoDTensor* ins_weight_tensor = ins_weight_var->GetMutable<LoDTensor>();
  if (ins_weight_tensor == nullptr) {
    VLOG(0) << "tensor of ins weight tensor "
            << adjust_ins_weight_config_.ins_weight_slot()
            << " is nullptr, skip adjust ins weight";
    return;
  }

  float* ins_weights = ins_weight_tensor->data<float>();
  size_t len = ins_weight_tensor->numel();  // len = batch size
  // here we assume nid_show slot only has one feasign in each instance
  CHECK(len == nid_show_.size()) << "ins_weight size should be equal to "
                                 << "nid_show size, " << len << " vs "
                                 << nid_show_.size();
  float nid_adjw_threshold = adjust_ins_weight_config_.nid_adjw_threshold();
  float nid_adjw_ratio = adjust_ins_weight_config_.nid_adjw_ratio();
  int64_t nid_adjw_num = 0;
  double nid_adjw_weight = 0.0;
  size_t ins_index = 0;
  for (size_t i = 0; i < len; ++i) {
    float nid_show = nid_show_[i];
    VLOG(3) << "nid_show " << nid_show;
    if (nid_show < 0) {
      VLOG(3) << "nid_show < 0, continue";
      continue;
    }
    float ins_weight = 1.0;
    if (nid_show >= 0 && nid_show < nid_adjw_threshold) {
      ins_weight = log(M_E +
                       (nid_adjw_threshold - nid_show) / nid_adjw_threshold *
                           nid_adjw_ratio);
      // count nid adjw insnum and weight
      ++nid_adjw_num;
      nid_adjw_weight += ins_weight;
      // choose large ins weight
      VLOG(3) << "ins weight new " << ins_weight << ", ins weight origin "
              << ins_weights[ins_index];
      if (ins_weight > ins_weights[ins_index]) {
        VLOG(3) << "ins " << ins_index << " weight changes to " << ins_weight;
        ins_weights[ins_index] = ins_weight;
      }
      ++ins_index;
    }
  }
  VLOG(3) << "nid adjw info: total_adjw_num: " << nid_adjw_num
          << ", avg_adjw_weight: " << nid_adjw_weight;
#endif
}

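// Same training loop as TrainFiles(), but with per-op and per-stage timing
// statistics that thread 0 prints every 100 batches.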
void DownpourWorker::TrainFilesWithProfiler() {
  VLOG(3) << "Begin to train files with profiler";
  platform::SetNumThreads(1);
  device_reader_->Start();
  std::vector<double> op_total_time;
  std::vector<std::string> op_name;
  for (auto& op : ops_) {
    bool need_skip = false;
    for (auto t = 0u; t < skip_ops_.size(); ++t) {
      if (op->Type().find(skip_ops_[t]) != std::string::npos) {
        need_skip = true;
        break;
      }
    }
    if (!need_skip) {
      op_name.push_back(op->Type());
    }
  }

  VLOG(3) << "op name size: " << op_name.size();
  op_total_time.resize(op_name.size());
  for (size_t i = 0; i < op_total_time.size(); ++i) {
    op_total_time[i] = 0.0;
  }
  platform::Timer timeline;
  double total_time = 0.0;
  double read_time = 0.0;
  double pull_sparse_time = 0.0;
  double adjust_ins_weight_time = 0.0;
  double collect_label_time = 0.0;
  double fill_sparse_time = 0.0;
  double push_sparse_time = 0.0;
  double push_dense_time = 0.0;
  int cur_batch;
  int batch_cnt = 0;
  uint64_t total_inst = 0;
  timeline.Start();
  while ((cur_batch = device_reader_->Next()) > 0) {
    timeline.Pause();
    read_time += timeline.ElapsedSec();
    total_time += timeline.ElapsedSec();
    VLOG(3) << "program config size: " << param_.program_config_size();
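    // Pull sparse parameters for every configured sparse table, then collect
    // labels and fill the embedding inputs for this batch.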
    for (int i = 0; i < param_.program_config(0).pull_sparse_table_id_size();
         ++i) {
      uint64_t tid = static_cast<uint64_t>(
          param_.program_config(0).pull_sparse_table_id(i));
      TableParameter table;
      for (auto j : param_.sparse_table()) {
        if (j.table_id() == tid) {
          table = j;
421 422 423 424 425 426 427 428 429
          break;
        }
      }
      timeline.Start();
      fleet_ptr_->PullSparseVarsSync(*thread_scope_, tid,
                                     sparse_key_names_[tid], &features_[tid],
                                     &feature_values_[tid], table.fea_dim());
      timeline.Pause();
      pull_sparse_time += timeline.ElapsedSec();
      total_time += timeline.ElapsedSec();
      timeline.Start();
      CollectLabelInfo(i);
      timeline.Pause();
      collect_label_time += timeline.ElapsedSec();
      total_time += timeline.ElapsedSec();
      timeline.Start();
      FillSparseValue(i);
      timeline.Pause();
      fill_sparse_time += timeline.ElapsedSec();
      total_time += timeline.ElapsedSec();
      timeline.Start();
      auto nid_iter = std::find(sparse_value_names_[tid].begin(),
                                sparse_value_names_[tid].end(),
                                adjust_ins_weight_config_.nid_slot());
      if (nid_iter != sparse_value_names_[tid].end()) {
        AdjustInsWeight();
      }
      timeline.Pause();
      adjust_ins_weight_time += timeline.ElapsedSec();
      total_time += timeline.ElapsedSec();
    }
    VLOG(3) << "Fill sparse value for all sparse table done.";

    int run_op_idx = 0;
    for (auto& op : ops_) {
      bool need_skip = false;
      for (auto t = 0u; t < skip_ops_.size(); ++t) {
        if (op->Type().find(skip_ops_[t]) != std::string::npos) {
          need_skip = true;
          break;
        }
      }
      if (!need_skip) {
        timeline.Start();
        VLOG(3) << "Going to run op " << op_name[run_op_idx];
        op->Run(*thread_scope_, place_);
        VLOG(3) << "Op " << op_name[run_op_idx] << " Finished";
        timeline.Pause();
        op_total_time[run_op_idx++] += timeline.ElapsedSec();
        total_time += timeline.ElapsedSec();
      }
    }

    // check inf and nan
    for (std::string& var_name : check_nan_var_names_) {
      Variable* var = thread_scope_->FindVar(var_name);
      if (var == nullptr) {
        continue;
      }
      LoDTensor* tensor = var->GetMutable<LoDTensor>();
      if (tensor == nullptr) {
        continue;
      }
      PADDLE_ENFORCE_EQ(framework::TensorContainsInf(*tensor), false,
                        "Tensor %s contains Inf", var_name);
      PADDLE_ENFORCE_EQ(framework::TensorContainsNAN(*tensor), false,
                        "Tensor %s contains NAN", var_name);
    }

    if (need_to_push_sparse_) {
      for (int i = 0; i < param_.program_config(0).push_sparse_table_id_size();
           ++i) {
        uint64_t tid = static_cast<uint64_t>(
            param_.program_config(0).push_sparse_table_id(i));
        TableParameter table;
        for (auto i : param_.sparse_table()) {
          if (i.table_id() == tid) {
            table = i;
            break;
          }
        }
        timeline.Start();
        fleet_ptr_->PushSparseVarsWithLabelAsync(
            *thread_scope_, tid, features_[tid], feature_labels_[tid],
            sparse_key_names_[tid], sparse_grad_names_[tid], table.emb_dim(),
            &feature_grads_[tid], &push_sparse_status_, cur_batch, use_cvm_,
            dump_slot_);
        timeline.Pause();
        push_sparse_time += timeline.ElapsedSec();
        total_time += timeline.ElapsedSec();
      }
    }

    if (need_to_push_dense_) {
      timeline.Start();
      for (int i = 0; i < param_.program_config(0).push_dense_table_id_size();
           ++i) {
        uint64_t tid = static_cast<uint64_t>(
            param_.program_config(0).push_dense_table_id(i));
        fleet_ptr_->PushDenseVarsAsync(
            *thread_scope_, tid, dense_grad_names_[tid], &push_sparse_status_,
            scale_datanorm_, cur_batch);
      }
      timeline.Pause();
      push_dense_time += timeline.ElapsedSec();
      total_time += timeline.ElapsedSec();
      VLOG(3) << "push sparse and dense gradient done.";
      int32_t tmp_push_dense_wait_times = -1;
      static uint32_t push_dense_wait_times =
          static_cast<uint32_t>(tmp_push_dense_wait_times);
      if (push_dense_status_.size() >= push_dense_wait_times) {
        for (auto& t : push_dense_status_) {
          t.wait();
        }
        push_dense_status_.resize(0);
      }

      if (tmp_push_dense_wait_times == -1) {
        push_dense_status_.resize(0);
      }
    }

    if (need_to_push_sparse_) {
      int32_t tmp_push_sparse_wait_times = -1;
      static uint32_t push_sparse_wait_times =
          static_cast<uint32_t>(tmp_push_sparse_wait_times);
      if (push_sparse_status_.size() >= push_sparse_wait_times) {
        for (auto& t : push_sparse_status_) {
          t.wait();
        }
        push_sparse_status_.resize(0);
      }

      if (tmp_push_sparse_wait_times == -1) {
        push_sparse_status_.resize(0);
      }

      VLOG(3) << "going to increase thread version";
      VLOG(3) << "push dense table id size: "
              << param_.program_config(0).push_dense_table_id_size();
    }

    if (need_to_push_dense_) {
      for (int i = 0; i < param_.program_config(0).push_dense_table_id_size();
           ++i) {
        uint64_t tid = static_cast<uint64_t>(
            param_.program_config(0).push_dense_table_id(i));
        pull_dense_worker_->IncreaseThreadVersion(thread_id_, tid);
      }
    }

    PrintFetchVars();
    thread_scope_->DropKids();
    total_inst += cur_batch;
    ++batch_cnt;

    if (thread_id_ == 0) {
      // should be configured here
      if (batch_cnt > 0 && batch_cnt % 100 == 0) {
        double op_sum_time = 0;
        std::unordered_map<std::string, double> op_to_time;
        for (size_t i = 0; i < op_total_time.size(); ++i) {
          fprintf(stderr, "op_name:[%zu][%s], op_mean_time:[%fs]\n", i,
                  op_name[i].c_str(), op_total_time[i] / batch_cnt);
          if (op_to_time.find(op_name[i]) == op_to_time.end()) {
            op_to_time[op_name[i]] = 0.0;
          }
          op_to_time[op_name[i]] += op_total_time[i];
          op_sum_time += op_total_time[i];
        }
        for (auto& i : op_to_time) {
          fprintf(stderr, "op [%s] run total time: [%f]ms\n", i.first.c_str(),
                  i.second / batch_cnt);
        }
        fprintf(stderr, "op run total time: %fs\n", op_sum_time / batch_cnt);
        fprintf(stderr, "train total time: %fs\n", total_time / batch_cnt);
        fprintf(stderr, "pull sparse time: %fs\n",
                pull_sparse_time / batch_cnt);
        fprintf(stderr, "fill sparse time: %fs\n",
                fill_sparse_time / batch_cnt);
        fprintf(stderr, "push sparse time: %fs\n",
                push_sparse_time / batch_cnt);
        fprintf(stderr, "push dense time: %fs\n", push_dense_time / batch_cnt);
        fprintf(stderr, "collect label time: %fs\n",
                collect_label_time / batch_cnt);
        fprintf(stderr, "adjust ins weight time: %fs\n",
                adjust_ins_weight_time / batch_cnt);
        fprintf(stderr, "mean read time: %fs\n", read_time / batch_cnt);
        fprintf(stderr, "IO percent: %f\n", read_time / total_time * 100);
        fprintf(stderr, "op run percent: %f\n", op_sum_time / total_time * 100);
        fprintf(stderr, "pull sparse time percent: %f\n",
                pull_sparse_time / total_time * 100);
        fprintf(stderr, "adjust ins weight time percent: %f\n",
                adjust_ins_weight_time / total_time * 100);
        fprintf(stderr, "collect label time percent: %f\n",
                collect_label_time / total_time * 100);
        fprintf(stderr, "fill sparse time percent: %f\n",
                fill_sparse_time / total_time * 100);
        fprintf(stderr, "push sparse time percent: %f\n",
                push_sparse_time / total_time * 100);
        fprintf(stderr, "push dense time percent: %f\n",
                push_dense_time / total_time * 100);
        fprintf(stderr, "%6.2f instances/s\n", total_inst / total_time);
      }
    }
    timeline.Start();
  }
}

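// Main training loop: pull sparse tables, run the forward/backward ops, push
// sparse and dense gradients asynchronously, and optionally dump fields.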
void DownpourWorker::TrainFiles() {
  VLOG(3) << "Begin to train files";
  platform::SetNumThreads(1);
  device_reader_->Start();
  int batch_cnt = 0;
  int cur_batch;
  while ((cur_batch = device_reader_->Next()) > 0) {
    // pull sparse here
    for (int i = 0; i < param_.program_config(0).pull_sparse_table_id_size();
         ++i) {
      uint64_t tid = static_cast<uint64_t>(
          param_.program_config(0).pull_sparse_table_id(i));
      TableParameter table;
      for (auto j : param_.sparse_table()) {
        if (j.table_id() == tid) {
          table = j;
          break;
        }
      }
      fleet_ptr_->PullSparseVarsSync(*thread_scope_, tid,
                                     sparse_key_names_[tid], &features_[tid],
                                     &feature_values_[tid], table.fea_dim());
      CollectLabelInfo(i);
      FillSparseValue(i);
      auto nid_iter = std::find(sparse_value_names_[tid].begin(),
                                sparse_value_names_[tid].end(),
                                adjust_ins_weight_config_.nid_slot());
      if (nid_iter != sparse_value_names_[tid].end()) {
        AdjustInsWeight();
      }
    }
    VLOG(3) << "fill sparse value for all sparse table done.";

    // do computation here
    for (auto& op : ops_) {
      bool need_skip = false;
      for (auto t = 0u; t < skip_ops_.size(); ++t) {
        if (op->Type().find(skip_ops_[t]) != std::string::npos) {
          need_skip = true;
          break;
        }
      }
      if (!need_skip) {
        op->Run(*thread_scope_, place_);
      }
    }

    // check inf and nan
    for (std::string& var_name : check_nan_var_names_) {
      Variable* var = thread_scope_->FindVar(var_name);
      if (var == nullptr) {
        continue;
      }
      LoDTensor* tensor = var->GetMutable<LoDTensor>();
      if (tensor == nullptr) {
        continue;
      }
      PADDLE_ENFORCE_EQ(framework::TensorContainsInf(*tensor), false,
                        "Tensor %s contains Inf", var_name);
      PADDLE_ENFORCE_EQ(framework::TensorContainsNAN(*tensor), false,
                        "Tensor %s contains NAN", var_name);
    }

    if (need_to_push_sparse_) {
      // push gradients here
      for (int i = 0; i < param_.program_config(0).push_sparse_table_id_size();
           ++i) {
        uint64_t tid = static_cast<uint64_t>(
            param_.program_config(0).push_sparse_table_id(i));
        TableParameter table;
        for (auto i : param_.sparse_table()) {
          if (i.table_id() == tid) {
            table = i;
            break;
          }
        }
        fleet_ptr_->PushSparseVarsWithLabelAsync(
            *thread_scope_, tid, features_[tid], feature_labels_[tid],
            sparse_key_names_[tid], sparse_grad_names_[tid], table.emb_dim(),
            &feature_grads_[tid], &push_sparse_status_, cur_batch, use_cvm_,
            dump_slot_);
      }
    }

    if (need_to_push_dense_) {
      for (int i = 0; i < param_.program_config(0).push_dense_table_id_size();
           ++i) {
        uint64_t tid = static_cast<uint64_t>(
            param_.program_config(0).push_dense_table_id(i));
        fleet_ptr_->PushDenseVarsAsync(
            *thread_scope_, tid, dense_grad_names_[tid], &push_sparse_status_,
            scale_datanorm_, cur_batch);
      }
      VLOG(3) << "push dense gradient done.";

      // the following code should be more precise and clean
      // TODO(guru4elephant)
      int32_t tmp_push_dense_wait_times = -1;
      static uint32_t push_dense_wait_times =
          static_cast<uint32_t>(tmp_push_dense_wait_times);

      if (push_dense_status_.size() >= push_dense_wait_times) {
        for (auto& t : push_dense_status_) {
          t.wait();
        }
        push_dense_status_.resize(0);
      }

      if (tmp_push_dense_wait_times == -1) {
        push_dense_status_.resize(0);
      }
    }

    if (need_to_push_sparse_) {
      VLOG(3) << "push sparse gradient done.";
      int32_t tmp_push_sparse_wait_times = -1;
      static uint32_t push_sparse_wait_times =
          static_cast<uint32_t>(tmp_push_sparse_wait_times);
      if (push_sparse_status_.size() >= push_sparse_wait_times) {
        for (auto& t : push_sparse_status_) {
          t.wait();
        }
        push_sparse_status_.resize(0);
      }

      if (tmp_push_sparse_wait_times == -1) {
        push_sparse_status_.resize(0);
      }
    }

    if (need_to_push_dense_) {
      for (int i = 0; i < param_.program_config(0).push_dense_table_id_size();
           ++i) {
        uint64_t tid = static_cast<uint64_t>(
            param_.program_config(0).push_dense_table_id(i));
        pull_dense_worker_->IncreaseThreadVersion(thread_id_, tid);
      }
    }
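    // Dump instance ids, instance contents and the configured fields of this
    // batch to the channel writer.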
    if (need_dump_field_) {
      int batch_size = device_reader_->GetCurBatchSize();
      std::vector<std::string> ars(batch_size);
      for (auto& ar : ars) {
        ar.clear();
      }
      auto& ins_id_vec = device_reader_->GetInsIdVec();
      auto& ins_content_vec = device_reader_->GetInsContentVec();
      for (size_t i = 0; i < ins_id_vec.size(); i++) {
        ars[i] += ins_id_vec[i];
        ars[i] = ars[i] + "\t" + ins_content_vec[i];
      }
      for (auto& field : dump_fields_) {
        Variable* var = thread_scope_->FindVar(field);
        if (var == nullptr) {
          continue;
        }
        LoDTensor* tensor = var->GetMutable<LoDTensor>();
        if (!CheckValidOutput(tensor, batch_size)) {
          continue;
        }
        for (int i = 0; i < batch_size; ++i) {
          auto output_dim = tensor->dims()[1];
          std::string output_dimstr =
              boost::lexical_cast<std::string>(output_dim);
          ars[i] = ars[i] + "\t" + field + ":" + output_dimstr;
          auto bound = GetTensorBound(tensor, i);
          ars[i] += PrintLodTensor(tensor, bound.first, bound.second);
        }
      }
      // #pragma omp parallel for
      for (size_t i = 0; i < ars.size(); i++) {
        if (ars[i].length() == 0) {
          continue;
        }
        writer_ << ars[i];
      }
    }

    PrintFetchVars();
    thread_scope_->DropKids();
    ++batch_cnt;
  }
  if (need_dump_field_) {
    writer_.Flush();
  }
}

}  // end namespace framework
}  // end namespace paddle