/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/device_worker.h"
#include "paddle/fluid/framework/device_worker_factory.h"
#include "paddle/fluid/platform/cpu_helper.h"
#include "paddle/fluid/string/string_helper.h"

#if defined _WIN32 || defined __APPLE__
#else
#define _LINUX
#endif

namespace paddle {
namespace framework {

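// Cache everything this worker needs from the TrainerDesc: per-table sparse
// key/value/gradient variable names, dense value/gradient names, ops to
// skip, dump/NaN-check settings, and the table-copy configuration, plus the
// FleetWrapper singleton used for all parameter-server communication.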
void DownpourWorker::Initialize(const TrainerDesc& desc) {
  param_ = desc.downpour_param();
  for (int i = 0; i < param_.sparse_table_size(); ++i) {
    uint64_t table_id =
        static_cast<uint64_t>(param_.sparse_table(i).table_id());
    TableParameter table = param_.sparse_table(i);
    sparse_key_names_[table_id].resize(table.sparse_key_name_size());
    for (int j = 0; j < table.sparse_key_name_size(); ++j) {
      sparse_key_names_[table_id][j] = table.sparse_key_name(j);
    }
    sparse_value_names_[table_id].resize(table.sparse_value_name_size());
    for (int j = 0; j < table.sparse_value_name_size(); ++j) {
      sparse_value_names_[table_id][j] = table.sparse_value_name(j);
    }
    sparse_grad_names_[table_id].resize(table.sparse_grad_name_size());
    for (int j = 0; j < table.sparse_grad_name_size(); ++j) {
      sparse_grad_names_[table_id][j] = table.sparse_grad_name(j);
    }
    label_var_name_[table_id] = table.label_var_name();
    sparse_push_keys_[table_id] = std::vector<uint64_t>();
  }

  for (int i = 0; i < param_.dense_table_size(); ++i) {
    uint64_t table_id = static_cast<uint64_t>(param_.dense_table(i).table_id());
    auto table = param_.dense_table(i);
    dense_value_names_[table_id].resize(table.dense_value_name_size());
    for (int j = 0; j < table.dense_value_name_size(); ++j) {
      dense_value_names_[table_id][j] = table.dense_value_name(j);
    }
    dense_grad_names_[table_id].resize(table.dense_grad_name_size());
    for (int j = 0; j < table.dense_grad_name_size(); ++j) {
      dense_grad_names_[table_id][j] = table.dense_grad_name(j);
    }
  }

  skip_ops_.resize(param_.skip_ops_size());
  for (int i = 0; i < param_.skip_ops_size(); ++i) {
    skip_ops_[i] = param_.skip_ops(i);
  }

  for (int i = 0; i < param_.stat_var_names_size(); ++i) {
    stat_var_name_map_[param_.stat_var_names(i)] = 1;
  }

  need_to_push_sparse_ = param_.push_sparse();
  need_to_push_dense_ = param_.push_dense();

  fleet_ptr_ = FleetWrapper::GetInstance();
  fetch_config_ = desc.fetch_config();
  use_cvm_ = desc.use_cvm();
  scale_datanorm_ = desc.scale_datanorm();
  dump_slot_ = desc.dump_slot();
  dump_fields_.resize(desc.dump_fields_size());
  for (int i = 0; i < desc.dump_fields_size(); ++i) {
    dump_fields_[i] = desc.dump_fields(i);
  }
  adjust_ins_weight_config_ = desc.adjust_ins_weight_config();
  need_dump_param_ = false;
  dump_param_.resize(desc.dump_param_size());
  for (int i = 0; i < desc.dump_param_size(); ++i) {
    dump_param_[i] = desc.dump_param(i);
  }
  if (desc.dump_param_size() != 0) {
    need_dump_param_ = true;
  }
  for (int i = 0; i < desc.check_nan_var_names_size(); ++i) {
    check_nan_var_names_.push_back(desc.check_nan_var_names(i));
  }
  copy_table_config_ = desc.copy_table_config();
  for (int i = 0; i < copy_table_config_.src_sparse_tables_size(); ++i) {
    uint64_t src_table = copy_table_config_.src_sparse_tables(i);
    uint64_t dest_table = copy_table_config_.dest_sparse_tables(i);
    VLOG(3) << "copy_sparse_tables_ push back " << src_table << "->"
            << dest_table;
    copy_sparse_tables_.push_back(std::make_pair(src_table, dest_table));
  }
  for (int i = 0; i < copy_table_config_.src_dense_tables_size(); ++i) {
    uint64_t src_table = copy_table_config_.src_dense_tables(i);
    uint64_t dest_table = copy_table_config_.dest_dense_tables(i);
    VLOG(3) << "copy_dense_tables_ push back " << src_table << "->"
            << dest_table;
    copy_dense_tables_.push_back(std::make_pair(src_table, dest_table));
  }
  for (auto& m : copy_table_config_.table_denpendency_map()) {
    if (sparse_key_names_.find(m.key()) != sparse_key_names_.end()) {
      // currently only support one dependency
      for (auto& value : m.values()) {
        table_dependency_[m.key()] = value;
      }
    }
  }
}

void DownpourWorker::SetChannelWriter(ChannelObject<std::string>* queue) {
  writer_.Reset(queue);
}

void DownpourWorker::SetNeedDump(bool need_dump_field) {
  need_dump_field_ = need_dump_field;
}

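// Serialize tensor elements in [start, end) as ":"-separated values,
// returning the literal string "access violation" when the range falls
// outside the tensor instead of crashing.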
template <typename T>
std::string PrintLodTensorType(LoDTensor* tensor, int64_t start, int64_t end) {
  auto count = tensor->numel();
  if (start < 0 || end > count) {
    VLOG(3) << "access violation";
    return "access violation";
  }
  std::ostringstream os;
  for (int64_t i = start; i < end; i++) {
    os << ":" << tensor->data<T>()[i];
  }
  return os.str();
}

std::string PrintLodTensorIntType(LoDTensor* tensor, int64_t start,
                                  int64_t end) {
  auto count = tensor->numel();
  if (start < 0 || end > count) {
    VLOG(3) << "access violation";
    return "access violation";
  }
  std::ostringstream os;
  for (int64_t i = start; i < end; i++) {
    os << ":" << static_cast<uint64_t>(tensor->data<int64_t>()[i]);
  }
  return os.str();
}

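// Dispatch on dtype; only FP32, INT64 and FP64 tensors can be dumped.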
std::string PrintLodTensor(LoDTensor* tensor, int64_t start, int64_t end) {
  std::string out_val;
  if (tensor->type() == proto::VarType::FP32) {
    out_val = PrintLodTensorType<float>(tensor, start, end);
  } else if (tensor->type() == proto::VarType::INT64) {
    out_val = PrintLodTensorIntType(tensor, start, end);
  } else if (tensor->type() == proto::VarType::FP64) {
    out_val = PrintLodTensorType<double>(tensor, start, end);
  } else {
    out_val = "unsupported type";
  }
  return out_val;
}

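// Flat element range [begin, end) occupied by instance `index`: level-0 LoD
// offsets for variable-length outputs, a fixed dims[1] stride otherwise.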
std::pair<int64_t, int64_t> GetTensorBound(LoDTensor* tensor, int index) {
  auto& dims = tensor->dims();
  if (tensor->lod().size() != 0) {
    auto& lod = tensor->lod()[0];
    return {lod[index] * dims[1], lod[index + 1] * dims[1]};
  } else {
    return {index * dims[1], (index + 1) * dims[1]};
  }
}

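// A dumpable output must be 2-D and cover the whole batch: either its
// level-0 LoD has batch_size + 1 offsets or dims[0] equals batch_size.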
bool CheckValidOutput(LoDTensor* tensor, int batch_size) {
  auto& dims = tensor->dims();
  if (dims.size() != 2) return false;
  if (tensor->lod().size() != 0) {
    auto& lod = tensor->lod()[0];
    if (lod.size() != static_cast<size_t>(batch_size) + 1) {
      return false;
    }
  } else {
    if (dims[0] != batch_size) {
      return false;
    }
  }
  return true;
}

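// Write each configured parameter to the channel writer as
// "name:v0:v1:..."; variables missing from the thread scope are skipped.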
void DownpourWorker::DumpParam() {
  std::string os;
  for (auto& param : dump_param_) {
    os = param;
    Variable* var = thread_scope_->FindVar(param);
    if (var == nullptr) {
      continue;
    }
    LoDTensor* tensor = var->GetMutable<LoDTensor>();
    int64_t len = tensor->numel();
    os += PrintLodTensor(tensor, 0, len);
    writer_ << os;
  }
}

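// For the table pulled at position table_idx, record the label of the owning
// instance for every non-zero feasign, so that feature_labels_[table_id]
// lines up one-to-one with features_[table_id] for the sparse push.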
void DownpourWorker::CollectLabelInfo(size_t table_idx) {
  uint64_t table_id = static_cast<uint64_t>(
      param_.program_config(0).pull_sparse_table_id(table_idx));

  TableParameter table;
  for (auto i : param_.sparse_table()) {
    if (i.table_id() == table_id) {
      table = i;
      break;
    }
  }
  auto& feature = features_[table_id];
  auto& feature_label = feature_labels_[table_id];
  feature_label.resize(feature.size());
  Variable* var = thread_scope_->FindVar(label_var_name_[table_id]);
  LoDTensor* tensor = var->GetMutable<LoDTensor>();
  int64_t* label_ptr = tensor->data<int64_t>();

  size_t global_index = 0;
  for (size_t i = 0; i < sparse_key_names_[table_id].size(); ++i) {
    VLOG(3) << "sparse_key_names_[" << i
            << "]: " << sparse_key_names_[table_id][i];
    Variable* fea_var = thread_scope_->FindVar(sparse_key_names_[table_id][i]);
    if (fea_var == nullptr) {
      continue;
    }
    LoDTensor* tensor = fea_var->GetMutable<LoDTensor>();
    CHECK(tensor != nullptr) << "tensor of var "
                             << sparse_key_names_[table_id][i] << " is null";

    // skip slots which do not have embedding
    Variable* emb_var =
        thread_scope_->FindVar(sparse_value_names_[table_id][i]);
    if (emb_var == nullptr) {
      continue;
    }

    int64_t* ids = tensor->data<int64_t>();
    size_t fea_idx = 0;
    // tensor->lod()[0].size() == batch_size + 1
    for (auto lod_idx = 1u; lod_idx < tensor->lod()[0].size(); ++lod_idx) {
      for (; fea_idx < tensor->lod()[0][lod_idx]; ++fea_idx) {
        // skip the padding feasign (id 0) defined in the protobuf config
        if (ids[fea_idx] == 0u) {
          continue;
        }
        feature_label[global_index++] =
            static_cast<float>(label_ptr[lod_idx - 1]);
      }
    }
  }
  CHECK(global_index == feature.size())
      << "expect fea info size:" << feature.size() << " real:" << global_index;
}

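// Scatter the values pulled for this table into the per-slot embedding
// tensors. Feasign 0 gets the (zero-initialized) init_value instead; when
// use_cvm_ is off, the leading two elements of each pulled value (the
// show/click statistics in the Downpour value layout) are skipped. The
// first feasign value of the configured nid slot is also recorded per
// instance in nid_show_ for AdjustInsWeight().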
void DownpourWorker::FillSparseValue(size_t table_idx) {
  uint64_t table_id = static_cast<uint64_t>(
      param_.program_config(0).pull_sparse_table_id(table_idx));

  TableParameter table;
  for (auto i : param_.sparse_table()) {
    if (i.table_id() == table_id) {
      table = i;
      break;
    }
  }

  auto& fea_value = feature_values_[table_id];
  auto fea_idx = 0u;

  std::vector<float> init_value(table.fea_dim());
  for (size_t i = 0; i < sparse_key_names_[table_id].size(); ++i) {
    std::string slot_name = sparse_key_names_[table_id][i];
    std::string emb_slot_name = sparse_value_names_[table_id][i];
    Variable* var = thread_scope_->FindVar(slot_name);
    if (var == nullptr) {
      continue;
    }
    LoDTensor* tensor = var->GetMutable<LoDTensor>();
    CHECK(tensor != nullptr) << "tensor of var " << slot_name << " is null";
    int64_t* ids = tensor->data<int64_t>();
    int len = tensor->numel();
    Variable* var_emb = thread_scope_->FindVar(emb_slot_name);
    if (var_emb == nullptr) {
      continue;
    }
    LoDTensor* tensor_emb = var_emb->GetMutable<LoDTensor>();
    float* ptr = tensor_emb->mutable_data<float>({len, table.emb_dim()},
                                                 platform::CPUPlace());
    memset(ptr, 0, sizeof(float) * len * table.emb_dim());
    auto& tensor_lod = tensor->lod()[0];
    LoD data_lod{tensor_lod};
    tensor_emb->set_lod(data_lod);

    bool is_nid = (adjust_ins_weight_config_.need_adjust() &&
                   adjust_ins_weight_config_.nid_slot() == emb_slot_name);
    if (is_nid) {
      nid_show_.clear();
    }
    int nid_ins_index = 0;

    for (int index = 0; index < len; ++index) {
      if (use_cvm_) {
        if (ids[index] == 0u) {
          memcpy(ptr + table.emb_dim() * index, init_value.data(),
                 sizeof(float) * table.emb_dim());
          if (is_nid) {
            nid_show_.push_back(-1);
            ++nid_ins_index;
          }
          continue;
        }
        memcpy(ptr + table.emb_dim() * index, fea_value[fea_idx].data(),
               sizeof(float) * table.emb_dim());
        if (is_nid && index == tensor->lod()[0][nid_ins_index]) {
          nid_show_.push_back(fea_value[fea_idx][0]);
          ++nid_ins_index;
        }
        fea_idx++;
      } else {
        if (ids[index] == 0u) {
          memcpy(ptr + table.emb_dim() * index, init_value.data() + 2,
                 sizeof(float) * table.emb_dim());
          if (is_nid) {
            nid_show_.push_back(-1);
            ++nid_ins_index;
          }
          continue;
        }
        memcpy(ptr + table.emb_dim() * index, fea_value[fea_idx].data() + 2,
               sizeof(float) * table.emb_dim());
        if (is_nid && index == tensor->lod()[0][nid_ins_index]) {
          nid_show_.push_back(fea_value[fea_idx][0]);
          ++nid_ins_index;
        }
        fea_idx++;
      }
    }
  }
}

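// Boost the weight of instances whose nid feasign was shown few times: for
// nid_show in [0, threshold) the candidate weight is
// log(e + (threshold - nid_show) / threshold * ratio), and the larger of
// the candidate and the current ins_weight is kept. The body is compiled
// only under the _LINUX guard.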
void DownpourWorker::AdjustInsWeight() {
#ifdef _LINUX
  // check var and tensor not null
  if (!adjust_ins_weight_config_.need_adjust()) {
    VLOG(0) << "need_adjust=false, skip adjust ins weight";
    return;
  }
  Variable* nid_var =
      thread_scope_->FindVar(adjust_ins_weight_config_.nid_slot());
  if (nid_var == nullptr) {
    VLOG(0) << "nid slot var " << adjust_ins_weight_config_.nid_slot()
            << " is nullptr, skip adjust ins weight";
    return;
  }
  LoDTensor* nid_tensor = nid_var->GetMutable<LoDTensor>();
  if (nid_tensor == nullptr) {
    VLOG(0) << "tensor of nid slot var " << adjust_ins_weight_config_.nid_slot()
            << " is nullptr, skip adjust ins weight";
    return;
  }
  Variable* ins_weight_var =
      thread_scope_->FindVar(adjust_ins_weight_config_.ins_weight_slot());
  if (ins_weight_var == nullptr) {
    VLOG(0) << "ins weight var " << adjust_ins_weight_config_.ins_weight_slot()
            << " is nullptr, skip adjust ins weight";
    return;
  }
  LoDTensor* ins_weight_tensor = ins_weight_var->GetMutable<LoDTensor>();
  if (ins_weight_tensor == nullptr) {
    VLOG(0) << "tensor of ins weight tensor "
            << adjust_ins_weight_config_.ins_weight_slot()
            << " is nullptr, skip adjust ins weight";
    return;
  }

  float* ins_weights = ins_weight_tensor->data<float>();
  size_t len = ins_weight_tensor->numel();  // len = batch size
  // here we assume nid_show slot only has one feasign in each instance
  CHECK(len == nid_show_.size()) << "ins_weight size should be equal to "
                                 << "nid_show size, " << len << " vs "
                                 << nid_show_.size();
  float nid_adjw_threshold = adjust_ins_weight_config_.nid_adjw_threshold();
  float nid_adjw_ratio = adjust_ins_weight_config_.nid_adjw_ratio();
  int64_t nid_adjw_num = 0;
  double nid_adjw_weight = 0.0;
  size_t ins_index = 0;
  for (size_t i = 0; i < len; ++i) {
    float nid_show = nid_show_[i];
    VLOG(3) << "nid_show " << nid_show;
    if (nid_show < 0) {
      VLOG(3) << "nid_show < 0, continue";
      continue;
    }
    float ins_weight = 1.0;
    if (nid_show >= 0 && nid_show < nid_adjw_threshold) {
      ins_weight = log(M_E +
                       (nid_adjw_threshold - nid_show) / nid_adjw_threshold *
                           nid_adjw_ratio);
      // count nid adjw insnum and weight
      ++nid_adjw_num;
      nid_adjw_weight += ins_weight;
      // choose large ins weight
      VLOG(3) << "ins weight new " << ins_weight << ", ins weight origin "
              << ins_weights[ins_index];
      if (ins_weight > ins_weights[ins_index]) {
        VLOG(3) << "ins " << ins_index << " weight changes to " << ins_weight;
        ins_weights[ins_index] = ins_weight;
      }
      ++ins_index;
    }
  }
  VLOG(3) << "nid adjw info: total_adjw_num: " << nid_adjw_num
          << ", avg_adjw_weight: " << nid_adjw_weight;
#endif
}

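// Copy each configured src->dest sparse table pair on the server side:
// wholesale via CopyTable(), or restricted to the feasigns this worker
// touched via CopyTableByFeasign() when sparse_copy_by_feasign is set. The
// accumulated feasign sets are swapped away afterwards to free memory.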
void DownpourWorker::CopySparseTable() {
  for (size_t i = 0; i < copy_sparse_tables_.size(); ++i) {
    int64_t src_table = copy_sparse_tables_[i].first;
    int64_t dest_table = copy_sparse_tables_[i].second;
    int32_t feanum = 0;
    if (src_table == dest_table) {
      continue;
    } else if (!copy_table_config_.sparse_copy_by_feasign()) {
      if (feasign_set_.find(src_table) == feasign_set_.end()) {
        continue;
      } else if (feasign_set_[src_table].size() == 0) {
        continue;
      }
      feanum = fleet_ptr_->CopyTable(src_table, dest_table);
    } else {
      std::vector<uint64_t> fea_vec(feasign_set_[src_table].begin(),
                                    feasign_set_[src_table].end());
      feanum = fleet_ptr_->CopyTableByFeasign(src_table, dest_table, fea_vec);
      fea_vec.clear();
      std::vector<uint64_t>().swap(fea_vec);
    }
    VLOG(3) << "copy feasign from table " << src_table << " to table "
            << dest_table << ", feasign num=" << feanum;
    feasign_set_[src_table].clear();
    std::unordered_set<uint64_t>().swap(feasign_set_[src_table]);
  }
  feasign_set_.clear();
}

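// Server-side copy of the configured dense table pairs; only thread 0 does
// this. When dense_pull_after_copy is set, the copied table is pulled back
// into the root scope and the pull statuses are waited on immediately.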
void DownpourWorker::CopyDenseTable() {
  if (thread_id_ != 0) {
    return;
  }
  thread_local std::vector<std::future<int32_t>> pull_dense_status;
  for (size_t i = 0; i < copy_dense_tables_.size(); ++i) {
    uint64_t src_table = copy_dense_tables_[i].first;
    uint64_t dest_table = copy_dense_tables_[i].second;
    if (src_table == dest_table) {
      continue;
    }
    int32_t dim = fleet_ptr_->CopyTable(src_table, dest_table);
    VLOG(3) << "copy param from table " << src_table << " to table "
            << dest_table << ", dim=" << dim;
    if (copy_table_config_.dense_pull_after_copy()) {
      VLOG(3) << "dense pull after copy, table=" << dest_table;
      pull_dense_status.resize(0);
      fleet_ptr_->PullDenseVarsAsync(*root_scope_, dest_table,
                                     dense_value_names_[dest_table],
                                     &pull_dense_status);
      for (auto& t : pull_dense_status) {
        t.wait();
        auto status = t.get();
        if (status != 0) {
          LOG(WARNING) << "pull dense after copy table failed,"
                       << " table=" << dest_table;
        }
      }
    }
  }
}

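// Element-wise copy between local dense variables listed in src_var_list /
// dest_var_list; only thread 0 does this, and the tensors must have equal
// numel.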
void DownpourWorker::CopyDenseVars() {
  if (thread_id_ != 0) {
    return;
  }
  for (int i = 0; i < copy_table_config_.src_var_list_size(); ++i) {
    auto& src_var_name = copy_table_config_.src_var_list(i);
    auto& dest_var_name = copy_table_config_.dest_var_list(i);
    if (src_var_name == dest_var_name) {
      continue;
    }
    VLOG(3) << "copy dense var from " << src_var_name << " to "
            << dest_var_name;
    Variable* src_var = thread_scope_->FindVar(src_var_name);
    CHECK(src_var != nullptr) << src_var_name << " not found";  // NOLINT
    LoDTensor* src_tensor = src_var->GetMutable<LoDTensor>();
    CHECK(src_tensor != nullptr) << src_var_name
                                 << " tensor is null";  // NOLINT
    float* src_data = src_tensor->data<float>();

    Variable* dest_var = thread_scope_->FindVar(dest_var_name);
    CHECK(dest_var != nullptr) << dest_var_name << " not found";  // NOLINT
    LoDTensor* dest_tensor = dest_var->GetMutable<LoDTensor>();
    CHECK(dest_tensor != nullptr) << dest_var_name
                                  << " tensor is null";  // NOLINT
    float* dest_data = dest_tensor->data<float>();

    CHECK(src_tensor->numel() == dest_tensor->numel())
        << "tensor numel not equal," << src_tensor->numel() << " vs "
        << dest_tensor->numel();
    for (int64_t j = 0; j < src_tensor->numel(); ++j) {
      dest_data[j] = src_data[j];
    }
  }
}

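// Same pipeline as TrainFiles(), with every stage (read, table copy, pull
// sparse, collect label, fill sparse, op execution, pushes) wrapped in a
// timer; thread 0 prints per-op and per-stage means and throughput every
// 100 batches.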
void DownpourWorker::TrainFilesWithProfiler() {
  VLOG(3) << "Begin to train files with profiler";
  platform::SetNumThreads(1);
  device_reader_->Start();
  std::vector<double> op_total_time;
  std::vector<std::string> op_name;
  for (auto& op : ops_) {
    bool need_skip = false;
    for (auto t = 0u; t < skip_ops_.size(); ++t) {
      if (op->Type().find(skip_ops_[t]) != std::string::npos) {
        need_skip = true;
        break;
      }
    }
    if (!need_skip) {
      op_name.push_back(op->Type());
    }
  }

  VLOG(3) << "op name size: " << op_name.size();
  op_total_time.resize(op_name.size());
  for (size_t i = 0; i < op_total_time.size(); ++i) {
    op_total_time[i] = 0.0;
  }
  platform::Timer timeline;
  double total_time = 0.0;
  double read_time = 0.0;
  double pull_sparse_time = 0.0;
  double adjust_ins_weight_time = 0.0;
  double collect_label_time = 0.0;
  double fill_sparse_time = 0.0;
  double push_sparse_time = 0.0;
  double push_dense_time = 0.0;
  double copy_table_time = 0.0;
  int cur_batch;
  int batch_cnt = 0;
  uint64_t total_inst = 0;
  timeline.Start();
  while ((cur_batch = device_reader_->Next()) > 0) {
    timeline.Pause();
    read_time += timeline.ElapsedSec();
    total_time += timeline.ElapsedSec();

    timeline.Start();
    if (copy_table_config_.need_copy()) {
      VLOG(3) << "copy_sparse_tables_.size " << copy_sparse_tables_.size();
      if (copy_table_config_.sparse_copy_by_feasign()) {
        for (size_t i = 0; i < copy_sparse_tables_.size(); ++i) {
          uint64_t tid = copy_sparse_tables_[i].first;
          feasign_set_[tid].insert(sparse_push_keys_[tid].begin(),
                                   sparse_push_keys_[tid].end());
        }
      }
      if (batch_cnt % copy_table_config_.batch_num() == 0) {
        CopySparseTable();
        CopyDenseTable();
        CopyDenseVars();
      }
    }
    timeline.Pause();
    copy_table_time += timeline.ElapsedSec();
    total_time += timeline.ElapsedSec();

    VLOG(3) << "program config size: " << param_.program_config_size();
    for (int i = 0; i < param_.program_config(0).pull_sparse_table_id_size();
         ++i) {
      uint64_t tid = static_cast<uint64_t>(
          param_.program_config(0).pull_sparse_table_id(i));
      TableParameter table;
      for (auto j : param_.sparse_table()) {
        if (j.table_id() == tid) {
          table = j;
          break;
        }
      }
      timeline.Start();
      fleet_ptr_->PullSparseVarsSync(
          *thread_scope_, tid, sparse_key_names_[tid], &features_[tid],
          &feature_values_[tid], table.fea_dim(), sparse_value_names_[tid]);
      timeline.Pause();
      pull_sparse_time += timeline.ElapsedSec();
      total_time += timeline.ElapsedSec();
      timeline.Start();
      CollectLabelInfo(i);
      timeline.Pause();
      collect_label_time += timeline.ElapsedSec();
      total_time += timeline.ElapsedSec();
      timeline.Start();
      FillSparseValue(i);
      timeline.Pause();
      fill_sparse_time += timeline.ElapsedSec();
      total_time += timeline.ElapsedSec();
      timeline.Start();
      auto nid_iter = std::find(sparse_value_names_[tid].begin(),
                                sparse_value_names_[tid].end(),
                                adjust_ins_weight_config_.nid_slot());
      if (nid_iter != sparse_value_names_[tid].end()) {
        AdjustInsWeight();
      }
      timeline.Pause();
      adjust_ins_weight_time += timeline.ElapsedSec();
      total_time += timeline.ElapsedSec();
    }
    VLOG(3) << "Fill sparse value for all sparse table done.";

    int run_op_idx = 0;
    for (auto& op : ops_) {
      bool need_skip = false;
      for (auto t = 0u; t < skip_ops_.size(); ++t) {
        if (op->Type().find(skip_ops_[t]) != std::string::npos) {
          need_skip = true;
          break;
        }
      }
      if (!need_skip) {
        timeline.Start();
        VLOG(3) << "Going to run op " << op_name[run_op_idx];
        op->Run(*thread_scope_, place_);
        VLOG(3) << "Op " << op_name[run_op_idx] << " Finished";
        timeline.Pause();
        op_total_time[run_op_idx++] += timeline.ElapsedSec();
        total_time += timeline.ElapsedSec();
      }
    }

    // check inf and nan
    for (std::string& var_name : check_nan_var_names_) {
      Variable* var = thread_scope_->FindVar(var_name);
      if (var == nullptr) {
        continue;
      }
      LoDTensor* tensor = var->GetMutable<LoDTensor>();
      if (tensor == nullptr) {
        continue;
      }
      PADDLE_ENFORCE_EQ(framework::TensorContainsInf(*tensor), false,
                        "Tensor %s contains Inf", var_name);
      PADDLE_ENFORCE_EQ(framework::TensorContainsNAN(*tensor), false,
                        "Tensor %s contains NAN", var_name);
    }

    if (need_to_push_sparse_) {
      for (int i = 0; i < param_.program_config(0).push_sparse_table_id_size();
           ++i) {
        uint64_t tid = static_cast<uint64_t>(
            param_.program_config(0).push_sparse_table_id(i));
        TableParameter table;
        for (const auto& st : param_.sparse_table()) {
          if (st.table_id() == tid) {
            table = st;
            break;
          }
        }
        timeline.Start();
        fleet_ptr_->PushSparseVarsWithLabelAsync(
            *thread_scope_, tid, features_[tid], feature_labels_[tid],
            sparse_key_names_[tid], sparse_grad_names_[tid], table.emb_dim(),
            &feature_grads_[tid], &push_sparse_status_, cur_batch, use_cvm_,
            dump_slot_, &sparse_push_keys_[tid]);
        timeline.Pause();
        push_sparse_time += timeline.ElapsedSec();
        total_time += timeline.ElapsedSec();
      }
    }

    if (need_to_push_dense_) {
      timeline.Start();
      for (int i = 0; i < param_.program_config(0).push_dense_table_id_size();
           ++i) {
        uint64_t tid = static_cast<uint64_t>(
            param_.program_config(0).push_dense_table_id(i));
        fleet_ptr_->PushDenseVarsAsync(
            *thread_scope_, tid, dense_grad_names_[tid], &push_sparse_status_,
            scale_datanorm_, cur_batch);
      }
      timeline.Pause();
      push_dense_time += timeline.ElapsedSec();
      total_time += timeline.ElapsedSec();
      VLOG(3) << "push sparse and dense gradient done.";
      int32_t tmp_push_dense_wait_times = -1;
      static uint32_t push_dense_wait_times =
          static_cast<uint32_t>(tmp_push_dense_wait_times);
      if (push_dense_status_.size() >= push_dense_wait_times) {
        for (auto& t : push_dense_status_) {
          t.wait();
        }
        push_dense_status_.resize(0);
      }

      if (tmp_push_dense_wait_times == -1) {
        push_dense_status_.resize(0);
      }
    }

    if (need_to_push_sparse_) {
      int32_t tmp_push_sparse_wait_times = -1;
      static uint32_t push_sparse_wait_times =
          static_cast<uint32_t>(tmp_push_sparse_wait_times);
      if (push_sparse_status_.size() >= push_sparse_wait_times) {
        for (auto& t : push_sparse_status_) {
          t.wait();
        }
        push_sparse_status_.resize(0);
      }

      if (tmp_push_sparse_wait_times == -1) {
        push_sparse_status_.resize(0);
      }

      VLOG(3) << "going to increase thread version";
      VLOG(3) << "push dense table id size: "
              << param_.program_config(0).push_dense_table_id_size();
    }

    if (need_to_push_dense_) {
      for (int i = 0; i < param_.program_config(0).push_dense_table_id_size();
           ++i) {
        uint64_t tid = static_cast<uint64_t>(
            param_.program_config(0).push_dense_table_id(i));
        pull_dense_worker_->IncreaseThreadVersion(thread_id_, tid);
      }
    }

    PrintFetchVars();
    thread_scope_->DropKids();
    total_inst += cur_batch;
    ++batch_cnt;

    if (thread_id_ == 0) {
      // should be configured here
      if (batch_cnt > 0 && batch_cnt % 100 == 0) {
        double op_sum_time = 0;
        std::unordered_map<std::string, double> op_to_time;
        for (size_t i = 0; i < op_total_time.size(); ++i) {
          fprintf(stderr, "op_name:[%zu][%s], op_mean_time:[%fs]\n", i,
                  op_name[i].c_str(), op_total_time[i] / batch_cnt);
          if (op_to_time.find(op_name[i]) == op_to_time.end()) {
            op_to_time[op_name[i]] = 0.0;
          }
          op_to_time[op_name[i]] += op_total_time[i];
          op_sum_time += op_total_time[i];
        }
        for (auto& i : op_to_time) {
          fprintf(stderr, "op [%s] run total time: [%f]ms\n", i.first.c_str(),
                  i.second / batch_cnt);
        }
        fprintf(stderr, "op run total time: %fs\n", op_sum_time / batch_cnt);
        fprintf(stderr, "train total time: %fs\n", total_time / batch_cnt);
        fprintf(stderr, "pull sparse time: %fs\n",
                pull_sparse_time / batch_cnt);
        fprintf(stderr, "fill sparse time: %fs\n",
                fill_sparse_time / batch_cnt);
        fprintf(stderr, "push sparse time: %fs\n",
                push_sparse_time / batch_cnt);
        fprintf(stderr, "push dense time: %fs\n", push_dense_time / batch_cnt);
        fprintf(stderr, "collect label time: %fs\n",
                collect_label_time / batch_cnt);
        fprintf(stderr, "adjust ins weight time: %fs\n",
                adjust_ins_weight_time / batch_cnt);
        fprintf(stderr, "copy table time: %fs\n", copy_table_time / batch_cnt);
        fprintf(stderr, "mean read time: %fs\n", read_time / batch_cnt);
        fprintf(stderr, "IO percent: %f\n", read_time / total_time * 100);
        fprintf(stderr, "op run percent: %f\n", op_sum_time / total_time * 100);
        fprintf(stderr, "pull sparse time percent: %f\n",
                pull_sparse_time / total_time * 100);
        fprintf(stderr, "adjust ins weight time percent: %f\n",
                adjust_ins_weight_time / total_time * 100);
        fprintf(stderr, "copy table time percent: %f\n",
                copy_table_time / total_time * 100);
        fprintf(stderr, "collect label time percent: %f\n",
                collect_label_time / total_time * 100);
        fprintf(stderr, "fill sparse time percent: %f\n",
                fill_sparse_time / total_time * 100);
        fprintf(stderr, "push sparse time percent: %f\n",
                push_sparse_time / total_time * 100);
        fprintf(stderr, "push dense time percent: %f\n",
                push_dense_time / total_time * 100);
        fprintf(stderr, "%6.2f instances/s\n", total_inst / total_time);
      }
    }
    timeline.Start();
  }
  if (copy_table_config_.need_copy()) {
    CopySparseTable();
    CopyDenseTable();
    CopyDenseVars();
  }
}

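// Main per-thread training loop: for every batch, stage copy-table work if
// configured, pull sparse tables and fill embeddings, run the ops, check
// outputs for Inf/NaN, push sparse/dense gradients asynchronously, and dump
// the configured fields/params.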
void DownpourWorker::TrainFiles() {
  VLOG(3) << "Begin to train files";
  platform::SetNumThreads(1);
  device_reader_->Start();
  int batch_cnt = 0;
  int cur_batch;
  while ((cur_batch = device_reader_->Next()) > 0) {
    if (copy_table_config_.need_copy()) {
      if (copy_table_config_.sparse_copy_by_feasign()) {
        for (size_t i = 0; i < copy_sparse_tables_.size(); ++i) {
          uint64_t tid = copy_sparse_tables_[i].first;
          feasign_set_[tid].insert(sparse_push_keys_[tid].begin(),
                                   sparse_push_keys_[tid].end());
        }
      }
      if (batch_cnt % copy_table_config_.batch_num() == 0) {
        CopySparseTable();
        CopyDenseTable();
        CopyDenseVars();
      }
    }
    // pull sparse here
    for (int i = 0; i < param_.program_config(0).pull_sparse_table_id_size();
         ++i) {
      uint64_t tid = static_cast<uint64_t>(
          param_.program_config(0).pull_sparse_table_id(i));
      TableParameter table;
      for (auto j : param_.sparse_table()) {
        if (j.table_id() == tid) {
          table = j;
          break;
        }
      }
      fleet_ptr_->PullSparseVarsSync(
          *thread_scope_, tid, sparse_key_names_[tid], &features_[tid],
          &feature_values_[tid], table.fea_dim(), sparse_value_names_[tid]);
      CollectLabelInfo(i);
      FillSparseValue(i);
      auto nid_iter = std::find(sparse_value_names_[tid].begin(),
                                sparse_value_names_[tid].end(),
                                adjust_ins_weight_config_.nid_slot());
      if (nid_iter != sparse_value_names_[tid].end()) {
        AdjustInsWeight();
      }
    }
    VLOG(3) << "fill sparse value for all sparse table done.";

    // do computation here
    for (auto& op : ops_) {
      bool need_skip = false;
      for (auto t = 0u; t < skip_ops_.size(); ++t) {
        if (op->Type().find(skip_ops_[t]) != std::string::npos) {
          need_skip = true;
          break;
        }
      }
      if (!need_skip) {
        op->Run(*thread_scope_, place_);
      }
    }

    // check inf and nan
    for (std::string& var_name : check_nan_var_names_) {
      Variable* var = thread_scope_->FindVar(var_name);
      if (var == nullptr) {
        continue;
      }
      LoDTensor* tensor = var->GetMutable<LoDTensor>();
      if (tensor == nullptr) {
        continue;
      }
      PADDLE_ENFORCE_EQ(framework::TensorContainsInf(*tensor), false,
                        "Tensor %s contains Inf", var_name);
      PADDLE_ENFORCE_EQ(framework::TensorContainsNAN(*tensor), false,
                        "Tensor %s contains NAN", var_name);
    }

    if (need_to_push_sparse_) {
      // push gradients here
      for (int i = 0; i < param_.program_config(0).push_sparse_table_id_size();
           ++i) {
        uint64_t tid = static_cast<uint64_t>(
            param_.program_config(0).push_sparse_table_id(i));
        TableParameter table;
        for (const auto& st : param_.sparse_table()) {
          if (st.table_id() == tid) {
            table = st;
            break;
          }
        }
        fleet_ptr_->PushSparseVarsWithLabelAsync(
            *thread_scope_, tid, features_[tid], feature_labels_[tid],
            sparse_key_names_[tid], sparse_grad_names_[tid], table.emb_dim(),
            &feature_grads_[tid], &push_sparse_status_, cur_batch, use_cvm_,
            dump_slot_, &sparse_push_keys_[tid]);
      }
    }

    if (need_to_push_dense_) {
      for (int i = 0; i < param_.program_config(0).push_dense_table_id_size();
           ++i) {
        uint64_t tid = static_cast<uint64_t>(
            param_.program_config(0).push_dense_table_id(i));
        fleet_ptr_->PushDenseVarsAsync(
            *thread_scope_, tid, dense_grad_names_[tid], &push_sparse_status_,
            scale_datanorm_, cur_batch);
      }
      VLOG(3) << "push dense gradient done.";

      // the following code should be more precise and clean
      // TODO(guru4elephant)
      int32_t tmp_push_dense_wait_times = -1;
      static uint32_t push_dense_wait_times =
          static_cast<uint32_t>(tmp_push_dense_wait_times);

      if (push_dense_status_.size() >= push_dense_wait_times) {
        for (auto& t : push_dense_status_) {
          t.wait();
        }
        push_dense_status_.resize(0);
      }

      if (tmp_push_dense_wait_times == -1) {
        push_dense_status_.resize(0);
      }
    }

    if (need_to_push_sparse_) {
      VLOG(3) << "push sparse gradient done.";
      int32_t tmp_push_sparse_wait_times = -1;
      static uint32_t push_sparse_wait_times =
          static_cast<uint32_t>(tmp_push_sparse_wait_times);
      if (push_sparse_status_.size() >= push_sparse_wait_times) {
        for (auto& t : push_sparse_status_) {
          t.wait();
        }
        push_sparse_status_.resize(0);
      }

      if (tmp_push_sparse_wait_times == -1) {
        push_sparse_status_.resize(0);
      }
    }

    if (need_to_push_dense_) {
      for (int i = 0; i < param_.program_config(0).push_dense_table_id_size();
           ++i) {
        uint64_t tid = static_cast<uint64_t>(
            param_.program_config(0).push_dense_table_id(i));
        pull_dense_worker_->IncreaseThreadVersion(thread_id_, tid);
      }
    }
    if (need_dump_field_) {
      int batch_size = device_reader_->GetCurBatchSize();
      std::vector<std::string> ars(batch_size);
      for (auto& ar : ars) {
        ar.clear();
      }
      auto& ins_id_vec = device_reader_->GetInsIdVec();
      auto& ins_content_vec = device_reader_->GetInsContentVec();
      for (size_t i = 0; i < ins_id_vec.size(); i++) {
        ars[i] += ins_id_vec[i];
        ars[i] = ars[i] + "\t" + ins_content_vec[i];
      }
      for (auto& field : dump_fields_) {
        Variable* var = thread_scope_->FindVar(field);
        if (var == nullptr) {
          continue;
        }
        LoDTensor* tensor = var->GetMutable<LoDTensor>();
        if (!CheckValidOutput(tensor, batch_size)) {
          continue;
        }
        for (int i = 0; i < batch_size; ++i) {
          auto output_dim = tensor->dims()[1];
          std::string output_dimstr =
              boost::lexical_cast<std::string>(output_dim);
          ars[i] = ars[i] + "\t" + field + ":" + output_dimstr;
          auto bound = GetTensorBound(tensor, i);
          ars[i] += PrintLodTensor(tensor, bound.first, bound.second);
        }
      }
      // #pragma omp parallel for
      for (size_t i = 0; i < ars.size(); i++) {
        if (ars[i].length() == 0) {
          continue;
        }
        writer_ << ars[i];
      }
      if (need_dump_param_ && thread_id_ == 0) {
        DumpParam();
      }
    }

    PrintFetchVars();
    thread_scope_->DropKids();
    ++batch_cnt;
  }
  if (need_dump_field_) {
    writer_.Flush();
  }
  if (copy_table_config_.need_copy()) {
    CopySparseTable();
    CopyDenseTable();
    CopyDenseVars();
  }
}

}  // end namespace framework
}  // end namespace paddle