/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License. */

#include "paddle/fluid/framework/data_set.h"
#include <algorithm>
#include <random>
#include <unordered_map>
#include <unordered_set>
#include "google/protobuf/io/zero_copy_stream_impl.h"
#include "google/protobuf/message.h"
#include "google/protobuf/text_format.h"
#include "paddle/fluid/framework/data_feed_factory.h"
#include "paddle/fluid/framework/fleet/fleet_wrapper.h"
#include "paddle/fluid/framework/io/fs.h"
#include "paddle/fluid/platform/monitor.h"
#include "paddle/fluid/platform/timer.h"
#include "xxhash.h"  // NOLINT

#if defined _WIN32 || defined __APPLE__
#else
#define _LINUX
#endif

USE_INT_STAT(STAT_total_feasign_num_in_mem);
namespace paddle {
namespace framework {

// constructor
template <typename T>
DatasetImpl<T>::DatasetImpl() {
  VLOG(3) << "DatasetImpl<T>::DatasetImpl() constructor";
  thread_num_ = 1;
  trainer_num_ = 1;
  channel_num_ = 1;
  file_idx_ = 0;
  total_fea_num_ = 0;
  cur_channel_ = 0;
  fleet_send_batch_size_ = 1024;
  fleet_send_sleep_seconds_ = 0;
  merge_by_insid_ = false;
  merge_by_sid_ = true;
  enable_pv_merge_ = false;
  merge_size_ = 2;
  parse_ins_id_ = false;
  parse_content_ = false;
  parse_logkey_ = false;
  preload_thread_num_ = 0;
  global_index_ = 0;
}

// set filelist; file_idx_ will be reset to zero.
template <typename T>
void DatasetImpl<T>::SetFileList(const std::vector<std::string>& filelist) {
  VLOG(3) << "filelist size: " << filelist.size();
  filelist_ = filelist;
  file_idx_ = 0;
}

// set the expected thread num; the actual number may change later
template <typename T>
void DatasetImpl<T>::SetThreadNum(int thread_num) {
  VLOG(3) << "SetThreadNum thread_num=" << thread_num;
  thread_num_ = thread_num;
}

// if you run distributed training and want to do global shuffle,
// set this before global shuffle.
// be sure to call CreateReaders before SetTrainerNum
template <typename T>
void DatasetImpl<T>::SetTrainerNum(int trainer_num) {
  trainer_num_ = trainer_num;
}

// if you run distributed training and want to do global shuffle,
// set this before global shuffle.
// be sure to call CreateReaders before SetFleetSendBatchSize
template <typename T>
void DatasetImpl<T>::SetFleetSendBatchSize(int64_t size) {
  fleet_send_batch_size_ = size;
}

template <typename T>
void DatasetImpl<T>::SetHdfsConfig(const std::string& fs_name,
                                   const std::string& fs_ugi) {
  fs_name_ = fs_name;
  fs_ugi_ = fs_ugi;
  std::string cmd = std::string("hadoop fs");
  cmd += " -D fs.default.name=" + fs_name;
  cmd += " -D hadoop.job.ugi=" + fs_ugi;
  paddle::framework::hdfs_set_command(cmd);
}

template <typename T>
void DatasetImpl<T>::SetDownloadCmd(const std::string& download_cmd) {
  paddle::framework::set_download_command(download_cmd);
}

template <typename T>
std::string DatasetImpl<T>::GetDownloadCmd() {
  return paddle::framework::download_cmd();
}

template <typename T>
void DatasetImpl<T>::SetDataFeedDesc(const std::string& data_feed_desc_str) {
  google::protobuf::TextFormat::ParseFromString(data_feed_desc_str,
                                                &data_feed_desc_);
}

template <typename T>
void DatasetImpl<T>::SetChannelNum(int channel_num) {
  channel_num_ = channel_num;
}

template <typename T>
void DatasetImpl<T>::SetParseInsId(bool parse_ins_id) {
  parse_ins_id_ = parse_ins_id;
}

template <typename T>
void DatasetImpl<T>::SetParseContent(bool parse_content) {
  parse_content_ = parse_content;
}

template <typename T>
void DatasetImpl<T>::SetParseLogKey(bool parse_logkey) {
  parse_logkey_ = parse_logkey;
}

template <typename T>
void DatasetImpl<T>::SetMergeByInsId(int merge_size) {
  merge_by_insid_ = true;
  parse_ins_id_ = true;
  merge_size_ = merge_size;
}

template <typename T>
void DatasetImpl<T>::SetMergeBySid(bool is_merge) {
  merge_by_sid_ = is_merge;
}

template <typename T>
void DatasetImpl<T>::SetEnablePvMerge(bool enable_pv_merge) {
  enable_pv_merge_ = enable_pv_merge;
}

template <typename T>
void DatasetImpl<T>::SetGenerateUniqueFeasign(bool gen_uni_feasigns) {
  gen_uni_feasigns_ = gen_uni_feasigns;
  VLOG(3) << "Set generate unique feasigns: " << gen_uni_feasigns;
}

template <typename T>
void DatasetImpl<T>::SetFeaEval(bool fea_eval, int record_candidate_size) {
  slots_shuffle_fea_eval_ = fea_eval;
  slots_shuffle_rclist_.ReSize(record_candidate_size);
  VLOG(3) << "SetFeaEval fea eval mode: " << fea_eval
          << " with record candidate size: " << record_candidate_size;
}

template <typename T>
std::vector<paddle::framework::DataFeed*> DatasetImpl<T>::GetReaders() {
  std::vector<paddle::framework::DataFeed*> ret;
  ret.reserve(readers_.size());
  for (auto i : readers_) {
    ret.push_back(i.get());
  }
  return ret;
}

template <typename T>
void DatasetImpl<T>::CreateChannel() {
  if (input_channel_ == nullptr) {
    input_channel_ = paddle::framework::MakeChannel<T>();
  }
  if (multi_output_channel_.size() == 0) {
    multi_output_channel_.reserve(channel_num_);
    for (int i = 0; i < channel_num_; ++i) {
      multi_output_channel_.push_back(paddle::framework::MakeChannel<T>());
    }
  }
  if (multi_consume_channel_.size() == 0) {
    multi_consume_channel_.reserve(channel_num_);
    for (int i = 0; i < channel_num_; ++i) {
      multi_consume_channel_.push_back(paddle::framework::MakeChannel<T>());
    }
  }
  if (input_pv_channel_ == nullptr) {
    input_pv_channel_ = paddle::framework::MakeChannel<PvInstance>();
  }
  if (multi_pv_output_.size() == 0) {
    multi_pv_output_.reserve(channel_num_);
    for (int i = 0; i < channel_num_; ++i) {
      multi_pv_output_.push_back(paddle::framework::MakeChannel<PvInstance>());
    }
  }
  if (multi_pv_consume_.size() == 0) {
    multi_pv_consume_.reserve(channel_num_);
    for (int i = 0; i < channel_num_; ++i) {
      multi_pv_consume_.push_back(paddle::framework::MakeChannel<PvInstance>());
    }
  }
}

// if messages are sent between workers, this function should be called first
template <typename T>
void DatasetImpl<T>::RegisterClientToClientMsgHandler() {
  auto fleet_ptr = FleetWrapper::GetInstance();
  VLOG(3) << "RegisterClientToClientMsgHandler";
  fleet_ptr->RegisterClientToClientMsgHandler(
      0, [this](int msg_type, int client_id, const std::string& msg) -> int {
        return this->ReceiveFromClient(msg_type, client_id, msg);
      });
  VLOG(3) << "RegisterClientToClientMsgHandler done";
}

// load data into memory; Dataset holds this memory,
// which will later be fed into the readers' channels
template <typename T>
void DatasetImpl<T>::LoadIntoMemory() {
  VLOG(3) << "DatasetImpl<T>::LoadIntoMemory() begin";
  platform::Timer timeline;
  timeline.Start();
  std::vector<std::thread> load_threads;
  for (int64_t i = 0; i < thread_num_; ++i) {
    load_threads.push_back(std::thread(
        &paddle::framework::DataFeed::LoadIntoMemory, readers_[i].get()));
  }
  for (std::thread& t : load_threads) {
    t.join();
  }
  input_channel_->Close();
  int64_t in_chan_size = input_channel_->Size();
  input_channel_->SetBlockSize(in_chan_size / thread_num_ + 1);

  timeline.Pause();
  VLOG(3) << "DatasetImpl<T>::LoadIntoMemory() end"
          << ", memory data size=" << input_channel_->Size()
          << ", cost time=" << timeline.ElapsedSec() << " seconds";
}

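// preload data into memory asynchronously; uses the dedicated preload readers
// when preload_thread_num_ is set, otherwise falls back to the normal readers.
// Call WaitPreLoadDone() to join the loading threads.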
template <typename T>
void DatasetImpl<T>::PreLoadIntoMemory() {
  VLOG(3) << "DatasetImpl<T>::PreLoadIntoMemory() begin";
  if (preload_thread_num_ != 0) {
    CHECK(static_cast<size_t>(preload_thread_num_) == preload_readers_.size());
    preload_threads_.clear();
    for (int64_t i = 0; i < preload_thread_num_; ++i) {
      preload_threads_.push_back(
          std::thread(&paddle::framework::DataFeed::LoadIntoMemory,
                      preload_readers_[i].get()));
    }
  } else {
    CHECK(static_cast<size_t>(thread_num_) == readers_.size());
    preload_threads_.clear();
    for (int64_t i = 0; i < thread_num_; ++i) {
      preload_threads_.push_back(std::thread(
          &paddle::framework::DataFeed::LoadIntoMemory, readers_[i].get()));
    }
  }
  VLOG(3) << "DatasetImpl<T>::PreLoadIntoMemory() end";
}

template <typename T>
void DatasetImpl<T>::WaitPreLoadDone() {
  VLOG(3) << "DatasetImpl<T>::WaitPreLoadDone() begin";
  for (std::thread& t : preload_threads_) {
    t.join();
  }
  input_channel_->Close();
  int64_t in_chan_size = input_channel_->Size();
  input_channel_->SetBlockSize(in_chan_size / thread_num_ + 1);
  VLOG(3) << "DatasetImpl<T>::WaitPreLoadDone() end";
}

// release memory data
template <typename T>
void DatasetImpl<T>::ReleaseMemory() {
  VLOG(3) << "DatasetImpl<T>::ReleaseMemory() begin";
  if (input_channel_) {
    input_channel_->Clear();
    input_channel_ = nullptr;
  }
  for (size_t i = 0; i < multi_output_channel_.size(); ++i) {
    if (!multi_output_channel_[i]) {
      continue;
    }
    multi_output_channel_[i]->Clear();
    multi_output_channel_[i] = nullptr;
  }
  std::vector<paddle::framework::Channel<T>>().swap(multi_output_channel_);
  for (size_t i = 0; i < multi_consume_channel_.size(); ++i) {
    if (!multi_consume_channel_[i]) {
      continue;
    }
    multi_consume_channel_[i]->Clear();
    multi_consume_channel_[i] = nullptr;
  }
  std::vector<paddle::framework::Channel<T>>().swap(multi_consume_channel_);
  if (input_pv_channel_) {
    input_pv_channel_->Clear();
    input_pv_channel_ = nullptr;
  }
  for (size_t i = 0; i < multi_pv_output_.size(); ++i) {
    if (!multi_pv_output_[i]) {
      continue;
    }
    multi_pv_output_[i]->Clear();
    multi_pv_output_[i] = nullptr;
  }
  std::vector<paddle::framework::Channel<PvInstance>>().swap(multi_pv_output_);
  for (size_t i = 0; i < multi_pv_consume_.size(); ++i) {
    if (!multi_pv_consume_[i]) {
      continue;
    }
    multi_pv_consume_[i]->Clear();
    multi_pv_consume_[i] = nullptr;
  }
  std::vector<paddle::framework::Channel<PvInstance>>().swap(multi_pv_consume_);

  std::vector<std::shared_ptr<paddle::framework::DataFeed>>().swap(readers_);
  input_records_.clear();
  std::vector<T>().swap(input_records_);
  std::vector<T>().swap(slots_shuffle_original_data_);
  VLOG(3) << "DatasetImpl<T>::ReleaseMemory() end";
  VLOG(3) << "total_feasign_num_(" << STAT_GET(STAT_total_feasign_num_in_mem)
          << ") - current_fea_num_(" << total_fea_num_ << ") = ("
          << STAT_GET(STAT_total_feasign_num_in_mem) - total_fea_num_
          << ")";  // For Debug
  STAT_SUB(STAT_total_feasign_num_in_mem, total_fea_num_);
}

// do local shuffle
template <typename T>
void DatasetImpl<T>::LocalShuffle() {
  VLOG(3) << "DatasetImpl<T>::LocalShuffle() begin";
  platform::Timer timeline;
  timeline.Start();

  if (!input_channel_ || input_channel_->Size() == 0) {
    VLOG(3) << "DatasetImpl<T>::LocalShuffle() end, no data to shuffle";
    return;
  }
  auto fleet_ptr = FleetWrapper::GetInstance();
  input_channel_->Close();
  std::vector<T> data;
  input_channel_->ReadAll(data);
  std::shuffle(data.begin(), data.end(), fleet_ptr->LocalRandomEngine());
  input_channel_->Open();
  input_channel_->Write(std::move(data));
  data.clear();
  data.shrink_to_fit();
  input_channel_->Close();

  timeline.Pause();
  VLOG(3) << "DatasetImpl<T>::LocalShuffle() end, cost time="
          << timeline.ElapsedSec() << " seconds";
}

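// global shuffle: locally shuffle the input channel first, then redistribute
// instances to trainers via client-to-client messages; the destination trainer
// is chosen by hashing ins_id_ when merge_by_insid_ is set, otherwise randomly.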
template <typename T>
void DatasetImpl<T>::GlobalShuffle(int thread_num) {
#ifdef PADDLE_WITH_PSLIB
  VLOG(3) << "DatasetImpl<T>::GlobalShuffle() begin";
  platform::Timer timeline;
  timeline.Start();
  auto fleet_ptr = FleetWrapper::GetInstance();

  if (!input_channel_ || input_channel_->Size() == 0) {
    VLOG(3) << "DatasetImpl<T>::GlobalShuffle() end, no data to shuffle";
    return;
  }

  // local shuffle
  input_channel_->Close();
  std::vector<T> data;
  input_channel_->ReadAll(data);
  std::shuffle(data.begin(), data.end(), fleet_ptr->LocalRandomEngine());
  input_channel_->Open();
  input_channel_->Write(std::move(data));
  data.clear();
  data.shrink_to_fit();

  input_channel_->Close();
  input_channel_->SetBlockSize(fleet_send_batch_size_);
  VLOG(3) << "DatasetImpl<T>::GlobalShuffle() input_channel_ size "
          << input_channel_->Size();

  auto get_client_id = [this, fleet_ptr](const T& data) -> size_t {
    if (!this->merge_by_insid_) {
      return fleet_ptr->LocalRandomEngine()() % this->trainer_num_;
    } else {
      return XXH64(data.ins_id_.data(), data.ins_id_.length(), 0) %
             this->trainer_num_;
    }
  };

  auto global_shuffle_func = [this, get_client_id]() {
    auto fleet_ptr = FleetWrapper::GetInstance();
    std::vector<T> data;
    while (this->input_channel_->Read(data)) {
      std::vector<paddle::framework::BinaryArchive> ars(this->trainer_num_);
      for (auto& t : data) {
        auto client_id = get_client_id(t);
        ars[client_id] << t;
      }
      std::vector<std::future<int32_t>> total_status;
      std::vector<int> send_index(this->trainer_num_);
      for (int i = 0; i < this->trainer_num_; ++i) {
        send_index[i] = i;
      }
      std::shuffle(send_index.begin(), send_index.end(),
                   fleet_ptr->LocalRandomEngine());
      for (int index = 0; index < this->trainer_num_; ++index) {
        int i = send_index[index];
        if (ars[i].Length() == 0) {
          continue;
        }
        std::string msg(ars[i].Buffer(), ars[i].Length());
        auto ret = fleet_ptr->SendClientToClientMsg(0, i, msg);
        total_status.push_back(std::move(ret));
      }
      for (auto& t : total_status) {
        t.wait();
      }
      ars.clear();
      ars.shrink_to_fit();
      data.clear();
      data.shrink_to_fit();
      // currently we find the bottleneck is that the server cannot handle
      // large data in time, so we can remove this sleep, set
      // fleet_send_batch_size to 1024, and set the server thread num to 24.
      if (fleet_send_sleep_seconds_ != 0) {
        sleep(this->fleet_send_sleep_seconds_);
      }
    }
  };

  std::vector<std::thread> global_shuffle_threads;
  if (thread_num == -1) {
    thread_num = thread_num_;
  }
  VLOG(3) << "start global shuffle threads, num = " << thread_num;
  for (int i = 0; i < thread_num; ++i) {
    global_shuffle_threads.push_back(std::thread(global_shuffle_func));
  }
  for (std::thread& t : global_shuffle_threads) {
    t.join();
  }
  global_shuffle_threads.clear();
  global_shuffle_threads.shrink_to_fit();
  input_channel_->Clear();
  timeline.Pause();
  VLOG(3) << "DatasetImpl<T>::GlobalShuffle() end, cost time="
          << timeline.ElapsedSec() << " seconds";
#endif
}

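// redistribute the in-memory data evenly across channel_num channels;
// whichever side (output or consume) currently holds the data is rebuilt, and
// leftover instances are dropped when discard_remaining_ins is true.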
template <typename T>
void DatasetImpl<T>::DynamicAdjustChannelNum(int channel_num,
                                             bool discard_remaining_ins) {
  if (channel_num_ == channel_num) {
    VLOG(3) << "DatasetImpl<T>::DynamicAdjustChannelNum channel_num_="
            << channel_num_ << ", channel_num_=channel_num, no need to adjust";
    return;
  }
  VLOG(3) << "adjust channel num from " << channel_num_ << " to "
          << channel_num;
  channel_num_ = channel_num;
  std::vector<paddle::framework::Channel<T>>* origin_channels = nullptr;
  std::vector<paddle::framework::Channel<T>>* other_channels = nullptr;
  std::vector<paddle::framework::Channel<PvInstance>>* origin_pv_channels =
      nullptr;
  std::vector<paddle::framework::Channel<PvInstance>>* other_pv_channels =
      nullptr;

  // find out which channel (output or consume) has data
  int cur_channel = 0;
  uint64_t output_channels_data_size = 0;
  uint64_t consume_channels_data_size = 0;
  CHECK(multi_output_channel_.size() == multi_consume_channel_.size());
  for (size_t i = 0; i < multi_output_channel_.size(); ++i) {
    output_channels_data_size += multi_output_channel_[i]->Size();
    consume_channels_data_size += multi_consume_channel_[i]->Size();
  }
  if (output_channels_data_size != 0) {
    CHECK(consume_channels_data_size == 0);  // NOLINT
    cur_channel = 0;
  } else {
    CHECK(output_channels_data_size == 0);  // NOLINT
    cur_channel = 1;
  }
  if (cur_channel == 0) {
    origin_channels = &multi_output_channel_;
    other_channels = &multi_consume_channel_;
    origin_pv_channels = &multi_pv_output_;
    other_pv_channels = &multi_pv_consume_;
  } else {
    origin_channels = &multi_consume_channel_;
    other_channels = &multi_output_channel_;
    origin_pv_channels = &multi_pv_consume_;
    other_pv_channels = &multi_pv_output_;
  }
  CHECK(origin_channels != nullptr);     // NOLINT
  CHECK(other_channels != nullptr);      // NOLINT
  CHECK(origin_pv_channels != nullptr);  // NOLINT
  CHECK(other_pv_channels != nullptr);   // NOLINT

  paddle::framework::Channel<T> total_data_channel =
      paddle::framework::MakeChannel<T>();
  std::vector<paddle::framework::Channel<T>> new_channels;
  std::vector<paddle::framework::Channel<T>> new_other_channels;
  std::vector<paddle::framework::Channel<PvInstance>> new_pv_channels;
  std::vector<paddle::framework::Channel<PvInstance>> new_other_pv_channels;

  std::vector<T> local_vec;
  for (size_t i = 0; i < origin_channels->size(); ++i) {
    local_vec.clear();
    (*origin_channels)[i]->Close();
    (*origin_channels)[i]->ReadAll(local_vec);
    total_data_channel->Write(std::move(local_vec));
  }
  total_data_channel->Close();
  if (static_cast<int>(total_data_channel->Size()) >= channel_num) {
    total_data_channel->SetBlockSize(total_data_channel->Size() / channel_num +
                                     (discard_remaining_ins ? 0 : 1));
  }
  if (static_cast<int>(input_channel_->Size()) >= channel_num) {
    input_channel_->SetBlockSize(input_channel_->Size() / channel_num +
                                 (discard_remaining_ins ? 0 : 1));
  }
  if (static_cast<int>(input_pv_channel_->Size()) >= channel_num) {
    input_pv_channel_->SetBlockSize(input_pv_channel_->Size() / channel_num +
                                    (discard_remaining_ins ? 0 : 1));
    VLOG(3) << "now input_pv_channel block size is "
            << input_pv_channel_->BlockSize();
  }

  for (int i = 0; i < channel_num; ++i) {
    local_vec.clear();
    total_data_channel->Read(local_vec);
    new_other_channels.push_back(paddle::framework::MakeChannel<T>());
    new_channels.push_back(paddle::framework::MakeChannel<T>());
    new_channels[i]->Write(std::move(local_vec));
554 555 556
    new_other_pv_channels.push_back(
        paddle::framework::MakeChannel<PvInstance>());
    new_pv_channels.push_back(paddle::framework::MakeChannel<PvInstance>());
557 558 559 560 561 562 563 564
  }

  total_data_channel->Clear();
  origin_channels->clear();
  other_channels->clear();
  *origin_channels = new_channels;
  *other_channels = new_other_channels;

  origin_pv_channels->clear();
  other_pv_channels->clear();
  *origin_pv_channels = new_pv_channels;
  *other_pv_channels = new_other_pv_channels;

  new_channels.clear();
  new_other_channels.clear();
  std::vector<paddle::framework::Channel<T>>().swap(new_channels);
  std::vector<paddle::framework::Channel<T>>().swap(new_other_channels);
574 575 576 577 578 579 580

  new_pv_channels.clear();
  new_other_pv_channels.clear();
  std::vector<paddle::framework::Channel<PvInstance>>().swap(new_pv_channels);
  std::vector<paddle::framework::Channel<PvInstance>>().swap(
      new_other_pv_channels);

  local_vec.clear();
  std::vector<T>().swap(local_vec);
  VLOG(3) << "adjust channel num done";
}

template <typename T>
void DatasetImpl<T>::DynamicAdjustReadersNum(int thread_num) {
  if (thread_num_ == thread_num) {
    VLOG(3) << "DatasetImpl<T>::DynamicAdjustReadersNum thread_num_="
            << thread_num_ << ", thread_num_=thread_num, no need to adjust";
    return;
  }
  VLOG(3) << "adjust readers num from " << thread_num_ << " to " << thread_num;
  thread_num_ = thread_num;
  std::vector<std::shared_ptr<paddle::framework::DataFeed>>().swap(readers_);
  CreateReaders();
  VLOG(3) << "adjust readers num done";
}

template <typename T>
void DatasetImpl<T>::SetFleetSendSleepSeconds(int seconds) {
  fleet_send_sleep_seconds_ = seconds;
}

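// create one DataFeed reader per thread and wire it to the file list, the
// input channel, and the per-channel output/consume channels (readers are
// assigned to channels round-robin).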
template <typename T>
void DatasetImpl<T>::CreateReaders() {
  VLOG(3) << "Calling CreateReaders()";
  VLOG(3) << "thread num in Dataset: " << thread_num_;
  VLOG(3) << "Filelist size in Dataset: " << filelist_.size();
  VLOG(3) << "channel num in Dataset: " << channel_num_;
  CHECK(thread_num_ > 0) << "thread num should be > 0";
  CHECK(channel_num_ > 0) << "channel num should be > 0";
  CHECK(channel_num_ <= thread_num_) << "channel num should be <= thread num";
  VLOG(3) << "readers size: " << readers_.size();
  if (readers_.size() != 0) {
    VLOG(3) << "readers_.size() = " << readers_.size()
            << ", will not create again";
    return;
  }
  VLOG(3) << "data feed class name: " << data_feed_desc_.name();
  int channel_idx = 0;
  for (int i = 0; i < thread_num_; ++i) {
    readers_.push_back(DataFeedFactory::CreateDataFeed(data_feed_desc_.name()));
    readers_[i]->Init(data_feed_desc_);
    readers_[i]->SetThreadId(i);
    readers_[i]->SetThreadNum(thread_num_);
    readers_[i]->SetFileListMutex(&mutex_for_pick_file_);
    readers_[i]->SetFileListIndex(&file_idx_);
    readers_[i]->SetFeaNumMutex(&mutex_for_fea_num_);
    readers_[i]->SetFeaNum(&total_fea_num_);
    readers_[i]->SetFileList(filelist_);
    readers_[i]->SetParseInsId(parse_ins_id_);
    readers_[i]->SetParseContent(parse_content_);
    readers_[i]->SetParseLogKey(parse_logkey_);
    readers_[i]->SetEnablePvMerge(enable_pv_merge_);
    // Notice: it is only valid for the unit test test_paddlebox_datafeed.
    // In fact, it does not affect the training process when paddle is
    // compiled with Box_Ps.
    readers_[i]->SetCurrentPhase(current_phase_);
    if (input_channel_ != nullptr) {
      readers_[i]->SetInputChannel(input_channel_.get());
    }
    if (input_pv_channel_ != nullptr) {
      readers_[i]->SetInputPvChannel(input_pv_channel_.get());
    }
    if (cur_channel_ == 0 &&
        static_cast<size_t>(channel_idx) < multi_output_channel_.size()) {
      readers_[i]->SetOutputChannel(multi_output_channel_[channel_idx].get());
      readers_[i]->SetConsumeChannel(multi_consume_channel_[channel_idx].get());
      readers_[i]->SetOutputPvChannel(multi_pv_output_[channel_idx].get());
      readers_[i]->SetConsumePvChannel(multi_pv_consume_[channel_idx].get());
    } else if (static_cast<size_t>(channel_idx) <
               multi_output_channel_.size()) {
      readers_[i]->SetOutputChannel(multi_consume_channel_[channel_idx].get());
      readers_[i]->SetConsumeChannel(multi_output_channel_[channel_idx].get());
      readers_[i]->SetOutputPvChannel(multi_pv_consume_[channel_idx].get());
      readers_[i]->SetConsumePvChannel(multi_pv_output_[channel_idx].get());
    }
    ++channel_idx;
    if (channel_idx >= channel_num_) {
      channel_idx = 0;
    }
  }
  VLOG(3) << "readers size: " << readers_.size();
}

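// drop all readers and flip cur_channel_ so that the next pass consumes the
// channels the previous pass produced.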
template <typename T>
void DatasetImpl<T>::DestroyReaders() {
  VLOG(3) << "Calling DestroyReaders()";
  VLOG(3) << "readers size1: " << readers_.size();
  std::vector<std::shared_ptr<paddle::framework::DataFeed>>().swap(readers_);
  VLOG(3) << "readers size: " << readers_.size();
  file_idx_ = 0;
  cur_channel_ = 1 - cur_channel_;
}

template <typename T>
void DatasetImpl<T>::SetPreLoadThreadNum(int thread_num) {
  preload_thread_num_ = thread_num;
}

template <typename T>
void DatasetImpl<T>::CreatePreLoadReaders() {
  VLOG(3) << "Begin CreatePreLoadReaders";
  if (preload_thread_num_ == 0) {
    preload_thread_num_ = thread_num_;
  }
  CHECK(preload_thread_num_ > 0) << "thread num should be > 0";
  CHECK(input_channel_ != nullptr);
  preload_readers_.clear();
  for (int i = 0; i < preload_thread_num_; ++i) {
    preload_readers_.push_back(
        DataFeedFactory::CreateDataFeed(data_feed_desc_.name()));
    preload_readers_[i]->Init(data_feed_desc_);
    preload_readers_[i]->SetThreadId(i);
    preload_readers_[i]->SetThreadNum(preload_thread_num_);
    preload_readers_[i]->SetFileListMutex(&mutex_for_pick_file_);
    preload_readers_[i]->SetFileListIndex(&file_idx_);
    preload_readers_[i]->SetFileList(filelist_);
    preload_readers_[i]->SetFeaNumMutex(&mutex_for_fea_num_);
    preload_readers_[i]->SetFeaNum(&total_fea_num_);
    preload_readers_[i]->SetParseInsId(parse_ins_id_);
    preload_readers_[i]->SetParseContent(parse_content_);
    preload_readers_[i]->SetParseLogKey(parse_logkey_);
    preload_readers_[i]->SetEnablePvMerge(enable_pv_merge_);
    preload_readers_[i]->SetInputChannel(input_channel_.get());
    preload_readers_[i]->SetOutputChannel(nullptr);
    preload_readers_[i]->SetConsumeChannel(nullptr);
    preload_readers_[i]->SetOutputPvChannel(nullptr);
    preload_readers_[i]->SetConsumePvChannel(nullptr);
  }
  VLOG(3) << "End CreatePreLoadReaders";
}

template <typename T>
void DatasetImpl<T>::DestroyPreLoadReaders() {
  VLOG(3) << "Begin DestroyPreLoadReaders";
  preload_readers_.clear();
  std::vector<std::shared_ptr<paddle::framework::DataFeed>>().swap(
      preload_readers_);
  file_idx_ = 0;
  VLOG(3) << "End DestroyPreLoadReaders";
}

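// number of instances currently held in the in-memory input channel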
template <typename T>
int64_t DatasetImpl<T>::GetMemoryDataSize() {
  return input_channel_->Size();
}

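// number of pv instances in the input pv channel (only meaningful when pv
// merge is enabled)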
template <typename T>
int64_t DatasetImpl<T>::GetPvDataSize() {
  if (enable_pv_merge_) {
    return input_pv_channel_->Size();
  } else {
    VLOG(0) << "It does not merge pv.";
    return 0;
  }
}

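// total number of instances currently in the output and consume channels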
template <typename T>
int64_t DatasetImpl<T>::GetShuffleDataSize() {
  int64_t sum = 0;
  for (size_t i = 0; i < multi_output_channel_.size(); ++i) {
    sum += multi_output_channel_[i]->Size() + multi_consume_channel_[i]->Size();
  }
  return sum;
}

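// handler for global shuffle messages: deserialize the received records and
// write them to an output channel chosen round-robin via global_index_.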
template <typename T>
int DatasetImpl<T>::ReceiveFromClient(int msg_type, int client_id,
                                      const std::string& msg) {
#ifdef _LINUX
  VLOG(3) << "ReceiveFromClient msg_type=" << msg_type
          << ", client_id=" << client_id << ", msg length=" << msg.length();
  if (msg.length() == 0) {
    return 0;
  }
  paddle::framework::BinaryArchive ar;
  ar.SetReadBuffer(const_cast<char*>(msg.c_str()), msg.length(), nullptr);
  if (ar.Cursor() == ar.Finish()) {
    return 0;
  }
  std::vector<T> data;
  while (ar.Cursor() < ar.Finish()) {
    data.push_back(ar.Get<T>());
  }
  CHECK(ar.Cursor() == ar.Finish());

  auto fleet_ptr = FleetWrapper::GetInstance();
  // do not use random here because it doesn't perform well.
  // to make sure each channel gets data equally, we just put data into
  // the channels one by one.
  // int64_t index = fleet_ptr->LocalRandomEngine()() % channel_num_;
  int64_t index = 0;
  {
    std::unique_lock<std::mutex> lk(global_index_mutex_);
    index = global_index_++;
  }
  index = index % channel_num_;
  VLOG(3) << "random index=" << index;
  multi_output_channel_[index]->Write(std::move(data));

  data.clear();
  data.shrink_to_fit();
#endif
  return 0;
}

// explicit instantiation
template class DatasetImpl<Record>;

void MultiSlotDataset::PostprocessInstance() {
  // split pv instances back into records and merge them into input_channel_
  if (enable_pv_merge_) {
    auto fleet_ptr = FleetWrapper::GetInstance();
    std::shuffle(input_records_.begin(), input_records_.end(),
                 fleet_ptr->LocalRandomEngine());
    input_channel_->Open();
    input_channel_->Write(std::move(input_records_));
    for (size_t i = 0; i < multi_pv_consume_.size(); ++i) {
      multi_pv_consume_[i]->Clear();
    }
    input_channel_->Close();
    input_records_.clear();
    input_records_.shrink_to_fit();
  } else {
    input_channel_->Open();
    for (size_t i = 0; i < multi_consume_channel_.size(); ++i) {
      std::vector<Record> ins_data;
      multi_consume_channel_[i]->Close();
      multi_consume_channel_[i]->ReadAll(ins_data);
      input_channel_->Write(std::move(ins_data));
      ins_data.clear();
      ins_data.shrink_to_fit();
      multi_consume_channel_[i]->Clear();
    }
    input_channel_->Close();
    this->LocalShuffle();
  }
}

void MultiSlotDataset::SetCurrentPhase(int current_phase) {
  current_phase_ = current_phase;
}

void MultiSlotDataset::PreprocessInstance() {
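  // when pv merge is enabled, group records into PvInstances (by search_id if
  // merge_by_sid_ is set), shuffle them and write them to input_pv_channel_;
  // otherwise just do a local shuffle of the records.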
  if (!input_channel_ || input_channel_->Size() == 0) {
    return;
  }
  if (!enable_pv_merge_) {  // means to use Record
    this->LocalShuffle();
  } else {  // means to use Pv
    auto fleet_ptr = FleetWrapper::GetInstance();
    input_channel_->Close();
    std::vector<PvInstance> pv_data;
    input_channel_->ReadAll(input_records_);
    int all_records_num = input_records_.size();
    std::vector<Record*> all_records;
    all_records.reserve(all_records_num);
    for (int index = 0; index < all_records_num; ++index) {
      all_records.push_back(&input_records_[index]);
    }

    std::sort(all_records.data(), all_records.data() + all_records_num,
              [](const Record* lhs, const Record* rhs) {
                return lhs->search_id < rhs->search_id;
              });
    if (merge_by_sid_) {
      uint64_t last_search_id = 0;
      for (int i = 0; i < all_records_num; ++i) {
        Record* ins = all_records[i];
        if (i == 0 || last_search_id != ins->search_id) {
          PvInstance pv_instance = make_pv_instance();
          pv_instance->merge_instance(ins);
          pv_data.push_back(pv_instance);
          last_search_id = ins->search_id;
          continue;
        }
        pv_data.back()->merge_instance(ins);
      }
    } else {
      for (int i = 0; i < all_records_num; ++i) {
        Record* ins = all_records[i];
        PvInstance pv_instance = make_pv_instance();
        pv_instance->merge_instance(ins);
        pv_data.push_back(pv_instance);
      }
    }

    std::shuffle(pv_data.begin(), pv_data.end(),
                 fleet_ptr->LocalRandomEngine());
    input_pv_channel_->Open();
    input_pv_channel_->Write(std::move(pv_data));

    pv_data.clear();
    pv_data.shrink_to_fit();
    input_pv_channel_->Close();
  }
}

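// collect the uint64 feasigns from the output channels into the fleet's local
// shard tables (zero-initialized vectors of length feadim), then pull the
// corresponding sparse values to local.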
void MultiSlotDataset::GenerateLocalTablesUnlock(int table_id, int feadim,
                                                 int read_thread_num,
                                                 int consume_thread_num,
                                                 int shard_num) {
  VLOG(3) << "MultiSlotDataset::GenerateUniqueFeasign begin";
  if (!gen_uni_feasigns_) {
    VLOG(3) << "generate_unique_feasign_=false, will not GenerateUniqueFeasign";
    return;
  }

  CHECK(multi_output_channel_.size() != 0);  // NOLINT
  auto fleet_ptr_ = FleetWrapper::GetInstance();
  std::vector<std::unordered_map<uint64_t, std::vector<float>>>&
      local_map_tables = fleet_ptr_->GetLocalTable();
  local_map_tables.resize(shard_num);
  // read thread
  int channel_num = multi_output_channel_.size();
  if (read_thread_num < channel_num) {
    read_thread_num = channel_num;
  }
  std::vector<std::thread> threads(read_thread_num);
  consume_task_pool_.resize(consume_thread_num);
  for (size_t i = 0; i < consume_task_pool_.size(); i++) {
    consume_task_pool_[i].reset(new ::ThreadPool(1));
  }
  auto consume_func = [&local_map_tables](int shard_id, int feadim,
                                          std::vector<uint64_t>& keys) {
    for (auto k : keys) {
      if (local_map_tables[shard_id].find(k) ==
          local_map_tables[shard_id].end()) {
        local_map_tables[shard_id][k] = std::vector<float>(feadim, 0);
      }
    }
  };
  auto gen_func = [this, &shard_num, &feadim, &local_map_tables,
                   &consume_func](int i) {
    std::vector<Record> vec_data;
    std::vector<std::vector<uint64_t>> task_keys(shard_num);
    std::vector<std::future<void>> task_futures;
    this->multi_output_channel_[i]->Close();
    this->multi_output_channel_[i]->ReadAll(vec_data);
    for (size_t j = 0; j < vec_data.size(); j++) {
      for (auto& feature : vec_data[j].uint64_feasigns_) {
        int shard = feature.sign().uint64_feasign_ % shard_num;
        task_keys[shard].push_back(feature.sign().uint64_feasign_);
      }
    }

    for (int shard_id = 0; shard_id < shard_num; shard_id++) {
      task_futures.emplace_back(consume_task_pool_[shard_id]->enqueue(
          consume_func, shard_id, feadim, task_keys[shard_id]));
    }

    multi_output_channel_[i]->Open();
    multi_output_channel_[i]->Write(std::move(vec_data));
    vec_data.clear();
    vec_data.shrink_to_fit();
    for (auto& tk : task_keys) {
      tk.clear();
      std::vector<uint64_t>().swap(tk);
    }
    task_keys.clear();
    std::vector<std::vector<uint64_t>>().swap(task_keys);
    for (auto& tf : task_futures) {
      tf.wait();
    }
  };
  for (size_t i = 0; i < threads.size(); i++) {
    threads[i] = std::thread(gen_func, i);
  }
  for (std::thread& t : threads) {
    t.join();
  }
  for (size_t i = 0; i < consume_task_pool_.size(); i++) {
    consume_task_pool_[i].reset();
  }
  consume_task_pool_.clear();
  fleet_ptr_->PullSparseToLocal(table_id, feadim);
}

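// merge records that share the same ins_id_ into a single record: dense slots
// keep one non-empty copy, sparse slots must not appear in more than one of
// the merged instances, and groups whose size differs from merge_size_ are
// dropped.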
void MultiSlotDataset::MergeByInsId() {
  VLOG(3) << "MultiSlotDataset::MergeByInsId begin";
  if (!merge_by_insid_) {
    VLOG(3) << "merge_by_insid=false, will not MergeByInsId";
    return;
  }
  auto multi_slot_desc = data_feed_desc_.multi_slot_desc();
  std::vector<std::string> use_slots;
969
  std::vector<bool> use_slots_is_dense;
970
  for (int i = 0; i < multi_slot_desc.slots_size(); ++i) {
971 972 973
    const auto& slot = multi_slot_desc.slots(i);
    if (slot.is_used()) {
      use_slots.push_back(slot.name());
974
      use_slots_is_dense.push_back(slot.is_dense());
975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998
    }
  }
  CHECK(multi_output_channel_.size() != 0);  // NOLINT
  auto channel_data = paddle::framework::MakeChannel<Record>();
  VLOG(3) << "multi_output_channel_.size() " << multi_output_channel_.size();
  for (size_t i = 0; i < multi_output_channel_.size(); ++i) {
    std::vector<Record> vec_data;
    multi_output_channel_[i]->Close();
    multi_output_channel_[i]->ReadAll(vec_data);
    channel_data->Write(std::move(vec_data));
    vec_data.clear();
    vec_data.shrink_to_fit();
    multi_output_channel_[i]->Clear();
  }
  channel_data->Close();
  std::vector<Record> recs;
  recs.reserve(channel_data->Size());
  channel_data->ReadAll(recs);
  channel_data->Clear();
  std::sort(recs.begin(), recs.end(), [](const Record& a, const Record& b) {
    return a.ins_id_ < b.ins_id_;
  });

  std::vector<Record> results;
  uint64_t drop_ins_num = 0;
  std::unordered_set<uint16_t> all_int64;
  std::unordered_set<uint16_t> all_float;
  std::unordered_set<uint16_t> local_uint64;
  std::unordered_set<uint16_t> local_float;
  std::unordered_map<uint16_t, std::vector<FeatureItem>> all_dense_uint64;
  std::unordered_map<uint16_t, std::vector<FeatureItem>> all_dense_float;
  std::unordered_map<uint16_t, std::vector<FeatureItem>> local_dense_uint64;
  std::unordered_map<uint16_t, std::vector<FeatureItem>> local_dense_float;
  std::unordered_map<uint16_t, bool> dense_empty;

  VLOG(3) << "recs.size() " << recs.size();
  for (size_t i = 0; i < recs.size();) {
    size_t j = i + 1;
    while (j < recs.size() && recs[j].ins_id_ == recs[i].ins_id_) {
      j++;
    }
    if (merge_size_ > 0 && j - i != merge_size_) {
      drop_ins_num += j - i;
      LOG(WARNING) << "drop ins " << recs[i].ins_id_ << " size=" << j - i
                   << ", because merge_size=" << merge_size_;
      i = j;
      continue;
    }

1024 1025
    all_int64.clear();
    all_float.clear();
1026 1027
    all_dense_uint64.clear();
    all_dense_float.clear();
1028 1029 1030 1031 1032 1033
    bool has_conflict_slot = false;
    uint16_t conflict_slot = 0;

    Record rec;
    rec.ins_id_ = recs[i].ins_id_;
    rec.content_ = recs[i].content_;
1034

1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081
    for (size_t k = i; k < j; k++) {
      dense_empty.clear();
      local_dense_uint64.clear();
      local_dense_float.clear();
      for (auto& feature : recs[k].uint64_feasigns_) {
        uint16_t slot = feature.slot();
        if (!use_slots_is_dense[slot]) {
          continue;
        }
        local_dense_uint64[slot].push_back(feature);
        if (feature.sign().uint64_feasign_ != 0) {
          dense_empty[slot] = false;
        } else if (dense_empty.find(slot) == dense_empty.end() &&
                   all_dense_uint64.find(slot) == all_dense_uint64.end()) {
          dense_empty[slot] = true;
        }
      }
      for (auto& feature : recs[k].float_feasigns_) {
        uint16_t slot = feature.slot();
        if (!use_slots_is_dense[slot]) {
          continue;
        }
        local_dense_float[slot].push_back(feature);
        if (fabs(feature.sign().float_feasign_) >= 1e-6) {
          dense_empty[slot] = false;
        } else if (dense_empty.find(slot) == dense_empty.end() &&
                   all_dense_float.find(slot) == all_dense_float.end()) {
          dense_empty[slot] = true;
        }
      }
      for (auto& p : dense_empty) {
        if (local_dense_uint64.find(p.first) != local_dense_uint64.end()) {
          all_dense_uint64[p.first] = std::move(local_dense_uint64[p.first]);
        } else if (local_dense_float.find(p.first) != local_dense_float.end()) {
          all_dense_float[p.first] = std::move(local_dense_float[p.first]);
        }
      }
    }
    for (auto& f : all_dense_uint64) {
      rec.uint64_feasigns_.insert(rec.uint64_feasigns_.end(), f.second.begin(),
                                  f.second.end());
    }
    for (auto& f : all_dense_float) {
      rec.float_feasigns_.insert(rec.float_feasigns_.end(), f.second.begin(),
                                 f.second.end());
    }

    for (size_t k = i; k < j; k++) {
      local_uint64.clear();
      local_float.clear();
      for (auto& feature : recs[k].uint64_feasigns_) {
        uint16_t slot = feature.slot();
        if (use_slots_is_dense[slot]) {
          continue;
        } else if (all_int64.find(slot) != all_int64.end()) {
          has_conflict_slot = true;
          conflict_slot = slot;
          break;
        }
        local_uint64.insert(slot);
        rec.uint64_feasigns_.push_back(std::move(feature));
      }
      if (has_conflict_slot) {
        break;
      }
      all_int64.insert(local_uint64.begin(), local_uint64.end());

      for (auto& feature : recs[k].float_feasigns_) {
        uint16_t slot = feature.slot();
        if (use_slots_is_dense[slot]) {
          continue;
        } else if (all_float.find(slot) != all_float.end()) {
          has_conflict_slot = true;
          conflict_slot = slot;
          break;
        }
        local_float.insert(slot);
        rec.float_feasigns_.push_back(std::move(feature));
      }
      if (has_conflict_slot) {
        break;
      }
      all_float.insert(local_float.begin(), local_float.end());
    }

    if (has_conflict_slot) {
      LOG(WARNING) << "drop ins " << recs[i].ins_id_ << " size=" << j - i
                   << ", because conflict_slot=" << use_slots[conflict_slot];
      drop_ins_num += j - i;
    } else {
      results.push_back(std::move(rec));
    }
    i = j;
  }
  std::vector<Record>().swap(recs);
  VLOG(3) << "results size " << results.size();
  LOG(WARNING) << "total drop ins num: " << drop_ins_num;
  results.shrink_to_fit();

  auto fleet_ptr = FleetWrapper::GetInstance();
  std::shuffle(results.begin(), results.end(), fleet_ptr->LocalRandomEngine());
  channel_data->Open();
  channel_data->Write(std::move(results));
  channel_data->Close();
  results.clear();
  results.shrink_to_fit();
  VLOG(3) << "channel data size " << channel_data->Size();
  channel_data->SetBlockSize(channel_data->Size() / channel_num_ + 1);
  VLOG(3) << "channel data block size " << channel_data->BlockSize();
  for (size_t i = 0; i < multi_output_channel_.size(); ++i) {
    std::vector<Record> vec_data;
    channel_data->Read(vec_data);
    multi_output_channel_[i]->Open();
    multi_output_channel_[i]->Write(std::move(vec_data));
    vec_data.clear();
    vec_data.shrink_to_fit();
  }
  CHECK(channel_data->Size() == 0);  // NOLINT
  channel_data->Clear();
  VLOG(3) << "MultiSlotDataset::MergeByInsId end";
}

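// rebuild each original record with the feasigns of the selected slots
// replaced by candidates sampled from slots_shuffle_rclist_.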
void MultiSlotDataset::GetRandomData(
    const std::unordered_set<uint16_t>& slots_to_replace,
    std::vector<Record>* result) {
  int debug_erase_cnt = 0;
  int debug_push_cnt = 0;
  auto multi_slot_desc = data_feed_desc_.multi_slot_desc();
  slots_shuffle_rclist_.ReInit();
  const auto& slots_shuffle_original_data = GetSlotsOriginalData();
  for (const auto& rec : slots_shuffle_original_data) {
    RecordCandidate rand_rec;
    Record new_rec = rec;
    slots_shuffle_rclist_.AddAndGet(rec, &rand_rec);
    for (auto it = new_rec.uint64_feasigns_.begin();
         it != new_rec.uint64_feasigns_.end();) {
      if (slots_to_replace.find(it->slot()) != slots_to_replace.end()) {
        it = new_rec.uint64_feasigns_.erase(it);
        debug_erase_cnt += 1;
      } else {
        ++it;
      }
    }
    for (auto slot : slots_to_replace) {
      auto range = rand_rec.feas_.equal_range(slot);
      for (auto it = range.first; it != range.second; ++it) {
        new_rec.uint64_feasigns_.push_back({it->second, it->first});
        debug_push_cnt += 1;
      }
    }
    result->push_back(std::move(new_rec));
  }
  VLOG(2) << "erase feasign num: " << debug_erase_cnt
          << " repush feasign num: " << debug_push_cnt;
}

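// translate the slot names in slots_to_replace into slot indices, gather all
// instances into slots_shuffle_original_data_ (from the input, output or
// consume channels, wherever they currently live), and clear those channels so
// SlotsShuffle can rebuild the input channel.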
void MultiSlotDataset::PreprocessChannel(
    const std::set<std::string>& slots_to_replace,
    std::unordered_set<uint16_t>& index_slots) {  // NOLINT
  int out_channel_size = 0;
  if (cur_channel_ == 0) {
    for (size_t i = 0; i < multi_output_channel_.size(); ++i) {
      out_channel_size += multi_output_channel_[i]->Size();
    }
  } else {
    for (size_t i = 0; i < multi_consume_channel_.size(); ++i) {
      out_channel_size += multi_consume_channel_[i]->Size();
    }
  }
  VLOG(2) << "DatasetImpl<T>::SlotsShuffle() begin with input channel size: "
          << input_channel_->Size()
          << " output channel size: " << out_channel_size;

  if ((!input_channel_ || input_channel_->Size() == 0) &&
      slots_shuffle_original_data_.size() == 0 && out_channel_size == 0) {
    VLOG(3) << "DatasetImpl<T>::SlotsShuffle() end, no data to slots shuffle";
    return;
  }

  auto multi_slot_desc = data_feed_desc_.multi_slot_desc();
  for (int i = 0; i < multi_slot_desc.slots_size(); ++i) {
    std::string cur_slot = multi_slot_desc.slots(i).name();
    if (slots_to_replace.find(cur_slot) != slots_to_replace.end()) {
      index_slots.insert(i);
    }
  }
  if (slots_shuffle_original_data_.size() == 0) {
    // before first slots shuffle, instances could be in
    // input_channel, output_channel or consume_channel
    if (input_channel_ && input_channel_->Size() != 0) {
      slots_shuffle_original_data_.reserve(input_channel_->Size());
      input_channel_->Close();
      input_channel_->ReadAll(slots_shuffle_original_data_);
    } else {
      CHECK(out_channel_size > 0);  // NOLINT
      if (cur_channel_ == 0) {
        for (size_t i = 0; i < multi_output_channel_.size(); ++i) {
          std::vector<Record> vec_data;
          multi_output_channel_[i]->Close();
          multi_output_channel_[i]->ReadAll(vec_data);
          slots_shuffle_original_data_.reserve(
              slots_shuffle_original_data_.size() + vec_data.size());
          slots_shuffle_original_data_.insert(
              slots_shuffle_original_data_.end(),
              std::make_move_iterator(vec_data.begin()),
              std::make_move_iterator(vec_data.end()));
          vec_data.clear();
          vec_data.shrink_to_fit();
          multi_output_channel_[i]->Clear();
        }
      } else {
        for (size_t i = 0; i < multi_consume_channel_.size(); ++i) {
          std::vector<Record> vec_data;
          multi_consume_channel_[i]->Close();
          multi_consume_channel_[i]->ReadAll(vec_data);
          slots_shuffle_original_data_.reserve(
              slots_shuffle_original_data_.size() + vec_data.size());
          slots_shuffle_original_data_.insert(
              slots_shuffle_original_data_.end(),
              std::make_move_iterator(vec_data.begin()),
              std::make_move_iterator(vec_data.end()));
          vec_data.clear();
          vec_data.shrink_to_fit();
          multi_consume_channel_[i]->Clear();
        }
      }
    }
  } else {
    // if already have original data for slots shuffle, clear channel
    input_channel_->Clear();
    if (cur_channel_ == 0) {
      for (size_t i = 0; i < multi_output_channel_.size(); ++i) {
        if (!multi_output_channel_[i]) {
          continue;
        }
        multi_output_channel_[i]->Clear();
      }
    } else {
      for (size_t i = 0; i < multi_consume_channel_.size(); ++i) {
        if (!multi_consume_channel_[i]) {
          continue;
        }
        multi_consume_channel_[i]->Clear();
      }
    }
  }
  int end_size = 0;
  if (cur_channel_ == 0) {
    for (size_t i = 0; i < multi_output_channel_.size(); ++i) {
      if (!multi_output_channel_[i]) {
        continue;
      }
      end_size += multi_output_channel_[i]->Size();
    }
  } else {
    for (size_t i = 0; i < multi_consume_channel_.size(); ++i) {
      if (!multi_consume_channel_[i]) {
        continue;
      }
      end_size += multi_consume_channel_[i]->Size();
    }
  }
  CHECK(input_channel_->Size() == 0)
      << "input channel should be empty before slots shuffle";
}

// slots shuffle into input_channel_, replacing only the slots that need shuffling
void MultiSlotDataset::SlotsShuffle(
    const std::set<std::string>& slots_to_replace) {
  PADDLE_ENFORCE_EQ(slots_shuffle_fea_eval_, true,
                    platform::errors::PreconditionNotMet(
                        "fea eval mode off, need to set on for slots shuffle"));
  platform::Timer timeline;
  timeline.Start();
  std::unordered_set<uint16_t> index_slots;
  PreprocessChannel(slots_to_replace, index_slots);

  std::vector<Record> random_data;
  random_data.clear();
  // get slots shuffled random_data
  GetRandomData(index_slots, &random_data);
  input_channel_->Open();
  input_channel_->Write(std::move(random_data));
  random_data.clear();
  random_data.shrink_to_fit();
  input_channel_->Close();
  cur_channel_ = 0;

  timeline.Pause();
  VLOG(2) << "DatasetImpl<T>::SlotsShuffle() end"
          << ", memory data size for slots shuffle=" << input_channel_->Size()
          << ", cost time=" << timeline.ElapsedSec() << " seconds";
}

}  // end namespace framework
}  // end namespace paddle