/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#if defined _WIN32 || defined __APPLE__
#else
#define _LINUX
#endif

#include "paddle/fluid/framework/data_feed.h"
#ifdef _LINUX
#include <stdio_ext.h>
#endif
#include <utility>
#include "gflags/gflags.h"
#include "google/protobuf/io/zero_copy_stream_impl.h"
#include "google/protobuf/message.h"
#include "google/protobuf/text_format.h"
#include "io/fs.h"
#include "io/shell.h"
#include "paddle/fluid/framework/feed_fetch_method.h"
#include "paddle/fluid/framework/feed_fetch_type.h"
#include "paddle/fluid/platform/timer.h"

namespace paddle {
namespace framework {

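// Binds an executor-created feed variable to the slot of the same name, so
// that parsed batches can be written straight into its LoDTensor.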
void DataFeed::AddFeedVar(Variable* var, const std::string& name) {
  CheckInit();
  for (size_t i = 0; i < use_slots_.size(); ++i) {
    if (name == use_slots_[i]) {
      feed_vec_[i] = var->GetMutable<LoDTensor>();
    }
  }
}

bool DataFeed::SetFileList(const std::vector<std::string>& files) {
  std::unique_lock<std::mutex> lock(*mutex_for_pick_file_);
  CheckInit();
  // A user may call SetFileList many times after initializing the reader,
  // so the file list is simply reassigned on every call.
  filelist_.assign(files.begin(), files.end());

  finish_set_filelist_ = true;
  return true;
}

void DataFeed::SetBatchSize(int batch_size) {
  PADDLE_ENFORCE(batch_size > 0, "Illegal batch size: %d.", batch_size);
  default_batch_size_ = batch_size;
}

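// Pops the next filename from the shared file list. Thread-safe via the
// externally supplied mutex and index, so multiple reader threads can share
// one list; returns false once the list is exhausted.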
bool DataFeed::PickOneFile(std::string* filename) {
  PADDLE_ENFORCE(mutex_for_pick_file_ != nullptr,
                 "should call SetFileListMutex before PickOneFile");
  PADDLE_ENFORCE(file_idx_ != nullptr,
                 "should call SetFileListIndex before PickOneFile");
  std::unique_lock<std::mutex> lock(*mutex_for_pick_file_);
  if (*file_idx_ == filelist_.size()) {
    VLOG(3) << "DataFeed::PickOneFile no more file to pick";
    return false;
  }
  VLOG(3) << "file_idx_=" << *file_idx_;
  *filename = filelist_[(*file_idx_)++];
  // LOG(ERROR) << "pick file:" << *filename;
  return true;
}

void DataFeed::CheckInit() {
  PADDLE_ENFORCE(finish_init_, "Initialization did not succeed.");
}

void DataFeed::CheckSetFileList() {
  PADDLE_ENFORCE(finish_set_filelist_, "Set filelist did not succeed.");
}

void DataFeed::CheckStart() {
  PADDLE_ENFORCE(finish_start_, "Datafeed has not started running yet.");
}

template <typename T>
void PrivateQueueDataFeed<T>::SetQueueSize(int queue_size) {
  PADDLE_ENFORCE(queue_size > 0, "Illegal queue size: %d.", queue_size);
  queue_size_ = queue_size;
  queue_ = std::unique_ptr<paddle::operators::reader::BlockingQueue<T>>(
      new paddle::operators::reader::BlockingQueue<T>(queue_size_));
}

template <typename T>
bool PrivateQueueDataFeed<T>::Start() {
  CheckSetFileList();
  read_thread_ = std::thread(&PrivateQueueDataFeed::ReadThread, this);
  read_thread_.detach();

  finish_start_ = true;
  return true;
}

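// Producer side of the blocking queue: opens each picked file (optionally
// through a pipe command), parses instances one by one, and sends them to
// the queue that Next() consumes.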
template <typename T>
void PrivateQueueDataFeed<T>::ReadThread() {
#ifdef _LINUX
  std::string filename;
  while (PickOneFile(&filename)) {
    int err_no = 0;
    fp_ = fs_open_read(filename, &err_no, pipe_command_);
    __fsetlocking(&*fp_, FSETLOCKING_BYCALLER);
    T instance;
    while (ParseOneInstanceFromPipe(&instance)) {
      queue_->Send(instance);
    }
  }
  queue_->Close();
#endif
}

template <typename T>
int PrivateQueueDataFeed<T>::Next() {
#ifdef _LINUX
  CheckStart();
  int index = 0;
  T instance;
  T ins_vec;
  while (index < default_batch_size_) {
    if (!queue_->Receive(&instance)) {
      break;
    }
    AddInstanceToInsVec(&ins_vec, instance, index++);
  }
  batch_size_ = index;
  if (batch_size_ != 0) {
    PutToFeedVec(ins_vec);
  }
  return batch_size_;
#else
  return 0;
#endif
}

// explicit instantiation
template class PrivateQueueDataFeed<std::vector<MultiSlotType>>;

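// InMemoryDataFeed keeps two blocking queues used as double buffers:
// Next() drains one channel while refilling the other with the instances it
// hands out, and flips cur_channel_ when the input side runs dry, so the
// in-memory dataset can be iterated again without re-reading files.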
template <typename T>
InMemoryDataFeed<T>::InMemoryDataFeed() {
  cur_channel_ = 0;
  shuffled_ins_ = std::make_shared<paddle::framework::BlockingQueue<T>>();
  shuffled_ins_out_ = std::make_shared<paddle::framework::BlockingQueue<T>>();
  fleet_send_batch_size_ = 80000;  // hard-coded default; see SetFleetSendBatchSize()
  memory_data_ = nullptr;
  mutex_for_update_memory_data_ = nullptr;
  this->file_idx_ = nullptr;
  this->mutex_for_pick_file_ = nullptr;
}

template <typename T>
bool InMemoryDataFeed<T>::Start() {
#ifdef _LINUX
  DataFeed::CheckSetFileList();
  if (shuffled_ins_->Size() == 0 && shuffled_ins_out_->Size() == 0) {
    FillMemoryDataToChannel();
  }
#endif
  DataFeed::finish_start_ = true;
  return true;
}

template <typename T>
int InMemoryDataFeed<T>::Next() {
#ifdef _LINUX
  DataFeed::CheckStart();
  std::shared_ptr<paddle::framework::BlockingQueue<T>> in_channel = nullptr;
  std::shared_ptr<paddle::framework::BlockingQueue<T>> out_channel = nullptr;
  if (cur_channel_ == 0) {
    in_channel = shuffled_ins_;
    out_channel = shuffled_ins_out_;
  } else {
    in_channel = shuffled_ins_out_;
    out_channel = shuffled_ins_;
  }
  CHECK(in_channel != nullptr);
  CHECK(out_channel != nullptr);
  VLOG(3) << "in_channel size=" << in_channel->Size()
          << ", out_channel size=" << out_channel->Size()
          << ", thread_id=" << thread_id_;
  int index = 0;
  T instance;
  T ins_vec;
  while (index < DataFeed::default_batch_size_) {
    if (in_channel->Size() == 0) {
      break;
    }
    in_channel->Pop(&instance);

    AddInstanceToInsVec(&ins_vec, instance, index++);
    out_channel->Push(std::move(instance));
  }
  DataFeed::batch_size_ = index;
  VLOG(3) << "batch_size_=" << DataFeed::batch_size_
          << ", thread_id=" << thread_id_;
  if (DataFeed::batch_size_ != 0) {
    PutToFeedVec(ins_vec);
  } else {
    cur_channel_ = 1 - cur_channel_;
  }
  return DataFeed::batch_size_;
#else
  return 0;
#endif
}

template <typename T>
void InMemoryDataFeed<T>::SetMemoryData(void* memory_data) {
  memory_data_ = static_cast<std::vector<T>*>(memory_data);
}

template <typename T>
void InMemoryDataFeed<T>::SetMemoryDataMutex(std::mutex* mutex) {
  mutex_for_update_memory_data_ = mutex;
}

template <typename T>
void InMemoryDataFeed<T>::SetThreadId(int thread_id) {
  thread_id_ = thread_id;
}

template <typename T>
void InMemoryDataFeed<T>::SetThreadNum(int thread_num) {
  thread_num_ = thread_num;
}

template <typename T>
void InMemoryDataFeed<T>::SetTrainerNum(int trainer_num) {
  trainer_num_ = trainer_num;
}

template <typename T>
void InMemoryDataFeed<T>::SetFleetSendBatchSize(int64_t size) {
  fleet_send_batch_size_ = size;
}

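// Receives a serialized batch of instances from another trainer (delivered
// by the fleet message handler) and appends it to the shuffle channel.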
template <typename T>
void InMemoryDataFeed<T>::PutInsToChannel(const std::string& ins_str) {
#ifdef _LINUX
  std::vector<T> ins;
  DeserializeIns(&ins, ins_str);
  auto ins_num = ins.size();  // record before the move below
  shuffled_ins_->Extend(std::move(ins));
  VLOG(3) << "PutInsToChannel put ins num=" << ins_num
          << " to channel, channel size=" << shuffled_ins_->Size()
          << " thread_id=" << thread_id_;
#endif
}

template <typename T>
void InMemoryDataFeed<T>::FillMemoryDataToChannel() {
#ifdef _LINUX
  VLOG(3) << "FillMemoryDataToChannel, thread_id=" << thread_id_;
  auto interval = GetMemoryDataInterval();
  VLOG(3) << "memory data size=" << memory_data_->size()
          << ", fill data from [" << interval.first << ", " << interval.second
          << "), thread_id=" << thread_id_;
  for (int64_t i = interval.first; i < interval.second; ++i) {
    T& t = (*memory_data_)[i];
    shuffled_ins_->Push(std::move(t));
  }
#endif
}

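// Moves everything left in the active channel back into the shared
// memory_data_ vector under its mutex; the inactive channel is expected to
// be empty at this point.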
template <typename T>
void InMemoryDataFeed<T>::FillChannelToMemoryData() {
#ifdef _LINUX
  VLOG(3) << "FillChannelToMemoryData, thread_id=" << thread_id_;
  std::vector<T> local_vec;
  std::shared_ptr<paddle::framework::BlockingQueue<T>> channel = nullptr;
  std::shared_ptr<paddle::framework::BlockingQueue<T>> pre_channel = nullptr;
  if (cur_channel_ == 0) {
    channel = shuffled_ins_;
    pre_channel = shuffled_ins_out_;
  } else {
    channel = shuffled_ins_out_;
    pre_channel = shuffled_ins_;
  }
  CHECK(channel != nullptr);
  CHECK(pre_channel != nullptr);
  CHECK_EQ(pre_channel->Size(), 0);
  local_vec.resize(channel->Size());
  for (size_t i = 0; i < local_vec.size(); ++i) {
    channel->Pop(&local_vec[i]);
  }
  VLOG(3) << "local_vec size=" << local_vec.size()
          << ", thread_id=" << thread_id_;
  {
    std::lock_guard<std::mutex> g(*mutex_for_update_memory_data_);
    VLOG(3) << "before insert, memory_data_ size=" << memory_data_->size()
            << ", thread_id=" << thread_id_;
    memory_data_->insert(memory_data_->end(), local_vec.begin(),
                         local_vec.end());
    VLOG(3) << "after insert, memory_data_ size=" << memory_data_->size()
            << ", thread_id=" << thread_id_;
  }
  std::vector<T>().swap(local_vec);
#endif
}

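// Reads every file assigned to this thread into memory_data_. Instances are
// buffered in a thread-local vector first and move-inserted under the shared
// mutex, keeping the critical section short.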
template <typename T>
void InMemoryDataFeed<T>::LoadIntoMemory() {
#ifdef _LINUX
  VLOG(3) << "LoadIntoMemory() begin, thread_id=" << thread_id_;
  std::vector<T> local_vec;
  std::string filename;
  while (DataFeed::PickOneFile(&filename)) {
    VLOG(3) << "PickOneFile, filename=" << filename
            << ", thread_id=" << thread_id_;
    int err_no = 0;
    PrivateQueueDataFeed<T>::fp_ =
        fs_open_read(filename, &err_no, PrivateQueueDataFeed<T>::pipe_command_);
    CHECK(PrivateQueueDataFeed<T>::fp_ != nullptr);
    __fsetlocking(&*PrivateQueueDataFeed<T>::fp_, FSETLOCKING_BYCALLER);
    T instance;
    platform::Timer timeline;
    timeline.Start();
    while (ParseOneInstanceFromPipe(&instance)) {
      local_vec.push_back(instance);
    }
    timeline.Pause();
    VLOG(3) << "LoadIntoMemory() read all lines, file=" << filename
            << ", cost time=" << timeline.ElapsedSec()
            << " seconds, thread_id=" << thread_id_;
    {
      std::lock_guard<std::mutex> lock(*mutex_for_update_memory_data_);
      timeline.Start();
      memory_data_->insert(memory_data_->end(),
                           std::make_move_iterator(local_vec.begin()),
                           std::make_move_iterator(local_vec.end()));
      timeline.Pause();
      VLOG(3) << "LoadIntoMemory() memory_data insert, cost time="
              << timeline.ElapsedSec() << " seconds, thread_id=" << thread_id_;
    }
    local_vec.clear();
  }
  std::vector<T>().swap(local_vec);
  VLOG(3) << "LoadIntoMemory() end, thread_id=" << thread_id_;
#endif
}

template <typename T>
void InMemoryDataFeed<T>::LocalShuffle() {
#ifdef _LINUX
  VLOG(3) << "LocalShuffle() begin, thread_id=" << thread_id_;
  FillMemoryDataToChannel();
  VLOG(3) << "LocalShuffle() end, thread_id=" << thread_id_;
#endif
}

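// Scatters this thread's slice of memory_data_ across all trainers: each
// instance is sent to a uniformly random trainer, sends are batched by
// fleet_send_batch_size_, and the destination order is re-shuffled before
// every flush to avoid hammering a single node.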
template <typename T>
void InMemoryDataFeed<T>::GlobalShuffle() {
#ifdef _LINUX
  VLOG(3) << "GlobalShuffle() begin, thread_id=" << thread_id_;
  auto fleet_ptr = FleetWrapper::GetInstance();
  std::vector<std::vector<T*>> send_vec(trainer_num_);
  std::vector<int> send_index(trainer_num_);
  uint64_t reserve_len = fleet_send_batch_size_ / trainer_num_;
  for (auto& vec : send_vec) {
    vec.reserve(reserve_len);
  }
  for (int i = 0; i < trainer_num_; ++i) {
    send_index[i] = i;
  }
  std::vector<std::future<int32_t>> total_status;
  auto interval = GetMemoryDataInterval();
  VLOG(3) << "global shuffle data from [" << interval.first << ", "
          << interval.second << "), thread_id=" << thread_id_;
  for (int64_t i = interval.first; i < interval.second; ++i) {
    // if the instance carries an id, a hash of it could be used instead
    // std::string ins_id = memory_data_[i].ins_id;
    int64_t random_num = rand_r(&rand_seed);
    int64_t node_id = random_num % trainer_num_;
    send_vec[node_id].push_back(&((*memory_data_)[i]));
    if (i % fleet_send_batch_size_ == 0 && i != 0) {
      // shuffle the sending order to avoid network timeout errors
      std::random_shuffle(send_index.begin(), send_index.end());
      for (size_t index = 0; index < send_index.size(); ++index) {
        int j = send_index[index];
        std::string send_str;
        SerializeIns(send_vec[j], &send_str);
        VLOG(3) << "send str_length=" << send_str.length()
                << ", ins num=" << send_vec[j].size() << " to node_id=" << j
                << ", thread_id=" << thread_id_;
        auto ret = fleet_ptr->SendClientToClientMsg(0, j, send_str);
        VLOG(3) << "end send, thread_id=" << thread_id_;
        send_vec[j].clear();
        total_status.push_back(std::move(ret));
      }
    }
  }
  // shuffle the sending order to avoid network timeout errors
  std::random_shuffle(send_index.begin(), send_index.end());
  for (size_t index = 0; index < send_index.size(); ++index) {
    int j = send_index[index];
    if (send_vec[j].size() != 0) {
      std::string send_str;
      SerializeIns(send_vec[j], &send_str);
      VLOG(3) << "send str_length=" << send_str.length() << " to node_id=" << j
              << ", thread_id=" << thread_id_;
      auto ret = fleet_ptr->SendClientToClientMsg(0, j, send_str);
      VLOG(3) << "end send, thread_id=" << thread_id_;
      total_status.push_back(std::move(ret));
    }
    std::vector<T*>().swap(send_vec[j]);
  }
  for (auto& t : total_status) {
    t.wait();
  }
  VLOG(3) << "GlobalShuffle() end, thread_id=" << thread_id_;
#endif
}

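// Computes the half-open slice [start, end) of memory_data_ owned by this
// thread: size / thread_num elements per thread, with the first
// size % thread_num threads taking one extra (e.g. 10 elements over 3
// threads gives [0, 4), [4, 7), [7, 10)).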
template <typename T>
std::pair<int64_t, int64_t> InMemoryDataFeed<T>::GetMemoryDataInterval() {
  int64_t start = 0;
  int64_t end = 0;
  int64_t size = memory_data_->size();
  for (int64_t i = 0; i <= static_cast<int64_t>(thread_id_); ++i) {
    int64_t len = size / static_cast<int64_t>(thread_num_) +
                  (i < (size % static_cast<int64_t>(thread_num_)));
    start = end;
    end += len;
  }
  return std::make_pair(start, end);
}

// explicit instantiation
template class InMemoryDataFeed<std::vector<MultiSlotType>>;

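// Builds the slot metadata (names, types, used/dense flags, shapes) from the
// DataFeedDesc proto and sizes the feed vector accordingly.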
void MultiSlotDataFeed::Init(
    const paddle::framework::DataFeedDesc& data_feed_desc) {
  finish_init_ = false;
  finish_set_filelist_ = false;
  finish_start_ = false;

  PADDLE_ENFORCE(data_feed_desc.has_multi_slot_desc(),
                 "Multi_slot_desc has not been set.");
  paddle::framework::MultiSlotDesc multi_slot_desc =
      data_feed_desc.multi_slot_desc();
  SetBatchSize(data_feed_desc.batch_size());
  SetQueueSize(data_feed_desc.batch_size());
  size_t all_slot_num = multi_slot_desc.slots_size();
  all_slots_.resize(all_slot_num);
  all_slots_type_.resize(all_slot_num);
  use_slots_index_.resize(all_slot_num);
  use_slots_.clear();
  use_slots_is_dense_.clear();
  for (size_t i = 0; i < all_slot_num; ++i) {
    const auto& slot = multi_slot_desc.slots(i);
    all_slots_[i] = slot.name();
    all_slots_type_[i] = slot.type();
    use_slots_index_[i] = slot.is_used() ? use_slots_.size() : -1;
    if (slot.is_used()) {
      use_slots_.push_back(all_slots_[i]);
      use_slots_is_dense_.push_back(slot.is_dense());
      std::vector<int> local_shape;
      if (slot.is_dense()) {
        // a leading 0 reserves the batch-size dimension for dense slots
        if (slot.shape(0) > 0) {
          local_shape.push_back(0);
        }
      }
      for (int j = 0; j < slot.shape_size(); ++j) {
        local_shape.push_back(slot.shape(j));
      }
      use_slots_shape_.push_back(local_shape);
    }
  }
  feed_vec_.resize(use_slots_.size());
  pipe_command_ = data_feed_desc.pipe_command();
  finish_init_ = true;
}

void MultiSlotDataFeed::ReadThread() {
#ifdef _LINUX
  std::string filename;
  while (PickOneFile(&filename)) {
    int err_no = 0;
    fp_ = fs_open_read(filename, &err_no, pipe_command_);
    CHECK(fp_ != nullptr);
    __fsetlocking(&*fp_, FSETLOCKING_BYCALLER);
    std::vector<MultiSlotType> instance;
    int ins_num = 0;
    while (ParseOneInstanceFromPipe(&instance)) {
      ins_num++;
      queue_->Send(instance);
    }
    VLOG(3) << "filename: " << filename << " inst num: " << ins_num;
  }
  queue_->Close();
#endif
}

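// Validates a data file offline against the slot schema. Each line must
// contain, for every slot in order, a count followed by that many values of
// the slot's type. An illustrative line for a 3-id uint64 slot followed by a
// 2-value float slot:
//   3 1001 1002 1003 2 0.5 1.5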
bool MultiSlotDataFeed::CheckFile(const char* filename) {
#ifdef _LINUX
  CheckInit();  // get info of slots
  std::ifstream fin(filename);
  if (!fin.good()) {
    VLOG(1) << "error: failed to open file<" << filename << ">";
    return false;
  }
  std::string line;
  int instance_count = 0;
  std::string all_slots_alias = "";
  for (const auto& alias : all_slots_) {
    all_slots_alias += alias + " ";
  }
  std::string use_slots_alias = "";
  for (const auto& alias : use_slots_) {
    use_slots_alias += alias + " ";
  }
  VLOG(3) << "total slots num: " << all_slots_.size();
  VLOG(3) << "total slots alias: " << all_slots_alias;
  VLOG(3) << "used slots num: " << use_slots_.size();
  VLOG(3) << "used slots alias: " << use_slots_alias;
  while (getline(fin, line)) {
    ++instance_count;
    const char* str = line.c_str();
    char* endptr = const_cast<char*>(str);
    int len = line.length();
    for (size_t i = 0; i < all_slots_.size(); ++i) {
      int num = strtol(endptr, &endptr, 10);
      if (num < 0) {
        VLOG(0) << "error: the number of ids is a negative number: " << num;
        VLOG(0) << "please check line<" << instance_count << "> in file<"
                << filename << ">";
        return false;
      } else if (num == 0) {
        VLOG(0)
            << "error: the number of ids can not be zero; you need to pad "
               "it in the data generator, or if there is something wrong "
               "with the data, check whether it contains unresolvable "
               "characters.";
        VLOG(0) << "please check line<" << instance_count << "> in file<"
                << filename << ">";
        return false;
      } else if (errno == ERANGE || num > INT_MAX) {
        VLOG(0) << "error: the number of ids is greater than INT_MAX";
        VLOG(0) << "please check line<" << instance_count << "> in file<"
                << filename << ">";
        return false;
      }
      if (all_slots_type_[i] == "float") {
        for (int j = 0; j < num; ++j) {
          strtof(endptr, &endptr);
          if (errno == ERANGE) {
            VLOG(0) << "error: the value is out of the range of "
                       "representable values for float";
            VLOG(0) << "please check line<" << instance_count << "> in file<"
                    << filename << ">";
            return false;
          }
          if (j + 1 != num && endptr - str == len) {
            VLOG(0) << "error: something is wrong with the number of ids.";
            VLOG(0) << "please check line<" << instance_count << "> in file<"
                    << filename << ">";
            return false;
          }
        }
      } else if (all_slots_type_[i] == "uint64") {
        for (int j = 0; j < num; ++j) {
          strtoull(endptr, &endptr, 10);
          if (errno == ERANGE) {
            VLOG(0) << "error: the value is out of the range of "
                       "representable values for uint64_t";
            VLOG(0) << "please check line<" << instance_count << "> in file<"
                    << filename << ">";
            return false;
          }
          if (j + 1 != num && endptr - str == len) {
            VLOG(0) << "error: something is wrong with the number of ids.";
            VLOG(0) << "please check line<" << instance_count << "> in file<"
                    << filename << ">";
            return false;
          }
        }
      } else {
        VLOG(0) << "error: this type<" << all_slots_type_[i]
                << "> is not supported";
        return false;
      }
    }
    // Hadoop may append a '\t' to the end of each line emitted by a reduce
    // task (when the reduce output has only one field, a trailing '\t' is
    // added by default; it can be suppressed with the option
    // `-D mapred.textoutputformat.ignoreseparator=true`). This does not
    // affect the correctness of the data. Therefore, a line is only judged
    // abnormal when it ends with characters that are not whitespace.
    while (endptr - str != len) {
      if (!isspace(*(endptr++))) {
        VLOG(0)
            << "error: there are extra characters at the end of the line.";
        VLOG(0) << "please check line<" << instance_count << "> in file<"
                << filename << ">";
        return false;
      }
    }
  }
  VLOG(3) << "instance count: " << instance_count;
  VLOG(3) << "The file format is correct";
#endif
  return true;
}

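// Parses one instance from the pipe reader: for each slot in schema order,
// read the id count and then the values; only slots marked as used are
// stored, the rest are skipped token by token.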
bool MultiSlotDataFeed::ParseOneInstanceFromPipe(
    std::vector<MultiSlotType>* instance) {
#ifdef _LINUX
  thread_local string::LineFileReader reader;

  if (!reader.getline(&*(fp_.get()))) {
    return false;
  } else {
    int use_slots_num = use_slots_.size();
    instance->resize(use_slots_num);

    const char* str = reader.get();
    std::string line = std::string(str);
    // VLOG(3) << line;
    char* endptr = const_cast<char*>(str);
    int pos = 0;
    for (size_t i = 0; i < use_slots_index_.size(); ++i) {
      int idx = use_slots_index_[i];
      int num = strtol(&str[pos], &endptr, 10);
      PADDLE_ENFORCE(
          num,
          "The number of ids can not be zero, you need padding "
          "it in data generator; or if there is something wrong with "
          "the data, please check if the data contains unresolvable "
          "characters.\nplease check this error line: %s",
          str);
      if (idx != -1) {
        (*instance)[idx].Init(all_slots_type_[i]);
        if ((*instance)[idx].GetType()[0] == 'f') {  // float
          for (int j = 0; j < num; ++j) {
            float feasign = strtof(endptr, &endptr);
            (*instance)[idx].AddValue(feasign);
          }
        } else if ((*instance)[idx].GetType()[0] == 'u') {  // uint64
          for (int j = 0; j < num; ++j) {
            uint64_t feasign = (uint64_t)strtoull(endptr, &endptr, 10);
            (*instance)[idx].AddValue(feasign);
          }
        }
        pos = endptr - str;
      } else {
        for (int j = 0; j <= num; ++j) {
          // pos = line.find_first_of(' ', pos + 1);
          while (line[pos + 1] != ' ') {
            pos++;
          }
        }
      }
    }
    return true;
  }
#else
  return true;
#endif
}

bool MultiSlotDataFeed::ParseOneInstance(std::vector<MultiSlotType>* instance) {
#ifdef _LINUX
  std::string line;
  if (getline(file_, line)) {
    int use_slots_num = use_slots_.size();
    instance->resize(use_slots_num);
    // parse line
    const char* str = line.c_str();
    char* endptr = const_cast<char*>(str);
    int pos = 0;
    for (size_t i = 0; i < use_slots_index_.size(); ++i) {
      int idx = use_slots_index_[i];
      int num = strtol(&str[pos], &endptr, 10);
      PADDLE_ENFORCE(
          num,
          "The number of ids can not be zero, you need padding "
          "it in data generator; or if there is something wrong with "
          "the data, please check if the data contains unresolvable "
          "characters.\nplease check this error line: %s",
          str);

      if (idx != -1) {
        (*instance)[idx].Init(all_slots_type_[i]);
        if ((*instance)[idx].GetType()[0] == 'f') {  // float
          for (int j = 0; j < num; ++j) {
            float feasign = strtof(endptr, &endptr);
            (*instance)[idx].AddValue(feasign);
          }
        } else if ((*instance)[idx].GetType()[0] == 'u') {  // uint64
          for (int j = 0; j < num; ++j) {
            uint64_t feasign = (uint64_t)strtoull(endptr, &endptr, 10);
            (*instance)[idx].AddValue(feasign);
          }
        }
        pos = endptr - str;
      } else {
        for (int j = 0; j <= num; ++j) {
          pos = line.find_first_of(' ', pos + 1);
        }
      }
    }
    return true;
  } else {
    return false;
  }
#endif
  return false;
}

void MultiSlotDataFeed::AddInstanceToInsVec(
    std::vector<MultiSlotType>* ins_vec,
    const std::vector<MultiSlotType>& instance, int index) {
#ifdef _LINUX
  if (index == 0) {
    ins_vec->resize(instance.size());
    for (size_t i = 0; i < instance.size(); ++i) {
      (*ins_vec)[i].Init(instance[i].GetType());
      (*ins_vec)[i].InitOffset();
    }
  }

  for (size_t i = 0; i < instance.size(); ++i) {
    (*ins_vec)[i].AddIns(instance[i]);
  }
#endif
}

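// Copies a merged batch into the bound feed tensors: values become a flat
// {total_instance, 1} tensor, per-instance boundaries are recorded as LoD,
// and dense slots are reshaped to their configured shape with batch_size_
// filling the leading dimension.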
void MultiSlotDataFeed::PutToFeedVec(
    const std::vector<MultiSlotType>& ins_vec) {
#ifdef _LINUX
  for (size_t i = 0; i < use_slots_.size(); ++i) {
    const auto& type = ins_vec[i].GetType();
    const auto& offset = ins_vec[i].GetOffset();
    int total_instance = static_cast<int>(offset.back());

    if (type[0] == 'f') {  // float
      const auto& feasign = ins_vec[i].GetFloatData();
      float* tensor_ptr = feed_vec_[i]->mutable_data<float>(
          {total_instance, 1}, platform::CPUPlace());
      memcpy(tensor_ptr, &feasign[0], total_instance * sizeof(float));
    } else if (type[0] == 'u') {  // uint64
      // no uint64_t type in paddlepaddle
      const auto& feasign = ins_vec[i].GetUint64Data();
      int64_t* tensor_ptr = feed_vec_[i]->mutable_data<int64_t>(
          {total_instance, 1}, platform::CPUPlace());
      memcpy(tensor_ptr, &feasign[0], total_instance * sizeof(int64_t));
    }

    LoD data_lod{offset};
    feed_vec_[i]->set_lod(data_lod);
    if (use_slots_is_dense_[i]) {
      use_slots_shape_[i][0] = batch_size_;
      feed_vec_[i]->Resize(framework::make_ddim(use_slots_shape_[i]));
    }
  }
#endif
}

void MultiSlotInMemoryDataFeed::Init(
    const paddle::framework::DataFeedDesc& data_feed_desc) {
  finish_init_ = false;
  finish_set_filelist_ = false;
  finish_start_ = false;

  PADDLE_ENFORCE(data_feed_desc.has_multi_slot_desc(),
                 "Multi_slot_desc has not been set.");
  paddle::framework::MultiSlotDesc multi_slot_desc =
      data_feed_desc.multi_slot_desc();
  SetBatchSize(data_feed_desc.batch_size());
  SetQueueSize(data_feed_desc.batch_size());
  size_t all_slot_num = multi_slot_desc.slots_size();
  all_slots_.resize(all_slot_num);
  all_slots_type_.resize(all_slot_num);
  use_slots_index_.resize(all_slot_num);
  use_slots_.clear();
  use_slots_is_dense_.clear();
  for (size_t i = 0; i < all_slot_num; ++i) {
    const auto& slot = multi_slot_desc.slots(i);
    all_slots_[i] = slot.name();
    all_slots_type_[i] = slot.type();
    use_slots_index_[i] = slot.is_used() ? use_slots_.size() : -1;
    if (slot.is_used()) {
      use_slots_.push_back(all_slots_[i]);
      use_slots_is_dense_.push_back(slot.is_dense());
      std::vector<int> local_shape;
      if (slot.is_dense()) {
        if (slot.shape(0) > 0) {
          local_shape.push_back(0);
        }
      }
      for (int j = 0; j < slot.shape_size(); ++j) {
        local_shape.push_back(slot.shape(j));
      }
      use_slots_shape_.push_back(local_shape);
    }
  }
  feed_vec_.resize(use_slots_.size());
  pipe_command_ = data_feed_desc.pipe_command();
  finish_init_ = true;
}

bool MultiSlotInMemoryDataFeed::ParseOneInstanceFromPipe(
    std::vector<MultiSlotType>* instance) {
#ifdef _LINUX
  thread_local string::LineFileReader reader;

  if (!reader.getline(&*(fp_.get()))) {
    return false;
  } else {
    int use_slots_num = use_slots_.size();
    instance->resize(use_slots_num);

    const char* str = reader.get();
    std::string line = std::string(str);
    // VLOG(3) << line;
    char* endptr = const_cast<char*>(str);
    int pos = 0;
    for (size_t i = 0; i < use_slots_index_.size(); ++i) {
      int idx = use_slots_index_[i];
      int num = strtol(&str[pos], &endptr, 10);
      PADDLE_ENFORCE(
          num,
          "The number of ids can not be zero, you need padding "
          "it in data generator; or if there is something wrong with "
          "the data, please check if the data contains unresolvable "
          "characters.\nplease check this error line: %s",
          str);
      if (idx != -1) {
        (*instance)[idx].Init(all_slots_type_[i]);
        if ((*instance)[idx].GetType()[0] == 'f') {  // float
          for (int j = 0; j < num; ++j) {
            float feasign = strtof(endptr, &endptr);
            (*instance)[idx].AddValue(feasign);
          }
        } else if ((*instance)[idx].GetType()[0] == 'u') {  // uint64
          for (int j = 0; j < num; ++j) {
            uint64_t feasign = (uint64_t)strtoull(endptr, &endptr, 10);
            (*instance)[idx].AddValue(feasign);
          }
        }
        pos = endptr - str;
      } else {
        for (int j = 0; j <= num; ++j) {
          // pos = line.find_first_of(' ', pos + 1);
          while (line[pos + 1] != ' ') {
            pos++;
          }
        }
      }
    }
    return true;
  }
#else
  return false;
#endif
}

bool MultiSlotInMemoryDataFeed::ParseOneInstance(
    std::vector<MultiSlotType>* instance) {
#ifdef _LINUX
  std::string line;
  if (getline(file_, line)) {
    int use_slots_num = use_slots_.size();
    instance->resize(use_slots_num);
    VLOG(3) << line;
    // parse line
    const char* str = line.c_str();
    char* endptr = const_cast<char*>(str);
    int pos = 0;
    for (size_t i = 0; i < use_slots_index_.size(); ++i) {
      int idx = use_slots_index_[i];
      int num = strtol(&str[pos], &endptr, 10);
      PADDLE_ENFORCE(
          num,
          "The number of ids can not be zero, you need padding "
          "it in data generator; or if there is something wrong with "
          "the data, please check if the data contains unresolvable "
          "characters.\nplease check this error line: %s",
          str);

      if (idx != -1) {
        (*instance)[idx].Init(all_slots_type_[i]);
        if ((*instance)[idx].GetType()[0] == 'f') {  // float
          for (int j = 0; j < num; ++j) {
            float feasign = strtof(endptr, &endptr);
            (*instance)[idx].AddValue(feasign);
          }
        } else if ((*instance)[idx].GetType()[0] == 'u') {  // uint64
          for (int j = 0; j < num; ++j) {
            uint64_t feasign = (uint64_t)strtoull(endptr, &endptr, 10);
            (*instance)[idx].AddValue(feasign);
          }
        }
        pos = endptr - str;
      } else {
        for (int j = 0; j <= num; ++j) {
          pos = line.find_first_of(' ', pos + 1);
        }
      }
    }
    return true;
  } else {
    return false;
  }
#endif
  return false;
}

void MultiSlotInMemoryDataFeed::AddInstanceToInsVec(
    std::vector<MultiSlotType>* ins_vec,
    const std::vector<MultiSlotType>& instance, int index) {
#ifdef _LINUX
  if (index == 0) {
    ins_vec->resize(instance.size());
    for (size_t i = 0; i < instance.size(); ++i) {
      (*ins_vec)[i].Init(instance[i].GetType());
      (*ins_vec)[i].InitOffset();
    }
  }

  for (size_t i = 0; i < instance.size(); ++i) {
    (*ins_vec)[i].AddIns(instance[i]);
  }
#endif
}

void MultiSlotInMemoryDataFeed::PutToFeedVec(
    const std::vector<MultiSlotType>& ins_vec) {
#ifdef _LINUX
  for (size_t i = 0; i < use_slots_.size(); ++i) {
    const auto& type = ins_vec[i].GetType();
    const auto& offset = ins_vec[i].GetOffset();
    int total_instance = static_cast<int>(offset.back());

    if (type[0] == 'f') {  // float
      const auto& feasign = ins_vec[i].GetFloatData();
      float* tensor_ptr = feed_vec_[i]->mutable_data<float>(
          {total_instance, 1}, platform::CPUPlace());
      memcpy(tensor_ptr, &feasign[0], total_instance * sizeof(float));
    } else if (type[0] == 'u') {  // uint64
      // no uint64_t type in paddlepaddle
      const auto& feasign = ins_vec[i].GetUint64Data();
      int64_t* tensor_ptr = feed_vec_[i]->mutable_data<int64_t>(
          {total_instance, 1}, platform::CPUPlace());
      memcpy(tensor_ptr, &feasign[0], total_instance * sizeof(int64_t));
    }

    LoD data_lod{offset};
    feed_vec_[i]->set_lod(data_lod);
    if (use_slots_is_dense_[i]) {
      use_slots_shape_[i][0] = batch_size_;
      feed_vec_[i]->Resize(framework::make_ddim(use_slots_shape_[i]));
    }
  }
#endif
}

// Serialization of instances for global shuffle is delegated to FleetWrapper.
void MultiSlotInMemoryDataFeed::SerializeIns(
    const std::vector<std::vector<MultiSlotType>*>& ins, std::string* str) {
  auto fleet_ptr = FleetWrapper::GetInstance();
  fleet_ptr->Serialize(ins, str);
}
// Deserialization of received instances is likewise delegated to FleetWrapper.
void MultiSlotInMemoryDataFeed::DeserializeIns(
    std::vector<std::vector<MultiSlotType>>* ins, const std::string& str) {
  auto fleet_ptr = FleetWrapper::GetInstance();
  fleet_ptr->Deserialize(ins, str);
}

}  // namespace framework
}  // namespace paddle