// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/operators/reader/buffered_reader.h"
#include <vector>
#include "paddle/fluid/framework/data_type.h"

namespace paddle {
namespace operators {
namespace reader {
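// Block until every queued prefetch has finished before tearing anything
// down: the worker thread and the CUDA stream must outlive all in-flight
// reads and copies that were enqueued on them.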
BufferedReader::~BufferedReader() {
  reader_->Shutdown();
  while (!position_.empty()) {
    position_.front().wait();
    position_.pop();
  }
#ifdef PADDLE_WITH_CUDA
  if (platform::is_gpu_place(place_)) {
    platform::SetDeviceId(boost::get<platform::CUDAPlace>(place_).device);
    PADDLE_ENFORCE(cudaStreamDestroy(stream));
  }
#endif
}

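// The reader prefetches up to `buffer_size` batches ahead of the consumer.
// On a GPU place, a dedicated CUDA stream is created so host-to-device
// copies can overlap with computation on other streams.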
BufferedReader::BufferedReader(
    const std::shared_ptr<framework::ReaderBase> &reader,
    const platform::Place &place, size_t buffer_size)
    : framework::DecoratedReader(reader),
      thread_pool_(1),
      place_(place),
      buffer_size_(buffer_size) {
#ifdef PADDLE_WITH_CUDA
  if (platform::is_gpu_place(place_)) {
    platform::SetDeviceId(boost::get<platform::CUDAPlace>(place_).device);
    PADDLE_ENFORCE(cudaStreamCreate(&stream));
  }
#endif
  cpu_buffer_.resize(buffer_size);
  gpu_buffer_.resize(buffer_size);
  ReadTillBufferFullAsync();
}
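
// A minimal usage sketch (hypothetical driver code; in practice this reader
// is created by the framework's reader ops rather than constructed by hand):
//
//   auto buffered = std::make_shared<BufferedReader>(
//       underlying_reader, platform::CUDAPlace(0), /*buffer_size=*/4);
//   std::vector<framework::LoDTensor> batch;
//   buffered->ReadNext(&batch);  // blocks on the oldest prefetched slot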

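// Issue one asynchronous read per buffer slot. Called from the constructor
// and from StartImpl(), both of which require an empty position_ queue.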
void BufferedReader::ReadTillBufferFullAsync() {
  PADDLE_ENFORCE_EQ(position_.size(), 0U);
  for (size_t i = 0; i < buffer_size_; ++i) {
    ReadAsync(i);
  }
}

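// Enqueue a task on the single-threaded pool that reads the next batch into
// CPU slot i and, on a GPU place, copies it into the matching GPU slot.
// The returned future yields the slot index, or -1UL once the underlying
// reader is exhausted.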
void BufferedReader::ReadAsync(size_t i) {
  position_.emplace(thread_pool_.enqueue([this, i]() -> size_t {
    TensorVec &cpu = cpu_buffer_[i];
    reader_->ReadNext(&cpu);

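    // An empty batch means the underlying reader is exhausted; return the
    // -1UL sentinel so ReadNextImpl can signal end-of-epoch to the caller.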
    if (cpu.empty()) {
      return -1UL;
    }

#ifdef PADDLE_WITH_CUDA
    // NOTE(liangdun): use an async copy instead of TensorCopySync;
    // TensorCopySync would block the other streams.
    if (platform::is_gpu_place(place_)) {
      TensorVec &gpu = gpu_buffer_[i];
      gpu.resize(cpu.size());
      for (size_t i = 0; i < cpu.size(); ++i) {
        gpu[i].Resize(cpu[i].dims());
        gpu[i].set_layout(cpu[i].layout());
        auto cpu_place = cpu[i].place();
        auto cpu_ptr = cpu[i].data<void>();
        auto gpu_ptr = gpu[i].mutable_data(place_, cpu[i].type());
        auto size =
            cpu[i].numel() * paddle::framework::SizeOfType(cpu[i].type());
        if (platform::is_cuda_pinned_place(cpu_place))
          memory::Copy(boost::get<platform::CUDAPlace>(place_), gpu_ptr,
                       boost::get<platform::CUDAPinnedPlace>(cpu_place),
                       cpu_ptr, size, stream);
        else
          // If the CPU tensor is not in pinned memory, an async copy is
          // slower than a sync copy, so use a synchronous copy (stream 0)
          // instead.
          memory::Copy(boost::get<platform::CUDAPlace>(place_), gpu_ptr,
                       boost::get<platform::CPUPlace>(cpu_place), cpu_ptr, size,
                       0);
        gpu[i].set_lod(cpu[i].lod());
      }
      PADDLE_ENFORCE(cudaStreamSynchronize(stream));
    }
#endif
    return i;
  }));
}

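// Stop the underlying reader and drop every queued prefetch future; the
// buffer is refilled from scratch on the next StartImpl().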
void BufferedReader::ShutdownImpl() {
  reader_->Shutdown();
  while (!position_.empty()) {
    position_.pop();
  }
  prev_pos_ = -1UL;
}

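// Restart the underlying reader and refill the whole prefetch buffer.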
void BufferedReader::StartImpl() {
  reader_->Start();
  ReadTillBufferFullAsync();
}

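// Pop the oldest prefetch future and block until its batch is ready. The
// slot consumed on the previous call is recycled here, not the current one,
// because downstream computation may still be reading the current slot.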
void BufferedReader::ReadNextImpl(std::vector<framework::LoDTensor> *out) {
  if (position_.empty()) {
    out->clear();
    return;
  }
  size_t i = position_.front().get();
  position_.pop();

  if (i == -1UL) {
    ReadNextImpl(out);
    return;
  }

  *out = platform::is_gpu_place(place_) ? gpu_buffer_[i] : cpu_buffer_[i];

  // Do not push the current position into ReadAsync; push the previous
  // position instead. Since all computation in fluid is asynchronous,
  // overwriting the data at the current position could corrupt a batch
  // that is still being consumed.
  if (prev_pos_ != -1UL) {
    ReadAsync(prev_pos_);
  }
  prev_pos_ = i;
}

}  // namespace reader
}  // namespace operators
}  // namespace paddle