// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/operators/reader/buffered_reader.h"
#include <memory>
#include <vector>
#include "paddle/fluid/framework/data_type.h"

#include "paddle/fluid/platform/profiler.h"
namespace paddle {
namespace operators {
namespace reader {
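// The destructor drains every in-flight prefetch task before destroying the
// CUDA stream and events, so no pending callback can touch freed members.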
BufferedReader::~BufferedReader() {
  VLOG(1) << "~BufferedReader";
  reader_->Shutdown();
  while (!position_.empty()) {
    position_.front().wait();
    position_.pop();
  }
#ifdef PADDLE_WITH_CUDA
  if (platform::is_gpu_place(place_)) {
    platform::SetDeviceId(boost::get<platform::CUDAPlace>(place_).device);
    PADDLE_ENFORCE(cudaStreamDestroy(stream_));
    for (auto &event : events_) {
      PADDLE_ENFORCE(cudaEventDestroy(event));
    }
  }
#endif
}

BufferedReader::BufferedReader(
    const std::shared_ptr<framework::ReaderBase> &reader,
    const platform::Place &place, size_t buffer_size)
    : framework::DecoratedReader(reader),
      thread_pool_(1),
      place_(place),
      buffer_size_(buffer_size) {
  VLOG(1) << "BufferedReader";
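  // On GPU places, create one CUDA event per buffer slot and a dedicated
  // non-blocking copy stream so host-to-device copies can overlap with work
  // on the compute stream.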
#ifdef PADDLE_WITH_CUDA
  if (platform::is_gpu_place(place_)) {
    platform::SetDeviceId(boost::get<platform::CUDAPlace>(place_).device);
    compute_stream_ =
        static_cast<platform::CUDADeviceContext *>(
            platform::DeviceContextPool::Instance().Get(place_))
            ->stream();
    events_.resize(buffer_size);
    for (auto &event : events_) {
      PADDLE_ENFORCE(cudaEventCreateWithFlags(&event, cudaEventDisableTiming));
    }
    PADDLE_ENFORCE(cudaStreamCreateWithFlags(&stream_, cudaStreamNonBlocking));
  }
#endif
  cpu_buffer_.resize(buffer_size);
  gpu_buffer_.resize(buffer_size);
  ReadTillBufferFullAsync();
}

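// Enqueue one asynchronous read per buffer slot so the whole buffer fills in
// the background.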
void BufferedReader::ReadTillBufferFullAsync() {
  PADDLE_ENFORCE_EQ(position_.size(), 0U);
  for (size_t i = 0; i < buffer_size_; ++i) {
    ReadAsync(i);
  }
}

void BufferedReader::ReadAsync(size_t i) {
#ifdef PADDLE_WITH_CUDA
  if (platform::is_gpu_place(place_)) {
    platform::SetDeviceId(boost::get<platform::CUDAPlace>(place_).device);
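    // Record an event on the compute stream; the copy stream waits on it
    // before copying into slot i, so the new copy cannot overwrite data that
    // computation is still reading.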
    PADDLE_ENFORCE(cudaEventRecord(events_[i], compute_stream_));
  }
#endif
  position_.emplace(thread_pool_.enqueue([this, i]() -> size_t {
    TensorVec &cpu = cpu_buffer_[i];
    reader_->ReadNext(&cpu);

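    // An empty batch means the underlying reader is exhausted; return -1UL
    // so ReadNextImpl can recognize the slot as invalid.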
    if (cpu.empty()) {
      return -1UL;
    }

#ifdef PADDLE_WITH_CUDA
    // NOTE(liangdun): use an async copy instead of TensorCopySync.
    // TensorCopySync would block other streams: it issues the copy command
    // on the default stream, so commands from different streams cannot run
    // concurrently.
    if (platform::is_gpu_place(place_)) {
      platform::SetDeviceId(boost::get<platform::CUDAPlace>(place_).device);
      PADDLE_ENFORCE(cudaStreamWaitEvent(stream_, events_[i], 0));
      TensorVec &gpu = gpu_buffer_[i];
      gpu.resize(cpu.size());
      platform::RecordEvent record_event("BufferedReader:MemoryCopy");
      for (size_t i = 0; i < cpu.size(); ++i) {
        gpu[i].Resize(cpu[i].dims());
        gpu[i].set_layout(cpu[i].layout());
        auto cpu_place = cpu[i].place();
        auto cpu_ptr = cpu[i].data<void>();
        auto gpu_ptr = gpu[i].mutable_data(place_, cpu[i].type());
        auto size =
            cpu[i].numel() * paddle::framework::SizeOfType(cpu[i].type());
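        // Only pinned-host and device sources can be copied asynchronously
        // on the dedicated stream; pageable host memory cannot.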
        if (platform::is_cuda_pinned_place(cpu_place)) {
          memory::Copy(boost::get<platform::CUDAPlace>(place_), gpu_ptr,
                       boost::get<platform::CUDAPinnedPlace>(cpu_place),
                       cpu_ptr, size, stream_);
        } else if (platform::is_gpu_place(cpu_place)) {
          memory::Copy(boost::get<platform::CUDAPlace>(place_), gpu_ptr,
                       boost::get<platform::CUDAPlace>(cpu_place), cpu_ptr,
                       size, stream_);
        } else {
          // If the source is pageable (non-pinned) host memory, an async
          // copy is slower than a sync copy, so we use a sync copy instead.
          // TODO(zcd): The default stream should not be used here.
          memory::Copy(boost::get<platform::CUDAPlace>(place_), gpu_ptr,
                       boost::get<platform::CPUPlace>(cpu_place), cpu_ptr, size,
                       0);
        }
        gpu[i].set_lod(cpu[i].lod());
      }
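      // Block until all copies issued on the dedicated stream have finished,
      // so the future never resolves with a half-copied tensor.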
      PADDLE_ENFORCE(cudaStreamSynchronize(stream_));
    }
#endif
    return i;
  }));
}

void BufferedReader::ShutdownImpl() {
  VLOG(1) << "ShutdownImpl";
  reader_->Shutdown();
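  // Discard all pending prefetch positions and reset the read cursor.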
  while (!position_.empty()) {
    position_.pop();
  }
  prev_pos_ = -1UL;
}

void BufferedReader::StartImpl() {
  reader_->Start();
  ReadTillBufferFullAsync();
}

void BufferedReader::ReadNextImpl(std::vector<framework::LoDTensor> *out) {
  if (position_.empty()) {
    out->clear();
    return;
  }
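  // Block until the oldest in-flight read completes and take its slot index.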
  size_t i = position_.front().get();
  position_.pop();

  if (i == -1UL) {
    ReadNextImpl(out);
    return;
  }

  *out = platform::is_gpu_place(place_) ? gpu_buffer_[i] : cpu_buffer_[i];

  // Do not push the current position into ReadAsync; push the previous one
  // instead. Since all computation in fluid is asynchronous, overwriting the
  // data at the current position could corrupt a batch still in use.
  if (prev_pos_ != -1UL) {
    ReadAsync(prev_pos_);
  }
  prev_pos_ = i;
}

}  // namespace reader
}  // namespace operators
}  // namespace paddle