diff --git a/paddle/fluid/operators/reader/buffered_reader.h b/paddle/fluid/operators/reader/buffered_reader.h
index 7f5d3d9fff8f28c8533587f326796f846987412e..cbe2bc1b5fdd69d1a843b768e3289acd621369a6 100644
--- a/paddle/fluid/operators/reader/buffered_reader.h
+++ b/paddle/fluid/operators/reader/buffered_reader.h
@@ -53,8 +53,8 @@ class BufferedReader : public framework::DecoratedReader {
 
   // The buffer for reading data.
   // NOTE: the simplest way to implement buffered reader is do not use any
-  // buffer, just async read and create futures as buffer size. However, to
-  // malloc Tensor every time is extremely slow. Here we store all data in
+  // buffer, just read async and create futures as buffer size. However, to
+  // malloc tensors every time is extremely slow. Here we store all data in
   // buffers and prevent alloc every time.
   std::vector<TensorVec> cpu_buffer_;
   std::vector<TensorVec> gpu_buffer_;
diff --git a/paddle/fluid/operators/reader/open_files_op.cc b/paddle/fluid/operators/reader/open_files_op.cc
index aa55681ffca9e43113172cbff88d80f19962c74f..38223e069975a08791d58d6ae10e2112b79a61fe 100644
--- a/paddle/fluid/operators/reader/open_files_op.cc
+++ b/paddle/fluid/operators/reader/open_files_op.cc
@@ -134,7 +134,7 @@ class PreemptiveReaderContainer : public IReaderContainer {
       } else {
         *out = item.data_;
         // continue read async
-        AsyncRead(item.reader_it_, &future_it);
+        ReadAsync(item.reader_it_, &future_it);
       }
     } else {
       out->clear();
@@ -151,10 +151,10 @@ class PreemptiveReaderContainer : public IReaderContainer {
 
     auto future_it = futures_.end();
     --future_it;
-    AsyncRead(reader_it, &future_it);
+    ReadAsync(reader_it, &future_it);
   }
 
-  void AsyncRead(const ReaderList::iterator& reader_it,
+  void ReadAsync(const ReaderList::iterator& reader_it,
                  FutureList::iterator* future_it_ptr) {
     auto& future_it = *future_it_ptr;
     *future_it = pool_.enqueue([reader_it, future_it, this] {