# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import six
import sys
import time
import signal
import numbers
import logging
import itertools
import threading
import numpy as np
import multiprocessing
from collections import namedtuple
from paddle.fluid.framework import _set_expected_place, _current_expected_place

# NOTE: queue has a different name in python2 and python3
if six.PY2:
    import Queue as queue
else:
    import queue

import paddle
from .. import core, layers
from ..framework import in_dygraph_mode
from ..multiprocess_utils import _set_SIGCHLD_handler, MP_STATUS_CHECK_INTERVAL, CleanupFuncRegistrar
from .fetcher import _IterableDatasetFetcher, _MapDatasetFetcher
from .batch_sampler import _InfiniteIterableSampler
from .collate import default_collate_fn, default_convert_fn
from .worker import ParentWatchDog, get_worker_info, _worker_loop, \
        _DatasetKind, _IterableDatasetStopIteration, _WorkerException
from .flat import _flatten_batch, _restore_batch

__all__ = ['get_worker_info']
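
# NOTE: a minimal usage sketch of `get_worker_info` (illustrative only; it
# assumes the returned object exposes `id`, `num_workers` and `dataset`
# fields, as documented for `paddle.io.get_worker_info`):
#
#     def worker_init_fn(worker_id):
#         info = get_worker_info()
#         if info is not None:
#             # e.g. shard an IterableDataset so each worker only reads
#             # samples whose index % info.num_workers == info.id
#             pass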


class _DataLoaderIterBase(object):
    """
    Iterator implementation of DataLoader, which loads and feeds mini-batch
    data according to the settings of the given DataLoader.

    Args:
        loader(instance of DataLoader): instance of `fluid.io.DataLoader`
    """

    def __init__(self, loader):
        self._dataset = loader.dataset
        self._feed_list = loader.feed_list or []
        self._places = loader.places
        self._return_list = loader.return_list
        self._batch_sampler = loader.batch_sampler
        self._auto_collate_batch = loader.auto_collate_batch
        self._num_workers = loader.num_workers
        self._use_buffer_reader = loader.use_buffer_reader
        self._use_shared_memory = loader.use_shared_memory
        self._timeout = loader.timeout if loader.timeout > 0 else MP_STATUS_CHECK_INTERVAL
        self._worker_init_fn = loader.worker_init_fn
        self._dataset_kind = loader.dataset_kind
        self._pin_memory = loader.pin_memory

        if self._auto_collate_batch:
            self._sampler_iter = iter(loader.batch_sampler)
            self._collate_fn = loader.collate_fn or default_collate_fn
        else:
            if self._dataset_kind == _DatasetKind.MAP:
                self._sampler_iter = iter(list(range(len(self._dataset))))
            else:
                self._sampler_iter = iter(
                    _InfiniteIterableSampler(self._dataset, 1))
            self._collate_fn = loader.collate_fn or default_convert_fn

        # LoDTensorBlockingQueue instance for create_py_reader and a thread
        # to put mini-batch data into self._blocking_queue, mini-batch data
        # will be obtained from:
        # 1. multi-process mode: get data from workers' result queue
        # 2. single-process mode: read mini-batch data in main process
        self._blocking_queue = None
        self._thread = None
        self._thread_done_event = threading.Event()

    def __iter__(self):
        return self

    def __len__(self):
        return len(self._batch_sampler)


class _DataLoaderIterSingleProcess(_DataLoaderIterBase):
    """
    Single-process implementation of DataLoaderIter, loading data from
    the dataset in the main process
    """

    def __init__(self, loader):
        super(_DataLoaderIterSingleProcess, self).__init__(loader)

        self._dataset_fetcher = _DatasetKind.create_fetcher(
            self._dataset_kind, self._dataset, self._auto_collate_batch,
            self._collate_fn, True)

        # NOTE: _structure_infos is used to record the data structure of
        # each batch to restore the batch structure after reading Tensor
        # from blocking_queue in single-process mode. Note that
        # only one process is used in single-process mode, so we
        # can record the data structures sequentially in a list without
        # recording the send and recv indices
        self._structure_infos = []

        # NOTE: len(self._places) batches compose one output iteration,
        # so the blocking_queue is set to cache at most 2 iterations
        # of data here
        self._blocking_queue_capacity = 2 * len(self._places)

        self._init_thread()

    def _init_thread(self):
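        # set up feed variable metadata, the blocking queue, the py_reader
        # and the producer thread which reads mini-batches from the dataset
        # and pushes them into the blocking queue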
        self._var_names = [v.name for v in self._feed_list]
        self._shapes = [v.shape for v in self._feed_list]
        self._dtypes = [v.dtype for v in self._feed_list]
        self._need_check_feed = [
            v.desc.need_check_feed() for v in self._feed_list
        ]
        # if only 1 place, do not need to keep order
        self._blocking_queue = core.init_lod_tensor_blocking_queue(
            core.Variable(), self._blocking_queue_capacity,
            len(self._places) > 1)
        self._reader = core.create_py_reader(
            self._blocking_queue, self._var_names, self._shapes, self._dtypes,
            self._need_check_feed, self._places, self._use_buffer_reader, True,
            self._pin_memory)

        self._thread = threading.Thread(
            target=self._thread_loop, args=(_current_expected_place(), ))
        self._thread.daemon = True
        self._thread.start()

    def _thread_loop(self, legacy_expected_place):
        try:
            # NOTE(zhiqiu): Set the expected place for the new thread to be the
            # same as that of the parent thread, which calls platform::SetDeviceId()
            # in C++ internally. If we do not set the cuda device id in the new
            # thread, the default cuda device id will be 0, which may cost hundreds
            # of MB of GPU memory on CUDAPlace(0) if some cuda APIs are called in
            # this thread.
            _set_expected_place(legacy_expected_place)

            for indices in self._sampler_iter:
                # read data from dataset in mini-batch
                batch = self._dataset_fetcher.fetch(indices)

                # flatten batch and record structure infos
                batch, structure = _flatten_batch(batch)
                self._structure_infos.append(structure)

                # pack as LoDTensorArray
                array = core.LoDTensorArray()
                for slot in batch:
                    if isinstance(slot, paddle.Tensor):
                        slot = slot.value().get_tensor()
                    elif not isinstance(slot, core.LoDTensor):
                        tmp = core.LoDTensor()
                        tmp.set(slot, core.CPUPlace())
                        slot = tmp

                    array.append(slot)

                if not self._blocking_queue.push(array):
                    break

                if self._thread_done_event.is_set():
                    break

            self._blocking_queue.close()
            self._shutdown_thread()
        except StopIteration:
            self._blocking_queue.close()
        except Exception:
            self._blocking_queue.kill()
            self._shutdown_thread()
            logging.warning("DataLoader reader thread raised an exception.")
            six.reraise(*sys.exc_info())

    def __next__(self):
        try:
            if in_dygraph_mode():
                data = self._reader.read_next_var_list()
                data = _restore_batch(data, self._structure_infos.pop(0))
            else:
                if self._return_list:
                    data = self._reader.read_next_list()
                    data = [
                        _restore_batch(d, s)
                        for d, s in zip(data, self._structure_infos[:len(
                            self._places)])
                    ]
                    self._structure_infos = self._structure_infos[len(
                        self._places):]
                    # static graph mode organizes data on multiple devices with a
                    # list; if place number is 1, there is only 1 device, extract
                    # the data from the list to be compatible with dygraph mode
                    if len(self._places) == 1:
                        data = data[0]
                else:
                    data = self._reader.read_next()

            return data
        except StopIteration:
            self._reader.shutdown()
            six.reraise(*sys.exc_info())

    def _shutdown_thread(self):
        if self._thread:
            self._thread_done_event.set()
            self._thread = None

    # python2 compatibility
    def next(self):
        return self.__next__()

    def __del__(self):
        # _blocking_queue in keep-order mode holds sub-threads, so thread
        # resources need to be released on unexpected exit
        if self._blocking_queue:
            self._blocking_queue.close()
        # NOTE: the blocking queue should be closed first, since a blocking
        # queue read may hang and then _thread_done_event cannot be checked
        self._shutdown_thread()


class _DataLoaderIterMultiProcess(_DataLoaderIterBase):
    def __init__(self, loader):
        super(_DataLoaderIterMultiProcess, self).__init__(loader)

        assert self._num_workers > 0,  "Multi-process DataLoader " \
                    "invalid num_workers({})".format(self._num_workers)

        # subprocess workers' result queue
        self._data_queue = None

        # data got from _data_queue will be reordered by _rcvd_idx;
        # to keep data order, data whose index is not equal to _rcvd_idx
        # will be cached in _task_infos
        self._send_idx = 0
        self._rcvd_idx = 0
        self._batches_outstanding = 0
        self._task_infos = {}
        self._structure_infos = []

        # indices are put out up to _outstanding_capacity at first, and
        # blocking_queue capacity is also _outstanding_capacity.
        # _outstanding_capacity here makes sure each indices_queue has
        # at least 2 indices, and outstanding batches cache output data
        # for at least 2 iterations (note that len(_places) batches will
        # be composed as one iteration output)
        self._outstanding_capacity = 2 * max(self._num_workers,
                                             len(self._places))

        # see _try_put_indices
        self._thread_lock = threading.Lock()

        # init workers and indices queues and put 2 indices in each indices queue
        self._init_workers()
        for _ in range(self._outstanding_capacity):
            self._try_put_indices()

        self._init_thread()
        self._shutdown = False

    def _init_workers(self):
        # multiprocess workers and indices queue lists are initialized as empty
        self._workers = []
        self._worker_status = []
        self._indices_queues = []
        self._workers_idx_cycle = itertools.cycle(range(self._num_workers))

        # create data_queue for workers
        self._data_queue = multiprocessing.Queue()

        # events for workers and thread, the thread event is only needed
        # in multi-processing mode
        self._workers_done_event = multiprocessing.Event()
        self._thread_done_event = threading.Event()

        for i in range(self._num_workers):
            indices_queue = multiprocessing.Queue()
            self._indices_queues.append(indices_queue)
            worker = multiprocessing.Process(
                target=_worker_loop,
                args=(self._dataset, self._dataset_kind, indices_queue,
                      self._data_queue, self._workers_done_event,
                      self._auto_collate_batch, self._collate_fn,
                      self._worker_init_fn, i, self._num_workers,
                      self._use_shared_memory))
            worker.daemon = True
            worker.start()
            self._workers.append(worker)
            self._worker_status.append(True)

        core._set_process_pids(id(self), tuple(w.pid for w in self._workers))
        _set_SIGCHLD_handler()

    def _clear_and_remove_data_queue(self):
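        # drain any remaining data in the workers' result queue, then close
        # it so that shutdown does not block on its feeder thread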
        if self._data_queue is not None:
            while True:
                try:
                    self._data_queue.get_nowait()
                except:
                    self._data_queue.cancel_join_thread()
                    self._data_queue.close()
                    break

    def _init_thread(self):
        self._var_names = [v.name for v in self._feed_list]
        self._shapes = [v.shape for v in self._feed_list]
        self._dtypes = [v.dtype for v in self._feed_list]
        self._need_check_feed = [
            v.desc.need_check_feed() for v in self._feed_list
        ]
        # if only 1 place, do not need to keep order
        self._blocking_queue = core.init_lod_tensor_blocking_queue(
            core.Variable(), self._outstanding_capacity, len(self._places) > 1)
        self._reader = core.create_py_reader(
            self._blocking_queue, self._var_names, self._shapes, self._dtypes,
            self._need_check_feed, self._places, self._use_buffer_reader, True,
            self._pin_memory)

        self._thread_done_event = threading.Event()
        self._thread = threading.Thread(
            target=self._thread_loop, args=(_current_expected_place(), ))
        self._thread.daemon = True
        self._thread.start()

    def _shutdown_worker(self, worker_id):
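        # putting None into a worker's indices_queue signals its worker loop
        # to exit; mark the worker as stopped so no more indices are sent to it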
        if self._worker_status[worker_id]:
            self._indices_queues[worker_id].put(None)
            self._worker_status[worker_id] = False

    def _try_shutdown_all(self, timeout=None):
        if not self._shutdown:
            try:
                self._exit_thread_expectedly()
                self._clear_and_remove_data_queue()

                # _workers_done_event should be set before putting None
                # into indices_queue, workers will exit on reading None
                # from indices_queue
                self._workers_done_event.set()
                for i in range(self._num_workers):
                    self._shutdown_worker(i)

                if not self._shutdown:
                    for w in self._workers:
                        w.join(timeout)
                    for q in self._indices_queues:
                        q.cancel_join_thread()
                        q.close()
            finally:
                core._erase_process_pids(id(self))
                self._shutdown = True

    def _exit_thread_expectedly(self):
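        # normal exit path: mark the thread done and close the blocking queue,
        # so the consumer side raises StopIteration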
        self._thread_done_event.set()
        self._blocking_queue.close()

    def _exit_thread_unexpectedly(self):
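        # failure path: mark the thread done and kill the blocking queue to
        # notify the consumer side that reading failed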
        self._thread_done_event.set()
        self._blocking_queue.kill()
        logging.error("DataLoader reader thread raised an exception!")

    def _thread_loop(self, legacy_expected_place):
        # NOTE(zhiqiu): Set the expected place for the new thread to be the
        # same as that of the parent thread, which calls platform::SetDeviceId()
        # in C++ internally. If we do not set the cuda device id in the new
        # thread, the default cuda device id will be 0, which may cost hundreds
        # of MB of GPU memory on CUDAPlace(0) if some cuda APIs are called in
        # this thread.
        _set_expected_place(legacy_expected_place)

        while not self._thread_done_event.is_set():
            batch = self._get_data()
            if not self._thread_done_event.is_set():
                if batch is None:
                    self._exit_thread_expectedly()
                else:
                    try:
                        # pack as LoDTensorArray
                        array = core.LoDTensorArray()
                        if self._use_shared_memory:
                            for tensor in batch:
                                array.append(tensor)
                        else:
                            # a LoDTensor not in shared memory is not
                            # serializable and cannot be created in workers
                            for slot in batch:
                                if isinstance(slot, paddle.Tensor):
                                    slot = slot.value().get_tensor()
                                elif not isinstance(slot, core.LoDTensor):
                                    tmp = core.LoDTensor()
                                    tmp.set(slot, core.CPUPlace())
                                    slot = tmp
                                array.append(slot)

                        if not self._blocking_queue.push(array):
                            self._blocking_queue.close()
                    except:
                        self._exit_thread_unexpectedly()
                        six.reraise(*sys.exc_info())
                    finally:
                        self._rcvd_idx += 1

    def _get_data(self):
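        # get the batch data for _rcvd_idx from the workers' result queue;
        # out-of-order batches are cached in _task_infos until their turn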
        while not self._thread_done_event.is_set():
            # For IterableDataset, batch indices are generated infinitely
            # for each worker until StopIteration is raised, but a worker
            # raising StopIteration discards a batch of indices which is
            # counted in _send_idx but does not increase _rcvd_idx, so we
            # check whether the worker is still alive here to skip the
            # discarded batch indices and increase _rcvd_idx
            if self._dataset_kind == _DatasetKind.ITER:
                while self._rcvd_idx < self._send_idx:
                    sys.stdout.flush()
                    info = self._task_infos[self._rcvd_idx]
                    if len(info) == 3 or self._worker_status[info[0]]:
                        break
                    del self._task_infos[self._rcvd_idx]
                    self._rcvd_idx += 1
                    self._batches_outstanding -= 1
                else:
                    # NOTE: _rcvd_idx and _send_idx only record batches among
                    #       workers, if batches among workers are drained, there
                    #       may also be data in the blocking queue
                    if self._batches_outstanding < len(self._places):
                        return None
                    continue

            if self._rcvd_idx in self._task_infos and \
                    len(self._task_infos[self._rcvd_idx]) == 3:
                info = self._task_infos.pop(self._rcvd_idx)
                self._structure_infos.append(info[2])
                return info[1]

            try:
                # [ avoid hang ]: the main process may block at _reader.read_next
                # when KeyboardInterrupt is raised, so we make the following tradeoff:
                # 1. get data with a timeout, MP_STATUS_CHECK_INTERVAL(5s) as the
                #    default timeout; if KeyboardInterrupt blocks, failed workers
                #    will be checked and a RuntimeError raised to quit DataLoader
                #    in the timeout exception handling
                # 2. if getting data times out and all workers are still alive,
                #    continue to get data again
                data = self._data_queue.get(timeout=self._timeout)
            except Exception as e:
                # check if thread done event set when waiting data
                if self._thread_done_event.is_set():
                    continue

                # check failed workers
                failed_workers = []
                for i, w in enumerate(self._workers):
                    if self._worker_status[i] and not w.is_alive():
                        failed_workers.append(w)
                        self._shutdown_worker(i)
                if len(failed_workers) > 0:
                    self._exit_thread_unexpectedly()
                    pids = ', '.join(str(w.pid) for w in failed_workers)
                    raise RuntimeError("DataLoader {} workers exit unexpectedly, " \
                                "pids: {}".format(len(failed_workers), pids))

                # get(timeout) will call _poll(timeout) and may raise IOError
                if isinstance(e, queue.Empty) or isinstance(e, IOError):
                    # continue on timeout to keep getting data from queue
                    continue

                self._exit_thread_unexpectedly()
                logging.error("DataLoader reader thread failed({}) to read data from " \
                              "workers' result queue.".format(e))
                six.reraise(*sys.exc_info())
            else:
                if self._dataset_kind == _DatasetKind.ITER and isinstance(
                        data, _IterableDatasetStopIteration):
                    # if a worker gets StopIteration, we shutdown this worker;
                    # note that the batch indices that triggered StopIteration
                    # are discarded, so the outstanding batch number should be
                    # decreased and another set of indices should be put, since
                    # other workers may still be working
                    self._shutdown_worker(data.worker_id)
                    self._batches_outstanding -= 1
                    self._try_put_indices()
                    continue

                idx, batch, structure = data
                if isinstance(batch, _WorkerException):
                    self._exit_thread_unexpectedly()
                    batch.reraise()

                if idx == self._rcvd_idx:
                    del self._task_infos[idx]
                    self._structure_infos.append(structure)
                    return batch
                else:
                    self._task_infos[idx] += (batch, structure)
                    continue

    def _try_put_indices(self):
        assert self._batches_outstanding <= self._outstanding_capacity, \
                    "too many indices have been put to queue"
        # In multi-process mode for IterableDataset, _try_put_indices will
        # be called both in the main process (since our implementation has a
        # blocking queue, and the blocking queue is read in the main process)
        # and in the thread, which may cause the following errors:
        #   1. "ValueError: generator already executing" in next(self._sampler_iter)
        #   2. re-entering while increasing _send_idx
        # Add a lock for thread safety; since _try_put_indices is only a
        # lightweight function not in the data reading pipeline, this lock
        # has almost no influence on performance
        with self._thread_lock:
            try:
                indices = next(self._sampler_iter)
            except StopIteration:
                return

            for i in range(self._num_workers):
                worker_idx = next(self._workers_idx_cycle)
                if self._worker_status[worker_idx]:
                    break
            else:
                return

            self._indices_queues[worker_idx].put((self._send_idx, indices))
            self._task_infos[self._send_idx] = (worker_idx, )
            self._batches_outstanding += 1
            self._send_idx += 1

    def __del__(self):
        self._try_shutdown_all()

    def _shutdown_on_exit(self):
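        # shutdown with a 1-second join timeout when exiting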
        self._try_shutdown_all(1)

    def __next__(self):
        try:
            # _batches_outstanding here records the total number of batches
            # from after _try_put_indices to before outputting data; this
            # value should be _outstanding_capacity if data is not drained.
            # If _batches_outstanding is less than the number of _places,
            # there is not enough data to generate the next output, so close
            # blocking_queue and set _thread_done_event here; py_reader will
            # raise StopIteration, and workers and indices_queues are ended
            # in the StopIteration handling
            if self._batches_outstanding < len(self._places):
                self._thread_done_event.set()
                self._blocking_queue.close()

            if in_dygraph_mode():
                data = self._reader.read_next_var_list()
                data = _restore_batch(data, self._structure_infos.pop(0))
            else:
                if self._return_list:
                    data = self._reader.read_next_list()
                    data = [
                        _restore_batch(d, s)
                        for d, s in zip(data, self._structure_infos[:len(
                            self._places)])
                    ]
                    self._structure_infos = self._structure_infos[len(
                        self._places):]
                    # static graph mode organizes data on multiple devices with a
                    # list; if place number is 1, there is only 1 device, extract
                    # the data from the list to be compatible with dygraph mode
                    if len(self._places) == 1:
                        data = data[0]
                else:
                    data = self._reader.read_next()
            self._on_output_batch()
            return data
        except StopIteration:
            self._reader.shutdown()
            self._try_shutdown_all()
            six.reraise(*sys.exc_info())

    # python2 compatibility
    def next(self):
        return self.__next__()

    def _on_output_batch(self):
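        # each output iteration consumes len(self._places) batches, so decrease
        # the outstanding batch count and try to put new indices for each batch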
        for _ in range(len(self._places)):
            self._batches_outstanding -= 1
            self._try_put_indices()