"""This module contains data processing related logic.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import random
import struct
import Queue
import time
import numpy as np
from threading import Thread
import signal
from multiprocessing import Manager, Process
import data_utils.augmentor.trans_mean_variance_norm as trans_mean_variance_norm
import data_utils.augmentor.trans_add_delta as trans_add_delta
from data_utils.util import suppress_complaints, suppress_signal
from data_utils.util import SharedNDArray, SharedMemoryPoolManager
from data_utils.util import DaemonProcessGroup, batch_to_ndarray
from data_utils.util import CriticalException, ForceExitWrapper, EpochEndSignal


class SampleInfo(object):
    """SampleInfo holds the necessary information to load a sample from disk.

    Args:
        feature_bin_path (str): File containing the feature data.
        feature_start (int): Start position of the sample's feature data.
        feature_size (int): Byte count of the sample's feature data.
        feature_frame_num (int): Time length of the sample.
        feature_dim (int): Feature dimension of one frame.
        label_bin_path (str): File containing the label data.
        label_start (int): Start position of the sample's label data.
        label_size (int): Byte count of the sample's label data.
        label_frame_num (int): Label number of the sample.
    """

    def __init__(self, feature_bin_path, feature_start, feature_size,
                 feature_frame_num, feature_dim, label_bin_path, label_start,
                 label_size, label_frame_num):
        # Where and how to read the raw feature bytes.
        self.feature_bin_path = feature_bin_path
        self.feature_start = feature_start
        self.feature_size = feature_size
        self.feature_frame_num = feature_frame_num
        self.feature_dim = feature_dim

        # Where and how to read the raw label bytes.
        self.label_bin_path = label_bin_path
        self.label_start = label_start
        self.label_size = label_size
        self.label_frame_num = label_frame_num


class SampleInfoBucket(object):
    """SampleInfoBucket contains paths of several description files. Feature
    description file contains necessary information (including path of binary
    data, sample start position, sample byte number etc.) to access samples'
    feature data and the same with the label description file. SampleInfoBucket
    is the minimum unit to do shuffle.

    Args:
        feature_bin_paths (list|tuple): Files containing the binary feature
                                        data.
        feature_desc_paths (list|tuple): Files containing the description of
                                         samples' feature data.
        label_bin_paths (list|tuple): Files containing the binary label data.
        label_desc_paths (list|tuple): Files containing the description of
                                       samples' label data.
        split_perturb (int): Maximum perturbation value for length of
                             sub-sentence when splitting long sentence.
        split_sentence_threshold (int): Sentence whose length larger than
                                        the value will trigger split operation.
        split_sub_sentence_len (int): sub-sentence length is equal to
                                      (split_sub_sentence_len +
                                       rand() % split_perturb).
    """

    def __init__(self,
                 feature_bin_paths,
                 feature_desc_paths,
                 label_bin_paths,
                 label_desc_paths,
                 split_perturb=50,
                 split_sentence_threshold=512,
                 split_sub_sentence_len=256):
        block_num = len(label_bin_paths)
        # All four path lists must describe the same set of blocks.
        assert len(label_desc_paths) == block_num
        assert len(feature_bin_paths) == block_num
        assert len(feature_desc_paths) == block_num
        self._block_num = block_num

        self._feature_bin_paths = feature_bin_paths
        self._feature_desc_paths = feature_desc_paths
        self._label_bin_paths = label_bin_paths
        self._label_desc_paths = label_desc_paths
        self._split_perturb = split_perturb
        self._split_sentence_threshold = split_sentence_threshold
        self._split_sub_sentence_len = split_sub_sentence_len
        # Fixed seed so that sentence splitting is reproducible across runs.
        self._rng = random.Random(0)

    def generate_sample_info_list(self):
        """Parse the description files of every block and return a flat list
        of SampleInfo objects.

        Sentences longer than ``split_sentence_threshold`` frames are split
        into randomly perturbed sub-sentences, unless splitting is disabled
        by passing -1 for any of the split parameters.

        Returns:
            list of SampleInfo.
        """
        sample_info_list = []
        # `range` (instead of py2-only `xrange`) behaves identically here on
        # both Python 2 and Python 3.
        for block_idx in range(self._block_num):
            label_bin_path = self._label_bin_paths[block_idx]
            label_desc_path = self._label_desc_paths[block_idx]
            feature_bin_path = self._feature_bin_paths[block_idx]
            feature_desc_path = self._feature_desc_paths[block_idx]

            # Use context managers so the description files are closed
            # deterministically instead of leaking until GC.
            with open(label_desc_path) as label_desc_file:
                label_desc_lines = label_desc_file.readlines()
            with open(feature_desc_path) as feature_desc_file:
                feature_desc_lines = feature_desc_file.readlines()

            # The first description line carries the sample count in its
            # second whitespace-separated field.
            sample_num = int(label_desc_lines[0].split()[1])
            assert sample_num == int(feature_desc_lines[0].split()[1])

            for i in range(sample_num):
                # Feature description fields: [.., .., start, size,
                # frame_num, dim].
                feature_desc_split = feature_desc_lines[i + 1].split()
                feature_start = int(feature_desc_split[2])
                feature_size = int(feature_desc_split[3])
                feature_frame_num = int(feature_desc_split[4])
                feature_dim = int(feature_desc_split[5])

                # Label description fields: [.., .., start, size, frame_num].
                label_desc_split = label_desc_lines[i + 1].split()
                label_start = int(label_desc_split[2])
                label_size = int(label_desc_split[3])
                label_frame_num = int(label_desc_split[4])
                assert feature_frame_num == label_frame_num

                if self._split_sentence_threshold == -1 or \
                        self._split_perturb == -1 or \
                        self._split_sub_sentence_len == -1 \
                        or self._split_sentence_threshold >= feature_frame_num:
                    sample_info_list.append(
                        SampleInfo(feature_bin_path, feature_start,
                                   feature_size, feature_frame_num, feature_dim,
                                   label_bin_path, label_start, label_size,
                                   label_frame_num))
                # split sentence
                else:
                    cur_frame_pos = 0
                    cur_frame_len = 0
                    remain_frame_num = feature_frame_num
                    while True:
                        if remain_frame_num > self._split_sentence_threshold:
                            cur_frame_len = self._split_sub_sentence_len + \
                                    self._rng.randint(0, self._split_perturb)
                            if cur_frame_len > remain_frame_num:
                                cur_frame_len = remain_frame_num
                        else:
                            cur_frame_len = remain_frame_num

                        # Byte offsets assume 4 bytes per value (float32
                        # features, 4-byte labels) -- matching the unpack
                        # formats used by the reader.
                        sample_info_list.append(
                            SampleInfo(
                                feature_bin_path, feature_start + cur_frame_pos
                                * feature_dim * 4, cur_frame_len * feature_dim *
                                4, cur_frame_len, feature_dim, label_bin_path,
                                label_start + cur_frame_pos * 4, cur_frame_len *
                                4, cur_frame_len))

                        remain_frame_num -= cur_frame_len
                        cur_frame_pos += cur_frame_len
                        if remain_frame_num <= 0:
                            break

        return sample_info_list


163
class AsyncDataReader(object):
    """DataReader provides basic audio sample preprocessing pipeline including
    data loading and data augmentation.

    Args:
        feature_file_list (str): File containing paths of feature data file and
                                 corresponding description file.
        label_file_list (str): File containing paths of label data file and
                               corresponding description file.
        drop_frame_len (int): Samples whose label length above the value will be
                              dropped.(Using '-1' to disable the policy)
        proc_num (int): Number of processes for processing data.
        sample_buffer_size (int): Buffer size to indicate the maximum samples
                                  cached.
        sample_info_buffer_size (int): Buffer size to indicate the maximum
                                       sample information cached.
        batch_buffer_size (int): Buffer size to indicate the maximum batch
                                 cached.
        shuffle_block_num (int): Block number indicating the minimum unit to do
                                 shuffle.
        random_seed (int): Random seed.
        verbose (int): If set to 0, complaints including exceptions and signal
                       traceback from sub-process will be suppressed. If set
                       to 1, all complaints will be printed.
    """

    def __init__(self,
                 feature_file_list,
                 label_file_list,
                 drop_frame_len=512,
                 proc_num=10,
                 sample_buffer_size=1024,
                 sample_info_buffer_size=1024,
                 batch_buffer_size=10,
                 shuffle_block_num=10,
                 random_seed=0,
                 verbose=0):
        self._feature_file_list = feature_file_list
        self._label_file_list = label_file_list
        self._drop_frame_len = drop_frame_len
        self._shuffle_block_num = shuffle_block_num
        self._block_info_list = None
        self._rng = random.Random(random_seed)
        self._bucket_list = None
        self.generate_bucket_list(True)
        self._order_id = 0
        self._manager = Manager()
        self._batch_buffer_size = batch_buffer_size
        self._proc_num = proc_num
        # One process feeds sample info and one assembles batches; the
        # remaining `proc_num - 2` do the actual sample processing.
        if self._proc_num <= 2:
            raise ValueError("Value of `proc_num` should be greater than 2.")
        self._sample_proc_num = self._proc_num - 2
        self._verbose = verbose
        self._force_exit = ForceExitWrapper(self._manager.Value('b', False))
        # Buffer queues connecting the three pipeline stages.
        self._sample_info_queue = self._manager.Queue(sample_info_buffer_size)
        self._sample_queue = self._manager.Queue(sample_buffer_size)
        self._batch_queue = self._manager.Queue(batch_buffer_size)

    def generate_bucket_list(self, is_shuffle):
        """(Re)build ``self._bucket_list`` from the feature/label list files.

        Args:
            is_shuffle (bool): Whether to shuffle the block order before
                               grouping blocks into buckets.
        """
        if self._block_info_list is None:
            # Each block occupies two consecutive lines in the list files:
            # the binary data path followed by the description file path.
            # `with` closes the handles instead of leaking them.
            with open(self._feature_file_list) as feature_list_file:
                block_feature_info_lines = feature_list_file.readlines()
            with open(self._label_file_list) as label_list_file:
                block_label_info_lines = label_list_file.readlines()
            assert len(block_feature_info_lines) == len(block_label_info_lines)
            self._block_info_list = []
            for i in range(0, len(block_feature_info_lines), 2):
                block_info = (block_feature_info_lines[i],
                              block_feature_info_lines[i + 1],
                              block_label_info_lines[i],
                              block_label_info_lines[i + 1])
                # List comprehension (not `map`) so the result is a real list
                # under both Python 2 and Python 3.
                self._block_info_list.append(
                    [line.strip() for line in block_info])

        if is_shuffle:
            self._rng.shuffle(self._block_info_list)

        self._bucket_list = []
        for i in range(0, len(self._block_info_list), self._shuffle_block_num):
            bucket_block_info = self._block_info_list[i:i +
                                                      self._shuffle_block_num]
            # SampleInfoBucket calls len() on these, so pass real lists.
            self._bucket_list.append(
                SampleInfoBucket(
                    [info[0] for info in bucket_block_info],
                    [info[1] for info in bucket_block_info],
                    [info[2] for info in bucket_block_info],
                    [info[3] for info in bucket_block_info]))

    # @TODO make this configurable
    def set_transformers(self, transformers):
        """Set the list of transformers applied to every loaded sample."""
        self._transformers = transformers

    def recycle(self, *args):
        """Return SharedNDArray objects to the shared memory pool so they
        can be reused by the batch assembling process.

        Raises:
            ValueError: If any argument is not a SharedNDArray.
        """
        for shared_ndarray in args:
            if not isinstance(shared_ndarray, SharedNDArray):
                # Fix: was `raise Value(...)`, which itself raised NameError.
                raise ValueError("Only support recycle SharedNDArray object.")
            shared_ndarray.recycle(self._pool_manager.pool)

    def _start_async_processing(self):
        """Spawn the feeding process and the sample processing processes."""
        self._order_id = 0

        @suppress_complaints(verbose=self._verbose, notify=self._force_exit)
        def ordered_feeding_task(sample_info_queue):
            if self._verbose == 0:
                signal.signal(signal.SIGTERM, suppress_signal)
                signal.signal(signal.SIGINT, suppress_signal)

            for sample_info_bucket in self._bucket_list:
                try:
                    sample_info_list = \
                            sample_info_bucket.generate_sample_info_list()
                except Exception as e:
                    raise CriticalException(e)
                else:
                    self._rng.shuffle(sample_info_list)  # do shuffle here
                    for sample_info in sample_info_list:
                        sample_info_queue.put((sample_info, self._order_id))
                        self._order_id += 1

            # One end-of-epoch marker per consumer process.
            for i in range(self._sample_proc_num):
                sample_info_queue.put(EpochEndSignal())

        feeding_proc = DaemonProcessGroup(
            proc_num=1,
            target=ordered_feeding_task,
            args=(self._sample_info_queue, ))
        feeding_proc.start_all()

        @suppress_complaints(verbose=self._verbose, notify=self._force_exit)
        def ordered_processing_task(sample_info_queue, sample_queue, out_order):
            if self._verbose == 0:
                signal.signal(signal.SIGTERM, suppress_signal)
                signal.signal(signal.SIGINT, suppress_signal)

            def read_bytes(fpath, start, size):
                # Read `size` raw bytes at offset `start`. Binary mode ('rb')
                # is required: the payload is raw float32/uint32 data, not
                # text (fix: was opened with 'r' and never closed on error).
                try:
                    with open(fpath, 'rb') as f:
                        f.seek(start, 0)
                        binary_bytes = f.read(size)
                    return binary_bytes
                except Exception as e:
                    raise CriticalException(e)

            ins = sample_info_queue.get()

            while not isinstance(ins, EpochEndSignal):
                sample_info, order_id = ins

                feature_bytes = read_bytes(sample_info.feature_bin_path,
                                           sample_info.feature_start,
                                           sample_info.feature_size)

                # Features are float32, hence 4 bytes per value.
                assert sample_info.feature_frame_num \
                       * sample_info.feature_dim * 4 == len(feature_bytes), \
                       (sample_info.feature_bin_path,
                        sample_info.feature_frame_num,
                        sample_info.feature_dim,
                        len(feature_bytes))

                label_bytes = read_bytes(sample_info.label_bin_path,
                                         sample_info.label_start,
                                         sample_info.label_size)

                # Fix: the failure message referenced the non-existent
                # attribute `label_array`, so a failing assert raised
                # AttributeError and masked the real error.
                assert sample_info.label_frame_num * 4 == len(label_bytes), (
                    sample_info.label_bin_path, sample_info.label_frame_num,
                    len(label_bytes))

                label_array = struct.unpack('I' * sample_info.label_frame_num,
                                            label_bytes)
                label_data = np.array(
                    label_array, dtype='int64').reshape(
                        (sample_info.label_frame_num, 1))

                feature_frame_num = sample_info.feature_frame_num
                feature_dim = sample_info.feature_dim
                assert feature_frame_num * feature_dim * 4 == len(feature_bytes)
                feature_array = struct.unpack('f' * feature_frame_num *
                                              feature_dim, feature_bytes)
                feature_data = np.array(
                    feature_array, dtype='float32').reshape((
                        sample_info.feature_frame_num, sample_info.feature_dim))

                sample_data = (feature_data, label_data)
                for transformer in self._transformers:
                    # @TODO(pkuyym) to make transfomer only accept feature_data
                    sample_data = transformer.perform_trans(sample_data)

                # Busy-wait until it is this sample's turn so that samples
                # are emitted in exactly the order they were fed.
                while order_id != out_order[0]:
                    time.sleep(0.001)

                # drop long sentence
                if self._drop_frame_len == -1 or \
                        self._drop_frame_len >= sample_data[0].shape[0]:
                    sample_queue.put(sample_data)

                out_order[0] += 1
                ins = sample_info_queue.get()

            sample_queue.put(EpochEndSignal())

        out_order = self._manager.list([0])
        args = (self._sample_info_queue, self._sample_queue, out_order)
        sample_proc = DaemonProcessGroup(
            proc_num=self._sample_proc_num,
            target=ordered_processing_task,
            args=args)
        sample_proc.start_all()

    def batch_iterator(self, batch_size, minimum_batch_size):
        """Yield batches as (feature, label, lod) SharedNDArray triples.

        Args:
            batch_size (int): Number of samples per full batch.
            minimum_batch_size (int): The tail (partial) batch is only
                                      emitted if it holds at least this
                                      many samples.
        """

        @suppress_complaints(verbose=self._verbose, notify=self._force_exit)
        def batch_assembling_task(sample_queue, batch_queue, pool):
            def conv_to_shared(ndarray):
                # NOTE(review): keep the explicit `== False`; ForceExitWrapper
                # appears to rely on equality overloading -- confirm before
                # simplifying to `not self._force_exit`.
                while self._force_exit == False:
                    try:
                        (name, shared_ndarray) = pool.popitem()
                    except Exception:
                        # Pool exhausted; wait for a consumer to recycle one.
                        time.sleep(0.001)
                    else:
                        shared_ndarray.copy(ndarray)
                        return shared_ndarray

            if self._verbose == 0:
                signal.signal(signal.SIGTERM, suppress_signal)
                signal.signal(signal.SIGINT, suppress_signal)

            batch_samples = []
            lod = [0]
            done_num = 0
            # Run until every sample-processing worker has signalled the
            # end of the epoch.
            while done_num < self._sample_proc_num:
                sample = sample_queue.get()
                if isinstance(sample, EpochEndSignal):
                    done_num += 1
                else:
                    batch_samples.append(sample)
                    lod.append(lod[-1] + sample[0].shape[0])
                    if len(batch_samples) == batch_size:
                        feature, label = batch_to_ndarray(batch_samples, lod)

                        feature = conv_to_shared(feature)
                        label = conv_to_shared(label)
                        lod = conv_to_shared(np.array(lod).astype('int64'))

                        batch_queue.put((feature, label, lod))
                        batch_samples = []
                        lod = [0]

            # Emit the (possibly partial) tail batch if it is big enough.
            if len(batch_samples) >= minimum_batch_size:
                (feature, label) = batch_to_ndarray(batch_samples, lod)

                feature = conv_to_shared(feature)
                label = conv_to_shared(label)
                lod = conv_to_shared(np.array(lod).astype('int64'))

                batch_queue.put((feature, label, lod))

            batch_queue.put(EpochEndSignal())

        self._start_async_processing()

        # Three shared blocks per buffered batch: feature, label and lod.
        self._pool_manager = SharedMemoryPoolManager(self._batch_buffer_size *
                                                     3, self._manager)

        assembling_proc = DaemonProcessGroup(
            proc_num=1,
            target=batch_assembling_task,
            args=(self._sample_queue, self._batch_queue,
                  self._pool_manager.pool))
        assembling_proc.start_all()

        while self._force_exit == False:
            try:
                batch_data = self._batch_queue.get_nowait()
            except Queue.Empty:
                time.sleep(0.001)
            else:
                if isinstance(batch_data, EpochEndSignal):
                    break
                yield batch_data

        # clean the shared memory
        del self._pool_manager