diff --git a/data_utils/data.py b/data_utils/data.py
index 8391dacc1f6925b6929d390ce1717a1a2551481f..44af7ffaa999c618a7dcd4884f528ef60e59eefe 100644
--- a/data_utils/data.py
+++ b/data_utils/data.py
@@ -7,6 +7,7 @@ from __future__ import print_function
 
 import random
 import numpy as np
+import multiprocessing
 import paddle.v2 as paddle
 from data_utils import utils
 from data_utils.augmentor.augmentation import AugmentationPipeline
@@ -60,7 +61,7 @@ class DataGenerator(object):
                  window_ms=20.0,
                  max_freq=None,
                  specgram_type='linear',
-                 num_threads=12,
+                 num_threads=multiprocessing.cpu_count(),
                  random_seed=0):
         self._max_duration = max_duration
         self._min_duration = min_duration
diff --git a/infer.py b/infer.py
index 7fc8482966258be0dd9721cc9510a467ccb0d232..71518133a347c459bbcf2670fa5d1dc226a619c8 100644
--- a/infer.py
+++ b/infer.py
@@ -6,6 +6,7 @@ from __future__ import print_function
 import argparse
 import gzip
 import distutils.util
+import multiprocessing
 import paddle.v2 as paddle
 from data_utils.data import DataGenerator
 from model import deep_speech2
@@ -40,7 +41,7 @@ parser.add_argument(
     help="Use gpu or not. (default: %(default)s)")
 parser.add_argument(
     "--num_threads_data",
-    default=12,
+    default=multiprocessing.cpu_count(),
     type=int,
     help="Number of cpu threads for preprocessing data. (default: %(default)s)")
 parser.add_argument(
diff --git a/train.py b/train.py
index 2c3b8ce78c608801e4789998d1148adbf6460f5d..fc23ec72692f319b556a75004a7508990df5357e 100644
--- a/train.py
+++ b/train.py
@@ -9,6 +9,7 @@ import argparse
 import gzip
 import time
 import distutils.util
+import multiprocessing
 import paddle.v2 as paddle
 from model import deep_speech2
 from data_utils.data import DataGenerator
@@ -77,7 +78,7 @@ parser.add_argument(
     help="Trainer number. (default: %(default)s)")
 parser.add_argument(
     "--num_threads_data",
-    default=12,
+    default=multiprocessing.cpu_count(),
     type=int,
     help="Number of cpu threads for preprocessing data. (default: %(default)s)")
 parser.add_argument(
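
For reference, a minimal standalone sketch (not part of the patch) of the pattern the diff applies in infer.py and train.py: the hard-coded default of 12 preprocessing threads is replaced by the core count reported by multiprocessing.cpu_count(). The --num_threads_data flag name and help text are taken from the patch; the surrounding script is illustrative only.

    # Hypothetical, self-contained example of the new default; it mirrors the
    # --num_threads_data flag changed in infer.py/train.py above.
    import argparse
    import multiprocessing

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--num_threads_data",
        # Default to the machine's CPU core count instead of a fixed 12.
        default=multiprocessing.cpu_count(),
        type=int,
        help="Number of cpu threads for preprocessing data. (default: %(default)s)")

    args = parser.parse_args()
    print("Preprocessing threads:", args.num_threads_data)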