"""Contains DeepSpeech2 model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import sys
import os
import time
import logging
import gzip
import copy
import inspect
import cPickle as pickle
import collections
import multiprocessing
import numpy as np
from distutils.dir_util import mkpath
import paddle.fluid as fluid
import paddle.fluid.compiler as compiler
from decoders.swig_wrapper import Scorer
from decoders.swig_wrapper import ctc_greedy_decoder
from decoders.swig_wrapper import ctc_beam_search_decoder_batch
from model_utils.network import deep_speech_v2_network

logging.basicConfig(
    format='[%(levelname)s %(asctime)s %(filename)s:%(lineno)d] %(message)s')


class DeepSpeech2Model(object):
    """DeepSpeech2Model class.

    :param vocab_size: Decoding vocabulary size.
    :type vocab_size: int
    :param num_conv_layers: Number of stacking convolution layers.
    :type num_conv_layers: int
    :param num_rnn_layers: Number of stacking RNN layers.
    :type num_rnn_layers: int
    :param rnn_layer_size: RNN layer size (number of RNN cells).
    :type rnn_layer_size: int
    :param use_gru: Use GRU if set True; use a simple RNN if set False.
    :type use_gru: bool
    :param share_rnn_weights: Whether to share input-hidden weights between
                              forward and backward directional RNNs. Notice that
                              for GRU, weight sharing is not supported.
    :type share_rnn_weights: bool
    :param place: Program running place.
    :type place: CPUPlace|CUDAPlace
    :param init_from_pretrain_model: Pretrained model path. If None, the model
                                     will be trained from scratch.
    :type init_from_pretrain_model: string|None
    :param output_model_dir: Output model directory. If None, output to the
                             current directory.
    :type output_model_dir: string|None
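
    A minimal construction sketch (illustrative only; the vocabulary size and
    output directory below are placeholder values, not defined in this
    module)::

        model = DeepSpeech2Model(
            vocab_size=4300,
            num_conv_layers=2,
            num_rnn_layers=3,
            rnn_layer_size=2048,
            use_gru=True,
            share_rnn_weights=False,
            place=fluid.CUDAPlace(0),
            init_from_pretrain_model=None,
            output_model_dir='./checkpoints')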
    """

    def __init__(self,
                 vocab_size,
                 num_conv_layers,
                 num_rnn_layers,
                 rnn_layer_size,
                 use_gru=False,
                 share_rnn_weights=True,
                 place=fluid.CPUPlace(),
                 init_from_pretrain_model=None,
                 output_model_dir=None):
        self._vocab_size = vocab_size
        self._num_conv_layers = num_conv_layers
        self._num_rnn_layers = num_rnn_layers
        self._rnn_layer_size = rnn_layer_size
        self._use_gru = use_gru
        self._share_rnn_weights = share_rnn_weights
        self._place = place
        self._init_from_pretrain_model = init_from_pretrain_model
        self._output_model_dir = output_model_dir
        self._ext_scorer = None
        self.logger = logging.getLogger("")
        self.logger.setLevel(level=logging.INFO)

    def create_network(self, is_infer=False):
        """Create data layers and model network.
        :param is_training: Whether to create a network for training.
        :type is_training: bool 
        :return reader: Reader for input.
        :rtype reader: read generater
        :return log_probs: An output unnormalized log probability layer.
        :rtype lig_probs: Varable
        :return loss: A ctc loss layer.
        :rtype loss: Variable
        """

        if not is_infer:
            input_fields = {
                'names': ['audio_data', 'text_data', 'seq_len_data', 'masks'],
                'shapes': [[-1, 161, 161], [-1, 1], [-1, 1], [-1, 32, 81, 1]],
                'dtypes': ['float32', 'int32', 'int64', 'float32'],
                'lod_levels': [0, 1, 0, 0]
            }

            inputs = [
                fluid.layers.data(
                    name=input_fields['names'][i],
                    shape=input_fields['shapes'][i],
                    dtype=input_fields['dtypes'][i],
                    lod_level=input_fields['lod_levels'][i])
                for i in range(len(input_fields['names']))
            ]

            reader = fluid.io.PyReader(
                feed_list=inputs,
                capacity=64,
                iterable=False,
                use_double_buffer=True)

            (audio_data, text_data, seq_len_data, masks) = inputs
        else:
            audio_data = fluid.layers.data(
                name='audio_data',
                shape=[-1, 161, 161],
                dtype='float32',
                lod_level=0)
            seq_len_data = fluid.layers.data(
                name='seq_len_data', shape=[-1, 1], dtype='int64', lod_level=0)
            masks = fluid.layers.data(
                name='masks',
                shape=[-1, 32, 81, 1],
                dtype='float32',
                lod_level=0)
            text_data = None
            reader = fluid.DataFeeder([audio_data, seq_len_data, masks],
                                      self._place)

        log_probs, loss = deep_speech_v2_network(
            audio_data=audio_data,
            text_data=text_data,
            seq_len_data=seq_len_data,
            masks=masks,
            dict_size=self._vocab_size,
            num_conv_layers=self._num_conv_layers,
            num_rnn_layers=self._num_rnn_layers,
            rnn_size=self._rnn_layer_size,
            use_gru=self._use_gru,
            share_rnn_weights=self._share_rnn_weights)
        return reader, log_probs, loss

    def init_from_pretrain_model(self, exe, program):
        '''Init params from a pretrained model.'''

        assert isinstance(self._init_from_pretrain_model, str)

        if not os.path.exists(self._init_from_pretrain_model):
            raise Warning("The pretrained params at %s do not exist." %
                          self._init_from_pretrain_model)
        fluid.io.load_params(
            exe,
            self._init_from_pretrain_model,
            main_program=program,
            filename="params.pdparams")

        print("finish initing model from pretrained params from %s" %
              (self._init_from_pretrain_model))

        pre_epoch = 0
        dir_name = self._init_from_pretrain_model.split('_')
        if len(dir_name) >= 2 and dir_name[-2].endswith('epoch') and dir_name[
                -1].isdigit():
            pre_epoch = int(dir_name[-1])

        return pre_epoch + 1

    def save_param(self, exe, program, dirname):
        '''Save model params to dirname'''

        assert isinstance(self._output_model_dir, str)

        param_dir = os.path.join(self._output_model_dir)

        if not os.path.exists(param_dir):
            os.mkdir(param_dir)

        fluid.io.save_params(
            exe,
            os.path.join(param_dir, dirname),
            main_program=program,
            filename="params.pdparams")
        print("save parameters at %s" % (os.path.join(param_dir, dirname)))

        return True

    def test(self, exe, dev_batch_reader, test_program, test_pyreader,
             fetch_list):
        '''Test the model.

        :param exe: The executor of the program.
        :type exe: Executor
        :param dev_batch_reader: The reader of the test data.
        :type dev_batch_reader: reader generator
        :param test_program: The program for testing.
        :type test_program: Program
        :param test_pyreader: PyReader for testing.
        :type test_pyreader: PyReader
        :param fetch_list: Fetch list.
        :type fetch_list: list
        :return: Mean test loss over the test set.
        :rtype: float
        '''
        test_pyreader.start()
        epoch_loss = []
        while True:
            try:
                each_loss = exe.run(
                    program=test_program,
                    fetch_list=fetch_list,
                    return_numpy=False)
                epoch_loss.extend(np.array(each_loss[0]))

            except fluid.core.EOFException:
                test_pyreader.reset()
                break
        return np.mean(np.array(epoch_loss))

    def train(self,
              train_batch_reader,
              dev_batch_reader,
              feeding_dict,
              learning_rate,
              gradient_clipping,
              num_epoch,
              batch_size,
              num_samples,
              save_epoch=100,
              num_iterations_print=100,
              test_off=False):
        """Train the model.

        :param train_batch_reader: Train data reader.
        :type train_batch_reader: callable
        :param dev_batch_reader: Validation data reader.
        :type dev_batch_reader: callable
        :param feeding_dict: Feeding is a map of field name and tuple index
                             of the data that reader returns.
        :type feeding_dict: dict|list
        :param learning_rate: Learning rate for ADAM optimizer.
        :type learning_rate: float
        :param gradient_clipping: Gradient clipping threshold.
        :type gradient_clipping: float
        :param num_epoch: Number of training epochs.
        :type num_epoch: int
        :param batch_size: Batch size.
        :type batch_size: int
        :param num_samples: Number of training samples.
        :type num_samples: int
        :param save_epoch: Number of epochs between checkpoint saves.
        :type save_epoch: int
        :param num_iterations_print: Number of training iterations for printing
                                     a training loss.
        :type num_iterations_print: int
        :param test_off: Turn off testing.
        :type test_off: bool
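
        A typical call might look like the following sketch (illustrative
        only; train_reader, dev_reader, feeding_dict and num_train_samples
        are assumed to come from the surrounding data pipeline, and the
        hyper-parameter values are placeholders)::

            model.train(
                train_batch_reader=train_reader,
                dev_batch_reader=dev_reader,
                feeding_dict=feeding_dict,
                learning_rate=5e-4,
                gradient_clipping=400.0,
                num_epoch=50,
                batch_size=64,
                num_samples=num_train_samples,
                save_epoch=10,
                num_iterations_print=100,
                test_off=False)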
        """
        # prepare model output directory
        if not os.path.exists(self._output_model_dir):
            mkpath(self._output_model_dir)

        # adapt the feeding dict according to the network
        adapted_feeding_dict = self._adapt_feeding_dict(feeding_dict)

        if isinstance(self._place, fluid.CUDAPlace):
            dev_count = fluid.core.get_cuda_device_count()
        else:
            dev_count = int(os.environ.get('CPU_NUM', 1))

        # prepare the network
        train_program = fluid.Program()
        startup_prog = fluid.Program()
        with fluid.program_guard(train_program, startup_prog):
            with fluid.unique_name.guard():
                train_pyreader, log_probs, ctc_loss = self.create_network()
                # prepare optimizer
                optimizer = fluid.optimizer.AdamOptimizer(
                    learning_rate=fluid.layers.exponential_decay(
                        learning_rate=learning_rate,
                        decay_steps=num_samples / batch_size / dev_count,
                        decay_rate=0.83,
                        staircase=True))
                fluid.clip.set_gradient_clip(
                    clip=fluid.clip.GradientClipByGlobalNorm(
                        clip_norm=gradient_clipping))
                optimizer.minimize(loss=ctc_loss)

        test_prog = fluid.Program()
        with fluid.program_guard(test_prog, startup_prog):
            with fluid.unique_name.guard():
                test_pyreader, _, ctc_loss = self.create_network()

        test_prog = test_prog.clone(for_test=True)

        exe = fluid.Executor(self._place)
        exe.run(startup_prog)

        # init from some pretrain models, to better solve the current task
        pre_epoch = 0
        if self._init_from_pretrain_model:
            pre_epoch = self.init_from_pretrain_model(exe, train_program)

        build_strategy = compiler.BuildStrategy()
        exec_strategy = fluid.ExecutionStrategy()

        # pass the build_strategy to with_data_parallel API
        compiled_prog = compiler.CompiledProgram(
            train_program).with_data_parallel(
                loss_name=ctc_loss.name,
                build_strategy=build_strategy,
                exec_strategy=exec_strategy)

        train_pyreader.decorate_batch_generator(train_batch_reader)
        test_pyreader.decorate_batch_generator(dev_batch_reader)

        # run training
        for epoch_id in range(num_epoch):
            train_pyreader.start()
            epoch_loss = []
            time_begin = time.time()
            batch_id = 0
            step = 0
            while True:
                try:
                    fetch_list = [ctc_loss.name]

                    if batch_id % num_iterations_print == 0:
                        fetch = exe.run(
                            program=compiled_prog,
                            fetch_list=fetch_list,
                            return_numpy=False)
                        each_loss = fetch[0]
                        epoch_loss.extend(np.array(each_loss[0]) / batch_size)

                        print("epoch: %d, batch: %d, train loss: %f\n" %
                              (epoch_id, batch_id,
                               np.mean(each_loss[0]) / batch_size))

                    else:
                        each_loss = exe.run(
                            program=compiled_prog,
                            fetch_list=[],
                            return_numpy=False)

                    batch_id = batch_id + 1
                except fluid.core.EOFException:
                    train_pyreader.reset()
                    break
            time_end = time.time()
            used_time = time_end - time_begin
            if test_off:
                print("\n--------Time: %f sec, epoch: %d, train loss: %f\n" %
                      (used_time, epoch_id, np.mean(np.array(epoch_loss))))
            else:
                print('\n----------Begin test...')
                test_loss = self.test(
                    exe,
                    dev_batch_reader=dev_batch_reader,
                    test_program=test_prog,
                    test_pyreader=test_pyreader,
                    fetch_list=[ctc_loss])
                print(
                    "--------Time: %f sec, epoch: %d, train loss: %f, test loss: %f"
                    % (used_time, epoch_id + pre_epoch,
                       np.mean(np.array(epoch_loss)), test_loss / batch_size))
            if (epoch_id + 1) % save_epoch == 0:
                self.save_param(exe, train_program,
                                "epoch_" + str(epoch_id + pre_epoch))

        self.save_param(exe, train_program, "step_final")

        print("\n------------Training finished!!!-------------")

    def infer_batch_probs(self, infer_data, feeding_dict):
        """Infer the prob matrices for a batch of speech utterances.
        :param infer_data: List of utterances to infer, with each utterance
                           consisting of a tuple of audio features and
                           transcription text (empty string).
        :type infer_data: list
        :param feeding_dict: Feeding is a map of field name and tuple index
                             of the data that reader returns.
        :type feeding_dict: dict|list
        :return: List of 2-D probability matrices, each consisting of prob
                 vectors for one speech utterance.
        :rtype: List of matrix
        """
        # define inferer
        infer_program = fluid.Program()
        startup_prog = fluid.Program()

        # adapt the feeding dict according to the network
        adapted_feeding_dict = self._adapt_feeding_dict(feeding_dict)

        # prepare the network
        with fluid.program_guard(infer_program, startup_prog):
            with fluid.unique_name.guard():
                feeder, log_probs, _ = self.create_network(is_infer=True)

        infer_program = infer_program.clone(for_test=True)
        exe = fluid.Executor(self._place)
        exe.run(startup_prog)

        # init param from pretrain_model
        if not self._init_from_pretrain_model:
            exit("No pretrain model file path!")
        self.init_from_pretrain_model(exe, infer_program)

        infer_results = []
        time_begin = time.time()

        # run inference
        for i in range(infer_data[0].shape[0]):
            each_log_probs = exe.run(
                program=infer_program,
                feed=feeder.feed(
                    [[infer_data[0][i], infer_data[2][i], infer_data[3][i]]]),
                fetch_list=[log_probs],
                return_numpy=False)
            infer_results.extend(np.array(each_log_probs[0]))

        # slice results
        infer_results = np.array(infer_results)
        seq_len = (infer_data[2] - 1) // 3 + 1

        start_pos = [0] * (infer_data[0].shape[0] + 1)
        for i in range(infer_data[0].shape[0]):
            start_pos[i + 1] = start_pos[i] + seq_len[i][0]
        probs_split = [
            infer_results[start_pos[i]:start_pos[i + 1]]
            for i in range(0, infer_data[0].shape[0])
        ]

        return probs_split

    def decode_batch_greedy(self, probs_split, vocab_list):
        """Decode by best path for a batch of probs matrix input.
        :param probs_split: List of 2-D probability matrices, each consisting
                            of prob vectors for one speech utterance.
        :type probs_split: List of matrix
        :param vocab_list: List of tokens in the vocabulary, for decoding.
        :type vocab_list: list
        :return: List of transcription texts.
        :rtype: List of basestring
        """
        results = []
        for i, probs in enumerate(probs_split):
            output_transcription = ctc_greedy_decoder(
                probs_seq=probs, vocabulary=vocab_list)
            results.append(output_transcription)
        print(results)
        return results

    def init_ext_scorer(self, beam_alpha, beam_beta, language_model_path,
                        vocab_list):
        """Initialize the external scorer.
        :param beam_alpha: Parameter associated with language model.
        :type beam_alpha: float
        :param beam_beta: Parameter associated with word count.
        :type beam_beta: float
        :param language_model_path: Filepath for language model. If it is
                                    empty, the external scorer will be set to
                                    None, and the decoding method will be pure
                                    beam search without scorer.
        :type language_model_path: basestring|None
        :param vocab_list: List of tokens in the vocabulary, for decoding.
        :type vocab_list: list
        """
        if language_model_path != '':
            self.logger.info("begin to initialize the external scorer "
                             "for decoding")
            self._ext_scorer = Scorer(beam_alpha, beam_beta,
                                      language_model_path, vocab_list)
            lm_char_based = self._ext_scorer.is_character_based()
            lm_max_order = self._ext_scorer.get_max_order()
            lm_dict_size = self._ext_scorer.get_dict_size()
            self.logger.info("language model: "
                             "is_character_based = %d," % lm_char_based +
                             " max_order = %d," % lm_max_order +
                             " dict_size = %d" % lm_dict_size)
            self.logger.info("end initializing scorer")
        else:
            self._ext_scorer = None
            self.logger.info("no language model provided, "
                             "decoding by pure beam search without scorer.")

    def decode_batch_beam_search(self, probs_split, beam_alpha, beam_beta,
                                 beam_size, cutoff_prob, cutoff_top_n,
                                 vocab_list, num_processes):
        """Decode by beam search for a batch of probs matrix input.
        :param probs_split: List of 2-D probability matrices, each consisting
                            of prob vectors for one speech utterance.
        :type probs_split: List of matrix
        :param beam_alpha: Parameter associated with language model.
        :type beam_alpha: float
        :param beam_beta: Parameter associated with word count.
        :type beam_beta: float
        :param beam_size: Width for beam search.
        :type beam_size: int
        :param cutoff_prob: Cutoff probability in pruning,
                            default 1.0, no pruning.
        :type cutoff_prob: float
        :param cutoff_top_n: Cutoff number in pruning, only top cutoff_top_n
                        characters with highest probs in vocabulary will be
                        used in beam search, default 40.
        :type cutoff_top_n: int
        :param vocab_list: List of tokens in the vocabulary, for decoding.
        :type vocab_list: list
        :param num_processes: Number of processes (CPU) for decoder.
        :type num_processes: int
        :return: List of transcription texts.
        :rtype: List of basestring
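
        A typical decoding pass might look like the following sketch, where
        model is a DeepSpeech2Model instance (illustrative only; alpha, beta,
        lm_path, infer_data, feeding_dict and the pruning values are
        placeholders)::

            model.init_ext_scorer(alpha, beta, lm_path, vocab_list)
            probs_split = model.infer_batch_probs(infer_data, feeding_dict)
            transcripts = model.decode_batch_beam_search(
                probs_split, alpha, beta, beam_size=500, cutoff_prob=1.0,
                cutoff_top_n=40, vocab_list=vocab_list, num_processes=8)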
        """
        if self._ext_scorer is not None:
            self._ext_scorer.reset_params(beam_alpha, beam_beta)
        # beam search decode
        num_processes = min(num_processes, len(probs_split))
        beam_search_results = ctc_beam_search_decoder_batch(
            probs_split=probs_split,
            vocabulary=vocab_list,
            beam_size=beam_size,
            num_processes=num_processes,
            ext_scoring_func=self._ext_scorer,
            cutoff_prob=cutoff_prob,
            cutoff_top_n=cutoff_top_n)

        results = [result[0][1] for result in beam_search_results]
        return results

    def _adapt_feeding_dict(self, feeding_dict):
        """Adapt feeding dict according to network struct.

        To remove impacts from padding part, we add scale_sub_region layer and
        sub_seq layer. For sub_seq layer, 'sequence_offset' and
        'sequence_length' fields are appended. For each scale_sub_region layer,
        a 'convN_index_range' field is appended.

        :param feeding_dict: Feeding is a map of field name and tuple index
                             of the data that reader returns.
        :type feeding_dict: dict|list
        :return: Adapted feeding dict.
        :rtype: dict|list
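
        For example, with two convolution layers and a list-type feeding dict,
        the adaptation roughly does the following (the first two field names
        are placeholders)::

            ['audio_spectrogram', 'transcript_text']
            -> ['audio_spectrogram', 'transcript_text',
                'sequence_offset', 'sequence_length',
                'conv0_index_range', 'conv1_index_range']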
        """
        adapted_feeding_dict = copy.deepcopy(feeding_dict)
        if isinstance(feeding_dict, dict):
            adapted_feeding_dict["sequence_offset"] = len(adapted_feeding_dict)
            adapted_feeding_dict["sequence_length"] = len(adapted_feeding_dict)
            for i in range(self._num_conv_layers):
                adapted_feeding_dict["conv%d_index_range" %i] = \
                        len(adapted_feeding_dict)
        elif isinstance(feeding_dict, list):
            adapted_feeding_dict.append("sequence_offset")
            adapted_feeding_dict.append("sequence_length")
            for i in range(self._num_conv_layers):
                adapted_feeding_dict.append("conv%d_index_range" % i)
        else:
            raise ValueError("Type of feeding_dict is %s, not supported." %
                             type(feeding_dict))

        return adapted_feeding_dict