#! /usr/bin/env bash
# deep_speech_2/examples/aishell/run_infer.sh
# (introduced by patch 6d53d22 "Add inference scripts", yangyaming, 2017-09-19)
#
# Run CTC beam-search inference for Aishell using the most recent locally
# trained checkpoint (checkpoints/aishell/params.latest.tar.gz).
# Must be launched from examples/aishell/ so the relative paths below
# resolve against the repository root.

# Move to the repository root (deep_speech_2/); abort if the layout is
# not what we expect instead of running everything in the wrong directory.
pushd ../.. > /dev/null || exit 1

# Download the Mandarin language model (a no-op if it is already cached).
pushd models/lm > /dev/null || exit 1
# download_lm_ch.sh declares a bash shebang, so invoke it with bash rather
# than sh: on systems where /bin/sh is dash, any bashism would break.
if ! bash download_lm_ch.sh; then
    echo "Failed in downloading the language model!" >&2
    exit 1
fi
popd > /dev/null

# Decode the first 10 test-manifest samples with CTC beam search
# (beam width 300) rescored by the Mandarin LM; report CER.
if ! CUDA_VISIBLE_DEVICES=0 \
python -u infer.py \
--num_samples=10 \
--trainer_count=1 \
--beam_size=300 \
--num_proc_bsearch=8 \
--num_conv_layers=2 \
--num_rnn_layers=3 \
--rnn_layer_size=1024 \
--alpha=1.4 \
--beta=2.4 \
--cutoff_prob=0.99 \
--cutoff_top_n=40 \
--use_gru=False \
--use_gpu=True \
--share_rnn_weights=False \
--infer_manifest='data/aishell/manifest.test' \
--mean_std_path='data/aishell/mean_std.npz' \
--vocab_path='data/aishell/vocab.txt' \
--model_path='checkpoints/aishell/params.latest.tar.gz' \
--lang_model_path='models/lm/zh_giga.no_cna_cmn.prune01244.klm' \
--decoding_method='ctc_beam_search' \
--error_rate_type='cer' \
--specgram_type='linear'
then
    echo "Failed in inference!" >&2
    exit 1
fi

exit 0
#! /usr/bin/env bash
# deep_speech_2/examples/aishell/run_infer_golden.sh
#
# Same flow as run_infer.sh, but decodes with the released ("golden")
# Aishell model downloaded into models/aishell instead of a local
# checkpoint. Must be launched from examples/aishell/.

# Move to the repository root (deep_speech_2/); abort if that fails
# instead of running everything in the wrong directory.
pushd ../.. > /dev/null || exit 1

# Download the Mandarin language model (a no-op if already cached).
pushd models/lm > /dev/null || exit 1
# The download helpers declare a bash shebang, so invoke them with bash
# rather than sh: on systems where /bin/sh is dash, bashisms would break.
if ! bash download_lm_ch.sh; then
    echo "Failed in downloading the language model!" >&2
    exit 1
fi
popd > /dev/null

# Download the released, well-trained Aishell model.
pushd models/aishell > /dev/null || exit 1
if ! bash download_model.sh; then
    echo "Failed in downloading the released Aishell model!" >&2
    exit 1
fi
popd > /dev/null

# Decode the first 10 test-manifest samples with CTC beam search
# (beam width 300) rescored by the Mandarin LM; mean/std, vocab and
# parameters all come from the downloaded models/aishell bundle.
if ! CUDA_VISIBLE_DEVICES=0 \
python -u infer.py \
--num_samples=10 \
--trainer_count=1 \
--beam_size=300 \
--num_proc_bsearch=8 \
--num_conv_layers=2 \
--num_rnn_layers=3 \
--rnn_layer_size=1024 \
--alpha=1.4 \
--beta=2.4 \
--cutoff_prob=0.99 \
--cutoff_top_n=40 \
--use_gru=False \
--use_gpu=True \
--share_rnn_weights=False \
--infer_manifest='data/aishell/manifest.test' \
--mean_std_path='models/aishell/mean_std.npz' \
--vocab_path='models/aishell/vocab.txt' \
--model_path='models/aishell/params.tar.gz' \
--lang_model_path='models/lm/zh_giga.no_cna_cmn.prune01244.klm' \
--decoding_method='ctc_beam_search' \
--error_rate_type='cer' \
--specgram_type='linear'
then
    echo "Failed in inference!" >&2
    exit 1
fi

exit 0