diff --git a/deep_speech_2/examples/aishell/run_infer.sh b/deep_speech_2/examples/aishell/run_infer.sh
new file mode 100644
index 0000000000000000000000000000000000000000..332bdbe1b678166448707e82ed56ab82124497f5
--- /dev/null
+++ b/deep_speech_2/examples/aishell/run_infer.sh
@@ -0,0 +1,46 @@
+#! /usr/bin/env bash
+
+pushd ../.. > /dev/null
+
+# download language model
+pushd models/lm > /dev/null
+sh download_lm_ch.sh
+if [ $? -ne 0 ]; then
+    exit 1
+fi
+popd > /dev/null
+
+
+# infer
+CUDA_VISIBLE_DEVICES=0 \
+python -u infer.py \
+--num_samples=10 \
+--trainer_count=1 \
+--beam_size=300 \
+--num_proc_bsearch=8 \
+--num_conv_layers=2 \
+--num_rnn_layers=3 \
+--rnn_layer_size=1024 \
+--alpha=1.4 \
+--beta=2.4 \
+--cutoff_prob=0.99 \
+--cutoff_top_n=40 \
+--use_gru=False \
+--use_gpu=True \
+--share_rnn_weights=False \
+--infer_manifest='data/aishell/manifest.test' \
+--mean_std_path='data/aishell/mean_std.npz' \
+--vocab_path='data/aishell/vocab.txt' \
+--model_path='checkpoints/aishell/params.latest.tar.gz' \
+--lang_model_path='models/lm/zh_giga.no_cna_cmn.prune01244.klm' \
+--decoding_method='ctc_beam_search' \
+--error_rate_type='cer' \
+--specgram_type='linear'
+
+if [ $? -ne 0 ]; then
+    echo "Failed in inference!"
+    exit 1
+fi
+
+
+exit 0
diff --git a/deep_speech_2/examples/aishell/run_infer_golden.sh b/deep_speech_2/examples/aishell/run_infer_golden.sh
new file mode 100644
index 0000000000000000000000000000000000000000..ac79a4dde5d6905e31e424e031f4c28a1c6cffb9
--- /dev/null
+++ b/deep_speech_2/examples/aishell/run_infer_golden.sh
@@ -0,0 +1,55 @@
+#! /usr/bin/env bash
+
+pushd ../.. > /dev/null
+
+# download language model
+pushd models/lm > /dev/null
+sh download_lm_ch.sh
+if [ $? -ne 0 ]; then
+    exit 1
+fi
+popd > /dev/null
+
+
+# download well-trained model
+pushd models/aishell > /dev/null
+sh download_model.sh
+if [ $? -ne 0 ]; then
+    exit 1
+fi
+popd > /dev/null
+
+
+# infer
+CUDA_VISIBLE_DEVICES=0 \
+python -u infer.py \
+--num_samples=10 \
+--trainer_count=1 \
+--beam_size=300 \
+--num_proc_bsearch=8 \
+--num_conv_layers=2 \
+--num_rnn_layers=3 \
+--rnn_layer_size=1024 \
+--alpha=1.4 \
+--beta=2.4 \
+--cutoff_prob=0.99 \
+--cutoff_top_n=40 \
+--use_gru=False \
+--use_gpu=True \
+--share_rnn_weights=False \
+--infer_manifest='data/aishell/manifest.test' \
+--mean_std_path='models/aishell/mean_std.npz' \
+--vocab_path='models/aishell/vocab.txt' \
+--model_path='models/aishell/params.tar.gz' \
+--lang_model_path='models/lm/zh_giga.no_cna_cmn.prune01244.klm' \
+--decoding_method='ctc_beam_search' \
+--error_rate_type='cer' \
+--specgram_type='linear'
+
+if [ $? -ne 0 ]; then
+    echo "Failed in inference!"
+    exit 1
+fi
+
+
+exit 0