PaddlePaddle / DeepSpeech
Commit ae7ef792
Authored on Sep 10, 2017 by Xinghai Sun

Rename some folders and update examples.

Parent: a00a436b

Showing 21 changed files with 209 additions and 40 deletions.
data/librispeech/librispeech.py        +1    -1
data/tiny/tiny.py                      +126  -0
examples/librispeech/prepare_data.sh   +1    -1
examples/tiny/run_data.sh              +45   -0
examples/tiny/run_infer.sh             +6    -6
examples/tiny/run_test.sh              +0    -0
examples/tiny/run_train.sh             +10   -10
examples/tiny/run_tune.sh              +0    -0
infer.py                               +3    -3
model_utils/__init__.py                +0    -0
model_utils/decoder.py                 +2    -0
model_utils/lm_scorer.py               +0    -0
model_utils/model.py                   +4    -3
model_utils/network.py                 +0    -0
model_utils/tests/test_decoders.py     +1    -1
models/__init__.py                     +0    -0
models/lm/download_en.sh               +0    -3
test.py                                +3    -3
tools/build_vocab.py                   +2    -4
tools/tune.py                          +3    -3
train.py                               +2    -2
data/librispeech/librispeech.py
@@ -41,7 +41,7 @@ MD5_TRAIN_OTHER_500 = "d1a0fd59409feb2c614ce4d30c387708"
 parser = argparse.ArgumentParser(description=__doc__)
 parser.add_argument(
     "--target_dir",
-    default=DATA_HOME + "/Libri",
+    default=DATA_HOME + "/libri",
     type=str,
     help="Directory to save the dataset. (default: %(default)s)")
 parser.add_argument(
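Only the letter case of the default download directory changes here ("/Libri" becomes "/libri"). For orientation, a typical invocation under the new default, using the same flags the example scripts pass to librispeech.py elsewhere in this commit (exact paths are illustrative):

    # download LibriSpeech and write manifest files under data/librispeech/
    python data/librispeech/librispeech.py \
        --manifest_prefix='data/librispeech/manifest' \
        --full_download='True' \
        --target_dir=$HOME/.cache/paddle/dataset/speech/libri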
data/tiny/tiny.py (new file, 0 → 100644)

"""Prepare Librispeech ASR datasets.

Download, unpack and create manifest files.
Manifest file is a json-format file with each line containing the
meta data (i.e. audio filepath, transcript and audio duration)
of each audio file in the data set.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import distutils.util
import os
import sys
import tarfile
import argparse
import soundfile
import json
import codecs
from paddle.v2.dataset.common import md5file

DATA_HOME = os.path.expanduser('~/.cache/paddle/dataset/speech')

URL_ROOT = "http://www.openslr.org/resources/12"
URL_DEV_CLEAN = URL_ROOT + "/dev-clean.tar.gz"
MD5_DEV_CLEAN = "42e2234ba48799c1f50f24a7926300a1"

parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
    "--target_dir",
    default=DATA_HOME + "/tiny",
    type=str,
    help="Directory to save the dataset. (default: %(default)s)")
parser.add_argument(
    "--manifest_prefix",
    default="manifest",
    type=str,
    help="Filepath prefix for output manifests. (default: %(default)s)")
args = parser.parse_args()


def download(url, md5sum, target_dir):
    """
    Download file from url to target_dir, and check md5sum.
    """
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    filepath = os.path.join(target_dir, url.split("/")[-1])
    if not (os.path.exists(filepath) and md5file(filepath) == md5sum):
        print("Downloading %s ..." % url)
        os.system("wget -c " + url + " -P " + target_dir)
        print("\nMD5 Checksum %s ..." % filepath)
        if not md5file(filepath) == md5sum:
            raise RuntimeError("MD5 checksum failed.")
    else:
        print("File exists, skip downloading. (%s)" % filepath)
    return filepath


def unpack(filepath, target_dir):
    """
    Unpack the file to the target_dir.
    """
    print("Unpacking %s ..." % filepath)
    tar = tarfile.open(filepath)
    tar.extractall(target_dir)
    tar.close()


def create_manifest(data_dir, manifest_path):
    """
    Create a manifest json file summarizing the data set, with each line
    containing the meta data (i.e. audio filepath, transcription text, audio
    duration) of each audio file within the data set.
    """
    print("Creating manifest %s ..." % manifest_path)
    json_lines = []
    for subfolder, _, filelist in sorted(os.walk(data_dir)):
        text_filelist = [
            filename for filename in filelist if filename.endswith('trans.txt')
        ]
        if len(text_filelist) > 0:
            text_filepath = os.path.join(data_dir, subfolder, text_filelist[0])
            for line in open(text_filepath):
                segments = line.strip().split()
                text = ' '.join(segments[1:]).lower()
                audio_filepath = os.path.join(data_dir, subfolder,
                                              segments[0] + '.flac')
                audio_data, samplerate = soundfile.read(audio_filepath)
                duration = float(len(audio_data)) / samplerate
                json_lines.append(
                    json.dumps({
                        'audio_filepath': audio_filepath,
                        'duration': duration,
                        'text': text
                    }))
    with codecs.open(manifest_path, 'w', 'utf-8') as out_file:
        for line in json_lines:
            out_file.write(line + '\n')


def prepare_dataset(url, md5sum, target_dir, manifest_path):
    """
    Download, unpack and create summary manifest file.
    """
    if not os.path.exists(os.path.join(target_dir, "LibriSpeech")):
        # download
        filepath = download(url, md5sum, target_dir)
        # unpack
        unpack(filepath, target_dir)
    else:
        print("Skip downloading and unpacking. Data already exists in %s." %
              target_dir)
    # create manifest json file
    create_manifest(target_dir, manifest_path)


def main():
    prepare_dataset(
        url=URL_DEV_CLEAN,
        md5sum=MD5_DEV_CLEAN,
        target_dir=os.path.join(args.target_dir, "dev-clean"),
        manifest_path=args.manifest_prefix + ".dev-clean")


if __name__ == '__main__':
    main()
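The new data/tiny/tiny.py mirrors librispeech.py but fetches only the dev-clean split and writes a single manifest. A minimal usage sketch (the manifest line shown is illustrative, not output captured from this commit):

    # download dev-clean into the default target dir, write manifest.dev-clean
    python data/tiny/tiny.py \
        --manifest_prefix='data/tiny/manifest' \
        --target_dir=$HOME/.cache/paddle/dataset/speech/tiny
    # each manifest line is a JSON object of the form built above, roughly:
    # {"audio_filepath": "/path/to/1272-128104-0000.flac", "duration": 5.9, "text": "..."}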
examples/librispeech/prepare_data.sh
@@ -16,7 +16,7 @@ fi
 cat data/librispeech/manifest.train* | shuf > data/librispeech/manifest.train
 
-# build vocabulary (for English data, we can just skip this)
+# build vocabulary (can be skipped for English, as already provided)
 # python tools/build_vocab.py \
 # --count_threshold=0 \
 # --vocab_path='data/librispeech/eng_vocab.txt' \
examples/librispeech_tiny/prepare_data.sh → examples/tiny/run_data.sh
@@ -3,32 +3,38 @@
 pushd ../..
 
 # download data, generate manifests
-python data/librispeech/librispeech.py \
---manifest_prefix='data/librispeech/manifest' \
---full_download='True' \
---target_dir='~/.cache/paddle/dataset/speech/Libri'
+python data/tiny/tiny.py \
+--manifest_prefix='data/tiny/manifest' \
+--target_dir=$HOME'/.cache/paddle/dataset/speech/tiny'
 
 if [ $? -ne 0 ]; then
     echo "Prepare LibriSpeech failed. Terminated."
     exit 1
 fi
 
-cat data/librispeech/manifest.train* | shuf > data/librispeech/manifest.train
+cat data/tiny/manifest.dev-clean | head -n 32 > data/tiny/manifest.train
+cat data/tiny/manifest.dev-clean | head -n 48 | tail -n 16 > data/tiny/manifest.dev
+cat data/tiny/manifest.dev-clean | head -n 64 | tail -n 16 > data/tiny/manifest.test
 
-# build vocabulary (for English data, we can just skip this)
-# python tools/build_vocab.py \
-# --count_threshold=0 \
-# --vocab_path='data/librispeech/eng_vocab.txt' \
-# --manifest_paths='data/librispeech/manifeset.train'
+# build vocabulary
+python tools/build_vocab.py \
+--count_threshold=0 \
+--vocab_path='data/tiny/vocab.txt' \
+--manifest_paths='data/tiny/manifest.train'
+
+if [ $? -ne 0 ]; then
+    echo "Build vocabulary failed. Terminated."
+    exit 1
+fi
 
 # compute mean and stddev for normalizer
 python tools/compute_mean_std.py \
---manifest_path='data/librispeech/manifest.train' \
---num_samples=2000 \
+--manifest_path='data/tiny/manifest.train' \
+--num_samples=32 \
 --specgram_type='linear' \
---output_path='data/librispeech/mean_std.npz'
+--output_path='data/tiny/mean_std.npz'
 
 if [ $? -ne 0 ]; then
     echo "Compute mean and stddev failed. Terminated."
@@ -36,4 +42,4 @@ if [ $? -ne 0 ]; then
 fi
 
-echo "LibriSpeech Data preparation done."
+echo "Tiny data preparation done."
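The three head/tail pipelines above carve the first 64 lines of manifest.dev-clean into disjoint subsets: lines 1-32 become the train manifest, 33-48 the dev manifest, and 49-64 the test manifest. A quick sanity check, not part of the commit:

    # expect 32, 16 and 16 lines respectively once run_data.sh has finished
    wc -l data/tiny/manifest.train data/tiny/manifest.dev data/tiny/manifest.test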
examples/librispeech_tiny/run_infer.sh → examples/tiny/run_infer.sh
@@ -4,7 +4,7 @@ pushd ../..
 CUDA_VISIBLE_DEVICES=0 \
 python -u infer.py \
---num_samples=10 \
+--num_samples=4 \
 --trainer_count=1 \
 --beam_size=500 \
 --num_proc_bsearch=12 \
@@ -17,11 +17,11 @@ python -u infer.py \
 --use_gru=False \
 --use_gpu=True \
 --share_rnn_weights=True \
---infer_manifest='data/librispeech/manifest.dev-clean' \
---mean_std_path='data/librispeech/mean_std.npz' \
---vocab_path='data/librispeech/eng_vocab.txt' \
---model_path='checkpoints/params.latest.tar.gz' \
---lang_model_path='lm/data/common_crawl_00.prune01111.trie.klm' \
+--infer_manifest='data/tiny/manifest.train' \
+--mean_std_path='data/tiny/mean_std.npz' \
+--vocab_path='data/tiny/vocab.txt' \
+--model_path='checkpoints/params.pass-14.tar.gz' \
+--lang_model_path='models/lm/common_crawl_00.prune01111.trie.klm' \
 --decoding_method='ctc_beam_search' \
 --error_rate_type='wer' \
 --specgram_type='linear'
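Because the script starts with pushd ../.., it is meant to be launched from its own directory. A minimal sketch, assuming training has already produced the checkpoints/params.pass-14.tar.gz referenced above:

    cd examples/tiny
    sh run_infer.sh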
examples/librispeech_tiny/run_test.sh → examples/tiny/run_test.sh
File moved.
examples/librispeech_tiny/run_train.sh → examples/tiny/run_train.sh
@@ -2,17 +2,17 @@
 pushd ../..
 
-CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 \
+CUDA_VISIBLE_DEVICES=0,1 \
 python -u train.py \
---batch_size=256 \
---trainer_count=8 \
---num_passes=50 \
---num_proc_data=12 \
+--batch_size=2 \
+--trainer_count=1 \
+--num_passes=10 \
+--num_proc_data=1 \
 --num_conv_layers=2 \
 --num_rnn_layers=3 \
 --rnn_layer_size=2048 \
 --num_iter_print=100 \
---learning_rate=5e-4 \
+--learning_rate=5e-5 \
 --max_duration=27.0 \
 --min_duration=0.0 \
 --use_sortagrad=True \
@@ -20,10 +20,10 @@ python -u train.py \
 --use_gpu=True \
 --is_local=True \
 --share_rnn_weights=True \
---train_manifest='data/librispeech/manifest.train' \
---dev_manifest='data/librispeech/manifest.dev' \
---mean_std_path='data/librispeech/mean_std.npz' \
---vocab_path='data/librispeech/eng_vocab.txt' \
+--train_manifest='data/tiny/manifest.train' \
+--dev_manifest='data/tiny/manifest.train' \
+--mean_std_path='data/tiny/mean_std.npz' \
+--vocab_path='data/tiny/vocab.txt' \
 --output_model_dir='./checkpoints' \
 --augment_conf_path='conf/augmentation.config' \
 --specgram_type='linear' \
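The tiny configuration scales the job down to 2 GPUs, batch size 2, 10 passes, and a 5e-5 learning rate, and deliberately reuses manifest.train as the dev manifest. A back-of-envelope check, assuming the 32-line manifest.train produced by run_data.sh:

    # 32 utterances / batch_size 2 = 16 iterations per pass; 16 * 10 passes = 160 total
    echo "per pass: $((32 / 2)), total: $((32 / 2 * 10))"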
examples/librispeech_tiny/run_tune.sh → examples/tiny/run_tune.sh
File moved.
infer.py
@@ -7,7 +7,7 @@ import argparse
 import functools
 import paddle.v2 as paddle
 from data_utils.data import DataGenerator
-from models.model import DeepSpeech2Model
+from model_utils.model import DeepSpeech2Model
 from utils.error_rate import wer, cer
 from utils.utility import add_arguments, print_arguments
@@ -35,10 +35,10 @@ add_arg('mean_std_path', str,
         'data/librispeech/mean_std.npz',
         "Filepath of normalizer's mean & std.")
 add_arg('vocab_path',       str,
-        'data/librispeech/eng_vocab.txt',
+        'data/librispeech/vocab.txt',
         "Filepath of vocabulary.")
 add_arg('lang_model_path',  str,
-        'lm/data/common_crawl_00.prune01111.trie.klm',
+        'model_zoo/lm/common_crawl_00.prune01111.trie.klm',
         "Filepath for language model.")
 add_arg('model_path',       str,
         './checkpoints/params.latest.tar.gz',
lm/__init__.py → model_utils/__init__.py
File moved.
models/decoder.py → model_utils/decoder.py
@@ -180,6 +180,8 @@ def ctc_beam_search_decoder(probs_seq,
                 prob = prob * ext_scoring_func(result)
             log_prob = log(prob)
             beam_result.append((log_prob, result))
+        else:
+            beam_result.append((float('-inf'), ''))
 
     ## output top beam_size decoding results
     beam_result = sorted(beam_result, key=lambda asd: asd[0], reverse=True)
lm/lm_scorer.py → model_utils/lm_scorer.py
File moved.
models/model.py → model_utils/model.py
@@ -8,9 +8,10 @@ import os
 import time
 import gzip
 import paddle.v2 as paddle
-from lm.lm_scorer import LmScorer
-from models.decoder import ctc_greedy_decoder, ctc_beam_search_decoder
-from models.network import deep_speech_v2_network
+from model_utils.lm_scorer import LmScorer
+from model_utils.decoder import ctc_greedy_decoder, ctc_beam_search_decoder
+from model_utils.decoder import ctc_beam_search_decoder_batch
+from model_utils.network import deep_speech_v2_network
 
 
 class DeepSpeech2Model(object):
models/network.py → model_utils/network.py
File moved.
models/tests/test_decoders.py → model_utils/tests/test_decoders.py
@@ -4,7 +4,7 @@ from __future__ import division
 from __future__ import print_function
 import unittest
-from models import decoder
+from model_utils import decoder
 
 
 class TestDecoders(unittest.TestCase):
models/__init__.py
File deleted (100644 → 0).
lm/run.sh → models/lm/download_en.sh
@@ -14,6 +14,3 @@ if [ $MD5 != $md5_tmp ]; then
     echo "Fail to download the language model!"
     exit 1
 fi
test.py
@@ -7,7 +7,7 @@ import argparse
 import functools
 import paddle.v2 as paddle
 from data_utils.data import DataGenerator
-from models.model import DeepSpeech2Model
+from model_utils.model import DeepSpeech2Model
 from utils.error_rate import wer, cer
 from utils.utility import add_arguments, print_arguments
@@ -36,14 +36,14 @@ add_arg('mean_std_path', str,
         'data/librispeech/mean_std.npz',
         "Filepath of normalizer's mean & std.")
 add_arg('vocab_path',       str,
-        'data/librispeech/eng_vocab.txt',
+        'data/librispeech/vocab.txt',
         "Filepath of vocabulary.")
 add_arg('model_path',       str,
         './checkpoints/params.latest.tar.gz',
         "If None, the training starts from scratch, "
         "otherwise, it resumes from the pre-trained model.")
 add_arg('lang_model_path',  str,
-        'lm/data/common_crawl_00.prune01111.trie.klm',
+        'model_zoo/lm/common_crawl_00.prune01111.trie.klm',
         "Filepath for language model.")
 add_arg('decoding_method',  str,
         'ctc_beam_search',
tools/build_vocab.py
@@ -21,10 +21,8 @@ add_arg = functools.partial(add_arguments, argparser=parser)
 # yapf: disable
 add_arg('count_threshold',  int,    0,  "Truncation threshold for char counts.")
 add_arg('vocab_path',       str,
-        None,
-        "Filepath to write the vocabulary.",
-        nargs='+',
-        required=True)
+        'data/librispeech/vocab.txt',
+        "Filepath to write the vocabulary.")
 add_arg('manifest_paths',   str,
         None,
         "Filepaths of manifests for building vocabulary. "
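With vocab_path now given a real default instead of being a required argument, the script can be invoked with only the flags that differ. A sketch matching the call added to run_data.sh in this commit:

    python tools/build_vocab.py \
        --count_threshold=0 \
        --vocab_path='data/tiny/vocab.txt' \
        --manifest_paths='data/tiny/manifest.train'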
tools/tune.py
@@ -9,7 +9,7 @@ import functools
 import paddle.v2 as paddle
 import _init_paths
 from data_utils.data import DataGenerator
-from models.model import DeepSpeech2Model
+from model_utils.model import DeepSpeech2Model
 from utils.error_rate import wer
 from utils.utility import add_arguments, print_arguments
@@ -41,10 +41,10 @@ add_arg('mean_std_path', str,
         'data/librispeech/mean_std.npz',
         "Filepath of normalizer's mean & std.")
 add_arg('vocab_path',       str,
-        'data/librispeech/eng_vocab.txt',
+        'data/librispeech/vocab.txt',
         "Filepath of vocabulary.")
 add_arg('lang_model_path',  str,
-        'lm/data/common_crawl_00.prune01111.trie.klm',
+        'model_zoo/lm/common_crawl_00.prune01111.trie.klm',
         "Filepath for language model.")
 add_arg('model_path',       str,
         './checkpoints/params.latest.tar.gz',
train.py
@@ -6,7 +6,7 @@ from __future__ import print_function
 import argparse
 import functools
 import paddle.v2 as paddle
-from models.model import DeepSpeech2Model
+from model_utils.model import DeepSpeech2Model
 from data_utils.data import DataGenerator
 from utils.utility import add_arguments, print_arguments
@@ -41,7 +41,7 @@ add_arg('mean_std_path', str,
         'data/librispeech/mean_std.npz',
         "Filepath of normalizer's mean & std.")
 add_arg('vocab_path',       str,
-        'data/librispeech/eng_vocab.txt',
+        'data/librispeech/vocab.txt',
         "Filepath of vocabulary.")
 add_arg('init_model_path',  str,
         None,