PaddlePaddle / DeepSpeech · Commit 764ce624
Authored Sep 25, 2017 by Yibing Liu
clean code in tuning script
Parent: e6e7b132
Showing 1 changed file with 23 additions and 25 deletions.
tools/tune.py (+23, −25)
@@ -17,27 +17,27 @@ from utils.utility import add_arguments, print_arguments
 parser = argparse.ArgumentParser(description=__doc__)
 add_arg = functools.partial(add_arguments, argparser=parser)
 # yapf: disable
 add_arg('num_batches',      int,    -1,     "# of batches tuning on. "
                                             "Default -1, on whole dev set.")
 add_arg('batch_size',       int,    256,    "# of samples per batch.")
 add_arg('trainer_count',    int,    8,      "# of Trainers (CPUs or GPUs).")
 add_arg('beam_size',        int,    500,    "Beam search width.")
 add_arg('num_proc_bsearch', int,    12,     "# of CPUs for beam search.")
 add_arg('num_conv_layers',  int,    2,      "# of convolution layers.")
 add_arg('num_rnn_layers',   int,    3,      "# of recurrent layers.")
 add_arg('rnn_layer_size',   int,    2048,   "# of recurrent cells per layer.")
 add_arg('num_alphas',       int,    45,     "# of alpha candidates for tuning.")
 add_arg('num_betas',        int,    8,      "# of beta candidates for tuning.")
 add_arg('alpha_from',       float,  1.0,    "Where alpha starts tuning from.")
 add_arg('alpha_to',         float,  3.2,    "Where alpha ends tuning with.")
 add_arg('beta_from',        float,  0.1,    "Where beta starts tuning from.")
 add_arg('beta_to',          float,  0.45,   "Where beta ends tuning with.")
 add_arg('cutoff_prob',      float,  1.0,    "Cutoff probability for pruning.")
 add_arg('cutoff_top_n',     int,    40,     "Cutoff number for pruning.")
 add_arg('use_gru',          bool,   False,  "Use GRUs instead of simple RNNs.")
 add_arg('use_gpu',          bool,   True,   "Use GPU or not.")
 add_arg('share_rnn_weights',bool,   True,   "Share input-hidden weights across "
                                             "bi-directional RNNs. Not for GRU.")
 add_arg('tune_manifest',    str,
         'data/librispeech/manifest.dev-clean',
         "Filepath of manifest to tune.")
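All of these options go through add_arg, a functools.partial wrapper that pre-binds the parser so each flag is declared in a single call. The helper itself lives in utils/utility.py and is not part of this diff; a minimal sketch of the pattern, assuming the helper looks roughly like this:

import argparse
import distutils.util
import functools

def add_arguments(argname, type, default, help, argparser, **kwargs):
    # Register one --argname option; bool defaults are parsed with
    # strtobool so "--use_gpu False" behaves as expected on the CLI.
    type = distutils.util.strtobool if type == bool else type
    argparser.add_argument(
        "--" + argname,
        default=default,
        type=type,
        help=help + ' Default: %(default)s.',
        **kwargs)

parser = argparse.ArgumentParser(description="tuning demo")
add_arg = functools.partial(add_arguments, argparser=parser)
add_arg('beam_size', int, 500, "Beam search width.")
args = parser.parse_args(['--beam_size', '200'])
print(args.beam_size)  # 200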
@@ -140,13 +140,11 @@ def tune():
             for target, result in zip(target_transcripts, result_transcripts):
                 err_sum[index] += error_rate_func(target, result)
             err_ave[index] = err_sum[index] / num_ins
-            # print("alpha = %f, beta = %f, WER = %f" %
-            #       (alpha, beta, err_ave[index]))
             if index % 2 == 0:
                 sys.stdout.write('.')
                 sys.stdout.flush()

-        # output on-line tuning result at the the end of current batch
+        # output on-line tuning result at the end of current batch
         err_ave_min = min(err_ave)
         min_index = err_ave.index(err_ave_min)
         print("\nBatch %d [%d/?], current opt (alpha, beta) = (%s, %s), "
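This loop is the core of the tuner: alpha (the language-model weight) and beta (the word-insertion weight) are swept over a num_alphas × num_betas grid, and err_ave[index] keeps a running average of the error rate for grid point index over every utterance decoded so far. A self-contained sketch of that bookkeeping, with a synthetic error_rate standing in for the real decode-and-score step (in tune.py it comes from beam-search decoding the dev set):

import numpy as np

# Candidate grid, matching the script's defaults:
# 45 alphas in [1.0, 3.2] x 8 betas in [0.1, 0.45] = 360 points.
params_grid = [(alpha, beta)
               for alpha in np.linspace(1.0, 3.2, 45)
               for beta in np.linspace(0.1, 0.45, 8)]

err_sum = [0.0] * len(params_grid)  # cumulative error per grid point
err_ave = [0.0] * len(params_grid)  # running average per grid point
num_ins = 0                         # utterances processed so far

def error_rate(alpha, beta):
    # Synthetic stand-in for decoding an utterance with (alpha, beta)
    # and scoring it; a bowl with its minimum near alpha=2.0, beta=0.3.
    return (alpha - 2.0) ** 2 + (beta - 0.3) ** 2

num_batches, batch_size = 4, 16
for _ in range(num_batches):
    num_ins += batch_size
    for index, (alpha, beta) in enumerate(params_grid):
        for _ in range(batch_size):  # one scored utterance at a time
            err_sum[index] += error_rate(alpha, beta)
        err_ave[index] = err_sum[index] / num_ins

err_ave_min = min(err_ave)
min_index = err_ave.index(err_ave_min)
print("opt (alpha, beta) = (%.4f, %.4f), err = %.4f"
      % (params_grid[min_index] + (err_ave_min,)))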
@@ -156,7 +154,7 @@ def tune():
               args.error_rate_type, err_ave_min))
         cur_batch += 1

-    # output WER/CER at every point
+    # output WER/CER at every (alpha, beta)
     print("\nFinal %s:\n" % args.error_rate_type)
     for index in xrange(len(params_grid)):
         print("(alpha, beta) = (%s, %s), [%s] = %f"