PaddlePaddle / DeepSpeech
Commit 7c7e17e2
Authored on Jun 29, 2017 by Yibing Liu

add deploy.py

Parent: 2f15a787
Showing 1 changed file with 194 additions and 0 deletions

deploy.py  0 → 100644  (+194, -0)
"""Deployment for DeepSpeech2 model."""
from
__future__
import
absolute_import
from
__future__
import
division
from
__future__
import
print_function
import
argparse
import
gzip
import
distutils.util
import
multiprocessing
import
paddle.v2
as
paddle
from
data_utils.data
import
DataGenerator
from
model
import
deep_speech2
from
swig_ctc_beam_search_decoder
import
*
from
swig_scorer
import
Scorer
from
error_rate
import
wer
import
utils
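
# NOTE (assumption from the module names): swig_ctc_beam_search_decoder and
# swig_scorer look like SWIG-generated Python wrappers around C++ decoder and
# scorer implementations, so they presumably have to be compiled before this
# script can run.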

parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
    "--num_samples",
    default=100,
    type=int,
    help="Number of samples for inference. (default: %(default)s)")
parser.add_argument(
    "--num_conv_layers",
    default=2,
    type=int,
    help="Convolution layer number. (default: %(default)s)")
parser.add_argument(
    "--num_rnn_layers",
    default=3,
    type=int,
    help="RNN layer number. (default: %(default)s)")
parser.add_argument(
    "--rnn_layer_size",
    default=512,
    type=int,
    help="RNN layer cell number. (default: %(default)s)")
parser.add_argument(
    "--use_gpu",
    default=True,
    type=distutils.util.strtobool,
    help="Use gpu or not. (default: %(default)s)")
parser.add_argument(
    "--num_threads_data",
    default=multiprocessing.cpu_count(),
    type=int,
    help="Number of cpu threads for preprocessing data. (default: %(default)s)")
parser.add_argument(
    "--mean_std_filepath",
    default='mean_std.npz',
    type=str,
    help="Manifest path for normalizer. (default: %(default)s)")
parser.add_argument(
    "--decode_manifest_path",
    default='datasets/manifest.test',
    type=str,
    help="Manifest path for decoding. (default: %(default)s)")
parser.add_argument(
    "--model_filepath",
    default='ds2_new_models_0628/params.pass-51.tar.gz',
    type=str,
    help="Model filepath. (default: %(default)s)")
parser.add_argument(
    "--vocab_filepath",
    default='datasets/vocab/eng_vocab.txt',
    type=str,
    help="Vocabulary filepath. (default: %(default)s)")
parser.add_argument(
    "--decode_method",
    default='beam_search',
    type=str,
    help="Method for ctc decoding: best_path or beam_search. "
    "(default: %(default)s)")
parser.add_argument(
    "--beam_size",
    default=500,
    type=int,
    help="Width for beam search decoding. (default: %(default)d)")
parser.add_argument(
    "--num_results_per_sample",
    default=1,
    type=int,
    help="Number of outputs per sample in beam search. (default: %(default)d)")
parser.add_argument(
    "--language_model_path",
    default="lm/data/en.00.UNKNOWN.klm",
    type=str,
    help="Path for language model. (default: %(default)s)")
parser.add_argument(
    "--alpha",
    default=0.26,
    type=float,
    help="Parameter associated with language model. (default: %(default)f)")
parser.add_argument(
    "--beta",
    default=0.1,
    type=float,
    help="Parameter associated with word count. (default: %(default)f)")
parser.add_argument(
    "--cutoff_prob",
    default=0.99,
    type=float,
    help="The cutoff probability of pruning "
    "in beam search. (default: %(default)f)")
args = parser.parse_args()


def infer():
    """Deployment for DeepSpeech2."""
    # initialize data generator
    data_generator = DataGenerator(
        vocab_filepath=args.vocab_filepath,
        mean_std_filepath=args.mean_std_filepath,
        augmentation_config='{}',
        num_threads=args.num_threads_data)

    # create network config
    # paddle.data_type.dense_array is used for variable batch input.
    # The size 161 * 161 is only a placeholder value and the real shape
    # of the input batch data will be induced during training.
    audio_data = paddle.layer.data(
        name="audio_spectrogram",
        type=paddle.data_type.dense_array(161 * 161))
    text_data = paddle.layer.data(
        name="transcript_text",
        type=paddle.data_type.integer_value_sequence(data_generator.vocab_size))
    output_probs = deep_speech2(
        audio_data=audio_data,
        text_data=text_data,
        dict_size=data_generator.vocab_size,
        num_conv_layers=args.num_conv_layers,
        num_rnn_layers=args.num_rnn_layers,
        rnn_size=args.rnn_layer_size,
        is_inference=True)

    # load parameters
    parameters = paddle.parameters.Parameters.from_tar(
        gzip.open(args.model_filepath))

    # prepare infer data
    batch_reader = data_generator.batch_reader_creator(
        manifest_path=args.decode_manifest_path,
        batch_size=args.num_samples,
        min_batch_size=1,
        sortagrad=False,
        shuffle_method=None)
    infer_data = batch_reader().next()

    # run inference
    infer_results = paddle.infer(
        output_layer=output_probs, parameters=parameters, input=infer_data)
    num_steps = len(infer_results) // len(infer_data)
    probs_split = [
        infer_results[i * num_steps:(i + 1) * num_steps]
        for i in xrange(len(infer_data))
    ]
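    # NOTE (assumption about paddle.v2 behavior): paddle.infer appears to
    # return one flat array of per-timestep probability rows for the whole
    # batch, with every utterance padded to the same length. E.g. with 2
    # utterances and 6 rows, num_steps = 3 and utterance i takes rows
    # [i * 3:(i + 1) * 3].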

    # target transcriptions
    target_transcription = [
        ''.join(
            [data_generator.vocab_list[index] for index in infer_data[i][1]])
        for i, probs in enumerate(probs_split)
    ]

    ext_scorer = Scorer(args.alpha, args.beta, args.language_model_path)
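    # NOTE (assumption, based on the usual DeepSpeech2 decoding objective):
    # the external scorer presumably ranks beam candidates by roughly
    #     log p_ctc(y|x) + alpha * log p_lm(y) + beta * word_count(y)
    # so --alpha weights the KenLM language model and --beta rewards longer
    # transcriptions.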

    # decode and print
    wer_sum, wer_counter = 0, 0
    for i, probs in enumerate(probs_split):
        beam_result = ctc_beam_search_decoder(
            probs.tolist(),
            args.beam_size,
            data_generator.vocab_list,
            len(data_generator.vocab_list),
            args.cutoff_prob,
            ext_scorer)
        print("\nTarget Transcription:\t%s" % target_transcription[i])
        print("Beam %d: %f\t%s" % (0, beam_result[0][0], beam_result[0][1]))
        wer_cur = wer(target_transcription[i], beam_result[0][1])
        wer_sum += wer_cur
        wer_counter += 1
        print("cur wer = %f , average wer = %f" %
              (wer_cur, wer_sum / wer_counter))


def main():
    utils.print_arguments(args)
    paddle.init(use_gpu=args.use_gpu, trainer_count=1)
    infer()


if __name__ == '__main__':
    main()
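
A minimal usage sketch, assuming the default model, mean/std, manifest,
vocabulary, and KenLM files exist at the paths above (these are only the
script's default values, not guaranteed artifacts):

    python deploy.py --use_gpu=True --num_samples=100 \
        --decode_method='beam_search' --beam_size=500 \
        --language_model_path='lm/data/en.00.UNKNOWN.klm'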