PaddlePaddle / DeepSpeech
Commit 913b2300
Authored on Sep 22, 2021 by Hui Zhang

nprocs 0 for cpu, other for gpu

Parent: 80eb6b7f
Showing 50 changed files with 43 additions and 269 deletions (+43, -269).
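The message summarizes the new convention: --nprocs 0 runs training on CPU in the current process, while any positive value selects GPU and spawns that many worker processes. As a rough, self-contained sketch of the dispatch pattern the changed entry points converge on (names mirror the diff below; the training body is a placeholder, not the repository's code):

    import paddle
    import paddle.distributed as dist


    def main_sp(config, args):
        # Single-process training body (placeholder); the device follows nprocs.
        paddle.set_device('gpu' if args.nprocs > 0 else 'cpu')
        print("worker running on", paddle.get_device())


    def main(config, args):
        # nprocs > 0: spawn that many GPU workers; nprocs == 0: run once on CPU.
        if args.nprocs > 0:
            dist.spawn(main_sp, args=(config, args), nprocs=args.nprocs)
        else:
            main_sp(config, args)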
deepspeech/exps/deepspeech2/bin/train.py  +1 -1
deepspeech/exps/deepspeech2/model.py  +2 -2
deepspeech/exps/u2/bin/train.py  +1 -1
deepspeech/exps/u2/model.py  +1 -1
deepspeech/exps/u2_kaldi/bin/train.py  +1 -1
deepspeech/exps/u2_kaldi/model.py  +1 -1
deepspeech/exps/u2_st/bin/train.py  +1 -1
deepspeech/exps/u2_st/model.py  +1 -1
deepspeech/training/cli.py  +1 -7
deepspeech/training/trainer.py  +3 -3
examples/aishell/s0/local/export.sh  +0 -6
examples/aishell/s0/local/test.sh  +1 -6
examples/aishell/s0/local/test_export.sh  +1 -6
examples/aishell/s0/local/train.sh  +0 -6
examples/aishell/s1/local/align.sh  +1 -6
examples/aishell/s1/local/export.sh  +0 -6
examples/aishell/s1/local/test.sh  +2 -9
examples/aishell/s1/local/train.sh  +0 -6
examples/callcenter/s1/local/align.sh  +1 -7
examples/callcenter/s1/local/export.sh  +0 -6
examples/callcenter/s1/local/test.sh  +2 -8
examples/callcenter/s1/local/train.sh  +0 -5
examples/librispeech/s0/local/export.sh  +0 -6
examples/librispeech/s0/local/test.sh  +1 -6
examples/librispeech/s0/local/train.sh  +0 -7
examples/librispeech/s1/local/align.sh  +1 -6
examples/librispeech/s1/local/export.sh  +0 -6
examples/librispeech/s1/local/test.sh  +2 -9
examples/librispeech/s1/local/train.sh  +3 -7
examples/librispeech/s2/local/align.sh  +1 -6
examples/librispeech/s2/local/export.sh  +0 -6
examples/librispeech/s2/local/train.sh  +0 -7
examples/ted_en_zh/t0/local/test.sh  +1 -6
examples/ted_en_zh/t0/local/train.sh  +0 -7
examples/timit/s1/local/align.sh  +1 -6
examples/timit/s1/local/export.sh  +0 -6
examples/timit/s1/local/test.sh  +2 -9
examples/timit/s1/local/train.sh  +0 -7
examples/tiny/s0/local/export.sh  +0 -6
examples/tiny/s0/local/test.sh  +1 -6
examples/tiny/s0/local/train.sh  +0 -7
examples/tiny/s1/local/align.sh  +1 -6
examples/tiny/s1/local/export.sh  +0 -6
examples/tiny/s1/local/test.sh  +2 -8
examples/tiny/s1/local/train.sh  +0 -6
examples/v18_to_v2x/deepspeech2x/model.py  +1 -1
examples/v18_to_v2x/exp_aishell/local/test.sh  +1 -6
examples/v18_to_v2x/exp_baidu_en8k/local/test.sh  +1 -6
examples/v18_to_v2x/exp_librispeech/local/test.sh  +1 -6
tests/chains/ds2_params_lite_train_infer.txt  +2 -2
deepspeech/exps/deepspeech2/bin/train.py
@@ -27,7 +27,7 @@ def main_sp(config, args):
 def main(config, args):
-    if args.device == "gpu" and args.nprocs > 1:
+    if args.nprocs > 0:
         dist.spawn(main_sp, args=(config, args), nprocs=args.nprocs)
     else:
         main_sp(config, args)
deepspeech/exps/deepspeech2/model.py
@@ -403,7 +403,7 @@ class DeepSpeech2Tester(DeepSpeech2Trainer):
     def setup(self):
         """Setup the experiment.
         """
-        paddle.set_device(self.args.device)
+        paddle.set_device('gpu' if self.args.nprocs > 0 else 'cpu')
         self.setup_output_dir()
         self.setup_checkpointer()
@@ -635,7 +635,7 @@ class DeepSpeech2ExportTester(DeepSpeech2Tester):
     def setup(self):
         """Setup the experiment.
         """
-        paddle.set_device(self.args.device)
+        paddle.set_device('gpu' if self.args.nprocs > 0 else 'cpu')
         self.setup_output_dir()
deepspeech/exps/u2/bin/train.py
@@ -32,7 +32,7 @@ def main_sp(config, args):
 def main(config, args):
-    if args.device == "gpu" and args.nprocs > 1:
+    if args.nprocs > 0:
         dist.spawn(main_sp, args=(config, args), nprocs=args.nprocs)
     else:
         main_sp(config, args)
deepspeech/exps/u2/model.py
@@ -653,7 +653,7 @@ class U2Tester(U2Trainer):
     def setup(self):
         """Setup the experiment.
         """
-        paddle.set_device(self.args.device)
+        paddle.set_device('gpu' if self.args.nprocs > 0 else 'cpu')
         self.setup_output_dir()
         self.setup_checkpointer()
deepspeech/exps/u2_kaldi/bin/train.py
@@ -36,7 +36,7 @@ def main_sp(config, args):
 def main(config, args):
-    if args.device == "gpu" and args.nprocs > 1:
+    if args.nprocs > 0:
         dist.spawn(main_sp, args=(config, args), nprocs=args.nprocs)
     else:
         main_sp(config, args)
deepspeech/exps/u2_kaldi/model.py
@@ -637,7 +637,7 @@ class U2Tester(U2Trainer):
     def setup(self):
         """Setup the experiment.
         """
-        paddle.set_device(self.args.device)
+        paddle.set_device('gpu' if self.args.nprocs > 0 else 'cpu')
         self.setup_output_dir()
         self.setup_checkpointer()
deepspeech/exps/u2_st/bin/train.py
@@ -30,7 +30,7 @@ def main_sp(config, args):
 def main(config, args):
-    if args.device == "gpu" and args.nprocs > 1:
+    if args.nprocs > 0:
         dist.spawn(main_sp, args=(config, args), nprocs=args.nprocs)
     else:
         main_sp(config, args)
deepspeech/exps/u2_st/model.py
@@ -661,7 +661,7 @@ class U2STTester(U2STTrainer):
     def setup(self):
         """Setup the experiment.
         """
-        paddle.set_device(self.args.device)
+        paddle.set_device('gpu' if self.args.nprocs > 0 else 'cpu')
         self.setup_output_dir()
         self.setup_checkpointer()
deepspeech/training/cli.py
@@ -30,7 +30,7 @@ def default_argument_parser():
     The ``--checkpoint_path`` specifies the checkpoint to load from.
-    The ``--device`` and ``--nprocs`` specifies how to run the training.
+    The ``--nprocs`` specifies how to run the training.
 
     See Also
@@ -51,12 +51,6 @@ def default_argument_parser():
         default=None,
         help="seed to use for paddle, np and random. None or 0 for random, else set seed.")
-    train_group.add_argument(
-        "--device",
-        type=str,
-        default='gpu',
-        choices=["cpu", "gpu"],
-        help="device cpu and gpu are supported.")
     train_group.add_argument(
         "--nprocs",
         type=int,
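With --device removed from the parser, the device choice follows from --nprocs alone. A standalone argparse sketch of the reduced interface (the default value and help text here are illustrative, not copied from cli.py):

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--nprocs", type=int, default=1,
                        help="number of worker processes; 0 means run on CPU")
    args = parser.parse_args(["--nprocs", "2"])

    # The rule this commit applies everywhere: positive nprocs -> GPU, zero -> CPU.
    device = 'gpu' if args.nprocs > 0 else 'cpu'
    print(args.nprocs, device)  # 2 gpu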
deepspeech/training/trainer.py
@@ -86,7 +86,7 @@ class Trainer():
         >>> config.merge_from_list(args.opts)
         >>> config.freeze()
         >>>
-        >>> if args.nprocs > 1 and args.device == "gpu":
+        >>> if args.nprocs > 0:
         >>>     dist.spawn(main_sp, args=(config, args), nprocs=args.nprocs)
         >>> else:
         >>>     main_sp(config, args)
@@ -119,7 +119,7 @@ class Trainer():
     def setup(self):
         """Setup the experiment.
         """
-        paddle.set_device(self.args.device)
+        paddle.set_device('gpu' if self.args.nprocs > 0 else 'cpu')
         if self.parallel:
             self.init_parallel()
@@ -139,7 +139,7 @@ class Trainer():
         """A flag indicating whether the experiment should run with
        multiprocessing.
        """
-        return self.args.device == "gpu" and self.args.nprocs > 1
+        return self.args.nprocs > 0
 
     def init_parallel(self):
         """Init environment for multiprocess training.
examples/aishell/s0/local/export.sh
@@ -13,13 +13,7 @@ ckpt_path_prefix=$2
 jit_model_export_path=$3
 model_type=$4
 
-device=gpu
-if [ ${ngpu} == 0 ];then
-    device=cpu
-fi
 
 python3 -u ${BIN_DIR}/export.py \
---device ${device} \
 --nproc ${ngpu} \
 --config ${config_path} \
 --checkpoint_path ${ckpt_path_prefix} \
examples/aishell/s0/local/test.sh
@@ -8,10 +8,6 @@ fi
 ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
 echo "using $ngpu gpus..."
 
-device=gpu
-if [ ${ngpu} == 0 ];then
-    device=cpu
-fi
 
 config_path=$1
 ckpt_prefix=$2
 model_type=$3
@@ -23,8 +19,7 @@ if [ $? -ne 0 ]; then
 fi
 
 python3 -u ${BIN_DIR}/test.py \
---device ${device} \
---nproc 1 \
+--nproc ${ngpu} \
 --config ${config_path} \
 --result_file ${ckpt_prefix}.rsl \
 --checkpoint_path ${ckpt_prefix} \
examples/aishell/s0/local/test_export.sh
@@ -8,10 +8,6 @@ fi
 ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
 echo "using $ngpu gpus..."
 
-device=gpu
-if [ ${ngpu} == 0 ];then
-    device=cpu
-fi
 
 config_path=$1
 jit_model_export_path=$2
 model_type=$3
@@ -23,8 +19,7 @@ if [ $? -ne 0 ]; then
 fi
 
 python3 -u ${BIN_DIR}/test_export.py \
---device ${device} \
---nproc 1 \
+--nproc ${ngpu} \
 --config ${config_path} \
 --result_file ${jit_model_export_path}.rsl \
 --export_path ${jit_model_export_path} \
examples/aishell/s0/local/train.sh
@@ -12,11 +12,6 @@ config_path=$1
 ckpt_name=$2
 model_type=$3
 
-device=gpu
-if [ ${ngpu} == 0 ];then
-    device=cpu
-fi
 
 mkdir -p exp
 
 # seed may break model convergence
@@ -26,7 +21,6 @@ if [ ${seed} != 0 ]; then
 fi
 
 python3 -u ${BIN_DIR}/train.py \
---device ${device} \
 --nproc ${ngpu} \
 --config ${config_path} \
 --output exp/${ckpt_name} \
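Each example script derives the worker count from CUDA_VISIBLE_DEVICES (one entry per visible GPU) and passes it straight to --nproc, so an empty device list now yields a CPU run with no separate device flag. A Python equivalent of that shell one-liner, shown only for illustration:

    import os

    # Mirror of: ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
    visible = os.environ.get("CUDA_VISIBLE_DEVICES", "")
    nprocs = len([d for d in visible.split(",") if d.strip()])
    print(nprocs)  # 0 -> CPU run, N > 0 -> N spawned GPU workers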
examples/aishell/s1/local/align.sh
@@ -8,10 +8,6 @@ fi
 ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
 echo "using $ngpu gpus..."
 
-device=gpu
-if [ ${ngpu} == 0 ];then
-    device=cpu
-fi
 
 config_path=$1
 ckpt_prefix=$2
@@ -22,8 +18,7 @@ mkdir -p ${output_dir}
 # align dump in `result_file`
 # .tier, .TextGrid dump in `dir of result_file`
 python3 -u ${BIN_DIR}/alignment.py \
---device ${device} \
---nproc 1 \
+--nproc ${ngpu} \
 --config ${config_path} \
 --result_file ${output_dir}/${type}.align \
 --checkpoint_path ${ckpt_prefix} \
examples/aishell/s1/local/export.sh
@@ -12,13 +12,7 @@ config_path=$1
 ckpt_path_prefix=$2
 jit_model_export_path=$3
 
-device=gpu
-if [ ${ngpu} == 0 ];then
-    device=cpu
-fi
 
 python3 -u ${BIN_DIR}/export.py \
---device ${device} \
 --nproc ${ngpu} \
 --config ${config_path} \
 --checkpoint_path ${ckpt_path_prefix} \
examples/aishell/s1/local/test.sh
@@ -8,11 +8,6 @@ fi
 ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
 echo "using $ngpu gpus..."
 
-device=gpu
-if [ ${ngpu} == 0 ];then
-    device=cpu
-fi
 
 config_path=$1
 ckpt_prefix=$2
@@ -39,8 +34,7 @@ for type in attention ctc_greedy_search; do
     output_dir=${ckpt_prefix}
     mkdir -p ${output_dir}
     python3 -u ${BIN_DIR}/test.py \
-    --device ${device} \
-    --nproc 1 \
+    --nproc ${ngpu} \
     --config ${config_path} \
     --result_file ${output_dir}/${type}.rsl \
     --checkpoint_path ${ckpt_prefix} \
@@ -58,8 +52,7 @@ for type in ctc_prefix_beam_search attention_rescoring; do
     output_dir=${ckpt_prefix}
     mkdir -p ${output_dir}
     python3 -u ${BIN_DIR}/test.py \
-    --device ${device} \
-    --nproc 1 \
+    --nproc ${ngpu} \
     --config ${config_path} \
     --result_file ${output_dir}/${type}.rsl \
     --checkpoint_path ${ckpt_prefix} \
examples/aishell/s1/local/train.sh
@@ -12,11 +12,6 @@ source ${MAIN_ROOT}/utils/parse_options.sh || exit 1;
 ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
 echo "using $ngpu gpus..."
 
-device=gpu
-if [ ${ngpu} == 0 ];then
-    device=cpu
-fi
 
 if [ ${seed} != 0 ]; then
     export FLAGS_cudnn_deterministic=True
     echo "using seed $seed & FLAGS_cudnn_deterministic=True ..."
@@ -34,7 +29,6 @@ mkdir -p exp
 python3 -u ${BIN_DIR}/train.py \
 --seed ${seed} \
---device ${device} \
 --nproc ${ngpu} \
 --config ${config_path} \
 --output exp/${ckpt_name} \
examples/callcenter/s1/local/align.sh
@@ -8,10 +8,6 @@ fi
 ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
 echo "using $ngpu gpus..."
 
-device=gpu
-if [ ${ngpu} == 0 ];then
-    device=cpu
-fi
 
 config_path=$1
 ckpt_prefix=$2
@@ -20,7 +16,6 @@ ckpt_name=$(basename ${ckpt_prefxi})
 mkdir -p exp
 
 batch_size=1
 output_dir=${ckpt_prefix}
 mkdir -p ${output_dir}
@@ -28,8 +23,7 @@ mkdir -p ${output_dir}
 # align dump in `result_file`
 # .tier, .TextGrid dump in `dir of result_file`
 python3 -u ${BIN_DIR}/alignment.py \
---device ${device} \
---nproc 1 \
+--nproc ${ngpu} \
 --config ${config_path} \
 --result_file ${output_dir}/${type}.align \
 --checkpoint_path ${ckpt_prefix} \
examples/callcenter/s1/local/export.sh
@@ -12,13 +12,7 @@ config_path=$1
 ckpt_path_prefix=$2
 jit_model_export_path=$3
 
-device=gpu
-if [ ${ngpu} == 0 ];then
-    device=cpu
-fi
 
 python3 -u ${BIN_DIR}/export.py \
---device ${device} \
 --nproc ${ngpu} \
 --config ${config_path} \
 --checkpoint_path ${ckpt_path_prefix} \
examples/callcenter/s1/local/test.sh
@@ -8,10 +8,6 @@ fi
 ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
 echo "using $ngpu gpus..."
 
-device=gpu
-if [ ${ngpu} == 0 ];then
-    device=cpu
-fi
 
 config_path=$1
 ckpt_prefix=$2
@@ -32,8 +28,7 @@ for type in attention ctc_greedy_search; do
     output_dir=${ckpt_prefix}
     mkdir -p ${output_dir}
     python3 -u ${BIN_DIR}/test.py \
-    --device ${device} \
-    --nproc 1 \
+    --nproc ${ngpu} \
     --config ${config_path} \
     --result_file ${output_dir}/${type}.rsl \
     --checkpoint_path ${ckpt_prefix} \
@@ -51,8 +46,7 @@ for type in ctc_prefix_beam_search attention_rescoring; do
     output_dir=${ckpt_prefix}
     mkdir -p ${output_dir}
     python3 -u ${BIN_DIR}/test.py \
-    --device ${device} \
-    --nproc 1 \
+    --nproc ${ngpu} \
     --config ${config_path} \
     --result_file ${output_dir}/${type}.rsl \
     --checkpoint_path ${ckpt_prefix} \
examples/callcenter/s1/local/train.sh
@@ -11,10 +11,6 @@ echo "using $ngpu gpus..."
 config_path=$1
 ckpt_name=$2
 
-device=gpu
-if [ ${ngpu} == 0 ];then
-    device=cpu
-fi
 echo "using ${device}..."
 
 mkdir -p exp
@@ -26,7 +22,6 @@ if [ ${seed} != 0 ]; then
 fi
 
 python3 -u ${BIN_DIR}/train.py \
---device ${device} \
 --nproc ${ngpu} \
 --config ${config_path} \
 --output exp/${ckpt_name} \
examples/librispeech/s0/local/export.sh
@@ -13,13 +13,7 @@ ckpt_path_prefix=$2
 jit_model_export_path=$3
 model_type=$4
 
-device=gpu
-if [ ${ngpu} == 0 ];then
-    device=cpu
-fi
 
 python3 -u ${BIN_DIR}/export.py \
---device ${device} \
 --nproc ${ngpu} \
 --config ${config_path} \
 --checkpoint_path ${ckpt_path_prefix} \
examples/librispeech/s0/local/test.sh
@@ -8,10 +8,6 @@ fi
 ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
 echo "using $ngpu gpus..."
 
-device=gpu
-if [ ${ngpu} == 0 ];then
-    device=cpu
-fi
 
 config_path=$1
 ckpt_prefix=$2
 model_type=$3
@@ -23,8 +19,7 @@ if [ $? -ne 0 ]; then
 fi
 
 python3 -u ${BIN_DIR}/test.py \
---device ${device} \
---nproc 1 \
+--nproc ${ngpu} \
 --config ${config_path} \
 --result_file ${ckpt_prefix}.rsl \
 --checkpoint_path ${ckpt_prefix} \
examples/librispeech/s0/local/train.sh
@@ -12,12 +12,6 @@ config_path=$1
 ckpt_name=$2
 model_type=$3
 
-device=gpu
-if [ ${ngpu} == 0 ];then
-    device=cpu
-fi
-echo "using ${device}..."
 
 mkdir -p exp
 
 # seed may break model convergence
@@ -27,7 +21,6 @@ if [ ${seed} != 0 ]; then
 fi
 
 python3 -u ${BIN_DIR}/train.py \
---device ${device} \
 --nproc ${ngpu} \
 --config ${config_path} \
 --output exp/${ckpt_name} \
examples/librispeech/s1/local/align.sh
@@ -8,10 +8,6 @@ fi
 ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
 echo "using $ngpu gpus..."
 
-device=gpu
-if [ ${ngpu} == 0 ];then
-    device=cpu
-fi
 
 config_path=$1
 ckpt_prefix=$2
@@ -22,8 +18,7 @@ mkdir -p ${output_dir}
 # align dump in `result_file`
 # .tier, .TextGrid dump in `dir of result_file`
 python3 -u ${BIN_DIR}/alignment.py \
---device ${device} \
---nproc 1 \
+--nproc ${ngpu} \
 --config ${config_path} \
 --result_file ${output_dir}/${type}.align \
 --checkpoint_path ${ckpt_prefix} \
examples/librispeech/s1/local/export.sh
@@ -12,13 +12,7 @@ config_path=$1
 ckpt_path_prefix=$2
 jit_model_export_path=$3
 
-device=gpu
-if [ ${ngpu} == 0 ];then
-    device=cpu
-fi
 
 python3 -u ${BIN_DIR}/export.py \
---device ${device} \
 --nproc ${ngpu} \
 --config ${config_path} \
 --checkpoint_path ${ckpt_path_prefix} \
examples/librispeech/s1/local/test.sh
@@ -8,11 +8,6 @@ fi
 ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
 echo "using $ngpu gpus..."
 
-device=gpu
-if [ ${ngpu} == 0 ];then
-    device=cpu
-fi
 
 config_path=$1
 ckpt_prefix=$2
@@ -38,8 +33,7 @@ for type in attention ctc_greedy_search; do
         batch_size=64
     fi
     python3 -u ${BIN_DIR}/test.py \
-    --device ${device} \
-    --nproc 1 \
+    --nproc ${ngpu} \
     --config ${config_path} \
     --result_file ${ckpt_prefix}.${type}.rsl \
     --checkpoint_path ${ckpt_prefix} \
@@ -55,8 +49,7 @@ for type in ctc_prefix_beam_search attention_rescoring; do
     echo "decoding ${type}"
     batch_size=1
     python3 -u ${BIN_DIR}/test.py \
-    --device ${device} \
-    --nproc 1 \
+    --nproc ${ngpu} \
     --config ${config_path} \
     --result_file ${ckpt_prefix}.${type}.rsl \
     --checkpoint_path ${ckpt_prefix} \
examples/librispeech/s1/local/train.sh
@@ -11,12 +11,6 @@ echo "using $ngpu gpus..."
 config_path=$1
 ckpt_name=$2
 
-device=gpu
-if [ ${ngpu} == 0 ];then
-    device=cpu
-fi
-echo "using ${device}..."
 
 mkdir -p exp
 
 # seed may break model convergence
@@ -25,8 +19,10 @@ if [ ${seed} != 0 ]; then
     export FLAGS_cudnn_deterministic=True
 fi
 
+# export FLAGS_cudnn_exhaustive_search=true
+# export FLAGS_conv_workspace_size_limit=4000
 python3 -u ${BIN_DIR}/train.py \
---device ${device} \
 --nproc ${ngpu} \
 --config ${config_path} \
 --output exp/${ckpt_name} \
examples/librispeech/s2/local/align.sh
@@ -8,10 +8,6 @@ fi
 ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
 echo "using $ngpu gpus..."
 
-device=gpu
-if [ ${ngpu} == 0 ];then
-    device=cpu
-fi
 
 config_path=$1
 dict_path=$2
 ckpt_prefix=$3
@@ -26,8 +22,7 @@ python3 -u ${BIN_DIR}/test.py \
 --model-name 'u2_kaldi' \
 --run-mode 'align' \
 --dict-path ${dict_path} \
---device ${device} \
---nproc 1 \
+--nproc ${ngpu} \
 --config ${config_path} \
 --result-file ${output_dir}/${type}.align \
 --checkpoint_path ${ckpt_prefix} \
examples/librispeech/s2/local/export.sh
@@ -12,15 +12,9 @@ config_path=$1
 ckpt_path_prefix=$2
 jit_model_export_path=$3
 
-device=gpu
-if [ ${ngpu} == 0 ];then
-    device=cpu
-fi
 
 python3 -u ${BIN_DIR}/test.py \
 --model-name 'u2_kaldi' \
 --run-mode 'export' \
---device ${device} \
 --nproc ${ngpu} \
 --config ${config_path} \
 --checkpoint_path ${ckpt_path_prefix} \
examples/librispeech/s2/local/train.sh
@@ -11,12 +11,6 @@ echo "using $ngpu gpus..."
 config_path=$1
 ckpt_name=$2
 
-device=gpu
-if [ ${ngpu} == 0 ];then
-    device=cpu
-fi
-echo "using ${device}..."
 
 mkdir -p exp
 
 # seed may break model convergence
@@ -27,7 +21,6 @@ fi
 python3 -u ${BIN_DIR}/train.py \
 --model-name u2_kaldi \
---device ${device} \
 --nproc ${ngpu} \
 --config ${config_path} \
 --output exp/${ckpt_name} \
examples/ted_en_zh/t0/local/test.sh
@@ -8,10 +8,6 @@ fi
 ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
 echo "using $ngpu gpus..."
 
-device=gpu
-if [ ${ngpu} == 0 ];then
-    device=cpu
-fi
 
 config_path=$1
 ckpt_prefix=$2
@@ -19,8 +15,7 @@ for type in fullsentence; do
     echo "decoding ${type}"
     batch_size=32
     python3 -u ${BIN_DIR}/test.py \
-    --device ${device} \
-    --nproc 1 \
+    --nproc ${ngpu} \
     --config ${config_path} \
     --result_file ${ckpt_prefix}.${type}.rsl \
     --checkpoint_path ${ckpt_prefix} \
examples/ted_en_zh/t0/local/train.sh
@@ -11,12 +11,6 @@ echo "using $ngpu gpus..."
 config_path=$1
 ckpt_name=$2
 
-device=gpu
-if [ ${ngpu} == 0 ];then
-    device=cpu
-fi
-echo "using ${device}..."
 
 mkdir -p exp
 
 # seed may break model convergence
@@ -26,7 +20,6 @@ if [ ${seed} != 0 ]; then
 fi
 
 python3 -u ${BIN_DIR}/train.py \
---device ${device} \
 --nproc ${ngpu} \
 --config ${config_path} \
 --output exp/${ckpt_name} \
examples/timit/s1/local/align.sh
@@ -8,10 +8,6 @@ fi
 ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
 echo "using $ngpu gpus..."
 
-device=gpu
-if [ ${ngpu} == 0 ];then
-    device=cpu
-fi
 
 config_path=$1
 ckpt_prefix=$2
@@ -22,8 +18,7 @@ mkdir -p ${output_dir}
 # align dump in `result_file`
 # .tier, .TextGrid dump in `dir of result_file`
 python3 -u ${BIN_DIR}/alignment.py \
---device ${device} \
---nproc 1 \
+--nproc ${ngpu} \
 --config ${config_path} \
 --result_file ${output_dir}/${type}.align \
 --checkpoint_path ${ckpt_prefix} \
examples/timit/s1/local/export.sh
@@ -12,13 +12,7 @@ config_path=$1
 ckpt_path_prefix=$2
 jit_model_export_path=$3
 
-device=gpu
-if [ ${ngpu} == 0 ];then
-    device=cpu
-fi
 
 python3 -u ${BIN_DIR}/export.py \
---device ${device} \
 --nproc ${ngpu} \
 --config ${config_path} \
 --checkpoint_path ${ckpt_path_prefix} \
examples/timit/s1/local/test.sh
@@ -8,11 +8,6 @@ fi
 ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
 echo "using $ngpu gpus..."
 
-device=gpu
-if [ ${ngpu} == 0 ];then
-    device=cpu
-fi
 
 config_path=$1
 ckpt_prefix=$2
@@ -37,8 +32,7 @@ for type in attention ctc_greedy_search; do
         batch_size=64
     fi
    python3 -u ${BIN_DIR}/test.py \
-    --device ${device} \
-    --nproc 1 \
+    --nproc ${ngpu} \
     --config ${config_path} \
     --result_file ${ckpt_prefix}.${type}.rsl \
     --checkpoint_path ${ckpt_prefix} \
@@ -54,8 +48,7 @@ for type in ctc_prefix_beam_search attention_rescoring; do
     echo "decoding ${type}"
     batch_size=1
     python3 -u ${BIN_DIR}/test.py \
-    --device ${device} \
-    --nproc 1 \
+    --nproc ${ngpu} \
     --config ${config_path} \
     --result_file ${ckpt_prefix}.${type}.rsl \
     --checkpoint_path ${ckpt_prefix} \
examples/timit/s1/local/train.sh
@@ -11,12 +11,6 @@ echo "using $ngpu gpus..."
 config_path=$1
 ckpt_name=$2
 
-device=gpu
-if [ ${ngpu} == 0 ];then
-    device=cpu
-fi
-echo "using ${device}..."
 
 mkdir -p exp
 
 # seed may break model convergence
@@ -26,7 +20,6 @@ if [ ${seed} != 0 ]; then
 fi
 
 python3 -u ${BIN_DIR}/train.py \
---device ${device} \
 --nproc ${ngpu} \
 --config ${config_path} \
 --output exp/${ckpt_name} \
examples/tiny/s0/local/export.sh
@@ -13,13 +13,7 @@ ckpt_path_prefix=$2
 jit_model_export_path=$3
 model_type=$4
 
-device=gpu
-if [ ${ngpu} == 0 ];then
-    device=cpu
-fi
 
 python3 -u ${BIN_DIR}/export.py \
---device ${device} \
 --nproc ${ngpu} \
 --config ${config_path} \
 --checkpoint_path ${ckpt_path_prefix} \
examples/tiny/s0/local/test.sh
@@ -8,10 +8,6 @@ fi
 ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
 echo "using $ngpu gpus..."
 
-device=gpu
-if [ ${ngpu} == 0 ];then
-    device=cpu
-fi
 
 config_path=$1
 ckpt_prefix=$2
 model_type=$3
@@ -23,8 +19,7 @@ if [ $? -ne 0 ]; then
 fi
 
 python3 -u ${BIN_DIR}/test.py \
---device ${device} \
---nproc 1 \
+--nproc ${ngpu} \
 --config ${config_path} \
 --result_file ${ckpt_prefix}.rsl \
 --checkpoint_path ${ckpt_prefix} \
examples/tiny/s0/local/train.sh
@@ -10,17 +10,11 @@ source ${MAIN_ROOT}/utils/parse_options.sh || exit 1;
 ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
 echo "using $ngpu gpus..."
 
-device=gpu
-if [ ${ngpu} == 0 ];then
-    device=cpu
-fi
 
 if [ ${seed} != 0 ]; then
     export FLAGS_cudnn_deterministic=True
     echo "using seed $seed & FLAGS_cudnn_deterministic=True ..."
 fi
 
 if [ $# != 3 ];then
     echo "usage: CUDA_VISIBLE_DEVICES=0 ${0} config_path ckpt_name model_type"
     exit -1
@@ -33,7 +27,6 @@ model_type=$3
 mkdir -p exp
 
 python3 -u ${BIN_DIR}/train.py \
---device ${device} \
 --nproc ${ngpu} \
 --config ${config_path} \
 --output exp/${ckpt_name} \
examples/tiny/s1/local/align.sh
@@ -8,10 +8,6 @@ fi
 ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
 echo "using $ngpu gpus..."
 
-device=gpu
-if [ ${ngpu} == 0 ];then
-    device=cpu
-fi
 
 config_path=$1
 ckpt_prefix=$2
@@ -22,8 +18,7 @@ mkdir -p ${output_dir}
 # align dump in `result_file`
 # .tier, .TextGrid dump in `dir of result_file`
 python3 -u ${BIN_DIR}/alignment.py \
---device ${device} \
---nproc 1 \
+--nproc ${ngpu} \
 --config ${config_path} \
 --result_file ${output_dir}/${type}.align \
 --checkpoint_path ${ckpt_prefix} \
examples/tiny/s1/local/export.sh
@@ -12,13 +12,7 @@ config_path=$1
 ckpt_path_prefix=$2
 jit_model_export_path=$3
 
-device=gpu
-if [ ${ngpu} == 0 ];then
-    device=cpu
-fi
 
 python3 -u ${BIN_DIR}/export.py \
---device ${device} \
 --nproc ${ngpu} \
 --config ${config_path} \
 --checkpoint_path ${ckpt_path_prefix} \
examples/tiny/s1/local/test.sh
@@ -8,10 +8,6 @@ fi
 ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
 echo "using $ngpu gpus..."
 
-device=gpu
-if [ ${ngpu} == 0 ];then
-    device=cpu
-fi
 
 config_path=$1
 ckpt_prefix=$2
@@ -35,8 +31,7 @@ for type in attention ctc_greedy_search; do
         batch_size=64
     fi
     python3 -u ${BIN_DIR}/test.py \
-    --device ${device} \
-    --nproc 1 \
+    --nproc ${ngpu} \
     --config ${config_path} \
     --result_file ${ckpt_prefix}.${type}.rsl \
     --checkpoint_path ${ckpt_prefix} \
@@ -52,8 +47,7 @@ for type in ctc_prefix_beam_search attention_rescoring; do
     echo "decoding ${type}"
     batch_size=1
     python3 -u ${BIN_DIR}/test.py \
-    --device ${device} \
-    --nproc 1 \
+    --nproc ${ngpu} \
     --config ${config_path} \
     --result_file ${ckpt_prefix}.${type}.rsl \
     --checkpoint_path ${ckpt_prefix} \
examples/tiny/s1/local/train.sh
@@ -12,11 +12,6 @@ source ${MAIN_ROOT}/utils/parse_options.sh || exit 1;
 ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
 echo "using $ngpu gpus..."
 
-device=gpu
-if [ ${ngpu} == 0 ];then
-    device=cpu
-fi
 
 if [ ${seed} != 0 ]; then
     export FLAGS_cudnn_deterministic=True
     echo "using seed $seed & FLAGS_cudnn_deterministic=True ..."
@@ -34,7 +29,6 @@ mkdir -p exp
 python3 -u ${BIN_DIR}/train.py \
 --seed ${seed} \
---device ${device} \
 --nproc ${ngpu} \
 --config ${config_path} \
 --output exp/${ckpt_name} \
examples/v18_to_v2x/deepspeech2x/model.py
@@ -401,7 +401,7 @@ class DeepSpeech2Tester(DeepSpeech2Trainer):
     def setup(self):
         """Setup the experiment.
         """
-        paddle.set_device(self.args.device)
+        paddle.set_device('gpu' if self.args.nprocs > 0 else 'cpu')
         self.setup_output_dir()
         self.setup_checkpointer()
examples/v18_to_v2x/exp_aishell/local/test.sh
@@ -8,10 +8,6 @@ fi
 ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
 echo "using $ngpu gpus..."
 
-device=gpu
-if [ ${ngpu} == 0 ];then
-    device=cpu
-fi
 
 config_path=$1
 ckpt_prefix=$2
 model_type=$3
@@ -23,8 +19,7 @@ if [ $? -ne 0 ]; then
 fi
 
 python3 -u ${BIN_DIR}/test.py \
---device ${device} \
---nproc 1 \
+--nproc ${ngpu} \
 --config ${config_path} \
 --result_file ${ckpt_prefix}.rsl \
 --checkpoint_path ${ckpt_prefix} \
examples/v18_to_v2x/exp_baidu_en8k/local/test.sh
@@ -8,10 +8,6 @@ fi
 ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
 echo "using $ngpu gpus..."
 
-device=gpu
-if [ ${ngpu} == 0 ];then
-    device=cpu
-fi
 
 config_path=$1
 ckpt_prefix=$2
 model_type=$3
@@ -23,8 +19,7 @@ if [ $? -ne 0 ]; then
 fi
 
 python3 -u ${BIN_DIR}/test.py \
---device ${device} \
---nproc 1 \
+--nproc ${ngpu} \
 --config ${config_path} \
 --result_file ${ckpt_prefix}.rsl \
 --checkpoint_path ${ckpt_prefix} \
examples/v18_to_v2x/exp_librispeech/local/test.sh
@@ -8,10 +8,6 @@ fi
 ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
 echo "using $ngpu gpus..."
 
-device=gpu
-if [ ${ngpu} == 0 ];then
-    device=cpu
-fi
 
 config_path=$1
 ckpt_prefix=$2
 model_type=$3
@@ -23,8 +19,7 @@ if [ $? -ne 0 ]; then
 fi
 
 python3 -u ${BIN_DIR}/test.py \
---device ${device} \
---nproc 1 \
+--nproc ${ngpu} \
 --config ${config_path} \
 --result_file ${ckpt_prefix}.rsl \
 --checkpoint_path ${ckpt_prefix} \
tests/chains/ds2_params_lite_train_infer.txt
@@ -13,7 +13,7 @@ null:null
 null:null
 ##
 trainer:norm_train
-norm_train: ../../../deepspeech/exps/deepspeech2/bin/train.py --nproc 1 --config conf/deepspeech2.yaml --model_type offline --device gpu
+norm_train: ../../../deepspeech/exps/deepspeech2/bin/train.py --nproc 1 --config conf/deepspeech2.yaml --model_type offline
 pact_train:null
 fpgm_train:null
 distill_train:null
@@ -21,7 +21,7 @@ null:null
 null:null
 ##
 ===========================eval_params===========================
-eval: ../../../deepspeech/exps/deepspeech2/bin/test.py --nproc 1 --config conf/deepspeech2.yaml --result_file tests/9.rsl --model_type offline --device gpu
+eval: ../../../deepspeech/exps/deepspeech2/bin/test.py --nproc 1 --config conf/deepspeech2.yaml --result_file tests/9.rsl --model_type offline
 null:null
 ##
 ===========================infer_params===========================