Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
PaddlePaddle
DeepSpeech
提交
9aa868d1
D
DeepSpeech
项目概览
PaddlePaddle
/
DeepSpeech
大约 2 年 前同步成功
通知
210
Star
8425
Fork
1598
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
245
列表
看板
标记
里程碑
合并请求
3
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
D
DeepSpeech
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
245
Issue
245
列表
看板
标记
里程碑
合并请求
3
合并请求
3
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
提交
9aa868d1
编写于
6月 10, 2022
作者:
H
huangyuxin
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
support distributed training
上级
2b5bc6df
变更
22
隐藏空白更改
内联
并排
Showing
22 changed files
with
146 additions
and
46 deletions
+146
-46
examples/aishell/asr0/local/train.sh
examples/aishell/asr0/local/train.sh
+10
-3
examples/aishell/asr0/run.sh
examples/aishell/asr0/run.sh
+2
-1
examples/aishell/asr1/local/train.sh
examples/aishell/asr1/local/train.sh
+11
-3
examples/aishell/asr1/run.sh
examples/aishell/asr1/run.sh
+2
-1
examples/callcenter/asr1/local/train.sh
examples/callcenter/asr1/local/train.sh
+10
-3
examples/callcenter/asr1/run.sh
examples/callcenter/asr1/run.sh
+2
-1
examples/librispeech/asr0/local/train.sh
examples/librispeech/asr0/local/train.sh
+10
-3
examples/librispeech/asr0/run.sh
examples/librispeech/asr0/run.sh
+2
-1
examples/librispeech/asr1/local/train.sh
examples/librispeech/asr1/local/train.sh
+10
-3
examples/librispeech/asr1/run.sh
examples/librispeech/asr1/run.sh
+2
-1
examples/librispeech/asr2/local/train.sh
examples/librispeech/asr2/local/train.sh
+10
-3
examples/librispeech/asr2/run.sh
examples/librispeech/asr2/run.sh
+2
-1
examples/mustc/st1/local/train.sh
examples/mustc/st1/local/train.sh
+18
-2
examples/mustc/st1/run.sh
examples/mustc/st1/run.sh
+3
-2
examples/ted_en_zh/st0/local/train.sh
examples/ted_en_zh/st0/local/train.sh
+10
-3
examples/ted_en_zh/st0/run.sh
examples/ted_en_zh/st0/run.sh
+2
-1
examples/ted_en_zh/st1/local/train.sh
examples/ted_en_zh/st1/local/train.sh
+12
-3
examples/ted_en_zh/st1/run.sh
examples/ted_en_zh/st1/run.sh
+2
-1
examples/tiny/asr0/local/train.sh
examples/tiny/asr0/local/train.sh
+10
-3
examples/tiny/asr0/run.sh
examples/tiny/asr0/run.sh
+3
-2
examples/tiny/asr1/local/train.sh
examples/tiny/asr1/local/train.sh
+10
-3
examples/tiny/asr1/run.sh
examples/tiny/asr1/run.sh
+3
-2
未找到文件。
examples/aishell/asr0/local/train.sh
浏览文件 @
9aa868d1
#!/bin/bash
#!/bin/bash
if
[
$#
!=
2
]
;
then
if
[
$#
-lt
2
]
&&
[
$#
-gt
3
]
;
then
echo
"usage: CUDA_VISIBLE_DEVICES=0
${
0
}
config_path ckpt_name"
echo
"usage: CUDA_VISIBLE_DEVICES=0
${
0
}
config_path ckpt_name
ips(optional)
"
exit
-1
exit
-1
fi
fi
...
@@ -10,6 +10,13 @@ echo "using $ngpu gpus..."
...
@@ -10,6 +10,13 @@ echo "using $ngpu gpus..."
config_path
=
$1
config_path
=
$1
ckpt_name
=
$2
ckpt_name
=
$2
ips
=
$3
if
[
!
$ips
]
;
then
ips_config
=
else
ips_config
=
"--ips="
${
ips
}
fi
mkdir
-p
exp
mkdir
-p
exp
...
@@ -26,7 +33,7 @@ python3 -u ${BIN_DIR}/train.py \
...
@@ -26,7 +33,7 @@ python3 -u ${BIN_DIR}/train.py \
--output
exp/
${
ckpt_name
}
\
--output
exp/
${
ckpt_name
}
\
--seed
${
seed
}
--seed
${
seed
}
else
else
python3
-m
paddle.distributed.launch
--gpus
=
${
CUDA_VISIBLE_DEVICES
}
${
BIN_DIR
}
/train.py
\
python3
-m
paddle.distributed.launch
--gpus
=
${
CUDA_VISIBLE_DEVICES
}
${
ips_config
}
${
BIN_DIR
}
/train.py
\
--ngpu
${
ngpu
}
\
--ngpu
${
ngpu
}
\
--config
${
config_path
}
\
--config
${
config_path
}
\
--output
exp/
${
ckpt_name
}
\
--output
exp/
${
ckpt_name
}
\
...
...
examples/aishell/asr0/run.sh
浏览文件 @
9aa868d1
...
@@ -6,6 +6,7 @@ gpus=0,1,2,3
...
@@ -6,6 +6,7 @@ gpus=0,1,2,3
stage
=
0
stage
=
0
stop_stage
=
100
stop_stage
=
100
conf_path
=
conf/deepspeech2.yaml
#conf/deepspeech2.yaml or conf/deepspeech2_online.yaml
conf_path
=
conf/deepspeech2.yaml
#conf/deepspeech2.yaml or conf/deepspeech2_online.yaml
ips
=
#xx.xx.xx.xx,xx.xx.xx.xx
decode_conf_path
=
conf/tuning/decode.yaml
decode_conf_path
=
conf/tuning/decode.yaml
avg_num
=
10
avg_num
=
10
audio_file
=
data/demo_01_03.wav
audio_file
=
data/demo_01_03.wav
...
@@ -24,7 +25,7 @@ fi
...
@@ -24,7 +25,7 @@ fi
if
[
${
stage
}
-le
1
]
&&
[
${
stop_stage
}
-ge
1
]
;
then
if
[
${
stage
}
-le
1
]
&&
[
${
stop_stage
}
-ge
1
]
;
then
# train model, all `ckpt` under `exp` dir
# train model, all `ckpt` under `exp` dir
CUDA_VISIBLE_DEVICES
=
${
gpus
}
./local/train.sh
${
conf_path
}
${
ckpt
}
CUDA_VISIBLE_DEVICES
=
${
gpus
}
./local/train.sh
${
conf_path
}
${
ckpt
}
${
ips
}
fi
fi
if
[
${
stage
}
-le
2
]
&&
[
${
stop_stage
}
-ge
2
]
;
then
if
[
${
stage
}
-le
2
]
&&
[
${
stop_stage
}
-ge
2
]
;
then
...
...
examples/aishell/asr1/local/train.sh
浏览文件 @
9aa868d1
...
@@ -17,13 +17,21 @@ if [ ${seed} != 0 ]; then
...
@@ -17,13 +17,21 @@ if [ ${seed} != 0 ]; then
echo
"using seed
$seed
& FLAGS_cudnn_deterministic=True ..."
echo
"using seed
$seed
& FLAGS_cudnn_deterministic=True ..."
fi
fi
if
[
$#
!=
2
]
;
then
if
[
$#
-lt
2
]
&&
[
$#
-gt
3
]
;
then
echo
"usage: CUDA_VISIBLE_DEVICES=0
${
0
}
config_path ckpt_name"
echo
"usage: CUDA_VISIBLE_DEVICES=0
${
0
}
config_path ckpt_name
ips(optional)
"
exit
-1
exit
-1
fi
fi
config_path
=
$1
config_path
=
$1
ckpt_name
=
$2
ckpt_name
=
$2
ips
=
$3
if
[
!
$ips
]
;
then
ips_config
=
else
ips_config
=
"--ips="
${
ips
}
fi
echo
${
ips_config
}
mkdir
-p
exp
mkdir
-p
exp
...
@@ -37,7 +45,7 @@ python3 -u ${BIN_DIR}/train.py \
...
@@ -37,7 +45,7 @@ python3 -u ${BIN_DIR}/train.py \
--benchmark-batch-size
${
benchmark_batch_size
}
\
--benchmark-batch-size
${
benchmark_batch_size
}
\
--benchmark-max-step
${
benchmark_max_step
}
--benchmark-max-step
${
benchmark_max_step
}
else
else
python3
-m
paddle.distributed.launch
--gpus
=
${
CUDA_VISIBLE_DEVICES
}
${
BIN_DIR
}
/train.py
\
python3
-m
paddle.distributed.launch
--gpus
=
${
CUDA_VISIBLE_DEVICES
}
${
ips_config
}
${
BIN_DIR
}
/train.py
\
--ngpu
${
ngpu
}
\
--ngpu
${
ngpu
}
\
--seed
${
seed
}
\
--seed
${
seed
}
\
--config
${
config_path
}
\
--config
${
config_path
}
\
...
...
examples/aishell/asr1/run.sh
浏览文件 @
9aa868d1
...
@@ -6,6 +6,7 @@ gpus=0,1,2,3
...
@@ -6,6 +6,7 @@ gpus=0,1,2,3
stage
=
0
stage
=
0
stop_stage
=
50
stop_stage
=
50
conf_path
=
conf/conformer.yaml
conf_path
=
conf/conformer.yaml
ips
=
#xx.xx.xx.xx,xx.xx.xx.xx
decode_conf_path
=
conf/tuning/decode.yaml
decode_conf_path
=
conf/tuning/decode.yaml
avg_num
=
30
avg_num
=
30
audio_file
=
data/demo_01_03.wav
audio_file
=
data/demo_01_03.wav
...
@@ -23,7 +24,7 @@ fi
...
@@ -23,7 +24,7 @@ fi
if
[
${
stage
}
-le
1
]
&&
[
${
stop_stage
}
-ge
1
]
;
then
if
[
${
stage
}
-le
1
]
&&
[
${
stop_stage
}
-ge
1
]
;
then
# train model, all `ckpt` under `exp` dir
# train model, all `ckpt` under `exp` dir
CUDA_VISIBLE_DEVICES
=
${
gpus
}
./local/train.sh
${
conf_path
}
${
ckpt
}
CUDA_VISIBLE_DEVICES
=
${
gpus
}
./local/train.sh
${
conf_path
}
${
ckpt
}
${
ips
}
fi
fi
if
[
${
stage
}
-le
2
]
&&
[
${
stop_stage
}
-ge
2
]
;
then
if
[
${
stage
}
-le
2
]
&&
[
${
stop_stage
}
-ge
2
]
;
then
...
...
examples/callcenter/asr1/local/train.sh
浏览文件 @
9aa868d1
#! /usr/bin/env bash
#! /usr/bin/env bash
if
[
$#
!=
2
]
;
then
if
[
$#
-lt
2
]
&&
[
$#
-gt
3
]
;
then
echo
"usage: CUDA_VISIBLE_DEVICES=0
${
0
}
config_path ckpt_name"
echo
"usage: CUDA_VISIBLE_DEVICES=0
${
0
}
config_path ckpt_name
ips(optional)
"
exit
-1
exit
-1
fi
fi
...
@@ -10,6 +10,13 @@ echo "using $ngpu gpus..."
...
@@ -10,6 +10,13 @@ echo "using $ngpu gpus..."
config_path
=
$1
config_path
=
$1
ckpt_name
=
$2
ckpt_name
=
$2
ips
=
$3
if
[
!
$ips
]
;
then
ips_config
=
else
ips_config
=
"--ips="
${
ips
}
fi
echo
"using
${
device
}
..."
echo
"using
${
device
}
..."
...
@@ -28,7 +35,7 @@ python3 -u ${BIN_DIR}/train.py \
...
@@ -28,7 +35,7 @@ python3 -u ${BIN_DIR}/train.py \
--output
exp/
${
ckpt_name
}
\
--output
exp/
${
ckpt_name
}
\
--seed
${
seed
}
--seed
${
seed
}
else
else
python3
-m
paddle.distributed.launch
--gpus
=
${
CUDA_VISIBLE_DEVICES
}
${
BIN_DIR
}
/train.py
\
python3
-m
paddle.distributed.launch
--gpus
=
${
CUDA_VISIBLE_DEVICES
}
${
ips_config
}
${
BIN_DIR
}
/train.py
\
--ngpu
${
ngpu
}
\
--ngpu
${
ngpu
}
\
--config
${
config_path
}
\
--config
${
config_path
}
\
--output
exp/
${
ckpt_name
}
\
--output
exp/
${
ckpt_name
}
\
...
...
examples/callcenter/asr1/run.sh
浏览文件 @
9aa868d1
...
@@ -6,6 +6,7 @@ gpus=0,1,2,3
...
@@ -6,6 +6,7 @@ gpus=0,1,2,3
stage
=
0
stage
=
0
stop_stage
=
50
stop_stage
=
50
conf_path
=
conf/conformer.yaml
conf_path
=
conf/conformer.yaml
ips
=
#xx.xx.xx.xx,xx.xx.xx.xx
decode_conf_path
=
conf/tuning/decode.yaml
decode_conf_path
=
conf/tuning/decode.yaml
avg_num
=
20
avg_num
=
20
...
@@ -22,7 +23,7 @@ fi
...
@@ -22,7 +23,7 @@ fi
if
[
${
stage
}
-le
1
]
&&
[
${
stop_stage
}
-ge
1
]
;
then
if
[
${
stage
}
-le
1
]
&&
[
${
stop_stage
}
-ge
1
]
;
then
# train model, all `ckpt` under `exp` dir
# train model, all `ckpt` under `exp` dir
CUDA_VISIBLE_DEVICES
=
${
gpus
}
./local/train.sh
${
conf_path
}
${
ckpt
}
CUDA_VISIBLE_DEVICES
=
${
gpus
}
./local/train.sh
${
conf_path
}
${
ckpt
}
${
ips
}
fi
fi
if
[
${
stage
}
-le
2
]
&&
[
${
stop_stage
}
-ge
2
]
;
then
if
[
${
stage
}
-le
2
]
&&
[
${
stop_stage
}
-ge
2
]
;
then
...
...
examples/librispeech/asr0/local/train.sh
浏览文件 @
9aa868d1
#!/bin/bash
#!/bin/bash
if
[
$#
!=
2
]
;
then
if
[
$#
-lt
2
]
&&
[
$#
-gt
3
]
;
then
echo
"usage: CUDA_VISIBLE_DEVICES=0
${
0
}
config_path ckpt_name"
echo
"usage: CUDA_VISIBLE_DEVICES=0
${
0
}
config_path ckpt_name
ips(optional)
"
exit
-1
exit
-1
fi
fi
...
@@ -10,6 +10,13 @@ echo "using $ngpu gpus..."
...
@@ -10,6 +10,13 @@ echo "using $ngpu gpus..."
config_path
=
$1
config_path
=
$1
ckpt_name
=
$2
ckpt_name
=
$2
ips
=
$3
if
[
!
$ips
]
;
then
ips_config
=
else
ips_config
=
"--ips="
${
ips
}
fi
mkdir
-p
exp
mkdir
-p
exp
...
@@ -26,7 +33,7 @@ python3 -u ${BIN_DIR}/train.py \
...
@@ -26,7 +33,7 @@ python3 -u ${BIN_DIR}/train.py \
--output
exp/
${
ckpt_name
}
\
--output
exp/
${
ckpt_name
}
\
--seed
${
seed
}
--seed
${
seed
}
else
else
python3
-m
paddle.distributed.launch
--gpus
=
${
CUDA_VISIBLE_DEVICES
}
${
BIN_DIR
}
/train.py
\
python3
-m
paddle.distributed.launch
--gpus
=
${
CUDA_VISIBLE_DEVICES
}
${
ips_config
}
${
BIN_DIR
}
/train.py
\
--ngpu
${
ngpu
}
\
--ngpu
${
ngpu
}
\
--config
${
config_path
}
\
--config
${
config_path
}
\
--output
exp/
${
ckpt_name
}
\
--output
exp/
${
ckpt_name
}
\
...
...
examples/librispeech/asr0/run.sh
浏览文件 @
9aa868d1
...
@@ -6,6 +6,7 @@ gpus=0,1,2,3
...
@@ -6,6 +6,7 @@ gpus=0,1,2,3
stage
=
0
stage
=
0
stop_stage
=
100
stop_stage
=
100
conf_path
=
conf/deepspeech2.yaml
conf_path
=
conf/deepspeech2.yaml
ips
=
#xx.xx.xx.xx,xx.xx.xx.xx
decode_conf_path
=
conf/tuning/decode.yaml
decode_conf_path
=
conf/tuning/decode.yaml
avg_num
=
5
avg_num
=
5
audio_file
=
data/demo_002_en.wav
audio_file
=
data/demo_002_en.wav
...
@@ -23,7 +24,7 @@ fi
...
@@ -23,7 +24,7 @@ fi
if
[
${
stage
}
-le
1
]
&&
[
${
stop_stage
}
-ge
1
]
;
then
if
[
${
stage
}
-le
1
]
&&
[
${
stop_stage
}
-ge
1
]
;
then
# train model, all `ckpt` under `exp` dir
# train model, all `ckpt` under `exp` dir
CUDA_VISIBLE_DEVICES
=
${
gpus
}
./local/train.sh
${
conf_path
}
${
ckpt
}
CUDA_VISIBLE_DEVICES
=
${
gpus
}
./local/train.sh
${
conf_path
}
${
ckpt
}
${
ips
}
fi
fi
if
[
${
stage
}
-le
2
]
&&
[
${
stop_stage
}
-ge
2
]
;
then
if
[
${
stage
}
-le
2
]
&&
[
${
stop_stage
}
-ge
2
]
;
then
...
...
examples/librispeech/asr1/local/train.sh
浏览文件 @
9aa868d1
#!/bin/bash
#!/bin/bash
if
[
$#
!=
2
]
;
then
if
[
$#
-lt
2
]
&&
[
$#
-gt
3
]
;
then
echo
"usage: CUDA_VISIBLE_DEVICES=0
${
0
}
config_path ckpt_name"
echo
"usage: CUDA_VISIBLE_DEVICES=0
${
0
}
config_path ckpt_name
ips(optional)
"
exit
-1
exit
-1
fi
fi
...
@@ -10,6 +10,13 @@ echo "using $ngpu gpus..."
...
@@ -10,6 +10,13 @@ echo "using $ngpu gpus..."
config_path
=
$1
config_path
=
$1
ckpt_name
=
$2
ckpt_name
=
$2
ips
=
$3
if
[
!
$ips
]
;
then
ips_config
=
else
ips_config
=
"--ips="
${
ips
}
fi
mkdir
-p
exp
mkdir
-p
exp
...
@@ -29,7 +36,7 @@ python3 -u ${BIN_DIR}/train.py \
...
@@ -29,7 +36,7 @@ python3 -u ${BIN_DIR}/train.py \
--output
exp/
${
ckpt_name
}
\
--output
exp/
${
ckpt_name
}
\
--seed
${
seed
}
--seed
${
seed
}
else
else
python3
-m
paddle.distributed.launch
--gpus
=
${
CUDA_VISIBLE_DEVICES
}
${
BIN_DIR
}
/train.py
\
python3
-m
paddle.distributed.launch
--gpus
=
${
CUDA_VISIBLE_DEVICES
}
${
ips_config
}
${
BIN_DIR
}
/train.py
\
--ngpu
${
ngpu
}
\
--ngpu
${
ngpu
}
\
--config
${
config_path
}
\
--config
${
config_path
}
\
--output
exp/
${
ckpt_name
}
\
--output
exp/
${
ckpt_name
}
\
...
...
examples/librispeech/asr1/run.sh
浏览文件 @
9aa868d1
...
@@ -8,6 +8,7 @@ gpus=0,1,2,3
...
@@ -8,6 +8,7 @@ gpus=0,1,2,3
stage
=
0
stage
=
0
stop_stage
=
50
stop_stage
=
50
conf_path
=
conf/transformer.yaml
conf_path
=
conf/transformer.yaml
ips
=
#xx.xx.xx.xx,xx.xx.xx.xx
decode_conf_path
=
conf/tuning/decode.yaml
decode_conf_path
=
conf/tuning/decode.yaml
avg_num
=
30
avg_num
=
30
audio_file
=
data/demo_002_en.wav
audio_file
=
data/demo_002_en.wav
...
@@ -25,7 +26,7 @@ fi
...
@@ -25,7 +26,7 @@ fi
if
[
${
stage
}
-le
1
]
&&
[
${
stop_stage
}
-ge
1
]
;
then
if
[
${
stage
}
-le
1
]
&&
[
${
stop_stage
}
-ge
1
]
;
then
# train model, all `ckpt` under `exp` dir
# train model, all `ckpt` under `exp` dir
CUDA_VISIBLE_DEVICES
=
${
gpus
}
./local/train.sh
${
conf_path
}
${
ckpt
}
CUDA_VISIBLE_DEVICES
=
${
gpus
}
./local/train.sh
${
conf_path
}
${
ckpt
}
${
ips
}
fi
fi
if
[
${
stage
}
-le
2
]
&&
[
${
stop_stage
}
-ge
2
]
;
then
if
[
${
stage
}
-le
2
]
&&
[
${
stop_stage
}
-ge
2
]
;
then
...
...
examples/librispeech/asr2/local/train.sh
浏览文件 @
9aa868d1
#!/bin/bash
#!/bin/bash
if
[
$#
!=
2
]
;
then
if
[
$#
-lt
2
]
&&
[
$#
-gt
3
]
;
then
echo
"usage: CUDA_VISIBLE_DEVICES=0
${
0
}
config_path ckpt_name"
echo
"usage: CUDA_VISIBLE_DEVICES=0
${
0
}
config_path ckpt_name
ips(optional)
"
exit
-1
exit
-1
fi
fi
...
@@ -10,6 +10,13 @@ echo "using $ngpu gpus..."
...
@@ -10,6 +10,13 @@ echo "using $ngpu gpus..."
config_path
=
$1
config_path
=
$1
ckpt_name
=
$2
ckpt_name
=
$2
ips
=
$3
if
[
!
$ips
]
;
then
ips_config
=
else
ips_config
=
"--ips="
${
ips
}
fi
mkdir
-p
exp
mkdir
-p
exp
...
@@ -27,7 +34,7 @@ python3 -u ${BIN_DIR}/train.py \
...
@@ -27,7 +34,7 @@ python3 -u ${BIN_DIR}/train.py \
--output
exp/
${
ckpt_name
}
\
--output
exp/
${
ckpt_name
}
\
--seed
${
seed
}
--seed
${
seed
}
else
else
python3
-m
paddle.distributed.launch
--gpus
=
${
CUDA_VISIBLE_DEVICES
}
${
BIN_DIR
}
/train.py
\
python3
-m
paddle.distributed.launch
--gpus
=
${
CUDA_VISIBLE_DEVICES
}
${
ips_config
}
${
BIN_DIR
}
/train.py
\
--ngpu
${
ngpu
}
\
--ngpu
${
ngpu
}
\
--model-name
u2_kaldi
\
--model-name
u2_kaldi
\
--config
${
config_path
}
\
--config
${
config_path
}
\
...
...
examples/librispeech/asr2/run.sh
浏览文件 @
9aa868d1
...
@@ -9,6 +9,7 @@ gpus=0,1,2,3,4,5,6,7
...
@@ -9,6 +9,7 @@ gpus=0,1,2,3,4,5,6,7
stage
=
0
stage
=
0
stop_stage
=
50
stop_stage
=
50
conf_path
=
conf/transformer.yaml
conf_path
=
conf/transformer.yaml
ips
=
#xx.xx.xx.xx,xx.xx.xx.xx
decode_conf_path
=
conf/decode/decode_base.yaml
decode_conf_path
=
conf/decode/decode_base.yaml
dict_path
=
data/lang_char/train_960_unigram5000_units.txt
dict_path
=
data/lang_char/train_960_unigram5000_units.txt
avg_num
=
10
avg_num
=
10
...
@@ -26,7 +27,7 @@ fi
...
@@ -26,7 +27,7 @@ fi
if
[
${
stage
}
-le
1
]
&&
[
${
stop_stage
}
-ge
1
]
;
then
if
[
${
stage
}
-le
1
]
&&
[
${
stop_stage
}
-ge
1
]
;
then
# train model, all `ckpt` under `exp` dir
# train model, all `ckpt` under `exp` dir
CUDA_VISIBLE_DEVICES
=
${
gpus
}
./local/train.sh
${
conf_path
}
${
ckpt
}
CUDA_VISIBLE_DEVICES
=
${
gpus
}
./local/train.sh
${
conf_path
}
${
ckpt
}
${
ips
}
fi
fi
if
[
${
stage
}
-le
2
]
&&
[
${
stop_stage
}
-ge
2
]
;
then
if
[
${
stage
}
-le
2
]
&&
[
${
stop_stage
}
-ge
2
]
;
then
...
...
examples/mustc/st1/local/train.sh
浏览文件 @
9aa868d1
#!/bin/bash
#!/bin/bash
if
[
$#
!=
3
]
;
then
if
[
$#
-lt
3
]
&&
[
$#
-gt
4
]
;
then
echo
"usage: CUDA_VISIBLE_DEVICES=0
${
0
}
config_path ckpt_name ckpt_path"
echo
"usage: CUDA_VISIBLE_DEVICES=0
${
0
}
config_path ckpt_name ckpt_path
ips(optional)
"
exit
-1
exit
-1
fi
fi
...
@@ -11,6 +11,13 @@ echo "using $ngpu gpus..."
...
@@ -11,6 +11,13 @@ echo "using $ngpu gpus..."
config_path
=
$1
config_path
=
$1
ckpt_name
=
$2
ckpt_name
=
$2
ckpt_path
=
$3
ckpt_path
=
$3
ips
=
$3
if
[
!
$ips
]
;
then
ips_config
=
else
ips_config
=
"--ips="
${
ips
}
fi
mkdir
-p
exp
mkdir
-p
exp
...
@@ -21,12 +28,21 @@ if [ ${seed} != 0 ]; then
...
@@ -21,12 +28,21 @@ if [ ${seed} != 0 ]; then
export
FLAGS_cudnn_deterministic
=
True
export
FLAGS_cudnn_deterministic
=
True
fi
fi
if
[
${
ngpu
}
==
0
]
;
then
python3
-u
${
BIN_DIR
}
/train.py
\
python3
-u
${
BIN_DIR
}
/train.py
\
--ngpu
${
ngpu
}
\
--ngpu
${
ngpu
}
\
--config
${
config_path
}
\
--config
${
config_path
}
\
--output
exp/
${
ckpt_name
}
\
--output
exp/
${
ckpt_name
}
\
--checkpoint_path
"
${
ckpt_path
}
"
\
--checkpoint_path
"
${
ckpt_path
}
"
\
--seed
${
seed
}
--seed
${
seed
}
else
python3
-m
paddle.distributed.launch
--gpus
=
${
CUDA_VISIBLE_DEVICES
}
${
ips_config
}
${
BIN_DIR
}
/train.py
\
--ngpu
${
ngpu
}
\
--config
${
config_path
}
\
--output
exp/
${
ckpt_name
}
\
--checkpoint_path
"
${
ckpt_path
}
"
\
--seed
${
seed
}
fi
if
[
${
seed
}
!=
0
]
;
then
if
[
${
seed
}
!=
0
]
;
then
unset
FLAGS_cudnn_deterministic
unset
FLAGS_cudnn_deterministic
...
...
examples/mustc/st1/run.sh
浏览文件 @
9aa868d1
...
@@ -7,6 +7,7 @@ gpus=0,1,2,3
...
@@ -7,6 +7,7 @@ gpus=0,1,2,3
stage
=
0
stage
=
0
stop_stage
=
3
stop_stage
=
3
conf_path
=
conf/transformer_es.yaml
conf_path
=
conf/transformer_es.yaml
ips
=
#xx.xx.xx.xx,xx.xx.xx.xx
decode_conf_path
=
conf/tuning/decode.yaml
decode_conf_path
=
conf/tuning/decode.yaml
must_c_path
=
must_c_path
=
lang
=
es
lang
=
es
...
@@ -25,7 +26,7 @@ fi
...
@@ -25,7 +26,7 @@ fi
if
[
${
stage
}
-le
1
]
&&
[
${
stop_stage
}
-ge
1
]
;
then
if
[
${
stage
}
-le
1
]
&&
[
${
stop_stage
}
-ge
1
]
;
then
# train model, all `ckpt` under `exp` dir
# train model, all `ckpt` under `exp` dir
CUDA_VISIBLE_DEVICES
=
${
gpus
}
./local/train.sh
${
conf_path
}
${
ckpt
}
"
${
ckpt_path
}
"
CUDA_VISIBLE_DEVICES
=
${
gpus
}
./local/train.sh
${
conf_path
}
${
ckpt
}
"
${
ckpt_path
}
"
${
ips
}
fi
fi
if
[
${
stage
}
-le
2
]
&&
[
${
stop_stage
}
-ge
2
]
;
then
if
[
${
stage
}
-le
2
]
&&
[
${
stop_stage
}
-ge
2
]
;
then
...
@@ -36,4 +37,4 @@ fi
...
@@ -36,4 +37,4 @@ fi
if
[
${
stage
}
-le
3
]
&&
[
${
stop_stage
}
-ge
3
]
;
then
if
[
${
stage
}
-le
3
]
&&
[
${
stop_stage
}
-ge
3
]
;
then
# test ckpt avg_n
# test ckpt avg_n
CUDA_VISIBLE_DEVICES
=
0 ./local/test.sh
${
conf_path
}
${
decode_conf_path
}
exp/
${
ckpt
}
/checkpoints/
${
avg_ckpt
}
${
lang
}
||
exit
-1
CUDA_VISIBLE_DEVICES
=
0 ./local/test.sh
${
conf_path
}
${
decode_conf_path
}
exp/
${
ckpt
}
/checkpoints/
${
avg_ckpt
}
${
lang
}
||
exit
-1
fi
fi
\ No newline at end of file
examples/ted_en_zh/st0/local/train.sh
浏览文件 @
9aa868d1
#!/bin/bash
#!/bin/bash
if
[
$#
!=
2
]
;
then
if
[
$#
-lt
2
]
&&
[
$#
-gt
3
]
;
then
echo
"usage: CUDA_VISIBLE_DEVICES=0
${
0
}
config_path ckpt_name"
echo
"usage: CUDA_VISIBLE_DEVICES=0
${
0
}
config_path ckpt_name
ips(optional)
"
exit
-1
exit
-1
fi
fi
...
@@ -10,6 +10,13 @@ echo "using $ngpu gpus..."
...
@@ -10,6 +10,13 @@ echo "using $ngpu gpus..."
config_path
=
$1
config_path
=
$1
ckpt_name
=
$2
ckpt_name
=
$2
ips
=
$3
if
[
!
$ips
]
;
then
ips_config
=
else
ips_config
=
"--ips="
${
ips
}
fi
mkdir
-p
exp
mkdir
-p
exp
...
@@ -26,7 +33,7 @@ python3 -u ${BIN_DIR}/train.py \
...
@@ -26,7 +33,7 @@ python3 -u ${BIN_DIR}/train.py \
--output
exp/
${
ckpt_name
}
\
--output
exp/
${
ckpt_name
}
\
--seed
${
seed
}
--seed
${
seed
}
else
else
python3
-m
paddle.distributed.launch
--gpus
=
${
CUDA_VISIBLE_DEVICES
}
${
BIN_DIR
}
/train.py
\
python3
-m
paddle.distributed.launch
--gpus
=
${
CUDA_VISIBLE_DEVICES
}
${
ips_config
}
${
BIN_DIR
}
/train.py
\
--ngpu
${
ngpu
}
\
--ngpu
${
ngpu
}
\
--config
${
config_path
}
\
--config
${
config_path
}
\
--output
exp/
${
ckpt_name
}
\
--output
exp/
${
ckpt_name
}
\
...
...
examples/ted_en_zh/st0/run.sh
浏览文件 @
9aa868d1
...
@@ -6,6 +6,7 @@ gpus=0,1,2,3
...
@@ -6,6 +6,7 @@ gpus=0,1,2,3
stage
=
0
stage
=
0
stop_stage
=
50
stop_stage
=
50
conf_path
=
conf/transformer_mtl_noam.yaml
conf_path
=
conf/transformer_mtl_noam.yaml
ips
=
#xx.xx.xx.xx,xx.xx.xx.xx
decode_conf_path
=
conf/tuning/decode.yaml
decode_conf_path
=
conf/tuning/decode.yaml
avg_num
=
5
avg_num
=
5
data_path
=
./TED_EnZh
# path to unzipped data
data_path
=
./TED_EnZh
# path to unzipped data
...
@@ -23,7 +24,7 @@ fi
...
@@ -23,7 +24,7 @@ fi
if
[
${
stage
}
-le
1
]
&&
[
${
stop_stage
}
-ge
1
]
;
then
if
[
${
stage
}
-le
1
]
&&
[
${
stop_stage
}
-ge
1
]
;
then
# train model, all `ckpt` under `exp` dir
# train model, all `ckpt` under `exp` dir
CUDA_VISIBLE_DEVICES
=
${
gpus
}
./local/train.sh
${
conf_path
}
${
ckpt
}
CUDA_VISIBLE_DEVICES
=
${
gpus
}
./local/train.sh
${
conf_path
}
${
ckpt
}
${
ips
}
fi
fi
if
[
${
stage
}
-le
2
]
&&
[
${
stop_stage
}
-ge
2
]
;
then
if
[
${
stage
}
-le
2
]
&&
[
${
stop_stage
}
-ge
2
]
;
then
...
...
examples/ted_en_zh/st1/local/train.sh
浏览文件 @
9aa868d1
#!/bin/bash
#!/bin/bash
if
[
$#
!=
3
]
;
then
if
[
$#
-lt
3
]
&&
[
$#
-gt
4
]
;
then
echo
"usage: CUDA_VISIBLE_DEVICES=0
${
0
}
config_path ckpt_name
ckpt_path
"
echo
"usage: CUDA_VISIBLE_DEVICES=0
${
0
}
config_path ckpt_name
ips(optional)
"
exit
-1
exit
-1
fi
fi
...
@@ -11,6 +11,15 @@ echo "using $ngpu gpus..."
...
@@ -11,6 +11,15 @@ echo "using $ngpu gpus..."
config_path
=
$1
config_path
=
$1
ckpt_name
=
$2
ckpt_name
=
$2
ckpt_path
=
$3
ckpt_path
=
$3
ips
=
$3
if
[
!
$ips
]
;
then
ips_config
=
else
ips_config
=
"--ips="
${
ips
}
fi
mkdir
-p
exp
mkdir
-p
exp
mkdir
-p
exp
...
@@ -28,7 +37,7 @@ python3 -u ${BIN_DIR}/train.py \
...
@@ -28,7 +37,7 @@ python3 -u ${BIN_DIR}/train.py \
--checkpoint_path
"
${
ckpt_path
}
"
\
--checkpoint_path
"
${
ckpt_path
}
"
\
--seed
${
seed
}
--seed
${
seed
}
else
else
python3
-m
paddle.distributed.launch
--gpus
=
${
CUDA_VISIBLE_DEVICES
}
${
BIN_DIR
}
/train.py
\
python3
-m
paddle.distributed.launch
--gpus
=
${
CUDA_VISIBLE_DEVICES
}
${
ips_config
}
${
BIN_DIR
}
/train.py
\
--ngpu
${
ngpu
}
\
--ngpu
${
ngpu
}
\
--config
${
config_path
}
\
--config
${
config_path
}
\
--output
exp/
${
ckpt_name
}
\
--output
exp/
${
ckpt_name
}
\
...
...
examples/ted_en_zh/st1/run.sh
浏览文件 @
9aa868d1
...
@@ -7,6 +7,7 @@ gpus=0,1,2,3
...
@@ -7,6 +7,7 @@ gpus=0,1,2,3
stage
=
1
stage
=
1
stop_stage
=
4
stop_stage
=
4
conf_path
=
conf/transformer_mtl_noam.yaml
conf_path
=
conf/transformer_mtl_noam.yaml
ips
=
#xx.xx.xx.xx,xx.xx.xx.xx
decode_conf_path
=
conf/tuning/decode.yaml
decode_conf_path
=
conf/tuning/decode.yaml
ckpt_path
=
# paddle.98 # (finetune from FAT-ST pretrained model)
ckpt_path
=
# paddle.98 # (finetune from FAT-ST pretrained model)
avg_num
=
5
avg_num
=
5
...
@@ -29,7 +30,7 @@ if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
...
@@ -29,7 +30,7 @@ if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
echo
"Finetune from Pretrained Model"
${
ckpt_path
}
echo
"Finetune from Pretrained Model"
${
ckpt_path
}
./local/download_pretrain.sh
||
exit
-1
./local/download_pretrain.sh
||
exit
-1
fi
fi
CUDA_VISIBLE_DEVICES
=
${
gpus
}
./local/train.sh
${
conf_path
}
${
ckpt
}
"
${
ckpt_path
}
"
CUDA_VISIBLE_DEVICES
=
${
gpus
}
./local/train.sh
${
conf_path
}
${
ckpt
}
"
${
ckpt_path
}
"
${
ips
}
fi
fi
if
[
${
stage
}
-le
2
]
&&
[
${
stop_stage
}
-ge
2
]
;
then
if
[
${
stage
}
-le
2
]
&&
[
${
stop_stage
}
-ge
2
]
;
then
...
...
examples/tiny/asr0/local/train.sh
浏览文件 @
9aa868d1
...
@@ -15,13 +15,20 @@ if [ ${seed} != 0 ]; then
...
@@ -15,13 +15,20 @@ if [ ${seed} != 0 ]; then
echo
"using seed
$seed
& FLAGS_cudnn_deterministic=True ..."
echo
"using seed
$seed
& FLAGS_cudnn_deterministic=True ..."
fi
fi
if
[
$#
!=
2
]
;
then
if
[
$#
-lt
2
]
&&
[
$#
-gt
3
]
;
then
echo
"usage: CUDA_VISIBLE_DEVICES=0
${
0
}
config_path ckpt_name"
echo
"usage: CUDA_VISIBLE_DEVICES=0
${
0
}
config_path ckpt_name
ips(optional)
"
exit
-1
exit
-1
fi
fi
config_path
=
$1
config_path
=
$1
ckpt_name
=
$2
ckpt_name
=
$2
ips
=
$3
if
[
!
$ips
]
;
then
ips_config
=
else
ips_config
=
"--ips="
${
ips
}
fi
mkdir
-p
exp
mkdir
-p
exp
...
@@ -33,7 +40,7 @@ python3 -u ${BIN_DIR}/train.py \
...
@@ -33,7 +40,7 @@ python3 -u ${BIN_DIR}/train.py \
--profiler-options
"
${
profiler_options
}
"
\
--profiler-options
"
${
profiler_options
}
"
\
--seed
${
seed
}
--seed
${
seed
}
else
else
python3
-m
paddle.distributed.launch
--gpus
=
${
CUDA_VISIBLE_DEVICES
}
${
BIN_DIR
}
/train.py
\
python3
-m
paddle.distributed.launch
--gpus
=
${
CUDA_VISIBLE_DEVICES
}
${
ips_config
}
${
BIN_DIR
}
/train.py
\
--ngpu
${
ngpu
}
\
--ngpu
${
ngpu
}
\
--config
${
config_path
}
\
--config
${
config_path
}
\
--output
exp/
${
ckpt_name
}
\
--output
exp/
${
ckpt_name
}
\
...
...
examples/tiny/asr0/run.sh
浏览文件 @
9aa868d1
...
@@ -2,10 +2,11 @@
...
@@ -2,10 +2,11 @@
set
-e
set
-e
source
path.sh
source
path.sh
gpus
=
0
gpus
=
4
stage
=
0
stage
=
0
stop_stage
=
100
stop_stage
=
100
conf_path
=
conf/deepspeech2.yaml
conf_path
=
conf/deepspeech2.yaml
ips
=
#xx.xx.xx.xx,xx.xx.xx.xx
decode_conf_path
=
conf/tuning/decode.yaml
decode_conf_path
=
conf/tuning/decode.yaml
avg_num
=
1
avg_num
=
1
source
${
MAIN_ROOT
}
/utils/parse_options.sh
||
exit
1
;
source
${
MAIN_ROOT
}
/utils/parse_options.sh
||
exit
1
;
...
@@ -21,7 +22,7 @@ fi
...
@@ -21,7 +22,7 @@ fi
if
[
${
stage
}
-le
1
]
&&
[
${
stop_stage
}
-ge
1
]
;
then
if
[
${
stage
}
-le
1
]
&&
[
${
stop_stage
}
-ge
1
]
;
then
# train model, all `ckpt` under `exp` dir
# train model, all `ckpt` under `exp` dir
CUDA_VISIBLE_DEVICES
=
${
gpus
}
./local/train.sh
${
conf_path
}
${
ckpt
}
CUDA_VISIBLE_DEVICES
=
${
gpus
}
./local/train.sh
${
conf_path
}
${
ckpt
}
${
ips
}
fi
fi
if
[
${
stage
}
-le
2
]
&&
[
${
stop_stage
}
-ge
2
]
;
then
if
[
${
stage
}
-le
2
]
&&
[
${
stop_stage
}
-ge
2
]
;
then
...
...
examples/tiny/asr1/local/train.sh
浏览文件 @
9aa868d1
...
@@ -17,13 +17,20 @@ if [ ${seed} != 0 ]; then
...
@@ -17,13 +17,20 @@ if [ ${seed} != 0 ]; then
echo
"using seed
$seed
& FLAGS_cudnn_deterministic=True ..."
echo
"using seed
$seed
& FLAGS_cudnn_deterministic=True ..."
fi
fi
if
[
$#
!=
2
]
;
then
if
[
$#
-lt
2
]
&&
[
$#
-gt
3
]
;
then
echo
"usage: CUDA_VISIBLE_DEVICES=0
${
0
}
config_path ckpt_name"
echo
"usage: CUDA_VISIBLE_DEVICES=0
${
0
}
config_path ckpt_name
ips(optional)
"
exit
-1
exit
-1
fi
fi
config_path
=
$1
config_path
=
$1
ckpt_name
=
$2
ckpt_name
=
$2
ips
=
$3
if
[
!
$ips
]
;
then
ips_config
=
else
ips_config
=
"--ips="
${
ips
}
fi
mkdir
-p
exp
mkdir
-p
exp
...
@@ -37,7 +44,7 @@ python3 -u ${BIN_DIR}/train.py \
...
@@ -37,7 +44,7 @@ python3 -u ${BIN_DIR}/train.py \
--benchmark-batch-size
${
benchmark_batch_size
}
\
--benchmark-batch-size
${
benchmark_batch_size
}
\
--benchmark-max-step
${
benchmark_max_step
}
--benchmark-max-step
${
benchmark_max_step
}
else
else
python3
-m
paddle.distributed.launch
--gpus
=
${
CUDA_VISIBLE_DEVICES
}
${
BIN_DIR
}
/train.py
\
python3
-m
paddle.distributed.launch
--gpus
=
${
CUDA_VISIBLE_DEVICES
}
${
ips_config
}
${
BIN_DIR
}
/train.py
\
--ngpu
${
ngpu
}
\
--ngpu
${
ngpu
}
\
--seed
${
seed
}
\
--seed
${
seed
}
\
--config
${
config_path
}
\
--config
${
config_path
}
\
...
...
examples/tiny/asr1/run.sh
浏览文件 @
9aa868d1
...
@@ -2,10 +2,11 @@
...
@@ -2,10 +2,11 @@
set
-e
set
-e
source
path.sh
source
path.sh
gpus
=
0
gpus
=
4
stage
=
0
stage
=
0
stop_stage
=
50
stop_stage
=
50
conf_path
=
conf/transformer.yaml
conf_path
=
conf/transformer.yaml
ips
=
#xx.xx.xx.xx,xx.xx.xx.xx
decode_conf_path
=
conf/tuning/decode.yaml
decode_conf_path
=
conf/tuning/decode.yaml
avg_num
=
1
avg_num
=
1
...
@@ -22,7 +23,7 @@ fi
...
@@ -22,7 +23,7 @@ fi
if
[
${
stage
}
-le
1
]
&&
[
${
stop_stage
}
-ge
1
]
;
then
if
[
${
stage
}
-le
1
]
&&
[
${
stop_stage
}
-ge
1
]
;
then
# train model, all `ckpt` under `exp` dir
# train model, all `ckpt` under `exp` dir
CUDA_VISIBLE_DEVICES
=
${
gpus
}
./local/train.sh
${
conf_path
}
${
ckpt
}
CUDA_VISIBLE_DEVICES
=
${
gpus
}
./local/train.sh
${
conf_path
}
${
ckpt
}
${
ips
}
fi
fi
if
[
${
stage
}
-le
2
]
&&
[
${
stop_stage
}
-ge
2
]
;
then
if
[
${
stage
}
-le
2
]
&&
[
${
stop_stage
}
-ge
2
]
;
then
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录