diff --git a/demos/audio_searching/src/operations/load.py b/demos/audio_searching/src/operations/load.py
index 0d9edb7846198f4e0e9111b642c0c55d6ff2dbb9..d1ea00576ec4fff72e9d1554ca2514e284ec9169 100644
--- a/demos/audio_searching/src/operations/load.py
+++ b/demos/audio_searching/src/operations/load.py
@@ -26,8 +26,9 @@ def get_audios(path):
     """
     supported_formats = [".wav", ".mp3", ".ogg", ".flac", ".m4a"]
     return [
-        item for sublist in [[os.path.join(dir, file) for file in files]
-                             for dir, _, files in list(os.walk(path))]
+        item
+        for sublist in [[os.path.join(dir, file) for file in files]
+                        for dir, _, files in list(os.walk(path))]
         for item in sublist if os.path.splitext(item)[1] in supported_formats
     ]
 
diff --git a/examples/aishell/asr0/local/train.sh b/examples/aishell/asr0/local/train.sh
index 54c642b637d62c9ac0b6be54e1a8d125d8efaf96..102c051c164e9ca10c26e6baa4e7d40ca64a9292 100755
--- a/examples/aishell/asr0/local/train.sh
+++ b/examples/aishell/asr0/local/train.sh
@@ -20,12 +20,21 @@ if [ ${seed} != 0 ]; then
     export FLAGS_cudnn_deterministic=True
 fi
 
+if [ ${ngpu} == 0 ]; then
 python3 -u ${BIN_DIR}/train.py \
 --ngpu ${ngpu} \
 --config ${config_path} \
 --output exp/${ckpt_name} \
 --model_type ${model_type} \
 --seed ${seed}
+else
+python3 -m paddle.distributed.launch --gpus=${CUDA_VISIBLE_DEVICES} ${BIN_DIR}/train.py \
+--ngpu ${ngpu} \
+--config ${config_path} \
+--output exp/${ckpt_name} \
+--model_type ${model_type} \
+--seed ${seed}
+fi
 
 if [ ${seed} != 0 ]; then
     unset FLAGS_cudnn_deterministic
diff --git a/examples/aishell/asr1/local/train.sh b/examples/aishell/asr1/local/train.sh
index 1c8593bddf5499f50026bd504470114c642055b0..5617f7efe5cca2004bd6b6aae8fc8291edf22f34 100755
--- a/examples/aishell/asr1/local/train.sh
+++ b/examples/aishell/asr1/local/train.sh
@@ -27,14 +27,25 @@ ckpt_name=$2
 
 mkdir -p exp
 
+if [ ${ngpu} == 0 ]; then
 python3 -u ${BIN_DIR}/train.py \
+--ngpu ${ngpu} \
 --seed ${seed} \
+--config ${config_path} \
+--output exp/${ckpt_name} \
+--profiler-options "${profiler_options}" \
+--benchmark-batch-size ${benchmark_batch_size} \
+--benchmark-max-step ${benchmark_max_step}
+else
+python3 -m paddle.distributed.launch --gpus=${CUDA_VISIBLE_DEVICES} ${BIN_DIR}/train.py \
 --ngpu ${ngpu} \
+--seed ${seed} \
 --config ${config_path} \
 --output exp/${ckpt_name} \
 --profiler-options "${profiler_options}" \
 --benchmark-batch-size ${benchmark_batch_size} \
 --benchmark-max-step ${benchmark_max_step}
+fi
 
 
 if [ ${seed} != 0 ]; then
diff --git a/examples/callcenter/asr1/local/train.sh b/examples/callcenter/asr1/local/train.sh
index 3e92fd1624db300b69dfe454cb9c0aee4eac8874..03b4588e30cb92c014f18d1509354f3caaa2b311 100755
--- a/examples/callcenter/asr1/local/train.sh
+++ b/examples/callcenter/asr1/local/train.sh
@@ -21,11 +21,19 @@ if [ ${seed} != 0 ]; then
     export FLAGS_cudnn_deterministic=True
 fi
 
+if [ ${ngpu} == 0 ]; then
 python3 -u ${BIN_DIR}/train.py \
 --ngpu ${ngpu} \
 --config ${config_path} \
 --output exp/${ckpt_name} \
 --seed ${seed}
+else
+python3 -m paddle.distributed.launch --gpus=${CUDA_VISIBLE_DEVICES} ${BIN_DIR}/train.py \
+--ngpu ${ngpu} \
+--config ${config_path} \
+--output exp/${ckpt_name} \
+--seed ${seed}
+fi
 
 if [ ${seed} != 0 ]; then
     unset FLAGS_cudnn_deterministic
diff --git a/examples/librispeech/asr0/local/train.sh b/examples/librispeech/asr0/local/train.sh
index 0479398ffe96cef6fd9db1f1ebb1b35ab1099a56..50d1d1922b54005a80d3b90b17e1ea6def5a438a 100755
--- a/examples/librispeech/asr0/local/train.sh
+++ b/examples/librispeech/asr0/local/train.sh
@@ -20,12 +20,21 @@ if [ ${seed} != 0 ]; then
     export FLAGS_cudnn_deterministic=True
 fi
 
+if [ ${ngpu} == 0 ]; then
 python3 -u ${BIN_DIR}/train.py \
 --ngpu ${ngpu} \
 --config ${config_path} \
 --output exp/${ckpt_name} \
 --model_type ${model_type} \
 --seed ${seed}
+else
+python3 -m paddle.distributed.launch --gpus=${CUDA_VISIBLE_DEVICES} ${BIN_DIR}/train.py \
+--ngpu ${ngpu} \
+--config ${config_path} \
+--output exp/${ckpt_name} \
+--model_type ${model_type} \
+--seed ${seed}
+fi
 
 if [ ${seed} != 0 ]; then
     unset FLAGS_cudnn_deterministic
diff --git a/examples/librispeech/asr1/local/train.sh b/examples/librispeech/asr1/local/train.sh
index 275d3a4905654a685a65e1341d2e611bf215fefc..3860d85cf8c7a65810c2565a358aec2f537bde6b 100755
--- a/examples/librispeech/asr1/local/train.sh
+++ b/examples/librispeech/asr1/local/train.sh
@@ -22,11 +22,19 @@ fi
 # export FLAGS_cudnn_exhaustive_search=true
 # export FLAGS_conv_workspace_size_limit=4000
 
+if [ ${ngpu} == 0 ]; then
 python3 -u ${BIN_DIR}/train.py \
 --ngpu ${ngpu} \
 --config ${config_path} \
 --output exp/${ckpt_name} \
 --seed ${seed}
+else
+python3 -m paddle.distributed.launch --gpus=${CUDA_VISIBLE_DEVICES} ${BIN_DIR}/train.py \
+--ngpu ${ngpu} \
+--config ${config_path} \
+--output exp/${ckpt_name} \
+--seed ${seed}
+fi
 
 if [ ${seed} != 0 ]; then
     unset FLAGS_cudnn_deterministic
diff --git a/examples/librispeech/asr2/local/train.sh b/examples/librispeech/asr2/local/train.sh
index 898391f4e4c03ed7e6e4290b65f8b4f33ff39a51..560424ea4fe5b79260d12b3678cae01a16df6226 100755
--- a/examples/librispeech/asr2/local/train.sh
+++ b/examples/librispeech/asr2/local/train.sh
@@ -19,12 +19,21 @@ if [ ${seed} != 0 ]; then
     export FLAGS_cudnn_deterministic=True
 fi
 
+if [ ${ngpu} == 0 ]; then
 python3 -u ${BIN_DIR}/train.py \
+--ngpu ${ngpu} \
 --model-name u2_kaldi \
+--config ${config_path} \
+--output exp/${ckpt_name} \
+--seed ${seed}
+else
+python3 -m paddle.distributed.launch --gpus=${CUDA_VISIBLE_DEVICES} ${BIN_DIR}/train.py \
 --ngpu ${ngpu} \
+--model-name u2_kaldi \
 --config ${config_path} \
 --output exp/${ckpt_name} \
 --seed ${seed}
+fi
 
 if [ ${seed} != 0 ]; then
     unset FLAGS_cudnn_deterministic
diff --git a/examples/ted_en_zh/st0/local/train.sh b/examples/ted_en_zh/st0/local/train.sh
index e366376bb82da7dbefb2744cf1099949a5fadf06..ad00653b70b366addcab126f1e63fc64aec1b37c 100755
--- a/examples/ted_en_zh/st0/local/train.sh
+++ b/examples/ted_en_zh/st0/local/train.sh
@@ -19,11 +19,19 @@ if [ ${seed} != 0 ]; then
     export FLAGS_cudnn_deterministic=True
 fi
 
+if [ ${ngpu} == 0 ]; then
 python3 -u ${BIN_DIR}/train.py \
 --ngpu ${ngpu} \
 --config ${config_path} \
 --output exp/${ckpt_name} \
 --seed ${seed}
+else
+python3 -m paddle.distributed.launch --gpus=${CUDA_VISIBLE_DEVICES} ${BIN_DIR}/train.py \
+--ngpu ${ngpu} \
+--config ${config_path} \
+--output exp/${ckpt_name} \
+--seed ${seed}
+fi
 
 if [ ${seed} != 0 ]; then
     unset FLAGS_cudnn_deterministic
diff --git a/examples/ted_en_zh/st1/local/train.sh b/examples/ted_en_zh/st1/local/train.sh
index a8e4acaa01428c13ab2ba36b73c706119160c3bf..5da64e99c95af8e23c9f027080cddcd72c0a4d17 100755
--- a/examples/ted_en_zh/st1/local/train.sh
+++ b/examples/ted_en_zh/st1/local/train.sh
@@ -20,12 +20,21 @@ if [ ${seed} != 0 ]; then
     export FLAGS_cudnn_deterministic=True
 fi
 
+if [ ${ngpu} == 0 ]; then
 python3 -u ${BIN_DIR}/train.py \
 --ngpu ${ngpu} \
 --config ${config_path} \
 --output exp/${ckpt_name} \
 --checkpoint_path "${ckpt_path}" \
 --seed ${seed}
+else
+python3 -m paddle.distributed.launch --gpus=${CUDA_VISIBLE_DEVICES} ${BIN_DIR}/train.py \
+--ngpu ${ngpu} \
+--config ${config_path} \
+--output exp/${ckpt_name} \
+--checkpoint_path "${ckpt_path}" \
+--seed ${seed}
+fi
 
 if [ ${seed} != 0 ]; then
     unset FLAGS_cudnn_deterministic
@@ -36,4 +45,4 @@ if [ $? -ne 0 ]; then
     exit 1
 fi
 
-exit 0
\ No newline at end of file
+exit 0
diff --git a/examples/timit/asr1/local/train.sh b/examples/timit/asr1/local/train.sh
index 9b3fa17750262e775100313b5386d9150ae011b7..661407582cc2776e1676249cbe002bcef8c3c2e7 100755
--- a/examples/timit/asr1/local/train.sh
+++ b/examples/timit/asr1/local/train.sh
@@ -19,11 +19,19 @@ if [ ${seed} != 0 ]; then
     export FLAGS_cudnn_deterministic=True
 fi
 
+if [ ${ngpu} == 0 ]; then
 python3 -u ${BIN_DIR}/train.py \
 --ngpu ${ngpu} \
 --config ${config_path} \
 --output exp/${ckpt_name} \
 --seed ${seed}
+else
+python3 -m paddle.distributed.launch --gpus=${CUDA_VISIBLE_DEVICES} ${BIN_DIR}/train.py \
+--ngpu ${ngpu} \
+--config ${config_path} \
+--output exp/${ckpt_name} \
+--seed ${seed}
+fi
 
 if [ ${seed} != 0 ]; then
     unset FLAGS_cudnn_deterministic
diff --git a/examples/tiny/asr0/local/train.sh b/examples/tiny/asr0/local/train.sh
index a69b6ddb90ee9ade0320ed88f91a6d1ba25e93a2..9060be674e5804005de6b23c2575bd4888efef00 100755
--- a/examples/tiny/asr0/local/train.sh
+++ b/examples/tiny/asr0/local/train.sh
@@ -26,6 +26,7 @@ model_type=$3
 
 mkdir -p exp
 
+if [ ${ngpu} == 0 ]; then
 python3 -u ${BIN_DIR}/train.py \
 --ngpu ${ngpu} \
 --config ${config_path} \
@@ -33,6 +34,15 @@ python3 -u ${BIN_DIR}/train.py \
 --model_type ${model_type} \
 --profiler-options "${profiler_options}" \
 --seed ${seed}
+else
+python3 -m paddle.distributed.launch --gpus=${CUDA_VISIBLE_DEVICES} ${BIN_DIR}/train.py \
+--ngpu ${ngpu} \
+--config ${config_path} \
+--output exp/${ckpt_name} \
+--model_type ${model_type} \
+--profiler-options "${profiler_options}" \
+--seed ${seed}
+fi
 
 if [ ${seed} != 0 ]; then
     unset FLAGS_cudnn_deterministic
diff --git a/examples/tiny/asr1/local/train.sh b/examples/tiny/asr1/local/train.sh
index 1c8593bddf5499f50026bd504470114c642055b0..5617f7efe5cca2004bd6b6aae8fc8291edf22f34 100755
--- a/examples/tiny/asr1/local/train.sh
+++ b/examples/tiny/asr1/local/train.sh
@@ -27,14 +27,25 @@ ckpt_name=$2
 
 mkdir -p exp
 
+if [ ${ngpu} == 0 ]; then
 python3 -u ${BIN_DIR}/train.py \
+--ngpu ${ngpu} \
 --seed ${seed} \
+--config ${config_path} \
+--output exp/${ckpt_name} \
+--profiler-options "${profiler_options}" \
+--benchmark-batch-size ${benchmark_batch_size} \
+--benchmark-max-step ${benchmark_max_step}
+else
+python3 -m paddle.distributed.launch --gpus=${CUDA_VISIBLE_DEVICES} ${BIN_DIR}/train.py \
 --ngpu ${ngpu} \
+--seed ${seed} \
 --config ${config_path} \
 --output exp/${ckpt_name} \
 --profiler-options "${profiler_options}" \
 --benchmark-batch-size ${benchmark_batch_size} \
 --benchmark-max-step ${benchmark_max_step}
+fi
 
 
 if [ ${seed} != 0 ]; then
diff --git a/paddlespeech/s2t/exps/deepspeech2/bin/train.py b/paddlespeech/s2t/exps/deepspeech2/bin/train.py
index 09e8662f1ce75ff351d7981dc1c4382082f9b61a..e2c68d4be9ae2de9875c4a95d06c6542fd397ce3 100644
--- a/paddlespeech/s2t/exps/deepspeech2/bin/train.py
+++ b/paddlespeech/s2t/exps/deepspeech2/bin/train.py
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """Trainer for DeepSpeech2 model."""
-from paddle import distributed as dist
 from yacs.config import CfgNode
 
 from paddlespeech.s2t.exps.deepspeech2.model import DeepSpeech2Trainer as Trainer
@@ -27,10 +26,7 @@ def main_sp(config, args):
 
 
 def main(config, args):
-    if args.ngpu > 1:
-        dist.spawn(main_sp, args=(config, args), nprocs=args.ngpu)
-    else:
-        main_sp(config, args)
+    main_sp(config, args)
 
 
 if __name__ == "__main__":
diff --git a/paddlespeech/s2t/exps/u2/bin/train.py b/paddlespeech/s2t/exps/u2/bin/train.py
index 53c223283f180397e69b6a5290a4572e5a76cc41..dc3a87c1618a508b1c4d785858e856af06b3614a 100644
--- a/paddlespeech/s2t/exps/u2/bin/train.py
+++ b/paddlespeech/s2t/exps/u2/bin/train.py
@@ -15,7 +15,6 @@
 import cProfile
 import os
 
-from paddle import distributed as dist
 from yacs.config import CfgNode
 
 from paddlespeech.s2t.exps.u2.model import U2Trainer as Trainer
@@ -32,10 +31,7 @@ def main_sp(config, args):
 
 
 def main(config, args):
-    if args.ngpu > 1:
-        dist.spawn(main_sp, args=(config, args), nprocs=args.ngpu)
-    else:
-        main_sp(config, args)
+    main_sp(config, args)
 
 
 if __name__ == "__main__":
diff --git a/paddlespeech/s2t/exps/u2_kaldi/bin/train.py b/paddlespeech/s2t/exps/u2_kaldi/bin/train.py
index fcfc05a8aea553d154ebde5289560d9c818672ff..b11da7154732bd9b6e5267028f1efca977895e2d 100644
--- a/paddlespeech/s2t/exps/u2_kaldi/bin/train.py
+++ b/paddlespeech/s2t/exps/u2_kaldi/bin/train.py
@@ -15,7 +15,6 @@
 import cProfile
 import os
 
-from paddle import distributed as dist
 from yacs.config import CfgNode
 
 from paddlespeech.s2t.training.cli import default_argument_parser
@@ -36,10 +35,7 @@ def main_sp(config, args):
 
 
 def main(config, args):
-    if args.ngpu > 1:
-        dist.spawn(main_sp, args=(config, args), nprocs=args.ngpu)
-    else:
-        main_sp(config, args)
+    main_sp(config, args)
 
 
 if __name__ == "__main__":
diff --git a/paddlespeech/s2t/exps/u2_st/bin/train.py b/paddlespeech/s2t/exps/u2_st/bin/train.py
index 4dec9ec8ae5da869f8b78282ea19c7a6964610db..574942e5af0f6cd5dccdcc0f83d46ffa5378b3a0 100644
--- a/paddlespeech/s2t/exps/u2_st/bin/train.py
+++ b/paddlespeech/s2t/exps/u2_st/bin/train.py
@@ -15,7 +15,6 @@
 import cProfile
 import os
 
-from paddle import distributed as dist
 from yacs.config import CfgNode
 
 from paddlespeech.s2t.exps.u2_st.model import U2STTrainer as Trainer
@@ -30,10 +29,7 @@ def main_sp(config, args):
 
 
 def main(config, args):
-    if args.ngpu > 1:
-        dist.spawn(main_sp, args=(config, args), nprocs=args.ngpu)
-    else:
-        main_sp(config, args)
+    main_sp(config, args)
 
 
 if __name__ == "__main__":
diff --git a/paddlespeech/s2t/io/sampler.py b/paddlespeech/s2t/io/sampler.py
index ac55af1236f11d175e9e7717220980cf95c7d79b..89752bb9fdb98faecc0ccc5b8f59ea1f09efc8b6 100644
--- a/paddlespeech/s2t/io/sampler.py
+++ b/paddlespeech/s2t/io/sampler.py
@@ -51,7 +51,7 @@ def _batch_shuffle(indices, batch_size, epoch, clipped=False):
     """
     rng = np.random.RandomState(epoch)
     shift_len = rng.randint(0, batch_size - 1)
-    batch_indices = list(zip(* [iter(indices[shift_len:])] * batch_size))
+    batch_indices = list(zip(*[iter(indices[shift_len:])] * batch_size))
     rng.shuffle(batch_indices)
     batch_indices = [item for batch in batch_indices for item in batch]
     assert clipped is False
diff --git a/paddlespeech/t2s/modules/transformer/repeat.py b/paddlespeech/t2s/modules/transformer/repeat.py
index 1e946adf7e469fd6c05c2a8c8d9e6f16f638524e..2073a78b9330201dba15b42badf77cee0caceab1 100644
--- a/paddlespeech/t2s/modules/transformer/repeat.py
+++ b/paddlespeech/t2s/modules/transformer/repeat.py
@@ -36,4 +36,4 @@ def repeat(N, fn):
     Returns:
         MultiSequential: Repeated model instance.
     """
-    return MultiSequential(* [fn(n) for n in range(N)])
+    return MultiSequential(*[fn(n) for n in range(N)])