From fe83adfbcbeb6b6d4cf05ce77c48068b6ab854b8 Mon Sep 17 00:00:00 2001 From: Hui Zhang Date: Thu, 25 Nov 2021 08:11:01 +0000 Subject: [PATCH] nproc to ngpu --- docs/source/tts/advanced_usage.md | 2 +- examples/aishell/asr0/local/export.sh | 2 +- examples/aishell/asr0/local/test.sh | 2 +- examples/aishell/asr0/local/test_export.sh | 2 +- examples/aishell/asr0/local/test_hub.sh | 2 +- examples/aishell/asr0/local/train.sh | 2 +- examples/aishell/asr1/local/align.sh | 2 +- examples/aishell/asr1/local/export.sh | 2 +- examples/aishell/asr1/local/test.sh | 4 ++-- examples/aishell/asr1/local/test_hub.sh | 2 +- examples/aishell/asr1/local/train.sh | 2 +- examples/callcenter/asr1/local/align.sh | 2 +- examples/callcenter/asr1/local/export.sh | 2 +- examples/callcenter/asr1/local/test.sh | 4 ++-- examples/callcenter/asr1/local/train.sh | 2 +- examples/librispeech/asr0/local/export.sh | 2 +- examples/librispeech/asr0/local/test.sh | 2 +- examples/librispeech/asr0/local/test_hub.sh | 2 +- examples/librispeech/asr0/local/train.sh | 2 +- examples/librispeech/asr1/local/align.sh | 2 +- examples/librispeech/asr1/local/export.sh | 2 +- examples/librispeech/asr1/local/test.sh | 6 +++--- examples/librispeech/asr1/local/test_hub.sh | 2 +- examples/librispeech/asr1/local/train.sh | 2 +- examples/librispeech/asr2/local/align.sh | 2 +- examples/librispeech/asr2/local/export.sh | 2 +- examples/librispeech/asr2/local/test.sh | 2 +- examples/librispeech/asr2/local/train.sh | 2 +- examples/other/1xt2x/aishell/local/test.sh | 2 +- examples/other/1xt2x/baidu_en8k/local/test.sh | 2 +- examples/other/1xt2x/librispeech/local/test.sh | 2 +- examples/other/1xt2x/src_deepspeech2x/test_model.py | 2 +- examples/ted_en_zh/st0/local/test.sh | 2 +- examples/ted_en_zh/st0/local/train.sh | 2 +- examples/ted_en_zh/st1/local/test.sh | 2 +- examples/ted_en_zh/st1/local/train_finetune.sh | 2 +- examples/timit/asr1/local/align.sh | 2 +- examples/timit/asr1/local/export.sh | 2 +- examples/timit/asr1/local/test.sh | 6 +++--- examples/timit/asr1/local/train.sh | 2 +- examples/tiny/asr0/local/export.sh | 2 +- examples/tiny/asr0/local/test.sh | 2 +- examples/tiny/asr0/local/train.sh | 2 +- examples/tiny/asr1/local/align.sh | 2 +- examples/tiny/asr1/local/export.sh | 2 +- examples/tiny/asr1/local/test.sh | 4 ++-- examples/tiny/asr1/local/train.sh | 2 +- examples/wenetspeech/asr1/local/test.sh | 4 ++-- paddlespeech/s2t/decoders/recog.py | 1 - paddlespeech/s2t/exps/deepspeech2/bin/test_hub.py | 2 +- paddlespeech/s2t/exps/deepspeech2/bin/train.py | 4 ++-- paddlespeech/s2t/exps/u2/bin/test_wav.py | 2 +- paddlespeech/s2t/exps/u2/bin/train.py | 4 ++-- paddlespeech/s2t/exps/u2/model.py | 4 ++-- paddlespeech/s2t/exps/u2_kaldi/bin/train.py | 4 ++-- paddlespeech/s2t/exps/u2_kaldi/model.py | 4 ++-- paddlespeech/s2t/exps/u2_st/bin/train.py | 4 ++-- paddlespeech/s2t/training/cli.py | 4 ++-- paddlespeech/s2t/training/trainer.py | 8 ++++---- .../t2s/exps/voice_cloning/tacotron2_ge2e/train.py | 2 +- paddlespeech/t2s/training/cli.py | 2 +- paddlespeech/t2s/training/experiment.py | 4 ++-- .../punctuation_restoration/chinese/local/test.sh | 8 +------- .../punctuation_restoration/chinese/local/train.sh | 8 +------- .../punctuation_restoration/english/local/test.sh | 7 +------ .../punctuation_restoration/english/local/train.sh | 8 +------- .../text/speechtask/punctuation_restoration/bin/train.py | 4 ++-- .../punctuation_restoration/training/trainer.py | 6 +++--- .../punctuation_restoration/utils/default_parser.py | 6 ++---- 
tests/benchmark/conformer/run_benchmark.sh | 2 +- tests/chains/ds2/ds2_params_lite_train_infer.txt | 4 ++-- tests/chains/ds2/ds2_params_whole_train_infer.txt | 4 ++-- tests/chains/ds2/speedyspeech_params_lite.txt | 2 +- tests/chains/ds2/test.sh | 2 +- .../speedyspeech/speedyspeech_params_lite_multi_gpu.txt | 2 +- .../speedyspeech/speedyspeech_params_lite_single_gpu.txt | 2 +- .../speedyspeech/speedyspeech_params_whole_multi_gpu.txt | 2 +- .../speedyspeech/speedyspeech_params_whole_single_gpu.txt | 2 +- 78 files changed, 102 insertions(+), 128 deletions(-) diff --git a/docs/source/tts/advanced_usage.md b/docs/source/tts/advanced_usage.md index 0540a1c3..04088964 100644 --- a/docs/source/tts/advanced_usage.md +++ b/docs/source/tts/advanced_usage.md @@ -290,7 +290,7 @@ The following is the basic `ArgumentParser`: 1. `--config` is used to support configuration file parsing, and the configuration file itself handles the unique options of each experiment. 2. `--train-metadata` is the path to the training data. 3. `--output-dir` is the dir to save the training results.(if there are checkpoints in `checkpoints/` of `--output-dir` , it's defalut to reload the newest checkpoint to train) -4. `--device` and `--nprocs` determine operation modes,`--device` specifies the type of running device, whether to run on `cpu` or `gpu`. `--nprocs` refers to the number of training processes. If `nprocs` > 1, it means that multi process parallel training is used. (Note: currently only GPU multi card multi process training is supported.) +4. `--ngpu` determine operation modes,`--ngpu` refers to the number of training processes. If `ngpu` > 0, it means using GPU, else CPU is used. Developers can refer to the examples in `examples` to write the default configuration file when adding new experiments. diff --git a/examples/aishell/asr0/local/export.sh b/examples/aishell/asr0/local/export.sh index a5e62c28..426a72fe 100755 --- a/examples/aishell/asr0/local/export.sh +++ b/examples/aishell/asr0/local/export.sh @@ -14,7 +14,7 @@ jit_model_export_path=$3 model_type=$4 python3 -u ${BIN_DIR}/export.py \ ---nproc ${ngpu} \ +--ngpu ${ngpu} \ --config ${config_path} \ --checkpoint_path ${ckpt_path_prefix} \ --export_path ${jit_model_export_path} \ diff --git a/examples/aishell/asr0/local/test.sh b/examples/aishell/asr0/local/test.sh index 2ae0740b..8cbff235 100755 --- a/examples/aishell/asr0/local/test.sh +++ b/examples/aishell/asr0/local/test.sh @@ -19,7 +19,7 @@ if [ $? -ne 0 ]; then fi python3 -u ${BIN_DIR}/test.py \ ---nproc ${ngpu} \ +--ngpu ${ngpu} \ --config ${config_path} \ --result_file ${ckpt_prefix}.rsl \ --checkpoint_path ${ckpt_prefix} \ diff --git a/examples/aishell/asr0/local/test_export.sh b/examples/aishell/asr0/local/test_export.sh index f0a30ce5..4f5e5c8b 100755 --- a/examples/aishell/asr0/local/test_export.sh +++ b/examples/aishell/asr0/local/test_export.sh @@ -19,7 +19,7 @@ if [ $? -ne 0 ]; then fi python3 -u ${BIN_DIR}/test_export.py \ ---nproc ${ngpu} \ +--ngpu ${ngpu} \ --config ${config_path} \ --result_file ${jit_model_export_path}.rsl \ --export_path ${jit_model_export_path} \ diff --git a/examples/aishell/asr0/local/test_hub.sh b/examples/aishell/asr0/local/test_hub.sh index d01496c4..b9cb7fa0 100755 --- a/examples/aishell/asr0/local/test_hub.sh +++ b/examples/aishell/asr0/local/test_hub.sh @@ -20,7 +20,7 @@ if [ $? 
-ne 0 ]; then fi python3 -u ${BIN_DIR}/test_hub.py \ ---nproc ${ngpu} \ +--ngpu ${ngpu} \ --config ${config_path} \ --result_file ${ckpt_prefix}.rsl \ --checkpoint_path ${ckpt_prefix} \ diff --git a/examples/aishell/asr0/local/train.sh b/examples/aishell/asr0/local/train.sh index edbf3383..54c642b6 100755 --- a/examples/aishell/asr0/local/train.sh +++ b/examples/aishell/asr0/local/train.sh @@ -21,7 +21,7 @@ if [ ${seed} != 0 ]; then fi python3 -u ${BIN_DIR}/train.py \ ---nproc ${ngpu} \ +--ngpu ${ngpu} \ --config ${config_path} \ --output exp/${ckpt_name} \ --model_type ${model_type} \ diff --git a/examples/aishell/asr1/local/align.sh b/examples/aishell/asr1/local/align.sh index 279461aa..c65d611c 100755 --- a/examples/aishell/asr1/local/align.sh +++ b/examples/aishell/asr1/local/align.sh @@ -18,7 +18,7 @@ mkdir -p ${output_dir} # align dump in `result_file` # .tier, .TextGrid dump in `dir of result_file` python3 -u ${BIN_DIR}/alignment.py \ ---nproc ${ngpu} \ +--ngpu ${ngpu} \ --config ${config_path} \ --result_file ${output_dir}/${type}.align \ --checkpoint_path ${ckpt_prefix} \ diff --git a/examples/aishell/asr1/local/export.sh b/examples/aishell/asr1/local/export.sh index b562218e..6b646b46 100755 --- a/examples/aishell/asr1/local/export.sh +++ b/examples/aishell/asr1/local/export.sh @@ -13,7 +13,7 @@ ckpt_path_prefix=$2 jit_model_export_path=$3 python3 -u ${BIN_DIR}/export.py \ ---nproc ${ngpu} \ +--ngpu ${ngpu} \ --config ${config_path} \ --checkpoint_path ${ckpt_path_prefix} \ --export_path ${jit_model_export_path} diff --git a/examples/aishell/asr1/local/test.sh b/examples/aishell/asr1/local/test.sh index 47bd2f63..da159de7 100755 --- a/examples/aishell/asr1/local/test.sh +++ b/examples/aishell/asr1/local/test.sh @@ -34,7 +34,7 @@ for type in attention ctc_greedy_search; do output_dir=${ckpt_prefix} mkdir -p ${output_dir} python3 -u ${BIN_DIR}/test.py \ - --nproc ${ngpu} \ + --ngpu ${ngpu} \ --config ${config_path} \ --result_file ${output_dir}/${type}.rsl \ --checkpoint_path ${ckpt_prefix} \ @@ -53,7 +53,7 @@ for type in ctc_prefix_beam_search attention_rescoring; do output_dir=${ckpt_prefix} mkdir -p ${output_dir} python3 -u ${BIN_DIR}/test.py \ - --nproc ${ngpu} \ + --ngpu ${ngpu} \ --config ${config_path} \ --result_file ${output_dir}/${type}.rsl \ --checkpoint_path ${ckpt_prefix} \ diff --git a/examples/aishell/asr1/local/test_hub.sh b/examples/aishell/asr1/local/test_hub.sh index 6e78ec78..0fd30901 100755 --- a/examples/aishell/asr1/local/test_hub.sh +++ b/examples/aishell/asr1/local/test_hub.sh @@ -29,7 +29,7 @@ for type in attention_rescoring; do output_dir=${ckpt_prefix} mkdir -p ${output_dir} python3 -u ${BIN_DIR}/test_hub.py \ - --nproc ${ngpu} \ + --ngpu ${ngpu} \ --config ${config_path} \ --result_file ${output_dir}/${type}.rsl \ --checkpoint_path ${ckpt_prefix} \ diff --git a/examples/aishell/asr1/local/train.sh b/examples/aishell/asr1/local/train.sh index 71af3a00..1c8593bd 100755 --- a/examples/aishell/asr1/local/train.sh +++ b/examples/aishell/asr1/local/train.sh @@ -29,7 +29,7 @@ mkdir -p exp python3 -u ${BIN_DIR}/train.py \ --seed ${seed} \ ---nproc ${ngpu} \ +--ngpu ${ngpu} \ --config ${config_path} \ --output exp/${ckpt_name} \ --profiler-options "${profiler_options}" \ diff --git a/examples/callcenter/asr1/local/align.sh b/examples/callcenter/asr1/local/align.sh index b679e2ea..681c77ed 100755 --- a/examples/callcenter/asr1/local/align.sh +++ b/examples/callcenter/asr1/local/align.sh @@ -23,7 +23,7 @@ mkdir -p ${output_dir} # align dump in `result_file` # 
.tier, .TextGrid dump in `dir of result_file` python3 -u ${BIN_DIR}/alignment.py \ ---nproc ${ngpu} \ +--ngpu ${ngpu} \ --config ${config_path} \ --result_file ${output_dir}/${type}.align \ --checkpoint_path ${ckpt_prefix} \ diff --git a/examples/callcenter/asr1/local/export.sh b/examples/callcenter/asr1/local/export.sh index d5f912e9..36de2d66 100755 --- a/examples/callcenter/asr1/local/export.sh +++ b/examples/callcenter/asr1/local/export.sh @@ -13,7 +13,7 @@ ckpt_path_prefix=$2 jit_model_export_path=$3 python3 -u ${BIN_DIR}/export.py \ ---nproc ${ngpu} \ +--ngpu ${ngpu} \ --config ${config_path} \ --checkpoint_path ${ckpt_path_prefix} \ --export_path ${jit_model_export_path} diff --git a/examples/callcenter/asr1/local/test.sh b/examples/callcenter/asr1/local/test.sh index 0aa99e19..fc43c5a2 100755 --- a/examples/callcenter/asr1/local/test.sh +++ b/examples/callcenter/asr1/local/test.sh @@ -28,7 +28,7 @@ for type in attention ctc_greedy_search; do output_dir=${ckpt_prefix} mkdir -p ${output_dir} python3 -u ${BIN_DIR}/test.py \ - --nproc ${ngpu} \ + --ngpu ${ngpu} \ --config ${config_path} \ --result_file ${output_dir}/${type}.rsl \ --checkpoint_path ${ckpt_prefix} \ @@ -47,7 +47,7 @@ for type in ctc_prefix_beam_search attention_rescoring; do output_dir=${ckpt_prefix} mkdir -p ${output_dir} python3 -u ${BIN_DIR}/test.py \ - --nproc ${ngpu} \ + --ngpu ${ngpu} \ --config ${config_path} \ --result_file ${output_dir}/${type}.rsl \ --checkpoint_path ${ckpt_prefix} \ diff --git a/examples/callcenter/asr1/local/train.sh b/examples/callcenter/asr1/local/train.sh index eb8f8662..3e92fd16 100755 --- a/examples/callcenter/asr1/local/train.sh +++ b/examples/callcenter/asr1/local/train.sh @@ -22,7 +22,7 @@ if [ ${seed} != 0 ]; then fi python3 -u ${BIN_DIR}/train.py \ ---nproc ${ngpu} \ +--ngpu ${ngpu} \ --config ${config_path} \ --output exp/${ckpt_name} \ --seed ${seed} diff --git a/examples/librispeech/asr0/local/export.sh b/examples/librispeech/asr0/local/export.sh index a5e62c28..426a72fe 100755 --- a/examples/librispeech/asr0/local/export.sh +++ b/examples/librispeech/asr0/local/export.sh @@ -14,7 +14,7 @@ jit_model_export_path=$3 model_type=$4 python3 -u ${BIN_DIR}/export.py \ ---nproc ${ngpu} \ +--ngpu ${ngpu} \ --config ${config_path} \ --checkpoint_path ${ckpt_path_prefix} \ --export_path ${jit_model_export_path} \ diff --git a/examples/librispeech/asr0/local/test.sh b/examples/librispeech/asr0/local/test.sh index 4d00f30b..a627ef72 100755 --- a/examples/librispeech/asr0/local/test.sh +++ b/examples/librispeech/asr0/local/test.sh @@ -19,7 +19,7 @@ if [ $? -ne 0 ]; then fi python3 -u ${BIN_DIR}/test.py \ ---nproc ${ngpu} \ +--ngpu ${ngpu} \ --config ${config_path} \ --result_file ${ckpt_prefix}.rsl \ --checkpoint_path ${ckpt_prefix} \ diff --git a/examples/librispeech/asr0/local/test_hub.sh b/examples/librispeech/asr0/local/test_hub.sh index 2e32f24a..fd9a603a 100755 --- a/examples/librispeech/asr0/local/test_hub.sh +++ b/examples/librispeech/asr0/local/test_hub.sh @@ -20,7 +20,7 @@ if [ $? 
-ne 0 ]; then fi python3 -u ${BIN_DIR}/test_hub.py \ ---nproc ${ngpu} \ +--ngpu ${ngpu} \ --config ${config_path} \ --result_file ${ckpt_prefix}.rsl \ --checkpoint_path ${ckpt_prefix} \ diff --git a/examples/librispeech/asr0/local/train.sh b/examples/librispeech/asr0/local/train.sh index 519df7fe..0479398f 100755 --- a/examples/librispeech/asr0/local/train.sh +++ b/examples/librispeech/asr0/local/train.sh @@ -21,7 +21,7 @@ if [ ${seed} != 0 ]; then fi python3 -u ${BIN_DIR}/train.py \ ---nproc ${ngpu} \ +--ngpu ${ngpu} \ --config ${config_path} \ --output exp/${ckpt_name} \ --model_type ${model_type} \ diff --git a/examples/librispeech/asr1/local/align.sh b/examples/librispeech/asr1/local/align.sh index 279461aa..c65d611c 100755 --- a/examples/librispeech/asr1/local/align.sh +++ b/examples/librispeech/asr1/local/align.sh @@ -18,7 +18,7 @@ mkdir -p ${output_dir} # align dump in `result_file` # .tier, .TextGrid dump in `dir of result_file` python3 -u ${BIN_DIR}/alignment.py \ ---nproc ${ngpu} \ +--ngpu ${ngpu} \ --config ${config_path} \ --result_file ${output_dir}/${type}.align \ --checkpoint_path ${ckpt_prefix} \ diff --git a/examples/librispeech/asr1/local/export.sh b/examples/librispeech/asr1/local/export.sh index b562218e..6b646b46 100755 --- a/examples/librispeech/asr1/local/export.sh +++ b/examples/librispeech/asr1/local/export.sh @@ -13,7 +13,7 @@ ckpt_path_prefix=$2 jit_model_export_path=$3 python3 -u ${BIN_DIR}/export.py \ ---nproc ${ngpu} \ +--ngpu ${ngpu} \ --config ${config_path} \ --checkpoint_path ${ckpt_path_prefix} \ --export_path ${jit_model_export_path} diff --git a/examples/librispeech/asr1/local/test.sh b/examples/librispeech/asr1/local/test.sh index ceaa77cf..aa06132e 100755 --- a/examples/librispeech/asr1/local/test.sh +++ b/examples/librispeech/asr1/local/test.sh @@ -50,7 +50,7 @@ if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then batch_size=64 fi python3 -u ${BIN_DIR}/test.py \ - --nproc ${ngpu} \ + --ngpu ${ngpu} \ --config ${config_path} \ --result_file ${ckpt_prefix}.${type}.rsl \ --checkpoint_path ${ckpt_prefix} \ @@ -74,7 +74,7 @@ for type in ctc_greedy_search; do batch_size=64 fi python3 -u ${BIN_DIR}/test.py \ - --nproc ${ngpu} \ + --ngpu ${ngpu} \ --config ${config_path} \ --result_file ${ckpt_prefix}.${type}.rsl \ --checkpoint_path ${ckpt_prefix} \ @@ -94,7 +94,7 @@ for type in ctc_prefix_beam_search attention_rescoring; do echo "decoding ${type}" batch_size=1 python3 -u ${BIN_DIR}/test.py \ - --nproc ${ngpu} \ + --ngpu ${ngpu} \ --config ${config_path} \ --result_file ${ckpt_prefix}.${type}.rsl \ --checkpoint_path ${ckpt_prefix} \ diff --git a/examples/librispeech/asr1/local/test_hub.sh b/examples/librispeech/asr1/local/test_hub.sh index dcf242e9..46bd8bc2 100755 --- a/examples/librispeech/asr1/local/test_hub.sh +++ b/examples/librispeech/asr1/local/test_hub.sh @@ -36,7 +36,7 @@ for type in attention_rescoring; do output_dir=${ckpt_prefix} mkdir -p ${output_dir} python3 -u ${BIN_DIR}/test_hub.py \ - --nproc ${ngpu} \ + --ngpu ${ngpu} \ --config ${config_path} \ --result_file ${output_dir}/${type}.rsl \ --checkpoint_path ${ckpt_prefix} \ diff --git a/examples/librispeech/asr1/local/train.sh b/examples/librispeech/asr1/local/train.sh index 8f92c646..275d3a49 100755 --- a/examples/librispeech/asr1/local/train.sh +++ b/examples/librispeech/asr1/local/train.sh @@ -23,7 +23,7 @@ fi # export FLAGS_conv_workspace_size_limit=4000 python3 -u ${BIN_DIR}/train.py \ ---nproc ${ngpu} \ +--ngpu ${ngpu} \ --config ${config_path} \ --output exp/${ckpt_name} \ --seed 
${seed} diff --git a/examples/librispeech/asr2/local/align.sh b/examples/librispeech/asr2/local/align.sh index b45f4a0f..626c3574 100755 --- a/examples/librispeech/asr2/local/align.sh +++ b/examples/librispeech/asr2/local/align.sh @@ -22,7 +22,7 @@ python3 -u ${BIN_DIR}/test.py \ --model-name 'u2_kaldi' \ --run-mode 'align' \ --dict-path ${dict_path} \ ---nproc ${ngpu} \ +--ngpu ${ngpu} \ --config ${config_path} \ --result-file ${output_dir}/${type}.align \ --checkpoint_path ${ckpt_prefix} \ diff --git a/examples/librispeech/asr2/local/export.sh b/examples/librispeech/asr2/local/export.sh index 9c66dc62..1bdce16c 100755 --- a/examples/librispeech/asr2/local/export.sh +++ b/examples/librispeech/asr2/local/export.sh @@ -15,7 +15,7 @@ jit_model_export_path=$3 python3 -u ${BIN_DIR}/test.py \ --model-name 'u2_kaldi' \ --run-mode 'export' \ ---nproc ${ngpu} \ +--ngpu ${ngpu} \ --config ${config_path} \ --checkpoint_path ${ckpt_path_prefix} \ --export_path ${jit_model_export_path} diff --git a/examples/librispeech/asr2/local/test.sh b/examples/librispeech/asr2/local/test.sh index 23670f74..d210f2a8 100755 --- a/examples/librispeech/asr2/local/test.sh +++ b/examples/librispeech/asr2/local/test.sh @@ -76,7 +76,7 @@ for dmethd in attention ctc_greedy_search ctc_prefix_beam_search attention_resco python3 -u ${BIN_DIR}/test.py \ --model-name u2_kaldi \ --run-mode test \ - --nproc ${ngpu} \ + --ngpu ${ngpu} \ --dict-path ${dict} \ --config ${config_path} \ --checkpoint_path ${ckpt_prefix} \ diff --git a/examples/librispeech/asr2/local/train.sh b/examples/librispeech/asr2/local/train.sh index 33b46c20..898391f4 100755 --- a/examples/librispeech/asr2/local/train.sh +++ b/examples/librispeech/asr2/local/train.sh @@ -21,7 +21,7 @@ fi python3 -u ${BIN_DIR}/train.py \ --model-name u2_kaldi \ ---nproc ${ngpu} \ +--ngpu ${ngpu} \ --config ${config_path} \ --output exp/${ckpt_name} \ --seed ${seed} diff --git a/examples/other/1xt2x/aishell/local/test.sh b/examples/other/1xt2x/aishell/local/test.sh index 2ae0740b..8cbff235 100755 --- a/examples/other/1xt2x/aishell/local/test.sh +++ b/examples/other/1xt2x/aishell/local/test.sh @@ -19,7 +19,7 @@ if [ $? -ne 0 ]; then fi python3 -u ${BIN_DIR}/test.py \ ---nproc ${ngpu} \ +--ngpu ${ngpu} \ --config ${config_path} \ --result_file ${ckpt_prefix}.rsl \ --checkpoint_path ${ckpt_prefix} \ diff --git a/examples/other/1xt2x/baidu_en8k/local/test.sh b/examples/other/1xt2x/baidu_en8k/local/test.sh index 4d00f30b..a627ef72 100755 --- a/examples/other/1xt2x/baidu_en8k/local/test.sh +++ b/examples/other/1xt2x/baidu_en8k/local/test.sh @@ -19,7 +19,7 @@ if [ $? -ne 0 ]; then fi python3 -u ${BIN_DIR}/test.py \ ---nproc ${ngpu} \ +--ngpu ${ngpu} \ --config ${config_path} \ --result_file ${ckpt_prefix}.rsl \ --checkpoint_path ${ckpt_prefix} \ diff --git a/examples/other/1xt2x/librispeech/local/test.sh b/examples/other/1xt2x/librispeech/local/test.sh index 4d00f30b..a627ef72 100755 --- a/examples/other/1xt2x/librispeech/local/test.sh +++ b/examples/other/1xt2x/librispeech/local/test.sh @@ -19,7 +19,7 @@ if [ $? 
-ne 0 ]; then fi python3 -u ${BIN_DIR}/test.py \ ---nproc ${ngpu} \ +--ngpu ${ngpu} \ --config ${config_path} \ --result_file ${ckpt_prefix}.rsl \ --checkpoint_path ${ckpt_prefix} \ diff --git a/examples/other/1xt2x/src_deepspeech2x/test_model.py b/examples/other/1xt2x/src_deepspeech2x/test_model.py index a9afc631..82e190d8 100644 --- a/examples/other/1xt2x/src_deepspeech2x/test_model.py +++ b/examples/other/1xt2x/src_deepspeech2x/test_model.py @@ -403,7 +403,7 @@ class DeepSpeech2Tester(DeepSpeech2Trainer): def setup(self): """Setup the experiment. """ - paddle.set_device('gpu' if self.args.nprocs > 0 else 'cpu') + paddle.set_device('gpu' if self.args.ngpu > 0 else 'cpu') self.setup_output_dir() self.setup_checkpointer() diff --git a/examples/ted_en_zh/st0/local/test.sh b/examples/ted_en_zh/st0/local/test.sh index 7235c6f9..a9b18dd9 100755 --- a/examples/ted_en_zh/st0/local/test.sh +++ b/examples/ted_en_zh/st0/local/test.sh @@ -15,7 +15,7 @@ for type in fullsentence; do echo "decoding ${type}" batch_size=32 python3 -u ${BIN_DIR}/test.py \ - --nproc ${ngpu} \ + --ngpu ${ngpu} \ --config ${config_path} \ --result_file ${ckpt_prefix}.${type}.rsl \ --checkpoint_path ${ckpt_prefix} \ diff --git a/examples/ted_en_zh/st0/local/train.sh b/examples/ted_en_zh/st0/local/train.sh index e5fd19dd..e366376b 100755 --- a/examples/ted_en_zh/st0/local/train.sh +++ b/examples/ted_en_zh/st0/local/train.sh @@ -20,7 +20,7 @@ if [ ${seed} != 0 ]; then fi python3 -u ${BIN_DIR}/train.py \ ---nproc ${ngpu} \ +--ngpu ${ngpu} \ --config ${config_path} \ --output exp/${ckpt_name} \ --seed ${seed} diff --git a/examples/ted_en_zh/st1/local/test.sh b/examples/ted_en_zh/st1/local/test.sh index 7235c6f9..a9b18dd9 100755 --- a/examples/ted_en_zh/st1/local/test.sh +++ b/examples/ted_en_zh/st1/local/test.sh @@ -15,7 +15,7 @@ for type in fullsentence; do echo "decoding ${type}" batch_size=32 python3 -u ${BIN_DIR}/test.py \ - --nproc ${ngpu} \ + --ngpu ${ngpu} \ --config ${config_path} \ --result_file ${ckpt_prefix}.${type}.rsl \ --checkpoint_path ${ckpt_prefix} \ diff --git a/examples/ted_en_zh/st1/local/train_finetune.sh b/examples/ted_en_zh/st1/local/train_finetune.sh index 36701121..e54c7fff 100755 --- a/examples/ted_en_zh/st1/local/train_finetune.sh +++ b/examples/ted_en_zh/st1/local/train_finetune.sh @@ -21,7 +21,7 @@ if [ ${seed} != 0 ]; then fi python3 -u ${BIN_DIR}/train.py \ ---nproc ${ngpu} \ +--ngpu ${ngpu} \ --config ${config_path} \ --output exp/${ckpt_name} \ --checkpoint_path ${ckpt_path} \ diff --git a/examples/timit/asr1/local/align.sh b/examples/timit/asr1/local/align.sh index 279461aa..c65d611c 100755 --- a/examples/timit/asr1/local/align.sh +++ b/examples/timit/asr1/local/align.sh @@ -18,7 +18,7 @@ mkdir -p ${output_dir} # align dump in `result_file` # .tier, .TextGrid dump in `dir of result_file` python3 -u ${BIN_DIR}/alignment.py \ ---nproc ${ngpu} \ +--ngpu ${ngpu} \ --config ${config_path} \ --result_file ${output_dir}/${type}.align \ --checkpoint_path ${ckpt_prefix} \ diff --git a/examples/timit/asr1/local/export.sh b/examples/timit/asr1/local/export.sh index b562218e..6b646b46 100755 --- a/examples/timit/asr1/local/export.sh +++ b/examples/timit/asr1/local/export.sh @@ -13,7 +13,7 @@ ckpt_path_prefix=$2 jit_model_export_path=$3 python3 -u ${BIN_DIR}/export.py \ ---nproc ${ngpu} \ +--ngpu ${ngpu} \ --config ${config_path} \ --checkpoint_path ${ckpt_path_prefix} \ --export_path ${jit_model_export_path} diff --git a/examples/timit/asr1/local/test.sh b/examples/timit/asr1/local/test.sh index 
575bff57..08ee0e36 100755 --- a/examples/timit/asr1/local/test.sh +++ b/examples/timit/asr1/local/test.sh @@ -41,7 +41,7 @@ if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then batch_size=64 fi python3 -u ${BIN_DIR}/test.py \ - --nproc ${ngpu} \ + --ngpu ${ngpu} \ --config ${config_path} \ --result_file ${ckpt_prefix}.${type}.rsl \ --checkpoint_path ${ckpt_prefix} \ @@ -61,7 +61,7 @@ if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then echo "decoding ${type}" batch_size=1 python3 -u ${BIN_DIR}/test.py \ - --nproc ${ngpu} \ + --ngpu ${ngpu} \ --config ${config_path} \ --result_file ${ckpt_prefix}.${type}.rsl \ --checkpoint_path ${ckpt_prefix} \ @@ -80,7 +80,7 @@ if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then echo "decoding ${type}" batch_size=1 python3 -u ${BIN_DIR}/test.py \ - --nproc ${ngpu} \ + --ngpu ${ngpu} \ --config ${config_path} \ --result_file ${ckpt_prefix}.${type}.rsl \ --checkpoint_path ${ckpt_prefix} \ diff --git a/examples/timit/asr1/local/train.sh b/examples/timit/asr1/local/train.sh index 89a64327..9b3fa177 100755 --- a/examples/timit/asr1/local/train.sh +++ b/examples/timit/asr1/local/train.sh @@ -20,7 +20,7 @@ if [ ${seed} != 0 ]; then fi python3 -u ${BIN_DIR}/train.py \ ---nproc ${ngpu} \ +--ngpu ${ngpu} \ --config ${config_path} \ --output exp/${ckpt_name} \ --seed ${seed} diff --git a/examples/tiny/asr0/local/export.sh b/examples/tiny/asr0/local/export.sh index a5e62c28..426a72fe 100755 --- a/examples/tiny/asr0/local/export.sh +++ b/examples/tiny/asr0/local/export.sh @@ -14,7 +14,7 @@ jit_model_export_path=$3 model_type=$4 python3 -u ${BIN_DIR}/export.py \ ---nproc ${ngpu} \ +--ngpu ${ngpu} \ --config ${config_path} \ --checkpoint_path ${ckpt_path_prefix} \ --export_path ${jit_model_export_path} \ diff --git a/examples/tiny/asr0/local/test.sh b/examples/tiny/asr0/local/test.sh index 4d00f30b..a627ef72 100755 --- a/examples/tiny/asr0/local/test.sh +++ b/examples/tiny/asr0/local/test.sh @@ -19,7 +19,7 @@ if [ $? 
-ne 0 ]; then fi python3 -u ${BIN_DIR}/test.py \ ---nproc ${ngpu} \ +--ngpu ${ngpu} \ --config ${config_path} \ --result_file ${ckpt_prefix}.rsl \ --checkpoint_path ${ckpt_prefix} \ diff --git a/examples/tiny/asr0/local/train.sh b/examples/tiny/asr0/local/train.sh index 5b87780a..a69b6ddb 100755 --- a/examples/tiny/asr0/local/train.sh +++ b/examples/tiny/asr0/local/train.sh @@ -27,7 +27,7 @@ model_type=$3 mkdir -p exp python3 -u ${BIN_DIR}/train.py \ ---nproc ${ngpu} \ +--ngpu ${ngpu} \ --config ${config_path} \ --output exp/${ckpt_name} \ --model_type ${model_type} \ diff --git a/examples/tiny/asr1/local/align.sh b/examples/tiny/asr1/local/align.sh index 279461aa..c65d611c 100755 --- a/examples/tiny/asr1/local/align.sh +++ b/examples/tiny/asr1/local/align.sh @@ -18,7 +18,7 @@ mkdir -p ${output_dir} # align dump in `result_file` # .tier, .TextGrid dump in `dir of result_file` python3 -u ${BIN_DIR}/alignment.py \ ---nproc ${ngpu} \ +--ngpu ${ngpu} \ --config ${config_path} \ --result_file ${output_dir}/${type}.align \ --checkpoint_path ${ckpt_prefix} \ diff --git a/examples/tiny/asr1/local/export.sh b/examples/tiny/asr1/local/export.sh index b562218e..6b646b46 100755 --- a/examples/tiny/asr1/local/export.sh +++ b/examples/tiny/asr1/local/export.sh @@ -13,7 +13,7 @@ ckpt_path_prefix=$2 jit_model_export_path=$3 python3 -u ${BIN_DIR}/export.py \ ---nproc ${ngpu} \ +--ngpu ${ngpu} \ --config ${config_path} \ --checkpoint_path ${ckpt_path_prefix} \ --export_path ${jit_model_export_path} diff --git a/examples/tiny/asr1/local/test.sh b/examples/tiny/asr1/local/test.sh index 34088ce9..190bacff 100755 --- a/examples/tiny/asr1/local/test.sh +++ b/examples/tiny/asr1/local/test.sh @@ -31,7 +31,7 @@ for type in attention ctc_greedy_search; do batch_size=64 fi python3 -u ${BIN_DIR}/test.py \ - --nproc ${ngpu} \ + --ngpu ${ngpu} \ --config ${config_path} \ --result_file ${ckpt_prefix}.${type}.rsl \ --checkpoint_path ${ckpt_prefix} \ @@ -48,7 +48,7 @@ for type in ctc_prefix_beam_search attention_rescoring; do echo "decoding ${type}" batch_size=1 python3 -u ${BIN_DIR}/test.py \ - --nproc ${ngpu} \ + --ngpu ${ngpu} \ --config ${config_path} \ --result_file ${ckpt_prefix}.${type}.rsl \ --checkpoint_path ${ckpt_prefix} \ diff --git a/examples/tiny/asr1/local/train.sh b/examples/tiny/asr1/local/train.sh index 71af3a00..1c8593bd 100755 --- a/examples/tiny/asr1/local/train.sh +++ b/examples/tiny/asr1/local/train.sh @@ -29,7 +29,7 @@ mkdir -p exp python3 -u ${BIN_DIR}/train.py \ --seed ${seed} \ ---nproc ${ngpu} \ +--ngpu ${ngpu} \ --config ${config_path} \ --output exp/${ckpt_name} \ --profiler-options "${profiler_options}" \ diff --git a/examples/wenetspeech/asr1/local/test.sh b/examples/wenetspeech/asr1/local/test.sh index 47bd2f63..da159de7 100755 --- a/examples/wenetspeech/asr1/local/test.sh +++ b/examples/wenetspeech/asr1/local/test.sh @@ -34,7 +34,7 @@ for type in attention ctc_greedy_search; do output_dir=${ckpt_prefix} mkdir -p ${output_dir} python3 -u ${BIN_DIR}/test.py \ - --nproc ${ngpu} \ + --ngpu ${ngpu} \ --config ${config_path} \ --result_file ${output_dir}/${type}.rsl \ --checkpoint_path ${ckpt_prefix} \ @@ -53,7 +53,7 @@ for type in ctc_prefix_beam_search attention_rescoring; do output_dir=${ckpt_prefix} mkdir -p ${output_dir} python3 -u ${BIN_DIR}/test.py \ - --nproc ${ngpu} \ + --ngpu ${ngpu} \ --config ${config_path} \ --result_file ${output_dir}/${type}.rsl \ --checkpoint_path ${ckpt_prefix} \ diff --git a/paddlespeech/s2t/decoders/recog.py b/paddlespeech/s2t/decoders/recog.py index 
d9324ca0..3e9939f0 100644 --- a/paddlespeech/s2t/decoders/recog.py +++ b/paddlespeech/s2t/decoders/recog.py @@ -40,7 +40,6 @@ def get_config(config_path): def load_trained_model(args): - args.nprocs = args.ngpu confs = get_config(args.model_conf) class_obj = dynamic_import_tester(args.model_name) exp = class_obj(confs, args) diff --git a/paddlespeech/s2t/exps/deepspeech2/bin/test_hub.py b/paddlespeech/s2t/exps/deepspeech2/bin/test_hub.py index 8ab8fea2..831bd1ad 100644 --- a/paddlespeech/s2t/exps/deepspeech2/bin/test_hub.py +++ b/paddlespeech/s2t/exps/deepspeech2/bin/test_hub.py @@ -87,7 +87,7 @@ class DeepSpeech2Tester_hub(): def setup(self): """Setup the experiment. """ - paddle.set_device('gpu' if self.args.nprocs > 0 else 'cpu') + paddle.set_device('gpu' if self.args.ngpu > 0 else 'cpu') self.setup_output_dir() self.setup_checkpointer() diff --git a/paddlespeech/s2t/exps/deepspeech2/bin/train.py b/paddlespeech/s2t/exps/deepspeech2/bin/train.py index d9b610a0..400538f9 100644 --- a/paddlespeech/s2t/exps/deepspeech2/bin/train.py +++ b/paddlespeech/s2t/exps/deepspeech2/bin/train.py @@ -27,8 +27,8 @@ def main_sp(config, args): def main(config, args): - if args.nprocs > 0: - dist.spawn(main_sp, args=(config, args), nprocs=args.nprocs) + if args.ngpu > 1: + dist.spawn(main_sp, args=(config, args), nprocs=args.ngpu) else: main_sp(config, args) diff --git a/paddlespeech/s2t/exps/u2/bin/test_wav.py b/paddlespeech/s2t/exps/u2/bin/test_wav.py index e118b481..a9450129 100644 --- a/paddlespeech/s2t/exps/u2/bin/test_wav.py +++ b/paddlespeech/s2t/exps/u2/bin/test_wav.py @@ -47,7 +47,7 @@ class U2Infer(): vocab_filepath=config.collator.vocab_filepath, spm_model_prefix=config.collator.spm_model_prefix) - paddle.set_device('gpu' if self.args.nprocs > 0 else 'cpu') + paddle.set_device('gpu' if self.args.ngpu > 0 else 'cpu') # model model_conf = config.model diff --git a/paddlespeech/s2t/exps/u2/bin/train.py b/paddlespeech/s2t/exps/u2/bin/train.py index 127db521..d6ee8b30 100644 --- a/paddlespeech/s2t/exps/u2/bin/train.py +++ b/paddlespeech/s2t/exps/u2/bin/train.py @@ -32,8 +32,8 @@ def main_sp(config, args): def main(config, args): - if args.nprocs > 0: - dist.spawn(main_sp, args=(config, args), nprocs=args.nprocs) + if args.ngpu > 1: + dist.spawn(main_sp, args=(config, args), nprocs=args.ngpu) else: main_sp(config, args) diff --git a/paddlespeech/s2t/exps/u2/model.py b/paddlespeech/s2t/exps/u2/model.py index 27bc47d2..b6dbcf44 100644 --- a/paddlespeech/s2t/exps/u2/model.py +++ b/paddlespeech/s2t/exps/u2/model.py @@ -257,7 +257,7 @@ class U2Trainer(Trainer): maxlen_in=float('inf'), maxlen_out=float('inf'), minibatches=0, - mini_batch_size=self.args.nprocs, + mini_batch_size=self.args.ngpu, batch_count='auto', batch_bins=0, batch_frames_in=0, @@ -277,7 +277,7 @@ class U2Trainer(Trainer): maxlen_in=float('inf'), maxlen_out=float('inf'), minibatches=0, - mini_batch_size=self.args.nprocs, + mini_batch_size=self.args.ngpu, batch_count='auto', batch_bins=0, batch_frames_in=0, diff --git a/paddlespeech/s2t/exps/u2_kaldi/bin/train.py b/paddlespeech/s2t/exps/u2_kaldi/bin/train.py index d3427eec..fcfc05a8 100644 --- a/paddlespeech/s2t/exps/u2_kaldi/bin/train.py +++ b/paddlespeech/s2t/exps/u2_kaldi/bin/train.py @@ -36,8 +36,8 @@ def main_sp(config, args): def main(config, args): - if args.nprocs > 0: - dist.spawn(main_sp, args=(config, args), nprocs=args.nprocs) + if args.ngpu > 1: + dist.spawn(main_sp, args=(config, args), nprocs=args.ngpu) else: main_sp(config, args) diff --git 
a/paddlespeech/s2t/exps/u2_kaldi/model.py b/paddlespeech/s2t/exps/u2_kaldi/model.py index d82034c8..c23b4c24 100644 --- a/paddlespeech/s2t/exps/u2_kaldi/model.py +++ b/paddlespeech/s2t/exps/u2_kaldi/model.py @@ -239,7 +239,7 @@ class U2Trainer(Trainer): maxlen_in=float('inf'), maxlen_out=float('inf'), minibatches=0, - mini_batch_size=self.args.nprocs, + mini_batch_size=self.args.ngpu, batch_count='auto', batch_bins=0, batch_frames_in=0, @@ -258,7 +258,7 @@ class U2Trainer(Trainer): maxlen_in=float('inf'), maxlen_out=float('inf'), minibatches=0, - mini_batch_size=self.args.nprocs, + mini_batch_size=self.args.ngpu, batch_count='auto', batch_bins=0, batch_frames_in=0, diff --git a/paddlespeech/s2t/exps/u2_st/bin/train.py b/paddlespeech/s2t/exps/u2_st/bin/train.py index 3d823cc4..58496c88 100644 --- a/paddlespeech/s2t/exps/u2_st/bin/train.py +++ b/paddlespeech/s2t/exps/u2_st/bin/train.py @@ -30,8 +30,8 @@ def main_sp(config, args): def main(config, args): - if args.nprocs > 0: - dist.spawn(main_sp, args=(config, args), nprocs=args.nprocs) + if args.ngpu > 1: + dist.spawn(main_sp, args=(config, args), nprocs=args.ngpu) else: main_sp(config, args) diff --git a/paddlespeech/s2t/training/cli.py b/paddlespeech/s2t/training/cli.py index 55b010e9..3ef871c5 100644 --- a/paddlespeech/s2t/training/cli.py +++ b/paddlespeech/s2t/training/cli.py @@ -51,7 +51,7 @@ def default_argument_parser(parser=None): The ``--checkpoint_path`` specifies the checkpoint to load from. - The ``--nprocs`` specifies how to run the training. + The ``--ngpu`` specifies how to run the training. See Also @@ -78,7 +78,7 @@ def default_argument_parser(parser=None): help="seed to use for paddle, np and random. None or 0 for random, else set seed." ) train_group.add_argument( - "--nprocs", + "--ngpu", type=int, default=1, help="number of parallel processes. 0 for cpu.") diff --git a/paddlespeech/s2t/training/trainer.py b/paddlespeech/s2t/training/trainer.py index e6328cdf..f5fb2db0 100644 --- a/paddlespeech/s2t/training/trainer.py +++ b/paddlespeech/s2t/training/trainer.py @@ -88,8 +88,8 @@ class Trainer(): >>> config.merge_from_list(args.opts) >>> config.freeze() >>> - >>> if args.nprocs > 0: - >>> dist.spawn(main_sp, args=(config, args), nprocs=args.nprocs) + >>> if args.ngpu > 1: + >>> dist.spawn(main_sp, args=(config, args), nprocs=args.ngpu) >>> else: >>> main_sp(config, args) """ @@ -112,7 +112,7 @@ class Trainer(): logger.info(f"Rank: {self.rank}/{self.world_size}") # set device - paddle.set_device('gpu' if self.args.nprocs > 0 else 'cpu') + paddle.set_device('gpu' if self.args.ngpu > 0 else 'cpu') if self.parallel: self.init_parallel() @@ -162,7 +162,7 @@ class Trainer(): """A flag indicating whether the experiment should run with multiprocessing. """ - return self.args.nprocs > 1 + return self.args.ngpu > 1 def init_parallel(self): """Init environment for multiprocess training. 
diff --git a/paddlespeech/t2s/exps/voice_cloning/tacotron2_ge2e/train.py b/paddlespeech/t2s/exps/voice_cloning/tacotron2_ge2e/train.py index 8af1d45e..ea5f12da 100644 --- a/paddlespeech/t2s/exps/voice_cloning/tacotron2_ge2e/train.py +++ b/paddlespeech/t2s/exps/voice_cloning/tacotron2_ge2e/train.py @@ -241,7 +241,7 @@ def main_sp(config, args): def main(config, args): - if args.ngpu: + if args.ngpu > 1: dist.spawn(main_sp, args=(config, args), nprocs=args.ngpu) else: main_sp(config, args) diff --git a/paddlespeech/t2s/training/cli.py b/paddlespeech/t2s/training/cli.py index a0710fd7..83dae117 100644 --- a/paddlespeech/t2s/training/cli.py +++ b/paddlespeech/t2s/training/cli.py @@ -30,7 +30,7 @@ def default_argument_parser(): The ``--checkpoint_path`` specifies the checkpoint to load from. - The ``--device`` and ``--nprocs`` specifies how to run the training. + The ``--ngpu`` specifies how to run the training. See Also -------- diff --git a/paddlespeech/t2s/training/experiment.py b/paddlespeech/t2s/training/experiment.py index c9e7f4cc..de36db24 100644 --- a/paddlespeech/t2s/training/experiment.py +++ b/paddlespeech/t2s/training/experiment.py @@ -82,8 +82,8 @@ class ExperimentBase(object): >>> config.merge_from_list(args.opts) >>> config.freeze() >>> - >>> if args.nprocs > 1 and args.device == "gpu": - >>> dist.spawn(main_sp, args=(config, args), nprocs=args.nprocs) + >>> if args.ngpu > 1: + >>> dist.spawn(main_sp, args=(config, args), nprocs=args.ngpu) >>> else: >>> main_sp(config, args) """ diff --git a/paddlespeech/text/examples/punctuation_restoration/chinese/local/test.sh b/paddlespeech/text/examples/punctuation_restoration/chinese/local/test.sh index 6db75ca2..ee022462 100644 --- a/paddlespeech/text/examples/punctuation_restoration/chinese/local/test.sh +++ b/paddlespeech/text/examples/punctuation_restoration/chinese/local/test.sh @@ -9,17 +9,11 @@ fi ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}') echo "using $ngpu gpus..." -device=gpu -if [ ${ngpu} == 0 ];then - device=cpu -fi config_path=$1 ckpt_prefix=$2 - python3 -u ${BIN_DIR}/test.py \ ---device ${device} \ ---nproc 1 \ +--ngpu 1 \ --config ${config_path} \ --result_file ${ckpt_prefix}.rsl \ --checkpoint_path ${ckpt_prefix} diff --git a/paddlespeech/text/examples/punctuation_restoration/chinese/local/train.sh b/paddlespeech/text/examples/punctuation_restoration/chinese/local/train.sh index f6bd2c98..fc345cc1 100644 --- a/paddlespeech/text/examples/punctuation_restoration/chinese/local/train.sh +++ b/paddlespeech/text/examples/punctuation_restoration/chinese/local/train.sh @@ -11,16 +11,10 @@ echo "using $ngpu gpus..." config_path=$1 ckpt_name=$2 -device=gpu -if [ ${ngpu} == 0 ];then - device=cpu -fi - mkdir -p exp python3 -u ${BIN_DIR}/train.py \ ---device ${device} \ ---nproc ${ngpu} \ +--ngpu ${ngpu} \ --config ${config_path} \ --output exp/${ckpt_name} diff --git a/paddlespeech/text/examples/punctuation_restoration/english/local/test.sh b/paddlespeech/text/examples/punctuation_restoration/english/local/test.sh index 6db75ca2..d8a58f34 100644 --- a/paddlespeech/text/examples/punctuation_restoration/english/local/test.sh +++ b/paddlespeech/text/examples/punctuation_restoration/english/local/test.sh @@ -9,17 +9,12 @@ fi ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}') echo "using $ngpu gpus..." 
-device=gpu -if [ ${ngpu} == 0 ];then - device=cpu -fi config_path=$1 ckpt_prefix=$2 python3 -u ${BIN_DIR}/test.py \ ---device ${device} \ ---nproc 1 \ +--ngpu 1 \ --config ${config_path} \ --result_file ${ckpt_prefix}.rsl \ --checkpoint_path ${ckpt_prefix} diff --git a/paddlespeech/text/examples/punctuation_restoration/english/local/train.sh b/paddlespeech/text/examples/punctuation_restoration/english/local/train.sh index f6bd2c98..fc345cc1 100644 --- a/paddlespeech/text/examples/punctuation_restoration/english/local/train.sh +++ b/paddlespeech/text/examples/punctuation_restoration/english/local/train.sh @@ -11,16 +11,10 @@ echo "using $ngpu gpus..." config_path=$1 ckpt_name=$2 -device=gpu -if [ ${ngpu} == 0 ];then - device=cpu -fi - mkdir -p exp python3 -u ${BIN_DIR}/train.py \ ---device ${device} \ ---nproc ${ngpu} \ +--ngpu ${ngpu} \ --config ${config_path} \ --output exp/${ckpt_name} diff --git a/paddlespeech/text/speechtask/punctuation_restoration/bin/train.py b/paddlespeech/text/speechtask/punctuation_restoration/bin/train.py index 1ffd79b7..c4b67265 100644 --- a/paddlespeech/text/speechtask/punctuation_restoration/bin/train.py +++ b/paddlespeech/text/speechtask/punctuation_restoration/bin/train.py @@ -26,8 +26,8 @@ def main_sp(config, args): def main(config, args): - if args.device == "gpu" and args.nprocs > 1: - dist.spawn(main_sp, args=(config, args), nprocs=args.nprocs) + if args.ngpu > 1: + dist.spawn(main_sp, args=(config, args), nprocs=args.ngpu) else: main_sp(config, args) diff --git a/paddlespeech/text/speechtask/punctuation_restoration/training/trainer.py b/paddlespeech/text/speechtask/punctuation_restoration/training/trainer.py index 2dce88a3..d6b6eeb6 100644 --- a/paddlespeech/text/speechtask/punctuation_restoration/training/trainer.py +++ b/paddlespeech/text/speechtask/punctuation_restoration/training/trainer.py @@ -106,8 +106,8 @@ class Trainer(): >>> config.merge_from_list(args.opts) >>> config.freeze() >>> - >>> if args.nprocs > 1 and args.device == "gpu": - >>> dist.spawn(main_sp, args=(config, args), nprocs=args.nprocs) + >>> if args.ngpu > 1: + >>> dist.spawn(main_sp, args=(config, args), nprocs=args.ngpu) >>> else: >>> main_sp(config, args) """ @@ -147,7 +147,7 @@ class Trainer(): """A flag indicating whether the experiment should run with multiprocessing. """ - return self.args.device == "gpu" and self.args.nprocs > 1 + return self.args.ngpu > 1 def init_parallel(self): """Init environment for multiprocess training. diff --git a/paddlespeech/text/speechtask/punctuation_restoration/utils/default_parser.py b/paddlespeech/text/speechtask/punctuation_restoration/utils/default_parser.py index 405b29a2..de0d8aef 100644 --- a/paddlespeech/text/speechtask/punctuation_restoration/utils/default_parser.py +++ b/paddlespeech/text/speechtask/punctuation_restoration/utils/default_parser.py @@ -30,7 +30,7 @@ def default_argument_parser(): The ``--checkpoint_path`` specifies the checkpoint to load from. - The ``--device`` and ``--nprocs`` specifies how to run the training. + The ``--ngpu`` specifies how to run the training. 
See Also @@ -60,9 +60,7 @@ def default_argument_parser(): parser.add_argument("--result_file", type=str, help="path of save the asr result") # running - parser.add_argument("--device", type=str, default='gpu', choices=["cpu", "gpu"], - help="device type to use, cpu and gpu are supported.") - parser.add_argument("--nprocs", type=int, default=1, help="number of parallel processes to use.") + parser.add_argument("--ngpu", type=int, default=1, help="number of parallel processes to use. if ngpu=0, using cpu.") # overwrite extra config and default config # parser.add_argument("--opts", nargs=argparse.REMAINDER, diff --git a/tests/benchmark/conformer/run_benchmark.sh b/tests/benchmark/conformer/run_benchmark.sh index 56b63e76..d4efe2b9 100644 --- a/tests/benchmark/conformer/run_benchmark.sh +++ b/tests/benchmark/conformer/run_benchmark.sh @@ -38,7 +38,7 @@ function _train(){ train_cmd="--config=${config_path} --output=${output} --seed=${seed} - --nproc=${ngpu} + --ngpu=${ngpu} --profiler-options "${profiler_options}" --benchmark-batch-size ${batch_size} --benchmark-max-step ${benchmark_max_step} " diff --git a/tests/chains/ds2/ds2_params_lite_train_infer.txt b/tests/chains/ds2/ds2_params_lite_train_infer.txt index c1cbfbb9..b11872bd 100644 --- a/tests/chains/ds2/ds2_params_lite_train_infer.txt +++ b/tests/chains/ds2/ds2_params_lite_train_infer.txt @@ -21,13 +21,13 @@ null:null null:null ## ===========================eval_params=========================== -eval: ../../../paddlespeech/s2t/exps/deepspeech2/bin/test.py --nproc 1 --config conf/deepspeech2.yaml --checkpoint_path exp/deepspeech_tiny/checkpoints/9 --result_file tests/9.rsl --model_type offline +eval: ../../../paddlespeech/s2t/exps/deepspeech2/bin/test.py --ngpu 1 --config conf/deepspeech2.yaml --checkpoint_path exp/deepspeech_tiny/checkpoints/9 --result_file tests/9.rsl --model_type offline null:null ## ===========================infer_params=========================== null:null null:null -norm_export: ../../../paddlespeech/s2t/exps/deepspeech2/bin/export.py --nproc 1 --config conf/deepspeech2.yaml --model_type offline --checkpoint_path exp/deepspeech_tiny/checkpoints/9 --export_path exp/deepspeech_tiny/checkpoints/9.jit +norm_export: ../../../paddlespeech/s2t/exps/deepspeech2/bin/export.py --ngpu 1 --config conf/deepspeech2.yaml --model_type offline --checkpoint_path exp/deepspeech_tiny/checkpoints/9 --export_path exp/deepspeech_tiny/checkpoints/9.jit quant_export:null fpgm_export:null distill_export:null diff --git a/tests/chains/ds2/ds2_params_whole_train_infer.txt b/tests/chains/ds2/ds2_params_whole_train_infer.txt index bfcb745f..875e3ccf 100644 --- a/tests/chains/ds2/ds2_params_whole_train_infer.txt +++ b/tests/chains/ds2/ds2_params_whole_train_infer.txt @@ -21,13 +21,13 @@ null:null null:null ## ===========================eval_params=========================== -eval: ../../../paddlespeech/s2t/exps/deepspeech2/bin/test.py --nproc 1 --config conf/deepspeech2.yaml --result_file tests/49.rsl --checkpoint_path exp/deepspeech_whole/checkpoints/49 --model_type offline +eval: ../../../paddlespeech/s2t/exps/deepspeech2/bin/test.py --ngpu 1 --config conf/deepspeech2.yaml --result_file tests/49.rsl --checkpoint_path exp/deepspeech_whole/checkpoints/49 --model_type offline null:null ## ===========================infer_params=========================== null:null null:null -norm_export: ../../../paddlespeech/s2t/exps/deepspeech2/bin/export.py --nproc 1 --config conf/deepspeech2.yaml --model_type offline --checkpoint_path 
exp/deepspeech_whole/checkpoints/49 --export_path exp/deepspeech_whole/checkpoints/49.jit +norm_export: ../../../paddlespeech/s2t/exps/deepspeech2/bin/export.py --ngpu 1 --config conf/deepspeech2.yaml --model_type offline --checkpoint_path exp/deepspeech_whole/checkpoints/49 --export_path exp/deepspeech_whole/checkpoints/49.jit quant_export:null fpgm_export:null distill_export:null diff --git a/tests/chains/ds2/speedyspeech_params_lite.txt b/tests/chains/ds2/speedyspeech_params_lite.txt index c1cfb8f5..487b0b5e 100644 --- a/tests/chains/ds2/speedyspeech_params_lite.txt +++ b/tests/chains/ds2/speedyspeech_params_lite.txt @@ -21,7 +21,7 @@ null:null null:null ## ===========================eval_params=========================== -eval:../examples/speedyspeech/baker/synthesize_e2e.py --speedyspeech-config=../examples/speedyspeech/baker/conf/default.yaml --speedyspeech-checkpoint=exp/default/checkpoints/snapshot_iter_90.pdz --speedyspeech-stat=pretrain_models/speedyspeech_baker_ckpt_0.4/speedy_speech_stats.npy --pwg-config=../examples/parallelwave_gan/baker/conf/default.yaml --pwg-checkpoint=pretrain_models/pwg_baker_ckpt_0.4/pwg_snapshot_iter_400000.pdz --pwg-stat=pretrain_models/pwg_baker_ckpt_0.4/pwg_stats.npy --text=../examples/speedyspeech/baker/sentences.txt --output-dir=e2e --inference-dir=inference --device="gpu" --phones-dict=../examples/speedyspeech/baker/phones.txt --tones-dict=../examples/speedyspeech/baker/tones.txt +eval:../examples/speedyspeech/baker/synthesize_e2e.py --speedyspeech-config=../examples/speedyspeech/baker/conf/default.yaml --speedyspeech-checkpoint=exp/default/checkpoints/snapshot_iter_90.pdz --speedyspeech-stat=pretrain_models/speedyspeech_baker_ckpt_0.4/speedy_speech_stats.npy --pwg-config=../examples/parallelwave_gan/baker/conf/default.yaml --pwg-checkpoint=pretrain_models/pwg_baker_ckpt_0.4/pwg_snapshot_iter_400000.pdz --pwg-stat=pretrain_models/pwg_baker_ckpt_0.4/pwg_stats.npy --text=../examples/speedyspeech/baker/sentences.txt --output-dir=e2e --inference-dir=inference --ngpu=1 --phones-dict=../examples/speedyspeech/baker/phones.txt --tones-dict=../examples/speedyspeech/baker/tones.txt null:null ## ===========================infer_params=========================== diff --git a/tests/chains/ds2/test.sh b/tests/chains/ds2/test.sh index 0b2b4f58..c9307820 100644 --- a/tests/chains/ds2/test.sh +++ b/tests/chains/ds2/test.sh @@ -323,7 +323,7 @@ else elif [ ${#gpu} -le 15 ];then # train with multi-gpu gsu=${gpu//,/ } nump=`echo $gsu | wc -w` - cmd="${python} ${run_train} --nproc=$nump" + cmd="${python} ${run_train} --ngpu=$nump" else # train with multi-machine cmd="${python} -m paddle.distributed.launch --ips=${ips} --gpus=${gpu} ${run_train} ${set_save_model} ${set_pretrain} ${set_epoch} ${set_autocast} ${set_batchsize} ${set_train_params1}" fi diff --git a/tests/chains/speedyspeech/speedyspeech_params_lite_multi_gpu.txt b/tests/chains/speedyspeech/speedyspeech_params_lite_multi_gpu.txt index 0f64da27..634c3a5a 100644 --- a/tests/chains/speedyspeech/speedyspeech_params_lite_multi_gpu.txt +++ b/tests/chains/speedyspeech/speedyspeech_params_lite_multi_gpu.txt @@ -21,7 +21,7 @@ null:null null:null ## ===========================eval_params=========================== -eval:../../../paddlespeech/t2s/exps/speedyspeech/synthesize_e2e.py --speedyspeech-config=../../../examples/csmsc/tts2/conf/default.yaml --speedyspeech-checkpoint=exp/default/checkpoints/snapshot_iter_20.pdz --speedyspeech-stat=train_data/mini_BZNSYP/train/feats_stats.npy 
--pwg-config=pretrain_models/pwg_baker_ckpt_0.4/pwg_default.yaml --pwg-checkpoint=pretrain_models/pwg_baker_ckpt_0.4/pwg_snapshot_iter_400000.pdz --pwg-stat=pretrain_models/pwg_baker_ckpt_0.4/pwg_stats.npy --text=../../../paddlespeech/t2s/exps/sentences.txt --output-dir=e2e --inference-dir=inference --device="gpu" --phones-dict=train_data/mini_BZNSYP/phone_id_map.txt --tones-dict=train_data/mini_BZNSYP/tone_id_map.txt +eval:../../../paddlespeech/t2s/exps/speedyspeech/synthesize_e2e.py --speedyspeech-config=../../../examples/csmsc/tts2/conf/default.yaml --speedyspeech-checkpoint=exp/default/checkpoints/snapshot_iter_20.pdz --speedyspeech-stat=train_data/mini_BZNSYP/train/feats_stats.npy --pwg-config=pretrain_models/pwg_baker_ckpt_0.4/pwg_default.yaml --pwg-checkpoint=pretrain_models/pwg_baker_ckpt_0.4/pwg_snapshot_iter_400000.pdz --pwg-stat=pretrain_models/pwg_baker_ckpt_0.4/pwg_stats.npy --text=../../../paddlespeech/t2s/exps/sentences.txt --output-dir=e2e --inference-dir=inference --ngpu=1 --phones-dict=train_data/mini_BZNSYP/phone_id_map.txt --tones-dict=train_data/mini_BZNSYP/tone_id_map.txt null:null ## ===========================infer_params=========================== diff --git a/tests/chains/speedyspeech/speedyspeech_params_lite_single_gpu.txt b/tests/chains/speedyspeech/speedyspeech_params_lite_single_gpu.txt index beda4c04..d187d4c6 100644 --- a/tests/chains/speedyspeech/speedyspeech_params_lite_single_gpu.txt +++ b/tests/chains/speedyspeech/speedyspeech_params_lite_single_gpu.txt @@ -21,7 +21,7 @@ null:null null:null ## ===========================eval_params=========================== -eval:../../../paddlespeech/t2s/exps/speedyspeech/synthesize_e2e.py --speedyspeech-config=../../../examples/csmsc/tts2/conf/default.yaml --speedyspeech-checkpoint=exp/default/checkpoints/snapshot_iter_30.pdz --speedyspeech-stat=train_data/mini_BZNSYP/train/feats_stats.npy --pwg-config=pretrain_models/pwg_baker_ckpt_0.4/pwg_default.yaml --pwg-checkpoint=pretrain_models/pwg_baker_ckpt_0.4/pwg_snapshot_iter_400000.pdz --pwg-stat=pretrain_models/pwg_baker_ckpt_0.4/pwg_stats.npy --text=../../../paddlespeech/t2s/exps/sentences.txt --output-dir=e2e --inference-dir=inference --device="gpu" --phones-dict=train_data/mini_BZNSYP/phone_id_map.txt --tones-dict=train_data/mini_BZNSYP/tone_id_map.txt +eval:../../../paddlespeech/t2s/exps/speedyspeech/synthesize_e2e.py --speedyspeech-config=../../../examples/csmsc/tts2/conf/default.yaml --speedyspeech-checkpoint=exp/default/checkpoints/snapshot_iter_30.pdz --speedyspeech-stat=train_data/mini_BZNSYP/train/feats_stats.npy --pwg-config=pretrain_models/pwg_baker_ckpt_0.4/pwg_default.yaml --pwg-checkpoint=pretrain_models/pwg_baker_ckpt_0.4/pwg_snapshot_iter_400000.pdz --pwg-stat=pretrain_models/pwg_baker_ckpt_0.4/pwg_stats.npy --text=../../../paddlespeech/t2s/exps/sentences.txt --output-dir=e2e --inference-dir=inference --ngpu=1 --phones-dict=train_data/mini_BZNSYP/phone_id_map.txt --tones-dict=train_data/mini_BZNSYP/tone_id_map.txt null:null ## ===========================infer_params=========================== diff --git a/tests/chains/speedyspeech/speedyspeech_params_whole_multi_gpu.txt b/tests/chains/speedyspeech/speedyspeech_params_whole_multi_gpu.txt index ecdbf76d..f8ebd499 100644 --- a/tests/chains/speedyspeech/speedyspeech_params_whole_multi_gpu.txt +++ b/tests/chains/speedyspeech/speedyspeech_params_whole_multi_gpu.txt @@ -21,7 +21,7 @@ null:null null:null ## ===========================eval_params=========================== 
-eval:../../../paddlespeech/t2s/exps/speedyspeech/synthesize_e2e.py --speedyspeech-config=pretrain_models/speedyspeech_nosil_baker_ckpt_0.5/default.yaml --speedyspeech-checkpoint=pretrain_models/speedyspeech_nosil_baker_ckpt_0.5/snapshot_iter_11400.pdz --speedyspeech-stat=pretrain_models/speedyspeech_nosil_baker_ckpt_0.5/feats_stats.npy --pwg-config=pretrain_models/pwg_baker_ckpt_0.4/pwg_default.yaml --pwg-checkpoint=pretrain_models/pwg_baker_ckpt_0.4/pwg_snapshot_iter_400000.pdz --pwg-stat=pretrain_models/pwg_baker_ckpt_0.4/pwg_stats.npy --text=../../../paddlespeech/t2s/exps/sentences.txt --output-dir=e2e --inference-dir=inference --device="gpu" --phones-dict=pretrain_models/speedyspeech_nosil_baker_ckpt_0.5/phone_id_map.txt --tones-dict=pretrain_models/speedyspeech_nosil_baker_ckpt_0.5/tone_id_map.txt +eval:../../../paddlespeech/t2s/exps/speedyspeech/synthesize_e2e.py --speedyspeech-config=pretrain_models/speedyspeech_nosil_baker_ckpt_0.5/default.yaml --speedyspeech-checkpoint=pretrain_models/speedyspeech_nosil_baker_ckpt_0.5/snapshot_iter_11400.pdz --speedyspeech-stat=pretrain_models/speedyspeech_nosil_baker_ckpt_0.5/feats_stats.npy --pwg-config=pretrain_models/pwg_baker_ckpt_0.4/pwg_default.yaml --pwg-checkpoint=pretrain_models/pwg_baker_ckpt_0.4/pwg_snapshot_iter_400000.pdz --pwg-stat=pretrain_models/pwg_baker_ckpt_0.4/pwg_stats.npy --text=../../../paddlespeech/t2s/exps/sentences.txt --output-dir=e2e --inference-dir=inference --ngpu=1 --phones-dict=pretrain_models/speedyspeech_nosil_baker_ckpt_0.5/phone_id_map.txt --tones-dict=pretrain_models/speedyspeech_nosil_baker_ckpt_0.5/tone_id_map.txt null:null ## ===========================infer_params=========================== diff --git a/tests/chains/speedyspeech/speedyspeech_params_whole_single_gpu.txt b/tests/chains/speedyspeech/speedyspeech_params_whole_single_gpu.txt index 523b5c6e..0e3c49cb 100644 --- a/tests/chains/speedyspeech/speedyspeech_params_whole_single_gpu.txt +++ b/tests/chains/speedyspeech/speedyspeech_params_whole_single_gpu.txt @@ -21,7 +21,7 @@ null:null null:null ## ===========================eval_params=========================== -eval:../../../paddlespeech/t2s/exps/speedyspeech/synthesize_e2e.py --speedyspeech-config=pretrain_models/speedyspeech_nosil_baker_ckpt_0.5/default.yaml --speedyspeech-checkpoint=pretrain_models/speedyspeech_nosil_baker_ckpt_0.5/snapshot_iter_11400.pdz --speedyspeech-stat=pretrain_models/speedyspeech_nosil_baker_ckpt_0.5/feats_stats.npy --pwg-config=pretrain_models/pwg_baker_ckpt_0.4/pwg_default.yaml --pwg-checkpoint=pretrain_models/pwg_baker_ckpt_0.4/pwg_snapshot_iter_400000.pdz --pwg-stat=pretrain_models/pwg_baker_ckpt_0.4/pwg_stats.npy --text=../../../paddlespeech/t2s/exps/sentences.txt --output-dir=e2e --inference-dir=inference --device="gpu" --phones-dict=pretrain_models/speedyspeech_nosil_baker_ckpt_0.5/phone_id_map.txt --tones-dict=pretrain_models/speedyspeech_nosil_baker_ckpt_0.5/tone_id_map.txt +eval:../../../paddlespeech/t2s/exps/speedyspeech/synthesize_e2e.py --speedyspeech-config=pretrain_models/speedyspeech_nosil_baker_ckpt_0.5/default.yaml --speedyspeech-checkpoint=pretrain_models/speedyspeech_nosil_baker_ckpt_0.5/snapshot_iter_11400.pdz --speedyspeech-stat=pretrain_models/speedyspeech_nosil_baker_ckpt_0.5/feats_stats.npy --pwg-config=pretrain_models/pwg_baker_ckpt_0.4/pwg_default.yaml --pwg-checkpoint=pretrain_models/pwg_baker_ckpt_0.4/pwg_snapshot_iter_400000.pdz --pwg-stat=pretrain_models/pwg_baker_ckpt_0.4/pwg_stats.npy --text=../../../paddlespeech/t2s/exps/sentences.txt 
--output-dir=e2e --inference-dir=inference --ngpu=1 --phones-dict=pretrain_models/speedyspeech_nosil_baker_ckpt_0.5/phone_id_map.txt --tones-dict=pretrain_models/speedyspeech_nosil_baker_ckpt_0.5/tone_id_map.txt null:null ## ===========================infer_params=========================== -- GitLab
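
For reference, here is a minimal, self-contained sketch (not part of the patch itself) of the `--ngpu` convention the diff converges on: a single integer flag replaces the old `--device`/`--nprocs` pair, drives `paddle.set_device`, and gates `dist.spawn`. The flag definition, the device-selection expression, and the `main`/`main_sp` spawn condition mirror the hunks above; the training-loop body and the `init_parallel_env` call are illustrative placeholders only.

```python
# Sketch of the unified --ngpu convention introduced by this patch.
#   ngpu == 0 -> run on CPU in a single process
#   ngpu == 1 -> run on one GPU in a single process
#   ngpu  > 1 -> spawn one training process per GPU
import argparse

import paddle
import paddle.distributed as dist


def default_argument_parser():
    parser = argparse.ArgumentParser()
    # Single knob replacing the old --device / --nprocs pair (see cli.py hunks).
    parser.add_argument(
        "--ngpu",
        type=int,
        default=1,
        help="number of parallel processes. 0 for cpu.")
    parser.add_argument("--config", type=str, help="path to config file.")
    return parser


def main_sp(config, args):
    """Single-process entry point (body is a placeholder for the real trainer)."""
    # Device type is derived from ngpu instead of a separate --device flag.
    paddle.set_device('gpu' if args.ngpu > 0 else 'cpu')
    parallel = args.ngpu > 1  # data-parallel only when more than one GPU is used
    if parallel:
        dist.init_parallel_env()
    # ... build model, data loaders, and run the training loop here ...


def main(config, args):
    # Multi-GPU runs are launched with dist.spawn; ngpu <= 1 stays in-process.
    if args.ngpu > 1:
        dist.spawn(main_sp, args=(config, args), nprocs=args.ngpu)
    else:
        main_sp(config, args)


if __name__ == "__main__":
    args = default_argument_parser().parse_args()
    main(config=None, args=args)  # config loading is omitted in this sketch
```

On the shell side, the recipes only need to count the visible devices and forward the result, e.g. `ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')` followed by `--ngpu ${ngpu}`, which is why the old `device=gpu|cpu` branches in the punctuation-restoration scripts could be deleted.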