Commit 913b2300 authored by Hui Zhang

nprocs 0 for cpu, other for gpu

Parent 80eb6b7f
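
The convention this commit adopts: `--nprocs 0` runs on CPU in a single process, while a positive `--nprocs` runs on GPU and spawns that many worker processes, so the separate `--device` flag becomes redundant. A minimal Python sketch consolidating the launch logic from the hunks below (function bodies abbreviated; only the device/spawn decision is shown):

import paddle
import paddle.distributed as dist

def main_sp(config, args):
    # Each process derives its device from nprocs; there is no --device flag any more.
    paddle.set_device('gpu' if args.nprocs > 0 else 'cpu')
    # ... build the trainer/tester and run (omitted)

def main(config, args):
    if args.nprocs > 0:
        # GPU: spawn one worker per process, even for a single GPU.
        dist.spawn(main_sp, args=(config, args), nprocs=args.nprocs)
    else:
        # CPU: run directly in the current process.
        main_sp(config, args)
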
......@@ -27,7 +27,7 @@ def main_sp(config, args):
def main(config, args):
if args.device == "gpu" and args.nprocs > 1:
if args.nprocs > 0:
dist.spawn(main_sp, args=(config, args), nprocs=args.nprocs)
else:
main_sp(config, args)
......
......@@ -403,7 +403,7 @@ class DeepSpeech2Tester(DeepSpeech2Trainer):
def setup(self):
"""Setup the experiment.
"""
paddle.set_device(self.args.device)
paddle.set_device('gpu' if self.args.nprocs > 0 else 'cpu')
self.setup_output_dir()
self.setup_checkpointer()
......@@ -635,7 +635,7 @@ class DeepSpeech2ExportTester(DeepSpeech2Tester):
def setup(self):
"""Setup the experiment.
"""
paddle.set_device(self.args.device)
paddle.set_device('gpu' if self.args.nprocs > 0 else 'cpu')
self.setup_output_dir()
......
......@@ -32,7 +32,7 @@ def main_sp(config, args):
def main(config, args):
if args.device == "gpu" and args.nprocs > 1:
if args.nprocs > 0:
dist.spawn(main_sp, args=(config, args), nprocs=args.nprocs)
else:
main_sp(config, args)
......
......@@ -653,7 +653,7 @@ class U2Tester(U2Trainer):
def setup(self):
"""Setup the experiment.
"""
paddle.set_device(self.args.device)
paddle.set_device('gpu' if self.args.nprocs > 0 else 'cpu')
self.setup_output_dir()
self.setup_checkpointer()
......
......@@ -36,7 +36,7 @@ def main_sp(config, args):
def main(config, args):
if args.device == "gpu" and args.nprocs > 1:
if args.nprocs > 0:
dist.spawn(main_sp, args=(config, args), nprocs=args.nprocs)
else:
main_sp(config, args)
......
......@@ -637,7 +637,7 @@ class U2Tester(U2Trainer):
def setup(self):
"""Setup the experiment.
"""
paddle.set_device(self.args.device)
paddle.set_device('gpu' if self.args.nprocs > 0 else 'cpu')
self.setup_output_dir()
self.setup_checkpointer()
......
......@@ -30,7 +30,7 @@ def main_sp(config, args):
def main(config, args):
if args.device == "gpu" and args.nprocs > 1:
if args.nprocs > 0:
dist.spawn(main_sp, args=(config, args), nprocs=args.nprocs)
else:
main_sp(config, args)
......
......@@ -661,7 +661,7 @@ class U2STTester(U2STTrainer):
def setup(self):
"""Setup the experiment.
"""
paddle.set_device(self.args.device)
paddle.set_device('gpu' if self.args.nprocs > 0 else 'cpu')
self.setup_output_dir()
self.setup_checkpointer()
......
......@@ -30,7 +30,7 @@ def default_argument_parser():
The ``--checkpoint_path`` specifies the checkpoint to load from.
The ``--device`` and ``--nprocs`` specifies how to run the training.
The ``--nprocs`` specifies how to run the training.
See Also
......@@ -51,12 +51,6 @@ def default_argument_parser():
default=None,
help="seed to use for paddle, np and random. None or 0 for random, else set seed."
)
train_group.add_argument(
"--device",
type=str,
default='gpu',
choices=["cpu", "gpu"],
help="device cpu and gpu are supported.")
train_group.add_argument(
"--nprocs",
type=int,
......
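
For context, after this change the parser's training group keeps only `--nprocs`; a hedged sketch of the surviving argument (the help text here is illustrative, and the default value, which the hunk above truncates, is deliberately left out):

import argparse

parser = argparse.ArgumentParser()
train_group = parser.add_argument_group(title='train')
train_group.add_argument(
    "--nprocs",
    type=int,
    help="number of worker processes; 0 runs on CPU, a positive value runs on GPU.")
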
......@@ -86,7 +86,7 @@ class Trainer():
>>> config.merge_from_list(args.opts)
>>> config.freeze()
>>>
>>> if args.nprocs > 1 and args.device == "gpu":
>>> if args.nprocs > 0:
>>> dist.spawn(main_sp, args=(config, args), nprocs=args.nprocs)
>>> else:
>>> main_sp(config, args)
......@@ -119,7 +119,7 @@ class Trainer():
def setup(self):
"""Setup the experiment.
"""
paddle.set_device(self.args.device)
paddle.set_device('gpu' if self.args.nprocs > 0 else 'cpu')
if self.parallel:
self.init_parallel()
......@@ -139,7 +139,7 @@ class Trainer():
"""A flag indicating whether the experiment should run with
multiprocessing.
"""
return self.args.device == "gpu" and self.args.nprocs > 1
return self.args.nprocs > 0
def init_parallel(self):
"""Init environment for multiprocess training.
......
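
Taken together, the Trainer hunks above reduce the device and parallelism decisions to the single `nprocs` signal; a condensed sketch of the resulting behavior (attribute and method names follow the hunks, and the `init_parallel_env` call is an assumption about what `init_parallel` does):

import paddle
import paddle.distributed as dist

class Trainer:
    def __init__(self, config, args):
        self.config = config
        self.args = args

    @property
    def parallel(self):
        # Any positive nprocs now implies GPU worker processes.
        return self.args.nprocs > 0

    def setup(self):
        paddle.set_device('gpu' if self.args.nprocs > 0 else 'cpu')
        if self.parallel:
            self.init_parallel()

    def init_parallel(self):
        # Assumed to wrap Paddle's data-parallel environment setup.
        dist.init_parallel_env()
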
......@@ -13,13 +13,7 @@ ckpt_path_prefix=$2
jit_model_export_path=$3
model_type=$4
device=gpu
if [ ${ngpu} == 0 ];then
device=cpu
fi
python3 -u ${BIN_DIR}/export.py \
--device ${device} \
--nproc ${ngpu} \
--config ${config_path} \
--checkpoint_path ${ckpt_path_prefix} \
......
......@@ -8,10 +8,6 @@ fi
ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
echo "using $ngpu gpus..."
device=gpu
if [ ${ngpu} == 0 ];then
device=cpu
fi
config_path=$1
ckpt_prefix=$2
model_type=$3
......@@ -23,8 +19,7 @@ if [ $? -ne 0 ]; then
fi
python3 -u ${BIN_DIR}/test.py \
--device ${device} \
--nproc 1 \
--nproc ${ngpu} \
--config ${config_path} \
--result_file ${ckpt_prefix}.rsl \
--checkpoint_path ${ckpt_prefix} \
......
......@@ -8,10 +8,6 @@ fi
ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
echo "using $ngpu gpus..."
device=gpu
if [ ${ngpu} == 0 ];then
device=cpu
fi
config_path=$1
jit_model_export_path=$2
model_type=$3
......@@ -23,8 +19,7 @@ if [ $? -ne 0 ]; then
fi
python3 -u ${BIN_DIR}/test_export.py \
--device ${device} \
--nproc 1 \
--nproc ${ngpu} \
--config ${config_path} \
--result_file ${jit_model_export_path}.rsl \
--export_path ${jit_model_export_path} \
......
......@@ -12,11 +12,6 @@ config_path=$1
ckpt_name=$2
model_type=$3
device=gpu
if [ ${ngpu} == 0 ];then
device=cpu
fi
mkdir -p exp
# seed may break model convergence
......@@ -26,7 +21,6 @@ if [ ${seed} != 0 ]; then
fi
python3 -u ${BIN_DIR}/train.py \
--device ${device} \
--nproc ${ngpu} \
--config ${config_path} \
--output exp/${ckpt_name} \
......
......@@ -8,10 +8,6 @@ fi
ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
echo "using $ngpu gpus..."
device=gpu
if [ ${ngpu} == 0 ];then
device=cpu
fi
config_path=$1
ckpt_prefix=$2
......@@ -22,8 +18,7 @@ mkdir -p ${output_dir}
# align dump in `result_file`
# .tier, .TextGrid dump in `dir of result_file`
python3 -u ${BIN_DIR}/alignment.py \
--device ${device} \
--nproc 1 \
--nproc ${ngpu} \
--config ${config_path} \
--result_file ${output_dir}/${type}.align \
--checkpoint_path ${ckpt_prefix} \
......
......@@ -12,13 +12,7 @@ config_path=$1
ckpt_path_prefix=$2
jit_model_export_path=$3
device=gpu
if [ ${ngpu} == 0 ];then
device=cpu
fi
python3 -u ${BIN_DIR}/export.py \
--device ${device} \
--nproc ${ngpu} \
--config ${config_path} \
--checkpoint_path ${ckpt_path_prefix} \
......
......@@ -8,11 +8,6 @@ fi
ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
echo "using $ngpu gpus..."
device=gpu
if [ ${ngpu} == 0 ];then
device=cpu
fi
config_path=$1
ckpt_prefix=$2
......@@ -39,8 +34,7 @@ for type in attention ctc_greedy_search; do
output_dir=${ckpt_prefix}
mkdir -p ${output_dir}
python3 -u ${BIN_DIR}/test.py \
--device ${device} \
--nproc 1 \
--nproc ${ngpu} \
--config ${config_path} \
--result_file ${output_dir}/${type}.rsl \
--checkpoint_path ${ckpt_prefix} \
......@@ -58,8 +52,7 @@ for type in ctc_prefix_beam_search attention_rescoring; do
output_dir=${ckpt_prefix}
mkdir -p ${output_dir}
python3 -u ${BIN_DIR}/test.py \
--device ${device} \
--nproc 1 \
--nproc ${ngpu} \
--config ${config_path} \
--result_file ${output_dir}/${type}.rsl \
--checkpoint_path ${ckpt_prefix} \
......
......@@ -12,11 +12,6 @@ source ${MAIN_ROOT}/utils/parse_options.sh || exit 1;
ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
echo "using $ngpu gpus..."
device=gpu
if [ ${ngpu} == 0 ];then
device=cpu
fi
if [ ${seed} != 0 ]; then
export FLAGS_cudnn_deterministic=True
echo "using seed $seed & FLAGS_cudnn_deterministic=True ..."
......@@ -34,7 +29,6 @@ mkdir -p exp
python3 -u ${BIN_DIR}/train.py \
--seed ${seed} \
--device ${device} \
--nproc ${ngpu} \
--config ${config_path} \
--output exp/${ckpt_name} \
......
......@@ -8,10 +8,6 @@ fi
ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
echo "using $ngpu gpus..."
device=gpu
if [ ${ngpu} == 0 ];then
device=cpu
fi
config_path=$1
ckpt_prefix=$2
......@@ -20,7 +16,6 @@ ckpt_name=$(basename ${ckpt_prefxi})
mkdir -p exp
batch_size=1
output_dir=${ckpt_prefix}
mkdir -p ${output_dir}
......@@ -28,8 +23,7 @@ mkdir -p ${output_dir}
# align dump in `result_file`
# .tier, .TextGrid dump in `dir of result_file`
python3 -u ${BIN_DIR}/alignment.py \
--device ${device} \
--nproc 1 \
--nproc ${ngpu} \
--config ${config_path} \
--result_file ${output_dir}/${type}.align \
--checkpoint_path ${ckpt_prefix} \
......
......@@ -12,13 +12,7 @@ config_path=$1
ckpt_path_prefix=$2
jit_model_export_path=$3
device=gpu
if [ ${ngpu} == 0 ];then
device=cpu
fi
python3 -u ${BIN_DIR}/export.py \
--device ${device} \
--nproc ${ngpu} \
--config ${config_path} \
--checkpoint_path ${ckpt_path_prefix} \
......
......@@ -8,10 +8,6 @@ fi
ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
echo "using $ngpu gpus..."
device=gpu
if [ ${ngpu} == 0 ];then
device=cpu
fi
config_path=$1
ckpt_prefix=$2
......@@ -32,8 +28,7 @@ for type in attention ctc_greedy_search; do
output_dir=${ckpt_prefix}
mkdir -p ${output_dir}
python3 -u ${BIN_DIR}/test.py \
--device ${device} \
--nproc 1 \
--nproc ${ngpu} \
--config ${config_path} \
--result_file ${output_dir}/${type}.rsl \
--checkpoint_path ${ckpt_prefix} \
......@@ -51,8 +46,7 @@ for type in ctc_prefix_beam_search attention_rescoring; do
output_dir=${ckpt_prefix}
mkdir -p ${output_dir}
python3 -u ${BIN_DIR}/test.py \
--device ${device} \
--nproc 1 \
--nproc ${ngpu} \
--config ${config_path} \
--result_file ${output_dir}/${type}.rsl \
--checkpoint_path ${ckpt_prefix} \
......
......@@ -11,10 +11,6 @@ echo "using $ngpu gpus..."
config_path=$1
ckpt_name=$2
device=gpu
if [ ${ngpu} == 0 ];then
device=cpu
fi
echo "using ${device}..."
mkdir -p exp
......@@ -26,7 +22,6 @@ if [ ${seed} != 0 ]; then
fi
python3 -u ${BIN_DIR}/train.py \
--device ${device} \
--nproc ${ngpu} \
--config ${config_path} \
--output exp/${ckpt_name} \
......
......@@ -13,13 +13,7 @@ ckpt_path_prefix=$2
jit_model_export_path=$3
model_type=$4
device=gpu
if [ ${ngpu} == 0 ];then
device=cpu
fi
python3 -u ${BIN_DIR}/export.py \
--device ${device} \
--nproc ${ngpu} \
--config ${config_path} \
--checkpoint_path ${ckpt_path_prefix} \
......
......@@ -8,10 +8,6 @@ fi
ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
echo "using $ngpu gpus..."
device=gpu
if [ ${ngpu} == 0 ];then
device=cpu
fi
config_path=$1
ckpt_prefix=$2
model_type=$3
......@@ -23,8 +19,7 @@ if [ $? -ne 0 ]; then
fi
python3 -u ${BIN_DIR}/test.py \
--device ${device} \
--nproc 1 \
--nproc ${ngpu} \
--config ${config_path} \
--result_file ${ckpt_prefix}.rsl \
--checkpoint_path ${ckpt_prefix} \
......
......@@ -12,12 +12,6 @@ config_path=$1
ckpt_name=$2
model_type=$3
device=gpu
if [ ${ngpu} == 0 ];then
device=cpu
fi
echo "using ${device}..."
mkdir -p exp
# seed may break model convergence
......@@ -27,7 +21,6 @@ if [ ${seed} != 0 ]; then
fi
python3 -u ${BIN_DIR}/train.py \
--device ${device} \
--nproc ${ngpu} \
--config ${config_path} \
--output exp/${ckpt_name} \
......
......@@ -8,10 +8,6 @@ fi
ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
echo "using $ngpu gpus..."
device=gpu
if [ ${ngpu} == 0 ];then
device=cpu
fi
config_path=$1
ckpt_prefix=$2
......@@ -22,8 +18,7 @@ mkdir -p ${output_dir}
# align dump in `result_file`
# .tier, .TextGrid dump in `dir of result_file`
python3 -u ${BIN_DIR}/alignment.py \
--device ${device} \
--nproc 1 \
--nproc ${ngpu} \
--config ${config_path} \
--result_file ${output_dir}/${type}.align \
--checkpoint_path ${ckpt_prefix} \
......
......@@ -12,13 +12,7 @@ config_path=$1
ckpt_path_prefix=$2
jit_model_export_path=$3
device=gpu
if [ ${ngpu} == 0 ];then
device=cpu
fi
python3 -u ${BIN_DIR}/export.py \
--device ${device} \
--nproc ${ngpu} \
--config ${config_path} \
--checkpoint_path ${ckpt_path_prefix} \
......
......@@ -8,11 +8,6 @@ fi
ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
echo "using $ngpu gpus..."
device=gpu
if [ ${ngpu} == 0 ];then
device=cpu
fi
config_path=$1
ckpt_prefix=$2
......@@ -38,8 +33,7 @@ for type in attention ctc_greedy_search; do
batch_size=64
fi
python3 -u ${BIN_DIR}/test.py \
--device ${device} \
--nproc 1 \
--nproc ${ngpu} \
--config ${config_path} \
--result_file ${ckpt_prefix}.${type}.rsl \
--checkpoint_path ${ckpt_prefix} \
......@@ -55,8 +49,7 @@ for type in ctc_prefix_beam_search attention_rescoring; do
echo "decoding ${type}"
batch_size=1
python3 -u ${BIN_DIR}/test.py \
--device ${device} \
--nproc 1 \
--nproc ${ngpu} \
--config ${config_path} \
--result_file ${ckpt_prefix}.${type}.rsl \
--checkpoint_path ${ckpt_prefix} \
......
......@@ -11,12 +11,6 @@ echo "using $ngpu gpus..."
config_path=$1
ckpt_name=$2
device=gpu
if [ ${ngpu} == 0 ];then
device=cpu
fi
echo "using ${device}..."
mkdir -p exp
# seed may break model convergence
......@@ -25,8 +19,10 @@ if [ ${seed} != 0 ]; then
export FLAGS_cudnn_deterministic=True
fi
# export FLAGS_cudnn_exhaustive_search=true
# export FLAGS_conv_workspace_size_limit=4000
python3 -u ${BIN_DIR}/train.py \
--device ${device} \
--nproc ${ngpu} \
--config ${config_path} \
--output exp/${ckpt_name} \
......
......@@ -8,10 +8,6 @@ fi
ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
echo "using $ngpu gpus..."
device=gpu
if [ ${ngpu} == 0 ];then
device=cpu
fi
config_path=$1
dict_path=$2
ckpt_prefix=$3
......@@ -26,8 +22,7 @@ python3 -u ${BIN_DIR}/test.py \
--model-name 'u2_kaldi' \
--run-mode 'align' \
--dict-path ${dict_path} \
--device ${device} \
--nproc 1 \
--nproc ${ngpu} \
--config ${config_path} \
--result-file ${output_dir}/${type}.align \
--checkpoint_path ${ckpt_prefix} \
......
......@@ -12,15 +12,9 @@ config_path=$1
ckpt_path_prefix=$2
jit_model_export_path=$3
device=gpu
if [ ${ngpu} == 0 ];then
device=cpu
fi
python3 -u ${BIN_DIR}/test.py \
--model-name 'u2_kaldi' \
--run-mode 'export' \
--device ${device} \
--nproc ${ngpu} \
--config ${config_path} \
--checkpoint_path ${ckpt_path_prefix} \
......
......@@ -11,12 +11,6 @@ echo "using $ngpu gpus..."
config_path=$1
ckpt_name=$2
device=gpu
if [ ${ngpu} == 0 ];then
device=cpu
fi
echo "using ${device}..."
mkdir -p exp
# seed may break model convergence
......@@ -27,7 +21,6 @@ fi
python3 -u ${BIN_DIR}/train.py \
--model-name u2_kaldi \
--device ${device} \
--nproc ${ngpu} \
--config ${config_path} \
--output exp/${ckpt_name} \
......
......@@ -8,10 +8,6 @@ fi
ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
echo "using $ngpu gpus..."
device=gpu
if [ ${ngpu} == 0 ];then
device=cpu
fi
config_path=$1
ckpt_prefix=$2
......@@ -19,8 +15,7 @@ for type in fullsentence; do
echo "decoding ${type}"
batch_size=32
python3 -u ${BIN_DIR}/test.py \
--device ${device} \
--nproc 1 \
--nproc ${ngpu} \
--config ${config_path} \
--result_file ${ckpt_prefix}.${type}.rsl \
--checkpoint_path ${ckpt_prefix} \
......
......@@ -11,12 +11,6 @@ echo "using $ngpu gpus..."
config_path=$1
ckpt_name=$2
device=gpu
if [ ${ngpu} == 0 ];then
device=cpu
fi
echo "using ${device}..."
mkdir -p exp
# seed may break model convergence
......@@ -26,7 +20,6 @@ if [ ${seed} != 0 ]; then
fi
python3 -u ${BIN_DIR}/train.py \
--device ${device} \
--nproc ${ngpu} \
--config ${config_path} \
--output exp/${ckpt_name} \
......
......@@ -8,10 +8,6 @@ fi
ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
echo "using $ngpu gpus..."
device=gpu
if [ ${ngpu} == 0 ];then
device=cpu
fi
config_path=$1
ckpt_prefix=$2
......@@ -22,8 +18,7 @@ mkdir -p ${output_dir}
# align dump in `result_file`
# .tier, .TextGrid dump in `dir of result_file`
python3 -u ${BIN_DIR}/alignment.py \
--device ${device} \
--nproc 1 \
--nproc ${ngpu} \
--config ${config_path} \
--result_file ${output_dir}/${type}.align \
--checkpoint_path ${ckpt_prefix} \
......
......@@ -12,13 +12,7 @@ config_path=$1
ckpt_path_prefix=$2
jit_model_export_path=$3
device=gpu
if [ ${ngpu} == 0 ];then
device=cpu
fi
python3 -u ${BIN_DIR}/export.py \
--device ${device} \
--nproc ${ngpu} \
--config ${config_path} \
--checkpoint_path ${ckpt_path_prefix} \
......
......@@ -8,11 +8,6 @@ fi
ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
echo "using $ngpu gpus..."
device=gpu
if [ ${ngpu} == 0 ];then
device=cpu
fi
config_path=$1
ckpt_prefix=$2
......@@ -37,8 +32,7 @@ for type in attention ctc_greedy_search; do
batch_size=64
fi
python3 -u ${BIN_DIR}/test.py \
--device ${device} \
--nproc 1 \
--nproc ${ngpu} \
--config ${config_path} \
--result_file ${ckpt_prefix}.${type}.rsl \
--checkpoint_path ${ckpt_prefix} \
......@@ -54,8 +48,7 @@ for type in ctc_prefix_beam_search attention_rescoring; do
echo "decoding ${type}"
batch_size=1
python3 -u ${BIN_DIR}/test.py \
--device ${device} \
--nproc 1 \
--nproc ${ngpu} \
--config ${config_path} \
--result_file ${ckpt_prefix}.${type}.rsl \
--checkpoint_path ${ckpt_prefix} \
......
......@@ -11,12 +11,6 @@ echo "using $ngpu gpus..."
config_path=$1
ckpt_name=$2
device=gpu
if [ ${ngpu} == 0 ];then
device=cpu
fi
echo "using ${device}..."
mkdir -p exp
# seed may break model convergence
......@@ -26,7 +20,6 @@ if [ ${seed} != 0 ]; then
fi
python3 -u ${BIN_DIR}/train.py \
--device ${device} \
--nproc ${ngpu} \
--config ${config_path} \
--output exp/${ckpt_name} \
......
......@@ -13,13 +13,7 @@ ckpt_path_prefix=$2
jit_model_export_path=$3
model_type=$4
device=gpu
if [ ${ngpu} == 0 ];then
device=cpu
fi
python3 -u ${BIN_DIR}/export.py \
--device ${device} \
--nproc ${ngpu} \
--config ${config_path} \
--checkpoint_path ${ckpt_path_prefix} \
......
......@@ -8,10 +8,6 @@ fi
ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
echo "using $ngpu gpus..."
device=gpu
if [ ${ngpu} == 0 ];then
device=cpu
fi
config_path=$1
ckpt_prefix=$2
model_type=$3
......@@ -23,8 +19,7 @@ if [ $? -ne 0 ]; then
fi
python3 -u ${BIN_DIR}/test.py \
--device ${device} \
--nproc 1 \
--nproc ${ngpu} \
--config ${config_path} \
--result_file ${ckpt_prefix}.rsl \
--checkpoint_path ${ckpt_prefix} \
......
......@@ -10,17 +10,11 @@ source ${MAIN_ROOT}/utils/parse_options.sh || exit 1;
ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
echo "using $ngpu gpus..."
device=gpu
if [ ${ngpu} == 0 ];then
device=cpu
fi
if [ ${seed} != 0 ]; then
export FLAGS_cudnn_deterministic=True
echo "using seed $seed & FLAGS_cudnn_deterministic=True ..."
fi
if [ $# != 3 ];then
echo "usage: CUDA_VISIBLE_DEVICES=0 ${0} config_path ckpt_name model_type"
exit -1
......@@ -33,7 +27,6 @@ model_type=$3
mkdir -p exp
python3 -u ${BIN_DIR}/train.py \
--device ${device} \
--nproc ${ngpu} \
--config ${config_path} \
--output exp/${ckpt_name} \
......
......@@ -8,10 +8,6 @@ fi
ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
echo "using $ngpu gpus..."
device=gpu
if [ ${ngpu} == 0 ];then
device=cpu
fi
config_path=$1
ckpt_prefix=$2
......@@ -22,8 +18,7 @@ mkdir -p ${output_dir}
# align dump in `result_file`
# .tier, .TextGrid dump in `dir of result_file`
python3 -u ${BIN_DIR}/alignment.py \
--device ${device} \
--nproc 1 \
--nproc ${ngpu} \
--config ${config_path} \
--result_file ${output_dir}/${type}.align \
--checkpoint_path ${ckpt_prefix} \
......
......@@ -12,13 +12,7 @@ config_path=$1
ckpt_path_prefix=$2
jit_model_export_path=$3
device=gpu
if [ ${ngpu} == 0 ];then
device=cpu
fi
python3 -u ${BIN_DIR}/export.py \
--device ${device} \
--nproc ${ngpu} \
--config ${config_path} \
--checkpoint_path ${ckpt_path_prefix} \
......
......@@ -8,10 +8,6 @@ fi
ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
echo "using $ngpu gpus..."
device=gpu
if [ ${ngpu} == 0 ];then
device=cpu
fi
config_path=$1
ckpt_prefix=$2
......@@ -35,8 +31,7 @@ for type in attention ctc_greedy_search; do
batch_size=64
fi
python3 -u ${BIN_DIR}/test.py \
--device ${device} \
--nproc 1 \
--nproc ${ngpu} \
--config ${config_path} \
--result_file ${ckpt_prefix}.${type}.rsl \
--checkpoint_path ${ckpt_prefix} \
......@@ -52,8 +47,7 @@ for type in ctc_prefix_beam_search attention_rescoring; do
echo "decoding ${type}"
batch_size=1
python3 -u ${BIN_DIR}/test.py \
--device ${device} \
--nproc 1 \
--nproc ${ngpu} \
--config ${config_path} \
--result_file ${ckpt_prefix}.${type}.rsl \
--checkpoint_path ${ckpt_prefix} \
......
......@@ -12,11 +12,6 @@ source ${MAIN_ROOT}/utils/parse_options.sh || exit 1;
ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
echo "using $ngpu gpus..."
device=gpu
if [ ${ngpu} == 0 ];then
device=cpu
fi
if [ ${seed} != 0 ]; then
export FLAGS_cudnn_deterministic=True
echo "using seed $seed & FLAGS_cudnn_deterministic=True ..."
......@@ -34,7 +29,6 @@ mkdir -p exp
python3 -u ${BIN_DIR}/train.py \
--seed ${seed} \
--device ${device} \
--nproc ${ngpu} \
--config ${config_path} \
--output exp/${ckpt_name} \
......
......@@ -401,7 +401,7 @@ class DeepSpeech2Tester(DeepSpeech2Trainer):
def setup(self):
"""Setup the experiment.
"""
paddle.set_device(self.args.device)
paddle.set_device('gpu' if self.args.nprocs > 0 else 'cpu')
self.setup_output_dir()
self.setup_checkpointer()
......
......@@ -8,10 +8,6 @@ fi
ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
echo "using $ngpu gpus..."
device=gpu
if [ ${ngpu} == 0 ];then
device=cpu
fi
config_path=$1
ckpt_prefix=$2
model_type=$3
......@@ -23,8 +19,7 @@ if [ $? -ne 0 ]; then
fi
python3 -u ${BIN_DIR}/test.py \
--device ${device} \
--nproc 1 \
--nproc ${ngpu} \
--config ${config_path} \
--result_file ${ckpt_prefix}.rsl \
--checkpoint_path ${ckpt_prefix} \
......
......@@ -8,10 +8,6 @@ fi
ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
echo "using $ngpu gpus..."
device=gpu
if [ ${ngpu} == 0 ];then
device=cpu
fi
config_path=$1
ckpt_prefix=$2
model_type=$3
......@@ -23,8 +19,7 @@ if [ $? -ne 0 ]; then
fi
python3 -u ${BIN_DIR}/test.py \
--device ${device} \
--nproc 1 \
--nproc ${ngpu} \
--config ${config_path} \
--result_file ${ckpt_prefix}.rsl \
--checkpoint_path ${ckpt_prefix} \
......
......@@ -8,10 +8,6 @@ fi
ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
echo "using $ngpu gpus..."
device=gpu
if [ ${ngpu} == 0 ];then
device=cpu
fi
config_path=$1
ckpt_prefix=$2
model_type=$3
......@@ -23,8 +19,7 @@ if [ $? -ne 0 ]; then
fi
python3 -u ${BIN_DIR}/test.py \
--device ${device} \
--nproc 1 \
--nproc ${ngpu} \
--config ${config_path} \
--result_file ${ckpt_prefix}.rsl \
--checkpoint_path ${ckpt_prefix} \
......
......@@ -13,7 +13,7 @@ null:null
null:null
##
trainer:norm_train
norm_train: ../../../deepspeech/exps/deepspeech2/bin/train.py --nproc 1 --config conf/deepspeech2.yaml --model_type offline --device gpu
norm_train: ../../../deepspeech/exps/deepspeech2/bin/train.py --nproc 1 --config conf/deepspeech2.yaml --model_type offline
pact_train:null
fpgm_train:null
distill_train:null
......@@ -21,7 +21,7 @@ null:null
null:null
##
===========================eval_params===========================
eval: ../../../deepspeech/exps/deepspeech2/bin/test.py --nproc 1 --config conf/deepspeech2.yaml --result_file tests/9.rsl --model_type offline --device gpu
eval: ../../../deepspeech/exps/deepspeech2/bin/test.py --nproc 1 --config conf/deepspeech2.yaml --result_file tests/9.rsl --model_type offline
null:null
##
===========================infer_params===========================
......