Unverified commit 252f70e6, authored by Hui Zhang, committed by GitHub

Merge pull request #871 from Jackwaterveg/test

update chain test script
For lite_train_infer, run:
```
bash lite_train_infer.sh
```
For whole_train_infer, run:
```
bash whole_train_infer.sh
```
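Both wrappers are thin drivers around the shared `test.sh` shown at the end of this diff, which takes a parameter file and a mode (see its usage comment below). A minimal sketch of the equivalent direct call, assuming the deepspeech2 parameter file below is saved as `params.txt` (a placeholder name):
```
bash test.sh params.txt lite_train_infer   # or: whole_train_infer
```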
===========================train_params===========================
model_name:deepspeech2
-python:python3.8
-gpu_list:0
+python:python3.7
+gpu_list:0|0,1
null:null
null:null
null:null
null:null
null:null
null:null
--output:null
null:null
--checkpoint_path:
train_model_name:checkpoints/9
null:null
null:null
##
trainer:norm_train
-norm_train: ../../../deepspeech/exps/deepspeech2/bin/train.py --nproc 1 --config conf/deepspeech2.yaml --model_type offline
+norm_train: ../../../deepspeech/exps/deepspeech2/bin/train.py --config conf/deepspeech2.yaml --model_type offline --profiler-options "" --output exp/deepspeech_tiny --seed 0
pact_train:null
fpgm_train:null
distill_train:null
@@ -21,13 +21,13 @@ null:null
null:null
##
===========================eval_params===========================
-eval: ../../../deepspeech/exps/deepspeech2/bin/test.py --nproc 1 --config conf/deepspeech2.yaml --result_file tests/9.rsl --model_type offline
+eval: ../../../deepspeech/exps/deepspeech2/bin/test.py --nproc 1 --config conf/deepspeech2.yaml --checkpoint_path exp/deepspeech_tiny/checkpoints/9 --result_file tests/9.rsl --model_type offline
null:null
##
===========================infer_params===========================
---export_path:checkpoints/9.jit
---checkpoint_path:checkpoints/9
-norm_export: ../../../deepspeech/exps/deepspeech2/bin/export.py --nproc 1 --config conf/deepspeech2.yaml --model_type offline
+null:null
+null:null
+norm_export: ../../../deepspeech/exps/deepspeech2/bin/export.py --nproc 1 --config conf/deepspeech2.yaml --model_type offline --checkpoint_path exp/deepspeech_tiny/checkpoints/9 --export_path exp/deepspeech_tiny/checkpoints/9.jit
quant_export:null
fpgm_export:null
distill_export:null
......
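Each of these parameter files is a list of `key:value` lines grouped under `===...===` banners; `null:null` marks an unused slot, and `|` in a value (e.g. `gpu_list:0|0,1`) separates alternative settings that `test.sh` iterates over. A hedged sketch of how such lines are typically split, assuming `func_parser_key`/`func_parser_value` helpers like those in similar chain-test scripts:
```
# split one "key:value" line; note these helpers reset the global IFS
function func_parser_key(){
    IFS=":"
    array=($1)
    echo ${array[0]}
}
function func_parser_value(){
    IFS=":"
    array=($1)
    echo ${array[1]}
}
func_parser_value "gpu_list:0|0,1"   # prints "0|0,1"
```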
===========================train_params===========================
model_name:deepspeech2
-python:python3.8
-gpu_list:0
+python:python3.7
+gpu_list:0,1|0
null:null
null:null
null:null
null:null
null:null
null:null
--output:null
null:null
--checkpoint_path:
train_model_name:checkpoints/1
null:null
null:null
##
trainer:norm_train
-norm_train: ../../../deepspeech/exps/deepspeech2/bin/train.py --nproc 1 --config conf/deepspeech2.yaml --model_type offline --device gpu
+norm_train: ../../../deepspeech/exps/deepspeech2/bin/train.py --config conf/deepspeech2.yaml --model_type offline --profiler-options "" --output exp/deepspeech_whole --seed 0
pact_train:null
fpgm_train:null
distill_train:null
@@ -21,13 +21,13 @@ null:null
null:null
##
===========================eval_params===========================
-eval: ../../../deepspeech/exps/deepspeech2/bin/test.py --nproc 1 --config conf/deepspeech2.yaml --result_file tests/1.rsl --model_type offline --device gpu
+eval: ../../../deepspeech/exps/deepspeech2/bin/test.py --nproc 1 --config conf/deepspeech2.yaml --result_file tests/49.rsl --checkpoint_path exp/deepspeech_whole/checkpoints/49 --model_type offline
null:null
##
===========================infer_params===========================
---export_path:checkpoints/1.jit
---checkpoint_path:checkpoints/1
-norm_export: ../../../deepspeech/exps/deepspeech2/bin/export.py --nproc 1 --config conf/deepspeech2.yaml --model_type offline
+null:null
+null:null
+norm_export: ../../../deepspeech/exps/deepspeech2/bin/export.py --nproc 1 --config conf/deepspeech2.yaml --model_type offline --checkpoint_path exp/deepspeech_whole/checkpoints/49 --export_path exp/deepspeech_whole/checkpoints/49.jit
quant_export:null
fpgm_export:null
distill_export:null
......
===========================train_params===========================
model_name:speedyspeech
python:python3.7
gpu_list:1|0,1
null:null
null:null
null:null
null:null
null:null
null:null
null:null
null:null
null:null
##
trainer:norm_train
norm_train:../examples/speedyspeech/baker/train.py --train-metadata=train_data/mini_BZNSYP/train/norm/metadata.jsonl --dev-metadata=train_data/mini_BZNSYP/dev/norm/metadata.jsonl --config=lite_train_infer.yaml --output-dir=exp/default
null:null
null:null
null:null
null:null
null:null
##
===========================eval_params===========================
eval:../examples/speedyspeech/baker/synthesize_e2e.py --speedyspeech-config=../examples/speedyspeech/baker/conf/default.yaml --speedyspeech-checkpoint=exp/default/checkpoints/snapshot_iter_90.pdz --speedyspeech-stat=pretrain_models/speedyspeech_baker_ckpt_0.4/speedy_speech_stats.npy --pwg-config=../examples/parallelwave_gan/baker/conf/default.yaml --pwg-checkpoint=pretrain_models/pwg_baker_ckpt_0.4/pwg_snapshot_iter_400000.pdz --pwg-stat=pretrain_models/pwg_baker_ckpt_0.4/pwg_stats.npy --text=../examples/speedyspeech/baker/sentences.txt --output-dir=e2e --inference-dir=inference --device="gpu" --phones-dict=../examples/speedyspeech/baker/phones.txt --tones-dict=../examples/speedyspeech/baker/tones.txt
null:null
##
===========================infer_params===========================
null:null
null:null
null:null
null:null
null:null
null:null
null:null
null:null
##
null:null
null:null
null:null
inference:../examples/speedyspeech/baker/inference.py --inference-dir=pretrain_models/speedyspeech_pwg_inference_0.4 --text=../examples/speedyspeech/baker/sentences.txt --output-dir=inference_out --enable-auto-log --phones-dict=../examples/speedyspeech/baker/phones.txt --tones-dict=../examples/speedyspeech/baker/tones.txt
--use_gpu:True
null:null
null:null
null:null
null:null
null:null
null:null
null:null
null:null
null:null
null:null
#!/bin/bash
# usage: bash test.sh ***.txt MODE
FILENAME=$1
# MODE must be one of ['lite_train_infer', 'whole_infer', 'whole_train_infer', 'infer']
MODE=$2
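`func_set_params`, called throughout the hunks below, presumably turns a key/value pair from the parameter file into a command-line fragment and returns a blank when either side is `null` — which is what lets the updated configs disable flags by writing `null:null`. A sketch under that assumption:
```
# returns "key=value", or a blank when the slot is nulled out
function func_set_params(){
    key=$1
    value=$2
    if [ "${key}" = "null" ] || [ "${value}" = "null" ] || [ -z "${value}" ]; then
        echo " "
    else
        echo "${key}=${value}"
    fi
}
```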
@@ -174,7 +176,7 @@ function func_inference(){
set_cpu_threads=$(func_set_params "${cpu_threads_key}" "${threads}")
set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}")
set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}")
command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} > ${_save_log_path} 2>&1 "
command="${_python} ${_script} > ${_save_log_path} 2>&1 "
eval $command
last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}"
@@ -203,7 +205,7 @@ function func_inference(){
set_precision=$(func_set_params "${precision_key}" "${precision}")
set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}")
set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}")
command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} > ${_save_log_path} 2>&1 "
command="${_python} ${_script} > ${_save_log_path} 2>&1 "
eval $command
last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}"
@@ -234,21 +236,18 @@ if [ ${MODE} = "infer" ]; then
for infer_model in ${infer_model_dir_list[*]}; do
# run export
if [ ${infer_run_exports[Count]} != "null" ];then
-save_infer_dir=$(dirname $infer_model)
set_export_weight=$(func_set_params "${export_weight}" "${infer_model}")
-set_save_infer_key=$(func_set_params "${save_infer_key}" "${save_infer_dir}")
+set_save_infer_key=$(func_set_params "${save_infer_key}" "${infer_model}")
export_cmd="${python} ${norm_export} ${set_export_weight} ${set_save_infer_key}"
eval $export_cmd
status_export=$?
if [ ${status_export} = 0 ];then
status_check $status_export "${export_cmd}" "${status_log}"
fi
else
-save_infer_dir=${infer_model}
fi
#run inference
is_quant=${infer_quant_flag[Count]}
func_inference "${python}" "${inference_py}" "${save_infer_dir}" "${LOG_PATH}" "${infer_img_dir}" ${is_quant}
func_inference "${python}" "${inference_py}" "${infer_model}" "${LOG_PATH}" "${infer_img_dir}" ${is_quant}
Count=$(($Count + 1))
done
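With the updated deepspeech2 configs the `--export_path:`/`--checkpoint_path:` slots are `null:null`, so `set_export_weight` and `set_save_infer_key` expand to blanks and `export_cmd` reduces to the `norm_export` line alone. For the lite config that works out to (a sketch, line-wrapped for readability):
```
python3.7 ../../../deepspeech/exps/deepspeech2/bin/export.py --nproc 1 \
    --config conf/deepspeech2.yaml --model_type offline \
    --checkpoint_path exp/deepspeech_tiny/checkpoints/9 \
    --export_path exp/deepspeech_tiny/checkpoints/9.jit
```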
@@ -320,14 +319,16 @@ else
set_save_model=$(func_set_params "${save_model_key}" "${save_log}")
if [ ${#gpu} -le 2 ];then # train with cpu or single gpu
cmd="${python} ${run_train} ${set_use_gpu} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_autocast} ${set_batchsize} ${set_train_params1} "
cmd="${python} ${run_train} "
elif [ ${#gpu} -le 15 ];then # train with multi-gpu
cmd="${python} -m paddle.distributed.launch --gpus=${gpu} ${run_train} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_autocast} ${set_batchsize} ${set_train_params1}"
gsu=${gpu//,/ }
nump=`echo $gsu | wc -w`
cmd="${python} ${run_train} --nproc=$nump"
else # train with multi-machine
cmd="${python} -m paddle.distributed.launch --ips=${ips} --gpus=${gpu} ${run_train} ${set_save_model} ${set_pretrain} ${set_epoch} ${set_autocast} ${set_batchsize} ${set_train_params1}"
fi
# run train
#eval "unset CUDA_VISIBLE_DEVICES"
# eval "unset CUDA_VISIBLE_DEVICES"
eval $cmd
status_check $? "${cmd}" "${status_log}"
@@ -338,6 +339,11 @@
fi
# run eval
if [ ${eval_py} != "null" ]; then
IFS=","
array=(${gpu})
IFS="|"
env="export CUDA_VISIBLE_DEVICES=${array[0]}"
eval $env
set_eval_params1=$(func_set_params "${eval_key1}" "${eval_value1}")
eval_cmd="${python} ${eval_py} ${set_eval_pretrain} ${set_use_gpu} ${set_eval_params1}"
eval $eval_cmd
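A worked example of the two GPU-handling additions above, for the `gpu_list` entry `0,1`:
```
gpu="0,1"
gsu=${gpu//,/ }                    # "0 1"
nump=`echo $gsu | wc -w`           # 2 -> training runs with --nproc=2
IFS=","
array=(${gpu})                     # ("0" "1")
IFS="|"
export CUDA_VISIBLE_DEVICES=${array[0]}   # eval sees GPU 0 only
```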
@@ -357,7 +363,7 @@
eval $env
save_infer_path="${save_log}"
func_inference "${python}" "${inference_py}" "${save_infer_path}" "${LOG_PATH}" "${train_infer_img_dir}" "${flag_quant}"
eval "unset CUDA_VISIBLE_DEVICES"
#eval "unset CUDA_VISIBLE_DEVICES"
fi
done # done with: for trainer in ${trainer_list[*]}; do
done # done with: for autocast in ${autocast_list[*]}; do
......