提交 25c0f23b 编写于 作者: H huangyuxin

Update the installation script for openfst

上级 37e02308
### Prepare the environment
Please follow the instructions shown in [here](https://github.com/PaddlePaddle/DeepSpeech/blob/develop/docs/src/install.md) to install the Deepspeech first.
### Prepare the benchmark environment
bash prepare.sh
### Start benchmarking
bash run.sh
### Analyse the sp
bash run_analysis_sp.sh
### Analyse the mp
bash run_analysis_mp.sh
#!/usr/bin/env bash
set -xe
# 运行示例:CUDA_VISIBLE_DEVICES=0 bash run_benchmark.sh ${run_mode} ${bs_item} ${fp_item} 500 ${model_mode}
# 参数说明
function _set_params(){
    # Read benchmark configuration from positional arguments (all optional).
    run_mode=${1:-"sp"}                      # "sp" = single GPU | "mp" = multi GPU
    config_path=${2:-"conf/conformer.yaml"}  # training config yaml
    output=${3:-"exp/conformer"}             # checkpoint / output directory
    seed=${4:-"0"}
    ngpu=${5:-"1"}
    profiler_options=${6:-"None"}
    batch_size=${7:-"32"}
    fp_item=${8:-"fp32"}                     # fp32 | fp16
    TRAIN_LOG_DIR=${9:-$(pwd)}
    benchmark_max_step=0
    run_log_path=${TRAIN_LOG_DIR:-$(pwd)}    # QA overrides TRAIN_LOG_DIR later
    # Derived values -- do not edit below this line.
    device=${CUDA_VISIBLE_DEVICES//,/ }      # "0,1,2" -> "0 1 2"
    arr=(${device})                          # word-split on purpose to count visible GPUs
    num_gpu_devices=${#arr[@]}
    log_file=${run_log_path}/recoder_${run_mode}_bs${batch_size}_${fp_item}_ngpu${ngpu}.txt
}
function _train(){
    # Launch one training run, capture its log, and record SUCCESS/FAIL in
    # job_fail_flag. Reads globals set by _set_params: num_gpu_devices,
    # config_path, output, seed, ngpu, profiler_options, batch_size,
    # benchmark_max_step, run_mode, log_file.
    # NOTE(review): model_name is never set in this script, so the status lines
    # print an empty model name -- confirm whether it should be passed in.
    echo "Train on ${num_gpu_devices} GPUs"
    echo "current CUDA_VISIBLE_DEVICES=$CUDA_VISIBLE_DEVICES, gpus=$num_gpu_devices, batch_size=$batch_size"
    # Build the argument list as an array so values with spaces (e.g.
    # profiler_options) survive intact instead of relying on word-splitting.
    train_args=(
        --config="${config_path}"
        --output="${output}"
        --seed="${seed}"
        --nproc="${ngpu}"
        --profiler-options "${profiler_options}"
        --benchmark-batch-size "${batch_size}"
        --benchmark-max-step "${benchmark_max_step}"
    )
    echo "run_mode ${run_mode}"
    case ${run_mode} in
        # sp and mp launch the same command; the GPU count comes from --nproc.
        sp|mp) train_cmd=(python3 -u "${BIN_DIR}/train.py" "${train_args[@]}") ;;
        *) echo "choose run_mode(sp or mp)"; exit 1 ;;
    esac
    echo "${train_cmd[@]}"
    # Do not modify below this line.
    # BUG FIX: the original ran `timeout ...` then tested $?; under `set -e`
    # a failing run aborted the script before the FAIL branch could execute.
    # Testing inside `if !` suspends errexit for the command.
    if ! timeout 15m "${train_cmd[@]}" > "${log_file}" 2>&1; then
        echo -e "${model_name}, FAIL"
        export job_fail_flag=1
    else
        echo -e "${model_name}, SUCCESS"
        export job_fail_flag=0
    fi
    # Clean up leftover python workers (benchmark host is assumed dedicated).
    # BUG FIX: `kill -9 $(ps -ef|grep python|awk ...)` also matched the grep
    # process itself; killing its already-exited pid fails and, under `set -e`,
    # aborted the script. pkill excludes itself; `|| true` tolerates "no match".
    pkill -KILL -f python || true
    if [[ "${run_mode}" == "mp" && -d mylog ]]; then
        # Under multi-process launch the authoritative log is worker 0's log.
        rm "${log_file}"
        cp mylog/workerlog.0 "${log_file}"
    fi
}
# Parse CLI arguments into globals, then launch the benchmark run.
# BUG FIX: quote "$@" so arguments containing spaces are forwarded intact
# (bare $@ re-splits them into separate words).
_set_params "$@"
_train
......@@ -10,7 +10,7 @@ fi
# Download and unpack openfst 1.6.3 unless it is already present.
if [ ! -d openfst-1.6.3 ]; then
    echo "Download and extract openfst ..."
    # BUG FIX: diff residue left both the old and new wget invocations,
    # downloading the tarball twice. Keep only the updated command;
    # --no-check-certificate works around openfst.org's unreliable TLS cert
    # (known tradeoff: no server authentication for this fetch).
    wget http://www.openfst.org/twiki/pub/FST/FstDownload/openfst-1.6.3.tar.gz --no-check-certificate
    tar -xzvf openfst-1.6.3.tar.gz
    echo -e "\n"
fi
......
# Benchmark Test
### Prepare the environment
Please follow the instructions shown in [here](https://github.com/PaddlePaddle/DeepSpeech/blob/develop/docs/src/install.md) to install the Deepspeech first.
## Data
* Aishell

### Prepare the benchmark environment
```
bash prepare.sh
```
### Start benchmarking
```
bash run.sh
```
## Docker
```
registry.baidubce.com/paddlepaddle/paddle 2.1.1-gpu-cuda10.2-cudnn7 59d5ec1de486
```
### Analyse the sp
```
bash run_analysis_sp.sh
```
### Analyse the mp
```
bash run_analysis_mp.sh
```
source ../tools/venv/bin/activate
#Enter the example dir
pushd ../examples/aishell/s1
#Prepare the data
bash run.sh --stage 0 --stop_stage 0
#!/bin/bash
# Benchmark sweep driver: runs every (model, precision, batch-size) combination
# in single-GPU (sp) and 8-GPU multi-process (mp) mode via run_benchmark.sh.
# Expected to run inside the standard py37 docker environment.
CUR_DIR=${PWD}
ROOT_DIR=../../
# collect env info
bash "${ROOT_DIR}"/utils/pd_env_collect.sh
#cat pd_env.txt
# 1. Install this model's dependencies (note any optimization flags enabled).
#pushd ${ROOT_DIR}/tools; make; popd
#source ${ROOT_DIR}/tools/venv/bin/activate
#pushd ${ROOT_DIR}; bash setup.sh; popd
# 2. Fetch data / pretrained models; run from the example directory.
#pushd ${ROOT_DIR}/examples/aishell/s1
pushd "${ROOT_DIR}"/examples/tiny/s1
mkdir -p exp/log
. path.sh
#bash local/data.sh &> exp/log/data.log
# 3. Run all combinations (if batching is inconvenient, fold 1 and 2 into each run).
model_mode_list=(conformer transformer)
fp_item_list=(fp32)
bs_item_list=(32 64 96)
for model_mode in "${model_mode_list[@]}"; do
    for fp_item in "${fp_item_list[@]}"; do
        for bs_item in "${bs_item_list[@]}"; do
            # BUG FIX: the original echoed ${model_name}, which is never
            # defined in this script; the loop variable is model_mode.
            echo "index is speed, 1gpus, begin, ${model_mode}"
            run_mode=sp
            CUDA_VISIBLE_DEVICES=0 bash "${CUR_DIR}"/run_benchmark.sh "${run_mode}" "${bs_item}" "${fp_item}" 500 "${model_mode}"     # (5min)
            sleep 60
            echo "index is speed, 8gpus, run_mode is multi_process, begin, ${model_mode}"
            run_mode=mp
            CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 bash "${CUR_DIR}"/run_benchmark.sh "${run_mode}" "${bs_item}" "${fp_item}" 500 "${model_mode}"
            sleep 60
        done
    done
done
popd # examples/tiny/s1
#!/bin/bash
#!/usr/bin/env bash
set -xe
# 运行示例:CUDA_VISIBLE_DEVICES=0 bash run_benchmark.sh ${run_mode} ${bs_item} ${fp_item} 500 ${model_mode}
# 参数说明
function _set_params(){
# Parse benchmark parameters from positional arguments.
# NOTE(review): this block appears to be unresolved diff/merge residue --
# several globals are assigned twice from conflicting positional slots
# (e.g. batch_size from $2 and again from $7, fp_item from $3 and $8,
# log_file twice); in bash the later assignment wins. Confirm the intended
# argument layout before relying on this version.
run_mode=${1:-"sp"} # "sp" = single GPU | "mp" = multi GPU
batch_size=${2:-"64"}
fp_item=${3:-"fp32"} # fp32|fp16
max_iter=${4:-"500"} # optional; requires a code change to stop training early
model_name=${5:-"model_name"}
config_path=${2:-"conf/conformer.yaml"}
output=${3:-"exp/conformer"}
seed=${4:-"0"}
ngpu=${5:-"1"}
profiler_options=${6:-"None"}
batch_size=${7:-"32"}
fp_item=${8:-"fp32"}
TRAIN_LOG_DIR=${9:-$(pwd)}
benchmark_max_step=0
run_log_path=${TRAIN_LOG_DIR:-$(pwd)} # TRAIN_LOG_DIR is set later by QA
# Derived values -- do not modify below this line.
device=${CUDA_VISIBLE_DEVICES//,/ }
arr=(${device})
num_gpu_devices=${#arr[*]}
log_file=${run_log_path}/${model_name}_${run_mode}_bs${batch_size}_${fp_item}_${num_gpu_devices}
log_file=${run_log_path}/recoder_${run_mode}_bs${batch_size}_${fp_item}_ngpu${ngpu}.txt
}
function _train(){
echo "Train on ${num_gpu_devices} GPUs"
echo "current CUDA_VISIBLE_DEVICES=$CUDA_VISIBLE_DEVICES, gpus=$num_gpu_devices, batch_size=$batch_size"
train_cmd="--config=${config_path}
--output=${output}
--seed=${seed}
--nproc=${ngpu}
--profiler-options "${profiler_options}"
--benchmark-batch-size ${batch_size}
--benchmark-max-step ${benchmark_max_step} "
train_cmd="--benchmark-batch-size ${batch_size}
--benchmark-max-step ${max_iter}
conf/${model_name}.yaml ${model_name}"
echo "run_mode "${run_mode}
case ${run_mode} in
sp) train_cmd="bash local/train.sh "${train_cmd}"" ;;
mp)
train_cmd="bash local/train.sh "${train_cmd}"" ;;
sp) train_cmd="python3 -u ${BIN_DIR}/train.py "${train_cmd} ;;
mp) train_cmd="python3 -u ${BIN_DIR}/train.py "${train_cmd} ;;
*) echo "choose run_mode(sp or mp)"; exit 1;
esac
# 以下不用修改
echo ${train_cmd}
# 以下不用修改
timeout 15m ${train_cmd} > ${log_file} 2>&1
if [ $? -ne 0 ];then
echo -e "${model_name}, FAIL"
......@@ -43,8 +53,7 @@ function _train(){
echo -e "${model_name}, SUCCESS"
export job_fail_flag=0
fi
trap 'for pid in $(jobs -pr); do kill -KILL $pid; done' INT QUIT TERM
kill -9 `ps -ef|grep 'python'|awk '{print $2}'`
if [ $run_mode = "mp" -a -d mylog ]; then
rm ${log_file}
......@@ -54,4 +63,3 @@ function _train(){
_set_params $@
_train
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册