diff --git a/benchmark/README.md b/benchmark/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..2e892d97ed62ec6d02060db2015a8fef40c8a7c2
--- /dev/null
+++ b/benchmark/README.md
@@ -0,0 +1,29 @@
+# Benchmark Usage
+
+The shell scripts in this directory measure the speed of the various PaddleClas models, e.g. single-GPU and multi-GPU training throughput.
+
+## Scripts
+
+There are three scripts in total:
+
+- `prepare_data.sh`: downloads the test data and sets up the data paths
+- `run_benchmark.sh`: runs a single training test; see the comments in the script for the exact invocation
+- `run_all.sh`: entry script that runs all training tests
+
+## Usage
+
+**Note**: to keep the working directory consistent with the other PaddleClas modules, this module is executed from the root directory of `PaddleClas`.
+
+### 1. Prepare the data
+
+The script expects the URL of the validation-set archive as its first argument:
+
+```shell
+bash benchmark/prepare_data.sh ${dataset_url}
+```
+
+### 2. Run the tests for all models
+
+```shell
+bash benchmark/run_all.sh
+```
diff --git a/benchmark/prepare_data.sh b/benchmark/prepare_data.sh
new file mode 100644
index 0000000000000000000000000000000000000000..83a6856983920968539d96c608559e65c37ca48a
--- /dev/null
+++ b/benchmark/prepare_data.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+# Usage: bash benchmark/prepare_data.sh ${dataset_url}
+# The URL must point at the ILSVRC2012_val.tar archive extracted below.
+dataset_url=$1
+
+cd dataset
+rm -rf ILSVRC2012
+wget -nc ${dataset_url}
+tar xf ILSVRC2012_val.tar
+ln -s ILSVRC2012_val ILSVRC2012
+cd ILSVRC2012
+ln -s val_list.txt train_list.txt
+cd ../../
diff --git a/benchmark/run_all.sh b/benchmark/run_all.sh
new file mode 100644
index 0000000000000000000000000000000000000000..7e7b5fe0a26c533e467f876d12929a788d23097c
--- /dev/null
+++ b/benchmark/run_all.sh
@@ -0,0 +1,25 @@
+# Scripts for stable, reproducible performance numbers. By default they run with Python 3.7 inside the standard docker image: paddlepaddle/paddle:latest-gpu-cuda10.1-cudnn7 (paddle=2.1.2, py=37)
+# Working directory: the root of the PaddleClas repo (see benchmark/README.md)
+# cd **
+# 1. Install the dependencies the model needs (note any optimization strategies that are enabled)
+#    pip install ...
+# 2. Copy the data and pretrained models the model needs
+# 3. Run everything in batch (if batch running is inconvenient, steps 1 and 2 go into the per-model script)
+
+model_mode_list=(MobileNetV1 MobileNetV2 MobileNetV3_large_x1_0 EfficientNetB0 ShuffleNetV2_x1_0 DenseNet121 HRNet_W48_C SwinTransformer_tiny_patch4_window7_224 alt_gvt_base)
+fp_item_list=(fp32)
+bs_list=(32 64 96 128)
+for model_mode in ${model_mode_list[@]}; do
+    for fp_item in ${fp_item_list[@]}; do
+        for bs_item in ${bs_list[@]}; do
+            echo "index is speed, 1gpus, begin, ${model_mode}"
+            run_mode=sp
+            CUDA_VISIBLE_DEVICES=0 bash benchmark/run_benchmark.sh ${run_mode} ${bs_item} ${fp_item} 10 ${model_mode}     # (about 5 min)
+            sleep 10
+            echo "index is speed, 8gpus, run_mode is multi_process, begin, ${model_mode}"
+            run_mode=mp
+            CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 bash benchmark/run_benchmark.sh ${run_mode} ${bs_item} ${fp_item} 10 ${model_mode}
+            sleep 10
+        done
+    done
+done
diff --git a/benchmark/run_benchmark.sh b/benchmark/run_benchmark.sh
new file mode 100644
index 0000000000000000000000000000000000000000..1e51f9a66ad8143957a80813eb4f52de7dfc4a7c
--- /dev/null
+++ b/benchmark/run_benchmark.sh
@@ -0,0 +1,64 @@
+#!/usr/bin/env bash
+set -xe
+# Example: CUDA_VISIBLE_DEVICES=0 bash run_benchmark.sh ${run_mode} ${bs_item} ${fp_item} 500 ${model_mode}
+# Parameters
+function _set_params(){
+    run_mode=${1:-"sp"}          # sp: single GPU | mp: multi GPU
+    batch_size=${2:-"64"}
+    fp_item=${3:-"fp32"}         # fp32|fp16
+    epochs=${4:-"10"}            # optional; lower it to cut training short
+    model_name=${5:-"model_name"}
+    run_log_path="${TRAIN_LOG_DIR:-$(pwd)}/benchmark"  # QA will set TRAIN_LOG_DIR later
+
+# Do not modify below this line
+    device=${CUDA_VISIBLE_DEVICES//,/ }
+    arr=(${device})
+    num_gpu_devices=${#arr[*]}
+    log_file=${run_log_path}/clas_${model_name}_${run_mode}_bs${batch_size}_${fp_item}_${num_gpu_devices}
+}
+
+function _train(){
+    echo "Train on ${num_gpu_devices} GPUs"
+    echo "current CUDA_VISIBLE_DEVICES=$CUDA_VISIBLE_DEVICES, gpus=$num_gpu_devices, batch_size=$batch_size"
+
+    # fp16 runs use the dedicated *_fp16.yaml config if one exists
+    if [ ${fp_item} = "fp32" ]; then
+        model_config=`find ppcls/configs/ -name ${model_name}.yaml`
+    else
+        model_config=`find ppcls/configs/ -name ${model_name}_fp16.yaml`
+    fi
+
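+    # Build the training command: pass the resolved config via -c and apply
+    # per-run overrides for batch size and epoch count via -o, so the YAML
+    # files themselves stay untouched. In mp mode the same command is
+    # wrapped with paddle.distributed.launch below.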
train_cmd="-c ${model_config} -o DataLoader.Train.sampler.batch_size=${batch_size} -o Global.epochs=${epochs}" + case ${run_mode} in + sp) train_cmd="python -u tools/train.py ${train_cmd}" ;; + mp) + train_cmd="python -m paddle.distributed.launch --log_dir=./mylog --gpus=$CUDA_VISIBLE_DEVICES tools/train.py ${train_cmd}" + log_parse_file="mylog/workerlog.0" ;; + *) echo "choose run_mode(sp or mp)"; exit 1; + esac + rm -rf mylog +# 以下不用修改 + timeout 15m ${train_cmd} > ${log_file} 2>&1 + if [ $? -ne 0 ];then + echo -e "${model_name}, FAIL" + export job_fail_flag=1 + else + echo -e "${model_name}, SUCCESS" + export job_fail_flag=0 + fi + kill -9 `ps -ef|grep 'python'|awk '{print $2}'` + + if [ $run_mode = "mp" -a -d mylog ]; then + rm ${log_file} + cp mylog/workerlog.0 ${log_file} + fi +} + +_set_params $@ +_train diff --git a/ppcls/engine/train/train.py b/ppcls/engine/train/train.py index 4b026a8286e47da6e90a5e25bc182cc6d1f57541..4de8d59d6cdbbd13cfcbf223019d44af2314d696 100644 --- a/ppcls/engine/train/train.py +++ b/ppcls/engine/train/train.py @@ -16,6 +16,7 @@ from __future__ import absolute_import, division, print_function import time import paddle from ppcls.engine.train.utils import update_loss, update_metric, log_info +from ppcls.utils import profiler def train_epoch(engine, epoch_id, print_batch_step): @@ -23,6 +24,7 @@ def train_epoch(engine, epoch_id, print_batch_step): for iter_id, batch in enumerate(engine.train_dataloader): if iter_id >= engine.max_iter: break + profiler.add_profiler_step(engine.config["profiler_options"]) if iter_id == 5: for key in engine.time_info: engine.time_info[key].reset() diff --git a/ppcls/utils/config.py b/ppcls/utils/config.py index b92f0d9456c8e7ced5704c0bfe931a080e5eb5cf..e3277c480943cdfb7ce49f4f3ea7bbd160c34ebb 100644 --- a/ppcls/utils/config.py +++ b/ppcls/utils/config.py @@ -199,5 +199,12 @@ def parse_args(): action='append', default=[], help='config options to be overridden') + parser.add_argument( + '-p', + '--profiler_options', + type=str, + default=None, + help='The option of profiler, which should be in format \"key1=value1;key2=value2;key3=value3\".' + ) args = parser.parse_args() return args diff --git a/tools/train.py b/tools/train.py index 1d835903638aacb459f982a7c5f8710241f01be4..e7c9d7bcc8f2e6b1ebd9ad0d7f12f94c2e58ea13 100644 --- a/tools/train.py +++ b/tools/train.py @@ -27,5 +27,6 @@ if __name__ == "__main__": args = config.parse_args() config = config.get_config( args.config, overrides=args.override, show=False) + config.profiler_options = args.profiler_options engine = Engine(config, mode="train") engine.train()