#!/bin/bash
set -e
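
# run_openblas_train.sh: benchmarks PaddlePaddle CPU training throughput with
# OpenBLAS across several image-classification topologies and batch sizes.
# Assumes the `paddle` CLI is on PATH and the <topology>.py config files
# (vgg.py, resnet.py, googlenet.py, alexnet.py) are in the working directory.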

# train <topology> <layer_num> <batch_size>: run one timed training job and
# record its throughput.
function train() {
  # clear inherited OpenMP/MKL threading variables so they do not constrain
  # this run
  unset OMP_NUM_THREADS MKL_NUM_THREADS OMP_DYNAMIC KMP_AFFINITY
  topology=$1
  layer_num=$2
  bs=$3
  # one trainer per core, so each trainer uses only a single core and the
  # trainers do not conflict with each other
  thread=$(nproc)
  log="logs/train-${topology}-${layer_num}-${thread}openblas-${bs}.log"
  args="batch_size=${bs},layer_num=${layer_num}"
  config="${topology}.py"
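  # --job=time runs Paddle's timing benchmark mode rather than a full
  # training run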
  paddle train --job=time \
    --config=$config \
    --use_gpu=False \
    --trainer_count=$thread \
    --log_period=10 \
    --test_period=100 \
    --config_args=$args \
    2>&1 | tee ${log}

  # the last log line reports per-batch latency in its 8th field as "avg=<ms>";
  # strip the prefix to get milliseconds per batch
  avg_time=$(tail -n 1 ${log} | awk '{print $8}' | sed 's/avg=//')
  # images/sec = batch_size / (ms per batch) * 1000
  fps=$(awk 'BEGIN{printf "%.2f",('$bs' / '$avg_time' * 1000)}')
  echo "FPS: $fps images/sec" | tee -a ${log}
}

# the topology configs expect a train.list file to exist; create a
# placeholder if it is missing
if [ ! -f "train.list" ]; then
  echo " " > train.list
fi
if [ ! -d "logs" ]; then
  mkdir logs
fi

# training benchmark
for batchsize in 64 128 256; do
  train vgg 19 $batchsize
  train resnet 50 $batchsize
  train googlenet v1 $batchsize
  train alexnet group2 $batchsize
done
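
# Usage sketch (assumes a PaddlePaddle build with OpenBLAS):
#   bash run_openblas_train.sh
# Per-run logs are written to
#   logs/train-<topology>-<layer_num>-<threads>openblas-<batch_size>.log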