#!/bin/bash
# run_mkldnn.sh
set -e

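# Reset any inherited thread-count settings, then pin OpenMP threads to cores
# (fine-grained, compact affinity) and disable dynamic thread adjustment so
# the timing results are comparable between runs.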
unset OMP_NUM_THREADS MKL_NUM_THREADS
export OMP_DYNAMIC="FALSE"
export KMP_AFFINITY="granularity=fine,compact,0,0"

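# train <topology> <batch_size> <use_mkldnn: True|False>
# Runs `paddle train --job=time` for one configuration and tees the output
# into a log file under logs/.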
function train() {
  topology=$1
  bs=$2
  use_mkldnn=$3
  if [ "$3" == "True" ]; then
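    # single trainer for the MKL-DNN run; OMP_NUM_THREADS is left unset above,
    # so the MKL-DNN kernels can use all available cores internally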
    thread=1
    log="logs/${topology}-mkldnn-${bs}.log"
  elif [ "$3" == "False" ]; then
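    # without MKL-DNN, run one CPU trainer per core instead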
    thread=$(nproc)
    # each trainer uses only 1 core to avoid thread conflicts
    export OMP_NUM_THREADS=1
    export MKL_NUM_THREADS=1
    log="logs/${topology}-${thread}mklml-${bs}.log"
  else
    echo "Wrong input $3, use True or False." >&2
    exit 1
  fi
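  # time the topology on CPU with the selected backend and trainer count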
  args="batch_size=${bs}"
  config="${topology}.py"
  paddle train --job=time \
    --config=$config \
    --use_mkldnn=$use_mkldnn \
    --use_gpu=False \
    --trainer_count=$thread \
    --log_period=10 \
    --test_period=100 \
    --config_args=$args \
    2>&1 | tee "${log}"
}

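# make sure a dummy train.list file and the logs directory exist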
if [ ! -f "train.list" ]; then
  echo " " > train.list
fi
if [ ! -d "logs" ]; then
  mkdir logs
fi

#========== mkldnn ==========#
train vgg 64 True
train vgg 128 True
train vgg 256 True

#========== mklml ===========#
train vgg 64 False
train vgg 128 False
train vgg 256 False
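
# Timing logs are written to logs/, e.g. logs/vgg-mkldnn-64.log for the
# MKL-DNN runs and logs/vgg-<nproc>mklml-64.log for the MKLML baselines.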