#!/usr/bin/env bash
# run.sh — launcher for run_classifier.py: train / eval / test.
# Prepend the release Python to PATH so the bare `python` below resolves to it.
export PATH="/home/guohongjie/tmp/paddle/paddle_release_home/python/bin/:$PATH"


## ---- CPU setting (uncomment to use) ---------------------------------------
# USE_CUDA=false
# CPU_NUM=3   # cpu_num works only when USE_CUDA=false
# # path to your python
# export PATH="/home/work/guohongjie/cpu_paddle/python2/bin:$PATH"


## ---- GPU setting (uncomment to use) ---------------------------------------
# # cuda path
# LD_LIBRARY_PATH=/home/work/cuda/cudnn/cudnn_v7/cuda/lib64:/usr/local/cuda/lib64:/usr/local/cuda/lib:/usr/local/cuda/lib64:/usr/local/cuda/lib:$LD_LIBRARY_PATH
# export LD_LIBRARY_PATH="/home/work/guohongjie/cuda/cudnn/cudnn_v7/cuda/lib64:$LD_LIBRARY_PATH"
# export LD_LIBRARY_PATH="/home/work/guohongjie/cuda/cuda-9.0/lib64:$LD_LIBRARY_PATH"
# USE_CUDA=true
# CPU_NUM=3   # cpu_num works only when USE_CUDA=false
# export FLAGS_fraction_of_gpu_memory_to_use=0.02
# export FLAGS_eager_delete_tensor_gb=0.0
# export FLAGS_fast_eager_deletion_mode=1
# export CUDA_VISIBLE_DEVICES=0   # which GPU to use
# # path to your python
# export PATH="/home/work/guohongjie/gpu_paddle/python2/bin:$PATH"


# Defaults when neither block above is enabled.  Bug fix: these were
# previously assigned only inside the (no-op) here-doc comment blocks,
# so the functions below expanded them as empty strings.
USE_CUDA=${USE_CUDA:-false}
CPU_NUM=${CPU_NUM:-3}       # cpu_num works only when USE_CUDA=false

echo "the python you use is $(command -v python)"

MODEL_PATH=None             # None -> do not load any pretrained model
#MODEL_PATH=./model/        # the default pretrained model
INPUT_DIR=./data/input/
OUTPUT_DIR=./data/output/
TRAIN_CONF=./data/input/model.conf
BUILD_DICT=false            # set true on a new dataset to build domain and char dicts
BATCH_SIZE=64

#######################################
# Run training (do_train=true, no eval/test).
# Globals (read): USE_CUDA CPU_NUM BUILD_DICT INPUT_DIR OUTPUT_DIR
#                 TRAIN_CONF BATCH_SIZE MODEL_PATH
# Returns: exit status of run_classifier.py
#######################################
train() {
    # Quote every expansion so empty/space-containing values survive (SC2086).
    python -u run_classifier.py \
        --use_cuda "${USE_CUDA}" \
        --cpu_num "${CPU_NUM}" \
        --do_train true \
        --do_eval false \
        --do_test false \
        --build_dict "${BUILD_DICT}" \
        --data_dir "${INPUT_DIR}" \
        --save_dir "${OUTPUT_DIR}" \
        --config_path "${TRAIN_CONF}" \
        --batch_size "${BATCH_SIZE}" \
        --init_checkpoint "${MODEL_PATH}"
}

#######################################
# Run training with evaluation enabled (do_train=true, do_eval=true).
# NOTE(review): --do_train true here looks like a copy of train(); if "eval"
# is meant to evaluate only, this should probably be false — confirm intent.
# Globals (read): USE_CUDA CPU_NUM BUILD_DICT INPUT_DIR OUTPUT_DIR
#                 TRAIN_CONF BATCH_SIZE MODEL_PATH
# Returns: exit status of run_classifier.py
#######################################
evaluate() {
    # Quote every expansion so empty/space-containing values survive (SC2086).
    python -u run_classifier.py \
        --use_cuda "${USE_CUDA}" \
        --cpu_num "${CPU_NUM}" \
        --do_train true \
        --do_eval true \
        --do_test false \
        --build_dict "${BUILD_DICT}" \
        --data_dir "${INPUT_DIR}" \
        --save_dir "${OUTPUT_DIR}" \
        --config_path "${TRAIN_CONF}" \
        --batch_size "${BATCH_SIZE}" \
        --init_checkpoint "${MODEL_PATH}"
}


#######################################
# Run inference only (do_test=true, no training or eval).
# Globals (read): USE_CUDA CPU_NUM BUILD_DICT INPUT_DIR OUTPUT_DIR
#                 TRAIN_CONF BATCH_SIZE MODEL_PATH
# Returns: exit status of run_classifier.py
#######################################
infer() {
    # Quote every expansion so empty/space-containing values survive (SC2086).
    python -u run_classifier.py \
        --use_cuda "${USE_CUDA}" \
        --cpu_num "${CPU_NUM}" \
        --do_train false \
        --do_eval false \
        --do_test true \
        --build_dict "${BUILD_DICT}" \
        --data_dir "${INPUT_DIR}" \
        --save_dir "${OUTPUT_DIR}" \
        --config_path "${TRAIN_CONF}" \
        --batch_size "${BATCH_SIZE}" \
        --init_checkpoint "${MODEL_PATH}"
}

#######################################
# Dispatch on the first CLI argument: train | eval | test | help.
# Arguments: $1 - sub-command (defaults to "help")
# Returns:   0 on success or help, 1 on an unknown command
#######################################
main() {
    local cmd=${1:-help}
    case "${cmd}" in
        train)
            train "$@"
            ;;
        eval)
            evaluate "$@"
            ;;
        test)
            infer "$@"
            ;;
        help)
            echo "Usage: ${BASH_SOURCE[0]} {train|eval|test}"
            return 0
            ;;
        *)
            # Bug fix: message previously read "Unsupport commend".
            echo "Unsupported command [${cmd}]"
            echo "Usage: ${BASH_SOURCE[0]} {train|eval|test}"
            return 1
            ;;
    esac
}

main "$@"