#!/usr/bin/env bash
# Launch multi-GPU ERNIE pretraining on a single 8-GPU node.
# pretrain_launch.py spawns one train.py worker per selected GPU.
set -eux

# PaddlePaddle: release tensor memory eagerly instead of caching it.
export FLAGS_eager_delete_tensor_gb=0
# PaddlePaddle: make NCCL all-reduce synchronous across workers.
export FLAGS_sync_nccl_allreduce=1
export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7

# Quote $(hostname -i): if it returns several addresses, the value must
# still reach --node_ips as a single argument instead of word-splitting.
python ./pretrain_launch.py \
    --nproc_per_node 8 \
    --selected_gpus 0,1,2,3,4,5,6,7 \
    --node_ips "$(hostname -i)" \
    --node_id 0 \
./train.py --use_cuda True \
                --is_distributed False \
                --use_fast_executor True \
                --weight_sharing True \
                --in_tokens true \
                --batch_size 8192 \
                --vocab_path ./config/vocab.txt \
                --train_filelist ./data/train_filelist \
                --valid_filelist ./data/valid_filelist \
                --validation_steps 100 \
                --num_train_steps 1000000 \
                --checkpoints ./checkpoints \
                --save_steps 10000 \
                --ernie_config_path ./config/ernie_config.json \
                --learning_rate 1e-4 \
                --use_fp16 false \
                --weight_decay 0.01 \
                --max_seq_len 512 \
                --skip_steps 10