diff --git a/examples/bert/run_classifier_single_gpu.sh b/examples/bert/run_classifier_single_gpu.sh
index 16ca7230e8706db4210cc5ff7d0467cdf4007c0f..3225e7a2b9ec9f56a93eea8ee8aa095a2b7cf975 100755
--- a/examples/bert/run_classifier_single_gpu.sh
+++ b/examples/bert/run_classifier_single_gpu.sh
@@ -1,10 +1,9 @@
 #!/bin/bash
-BERT_BASE_PATH="./data/pretrained_models/uncased_L-12_H-768_A-12/"
-TASK_NAME='MNLI'
+BERT_BASE_PATH="./bert_uncased_L-12_H-768_A-12/"
 DATA_PATH="./data/glue_data/MNLI/"
 CKPT_PATH="./data/saved_model/mnli_models"

-export CUDA_VISIBLE_DEVICES=1
+export CUDA_VISIBLE_DEVICES=0

 # start fine-tuning
 python3.7 bert_classifier.py\
@@ -12,7 +11,6 @@ python3.7 bert_classifier.py\
     --do_train true \
     --do_test true \
     --batch_size 64 \
-    --init_pretraining_params ${BERT_BASE_PATH}/dygraph_params/ \
     --data_dir ${DATA_PATH} \
     --vocab_path ${BERT_BASE_PATH}/vocab.txt \
     --checkpoints ${CKPT_PATH} \
diff --git a/examples/bert_leveldb/bert_classifier.py b/examples/bert_leveldb/bert_classifier.py
index 012c42eba4c9be598e7cb7bd3e4b99c0e3f17f5f..51b1192f83a89de6491d82f1748bbead345df742 100644
--- a/examples/bert_leveldb/bert_classifier.py
+++ b/examples/bert_leveldb/bert_classifier.py
@@ -159,7 +159,7 @@ def main():
         labels,
         device=device)

-    cls_model.bert_layer.load("./bert_small", reset_optimizer=True)
+    cls_model.bert_layer.load("./bert_uncased_L-12_H-768_A-12/bert", reset_optimizer=True)

     # do train
     cls_model.fit(train_data=train_dataloader.dataloader,
diff --git a/examples/bert_leveldb/readme.md b/examples/bert_leveldb/readme.md
new file mode 100644
index 0000000000000000000000000000000000000000..77241a168946b3fa4c4eb203dee039fb53004f50
--- /dev/null
+++ b/examples/bert_leveldb/readme.md
@@ -0,0 +1,11 @@
+0. install leveldb: python3.7 -m pip install leveldb
+
+1. download data: wget https://paddle-hapi.bj.bcebos.com/data/bert_data.tar.gz
+
+2. extract data: tar -zvxf bert_data.tar.gz
+
+3. download pretrained parameters: wget https://paddle-hapi.bj.bcebos.com/models/bert_uncased_L-12_H-768_A-12.tar.gz
+
+4. extract pretrained parameters: tar -zvxf bert_uncased_L-12_H-768_A-12.tar.gz
+
+5. run fine-tuning: bash run_classifier_single_gpu.sh
diff --git a/examples/bert_leveldb/run_classifier_multi_gpu.sh b/examples/bert_leveldb/run_classifier_multi_gpu.sh
index 7d545fe09d0fd2f540b08754caf408fe2f22de56..65c49fa2202108ab734bff2e840e6d3021f327e2 100755
--- a/examples/bert_leveldb/run_classifier_multi_gpu.sh
+++ b/examples/bert_leveldb/run_classifier_multi_gpu.sh
@@ -1,16 +1,14 @@
 #!/bin/bash
-BERT_BASE_PATH="./data/pretrained_models/uncased_L-12_H-768_A-12/"
-TASK_NAME='MNLI'
+BERT_BASE_PATH="./bert_uncased_L-12_H-768_A-12/"
 DATA_PATH="./data/glue_data/MNLI/"
 CKPT_PATH="./data/saved_model/mnli_models"

 # start fine-tuning
-python3.7 -m paddle.distributed.launch --started_port 8899 --selected_gpus=1,2,3 bert_classifier.py\
+python3.7 -m paddle.distributed.launch --started_port 8899 --selected_gpus=0,1,2,3 bert_classifier.py\
     --use_cuda true \
     --do_train true \
     --do_test true \
     --batch_size 64 \
-    --init_pretraining_params ${BERT_BASE_PATH}/dygraph_params/ \
     --data_dir ${DATA_PATH} \
     --vocab_path ${BERT_BASE_PATH}/vocab.txt \
     --checkpoints ${CKPT_PATH} \
diff --git a/examples/bert_leveldb/run_classifier_single_gpu.sh b/examples/bert_leveldb/run_classifier_single_gpu.sh
index 16ca7230e8706db4210cc5ff7d0467cdf4007c0f..1c764ac562b69c49c5bb86044d8596123b08ffa7 100755
--- a/examples/bert_leveldb/run_classifier_single_gpu.sh
+++ b/examples/bert_leveldb/run_classifier_single_gpu.sh
@@ -1,6 +1,5 @@
 #!/bin/bash
-BERT_BASE_PATH="./data/pretrained_models/uncased_L-12_H-768_A-12/"
-TASK_NAME='MNLI'
+BERT_BASE_PATH="./bert_uncased_L-12_H-768_A-12/"
 DATA_PATH="./data/glue_data/MNLI/"
 CKPT_PATH="./data/saved_model/mnli_models"
@@ -12,7 +11,6 @@ python3.7 bert_classifier.py\
     --do_train true \
     --do_test true \
     --batch_size 64 \
-    --init_pretraining_params ${BERT_BASE_PATH}/dygraph_params/ \
     --data_dir ${DATA_PATH} \
     --vocab_path ${BERT_BASE_PATH}/vocab.txt \
     --checkpoints ${CKPT_PATH} \
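
For reference, the setup steps introduced in examples/bert_leveldb/readme.md can be strung together into one script. This is only a sketch: the commands and URLs are taken verbatim from the new readme, but it assumes the script is run from inside examples/bert_leveldb/ and that the two archives unpack into ./data/ and ./bert_uncased_L-12_H-768_A-12/, which are the directories the updated run scripts and bert_classifier.py point at.

```bash
#!/bin/bash
# Sketch of the examples/bert_leveldb setup, following the steps in the new readme.md.
# Assumption: the archives unpack into ./data/ and ./bert_uncased_L-12_H-768_A-12/,
# matching DATA_PATH and BERT_BASE_PATH in the run scripts above.
set -e

# LevelDB bindings used by the leveldb-backed data pipeline
python3.7 -m pip install leveldb

# GLUE/MNLI data
wget https://paddle-hapi.bj.bcebos.com/data/bert_data.tar.gz
tar -zvxf bert_data.tar.gz

# Pretrained uncased BERT-base (L-12, H-768, A-12) parameters
wget https://paddle-hapi.bj.bcebos.com/models/bert_uncased_L-12_H-768_A-12.tar.gz
tar -zvxf bert_uncased_L-12_H-768_A-12.tar.gz

# Fine-tune on MNLI with a single GPU
bash run_classifier_single_gpu.sh
```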