diff --git a/PaddleNLP/legacy/pretrain_language_models/BERT/README.md b/PaddleNLP/legacy/pretrain_language_models/BERT/README.md
index 2f18176842b4cdd58f75584372262eba43b62636..b7f40a78fef1f5012970649febe93eb729b692ce 100644
--- a/PaddleNLP/legacy/pretrain_language_models/BERT/README.md
+++ b/PaddleNLP/legacy/pretrain_language_models/BERT/README.md
@@ -202,6 +202,7 @@ DATA_PATH=/path/to/xnli/data/
 CKPT_PATH=/path/to/save/checkpoints/
 python -u run_classifier.py --task_name ${TASK_NAME} \
+                   --use_cuda false \
                    --use_xpu true \
                    --do_train true \
                    --do_val true \
diff --git a/PaddleNLP/legacy/pretrain_language_models/BERT/run_classifier.py b/PaddleNLP/legacy/pretrain_language_models/BERT/run_classifier.py
index 4669ffb3e9a9c0acb2669a7c83f3679e742e367e..1ead27ab4c56b38dcf80cfc3f5b7b4cbc5b28696 100644
--- a/PaddleNLP/legacy/pretrain_language_models/BERT/run_classifier.py
+++ b/PaddleNLP/legacy/pretrain_language_models/BERT/run_classifier.py
@@ -100,7 +100,7 @@ run_type_g.add_arg("profiler_path", str, './', "the profiler o
 run_type_g.add_arg("is_profiler", int, 0, "the profiler switch. (used for benchmark)")
 run_type_g.add_arg("max_iter", int, 0, "the max batch nums to train. (used for benchmark)")
-run_type_g.add_arg("use_cuda", bool, False, "If set, use GPU for training.")
+run_type_g.add_arg("use_cuda", bool, True, "If set, use GPU for training.")
 run_type_g.add_arg("use_xpu", bool, False, "If set, use XPU for training.")
 run_type_g.add_arg("use_fast_executor", bool, False, "If set, use fast parallel executor (in experiment).")
 run_type_g.add_arg("shuffle", bool, True, "")