From b63894b58dc55eccb7b9e75ff0f29ced67bfb7a0 Mon Sep 17 00:00:00 2001
From: wawltor
Date: Wed, 27 Jan 2021 17:40:08 +0800
Subject: [PATCH] fix the pure fp16 argument (#5236)

fix the pure fp16 argument (#5236)
---
 PaddleNLP/benchmark/bert/run_pretrain.py | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/PaddleNLP/benchmark/bert/run_pretrain.py b/PaddleNLP/benchmark/bert/run_pretrain.py
index b5af614f..efbec181 100644
--- a/PaddleNLP/benchmark/bert/run_pretrain.py
+++ b/PaddleNLP/benchmark/bert/run_pretrain.py
@@ -136,6 +136,11 @@ def parse_args():
         type=float,
         default=1.0,
         help="The value of scale_loss for fp16.")
+    parser.add_argument(
+        "--use_pure_fp16",
+        type=distutils.util.strtobool,
+        default=False,
+        help="Whether to use pure fp16 training.")
     parser.add_argument(
         "--select_device",
         type=str,
@@ -146,8 +151,7 @@ def parse_args():
         type=int,
         default=1,
         help="Number of merge steps before gradient update."
-        "global_batch_size = gradient_merge_steps * batch_size."
-    )
+        "global_batch_size = gradient_merge_steps * batch_size.")
     args = parser.parse_args()
     return args
--
GitLab