diff --git a/ppdet/engine/trainer.py b/ppdet/engine/trainer.py
index 321c4d17deabe77d6f72a70a87b17f0956a45779..4dba75724cf2484dcc4efa2265cfe34b1db184a8 100644
--- a/ppdet/engine/trainer.py
+++ b/ppdet/engine/trainer.py
@@ -362,8 +362,8 @@ class Trainer(object):
             model = paddle.DataParallel(
                 self.model, find_unused_parameters=find_unused_parameters)
 
-        # initial fp16
-        if self.cfg.get('fp16', False):
+        # enable auto mixed precision mode
+        if self.cfg.get('amp', False):
             scaler = amp.GradScaler(
                 enable=self.cfg.use_gpu, init_loss_scaling=1024)
 
@@ -401,7 +401,7 @@ class Trainer(object):
                 self._compose_callback.on_step_begin(self.status)
                 data['epoch_id'] = epoch_id
 
-                if self.cfg.get('fp16', False):
+                if self.cfg.get('amp', False):
                     with amp.auto_cast(enable=self.cfg.use_gpu):
                         # model forward
                         outputs = model(data)
diff --git a/tools/train.py b/tools/train.py
index 088a41793189603cbe5f74d02c0f323a5fde70c0..ddbf24fdaadaff781c4cd57a0df40bf1d3340c97 100755
--- a/tools/train.py
+++ b/tools/train.py
@@ -60,10 +60,10 @@ def parse_args():
         help="If set True, enable continuous evaluation job."
         "This flag is only used for internal test.")
     parser.add_argument(
-        "--fp16",
+        "--amp",
        action='store_true',
        default=False,
-        help="Enable mixed precision training.")
+        help="Enable auto mixed precision training.")
     parser.add_argument(
         "--fleet", action='store_true', default=False, help="Use fleet or not")
     parser.add_argument(
@@ -130,7 +130,7 @@ def run(FLAGS, cfg):
 def main():
     FLAGS = parse_args()
     cfg = load_config(FLAGS.config)
-    cfg['fp16'] = FLAGS.fp16
+    cfg['amp'] = FLAGS.amp
     cfg['fleet'] = FLAGS.fleet
     cfg['use_vdl'] = FLAGS.use_vdl
     cfg['vdl_log_dir'] = FLAGS.vdl_log_dir
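
For reference, below is a minimal, self-contained sketch of the AMP training step that the Trainer change wires up, using paddle.amp.GradScaler and paddle.amp.auto_cast as in the diff. The toy Linear model, SGD optimizer and random batches are illustrative placeholders only, not part of PaddleDetection.

# Minimal sketch of the AMP step enabled above (illustrative only; the toy
# Linear model, SGD optimizer and random batches stand in for the real
# detector, optimizer and DataLoader that Trainer builds from the config).
import paddle
from paddle import amp

use_gpu = paddle.is_compiled_with_cuda()
paddle.set_device('gpu' if use_gpu else 'cpu')

model = paddle.nn.Linear(16, 1)                         # placeholder model
optimizer = paddle.optimizer.SGD(
    learning_rate=0.01, parameters=model.parameters())
scaler = amp.GradScaler(enable=use_gpu, init_loss_scaling=1024)

for _ in range(3):                                      # placeholder data loader
    x = paddle.randn([8, 16])
    y = paddle.randn([8, 1])
    with amp.auto_cast(enable=use_gpu):                 # forward pass in mixed precision
        loss = paddle.nn.functional.mse_loss(model(x), y)
    scaled_loss = scaler.scale(loss)                    # scale loss to avoid fp16 underflow
    scaled_loss.backward()
    scaler.minimize(optimizer, scaled_loss)             # unscale grads, apply update
    model.clear_gradients()

With the renamed flag, mixed precision training is requested as python tools/train.py -c <config> --amp (previously --fp16), which sets cfg['amp'] and activates the GradScaler/auto_cast path in Trainer.train().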