From 304fb2b578f73b77544ca557e3763dc247fa72ef Mon Sep 17 00:00:00 2001
From: zhangbo9674 <82555433+zhangbo9674@users.noreply.github.com>
Date: Mon, 25 Oct 2021 11:02:57 +0800
Subject: [PATCH] [Cherry Pick] refine comments for GradScaler state_dict (#36522) (#36671)

Refine comments for GradScaler state_dict.
---
 python/paddle/amp/grad_scaler.py | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/python/paddle/amp/grad_scaler.py b/python/paddle/amp/grad_scaler.py
index 83f57fc74e8..ca08ce196a9 100644
--- a/python/paddle/amp/grad_scaler.py
+++ b/python/paddle/amp/grad_scaler.py
@@ -579,11 +579,15 @@ class GradScaler(AmpScaler):
         Reurns:
             A dict of scaler includes:
-            init_loss_scaling (float, optional): The initial loss scaling factor.
-            incr_ratio(float, optional): The multiplier to use when increasing the loss scaling.
-            decr_ratio(float, optional): The less-than-one-multiplier to use when decreasing the loss scaling.
-            incr_every_n_steps(int, optional): Increases loss scaling every n consecutive steps with finite gradients.
-            decr_every_n_nan_or_inf(int, optional): Decreases loss scaling every n accumulated steps with nan or inf gradients.
+            scale (tensor): The loss scaling factor.
+            incr_ratio(float): The multiplier to use when increasing the loss scaling.
+            decr_ratio(float): The less-than-one-multiplier to use when decreasing the loss scaling.
+            incr_every_n_steps(int): Increases loss scaling every n consecutive steps with finite gradients.
+            decr_every_n_nan_or_inf(int): Decreases loss scaling every n accumulated steps with nan or inf gradients.
+            incr_count(int): The number of recent consecutive unskipped steps.
+            decr_count(int): The number of recent consecutive skipped steps.
+            use_dynamic_loss_scaling(bool): Whether to use dynamic loss scaling. If False, fixed loss_scaling is used. If True, the loss scaling is updated dynamicly. Default is True.
+
         Examples:
-- 
GitLab
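
Editor's note, not part of the patch: the fields listed in the revised docstring are the ones produced by GradScaler.state_dict() and consumed by load_state_dict(). Below is a minimal usage sketch, assuming a GPU-enabled Paddle 2.2+ build where these methods are available; the model, optimizer, and checkpoint path are placeholders chosen for illustration only.

    import paddle

    # Toy model/optimizer just to drive the scaler (placeholders, not from the patch).
    model = paddle.nn.Linear(10, 10)
    optimizer = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters())
    scaler = paddle.amp.GradScaler(init_loss_scaling=1024)

    data = paddle.rand([4, 10])
    with paddle.amp.auto_cast():
        loss = model(data).mean()
    scaled = scaler.scale(loss)          # multiply the loss by the current loss-scaling factor
    scaled.backward()
    scaler.minimize(optimizer, scaled)   # unscale grads, skip the step on inf/nan, update the scale

    # state_dict() returns the fields documented in the patched docstring: scale, incr_ratio,
    # decr_ratio, incr_every_n_steps, decr_every_n_nan_or_inf, incr_count, decr_count,
    # and use_dynamic_loss_scaling.
    state = scaler.state_dict()
    paddle.save(state, "scaler.pdparams")    # hypothetical checkpoint path

    # Later, restore the scaler alongside the model/optimizer checkpoints.
    new_scaler = paddle.amp.GradScaler(init_loss_scaling=1024)
    new_scaler.load_state_dict(paddle.load("scaler.pdparams"))

Saving the scaler state together with the model and optimizer keeps the dynamic loss-scaling counters (incr_count, decr_count) and the current scale consistent when training resumes.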