From c48157075bb392661d9e1539ec8d37ec423efe95 Mon Sep 17 00:00:00 2001
From: hutuxian
Date: Thu, 22 Apr 2021 16:52:52 +0800
Subject: [PATCH] fix doc for adamw (#32438)

---
 python/paddle/optimizer/adamw.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/paddle/optimizer/adamw.py b/python/paddle/optimizer/adamw.py
index 78c9fcb83fc..32645720c4f 100644
--- a/python/paddle/optimizer/adamw.py
+++ b/python/paddle/optimizer/adamw.py
@@ -59,7 +59,7 @@ class AdamW(Adam):
         weight_decay (float|Tensor, optional): The weight decay coefficient, it can be float or Tensor. The default value is 0.01.
         apply_decay_param_fun (function|None, optional): If it is not None,
             only tensors that makes apply_decay_param_fun(Tensor.name)==True
-            will be updated. It only works when we want to specify tensors.
+            will be updated with weight decay. It only works when we want to specify tensors.
             Default: None.
         grad_clip (GradientClipBase, optional): Gradient cliping strategy, it's an instance of some derived
             class of ``GradientClipBase`` . There are three cliping strategies
--
GitLab
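
For context on the parameter this one-line doc fix touches, below is a minimal sketch of how apply_decay_param_fun is typically used with paddle.optimizer.AdamW: weight decay is applied only to parameters whose names make the function return True. Skipping decay for biases and the "b_0" name-suffix check are illustrative assumptions, not something this patch prescribes.

import paddle

# Simple model whose parameters will be partitioned by apply_decay_param_fun.
linear = paddle.nn.Linear(10, 10)

def apply_decay(param_name):
    # Apply weight decay to every parameter except biases.
    # The "b_0" suffix check assumes Paddle's default parameter naming
    # (e.g. "linear_0.b_0") and is for illustration only.
    return "b_0" not in param_name

opt = paddle.optimizer.AdamW(
    learning_rate=0.001,
    parameters=linear.parameters(),
    weight_decay=0.01,
    apply_decay_param_fun=apply_decay,
)

# One toy optimization step: only parameters for which apply_decay returns
# True are updated with weight decay; all parameters still get Adam updates.
inp = paddle.rand([4, 10], dtype="float32")
loss = paddle.mean(linear(inp))
loss.backward()
opt.step()
opt.clear_grad()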