Commit ffe04093 authored by Megvii Engine Team, committed by Xinran Xu

docs(mge/optimizer): refine the docstring of several apis

GitOrigin-RevId: a97fe5b68ace2983a16b747f5b4d73562feb8e9e
Parent 0c54f2dc
@@ -14,7 +14,7 @@ from .optimizer import Optimizer
 class Adam(Optimizer):
-    r"""Implements Adam algorithm.
+    r"""Implements Adam algorithm proposed in `"Adam: A Method for Stochastic Optimization" <https://arxiv.org/abs/1412.6980>`_.

     :param params: iterable of parameters to optimize or dicts defining
         parameter groups.
......
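For quick reference, here is a minimal usage sketch of the class documented above. It assumes the PyTorch-style constructor Adam(params, lr, ...) implied by the :param: list; the network itself is illustrative:

    import megengine.module as M
    import megengine.optimizer as optim

    # Illustrative model; any Module exposing .parameters() works here.
    net = M.Sequential(M.Linear(784, 100), M.Linear(100, 10))

    # `params` may be an iterable of parameters, as here, or a list of
    # dicts defining parameter groups with per-group options.
    opt = optim.Adam(net.parameters(), lr=1e-3)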
@@ -21,7 +21,7 @@ def add_update_fastpath(
     beta: Union[Tensor, float, int] = 1.0,
     bias: Union[Tensor, float, int] = 0.0
 ):
-    """a fast-path ONLY used to update parameters in optimzier, since it
+    """a fast-path ONLY used to update parameters in optimizer, since it
     would bypass computing graph and launch dnn/add_update kernel directly,
     it is more efficient than functional/add_update.
     """
......
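The docstring above is deliberately terse, so a hedged sketch of the intended use may help. By analogy with functional/add_update, the kernel presumably performs an in-place dest = alpha * dest + beta * delta + bias; only beta and bias are visible in this hunk, so the remaining arguments are assumptions:

    # Hypothetical illustration of a plain SGD update via the fast path.
    # The positional arguments (destination parameter, update delta) are
    # assumed by analogy with functional/add_update; only `beta` and
    # `bias` appear in the hunk above.
    def _inplace_sgd_update(param, grad, lr):
        # param <- param + (-lr) * grad, executed directly by the
        # dnn/add_update kernel, bypassing the computing graph.
        add_update_fastpath(param, grad, beta=-lr, bias=0.0)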
@@ -4,7 +4,7 @@ from .optimizer import Optimizer
 class LRScheduler(metaclass=ABCMeta):
-    r"""Base class for all lr_schedulers.
+    r"""Base class for all learning rate based schedulers.

     :param optimizer: Wrapped optimizer.
     :param current_epoch: The index of current epoch. Default: -1
......
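To make the base-class contract concrete, a trivial subclass might look like the sketch below. The get_lr hook and the optimizer attribute are assumptions patterned on the familiar PyTorch-style scheduler interface; the hunk itself only documents the constructor arguments:

    # Hypothetical scheduler that keeps every parameter group at its
    # initial learning rate; get_lr() and self.optimizer.param_groups
    # are assumed here, not taken from the diff.
    class ConstantLR(LRScheduler):
        def get_lr(self):
            return [group["lr"] for group in self.optimizer.param_groups]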
@@ -17,7 +17,7 @@ class SGD(Optimizer):
     r"""Implements stochastic gradient descent.

     Nesterov momentum is based on the formula from
-    `On the importance of initialization and momentum in deep learning`.
+    `"On the importance of initialization and momentum in deep learning" <http://www.cs.toronto.edu/%7Ehinton/absps/momentum.pdf>`_ .

     :param params: iterable of parameters to optimize or dicts defining
         parameter groups.
......
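Finally, a minimal training-step sketch for the documented class, assuming a PyTorch-like signature SGD(params, lr, momentum=...) together with the 0.x-era Optimizer.backward(loss) API; the loss function name is likewise a 0.x-era assumption:

    import numpy as np
    import megengine as mge
    import megengine.functional as F
    import megengine.module as M
    import megengine.optimizer as optim

    net = M.Linear(4, 2)                       # illustrative model
    opt = optim.SGD(net.parameters(), lr=0.01, momentum=0.9)

    x = mge.tensor(np.random.randn(8, 4).astype("float32"))
    y = mge.tensor(np.random.randint(0, 2, size=(8,)).astype("int32"))

    opt.zero_grad()
    loss = F.cross_entropy_with_softmax(net(x), y)  # assumed 0.x loss name
    opt.backward(loss)                              # assumed 0.x backward API
    opt.step()                                      # applies the SGD update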