diff --git a/python_module/megengine/functional/utils.py b/python_module/megengine/functional/utils.py
index b490eb2f3b42951950f3feb9b6d1fe92b1d008c1..9895059979db9475d594eb83fc583fb8512f2120 100644
--- a/python_module/megengine/functional/utils.py
+++ b/python_module/megengine/functional/utils.py
@@ -69,4 +69,12 @@ def accuracy(logits: Tensor, target: Tensor, topk: Union[int, Iterable[int]] = 1
 
 @wrap_io_tensor
 def zero_grad(inp: Tensor) -> Tensor:
+    r"""
+    Returns a tensor which is treated as a constant during backward gradient calculation,
+    i.e. its gradient is zero.
+
+    :param inp: Input tensor.
+
+    See the implementation of :func:`~.softmax` for an example.
+    """
     return mgb.opr.zero_grad(inp)
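
As a usage note for the new docstring: the pattern it points at is the max-subtraction step of a numerically stable softmax, where the subtracted maximum should act as a constant in the backward pass. Below is a minimal sketch of that pattern, not the repository's actual softmax implementation; the names assumed here (`F.zero_grad`, `F.max`, `F.exp`, `F.sum` in `megengine.functional`, and the `stable_softmax` helper itself) are illustrative and may differ from this MegEngine version's API.

    import megengine.functional as F

    def stable_softmax(inp, axis=-1):
        # Hypothetical sketch: subtract the per-axis maximum for
        # numerical stability. zero_grad() treats the offset as a
        # constant during backward gradient calculation, so no
        # gradient flows back through the max() reduction.
        offset = F.zero_grad(F.max(inp, axis=axis, keepdims=True))
        exp_shifted = F.exp(inp - offset)
        return exp_shifted / F.sum(exp_shifted, axis=axis, keepdims=True)

Because softmax is invariant to a constant shift of its input, routing no gradient through the offset leaves the overall gradient unchanged while avoiding the extra backward path through the reduction.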