Unverified · Commit 8d05c00c · authored by danleifeng · committed by GitHub

fix paddle.fleet en-doc for apis in dynamic mode (#27354)

* fix fleet dynamic-mode en-doc;test=develop

Parent 746a8ded
@@ -608,11 +608,18 @@ class Fleet(object):
@dygraph_only
def distributed_model(self, model):
"""
- Return dygraph distributed data parallel model (Layer)
- Only work in dygraph mode
+ Return distributed data parallel model (Only work in dygraph mode)
Args:
model (Layer): the user-defined model which inherits Layer.
Returns:
distributed data parallel model which inherits Layer.
Examples:
.. code-block:: python
import paddle
import paddle.nn as nn
from paddle.distributed import fleet
@@ -626,7 +633,6 @@ class Fleet(object):
def forward(self, x):
return self._linear2(self._linear1(x))
def train():
# 1. enable dynamic mode
paddle.disable_static()
@@ -658,8 +664,7 @@ class Fleet(object):
adam.step()
adam.clear_grad()
if __name__ == '__main__':
paddle.distributed.spawn(train)
"""
assert model is not None
self.model = paddle.DataParallel(model)
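
Pieced together, the truncated example fragments in this diff correspond to an end-to-end dygraph data-parallel run along the following lines. This is a sketch reconstructed from the fragments above, so the exact layer sizes and the MSE loss are illustrative:

    import paddle
    import paddle.nn as nn
    from paddle.distributed import fleet

    class LinearNet(nn.Layer):
        def __init__(self):
            super(LinearNet, self).__init__()
            self._linear1 = nn.Linear(10, 10)
            self._linear2 = nn.Linear(10, 1)

        def forward(self, x):
            return self._linear2(self._linear1(x))

    def train():
        # 1. enable dynamic mode
        paddle.disable_static()
        # 2. initialize fleet environment
        fleet.init(is_collective=True)
        # 3. create layer & optimizer
        layer = LinearNet()
        loss_fn = nn.MSELoss()
        adam = paddle.optimizer.Adam(
            learning_rate=0.001, parameters=layer.parameters())
        # 4. wrap optimizer and model for data parallelism
        adam = fleet.distributed_optimizer(adam)
        dp_layer = fleet.distributed_model(layer)
        # 5. run one forward/backward/step cycle
        inputs = paddle.randn([10, 10], 'float32')
        outputs = dp_layer(inputs)
        labels = paddle.randn([10, 1], 'float32')
        loss = loss_fn(outputs, labels)
        loss.backward()
        adam.step()
        adam.clear_grad()

    if __name__ == '__main__':
        paddle.distributed.spawn(train)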
@@ -669,13 +674,14 @@ class Fleet(object):
def state_dict(self):
"""
Get state dict information from optimizer.
- Only work in dygraph mode
+ (Only work in dygraph mode)
Returns:
state_dict(dict): dict containing all the Tensors used by the optimizer
Examples:
.. code-block:: python
import numpy as np
import paddle
from paddle.distributed import fleet
@@ -700,15 +706,17 @@ class Fleet(object):
def set_state_dict(self, state_dict):
"""
Load optimizer state dict.
- Only work in dygraph mode
+ (Only work in dygraph mode)
Args:
    state_dict(dict): dict containing all the Tensors needed by the optimizer
- Returns: None
+ Returns:
+     None
Examples:
.. code-block:: python
import numpy as np
import paddle
from paddle.distributed import fleet
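
state_dict and set_state_dict naturally pair up for checkpointing the wrapped optimizer. A minimal sketch, assuming paddle.save/paddle.load for serialization and an illustrative file name (neither appears in this diff):

    import paddle
    from paddle.distributed import fleet

    paddle.disable_static()
    fleet.init(is_collective=True)

    layer = paddle.nn.Linear(13, 5)
    adam = paddle.optimizer.Adam(
        learning_rate=0.01, parameters=layer.parameters())
    adam = fleet.distributed_optimizer(adam)

    state_dict = adam.state_dict()         # dict of Tensors used by the optimizer
    paddle.save(state_dict, "adam.pdopt")  # illustrative checkpoint path

    loaded = paddle.load("adam.pdopt")
    adam.set_state_dict(loaded)            # restore the optimizer state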
@@ -736,15 +744,17 @@ class Fleet(object):
def set_lr(self, value):
"""
Set the value of the learning rate manually in the optimizer.
- Only work in dygraph mode
+ (Only work in dygraph mode)
Args:
    value (float|Tensor): the value of the learning rate
- Returns: None
+ Returns:
+     None
Examples:
.. code-block:: python
import numpy as np
import paddle
from paddle.distributed import fleet
@@ -780,13 +790,14 @@ class Fleet(object):
def get_lr(self):
"""
Get current step learning rate.
- Only work in dygraph mode
+ (Only work in dygraph mode)
Returns:
float: The learning rate of the current step.
Examples:
.. code-block:: python
import numpy as np
import paddle
from paddle.distributed import fleet
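
set_lr and get_lr act on the same wrapped optimizer; a short sketch of the pair (the learning-rate values are illustrative):

    import paddle
    from paddle.distributed import fleet

    paddle.disable_static()
    fleet.init(is_collective=True)

    layer = paddle.nn.Linear(13, 5)
    adam = paddle.optimizer.Adam(
        learning_rate=0.01, parameters=layer.parameters())
    adam = fleet.distributed_optimizer(adam)

    for lr in [0.01, 0.001, 0.0001]:
        adam.set_lr(lr)       # override the learning rate manually
        print(adam.get_lr())  # reads back the value just set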
@@ -813,9 +824,10 @@ class Fleet(object):
def step(self):
"""
Execute the optimizer once.
- Only work in dygraph mode
+ (Only work in dygraph mode)
- Returns: None
+ Returns:
+     None
Examples:
.. code-block:: python
@@ -833,7 +845,6 @@ class Fleet(object):
def forward(self, x):
return self._linear2(self._linear1(x))
def train():
# 1. enable dynamic mode
paddle.disable_static()
@@ -865,8 +876,6 @@ class Fleet(object):
adam.step()
adam.clear_grad()
if __name__ == '__main__':
paddle.distributed.spawn(train)
"""
# imitate target optimizer retrieval
@@ -875,10 +884,11 @@ class Fleet(object):
@dygraph_only
def clear_grad(self):
"""
- Execute the optimizer once.
- Only work in dygraph mode
+ Clear the gradients of all optimized parameters for model.
+ (Only work in dygraph mode)
- Returns: None
+ Returns:
+     None
Examples:
.. code-block:: python
@@ -896,7 +906,6 @@ class Fleet(object):
def forward(self, x):
return self._linear2(self._linear1(x))
def train():
# 1. enable dynamic mode
paddle.disable_static()
@@ -928,8 +937,6 @@ class Fleet(object):
adam.step()
adam.clear_grad()
if __name__ == '__main__':
paddle.distributed.spawn(train)
"""
# imitate target optimizer retrieval
return self.user_defined_optimizer.clear_grad()
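
step and clear_grad are the per-iteration pair in the loop shown in the docstring examples: step applies one update from the accumulated gradients, and clear_grad resets them before the next forward pass. A loop sketch continuing the train() function from the distributed_model sketch above (it reuses the dp_layer, loss_fn, and adam names defined there):

    for batch_id in range(10):
        inputs = paddle.randn([10, 10], 'float32')
        labels = paddle.randn([10, 1], 'float32')
        loss = loss_fn(dp_layer(inputs), labels)
        loss.backward()
        adam.step()        # apply one optimizer update
        adam.clear_grad()  # drop gradients so the next iteration starts clean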