Unverified commit 8d05c00c, authored by danleifeng, committed by GitHub

fix paddle.fleet en-doc for apis in dynamic mode (#27354)

* fix fleet dynamic-mode en-doc;test=develop
Parent 746a8ded
@@ -608,25 +608,31 @@ class Fleet(object):
    @dygraph_only
    def distributed_model(self, model):
        """
        Return distributed data parallel model (Only work in dygraph mode)

        Args:
            model (Layer): the user-defined model which inherits Layer.

        Returns:
            distributed data parallel model which inherits Layer.

        Examples:
            .. code-block:: python

                import paddle
                import paddle.nn as nn
                from paddle.distributed import fleet

                class LinearNet(nn.Layer):
                    def __init__(self):
                        super(LinearNet, self).__init__()
                        self._linear1 = nn.Linear(10, 10)
                        self._linear2 = nn.Linear(10, 1)

                    def forward(self, x):
                        return self._linear2(self._linear1(x))

                # 1. enable dynamic mode
                paddle.disable_static()
@@ -658,8 +664,7 @@ class Fleet(object):
                adam.step()
                adam.clear_grad()

        """
        assert model is not None
        self.model = paddle.DataParallel(model)
@@ -669,29 +674,30 @@ class Fleet(object):
    def state_dict(self):
        """
        Get state dict information from optimizer.
        (Only work in dygraph mode)

        Returns:
            state_dict(dict): dict contains all the Tensor used by optimizer

        Examples:
            .. code-block:: python

                import numpy as np
                import paddle
                from paddle.distributed import fleet

                paddle.disable_static()
                fleet.init(is_collective=True)

                value = np.arange(26).reshape(2, 13).astype("float32")
                a = paddle.fluid.dygraph.to_variable(value)

                layer = paddle.nn.Linear(13, 5)
                adam = paddle.optimizer.Adam(learning_rate=0.01, parameters=layer.parameters())

                adam = fleet.distributed_optimizer(adam)
                dp_layer = fleet.distributed_model(layer)
                state_dict = adam.state_dict()
        """
        # imitate target optimizer retrieval
        return self.user_defined_optimizer.state_dict()
@@ -700,34 +706,36 @@ class Fleet(object):
    def set_state_dict(self, state_dict):
        """
        Load optimizer state dict.
        (Only work in dygraph mode)

        Args:
            state_dict(dict): Dict contains all the Tensor needed by optimizer

        Returns:
            None

        Examples:
            .. code-block:: python

                import numpy as np
                import paddle
                from paddle.distributed import fleet

                paddle.disable_static()
                fleet.init(is_collective=True)

                value = np.arange(26).reshape(2, 13).astype("float32")
                a = paddle.fluid.dygraph.to_variable(value)

                layer = paddle.nn.Linear(13, 5)
                adam = paddle.optimizer.Adam(learning_rate=0.01, parameters=layer.parameters())

                adam = fleet.distributed_optimizer(adam)
                dp_layer = fleet.distributed_model(layer)

                state_dict = adam.state_dict()
                paddle.framework.save(state_dict, "paddle_dy")
                para_state_dict, opti_state_dict = paddle.framework.load("paddle_dy")
                adam.set_state_dict(opti_state_dict)
        """
        # imitate target optimizer retrieval
        return self.user_defined_optimizer.set_state_dict(state_dict)
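The example above saves and restores only the optimizer state. A hedged sketch of the fuller checkpoint-and-resume pattern, saving the layer parameters alongside — this assumes paddle.framework.save/load behave like the save_dygraph/load_dygraph pair, writing a .pdparams file for a parameter dict and a .pdopt file for an optimizer dict under one prefix ("paddle_dy" reuses the prefix from the example):

    # continuing from the example above (layer and adam already built)
    paddle.framework.save(layer.state_dict(), "paddle_dy")  # parameters
    paddle.framework.save(adam.state_dict(), "paddle_dy")   # optimizer state

    # later, to resume: load returns (parameter dict, optimizer dict)
    para_state_dict, opti_state_dict = paddle.framework.load("paddle_dy")
    layer.set_dict(para_state_dict)        # restore layer parameters
    adam.set_state_dict(opti_state_dict)   # restore optimizer state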
@@ -736,42 +744,44 @@ class Fleet(object):
    def set_lr(self, value):
        """
        Set the value of the learning rate manually in the optimizer.
        (Only work in dygraph mode)

        Args:
            value (float|Tensor): the value of learning rate

        Returns:
            None

        Examples:
            .. code-block:: python

                import numpy as np
                import paddle
                from paddle.distributed import fleet

                paddle.disable_static()
                fleet.init(is_collective=True)

                value = np.arange(26).reshape(2, 13).astype("float32")
                a = paddle.fluid.dygraph.to_variable(value)

                layer = paddle.nn.Linear(13, 5)
                adam = paddle.optimizer.Adam(learning_rate=0.01, parameters=layer.parameters())

                adam = fleet.distributed_optimizer(adam)
                dp_layer = fleet.distributed_model(layer)

                lr_list = [0.2, 0.3, 0.4, 0.5, 0.6]
                for i in range(5):
                    adam.set_lr(lr_list[i])
                    lr = adam.get_lr()
                    print("current lr is {}".format(lr))
                # Print:
                #    current lr is 0.2
                #    current lr is 0.3
                #    current lr is 0.4
                #    current lr is 0.5
                #    current lr is 0.6
        """
        # imitate target optimizer retrieval
        return self.user_defined_optimizer.set_lr(value)
@@ -780,31 +790,32 @@ class Fleet(object):
    def get_lr(self):
        """
        Get current step learning rate.
        (Only work in dygraph mode)

        Returns:
            float: The learning rate of the current step.

        Examples:
            .. code-block:: python

                import numpy as np
                import paddle
                from paddle.distributed import fleet

                paddle.disable_static()
                fleet.init(is_collective=True)

                value = np.arange(26).reshape(2, 13).astype("float32")
                a = paddle.fluid.dygraph.to_variable(value)

                layer = paddle.nn.Linear(13, 5)
                adam = paddle.optimizer.Adam(learning_rate=0.01, parameters=layer.parameters())

                adam = fleet.distributed_optimizer(adam)
                dp_layer = fleet.distributed_model(layer)

                lr = adam.get_lr()
                print(lr)  # 0.01
        """
        # imitate target optimizer retrieval
        return self.user_defined_optimizer.get_lr()
@@ -813,27 +824,27 @@ class Fleet(object):
    def step(self):
        """
        Execute the optimizer once.
        (Only work in dygraph mode)

        Returns:
            None

        Examples:
            .. code-block:: python

                import paddle
                import paddle.nn as nn
                from paddle.distributed import fleet

                class LinearNet(nn.Layer):
                    def __init__(self):
                        super(LinearNet, self).__init__()
                        self._linear1 = nn.Linear(10, 10)
                        self._linear2 = nn.Linear(10, 1)

                    def forward(self, x):
                        return self._linear2(self._linear1(x))

                # 1. enable dynamic mode
                paddle.disable_static()
@@ -865,8 +876,6 @@ class Fleet(object):
                adam.step()
                adam.clear_grad()

        """
        # imitate target optimizer retrieval
@@ -875,28 +884,28 @@ class Fleet(object):
    @dygraph_only
    def clear_grad(self):
        """
        Clear the gradients of all optimized parameters for model.
        (Only work in dygraph mode)

        Returns:
            None

        Examples:
            .. code-block:: python

                import paddle
                import paddle.nn as nn
                from paddle.distributed import fleet

                class LinearNet(nn.Layer):
                    def __init__(self):
                        super(LinearNet, self).__init__()
                        self._linear1 = nn.Linear(10, 10)
                        self._linear2 = nn.Linear(10, 1)

                    def forward(self, x):
                        return self._linear2(self._linear1(x))

                # 1. enable dynamic mode
                paddle.disable_static()
@@ -928,8 +937,6 @@ class Fleet(object):
                adam.step()
                adam.clear_grad()

        """
        # imitate target optimizer retrieval
        return self.user_defined_optimizer.clear_grad()
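Both truncated examples above end with the same step()/clear_grad() tail; fleet simply forwards each call to the wrapped user optimizer (see the "imitate target optimizer retrieval" comments). A minimal single-step sketch of the two calls, runnable on one card under a collective launch; shapes and the layer are illustrative:

    import paddle
    import paddle.nn as nn
    from paddle.distributed import fleet

    paddle.disable_static()
    fleet.init(is_collective=True)

    layer = nn.Linear(10, 1)
    adam = paddle.optimizer.Adam(
        learning_rate=0.01, parameters=layer.parameters())
    adam = fleet.distributed_optimizer(adam)
    dp_layer = fleet.distributed_model(layer)

    loss = paddle.mean(dp_layer(paddle.randn([4, 10], 'float32')))
    loss.backward()      # populate parameter gradients
    adam.step()          # apply one Adam update via the wrapped optimizer
    adam.clear_grad()    # clear gradients before the next iteration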
...