Unverified commit 53d0869f, authored by iSerendipity, committed by GitHub

[xdoctest][task 224-225] reformat example code with google style in `python/paddle/distributed/fleet` (#56815)

* [Doctest]fix No.224-225, test=docs_preview

* fix the AttributeError
Parent: 42869ab6
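For context, the AttributeError mentioned in the commit message most likely comes from constructing an optimizer without `parameters` in dygraph mode, which is exactly what the second hunk below changes. A minimal sketch (the `linear` layer is illustrative and mirrors the updated example):

    import paddle

    linear = paddle.nn.Linear(10, 10)

    # Old example: paddle.optimizer.SGD(learning_rate=0.001) has no parameters
    # to optimize and raises AttributeError under dygraph mode.
    # Updated example: pass the layer's parameters explicitly.
    optimizer = paddle.optimizer.SGD(
        learning_rate=0.001, parameters=linear.parameters()
    )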
@@ -41,44 +41,40 @@ def distributed_model(model):
         .. code-block:: python
-            import paddle
-            import paddle.nn as nn
-            from paddle.distributed import fleet
-
-            class LinearNet(nn.Layer):
-                def __init__(self):
-                    super().__init__()
-                    self._linear1 = nn.Linear(10, 10)
-                    self._linear2 = nn.Linear(10, 1)
-
-                def forward(self, x):
-                    return self._linear2(self._linear1(x))
-
-            # 1. initialize fleet environment
-            fleet.init(is_collective=True)
-
-            # 2. create layer & optimizer
-            layer = LinearNet()
-            loss_fn = nn.MSELoss()
-            adam = paddle.optimizer.Adam(
-                learning_rate=0.001, parameters=layer.parameters())
-
-            # 3. get data_parallel model using fleet
-            adam = fleet.distributed_optimizer(adam)
-            dp_layer = fleet.distributed_model(layer)
-
-            # 4. run layer
-            inputs = paddle.randn([10, 10], 'float32')
-            outputs = dp_layer(inputs)
-            labels = paddle.randn([10, 1], 'float32')
-            loss = loss_fn(outputs, labels)
-
-            print("loss:", loss.numpy())
-
-            loss.backward()
-            adam.step()
-            adam.clear_grad()
+
+            >>> import paddle
+            >>> import paddle.nn as nn
+            >>> from paddle.distributed import fleet
+
+            >>> class LinearNet(nn.Layer):
+            ...     def __init__(self):
+            ...         super().__init__()
+            ...         self._linear1 = nn.Linear(10, 10)
+            ...         self._linear2 = nn.Linear(10, 1)
+            ...     def forward(self, x):
+            ...         return self._linear2(self._linear1(x))
+
+            >>> # 1. initialize fleet environment
+            >>> fleet.init(is_collective=True)
+
+            >>> # 2. create layer & optimizer
+            >>> layer = LinearNet()
+            >>> loss_fn = nn.MSELoss()
+            >>> adam = paddle.optimizer.Adam(
+            ...     learning_rate=0.001, parameters=layer.parameters())
+
+            >>> # 3. get data_parallel model using fleet
+            >>> adam = fleet.distributed_optimizer(adam)
+            >>> dp_layer = fleet.distributed_model(layer)
+
+            >>> # 4. run layer
+            >>> inputs = paddle.randn([10, 10], 'float32')
+            >>> outputs = dp_layer(inputs)
+            >>> labels = paddle.randn([10, 1], 'float32')
+            >>> loss = loss_fn(outputs, labels)
+            >>> print("loss:", loss.numpy())
+            >>> loss.backward()
+            >>> adam.step()
+            >>> adam.clear_grad()
     """
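The conversion above follows the Google-style, xdoctest-compatible format: statements are prefixed with `>>> `, continuation lines with `... `, and any expected output follows unprefixed so the example can be executed and checked. A minimal sketch with a hypothetical `add` function (not part of this PR):

    def add(a, b):
        """Return the sum of a and b.

        Examples:
            .. code-block:: python

                >>> add(
                ...     1, 2
                ... )
                3
        """
        return a + b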
@@ -37,12 +37,15 @@ def _dygraph_distributed_optimizer(optimizer, strategy=None):
         Fleet: instance of fleet.
     Examples:
         .. code-block:: python
-            import paddle
-            import paddle.distributed.fleet as fleet
-            fleet.init(is_collective=True)
-            strategy = fleet.DistributedStrategy()
-            optimizer = paddle.optimizer.SGD(learning_rate=0.001)
-            optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy)
+
+            >>> import paddle
+            >>> import paddle.distributed.fleet as fleet
+            >>> fleet.init(is_collective=True)
+            >>> strategy = fleet.DistributedStrategy()
+            >>> linear = paddle.nn.Linear(10, 10)
+            >>> optimizer = paddle.optimizer.SGD(learning_rate=0.001, parameters=linear.parameters())
+            >>> optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy)
     """
     fleet_env = fleet.fleet
     fleet_env.user_defined_optimizer = optimizer
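As a usage note, the optimizer returned here is typically paired with fleet.distributed_model, after which training proceeds as usual. A minimal continuation sketch of the example above, mirroring the pattern in the first hunk (the random batch is illustrative):

    # continuing from: fleet.init(...), linear, optimizer = fleet.distributed_optimizer(...)
    dp_linear = fleet.distributed_model(linear)
    out = dp_linear(paddle.randn([4, 10], 'float32'))
    loss = out.mean()
    loss.backward()
    optimizer.step()
    optimizer.clear_grad()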