Unverified commit f272d693, authored by PommesPeter, committed by GitHub

[xdoctest] reformat example code with google style No.102-104 (#56124)

* fix: updated code examples.

* fix: updated blank lines.

* fix: updated code style

* fix: removed extra changes

* fix: refine detail
Parent commit: 7039bef3
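For context, the change applied throughout this commit converts plain code blocks inside `Examples:` sections into doctest-style examples: each statement gains a `>>> ` prompt and continuation lines gain `... `, so that a doctest runner such as xdoctest can pick them up. A minimal sketch of the target style (the module and function below are hypothetical, used only for illustration and not part of this diff):

```python
def add_one(x):
    """Return ``x + 1``.

    Examples:
        .. code-block:: python

            >>> from my_module import add_one  # hypothetical module, for illustration only
            >>> add_one(41)
            42
    """
    return x + 1
```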
@@ -70,39 +70,39 @@ class Adadelta(Optimizer):
     Examples:
         .. code-block:: python

-            import paddle
-            inp = paddle.uniform([10, 10], dtype="float32", min=-0.1, max=0.1)
-            linear = paddle.nn.Linear(10, 10)
-            out = linear(inp)
-            loss = paddle.mean(out)
-            beta1 = paddle.to_tensor([0.9], dtype="float32")
-            beta2 = paddle.to_tensor([0.99], dtype="float32")
-            adadelta = paddle.optimizer.Adadelta(learning_rate=0.1, parameters=linear.parameters(), weight_decay=0.01)
-            back = out.backward()
-            adadelta.step()
-            adadelta.clear_grad()
-            #Note that the learning_rate of linear_2 is 0.01.
-            linear_1 = paddle.nn.Linear(10, 10)
-            linear_2 = paddle.nn.Linear(10, 10)
-            inp = paddle.uniform(shape=[10, 10], min=-0.1, max=0.1)
-            out = linear_1(inp)
-            out = linear_2(out)
-            loss = paddle.mean(out)
-            adadelta = paddle.optimizer.Adadelta(
-                learning_rate=0.1,
-                parameters=[{
-                    'params': linear_1.parameters()
-                }, {
-                    'params': linear_2.parameters(),
-                    'weight_decay': 0.001,
-                    'learning_rate': 0.1,
-                }],
-                weight_decay=0.01)
-            out.backward()
-            adadelta.step()
-            adadelta.clear_grad()
+            >>> import paddle
+            >>> inp = paddle.uniform([10, 10], dtype="float32", min=-0.1, max=0.1)
+            >>> linear = paddle.nn.Linear(10, 10)
+            >>> out = linear(inp)
+            >>> loss = paddle.mean(out)
+            >>> beta1 = paddle.to_tensor([0.9], dtype="float32")
+            >>> beta2 = paddle.to_tensor([0.99], dtype="float32")
+            >>> adadelta = paddle.optimizer.Adadelta(learning_rate=0.1, parameters=linear.parameters(), weight_decay=0.01)
+            >>> back = out.backward()
+            >>> adadelta.step()
+            >>> adadelta.clear_grad()
+            >>> # Note that the learning_rate of linear_2 is 0.01.
+            >>> linear_1 = paddle.nn.Linear(10, 10)
+            >>> linear_2 = paddle.nn.Linear(10, 10)
+            >>> inp = paddle.uniform(shape=[10, 10], min=-0.1, max=0.1)
+            >>> out = linear_1(inp)
+            >>> out = linear_2(out)
+            >>> loss = paddle.mean(out)
+            >>> adadelta = paddle.optimizer.Adadelta(
+            ...     learning_rate=0.1,
+            ...     parameters=[{
+            ...         'params': linear_1.parameters()
+            ...     }, {
+            ...         'params': linear_2.parameters(),
+            ...         'weight_decay': 0.001,
+            ...         'learning_rate': 0.1,
+            ...     }],
+            ...     weight_decay=0.01)
+            >>> out.backward()
+            >>> adadelta.step()
+            >>> adadelta.clear_grad()
     """
...
@@ -70,38 +70,38 @@ class Adagrad(Optimizer):
     Examples:
         .. code-block:: python

-            import paddle
-            inp = paddle.rand(shape=[10, 10])
-            linear = paddle.nn.Linear(10, 10)
-            out = linear(inp)
-            loss = paddle.mean(out)
-            adagrad = paddle.optimizer.Adagrad(learning_rate=0.1,
-                    parameters=linear.parameters())
-            out.backward()
-            adagrad.step()
-            adagrad.clear_grad()
-            #Note that the learning_rate of linear_2 is 0.01.
-            linear_1 = paddle.nn.Linear(10, 10)
-            linear_2 = paddle.nn.Linear(10, 10)
-            inp = paddle.uniform(shape=[10, 10], min=-0.1, max=0.1)
-            out = linear_1(inp)
-            out = linear_2(out)
-            loss = paddle.mean(out)
-            adagrad = paddle.optimizer.Adagrad(
-                learning_rate=0.1,
-                parameters=[{
-                    'params': linear_1.parameters()
-                }, {
-                    'params': linear_2.parameters(),
-                    'weight_decay': 0.001,
-                    'learning_rate': 0.1,
-                }],
-                weight_decay=0.01)
-            out.backward()
-            adagrad.step()
-            adagrad.clear_grad()
+            >>> import paddle
+            >>> inp = paddle.rand(shape=[10, 10])
+            >>> linear = paddle.nn.Linear(10, 10)
+            >>> out = linear(inp)
+            >>> loss = paddle.mean(out)
+            >>> adagrad = paddle.optimizer.Adagrad(learning_rate=0.1,
+            ...                                    parameters=linear.parameters())
+            >>> out.backward()
+            >>> adagrad.step()
+            >>> adagrad.clear_grad()
+            >>> # Note that the learning_rate of linear_2 is 0.01.
+            >>> linear_1 = paddle.nn.Linear(10, 10)
+            >>> linear_2 = paddle.nn.Linear(10, 10)
+            >>> inp = paddle.uniform(shape=[10, 10], min=-0.1, max=0.1)
+            >>> out = linear_1(inp)
+            >>> out = linear_2(out)
+            >>> loss = paddle.mean(out)
+            >>> adagrad = paddle.optimizer.Adagrad(
+            ...     learning_rate=0.1,
+            ...     parameters=[{
+            ...         'params': linear_1.parameters()
+            ...     }, {
+            ...         'params': linear_2.parameters(),
+            ...         'weight_decay': 0.001,
+            ...         'learning_rate': 0.1,
+            ...     }],
+            ...     weight_decay=0.01)
+            >>> out.backward()
+            >>> adagrad.step()
+            >>> adagrad.clear_grad()
     """
     _moment_acc_str = "moment"
...
@@ -98,63 +98,61 @@ class Adam(Optimizer):
         .. code-block:: python
             :name: code-example1

-            import paddle
-            linear = paddle.nn.Linear(10, 10)
-            inp = paddle.rand([10,10], dtype="float32")
-            out = linear(inp)
-            loss = paddle.mean(out)
-            adam = paddle.optimizer.Adam(learning_rate=0.1,
-                    parameters=linear.parameters())
-            loss.backward()
-            adam.step()
-            adam.clear_grad()
+            >>> import paddle
+            >>> linear = paddle.nn.Linear(10, 10)
+            >>> inp = paddle.rand([10,10], dtype="float32")
+            >>> out = linear(inp)
+            >>> loss = paddle.mean(out)
+            >>> adam = paddle.optimizer.Adam(learning_rate=0.1,
+            ...                              parameters=linear.parameters())
+            >>> loss.backward()
+            >>> adam.step()
+            >>> adam.clear_grad()

         .. code-block:: python
             :name: code-example2

-            # Adam with beta1/beta2 as Tensor and weight_decay as float
-            import paddle
-            linear = paddle.nn.Linear(10, 10)
-            inp = paddle.rand([10,10], dtype="float32")
-            out = linear(inp)
-            loss = paddle.mean(out)
-
-            beta1 = paddle.to_tensor([0.9], dtype="float32")
-            beta2 = paddle.to_tensor([0.99], dtype="float32")
-
-            adam = paddle.optimizer.Adam(learning_rate=0.1,
-                    parameters=linear.parameters(),
-                    beta1=beta1,
-                    beta2=beta2,
-                    weight_decay=0.01)
-            loss.backward()
-            adam.step()
-            adam.clear_grad()
-
-            #Note that the learning_rate of linear_2 is 0.01.
-            linear_1 = paddle.nn.Linear(10, 10)
-            linear_2 = paddle.nn.Linear(10, 10)
-            inp = paddle.uniform(shape=[10, 10], min=-0.1, max=0.1)
-            out = linear_1(inp)
-            out = linear_2(out)
-            loss = paddle.mean(out)
-            adam = paddle.optimizer.Adam(
-                learning_rate=0.1,
-                parameters=[{
-                    'params': linear_1.parameters()
-                }, {
-                    'params': linear_2.parameters(),
-                    'weight_decay': 0.001,
-                    'learning_rate': 0.1,
-                    'beta1': 0.8
-                }],
-                weight_decay=0.01,
-                beta1=0.9)
-            loss.backward()
-            adam.step()
-            adam.clear_grad()
+            >>> # Adam with beta1/beta2 as Tensor and weight_decay as float
+            >>> import paddle
+            >>> linear = paddle.nn.Linear(10, 10)
+            >>> inp = paddle.rand([10,10], dtype="float32")
+            >>> out = linear(inp)
+            >>> loss = paddle.mean(out)
+            >>> beta1 = paddle.to_tensor([0.9], dtype="float32")
+            >>> beta2 = paddle.to_tensor([0.99], dtype="float32")
+            >>> adam = paddle.optimizer.Adam(learning_rate=0.1,
+            ...                              parameters=linear.parameters(),
+            ...                              beta1=beta1,
+            ...                              beta2=beta2,
+            ...                              weight_decay=0.01)
+            >>> loss.backward()
+            >>> adam.step()
+            >>> adam.clear_grad()
+
+            >>> # Note that the learning_rate of linear_2 is 0.01.
+            >>> linear_1 = paddle.nn.Linear(10, 10)
+            >>> linear_2 = paddle.nn.Linear(10, 10)
+            >>> inp = paddle.uniform(shape=[10, 10], min=-0.1, max=0.1)
+            >>> out = linear_1(inp)
+            >>> out = linear_2(out)
+            >>> loss = paddle.mean(out)
+            >>> adam = paddle.optimizer.Adam(
+            ...     learning_rate=0.1,
+            ...     parameters=[{
+            ...         'params': linear_1.parameters()
+            ...     }, {
+            ...         'params': linear_2.parameters(),
+            ...         'weight_decay': 0.001,
+            ...         'learning_rate': 0.1,
+            ...         'beta1': 0.8
+            ...     }],
+            ...     weight_decay=0.01,
+            ...     beta1=0.9)
+            >>> loss.backward()
+            >>> adam.step()
+            >>> adam.clear_grad()
     """
     _moment1_acc_str = "moment1"
...
@@ -409,17 +407,17 @@ class Adam(Optimizer):
         Examples:
             .. code-block:: python

-                import paddle
-                a = paddle.rand([2,13], dtype="float32")
-                linear = paddle.nn.Linear(13, 5)
-                # This can be any optimizer supported by dygraph.
-                adam = paddle.optimizer.Adam(learning_rate = 0.01,
-                                            parameters = linear.parameters())
-                out = linear(a)
-                out.backward()
-                adam.step()
-                adam.clear_grad()
+                >>> import paddle
+                >>> a = paddle.rand([2,13], dtype="float32")
+                >>> linear = paddle.nn.Linear(13, 5)
+                >>> # This can be any optimizer supported by dygraph.
+                >>> adam = paddle.optimizer.Adam(learning_rate = 0.01,
+                ...                              parameters = linear.parameters())
+                >>> out = linear(a)
+                >>> out.backward()
+                >>> adam.step()
+                >>> adam.clear_grad()
         """
         if paddle.fluid.dygraph.base.in_declarative_mode():
            self._declarative_step()
...
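As a usage note: once the examples carry `>>> ` prompts, they can be collected and executed by the xdoctest package. A minimal sketch of how the converted examples might be run locally, assuming xdoctest is installed; the module path and the `command="all"` option are assumptions for illustration, not part of this commit:

```python
import xdoctest

# Collect every doctest-style example found in the module's docstrings
# and execute it; failures are reported like ordinary test failures.
xdoctest.doctest_module("paddle.optimizer.adadelta", command="all")
```

The same check can typically be invoked from the command line with `python -m xdoctest paddle.optimizer.adadelta`.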