未验证 提交 7daffbf8 编写于 作者: Y yuchen202 提交者: GitHub

[xdoctest] reformat example code with google style in No.297、298、302 (#56861)

* 更改相关文件

* Update ir.py

* 恢复相关文件

* Update ir.py

* Delete python/paddle/incubate/optimizer/modelaverage.py

* Delete modelaverage.py

* 尝试恢复文件

* Revert "尝试恢复文件"

This reverts commit 8a263cfd4642614a48a30f648c61fc801331e550.

* Revert "恢复相关文件"

This reverts commit 24249b8191fb3901681ffa9d0e1ad79ef43454de.

* Revert "Revert "尝试恢复文件""

This reverts commit 1b833d623770a851f202c68fff51e77723121a9d.

* Revert "Revert "Revert "尝试恢复文件"""

This reverts commit 64b3a816d1f0fef5ee9100480b8354749005a463.

* Revert "Delete python/paddle/incubate/optimizer/modelaverage.py"

This reverts commit 61986296bf48f7b9bef878bed6890c5dc2971481.

* Revert "更改相关文件"

This reverts commit a5ba675a948534401247b779d6a0fba0581d0628.

* Apply suggestions from code review

---------
Co-authored-by: NNyakku Shigure <sigure.qaq@gmail.com>
上级 9d183662
...@@ -81,46 +81,46 @@ def minimize_bfgs( ...@@ -81,46 +81,46 @@ def minimize_bfgs(
.. code-block:: python .. code-block:: python
:name: code-example1 :name: code-example1
# Example1: 1D Grid Parameters >>> # Example1: 1D Grid Parameters
import paddle >>> import paddle
# Randomly simulate a batch of input data >>> # Randomly simulate a batch of input data
inputs = paddle. normal(shape=(100, 1)) >>> inputs = paddle.normal(shape=(100, 1))
labels = inputs * 2.0 >>> labels = inputs * 2.0
# define the loss function >>> # define the loss function
def loss(w): >>> def loss(w):
y = w * inputs ... y = w * inputs
return paddle.nn.functional.square_error_cost(y, labels).mean() ... return paddle.nn.functional.square_error_cost(y, labels).mean()
# Initialize weight parameters >>> # Initialize weight parameters
w = paddle.normal(shape=(1,)) >>> w = paddle.normal(shape=(1,))
# Call the bfgs method to solve the weight that makes the loss the smallest, and update the parameters >>> # Call the bfgs method to solve the weight that makes the loss the smallest, and update the parameters
for epoch in range(0, 10): >>> for epoch in range(0, 10):
# Call the bfgs method to optimize the loss, note that the third parameter returned represents the weight ... # Call the bfgs method to optimize the loss, note that the third parameter returned represents the weight
w_update = paddle.incubate.optimizer.functional.minimize_bfgs(loss, w)[2] ... w_update = paddle.incubate.optimizer.functional.minimize_bfgs(loss, w)[2]
# Use paddle.assign to update parameters in place ... # Use paddle.assign to update parameters in place
paddle. assign(w_update, w) ... paddle.assign(w_update, w)
.. code-block:: python .. code-block:: python
:name: code-example2 :name: code-example2
# Example2: Multidimensional Grid Parameters >>> # Example2: Multidimensional Grid Parameters
import paddle >>> import paddle
def flatten(x): >>> def flatten(x):
return x. flatten() ... return x.flatten()
def unflatten(x): >>> def unflatten(x):
return x.reshape((2,2)) ... return x.reshape((2,2))
# Assume the network parameters are more than one dimension >>> # Assume the network parameters are more than one dimension
def net(x): >>> def net(x):
assert len(x.shape) > 1 ... assert len(x.shape) > 1
return x.square().mean() ... return x.square().mean()
# function to be optimized >>> # function to be optimized
def bfgs_f(flatten_x): >>> def bfgs_f(flatten_x):
return net(unflatten(flatten_x)) ... return net(unflatten(flatten_x))
x = paddle.rand([2,2]) >>> x = paddle.rand([2,2])
for i in range(0, 10): >>> for i in range(0, 10):
# Flatten x before using minimize_bfgs ... # Flatten x before using minimize_bfgs
x_update = paddle.incubate.optimizer.functional.minimize_bfgs(bfgs_f, flatten(x))[2] ... x_update = paddle.incubate.optimizer.functional.minimize_bfgs(bfgs_f, flatten(x))[2]
# unflatten x_update, then update parameters ... # unflatten x_update, then update parameters
paddle. assign(unflatten(x_update), x) ... paddle.assign(unflatten(x_update), x)
""" """
if dtype not in ['float32', 'float64']: if dtype not in ['float32', 'float64']:
......
...@@ -82,46 +82,46 @@ def minimize_lbfgs( ...@@ -82,46 +82,46 @@ def minimize_lbfgs(
.. code-block:: python .. code-block:: python
:name: code-example1 :name: code-example1
# Example1: 1D Grid Parameters >>> # Example1: 1D Grid Parameters
import paddle >>> import paddle
# Randomly simulate a batch of input data >>> # Randomly simulate a batch of input data
inputs = paddle. normal(shape=(100, 1)) >>> inputs = paddle.normal(shape=(100, 1))
labels = inputs * 2.0 >>> labels = inputs * 2.0
# define the loss function >>> # define the loss function
def loss(w): >>> def loss(w):
y = w * inputs ... y = w * inputs
return paddle.nn.functional.square_error_cost(y, labels).mean() ... return paddle.nn.functional.square_error_cost(y, labels).mean()
# Initialize weight parameters >>> # Initialize weight parameters
w = paddle.normal(shape=(1,)) >>> w = paddle.normal(shape=(1,))
# Call the bfgs method to solve the weight that makes the loss the smallest, and update the parameters >>> # Call the lbfgs method to solve the weight that makes the loss the smallest, and update the parameters
for epoch in range(0, 10): >>> for epoch in range(0, 10):
# Call the bfgs method to optimize the loss, note that the third parameter returned represents the weight ... # Call the lbfgs method to optimize the loss, note that the third parameter returned represents the weight
w_update = paddle.incubate.optimizer.functional.minimize_bfgs(loss, w)[2] ... w_update = paddle.incubate.optimizer.functional.minimize_lbfgs(loss, w)[2]
# Use paddle.assign to update parameters in place ... # Use paddle.assign to update parameters in place
paddle. assign(w_update, w) ... paddle.assign(w_update, w)
.. code-block:: python .. code-block:: python
:name: code-example2 :name: code-example2
# Example2: Multidimensional Grid Parameters >>> # Example2: Multidimensional Grid Parameters
import paddle >>> import paddle
def flatten(x): >>> def flatten(x):
return x. flatten() ... return x.flatten()
def unflatten(x): >>> def unflatten(x):
return x.reshape((2,2)) ... return x.reshape((2,2))
# Assume the network parameters are more than one dimension >>> # Assume the network parameters are more than one dimension
def net(x): >>> def net(x):
assert len(x.shape) > 1 ... assert len(x.shape) > 1
return x.square().mean() ... return x.square().mean()
# function to be optimized >>> # function to be optimized
def bfgs_f(flatten_x): >>> def bfgs_f(flatten_x):
return net(unflatten(flatten_x)) ... return net(unflatten(flatten_x))
x = paddle.rand([2,2]) >>> x = paddle.rand([2,2])
for i in range(0, 10): >>> for i in range(0, 10):
# Flatten x before using minimize_bfgs ... # Flatten x before using minimize_lbfgs
x_update = paddle.incubate.optimizer.functional.minimize_bfgs(bfgs_f, flatten(x))[2] ... x_update = paddle.incubate.optimizer.functional.minimize_lbfgs(bfgs_f, flatten(x))[2]
# unflatten x_update, then update parameters ... # unflatten x_update, then update parameters
paddle. assign(unflatten(x_update), x) ... paddle.assign(unflatten(x_update), x)
""" """
if dtype not in ['float32', 'float64']: if dtype not in ['float32', 'float64']:
......
...@@ -469,16 +469,16 @@ def RegisterPass(function=None, input_specs={}): ...@@ -469,16 +469,16 @@ def RegisterPass(function=None, input_specs={}):
Examples: Examples:
.. code-block:: python .. code-block:: python
import paddle >>> import paddle
from paddle.fluid.ir import RegisterPass >>> from paddle.fluid.ir import RegisterPass
@RegisterPass >>> @RegisterPass
def multi_add_to_addn(): >>> def multi_add_to_addn():
def pattern(x, y, z): ... def pattern(x, y, z):
return paddle.add(paddle.add(x, y), z) ... return paddle.add(paddle.add(x, y), z)
def replace(x, y, z): ... def replace(x, y, z):
return paddle.add_n([x, y, z]) ... return paddle.add_n([x, y, z])
return pattern, replace ... return pattern, replace
""" """
def _is_pass_pair(check_pair): def _is_pass_pair(check_pair):
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册