未验证 提交 7daffbf8 编写于 作者: Y yuchen202 提交者: GitHub

[xdoctest] reformat example code with google style in No.297、298、302 (#56861)

* 更改相关文件

* Update ir.py

* 恢复相关文件

* Update ir.py

* Delete python/paddle/incubate/optimizer/modelaverage.py

* Delete modelaverage.py

* 尝试恢复文件

* Revert "尝试恢复文件"

This reverts commit 8a263cfd4642614a48a30f648c61fc801331e550.

* Revert "恢复相关文件"

This reverts commit 24249b8191fb3901681ffa9d0e1ad79ef43454de.

* Revert "Revert "尝试恢复文件""

This reverts commit 1b833d623770a851f202c68fff51e77723121a9d.

* Revert "Revert "Revert "尝试恢复文件"""

This reverts commit 64b3a816d1f0fef5ee9100480b8354749005a463.

* Revert "Delete python/paddle/incubate/optimizer/modelaverage.py"

This reverts commit 61986296bf48f7b9bef878bed6890c5dc2971481.

* Revert "更改相关文件"

This reverts commit a5ba675a948534401247b779d6a0fba0581d0628.

* Apply suggestions from code review

---------
Co-authored-by: Nyakku Shigure <sigure.qaq@gmail.com>
上级 9d183662
......@@ -81,46 +81,46 @@ def minimize_bfgs(
.. code-block:: python
:name: code-example1
# Example1: 1D Grid Parameters
import paddle
# Randomly simulate a batch of input data
inputs = paddle. normal(shape=(100, 1))
labels = inputs * 2.0
# define the loss function
def loss(w):
y = w * inputs
return paddle.nn.functional.square_error_cost(y, labels).mean()
# Initialize weight parameters
w = paddle.normal(shape=(1,))
# Call the bfgs method to solve the weight that makes the loss the smallest, and update the parameters
for epoch in range(0, 10):
# Call the bfgs method to optimize the loss, note that the third parameter returned represents the weight
w_update = paddle.incubate.optimizer.functional.minimize_bfgs(loss, w)[2]
# Use paddle.assign to update parameters in place
paddle. assign(w_update, w)
>>> # Example1: 1D Grid Parameters
>>> import paddle
>>> # Randomly simulate a batch of input data
>>> inputs = paddle.normal(shape=(100, 1))
>>> labels = inputs * 2.0
>>> # define the loss function
>>> def loss(w):
... y = w * inputs
... return paddle.nn.functional.square_error_cost(y, labels).mean()
>>> # Initialize weight parameters
>>> w = paddle.normal(shape=(1,))
>>> # Call the bfgs method to solve the weight that makes the loss the smallest, and update the parameters
>>> for epoch in range(0, 10):
... # Call the bfgs method to optimize the loss, note that the third parameter returned represents the weight
... w_update = paddle.incubate.optimizer.functional.minimize_bfgs(loss, w)[2]
... # Use paddle.assign to update parameters in place
...     paddle.assign(w_update, w)
.. code-block:: python
:name: code-example2
# Example2: Multidimensional Grid Parameters
import paddle
def flatten(x):
return x. flatten()
def unflatten(x):
return x.reshape((2,2))
# Assume the network parameters are more than one dimension
def net(x):
assert len(x.shape) > 1
return x.square().mean()
# function to be optimized
def bfgs_f(flatten_x):
return net(unflatten(flatten_x))
x = paddle.rand([2,2])
for i in range(0, 10):
# Flatten x before using minimize_bfgs
x_update = paddle.incubate.optimizer.functional.minimize_bfgs(bfgs_f, flatten(x))[2]
# unflatten x_update, then update parameters
paddle. assign(unflatten(x_update), x)
>>> # Example2: Multidimensional Grid Parameters
>>> import paddle
>>> def flatten(x):
...     return x.flatten()
>>> def unflatten(x):
... return x.reshape((2,2))
>>> # Assume the network parameters are more than one dimension
>>> def net(x):
... assert len(x.shape) > 1
... return x.square().mean()
>>> # function to be optimized
>>> def bfgs_f(flatten_x):
... return net(unflatten(flatten_x))
>>> x = paddle.rand([2,2])
>>> for i in range(0, 10):
... # Flatten x before using minimize_bfgs
... x_update = paddle.incubate.optimizer.functional.minimize_bfgs(bfgs_f, flatten(x))[2]
... # unflatten x_update, then update parameters
... paddle.assign(unflatten(x_update), x)
"""
if dtype not in ['float32', 'float64']:
......
......@@ -82,46 +82,46 @@ def minimize_lbfgs(
.. code-block:: python
:name: code-example1
# Example1: 1D Grid Parameters
import paddle
# Randomly simulate a batch of input data
inputs = paddle. normal(shape=(100, 1))
labels = inputs * 2.0
# define the loss function
def loss(w):
y = w * inputs
return paddle.nn.functional.square_error_cost(y, labels).mean()
# Initialize weight parameters
w = paddle.normal(shape=(1,))
# Call the bfgs method to solve the weight that makes the loss the smallest, and update the parameters
for epoch in range(0, 10):
# Call the bfgs method to optimize the loss, note that the third parameter returned represents the weight
w_update = paddle.incubate.optimizer.functional.minimize_bfgs(loss, w)[2]
# Use paddle.assign to update parameters in place
paddle. assign(w_update, w)
>>> # Example1: 1D Grid Parameters
>>> import paddle
>>> # Randomly simulate a batch of input data
>>> inputs = paddle.normal(shape=(100, 1))
>>> labels = inputs * 2.0
>>> # define the loss function
>>> def loss(w):
... y = w * inputs
... return paddle.nn.functional.square_error_cost(y, labels).mean()
>>> # Initialize weight parameters
>>> w = paddle.normal(shape=(1,))
>>> # Call the bfgs method to solve the weight that makes the loss the smallest, and update the parameters
>>> for epoch in range(0, 10):
... # Call the bfgs method to optimize the loss, note that the third parameter returned represents the weight
... w_update = paddle.incubate.optimizer.functional.minimize_bfgs(loss, w)[2]
... # Use paddle.assign to update parameters in place
... paddle.assign(w_update, w)
.. code-block:: python
:name: code-example2
# Example2: Multidimensional Grid Parameters
import paddle
def flatten(x):
return x. flatten()
def unflatten(x):
return x.reshape((2,2))
# Assume the network parameters are more than one dimension
def net(x):
assert len(x.shape) > 1
return x.square().mean()
# function to be optimized
def bfgs_f(flatten_x):
return net(unflatten(flatten_x))
x = paddle.rand([2,2])
for i in range(0, 10):
# Flatten x before using minimize_bfgs
x_update = paddle.incubate.optimizer.functional.minimize_bfgs(bfgs_f, flatten(x))[2]
# unflatten x_update, then update parameters
paddle. assign(unflatten(x_update), x)
>>> # Example2: Multidimensional Grid Parameters
>>> import paddle
>>> def flatten(x):
...     return x.flatten()
>>> def unflatten(x):
... return x.reshape((2,2))
>>> # Assume the network parameters are more than one dimension
>>> def net(x):
... assert len(x.shape) > 1
... return x.square().mean()
>>> # function to be optimized
>>> def bfgs_f(flatten_x):
... return net(unflatten(flatten_x))
>>> x = paddle.rand([2,2])
>>> for i in range(0, 10):
... # Flatten x before using minimize_bfgs
... x_update = paddle.incubate.optimizer.functional.minimize_bfgs(bfgs_f, flatten(x))[2]
... # unflatten x_update, then update parameters
... paddle.assign(unflatten(x_update), x)
"""
if dtype not in ['float32', 'float64']:
......
......@@ -469,16 +469,16 @@ def RegisterPass(function=None, input_specs={}):
Examples:
.. code-block:: python
import paddle
from paddle.fluid.ir import RegisterPass
@RegisterPass
def multi_add_to_addn():
def pattern(x, y, z):
return paddle.add(paddle.add(x, y), z)
def replace(x, y, z):
return paddle.add_n([x, y, z])
return pattern, replace
>>> import paddle
>>> from paddle.fluid.ir import RegisterPass
>>> @RegisterPass
... def multi_add_to_addn():
... def pattern(x, y, z):
... return paddle.add(paddle.add(x, y), z)
... def replace(x, y, z):
... return paddle.add_n([x, y, z])
... return pattern, replace
"""
def _is_pass_pair(check_pair):
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册