diff --git a/python/paddle/incubate/optimizer/functional/bfgs.py b/python/paddle/incubate/optimizer/functional/bfgs.py index 9d98460e03c3ca8b06ef7fc4518c96ca9b112556..bc244d9c19da1a7f3658be9431d5c41a3afc63b5 100644 --- a/python/paddle/incubate/optimizer/functional/bfgs.py +++ b/python/paddle/incubate/optimizer/functional/bfgs.py @@ -81,46 +81,46 @@ def minimize_bfgs( .. code-block:: python :name: code-example1 - # Example1: 1D Grid Parameters - import paddle - # Randomly simulate a batch of input data - inputs = paddle.normal(shape=(100, 1)) - labels = inputs * 2.0 - # define the loss function - def loss(w): - y = w * inputs - return paddle.nn.functional.square_error_cost(y, labels).mean() - # Initialize weight parameters - w = paddle.normal(shape=(1,)) - # Call the bfgs method to solve the weight that makes the loss the smallest, and update the parameters - for epoch in range(0, 10): - # Call the bfgs method to optimize the loss, note that the third parameter returned represents the weight - w_update = paddle.incubate.optimizer.functional.minimize_bfgs(loss, w)[2] - # Use paddle.assign to update parameters in place - paddle.assign(w_update, w) + >>> # Example1: 1D Grid Parameters + >>> import paddle + >>> # Randomly simulate a batch of input data + >>> inputs = paddle.normal(shape=(100, 1)) + >>> labels = inputs * 2.0 + >>> # define the loss function + >>> def loss(w): + ... y = w * inputs + ... return paddle.nn.functional.square_error_cost(y, labels).mean() + >>> # Initialize weight parameters + >>> w = paddle.normal(shape=(1,)) + >>> # Call the bfgs method to solve the weight that makes the loss the smallest, and update the parameters + >>> for epoch in range(0, 10): + ... # Call the bfgs method to optimize the loss, note that the third parameter returned represents the weight + ... w_update = paddle.incubate.optimizer.functional.minimize_bfgs(loss, w)[2] + ... # Use paddle.assign to update parameters in place + ... paddle.assign(w_update, w) ..
code-block:: python :name: code-example2 - # Example2: Multidimensional Grid Parameters - import paddle - def flatten(x): - return x.flatten() - def unflatten(x): - return x.reshape((2,2)) - # Assume the network parameters are more than one dimension - def net(x): - assert len(x.shape) > 1 - return x.square().mean() - # function to be optimized - def bfgs_f(flatten_x): - return net(unflatten(flatten_x)) - x = paddle.rand([2,2]) - for i in range(0, 10): - # Flatten x before using minimize_bfgs - x_update = paddle.incubate.optimizer.functional.minimize_bfgs(bfgs_f, flatten(x))[2] - # unflatten x_update, then update parameters - paddle.assign(unflatten(x_update), x) + >>> # Example2: Multidimensional Grid Parameters + >>> import paddle + >>> def flatten(x): + ... return x.flatten() + >>> def unflatten(x): + ... return x.reshape((2,2)) + >>> # Assume the network parameters are more than one dimension + >>> def net(x): + ... assert len(x.shape) > 1 + ... return x.square().mean() + >>> # function to be optimized + >>> def bfgs_f(flatten_x): + ... return net(unflatten(flatten_x)) + >>> x = paddle.rand([2,2]) + >>> for i in range(0, 10): + ... # Flatten x before using minimize_bfgs + ... x_update = paddle.incubate.optimizer.functional.minimize_bfgs(bfgs_f, flatten(x))[2] + ... # unflatten x_update, then update parameters + ... paddle.assign(unflatten(x_update), x) """ if dtype not in ['float32', 'float64']: diff --git a/python/paddle/incubate/optimizer/functional/lbfgs.py b/python/paddle/incubate/optimizer/functional/lbfgs.py index af30efe44a8daa733b249a5505708cdea6bcce45..fc482e4ca18b5db9e4e492cad220746527b89271 100644 --- a/python/paddle/incubate/optimizer/functional/lbfgs.py +++ b/python/paddle/incubate/optimizer/functional/lbfgs.py @@ -82,46 +82,46 @@ def minimize_lbfgs( .. code-block:: python :name: code-example1 - # Example1: 1D Grid Parameters - import paddle - # Randomly simulate a batch of input data - inputs = paddle.
normal(shape=(100, 1)) - labels = inputs * 2.0 - # define the loss function - def loss(w): - y = w * inputs - return paddle.nn.functional.square_error_cost(y, labels).mean() - # Initialize weight parameters - w = paddle.normal(shape=(1,)) - # Call the bfgs method to solve the weight that makes the loss the smallest, and update the parameters - for epoch in range(0, 10): - # Call the bfgs method to optimize the loss, note that the third parameter returned represents the weight - w_update = paddle.incubate.optimizer.functional.minimize_bfgs(loss, w)[2] - # Use paddle.assign to update parameters in place - paddle.assign(w_update, w) + >>> # Example1: 1D Grid Parameters + >>> import paddle + >>> # Randomly simulate a batch of input data + >>> inputs = paddle.normal(shape=(100, 1)) + >>> labels = inputs * 2.0 + >>> # define the loss function + >>> def loss(w): + ... y = w * inputs + ... return paddle.nn.functional.square_error_cost(y, labels).mean() + >>> # Initialize weight parameters + >>> w = paddle.normal(shape=(1,)) + >>> # Call the lbfgs method to solve the weight that makes the loss the smallest, and update the parameters + >>> for epoch in range(0, 10): + ... # Call the lbfgs method to optimize the loss, note that the third parameter returned represents the weight + ... w_update = paddle.incubate.optimizer.functional.minimize_lbfgs(loss, w)[2] + ... # Use paddle.assign to update parameters in place + ... paddle.assign(w_update, w) .. code-block:: python :name: code-example2 - # Example2: Multidimensional Grid Parameters - import paddle - def flatten(x): - return x.
flatten() - def unflatten(x): - return x.reshape((2,2)) - # Assume the network parameters are more than one dimension - def net(x): - assert len(x.shape) > 1 - return x.square().mean() - # function to be optimized - def bfgs_f(flatten_x): - return net(unflatten(flatten_x)) - x = paddle.rand([2,2]) - for i in range(0, 10): - # Flatten x before using minimize_bfgs - x_update = paddle.incubate.optimizer.functional.minimize_bfgs(bfgs_f, flatten(x))[2] - # unflatten x_update, then update parameters - paddle.assign(unflatten(x_update), x) + >>> # Example2: Multidimensional Grid Parameters + >>> import paddle + >>> def flatten(x): + ... return x.flatten() + >>> def unflatten(x): + ... return x.reshape((2,2)) + >>> # Assume the network parameters are more than one dimension + >>> def net(x): + ... assert len(x.shape) > 1 + ... return x.square().mean() + >>> # function to be optimized + >>> def bfgs_f(flatten_x): + ... return net(unflatten(flatten_x)) + >>> x = paddle.rand([2,2]) + >>> for i in range(0, 10): + ... # Flatten x before using minimize_lbfgs + ... x_update = paddle.incubate.optimizer.functional.minimize_lbfgs(bfgs_f, flatten(x))[2] + ... # unflatten x_update, then update parameters + ... paddle.assign(unflatten(x_update), x) """ if dtype not in ['float32', 'float64']: diff --git a/python/paddle/incubate/passes/ir.py b/python/paddle/incubate/passes/ir.py index 0e292e51a0aeafe62a2e38478a617d07c56f42e7..c657f20abcb99524b6673c13d077ce7abc8c583d 100644 --- a/python/paddle/incubate/passes/ir.py +++ b/python/paddle/incubate/passes/ir.py @@ -469,16 +469,16 @@ def RegisterPass(function=None, input_specs={}): Examples: ..
code-block:: python - import paddle - from paddle.fluid.ir import RegisterPass - - @RegisterPass - def multi_add_to_addn(): - def pattern(x, y, z): - return paddle.add(paddle.add(x, y), z) - def replace(x, y, z): - return paddle.add_n([x, y, z]) - return pattern, replace + >>> import paddle + >>> from paddle.fluid.ir import RegisterPass + + >>> @RegisterPass + ... def multi_add_to_addn(): + ... def pattern(x, y, z): + ... return paddle.add(paddle.add(x, y), z) + ... def replace(x, y, z): + ... return paddle.add_n([x, y, z]) + ... return pattern, replace """ def _is_pass_pair(check_pair):