# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np

import paddle

from .line_search import strong_wolfe
from .utils import (
    _value_and_gradient,
    check_initial_inverse_hessian_estimate,
    check_input_type,
)


def minimize_bfgs(
    objective_func,
    initial_position,
    max_iters=50,
    tolerance_grad=1e-7,
    tolerance_change=1e-9,
    initial_inverse_hessian_estimate=None,
    line_search_fn='strong_wolfe',
    max_line_search_iters=50,
    initial_step_length=1.0,
    dtype='float32',
    name=None,
):
    r"""
    Minimizes a differentiable function ``objective_func`` using the BFGS method.

    BFGS is a quasi-Newton method for solving an unconstrained optimization problem over a differentiable function.
    It is closely related to the Newton method for minimization. Consider the iterate update formula:

    .. math::

        x_{k+1} = x_{k} - H_k \nabla{f_k}

    If :math:`H_k` is the inverse Hessian of :math:`f` at :math:`x_k`, then this is the Newton method.
    If :math:`H_k` is symmetric and positive definite and used as an approximation of the inverse Hessian, then
    this is a quasi-Newton method. In practice, the approximated inverse Hessians are obtained using only the
    gradients, over either the whole search history (BFGS) or a recent part of it (L-BFGS).
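
    At each iteration, with :math:`s_k = x_{k+1} - x_{k}`, :math:`y_k = \nabla{f_{k+1}} - \nabla{f_k}` and
    :math:`\rho_k = 1 / (y_k^T s_k)`, BFGS refreshes the inverse Hessian approximation with the update given in
    the reference below:

    .. math::

        H_{k+1} = (I - \rho_k s_k y_k^T) H_k (I - \rho_k y_k s_k^T) + \rho_k s_k s_k^T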

    Reference:
        Jorge Nocedal, Stephen J. Wright, Numerical Optimization, Second Edition, 2006. pp. 140: Algorithm 6.1 (BFGS Method).

    Args:
        objective_func: the objective function to minimize. ``objective_func`` accepts a 1D Tensor and returns a scalar.
        initial_position (Tensor): the starting point of the iterates, with the same shape as the input of ``objective_func``.
        max_iters (int, optional): the maximum number of minimization iterations. Default value: 50.
        tolerance_grad (float, optional): terminates if the gradient norm is smaller than this. Currently the gradient norm uses the inf norm. Default value: 1e-7.
        tolerance_change (float, optional): terminates if the change of the function value/position/parameters between two iterations is smaller than this value. Default value: 1e-9.
        initial_inverse_hessian_estimate (Tensor, optional): the initial inverse Hessian approximation at initial_position. It must be symmetric and positive definite. If not given, an identity matrix of order N is used, where N is the size of ``initial_position``. Default value: None.
        line_search_fn (str, optional): indicates which line search method to use; only 'strong_wolfe' is supported right now, and 'Hager Zhang' may be supported in the future. Default value: 'strong_wolfe'.
        max_line_search_iters (int, optional): the maximum number of line search iterations. Default value: 50.
        initial_step_length (float, optional): step length used in the first iteration of the line search. Different values of initial_step_length may lead to different optima. For methods like Newton and quasi-Newton, the initial trial step length should always be 1.0. Default value: 1.0.
        dtype ('float32' | 'float64', optional): data type used in the algorithm; the data types of the input parameters must be consistent with this dtype. Default value: 'float32'.
        name (str, optional): Name for the operation. For more information, please refer to :ref:`api_guide_Name`. Default value: None.

    Returns:
        output(tuple):

            - is_converge (bool): Indicates whether the minimum was found within the tolerance.
            - num_func_calls (int): number of objective function calls.
            - position (Tensor): the position of the last iteration. If the search converged, this value is the argmin of the objective function found from the initial position.
            - objective_value (Tensor): objective function value at `position`.
            - objective_gradient (Tensor): objective function gradient at `position`.
            - inverse_hessian_estimate (Tensor): the estimate of the inverse Hessian at `position`.

    Examples:
        .. code-block:: python
            :name: code-example1

            # Example 1: 1D Grid Parameters
            import paddle
            # Randomly simulate a batch of input data
            inputs = paddle.normal(shape=(100, 1))
            labels = inputs * 2.0
            # define the loss function
            def loss(w):
                y = w * inputs
                return paddle.nn.functional.square_error_cost(y, labels).mean()
            # Initialize weight parameters
            w = paddle.normal(shape=(1,))
            # Call the bfgs method to find the weight that minimizes the loss, and update the parameters
            for epoch in range(0, 10):
                # Call the bfgs method to optimize the loss; the third value returned is the optimized weight
                w_update = paddle.incubate.optimizer.functional.minimize_bfgs(loss, w)[2]
                # Use paddle.assign to update parameters in place
                paddle.assign(w_update, w)

        .. code-block:: python
            :name: code-example2

            # Example 2: Multidimensional Grid Parameters
            import paddle
            def flatten(x):
                return x.flatten()
            def unflatten(x):
                return x.reshape((2,2))
            # Assume the network parameters have more than one dimension
            def net(x):
                assert len(x.shape) > 1
                return x.square().mean()
            # function to be optimized
            def bfgs_f(flatten_x):
                return net(unflatten(flatten_x))
            x = paddle.rand([2,2])
            for i in range(0, 10):
                # Flatten x before using minimize_bfgs
                x_update = paddle.incubate.optimizer.functional.minimize_bfgs(bfgs_f, flatten(x))[2]
                # unflatten x_update, then update parameters
                paddle.assign(unflatten(x_update), x)
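
        A custom symmetric positive definite initial inverse Hessian estimate can also be supplied. The following
        minimal sketch assumes a simple quadratic objective; the scaled identity is only an illustrative choice:

        .. code-block:: python
            :name: code-example3

            # Example 3 (sketch): custom initial inverse Hessian estimate and float64
            import paddle
            def quadratic(x):
                return paddle.sum((x - 1.0) ** 2)
            x0 = paddle.zeros([4], dtype='float64')
            # any symmetric positive definite matrix of order 4 would do here
            H0 = 0.5 * paddle.eye(4, dtype='float64')
            results = paddle.incubate.optimizer.functional.minimize_bfgs(
                quadratic, x0,
                initial_inverse_hessian_estimate=H0,
                dtype='float64')
            # returned tuple: (is_converge, num_func_calls, position, value, gradient, inverse_hessian_estimate)
            print(results[2])  # expected to be close to [1., 1., 1., 1.]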
    """

    if dtype not in ['float32', 'float64']:
        raise ValueError(
            "The dtype must be 'float32' or 'float64', but {} was specified.".format(
                dtype
            )
        )

    op_name = 'minimize_bfgs'
    check_input_type(initial_position, 'initial_position', op_name)

    I = paddle.eye(initial_position.shape[0], dtype=dtype)
    if initial_inverse_hessian_estimate is None:
        initial_inverse_hessian_estimate = I
    else:
        check_input_type(
            initial_inverse_hessian_estimate,
            'initial_inverse_hessian_estimate',
            op_name,
        )
        check_initial_inverse_hessian_estimate(initial_inverse_hessian_estimate)

    Hk = paddle.assign(initial_inverse_hessian_estimate)
    # use detach and assign to create a new tensor rather than `=`, or xk would share memory and grad with initial_position
    xk = paddle.assign(initial_position.detach())

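    # objective value and gradient at the starting point; counts as the first function call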
    value, g1 = _value_and_gradient(objective_func, xk)
    num_func_calls = paddle.full(shape=[1], fill_value=1, dtype='int64')

    # when the dim of x is 1000, it needs more than 30 iters for all elements to converge to the minimum.
    k = paddle.full(shape=[1], fill_value=0, dtype='int64')
    done = paddle.full(shape=[1], fill_value=False, dtype='bool')
    is_converge = paddle.full(shape=[1], fill_value=False, dtype='bool')

    def cond(k, done, is_converge, num_func_calls, xk, value, g1, Hk):
        return (k < max_iters) & ~done

    def body(k, done, is_converge, num_func_calls, xk, value, g1, Hk):
        # --------------   compute pk   -------------- #
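        # quasi-Newton search direction: negative approximate inverse Hessian times the gradient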
        pk = -paddle.matmul(Hk, g1)

        # --------------   compute alpha by line search   -------------- #
        if line_search_fn == 'strong_wolfe':
            alpha, value, g2, ls_func_calls = strong_wolfe(
                f=objective_func,
                xk=xk,
                pk=pk,
                max_iters=max_line_search_iters,
                initial_step_length=initial_step_length,
                dtype=dtype,
            )
        else:
            raise NotImplementedError(
                "Currently only 'strong_wolfe' is supported for line_search_fn, but '{}' was specified.".format(
                    line_search_fn
                )
            )
        num_func_calls += ls_func_calls

        # --------------   update Hk   -------------- #
        sk = alpha * pk
        yk = g2 - g1

        xk = xk + sk
        g1 = g2

        sk = paddle.unsqueeze(sk, 0)
        yk = paddle.unsqueeze(yk, 0)

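        # curvature scalar rho_k = 1 / (y_k^T s_k); if the curvature y_k^T s_k is zero,
        # fall back to a large constant instead of dividing by zero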
        rhok_inv = paddle.dot(yk, sk)
        rhok = paddle.static.nn.cond(
            rhok_inv == 0.0,
            lambda: paddle.full(shape=[1], fill_value=1000.0, dtype=dtype),
            lambda: 1.0 / rhok_inv,
        )

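        # BFGS inverse Hessian update (Nocedal & Wright, Algorithm 6.1):
        # H_{k+1} = (I - rho*s*y^T) * H_k * (I - rho*y*s^T) + rho*s*s^T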
        Vk_transpose = I - rhok * sk * yk.t()
        Vk = I - rhok * yk * sk.t()
        Hk = (
            paddle.matmul(paddle.matmul(Vk_transpose, Hk), Vk)
            + rhok * sk * sk.t()
        )

        k += 1

        # --------------   check convergence   -------------- #
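        # stop when the inf-norm of the gradient or of the search direction falls below the tolerances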
        gnorm = paddle.linalg.norm(g1, p=np.inf)
        pk_norm = paddle.linalg.norm(pk, p=np.inf)
        paddle.assign(
            done | (gnorm < tolerance_grad) | (pk_norm < tolerance_change), done
        )
        paddle.assign(done, is_converge)
        # when alpha=0, there is no chance for xk to change.
        paddle.assign(done | (alpha == 0.0), done)
        return [k, done, is_converge, num_func_calls, xk, value, g1, Hk]

    paddle.static.nn.while_loop(
        cond=cond,
        body=body,
        loop_vars=[k, done, is_converge, num_func_calls, xk, value, g1, Hk],
    )
    return is_converge, num_func_calls, xk, value, g1, Hk