#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
import paddle.fluid.core as core
from paddle.fluid.op import Operator
from op_test import OpTest


class TestMomentumOp1(OpTest):
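    '''Test Momentum with the plain (non-Nesterov) update rule
    '''
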
    def setUp(self):
        self.op_type = "momentum"

        param = np.random.random((123, 321)).astype("float32")
        grad = np.random.random((123, 321)).astype("float32")
        velocity = np.zeros((123, 321)).astype("float32")
        learning_rate = np.array([0.001]).astype("float32")
        mu = 0.0001
        use_nesterov = False

        self.inputs = {
            'Param': param,
            'Grad': grad,
            'Velocity': velocity,
            'LearningRate': learning_rate
        }

        self.attrs = {'mu': mu}

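        # Reference update, matching what the momentum kernel computes:
        #   velocity_out = mu * velocity + grad
        #   plain momentum:   param_out = param - lr * velocity_out
        #   Nesterov variant: param_out = param - (grad + mu * velocity_out) * lr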
        velocity_out = mu * velocity + grad
        if use_nesterov:
            param_out = param - grad * learning_rate - \
                        velocity_out * mu * learning_rate
        else:
            param_out = param - learning_rate * velocity_out

        self.outputs = {'ParamOut': param_out, 'VelocityOut': velocity_out}

    def test_check_output(self):
        self.check_output()


class TestMomentumOp2(OpTest):
    '''Test Momentum with the use_nesterov attribute set to True
    '''

    def setUp(self):
        self.op_type = "momentum"

        param = np.random.random((123, 321)).astype("float32")
        grad = np.random.random((123, 321)).astype("float32")
        velocity = np.zeros((123, 321)).astype("float32")
        learning_rate = np.array([0.001]).astype("float32")
        mu = 0.0001
        use_nesterov = True

        self.inputs = {
            'Param': param,
            'Grad': grad,
            'Velocity': velocity,
            'LearningRate': learning_rate
        }

        self.attrs = {'mu': mu, 'use_nesterov': use_nesterov}

        velocity_out = mu * velocity + grad
        if use_nesterov:
            param_out = param - grad * learning_rate - \
                        velocity_out * mu * learning_rate
        else:
            param_out = param - learning_rate * velocity_out

        self.outputs = {'ParamOut': param_out, 'VelocityOut': velocity_out}

    def test_check_output(self):
        self.check_output()


class TestSparseMomentumOp(unittest.TestCase):
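    '''Test Momentum with a SelectedRows (sparse) gradient, driven through
    the low-level Operator interface
    '''
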
    def setUp(self):
        self.use_nesterov = False

    def check_with_place(self, place):
        self.init_kernel()
        scope = core.Scope()
        # shape of the sparse Grad variable and update hyper-parameters
        height = 10
        rows = [0, 4, 7]
        row_numel = 12
        mu = 1.0
        use_nesterov = self.use_nesterov

        # create and initialize Param Variable
        param = scope.var('Param').get_tensor()
        param_array = np.full((height, row_numel), 5.0).astype("float32")
        param.set(param_array, place)
        param_out = scope.var("ParamOut").get_tensor()
        param_out_array = np.full((height, row_numel), 0.0).astype("float32")
        param_out.set(param_out_array, place)

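        # Grad is a SelectedRows: only `rows` of the conceptual
        # (height x row_numel) dense gradient are materialized;
        # the missing rows are implicitly zero.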
        grad_selected_rows = scope.var('Grad').get_selected_rows()
        grad_selected_rows.set_height(height)
        grad_selected_rows.set_rows(rows)
        grad_np_array = np.ones((len(rows), row_numel)).astype("float32")
        grad_np_array[0, 0] = 2.0
        grad_np_array[2, 8] = 4.0
        grad_tensor = grad_selected_rows.get_tensor()
        grad_tensor.set(grad_np_array, place)

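        # create and initialize Velocity Variable (dense, all ones)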
        velocity = scope.var('Velocity').get_tensor()
        velocity_np_array = np.ones((height, row_numel)).astype("float32")
        velocity.set(velocity_np_array, place)
        velocity_out = scope.var('VelocityOut').get_tensor()
        velocity_out_np_array = np.full((height, row_numel), 0.0).astype("float32")
        velocity_out.set(velocity_out_np_array, place)

        # create and initialize LearningRate Variable
        lr = scope.var('LearningRate').get_tensor()
        lr_array = np.full((1), 2.0).astype("float32")
        lr.set(lr_array, place)

        # create and run operator
        op = Operator(
            "momentum",
            Param='Param',
            Grad='Grad',
            Velocity='Velocity',
            ParamOut='ParamOut',
            VelocityOut='VelocityOut',
            LearningRate='LearningRate',
            mu=mu,
            use_nesterov=use_nesterov)
        op.run(scope, place)

        # get and compare result
        param_out_np_array = np.array(param_out)
        velocity_out_np_array = np.array(velocity_out)

        # TODO(dzh): add a more suitable general numpy interface
        # for sparse update.
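        # Scatter the sparse rows into a dense gradient so the dense momentum
        # formula can serve as the reference; rows absent from the
        # SelectedRows contribute a zero gradient.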
        _grad_np_array = np.full((height, row_numel), 0.0).astype("float32")
        for i in range(len(rows)):
            _grad_np_array[rows[i]] = grad_np_array[i]
        _velocity_out = mu * velocity_np_array + _grad_np_array
        _param = param_array
        if use_nesterov:
            _param_out = _param - (_grad_np_array + _velocity_out * mu) * lr_array
        else:
            _param_out = _param - lr_array * _velocity_out
        self.assertTrue((_velocity_out == velocity_out_np_array).all())
        self.assertTrue((_param_out == param_out_np_array).all())

    def init_kernel(self):
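        # Hook for subclasses to adjust the configuration before the run
        # (TestSparseMomentumOp2 enables Nesterov momentum here).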
        pass

    def test_sparse_momentum(self):
        places = [core.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(core.CUDAPlace(0))
        for place in places:
            self.check_with_place(place)


class TestSparseMomentumOp2(TestSparseMomentumOp):
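    '''Rerun the sparse momentum test with use_nesterov enabled
    '''
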
    def init_kernel(self):
        self.use_nesterov = True


if __name__ == "__main__":
    unittest.main()