#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
from op_test import OpTest
import paddle
import paddle.fluid as fluid


class TestAdadeltaOp1(OpTest):
    def setUp(self):
        self.op_type = "adadelta"
        param = np.random.uniform(-1, 1, (102, 105)).astype("float32")
        grad = np.random.uniform(-1, 1, (102, 105)).astype("float32")
        # The accumulated squared gradient is non-negative
        avg_squared_grad = np.random.random((102, 105)).astype("float32")
        # The accumulated squared update is non-negative
        avg_squared_update = np.random.random((102, 105)).astype("float32")

        rho = 0.95
        epsilon = 1e-6

        self.inputs = {
            'Param': param,
            'Grad': grad,
            'AvgSquaredGrad': avg_squared_grad,
            'AvgSquaredUpdate': avg_squared_update,
        }

        self.attrs = {'rho': rho, 'epsilon': epsilon}

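        # Reference Adadelta update (Zeiler, 2012), computed the same way as
        # the operator kernel:
        #   E[g^2]_t  = rho * E[g^2]_{t-1} + (1 - rho) * g_t^2
        #   dx_t      = -sqrt((E[dx^2]_{t-1} + eps) / (E[g^2]_t + eps)) * g_t
        #   E[dx^2]_t = rho * E[dx^2]_{t-1} + (1 - rho) * dx_t^2
        #   param_t   = param_{t-1} + dx_t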
        avg_squared_grad_out = rho * avg_squared_grad + (1 - rho) * np.square(
            grad
        )
        update = -np.multiply(
            np.sqrt(
                np.divide(
                    avg_squared_update + epsilon, avg_squared_grad_out + epsilon
                )
            ),
            grad,
        )

        avg_squared_update_out = rho * avg_squared_update + (
            1 - rho
        ) * np.square(update)

        param_out = param + update

        self.outputs = {
            'ParamOut': param_out,
            'AvgSquaredGradOut': avg_squared_grad_out,
            'AvgSquaredUpdateOut': avg_squared_update_out,
        }

    def test_check_output(self):
        self.check_output()


class TestAdadeltaOp2(OpTest):
    '''Test Adadelta op with default attribute values'''

    def setUp(self):
        self.op_type = "adadelta"
        param = np.random.uniform(-1, 1, (102, 105)).astype("float32")
        grad = np.random.uniform(-1, 1, (102, 105)).astype("float32")
        # The accumulated squared gradient is non-negative
        avg_squared_grad = np.random.random((102, 105)).astype("float32")
        # The accumulated squared update is non-negative
        avg_squared_update = np.random.random((102, 105)).astype("float32")

        rho = 0.95
        epsilon = 1e-6

        self.inputs = {
            'Param': param,
            'Grad': grad,
            'AvgSquaredGrad': avg_squared_grad,
            'AvgSquaredUpdate': avg_squared_update,
        }

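        # No attrs are set in this test, so the op runs with its default
        # rho/epsilon; the reference values above (rho=0.95, epsilon=1e-6)
        # are expected to match those defaults.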
        avg_squared_grad_out = rho * avg_squared_grad + (1 - rho) * np.square(
            grad
        )
        update = -np.multiply(
            np.sqrt(
                np.divide(
                    avg_squared_update + epsilon, avg_squared_grad_out + epsilon
                )
            ),
            grad,
        )

        avg_squared_update_out = rho * avg_squared_update + (
            1 - rho
        ) * np.square(update)

        param_out = param + update

        self.outputs = {
            'ParamOut': param_out,
            'AvgSquaredGradOut': avg_squared_grad_out,
            'AvgSquaredUpdateOut': avg_squared_update_out,
        }

    def test_check_output(self):
        self.check_output()


class TestAdadeltaV2(unittest.TestCase):
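    """Exercise paddle.optimizer.Adadelta in dygraph mode, static-graph mode,
    and with invalid constructor arguments."""
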
    def test_adadelta_dygraph(self):
        paddle.disable_static(paddle.CPUPlace())
        value = np.arange(26).reshape(2, 13).astype("float32")
        a = paddle.to_tensor(value)
        linear = paddle.nn.Linear(13, 5)
        # This can be any optimizer supported by dygraph.
        adadelta = paddle.optimizer.Adadelta(
            learning_rate=0.01,
            parameters=linear.parameters(),
            weight_decay=0.01,
        )
        out = linear(a)
        out.backward()
        adadelta.step()
        adadelta.clear_gradients()

    def test_adadelta(self):
        paddle.enable_static()
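        # Build a small linear-regression program on the UCI housing data and
        # run one training pass with the Adadelta optimizer.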
        place = fluid.CPUPlace()
        main = fluid.Program()
        with fluid.program_guard(main):
            x = fluid.layers.data(name='x', shape=[13], dtype='float32')
            y = fluid.layers.data(name='y', shape=[1], dtype='float32')
            y_predict = fluid.layers.fc(input=x, size=1, act=None)
            cost = fluid.layers.square_error_cost(input=y_predict, label=y)
            avg_cost = paddle.mean(cost)

            adadelta_optimizer = paddle.optimizer.Adadelta(learning_rate=0.1)
            adadelta_optimizer.minimize(avg_cost)

            fetch_list = [avg_cost]
            train_reader = paddle.batch(
                paddle.dataset.uci_housing.train(), batch_size=1
            )
            feeder = fluid.DataFeeder(place=place, feed_list=[x, y])
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())
            for data in train_reader():
                exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list)

    def test_raise_error(self):
        self.assertRaises(ValueError, paddle.optimizer.Adadelta, None)
        self.assertRaises(
            ValueError, paddle.optimizer.Adadelta, learning_rate=0.1, rho=None
        )
        self.assertRaises(
            ValueError,
            paddle.optimizer.Adadelta,
            learning_rate=0.1,
            epsilon=None,
        )


class TestAdadeltaV2Group(TestAdadeltaV2):
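    """Same dygraph check as TestAdadeltaV2, but with parameters passed as
    groups so that linear_2 overrides the global weight_decay."""
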
    def test_adadelta_dygraph(self):
        paddle.disable_static(paddle.CPUPlace())
        value = np.arange(26).reshape(2, 13).astype("float32")
        a = paddle.to_tensor(value)
        linear_1 = paddle.nn.Linear(13, 5)
        linear_2 = paddle.nn.Linear(5, 5)
        # This can be any optimizer supported by dygraph.
        adadelta = paddle.optimizer.Adadelta(
            learning_rate=0.01,
            parameters=[
                {'params': linear_1.parameters()},
                {
                    'params': linear_2.parameters(),
                    'weight_decay': 0.001,
                },
            ],
            weight_decay=0.1,
        )
        out = linear_1(a)
        out = linear_2(out)
        out.backward()
        adadelta.step()
        adadelta.clear_gradients()


if __name__ == "__main__":
    unittest.main()