#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
from op_test import OpTest
import paddle
import paddle.fluid as fluid


class TestAdadeltaOp1(OpTest):

    def setUp(self):
        self.op_type = "adadelta"
        param = np.random.uniform(-1, 1, (102, 105)).astype("float32")
        grad = np.random.uniform(-1, 1, (102, 105)).astype("float32")
        # The squared gradient is positive
        avg_squared_grad = np.random.random((102, 105)).astype("float32")
        # The squared update is positive
        avg_squared_update = np.random.random((102, 105)).astype("float32")

        rho = 0.95
        epsilon = 1e-6

        self.inputs = {
            'Param': param,
            'Grad': grad,
            'AvgSquaredGrad': avg_squared_grad,
            'AvgSquaredUpdate': avg_squared_update
        }

        self.attrs = {'rho': rho, 'epsilon': epsilon}

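        # Reference Adadelta update (Zeiler, "ADADELTA: An Adaptive Learning
        # Rate Method"), computed with NumPy as the expected output:
        #   E[g^2]_t  = rho * E[g^2]_{t-1} + (1 - rho) * g_t^2
        #   delta_t   = -sqrt((E[dx^2]_{t-1} + eps) / (E[g^2]_t + eps)) * g_t
        #   E[dx^2]_t = rho * E[dx^2]_{t-1} + (1 - rho) * delta_t^2
        #   param_t   = param_{t-1} + delta_t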
        avg_squared_grad_out = rho * avg_squared_grad + \
            (1 - rho) * np.square(grad)
        update = -np.multiply(
            np.sqrt(
                np.divide(avg_squared_update + epsilon,
                          avg_squared_grad_out + epsilon)), grad)

        avg_squared_update_out = rho * avg_squared_update + \
            (1 - rho) * np.square(update)

        param_out = param + update

        self.outputs = {
            'ParamOut': param_out,
            'AvgSquaredGradOut': avg_squared_grad_out,
            'AvgSquaredUpdateOut': avg_squared_update_out
        }

    def test_check_output(self):
        self.check_output()


class TestAdadeltaOp2(OpTest):
    '''Test Adadelta op with default attribute values
    '''

    def setUp(self):
        self.op_type = "adadelta"
        param = np.random.uniform(-1, 1, (102, 105)).astype("float32")
        grad = np.random.uniform(-1, 1, (102, 105)).astype("float32")
        # The squared gradient is positive
        avg_squared_grad = np.random.random((102, 105)).astype("float32")
        # The squared update is positive
        avg_squared_update = np.random.random((102, 105)).astype("float32")

        rho = 0.95
        epsilon = 1e-6

        self.inputs = {
            'Param': param,
            'Grad': grad,
            'AvgSquaredGrad': avg_squared_grad,
            'AvgSquaredUpdate': avg_squared_update
        }

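        # 'rho' and 'epsilon' are deliberately left out of self.attrs so the
        # op falls back to its default attribute values; the reference values
        # above (rho=0.95, epsilon=1e-6) are assumed to match those defaults.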
        avg_squared_grad_out = rho * avg_squared_grad + \
            (1 - rho) * np.square(grad)
        update = -np.multiply(
            np.sqrt(
                np.divide(avg_squared_update + epsilon,
                          avg_squared_grad_out + epsilon)), grad)

        avg_squared_update_out = rho * avg_squared_update + \
            (1 - rho) * np.square(update)

        param_out = param + update

        self.outputs = {
            'ParamOut': param_out,
            'AvgSquaredGradOut': avg_squared_grad_out,
            'AvgSquaredUpdateOut': avg_squared_update_out
        }

    def test_check_output(self):
        self.check_output()


class TestAdadeltaV2(unittest.TestCase):

    def test_adadelta_dygraph(self):
        paddle.disable_static(paddle.CPUPlace())
        value = np.arange(26).reshape(2, 13).astype("float32")
        a = paddle.to_tensor(value)
        linear = paddle.nn.Linear(13, 5)
        # Any optimizer supported by dygraph would work here; Adadelta is the
        # one under test.
        adadelta = paddle.optimizer.Adadelta(learning_rate=0.01,
                                             parameters=linear.parameters(),
                                             weight_decay=0.01)
        out = linear(a)
        out.backward()
        adadelta.step()
        adadelta.clear_gradients()

    def test_adadelta(self):
        paddle.enable_static()
        place = fluid.CPUPlace()
        main = fluid.Program()
        with fluid.program_guard(main):
            x = fluid.layers.data(name='x', shape=[13], dtype='float32')
            y = fluid.layers.data(name='y', shape=[1], dtype='float32')
            y_predict = fluid.layers.fc(input=x, size=1, act=None)
            cost = fluid.layers.square_error_cost(input=y_predict, label=y)
            avg_cost = fluid.layers.mean(cost)

            adadelta_optimizer = paddle.optimizer.Adadelta(learning_rate=0.1)
            adadelta_optimizer.minimize(avg_cost)

            fetch_list = [avg_cost]
            train_reader = paddle.batch(paddle.dataset.uci_housing.train(),
                                        batch_size=1)
            feeder = fluid.DataFeeder(place=place, feed_list=[x, y])
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())
            for data in train_reader():
                exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list)

    def test_raise_error(self):
        self.assertRaises(ValueError, paddle.optimizer.Adadelta, None)
        self.assertRaises(ValueError,
                          paddle.optimizer.Adadelta,
                          learning_rate=0.1,
                          rho=None)
        self.assertRaises(ValueError,
                          paddle.optimizer.Adadelta,
                          learning_rate=0.1,
                          epsilon=None)


class TestAdadeltaV2Group(TestAdadeltaV2):

    def test_adadelta_dygraph(self):
        paddle.disable_static(paddle.CPUPlace())
        value = np.arange(26).reshape(2, 13).astype("float32")
        a = paddle.to_tensor(value)
        linear_1 = paddle.nn.Linear(13, 5)
        linear_2 = paddle.nn.Linear(5, 5)
        # Any optimizer supported by dygraph would work here; Adadelta is the
        # one under test, configured with per-parameter-group options.
        adadelta = paddle.optimizer.Adadelta(
            learning_rate=0.01,
            parameters=[
                {'params': linear_1.parameters()},
                {'params': linear_2.parameters(), 'weight_decay': 0.001},
            ],
            weight_decay=0.1)
        out = linear_1(a)
        out = linear_2(out)
        out.backward()
        adadelta.step()
        adadelta.clear_gradients()


if __name__ == "__main__":
    unittest.main()