#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import paddle.fluid.framework as framework
import paddle.fluid.optimizer as optimizer
from paddle.fluid.backward import append_backward


class TestOptimizer(unittest.TestCase):
    def test_sgd_optimizer(self):
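        # Build a minimal forward program, mean.out = mean(mul.x * mul.y),
        # and check the ops appended by SGDOptimizer.minimize().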
        init_program = framework.Program()
        program = framework.Program()
        block = program.global_block()
        mul_x = block.create_parameter(
            dtype="float32", shape=[5, 10], lod_level=0, name="mul.x")
        mul_y = block.create_var(
            dtype="float32", shape=[10, 8], lod_level=0, name="mul.y")
        mul_out = block.create_var(
            dtype="float32", shape=[5, 8], lod_level=0, name="mul.out")
        mean_out = block.create_var(
            dtype="float32", shape=[1], lod_level=0, name="mean.out")
        block.append_op(
            type="mul",
            inputs={"X": mul_x,
                    "Y": mul_y},
            outputs={"Out": mul_out},
            attrs={"x_num_col_dims": 1})
        block.append_op(
            type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out})
        sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.01)
        opts, _ = sgd_optimizer.minimize(mean_out, init_program)
        self.assertEqual(len(opts), 3)
        self.assertEqual([op.type for op in opts],
                         ["fill_constant", "elementwise_mul", "sgd"])


class TestMomentumOptimizer(unittest.TestCase):
    class MockMomentum(optimizer.MomentumOptimizer):
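        # Expose the optimizer's private accumulator state for inspection.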
        def get_accumulators(self):
            return self._accumulators

        def get_velocity_str(self):
            return self._velocity_acc_str

    def test_vanilla_momentum_optimizer(self):
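        # Plain momentum: expect a momentum op without Nesterov and a single
        # velocity accumulator created for the parameter mul.x.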
        init_program = framework.Program()
        program = framework.Program()
        block = program.global_block()
        mul_x = block.create_parameter(
            dtype="float32", shape=[5, 10], lod_level=0, name="mul.x")
        mul_y = block.create_var(
            dtype="float32", shape=[10, 8], lod_level=0, name="mul.y")
        mul_out = block.create_var(
            dtype="float32", shape=[5, 8], lod_level=0, name="mul.out")
        block.append_op(
            type="mul",
            inputs={"X": mul_x,
                    "Y": mul_y},
            outputs={"Out": mul_out},
            attrs={"x_num_col_dims": 1})
        learning_rate = 0.01
        momentum_optimizer = self.MockMomentum(
            learning_rate=learning_rate, momentum=0.2)
        mean_out = block.create_var(
            dtype="float32", shape=[1], lod_level=0, name="mean.out")
        block.append_op(
            type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out})
        params_grads = append_backward(mean_out)
        self.assertEqual(len(params_grads), 1)
        self.assertEqual(len(momentum_optimizer.get_accumulators()), 0)
        opts = momentum_optimizer.create_optimization_pass(
            params_grads, mul_out, init_program)
        self.assertEqual(len(opts), 3)
        momentum_op = opts[-1]
        self.assertEqual([op.type for op in opts],
                         ["fill_constant", "elementwise_mul", "momentum"])
        self.assertFalse(momentum_op.attr('use_nesterov'))

        # Check accumulators
        accumulators = momentum_optimizer.get_accumulators()
        self.assertEqual(len(accumulators), 1)
        self.assertTrue(momentum_optimizer.get_velocity_str() in accumulators)
        velocity_acc = accumulators[momentum_optimizer.get_velocity_str()]
        self.assertEqual(len(velocity_acc), 1)
        self.assertTrue(mul_x.name in velocity_acc)

        # Check init_program
        init_ops = init_program.global_block().ops
        self.assertEqual(len(init_ops), 2)
        self.assertEqual(init_ops[0].type, "fill_constant")
        self.assertAlmostEqual(init_ops[0].attr('value'), learning_rate)
        self.assertEqual(init_ops[1].type, "fill_constant")
        self.assertAlmostEqual(init_ops[1].attr('value'), 0.0)

    def test_nesterov_momentum_optimizer(self):
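        # Same setup as the vanilla test, but use_nesterov=True should be
        # carried through to the momentum op.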
        init_program = framework.Program()
        program = framework.Program()
        block = program.global_block()
        mul_x = block.create_parameter(
            dtype="float32", shape=[5, 10], lod_level=0, name="mul.x")
        mul_y = block.create_var(
            dtype="float32", shape=[10, 8], lod_level=0, name="mul.y")
        mul_out = block.create_var(
            dtype="float32", shape=[5, 8], lod_level=0, name="mul.out")
        block.append_op(
            type="mul",
            inputs={"X": mul_x,
                    "Y": mul_y},
            outputs={"Out": mul_out},
            attrs={"x_num_col_dims": 1})
        mean_out = block.create_var(
            dtype="float32", shape=[1], lod_level=0, name="mean.out")
        block.append_op(
            type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out})
        learning_rate = 0.01
        momentum_optimizer = self.MockMomentum(
            learning_rate=learning_rate, momentum=0.2, use_nesterov=True)
        params_grads = append_backward(mean_out)
        self.assertEqual(len(params_grads), 1)
        self.assertEqual(len(momentum_optimizer.get_accumulators()), 0)
        opts = momentum_optimizer.create_optimization_pass(
            params_grads, mul_out, init_program)
        self.assertEqual(len(opts), 3)
        momentum_op = opts[-1]
        self.assertEqual([op.type for op in opts],
                         ["fill_constant", "elementwise_mul", "momentum"])
        self.assertTrue(momentum_op.attr('use_nesterov'))

        # Check accumulators
        accumulators = momentum_optimizer.get_accumulators()
        self.assertEqual(len(accumulators), 1)
        self.assertTrue(momentum_optimizer.get_velocity_str() in accumulators)
        velocity_acc = accumulators[momentum_optimizer.get_velocity_str()]
        self.assertEqual(len(velocity_acc), 1)
        self.assertTrue(mul_x.name in velocity_acc)

        # Check init_program
        init_ops = init_program.global_block().ops
        self.assertEqual(len(init_ops), 2)
        self.assertEqual(init_ops[0].type, "fill_constant")
        self.assertAlmostEqual(init_ops[0].attr('value'), learning_rate)
        self.assertEqual(init_ops[1].type, "fill_constant")
        self.assertAlmostEqual(init_ops[1].attr('value'), 0.0)


class TestAdagradOptimizer(unittest.TestCase):
    class MockAdagrad(optimizer.AdagradOptimizer):
        def get_accumulators(self):
            return self._accumulators

        def get_moment_str(self):
            return self._moment_acc_str

    def test_adagrad_optimizer(self):
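        # Adagrad: expect an adagrad op and one moment accumulator for mul.x.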
        init_program = framework.Program()
        program = framework.Program()
        block = program.global_block()
        mul_x = block.create_parameter(
            dtype="float32", shape=[5, 10], lod_level=0, name="mul.x")
        mul_y = block.create_var(
            dtype="float32", shape=[10, 8], lod_level=0, name="mul.y")
        mul_out = block.create_var(
            dtype="float32", shape=[5, 8], lod_level=0, name="mul.out")
        block.append_op(
            type="mul",
            inputs={"X": mul_x,
                    "Y": mul_y},
            outputs={"Out": mul_out},
            attrs={"x_num_col_dims": 1})
        mean_out = block.create_var(
            dtype="float32", shape=[1], lod_level=0, name="mean.out")
        block.append_op(
            type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out})
        learning_rate = 0.01
        adagrad_optimizer = self.MockAdagrad(
            learning_rate=learning_rate, epsilon=1.0e-6)
        params_grads = append_backward(mean_out)
        self.assertEqual(len(params_grads), 1)
        self.assertEqual(len(adagrad_optimizer.get_accumulators()), 0)
        opts = adagrad_optimizer.create_optimization_pass(params_grads, mul_out,
                                                          init_program)
        self.assertEqual(len(opts), 3)
        self.assertEqual([op.type for op in opts],
                         ["fill_constant", "elementwise_mul", "adagrad"])

        # Check accumulators
        accumulators = adagrad_optimizer.get_accumulators()
        self.assertEqual(len(accumulators), 1)
        self.assertTrue(adagrad_optimizer.get_moment_str() in accumulators)
        moment_acc = accumulators[adagrad_optimizer.get_moment_str()]
        self.assertEqual(len(moment_acc), 1)
        self.assertTrue(mul_x.name in moment_acc)

        # Check init_program
        init_ops = init_program.global_block().ops
        self.assertEqual(len(init_ops), 2)
        self.assertEqual(init_ops[0].type, "fill_constant")
        self.assertAlmostEqual(init_ops[0].attr('value'), learning_rate)
        self.assertEqual(init_ops[1].type, "fill_constant")
        self.assertAlmostEqual(init_ops[1].attr('value'), 0.0)


class TestAdamOptimizer(unittest.TestCase):
    class MockAdam(optimizer.AdamOptimizer):
        def get_accumulators(self):
            return self._accumulators

        def get_moment1_str(self):
            return self._moment1_acc_str

        def get_moment2_str(self):
            return self._moment2_acc_str

    def test_adam_optimizer(self):
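        # Adam: expect an adam op followed by two scale ops, plus moment1 and
        # moment2 accumulators created for the parameter mul.x.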
        init_program = framework.Program()
        program = framework.Program()
        block = program.global_block()
        mul_x = block.create_parameter(
            dtype="float32", shape=[5, 10], lod_level=0, name="mul.x")
        mul_y = block.create_var(
            dtype="float32", shape=[10, 8], lod_level=0, name="mul.y")
        mul_out = block.create_var(
            dtype="float32", shape=[5, 8], lod_level=0, name="mul.out")
        block.append_op(
            type="mul",
            inputs={"X": mul_x,
                    "Y": mul_y},
            outputs={"Out": mul_out},
            attrs={"x_num_col_dims": 1})
        mean_out = block.create_var(
            dtype="float32", shape=[1], lod_level=0, name="mean.out")
        block.append_op(
            type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out})
        learning_rate = 0.01
        adam_optimizer = self.MockAdam(
            learning_rate=learning_rate, beta1=0.9, beta2=0.999)
        params_grads = append_backward(mean_out)
        self.assertEqual(len(params_grads), 1)
        self.assertEqual(len(adam_optimizer.get_accumulators()), 0)
        opts = adam_optimizer.create_optimization_pass(params_grads, mul_out,
                                                       init_program)
        self.assertEqual(len(opts), 5)
        self.assertEqual(
            [op.type for op in opts],
            ["fill_constant", "elementwise_mul", "adam", "scale", "scale"])

        # Check accumulators
        accumulators = adam_optimizer.get_accumulators()
        self.assertEqual(len(accumulators), 2)
        self.assertTrue(adam_optimizer.get_moment1_str() in accumulators)
        self.assertTrue(adam_optimizer.get_moment2_str() in accumulators)
        moment1_acc = accumulators[adam_optimizer.get_moment1_str()]
        moment2_acc = accumulators[adam_optimizer.get_moment2_str()]
        self.assertEqual(len(moment1_acc), 1)
        self.assertEqual(len(moment2_acc), 1)
        self.assertTrue(mul_x.name in moment1_acc)
        self.assertTrue(mul_x.name in moment2_acc)

        # Check init_program
        init_ops = init_program.global_block().ops
        self.assertEqual(len(init_ops), 5)
        self.assertEqual(init_ops[0].type, "fill_constant")
        self.assertAlmostEqual(init_ops[0].attr('value'), learning_rate)


class TestAdamaxOptimizer(unittest.TestCase):
    class MockAdamax(optimizer.AdamaxOptimizer):
        def get_accumulators(self):
            return self._accumulators

        def get_moment_str(self):
            return self._moment_acc_str

        def get_inf_norm_str(self):
            return self._inf_norm_acc_str

    def test_adamax_optimizer(self):
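        # Adamax: expect an adamax op followed by one scale op, plus moment
        # and inf_norm accumulators for the parameter mul.x.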
        init_program = framework.Program()
        program = framework.Program()
        block = program.global_block()
        mul_x = block.create_parameter(
            dtype="float32", shape=[5, 10], lod_level=0, name="mul.x")
        mul_y = block.create_var(
            dtype="float32", shape=[10, 8], lod_level=0, name="mul.y")
        mul_out = block.create_var(
            dtype="float32", shape=[5, 8], lod_level=0, name="mul.out")
        block.append_op(
            type="mul",
            inputs={"X": mul_x,
                    "Y": mul_y},
            outputs={"Out": mul_out},
            attrs={"x_num_col_dims": 1})
        mean_out = block.create_var(
            dtype="float32", shape=[1], lod_level=0, name="mean.out")
        block.append_op(
            type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out})
        learning_rate = 0.01
        adamax_optimizer = self.MockAdamax(
            learning_rate=learning_rate, beta1=0.9, beta2=0.999)
        params_grads = append_backward(mean_out)
        self.assertEqual(len(params_grads), 1)
        self.assertEqual(len(adamax_optimizer.get_accumulators()), 0)
        opts = adamax_optimizer.create_optimization_pass(params_grads, mul_out,
                                                         init_program)
        self.assertEqual(len(opts), 4)
        self.assertEqual(
            [op.type for op in opts],
            ["fill_constant", "elementwise_mul", "adamax", "scale"])

        # Check accumulators
        accumulators = adamax_optimizer.get_accumulators()
        self.assertEqual(len(accumulators), 2)
        self.assertTrue(adamax_optimizer.get_moment_str() in accumulators)
        self.assertTrue(adamax_optimizer.get_inf_norm_str() in accumulators)
        moment_acc = accumulators[adamax_optimizer.get_moment_str()]
        inf_norm_acc = accumulators[adamax_optimizer.get_inf_norm_str()]
        self.assertEqual(len(moment_acc), 1)
        self.assertEqual(len(inf_norm_acc), 1)
        self.assertTrue(mul_x.name in moment_acc)
        self.assertTrue(mul_x.name in inf_norm_acc)

        # Check init_program
        init_ops = init_program.global_block().ops
        self.assertEqual(len(init_ops), 4)
        self.assertEqual(init_ops[0].type, "fill_constant")
        self.assertAlmostEqual(init_ops[0].attr('value'), learning_rate)


class TestDecayedAdagradOptimizer(unittest.TestCase):
    class MockDecayedAdagrad(optimizer.DecayedAdagradOptimizer):
        def get_accumulators(self):
            return self._accumulators

        def get_moment_str(self):
            return self._moment_acc_str

    def test_decayed_adagrad_optimizer(self):
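        # Decayed Adagrad: expect a decayed_adagrad op, one moment
        # accumulator, and the learning rate initialized in init_program.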
        init_program = framework.Program()
        program = framework.Program()
        block = program.global_block()
        mul_x = block.create_parameter(
            dtype="float32", shape=[5, 10], lod_level=0, name="mul.x")
        mul_y = block.create_var(
            dtype="float32", shape=[10, 8], lod_level=0, name="mul.y")
        mul_out = block.create_var(
            dtype="float32", shape=[5, 8], lod_level=0, name="mul.out")
        block.append_op(
            type="mul",
            inputs={"X": mul_x,
                    "Y": mul_y},
            outputs={"Out": mul_out},
            attrs={"x_num_col_dims": 1})
        mean_out = block.create_var(
            dtype="float32", shape=[1], lod_level=0, name="mean.out")
        block.append_op(
            type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out})
        learning_rate = 0.01
        decayed_adagrad_optimizer = self.MockDecayedAdagrad(
            learning_rate=learning_rate, decay=0.95, epsilon=1.0e-6)
        params_grads = append_backward(mean_out)
        self.assertEqual(len(params_grads), 1)
        self.assertEqual(len(decayed_adagrad_optimizer.get_accumulators()), 0)
        opts = decayed_adagrad_optimizer.create_optimization_pass(
            params_grads, mul_out, init_program)
        self.assertEqual(len(opts), 3)
        self.assertEqual(
            [op.type for op in opts],
            ["fill_constant", "elementwise_mul", "decayed_adagrad"])

        # Check accumulators
        accumulators = decayed_adagrad_optimizer.get_accumulators()
        self.assertEqual(len(accumulators), 1)
        self.assertTrue(
            decayed_adagrad_optimizer.get_moment_str() in accumulators)
        moment_acc = accumulators[decayed_adagrad_optimizer.get_moment_str()]
        self.assertEqual(len(moment_acc), 1)
        self.assertTrue(mul_x.name in moment_acc)

        # Check init_program
        init_ops = init_program.global_block().ops
        self.assertEqual(len(init_ops), 2)
        self.assertEqual(init_ops[0].type, "fill_constant")
        self.assertAlmostEqual(init_ops[0].attr('value'), learning_rate)
        self.assertEqual(init_ops[1].type, "fill_constant")
        self.assertAlmostEqual(init_ops[1].attr('value'), 0.0)


if __name__ == '__main__':
    unittest.main()