#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
from op_test import OpTest
from paddle.fluid import core
from paddle.fluid.op import Operator
import paddle.fluid as fluid


class TestAdamOp1(OpTest):
    def setUp(self):
        '''Test Adam Op with supplied attributes
        '''
        self.op_type = "adam"
        param = np.random.uniform(-1, 1, (102, 105)).astype("float32")
        grad = np.random.uniform(-1, 1, (102, 105)).astype("float32")
        moment1 = np.random.uniform(-1, 1, (102, 105)).astype("float32")
        # The second moment is positive
        moment2 = np.random.random((102, 105)).astype("float32")

        learning_rate = 0.004
        beta1 = 0.78
        beta2 = 0.836
        epsilon = 1e-4
        beta1_pow = beta1**10
        beta2_pow = beta2**10

        self.inputs = {
            'Param': param,
            'Grad': grad,
            'Moment1': moment1,
            'Moment2': moment2,
            'LearningRate': np.array([learning_rate]).astype("float32"),
            'Beta1Pow': np.array([beta1_pow]).astype("float32"),
            'Beta2Pow': np.array([beta2_pow]).astype("float32")
        }

        self.attrs = {'epsilon': epsilon, 'beta1': beta1, 'beta2': beta2}

        param_out, moment1_out, \
            moment2_out = adam_step(self.inputs, self.attrs)

        self.outputs = {
            'Moment1Out': moment1_out,
            'Moment2Out': moment2_out,
            'ParamOut': param_out
        }

    def test_check_output(self):
        self.check_output()


class TestAdamOp2(OpTest):
    def setUp(self):
        '''Test Adam Op with default attribute values
        '''
        self.op_type = "adam"
        param = np.random.uniform(-1, 1, (102, 105)).astype("float32")
        grad = np.random.uniform(-1, 1, (102, 105)).astype("float32")
        moment1 = np.random.uniform(-1, 1, (102, 105)).astype("float32")
        # The second moment is positive
        moment2 = np.random.random((102, 105)).astype("float32")

        learning_rate = 0.001
        beta1 = 0.9
        beta2 = 0.999
        epsilon = 1e-8
        beta1_pow = beta1**10
        beta2_pow = beta2**10

        self.inputs = {
            'Param': param,
            'Grad': grad,
            'Moment1': moment1,
            'Moment2': moment2,
            'LearningRate': np.array([learning_rate]).astype("float32"),
            'Beta1Pow': np.array([beta1_pow]).astype("float32"),
            'Beta2Pow': np.array([beta2_pow]).astype("float32")
        }

        attributes = {'epsilon': epsilon, 'beta1': beta1, 'beta2': beta2}

        param_out, moment1_out, \
            moment2_out = adam_step(self.inputs, attributes)

        self.outputs = {
            'Moment1Out': moment1_out,
            'Moment2Out': moment2_out,
            'ParamOut': param_out
        }

    def test_check_output(self):
        self.check_output()


class TestAdamOpMultipleSteps(OpTest):
    def setUp(self):
        '''Test Adam Operator with supplied attributes
        '''
        self.op_type = "adam"
        self.num_steps = 10

        param = np.random.uniform(-1, 1, (102, 105)).astype("float32")
        grad = np.random.uniform(-1, 1, (102, 105)).astype("float32")
        moment1 = np.random.uniform(-1, 1, (102, 105)).astype("float32")
        # The second moment is positive
        moment2 = np.random.random((102, 105)).astype("float32")

        learning_rate = 0.001
        beta1 = 0.9
        beta2 = 0.999
        epsilon = 1e-8
        beta1_pow = beta1**10
        beta2_pow = beta2**10

        self.inputs = {
            'Param': param,
            'Grad': grad,
            'Moment1': moment1,
            'Moment2': moment2,
            'LearningRate': np.array([learning_rate]).astype("float32"),
            'Beta1Pow': np.array([beta1_pow]).astype("float32"),
            'Beta2Pow': np.array([beta2_pow]).astype("float32")
        }

        self.attrs = {'epsilon': epsilon, 'beta1': beta1, 'beta2': beta2}

    def test_check_output(self):
        for _ in range(self.num_steps):
            param_out, moment1_out, \
                moment2_out = adam_step(self.inputs, self.attrs)

            self.outputs = {
                'Moment1Out': moment1_out,
                'Moment2Out': moment2_out,
                'ParamOut': param_out
            }

            # Verify output for this step
            self.check_output()

            # Output of this step becomes input for next step
            self.inputs['Param'] = param_out
            self.inputs['Moment1'] = moment1_out
            self.inputs['Moment2'] = moment2_out

            # Update powers of Beta1 and Beta2 for next time step
            self.inputs['Beta1Pow'] *= self.attrs['beta1']
            self.inputs['Beta2Pow'] *= self.attrs['beta2']

            # Randomize gradient for next step
            self.inputs['Grad'] = np.random.uniform(
                -1, 1, (102, 105)).astype("float32")


def adam_step(inputs, attributes):
    '''
    Simulate one step of the adam optimizer
    :param inputs: dict of inputs
    :param attributes: dict of attributes
    :return tuple: tuple of output param, moment1 and moment2
    '''
    param = inputs['Param']
    grad = inputs['Grad']
    moment1 = inputs['Moment1']
    moment2 = inputs['Moment2']
    lr = inputs['LearningRate']
    beta1_pow = inputs['Beta1Pow']
    beta2_pow = inputs['Beta2Pow']

    epsilon = attributes['epsilon']

    if 'beta1' in attributes:
        beta1 = attributes['beta1']
    else:
        beta1 = inputs['Beta1Tensor'][0]
    if 'beta2' in attributes:
        beta2 = attributes['beta2']
    else:
        beta2 = inputs['Beta2Tensor'][0]

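    # Adam update rule (Kingma & Ba, 2014), as implemented below:
    #   m_t  = beta1 * m_{t-1} + (1 - beta1) * g_t
    #   v_t  = beta2 * v_{t-1} + (1 - beta2) * g_t**2
    #   lr_t = lr * sqrt(1 - beta2_pow) / (1 - beta1_pow)
    #   p_t  = p_{t-1} - lr_t * m_t / (sqrt(v_t) + epsilon)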
    moment1_out = beta1 * moment1 + (1 - beta1) * grad
    moment2_out = beta2 * moment2 + (1 - beta2) * np.square(grad)
    lr_t = lr * np.sqrt(1 - beta2_pow) / (1 - beta1_pow)
    param_out = param - lr_t * (moment1_out / (np.sqrt(moment2_out) + epsilon))
    return param_out, moment1_out, moment2_out
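

# A minimal standalone sketch (illustrative only, not exercised by the test
# suite) of iterating adam_step by hand; the shapes and hyperparameters
# below are arbitrary assumptions made for this example.
def _adam_step_example(num_steps=3):
    beta1, beta2 = 0.9, 0.999
    inputs = {
        'Param': np.zeros((4, 4)).astype("float32"),
        'Grad': np.ones((4, 4)).astype("float32"),
        'Moment1': np.zeros((4, 4)).astype("float32"),
        'Moment2': np.zeros((4, 4)).astype("float32"),
        'LearningRate': np.array([0.001]).astype("float32"),
        'Beta1Pow': np.array([beta1]).astype("float32"),
        'Beta2Pow': np.array([beta2]).astype("float32"),
    }
    attrs = {'epsilon': 1e-8, 'beta1': beta1, 'beta2': beta2}
    for _ in range(num_steps):
        param_out, moment1_out, moment2_out = adam_step(inputs, attrs)
        # Outputs of one step become the inputs of the next, mirroring
        # TestAdamOpMultipleSteps above.
        inputs['Param'] = param_out
        inputs['Moment1'] = moment1_out
        inputs['Moment2'] = moment2_out
        inputs['Beta1Pow'] *= beta1
        inputs['Beta2Pow'] *= beta2
    return inputs['Param']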


def adam_step_sparse(inputs, attributes, height, rows, row_numel, np_grad,
                     lazy_mode):
    '''
    Simulate one step of the adam optimizer on a sparse (SelectedRows)
    gradient
    :param inputs: dict of inputs
    :param attributes: dict of attributes
    :param height: number of rows of the dense parameter
    :param rows: row indices present in the sparse gradient
    :param row_numel: number of elements in each row
    :param np_grad: numpy array holding the gradient values for `rows`
    :param lazy_mode: if True, update only the rows present in the gradient
    :return tuple: tuple of output param, moment1 and moment2
    '''
    param = inputs['Param']
    # grad = inputs['Grad']
    moment1 = inputs['Moment1']
    moment2 = inputs['Moment2']
    lr = inputs['LearningRate']
    beta1_pow = inputs['Beta1Pow']
    beta2_pow = inputs['Beta2Pow']

    beta1 = attributes['beta1']
    beta2 = attributes['beta2']
    epsilon = attributes['epsilon']

    moment1_out = np.zeros(shape=[height, row_numel])
    moment2_out = np.zeros(shape=[height, row_numel])
    param_out = np.zeros(shape=[height, row_numel])

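    # Apply the standard dense Adam update to a single row of the output.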
    def update_row(row_id, update_value):
        moment1_out[row_id] = beta1 * moment1[row_id] + (
            1 - beta1) * update_value
        moment2_out[row_id] = beta2 * moment2[row_id] + (
            1 - beta2) * np.square(update_value)
        lr_t = lr * np.sqrt(1 - beta2_pow) / (1 - beta1_pow)
        param_out[row_id] = param[row_id] - lr_t * (moment1_out[row_id] / (
            np.sqrt(moment2_out[row_id]) + epsilon))

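    # In lazy mode only the rows present in the sparse gradient are updated;
    # otherwise every row is updated, with a zero gradient substituted for
    # rows absent from the SelectedRows input.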
    if lazy_mode:
        for idx, row_id in enumerate(rows):
            update_row(row_id, np_grad[idx])
    else:
        for row_id in range(param_out.shape[0]):
            update_value = np.zeros(np_grad[0].shape).astype("float32")
            if row_id in rows:
                update_value = np_grad[rows.index(row_id)]
            update_row(row_id, update_value)

    return param_out, moment1_out, moment2_out


class TestSparseAdamOp(unittest.TestCase):
    def setup(self, scope, place, lazy_mode):
        beta1 = 0.78
        beta2 = 0.836
        epsilon = 1e-4

        height = 10
        rows = [0, 4, 7]
        self.rows = rows
        row_numel = 12
        self.row_numel = row_numel
        self.dense_inputs = {
            "Param": np.full((height, row_numel), 5.0).astype("float32"),
            "Moment1": np.full((height, row_numel), 5.0).astype("float32"),
            "Moment2": np.full((height, row_numel), 5.0).astype("float32"),
            'Beta1Pow': np.array([beta1**10]).astype("float32"),
            'Beta2Pow': np.array([beta2**10]).astype("float32"),
            "LearningRate": np.full((1), 2.0).astype("float32")
        }
        self.init_output = np.full((height, row_numel), 0.0).astype("float32")
        self.attrs = {
            'epsilon': epsilon,
            'beta1': beta1,
            'beta2': beta2,
            'min_row_size_to_use_multithread': 2
        }

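        # Build a SelectedRows gradient: a sparse representation holding
        # only the listed rows of a (height, row_numel) dense tensor.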
        grad_selected_rows = scope.var('Grad').get_selected_rows()
        grad_selected_rows.set_height(height)
        grad_selected_rows.set_rows(rows)
        np_array = np.ones((len(rows), row_numel)).astype("float32")
        np_array[0, 0] = 2.0
        np_array[2, 8] = 4.0

        grad_tensor = grad_selected_rows.get_tensor()
        grad_tensor.set(np_array, place)

        self.sparse_inputs = ["Grad"]

        param_out, mom1, mom2 = adam_step_sparse(self.dense_inputs, self.attrs,
                                                 height, rows, row_numel,
                                                 np_array, lazy_mode)
        self.outputs = {
            "ParamOut": param_out,
            "Moment1Out": mom1,
            "Moment2Out": mom2
        }

    def check_with_place(self, place, lazy_mode):
        scope = core.Scope()
        self.setup(scope, place, lazy_mode)

        op_args = dict()
        op_args['lazy_mode'] = lazy_mode
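        # Bind every input and output to a scope variable of the same name
        # so the raw Operator can resolve them by name at run time.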
        for key, np_array in self.dense_inputs.items():
            var = scope.var(key).get_tensor()
            var.set(np_array, place)
            op_args[key] = key
        for s in self.sparse_inputs:
            op_args[s] = s
        for s in self.outputs:
            var = scope.var(s).get_tensor()
            var.set(self.init_output, place)
            op_args[s] = s
        for k in self.attrs:
            op_args[k] = self.attrs[k]

        # create and run adam operator
        adam_op = Operator("adam", **op_args)
        adam_op.run(scope, place)

        for key, np_array in self.outputs.items():
            out_var = scope.var(key).get_tensor()
            actual = np.array(out_var)
            actual = actual.reshape([actual.size])
            np_array = np_array.reshape([np_array.size])

            for i in range(np_array.size):
                # Compare against the numpy reference with an absolute
                # tolerance; a signed difference would let large negative
                # errors pass unnoticed.
                self.assertLess(abs(actual[i] - np_array[i]), 0.00001)

    def test_sparse_adam(self):
        places = [core.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(core.CUDAPlace(0))
        for place in places:
            for lazy_mode in (True, False):
                self.check_with_place(place, lazy_mode)


class TestAdamOpBetaVariable(OpTest):
    def setUp(self):
        '''Test Adam Op with beta as Variable
        '''
        self.op_type = "adam"
        param = np.random.uniform(-1, 1, (102, 105)).astype("float32")
        grad = np.random.uniform(-1, 1, (102, 105)).astype("float32")
        moment1 = np.random.uniform(-1, 1, (102, 105)).astype("float32")
        # The second moment is positive
        moment2 = np.random.random((102, 105)).astype("float32")
        beta1 = 0.85
        beta2 = 0.95

        learning_rate = 0.001
        epsilon = 1e-8
        beta1_pow = beta1**10
        beta2_pow = beta2**10

        self.inputs = {
            'Param': param,
            'Grad': grad,
            'Moment1': moment1,
            'Moment2': moment2,
            'LearningRate': np.array([learning_rate]).astype("float32"),
            'Beta1Pow': np.array([beta1_pow]).astype("float32"),
            'Beta2Pow': np.array([beta2_pow]).astype("float32"),
            "Beta1Tensor": np.array([beta1]).astype("float32"),
            "Beta2Tensor": np.array([beta2]).astype("float32"),
        }

        attributes = {'epsilon': epsilon}
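        # beta1/beta2 are deliberately left out of the attributes map, so
        # adam_step falls back to the Beta1Tensor/Beta2Tensor inputs above.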

        param_out, moment1_out, \
            moment2_out = adam_step(self.inputs, attributes)

        self.outputs = {
            'Moment1Out': moment1_out,
            'Moment2Out': moment2_out,
            'ParamOut': param_out
        }

    def test_check_output(self):
        self.check_output()


class TestAdamOptimizerBetaVariable(unittest.TestCase):
    def test_adam_optimizer(self):
        def test_with_place(place, shape):
            exe = fluid.Executor(place)

            train_prog = fluid.Program()
            startup = fluid.Program()
            with fluid.program_guard(train_prog, startup):
                with fluid.unique_name.guard():
                    data = fluid.data(name="data", shape=shape)
                    conv = fluid.layers.conv2d(data, 8, 3)
                    loss = fluid.layers.reduce_mean(conv)

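                    # Pass beta1/beta2 as persistable variables rather than
                    # Python floats to exercise the Variable code path.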
                    beta1 = fluid.layers.create_global_var(
                        shape=[1],
                        value=0.85,
                        dtype='float32',
                        persistable=True)
                    beta2 = fluid.layers.create_global_var(
                        shape=[1],
                        value=0.95,
                        dtype='float32',
                        persistable=True)
                    opt = fluid.optimizer.Adam(
                        learning_rate=1e-5, beta1=beta1, beta2=beta2)
                    opt.minimize(loss)

            exe.run(startup)
            data_np = np.random.random(shape).astype('float32')
            rets = exe.run(train_prog,
                           feed={"data": data_np},
                           fetch_list=[loss])
            assert rets[0] is not None

        shape = [2, 3, 8, 8]
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for place in places:
            test_with_place(place, shape)


if __name__ == "__main__":
    unittest.main()