import unittest
import numpy as np
from op_test import OpTest
from paddle.v2.fluid import core
from paddle.v2.fluid.op import Operator


class TestAdamOp1(OpTest):
    def setUp(self):
        '''Test Adam Op with supplied attributes
        '''
        self.op_type = "adam"
        param = np.random.uniform(-1, 1, (102, 105)).astype("float32")
        grad = np.random.uniform(-1, 1, (102, 105)).astype("float32")
        moment1 = np.random.uniform(-1, 1, (102, 105)).astype("float32")
        # The second moment is positive
        moment2 = np.random.random((102, 105)).astype("float32")

        learning_rate = 0.004
        beta1 = 0.78
        beta2 = 0.836
        epsilon = 1e-4
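        # Power accumulators assume ten optimizer steps were already taken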
        beta1_pow = beta1**10
        beta2_pow = beta2**10

        self.inputs = {
            'Param': param,
            'Grad': grad,
            'Moment1': moment1,
            'Moment2': moment2,
            'LearningRate': np.array([learning_rate]).astype("float32"),
            'Beta1Pow': np.array([beta1_pow]).astype("float32"),
            'Beta2Pow': np.array([beta2_pow]).astype("float32")
        }

        self.attrs = {'epsilon': epsilon, 'beta1': beta1, 'beta2': beta2}

        param_out, moment1_out, \
            moment2_out = adam_step(self.inputs, self.attrs)

        self.outputs = {
            'Moment1Out': moment1_out,
            'Moment2Out': moment2_out,
            'ParamOut': param_out
        }

    def test_check_output(self):
        self.check_output()


class TestAdamOp2(OpTest):
    def setUp(self):
        '''Test Adam Op with default attribute values
        '''
        self.op_type = "adam"
        param = np.random.uniform(-1, 1, (102, 105)).astype("float32")
        grad = np.random.uniform(-1, 1, (102, 105)).astype("float32")
        moment1 = np.random.uniform(-1, 1, (102, 105)).astype("float32")
        # The second moment is positive
        moment2 = np.random.random((102, 105)).astype("float32")

        learning_rate = 0.001
        beta1 = 0.9
        beta2 = 0.999
        epsilon = 1e-8
        beta1_pow = beta1**10
        beta2_pow = beta2**10

        self.inputs = {
            'Param': param,
            'Grad': grad,
            'Moment1': moment1,
            'Moment2': moment2,
            'LearningRate': np.array([learning_rate]).astype("float32"),
            'Beta1Pow': np.array([beta1_pow]).astype("float32"),
            'Beta2Pow': np.array([beta2_pow]).astype("float32")
        }

        attributes = {'epsilon': epsilon, 'beta1': beta1, 'beta2': beta2}

        param_out, moment1_out, \
            moment2_out = adam_step(self.inputs, attributes)

        self.outputs = {
            'Moment1Out': moment1_out,
            'Moment2Out': moment2_out,
            'ParamOut': param_out
        }

    def test_check_output(self):
        self.check_output()


class TestAdamOpMultipleSteps(OpTest):
    def setUp(self):
        '''Test Adam Operator with supplied attributes
        '''
        self.op_type = "adam"
        self.num_steps = 10

        param = np.random.uniform(-1, 1, (102, 105)).astype("float32")
        grad = np.random.uniform(-1, 1, (102, 105)).astype("float32")
        moment1 = np.random.uniform(-1, 1, (102, 105)).astype("float32")
        # The second moment is positive
        moment2 = np.random.random((102, 105)).astype("float32")

        learning_rate = 0.001
        beta1 = 0.9
        beta2 = 0.999
        epsilon = 1e-8
        beta1_pow = beta1**10
        beta2_pow = beta2**10

        self.inputs = {
            'Param': param,
            'Grad': grad,
            'Moment1': moment1,
            'Moment2': moment2,
            'LearningRate': np.array([learning_rate]).astype("float32"),
            'Beta1Pow': np.array([beta1_pow]).astype("float32"),
            'Beta2Pow': np.array([beta2_pow]).astype("float32")
        }

        self.attrs = {'epsilon': epsilon, 'beta1': beta1, 'beta2': beta2}

    def test_check_output(self):
        for _ in range(self.num_steps):
            param_out, moment1_out, \
                moment2_out = adam_step(self.inputs, self.attrs)

            self.outputs = {
                'Moment1Out': moment1_out,
                'Moment2Out': moment2_out,
                'ParamOut': param_out
            }

            # Verify output for this step
            self.check_output()

            # Output of this step becomes input for next step
            self.inputs['Param'] = param_out
            self.inputs['Moment1'] = moment1_out
            self.inputs['Moment2'] = moment2_out

            # Update powers of Beta1 and Beta2 for next time step
            self.inputs['Beta1Pow'] *= self.attrs['beta1']
            self.inputs['Beta2Pow'] *= self.attrs['beta2']

            # Randomize gradient for next step
            self.inputs['Grad'] = np.random.uniform(
                -1, 1, (102, 105)).astype("float32")


def adam_step(inputs, attributes):
    '''
    Simulate one step of the adam optimizer
    :param inputs: dict of inputs
    :param attributes: dict of attributes
    :return tuple: tuple of output param, moment1 and moment2
    '''
    param = inputs['Param']
    grad = inputs['Grad']
    moment1 = inputs['Moment1']
    moment2 = inputs['Moment2']
    lr = inputs['LearningRate']
    beta1_pow = inputs['Beta1Pow']
    beta2_pow = inputs['Beta2Pow']

    beta1 = attributes['beta1']
    beta2 = attributes['beta2']
    epsilon = attributes['epsilon']

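    # Biased first and second moment estimates (exponential moving averages)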
    moment1_out = beta1 * moment1 + (1 - beta1) * grad
    moment2_out = beta2 * moment2 + (1 - beta2) * np.square(grad)
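    # Fold the bias correction of both moments into the learning rate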
    lr_t = lr * np.sqrt(1 - beta2_pow) / (1 - beta1_pow)
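    # Adam parameter update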
    param_out = param - lr_t * (moment1_out / (np.sqrt(moment2_out) + epsilon))
    return param_out, moment1_out, moment2_out


def adam_step_sparse(inputs, attributes, height, rows, row_numel, np_grad):
    '''
    Simulate one step of the adam optimizer on a sparse (SelectedRows) gradient
    :param inputs: dict of dense inputs
    :param attributes: dict of attributes
    :param height: total number of rows in the dense parameter
    :param rows: row indices present in the sparse gradient
    :param row_numel: number of elements in each row
    :param np_grad: numpy array holding the gradient values for the given rows
    :return tuple: tuple of output param, moment1 and moment2
    '''
    param = inputs['Param']
    # The gradient is passed in as np_grad; inputs['Grad'] is not used here
    moment1 = inputs['Moment1']
    moment2 = inputs['Moment2']
    lr = inputs['LearningRate']
    beta1_pow = inputs['Beta1Pow']
    beta2_pow = inputs['Beta2Pow']

    beta1 = attributes['beta1']
    beta2 = attributes['beta2']
    epsilon = attributes['epsilon']

    moment1_out = np.zeros(shape=[height, row_numel])
    moment2_out = np.zeros(shape=[height, row_numel])
    param_out = np.zeros(shape=[height, row_numel])

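    # Only rows present in the sparse gradient are updated; the remaining rows
    # of the output buffers keep their zero initialization.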
    for idx, row_id in enumerate(rows):
        moment1_out[row_id] = beta1 * moment1[row_id] + (
            1 - beta1) * np_grad[idx]
        moment2_out[row_id] = beta2 * moment2[row_id] + (
            1 - beta2) * np.square(np_grad[idx])
        lr_t = lr * np.sqrt(1 - beta2_pow) / (1 - beta1_pow)
        param_out[row_id] = param[row_id] - lr_t * (moment1_out[row_id] / (
            np.sqrt(moment2_out[row_id]) + epsilon))
    return param_out, moment1_out, moment2_out


class TestSparseAdamOp(unittest.TestCase):
    def setup(self, scope, place):
        beta1 = 0.78
        beta2 = 0.836
        epsilon = 1e-4

        height = 10
        rows = [0, 4, 7]
        self.rows = rows
        row_numel = 12
        self.row_numel = row_numel
        self.dense_inputs = {
            "Param": np.full((height, row_numel), 5.0).astype("float32"),
            "Moment1": np.full((height, row_numel), 5.0).astype("float32"),
            "Moment2": np.full((height, row_numel), 5.0).astype("float32"),
            'Beta1Pow': np.array([beta1**10]).astype("float32"),
            'Beta2Pow': np.array([beta2**10]).astype("float32"),
            "LearningRate": np.full((1), 2.0).astype("float32")
        }
        self.attrs = {'epsilon': epsilon, 'beta1': beta1, 'beta2': beta2}

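        # Build the gradient as a SelectedRows variable that stores values for
        # the touched rows only (logical shape: height x row_numel)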
        grad_selected_rows = scope.var('Grad').get_selected_rows()
        grad_selected_rows.set_height(height)
        grad_selected_rows.set_rows(rows)
        np_array = np.ones((len(rows), row_numel)).astype("float32")
        np_array[0, 0] = 2.0
        np_array[2, 8] = 4.0

        grad_tensor = grad_selected_rows.get_tensor()
        grad_tensor.set(np_array, place)

        self.sparse_inputs = ["Grad"]

        param_out, mom1, mom2 = adam_step_sparse(
            self.dense_inputs, self.attrs, height, rows, row_numel, np_array)
        self.outputs = {
            "ParamOut": param_out,
            "Moment1Out": mom1,
            "Moment2Out": mom2
        }

    def check_with_place(self, place):
        scope = core.Scope()
        self.setup(scope, place)

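        # Copy dense inputs into the scope and collect variable names as
        # operator arguments; the sparse gradient was already created in setup()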
        op_args = dict()
        for key, np_array in self.dense_inputs.items():
            var = scope.var(key).get_tensor()
            var.set(np_array, place)
            op_args[key] = key
        for s in self.sparse_inputs:
            op_args[s] = s
        for s in self.outputs:
            var = scope.var(s).get_tensor()
            var.set(self.outputs[s], place)
            op_args[s] = s
        for k in self.attrs:
            op_args[k] = self.attrs[k]

        # create and run the adam operator
        adam_op = Operator("adam", **op_args)
        adam_op.run(scope, place)

        for key, np_array in self.outputs.items():
            out_var = scope.var(key).get_tensor()
            actual = np.array(out_var)
            actual = actual.reshape([actual.size])
            np_array = np_array.reshape([np_array.size])
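            # Only the rows touched by the sparse gradient are compared,
            # element by element, against the numpy reference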
            for idx, row_id in enumerate(self.rows):
                j = 0
                while j < self.row_numel:
                    pos = row_id * self.row_numel + j
                    # Relative error of this element against the numpy result
                    print((actual[pos] - np_array[pos]) / actual[pos])
                    self.assertLess(
                        abs((actual[pos] - np_array[pos]) / actual[pos]),
                        0.00001)
                    j += 1

    def test_sparse_adam(self):
        places = [core.CPUPlace()]
        if core.is_compile_gpu():
            places.append(core.CUDAPlace(0))
        for place in places:
            self.check_with_place(place)


if __name__ == "__main__":
    unittest.main()