#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import contextlib
import random
import unittest
from functools import partial

import numpy as np

import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.fluid.framework as framework
import paddle.fluid.regularizer as regularizer
from paddle.fluid.backward import append_backward


class TestL2DecayRegularizer(unittest.TestCase):
    def test_l2decay_regularizer(self):
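        # Build a tiny static program (mul -> mean), attach an L2 decay
        # regularizer to the parameter, and check that the regularization pass
        # appends the expected scale and sum ops.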
        paddle.enable_static()
        program = framework.Program()
        block = program.global_block()
        mul_x = block.create_parameter(
            dtype="float32",
            shape=[5, 10],
            lod_level=0,
            name="mul.x",
            regularizer=regularizer.L2DecayRegularizer(0.5),
        )
        self.assertIsNotNone(mul_x.regularizer)
        self.assertTrue(
            isinstance(mul_x.regularizer, regularizer.L2DecayRegularizer)
        )
        mul_y = block.create_var(
            dtype="float32", shape=[10, 8], lod_level=0, name="mul.y"
        )
        mul_out = block.create_var(
            dtype="float32", shape=[5, 8], lod_level=0, name="mul.out"
        )
        block.append_op(
            type="mul",
            inputs={"X": mul_x, "Y": mul_y},
            outputs={"Out": mul_out},
            attrs={"x_num_col_dims": 1},
        )
        mean_out = block.create_var(
            dtype="float32", shape=[1], lod_level=0, name="mean.out"
        )
        block.append_op(
            type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out}
        )
        params_grads = append_backward(mean_out)
        self.assertEqual(len(params_grads), 1)
        count_ops = len(block.ops)
        optimizer = paddle.optimizer.Adam()
        params_grads = optimizer.append_regularization_ops(params_grads)
        self.assertEqual(len(params_grads), 1)
        self.assertEqual(len(block.ops), count_ops + 2)
        self.assertEqual(block.ops[-1].type, 'sum')
        self.assertEqual(block.ops[-2].type, 'scale')


class TestL1DecayRegularizer(unittest.TestCase):
    def test_l1decay_regularizer(self):
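        # Same program as the L2 test, but with an L1 decay regularizer: the
        # regularization pass should append sign, scale and sum ops.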
        paddle.enable_static()
        program = framework.Program()
        block = program.global_block()
        mul_x = block.create_parameter(
            dtype="float32",
            shape=[5, 10],
            lod_level=0,
            name="mul.x",
            regularizer=regularizer.L1DecayRegularizer(0.5),
        )
        self.assertIsNotNone(mul_x.regularizer)
        self.assertTrue(
            isinstance(mul_x.regularizer, regularizer.L1DecayRegularizer)
        )
        mul_y = block.create_var(
            dtype="float32", shape=[10, 8], lod_level=0, name="mul.y"
        )
        mul_out = block.create_var(
            dtype="float32", shape=[5, 8], lod_level=0, name="mul.out"
        )
        block.append_op(
            type="mul",
            inputs={"X": mul_x, "Y": mul_y},
            outputs={"Out": mul_out},
            attrs={"x_num_col_dims": 1},
        )
        mean_out = block.create_var(
            dtype="float32", shape=[1], lod_level=0, name="mean.out"
        )
        block.append_op(
            type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out}
        )
        params_grads = append_backward(mean_out)
        self.assertEqual(len(params_grads), 1)
        count_ops = len(block.ops)
        optimizer = paddle.optimizer.Adam()
        params_grads = optimizer.append_regularization_ops(params_grads)
        self.assertEqual(len(params_grads), 1)
        self.assertEqual(len(block.ops), count_ops + 3)
        self.assertEqual(block.ops[-1].type, 'sum')
        self.assertEqual(block.ops[-2].type, 'scale')
        self.assertEqual(block.ops[-3].type, 'sign')


def bow_net(
    data,
    label,
    dict_dim,
    is_sparse=False,
    emb_dim=8,
    hid_dim=8,
    hid_dim2=6,
    class_dim=2,
):
    """
    BOW net
    This model is from https://github.com/PaddlePaddle/models:
    fluid/PaddleNLP/text_classification/nets.py
    """
    emb = fluid.layers.embedding(
        input=data, is_sparse=is_sparse, size=[dict_dim, emb_dim]
    )
    bow = fluid.layers.sequence_pool(input=emb, pool_type='sum')
    bow_tanh = paddle.tanh(bow)
    fc_1 = fluid.layers.fc(input=bow_tanh, size=hid_dim, act="tanh")
    fc_2 = fluid.layers.fc(input=fc_1, size=hid_dim2, act="tanh")
    prediction = fluid.layers.fc(input=[fc_2], size=class_dim, act="softmax")
    cost = fluid.layers.cross_entropy(input=prediction, label=label)
    avg_cost = paddle.mean(x=cost)
    return avg_cost


class TestRegularizer(unittest.TestCase):
    def setUp(self):
        self.word_len = 1500
        self.train_data = [
            [(random.sample(range(1000), 10), [0])] for _ in range(2)
        ]

    def get_places(self):
        places = [core.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(core.CUDAPlace(0))
        return places

    @contextlib.contextmanager
    def scope_prog_guard(self, main_prog, startup_prog):
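        # Give each sub-run its own scope, programs and unique-name guard so
        # that parameter state from one run does not leak into the next.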
        scope = fluid.core.Scope()
        with fluid.unique_name.guard():
            with fluid.scope_guard(scope):
                with fluid.program_guard(main_prog, startup_prog):
                    yield

    def run_program(self, place, feed_list):
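        # Run the startup and main programs over the training data and return
        # the sum of absolute parameter values after each batch.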
        exe = fluid.Executor(place)
        feeder = fluid.DataFeeder(feed_list=feed_list, place=place)
        exe.run(fluid.default_startup_program())

        main_prog = fluid.default_main_program()
        param_list = [var.name for var in main_prog.block(0).all_parameters()]

        param_sum = []
        for data in self.train_data:
            out = exe.run(
                main_prog, feed=feeder.feed(data), fetch_list=param_list
            )
            p_sum = 0
            for v in out:
                p_sum += np.sum(np.abs(v))
            param_sum.append(p_sum)
        return param_sum

    def check_l2decay_regularizer(self, place, model):
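        # Train bow_net with the framework L2Decay regularizer attached to the
        # Adagrad optimizer and return the resulting parameter sums.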
        paddle.seed(1)
        paddle.framework.random._manual_program_seed(1)
        main_prog = fluid.framework.Program()
        startup_prog = fluid.framework.Program()
        with self.scope_prog_guard(
            main_prog=main_prog, startup_prog=startup_prog
        ):
            data = fluid.layers.data(
                name="words", shape=[1], dtype="int64", lod_level=1
            )
            label = fluid.layers.data(name="label", shape=[1], dtype="int64")

            avg_cost = model(data, label, self.word_len)

            optimizer = fluid.optimizer.Adagrad(
                learning_rate=0.1, regularization=fluid.regularizer.L2Decay(1.0)
            )
            optimizer.minimize(avg_cost)
            param_sum = self.run_program(place, [data, label])
        return param_sum

    def check_l2decay(self, place, model):
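        # Reference run: no framework regularizer; instead 0.5 * sum(param^2)
        # is added to the loss by hand, which is equivalent to L2Decay(1.0)
        # above.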
        paddle.seed(1)
        paddle.framework.random._manual_program_seed(1)
        main_prog = fluid.framework.Program()
        startup_prog = fluid.framework.Program()

        with self.scope_prog_guard(
            main_prog=main_prog, startup_prog=startup_prog
        ):
            data = fluid.layers.data(
                name="words", shape=[1], dtype="int64", lod_level=1
            )
            label = fluid.layers.data(name="label", shape=[1], dtype="int64")

            avg_cost_l2 = model(data, label, self.word_len)

            param_list = fluid.default_main_program().block(0).all_parameters()
            para_sum = []
            for para in param_list:
                para_mul = paddle.square(x=para)
                para_sum.append(paddle.sum(para_mul))
            avg_cost_l2 += fluid.layers.sums(para_sum) * 0.5

            optimizer = fluid.optimizer.Adagrad(learning_rate=0.1)
            optimizer.minimize(avg_cost_l2)
            param_sum = self.run_program(place, [data, label])
        return param_sum

    def test_l2(self):
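        # The hand-written penalty and the framework regularizer should produce
        # (numerically) the same parameters, for both sparse and dense
        # embeddings.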
        for place in self.get_places():
            dense_sparse_p_sum = []
            for sparse in [True, False]:
                model = partial(bow_net, is_sparse=sparse)
                framework_l2 = self.check_l2decay_regularizer(place, model)
                l2 = self.check_l2decay(place, model)
                assert len(l2) == len(framework_l2)
                for i in range(len(l2)):
                    assert np.isclose(a=framework_l2[i], b=l2[i], rtol=5e-5)
                dense_sparse_p_sum.append(framework_l2)

            assert len(dense_sparse_p_sum[0]) == len(dense_sparse_p_sum[1])
            for i in range(len(dense_sparse_p_sum[0])):
                assert np.isclose(
                    a=dense_sparse_p_sum[0][i],
                    b=dense_sparse_p_sum[1][i],
                    rtol=5e-5,
                )

    def test_repeated_regularization(self):
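        # When a regularizer is set both in ParamAttr and on the optimizer, the
        # one in ParamAttr must take precedence.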
        l1 = fluid.regularizer.L1Decay(regularization_coeff=0.1)
        l2 = fluid.regularizer.L2Decay(regularization_coeff=0.01)
        fc_param_attr = paddle.ParamAttr(
            regularizer=paddle.regularizer.L1Decay()
        )
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            x = fluid.layers.uniform_random([2, 2, 3])
            out = fluid.layers.fc(x, 5, param_attr=fc_param_attr)
            loss = paddle.sum(out)
            sgd = fluid.optimizer.SGD(learning_rate=0.1, regularization=l2)
            sgd.minimize(loss)
        with fluid.dygraph.guard():
            input = fluid.dygraph.to_variable(
                np.random.randn(3, 2).astype('float32')
            )
            paddle.seed(1)
            paddle.framework.random._manual_program_seed(1)

            linear1 = paddle.nn.Linear(
                2, 2, weight_attr=fc_param_attr, bias_attr=fc_param_attr
            )
            linear2 = paddle.nn.Linear(
                2, 2, weight_attr=fc_param_attr, bias_attr=fc_param_attr
            )

            loss1 = linear1(input)
            loss1.backward()
            # set l2 regularizer in optimizer, but l1 in fluid.ParamAttr

            fluid.optimizer.SGD(
                parameter_list=linear1.parameters(),
                learning_rate=1e-2,
                regularization=l2,
            ).minimize(loss1)
            # only set l1 in fluid.ParamAttr
            loss2 = linear2(input)
            loss2.backward()
            fluid.optimizer.SGD(
                parameter_list=linear2.parameters(), learning_rate=1e-2
            ).minimize(loss2)
            # both should be regularized by l1, so the parameters should stay
            # identical
            np.testing.assert_allclose(
                linear1.weight.numpy(),
                linear2.weight.numpy(),
                rtol=1e-05,
                err_msg='weight should use the regularization in fluid.ParamAttr!',
            )
            np.testing.assert_allclose(
                linear1.bias.numpy(),
                linear2.bias.numpy(),
                rtol=1e-05,
                err_msg='bias should use the regularization in fluid.ParamAttr!',
            )


if __name__ == '__main__':
    unittest.main()