#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import contextlib
import random
import unittest
from functools import partial

import numpy as np

import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.fluid.framework as framework
import paddle.fluid.regularizer as regularizer
from paddle.fluid.backward import append_backward


class TestL2DecayRegularizer(unittest.TestCase):
    def test_l2decay_regularizer(self):
        paddle.enable_static()
        program = framework.Program()
        block = program.global_block()
        mul_x = block.create_parameter(
            dtype="float32",
            shape=[5, 10],
            lod_level=0,
            name="mul.x",
            regularizer=regularizer.L2DecayRegularizer(0.5),
        )
        self.assertIsNotNone(mul_x.regularizer)
        self.assertTrue(
            isinstance(mul_x.regularizer, regularizer.L2DecayRegularizer)
        )
        mul_y = block.create_var(
            dtype="float32", shape=[10, 8], lod_level=0, name="mul.y"
        )
        mul_out = block.create_var(
            dtype="float32", shape=[5, 8], lod_level=0, name="mul.out"
        )
        block.append_op(
            type="mul",
            inputs={"X": mul_x, "Y": mul_y},
            outputs={"Out": mul_out},
            attrs={"x_num_col_dims": 1},
        )
        mean_out = block.create_var(
            dtype="float32", shape=[1], lod_level=0, name="mean.out"
        )
        block.append_op(
            type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out}
        )
        params_grads = append_backward(mean_out)
        self.assertEqual(len(params_grads), 1)
        count_ops = len(block.ops)
        optimizer = paddle.optimizer.Adam()
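        # L2 decay should append two ops for the single regularized parameter:
        # a scale op (decay * param) and a sum op folding it into the gradient.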
        params_grads = optimizer.append_regularization_ops(params_grads)
        self.assertEqual(len(params_grads), 1)
        self.assertEqual(len(block.ops), count_ops + 2)
        self.assertEqual(block.ops[-1].type, 'sum')
        self.assertEqual(block.ops[-2].type, 'scale')


class TestL1DecayRegularizer(unittest.TestCase):
    def test_l1decay_regularizer(self):
        paddle.enable_static()
        program = framework.Program()
        block = program.global_block()
        mul_x = block.create_parameter(
            dtype="float32",
            shape=[5, 10],
            lod_level=0,
            name="mul.x",
            regularizer=regularizer.L1DecayRegularizer(0.5),
        )
        self.assertIsNotNone(mul_x.regularizer)
        self.assertTrue(
            isinstance(mul_x.regularizer, regularizer.L1DecayRegularizer)
        )
        mul_y = block.create_var(
            dtype="float32", shape=[10, 8], lod_level=0, name="mul.y"
        )
        mul_out = block.create_var(
            dtype="float32", shape=[5, 8], lod_level=0, name="mul.out"
        )
        block.append_op(
            type="mul",
            inputs={"X": mul_x, "Y": mul_y},
            outputs={"Out": mul_out},
            attrs={"x_num_col_dims": 1},
        )
        mean_out = block.create_var(
            dtype="float32", shape=[1], lod_level=0, name="mean.out"
        )
        block.append_op(
            type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out}
        )
        params_grads = append_backward(mean_out)
        self.assertEqual(len(params_grads), 1)
        count_ops = len(block.ops)
        optimizer = paddle.optimizer.Adam()
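        # L1 decay should append three ops for the single regularized parameter:
        # sign, scale (decay * sign(param)) and a sum op folding it into the gradient.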
        params_grads = optimizer.append_regularization_ops(params_grads)
        self.assertEqual(len(params_grads), 1)
        self.assertEqual(len(block.ops), count_ops + 3)
        self.assertEqual(block.ops[-1].type, 'sum')
        self.assertEqual(block.ops[-2].type, 'scale')
        self.assertEqual(block.ops[-3].type, 'sign')


def bow_net(
    data,
    label,
    dict_dim,
    is_sparse=False,
    emb_dim=8,
    hid_dim=8,
    hid_dim2=6,
    class_dim=2,
):
    """
    BOW net
    This model is from https://github.com/PaddlePaddle/models:
    fluid/PaddleNLP/text_classification/nets.py
    """
    emb = fluid.layers.embedding(
        input=data, is_sparse=is_sparse, size=[dict_dim, emb_dim]
    )
    bow = paddle.static.nn.sequence_lod.sequence_pool(
        input=emb, pool_type='sum'
    )
    bow_tanh = paddle.tanh(bow)
    fc_1 = paddle.static.nn.fc(x=bow_tanh, size=hid_dim, activation="tanh")
    fc_2 = paddle.static.nn.fc(x=fc_1, size=hid_dim2, activation="tanh")
    prediction = paddle.static.nn.fc(
        x=[fc_2], size=class_dim, activation="softmax"
    )
    cost = paddle.nn.functional.cross_entropy(
        input=prediction, label=label, reduction='none', use_softmax=False
    )
    avg_cost = paddle.mean(x=cost)
    return avg_cost


class TestRegularizer(unittest.TestCase):
    def setUp(self):
        self.word_len = 1500
        self.train_data = [
            [(random.sample(range(1000), 10), [0])] for _ in range(2)
        ]

    def get_places(self):
        places = [core.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(core.CUDAPlace(0))
        return places

    @contextlib.contextmanager
    def scope_prog_guard(self, main_prog, startup_prog):
        scope = fluid.core.Scope()
        with fluid.unique_name.guard():
            with fluid.scope_guard(scope):
                with fluid.program_guard(main_prog, startup_prog):
                    yield

    def run_program(self, place, feed_list):
        exe = fluid.Executor(place)
        feeder = fluid.DataFeeder(feed_list=feed_list, place=place)
        exe.run(fluid.default_startup_program())

        main_prog = fluid.default_main_program()
        param_list = [var.name for var in main_prog.block(0).all_parameters()]

        param_sum = []
        for data in self.train_data:
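            # Fetch every parameter after this batch and record the sum of
            # absolute parameter values for later comparison.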
            out = exe.run(
                main_prog, feed=feeder.feed(data), fetch_list=param_list
            )
            p_sum = 0
            for v in out:
                p_sum += np.sum(np.abs(v))
            param_sum.append(p_sum)
        return param_sum

    def check_l2decay_regularizer(self, place, model):
        paddle.seed(1)
        paddle.framework.random._manual_program_seed(1)
        main_prog = fluid.framework.Program()
        startup_prog = fluid.framework.Program()
        with self.scope_prog_guard(
            main_prog=main_prog, startup_prog=startup_prog
        ):
            data = paddle.static.data(
                name="words", shape=[-1, 1], dtype="int64", lod_level=1
            )
            label = paddle.static.data(
                name="label", shape=[-1, 1], dtype="int64"
            )

            avg_cost = model(data, label, self.word_len)

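            # Apply L2 decay through the optimizer-level regularization argument.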
            optimizer = fluid.optimizer.Adagrad(
                learning_rate=0.1, regularization=fluid.regularizer.L2Decay(1.0)
            )
            optimizer.minimize(avg_cost)
            param_sum = self.run_program(place, [data, label])
        return param_sum

    def check_l2decay(self, place, model):
        paddle.seed(1)
        paddle.framework.random._manual_program_seed(1)
        main_prog = fluid.framework.Program()
        startup_prog = fluid.framework.Program()

        with self.scope_prog_guard(
            main_prog=main_prog, startup_prog=startup_prog
        ):
            data = paddle.static.data(
                name="words", shape=[-1, 1], dtype="int64", lod_level=1
            )
            label = paddle.static.data(
                name="label", shape=[-1, 1], dtype="int64"
            )

            avg_cost_l2 = model(data, label, self.word_len)

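            # Build the L2 penalty by hand (0.5 * sum of squared parameters)
            # as a reference against the framework-applied regularizer.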
            param_list = fluid.default_main_program().block(0).all_parameters()
            para_sum = []
            for para in param_list:
                para_mul = paddle.square(x=para)
                para_sum.append(paddle.sum(para_mul))
            avg_cost_l2 += fluid.layers.sums(para_sum) * 0.5

            optimizer = fluid.optimizer.Adagrad(learning_rate=0.1)
            optimizer.minimize(avg_cost_l2)
            param_sum = self.run_program(place, [data, label])
        return param_sum

    def test_l2(self):
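        # Framework-applied L2 decay should match the manually constructed
        # penalty, for both sparse and dense embedding lookups.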
        for place in self.get_places():
            dense_sparse_p_sum = []
            for sparse in [True, False]:
                model = partial(bow_net, is_sparse=sparse)
                framework_l2 = self.check_l2decay_regularizer(place, model)
                l2 = self.check_l2decay(place, model)
                assert len(l2) == len(framework_l2)
                for i in range(len(l2)):
                    assert np.isclose(a=framework_l2[i], b=l2[i], rtol=5e-5)
                dense_sparse_p_sum.append(framework_l2)

            assert len(dense_sparse_p_sum[0]) == len(dense_sparse_p_sum[1])
            for i in range(len(dense_sparse_p_sum[0])):
                assert np.isclose(
                    a=dense_sparse_p_sum[0][i],
                    b=dense_sparse_p_sum[1][i],
                    rtol=5e-5,
                )

    def test_repeated_regularization(self):
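        # A regularizer set on fluid.ParamAttr should take precedence over the
        # regularization passed to the optimizer for that parameter.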
        l1 = fluid.regularizer.L1Decay(regularization_coeff=0.1)
        l2 = fluid.regularizer.L2Decay(regularization_coeff=0.01)
        fc_param_attr = paddle.ParamAttr(
            regularizer=paddle.regularizer.L1Decay()
        )
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            x = paddle.uniform([2, 2, 3])
            out = paddle.static.nn.fc(x, 5, weight_attr=fc_param_attr)
            loss = paddle.sum(out)
            sgd = fluid.optimizer.SGD(learning_rate=0.1, regularization=l2)
            sgd.minimize(loss)
        with fluid.dygraph.guard():
            input = fluid.dygraph.to_variable(
                np.random.randn(3, 2).astype('float32')
            )
            paddle.seed(1)
            paddle.framework.random._manual_program_seed(1)

            linear1 = paddle.nn.Linear(
                2, 2, weight_attr=fc_param_attr, bias_attr=fc_param_attr
            )
            linear2 = paddle.nn.Linear(
                2, 2, weight_attr=fc_param_attr, bias_attr=fc_param_attr
            )

            loss1 = linear1(input)
            loss1.backward()
            # set l2 regularizer in optimizer, but l1 in fluid.ParamAttr

            fluid.optimizer.SGD(
                parameter_list=linear1.parameters(),
                learning_rate=1e-2,
                regularization=l2,
            ).minimize(loss1)
            # only set l1 in fluid.ParamAttr
            loss2 = linear2(input)
            loss2.backward()
            fluid.optimizer.SGD(
                parameter_list=linear2.parameters(), learning_rate=1e-2
            ).minimize(loss2)
            # both parameters should be regularized by the L1 decay from
            # fluid.ParamAttr, so the results should be identical
            np.testing.assert_allclose(
                linear1.weight.numpy(),
                linear2.weight.numpy(),
                rtol=1e-05,
                err_msg='weight should use the regularization in fluid.ParamAttr!',
            )
            np.testing.assert_allclose(
                linear1.bias.numpy(),
                linear2.bias.numpy(),
                rtol=1e-05,
                err_msg='bias should use the regularization in fluid.ParamAttr!',
            )


if __name__ == '__main__':
    unittest.main()