#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import contextlib
import random
import unittest
from functools import partial

import numpy as np

import paddle
from paddle import fluid, regularizer
from paddle.fluid import core, framework
from paddle.fluid.backward import append_backward


class TestL2Decay(unittest.TestCase):
    def test_l2decay_regularizer(self):
        paddle.enable_static()
        program = framework.Program()
        block = program.global_block()
        mul_x = block.create_parameter(
            dtype="float32",
            shape=[5, 10],
            lod_level=0,
            name="mul.x",
            regularizer=regularizer.L2Decay(0.5),
        )
        self.assertIsNotNone(mul_x.regularizer)
        self.assertIsInstance(mul_x.regularizer, regularizer.L2Decay)
        mul_y = block.create_var(
            dtype="float32", shape=[10, 8], lod_level=0, name="mul.y"
        )
        mul_out = block.create_var(
            dtype="float32", shape=[5, 8], lod_level=0, name="mul.out"
        )
        block.append_op(
            type="mul",
            inputs={"X": mul_x, "Y": mul_y},
            outputs={"Out": mul_out},
            attrs={"x_num_col_dims": 1},
        )
        mean_out = block.create_var(
            dtype="float32", shape=[1], lod_level=0, name="mean.out"
        )
        block.append_op(
            type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out}
        )
        params_grads = append_backward(mean_out)
        self.assertEqual(len(params_grads), 1)
        count_ops = len(block.ops)
        optimizer = paddle.optimizer.Adam()
        params_grads = optimizer.append_regularization_ops(params_grads)
        self.assertEqual(len(params_grads), 1)
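        # With L2 decay, append_regularization_ops adds two ops per parameter:
        # a scale op for the penalty gradient and a sum op that folds it into
        # the parameter gradient.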
        self.assertEqual(len(block.ops), count_ops + 2)
        self.assertEqual(block.ops[-1].type, 'sum')
        self.assertEqual(block.ops[-2].type, 'scale')


class TestL1Decay(unittest.TestCase):
    def test_l1decay_regularizer(self):
        paddle.enable_static()
        program = framework.Program()
        block = program.global_block()
        mul_x = block.create_parameter(
            dtype="float32",
            shape=[5, 10],
            lod_level=0,
            name="mul.x",
            regularizer=regularizer.L1Decay(0.5),
        )
        self.assertIsNotNone(mul_x.regularizer)
        self.assertIsInstance(mul_x.regularizer, regularizer.L1Decay)
        mul_y = block.create_var(
            dtype="float32", shape=[10, 8], lod_level=0, name="mul.y"
        )
        mul_out = block.create_var(
            dtype="float32", shape=[5, 8], lod_level=0, name="mul.out"
        )
        block.append_op(
            type="mul",
            inputs={"X": mul_x, "Y": mul_y},
            outputs={"Out": mul_out},
            attrs={"x_num_col_dims": 1},
        )
        mean_out = block.create_var(
            dtype="float32", shape=[1], lod_level=0, name="mean.out"
        )
        block.append_op(
            type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out}
        )
        params_grads = append_backward(mean_out)
        self.assertEqual(len(params_grads), 1)
        count_ops = len(block.ops)
        optimizer = paddle.optimizer.Adam()
        params_grads = optimizer.append_regularization_ops(params_grads)
        self.assertEqual(len(params_grads), 1)
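        # With L1 decay, append_regularization_ops adds three ops per
        # parameter: sign and scale to build the penalty gradient, then a sum
        # op that folds it into the parameter gradient.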
        self.assertEqual(len(block.ops), count_ops + 3)
        self.assertEqual(block.ops[-1].type, 'sum')
        self.assertEqual(block.ops[-2].type, 'scale')
        self.assertEqual(block.ops[-3].type, 'sign')


def bow_net(
    data,
    label,
    dict_dim,
    is_sparse=False,
    emb_dim=8,
    hid_dim=8,
    hid_dim2=6,
    class_dim=2,
):
    """
    BOW net
    This model is from https://github.com/PaddlePaddle/models:
    fluid/PaddleNLP/text_classification/nets.py
    """
    emb = fluid.layers.embedding(
        input=data, is_sparse=is_sparse, size=[dict_dim, emb_dim]
    )
    bow = paddle.static.nn.sequence_lod.sequence_pool(
        input=emb, pool_type='sum'
    )
    bow_tanh = paddle.tanh(bow)
    fc_1 = paddle.static.nn.fc(x=bow_tanh, size=hid_dim, activation="tanh")
    fc_2 = paddle.static.nn.fc(x=fc_1, size=hid_dim2, activation="tanh")
    prediction = paddle.static.nn.fc(
        x=[fc_2], size=class_dim, activation="softmax"
    )
    cost = paddle.nn.functional.cross_entropy(
        input=prediction, label=label, reduction='none', use_softmax=False
    )
    avg_cost = paddle.mean(x=cost)
    return avg_cost


class TestRegularizer(unittest.TestCase):
    def setUp(self):
        self.word_len = 1500
        self.train_data = [
            [(random.sample(range(1000), 10), [0])] for _ in range(2)
        ]

    def get_places(self):
        places = [core.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(core.CUDAPlace(0))
        return places

    @contextlib.contextmanager
    def scope_prog_guard(self, main_prog, startup_prog):
        scope = fluid.core.Scope()
        with fluid.unique_name.guard():
            with fluid.scope_guard(scope):
                with fluid.program_guard(main_prog, startup_prog):
                    yield

    def run_program(self, place, feed_list):
        exe = fluid.Executor(place)
        feeder = fluid.DataFeeder(feed_list=feed_list, place=place)
        exe.run(fluid.default_startup_program())

        main_prog = fluid.default_main_program()
        param_list = [var.name for var in main_prog.block(0).all_parameters()]

        param_sum = []
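        # Run one step per training sample and record the absolute sum of all
        # parameters after each step.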
        for data in self.train_data:
            out = exe.run(
                main_prog, feed=feeder.feed(data), fetch_list=param_list
            )
            p_sum = 0
            for v in out:
                p_sum += np.sum(np.abs(v))
            param_sum.append(p_sum)
        return param_sum

    def check_l2decay_regularizer(self, place, model):
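        # Train the model with L2 decay applied by the framework through the
        # optimizer's weight_decay argument.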
        paddle.seed(1)
        paddle.framework.random._manual_program_seed(1)
        main_prog = fluid.framework.Program()
        startup_prog = fluid.framework.Program()
        with self.scope_prog_guard(
            main_prog=main_prog, startup_prog=startup_prog
        ):
            data = paddle.static.data(
                name="words", shape=[-1, 1], dtype="int64", lod_level=1
            )
            label = paddle.static.data(
                name="label", shape=[-1, 1], dtype="int64"
            )

            avg_cost = model(data, label, self.word_len)

            optimizer = paddle.optimizer.Adagrad(
                learning_rate=0.1,
                weight_decay=paddle.regularizer.L2Decay(1.0),
            )
            optimizer.minimize(avg_cost)
            param_sum = self.run_program(place, [data, label])
        return param_sum

    def check_l2decay(self, place, model):
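        # Train the same model without framework regularization; the L2
        # penalty is added to the loss by hand instead.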
        paddle.seed(1)
        paddle.framework.random._manual_program_seed(1)
        main_prog = fluid.framework.Program()
        startup_prog = fluid.framework.Program()

        with self.scope_prog_guard(
            main_prog=main_prog, startup_prog=startup_prog
        ):
            data = paddle.static.data(
                name="words", shape=[-1, 1], dtype="int64", lod_level=1
            )
            label = paddle.static.data(
                name="label", shape=[-1, 1], dtype="int64"
            )

            avg_cost_l2 = model(data, label, self.word_len)

            param_list = fluid.default_main_program().block(0).all_parameters()
            para_sum = []
            for para in param_list:
                para_mul = paddle.square(x=para)
                para_sum.append(paddle.sum(para_mul))
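            # 0.5 * sum(w^2) is the penalty implied by L2Decay(1.0), whose
            # gradient contribution is coeff * w.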
            avg_cost_l2 += paddle.add_n(para_sum) * 0.5

            optimizer = paddle.optimizer.Adagrad(learning_rate=0.1)
            optimizer.minimize(avg_cost_l2)
            param_sum = self.run_program(place, [data, label])
        return param_sum

    def test_l2(self):
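        # The framework-applied L2 decay and the manually added penalty should
        # yield the same parameter sums, for both sparse and dense embeddings.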
        for place in self.get_places():
            dense_sparse_p_sum = []
            for sparse in [True, False]:
                model = partial(bow_net, is_sparse=sparse)
                framework_l2 = self.check_l2decay_regularizer(place, model)
                l2 = self.check_l2decay(place, model)
                assert len(l2) == len(framework_l2)
                for i in range(len(l2)):
                    assert np.isclose(a=framework_l2[i], b=l2[i], rtol=5e-5)
                dense_sparse_p_sum.append(framework_l2)

            assert len(dense_sparse_p_sum[0]) == len(dense_sparse_p_sum[1])
            for i in range(len(dense_sparse_p_sum[0])):
                assert np.isclose(
                    a=dense_sparse_p_sum[0][i],
                    b=dense_sparse_p_sum[1][i],
                    rtol=5e-5,
                )

    def test_repeated_regularization(self):
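        # A regularizer set in ParamAttr takes precedence over the one passed
        # to the optimizer.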
        l1 = paddle.regularizer.L1Decay(coeff=0.1)
        l2 = paddle.regularizer.L2Decay(coeff=0.01)
        fc_param_attr = paddle.ParamAttr(
            regularizer=paddle.regularizer.L1Decay()
        )
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            x = paddle.uniform([2, 2, 3])
            out = paddle.static.nn.fc(x, 5, weight_attr=fc_param_attr)
            loss = paddle.sum(out)
            sgd = fluid.optimizer.SGD(learning_rate=0.1, regularization=l2)
            sgd.minimize(loss)
        with fluid.dygraph.guard():
            input = fluid.dygraph.to_variable(
                np.random.randn(3, 2).astype('float32')
            )
            paddle.seed(1)
            paddle.framework.random._manual_program_seed(1)

            linear1 = paddle.nn.Linear(
                2, 2, weight_attr=fc_param_attr, bias_attr=fc_param_attr
            )
            linear2 = paddle.nn.Linear(
                2, 2, weight_attr=fc_param_attr, bias_attr=fc_param_attr
            )

            loss1 = linear1(input)
            loss1.backward()
            # set l2 regularizer in optimizer, but l1 in fluid.ParamAttr

            fluid.optimizer.SGD(
                parameter_list=linear1.parameters(),
                learning_rate=1e-2,
                regularization=l2,
            ).minimize(loss1)
            # only set l1 in fluid.ParamAttr
            loss2 = linear2(input)
            loss2.backward()
            fluid.optimizer.SGD(
                parameter_list=linear2.parameters(), learning_rate=1e-2
            ).minimize(loss2)
            # both linear layers get the L1 regularization from fluid.ParamAttr,
            # so their parameters should stay the same
            np.testing.assert_allclose(
                linear1.weight.numpy(),
                linear2.weight.numpy(),
                rtol=1e-05,
                err_msg='weight should use the regularization in fluid.ParamAttr!',
            )
            np.testing.assert_allclose(
                linear1.bias.numpy(),
                linear2.bias.numpy(),
                rtol=1e-05,
                err_msg='bias should use the regularization in fluid.ParamAttr!',
            )


if __name__ == '__main__':
    unittest.main()