#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
from functools import partial
import contextlib
import numpy as np
import random
import paddle
import paddle.fluid.core as core
import paddle.fluid as fluid
import paddle.fluid.framework as framework
import paddle.fluid.regularizer as regularizer
from paddle.fluid.backward import append_backward


class TestL2DecayRegularizer(unittest.TestCase):
    def test_l2decay_regularizer(self):
        paddle.enable_static()
        program = framework.Program()
        block = program.global_block()
        mul_x = block.create_parameter(
            dtype="float32",
            shape=[5, 10],
            lod_level=0,
            name="mul.x",
            regularizer=regularizer.L2DecayRegularizer(0.5),
        )
        self.assertIsNotNone(mul_x.regularizer)
        self.assertTrue(
            isinstance(mul_x.regularizer, regularizer.L2DecayRegularizer)
        )
        mul_y = block.create_var(
            dtype="float32", shape=[10, 8], lod_level=0, name="mul.y"
        )
        mul_out = block.create_var(
            dtype="float32", shape=[5, 8], lod_level=0, name="mul.out"
        )
        block.append_op(
            type="mul",
            inputs={"X": mul_x, "Y": mul_y},
            outputs={"Out": mul_out},
            attrs={"x_num_col_dims": 1},
        )
        mean_out = block.create_var(
            dtype="float32", shape=[1], lod_level=0, name="mean.out"
        )
        block.append_op(
            type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out}
        )
        params_grads = append_backward(mean_out)
        self.assertEqual(len(params_grads), 1)
        count_ops = len(block.ops)
        optimizer = paddle.optimizer.Adam()
        params_grads = optimizer.append_regularization_ops(params_grads)
        self.assertEqual(len(params_grads), 1)
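        # L2 decay appends two ops per parameter: a scale of the parameter,
        # then a sum that adds the penalty gradient to the original gradient.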
        self.assertEqual(len(block.ops), count_ops + 2)
        self.assertEqual(block.ops[-1].type, 'sum')
        self.assertEqual(block.ops[-2].type, 'scale')


class TestL1DecayRegularizer(unittest.TestCase):
    def test_l1decay_regularizer(self):
        paddle.enable_static()
        program = framework.Program()
        block = program.global_block()
        mul_x = block.create_parameter(
            dtype="float32",
            shape=[5, 10],
            lod_level=0,
            name="mul.x",
            regularizer=regularizer.L1DecayRegularizer(0.5),
        )
        self.assertIsNotNone(mul_x.regularizer)
        self.assertTrue(
            isinstance(mul_x.regularizer, regularizer.L1DecayRegularizer)
        )
        mul_y = block.create_var(
            dtype="float32", shape=[10, 8], lod_level=0, name="mul.y"
        )
        mul_out = block.create_var(
            dtype="float32", shape=[5, 8], lod_level=0, name="mul.out"
        )
        block.append_op(
            type="mul",
            inputs={"X": mul_x, "Y": mul_y},
            outputs={"Out": mul_out},
            attrs={"x_num_col_dims": 1},
        )
        mean_out = block.create_var(
            dtype="float32", shape=[1], lod_level=0, name="mean.out"
        )
        block.append_op(
            type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out}
        )
        params_grads = append_backward(mean_out)
        self.assertEqual(len(params_grads), 1)
        count_ops = len(block.ops)
        optimizer = paddle.optimizer.Adam()
        params_grads = optimizer.append_regularization_ops(params_grads)
        self.assertEqual(len(params_grads), 1)
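        # L1 decay appends three ops per parameter: sign, scale and sum.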
        self.assertEqual(len(block.ops), count_ops + 3)
        self.assertEqual(block.ops[-1].type, 'sum')
        self.assertEqual(block.ops[-2].type, 'scale')
        self.assertEqual(block.ops[-3].type, 'sign')


def bow_net(
    data,
    label,
    dict_dim,
    is_sparse=False,
    emb_dim=8,
    hid_dim=8,
    hid_dim2=6,
    class_dim=2,
):
    """
    BOW net
    This model is from https://github.com/PaddlePaddle/models:
    fluid/PaddleNLP/text_classification/nets.py
    """
    emb = fluid.layers.embedding(
        input=data, is_sparse=is_sparse, size=[dict_dim, emb_dim]
    )
    bow = fluid.layers.sequence_pool(input=emb, pool_type='sum')
    bow_tanh = paddle.tanh(bow)
    fc_1 = fluid.layers.fc(input=bow_tanh, size=hid_dim, act="tanh")
    fc_2 = fluid.layers.fc(input=fc_1, size=hid_dim2, act="tanh")
    prediction = fluid.layers.fc(input=[fc_2], size=class_dim, act="softmax")
    cost = fluid.layers.cross_entropy(input=prediction, label=label)
    avg_cost = paddle.mean(x=cost)
    return avg_cost


class TestRegularizer(unittest.TestCase):
    def setUp(self):
        self.word_len = 1500
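        # two tiny batches, each holding one sample of 10 random word ids with label 0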
        self.train_data = [
            [(random.sample(range(1000), 10), [0])] for _ in range(2)
        ]

    def get_places(self):
        places = [core.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(core.CUDAPlace(0))
        return places

    @contextlib.contextmanager
    def scope_prog_guard(self, main_prog, startup_prog):
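        # run each model in a fresh scope, program and unique-name context so runs stay isolated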
        scope = fluid.core.Scope()
        with fluid.unique_name.guard():
            with fluid.scope_guard(scope):
                with fluid.program_guard(main_prog, startup_prog):
                    yield

    def run_program(self, place, feed_list):
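        # train over self.train_data and return the sum of |param| after each step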
        exe = fluid.Executor(place)
        feeder = fluid.DataFeeder(feed_list=feed_list, place=place)
        exe.run(fluid.default_startup_program())

        main_prog = fluid.default_main_program()
        param_list = [var.name for var in main_prog.block(0).all_parameters()]

        param_sum = []
        for data in self.train_data:
            out = exe.run(
                main_prog, feed=feeder.feed(data), fetch_list=param_list
            )
            p_sum = 0
            for v in out:
                p_sum += np.sum(np.abs(v))
            param_sum.append(p_sum)
        return param_sum

    def check_l2decay_regularizer(self, place, model):
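        # L2 penalty applied by the framework through the optimizer's regularization argument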
        paddle.seed(1)
        paddle.framework.random._manual_program_seed(1)
        main_prog = fluid.framework.Program()
        startup_prog = fluid.framework.Program()
        with self.scope_prog_guard(
            main_prog=main_prog, startup_prog=startup_prog
        ):
            data = fluid.layers.data(
                name="words", shape=[1], dtype="int64", lod_level=1
            )
            label = fluid.layers.data(name="label", shape=[1], dtype="int64")

            avg_cost = model(data, label, self.word_len)

            optimizer = fluid.optimizer.Adagrad(
                learning_rate=0.1, regularization=fluid.regularizer.L2Decay(1.0)
            )
            optimizer.minimize(avg_cost)
            param_sum = self.run_program(place, [data, label])
        return param_sum

    def check_l2decay(self, place, model):
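        # reference run: no framework regularizer; 0.5 * sum(param ** 2) is added to the loss by hand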
        paddle.seed(1)
        paddle.framework.random._manual_program_seed(1)
        main_prog = fluid.framework.Program()
        startup_prog = fluid.framework.Program()

        with self.scope_prog_guard(
            main_prog=main_prog, startup_prog=startup_prog
        ):
            data = fluid.layers.data(
                name="words", shape=[1], dtype="int64", lod_level=1
            )
            label = fluid.layers.data(name="label", shape=[1], dtype="int64")

            avg_cost_l2 = model(data, label, self.word_len)

            param_list = fluid.default_main_program().block(0).all_parameters()
            para_sum = []
            for para in param_list:
                para_mul = paddle.square(x=para)
                para_sum.append(paddle.sum(para_mul))
            avg_cost_l2 += fluid.layers.sums(para_sum) * 0.5

            optimizer = fluid.optimizer.Adagrad(learning_rate=0.1)
            optimizer.minimize(avg_cost_l2)
            param_sum = self.run_program(place, [data, label])
        return param_sum

    def test_l2(self):
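        # the framework-applied L2 decay must match the hand-written penalty,
        # for both sparse and dense embedding lookups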
        for place in self.get_places():
            dense_sparse_p_sum = []
            for sparse in [True, False]:
                model = partial(bow_net, is_sparse=sparse)
                framework_l2 = self.check_l2decay_regularizer(place, model)
                l2 = self.check_l2decay(place, model)
                assert len(l2) == len(framework_l2)
                for i in range(len(l2)):
                    assert np.isclose(a=framework_l2[i], b=l2[i], rtol=5e-5)
                dense_sparse_p_sum.append(framework_l2)

            assert len(dense_sparse_p_sum[0]) == len(dense_sparse_p_sum[1])
            for i in range(len(dense_sparse_p_sum[0])):
                assert np.isclose(
                    a=dense_sparse_p_sum[0][i],
                    b=dense_sparse_p_sum[1][i],
                    rtol=5e-5,
                )

    def test_repeated_regularization(self):
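        # when regularizers are set both in fluid.ParamAttr and in the optimizer,
        # the one in fluid.ParamAttr should take precedence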
        l1 = fluid.regularizer.L1Decay(regularization_coeff=0.1)
        l2 = fluid.regularizer.L2Decay(regularization_coeff=0.01)
        fc_param_attr = fluid.ParamAttr(regularizer=l1)
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            x = fluid.layers.uniform_random([2, 2, 3])
            out = fluid.layers.fc(x, 5, param_attr=fc_param_attr)
            loss = paddle.sum(out)
            sgd = fluid.optimizer.SGD(learning_rate=0.1, regularization=l2)
            sgd.minimize(loss)
        with fluid.dygraph.guard():
            input = fluid.dygraph.to_variable(
                np.random.randn(3, 2).astype('float32')
            )
            paddle.seed(1)
            paddle.framework.random._manual_program_seed(1)

            linear1 = fluid.dygraph.Linear(
                2, 2, param_attr=fc_param_attr, bias_attr=fc_param_attr
            )
            linear2 = fluid.dygraph.Linear(
                2, 2, param_attr=fc_param_attr, bias_attr=fc_param_attr
            )

            loss1 = linear1(input)
            loss1.backward()
            # set l2 regularizer in optimizer, but l1 in fluid.ParamAttr

            fluid.optimizer.SGD(
                parameter_list=linear1.parameters(),
                learning_rate=1e-2,
                regularization=l2,
            ).minimize(loss1)
            # only set l1 in fluid.ParamAttr
            loss2 = linear2(input)
            loss2.backward()
            fluid.optimizer.SGD(
                parameter_list=linear2.parameters(), learning_rate=1e-2
            ).minimize(loss2)
            # l1 from fluid.ParamAttr should be applied in both cases, so the results match
            np.testing.assert_allclose(
                linear1.weight.numpy(),
                linear2.weight.numpy(),
                rtol=1e-05,
                err_msg='weight should use the regularization in fluid.ParamAttr!',
            )
            np.testing.assert_allclose(
                linear1.bias.numpy(),
                linear2.bias.numpy(),
                rtol=1e-05,
                err_msg='bias should use the regularization in fluid.ParamAttr!',
            )


if __name__ == '__main__':
    unittest.main()