#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
from functools import partial
import contextlib
import numpy as np
import random
import paddle
import paddle.fluid.core as core
import paddle.fluid as fluid


def bow_net(data,
            label,
            dict_dim,
            is_sparse=False,
            emb_dim=8,
            hid_dim=8,
            hid_dim2=6,
            class_dim=2):
    """
    BOW net
    This model is from https://github.com/PaddlePaddle/models:
    fluid/PaddleNLP/text_classification/nets.py
    """
    emb = fluid.layers.embedding(input=data,
                                 is_sparse=is_sparse,
                                 size=[dict_dim, emb_dim])
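    # Sum-pool the word embeddings over the sequence to get a single bag-of-words vector.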
    bow = fluid.layers.sequence_pool(input=emb, pool_type='sum')
    bow_tanh = fluid.layers.tanh(bow)
    fc_1 = fluid.layers.fc(input=bow_tanh, size=hid_dim, act="tanh")
    fc_2 = fluid.layers.fc(input=fc_1, size=hid_dim2, act="tanh")
    prediction = fluid.layers.fc(input=[fc_2], size=class_dim, act="softmax")
    cost = fluid.layers.cross_entropy(input=prediction, label=label)
    avg_cost = paddle.mean(x=cost)

    return avg_cost


class TestRegularizer(unittest.TestCase):
    def setUp(self):
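        # Two tiny synthetic batches: each sample is 10 random word ids
        # drawn from a 1000-word vocabulary, paired with label 0.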
        self.word_len = 1500
        self.train_data = [[(random.sample(range(1000), 10), [0])]
                           for _ in range(2)]

    def get_places(self):
        places = [core.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(core.CUDAPlace(0))
        return places

    @contextlib.contextmanager
    def scope_prog_guard(self, main_prog, startup_prog):
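        # Run the enclosed block in a fresh scope, unique-name namespace, and
        # program pair, so individual checks do not leak state into each other.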
        scope = fluid.core.Scope()
        with fluid.unique_name.guard():
            with fluid.scope_guard(scope):
                with fluid.program_guard(main_prog, startup_prog):
                    yield

    def run_program(self, place, feed_list):
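        # Run the startup program, then train on self.train_data, recording the
        # sum of |param| over all parameters after each batch.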
        exe = fluid.Executor(place)
        feeder = fluid.DataFeeder(feed_list=feed_list, place=place)
        exe.run(fluid.default_startup_program())

        main_prog = fluid.default_main_program()
        param_list = [var.name for var in main_prog.block(0).all_parameters()]

        param_sum = []
        for data in self.train_data:
            out = exe.run(main_prog,
                          feed=feeder.feed(data),
                          fetch_list=param_list)
            p_sum = 0
            for v in out:
                p_sum += np.sum(np.abs(v))
            param_sum.append(p_sum)
        return param_sum

    def check_l2decay_regularizer(self, place, model):
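        # Train with L2 decay applied through the optimizer's regularization argument.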
        paddle.seed(1)
        paddle.framework.random._manual_program_seed(1)
        main_prog = fluid.framework.Program()
        startup_prog = fluid.framework.Program()
        with self.scope_prog_guard(main_prog=main_prog,
                                   startup_prog=startup_prog):
            data = fluid.layers.data(name="words",
                                     shape=[1],
                                     dtype="int64",
                                     lod_level=1)
            label = fluid.layers.data(name="label", shape=[1], dtype="int64")

            avg_cost = model(data, label, self.word_len)

            optimizer = fluid.optimizer.Adagrad(
                learning_rate=0.1,
                regularization=paddle.regularizer.L2Decay(1.0))
            optimizer.minimize(avg_cost)
            param_sum = self.run_program(place, [data, label])
        return param_sum

    def check_l2decay(self, place, model):
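        # Reference run: add the L2 penalty 0.5 * sum(param ** 2) to the loss
        # by hand, with no framework regularizer involved.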
        paddle.seed(1)
        paddle.framework.random._manual_program_seed(1)
        main_prog = fluid.framework.Program()
        startup_prog = fluid.framework.Program()

        with self.scope_prog_guard(main_prog=main_prog,
                                   startup_prog=startup_prog):
            data = fluid.layers.data(name="words",
                                     shape=[1],
                                     dtype="int64",
                                     lod_level=1)
            label = fluid.layers.data(name="label", shape=[1], dtype="int64")

            avg_cost_l2 = model(data, label, self.word_len)

            param_list = fluid.default_main_program().block(0).all_parameters()
            para_sum = []
            for para in param_list:
                para_mul = fluid.layers.square(x=para)
                para_sum.append(fluid.layers.reduce_sum(input=para_mul))
            avg_cost_l2 += fluid.layers.sums(para_sum) * .5

            optimizer = fluid.optimizer.Adagrad(learning_rate=0.1)
            optimizer.minimize(avg_cost_l2)
            param_sum = self.run_program(place, [data, label])
        return param_sum

    def test_l2(self):
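        # The framework L2 regularizer and the hand-written penalty must yield
        # the same parameters, for both sparse and dense embedding lookups.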
        paddle.enable_static()
        for place in self.get_places():
            dense_sparse_p_sum = []
            for sparse in [True, False]:
                model = partial(bow_net, is_sparse=sparse)
                framework_l2 = self.check_l2decay_regularizer(place, model)
                l2 = self.check_l2decay(place, model)
                assert len(l2) == len(framework_l2)
                for i in range(len(l2)):
                    assert np.isclose(a=framework_l2[i], b=l2[i], rtol=5e-5)
                dense_sparse_p_sum.append(framework_l2)

            assert len(dense_sparse_p_sum[0]) == len(dense_sparse_p_sum[1])
            for i in range(len(dense_sparse_p_sum[0])):
                assert np.isclose(a=dense_sparse_p_sum[0][i],
                                  b=dense_sparse_p_sum[1][i],
                                  rtol=5e-5)

    def test_repeated_regularization(self):
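        # A regularizer set in fluid.ParamAttr must take precedence over one
        # passed to the optimizer.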
        paddle.enable_static()
        l1 = paddle.regularizer.L1Decay(0.1)
        l2 = paddle.regularizer.L2Decay(0.01)
        fc_param_attr = fluid.ParamAttr(regularizer=l1)
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            x = fluid.layers.uniform_random([2, 2, 3])
            out = fluid.layers.fc(x, 5, param_attr=fc_param_attr)
            loss = fluid.layers.reduce_sum(out)
            sgd = fluid.optimizer.SGD(learning_rate=0.1, regularization=l2)
            sgd.minimize(loss)
        with fluid.dygraph.guard():
            input = fluid.dygraph.to_variable(
                np.random.randn(3, 2).astype('float32'))
            paddle.seed(1)
            paddle.framework.random._manual_program_seed(1)

            linear1 = fluid.dygraph.Linear(2,
                                           2,
                                           param_attr=fc_param_attr,
                                           bias_attr=fc_param_attr)
            linear2 = fluid.dygraph.Linear(2,
                                           2,
                                           param_attr=fc_param_attr,
                                           bias_attr=fc_param_attr)

            loss1 = linear1(input)
            loss1.backward()

            # Set the l2 regularizer in the optimizer, while l1 is already set via fluid.ParamAttr.
            fluid.optimizer.SGD(parameter_list=linear1.parameters(),
                                learning_rate=1e-2,
                                regularization=l2).minimize(loss1)
            # Only the l1 regularizer from fluid.ParamAttr is set; no optimizer-level regularization.
            loss2 = linear2(input)
            loss2.backward()
            fluid.optimizer.SGD(parameter_list=linear2.parameters(),
                                learning_rate=1e-2).minimize(loss2)
            # Both should be regularized by l1 only, so the resulting parameters must match.
            np.testing.assert_allclose(
                linear1.weight.numpy(),
                linear2.weight.numpy(),
                rtol=1e-05,
                err_msg=
                'weight should use the regularization in fluid.ParamAttr!')
            np.testing.assert_allclose(
                linear1.bias.numpy(),
                linear2.bias.numpy(),
                rtol=1e-05,
                err_msg='bias should use the regularization in fluid.ParamAttr!'
            )


if __name__ == '__main__':
    unittest.main()