# test_program_prune_backward.py
#   Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import contextlib
import numpy as np
import paddle.fluid as fluid
import paddle.fluid.core as core
from simple_nets import init_data, simple_fc_net, fc_with_batchnorm
import seresnext_net
from test_parallel_executor_transformer import (
    transformer,
    get_feed_data_reader,
    DeviceType,
)
from fake_reader import fake_imdb_reader
import paddle


def lstm_net(use_feed):
    """Build an LSTM sentiment classifier and return its average loss.

    ``use_feed`` is ignored; it exists only so every model builder in this
    file shares the same signature expected by ``check_prune_correctness``.
    """
    # Model hyper-parameters.
    vocab_size = 5147
    embedding_size = 128
    lstm_size = 128
    fc_size = 96
    num_classes = 2
    embedding_lr = 30.0

    # Variable-length word-id sequences (LoD level 1) and their labels.
    words = fluid.layers.data(
        name="words", shape=[1], dtype="int64", lod_level=1
    )
    labels = fluid.layers.data(name="label", shape=[1], dtype="int64")

    embedded = fluid.layers.embedding(
        input=words,
        size=[vocab_size, embedding_size],
        param_attr=fluid.ParamAttr(learning_rate=embedding_lr),
    )
    # dynamic_lstm expects its input projected to 4 * hidden size (gates).
    projected = fluid.layers.fc(input=embedded, size=lstm_size * 4)
    hidden_seq, _cell = fluid.layers.dynamic_lstm(
        input=projected, size=lstm_size * 4, is_reverse=False
    )
    # Max-pool over time, squash, then classify.
    pooled = fluid.layers.sequence_pool(input=hidden_seq, pool_type='max')
    squashed = paddle.tanh(pooled)
    features = fluid.layers.fc(input=squashed, size=fc_size, act='tanh')
    probabilities = fluid.layers.fc(
        input=features, size=num_classes, act='softmax'
    )
    per_example_loss = fluid.layers.cross_entropy(
        input=probabilities, label=labels
    )
    return paddle.mean(x=per_example_loss)


def simple_fc_net_with_accuracy(use_feed):
    """Four-layer ReLU MLP whose program also contains an accuracy op.

    The accuracy output is deliberately never returned or fetched: the
    prune tests use this builder to check that such dangling ops are
    handled correctly. ``use_feed`` is ignored (kept for a uniform
    builder signature). Returns the mean cross-entropy loss variable.
    """
    image = fluid.layers.data(name='image', shape=[784], dtype='float32')
    target = fluid.layers.data(name='label', shape=[1], dtype='int64')

    feature = image
    for _ in range(4):
        feature = fluid.layers.fc(
            feature,
            size=200,
            act='relu',
            # Constant bias init keeps runs reproducible across programs.
            bias_attr=fluid.ParamAttr(
                initializer=fluid.initializer.Constant(value=1.0)
            ),
        )

    prediction = fluid.layers.fc(feature, size=10, act='softmax')
    per_example_loss = fluid.layers.cross_entropy(
        input=prediction, label=target
    )
    mean_loss = paddle.mean(per_example_loss)
    # Intentionally unfetched op (see docstring).
    fluid.layers.accuracy(input=prediction, label=target, k=5)
    return mean_loss


def cond_net(use_feed=None):
    """Build a network whose loss is computed inside a ``fluid.layers.case``.

    ``use_feed`` is unused; it keeps the signature uniform with the other
    model builders so ``check_prune_correctness`` can call them alike.

    Returns:
        Variable: the case op's output loss. The predicate ``two == 0`` is
        constant-false at runtime (``two`` is filled with 2), so the
        default branch (``loss2``) is the one that executes.
    """
    x = fluid.layers.data(name="x", shape=[4], dtype='float32')
    label = fluid.layers.data('label', shape=[1], dtype='int64')
    prediction = fluid.layers.fc(input=x, size=1, act=None)

    def loss1(pred, label):
        # Branch-local loss: plain cross-entropy over ``pred``.
        # NOTE(review): the extra data layer below is never used —
        # presumably kept on purpose so the prune tests cover dangling
        # ops inside a branch; confirm before removing.
        x = fluid.layers.data(name="x", shape=[4], dtype='float32')
        loss = fluid.layers.cross_entropy(input=pred, label=label)
        avg_loss = paddle.mean(loss, name='mean_cross_entropy_loss')
        return avg_loss

    def loss2(pred, label):
        # Branch-local loss: softmax + cross-entropy fused on the logits.
        loss = fluid.layers.softmax_with_cross_entropy(logits=pred, label=label)
        avg_loss = paddle.mean(loss, name='mean_softmax_loss')
        return avg_loss

    two = fluid.layers.fill_constant([1], 'int32', 2)
    # Constant-false tensor predicate selecting between the branches.
    pred = two == 0
    avg_loss = fluid.layers.case(
        [(pred, lambda: loss1(prediction, label))],
        lambda: loss2(prediction, label),
    )
    return avg_loss


def optimization_in_cond_net(with_optimize=False):
    """Build a network whose branch functions optionally run an optimizer.

    Each ``fluid.layers.case`` branch builds its own loss and, when
    ``with_optimize`` is True, applies ``sgd.minimize`` to it inside the
    branch. Used to verify that ``Program.clone(for_test=True)`` prunes
    optimizer ops that live inside conditional blocks.

    Args:
        with_optimize (bool): whether each branch calls ``opt.minimize``
            on its loss before returning it.

    Returns:
        Variable: the case op's output loss. The predicate ``two == 0`` is
        constant-false at runtime, so the default branch (``loss2``) runs.
    """
    x = fluid.layers.data(name="x", shape=[4], dtype='float32')
    label = fluid.layers.data('label', shape=[1], dtype='int64')
    prediction = fluid.layers.fc(input=x, size=1, act=None)

    def loss1(opt, pred, label, with_optimize):
        # NOTE(review): this extra data layer is never used — presumably
        # intentional to exercise pruning of dangling branch ops; confirm.
        x = fluid.layers.data(name="x", shape=[4], dtype='float32')
        loss = fluid.layers.cross_entropy(input=pred, label=label)
        avg_loss = paddle.mean(loss, name='mean_cross_entropy_loss')
        if with_optimize:
            opt.minimize(avg_loss)
        return avg_loss

    def loss2(opt, pred, label, with_optimize):
        loss = fluid.layers.softmax_with_cross_entropy(logits=pred, label=label)
        avg_loss = paddle.mean(loss, name='mean_softmax_loss')
        if with_optimize:
            opt.minimize(avg_loss)
        return avg_loss

    sgd = fluid.optimizer.SGD(learning_rate=0.1)
    two = fluid.layers.fill_constant([1], 'int32', 2)
    # Constant-false tensor predicate selecting between the branches.
    pred = two == 0
    avg_loss = fluid.layers.case(
        [(pred, lambda: loss1(sgd, prediction, label, with_optimize))],
        lambda: loss2(sgd, prediction, label, with_optimize),
    )
    return avg_loss


class TestProgramPruneBackward(unittest.TestCase):
    """Tests that ``Program.clone(for_test=True)`` prunes backward ops.

    For each model builder, a test-mode clone taken *after*
    ``optimizer().minimize(loss)`` must be structurally identical to a
    clone taken *before* it, and both must produce the same loss.
    """

    def program_compare(self, program_a, program_b):
        """Assert two Programs have identical block/op/variable structure.

        Compares block count, per-block op count, op types pairwise in
        order, variable count, and that every variable name in
        ``program_a`` exists in ``program_b``.
        """
        assert isinstance(
            program_a, fluid.framework.Program
        ), "The first argument should be fluid.framework.Program."
        assert isinstance(
            program_b, fluid.framework.Program
        ), "The second argument should be fluid.framework Program."

        self.assertEqual(len(program_a.blocks), len(program_b.blocks))
        for idx in range(len(program_a.blocks)):
            block_a = program_a.blocks[idx]
            block_b = program_b.blocks[idx]
            self.assertEqual(len(block_a.ops), len(block_b.ops))
            self.assertEqual(len(block_a.vars), len(block_b.vars))
            # Ops must match pairwise by type; order matters.
            for op_idx in range(len(block_a.ops)):
                self.assertEqual(
                    block_a.ops[op_idx].type, block_b.ops[op_idx].type
                )
            for var_key in list(block_a.vars.keys()):
                self.assertTrue(block_b.has_var(var_key))

    def check_prune_correctness(self, method, feed_dict, optimizer):
        """Clone for test before/after minimize() and compare the results.

        Args:
            method: model builder callable; called as ``method(use_feed=False)``
                and must return the loss variable.
            feed_dict: feed data passed to ``Executor.run``.
            optimizer: zero-arg callable returning a fluid optimizer.
        """
        loss = method(use_feed=False)

        main_program = fluid.default_main_program()
        # Clone taken before minimize(): no backward/optimizer ops yet.
        test_prog_orig = main_program.clone(for_test=True)
        optimizer().minimize(loss)
        # Clone taken after minimize(): backward ops must be pruned away.
        test_prog_prune = main_program.clone(for_test=True)

        self.program_compare(test_prog_orig, test_prog_prune)

        places = [core.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(core.CUDAPlace(0))

        for place in places:
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())

            # Same parameters and feed, so both clones must agree on loss.
            (loss_data_prune,) = exe.run(
                test_prog_prune, feed=feed_dict, fetch_list=[loss.name]
            )
            (loss_data_orig,) = exe.run(
                test_prog_orig, feed=feed_dict, fetch_list=[loss.name]
            )
            self.assertEqual(loss_data_orig, loss_data_prune)

    def test_simple_fc_net(self):
        """Prune check on the plain fully-connected net (SGD + L2 decay)."""
        def optimizer():
            optimizer = fluid.optimizer.SGD(
                learning_rate=0.001,
                regularization=fluid.regularizer.L2Decay(1e-4),
            )
            return optimizer

        with self.program_scope_guard():
            img, label = init_data()
            self.check_prune_correctness(
                method=simple_fc_net,
                feed_dict={"image": img, "label": label},
                optimizer=optimizer,
            )

    def test_simple_fc_net_with_accuracy(self):
        """Prune check on a net with an unfetched accuracy op."""
        def optimizer():
            optimizer = fluid.optimizer.SGD(
                learning_rate=0.001,
                regularization=fluid.regularizer.L2Decay(1e-4),
            )
            return optimizer

        with self.program_scope_guard():
            img, label = init_data()
            self.check_prune_correctness(
                method=simple_fc_net_with_accuracy,
                feed_dict={"image": img, "label": label},
                optimizer=optimizer,
            )

    def test_batchnorm_fc(self):
        """Prune check on a fully-connected net containing batch norm."""
        def optimizer():
            optimizer = fluid.optimizer.SGD(
                learning_rate=0.001,
                regularization=fluid.regularizer.L2Decay(1e-4),
            )
            return optimizer

        with self.program_scope_guard():
            img, label = init_data()
            self.check_prune_correctness(
                method=fc_with_batchnorm,
                feed_dict={"image": img, "label": label},
                optimizer=optimizer,
            )

    def test_seresnet(self):
        """Prune check on the SE-ResNeXt model from seresnext_net."""
        with self.program_scope_guard():
            self.check_prune_correctness(
                method=seresnext_net.model,
                feed_dict=seresnext_net.feed_dict(use_device=DeviceType.CPU),
                optimizer=seresnext_net.optimizer,
            )

    def test_transformer(self):
        """Prune check on the transformer model (Adam + L2 decay)."""
        def optimizer():
            optimizer = fluid.optimizer.Adam(
                learning_rate=0.001,
                regularization=fluid.regularizer.L2Decay(1e-4),
            )
            return optimizer

        with self.program_scope_guard():
            # the program argument is used to distinguish Program and CompiledProgram
            feed_dict = get_feed_data_reader().get_next(
                fluid.Executor(core.CPUPlace()), fluid.default_main_program()
            )
            self.check_prune_correctness(
                method=transformer, feed_dict=feed_dict, optimizer=optimizer
            )

    def test_lstm(self):
        """Prune check on the LSTM text classifier (Adagrad + L2 decay)."""
        def optimizer():
            optimizer = fluid.optimizer.Adagrad(
                learning_rate=0.001,
                regularization=fluid.regularizer.L2Decay(1e-4),
            )
            return optimizer

        with self.program_scope_guard():
            word_dict_size = 5147
            reader = fake_imdb_reader(word_dict_size, 1)
            # Build feed data through a DataFeeder so the LoD-level-1
            # "words" input is converted correctly.
            data = fluid.layers.data(
                name="words", shape=[1], dtype="int64", lod_level=1
            )
            label = fluid.layers.data(name="label", shape=[1], dtype="int64")
            feeder = fluid.DataFeeder(
                feed_list=[data, label], place=core.CPUPlace()
            )
            feed_data = feeder.feed(reader())
            self.check_prune_correctness(
                method=lstm_net, feed_dict=feed_data, optimizer=optimizer
            )

    def test_cond(self):
        """Prune check on a network built inside fluid.layers.case."""
        def optimizer():
            optimizer = fluid.optimizer.SGD(learning_rate=0.01)
            return optimizer

        with self.program_scope_guard():
            x_in = np.random.random(size=(10, 4)).astype('float32')
            label_in = np.random.randint(1, size=(10, 1)).astype('int64')
            feed_dict = {'x': x_in, 'label': label_in}
            self.check_prune_correctness(
                method=cond_net, feed_dict=feed_dict, optimizer=optimizer
            )

    def test_optimization_in_cond(self):
        """minimize() inside cond branches must be pruned by clone(for_test).

        Builds the network twice (without and with in-branch optimization),
        clones each for test, and checks both structure and loss agree.
        """
        x_in = np.random.random(size=(10, 4)).astype('float32')
        label_in = np.random.randint(1, size=(10, 1)).astype('int64')
        feed_dict = {'x': x_in, 'label': label_in}
        with self.program_scope_guard():
            loss = optimization_in_cond_net(False)
            main_program = fluid.default_main_program()
            test_prog_orig = main_program.clone(for_test=True)
            place = core.CPUPlace()
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())
            (loss_data_orig,) = exe.run(
                test_prog_orig, feed=feed_dict, fetch_list=[loss.name]
            )

        with self.program_scope_guard():
            loss = optimization_in_cond_net(True)
            main_program = fluid.default_main_program()
            test_prog_prune = main_program.clone(for_test=True)

            place = core.CPUPlace()
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())
            (loss_data_prune,) = exe.run(
                test_prog_prune, feed=feed_dict, fetch_list=[loss.name]
            )

        self.program_compare(test_prog_orig, test_prog_prune)
        self.assertEqual(loss_data_orig, loss_data_prune)

    @contextlib.contextmanager
    def program_scope_guard(self):
        """Run the body under fresh programs, scope, and unique names.

        Isolates each test case: a new main/startup Program pair, a new
        variable Scope, and a reset unique-name generator.
        """
        prog = fluid.Program()
        startup_prog = fluid.Program()
        scope = fluid.core.Scope()
        with fluid.scope_guard(scope):
            with fluid.program_guard(prog, startup_prog):
                with fluid.unique_name.guard():
                    yield


if __name__ == '__main__':
    # These tests exercise static-graph Program APIs, so static mode
    # must be enabled before any network is built.
    paddle.enable_static()
    unittest.main()