# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
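
"""Tests for the ``fuse_all_optimizer_ops`` build-strategy pass.

Each test trains the same network twice, once with optimizer-op fusion
disabled and once with it enabled, and asserts that both runs produce
(almost) identical first- and last-iteration losses.

A minimal sketch of how the pass is enabled outside this test harness,
assuming the classic ``fluid.CompiledProgram`` API (``loss`` below stands
for a hypothetical loss variable)::

    build_strategy = fluid.BuildStrategy()
    build_strategy.fuse_all_optimizer_ops = True
    compiled_prog = fluid.CompiledProgram(
        fluid.default_main_program()).with_data_parallel(
            loss_name=loss.name, build_strategy=build_strategy)
"""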

from simple_nets import simple_fc_net, fc_with_batchnorm, init_data, bow_net
from fake_reader import fake_imdb_reader
from parallel_executor_test_base import TestParallelExecutorBase, DeviceType
from functools import partial
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import unittest
import os


class TestFuseOptimizationOps(TestParallelExecutorBase):

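    # Run the CPU variants with 4 parallel places.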
    @classmethod
    def setUpClass(cls):
        os.environ['CPU_NUM'] = str(4)

    def _get_feed_dict(self):
        img, label = init_data()
        return {"image": img, "label": label}

    def _compare_fused_optimizer_ops(self,
                                     model,
                                     use_device,
                                     feed_dict=None,
                                     get_data_from_feeder=None,
                                     optimizer=fluid.optimizer.Adam):
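        """Train ``model`` twice, with and without fused optimizer ops,
        and check that both runs converge to nearly identical losses."""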
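        # CUDA variants are skipped when Paddle was built without CUDA.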
        if use_device == DeviceType.CUDA and not core.is_compiled_with_cuda():
            return

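        # Baseline run: optimizer ops are left unfused.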
        not_fuse_op_first_loss, not_fuse_op_last_loss, _ = self.check_network_convergence(
            model,
            feed_dict=feed_dict,
            get_data_from_feeder=get_data_from_feeder,
            use_device=use_device,
            fuse_all_optimizer_ops=False,
            optimizer=optimizer)
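        # Comparison run: the fuse_all_optimizer_ops pass rewrites the graph.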
        fuse_op_first_loss, fuse_op_last_loss, _ = self.check_network_convergence(
            model,
            feed_dict=feed_dict,
            get_data_from_feeder=get_data_from_feeder,
            use_device=use_device,
            fuse_all_optimizer_ops=True,
            optimizer=optimizer)

        # Fusion must not change convergence.
        for loss in zip(not_fuse_op_first_loss, fuse_op_first_loss):
            self.assertAlmostEqual(loss[0], loss[1], delta=1e-6)
        for loss in zip(not_fuse_op_last_loss, fuse_op_last_loss):
            self.assertAlmostEqual(loss[0], loss[1], delta=1e-6)

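    # Dense models feed an in-memory feed dict built by init_data().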
    def _decorate_compare_fused_optimizer_ops(self, model, use_device,
                                              optimizer):
        self._compare_fused_optimizer_ops(model,
                                          use_device,
                                          feed_dict=self._get_feed_dict(),
                                          optimizer=optimizer)


class TestFuseAdamOps(TestFuseOptimizationOps):

    def optimizer(self, learning_rate=1e-4):
        return fluid.optimizer.Adam(learning_rate=learning_rate)

    def test_batchnorm_fc_with_fuse_op(self):
        self._decorate_compare_fused_optimizer_ops(fc_with_batchnorm,
                                                   DeviceType.CUDA,
                                                   optimizer=self.optimizer)
        self._decorate_compare_fused_optimizer_ops(fc_with_batchnorm,
                                                   DeviceType.CPU,
                                                   optimizer=self.optimizer)


class TestFuseSGDOps(TestFuseAdamOps):

    def optimizer(self, learning_rate=1e-3):
        return fluid.optimizer.SGD(learning_rate=learning_rate)


class TestFuseMomentumOps(TestFuseAdamOps):

    def optimizer(self, learning_rate=1e-3):
        return fluid.optimizer.Momentum(learning_rate=learning_rate,
                                        momentum=0.1)


class TestSparseFuseAdamOps(TestFuseOptimizationOps):

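    # Build one batch of fake IMDB data up front; every bow_net test
    # feeds this same batch through a DataFeeder.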
    @classmethod
    def setUpClass(cls):
        os.environ['CPU_NUM'] = str(4)
        cls.word_dict_len = 5147
        batch_size = 64
        reader = fake_imdb_reader(cls.word_dict_len, batch_size * 100)
        reader = paddle.batch(reader, batch_size=batch_size)()
        cls.train_data = next(reader)

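    # Feed the cached batch through a DataFeeder on CPU.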
    def _get_data_from_feeder(self):
        place = fluid.CPUPlace()
        feeder = fluid.DataFeeder(feed_list=["words", "label"], place=place)
        return feeder.feed(self.train_data)

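    # Sparse models pull data from a DataFeeder instead of a feed dict.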
    def _decorate_compare_fused_optimizer_ops(self, model, use_device,
                                              optimizer):
        self._compare_fused_optimizer_ops(
            model,
            use_device,
            get_data_from_feeder=self._get_data_from_feeder,
            optimizer=optimizer)

    def optimizer(self, learning_rate=1e-4):
        return fluid.optimizer.Adam(learning_rate=learning_rate)

    def test_simple_bow_net_with_fuse_op(self):
        model = partial(bow_net, dict_dim=self.word_dict_len, is_sparse=True)
        self._decorate_compare_fused_optimizer_ops(model,
                                                   DeviceType.CUDA,
                                                   optimizer=self.optimizer)
        self._decorate_compare_fused_optimizer_ops(model,
                                                   DeviceType.CPU,
                                                   optimizer=self.optimizer)


class TestSparseFuseSGDOps(TestSparseFuseAdamOps):

    def optimizer(self, learning_rate=1e-3):
        return fluid.optimizer.SGD(learning_rate=learning_rate)


class TestSparseFuseMomentumOps(TestSparseFuseAdamOps):

    def optimizer(self, learning_rate=1e-3):
        return fluid.optimizer.Momentum(learning_rate=learning_rate,
                                        momentum=0.1)


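# The conflict tests reuse the fused-optimizer entry points but call
# check_pass_conflict, which only verifies that the program builds when
# optimizer-op fusion is combined with other build-strategy options.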
class TestPassConflictBase(TestFuseAdamOps):

    def _compare_fused_optimizer_ops(self,
                                     model,
                                     use_device,
                                     feed_dict=None,
                                     get_data_from_feeder=None,
                                     optimizer=fluid.optimizer.Adam):
        if use_device == DeviceType.CUDA and not core.is_compiled_with_cuda():
            return

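        # Requesting fusion together with sequential execution must not
        # make the build-strategy passes conflict.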
        self.check_pass_conflict(model,
                                 feed_dict=feed_dict,
                                 get_data_from_feeder=get_data_from_feeder,
                                 use_device=use_device,
                                 fuse_all_optimizer_ops=True,
                                 optimizer=optimizer,
                                 enable_sequential_execution=True)


class TestFuseAdamOpsPassConflict(TestPassConflictBase):

    def optimizer(self, learning_rate=1e-4):
        return fluid.optimizer.Adam(learning_rate=learning_rate)

    def test_batchnorm_fc_with_fuse_op(self):
        self._decorate_compare_fused_optimizer_ops(fc_with_batchnorm,
                                                   DeviceType.CPU,
                                                   optimizer=self.optimizer)
        self._decorate_compare_fused_optimizer_ops(fc_with_batchnorm,
                                                   DeviceType.CUDA,
                                                   optimizer=self.optimizer)


class TestFuseSGDOpsPassConflict(TestFuseAdamOpsPassConflict):

    def optimizer(self, learning_rate=1e-3):
        return fluid.optimizer.SGD(learning_rate=learning_rate)


class TestFuseMomentumOpsPassConflict(TestFuseAdamOpsPassConflict):

    def optimizer(self, learning_rate=1e-3):
        return fluid.optimizer.Momentum(learning_rate=learning_rate,
                                        momentum=0.1)


if __name__ == '__main__':
    unittest.main()