# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import os
import tempfile
import unittest
import warnings

import numpy as np

import paddle


class SimpleNet(paddle.nn.Layer):
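    """A minimal one-conv network, just enough to exercise kernel autotuning."""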

    def __init__(self):
        super(SimpleNet, self).__init__()
        self.conv = paddle.nn.Conv2D(1, 2, (3, 3))

    def forward(self, image, label=None):
        return self.conv(image)


def train_dygraph(net, data):
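    # Run one forward/backward/optimizer step in dygraph mode.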
    data.stop_gradient = False
    out = net(data)
    loss = paddle.mean(out)
    adam = paddle.optimizer.Adam(parameters=net.parameters())
    loss.backward()
    adam.step()
    adam.clear_grad()


def static_program(net, data):
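    # Build the forward, loss, and optimizer ops; return the loss to fetch.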
    data.stop_gradient = False
    out = net(data)
    loss = paddle.mean(out)
    adam = paddle.optimizer.Adam()
    adam.minimize(loss)
    return loss


class TestAutoTune(unittest.TestCase):

    def set_flags(self, enable_autotune):
        if paddle.is_compiled_with_cuda():
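            # -1 removes the cuDNN conv workspace cap so the exhaustive
            # search can try every algorithm; 512 restores a bounded limit.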
            if enable_autotune:
                paddle.set_flags({'FLAGS_conv_workspace_size_limit': -1})
            else:
                paddle.set_flags({'FLAGS_conv_workspace_size_limit': 512})

    def get_flags(self, name):
        res = paddle.get_flags(name)
        return res[name]

    def get_expected_res(self, step_id, enable_autotune):
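        # Expected autotune cache statistics after training step `step_id`.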
        expected_res = {
            "step_id": step_id,
            "cache_size": 0,
            "cache_hit_rate": 0
        }
        if paddle.is_compiled_with_cuda():
            # Each step makes 3 cache accesses. All 3 entries are written in
            # step 0 and hit in every later step, so after step `step_id`
            # the hit rate is step_id / (step_id + 1).
            expected_res["cache_size"] = 3
            expected_res["cache_hit_rate"] = (step_id + 0.0) / (step_id + 1.0)
        return expected_res

    def test_autotune(self):
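        # set_config should directly toggle the FLAGS_use_autotune flag.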
        paddle.incubate.autotune.set_config(
            config={"kernel": {"enable": False}})
        self.assertEqual(self.get_flags("FLAGS_use_autotune"), False)

        paddle.incubate.autotune.set_config(config={"kernel": {"enable": True}})
        self.assertEqual(self.get_flags("FLAGS_use_autotune"), True)

    def check_status(self, expected_res):
        status = paddle.fluid.core.autotune_status()
        for key in status.keys():
            v = status[key]
92
            if key == "cache_hit_rate":
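                # The hit rate is a float; compare it with a tolerance.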
                np.testing.assert_allclose(v, expected_res[key])
            else:
                np.testing.assert_array_equal(v, expected_res[key])


class TestDygraphAutoTuneStatus(TestAutoTune):

    def run_program(self, enable_autotune):
        self.set_flags(enable_autotune)
        if enable_autotune:
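            # Restrict kernel tuning to steps 1 through 2 via tuning_range.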
            paddle.incubate.autotune.set_config(
                config={"kernel": {"enable": True, "tuning_range": [1, 2]}})
        else:
            paddle.incubate.autotune.set_config(
                config={"kernel": {"enable": False}})
        x_var = paddle.uniform((1, 1, 8, 8), dtype='float32', min=-1., max=1.)
        net = SimpleNet()
        for i in range(3):
            train_dygraph(net, x_var)
            expected_res = self.get_expected_res(i, enable_autotune)
            self.check_status(expected_res)

    def func_enable_autotune(self):
        self.run_program(enable_autotune=True)

    def test_enable_autotune(self):
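        # Run once under the experimental eager mode, then in legacy dygraph.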
        with paddle.fluid.framework._test_eager_guard():
            self.func_enable_autotune()
        self.func_enable_autotune()

    def func_disable_autotune(self):
        self.run_program(enable_autotune=False)

    def test_disable_autotune(self):
        with paddle.fluid.framework._test_eager_guard():
            self.func_disable_autotune()
        self.func_disable_autotune()


class TestStaticAutoTuneStatus(TestAutoTune):

    def run_program(self, enable_autotune):
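        # Build a tiny static-graph program and check the autotune status
        # after every executor step.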
        paddle.enable_static()

        data_shape = [1, 1, 8, 8]
        main_program = paddle.static.Program()
        startup_program = paddle.static.Program()
        with paddle.static.program_guard(main_program, startup_program):
            data = paddle.static.data(
                name='X', shape=data_shape, dtype='float32')
            net = SimpleNet()
            loss = static_program(net, data)
        place = (paddle.CUDAPlace(0)
                 if paddle.is_compiled_with_cuda() else paddle.CPUPlace())
        exe = paddle.static.Executor(place)
        exe.run(startup_program)
        x = np.random.random(size=data_shape).astype('float32')

        self.set_flags(enable_autotune)
        if enable_autotune:
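            # Exercise the file-based path: set_config also accepts the
            # filename of a JSON config.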
            config = {"kernel": {"enable": True, "tuning_range": [1, 2]}}
            tfile = tempfile.NamedTemporaryFile(mode="w+", delete=False)
            json.dump(config, tfile)
            tfile.close()
            paddle.incubate.autotune.set_config(tfile.name)
            os.remove(tfile.name)
        else:
            paddle.incubate.autotune.set_config(
                config={"kernel": {"enable": False, "tuning_range": [1, 2]}})

        for i in range(3):
            exe.run(program=main_program, feed={'X': x}, fetch_list=[loss])
            expected_res = self.get_expected_res(i, enable_autotune)
            self.check_status(expected_res)
        paddle.disable_static()

    def func_enable_autotune(self):
        self.run_program(enable_autotune=True)

    def test_enable_autotune(self):
        self.func_enable_autotune()

    def func_disable_autotune(self):
        self.run_program(enable_autotune=False)

    def test_disable_autotune(self):
        self.func_disable_autotune()


class TestAutoTuneAPI(unittest.TestCase):

    def test_set_config_warnings(self):
        with warnings.catch_warnings(record=True) as w:
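            # Both values have the wrong type (enable should be a bool and
            # tuning_range a list), so set_config should warn twice.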
            config = {"kernel": {"enable": 1, "tuning_range": 1}}
            tfile = tempfile.NamedTemporaryFile(mode="w+", delete=False)
            json.dump(config, tfile)
            tfile.close()
            paddle.incubate.autotune.set_config(tfile.name)
            os.remove(tfile.name)
            self.assertEqual(len(w), 2)

    def test_set_config_attr(self):
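        # With no explicit config, set_config enables autotuning by default.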
        paddle.incubate.autotune.set_config(config=None)
        self.assertEqual(
            paddle.get_flags("FLAGS_use_autotune")["FLAGS_use_autotune"], True)


if __name__ == '__main__':
    unittest.main()