# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import os
import tempfile
import unittest
import warnings

import numpy as np

import paddle


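# A minimal single-conv network used to exercise the conv algorithm cache.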
class SimpleNet(paddle.nn.Layer):
    def __init__(self):
        super().__init__()
        self.conv = paddle.nn.Conv2D(1, 2, (3, 3))

    def forward(self, image, label=None):
        return self.conv(image)


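# Run one dygraph training step (forward, backward, optimizer update).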
def train_dygraph(net, data):
    data.stop_gradient = False
    out = net(data)
    loss = paddle.mean(out)
    adam = paddle.optimizer.Adam(parameters=net.parameters())
    loss.backward()
    adam.step()
    adam.clear_grad()


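# Build the static-graph training program for SimpleNet and return the loss.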
def static_program(net, data):
    data.stop_gradient = False
    out = net(data)
    loss = paddle.mean(out)
    adam = paddle.optimizer.Adam()
    adam.minimize(loss)
    return loss


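# Shared helpers: toggle the autotune-related flags and compare
# paddle.fluid.core.autotune_status() against the expected cache statistics.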
class TestAutoTune(unittest.TestCase):
    def set_flags(self, enable_autotune):
        if paddle.is_compiled_with_cuda():
            if enable_autotune:
                paddle.set_flags({'FLAGS_conv_workspace_size_limit': -1})
            else:
                paddle.set_flags({'FLAGS_conv_workspace_size_limit': 512})

    def get_flags(self, name):
        res = paddle.get_flags(name)
        return res[name]

    def get_expected_res(self, step_id, enable_autotune):
        expected_res = {
            "step_id": step_id,
            "cache_size": 0,
            "cache_hit_rate": 0,
        }
        if paddle.is_compiled_with_cuda():
            # Each step makes 3 algorithm-cache accesses; only the first step
            # misses, so the hit rate after step_id is step_id / (step_id + 1).
            expected_res["cache_size"] = 3
            expected_res["cache_hit_rate"] = (step_id + 0.0) / (step_id + 1.0)
        return expected_res

    def test_autotune(self):
        paddle.incubate.autotune.set_config(
            config={"kernel": {"enable": False}}
        )
        self.assertEqual(self.get_flags("FLAGS_use_autotune"), False)

        paddle.incubate.autotune.set_config(config={"kernel": {"enable": True}})
        self.assertEqual(self.get_flags("FLAGS_use_autotune"), True)

    def check_status(self, expected_res):
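        # cache_hit_rate is a float, so compare it with allclose; the other
        # fields are integers and must match exactly.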
        status = paddle.fluid.core.autotune_status()
        for key, v in status.items():
            if key == "cache_hit_rate":
                np.testing.assert_allclose(v, expected_res[key])
            else:
                np.testing.assert_array_equal(v, expected_res[key])


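# Checks the autotune cache statistics after each dygraph training step.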
class TestDygraphAutoTuneStatus(TestAutoTune):
    def run_program(self, enable_autotune):
        self.set_flags(enable_autotune)
        if enable_autotune:
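            # tuning_range [1, 2] presumably limits kernel tuning to steps 1
            # and 2; the cached algorithms are reused afterwards.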
            paddle.incubate.autotune.set_config(
                config={"kernel": {"enable": True, "tuning_range": [1, 2]}}
            )
        else:
            paddle.incubate.autotune.set_config(
                config={"kernel": {"enable": False}}
            )
        x_var = paddle.uniform((1, 1, 8, 8), dtype='float32', min=-1.0, max=1.0)
        net = SimpleNet()
        for i in range(3):
            train_dygraph(net, x_var)
            expected_res = self.get_expected_res(i, enable_autotune)
            self.check_status(expected_res)

    def test_enable_autotune(self):
        self.run_program(enable_autotune=True)

    def test_disable_autotune(self):
        self.run_program(enable_autotune=False)


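# Same checks as the dygraph test, but running a static-graph program.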
class TestStaticAutoTuneStatus(TestAutoTune):
    def run_program(self, enable_autotune):
        paddle.enable_static()

        data_shape = [1, 1, 8, 8]
        main_program = paddle.static.Program()
        startup_program = paddle.static.Program()
        with paddle.static.program_guard(main_program, startup_program):
            data = paddle.static.data(
                name='X', shape=data_shape, dtype='float32'
            )
            net = SimpleNet()
            loss = static_program(net, data)
        place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )
        exe = paddle.static.Executor(place)
        exe.run(startup_program)
        x = np.random.random(size=data_shape).astype('float32')

        self.set_flags(enable_autotune)
        if enable_autotune:
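            # set_config also accepts a path to a JSON config file; exercise
            # that code path here.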
            config = {"kernel": {"enable": True, "tuning_range": [1, 2]}}
            tfile = tempfile.NamedTemporaryFile(mode="w+", delete=False)
            json.dump(config, tfile)
            tfile.close()
            paddle.incubate.autotune.set_config(tfile.name)
            os.remove(tfile.name)
        else:
            paddle.incubate.autotune.set_config(
                config={"kernel": {"enable": False, "tuning_range": [1, 2]}}
            )

        for i in range(3):
            exe.run(program=main_program, feed={'X': x}, fetch_list=[loss])
            expected_res = self.get_expected_res(i, enable_autotune)
            self.check_status(expected_res)
        paddle.disable_static()

    def test_enable_autotune(self):
        self.run_program(enable_autotune=True)

    def test_disable_autotune(self):
        self.run_program(enable_autotune=False)


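# API-level checks for paddle.incubate.autotune.set_config.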
class TestAutoTuneAPI(unittest.TestCase):
    def test_set_config_warnings(self):
        with warnings.catch_warnings(record=True) as w:
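            # Both fields are malformed ("enable" is not a bool, "tuning_range"
            # is not a list), so set_config should emit one warning for each.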
            config = {"kernel": {"enable": 1, "tuning_range": 1}}
            tfile = tempfile.NamedTemporaryFile(mode="w+", delete=False)
            json.dump(config, tfile)
            tfile.close()
            paddle.incubate.autotune.set_config(tfile.name)
            os.remove(tfile.name)
            self.assertEqual(len(w), 2)

    def test_set_config_attr(self):
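        # With config=None, set_config enables kernel autotune by default.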
        paddle.incubate.autotune.set_config(config=None)
        self.assertEqual(
191 192
            paddle.get_flags("FLAGS_use_autotune")["FLAGS_use_autotune"], True
        )


if __name__ == '__main__':
    unittest.main()