# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
# 
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# 
#     http://www.apache.org/licenses/LICENSE-2.0
# 
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
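
"""Tests switching kernel auto-tuning on and off via
paddle.incubate.autotune.set_config, in both dygraph and static graph modes,
and checks the reported cache statistics (step_id, cache_size, cache_hit_rate).
"""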

import json
import os
import tempfile
import unittest
import warnings

import numpy as np

import paddle


class SimpleNet(paddle.nn.Layer):
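    """Minimal one-conv network used to trigger conv algorithm auto-tuning."""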
    def __init__(self):
        super(SimpleNet, self).__init__()
        self.conv = paddle.nn.Conv2D(1, 2, (3, 3))

    def forward(self, image, label=None):
        return self.conv(image)


def train_dygraph(net, data):
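    """Runs one forward/backward/optimizer step on `net` in dygraph mode."""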
    data.stop_gradient = False
    out = net(data)
    loss = paddle.mean(out)
    adam = paddle.optimizer.Adam(parameters=net.parameters())
    loss.backward()
    adam.step()
    adam.clear_grad()


def static_program(net, data):
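    """Appends forward, loss, and Adam ops to the current program; returns loss."""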
    data.stop_gradient = False
    out = net(data)
    loss = paddle.mean(out)
    adam = paddle.optimizer.Adam()
    adam.minimize(loss)
    return loss


class TestAutoTune(unittest.TestCase):
    def set_flags(self, enable_autotune):
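        """Sets the cuDNN conv workspace limit (MB): -1 when tuning, else the 512 MB default."""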
        if paddle.is_compiled_with_cuda():
            if enable_autotune:
                paddle.set_flags({'FLAGS_conv_workspace_size_limit': -1})
            else:
                paddle.set_flags({'FLAGS_conv_workspace_size_limit': 512})

    def get_flags(self, name):
        res = paddle.get_flags(name)
        return res[name]

    def get_expected_res(self, step_id, enable_autotune):
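        """Returns the expected autotune status dict after step `step_id`."""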
        expected_res = {
            "step_id": step_id,
            "cache_size": 0,
            "cache_hit_rate": 0
        }
        if paddle.is_compiled_with_cuda():
            # Each step performs 3 cache accesses (9 total over 3 steps); only
            # the accesses in step 2 hit the cache, giving a hit rate of 3/9.
            if enable_autotune and step_id >= 1:
                expected_res["cache_size"] = 3
            if enable_autotune and step_id == 2:
                expected_res["cache_hit_rate"] = np.round(
                    float(3) / float(9), 5)
        return expected_res

    def test_autotune(self):
        paddle.incubate.autotune.set_config(
            config={"kernel": {"enable": False}})
        self.assertEqual(self.get_flags("FLAGS_use_autotune"), False)

        paddle.incubate.autotune.set_config(config={"kernel": {"enable": True}})
        self.assertEqual(self.get_flags("FLAGS_use_autotune"), True)

    def check_status(self, expected_res):
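        """Asserts that paddle.fluid.core.autotune_status() matches expected_res."""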
        status = paddle.fluid.core.autotune_status()
        for key in status.keys():
            if key == "cache_hit_rate":
                v = np.round(status[key], 5)
            else:
                v = status[key]
            self.assertEqual(v, expected_res[key])


class TestDygraphAutoTuneStatus(TestAutoTune):
    def run_program(self, enable_autotune):
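        """Runs 3 dygraph training steps and verifies the autotune cache status."""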
        self.set_flags(enable_autotune)
        if enable_autotune:
            paddle.incubate.autotune.set_config(
                config={"kernel": {"enable": True, "tuning_range": [1, 2]}})
        else:
            paddle.incubate.autotune.set_config(
                config={"kernel": {"enable": False}})
        x_var = paddle.uniform((1, 1, 8, 8), dtype='float32', min=-1., max=1.)
        net = SimpleNet()
        for i in range(3):
            train_dygraph(net, x_var)
            expected_res = self.get_expected_res(i, enable_autotune)
            self.check_status(expected_res)

    def func_enable_autotune(self):
        self.run_program(enable_autotune=True)

    def test_enable_autotune(self):
        with paddle.fluid.framework._test_eager_guard():
            self.func_enable_autotune()
        self.func_enable_autotune()

    def func_disable_autotune(self):
        self.run_program(enable_autotune=False)

    def test_disable_autotune(self):
        with paddle.fluid.framework._test_eager_guard():
            self.func_disable_autotune()
        self.func_disable_autotune()


class TestStaticAutoTuneStatus(TestAutoTune):
    def run_program(self, enable_autotune):
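        """Builds a static program, runs it for 3 steps, and verifies cache status."""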
        paddle.enable_static()

        data_shape = [1, 1, 8, 8]
        main_program = paddle.static.Program()
        startup_program = paddle.static.Program()
        with paddle.static.program_guard(main_program, startup_program):
            data = paddle.static.data(
                name='X', shape=data_shape, dtype='float32')
            net = SimpleNet()
            loss = static_program(net, data)
        place = (paddle.CUDAPlace(0)
                 if paddle.is_compiled_with_cuda() else paddle.CPUPlace())
        exe = paddle.static.Executor(place)
        exe.run(startup_program)
        x = np.random.random(size=data_shape).astype('float32')

        self.set_flags(enable_autotune)
        if enable_autotune:
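            # Exercise the file-based path: set_config also accepts the name of
            # a JSON file containing the same config dict.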
            config = {"kernel": {"enable": True, "tuning_range": [1, 2]}}
            tfile = tempfile.NamedTemporaryFile(mode="w+", delete=False)
            json.dump(config, tfile)
            tfile.close()
            paddle.incubate.autotune.set_config(tfile.name)
            os.remove(tfile.name)
        else:
            paddle.incubate.autotune.set_config(
                config={"kernel": {"enable": False, "tuning_range": [1, 2]}})

        for i in range(3):
            exe.run(program=main_program, feed={'X': x}, fetch_list=[loss])
            expected_res = self.get_expected_res(i, enable_autotune)
            self.check_status(expected_res)
        paddle.disable_static()

    def func_enable_autotune(self):
        self.run_program(enable_autotune=True)

    def test_enable_autotune(self):
        self.func_enable_autotune()

    def func_disable_autotune(self):
        self.run_program(enable_autotune=False)

    def test_disable_autotune(self):
        self.func_disable_autotune()


class TestAutoTuneAPI(unittest.TestCase):
    def test_set_config_warnings(self):
        with warnings.catch_warnings(record=True) as w:
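            # "enable" should be a bool and "tuning_range" a list, so two
            # type warnings are expected from set_config.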
            config = {"kernel": {"enable": 1, "tuning_range": 1}}
            tfile = tempfile.NamedTemporaryFile(mode="w+", delete=False)
            json.dump(config, tfile)
            tfile.close()
            paddle.incubate.autotune.set_config(tfile.name)
            os.remove(tfile.name)
            self.assertEqual(len(w), 2)

    def test_set_config_attr(self):
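        # With config=None, set_config enables auto-tuning by default.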
        paddle.incubate.autotune.set_config(config=None)
        self.assertEqual(
            paddle.get_flags("FLAGS_use_autotune")["FLAGS_use_autotune"], True)


if __name__ == '__main__':
    unittest.main()