# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
# 
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# 
#     http://www.apache.org/licenses/LICENSE-2.0
# 
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
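
"""Tests for switching PaddlePaddle's kernel auto-tuning on and off, and for
checking the autotune cache status (step id, cache size, hit rate) in both
dygraph and static-graph modes."""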

import unittest

import numpy as np
import paddle


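# A single-conv network: the Conv2D kernels are what auto-tuning searches and
# caches (the expected cache size of 3 per step suggests the forward plus the
# two backward conv kernels).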
class SimpleNet(paddle.nn.Layer):
    def __init__(self):
        super(SimpleNet, self).__init__()
        self.conv = paddle.nn.Conv2D(1, 2, (3, 3))

    def forward(self, image, label=None):
        return self.conv(image)


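# One dygraph training step: forward pass, mean loss, backward, Adam update.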
def train_dygraph(net, data):
    data.stop_gradient = False
    out = net(data)
    loss = paddle.mean(out)
    adam = paddle.optimizer.Adam(parameters=net.parameters())
    loss.backward()
    adam.step()
    adam.clear_grad()


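# Appends the training ops for `net` to the current static program and returns
# the loss variable to fetch.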
def static_program(net, data):
    data.stop_gradient = False
    out = net(data)
    loss = paddle.mean(out)
    adam = paddle.optimizer.Adam()
    adam.minimize(loss)
    return loss


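# Base class: flag helpers plus the expected autotune cache status after each
# training step.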
class TestAutoTune(unittest.TestCase):
    def set_flags(self, enable_autotune):
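        # FLAGS_conv_workspace_size_limit caps the conv workspace (in MB).
        # -1 is used here to lift the cap while tuning so every algorithm can
        # be tried; 512 restores a finite limit when tuning is off.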
        if paddle.is_compiled_with_cuda():
            if enable_autotune:
                paddle.set_flags({'FLAGS_conv_workspace_size_limit': -1})
            else:
                paddle.set_flags({'FLAGS_conv_workspace_size_limit': 512})

    def get_flags(self, name):
        res = paddle.get_flags(name)
        return res[name]

    def get_expected_res(self, step_id, enable_autotune):
        expected_res = {
            "step_id": step_id,
            "cache_size": 0,
            "cache_hit_rate": 0
        }
        if paddle.is_compiled_with_cuda():
            # Each step performs 3 cache accesses (one per tuned conv kernel),
            # so 3 steps make 9 accesses in total. Tuning at step 1 fills 3
            # cache entries and only step 2 hits them, giving a cumulative
            # hit rate of 3/9 at step 2.
            if enable_autotune and step_id >= 1:
                expected_res["cache_size"] = 3
            if enable_autotune and step_id == 2:
                expected_res["cache_hit_rate"] = np.round(3.0 / 9.0, 5)
        return expected_res

    def test_autotune(self):
        paddle.fluid.core.disable_autotune()
        self.assertEqual(self.get_flags("FLAGS_use_autotune"), False)

        paddle.fluid.core.enable_autotune()
        self.assertEqual(self.get_flags("FLAGS_use_autotune"), True)

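    # Compares the live autotune status against the expected dict, rounding
    # the hit rate to 5 decimals to match get_expected_res.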
    def check_status(self, expected_res):
        status = paddle.fluid.core.autotune_status()
        for key in status.keys():
            if key == "cache_hit_rate":
                v = np.round(status[key], 5)
            else:
                v = status[key]
            self.assertEqual(v, expected_res[key])


class TestDygraphAutoTuneStatus(TestAutoTune):
    def run_program(self, enable_autotune):
        self.set_flags(enable_autotune)
        if enable_autotune:
            paddle.fluid.core.enable_autotune()
        else:
            paddle.fluid.core.disable_autotune()
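        # Tune only during step 1 (the range reads as [start, stop) over step
        # ids): step 0 runs untuned, step 1 searches and fills the cache, and
        # step 2 reuses it.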
        paddle.fluid.core.set_autotune_range(1, 2)
        x_var = paddle.uniform((1, 1, 8, 8), dtype='float32', min=-1., max=1.)
        net = SimpleNet()
        for i in range(3):
            train_dygraph(net, x_var)
            expected_res = self.get_expected_res(i, enable_autotune)
            self.check_status(expected_res)

    def func_enable_autotune(self):
        self.run_program(enable_autotune=True)

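    # Run the check twice: once under the eager-mode test guard and once in
    # the default dygraph mode.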
    def test_enable_autotune(self):
        with paddle.fluid.framework._test_eager_guard():
            self.func_enable_autotune()
        self.func_enable_autotune()

    def func_disable_autotune(self):
        self.run_program(enable_autotune=False)

    def test_disable_autotune(self):
        with paddle.fluid.framework._test_eager_guard():
            self.func_disable_autotune()
        self.func_disable_autotune()


class TestStaticAutoTuneStatus(TestAutoTune):
    def run_program(self, enable_autotune):
        paddle.enable_static()

        data_shape = [1, 1, 8, 8]
        main_program = paddle.static.Program()
        startup_program = paddle.static.Program()
        with paddle.static.program_guard(main_program, startup_program):
            data = paddle.static.data(
                name='X', shape=data_shape, dtype='float32')
            net = SimpleNet()
            loss = static_program(net, data)
        place = (paddle.CUDAPlace(0)
                 if paddle.fluid.core.is_compiled_with_cuda()
                 else paddle.CPUPlace())
        exe = paddle.static.Executor(place)
        exe.run(startup_program)
        x = np.random.random(size=data_shape).astype('float32')

        self.set_flags(enable_autotune)
        if enable_autotune:
            paddle.fluid.core.enable_autotune()
        else:
            paddle.fluid.core.disable_autotune()
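        # Same tuning window as the dygraph test: tune only at step 1.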
        paddle.fluid.core.set_autotune_range(1, 2)

        for i in range(3):
            exe.run(program=main_program, feed={'X': x}, fetch_list=[loss])
            expected_res = self.get_expected_res(i, enable_autotune)
            self.check_status(expected_res)
        paddle.disable_static()

    def func_enable_autotune(self):
        self.run_program(enable_autotune=True)

    def test_enable_autotune(self):
        self.func_enable_autotune()

    def func_disable_autotune(self):
        self.run_program(enable_autotune=False)

    def test_disable_autotune(self):
        self.func_disable_autotune()


if __name__ == '__main__':
    unittest.main()