#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import tempfile
import unittest

import numpy as np

import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.fluid.layers as layers
import paddle.fluid.profiler as profiler
import paddle.fluid.proto.profiler.profiler_pb2 as profiler_pb2
import paddle.utils as utils
from paddle.utils.flops import flops


class TestProfiler(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
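        # CPU_NUM controls how many CPU places fluid creates for parallel
        # execution; pin it so the tests behave deterministically.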
        os.environ['CPU_NUM'] = str(4)

    def build_program(self, compile_program=True):
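        # Build a small MLP with a static While loop in the middle, plus
        # accuracy/cost metrics and a Momentum optimizer; optionally compile
        # it for (single-threaded) data-parallel execution.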
        startup_program = fluid.Program()
        main_program = fluid.Program()
        with fluid.program_guard(main_program, startup_program):
            image = fluid.layers.data(name='x', shape=[784], dtype='float32')
            hidden1 = fluid.layers.fc(input=image, size=64, act='relu')
            i = layers.zeros(shape=[1], dtype='int64')
            counter = fluid.layers.zeros(
                shape=[1], dtype='int64', force_cpu=True
            )
            until = layers.fill_constant([1], dtype='int64', value=10)
            data_arr = layers.array_write(hidden1, i)
            cond = paddle.less_than(x=counter, y=until)
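            # Re-apply the fc layer until counter reaches `until` (10 steps),
            # writing each result into data_arr.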
            while_op = fluid.layers.While(cond=cond)
            with while_op.block():
                hidden_n = fluid.layers.fc(input=hidden1, size=64, act='relu')
                layers.array_write(hidden_n, i, data_arr)
                fluid.layers.increment(x=counter, value=1, in_place=True)
                paddle.assign(paddle.less_than(x=counter, y=until), cond)

            hidden_n = layers.array_read(data_arr, i)
            hidden2 = fluid.layers.fc(input=hidden_n, size=64, act='relu')
            predict = fluid.layers.fc(input=hidden2, size=10, act='softmax')
            label = fluid.layers.data(name='y', shape=[1], dtype='int64')
            cost = fluid.layers.cross_entropy(input=predict, label=label)
            avg_cost = paddle.mean(cost)
            batch_size = fluid.layers.create_tensor(dtype='int64')
            batch_acc = paddle.static.accuracy(
                input=predict, label=label, total=batch_size
            )

        optimizer = fluid.optimizer.Momentum(learning_rate=0.001, momentum=0.9)
        opts = optimizer.minimize(avg_cost, startup_program=startup_program)

        if compile_program:
            # TODO(luotao): profiler tool may have bug with multi-thread parallel executor.
            # https://github.com/PaddlePaddle/Paddle/pull/25200#issuecomment-650483092
            exec_strategy = fluid.ExecutionStrategy()
            exec_strategy.num_threads = 1
            train_program = fluid.compiler.CompiledProgram(
                main_program
            ).with_data_parallel(
                loss_name=avg_cost.name, exec_strategy=exec_strategy
            )
        else:
            train_program = main_program
        return train_program, startup_program, avg_cost, batch_size, batch_acc

    def get_profile_path(self):
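        # Create (or truncate) a scratch profile file under the system temp dir.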
        profile_path = os.path.join(tempfile.gettempdir(), "profile")
        with open(profile_path, "w") as f:
            f.write("")
        return profile_path

    def check_profile_result(self, profile_path):
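        # Parse the serialized profiler_pb2.Profile dump: every GPU kernel
        # event (other than MEM* events) must carry detail info, while
        # CPU-side Driver/Runtime API events only trigger a warning.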
        with open(profile_path, 'rb') as f:
            data = f.read()
        if len(data) > 0:
            profile_pb = profiler_pb2.Profile()
            profile_pb.ParseFromString(data)
            self.assertGreater(len(profile_pb.events), 0)
            for event in profile_pb.events:
                if event.type == profiler_pb2.Event.GPUKernel:
                    if not event.detail_info and not event.name.startswith(
                        "MEM"
                    ):
                        raise Exception(
                            "Kernel %s missing event. Has this kernel been recorded by RecordEvent?"
                            % event.name
                        )
                elif event.type == profiler_pb2.Event.CPU and (
                    event.name.startswith("Driver API")
                    or event.name.startswith("Runtime API")
                ):
                    print("Warning: unregister", event.name)

    def run_iter(self, exe, main_program, fetch_list):
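        # Run a single training step on one random batch of 32 samples.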
        x = np.random.random((32, 784)).astype("float32")
        y = np.random.randint(0, 10, (32, 1)).astype("int64")
        exe.run(
            main_program, feed={'x': x, 'y': y}, fetch_list=fetch_list
        )

    def net_profiler(
        self,
        exe,
        state,
        tracer_option,
        batch_range=None,
        use_parallel_executor=False,
        use_new_api=False,
    ):
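        # Profile 10 training iterations, either through the legacy
        # fluid.profiler context manager or the newer paddle.utils.Profiler API.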
        (
            main_program,
            startup_program,
            avg_cost,
            batch_size,
            batch_acc,
        ) = self.build_program(compile_program=use_parallel_executor)
        exe.run(startup_program)

        profile_path = self.get_profile_path()
        if not use_new_api:
            with profiler.profiler(state, 'total', profile_path, tracer_option):
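                # Discard the first two warm-up iterations so they do not
                # skew the timing statistics.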
                for i in range(10):
                    if i == 2:
                        profiler.reset_profiler()
                    self.run_iter(
                        exe, main_program, [avg_cost, batch_acc, batch_size]
                    )
        else:
            options = utils.ProfilerOptions(
                options={
                    'state': state,
                    'sorted_key': 'total',
                    'tracer_level': tracer_option,
                    'batch_range': [0, 10]
                    if batch_range is None
                    else batch_range,
                    'profile_path': profile_path,
                }
            )
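            # The new-style profiler is stepped manually: record_step() marks
            # batch boundaries so that `batch_range` can gate the profiling.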
            with utils.Profiler(enabled=True, options=options) as prof:
                for i in range(10):
                    self.run_iter(
                        exe, main_program, [avg_cost, batch_acc, batch_size]
                    )
                    utils.get_profiler().record_step()
                    if batch_range is None and i == 2:
                        utils.get_profiler().reset()
        # TODO(luotao): check why nccl kernel in profile result.
        # https://github.com/PaddlePaddle/Paddle/pull/25200#issuecomment-650483092
        # self.check_profile_result(profile_path)

    def test_cpu_profiler(self):
        exe = fluid.Executor(fluid.CPUPlace())
        for use_new_api in [False, True]:
            self.net_profiler(
                exe,
                'CPU',
                "Default",
                batch_range=[5, 10],
                use_new_api=use_new_api,
            )

    @unittest.skipIf(
        not core.is_compiled_with_cuda(), "profiler is enabled only with GPU"
    )
    def test_cuda_profiler(self):
        exe = fluid.Executor(fluid.CUDAPlace(0))
        for use_new_api in [False, True]:
            self.net_profiler(
                exe,
                'GPU',
                "OpDetail",
                batch_range=[0, 10],
                use_new_api=use_new_api,
            )

    @unittest.skipIf(
        not core.is_compiled_with_cuda(), "profiler is enabled only with GPU"
    )
    def test_all_profiler(self):
        exe = fluid.Executor(fluid.CUDAPlace(0))
        for use_new_api in [False, True]:
            self.net_profiler(
                exe,
                'All',
                "AllOpDetail",
                batch_range=None,
                use_new_api=use_new_api,
            )


class TestProfilerAPIError(unittest.TestCase):
    def test_errors(self):
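        # ProfilerOptions should expose None defaults for unset paths and
        # reject lookups of option names that were never defined.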
        options = utils.ProfilerOptions()
        self.assertIsNone(options['profile_path'])
        self.assertIsNone(options['timeline_path'])

        options = options.with_state('All')
        self.assertTrue(options['state'] == 'All')
        # Looking up an option name that was never defined should raise.
        with self.assertRaises(ValueError):
            options['test']

        global_profiler = utils.get_profiler()
        with utils.Profiler(enabled=True) as prof:
            self.assertTrue(utils.get_profiler() == prof)
            self.assertTrue(global_profiler != prof)


class TestFLOPSAPI(unittest.TestCase):
    def test_flops(self):
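        # Spot-check the flops() formulas: element-wise activations count one
        # FLOP per output element, softmax three, gelu five, matmul 2*M*N*K
        # per batch, and pure shape ops (reshape/transpose/unsqueeze) zero.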
        self.assertTrue(flops('relu', {'X': [[12, 12]]}, {'output': 4}) == 144)
        self.assertTrue(flops('dropout', {}, {'output': 4}) == 0)
        self.assertTrue(
            flops(
                'transpose2',
                {
                    'X': [[12, 12, 12]],
                },
                {},
            )
            == 0
        )
        self.assertTrue(
            flops(
                'reshape2',
                {
                    'X': [[12, 12, 12]],
                },
                {},
            )
            == 0
        )
        self.assertTrue(
            flops(
                'unsqueeze2',
                {
                    'X': [[12, 12, 12]],
                },
                {},
            )
            == 0
        )
        self.assertTrue(
            flops(
                'layer_norm',
                {'Bias': [[128]], 'Scale': [[128]], 'X': [[32, 128, 28, 28]]},
                {'epsilon': 0.01},
            )
            == 32 * 128 * 28 * 28 * 8
        )
        self.assertTrue(
            flops(
                'elementwise_add', {'X': [[12, 12, 12]], 'Y': [[2, 2, 12]]}, {}
            )
            == 12 * 12 * 12
        )
        self.assertTrue(
            flops('gelu', {'X': [[12, 12, 12]]}, {}) == 5 * 12 * 12 * 12
        )
        self.assertTrue(
            flops(
                'matmul',
                {'X': [[3, 12, 12, 8]], 'Y': [[12, 12, 8]]},
                {'transpose_X': False, 'transpose_Y': True},
            )
            == 3 * 12 * 12 * 12 * 2 * 8
        )
        self.assertTrue(
            flops(
                'matmul_v2',
                {'X': [[3, 12, 12, 8]], 'Y': [[12, 12, 8]]},
                {'trans_x': False, 'trans_y': True},
            )
            == 3 * 12 * 12 * 12 * 2 * 8
        )
        self.assertTrue(
            flops('relu', {'X': [[12, 12, 12]]}, {}) == 12 * 12 * 12
        )
        self.assertTrue(
            flops('softmax', {'X': [[12, 12, 12]]}, {}) == 3 * 12 * 12 * 12
        )
        self.assertTrue(
            flops('c_embedding', {'Ids': [[12, 12]], 'W': [[12, 12, 3]]}, {})
            == 0
        )


if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()