#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

from . import core
from .wrapped_decorator import signature_safe_contextmanager
import os
import six

__all__ = [
    'cuda_profiler', 'reset_profiler', 'profiler', 'start_profiler',
    'stop_profiler'
]

NVPROF_CONFIG = [
    "gpustarttimestamp",
    "gpuendtimestamp",
    "gridsize3d",
    "threadblocksize",
    "streamid",
    "enableonstart 0",
    "conckerneltrace",
]


@signature_safe_contextmanager
def cuda_profiler(output_file, output_mode=None, config=None):
    """
    The CUDA profiler.
    
    This function is used to profile a CUDA program via the CUDA runtime application
    programming interface. The profiling result will be written into
    `output_file`. Users can set the output mode via the `output_mode` argument
    and the NVIDIA profiling config via the `config` argument.
    
    After getting the profiling result file, users can use 
    `NVIDIA Visual Profiler <https://developer.nvidia.com/nvidia-visual-profiler>`_ 
    to load this output file to visualize results.

    Args:
        output_file (str) : The output file name, the result will be
            written into this file.
        output_mode (str, optional) : The output mode, which can be Key-Value pair format ('kvp')
            or Comma separated values format ('csv', the default).
        config (list<str>, optional) : Nvidia profile config. Default config is 
            ['gpustarttimestamp', 'gpuendtimestamp', 'gridsize3d', 'threadblocksize', 
            'streamid', 'enableonstart 0', 'conckerneltrace']. For more details, please
            refer to `Compute Command Line Profiler User Guide <https://developer.download.nvidia.cn/compute/DevZone/docs/html/C/doc/Compute_Command_Line_Profiler_User_Guide.pdf>`_ .

    Raises:
        ValueError: If `output_mode` is not in ['kvp', 'csv'].

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            import paddle.fluid.profiler as profiler
            import numpy as np

            epoc = 8
            dshape = [4, 3, 28, 28]
            data = fluid.data(name='data', shape=[None, 3, 28, 28], dtype='float32')
            conv = fluid.layers.conv2d(data, 20, 3, stride=[1, 1], padding=[1, 1])

            place = fluid.CUDAPlace(0)
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())

            output_file = 'cuda_profiler.txt'
            with profiler.cuda_profiler(output_file, 'csv') as nvprof:
                for i in range(epoc):
                    input = np.random.random(dshape).astype('float32')
                    exe.run(fluid.default_main_program(), feed={'data': input})
            # then use  NVIDIA Visual Profiler (nvvp) to load this output file
            # to visualize results.
    """
    if output_mode is None:
        output_mode = 'csv'
    if output_mode not in ['kvp', 'csv']:
        raise ValueError("The output mode must be 'kvp' or 'csv'.")
    config = NVPROF_CONFIG if config is None else config
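    # Write the chosen config to a temporary file that core.nvprof_init reads;
    # the file is removed again in the finally block below.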
    config_file = 'nvprof_config_file'
    with open(config_file, 'wb') as fp:
        fp.writelines([six.b("%s\n" % item) for item in config])
    core.nvprof_init(output_file, output_mode, config_file)
    # Enables profiler collection by the active CUDA profiling tool.
    core.nvprof_start()
    try:
        yield
    # Disables profiler collection.
    finally:
        core.nvprof_stop()
        os.remove(config_file)


@signature_safe_contextmanager
def npu_profiler(output_file, config=None):
    """
    The NPU profiler.
    
    This function is used to profile an NPU program via the NPU runtime application
    programming interface. The profiling result will be written into
    `output_file`. Users can set the NPU profiling config via the `config` argument.
    
    After getting the profiling result file, users can use 
    `tools provided by Ascend <https://support.huaweicloud.com/tg-Inference-cann/atlasprofiling_16_0006.html>`_ 
    to load this output file to visualize results.

    Args:
        output_file (str) : The output file name, the result will be
            written into this file. It should be an absolute path.
        config (list<str>, optional) : NPU profile config. For more details, please
            refer to `User Guide <https://support.huaweicloud.com/tg-Inference-cann/atlasprofiling_16_0006.html>`_ .

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            import paddle.fluid.profiler as profiler
            import numpy as np

            epoc = 8
            dshape = [4, 3, 28, 28]
            data = fluid.data(name='data', shape=[None, 3, 28, 28], dtype='float32')
            conv = fluid.layers.conv2d(data, 20, 3, stride=[1, 1], padding=[1, 1])

            place = fluid.NPUPlace(0)
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())

            output_file = 'npu.txt'
            with profiler.npu_profiler(output_file) as npu_prof:
                for i in range(epoc):
                    input = np.random.random(dshape).astype('float32')
                    exe.run(fluid.default_main_program(), feed={'data': input})
            # then use  NPU profiler tools to load this output file
            # to visualize results.
    """
    # TODO: support config in python.
    if not config:
        config = core.npu_prof_create_config()

    core.npu_prof_init(output_file)
    # Enables profiler collection by the active NPU profiling tool.
    core.npu_prof_start(config)
    try:
        yield
    # Disables profiler collection.
    finally:
        core.npu_prof_stop(config)
        core.npu_prof_finalize()


def reset_profiler():
    """
    Clear the previous time record. This interface does not work for
    `fluid.profiler.cuda_profiler`; it only works for
    `fluid.profiler.start_profiler`, `fluid.profiler.stop_profiler`,
    and `fluid.profiler.profiler`.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            import paddle.fluid.profiler as profiler
            with profiler.profiler('CPU', 'total', '/tmp/profile'):
                for iter in range(10):
                    if iter == 2:
                        profiler.reset_profiler()
                    # ...
    """
    core.reset_profiler()


def start_profiler(state, tracer_option='Default'):
    """
    Enable the profiler. Users can use `fluid.profiler.start_profiler` and
    `fluid.profiler.stop_profiler` to profile, which is equivalent to using
    the `fluid.profiler.profiler` interface.

    Args:
        state (str) : The profiling state, which should be one of 'CPU', 'GPU'
            or 'All'. 'CPU' means only profiling CPU; 'GPU' means profiling
            both CPU and GPU; 'All' means profiling both CPU and GPU, and 
            generates timeline as well.
        tracer_option (str, optional) : tracer_option can be one of ['Default', 'OpDetail', 'AllOpDetail'].
            It controls the profiling level, i.e. how detailed the printed result is. The `Default`
            option prints profiling results grouped by Op type; the `OpDetail` option additionally
            breaks each Op type down into stages such as compute and data transform; the `AllOpDetail`
            option prints the same per-stage breakdown for each individual Op name instead of each Op type.

    Raises:
        ValueError: If `state` is not in ['CPU', 'GPU', 'All'] or `tracer_option` 
            is not in ['Default', 'OpDetail', 'AllOpDetail'].

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            import paddle.fluid.profiler as profiler

            profiler.start_profiler('GPU')
            for iter in range(10):
                if iter == 2:
                    profiler.reset_profiler()
                # execute each iteration
            profiler.stop_profiler('total', '/tmp/profile')
            
            profiler.start_profiler('GPU', "OpDetail")
            for iter in range(10):
                if iter == 2:
                    profiler.reset_profiler()
                # execute each iteration
            profiler.stop_profiler('total', '/tmp/profile')
    """
    if core.is_profiler_enabled():
        return
    if state not in ['CPU', 'GPU', "All"]:
        raise ValueError("The state must be 'CPU' or 'GPU' or 'All'.")
    if state == "GPU":
        prof_state = core.ProfilerState.kCUDA
    elif state == "CPU":
        prof_state = core.ProfilerState.kCPU
    else:
        prof_state = core.ProfilerState.kAll

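    # Validate tracer_option and map it to the corresponding C++ TracerOption enum.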
    if tracer_option not in ['Default', 'OpDetail', 'AllOpDetail']:
        raise ValueError(
            "tracer option must be 'Default', 'OpDetail', 'AllOpDetail'.")
    if tracer_option == "Default":
        prof_tracer_option = core.TracerOption.kDefault
    elif tracer_option == "OpDetail":
        prof_tracer_option = core.TracerOption.kOpDetail
    else:
        prof_tracer_option = core.TracerOption.kAllOpDetail

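    # Hand the selected options to the C++ profiler and start collecting events.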
    core.set_tracer_option(prof_tracer_option)
    core.enable_profiler(prof_state)


def stop_profiler(sorted_key=None, profile_path='/tmp/profile'):
    """
    Stop the profiler. Users can use `fluid.profiler.start_profiler` and
    `fluid.profiler.stop_profiler` to profile, which is equivalent to using
    the `fluid.profiler.profiler` interface.

    Args:
        sorted_key (str, optional) : The order of profiling results, which 
            should be one of None, 'calls', 'total', 'max', 'min' or 'ave'.
            Default is None, means the profiling results will be printed
            in the order of first end time of events.
            The `calls` means sorting by the number of calls.
            The `total` means sorting by the total execution time.
            The `max` means sorting by the maximum execution time.
            The `min` means sorting by the minimum execution time.
            The `ave` means sorting by the average execution time.
        profile_path (str, optional) : If state == 'All', it will generate timeline,
            and write it into `profile_path`. The default profile_path is `/tmp/profile`.

    Raises:
        ValueError: If `sorted_key` is not in
            ['calls', 'total', 'max', 'min', 'ave'].

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            import paddle.fluid.profiler as profiler

            profiler.start_profiler('GPU')
            for iter in range(10):
                if iter == 2:
                    profiler.reset_profiler()
                # execute each iteration
            profiler.stop_profiler('total', '/tmp/profile')
    """
    if not core.is_profiler_enabled():
        return
    sorted_key = 'default' if sorted_key is None else sorted_key
    if sorted_key not in ['default', 'calls', 'total', 'max', 'min', 'ave']:
        raise ValueError("The sorted_key must be None or in 'calls', 'total', "
                         "'max', 'min' and 'ave'")
    key_map = {
        'default': core.EventSortingKey.kDefault,
        'calls': core.EventSortingKey.kCalls,
        'total': core.EventSortingKey.kTotal,
        'max': core.EventSortingKey.kMax,
        'min': core.EventSortingKey.kMin,
        'ave': core.EventSortingKey.kAve,
    }
    # TODO(qingqing) : redirect C++ ostream to Python stream.
    # with core.ostream_redirect(stdout=True, stderr=True):
    core.disable_profiler(key_map[sorted_key], profile_path)


@signature_safe_contextmanager
def profiler(state,
             sorted_key=None,
             profile_path='/tmp/profile',
             tracer_option='Default'):
    """
    The profiler interface. Different from `fluid.profiler.cuda_profiler`, 
    this profiler can be used to profile both CPU and GPU program.

    Args:
        state (str) : The profiling state, which should be one of 'CPU', 'GPU'
            or 'All'. 'CPU' means only profiling CPU; 'GPU' means profiling
            both CPU and GPU; 'All' means profiling both CPU and GPU, and 
            generates timeline as well.
        sorted_key (str, optional) : The order of profiling results, which 
            should be one of None, 'calls', 'total', 'max', 'min' or 'ave'.
            Default is None, means the profiling results will be printed
            in the order of first end time of events.
            The `calls` means sorting by the number of calls.
            The `total` means sorting by the total execution time.
            The `max` means sorting by the maximum execution time.
            The `min` means sorting by the minimum execution time.
            The `ave` means sorting by the average execution time.
        profile_path (str, optional) : If state == 'All', it will generate timeline,
            and write it into `profile_path`. The default profile_path is `/tmp/profile`. 
        tracer_option (str, optional) : tracer_option can be one of ['Default', 'OpDetail', 'AllOpDetail'].
            It controls the profiling level, i.e. how detailed the printed result is. The `Default`
            option prints profiling results grouped by Op type; the `OpDetail` option additionally
            breaks each Op type down into stages such as compute and data transform; the `AllOpDetail`
            option prints the same per-stage breakdown for each individual Op name instead of each Op type.

    Raises:
        ValueError: If `state` is not in ['CPU', 'GPU', 'All'], or if `sorted_key` is
            not in ['calls', 'total', 'max', 'min', 'ave'].

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            import paddle.fluid.profiler as profiler
            import numpy as np

            epoc = 8
            dshape = [4, 3, 28, 28]
            data = fluid.data(name='data', shape=[None, 3, 28, 28], dtype='float32')
            conv = fluid.layers.conv2d(data, 20, 3, stride=[1, 1], padding=[1, 1])

            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())

            with profiler.profiler('CPU', 'total', '/tmp/profile', 'Default') as prof:
                for i in range(epoc):
                    input = np.random.random(dshape).astype('float32')
                    exe.run(fluid.default_main_program(), feed={'data': input})

    Examples Results:

        .. code-block:: text

            #### Examples Results ####
            #### 1) sorted_key = 'total', 'calls', 'max', 'min', 'ave' ####
            # The only difference in 5 sorted_key results is the following sentence: 
            # "Sorted by number of xxx in descending order in the same thread."
            # The reason is that, in this example, the above 5 columns are already sorted.
            ------------------------->     Profiling Report     <-------------------------

            Place: CPU
            Time unit: ms
            Sorted by total time in descending order in the same thread
            #Sorted by number of calls in descending order in the same thread
            #Sorted by number of max in descending order in the same thread
            #Sorted by number of min in descending order in the same thread
            #Sorted by number of avg in descending order in the same thread

            Event                       Calls       Total       Min.        Max.        Ave.        Ratio.
            thread0::conv2d             8           129.406     0.304303    127.076     16.1758     0.983319
            thread0::elementwise_add    8           2.11865     0.193486    0.525592    0.264832    0.016099
            thread0::feed               8           0.076649    0.006834    0.024616    0.00958112  0.000582432

            #### 2) sorted_key = None  ####
            # Since the profiling results are printed in the order of first end time of Ops,
            # the printed order is feed->conv2d->elementwise_add 
            ------------------------->     Profiling Report     <-------------------------

            Place: CPU
            Time unit: ms
            Sorted by event first end time in descending order in the same thread

            Event                       Calls       Total       Min.        Max.        Ave.        Ratio.
            thread0::feed               8           0.077419    0.006608    0.023349    0.00967738  0.00775934
            thread0::conv2d             8           7.93456     0.291385    5.63342     0.99182     0.795243
            thread0::elementwise_add    8           1.96555     0.191884    0.518004    0.245693    0.196998
    """
    start_profiler(state, tracer_option)
    try:
        yield
    finally:
        stop_profiler(sorted_key, profile_path)