#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

from . import core
from .wrapped_decorator import signature_safe_contextmanager
import os
import six

__all__ = [
    'cuda_profiler', 'reset_profiler', 'profiler', 'start_profiler',
    'stop_profiler'
]

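# Configuration options for the CUDA command-line profiler, presumably left over
# from the original implementation of the (now abandoned) `cuda_profiler` API below.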
NVPROF_CONFIG = [
    "gpustarttimestamp",
    "gpuendtimestamp",
    "gridsize3d",
    "threadblocksize",
    "streamid",
    "enableonstart 0",
    "conckerneltrace",
]


@signature_safe_contextmanager
def cuda_profiler(output_file, output_mode=None, config=None):
    """
    The API cuda_profiler has been abandoned. If you need profiling, use
    `paddle.utils.profiler.start_profiler` and `paddle.utils.profiler.stop_profiler` instead.
    The relevant reference documents are as follows:
    <https://www.paddlepaddle.org.cn/documentation/docs/en/api/paddle/utils/profiler/start_profiler_en.html#start-profiler>
    <https://www.paddlepaddle.org.cn/documentation/docs/en/api/paddle/utils/profiler/stop_profiler_en.html#stop-profiler>
    <https://www.paddlepaddle.org.cn/documentation/docs/en/advanced_guide/performance_improving/analysis_tools/timeline_en.html>
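
    A minimal sketch of the replacement workflow is shown below. It assumes a
    CUDA-enabled build of Paddle and is not an example of the abandoned API itself:

    Examples:

        .. code-block:: python

            import paddle.utils.profiler as profiler

            profiler.start_profiler('GPU')
            # ... run the code to be profiled ...
            profiler.stop_profiler('total', '/tmp/profile')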
    """
    raise RuntimeError(
        "API cuda_profiler has been abandoned. If you have relevant requirements, you can use `paddle.utils.profiler.start_profiler` and `paddle.utils.profiler.stop_profiler`.\nThe relevant reference documents are as follows:\n<https://www.paddlepaddle.org.cn/documentation/docs/en/api/paddle/utils/profiler/start_profiler_en.html#start-profiler>\n<https://www.paddlepaddle.org.cn/documentation/docs/en/api/paddle/utils/profiler/stop_profiler_en.html#stop-profiler>\n<https://www.paddlepaddle.org.cn/documentation/docs/en/advanced_guide/performance_improving/analysis_tools/timeline_en.html>"
    )


@signature_safe_contextmanager
def npu_profiler(output_file, config=None):
    """
    The NPU profiler.
    
    This function profiles an NPU program through the NPU runtime application
    programming interface. The profiling result will be written into
    `output_file`. Users can set the NPU profiling config via the `config` argument.
    
    After getting the profiling result file, users can use 
    `tools provided by Ascend <https://support.huaweicloud.com/tg-Inference-cann/atlasprofiling_16_0006.html>`_ 
    to load this output file to visualize results.

    Args:
        output_file (str) : The output file name; the result will be
            written into this file. It should be an absolute path.
        config (list<str>, optional) : NPU profile config. For more details, please
            refer to `User Guide <https://support.huaweicloud.com/tg-Inference-cann/atlasprofiling_16_0006.html>`_ .

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            import paddle.fluid.profiler as profiler
            import numpy as np

            epoc = 8
            dshape = [4, 3, 28, 28]
            data = fluid.data(name='data', shape=[None, 3, 28, 28], dtype='float32')
            conv = fluid.layers.conv2d(data, 20, 3, stride=[1, 1], padding=[1, 1])

            place = fluid.NPUPlace(0)
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())

            output_file = 'npu.txt'
            with profiler.npu_profiler(output_file) as npu_prof:
                for i in range(epoc):
                    input = np.random.random(dshape).astype('float32')
                    exe.run(fluid.default_main_program(), feed={'data': input})
            # then use NPU profiler tools to load this output file
            # to visualize results.
    """
    # TODO: support config in python.
    if not config:
        config = core.npu_prof_create_config()

    # Initialize the NPU profiler so that results are written to `output_file`.
    core.npu_prof_init(output_file)
    # Enables profiler collection by the active NPU profiling tool.
    core.npu_prof_start(config)
    try:
        yield
    # Disables profiler collection.
    finally:
        core.npu_prof_stop(config)
        core.npu_prof_finalize()


def reset_profiler():
    """
    Clear the previous time records. It works with
    `fluid.profiler.start_profiler`, `fluid.profiler.stop_profiler`,
    and `fluid.profiler.profiler`.

    Examples:

        .. code-block:: python

            # required: gpu
            import paddle.fluid as fluid
            import paddle.fluid.profiler as profiler
            with profiler.profiler('CPU', 'total', '/tmp/profile'):
                for iter in range(10):
                    if iter == 2:
                        profiler.reset_profiler()
                    # ...
    """
    core.reset_profiler()


def start_profiler(state, tracer_option='Default'):
    """
    Enable the profiler. Users can use `fluid.profiler.start_profiler` and
    `fluid.profiler.stop_profiler` to profile, which is equivalent to using
    the `fluid.profiler.profiler` interface.

    Args:
        state (str) : The profiling state, which should be one of 'CPU', 'GPU'
            or 'All'. 'CPU' means only profiling CPU; 'GPU' means profiling
            both CPU and GPU; 'All' means profiling both CPU and GPU, and 
            generates timeline as well.
        tracer_option (str, optional) : tracer_option can be one of ['Default', 'OpDetail', 'AllOpDetail'].
            It controls the profiling level and how detailed the printed results are.
            `Default` prints profiling results aggregated by Op type; `OpDetail` additionally
            breaks each Op type down into stages such as compute and data transform;
            `AllOpDetail` prints the same breakdown as `OpDetail`, but for each individual
            Op name.

    Raises:
        ValueError: If `state` is not in ['CPU', 'GPU', 'All'] or `tracer_option` 
            is not in ['Default', 'OpDetail', 'AllOpDetail'].

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            import paddle.fluid.profiler as profiler

            profiler.start_profiler('GPU')
            for iter in range(10):
                if iter == 2:
                    profiler.reset_profiler()
                # execute each iteration
            profiler.stop_profiler('total', '/tmp/profile')
            
            profiler.start_profiler('GPU', "OpDetail")
            for iter in range(10):
                if iter == 2:
                    profiler.reset_profiler()
                # execute each iteration
            profiler.stop_profiler('total', '/tmp/profile')
    """
    if core.is_profiler_enabled():
        return
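    # Validate `state` and map it to the corresponding core.ProfilerState value.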
    if state not in ['CPU', 'GPU', "All"]:
        raise ValueError("The state must be 'CPU' or 'GPU' or 'All'.")
    if state == "GPU":
        prof_state = core.ProfilerState.kCUDA
    elif state == "CPU":
        prof_state = core.ProfilerState.kCPU
    else:
        prof_state = core.ProfilerState.kAll

    if tracer_option not in ['Default', 'OpDetail', 'AllOpDetail']:
        raise ValueError(
            "tracer option must be 'Default', 'OpDetail', 'AllOpDetail'.")
    if tracer_option == "Default":
        prof_tracer_option = core.TracerOption.kDefault
    elif tracer_option == "OpDetail":
        prof_tracer_option = core.TracerOption.kOpDetail
    else:
        prof_tracer_option = core.TracerOption.kAllOpDetail

    core.set_tracer_option(prof_tracer_option)
    core.enable_profiler(prof_state)


def stop_profiler(sorted_key=None, profile_path='/tmp/profile'):
    """
    Stop the profiler. Users can use `fluid.profiler.start_profiler` and
    `fluid.profiler.stop_profiler` to profile, which is equivalent to using
    the `fluid.profiler.profiler` interface.

    Args:
        sorted_key (str, optional) : The order of profiling results, which 
            should be one of None, 'calls', 'total', 'max', 'min' or 'ave'.
            Default is None, which means the profiling results are printed
            in the order of the events' first end time.
            The `calls` means sorting by the number of calls.
            The `total` means sorting by the total execution time.
            The `max` means sorting by the maximum execution time.
            The `min` means sorting by the minimum execution time.
            The `ave` means sorting by the average execution time.
        profile_path (str, optional) : If state == 'All', it will generate timeline,
            and write it into `profile_path`. The default profile_path is `/tmp/profile`.

    Raises:
        ValueError: If `sorted_key` is not in
            ['calls', 'total', 'max', 'min', 'ave'].

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            import paddle.fluid.profiler as profiler

            profiler.start_profiler('GPU')
            for iter in range(10):
                if iter == 2:
                    profiler.reset_profiler()
                # execute each iteration
            profiler.stop_profiler('total', '/tmp/profile')
    """
    if not core.is_profiler_enabled():
        return
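    # Translate the user-facing `sorted_key` string into the corresponding
    # core.EventSortingKey value.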
    sorted_key = 'default' if sorted_key is None else sorted_key
    if sorted_key not in ['default', 'calls', 'total', 'max', 'min', 'ave']:
        raise ValueError("The sorted_key must be None or in 'calls', 'total', "
                         "'max', 'min' and 'ave'")
    key_map = {
        'default': core.EventSortingKey.kDefault,
        'calls': core.EventSortingKey.kCalls,
        'total': core.EventSortingKey.kTotal,
        'max': core.EventSortingKey.kMax,
        'min': core.EventSortingKey.kMin,
        'ave': core.EventSortingKey.kAve,
    }
    # TODO(qingqing) : redirect C++ ostream to Python stream.
    # with core.ostream_redirect(stdout=True, stderr=True):
    core.disable_profiler(key_map[sorted_key], profile_path)


@signature_safe_contextmanager
def profiler(state,
             sorted_key=None,
             profile_path='/tmp/profile',
             tracer_option='Default'):
    """
    The profiler interface. This profiler can be used to profile both CPU and GPU programs.

    Args:
        state (str) : The profiling state, which should be one of 'CPU', 'GPU'
            or 'All'. 'CPU' means only profiling CPU; 'GPU' means profiling
            both CPU and GPU; 'All' means profiling both CPU and GPU, and 
            generates timeline as well.
        sorted_key (str, optional) : The order of profiling results, which 
            should be one of None, 'calls', 'total', 'max', 'min' or 'ave'.
            Default is None, which means the profiling results are printed
            in the order of the events' first end time.
            The `calls` means sorting by the number of calls.
            The `total` means sorting by the total execution time.
            The `max` means sorting by the maximum execution time.
            The `min` means sorting by the minimum execution time.
            The `ave` means sorting by the average execution time.
        profile_path (str, optional) : If state == 'All', it will generate timeline,
            and write it into `profile_path`. The default profile_path is `/tmp/profile`. 
        tracer_option (str, optional) : tracer_option can be one of ['Default', 'OpDetail', 'AllOpDetail'].
            It controls the profiling level and how detailed the printed results are.
            `Default` prints profiling results aggregated by Op type; `OpDetail` additionally
            breaks each Op type down into stages such as compute and data transform;
            `AllOpDetail` prints the same breakdown as `OpDetail`, but for each individual
            Op name.

    Raises:
        ValueError: If `state` is not in ['CPU', 'GPU', 'All'], or if `sorted_key`
            is not in ['calls', 'total', 'max', 'min', 'ave'].

    Examples:

        .. code-block:: python

            # required: gpu
            import paddle.fluid as fluid
            import paddle.fluid.profiler as profiler
            import numpy as np
            import paddle
            paddle.enable_static()

            epoc = 8
            dshape = [4, 3, 28, 28]
            data = fluid.data(name='data', shape=[None, 3, 28, 28], dtype='float32')
            conv = fluid.layers.conv2d(data, 20, 3, stride=[1, 1], padding=[1, 1])

            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())

            with profiler.profiler('CPU', 'total', '/tmp/profile', 'Default') as prof:
                for i in range(epoc):
                    input = np.random.random(dshape).astype('float32')
                    exe.run(fluid.default_main_program(), feed={'data': input})

    Examples Results:

        .. code-block:: text

            #### Examples Results ####
            #### 1) sorted_key = 'total', 'calls', 'max', 'min', 'ave' ####
            # The only difference among the 5 sorted_key results is the following sentence:
            # "Sorted by number of xxx in descending order in the same thread."
            # The reason is that in this example, above 5 columns are already sorted.
            ------------------------->     Profiling Report     <-------------------------

            Place: CPU
            Time unit: ms
            Sorted by total time in descending order in the same thread
            #Sorted by number of calls in descending order in the same thread
            #Sorted by number of max in descending order in the same thread
            #Sorted by number of min in descending order in the same thread
            #Sorted by number of avg in descending order in the same thread

            Event                       Calls       Total       Min.        Max.        Ave.        Ratio.
            thread0::conv2d             8           129.406     0.304303    127.076     16.1758     0.983319
            thread0::elementwise_add    8           2.11865     0.193486    0.525592    0.264832    0.016099
            thread0::feed               8           0.076649    0.006834    0.024616    0.00958112  0.000582432

            #### 2) sorted_key = None  ####
            # Since the profiling results are printed in the order of first end time of Ops,
            # the printed order is feed->conv2d->elementwise_add 
            ------------------------->     Profiling Report     <-------------------------

            Place: CPU
            Time unit: ms
            Sorted by event first end time in descending order in the same thread

            Event                       Calls       Total       Min.        Max.        Ave.        Ratio.
            thread0::feed               8           0.077419    0.006608    0.023349    0.00967738  0.00775934
            thread0::conv2d             8           7.93456     0.291385    5.63342     0.99182     0.795243
            thread0::elementwise_add    8           1.96555     0.191884    0.518004    0.245693    0.196998
    """
    start_profiler(state, tracer_option)
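    # Ensure the profiler is stopped and the results are written out even if the
    # profiled block raises an exception.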
    try:
        yield
    finally:
        stop_profiler(sorted_key, profile_path)