# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# TODO: define the functions to manipulate devices
import re
import os
from paddle.fluid import core
from paddle.fluid import framework
from paddle.fluid.dygraph.parallel import ParallelEnv
from paddle.fluid.framework import is_compiled_with_cinn  # noqa: F401
from paddle.fluid.framework import is_compiled_with_cuda  # noqa: F401
from paddle.fluid.framework import is_compiled_with_rocm  # noqa: F401
from . import cuda

__all__ = [  # noqa
    'get_cudnn_version',
    'set_device',
    'get_device',
    'XPUPlace',
    'IPUPlace',
    'MLUPlace',
    'is_compiled_with_xpu',
    'is_compiled_with_ipu',
    'is_compiled_with_cinn',
    'is_compiled_with_cuda',
    'is_compiled_with_rocm',
    'is_compiled_with_npu',
    'is_compiled_with_mlu',
    'get_all_device_type',
    'get_all_custom_device_type',
    'get_available_device',
    'get_available_custom_device',
]

_cudnn_version = None


# TODO: WITH_ASCEND_CL may be changed to WITH_NPU or others in the future
# for consistency.
def is_compiled_with_npu():
    """
    Whether paddle was built with WITH_ASCEND_CL=ON to support Ascend NPU.

    Returns (bool): `True` if NPU is supported, otherwise `False`.

    Examples:
        .. code-block:: python

            import paddle
            support_npu = paddle.device.is_compiled_with_npu()
    """
    return core.is_compiled_with_npu()


def is_compiled_with_ipu():
    """
    Whether paddle was built with WITH_IPU=ON to support Graphcore IPU.

    Returns (bool): `True` if IPU is supported, otherwise `False`.

    Examples:
        .. code-block:: python

            import paddle
            support_ipu = paddle.is_compiled_with_ipu()
    """
    return core.is_compiled_with_ipu()


def IPUPlace():
    """
    Return a Graphcore IPU Place

    Examples:
        .. code-block:: python

            # required: ipu

            import paddle
            place = paddle.device.IPUPlace()
    """
    return core.IPUPlace()


def is_compiled_with_xpu():
    """
    Whether paddle was built with WITH_XPU=ON to support Baidu Kunlun

    Returns (bool): whether paddle was built with WITH_XPU=ON

    Examples:
        .. code-block:: python

            import paddle
            support_xpu = paddle.device.is_compiled_with_xpu()
    """
    return core.is_compiled_with_xpu()


def XPUPlace(dev_id):
    """
    Return a Baidu Kunlun Place

    Parameters:
        dev_id(int): Baidu Kunlun device id

    Examples:
        .. code-block:: python

            # required: xpu

            import paddle
            place = paddle.device.XPUPlace(0)
    """
    return core.XPUPlace(dev_id)


def is_compiled_with_mlu():
    """
    Whether paddle was built with WITH_MLU=ON to support Cambricon MLU

    Returns (bool): whether paddle was built with WITH_MLU=ON

    Examples:
        .. code-block:: python

            # required: mlu

            import paddle
            support_mlu = paddle.device.is_compiled_with_mlu()
    """
    return core.is_compiled_with_mlu()


def MLUPlace(dev_id):
    """
    Return a Cambricon MLU Place

    Parameters:
        dev_id(int): MLU device id

    Examples:
        .. code-block:: python

            # required: mlu

            import paddle
            place = paddle.device.MLUPlace(0)
    """
    return core.MLUPlace(dev_id)


def get_cudnn_version():
    """
    This function returns the version of cudnn. The return value is an int that represents the
    cudnn version. For example, a return value of 7600 means the cudnn version is 7.6.

    Returns:
        int: An int value that represents the cudnn version. If cudnn is not installed, it returns None.

    Examples:
        .. code-block:: python

            import paddle

            cudnn_version = paddle.device.get_cudnn_version()



    """
    global _cudnn_version
    if not core.is_compiled_with_cuda():
        return None
    if _cudnn_version is None:
        cudnn_version = int(core.cudnn_version())
        _cudnn_version = cudnn_version
        if _cudnn_version < 0:
            return None
        else:
            return cudnn_version
    else:
        return _cudnn_version
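

# A small illustrative sketch (not part of the module API): the integer that
# get_cudnn_version() returns packs the cudnn version digits, e.g. 7600 -> 7.6
# as noted in the docstring above. Assuming that major*1000 + minor*100 layout,
# it can be decoded like this:
#
#     version = get_cudnn_version()
#     if version is not None:
#         major, minor = version // 1000, (version % 1000) // 100
#         print("cuDNN {}.{}".format(major, minor))  # e.g. "cuDNN 7.6"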


def _convert_to_place(device):
    lower_device = device.lower()
    if device in core.get_all_custom_device_type():
        selected_devices = os.getenv(
            "FLAGS_selected_{}s".format(device), "0"
        ).split(",")
        device_id = int(selected_devices[0])
        place = core.CustomPlace(device, device_id)
    elif lower_device == 'cpu':
        place = core.CPUPlace()
    elif lower_device == 'gpu':
        if not core.is_compiled_with_cuda():
            raise ValueError(
                "The device should not be 'gpu', "
                "since PaddlePaddle is not compiled with CUDA"
            )
        place = core.CUDAPlace(ParallelEnv().dev_id)
    elif lower_device == 'xpu':
        if not core.is_compiled_with_xpu():
            raise ValueError(
                "The device should not be 'xpu', "
                "since PaddlePaddle is not compiled with XPU"
            )
        selected_xpus = os.getenv("FLAGS_selected_xpus", "0").split(",")
        device_id = int(selected_xpus[0])
        place = core.XPUPlace(device_id)
    elif lower_device == 'npu':
        if not core.is_compiled_with_npu():
            raise ValueError(
                "The device should not be 'npu', "
                "since PaddlePaddle is not compiled with NPU"
            )
        selected_npus = os.getenv("FLAGS_selected_npus", "0").split(",")
        device_id = int(selected_npus[0])
        place = core.NPUPlace(device_id)
    elif lower_device == 'ipu':
        if not core.is_compiled_with_ipu():
            raise ValueError(
                "The device should not be 'ipu', "
                "since PaddlePaddle is not compiled with IPU"
            )
        place = core.IPUPlace()
    elif lower_device == 'mlu':
        if not core.is_compiled_with_mlu():
            raise ValueError(
                "The device should not be 'mlu', "
                "since PaddlePaddle is not compiled with MLU"
            )
        selected_mlus = os.getenv("FLAGS_selected_mlus", "0").split(",")
        device_id = int(selected_mlus[0])
        place = core.MLUPlace(device_id)
    else:
        avaliable_gpu_device = re.match(r'gpu:\d+', lower_device)
        avaliable_xpu_device = re.match(r'xpu:\d+', lower_device)
        avaliable_npu_device = re.match(r'npu:\d+', lower_device)
        avaliable_mlu_device = re.match(r'mlu:\d+', lower_device)
        if avaliable_gpu_device:
            if not core.is_compiled_with_cuda():
                raise ValueError(
                    "The device should not be {}, since PaddlePaddle is "
                    "not compiled with CUDA".format(avaliable_gpu_device)
                )
            device_info_list = device.split(':', 1)
            device_id = device_info_list[1]
            device_id = int(device_id)
            place = core.CUDAPlace(device_id)
        if avaliable_xpu_device:
            if not core.is_compiled_with_xpu():
                raise ValueError(
                    "The device should not be {}, since PaddlePaddle is "
                    "not compiled with XPU".format(avaliable_xpu_device)
                )
            device_info_list = device.split(':', 1)
            device_id = device_info_list[1]
            device_id = int(device_id)
            place = core.XPUPlace(device_id)
        if avaliable_npu_device:
            if not core.is_compiled_with_npu():
                device_info_list = device.split(':', 1)
                device_type = device_info_list[0]
                if device_type in core.get_all_custom_device_type():
                    device_id = device_info_list[1]
                    device_id = int(device_id)
                    place = core.CustomPlace(device_type, device_id)
                    return place
                else:
                    raise ValueError(
                        "The device should not be {}, since PaddlePaddle is "
                        "not compiled with NPU or compiled with custom device".format(
                            avaliable_npu_device
                        )
                    )
            device_info_list = device.split(':', 1)
            device_id = device_info_list[1]
            device_id = int(device_id)
            place = core.NPUPlace(device_id)
        if avaliable_mlu_device:
            if not core.is_compiled_with_mlu():
                raise ValueError(
                    "The device should not be {}, since PaddlePaddle is "
                    "not compiled with mlu".format(avaliable_mlu_device)
                )
            device_info_list = device.split(':', 1)
            device_id = device_info_list[1]
            device_id = int(device_id)
            place = core.MLUPlace(device_id)
        if (
            not avaliable_gpu_device
            and not avaliable_xpu_device
            and not avaliable_npu_device
            and not avaliable_mlu_device
        ):
            device_info_list = device.split(':', 1)
            device_type = device_info_list[0]
            if device_type in core.get_all_custom_device_type():
                device_id = device_info_list[1]
                device_id = int(device_id)
                place = core.CustomPlace(device_type, device_id)
            else:
                raise ValueError(
                    "The device must be a string which is like 'cpu', {}".format(
                        ', '.join(
                            "'{}', '{}:x'".format(x, x)
                            for x in ['gpu', 'xpu', 'npu', 'mlu']
                            + core.get_all_custom_device_type()
                        )
                    )
                )
    return place
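

# Illustrative mapping performed by _convert_to_place (a sketch, assuming the
# corresponding backend is compiled in and the FLAGS_selected_* environment
# variables are unset so device index 0 is the default):
#
#     _convert_to_place('cpu')        # -> core.CPUPlace()
#     _convert_to_place('gpu')        # -> core.CUDAPlace(ParallelEnv().dev_id)
#     _convert_to_place('gpu:1')      # -> core.CUDAPlace(1)
#     _convert_to_place('xpu:0')      # -> core.XPUPlace(0)
#     _convert_to_place('FakeXPU:0')  # -> core.CustomPlace('FakeXPU', 0), where
#                                     #    'FakeXPU' is a hypothetical registered
#                                     #    custom device type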


def set_device(device):
    """
    Paddle supports running calculations on various types of devices, including CPU, GPU, XPU, NPU, MLU and IPU.
    They are represented by string identifiers. This function specifies the global device
    on which the OP will run.

    Parameters:
        device(str): This parameter determines the specific running device.
            It can be ``cpu``, ``gpu``, ``xpu``, ``npu``, ``mlu``, ``gpu:x``, ``xpu:x``, ``npu:x``, ``mlu:x`` and ``ipu``,
            where ``x`` is the index of the GPUs, XPUs, NPUs or MLUs.

    Examples:

     .. code-block:: python

        import paddle

        paddle.device.set_device("cpu")
        x1 = paddle.ones(name='x1', shape=[1, 2], dtype='int32')
        x2 = paddle.zeros(name='x2', shape=[1, 2], dtype='int32')
        data = paddle.stack([x1,x2], axis=1)
    """
    place = _convert_to_place(device)
    framework._set_expected_place(place)
    return place
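

# A minimal usage sketch (illustrative): select an accelerator only when the
# corresponding backend is compiled in (and, for the 'gpu:0' case, assuming at
# least one visible GPU), falling back to the CPU otherwise.
#
#     import paddle
#
#     if paddle.device.is_compiled_with_cuda():
#         paddle.device.set_device("gpu:0")  # subsequent ops run on GPU 0
#     else:
#         paddle.device.set_device("cpu")
#     x = paddle.ones(shape=[2, 2], dtype='float32')  # created on the chosen device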


def get_device():
    """
    This function returns the current global device on which the program is running.
    It's a string like 'cpu', 'gpu:x', 'xpu:x', 'mlu:x' or 'npu:x'. If the global device is not
    set, it returns 'gpu:x' when CUDA is available, otherwise it returns 'cpu'.

    Examples:

     .. code-block:: python

        import paddle
        device = paddle.device.get_device()

    """
    device = ''
    place = framework._current_expected_place()
    if isinstance(place, core.CPUPlace):
        device = 'cpu'
    elif isinstance(place, core.CUDAPlace):
        device_id = place.get_device_id()
        device = 'gpu:' + str(device_id)
    elif isinstance(place, core.XPUPlace):
        device_id = place.get_device_id()
        device = 'xpu:' + str(device_id)
    elif isinstance(place, core.NPUPlace):
        device_id = place.get_device_id()
        device = 'npu:' + str(device_id)
    elif isinstance(place, core.IPUPlace):
        num_devices = core.get_ipu_device_count()
        device = "ipus:{{0-{}}}".format(num_devices - 1)
    elif isinstance(place, core.MLUPlace):
        device_id = place.get_device_id()
        device = 'mlu:' + str(device_id)
    elif isinstance(place, core.CustomPlace):
        device_id = place.get_device_id()
        device_type = place.get_device_type()
        device = device_type + ':' + str(device_id)
    else:
        raise ValueError("The device specification {} is invalid".format(place))

    return device
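

# Round-trip sketch (illustrative): the string returned by get_device() mirrors
# the device selected with set_device().
#
#     import paddle
#
#     paddle.device.set_device("cpu")
#     assert paddle.device.get_device() == "cpu"
#     # With a CUDA build and set_device("gpu:0"), get_device() returns 'gpu:0'.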


def get_all_device_type():
    """
    Get all available device types.

    Returns:
        A list of all available device types.

    Examples:
        .. code-block:: python

            import paddle
            paddle.device.get_all_device_type()

            # Case 1: paddlepaddle-cpu package installed, and no custom device registered.
            # Output: ['cpu']

            # Case 2: paddlepaddle-gpu package installed, and no custom device registered.
            # Output: ['cpu', 'gpu']

            # Case 3: paddlepaddle-cpu package installed, and custom device 'CustomCPU' is registered.
            # Output: ['cpu', 'CustomCPU']

            # Case 4: paddlepaddle-gpu package installed, and custom devices 'CustomCPU' and 'CustomGPU' are registered.
            # Output: ['cpu', 'gpu', 'CustomCPU', 'CustomGPU']
    """
    return core.get_all_device_type()


def get_all_custom_device_type():
    """
    Get all available custom device types.

    Returns:
        A list of all available custom device types.

    Examples:
        .. code-block:: python

            import paddle
            paddle.device.get_all_custom_device_type()

            # Case 1: paddlepaddle-gpu package installed, and no custom device registered.
            # Output: None

            # Case 2: paddlepaddle-gpu package installed, and custom devices 'CustomCPU' and 'CustomGPU' are registered.
            # Output: ['CustomCPU', 'CustomGPU']
    """
    return core.get_all_custom_device_type()


def get_available_device():
    """
    Get all available devices.

    Returns:
        A list of all available devices.

    Examples:
        .. code-block:: python

            import paddle
            paddle.device.get_available_device()

            # Case 1: paddlepaddle-cpu package installed, and no custom device registered.
            # Output: ['cpu']

            # Case 2: paddlepaddle-gpu package installed, and no custom device registered.
            # Output: ['cpu', 'gpu:0', 'gpu:1']

            # Case 3: paddlepaddle-cpu package installed, and custom device 'CustomCPU' is registered.
            # Output: ['cpu', 'CustomCPU']

            # Case 4: paddlepaddle-gpu package installed, and custom devices 'CustomCPU' and 'CustomGPU' are registered.
            # Output: ['cpu', 'gpu:0', 'gpu:1', 'CustomCPU', 'CustomGPU:0', 'CustomGPU:1']
    """
    return core.get_available_device()


def get_available_custom_device():
    """
    Get all available custom devices.

    Returns:
       A list of all available custom devices.

    Examples:
        .. code-block:: python

            import paddle
            paddle.device.get_available_custom_device()

            # Case 1: paddlepaddle-gpu package installed, and no custom device registered.
            # Output: None

            # Case 2: paddlepaddle-gpu package installed, and custom devices 'CustomCPU' and 'CustomGPU' are registered.
            # Output: ['CustomCPU', 'CustomGPU:0', 'CustomGPU:1']
    """
    return core.get_available_custom_device()
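

# Putting the discovery helpers together (a sketch; 'CustomCPU' is a
# hypothetical custom device name used only for illustration):
#
#     import paddle
#
#     print(paddle.device.get_all_device_type())   # e.g. ['cpu', 'gpu']
#     print(paddle.device.get_available_device())  # e.g. ['cpu', 'gpu:0', 'gpu:1']
#     custom = paddle.device.get_available_custom_device()
#     if custom:                                   # e.g. ['CustomCPU']
#         paddle.device.set_device(custom[0])      # run on the first custom device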