# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# TODO: define the functions to manipulate devices
import re
import os

from paddle.fluid import core
from paddle.fluid import framework
from paddle.fluid.dygraph.parallel import ParallelEnv
from paddle.fluid.framework import is_compiled_with_cinn  # noqa: F401
from paddle.fluid.framework import is_compiled_with_cuda  # noqa: F401
from paddle.fluid.framework import is_compiled_with_rocm  # noqa: F401
from . import cuda

# Explicit public API of paddle.device. The `# noqa` suppresses linter
# warnings: some of these names (is_compiled_with_cinn/cuda/rocm) are
# re-exports from paddle.fluid.framework rather than local definitions.
__all__ = [  # noqa
    'get_cudnn_version',
    'set_device',
    'get_device',
    'XPUPlace',
    'IPUPlace',
    'MLUPlace',
    'is_compiled_with_xpu',
    'is_compiled_with_ipu',
    'is_compiled_with_cinn',
    'is_compiled_with_cuda',
    'is_compiled_with_rocm',
    'is_compiled_with_npu',
    'is_compiled_with_mlu'
]

# Module-level cache for get_cudnn_version(); None until the first query.
_cudnn_version = None


# TODO: WITH_ASCEND_CL may changed to WITH_NPU or others in the future
# for consistent.
def is_compiled_with_npu():
    """
    Whether paddle was built with WITH_ASCEND_CL=ON to support Ascend NPU.

    Returns (bool): `True` if NPU is supported, otherwise `False`.

    Examples:
        .. code-block:: python

            import paddle
            support_npu = paddle.device.is_compiled_with_npu()
    """
    # The compiled core extension knows which backends this build supports.
    npu_supported = core.is_compiled_with_npu()
    return npu_supported


def is_compiled_with_ipu():
    """
    Whether paddle was built with WITH_IPU=ON to support Graphcore IPU.

    Returns (bool): `True` if IPU is supported, otherwise `False`.

    Examples:
        .. code-block:: python

            import paddle
            support_ipu = paddle.is_compiled_with_ipu()
    """
    # Query the compiled core extension for IPU support in this build.
    ipu_supported = core.is_compiled_with_ipu()
    return ipu_supported


def IPUPlace():
    """
    Return a Graphcore IPU Place

    Examples:
        .. code-block:: python

            # required: ipu

            import paddle
            place = paddle.device.IPUPlace()
    """
    # IPUPlace takes no device id: IPU selection is handled by the runtime.
    return core.IPUPlace()


def is_compiled_with_xpu():
    """
    Whether paddle was built with WITH_XPU=ON to support Baidu Kunlun

    Returns (bool): whether paddle was built with WITH_XPU=ON

    Examples:
        .. code-block:: python

            import paddle
            support_xpu = paddle.device.is_compiled_with_xpu()
    """
    # Query the compiled core extension for Kunlun XPU support.
    xpu_supported = core.is_compiled_with_xpu()
    return xpu_supported


def XPUPlace(dev_id):
    """
    Return a Baidu Kunlun Place

    Parameters:
        dev_id(int): Baidu Kunlun device id

    Examples:
        .. code-block:: python

            # required: xpu

            import paddle
            place = paddle.device.XPUPlace(0)
    """
    # Thin wrapper so users can build a place without importing core directly.
    return core.XPUPlace(dev_id)


def is_compiled_with_mlu():
    """
    Whether paddle was built with WITH_MLU=ON to support Cambricon MLU

    Returns (bool): whether paddle was built with WITH_MLU=ON

    Examples:
        .. code-block:: python

            # required: mlu

            import paddle
            support_mlu = paddle.device.is_compiled_with_mlu()
    """
    # Query the compiled core extension for Cambricon MLU support.
    mlu_supported = core.is_compiled_with_mlu()
    return mlu_supported


def MLUPlace(dev_id):
    """
    Return a Cambricon MLU Place

    Parameters:
        dev_id(int): MLU device id

    Examples:
        .. code-block:: python

            # required: mlu

            import paddle
            place = paddle.device.MLUPlace(0)
    """
    # Thin wrapper so users can build a place without importing core directly.
    return core.MLUPlace(dev_id)


def get_cudnn_version():
    """
    This function returns the version of cudnn. The return value is an int
    that encodes the cudnn version. For example, if it returns 7600, the
    installed cudnn version is 7.6.

    Returns:
        int: An int value which represents the cudnn version. Returns None
            when paddle was not compiled with CUDA or cudnn is not available.

    Examples:
        .. code-block:: python

            import paddle

            cudnn_version = paddle.device.get_cudnn_version()

    """
    global _cudnn_version
    if not core.is_compiled_with_cuda():
        return None
    if _cudnn_version is None:
        # Query once and cache the raw value; core.cudnn_version() reports
        # a negative number when cuDNN is unavailable in this build.
        _cudnn_version = int(core.cudnn_version())
    # BUG FIX: previously, when the cached value was negative, the first call
    # returned None but every later call returned the cached negative int.
    # Map a negative cached version to None consistently on every call.
    return None if _cudnn_version < 0 else _cudnn_version

191

def _convert_to_place(device):
    """
    Convert a device string into a paddle ``Place`` object.

    Parameters:
        device(str): Device specification such as ``cpu``, ``gpu``, ``xpu``,
            ``npu``, ``mlu``, ``ipu``, or ``<type>:<id>`` (e.g. ``gpu:0``).
            Matching is case-insensitive.

    Returns:
        The corresponding ``core.*Place`` instance.

    Raises:
        ValueError: If the string is malformed or names a device type that
            this PaddlePaddle build was not compiled with.
    """
    lower_device = device.lower()
    if lower_device == 'cpu':
        place = core.CPUPlace()
    elif lower_device == 'gpu':
        if not core.is_compiled_with_cuda():
            raise ValueError("The device should not be 'gpu', "
                             "since PaddlePaddle is not compiled with CUDA")
        # Bare 'gpu' selects the card assigned to this process by ParallelEnv.
        place = core.CUDAPlace(ParallelEnv().dev_id)
    elif lower_device == 'xpu':
        if not core.is_compiled_with_xpu():
            raise ValueError("The device should not be 'xpu', "
                             "since PaddlePaddle is not compiled with XPU")
        # The first entry of FLAGS_selected_xpus picks the card for this process.
        selected_xpus = os.getenv("FLAGS_selected_xpus", "0").split(",")
        device_id = int(selected_xpus[0])
        place = core.XPUPlace(device_id)
    elif lower_device == 'npu':
        if not core.is_compiled_with_npu():
            raise ValueError("The device should not be 'npu', "
                             "since PaddlePaddle is not compiled with NPU")
        selected_npus = os.getenv("FLAGS_selected_npus", "0").split(",")
        device_id = int(selected_npus[0])
        place = core.NPUPlace(device_id)
    elif lower_device == 'ipu':
        if not core.is_compiled_with_ipu():
            raise ValueError(
                "The device should not be 'ipu', "
                "since PaddlePaddle is not compiled with IPU")
        place = core.IPUPlace()
    elif lower_device == 'mlu':
        if not core.is_compiled_with_mlu():
            raise ValueError("The device should not be 'mlu', "
                             "since PaddlePaddle is not compiled with MLU")
        selected_mlus = os.getenv("FLAGS_selected_mlus", "0").split(",")
        device_id = int(selected_mlus[0])
        place = core.MLUPlace(device_id)
    else:
        # '<type>:<id>' specifications (typo fix: 'avaliable' -> 'available').
        available_gpu_device = re.match(r'gpu:\d+', lower_device)
        available_xpu_device = re.match(r'xpu:\d+', lower_device)
        available_npu_device = re.match(r'npu:\d+', lower_device)
        available_mlu_device = re.match(r'mlu:\d+', lower_device)
        if not (available_gpu_device or available_xpu_device or
                available_npu_device or available_mlu_device):
            raise ValueError(
                "The device must be a string which is like 'cpu', 'gpu', 'gpu:x', 'xpu', 'xpu:x', 'mlu', 'mlu:x', 'npu', 'npu:x' or ipu"
            )
        # BUG FIX: the messages below used to interpolate the re.Match object
        # itself (rendering as '<re.Match object ...>'); interpolate the
        # user-supplied device string instead. The duplicated split/int
        # parsing from each branch is also hoisted here.
        device_id = int(device.split(':', 1)[1])
        if available_gpu_device:
            if not core.is_compiled_with_cuda():
                raise ValueError(
                    "The device should not be {}, since PaddlePaddle is "
                    "not compiled with CUDA".format(device))
            place = core.CUDAPlace(device_id)
        elif available_xpu_device:
            if not core.is_compiled_with_xpu():
                raise ValueError(
                    "The device should not be {}, since PaddlePaddle is "
                    "not compiled with XPU".format(device))
            place = core.XPUPlace(device_id)
        elif available_npu_device:
            if not core.is_compiled_with_npu():
                raise ValueError(
                    "The device should not be {}, since PaddlePaddle is "
                    "not compiled with NPU".format(device))
            place = core.NPUPlace(device_id)
        elif available_mlu_device:
            if not core.is_compiled_with_mlu():
                raise ValueError(
                    "The device should not be {}, since PaddlePaddle is "
                    "not compiled with MLU".format(device))
            place = core.MLUPlace(device_id)
    return place
274

def set_device(device):
    """
    Paddle supports running calculations on various types of devices, including CPU, GPU, XPU, NPU, MLU and IPU.
    They are represented by string identifiers. This function can specify the global device
    which the OP will run.

    Parameters:
        device(str): This parameter determines the specific running device.
            It can be ``cpu``, ``gpu``, ``xpu``, ``npu``, ``mlu``, ``gpu:x``, ``xpu:x``, ``npu:x``, ``mlu:x`` and ``ipu``,
            where ``x`` is the index of the GPUs, XPUs, NPUs or MLUs.

    Examples:

     .. code-block:: python

        import paddle

        paddle.device.set_device("cpu")
        x1 = paddle.ones(name='x1', shape=[1, 2], dtype='int32')
        x2 = paddle.zeros(name='x2', shape=[1, 2], dtype='int32')
        data = paddle.stack([x1,x2], axis=1)
    """
    # Parse the string once, then install the resulting place as the global
    # expected place for subsequent operators.
    target_place = _convert_to_place(device)
    framework._set_expected_place(target_place)
    return target_place
301 302 303 304 305


def get_device():
    """
    This function can get the current global device of the program is running.
    It's a string which is like 'cpu', 'gpu:x', 'xpu:x', 'mlu:x' and 'npu:x'. If the global device is not
    set, it will return a string which is 'gpu:x' when cuda is available or it
    will return a string which is 'cpu' when cuda is not available.

    Examples:

     .. code-block:: python

        import paddle
        device = paddle.device.get_device()

    """
    place = framework._current_expected_place()
    # Map the concrete place type back to its string spelling.
    if isinstance(place, core.CPUPlace):
        return 'cpu'
    if isinstance(place, core.CUDAPlace):
        return 'gpu:' + str(place.get_device_id())
    if isinstance(place, core.XPUPlace):
        return 'xpu:' + str(place.get_device_id())
    if isinstance(place, core.NPUPlace):
        return 'npu:' + str(place.get_device_id())
    if isinstance(place, core.IPUPlace):
        # IPUs are reported as a range of all devices in the build.
        num_devices = core.get_ipu_device_count()
        return "ipus:{{0-{}}}".format(num_devices - 1)
    if isinstance(place, core.MLUPlace):
        return 'mlu:' + str(place.get_device_id())
    raise ValueError("The device specification {} is invalid".format(place))