未验证 提交 a4c25b2f 编写于 作者: Z Zhang Ting 提交者: GitHub

use paddle.is_compiled_with_cuda (#27586)

* modify doc
上级 2f9cdd90
......@@ -257,6 +257,7 @@ from .tensor.stat import numel #DEFINE_ALIAS
from .device import get_cudnn_version
from .device import set_device
from .device import get_device
from .device import is_compiled_with_cuda #DEFINE_ALIAS
from .device import is_compiled_with_xpu
from .device import XPUPlace
# from .tensor.tensor import Tensor #DEFINE_ALIAS
......
......@@ -18,6 +18,7 @@ import re
from paddle.fluid import core
from paddle.fluid import framework
from paddle.fluid.dygraph.parallel import ParallelEnv
from paddle.fluid.framework import is_compiled_with_cuda #DEFINE_ALIAS
__all__ = [
'get_cudnn_version',
......@@ -31,7 +32,7 @@ __all__ = [
# 'cuda_places',
# 'CUDAPinnedPlace',
# 'CUDAPlace',
# 'is_compiled_with_cuda'
'is_compiled_with_cuda'
]
_cudnn_version = None
......
......@@ -360,13 +360,13 @@ def is_compiled_with_cuda():
"""
Whether this whl package can be used to run the model on GPU.
Returns (bool): support gpu or not.
Returns (bool): `True` if this package was compiled with CUDA support, otherwise `False`.
Examples:
.. code-block:: python
import paddle.fluid as fluid
support_gpu = fluid.is_compiled_with_cuda()
import paddle
support_gpu = paddle.is_compiled_with_cuda()
"""
return core.is_compiled_with_cuda()
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册