diff --git a/python/paddle/__init__.py b/python/paddle/__init__.py index e707de8e068640e28d3a06d539e33f767d7ab2b3..93b9a71ed7dac273c297d8d758423ddfc18666ee 100755 --- a/python/paddle/__init__.py +++ b/python/paddle/__init__.py @@ -257,6 +257,7 @@ from .tensor.stat import numel #DEFINE_ALIAS from .device import get_cudnn_version from .device import set_device from .device import get_device +from .device import is_compiled_with_cuda #DEFINE_ALIAS from .device import is_compiled_with_xpu from .device import XPUPlace # from .tensor.tensor import Tensor #DEFINE_ALIAS diff --git a/python/paddle/device.py b/python/paddle/device.py index 46d0ff7bedcecfefc7d054b0ccbcbf100c2fa0f6..c2f331caa8ab3e338d7d1148a15c1de1309aec34 100644 --- a/python/paddle/device.py +++ b/python/paddle/device.py @@ -18,6 +18,7 @@ import re from paddle.fluid import core from paddle.fluid import framework from paddle.fluid.dygraph.parallel import ParallelEnv +from paddle.fluid.framework import is_compiled_with_cuda #DEFINE_ALIAS __all__ = [ 'get_cudnn_version', @@ -31,7 +32,7 @@ __all__ = [ # 'cuda_places', # 'CUDAPinnedPlace', # 'CUDAPlace', - # 'is_compiled_with_cuda' + 'is_compiled_with_cuda' ] _cudnn_version = None diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py index 2afe3ac3d76899775e480d53d71dd65a3757092c..d4a4b135cdb5c6d9d25b564661289892e3f57339 100644 --- a/python/paddle/fluid/framework.py +++ b/python/paddle/fluid/framework.py @@ -360,13 +360,13 @@ def is_compiled_with_cuda(): """ Whether this whl package can be used to run the model on GPU. - Returns (bool): support gpu or not. + Returns (bool): `True` if this whl package was compiled with CUDA support, otherwise `False`. Examples: .. code-block:: python - import paddle.fluid as fluid - support_gpu = fluid.is_compiled_with_cuda() + import paddle + support_gpu = paddle.is_compiled_with_cuda() """ return core.is_compiled_with_cuda()