diff --git a/python/paddle/__init__.py b/python/paddle/__init__.py
index e749cf88b6a49846b678c1c4258d2b3c2a8c01a4..4f308a9860307334ad9e0772601a39399d85be18 100755
--- a/python/paddle/__init__.py
+++ b/python/paddle/__init__.py
@@ -258,6 +258,7 @@ from .tensor.stat import numel #DEFINE_ALIAS
 from .device import get_cudnn_version
 from .device import set_device
 from .device import get_device
+from .device import is_compiled_with_cuda
 # from .tensor.tensor import Tensor #DEFINE_ALIAS
 # from .tensor.tensor import LoDTensor #DEFINE_ALIAS
 # from .tensor.tensor import LoDTensorArray #DEFINE_ALIAS
diff --git a/python/paddle/device.py b/python/paddle/device.py
index de24fd875130e84d6532d033761f68a5c77a68c2..29e6f5a3df05eaa7f5fa1236c0daeca20ad0dbe9 100644
--- a/python/paddle/device.py
+++ b/python/paddle/device.py
@@ -18,6 +18,7 @@ import re
 from paddle.fluid import core
 from paddle.fluid import framework
 from paddle.fluid.dygraph.parallel import ParallelEnv
+from paddle.fluid.framework import is_compiled_with_cuda
 
 __all__ = [
     'get_cudnn_version',
@@ -29,7 +30,7 @@ __all__ = [
     # 'cuda_places',
     # 'CUDAPinnedPlace',
     # 'CUDAPlace',
-    # 'is_compiled_with_cuda'
+    'is_compiled_with_cuda'
 ]
 
 _cudnn_version = None
diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py
index 797b32f5d4768af59fa4e6aceb75e4b6d9029d91..c56644646df19b6be6744d886b5d6eba26e7b8c2 100644
--- a/python/paddle/fluid/framework.py
+++ b/python/paddle/fluid/framework.py
@@ -350,13 +350,13 @@ def is_compiled_with_cuda():
     """
     Whether this whl package can be used to run the model on GPU.
 
-    Returns (bool): support gpu or not.
+    Returns (bool): `True` if this whl package was compiled with CUDA support, otherwise `False`.
 
     Examples:
         .. code-block:: python
 
-            import paddle.fluid as fluid
-            support_gpu = fluid.is_compiled_with_cuda()
+            import paddle
+            support_gpu = paddle.is_compiled_with_cuda()
     """
     return core.is_compiled_with_cuda()
 