From 3cd78f6e594d99a8cfb435bd82484dccdd035cee Mon Sep 17 00:00:00 2001
From: chengduo
Date: Fri, 28 Jun 2019 13:54:10 +0800
Subject: [PATCH] add cuda_is_available (#18357)

* add cuda_is_available
test=release/1.5
---
 paddle/fluid/API.spec            |  3 ++-
 python/paddle/fluid/framework.py | 20 ++++++++++++++++++--
 2 files changed, 20 insertions(+), 3 deletions(-)

diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec
index 16e020f14..9e521dd51 100644
--- a/paddle/fluid/API.spec
+++ b/paddle/fluid/API.spec
@@ -11,9 +11,10 @@ paddle.fluid.default_main_program (ArgSpec(args=[], varargs=None, keywords=None,
 paddle.fluid.program_guard (ArgSpec(args=['main_program', 'startup_program'], varargs=None, keywords=None, defaults=(None,)), ('document', '78fb5c7f70ef76bcf4a1862c3f6b8191'))
 paddle.fluid.name_scope (ArgSpec(args=['prefix'], varargs=None, keywords=None, defaults=(None,)), ('document', '61660461e1f44e0480ca22fa8a482c41'))
 paddle.fluid.cuda_places (ArgSpec(args=['device_ids'], varargs=None, keywords=None, defaults=(None,)), ('document', '7f3068b82fc427bfa04b1af953610992'))
-paddle.fluid.cpu_places (ArgSpec(args=['device_count'], varargs=None, keywords=None, defaults=(None,)), ('document', '8b674e9a7ac7944c27fd853b675c2cb2'))
+paddle.fluid.cpu_places (ArgSpec(args=['device_count'], varargs=None, keywords=None, defaults=(None,)), ('document', 'dad57e8624794766d770cea905dec1c2'))
 paddle.fluid.cuda_pinned_places (ArgSpec(args=['device_count'], varargs=None, keywords=None, defaults=(None,)), ('document', 'cc83b6c5ba8be38ff3ee87e9cec9de5f'))
 paddle.fluid.in_dygraph_mode (ArgSpec(args=[], varargs=None, keywords=None, defaults=None), ('document', 'eddb7a1f0083dcc70e9f6c71ee003cb9'))
+paddle.fluid.is_compiled_with_cuda (ArgSpec(args=[], varargs=None, keywords=None, defaults=None), ('document', '60c7f107a5050aeb58bb74eb175672b5'))
 paddle.fluid.Executor.__init__ (ArgSpec(args=['self', 'place'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
 paddle.fluid.Executor.close (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '3a584496aa1343f36eebf3c46b323a74'))
 paddle.fluid.Executor.infer_from_dataset (ArgSpec(args=['self', 'program', 'dataset', 'scope', 'thread', 'debug', 'fetch_list', 'fetch_info', 'print_period'], varargs=None, keywords=None, defaults=(None, None, None, 0, False, None, None, 100)), ('document', 'bedc29ad01c1b911e99032ee1e19ac59'))
diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py
index 1ddc2654d..08504b922 100644
--- a/python/paddle/fluid/framework.py
+++ b/python/paddle/fluid/framework.py
@@ -44,6 +44,7 @@ __all__ = [
     'cpu_places',
     'cuda_pinned_places',
     'in_dygraph_mode',
+    'is_compiled_with_cuda',
 ]
 
 EMPTY_VAR_NAME = core.kEmptyVarName()
@@ -102,6 +103,21 @@ def _cuda_ids():
     return device_ids
 
 
+def is_compiled_with_cuda():
+    """
+    Whether this whl package can be used to run the model on GPU.
+
+    Returns (bool): support gpu or not.
+
+    Examples:
+        .. code-block:: python
+
+            import paddle.fluid as fluid
+            support_gpu = fluid.is_compiled_with_cuda()
+    """
+    return core.is_compiled_with_cuda()
+
+
 def cuda_places(device_ids=None):
     """
     Create a list of :code:`fluid.CUDAPlace` objects.
@@ -145,8 +161,8 @@ def cpu_places(device_count=None):
 
     If :code:`device_count` is None, the device count would
     be determined by environment variable :code:`CPU_NUM`.
-    If :code:`CPU_NUM` is not set, the device count would
-    be determined by :code:`multiprocessing.cpu_count()`.
+    If :code:`CPU_NUM` is not set, the default value is 1,
+    i.e. CPU_NUM=1.
 
     Args:
         device_count (None|int): device number.
-- 
GitLab
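
For context, a minimal sketch of how the fluid.is_compiled_with_cuda() API added by this patch might be used to pick an execution place. Only is_compiled_with_cuda() comes from this change; the place selection and Executor setup below are illustrative assumptions, not part of the patch.

    import paddle.fluid as fluid

    # Query whether this whl package was built with CUDA support.
    support_gpu = fluid.is_compiled_with_cuda()

    # Fall back to CPU when the package has no GPU support (illustrative).
    place = fluid.CUDAPlace(0) if support_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())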