From 06276f46a1e6e60eaff9737835a1ce1d70fe428d Mon Sep 17 00:00:00 2001
From: pangyoki
Date: Sun, 25 Apr 2021 10:49:58 +0800
Subject: [PATCH] let paddle.utils.install_check support CPU package with GPU device (#32428)

* let paddle.utils.install_check support CPU package with GPU device

* use use_cuda in dygraph checking

* add unittest for install_check
---
 .../test_gpu_package_without_gpu_device.py |  3 +-
 python/paddle/utils/install_check.py       | 35 ++++++++++++++++++-
 2 files changed, 36 insertions(+), 2 deletions(-)

diff --git a/python/paddle/fluid/tests/unittests/test_gpu_package_without_gpu_device.py b/python/paddle/fluid/tests/unittests/test_gpu_package_without_gpu_device.py
index 2b51bec9cb0..e528e742a27 100644
--- a/python/paddle/fluid/tests/unittests/test_gpu_package_without_gpu_device.py
+++ b/python/paddle/fluid/tests/unittests/test_gpu_package_without_gpu_device.py
@@ -34,6 +34,7 @@ class TestGPUPackagePaddle(unittest.TestCase):
         with open(test_file, 'w') as wb:
             cmd_test = """
 import paddle
+paddle.utils.run_check()
 x = paddle.rand([3,4])
 assert x.place.is_gpu_place() is False, "There is no CUDA device, but Tensor's place is CUDAPlace"
 """
@@ -52,7 +53,7 @@ assert x.place.is_gpu_place() is False, "There is no CUDA device, but Tensor's p
         assert 'CPU device will be used by default' in str(
             stderr
         ), "GPU version Paddle is installed. But CPU device can't be used when CUDA device is not set properly"
-        assert "Error" not in str(
+        assert "AssertionError" not in str(
             stderr
         ), "There is no CUDA device, but Tensor's place is CUDAPlace"
 
diff --git a/python/paddle/utils/install_check.py b/python/paddle/utils/install_check.py
index 3b98680c89f..b39009985e7 100644
--- a/python/paddle/utils/install_check.py
+++ b/python/paddle/utils/install_check.py
@@ -74,6 +74,34 @@ def _is_cuda_available():
         return False
 
 
+def _run_dygraph_single(use_cuda):
+    """
+    Testing the simple network in dygraph mode using one CPU/GPU.
+
+    Args:
+        use_cuda (bool): Whether running with CUDA.
+    """
+    paddle.disable_static()
+    if use_cuda:
+        paddle.set_device('gpu')
+    else:
+        paddle.set_device('cpu')
+    weight_attr = paddle.ParamAttr(
+        name="weight", initializer=paddle.nn.initializer.Constant(value=0.5))
+    bias_attr = paddle.ParamAttr(
+        name="bias", initializer=paddle.nn.initializer.Constant(value=1.0))
+    linear = paddle.nn.Linear(
+        2, 4, weight_attr=weight_attr, bias_attr=bias_attr)
+    input_np = _prepare_data(1)
+    input_tensor = paddle.to_tensor(input_np)
+    linear_out = linear(input_tensor)
+    out = paddle.tensor.sum(linear_out)
+    out.backward()
+    opt = paddle.optimizer.Adam(
+        learning_rate=0.001, parameters=linear.parameters())
+    opt.step()
+
+
 def _run_static_single(use_cuda):
     """
     Testing the simple network with executor running directly, using one CPU/GPU.
@@ -152,7 +180,11 @@ def run_check():
 
     print("Running verify PaddlePaddle program ... ")
 
-    use_cuda = _is_cuda_available()
+    if paddle.is_compiled_with_cuda():
+        use_cuda = _is_cuda_available()
+    else:
+        use_cuda = False
+
     if use_cuda:
         device_str = "GPU"
         device_list = paddle.static.cuda_places()
@@ -162,6 +194,7 @@ def run_check():
     device_count = len(device_list)
 
     _run_static_single(use_cuda)
+    _run_dygraph_single(use_cuda)
     print("PaddlePaddle works well on 1 {}.".format(device_str))
 
     try:
--
GitLab
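
Note (not part of the patch): below is a minimal usage sketch of the behavior this patch targets, assembled only from APIs that appear in the diff above (paddle.utils.run_check(), paddle.is_compiled_with_cuda(), paddle.rand, Tensor.place.is_gpu_place()). It assumes a machine where no CUDA device is usable (either the CPU wheel, or the GPU wheel without a properly configured CUDA device); the console wording is paraphrased, not guaranteed verbatim.

# Sketch: exercise the patched install check when no CUDA device is usable.
import paddle

# With this patch, run_check() falls back to CPU instead of failing when
# paddle.is_compiled_with_cuda() is True but no CUDA device can be used,
# and it now also runs the dygraph-mode check alongside the static one.
print("compiled with CUDA:", paddle.is_compiled_with_cuda())
paddle.utils.run_check()

# After the check, tensors should land on CPU in this no-CUDA scenario,
# mirroring the assertion added in the unit test above.
x = paddle.rand([3, 4])
assert x.place.is_gpu_place() is False, "expected a CPU placement here"
print("tensor place:", x.place)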