diff --git a/python/paddle/fluid/tests/unittests/test_gpu_package_without_gpu_device.py b/python/paddle/fluid/tests/unittests/test_gpu_package_without_gpu_device.py
index 2b51bec9cb0e7d7087b1476220148014f9c39cae..e528e742a277a0d21657b35dd80a04e70344620b 100644
--- a/python/paddle/fluid/tests/unittests/test_gpu_package_without_gpu_device.py
+++ b/python/paddle/fluid/tests/unittests/test_gpu_package_without_gpu_device.py
@@ -34,6 +34,7 @@ class TestGPUPackagePaddle(unittest.TestCase):
         with open(test_file, 'w') as wb:
             cmd_test = """
 import paddle
+paddle.utils.run_check()
 x = paddle.rand([3,4])
 assert x.place.is_gpu_place() is False, "There is no CUDA device, but Tensor's place is CUDAPlace"
 """
@@ -52,7 +53,7 @@ assert x.place.is_gpu_place() is False, "There is no CUDA device, but Tensor's p
         assert 'CPU device will be used by default' in str(
             stderr
         ), "GPU version Paddle is installed. But CPU device can't be used when CUDA device is not set properly"
-        assert "Error" not in str(
+        assert "AssertionError" not in str(
             stderr
         ), "There is no CUDA device, but Tensor's place is CUDAPlace"

diff --git a/python/paddle/utils/install_check.py b/python/paddle/utils/install_check.py
index 3b98680c89f25ed3733c424327ebf92657aa53a5..b39009985e735569ea479aa3f35557d30c13a586 100644
--- a/python/paddle/utils/install_check.py
+++ b/python/paddle/utils/install_check.py
@@ -74,6 +74,34 @@ def _is_cuda_available():
         return False
 
 
+def _run_dygraph_single(use_cuda):
+    """
+    Testing the simple network in dygraph mode using one CPU/GPU.
+
+    Args:
+        use_cuda (bool): Whether running with CUDA.
+    """
+    paddle.disable_static()
+    if use_cuda:
+        paddle.set_device('gpu')
+    else:
+        paddle.set_device('cpu')
+    weight_attr = paddle.ParamAttr(
+        name="weight", initializer=paddle.nn.initializer.Constant(value=0.5))
+    bias_attr = paddle.ParamAttr(
+        name="bias", initializer=paddle.nn.initializer.Constant(value=1.0))
+    linear = paddle.nn.Linear(
+        2, 4, weight_attr=weight_attr, bias_attr=bias_attr)
+    input_np = _prepare_data(1)
+    input_tensor = paddle.to_tensor(input_np)
+    linear_out = linear(input_tensor)
+    out = paddle.tensor.sum(linear_out)
+    out.backward()
+    opt = paddle.optimizer.Adam(
+        learning_rate=0.001, parameters=linear.parameters())
+    opt.step()
+
+
 def _run_static_single(use_cuda):
     """
     Testing the simple network with executor running directly, using one CPU/GPU.
@@ -152,7 +180,11 @@ def run_check():
 
     print("Running verify PaddlePaddle program ... ")
 
-    use_cuda = _is_cuda_available()
+    if paddle.is_compiled_with_cuda():
+        use_cuda = _is_cuda_available()
+    else:
+        use_cuda = False
+
     if use_cuda:
         device_str = "GPU"
         device_list = paddle.static.cuda_places()
@@ -162,6 +194,7 @@ def run_check():
     device_count = len(device_list)
 
     _run_static_single(use_cuda)
+    _run_dygraph_single(use_cuda)
     print("PaddlePaddle works well on 1 {}.".format(device_str))
 
     try:
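
For context, a minimal sketch (not part of the patch) of what the updated test drives end to end, assuming a GPU-built Paddle wheel running on a host with no usable CUDA device: `run_check()` now exercises the new dygraph path in addition to the static one, falls back to CPU, and the tensor created afterwards must not land on a CUDAPlace.

```python
# Illustrative sketch only; mirrors the script the unit test writes to disk.
# Assumes a GPU build of Paddle on a machine without a usable CUDA device,
# so run_check() should fall back to CPU rather than raise.
import paddle

paddle.utils.run_check()   # runs both _run_static_single and _run_dygraph_single
x = paddle.rand([3, 4])    # expected to be allocated on CPU in this environment
assert x.place.is_gpu_place() is False, \
    "There is no CUDA device, but Tensor's place is CUDAPlace"
```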