未验证 提交 bbe83ed1 编写于 作者: Q Qi Li 提交者: GitHub

[XPU] update XPU run check scripts, test=develop (#38698)

上级 40078103
......@@ -12,6 +12,8 @@ elseif(WITH_ROCM)
SET(PACKAGE_NAME "paddlepaddle-rocm")
elseif(WITH_ASCEND_CL)
SET(PACKAGE_NAME "paddlepaddle-npu")
elseif(WITH_XPU)
SET(PACKAGE_NAME "paddlepaddle-xpu")
else()
SET(PACKAGE_NAME "paddlepaddle")
endif()
......
......@@ -89,16 +89,35 @@ def _is_npu_available():
return False
def _run_dygraph_single(use_cuda, use_npu):
def _is_xpu_available():
    """
    Check whether an XPU device is available.

    Probes the runtime by enumerating XPU places; any failure (no device,
    driver problem, etc.) is treated as "not available" rather than an error,
    because this is a best-effort environment check.

    Returns:
        bool: True if at least one XPU place is detected, False otherwise.
              On failure a warning containing the original error is logged.
    """
    try:
        # xpu_places() raises or returns an empty list when no XPU is usable;
        # the assert turns the empty-list case into the except path as well.
        assert len(paddle.static.xpu_places()) > 0
        return True
    except Exception as e:
        # Deliberately broad: any probe failure means XPU is unusable here.
        logging.warning(
            "You are using XPU version PaddlePaddle, but there is no XPU "
            "detected on your machine. Maybe XPU devices are not set properly."
            "\n Original Error is {}".format(e))
        return False
def _run_dygraph_single(use_cuda, use_xpu, use_npu):
"""
Testing the simple network in dygraph mode using one CPU/GPU/XPU/NPU.
Args:
use_cuda (bool): Whether running with CUDA.
use_xpu (bool): Whether running with XPU.
use_npu (bool): Whether running with NPU.
"""
paddle.disable_static()
if use_cuda:
paddle.set_device('gpu')
elif use_xpu:
paddle.set_device('xpu')
elif use_npu:
paddle.set_device('npu')
else:
......@@ -119,12 +138,14 @@ def _run_dygraph_single(use_cuda, use_npu):
opt.step()
def _run_static_single(use_cuda, use_npu):
def _run_static_single(use_cuda, use_xpu, use_npu):
"""
Testing the simple network with executor running directly, using one CPU/GPU.
Testing the simple network with executor running directly, using one CPU/GPU/XPU/NPU.
Args:
use_cuda (bool): Whether running with CUDA.
use_xpu (bool): Whether running with XPU.
use_npu (bool): Whether running with NPU.
"""
paddle.enable_static()
with paddle.static.scope_guard(paddle.static.Scope()):
......@@ -138,6 +159,8 @@ def _run_static_single(use_cuda, use_npu):
if use_cuda:
place = paddle.CUDAPlace(0)
elif use_xpu:
place = paddle.XPUPlace(0)
elif use_npu:
place = paddle.NPUPlace(0)
else:
......@@ -151,12 +174,14 @@ def _run_static_single(use_cuda, use_npu):
paddle.disable_static()
def _run_static_parallel(use_cuda, use_npu, device_list):
def _run_static_parallel(use_cuda, use_xpu, use_npu, device_list):
"""
Testing the simple network in data parallel mode, using multiple CPU/GPU.
Args:
use_cuda (bool): Whether running with CUDA.
use_xpu (bool): Whether running with XPU.
use_npu (bool): Whether running with NPU.
device_list (int): The specified devices.
"""
paddle.enable_static()
......@@ -175,6 +200,9 @@ def _run_static_parallel(use_cuda, use_npu, device_list):
if use_cuda:
place = paddle.CUDAPlace(0)
elif use_xpu:
place = paddle.XPUPlace(0)
compiled_prog = train_prog
elif use_npu:
place = paddle.NPUPlace(0)
compiled_prog = train_prog
......@@ -210,19 +238,23 @@ def run_check():
print("Running verify PaddlePaddle program ... ")
use_cuda = False
use_xpu = False
use_npu = False
if paddle.is_compiled_with_cuda():
use_cuda = _is_cuda_available()
use_npu = False
elif paddle.is_compiled_with_xpu():
use_xpu = _is_xpu_available()
elif paddle.is_compiled_with_npu():
use_npu = _is_npu_available()
use_cuda = False
else:
use_npu = False
use_cuda = False
if use_cuda:
device_str = "GPU"
device_list = paddle.static.cuda_places()
elif use_xpu:
device_str = "XPU"
device_list = paddle.static.xpu_places()
elif use_npu:
device_str = "NPU"
device_list = paddle.static.npu_places()
......@@ -231,12 +263,12 @@ def run_check():
device_list = paddle.static.cpu_places(device_count=2)
device_count = len(device_list)
_run_static_single(use_cuda, use_npu)
_run_dygraph_single(use_cuda, use_npu)
_run_static_single(use_cuda, use_xpu, use_npu)
_run_dygraph_single(use_cuda, use_xpu, use_npu)
print("PaddlePaddle works well on 1 {}.".format(device_str))
try:
_run_static_parallel(use_cuda, use_npu, device_list)
_run_static_parallel(use_cuda, use_xpu, use_npu, device_list)
print("PaddlePaddle works well on {} {}s.".format(device_count,
device_str))
print(
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册