Unverified commit 04e55582, authored by Qi Li, committed by GitHub

[Plugin] Fix Custom device in eager mode, test=develop (#43952)

* [Plugin] Fix Custom device in eager mode, test=develop

* update test case, test=develop

* update ut for coverage, test=develop
Parent b83138d0
@@ -336,7 +336,12 @@ endif()
 if(LINUX
    AND NOT WITH_CUSTOM_DEVICE
    AND NOT ON_INFER)
-  set(WITH_CUSTOM_DEVICE ON)
+  set(WITH_CUSTOM_DEVICE
+      ON
+      CACHE BOOL "Enable Custom Device when compiling for Linux" FORCE)
+  message(
+    "Enable Custom Device when compiling for Linux. Force WITH_CUSTOM_DEVICE=ON."
+  )
 endif()
 if(WIN32)
...
@@ -53,6 +53,10 @@ void Copy(const Context& dev_ctx,
 #ifdef PADDLE_WITH_XPU
   } else if (paddle::platform::is_xpu_place(dst_place)) {
     dst_ptr = dev_ctx.Alloc(dst, src.dtype());
+#endif
+#ifdef PADDLE_WITH_CUSTOM_DEVICE
+  } else if (paddle::platform::is_custom_place(dst_place)) {
+    dst_ptr = dev_ctx.Alloc(dst, src.dtype());
 #endif
   }
...
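Note: this new branch lets phi's Copy allocate on a custom place via the device context instead of falling through, which is what eager-mode host-to-device copies rely on. A minimal Python sketch of the path it serves, assuming the custom_cpu example plugin from PaddleCustomDevice is built and discoverable through CUSTOM_DEVICE_ROOT (as in the tests below):

import numpy as np
import paddle

# Assumes CUSTOM_DEVICE_ROOT points at a built custom_cpu plugin.
paddle.set_device('custom_cpu')

# Creating a tensor from host data goes through Copy with a custom
# destination place, which now allocates via dev_ctx.Alloc.
t = paddle.to_tensor(np.ones([2, 2], dtype='float32'))
print(t.place)  # expected to show a custom place such as custom_cpu:0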
-py_test(test_custom_kernel_dot SRCS test_custom_kernel_dot.py)
-py_test(test_custom_kernel_load SRCS test_custom_kernel_load.py)
+file(
+  GLOB TEST_OPS
+  RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}"
+  "test_*.py")
+string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}")
+set(CUSTOM_ENVS
+    PADDLE_SOURCE_DIR=${PADDLE_SOURCE_DIR}
+    PADDLE_BINARY_DIR=${PADDLE_BINARY_DIR}
+    CUSTOM_DEVICE_ROOT=${CMAKE_BINARY_DIR}/python/paddle/fluid/tests/custom_kernel
+)
+foreach(TEST_OP ${TEST_OPS})
+  py_test(${TEST_OP} SRCS ${TEST_OP}.py ENVS ${CUSTOM_ENVS})
+endforeach()
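Note: with the environment now injected through py_test(... ENVS ${CUSTOM_ENVS}), the test scripts no longer export CUSTOM_DEVICE_ROOT themselves (see the Python diff further down). A minimal sketch of how a test can consume these variables, assuming it is launched through ctest/py_test so the ENVS are set:

import os

# Provided by py_test(... ENVS ${CUSTOM_ENVS}); not set inside the test itself.
custom_device_root = os.environ.get('CUSTOM_DEVICE_ROOT')
binary_dir = os.environ.get('PADDLE_BINARY_DIR')

assert custom_device_root is not None, 'run through ctest/py_test so ENVS are set'
print('compiled custom kernels are loaded from:', custom_device_root)
print('third_party lives under:', os.path.join(binary_dir, 'third_party'))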
@@ -48,8 +48,8 @@ paddle_custom_kernel_include = [
     os.path.join(site_packages_path, 'paddle', 'include'),
 ]
 # include path third_party
-compile_third_party_path = os.path.join(os.environ['PADDLE_ROOT'],
-                                        'build/third_party')
+compile_third_party_path = os.path.join(os.environ['PADDLE_BINARY_DIR'],
+                                        'third_party')
 paddle_custom_kernel_include += [
     os.path.join(compile_third_party_path, 'install/gflags/include'),  # gflags
     os.path.join(compile_third_party_path, 'install/glog/include'),  # glog
...
@@ -50,8 +50,8 @@ paddle_custom_kernel_include = list(
         site_packages_path))
 # include path third_party
-compile_third_party_path = os.path.join(os.environ['PADDLE_ROOT'],
-                                        'build/third_party')
+compile_third_party_path = os.path.join(os.environ['PADDLE_BINARY_DIR'],
+                                        'third_party')
 paddle_custom_kernel_include += [
     os.path.join(compile_third_party_path, 'install/gflags/include'),  # gflags
     os.path.join(compile_third_party_path, 'install/glog/include'),  # glog
...
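Note: the setup scripts now resolve third_party from PADDLE_BINARY_DIR directly, dropping the hard-coded assumption that the build tree lives at $PADDLE_ROOT/build. A small sketch with a hypothetical path for illustration; in the real test the variable comes from the CMake ENVS above:

import os

# Hypothetical value for illustration only.
os.environ.setdefault('PADDLE_BINARY_DIR', '/workspace/Paddle/build')

third_party = os.path.join(os.environ['PADDLE_BINARY_DIR'], 'third_party')
print(os.path.join(third_party, 'install/gflags/include'))
print(os.path.join(third_party, 'install/glog/include'))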
@@ -31,10 +31,6 @@ class TestCustomKernelDot(unittest.TestCase):
             cur_dir, sys.executable)
         os.system(cmd)
 
-        # set environment for loading and registering compiled custom kernels
-        # only valid in current process
-        os.environ['CUSTOM_DEVICE_ROOT'] = cur_dir
-
     def test_custom_kernel_dot_run(self):
         # test dot run
         x_data = np.random.uniform(1, 5, [2, 10]).astype(np.int8)
@@ -52,9 +48,6 @@ class TestCustomKernelDot(unittest.TestCase):
             "custom kernel dot out: {},\n numpy dot out: {}".format(
                 out.numpy(), result))
 
-    def tearDown(self):
-        del os.environ['CUSTOM_DEVICE_ROOT']
-
 
 class TestCustomKernelDotC(unittest.TestCase):
@@ -67,10 +60,6 @@ class TestCustomKernelDotC(unittest.TestCase):
             cur_dir, sys.executable)
         os.system(cmd)
 
-        # set environment for loading and registering compiled custom kernels
-        # only valid in current process
-        os.environ['CUSTOM_DEVICE_ROOT'] = cur_dir
-
     def test_custom_kernel_dot_run(self):
         # test dot run
         x_data = np.random.uniform(1, 5, [2, 10]).astype(np.int8)
@@ -88,9 +77,6 @@ class TestCustomKernelDotC(unittest.TestCase):
             "custom kernel dot out: {},\n numpy dot out: {}".format(
                 out.numpy(), result))
 
-    def tearDown(self):
-        del os.environ['CUSTOM_DEVICE_ROOT']
-
 
 if __name__ == '__main__':
     if os.name == 'nt' or sys.platform.startswith('darwin'):
...
 if(WITH_CUSTOM_DEVICE)
-  py_test(test_custom_cpu_plugin SRCS test_custom_cpu_plugin.py)
-  set_tests_properties(test_custom_cpu_plugin PROPERTIES TIMEOUT 120)
+  file(
+    GLOB TEST_OPS
+    RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}"
+    "test_*.py")
+  string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}")
+  foreach(TEST_OP ${TEST_OPS})
+    py_test(${TEST_OP} SRCS ${TEST_OP}.py)
+  endforeach()
 endif()
@@ -32,12 +32,15 @@ class TestCustomCPUPlugin(unittest.TestCase):
         os.environ['CUSTOM_DEVICE_ROOT'] = os.path.join(
             cur_dir, 'PaddleCustomDevice/backends/custom_cpu/build')
 
-    def test_custom_device_dataloader(self):
+    def test_custom_device(self):
         import paddle
         with paddle.fluid.framework._test_eager_guard():
             self._test_custom_device_dataloader()
+            self._test_custom_device_mnist()
+            self._test_eager_backward_api()
         self._test_custom_device_dataloader()
+        self._test_custom_device_mnist()
 
     def _test_custom_device_dataloader(self):
         import paddle
@@ -60,13 +63,6 @@ class TestCustomCPUPlugin(unittest.TestCase):
             self.assertTrue(label.place.is_custom_place())
             break
 
-    def test_custom_device_mnist(self):
-        import paddle
-        with paddle.fluid.framework._test_eager_guard():
-            self._test_custom_device_mnist()
-        self._test_custom_device_mnist()
-
     def _test_custom_device_mnist(self):
         import paddle
@@ -120,6 +116,23 @@ class TestCustomCPUPlugin(unittest.TestCase):
         self.assertTrue(pred.place.is_custom_place())
 
+    def _test_eager_backward_api(self):
+        x = np.random.random([2, 2]).astype("float32")
+        y = np.random.random([2, 2]).astype("float32")
+        grad = np.ones([2, 2]).astype("float32")
+
+        import paddle
+        paddle.set_device('custom_cpu')
+        x_tensor = paddle.to_tensor(x, stop_gradient=False)
+        y_tensor = paddle.to_tensor(y)
+        z1_tensor = paddle.matmul(x_tensor, y_tensor)
+        z2_tensor = paddle.matmul(x_tensor, y_tensor)
+
+        grad_tensor = paddle.to_tensor(grad)
+        paddle.autograd.backward([z1_tensor, z2_tensor], [grad_tensor, None])
+
+        self.assertTrue(x_tensor.grad.place.is_custom_place())
+
     def tearDown(self):
         del os.environ['CUSTOM_DEVICE_ROOT']
...
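Note: the new _test_eager_backward_api covers paddle.autograd.backward with multiple output tensors and a partially specified gradient list. For reference, a standalone sketch of the same API on the default CPU place (no plugin required); the only assumption is that a None entry falls back to a ones-like gradient for that output:

import numpy as np
import paddle

paddle.set_device('cpu')  # same API, no custom device plugin needed here

x = paddle.to_tensor(np.random.random([2, 2]).astype('float32'),
                     stop_gradient=False)
y = paddle.to_tensor(np.random.random([2, 2]).astype('float32'))

z1 = paddle.matmul(x, y)
z2 = paddle.matmul(x, y)

# One explicit gradient for z1; None lets z2 use a default ones-like gradient.
grad = paddle.to_tensor(np.ones([2, 2], dtype='float32'))
paddle.autograd.backward([z1, z2], [grad, None])

print(x.grad.shape)  # [2, 2]: gradients accumulated from both outputs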