Unverified · Commit e59a693e authored by Leo Chen, committed by GitHub

enable new-executor on windows to test it (#41301)

* enable new-executor on windows to test it

* add message

* fix ut

Parent: a5e00bb7
@@ -21,6 +21,19 @@
 namespace phi {
template <typename T, typename Context>
inline T GetValue(const Context& dev_ctx, const DenseTensor& x) {
T value = static_cast<T>(0);
if (x.place() != CPUPlace()) {
DenseTensor cpu_x;
Copy(dev_ctx, x, CPUPlace(), true, &cpu_x);
value = cpu_x.data<T>()[0];
} else {
value = x.data<T>()[0];
}
return value;
}
 template <typename T>
 __global__ void Range(T start, T step, int64_t size, T* out) {
   CUDA_KERNEL_LOOP(index, size) { out[index] = start + step * index; }
@@ -32,9 +45,9 @@ void RangeKernel(const Context& dev_ctx,
                  const DenseTensor& end,
                  const DenseTensor& step,
                  DenseTensor* out) {
-  T start_value = start.data<T>()[0];
-  T end_value = end.data<T>()[0];
-  T step_value = step.data<T>()[0];
+  T start_value = GetValue<T, Context>(dev_ctx, start);
+  T end_value = GetValue<T, Context>(dev_ctx, end);
+  T step_value = GetValue<T, Context>(dev_ctx, step);
   int64_t size = 0;
   phi::funcs::GetSize(start_value, end_value, step_value, &size);
...
@@ -394,9 +394,20 @@ def _is_enable_standalone_executor():
     Whether to use experimental executor `StandaloneExecutor`.
     """
     flag = False
-    env_val = os.environ.get('FLAGS_USE_STANDALONE_EXECUTOR', None)
+    # NOTE(zhiqiu): enable STANDALONE_EXECUTOR on windows platform by default
+    # It should be enabled on all platform in the future.
+    import platform
+    sysstr = platform.system().lower()
+    if sysstr == 'windows':
+        env_val = os.environ.get('FLAGS_USE_STANDALONE_EXECUTOR', 1)
+    else:
+        env_val = os.environ.get('FLAGS_USE_STANDALONE_EXECUTOR', None)
     if env_val in [1, '1', True, 'True', 'true']:
         flag = True
+        warnings.warn("STANDALONE_EXECUTOR is enabled.")
     return flag
...
@@ -103,6 +103,14 @@ def check(use_cuda):
 if __name__ == '__main__':
+    try:
+        check(use_cuda=False)
+        assert False
+    except Exception as e:
+        print(e)
+        print(type(e))
+        assert type(e) == RuntimeError
     if core.is_compiled_with_cuda():
         try:
             check(use_cuda=True)
@@ -113,10 +121,3 @@ if __name__ == '__main__':
             # Note. Enforce in cuda kernel may not catch in paddle, and
             # Exception type will be RuntimeError
             assert type(e) == OSError or type(e) == RuntimeError
-    try:
-        check(use_cuda=False)
-        assert False
-    except Exception as e:
-        print(e)
-        print(type(e))
-        assert type(e) == RuntimeError
@@ -47,10 +47,12 @@ class TestNanInf(unittest.TestCase):
         print(out)
         print(err)
+        assert returncode == 0
         # in python3, type(out+err) is 'bytes', need use encode
-        assert (out + err
-                ).find('There are `nan` or `inf` in tensor'.encode()) != -1
+        if paddle.fluid.core.is_compiled_with_cuda():
+            assert (out + err).find('find nan or inf==='.encode()) != -1
+        else:
+            assert (out + err
+                    ).find('There are `nan` or `inf` in tensor'.encode()) != -1

     def test_nan_inf_in_static_mode(self):
         self._python_interp += " check_nan_inf_base.py"
...
Markdown is supported.
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
To comment, please register or sign in.