From 134c9c0ca81dafde642a9314a583d8b474c83817 Mon Sep 17 00:00:00 2001
From: Zhou Wei <1183042833@qq.com>
Date: Mon, 27 Mar 2023 16:50:10 +0800
Subject: [PATCH] [Zero-Dim] add FLAGS_set_to_1d to control whether 0D Tensor
 is hacked to 1D numpy, add ut for xpu (#51899)

---
 paddle/fluid/pybind/CMakeLists.txt            |  1 +
 paddle/fluid/pybind/eager_method.cc           |  5 +-
 paddle/phi/core/flags.cc                      | 10 +++
 paddle/scripts/paddle_build.bat               |  1 +
 paddle/scripts/paddle_build.sh                |  1 +
 .../tests/unittests/test_zero_dim_tensor.py   |  6 +-
 .../unittests/xpu/test_zero_dim_tensor_xpu.py | 80 +++++++++++++++++--
 7 files changed, 93 insertions(+), 11 deletions(-)

diff --git a/paddle/fluid/pybind/CMakeLists.txt b/paddle/fluid/pybind/CMakeLists.txt
index dad9d368aee..e4fb312c0c1 100755
--- a/paddle/fluid/pybind/CMakeLists.txt
+++ b/paddle/fluid/pybind/CMakeLists.txt
@@ -504,6 +504,7 @@ if(WITH_PYTHON)
     list(APPEND PYBIND_DEPS tensor_api)
     list(APPEND PYBIND_DEPS eager_tensor_operants)
     list(APPEND PYBIND_DEPS pybind_util)
+    list(APPEND PYBIND_DEPS flags)
   endif()
 
   # On Linux, cc_library(paddle SHARED ..) will generate the libpaddle.so,
diff --git a/paddle/fluid/pybind/eager_method.cc b/paddle/fluid/pybind/eager_method.cc
index 22cc3c8c582..4b72e9fb81e 100644
--- a/paddle/fluid/pybind/eager_method.cc
+++ b/paddle/fluid/pybind/eager_method.cc
@@ -62,6 +62,8 @@ typedef SSIZE_T ssize_t;
 #include "paddle/phi/core/tensor_utils.h"
 #include "paddle/phi/kernels/funcs/math_function.h"
 
+DECLARE_bool(set_to_1d);
+
 namespace paddle {
 namespace pybind {
 
@@ -124,7 +126,8 @@ static PyObject* tensor_method_numpy(TensorObject* self,
   size_t numel = 1;
   if (py_rank == 0) {
     Py_ssize_t args_num = PyTuple_Size(args);
-    bool set_to_1d = true;
+    // FLAGS_set_to_1d defaults to true during the 2.5 transition period
+    bool set_to_1d = FLAGS_set_to_1d;
     if (args_num == (Py_ssize_t)1) {
       PyObject* obj = PyTuple_GET_ITEM(args, 0);
       if (obj == Py_False) {
diff --git a/paddle/phi/core/flags.cc b/paddle/phi/core/flags.cc
index 53fd2a59d39..1b3cf23f3fc 100644
--- a/paddle/phi/core/flags.cc
+++ b/paddle/phi/core/flags.cc
@@ -744,6 +744,16 @@ PADDLE_DEFINE_EXPORTED_int32(
    "less FLAGS_max_inplace_grad_add, than it will be use several grad_add"
    "instead of sum. Default is 0.");
 
+/**
+ * Controls the hack in Tensor.numpy() that converts a 0D Tensor to 1D.
+ * [true]: return a 1D numpy array of shape (1,) for a 0D Tensor (keep the hack)
+ * [false]: return a 0D numpy array of shape () (disable the hack)
+ *
+ * It defaults to true during the 2.5 transition period and will be
+ * removed in a future release (2.6 or 2.7).
+ */
+PADDLE_DEFINE_EXPORTED_bool(set_to_1d, true, "set 0D Tensor to 1D numpy");
+
 /**
  * Debug related FLAG
  * Name: tracer_mkldnn_ops_on
diff --git a/paddle/scripts/paddle_build.bat b/paddle/scripts/paddle_build.bat
index 1af31da94f8..16e1befa025 100644
--- a/paddle/scripts/paddle_build.bat
+++ b/paddle/scripts/paddle_build.bat
@@ -685,6 +685,7 @@ for /F %%# in ('wmic os get localdatetime^|findstr 20') do set start=%%#
 set start=%start:~4,10%
 
 set FLAGS_call_stack_level=2
+set FLAGS_set_to_1d=False
 dir %THIRD_PARTY_PATH:/=\%\install\openblas\lib
 dir %THIRD_PARTY_PATH:/=\%\install\openblas\bin
 dir %THIRD_PARTY_PATH:/=\%\install\zlib\bin
diff --git a/paddle/scripts/paddle_build.sh b/paddle/scripts/paddle_build.sh
index 2ab3ff711be..551c46ee804 100644
--- a/paddle/scripts/paddle_build.sh
+++ b/paddle/scripts/paddle_build.sh
@@ -61,6 +61,7 @@ function init() {
 
     # NOTE(chenweihang): For easy debugging, CI displays the C++ error stacktrace by default
     export FLAGS_call_stack_level=2
+    export FLAGS_set_to_1d=False
 }
 
 function cmake_base() {
diff --git a/python/paddle/fluid/tests/unittests/test_zero_dim_tensor.py b/python/paddle/fluid/tests/unittests/test_zero_dim_tensor.py
index 233d69d1fd0..7523a27e386 100644
--- a/python/paddle/fluid/tests/unittests/test_zero_dim_tensor.py
+++ b/python/paddle/fluid/tests/unittests/test_zero_dim_tensor.py
@@ -1100,12 +1100,10 @@ class TestSundryAPI(unittest.TestCase):
 
     def test_numpy(self):
         x = paddle.full([], 0.5)
-        # 0D Tensor hack to 1D Numpy defaut, will remove in future
         x_np = x.numpy()
-        np.testing.assert_array_equal(x_np.shape, (1,))
-        np.testing.assert_array_equal(x_np, np.array([0.5]))
+        np.testing.assert_array_equal(x_np.shape, ())
+        np.testing.assert_array_equal(x_np, np.array(0.5))
 
-        # return origin correct numpy
         x_np = x.numpy(False)
         np.testing.assert_array_equal(x_np.shape, ())
         np.testing.assert_array_equal(x_np, np.array(0.5))
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_zero_dim_tensor_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_zero_dim_tensor_xpu.py
index f25b7716ab2..1a9f59040d5 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_zero_dim_tensor_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_zero_dim_tensor_xpu.py
@@ -161,6 +161,20 @@ class TestReduceAPI(unittest.TestCase):
             np.testing.assert_allclose(x.grad.numpy(), np.array(1.0))
             np.testing.assert_allclose(out.grad.numpy(), np.array(1.0))
 
+            out1 = api(x, 0)
+            self.assertEqual(out1.shape, [])
+            self.assertEqual(out1, out)
+            out1.backward()
+
+            out2 = api(x, -1)
+            self.assertEqual(out2.shape, [])
+            self.assertEqual(out2, out)
+            out2.backward()
+
+            if x.grad is not None:
+                self.assertEqual(x.grad.shape, [])
+                np.testing.assert_allclose(x.grad.numpy(), np.array(3.0))
+
         paddle.enable_static()
 
 
@@ -463,32 +477,72 @@ class TestSundryAPI(unittest.TestCase):
             tmp = paddle.topk(x1, k=1, axis=2)
 
     def test_argmin(self):
+        # 1) x is 0D
         x = paddle.rand([])
         out1 = paddle.argmin(x, 0)
         out2 = paddle.argmin(x, -1)
         out3 = paddle.argmin(x, None)
+
         self.assertEqual(out1.shape, [])
-        np.testing.assert_allclose(out1, 0.0)
+        np.testing.assert_allclose(out1, 0)
 
         self.assertEqual(out2.shape, [])
-        np.testing.assert_allclose(out2, 0.0)
+        np.testing.assert_allclose(out2, 0)
 
         self.assertEqual(out3.shape, [])
-        np.testing.assert_allclose(out3, 0.0)
+        np.testing.assert_allclose(out3, 0)
+
+        # 2) x is 1D
+        x = paddle.rand([5])
+        x.stop_gradient = False
+        out = paddle.argmin(x, 0)
+        out.backward()
+        self.assertEqual(out.shape, [])
+
+        # 3) x is ND
+        x = paddle.rand([3, 5])
+        x.stop_gradient = False
+        out = paddle.argmin(x)
+        out.backward()
+        self.assertEqual(out.shape, [])
+
+        # 4) x is ND, keepdim=True
+        x = paddle.rand([3, 5])
+        x.stop_gradient = False
+        out = paddle.argmin(x, keepdim=True)
+        out.backward()
+        self.assertEqual(out.shape, [1, 1])
 
     def test_argmax(self):
+        # 1) x is 0D
         x = paddle.rand([])
         out1 = paddle.argmax(x, 0)
         out2 = paddle.argmax(x, -1)
         out3 = paddle.argmax(x, None)
+
         self.assertEqual(out1.shape, [])
-        np.testing.assert_allclose(out1, 0.0)
+        np.testing.assert_allclose(out1, 0)
 
         self.assertEqual(out2.shape, [])
-        np.testing.assert_allclose(out2, 0.0)
+        np.testing.assert_allclose(out2, 0)
 
         self.assertEqual(out3.shape, [])
-        np.testing.assert_allclose(out3, 0.0)
+        np.testing.assert_allclose(out3, 0)
+
+        # 2) x is 1D
+        x = paddle.rand([5])
+        out = paddle.argmax(x, 0)
+        self.assertEqual(out.shape, [])
+
+        # 3) x is ND
+        x = paddle.rand([3, 5])
+        out = paddle.argmax(x)
+        self.assertEqual(out.shape, [])
+
+        # 4) x is ND, keepdim=True
+        x = paddle.rand([3, 5])
+        out = paddle.argmax(x, keepdim=True)
+        self.assertEqual(out.shape, [1, 1])
 
     def test_median(self):
         x = paddle.rand([])
@@ -575,15 +629,29 @@ class TestSundryAPI(unittest.TestCase):
         np.testing.assert_array_equal(x.numpy(), np.array(0.5))
 
     def test_numel(self):
+        # 1) x is 0D
         out = paddle.numel(self.x)
         self.assertEqual(out.shape, [])
         np.testing.assert_array_equal(out.numpy(), np.array(1))
 
+        # 2) x is ND
+        x = paddle.full([3, 5], 0.5)
+        out = paddle.numel(x)
+        self.assertEqual(out.shape, [])
+        np.testing.assert_array_equal(out.numpy(), np.array(15))
+
     def test_rank(self):
+        # 1) x is 0D
         out = paddle.rank(self.x)
         self.assertEqual(out.shape, [])
         np.testing.assert_array_equal(out.numpy(), np.array(0))
 
+        # 2) x is ND
+        x = paddle.full([3, 5], 0.5)
+        out = paddle.rank(x)
+        self.assertEqual(out.shape, [])
+        np.testing.assert_array_equal(out.numpy(), np.array(2))
+
     def test_shape(self):
         out = paddle.shape(self.x)
         self.assertEqual(out.shape, [0])
-- 
GitLab
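For context, a minimal sketch of the behavior this flag controls at the Python level, distilled from the tests above. The paddle.set_flags call is an assumption (exported flags are normally settable that way at runtime); the CI scripts in this patch set the flag through the FLAGS_set_to_1d environment variable instead:

    import numpy as np
    import paddle

    # Assumed runtime toggle; the CI scripts above export FLAGS_set_to_1d=False.
    paddle.set_flags({'FLAGS_set_to_1d': False})

    x = paddle.full([], 0.5)  # 0D Tensor

    # With the hack disabled, numpy() preserves the 0D shape.
    x_np = x.numpy()
    np.testing.assert_array_equal(x_np.shape, ())
    np.testing.assert_array_equal(x_np, np.array(0.5))

    # x.numpy(False) bypasses the hack regardless of the flag's value.
    x_np = x.numpy(False)
    np.testing.assert_array_equal(x_np.shape, ())

With the default FLAGS_set_to_1d=True, x.numpy() instead returns a 1D array of shape (1,) during the 2.5 transition period.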