未验证 提交 134c9c0c 编写于 作者: zhouweiwei2014's avatar zhouweiwei2014 提交者: GitHub

[Zero-Dim] add FLAGS_set_to_1d, control whether to hack process to 1D, add ut for xpu (#51899)

上级 10145cb6
...@@ -504,6 +504,7 @@ if(WITH_PYTHON) ...@@ -504,6 +504,7 @@ if(WITH_PYTHON)
list(APPEND PYBIND_DEPS tensor_api) list(APPEND PYBIND_DEPS tensor_api)
list(APPEND PYBIND_DEPS eager_tensor_operants) list(APPEND PYBIND_DEPS eager_tensor_operants)
list(APPEND PYBIND_DEPS pybind_util) list(APPEND PYBIND_DEPS pybind_util)
list(APPEND PYBIND_DEPS flags)
endif() endif()
# On Linux, cc_library(paddle SHARED ..) will generate the libpaddle.so, # On Linux, cc_library(paddle SHARED ..) will generate the libpaddle.so,
......
...@@ -62,6 +62,8 @@ typedef SSIZE_T ssize_t; ...@@ -62,6 +62,8 @@ typedef SSIZE_T ssize_t;
#include "paddle/phi/core/tensor_utils.h" #include "paddle/phi/core/tensor_utils.h"
#include "paddle/phi/kernels/funcs/math_function.h" #include "paddle/phi/kernels/funcs/math_function.h"
DECLARE_bool(set_to_1d);
namespace paddle { namespace paddle {
namespace pybind { namespace pybind {
...@@ -124,7 +126,8 @@ static PyObject* tensor_method_numpy(TensorObject* self, ...@@ -124,7 +126,8 @@ static PyObject* tensor_method_numpy(TensorObject* self,
size_t numel = 1; size_t numel = 1;
if (py_rank == 0) { if (py_rank == 0) {
Py_ssize_t args_num = PyTuple_Size(args); Py_ssize_t args_num = PyTuple_Size(args);
bool set_to_1d = true; // true by default
bool set_to_1d = FLAGS_set_to_1d;
if (args_num == (Py_ssize_t)1) { if (args_num == (Py_ssize_t)1) {
PyObject* obj = PyTuple_GET_ITEM(args, 0); PyObject* obj = PyTuple_GET_ITEM(args, 0);
if (obj == Py_False) { if (obj == Py_False) {
......
...@@ -744,6 +744,16 @@ PADDLE_DEFINE_EXPORTED_int32( ...@@ -744,6 +744,16 @@ PADDLE_DEFINE_EXPORTED_int32(
"less FLAGS_max_inplace_grad_add, than it will be use several grad_add" "less FLAGS_max_inplace_grad_add, than it will be use several grad_add"
"instead of sum. Default is 0."); "instead of sum. Default is 0.");
/**
 * Tensor.numpy() contains a hack, and this flag can disable it:
 * [true]: convert a 0D Tensor to a 1D NumPy array
 * [false]: keep a 0D Tensor as a 0D NumPy array (hack disabled)
 *
 * For now this defaults to true during the 2.5 transition period;
 * the flag will be removed in a future release (2.6 or 2.7).
*/
PADDLE_DEFINE_EXPORTED_bool(set_to_1d, true, "set 0D Tensor to 1D numpy");
/** /**
* Debug related FLAG * Debug related FLAG
* Name: tracer_mkldnn_ops_on * Name: tracer_mkldnn_ops_on
......
...@@ -685,6 +685,7 @@ for /F %%# in ('wmic os get localdatetime^|findstr 20') do set start=%%# ...@@ -685,6 +685,7 @@ for /F %%# in ('wmic os get localdatetime^|findstr 20') do set start=%%#
set start=%start:~4,10% set start=%start:~4,10%
set FLAGS_call_stack_level=2 set FLAGS_call_stack_level=2
set FLAGS_set_to_1d=False
dir %THIRD_PARTY_PATH:/=\%\install\openblas\lib dir %THIRD_PARTY_PATH:/=\%\install\openblas\lib
dir %THIRD_PARTY_PATH:/=\%\install\openblas\bin dir %THIRD_PARTY_PATH:/=\%\install\openblas\bin
dir %THIRD_PARTY_PATH:/=\%\install\zlib\bin dir %THIRD_PARTY_PATH:/=\%\install\zlib\bin
......
...@@ -61,6 +61,7 @@ function init() { ...@@ -61,6 +61,7 @@ function init() {
# NOTE(chenweihang): For easy debugging, CI displays the C++ error stacktrace by default # NOTE(chenweihang): For easy debugging, CI displays the C++ error stacktrace by default
export FLAGS_call_stack_level=2 export FLAGS_call_stack_level=2
export FLAGS_set_to_1d=False
} }
function cmake_base() { function cmake_base() {
......
...@@ -1100,12 +1100,10 @@ class TestSundryAPI(unittest.TestCase): ...@@ -1100,12 +1100,10 @@ class TestSundryAPI(unittest.TestCase):
def test_numpy(self): def test_numpy(self):
x = paddle.full([], 0.5) x = paddle.full([], 0.5)
# By default, a 0D Tensor is hacked to a 1D NumPy array; this will be removed in the future
x_np = x.numpy() x_np = x.numpy()
np.testing.assert_array_equal(x_np.shape, (1,)) np.testing.assert_array_equal(x_np.shape, ())
np.testing.assert_array_equal(x_np, np.array([0.5])) np.testing.assert_array_equal(x_np, np.array(0.5))
# numpy(False) returns the original, correct 0D NumPy array
x_np = x.numpy(False) x_np = x.numpy(False)
np.testing.assert_array_equal(x_np.shape, ()) np.testing.assert_array_equal(x_np.shape, ())
np.testing.assert_array_equal(x_np, np.array(0.5)) np.testing.assert_array_equal(x_np, np.array(0.5))
......
...@@ -161,6 +161,20 @@ class TestReduceAPI(unittest.TestCase): ...@@ -161,6 +161,20 @@ class TestReduceAPI(unittest.TestCase):
np.testing.assert_allclose(x.grad.numpy(), np.array(1.0)) np.testing.assert_allclose(x.grad.numpy(), np.array(1.0))
np.testing.assert_allclose(out.grad.numpy(), np.array(1.0)) np.testing.assert_allclose(out.grad.numpy(), np.array(1.0))
out1 = api(x, 0)
self.assertEqual(out1.shape, [])
self.assertEqual(out1, out)
out1.backward()
out2 = api(x, -1)
self.assertEqual(out2.shape, [])
self.assertEqual(out2, out)
out2.backward()
if x.grad is not None:
self.assertEqual(x.grad.shape, [])
np.testing.assert_allclose(x.grad.numpy(), np.array(3.0))
paddle.enable_static() paddle.enable_static()
...@@ -463,32 +477,72 @@ class TestSundryAPI(unittest.TestCase): ...@@ -463,32 +477,72 @@ class TestSundryAPI(unittest.TestCase):
tmp = paddle.topk(x1, k=1, axis=2) tmp = paddle.topk(x1, k=1, axis=2)
def test_argmin(self): def test_argmin(self):
# 1) x is 0D
x = paddle.rand([]) x = paddle.rand([])
out1 = paddle.argmin(x, 0) out1 = paddle.argmin(x, 0)
out2 = paddle.argmin(x, -1) out2 = paddle.argmin(x, -1)
out3 = paddle.argmin(x, None) out3 = paddle.argmin(x, None)
self.assertEqual(out1.shape, []) self.assertEqual(out1.shape, [])
np.testing.assert_allclose(out1, 0.0) np.testing.assert_allclose(out1, 0)
self.assertEqual(out2.shape, []) self.assertEqual(out2.shape, [])
np.testing.assert_allclose(out2, 0.0) np.testing.assert_allclose(out2, 0)
self.assertEqual(out3.shape, []) self.assertEqual(out3.shape, [])
np.testing.assert_allclose(out3, 0.0) np.testing.assert_allclose(out3, 0)
# 2) x is 1D
x = paddle.rand([5])
x.stop_gradient = False
out = paddle.argmin(x, 0)
out.backward()
self.assertEqual(out.shape, [])
# 3) x is ND
x = paddle.rand([3, 5])
x.stop_gradient = False
out = paddle.argmin(x)
out.backward()
self.assertEqual(out.shape, [])
# 4) x is ND, keepdim=True
x = paddle.rand([3, 5])
x.stop_gradient = False
out = paddle.argmin(x, keepdim=True)
out.backward()
self.assertEqual(out.shape, [1, 1])
def test_argmax(self): def test_argmax(self):
# 1) x is 0D
x = paddle.rand([]) x = paddle.rand([])
out1 = paddle.argmax(x, 0) out1 = paddle.argmax(x, 0)
out2 = paddle.argmax(x, -1) out2 = paddle.argmax(x, -1)
out3 = paddle.argmax(x, None) out3 = paddle.argmax(x, None)
self.assertEqual(out1.shape, []) self.assertEqual(out1.shape, [])
np.testing.assert_allclose(out1, 0.0) np.testing.assert_allclose(out1, 0)
self.assertEqual(out2.shape, []) self.assertEqual(out2.shape, [])
np.testing.assert_allclose(out2, 0.0) np.testing.assert_allclose(out2, 0)
self.assertEqual(out3.shape, []) self.assertEqual(out3.shape, [])
np.testing.assert_allclose(out3, 0.0) np.testing.assert_allclose(out3, 0)
# 2) x is 1D
x = paddle.rand([5])
out = paddle.argmax(x, 0)
self.assertEqual(out.shape, [])
# 3) x is ND
x = paddle.rand([3, 5])
out = paddle.argmax(x)
self.assertEqual(out.shape, [])
# 4) x is ND, keepdim=True
x = paddle.rand([3, 5])
out = paddle.argmax(x, keepdim=True)
self.assertEqual(out.shape, [1, 1])
def test_median(self): def test_median(self):
x = paddle.rand([]) x = paddle.rand([])
...@@ -575,15 +629,29 @@ class TestSundryAPI(unittest.TestCase): ...@@ -575,15 +629,29 @@ class TestSundryAPI(unittest.TestCase):
np.testing.assert_array_equal(x.numpy(), np.array(0.5)) np.testing.assert_array_equal(x.numpy(), np.array(0.5))
def test_numel(self): def test_numel(self):
# 1) x is 0D
out = paddle.numel(self.x) out = paddle.numel(self.x)
self.assertEqual(out.shape, []) self.assertEqual(out.shape, [])
np.testing.assert_array_equal(out.numpy(), np.array(1)) np.testing.assert_array_equal(out.numpy(), np.array(1))
# 2) x is ND
x = paddle.full([3, 5], 0.5)
out = paddle.numel(x)
self.assertEqual(out.shape, [])
np.testing.assert_array_equal(out.numpy(), np.array(15))
def test_rank(self): def test_rank(self):
# 1) x is 0D
out = paddle.rank(self.x) out = paddle.rank(self.x)
self.assertEqual(out.shape, []) self.assertEqual(out.shape, [])
np.testing.assert_array_equal(out.numpy(), np.array(0)) np.testing.assert_array_equal(out.numpy(), np.array(0))
# 2) x is ND
x = paddle.full([3, 5], 0.5)
out = paddle.rank(x)
self.assertEqual(out.shape, [])
np.testing.assert_array_equal(out.numpy(), np.array(2))
def test_shape(self): def test_shape(self):
out = paddle.shape(self.x) out = paddle.shape(self.x)
self.assertEqual(out.shape, [0]) self.assertEqual(out.shape, [0])
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册