Unverified commit d7fc3781 authored by wanghuancoder, committed by GitHub

fix contiguous (#56863)

* fix contiguous
Parent 83b942f3
@@ -2819,9 +2819,10 @@ static PyObject* tensor_contiguous(TensorObject* self,
       return reinterpret_cast<PyObject*>(self);
     } else {
       eager_gil_scoped_release guard;
-      return ToPyObject(
-          paddle::Tensor(std::make_shared<phi::DenseTensor>(std::move(
-              paddle::experimental::Trans2Contiguous(*(dense_tensor.get()))))));
+      self->tensor.set_impl(std::make_shared<phi::DenseTensor>(std::move(
+          paddle::experimental::Trans2Contiguous(*(dense_tensor.get())))));
+      Py_INCREF(self);
+      return reinterpret_cast<PyObject*>(self);
     }
   } else {
...
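For context: with the change above, `contiguous()` on a non-contiguous dense tensor converts the underlying `phi::DenseTensor` in place (via `set_impl`) and returns the same Python object (note the `Py_INCREF(self)`), instead of wrapping the converted data in a new `paddle.Tensor`. That is also why the `_is_shared_buffer_with` assertions are removed from the test below. A minimal sketch of the resulting Python-level behavior; the shapes and the assumption that stride kernels are enabled are illustrative, not part of this commit:

```python
import paddle

x = paddle.rand([2, 3, 4])
# transpose yields a strided (typically non-contiguous) view when
# stride kernels are enabled (assumes FLAGS_use_stride_kernel is on).
x_t = paddle.transpose(x, perm=[1, 0, 2])

x_c = x_t.contiguous()
assert x_c.is_contiguous()
# After this commit the call rearranges x_t's storage in place and
# returns the same tensor object rather than a separate copy, so:
print(x_c is x_t)  # expected: True
```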
@@ -42,13 +42,11 @@ class TestStride(unittest.TestCase):
         x_c = x_transposed1.contiguous()
         self.assertTrue(np.allclose(x_c.numpy(), x_np_transposed1))
-        self.assertFalse(x_c._is_shared_buffer_with(x_transposed1))
         x_transposed2 = paddle.transpose(x_transposed1, perm=[2, 0, 1])
         x_np_transposed2 = x_np_transposed1.transpose(2, 0, 1)
         self.assertTrue(np.allclose(x_transposed2.numpy(), x_np_transposed2))
         self.assertFalse(x_transposed2.is_contiguous())
-        self.assertTrue(x._is_shared_buffer_with(x_transposed2))
         y = x_transposed2 + 2
         y_np = x_np_transposed2 + 2
@@ -96,11 +94,6 @@ class TestStride(unittest.TestCase):
         self.assertTrue(np.allclose(out3_c.numpy(), np_out3))
         self.assertTrue(np.allclose(out4_c.numpy(), np_out4))
-        self.assertFalse(out_c._is_shared_buffer_with(out))
-        self.assertFalse(out2_c._is_shared_buffer_with(out2))
-        self.assertFalse(out3_c._is_shared_buffer_with(out3))
-        self.assertFalse(out4_c._is_shared_buffer_with(out4))
     def call_slice(self):
         x_np = np.random.random(size=[10, 10, 10, 20]).astype('float32')
         x = paddle.to_tensor(x_np)
@@ -141,8 +134,6 @@ class TestStride(unittest.TestCase):
         self.assertTrue(np.allclose(out_c.numpy(), np_out))
-        self.assertFalse(out_c._is_shared_buffer_with(out))
     def call_index_select(self):
         x_np = np.random.random(size=[10, 10, 10, 20]).astype('float32')
         x = paddle.to_tensor(x_np)
@@ -161,8 +152,6 @@ class TestStride(unittest.TestCase):
         self.assertTrue(np.allclose(out_c.numpy(), np_out))
-        self.assertFalse(out_c._is_shared_buffer_with(out))
     def call_reshape(self):
         x_np = np.random.random(size=[10, 10, 10, 20]).astype('float32')
         x = paddle.to_tensor(x_np)
@@ -201,8 +190,6 @@ class TestStride(unittest.TestCase):
         self.assertTrue(np.allclose(out_c.numpy(), np_out))
-        self.assertFalse(out_c._is_shared_buffer_with(out))
     def call_imag(self):
         x_np = np.random.random(size=[10, 10, 10, 20]).astype('complex128')
         x = paddle.to_tensor(x_np)
@@ -221,8 +208,6 @@ class TestStride(unittest.TestCase):
         self.assertTrue(np.allclose(out_c.numpy(), np_out))
-        self.assertFalse(out_c._is_shared_buffer_with(out))
     def call_as_real(self):
         x_np = np.random.random(size=[10, 10, 10, 20]).astype('complex128')
         x = paddle.to_tensor(x_np)
@@ -351,10 +336,6 @@ class TestStride(unittest.TestCase):
         self.assertTrue(np.allclose(out1_c.numpy(), np_out1))
         self.assertTrue(np.allclose(out2_c.numpy(), np_out2))
-        self.assertFalse(out0_c._is_shared_buffer_with(out0))
-        self.assertFalse(out1_c._is_shared_buffer_with(out1))
-        self.assertFalse(out2_c._is_shared_buffer_with(out2))
     def call_split2(self):
         x_np = np.random.random(size=[3, 9, 5]).astype('float32')
         x = paddle.to_tensor(x_np)
@@ -386,10 +367,6 @@ class TestStride(unittest.TestCase):
         self.assertTrue(np.allclose(out1_c.numpy(), np_out1))
         self.assertTrue(np.allclose(out2_c.numpy(), np_out2))
-        self.assertFalse(out0_c._is_shared_buffer_with(out0))
-        self.assertFalse(out1_c._is_shared_buffer_with(out1))
-        self.assertFalse(out2_c._is_shared_buffer_with(out2))
     def call_split3(self):
         x_np = np.random.random(size=[9, 3, 5]).astype('float32')
         x = paddle.to_tensor(x_np)
@@ -485,10 +462,6 @@ class TestStride(unittest.TestCase):
         self.assertTrue(np.allclose(out1_c.numpy(), np_out1))
         self.assertTrue(np.allclose(out2_c.numpy(), np_out2))
-        self.assertFalse(out0_c._is_shared_buffer_with(out0))
-        self.assertFalse(out1_c._is_shared_buffer_with(out1))
-        self.assertFalse(out2_c._is_shared_buffer_with(out2))
     def call_unbind(self):
         x_np = np.random.random(size=[3, 9, 5]).astype('float32')
         x = paddle.to_tensor(x_np)
@@ -622,8 +595,6 @@ class TestStride(unittest.TestCase):
         self.assertTrue(np.allclose(out_c.numpy(), np_out))
-        self.assertFalse(out_c._is_shared_buffer_with(out))
     def call_stride(self):
         self.call_transpose()
         self.call_diagonal()
...