未验证 提交 8ccc61f3 编写于 作者: Q Qiao Longfei 提交者: GitHub

support empty tensor (#9338)

* support empty tensor
上级 30f1bd6a
...@@ -117,9 +117,9 @@ inline void* Tensor::mutable_data(platform::Place place, std::type_index type) { ...@@ -117,9 +117,9 @@ inline void* Tensor::mutable_data(platform::Place place, std::type_index type) {
if (holder_ != nullptr) { if (holder_ != nullptr) {
holder_->set_type(type); holder_->set_type(type);
} }
PADDLE_ENFORCE_GT( PADDLE_ENFORCE_GE(numel(), 0,
numel(), 0, "When calling this method, the Tensor's numel must be "
"When calling this method, the Tensor's numel must be larger than zero. " "equal or larger than zero. "
"Please check Tensor::Resize has been called first."); "Please check Tensor::Resize has been called first.");
int64_t size = numel() * SizeOfType(type); int64_t size = numel() * SizeOfType(type);
/* some versions of boost::variant don't have operator!= */ /* some versions of boost::variant don't have operator!= */
......
...@@ -59,7 +59,7 @@ TEST(BuddyAllocator, CPUMultAlloc) { ...@@ -59,7 +59,7 @@ TEST(BuddyAllocator, CPUMultAlloc) {
EXPECT_EQ(total_size, 0UL); EXPECT_EQ(total_size, 0UL);
for (auto size : for (auto size :
{128, 256, 1024, 4096, 16384, 65536, 262144, 1048576, 4194304}) { {0, 128, 256, 1024, 4096, 16384, 65536, 262144, 1048576, 4194304}) {
ps[paddle::memory::Alloc(cpu, size)] = size; ps[paddle::memory::Alloc(cpu, size)] = size;
// Buddy Allocator doesn't manage too large memory chunk // Buddy Allocator doesn't manage too large memory chunk
...@@ -117,7 +117,7 @@ TEST(BuddyAllocator, GPUMultAlloc) { ...@@ -117,7 +117,7 @@ TEST(BuddyAllocator, GPUMultAlloc) {
EXPECT_EQ(total_size, 0UL); EXPECT_EQ(total_size, 0UL);
for (auto size : for (auto size :
{128, 256, 1024, 4096, 16384, 65536, 262144, 1048576, 4194304}) { {0, 128, 256, 1024, 4096, 16384, 65536, 262144, 1048576, 4194304}) {
ps[paddle::memory::Alloc(gpu, size)] = size; ps[paddle::memory::Alloc(gpu, size)] = size;
// Buddy Allocator doesn't manage too large memory chunk // Buddy Allocator doesn't manage too large memory chunk
......
...@@ -126,7 +126,6 @@ class TestTensor(unittest.TestCase): ...@@ -126,7 +126,6 @@ class TestTensor(unittest.TestCase):
def test_lod_tensor_gpu_init(self): def test_lod_tensor_gpu_init(self):
if not core.is_compiled_with_cuda(): if not core.is_compiled_with_cuda():
return return
scope = core.Scope()
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
lod_py = [[0, 2, 5], [0, 2, 4, 5]] lod_py = [[0, 2, 5], [0, 2, 4, 5]]
lod_tensor = core.LoDTensor() lod_tensor = core.LoDTensor()
...@@ -144,6 +143,25 @@ class TestTensor(unittest.TestCase): ...@@ -144,6 +143,25 @@ class TestTensor(unittest.TestCase):
self.assertAlmostEqual(2.0, lod_v[0, 0, 0, 1]) self.assertAlmostEqual(2.0, lod_v[0, 0, 0, 1])
self.assertListEqual(lod_py, lod_tensor.lod()) self.assertListEqual(lod_py, lod_tensor.lod())
def test_empty_tensor(self):
    # A tensor whose first dimension is zero must still be allocatable
    # and must round-trip through numpy with shape (0, 1).
    cpu = core.CPUPlace()
    test_scope = core.Scope()
    empty_var = test_scope.var("test_tensor")
    empty_tensor = empty_var.get_tensor()
    empty_tensor.set_dims([0, 1])
    empty_tensor.alloc_float(cpu)

    as_array = numpy.array(empty_tensor)
    self.assertEqual((0, 1), as_array.shape)

    # Repeat the same allocation/conversion check on GPU when the
    # build has CUDA support compiled in.
    if core.is_compiled_with_cuda():
        cuda = core.CUDAPlace(0)
        empty_tensor.alloc_float(cuda)
        as_array = numpy.array(empty_tensor)
        self.assertEqual((0, 1), as_array.shape)
if __name__ == '__main__': if __name__ == '__main__':
unittest.main() unittest.main()
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册