Unverified commit 551e9140, authored by Weilong Wu, committed by GitHub

[Eager] Fix numpy interface for constructing empty tensor (#41904) (#41954)

* [Eager] Fix numpy interface for constructing empty tensor

* Fix CI, construct empty tensor

* Modify empty tensor's shape from [] to [0]

* Add more test for constructing empty tensor
Parent commit: 5e8d7804
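A minimal usage sketch of the behavior this commit fixes, based on the tests added below (assumes a Paddle build that includes this change):

import numpy as np
import paddle

# A tensor built from an empty list now reports shape [0] rather than [].
x = paddle.to_tensor([])
print(x.shape)        # [0]

# The numpy interface returns an empty 1-D array for that case.
arr = x.numpy()
print(arr.shape)                                            # (0,)
print(np.array_equal(arr, np.array([], dtype='float32')))   # True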
@@ -62,7 +62,7 @@ void EmptyTensorInitializer(TensorObject* self, const std::string& name,
                              bool persistable = false, int stop_gradient = -1,
                              framework::proto::VarType::Type dtype =
                                  paddle::framework::proto::VarType::FP32,
-                             const std::vector<int>& dims = {},
+                             const std::vector<int>& dims = {0},
                              framework::proto::VarType::Type var_type =
                                  paddle::framework::proto::VarType::LOD_TENSOR) {
   auto ddims = phi::make_ddim(dims);
@@ -75,7 +75,7 @@ void EmptyTensorInitializer(TensorObject* self, const std::string& name,
   if (var_type == paddle::framework::proto::VarType::LOD_TENSOR) {
     // TODO(jiabin): Maybe support LOD later
     std::shared_ptr<phi::DenseTensor> dense_tensor = nullptr;
-    if (dims.empty()) {
+    if (dims.size() == 1 && dims[0] == 0) {
       std::shared_ptr<phi::Allocation> allocation_ptr = nullptr;
       dense_tensor = std::make_shared<phi::DenseTensor>(
           allocation_ptr,
...
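The two hunks above make {0} the default dims for a freshly constructed eager Tensor and key the no-allocation branch off that [0] sentinel instead of an empty dims vector. A short sketch of the resulting Python-visible behavior, based on the eager-API test updated further down (the core import path follows that test file and is an assumption here):

from paddle.fluid import core

t = core.eager.Tensor()   # default-constructed, uninitialized eager Tensor
print(t.shape)            # [0] after this change (previously [])
print(t.persistable)      # False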
@@ -194,6 +194,17 @@ static PyObject* tensor_method_numpy(TensorObject* self, PyObject* args,
       nullptr);
   if (!self->tensor.impl()->initialized()) {
+    if (tensor_dims.size() == 0) {
+      py_dims[0] = 0;
+      py_strides[0] = 0;
+      PyObject* array = api.PyArray_NewFromDescr_(
+          api.PyArray_Type_, api.PyArray_DescrFromType_(numpy_dtype), 1,
+          py_dims, py_strides, nullptr,
+          pybind11::detail::npy_api::NPY_ARRAY_ALIGNED_ |
+              pybind11::detail::npy_api::NPY_ARRAY_WRITEABLE_,
+          nullptr);
+      return array;
+    }
     return array;
   }
...
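For an uninitialized tensor with empty dims, the added branch in tensor_method_numpy builds a writable, aligned 1-D NumPy array with zero elements and zero strides. A NumPy-only sketch of the array that branch produces (the float32 dtype is illustrative; the C++ code maps the tensor's actual dtype):

import numpy as np

arr = np.empty((0,), dtype=np.float32)   # 1-D, zero elements
print(arr.shape)     # (0,)
print(arr.size)      # 0
print(arr.flags['WRITEABLE'], arr.flags['ALIGNED'])   # True True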
@@ -114,9 +114,6 @@ phi::DDim Tensor::dims() const { return impl_->dims(); }
 std::vector<int64_t> Tensor::shape() const {
   auto dims = impl_->dims();
-  if (dims.size() == 1 && dims.at(0) == 0) {
-    return {};
-  }
   return phi::vectorize<int64_t>(dims);
 }
...
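The removed special case in Tensor::shape() used to collapse a dims vector of [0] into an empty shape, which is why the Python tests below previously expected []. A small illustration of the old versus new mapping, using hypothetical helpers that mirror the C++ logic:

def shape_before(dims):
    # old behavior: a single-element dims of [0] was reported as []
    if len(dims) == 1 and dims[0] == 0:
        return []
    return list(dims)

def shape_after(dims):
    # new behavior: dims are reported as-is
    return list(dims)

print(shape_before([0]), shape_after([0]))   # [] [0]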
@@ -114,7 +114,7 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase):
         egr_tensor = core.eager.Tensor()
         self.assertEqual(egr_tensor.persistable, False)
         self.assertTrue("generated" in egr_tensor.name)
-        self.assertEqual(egr_tensor.shape, [])
+        self.assertEqual(egr_tensor.shape, [0])
         self.assertEqual(egr_tensor.dtype, core.VarDesc.VarType.FP32)
         self.assertEqual(egr_tensor.stop_gradient, True)
...
@@ -214,6 +214,13 @@ class TestVarBase(unittest.TestCase):
                 self.assertEqual(x.item(), 1 + 1j)
                 self.assertTrue(isinstance(x.item(), complex))
+                # empty tensor
+                x = paddle.to_tensor([])
+                self.assertEqual(x.shape, [0])
+                expected_result = np.array([], dtype='float32')
+                self.assertEqual(x.numpy().shape, expected_result.shape)
+                self.assertTrue(np.array_equal(x.numpy(), expected_result))
                 numpy_array = np.random.randn(3, 4)
                 # covert core.LoDTensor to paddle.Tensor
                 lod_tensor = paddle.fluid.core.LoDTensor()
...
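One note on the comparison used in the new test: np.array_equal only returns True when both shape and elements match, so the assertion passes precisely because x.numpy() now has shape (0,), matching np.array([], dtype='float32').

import numpy as np
print(np.array_equal(np.array([], dtype='float32'), np.empty((0,), dtype='float32')))   # True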