From a85edddbfcf6568b64082440dbfc4e2165c245eb Mon Sep 17 00:00:00 2001
From: Leo Chen
Date: Fri, 21 May 2021 13:48:16 +0800
Subject: [PATCH] paddle.to_tensor supports LoDTensor (#33027)

---
 .../fluid/tests/unittests/test_var_base.py   | 15 +++++++++++++
 python/paddle/tensor/creation.py             | 22 +++++++++++++------
 2 files changed, 30 insertions(+), 7 deletions(-)

diff --git a/python/paddle/fluid/tests/unittests/test_var_base.py b/python/paddle/fluid/tests/unittests/test_var_base.py
index 83f02b629d7..b3671327ca2 100644
--- a/python/paddle/fluid/tests/unittests/test_var_base.py
+++ b/python/paddle/fluid/tests/unittests/test_var_base.py
@@ -248,6 +248,21 @@ class TestVarBase(unittest.TestCase):
                 a = paddle.to_tensor(a, place=paddle.CUDAPinnedPlace())
                 self.assertEqual(a.place.__repr__(), "CUDAPinnedPlace")
 
+    def test_to_tensor_with_lodtensor(self):
+        if core.is_compiled_with_cuda():
+            a_np = np.random.rand(1024, 1024)
+            with paddle.fluid.dygraph.guard(core.CPUPlace()):
+                lod_tensor = core.LoDTensor()
+                lod_tensor.set(a_np, core.CPUPlace())
+                a = paddle.to_tensor(lod_tensor)
+                self.assertTrue(np.array_equal(a_np, a.numpy()))
+
+            with paddle.fluid.dygraph.guard(core.CUDAPlace(0)):
+                lod_tensor = core.LoDTensor()
+                lod_tensor.set(a_np, core.CUDAPlace(0))
+                a = paddle.to_tensor(lod_tensor)
+                self.assertTrue(np.array_equal(a_np, a.numpy()))
+
     def test_to_variable(self):
         with fluid.dygraph.guard():
             var = fluid.dygraph.to_variable(self.array, name="abc")
diff --git a/python/paddle/tensor/creation.py b/python/paddle/tensor/creation.py
index 361c0e80f90..e1012e7656a 100644
--- a/python/paddle/tensor/creation.py
+++ b/python/paddle/tensor/creation.py
@@ -118,6 +118,16 @@ def to_tensor(data, dtype=None, place=None, stop_gradient=True):
         place = _current_expected_place()
 
     if not isinstance(data, np.ndarray):
+
+        def _handle_diff_place_dtype(data, dtype, place, stop_gradient):
+            data.stop_gradient = stop_gradient
+            if not data.place._equals(place):
+                data = data._copy_to(place, False)
+            if dtype:
+                if convert_dtype(dtype) != convert_dtype(data.dtype):
+                    return data.astype(convert_dtype(dtype))
+            return data
+
         if np.isscalar(data) and not isinstance(data, str):
             data = np.array([data])
         elif isinstance(data, (list, tuple)):
@@ -128,13 +138,11 @@ def to_tensor(data, dtype=None, place=None, stop_gradient=True):
                     "this means the input data contains nested lists with different lengths. "
                 )
         elif isinstance(data, paddle.Tensor):
-            data.stop_gradient = stop_gradient
-            if not data.place._equals(place):
-                data = data._copy_to(place, False)
-            if dtype:
-                if convert_dtype(dtype) != convert_dtype(data.dtype):
-                    return data.astype(convert_dtype(dtype))
-            return data
+            return _handle_diff_place_dtype(data, dtype, place, stop_gradient)
+        elif isinstance(data, (core.Tensor, core.LoDTensor)):
+            # convert LoDTensor to VarBase first, and then process it as input VarBase
+            data = paddle.Tensor(data)
+            return _handle_diff_place_dtype(data, dtype, place, stop_gradient)
         else:
             raise TypeError(
                 "Can't constructs a 'paddle.Tensor' with data type {}, data type must be scalar|list|tuple|numpy.ndarray|paddle.Tensor".
-- 
GitLab
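
Usage sketch (not part of the patch): a minimal example of the behavior this change enables, mirroring the new test_to_tensor_with_lodtensor test. It assumes a Paddle 2.x build where paddle.fluid.core is importable and dygraph mode is active; the tensor shape here is arbitrary.

    import numpy as np
    import paddle
    from paddle.fluid import core

    a_np = np.random.rand(4, 3)
    with paddle.fluid.dygraph.guard(core.CPUPlace()):
        # Build a LoDTensor by hand, as the new unit test does.
        lod_tensor = core.LoDTensor()
        lod_tensor.set(a_np, core.CPUPlace())
        # With this patch, to_tensor wraps the LoDTensor in paddle.Tensor first
        # and then applies the usual place/dtype handling; previously this input
        # type fell through to the TypeError branch.
        t = paddle.to_tensor(lod_tensor)
        assert np.array_equal(t.numpy(), a_np)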