Unverified commit 59fec5d6, authored by Siming Dai, committed by GitHub

[cherry-pick 2.4] Fix to_dlpack (#50138) (#50250)

* Fix to_dlpack (#50138)

* fix to_dlpack for loop

* fix reference count

* fix conflicts
Parent: b50f04ab
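
The root cause (see Paddle issue 50120, referenced by the regression test below) was that the exported DLManagedTensor did not keep the source tensor alive, so repeatedly converting tensors in a loop could release a buffer that a DLPack consumer still referenced. A minimal sketch of the failing pattern, mirroring the test added in this commit:

import paddle

# Before this fix, repeated exports could corrupt reference counts or
# crash, because nothing owned the tensor data once `x` was rebound.
for i in range(10):
    x = paddle.rand([3, 5])
    dlpack = paddle.utils.dlpack.to_dlpack(x)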
@@ -134,7 +134,59 @@ struct DLDeviceVisitor
 };
 }  // namespace internal

-DLPackTensor::DLPackTensor(const Tensor &tensor, LaneType lanes) {
+struct PaddleDLMTensor {
+  phi::DenseTensor handle;
+  DLManagedTensor tensor;
+};
+
+void deleter(DLManagedTensor *arg) {
+  delete[] arg->dl_tensor.shape;
+  delete[] arg->dl_tensor.strides;
+  delete static_cast<PaddleDLMTensor *>(arg->manager_ctx);
+}
+
+DLManagedTensor *toDLPack(const phi::DenseTensor &src) {
+  PaddleDLMTensor *pdDLMTensor(new PaddleDLMTensor);
+  pdDLMTensor->handle = const_cast<phi::DenseTensor &>(src);
+  pdDLMTensor->tensor.manager_ctx = pdDLMTensor;
+  pdDLMTensor->tensor.deleter = &deleter;
+  pdDLMTensor->tensor.dl_tensor.data = const_cast<void *>(src.data());
+  // init ndim
+  using DimType = decltype(pdDLMTensor->tensor.dl_tensor.ndim);  // int
+  pdDLMTensor->tensor.dl_tensor.ndim = static_cast<DimType>(src.dims().size());
+  DimType ndim = pdDLMTensor->tensor.dl_tensor.ndim;
+  // init shape
+  auto shape = new int64_t[ndim];
+  for (DimType i = 0; i < ndim; ++i) {
+    shape[i] = src.dims()[i];
+  }
+  pdDLMTensor->tensor.dl_tensor.shape = shape;
+  // init strides
+  auto strides = new int64_t[ndim];
+  for (DimType i = 0; i < ndim; ++i) {
+    strides[i] = 1;
+  }
+  for (DimType i = ndim - 2; i >= 0; --i) {
+    strides[i] = shape[i + 1] * strides[i + 1];
+  }
+  pdDLMTensor->tensor.dl_tensor.strides = strides;
+  // init device, DLDevice type with device_type and device_id
+  auto place = src.place();
+  pdDLMTensor->tensor.dl_tensor.device =
+      paddle::platform::VisitPlace(place, internal::DLDeviceVisitor());
+  pdDLMTensor->tensor.dl_tensor.dtype = internal::GetDLDataTypeFromTypeIndex(
+      framework::TransToProtoVarType(src.dtype()));
+  pdDLMTensor->tensor.dl_tensor.byte_offset = 0;
+  return &(pdDLMTensor->tensor);
+}
+
+DLPackTensor::DLPackTensor(const phi::DenseTensor &tensor, LaneType lanes) {
   // init data, data buffer
   t_.data = const_cast<void *>(tensor.data());
...
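
The essential change above: toDLPack wraps the exported tensor in a PaddleDLMTensor whose handle member holds a phi::DenseTensor sharing the source's allocation, so the buffer's reference count stays positive until the consumer invokes deleter. The strides are filled in for a contiguous row-major layout; a minimal Python sketch of that stride computation, for illustration only (strides are in elements, not bytes):

def contiguous_strides(shape):
    # Innermost dimension has stride 1; each outer stride is the
    # product of all inner dimension extents.
    strides = [1] * len(shape)
    for i in range(len(shape) - 2, -1, -1):
        strides[i] = shape[i + 1] * strides[i + 1]
    return strides

assert contiguous_strides([3, 5]) == [5, 1]  # matches the loops in toDLPack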
@@ -28,7 +28,7 @@ class DLPackTensor {
       std::remove_reference<decltype(::DLTensor::shape[0])>::type;  // int64_t

   // lanes is only used in CPU to enable vectorization
-  explicit DLPackTensor(const Tensor& tensor, LaneType lanes = 1);
+  explicit DLPackTensor(const phi::DenseTensor& tensor, LaneType lanes = 1);

   inline operator const ::DLTensor&() const { return t_; }
@@ -44,5 +44,7 @@ class DLPackTensor {
   ShapeType shape_[DDim::kMaxRank];
 };
+
+DLManagedTensor* toDLPack(const phi::DenseTensor& src);
 }  // namespace framework
 }  // namespace paddle
@@ -472,23 +472,16 @@ void BindTensor(pybind11::module &m) {  // NOLINT
            print(t.shape())  # [5, 30]
       )DOC")
       .def("_to_dlpack",
-           [](framework::Tensor &self) {
-             DLPackTensor dlpack_tensor(self, 1);
-             DLManagedTensor *dmt = dlpack_tensor.ToDLManagedTensor();
-             auto capsule = py::capsule(
+           [](phi::DenseTensor &self) {
+             DLManagedTensor *dmt = framework::toDLPack(self);
+             auto capsule = pybind11::capsule(
                  static_cast<void *>(dmt), "dltensor", [](PyObject *ptr) {
-                   if (ptr) {
-                     auto dltensor = new DLManagedTensor;
-                     try {
-                       dltensor = reinterpret_cast<DLManagedTensor *>(
-                           PyCapsule_GetPointer(ptr, "used_dltensor"));
-                       return;
-                     } catch (...) {
-                       dltensor = reinterpret_cast<DLManagedTensor *>(
-                           PyCapsule_GetPointer(ptr, "dltensor"));
-                     }
-                     dltensor->deleter(dltensor);
-                   }
+                   if (!PyCapsule_IsValid(ptr, "dltensor")) {
+                     return;
+                   }
+                   DLManagedTensor *dmt = static_cast<DLManagedTensor *>(
+                       PyCapsule_GetPointer(ptr, "dltensor"));
+                   dmt->deleter(dmt);
                  });
              return capsule;
            })
...
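
The rewritten capsule destructor relies on the standard DLPack handshake: a consumer that takes ownership renames the capsule from "dltensor" to "used_dltensor" (the name the old code probed for), so PyCapsule_IsValid(ptr, "dltensor") fails for consumed capsules and the deleter runs only when nobody imported the tensor. A usage sketch of that protocol from the Python side, assuming the public paddle.utils.dlpack wrappers route through this binding:

import paddle

x = paddle.rand([3, 5])
capsule = paddle.utils.dlpack.to_dlpack(x)    # capsule named "dltensor"
y = paddle.utils.dlpack.from_dlpack(capsule)  # consumer renames it on import
# When the capsule is garbage collected, the destructor above sees an
# invalid "dltensor" name and returns without calling the deleter;
# ownership of the buffer now rests with `y`.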
@@ -22,7 +22,6 @@ from paddle.fluid.framework import _test_eager_guard, in_dygraph_mode

 class TestDLPack(unittest.TestCase):
     def func_test_dlpack_dygraph(self):
         paddle.disable_static()
         tensor = paddle.to_tensor(np.array([1, 2, 3, 4]).astype('int'))
@@ -30,11 +29,13 @@ class TestDLPack(unittest.TestCase):
         out_from_dlpack = paddle.utils.dlpack.from_dlpack(dlpack)
         if paddle.fluid.framework.in_dygraph_mode():
             self.assertTrue(
-                isinstance(out_from_dlpack, paddle.fluid.core.eager.Tensor))
+                isinstance(out_from_dlpack, paddle.fluid.core.eager.Tensor)
+            )
         else:
             self.assertTrue(isinstance(out_from_dlpack, paddle.Tensor))
-        np.testing.assert_array_equal(np.array(out_from_dlpack),
-                                      np.array([1, 2, 3, 4]).astype('int'))
+        np.testing.assert_array_equal(
+            np.array(out_from_dlpack), np.array([1, 2, 3, 4]).astype('int')
+        )

     def test_dlpack_dygraph(self):
         with _test_eager_guard():
@@ -58,26 +59,32 @@ class TestDLPack(unittest.TestCase):
     def test_dlpack_static(self):
         paddle.enable_static()
         tensor = fluid.create_lod_tensor(
-            np.array([[1], [2], [3], [4]]).astype('int'), [[1, 3]],
-            fluid.CPUPlace())
+            np.array([[1], [2], [3], [4]]).astype('int'),
+            [[1, 3]],
+            fluid.CPUPlace(),
+        )
         dlpack = paddle.utils.dlpack.to_dlpack(tensor)
         out_from_dlpack = paddle.utils.dlpack.from_dlpack(dlpack)
         self.assertTrue(isinstance(out_from_dlpack, fluid.core.Tensor))
         np.testing.assert_array_equal(
             np.array(out_from_dlpack),
-            np.array([[1], [2], [3], [4]]).astype('int'))
+            np.array([[1], [2], [3], [4]]).astype('int'),
+        )

         # when build with cuda
         if core.is_compiled_with_cuda():
             gtensor = fluid.create_lod_tensor(
-                np.array([[1], [2], [3], [4]]).astype('int'), [[1, 3]],
-                fluid.CUDAPlace(0))
+                np.array([[1], [2], [3], [4]]).astype('int'),
+                [[1, 3]],
+                fluid.CUDAPlace(0),
+            )
             gdlpack = paddle.utils.dlpack.to_dlpack(gtensor)
             gout_from_dlpack = paddle.utils.dlpack.from_dlpack(gdlpack)
             self.assertTrue(isinstance(gout_from_dlpack, fluid.core.Tensor))
             np.testing.assert_array_equal(
                 np.array(gout_from_dlpack),
-                np.array([[1], [2], [3], [4]]).astype('int'))
+                np.array([[1], [2], [3], [4]]).astype('int'),
+            )

     def func_test_dlpack_dtype_conversion(self):
         paddle.disable_static()
@@ -104,7 +111,8 @@ class TestDLPack(unittest.TestCase):
         for dtype in complex_dtypes:
             x = paddle.to_tensor(
                 [[1 + 6j, 2 + 5j, 3 + 4j], [4 + 3j, 5 + 2j, 6 + 1j]],
-                dtype=dtype)
+                dtype=dtype,
+            )
             dlpack = paddle.utils.dlpack.to_dlpack(x)
             o = paddle.utils.dlpack.from_dlpack(dlpack)
             self.assertEqual(x.dtype, o.dtype)
@@ -115,12 +123,18 @@ class TestDLPack(unittest.TestCase):
             self.func_test_dlpack_dtype_conversion()
         self.func_test_dlpack_dtype_conversion()

+    def test_to_dlpack_for_loop(self):
+        # See Paddle issue 50120
+        for i in range(10):
+            x = paddle.rand([3, 5])
+            dlpack = paddle.utils.dlpack.to_dlpack(x)
+

 class TestRaiseError(unittest.TestCase):
     def func_test_from_dlpack_raise_type_error(self):
-        self.assertRaises(TypeError, paddle.utils.dlpack.from_dlpack,
-                          np.zeros(5))
+        self.assertRaises(
+            TypeError, paddle.utils.dlpack.from_dlpack, np.zeros(5)
+        )

     def test_from_dlpack_raise_type_error(self):
         with _test_eager_guard():
...