未验证 提交 35ce2bd9 编写于 作者: S Siming Dai 提交者: GitHub

Fix to_dlpack (#50138)

* fix to_dlpack for loop

* fix reference count
上级 244e7546
...@@ -134,6 +134,58 @@ struct DLDeviceVisitor ...@@ -134,6 +134,58 @@ struct DLDeviceVisitor
}; };
} // namespace internal } // namespace internal
// Bundles the exported DLManagedTensor with a DenseTensor that owns the
// underlying storage. Holding `handle` here keeps the allocation alive for
// the lifetime of the DLPack capsule (presumably DenseTensor ref-counts its
// storage — see toDLPack/deleter below); the whole object is freed by
// deleter() when the DLPack consumer is done.
struct PaddleDLMTensor {
  phi::DenseTensor handle;
  DLManagedTensor tensor;
};
// DLPack deleter installed by toDLPack() on every exported tensor. The
// consumer calls it when finished with the capsule; it frees the
// heap-allocated shape/stride arrays and the owning PaddleDLMTensor,
// which in turn drops the DenseTensor reference held in `handle`.
void deleter(DLManagedTensor *arg) {
  auto *owner = static_cast<PaddleDLMTensor *>(arg->manager_ctx);
  delete[] arg->dl_tensor.strides;
  delete[] arg->dl_tensor.shape;
  delete owner;
}
// Exports `src` as a DLManagedTensor. The returned object (and the shape /
// stride arrays it points at) lives on the heap and is owned by a
// PaddleDLMTensor; the consumer releases everything through the installed
// `deleter`. Copying `src` into the owner keeps its storage alive for the
// capsule's lifetime (the reference-count fix for issue #50120).
DLManagedTensor *toDLPack(const phi::DenseTensor &src) {
  auto *owner = new PaddleDLMTensor;
  owner->handle = const_cast<phi::DenseTensor &>(src);

  DLManagedTensor &managed = owner->tensor;
  managed.manager_ctx = owner;
  managed.deleter = &deleter;
  managed.dl_tensor.data = const_cast<void *>(src.data());

  // Rank of the tensor; DLPack stores it as an int.
  using DimType = decltype(managed.dl_tensor.ndim);  // int
  const auto rank = static_cast<DimType>(src.dims().size());
  managed.dl_tensor.ndim = rank;

  // Shape array, heap-allocated so it survives until deleter() runs.
  auto *shape = new int64_t[rank];
  for (DimType axis = 0; axis < rank; ++axis) {
    shape[axis] = src.dims()[axis];
  }
  managed.dl_tensor.shape = shape;

  // Strides for a dense row-major layout: seed every axis with 1, then
  // fill each outer stride with the product of the inner extents.
  auto *strides = new int64_t[rank];
  for (DimType axis = 0; axis < rank; ++axis) {
    strides[axis] = 1;
  }
  for (DimType axis = rank - 2; axis >= 0; --axis) {
    strides[axis] = shape[axis + 1] * strides[axis + 1];
  }
  managed.dl_tensor.strides = strides;

  // DLDevice (device_type + device_id) derived from the tensor's place.
  auto place = src.place();
  managed.dl_tensor.device =
      paddle::platform::VisitPlace(place, internal::DLDeviceVisitor());
  managed.dl_tensor.dtype = internal::GetDLDataTypeFromTypeIndex(
      framework::TransToProtoVarType(src.dtype()));
  managed.dl_tensor.byte_offset = 0;
  return &managed;
}
DLPackTensor::DLPackTensor(const phi::DenseTensor &tensor, LaneType lanes) { DLPackTensor::DLPackTensor(const phi::DenseTensor &tensor, LaneType lanes) {
// init data, data buffer // init data, data buffer
t_.data = const_cast<void *>(tensor.data()); t_.data = const_cast<void *>(tensor.data());
......
...@@ -44,5 +44,7 @@ class DLPackTensor { ...@@ -44,5 +44,7 @@ class DLPackTensor {
ShapeType shape_[DDim::kMaxRank]; ShapeType shape_[DDim::kMaxRank];
}; };
DLManagedTensor* toDLPack(const phi::DenseTensor& src);
} // namespace framework } // namespace framework
} // namespace paddle } // namespace paddle
...@@ -473,22 +473,15 @@ void BindTensor(pybind11::module &m) { // NOLINT ...@@ -473,22 +473,15 @@ void BindTensor(pybind11::module &m) { // NOLINT
)DOC") )DOC")
.def("_to_dlpack", .def("_to_dlpack",
[](phi::DenseTensor &self) { [](phi::DenseTensor &self) {
DLPackTensor dlpack_tensor(self, 1); DLManagedTensor *dmt = framework::toDLPack(self);
DLManagedTensor *dmt = dlpack_tensor.ToDLManagedTensor(); auto capsule = pybind11::capsule(
auto capsule = py::capsule(
static_cast<void *>(dmt), "dltensor", [](PyObject *ptr) { static_cast<void *>(dmt), "dltensor", [](PyObject *ptr) {
if (ptr) { if (!PyCapsule_IsValid(ptr, "dltensor")) {
auto dltensor = new DLManagedTensor; return;
try {
dltensor = reinterpret_cast<DLManagedTensor *>(
PyCapsule_GetPointer(ptr, "used_dltensor"));
return;
} catch (...) {
dltensor = reinterpret_cast<DLManagedTensor *>(
PyCapsule_GetPointer(ptr, "dltensor"));
}
dltensor->deleter(dltensor);
} }
DLManagedTensor *dmt = static_cast<DLManagedTensor *>(
PyCapsule_GetPointer(ptr, "dltensor"));
dmt->deleter(dmt);
}); });
return capsule; return capsule;
}) })
......
...@@ -116,6 +116,12 @@ class TestDLPack(unittest.TestCase): ...@@ -116,6 +116,12 @@ class TestDLPack(unittest.TestCase):
dlpack = paddle.utils.dlpack.to_dlpack(a) dlpack = paddle.utils.dlpack.to_dlpack(a)
b = paddle.utils.dlpack.from_dlpack(dlpack) b = paddle.utils.dlpack.from_dlpack(dlpack)
def test_to_dlpack_for_loop(self):
# See Paddle issue 50120
for i in range(10):
x = paddle.rand([3, 5])
dlpack = paddle.utils.dlpack.to_dlpack(x)
class TestRaiseError(unittest.TestCase): class TestRaiseError(unittest.TestCase):
def test_from_dlpack_raise_type_error(self): def test_from_dlpack_raise_type_error(self):
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册