From 0cd577cfc3496a1f4e2b50895c739d654cbc8850 Mon Sep 17 00:00:00 2001
From: ronnywang <524019753@qq.com>
Date: Fri, 8 Apr 2022 10:04:39 +0800
Subject: [PATCH] pybind support CustomPlace (#41136)

---
 paddle/fluid/pybind/imperative.cc |  6 +++++
 paddle/fluid/pybind/pybind.cc     | 37 +++++++++++++++++++++++++++++++
 2 files changed, 43 insertions(+)

diff --git a/paddle/fluid/pybind/imperative.cc b/paddle/fluid/pybind/imperative.cc
index 7df6d8f7f7..e09c205db1 100644
--- a/paddle/fluid/pybind/imperative.cc
+++ b/paddle/fluid/pybind/imperative.cc
@@ -2182,6 +2182,7 @@ void BindImperative(py::module *m_ptr) {
   m.def("varbase_copy", &VarBaseCopy);
   m.def("varbase_copy", &VarBaseCopy);
   m.def("varbase_copy", &VarBaseCopy);
+  m.def("varbase_copy", &VarBaseCopy<platform::CustomPlace>);
   m.def("varbase_copy", &VarBaseCopy);
 
   m.def(
@@ -2341,6 +2342,11 @@ void BindImperative(py::module *m_ptr) {
            const py::args args, const py::kwargs kwargs) {
           return imperative::PyLayerApply(place, cls, args, kwargs);
         });
+  m.def("pylayer_apply",
+        [](const platform::CustomPlace &place, const py::object &cls,
+           const py::args args, const py::kwargs kwargs) {
+          return imperative::PyLayerApply(place, cls, args, kwargs);
+        });
 
 #if defined(PADDLE_WITH_CUDA)
   m.def("to_uva_tensor",
diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc
index c9e304e696..396c6c5e42 100644
--- a/paddle/fluid/pybind/pybind.cc
+++ b/paddle/fluid/pybind/pybind.cc
@@ -845,6 +845,10 @@ PYBIND11_MODULE(core_noavx, m) {
            [](framework::Tensor &self, const std::string &layout) {
              self.set_layout(StringToDataLayout(layout));
            })
+      .def("_alloc_float",
+           [](framework::Tensor &self, paddle::platform::CustomPlace &place) {
+             self.mutable_data<float>(place);
+           })
       .def("_alloc_float",
            [](framework::Tensor &self, paddle::platform::CUDAPlace &place) {
              self.mutable_data<float>(place);
@@ -873,6 +877,10 @@ PYBIND11_MODULE(core_noavx, m) {
            [](framework::Tensor &self, paddle::platform::CPUPlace &place) {
              self.mutable_data<int>(place);
            })
+      .def("_alloc_int",
+           [](framework::Tensor &self, paddle::platform::CustomPlace &place) {
+             self.mutable_data<int>(place);
+           })
       .def("_alloc_int",
            [](framework::Tensor &self, paddle::platform::XPUPlace &place) {
              self.mutable_data<int>(place);
@@ -901,6 +909,12 @@ PYBIND11_MODULE(core_noavx, m) {
             return reinterpret_cast<uintptr_t>(
                 self.mutable_data(place, framework::TransToPhiDataType(type)));
           })
+      .def("_mutable_data",
+           [](framework::Tensor &self, paddle::platform::CustomPlace &place,
+              paddle::framework::proto::VarType::Type type) {
+             return reinterpret_cast<uintptr_t>(
+                 self.mutable_data(place, framework::TransToPhiDataType(type)));
+           })
       .def("_mutable_data",
            [](framework::Tensor &self, paddle::platform::XPUPlace &place,
               paddle::framework::proto::VarType::Type type) {
@@ -934,6 +948,8 @@ PYBIND11_MODULE(core_noavx, m) {
           })
       .def("_copy_from", &TensorCopyFrom,
            py::arg("tensor"), py::arg("place"), py::arg("batch_size") = -1)
+      .def("_copy_from", &TensorCopyFrom<paddle::platform::CustomPlace>,
+           py::arg("tensor"), py::arg("place"), py::arg("batch_size") = -1)
       .def("_copy_from", &TensorCopyFrom,
            py::arg("tensor"), py::arg("place"), py::arg("batch_size") = -1)
       .def("_copy_from", &TensorCopyFrom,
@@ -948,6 +964,8 @@ PYBIND11_MODULE(core_noavx, m) {
            py::arg("tensor"), py::arg("place"), py::arg("batch_size") = -1)
       .def("set", SetTensorFromPyArray,
            py::arg("array"), py::arg("place"), py::arg("zero_copy") = false)
+      .def("set", SetTensorFromPyArray<paddle::platform::CustomPlace>,
+           py::arg("array"), py::arg("place"), py::arg("zero_copy") = false)
       .def("set", SetTensorFromPyArray,
            py::arg("array"), py::arg("place"), py::arg("zero_copy") = false)
       .def("set", SetTensorFromPyArray,
@@ -1985,6 +2003,19 @@ All parameter, weight, gradient are variables in Paddle.
                             "Please recompile or reinstall Paddle with NPU support."));
 #else
                     return new paddle::platform::NPUDeviceContext(place);
+#endif
+                  })
+      .def_static("create",
+                  [](paddle::platform::CustomPlace& place)
+                      -> paddle::platform::DeviceContext* {
+#ifndef PADDLE_WITH_CUSTOM_DEVICE
+                    PADDLE_THROW(
+                        platform::errors::PermissionDenied(
+                            "Cannot use CustomPlace in CPU/GPU/XPU version, "
+                            "Please recompile or reinstall Paddle with "
+                            "CustomDevice support."));
+#else
+                    return new paddle::platform::CustomDeviceContext(place);
 #endif
                   })
       .def_static("create",
@@ -2722,6 +2753,12 @@ All parameter, weight, gradient are variables in Paddle.
             pybind11::gil_scoped_release release;
             self.Run(scope, place);
           })
+      .def("run",
+           [](OperatorBase &self, const Scope &scope,
+              const platform::CustomPlace &place) {
+             pybind11::gil_scoped_release release;
+             self.Run(scope, place);
+           })
       .def("type",
            [](const OperatorBase &op) -> std::string { return op.Type(); })
       .def("outputs",
--
GitLab
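
Editor's note (not part of the patch above): every hunk follows the same pybind11 pattern, re-registering an existing Python-visible name ("_alloc_float", "_copy_from", "set", "run", "create") with one more C++ overload keyed on the CustomPlace type, so that passing a CustomPlace object from Python dispatches to the new binding. A minimal, self-contained sketch of that overload-per-place pattern follows; the module, struct, and method names are hypothetical stand-ins for illustration, not Paddle code.

    // place_overload_demo.cc: illustrative sketch only, built against pybind11.
    #include <pybind11/pybind11.h>
    #include <string>

    namespace py = pybind11;

    // Hypothetical stand-ins for paddle::platform place types.
    struct CPUPlace {};
    struct CustomPlace {
      CustomPlace(std::string dev, int id) : dev_type(dev), dev_id(id) {}
      std::string dev_type;
      int dev_id;
    };

    struct Tensor {
      std::string last_alloc;  // records which overload ran
    };

    PYBIND11_MODULE(place_demo, m) {
      py::class_<CPUPlace>(m, "CPUPlace").def(py::init<>());
      py::class_<CustomPlace>(m, "CustomPlace").def(py::init<std::string, int>());
      py::class_<Tensor>(m, "Tensor")
          .def(py::init<>())
          // Same Python name registered twice; pybind11 tries each overload
          // until the C++ signature matches the place argument passed in.
          .def("_alloc_float",
               [](Tensor &self, CPUPlace &) { self.last_alloc = "cpu"; })
          .def("_alloc_float", [](Tensor &self, CustomPlace &p) {
            self.last_alloc = p.dev_type + ":" + std::to_string(p.dev_id);
          });
    }

From Python, t._alloc_float(place_demo.CustomPlace("npu", 0)) would then hit the CustomPlace overload; this overload resolution is the mechanism the patch relies on when Python code passes a CustomPlace into bindings such as Tensor "set", "_copy_from", or the operator "run" method.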