diff --git a/paddle/fluid/pybind/eager_properties.cc b/paddle/fluid/pybind/eager_properties.cc
index 42d53ad7bee0197b63e89ebceaf3c19a02e1da21..2a7692ee99bb35fc01448856e487d3ba723e8d62 100644
--- a/paddle/fluid/pybind/eager_properties.cc
+++ b/paddle/fluid/pybind/eager_properties.cc
@@ -317,17 +317,16 @@ PyObject* tensor_properties_get_grad_fn(TensorObject* self, void* closure) {
 
   if (meta) {
     // Get the GradNode from meta
-    auto grad_node = meta->GradNode();  // Convert GradNode to a Python object
-    // The conversion will depend on the structure of GradNode.
-
-    if (!grad_node) {
+    auto grad_node_ptr = meta->GetMutableGradNode();
+    if (!grad_node_ptr) {
       Py_INCREF(Py_None);
       return Py_None;
     }
 
-    PyObject* py_grad_node = ToPyObject(grad_node);
+    PyObject* py_grad_node = ToPyObject(grad_node_ptr);
 
     return py_grad_node;
+
   } else {
     // If meta does not exist, return an appropriate Python object (e.g., None
     // or a special value).
diff --git a/paddle/fluid/pybind/eager_utils.cc b/paddle/fluid/pybind/eager_utils.cc
index 8dfc7cfc8e426edc39925b6a19d6af7bc12d167a..ee270042f4176e08c0d9c70a66fbcd05b8845eaf 100644
--- a/paddle/fluid/pybind/eager_utils.cc
+++ b/paddle/fluid/pybind/eager_utils.cc
@@ -1006,10 +1006,9 @@ paddle::optional<paddle::Tensor> GetOptionalTensorFromArgs(
   }
 }
 
-PyObject* ToPyObject(egr::GradNodeBase* grad_node) {
+PyObject* ToPyObject(std::shared_ptr<egr::GradNodeBase> grad_node) {
   py::object py_obj = py::cast(grad_node, py::return_value_policy::reference);
-  py::handle py_handle = py::handle(py_obj);
-  PyObject* py_grad_node = py_handle.ptr();
+  PyObject* py_grad_node = py_obj.release().ptr();
   Py_INCREF(py_grad_node);
   return py_grad_node;
 }
diff --git a/paddle/fluid/pybind/eager_utils.h b/paddle/fluid/pybind/eager_utils.h
index 1fb53a3b9f7a6c688bdce76401fad4962223f6a5..f50ec9395b2f1f2b0fcbdbceee5b722915eabc0b 100644
--- a/paddle/fluid/pybind/eager_utils.h
+++ b/paddle/fluid/pybind/eager_utils.h
@@ -126,7 +126,7 @@ PyObject* ToPyObject(
     const std::unordered_map<std::string, std::vector<std::string>>& value);
 PyObject* ToPyObject(const paddle::framework::Vocab& value);
 
-PyObject* ToPyObject(egr::GradNodeBase* grad_node);
+PyObject* ToPyObject(std::shared_ptr<egr::GradNodeBase> grad_node);
 
 class PyTensorHook : public egr::TensorHook {
  public:
diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc
index d55cab98b1eba60b1fe8f3740332d6e5fe36ec95..504e1adf22569ae84a599227b6c287e13325f8be 100644
--- a/paddle/fluid/pybind/pybind.cc
+++ b/paddle/fluid/pybind/pybind.cc
@@ -778,12 +778,24 @@ PYBIND11_MODULE(libpaddle, m) {
     }
   });
 
-  py::class_<egr::GradNodeBase>(m, "GradNodeBase")
-      .def("name", &egr::GradNodeBase::name)
-      .def_property_readonly("next_functions",
-                             &egr::GradNodeBase::NextFunctions)
-      .def("input_meta", &egr::GradNodeBase::InputMeta)
-      .def("output_meta", &egr::GradNodeBase::OutputMeta);
+  py::class_<egr::GradNodeBase, std::shared_ptr<egr::GradNodeBase>>(
+      m, "GradNodeBase")
+      .def("name",
+           [](const std::shared_ptr<egr::GradNodeBase> &self) {
+             return self->name();
+           })
+      .def_property_readonly(
+          "next_functions",
+          [](const std::shared_ptr<egr::GradNodeBase> &self) {
+            return self->NextFunctions();
+          })
+      .def("input_meta",
+           [](const std::shared_ptr<egr::GradNodeBase> &self) {
+             return self->InputMeta();
+           })
+      .def("output_meta", [](const std::shared_ptr<egr::GradNodeBase> &self) {
+        return self->OutputMeta();
+      });
 
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
   m.def("cudnn_version", &platform::DnnVersion);
diff --git a/test/legacy_test/test_grad_fn_and_next_functions.py b/test/legacy_test/test_grad_fn_and_next_functions.py
index 5464775001253a0c6c95ef3c272e5a6a12dbcd4e..531cdfa98a070da86b8346c0d96fcba5a1d5f2ff 100644
--- a/test/legacy_test/test_grad_fn_and_next_functions.py
+++ b/test/legacy_test/test_grad_fn_and_next_functions.py
@@ -83,6 +83,11 @@ class TestAnonmousSurvey(unittest.TestCase):
             grad_fn_json (dict): grad_node_json of node
         """
         self.assertEqual(grad_fn.name(), grad_fn_json["func_name"])
+        # Recursively test other nodes
+        if hasattr(grad_fn, 'next_functions') and grad_fn.next_functions[0]:
+            next_funcs_json = grad_fn_json["next_funcs"]
+            for u in grad_fn.next_functions:
+                self.check_func(u, next_funcs_json[u.name()])
 
 
 if __name__ == "__main__":
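The test above only asserts node names against a JSON description. As a usage illustration, here is a minimal sketch of walking the autograd graph through the grad_fn / next_functions API this patch exposes. The tensor values and the walk() helper are assumptions for illustration, not part of the patch or its tests; the sketch assumes dynamic-graph (eager) mode and, following the guard in check_func, treats falsy entries of next_functions as leaf boundaries.

import paddle

x = paddle.to_tensor([1.0, 2.0], stop_gradient=False)
y = (x * x).sum()  # builds a small graph: sum <- multiply <- leaf x

def walk(node, depth=0):
    # Print this grad node's op name, then recurse into its inputs.
    print("  " * depth + node.name())
    for nxt in node.next_functions:
        if nxt:  # falsy entries mark leaf inputs, per the guard in check_func
            walk(nxt, depth + 1)

walk(y.grad_fn)

The std::shared_ptr<egr::GradNodeBase> holder registered for GradNodeBase in pybind.cc is what should make a traversal like this safe: each Python handle shares ownership of the underlying node rather than borrowing a raw pointer that autograd may free, which is why ToPyObject and the getter were switched from GradNodeBase* to the shared_ptr from GetMutableGradNode().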