From 4e1f7692867e7d683f96000c2ea9c2e3c05104e5 Mon Sep 17 00:00:00 2001
From: wanghuancoder
Date: Tue, 19 Jul 2022 20:14:51 +0800
Subject: [PATCH] [Eager]release gil when run backward (#44433)

* release gil when run backward
---
 paddle/fluid/eager/pylayer/py_layer_node.cc |  1 +
 paddle/fluid/pybind/eager_functions.cc      | 57 ++++++++++++---------
 paddle/fluid/pybind/eager_utils.h           | 12 +++++
 3 files changed, 46 insertions(+), 24 deletions(-)

diff --git a/paddle/fluid/eager/pylayer/py_layer_node.cc b/paddle/fluid/eager/pylayer/py_layer_node.cc
index 0383251c9a1..11e9d93da47 100644
--- a/paddle/fluid/eager/pylayer/py_layer_node.cc
+++ b/paddle/fluid/eager/pylayer/py_layer_node.cc
@@ -34,6 +34,7 @@ GradNodePyLayer::operator()(
                          kSlotSmallVectorSize>& grads,  // NOLINT
     bool create_graph,
     bool is_new_grad) {
+  pybind11::gil_scoped_acquire gil;
   VLOG(3) << "Running Eager Backward Node: " << name();
 
   paddle::small_vector<std::vector<paddle::experimental::Tensor>,
diff --git a/paddle/fluid/pybind/eager_functions.cc b/paddle/fluid/pybind/eager_functions.cc
index 3fe2cb170d7..76315ba2b38 100644
--- a/paddle/fluid/pybind/eager_functions.cc
+++ b/paddle/fluid/pybind/eager_functions.cc
@@ -119,9 +119,12 @@ static PyObject* eager_api_run_backward(PyObject* self,
   EAGER_TRY
   auto tensors = CastPyArg2VectorOfTensor(PyTuple_GET_ITEM(args, 0), 0);
   auto grad_tensors = CastPyArg2VectorOfTensor(PyTuple_GET_ITEM(args, 1), 1);
-  egr::Backward(tensors,
-                grad_tensors,
-                CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 2), 2));
+  {
+    eager_gil_scoped_release guard;
+    egr::Backward(tensors,
+                  grad_tensors,
+                  CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 2), 2));
+  }
   RETURN_PY_NONE
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
@@ -138,15 +141,18 @@ static PyObject* eager_api_run_partial_grad(PyObject* self,
   auto only_inputs = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 5), 5);
   auto allow_unused = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 6), 6);
   auto no_grad_vars = CastPyArg2VectorOfTensor(PyTuple_GET_ITEM(args, 7), 7);
-
-  std::vector<paddle::experimental::Tensor> result = egr::Grad(tensors,
-                                                               inputs,
-                                                               grad_tensors,
-                                                               retain_graph,
-                                                               create_graph,
-                                                               only_inputs,
-                                                               allow_unused,
-                                                               no_grad_vars);
+  std::vector<paddle::experimental::Tensor> result;
+  {
+    eager_gil_scoped_release guard;
+    result = egr::Grad(tensors,
+                       inputs,
+                       grad_tensors,
+                       retain_graph,
+                       create_graph,
+                       only_inputs,
+                       allow_unused,
+                       no_grad_vars);
+  }
   VLOG(1) << " in eager_api_run_partial_grad, after runing egr::Grad";
   return ToPyObject(result, true /* return_py_none_if_not_initialize */);
   EAGER_CATCH_AND_THROW_RETURN_NULL
@@ -179,18 +185,21 @@ static PyObject* eager_api_read_next_tensor_list(PyObject* self,
   auto tensor_base_list =
       CastPyArg2VectorOfTensorBase(PyTuple_GET_ITEM(args, 0), 0);
   std::vector<paddle::experimental::Tensor> tensor_list;
-  tensor_list.reserve(tensor_base_list.size());
-  auto func = [](framework::Tensor& tensor_base) {
-    paddle::experimental::Tensor tensor(
-        egr::Controller::Instance().GenerateUniqueName());
-    auto autograd_meta = egr::EagerUtils::autograd_meta(&tensor);
-    autograd_meta->SetPersistable(false);
-    autograd_meta->SetStopGradient(true);
-    tensor.set_impl(std::make_shared<phi::DenseTensor>(tensor_base));
-    return tensor;
-  };
-  for (auto& tensor_base : tensor_base_list) {
-    tensor_list.emplace_back(func(tensor_base));
+  {
+    eager_gil_scoped_release guard;
+    tensor_list.reserve(tensor_base_list.size());
+    auto func = [](framework::Tensor& tensor_base) {
+      paddle::experimental::Tensor tensor(
+          egr::Controller::Instance().GenerateUniqueName());
+      auto autograd_meta = egr::EagerUtils::autograd_meta(&tensor);
+      autograd_meta->SetPersistable(false);
+      autograd_meta->SetStopGradient(true);
+      tensor.set_impl(std::make_shared<phi::DenseTensor>(tensor_base));
+      return tensor;
+    };
+    for (auto& tensor_base : tensor_base_list) {
+      tensor_list.emplace_back(func(tensor_base));
+    }
   }
   return ToPyObject(tensor_list);
   EAGER_CATCH_AND_THROW_RETURN_NULL
diff --git a/paddle/fluid/pybind/eager_utils.h b/paddle/fluid/pybind/eager_utils.h
index b97dcb9cddb..eb1bbdb3034 100644
--- a/paddle/fluid/pybind/eager_utils.h
+++ b/paddle/fluid/pybind/eager_utils.h
@@ -253,5 +253,17 @@ std::vector<paddle::framework::Scope*> GetScopePtrListFromArgs(
     ssize_t arg_idx,
     bool dispensable);
 
+class eager_gil_scoped_release {
+ public:
+  eager_gil_scoped_release() { tstate = PyEval_SaveThread(); }
+  ~eager_gil_scoped_release() {
+    if (!tstate) return;
+    PyEval_RestoreThread(tstate);
+  }
+
+ private:
+  PyThreadState* tstate{nullptr};
+};
+
 }  // namespace pybind
 }  // namespace paddle
--
GitLab
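
The two halves of this patch follow the standard CPython GIL discipline: the Python-facing entry points (`eager_api_run_backward`, `eager_api_run_partial_grad`, `eager_api_read_next_tensor_list`) drop the GIL around long-running C++ work via the new `eager_gil_scoped_release` guard, while `GradNodePyLayer::operator()` re-acquires it, because a PyLayer node calls back into user-defined Python code and must never touch `PyObject`s without holding the GIL. The sketch below is a minimal, self-contained illustration of that pairing using only the Python C API; the names `ScopedGILRelease`, `call_into_python`, and `run_backward_like` are hypothetical stand-ins, not part of the patch (the patch itself uses `pybind11::gil_scoped_acquire` on the re-acquire side).

```cpp
#include <Python.h>

// Hypothetical RAII guard mirroring the eager_gil_scoped_release added to
// eager_utils.h above: constructing it releases the GIL, and the destructor
// re-acquires it, even on early return or exception.
class ScopedGILRelease {
 public:
  ScopedGILRelease() : tstate_(PyEval_SaveThread()) {}
  ~ScopedGILRelease() {
    if (tstate_ != nullptr) PyEval_RestoreThread(tstate_);
  }
  // Non-copyable: the saved thread state must be restored exactly once.
  ScopedGILRelease(const ScopedGILRelease&) = delete;
  ScopedGILRelease& operator=(const ScopedGILRelease&) = delete;

 private:
  PyThreadState* tstate_;
};

// Stand-in for a PyLayer-style grad node: it re-acquires the GIL before
// touching any Python object, because the engine that invokes it now runs
// GIL-free. The patch uses pybind11::gil_scoped_acquire for this; the raw
// PyGILState calls play the same role here.
void call_into_python(PyObject* callable) {
  PyGILState_STATE gstate = PyGILState_Ensure();       // take the GIL
  PyObject* result = PyObject_CallObject(callable, nullptr);
  Py_XDECREF(result);
  PyGILState_Release(gstate);                          // give it back
}

// Stand-in for a binding entry point shaped like eager_api_run_backward:
// drop the GIL around the long-running C++ section so other Python threads
// can make progress while the backward pass executes.
void run_backward_like(PyObject* py_backward) {
  {
    ScopedGILRelease guard;         // GIL released here
    // ... long-running C++ autograd work would run GIL-free here ...
    call_into_python(py_backward);  // holds the GIL only while needed
  }                                 // guard destructor re-acquires the GIL
}

int main() {
  Py_Initialize();
  // Any zero-argument Python callable works for the demonstration.
  PyObject* builtins = PyImport_ImportModule("builtins");
  PyObject* callable = PyObject_GetAttrString(builtins, "dict");
  run_backward_like(callable);
  Py_DECREF(callable);
  Py_DECREF(builtins);
  Py_Finalize();
  return 0;
}
```

This ordering is why py_layer_node.cc is part of the same change: once `egr::Backward` runs with the GIL released, any `GradNodePyLayer` reached during graph traversal would otherwise manipulate Python objects without the GIL, which is undefined behavior in CPython. The sketch assumes a CPython development environment (compile and link flags from `python3-config --cflags --embed --ldflags` or similar).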