From 8c43c0feaaed1c622d40900f71759de2139a751a Mon Sep 17 00:00:00 2001
From: Jiabin Yang <360788950@qq.com>
Date: Fri, 29 Jul 2022 11:21:29 +0800
Subject: [PATCH] Support backward final hook (#44686)

---
 .../fluid/distributed/collective/reducer.cc   |  2 +-
 .../eager/accumulation/accumulation_node.cc   |  2 +-
 .../eager/accumulation/accumulation_node.h    |  4 +-
 paddle/fluid/eager/api/utils/global_utils.h   | 19 ++++-
 paddle/fluid/eager/api/utils/hook_utils.cc    | 18 ++--
 paddle/fluid/eager/api/utils/hook_utils.h     |  7 +-
 paddle/fluid/eager/backward.cc                |  6 ++
 paddle/fluid/eager/hooks.h                    | 13 +--
 .../accumulation_node_test.cc                 |  6 +-
 .../tests/task_tests/fwd_bwd_joint_test.cc    | 12 +--
 .../fluid/eager/tests/task_tests/hook_test.cc | 12 +--
 .../task_tests/hook_test_intermidiate.cc      | 76 ++++++++++++++---
 paddle/fluid/pybind/eager_functions.cc        | 15 ++++
 paddle/fluid/pybind/eager_method.cc           | 82 +------------------
 paddle/fluid/pybind/eager_utils.cc            | 50 +++++++++++
 paddle/fluid/pybind/eager_utils.h             | 35 ++++++++
 .../unittests/test_tensor_register_hook.py    | 30 +++++++
 17 files changed, 259 insertions(+), 130 deletions(-)

diff --git a/paddle/fluid/distributed/collective/reducer.cc b/paddle/fluid/distributed/collective/reducer.cc
index dda5f2eee6e..5f137c4d0af 100644
--- a/paddle/fluid/distributed/collective/reducer.cc
+++ b/paddle/fluid/distributed/collective/reducer.cc
@@ -321,7 +321,7 @@ EagerReducer::EagerReducer(
     const auto &accumulation_grad_node =
         std::dynamic_pointer_cast<egr::GradNodeAccumulation>(grad_node);
     accumulation_grad_node->RegisterReduceHook(
-        std::make_shared<egr::CppTensorVoidHook>(reduce_hook));
+        std::make_shared<egr::CppVoidHook>(reduce_hook));
 
     gradnode_index_map_[grad_node.get()] = global_var_index;
   }
diff --git a/paddle/fluid/eager/accumulation/accumulation_node.cc b/paddle/fluid/eager/accumulation/accumulation_node.cc
index 051ecfb70a8..0017dba7974 100644
--- a/paddle/fluid/eager/accumulation/accumulation_node.cc
+++ b/paddle/fluid/eager/accumulation/accumulation_node.cc
@@ -127,7 +127,7 @@ GradNodeAccumulation::operator()(
 }
 
 void GradNodeAccumulation::RegisterReduceHook(
-    std::shared_ptr<TensorVoidHook>&& hook) {
+    std::shared_ptr<VoidHook>&& hook) {
   reduce_hooks_.emplace_back(std::move(hook));
 }
 
diff --git a/paddle/fluid/eager/accumulation/accumulation_node.h b/paddle/fluid/eager/accumulation/accumulation_node.h
index 6374534578c..8dbc2872ca2 100644
--- a/paddle/fluid/eager/accumulation/accumulation_node.h
+++ b/paddle/fluid/eager/accumulation/accumulation_node.h
@@ -51,7 +51,7 @@ class GradNodeAccumulation : public GradNodeBase {
   /**
    * Register ReduceHook
    * **/
-  void RegisterReduceHook(std::shared_ptr<TensorVoidHook>&& hook);
+  void RegisterReduceHook(std::shared_ptr<VoidHook>&& hook);
 
   /**
    * Apply ReduceHook here
@@ -70,7 +70,7 @@ class GradNodeAccumulation : public GradNodeBase {
   // TODO(Jiabin): remove this when we make our clear gradient really cleared;
   bool is_fake_empty_ = {false};
   std::weak_ptr<paddle::experimental::Tensor> weak_grad_;
-  std::vector<std::shared_ptr<TensorVoidHook>> reduce_hooks_;
+  std::vector<std::shared_ptr<VoidHook>> reduce_hooks_;
   std::function<paddle::experimental::Tensor(
       const paddle::experimental::Tensor&)>
       retain_grad_hook_;
diff --git a/paddle/fluid/eager/api/utils/global_utils.h b/paddle/fluid/eager/api/utils/global_utils.h
index 44ea47a2576..93149feeae3 100644
--- a/paddle/fluid/eager/api/utils/global_utils.h
+++ b/paddle/fluid/eager/api/utils/global_utils.h
@@ -18,11 +18,11 @@
 #include <atomic>
 #include <memory>
 
+#include "paddle/fluid/eager/hooks.h"
 #include "paddle/fluid/eager/type_defs.h"
 #include "paddle/fluid/imperative/tracer.h"
 #include "paddle/phi/api/ext/op_meta_info.h"
 #include "paddle/utils/small_vector.h"
-
 namespace egr {
 class UniqueNameGenerator {
  public:
@@ -85,6 +85,22 @@ class Controller {
   GetCustomEdgesSlotMap() {
     return custom_edges_slot_map_;
   }
+  // For Cpp Hook
+  void RegisterBackwardFinalHook(const std::function<void()>& call_back) {
+    VLOG(6) << "RegisterBackwardFinalHook";
+    final_backward_hooks_.emplace_back(
+        std::make_shared<CppVoidHook>(std::move(call_back)));
+    VLOG(6) << "Size: " << final_backward_hooks_.size();
+  }
+  // For Python hook
+  void RegisterBackwardFinalHook(const std::shared_ptr<VoidHook>& call_back) {
+    final_backward_hooks_.emplace_back(call_back);
+  }
+  const std::vector<std::shared_ptr<VoidHook>>& FinalBackwardHooks() const {
+    return final_backward_hooks_;
+  }
+
+  void ClearFinalBackwardHooks() { final_backward_hooks_.clear(); }
 
  private:
   Controller() = default;
@@ -98,6 +114,7 @@ class Controller {
   std::unordered_map<std::string,
                      std::vector<std::vector<std::unordered_map<int, int>>>>
       custom_edges_slot_map_;
+  std::vector<std::shared_ptr<VoidHook>> final_backward_hooks_;
   DISABLE_COPY_AND_ASSIGN(Controller);
 };
 
diff --git a/paddle/fluid/eager/api/utils/hook_utils.cc b/paddle/fluid/eager/api/utils/hook_utils.cc
index dacbd2ff9ad..0ac3ac4a222 100644
--- a/paddle/fluid/eager/api/utils/hook_utils.cc
+++ b/paddle/fluid/eager/api/utils/hook_utils.cc
@@ -25,17 +25,20 @@ namespace egr_utils_api {
 
 int64_t RegisterGradientHookForTensor(
     const paddle::experimental::Tensor& tensor,
-    std::shared_ptr<TensorHook>&& hook) {
+    const std::function<paddle::experimental::Tensor(
+        const paddle::experimental::Tensor&)>& hook) {
   // Find grad_node and out_rank from AutogradMeta
   std::shared_ptr<GradNodeBase> grad_node = EagerUtils::grad_node(tensor);
   auto rank_info = EagerUtils::unsafe_autograd_meta(tensor)->OutRankInfo();
 
   return grad_node->RegisterGradientHook(
-      rank_info.first, rank_info.second, std::move(hook));
+      rank_info.first,
+      rank_info.second,
+      std::move(std::make_shared<CppTensorHook>(hook)));
 }
 
 void RegisterReduceHookForTensor(const paddle::experimental::Tensor& tensor,
-                                 std::shared_ptr<TensorVoidHook>&& hook) {
+                                 const std::function<void()>& hook) {
   if (IsLeafTensor(tensor)) {
     VLOG(6) << "Register ReduceHook for leaf tensor";
     std::shared_ptr<GradNodeBase> grad_node = EagerUtils::grad_node(tensor);
@@ -46,7 +49,8 @@ void RegisterReduceHookForTensor(const paddle::experimental::Tensor& tensor,
             "with type: GradNodeAccumulation"));
     auto accumulation_grad_node =
         std::dynamic_pointer_cast<GradNodeAccumulation>(grad_node);
-    accumulation_grad_node->RegisterReduceHook(std::move(hook));
+    accumulation_grad_node->RegisterReduceHook(
+        std::move(std::make_shared<CppVoidHook>(hook)));
   } else {
     PADDLE_THROW(paddle::platform::errors::Fatal(
         "Only can register reduce hook for leaf Tensor."));
@@ -90,10 +94,12 @@ void RetainGradForTensor(const paddle::experimental::Tensor& tensor) {
     };
 
     // Append to GradientHooks
-    RegisterGradientHookForTensor(tensor,
-                                  std::make_shared<CppTensorHook>(hook));
+    RegisterGradientHookForTensor(tensor, hook);
   }
 }
 
+void RegisterBackwardFinalHook(const std::function<void()>& hook) {
+  Controller::Instance().RegisterBackwardFinalHook(hook);
+}
 }  // namespace egr_utils_api
 }  // namespace egr
diff --git a/paddle/fluid/eager/api/utils/hook_utils.h b/paddle/fluid/eager/api/utils/hook_utils.h
index b36ef81125a..5b3ed2633e0 100644
--- a/paddle/fluid/eager/api/utils/hook_utils.h
+++ b/paddle/fluid/eager/api/utils/hook_utils.h
@@ -23,11 +23,14 @@ namespace egr_utils_api {
 
 int64_t RegisterGradientHookForTensor(
     const paddle::experimental::Tensor& tensor,
-    std::shared_ptr<TensorHook>&& hook);
+    const std::function<paddle::experimental::Tensor(
+        const paddle::experimental::Tensor&)>& hook);
 
 void RegisterReduceHookForTensor(const paddle::experimental::Tensor& tensor,
-                                 std::shared_ptr<TensorVoidHook>&& hook);
+                                 const std::function<void()>& hook);
 
 void RetainGradForTensor(const paddle::experimental::Tensor& tensor);
 
+void RegisterBackwardFinalHook(const std::function<void()>& hook);
+
 }  // namespace egr_utils_api
 }  // namespace egr
diff --git a/paddle/fluid/eager/backward.cc b/paddle/fluid/eager/backward.cc
index 7c7a09db2b1..a3c3c2718d9 100644
--- a/paddle/fluid/eager/backward.cc
+++ b/paddle/fluid/eager/backward.cc
@@ -371,6 +371,12 @@ std::vector<paddle::experimental::Tensor> RunBackward(
     }
   }
 
+  VLOG(6) << "Run Backward Final hook size: "
+          << egr::Controller::Instance().FinalBackwardHooks().size();
+  for (auto& hook : egr::Controller::Instance().FinalBackwardHooks()) {
+    (*hook)();
+  }
+  egr::Controller::Instance().ClearFinalBackwardHooks();
   if (!is_general_grad) return {};
   return GeneralGrad::Instance().GetResults(inputs, allow_unused, create_graph);
 }
diff --git a/paddle/fluid/eager/hooks.h b/paddle/fluid/eager/hooks.h
index a98b3d9f8e4..064c96bff38 100644
--- a/paddle/fluid/eager/hooks.h
+++ b/paddle/fluid/eager/hooks.h
@@ -29,16 +29,16 @@ class TensorHook {
       const paddle::experimental::Tensor& var) = 0;
 };
 
-class TensorVoidHook {
+class VoidHook {
  public:
-  virtual ~TensorVoidHook() = default;
+  virtual ~VoidHook() = default;
   virtual void operator()() = 0;
 };
 
 class CppTensorHook : public TensorHook {
  public:
-  explicit CppTensorHook(std::function<paddle::experimental::Tensor(
-                             const paddle::experimental::Tensor&)>&& fn)
+  explicit CppTensorHook(const std::function<paddle::experimental::Tensor(
+                             const paddle::experimental::Tensor&)>& fn)
       : fn_(std::move(fn)) {}
 
   paddle::experimental::Tensor operator()(
@@ -52,13 +52,14 @@ class CppTensorHook : public TensorHook {
       fn_;
 };
 
-class CppTensorVoidHook : public TensorVoidHook {
+class CppVoidHook : public VoidHook {
  public:
-  explicit CppTensorVoidHook(std::function<void()>&& fn) : fn_(std::move(fn)) {}
+  explicit CppVoidHook(const std::function<void()>& fn) : fn_(std::move(fn)) {}
 
   void operator()() override { return fn_(); }
 
  private:
   std::function<void()> fn_;
 };
+
 }  // namespace egr
diff --git a/paddle/fluid/eager/tests/data_structure_tests/accumulation_node_test.cc b/paddle/fluid/eager/tests/data_structure_tests/accumulation_node_test.cc
index c53ffe823ab..ecc0e0fe667 100644
--- a/paddle/fluid/eager/tests/data_structure_tests/accumulation_node_test.cc
+++ b/paddle/fluid/eager/tests/data_structure_tests/accumulation_node_test.cc
@@ -328,8 +328,7 @@ TEST(AccumulationNode, Tensor) {
     VLOG(6) << "Running Reduce Hook";
   };
 
-  node->RegisterReduceHook(
-      std::make_shared<egr::CppTensorVoidHook>(reduce_hook_1));
+  node->RegisterReduceHook(std::make_shared<egr::CppVoidHook>(reduce_hook_1));
 
   // operator()
   paddle::experimental::Tensor _ret = node->operator()(et0_vec)[0][0];
@@ -354,8 +353,7 @@ TEST(AccumulationNode, Tensor) {
     ret_et0_ptr[0] = 100.0;  // set to 100.0
     VLOG(6) << "Running Reduce Hook";
   };
-  node->RegisterReduceHook(
-      std::make_shared<egr::CppTensorVoidHook>(reduce_hook_2));
+  node->RegisterReduceHook(std::make_shared<egr::CppVoidHook>(reduce_hook_2));
   node->ApplyReduceHooks();
 
   // Check ApplyReduceHooks result
diff --git a/paddle/fluid/eager/tests/task_tests/fwd_bwd_joint_test.cc b/paddle/fluid/eager/tests/task_tests/fwd_bwd_joint_test.cc
index c5c34860b85..570a78af8f7 100644
--- a/paddle/fluid/eager/tests/task_tests/fwd_bwd_joint_test.cc
+++ b/paddle/fluid/eager/tests/task_tests/fwd_bwd_joint_test.cc
@@ -256,8 +256,8 @@ TEST(FwdBwdJoint, GradientHook) {
                  true /*bias_after_scale*/,
                  true /*trace_backward*/);
   egr_utils_api::RetainGradForTensor(out0);  // hook: +5
-  egr_utils_api::RegisterGradientHookForTensor(
-      out0, std::make_shared<egr::CppTensorHook>(hook_function));  // hook: +5
+  egr_utils_api::RegisterGradientHookForTensor(out0,
+                                               hook_function);  // hook: +5
 
   // Run Forward Node 1
   float scale1 = 5.0;
@@ -265,8 +265,8 @@ TEST(FwdBwdJoint, GradientHook) {
   paddle::experimental::Tensor out1 = egr::scale(
       out0, scale1, bias1, true /*bias_after_scale*/, true /*trace_backward*/);
   egr_utils_api::RetainGradForTensor(out1);  // hook: +5
-  egr_utils_api::RegisterGradientHookForTensor(
-      out1, std::make_shared<egr::CppTensorHook>(hook_function));  // hook: +5
+  egr_utils_api::RegisterGradientHookForTensor(out1,
+                                               hook_function);  // hook: +5
 
   // Run Forward Node 2
   float scale2 = 10.0;
@@ -274,8 +274,8 @@ TEST(FwdBwdJoint, GradientHook) {
   paddle::experimental::Tensor out2 = egr::scale(
       out0, scale2, bias2, true /*bias_after_scale*/, true /*trace_backward*/);
   egr_utils_api::RetainGradForTensor(out2);  // hook: +5
-  egr_utils_api::RegisterGradientHookForTensor(
-      out2, std::make_shared<egr::CppTensorHook>(hook_function));  // hook: +5
+  egr_utils_api::RegisterGradientHookForTensor(out2,
+                                               hook_function);  // hook: +5
 
   // 4. Run Backward
   std::vector<paddle::experimental::Tensor> outs = {out1, out2};
diff --git a/paddle/fluid/eager/tests/task_tests/hook_test.cc b/paddle/fluid/eager/tests/task_tests/hook_test.cc
index 21f79b9f258..d61e7053fb5 100644
--- a/paddle/fluid/eager/tests/task_tests/hook_test.cc
+++ b/paddle/fluid/eager/tests/task_tests/hook_test.cc
@@ -95,8 +95,7 @@ TEST(RetainGrad, HookBeforeRetainGrad) {
         std::dynamic_pointer_cast<GradNodeBase>(
             auto_grad_meta));
 
-    egr_utils_api::RegisterGradientHookForTensor(
-        target_tensor, std::make_shared<egr::CppTensorHook>(hook_function));
+    egr_utils_api::RegisterGradientHookForTensor(target_tensor, hook_function);
     egr_utils_api::RetainGradForTensor(
         target_tensor);  // result: 1.0 + 3.0 = 4.0
     egr_utils_api::RetainGradForTensor(
@@ -122,8 +121,7 @@ TEST(RetainGrad, HookBeforeRetainGrad) {
         std::dynamic_pointer_cast<GradNodeBase>(
            tmp_tensor0.mutable_autograd_meta()));
 
-    egr_utils_api::RegisterGradientHookForTensor(
-        leaf_tensor, std::make_shared<egr::CppTensorHook>(hook_function));
+    egr_utils_api::RegisterGradientHookForTensor(leaf_tensor, hook_function);
     egr_utils_api::RetainGradForTensor(
         leaf_tensor);  // result: 4.0*5.0 + 3.0 = 23.0
   }
@@ -173,8 +171,7 @@ TEST(RetainGrad, HookAfterRetainGrad) {
        auto_grad_meta));
 
   egr_utils_api::RetainGradForTensor(target_tensor);  // result: 1.0
-  egr_utils_api::RegisterGradientHookForTensor(
-      target_tensor, std::make_shared<egr::CppTensorHook>(hook_function));
+  egr_utils_api::RegisterGradientHookForTensor(target_tensor, hook_function);
 }
 
 // Retain Grad for leaf tensor1
@@ -193,8 +190,7 @@ TEST(RetainGrad, HookAfterRetainGrad) {
       std::dynamic_pointer_cast<GradNodeBase>(
          tmp_tensor0.mutable_autograd_meta()));
 
-  egr_utils_api::RegisterGradientHookForTensor(
-      leaf_tensor, std::make_shared<egr::CppTensorHook>(hook_function));
+  egr_utils_api::RegisterGradientHookForTensor(leaf_tensor, hook_function);
 }
 
 Backward(target_tensors, {});
diff --git a/paddle/fluid/eager/tests/task_tests/hook_test_intermidiate.cc b/paddle/fluid/eager/tests/task_tests/hook_test_intermidiate.cc
index 6e32dda55a4..30d22b55620 100644
--- a/paddle/fluid/eager/tests/task_tests/hook_test_intermidiate.cc
+++ b/paddle/fluid/eager/tests/task_tests/hook_test_intermidiate.cc
@@ -89,12 +89,11 @@ void test_sigmoid(bool is_remove_gradient_hook) {
   egr_utils_api::RetainGradForTensor(tensor);
 
   VLOG(6) << "Register GradientHook for Tensor";
-  int64_t hook_id = egr_utils_api::RegisterGradientHookForTensor(
-      tensor, std::make_shared<egr::CppTensorHook>(hook_function));
+  int64_t hook_id =
+      egr_utils_api::RegisterGradientHookForTensor(tensor, hook_function);
 
   VLOG(6) << "Register ReduceHook for Tensor";
-  egr_utils_api::RegisterReduceHookForTensor(
-      tensor, std::make_shared<egr::CppTensorVoidHook>(reduce_hook));
+  egr_utils_api::RegisterReduceHookForTensor(tensor, reduce_hook);
 
   VLOG(6) << "Runing Forward";
   auto output_tensor = sigmoid_dygraph_function(tensor, {});
@@ -161,10 +160,9 @@ void test_elementwiseAdd(bool is_remove_gradient_hook) {
   };
 
   egr_utils_api::RetainGradForTensor(Y);
-  int64_t hook_id = egr_utils_api::RegisterGradientHookForTensor(
-      Y, std::make_shared<egr::CppTensorHook>(hook_function));
-  egr_utils_api::RegisterReduceHookForTensor(
-      Y, std::make_shared<egr::CppTensorVoidHook>(reduce_hook));
+  int64_t hook_id =
+      egr_utils_api::RegisterGradientHookForTensor(Y, hook_function);
+  egr_utils_api::RegisterReduceHookForTensor(Y, reduce_hook);
 
   auto output_tensor = elementwise_add_dygraph_function(X, Y, {});
 
@@ -226,10 +224,9 @@ void test_matmul(bool is_remove_gradient_hook) {
   };
 
   egr_utils_api::RetainGradForTensor(Y);
-  int64_t hook_id = egr_utils_api::RegisterGradientHookForTensor(
-      Y, std::make_shared<egr::CppTensorHook>(hook_function));
-  egr_utils_api::RegisterReduceHookForTensor(
-      Y, std::make_shared<egr::CppTensorVoidHook>(reduce_hook));
+  int64_t hook_id =
+      egr_utils_api::RegisterGradientHookForTensor(Y, hook_function);
+  egr_utils_api::RegisterReduceHookForTensor(Y, reduce_hook);
 
   auto output_tensor = matmul_v2_dygraph_function(
       X, Y, {{"trans_x", false}, {"trans_y", false}});
@@ -256,6 +253,59 @@ void test_matmul(bool is_remove_gradient_hook) {
   }
 }
 
+void test_backward_final_hooks() {
+  // Prepare Device Contexts
+  VLOG(6) << "Init Env";
+  eager_test::InitEnv(paddle::platform::CPUPlace());
+
+  VLOG(6) << "Make paddle::experimental::Tensor";
+  paddle::framework::DDim ddimX = phi::make_ddim({4, 16});
+  paddle::experimental::Tensor X =
+      egr_utils_api::CreateTensorWithValue(ddimX,
+                                           paddle::platform::CPUPlace(),
+                                           phi::DataType::FLOAT32,
+                                           phi::DataLayout::NCHW,
+                                           3.0,
+                                           true);
+  paddle::framework::DDim ddimY = phi::make_ddim({16, 20});
+  egr_utils_api::RetainGradForTensor(X);
+
+  paddle::experimental::Tensor Y =
+      egr_utils_api::CreateTensorWithValue(ddimY,
+                                           paddle::platform::CPUPlace(),
+                                           phi::DataType::FLOAT32,
+                                           phi::DataLayout::NCHW,
+                                           2.0,
+                                           true);
+
+  VLOG(6) << "Make ReduceHook function";
+  auto backward_final_hook = [&](void) -> void {
+    auto* t_ptr =
+        std::dynamic_pointer_cast<phi::DenseTensor>(X.impl())->data<float>();
+    VLOG(6) << "Run Target Backward Hook";
+    for (int i = 0; i < X.numel(); i++) {
+      t_ptr[i] = 100.0;  // set to 100.0
+    }
+  };
+  VLOG(6) << "Register Backward Final Hook";
+  egr_utils_api::RegisterBackwardFinalHook(backward_final_hook);
+
+  VLOG(6) << "Runing Forward";
+  auto output_tensor = matmul_v2_dygraph_function(
+      X, Y, {{"trans_x", false}, {"trans_y", false}});
+  auto res = sigmoid_dygraph_function(output_tensor, {});
+  VLOG(6) << "Finish Forward";
+
+  eager_test::CompareTensorWithValue<float>(X, 3.0);
+
+  std::vector<paddle::experimental::Tensor> target_tensors = {output_tensor};
+
+  VLOG(6) << "Runing Backward";
+  Backward(target_tensors, {});
+  VLOG(6) << "Finish Backward";
+  eager_test::CompareTensorWithValue<float>(X, 100.0);
+}
+
 TEST(Hook_intermidiate, Sigmoid) {
   // True or false represents whether to call RemoveGradientHook
   test_sigmoid(true);
   test_sigmoid(false);
 }
@@ -271,6 +321,8 @@ TEST(Hook_intermidiate, Matmul_v2) {
   test_matmul(true);
   test_matmul(false);
 }
+
+TEST(Hook_intermidiate, BackwardFinal) { test_backward_final_hooks(); }
 }  // namespace egr
 
 USE_OP_ITSELF(sigmoid);
diff --git a/paddle/fluid/pybind/eager_functions.cc b/paddle/fluid/pybind/eager_functions.cc
index 76315ba2b38..91af1078565 100644
--- a/paddle/fluid/pybind/eager_functions.cc
+++ b/paddle/fluid/pybind/eager_functions.cc
@@ -907,12 +907,27 @@ static PyObject* eager_api_to_uva_tensor(PyObject* self,
 }
 #endif
 
+static PyObject* eager_api__add_backward_final_hook(PyObject* self,
+                                                    PyObject* args,
+                                                    PyObject* kwargs) {
+  EAGER_TRY
+  PyObject* hook_func = PyTuple_GET_ITEM(args, 0);
+  egr::Controller::Instance().RegisterBackwardFinalHook(
+      std::make_shared<PyVoidHook>(hook_func));
+  RETURN_PY_NONE
+  EAGER_CATCH_AND_THROW_RETURN_NULL
+}
+
 PyMethodDef variable_functions[] = {
     // TODO(jiabin): Remove scale when we have final state tests
     {"scale",
      (PyCFunction)(void (*)(void))eager_api_scale,
      METH_VARARGS | METH_KEYWORDS,
      NULL},
+    {"_add_backward_final_hook",
+     (PyCFunction)(void (*)(void))eager_api__add_backward_final_hook,
+     METH_VARARGS | METH_KEYWORDS,
+     NULL},
     {"run_backward",
      (PyCFunction)(void (*)(void))eager_api_run_backward,
      METH_VARARGS | METH_KEYWORDS,
diff --git a/paddle/fluid/pybind/eager_method.cc b/paddle/fluid/pybind/eager_method.cc
index 086c15dafdf..977d2931cc4 100644
--- a/paddle/fluid/pybind/eager_method.cc
+++ b/paddle/fluid/pybind/eager_method.cc
@@ -57,86 +57,6 @@ typedef SSIZE_T ssize_t;
 namespace paddle {
 namespace pybind {
 
-namespace py = ::pybind11;
-
-class PyTensorHook : public egr::TensorHook {
- public:
-  explicit PyTensorHook(PyObject* func) : py_func_(func) {
-    Py_INCREF(py_func_);
-  }
-
-  ~PyTensorHook() {
-    py::gil_scoped_acquire gil;
-    Py_DECREF(py_func_);
-  }
-
-  paddle::experimental::Tensor operator()(
-      const paddle::experimental::Tensor& var) override {
-    py::gil_scoped_acquire gil;
-    VLOG(3) << "Call PyTensorHook for var " << var.name();
-
-    PyObject* res = nullptr;
-    try {
-      PyObject* p_tmp_var = ToPyObject(var);
-      res = PyObject_CallFunctionObjArgs(py_func_, p_tmp_var, nullptr);
-      Py_DECREF(p_tmp_var);
-    } catch (platform::EnforceNotMet& e) {
-      throw std::move(e);
-    } catch (std::exception& e) {
-      PADDLE_THROW(platform::errors::Unavailable(
-          "Hook function of Tensor raises an exception: %s.", e.what()));
-    } catch (...) {
-      PADDLE_THROW(platform::errors::Fatal(
-          "Hook function of Tensor raises an unknown exception."));
-    }
-
-    PADDLE_ENFORCE_NOT_NULL(res,
-                            platform::errors::Unavailable(
-                                "Hook function of Tensor return a nullptr."));
-    if (res == Py_None) {
-      return var;
-    }
-    auto res_tensor = reinterpret_cast<TensorObject*>(res)->tensor;
-    Py_DECREF(res);
-    return res_tensor;
-  }
-
- private:
-  PyObject* py_func_;
-};
-
-class PyTensorVoidHook : public egr::TensorVoidHook {
- public:
-  explicit PyTensorVoidHook(PyObject* func) : py_func_(func) {
-    Py_INCREF(py_func_);
-  }
-
-  ~PyTensorVoidHook() {
-    py::gil_scoped_acquire gil;
-    Py_DECREF(py_func_);
-  }
-
-  void operator()() override {
-    py::gil_scoped_acquire gil;
-    VLOG(3) << "Call PyTensorVoidHook";
-
-    try {
-      PyObject_CallFunctionObjArgs(py_func_, nullptr);
-    } catch (platform::EnforceNotMet& e) {
-      throw std::move(e);
-    } catch (std::exception& e) {
-      PADDLE_THROW(platform::errors::Unavailable(
-          "Hook function of Tensor raises an exception: %s.", e.what()));
-    } catch (...) {
-      PADDLE_THROW(platform::errors::Fatal(
-          "Hook function of Tensor raises an unknown exception."));
-    }
-  }
-
- private:
-  PyObject* py_func_;
-};
-
 extern void InitTensorWithNumpyValue(TensorObject* self,
                                      const pybind11::object& array,
                                      const paddle::platform::Place& place,
@@ -1363,7 +1283,7 @@ static PyObject* tensor_register_reduce_hook(TensorObject* self,
   auto accumulation_grad_node =
       std::dynamic_pointer_cast<egr::GradNodeAccumulation>(grad_node);
   accumulation_grad_node->RegisterReduceHook(
-      std::make_shared<PyTensorVoidHook>(hook_func));
+      std::make_shared<PyVoidHook>(hook_func));
 
   RETURN_PY_NONE
 
diff --git a/paddle/fluid/pybind/eager_utils.cc b/paddle/fluid/pybind/eager_utils.cc
index 467835da120..253291256ef 100644
--- a/paddle/fluid/pybind/eager_utils.cc
+++ b/paddle/fluid/pybind/eager_utils.cc
@@ -18,6 +18,7 @@ limitations under the License. */
 #include "paddle/fluid/eager/api/all.h"
 #include "paddle/fluid/eager/autograd_meta.h"
+#include "paddle/fluid/eager/hooks.h"
 #include "paddle/fluid/framework/convert_utils.h"
 #include "paddle/fluid/framework/scope.h"
 #include "paddle/fluid/framework/scope_guard.h"
@@ -1427,5 +1428,54 @@ paddle::DataType CastPyArg2DataType(PyObject* obj,
   framework::proto::VarType::Type type = CastPyArg2ProtoType(obj, arg_pos);
   return framework::TransToPhiDataType(type);
 }
+
+paddle::experimental::Tensor PyTensorHook::operator()(
+    const paddle::experimental::Tensor& var) {
+  py::gil_scoped_acquire gil;
+  VLOG(3) << "Call PyTensorHook for var " << var.name();
+
+  PyObject* res = nullptr;
+  try {
+    PyObject* p_tmp_var = ToPyObject(var);
+    res = PyObject_CallFunctionObjArgs(py_func_, p_tmp_var, nullptr);
+    Py_DECREF(p_tmp_var);
+  } catch (platform::EnforceNotMet& e) {
+    throw std::move(e);
+  } catch (std::exception& e) {
+    PADDLE_THROW(platform::errors::Unavailable(
+        "Hook function of Tensor raises an exception: %s.", e.what()));
+  } catch (...) {
+    PADDLE_THROW(platform::errors::Fatal(
+        "Hook function of Tensor raises an unknown exception."));
+  }
+
+  PADDLE_ENFORCE_NOT_NULL(res,
+                          platform::errors::Unavailable(
+                              "Hook function of Tensor return a nullptr."));
+  if (res == Py_None) {
+    return var;
+  }
+  auto res_tensor = reinterpret_cast<TensorObject*>(res)->tensor;
+  Py_DECREF(res);
+  return res_tensor;
+}
+
+void PyVoidHook::operator()() {
+  py::gil_scoped_acquire gil;
+  VLOG(3) << "Call PyVoidHook";
+
+  try {
+    PyObject_CallFunctionObjArgs(py_func_, nullptr);
+  } catch (platform::EnforceNotMet& e) {
+    throw std::move(e);
+  } catch (std::exception& e) {
+    PADDLE_THROW(platform::errors::Unavailable(
+        "Hook function of Tensor raises an exception: %s.", e.what()));
+  } catch (...) {
+    PADDLE_THROW(platform::errors::Fatal(
+        "Hook function of Tensor raises an unknown exception."));
+  }
+}
+
 }  // namespace pybind
 }  // namespace paddle
diff --git a/paddle/fluid/pybind/eager_utils.h b/paddle/fluid/pybind/eager_utils.h
index ead55cc5e72..4fab8534b7b 100644
--- a/paddle/fluid/pybind/eager_utils.h
+++ b/paddle/fluid/pybind/eager_utils.h
@@ -17,6 +17,7 @@ typedef SSIZE_T ssize_t;
 
 #include <Python.h>
 
+#include "paddle/fluid/eager/hooks.h"
 #include "paddle/fluid/framework/lod_tensor.h"
 #include "paddle/fluid/framework/tensor.h"
 #include "paddle/fluid/jit/base_function.h"
@@ -36,6 +37,7 @@ class Scope;
 }
 
 namespace pybind {
+namespace py = ::pybind11;
 #define RETURN_PY_NONE \
   Py_INCREF(Py_None);  \
   return Py_None;
@@ -110,6 +112,39 @@ PyObject* ToPyObject(
     const std::unordered_map>& value);
 PyObject* ToPyObject(const std::unordered_map& value);
 
+class PyTensorHook : public egr::TensorHook {
+ public:
+  explicit PyTensorHook(PyObject* func) : py_func_(func) {
+    Py_INCREF(py_func_);
+  }
+
+  ~PyTensorHook() {
+    py::gil_scoped_acquire gil;
+    Py_DECREF(py_func_);
+  }
+
+  paddle::experimental::Tensor operator()(
+      const paddle::experimental::Tensor& var) override;
+
+ private:
+  PyObject* py_func_;
+};
+
+class PyVoidHook : public egr::VoidHook {
+ public:
+  explicit PyVoidHook(PyObject* func) : py_func_(func) { Py_INCREF(py_func_); }
+
+  ~PyVoidHook() {
+    py::gil_scoped_acquire gil;
+    Py_DECREF(py_func_);
+  }
+
+  void operator()() override;
+
+ private:
+  PyObject* py_func_;
+};
+
 template <typename Tuple, size_t N>
 struct TupleTensorResult {
   static void Run(const Tuple& out, PyObject* result) {
diff --git a/python/paddle/fluid/tests/unittests/test_tensor_register_hook.py b/python/paddle/fluid/tests/unittests/test_tensor_register_hook.py
index d8d1990a4fa..9767fb25243 100644
--- a/python/paddle/fluid/tests/unittests/test_tensor_register_hook.py
+++ b/python/paddle/fluid/tests/unittests/test_tensor_register_hook.py
@@ -639,5 +639,35 @@ class TestTensorRegisterBackwardHook(unittest.TestCase):
         self.func_register_backward_hook_for_var_without_gradient()
 
 
+class TestRegsiterBackwardFinalHook(unittest.TestCase):
+
+    def setUp(self):
+        self.devices = ["cpu"]
+        if paddle.is_compiled_with_cuda():
+            self.devices.append("gpu")
+
+    def test_register_backward_hook(self):
+        global HOOK_INIT_VALUE
+        global HOOK_IS_CALLED
+        for device in self.devices:
+            np_x = np.random.rand(4, 16).astype("float32")
+            np_y = np.random.rand(16, 20).astype("float32")
+            x = paddle.to_tensor(np_x, stop_gradient=False)
+            y = paddle.to_tensor(np_y, stop_gradient=False)
+
+            core.eager._add_backward_final_hook(global_void_hook)
+
+            out = paddle.matmul(x, y)
+            out = paddle.sum(out)
+            out.backward()
+
+            self.assertEqual(HOOK_INIT_VALUE, 20)
+            self.assertTrue(HOOK_IS_CALLED)
+
+            # reset initial value
+            HOOK_INIT_VALUE = 10
+            HOOK_IS_CALLED = False
+
+
 if __name__ == '__main__':
     unittest.main()
-- 
GitLab
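
Usage sketch (not part of the patch above): the backward final hook is reachable from Python through the private binding exercised by the new unit test. Because RunBackward clears the registered hooks after running them, a callback fires at most once and has to be re-registered before every backward() call. The callback name below is illustrative, and the import path is assumed to match the one used in test_tensor_register_hook.py.

    import paddle
    import paddle.fluid.core as core

    def on_backward_done():
        # Runs exactly once, after the whole backward pass has finished.
        print("backward finished")

    x = paddle.randn([4, 16])
    y = paddle.randn([16, 20])
    x.stop_gradient = False
    y.stop_gradient = False

    core.eager._add_backward_final_hook(on_backward_done)  # consumed by the next backward()
    paddle.sum(paddle.matmul(x, y)).backward()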