From 93099bb8c3e112d1d102c3a4e16a97f24fc778d9 Mon Sep 17 00:00:00 2001 From: wanghuancoder Date: Thu, 1 Dec 2022 19:12:33 +0800 Subject: [PATCH] do not link python lib in tensor wrapper (#48523) * do not link python lib in tensor wrapper --- paddle/CMakeLists.txt | 3 + paddle/fluid/eager/CMakeLists.txt | 16 +-- .../eager_generated/backwards/CMakeLists.txt | 1 - .../eager_generated/forwards/CMakeLists.txt | 1 - .../generate_file_structures.py | 2 - paddle/fluid/eager/hooks.h | 56 +++++++- paddle/fluid/eager/saved_tensors_hooks.cc | 119 ----------------- paddle/fluid/eager/saved_tensors_hooks.h | 97 -------------- paddle/fluid/eager/tensor_wrapper.h | 31 +++-- paddle/fluid/pybind/CMakeLists.txt | 9 -- paddle/fluid/pybind/eager_functions.cc | 5 +- paddle/fluid/pybind/eager_py_layer.cc | 1 - paddle/fluid/pybind/eager_utils.cc | 121 ++++++++++++++++++ paddle/fluid/pybind/eager_utils.h | 43 +++++++ 14 files changed, 243 insertions(+), 262 deletions(-) delete mode 100644 paddle/fluid/eager/saved_tensors_hooks.cc delete mode 100644 paddle/fluid/eager/saved_tensors_hooks.h diff --git a/paddle/CMakeLists.txt b/paddle/CMakeLists.txt index b39f720f410..e7f788631b7 100644 --- a/paddle/CMakeLists.txt +++ b/paddle/CMakeLists.txt @@ -72,6 +72,9 @@ if(${len} GREATER_EQUAL 1) target_link_libraries(${test_name} "-Wl,-rpath,$") endif() + if(NOT ((NOT WITH_PYTHON) AND ON_INFER)) + target_link_libraries(${test_name} ${PYTHON_LIBRARIES}) + endif() if(WITH_XPU) target_link_libraries(${test_name} xpulib) endif() diff --git a/paddle/fluid/eager/CMakeLists.txt b/paddle/fluid/eager/CMakeLists.txt index d5d5f1daa1e..fd02d247788 100644 --- a/paddle/fluid/eager/CMakeLists.txt +++ b/paddle/fluid/eager/CMakeLists.txt @@ -14,8 +14,7 @@ set(eager_deps grad_node_info grad_tensor_holder accumulation_node - custom_operator_node - python) + custom_operator_node) set(fluid_deps tracer @@ -37,6 +36,7 @@ add_subdirectory(api) add_subdirectory(accumulation) add_subdirectory(custom_operator) if(NOT ((NOT WITH_PYTHON) AND ON_INFER)) + add_subdirectory(tests) add_subdirectory(pylayer) cc_library( grad_tensor_holder @@ -77,15 +77,3 @@ cc_library( scale_op autograd_meta hook_utils) - -if(NOT ((NOT WITH_PYTHON) AND ON_INFER)) - target_link_libraries(utils ${PYTHON_LIBRARIES}) -endif() - -if(NOT ((NOT WITH_PYTHON) AND ON_INFER)) - cc_library( - saved_tensors_hooks - SRCS saved_tensors_hooks.cc - DEPS hook_utils) - add_subdirectory(tests) -endif() diff --git a/paddle/fluid/eager/api/generated/eager_generated/backwards/CMakeLists.txt b/paddle/fluid/eager/api/generated/eager_generated/backwards/CMakeLists.txt index fc52d6f82a1..69bfe4d9415 100644 --- a/paddle/fluid/eager/api/generated/eager_generated/backwards/CMakeLists.txt +++ b/paddle/fluid/eager/api/generated/eager_generated/backwards/CMakeLists.txt @@ -9,5 +9,4 @@ if(NOT (NOT WITH_PYTHON AND ON_INFER)) SRCS nodes.cc ${eager_manual_nodes} DEPS ${eager_deps}) add_dependencies(final_dygraph_node eager_codegen) - target_link_libraries(final_dygraph_node ${PYTHON_LIBRARIES}) endif() diff --git a/paddle/fluid/eager/api/generated/eager_generated/forwards/CMakeLists.txt b/paddle/fluid/eager/api/generated/eager_generated/forwards/CMakeLists.txt index 97d5aed2947..c32dd2f122a 100644 --- a/paddle/fluid/eager/api/generated/eager_generated/forwards/CMakeLists.txt +++ b/paddle/fluid/eager/api/generated/eager_generated/forwards/CMakeLists.txt @@ -9,5 +9,4 @@ if(NOT (NOT WITH_PYTHON AND ON_INFER)) SRCS dygraph_functions.cc ${eager_manual_functions} DEPS ${eager_deps}) 
  add_dependencies(final_dygraph_function eager_codegen)
-  target_link_libraries(final_dygraph_function ${PYTHON_LIBRARIES})
 endif()
diff --git a/paddle/fluid/eager/auto_code_generator/generate_file_structures.py b/paddle/fluid/eager/auto_code_generator/generate_file_structures.py
index d2beeff5ac2..3de8e587eea 100644
--- a/paddle/fluid/eager/auto_code_generator/generate_file_structures.py
+++ b/paddle/fluid/eager/auto_code_generator/generate_file_structures.py
@@ -144,7 +144,6 @@ def GenerateFileStructureForIntermediateDygraph(eager_dir, split_count):
             f.write("nodes" + str(i + 1) + ".cc ")
         f.write("${fluid_manual_nodes} DEPS ${eager_deps} ${fluid_deps})\n")
         f.write("add_dependencies(dygraph_node copy_dygraph_node)\n")
-        f.write("target_link_libraries(dygraph_node ${PYTHON_LIBRARIES})\n")
 
     with open(forwards_level_cmakelist_path, "w") as f:
         f.write("add_custom_target(\n")
@@ -184,7 +183,6 @@ def GenerateFileStructureForIntermediateDygraph(eager_dir, split_count):
         f.write(
             "add_dependencies(dygraph_function copy_dygraph_forward_functions)\n"
         )
-        f.write("target_link_libraries(dygraph_function ${PYTHON_LIBRARIES})\n")
 
     with open(generated_level_cmakelist_path, "w") as f:
         f.write("add_subdirectory(forwards)\nadd_subdirectory(nodes)")
diff --git a/paddle/fluid/eager/hooks.h b/paddle/fluid/eager/hooks.h
index f501c4acc62..ff2ca4aef13 100644
--- a/paddle/fluid/eager/hooks.h
+++ b/paddle/fluid/eager/hooks.h
@@ -19,6 +19,7 @@
 #include <utility>
 #include <vector>
 
+#include "paddle/fluid/platform/enforce.h"
 #include "paddle/phi/api/include/tensor.h"
 
 namespace egr {
@@ -62,18 +63,69 @@ class CppVoidHook : public VoidHook {
   std::function<void()> fn_;
 };
 
+class PyObjectHolderBase {
+ public:
+  virtual ~PyObjectHolderBase() = default;
+  virtual void* get() = 0;
+  virtual void reset(void* ptr) = 0;
+  virtual void inc_ref() = 0;
+  virtual void dec_ref() = 0;
+};
+
 class PackHookBase {
  public:
   virtual ~PackHookBase() = default;
-  virtual void* operator()(const paddle::experimental::Tensor& tensor) = 0;
+  virtual std::shared_ptr<PyObjectHolderBase> operator()(
+      const paddle::experimental::Tensor& tensor) = 0;
   virtual void* operator()(void* py_tensor) = 0;
 };
 
 class UnPackHookBase {
  public:
   virtual ~UnPackHookBase() = default;
-  virtual paddle::experimental::Tensor operator()(void* packed_value) = 0;
+  virtual paddle::experimental::Tensor operator()(
+      std::shared_ptr<PyObjectHolderBase> packed_value) = 0;
   virtual void* operator()(void* packed_value, void* other) = 0;
 };
 
+class SavedTensorsHooks {
+ public:
+  SavedTensorsHooks() = default;
+
+  ~SavedTensorsHooks() {}
+
+  void SetHooks(std::shared_ptr<PackHookBase> pack_hook,
+                std::shared_ptr<UnPackHookBase> unpack_hook) {
+    PADDLE_ENFORCE_EQ(pack_hook_ == nullptr && unpack_hook_ == nullptr,
+                      true,
+                      paddle::platform::errors::InvalidArgument(
+                          "paddle.autograd.saved_tensors_hooks only one pair "
+                          "of hooks is allowed at a time."));
+    pack_hook_ = pack_hook;
+    unpack_hook_ = unpack_hook;
+    is_enable_ = true;
+  }
+
+  void ResetHooks() {
+    pack_hook_ = nullptr;
+    unpack_hook_ = nullptr;
+    is_enable_ = false;
+  }
+
+  bool IsEnable() { return is_enable_; }
+
+  std::shared_ptr<PackHookBase> GetPackHook() { return pack_hook_; }
+  std::shared_ptr<UnPackHookBase> GetUnPackHook() { return unpack_hook_; }
+
+  static SavedTensorsHooks& GetInstance() {
+    static SavedTensorsHooks instance;
+    return instance;
+  }
+
+ private:
+  std::shared_ptr<PackHookBase> pack_hook_;
+  std::shared_ptr<UnPackHookBase> unpack_hook_;
+  bool is_enable_{false};
+};
+
 }  // namespace egr
diff --git a/paddle/fluid/eager/saved_tensors_hooks.cc b/paddle/fluid/eager/saved_tensors_hooks.cc
deleted file mode 100644
index 1060e5d463d..00000000000
--- a/paddle/fluid/eager/saved_tensors_hooks.cc
+++ /dev/null
@@ -1,119 +0,0 @@
-// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "paddle/fluid/eager/saved_tensors_hooks.h"
-#include "paddle/fluid/eager/api/utils/global_utils.h"
-
-#if !(defined(PADDLE_NO_PYTHON) && defined(PADDLE_ON_INFERENCE))
-#include "paddle/fluid/pybind/eager.h"
-#include "paddle/fluid/pybind/eager_utils.h"
-#endif
-
-namespace egr {
-#if !(defined(PADDLE_NO_PYTHON) && defined(PADDLE_ON_INFERENCE))
-PackHook::PackHook(PyObject* hook) : hook_(hook) { Py_INCREF(hook_); }
-
-PackHook::~PackHook() {
-  ::pybind11::gil_scoped_acquire gil;
-  Py_DECREF(hook_);
-}
-
-void* PackHook::operator()(const paddle::experimental::Tensor& tensor) {
-  bool grad_tmp = egr::Controller::Instance().HasGrad();
-  egr::Controller::Instance().SetHasGrad(false);
-  ::pybind11::gil_scoped_acquire gil;
-  auto args = PyTuple_New(1);
-  PyTuple_SET_ITEM(args, 0, paddle::pybind::ToPyObject(tensor));
-  PyObject* ret = PyObject_Call(hook_, args, nullptr);
-  PADDLE_ENFORCE_NOT_NULL(ret,
-                          paddle::platform::errors::External(
-                              pybind11::detail::error_string().c_str()));
-  Py_XDECREF(args);
-  egr::Controller::Instance().SetHasGrad(grad_tmp);
-  return reinterpret_cast<void*>(ret);
-}
-
-void* PackHook::operator()(void* py_tensor) {
-  bool grad_tmp = egr::Controller::Instance().HasGrad();
-  egr::Controller::Instance().SetHasGrad(false);
-  ::pybind11::gil_scoped_acquire gil;
-  auto args = PyTuple_New(1);
-  Py_INCREF(reinterpret_cast<PyObject*>(py_tensor));
-  PyTuple_SET_ITEM(args, 0, reinterpret_cast<PyObject*>(py_tensor));
-  PyObject* ret = PyObject_Call(hook_, args, nullptr);
-  PADDLE_ENFORCE_NOT_NULL(ret,
-                          paddle::platform::errors::External(
-                              pybind11::detail::error_string().c_str()));
-  Py_XDECREF(args);
-  egr::Controller::Instance().SetHasGrad(grad_tmp);
-  return reinterpret_cast<void*>(ret);
-}
-
-UnPackHook::UnPackHook(PyObject* hook) : hook_(hook) { Py_INCREF(hook_); }
-
-UnPackHook::~UnPackHook() {
-  ::pybind11::gil_scoped_acquire gil;
-  Py_DECREF(hook_);
-}
-
-paddle::experimental::Tensor UnPackHook::operator()(void* packed_value) {
-  bool grad_tmp = egr::Controller::Instance().HasGrad();
-  egr::Controller::Instance().SetHasGrad(false);
-  ::pybind11::gil_scoped_acquire gil;
-  auto args = PyTuple_New(1);
-  Py_INCREF(reinterpret_cast<PyObject*>(packed_value));
-  PyTuple_SET_ITEM(args, 0, reinterpret_cast<PyObject*>(packed_value));
-  PyObject* ret = PyObject_Call(hook_, args, nullptr);
-  PADDLE_ENFORCE_NOT_NULL(ret,
-                          paddle::platform::errors::External(
-                              pybind11::detail::error_string().c_str()));
-  Py_XDECREF(args);
-  egr::Controller::Instance().SetHasGrad(grad_tmp);
-
-  PADDLE_ENFORCE_EQ(paddle::pybind::IsEagerTensor(ret),
-                    true,
-                    paddle::platform::errors::InvalidArgument(
-                        "paddle.autograd.saved_tensors_hooks only one pair "
-                        "of hooks is allowed at a time."));
-
-  auto tensor = reinterpret_cast<paddle::pybind::TensorObject*>(ret)->tensor;
-  Py_XDECREF(ret);
-  return tensor;
-}
-
-void* UnPackHook::operator()(void* packed_value, void* other) {
-  bool grad_tmp = egr::Controller::Instance().HasGrad();
-  egr::Controller::Instance().SetHasGrad(false);
-  ::pybind11::gil_scoped_acquire gil;
-  auto args = PyTuple_New(1);
-  Py_INCREF(reinterpret_cast<PyObject*>(packed_value));
-  PyTuple_SET_ITEM(args, 0, reinterpret_cast<PyObject*>(packed_value));
-  PyObject* ret = PyObject_Call(hook_, args, nullptr);
-  PADDLE_ENFORCE_NOT_NULL(ret,
-                          paddle::platform::errors::External(
-                              pybind11::detail::error_string().c_str()));
-  Py_XDECREF(args);
-  egr::Controller::Instance().SetHasGrad(grad_tmp);
-
-  PADDLE_ENFORCE_EQ(paddle::pybind::IsEagerTensor(ret),
-                    true,
-                    paddle::platform::errors::InvalidArgument(
-                        "paddle.autograd.saved_tensors_hooks only one pair "
-                        "of hooks is allowed at a time."));
-
-  return reinterpret_cast<void*>(ret);
-}
-#endif
-
-}  // namespace egr
diff --git a/paddle/fluid/eager/saved_tensors_hooks.h b/paddle/fluid/eager/saved_tensors_hooks.h
deleted file mode 100644
index 1deb30daaa8..00000000000
--- a/paddle/fluid/eager/saved_tensors_hooks.h
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#pragma once
-
-#include
-#include "paddle/fluid/eager/api/utils/global_utils.h"
-#include "paddle/fluid/eager/hooks.h"
-#include "paddle/phi/core/enforce.h"
-#include "paddle/phi/core/errors.h"
-
-namespace egr {
-#if !(defined(PADDLE_NO_PYTHON) && defined(PADDLE_ON_INFERENCE))
-class PackHook : public PackHookBase {
- public:
-  explicit PackHook(PyObject* hook);
-
-  ~PackHook();
-
-  void* operator()(const paddle::experimental::Tensor& tensor) override;
-
-  void* operator()(void* py_tensor) override;
-
- private:
-  PyObject* hook_;
-};
-
-class UnPackHook : public UnPackHookBase {
- public:
-  explicit UnPackHook(PyObject* hook);
-
-  ~UnPackHook();
-
-  paddle::experimental::Tensor operator()(void* packed_value) override;
-
-  void* operator()(void* packed_value, void* other) override;
-
- private:
-  PyObject* hook_;
-};
-#endif
-
-class SavedTensorsHooks {
- public:
-  SavedTensorsHooks() = default;
-
-  ~SavedTensorsHooks() {}
-
-  void SetHooks(PyObject* pack_hook, PyObject* unpack_hook) {
-#if !(defined(PADDLE_NO_PYTHON) && defined(PADDLE_ON_INFERENCE))
-    PADDLE_ENFORCE_EQ(pack_hook_ == nullptr && unpack_hook_ == nullptr,
-                      true,
-                      paddle::platform::errors::InvalidArgument(
-                          "paddle.autograd.saved_tensors_hooks only one pair "
-                          "of hooks is allowed at a time."));
-    pack_hook_ = std::make_shared<PackHook>(pack_hook);
-    unpack_hook_ = std::make_shared<UnPackHook>(unpack_hook);
-    is_enable_ = true;
-#endif
-  }
-
-  void ResetHooks() {
-#if !(defined(PADDLE_NO_PYTHON) && defined(PADDLE_ON_INFERENCE))
-    pack_hook_ = nullptr;
-    unpack_hook_ = nullptr;
-    is_enable_ = false;
-#endif
-  }
-
-  bool IsEnable() { return is_enable_; }
-
-  std::shared_ptr<PackHookBase> GetPackHook() { return pack_hook_; }
-  std::shared_ptr<UnPackHookBase> GetUnPackHook() { return unpack_hook_; }
-
-  static SavedTensorsHooks& GetInstance() {
-    static SavedTensorsHooks instance;
-    return instance;
-  }
-
- private:
-  std::shared_ptr<PackHookBase> pack_hook_;
-  std::shared_ptr<UnPackHookBase> unpack_hook_;
-  bool is_enable_{false};
-};
-
-}  // namespace egr
diff --git a/paddle/fluid/eager/tensor_wrapper.h b/paddle/fluid/eager/tensor_wrapper.h
index 67cd943f331..cb797c18b19 100644
--- a/paddle/fluid/eager/tensor_wrapper.h
+++ b/paddle/fluid/eager/tensor_wrapper.h
@@ -30,7 +30,7 @@
 #include "paddle/fluid/eager/utils.h"
 #include "paddle/phi/api/lib/utils/allocator.h"
 #ifndef PADDLE_NO_PYTHON
-#include "paddle/fluid/eager/saved_tensors_hooks.h"
+#include "paddle/fluid/eager/hooks.h"
 #endif
 
 namespace egr {
@@ -73,7 +73,7 @@ class TensorWrapper {
       }
     } else {
 #ifndef PADDLE_NO_PYTHON
-      if (SavedTensorsHooks::GetInstance().IsEnable() &&
+      if (egr::SavedTensorsHooks::GetInstance().IsEnable() &&
           tensor.is_dense_tensor() && tensor.initialized()) {
         phi::DenseTensor* dense_tensor =
             static_cast<phi::DenseTensor*>(tensor.impl().get());
@@ -81,9 +81,9 @@ class TensorWrapper {
             std::move(std::make_shared<phi::DenseTensor>(
                 std::make_shared<phi::Allocation>(nullptr, 0, tensor.place()),
                 dense_tensor->meta())));
-        auto pack_hook = SavedTensorsHooks::GetInstance().GetPackHook();
-        unpack_hook_ = SavedTensorsHooks::GetInstance().GetUnPackHook();
-        packed_value_ = reinterpret_cast<PyObject*>((*pack_hook)(tensor));
+        auto pack_hook = egr::SavedTensorsHooks::GetInstance().GetPackHook();
+        unpack_hook_ = egr::SavedTensorsHooks::GetInstance().GetUnPackHook();
+        packed_value_ = (*pack_hook)(tensor);
       } else {
 #endif
         intermidiate_tensor_.set_impl(tensor.impl());
@@ -105,6 +105,7 @@ class TensorWrapper {
       weak_grad_node_ = tensor_autograd_meta->GetMutableGradNode();
     }
   }
+
 #ifndef PADDLE_NO_PYTHON
   TensorWrapper(const TensorWrapper& other) {
     no_need_buffer_ = other.no_need_buffer_;
     intermidiate_tensor_ = other.intermidiate_tensor_;
     weak_grad_node_ = other.weak_grad_node_;
     inplace_version_snapshot_ = other.inplace_version_snapshot_;
     packed_value_ = other.packed_value_;
     unpack_hook_ = other.unpack_hook_;
-    Py_XINCREF(packed_value_);
+    if (packed_value_) {
+      packed_value_->inc_ref();
+    }
   }
 
   TensorWrapper& operator=(const TensorWrapper& other) {
     no_need_buffer_ = other.no_need_buffer_;
     intermidiate_tensor_ = other.intermidiate_tensor_;
     weak_grad_node_ = other.weak_grad_node_;
     inplace_version_snapshot_ = other.inplace_version_snapshot_;
     packed_value_ = other.packed_value_;
     unpack_hook_ = other.unpack_hook_;
-    Py_XINCREF(packed_value_);
+    if (packed_value_) {
+      packed_value_->inc_ref();
+    }
     return *this;
   }
-
-  ~TensorWrapper() { Py_XDECREF(packed_value_); }
 #endif
+
   paddle::experimental::Tensor recover() {
     VLOG(6) << "Recover tensor: " << intermidiate_tensor_.name()
             << " for wrapper";
@@ -138,8 +142,7 @@ class TensorWrapper {
     }
 #ifndef PADDLE_NO_PYTHON
     if (packed_value_ && unpack_hook_) {
-      auto tensor_unpacked =
-          (*unpack_hook_)(reinterpret_cast<void*>(packed_value_));
+      auto tensor_unpacked = (*unpack_hook_)(packed_value_);
       auto src_dense_tensor =
           static_cast<phi::DenseTensor*>(tensor_unpacked.impl().get());
       static_cast<phi::DenseTensor*>(intermidiate_tensor_.impl().get())
@@ -224,10 +227,10 @@ class TensorWrapper {
   std::weak_ptr<egr::GradNodeBase> weak_grad_node_;
   uint32_t inplace_version_snapshot_ = 0;
 #ifndef PADDLE_NO_PYTHON
-  PyObject* packed_value_{nullptr};
-  std::shared_ptr<UnPackHookBase> unpack_hook_;
+  std::shared_ptr<egr::PyObjectHolderBase> packed_value_;
+  std::shared_ptr<egr::UnPackHookBase> unpack_hook_;
 #else
-  void* packed_value_{nullptr};
+  std::shared_ptr<void> packed_value_;
   std::shared_ptr<void> unpack_hook_;
 #endif
 };
diff --git a/paddle/fluid/pybind/CMakeLists.txt b/paddle/fluid/pybind/CMakeLists.txt
index dab32b5a675..973ef8a4a79 100755
--- a/paddle/fluid/pybind/CMakeLists.txt
+++ b/paddle/fluid/pybind/CMakeLists.txt
@@ -605,7 +605,6 @@ if(WITH_PYTHON)
     list(APPEND PYBIND_DEPS python)
    list(APPEND PYBIND_DEPS custom_operator)
    list(APPEND PYBIND_DEPS custom_operator_node)
-    list(APPEND PYBIND_DEPS saved_tensors_hooks)
  endif()
 
  # On Linux, cc_library(paddle SHARED ..) will generate the libpaddle.so,
@@ -651,12 +650,4 @@ if(WITH_PYTHON)
     target_link_libraries(${SHARD_LIB_NAME} ${os_dependency_modules})
     add_dependencies(${SHARD_LIB_NAME} op_function_generator_cmd)
 
-    if(APPLE)
-      string(REGEX REPLACE ".+/(.+)" "\\1" PYTHON_LIBRARY_NAME
-                           ${PYTHON_LIBRARIES})
-      # target_link_libraries(${SHARD_LIB_NAME} "-Wl,-rpath,${PYTHON_LIBRARY_NAME}")
-    else()
-      target_link_libraries(${SHARD_LIB_NAME} ${PYTHON_LIBRARIES})
-    endif()
-
 endif()
diff --git a/paddle/fluid/pybind/eager_functions.cc b/paddle/fluid/pybind/eager_functions.cc
index 9c0f189e630..2874c7b90f4 100644
--- a/paddle/fluid/pybind/eager_functions.cc
+++ b/paddle/fluid/pybind/eager_functions.cc
@@ -25,7 +25,6 @@ typedef SSIZE_T ssize_t;
 #include "paddle/fluid/eager/autograd_meta.h"
 #include "paddle/fluid/eager/backward.h"
 #include "paddle/fluid/eager/custom_operator/custom_operator_node.h"
-#include "paddle/fluid/eager/saved_tensors_hooks.h"
 #include "paddle/fluid/eager/utils.h"
 #include "paddle/fluid/framework/convert_utils.h"
 #include "paddle/fluid/framework/custom_operator.h"
@@ -715,7 +714,9 @@ static PyObject* eager_api_register_saved_tensors_hooks(PyObject* self,
   if (egr::Controller::Instance().HasGrad()) {
     auto pack_hook = PyTuple_GET_ITEM(args, 0);
     auto unpack_hook = PyTuple_GET_ITEM(args, 1);
-    egr::SavedTensorsHooks::GetInstance().SetHooks(pack_hook, unpack_hook);
+    egr::SavedTensorsHooks::GetInstance().SetHooks(
+        std::make_shared<PackHook>(pack_hook),
+        std::make_shared<UnPackHook>(unpack_hook));
   }
   RETURN_PY_NONE
   EAGER_CATCH_AND_THROW_RETURN_NULL
diff --git a/paddle/fluid/pybind/eager_py_layer.cc b/paddle/fluid/pybind/eager_py_layer.cc
index f80a39f9f0a..8befe6318bc 100644
--- a/paddle/fluid/pybind/eager_py_layer.cc
+++ b/paddle/fluid/pybind/eager_py_layer.cc
@@ -20,7 +20,6 @@ limitations under the License. */
 #include "paddle/fluid/eager/api/all.h"
 #include "paddle/fluid/eager/autograd_meta.h"
 #include "paddle/fluid/eager/pylayer/py_layer_node.h"
-#include "paddle/fluid/eager/saved_tensors_hooks.h"
 #include "paddle/fluid/eager/utils.h"
 #include "paddle/fluid/framework/convert_utils.h"
 #include "paddle/fluid/memory/allocation/allocator.h"
diff --git a/paddle/fluid/pybind/eager_utils.cc b/paddle/fluid/pybind/eager_utils.cc
index f5f409673a1..7c9faf2fd59 100644
--- a/paddle/fluid/pybind/eager_utils.cc
+++ b/paddle/fluid/pybind/eager_utils.cc
@@ -1627,5 +1627,126 @@ void PyVoidHook::operator()() {
   }
 }
 
+PyObjectHolder::PyObjectHolder(PyObject* ptr) { ptr_ = ptr; }
+
+PyObjectHolder::~PyObjectHolder() {
+  ::pybind11::gil_scoped_acquire gil;
+  Py_XDECREF(ptr_);
+}
+
+void* PyObjectHolder::get() { return reinterpret_cast<void*>(ptr_); }
+
+void PyObjectHolder::reset(void* ptr) {
+  if (ptr_) {
+    ::pybind11::gil_scoped_acquire gil;
+    Py_XDECREF(ptr_);
+  }
+  ptr_ = reinterpret_cast<PyObject*>(ptr);
+}
+
+void PyObjectHolder::inc_ref() {
+  ::pybind11::gil_scoped_acquire gil;
+  Py_XINCREF(ptr_);
+}
+void PyObjectHolder::dec_ref() {
+  ::pybind11::gil_scoped_acquire gil;
+  Py_XDECREF(ptr_);
+}
+
+PackHook::PackHook(PyObject* hook) : hook_(hook) { Py_INCREF(hook_); }
+
+PackHook::~PackHook() {
+  ::pybind11::gil_scoped_acquire gil;
+  Py_DECREF(hook_);
+}
+
+std::shared_ptr<egr::PyObjectHolderBase> PackHook::operator()(
+    const paddle::experimental::Tensor& tensor) {
+  bool grad_tmp = egr::Controller::Instance().HasGrad();
+  egr::Controller::Instance().SetHasGrad(false);
+  ::pybind11::gil_scoped_acquire gil;
+  auto args = PyTuple_New(1);
+  PyTuple_SET_ITEM(args, 0, paddle::pybind::ToPyObject(tensor));
+  PyObject* ret = PyObject_Call(hook_, args, nullptr);
+  PADDLE_ENFORCE_NOT_NULL(ret,
+                          paddle::platform::errors::External(
+                              pybind11::detail::error_string().c_str()));
+  Py_XDECREF(args);
+  egr::Controller::Instance().SetHasGrad(grad_tmp);
+  return std::make_shared<PyObjectHolder>(ret);
+}
+
+void* PackHook::operator()(void* py_tensor) {
+  bool grad_tmp = egr::Controller::Instance().HasGrad();
+  egr::Controller::Instance().SetHasGrad(false);
+  ::pybind11::gil_scoped_acquire gil;
+  auto args = PyTuple_New(1);
+  Py_INCREF(reinterpret_cast<PyObject*>(py_tensor));
+  PyTuple_SET_ITEM(args, 0, reinterpret_cast<PyObject*>(py_tensor));
+  PyObject* ret = PyObject_Call(hook_, args, nullptr);
+  PADDLE_ENFORCE_NOT_NULL(ret,
+                          paddle::platform::errors::External(
+                              pybind11::detail::error_string().c_str()));
+  Py_XDECREF(args);
+  egr::Controller::Instance().SetHasGrad(grad_tmp);
+  return reinterpret_cast<void*>(ret);
+}
+
+UnPackHook::UnPackHook(PyObject* hook) : hook_(hook) { Py_INCREF(hook_); }
+
+UnPackHook::~UnPackHook() {
+  ::pybind11::gil_scoped_acquire gil;
+  Py_DECREF(hook_);
+}
+
+paddle::experimental::Tensor UnPackHook::operator()(
+    std::shared_ptr<egr::PyObjectHolderBase> packed_value) {
+  bool grad_tmp = egr::Controller::Instance().HasGrad();
+  egr::Controller::Instance().SetHasGrad(false);
+  ::pybind11::gil_scoped_acquire gil;
+  auto args = PyTuple_New(1);
+  Py_INCREF(reinterpret_cast<PyObject*>(packed_value->get()));
+  PyTuple_SET_ITEM(args, 0, reinterpret_cast<PyObject*>(packed_value->get()));
+  PyObject* ret = PyObject_Call(hook_, args, nullptr);
+  PADDLE_ENFORCE_NOT_NULL(ret,
+                          paddle::platform::errors::External(
+                              pybind11::detail::error_string().c_str()));
+  Py_XDECREF(args);
+  egr::Controller::Instance().SetHasGrad(grad_tmp);
+
+  PADDLE_ENFORCE_EQ(paddle::pybind::IsEagerTensor(ret),
+                    true,
+                    paddle::platform::errors::InvalidArgument(
+                        "The unpack hook registered by "
+                        "paddle.autograd.saved_tensors_hooks must return "
+                        "a Tensor."));
+
+  auto tensor = reinterpret_cast<TensorObject*>(ret)->tensor;
+  Py_XDECREF(ret);
+  return tensor;
+}
+
+void* UnPackHook::operator()(void* packed_value, void* other) {
+  bool grad_tmp = egr::Controller::Instance().HasGrad();
+  egr::Controller::Instance().SetHasGrad(false);
+  ::pybind11::gil_scoped_acquire gil;
+  auto args = PyTuple_New(1);
+  Py_INCREF(reinterpret_cast<PyObject*>(packed_value));
+  PyTuple_SET_ITEM(args, 0, reinterpret_cast<PyObject*>(packed_value));
+  PyObject* ret = PyObject_Call(hook_, args, nullptr);
+  PADDLE_ENFORCE_NOT_NULL(ret,
+                          paddle::platform::errors::External(
+                              pybind11::detail::error_string().c_str()));
+  Py_XDECREF(args);
+  egr::Controller::Instance().SetHasGrad(grad_tmp);
+
+  PADDLE_ENFORCE_EQ(paddle::pybind::IsEagerTensor(ret),
+                    true,
+                    paddle::platform::errors::InvalidArgument(
+                        "The unpack hook registered by "
+                        "paddle.autograd.saved_tensors_hooks must return "
+                        "a Tensor."));
+
+  return reinterpret_cast<void*>(ret);
+}
+
 }  // namespace pybind
 }  // namespace paddle
diff --git a/paddle/fluid/pybind/eager_utils.h b/paddle/fluid/pybind/eager_utils.h
index 654a03ae880..900b2538ead 100644
--- a/paddle/fluid/pybind/eager_utils.h
+++ b/paddle/fluid/pybind/eager_utils.h
@@ -151,6 +151,49 @@ class PyVoidHook : public egr::VoidHook {
   PyObject* py_func_;
 };
 
+class PyObjectHolder : public egr::PyObjectHolderBase {
+ public:
+  PyObjectHolder() { ptr_ = nullptr; }
+  explicit PyObjectHolder(PyObject* ptr);
+  ~PyObjectHolder() override;
+  void* get() override;
+  void reset(void* ptr) override;
+  void inc_ref() override;
+  void dec_ref() override;
+
+ private:
+  PyObject* ptr_{nullptr};
+};
+
+class PackHook : public egr::PackHookBase {
+ public:
+  explicit PackHook(PyObject* hook);
+
+  ~PackHook();
+
+  std::shared_ptr<egr::PyObjectHolderBase> operator()(
+      const paddle::experimental::Tensor& tensor) override;
+
+  void* operator()(void* py_tensor) override;
+
+ private:
+  PyObject* hook_;
+};
+
+class UnPackHook : public egr::UnPackHookBase {
+ public:
+  explicit UnPackHook(PyObject* hook);
+
+  ~UnPackHook();
+
+  paddle::experimental::Tensor operator()(
+      std::shared_ptr<egr::PyObjectHolderBase> packed_value) override;
+
+  void* operator()(void* packed_value, void* other) override;
+
+ private:
+  PyObject* hook_;
+};
+
 template <typename Tuple, size_t N>
 struct TupleTensorResult {
   static void Run(const Tuple& out, PyObject* result) {
-- 
GitLab
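
The pattern this patch applies across hooks.h, tensor_wrapper.h, and eager_utils.cc/.h is dependency inversion: the eager core now touches Python objects only through the abstract PyObjectHolderBase / PackHookBase / UnPackHookBase interfaces, while the CPython-specific subclasses (PyObjectHolder, PackHook, UnPackHook) live in the pybind layer, the one target that still links ${PYTHON_LIBRARIES}. The sketch below is a minimal, self-contained illustration of that layering; every name in it is invented for the example (a std::string stands in for PyObject*), so it shows the shape of the technique rather than Paddle's actual API.

// saved_hooks_sketch.cc: illustrative sketch only, not Paddle source.
// Build: g++ -std=c++14 saved_hooks_sketch.cc && ./a.out
#include <iostream>
#include <memory>
#include <string>

// ---- "Core" side: no frontend (CPython) headers needed here. ----
class ObjectHolderBase {  // opaque handle to a frontend-owned object
 public:
  virtual ~ObjectHolderBase() = default;
  virtual void* get() = 0;
};

class PackHookBase {  // packs a tensor into an opaque object
 public:
  virtual ~PackHookBase() = default;
  virtual std::shared_ptr<ObjectHolderBase> operator()(double tensor) = 0;
};

class UnPackHookBase {  // recovers the tensor from the opaque object
 public:
  virtual ~UnPackHookBase() = default;
  virtual double operator()(std::shared_ptr<ObjectHolderBase> packed) = 0;
};

// Singleton the core consults, mirroring egr::SavedTensorsHooks.
struct Hooks {
  static Hooks& Instance() { static Hooks h; return h; }
  std::shared_ptr<PackHookBase> pack;
  std::shared_ptr<UnPackHookBase> unpack;
};

// Core-side wrapper: stores only the opaque handle, so this code
// compiles and links without any frontend library.
class Wrapper {
 public:
  explicit Wrapper(double t) {
    // Real TensorWrapper also checks IsEnable(); elided for brevity.
    if (Hooks::Instance().pack) packed_ = (*Hooks::Instance().pack)(t);
  }
  double Recover() { return (*Hooks::Instance().unpack)(packed_); }
 private:
  std::shared_ptr<ObjectHolderBase> packed_;
};

// ---- "Frontend" side: in Paddle this wraps PyObject* in pybind. ----
class StringHolder : public ObjectHolderBase {
 public:
  explicit StringHolder(std::string s) : s_(std::move(s)) {}
  void* get() override { return &s_; }
 private:
  std::string s_;
};

class StringPackHook : public PackHookBase {
 public:
  std::shared_ptr<ObjectHolderBase> operator()(double t) override {
    return std::make_shared<StringHolder>(std::to_string(t));
  }
};

class StringUnPackHook : public UnPackHookBase {
 public:
  double operator()(std::shared_ptr<ObjectHolderBase> packed) override {
    return std::stod(*static_cast<std::string*>(packed->get()));
  }
};

int main() {
  Hooks::Instance().pack = std::make_shared<StringPackHook>();
  Hooks::Instance().unpack = std::make_shared<StringUnPackHook>();
  Wrapper w(3.14);                      // core packs via the interface
  std::cout << w.Recover() << "\n";     // frontend unpacks; prints 3.14
}

Because Wrapper only ever sees ObjectHolderBase, the translation unit that defines it has no frontend dependency at compile or link time, which is the same reason the target_link_libraries(... ${PYTHON_LIBRARIES}) lines could be dropped from the eager targets above while the pybind target keeps them.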