Unverified commit 93099bb8, authored by wanghuancoder, committed by GitHub

do not link python lib in tensor wrapper (#48523)

* do not link python lib in tensor wrapper
Parent commit: 771811dc
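Note on the change: this commit stops linking ${PYTHON_LIBRARIES} into the eager/core build targets (the C++ tests, the eager utils, and the generated dygraph nodes/functions). Instead of holding a raw PyObject* and calling Py_XINCREF/Py_XDECREF itself, TensorWrapper now stores a std::shared_ptr<egr::PyObjectHolderBase>; the Python-aware pieces (PyObjectHolder, PackHook, UnPackHook) move into the pybind layer, which remains the only part that links against Python, and the standalone saved_tensors_hooks target goes away. What follows is a minimal, self-contained sketch of that pattern; the names (ObjectHolderBase, CoreWrapper, BindingHolder, Handle) are simplified stand-ins for illustration, not the actual Paddle classes.

#include <iostream>
#include <memory>

// Core side: no Python headers, no libpython at link time.
// Core code only ever sees this abstract interface.
struct ObjectHolderBase {
  virtual ~ObjectHolderBase() = default;
  virtual void* get() = 0;     // opaque pointer to the bound object
  virtual void inc_ref() = 0;  // forwarded to Py_INCREF in a real binding
  virtual void dec_ref() = 0;  // forwarded to Py_DECREF in a real binding
};

struct CoreWrapper {
  // Copying the wrapper only copies the shared_ptr; the holder's destructor,
  // defined in the binding layer, releases the underlying object.
  std::shared_ptr<ObjectHolderBase> packed_value;
};

// Binding side: pretend Handle is a PyObject and refs is its reference count.
struct Handle {
  int refs = 1;
};

struct BindingHolder : ObjectHolderBase {
  explicit BindingHolder(Handle* h) : h_(h) {}
  ~BindingHolder() override { dec_ref(); }
  void* get() override { return h_; }
  void inc_ref() override { ++h_->refs; }
  void dec_ref() override { --h_->refs; }

 private:
  Handle* h_;
};

int main() {
  Handle obj;
  CoreWrapper w{std::make_shared<BindingHolder>(&obj)};
  CoreWrapper copy = w;  // no Python API touched by the core-side copy
  std::cout << "holder use_count=" << copy.packed_value.use_count()
            << ", refs=" << obj.refs << "\n";  // prints use_count=2, refs=1
  return 0;
}

With the abstract holder in place, every target_link_libraries(... ${PYTHON_LIBRARIES}) call outside the pybind target can be dropped, which is what the CMake hunks below do.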
@@ -72,6 +72,9 @@ if(${len} GREATER_EQUAL 1)
    target_link_libraries(${test_name}
                          "-Wl,-rpath,$<TARGET_FILE_DIR:${paddle_lib}>")
  endif()
+ if(NOT ((NOT WITH_PYTHON) AND ON_INFER))
+   target_link_libraries(${test_name} ${PYTHON_LIBRARIES})
+ endif()
  if(WITH_XPU)
    target_link_libraries(${test_name} xpulib)
  endif()
@@ -14,8 +14,7 @@ set(eager_deps
    grad_node_info
    grad_tensor_holder
    accumulation_node
-   custom_operator_node
-   python)
+   custom_operator_node)

set(fluid_deps
    tracer
@@ -37,6 +36,7 @@ add_subdirectory(api)
add_subdirectory(accumulation)
add_subdirectory(custom_operator)
if(NOT ((NOT WITH_PYTHON) AND ON_INFER))
+ add_subdirectory(tests)
  add_subdirectory(pylayer)
  cc_library(
    grad_tensor_holder
@@ -77,15 +77,3 @@ cc_library(
    scale_op
    autograd_meta
    hook_utils)
- if(NOT ((NOT WITH_PYTHON) AND ON_INFER))
-   target_link_libraries(utils ${PYTHON_LIBRARIES})
- endif()
-
- if(NOT ((NOT WITH_PYTHON) AND ON_INFER))
-   cc_library(
-     saved_tensors_hooks
-     SRCS saved_tensors_hooks.cc
-     DEPS hook_utils)
-   add_subdirectory(tests)
- endif()
@@ -9,5 +9,4 @@ if(NOT (NOT WITH_PYTHON AND ON_INFER))
    SRCS nodes.cc ${eager_manual_nodes}
    DEPS ${eager_deps})
  add_dependencies(final_dygraph_node eager_codegen)
- target_link_libraries(final_dygraph_node ${PYTHON_LIBRARIES})
endif()
@@ -9,5 +9,4 @@ if(NOT (NOT WITH_PYTHON AND ON_INFER))
    SRCS dygraph_functions.cc ${eager_manual_functions}
    DEPS ${eager_deps})
  add_dependencies(final_dygraph_function eager_codegen)
- target_link_libraries(final_dygraph_function ${PYTHON_LIBRARIES})
endif()
@@ -144,7 +144,6 @@ def GenerateFileStructureForIntermediateDygraph(eager_dir, split_count):
            f.write("nodes" + str(i + 1) + ".cc ")
        f.write("${fluid_manual_nodes} DEPS ${eager_deps} ${fluid_deps})\n")
        f.write("add_dependencies(dygraph_node copy_dygraph_node)\n")
-       f.write("target_link_libraries(dygraph_node ${PYTHON_LIBRARIES})\n")

    with open(forwards_level_cmakelist_path, "w") as f:
        f.write("add_custom_target(\n")
@@ -184,7 +183,6 @@ def GenerateFileStructureForIntermediateDygraph(eager_dir, split_count):
        f.write(
            "add_dependencies(dygraph_function copy_dygraph_forward_functions)\n"
        )
-       f.write("target_link_libraries(dygraph_function ${PYTHON_LIBRARIES})\n")

    with open(generated_level_cmakelist_path, "w") as f:
        f.write("add_subdirectory(forwards)\nadd_subdirectory(nodes)")
@@ -19,6 +19,7 @@
#include <utility>
#include <vector>

+ #include "paddle/fluid/platform/enforce.h"
#include "paddle/phi/api/include/tensor.h"

namespace egr {
@@ -62,18 +63,69 @@ class CppVoidHook : public VoidHook {
  std::function<void()> fn_;
};

+ class PyObjectHolderBase {
+  public:
+   virtual ~PyObjectHolderBase() = default;
+   virtual void* get() = 0;
+   virtual void reset(void* ptr) = 0;
+   virtual void inc_ref() = 0;
+   virtual void dec_ref() = 0;
+ };
+
class PackHookBase {
 public:
  virtual ~PackHookBase() = default;
- virtual void* operator()(const paddle::experimental::Tensor& tensor) = 0;
+ virtual std::shared_ptr<PyObjectHolderBase> operator()(
+     const paddle::experimental::Tensor& tensor) = 0;
  virtual void* operator()(void* py_tensor) = 0;
};

class UnPackHookBase {
 public:
  virtual ~UnPackHookBase() = default;
- virtual paddle::experimental::Tensor operator()(void* packed_value) = 0;
+ virtual paddle::experimental::Tensor operator()(
+     std::shared_ptr<PyObjectHolderBase> packed_value) = 0;
  virtual void* operator()(void* packed_value, void* other) = 0;
};

+ class SavedTensorsHooks {
+  public:
+   SavedTensorsHooks() = default;
+
+   ~SavedTensorsHooks() {}
+
+   void SetHooks(std::shared_ptr<PackHookBase> pack_hook,
+                 std::shared_ptr<UnPackHookBase> unpack_hook) {
+     PADDLE_ENFORCE_EQ(pack_hook_ == nullptr && unpack_hook_ == nullptr,
+                       true,
+                       paddle::platform::errors::InvalidArgument(
+                           "paddle.autograd.saved_tensors_hooks only one pair "
+                           "of hooks is allowed at a time."));
+     pack_hook_ = pack_hook;
+     unpack_hook_ = unpack_hook;
+     is_enable_ = true;
+   }
+
+   void ResetHooks() {
+     pack_hook_ = nullptr;
+     unpack_hook_ = nullptr;
+     is_enable_ = false;
+   }
+
+   bool IsEnable() { return is_enable_; }
+
+   std::shared_ptr<PackHookBase> GetPackHook() { return pack_hook_; }
+   std::shared_ptr<UnPackHookBase> GetUnPackHook() { return unpack_hook_; }
+
+   static SavedTensorsHooks& GetInstance() {
+     static SavedTensorsHooks instance;
+     return instance;
+   }
+
+  private:
+   std::shared_ptr<PackHookBase> pack_hook_;
+   std::shared_ptr<UnPackHookBase> unpack_hook_;
+   bool is_enable_{false};
+ };
+
}  // namespace egr
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/eager/saved_tensors_hooks.h"
#include "paddle/fluid/eager/api/utils/global_utils.h"
#if !(defined(PADDLE_NO_PYTHON) && defined(PADDLE_ON_INFERENCE))
#include "paddle/fluid/pybind/eager.h"
#include "paddle/fluid/pybind/eager_utils.h"
#endif
namespace egr {
#if !(defined(PADDLE_NO_PYTHON) && defined(PADDLE_ON_INFERENCE))
PackHook::PackHook(PyObject* hook) : hook_(hook) { Py_INCREF(hook_); }
PackHook::~PackHook() {
::pybind11::gil_scoped_acquire gil;
Py_DECREF(hook_);
}
void* PackHook::operator()(const paddle::experimental::Tensor& tensor) {
bool grad_tmp = egr::Controller::Instance().HasGrad();
egr::Controller::Instance().SetHasGrad(false);
::pybind11::gil_scoped_acquire gil;
auto args = PyTuple_New(1);
PyTuple_SET_ITEM(args, 0, paddle::pybind::ToPyObject(tensor));
PyObject* ret = PyObject_Call(hook_, args, nullptr);
PADDLE_ENFORCE_NOT_NULL(ret,
paddle::platform::errors::External(
pybind11::detail::error_string().c_str()));
Py_XDECREF(args);
egr::Controller::Instance().SetHasGrad(grad_tmp);
return reinterpret_cast<void*>(ret);
}
void* PackHook::operator()(void* py_tensor) {
bool grad_tmp = egr::Controller::Instance().HasGrad();
egr::Controller::Instance().SetHasGrad(false);
::pybind11::gil_scoped_acquire gil;
auto args = PyTuple_New(1);
Py_INCREF(reinterpret_cast<PyObject*>(py_tensor));
PyTuple_SET_ITEM(args, 0, reinterpret_cast<PyObject*>(py_tensor));
PyObject* ret = PyObject_Call(hook_, args, nullptr);
PADDLE_ENFORCE_NOT_NULL(ret,
paddle::platform::errors::External(
pybind11::detail::error_string().c_str()));
Py_XDECREF(args);
egr::Controller::Instance().SetHasGrad(grad_tmp);
return reinterpret_cast<void*>(ret);
}
UnPackHook::UnPackHook(PyObject* hook) : hook_(hook) { Py_INCREF(hook_); }
UnPackHook::~UnPackHook() {
::pybind11::gil_scoped_acquire gil;
Py_DECREF(hook_);
}
paddle::experimental::Tensor UnPackHook::operator()(void* packed_value) {
bool grad_tmp = egr::Controller::Instance().HasGrad();
egr::Controller::Instance().SetHasGrad(false);
::pybind11::gil_scoped_acquire gil;
auto args = PyTuple_New(1);
Py_INCREF(reinterpret_cast<PyObject*>(packed_value));
PyTuple_SET_ITEM(args, 0, reinterpret_cast<PyObject*>(packed_value));
PyObject* ret = PyObject_Call(hook_, args, nullptr);
PADDLE_ENFORCE_NOT_NULL(ret,
paddle::platform::errors::External(
pybind11::detail::error_string().c_str()));
Py_XDECREF(args);
egr::Controller::Instance().SetHasGrad(grad_tmp);
PADDLE_ENFORCE_EQ(paddle::pybind::IsEagerTensor(ret),
true,
paddle::platform::errors::InvalidArgument(
"paddle.autograd.saved_tensors_hooks only one pair "
"of hooks is allowed at a time."));
auto tensor = reinterpret_cast<paddle::pybind::TensorObject*>(ret)->tensor;
Py_XDECREF(ret);
return tensor;
}
void* UnPackHook::operator()(void* packed_value, void* other) {
bool grad_tmp = egr::Controller::Instance().HasGrad();
egr::Controller::Instance().SetHasGrad(false);
::pybind11::gil_scoped_acquire gil;
auto args = PyTuple_New(1);
Py_INCREF(reinterpret_cast<PyObject*>(packed_value));
PyTuple_SET_ITEM(args, 0, reinterpret_cast<PyObject*>(packed_value));
PyObject* ret = PyObject_Call(hook_, args, nullptr);
PADDLE_ENFORCE_NOT_NULL(ret,
paddle::platform::errors::External(
pybind11::detail::error_string().c_str()));
Py_XDECREF(args);
egr::Controller::Instance().SetHasGrad(grad_tmp);
PADDLE_ENFORCE_EQ(paddle::pybind::IsEagerTensor(ret),
true,
paddle::platform::errors::InvalidArgument(
"paddle.autograd.saved_tensors_hooks only one pair "
"of hooks is allowed at a time."));
return reinterpret_cast<void*>(ret);
}
#endif
} // namespace egr
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <Python.h>
#include "paddle/fluid/eager/api/utils/global_utils.h"
#include "paddle/fluid/eager/hooks.h"
#include "paddle/phi/core/enforce.h"
#include "paddle/phi/core/errors.h"
namespace egr {
#if !(defined(PADDLE_NO_PYTHON) && defined(PADDLE_ON_INFERENCE))
class PackHook : public PackHookBase {
public:
explicit PackHook(PyObject* hook);
~PackHook();
void* operator()(const paddle::experimental::Tensor& tensor) override;
void* operator()(void* py_tensor) override;
private:
PyObject* hook_;
};
class UnPackHook : public UnPackHookBase {
public:
explicit UnPackHook(PyObject* hook);
~UnPackHook();
paddle::experimental::Tensor operator()(void* packed_value) override;
void* operator()(void* packed_value, void* other) override;
private:
PyObject* hook_;
};
#endif
class SavedTensorsHooks {
public:
SavedTensorsHooks() = default;
~SavedTensorsHooks() {}
void SetHooks(PyObject* pack_hook, PyObject* unpack_hook) {
#if !(defined(PADDLE_NO_PYTHON) && defined(PADDLE_ON_INFERENCE))
PADDLE_ENFORCE_EQ(pack_hook_ == nullptr && unpack_hook_ == nullptr,
true,
paddle::platform::errors::InvalidArgument(
"paddle.autograd.saved_tensors_hooks only one pair "
"of hooks is allowed at a time."));
pack_hook_ = std::make_shared<PackHook>(pack_hook);
unpack_hook_ = std::make_shared<UnPackHook>(unpack_hook);
is_enable_ = true;
#endif
}
void ResetHooks() {
#if !(defined(PADDLE_NO_PYTHON) && defined(PADDLE_ON_INFERENCE))
pack_hook_ = nullptr;
unpack_hook_ = nullptr;
is_enable_ = false;
#endif
}
bool IsEnable() { return is_enable_; }
std::shared_ptr<PackHookBase> GetPackHook() { return pack_hook_; }
std::shared_ptr<UnPackHookBase> GetUnPackHook() { return unpack_hook_; }
static SavedTensorsHooks& GetInstance() {
static SavedTensorsHooks instance;
return instance;
}
private:
std::shared_ptr<PackHookBase> pack_hook_;
std::shared_ptr<UnPackHookBase> unpack_hook_;
bool is_enable_{false};
};
} // namespace egr
@@ -30,7 +30,7 @@
#include "paddle/fluid/eager/utils.h"
#include "paddle/phi/api/lib/utils/allocator.h"
#ifndef PADDLE_NO_PYTHON
- #include "paddle/fluid/eager/saved_tensors_hooks.h"
+ #include "paddle/fluid/eager/hooks.h"
#endif

namespace egr {
@@ -73,7 +73,7 @@ class TensorWrapper {
      }
    } else {
#ifndef PADDLE_NO_PYTHON
-     if (SavedTensorsHooks::GetInstance().IsEnable() &&
+     if (egr::SavedTensorsHooks::GetInstance().IsEnable() &&
          tensor.is_dense_tensor() && tensor.initialized()) {
        phi::DenseTensor* dense_tensor =
            static_cast<phi::DenseTensor*>(tensor.impl().get());
@@ -81,9 +81,9 @@ class TensorWrapper {
            std::move(std::make_shared<phi::DenseTensor>(
                std::make_shared<phi::Allocation>(nullptr, 0, tensor.place()),
                dense_tensor->meta())));
-       auto pack_hook = SavedTensorsHooks::GetInstance().GetPackHook();
-       unpack_hook_ = SavedTensorsHooks::GetInstance().GetUnPackHook();
-       packed_value_ = reinterpret_cast<PyObject*>((*pack_hook)(tensor));
+       auto pack_hook = egr::SavedTensorsHooks::GetInstance().GetPackHook();
+       unpack_hook_ = egr::SavedTensorsHooks::GetInstance().GetUnPackHook();
+       packed_value_ = (*pack_hook)(tensor);
      } else {
#endif
        intermidiate_tensor_.set_impl(tensor.impl());
@@ -105,6 +105,7 @@ class TensorWrapper {
        weak_grad_node_ = tensor_autograd_meta->GetMutableGradNode();
      }
    }

#ifndef PADDLE_NO_PYTHON
  TensorWrapper(const TensorWrapper& other) {
    no_need_buffer_ = other.no_need_buffer_;
@@ -113,7 +114,9 @@ class TensorWrapper {
    inplace_version_snapshot_ = other.inplace_version_snapshot_;
    packed_value_ = other.packed_value_;
    unpack_hook_ = other.unpack_hook_;
-   Py_XINCREF(packed_value_);
+   if (packed_value_) {
+     packed_value_->inc_ref();
+   }
  }

  TensorWrapper& operator=(const TensorWrapper& other) {
@@ -123,12 +126,13 @@ class TensorWrapper {
    inplace_version_snapshot_ = other.inplace_version_snapshot_;
    packed_value_ = other.packed_value_;
    unpack_hook_ = other.unpack_hook_;
-   Py_XINCREF(packed_value_);
+   if (packed_value_) {
+     packed_value_->inc_ref();
+   }
    return *this;
  }

- ~TensorWrapper() { Py_XDECREF(packed_value_); }
#endif

  paddle::experimental::Tensor recover() {
    VLOG(6) << "Recover tensor: " << intermidiate_tensor_.name()
            << " for wrapper";
@@ -138,8 +142,7 @@ class TensorWrapper {
    }
#ifndef PADDLE_NO_PYTHON
    if (packed_value_ && unpack_hook_) {
-     auto tensor_unpacked =
-         (*unpack_hook_)(reinterpret_cast<void*>(packed_value_));
+     auto tensor_unpacked = (*unpack_hook_)(packed_value_);
      auto src_dense_tensor =
          static_cast<phi::DenseTensor*>(tensor_unpacked.impl().get());
      static_cast<phi::DenseTensor*>(intermidiate_tensor_.impl().get())
@@ -224,10 +227,10 @@ class TensorWrapper {
  std::weak_ptr<egr::GradNodeBase> weak_grad_node_;
  uint32_t inplace_version_snapshot_ = 0;
#ifndef PADDLE_NO_PYTHON
- PyObject* packed_value_{nullptr};
- std::shared_ptr<UnPackHookBase> unpack_hook_;
+ std::shared_ptr<egr::PyObjectHolderBase> packed_value_;
+ std::shared_ptr<egr::UnPackHookBase> unpack_hook_;
#else
- void* packed_value_{nullptr};
+ std::shared_ptr<void> packed_value_;
  std::shared_ptr<void> unpack_hook_;
#endif
};
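For context, paddle.autograd.saved_tensors_hooks lets user code decide how a tensor captured for the backward pass is stored: the wrapper calls the pack hook when it captures the tensor and the unpack hook when recover() needs it back, and it keeps the tensor itself when no hooks are registered. A rough, self-contained sketch of that flow, using plain stand-in types (Wrapper, Hooks, Packed) rather than the real TensorWrapper/PackHookBase classes:

#include <cassert>
#include <functional>
#include <memory>
#include <string>

// Stand-ins: a "tensor" is just a string payload, and a packed value is an
// opaque shared handle produced by the user's pack hook.
struct Packed {
  std::string payload;
};

struct Hooks {
  std::function<std::shared_ptr<Packed>(const std::string&)> pack;
  std::function<std::string(std::shared_ptr<Packed>)> unpack;
  bool enabled() const { return static_cast<bool>(pack) && static_cast<bool>(unpack); }
};

// Mirrors the TensorWrapper hunk above: when hooks are enabled, store only the
// packed handle at save time and call the unpack hook inside recover().
class Wrapper {
 public:
  Wrapper(const std::string& tensor, const Hooks& hooks) : hooks_(hooks) {
    if (hooks_.enabled()) {
      packed_ = hooks_.pack(tensor);  // e.g. offload or compress in Python
    } else {
      full_copy_ = tensor;
    }
  }

  std::string recover() const {
    if (packed_ && hooks_.unpack) {
      return hooks_.unpack(packed_);  // materialize the tensor again
    }
    return full_copy_;
  }

 private:
  Hooks hooks_;
  std::shared_ptr<Packed> packed_;
  std::string full_copy_;
};

int main() {
  Hooks hooks;
  hooks.pack = [](const std::string& t) { return std::make_shared<Packed>(Packed{t}); };
  hooks.unpack = [](std::shared_ptr<Packed> p) { return p->payload; };

  Wrapper w("dense_tensor_data", hooks);
  assert(w.recover() == "dense_tensor_data");
  return 0;
}

In the real code the packed handle is the std::shared_ptr<egr::PyObjectHolderBase> shown above, so copying a TensorWrapper only bumps the holder's use count instead of calling Py_XINCREF directly.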
@@ -605,7 +605,6 @@ if(WITH_PYTHON)
    list(APPEND PYBIND_DEPS python)
    list(APPEND PYBIND_DEPS custom_operator)
    list(APPEND PYBIND_DEPS custom_operator_node)
-   list(APPEND PYBIND_DEPS saved_tensors_hooks)
  endif()

  # On Linux, cc_library(paddle SHARED ..) will generate the libpaddle.so,
@@ -651,12 +650,4 @@ if(WITH_PYTHON)
    target_link_libraries(${SHARD_LIB_NAME} ${os_dependency_modules})
    add_dependencies(${SHARD_LIB_NAME} op_function_generator_cmd)

-   if(APPLE)
-     string(REGEX REPLACE ".+/(.+)" "\\1" PYTHON_LIBRARY_NAME
-                          ${PYTHON_LIBRARIES})
-     # target_link_libraries(${SHARD_LIB_NAME} "-Wl,-rpath,${PYTHON_LIBRARY_NAME}")
-   else()
-     target_link_libraries(${SHARD_LIB_NAME} ${PYTHON_LIBRARIES})
-   endif()
endif()
@@ -25,7 +25,6 @@ typedef SSIZE_T ssize_t;
#include "paddle/fluid/eager/autograd_meta.h"
#include "paddle/fluid/eager/backward.h"
#include "paddle/fluid/eager/custom_operator/custom_operator_node.h"
- #include "paddle/fluid/eager/saved_tensors_hooks.h"
#include "paddle/fluid/eager/utils.h"
#include "paddle/fluid/framework/convert_utils.h"
#include "paddle/fluid/framework/custom_operator.h"
@@ -715,7 +714,9 @@ static PyObject* eager_api_register_saved_tensors_hooks(PyObject* self,
  if (egr::Controller::Instance().HasGrad()) {
    auto pack_hook = PyTuple_GET_ITEM(args, 0);
    auto unpack_hook = PyTuple_GET_ITEM(args, 1);
-   egr::SavedTensorsHooks::GetInstance().SetHooks(pack_hook, unpack_hook);
+   egr::SavedTensorsHooks::GetInstance().SetHooks(
+       std::make_shared<PackHook>(pack_hook),
+       std::make_shared<UnPackHook>(unpack_hook));
  }
  RETURN_PY_NONE

  EAGER_CATCH_AND_THROW_RETURN_NULL
@@ -20,7 +20,6 @@ limitations under the License. */
#include "paddle/fluid/eager/api/all.h"
#include "paddle/fluid/eager/autograd_meta.h"
#include "paddle/fluid/eager/pylayer/py_layer_node.h"
- #include "paddle/fluid/eager/saved_tensors_hooks.h"
#include "paddle/fluid/eager/utils.h"
#include "paddle/fluid/framework/convert_utils.h"
#include "paddle/fluid/memory/allocation/allocator.h"
@@ -1627,5 +1627,126 @@ void PyVoidHook::operator()() {
  }
}
PyObjectHolder::PyObjectHolder(PyObject* ptr) { ptr_ = ptr; }
PyObjectHolder::~PyObjectHolder() {
::pybind11::gil_scoped_acquire gil;
Py_XDECREF(ptr_);
}
void* PyObjectHolder::get() { return reinterpret_cast<void*>(ptr_); }
void PyObjectHolder::reset(void* ptr) {
if (ptr_) {
::pybind11::gil_scoped_acquire gil;
Py_XDECREF(ptr_);
}
ptr_ = reinterpret_cast<PyObject*>(ptr);
}
void PyObjectHolder::inc_ref() {
::pybind11::gil_scoped_acquire gil;
Py_XINCREF(ptr_);
}
void PyObjectHolder::dec_ref() {
::pybind11::gil_scoped_acquire gil;
Py_XDECREF(ptr_);
}
PackHook::PackHook(PyObject* hook) : hook_(hook) { Py_INCREF(hook_); }
PackHook::~PackHook() {
::pybind11::gil_scoped_acquire gil;
Py_DECREF(hook_);
}
std::shared_ptr<egr::PyObjectHolderBase> PackHook::operator()(
const paddle::experimental::Tensor& tensor) {
bool grad_tmp = egr::Controller::Instance().HasGrad();
egr::Controller::Instance().SetHasGrad(false);
::pybind11::gil_scoped_acquire gil;
auto args = PyTuple_New(1);
PyTuple_SET_ITEM(args, 0, paddle::pybind::ToPyObject(tensor));
PyObject* ret = PyObject_Call(hook_, args, nullptr);
PADDLE_ENFORCE_NOT_NULL(ret,
paddle::platform::errors::External(
pybind11::detail::error_string().c_str()));
Py_XDECREF(args);
egr::Controller::Instance().SetHasGrad(grad_tmp);
return std::make_shared<PyObjectHolder>(ret);
}
void* PackHook::operator()(void* py_tensor) {
bool grad_tmp = egr::Controller::Instance().HasGrad();
egr::Controller::Instance().SetHasGrad(false);
::pybind11::gil_scoped_acquire gil;
auto args = PyTuple_New(1);
Py_INCREF(reinterpret_cast<PyObject*>(py_tensor));
PyTuple_SET_ITEM(args, 0, reinterpret_cast<PyObject*>(py_tensor));
PyObject* ret = PyObject_Call(hook_, args, nullptr);
PADDLE_ENFORCE_NOT_NULL(ret,
paddle::platform::errors::External(
pybind11::detail::error_string().c_str()));
Py_XDECREF(args);
egr::Controller::Instance().SetHasGrad(grad_tmp);
return reinterpret_cast<void*>(ret);
}
UnPackHook::UnPackHook(PyObject* hook) : hook_(hook) { Py_INCREF(hook_); }
UnPackHook::~UnPackHook() {
::pybind11::gil_scoped_acquire gil;
Py_DECREF(hook_);
}
paddle::experimental::Tensor UnPackHook::operator()(
std::shared_ptr<egr::PyObjectHolderBase> packed_value) {
bool grad_tmp = egr::Controller::Instance().HasGrad();
egr::Controller::Instance().SetHasGrad(false);
::pybind11::gil_scoped_acquire gil;
auto args = PyTuple_New(1);
Py_INCREF(reinterpret_cast<PyObject*>(packed_value->get()));
PyTuple_SET_ITEM(args, 0, reinterpret_cast<PyObject*>(packed_value->get()));
PyObject* ret = PyObject_Call(hook_, args, nullptr);
PADDLE_ENFORCE_NOT_NULL(ret,
paddle::platform::errors::External(
pybind11::detail::error_string().c_str()));
Py_XDECREF(args);
egr::Controller::Instance().SetHasGrad(grad_tmp);
PADDLE_ENFORCE_EQ(paddle::pybind::IsEagerTensor(ret),
true,
paddle::platform::errors::InvalidArgument(
"paddle.autograd.saved_tensors_hooks only one pair "
"of hooks is allowed at a time."));
auto tensor = reinterpret_cast<paddle::pybind::TensorObject*>(ret)->tensor;
Py_XDECREF(ret);
return tensor;
}
void* UnPackHook::operator()(void* packed_value, void* other) {
bool grad_tmp = egr::Controller::Instance().HasGrad();
egr::Controller::Instance().SetHasGrad(false);
::pybind11::gil_scoped_acquire gil;
auto args = PyTuple_New(1);
Py_INCREF(reinterpret_cast<PyObject*>(packed_value));
PyTuple_SET_ITEM(args, 0, reinterpret_cast<PyObject*>(packed_value));
PyObject* ret = PyObject_Call(hook_, args, nullptr);
PADDLE_ENFORCE_NOT_NULL(ret,
paddle::platform::errors::External(
pybind11::detail::error_string().c_str()));
Py_XDECREF(args);
egr::Controller::Instance().SetHasGrad(grad_tmp);
PADDLE_ENFORCE_EQ(paddle::pybind::IsEagerTensor(ret),
true,
paddle::platform::errors::InvalidArgument(
"paddle.autograd.saved_tensors_hooks only one pair "
"of hooks is allowed at a time."));
return reinterpret_cast<void*>(ret);
}
}  // namespace pybind
}  // namespace paddle
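All of the hook call sites added above follow the same calling convention: remember the current HasGrad flag, turn gradient recording off while the Python callable runs (with the GIL held), then restore the flag before returning the hook's result. A stripped-down, self-contained sketch of that save/disable/restore pattern, using a plain std::function callback in place of the PyObject* hook:

#include <cassert>
#include <functional>

// Stand-in for egr::Controller::Instance(): a single global "grad mode" flag.
class Controller {
 public:
  static Controller& Instance() {
    static Controller c;
    return c;
  }
  bool HasGrad() const { return has_grad_; }
  void SetHasGrad(bool v) { has_grad_ = v; }

 private:
  bool has_grad_{true};
};

// Mirrors the shape of PackHook::operator(): save the flag, disable it while
// the user hook runs, restore it, then hand back the hook's result.
int CallHookWithoutGrad(const std::function<int(int)>& hook, int arg) {
  bool grad_tmp = Controller::Instance().HasGrad();
  Controller::Instance().SetHasGrad(false);
  int ret = hook(arg);  // in the real code this is PyObject_Call under the GIL
  Controller::Instance().SetHasGrad(grad_tmp);
  return ret;
}

int main() {
  int packed = CallHookWithoutGrad([](int x) { return x * 2; }, 21);
  assert(packed == 42);
  assert(Controller::Instance().HasGrad());  // grad mode restored afterwards
  return 0;
}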
@@ -151,6 +151,49 @@ class PyVoidHook : public egr::VoidHook {
  PyObject* py_func_;
};
class PyObjectHolder : public egr::PyObjectHolderBase {
public:
PyObjectHolder() { ptr_ = nullptr; }
explicit PyObjectHolder(PyObject* ptr);
~PyObjectHolder() override;
void* get() override;
void reset(void* ptr) override;
void inc_ref() override;
void dec_ref() override;
private:
PyObject* ptr_{nullptr};
};
class PackHook : public egr::PackHookBase {
public:
explicit PackHook(PyObject* hook);
~PackHook();
std::shared_ptr<egr::PyObjectHolderBase> operator()(
const paddle::experimental::Tensor& tensor) override;
void* operator()(void* py_tensor) override;
private:
PyObject* hook_;
};
class UnPackHook : public egr::UnPackHookBase {
public:
explicit UnPackHook(PyObject* hook);
~UnPackHook();
paddle::experimental::Tensor operator()(
std::shared_ptr<egr::PyObjectHolderBase> packed_value) override;
void* operator()(void* packed_value, void* other) override;
private:
PyObject* hook_;
};
template <typename Tuple, size_t N>
struct TupleTensorResult {
  static void Run(const Tuple& out, PyObject* result) {