未验证 提交 a000e9b8 编写于 作者: H HongyuJia 提交者: GitHub

unify PyCheckTensor function (#49751)

上级 0294ab41
...@@ -52,12 +52,6 @@ typedef SSIZE_T ssize_t; ...@@ -52,12 +52,6 @@ typedef SSIZE_T ssize_t;
namespace paddle { namespace paddle {
namespace pybind { namespace pybind {
extern PyTypeObject* p_tensor_type;
// Returns true iff |obj| is an instance of the eager Tensor Python type
// (p_tensor_type). Subclasses count as instances, matching isinstance().
bool PyCheckTensor(PyObject* obj) {
  // PyObject_IsInstance returns 1 (match), 0 (no match) or -1 (error).
  // Compare against 1 explicitly: the original implicit int->bool
  // conversion turned the -1 error result into "true", misreporting a
  // failed check as a tensor.
  // NOTE(review): on -1 a Python exception is left pending for the caller
  // to handle/clear — confirm callers tolerate this (pre-existing behavior).
  int result =
      PyObject_IsInstance(obj, reinterpret_cast<PyObject*>(p_tensor_type));
  return result == 1;
}
static bool PyCheckInteger(PyObject* obj) { static bool PyCheckInteger(PyObject* obj) {
#if PY_VERSION_HEX < 0x03000000 #if PY_VERSION_HEX < 0x03000000
return (PyLong_Check(obj) || PyInt_Check(obj)) && !PyBool_Check(obj); return (PyLong_Check(obj) || PyInt_Check(obj)) && !PyBool_Check(obj);
......
...@@ -88,10 +88,6 @@ Py_ssize_t GetSliceIndexFromPyObject(PyObject* obj) { ...@@ -88,10 +88,6 @@ Py_ssize_t GetSliceIndexFromPyObject(PyObject* obj) {
} }
} }
// True when |obj| is an instance of the eager Tensor Python type
// (isinstance semantics against the global p_tensor_type).
bool PyCheckTensor(PyObject* obj) {
  auto* tensor_type_object = reinterpret_cast<PyObject*>(p_tensor_type);
  return PyObject_IsInstance(obj, tensor_type_object);
}
static PyObject* tensor_method_numpy(TensorObject* self, static PyObject* tensor_method_numpy(TensorObject* self,
PyObject* args, PyObject* args,
PyObject* kwargs) { PyObject* kwargs) {
......
...@@ -49,12 +49,12 @@ std::set<paddle::experimental::Tensor*> GetTensorsFromPyObject(PyObject* obj) { ...@@ -49,12 +49,12 @@ std::set<paddle::experimental::Tensor*> GetTensorsFromPyObject(PyObject* obj) {
if (obj == nullptr) { if (obj == nullptr) {
return result; return result;
} }
if (IsEagerTensor(obj)) { if (PyCheckTensor(obj)) {
result.insert(&reinterpret_cast<TensorObject*>(obj)->tensor); // NOLINT result.insert(&reinterpret_cast<TensorObject*>(obj)->tensor); // NOLINT
} else if (PyList_Check(obj)) { } else if (PyList_Check(obj)) {
Py_ssize_t len = PyList_Size(obj); Py_ssize_t len = PyList_Size(obj);
for (Py_ssize_t i = 0; i < len; i++) { for (Py_ssize_t i = 0; i < len; i++) {
if (IsEagerTensor(PyList_GetItem(obj, i))) { if (PyCheckTensor(PyList_GetItem(obj, i))) {
result.insert( result.insert(
&reinterpret_cast<TensorObject*>(PyList_GetItem(obj, i)) // NOLINT &reinterpret_cast<TensorObject*>(PyList_GetItem(obj, i)) // NOLINT
->tensor); ->tensor);
...@@ -63,7 +63,7 @@ std::set<paddle::experimental::Tensor*> GetTensorsFromPyObject(PyObject* obj) { ...@@ -63,7 +63,7 @@ std::set<paddle::experimental::Tensor*> GetTensorsFromPyObject(PyObject* obj) {
} else if (PyTuple_Check(obj)) { } else if (PyTuple_Check(obj)) {
Py_ssize_t len = PyTuple_Size(obj); Py_ssize_t len = PyTuple_Size(obj);
for (Py_ssize_t i = 0; i < len; i++) { for (Py_ssize_t i = 0; i < len; i++) {
if (IsEagerTensor(PyTuple_GetItem(obj, i))) { if (PyCheckTensor(PyTuple_GetItem(obj, i))) {
result.insert( result.insert(
&reinterpret_cast<TensorObject*>(PyTuple_GetItem(obj, i)) // NOLINT &reinterpret_cast<TensorObject*>(PyTuple_GetItem(obj, i)) // NOLINT
->tensor); ->tensor);
...@@ -177,7 +177,7 @@ PyObject* pylayer_method_apply(PyObject* cls, ...@@ -177,7 +177,7 @@ PyObject* pylayer_method_apply(PyObject* cls,
} else { } else {
obj = PyTuple_GET_ITEM(args, i); obj = PyTuple_GET_ITEM(args, i);
} }
if (IsEagerTensor(obj)) { if (PyCheckTensor(obj)) {
input_tensorbases.insert( input_tensorbases.insert(
reinterpret_cast<TensorObject*>(obj)->tensor.impl().get()); reinterpret_cast<TensorObject*>(obj)->tensor.impl().get());
auto autograd_meta = egr::EagerUtils::nullable_autograd_meta( auto autograd_meta = egr::EagerUtils::nullable_autograd_meta(
...@@ -196,7 +196,7 @@ PyObject* pylayer_method_apply(PyObject* cls, ...@@ -196,7 +196,7 @@ PyObject* pylayer_method_apply(PyObject* cls,
Py_ssize_t len = PyList_Size(obj); Py_ssize_t len = PyList_Size(obj);
for (Py_ssize_t j = 0; j < len; j++) { for (Py_ssize_t j = 0; j < len; j++) {
PyObject* o = PyList_GetItem(obj, j); PyObject* o = PyList_GetItem(obj, j);
if (IsEagerTensor(o)) { if (PyCheckTensor(o)) {
input_tensorbases.insert( input_tensorbases.insert(
reinterpret_cast<TensorObject*>(o)->tensor.impl().get()); reinterpret_cast<TensorObject*>(o)->tensor.impl().get());
tensors.push_back(&(reinterpret_cast<TensorObject*>(o)->tensor)); tensors.push_back(&(reinterpret_cast<TensorObject*>(o)->tensor));
...@@ -219,7 +219,7 @@ PyObject* pylayer_method_apply(PyObject* cls, ...@@ -219,7 +219,7 @@ PyObject* pylayer_method_apply(PyObject* cls,
Py_ssize_t len = PyTuple_Size(obj); Py_ssize_t len = PyTuple_Size(obj);
for (Py_ssize_t j = 0; j < len; j++) { for (Py_ssize_t j = 0; j < len; j++) {
PyObject* o = PyTuple_GetItem(obj, j); PyObject* o = PyTuple_GetItem(obj, j);
if (IsEagerTensor(o)) { if (PyCheckTensor(o)) {
input_tensorbases.insert( input_tensorbases.insert(
reinterpret_cast<TensorObject*>(o)->tensor.impl().get()); reinterpret_cast<TensorObject*>(o)->tensor.impl().get());
tensors.push_back(&(reinterpret_cast<TensorObject*>(o)->tensor)); tensors.push_back(&(reinterpret_cast<TensorObject*>(o)->tensor));
...@@ -292,7 +292,7 @@ PyObject* pylayer_method_apply(PyObject* cls, ...@@ -292,7 +292,7 @@ PyObject* pylayer_method_apply(PyObject* cls,
ctx->forward_output_tensor_is_duplicable.reserve(outputs_size); ctx->forward_output_tensor_is_duplicable.reserve(outputs_size);
for (Py_ssize_t i = 0; i < outputs_size; i++) { for (Py_ssize_t i = 0; i < outputs_size; i++) {
PyObject* obj = PyTuple_GET_ITEM(outputs_tuple, i); PyObject* obj = PyTuple_GET_ITEM(outputs_tuple, i);
if (IsEagerTensor(obj)) { if (PyCheckTensor(obj)) {
outputs_tensor.push_back( outputs_tensor.push_back(
{&(reinterpret_cast<TensorObject*>(obj)->tensor)}); {&(reinterpret_cast<TensorObject*>(obj)->tensor)});
outputs_autograd_meta.push_back({egr::EagerUtils::autograd_meta( outputs_autograd_meta.push_back({egr::EagerUtils::autograd_meta(
...@@ -316,7 +316,7 @@ PyObject* pylayer_method_apply(PyObject* cls, ...@@ -316,7 +316,7 @@ PyObject* pylayer_method_apply(PyObject* cls,
Py_ssize_t len = PyList_Size(obj); Py_ssize_t len = PyList_Size(obj);
for (Py_ssize_t j = 0; j < len; j++) { for (Py_ssize_t j = 0; j < len; j++) {
PyObject* o = PyList_GetItem(obj, j); PyObject* o = PyList_GetItem(obj, j);
if (IsEagerTensor(o)) { if (PyCheckTensor(o)) {
tensors.push_back(&(reinterpret_cast<TensorObject*>(o)->tensor)); tensors.push_back(&(reinterpret_cast<TensorObject*>(o)->tensor));
if (input_tensorbases.count( if (input_tensorbases.count(
reinterpret_cast<TensorObject*>(o)->tensor.impl().get())) { reinterpret_cast<TensorObject*>(o)->tensor.impl().get())) {
...@@ -344,7 +344,7 @@ PyObject* pylayer_method_apply(PyObject* cls, ...@@ -344,7 +344,7 @@ PyObject* pylayer_method_apply(PyObject* cls,
Py_ssize_t len = PyTuple_Size(obj); Py_ssize_t len = PyTuple_Size(obj);
for (Py_ssize_t j = 0; j < len; j++) { for (Py_ssize_t j = 0; j < len; j++) {
PyObject* o = PyTuple_GetItem(obj, j); PyObject* o = PyTuple_GetItem(obj, j);
if (IsEagerTensor(o)) { if (PyCheckTensor(o)) {
tensors.push_back(&(reinterpret_cast<TensorObject*>(o)->tensor)); tensors.push_back(&(reinterpret_cast<TensorObject*>(o)->tensor));
if (input_tensorbases.count( if (input_tensorbases.count(
reinterpret_cast<TensorObject*>(o)->tensor.impl().get())) { reinterpret_cast<TensorObject*>(o)->tensor.impl().get())) {
...@@ -538,7 +538,7 @@ void call_pack_hook(PyLayerObject* self, PyObject* value) { ...@@ -538,7 +538,7 @@ void call_pack_hook(PyLayerObject* self, PyObject* value) {
for (Py_ssize_t i = 0; i < saved_value_size; i++) { for (Py_ssize_t i = 0; i < saved_value_size; i++) {
PyObject* obj = PyTuple_GET_ITEM(saved_value, i); PyObject* obj = PyTuple_GET_ITEM(saved_value, i);
if (IsEagerTensor(obj)) { if (PyCheckTensor(obj)) {
PyTuple_SET_ITEM(packed_value, PyTuple_SET_ITEM(packed_value,
i, i,
reinterpret_cast<PyObject*>( reinterpret_cast<PyObject*>(
...@@ -548,7 +548,7 @@ void call_pack_hook(PyLayerObject* self, PyObject* value) { ...@@ -548,7 +548,7 @@ void call_pack_hook(PyLayerObject* self, PyObject* value) {
auto tmp_list = PyList_New(len); auto tmp_list = PyList_New(len);
for (Py_ssize_t j = 0; j < len; j++) { for (Py_ssize_t j = 0; j < len; j++) {
PyObject* o = PyList_GetItem(obj, j); PyObject* o = PyList_GetItem(obj, j);
if (IsEagerTensor(o)) { if (PyCheckTensor(o)) {
PyTuple_SET_ITEM(tmp_list, PyTuple_SET_ITEM(tmp_list,
j, j,
reinterpret_cast<PyObject*>( reinterpret_cast<PyObject*>(
...@@ -565,7 +565,7 @@ void call_pack_hook(PyLayerObject* self, PyObject* value) { ...@@ -565,7 +565,7 @@ void call_pack_hook(PyLayerObject* self, PyObject* value) {
auto tmp_tuple = PyTuple_New(len); auto tmp_tuple = PyTuple_New(len);
for (Py_ssize_t j = 0; j < len; j++) { for (Py_ssize_t j = 0; j < len; j++) {
PyObject* o = PyTuple_GetItem(obj, j); PyObject* o = PyTuple_GetItem(obj, j);
if (IsEagerTensor(o)) { if (PyCheckTensor(o)) {
PyTuple_SET_ITEM(tmp_tuple, PyTuple_SET_ITEM(tmp_tuple,
j, j,
reinterpret_cast<PyObject*>( reinterpret_cast<PyObject*>(
......
...@@ -207,7 +207,7 @@ std::string CastPyArg2AttrString(PyObject* obj, ssize_t arg_pos) { ...@@ -207,7 +207,7 @@ std::string CastPyArg2AttrString(PyObject* obj, ssize_t arg_pos) {
} }
} }
bool IsEagerTensor(PyObject* obj) { bool PyCheckTensor(PyObject* obj) {
return PyObject_IsInstance(obj, reinterpret_cast<PyObject*>(p_tensor_type)); return PyObject_IsInstance(obj, reinterpret_cast<PyObject*>(p_tensor_type));
} }
...@@ -1307,7 +1307,7 @@ std::vector<paddle::experimental::Tensor> GetTensorListFromPyObject( ...@@ -1307,7 +1307,7 @@ std::vector<paddle::experimental::Tensor> GetTensorListFromPyObject(
} }
paddle::experimental::Tensor& GetTensorFromPyObject(PyObject* obj) { paddle::experimental::Tensor& GetTensorFromPyObject(PyObject* obj) {
if (!IsEagerTensor(obj)) { if (!PyCheckTensor(obj)) {
PADDLE_THROW(platform::errors::InvalidArgument( PADDLE_THROW(platform::errors::InvalidArgument(
"argument must be " "argument must be "
"Tensor, but got %s", "Tensor, but got %s",
...@@ -1384,7 +1384,7 @@ paddle::experimental::Scalar CastPyArg2Scalar(PyObject* obj, ...@@ -1384,7 +1384,7 @@ paddle::experimental::Scalar CastPyArg2Scalar(PyObject* obj,
} else if (PyFloat_Check(obj)) { } else if (PyFloat_Check(obj)) {
double value = CastPyArg2Double(obj, op_type, arg_pos); double value = CastPyArg2Double(obj, op_type, arg_pos);
return paddle::experimental::Scalar(value); return paddle::experimental::Scalar(value);
} else if (IsEagerTensor(obj)) { } else if (PyCheckTensor(obj)) {
paddle::experimental::Tensor& value = GetTensorFromPyObject( paddle::experimental::Tensor& value = GetTensorFromPyObject(
op_type, "" /*arg_name*/, obj, arg_pos, false /*dispensable*/); op_type, "" /*arg_name*/, obj, arg_pos, false /*dispensable*/);
return paddle::experimental::Scalar(value); return paddle::experimental::Scalar(value);
...@@ -1715,7 +1715,7 @@ paddle::experimental::Tensor UnPackHook::operator()( ...@@ -1715,7 +1715,7 @@ paddle::experimental::Tensor UnPackHook::operator()(
Py_XDECREF(args); Py_XDECREF(args);
egr::Controller::Instance().SetHasGrad(grad_tmp); egr::Controller::Instance().SetHasGrad(grad_tmp);
PADDLE_ENFORCE_EQ(paddle::pybind::IsEagerTensor(ret), PADDLE_ENFORCE_EQ(paddle::pybind::PyCheckTensor(ret),
true, true,
paddle::platform::errors::InvalidArgument( paddle::platform::errors::InvalidArgument(
"paddle.autograd.saved_tensors_hooks only one pair " "paddle.autograd.saved_tensors_hooks only one pair "
...@@ -1740,7 +1740,7 @@ void* UnPackHook::operator()(void* packed_value, void* other) { ...@@ -1740,7 +1740,7 @@ void* UnPackHook::operator()(void* packed_value, void* other) {
Py_XDECREF(args); Py_XDECREF(args);
egr::Controller::Instance().SetHasGrad(grad_tmp); egr::Controller::Instance().SetHasGrad(grad_tmp);
PADDLE_ENFORCE_EQ(paddle::pybind::IsEagerTensor(ret), PADDLE_ENFORCE_EQ(paddle::pybind::PyCheckTensor(ret),
true, true,
paddle::platform::errors::InvalidArgument( paddle::platform::errors::InvalidArgument(
"paddle.autograd.saved_tensors_hooks only one pair " "paddle.autograd.saved_tensors_hooks only one pair "
......
...@@ -46,7 +46,7 @@ namespace py = ::pybind11; ...@@ -46,7 +46,7 @@ namespace py = ::pybind11;
int TensorDtype2NumpyDtype(phi::DataType dtype); int TensorDtype2NumpyDtype(phi::DataType dtype);
bool IsEagerTensor(PyObject* obj); bool PyCheckTensor(PyObject* obj);
bool PyObject_CheckLongOrConvertToLong(PyObject** obj); bool PyObject_CheckLongOrConvertToLong(PyObject** obj);
bool PyObject_CheckFloatOrConvertToFloat(PyObject** obj); bool PyObject_CheckFloatOrConvertToFloat(PyObject** obj);
......
...@@ -350,9 +350,6 @@ Py_ssize_t GetSliceIndexFromPyObject(PyObject *obj) { ...@@ -350,9 +350,6 @@ Py_ssize_t GetSliceIndexFromPyObject(PyObject *obj) {
} }
} }
// Returns true when |obj| wraps an imperative::VarBase — the legacy
// (pre-eager) dygraph tensor type — using pybind11's isinstance check.
// NOTE(review): unlike the eager-mode overload elsewhere in this change,
// this checks VarBase rather than p_tensor_type; presumably intentional
// for the old dygraph binding — confirm against callers.
bool PyCheckTensor(PyObject *obj) {
return py::isinstance<imperative::VarBase>(obj);
}
using PyNameVarBaseMap = std::unordered_map<std::string, py::handle>; using PyNameVarBaseMap = std::unordered_map<std::string, py::handle>;
// NOTE(zjl): py::handle is a very light wrapper of PyObject *. // NOTE(zjl): py::handle is a very light wrapper of PyObject *.
...@@ -872,7 +869,7 @@ void BindImperative(py::module *m_ptr) { ...@@ -872,7 +869,7 @@ void BindImperative(py::module *m_ptr) {
self->Name())); self->Name()));
} }
if (PyCheckTensor(value_obj.ptr())) { if (py::isinstance<imperative::VarBase>(value_obj.ptr())) {
auto value_tensor = auto value_tensor =
value_obj.cast<std::shared_ptr<imperative::VarBase>>(); value_obj.cast<std::shared_ptr<imperative::VarBase>>();
ins.insert({"ValueTensor", {value_tensor}}); ins.insert({"ValueTensor", {value_tensor}});
......
...@@ -30,7 +30,6 @@ namespace py = pybind11; ...@@ -30,7 +30,6 @@ namespace py = pybind11;
namespace paddle { namespace paddle {
namespace pybind { namespace pybind {
static bool PyCheckTensor(PyObject* obj);
static Py_ssize_t GetSliceIndexFromPyObject(PyObject* obj); static Py_ssize_t GetSliceIndexFromPyObject(PyObject* obj);
// Slice related methods // Slice related methods
static bool PyCheckInteger(PyObject* obj) { static bool PyCheckInteger(PyObject* obj) {
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册