Unverified commit a384828d, authored by wanghuancoder, committed by GitHub

[Eager] inc ref before return Py_None (#42505)

* fix pylayer_memleak

* inc ref before return Py_None

* refine

* refine

* refine

* refine
Parent: d7728051
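The pattern being fixed: in CPython, Py_None is a singleton, and a binding that returns it hands the caller a reference that the caller will eventually Py_DECREF. Returning the bare pointer without a prior Py_INCREF therefore drives None's reference count down on every call and can eventually corrupt the interpreter. This commit routes every such return through a new RETURN_PY_NONE macro, the same idea as CPython's own Py_RETURN_NONE. A minimal sketch of the bug class and the fix, with illustrative function names that are not from the Paddle sources:

#include <Python.h>

// BUG: hands out a borrowed reference to the None singleton; each
// caller's eventual Py_DECREF pushes None's refcount toward underflow.
static PyObject* buggy_noop(PyObject* self, PyObject* args) {
  return Py_None;
}

// FIX: take a new reference before returning the singleton.
static PyObject* fixed_noop(PyObject* self, PyObject* args) {
  Py_INCREF(Py_None);
  return Py_None;
}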
@@ -119,8 +119,7 @@ static PyObject* eager_api_run_backward(PyObject* self, PyObject* args,
auto grad_tensors = CastPyArg2VectorOfTensor(PyTuple_GET_ITEM(args, 1), 1);
egr::Backward(tensors, grad_tensors,
CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 2), 2));
- Py_INCREF(Py_None);
- return Py_None;
+ RETURN_PY_NONE
EAGER_CATCH_AND_THROW_RETURN_NULL
}
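Throughout these bindings, the EAGER_TRY / EAGER_CATCH_AND_THROW_RETURN_NULL pair appears to wrap the body in a try/catch that converts C++ exceptions into Python exceptions and returns NULL on error (an inference from the names; the macro definitions are outside this diff). Every success path must therefore end with a valid, properly ref-counted PyObject*, which RETURN_PY_NONE now supplies for the void-style APIs.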
@@ -159,8 +158,7 @@ static PyObject* eager_api_tensor_copy(PyObject* self, PyObject* args,
egr::EagerUtils::autograd_meta(&(src))->StopGradient());
egr::EagerUtils::autograd_meta(&dst)->SetPersistable(
egr::EagerUtils::autograd_meta(&(src))->Persistable());
- Py_INCREF(Py_None);
- return Py_None;
+ RETURN_PY_NONE
EAGER_CATCH_AND_THROW_RETURN_NULL
}
@@ -455,8 +453,7 @@ static PyObject* eager_api_run_costum_op(PyObject* self, PyObject* args,
}
grad_node->SetAttrs(attrs);
}
- Py_INCREF(Py_None);
- return Py_None;
+ RETURN_PY_NONE
EAGER_CATCH_AND_THROW_RETURN_NULL
}
@@ -688,8 +685,7 @@ static PyObject* eager_api_async_read(PyObject* self, PyObject* args,
cudaMemcpyAsync(dst_data + (numel * size), buffer_tensor->data<float>(),
index_tensor.numel() * size * sizeof(float),
cudaMemcpyHostToDevice, stream);
- Py_INCREF(Py_None);
- return Py_None;
+ RETURN_PY_NONE
EAGER_CATCH_AND_THROW_RETURN_NULL
}
@@ -771,8 +767,7 @@ static PyObject* eager_api_async_write(PyObject* self, PyObject* args,
cudaMemcpyDeviceToHost, stream);
src_offset += c;
}
- Py_INCREF(Py_None);
- return Py_None;
+ RETURN_PY_NONE
EAGER_CATCH_AND_THROW_RETURN_NULL
}
@@ -267,8 +267,7 @@ static PyObject* tensor_method_numpy(TensorObject* self, PyObject* args,
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"Tensor.numpy() only support cpu tensor."));
- Py_INCREF(Py_None);
- return Py_None;
+ RETURN_PY_NONE
}
return array;
@@ -335,8 +334,7 @@ static PyObject* tensor_method_numpy_for_string_tensor(TensorObject* self,
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"StringTensor.numpy() only support cpu tensor."));
- Py_INCREF(Py_None);
- return Py_None;
+ RETURN_PY_NONE
}
EAGER_CATCH_AND_THROW_RETURN_NULL
}
@@ -405,8 +403,8 @@ static PyObject* tensor_method_reconstruct_from_(TensorObject* self,
VLOG(6) << "Finished Reconstructing Tensor from" << src_tensor.name()
<< " to " << self->tensor.name();
- Py_INCREF(Py_None);
- return Py_None;
+ RETURN_PY_NONE
EAGER_CATCH_AND_THROW_RETURN_NULL
}
@@ -436,8 +434,8 @@ static PyObject* tensor_method_copy_(TensorObject* self, PyObject* args,
VLOG(6) << "Finish Copy Tensor " << src_tensor.name() << " to "
<< self->tensor.name();
- Py_INCREF(Py_None);
- return Py_None;
+ RETURN_PY_NONE
EAGER_CATCH_AND_THROW_RETURN_NULL
}
@@ -453,8 +451,8 @@ static PyObject* tensor_retain_grads(TensorObject* self, PyObject* args,
}
egr::egr_utils_api::RetainGradForTensor(self->tensor);
}
- Py_INCREF(Py_None);
- return Py_None;
+ RETURN_PY_NONE
EAGER_CATCH_AND_THROW_RETURN_NULL
}
@@ -505,8 +503,8 @@ static PyObject* tensor_clear_gradient(TensorObject* self, PyObject* args,
}
}
- Py_INCREF(Py_None);
- return Py_None;
+ RETURN_PY_NONE
EAGER_CATCH_AND_THROW_RETURN_NULL
}
@@ -535,8 +533,8 @@ static PyObject* tensor__zero_grads(TensorObject* self, PyObject* args,
}
}
- Py_INCREF(Py_None);
- return Py_None;
+ RETURN_PY_NONE
EAGER_CATCH_AND_THROW_RETURN_NULL
}
@@ -559,8 +557,8 @@ static PyObject* tensor__share_buffer_to(TensorObject* self, PyObject* args,
static_cast<paddle::framework::Tensor*>(dst_ptr->impl().get());
dst_tensor->ShareBufferWith(*src_tensor);
dst_tensor->ShareDataTypeWith(*src_tensor);
- Py_INCREF(Py_None);
- return Py_None;
+ RETURN_PY_NONE
EAGER_CATCH_AND_THROW_RETURN_NULL
}
@@ -600,8 +598,8 @@ static PyObject* tensor__share_underline_tensor_to(TensorObject* self,
"src tensor before share_buffer_with to other.",
self->tensor.name()));
src_ptr->set_impl(self->tensor.impl());
- Py_INCREF(Py_None);
- return Py_None;
+ RETURN_PY_NONE
EAGER_CATCH_AND_THROW_RETURN_NULL
}
@@ -656,8 +654,7 @@ static PyObject* tensor_method_get_underline_tensor(TensorObject* self,
PyObject* kwargs) {
EAGER_TRY
if (!self->tensor.defined()) {
- Py_IncRef(Py_None);
- return Py_None;
+ RETURN_PY_NONE
}
if (self->tensor.is_dense_tensor()) {
auto* tensor =
@@ -665,8 +662,7 @@ static PyObject* tensor_method_get_underline_tensor(TensorObject* self,
VLOG(6) << "tensor: " << tensor->IsInitialized();
return ToPyObject(tensor);
} else {
- Py_IncRef(Py_None);
- return Py_None;
+ RETURN_PY_NONE
}
EAGER_CATCH_AND_THROW_RETURN_NULL
}
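Note the two spellings removed across these hunks: Py_INCREF is the classic macro, while Py_IncRef is CPython's exported function form of Py_XINCREF (so it also tolerates NULL). The new macro normalizes every call site to Py_INCREF.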
@@ -676,16 +672,14 @@ static PyObject* tensor_method_get_underline_selected_rows(TensorObject* self,
PyObject* kwargs) {
EAGER_TRY
if (!self->tensor.defined()) {
- Py_IncRef(Py_None);
- return Py_None;
+ RETURN_PY_NONE
}
if (self->tensor.is_selected_rows()) {
auto* selected_rows =
static_cast<phi::SelectedRows*>(self->tensor.impl().get());
return ToPyObject(selected_rows);
} else {
- Py_IncRef(Py_None);
- return Py_None;
+ RETURN_PY_NONE
}
EAGER_CATCH_AND_THROW_RETURN_NULL
}
@@ -1110,8 +1104,8 @@ static PyObject* tensor_method__setitem_eager_tensor(TensorObject* self,
false);
}
}
- Py_INCREF(Py_None);
- return Py_None;
+ RETURN_PY_NONE
EAGER_CATCH_AND_THROW_RETURN_NULL
}
@@ -1202,8 +1196,8 @@ static PyObject* tensor_register_reduce_hook(TensorObject* self, PyObject* args,
accumulation_grad_node->RegisterReduceHook(
std::make_shared<PyTensorVoidHook>(hook_func));
- Py_INCREF(Py_None);
- return Py_None;
+ RETURN_PY_NONE
EAGER_CATCH_AND_THROW_RETURN_NULL
}
@@ -1218,7 +1212,8 @@ static PyObject* tensor__set_grad_type(TensorObject* self, PyObject* args,
} else if (var_type == framework::proto::VarType::SELECTED_ROWS) {
grad_tensor->set_impl(std::make_shared<phi::SelectedRows>());
}
- return Py_None;
+ RETURN_PY_NONE
EAGER_CATCH_AND_THROW_RETURN_NULL
}
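Unlike most hunks in this commit, which fold an already-correct Py_INCREF/return pair into the macro, this function previously returned Py_None without any Py_INCREF at all (note the single removed line); the same real refcount bug is fixed in tensor__clear and tensor__bump_inplace_version below.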
@@ -1226,7 +1221,8 @@ static PyObject* tensor__clear(TensorObject* self, PyObject* args,
PyObject* kwargs) {
EAGER_TRY
self->tensor.reset();
- return Py_None;
+ RETURN_PY_NONE
EAGER_CATCH_AND_THROW_RETURN_NULL
}
@@ -1254,8 +1250,8 @@ static PyObject* tensor__copy_gradient_from(TensorObject* self, PyObject* args,
"Tensor %s has not been initialized", src.name()));
p_grad->set_impl(src.impl());
}
- Py_INCREF(Py_None);
- return Py_None;
+ RETURN_PY_NONE
EAGER_CATCH_AND_THROW_RETURN_NULL
}
static PyObject* tensor_method_get_non_zero_indices(TensorObject* self,
@@ -1396,7 +1392,7 @@ static PyObject* tensor__bump_inplace_version(TensorObject* self,
PyObject* kwargs) {
EAGER_TRY
self->tensor.bump_inplace_version();
- return Py_None;
+ RETURN_PY_NONE
EAGER_CATCH_AND_THROW_RETURN_NULL
}
@@ -1446,8 +1442,8 @@ static PyObject* tensor__reset_grad_inplace_version(TensorObject* self,
grad->initialized()) {
grad->reset_inplace_version(set_to_zero);
}
- Py_INCREF(Py_None);
- return Py_None;
+ RETURN_PY_NONE
EAGER_CATCH_AND_THROW_RETURN_NULL
}
@@ -1479,8 +1475,8 @@ static PyObject* tensor_method__share_memory(TensorObject* self, PyObject* args,
#else
PADDLE_THROW(platform::errors::PermissionDenied(
"Sharing memory in Windows OS is not supported currently"));
- Py_INCREF(Py_None);
- return Py_None;
+ RETURN_PY_NONE
#endif
EAGER_CATCH_AND_THROW_RETURN_NULL
}
@@ -1522,8 +1518,7 @@ static PyObject* tensor__grad_value(TensorObject* self, PyObject* args,
"cleared the grad inside autograd_meta"));
if (!grad->defined()) {
- Py_IncRef(Py_None);
- return Py_None;
+ RETURN_PY_NONE
}
if (grad->is_dense_tensor()) {
auto* grad_tensor =
@@ -1532,8 +1527,7 @@ static PyObject* tensor__grad_value(TensorObject* self, PyObject* args,
} else {
PADDLE_THROW(paddle::platform::errors::Fatal(
"this method is only supported for DenseTensor"));
- Py_IncRef(Py_None);
- return Py_None;
+ RETURN_PY_NONE
}
EAGER_CATCH_AND_THROW_RETURN_NULL
}
@@ -1556,8 +1550,8 @@ static PyObject* tensor_method__uva(TensorObject* self, PyObject* args,
static_cast<paddle::framework::LoDTensor*>(self->tensor.impl().get());
tensor_uva(self_tensor, device_id);
- Py_INCREF(Py_None);
- return Py_None;
+ RETURN_PY_NONE
EAGER_CATCH_AND_THROW_RETURN_NULL
}
#endif
@@ -52,8 +52,7 @@ PyObject* tensor_properties_get_type(TensorObject* self, void* closure) {
} else if (self->tensor.is_selected_rows()) {
return ToPyObject(paddle::framework::proto::VarType::SELECTED_ROWS);
} else {
- Py_INCREF(Py_None);
- return Py_None;
+ RETURN_PY_NONE
}
EAGER_CATCH_AND_THROW_RETURN_NULL
}
@@ -87,8 +86,7 @@ PyObject* tensor_properties_get_grad(TensorObject* self, void* closure) {
if (meta && meta->Grad().initialized()) {
return ToPyObject(meta->Grad());
} else {
- Py_INCREF(Py_None);
- return Py_None;
+ RETURN_PY_NONE
}
EAGER_CATCH_AND_THROW_RETURN_NULL
}
@@ -390,8 +390,7 @@ PyObject* pylayer_method_register_hook(PyObject* _self, PyObject* hook) {
PyObject* tensor_properties_get_container(PyLayerObject* self, void* closure) {
EAGER_TRY
if (self->container == nullptr) {
- Py_INCREF(Py_None);
- return Py_None;
+ RETURN_PY_NONE;
}
Py_INCREF(self->container);
return self->container;
@@ -412,8 +411,7 @@ PyObject* tensor_properties_get_non_differentiable(PyLayerObject* self,
void* closure) {
EAGER_TRY
if (self->non_differentiable == nullptr) {
- Py_INCREF(Py_None);
- return Py_None;
+ RETURN_PY_NONE;
}
Py_INCREF(self->non_differentiable);
return self->non_differentiable;
@@ -434,8 +432,7 @@ PyObject* tensor_properties_get_dirty_tensors(PyLayerObject* self,
void* closure) {
EAGER_TRY
if (self->dirty_tensors == nullptr) {
- Py_INCREF(Py_None);
- return Py_None;
+ RETURN_PY_NONE;
}
Py_INCREF(self->dirty_tensors);
return self->dirty_tensors;
@@ -516,8 +516,7 @@ PyObject* ToPyObject(const std::string& value) {
PyObject* ToPyObject(const paddle::experimental::Tensor& value,
bool return_py_none_if_not_initialize) {
if (return_py_none_if_not_initialize && !value.initialized()) {
- Py_INCREF(Py_None);
- return Py_None;
+ RETURN_PY_NONE
}
PyObject* obj = nullptr;
if (value.initialized() && value.is_string_tensor()) {
@@ -679,8 +678,7 @@ PyObject* ToPyObject(const phi::SelectedRows* value) {
PyObject* ToPyObject(const void* value) {
if (value == nullptr) {
- Py_INCREF(Py_None);
- return Py_None;
+ RETURN_PY_NONE
}
PADDLE_THROW(
platform::errors::Fatal("ToPyObject do not support void* with value."));
@@ -31,6 +31,10 @@ class Scope;
}
namespace pybind {
+ #define RETURN_PY_NONE \
+   Py_INCREF(Py_None); \
+   return Py_None;
int TensorDtype2NumpyDtype(phi::DataType dtype);
bool IsEagerTensor(PyObject* obj);
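An observation on the macro itself (mine, not from the commit): it expands to two statements and is not wrapped in do { ... } while (0); since the trailing return Py_None; carries its own semicolon, call sites compile both with and without a trailing semicolon, and both forms appear in this diff. The usual multi-statement-macro caveat still applies, sketched here with a hypothetical condition:

// Hypothetical misuse (not from the Paddle sources): an unbraced `if`
// splits the two statements of the expansion.
//
//   if (failed) RETURN_PY_NONE
//
// expands to:
//
//   if (failed) Py_INCREF(Py_None);
//   return Py_None;   // runs unconditionally!
//
// Every call site in this diff braces the macro or uses it as the whole
// function tail, so the usage here is safe.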
@@ -372,7 +372,7 @@ std::string GenerateOpFunctionsBody(
viwe_input_name, viwe_output_name);
}
if (outs_num == 0) {
return_str = "Py_INCREF(Py_None);\n return Py_None;";
return_str = "RETURN_PY_NONE";
} else if (outs_num == 1) {
return_str = "return MakeReturnPyObject(" + return_str + ");";
} else {
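For an op with no outputs, the generated binding now ends with the macro instead of the raw pair. An illustrative shape of such generated code, with a hypothetical op name and the argument parsing elided (the real generator also emits tracing and input handling):

static PyObject* eager_api_fake_no_output_op(PyObject* self, PyObject* args,
                                             PyObject* kwargs) {
  EAGER_TRY
  // ... parse inputs and dispatch the op (omitted) ...
  RETURN_PY_NONE
  EAGER_CATCH_AND_THROW_RETURN_NULL
}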