diff --git a/paddle/fluid/pybind/eager_functions.cc b/paddle/fluid/pybind/eager_functions.cc
index 2a8bedfe3250e75527ae63825dab04ab75fc25c8..ac33eb2359c8c1a9642b2c5bd6fb02af2b00fbb6 100644
--- a/paddle/fluid/pybind/eager_functions.cc
+++ b/paddle/fluid/pybind/eager_functions.cc
@@ -119,8 +119,7 @@ static PyObject* eager_api_run_backward(PyObject* self, PyObject* args,
   auto grad_tensors = CastPyArg2VectorOfTensor(PyTuple_GET_ITEM(args, 1), 1);
   egr::Backward(tensors, grad_tensors,
                 CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 2), 2));
-  Py_INCREF(Py_None);
-  return Py_None;
+  RETURN_PY_NONE
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
@@ -159,8 +158,7 @@ static PyObject* eager_api_tensor_copy(PyObject* self, PyObject* args,
       egr::EagerUtils::autograd_meta(&(src))->StopGradient());
   egr::EagerUtils::autograd_meta(&dst)->SetPersistable(
       egr::EagerUtils::autograd_meta(&(src))->Persistable());
-  Py_INCREF(Py_None);
-  return Py_None;
+  RETURN_PY_NONE
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
@@ -455,8 +453,7 @@ static PyObject* eager_api_run_costum_op(PyObject* self, PyObject* args,
     }
     grad_node->SetAttrs(attrs);
   }
-  Py_INCREF(Py_None);
-  return Py_None;
+  RETURN_PY_NONE
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
@@ -688,8 +685,7 @@ static PyObject* eager_api_async_read(PyObject* self, PyObject* args,
     cudaMemcpyAsync(dst_data + (numel * size), buffer_tensor->data(),
                     index_tensor.numel() * size * sizeof(float),
                     cudaMemcpyHostToDevice, stream);
-  Py_INCREF(Py_None);
-  return Py_None;
+  RETURN_PY_NONE
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
@@ -771,8 +767,7 @@ static PyObject* eager_api_async_write(PyObject* self, PyObject* args,
                     cudaMemcpyDeviceToHost, stream);
     src_offset += c;
   }
-  Py_INCREF(Py_None);
-  return Py_None;
+  RETURN_PY_NONE
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
diff --git a/paddle/fluid/pybind/eager_method.cc b/paddle/fluid/pybind/eager_method.cc
index e6bd1c0b52682a5a02965e8ae7639f90517046df..d3393b7cb57ac036497a45a6a04ab6a82d4b03b8 100644
--- a/paddle/fluid/pybind/eager_method.cc
+++ b/paddle/fluid/pybind/eager_method.cc
@@ -267,8 +267,7 @@ static PyObject* tensor_method_numpy(TensorObject* self, PyObject* args,
   } else {
     PADDLE_THROW(platform::errors::InvalidArgument(
         "Tensor.numpy() only support cpu tensor."));
-    Py_INCREF(Py_None);
-    return Py_None;
+    RETURN_PY_NONE
   }
 
   return array;
@@ -335,8 +334,7 @@ static PyObject* tensor_method_numpy_for_string_tensor(TensorObject* self,
   } else {
     PADDLE_THROW(platform::errors::InvalidArgument(
         "StringTensor.numpy() only support cpu tensor."));
-    Py_INCREF(Py_None);
-    return Py_None;
+    RETURN_PY_NONE
   }
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
@@ -405,8 +403,8 @@ static PyObject* tensor_method_reconstruct_from_(TensorObject* self,
   VLOG(6) << "Finished Reconstructing Tensor from" << src_tensor.name()
           << " to " << self->tensor.name();
-  Py_INCREF(Py_None);
-  return Py_None;
+  RETURN_PY_NONE
+
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
@@ -436,8 +434,8 @@ static PyObject* tensor_method_copy_(TensorObject* self, PyObject* args,
   VLOG(6) << "Finish Copy Tensor " << src_tensor.name() << " to "
          << self->tensor.name();
-  Py_INCREF(Py_None);
-  return Py_None;
+  RETURN_PY_NONE
+
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
@@ -453,8 +451,8 @@ static PyObject* tensor_retain_grads(TensorObject* self, PyObject* args,
     }
     egr::egr_utils_api::RetainGradForTensor(self->tensor);
   }
-  Py_INCREF(Py_None);
-  return Py_None;
+  RETURN_PY_NONE
+
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
@@ -505,8 +503,8 @@ static PyObject* tensor_clear_gradient(TensorObject* self, PyObject* args,
     }
   }
 
-  Py_INCREF(Py_None);
-  return Py_None;
+  RETURN_PY_NONE
+
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
@@ -535,8 +533,8 @@ static PyObject* tensor__zero_grads(TensorObject* self, PyObject* args,
     }
   }
 
-  Py_INCREF(Py_None);
-  return Py_None;
+  RETURN_PY_NONE
+
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
@@ -559,8 +557,8 @@ static PyObject* tensor__share_buffer_to(TensorObject* self, PyObject* args,
       static_cast(dst_ptr->impl().get());
   dst_tensor->ShareBufferWith(*src_tensor);
   dst_tensor->ShareDataTypeWith(*src_tensor);
-  Py_INCREF(Py_None);
-  return Py_None;
+  RETURN_PY_NONE
+
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
@@ -600,8 +598,8 @@ static PyObject* tensor__share_underline_tensor_to(TensorObject* self,
                       "src tensor before share_buffer_with to other.",
                       self->tensor.name()));
   src_ptr->set_impl(self->tensor.impl());
-  Py_INCREF(Py_None);
-  return Py_None;
+  RETURN_PY_NONE
+
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
@@ -656,8 +654,7 @@ static PyObject* tensor_method_get_underline_tensor(TensorObject* self,
                                                     PyObject* kwargs) {
   EAGER_TRY
   if (!self->tensor.defined()) {
-    Py_IncRef(Py_None);
-    return Py_None;
+    RETURN_PY_NONE
   }
   if (self->tensor.is_dense_tensor()) {
     auto* tensor =
@@ -665,8 +662,7 @@ static PyObject* tensor_method_get_underline_tensor(TensorObject* self,
     VLOG(6) << "tensor: " << tensor->IsInitialized();
     return ToPyObject(tensor);
   } else {
-    Py_IncRef(Py_None);
-    return Py_None;
+    RETURN_PY_NONE
   }
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
@@ -676,16 +672,14 @@ static PyObject* tensor_method_get_underline_selected_rows(TensorObject* self,
                                                            PyObject* kwargs) {
   EAGER_TRY
   if (!self->tensor.defined()) {
-    Py_IncRef(Py_None);
-    return Py_None;
+    RETURN_PY_NONE
   }
   if (self->tensor.is_selected_rows()) {
     auto* selected_rows =
         static_cast<phi::SelectedRows*>(self->tensor.impl().get());
     return ToPyObject(selected_rows);
   } else {
-    Py_IncRef(Py_None);
-    return Py_None;
+    RETURN_PY_NONE
   }
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
@@ -1110,8 +1104,8 @@ static PyObject* tensor_method__setitem_eager_tensor(TensorObject* self,
                           false);
     }
   }
-  Py_INCREF(Py_None);
-  return Py_None;
+  RETURN_PY_NONE
+
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
@@ -1202,8 +1196,8 @@ static PyObject* tensor_register_reduce_hook(TensorObject* self, PyObject* args,
   accumulation_grad_node->RegisterReduceHook(
       std::make_shared(hook_func));
 
-  Py_INCREF(Py_None);
-  return Py_None;
+  RETURN_PY_NONE
+
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
@@ -1218,7 +1212,8 @@ static PyObject* tensor__set_grad_type(TensorObject* self, PyObject* args,
   } else if (var_type == framework::proto::VarType::SELECTED_ROWS) {
     grad_tensor->set_impl(std::make_shared<phi::SelectedRows>());
   }
-  return Py_None;
+  RETURN_PY_NONE
+
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
@@ -1226,7 +1221,8 @@ static PyObject* tensor__clear(TensorObject* self, PyObject* args,
                                PyObject* kwargs) {
   EAGER_TRY
   self->tensor.reset();
-  return Py_None;
+  RETURN_PY_NONE
+
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
@@ -1254,8 +1250,8 @@ static PyObject* tensor__copy_gradient_from(TensorObject* self, PyObject* args,
                           "Tensor %s has not been initialized", src.name()));
     p_grad->set_impl(src.impl());
   }
-  Py_INCREF(Py_None);
-  return Py_None;
+  RETURN_PY_NONE
+
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
 
 static PyObject* tensor_method_get_non_zero_indices(TensorObject* self,
@@ -1396,7 +1392,7 @@ static PyObject* tensor__bump_inplace_version(TensorObject* self,
                                               PyObject* kwargs) {
   EAGER_TRY
   self->tensor.bump_inplace_version();
-  return Py_None;
+  RETURN_PY_NONE
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
@@ -1446,8 +1442,8 @@ static PyObject* tensor__reset_grad_inplace_version(TensorObject* self,
       grad->initialized()) {
     grad->reset_inplace_version(set_to_zero);
   }
-  Py_INCREF(Py_None);
-  return Py_None;
+  RETURN_PY_NONE
+
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
@@ -1479,8 +1475,8 @@ static PyObject* tensor_method__share_memory(TensorObject* self, PyObject* args,
 #else
   PADDLE_THROW(platform::errors::PermissionDenied(
       "Sharing memory in Windows OS is not supported currently"));
-  Py_INCREF(Py_None);
-  return Py_None;
+  RETURN_PY_NONE
+
 #endif
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
@@ -1522,8 +1518,7 @@ static PyObject* tensor__grad_value(TensorObject* self, PyObject* args,
                      "cleared the grad inside autograd_meta"));
 
   if (!grad->defined()) {
-    Py_IncRef(Py_None);
-    return Py_None;
+    RETURN_PY_NONE
   }
   if (grad->is_dense_tensor()) {
     auto* grad_tensor =
@@ -1532,8 +1527,7 @@
   } else {
     PADDLE_THROW(paddle::platform::errors::Fatal(
         "this method is only supported for DenseTensor"));
-    Py_IncRef(Py_None);
-    return Py_None;
+    RETURN_PY_NONE
   }
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
@@ -1556,8 +1550,8 @@ static PyObject* tensor_method__uva(TensorObject* self, PyObject* args,
       static_cast(self->tensor.impl().get());
   tensor_uva(self_tensor, device_id);
 
-  Py_INCREF(Py_None);
-  return Py_None;
+  RETURN_PY_NONE
+
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
 #endif
diff --git a/paddle/fluid/pybind/eager_properties.cc b/paddle/fluid/pybind/eager_properties.cc
index de66308a7baf62aff95e4ccfe026c21a3aab0151..7af221b9ac82ecb3c7c11c6d791d5a2b41cfcba2 100644
--- a/paddle/fluid/pybind/eager_properties.cc
+++ b/paddle/fluid/pybind/eager_properties.cc
@@ -52,8 +52,7 @@ PyObject* tensor_properties_get_type(TensorObject* self, void* closure) {
   } else if (self->tensor.is_selected_rows()) {
     return ToPyObject(paddle::framework::proto::VarType::SELECTED_ROWS);
   } else {
-    Py_INCREF(Py_None);
-    return Py_None;
+    RETURN_PY_NONE
   }
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
@@ -87,8 +86,7 @@ PyObject* tensor_properties_get_grad(TensorObject* self, void* closure) {
   if (meta && meta->Grad().initialized()) {
     return ToPyObject(meta->Grad());
   } else {
-    Py_INCREF(Py_None);
-    return Py_None;
+    RETURN_PY_NONE
   }
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
diff --git a/paddle/fluid/pybind/eager_py_layer.cc b/paddle/fluid/pybind/eager_py_layer.cc
index 46381a9e9ee8406944204fa1a15676844b84183f..47a5309d691f5863e60469f9b56649c7f0eded50 100644
--- a/paddle/fluid/pybind/eager_py_layer.cc
+++ b/paddle/fluid/pybind/eager_py_layer.cc
@@ -390,8 +390,7 @@ PyObject* pylayer_method_register_hook(PyObject* _self, PyObject* hook) {
 PyObject* tensor_properties_get_container(PyLayerObject* self, void* closure) {
   EAGER_TRY
   if (self->container == nullptr) {
-    Py_INCREF(Py_None);
-    return Py_None;
+    RETURN_PY_NONE;
   }
   Py_INCREF(self->container);
   return self->container;
@@ -412,8 +411,7 @@ PyObject* tensor_properties_get_non_differentiable(PyLayerObject* self,
                                                    void* closure) {
   EAGER_TRY
   if (self->non_differentiable == nullptr) {
-    Py_INCREF(Py_None);
-    return Py_None;
+    RETURN_PY_NONE;
   }
   Py_INCREF(self->non_differentiable);
   return self->non_differentiable;
@@ -434,8 +432,7 @@ PyObject* tensor_properties_get_dirty_tensors(PyLayerObject* self,
                                               void* closure) {
   EAGER_TRY
   if (self->dirty_tensors == nullptr) {
-    Py_INCREF(Py_None);
-    return Py_None;
+    RETURN_PY_NONE;
   }
   Py_INCREF(self->dirty_tensors);
   return self->dirty_tensors;
diff --git a/paddle/fluid/pybind/eager_utils.cc b/paddle/fluid/pybind/eager_utils.cc
index d07cbd5ee21a2bf0227cc3634ebcb7f8ee87e0ba..90d7024f7a746763e9a53164d2542590b06dc0fe 100644
--- a/paddle/fluid/pybind/eager_utils.cc
+++ b/paddle/fluid/pybind/eager_utils.cc
@@ -516,8 +516,7 @@ PyObject* ToPyObject(const std::string& value) {
 PyObject* ToPyObject(const paddle::experimental::Tensor& value,
                      bool return_py_none_if_not_initialize) {
   if (return_py_none_if_not_initialize && !value.initialized()) {
-    Py_INCREF(Py_None);
-    return Py_None;
+    RETURN_PY_NONE
   }
   PyObject* obj = nullptr;
   if (value.initialized() && value.is_string_tensor()) {
@@ -679,8 +678,7 @@ PyObject* ToPyObject(const phi::SelectedRows* value) {
 
 PyObject* ToPyObject(const void* value) {
   if (value == nullptr) {
-    Py_INCREF(Py_None);
-    return Py_None;
+    RETURN_PY_NONE
   }
   PADDLE_THROW(
       platform::errors::Fatal("ToPyObject do not support void* with value."));
diff --git a/paddle/fluid/pybind/eager_utils.h b/paddle/fluid/pybind/eager_utils.h
index c4ddb34763228a286ed09ccd3455aaec16dc871d..5273433208d118ece1c1b0d1b6f230df3092f767 100644
--- a/paddle/fluid/pybind/eager_utils.h
+++ b/paddle/fluid/pybind/eager_utils.h
@@ -31,6 +31,10 @@ class Scope;
 }
 namespace pybind {
 
+#define RETURN_PY_NONE \
+  Py_INCREF(Py_None);  \
+  return Py_None;
+
 int TensorDtype2NumpyDtype(phi::DataType dtype);
 
 bool IsEagerTensor(PyObject* obj);
diff --git a/paddle/fluid/pybind/op_function_generator.cc b/paddle/fluid/pybind/op_function_generator.cc
index a9e286a6fa049260d449b5850134db8e3ff23bfd..a905c5befc2b0b00336f8c909bd9b63127740320 100644
--- a/paddle/fluid/pybind/op_function_generator.cc
+++ b/paddle/fluid/pybind/op_function_generator.cc
@@ -372,7 +372,7 @@ std::string GenerateOpFunctionsBody(
                      viwe_input_name, viwe_output_name);
   }
   if (outs_num == 0) {
-    return_str = "Py_INCREF(Py_None);\n    return Py_None;";
+    return_str = "RETURN_PY_NONE";
   } else if (outs_num == 1) {
     return_str = "return MakeReturnPyObject(" + return_str + ");";
   } else {
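The diff above centralizes the `Py_INCREF(Py_None); return Py_None;` pair behind a single `RETURN_PY_NONE` macro defined in eager_utils.h. The CPython convention this encodes: `Py_None` is a shared singleton, so any extension function that returns it must also take a reference on it. Forgetting the `Py_INCREF` (as the old bare `return Py_None;` sites in `tensor__set_grad_type`, `tensor__clear`, and `tensor__bump_inplace_version` did) lets None's reference count drift toward underflow once callers release their references. The sketch below is a minimal, self-contained illustration of the same pattern; the `demo` module and `demo_noop` function are hypothetical names invented for this example, not Paddle code.

#include <Python.h>

// Same shape as the macro added to eager_utils.h: pairing the mandatory
// Py_INCREF with the return means call sites cannot forget the increment.
#define RETURN_PY_NONE \
  Py_INCREF(Py_None);  \
  return Py_None;

// A CPython extension function that performs side effects only and
// returns None via the macro.
static PyObject* demo_noop(PyObject* self, PyObject* args) {
  // ... native work with no Python-visible result ...
  RETURN_PY_NONE  // expands to: Py_INCREF(Py_None); return Py_None;
}

static PyMethodDef demo_methods[] = {
    {"noop", demo_noop, METH_NOARGS, "Does nothing and returns None."},
    {nullptr, nullptr, 0, nullptr}};

static struct PyModuleDef demo_module = {PyModuleDef_HEAD_INIT, "demo",
                                         nullptr, -1, demo_methods};

PyMODINIT_FUNC PyInit_demo(void) { return PyModule_Create(&demo_module); }

Two details of the macro are worth noting. Because its expansion already ends in `return Py_None;`, the trailing semicolon at the eager_py_layer.cc call sites (`RETURN_PY_NONE;`) merely adds a harmless empty statement. And because the two statements are not wrapped in braces or a `do { ... } while (0)`, the macro is only safe as a braced statement: after an unbraced `if (cond) RETURN_PY_NONE`, the `return` would execute unconditionally, which is why every call site in the diff keeps its surrounding braces.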