Unverified · Commit a384828d authored by wanghuancoder, committed by GitHub

[Eager] inc ref before return Py_None (#42505)

* fix pylayer_memleak

* inc ref before return Py_None

* refine

* refine

* refine

* refine
Parent d7728051
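Background on the fix, with a minimal sketch (illustrative CPython-extension code, not from this commit; function names are placeholders): Py_None is a singleton, and the CPython calling convention says a method returns a new reference that the caller will eventually Py_DECREF. Returning Py_None without first calling Py_INCREF therefore hands out a reference that was never taken, so None's refcount drifts downward with every call and can eventually corrupt the interpreter.

// Hypothetical minimal extension methods; not Paddle code.
#include <Python.h>

// BUG: returns a borrowed reference. The caller's eventual Py_DECREF
// removes a reference we never added, under-counting None.
static PyObject* do_nothing_buggy(PyObject* self, PyObject* args) {
  return Py_None;
}

// FIX: take a new reference first, which is exactly what the
// RETURN_PY_NONE macro introduced by this commit expands to.
static PyObject* do_nothing_fixed(PyObject* self, PyObject* args) {
  Py_INCREF(Py_None);
  return Py_None;
}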
@@ -119,8 +119,7 @@ static PyObject* eager_api_run_backward(PyObject* self, PyObject* args,
   auto grad_tensors = CastPyArg2VectorOfTensor(PyTuple_GET_ITEM(args, 1), 1);
   egr::Backward(tensors, grad_tensors,
                 CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 2), 2));
-  Py_INCREF(Py_None);
-  return Py_None;
+  RETURN_PY_NONE
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
@@ -159,8 +158,7 @@ static PyObject* eager_api_tensor_copy(PyObject* self, PyObject* args,
       egr::EagerUtils::autograd_meta(&(src))->StopGradient());
   egr::EagerUtils::autograd_meta(&dst)->SetPersistable(
       egr::EagerUtils::autograd_meta(&(src))->Persistable());
-  Py_INCREF(Py_None);
-  return Py_None;
+  RETURN_PY_NONE
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
@@ -455,8 +453,7 @@ static PyObject* eager_api_run_costum_op(PyObject* self, PyObject* args,
     }
     grad_node->SetAttrs(attrs);
   }
-  Py_INCREF(Py_None);
-  return Py_None;
+  RETURN_PY_NONE
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
@@ -688,8 +685,7 @@ static PyObject* eager_api_async_read(TensorObject* self, PyObject* args,
   cudaMemcpyAsync(dst_data + (numel * size), buffer_tensor->data<float>(),
                   index_tensor.numel() * size * sizeof(float),
                   cudaMemcpyHostToDevice, stream);
-  Py_INCREF(Py_None);
-  return Py_None;
+  RETURN_PY_NONE
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
@@ -771,8 +767,7 @@ static PyObject* eager_api_async_write(PyObject* self, PyObject* args,
                     cudaMemcpyDeviceToHost, stream);
     src_offset += c;
   }
-  Py_INCREF(Py_None);
-  return Py_None;
+  RETURN_PY_NONE
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
...
@@ -267,8 +267,7 @@ static PyObject* tensor_method_numpy(TensorObject* self, PyObject* args,
   } else {
     PADDLE_THROW(platform::errors::InvalidArgument(
         "Tensor.numpy() only support cpu tensor."));
-    Py_INCREF(Py_None);
-    return Py_None;
+    RETURN_PY_NONE
   }
   return array;
@@ -335,8 +334,7 @@ static PyObject* tensor_method_numpy_for_string_tensor(TensorObject* self,
   } else {
     PADDLE_THROW(platform::errors::InvalidArgument(
         "StringTensor.numpy() only support cpu tensor."));
-    Py_INCREF(Py_None);
-    return Py_None;
+    RETURN_PY_NONE
   }
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
@@ -405,8 +403,8 @@ static PyObject* tensor_method_reconstruct_from_(TensorObject* self,
   VLOG(6) << "Finished Reconstructing Tensor from" << src_tensor.name()
           << " to " << self->tensor.name();
-  Py_INCREF(Py_None);
-  return Py_None;
+  RETURN_PY_NONE
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
@@ -436,8 +434,8 @@ static PyObject* tensor_method_copy_(TensorObject* self, PyObject* args,
   VLOG(6) << "Finish Copy Tensor " << src_tensor.name() << " to "
           << self->tensor.name();
-  Py_INCREF(Py_None);
-  return Py_None;
+  RETURN_PY_NONE
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
@@ -453,8 +451,8 @@ static PyObject* tensor_retain_grads(TensorObject* self, PyObject* args,
     }
     egr::egr_utils_api::RetainGradForTensor(self->tensor);
   }
-  Py_INCREF(Py_None);
-  return Py_None;
+  RETURN_PY_NONE
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
@@ -505,8 +503,8 @@ static PyObject* tensor_clear_gradient(TensorObject* self, PyObject* args,
     }
   }
-  Py_INCREF(Py_None);
-  return Py_None;
+  RETURN_PY_NONE
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
@@ -535,8 +533,8 @@ static PyObject* tensor__zero_grads(TensorObject* self, PyObject* args,
     }
   }
-  Py_INCREF(Py_None);
-  return Py_None;
+  RETURN_PY_NONE
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
@@ -559,8 +557,8 @@ static PyObject* tensor__share_buffer_to(TensorObject* self, PyObject* args,
       static_cast<paddle::framework::Tensor*>(dst_ptr->impl().get());
   dst_tensor->ShareBufferWith(*src_tensor);
   dst_tensor->ShareDataTypeWith(*src_tensor);
-  Py_INCREF(Py_None);
-  return Py_None;
+  RETURN_PY_NONE
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
@@ -600,8 +598,8 @@ static PyObject* tensor__share_underline_tensor_to(TensorObject* self,
                         "src tensor before share_buffer_with to other.",
                         self->tensor.name()));
   src_ptr->set_impl(self->tensor.impl());
-  Py_INCREF(Py_None);
-  return Py_None;
+  RETURN_PY_NONE
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
@@ -656,8 +654,7 @@ static PyObject* tensor_method_get_underline_tensor(TensorObject* self,
                                                     PyObject* kwargs) {
   EAGER_TRY
   if (!self->tensor.defined()) {
-    Py_IncRef(Py_None);
-    return Py_None;
+    RETURN_PY_NONE
   }
   if (self->tensor.is_dense_tensor()) {
     auto* tensor =
@@ -665,8 +662,7 @@ static PyObject* tensor_method_get_underline_tensor(TensorObject* self,
     VLOG(6) << "tensor: " << tensor->IsInitialized();
     return ToPyObject(tensor);
   } else {
-    Py_IncRef(Py_None);
-    return Py_None;
+    RETURN_PY_NONE
   }
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
@@ -676,16 +672,14 @@ static PyObject* tensor_method_get_underline_selected_rows(TensorObject* self,
                                                            PyObject* kwargs) {
   EAGER_TRY
   if (!self->tensor.defined()) {
-    Py_IncRef(Py_None);
-    return Py_None;
+    RETURN_PY_NONE
   }
   if (self->tensor.is_selected_rows()) {
     auto* selected_rows =
         static_cast<phi::SelectedRows*>(self->tensor.impl().get());
     return ToPyObject(selected_rows);
   } else {
-    Py_IncRef(Py_None);
-    return Py_None;
+    RETURN_PY_NONE
   }
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
@@ -1110,8 +1104,8 @@ static PyObject* tensor_method__setitem_eager_tensor(TensorObject* self,
                               false);
     }
   }
-  Py_INCREF(Py_None);
-  return Py_None;
+  RETURN_PY_NONE
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
@@ -1202,8 +1196,8 @@ static PyObject* tensor_register_reduce_hook(TensorObject* self, PyObject* args,
   accumulation_grad_node->RegisterReduceHook(
       std::make_shared<PyTensorVoidHook>(hook_func));
-  Py_INCREF(Py_None);
-  return Py_None;
+  RETURN_PY_NONE
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
@@ -1218,7 +1212,8 @@ static PyObject* tensor__set_grad_type(TensorObject* self, PyObject* args,
   } else if (var_type == framework::proto::VarType::SELECTED_ROWS) {
     grad_tensor->set_impl(std::make_shared<phi::SelectedRows>());
   }
-  return Py_None;
+  RETURN_PY_NONE
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
@@ -1226,7 +1221,8 @@ static PyObject* tensor__clear(TensorObject* self, PyObject* args,
                                PyObject* kwargs) {
   EAGER_TRY
   self->tensor.reset();
-  return Py_None;
+  RETURN_PY_NONE
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
@@ -1254,8 +1250,8 @@ static PyObject* tensor__copy_gradient_from(TensorObject* self, PyObject* args,
                           "Tensor %s has not been initialized", src.name()));
     p_grad->set_impl(src.impl());
   }
-  Py_INCREF(Py_None);
-  return Py_None;
+  RETURN_PY_NONE
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
 static PyObject* tensor_method_get_non_zero_indices(TensorObject* self,
@@ -1396,7 +1392,7 @@ static PyObject* tensor__bump_inplace_version(TensorObject* self,
                                               PyObject* kwargs) {
   EAGER_TRY
   self->tensor.bump_inplace_version();
-  return Py_None;
+  RETURN_PY_NONE
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
@@ -1446,8 +1442,8 @@ static PyObject* tensor__reset_grad_inplace_version(TensorObject* self,
       grad->initialized()) {
     grad->reset_inplace_version(set_to_zero);
   }
-  Py_INCREF(Py_None);
-  return Py_None;
+  RETURN_PY_NONE
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
@@ -1479,8 +1475,8 @@ static PyObject* tensor_method__share_memory(TensorObject* self, PyObject* args,
 #else
   PADDLE_THROW(platform::errors::PermissionDenied(
       "Sharing memory in Windows OS is not supported currently"));
-  Py_INCREF(Py_None);
-  return Py_None;
+  RETURN_PY_NONE
 #endif
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
@@ -1522,8 +1518,7 @@ static PyObject* tensor__grad_value(TensorObject* self, PyObject* args,
                         "cleared the grad inside autograd_meta"));
   if (!grad->defined()) {
-    Py_IncRef(Py_None);
-    return Py_None;
+    RETURN_PY_NONE
   }
   if (grad->is_dense_tensor()) {
     auto* grad_tensor =
@@ -1532,8 +1527,7 @@ static PyObject* tensor__grad_value(TensorObject* self, PyObject* args,
   } else {
     PADDLE_THROW(paddle::platform::errors::Fatal(
         "this method is only supported for DenseTensor"));
-    Py_IncRef(Py_None);
-    return Py_None;
+    RETURN_PY_NONE
   }
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
@@ -1556,8 +1550,8 @@ static PyObject* tensor_method__uva(TensorObject* self, PyObject* args,
       static_cast<paddle::framework::LoDTensor*>(self->tensor.impl().get());
   tensor_uva(self_tensor, device_id);
-  Py_INCREF(Py_None);
-  return Py_None;
+  RETURN_PY_NONE
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
 #endif
...
@@ -52,8 +52,7 @@ PyObject* tensor_properties_get_type(TensorObject* self, void* closure) {
   } else if (self->tensor.is_selected_rows()) {
     return ToPyObject(paddle::framework::proto::VarType::SELECTED_ROWS);
   } else {
-    Py_INCREF(Py_None);
-    return Py_None;
+    RETURN_PY_NONE
   }
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
@@ -87,8 +86,7 @@ PyObject* tensor_properties_get_grad(TensorObject* self, void* closure) {
   if (meta && meta->Grad().initialized()) {
     return ToPyObject(meta->Grad());
   } else {
-    Py_INCREF(Py_None);
-    return Py_None;
+    RETURN_PY_NONE
   }
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
...
@@ -390,8 +390,7 @@ PyObject* pylayer_method_register_hook(PyObject* _self, PyObject* hook) {
 PyObject* tensor_properties_get_container(PyLayerObject* self, void* closure) {
   EAGER_TRY
   if (self->container == nullptr) {
-    Py_INCREF(Py_None);
-    return Py_None;
+    RETURN_PY_NONE;
   }
   Py_INCREF(self->container);
   return self->container;
@@ -412,8 +411,7 @@ PyObject* tensor_properties_get_non_differentiable(PyLayerObject* self,
                                                    void* closure) {
   EAGER_TRY
   if (self->non_differentiable == nullptr) {
-    Py_INCREF(Py_None);
-    return Py_None;
+    RETURN_PY_NONE;
   }
   Py_INCREF(self->non_differentiable);
   return self->non_differentiable;
@@ -434,8 +432,7 @@ PyObject* tensor_properties_get_dirty_tensors(PyLayerObject* self,
                                               void* closure) {
   EAGER_TRY
   if (self->dirty_tensors == nullptr) {
-    Py_INCREF(Py_None);
-    return Py_None;
+    RETURN_PY_NONE;
   }
   Py_INCREF(self->dirty_tensors);
   return self->dirty_tensors;
...
@@ -516,8 +516,7 @@ PyObject* ToPyObject(const std::string& value) {
 PyObject* ToPyObject(const paddle::experimental::Tensor& value,
                      bool return_py_none_if_not_initialize) {
   if (return_py_none_if_not_initialize && !value.initialized()) {
-    Py_INCREF(Py_None);
-    return Py_None;
+    RETURN_PY_NONE
   }
   PyObject* obj = nullptr;
   if (value.initialized() && value.is_string_tensor()) {
@@ -679,8 +678,7 @@ PyObject* ToPyObject(const phi::SelectedRows* value) {
 PyObject* ToPyObject(const void* value) {
   if (value == nullptr) {
-    Py_INCREF(Py_None);
-    return Py_None;
+    RETURN_PY_NONE
   }
   PADDLE_THROW(
       platform::errors::Fatal("ToPyObject do not support void* with value."));
...
@@ -31,6 +31,10 @@ class Scope;
 }
 namespace pybind {
+#define RETURN_PY_NONE \
+  Py_INCREF(Py_None);  \
+  return Py_None;
+
 int TensorDtype2NumpyDtype(phi::DataType dtype);
 bool IsEagerTensor(PyObject* obj);
...
@@ -372,7 +372,7 @@ std::string GenerateOpFunctionsBody(
                      viwe_input_name, viwe_output_name);
   }
   if (outs_num == 0) {
-    return_str = "Py_INCREF(Py_None);\n return Py_None;";
+    return_str = "RETURN_PY_NONE";
   } else if (outs_num == 1) {
     return_str = "return MakeReturnPyObject(" + return_str + ");";
   } else {
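For concreteness, a hedged sketch of the kind of binding this generator emits for an op with no outputs after the change; the actual generated code is not shown in this diff and may differ, and "some_op" is a placeholder:

// Illustrative shape of a generated zero-output op binding only;
// argument parsing and op dispatch are elided.
static PyObject* eager_api_some_op(PyObject* self, PyObject* args,
                                   PyObject* kwargs) {
  EAGER_TRY
  // ... parse inputs/attrs from args, run the op ...
  RETURN_PY_NONE
  EAGER_CATCH_AND_THROW_RETURN_NULL
}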
...