Unverified commit 55e26637, authored by 0x45f, committed by GitHub

Fix eager try catch (#41438)

Parent: b25f25d0
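For context on the change below: the CPython C API requires `tp_init` implementations and `PyGetSetDef` setters to return 0 on success and -1 on failure with a Python exception set. The old `EAGER_CATCH_AND_THROW_RETURN_ZERO` macro returned 0 from its catch block (and `TensorInit` returned 1 on its error path), so a thrown C++ exception was reported back to the interpreter as a successful call. This commit renames the macro to `EAGER_CATCH_AND_THROW_RETURN_NEG`, makes it return -1, and wraps `TensorInit` in the same `EAGER_TRY` / catch pair. A minimal sketch of the convention being followed; the function name and error type here are illustrative, not taken from the Paddle sources:

```cpp
// Illustrative CPython setter (not Paddle code): failure is signalled by
// setting a Python exception and returning -1; returning 0 means success.
#include <Python.h>
#include <stdexcept>

static int example_setter(PyObject* self, PyObject* value, void* closure) {
  try {
    if (value == nullptr) {
      throw std::invalid_argument("attribute deletion is not supported");
    }
    // ... convert and store `value` on `self` here ...
    return 0;   // success
  } catch (const std::exception& e) {
    PyErr_SetString(PyExc_ValueError, e.what());
    return -1;  // failure: the exception is set, -1 tells CPython to raise it
  }
}
```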
@@ -409,6 +409,7 @@ void AutoInitTensorByTensor(TensorObject* py_tensor_ptr,
  * ** name: std::string)
  * **/
 int TensorInit(PyObject* self, PyObject* args, PyObject* kwargs) {
+  EAGER_TRY
   // set a flag to record use kwargs or not
   bool flag_kwargs = false;
   if (kwargs) flag_kwargs = true;
@@ -703,7 +704,8 @@ int TensorInit(PyObject* self, PyObject* args, PyObject* kwargs) {
         "make sure u call the existed constructor."));
   }
-  return 1;
+  return -1;
+  EAGER_CATCH_AND_THROW_RETURN_NEG
 }
 
 static void TensorDealloc(TensorObject* self) {
......
@@ -69,7 +69,7 @@ int tensor_properties_set_name(TensorObject* self, PyObject* value,
   EAGER_TRY
   self->tensor.set_name(CastPyArg2AttrString(value, 0));
   return 0;
-  EAGER_CATCH_AND_THROW_RETURN_ZERO
+  EAGER_CATCH_AND_THROW_RETURN_NEG
 }
 
 PyObject* tensor_properties_get_stop_gradient(TensorObject* self,
@@ -110,7 +110,7 @@ int tensor_properties_set_grad(TensorObject* self, PyObject* value,
                         "the grad inside autograd_meta"));
   grad->copy_(src, self->tensor.inner_place(), true);
   return 0;
-  EAGER_CATCH_AND_THROW_RETURN_ZERO
+  EAGER_CATCH_AND_THROW_RETURN_NEG
 }
 
 int tensor_properties_set_stop_gradient(TensorObject* self, PyObject* value,
@@ -122,7 +122,7 @@ int tensor_properties_set_stop_gradient(TensorObject* self, PyObject* value,
     meta->SetGradNode(std::make_shared<egr::GradNodeAccumulation>(meta));
   }
   return 0;
-  EAGER_CATCH_AND_THROW_RETURN_ZERO
+  EAGER_CATCH_AND_THROW_RETURN_NEG
 }
 
 PyObject* tensor_properties_get_persistable(TensorObject* self, void* closure) {
@@ -138,7 +138,7 @@ int tensor_properties_set_persistable(TensorObject* self, PyObject* value,
   auto meta = egr::EagerUtils::autograd_meta(&self->tensor);
   meta->SetPersistable(CastPyArg2AttrBoolean(value, 0));
   return 0;
-  EAGER_CATCH_AND_THROW_RETURN_ZERO
+  EAGER_CATCH_AND_THROW_RETURN_NEG
 }
 
 PyObject* tensor_properties_get_shape(TensorObject* self, void* closure) {
......
@@ -395,7 +395,7 @@ int tensor_properties_set_container(PyLayerObject* self, PyObject* value,
   Py_XDECREF(self->container);
   self->container = value;
   return 0;
-  EAGER_CATCH_AND_THROW_RETURN_ZERO
+  EAGER_CATCH_AND_THROW_RETURN_NEG
 }
 
 PyObject* tensor_properties_get_non_differentiable(PyLayerObject* self,
@@ -417,7 +417,7 @@ int tensor_properties_set_non_differentiable(PyLayerObject* self,
   Py_XDECREF(self->non_differentiable);
   self->non_differentiable = value;
   return 0;
-  EAGER_CATCH_AND_THROW_RETURN_ZERO
+  EAGER_CATCH_AND_THROW_RETURN_NEG
 }
 
 PyObject* tensor_properties_get_dirty_tensors(PyLayerObject* self,
@@ -439,7 +439,7 @@ int tensor_properties_set_dirty_tensors(PyLayerObject* self, PyObject* value,
   Py_XDECREF(self->dirty_tensors);
   self->dirty_tensors = value;
   return 0;
-  EAGER_CATCH_AND_THROW_RETURN_ZERO
+  EAGER_CATCH_AND_THROW_RETURN_NEG
 }
 
 int tensor_properties_set_materialize_grads(PyLayerObject* self,
@@ -447,7 +447,7 @@ int tensor_properties_set_materialize_grads(PyLayerObject* self,
   EAGER_TRY
   self->materialize_grads = CastPyArg2AttrBoolean(value, 0);
   return 0;
-  EAGER_CATCH_AND_THROW_RETURN_ZERO
+  EAGER_CATCH_AND_THROW_RETURN_NEG
 }
 
 PyMethodDef pylayer_methods[] = {
......
@@ -26,11 +26,11 @@ limitations under the License. */
     return nullptr;                                   \
   }
 
-#define EAGER_CATCH_AND_THROW_RETURN_ZERO             \
+#define EAGER_CATCH_AND_THROW_RETURN_NEG              \
   }                                                   \
   catch (...) {                                       \
     ThrowExceptionToPython(std::current_exception()); \
-    return 0;                                         \
+    return -1;                                        \
   }
 
 namespace paddle {
......
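Taken together, every `int`-returning binding touched by this change now wraps its body in the `EAGER_TRY` / `EAGER_CATCH_AND_THROW_RETURN_NEG` pair. A rough sketch of what one of the setters above expands to, assuming `EAGER_TRY` simply opens the `try` block (which is what the catch-side macros in this header imply):

```cpp
// Approximate expansion of the macro pair around tensor_properties_set_name,
// assuming EAGER_TRY expands to `try {` (implied by the macros shown above).
int tensor_properties_set_name(TensorObject* self, PyObject* value,
                               void* closure) {
  try {                                                 // EAGER_TRY
    self->tensor.set_name(CastPyArg2AttrString(value, 0));
    return 0;                                           // success path
  }                                                     // EAGER_CATCH_AND_THROW_RETURN_NEG
  catch (...) {
    ThrowExceptionToPython(std::current_exception());   // translate to a Python exception
    return -1;                                          // failure, per the CPython setter protocol
  }
}
```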