From 3475c2bfda8a993bcc86c348abcf35095fc75fbb Mon Sep 17 00:00:00 2001
From: pangyoki
Date: Fri, 22 Apr 2022 16:32:21 +0800
Subject: [PATCH] Cherry pick PR41990, add _grad_name and _grad_value for eager tensor (#41990) (#42079)

* add _grad_name and _grad_value for eager tensor

* fix paddle_enforce

* fix paddle_enforce 2

* fix grad_name

* _grad_value return lodtensor rather than tensor

* fix
---
 paddle/fluid/pybind/eager_method.cc             | 44 +++++++++++++++++++
 .../fluid/tests/unittests/test_var_base.py      | 13 ++++++
 2 files changed, 57 insertions(+)

diff --git a/paddle/fluid/pybind/eager_method.cc b/paddle/fluid/pybind/eager_method.cc
index 7f1673bafd..7620be96c1 100644
--- a/paddle/fluid/pybind/eager_method.cc
+++ b/paddle/fluid/pybind/eager_method.cc
@@ -1487,6 +1487,46 @@ static PyObject* tensor__offset(TensorObject* self, PyObject* args,
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
 
+static PyObject* tensor__grad_name(TensorObject* self, PyObject* args,
+                                   PyObject* kwargs) {
+  EAGER_TRY
+  paddle::experimental::Tensor* grad =
+      egr::EagerUtils::mutable_grad(self->tensor);
+  PADDLE_ENFORCE_EQ(grad != nullptr, true,
+                    platform::errors::InvalidArgument(
+                        "Detected NULL grad. Please check if you have manually "
+                        "cleared the grad inside autograd_meta"));
+  return ToPyObject(grad->name());
+  EAGER_CATCH_AND_THROW_RETURN_NULL
+}
+
+static PyObject* tensor__grad_value(TensorObject* self, PyObject* args,
+                                    PyObject* kwargs) {
+  EAGER_TRY
+  paddle::experimental::Tensor* grad =
+      egr::EagerUtils::mutable_grad(self->tensor);
+  PADDLE_ENFORCE_EQ(grad != nullptr, true,
+                    platform::errors::InvalidArgument(
+                        "Detected NULL grad. Please check if you have manually "
+                        "cleared the grad inside autograd_meta"));
+
+  if (!grad->defined()) {
+    Py_IncRef(Py_None);
+    return Py_None;
+  }
+  if (grad->is_dense_tensor()) {
+    auto* grad_tensor =
+        static_cast<paddle::framework::LoDTensor*>(grad->impl().get());
+    return ToPyObject(grad_tensor);
+  } else {
+    PADDLE_THROW(paddle::platform::errors::Fatal(
+        "this method is only supported for DenseTensor"));
+    Py_IncRef(Py_None);
+    return Py_None;
+  }
+  EAGER_CATCH_AND_THROW_RETURN_NULL
+}
+
 #if defined(PADDLE_WITH_CUDA)
 static PyObject* tensor_method__uva(TensorObject* self, PyObject* args,
                                     PyObject* kwargs) {
@@ -1628,6 +1668,10 @@ PyMethodDef variable_methods[] = {
      METH_VARARGS | METH_KEYWORDS, NULL},
     {"_offset", (PyCFunction)(void (*)(void))tensor__offset,
      METH_VARARGS | METH_KEYWORDS, NULL},
+    {"_grad_name", (PyCFunction)(void (*)(void))tensor__grad_name,
+     METH_VARARGS | METH_KEYWORDS, NULL},
+    {"_grad_value", (PyCFunction)(void (*)(void))tensor__grad_value,
+     METH_VARARGS | METH_KEYWORDS, NULL},
 #if defined(PADDLE_WITH_CUDA)
     {"_tensor_uva", (PyCFunction)(void (*)(void))tensor_method__uva,
      METH_VARARGS | METH_KEYWORDS, NULL},
diff --git a/python/paddle/fluid/tests/unittests/test_var_base.py b/python/paddle/fluid/tests/unittests/test_var_base.py
index 2729aabf60..e6e608bea2 100644
--- a/python/paddle/fluid/tests/unittests/test_var_base.py
+++ b/python/paddle/fluid/tests/unittests/test_var_base.py
@@ -1743,5 +1743,18 @@ class TestVarBaseCopyGradientFrom(unittest.TestCase):
         self.func_test_copy_gradient_from()
 
 
+class TestEagerTensorGradNameValue(unittest.TestCase):
+    def test_eager_tensor_grad_name_value(self):
+        with _test_eager_guard():
+            a_np = np.array([2, 3]).astype('float32')
+            a = paddle.to_tensor(a_np)
+            a.stop_gradient = False
+            b = a**2
+            self.assertEqual(a._grad_value(), None)
+            b.backward()
+            self.assertEqual('eager_tmp' in a._grad_name(), True)
+            self.assertNotEqual(a._grad_value(), None)
+
+
 if __name__ == '__main__':
     unittest.main()
-- 
GitLab
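
For reference, a minimal usage sketch of the two new bindings, mirroring the unit test in the patch. It assumes a Paddle build with this patch applied and that `_test_eager_guard` (the test-only eager-mode switch imported from `paddle.fluid.framework` in this release line) is available:

import numpy as np
import paddle
from paddle.fluid.framework import _test_eager_guard

with _test_eager_guard():
    a = paddle.to_tensor(np.array([2, 3]).astype('float32'))
    a.stop_gradient = False
    b = a**2
    # Before backward(), the grad tensor is undefined, so _grad_value()
    # returns None (the C++ side checks grad->defined()).
    assert a._grad_value() is None
    b.backward()
    # _grad_name() exposes the internal name of the grad tensor; the test
    # above expects it to contain 'eager_tmp'.
    print(a._grad_name())
    # _grad_value() now returns the underlying dense (LoD) tensor holding
    # the accumulated gradient of b with respect to a.
    assert a._grad_value() is not None

Note the design choice visible in `tensor__grad_value`: only dense tensors are supported, and a non-dense grad raises a fatal error rather than returning a Python object.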