diff --git a/paddle/fluid/pybind/eager_method.cc b/paddle/fluid/pybind/eager_method.cc
index 7f1673bafd92ef7d3de5df66929e48540023fbfe..7620be96c10329acf8c4dab68c26b4b7d15730b6 100644
--- a/paddle/fluid/pybind/eager_method.cc
+++ b/paddle/fluid/pybind/eager_method.cc
@@ -1487,6 +1487,46 @@ static PyObject* tensor__offset(TensorObject* self, PyObject* args,
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
 
+static PyObject* tensor__grad_name(TensorObject* self, PyObject* args,
+                                   PyObject* kwargs) {
+  EAGER_TRY
+  paddle::experimental::Tensor* grad =
+      egr::EagerUtils::mutable_grad(self->tensor);
+  PADDLE_ENFORCE_EQ(grad != nullptr, true,
+                    platform::errors::InvalidArgument(
+                        "Detected NULL grad. Please check if you have manually "
+                        "cleared the grad inside autograd_meta"));
+  return ToPyObject(grad->name());
+  EAGER_CATCH_AND_THROW_RETURN_NULL
+}
+
+static PyObject* tensor__grad_value(TensorObject* self, PyObject* args,
+                                    PyObject* kwargs) {
+  EAGER_TRY
+  paddle::experimental::Tensor* grad =
+      egr::EagerUtils::mutable_grad(self->tensor);
+  PADDLE_ENFORCE_EQ(grad != nullptr, true,
+                    platform::errors::InvalidArgument(
+                        "Detected NULL grad. Please check if you have manually "
+                        "cleared the grad inside autograd_meta"));
+
+  if (!grad->defined()) {
+    Py_IncRef(Py_None);
+    return Py_None;
+  }
+  if (grad->is_dense_tensor()) {
+    auto* grad_tensor =
+        static_cast<paddle::framework::LoDTensor*>(grad->impl().get());
+    return ToPyObject(grad_tensor);
+  } else {
+    PADDLE_THROW(paddle::platform::errors::Fatal(
+        "this method is only supported for DenseTensor"));
+    Py_IncRef(Py_None);
+    return Py_None;
+  }
+  EAGER_CATCH_AND_THROW_RETURN_NULL
+}
+
 #if defined(PADDLE_WITH_CUDA)
 static PyObject* tensor_method__uva(TensorObject* self, PyObject* args,
                                     PyObject* kwargs) {
@@ -1628,6 +1668,10 @@ PyMethodDef variable_methods[] = {
      METH_VARARGS | METH_KEYWORDS, NULL},
     {"_offset", (PyCFunction)(void (*)(void))tensor__offset,
      METH_VARARGS | METH_KEYWORDS, NULL},
+    {"_grad_name", (PyCFunction)(void (*)(void))tensor__grad_name,
+     METH_VARARGS | METH_KEYWORDS, NULL},
+    {"_grad_value", (PyCFunction)(void (*)(void))tensor__grad_value,
+     METH_VARARGS | METH_KEYWORDS, NULL},
 #if defined(PADDLE_WITH_CUDA)
     {"_tensor_uva", (PyCFunction)(void (*)(void))tensor_method__uva,
      METH_VARARGS | METH_KEYWORDS, NULL},
diff --git a/python/paddle/fluid/tests/unittests/test_var_base.py b/python/paddle/fluid/tests/unittests/test_var_base.py
index 2729aabf604bbed3827f834ed9e7a86e03d96530..e6e608bea23f49788764036c7278536add3ae364 100644
--- a/python/paddle/fluid/tests/unittests/test_var_base.py
+++ b/python/paddle/fluid/tests/unittests/test_var_base.py
@@ -1743,5 +1743,18 @@ class TestVarBaseCopyGradientFrom(unittest.TestCase):
         self.func_test_copy_gradient_from()
 
 
+class TestEagerTensorGradNameValue(unittest.TestCase):
+    def test_eager_tensor_grad_name_value(self):
+        with _test_eager_guard():
+            a_np = np.array([2, 3]).astype('float32')
+            a = paddle.to_tensor(a_np)
+            a.stop_gradient = False
+            b = a**2
+            self.assertEqual(a._grad_value(), None)
+            b.backward()
+            self.assertEqual('eager_tmp' in a._grad_name(), True)
+            self.assertNotEqual(a._grad_value(), None)
+
+
 if __name__ == '__main__':
     unittest.main()
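
A minimal usage sketch (not part of the patch) of the two new private methods, mirroring the unit test above and assuming the `_test_eager_guard` helper from `paddle.fluid.framework`: `_grad_value()` returns None until backward() produces a gradient, after which `_grad_name()` exposes the gradient tensor's internal name and `_grad_value()` its underlying dense tensor.

    import numpy as np
    import paddle
    from paddle.fluid.framework import _test_eager_guard

    with _test_eager_guard():
        a = paddle.to_tensor(np.array([2.0, 3.0], dtype='float32'))
        a.stop_gradient = False
        b = a**2
        assert a._grad_value() is None  # no gradient allocated before backward()
        b.backward()
        print(a._grad_name())   # internal grad name (contains 'eager_tmp')
        print(a._grad_value())  # the gradient's underlying dense tensor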