Unverified commit 3475c2bf, authored by pangyoki and committed by GitHub

Cherry pick PR41990, add _grad_name and _grad_value for eager tensor (#41990) (#42079)

* add _grad_name and _grad_value for eager tensor

* fix paddle_enforce

* fix paddle_enforce 2

* fix grad_name

* _grad_value returns LoDTensor rather than Tensor

* fix
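
For context, a minimal sketch of the intended Python-side usage, mirroring the unit test added in this commit (assumes an eager-mode build; `_test_eager_guard` is the test-only eager switch used throughout Paddle's test suite):

import numpy as np
import paddle
from paddle.fluid.framework import _test_eager_guard

with _test_eager_guard():
    a = paddle.to_tensor(np.array([2, 3]).astype('float32'))
    a.stop_gradient = False
    b = a**2
    print(a._grad_value())  # None: no gradient has been accumulated yet
    b.backward()
    print(a._grad_name())   # autogenerated grad name, contains 'eager_tmp'
    print(a._grad_value())  # the grad's underlying LoDTensor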
Parent 81468682
@@ -1487,6 +1487,46 @@ static PyObject* tensor__offset(TensorObject* self, PyObject* args,
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

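// Backs the Python-side helper Tensor._grad_name(): returns the name of the
// gradient tensor held in autograd_meta, erroring out if the grad slot has
// been cleared.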
static PyObject* tensor__grad_name(TensorObject* self, PyObject* args,
                                   PyObject* kwargs) {
  EAGER_TRY
  paddle::experimental::Tensor* grad =
      egr::EagerUtils::mutable_grad(self->tensor);
  PADDLE_ENFORCE_EQ(grad != nullptr, true,
                    platform::errors::InvalidArgument(
                        "Detected NULL grad. Please check if you have manually "
                        "cleared the grad inside autograd_meta"));
  return ToPyObject(grad->name());
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

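// Backs the Python-side helper Tensor._grad_value(): returns None while the
// grad is still undefined, otherwise the grad's underlying LoDTensor; grads
// that are not dense tensors are rejected.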
static PyObject* tensor__grad_value(TensorObject* self, PyObject* args,
                                    PyObject* kwargs) {
  EAGER_TRY
  paddle::experimental::Tensor* grad =
      egr::EagerUtils::mutable_grad(self->tensor);
  PADDLE_ENFORCE_EQ(grad != nullptr, true,
                    platform::errors::InvalidArgument(
                        "Detected NULL grad. Please check if you have manually "
                        "cleared the grad inside autograd_meta"));

  if (!grad->defined()) {
    Py_IncRef(Py_None);
    return Py_None;
  }
  if (grad->is_dense_tensor()) {
    auto* grad_tensor =
        static_cast<paddle::framework::LoDTensor*>(grad->impl().get());
    return ToPyObject(grad_tensor);
  } else {
    PADDLE_THROW(paddle::platform::errors::Fatal(
        "This method is only supported for DenseTensor."));
    Py_IncRef(Py_None);
    return Py_None;
  }
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

#if defined(PADDLE_WITH_CUDA)
static PyObject* tensor_method__uva(TensorObject* self, PyObject* args,
                                    PyObject* kwargs) {
@@ -1628,6 +1668,10 @@ PyMethodDef variable_methods[] = {
     METH_VARARGS | METH_KEYWORDS, NULL},
    {"_offset", (PyCFunction)(void (*)(void))tensor__offset,
     METH_VARARGS | METH_KEYWORDS, NULL},
    {"_grad_name", (PyCFunction)(void (*)(void))tensor__grad_name,
     METH_VARARGS | METH_KEYWORDS, NULL},
    {"_grad_value", (PyCFunction)(void (*)(void))tensor__grad_value,
     METH_VARARGS | METH_KEYWORDS, NULL},
#if defined(PADDLE_WITH_CUDA)
    {"_tensor_uva", (PyCFunction)(void (*)(void))tensor_method__uva,
     METH_VARARGS | METH_KEYWORDS, NULL},
@@ -1743,5 +1743,18 @@ class TestVarBaseCopyGradientFrom(unittest.TestCase):
        self.func_test_copy_gradient_from()


class TestEagerTensorGradNameValue(unittest.TestCase):
    def test_eager_tensor_grad_name_value(self):
        with _test_eager_guard():
            a_np = np.array([2, 3]).astype('float32')
            a = paddle.to_tensor(a_np)
            a.stop_gradient = False
            b = a**2
            self.assertIsNone(a._grad_value())
            b.backward()
            self.assertIn('eager_tmp', a._grad_name())
            self.assertIsNotNone(a._grad_value())


if __name__ == '__main__':
    unittest.main()