Unverified · Commit 1bf2eeab authored by pangyoki, committed by GitHub

add _grad_name and _grad_value for eager tensor (#41990)

* add _grad_name and _grad_value for eager tensor

* fix paddle_enforce

* fix paddle_enforce 2

* fix grad_name

* _grad_value returns a LoDTensor rather than a Tensor

* fix
Parent f5ac9961
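In Python terms, the two new private helpers behave as exercised by the unit test at the bottom of this diff. A minimal usage sketch, assuming a Paddle build that includes this commit (eager mode is switched on explicitly via _test_eager_guard, since it was not yet the default at the time):

import numpy as np
import paddle
from paddle.fluid.framework import _test_eager_guard

with _test_eager_guard():  # enable eager mode for this block
    a = paddle.to_tensor(np.array([2.0, 3.0], dtype='float32'))
    a.stop_gradient = False
    b = a**2
    assert a._grad_value() is None        # grad not yet defined before backward()
    b.backward()
    assert 'eager_tmp' in a._grad_name()  # internal name assigned to the grad tensor
    assert a._grad_value() is not None    # now backed by a dense (LoD) tensor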
@@ -1492,6 +1492,46 @@ static PyObject* tensor__offset(TensorObject* self, PyObject* args,
EAGER_CATCH_AND_THROW_RETURN_NULL
}
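
// _grad_name: returns the internal name of this tensor's grad tensor, which
// is mainly useful when debugging the eager autograd machinery.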
static PyObject* tensor__grad_name(TensorObject* self, PyObject* args,
                                   PyObject* kwargs) {
  EAGER_TRY
  paddle::experimental::Tensor* grad =
      egr::EagerUtils::mutable_grad(self->tensor);
  PADDLE_ENFORCE_EQ(grad != nullptr, true,
                    platform::errors::InvalidArgument(
                        "Detected NULL grad. Please check if you have manually "
                        "cleared the grad inside autograd_meta."));
  return ToPyObject(grad->name());
  EAGER_CATCH_AND_THROW_RETURN_NULL
}
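
// _grad_value: returns the grad as the underlying framework-level LoDTensor
// (per the commit message, deliberately not re-wrapped as a paddle Tensor).
// Returns None while the grad is still undefined; only DenseTensor-backed
// grads are supported.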
static PyObject* tensor__grad_value(TensorObject* self, PyObject* args,
                                    PyObject* kwargs) {
  EAGER_TRY
  paddle::experimental::Tensor* grad =
      egr::EagerUtils::mutable_grad(self->tensor);
  PADDLE_ENFORCE_EQ(grad != nullptr, true,
                    platform::errors::InvalidArgument(
                        "Detected NULL grad. Please check if you have manually "
                        "cleared the grad inside autograd_meta."));

  if (!grad->defined()) {
    Py_RETURN_NONE;
  }
  if (grad->is_dense_tensor()) {
    auto* grad_tensor =
        static_cast<paddle::framework::LoDTensor*>(grad->impl().get());
    return ToPyObject(grad_tensor);
  } else {
    PADDLE_THROW(paddle::platform::errors::Fatal(
        "This method is only supported for DenseTensor."));
    Py_RETURN_NONE;  // unreachable; keeps every control path returning a value
  }
  EAGER_CATCH_AND_THROW_RETURN_NULL
}
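Note the design choice recorded in the commit message: _grad_value hands back the framework-level LoDTensor that backs the grad rather than re-wrapping it as a paddle::experimental::Tensor, and it returns None instead of raising while the grad is still undefined.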
#if defined(PADDLE_WITH_CUDA)
static PyObject* tensor_method__uva(TensorObject* self, PyObject* args,
PyObject* kwargs) {
@@ -1633,6 +1673,10 @@ PyMethodDef variable_methods[] = {
METH_VARARGS | METH_KEYWORDS, NULL},
{"_offset", (PyCFunction)(void (*)(void))tensor__offset,
METH_VARARGS | METH_KEYWORDS, NULL},
{"_grad_name", (PyCFunction)(void (*)(void))tensor__grad_name,
METH_VARARGS | METH_KEYWORDS, NULL},
{"_grad_value", (PyCFunction)(void (*)(void))tensor__grad_value,
METH_VARARGS | METH_KEYWORDS, NULL},
#if defined(PADDLE_WITH_CUDA)
{"_tensor_uva", (PyCFunction)(void (*)(void))tensor_method__uva,
METH_VARARGS | METH_KEYWORDS, NULL},
@@ -1743,5 +1743,18 @@ class TestVarBaseCopyGradientFrom(unittest.TestCase):
        self.func_test_copy_gradient_from()


class TestEagerTensorGradNameValue(unittest.TestCase):
    def test_eager_tensor_grad_name_value(self):
        with _test_eager_guard():
            a_np = np.array([2, 3]).astype('float32')
            a = paddle.to_tensor(a_np)
            a.stop_gradient = False
            b = a**2
            self.assertIsNone(a._grad_value())
            b.backward()
            self.assertIn('eager_tmp', a._grad_name())
            self.assertIsNotNone(a._grad_value())


if __name__ == '__main__':
    unittest.main()