From cb8afc24df955648a5d553867aaa9753b9af56d3 Mon Sep 17 00:00:00 2001
From: pangyoki
Date: Wed, 30 Mar 2022 14:24:37 +0800
Subject: [PATCH] add _reset_grad_inplace_version (#41101)

---
 paddle/fluid/pybind/eager_method.cc            | 24 +++++++++++++++++++
 paddle/phi/api/include/tensor.h                |  5 ++++
 paddle/phi/api/lib/tensor.cc                   | 11 +++++++++
 .../test_reset_grad_inplace_version.py         | 22 ++++++++++++++---
 4 files changed, 59 insertions(+), 3 deletions(-)

diff --git a/paddle/fluid/pybind/eager_method.cc b/paddle/fluid/pybind/eager_method.cc
index 895d1db43cb..1f2ab946752 100644
--- a/paddle/fluid/pybind/eager_method.cc
+++ b/paddle/fluid/pybind/eager_method.cc
@@ -1308,6 +1308,27 @@ static PyObject* tensor_method_get_rows(TensorObject* self, PyObject* args,
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
 
+static PyObject* tensor__reset_grad_inplace_version(TensorObject* self,
+                                                    PyObject* args,
+                                                    PyObject* kwargs) {
+  EAGER_TRY
+  Py_ssize_t args_num = PyTuple_Size(args);
+  bool set_to_zero = true;
+  if (args_num == (Py_ssize_t)1) {
+    set_to_zero = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 0), 0);
+  }
+
+  paddle::experimental::Tensor* grad =
+      egr::EagerUtils::mutable_grad(self->tensor);
+  if (grad && grad->defined() && grad->is_dense_tensor() &&
+      grad->initialized()) {
+    grad->reset_inplace_version(set_to_zero);
+  }
+  Py_INCREF(Py_None);
+  return Py_None;
+  EAGER_CATCH_AND_THROW_RETURN_NULL
+}
+
 PyMethodDef variable_methods[] = {
     {"numpy", (PyCFunction)(void (*)(void))tensor_method_numpy,
      METH_VARARGS | METH_KEYWORDS, NULL},
@@ -1407,6 +1428,9 @@ PyMethodDef variable_methods[] = {
      METH_VARARGS | METH_KEYWORDS, NULL},
     {"rows", (PyCFunction)(void (*)(void))tensor_method_get_rows,
      METH_VARARGS | METH_KEYWORDS, NULL},
+    {"_reset_grad_inplace_version",
+     (PyCFunction)(void (*)(void))tensor__reset_grad_inplace_version,
+     METH_VARARGS | METH_KEYWORDS, NULL},
     {NULL, NULL, 0, NULL}};
 
 }  // namespace pybind
diff --git a/paddle/phi/api/include/tensor.h b/paddle/phi/api/include/tensor.h
index 934c89fc927..9b0371fc380 100644
--- a/paddle/phi/api/include/tensor.h
+++ b/paddle/phi/api/include/tensor.h
@@ -516,6 +516,11 @@ class PADDLE_API Tensor final {
    */
   uint32_t current_inplace_version();
 
+  /**
+   * @brief Reset inplace version
+   */
+  void reset_inplace_version(bool set_to_zero = false);
+
   /* Part 10: Auto generated Tensor methods */
 
   /* Part 11: Methods of converting SparseTensor and DenseTensor to each other
diff --git a/paddle/phi/api/lib/tensor.cc b/paddle/phi/api/lib/tensor.cc
index cfa8e80e845..5cd1fcb9196 100644
--- a/paddle/phi/api/lib/tensor.cc
+++ b/paddle/phi/api/lib/tensor.cc
@@ -384,5 +384,16 @@ uint32_t Tensor::current_inplace_version() {
   return 0;
 }
 
+void Tensor::reset_inplace_version(bool set_to_zero) {
+  if (set_to_zero) {
+    if (is_dense_tensor()) {
+      auto &inplace_version_counter =
+          std::dynamic_pointer_cast<phi::DenseTensor>(impl_)
+              ->InplaceVersionCounter();
+      inplace_version_counter.SetInplaceVersionToZero();
+    }
+  }
+}
+
 }  // namespace experimental
 }  // namespace paddle
diff --git a/python/paddle/fluid/tests/unittests/test_reset_grad_inplace_version.py b/python/paddle/fluid/tests/unittests/test_reset_grad_inplace_version.py
index fee5bb8f47f..84e22024f76 100644
--- a/python/paddle/fluid/tests/unittests/test_reset_grad_inplace_version.py
+++ b/python/paddle/fluid/tests/unittests/test_reset_grad_inplace_version.py
@@ -16,6 +16,7 @@ import paddle
 import paddle.fluid as fluid
 from paddle import _C_ops
 from paddle.fluid import framework
+from paddle.fluid.framework import _test_eager_guard
 import unittest
 
 paddle.set_device('cpu')
@@ -32,7 +33,7 @@ def clear_grad_test_0(w, a):
 
 
 class TestInplaceAndClearGradient(unittest.TestCase):
-    def test(self):
+    def func_test(self):
         input_data = np.ones([1, 1])
         w = paddle.to_tensor(input_data, 'float32', stop_gradient=False)
 
@@ -45,6 +46,11 @@ class TestInplaceAndClearGradient(unittest.TestCase):
         out.backward()
         assert w.grad[0] == 0.15
 
+    def test(self):
+        with _test_eager_guard():
+            self.func_test()
+        self.func_test()
+
 
 # Test 2
 class Counter:
@@ -67,7 +73,7 @@ def clear_grad_test_1(w, c):
 
 
 class TestInplaceClearGradAccumulation(unittest.TestCase):
-    def test(self):
+    def func_test(self):
         input_data = np.ones([1, 1])
         w = paddle.to_tensor(input_data, 'float32', stop_gradient=False)
         c = Counter()
@@ -87,9 +93,14 @@ class TestInplaceClearGradAccumulation(unittest.TestCase):
             assert c.num_calls == 1
             c.num_calls = 0
 
+    def test(self):
+        with _test_eager_guard():
+            self.func_test()
+        self.func_test()
+
 
 class TestInplaceClearGradAccumulationAlt(unittest.TestCase):
-    def test(self):
+    def func_test(self):
         input_data = np.ones([1, 1])
         w = paddle.to_tensor(input_data, 'float32', stop_gradient=False)
         out = _C_ops.scale(w, 'scale', 0.1)
@@ -100,6 +111,11 @@ class TestInplaceClearGradAccumulationAlt(unittest.TestCase):
 
         assert w.grad._inplace_version() == 1
 
+    def test(self):
+        with _test_eager_guard():
+            self.func_test()
+        self.func_test()
+
 
 if __name__ == '__main__':
     unittest.main()
--
GitLab
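
Appended note (not part of the patch): a minimal usage sketch of the eager-mode
_reset_grad_inplace_version binding added above. It reuses only names that appear
in this patch (_test_eager_guard, _C_ops.scale, _inplace_version) plus standard
Paddle 2.x calls such as paddle.to_tensor and Tensor.backward; treat it as an
illustration under those assumptions, not as code shipped with this change.

    import numpy as np
    import paddle
    from paddle import _C_ops
    from paddle.fluid.framework import _test_eager_guard

    paddle.set_device('cpu')

    with _test_eager_guard():
        # Leaf tensor with a gradient so that w.grad is populated by backward().
        w = paddle.to_tensor(np.ones([1, 1]), 'float32', stop_gradient=False)
        out = _C_ops.scale(w, 'scale', 0.1)
        out.backward()

        # In-place edits of w.grad (e.g. a hook that clears it between
        # accumulation steps) bump its inplace version counter. The new binding
        # resets that counter; passing True sets it back to zero.
        w._reset_grad_inplace_version(True)
        assert w.grad._inplace_version() == 0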