diff --git a/paddle/fluid/framework/variable.h b/paddle/fluid/framework/variable.h
index 792a2accd41d67e76d56dfdc058e4128018614e7..f44551ddbdfe935da25b05f3b6ddf78267e8c09d 100644
--- a/paddle/fluid/framework/variable.h
+++ b/paddle/fluid/framework/variable.h
@@ -69,6 +69,16 @@ class Variable {
     return holder_->Type();
   }
 
+  /**
+   * Let this Variable and `var` share the same Placeholder, whose type can
+   * be Tensor, LoDTensor, SelectedRows, LoDTensorArray, etc.
+   *
+   * NOTE(liym27): In dynamic mode, sharing the same Placeholder also means
+   * sharing the same TensorInplaceVersion, which is very important for
+   * inplace operations.
+   */
+  void SharePlaceholderWith(const Variable& var);
+
  private:
   // This method hides type T, so it doesn't appear as a template parameter of
   // Variable.
@@ -113,6 +123,14 @@ class Variable {
   std::shared_ptr<Placeholder> holder_;
 };
 
+inline void Variable::SharePlaceholderWith(const Variable& var) {
+  PADDLE_ENFORCE_EQ(var.IsInitialized(), true,
+                    platform::errors::PreconditionNotMet(
+                        "Variable holds no memory. "
+                        "Call Variable::GetMutable() first."));
+  holder_ = var.holder_;
+}
+
 inline framework::TensorInplaceVersion* Variable::InplaceVersionCounter() {
   framework::TensorInplaceVersion* version_counter_ptr(nullptr);
   if (IsType<framework::LoDTensor>()) {
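Sharing the Placeholder also shares its TensorInplaceVersion counter, so the effect of `SharePlaceholderWith` is directly observable from Python once `detach` uses it (see the `imperative.cc` change below). A minimal sketch of that behavior, mirroring the new `test_forward_version` test and assuming Paddle 2.x dynamic mode:

```python
import numpy as np
import paddle

var = paddle.to_tensor(np.ones((4, 2, 3)).astype(np.float32))
detached = var.detach()  # shares the Placeholder; no copy is made

# An inplace write through either alias bumps the one shared counter.
var[0] = 1.1
assert var.inplace_version == 1
assert detached.inplace_version == 1
```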
diff --git a/paddle/fluid/pybind/imperative.cc b/paddle/fluid/pybind/imperative.cc
index 3510c9d152c83415922297b3def94ec25151c54e..4a4f55cf57b2f1286997dca34efc9c8f27d84b2a 100644
--- a/paddle/fluid/pybind/imperative.cc
+++ b/paddle/fluid/pybind/imperative.cc
@@ -689,57 +689,44 @@ void BindImperative(py::module *m_ptr) {
            x = linear(data)
            print(x.numpy())
       )DOC")
-      .def("detach",
-           [](const imperative::VarBase
-                  &self) -> std::shared_ptr<imperative::VarBase> {
-             PADDLE_ENFORCE_EQ(
-                 self.Var().IsInitialized(), true,
-                 platform::errors::InvalidArgument(
-                     "Tensor %s has not been initialized!", self.Name()));
-             PADDLE_ENFORCE_EQ(
-                 self.Var().IsType<framework::LoDTensor>() ||
-                     self.Var().IsType<framework::SelectedRows>(),
-                 true,
-                 platform::errors::InvalidArgument(
-                     "Type of Tensor[%s] must be LoDTensor or SelectedRows!",
-                     self.Name()));
-             auto detach_var = std::make_shared<imperative::VarBase>(
-                 true, "detach_" + self.Name());
-             detach_var->SetPersistable(self.Persistable());
-             detach_var->SetType(self.Type());
-             detach_var->SetDataType(self.DataType());
-             if (self.Var().IsType<framework::LoDTensor>()) {
-               const auto &origin_tensor =
-                   self.Var().Get<framework::LoDTensor>();
-               PADDLE_ENFORCE_EQ(
-                   origin_tensor.IsInitialized(), true,
-                   platform::errors::InvalidArgument(
-                       "Tensor %s has not been initialized!", self.Name()));
-
-               auto *detach_tensor =
-                   detach_var->MutableVar()->GetMutable<framework::LoDTensor>();
-               detach_tensor->ShareDataWith(origin_tensor);
-             } else {
-               const auto &origin_selected_rows =
-                   self.Var().Get<framework::SelectedRows>();
-               PADDLE_ENFORCE_EQ(
-                   origin_selected_rows.value().IsInitialized(), true,
-                   platform::errors::InvalidArgument(
-                       "Tensor %s has not been initialized!", self.Name()));
-
-               auto *detach_selected_rows =
-                   detach_var->MutableVar()
-                       ->GetMutable<framework::SelectedRows>();
-               detach_selected_rows->set_height(origin_selected_rows.height());
-               detach_selected_rows->set_rows(origin_selected_rows.rows());
-               detach_selected_rows->mutable_value()->ShareDataWith(
-                   origin_selected_rows.value());
-             }
-             VLOG(3) << "The detached Tensor(" << detach_var->Name()
-                     << ") share data with " << self.Name();
-             return detach_var;
-           },
-           py::return_value_policy::take_ownership, R"DOC(
+      .def(
+          "detach",
+          [](const imperative::VarBase &self)
+              -> std::shared_ptr<imperative::VarBase> {
+            PADDLE_ENFORCE_EQ(
+                self.Var().IsInitialized(), true,
+                platform::errors::InvalidArgument(
+                    "Tensor %s has not been initialized!", self.Name()));
+
+            PADDLE_ENFORCE_EQ(
+                self.Var().IsType<framework::LoDTensor>() ||
+                    self.Var().IsType<framework::SelectedRows>(),
+                true,
+                platform::errors::InvalidArgument(
+                    "Type of Tensor[%s] must be LoDTensor or SelectedRows!",
+                    self.Name()));
+
+            auto detach_var = std::make_shared<imperative::VarBase>(
+                true, "detach_" + self.Name());
+
+            detach_var->SetPersistable(self.Persistable());
+            detach_var->SetType(self.Type());
+            detach_var->SetDataType(self.DataType());
+
+            // NOTE(liym27):
+            // Call Variable::SharePlaceholderWith rather than
+            // Tensor::ShareDataWith or Tensor::ShareBufferWith, because
+            // `detach_var` must share the same TensorInplaceVersion as
+            // `self`, and only SharePlaceholderWith shares it. The version
+            // counter is used to check whether inplace operations are
+            // correct.
+            detach_var->MutableVar()->SharePlaceholderWith(self.Var());
+
+            VLOG(3) << "The detached Tensor(" << detach_var->Name()
+                    << ") shares data with " << self.Name();
+            return detach_var;
+          },
+          py::return_value_policy::take_ownership, R"DOC(
 
        Returns a new Tensor, detached from the current graph.
        It will share data with origin Tensor and always doesn't have a Tensor copy.
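Why the shared counter matters for autograd: an op like `b ** 2` saves the forward value of `b` for its backward (its gradient is `2 * b * grad_out`), so an inplace write to `b` between forward and backward would silently corrupt the gradient. Because `detach` now shares the version counter, the write is detected and `backward()` raises instead. A sketch of the failure mode covered by the new `test_backward_error` below, again assuming Paddle 2.x dynamic mode:

```python
import paddle

a = paddle.ones(shape=[4, 2, 3], dtype="float32")
a.stop_gradient = False

b = a ** 2
c = b ** 2  # backward of c snapshots b at inplace_version 0

detached_b = b.detach()
detached_b[1:2] = 3.3  # write through the alias bumps b's version to 1

loss = paddle.nn.functional.relu(c)
try:
    loss.backward()
except RuntimeError as e:
    # "... received tensor_version:1 != wrapper_version_snapshot:0"
    print(e)
```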
diff --git a/python/paddle/fluid/tests/unittests/test_detach.py b/python/paddle/fluid/tests/unittests/test_detach.py
index f0103f89a5940befb55b2148ecdb0453eeb5215c..431c987a51fe2ff2d11d3f7b1f21ad37a3081aa6 100644
--- a/python/paddle/fluid/tests/unittests/test_detach.py
+++ b/python/paddle/fluid/tests/unittests/test_detach.py
@@ -15,8 +15,9 @@
 from __future__ import print_function
 
 import numpy as np
 
-import paddle.fluid as fluid
+import paddle
+import paddle.fluid as fluid
 from paddle.fluid.dygraph import Linear
 from paddle.fluid.dygraph.base import to_variable
 
@@ -161,5 +162,47 @@ class Test_Detach(unittest.TestCase):
         ) == "'detach' should be called by imperative Varible in imperative mode, please use fluid.dygraph.guard() as context to run it in imperative mode"
 
 
+class TestInplace(unittest.TestCase):
+    def test_forward_version(self):
+        with paddle.fluid.dygraph.guard():
+            var = paddle.to_tensor(np.ones((4, 2, 3)).astype(np.float32))
+            self.assertEqual(var.inplace_version, 0)
+
+            detach_var_1 = var.detach()
+            self.assertEqual(detach_var_1.inplace_version, 0)
+
+            var[0] = 1.1
+            self.assertEqual(var.inplace_version, 1)
+
+            detach_var_2 = var.detach()
+            self.assertEqual(detach_var_2.inplace_version, 1)
+
+            var[0] = 3
+            self.assertEqual(detach_var_1.inplace_version, 2)
+            self.assertEqual(detach_var_2.inplace_version, 2)
+
+    def test_backward_error(self):
+        # It raises an error because the inplace operator will result
+        # in incorrect gradient computation.
+        with paddle.fluid.dygraph.guard():
+            var_a = paddle.ones(shape=[4, 2, 3], dtype="float32")
+            var_a.stop_gradient = False
+
+            var_b = var_a**2
+
+            # Here, the gradient computation will use the value of var_b
+            var_c = var_b**2
+            detach_var_b = var_b.detach()
+            detach_var_b[1:2] = 3.3  # var_b is modified inplace
+
+            var_d = var_b**2
+
+            loss = paddle.nn.functional.relu(var_c + var_d)
+            with self.assertRaisesRegexp(
+                    RuntimeError,
+                    "received tensor_version:{} != wrapper_version_snapshot:{}".
+                    format(1, 0)):
+                loss.backward()
+
+
 if __name__ == '__main__':
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_var_base.py b/python/paddle/fluid/tests/unittests/test_var_base.py
index 86ba5a96b8d39b05a593f26b4fbad726a77f24cf..e374e607fec58571ea71c29988b0ef259afb658e 100644
--- a/python/paddle/fluid/tests/unittests/test_var_base.py
+++ b/python/paddle/fluid/tests/unittests/test_var_base.py
@@ -243,11 +243,12 @@ class TestVarBase(unittest.TestCase):
             z.backward()
             self.assertTrue(np.array_equal(x.grad, [20.0]))
             self.assertTrue(np.array_equal(detach_x.grad, [60.0]))
+
             # Due to sharing of data with origin Tensor, There are some unsafe operations:
-            # with self.assertRaises(RuntimeError):
-            #     y = 2 * x
-            #     detach_x[:] = 5.0
-            #     y.backward()
+            with self.assertRaises(RuntimeError):
+                y = 2**x
+                detach_x[:] = 5.0
+                y.backward()
 
     def test_write_property(self):
         with fluid.dygraph.guard():
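The re-enabled assertion in test_var_base.py covers the same hazard in its simplest form: `2 ** x` saves forward state for its gradient, and the write through `detach_x` invalidates it. As a standalone sketch (Paddle 2.x dynamic mode assumed):

```python
import paddle

x = paddle.to_tensor([1.0], stop_gradient=False)
detach_x = x.detach()

y = 2 ** x         # pow saves forward values for its backward
detach_x[:] = 5.0  # modifies x in place through the shared Placeholder

try:
    y.backward()   # the saved-tensor version check fails
except RuntimeError as e:
    print(e)
```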