diff --git a/paddle/fluid/framework/tensor.cc b/paddle/fluid/framework/tensor.cc
index f721caaae9c7d9943b9ebabc7bbea0bcca559352..b304a45be3cdcc5defaca9e87d0aa291d09faceb 100644
--- a/paddle/fluid/framework/tensor.cc
+++ b/paddle/fluid/framework/tensor.cc
@@ -39,7 +39,10 @@ void Tensor::check_memory_size() const {
                         numel() * SizeOfType(type()), memory_size()));
 }
-Tensor::Tensor(const proto::VarType::Type& dtype) : type_(dtype), offset_(0) {}
+Tensor::Tensor(const proto::VarType::Type& dtype)
+    : type_(dtype),
+      offset_(0),
+      inplace_version_counter_(std::make_shared<TensorInplaceVersion>(0)) {}
 
 size_t Tensor::memory_size() const {
   return holder_ == nullptr ? 0UL : holder_->size() - offset_;
 }
@@ -89,6 +92,15 @@ Tensor& Tensor::ShareDataWith(const Tensor& src) {
   *this = src;
   return *this;
 }
+Tensor& Tensor::ShareInplaceVersionCounterWith(const Tensor& src) {
+  PADDLE_ENFORCE_NOT_NULL(
+      inplace_version_counter_,
+      platform::errors::PreconditionNotMet(
+          "Tensor does not hold inplace_version_counter_."));
+
+  inplace_version_counter_ = src.inplace_version_counter_;
+  return *this;
+}
 
 Tensor Tensor::Slice(int64_t begin_idx, int64_t end_idx) const {
   check_memory_size();
diff --git a/paddle/fluid/framework/tensor.h b/paddle/fluid/framework/tensor.h
index 76119e7c70811d1a9a110f3e03fe439cc5ccdeee..0747321bcfa492e01c324954f168ff66426d1347 100644
--- a/paddle/fluid/framework/tensor.h
+++ b/paddle/fluid/framework/tensor.h
@@ -120,7 +120,10 @@ class Tensor {
   friend struct EigenVector;
 
  public:
-  Tensor() : type_(proto::VarType::FP32), offset_(0) {}
+  Tensor()
+      : type_(proto::VarType::FP32),
+        offset_(0),
+        inplace_version_counter_(std::make_shared<TensorInplaceVersion>(0)) {}
 
   explicit Tensor(const proto::VarType::Type&);
 
@@ -171,6 +174,9 @@ class Tensor {
   /*! The internal of two tensors share the same memory block. */
   Tensor& ShareDataWith(const Tensor& src);
 
+  /*! The internal of two tensors share the same inplace version counter. */
+  Tensor& ShareInplaceVersionCounterWith(const Tensor& src);
+
   /**
    * @brief Return a sub-tensor of the given tensor.
    *
@@ -252,7 +258,7 @@ class Tensor {
                     const proto::VarType::Type type);
 
   TensorInplaceVersion& InplaceVersionCounter() {
-    return inplace_version_counter_;
+    return *inplace_version_counter_;
   }
 
  private:
@@ -290,7 +296,7 @@ class Tensor {
    * PlaceHolder::ptr_ and where the tensor data really begins.
    */
   size_t offset_;
-  TensorInplaceVersion inplace_version_counter_;
+  std::shared_ptr<TensorInplaceVersion> inplace_version_counter_;
 };
 
 }  // namespace framework
diff --git a/paddle/fluid/framework/variable.h b/paddle/fluid/framework/variable.h
index f44551ddbdfe935da25b05f3b6ddf78267e8c09d..792a2accd41d67e76d56dfdc058e4128018614e7 100644
--- a/paddle/fluid/framework/variable.h
+++ b/paddle/fluid/framework/variable.h
@@ -69,16 +69,6 @@ class Variable {
     return holder_->Type();
   }
 
-  /**
-   * The internal of two Variables share the same Placeholder whose type can be
-   * Tensor, LoDTensor, SelectedRows, LoDTensorArray, etc.
-   *
-   * NOTE(liym27): In dynamic mode, sharing the same Placeholder also means
-   * share the same TensorInplaceVersion, which is very important for inplace
-   * operations.
-   */
-  void SharePlaceholderWith(const Variable& var);
-
  private:
   // This method hides type T, so it doesn't appear as a template parameter of
   // Variable.
@@ -123,14 +113,6 @@ class Variable {
   std::shared_ptr<Placeholder> holder_;
 };
 
-inline void Variable::SharePlaceholderWith(const Variable& var) {
-  PADDLE_ENFORCE_EQ(var.IsInitialized(), true,
-                    platform::errors::PreconditionNotMet(
-                        "Variable holds no memory. "
-                        "Call Variable::GetMutable<T>() firstly."));
-  holder_ = var.holder_;
-}
-
 inline framework::TensorInplaceVersion* Variable::InplaceVersionCounter() {
   framework::TensorInplaceVersion* version_counter_ptr(nullptr);
   if (IsType<framework::LoDTensor>()) {
diff --git a/paddle/fluid/pybind/imperative.cc b/paddle/fluid/pybind/imperative.cc
index 08af2f023cf32dc8e03f3c56222615d7f1f1dbe0..56c6020afeb5c19998bb737df2e283d485f12076 100644
--- a/paddle/fluid/pybind/imperative.cc
+++ b/paddle/fluid/pybind/imperative.cc
@@ -696,44 +696,69 @@ void BindImperative(py::module *m_ptr) {
           x = linear(data)
           print(x.numpy())
       )DOC")
-      .def(
-          "detach",
-          [](const imperative::VarBase &self)
-              -> std::shared_ptr<imperative::VarBase> {
-            PADDLE_ENFORCE_EQ(
-                self.Var().IsInitialized(), true,
-                platform::errors::InvalidArgument(
-                    "Tensor %s has not been initialized!", self.Name()));
-
-            PADDLE_ENFORCE_EQ(
-                self.Var().IsType<framework::LoDTensor>() ||
-                    self.Var().IsType<framework::SelectedRows>(),
-                true,
-                platform::errors::InvalidArgument(
-                    "Type of Tensor[%s] must be LoDTensor or SelectedRows!",
-                    self.Name()));
-
-            auto detach_var = std::make_shared<imperative::VarBase>(
-                true, "detach_" + self.Name());
-
-            detach_var->SetPersistable(self.Persistable());
-            detach_var->SetType(self.Type());
-            detach_var->SetDataType(self.DataType());
-
-            // NOTE(liym27):
-            // Call Variable::SharePlaceholderWith but not
-            // Tensor::ShareDataWith or Tensor::ShareBufferWith, because
-            // `detach_var` should share the same TensorInplaceVersion with
-            // `self`, and only SharePlaceholderWith can also share the same
-            // TensorInplaceVersion, which is used to check whether inplace
-            // operations are correct.
-            detach_var->MutableVar()->SharePlaceholderWith(self.Var());
-
-            VLOG(3) << "The detached Tensor(" << detach_var->Name()
-                    << ") share data with " << self.Name();
-            return detach_var;
-          },
-          py::return_value_policy::take_ownership, R"DOC(
+      .def("detach",
+           [](const imperative::VarBase
+                  &self) -> std::shared_ptr<imperative::VarBase> {
+             PADDLE_ENFORCE_EQ(
+                 self.Var().IsInitialized(), true,
+                 platform::errors::InvalidArgument(
+                     "Tensor %s has not been initialized!", self.Name()));
+
+             PADDLE_ENFORCE_EQ(
+                 self.Var().IsType<framework::LoDTensor>() ||
+                     self.Var().IsType<framework::SelectedRows>(),
+                 true,
+                 platform::errors::InvalidArgument(
+                     "Type of Tensor[%s] must be LoDTensor or SelectedRows!",
+                     self.Name()));
+
+             auto detach_var = std::make_shared<imperative::VarBase>(
+                 true, "detach_" + self.Name());
+
+             detach_var->SetPersistable(self.Persistable());
+             detach_var->SetType(self.Type());
+             detach_var->SetDataType(self.DataType());
+
+             if (self.Var().IsType<framework::LoDTensor>()) {
+               const auto &origin_tensor =
+                   self.Var().Get<framework::LoDTensor>();
+               PADDLE_ENFORCE_EQ(
+                   origin_tensor.IsInitialized(), true,
+                   platform::errors::InvalidArgument(
+                       "Tensor %s has not been initialized!", self.Name()));
+
+               auto *detach_tensor =
+                   detach_var->MutableVar()->GetMutable<framework::LoDTensor>();
+               detach_tensor->ShareDataWith(origin_tensor);
+               // NOTE(liym27): Call ShareInplaceVersionCounterWith to share the
+               // same TensorInplaceVersion, which is used to check whether
+               // inplace
+               // operations are correct.
+               detach_tensor->ShareInplaceVersionCounterWith(origin_tensor);
+             } else {
+               const auto &origin_selected_rows =
+                   self.Var().Get<framework::SelectedRows>();
+               PADDLE_ENFORCE_EQ(
+                   origin_selected_rows.value().IsInitialized(), true,
+                   platform::errors::InvalidArgument(
+                       "Tensor %s has not been initialized!", self.Name()));
+
+               auto *detach_selected_rows =
+                   detach_var->MutableVar()
+                       ->GetMutable<framework::SelectedRows>();
+               detach_selected_rows->set_height(origin_selected_rows.height());
+               detach_selected_rows->set_rows(origin_selected_rows.rows());
+               detach_selected_rows->mutable_value()->ShareDataWith(
+                   origin_selected_rows.value());
+               detach_selected_rows->mutable_value()
+                   ->ShareInplaceVersionCounterWith(
+                       origin_selected_rows.value());
+             }
+             VLOG(3) << "The detached Tensor(" << detach_var->Name()
+                     << ") share data with " << self.Name();
+             return detach_var;
+           },
+           py::return_value_policy::take_ownership, R"DOC(
 
         Returns a new Tensor, detached from the current graph.
         It will share data with origin Tensor and always doesn't have a Tensor copy.