diff --git a/paddle/fluid/framework/details/share_tensor_buffer_functor.cc b/paddle/fluid/framework/details/share_tensor_buffer_functor.cc
index fb43bfbf342ea282b517694305e26990069dbf07..6fdec553f3d65debdf8f6d95eeeb8ebe30b4a36a 100644
--- a/paddle/fluid/framework/details/share_tensor_buffer_functor.cc
+++ b/paddle/fluid/framework/details/share_tensor_buffer_functor.cc
@@ -104,7 +104,7 @@ void ShareTensorBufferFunctor::operator()(Scope *exec_scope) {
       // If in_var is inplaced in the previous batch and we want to fetch
       // in_var in the current batch, we have to reset memory of out_var
       // to avoid wrong calculation result.
-      if (in_tensor.Holder() == out_tensor->Holder()) {
+      if (out_tensor->IsSharedBufferWith(in_tensor)) {
        VLOG(1) << "Clear " << out_var_names_[i]
                << " because you may want to fetch an inplaced variable "
                << in_var_info->Name()
diff --git a/paddle/fluid/framework/tensor.h b/paddle/fluid/framework/tensor.h
index f4d3457003253ca9dab2715bdfd156f0b71910b2..0b95e585d71edbc20e18f9ddb7efa3ed51cdb200 100644
--- a/paddle/fluid/framework/tensor.h
+++ b/paddle/fluid/framework/tensor.h
@@ -160,6 +160,10 @@ class Tensor {
     offset_ = tensor.offset_;
   }

+  bool IsSharedBufferWith(const Tensor& src) const {
+    return holder_ && holder_ == src.Holder();
+  }
+
   const std::shared_ptr<memory::Allocation>& Holder() const { return holder_; }

   size_t offset() const { return offset_; }
diff --git a/paddle/fluid/operators/elementwise/elementwise_op_function.h b/paddle/fluid/operators/elementwise/elementwise_op_function.h
index 3710e008ca1b99c151cd53248dcf740ff5544c82..23afa7527966fe31e634468cbc52294377ddbecc 100644
--- a/paddle/fluid/operators/elementwise/elementwise_op_function.h
+++ b/paddle/fluid/operators/elementwise/elementwise_op_function.h
@@ -1100,7 +1100,7 @@ void CommonElementwiseBroadcastBackward(
   // for inplace strategy. memset will make dx and dout clear and get wrong
   // result.
-  if (dx && dout.Holder() == dx->Holder()) {
+  if (dx && dx->IsSharedBufferWith(dout)) {
     dx->clear();
     dx->mutable_data<T>(x_dims, ctx.GetPlace());
   }