From 3ef2922bdc3bd216ac509207675fdb0d6f94746e Mon Sep 17 00:00:00 2001
From: zyfncg
Date: Fri, 28 Jan 2022 11:20:26 +0800
Subject: [PATCH] 【Pten】Remove WriteBackOutput in tensor_utils (#39291)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* remove remake densetensor

* fix eager test error

* fix bug in eager

* implement AllocateFrom

* remove WriteBackOutput

* fix problem of eager

Co-authored-by: zkh2016
---
 paddle/fluid/eager/eager_tensor.h         |  3 +-
 paddle/fluid/framework/custom_operator.cc | 12 ++---
 paddle/fluid/framework/operator.cc        | 19 -------
 paddle/fluid/framework/operator.h         |  3 --
 paddle/pten/api/lib/utils/tensor_utils.cc | 62 -----------------------
 paddle/pten/api/lib/utils/tensor_utils.h  |  5 --
 6 files changed, 5 insertions(+), 99 deletions(-)

diff --git a/paddle/fluid/eager/eager_tensor.h b/paddle/fluid/eager/eager_tensor.h
index a15b16f06a..4383cf0fcf 100644
--- a/paddle/fluid/eager/eager_tensor.h
+++ b/paddle/fluid/eager/eager_tensor.h
@@ -245,8 +245,7 @@ class EagerTensor final {
       auto tensor_dense =
           std::dynamic_pointer_cast<pten::DenseTensor>(tensor_->impl());
       if (tensor_dense && tensor_dense.get()) {
-        paddle::experimental::SharesStorage(tensor_dense.get(),
-                                            framework_tensor);
+        *framework_tensor = *tensor_dense;
       } else {
         PADDLE_THROW(paddle::platform::errors::Fatal(
             "Unrecognized egr::EagerTensor type, only "
diff --git a/paddle/fluid/framework/custom_operator.cc b/paddle/fluid/framework/custom_operator.cc
index 30fbee5778..0fa1d1f8cb 100644
--- a/paddle/fluid/framework/custom_operator.cc
+++ b/paddle/fluid/framework/custom_operator.cc
@@ -207,17 +207,13 @@ static void RunKernelFunc(const framework::ExecutionContext& ctx,
                               "Tensors.",
                               vec_true_outs.size(), outs.size()));
         for (size_t j = 0; j < vec_true_outs.size(); ++j) {
-          experimental::SharesStorage(
-              std::dynamic_pointer_cast<pten::DenseTensor>(outs.at(j).impl())
-                  .get(),
-              vec_true_outs.at(j));
+          *vec_true_outs.at(j) =
+              *std::dynamic_pointer_cast<pten::DenseTensor>(outs.at(j).impl());
         }
       } else {
         auto* true_out = ctx.Output<Tensor>(out_name);
-        experimental::SharesStorage(
-            std::dynamic_pointer_cast<pten::DenseTensor>(outs.at(i).impl())
-                .get(),
-            true_out);
+        *true_out =
+            *std::dynamic_pointer_cast<pten::DenseTensor>(outs.at(i).impl());
       }
     }
   } catch (platform::EnforceNotMet& exception) {
diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc
index 426b5ac8ff..5e6e20184f 100644
--- a/paddle/fluid/framework/operator.cc
+++ b/paddle/fluid/framework/operator.cc
@@ -2105,24 +2105,5 @@ void OperatorWithKernel::BuildPtenKernelContext(
   }
 }
 
-void OperatorWithKernel::WriteBackToOutputs(
-    RuntimeContext* ctx, pten::KernelContext* pt_kernel_context) const {
-  auto& output_names = std::get<2>(pt_kernel_signature_->args);
-
-  for (size_t i = 0; i < output_names.size(); ++i) {
-    auto& outs_vector = ctx->outputs.at(output_names[i]);
-
-    auto& range_pair = pt_kernel_context->OutputRangeAt(i);
-    auto pten_outs = pt_kernel_context->MutableOutputBetween<pten::DenseTensor>(
-        range_pair.first, range_pair.second);
-
-    for (size_t j = 0; j < pten_outs.size(); ++j) {
-      if (pten_outs[j]) {
-        experimental::MakeVariableFromPtenTensor(pten_outs[j], outs_vector[j]);
-      }
-    }
-  }
-}
-
 }  // namespace framework
 }  // namespace paddle
diff --git a/paddle/fluid/framework/operator.h b/paddle/fluid/framework/operator.h
index 9ad13299a3..9039efbc7c 100644
--- a/paddle/fluid/framework/operator.h
+++ b/paddle/fluid/framework/operator.h
@@ -616,9 +616,6 @@ class OperatorWithKernel : public OperatorBase {
                               platform::DeviceContext* dev_ctx,
                               pten::KernelContext* pt_kernel_context) const;
 
-  void WriteBackToOutputs(RuntimeContext* ctx,
-                          pten::KernelContext* pt_kernel_context) const;
-
   pten::KernelSignature* PtenKernelSignature() const {
     return pt_kernel_signature_.get();
   }
diff --git a/paddle/pten/api/lib/utils/tensor_utils.cc b/paddle/pten/api/lib/utils/tensor_utils.cc
index d144133175..1289353a6c 100644
--- a/paddle/pten/api/lib/utils/tensor_utils.cc
+++ b/paddle/pten/api/lib/utils/tensor_utils.cc
@@ -198,68 +198,6 @@ pten::ScalarArray MakePtenScalarArrayFromVarList(
   return {vector_data};
 }
 
-void SharesStorageBase(pten::DenseTensor* src, paddle::framework::Tensor* dst) {
-  PADDLE_ENFORCE_NOT_NULL(
-      src,
-      platform::errors::InvalidArgument(
-          "The source DenseTensor is nullptr when move allocation."));
-  PADDLE_ENFORCE_NOT_NULL(
-      dst,
-      platform::errors::InvalidArgument(
-          "The destination Tensor is nullptr when move allocation."));
-  dst->Resize(src->dims());
-  dst->ResetHolderWithType(src->Holder(),
-                           pten::TransToProtoVarType(src->dtype()));
-  dst->set_offset(src->meta().offset);
-}
-
-void SharesStorage(pten::DenseTensor* src, paddle::framework::Tensor* dst) {
-  SharesStorageBase(src, static_cast<paddle::framework::Tensor*>(dst));
-  SetLoD(dst->mutable_lod(), src->lod());
-}
-
-static bool IsSameAllocation(const std::shared_ptr<memory::Allocation>& a,
-                             const std::shared_ptr<memory::Allocation>& b) {
-  return a->ptr() == b->ptr() && a->size() == b->size() &&
-         platform::is_same_place(a->place(), b->place());
-}
-
-void MakeVariableFromPtenTensor(pten::DenseTensor* src,
-                                framework::Variable* variable) {
-  if (variable->IsType<framework::LoDTensor>()) {
-    auto* tensor = variable->GetMutable<framework::LoDTensor>();
-
-    auto dtype = pten::TransToProtoVarType(src->dtype());
-    tensor->Resize(src->dims());
-    SetLoD(tensor->mutable_lod(), src->lod());
-
-    if (!tensor->IsInitialized() ||
-        (tensor->IsInitialized() &&
-         !IsSameAllocation(tensor->Holder(), src->Holder()))) {
-      tensor->ResetHolderWithType(std::move(src->Holder()), dtype);
-    } else {
-      // Even the pten tensor and Variable have the same Alloctation (both have
-      // the same pointer address, same size and same place)
-      // but there is possible that they do not have the same data_type.
-      // so, here we set the variable's type with the pten tensor dtype.
-      tensor->set_type(dtype);
-    }
-
-  } else if (variable->IsType<pten::SelectedRows>()) {
-    auto* tensor = variable->GetMutable<pten::SelectedRows>();
-    auto dtype = pten::TransToProtoVarType(src->dtype());
-
-    if (!tensor->value().IsInitialized()) {
-      tensor->mutable_value()->ResetHolderWithType(std::move(src->Holder()),
-                                                   dtype);
-    }
-  } else {
-    PADDLE_THROW(platform::errors::Unimplemented(
-        "Unsupported shared input `%s` type now when call pt kernel.",
-        framework::ToTypeName(variable->Type())));
-  }
-}
-
 void ResetTensorByArgDef(pten::DenseTensor* dst,
                          const pten::TensorArgDef& arg_def) {
   VLOG(5) << "ResetTensor by TensorArgDef.";
diff --git a/paddle/pten/api/lib/utils/tensor_utils.h b/paddle/pten/api/lib/utils/tensor_utils.h
index 32d65eded6..bf8d9b8317 100644
--- a/paddle/pten/api/lib/utils/tensor_utils.h
+++ b/paddle/pten/api/lib/utils/tensor_utils.h
@@ -45,11 +45,6 @@ pten::ScalarArray MakePtenScalarArrayFromVar(
 pten::ScalarArray MakePtenScalarArrayFromVarList(
     const std::vector<framework::Variable*>& variable_list);
 
-void SharesStorage(pten::DenseTensor* src, paddle::framework::Tensor* dst);
-
-void MakeVariableFromPtenTensor(pten::DenseTensor* src,
-                                framework::Variable* variable);
-
 void ResetTensorByArgDef(pten::DenseTensor* dst,
                          const pten::TensorArgDef& arg_def);
 
-- 
GitLab
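
Why the one-line assignment can replace SharesStorage: copy-assigning a
pten::DenseTensor shares its allocation holder instead of deep-copying the
data, and at this stage of the refactor a framework::Tensor can be assigned
from a DenseTensor, so the destination ends up viewing the storage the kernel
already wrote. The removed SharesStorageBase achieved the same effect by hand
(Resize, ResetHolderWithType, set_offset). The sketch below is only an
illustrative model of that holder-sharing behavior under assumed names;
Allocation, DenseTensorLike, and FrameworkTensorLike are hypothetical
stand-ins, not the real Paddle classes.

// Minimal model (assumed names, not Paddle code) of why "*dst = *src" can
// stand in for an explicit SharesStorage() helper: the tensor data sits
// behind a shared_ptr holder, so copy assignment shares storage, not data.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <vector>

struct Allocation {            // stand-in for memory::Allocation
  std::vector<float> data;
};

struct DenseTensorLike {       // stand-in for pten::DenseTensor
  std::shared_ptr<Allocation> holder;  // shared, not cloned, on assignment
  std::vector<int64_t> dims;
  std::size_t offset = 0;
};

// stand-in for framework::Tensor, assignable from a DenseTensorLike
struct FrameworkTensorLike : DenseTensorLike {};

int main() {
  DenseTensorLike src;
  src.holder = std::make_shared<Allocation>();
  src.holder->data = {1.f, 2.f, 3.f};
  src.dims = {3};

  FrameworkTensorLike dst;
  // The pattern adopted by the patch: *framework_tensor = *tensor_dense;
  static_cast<DenseTensorLike&>(dst) = src;

  // Both objects now point at the same allocation; no element was copied.
  assert(dst.holder.get() == src.holder.get());
  dst.holder->data[0] = 42.f;
  assert(src.holder->data[0] == 42.f);
  return 0;
}

In this model the single assignment carries the holder, dims, and offset in
one step, which is why the removed member-by-member helper is no longer
needed once the output tensor type supports direct assignment.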