From 2982046b54eacfd1d6fc3028216c56717f3e4fdd Mon Sep 17 00:00:00 2001
From: Chen Weihang <chenweihang@baidu.com>
Date: Mon, 17 Jul 2023 10:00:00 +0800
Subject: [PATCH] remove useless move (#55430)

---
 paddle/fluid/eager/tensor_wrapper.h | 14 ++++++--------
 paddle/phi/api/lib/tensor.cc        | 20 ++++++++++----------
 2 files changed, 16 insertions(+), 18 deletions(-)

diff --git a/paddle/fluid/eager/tensor_wrapper.h b/paddle/fluid/eager/tensor_wrapper.h
index 63bb7374d9a..47b205e6fc7 100644
--- a/paddle/fluid/eager/tensor_wrapper.h
+++ b/paddle/fluid/eager/tensor_wrapper.h
@@ -63,10 +63,9 @@ class TensorWrapper {
           static_cast<phi::DenseTensor*>(tensor.impl().get());
       // TODO(jiabin): It's not a good idea to set memory size to zero, find
       // another way and change this.
-      intermidiate_tensor_.set_impl(
-          std::move(std::make_shared<phi::DenseTensor>(
-              std::make_shared<phi::Allocation>(nullptr, 0, tensor.place()),
-              std::move(dense_tensor->meta()))));
+      intermidiate_tensor_.set_impl(std::make_shared<phi::DenseTensor>(
+          std::make_shared<phi::Allocation>(nullptr, 0, tensor.place()),
+          dense_tensor->meta()));
     } else {
       PADDLE_THROW(paddle::platform::errors::Fatal(
           "Unrecognized tensor type for no_need_buffer feature"));
@@ -77,10 +76,9 @@ class TensorWrapper {
         tensor.is_dense_tensor() && tensor.initialized()) {
       phi::DenseTensor* dense_tensor =
           static_cast<phi::DenseTensor*>(tensor.impl().get());
-      intermidiate_tensor_.set_impl(
-          std::move(std::make_shared<phi::DenseTensor>(
-              std::make_shared<phi::Allocation>(nullptr, 0, tensor.place()),
-              dense_tensor->meta())));
+      intermidiate_tensor_.set_impl(std::make_shared<phi::DenseTensor>(
+          std::make_shared<phi::Allocation>(nullptr, 0, tensor.place()),
+          dense_tensor->meta()));
       auto pack_hook = egr::SavedTensorsHooks::GetInstance().GetPackHook();
       unpack_hook_ = egr::SavedTensorsHooks::GetInstance().GetUnPackHook();
       packed_value_ = (*pack_hook)(tensor);
diff --git a/paddle/phi/api/lib/tensor.cc b/paddle/phi/api/lib/tensor.cc
index e8caf525308..40319fa9ba6 100644
--- a/paddle/phi/api/lib/tensor.cc
+++ b/paddle/phi/api/lib/tensor.cc
@@ -64,10 +64,10 @@ Tensor::Tensor(const Place &place) {
       "the `place`, and datatype, shape, layout, etc. is also "
       "required.";
   DefaultAllocator alloc(place);
-  impl_ = std::move(std::make_shared<phi::DenseTensor>(
+  impl_ = std::make_shared<phi::DenseTensor>(
       &alloc,
-      std::move(phi::DenseTensorMeta(
-          phi::DataType::FLOAT32, phi::make_ddim({}), phi::DataLayout::NCHW))));
+      phi::DenseTensorMeta(
+          phi::DataType::FLOAT32, phi::make_ddim({}), phi::DataLayout::NCHW));
 }
 
 Tensor::Tensor(const Place &place, const std::vector<int64_t> &shape) {
@@ -80,11 +80,11 @@ Tensor::Tensor(const Place &place, const std::vector<int64_t> &shape) {
       "the `place` and `shape`, and datatype, layout, etc. is also "
       "required.";
   DefaultAllocator alloc(place);
-  impl_ = std::move(std::make_shared<phi::DenseTensor>(
+  impl_ = std::make_shared<phi::DenseTensor>(
       &alloc,
-      std::move(phi::DenseTensorMeta(phi::DataType::FLOAT32,
-                                     phi::make_ddim({shape}),
-                                     phi::DataLayout::NCHW))));
+      phi::DenseTensorMeta(phi::DataType::FLOAT32,
+                           phi::make_ddim({shape}),
+                           phi::DataLayout::NCHW));
 }
 
 Tensor::Tensor(std::shared_ptr<phi::TensorBase> tensor_impl,
@@ -338,11 +338,11 @@ void *Tensor::data() {
 // TODO(chenweihang): replace slice impl by API
 Tensor Tensor::slice(int64_t begin_idx, int64_t end_idx) const {
   if (is_dense_tensor()) {
-    return Tensor(std::make_shared<phi::DenseTensor>(
-        std::move(phi::DenseTensorUtils::Slice(
+    return Tensor(
+        std::make_shared<phi::DenseTensor>(phi::DenseTensorUtils::Slice(
             *(static_cast<phi::DenseTensor *>(impl_.get())),
             begin_idx,
-            end_idx))));
+            end_idx)));
   } else {
     PADDLE_THROW(phi::errors::Unimplemented(
         "Only support slice operation on DenseTensor now."));
-- 
GitLab