Unverified commit 2982046b, authored by Chen Weihang, committed by GitHub

remove useless move (#55430)

Parent 07567939
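
A note on the pattern being removed: wrapping a prvalue such as the result of std::make_shared in std::move is redundant, because the temporary is already an rvalue and picks the move (or converting-move) constructor on its own. Below is a minimal sketch of the TensorWrapper/Tensor cases in this diff, using hypothetical stand-ins (TensorBaseLike, DenseTensorLike, set_impl are illustrations, not Paddle's real types):

#include <memory>
#include <utility>

// Hypothetical stand-ins for phi::TensorBase / phi::DenseTensor.
struct TensorBaseLike { virtual ~TensorBaseLike() = default; };
struct DenseTensorLike : TensorBaseLike {};

// Hypothetical sink mirroring the shape of TensorWrapper::set_impl.
void set_impl(std::shared_ptr<TensorBaseLike> impl) { (void)impl; }

int main() {
  // Before the patch: the extra std::move changes nothing. The prvalue
  // returned by make_shared is already an rvalue, so the converting move
  // constructor shared_ptr<TensorBaseLike>(shared_ptr<DenseTensorLike>&&)
  // is selected either way.
  set_impl(std::move(std::make_shared<DenseTensorLike>()));

  // After the patch: identical behavior, clearer code. (When source and
  // destination types match exactly, dropping std::move can additionally
  // re-enable copy elision.)
  set_impl(std::make_shared<DenseTensorLike>());
  return 0;
}

Clang and GCC diagnose the most common variants of this (a moved temporary in a return statement or initialization) via -Wpessimizing-move / -Wredundant-move; call-argument and assignment cases like the ones in this patch generally have to be caught in review.
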
......
@@ -63,10 +63,9 @@ class TensorWrapper {
           static_cast<phi::DenseTensor*>(tensor.impl().get());
       // TODO(jiabin): It's not a good idea to set memory size to zero, find
       // another way and change this.
-      intermidiate_tensor_.set_impl(
-          std::move(std::make_shared<phi::DenseTensor>(
-              std::make_shared<phi::Allocation>(nullptr, 0, tensor.place()),
-              std::move(dense_tensor->meta()))));
+      intermidiate_tensor_.set_impl(std::make_shared<phi::DenseTensor>(
+          std::make_shared<phi::Allocation>(nullptr, 0, tensor.place()),
+          dense_tensor->meta()));
     } else {
       PADDLE_THROW(paddle::platform::errors::Fatal(
           "Unrecognized tensor type for no_need_buffer feature"));
......
@@ -77,10 +76,9 @@ class TensorWrapper {
         tensor.is_dense_tensor() && tensor.initialized()) {
       phi::DenseTensor* dense_tensor =
           static_cast<phi::DenseTensor*>(tensor.impl().get());
-      intermidiate_tensor_.set_impl(
-          std::move(std::make_shared<phi::DenseTensor>(
-              std::make_shared<phi::Allocation>(nullptr, 0, tensor.place()),
-              dense_tensor->meta())));
+      intermidiate_tensor_.set_impl(std::make_shared<phi::DenseTensor>(
+          std::make_shared<phi::Allocation>(nullptr, 0, tensor.place()),
+          dense_tensor->meta()));
       auto pack_hook = egr::SavedTensorsHooks::GetInstance().GetPackHook();
       unpack_hook_ = egr::SavedTensorsHooks::GetInstance().GetUnPackHook();
       packed_value_ = (*pack_hook)(tensor);
......
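
The first TensorWrapper hunk above also dropped the inner std::move(dense_tensor->meta()). Assuming meta() returns a const reference, as accessors of this kind usually do (an assumption here, not verified against the phi headers), that move was a silent no-op: std::move on a const lvalue yields a const rvalue, which still binds to the copy constructor. A small sketch with a hypothetical Meta/accessor pair:

#include <cstdint>
#include <string>
#include <utility>
#include <vector>

// Hypothetical stand-in for phi::DenseTensorMeta.
struct Meta {
  std::vector<std::int64_t> dims;
  std::string layout;
};

// Hypothetical stand-in for phi::DenseTensor with a const accessor,
// mirroring the assumed shape of DenseTensor::meta().
struct DenseTensorLike {
  Meta meta_;
  const Meta& meta() const { return meta_; }
};

int main() {
  DenseTensorLike t{{{2, 3}, "NCHW"}};

  // "Moving" from a const reference still calls Meta(const Meta&): the
  // const rvalue cannot bind to Meta(Meta&&), so this is a plain copy.
  Meta a(std::move(t.meta()));

  // Same cost, without the misleading std::move.
  Meta b(t.meta());

  (void)a;
  (void)b;
  return 0;
}

clang-tidy's performance-move-const-arg check warns when std::move is applied to a const argument, so it is one way to find remaining instances of this variant mechanically.
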
......
@@ -64,10 +64,10 @@ Tensor::Tensor(const Place &place) {
              "the `place`, and datatype, shape, layout, etc. is also "
              "required.";
   DefaultAllocator alloc(place);
-  impl_ = std::move(std::make_shared<phi::DenseTensor>(
-      &alloc,
-      std::move(phi::DenseTensorMeta(
-          phi::DataType::FLOAT32, phi::make_ddim({}), phi::DataLayout::NCHW))));
+  impl_ = std::make_shared<phi::DenseTensor>(
+      &alloc,
+      phi::DenseTensorMeta(
+          phi::DataType::FLOAT32, phi::make_ddim({}), phi::DataLayout::NCHW));
 }
 
 Tensor::Tensor(const Place &place, const std::vector<int64_t> &shape) {
......
@@ -80,11 +80,11 @@ Tensor::Tensor(const Place &place, const std::vector<int64_t> &shape) {
              "the `place` and `shape`, and datatype, layout, etc. is also "
              "required.";
   DefaultAllocator alloc(place);
-  impl_ = std::move(std::make_shared<phi::DenseTensor>(
-      &alloc,
-      std::move(phi::DenseTensorMeta(phi::DataType::FLOAT32,
-                                     phi::make_ddim({shape}),
-                                     phi::DataLayout::NCHW))));
+  impl_ = std::make_shared<phi::DenseTensor>(
+      &alloc,
+      phi::DenseTensorMeta(phi::DataType::FLOAT32,
+                           phi::make_ddim({shape}),
+                           phi::DataLayout::NCHW));
 }
 
 Tensor::Tensor(std::shared_ptr<phi::TensorBase> tensor_impl,
......
@@ -338,11 +338,11 @@ void *Tensor::data() {
 // TODO(chenweihang): replace slice impl by API
 Tensor Tensor::slice(int64_t begin_idx, int64_t end_idx) const {
   if (is_dense_tensor()) {
-    return Tensor(std::make_shared<phi::DenseTensor>(
-        std::move(phi::DenseTensorUtils::Slice(
-            *(static_cast<phi::DenseTensor *>(impl_.get())),
-            begin_idx,
-            end_idx))));
+    return Tensor(
+        std::make_shared<phi::DenseTensor>(phi::DenseTensorUtils::Slice(
+            *(static_cast<phi::DenseTensor *>(impl_.get())),
+            begin_idx,
+            end_idx)));
   } else {
     PADDLE_THROW(phi::errors::Unimplemented(
         "Only support slice operation on DenseTensor now."));
......
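
The slice hunk above is the same rule seen through std::make_shared's forwarding: assuming phi::DenseTensorUtils::Slice returns its result by value (as the call shape suggests), that result is a prvalue, and make_shared's Args&&... already forwards it as an rvalue, so the wrapping std::move bought nothing. A short sketch with hypothetical stand-ins (DenseTensorLike, SliceLike):

#include <cstdint>
#include <memory>
#include <utility>

// Hypothetical stand-in for a dense tensor and a Slice utility that
// returns the sliced tensor by value.
struct DenseTensorLike {
  std::int64_t begin = 0;
  std::int64_t end = 0;
};

DenseTensorLike SliceLike(const DenseTensorLike& src,
                          std::int64_t begin,
                          std::int64_t end) {
  (void)src;
  return DenseTensorLike{begin, end};
}

int main() {
  DenseTensorLike full{0, 100};

  // Before: std::move on the prvalue returned by SliceLike is redundant;
  // make_shared's forwarding reference already deduces it as an rvalue.
  auto a = std::make_shared<DenseTensorLike>(std::move(SliceLike(full, 10, 20)));

  // After: the same move construction happens inside make_shared, with one
  // less cast cluttering the call site.
  auto b = std::make_shared<DenseTensorLike>(SliceLike(full, 10, 20));

  (void)a;
  (void)b;
  return 0;
}
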