From 8c44ad471dfa8169b0d009f59fb2274213a57e17 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E7=9F=B3=E6=99=93=E4=BC=9F?= <39303645+Shixiaowei02@users.noreply.github.com>
Date: Wed, 17 Nov 2021 19:15:51 +0800
Subject: [PATCH] change the meta modification rules, test=develop (#37255)

---
 paddle/pten/core/CMakeLists.txt               |  3 +-
 paddle/pten/core/compat_utils.h               |  9 +++-
 paddle/pten/core/dense_tensor.cc              | 29 ++++--------
 paddle/pten/core/dense_tensor.h               | 27 ++++-------
 paddle/pten/core/tensor_meta.cc               | 47 +++++++++++++++++++
 paddle/pten/core/tensor_meta.h                | 36 ++------------
 paddle/pten/kernels/cpu/manipulation.cc       | 27 +++++++----
 paddle/pten/kernels/cuda/manipulation.cu      | 28 +++++++----
 .../kernels/functions/general/manipulation.h  |  3 +-
 paddle/pten/kernels/xpu/manipulation.cc       |  3 +-
 paddle/pten/tests/core/test_dense_tensor.cc   |  9 ++--
 11 files changed, 120 insertions(+), 101 deletions(-)
 create mode 100644 paddle/pten/core/tensor_meta.cc

diff --git a/paddle/pten/core/CMakeLists.txt b/paddle/pten/core/CMakeLists.txt
index ae09507193e..e19d0a490ce 100644
--- a/paddle/pten/core/CMakeLists.txt
+++ b/paddle/pten/core/CMakeLists.txt
@@ -10,4 +10,5 @@ cc_library(kernel_factory SRCS kernel_factory.cc DEPS enforce)
 cc_library(kernel_context SRCS kernel_context.cc DEPS enforce device_context)
 
 cc_library(tensor_base SRCS tensor_base.cc allocator.cc storage.cc DEPS enforce)
-cc_library(dense_tensor SRCS dense_tensor.cc DEPS tensor_base)
+cc_library(tensor_meta SRCS tensor_meta.cc DEPS enforce)
+cc_library(dense_tensor SRCS dense_tensor.cc DEPS tensor_meta tensor_base)
diff --git a/paddle/pten/core/compat_utils.h b/paddle/pten/core/compat_utils.h
index a0602f33e3d..6c8eeec6553 100644
--- a/paddle/pten/core/compat_utils.h
+++ b/paddle/pten/core/compat_utils.h
@@ -49,7 +49,14 @@ class CompatibleDenseTensorUtils {
   static DenseTensor Slice(DenseTensor* tensor,
                            int64_t begin_idx,
                            int64_t end_idx) {
-    tensor->check_memory_size();
+    size_t bytes = tensor->numel() * SizeOf(tensor->dtype());
+    PADDLE_ENFORCE_GE(tensor->capacity(),
+                      bytes,
+                      paddle::platform::errors::InvalidArgument(
+                          "The memory size %d should be enough to meet the "
+                          "volume required by metadata %d.",
+                          tensor->capacity(),
+                          bytes));
     PADDLE_ENFORCE_GE(begin_idx,
                       0,
                       paddle::platform::errors::OutOfRange(
diff --git a/paddle/pten/core/dense_tensor.cc b/paddle/pten/core/dense_tensor.cc
index bb38c53ada0..b972770f556 100644
--- a/paddle/pten/core/dense_tensor.cc
+++ b/paddle/pten/core/dense_tensor.cc
@@ -112,29 +112,18 @@ const void* DenseTensor::data() const {
   return storage_->data();
 }
 
-void DenseTensor::check_memory_size() const {
-  size_t bytes = numel() * SizeOf(dtype());
-  PADDLE_ENFORCE_GE(memory_size(),
-                    bytes,
-                    paddle::platform::errors::InvalidArgument(
-                        "The memory size %d should be enough to meet the "
-                        "volume required by metadata %d.",
-                        memory_size(),
-                        bytes));
-}
-
-void DenseTensor::Resize(const DDim& dims) {
-  if (product(dims) == product(meta_.dims)) {
-    set_dims(dims);
-  } else {
-    meta_.dims = dims;
-    storage_->Clear();
-  }
+void DenseTensor::set_meta(DenseTensorMeta&& meta) {
+  PADDLE_ENFORCE(!meta_.valid(),
+                 paddle::platform::errors::InvalidArgument(
+                     "Only when the original attribute of Tensor is "
+                     "incomplete, can it be reset."));
+  meta_ = std::move(meta);
 }
 
-void DenseTensor::set_dims(const DDim& dims) {
-  CHECK(product(dims) == product(meta_.dims));
+void DenseTensor::Resize(const DDim& dims, const LoD& lod) {
   meta_.dims = dims;
+  meta_.lod = lod;
+  mutable_data();
 }
 
 #define DATA_MEMBER_FUNC_INSTANTIATION(dtype) \
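Editorial illustration, not part of the patch: a minimal sketch of the Resize contract introduced in dense_tensor.cc above. Resize no longer clears the storage when the element count changes; it writes the dims (and optionally the lod) into the meta and calls mutable_data() right away, so the buffer grows eagerly. It assumes the pten headers and a concrete Allocator (for example the FancyAllocator used by the tests) are available; ResizeSketch is a hypothetical helper name.

    #include <memory>

    #include "paddle/pten/core/dense_tensor.h"

    void ResizeSketch(const std::shared_ptr<pten::Allocator>& alloc) {
      pten::DenseTensorMeta meta(pten::DataType::INT8,
                                 paddle::framework::make_ddim({1, 2}));
      pten::DenseTensor t(alloc, meta);
      // capacity() reports the bytes held by storage: 2 elements x 1 byte = 2.
      t.Resize({1, 2, 3});
      // With this patch the storage already holds 6 bytes here; previously
      // memory_size() dropped to 0 until mutable_data() was called again.
    }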
diff --git a/paddle/pten/core/dense_tensor.h b/paddle/pten/core/dense_tensor.h
index 8ece80f5291..9d6d05551a1 100644
--- a/paddle/pten/core/dense_tensor.h
+++ b/paddle/pten/core/dense_tensor.h
@@ -88,9 +88,6 @@ class DenseTensor : public TensorBase,
     return meta_.lod;
   }
 
-  /// \brief Set the lod of the tensor.
-  void set_lod(const std::vector<std::vector<size_t>>& lod) { meta_.lod = lod; }
-
   /// \brief Returns the data type of the tensor.
   /// \return The data type of the tensor.
   DataType dtype() const noexcept override { return meta_.type; }
@@ -107,6 +104,11 @@ class DenseTensor : public TensorBase,
   /// \return The meta information of the tensor.
   const DenseTensorMeta& meta() const noexcept { return meta_; }
 
+  /// \brief Sets the meta information of the tensor. Only when the original
+  /// attribute of Tensor is incomplete, can it be reset.
+  /// \param meta The meta information of the tensor.
+  void set_meta(DenseTensorMeta&& meta);
+
   /// \brief Test whether the metadata is valid.
   /// \return Whether the metadata is valid.
   bool valid() const noexcept override { return meta_.valid(); }
@@ -121,25 +123,16 @@ class DenseTensor : public TensorBase,
   /// \return Whether the storage is shared with other objects.
   bool IsSharedWith(const DenseTensor& b) const;
 
-  /// \brief Change the dims information in the metadata. If the new size is
-  /// inconsistent with the original value, the storage area will be released
-  /// to avoid wrong access.
+  /// \brief Change the shape information in the metadata. If the new size is
+  /// larger than the original value, the storage area will be reallocated.
   /// \param dims The new dims of the dense tensor.
-  void Resize(const DDim& dims);
-
-  /// \brief Change the dims information in the metadata.
-  /// \param dims The new dims of the dense tensor. The product of the dims
-  /// elements must be consistent with the original value.
-  void set_dims(const DDim& dims);
+  /// \param lod The new lod of the dense tensor.
+  void Resize(const DDim& dims, const LoD& lod = {});
 
   /// \brief Returns the actual storage size occupied by tensor, may be larger
   /// than its shape dims.
   /// \return The actual storage size occupied by tensor.
-  size_t memory_size() const { return storage_->size(); }
-
-  /// \brief Check that the storage area is large enough to hold the data of the
-  /// metadata size, and throw an exception if the conditions are not met.
-  void check_memory_size() const;
+  size_t capacity() const { return storage_->size(); }
 
   /// \brief Release the storage area for other purposes. Because of the
   /// destruction of encapsulation, we do not support two dense tensors directly
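Editorial illustration, not part of the patch: the intended use of the set_meta() declared above. It only succeeds while the tensor's current meta is still incomplete (valid() == false); once a complete meta is installed, a second call trips the PADDLE_ENFORCE in dense_tensor.cc. InstallMetaSketch is a hypothetical helper name; it assumes the pten headers are available.

    #include <utility>

    #include "paddle/pten/core/dense_tensor.h"

    void InstallMetaSketch(pten::DenseTensor* t) {
      // Precondition: !t->meta().valid(), i.e. dtype/layout are still undefined.
      pten::DenseTensorMeta meta(pten::DataType::FLOAT32,
                                 paddle::framework::make_ddim({2, 3}),
                                 pten::DataLayout::NCHW);
      t->set_meta(std::move(meta));  // the rvalue meta is moved into the tensor
    }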
diff --git a/paddle/pten/core/tensor_meta.cc b/paddle/pten/core/tensor_meta.cc
new file mode 100644
index 00000000000..ebdcd9b5f25
--- /dev/null
+++ b/paddle/pten/core/tensor_meta.cc
@@ -0,0 +1,47 @@
+/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/pten/core/tensor_meta.h"
+
+namespace pten {
+
+DenseTensorMeta::DenseTensorMeta(DataType type, const DDim& dims)
+    : dims(dims), type(type) {}
+
+DenseTensorMeta::DenseTensorMeta(DataType type,
+                                 const DDim& dims,
+                                 DataLayout layout)
+    : dims(dims), type(type), layout(layout) {}
+
+DenseTensorMeta::DenseTensorMeta(DataType type,
+                                 const DDim& dims,
+                                 DataLayout layout,
+                                 const std::vector<std::vector<size_t>>& lod)
+    : dims(dims), type(type), layout(layout), lod(lod) {}
+
+bool DenseTensorMeta::valid() const noexcept {
+  bool valid{true};
+  valid = valid && (type != DataType::UNDEFINED);
+  valid = valid && (layout != DataLayout::UNDEFINED);
+  valid = valid && (is_scalar || product(dims) >= 0);
+  return valid;
+}
+
+bool operator==(const DenseTensorMeta& lhs, const DenseTensorMeta& rhs) {
+  bool ret = true;
+  return ret && (lhs.is_scalar == rhs.is_scalar) && (lhs.dims == rhs.dims) &&
+         (lhs.type == rhs.type) && (lhs.layout == rhs.layout) &&
+         (lhs.lod == rhs.lod) && (lhs.offset == rhs.offset);
+}
+}  // namespace pten
diff --git a/paddle/pten/core/tensor_meta.h b/paddle/pten/core/tensor_meta.h
index eae270171d8..f48d69260b5 100644
--- a/paddle/pten/core/tensor_meta.h
+++ b/paddle/pten/core/tensor_meta.h
@@ -52,42 +52,12 @@ struct DenseTensorMeta {
 
   /// During the entire life cycle of a DenseTensor, the following attributes
   /// marked with `const` are expected to remain unchanged.
-  const bool is_scalar{false};
+  bool is_scalar{false};
   DDim dims;
-  const DataType type{DataType::UNDEFINED};
-  const DataLayout layout{DataLayout::NCHW};
+  DataType type{DataType::UNDEFINED};
+  DataLayout layout{DataLayout::NCHW};
   LoD lod;
   size_t offset{0};
 };
 
-inline DenseTensorMeta::DenseTensorMeta(DataType type, const DDim& dims)
-    : dims(dims), type(type) {}
-
-inline DenseTensorMeta::DenseTensorMeta(DataType type,
-                                        const DDim& dims,
-                                        DataLayout layout)
-    : dims(dims), type(type), layout(layout) {}
-
-inline DenseTensorMeta::DenseTensorMeta(
-    DataType type,
-    const DDim& dims,
-    DataLayout layout,
-    const std::vector<std::vector<size_t>>& lod)
-    : dims(dims), type(type), layout(layout), lod(lod) {}
-
-inline bool DenseTensorMeta::valid() const noexcept {
-  bool valid{true};
-  valid = valid && (type != DataType::UNDEFINED);
-  valid = valid && (layout != DataLayout::UNDEFINED);
-  valid = valid && (is_scalar || product(dims) >= 0);
-  return valid;
-}
-
-inline bool operator==(const DenseTensorMeta& lhs, const DenseTensorMeta& rhs) {
-  bool ret = true;
-  return ret && (lhs.is_scalar == rhs.is_scalar) && (lhs.dims == rhs.dims) &&
-         (lhs.type == rhs.type) && (lhs.layout == rhs.layout) &&
-         (lhs.lod == rhs.lod) && (lhs.offset == rhs.offset);
-}
-
 }  // namespace pten
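Editorial illustration, not part of the patch: dropping the `const` qualifiers above makes DenseTensorMeta fields overwritable and the struct copy-assignable, which is what DenseTensor::set_meta(DenseTensorMeta&&) relies on, while the out-of-line definitions moved into tensor_meta.cc keep valid() and operator== behaving as before. MetaSketch is a hypothetical helper name; it assumes the pten headers are available.

    #include <cassert>

    #include "paddle/pten/core/tensor_meta.h"

    void MetaSketch() {
      pten::DenseTensorMeta a(pten::DataType::FLOAT32,
                              paddle::framework::make_ddim({2, 3}));
      pten::DenseTensorMeta b = a;        // copy construction
      b.layout = pten::DataLayout::NHWC;  // fields can now be overwritten in place
      assert(!(a == b));                  // field-wise comparison from tensor_meta.cc
      b = a;                              // copy assignment: ill-formed before this
                                          // patch because of the const members
      assert(a == b && a.valid());        // type/layout defined, product(dims) >= 0
    }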
diff --git a/paddle/pten/kernels/cpu/manipulation.cc b/paddle/pten/kernels/cpu/manipulation.cc
index 99699ea91ee..cc2826c77b7 100644
--- a/paddle/pten/kernels/cpu/manipulation.cc
+++ b/paddle/pten/kernels/cpu/manipulation.cc
@@ -44,17 +44,27 @@ void FlattenWithXShape(const CPUContext& dev_ctx,
   general::SetXShape(x, xshape);
 }
 
+void ReshapeFromVectorValImpl(const CPUContext& dev_ctx,
+                              const DenseTensor& x,
+                              const std::vector<int64_t>& shape,
+                              DenseTensor* out,
+                              bool set_lod) {
+  auto out_meta = InferShapeFromVecValue(x.meta(), shape);
+  if (&x != out) {
+    pten::Copy(dev_ctx, x, out);
+  }
+  if (set_lod) {
+    out->Resize(out_meta.dims, out_meta.lod);
+  } else {
+    out->Resize(out_meta.dims);
+  }
+}
+
 void ReshapeFromVectorVal(const CPUContext& dev_ctx,
                           const DenseTensor& x,
                           const std::vector<int64_t>& shape,
                           DenseTensor* out) {
-  auto out_meta = InferShapeFromVecValue(x.meta(), shape);
-  if (&x == out) {
-    out->Resize(out_meta.dims);
-    return;
-  }
-  pten::Copy(dev_ctx, x, out);
-  out->Resize(out_meta.dims);
+  ReshapeFromVectorValImpl(dev_ctx, x, shape, out, false);
 }
 
 void ReshapeFromVectorValWithXShape(const CPUContext& dev_ctx,
@@ -73,8 +83,7 @@ void ReshapeFromDT(const CPUContext& dev_ctx,
   auto* shape_data = shape.data<int>();
   auto vector_shape =
       std::vector<int64_t>(shape_data, shape_data + shape.numel());
-  ReshapeFromVectorVal(dev_ctx, x, vector_shape, out);
-  out->set_lod(x.lod());
+  ReshapeFromVectorValImpl(dev_ctx, x, vector_shape, out, true);
 }
 
 void ReshapeFromDTWithXShape(const CPUContext& dev_ctx,
diff --git a/paddle/pten/kernels/cuda/manipulation.cu b/paddle/pten/kernels/cuda/manipulation.cu
index b84694c0a9f..d2315965b28 100644
--- a/paddle/pten/kernels/cuda/manipulation.cu
+++ b/paddle/pten/kernels/cuda/manipulation.cu
@@ -44,18 +44,27 @@ void FlattenWithXShape(const CUDAContext& dev_ctx,
   general::SetXShape(x, xshape);
 }
 
+void ReshapeFromVectorValImpl(const CUDAContext& dev_ctx,
+                              const DenseTensor& x,
+                              const std::vector<int64_t>& shape,
+                              DenseTensor* out,
+                              bool set_lod) {
+  auto out_meta = InferShapeFromVecValue(x.meta(), shape);
+  if (&x != out) {
+    pten::Copy(dev_ctx, x, false, out);
+  }
+  if (set_lod) {
+    out->Resize(out_meta.dims, out_meta.lod);
+  } else {
+    out->Resize(out_meta.dims);
+  }
+}
+
 void ReshapeFromVectorVal(const CUDAContext& dev_ctx,
                           const DenseTensor& x,
                           const std::vector<int64_t>& shape,
                           DenseTensor* out) {
-  auto out_meta = InferShapeFromVecValue(x.meta(), shape);
-  if (&x == out) {
-    LOG(INFO) << "out_meta dims:" << out_meta.dims;
-    out->Resize(out_meta.dims);
-    return;
-  }
-  pten::Copy(dev_ctx, x, false, out);
-  out->Resize(out_meta.dims);
+  ReshapeFromVectorValImpl(dev_ctx, x, shape, out, false);
 }
 
 void ReshapeFromVectorValWithXShape(const CUDAContext& dev_ctx,
@@ -74,8 +83,7 @@ void ReshapeFromDT(const CUDAContext& dev_ctx,
   auto* shape_data = shape.data<int>();
   auto vector_shape =
       std::vector<int64_t>(shape_data, shape_data + shape.numel());
-  ReshapeFromVectorVal(dev_ctx, x, vector_shape, out);
-  out->set_lod(x.lod());
+  ReshapeFromVectorValImpl(dev_ctx, x, vector_shape, out, true);
 }
 
 void ReshapeFromDTWithXShape(const CUDAContext& dev_ctx,
diff --git a/paddle/pten/kernels/functions/general/manipulation.h b/paddle/pten/kernels/functions/general/manipulation.h
index b05691ed7c5..cade585792c 100644
--- a/paddle/pten/kernels/functions/general/manipulation.h
+++ b/paddle/pten/kernels/functions/general/manipulation.h
@@ -26,8 +26,7 @@ inline void SetXShape(const DenseTensor& x, DenseTensor* xshape) {
   for (int i = 0; i < in_dims.size(); ++i) {
     xshape_dims[i + 1] = in_dims[i];
   }
-  xshape->Resize(paddle::framework::make_ddim(xshape_dims));
-  xshape->set_lod(x.meta().lod);
+  xshape->Resize(paddle::framework::make_ddim(xshape_dims), x.meta().lod);
 }
 
 }  // namespace general
diff --git a/paddle/pten/kernels/xpu/manipulation.cc b/paddle/pten/kernels/xpu/manipulation.cc
index f19ebf35a02..e23c7b2c6d4 100644
--- a/paddle/pten/kernels/xpu/manipulation.cc
+++ b/paddle/pten/kernels/xpu/manipulation.cc
@@ -47,8 +47,7 @@ void FlattenWithXShape(const XPUContext& dev_ctx,
   for (int i = 0; i < in_dims.size(); ++i) {
     xshape_dims[i + 1] = in_dims[i];
   }
-  xshape->Resize(paddle::framework::make_ddim(xshape_dims));
-  xshape->set_lod(x.lod());
+  xshape->Resize(paddle::framework::make_ddim(xshape_dims), x.meta().lod);
 }
 
 void ReshapeFromVectorVal(const XPUContext& dev_ctx,
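Editorial illustration, not part of the patch: the CPU, CUDA and XPU kernels above now funnel through a single ReshapeFromVectorValImpl whose trailing bool decides whether the inferred lod is written back along with the dims, replacing the removed out->set_lod(x.lod()) calls. The sketch below uses the CPU entry point; it assumes a live CPUContext, an input whose numel matches the requested shape, and that the kernel declarations come from the corresponding manipulation.h header. ReshapeSketch is a hypothetical helper name.

    #include <cstdint>
    #include <vector>

    #include "paddle/pten/kernels/cpu/manipulation.h"  // assumed header for the kernels above

    void ReshapeSketch(const pten::CPUContext& dev_ctx,
                       const pten::DenseTensor& x,
                       pten::DenseTensor* out) {
      const std::vector<int64_t> new_shape = {2, -1};  // -1 is inferred from x.numel()
      // Vector-driven reshape: the Impl runs with set_lod == false,
      // so out ends up with the new dims and an empty lod.
      pten::ReshapeFromVectorVal(dev_ctx, x, new_shape, out);
      // The tensor-driven entry point (ReshapeFromDT) instead passes
      // set_lod == true, so the lod travels through Resize(dims, lod).
    }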
diff --git a/paddle/pten/tests/core/test_dense_tensor.cc b/paddle/pten/tests/core/test_dense_tensor.cc
index 69c5e9b1260..6e7bfede06c 100644
--- a/paddle/pten/tests/core/test_dense_tensor.cc
+++ b/paddle/pten/tests/core/test_dense_tensor.cc
@@ -112,14 +112,11 @@ TEST(dense_tensor, resize) {
   auto alloc = std::make_shared<FancyAllocator>();
   DenseTensor tensor_0(alloc, meta);
 
-  CHECK_EQ(tensor_0.memory_size(), 2u);
-  tensor_0.check_memory_size();
+  CHECK_EQ(tensor_0.capacity(), 2u);
   tensor_0.Resize({1, 2, 3});
-  CHECK_EQ(tensor_0.memory_size(), 0u);
-  tensor_0.set_dims({2, 3});
-  CHECK_EQ(tensor_0.memory_size(), 0u);
+  CHECK_EQ(tensor_0.capacity(), 6u);
   tensor_0.mutable_data();
-  CHECK_EQ(tensor_0.memory_size(), 6u);
+  CHECK_EQ(tensor_0.capacity(), 6u);
 
   auto storage = tensor_0.release();
   CHECK_EQ(storage->size(), 6u);
-- 
GitLab
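Editorial note, not part of the patch: the updated expectations follow directly from capacity() being the number of bytes currently held by storage, assuming the meta built earlier in this test uses a one-byte element type (e.g. INT8) with dims {1, 2}. The arithmetic, as a compile-time check:

    #include <cstdint>

    // 1 x 2 elements of a one-byte dtype right after construction.
    static_assert(1 * 2 * sizeof(std::int8_t) == 2u, "capacity right after construction");
    // 1 x 2 x 3 elements once Resize({1, 2, 3}) has eagerly reallocated.
    static_assert(1 * 2 * 3 * sizeof(std::int8_t) == 6u, "capacity after Resize({1, 2, 3})");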