diff --git a/paddle/pten/core/CMakeLists.txt b/paddle/pten/core/CMakeLists.txt
index ae09507193e9cc24f4ec341b58669eda8b651e42..e19d0a490cef39ce6e4f38714d2e0d9ecb73f7d2 100644
--- a/paddle/pten/core/CMakeLists.txt
+++ b/paddle/pten/core/CMakeLists.txt
@@ -10,4 +10,5 @@ cc_library(kernel_factory SRCS kernel_factory.cc DEPS enforce)
 cc_library(kernel_context SRCS kernel_context.cc DEPS enforce device_context)
 
 cc_library(tensor_base SRCS tensor_base.cc allocator.cc storage.cc DEPS enforce)
-cc_library(dense_tensor SRCS dense_tensor.cc DEPS tensor_base)
+cc_library(tensor_meta SRCS tensor_meta.cc DEPS enforce)
+cc_library(dense_tensor SRCS dense_tensor.cc DEPS tensor_meta tensor_base)
diff --git a/paddle/pten/core/compat_utils.h b/paddle/pten/core/compat_utils.h
index a0602f33e3de22eef2a556d698c3f035d177c99e..6c8eeec6553bb0308a593168020408f11936b8d4 100644
--- a/paddle/pten/core/compat_utils.h
+++ b/paddle/pten/core/compat_utils.h
@@ -49,7 +49,14 @@ class CompatibleDenseTensorUtils {
   static DenseTensor Slice(DenseTensor* tensor,
                            int64_t begin_idx,
                            int64_t end_idx) {
-    tensor->check_memory_size();
+    size_t bytes = tensor->numel() * SizeOf(tensor->dtype());
+    PADDLE_ENFORCE_GE(tensor->capacity(),
+                      bytes,
+                      paddle::platform::errors::InvalidArgument(
+                          "The memory size %d should be enough to meet the "
+                          "volume required by metadata %d.",
+                          tensor->capacity(),
+                          bytes));
     PADDLE_ENFORCE_GE(begin_idx,
                       0,
                       paddle::platform::errors::OutOfRange(
diff --git a/paddle/pten/core/dense_tensor.cc b/paddle/pten/core/dense_tensor.cc
index bb38c53ada04e8cd0a982452c290233f1d51f37e..b972770f5566869a24299b9c352066e848c5234c 100644
--- a/paddle/pten/core/dense_tensor.cc
+++ b/paddle/pten/core/dense_tensor.cc
@@ -112,29 +112,18 @@ const void* DenseTensor::data() const {
   return storage_->data();
 }
 
-void DenseTensor::check_memory_size() const {
-  size_t bytes = numel() * SizeOf(dtype());
-  PADDLE_ENFORCE_GE(memory_size(),
-                    bytes,
-                    paddle::platform::errors::InvalidArgument(
-                        "The memory size %d should be enough to meet the "
-                        "volume required by metadata %d.",
-                        memory_size(),
-                        bytes));
-}
-
-void DenseTensor::Resize(const DDim& dims) {
-  if (product(dims) == product(meta_.dims)) {
-    set_dims(dims);
-  } else {
-    meta_.dims = dims;
-    storage_->Clear();
-  }
+void DenseTensor::set_meta(DenseTensorMeta&& meta) {
+  PADDLE_ENFORCE(!meta_.valid(),
+                 paddle::platform::errors::InvalidArgument(
+                     "Only when the original attribute of Tensor is "
+                     "incomplete, can it be reset."));
+  meta_ = std::move(meta);
 }
 
-void DenseTensor::set_dims(const DDim& dims) {
-  CHECK(product(dims) == product(meta_.dims));
+void DenseTensor::Resize(const DDim& dims, const LoD& lod) {
   meta_.dims = dims;
+  meta_.lod = lod;
+  mutable_data();
 }
 
 #define DATA_MEMBER_FUNC_INSTANTIATION(dtype) \
diff --git a/paddle/pten/core/dense_tensor.h b/paddle/pten/core/dense_tensor.h
index 8ece80f529161a7e01919eb5452c90967e97bbdc..9d6d05551a177aee022a7e4faefd206e817a2321 100644
--- a/paddle/pten/core/dense_tensor.h
+++ b/paddle/pten/core/dense_tensor.h
@@ -88,9 +88,6 @@ class DenseTensor : public TensorBase,
     return meta_.lod;
   }
 
-  /// \brief Set the lod of the tensor.
-  void set_lod(const std::vector<std::vector<size_t>>& lod) { meta_.lod = lod; }
-
   /// \brief Returns the data type of the tensor.
   /// \return The data type of the tensor.
   DataType dtype() const noexcept override { return meta_.type; }
@@ -107,6 +104,11 @@
   /// \return The meta information of the tensor.
   const DenseTensorMeta& meta() const noexcept { return meta_; }
 
+  /// \brief Sets the meta information of the tensor. Only when the original
+  /// attribute of Tensor is incomplete, can it be reset.
+  /// \param meta The meta information of the tensor.
+  void set_meta(DenseTensorMeta&& meta);
+
   /// \brief Test whether the metadata is valid.
   /// \return Whether the metadata is valid.
   bool valid() const noexcept override { return meta_.valid(); }
@@ -121,25 +123,16 @@ class DenseTensor : public TensorBase,
   /// \return Whether the storage is shared with other objects.
   bool IsSharedWith(const DenseTensor& b) const;
 
-  /// \brief Change the dims information in the metadata. If the new size is
-  /// inconsistent with the original value, the storage area will be released
-  /// to avoid wrong access.
+  /// \brief Change the shape information in the metadata. If the new size is
+  /// larger than the original value, the storage area will be reallocated.
   /// \param dims The new dims of the dense tensor.
-  void Resize(const DDim& dims);
-
-  /// \brief Change the dims information in the metadata.
-  /// \param dims The new dims of the dense tensor. The product of the dims
-  /// elements must be consistent with the original value.
-  void set_dims(const DDim& dims);
+  /// \param lod The new lod of the dense tensor.
+  void Resize(const DDim& dims, const LoD& lod = {});
 
   /// \brief Returns the actual storage size occupied by tensor, may be larger
   /// than its shape dims.
   /// \return The actual storage size occupied by tensor.
-  size_t memory_size() const { return storage_->size(); }
-
-  /// \brief Check that the storage area is large enough to hold the data of the
-  /// metadata size, and throw an exception if the conditions are not met.
-  void check_memory_size() const;
+  size_t capacity() const { return storage_->size(); }
 
   /// \brief Release the storage area for other purposes. Because of the
   /// destruction of encapsulation, we do not support two dense tensors directly
diff --git a/paddle/pten/core/tensor_meta.cc b/paddle/pten/core/tensor_meta.cc
new file mode 100644
index 0000000000000000000000000000000000000000..ebdcd9b5f250b8949059043a3877565a7989ef1f
--- /dev/null
+++ b/paddle/pten/core/tensor_meta.cc
@@ -0,0 +1,47 @@
+/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/pten/core/tensor_meta.h"
+
+namespace pten {
+
+DenseTensorMeta::DenseTensorMeta(DataType type, const DDim& dims)
+    : dims(dims), type(type) {}
+
+DenseTensorMeta::DenseTensorMeta(DataType type,
+                                 const DDim& dims,
+                                 DataLayout layout)
+    : dims(dims), type(type), layout(layout) {}
+
+DenseTensorMeta::DenseTensorMeta(DataType type,
+                                 const DDim& dims,
+                                 DataLayout layout,
+                                 const std::vector<std::vector<size_t>>& lod)
+    : dims(dims), type(type), layout(layout), lod(lod) {}
+
+bool DenseTensorMeta::valid() const noexcept {
+  bool valid{true};
+  valid = valid && (type != DataType::UNDEFINED);
+  valid = valid && (layout != DataLayout::UNDEFINED);
+  valid = valid && (is_scalar || product(dims) >= 0);
+  return valid;
+}
+
+bool operator==(const DenseTensorMeta& lhs, const DenseTensorMeta& rhs) {
+  bool ret = true;
+  return ret && (lhs.is_scalar == rhs.is_scalar) && (lhs.dims == rhs.dims) &&
+         (lhs.type == rhs.type) && (lhs.layout == rhs.layout) &&
+         (lhs.lod == rhs.lod) && (lhs.offset == rhs.offset);
+}
+}  // namespace pten
diff --git a/paddle/pten/core/tensor_meta.h b/paddle/pten/core/tensor_meta.h
index eae270171d88e6b4aea35871ee4ef1408850d58c..f48d69260b5ee9089ba93bd3cd6a8cda47246fa1 100644
--- a/paddle/pten/core/tensor_meta.h
+++ b/paddle/pten/core/tensor_meta.h
@@ -52,42 +52,12 @@ struct DenseTensorMeta {
   /// During the entire life cycle of a DenseTensor, the following attributes
   /// marked with `const` are expected to remain unchanged.
-  const bool is_scalar{false};
+  bool is_scalar{false};
   DDim dims;
-  const DataType type{DataType::UNDEFINED};
-  const DataLayout layout{DataLayout::NCHW};
+  DataType type{DataType::UNDEFINED};
+  DataLayout layout{DataLayout::NCHW};
   LoD lod;
   size_t offset{0};
 };
 
-inline DenseTensorMeta::DenseTensorMeta(DataType type, const DDim& dims)
-    : dims(dims), type(type) {}
-
-inline DenseTensorMeta::DenseTensorMeta(DataType type,
-                                        const DDim& dims,
-                                        DataLayout layout)
-    : dims(dims), type(type), layout(layout) {}
-
-inline DenseTensorMeta::DenseTensorMeta(
-    DataType type,
-    const DDim& dims,
-    DataLayout layout,
-    const std::vector<std::vector<size_t>>& lod)
-    : dims(dims), type(type), layout(layout), lod(lod) {}
-
-inline bool DenseTensorMeta::valid() const noexcept {
-  bool valid{true};
-  valid = valid && (type != DataType::UNDEFINED);
-  valid = valid && (layout != DataLayout::UNDEFINED);
-  valid = valid && (is_scalar || product(dims) >= 0);
-  return valid;
-}
-
-inline bool operator==(const DenseTensorMeta& lhs, const DenseTensorMeta& rhs) {
-  bool ret = true;
-  return ret && (lhs.is_scalar == rhs.is_scalar) && (lhs.dims == rhs.dims) &&
-         (lhs.type == rhs.type) && (lhs.layout == rhs.layout) &&
-         (lhs.lod == rhs.lod) && (lhs.offset == rhs.offset);
-}
-
 }  // namespace pten
diff --git a/paddle/pten/kernels/cpu/manipulation.cc b/paddle/pten/kernels/cpu/manipulation.cc
index 99699ea91ee2038af46e3f930d66ff4613257cfb..cc2826c77b79e54e073abf91637c25cd4c4dd616 100644
--- a/paddle/pten/kernels/cpu/manipulation.cc
+++ b/paddle/pten/kernels/cpu/manipulation.cc
@@ -44,17 +44,27 @@ void FlattenWithXShape(const CPUContext& dev_ctx,
   general::SetXShape(x, xshape);
 }
 
+void ReshapeFromVectorValImpl(const CPUContext& dev_ctx,
+                              const DenseTensor& x,
+                              const std::vector<int64_t>& shape,
+                              DenseTensor* out,
+                              bool set_lod) {
+  auto out_meta = InferShapeFromVecValue(x.meta(), shape);
+  if (&x != out) {
+    pten::Copy(dev_ctx, x, out);
+  }
+  if (set_lod) {
+    out->Resize(out_meta.dims, out_meta.lod);
+  } else {
+    out->Resize(out_meta.dims);
+  }
+}
+
 void ReshapeFromVectorVal(const CPUContext& dev_ctx,
                          const DenseTensor& x,
                          const std::vector<int64_t>& shape,
                          DenseTensor* out) {
-  auto out_meta = InferShapeFromVecValue(x.meta(), shape);
-  if (&x == out) {
-    out->Resize(out_meta.dims);
-    return;
-  }
-  pten::Copy(dev_ctx, x, out);
-  out->Resize(out_meta.dims);
+  ReshapeFromVectorValImpl(dev_ctx, x, shape, out, false);
 }
 
 void ReshapeFromVectorValWithXShape(const CPUContext& dev_ctx,
@@ -73,8 +83,7 @@ void ReshapeFromDT(const CPUContext& dev_ctx,
   auto* shape_data = shape.data();
   auto vector_shape =
       std::vector<int64_t>(shape_data, shape_data + shape.numel());
-  ReshapeFromVectorVal(dev_ctx, x, vector_shape, out);
-  out->set_lod(x.lod());
+  ReshapeFromVectorValImpl(dev_ctx, x, vector_shape, out, true);
 }
 
 void ReshapeFromDTWithXShape(const CPUContext& dev_ctx,
diff --git a/paddle/pten/kernels/cuda/manipulation.cu b/paddle/pten/kernels/cuda/manipulation.cu
index b84694c0a9f813b643da60ec36e769f92df71663..d2315965b288e77d38211b1ddf4bf4e0f4431fc3 100644
--- a/paddle/pten/kernels/cuda/manipulation.cu
+++ b/paddle/pten/kernels/cuda/manipulation.cu
@@ -44,18 +44,27 @@ void FlattenWithXShape(const CUDAContext& dev_ctx,
   general::SetXShape(x, xshape);
 }
 
+void ReshapeFromVectorValImpl(const CUDAContext& dev_ctx,
+                              const DenseTensor& x,
+                              const std::vector<int64_t>& shape,
+                              DenseTensor* out,
+                              bool set_lod) {
+  auto out_meta = InferShapeFromVecValue(x.meta(), shape);
+  if (&x != out) {
+    pten::Copy(dev_ctx, x, false, out);
+  }
+  if (set_lod) {
+    out->Resize(out_meta.dims, out_meta.lod);
+  } else {
+    out->Resize(out_meta.dims);
+  }
+}
+
 void ReshapeFromVectorVal(const CUDAContext& dev_ctx,
                           const DenseTensor& x,
                           const std::vector<int64_t>& shape,
                           DenseTensor* out) {
-  auto out_meta = InferShapeFromVecValue(x.meta(), shape);
-  if (&x == out) {
-    LOG(INFO) << "out_meta dims:" << out_meta.dims;
-    out->Resize(out_meta.dims);
-    return;
-  }
-  pten::Copy(dev_ctx, x, false, out);
-  out->Resize(out_meta.dims);
+  ReshapeFromVectorValImpl(dev_ctx, x, shape, out, false);
 }
 
 void ReshapeFromVectorValWithXShape(const CUDAContext& dev_ctx,
@@ -74,8 +83,7 @@ void ReshapeFromDT(const CUDAContext& dev_ctx,
   auto* shape_data = shape.data();
   auto vector_shape =
       std::vector<int64_t>(shape_data, shape_data + shape.numel());
-  ReshapeFromVectorVal(dev_ctx, x, vector_shape, out);
-  out->set_lod(x.lod());
+  ReshapeFromVectorValImpl(dev_ctx, x, vector_shape, out, true);
 }
 
 void ReshapeFromDTWithXShape(const CUDAContext& dev_ctx,
diff --git a/paddle/pten/kernels/functions/general/manipulation.h b/paddle/pten/kernels/functions/general/manipulation.h
index b05691ed7c5972eacd66723a70d993a3ff95ca6b..cade585792c9655dbaa6bd851033f8fb3e7e8f3e 100644
--- a/paddle/pten/kernels/functions/general/manipulation.h
+++ b/paddle/pten/kernels/functions/general/manipulation.h
@@ -26,8 +26,7 @@ inline void SetXShape(const DenseTensor& x, DenseTensor* xshape) {
   for (int i = 0; i < in_dims.size(); ++i) {
     xshape_dims[i + 1] = in_dims[i];
   }
-  xshape->Resize(paddle::framework::make_ddim(xshape_dims));
-  xshape->set_lod(x.meta().lod);
+  xshape->Resize(paddle::framework::make_ddim(xshape_dims), x.meta().lod);
 }
 
 }  // namespace general
diff --git a/paddle/pten/kernels/xpu/manipulation.cc b/paddle/pten/kernels/xpu/manipulation.cc
index f19ebf35a0254a434b94b61c5dda3ec4cd18c85d..e23c7b2c6d4e6fbe126dbe421bc195a4227af05c 100644
--- a/paddle/pten/kernels/xpu/manipulation.cc
+++ b/paddle/pten/kernels/xpu/manipulation.cc
@@ -47,8 +47,7 @@ void FlattenWithXShape(const XPUContext& dev_ctx,
   for (int i = 0; i < in_dims.size(); ++i) {
     xshape_dims[i + 1] = in_dims[i];
   }
-  xshape->Resize(paddle::framework::make_ddim(xshape_dims));
-  xshape->set_lod(x.lod());
+  xshape->Resize(paddle::framework::make_ddim(xshape_dims), x.meta().lod);
 }
 
 void ReshapeFromVectorVal(const XPUContext& dev_ctx,
diff --git a/paddle/pten/tests/core/test_dense_tensor.cc b/paddle/pten/tests/core/test_dense_tensor.cc
index 69c5e9b12606b4cd45702f5e9519e0d2ca51137d..6e7bfede06c180cfab89f014ad7c6fd044424a13 100644
--- a/paddle/pten/tests/core/test_dense_tensor.cc
+++ b/paddle/pten/tests/core/test_dense_tensor.cc
@@ -112,14 +112,11 @@ TEST(dense_tensor, resize) {
   auto alloc = std::make_shared<FancyAllocator>();
   DenseTensor tensor_0(alloc, meta);
 
-  CHECK_EQ(tensor_0.memory_size(), 2u);
-  tensor_0.check_memory_size();
+  CHECK_EQ(tensor_0.capacity(), 2u);
   tensor_0.Resize({1, 2, 3});
-  CHECK_EQ(tensor_0.memory_size(), 0u);
-  tensor_0.set_dims({2, 3});
-  CHECK_EQ(tensor_0.memory_size(), 0u);
+  CHECK_EQ(tensor_0.capacity(), 6u);
   tensor_0.mutable_data();
-  CHECK_EQ(tensor_0.memory_size(), 6u);
+  CHECK_EQ(tensor_0.capacity(), 6u);
 
   auto storage = tensor_0.release();
   CHECK_EQ(storage->size(), 6u);
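
Note (not part of the patch): below is a minimal usage sketch of the reworked DenseTensor API introduced above -- capacity() replacing memory_size()/check_memory_size(), Resize() taking an optional LoD and reallocating eagerly, and set_meta() only being legal while the current meta is still incomplete. The function name and the allocator argument are illustrative placeholders; the DenseTensor/DenseTensorMeta calls follow the signatures in the diff.

// Illustrative sketch only; not part of the patch. The function name and the
// allocator passed in are placeholders (the unit tests use their own test
// allocator); everything else follows the updated API in this change.
#include <memory>

#include "paddle/pten/core/dense_tensor.h"

namespace pten {

void DenseTensorUsageSketch(const std::shared_ptr<Allocator>& alloc) {
  // DenseTensorMeta members are no longer const, so a meta object can be
  // adjusted before it is handed to a tensor.
  DenseTensorMeta meta(DataType::FLOAT32,
                       paddle::framework::make_ddim({2, 3}),
                       DataLayout::NCHW);

  // The constructor allocates numel() * SizeOf(dtype) bytes up front, as the
  // updated unit test expects.
  DenseTensor t(alloc, meta);
  size_t before = t.capacity();  // replaces memory_size(): 2 * 3 * 4 bytes

  // Resize() now accepts an optional LoD and reallocates the storage eagerly
  // (via mutable_data()), so capacity() reflects the new shape immediately.
  t.Resize(paddle::framework::make_ddim({4, 3}));
  size_t after = t.capacity();  // 4 * 3 * 4 bytes

  // set_meta() may only be called while the existing meta is still invalid
  // (incomplete); calling it on `t` here would raise InvalidArgument.
  (void)before;
  (void)after;
}

}  // namespace pten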