/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/pten/core/dense_tensor.h" // See Note [ Why still include the fluid headers? ] #include "paddle/fluid/platform/bfloat16.h" #include "paddle/fluid/platform/complex.h" #include "paddle/fluid/platform/float16.h" #include "paddle/pten/api/lib/utils/storage.h" #include "paddle/pten/core/convert_utils.h" namespace pten { DenseTensor::DenseTensor(const std::shared_ptr& a, const DenseTensorMeta& meta) : meta_(meta), storage_(make_intrusive(a, SizeOf(dtype()) * numel())) {} DenseTensor::DenseTensor(const std::shared_ptr& a, DenseTensorMeta&& meta) : meta_(std::move(meta)), storage_(make_intrusive(a, SizeOf(dtype()) * numel())) {} DenseTensor::DenseTensor(intrusive_ptr storage, const DenseTensorMeta& meta) : meta_(meta), storage_(std::move(storage)) {} DenseTensor::DenseTensor(intrusive_ptr storage, DenseTensorMeta&& meta) : meta_(std::move(meta)), storage_(std::move(storage)) {} DenseTensor::DenseTensor(const DenseTensor& other) : meta_(other.meta()) { if (storage_ == nullptr) { storage_ = make_intrusive( paddle::platform::CPUPlace()); } if (other.storage_ != nullptr && other.storage_->data_shared()) { storage_->set_data_shared(other.storage_->data_shared()); } #ifdef PADDLE_WITH_MKLDNN format_ = other.format_; #endif } DenseTensor& DenseTensor::operator=(const DenseTensor& other) { meta_ = other.meta(); if (storage_ == nullptr) { storage_ = make_intrusive( paddle::platform::CPUPlace()); } if (other.storage_ != nullptr && other.storage_->data_shared()) { storage_->set_data_shared(other.storage_->data_shared()); } #ifdef PADDLE_WITH_MKLDNN format_ = other.format_; #endif return *this; } DenseTensor& DenseTensor::operator=(DenseTensor&& other) { meta_ = std::move(other.meta_); storage_.swap(other.storage_); return *this; } int64_t DenseTensor::numel() const { if (meta_.is_scalar) { return 1; } return product(meta_.dims); } bool DenseTensor::IsSharedWith(const DenseTensor& b) const { return storage_.get() == b.storage_.get() && storage_.get() != nullptr; } void* DenseTensor::mutable_data(size_t request_bytes) { PADDLE_ENFORCE( valid(), paddle::platform::errors::PreconditionNotMet( "The meta data must be valid when call the mutable data function.")); PADDLE_ENFORCE_NOT_NULL( storage_, paddle::platform::errors::PreconditionNotMet( "The storage must be valid when call the mutable data function.")); size_t bytes = numel() * SizeOf(dtype()); if (request_bytes) { PADDLE_ENFORCE_GE(request_bytes, bytes, paddle::platform::errors::InvalidArgument( "The reserved size %d should be enough to meet the " "volume required by metadata %d.", request_bytes, bytes)); bytes = request_bytes; } if (storage_->size() < bytes + meta_.offset || storage_->size() == 0) { VLOG(10) << "mutbale data realloc, original size: " << storage_->size() << ", new size: " << bytes; storage_->Realloc(bytes); meta_.offset = 0; } return reinterpret_cast(reinterpret_cast(storage_->data()) + meta_.offset); } template T* DenseTensor::mutable_data() { // In order to be compatible with the original Tensor design and // execution system, we have to reset the datatype in mutable_data. // When the compatibility phase is over in the future, we can delete it if (meta_.dtype == DataType::UNDEFINED) { VLOG(10) << "change data type in mutbale_data, target dtype - " << paddle::experimental::CppTypeToDataType::Type(); const_cast(meta_.dtype) = paddle::experimental::CppTypeToDataType::Type(); } PADDLE_ENFORCE( (dtype() == paddle::experimental::CppTypeToDataType::Type()), paddle::platform::errors::InvalidArgument( "The type of data (%d) we are trying to retrieve does not match the " "type of data currently contained in the container (%d).", static_cast(paddle::experimental::CppTypeToDataType::Type()), static_cast(dtype()))); return static_cast(mutable_data()); } template const T* DenseTensor::data() const { check_memory_size(); PADDLE_ENFORCE( (dtype() == paddle::experimental::CppTypeToDataType::Type()), paddle::platform::errors::InvalidArgument( "The type of data we are trying to retrieve does not match the " "type of data currently contained in the container.")); return static_cast(data()); } template T* DenseTensor::data() { check_memory_size(); PADDLE_ENFORCE( (dtype() == paddle::experimental::CppTypeToDataType::Type()), paddle::platform::errors::InvalidArgument( "The type of data we are trying to retrieve does not match the " "type of data currently contained in the container.")); PADDLE_ENFORCE_NOT_NULL( storage_, paddle::platform::errors::PreconditionNotMet( "The storage must be valid when call the mutable data function.")); return reinterpret_cast(data()); } void* DenseTensor::data() { PADDLE_ENFORCE_NOT_NULL( storage_, paddle::platform::errors::PreconditionNotMet( "The storage must be valid when call the mutable data function.")); return reinterpret_cast(reinterpret_cast(storage_->data()) + meta_.offset); } const void* DenseTensor::data() const { PADDLE_ENFORCE_NOT_NULL( storage_, paddle::platform::errors::PreconditionNotMet( "The storage must be valid when call the mutable data function.")); return reinterpret_cast( reinterpret_cast(storage_->data()) + meta_.offset); } void DenseTensor::set_meta(DenseTensorMeta&& meta) { PADDLE_ENFORCE(!meta_.valid(), paddle::platform::errors::InvalidArgument( "Only when the original attribute of Tensor is " "incomplete, can it be reset.")); meta_ = std::move(meta); } /* @jim19930609: This interface will be further modified util we finalized the design for Allocator - Allocation For now, we have to temporarily accommodate two independent use cases: 1. Designed behaviour: DenseTensor constructed with its underlying storage_ initialized 2. Legacy behaviour(fluid): DenseTensor constructed using default constructor, where storage_ won't be initialized until the first call to mutable_data(place) */ void DenseTensor::Resize(const DDim& dims) { meta_.dims = dims; if (storage_ != nullptr) { mutable_data(); } } void DenseTensor::ResetLoD(const LoD& lod) { meta_.lod = lod; } #define DATA_MEMBER_FUNC_INSTANTIATION(dtype) \ template dtype* DenseTensor::mutable_data(); \ template const dtype* DenseTensor::data() const; \ template dtype* DenseTensor::data(); DATA_MEMBER_FUNC_INSTANTIATION(bool); DATA_MEMBER_FUNC_INSTANTIATION(int8_t); DATA_MEMBER_FUNC_INSTANTIATION(uint8_t); DATA_MEMBER_FUNC_INSTANTIATION(int16_t); DATA_MEMBER_FUNC_INSTANTIATION(uint16_t); DATA_MEMBER_FUNC_INSTANTIATION(int32_t); DATA_MEMBER_FUNC_INSTANTIATION(uint32_t); DATA_MEMBER_FUNC_INSTANTIATION(int64_t); DATA_MEMBER_FUNC_INSTANTIATION(uint64_t); DATA_MEMBER_FUNC_INSTANTIATION(::paddle::platform::bfloat16); DATA_MEMBER_FUNC_INSTANTIATION(::paddle::platform::float16); DATA_MEMBER_FUNC_INSTANTIATION(float); DATA_MEMBER_FUNC_INSTANTIATION(double); DATA_MEMBER_FUNC_INSTANTIATION(::paddle::experimental::complex64); DATA_MEMBER_FUNC_INSTANTIATION(::paddle::experimental::complex128); #undef DATA_MEMBER_FUNC_INSTANTIATION /* --------------------------- */ /* From framework::Tensor */ /* --------------------------- */ DenseTensor::DenseTensor() { storage_ = make_intrusive( paddle::platform::CPUPlace()); inplace_version_counter_ = std::make_shared(0); meta_.dtype = paddle::experimental::DataType::FLOAT32; meta_.offset = 0; } DenseTensor::DenseTensor(const paddle::framework::proto::VarType::Type& dtype) { storage_ = make_intrusive( paddle::platform::CPUPlace()); inplace_version_counter_ = std::make_shared(0); meta_.dtype = TransToPtenDataType(dtype); meta_.offset = 0; } size_t DenseTensor::memory_size() const { if (storage_ == nullptr || storage_->data_shared() == nullptr) { return 0UL; } return storage_->data_shared()->size() - meta_.offset; } void DenseTensor::check_memory_size() const { PADDLE_ENFORCE_NOT_NULL(storage_, paddle::platform::errors::PreconditionNotMet( "Tensor holds no memory. " "Call Tensor::mutable_data firstly.")); PADDLE_ENFORCE_NOT_NULL(storage_->data_shared(), paddle::platform::errors::PreconditionNotMet( "Tensor holds no memory. " "Call Tensor::mutable_data firstly.")); size_t size = numel() * SizeOf(dtype()); PADDLE_ENFORCE_LE( size, memory_size(), paddle::platform::errors::PreconditionNotMet( "Tensor's dimension is out of bound." "Tensor's dimension must be equal or less than the size of its " "memory." "But received Tensor's dimension is d%, memory's size is %d.", size, memory_size())); } const paddle::platform::Place& DenseTensor::place() const { PADDLE_ENFORCE_NOT_NULL( storage_, paddle::platform::errors::PreconditionNotMet( "Tensor not initialized yet when Tensor::place() is called.")); return storage_->place(); } paddle::framework::proto::VarType::Type DenseTensor::type() const { PADDLE_ENFORCE_NOT_NULL( storage_, paddle::platform::errors::PreconditionNotMet( "Tensor not initialized yet when Tensor::type() is called.")); return TransToProtoVarType(meta_.dtype); } paddle::framework::proto::VarType::Type DenseTensor::saved_type() const { return TransToProtoVarType(meta_.dtype); } void DenseTensor::set_layout(const paddle::framework::DataLayout layout) { meta_.layout = layout; } void DenseTensor::ResetHolder( const std::shared_ptr& holder) { PADDLE_ENFORCE_EQ( meta_.offset, 0, paddle::platform::errors::Fatal( "Only the offset is supported to zero when the holder is reset.")); PADDLE_ENFORCE_NOT_NULL( storage_, paddle::platform::errors::PreconditionNotMet( "The storage must be valid when call the mutable data function.")); if (storage_->data_shared()) { PADDLE_ENFORCE_LE( numel() * SizeOf(dtype()) + meta_.offset, holder->size(), paddle::platform::errors::InvalidArgument( "The size of Holder is not enough to store the Tensor.")); } storage_->set_data_shared(holder); } void DenseTensor::ResetHolderWithType( const std::shared_ptr& holder, const paddle::framework::proto::VarType::Type& type) { set_type(type); ResetHolder(holder); } void DenseTensor::set_type( const paddle::framework::proto::VarType::Type& type) { meta_.dtype = TransToPtenDataType(type); } void* DenseTensor::mutable_data(const paddle::platform::Place& place, paddle::framework::proto::VarType::Type type, size_t requested_size) { set_type(type); PADDLE_ENFORCE_GE( numel(), 0, paddle::platform::errors::PreconditionNotMet( "The Tensor's element number must be equal or greater than zero. " "The Tensor's shape is [", dims(), "] now")); size_t size = numel() * SizeOf(dtype()); if (requested_size && (requested_size > size)) { size = requested_size; } if (storage_ == nullptr) { storage_ = make_intrusive(place); } /* some versions of boost::variant don't have operator!= */ if (storage_->data_shared() == nullptr || !(storage_->data_shared()->place() == place) || storage_->data_shared()->size() < size + meta_.offset) { storage_->Clear(); storage_->set_data_shared(paddle::memory::AllocShared(place, size)); meta_.offset = 0; } return reinterpret_cast(reinterpret_cast(storage_->data()) + meta_.offset); } void* DenseTensor::mutable_data(const paddle::platform::Place& place, size_t requested_size) { return mutable_data(place, type(), requested_size); } void* DenseTensor::mutable_data(const paddle::platform::Place& place, paddle::framework::proto::VarType::Type type, const paddle::platform::Stream& stream) { set_type(type); PADDLE_ENFORCE_GE( numel(), 0, paddle::platform::errors::PreconditionNotMet( "The Tensor's element number must be equal or greater than zero. " "The Tensor's shape is [", dims(), "] now")); size_t size = numel() * SizeOf(dtype()); if (storage_ == nullptr) { storage_ = make_intrusive(place); } /* some versions of boost::variant don't have operator!= */ if (storage_->data_shared() == nullptr || !(storage_->data_shared()->place() == place) || storage_->data_shared()->size() < size + meta_.offset || !(paddle::platform::is_gpu_place(place) && paddle::memory::InSameStream(storage_->data_shared(), stream))) { storage_->Clear(); storage_->set_data_shared(paddle::memory::AllocShared(place, size, stream)); meta_.offset = 0; } return reinterpret_cast(reinterpret_cast(storage_->data()) + meta_.offset); } /* @jim19930609: The following "mutable_data" only supports specific dtypes defined in OpProto. This part need another clean up once the data type across Fluid and Pten get unified. */ template inline T* DenseTensor::mutable_data(const DDim& dims, const paddle::platform::Place& place, size_t requested_size) { static_assert(std::is_pod::value, "T must be POD"); meta_.dims = dims; return mutable_data(place, requested_size); } template inline T* DenseTensor::mutable_data(const paddle::platform::Place& place, size_t requested_size) { static_assert(std::is_pod::value, "T must be POD"); return reinterpret_cast(mutable_data( place, paddle::framework::DataTypeTrait::DataType(), requested_size)); } void DenseTensor::ShareBufferWith(const DenseTensor& tensor) { if (storage_ == nullptr) { storage_ = make_intrusive( paddle::platform::CPUPlace()); } if (storage_ != nullptr && tensor.storage_ != nullptr) { storage_->set_data_shared(tensor.storage_->data_shared()); } meta_.offset = tensor.meta().offset; } #define LEGACY_DATA_MEMBER_FUNC_INSTANTIATION(dtype) \ template dtype* DenseTensor::mutable_data( \ const DDim& dims, \ const paddle::platform::Place& place, \ size_t requested_size); \ template dtype* DenseTensor::mutable_data( \ const paddle::platform::Place& place, size_t requested_size); LEGACY_DATA_MEMBER_FUNC_INSTANTIATION(bool) LEGACY_DATA_MEMBER_FUNC_INSTANTIATION(int8_t) LEGACY_DATA_MEMBER_FUNC_INSTANTIATION(uint8_t) LEGACY_DATA_MEMBER_FUNC_INSTANTIATION(int16_t) LEGACY_DATA_MEMBER_FUNC_INSTANTIATION(int) LEGACY_DATA_MEMBER_FUNC_INSTANTIATION(int64_t) LEGACY_DATA_MEMBER_FUNC_INSTANTIATION(float) LEGACY_DATA_MEMBER_FUNC_INSTANTIATION(double) LEGACY_DATA_MEMBER_FUNC_INSTANTIATION(::paddle::platform::bfloat16) LEGACY_DATA_MEMBER_FUNC_INSTANTIATION(::paddle::platform::float16) LEGACY_DATA_MEMBER_FUNC_INSTANTIATION(::paddle::experimental::complex64) LEGACY_DATA_MEMBER_FUNC_INSTANTIATION(::paddle::experimental::complex128) #undef LEGACY_DATA_MEMBER_FUNC_INSTANTIATION /* ------------------------------ */ /* From framework::LoDTensor */ /* ------------------------------ */ DenseTensor::DenseTensor(const LoD& lod) : DenseTensor() { meta_.lod = lod; } void DenseTensor::set_lod(const LoD& lod) { meta_.lod = lod; } LoD* DenseTensor::mutable_lod() { return &meta_.lod; } std::pair DenseTensor::lod_element(size_t level, size_t elem) const { PADDLE_ENFORCE_LT( level, NumLevels(), paddle::platform::errors::InvalidArgument( "The input level of LoD is invalid, it should be less than LoD " "size. The input level is %zu, the LoD size is %zu.", level, NumLevels())); PADDLE_ENFORCE_LT(elem, NumElements(level), paddle::platform::errors::InvalidArgument( "The input element of LoD is invalid, it should be " "less than the number of elements in its level." "The input element is %zu, the number of elements in " "its level is %zu.", elem, NumElements(level))); return std::make_pair((meta_.lod)[level][elem], (meta_.lod)[level][elem + 1]); } size_t DenseTensor::NumLevels() const { return meta_.lod.size(); } size_t DenseTensor::NumElements(size_t level) const { PADDLE_ENFORCE_LT( level, NumLevels(), paddle::platform::errors::InvalidArgument( "The input level of LoD is invalid, it should be less than LoD " "size. The input level is %zu, the LoD size is %zu.", level, NumLevels())); // the last offset is the end of last element return (meta_.lod)[level].size() - 1; } } // namespace pten