diff --git a/paddle/phi/core/dense_tensor.cc b/paddle/phi/core/dense_tensor.cc index 02f0fbb895215ba38e914851da9849c49bf64172..8a2d0e8a46bd460b104d08640d637ca2ab25dba2 100644 --- a/paddle/phi/core/dense_tensor.cc +++ b/paddle/phi/core/dense_tensor.cc @@ -53,6 +53,8 @@ DenseTensor::DenseTensor(const std::shared_ptr& holder, DenseTensor::DenseTensor(const DenseTensor& other) : meta_(other.meta()) { holder_ = other.holder_; + storage_properties_ = + std::move(CopyStorageProperties(other.storage_properties_)); inplace_version_counter_ = other.inplace_version_counter_; #ifdef PADDLE_WITH_MKLDNN @@ -64,6 +66,8 @@ DenseTensor::DenseTensor(const DenseTensor& other) : meta_(other.meta()) { DenseTensor& DenseTensor::operator=(const DenseTensor& other) { meta_ = other.meta(); holder_ = other.holder_; + storage_properties_ = + std::move(CopyStorageProperties(other.storage_properties_)); inplace_version_counter_ = other.inplace_version_counter_; #ifdef PADDLE_WITH_MKLDNN format_ = other.format_; @@ -75,6 +79,7 @@ DenseTensor& DenseTensor::operator=(const DenseTensor& other) { DenseTensor& DenseTensor::operator=(DenseTensor&& other) { meta_ = std::move(other.meta_); std::swap(holder_, other.holder_); + storage_properties_ = std::move(other.storage_properties_); std::swap(inplace_version_counter_, other.inplace_version_counter_); #ifdef PADDLE_WITH_MKLDNN format_ = other.format_; @@ -241,4 +246,29 @@ DATA_MEMBER_FUNC_INSTANTIATION(::phi::dtype::complex); #undef DATA_MEMBER_FUNC_INSTANTIATION +template +const DeviceT& DenseTensor::storage_properties() const { + PADDLE_ENFORCE_NOT_NULL( + storage_properties_, + phi::errors::PreconditionNotMet( + "The storage_properties of current DenseTensor is nullptr.")); + if (DeviceT::classof(storage_properties_.get())) { + return static_cast(*storage_properties_); + } else { + PADDLE_THROW(phi::errors::InvalidArgument( + "The actual type of storage_properties is inconsistent with the type " + "of the template parameter passed in.")); + } +} + 
+template const NPUStorageProperties& DenseTensor::storage_properties() const; +#ifdef PADDLE_WITH_MKLDNN +template const OneDNNStorageProperties& DenseTensor::storage_properties() const; +#endif + +void DenseTensor::set_storage_properties( + std::unique_ptr&& storage_properties) { + storage_properties_ = std::move(storage_properties); +} + } // namespace phi diff --git a/paddle/phi/core/dense_tensor.h b/paddle/phi/core/dense_tensor.h index abf242acdb22a57267fdd7be9559b48e61623ed9..e0d620ac3a53e05704ec4995370e009bb78b4644 100644 --- a/paddle/phi/core/dense_tensor.h +++ b/paddle/phi/core/dense_tensor.h @@ -15,6 +15,7 @@ limitations under the License. */ #pragma once #include "paddle/phi/core/allocator.h" +#include "paddle/phi/core/storage_properties.h" #include "paddle/phi/core/stream.h" #include "paddle/phi/core/tensor_base.h" #include "paddle/phi/core/tensor_meta.h" @@ -163,6 +164,16 @@ class DenseTensor : public TensorBase, void* data(); + /// \brief Returns the storage_properties of the tensor. + /// \return The storage_properties of the tensor. + template + const DeviceT& storage_properties() const; + + /// \brief Sets the storage_properties of the tensor. + /// \param storage_properties The storage_properties of the tensor. + void set_storage_properties( + std::unique_ptr&& storage_properties); + private: friend class DenseTensorUtils; @@ -170,6 +181,42 @@ class DenseTensor : public TensorBase, DenseTensorMeta meta_; std::shared_ptr holder_; + /** [ Why need StorageProperties? ] + * + * 1. Some hardware or third-party libraries add some additional storage + * properties on top of the description of the basic DenseTensor, such as + * memory desc of MKLDNN, storage_format and storage_layout of NPU, + * these members are necessary for optimal performance, but if the properties + * of each device are added to the DenseTensor with different macro isolation, + * the memory layout of the DenseTensor will become more fragmented. 
+ * Under different compilation conditions, the member layout of the + * DenseTensor is very unstable, which may introduce bugs that are difficult + * to debug. + * + * 2. If the layout of DenseTensor is very different from the framework + * itself, it is recommended to directly inherit TensorBase to implement + * SpatialTensor. + * + * TODO(chenweihang): merge the dnnl::memory::desc and + * dnnl::memory::format_tag into StorageProperties, dnnl::memory::desc is a + * type that takes up a lot of space, original tensor members' size: + * + * DenseTensor size: 880 + * -------- ordered members --------: + * DenseTensorMeta size: 128 + * - is_scalar_ size: 1 + * - DDim size: 80 + * - DataType size: 4 + * - DataLayout size: 4 + * - LoD size: 24 + * - offset size: 8 + * std::shared_ptr size: 16 + * std::shared_ptr size: 16 // need to be moved + * dnnl::memory::format_tag size: 4 // need to be moved + * dnnl::memory::desc size: 696 // need to be moved + */ + std::unique_ptr storage_properties_{nullptr}; + public: /* Temporarily put InplaceVersion inside DenseTensor. Will move to AutogradMeta as soon as we switch to Eager Dygraph. 
diff --git a/paddle/phi/core/dense_tensor_impl.cc b/paddle/phi/core/dense_tensor_impl.cc index 36c6c6e96ff901073577102adb0e78eca3f2f597..b78a9dc135e485863139605e1a7dc20c0df3bb52 100644 --- a/paddle/phi/core/dense_tensor_impl.cc +++ b/paddle/phi/core/dense_tensor_impl.cc @@ -370,6 +370,8 @@ DenseTensor& DenseTensor::ShareDataWith(const DenseTensor& src) { meta_.dtype = src.meta_.dtype; meta_.layout = src.meta_.layout; meta_.offset = src.meta_.offset; + storage_properties_ = + std::move(CopyStorageProperties(src.storage_properties_)); #ifdef PADDLE_WITH_MKLDNN format_ = src.format_; mem_desc_ = src.mem_desc_; diff --git a/paddle/phi/core/storage_properties.h b/paddle/phi/core/storage_properties.h new file mode 100644 index 0000000000000000000000000000000000000000..908abd8d9d35d040734885bdb5dfd7f9a773ca1c --- /dev/null +++ b/paddle/phi/core/storage_properties.h @@ -0,0 +1,97 @@ +/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#pragma once + +#include + +#include "paddle/phi/core/utils/type_registry.h" + +#ifdef PADDLE_WITH_MKLDNN +#include "dnnl.hpp" // NOLINT +#endif + +namespace phi { + +struct StorageProperties { + public: + virtual ~StorageProperties() = default; + TypeInfo type_info() const { return type_info_; } + + private: + template + friend class TypeInfoTraits; + TypeInfo type_info_{ + TypeInfo::kUnknownType}; +}; + +struct NPUStorageProperties + : public StorageProperties, + public TypeInfoTraits { + virtual ~NPUStorageProperties() = default; + static const char* name() { return "NPUStorageProperties"; } + + int64_t storage_format; + int64_t storage_layout; +}; + +// Add OneDNNStorageProperties firstly for unittest coverage +#ifdef PADDLE_WITH_MKLDNN +struct OneDNNStorageProperties + : public StorageProperties, + public TypeInfoTraits { + virtual ~OneDNNStorageProperties() = default; + static const char* name() { return "OneDNNStorageProperties"; } + + /** + * @brief the detail format of memory block which has layout as ONEDNN + * + * @note ONEDNN lib supports various memory formats like nchw, nhwc, nChw8C, + * nChw16c, etc. For a ONEDNN memory block, layout will be set as + * DataLayout::ONEDNN meanwhile detail memory format will be kept in + * this field. 
+ */ + dnnl::memory::format_tag format = dnnl::memory::format_tag::undef; + + /// \brief memory descriptor of tensor which have layout set as ONEDNN + dnnl::memory::desc mem_desc; +}; +#endif + +static std::unique_ptr CopyStorageProperties( + const std::unique_ptr& sp) { + if (sp) { + if (NPUStorageProperties::classof(sp.get())) { + auto result = std::make_unique(); + result->storage_format = + static_cast(sp.get())->storage_format; + result->storage_layout = + static_cast(sp.get())->storage_layout; + return result; +#ifdef PADDLE_WITH_MKLDNN + } else if (OneDNNStorageProperties::classof(sp.get())) { + auto result = std::make_unique(); + result->format = static_cast(sp.get())->format; + result->mem_desc = + static_cast(sp.get())->mem_desc; + return result; +#endif + } else { + return nullptr; + } + } + return nullptr; +} + +} // namespace phi diff --git a/paddle/phi/core/utils/type_registry.h b/paddle/phi/core/utils/type_registry.h index c233e1f743b213c50a8f49ee6b5f4f48d1826feb..ed1c9216e99e9e089321bffcc6c89ec17c8bee1a 100644 --- a/paddle/phi/core/utils/type_registry.h +++ b/paddle/phi/core/utils/type_registry.h @@ -18,6 +18,7 @@ limitations under the License. 
*/ #include #include #include +#include #include "paddle/phi/core/utils/type_info.h" diff --git a/paddle/phi/tests/core/test_dense_tensor.cc b/paddle/phi/tests/core/test_dense_tensor.cc index f6a3e3fa413486a1ee573ac6314f65c0d24ce7da..b997d8f1e76aebba2d0cafcac05a9f737d8815fe 100644 --- a/paddle/phi/tests/core/test_dense_tensor.cc +++ b/paddle/phi/tests/core/test_dense_tensor.cc @@ -129,5 +129,57 @@ TEST(dense_tensor, shallow_copy) { CHECK(tensor_0.meta() == tensor_1.meta()); } +struct TestStorageProperties + : public StorageProperties, + public TypeInfoTraits { + virtual ~TestStorageProperties() = default; + static const char* name() { return "TestStorageProperties"; } +}; + +TEST(dense_tensor, storage_properties) { + const DataType dtype{DataType::FLOAT32}; + const DDim dims({1, 2}); + DenseTensorMeta meta(dtype, dims); + + auto fancy_allocator = std::unique_ptr(new FancyAllocator); + DenseTensor tensor(fancy_allocator.get(), meta); + + // test no storage properties + bool caught_exception = false; + try { + tensor.storage_properties(); + } catch (phi::enforce::EnforceNotMet& error) { + caught_exception = true; + } + EXPECT_TRUE(caught_exception); + + // test custom device storage properties + auto npu_properties = std::make_unique(); + npu_properties->storage_format = 1; + npu_properties->storage_layout = 2; + tensor.set_storage_properties(std::move(npu_properties)); + auto get_npu_properties = tensor.storage_properties(); + CHECK_EQ(get_npu_properties.storage_format, 1); + CHECK_EQ(get_npu_properties.storage_layout, 2); + + // test error type storage properties +#ifdef PADDLE_WITH_MKLDNN + caught_exception = false; + try { + tensor.storage_properties(); + } catch (phi::enforce::EnforceNotMet& error) { + caught_exception = true; + } + EXPECT_TRUE(caught_exception); +#endif + + // test copy storage properties + auto cp_tensor = tensor; + auto get_cp_npu_properties = + cp_tensor.storage_properties(); + CHECK_EQ(get_cp_npu_properties.storage_format, 1); + 
CHECK_EQ(get_cp_npu_properties.storage_layout, 2); +} + } // namespace tests } // namespace phi