Unverified commit 246fb841 authored by Chen Weihang, committed by GitHub

Add storage properties into DenseTensor to support extra device properties (#47527)

* add storage properties for npu

* fix compile failure

* fix api name mismatch

* polish design
Parent fe8c6796
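For context, a minimal usage sketch of the API this commit adds (the helper function and the numeric property values below are illustrative assumptions, not taken from this change):

// Illustrative sketch only: attach NPU-specific storage properties to a
// DenseTensor and read them back. AttachNPUProperties and the assigned
// values are hypothetical; only the DenseTensor / NPUStorageProperties
// interfaces come from this commit.
#include <memory>
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/storage_properties.h"

void AttachNPUProperties(phi::DenseTensor* tensor) {
  auto npu_properties = std::make_unique<phi::NPUStorageProperties>();
  npu_properties->storage_format = 1;  // assumed device-specific format id
  npu_properties->storage_layout = 2;  // assumed device-specific layout id
  tensor->set_storage_properties(std::move(npu_properties));

  // Query the properties back; requesting a different DeviceT than the one
  // stored raises an InvalidArgument error, and querying a tensor without
  // properties raises a PreconditionNotMet error.
  const auto& props = tensor->storage_properties<phi::NPUStorageProperties>();
  (void)props.storage_format;
}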
@@ -53,6 +53,8 @@ DenseTensor::DenseTensor(const std::shared_ptr<phi::Allocation>& holder,
DenseTensor::DenseTensor(const DenseTensor& other) : meta_(other.meta()) {
holder_ = other.holder_;
storage_properties_ =
std::move(CopyStorageProperties(other.storage_properties_));
inplace_version_counter_ = other.inplace_version_counter_;
#ifdef PADDLE_WITH_MKLDNN
@@ -64,6 +66,8 @@ DenseTensor::DenseTensor(const DenseTensor& other) : meta_(other.meta()) {
DenseTensor& DenseTensor::operator=(const DenseTensor& other) {
meta_ = other.meta();
holder_ = other.holder_;
storage_properties_ =
std::move(CopyStorageProperties(other.storage_properties_));
inplace_version_counter_ = other.inplace_version_counter_;
#ifdef PADDLE_WITH_MKLDNN
format_ = other.format_;
@@ -75,6 +79,7 @@ DenseTensor& DenseTensor::operator=(const DenseTensor& other) {
DenseTensor& DenseTensor::operator=(DenseTensor&& other) {
meta_ = std::move(other.meta_);
std::swap(holder_, other.holder_);
storage_properties_ = std::move(other.storage_properties_);
std::swap(inplace_version_counter_, other.inplace_version_counter_);
#ifdef PADDLE_WITH_MKLDNN
format_ = other.format_;
@@ -241,4 +246,29 @@ DATA_MEMBER_FUNC_INSTANTIATION(::phi::dtype::complex<double>);
#undef DATA_MEMBER_FUNC_INSTANTIATION
template <typename DeviceT>
const DeviceT& DenseTensor::storage_properties() const {
PADDLE_ENFORCE_NOT_NULL(
storage_properties_,
phi::errors::PreconditionNotMet(
"The storage_properties of current DenseTensor is nullptr."));
if (DeviceT::classof(storage_properties_.get())) {
return static_cast<DeviceT&>(*storage_properties_);
} else {
PADDLE_THROW(phi::errors::InvalidArgument(
"The actual type of storage_properties is inconsistent with the type "
"of the template parameter passed in."));
}
}
template const NPUStorageProperties& DenseTensor::storage_properties() const;
#ifdef PADDLE_WITH_MKLDNN
template const OneDNNStorageProperties& DenseTensor::storage_properties() const;
#endif
void DenseTensor::set_storage_properties(
std::unique_ptr<StorageProperties>&& storage_properties) {
storage_properties_ = std::move(storage_properties);
}
} // namespace phi
@@ -15,6 +15,7 @@ limitations under the License. */
#pragma once
#include "paddle/phi/core/allocator.h"
#include "paddle/phi/core/storage_properties.h"
#include "paddle/phi/core/stream.h"
#include "paddle/phi/core/tensor_base.h"
#include "paddle/phi/core/tensor_meta.h"
@@ -163,6 +164,16 @@ class DenseTensor : public TensorBase,
void* data();
/// \brief Returns the storage_properties of the tensor.
/// \return The storage_properties of the tensor.
template <typename DeviceT>
const DeviceT& storage_properties() const;
/// \brief Sets the storage_properties of the tensor.
/// \param storage_properties The storage_properties of the tensor.
void set_storage_properties(
std::unique_ptr<StorageProperties>&& storage_properties);
private:
friend class DenseTensorUtils;
@@ -170,6 +181,42 @@
DenseTensorMeta meta_;
std::shared_ptr<phi::Allocation> holder_;
/** [ Why need StorageProperties? ]
*
* 1. Some hardware and third-party libraries add extra storage properties
* on top of the basic DenseTensor description, such as the memory desc of
* MKLDNN or the storage_format and storage_layout of NPU. These members are
* necessary for optimal performance, but if each device's properties were
* added to DenseTensor under separate macro isolation, the memory layout of
* DenseTensor would become fragmented: the member layout would change with
* the compilation conditions, which may introduce bugs that are difficult
* to debug.
*
* 2. If a tensor's storage differs greatly from the framework's DenseTensor
* itself, it is recommended to inherit TensorBase directly to implement a
* SpatialTensor.
*
* TODO(chenweihang): merge dnnl::memory::desc and dnnl::memory::format_tag
* into StorageProperties; dnnl::memory::desc is a type that takes up a lot
* of space. Original tensor members' sizes:
*
* DenseTensor size: 880
* -------- ordered members --------:
* DenseTensorMeta size: 128
* - is_scalar_ size: 1
* - DDim size: 80
* - DataType size: 4
* - DataLayout size: 4
* - LoD size: 24
* - offset size: 8
* std::shared_ptr<phi::Allocation> size: 16
* std::shared_ptr<InplaceVersion> size: 16 // need to be moved
* dnnl::memory::format_tag size: 4 // need to be moved
* dnnl::memory::desc size: 696 // need to be moved
*/
std::unique_ptr<StorageProperties> storage_properties_{nullptr};
public:
/* Temporarily put InplaceVersion inside DenseTensor.
Will move to AutogradMeta as soon as we switch to Eager Dygraph.
@@ -370,6 +370,8 @@ DenseTensor& DenseTensor::ShareDataWith(const DenseTensor& src) {
meta_.dtype = src.meta_.dtype;
meta_.layout = src.meta_.layout;
meta_.offset = src.meta_.offset;
storage_properties_ =
std::move(CopyStorageProperties(src.storage_properties_));
#ifdef PADDLE_WITH_MKLDNN
format_ = src.format_;
mem_desc_ = src.mem_desc_;
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <memory>
#include "paddle/phi/core/utils/type_registry.h"
#ifdef PADDLE_WITH_MKLDNN
#include "dnnl.hpp" // NOLINT
#endif
namespace phi {
struct StorageProperties {
public:
virtual ~StorageProperties() = default;
TypeInfo<StorageProperties> type_info() const { return type_info_; }
private:
template <typename T, typename U>
friend class TypeInfoTraits;
TypeInfo<StorageProperties> type_info_{
TypeInfo<StorageProperties>::kUnknownType};
};
struct NPUStorageProperties
: public StorageProperties,
public TypeInfoTraits<StorageProperties, NPUStorageProperties> {
virtual ~NPUStorageProperties() = default;
static const char* name() { return "NPUStorageProperties"; }
int64_t storage_format;
int64_t storage_layout;
};
// Add OneDNNStorageProperties first for unittest coverage
#ifdef PADDLE_WITH_MKLDNN
struct OneDNNStorageProperties
: public StorageProperties,
public TypeInfoTraits<StorageProperties, OneDNNStorageProperties> {
virtual ~OneDNNStorageProperties() = default;
static const char* name() { return "OneDNNStorageProperties"; }
/**
* @brief the detailed format of a memory block whose layout is ONEDNN
*
* @note The ONEDNN library supports various memory formats such as nchw,
* nhwc, nChw8c, nChw16c, etc. For an ONEDNN memory block, the layout will
* be set to DataLayout::ONEDNN, while the detailed memory format is kept
* in this field.
*/
dnnl::memory::format_tag format = dnnl::memory::format_tag::undef;
/// \brief memory descriptor of a tensor whose layout is set to ONEDNN
dnnl::memory::desc mem_desc;
};
#endif
static std::unique_ptr<StorageProperties> CopyStorageProperties(
const std::unique_ptr<StorageProperties>& sp) {
if (sp) {
if (NPUStorageProperties::classof(sp.get())) {
auto result = std::make_unique<NPUStorageProperties>();
result->storage_format =
static_cast<NPUStorageProperties*>(sp.get())->storage_format;
result->storage_layout =
static_cast<NPUStorageProperties*>(sp.get())->storage_layout;
return result;
#ifdef PADDLE_WITH_MKLDNN
} else if (OneDNNStorageProperties::classof(sp.get())) {
auto result = std::make_unique<OneDNNStorageProperties>();
result->format = static_cast<OneDNNStorageProperties*>(sp.get())->format;
result->mem_desc =
static_cast<OneDNNStorageProperties*>(sp.get())->mem_desc;
return result;
#endif
} else {
return nullptr;
}
}
return nullptr;
}
} // namespace phi
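As a hedged illustration of how another device could plug into this mechanism (the type and field below are hypothetical and not part of this commit), a new properties struct inherits both StorageProperties and TypeInfoTraits with itself as the second template argument, so classof() can identify it at runtime:

// Hypothetical example: "MyDeviceStorageProperties" and its field are
// illustrative only. It would live inside namespace phi alongside the
// other StorageProperties subclasses.
struct MyDeviceStorageProperties
    : public StorageProperties,
      public TypeInfoTraits<StorageProperties, MyDeviceStorageProperties> {
  virtual ~MyDeviceStorageProperties() = default;
  static const char* name() { return "MyDeviceStorageProperties"; }

  int64_t block_size{0};  // assumed device-specific tiling information
};
// Note: CopyStorageProperties would also need a branch for the new type so
// that DenseTensor copy construction and assignment preserve it.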
@@ -18,6 +18,7 @@ limitations under the License. */
#include <map>
#include <mutex>
#include <string>
#include <vector>
#include "paddle/phi/core/utils/type_info.h"
@@ -129,5 +129,57 @@ TEST(dense_tensor, shallow_copy) {
CHECK(tensor_0.meta() == tensor_1.meta());
}
struct TestStorageProperties
: public StorageProperties,
public TypeInfoTraits<StorageProperties, TestStorageProperties> {
virtual ~TestStorageProperties() = default;
static const char* name() { return "TestStorageProperties"; }
};
TEST(dense_tensor, storage_properties) {
const DataType dtype{DataType::FLOAT32};
const DDim dims({1, 2});
DenseTensorMeta meta(dtype, dims);
auto fancy_allocator = std::unique_ptr<Allocator>(new FancyAllocator);
DenseTensor tensor(fancy_allocator.get(), meta);
// test no storage properties
bool caught_exception = false;
try {
tensor.storage_properties<NPUStorageProperties>();
} catch (phi::enforce::EnforceNotMet& error) {
caught_exception = true;
}
EXPECT_TRUE(caught_exception);
// test custom device storage properties
auto npu_properties = std::make_unique<NPUStorageProperties>();
npu_properties->storage_format = 1;
npu_properties->storage_layout = 2;
tensor.set_storage_properties(std::move(npu_properties));
auto get_npu_properties = tensor.storage_properties<NPUStorageProperties>();
CHECK_EQ(get_npu_properties.storage_format, 1);
CHECK_EQ(get_npu_properties.storage_layout, 2);
// test error type storage properties
#ifdef PADDLE_WITH_MKLDNN
caught_exception = false;
try {
tensor.storage_properties<OneDNNStorageProperties>();
} catch (phi::enforce::EnforceNotMet& error) {
caught_exception = true;
}
EXPECT_TRUE(caught_exception);
#endif
// test copy storage properties
auto cp_tensor = tensor;
auto get_cp_npu_properties =
cp_tensor.storage_properties<NPUStorageProperties>();
CHECK_EQ(get_cp_npu_properties.storage_format, 1);
CHECK_EQ(get_cp_npu_properties.storage_layout, 2);
}
} // namespace tests
} // namespace phi