Unverified commit 42cf2bee. Author: Zhanlue Yang. Committer: GitHub.

[Unify Tensors PR #1] Replaced pten::Allocation with shared_ptr<memory::Allocation> for Storage (#38301)

* Added shared_ptr<Allocation> member & corresponding interfaces to Storage

* Removed original pten::Allocation from Storage and adjusted the interfaces accordingly

* Fixed issues with storage offset

* Used place to malloc allocation for TensorStorage
Parent: 52329f6f
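The commit message is terse, so here is a minimal, standalone C++ sketch of the storage layout the diffs below converge on: a shared allocation handle plus an explicit byte offset, with the effective data pointer computed on demand. MockAllocation and MockStorage are illustrative stand-ins invented for this sketch, not the Paddle types; the real classes appear verbatim in the hunks that follow.

#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>

// Stand-in for paddle::memory::Allocation: a buffer plus the place it lives at.
struct MockAllocation {
  void* ptr_ = nullptr;
  std::size_t size_ = 0;
  std::string place_;  // stand-in for paddle::platform::Place

  void* ptr() const { return ptr_; }
  std::size_t size() const { return size_; }
  const std::string& place() const { return place_; }
};

// Stand-in for the reworked pten::Storage: the allocation is shared, the
// offset is stored separately, and data() combines the two.
class MockStorage {
 public:
  MockStorage(std::shared_ptr<MockAllocation> data, std::size_t offset)
      : data_(std::move(data)), offset_(offset) {}

  // Effective pointer = base pointer of the shared allocation + offset.
  void* data() const {
    return data_ ? reinterpret_cast<void*>(
                       reinterpret_cast<std::uintptr_t>(data_->ptr()) + offset_)
                 : nullptr;
  }

  // Storages built from the same shared_ptr alias one underlying buffer.
  const std::shared_ptr<MockAllocation>& data_shared() const { return data_; }

  void Clear() {
    data_ = nullptr;
    offset_ = 0;
  }

 private:
  std::shared_ptr<MockAllocation> data_;
  std::size_t offset_ = 0;
};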
@@ -38,6 +38,8 @@ class DefaultAllocator : public pten::Allocator {
     return Allocation(ptr, a.release(), &Delete, place_);
   }
 
+  const paddle::platform::Place& place() override { return place_; }
+
  private:
   paddle::platform::Place place_;
   static paddle::memory::Allocator::AllocationDeleter deleter_;
...
@@ -20,13 +20,15 @@ namespace experimental {
 ExternalStorage::ExternalStorage(void* ptr,
                                  size_t size,
                                  const paddle::platform::Place& place)
-    : pten::Storage(pten::Allocation(ptr, place)), size_(size) {}
+    : pten::Storage(
+          std::make_shared<paddle::memory::Allocation>(ptr, size, place)),
+      size_(size) {}
 
 ExternalStorage::ExternalStorage(const pten::intrusive_ptr<pten::Storage>& root,
                                  size_t delta,
                                  size_t size)
-    : Storage(pten::Allocation(static_cast<uint8_t*>(root->data()) + delta,
-                               root->place())),
+    : Storage(std::make_shared<paddle::memory::Allocation>(
+          static_cast<uint8_t*>(root->data()) + delta, size, root->place())),
       size_(size) {
   PADDLE_ENFORCE_LE(static_cast<size_t>(delta + size),
                     root->size(),
...
@@ -35,13 +35,18 @@ class ExternalStorage : public pten::Storage {
   }
 
   void Clear() override {
-    data_.Clear();
+    data_ = nullptr;
     size_ = 0;
+    offset_ = 0;
   }
 
   size_t size() const noexcept override { return size_; }
   const paddle::platform::Place& place() const override {
-    return data_.place();
+    PADDLE_ENFORCE_NOT_NULL(
+        data_,
+        paddle::platform::errors::Unavailable(
+            "Unable to visit place as data_ has not been initialized yet."));
+    return data_->place();
   }
   bool OwnsMemory() const noexcept override { return false; }
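A short, hedged usage sketch of the reworked ExternalStorage constructor shown in the two hunks above. The header path is elided in this diff view and only the inner experimental namespace is visible, so the paddle::experimental qualification and the missing #include are assumptions.

#include <vector>
// #include of the storage utils header omitted: its path is elided in this view.

void WrapExternalBuffer() {
  std::vector<float> buffer(256);

  // The pointer, size, and place are recorded through a shared
  // paddle::memory::Allocation, but the buffer is never freed by the storage.
  paddle::experimental::ExternalStorage storage(
      buffer.data(), buffer.size() * sizeof(float),
      paddle::platform::CPUPlace());

  // OwnsMemory() stays false: destroying `storage` must not free `buffer`.
}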
@@ -54,74 +59,61 @@ class SharedStorage : public pten::Storage {
   explicit SharedStorage(
       const std::shared_ptr<paddle::memory::Allocation>& allocation,
       size_t offset)
-      : allocation_(allocation) {
+      : Storage(allocation) {
     CHECK(allocation);
-    data_ = pten::Allocation(
-        reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(allocation->ptr()) +
-                                offset),
-        allocation->place());
+    place_ = allocation->place();
     size_ = allocation->size();
+    offset_ = offset;
   }
 
   // In order to be compatible with the original Tensor design and execution
   // system, we need to allow the uninitialized SharedStorage to exist,
   // and it can be removed after the compatibility phase is over in the future
   explicit SharedStorage(const paddle::platform::Place& place) {
-    data_ = pten::Allocation(nullptr, place);
+    place_ = place;
   }
 
+  static const char* name() { return "SharedStorage"; }
+
+  // In order to be compatible with the original Tensor design and execution
+  // system, we need to allow the SharedStorage realloc,
+  // and it can be removed after the compatibility phase is over in the future
   void Realloc(size_t n) override {
-    ResetAllocation(paddle::memory::AllocShared(place(), n), 0);
+    this->Clear();
+    data_ = paddle::memory::AllocShared(place(), n);
+    size_ = n;
   }
 
-  static const char* name() { return "SharedStorage"; }
-
   void Clear() override {
-    data_.Clear();
+    data_ = nullptr;
     size_ = 0;
   }
 
   size_t size() const noexcept override { return size_; }
-  const paddle::platform::Place& place() const override {
-    return data_.place();
-  }
+  const paddle::platform::Place& place() const override { return place_; }
   bool OwnsMemory() const noexcept override { return false; }
 
   const std::shared_ptr<paddle::memory::Allocation>& GetAllocation() {
-    return allocation_;
+    return data_;
  }
 
   // Temporary method: For compatible with fluid Tensor and improve performance
   void ResetAllocation(std::shared_ptr<paddle::memory::Allocation> allocation,
                        size_t offset) {
-    allocation_ = allocation;
-    data_ = pten::Allocation(
-        reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(allocation->ptr()) +
-                                offset),
-        allocation->place());
+    data_ = allocation;
     size_ = allocation->size();
+    place_ = allocation->place();
+    offset_ = offset;
   }
 
   // Temporary method: For compatible with fluid Tensor and improve performance
   void ResetAllocationPlace(const paddle::platform::Place& place) {
-    data_ = pten::Allocation(nullptr, place);
+    place_ = place;
   }
 
   // Temporary method: For compatible with fluid Tensor and improve performance
-  void Reset() {
-    if (allocation_ != nullptr) {
-      allocation_.reset();
-    }
-    data_.Clear();
-    size_ = 0;
-  }
+  void Reset() { this->Clear(); }
 
  private:
+  Place place_;
   int64_t size_{0};
-  std::shared_ptr<paddle::memory::Allocation> allocation_;
 };
 
 class TensorStorage : public paddle::memory::allocation::Allocation {
...
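And a similar hedged sketch for SharedStorage: after this change the shared_ptr handed in is stored directly as the base-class data_, the byte offset lives in offset_, and GetAllocation() returns that same handle. As above, the header path and the paddle::experimental qualification are not visible in this view and are assumptions.

void ShareFluidAllocation() {
  // AllocShared and CPUPlace are used exactly as in the hunks above.
  auto alloc = paddle::memory::AllocShared(paddle::platform::CPUPlace(), 1024);

  // Namespace qualification is an assumption (only `experimental` is visible).
  paddle::experimental::SharedStorage storage(alloc, /*offset=*/256);

  // data() resolves to alloc->ptr() + 256 through the base-class offset_,
  // while GetAllocation() hands back the very same shared_ptr.
  void* view = storage.data();
  CHECK(view != nullptr);
  CHECK(storage.GetAllocation() == alloc);

  // Reset() just drops the shared handle; the buffer stays alive for as long
  // as `alloc` (or any other copy of the shared_ptr) is alive.
  storage.Reset();
}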
@@ -134,9 +134,12 @@ inline void swap(Allocation& a, Allocation& b) noexcept {
 /// mainly used for general data structures such as Tensor. The raw
 /// allocator is more universal and efficient.
 class Allocator {
+  using Place = paddle::platform::Place;
+
  public:
   virtual ~Allocator() = default;
   virtual Allocation Allocate(size_t bytes_size) = 0;
+  virtual const Place& place() = 0;
 };
 
 inline Allocation Allocate(const std::shared_ptr<Allocator>& a, size_t n) {
...
@@ -17,8 +17,8 @@ limitations under the License. */
 namespace pten {
 
 void TensorStorage::Realloc(size_t size) {
-  data_.Clear();
-  data_ = Allocate(alloc_, size);
+  this->Clear();
+  data_ = paddle::memory::AllocShared(alloc_->place(), size);
   size_ = size;
 }
...
@@ -21,6 +21,7 @@ limitations under the License. */
 #include "paddle/pten/core/utils/intrusive_ref_counter.h"
 #include "paddle/pten/core/utils/type_info.h"
 
+#include "paddle/fluid/memory/memory.h"
 #include "paddle/fluid/platform/place.h"
 #include "paddle/pten/core/allocator.h"
@@ -35,14 +36,32 @@ class Storage : public intrusive_ref_counter<Storage> {
   Storage() = default;
   Storage(const Storage&) = delete;
 
-  explicit Storage(Allocation&& data) : data_(std::move(data)) {}
+  /* --------- shared_ptr<Allocation> -------- */
+  // Initialize a Storage with unique Allocation
+  explicit Storage(std::shared_ptr<paddle::memory::Allocation>&& data)
+      : data_(std::move(data)) {}
 
-  virtual ~Storage() = default;
+  // Initialize a Storage shareing Allocation with another storage
+  explicit Storage(const std::shared_ptr<paddle::memory::Allocation>& data)
+      : data_(data) {}
 
-  /// \brief Get the mutable data pointer of the storage.
-  /// This function is set to inline to improve performance.
-  /// \return The mutable data pointer of the storage.
-  void* data() const noexcept { return data_.operator->(); }
+  void* data() const {
+    return data_ ? reinterpret_cast<void*>(
+                       reinterpret_cast<uintptr_t>(data_->ptr()) + offset_)
+                 : nullptr;
+  }
+
+  const std::shared_ptr<paddle::memory::Allocation> data_shared() const {
+    return data_;
+  }
+
+  virtual void ReallocShared(size_t n) {
+    PADDLE_THROW(paddle::platform::errors::Unimplemented(
+        "ReallocShared has not been overrided by the current Storage"));
+  }
+  /* --------- shared_ptr<Allocation> -------- */
+
+  virtual ~Storage() = default;
 
   virtual void Clear() = 0;
@@ -52,7 +71,8 @@ class Storage : public intrusive_ref_counter<Storage> {
   virtual void Realloc(size_t n) = 0;
 
  protected:
-  Allocation data_;
+  size_t offset_{0};
+  std::shared_ptr<paddle::memory::Allocation> data_;
 };
 
 class TensorStorage : public Storage {
@@ -60,23 +80,38 @@ class TensorStorage : public Storage {
   using Place = paddle::platform::Place;
 
   explicit TensorStorage(const std::shared_ptr<Allocator>& a) : alloc_(a) {}
 
   TensorStorage(const std::shared_ptr<Allocator>& a, size_t size)
-      : Storage(Allocate(a, size)), alloc_(a), size_(size) {}
+      : Storage(paddle::memory::AllocShared(a->place(), size)), alloc_(a) {
+    size_ = data_->size();
+  }
+
+  void Clear() override {
+    data_ = nullptr;
+    size_ = 0;
+    offset_ = 0;
+  }
+
+  void Realloc(size_t size) override;
 
   ~TensorStorage() = default;
 
   static const char* name() { return "TensorStorage"; }
 
-  void Realloc(size_t size) override;
-
   size_t size() const noexcept override { return size_; }
 
-  void Clear() override {
-    data_.Clear();
-    size_ = 0;
+  const Place& place() const override {
+    if (!data_ && !alloc_) {
+      PADDLE_THROW(paddle::platform::errors::Unimplemented(
+          "Unable to visit place: either data_ or alloc_ has to be initialized "
+          "first."));
+    }
+    if (data_) {
+      return data_->place();
+    }
+    return alloc_->place();
   }
 
-  const Place& place() const override { return data_.place(); }
   bool OwnsMemory() const noexcept override { return true; }
 
   const std::shared_ptr<Allocator>& allocator() const noexcept {
     return alloc_;
...
@@ -44,8 +44,12 @@ class FancyAllocator : public pten::Allocator {
   Allocation Allocate(size_t bytes_size) override {
     void* data = ::operator new(bytes_size);
-    return Allocation(data, data, &Delete, paddle::platform::CPUPlace());
+    return Allocation(data, data, &Delete, place());
   }
 
+  const paddle::platform::Place& place() override { return place_; }
+
+  paddle::platform::Place place_ = paddle::platform::CPUPlace();
 };
 
 template <typename T>
...
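Finally, a hedged sketch tying the TensorStorage changes to the test allocator above: the sized constructor now mallocs through paddle::memory::AllocShared at the allocator's place() (the fourth bullet of the commit message), and place() falls back to the allocator once the storage has been cleared. It assumes FancyAllocator is default-constructible and reachable from the caller, which the hunk suggests but does not show.

void AllocateAtAllocatorPlace() {
  // Any pten::Allocator works here; FancyAllocator reports CPUPlace via place().
  auto allocator = std::make_shared<FancyAllocator>();

  // The sized constructor calls paddle::memory::AllocShared(a->place(), size),
  // so the buffer is allocated at the allocator's place.
  pten::TensorStorage storage(allocator, /*size=*/4096);

  auto before = storage.place();  // data_->place(): CPUPlace
  storage.Clear();
  auto after = storage.place();   // alloc_->place(): still CPUPlace
  (void)before;
  (void)after;
}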