From f86d35a269a94e8af7bec5945be01ab0acd76730 Mon Sep 17 00:00:00 2001
From: typhoonzero <typhoonzero1986@gmail.com>
Date: Mon, 16 Apr 2018 17:11:11 +0800
Subject: [PATCH] add sharable tensor

---
 paddle/fluid/framework/tensor.h      | 29 ++++++++++++++++++++++++++
 paddle/fluid/framework/tensor_impl.h | 31 ++++++++++++++++++++++++++++
 2 files changed, 60 insertions(+)

diff --git a/paddle/fluid/framework/tensor.h b/paddle/fluid/framework/tensor.h
index 6f878541e6..1e5c68a1b9 100644
--- a/paddle/fluid/framework/tensor.h
+++ b/paddle/fluid/framework/tensor.h
@@ -98,6 +98,9 @@ class Tensor {
   /*! The internal of two tensors share the same memory block. */
   inline Tensor& ShareDataWith(const Tensor& src);
 
+  /*! Share part of the memory of the two tensors */
+  inline Tensor& ShareDataWith(Tensor* src, size_t offset);
+
   /**
    * @brief   Return a sub-tensor of the given tensor.
    *
@@ -176,6 +179,32 @@ class Tensor {
     std::type_index type_;
   };
 
+  template <typename Place>
+  struct SharedPlaceholderImpl : public Placeholder {
+    SharedPlaceholderImpl(Place place, uint8_t* data, size_t size,
+                          std::type_index type)
+        : ptr_(data), place_(place), size_(size), type_(type) {}
+
+    virtual size_t size() const { return size_; }
+    virtual platform::Place place() const { return place_; }
+    virtual void* ptr() const { return static_cast<void*>(ptr_); }
+    virtual std::type_index type() const { return type_; }
+    virtual void set_type(std::type_index type) { type_ = type; }
+    virtual void set_place(platform::Place place) { place_ = place; }
+
+    /*! the pointer of memory block. */
+    uint8_t* ptr_;
+
+    /*! the place of memory block. */
+    platform::Place place_;
+
+    /*! the size of memory block. */
+    size_t size_;
+
+    /* the current type of memory */
+    std::type_index type_;
+  };
+
   /*! holds the memory block if allocated. */
   std::shared_ptr<Placeholder> holder_;
 
diff --git a/paddle/fluid/framework/tensor_impl.h b/paddle/fluid/framework/tensor_impl.h
index f49d1a47a3..98d53fd1e7 100644
--- a/paddle/fluid/framework/tensor_impl.h
+++ b/paddle/fluid/framework/tensor_impl.h
@@ -162,6 +162,37 @@ inline Tensor& Tensor::ShareDataWith(const Tensor& src) {
   return *this;
 }
 
+inline Tensor& Tensor::ShareDataWith(Tensor* src, size_t offset) {
+  // NOTE: data size is determined by current tensor shape and data type
+  src->check_memory_size();
+  PADDLE_ENFORCE_EQ(src->type(), this->type(),
+                    "tensor data type must be the same when sharing data");
+  auto place = src->place();
+  auto type = src->type();
+  size_t size = src->numel() * SizeOfType(src->type());
+  auto* ref = static_cast<uint8_t*>(src->mutable_data(place)) + offset;
+  if (platform::is_cpu_place(place)) {
+    holder_.reset(new SharedPlaceholderImpl<platform::CPUPlace>(
+        boost::get<platform::CPUPlace>(place), ref, size, type));
+  } else if (platform::is_gpu_place(place) ||
+             platform::is_cuda_pinned_place(place)) {
+#ifndef PADDLE_WITH_CUDA
+    PADDLE_THROW(
+        "CUDAPlace or CUDAPinnedPlace is not supported in CPU-only mode.");
+  }
+#else
+    if (platform::is_gpu_place(place)) {
+      holder_.reset(new SharedPlaceholderImpl<platform::CUDAPlace>(
+          boost::get<platform::CUDAPlace>(place), ref, size, type));
+    } else if (platform::is_cuda_pinned_place(place)) {
+      holder_.reset(new SharedPlaceholderImpl<platform::CUDAPinnedPlace>(
+          boost::get<platform::CUDAPinnedPlace>(place), ref, size, type));
+    }
+  }
+#endif
+  return *this;
+}
+
 inline Tensor Tensor::Slice(int begin_idx, int end_idx) const {
   check_memory_size();
   PADDLE_ENFORCE_GE(begin_idx, 0,
-- 
GitLab