diff --git a/paddle/fluid/framework/tensor.h b/paddle/fluid/framework/tensor.h
index f30dcc000b71428bcd8cbed64bd143de086cbacd..5a6b24bfafbe76c2c1a1ed309ece33d64cc969a7 100644
--- a/paddle/fluid/framework/tensor.h
+++ b/paddle/fluid/framework/tensor.h
@@ -98,9 +98,6 @@ class Tensor {
   /*! The internal of two tensors share the same memory block. */
   inline Tensor& ShareDataWith(const Tensor& src);
 
-  /*! Share part of the memory of the two tensors */
-  inline Tensor& ShareDataWith(const Tensor* src, size_t offset);
-
   /**
    * @brief   Return a sub-tensor of the given tensor.
    *
diff --git a/paddle/fluid/framework/tensor_impl.h b/paddle/fluid/framework/tensor_impl.h
index a177ef74166f202a205c6d306e84915e0f8f1129..f49d1a47a325b2aac6185073203df124be18b54d 100644
--- a/paddle/fluid/framework/tensor_impl.h
+++ b/paddle/fluid/framework/tensor_impl.h
@@ -162,37 +162,6 @@ inline Tensor& Tensor::ShareDataWith(const Tensor& src) {
   return *this;
 }
 
-inline Tensor& Tensor::ShareDataWith(const Tensor* src, size_t offset) {
-  // NOTE: data size is determined by current tensor shape and data type
-  src->check_memory_size();
-  PADDLE_ENFORCE_EQ(src->type(), this->type(),
-                    "tensor data type must be the same when sharing data");
-  auto place = src->place();
-  auto type = src->type();
-  size_t size = src->numel() * SizeOfType(src->type());
-  auto* ref = src->data<uint8_t>() + offset;
-  if (platform::is_cpu_place(place)) {
-    holder_.reset(new SharedPlaceholderImpl<platform::CPUPlace>(
-        boost::get<platform::CPUPlace>(place), ref, size, type));
-  } else if (platform::is_gpu_place(place) ||
-             platform::is_cuda_pinned_place(place)) {
-#ifndef PADDLE_WITH_CUDA
-    PADDLE_THROW(
-        "CUDAPlace or CUDAPinnedPlace is not supported in CPU-only mode.");
-  }
-#else
-    if (platform::is_gpu_place(place)) {
-      holder_.reset(new SharedPlaceholderImpl<platform::CUDAPlace>(
-          boost::get<platform::CUDAPlace>(place), ref, size, type));
-    } else if (platform::is_cuda_pinned_place(place)) {
-      holder_.reset(new SharedPlaceholderImpl<platform::CUDAPinnedPlace>(
-          boost::get<platform::CUDAPinnedPlace>(place), ref, size, type));
-    }
-  }
-#endif
-  return *this;
-}
-
 inline Tensor Tensor::Slice(int begin_idx, int end_idx) const {
   check_memory_size();
   PADDLE_ENFORCE_GE(begin_idx, 0,
diff --git a/paddle/fluid/operators/split_byref_op.h b/paddle/fluid/operators/split_byref_op.h
index 7c3ab1c1b9d9550c63b56056746c6223ce1b9c77..9b54c7c74acb512b9493d603e83380b9a92ac91b 100644
--- a/paddle/fluid/operators/split_byref_op.h
+++ b/paddle/fluid/operators/split_byref_op.h
@@ -26,15 +26,14 @@ class SplitByrefOpKernel : public framework::OpKernel<T> {
   void Compute(const framework::ExecutionContext& ctx) const override {
     auto* in = ctx.Input<framework::Tensor>("X");
     auto outs = ctx.MultiOutput<framework::Tensor>("Out");
-    auto in_stride = framework::stride_numel(in->dims());
     auto place = ctx.GetPlace();
 
-    size_t input_offset = 0;
+    size_t row_offset = 0;
     for (size_t i = 0; i < outs.size(); ++i) {
       // NOTE: no need to call mutable_data here to allocate memory.
       auto* out = outs[i];
-      out->ShareDataWith(in, input_offset);
-      input_offset += out->numel() * framework::SizeOfType(out->type());
+      *out = std::move(in->Slice(row_offset, out->dims()[0]));
+      row_offset += out->dims()[0];
     }
   }
 };
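
For context, the patch drops the offset-based `ShareDataWith(const Tensor*, size_t)` overload because `Tensor::Slice` already produces a view that shares the source allocation, so the split kernel does not need a second sharing mechanism. Below is a minimal sketch (not part of the patch, and the function name is illustrative) of that property, assuming the `framework::Tensor` API referenced in the diff (`Resize`, `mutable_data<T>`, `data<T>`, `Slice`):

```cpp
#include "paddle/fluid/framework/ddim.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/platform/place.h"

// Illustrative only: shows that Slice(begin, end) returns a row-range view
// that shares the source tensor's holder instead of copying its data.
void SliceSharesMemoryExample() {
  namespace fw = paddle::framework;
  fw::Tensor src;
  src.Resize(fw::make_ddim({4, 3}));
  float* src_data = src.mutable_data<float>(paddle::platform::CPUPlace());

  // View of rows [2, 4); no allocation or copy happens here.
  fw::Tensor view = src.Slice(2, 4);

  // Writing through the view is visible in the source tensor, because the
  // underlying memory block is shared.
  view.data<float>()[0] = 1.0f;
  // src_data[2 * 3 + 0] now holds 1.0f.
  (void)src_data;
}
```

Relying on `Slice` this way keeps ownership in a single place (the shared holder plus an offset) rather than duplicating it in a separate `SharedPlaceholderImpl` path, which is presumably why the overload could be deleted.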