From 788636f078fae8b9b68e3afcf8e0eee5f52bc4fc Mon Sep 17 00:00:00 2001
From: typhoonzero
Date: Wed, 18 Apr 2018 13:28:41 +0800
Subject: [PATCH] update by comments

---
 paddle/fluid/framework/tensor.h         |  3 ---
 paddle/fluid/framework/tensor_impl.h    | 31 -------------------------------
 paddle/fluid/operators/split_byref_op.h |  7 +++----
 3 files changed, 3 insertions(+), 38 deletions(-)

diff --git a/paddle/fluid/framework/tensor.h b/paddle/fluid/framework/tensor.h
index f30dcc000b7..5a6b24bfafb 100644
--- a/paddle/fluid/framework/tensor.h
+++ b/paddle/fluid/framework/tensor.h
@@ -98,9 +98,6 @@ class Tensor {
   /*! The internal of two tensors share the same memory block. */
   inline Tensor& ShareDataWith(const Tensor& src);
 
-  /*! Share part of the memory of the two tensors */
-  inline Tensor& ShareDataWith(const Tensor* src, size_t offset);
-
   /**
    * @brief   Return a sub-tensor of the given tensor.
    *
diff --git a/paddle/fluid/framework/tensor_impl.h b/paddle/fluid/framework/tensor_impl.h
index a177ef74166..f49d1a47a32 100644
--- a/paddle/fluid/framework/tensor_impl.h
+++ b/paddle/fluid/framework/tensor_impl.h
@@ -162,37 +162,6 @@ inline Tensor& Tensor::ShareDataWith(const Tensor& src) {
   return *this;
 }
 
-inline Tensor& Tensor::ShareDataWith(const Tensor* src, size_t offset) {
-  // NOTE: data size is determined by current tensor shape and data type
-  src->check_memory_size();
-  PADDLE_ENFORCE_EQ(src->type(), this->type(),
-                    "tensor data type must be the same when sharing data");
-  auto place = src->place();
-  auto type = src->type();
-  size_t size = src->numel() * SizeOfType(src->type());
-  auto* ref = src->data<uint8_t>() + offset;
-  if (platform::is_cpu_place(place)) {
-    holder_.reset(new SharedPlaceholderImpl<platform::CPUPlace>(
-        boost::get<platform::CPUPlace>(place), ref, size, type));
-  } else if (platform::is_gpu_place(place) ||
-             platform::is_cuda_pinned_place(place)) {
-#ifndef PADDLE_WITH_CUDA
-    PADDLE_THROW(
-        "CUDAPlace or CUDAPinnedPlace is not supported in CPU-only mode.");
-  }
-#else
-    if (platform::is_gpu_place(place)) {
-      holder_.reset(new SharedPlaceholderImpl<platform::CUDAPlace>(
-          boost::get<platform::CUDAPlace>(place), ref, size, type));
-    } else if (platform::is_cuda_pinned_place(place)) {
-      holder_.reset(new SharedPlaceholderImpl<platform::CUDAPinnedPlace>(
-          boost::get<platform::CUDAPinnedPlace>(place), ref, size, type));
-    }
-  }
-#endif
-  return *this;
-}
-
 inline Tensor Tensor::Slice(int begin_idx, int end_idx) const {
   check_memory_size();
   PADDLE_ENFORCE_GE(begin_idx, 0,
diff --git a/paddle/fluid/operators/split_byref_op.h b/paddle/fluid/operators/split_byref_op.h
index 7c3ab1c1b9d..9b54c7c74ac 100644
--- a/paddle/fluid/operators/split_byref_op.h
+++ b/paddle/fluid/operators/split_byref_op.h
@@ -26,15 +26,14 @@ class SplitByrefOpKernel : public framework::OpKernel<T> {
   void Compute(const framework::ExecutionContext& ctx) const override {
     auto* in = ctx.Input<framework::Tensor>("X");
     auto outs = ctx.MultiOutput<framework::Tensor>("Out");
-    auto in_stride = framework::stride_numel(in->dims());
     auto place = ctx.GetPlace();
 
-    size_t input_offset = 0;
+    size_t row_offset = 0;
     for (size_t i = 0; i < outs.size(); ++i) {
       // NOTE: no need to call mutable_data here to allocate memory.
      auto* out = outs[i];
-      out->ShareDataWith(in, input_offset);
-      input_offset += out->numel() * framework::SizeOfType(out->type());
+      *out = std::move(in->Slice(row_offset, row_offset + out->dims()[0]));
+      row_offset += out->dims()[0];
     }
   }
 };
-- 
GitLab