From 4a7f0698e0b7169022409b0f962e7c7d24caab85 Mon Sep 17 00:00:00 2001
From: Michal Gallus
Date: Tue, 14 Aug 2018 13:29:44 +0200
Subject: [PATCH] Add consts to new MKLDNN integration

Also replace memory types from int64_t to size_t
---
 paddle/fluid/framework/tensor.cc         |  6 +++---
 paddle/fluid/framework/tensor.h          |  8 ++++----
 paddle/fluid/framework/tensor_impl.h     |  4 ++--
 paddle/fluid/operators/conv_mkldnn_op.cc | 10 +++++-----
 4 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/paddle/fluid/framework/tensor.cc b/paddle/fluid/framework/tensor.cc
index 222a51672..d61dbb98a 100644
--- a/paddle/fluid/framework/tensor.cc
+++ b/paddle/fluid/framework/tensor.cc
@@ -32,7 +32,7 @@ size_t Tensor::memory_size() const {
 }
 
 void* Tensor::mutable_data(platform::Place place, std::type_index type,
-                           int64_t requested_size) {
+                           size_t requested_size) {
   if (holder_ != nullptr) {
     holder_->set_type(type);
   }
@@ -40,7 +40,7 @@ void* Tensor::mutable_data(platform::Place place, std::type_index type,
                  "When calling this method, the Tensor's numel must be "
                  "equal or larger than zero. "
                  "Please check Tensor::Resize has been called first.");
-  int64_t size = requested_size ? requested_size : numel() * SizeOfType(type);
+  size_t size = requested_size ? requested_size : numel() * SizeOfType(type);
   /* some versions of boost::variant don't have operator!= */
   if (holder_ == nullptr || !(holder_->place() == place) ||
       holder_->size() < size + offset_) {
@@ -69,7 +69,7 @@ void* Tensor::mutable_data(platform::Place place, std::type_index type,
                                  offset_);
 }
 
-void* Tensor::mutable_data(platform::Place place, int64_t requested_size) {
+void* Tensor::mutable_data(platform::Place place, size_t requested_size) {
   PADDLE_ENFORCE(this->holder_ != nullptr,
                  "Cannot invoke mutable data if current hold nothing.");
   return mutable_data(place, holder_->type(), requested_size);
diff --git a/paddle/fluid/framework/tensor.h b/paddle/fluid/framework/tensor.h
index a4454c90b..4cf95fa0a 100644
--- a/paddle/fluid/framework/tensor.h
+++ b/paddle/fluid/framework/tensor.h
@@ -89,12 +89,12 @@ class Tensor {
    * @note If not exist, then allocation.
    */
   template <typename T>
-  T* mutable_data(platform::Place place, int64_t requested_size = 0);
+  T* mutable_data(platform::Place place, size_t requested_size = 0);
 
   void* mutable_data(platform::Place place, std::type_index type,
-                     int64_t requested_size = 0);
+                     size_t requested_size = 0);
 
-  void* mutable_data(platform::Place place, int64_t requested_size = 0);
+  void* mutable_data(platform::Place place, size_t requested_size = 0);
 
   /**
    * @brief Return a pointer to mutable memory block.
@@ -106,7 +106,7 @@ class Tensor {
    * @note If not exist, then allocation.
    */
   template <typename T>
-  T* mutable_data(DDim dims, platform::Place place, int64_t requested_size = 0);
+  T* mutable_data(DDim dims, platform::Place place, size_t requested_size = 0);
 
   /*! Return the dimensions of the memory block. */
   const DDim& dims() const;
diff --git a/paddle/fluid/framework/tensor_impl.h b/paddle/fluid/framework/tensor_impl.h
index ea10c9a26..6d3047c95 100644
--- a/paddle/fluid/framework/tensor_impl.h
+++ b/paddle/fluid/framework/tensor_impl.h
@@ -47,14 +47,14 @@ inline T* Tensor::data() {
 
 template <typename T>
 inline T* Tensor::mutable_data(DDim dims, platform::Place place,
-                               int64_t requested_size) {
+                               size_t requested_size) {
   static_assert(std::is_pod<T>::value, "T must be POD");
   Resize(dims);
   return mutable_data<T>(place, requested_size);
 }
 
 template <typename T>
-inline T* Tensor::mutable_data(platform::Place place, int64_t requested_size) {
+inline T* Tensor::mutable_data(platform::Place place, size_t requested_size) {
   static_assert(std::is_pod<T>::value, "T must be POD");
   return reinterpret_cast<T*>(mutable_data(place, typeid(T), requested_size));
 }
diff --git a/paddle/fluid/operators/conv_mkldnn_op.cc b/paddle/fluid/operators/conv_mkldnn_op.cc
index 77d0cf07a..d75e6412c 100644
--- a/paddle/fluid/operators/conv_mkldnn_op.cc
+++ b/paddle/fluid/operators/conv_mkldnn_op.cc
@@ -53,15 +53,15 @@ class ConvMKLDNNHandler : public platform::MKLDNNHandler {
     key_ += "-BWD";
   }
 
-  size_t GetDstMemorySize() {
+  size_t GetDstMemorySize() const {
     return conv_pd_->dst_primitive_desc().get_size();
   }
 
-  size_t GetDiffWeightsMemorySize() {
+  size_t GetDiffWeightsMemorySize() const {
     return conv_bwd_weights_pd_->diff_weights_primitive_desc().get_size();
   }
 
-  size_t GetDiffSourceMemorySize() {
+  size_t GetDiffSourceMemorySize() const {
     return conv_bwd_data_pd_->diff_src_primitive_desc().get_size();
   }
 
@@ -491,7 +491,7 @@ class ConvMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> {
             handler.AcquireDiffDstMemoryFromWeightsPrimitive(
                 user_diff_dst_memory_p, pipeline);
 
-        size_t size = handler.GetDiffWeightsMemorySize();
+        const size_t size = handler.GetDiffWeightsMemorySize();
         filter_grad_data = filter_grad->mutable_data<T>(ctx.GetPlace(), size);
 
         auto diff_weights_memory_p =
@@ -516,7 +516,7 @@ class ConvMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> {
            handler.AcquireDiffDstMemoryFromDataPrimitive(user_diff_dst_memory_p,
                                                          pipeline);
 
-        size_t size = handler.GetDiffSourceMemorySize();
+        const size_t size = handler.GetDiffSourceMemorySize();
         input_grad_data = input_grad->mutable_data<T>(ctx.GetPlace(), size);
 
         auto diff_src_memory_p = handler.AcquireDiffSrcMemoryFromDataPrimitive(
-- 
GitLab
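
For context, a minimal standalone sketch of the allocation policy the patched
mutable_data implements (ToyTensor and its members are hypothetical, not
Paddle's API): the buffer is sized from the element count unless the caller
passes a larger byte size, such as the value an MKLDNN primitive descriptor
reports via get_size(), and the size parameter is size_t because it is an
unsigned byte count.

// Standalone sketch, assuming a vector-backed byte buffer stands in for the
// tensor's memory holder.
#include <cstddef>
#include <cstdio>
#include <vector>

class ToyTensor {
 public:
  void Resize(size_t numel) { numel_ = numel; }

  // Mirrors the patched pattern: use the requested byte size when given,
  // otherwise fall back to numel * element_size; reallocate only when the
  // current buffer is too small.
  void* mutable_data(size_t element_size, size_t requested_size = 0) {
    const size_t size =
        requested_size ? requested_size : numel_ * element_size;
    if (buffer_.size() < size) {
      buffer_.resize(size);
    }
    return buffer_.data();
  }

 private:
  size_t numel_ = 0;
  std::vector<unsigned char> buffer_;
};

int main() {
  ToyTensor t;
  t.Resize(8);  // 8 elements

  // Default sizing: 8 * sizeof(float) = 32 bytes.
  t.mutable_data(sizeof(float));

  // A layout-aware library (e.g. MKLDNN) may report a larger byte size for
  // its blocked format; 64 here is a stand-in for get_size().
  const size_t requested = 64;
  t.mutable_data(sizeof(float), requested);

  std::printf("buffer grown to requested size: %zu bytes\n", requested);
  return 0;
}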