diff --git a/paddle/fluid/framework/tensor.cc b/paddle/fluid/framework/tensor.cc
index 222a51672fc827b39b5f6ba2d5e5e3384997e567..d61dbb98a235ca9af089d35318b7f4c68cb125cc 100644
--- a/paddle/fluid/framework/tensor.cc
+++ b/paddle/fluid/framework/tensor.cc
@@ -32,7 +32,7 @@ size_t Tensor::memory_size() const {
 }
 
 void* Tensor::mutable_data(platform::Place place, std::type_index type,
-                           int64_t requested_size) {
+                           size_t requested_size) {
   if (holder_ != nullptr) {
     holder_->set_type(type);
   }
@@ -40,7 +40,7 @@ void* Tensor::mutable_data(platform::Place place, std::type_index type,
                     "When calling this method, the Tensor's numel must be "
                     "equal or larger than zero. "
                     "Please check Tensor::Resize has been called first.");
-  int64_t size = requested_size ? requested_size : numel() * SizeOfType(type);
+  size_t size = requested_size ? requested_size : numel() * SizeOfType(type);
   /* some versions of boost::variant don't have operator!= */
   if (holder_ == nullptr || !(holder_->place() == place) ||
       holder_->size() < size + offset_) {
@@ -69,7 +69,7 @@ void* Tensor::mutable_data(platform::Place place, std::type_index type,
                                                offset_);
 }
 
-void* Tensor::mutable_data(platform::Place place, int64_t requested_size) {
+void* Tensor::mutable_data(platform::Place place, size_t requested_size) {
   PADDLE_ENFORCE(this->holder_ != nullptr,
                  "Cannot invoke mutable data if current hold nothing.");
   return mutable_data(place, holder_->type(), requested_size);
diff --git a/paddle/fluid/framework/tensor.h b/paddle/fluid/framework/tensor.h
index a4454c90b06f07de204af2083a06cf3f426e7856..4cf95fa0ae07823289fbf337062190f05e6c6bcf 100644
--- a/paddle/fluid/framework/tensor.h
+++ b/paddle/fluid/framework/tensor.h
@@ -89,12 +89,12 @@ class Tensor {
    * @note If not exist, then allocation.
    */
   template <typename T>
-  T* mutable_data(platform::Place place, int64_t requested_size = 0);
+  T* mutable_data(platform::Place place, size_t requested_size = 0);
 
   void* mutable_data(platform::Place place, std::type_index type,
-                     int64_t requested_size = 0);
+                     size_t requested_size = 0);
 
-  void* mutable_data(platform::Place place, int64_t requested_size = 0);
+  void* mutable_data(platform::Place place, size_t requested_size = 0);
 
   /**
    * @brief Return a pointer to mutable memory block.
@@ -106,7 +106,7 @@ class Tensor {
    * @note If not exist, then allocation.
    */
   template <typename T>
-  T* mutable_data(DDim dims, platform::Place place, int64_t requested_size = 0);
+  T* mutable_data(DDim dims, platform::Place place, size_t requested_size = 0);
 
   /*! Return the dimensions of the memory block. */
   const DDim& dims() const;
diff --git a/paddle/fluid/framework/tensor_impl.h b/paddle/fluid/framework/tensor_impl.h
index ea10c9a2658cbd8334d2c50e87c55967dbf0db65..6d3047c95d6cf30c2a5308d4f69ded367066d78c 100644
--- a/paddle/fluid/framework/tensor_impl.h
+++ b/paddle/fluid/framework/tensor_impl.h
@@ -47,14 +47,14 @@ inline T* Tensor::data() {
 
 template <typename T>
 inline T* Tensor::mutable_data(DDim dims, platform::Place place,
-                               int64_t requested_size) {
+                               size_t requested_size) {
   static_assert(std::is_pod<T>::value, "T must be POD");
   Resize(dims);
   return mutable_data<T>(place, requested_size);
 }
 
 template <typename T>
-inline T* Tensor::mutable_data(platform::Place place, int64_t requested_size) {
+inline T* Tensor::mutable_data(platform::Place place, size_t requested_size) {
   static_assert(std::is_pod<T>::value, "T must be POD");
   return reinterpret_cast<T*>(mutable_data(place, typeid(T), requested_size));
 }
diff --git a/paddle/fluid/operators/conv_mkldnn_op.cc b/paddle/fluid/operators/conv_mkldnn_op.cc
index 77d0cf07a869d1e997227ae82e2c697825a2aa48..d75e6412c8950bce33855c67b5f9725261d7ba06 100644
--- a/paddle/fluid/operators/conv_mkldnn_op.cc
+++ b/paddle/fluid/operators/conv_mkldnn_op.cc
@@ -53,15 +53,15 @@ class ConvMKLDNNHandler : public platform::MKLDNNHandler {
     key_ += "-BWD";
   }
 
-  size_t GetDstMemorySize() {
+  size_t GetDstMemorySize() const {
     return conv_pd_->dst_primitive_desc().get_size();
   }
 
-  size_t GetDiffWeightsMemorySize() {
+  size_t GetDiffWeightsMemorySize() const {
     return conv_bwd_weights_pd_->diff_weights_primitive_desc().get_size();
   }
 
-  size_t GetDiffSourceMemorySize() {
+  size_t GetDiffSourceMemorySize() const {
     return conv_bwd_data_pd_->diff_src_primitive_desc().get_size();
   }
 
@@ -491,7 +491,7 @@ class ConvMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> {
           handler.AcquireDiffDstMemoryFromWeightsPrimitive(
               user_diff_dst_memory_p, pipeline);
 
-      size_t size = handler.GetDiffWeightsMemorySize();
+      const size_t size = handler.GetDiffWeightsMemorySize();
       filter_grad_data = filter_grad->mutable_data<T>(ctx.GetPlace(), size);
 
       auto diff_weights_memory_p =
@@ -516,7 +516,7 @@ class ConvMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> {
          handler.AcquireDiffDstMemoryFromDataPrimitive(user_diff_dst_memory_p,
                                                        pipeline);
 
-      size_t size = handler.GetDiffSourceMemorySize();
+      const size_t size = handler.GetDiffSourceMemorySize();
       input_grad_data = input_grad->mutable_data<T>(ctx.GetPlace(), size);
 
       auto diff_src_memory_p = handler.AcquireDiffSrcMemoryFromDataPrimitive(
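
Illustrative only, not part of the patch: a minimal caller-side sketch of the two mutable_data paths touched above, assuming the Tensor declarations from tensor.h as changed here and a CPU place; the shape and byte count are invented for the example.

#include "paddle/fluid/framework/ddim.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/platform/place.h"

void MutableDataSketch() {
  paddle::framework::Tensor t;
  paddle::platform::CPUPlace cpu;

  // Default path: requested_size stays 0, so the allocation size falls back
  // to numel() * SizeOfType(type) inside Tensor::mutable_data.
  float* by_shape =
      t.mutable_data<float>(paddle::framework::make_ddim({4, 8}), cpu);

  // Explicit path: pass a byte count as size_t, mirroring how the MKLDNN
  // conv grad kernel forwards handler.GetDiffWeightsMemorySize().
  const size_t bytes = 4 * 8 * sizeof(float);
  float* by_size = t.mutable_data<float>(cpu, bytes);

  (void)by_shape;
  (void)by_size;
}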