From 4ff237f93c85521fbd69ac618735de3acdd822e2 Mon Sep 17 00:00:00 2001
From: chengduoZH
Date: Fri, 6 Apr 2018 10:22:22 +0800
Subject: [PATCH] follow comments

---
 paddle/fluid/framework/tensor_impl.h |  5 ++---
 paddle/fluid/pybind/tensor_py.h      | 14 ++++++++++----
 2 files changed, 12 insertions(+), 7 deletions(-)

diff --git a/paddle/fluid/framework/tensor_impl.h b/paddle/fluid/framework/tensor_impl.h
index 07d0906ea..f49d1a47a 100644
--- a/paddle/fluid/framework/tensor_impl.h
+++ b/paddle/fluid/framework/tensor_impl.h
@@ -132,8 +132,7 @@ inline void* Tensor::mutable_data(platform::Place place, std::type_index type) {
              platform::is_cuda_pinned_place(place)) {
 #ifndef PADDLE_WITH_CUDA
     PADDLE_THROW(
-        "'CUDAPlace' or 'CUDAPinnedPlace' is not supported in CPU only "
-        "device.");
+        "CUDAPlace or CUDAPinnedPlace is not supported in CPU-only mode.");
   }
 #else
   if (platform::is_gpu_place(place)) {
@@ -153,7 +152,7 @@ inline void* Tensor::mutable_data(platform::Place place, std::type_index type) {
 
 inline void* Tensor::mutable_data(platform::Place place) {
   PADDLE_ENFORCE(this->holder_ != nullptr,
-                 "Cannot invoke mutable data if current hold nothing");
+                 "Cannot invoke mutable_data if the tensor currently holds nothing.");
   return mutable_data(place, holder_->type());
 }
 
diff --git a/paddle/fluid/pybind/tensor_py.h b/paddle/fluid/pybind/tensor_py.h
index f52ffc9ef..868966433 100644
--- a/paddle/fluid/pybind/tensor_py.h
+++ b/paddle/fluid/pybind/tensor_py.h
@@ -143,7 +143,7 @@ void PyCPUTensorSetFromArray(
   std::vector<int64_t> dims;
   dims.reserve(array.ndim());
   for (size_t i = 0; i < array.ndim(); ++i) {
-    dims.push_back((int)array.shape()[i]);
+    dims.push_back(static_cast<int>(array.shape()[i]));
   }
 
   self.Resize(framework::make_ddim(dims));
@@ -152,6 +152,8 @@ void PyCPUTensorSetFromArray(
 }
 
 template <>
+// The following specialization maps uint16_t in the parameter type to
+// platform::float16.
 void PyCPUTensorSetFromArray(
     framework::Tensor &self,
     py::array_t<uint16_t, py::array::c_style | py::array::forcecast> array,
@@ -159,7 +161,7 @@ void PyCPUTensorSetFromArray(
   std::vector<int64_t> dims;
   dims.reserve(array.ndim());
   for (size_t i = 0; i < array.ndim(); ++i) {
-    dims.push_back((int)array.shape()[i]);
+    dims.push_back(static_cast<int>(array.shape()[i]));
   }
 
   self.Resize(framework::make_ddim(dims));
@@ -176,7 +178,7 @@ void PyCUDATensorSetFromArray(
   std::vector<int64_t> dims;
   dims.reserve(array.ndim());
   for (size_t i = 0; i < array.ndim(); ++i) {
-    dims.push_back((int)array.shape()[i]);
+    dims.push_back(static_cast<int>(array.shape()[i]));
   }
 
   self.Resize(framework::make_ddim(dims));
@@ -190,6 +192,8 @@ void PyCUDATensorSetFromArray(
 }
 
 template <>
+// The following specialization maps uint16_t in the parameter type to
+// platform::float16.
 void PyCUDATensorSetFromArray(
     framework::Tensor &self,
     py::array_t<uint16_t, py::array::c_style | py::array::forcecast> array,
@@ -197,7 +201,7 @@ void PyCUDATensorSetFromArray(
   std::vector<int64_t> dims;
   dims.reserve(array.ndim());
   for (size_t i = 0; i < array.ndim(); ++i) {
-    dims.push_back((int)array.shape()[i]);
+    dims.push_back(static_cast<int>(array.shape()[i]));
   }
 
   self.Resize(framework::make_ddim(dims));
@@ -228,6 +232,8 @@ void PyCUDAPinnedTensorSetFromArray(
 }
 
 template <>
+// The following specialization maps uint16_t in the parameter type to
+// platform::float16.
 void PyCUDAPinnedTensorSetFromArray(
     framework::Tensor &self,
     py::array_t<uint16_t, py::array::c_style | py::array::forcecast> array,
--
GitLab
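
Note on the specializations above: the added comments say that uint16_t in the py::array_t parameter is mapped to platform::float16, i.e. numpy hands the binding raw 16-bit half-precision words and the tensor side stores them as float16 without any value conversion. The sketch below is a minimal, self-contained illustration of that pattern, not the Paddle implementation; Tensor, float16, and SetTensorFromArray here are hypothetical stand-ins, and only the uint16_t-to-float16 bit copy and the static_cast style mirror the patch.

#include <cstdint>
#include <cstring>
#include <iostream>
#include <vector>

// Hypothetical stand-ins, not the Paddle types: float16 wraps a 16-bit
// pattern, and Tensor owns an untyped byte buffer, loosely in the spirit
// of framework::Tensor.
struct float16 {
  uint16_t x;
};

struct Tensor {
  std::vector<unsigned char> buf;

  // Resizes the buffer for n elements of U and returns a typed pointer,
  // loosely like mutable_data<U>().
  template <typename U>
  U *mutable_data(size_t n) {
    buf.resize(n * sizeof(U));
    return reinterpret_cast<U *>(buf.data());
  }
};

// Primary template: an array of element type T is stored as T.
template <typename T>
void SetTensorFromArray(Tensor &self, const std::vector<T> &array) {
  T *dst = self.mutable_data<T>(array.size());
  std::memcpy(dst, array.data(), sizeof(T) * array.size());
}

// Explicit specialization in the spirit of the patch: a uint16_t array
// (numpy exposes float16 data as raw 16-bit words) is stored as float16;
// the bit pattern is copied through unchanged.
template <>
void SetTensorFromArray(Tensor &self, const std::vector<uint16_t> &array) {
  float16 *dst = self.mutable_data<float16>(array.size());
  std::memcpy(dst, array.data(), sizeof(uint16_t) * array.size());
}

int main() {
  // 0x3C00 is the IEEE-754 binary16 bit pattern for 1.0.
  std::vector<uint16_t> half_bits = {0x3C00, 0x0000};

  Tensor t;
  SetTensorFromArray(t, half_bits);

  // Cast style adopted by the patch: static_cast<int> instead of (int).
  int n = static_cast<int>(half_bits.size());
  const float16 *data = reinterpret_cast<const float16 *>(t.buf.data());
  std::cout << "elements: " << n << ", first element bits: 0x" << std::hex
            << data[0].x << std::endl;
  return 0;
}

Compiled with any C++11 compiler, this prints the element count and the preserved bit pattern 0x3c00 for the half-precision value 1.0, which is the same pass-through behavior the uint16_t specializations rely on at the pybind boundary.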