提交 4ff237f9 编写于 作者: C chengduoZH

follow comments

上级 17842e33
...@@ -132,8 +132,7 @@ inline void* Tensor::mutable_data(platform::Place place, std::type_index type) { ...@@ -132,8 +132,7 @@ inline void* Tensor::mutable_data(platform::Place place, std::type_index type) {
platform::is_cuda_pinned_place(place)) { platform::is_cuda_pinned_place(place)) {
#ifndef PADDLE_WITH_CUDA #ifndef PADDLE_WITH_CUDA
PADDLE_THROW( PADDLE_THROW(
"'CUDAPlace' or 'CUDAPinnedPlace' is not supported in CPU only " "CUDAPlace or CUDAPinnedPlace is not supported in CPU-only mode.");
"device.");
} }
#else #else
if (platform::is_gpu_place(place)) { if (platform::is_gpu_place(place)) {
...@@ -153,7 +152,7 @@ inline void* Tensor::mutable_data(platform::Place place, std::type_index type) { ...@@ -153,7 +152,7 @@ inline void* Tensor::mutable_data(platform::Place place, std::type_index type) {
inline void* Tensor::mutable_data(platform::Place place) { inline void* Tensor::mutable_data(platform::Place place) {
PADDLE_ENFORCE(this->holder_ != nullptr, PADDLE_ENFORCE(this->holder_ != nullptr,
"Cannot invoke mutable data if current hold nothing"); "Cannot invoke mutable data if current hold nothing.");
return mutable_data(place, holder_->type()); return mutable_data(place, holder_->type());
} }
......
...@@ -143,7 +143,7 @@ void PyCPUTensorSetFromArray( ...@@ -143,7 +143,7 @@ void PyCPUTensorSetFromArray(
std::vector<int64_t> dims; std::vector<int64_t> dims;
dims.reserve(array.ndim()); dims.reserve(array.ndim());
for (size_t i = 0; i < array.ndim(); ++i) { for (size_t i = 0; i < array.ndim(); ++i) {
dims.push_back((int)array.shape()[i]); dims.push_back(static_cast<int>(array.shape()[i]));
} }
self.Resize(framework::make_ddim(dims)); self.Resize(framework::make_ddim(dims));
...@@ -152,6 +152,8 @@ void PyCPUTensorSetFromArray( ...@@ -152,6 +152,8 @@ void PyCPUTensorSetFromArray(
} }
template <> template <>
// The following specialization maps uint16_t in the parameter type to
// platform::float16.
void PyCPUTensorSetFromArray( void PyCPUTensorSetFromArray(
framework::Tensor &self, framework::Tensor &self,
py::array_t<uint16_t, py::array::c_style | py::array::forcecast> array, py::array_t<uint16_t, py::array::c_style | py::array::forcecast> array,
...@@ -159,7 +161,7 @@ void PyCPUTensorSetFromArray( ...@@ -159,7 +161,7 @@ void PyCPUTensorSetFromArray(
std::vector<int64_t> dims; std::vector<int64_t> dims;
dims.reserve(array.ndim()); dims.reserve(array.ndim());
for (size_t i = 0; i < array.ndim(); ++i) { for (size_t i = 0; i < array.ndim(); ++i) {
dims.push_back((int)array.shape()[i]); dims.push_back(static_cast<int>(array.shape()[i]));
} }
self.Resize(framework::make_ddim(dims)); self.Resize(framework::make_ddim(dims));
...@@ -176,7 +178,7 @@ void PyCUDATensorSetFromArray( ...@@ -176,7 +178,7 @@ void PyCUDATensorSetFromArray(
std::vector<int64_t> dims; std::vector<int64_t> dims;
dims.reserve(array.ndim()); dims.reserve(array.ndim());
for (size_t i = 0; i < array.ndim(); ++i) { for (size_t i = 0; i < array.ndim(); ++i) {
dims.push_back((int)array.shape()[i]); dims.push_back(static_cast<int>(array.shape()[i]));
} }
self.Resize(framework::make_ddim(dims)); self.Resize(framework::make_ddim(dims));
...@@ -190,6 +192,8 @@ void PyCUDATensorSetFromArray( ...@@ -190,6 +192,8 @@ void PyCUDATensorSetFromArray(
} }
template <> template <>
// The following specialization maps uint16_t in the parameter type to
// platform::float16.
void PyCUDATensorSetFromArray( void PyCUDATensorSetFromArray(
framework::Tensor &self, framework::Tensor &self,
py::array_t<uint16_t, py::array::c_style | py::array::forcecast> array, py::array_t<uint16_t, py::array::c_style | py::array::forcecast> array,
...@@ -197,7 +201,7 @@ void PyCUDATensorSetFromArray( ...@@ -197,7 +201,7 @@ void PyCUDATensorSetFromArray(
std::vector<int64_t> dims; std::vector<int64_t> dims;
dims.reserve(array.ndim()); dims.reserve(array.ndim());
for (size_t i = 0; i < array.ndim(); ++i) { for (size_t i = 0; i < array.ndim(); ++i) {
dims.push_back((int)array.shape()[i]); dims.push_back(static_cast<int>(array.shape()[i]));
} }
self.Resize(framework::make_ddim(dims)); self.Resize(framework::make_ddim(dims));
...@@ -228,6 +232,8 @@ void PyCUDAPinnedTensorSetFromArray( ...@@ -228,6 +232,8 @@ void PyCUDAPinnedTensorSetFromArray(
} }
template <> template <>
// The following specialization maps uint16_t in the parameter type to
// platform::float16.
void PyCUDAPinnedTensorSetFromArray( void PyCUDAPinnedTensorSetFromArray(
framework::Tensor &self, framework::Tensor &self,
py::array_t<uint16_t, py::array::c_style | py::array::forcecast> array, py::array_t<uint16_t, py::array::c_style | py::array::forcecast> array,
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册