From d5109130f145327ae3098fd615a118d54e8016fe Mon Sep 17 00:00:00 2001
From: qijun
Date: Tue, 25 Jul 2017 15:58:38 +0800
Subject: [PATCH] set default cpu place for tensor alloc

---
 paddle/framework/tensor.h   | 17 ++++++++++++-----
 paddle/pybind/pybind.cc     |  8 ++++++++
 paddle/pybind/tensor_bind.h | 10 ++++++++--
 3 files changed, 28 insertions(+), 7 deletions(-)

diff --git a/paddle/framework/tensor.h b/paddle/framework/tensor.h
index 69019c7adc..10813d4aad 100644
--- a/paddle/framework/tensor.h
+++ b/paddle/framework/tensor.h
@@ -19,6 +19,7 @@ limitations under the License. */
 #include
 #include
 #include "paddle/framework/ddim.h"
+#include "paddle/memory/memcpy.h"
 #include "paddle/memory/memory.h"
 #include "paddle/platform/enforce.h"
 #include "paddle/platform/place.h"
@@ -104,15 +105,21 @@ class Tensor {
 
   template <typename T>
   void CopyFrom(const Tensor& src, platform::Place dst_place) {
-    PADDLE_ENFORCE(platform::is_cpu_place(src.holder_->place()) &&
-                       platform::is_cpu_place(dst_place),
-                   "Tensor::CopyFrom only support dst CPU now.");
-    src.EnforceSufficientMemory();
+    PADDLE_ENFORCE(platform::is_cpu_place(dst_place),
+                   "Tensor::CopyFrom only support dst CPU now.");
     size_t size = product(src.dims_) * sizeof(T);
     Resize(src.dims());
     const void* src_ptr = static_cast<const void*>(src.data<T>());
     void* dst_ptr = static_cast<void*>(mutable_data<T>(dst_place));
-    memcpy(dst_ptr, src_ptr, size);
+    if (paddle::platform::is_cpu_place(src.holder_->place())) {
+      std::memcpy(dst_ptr, src_ptr, size);
+    } else if (paddle::platform::is_gpu_place(src.holder_->place())) {
+#ifdef PADDLE_ONLY_CPU
+      PADDLE_THROW("'GPUPlace' is not supported in CPU only device.");
+#else
+      GpuMemcpySync(dst_ptr, src_ptr, size, cudaMemcpyDeviceToHost);
+#endif
+    }
   }
 
   template <typename T>
diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc
index 4b1bbc2cf2..db82c56da7 100644
--- a/paddle/pybind/pybind.cc
+++ b/paddle/pybind/pybind.cc
@@ -66,10 +66,18 @@ PYBIND11_PLUGIN(core) {
           [](pd::Tensor& self, paddle::platform::Place& place) {
             self.mutable_data<float>(place);
           })
+      .def("alloc_float",
+           [](pd::Tensor& self) {
+             self.mutable_data<float>(paddle::platform::CPUPlace());
+           })
       .def("alloc_int",
           [](pd::Tensor& self, paddle::platform::Place& place) {
             self.mutable_data<int>(place);
           })
+      .def("alloc_int",
+           [](pd::Tensor& self) {
+             self.mutable_data<int>(paddle::platform::CPUPlace());
+           })
       .def("set", paddle::pybind::PyTensorSetFromArray<float>)
       .def("set", paddle::pybind::PyTensorSetFromArray<int>)
       .def("shape",
diff --git a/paddle/pybind/tensor_bind.h b/paddle/pybind/tensor_bind.h
index 0caece6e95..1af7c0a302 100644
--- a/paddle/pybind/tensor_bind.h
+++ b/paddle/pybind/tensor_bind.h
@@ -57,11 +57,17 @@ struct CastToPyBufferImpl {
         strides[i - 1] = sizeof(CUR_TYPE) * prod;
         prod *= dims_outside[i - 1];
       }
+      Tensor dst_tensor;
+      if (paddle::platform::is_gpu_place(tensor.holder_->place())) {
+        dst_tensor.CopyFrom<CUR_TYPE>(tensor, platform::CPUPlace());
+      } else if (paddle::platform::is_cpu_place(tensor.holder_->place())) {
+        dst_tensor = tensor;
+      }
       return py::buffer_info(
-          tensor.mutable_data<CUR_TYPE>(tensor.holder_->place()),
+          dst_tensor.mutable_data<CUR_TYPE>(dst_tensor.holder_->place()),
           sizeof(CUR_TYPE),
           py::format_descriptor<CUR_TYPE>::format(),
-          (size_t)framework::arity(tensor.dims()),
+          (size_t)framework::arity(dst_tensor.dims()),
           dims_outside,
           strides);
     } else {
--
GitLab
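
For context, a minimal C++ sketch of how the device-to-host path added to Tensor::CopyFrom above is expected to be exercised. This is illustrative only and not part of the patch: it assumes a GPU-enabled build (PADDLE_ONLY_CPU not defined), and make_ddim and GPUPlace come from the surrounding framework headers rather than from this change.

#include "paddle/framework/ddim.h"
#include "paddle/framework/tensor.h"
#include "paddle/platform/place.h"

void CopyGpuTensorToCpu() {
  using paddle::framework::Tensor;

  // Source tensor allocated on the GPU (contents left uninitialized here).
  Tensor gpu_tensor;
  gpu_tensor.Resize(paddle::framework::make_ddim({2, 3}));
  gpu_tensor.mutable_data<float>(paddle::platform::GPUPlace());

  // Destination place must be CPU; CopyFrom now dispatches to a synchronous
  // device-to-host copy when the source buffer lives on the GPU.
  Tensor cpu_tensor;
  cpu_tensor.CopyFrom<float>(gpu_tensor, paddle::platform::CPUPlace());
}

On the Python side, the new zero-argument alloc_float/alloc_int bindings rely on the same idea by defaulting the allocation place to CPUPlace.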