diff --git a/paddle/framework/tensor.h b/paddle/framework/tensor.h index a36f375d2e42ee3c46ddef42954335cba7eb88f2..69019c7adcc6bdd63e6e6b6c343994a7dc2e9fbb 100644 --- a/paddle/framework/tensor.h +++ b/paddle/framework/tensor.h @@ -137,6 +137,8 @@ class Tensor { const DDim& dims() const { return dims_; } + paddle::platform::Place place() const { return holder_->place(); } + private: // Placeholder hides type T, so it doesn't appear as a template // parameter of Variable. diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc index d48a948d21527f6b1725a23e0b9db75cdbb879bb..4b1bbc2cf2a2148cb166b9830fad8e1dab767083 100644 --- a/paddle/pybind/pybind.cc +++ b/paddle/pybind/pybind.cc @@ -20,6 +20,7 @@ limitations under the License. */ #include "paddle/framework/op_registry.h" #include "paddle/framework/operator.h" #include "paddle/framework/scope.h" +#include "paddle/platform/place.h" #include "paddle/pybind/tensor_bind.h" #include "pybind11/numpy.h" #include "pybind11/pybind11.h" @@ -62,12 +63,12 @@ PYBIND11_PLUGIN(core) { self.Resize(pd::make_ddim(dim)); }) .def("alloc_float", - [](pd::Tensor& self) { - self.mutable_data(paddle::platform::CPUPlace()); + [](pd::Tensor& self, paddle::platform::Place& place) { + self.mutable_data(place); }) .def("alloc_int", - [](pd::Tensor& self) { - self.mutable_data(paddle::platform::CPUPlace()); + [](pd::Tensor& self, paddle::platform::Place& place) { + self.mutable_data(place); }) .def("set", paddle::pybind::PyTensorSetFromArray) .def("set", paddle::pybind::PyTensorSetFromArray) @@ -122,9 +123,20 @@ All parameter, weight, gradient are variables in Paddle. .def("temp", pd::OperatorBase::TMP_VAR_NAME); py::class_(m, "DeviceContext") - .def_static("cpu_context", []() -> paddle::platform::DeviceContext* { - return new paddle::platform::CPUDeviceContext(); - }); + .def_static( + "create", + [](paddle::platform::Place) -> paddle::platform::DeviceContext* { + if (paddle::platform::is_gpu_place(place)) { + return new paddle::platform::GPUDeviceContext(place); + } else if (paddle::platform::is_cpu_place(place)) { + return new paddle::platform::CPUDeviceContext(); + } + }); + + py::class_(m, "GPUPlace").def(py::init()); + .def(py::init<>()); + + py::class_(m, "CPUPlace").def(py::init<>()); py::class_> operator_base( m, "Operator"); diff --git a/paddle/pybind/tensor_bind.h b/paddle/pybind/tensor_bind.h index 995e102bf9d342e1604f5ae704288d6cf68d97a4..0caece6e9517470033d11d2c21e077da1fb3656b 100644 --- a/paddle/pybind/tensor_bind.h +++ b/paddle/pybind/tensor_bind.h @@ -13,9 +13,10 @@ limitations under the License. */ #pragma once -#include -#include -#include +#include "paddle/framework/tensor.h" +#include "paddle/memory/memcpy.h" +#include "pybind11/numpy.h" +#include "pybind11/pybind11.h" namespace py = pybind11; @@ -56,7 +57,6 @@ struct CastToPyBufferImpl { strides[i - 1] = sizeof(CUR_TYPE) * prod; prod *= dims_outside[i - 1]; } - return py::buffer_info( tensor.mutable_data(tensor.holder_->place()), sizeof(CUR_TYPE), @@ -87,8 +87,25 @@ void PyTensorSetFromArray( } self.Resize(framework::make_ddim(dims)); - auto *dst = self.mutable_data(paddle::platform::CPUPlace()); - std::memcpy(dst, array.data(), sizeof(T) * array.size()); + auto *dst = self.mutable_data(self.place()); + + if (paddle::platform::is_cpu_place(self.place())) { + paddle::memory::Copy( + place, dst, place, array.data(), sizeof(T) * array.size()); + } else if (paddle::platform::is_gpu_place(place)) { +#ifdef PADDLE_ONLY_CPU + PADDLE_THROW("'GPUPlace' is not supported in CPU only device."); +#else + paddle::memory::Copy( + place, + dst, + paddle::platform::CPUPlace(), + array.data(), + sizeof(T) * array.size()); +#endif + } } } // namespace pybind