diff --git a/paddle/framework/tensor.h b/paddle/framework/tensor.h
index 10813d4aadb70e96ecfbbc88c9f28fe87e0fe1a1..5f07256c05087e165ed471daed9ba0dd2cd2372b 100644
--- a/paddle/framework/tensor.h
+++ b/paddle/framework/tensor.h
@@ -117,7 +117,7 @@ class Tensor {
 #ifdef PADDLE_ONLY_CPU
       PADDLE_THROW("'GPUPlace' is not supported in CPU only device.");
 #else
-      GpuMemcpySync(dst_ptr, src_ptr, size, cudaMemcpyDeviceToHost);
+      platform::GpuMemcpySync(dst_ptr, src_ptr, size, cudaMemcpyDeviceToHost);
 #endif
     }
   }
diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc
index e53340cc9fa27314adc09364dbfa2bc90d5ddcd3..2cc26a926e37d40bd7ead59abded1b738696cd05 100644
--- a/paddle/pybind/pybind.cc
+++ b/paddle/pybind/pybind.cc
@@ -138,13 +138,12 @@ All parameter, weight, gradient are variables in Paddle.
       })
 #ifndef PADDLE_ONLY_CPU
       .def_static("gpu_context",
-                  [](paddle::platform::Place& place)
+                  [](paddle::platform::GPUPlace& place)
                       -> paddle::platform::DeviceContext* {
-                        return new paddle::platform::CUDADeviceContext(place);
-                      })
+                    return new paddle::platform::CUDADeviceContext(place);
+                  })
 #endif
-      ;
-
+      ;  // NOLINT
   py::class_<paddle::platform::GPUPlace>(m, "GPUPlace").def(py::init<int>());
 
   py::class_<paddle::platform::CPUPlace>(m, "CPUPlace").def(py::init<>());
diff --git a/paddle/pybind/tensor_bind.h b/paddle/pybind/tensor_bind.h
index a94c89d328b35cb199762411fa9ac74b3f9e45fe..fdf8861b68c4750655cebc0bf46e96857f483d1a 100644
--- a/paddle/pybind/tensor_bind.h
+++ b/paddle/pybind/tensor_bind.h
@@ -102,7 +102,7 @@ void PyTensorSetFromArray(
 #ifdef PADDLE_ONLY_CPU
   PADDLE_THROW("'GPUPlace' is not supported in CPU only device.");
 #else
-  platform::GpuMemcpySync(
+  platform::GpuMemcpySync(
       dst, array.data(), sizeof(T) * array.size(), cudaMemcpyHostToDevice);
 #endif
 }
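
For context, here is a minimal standalone sketch (not Paddle code; all names below are hypothetical stand-ins mirroring the patch) of why the `platform::` qualifier is needed: `GpuMemcpySync` is declared in `paddle::platform`, while the call sites live in sibling namespaces such as `paddle::framework`, so unqualified lookup cannot find it and the call must be qualified with the enclosing sibling namespace.

```cpp
// Minimal sketch: a free function in paddle::platform is not visible to
// unqualified lookup from paddle::framework; the call must be qualified.
#include <cstddef>
#include <cstdio>

namespace paddle {
namespace platform {
// Hypothetical stand-in for the real CUDA memcpy wrapper; it only prints here.
void GpuMemcpySync(void* dst, const void* src, size_t size, int kind) {
  std::printf("copy %zu bytes (kind=%d)\n", size, kind);
}
}  // namespace platform

namespace framework {
void CopyDeviceToHost(void* dst, const void* src, size_t size) {
  // GpuMemcpySync(dst, src, size, 0);           // error: not declared in this scope
  platform::GpuMemcpySync(dst, src, size, 0);    // OK: qualified with the sibling
                                                 // namespace, as in the patch above
}
}  // namespace framework
}  // namespace paddle

int main() {
  char src[8] = {0};
  char dst[8];
  paddle::framework::CopyDeviceToHost(dst, src, sizeof(src));
  return 0;
}
```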