diff --git a/paddle/fluid/pybind/imperative.cc b/paddle/fluid/pybind/imperative.cc
index 0247cbe0e2b274cfea3a8f0c3d348ece0a245ba8..354ac0aef9f2d95082e8860552e8d0645ac230d4 100644
--- a/paddle/fluid/pybind/imperative.cc
+++ b/paddle/fluid/pybind/imperative.cc
@@ -1535,40 +1535,40 @@ void BindImperative(py::module *m_ptr) {
                 "Cannot copy this Tensor to GPU in CPU version Paddle, "
                 "Please recompile or reinstall Paddle with CUDA support."));
 #else
-            int device_count = platform::GetGPUDeviceCount();
-            int device_id = 0;
-            if (handle == py::none()) {
-              if (platform::is_gpu_place(self->Place())) {
-                return self;
-              }
-            } else {
-              PyObject *py_obj = handle.ptr();
-              PADDLE_ENFORCE_EQ(
-                  PyCheckInteger(py_obj), true,
-                  platform::errors::InvalidArgument(
-                      " 'device_id' must be a positive integer"));
-              device_id = py::cast<int>(handle);
-            }
-            PADDLE_ENFORCE_GE(
-                device_id, 0,
-                platform::errors::InvalidArgument(
-                    "Can not copy Tensor to Invalid CUDAPlace(%d), device id "
-                    "must inside [0, %d)",
-                    device_id, device_count));
-            PADDLE_ENFORCE_LT(
-                device_id, device_count,
-                platform::errors::InvalidArgument(
-                    "Can not copy Tensor to Invalid CUDAPlace(%d), device id "
-                    "must inside [0, %d)",
-                    device_id, device_count));
-            platform::CUDAPlace place = platform::CUDAPlace(device_id);
-            if (platform::is_same_place(self->Place(), place)) {
-              return self;
-            } else {
-              auto new_var = self->NewVarBase(place, blocking);
-              new_var->SetOverridedStopGradient(self->OverridedStopGradient());
-              return new_var;
-            }
+            int device_count = platform::GetGPUDeviceCount();
+            int device_id = 0;
+            if (handle == py::none()) {
+              auto default_place =
+                  imperative::GetCurrentTracer()->ExpectedPlace();
+              device_id = default_place.GetDeviceId();
+            } else {
+              PyObject *py_obj = handle.ptr();
+              PADDLE_ENFORCE_EQ(
+                  PyCheckInteger(py_obj), true,
+                  platform::errors::InvalidArgument(
+                      " 'device_id' must be a positive integer"));
+              device_id = py::cast<int>(handle);
+            }
+            PADDLE_ENFORCE_GE(
+                device_id, 0,
+                platform::errors::InvalidArgument(
+                    "Can not copy Tensor to Invalid CUDAPlace(%d), device id "
+                    "must inside [0, %d)",
+                    device_id, device_count));
+            PADDLE_ENFORCE_LT(
+                device_id, device_count,
+                platform::errors::InvalidArgument(
+                    "Can not copy Tensor to Invalid CUDAPlace(%d), device id "
+                    "must inside [0, %d)",
+                    device_id, device_count));
+            platform::CUDAPlace place = platform::CUDAPlace(device_id);
+            if (platform::is_same_place(self->Place(), place)) {
+              return self;
+            } else {
+              auto new_var = self->NewVarBase(place, blocking);
+              new_var->SetOverridedStopGradient(self->OverridedStopGradient());
+              return new_var;
+            }
 #endif
           },
           py::arg("device_id") = py::none(), py::arg("blocking") = true, R"DOC(
@@ -1588,16 +1588,17 @@ void BindImperative(py::module *m_ptr) {
               # required: gpu
               import paddle
               x = paddle.to_tensor(1.0, place=paddle.CPUPlace())
-              print(x.place)        # CPUPlace
+              print(x.place)        # Place(cpu)
 
               y = x.cuda()
-              print(y.place)        # CUDAPlace(0)
+              print(y.place)        # Place(gpu:0)
 
               y = x.cuda(None)
-              print(y.place)        # CUDAPlace(0)
+              print(y.place)        # Place(gpu:0)
 
-              y = x.cuda(1)
-              print(y.place)        # CUDAPlace(1)
+              paddle.device.set_device("gpu:1")
+              y = x.cuda(None)
+              print(y.place)        # Place(gpu:1)
       )DOC")
       .def(
           "_share_memory",
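
With this change, `Tensor.cuda()` with `device_id=None` no longer returns the tensor unchanged whenever it already lives on some GPU; the target device id is now resolved from the current tracer's expected place, so `paddle.device.set_device` decides where the copy lands. A minimal usage sketch of the new behavior (assuming a machine with at least two visible GPUs; the device ids are illustrative):

    import paddle

    paddle.device.set_device("gpu:0")
    x = paddle.to_tensor(1.0)         # created on gpu:0
    print(x.place)                    # Place(gpu:0)

    # Passing an explicit device id behaves as before.
    print(x.cuda(1).place)            # Place(gpu:1)

    # With device_id=None the target now follows the current expected
    # place, so switching the default device actually moves the tensor
    # instead of returning it unchanged because it is already on a GPU.
    paddle.device.set_device("gpu:1")
    print(x.cuda(None).place)         # Place(gpu:1)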