Unverified commit b2b78f8e, authored by Sing_chan, committed by GitHub

[Bug Fix] set device_id=current_id when calling Tensor.cuda() without device_id (#43510)

* make device_id=current_id when not given

* use tracer to get current device id
Parent 76b02b7c
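
In user-facing terms, the change below makes `Tensor.cuda()` with no `device_id` copy the tensor to the device currently selected through the dygraph tracer, instead of always falling back to device 0. A minimal sketch of the resulting behavior (assuming a machine with at least two visible GPUs; `paddle.device.set_device`, `paddle.to_tensor`, and `Tensor.cuda` are the public Paddle APIs involved):

    import paddle

    # Select gpu:1 as the current device; the tracer's expected place follows it.
    paddle.device.set_device("gpu:1")
    x = paddle.to_tensor(1.0, place=paddle.CPUPlace())

    # With no device_id, cuda() now resolves to the current device (gpu:1);
    # before this fix a CPU tensor was always copied to gpu:0.
    y = x.cuda()
    print(y.place)   # Place(gpu:1)
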
@@ -1538,9 +1538,9 @@ void BindImperative(py::module *m_ptr) {
                 int device_count = platform::GetGPUDeviceCount();
                 int device_id = 0;
                 if (handle == py::none()) {
-                  if (platform::is_gpu_place(self->Place())) {
-                    return self;
-                  }
+                  auto default_place =
+                      imperative::GetCurrentTracer()->ExpectedPlace();
+                  device_id = default_place.GetDeviceId();
                 } else {
                   PyObject *py_obj = handle.ptr();
                   PADDLE_ENFORCE_EQ(
@@ -1588,16 +1588,17 @@ void BindImperative(py::module *m_ptr) {
               # required: gpu
               import paddle
               x = paddle.to_tensor(1.0, place=paddle.CPUPlace())
-              print(x.place)      # CPUPlace
+              print(x.place)      # Place(cpu)
 
               y = x.cuda()
-              print(y.place)      # CUDAPlace(0)
+              print(y.place)      # Place(gpu:0)
 
               y = x.cuda(None)
-              print(y.place)      # CUDAPlace(0)
+              print(y.place)      # Place(gpu:0)
 
-              y = x.cuda(1)
-              print(y.place)      # CUDAPlace(1)
+              paddle.device.set_device("gpu:1")
+              y = x.cuda(None)
+              print(y.place)      # Place(gpu:1)
        )DOC")
       .def(
           "_share_memory",
...