From 68b06ba60f7716bd96db5a832e5562e870b8c4d9 Mon Sep 17 00:00:00 2001
From: Leo Chen
Date: Wed, 10 Aug 2022 10:35:10 +0800
Subject: [PATCH] [new-exec] set cuda device before run (#44985)

* set cuda device before run

* add header file

* fix compile
---
 .../framework/new_executor/interpretercore.cc | 16 ++++++++++++++++
 paddle/fluid/platform/event.h                 |  2 ++
 paddle/phi/backends/gpu/cuda/cuda_info.cc     |  1 +
 3 files changed, 19 insertions(+)

diff --git a/paddle/fluid/framework/new_executor/interpretercore.cc b/paddle/fluid/framework/new_executor/interpretercore.cc
index ecbce3b6102..66e8f93736a 100644
--- a/paddle/fluid/framework/new_executor/interpretercore.cc
+++ b/paddle/fluid/framework/new_executor/interpretercore.cc
@@ -28,6 +28,7 @@
 #ifdef PADDLE_WITH_MKLDNN
 #include "paddle/fluid/platform/mkldnn_helper.h"
 #endif
+#include "paddle/fluid/platform/device/gpu/gpu_info.h"
 
 PADDLE_DEFINE_EXPORTED_bool(new_executor_use_inplace,
                             true,
@@ -98,6 +99,11 @@ InterpreterCore::~InterpreterCore() {
 interpreter::CostInfo InterpreterCore::DryRun(
     const std::vector& feed_names,
     const std::vector& feed_tensors) {
+#if defined(PADDLE_WITH_CUDA) && defined(PADDLE_WITH_HETERPS)
+  if (platform::is_gpu_place(place_)) {
+    platform::SetDeviceId(place_.device);
+  }
+#endif
   Prepare(feed_names, feed_tensors, true);
   interpreter::CostInfo cost_info;
   {
@@ -122,6 +128,11 @@ interpreter::CostInfo InterpreterCore::DryRun(
 paddle::framework::FetchList InterpreterCore::Run(
     const std::vector& feed_names,
     const std::vector& feed_tensors) {
+#if defined(PADDLE_WITH_CUDA) && defined(PADDLE_WITH_HETERPS)
+  if (platform::is_gpu_place(place_)) {
+    platform::SetDeviceId(place_.device);
+  }
+#endif
 #ifdef PADDLE_WITH_MKLDNN
   platform::AttachPointerHashToMKLDNNKey(this, place_);
 #endif
@@ -153,6 +164,11 @@ paddle::framework::FetchList InterpreterCore::Run(
 
 paddle::framework::FetchList InterpreterCore::Run(
     const std::vector& feed_names) {
+#if defined(PADDLE_WITH_CUDA) && defined(PADDLE_WITH_HETERPS)
+  if (platform::is_gpu_place(place_)) {
+    platform::SetDeviceId(place_.device);
+  }
+#endif
 #ifdef PADDLE_WITH_MKLDNN
   platform::AttachPointerHashToMKLDNNKey(this, place_);
 #endif
diff --git a/paddle/fluid/platform/event.h b/paddle/fluid/platform/event.h
index f6c7bb04353..f1d7bad90f2 100644
--- a/paddle/fluid/platform/event.h
+++ b/paddle/fluid/platform/event.h
@@ -141,6 +141,7 @@ class CudaEvent {
 #else
     cudaEventCreateWithFlags(&event_, flags_);
 #endif
+    VLOG(4) << "CudaEvent " << event_;
   }
 
   explicit CudaEvent(unsigned int flags) : flags_(flags) {
@@ -149,6 +150,7 @@ class CudaEvent {
 #else
     cudaEventCreateWithFlags(&event_, flags_);
 #endif
+    VLOG(4) << "CudaEvent " << event_;
   }
 
   ~CudaEvent() {
diff --git a/paddle/phi/backends/gpu/cuda/cuda_info.cc b/paddle/phi/backends/gpu/cuda/cuda_info.cc
index 7be21e85f00..4b5de3db54d 100644
--- a/paddle/phi/backends/gpu/cuda/cuda_info.cc
+++ b/paddle/phi/backends/gpu/cuda/cuda_info.cc
@@ -241,6 +241,7 @@ void SetDeviceId(int id) {
                                         id,
                                         GetGPUDeviceCount()));
   PADDLE_RETRY_CUDA_SUCCESS(cudaSetDevice(id));
+  VLOG(4) << "SetDeviceId " << id;
 }
 
 void GpuMemcpyAsync(void *dst,
-- 
GitLab
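The core idea of the patch is to pin the calling thread to the interpreter's GPU before any CUDA events or kernels are created, guarded by the same PADDLE_WITH_CUDA / PADDLE_WITH_HETERPS macros. Below is a minimal standalone sketch of that pattern for readers outside the Paddle codebase; it is not Paddle's actual code, and the names RunOnDevice and launch_work are hypothetical. It uses the plain CUDA runtime API (cudaSetDevice) directly instead of platform::SetDeviceId.

#include <cstdio>

#if defined(PADDLE_WITH_CUDA) && defined(PADDLE_WITH_HETERPS)
#include <cuda_runtime.h>
#endif

// Hypothetical helper: make sure the calling thread's current CUDA device
// matches the device the executor was built for, then run the workload.
// This mirrors the guarded SetDeviceId calls the patch adds at the top of
// InterpreterCore::DryRun and both InterpreterCore::Run overloads.
template <typename Fn>
void RunOnDevice(int device_id, Fn&& launch_work) {
#if defined(PADDLE_WITH_CUDA) && defined(PADDLE_WITH_HETERPS)
  // Without this call, streams/events created inside launch_work would bind
  // to whatever device is current on this thread (often device 0).
  cudaError_t err = cudaSetDevice(device_id);
  if (err != cudaSuccess) {
    std::fprintf(stderr, "cudaSetDevice(%d) failed: %s\n", device_id,
                 cudaGetErrorString(err));
    return;
  }
#else
  (void)device_id;  // CPU-only build: nothing to pin.
#endif
  launch_work();
}

int main() {
  RunOnDevice(/*device_id=*/0, [] { std::puts("running on pinned device"); });
  return 0;
}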