diff --git a/paddle/fluid/framework/conv_search_cache.h b/paddle/fluid/framework/conv_search_cache.h index db8dc22f68663bedaff1c8b7e942aab7b584528b..51446f287e94b74dcf844abf8b56764b10dc6144 100644 --- a/paddle/fluid/framework/conv_search_cache.h +++ b/paddle/fluid/framework/conv_search_cache.h @@ -17,11 +17,7 @@ limitations under the License. */ #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/operator_kernel_configs.h" -#ifdef PADDLE_WITH_HIP -#include "paddle/fluid/platform/miopen_helper.h" -#else -#include "paddle/fluid/platform/cudnn_helper.h" -#endif +#include "paddle/fluid/platform/device/gpu/gpu_dnn.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/details/all_reduce_op_handle.cc b/paddle/fluid/framework/details/all_reduce_op_handle.cc index 3429677a2403e4b4e0b9ef9f1e04ce748e05acb9..b1573093ec333fe76bf7f146ce4abb9992beb60c 100644 --- a/paddle/fluid/framework/details/all_reduce_op_handle.cc +++ b/paddle/fluid/framework/details/all_reduce_op_handle.cc @@ -291,13 +291,9 @@ void AllReduceOpHandle::SyncNCCLAllReduce() { nccl_ctxs_->GetRunEnvNCCLCtx(run_order_, use_hierarchical_allreduce_); auto &nccl_ctx = nccl_ctxs->at(dev_id); auto stream = nccl_ctx.stream(); -#ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(hipStreamSynchronize(stream)); - PADDLE_ENFORCE_CUDA_SUCCESS(hipGetLastError()); -#else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamSynchronize(stream)); - PADDLE_ENFORCE_CUDA_SUCCESS(cudaGetLastError()); -#endif + + platform::GpuStreamSync(stream); + PADDLE_ENFORCE_GPU_SUCCESS(platform::GpuGetLastError()); } } } diff --git a/paddle/fluid/framework/details/all_reduce_op_handle.h b/paddle/fluid/framework/details/all_reduce_op_handle.h index 033d9396e9bf230982f2561d90a07090fd97cc60..02e35895205b76761d11bb0ede625626af4aafee 100644 --- a/paddle/fluid/framework/details/all_reduce_op_handle.h +++ b/paddle/fluid/framework/details/all_reduce_op_handle.h @@ -33,7 +33,7 @@ class NCCLCommunicator; } // namespace paddle #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) #include "paddle/fluid/framework/details/nccl_op_handle.h" -#include "paddle/fluid/platform/nccl_helper.h" +#include "paddle/fluid/platform/device/gpu/nccl_helper.h" #elif defined(PADDLE_WITH_XPU_BKCL) #include "paddle/fluid/framework/details/bkcl_op_handle.h" #include "paddle/fluid/platform/device/xpu/bkcl_helper.h" diff --git a/paddle/fluid/framework/details/broadcast_op_handle.cc b/paddle/fluid/framework/details/broadcast_op_handle.cc index 36b840e4945a0a13921d088c1f05901a0c545003..a11a244214d4fc485c78e74da436def595f40b10 100644 --- a/paddle/fluid/framework/details/broadcast_op_handle.cc +++ b/paddle/fluid/framework/details/broadcast_op_handle.cc @@ -111,7 +111,7 @@ void BroadcastOpHandle::BroadcastOneVar( broadcast_calls.emplace_back( [send_recv_buffer, numel, type, root_id, &nccl_ctx] { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclBcast( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclBcast( send_recv_buffer, numel, static_cast(type), root_id, nccl_ctx.comm_, nccl_ctx.stream())); }); diff --git a/paddle/fluid/framework/details/broadcast_op_handle.h b/paddle/fluid/framework/details/broadcast_op_handle.h index 0b062b1a3f49a40ee3b67872afcdcfcd900dd803..055c7e63863b37086a98be56138691389d62c1d5 100644 --- a/paddle/fluid/framework/details/broadcast_op_handle.h +++ b/paddle/fluid/framework/details/broadcast_op_handle.h @@ -44,7 +44,7 @@ struct BKCLContextMap; } // namespace paddle #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) -#include 
"paddle/fluid/platform/nccl_helper.h" +#include "paddle/fluid/platform/device/gpu/nccl_helper.h" #elif defined(PADDLE_WITH_XPU_BKCL) #include "paddle/fluid/platform/device/xpu/bkcl_helper.h" #endif diff --git a/paddle/fluid/framework/details/broadcast_op_handle_test.h b/paddle/fluid/framework/details/broadcast_op_handle_test.h index 6ca4baa6d8b0402c4421cd3063205d419dc846a9..2e82fe22dba73149e722958d6027ee6ba52f12d8 100644 --- a/paddle/fluid/framework/details/broadcast_op_handle_test.h +++ b/paddle/fluid/framework/details/broadcast_op_handle_test.h @@ -95,7 +95,7 @@ struct TestBroadcastOpHandle { #endif } else if (use_device_ == p::kCUDA) { #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) - int count = p::GetCUDADeviceCount(); + int count = p::GetGPUDeviceCount(); if (count <= 1) { LOG(WARNING) << "Cannot test multi-gpu Broadcast, because the CUDA " "device count is " diff --git a/paddle/fluid/framework/details/build_strategy.h b/paddle/fluid/framework/details/build_strategy.h index 68c5daaac5d78049b7d0778edc2b22d652ceb65d..f9c28cbee50c3e4d7ba75d9e8e5fc4dc785f2157 100644 --- a/paddle/fluid/framework/details/build_strategy.h +++ b/paddle/fluid/framework/details/build_strategy.h @@ -40,7 +40,7 @@ class NCCLCommunicator; } // namespace paddle #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) -#include "paddle/fluid/platform/nccl_helper.h" +#include "paddle/fluid/platform/device/gpu/nccl_helper.h" #elif defined(PADDLE_WITH_XPU) && defined(PADDLE_WITH_XPU_BKCL) #include "paddle/fluid/platform/device/xpu/bkcl_helper.h" #endif diff --git a/paddle/fluid/framework/details/eager_deletion_op_handle.cc b/paddle/fluid/framework/details/eager_deletion_op_handle.cc index 07f7bbdb97a8d487aa6e7701808c1e308d527637..bcdd6129230b01bdf357894498f1a79e20c7236d 100644 --- a/paddle/fluid/framework/details/eager_deletion_op_handle.cc +++ b/paddle/fluid/framework/details/eager_deletion_op_handle.cc @@ -49,10 +49,10 @@ EagerDeletionOpHandle::EagerDeletionOpHandle( platform::CUDADeviceGuard guard( BOOST_GET_CONST(platform::CUDAPlace, place).device); #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( hipEventCreateWithFlags(&event_, hipEventDisableTiming)); #else - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( cudaEventCreateWithFlags(&event_, cudaEventDisableTiming)); #endif PADDLE_ENFORCE_NOT_NULL(event_, platform::errors::InvalidArgument( @@ -75,9 +75,9 @@ EagerDeletionOpHandle::~EagerDeletionOpHandle() { auto gpu_place = BOOST_GET_CONST(platform::CUDAPlace, dev_ctx_->GetPlace()); platform::CUDADeviceGuard guard(gpu_place.device); #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(hipEventDestroy(event_)); + PADDLE_ENFORCE_GPU_SUCCESS(hipEventDestroy(event_)); #else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaEventDestroy(event_)); + PADDLE_ENFORCE_GPU_SUCCESS(cudaEventDestroy(event_)); #endif } #endif @@ -160,12 +160,12 @@ void EagerDeletionOpHandle::ClearGarbages( reinterpret_cast(gc_)->stream(); auto callback_func = [=]() { #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(hipEventRecord(event_, compute_stream)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS(hipEventRecord(event_, compute_stream)); + PADDLE_ENFORCE_GPU_SUCCESS( hipStreamWaitEvent(callback_stream, event_, 0)); #else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaEventRecord(event_, compute_stream)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS(cudaEventRecord(event_, compute_stream)); + PADDLE_ENFORCE_GPU_SUCCESS( cudaStreamWaitEvent(callback_stream, event_, 0)); #endif }; 
diff --git a/paddle/fluid/framework/details/fused_all_reduce_op_handle.cc b/paddle/fluid/framework/details/fused_all_reduce_op_handle.cc index 94507140a81d614172502135183e9ca4df6a77a6..bd153f24fa318a8cf81de21423fd2581e75f4b31 100644 --- a/paddle/fluid/framework/details/fused_all_reduce_op_handle.cc +++ b/paddle/fluid/framework/details/fused_all_reduce_op_handle.cc @@ -55,9 +55,9 @@ FusedAllReduceOpHandle::~FusedAllReduceOpHandle() { auto destroy_event = [](gpuEvent_t event) { if (event == nullptr) return; #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(hipEventDestroy(event)); + PADDLE_ENFORCE_GPU_SUCCESS(hipEventDestroy(event)); #else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaEventDestroy(event)); + PADDLE_ENFORCE_GPU_SUCCESS(cudaEventDestroy(event)); #endif }; destroy_event(start_event_); @@ -87,10 +87,10 @@ void FusedAllReduceOpHandle::RunImpl() { auto create_event = [](gpuEvent_t *event) { if (*event) return; #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( hipEventCreateWithFlags(event, hipEventDisableTiming)); #else - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( cudaEventCreateWithFlags(event, cudaEventDisableTiming)); #endif }; @@ -109,12 +109,12 @@ void FusedAllReduceOpHandle::RunImpl() { auto &nccl_ctx = flat_nccl_ctxs->at(gpu_place.device); nccl_stream = nccl_ctx.stream(); #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(hipEventRecord(start_event_, compute_stream)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS(hipEventRecord(start_event_, compute_stream)); + PADDLE_ENFORCE_GPU_SUCCESS( hipStreamWaitEvent(nccl_stream, start_event_, 0)); #else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaEventRecord(start_event_, compute_stream)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS(cudaEventRecord(start_event_, compute_stream)); + PADDLE_ENFORCE_GPU_SUCCESS( cudaStreamWaitEvent(nccl_stream, start_event_, 0)); #endif } else { @@ -169,12 +169,12 @@ void FusedAllReduceOpHandle::RunImpl() { #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) if (FLAGS_allreduce_record_one_event) { #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(hipEventRecord(end_event_, nccl_stream)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS(hipEventRecord(end_event_, nccl_stream)); + PADDLE_ENFORCE_GPU_SUCCESS( hipStreamWaitEvent(compute_stream, end_event_, 0)); #else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaEventRecord(end_event_, nccl_stream)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS(cudaEventRecord(end_event_, nccl_stream)); + PADDLE_ENFORCE_GPU_SUCCESS( cudaStreamWaitEvent(compute_stream, end_event_, 0)); #endif } diff --git a/paddle/fluid/framework/details/fused_all_reduce_op_handle.h b/paddle/fluid/framework/details/fused_all_reduce_op_handle.h index 31336b92c4dfb6351a8d2cb05304ad4a29efecff..d522981c77fa1aa30b0e9520537243b8f9135341 100644 --- a/paddle/fluid/framework/details/fused_all_reduce_op_handle.h +++ b/paddle/fluid/framework/details/fused_all_reduce_op_handle.h @@ -35,7 +35,7 @@ class NCCLCommunicator; } // namespace paddle #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) #include "paddle/fluid/framework/details/nccl_op_handle.h" -#include "paddle/fluid/platform/nccl_helper.h" +#include "paddle/fluid/platform/device/gpu/nccl_helper.h" #elif defined(PADDLE_WITH_XPU_BKCL) #include "paddle/fluid/platform/device/xpu/bkcl_helper.h" #endif diff --git a/paddle/fluid/framework/details/fused_broadcast_op_handle.h b/paddle/fluid/framework/details/fused_broadcast_op_handle.h index 
2fd1e0e7e98894788b1e890ccf50b20989813cf8..e08a768f8ce0740c91e0aa12af9126b33383fb40 100644 --- a/paddle/fluid/framework/details/fused_broadcast_op_handle.h +++ b/paddle/fluid/framework/details/fused_broadcast_op_handle.h @@ -37,7 +37,7 @@ struct NCCLContextMap; } // namespace paddle #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) -#include "paddle/fluid/platform/nccl_helper.h" +#include "paddle/fluid/platform/device/gpu/nccl_helper.h" #endif namespace paddle { diff --git a/paddle/fluid/framework/details/gather_op_handle_test.cc b/paddle/fluid/framework/details/gather_op_handle_test.cc index 98c37ca3c406a556acd9f39fd3e019270ff6626c..38e20127f1612e74bd4dc6117680a3df8cc8244f 100644 --- a/paddle/fluid/framework/details/gather_op_handle_test.cc +++ b/paddle/fluid/framework/details/gather_op_handle_test.cc @@ -48,7 +48,7 @@ struct TestGatherOpHandle { void InitCtxOnGpu(bool use_gpu) { if (use_gpu) { #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) - int count = p::GetCUDADeviceCount(); + int count = p::GetGPUDeviceCount(); if (count <= 1) { LOG(WARNING) << "Cannot test multi-gpu Broadcast, because the CUDA " "device count is " diff --git a/paddle/fluid/framework/details/grad_merge_all_reduce_op_handle.h b/paddle/fluid/framework/details/grad_merge_all_reduce_op_handle.h index c59f61347303d4b96efb541122987382864bdcdd..9cfc3ada6ac3d73b2e505b2513af4bb7a0195b99 100644 --- a/paddle/fluid/framework/details/grad_merge_all_reduce_op_handle.h +++ b/paddle/fluid/framework/details/grad_merge_all_reduce_op_handle.h @@ -35,7 +35,7 @@ class NCCLCommunicator; } // namespace paddle #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) #include "paddle/fluid/framework/details/nccl_op_handle.h" -#include "paddle/fluid/platform/nccl_helper.h" +#include "paddle/fluid/platform/device/gpu/nccl_helper.h" #endif namespace paddle { diff --git a/paddle/fluid/framework/details/nan_inf_utils_detail.cu b/paddle/fluid/framework/details/nan_inf_utils_detail.cu index a9ea336e42545720df3f7226dac51531b26ebfff..82557076544163fc327d25526d3d4c34226786ad 100644 --- a/paddle/fluid/framework/details/nan_inf_utils_detail.cu +++ b/paddle/fluid/framework/details/nan_inf_utils_detail.cu @@ -40,7 +40,7 @@ static std::vector& multi_op_var2gpu_str_mutex() { } static void InitMultiGPUOpVarMap() { - int dev_count = platform::GetCUDADeviceCount(); + int dev_count = platform::GetGPUDeviceCount(); PADDLE_ENFORCE_GT(dev_count, 0, platform::errors::NotFound( "cuda device must > 0, now dev_count=%d", dev_count)); @@ -161,11 +161,11 @@ void TensorCheckerVisitor::apply( op_var)); #ifdef __HIPCC__ - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( hipMemcpyAsync(gpu_str_ptr, iter->first.c_str(), op_var.length() + 1, hipMemcpyHostToDevice, dev_ctx->stream())); #else - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( cudaMemcpyAsync(gpu_str_ptr, iter->first.c_str(), op_var.length() + 1, cudaMemcpyHostToDevice, dev_ctx->stream())); #endif diff --git a/paddle/fluid/framework/details/nccl_op_handle.h b/paddle/fluid/framework/details/nccl_op_handle.h index 762f4071b5cabdac75923b5cec7c381ab94aab02..324d39ed8bb77a54897fc31976e6fde6dcdc623a 100644 --- a/paddle/fluid/framework/details/nccl_op_handle.h +++ b/paddle/fluid/framework/details/nccl_op_handle.h @@ -27,7 +27,7 @@ #ifdef PADDLE_WITH_HIP #include "paddle/fluid/platform/dynload/rccl.h" #endif -#include "paddle/fluid/platform/nccl_helper.h" +#include "paddle/fluid/platform/device/gpu/nccl_helper.h" DECLARE_bool(sync_nccl_allreduce); @@ -52,16 +52,16 @@ class 
NCCLOpHandleBase : public OpHandleBase { virtual ~NCCLOpHandleBase() { for (auto& ev : inter_events_) { #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(hipEventDestroy(ev.second)); + PADDLE_ENFORCE_GPU_SUCCESS(hipEventDestroy(ev.second)); #else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaEventDestroy(ev.second)); + PADDLE_ENFORCE_GPU_SUCCESS(cudaEventDestroy(ev.second)); #endif } for (auto& ev : exter_events_) { #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(hipEventDestroy(ev.second)); + PADDLE_ENFORCE_GPU_SUCCESS(hipEventDestroy(ev.second)); #else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaEventDestroy(ev.second)); + PADDLE_ENFORCE_GPU_SUCCESS(cudaEventDestroy(ev.second)); #endif } } @@ -109,14 +109,14 @@ class NCCLOpHandleBase : public OpHandleBase { platform::SetDeviceId(dev_id); #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(hipEventCreateWithFlags( + PADDLE_ENFORCE_GPU_SUCCESS(hipEventCreateWithFlags( &inter_events_[dev_id], hipEventDisableTiming)); - PADDLE_ENFORCE_CUDA_SUCCESS(hipEventCreateWithFlags( + PADDLE_ENFORCE_GPU_SUCCESS(hipEventCreateWithFlags( &exter_events_[dev_id], hipEventDisableTiming)); #else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaEventCreateWithFlags( + PADDLE_ENFORCE_GPU_SUCCESS(cudaEventCreateWithFlags( &inter_events_[dev_id], cudaEventDisableTiming)); - PADDLE_ENFORCE_CUDA_SUCCESS(cudaEventCreateWithFlags( + PADDLE_ENFORCE_GPU_SUCCESS(cudaEventCreateWithFlags( &exter_events_[dev_id], cudaEventDisableTiming)); #endif VLOG(10) << "Create events on dev_id:" << dev_id @@ -142,7 +142,7 @@ class NCCLOpHandleBase : public OpHandleBase { << ", dev_id:" << dev_id << ", dtype:" << datatype << ", place:" << place; - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclAllReduce( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce( sendbuff, recvbuff, count, datatype, op, comm, stream)); } @@ -192,7 +192,7 @@ class NCCLOpHandleBase : public OpHandleBase { << ", dtype:" << datatype << ", place:" << place << ", stream:" << stream; - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclReduce( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclReduce( sendbuff, recvbuff, count, datatype, ncclSum, 0, comm, stream)); #ifdef PADDLE_WITH_HIP @@ -202,11 +202,7 @@ class NCCLOpHandleBase : public OpHandleBase { #endif if (FLAGS_sync_nccl_allreduce) { -#ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(hipStreamSynchronize(stream)); -#else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamSynchronize(stream)); -#endif + platform::GpuStreamSync(stream); } } @@ -230,26 +226,21 @@ class NCCLOpHandleBase : public OpHandleBase { #ifdef PADDLE_WITH_HIP hipStreamWaitEvent(stream, inter_events_.at(dev_id), 0); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclAllReduce( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce( sendbuff, recvbuff, count, datatype, op, comm, stream)); hipEventRecord(exter_events_.at(dev_id), stream); - - if (FLAGS_sync_nccl_allreduce) { - PADDLE_ENFORCE_CUDA_SUCCESS(hipStreamSynchronize(stream)); - } #else cudaStreamWaitEvent(stream, inter_events_.at(dev_id), 0); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclAllReduce( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce( sendbuff, recvbuff, count, datatype, op, comm, stream)); cudaEventRecord(exter_events_.at(dev_id), stream); - +#endif if (FLAGS_sync_nccl_allreduce) { - PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamSynchronize(stream)); + platform::GpuStreamSync(stream); } -#endif } void InterBroadCast(platform::Place place, void* sendbuff, size_t count, @@ -269,7 +260,7 @@ class NCCLOpHandleBase : 
public OpHandleBase { #else cudaStreamWaitEvent(stream, exter_events_.at(dev_id), 0); #endif - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclBcast( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclBcast( sendbuff, count, datatype, 0, comm, stream)); } diff --git a/paddle/fluid/framework/details/op_handle_base.cc b/paddle/fluid/framework/details/op_handle_base.cc index 4b5d0563d73946af194fe0a10106170045284a26..25b5eefc05cda306bb040dda6a9ad2fd478a23a0 100644 --- a/paddle/fluid/framework/details/op_handle_base.cc +++ b/paddle/fluid/framework/details/op_handle_base.cc @@ -35,9 +35,9 @@ OpHandleBase::~OpHandleBase() PADDLE_MAY_THROW { for (auto &ev : events_) { if (ev.second) { #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(hipEventDestroy(ev.second)); + PADDLE_ENFORCE_GPU_SUCCESS(hipEventDestroy(ev.second)); #else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaEventDestroy(ev.second)); + PADDLE_ENFORCE_GPU_SUCCESS(cudaEventDestroy(ev.second)); #endif } } @@ -50,10 +50,10 @@ void OpHandleBase::InitCUDA() { int dev_id = BOOST_GET_CONST(platform::CUDAPlace, p.first).device; platform::SetDeviceId(dev_id); #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( hipEventCreateWithFlags(&events_[dev_id], hipEventDisableTiming)); #else - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( cudaEventCreateWithFlags(&events_[dev_id], cudaEventDisableTiming)); #endif } @@ -182,9 +182,9 @@ void OpHandleBase::RecordWaitEventOnCtx(platform::DeviceContext *waited_ctx) { static_cast(waited_ctx)->stream(); for (auto &ev : events_) { #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(hipStreamWaitEvent(stream, ev.second, 0)); + PADDLE_ENFORCE_GPU_SUCCESS(hipStreamWaitEvent(stream, ev.second, 0)); #else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamWaitEvent(stream, ev.second, 0)); + PADDLE_ENFORCE_GPU_SUCCESS(cudaStreamWaitEvent(stream, ev.second, 0)); #endif } } @@ -221,10 +221,10 @@ void OpHandleBase::WaitInputVarGenerated(bool wait_for_feed) { static_cast(dev_ctxes_.at(place)) ->stream(); #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( hipStreamWaitEvent(stream, in_var_handle->GetEvent(), 0)); #else - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( cudaStreamWaitEvent(stream, in_var_handle->GetEvent(), 0)); #endif #else @@ -250,11 +250,7 @@ void OpHandleBase::WaitInputVarGenerated(bool wait_for_feed) { auto stream = static_cast(pool.Get(place)) ->stream(); -#ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(hipStreamSynchronize(stream)); -#else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamSynchronize(stream)); -#endif + platform::GpuStreamSync(stream); #else PADDLE_THROW(platform::errors::PreconditionNotMet( "Not compiled with CUDA.")); @@ -279,10 +275,10 @@ void OpHandleBase::WaitInputVarGenerated(const platform::Place &place) { dev_ctxes_.at(in_var_handle->place())) ->stream(); #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( hipStreamWaitEvent(stream, in_var_handle->GetEvent(), 0)); #else - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( cudaStreamWaitEvent(stream, in_var_handle->GetEvent(), 0)); #endif #else @@ -319,10 +315,10 @@ void OpHandleBase::RunAndRecordEvent(const std::function &callback) { auto *cuda_dev_ctx = static_cast(p.second); VLOG(10) << "cudadevicecontext:" << cuda_dev_ctx << ", dev_id:" << dev_id; #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( hipEventRecord(events_.at(dev_id), cuda_dev_ctx->stream())); #else - 
PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( cudaEventRecord(events_.at(dev_id), cuda_dev_ctx->stream())); #endif } diff --git a/paddle/fluid/framework/details/reduce_op_handle.cc b/paddle/fluid/framework/details/reduce_op_handle.cc index a485838a95942538259a50cd6097891419d7f58d..bbc458804a195fc0d1729117eeab7cbfe3f80b61 100644 --- a/paddle/fluid/framework/details/reduce_op_handle.cc +++ b/paddle/fluid/framework/details/reduce_op_handle.cc @@ -193,7 +193,7 @@ void ReduceOpHandle::RunImpl() { size_t numel = static_cast(lod_tensor.numel()); all_reduce_calls.emplace_back( [buffer, recvbuffer, type, numel, root_id, &nccl_ctx] { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclReduce( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclReduce( buffer, recvbuffer, numel, static_cast(type), ncclSum, root_id, nccl_ctx.comm_, nccl_ctx.stream())); }); diff --git a/paddle/fluid/framework/details/reduce_op_handle.h b/paddle/fluid/framework/details/reduce_op_handle.h index d56b6b3663003c6f4568f6d6619b12af2c981536..4b9f289eaa787ae50c7ce05801c68e160d36f0af 100644 --- a/paddle/fluid/framework/details/reduce_op_handle.h +++ b/paddle/fluid/framework/details/reduce_op_handle.h @@ -41,7 +41,7 @@ struct NCCLContextMap; } // namespace platform } // namespace paddle #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) -#include "paddle/fluid/platform/nccl_helper.h" +#include "paddle/fluid/platform/device/gpu/nccl_helper.h" #elif defined(PADDLE_WITH_XPU_BKCL) #include "paddle/fluid/platform/device/xpu/bkcl_helper.h" #endif diff --git a/paddle/fluid/framework/details/reduce_op_handle_test.cc b/paddle/fluid/framework/details/reduce_op_handle_test.cc index 82f5ea6a66891a1d0702713a9c4ab667ed3cdccd..35dba488454725ddc889f62a1c7511e38bd570ff 100644 --- a/paddle/fluid/framework/details/reduce_op_handle_test.cc +++ b/paddle/fluid/framework/details/reduce_op_handle_test.cc @@ -59,7 +59,7 @@ struct TestReduceOpHandle { use_gpu_ = use_gpu; if (use_gpu) { #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) - int count = p::GetCUDADeviceCount(); + int count = p::GetGPUDeviceCount(); if (count <= 1) { LOG(WARNING) << "Cannot test multi-gpu Broadcast, because the CUDA " "device count is " diff --git a/paddle/fluid/framework/details/sparse_all_reduce_op_handle.cc b/paddle/fluid/framework/details/sparse_all_reduce_op_handle.cc index 37399e5ddc09d9c8237319682e80d352d658411d..d916b9bc262765c63f62e0dd422db1d455fd7a0d 100644 --- a/paddle/fluid/framework/details/sparse_all_reduce_op_handle.cc +++ b/paddle/fluid/framework/details/sparse_all_reduce_op_handle.cc @@ -23,7 +23,7 @@ #include "paddle/fluid/framework/operator.h" #include "paddle/fluid/memory/malloc.h" #include "paddle/fluid/platform/cuda_device_guard.h" -#include "paddle/fluid/platform/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" #include "paddle/fluid/platform/profiler.h" DECLARE_bool(sync_nccl_allreduce); @@ -182,7 +182,7 @@ void SparseAllReduceOpHandle::RunImplEncoded() { << ", k:" << k << ", place:" << place << ", dtype:" << dtype; all_gather_calls.emplace_back([=] { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclAllGather( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllGather( in_tensor_buf, gather_buff, 2 * k, static_cast(dtype), comm, stream)); }); diff --git a/paddle/fluid/framework/details/sparse_all_reduce_op_handle.h b/paddle/fluid/framework/details/sparse_all_reduce_op_handle.h index 8bfea0f1ae8b8a63f5a220382f74b2a8e116f5ef..5c3aef71ec40ea68f712dbec332ee5b572eb9574 100644 --- 
a/paddle/fluid/framework/details/sparse_all_reduce_op_handle.h +++ b/paddle/fluid/framework/details/sparse_all_reduce_op_handle.h @@ -21,7 +21,7 @@ #include "paddle/fluid/framework/details/dgc_const_values.h" #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/scope.h" -#include "paddle/fluid/platform/nccl_helper.h" +#include "paddle/fluid/platform/device/gpu/nccl_helper.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/device_worker.h b/paddle/fluid/framework/device_worker.h index 600d75db53c7e791b3bf84ca787c90022598f729..15acedf3cf50a8f6aaffca8f9839e38fa8a0aabb 100644 --- a/paddle/fluid/framework/device_worker.h +++ b/paddle/fluid/framework/device_worker.h @@ -54,7 +54,7 @@ class DeviceContext; } // namespace paddle #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) -#include "paddle/fluid/platform/nccl_helper.h" +#include "paddle/fluid/platform/device/gpu/nccl_helper.h" #endif namespace paddle { diff --git a/paddle/fluid/framework/dlpack_tensor_test.cc b/paddle/fluid/framework/dlpack_tensor_test.cc index 4e2d7bb979b617b9ef088b419a6ab48ed3c79f1d..9b8bdebe706ebf578ad2e3dd541fa2888a63faae 100644 --- a/paddle/fluid/framework/dlpack_tensor_test.cc +++ b/paddle/fluid/framework/dlpack_tensor_test.cc @@ -115,7 +115,7 @@ void TestMainLoop() { std::vector places{platform::CPUPlace(), platform::CUDAPlace(0), platform::CUDAPinnedPlace()}; - if (platform::GetCUDADeviceCount() > 1) { + if (platform::GetGPUDeviceCount() > 1) { places.emplace_back(platform::CUDAPlace(1)); } #else diff --git a/paddle/fluid/framework/fleet/ascend_wrapper.h b/paddle/fluid/framework/fleet/ascend_wrapper.h index f749ee8cfa0baa410e3b6c4b67de07bdd30611ab..82ce3b28776f1229dfca43281fffd7ee1268f568 100644 --- a/paddle/fluid/framework/fleet/ascend_wrapper.h +++ b/paddle/fluid/framework/fleet/ascend_wrapper.h @@ -24,7 +24,7 @@ limitations under the License. 
*/ #include "paddle/fluid/framework/data_type.h" #include "paddle/fluid/framework/lod_tensor.h" -#include "paddle/fluid/platform/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" #include "paddle/fluid/platform/place.h" #include "paddle/fluid/platform/timer.h" diff --git a/paddle/fluid/framework/fleet/box_wrapper.cc b/paddle/fluid/framework/fleet/box_wrapper.cc index 37fbf47f854ade5f854d206c29eca6c11a89ee85..8564a42165961b8d1bc15e8df21e68f229b29065 100644 --- a/paddle/fluid/framework/fleet/box_wrapper.cc +++ b/paddle/fluid/framework/fleet/box_wrapper.cc @@ -19,7 +19,7 @@ #include #include #include "paddle/fluid/framework/lod_tensor.h" -#include "paddle/fluid/platform/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/fleet/box_wrapper.cu b/paddle/fluid/framework/fleet/box_wrapper.cu index c9b5abf7a9befc01a2defe64e346e8f192cad70a..c91d371f5a155942b9baa90c419bd709fdf14f03 100644 --- a/paddle/fluid/framework/fleet/box_wrapper.cu +++ b/paddle/fluid/framework/fleet/box_wrapper.cu @@ -19,7 +19,7 @@ #include #include "paddle/fluid/framework/fleet/box_wrapper.h" #include "paddle/fluid/framework/lod_tensor.h" -#include "paddle/fluid/platform/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/fleet/box_wrapper.h b/paddle/fluid/framework/fleet/box_wrapper.h index 645d725871a061165a1138d96bcff8261bca3056..b043edca138a8403da955660734ea37328d79b2f 100644 --- a/paddle/fluid/framework/fleet/box_wrapper.h +++ b/paddle/fluid/framework/fleet/box_wrapper.h @@ -40,7 +40,7 @@ limitations under the License. */ #include "paddle/fluid/framework/data_set.h" #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/scope.h" -#include "paddle/fluid/platform/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" #include "paddle/fluid/platform/place.h" #include "paddle/fluid/platform/timer.h" #include "paddle/fluid/string/string_helper.h" @@ -397,7 +397,7 @@ class BoxWrapper { if (nullptr != s_instance_) { VLOG(3) << "Begin InitializeGPU"; std::vector stream_list; - for (int i = 0; i < platform::GetCUDADeviceCount(); ++i) { + for (int i = 0; i < platform::GetGPUDeviceCount(); ++i) { VLOG(3) << "before get context i[" << i << "]"; platform::CUDADeviceContext* context = dynamic_cast( @@ -416,7 +416,7 @@ class BoxWrapper { slot_name_omited_in_feedpass_.insert(slot_name); } slot_vector_ = slot_vector; - keys_tensor.resize(platform::GetCUDADeviceCount()); + keys_tensor.resize(platform::GetGPUDeviceCount()); } } diff --git a/paddle/fluid/framework/fleet/fleet_wrapper.cc b/paddle/fluid/framework/fleet/fleet_wrapper.cc index 66c043e137a247a68c8c69a88dd12112ee4786bb..225c2656fbfd1d544ab908086d4766424d199b4e 100644 --- a/paddle/fluid/framework/fleet/fleet_wrapper.cc +++ b/paddle/fluid/framework/fleet/fleet_wrapper.cc @@ -740,10 +740,10 @@ void FleetWrapper::PushDenseVarsAsync( BOOST_GET_CONST(platform::CUDAPlace, place), g_data, sizeof(float) * count, stream); #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(hipEventRecord(event, stream)); + PADDLE_ENFORCE_GPU_SUCCESS(hipEventRecord(event, stream)); hipEventSynchronize(event); #else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaEventRecord(event, stream)); + PADDLE_ENFORCE_GPU_SUCCESS(cudaEventRecord(event, stream)); cudaEventSynchronize(event); #endif diff --git a/paddle/fluid/framework/fleet/fleet_wrapper.h 
b/paddle/fluid/framework/fleet/fleet_wrapper.h index 6fddedccf025858de0b9235d1b86152bf267f37c..deb2b90c9335329e3c9b5a93937689d79fc5c5a6 100644 --- a/paddle/fluid/framework/fleet/fleet_wrapper.h +++ b/paddle/fluid/framework/fleet/fleet_wrapper.h @@ -35,7 +35,7 @@ limitations under the License. */ #include "paddle/fluid/framework/variable_helper.h" #include "paddle/fluid/platform/macros.h" // for DISABLE_COPY_AND_ASSIGN #ifdef PADDLE_WITH_HETERPS -#include "paddle/fluid/platform/type_defs.h" +#include "paddle/fluid/platform/device/gpu/gpu_types.h" #endif namespace paddle { diff --git a/paddle/fluid/framework/fleet/heter_ps/hashtable.h b/paddle/fluid/framework/fleet/heter_ps/hashtable.h index 646a2e97d319fbd672e1b044fd172a013cbd46b2..e7f098320c6c754481c638cd7c5fc0ad461e0990 100644 --- a/paddle/fluid/framework/fleet/heter_ps/hashtable.h +++ b/paddle/fluid/framework/fleet/heter_ps/hashtable.h @@ -28,7 +28,7 @@ limitations under the License. */ // #include "cudf/concurrent_unordered_map.cuh.h" #include "paddle/fluid/framework/fleet/heter_ps/cudf/concurrent_unordered_map.cuh.h" #ifdef PADDLE_WITH_HETERPS -#include "paddle/fluid/platform/type_defs.h" +#include "paddle/fluid/platform/device/gpu/gpu_types.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/fleet/heter_ps/heter_comm_inl.h b/paddle/fluid/framework/fleet/heter_ps/heter_comm_inl.h index ec852ec83ca09c29df3c7f00266f9dc945541714..c293b07e8995c12be16420ddbdc5bb456d28fa0f 100644 --- a/paddle/fluid/framework/fleet/heter_ps/heter_comm_inl.h +++ b/paddle/fluid/framework/fleet/heter_ps/heter_comm_inl.h @@ -347,7 +347,7 @@ void HeterComm::build_ps(int num, KeyType* h_keys, gpuStream_t streams[stream_num]; for (int i = 0; i < stream_num; ++i) { - PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamCreate(&(streams[i]))); + PADDLE_ENFORCE_GPU_SUCCESS(cudaStreamCreate(&(streams[i]))); auto d_k_buf = memory::AllocShared(place, chunk_size * sizeof(KeyType)); auto d_v_buf = memory::AllocShared(place, chunk_size * sizeof(ValType)); d_key_bufs.push_back(d_k_buf); @@ -360,11 +360,11 @@ void HeterComm::build_ps(int num, KeyType* h_keys, while (cur_len < len) { cur_stream = cur_stream % stream_num; int tmp_len = cur_len + chunk_size > len ? 
len - cur_len : chunk_size; - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( cudaMemcpyAsync(d_key_bufs[cur_stream]->ptr(), h_keys + cur_len, sizeof(KeyType) * tmp_len, cudaMemcpyHostToDevice, streams[cur_stream])); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( cudaMemcpyAsync(d_val_bufs[cur_stream]->ptr(), h_vals + cur_len, sizeof(ValType) * tmp_len, cudaMemcpyHostToDevice, streams[cur_stream])); @@ -378,7 +378,7 @@ void HeterComm::build_ps(int num, KeyType* h_keys, for (int i = 0; i < stream_num; ++i) { cudaStreamSynchronize(streams[i]); - PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamDestroy(streams[i])); + PADDLE_ENFORCE_GPU_SUCCESS(cudaStreamDestroy(streams[i])); } } @@ -402,14 +402,14 @@ void HeterComm::merge_grad(int gpu_num, GradType* d_merge_grads_ptr = reinterpret_cast(d_merge_grads->ptr()); - PADDLE_ENFORCE_CUDA_SUCCESS(cub::DeviceRadixSort::SortPairs( + PADDLE_ENFORCE_GPU_SUCCESS(cub::DeviceRadixSort::SortPairs( NULL, temp_storage_bytes, d_keys, d_merge_keys_ptr, d_grads, d_merge_grads_ptr, len, 0, 8 * sizeof(KeyType), stream, false)); void* d_buff = NULL; auto d_temp_storage = memory::AllocShared(place, temp_storage_bytes); - PADDLE_ENFORCE_CUDA_SUCCESS(cub::DeviceRadixSort::SortPairs( + PADDLE_ENFORCE_GPU_SUCCESS(cub::DeviceRadixSort::SortPairs( d_temp_storage->ptr(), temp_storage_bytes, d_keys, d_merge_keys_ptr, d_grads, d_merge_grads_ptr, len, 0, 8 * sizeof(KeyType), stream, false)); temp_storage_bytes = 0; @@ -417,7 +417,7 @@ void HeterComm::merge_grad(int gpu_num, auto d_num_runs_out_mem = memory::AllocShared(place, sizeof(int)); int* d_num_runs_out = reinterpret_cast(d_num_runs_out_mem->ptr()); - PADDLE_ENFORCE_CUDA_SUCCESS(cub::DeviceReduce::ReduceByKey( + PADDLE_ENFORCE_GPU_SUCCESS(cub::DeviceReduce::ReduceByKey( NULL, temp_storage_bytes, d_merge_keys_ptr, d_keys, d_merge_grads_ptr, d_grads, d_num_runs_out, merger_, len, stream, false)); @@ -426,13 +426,13 @@ void HeterComm::merge_grad(int gpu_num, d_temp_storage = memory::AllocShared(place, temp_storage_bytes); } - PADDLE_ENFORCE_CUDA_SUCCESS(cub::DeviceReduce::ReduceByKey( + PADDLE_ENFORCE_GPU_SUCCESS(cub::DeviceReduce::ReduceByKey( d_temp_storage->ptr(), temp_storage_bytes, d_merge_keys_ptr, d_keys, d_merge_grads_ptr, d_grads, d_num_runs_out, merger_, len, stream, false)); cudaMemcpyAsync(&uniq_len, d_num_runs_out, sizeof(int), cudaMemcpyDeviceToHost, stream); - PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamSynchronize(stream)); + PADDLE_ENFORCE_GPU_SUCCESS(cudaStreamSynchronize(stream)); } template @@ -461,12 +461,12 @@ void HeterComm::split_input_to_shard( size_t temp_storage_bytes; const int num_bits = 1 + log2i(total_gpu); - PADDLE_ENFORCE_CUDA_SUCCESS(cub::DeviceRadixSort::SortPairs( + PADDLE_ENFORCE_GPU_SUCCESS(cub::DeviceRadixSort::SortPairs( NULL, temp_storage_bytes, d_shard_index_tmp_ptr, d_shard_index_ptr, d_idx_tmp_ptr, d_idx_ptr, len, 0, num_bits, stream)); auto d_temp_storage = memory::AllocShared(place, temp_storage_bytes); - PADDLE_ENFORCE_CUDA_SUCCESS(cub::DeviceRadixSort::SortPairs( + PADDLE_ENFORCE_GPU_SUCCESS(cub::DeviceRadixSort::SortPairs( d_temp_storage->ptr(), temp_storage_bytes, d_shard_index_tmp_ptr, d_shard_index_ptr, d_idx_tmp_ptr, d_idx_ptr, len, 0, num_bits, stream)); calc_shard_offset<<>>(d_shard_index_ptr, @@ -720,12 +720,12 @@ int HeterComm::gather_one_node_grad( cudaMemcpyHostToDevice); // allgather grad len - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclGroupStart()); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclAllGather( + 
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclGroupStart()); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllGather( (const void*)(d_node_len + gpu_num), (void*)d_node_len, 1, ncclInt, nccl_inner_comm, stream)); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclGroupEnd()); - PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamSynchronize(stream)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclGroupEnd()); + PADDLE_ENFORCE_GPU_SUCCESS(cudaStreamSynchronize(stream)); cudaMemcpy(h_node_len, d_node_len, sizeof(int) * total_gpu, cudaMemcpyDeviceToHost); @@ -737,15 +737,15 @@ int HeterComm::gather_one_node_grad( storage.alloc(max_size * total_gpu); // allgather keys and grads - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclGroupStart()); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclAllGather( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclGroupStart()); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllGather( d_keys, storage.all_keys, max_size, ncclUint64, nccl_inner_comm, stream)); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclAllGather( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllGather( d_grads, storage.all_grads, max_size * sizeof(GradType), ncclUint8, nccl_inner_comm, stream)); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclGroupEnd()); - PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamSynchronize(stream)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclGroupEnd()); + PADDLE_ENFORCE_GPU_SUCCESS(cudaStreamSynchronize(stream)); int h_left[total_gpu]; int h_right[total_gpu]; @@ -802,11 +802,11 @@ int HeterComm::gather_multi_node_grad( cudaMemcpy(d_node_len, h_node_len, sizeof(int), cudaMemcpyHostToDevice); // allgather grad len - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclGroupStart()); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclAllGather( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclGroupStart()); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllGather( d_node_len, d_node_len, 1, ncclInt, nccl_inter_comm, stream)); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclGroupEnd()); - PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamSynchronize(stream)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclGroupEnd()); + PADDLE_ENFORCE_GPU_SUCCESS(cudaStreamSynchronize(stream)); cudaMemcpy(h_node_len, d_node_len, sizeof(int) * node_size_, cudaMemcpyDeviceToHost); @@ -818,15 +818,15 @@ int HeterComm::gather_multi_node_grad( storage.alloc(max_size * node_size_); // allgather keys and grads - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclGroupStart()); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclAllGather( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclGroupStart()); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllGather( d_keys, storage.all_keys, max_size, ncclUint64, nccl_inter_comm, stream)); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclAllGather( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllGather( d_grads, storage.all_grads, max_size * sizeof(GradType), ncclUint8, nccl_inter_comm, stream)); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclGroupEnd()); - PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamSynchronize(stream)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclGroupEnd()); + PADDLE_ENFORCE_GPU_SUCCESS(cudaStreamSynchronize(stream)); int merge_num = 0; for (int i = 0; i < node_size_; ++i) { diff --git a/paddle/fluid/framework/fleet/heter_ps/heter_resource.cc b/paddle/fluid/framework/fleet/heter_ps/heter_resource.cc index 
a369a612d4935d923ee82ee8e402da83beac3c06..ccdb6c5cdd64e478bea611e3ca23345b1fced23d 100644 --- a/paddle/fluid/framework/fleet/heter_ps/heter_resource.cc +++ b/paddle/fluid/framework/fleet/heter_ps/heter_resource.cc @@ -30,11 +30,11 @@ GPUResource::GPUResource(std::vector& dev_ids, int index) { remote_streams_.resize(dev_ids_.size()); for (size_t i = 0; i < dev_ids_.size(); ++i) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( cudaStreamCreateWithFlags(&local_streams_[i], cudaStreamNonBlocking)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( cudaStreamCreateWithFlags(&comm_streams_[i], cudaStreamNonBlocking)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( cudaStreamCreateWithFlags(&remote_streams_[i], cudaStreamNonBlocking)); } } @@ -42,13 +42,13 @@ GPUResource::GPUResource(std::vector& dev_ids, int index) { GPUResource::~GPUResource() { platform::CUDADeviceGuard guard(dev_id_); for (size_t i = 0; i < local_streams_.size(); ++i) { - PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamDestroy(local_streams_[i])); + PADDLE_ENFORCE_GPU_SUCCESS(cudaStreamDestroy(local_streams_[i])); } for (size_t i = 0; i < comm_streams_.size(); ++i) { - PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamDestroy(comm_streams_[i])); + PADDLE_ENFORCE_GPU_SUCCESS(cudaStreamDestroy(comm_streams_[i])); } for (size_t i = 0; i < remote_streams_.size(); ++i) { - PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamDestroy(remote_streams_[i])); + PADDLE_ENFORCE_GPU_SUCCESS(cudaStreamDestroy(remote_streams_[i])); } } @@ -58,7 +58,7 @@ void HeterPsResource::enable_p2p() { for (size_t j = 0; j < dev_ids_.size(); ++j) { if (i != j) { int p2p_flag; - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( cudaDeviceCanAccessPeer(&p2p_flag, dev_ids_[i], dev_ids_[j])); if (p2p_flag == 1) { cudaError_t ret = cudaDeviceEnablePeerAccess(dev_ids_[j], 0); diff --git a/paddle/fluid/framework/fleet/nccl_wrapper.cc b/paddle/fluid/framework/fleet/nccl_wrapper.cc index 3ac95632de6bf63d6420054529df37238cd3c24b..cbd06deeafc75a14a34a17213d003de9977405ce 100644 --- a/paddle/fluid/framework/fleet/nccl_wrapper.cc +++ b/paddle/fluid/framework/fleet/nccl_wrapper.cc @@ -22,7 +22,7 @@ bool NCCLWrapper::is_initialized_ = false; void NCCLWrapper::InitNCCL() { #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclCommInitRank( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclCommInitRank( &(nccl_info_.comm_), nccl_info_.global_ranks_, nccl_info_.nccl_id_, nccl_info_.my_global_rank_)); #endif @@ -38,7 +38,7 @@ void NCCLWrapper::SetNCCLId(const NCCLInfo& nccl_info) { NCCLInfo NCCLWrapper::GetNCCLId() { #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::ncclGetUniqueId(&(nccl_info_.nccl_id_))); #endif return nccl_info_; @@ -52,9 +52,9 @@ void NCCLWrapper::SetRankInfo(const int local_rank, const int global_rank, nccl_info_.global_ranks_ = ranks; platform::SetDeviceId(local_rank); #ifdef PADDLE_WITH_RCCL - PADDLE_ENFORCE_CUDA_SUCCESS(hipStreamCreate(&(nccl_info_.stream_))); + PADDLE_ENFORCE_GPU_SUCCESS(hipStreamCreate(&(nccl_info_.stream_))); #else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamCreate(&(nccl_info_.stream_))); + PADDLE_ENFORCE_GPU_SUCCESS(cudaStreamCreate(&(nccl_info_.stream_))); #endif #endif return; @@ -67,7 +67,7 @@ void NCCLWrapper::SyncVar(const int root_rank, const Scope& scope, auto var = scope.FindVar(name); LoDTensor* tensor = var->GetMutable(); int32_t total_size = tensor->numel(); - 
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclBcast( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclBcast( reinterpret_cast(tensor->data()), total_size, ncclFloat, root_rank, nccl_info_.comm_, nccl_info_.stream_)); #ifdef PADDLE_WITH_RCCL diff --git a/paddle/fluid/framework/fleet/ps_gpu_wrapper.cu b/paddle/fluid/framework/fleet/ps_gpu_wrapper.cu index 6519a514ff3b693929feda5ec6ae4c3af0fbaf18..a0954ef0709dc3e5fc5c886b977a3b0e76d97ef3 100644 --- a/paddle/fluid/framework/fleet/ps_gpu_wrapper.cu +++ b/paddle/fluid/framework/fleet/ps_gpu_wrapper.cu @@ -20,7 +20,7 @@ limitations under the License. */ #include "paddle/fluid/framework/fleet/heter_ps/optimizer_conf.h" #include "paddle/fluid/framework/fleet/ps_gpu_wrapper.h" #include "paddle/fluid/framework/lod_tensor.h" -#include "paddle/fluid/platform/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/fleet/ps_gpu_wrapper.h b/paddle/fluid/framework/fleet/ps_gpu_wrapper.h index b726a629586e18a58acac4d9995c2fbd4e7a12e5..c163c2de1101919d7793996cf4b9df4d74258cbe 100644 --- a/paddle/fluid/framework/fleet/ps_gpu_wrapper.h +++ b/paddle/fluid/framework/fleet/ps_gpu_wrapper.h @@ -37,8 +37,8 @@ limitations under the License. */ #include "paddle/fluid/framework/scope.h" #include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/framework/variable_helper.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" #include "paddle/fluid/platform/dynload/nccl.h" -#include "paddle/fluid/platform/gpu_info.h" #include "paddle/fluid/platform/macros.h" // for DISABLE_COPY_AND_ASSIGN #include "paddle/fluid/platform/place.h" #ifdef PADDLE_WITH_PSCORE @@ -230,7 +230,7 @@ class PSGPUWrapper { ? 1.0 : config["mf_max_bound"]; for (size_t i = 0; i < heter_devices_.size(); i++) { - PADDLE_ENFORCE_CUDA_SUCCESS(cudaSetDevice(heter_devices_[i])); + PADDLE_ENFORCE_GPU_SUCCESS(cudaSetDevice(heter_devices_[i])); this->SetSparseSGD(nonclk_coeff, clk_coeff, min_bound, max_bound, learning_rate, initial_g2sum, initial_range); this->SetEmbedxSGD(mf_create_thresholds, mf_learning_rate, diff --git a/paddle/fluid/framework/garbage_collector.cc b/paddle/fluid/framework/garbage_collector.cc index 9ab6b5d8c178b9272241ce8db600f3ea32276988..8b6a5747dbfced5576926b19e2203cf9a8f3129f 100644 --- a/paddle/fluid/framework/garbage_collector.cc +++ b/paddle/fluid/framework/garbage_collector.cc @@ -83,9 +83,9 @@ StreamGarbageCollector::StreamGarbageCollector(const platform::CUDAPlace &place, : GarbageCollector(place, max_memory_size) { platform::CUDADeviceGuard guard(place.device); #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(hipStreamCreate(&stream_)); + PADDLE_ENFORCE_GPU_SUCCESS(hipStreamCreate(&stream_)); #else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamCreate(&stream_)); + PADDLE_ENFORCE_GPU_SUCCESS(cudaStreamCreate(&stream_)); callback_manager_.reset( new platform::StreamCallbackManager(stream_)); #endif @@ -94,13 +94,8 @@ StreamGarbageCollector::StreamGarbageCollector(const platform::CUDAPlace &place, StreamGarbageCollector::~StreamGarbageCollector() { auto place = BOOST_GET_CONST(platform::CUDAPlace, this->dev_ctx_->GetPlace()); platform::CUDADeviceGuard guard(place.device); -#ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(hipStreamSynchronize(stream_)); - PADDLE_ENFORCE_CUDA_SUCCESS(hipStreamDestroy(stream_)); -#else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamSynchronize(stream_)); - PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamDestroy(stream_)); -#endif + 
platform::GpuStreamSync(stream_); + platform::GpuDestroyStream(stream_); } gpuStream_t StreamGarbageCollector::stream() const { return stream_; } diff --git a/paddle/fluid/framework/generator.cc b/paddle/fluid/framework/generator.cc index 154154fc795179cdc37ce7bac8fd9faa2b071bce..a020bda8231670f4940caa62cc57fc2cf3abe757 100644 --- a/paddle/fluid/framework/generator.cc +++ b/paddle/fluid/framework/generator.cc @@ -18,8 +18,8 @@ limitations under the License. */ #include #include +#include "paddle/fluid/platform/device/gpu/gpu_info.h" #include "paddle/fluid/platform/enforce.h" -#include "paddle/fluid/platform/gpu_info.h" namespace paddle { namespace framework { @@ -33,7 +33,7 @@ const std::shared_ptr& GetDefaultCUDAGenerator(int64_t device_id) { static std::vector> default_cuda_generators; std::call_once(num_devices_init_flag, []() { - num_cuda_devices = paddle::platform::GetCUDADeviceCount(); + num_cuda_devices = paddle::platform::GetGPUDeviceCount(); cuda_device_flags.resize(num_cuda_devices); default_cuda_generators.resize(num_cuda_devices); }); diff --git a/paddle/fluid/framework/heterxpu_trainer.cc b/paddle/fluid/framework/heterxpu_trainer.cc index 8049a1c9424bebf271f55c1247f1277a0836d88d..93b7869cc1d25053b4473f16eb2b0d63d4a2f3d0 100644 --- a/paddle/fluid/framework/heterxpu_trainer.cc +++ b/paddle/fluid/framework/heterxpu_trainer.cc @@ -51,11 +51,11 @@ void HeterXpuTrainer::Initialize(const TrainerDesc& trainer_desc, platform::CUDAPlace place = platform::CUDAPlace(num); platform::CUDADeviceGuard guard(place.device); cudaStream_t stream; - PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamCreate(&stream)); + PADDLE_ENFORCE_GPU_SUCCESS(cudaStreamCreate(&stream)); copy_streams_.push_back(stream); places_.push_back(place); cudaEvent_t event; - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( cudaEventCreateWithFlags(&event, cudaEventDisableTiming)); events_.push_back(event); #endif @@ -104,7 +104,7 @@ void HeterXpuTrainer::Initialize(const TrainerDesc& trainer_desc, // platform::CUDAPlace place = platform::CUDAPlace(num); // platform::CUDADeviceGuard guard(place.device); // cudaStream_t stream; - // PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamCreate(&stream)); + // PADDLE_ENFORCE_GPU_SUCCESS(cudaStreamCreate(&stream)); // copy_streams_.push_back(stream); // places_.push_back(place); // } @@ -157,7 +157,7 @@ void HeterXpuTrainer::CreateThreadParam(const ProgramDesc& program, int num) { } } #ifdef PADDLE_WITH_CUDA - PADDLE_ENFORCE_CUDA_SUCCESS(cudaEventRecord(event, stream)); + PADDLE_ENFORCE_GPU_SUCCESS(cudaEventRecord(event, stream)); cudaEventSynchronize(event); #endif } @@ -287,7 +287,7 @@ void HeterXpuTrainer::InitOtherEnv(const ProgramDesc& main_program) { #ifdef PADDLE_WITH_CUDA auto dev_id = BOOST_GET_CONST(platform::CUDAPlace, place).device; platform::CUDADeviceGuard guard(dev_id); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( cudaEventCreateWithFlags(&context->event_, cudaEventDisableTiming)); #endif object_pool_.Push(context); @@ -441,7 +441,7 @@ int HeterXpuTrainer::RunTask(const HeterRequest* request, #ifdef PADDLE_WITH_CUDA auto dev_id = BOOST_GET_CONST(platform::CUDAPlace, place).device; platform::CUDADeviceGuard guard(dev_id); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( cudaEventCreateWithFlags(&context->event_, cudaEventDisableTiming)); #endif } @@ -461,7 +461,7 @@ int HeterXpuTrainer::RunTask(const HeterRequest* request, #endif } #ifdef PADDLE_WITH_CUDA - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( cudaEventRecord(context->event_, 
copy_streams_[context->place_num_])); while (cudaEventQuery(context->event_) != cudaSuccess) { VLOG(3) << "wait for kernel"; @@ -481,7 +481,7 @@ int HeterXpuTrainer::RunTask(const HeterRequest* request, #ifdef PADDLE_WITH_CUDA auto* dev_ctx = static_cast( platform::DeviceContextPool::Instance().Get(place)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( cudaEventRecord(context->event_, dev_ctx->stream())); // cudaEventSynchronize(context->event_); { diff --git a/paddle/fluid/framework/ir/fuse_bn_act_pass.cc b/paddle/fluid/framework/ir/fuse_bn_act_pass.cc index ae662c64af33193cf4f28c42d304bcd089c14a39..f12273e94dddd51d923b90412677b16ca7a5186d 100644 --- a/paddle/fluid/framework/ir/fuse_bn_act_pass.cc +++ b/paddle/fluid/framework/ir/fuse_bn_act_pass.cc @@ -24,12 +24,8 @@ class Node; } // namespace ir } // namespace framework } // namespace paddle -#ifdef PADDLE_WITH_CUDA -#include "paddle/fluid/platform/cudnn_helper.h" -#endif -#ifdef PADDLE_WITH_HIP -#include "paddle/fluid/platform/miopen_helper.h" -#endif + +#include "paddle/fluid/platform/device/gpu/gpu_dnn.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/ir/fuse_bn_add_act_pass.cc b/paddle/fluid/framework/ir/fuse_bn_add_act_pass.cc index ec014d331fa447f905a43a6dc136b80f72692dcb..005f006ab047886081cde819122e16d186116fd4 100644 --- a/paddle/fluid/framework/ir/fuse_bn_add_act_pass.cc +++ b/paddle/fluid/framework/ir/fuse_bn_add_act_pass.cc @@ -15,13 +15,8 @@ #include "paddle/fluid/framework/ir/fuse_bn_add_act_pass.h" #include #include "paddle/fluid/framework/operator.h" +#include "paddle/fluid/platform/device/gpu/gpu_dnn.h" #include "paddle/fluid/platform/enforce.h" -#ifdef PADDLE_WITH_CUDA -#include "paddle/fluid/platform/cudnn_helper.h" -#endif -#ifdef PADDLE_WITH_HIP -#include "paddle/fluid/platform/miopen_helper.h" -#endif namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/mixed_vector_test.cu b/paddle/fluid/framework/mixed_vector_test.cu index 8fb59d682e40fb80b5054914fb687610015544cd..10e7ed0fb60219c69425442ec4941ac212c7efc5 100644 --- a/paddle/fluid/framework/mixed_vector_test.cu +++ b/paddle/fluid/framework/mixed_vector_test.cu @@ -24,7 +24,7 @@ #include "glog/logging.h" #include "gtest/gtest.h" #include "paddle/fluid/framework/mixed_vector.h" -#include "paddle/fluid/platform/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" template using vec = paddle::framework::Vector; @@ -63,7 +63,7 @@ TEST(mixed_vector, GPU_VECTOR) { } TEST(mixed_vector, MultiGPU) { - if (paddle::platform::GetCUDADeviceCount() < 2) { + if (paddle::platform::GetGPUDeviceCount() < 2) { LOG(WARNING) << "Skip mixed_vector.MultiGPU since there are not multiple " "GPUs in your machine."; return; diff --git a/paddle/fluid/framework/new_executor/interpretercore.cc b/paddle/fluid/framework/new_executor/interpretercore.cc index 9f6e0557815062a42ff61a393a603b42abb80f8c..dcbdd12f88fb7ab88daff8d540aacc75f00c3c1c 100644 --- a/paddle/fluid/framework/new_executor/interpretercore.cc +++ b/paddle/fluid/framework/new_executor/interpretercore.cc @@ -398,13 +398,8 @@ void InterpreterCore::RunInstruction(const Instruction& instr_node) { /*For profiling/benchmark only*/ if (FLAGS_benchmark) { instr_node.DeviceContext().Wait(); -#if defined(PADDLE_WITH_CUDA) - PADDLE_ENFORCE_CUDA_SUCCESS(cudaGetLastError()); - VLOG(4) << "Operator(" << op->Type() - << "): context wait and get last error"; -#endif -#if defined(PADDLE_WITH_HIP) - PADDLE_ENFORCE_CUDA_SUCCESS(hipGetLastError()); +#if 
defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) + PADDLE_ENFORCE_GPU_SUCCESS(platform::GpuGetLastError()); VLOG(4) << "Operator(" << op->Type() << "): context wait and get last error"; #endif diff --git a/paddle/fluid/framework/new_executor/profiler.h b/paddle/fluid/framework/new_executor/profiler.h index 51c9e3d66a6f009a94c1eaac4dc8a68dfe72aad6..8df8db35592bb326640bb95af5a08843d4a1d55e 100644 --- a/paddle/fluid/framework/new_executor/profiler.h +++ b/paddle/fluid/framework/new_executor/profiler.h @@ -15,7 +15,7 @@ #pragma once #include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/framework/variable.h" -#include "paddle/fluid/platform/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" #include "paddle/fluid/platform/timer.h" namespace paddle { @@ -45,7 +45,7 @@ class ProfilerGuard { #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) auto cuda_place = BOOST_GET_CONST(platform::CUDAPlace, place); cost_info_->device_memory_bytes = - platform::RecordedCudaMallocSize(cuda_place.device); + platform::RecordedGpuMallocSize(cuda_place.device); #endif } } diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc index d60fdd90e2a2a438a461a2d6fd7462ccfe5576d8..4236fcf8dc134314848a9bc82ad2fbbe921d03b1 100644 --- a/paddle/fluid/framework/operator.cc +++ b/paddle/fluid/framework/operator.cc @@ -1212,14 +1212,10 @@ void OperatorWithKernel::RunImpl(const Scope& scope, /*For profiling/benchmark only*/ if (FLAGS_benchmark) { dev_ctx->Wait(); -#if defined(PADDLE_WITH_CUDA) - PADDLE_ENFORCE_CUDA_SUCCESS(cudaGetLastError()); - VLOG(4) << "Operator(" << Type() << "): context wait and get last error"; +#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) + PADDLE_ENFORCE_GPU_SUCCESS(platform::GpuGetLastError()); #endif -#if defined(PADDLE_WITH_HIP) - PADDLE_ENFORCE_CUDA_SUCCESS(hipGetLastError()); VLOG(4) << "Operator(" << Type() << "): context wait and get last error"; -#endif } if (FLAGS_check_nan_inf) { diff --git a/paddle/fluid/framework/parallel_executor.h b/paddle/fluid/framework/parallel_executor.h index 78774f048963895081384cb53499b49ffc63c37f..18d0ee78ffbbc7b063a1f8fcaf90c825d5965228 100644 --- a/paddle/fluid/framework/parallel_executor.h +++ b/paddle/fluid/framework/parallel_executor.h @@ -34,7 +34,7 @@ limitations under the License. 
*/ #include "paddle/fluid/platform/device_context.h" #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) -#include "paddle/fluid/platform/nccl_helper.h" +#include "paddle/fluid/platform/device/gpu/nccl_helper.h" #endif namespace paddle { diff --git a/paddle/fluid/framework/var_type_traits.cc b/paddle/fluid/framework/var_type_traits.cc index 1d5e638729361da4752a0f4ef3545333f7e825ce..eb8a1e4cea9fbd9cbbd0d5e179a7da85d0239845 100644 --- a/paddle/fluid/framework/var_type_traits.cc +++ b/paddle/fluid/framework/var_type_traits.cc @@ -22,7 +22,7 @@ #ifdef PADDLE_WITH_CUDA #if defined(PADDLE_WITH_NCCL) #include "paddle/fluid/operators/nccl/nccl_gpu_common.h" -#include "paddle/fluid/platform/nccl_helper.h" +#include "paddle/fluid/platform/device/gpu/nccl_helper.h" #endif #include #include "paddle/fluid/operators/conv_cudnn_op_cache.h" @@ -30,8 +30,8 @@ #endif #ifdef PADDLE_WITH_HIP #if defined(PADDLE_WITH_RCCL) -#include "paddle/fluid/operators/nccl/nccl_gpu_common.h" // NOLINT -#include "paddle/fluid/platform/nccl_helper.h" // NOLINT +#include "paddle/fluid/operators/nccl/nccl_gpu_common.h" // NOLINT +#include "paddle/fluid/platform/device/gpu/nccl_helper.h" // NOLINT #endif #include "paddle/fluid/operators/conv_cudnn_op_cache.h" // NOLINT #include "paddle/fluid/operators/miopen_rnn_cache.h" diff --git a/paddle/fluid/framework/var_type_traits_test.cc b/paddle/fluid/framework/var_type_traits_test.cc index ae7ae85207d849b7cb37c64d26dd3485c7da22b9..9a9b90cd811790d513a484acda1e01ccd34e1906 100644 --- a/paddle/fluid/framework/var_type_traits_test.cc +++ b/paddle/fluid/framework/var_type_traits_test.cc @@ -23,15 +23,15 @@ #ifdef PADDLE_WITH_CUDA #if defined(PADDLE_WITH_NCCL) #include "paddle/fluid/operators/nccl/nccl_gpu_common.h" -#include "paddle/fluid/platform/nccl_helper.h" +#include "paddle/fluid/platform/device/gpu/nccl_helper.h" #endif #include "paddle/fluid/operators/conv_cudnn_op_cache.h" #include "paddle/fluid/operators/cudnn_rnn_cache.h" #endif #ifdef PADDLE_WITH_HIP #if defined(PADDLE_WITH_RCCL) -#include "paddle/fluid/operators/nccl/nccl_gpu_common.h" // NOLINT -#include "paddle/fluid/platform/nccl_helper.h" // NOLINT +#include "paddle/fluid/operators/nccl/nccl_gpu_common.h" // NOLINT +#include "paddle/fluid/platform/device/gpu/nccl_helper.h" // NOLINT #endif #include "paddle/fluid/operators/conv_cudnn_op_cache.h" // NOLINT #include "paddle/fluid/operators/miopen_rnn_cache.h" diff --git a/paddle/fluid/imperative/all_reduce.cc b/paddle/fluid/imperative/all_reduce.cc index b922811b4f1045f20fcb92cda2064f7449da9bdb..31da214fbc39abb213b56e6d5cab6fe7a8b2c441 100644 --- a/paddle/fluid/imperative/all_reduce.cc +++ b/paddle/fluid/imperative/all_reduce.cc @@ -28,8 +28,8 @@ #include "paddle/fluid/framework/selected_rows.h" #include "paddle/fluid/framework/variable.h" #include "paddle/fluid/imperative/parallel_context.h" +#include "paddle/fluid/platform/device/gpu/nccl_helper.h" #include "paddle/fluid/platform/device_context.h" -#include "paddle/fluid/platform/nccl_helper.h" #include "paddle/fluid/string/string_helper.h" namespace paddle { @@ -64,7 +64,7 @@ static void AllReduce(const framework::Tensor &src, framework::Tensor *dst, dst->Resize(src.dims()); auto *dst_ptr = dst->mutable_data(src.place(), src.type()); auto nccl_dtype = platform::ToNCCLDataType(src.type()); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclAllReduce( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce( src_ptr, dst_ptr, src.numel(), nccl_dtype, ncclSum, comm->comm(), stream)); } @@ -100,16 +100,12 @@ 
static void AllReduce(const framework::SelectedRows &src, if (!use_calc_stream) { dev_ctx->Wait(); } - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclAllGather( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllGather( gpu_rows_num_ptr + strategy.local_rank_, gpu_rows_num_ptr, 1, ncclInt64, comm->comm(), stream)); if (!use_calc_stream) { -#ifdef PADDLE_WITH_RCCL - PADDLE_ENFORCE_CUDA_SUCCESS(hipStreamSynchronize(stream)); -#else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamSynchronize(stream)); -#endif + platform::GpuStreamSync(stream); } const auto *cpu_rows_num_ptr = rows_num_vector.data(); @@ -146,11 +142,11 @@ static void AllReduce(const framework::SelectedRows &src, // allgather is used to speed up the allreduce by replacing broadcast. auto row_sendcount = cpu_rows_num_ptr[0]; VLOG(3) << "allgather replaces broadcast to speed up in sparse allreduce"; - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclAllGather( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllGather( src_rows_ptr, dst_rows_ptr, row_sendcount, ncclInt64, comm->comm(), stream)); auto value_sendcount = cpu_rows_num_ptr[0] * feature_size; - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclAllGather( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllGather( src_tensor_ptr, dst_tensor_ptr, value_sendcount, nccl_dtype, comm->comm(), stream)); return; @@ -158,13 +154,13 @@ static void AllReduce(const framework::SelectedRows &src, for (int i = 0; i < strategy.nranks_; ++i) { if (cpu_rows_num_ptr[i] > 0) { // 2. Broadcast the rows of SelectedRows - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclBroadcast( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclBroadcast( src_rows_ptr, dst_rows_ptr + row_offset, cpu_rows_num_ptr[i], ncclInt64, i, comm->comm(), stream)); // 3. 
Broadcast the tensor data of SelectedRows auto *dst_tensor_ptr_i = reinterpret_cast(dst_tensor_ptr) + row_offset * feature_size * sizeof_dtype; - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclBroadcast( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclBroadcast( src_tensor_ptr, dst_tensor_ptr_i, cpu_rows_num_ptr[i] * feature_size, nccl_dtype, i, comm->comm(), stream)); row_offset += cpu_rows_num_ptr[i]; @@ -209,12 +205,8 @@ void AllReduce(const framework::Variable &src, framework::Variable *dst, AllReduce(src.Get(), tmp_dst.GetMutable(), strategy, stream, comm); -// stream must synchronize to ensure accuracy of the move operation -#ifdef PADDLE_WITH_RCCL - PADDLE_ENFORCE_CUDA_SUCCESS(hipStreamSynchronize(stream)); -#else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamSynchronize(stream)); -#endif + // stream must synchronize to ensure accuracy of the move operation + platform::GpuStreamSync(stream); *dst = std::move(tmp_dst); } #endif diff --git a/paddle/fluid/imperative/nccl_context.cc b/paddle/fluid/imperative/nccl_context.cc index 32becda4edc95a638889d9e29312f1ad3d37a640..0eb06983f409b1924f87141aa1e19bda35ca7558 100644 --- a/paddle/fluid/imperative/nccl_context.cc +++ b/paddle/fluid/imperative/nccl_context.cc @@ -153,11 +153,11 @@ void NCCLParallelContext::WaitCompute(int ring_id) { // compute_stream-->event-->comm_stream #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(hipEventRecord(event, compute_stream)); - PADDLE_ENFORCE_CUDA_SUCCESS(hipStreamWaitEvent(comm_stream, event, 0)); + PADDLE_ENFORCE_GPU_SUCCESS(hipEventRecord(event, compute_stream)); + PADDLE_ENFORCE_GPU_SUCCESS(hipStreamWaitEvent(comm_stream, event, 0)); #else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaEventRecord(event, compute_stream)); - PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamWaitEvent(comm_stream, event, 0)); + PADDLE_ENFORCE_GPU_SUCCESS(cudaEventRecord(event, compute_stream)); + PADDLE_ENFORCE_GPU_SUCCESS(cudaStreamWaitEvent(comm_stream, event, 0)); #endif } @@ -179,11 +179,11 @@ void NCCLParallelContext::WaitComm(int ring_id) { // comm_stream-->event-->compute_stream #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(hipEventRecord(event, comm_stream)); - PADDLE_ENFORCE_CUDA_SUCCESS(hipStreamWaitEvent(compute_stream, event, 0)); + PADDLE_ENFORCE_GPU_SUCCESS(hipEventRecord(event, comm_stream)); + PADDLE_ENFORCE_GPU_SUCCESS(hipStreamWaitEvent(compute_stream, event, 0)); #else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaEventRecord(event, comm_stream)); - PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamWaitEvent(compute_stream, event, 0)); + PADDLE_ENFORCE_GPU_SUCCESS(cudaEventRecord(event, comm_stream)); + PADDLE_ENFORCE_GPU_SUCCESS(cudaStreamWaitEvent(compute_stream, event, 0)); #endif } diff --git a/paddle/fluid/imperative/nccl_context.h b/paddle/fluid/imperative/nccl_context.h index 1eee393aa714bb21ab77ee6668d719b93787981f..1938fa08312f61c83690986d3afb98c125855123 100644 --- a/paddle/fluid/imperative/nccl_context.h +++ b/paddle/fluid/imperative/nccl_context.h @@ -18,7 +18,7 @@ #include #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) -#include "paddle/fluid/platform/cuda_resource_pool.h" +#include "paddle/fluid/platform/device/gpu/gpu_resource_pool.h" #endif #ifdef PADDLE_WITH_NCCL diff --git a/paddle/fluid/imperative/prepared_operator.cc b/paddle/fluid/imperative/prepared_operator.cc index 8e61b7d2eed880d58c6bdd832f9e95ff4bff6d2c..8875ef74bce14eea9d78434f2a0aa174e8bfa092 100644 --- a/paddle/fluid/imperative/prepared_operator.cc +++ b/paddle/fluid/imperative/prepared_operator.cc @@ -24,6 +24,8 @@ #ifdef PADDLE_WITH_XPU 
#include "paddle/fluid/platform/device/xpu/xpu_op_list.h" #endif +#include "paddle/fluid/platform/device/gpu/gpu_info.h" + DECLARE_bool(check_nan_inf); DECLARE_bool(run_pten_kernel); DECLARE_bool(benchmark); @@ -523,12 +525,8 @@ static void PreparedOpRunPtImpl( if (FLAGS_benchmark) { dev_ctx->Wait(); -#if defined(PADDLE_WITH_CUDA) - PADDLE_ENFORCE_CUDA_SUCCESS(cudaGetLastError()); - VLOG(4) << "Operator(" << op.Type() << "): context wait and get last error"; -#endif -#if defined(PADDLE_WITH_HIP) - PADDLE_ENFORCE_CUDA_SUCCESS(hipGetLastError()); +#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) + PADDLE_ENFORCE_GPU_SUCCESS(platform::GpuGetLastError()); VLOG(4) << "Operator(" << op.Type() << "): context wait and get last error"; #endif } diff --git a/paddle/fluid/inference/api/analysis_config.cc b/paddle/fluid/inference/api/analysis_config.cc index ceca7e8146a79077f7dc4d8487a4e63668b21fd8..49c4b8d7372e276de7b0979d8c4b9505f9453c91 100644 --- a/paddle/fluid/inference/api/analysis_config.cc +++ b/paddle/fluid/inference/api/analysis_config.cc @@ -19,8 +19,8 @@ #include "paddle/fluid/inference/api/paddle_pass_builder.h" #include "paddle/fluid/inference/utils/table_printer.h" #include "paddle/fluid/platform/cpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" #include "paddle/fluid/platform/enforce.h" -#include "paddle/fluid/platform/gpu_info.h" #ifdef PADDLE_WITH_TENSORRT #include "paddle/fluid/inference/tensorrt/helper.h" diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc index b1408995fa157bbbf5c52eae928d3226138de2e7..2293b702468532e9782e2a9477c0cb9e5afa6d57 100644 --- a/paddle/fluid/inference/api/analysis_predictor.cc +++ b/paddle/fluid/inference/api/analysis_predictor.cc @@ -41,8 +41,8 @@ #include "paddle/fluid/inference/utils/singleton.h" #include "paddle/fluid/memory/memcpy.h" #include "paddle/fluid/platform/cpu_helper.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" #include "paddle/fluid/platform/device_context.h" -#include "paddle/fluid/platform/gpu_info.h" #include "paddle/fluid/platform/place.h" #include "paddle/fluid/platform/profiler.h" #include "paddle/pten/api/ext/op_meta_info.h" diff --git a/paddle/fluid/inference/api/paddle_infer_contrib.cc b/paddle/fluid/inference/api/paddle_infer_contrib.cc index 57b5167337e252178fbccf4afd9d4f8d9a142557..d27f20a93b3a4bfccb6640282dc63855de31a487 100644 --- a/paddle/fluid/inference/api/paddle_infer_contrib.cc +++ b/paddle/fluid/inference/api/paddle_infer_contrib.cc @@ -27,7 +27,7 @@ using paddle::PaddleDType; void* TensorUtils::CudaMallocPinnedMemory(size_t size) { #if defined(PADDLE_WITH_CUDA) void* ptr = nullptr; - PADDLE_ENFORCE_CUDA_SUCCESS(cudaMallocHost(&ptr, size)); + PADDLE_ENFORCE_GPU_SUCCESS(cudaMallocHost(&ptr, size)); return ptr; #else return nullptr; @@ -36,7 +36,7 @@ void* TensorUtils::CudaMallocPinnedMemory(size_t size) { void TensorUtils::CudaFreePinnedMemory(void* ptr) { #if defined(PADDLE_WITH_CUDA) - PADDLE_ENFORCE_CUDA_SUCCESS(cudaFreeHost(ptr)); + PADDLE_ENFORCE_GPU_SUCCESS(cudaFreeHost(ptr)); #endif } diff --git a/paddle/fluid/inference/tensorrt/convert/io_converter.cc b/paddle/fluid/inference/tensorrt/convert/io_converter.cc index d9cf9e2e860018df594ac4d84a4d9fa9b9ba669f..b468518fa5a3cbdfd88e6632ba43f1e86a54e912 100644 --- a/paddle/fluid/inference/tensorrt/convert/io_converter.cc +++ b/paddle/fluid/inference/tensorrt/convert/io_converter.cc @@ -45,7 +45,7 @@ class DefaultIOConverter : public EngineIOConverter { "the input 
max_size. But in's memory_size = %u, max_size = %u.", size, max_size)); if (is_cpu_place(place)) { - PADDLE_ENFORCE_CUDA_SUCCESS(cudaMemcpyAsync( + PADDLE_ENFORCE_GPU_SUCCESS(cudaMemcpyAsync( out, in.data(), size, cudaMemcpyHostToDevice, *stream_)); } else if (is_gpu_place(place)) { PADDLE_ENFORCE_EQ( diff --git a/paddle/fluid/inference/tensorrt/engine.cc b/paddle/fluid/inference/tensorrt/engine.cc index 64116b7973e71016f23a86c6b973eaa171f91864..2addff52829c8066ad0bc0c9d85fc0cf6b286dfc 100644 --- a/paddle/fluid/inference/tensorrt/engine.cc +++ b/paddle/fluid/inference/tensorrt/engine.cc @@ -20,8 +20,8 @@ limitations under the License. */ #include "cuda_runtime_api.h" // NOLINT #include "paddle/fluid/inference/tensorrt/helper.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" #include "paddle/fluid/platform/enforce.h" -#include "paddle/fluid/platform/gpu_info.h" namespace paddle { namespace inference { diff --git a/paddle/fluid/inference/tensorrt/plugin/deformable_conv_op_plugin.cu b/paddle/fluid/inference/tensorrt/plugin/deformable_conv_op_plugin.cu index 0f32183c0fbc15f9a641355c1996ab6a17ffa87c..70e5a7bcc7b4f2af36ac7742f14312ac86757d6d 100644 --- a/paddle/fluid/inference/tensorrt/plugin/deformable_conv_op_plugin.cu +++ b/paddle/fluid/inference/tensorrt/plugin/deformable_conv_op_plugin.cu @@ -43,16 +43,16 @@ nvinfer1::Weights DeformableConvPlugin::copyToDevice(const void* hostData, size_t count) { int num_bytes = (data_type_ == nvinfer1::DataType::kFLOAT ? 4 : 2); void* deviceData; - PADDLE_ENFORCE_CUDA_SUCCESS(cudaMalloc(&deviceData, count * num_bytes)); - PADDLE_ENFORCE_CUDA_SUCCESS(cudaMemcpy( - deviceData, hostData, count * num_bytes, cudaMemcpyHostToDevice)); + PADDLE_ENFORCE_GPU_SUCCESS(cudaMalloc(&deviceData, count * num_bytes)); + PADDLE_ENFORCE_GPU_SUCCESS(cudaMemcpy(deviceData, hostData, count * num_bytes, + cudaMemcpyHostToDevice)); return nvinfer1::Weights{data_type_, deviceData, int64_t(count)}; } void DeformableConvPlugin::serializeFromDevice( void** hostBuffer, const nvinfer1::Weights& deviceWeights) const { int num_bytes = (data_type_ == nvinfer1::DataType::kFLOAT ? 
4 : 2); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( cudaMemcpy(static_cast(*hostBuffer), deviceWeights.values, deviceWeights.count * num_bytes, cudaMemcpyDeviceToHost)); hostBuffer += deviceWeights.count * num_bytes; diff --git a/paddle/fluid/inference/tensorrt/plugin/instance_norm_op_plugin.cu b/paddle/fluid/inference/tensorrt/plugin/instance_norm_op_plugin.cu index a9a50543e7bb70d7abc04b405dd581ca0c9f71fe..a4880a9997a53936681fb2d13134d5a3a4814db3 100644 --- a/paddle/fluid/inference/tensorrt/plugin/instance_norm_op_plugin.cu +++ b/paddle/fluid/inference/tensorrt/plugin/instance_norm_op_plugin.cu @@ -17,7 +17,7 @@ #include #include "glog/logging.h" #include "paddle/fluid/inference/tensorrt/plugin/instance_norm_op_plugin.h" -#include "paddle/fluid/platform/cudnn_helper.h" +#include "paddle/fluid/platform/device/gpu/gpu_dnn.h" namespace paddle { namespace inference { diff --git a/paddle/fluid/inference/tensorrt/plugin/matmul_op_int8_plugin.cu b/paddle/fluid/inference/tensorrt/plugin/matmul_op_int8_plugin.cu index 88e075386d09358c9548828c13141f79873c3da0..7cab12b625d2317c7d2a0b9612536a4a1704ea47 100644 --- a/paddle/fluid/inference/tensorrt/plugin/matmul_op_int8_plugin.cu +++ b/paddle/fluid/inference/tensorrt/plugin/matmul_op_int8_plugin.cu @@ -33,31 +33,31 @@ void Ltgemm_int8_linear( cublasLtMatmulDesc_t matmulDesc, void* alpha_scale, void* alpha_zero, void* alpha_one, void* workspace, cudaStream_t stream) { if (transA_) { - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixTransform( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixTransform( ltHandle, transformDescT, alpha_one, A, Adesc, alpha_zero, nullptr, nullptr, Atransform, AtransformDesc, stream)); } else { - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixTransform( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixTransform( ltHandle, transformDescN, alpha_one, A, Adesc, alpha_zero, nullptr, nullptr, Atransform, AtransformDesc, stream)); } if (transB_) { - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixTransform( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixTransform( ltHandle, transformDescN, alpha_one, B, Bdesc, alpha_zero, nullptr, nullptr, Btransform, BtransformDesc, stream)); } else { - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixTransform( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixTransform( ltHandle, transformDescT, alpha_one, B, Bdesc, alpha_zero, nullptr, nullptr, Btransform, BtransformDesc, stream)); } - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatmul( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatmul( ltHandle, matmulDesc, alpha_scale, Atransform, AtransformDesc, Btransform, BtransformDesc, nullptr, Ctransform, CtransformDesc, Ctransform, CtransformDesc, nullptr, workspace, 0, stream)); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixTransform( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixTransform( ltHandle, transformDescN, alpha_one, Ctransform, CtransformDesc, alpha_zero, nullptr, nullptr, C, Cdesc, stream)); } @@ -69,7 +69,7 @@ void Ltgemm_fp32_linear(cublasLtHandle_t ltHandle, const float* A, cublasLtMatmulDesc_t matmulDesc, void* alpha_scale, void* alpha_zero, void* workspace, cudaStream_t stream) { - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatmul( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatmul( ltHandle, matmulDesc, alpha_scale, A, Adesc, B, Bdesc, alpha_zero, C, Cdesc, C, Cdesc, nullptr, workspace, 0, stream)); } @@ -81,7 +81,7 @@ void Ltgemm_fp16_linear(cublasLtHandle_t ltHandle, const half* A, cublasLtMatmulDesc_t matmulDesc, void* alpha_scale, void* alpha_zero, void* workspace, 
cudaStream_t stream) { - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatmul( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatmul( ltHandle, matmulDesc, alpha_scale, A, Adesc, B, Bdesc, alpha_zero, C, Cdesc, C, Cdesc, nullptr, workspace, 0, stream)); } @@ -182,98 +182,98 @@ void MatmulPlugin::configurePlugin(const nvinfer1::PluginTensorDesc* inputs, int const ldatransform = 32 * n_; int const ldbtransform = 32 * ((m_ + 8 - 1) / 8 * 8); int const ldctransform = 32 * n_; - PADDLE_ENFORCE_CUDA_SUCCESS(cudaMalloc( + PADDLE_ENFORCE_GPU_SUCCESS(cudaMalloc( (void**)&Atransform_, sizeof(int8_t) * ((k_ + 32 - 1) / 32 * 32) / 32 * ldatransform)); - PADDLE_ENFORCE_CUDA_SUCCESS(cudaMalloc( + PADDLE_ENFORCE_GPU_SUCCESS(cudaMalloc( (void**)&Btransform_, sizeof(int8_t) * ((k_ + 32 - 1) / 32 * 32) / 32 * ldbtransform)); - PADDLE_ENFORCE_CUDA_SUCCESS(cudaMalloc( + PADDLE_ENFORCE_GPU_SUCCESS(cudaMalloc( (void**)&Ctransform_, sizeof(int8_t) * ((m_ + 32 - 1) / 32 * 32) / 32 * ldctransform)); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutCreate( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutCreate( &Adesc_, cudadataTypeIO, AopTranspose == CUBLAS_OP_N ? n_ : k_, AopTranspose == CUBLAS_OP_N ? k_ : n_, AopTranspose == CUBLAS_OP_N ? n_ : k_)); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( Adesc_, CUBLASLT_MATRIX_LAYOUT_TYPE, &cudadataTypeIO, sizeof(cudadataTypeIO))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( Adesc_, CUBLASLT_MATRIX_LAYOUT_BATCH_COUNT, &(batch_), sizeof(batch_))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( Adesc_, CUBLASLT_MATRIX_LAYOUT_STRIDED_BATCH_OFFSET, &(stridea), sizeof(stridea))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutCreate( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutCreate( &Bdesc_, cudadataTypeIO, BopTranspose == CUBLAS_OP_N ? k_ : m_, BopTranspose == CUBLAS_OP_N ? m_ : k_, BopTranspose == CUBLAS_OP_N ? 
k_ : m_)); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( Bdesc_, CUBLASLT_MATRIX_LAYOUT_TYPE, &cudadataTypeIO, sizeof(cudadataTypeIO))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( Bdesc_, CUBLASLT_MATRIX_LAYOUT_BATCH_COUNT, &(batch_), sizeof(batch_))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( Bdesc_, CUBLASLT_MATRIX_LAYOUT_STRIDED_BATCH_OFFSET, &(strideb), sizeof(strideb))); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( dyl::cublasLtMatrixLayoutCreate(&Cdesc_, cudadataTypeIO, n_, m_, n_)); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( Cdesc_, CUBLASLT_MATRIX_LAYOUT_TYPE, &cudadataTypeIO, sizeof(cudadataTypeIO))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( Cdesc_, CUBLASLT_MATRIX_LAYOUT_BATCH_COUNT, &(batch_), sizeof(batch_))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( Cdesc_, CUBLASLT_MATRIX_LAYOUT_STRIDED_BATCH_OFFSET, &(stridec), sizeof(stridec))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutCreate( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutCreate( &AtransformDesc_, cudadataTypeIO, n_, k_, ldatransform)); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( AtransformDesc_, CUBLASLT_MATRIX_LAYOUT_TYPE, &cudadataTypeIO, sizeof(cudadataTypeIO))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( AtransformDesc_, CUBLASLT_MATRIX_LAYOUT_ORDER, &COL32, sizeof(COL32))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutCreate( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutCreate( &BtransformDesc_, cudadataTypeIO, m_, k_, ldbtransform)); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( BtransformDesc_, CUBLASLT_MATRIX_LAYOUT_TYPE, &cudadataTypeIO, sizeof(cudadataTypeIO))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( BtransformDesc_, CUBLASLT_MATRIX_LAYOUT_ORDER, &COL4_4R2_8C, sizeof(COL4_4R2_8C))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutCreate( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutCreate( &CtransformDesc_, cudadataTypeIO, n_, m_, ldctransform)); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( CtransformDesc_, CUBLASLT_MATRIX_LAYOUT_TYPE, &cudadataTypeIO, sizeof(cudadataTypeIO))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( CtransformDesc_, CUBLASLT_MATRIX_LAYOUT_ORDER, &COL32, sizeof(COL32))); cublasOperation_t Transpose = CUBLAS_OP_T; cublasLtPointerMode_t transform_model = CUBLASLT_POINTER_MODE_DEVICE; - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixTransformDescCreate( + 
PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixTransformDescCreate( &transformDescT_, cudaDataTypeS)); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixTransformDescSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixTransformDescSetAttribute( transformDescT_, CUBLASLT_MATRIX_TRANSFORM_DESC_SCALE_TYPE, &cudaDataTypeS, sizeof(cudaDataTypeS))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixTransformDescSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixTransformDescSetAttribute( transformDescT_, CUBLASLT_MATRIX_TRANSFORM_DESC_TRANSA, &Transpose, sizeof(Transpose))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixTransformDescSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixTransformDescSetAttribute( transformDescT_, CUBLASLT_MATRIX_TRANSFORM_DESC_POINTER_MODE, &transform_model, sizeof(transform_model))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixTransformDescCreate( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixTransformDescCreate( &transformDescN_, cudaDataTypeS)); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixTransformDescSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixTransformDescSetAttribute( transformDescN_, CUBLASLT_MATRIX_TRANSFORM_DESC_SCALE_TYPE, &cudaDataTypeS, sizeof(cudaDataTypeS))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixTransformDescSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixTransformDescSetAttribute( transformDescN_, CUBLASLT_MATRIX_TRANSFORM_DESC_POINTER_MODE, &transform_model, sizeof(transform_model))); @@ -282,20 +282,20 @@ void MatmulPlugin::configurePlugin(const nvinfer1::PluginTensorDesc* inputs, CUBLASLT_POINTER_MODE_ALPHA_DEVICE_VECTOR_BETA_ZERO; #if CUBLAS_VER_MAJOR < 11 - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( dyl::cublasLtMatmulDescCreate(&matmulDesc_, cudaComputeType)); #else - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatmulDescCreate( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatmulDescCreate( &matmulDesc_, cudaComputeType, cudaDataTypeS)); #endif - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatmulDescSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatmulDescSetAttribute( matmulDesc_, CUBLASLT_MATMUL_DESC_TRANSA, &ATranspose, sizeof(ATranspose))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatmulDescSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatmulDescSetAttribute( matmulDesc_, CUBLASLT_MATMUL_DESC_TRANSB, &BTranspose, sizeof(BTranspose))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatmulDescSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatmulDescSetAttribute( matmulDesc_, CUBLASLT_MATMUL_DESC_POINTER_MODE, &matmul_model, sizeof(matmul_model))); @@ -303,17 +303,16 @@ void MatmulPlugin::configurePlugin(const nvinfer1::PluginTensorDesc* inputs, for (int i = 0; i < n_; i++) { alpha_tem[i] = alpha_ * inscale_0 * inscale_1 / outscale; } - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( cudaMalloc((void**)&alpha_scale_, n_ * sizeof(float))); cudaMemcpyAsync(alpha_scale_, &alpha_tem[0], n_ * sizeof(float), cudaMemcpyHostToDevice); float zero_tem = zero; - PADDLE_ENFORCE_CUDA_SUCCESS( - cudaMalloc((void**)&alpha_zero_, sizeof(float))); + PADDLE_ENFORCE_GPU_SUCCESS(cudaMalloc((void**)&alpha_zero_, sizeof(float))); cudaMemcpyAsync(alpha_zero_, &zero_tem, sizeof(float), cudaMemcpyHostToDevice); float one_tem = 1; - PADDLE_ENFORCE_CUDA_SUCCESS(cudaMalloc((void**)&alpha_one_, sizeof(float))); + PADDLE_ENFORCE_GPU_SUCCESS(cudaMalloc((void**)&alpha_one_, sizeof(float))); cudaMemcpyAsync(alpha_one_, &one_tem, sizeof(float), 
cudaMemcpyHostToDevice); } else if (type_ == nvinfer1::DataType::kHALF) { @@ -324,70 +323,69 @@ void MatmulPlugin::configurePlugin(const nvinfer1::PluginTensorDesc* inputs, #else cublasComputeType_t cudaComputeType = CUBLAS_COMPUTE_16F; #endif - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutCreate( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutCreate( &Adesc_, cudadataTypeIO, AopTranspose == CUBLAS_OP_N ? n_ : k_, AopTranspose == CUBLAS_OP_N ? k_ : n_, AopTranspose == CUBLAS_OP_N ? n_ : k_)); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( Adesc_, CUBLASLT_MATRIX_LAYOUT_TYPE, &cudadataTypeIO, sizeof(cudadataTypeIO))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( Adesc_, CUBLASLT_MATRIX_LAYOUT_BATCH_COUNT, &(batch_), sizeof(batch_))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( Adesc_, CUBLASLT_MATRIX_LAYOUT_STRIDED_BATCH_OFFSET, &(stridea), sizeof(stridea))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutCreate( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutCreate( &Bdesc_, cudadataTypeIO, BopTranspose == CUBLAS_OP_N ? k_ : m_, BopTranspose == CUBLAS_OP_N ? m_ : k_, BopTranspose == CUBLAS_OP_N ? k_ : m_)); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( Bdesc_, CUBLASLT_MATRIX_LAYOUT_TYPE, &cudadataTypeIO, sizeof(cudadataTypeIO))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( Bdesc_, CUBLASLT_MATRIX_LAYOUT_BATCH_COUNT, &(batch_), sizeof(batch_))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( Bdesc_, CUBLASLT_MATRIX_LAYOUT_STRIDED_BATCH_OFFSET, &(strideb), sizeof(strideb))); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( dyl::cublasLtMatrixLayoutCreate(&Cdesc_, cudadataTypeIO, n_, m_, n_)); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( Cdesc_, CUBLASLT_MATRIX_LAYOUT_TYPE, &cudadataTypeIO, sizeof(cudadataTypeIO))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( Cdesc_, CUBLASLT_MATRIX_LAYOUT_BATCH_COUNT, &(batch_), sizeof(batch_))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( Cdesc_, CUBLASLT_MATRIX_LAYOUT_STRIDED_BATCH_OFFSET, &(stridec), sizeof(stridec))); cublasLtPointerMode_t matmul_model = CUBLASLT_POINTER_MODE_DEVICE; #if CUBLAS_VER_MAJOR < 11 - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( dyl::cublasLtMatmulDescCreate(&matmulDesc_, cudaComputeType)); #else - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatmulDescCreate( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatmulDescCreate( &matmulDesc_, cudaComputeType, cudaDataTypeS)); #endif - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatmulDescSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatmulDescSetAttribute( matmulDesc_, CUBLASLT_MATMUL_DESC_TRANSA, &AopTranspose, sizeof(AopTranspose))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatmulDescSetAttribute( + 
PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatmulDescSetAttribute( matmulDesc_, CUBLASLT_MATMUL_DESC_TRANSB, &BopTranspose, sizeof(BopTranspose))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatmulDescSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatmulDescSetAttribute( matmulDesc_, CUBLASLT_MATMUL_DESC_POINTER_MODE, &matmul_model, sizeof(matmul_model))); half alpha_tem = static_cast(alpha_); - PADDLE_ENFORCE_CUDA_SUCCESS( - cudaMalloc((void**)&alpha_scale_, sizeof(half))); + PADDLE_ENFORCE_GPU_SUCCESS(cudaMalloc((void**)&alpha_scale_, sizeof(half))); cudaMemcpyAsync(alpha_scale_, &alpha_tem, sizeof(half), cudaMemcpyHostToDevice); half zero_tem = static_cast(zero); - PADDLE_ENFORCE_CUDA_SUCCESS(cudaMalloc((void**)&alpha_zero_, sizeof(half))); + PADDLE_ENFORCE_GPU_SUCCESS(cudaMalloc((void**)&alpha_zero_, sizeof(half))); cudaMemcpyAsync(alpha_zero_, &zero_tem, sizeof(half), cudaMemcpyHostToDevice); } else { @@ -398,71 +396,70 @@ void MatmulPlugin::configurePlugin(const nvinfer1::PluginTensorDesc* inputs, #else cublasComputeType_t cudaComputeType = CUBLAS_COMPUTE_32F_FAST_16F; #endif - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutCreate( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutCreate( &Adesc_, cudadataTypeIO, AopTranspose == CUBLAS_OP_N ? n_ : k_, AopTranspose == CUBLAS_OP_N ? k_ : n_, AopTranspose == CUBLAS_OP_N ? n_ : k_)); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( Adesc_, CUBLASLT_MATRIX_LAYOUT_TYPE, &cudadataTypeIO, sizeof(cudadataTypeIO))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( Adesc_, CUBLASLT_MATRIX_LAYOUT_BATCH_COUNT, &(batch_), sizeof(batch_))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( Adesc_, CUBLASLT_MATRIX_LAYOUT_STRIDED_BATCH_OFFSET, &(stridea), sizeof(stridea))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutCreate( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutCreate( &Bdesc_, cudadataTypeIO, BopTranspose == CUBLAS_OP_N ? k_ : m_, BopTranspose == CUBLAS_OP_N ? m_ : k_, BopTranspose == CUBLAS_OP_N ? 
k_ : m_)); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( Bdesc_, CUBLASLT_MATRIX_LAYOUT_TYPE, &cudadataTypeIO, sizeof(cudadataTypeIO))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( Bdesc_, CUBLASLT_MATRIX_LAYOUT_BATCH_COUNT, &(batch_), sizeof(batch_))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( Bdesc_, CUBLASLT_MATRIX_LAYOUT_STRIDED_BATCH_OFFSET, &(strideb), sizeof(strideb))); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( dyl::cublasLtMatrixLayoutCreate(&Cdesc_, cudadataTypeIO, n_, m_, n_)); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( Cdesc_, CUBLASLT_MATRIX_LAYOUT_TYPE, &cudadataTypeIO, sizeof(cudadataTypeIO))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( Cdesc_, CUBLASLT_MATRIX_LAYOUT_BATCH_COUNT, &(batch_), sizeof(batch_))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( Cdesc_, CUBLASLT_MATRIX_LAYOUT_STRIDED_BATCH_OFFSET, &(stridec), sizeof(stridec))); cublasLtPointerMode_t matmul_model = CUBLASLT_POINTER_MODE_DEVICE; #if CUBLAS_VER_MAJOR < 11 - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( dyl::cublasLtMatmulDescCreate(&matmulDesc_, cudaComputeType)); #else - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatmulDescCreate( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatmulDescCreate( &matmulDesc_, cudaComputeType, cudaDataTypeS)); #endif - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatmulDescSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatmulDescSetAttribute( matmulDesc_, CUBLASLT_MATMUL_DESC_TRANSA, &AopTranspose, sizeof(AopTranspose))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatmulDescSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatmulDescSetAttribute( matmulDesc_, CUBLASLT_MATMUL_DESC_TRANSB, &BopTranspose, sizeof(BopTranspose))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatmulDescSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatmulDescSetAttribute( matmulDesc_, CUBLASLT_MATMUL_DESC_POINTER_MODE, &matmul_model, sizeof(matmul_model))); float alpha_tem = alpha_; - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( cudaMalloc((void**)&alpha_scale_, sizeof(float))); cudaMemcpyAsync(alpha_scale_, &alpha_tem, sizeof(float), cudaMemcpyHostToDevice); float zero_tem = zero; - PADDLE_ENFORCE_CUDA_SUCCESS( - cudaMalloc((void**)&alpha_zero_, sizeof(float))); + PADDLE_ENFORCE_GPU_SUCCESS(cudaMalloc((void**)&alpha_zero_, sizeof(float))); cudaMemcpyAsync(alpha_zero_, &zero_tem, sizeof(float), cudaMemcpyHostToDevice); } @@ -613,13 +610,13 @@ void MatmulPluginDynamic::configurePlugin( int const ldatransform = 32 * n_max; int const ldbtransform = 32 * ((m_max + 8 - 1) / 8 * 8); int const ldctransform = 32 * n_max; - PADDLE_ENFORCE_CUDA_SUCCESS(cudaMalloc( + PADDLE_ENFORCE_GPU_SUCCESS(cudaMalloc( (void**)&Atransform_, sizeof(int8_t) * ((k_max + 32 - 1) / 32 * 32) / 32 * ldatransform)); - PADDLE_ENFORCE_CUDA_SUCCESS(cudaMalloc( + PADDLE_ENFORCE_GPU_SUCCESS(cudaMalloc( (void**)&Btransform_, sizeof(int8_t) * ((k_max + 32 - 1) / 32 * 32) / 32 * ldbtransform)); - 
PADDLE_ENFORCE_CUDA_SUCCESS(cudaMalloc( + PADDLE_ENFORCE_GPU_SUCCESS(cudaMalloc( (void**)&Ctransform_, sizeof(int8_t) * ((m_max + 32 - 1) / 32 * 32) / 32 * ldctransform)); @@ -628,38 +625,35 @@ void MatmulPluginDynamic::configurePlugin( for (int i = 0; i < n_max; i++) { alpha_tem[i] = alpha_ * inscale_0 * inscale_1 / outscale; } - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( cudaMalloc((void**)&alpha_scale_, n_max * sizeof(float))); cudaMemcpyAsync(alpha_scale_, &alpha_tem[0], n_max * sizeof(float), cudaMemcpyHostToDevice); float zero_tem = zero; - PADDLE_ENFORCE_CUDA_SUCCESS( - cudaMalloc((void**)&alpha_zero_, sizeof(float))); + PADDLE_ENFORCE_GPU_SUCCESS(cudaMalloc((void**)&alpha_zero_, sizeof(float))); cudaMemcpyAsync(alpha_zero_, &zero_tem, sizeof(float), cudaMemcpyHostToDevice); float one_tem = 1; - PADDLE_ENFORCE_CUDA_SUCCESS(cudaMalloc((void**)&alpha_one_, sizeof(float))); + PADDLE_ENFORCE_GPU_SUCCESS(cudaMalloc((void**)&alpha_one_, sizeof(float))); cudaMemcpyAsync(alpha_one_, &one_tem, sizeof(float), cudaMemcpyHostToDevice); } else if (type_ == nvinfer1::DataType::kHALF) { half alpha_tem = static_cast(alpha_); - PADDLE_ENFORCE_CUDA_SUCCESS( - cudaMalloc((void**)&alpha_scale_, sizeof(half))); + PADDLE_ENFORCE_GPU_SUCCESS(cudaMalloc((void**)&alpha_scale_, sizeof(half))); cudaMemcpyAsync(alpha_scale_, &alpha_tem, sizeof(half), cudaMemcpyHostToDevice); half zero_tem = static_cast(zero); - PADDLE_ENFORCE_CUDA_SUCCESS(cudaMalloc((void**)&alpha_zero_, sizeof(half))); + PADDLE_ENFORCE_GPU_SUCCESS(cudaMalloc((void**)&alpha_zero_, sizeof(half))); cudaMemcpyAsync(alpha_zero_, &zero_tem, sizeof(half), cudaMemcpyHostToDevice); } else { float alpha_tem = alpha_; - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( cudaMalloc((void**)&alpha_scale_, sizeof(float))); cudaMemcpyAsync(alpha_scale_, &alpha_tem, sizeof(float), cudaMemcpyHostToDevice); float zero_tem = zero; - PADDLE_ENFORCE_CUDA_SUCCESS( - cudaMalloc((void**)&alpha_zero_, sizeof(float))); + PADDLE_ENFORCE_GPU_SUCCESS(cudaMalloc((void**)&alpha_zero_, sizeof(float))); cudaMemcpyAsync(alpha_zero_, &zero_tem, sizeof(float), cudaMemcpyHostToDevice); } @@ -766,88 +760,88 @@ int MatmulPluginDynamic::enqueue(const nvinfer1::PluginTensorDesc* inputDesc, cublasLtOrder_t COL32 = CUBLASLT_ORDER_COL32; cublasLtOrder_t COL4_4R2_8C = CUBLASLT_ORDER_COL4_4R2_8C; - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutCreate( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutCreate( &Adesc, cudadataTypeIO, AopTranspose == CUBLAS_OP_N ? n : k, AopTranspose == CUBLAS_OP_N ? k : n, AopTranspose == CUBLAS_OP_N ? n : k)); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( Adesc, CUBLASLT_MATRIX_LAYOUT_TYPE, &cudadataTypeIO, sizeof(cudadataTypeIO))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( Adesc, CUBLASLT_MATRIX_LAYOUT_BATCH_COUNT, &(batch), sizeof(batch))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( Adesc, CUBLASLT_MATRIX_LAYOUT_STRIDED_BATCH_OFFSET, &(stridea), sizeof(stridea))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutCreate( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutCreate( &Bdesc, cudadataTypeIO, BopTranspose == CUBLAS_OP_N ? k : m, BopTranspose == CUBLAS_OP_N ? m : k, BopTranspose == CUBLAS_OP_N ? 
k : m)); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( Bdesc, CUBLASLT_MATRIX_LAYOUT_TYPE, &cudadataTypeIO, sizeof(cudadataTypeIO))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( Bdesc, CUBLASLT_MATRIX_LAYOUT_BATCH_COUNT, &(batch), sizeof(batch))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( Bdesc, CUBLASLT_MATRIX_LAYOUT_STRIDED_BATCH_OFFSET, &(strideb), sizeof(strideb))); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( dyl::cublasLtMatrixLayoutCreate(&Cdesc, cudadataTypeIO, n, m, n)); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( Cdesc, CUBLASLT_MATRIX_LAYOUT_TYPE, &cudadataTypeIO, sizeof(cudadataTypeIO))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( Cdesc, CUBLASLT_MATRIX_LAYOUT_BATCH_COUNT, &(batch), sizeof(batch))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( Cdesc, CUBLASLT_MATRIX_LAYOUT_STRIDED_BATCH_OFFSET, &(stridec), sizeof(stridec))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutCreate( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutCreate( &AtransformDesc, cudadataTypeIO, n, k, ldatransform)); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( AtransformDesc, CUBLASLT_MATRIX_LAYOUT_TYPE, &cudadataTypeIO, sizeof(cudadataTypeIO))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( AtransformDesc, CUBLASLT_MATRIX_LAYOUT_ORDER, &COL32, sizeof(COL32))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutCreate( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutCreate( &BtransformDesc, cudadataTypeIO, m, k, ldbtransform)); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( BtransformDesc, CUBLASLT_MATRIX_LAYOUT_TYPE, &cudadataTypeIO, sizeof(cudadataTypeIO))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( BtransformDesc, CUBLASLT_MATRIX_LAYOUT_ORDER, &COL4_4R2_8C, sizeof(COL4_4R2_8C))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutCreate( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutCreate( &CtransformDesc, cudadataTypeIO, n, m, ldctransform)); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( CtransformDesc, CUBLASLT_MATRIX_LAYOUT_TYPE, &cudadataTypeIO, sizeof(cudadataTypeIO))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( CtransformDesc, CUBLASLT_MATRIX_LAYOUT_ORDER, &COL32, sizeof(COL32))); cublasOperation_t Transpose = CUBLAS_OP_T; cublasLtPointerMode_t transform_model = CUBLASLT_POINTER_MODE_DEVICE; - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( dyl::cublasLtMatrixTransformDescCreate(&transformDescT, cudaDataTypeS)); - 
PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixTransformDescSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixTransformDescSetAttribute( transformDescT, CUBLASLT_MATRIX_TRANSFORM_DESC_SCALE_TYPE, &cudaDataTypeS, sizeof(cudaDataTypeS))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixTransformDescSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixTransformDescSetAttribute( transformDescT, CUBLASLT_MATRIX_TRANSFORM_DESC_TRANSA, &Transpose, sizeof(Transpose))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixTransformDescSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixTransformDescSetAttribute( transformDescT, CUBLASLT_MATRIX_TRANSFORM_DESC_POINTER_MODE, &transform_model, sizeof(transform_model))); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( dyl::cublasLtMatrixTransformDescCreate(&transformDescN, cudaDataTypeS)); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixTransformDescSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixTransformDescSetAttribute( transformDescN, CUBLASLT_MATRIX_TRANSFORM_DESC_SCALE_TYPE, &cudaDataTypeS, sizeof(cudaDataTypeS))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixTransformDescSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixTransformDescSetAttribute( transformDescN, CUBLASLT_MATRIX_TRANSFORM_DESC_POINTER_MODE, &transform_model, sizeof(transform_model))); @@ -856,20 +850,20 @@ int MatmulPluginDynamic::enqueue(const nvinfer1::PluginTensorDesc* inputDesc, CUBLASLT_POINTER_MODE_ALPHA_DEVICE_VECTOR_BETA_ZERO; #if CUBLAS_VER_MAJOR < 11 - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( dyl::cublasLtMatmulDescCreate(&matmulDesc, cudaComputeType)); #else - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatmulDescCreate( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatmulDescCreate( &matmulDesc, cudaComputeType, cudaDataTypeS)); #endif - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatmulDescSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatmulDescSetAttribute( matmulDesc, CUBLASLT_MATMUL_DESC_TRANSA, &ATranspose, sizeof(ATranspose))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatmulDescSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatmulDescSetAttribute( matmulDesc, CUBLASLT_MATMUL_DESC_TRANSB, &BTranspose, sizeof(BTranspose))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatmulDescSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatmulDescSetAttribute( matmulDesc, CUBLASLT_MATMUL_DESC_POINTER_MODE, &matmul_model, sizeof(matmul_model))); @@ -889,60 +883,60 @@ int MatmulPluginDynamic::enqueue(const nvinfer1::PluginTensorDesc* inputDesc, #else cublasComputeType_t cudaComputeType = CUBLAS_COMPUTE_16F; #endif - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutCreate( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutCreate( &Adesc, cudadataTypeIO, AopTranspose == CUBLAS_OP_N ? n : k, AopTranspose == CUBLAS_OP_N ? k : n, AopTranspose == CUBLAS_OP_N ? 
n : k)); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( Adesc, CUBLASLT_MATRIX_LAYOUT_TYPE, &cudadataTypeIO, sizeof(cudadataTypeIO))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( Adesc, CUBLASLT_MATRIX_LAYOUT_BATCH_COUNT, &(batch), sizeof(batch))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( Adesc, CUBLASLT_MATRIX_LAYOUT_STRIDED_BATCH_OFFSET, &(stridea), sizeof(stridea))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutCreate( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutCreate( &Bdesc, cudadataTypeIO, BopTranspose == CUBLAS_OP_N ? k : m, BopTranspose == CUBLAS_OP_N ? m : k, BopTranspose == CUBLAS_OP_N ? k : m)); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( Bdesc, CUBLASLT_MATRIX_LAYOUT_TYPE, &cudadataTypeIO, sizeof(cudadataTypeIO))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( Bdesc, CUBLASLT_MATRIX_LAYOUT_BATCH_COUNT, &(batch), sizeof(batch))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( Bdesc, CUBLASLT_MATRIX_LAYOUT_STRIDED_BATCH_OFFSET, &(strideb), sizeof(strideb))); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( dyl::cublasLtMatrixLayoutCreate(&Cdesc, cudadataTypeIO, n, m, n)); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( Cdesc, CUBLASLT_MATRIX_LAYOUT_TYPE, &cudadataTypeIO, sizeof(cudadataTypeIO))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( Cdesc, CUBLASLT_MATRIX_LAYOUT_BATCH_COUNT, &(batch), sizeof(batch))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( Cdesc, CUBLASLT_MATRIX_LAYOUT_STRIDED_BATCH_OFFSET, &(stridec), sizeof(stridec))); cublasLtPointerMode_t matmul_model = CUBLASLT_POINTER_MODE_DEVICE; #if CUBLAS_VER_MAJOR < 11 - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( dyl::cublasLtMatmulDescCreate(&matmulDesc, cudaComputeType)); #else - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatmulDescCreate( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatmulDescCreate( &matmulDesc, cudaComputeType, cudaDataTypeS)); #endif - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatmulDescSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatmulDescSetAttribute( matmulDesc, CUBLASLT_MATMUL_DESC_TRANSA, &AopTranspose, sizeof(AopTranspose))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatmulDescSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatmulDescSetAttribute( matmulDesc, CUBLASLT_MATMUL_DESC_TRANSB, &BopTranspose, sizeof(BopTranspose))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatmulDescSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatmulDescSetAttribute( matmulDesc, CUBLASLT_MATMUL_DESC_POINTER_MODE, &matmul_model, sizeof(matmul_model))); @@ -959,60 +953,60 @@ int MatmulPluginDynamic::enqueue(const nvinfer1::PluginTensorDesc* inputDesc, #else cublasComputeType_t cudaComputeType = CUBLAS_COMPUTE_32F_FAST_16F; #endif - 
PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutCreate( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutCreate( &Adesc, cudadataTypeIO, AopTranspose == CUBLAS_OP_N ? n : k, AopTranspose == CUBLAS_OP_N ? k : n, AopTranspose == CUBLAS_OP_N ? n : k)); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( Adesc, CUBLASLT_MATRIX_LAYOUT_TYPE, &cudadataTypeIO, sizeof(cudadataTypeIO))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( Adesc, CUBLASLT_MATRIX_LAYOUT_BATCH_COUNT, &(batch), sizeof(batch))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( Adesc, CUBLASLT_MATRIX_LAYOUT_STRIDED_BATCH_OFFSET, &(stridea), sizeof(stridea))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutCreate( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutCreate( &Bdesc, cudadataTypeIO, BopTranspose == CUBLAS_OP_N ? k : m, BopTranspose == CUBLAS_OP_N ? m : k, BopTranspose == CUBLAS_OP_N ? k : m)); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( Bdesc, CUBLASLT_MATRIX_LAYOUT_TYPE, &cudadataTypeIO, sizeof(cudadataTypeIO))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( Bdesc, CUBLASLT_MATRIX_LAYOUT_BATCH_COUNT, &(batch), sizeof(batch))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( Bdesc, CUBLASLT_MATRIX_LAYOUT_STRIDED_BATCH_OFFSET, &(strideb), sizeof(strideb))); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( dyl::cublasLtMatrixLayoutCreate(&Cdesc, cudadataTypeIO, n, m, n)); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( Cdesc, CUBLASLT_MATRIX_LAYOUT_TYPE, &cudadataTypeIO, sizeof(cudadataTypeIO))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( Cdesc, CUBLASLT_MATRIX_LAYOUT_BATCH_COUNT, &(batch), sizeof(batch))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatrixLayoutSetAttribute( Cdesc, CUBLASLT_MATRIX_LAYOUT_STRIDED_BATCH_OFFSET, &(stridec), sizeof(stridec))); cublasLtPointerMode_t matmul_model = CUBLASLT_POINTER_MODE_DEVICE; #if CUBLAS_VER_MAJOR < 11 - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( dyl::cublasLtMatmulDescCreate(&matmulDesc, cudaComputeType)); #else - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatmulDescCreate( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatmulDescCreate( &matmulDesc, cudaComputeType, cudaDataTypeS)); #endif - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatmulDescSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatmulDescSetAttribute( matmulDesc, CUBLASLT_MATMUL_DESC_TRANSA, &AopTranspose, sizeof(AopTranspose))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatmulDescSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatmulDescSetAttribute( matmulDesc, CUBLASLT_MATMUL_DESC_TRANSB, &BopTranspose, sizeof(BopTranspose))); - PADDLE_ENFORCE_CUDA_SUCCESS(dyl::cublasLtMatmulDescSetAttribute( + PADDLE_ENFORCE_GPU_SUCCESS(dyl::cublasLtMatmulDescSetAttribute( matmulDesc, 
CUBLASLT_MATMUL_DESC_POINTER_MODE, &matmul_model, sizeof(matmul_model))); diff --git a/paddle/fluid/inference/tensorrt/plugin/split_op_plugin.cu b/paddle/fluid/inference/tensorrt/plugin/split_op_plugin.cu index 091680ff672d0e022462611baf74c08ca8e64a8e..ec4fcca6d74d0c5157e993a0be6b30640eb0b324 100644 --- a/paddle/fluid/inference/tensorrt/plugin/split_op_plugin.cu +++ b/paddle/fluid/inference/tensorrt/plugin/split_op_plugin.cu @@ -136,7 +136,7 @@ int SplitPlugin::enqueue(int batchSize, const void* const* inputs, float const* input_ptr = reinterpret_cast(inputs[0]); float* const* h_odatas = reinterpret_cast(outputs); float** output_ptrs = thrust::raw_pointer_cast(&d_output_ptrs_[0]); - PADDLE_ENFORCE_CUDA_SUCCESS(cudaMemcpyAsync( + PADDLE_ENFORCE_GPU_SUCCESS(cudaMemcpyAsync( output_ptrs, h_odatas, d_output_ptrs_.size() * sizeof(float*), cudaMemcpyHostToDevice, stream)); @@ -263,7 +263,7 @@ int SplitPluginDynamic::enqueue(const nvinfer1::PluginTensorDesc* input_desc, float* const* h_odatas = reinterpret_cast(outputs); float** output_ptrs = thrust::raw_pointer_cast(&d_output_ptrs[0]); - PADDLE_ENFORCE_CUDA_SUCCESS(cudaMemcpyAsync( + PADDLE_ENFORCE_GPU_SUCCESS(cudaMemcpyAsync( output_ptrs, h_odatas, d_output_ptrs.size() * sizeof(float*), cudaMemcpyHostToDevice, stream)); @@ -279,7 +279,7 @@ int SplitPluginDynamic::enqueue(const nvinfer1::PluginTensorDesc* input_desc, half* const* h_odatas = reinterpret_cast(outputs); half** output_ptrs = thrust::raw_pointer_cast(&d_output_ptrs[0]); - PADDLE_ENFORCE_CUDA_SUCCESS(cudaMemcpyAsync( + PADDLE_ENFORCE_GPU_SUCCESS(cudaMemcpyAsync( output_ptrs, h_odatas, d_output_ptrs.size() * sizeof(half*), cudaMemcpyHostToDevice, stream)); diff --git a/paddle/fluid/inference/tensorrt/trt_int8_calibrator.cc b/paddle/fluid/inference/tensorrt/trt_int8_calibrator.cc index 86666950bc36e631f4c057d0ced4c6787d177f7a..c330867607f8e4bbf05d77c77b0f61c09130dd10 100644 --- a/paddle/fluid/inference/tensorrt/trt_int8_calibrator.cc +++ b/paddle/fluid/inference/tensorrt/trt_int8_calibrator.cc @@ -85,7 +85,7 @@ bool TRTInt8Calibrator::setBatch( engine_name_, it.first)); } const auto& d = dataptr->second; - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( cudaMemcpy(d.first, it.second, d.second, cudaMemcpyDeviceToDevice)); } diff --git a/paddle/fluid/memory/allocation/allocator_facade.cc b/paddle/fluid/memory/allocation/allocator_facade.cc index b7b238bd0bf5341f2479548ecb375867fe431a3c..8314a1df931cac7596ea5c597f513c54208626bd 100644 --- a/paddle/fluid/memory/allocation/allocator_facade.cc +++ b/paddle/fluid/memory/allocation/allocator_facade.cc @@ -30,13 +30,10 @@ #include "paddle/fluid/memory/allocation/pinned_allocator.h" #include "paddle/fluid/memory/allocation/stream_safe_cuda_allocator.h" #include "paddle/fluid/memory/allocation/thread_local_allocator.h" -#include "paddle/fluid/platform/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" #ifdef PADDLE_WITH_CUDA -#include -#include "paddle/fluid/platform/cuda_graph.h" -#else -#include +#include "paddle/fluid/platform/device/gpu/cuda/cuda_graph.h" #endif #if CUDA_VERSION >= 10020 @@ -145,8 +142,7 @@ class AllocatorFacadePrivate { "naive_best_fit strategy"; FLAGS_use_stream_safe_cuda_allocator = false; } - for (int dev_id = 0; dev_id < platform::GetCUDADeviceCount(); - ++dev_id) { + for (int dev_id = 0; dev_id < platform::GetGPUDeviceCount(); ++dev_id) { InitNaiveBestFitCUDAAllocator(platform::CUDAPlace(dev_id)); } InitNaiveBestFitCUDAPinnedAllocator(); @@ -172,13 +168,13 @@ class AllocatorFacadePrivate { if 
(FLAGS_use_stream_safe_cuda_allocator) { // TODO(Ruibiao): Support multi-stream allocator for other strategies default_stream_ = nullptr; - for (int dev_id = 0; dev_id < platform::GetCUDADeviceCount(); + for (int dev_id = 0; dev_id < platform::GetGPUDeviceCount(); ++dev_id) { InitStreamSafeCUDAAllocator(platform::CUDAPlace(dev_id), default_stream_); } } else { - for (int dev_id = 0; dev_id < platform::GetCUDADeviceCount(); + for (int dev_id = 0; dev_id < platform::GetGPUDeviceCount(); ++dev_id) { InitAutoGrowthCUDAAllocator(platform::CUDAPlace(dev_id), allow_free_idle_chunk_); @@ -208,8 +204,7 @@ class AllocatorFacadePrivate { FLAGS_use_stream_safe_cuda_allocator = false; } - for (int dev_id = 0; dev_id < platform::GetCUDADeviceCount(); - ++dev_id) { + for (int dev_id = 0; dev_id < platform::GetGPUDeviceCount(); ++dev_id) { InitThreadLocalCUDAAllocator(platform::CUDAPlace(dev_id)); } InitNaiveBestFitCUDAPinnedAllocator(); @@ -399,10 +394,10 @@ class AllocatorFacadePrivate { CUdevice device; int val; try { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cuDeviceGet(&device, p.GetDeviceId())); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cuDeviceGetAttribute( &val, CU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED, device)); @@ -476,10 +471,10 @@ class AllocatorFacadePrivate { CUdevice device; int val; try { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cuDeviceGet(&device, p.GetDeviceId())); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cuDeviceGetAttribute( &val, CU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED, device)); @@ -599,7 +594,7 @@ class AllocatorFacadePrivate { #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) system_allocators_[platform::CUDAPinnedPlace()] = std::make_shared(); - int device_count = platform::GetCUDADeviceCount(); + int device_count = platform::GetGPUDeviceCount(); for (int i = 0; i < device_count; ++i) { platform::CUDAPlace p(i); system_allocators_[p] = std::make_shared(p); @@ -612,7 +607,7 @@ class AllocatorFacadePrivate { std::vector places; places.emplace_back(platform::CPUPlace()); #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) - int device_count = platform::GetCUDADeviceCount(); + int device_count = platform::GetGPUDeviceCount(); for (int dev_id = 0; dev_id < device_count; ++dev_id) { places.emplace_back(platform::CUDAPlace(dev_id)); } diff --git a/paddle/fluid/memory/allocation/allocator_facade.h b/paddle/fluid/memory/allocation/allocator_facade.h index 4cd8b4e91e614e023dddad4d7f802bb61833538e..0d9f1043d9e86ab4fca5d06ed6768d90812668b2 100644 --- a/paddle/fluid/memory/allocation/allocator_facade.h +++ b/paddle/fluid/memory/allocation/allocator_facade.h @@ -19,7 +19,7 @@ #include "paddle/fluid/memory/allocation/npu_pinned_allocator.h" #endif #ifdef PADDLE_WITH_CUDA -#include "paddle/fluid/platform/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" #endif #include "paddle/fluid/platform/place.h" diff --git a/paddle/fluid/memory/allocation/auto_growth_best_fit_allocator_facade_test.cc b/paddle/fluid/memory/allocation/auto_growth_best_fit_allocator_facade_test.cc index 193ef5a0cb922d53d5317c3d4ff2ec9f8eecea24..4469673b305bfea7b27b12972908d7f56c543f2f 100644 --- a/paddle/fluid/memory/allocation/auto_growth_best_fit_allocator_facade_test.cc +++ b/paddle/fluid/memory/allocation/auto_growth_best_fit_allocator_facade_test.cc @@ -19,7 +19,7 @@ #include 
// NOLINT #include "gflags/gflags.h" #include "paddle/fluid/memory/allocation/allocator_facade.h" -#include "paddle/fluid/platform/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) DECLARE_double(fraction_of_gpu_memory_to_use); diff --git a/paddle/fluid/memory/allocation/cuda_allocator.cc b/paddle/fluid/memory/allocation/cuda_allocator.cc index b1a45afa99d9a565bfc3b8b3e6192eca7d2ccd05..4242083f2e617af5e7dc8456746b3ca9738dc3f8 100644 --- a/paddle/fluid/memory/allocation/cuda_allocator.cc +++ b/paddle/fluid/memory/allocation/cuda_allocator.cc @@ -25,8 +25,8 @@ #include #include "paddle/fluid/platform/cuda_device_guard.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" #include "paddle/fluid/platform/enforce.h" -#include "paddle/fluid/platform/gpu_info.h" namespace paddle { namespace memory { @@ -37,8 +37,8 @@ void CUDAAllocator::FreeImpl(Allocation* allocation) { BOOST_GET_CONST(platform::CUDAPlace, allocation->place()), place_, platform::errors::PermissionDenied( "GPU memory is freed in incorrect device. This may be a bug")); - platform::RecordedCudaFree(allocation->ptr(), allocation->size(), - place_.device); + platform::RecordedGpuFree(allocation->ptr(), allocation->size(), + place_.device); delete allocation; } @@ -46,13 +46,13 @@ Allocation* CUDAAllocator::AllocateImpl(size_t size) { std::call_once(once_flag_, [this] { platform::SetDeviceId(place_.device); }); void* ptr; - auto result = platform::RecordedCudaMalloc(&ptr, size, place_.device); + auto result = platform::RecordedGpuMalloc(&ptr, size, place_.device); if (LIKELY(result == gpuSuccess)) { return new Allocation(ptr, size, platform::Place(place_)); } size_t avail, total, actual_avail, actual_total; - bool is_limited = platform::RecordedCudaMemGetInfo( + bool is_limited = platform::RecordedGpuMemGetInfo( &avail, &total, &actual_avail, &actual_total, place_.device); size_t allocated = total - avail; diff --git a/paddle/fluid/memory/allocation/cuda_device_context_allocator.h b/paddle/fluid/memory/allocation/cuda_device_context_allocator.h index 3d6f1d7bcbea64be7ca18f6c24f86a2c39ad83a2..9e04fd3f0619e3c7a06fc4453ee3f2f0b28a786e 100644 --- a/paddle/fluid/memory/allocation/cuda_device_context_allocator.h +++ b/paddle/fluid/memory/allocation/cuda_device_context_allocator.h @@ -81,10 +81,10 @@ class CUDADeviceContextAllocator : public Allocator { : place_(place), default_stream_(default_stream) { platform::CUDADeviceGuard guard(place_.device); #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( hipEventCreateWithFlags(&event_, hipEventDisableTiming)); #else - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( cudaEventCreate(&event_, cudaEventDisableTiming)); #endif } @@ -93,9 +93,9 @@ class CUDADeviceContextAllocator : public Allocator { if (event_) { platform::CUDADeviceGuard guard(place_.device); #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(hipEventDestroy(event_)); + PADDLE_ENFORCE_GPU_SUCCESS(hipEventDestroy(event_)); #else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaEventDestroy(event_)); + PADDLE_ENFORCE_GPU_SUCCESS(cudaEventDestroy(event_)); #endif } } @@ -111,12 +111,11 @@ class CUDADeviceContextAllocator : public Allocator { new CUDADeviceContextAllocation(memory::Alloc(place_, size)); // Wait for the event on stream #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(hipEventRecord(event_, default_stream_)); - PADDLE_ENFORCE_CUDA_SUCCESS(hipStreamWaitEvent(default_stream_, event_, 0)); + 
PADDLE_ENFORCE_GPU_SUCCESS(hipEventRecord(event_, default_stream_)); + PADDLE_ENFORCE_GPU_SUCCESS(hipStreamWaitEvent(default_stream_, event_, 0)); #else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaEventRecord(event_, default_stream_)); - PADDLE_ENFORCE_CUDA_SUCCESS( - cudaStreamWaitEvent(default_stream_, event_, 0)); + PADDLE_ENFORCE_GPU_SUCCESS(cudaEventRecord(event_, default_stream_)); + PADDLE_ENFORCE_GPU_SUCCESS(cudaStreamWaitEvent(default_stream_, event_, 0)); #endif return allocation; } diff --git a/paddle/fluid/memory/allocation/cuda_virtual_mem_allocator.cc b/paddle/fluid/memory/allocation/cuda_virtual_mem_allocator.cc index e3780f2f11359c3d91f69e8002696c8047c03a34..f4baca8288f03ca9073dd628ce772f383b104331 100644 --- a/paddle/fluid/memory/allocation/cuda_virtual_mem_allocator.cc +++ b/paddle/fluid/memory/allocation/cuda_virtual_mem_allocator.cc @@ -23,8 +23,8 @@ #ifdef PADDLE_WITH_CUDA #include "paddle/fluid/platform/cuda_device_guard.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" #include "paddle/fluid/platform/dynload/cuda_driver.h" -#include "paddle/fluid/platform/gpu_info.h" #endif #if CUDA_VERSION >= 10020 @@ -49,10 +49,10 @@ CUDAVirtualMemAllocator::CUDAVirtualMemAllocator( // Prepare the access descriptor array indicating where and how the backings // should be visible. - for (int dev_id = 0; dev_id < platform::GetCUDADeviceCount(); ++dev_id) { + for (int dev_id = 0; dev_id < platform::GetGPUDeviceCount(); ++dev_id) { if (place.device != dev_id) { int capable = 0; - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( cudaDeviceCanAccessPeer(&capable, place.device, dev_id)); if (!capable) { VLOG(1) << "device(" << place.device @@ -73,10 +73,10 @@ CUDAVirtualMemAllocator::CUDAVirtualMemAllocator( // Get the minimum granularity needed for all devices // (the max of the minimum granularity of each participating device) granularity_ = 0; - for (int dev_id = 0; dev_id < platform::GetCUDADeviceCount(); ++dev_id) { + for (int dev_id = 0; dev_id < platform::GetGPUDeviceCount(); ++dev_id) { size_t granularity; prop.location.id = dev_id; - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cuMemGetAllocationGranularity( &granularity, &prop, CU_MEM_ALLOC_GRANULARITY_MINIMUM)); granularity_ = std::max(granularity, granularity_); @@ -84,7 +84,7 @@ CUDAVirtualMemAllocator::CUDAVirtualMemAllocator( size_t actual_avail, actual_total; paddle::platform::CUDADeviceGuard guard(place.device); - PADDLE_ENFORCE_CUDA_SUCCESS(cudaMemGetInfo(&actual_avail, &actual_total)); + PADDLE_ENFORCE_GPU_SUCCESS(cudaMemGetInfo(&actual_avail, &actual_total)); virtual_mem_size_ = AlignedSize(actual_total, granularity_); @@ -93,7 +93,7 @@ CUDAVirtualMemAllocator::CUDAVirtualMemAllocator( // GPU, // so the virtual address space size we reserve is equal to the GPU video // memory size - PADDLE_ENFORCE_CUDA_SUCCESS(paddle::platform::dynload::cuMemAddressReserve( + PADDLE_ENFORCE_GPU_SUCCESS(paddle::platform::dynload::cuMemAddressReserve( &virtual_mem_base_, virtual_mem_size_, 0, 0, 0)); virtual_mem_alloced_offset_ = 0; @@ -123,11 +123,11 @@ void CUDAVirtualMemAllocator::FreeImpl(Allocation* allocation) { auto result = paddle::platform::dynload::cuMemUnmap(iter->first, iter->second.second); if (result != CUDA_ERROR_DEINITIALIZED) { - PADDLE_ENFORCE_CUDA_SUCCESS(result); + PADDLE_ENFORCE_GPU_SUCCESS(result); } if (result != CUDA_ERROR_DEINITIALIZED) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::RecordedCuMemRelease( + PADDLE_ENFORCE_GPU_SUCCESS(platform::RecordedGpuMemRelease( 
iter->second.first, iter->second.second, place_.device)); } @@ -166,12 +166,12 @@ Allocation* CUDAVirtualMemAllocator::AllocateImpl(size_t size) { // Create physical memory backing allocation. auto result = - platform::RecordedCuMemCreate(&handle, size, &prop_, 0, place_.device); + platform::RecordedGpuMemCreate(&handle, size, &prop_, 0, place_.device); if (result != CUDA_SUCCESS) { if (result == CUDA_ERROR_OUT_OF_MEMORY) { size_t actual_avail, actual_total; - PADDLE_ENFORCE_CUDA_SUCCESS(cudaMemGetInfo(&actual_avail, &actual_total)); + PADDLE_ENFORCE_GPU_SUCCESS(cudaMemGetInfo(&actual_avail, &actual_total)); size_t actual_allocated = actual_total - actual_avail; PADDLE_THROW_BAD_ALLOC(platform::errors::ResourceExhausted( @@ -186,7 +186,7 @@ Allocation* CUDAVirtualMemAllocator::AllocateImpl(size_t size) { string::HumanReadableSize(actual_allocated), string::HumanReadableSize(actual_avail), place_.device)); } else { - PADDLE_ENFORCE_CUDA_SUCCESS(result); + PADDLE_ENFORCE_GPU_SUCCESS(result); } return nullptr; } @@ -197,8 +197,8 @@ Allocation* CUDAVirtualMemAllocator::AllocateImpl(size_t size) { result = paddle::platform::dynload::cuMemMap(ptr, size, 0, handle, 0); if (result != CUDA_SUCCESS) { - platform::RecordedCuMemRelease(handle, size, place_.device); - PADDLE_ENFORCE_CUDA_SUCCESS(result); + platform::RecordedGpuMemRelease(handle, size, place_.device); + PADDLE_ENFORCE_GPU_SUCCESS(result); return nullptr; } @@ -208,8 +208,8 @@ Allocation* CUDAVirtualMemAllocator::AllocateImpl(size_t size) { if (result != CUDA_SUCCESS) { paddle::platform::dynload::cuMemUnmap(ptr, size); - platform::RecordedCuMemRelease(handle, size, place_.device); - PADDLE_ENFORCE_CUDA_SUCCESS(result); + platform::RecordedGpuMemRelease(handle, size, place_.device); + PADDLE_ENFORCE_GPU_SUCCESS(result); return nullptr; } diff --git a/paddle/fluid/memory/allocation/naive_best_fit_allocator.cc b/paddle/fluid/memory/allocation/naive_best_fit_allocator.cc index 3bdd856759dc1180f0bf8434f6bb85f0206c756f..6de32335c62b220592d8aebc2e9f6051e741fa4c 100644 --- a/paddle/fluid/memory/allocation/naive_best_fit_allocator.cc +++ b/paddle/fluid/memory/allocation/naive_best_fit_allocator.cc @@ -20,8 +20,8 @@ #include "glog/logging.h" #include "paddle/fluid/memory/detail/buddy_allocator.h" #include "paddle/fluid/memory/detail/system_allocator.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" #include "paddle/fluid/platform/enforce.h" -#include "paddle/fluid/platform/gpu_info.h" #include "paddle/fluid/platform/profiler.h" #include "paddle/fluid/string/printf.h" diff --git a/paddle/fluid/memory/allocation/pinned_allocator.cc b/paddle/fluid/memory/allocation/pinned_allocator.cc index 5aa0514432844c8707949900a1448086a4d7e3d8..c56a7235c109ca0ab3210a0fa1e9e21fce7355c3 100644 --- a/paddle/fluid/memory/allocation/pinned_allocator.cc +++ b/paddle/fluid/memory/allocation/pinned_allocator.cc @@ -20,18 +20,18 @@ namespace allocation { bool CPUPinnedAllocator::IsAllocThreadSafe() const { return true; } void CPUPinnedAllocator::FreeImpl(Allocation *allocation) { #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(hipHostFree(allocation->ptr())); + PADDLE_ENFORCE_GPU_SUCCESS(hipHostFree(allocation->ptr())); #else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaFreeHost(allocation->ptr())); + PADDLE_ENFORCE_GPU_SUCCESS(cudaFreeHost(allocation->ptr())); #endif delete allocation; } Allocation *CPUPinnedAllocator::AllocateImpl(size_t size) { void *ptr; #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(hipHostMalloc(&ptr, size, hipHostMallocPortable)); + 
PADDLE_ENFORCE_GPU_SUCCESS(hipHostMalloc(&ptr, size, hipHostMallocPortable)); #else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaHostAlloc(&ptr, size, cudaHostAllocPortable)); + PADDLE_ENFORCE_GPU_SUCCESS(cudaHostAlloc(&ptr, size, cudaHostAllocPortable)); #endif return new Allocation(ptr, size, platform::CUDAPinnedPlace()); } diff --git a/paddle/fluid/memory/allocation/stream_safe_cuda_allocator.cc b/paddle/fluid/memory/allocation/stream_safe_cuda_allocator.cc index b2e13af6ef956e824355c70b556861865b466132..d11240bc84487067a57d4a76baba1d597b3a6ca3 100644 --- a/paddle/fluid/memory/allocation/stream_safe_cuda_allocator.cc +++ b/paddle/fluid/memory/allocation/stream_safe_cuda_allocator.cc @@ -112,13 +112,13 @@ void StreamSafeCUDAAllocator::CreateEventForAllRecordedStream( for (gpuStream_t stream : *recorded_streams) { gpuEvent_t event; #ifdef PADDLE_WITH_CUDA - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( cudaEventCreateWithFlags(&event, cudaEventDisableTiming)); - PADDLE_ENFORCE_CUDA_SUCCESS(cudaEventRecord(event, stream)); + PADDLE_ENFORCE_GPU_SUCCESS(cudaEventRecord(event, stream)); #else - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( hipEventCreateWithFlags(&event, hipEventDisableTiming)); - PADDLE_ENFORCE_CUDA_SUCCESS(hipEventRecord(event, stream)); + PADDLE_ENFORCE_GPU_SUCCESS(hipEventRecord(event, stream)); #endif outstanding_events->emplace_back(event); VLOG(9) << "Record event " << event << " in stream " << stream; @@ -162,8 +162,8 @@ void StreamSafeCUDAAllocator::ProcessEventsAndFree() { outstanding_events.erase(outstanding_events.begin(), deque_it); break; } - PADDLE_ENFORCE_CUDA_SUCCESS(err); - PADDLE_ENFORCE_CUDA_SUCCESS(cudaEventDestroy(*deque_it)); + PADDLE_ENFORCE_GPU_SUCCESS(err); + PADDLE_ENFORCE_GPU_SUCCESS(cudaEventDestroy(*deque_it)); #else gpuError_t err = hipEventQuery(*deque_it); if (err == hipErrorNotReady) { @@ -173,8 +173,8 @@ void StreamSafeCUDAAllocator::ProcessEventsAndFree() { outstanding_events.erase(outstanding_events.begin(), deque_it); break; } - PADDLE_ENFORCE_CUDA_SUCCESS(err); - PADDLE_ENFORCE_CUDA_SUCCESS(hipEventDestroy(*deque_it)); + PADDLE_ENFORCE_GPU_SUCCESS(err); + PADDLE_ENFORCE_GPU_SUCCESS(hipEventDestroy(*deque_it)); #endif ++deque_it; } diff --git a/paddle/fluid/memory/allocation/thread_local_allocator.h b/paddle/fluid/memory/allocation/thread_local_allocator.h index 654fb3fe7bc044b51c046d50b9b1fbc234b84b7e..c55f579981b00501e830257d7097a06f7c623fb7 100644 --- a/paddle/fluid/memory/allocation/thread_local_allocator.h +++ b/paddle/fluid/memory/allocation/thread_local_allocator.h @@ -20,7 +20,7 @@ #include "paddle/fluid/memory/allocation/allocator.h" #include "paddle/fluid/memory/detail/buddy_allocator.h" #include "paddle/fluid/memory/detail/system_allocator.h" -#include "paddle/fluid/platform/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" namespace paddle { namespace memory { diff --git a/paddle/fluid/memory/detail/buddy_allocator.h b/paddle/fluid/memory/detail/buddy_allocator.h index 88dbec2bcfd0c1ccc82f3ee5c864e4d65ebfac3b..b7be895b358308816d9facc748ae74af7f8930ad 100644 --- a/paddle/fluid/memory/detail/buddy_allocator.h +++ b/paddle/fluid/memory/detail/buddy_allocator.h @@ -25,8 +25,8 @@ limitations under the License. 
*/ #include "paddle/fluid/memory/detail/memory_block.h" #include "paddle/fluid/memory/detail/system_allocator.h" #include "paddle/fluid/platform/cpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" #include "paddle/fluid/platform/device/npu/npu_info.h" -#include "paddle/fluid/platform/gpu_info.h" namespace paddle { namespace memory { diff --git a/paddle/fluid/memory/detail/buddy_allocator_test.cc b/paddle/fluid/memory/detail/buddy_allocator_test.cc index 8b3d776cef21024aa9b2a2c547a3af5ac998e4cb..cd152843553a9f7abda9c4a81e7231a919be9c79 100644 --- a/paddle/fluid/memory/detail/buddy_allocator_test.cc +++ b/paddle/fluid/memory/detail/buddy_allocator_test.cc @@ -24,8 +24,8 @@ limitations under the License. */ #include "gflags/gflags.h" #include "gtest/gtest.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" #include "paddle/fluid/platform/device/npu/npu_info.h" -#include "paddle/fluid/platform/gpu_info.h" #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) || \ defined(PADDLE_WITH_ASCEND_CL) diff --git a/paddle/fluid/memory/detail/system_allocator.cc b/paddle/fluid/memory/detail/system_allocator.cc index 75b93088e55028be833ad4fd99df3dff0ee3cb23..b300f936f7a6831a2c1a4b158578f4426f2abae6 100644 --- a/paddle/fluid/memory/detail/system_allocator.cc +++ b/paddle/fluid/memory/detail/system_allocator.cc @@ -27,9 +27,9 @@ limitations under the License. */ #include "gflags/gflags.h" #include "paddle/fluid/memory/allocation/allocator.h" #include "paddle/fluid/platform/cpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" #include "paddle/fluid/platform/device/npu/npu_info.h" #include "paddle/fluid/platform/enforce.h" -#include "paddle/fluid/platform/gpu_info.h" #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) #include "paddle/fluid/platform/cuda_device_guard.h" @@ -115,7 +115,7 @@ void* GPUAllocator::Alloc(size_t* index, size_t size) { if (size <= 0) return nullptr; void* p; - auto result = platform::RecordedCudaMalloc(&p, size, gpu_id_); + auto result = platform::RecordedGpuMalloc(&p, size, gpu_id_); if (result == gpuSuccess) { *index = 0; @@ -123,7 +123,7 @@ void* GPUAllocator::Alloc(size_t* index, size_t size) { return p; } else { size_t avail, total, actual_avail, actual_total; - bool is_limited = platform::RecordedCudaMemGetInfo( + bool is_limited = platform::RecordedGpuMemGetInfo( &avail, &total, &actual_avail, &actual_total, gpu_id_); size_t allocated = total - avail; @@ -166,7 +166,7 @@ void GPUAllocator::Free(void* p, size_t size, size_t index) { size, gpu_alloc_size_)); gpu_alloc_size_ -= size; - platform::RecordedCudaFree(p, size, gpu_id_); + platform::RecordedGpuFree(p, size, gpu_id_); } bool GPUAllocator::UseGpu() const { return true; } diff --git a/paddle/fluid/memory/detail/system_allocator_test.cc b/paddle/fluid/memory/detail/system_allocator_test.cc index ead188341dac46bd3eec490015ff934dc8a26af5..bb7f47f9d30ec431cca13c374c28541aabc08de2 100644 --- a/paddle/fluid/memory/detail/system_allocator_test.cc +++ b/paddle/fluid/memory/detail/system_allocator_test.cc @@ -19,6 +19,9 @@ limitations under the License. 
*/ #include "gflags/gflags.h" #include "gtest/gtest.h" #include "paddle/fluid/memory/allocation/allocator.h" +#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) +#include "paddle/fluid/platform/device/gpu/gpu_info.h" +#endif DECLARE_bool(use_pinned_memory); @@ -77,11 +80,7 @@ TEST(GPUAllocator, AllocFailure) { allocator.Alloc(&index, alloc_size); ASSERT_TRUE(false); } catch (paddle::memory::allocation::BadAlloc&) { -#ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(hipGetLastError()); -#else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaGetLastError()); -#endif + PADDLE_ENFORCE_GPU_SUCCESS(paddle::platform::GpuGetLastError()); } } #endif diff --git a/paddle/fluid/memory/memcpy.h b/paddle/fluid/memory/memcpy.h index c630437224cd093438df5d8d8a58a5c8f6ab2ad2..7d2d2526ab12453b4dcf0dc8b52366b43a3c832e 100644 --- a/paddle/fluid/memory/memcpy.h +++ b/paddle/fluid/memory/memcpy.h @@ -14,7 +14,7 @@ limitations under the License. */ #pragma once -#include "paddle/fluid/platform/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" #include "paddle/fluid/platform/place.h" namespace paddle { diff --git a/paddle/fluid/memory/pinned_memory_test.cu b/paddle/fluid/memory/pinned_memory_test.cu index 76a880755e21b87466d387b54a844d4faadccfb7..837c964e2ad32cb84405365cdd6dc595cf775de6 100644 --- a/paddle/fluid/memory/pinned_memory_test.cu +++ b/paddle/fluid/memory/pinned_memory_test.cu @@ -19,7 +19,7 @@ limitations under the License. */ #include "paddle/fluid/memory/memory.h" #include "paddle/fluid/platform/cpu_info.h" -#include "paddle/fluid/platform/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" #include "paddle/fluid/platform/place.h" // This unit test is an example comparing the performance between using pinned diff --git a/paddle/fluid/memory/stream_safe_cuda_alloc_test.cu b/paddle/fluid/memory/stream_safe_cuda_alloc_test.cu index 6a5818fd9603be5b7e5b8b92b48043cbb3f54754..a0293e8410c5863d466c834ac1daccb7766e33e6 100644 --- a/paddle/fluid/memory/stream_safe_cuda_alloc_test.cu +++ b/paddle/fluid/memory/stream_safe_cuda_alloc_test.cu @@ -26,7 +26,7 @@ #include "gtest/gtest.h" #include "paddle/fluid/memory/malloc.h" -#include "paddle/fluid/platform/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" namespace paddle { namespace memory { @@ -53,9 +53,9 @@ class StreamSafeCUDAAllocTest : public ::testing::Test { for (size_t i = 1; i < stream_num_; ++i) { gpuStream_t stream; #ifdef PADDLE_WITH_CUDA - PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamCreate(&stream)); + PADDLE_ENFORCE_GPU_SUCCESS(cudaStreamCreate(&stream)); #else - PADDLE_ENFORCE_CUDA_SUCCESS(hipStreamCreate(&stream)); + PADDLE_ENFORCE_GPU_SUCCESS(hipStreamCreate(&stream)); #endif streams_.emplace_back(stream); } @@ -65,10 +65,10 @@ class StreamSafeCUDAAllocTest : public ::testing::Test { std::shared_ptr allocation = AllocShared(place_, allocation_size, streams_[i]); #ifdef PADDLE_WITH_CUDA - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( cudaMemset(allocation->ptr(), 0, allocation->size())); #else - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( hipMemset(allocation->ptr(), 0, allocation->size())); #endif allocations_.emplace_back(allocation); @@ -111,13 +111,13 @@ class StreamSafeCUDAAllocTest : public ::testing::Test { // tricky code, the allocations are still accessible even though // allocations_.clear() has been called #ifdef PADDLE_WITH_CUDA - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( cudaMemcpy(host_x.get(), allocations_[i]->ptr(), data_num_ * sizeof(int), 
cudaMemcpyDeviceToHost)); #else - PADDLE_ENFORCE_CUDA_SUCCESS( - hipMemcpy(host_x.get(), allocations_[i]->ptr(), - data_num_ * sizeof(int), hipMemcpyDeviceToHost)); + PADDLE_ENFORCE_GPU_SUCCESS(hipMemcpy(host_x.get(), allocations_[i]->ptr(), + data_num_ * sizeof(int), + hipMemcpyDeviceToHost)); #endif for (int j = 0; j < data_num_; ++j) { EXPECT_TRUE(host_x[j] == (j % thread_num) * stream_num_); @@ -127,9 +127,9 @@ class StreamSafeCUDAAllocTest : public ::testing::Test { void TearDown() override { #ifdef PADDLE_WITH_CUDA - PADDLE_ENFORCE_CUDA_SUCCESS(cudaDeviceSynchronize()); + PADDLE_ENFORCE_GPU_SUCCESS(cudaDeviceSynchronize()); #else - PADDLE_ENFORCE_CUDA_SUCCESS(hipDeviceSynchronize()); + PADDLE_ENFORCE_GPU_SUCCESS(hipDeviceSynchronize()); #endif for (gpuStream_t stream : streams_) { Release(place_, stream); @@ -137,14 +137,14 @@ class StreamSafeCUDAAllocTest : public ::testing::Test { for (size_t i = 1; i < stream_num_; ++i) { #ifdef PADDLE_WITH_CUDA - PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamDestroy(streams_[i])); + PADDLE_ENFORCE_GPU_SUCCESS(cudaStreamDestroy(streams_[i])); #else - PADDLE_ENFORCE_CUDA_SUCCESS(hipStreamDestroy(streams_[i])); + PADDLE_ENFORCE_GPU_SUCCESS(hipStreamDestroy(streams_[i])); #endif } uint64_t cuda_malloc_size = - platform::RecordedCudaMallocSize(place_.GetDeviceId()); + platform::RecordedGpuMallocSize(place_.GetDeviceId()); ASSERT_EQ(cuda_malloc_size, 0) << "Found " << cuda_malloc_size << " bytes memory that not released yet," << " there may be a memory leak problem"; @@ -192,11 +192,11 @@ TEST(StreamSafeCUDAAllocRetryTest, RetryTest) { platform::CUDAPlace place = platform::CUDAPlace(); gpuStream_t stream1, stream2; #ifdef PADDLE_WITH_CUDA - PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamCreate(&stream1)); - PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamCreate(&stream2)); + PADDLE_ENFORCE_GPU_SUCCESS(cudaStreamCreate(&stream1)); + PADDLE_ENFORCE_GPU_SUCCESS(cudaStreamCreate(&stream2)); #else - PADDLE_ENFORCE_CUDA_SUCCESS(hipStreamCreate(&stream1)); - PADDLE_ENFORCE_CUDA_SUCCESS(hipStreamCreate(&stream2)); + PADDLE_ENFORCE_GPU_SUCCESS(hipStreamCreate(&stream1)); + PADDLE_ENFORCE_GPU_SUCCESS(hipStreamCreate(&stream2)); #endif size_t available_size = platform::GpuAvailableMemToAlloc(); // alloc_size < available_size < 2 * alloc_size @@ -216,9 +216,9 @@ TEST(StreamSafeCUDAAllocRetryTest, RetryTest) { allocation2.reset(); #ifdef PADDLE_WITH_CUDA - PADDLE_ENFORCE_CUDA_SUCCESS(cudaDeviceSynchronize()); + PADDLE_ENFORCE_GPU_SUCCESS(cudaDeviceSynchronize()); #else - PADDLE_ENFORCE_CUDA_SUCCESS(hipDeviceSynchronize()); + PADDLE_ENFORCE_GPU_SUCCESS(hipDeviceSynchronize()); #endif Release(place, stream1); diff --git a/paddle/fluid/operators/activation_cudnn.cu.cc b/paddle/fluid/operators/activation_cudnn.cu.cc index 38499783eb492a129fbe953d81eac9fc706d5870..2ad92e36272b30b689476d60a535e282fedddd94 100644 --- a/paddle/fluid/operators/activation_cudnn.cu.cc +++ b/paddle/fluid/operators/activation_cudnn.cu.cc @@ -14,11 +14,7 @@ #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/activation_op.h" -#ifdef PADDLE_WITH_HIP -#include "paddle/fluid/platform/miopen_desc.h" -#else -#include "paddle/fluid/platform/cudnn_desc.h" -#endif +#include "paddle/fluid/platform/device/gpu/gpu_dnn.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/activation_cudnn_op.cu.cc b/paddle/fluid/operators/activation_cudnn_op.cu.cc index b197d3511f96b98b7cdbaddaa6803e2a81e098da..2776fe9c131329b239fcef37024296fed62662ff 100644 --- 
a/paddle/fluid/operators/activation_cudnn_op.cu.cc +++ b/paddle/fluid/operators/activation_cudnn_op.cu.cc @@ -14,11 +14,7 @@ #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/activation_op.h" -#ifdef PADDLE_WITH_HIP -#include "paddle/fluid/platform/miopen_desc.h" -#else -#include "paddle/fluid/platform/cudnn_desc.h" -#endif +#include "paddle/fluid/platform/device/gpu/gpu_dnn.h" namespace paddle { namespace platform { @@ -64,13 +60,13 @@ struct CudnnActivationFunctor { x_desc.set(x); out_desc.set(GET_DATA_SAFELY(out, "Output", "Out", "CudnnActivation")); #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenActivationForward( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenActivationForward( ctx_.cudnn_handle(), act_desc.desc(), platform::CudnnDataType::kOne(), x_desc.desc(), x.data(), platform::CudnnDataType::kZero(), out_desc.desc(), out->mutable_data(ctx_.GetPlace()))); #else - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnActivationForward( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnActivationForward( ctx_.cudnn_handle(), act_desc.desc(), platform::CudnnDataType::kOne(), x_desc.desc(), x.data(), platform::CudnnDataType::kZero(), out_desc.desc(), @@ -108,14 +104,14 @@ struct CudnnActivationGradFunctor { dout_desc.set(dout); dx_desc.set(GET_DATA_SAFELY(dx, "Output", "X@GRAD", "CudnnActivationGrad")); #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenActivationBackward( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenActivationBackward( ctx_.cudnn_handle(), act_desc.desc(), platform::CudnnDataType::kOne(), out_desc.desc(), out.data(), dout_desc.desc(), dout.data(), x_desc.desc(), x.data(), platform::CudnnDataType::kZero(), dx_desc.desc(), dx->mutable_data(ctx_.GetPlace()))); #else - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnActivationBackward( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnActivationBackward( ctx_.cudnn_handle(), act_desc.desc(), platform::CudnnDataType::kOne(), out_desc.desc(), out.data(), dout_desc.desc(), dout.data(), x_desc.desc(), x.data(), diff --git a/paddle/fluid/operators/activation_op.cu b/paddle/fluid/operators/activation_op.cu index 0294bfd5b05d5a34ef49b723d7a0ad9e016732d6..07cf516c476e8940f62beb6ebd7d3dd3991e2b79 100644 --- a/paddle/fluid/operators/activation_op.cu +++ b/paddle/fluid/operators/activation_op.cu @@ -14,7 +14,7 @@ limitations under the License. 
*/ #include "paddle/fluid/operators/elementwise/elementwise_op_impl.cu.h" #include "paddle/fluid/operators/math/math_cuda_utils.h" #include "paddle/fluid/platform/bfloat16.h" -#include "paddle/fluid/platform/cuda_device_function.h" +#include "paddle/fluid/platform/device/gpu/gpu_device_function.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/affine_channel_op.cu b/paddle/fluid/operators/affine_channel_op.cu index 5fa1e18553bd53231691f3fda9de251d646f5b29..cf4041f721af2579f34c5978e27612478e300307 100644 --- a/paddle/fluid/operators/affine_channel_op.cu +++ b/paddle/fluid/operators/affine_channel_op.cu @@ -23,7 +23,7 @@ namespace cub = hipcub; #include "paddle/fluid/framework/data_layout.h" #include "paddle/fluid/framework/op_registry.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/affine_grid_cudnn_op.cu.cc b/paddle/fluid/operators/affine_grid_cudnn_op.cu.cc index b8ce52387b959229bf6b1bdc499cadf67cacaed9..31801b14564d3e54d5aa81da36c0b5c011853322 100644 --- a/paddle/fluid/operators/affine_grid_cudnn_op.cu.cc +++ b/paddle/fluid/operators/affine_grid_cudnn_op.cu.cc @@ -16,7 +16,7 @@ limitations under the License. */ // HIP not support cudnnSpatialTfGridGeneratorForward #include "paddle/fluid/framework/op_registry.h" -#include "paddle/fluid/platform/cudnn_helper.h" +#include "paddle/fluid/platform/device/gpu/gpu_dnn.h" namespace paddle { namespace operators { @@ -108,7 +108,7 @@ class CUDNNAffineGridGradOpKernel : public framework::OpKernel { const T* output_grad_data = output_grad->data(); T* theta_grad_data = theta_grad->mutable_data(ctx.GetPlace()); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnSpatialTfGridGeneratorBackward( handle, cudnn_st_desc, output_grad_data, theta_grad_data)); } diff --git a/paddle/fluid/operators/affine_grid_op.cc b/paddle/fluid/operators/affine_grid_op.cc index 3125e005174de2adefcd90529a5063406d425aff..d1da11028c05c58e0a66feebef2cc4d2c6b1ee18 100644 --- a/paddle/fluid/operators/affine_grid_op.cc +++ b/paddle/fluid/operators/affine_grid_op.cc @@ -18,12 +18,7 @@ limitations under the License. */ #include #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/op_version_registry.h" -#ifdef PADDLE_WITH_CUDA -#include "paddle/fluid/platform/cudnn_helper.h" -#endif -#ifdef PADDLE_WITH_HIP -#include "paddle/fluid/platform/miopen_helper.h" -#endif +#include "paddle/fluid/platform/device/gpu/gpu_dnn.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/affine_grid_op.cu b/paddle/fluid/operators/affine_grid_op.cu index 58b56bdcf5614ed9183ce3bf11c1767f92650d20..bcf7deefc98f03687e362808d3102e9f51f80750 100644 --- a/paddle/fluid/operators/affine_grid_op.cu +++ b/paddle/fluid/operators/affine_grid_op.cu @@ -14,9 +14,9 @@ limitations under the License. 
*/ #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/affine_grid_op.h" -#include "paddle/fluid/platform/cuda_device_function.h" -#include "paddle/fluid/platform/cuda_primitives.h" -#include "paddle/fluid/platform/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_device_function.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/argsort_op.cu b/paddle/fluid/operators/argsort_op.cu index f50d5e619ebea7c87246124fc4efef58ab45f5a8..6236a07de4bc613431d70793daab5d7a77d02dfb 100644 --- a/paddle/fluid/operators/argsort_op.cu +++ b/paddle/fluid/operators/argsort_op.cu @@ -26,8 +26,8 @@ namespace cub = hipcub; #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/argsort_op.h" #include "paddle/fluid/operators/transpose_op.h" -#include "paddle/fluid/platform/cuda_device_function.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_device_function.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #ifdef __HIPCC__ namespace rocprim { @@ -169,7 +169,7 @@ void ArgFullSort(const platform::CUDADeviceContext& ctx, const Tensor* input, num_rows, segment_offsets_t, segment_offsets_t + 1, 0, sizeof(T) * 8, cu_stream); } - PADDLE_ENFORCE_CUDA_SUCCESS(err); + PADDLE_ENFORCE_GPU_SUCCESS(err); Tensor temp_storage; temp_storage.mutable_data(ctx.GetPlace(), temp_storage_bytes); @@ -188,7 +188,7 @@ void ArgFullSort(const platform::CUDADeviceContext& ctx, const Tensor* input, cu_stream); } - PADDLE_ENFORCE_CUDA_SUCCESS(err); + PADDLE_ENFORCE_GPU_SUCCESS(err); } template diff --git a/paddle/fluid/operators/average_accumulates_op.cu b/paddle/fluid/operators/average_accumulates_op.cu index 2796a6b2239b986afeb3b6b71ce1c4c6854bb514..3bffe0a05a8f752a96ee000d566542fcdd316c16 100644 --- a/paddle/fluid/operators/average_accumulates_op.cu +++ b/paddle/fluid/operators/average_accumulates_op.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/average_accumulates_op.h" -#include "paddle/fluid/platform/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/batch_fc_op.cu b/paddle/fluid/operators/batch_fc_op.cu index b686c766e0f8b96f81037c2799c672620e9e403a..c326929a1468098849977b5b5f1bee101b50b524 100644 --- a/paddle/fluid/operators/batch_fc_op.cu +++ b/paddle/fluid/operators/batch_fc_op.cu @@ -16,8 +16,8 @@ limitations under the License. 
*/ #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/operators/batch_fc_op.h" #include "paddle/fluid/operators/math/blas.h" -#include "paddle/fluid/platform/cuda_primitives.h" -#include "paddle/fluid/platform/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/batch_norm_op.cu b/paddle/fluid/operators/batch_norm_op.cu index b4cf9c48df2a804a90c633b5aeaf15a794223595..e3dc54e17cd7fdc7ce3fbe20853b8894d6f9d628 100644 --- a/paddle/fluid/operators/batch_norm_op.cu +++ b/paddle/fluid/operators/batch_norm_op.cu @@ -197,18 +197,18 @@ class BatchNormKernel // miopenTensorDescriptor_t bn_param_desc_; // miopenBatchNormMode_t mode_; -// PADDLE_ENFORCE_CUDA_SUCCESS( +// PADDLE_ENFORCE_GPU_SUCCESS( // platform::dynload::miopenCreateTensorDescriptor(&data_desc_)); -// PADDLE_ENFORCE_CUDA_SUCCESS( +// PADDLE_ENFORCE_GPU_SUCCESS( // platform::dynload::miopenCreateTensorDescriptor(&bn_param_desc_)); #else cudnnTensorDescriptor_t data_desc_; cudnnTensorDescriptor_t bn_param_desc_; cudnnBatchNormMode_t mode_; - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&data_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&bn_param_desc_)); #endif @@ -251,23 +251,22 @@ class BatchNormKernel #ifdef PADDLE_WITH_HIP // TODO(wangran16): wait for MIOpen to improve the performance of BN -// PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenSetTensorDescriptor( +// PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenSetTensorDescriptor( // data_desc_, CudnnDataType::type, // x_dims.size() > 3 ? x_dims.size() : 4, const_cast(dims.data()), // const_cast(strides.data()))); // Note: PERSISTENT not implemented for inference -// PADDLE_ENFORCE_CUDA_SUCCESS( +// PADDLE_ENFORCE_GPU_SUCCESS( // platform::dynload::miopenDeriveBNTensorDescriptor( // bn_param_desc_, data_desc_, test_mode ? miopenBNSpatial : mode_)); #else - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( data_desc_, CudnnDataType::type, x_dims.size() > 3 ? x_dims.size() : 4, dims.data(), strides.data())); // Note: PERSISTENT not implemented for inference - PADDLE_ENFORCE_CUDA_SUCCESS( - platform::dynload::cudnnDeriveBNTensorDescriptor( - bn_param_desc_, data_desc_, - test_mode ? CUDNN_BATCHNORM_SPATIAL : mode_)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnDeriveBNTensorDescriptor( + bn_param_desc_, data_desc_, + test_mode ? 
CUDNN_BATCHNORM_SPATIAL : mode_)); #endif const auto *scale = ctx.Input("Scale"); @@ -341,7 +340,7 @@ class BatchNormKernel } // TODO(wangran16): wait for MIOpen to improve the performance of BN -// PADDLE_ENFORCE_CUDA_SUCCESS( +// PADDLE_ENFORCE_GPU_SUCCESS( // platform::dynload::miopenBatchNormalizationForwardInference( // handle, miopenBNSpatial, // const_cast( @@ -364,7 +363,7 @@ class BatchNormKernel // est_var->template data>())), // epsilon)); #else - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnBatchNormalizationForwardInference( handle, // Note: PERSISTENT not implemented for inference @@ -426,7 +425,7 @@ class BatchNormKernel "The argument ReserveSpace of batch_norm op is not found.")); // --------------- cudnn batchnorm workspace --------------- - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload:: cudnnGetBatchNormalizationForwardTrainingExWorkspaceSize( /*handle=*/handle, @@ -440,7 +439,7 @@ class BatchNormKernel /*sizeInBytes=*/&workspace_size)); // -------------- cudnn batchnorm reserve space -------------- - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload:: cudnnGetBatchNormalizationTrainingExReserveSpaceSize( /*handle=*/handle, @@ -454,7 +453,7 @@ class BatchNormKernel ctx.GetPlace(), transformed_x.type(), reserve_space_size); workspace_ptr = workspace_tensor.mutable_data( ctx.GetPlace(), transformed_x.type(), workspace_size); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnBatchNormalizationForwardTrainingEx( handle, mode_, CUDNN_BATCHNORM_OPS_BN, CudnnDataType::kOne(), CudnnDataType::kZero(), data_desc_, @@ -508,7 +507,7 @@ class BatchNormKernel } // TODO(wangran16): wait for MIOpen to improve the performance of BN -// PADDLE_ENFORCE_CUDA_SUCCESS( +// PADDLE_ENFORCE_GPU_SUCCESS( // platform::dynload::miopenBatchNormalizationForwardTraining( // handle, mode_, const_cast(static_cast( // CudnnDataType::kOne())), @@ -537,7 +536,7 @@ class BatchNormKernel // static_cast(saved_variance->template mutable_data< // BatchNormParamType>(ctx.GetPlace())))); #else - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnBatchNormalizationForwardTraining( handle, mode_, CudnnDataType::kOne(), CudnnDataType::kZero(), data_desc_, @@ -568,15 +567,15 @@ class BatchNormKernel #ifdef PADDLE_WITH_HIP // TODO(wangran16): wait for MIOpen to improve the performance of BN // clean when exit. -// PADDLE_ENFORCE_CUDA_SUCCESS( +// PADDLE_ENFORCE_GPU_SUCCESS( // platform::dynload::miopenDestroyTensorDescriptor(data_desc_)); -// PADDLE_ENFORCE_CUDA_SUCCESS( +// PADDLE_ENFORCE_GPU_SUCCESS( // platform::dynload::miopenDestroyTensorDescriptor(bn_param_desc_)); #else // clean when exit. 
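// ----------------------------------------------------------------------------
// Editorial aside, not part of the patch: the swaps just below (and throughout
// the hunks above) all follow one pattern -- the CUDA-only macro
// PADDLE_ENFORCE_CUDA_SUCCESS becomes PADDLE_ENFORCE_GPU_SUCCESS, which expands
// to the CUDA or HIP check depending on the build, so call sites can drop
// their PADDLE_WITH_HIP / PADDLE_WITH_CUDA branches. A minimal sketch of the
// new call-site style, assuming a Paddle source tree built with either
// backend; the helpers referenced here are the ones introduced by the
// includes shown in this patch (platform/device/gpu/gpu_info.h, enforce.h):
//
//   #include "paddle/fluid/platform/device/gpu/gpu_info.h"
//   #include "paddle/fluid/platform/enforce.h"
//
//   void SyncAndCheck(gpuStream_t stream) {
//     platform::GpuStreamSync(stream);  // was cudaStreamSynchronize / hipStreamSynchronize
//     PADDLE_ENFORCE_GPU_SUCCESS(platform::GpuGetLastError());
//   }
// ----------------------------------------------------------------------------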
- PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(data_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(bn_param_desc_)); #endif } @@ -981,18 +980,18 @@ class BatchNormGradKernel // miopenTensorDescriptor_t bn_param_desc_; // miopenBatchNormMode_t mode_; -// PADDLE_ENFORCE_CUDA_SUCCESS( +// PADDLE_ENFORCE_GPU_SUCCESS( // platform::dynload::miopenCreateTensorDescriptor(&data_desc_)); -// PADDLE_ENFORCE_CUDA_SUCCESS( +// PADDLE_ENFORCE_GPU_SUCCESS( // platform::dynload::miopenCreateTensorDescriptor(&bn_param_desc_)); #else cudnnTensorDescriptor_t data_desc_; cudnnTensorDescriptor_t bn_param_desc_; cudnnBatchNormMode_t mode_; - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&data_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&bn_param_desc_)); #endif if (epsilon <= CUDNN_BN_MIN_EPSILON - FLT_EPSILON) { @@ -1022,18 +1021,18 @@ class BatchNormGradKernel #ifdef PADDLE_WITH_HIP // TODO(wangran16): wait for MIOpen to improve the performance of BN -// PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenSetTensorDescriptor( +// PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenSetTensorDescriptor( // data_desc_, CudnnDataType::type, // x_dims.size() > 3 ? x_dims.size() : 4, const_cast(dims.data()), // const_cast(strides.data()))); -// PADDLE_ENFORCE_CUDA_SUCCESS( +// PADDLE_ENFORCE_GPU_SUCCESS( // platform::dynload::miopenDeriveBNTensorDescriptor(bn_param_desc_, // data_desc_, mode_)); #else - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( data_desc_, CudnnDataType::type, x_dims.size() > 3 ? 
x_dims.size() : 4, dims.data(), strides.data())); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDeriveBNTensorDescriptor(bn_param_desc_, data_desc_, mode_)); #endif @@ -1063,7 +1062,7 @@ class BatchNormGradKernel Tensor workspace_tensor; auto reserve_space_size = reserve_space->memory_size(); // --------------- cudnn batchnorm workspace --------------- - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload:: cudnnGetBatchNormalizationBackwardExWorkspaceSize( /*handle=*/dev_ctx.cudnn_handle(), @@ -1081,7 +1080,7 @@ class BatchNormGradKernel workspace_ptr = workspace_tensor.mutable_data( ctx.GetPlace(), transformed_x.type(), workspace_size); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnBatchNormalizationBackwardEx( /*handle=*/dev_ctx.cudnn_handle(), /*mode=*/mode_, @@ -1151,7 +1150,7 @@ class BatchNormGradKernel } // TODO(wangran16): wait for MIOpen to improve the performance of BN -// PADDLE_ENFORCE_CUDA_SUCCESS( +// PADDLE_ENFORCE_GPU_SUCCESS( // platform::dynload::miopenBatchNormalizationBackward( // dev_ctx.cudnn_handle(), mode_, CudnnDataType::kOne(), // CudnnDataType::kZero(), CudnnDataType::kOne(), @@ -1166,7 +1165,7 @@ class BatchNormGradKernel // ctx.GetPlace()), // epsilon, saved_mean_data, saved_var_data)); #else - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnBatchNormalizationBackward( dev_ctx.cudnn_handle(), mode_, CudnnDataType::kOne(), CudnnDataType::kZero(), CudnnDataType::kOne(), @@ -1231,15 +1230,15 @@ class BatchNormGradKernel #ifdef PADDLE_WITH_HIP // TODO(wangran16): wait for MIOpen to improve the performance of BN // clean when exit. -// PADDLE_ENFORCE_CUDA_SUCCESS( +// PADDLE_ENFORCE_GPU_SUCCESS( // platform::dynload::miopenDestroyTensorDescriptor(data_desc_)); -// PADDLE_ENFORCE_CUDA_SUCCESS( +// PADDLE_ENFORCE_GPU_SUCCESS( // platform::dynload::miopenDestroyTensorDescriptor(bn_param_desc_)); #else // clean when exit. - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(data_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(bn_param_desc_)); #endif } else { diff --git a/paddle/fluid/operators/bce_loss_op.cu b/paddle/fluid/operators/bce_loss_op.cu index 8bd2b7fe2d127c379e27e42f4c189c90fb7ebf8e..73f73a81c088eb4e383dc40b206f2494605ee9eb 100644 --- a/paddle/fluid/operators/bce_loss_op.cu +++ b/paddle/fluid/operators/bce_loss_op.cu @@ -14,8 +14,8 @@ limitations under the License. 
*/ #include #include "paddle/fluid/operators/bce_loss_op.h" #include "paddle/fluid/operators/math.h" -#include "paddle/fluid/platform/cuda_primitives.h" -#include "paddle/fluid/platform/gpu_launch_config.h" +#include "paddle/fluid/platform/device/gpu/gpu_launch_config.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/fluid/platform/hostdevice.h" namespace paddle { diff --git a/paddle/fluid/operators/bilateral_slice_op.cu b/paddle/fluid/operators/bilateral_slice_op.cu index 3c64ed1acc847d8f60ad39bf3437b22ad8f2bb4a..3fd8995745acb4ad81d4473e7cd91da5b892522b 100644 --- a/paddle/fluid/operators/bilateral_slice_op.cu +++ b/paddle/fluid/operators/bilateral_slice_op.cu @@ -12,8 +12,8 @@ #include #include #include "paddle/fluid/operators/bilateral_slice_op.h" -#include "paddle/fluid/platform/cuda_primitives.h" -#include "paddle/fluid/platform/gpu_launch_config.h" +#include "paddle/fluid/platform/device/gpu/gpu_launch_config.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/bincount_op.cu b/paddle/fluid/operators/bincount_op.cu index 757f72862910695503d0f7e8c6950b5ee84c105f..34facf1ea1fa90672e09a692e027e7cece26aa0b 100644 --- a/paddle/fluid/operators/bincount_op.cu +++ b/paddle/fluid/operators/bincount_op.cu @@ -14,8 +14,8 @@ limitations under the License. */ #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/operators/bincount_op.h" -#include "paddle/fluid/platform/cuda_primitives.h" -#include "paddle/fluid/platform/gpu_launch_config.h" +#include "paddle/fluid/platform/device/gpu/gpu_launch_config.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/fluid/platform/hostdevice.h" namespace paddle { diff --git a/paddle/fluid/operators/cast_op.cu b/paddle/fluid/operators/cast_op.cu index bb4246e3e9b845078894d5ac6604d65668aaaa89..6b393b5666bb29e4780ef48b75a528e4d37dcd27 100644 --- a/paddle/fluid/operators/cast_op.cu +++ b/paddle/fluid/operators/cast_op.cu @@ -14,8 +14,8 @@ limitations under the License. */ #include "paddle/fluid/operators/cast_op.h" #include "paddle/fluid/platform/aligned_vector.h" +#include "paddle/fluid/platform/device/gpu/gpu_launch_config.h" #include "paddle/fluid/platform/float16.h" -#include "paddle/fluid/platform/gpu_launch_config.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/center_loss_op.cu b/paddle/fluid/operators/center_loss_op.cu index f15d1fe5e02ac2c168870b525b390355bdcb981b..549bb5ae75affe3694b72e4546a3dce79f3f14d9 100644 --- a/paddle/fluid/operators/center_loss_op.cu +++ b/paddle/fluid/operators/center_loss_op.cu @@ -14,8 +14,8 @@ limitations under the License. 
*/ #include #include "paddle/fluid/operators/center_loss_op.h" -#include "paddle/fluid/platform/cuda_primitives.h" -#include "paddle/fluid/platform/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/cholesky_op.cu b/paddle/fluid/operators/cholesky_op.cu index 4426057305249bdc53175076e3bcb4da32380270..0bfddf8b5f386ef28735781432b72534b37360c8 100644 --- a/paddle/fluid/operators/cholesky_op.cu +++ b/paddle/fluid/operators/cholesky_op.cu @@ -131,27 +131,26 @@ class CholeskyGPUKernel : public framework::OpKernel { int lda, int* info) const { \ auto handle = dev_ctx.cusolver_dn_handle(); \ int workspace_size = 0; \ - PADDLE_ENFORCE_CUDA_SUCCESS( \ + PADDLE_ENFORCE_GPU_SUCCESS( \ platform::dynload::cusolverDn##C##potrf_bufferSize( \ handle, uplo, n, A, lda, &workspace_size)); \ auto workspace = memory::Alloc(dev_ctx, workspace_size); \ T* workspace_ptr = reinterpret_cast(workspace->ptr()); \ - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cusolverDn##C##potrf( \ + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cusolverDn##C##potrf( \ handle, uplo, n, A, lda, workspace_ptr, workspace_size, info)); \ } FUNC_WITH_TYPES(POTRF_INSTANCE); #if CUDA_VERSION >= 9020 && !defined(_WIN32) -#define POTRF_BATCH_INSTANCE(T, C) \ - template <> \ - void CholeskyGPUKernel::PotrfBatched( \ - const platform::CUDADeviceContext& dev_ctx, cublasFillMode_t uplo, \ - int n, T* Aarray[], int lda, int* info_array, int batch_size) const { \ - auto handle = dev_ctx.cusolver_dn_handle(); \ - PADDLE_ENFORCE_CUDA_SUCCESS( \ - platform::dynload::cusolverDn##C##potrfBatched( \ - handle, uplo, n, Aarray, lda, info_array, batch_size)); \ +#define POTRF_BATCH_INSTANCE(T, C) \ + template <> \ + void CholeskyGPUKernel::PotrfBatched( \ + const platform::CUDADeviceContext& dev_ctx, cublasFillMode_t uplo, \ + int n, T* Aarray[], int lda, int* info_array, int batch_size) const { \ + auto handle = dev_ctx.cusolver_dn_handle(); \ + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cusolverDn##C##potrfBatched( \ + handle, uplo, n, Aarray, lda, info_array, batch_size)); \ } FUNC_WITH_TYPES(POTRF_BATCH_INSTANCE); diff --git a/paddle/fluid/operators/cinn_launch_op.cu.cc b/paddle/fluid/operators/cinn_launch_op.cu.cc index d557cfc7c08927900d702b9ee951faeaacccf620..fae2d6ddb487d9237088ab7072a20ce429ed9d6d 100644 --- a/paddle/fluid/operators/cinn_launch_op.cu.cc +++ b/paddle/fluid/operators/cinn_launch_op.cu.cc @@ -18,9 +18,9 @@ limitations under the License. 
*/ #include "cinn/runtime/cinn_runtime.h" #include "paddle/fluid/framework/operator.h" #include "paddle/fluid/framework/scope.h" +#include "paddle/fluid/platform/device/gpu/gpu_types.h" #include "paddle/fluid/platform/device_context.h" #include "paddle/fluid/platform/enforce.h" -#include "paddle/fluid/platform/type_defs.h" #ifdef PADDLE_WITH_CUDA #include @@ -45,9 +45,9 @@ void CUDART_CB ReleaseBuffers(void* data) { template <> void ReleaseResource( const std::vector& resources, void* stream) { - PADDLE_ENFORCE_CUDA_SUCCESS(cudaLaunchHostFunc( + PADDLE_ENFORCE_GPU_SUCCESS(cudaLaunchHostFunc( static_cast(stream), ReleaseScope, resources[0])); - PADDLE_ENFORCE_CUDA_SUCCESS(cudaLaunchHostFunc( + PADDLE_ENFORCE_GPU_SUCCESS(cudaLaunchHostFunc( static_cast(stream), ReleaseBuffers, resources[1])); } diff --git a/paddle/fluid/operators/class_center_sample_op.cu b/paddle/fluid/operators/class_center_sample_op.cu index cfcfd04e6fc7c2421ebe05ea2b29f8d7e0f3a915..29286be0dd6b2082586299765310c7428365a87a 100644 --- a/paddle/fluid/operators/class_center_sample_op.cu +++ b/paddle/fluid/operators/class_center_sample_op.cu @@ -30,7 +30,7 @@ namespace cub = hipcub; #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) #include "paddle/fluid/platform/collective_helper.h" -#include "paddle/fluid/platform/nccl_helper.h" +#include "paddle/fluid/platform/device/gpu/nccl_helper.h" #endif namespace paddle { @@ -335,7 +335,7 @@ class ClassCenterSampleCUDAKernel : public framework::OpKernel { static_cast( platform::DeviceContextPool::Instance().Get(ctx.GetPlace())) ->stream(); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclAllReduce( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce( num_classes_per_device_ptr, num_classes_per_device_ptr, num_classes_per_device.numel(), platform::ToNCCLDataType(num_classes_per_device.type()), ncclSum, @@ -346,13 +346,13 @@ class ClassCenterSampleCUDAKernel : public framework::OpKernel { // step 2: Determine temporary device storage requirements int num_buffer_ele = std::max(batch_size, num_classes); size_t cub_sort_temp_store_size = 0; - PADDLE_ENFORCE_CUDA_SUCCESS((cub::DeviceRadixSort::SortPairs( + PADDLE_ENFORCE_GPU_SUCCESS((cub::DeviceRadixSort::SortPairs( nullptr, cub_sort_temp_store_size, nullptr, nullptr, nullptr, nullptr, num_buffer_ele, 0, sizeof(T) * 8, ctx.cuda_device_context().stream()))); size_t cub_sum_temp_store_size = 0; NotEqualToPreviousAdjacentIterator unique_counting_iter_temp(nullptr, 0); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( (cub::DeviceScan::InclusiveSum, T*>( nullptr, cub_sum_temp_store_size, unique_counting_iter_temp, @@ -360,7 +360,7 @@ class ClassCenterSampleCUDAKernel : public framework::OpKernel { size_t cub_scan_temp_store_size = 0; ActualNumSampledFunctor actual_num_sampled_op_temp(num_samples); - PADDLE_ENFORCE_CUDA_SUCCESS((cub::DeviceScan::InclusiveScan( + PADDLE_ENFORCE_GPU_SUCCESS((cub::DeviceScan::InclusiveScan( nullptr, cub_scan_temp_store_size, num_classes_per_device_ptr, num_classes_per_device_ptr, actual_num_sampled_op_temp, nranks + 1, ctx.cuda_device_context().stream()))); @@ -384,7 +384,7 @@ class ClassCenterSampleCUDAKernel : public framework::OpKernel { void* cub_temp_storage_ptr = memory_buffer.cub_temp_storage_ptr(); // step 4: Calculate class interval among nranks - PADDLE_ENFORCE_CUDA_SUCCESS((cub::DeviceScan::InclusiveSum( + PADDLE_ENFORCE_GPU_SUCCESS((cub::DeviceScan::InclusiveSum( cub_temp_storage_ptr, cub_temp_storage_bytes, num_classes_per_device_ptr, class_interval_ptr, nranks + 1, 
ctx.cuda_device_context().stream()))); @@ -415,13 +415,13 @@ class ClassCenterSampleCUDAKernel : public framework::OpKernel { // step 7: sort class center by ascending, so that positive class center // always be sampled. - PADDLE_ENFORCE_CUDA_SUCCESS((cub::DeviceRadixSort::SortPairs( + PADDLE_ENFORCE_GPU_SUCCESS((cub::DeviceRadixSort::SortPairs( cub_temp_storage_ptr, cub_temp_storage_bytes, cub_sort_keys_ptr, cub_sort_keys_out_ptr, cub_sort_values_ptr, cub_sort_values_out_ptr, num_classes, 0, sizeof(T) * 8, ctx.cuda_device_context().stream()))); // step 8: sort input label ascending - PADDLE_ENFORCE_CUDA_SUCCESS((cub::DeviceRadixSort::SortPairs( + PADDLE_ENFORCE_GPU_SUCCESS((cub::DeviceRadixSort::SortPairs( cub_temp_storage_ptr, cub_temp_storage_bytes, label->data(), cub_sort_keys_out_ptr, cub_sort_values_ptr, cub_sort_keys_ptr, batch_size, 0, sizeof(T) * 8, ctx.cuda_device_context().stream()))); @@ -430,8 +430,8 @@ class ClassCenterSampleCUDAKernel : public framework::OpKernel { // label NotEqualToPreviousAdjacentIterator unique_counting_iter( cub_sort_keys_out_ptr, 0); - PADDLE_ENFORCE_CUDA_SUCCESS((cub::DeviceScan::InclusiveSum< - NotEqualToPreviousAdjacentIterator, T*>( + PADDLE_ENFORCE_GPU_SUCCESS((cub::DeviceScan::InclusiveSum< + NotEqualToPreviousAdjacentIterator, T*>( cub_temp_storage_ptr, cub_temp_storage_bytes, unique_counting_iter, cub_sort_values_ptr, batch_size, ctx.cuda_device_context().stream()))); @@ -445,13 +445,13 @@ class ClassCenterSampleCUDAKernel : public framework::OpKernel { // Since maybe num_positive_class_center > num_samples, // we need to ensure all positive class center per device are sampled. ActualNumSampledFunctor actual_num_sampled_op(num_samples); - PADDLE_ENFORCE_CUDA_SUCCESS((cub::DeviceScan::InclusiveScan( + PADDLE_ENFORCE_GPU_SUCCESS((cub::DeviceScan::InclusiveScan( cub_temp_storage_ptr, cub_temp_storage_bytes, bound_value_ptr, num_classes_per_device_ptr, actual_num_sampled_op, nranks + 1, ctx.cuda_device_context().stream()))); // step 12: Calculate actual sampled class interval among nranks - PADDLE_ENFORCE_CUDA_SUCCESS((cub::DeviceScan::InclusiveSum( + PADDLE_ENFORCE_GPU_SUCCESS((cub::DeviceScan::InclusiveSum( cub_temp_storage_ptr, cub_temp_storage_bytes, num_classes_per_device_ptr, class_interval_ptr, nranks + 1, ctx.cuda_device_context().stream()))); diff --git a/paddle/fluid/operators/collective/allreduce_op.h b/paddle/fluid/operators/collective/allreduce_op.h index 157924f08546bfcbe4f9df14588f9d462dd6677b..4e6d86d49e8632bd2dc65559614b8fcbfe819c90 100644 --- a/paddle/fluid/operators/collective/allreduce_op.h +++ b/paddle/fluid/operators/collective/allreduce_op.h @@ -22,7 +22,7 @@ limitations under the License. 
*/ #include "paddle/fluid/framework/op_registry.h" #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) -#include "paddle/fluid/platform/nccl_helper.h" +#include "paddle/fluid/platform/device/gpu/nccl_helper.h" #endif namespace paddle { @@ -69,15 +69,11 @@ class AllReduceOpKernel : public framework::OpKernel { red_type = ncclMin; break; } - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclAllReduce( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce( sendbuff, recvbuff, numel, static_cast(dtype), red_type, comm, stream)); if (ctx.Attr("sync_mode")) { -#ifdef PADDLE_WITH_RCCL - PADDLE_ENFORCE_CUDA_SUCCESS(hipStreamSynchronize(stream)); -#else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamSynchronize(stream)); -#endif + platform::GpuStreamSync(stream); } #else PADDLE_THROW(platform::errors::PreconditionNotMet( diff --git a/paddle/fluid/operators/collective/alltoall_op.cu.cc b/paddle/fluid/operators/collective/alltoall_op.cu.cc index 1bcb47fc686cfe4b93420697b15d0c2585f0358e..02b10f17da5a3d2d314508a95ba9649be465471c 100644 --- a/paddle/fluid/operators/collective/alltoall_op.cu.cc +++ b/paddle/fluid/operators/collective/alltoall_op.cu.cc @@ -16,7 +16,7 @@ limitations under the License. */ #if defined(PADDLE_WITH_NCCL) #include "paddle/fluid/platform/collective_helper.h" -#include "paddle/fluid/platform/nccl_helper.h" +#include "paddle/fluid/platform/device/gpu/nccl_helper.h" #endif namespace paddle { @@ -62,15 +62,15 @@ class AllToAllOpCUDAKernel : public framework::OpKernel { auto recv_buf = out->mutable_data(out_dims, place); size_t offset = 0; send_numel /= nranks; - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclGroupStart()); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclGroupStart()); for (auto i = 0; i < nranks; ++i) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclSend( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclSend( send_buf + offset, send_numel, dtype, i, comm->comm(), stream)); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclRecv( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclRecv( recv_buf + offset, send_numel, dtype, i, comm->comm(), stream)); offset += send_numel; } - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclGroupEnd()); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclGroupEnd()); #else PADDLE_THROW( platform::errors::Unavailable("NCCL version >= 2.7.3 is needed.")); diff --git a/paddle/fluid/operators/collective/barrier_op.cu.cc b/paddle/fluid/operators/collective/barrier_op.cu.cc index b8631b44f14caac162dd332f715b825e42bf31af..c9aef237699f3d4219b7053d8a666b4463db50d6 100644 --- a/paddle/fluid/operators/collective/barrier_op.cu.cc +++ b/paddle/fluid/operators/collective/barrier_op.cu.cc @@ -16,7 +16,7 @@ limitations under the License. 
*/ #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) #include "paddle/fluid/platform/collective_helper.h" -#include "paddle/fluid/platform/nccl_helper.h" +#include "paddle/fluid/platform/device/gpu/nccl_helper.h" #endif namespace paddle { @@ -41,13 +41,9 @@ class BarrierOpCUDAKernel : public framework::OpKernel { auto dev_ctx = platform::DeviceContextPool::Instance().Get(place); auto stream = static_cast(dev_ctx)->stream(); ncclRedOp_t nccl_red_type = ncclSum; - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclAllReduce( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce( sendbuff, recvbuff, numel, dtype, nccl_red_type, comm->comm(), stream)); -#ifdef PADDLE_WITH_RCCL - PADDLE_ENFORCE_CUDA_SUCCESS(hipStreamSynchronize(stream)); -#else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamSynchronize(stream)); -#endif + platform::GpuStreamSync(stream); #else PADDLE_THROW(platform::errors::Unavailable( "PaddlePaddle should compile with NCCL.")); diff --git a/paddle/fluid/operators/collective/broadcast_op.cu.cc b/paddle/fluid/operators/collective/broadcast_op.cu.cc index fa4d7ee4cce5d11bbdc563b1d58e7cd04d1325cb..daaaf8b7a2e41045132dff2732c5faba9b3863d4 100644 --- a/paddle/fluid/operators/collective/broadcast_op.cu.cc +++ b/paddle/fluid/operators/collective/broadcast_op.cu.cc @@ -15,7 +15,7 @@ limitations under the License. */ #include "paddle/fluid/framework/op_registry.h" #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) -#include "paddle/fluid/platform/nccl_helper.h" +#include "paddle/fluid/platform/device/gpu/nccl_helper.h" #endif namespace ops = paddle::operators; @@ -54,7 +54,7 @@ class NCCLBroadcastOpKernel : public framework::OpKernel { auto comm = dev_ctx.nccl_comm(); auto stream = dev_ctx.stream(); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclBcast( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclBcast( send_recv_buffer, static_cast(in->numel()), platform::ToNCCLDataType(in->type()), root_dev_id, comm, stream)); @@ -62,11 +62,7 @@ class NCCLBroadcastOpKernel : public framework::OpKernel { << " From " << root_dev_id << " to " << dev_id; if (ctx.Attr("sync_mode")) { -#ifdef PADDLE_WITH_RCCL - PADDLE_ENFORCE_CUDA_SUCCESS(hipStreamSynchronize(stream)); -#else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamSynchronize(stream)); -#endif + platform::GpuStreamSync(stream); } #else PADDLE_THROW(platform::errors::PreconditionNotMet( diff --git a/paddle/fluid/operators/collective/c_allgather_op.cu.cc b/paddle/fluid/operators/collective/c_allgather_op.cu.cc index 597e4321d66bdbe832e5dda50f2ece3b02be1151..f174473c049ececc03dfaca214d8ac9922b04a45 100644 --- a/paddle/fluid/operators/collective/c_allgather_op.cu.cc +++ b/paddle/fluid/operators/collective/c_allgather_op.cu.cc @@ -16,7 +16,7 @@ limitations under the License. 
*/ #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) #include "paddle/fluid/platform/collective_helper.h" -#include "paddle/fluid/platform/nccl_helper.h" +#include "paddle/fluid/platform/device/gpu/nccl_helper.h" #endif namespace paddle { @@ -56,7 +56,7 @@ class CAllGatherOpCUDAKernel : public framework::OpKernel { stream = comm->stream(); } - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclAllGather( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllGather( send_buff, recv_buff, send_numel, static_cast(dtype), comm->comm(), stream)); #else diff --git a/paddle/fluid/operators/collective/c_allreduce_op.h b/paddle/fluid/operators/collective/c_allreduce_op.h index 6d569b454e6916eb5284c8fb1801c4a4aceaf256..714dc4e19f9b13c713b3ded8b4b1ba72112e0086 100644 --- a/paddle/fluid/operators/collective/c_allreduce_op.h +++ b/paddle/fluid/operators/collective/c_allreduce_op.h @@ -29,7 +29,7 @@ limitations under the License. */ #endif #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) -#include "paddle/fluid/platform/nccl_helper.h" +#include "paddle/fluid/platform/device/gpu/nccl_helper.h" #endif #if defined(PADDLE_WITH_XPU_BKCL) @@ -386,7 +386,7 @@ class CAllReduceOpCUDAKernel : public framework::OpKernel { "Invalid reduce type: %d", red_type)); } - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclAllReduce( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce( sendbuff, recvbuff, numel, dtype, nccl_red_type, comm->comm(), stream)); #else PADDLE_THROW(platform::errors::PreconditionNotMet( diff --git a/paddle/fluid/operators/collective/c_broadcast_op.cu.cc b/paddle/fluid/operators/collective/c_broadcast_op.cu.cc index b37bd250c155837b24c27a8cf603d0fb6c1e75b3..6deb837069761de26f3ee011702c94983299ca59 100644 --- a/paddle/fluid/operators/collective/c_broadcast_op.cu.cc +++ b/paddle/fluid/operators/collective/c_broadcast_op.cu.cc @@ -16,7 +16,7 @@ limitations under the License. */ #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) #include "paddle/fluid/platform/collective_helper.h" -#include "paddle/fluid/platform/nccl_helper.h" +#include "paddle/fluid/platform/device/gpu/nccl_helper.h" #endif namespace paddle { @@ -46,7 +46,7 @@ class CBroadcastOpCUDAKernel : public framework::OpKernel { int root = ctx.Attr("root"); if (root == comm->rank()) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclBcast( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclBcast( reinterpret_cast(const_cast(x->data())), numel, dtype, root, comm->comm(), stream)); VLOG(3) << "rank " << comm->rank() << " invoke Bcast. sent " @@ -59,7 +59,7 @@ class CBroadcastOpCUDAKernel : public framework::OpKernel { static_cast(out)); } } else { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::ncclBcast(out->mutable_data(place), numel, dtype, root, comm->comm(), stream)); VLOG(3) << "rank " << comm->rank() << " invoke Bcast. recieved " diff --git a/paddle/fluid/operators/collective/c_comm_init_all_op.cc b/paddle/fluid/operators/collective/c_comm_init_all_op.cc index 60a9b1ee44fcc211fb73d2baed9da2da54fa303e..db9a8428e3d033710386b860c38b2c7c6c35bf99 100644 --- a/paddle/fluid/operators/collective/c_comm_init_all_op.cc +++ b/paddle/fluid/operators/collective/c_comm_init_all_op.cc @@ -19,7 +19,7 @@ limitations under the License. 
*/ #include "paddle/fluid/framework/threadpool.h" #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) #include "paddle/fluid/platform/collective_helper.h" -#include "paddle/fluid/platform/nccl_helper.h" +#include "paddle/fluid/platform/device/gpu/nccl_helper.h" #endif namespace paddle { diff --git a/paddle/fluid/operators/collective/c_comm_init_multitrainer_op.cc b/paddle/fluid/operators/collective/c_comm_init_multitrainer_op.cc index aee10dcdc27323511a48610c88cd45cc85544c12..f69fe8f1e3f1fa4d337760165e2bebad146b0925 100644 --- a/paddle/fluid/operators/collective/c_comm_init_multitrainer_op.cc +++ b/paddle/fluid/operators/collective/c_comm_init_multitrainer_op.cc @@ -26,7 +26,7 @@ limitations under the License. */ // #include "paddle/fluid/operators/distributed/request_handler_impl.h" #if defined(PADDLE_WITH_NCCL) #include "paddle/fluid/platform/collective_helper.h" -#include "paddle/fluid/platform/nccl_helper.h" +#include "paddle/fluid/platform/device/gpu/nccl_helper.h" #endif namespace paddle { diff --git a/paddle/fluid/operators/collective/c_concat_op.cu.cc b/paddle/fluid/operators/collective/c_concat_op.cu.cc index bfdc49c440aae76a2aa9cebae82a419b471f4662..738ed162861317bba43b95b5edf7146431c9d7c7 100644 --- a/paddle/fluid/operators/collective/c_concat_op.cu.cc +++ b/paddle/fluid/operators/collective/c_concat_op.cu.cc @@ -19,7 +19,7 @@ limitations under the License. */ #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) #include "paddle/fluid/platform/collective_helper.h" -#include "paddle/fluid/platform/nccl_helper.h" +#include "paddle/fluid/platform/device/gpu/nccl_helper.h" #endif namespace paddle { @@ -71,7 +71,7 @@ class CConcatOpCUDAKernel : public framework::OpKernel { auto dev_ctx = platform::DeviceContextPool::Instance().Get(place); stream = static_cast(dev_ctx)->stream(); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclAllGather( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllGather( send_buff, recv_buff, send_numel, static_cast(dtype), comm->comm(), stream)); diff --git a/paddle/fluid/operators/collective/c_embedding_op.cu b/paddle/fluid/operators/collective/c_embedding_op.cu index 858ca79f85b0e66b830e4ca78098a84e15d25df9..9b343b34a3e51f48a4cd2edd2f7450e696bc0458 100644 --- a/paddle/fluid/operators/collective/c_embedding_op.cu +++ b/paddle/fluid/operators/collective/c_embedding_op.cu @@ -15,7 +15,7 @@ limitations under the License. 
*/ #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/collective/c_embedding_op.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/fluid/platform/float16.h" namespace paddle { diff --git a/paddle/fluid/operators/collective/c_gen_nccl_id_op.cc b/paddle/fluid/operators/collective/c_gen_nccl_id_op.cc index 0a0a824b77586676fb5ffecc9c918649386f8cd0..d392beb3a48345a8db9251ac8d26c41ec5e1167d 100644 --- a/paddle/fluid/operators/collective/c_gen_nccl_id_op.cc +++ b/paddle/fluid/operators/collective/c_gen_nccl_id_op.cc @@ -30,7 +30,7 @@ namespace operators { #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) static void GenNCCLID(std::vector* nccl_ids) { for (size_t i = 0; i < nccl_ids->size(); ++i) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::ncclGetUniqueId(&(*nccl_ids)[i])); } } diff --git a/paddle/fluid/operators/collective/c_reduce_op.h b/paddle/fluid/operators/collective/c_reduce_op.h index 74f41bff9dc865d33851d6a0c2f3429d7ccbd9aa..b950339bd22be0b24edc29f621e8cdf3d69e14e5 100644 --- a/paddle/fluid/operators/collective/c_reduce_op.h +++ b/paddle/fluid/operators/collective/c_reduce_op.h @@ -30,7 +30,7 @@ limitations under the License. */ #endif #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) -#include "paddle/fluid/platform/nccl_helper.h" +#include "paddle/fluid/platform/device/gpu/nccl_helper.h" #endif #if defined(PADDLE_WITH_XPU_BKCL) @@ -316,7 +316,7 @@ class CReduceOpCUDAKernel : public framework::OpKernel { "kRedMax, kRedMin, kRedProd.")); } - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclReduce( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclReduce( sendbuff, recvbuff, numel, dtype, nccl_red_type, root, comm->comm(), stream)); #else diff --git a/paddle/fluid/operators/collective/c_reducescatter_op.cu.cc b/paddle/fluid/operators/collective/c_reducescatter_op.cu.cc index 4d19ee42641f4d9a901c650f51110f8d8f83e8b6..141fa760413b3528961378f5284c71b857543da1 100644 --- a/paddle/fluid/operators/collective/c_reducescatter_op.cu.cc +++ b/paddle/fluid/operators/collective/c_reducescatter_op.cu.cc @@ -16,7 +16,7 @@ limitations under the License. */ #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) #include "paddle/fluid/platform/collective_helper.h" -#include "paddle/fluid/platform/nccl_helper.h" +#include "paddle/fluid/platform/device/gpu/nccl_helper.h" #endif namespace paddle { @@ -57,7 +57,7 @@ class CReduceScatterOpCUDAKernel : public framework::OpKernel { stream = comm->stream(); } - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclReduceScatter( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclReduceScatter( send_buff, recv_buff, recv_numel, static_cast(dtype), ncclSum, comm->comm(), stream)); #else diff --git a/paddle/fluid/operators/collective/c_scatter_op.cu.cc b/paddle/fluid/operators/collective/c_scatter_op.cu.cc index 0c9dc2af14f39442f5f284ac6a46e691b0d600df..4d4dc0c12af55c093f0444c4dd3f43eb8a4ab966 100644 --- a/paddle/fluid/operators/collective/c_scatter_op.cu.cc +++ b/paddle/fluid/operators/collective/c_scatter_op.cu.cc @@ -16,7 +16,7 @@ limitations under the License. 
*/ #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) #include "paddle/fluid/platform/collective_helper.h" -#include "paddle/fluid/platform/nccl_helper.h" +#include "paddle/fluid/platform/device/gpu/nccl_helper.h" #endif namespace paddle { @@ -66,7 +66,7 @@ class CScatterOpCUDAKernel : public framework::OpKernel { framework::Tensor temp; auto out_ptr = temp.mutable_data(out_dims, place); if (root_id == comm->rank()) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclBcast( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclBcast( reinterpret_cast(const_cast(x->data())), numel, dtype, root_id, comm->comm(), stream)); @@ -74,7 +74,7 @@ class CScatterOpCUDAKernel : public framework::OpKernel { *platform::DeviceContextPool::Instance().Get(place), static_cast(&temp)); } else { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclBcast( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclBcast( out_ptr, numel, dtype, root_id, comm->comm(), stream)); } diff --git a/paddle/fluid/operators/collective/c_softmax_with_cross_entropy_op.cu b/paddle/fluid/operators/collective/c_softmax_with_cross_entropy_op.cu index 77db86e7111112ac78bea270413ee9a2c2cba72b..6371d523cfa4a24cd57c9e78ebb58a907ca78381 100644 --- a/paddle/fluid/operators/collective/c_softmax_with_cross_entropy_op.cu +++ b/paddle/fluid/operators/collective/c_softmax_with_cross_entropy_op.cu @@ -16,7 +16,7 @@ limitations under the License. */ #include "paddle/fluid/operators/math/cross_entropy.h" #include "paddle/fluid/operators/math/softmax_impl.h" #include "paddle/fluid/platform/collective_helper.h" -#include "paddle/fluid/platform/nccl_helper.h" +#include "paddle/fluid/platform/device/gpu/nccl_helper.h" #include "paddle/fluid/string/string_helper.h" namespace paddle { @@ -119,7 +119,7 @@ class CSoftmaxWithCrossEntropyOpCUDAKernel : public framework::OpKernel { Eigen::DSizes along_axis(1); eigen_logits_max.device(*dev_ctx.eigen_device()) = eigen_logits.maximum(along_axis); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclAllReduce( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce( logits_max_buff, logits_max_buff, logits_max.numel(), platform::ToNCCLDataType(logits_max.type()), ncclMax, comm->comm(), stream)); @@ -160,7 +160,7 @@ class CSoftmaxWithCrossEntropyOpCUDAKernel : public framework::OpKernel { } void* predict_logits_buff = predicted_logits.mutable_data(place); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclAllReduce( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce( predict_logits_buff, predict_logits_buff, predicted_logits.numel(), platform::ToNCCLDataType(predicted_logits.type()), ncclSum, comm->comm(), stream)); @@ -178,7 +178,7 @@ class CSoftmaxWithCrossEntropyOpCUDAKernel : public framework::OpKernel { eigen_sum_exp_logits.device(*dev_ctx.eigen_device()) = eigen_softmax.sum(along_axis); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclAllReduce( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce( sum_exp_logits_buff, sum_exp_logits_buff, sum_exp_logits.numel(), platform::ToNCCLDataType(sum_exp_logits.type()), ncclSum, comm->comm(), stream)); diff --git a/paddle/fluid/operators/collective/c_split_op.cu b/paddle/fluid/operators/collective/c_split_op.cu index 034accbb480c78be767e5b2900ccc376cfa5f635..a8c4eafede41baef9b7286af7fbf779695e2e2cc 100644 --- a/paddle/fluid/operators/collective/c_split_op.cu +++ b/paddle/fluid/operators/collective/c_split_op.cu @@ -16,7 +16,7 @@ limitations under the License. 
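// ---------------------------------------------------------------------------
// Editor's note (not part of the patch): the three ncclAllReduce calls in
// c_softmax_with_cross_entropy_op.cu above implement the usual model-parallel,
// numerically stable softmax cross entropy with the class dimension sharded
// across ranks (an interpretation of the existing kernel, not new behavior):
//
//   m   = allreduce_max over ranks of max_j x_j     (ncclMax on logits_max)
//   x_y = allreduce_sum of the label's local logit  (ncclSum on predicted_logits;
//                                                    only the owning shard contributes)
//   s   = allreduce_sum of sum_j exp(x_j - m)       (ncclSum on sum_exp_logits)
//   softmax = exp(x - m) / s,   loss = log(s) - (x_y - m)
// ---------------------------------------------------------------------------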
*/ #include "paddle/fluid/operators/collective/c_split_op.h" #include "paddle/fluid/operators/math/concat_and_split.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/collective/c_sync_calc_stream_op.cc b/paddle/fluid/operators/collective/c_sync_calc_stream_op.cc index 72faf4298cf60191baedf6d40e06757d38246b0b..72339bbd4875270acbbb9c22d3ea5341da53cc3a 100644 --- a/paddle/fluid/operators/collective/c_sync_calc_stream_op.cc +++ b/paddle/fluid/operators/collective/c_sync_calc_stream_op.cc @@ -55,11 +55,7 @@ class CSyncCalcStreamKernel : public framework::OpKernel { auto dev_ctx = static_cast( platform::DeviceContextPool::Instance().Get(place)); -#ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(hipStreamSynchronize(dev_ctx->stream())); -#else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamSynchronize(dev_ctx->stream())); -#endif + platform::GpuStreamSync(dev_ctx->stream()); #elif defined(PADDLE_WITH_ASCEND_CL) && !defined(_WIN32) auto place = ctx.GetPlace(); diff --git a/paddle/fluid/operators/collective/c_sync_comm_stream_op.cc b/paddle/fluid/operators/collective/c_sync_comm_stream_op.cc index 03894b24a913b4f6ad1c34bfd787260ed6a45e99..21bad096c2d493db4569b3c6675502c8146cfd9d 100644 --- a/paddle/fluid/operators/collective/c_sync_comm_stream_op.cc +++ b/paddle/fluid/operators/collective/c_sync_comm_stream_op.cc @@ -16,7 +16,7 @@ limitations under the License. */ #include "paddle/fluid/framework/op_registry.h" #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) -#include "paddle/fluid/platform/nccl_helper.h" +#include "paddle/fluid/platform/device/gpu/nccl_helper.h" #endif #if defined(PADDLE_WITH_ASCEND_CL) @@ -67,11 +67,7 @@ class CSyncCommStreamKernel : public framework::OpKernel { auto stream = platform::NCCLCommContext::Instance().Get(ring_id, place)->stream(); -#ifdef PADDLE_WITH_RCCL - PADDLE_ENFORCE_CUDA_SUCCESS(hipStreamSynchronize(stream)); -#else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamSynchronize(stream)); -#endif + platform::GpuStreamSync(stream); #elif defined(PADDLE_WITH_ASCEND_CL) PADDLE_ENFORCE_EQ(is_npu_place(place), true, diff --git a/paddle/fluid/operators/collective/c_wait_comm_op.cc b/paddle/fluid/operators/collective/c_wait_comm_op.cc index d0dfc3bb1c2e5f82d875bb4fb4c698b20064631b..dfa4dcd0fac59b0cf808a6df86991ebfa03a58a0 100644 --- a/paddle/fluid/operators/collective/c_wait_comm_op.cc +++ b/paddle/fluid/operators/collective/c_wait_comm_op.cc @@ -54,11 +54,11 @@ class CWaitCommOp : public framework::OperatorBase { // comm_stream-->event-->compute_stream #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(hipEventRecord(event, comm_stream)); - PADDLE_ENFORCE_CUDA_SUCCESS(hipStreamWaitEvent(compute_stream, event, 0)); + PADDLE_ENFORCE_GPU_SUCCESS(hipEventRecord(event, comm_stream)); + PADDLE_ENFORCE_GPU_SUCCESS(hipStreamWaitEvent(compute_stream, event, 0)); #else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaEventRecord(event, comm_stream)); - PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamWaitEvent(compute_stream, event, 0)); + PADDLE_ENFORCE_GPU_SUCCESS(cudaEventRecord(event, comm_stream)); + PADDLE_ENFORCE_GPU_SUCCESS(cudaStreamWaitEvent(compute_stream, event, 0)); #endif #else PADDLE_THROW(platform::errors::PreconditionNotMet( diff --git a/paddle/fluid/operators/collective/c_wait_compute_op.cc b/paddle/fluid/operators/collective/c_wait_compute_op.cc index 12a28040ef1c5bbf5c837cc1ede2a41cf9c96774..e038617bf3d6a9c6c43f86fc763f2eb7b5a6d870 100644 --- 
a/paddle/fluid/operators/collective/c_wait_compute_op.cc +++ b/paddle/fluid/operators/collective/c_wait_compute_op.cc @@ -57,11 +57,11 @@ class CWaitComputeOp : public framework::OperatorBase { // compute_stream-->event-->comm_stream #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(hipEventRecord(event, compute_stream)); - PADDLE_ENFORCE_CUDA_SUCCESS(hipStreamWaitEvent(comm_stream, event, 0)); + PADDLE_ENFORCE_GPU_SUCCESS(hipEventRecord(event, compute_stream)); + PADDLE_ENFORCE_GPU_SUCCESS(hipStreamWaitEvent(comm_stream, event, 0)); #else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaEventRecord(event, compute_stream)); - PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamWaitEvent(comm_stream, event, 0)); + PADDLE_ENFORCE_GPU_SUCCESS(cudaEventRecord(event, compute_stream)); + PADDLE_ENFORCE_GPU_SUCCESS(cudaStreamWaitEvent(comm_stream, event, 0)); #endif #else PADDLE_THROW(platform::errors::PreconditionNotMet( diff --git a/paddle/fluid/operators/collective/gen_nccl_id_op.cc b/paddle/fluid/operators/collective/gen_nccl_id_op.cc index 99a92469e8502bbc500d627899f3c56fa6bccd66..7a5b6b5f429b2e2f522ff6764004024fde2e5daf 100644 --- a/paddle/fluid/operators/collective/gen_nccl_id_op.cc +++ b/paddle/fluid/operators/collective/gen_nccl_id_op.cc @@ -20,9 +20,9 @@ limitations under the License. */ #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/operator.h" #include "paddle/fluid/framework/var_type_traits.h" +#include "paddle/fluid/platform/device/gpu/nccl_helper.h" #include "paddle/fluid/platform/enforce.h" #include "paddle/fluid/platform/gen_comm_id_helper.h" -#include "paddle/fluid/platform/nccl_helper.h" #include "paddle/fluid/platform/place.h" namespace paddle { @@ -37,7 +37,7 @@ namespace operators { #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) static void GenNCCLID(std::vector* nccl_ids) { for (size_t i = 0; i < nccl_ids->size(); ++i) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::ncclGetUniqueId(&(*nccl_ids)[i])); } } diff --git a/paddle/fluid/operators/collective/global_gather_op.cu.cc b/paddle/fluid/operators/collective/global_gather_op.cu.cc index 70b5d0244d3852a95a4367e248a4a2ac7c7ca96c..e2ff823420aefd427410cf7db5ed2d7a8b476647 100644 --- a/paddle/fluid/operators/collective/global_gather_op.cu.cc +++ b/paddle/fluid/operators/collective/global_gather_op.cu.cc @@ -16,7 +16,7 @@ limitations under the License. 
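// ---------------------------------------------------------------------------
// Editor's note (not part of the patch): c_wait_comm_op and c_wait_compute_op
// above keep their explicit CUDA/HIP branches because they call the event APIs
// directly; only the enforce macro is renamed. The pattern itself is the
// standard device-side ordering trick: record an event on the producing stream
// and make the consuming stream wait on it, without blocking the host. Generic
// CUDA sketch (event and both streams assumed already created):
//
//   PADDLE_ENFORCE_GPU_SUCCESS(cudaEventRecord(event, producer_stream));
//   PADDLE_ENFORCE_GPU_SUCCESS(cudaStreamWaitEvent(consumer_stream, event, 0));
// ---------------------------------------------------------------------------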
*/ #if defined(PADDLE_WITH_NCCL) #include "paddle/fluid/platform/collective_helper.h" -#include "paddle/fluid/platform/nccl_helper.h" +#include "paddle/fluid/platform/device/gpu/nccl_helper.h" #endif namespace paddle { @@ -103,24 +103,24 @@ class GlobalGatherOpCUDAKernel : public framework::OpKernel { auto recv_buf = out->mutable_data(out_dims, place); for (auto i = 0; i < n_expert; ++i) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclGroupStart()); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclGroupStart()); for (auto j = 0; j < nranks; ++j) { int idx = i + j * n_expert; if (cpu_global_count_data[idx]) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::ncclSend(send_buf + send_ptr * in_feat, cpu_global_count_data[idx] * in_feat, dtype, j, comm->comm(), stream)); send_ptr += cpu_global_count_data[idx]; } if (cpu_local_count_data[idx]) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::ncclRecv(recv_buf + expert_ptr[idx] * in_feat, cpu_local_count_data[idx] * in_feat, dtype, j, comm->comm(), stream)); } } - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclGroupEnd()); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclGroupEnd()); } #else PADDLE_THROW( diff --git a/paddle/fluid/operators/collective/global_scatter_op.cu.cc b/paddle/fluid/operators/collective/global_scatter_op.cu.cc index bec984c6b57e19dd890c0a8f3321d69242bd67e5..c47d27366c5f27b9c700522c86728f665aff9ac4 100644 --- a/paddle/fluid/operators/collective/global_scatter_op.cu.cc +++ b/paddle/fluid/operators/collective/global_scatter_op.cu.cc @@ -16,7 +16,7 @@ limitations under the License. */ #if defined(PADDLE_WITH_NCCL) #include "paddle/fluid/platform/collective_helper.h" -#include "paddle/fluid/platform/nccl_helper.h" +#include "paddle/fluid/platform/device/gpu/nccl_helper.h" #endif namespace paddle { @@ -102,24 +102,24 @@ class GlobalScatterOpCUDAKernel : public framework::OpKernel { auto recv_buf = out->mutable_data(out_dims, place); for (auto i = 0; i < n_expert; ++i) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclGroupStart()); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclGroupStart()); for (auto j = 0; j < nranks; ++j) { int idx = i + j * n_expert; if (cpu_local_count_data[idx]) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::ncclSend(send_buf + expert_ptr[idx] * in_feat, cpu_local_count_data[idx] * in_feat, dtype, j, comm->comm(), stream)); } if (cpu_global_count_data[idx]) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::ncclRecv(recv_buf + recv_ptr * in_feat, cpu_global_count_data[idx] * in_feat, dtype, j, comm->comm(), stream)); recv_ptr += cpu_global_count_data[idx]; } } - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclGroupEnd()); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclGroupEnd()); } #else diff --git a/paddle/fluid/operators/collective/partial_allgather_op.cu.cc b/paddle/fluid/operators/collective/partial_allgather_op.cu.cc index 8c32f8c41bbf25f687c66bb21fd3833f10258210..094847beca214b8c94f686628c632d76ba872e47 100644 --- a/paddle/fluid/operators/collective/partial_allgather_op.cu.cc +++ b/paddle/fluid/operators/collective/partial_allgather_op.cu.cc @@ -16,7 +16,7 @@ limitations under the License. 
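// ---------------------------------------------------------------------------
// Editor's note (not part of the patch): alltoall_op, global_gather_op and
// global_scatter_op above all rely on NCCL 2.7+ grouped point-to-point calls:
// every ncclSend/ncclRecv issued between ncclGroupStart and ncclGroupEnd is
// fused into one operation, so the matching sends and receives on different
// ranks cannot deadlock. Stripped-down form of the loop used in those kernels
// (comm, stream, dtype and the buffers are assumed set up as in the diff):
//
//   PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclGroupStart());
//   for (int peer = 0; peer < nranks; ++peer) {
//     PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclSend(
//         send_buf + peer * chunk, chunk, dtype, peer, comm, stream));
//     PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclRecv(
//         recv_buf + peer * chunk, chunk, dtype, peer, comm, stream));
//   }
//   PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclGroupEnd());
// ---------------------------------------------------------------------------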
*/ #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) #include "paddle/fluid/platform/collective_helper.h" -#include "paddle/fluid/platform/nccl_helper.h" +#include "paddle/fluid/platform/device/gpu/nccl_helper.h" #endif namespace paddle { @@ -67,7 +67,7 @@ class PartialAllGatherOpCUDAKernel : public framework::OpKernel { stream = comm->stream(); } - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclAllGather( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllGather( send_buff, recv_buff, send_numel, static_cast(dtype), comm->comm(), stream)); #else diff --git a/paddle/fluid/operators/collective/partial_recv_op.cu.cc b/paddle/fluid/operators/collective/partial_recv_op.cu.cc index 49eafa5c7c4f5352ac8e2f761a09f40c539075b3..d59c062a31b8c89afab36b9434a3c16c7e9c659b 100644 --- a/paddle/fluid/operators/collective/partial_recv_op.cu.cc +++ b/paddle/fluid/operators/collective/partial_recv_op.cu.cc @@ -16,7 +16,7 @@ limitations under the License. */ #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) #include "paddle/fluid/platform/collective_helper.h" -#include "paddle/fluid/platform/nccl_helper.h" +#include "paddle/fluid/platform/device/gpu/nccl_helper.h" #endif namespace paddle { @@ -80,7 +80,7 @@ class PartialRecvOpCUDAKernel : public framework::OpKernel { int recv_numel = numel / num; int offset = recv_numel * id; - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::ncclRecv(out->data() + offset, recv_numel, dtype, peer, comm->comm(), stream)); VLOG(3) << "rank " << comm->rank() << " recv " << recv_numel diff --git a/paddle/fluid/operators/collective/partial_send_op.cu.cc b/paddle/fluid/operators/collective/partial_send_op.cu.cc index 2463f208746ed6e40b7474dc47a5f981b8b3e57e..8a4f7f750a15b3335f29eb111caaf682e194c3b9 100644 --- a/paddle/fluid/operators/collective/partial_send_op.cu.cc +++ b/paddle/fluid/operators/collective/partial_send_op.cu.cc @@ -16,7 +16,7 @@ limitations under the License. */ #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) #include "paddle/fluid/platform/collective_helper.h" -#include "paddle/fluid/platform/nccl_helper.h" +#include "paddle/fluid/platform/device/gpu/nccl_helper.h" #endif namespace paddle { @@ -74,7 +74,7 @@ class PartialSendCUDAKernel : public framework::OpKernel { int send_numel = numel / num; int offset = send_numel * id; - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclSend( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclSend( x->data() + offset, send_numel, dtype, peer, comm->comm(), stream)); VLOG(3) << "rank " << comm->rank() << " send " << send_numel << " from offset[" << offset << "] to " << peer; diff --git a/paddle/fluid/operators/collective/recv_v2_op.cu.cc b/paddle/fluid/operators/collective/recv_v2_op.cu.cc index df94fee5223c6c13a2a6957390ac64b58f765cd5..18d6af4c2aaa1175e833e7d3018a65b871fd731e 100644 --- a/paddle/fluid/operators/collective/recv_v2_op.cu.cc +++ b/paddle/fluid/operators/collective/recv_v2_op.cu.cc @@ -16,7 +16,7 @@ limitations under the License. 
*/ #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) #include "paddle/fluid/platform/collective_helper.h" -#include "paddle/fluid/platform/nccl_helper.h" +#include "paddle/fluid/platform/device/gpu/nccl_helper.h" #endif namespace paddle { @@ -69,7 +69,7 @@ class RecvOpV2CUDAKernel : public framework::OpKernel { auto out_dims = out->dims(); out->mutable_data(out_dims, place, 0); auto numel = out->numel(); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclRecv( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclRecv( out->data(), numel, dtype, peer, comm->comm(), stream)); VLOG(3) << "rank " << comm->rank() << " recv " << framework::product(out_dims) << " from " << peer; @@ -83,7 +83,7 @@ class RecvOpV2CUDAKernel : public framework::OpKernel { auto numel = out->numel(); out->mutable_data(out_dims, place); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclRecv( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclRecv( out->data(), numel, dtype, peer, comm->comm(), stream)); VLOG(3) << "rank " << comm->rank() << " recv " << framework::product(out->dims()) << " from " << peer; diff --git a/paddle/fluid/operators/collective/send_v2_op.cu.cc b/paddle/fluid/operators/collective/send_v2_op.cu.cc index dc28910e9ec9cb12814dc07a9f6a37e3f272f126..952fcf2065d59610bc7a562fc536c05c17abcb83 100644 --- a/paddle/fluid/operators/collective/send_v2_op.cu.cc +++ b/paddle/fluid/operators/collective/send_v2_op.cu.cc @@ -16,7 +16,7 @@ limitations under the License. */ #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) #include "paddle/fluid/platform/collective_helper.h" -#include "paddle/fluid/platform/nccl_helper.h" +#include "paddle/fluid/platform/device/gpu/nccl_helper.h" #endif namespace paddle { @@ -62,7 +62,7 @@ class SendOpV2CUDAKernel : public framework::OpKernel { auto& x = x_array.at(idx); int numel = x.numel(); ncclDataType_t dtype = platform::ToNCCLDataType(x.type()); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclSend( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclSend( x.data(), numel, dtype, peer, comm->comm(), stream)); VLOG(3) << "rank " << comm->rank() << " send " << framework::product(x.dims()) << " to " << peer; @@ -73,7 +73,7 @@ class SendOpV2CUDAKernel : public framework::OpKernel { int numel = x->numel(); ncclDataType_t dtype = platform::ToNCCLDataType(x->type()); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclSend( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclSend( x->data(), numel, dtype, peer, comm->comm(), stream)); VLOG(3) << "rank " << comm->rank() << " send " << framework::product(x->dims()) << " to " << peer; diff --git a/paddle/fluid/operators/controlflow/get_places_op.cc b/paddle/fluid/operators/controlflow/get_places_op.cc index dec0e789776a43264f00294c700ff3d710d56cb2..55bd4879ab7947159c7cf5053c894c3268b3476e 100644 --- a/paddle/fluid/operators/controlflow/get_places_op.cc +++ b/paddle/fluid/operators/controlflow/get_places_op.cc @@ -27,7 +27,7 @@ class OpBase; } // namespace imperative } // namespace paddle #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) -#include "paddle/fluid/platform/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" #endif namespace paddle { @@ -35,7 +35,7 @@ namespace operators { static size_t CUDADevCount() { #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) - return platform::GetCUDADeviceCount(); + return platform::GetGPUDeviceCount(); #else return 0UL; #endif diff --git a/paddle/fluid/operators/conv_cudnn_helper.h b/paddle/fluid/operators/conv_cudnn_helper.h index 
f4183bf570926da0f95048c32dba9bdbe9bde0ff..a783a619473ef2e73246daac79cdad2c2e6cf533 100644 --- a/paddle/fluid/operators/conv_cudnn_helper.h +++ b/paddle/fluid/operators/conv_cudnn_helper.h @@ -25,7 +25,8 @@ limitations under the License. */ #include "paddle/fluid/operators/conv_cudnn_op_cache.h" #include "paddle/fluid/operators/eigen/eigen_function.h" #include "paddle/fluid/platform/cuda_graph_with_memory_pool.h" -#include "paddle/fluid/platform/cudnn_desc.h" +#include "paddle/fluid/platform/device/gpu/gpu_dnn.h" + namespace paddle { namespace operators { @@ -98,7 +99,7 @@ std::ostream& operator<<(std::ostream& out, const std::vector& v) { inline int MaxBwdFilterAlgos(cudnnHandle_t cudnn_handle) { int max_algos = 0; #if CUDNN_VERSION_MIN(7, 0, 1) - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnGetConvolutionBackwardFilterAlgorithmMaxCount( cudnn_handle, &max_algos)); #endif @@ -176,22 +177,22 @@ static void SetConvMathType(const framework::ExecutionContext& ctx, #if CUDA_VERSION >= 9000 && CUDNN_VERSION_MIN(7, 0, 1) auto& dev_ctx = ctx.template device_context(); if (dev_ctx.GetComputeCapability() >= 70 && dtype == CUDNN_DATA_HALF) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetConvolutionMathType( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetConvolutionMathType( cdesc.desc(), CUDNN_TENSOR_OP_MATH)); VLOG(5) << "use cudnn_tensor_op_math"; #if CUDA_VERSION >= 11000 #if CUDNN_VERSION_MIN(8, 1, 0) } else if (dev_ctx.GetComputeCapability() >= 80 && dtype == CUDNN_DATA_BFLOAT16) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetConvolutionMathType( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetConvolutionMathType( cdesc.desc(), CUDNN_TENSOR_OP_MATH)); #endif // CUDNN_VERSION_MIN(8, 1, 0) } else if (dtype == CUDNN_DATA_FLOAT && !cdesc.allow_tf32_) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetConvolutionMathType( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetConvolutionMathType( cdesc.desc(), CUDNN_FMA_MATH)); #endif // CUDA_VERSION >= 11000 } else { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetConvolutionMathType( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetConvolutionMathType( cdesc.desc(), CUDNN_DEFAULT_MATH)); VLOG(5) << "NOT use cudnn_tensor_op_math"; } @@ -245,7 +246,7 @@ struct SearchAlgorithm { int perf_count; int best_algo_idx = 0; std::unique_ptr perf_results(new perf_t[kNUM_CUDNN_FWD_ALGS]); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnGetConvolutionForwardAlgorithm_v7( args.handle, args.idesc.desc(), args.wdesc.desc(), args.cdesc.desc(), args.odesc.desc(), kNUM_CUDNN_FWD_ALGS, @@ -264,7 +265,7 @@ struct SearchAlgorithm { "the workspace size request(" << workspace_size << ") exceeds the limit(" << workspace_size_limit << ")"; - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnGetConvolutionForwardAlgorithm( args.handle, args.idesc.desc(), args.wdesc.desc(), args.cdesc.desc(), args.odesc.desc(), @@ -273,7 +274,7 @@ struct SearchAlgorithm { #endif } #else - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnGetConvolutionForwardAlgorithm( args.handle, args.idesc.desc(), args.wdesc.desc(), args.cdesc.desc(), args.odesc.desc(), @@ -306,7 +307,7 @@ struct SearchAlgorithm { std::array perf_stat; auto cudnn_find_func = [&](void* cudnn_workspace_ptr) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( 
platform::dynload::cudnnFindConvolutionForwardAlgorithmEx( args.handle, args.idesc.desc(), args.x->data(), args.wdesc.desc(), args.w->data(), args.cdesc.desc(), @@ -332,7 +333,7 @@ struct SearchAlgorithm { static size_t GetWorkspaceSize(const ConvArgs& args, algo_t algo) { size_t workspace_size = 0; - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnGetConvolutionForwardWorkspaceSize( args.handle, args.idesc.desc(), args.wdesc.desc(), args.cdesc.desc(), args.odesc.desc(), algo, &workspace_size)); @@ -362,7 +363,7 @@ struct SearchAlgorithm { int best_algo_idx = 0; std::unique_ptr perf_results( new perf_t[kNUM_CUDNN_BWD_DATA_ALGS]); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnGetConvolutionBackwardDataAlgorithm_v7( args.handle, args.wdesc.desc(), args.odesc.desc(), args.cdesc.desc(), args.idesc.desc(), kNUM_CUDNN_BWD_DATA_ALGS, @@ -395,7 +396,7 @@ struct SearchAlgorithm { "the workspace size request(" << workspace_size << ") exceeds the limit(" << workspace_size_limit << ")"; - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnGetConvolutionBackwardDataAlgorithm( args.handle, args.wdesc.desc(), args.odesc.desc(), args.cdesc.desc(), args.idesc.desc(), @@ -404,7 +405,7 @@ struct SearchAlgorithm { #endif } #else - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnGetConvolutionBackwardDataAlgorithm( args.handle, args.wdesc.desc(), args.odesc.desc(), args.cdesc.desc(), args.idesc.desc(), @@ -435,7 +436,7 @@ struct SearchAlgorithm { std::array perf_stat; auto cudnn_find_func = [&](void* cudnn_workspace_ptr) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload:: cudnnFindConvolutionBackwardDataAlgorithmEx( args.handle, args.wdesc.desc(), args.w->data(), @@ -464,7 +465,7 @@ struct SearchAlgorithm { static size_t GetWorkspaceSize(const ConvArgs& args, algo_t algo) { size_t workspace_size = 0; - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnGetConvolutionBackwardDataWorkspaceSize( args.handle, args.wdesc.desc(), args.odesc.desc(), args.cdesc.desc(), args.idesc.desc(), algo, &workspace_size)); @@ -496,7 +497,7 @@ struct SearchAlgorithm { int best_algo_idx = 0; std::unique_ptr perf_results( new perf_t[kNUM_CUDNN_BWD_FILTER_ALGS]); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnGetConvolutionBackwardFilterAlgorithm_v7( args.handle, args.idesc.desc(), args.odesc.desc(), args.cdesc.desc(), args.wdesc.desc(), kNUM_CUDNN_BWD_FILTER_ALGS, @@ -515,7 +516,7 @@ struct SearchAlgorithm { "the workspace size request(" << workspace_size << ") exceeds the limit(" << workspace_size_limit << ")"; - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnGetConvolutionBackwardFilterAlgorithm( args.handle, args.idesc.desc(), args.odesc.desc(), args.cdesc.desc(), args.wdesc.desc(), @@ -524,7 +525,7 @@ struct SearchAlgorithm { #endif } #else - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnGetConvolutionBackwardFilterAlgorithm( args.handle, args.idesc.desc(), args.odesc.desc(), args.cdesc.desc(), args.wdesc.desc(), @@ -553,7 +554,7 @@ struct SearchAlgorithm { int returned_algo_count; std::array perf_stat; auto cudnn_find_func = [&](void* cudnn_workspace_ptr) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload:: cudnnFindConvolutionBackwardFilterAlgorithmEx( args.handle, args.idesc.desc(), 
args.x->data(), @@ -584,7 +585,7 @@ struct SearchAlgorithm { algo_t chosen_algo; std::vector perf_results(max_algos); int actual_algos = 0; - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload:: cudnnFindConvolutionBackwardFilterAlgorithm( args.handle, args.idesc.desc(), args.odesc.desc(), @@ -605,7 +606,7 @@ struct SearchAlgorithm { static size_t GetWorkspaceSize(const ConvArgs& args, algo_t algo) { platform::CUDAGraphCaptureModeGuard guard; size_t workspace_size = 0; - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnGetConvolutionBackwardFilterWorkspaceSize( args.handle, args.idesc.desc(), args.odesc.desc(), args.cdesc.desc(), args.wdesc.desc(), algo, &workspace_size)); diff --git a/paddle/fluid/operators/conv_cudnn_op.cu b/paddle/fluid/operators/conv_cudnn_op.cu index 275e81fc7f33a072ce2beca9ec02d37dff4a62c9..566e99c357fbed252bf15351663d7596c9c5715d 100644 --- a/paddle/fluid/operators/conv_cudnn_op.cu +++ b/paddle/fluid/operators/conv_cudnn_op.cu @@ -261,9 +261,8 @@ class CUDNNConvOpKernel : public framework::OpKernel { // cudnn 7 can support groups, no need to do it manually // FIXME(typhoonzero): find a better way to disable groups // rather than setting it to 1. - PADDLE_ENFORCE_CUDA_SUCCESS( - platform::dynload::cudnnSetConvolutionGroupCount(args.cdesc.desc(), - groups)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetConvolutionGroupCount( + args.cdesc.desc(), groups)); groups = 1; #endif #ifdef PADDLE_WITH_HIP @@ -328,7 +327,7 @@ class CUDNNConvOpKernel : public framework::OpKernel { #ifdef PADDLE_WITH_HIP workspace_handle.RunFunc( [&](void* workspace_ptr) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenConvolutionForward( handle, &alpha, args.idesc.desc(), input_data, args.wdesc.desc(), filter_data, args.cdesc.desc(), algo, @@ -340,7 +339,7 @@ class CUDNNConvOpKernel : public framework::OpKernel { for (int i = 0; i < groups; i++) { workspace_handle.RunFunc( [&](void* workspace_ptr) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnConvolutionForward( handle, &alpha, args.idesc.desc(), input_data + i * group_offset_in, args.wdesc.desc(), @@ -718,7 +717,7 @@ class CUDNNConvGradOpKernel : public framework::OpKernel { T* temp_tensor_data = temp_tensor.mutable_data(ctx.GetPlace()); workspace_handle.RunFunc( [&](void* cudnn_workspace_ptr) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenConvolutionBackwardData( handle, &alpha, args1.odesc.desc(), output_grad_data, args1.wdesc.desc(), filter_data, args1.cdesc.desc(), @@ -726,7 +725,7 @@ class CUDNNConvGradOpKernel : public framework::OpKernel { cudnn_workspace_ptr, workspace_size)); }, workspace_size); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenOpTensor( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenOpTensor( handle, miopenTensorOpAdd, &alpha, args1.idesc.desc(), transformed_input_grad_data, &alpha, args1.idesc.desc(), temp_tensor_data, &beta, args1.idesc.desc(), @@ -734,7 +733,7 @@ class CUDNNConvGradOpKernel : public framework::OpKernel { } else { workspace_handle.RunFunc( [&](void* cudnn_workspace_ptr) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenConvolutionBackwardData( handle, &alpha, args1.odesc.desc(), output_grad_data, args1.wdesc.desc(), filter_data, args1.cdesc.desc(), @@ -749,7 +748,7 @@ class CUDNNConvGradOpKernel : public framework::OpKernel { for (int i = 0; i < groups; i++) { 
workspace_handle.RunFunc( [&](void* cudnn_workspace_ptr) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnConvolutionBackwardData( handle, &alpha, args1.wdesc.desc(), filter_data + i * group_offset_filter, args1.odesc.desc(), @@ -796,7 +795,7 @@ class CUDNNConvGradOpKernel : public framework::OpKernel { #ifdef PADDLE_WITH_HIP workspace_handle.RunFunc( [&](void* cudnn_workspace_ptr) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenConvolutionBackwardWeights( handle, &alpha, args2.odesc.desc(), output_grad_data, args2.idesc.desc(), input_data, args2.cdesc.desc(), @@ -808,7 +807,7 @@ class CUDNNConvGradOpKernel : public framework::OpKernel { for (int i = 0; i < groups; i++) { workspace_handle.RunFunc( [&](void* cudnn_workspace_ptr) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnConvolutionBackwardFilter( handle, &alpha, args2.idesc.desc(), input_data + i * group_offset_in, args2.odesc.desc(), @@ -1228,7 +1227,7 @@ class CUDNNConvDoubleGradOpKernel : public framework::OpKernel { #ifdef PADDLE_WITH_HIP wkspace_handle.RunFunc( [&](void* workspace_ptr) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenConvolutionForward( handle, &alpha, args1.idesc.desc(), ddx, args1.wdesc.desc(), w, args1.cdesc.desc(), fwd_algo1, @@ -1240,7 +1239,7 @@ class CUDNNConvDoubleGradOpKernel : public framework::OpKernel { for (int i = 0; i < groups; i++) { wkspace_handle.RunFunc( [&](void* workspace_ptr) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnConvolutionForward( handle, &alpha, args1.idesc.desc(), ddx + i * group_offset_in, args1.wdesc.desc(), @@ -1258,7 +1257,7 @@ class CUDNNConvDoubleGradOpKernel : public framework::OpKernel { // MIOPEN ONLY support beta to be 0.0f wkspace_handle.RunFunc( [&](void* workspace_ptr) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenConvolutionForward( handle, &alpha, args2.idesc.desc(), x, args2.wdesc.desc(), ddw, args2.cdesc.desc(), fwd_algo2, &beta, @@ -1270,7 +1269,7 @@ class CUDNNConvDoubleGradOpKernel : public framework::OpKernel { for (int i = 0; i < groups; i++) { wkspace_handle.RunFunc( [&](void* workspace_ptr) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnConvolutionForward( handle, &alpha, args2.idesc.desc(), x + i * group_offset_in, args2.wdesc.desc(), @@ -1294,7 +1293,7 @@ class CUDNNConvDoubleGradOpKernel : public framework::OpKernel { #ifdef PADDLE_WITH_HIP wkspace_handle.RunFunc( [&](void* workspace_ptr) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenConvolutionBackwardWeights( handle, &alpha, args3.odesc.desc(), transformed_dy_channel, args3.idesc.desc(), ddx, args3.cdesc.desc(), filter_algo, @@ -1306,7 +1305,7 @@ class CUDNNConvDoubleGradOpKernel : public framework::OpKernel { for (int i = 0; i < groups; i++) { wkspace_handle.RunFunc( [&](void* workspace_ptr) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnConvolutionBackwardFilter( handle, &alpha, args3.idesc.desc(), ddx + i * group_offset_in, args3.odesc.desc(), @@ -1325,7 +1324,7 @@ class CUDNNConvDoubleGradOpKernel : public framework::OpKernel { #ifdef PADDLE_WITH_HIP wkspace_handle.RunFunc( [&](void* workspace_ptr) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenConvolutionBackwardData( handle, &alpha, 
args4.odesc.desc(), transformed_dy_channel, args4.wdesc.desc(), ddw, args4.cdesc.desc(), data_algo, @@ -1337,7 +1336,7 @@ class CUDNNConvDoubleGradOpKernel : public framework::OpKernel { for (int i = 0; i < groups; i++) { wkspace_handle.RunFunc( [&](void* workspace_ptr) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnConvolutionBackwardData( handle, &alpha, args4.wdesc.desc(), ddw + i * group_offset_filter, args4.odesc.desc(), diff --git a/paddle/fluid/operators/conv_cudnn_op_cache.h b/paddle/fluid/operators/conv_cudnn_op_cache.h index 23a471cfa006746c5762fa7169248cbae60a7899..291e5f92f322cba2740078d0d055dc9ccf98b1b1 100644 --- a/paddle/fluid/operators/conv_cudnn_op_cache.h +++ b/paddle/fluid/operators/conv_cudnn_op_cache.h @@ -18,11 +18,7 @@ limitations under the License. */ #include #include #include "paddle/fluid/framework/operator.h" -#ifdef PADDLE_WITH_HIP -#include "paddle/fluid/platform/miopen_helper.h" -#else -#include "paddle/fluid/platform/cudnn_helper.h" -#endif +#include "paddle/fluid/platform/device/gpu/gpu_dnn.h" DECLARE_uint64(conv_workspace_size_limit); DECLARE_bool(cudnn_exhaustive_search); diff --git a/paddle/fluid/operators/conv_miopen_helper.h b/paddle/fluid/operators/conv_miopen_helper.h index befe09c8e6beb3d911521e4ff78f3427a3b0dd78..9c9795143eb78dc5c1b22ec792d8753f915c976e 100644 --- a/paddle/fluid/operators/conv_miopen_helper.h +++ b/paddle/fluid/operators/conv_miopen_helper.h @@ -23,7 +23,7 @@ limitations under the License. */ #include "paddle/fluid/framework/conv_search_cache.h" #include "paddle/fluid/framework/operator_kernel_configs.h" #include "paddle/fluid/operators/conv_cudnn_op_cache.h" -#include "paddle/fluid/platform/miopen_desc.h" +#include "paddle/fluid/platform/device/gpu/gpu_dnn.h" namespace paddle { namespace operators { @@ -137,7 +137,7 @@ struct SearchAlgorithm { int find_count; miopenConvAlgoPerf_t find_result; auto cudnn_find_func = [&](void* cudnn_workspace_ptr) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenFindConvolutionForwardAlgorithm( args.handle, args.idesc.desc(), args.x->data(), args.wdesc.desc(), args.w->data(), args.cdesc.desc(), @@ -154,7 +154,7 @@ struct SearchAlgorithm { static size_t GetWorkspaceSize(const ConvArgs& args) { size_t workspace_size = 0; - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenConvolutionForwardGetWorkSpaceSize( args.handle, args.wdesc.desc(), args.idesc.desc(), args.cdesc.desc(), args.odesc.desc(), &workspace_size)); @@ -179,7 +179,7 @@ struct SearchAlgorithm { int find_count; miopenConvAlgoPerf_t find_result; auto cudnn_find_func = [&](void* cudnn_workspace_ptr) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenFindConvolutionBackwardDataAlgorithm( args.handle, args.odesc.desc(), args.o->data(), args.wdesc.desc(), args.w->data(), args.cdesc.desc(), @@ -196,7 +196,7 @@ struct SearchAlgorithm { static size_t GetWorkspaceSize(const ConvArgs& args) { size_t workspace_size = 0; - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenConvolutionBackwardDataGetWorkSpaceSize( args.handle, args.odesc.desc(), args.wdesc.desc(), args.cdesc.desc(), args.idesc.desc(), &workspace_size)); @@ -221,7 +221,7 @@ struct SearchAlgorithm { int find_count; miopenConvAlgoPerf_t find_result; auto cudnn_find_func = [&](void* cudnn_workspace_ptr) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( 
platform::dynload::miopenFindConvolutionBackwardWeightsAlgorithm( args.handle, args.odesc.desc(), args.o->data(), args.idesc.desc(), args.x->data(), args.cdesc.desc(), @@ -238,7 +238,7 @@ struct SearchAlgorithm { static size_t GetWorkspaceSize(const ConvArgs& args) { size_t workspace_size = 0; - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenConvolutionBackwardWeightsGetWorkSpaceSize( args.handle, args.odesc.desc(), args.idesc.desc(), args.cdesc.desc(), args.wdesc.desc(), &workspace_size)); diff --git a/paddle/fluid/operators/conv_op.cc b/paddle/fluid/operators/conv_op.cc index 1610705c4694cb707f9ebce6e114126c71724513..41f6f75200697aea9ed3c361382312b2a81d14c9 100644 --- a/paddle/fluid/operators/conv_op.cc +++ b/paddle/fluid/operators/conv_op.cc @@ -20,13 +20,7 @@ limitations under the License. */ #include "paddle/fluid/framework/op_version_registry.h" -#ifdef PADDLE_WITH_CUDA -#include "paddle/fluid/platform/cudnn_helper.h" -#endif - -#ifdef PADDLE_WITH_HIP -#include "paddle/fluid/platform/miopen_helper.h" -#endif +#include "paddle/fluid/platform/device/gpu/gpu_dnn.h" #ifdef PADDLE_WITH_MKLDNN #include "paddle/fluid/platform/mkldnn_helper.h" @@ -222,7 +216,7 @@ framework::OpKernelType ConvOp::GetExpectedKernelType( if (input_data_type == framework::proto::VarType::BF16 && library == framework::LibraryType::kCUDNN) { PADDLE_ENFORCE_GE( - platform::CudnnVersion(), 8100, + platform::DnnVersion(), 8100, platform::errors::InvalidArgument( "bfloat16 can only be used when CUDNN_VERSION >= 8100")); } diff --git a/paddle/fluid/operators/conv_shift_op.cu b/paddle/fluid/operators/conv_shift_op.cu index 314d33310588ed960eecaf1a0319ebf56d925c55..2289104d2dbfbf77ef492db86f562d685f8a5f9a 100644 --- a/paddle/fluid/operators/conv_shift_op.cu +++ b/paddle/fluid/operators/conv_shift_op.cu @@ -14,7 +14,7 @@ limitations under the License. 
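// ---------------------------------------------------------------------------
// Editor's note (not part of the patch): the include changes in
// conv_cudnn_op_cache.h, conv_miopen_helper.h and conv_op.cc above replace the
// per-backend "#ifdef PADDLE_WITH_HIP -> miopen_helper.h / else ->
// cudnn_helper.h" blocks with the single umbrella header
// paddle/fluid/platform/device/gpu/gpu_dnn.h, and call sites move to
// backend-neutral names (platform::DnnVersion() instead of CudnnVersion(),
// platform::GetGPUDeviceCount() instead of GetCUDADeviceCount()). Presumably
// gpu_dnn.h performs the same PADDLE_WITH_HIP dispatch internally, selecting
// the MIOpen- or cuDNN-based helpers depending on the build; that is an
// assumption inferred from the deleted blocks, not verified against the tree.
// ---------------------------------------------------------------------------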
*/ #include "paddle/fluid/operators/conv_shift_op.h" #include "paddle/fluid/operators/math/math_function.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/conv_transpose_cudnn_op.cu b/paddle/fluid/operators/conv_transpose_cudnn_op.cu index c4cd5854c0f78ae970ee953f9cb46f5c1b840630..19c0be44a1d0b702e2aaf544029c354ec721339c 100644 --- a/paddle/fluid/operators/conv_transpose_cudnn_op.cu +++ b/paddle/fluid/operators/conv_transpose_cudnn_op.cu @@ -265,7 +265,7 @@ class CUDNNConvTransposeOpKernel : public framework::OpKernel { for (int g = 0; g < groups; g++) { #ifdef PADDLE_WITH_HIP auto cudnn_func = [&](void* cudnn_workspace) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenConvolutionBackwardData( handle, &alpha, args.odesc.desc(), input_data + input_offset * g, args.wdesc.desc(), @@ -275,7 +275,7 @@ class CUDNNConvTransposeOpKernel : public framework::OpKernel { }; #else // PADDLE_WITH_HIP auto cudnn_func = [&](void* cudnn_workspace) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnConvolutionBackwardData( handle, &alpha, args.wdesc.desc(), filter_data + filter_offset * g, args.odesc.desc(), @@ -549,7 +549,7 @@ class CUDNNConvTransposeGradOpKernel : public framework::OpKernel { for (int g = 0; g < groups; g++) { #ifdef PADDLE_WITH_HIP auto cudnn_func = [&](void* cudnn_workspace) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenConvolutionForward( handle, &alpha, args1.idesc.desc(), output_grad_data + output_grad_offset * g, args1.wdesc.desc(), @@ -560,13 +560,12 @@ class CUDNNConvTransposeGradOpKernel : public framework::OpKernel { }; #else // PADDLE_WITH_HIP auto cudnn_func = [&](void* cudnn_workspace) { - PADDLE_ENFORCE_CUDA_SUCCESS( - platform::dynload::cudnnConvolutionForward( - handle, &alpha, args1.idesc.desc(), - output_grad_data + output_grad_offset * g, args1.wdesc.desc(), - filter_data + filter_offset * g, args1.cdesc.desc(), - data_algo, cudnn_workspace, workspace_size, &beta, - args1.odesc.desc(), input_grad_data + input_offset * g)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnConvolutionForward( + handle, &alpha, args1.idesc.desc(), + output_grad_data + output_grad_offset * g, args1.wdesc.desc(), + filter_data + filter_offset * g, args1.cdesc.desc(), data_algo, + cudnn_workspace, workspace_size, &beta, args1.odesc.desc(), + input_grad_data + input_offset * g)); }; #endif // PADDLE_WITH_HIP workspace_handle.RunFunc(cudnn_func, workspace_size); @@ -598,7 +597,7 @@ class CUDNNConvTransposeGradOpKernel : public framework::OpKernel { for (int g = 0; g < groups; g++) { #ifdef PADDLE_WITH_HIP auto cudnn_func = [&](void* cudnn_workspace) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenConvolutionBackwardWeights( handle, &alpha, args2.odesc.desc(), input_data + input_offset * g, args2.idesc.desc(), @@ -609,7 +608,7 @@ class CUDNNConvTransposeGradOpKernel : public framework::OpKernel { }; #else // PADDLE_WITH_HIP auto cudnn_func = [&](void* cudnn_workspace) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnConvolutionBackwardFilter( handle, &alpha, args2.idesc.desc(), output_grad_data + output_grad_offset * g, args2.odesc.desc(), @@ -1054,7 +1053,7 @@ class CUDNNConvTransposeDoubleGradOpKernel : public framework::OpKernel { #ifdef PADDLE_WITH_HIP 
wkspace_handle.RunFunc( [&](void* workspace_ptr) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenConvolutionBackwardData( handle, &alpha, args1.odesc.desc(), ddx + i * group_offset_in, args1.wdesc.desc(), @@ -1067,7 +1066,7 @@ class CUDNNConvTransposeDoubleGradOpKernel : public framework::OpKernel { #else // PADDLE_WITH_HIP wkspace_handle.RunFunc( [&](void* workspace_ptr) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnConvolutionBackwardData( handle, &alpha, args1.wdesc.desc(), w + i * group_offset_filter, args1.odesc.desc(), @@ -1089,7 +1088,7 @@ class CUDNNConvTransposeDoubleGradOpKernel : public framework::OpKernel { T* conv_x_ddw_data = conv_x_ddw.mutable_data(ctx.GetPlace()); wkspace_handle.RunFunc( [&](void* workspace_ptr) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenConvolutionBackwardData( handle, &alpha, args2.odesc.desc(), x + i * group_offset_in, args2.wdesc.desc(), @@ -1099,7 +1098,7 @@ class CUDNNConvTransposeDoubleGradOpKernel : public framework::OpKernel { workspace_size)); }, workspace_size); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenOpTensor( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenOpTensor( handle, miopenTensorOpAdd, &alpha, args2.idesc.desc(), transformed_ddy_channel + i * group_offset_out, &alpha, args2.idesc.desc(), conv_x_ddw_data + i * group_offset_out, &beta, @@ -1108,7 +1107,7 @@ class CUDNNConvTransposeDoubleGradOpKernel : public framework::OpKernel { #else // PADDLE_WITH_HIP wkspace_handle.RunFunc( [&](void* workspace_ptr) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnConvolutionBackwardData( handle, &alpha, args2.wdesc.desc(), ddw + i * group_offset_filter, args2.odesc.desc(), @@ -1152,7 +1151,7 @@ class CUDNNConvTransposeDoubleGradOpKernel : public framework::OpKernel { #ifdef PADDLE_WITH_HIP wkspace_handle.RunFunc( [&](void* workspace_ptr) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenConvolutionBackwardWeights( handle, &alpha, args3.odesc.desc(), ddx + i * group_offset_in, args3.idesc.desc(), @@ -1165,7 +1164,7 @@ class CUDNNConvTransposeDoubleGradOpKernel : public framework::OpKernel { #else // PADDLE_WITH_HIP wkspace_handle.RunFunc( [&](void* workspace_ptr) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnConvolutionBackwardFilter( handle, &alpha, args3.idesc.desc(), transformed_dy_channel + i * group_offset_out, @@ -1185,7 +1184,7 @@ class CUDNNConvTransposeDoubleGradOpKernel : public framework::OpKernel { #ifdef PADDLE_WITH_HIP wkspace_handle.RunFunc( [&](void* workspace_ptr) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenConvolutionForward( handle, &alpha, args4.idesc.desc(), transformed_dy_channel + i * group_offset_out, @@ -1198,7 +1197,7 @@ class CUDNNConvTransposeDoubleGradOpKernel : public framework::OpKernel { #else // PADDLE_WITH_HIP wkspace_handle.RunFunc( [&](void* workspace_ptr) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnConvolutionForward( handle, &alpha, args4.idesc.desc(), transformed_dy_channel + i * group_offset_out, diff --git a/paddle/fluid/operators/cudnn_lstm_cache.h b/paddle/fluid/operators/cudnn_lstm_cache.h index b7859237e737a1af6bfc00c2fd6b5a7b610374e4..5451cf815cae331b1cf8082dcbe31c1255527173 100644 --- a/paddle/fluid/operators/cudnn_lstm_cache.h +++ 
b/paddle/fluid/operators/cudnn_lstm_cache.h @@ -16,7 +16,7 @@ limitations under the License. */ #include #include "paddle/fluid/framework/tensor.h" -#include "paddle/fluid/platform/cudnn_helper.h" +#include "paddle/fluid/platform/device/gpu/gpu_dnn.h" #include "paddle/fluid/platform/dynload/cudnn.h" namespace paddle { @@ -77,7 +77,7 @@ class ScopedRNNBase { // ------------------- cudnn dropout descriptors --------------------- size_t state_size; if (!initialized_) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDropoutGetStatesSize(handle, &state_size)); dropout_state->mutable_data({static_cast(state_size)}, place); @@ -86,7 +86,7 @@ class ScopedRNNBase { dropout_state, seed_, state_size); // ------------------- cudnn rnn descriptors --------------------- - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetRNNDescriptor_v6( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetRNNDescriptor_v6( handle, rnn_desc_.desc(), hidden_size_, num_layers_, dropout_desc_.desc(), CUDNN_LINEAR_INPUT, is_bidirec_ ? CUDNN_BIDIRECTIONAL : CUDNN_UNIDIRECTIONAL, CUDNN_LSTM, @@ -94,14 +94,14 @@ class ScopedRNNBase { #if CUDNN_VERSION >= 7201 if (!sequence_length.empty()) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetRNNPaddingMode( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetRNNPaddingMode( rnn_desc_.desc(), CUDNN_RNN_PADDED_IO_ENABLED)); } #endif // ------------------- cudnn weights_size --------------------- size_t weights_size_; - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnGetRNNParamsSize( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnGetRNNParamsSize( handle, rnn_desc_.desc(), x_descs_[0], &weights_size_, cudnn_type)); PADDLE_ENFORCE_EQ( weights_size_, sizeof(T) * weight_numel_, @@ -113,10 +113,10 @@ class ScopedRNNBase { std::vector dim_w = {dim_tmp, 1, 1}; weight_desc_.descriptor(layout, dim_w); // ------------------- cudnn workspace, reserve size --------------------- - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnGetRNNWorkspaceSize( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnGetRNNWorkspaceSize( handle, rnn_desc_.desc(), seq_length_, x_descs_.data(), workspace_size)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnGetRNNTrainingReserveSize( handle, rnn_desc_.desc(), seq_length_, x_descs_.data(), reserve_size)); diff --git a/paddle/fluid/operators/cudnn_lstm_op.cu.cc b/paddle/fluid/operators/cudnn_lstm_op.cu.cc index 27f64b41948be9190dcc32c35a484ed1fc32baca..6f696afa23886a0a1b900a89ac758147b002bca2 100644 --- a/paddle/fluid/operators/cudnn_lstm_op.cu.cc +++ b/paddle/fluid/operators/cudnn_lstm_op.cu.cc @@ -111,14 +111,14 @@ void LSTMInferece(const bool &has_seq_length, const cudnnHandle_t &handle, // for inference // This interface is used when the input/output is unpadded. 
#ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenRNNForwardInference( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenRNNForwardInference( handle, rnn->rnn_desc(), seq_length, rnn->x_descs(), x_data, rnn->init_h_desc(), init_h_data, rnn->init_c_desc(), init_c_data, rnn->weight_desc(), w_data, rnn->y_descs(), out_data, rnn->last_h_desc(), last_h_data, rnn->last_c_desc(), last_c_data, workspace_data->data(), workspace_size)); #else - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnRNNForwardInference( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnRNNForwardInference( handle, rnn->rnn_desc(), seq_length, rnn->x_descs(), x_data, rnn->init_h_desc(), init_h_data, rnn->init_c_desc(), init_c_data, rnn->weight_desc(), w_data, rnn->y_descs(), out_data, @@ -129,7 +129,7 @@ void LSTMInferece(const bool &has_seq_length, const cudnnHandle_t &handle, #if !defined(PADDLE_WITH_HIP) && CUDNN_VERSION >= 7201 // for inference // This interface is used when the input/output is padded. - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnRNNForwardInferenceEx( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnRNNForwardInferenceEx( handle, rnn->rnn_desc(), rnn->x_seq_desc(), x_data, rnn->init_h_desc(), init_h_data, rnn->init_c_desc(), init_c_data, rnn->weight_desc(), w_data, rnn->y_seq_desc(), out_data, rnn->last_h_desc(), last_h_data, @@ -277,7 +277,7 @@ class CudnnLSTMGPUKernel : public framework::OpKernel { // for train // This interface is used when the input/output is unpadded. #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenRNNForwardTraining( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenRNNForwardTraining( handle, rnn.rnn_desc(), seq_length, rnn.x_descs(), x_data, rnn.init_h_desc(), init_h_data, rnn.init_c_desc(), init_c_data, rnn.weight_desc(), w_data, rnn.y_descs(), out_data, @@ -285,7 +285,7 @@ class CudnnLSTMGPUKernel : public framework::OpKernel { workspace_data_.data(), workspace_size, reserve_data, reserve_size)); #else - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnRNNForwardTraining( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnRNNForwardTraining( handle, rnn.rnn_desc(), seq_length, rnn.x_descs(), x_data, rnn.init_h_desc(), init_h_data, rnn.init_c_desc(), init_c_data, rnn.weight_desc(), w_data, rnn.y_descs(), out_data, @@ -297,15 +297,13 @@ class CudnnLSTMGPUKernel : public framework::OpKernel { #if !defined(PADDLE_WITH_HIP) && CUDNN_VERSION >= 7201 // for train // This interface is used when the input/output is padded. 
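The hunks above and below swap the CUDA-specific PADDLE_ENFORCE_CUDA_SUCCESS check for the backend-neutral PADDLE_ENFORCE_GPU_SUCCESS around the cuDNN/MIOpen RNN calls; as the rest of this patch shows, the same macro is also applied to NCCL and runtime-API calls. As a rough, self-contained sketch of the idea only (GPU_ENFORCE_SUCCESS, GPU_SUCCESS_CODE and gpuGetErrorString below are hypothetical local names, not Paddle's implementation, and the sketch covers only the runtime-API error type, not cuDNN/NCCL status codes):

    #include <cstdio>
    #include <cstdlib>
    #ifdef PADDLE_WITH_HIP
    #include <hip/hip_runtime.h>
    using gpuError_t = hipError_t;
    #define GPU_SUCCESS_CODE hipSuccess
    #define gpuGetErrorString hipGetErrorString
    #else
    #include <cuda_runtime.h>
    using gpuError_t = cudaError_t;
    #define GPU_SUCCESS_CODE cudaSuccess
    #define gpuGetErrorString cudaGetErrorString
    #endif

    // Hypothetical stand-in for a unified success check: evaluate the call once,
    // and abort with a readable message if it did not succeed.
    #define GPU_ENFORCE_SUCCESS(expr)                                        \
      do {                                                                   \
        gpuError_t err_ = (expr);                                            \
        if (err_ != GPU_SUCCESS_CODE) {                                      \
          std::fprintf(stderr, "GPU call failed at %s:%d: %s\n", __FILE__,   \
                       __LINE__, gpuGetErrorString(err_));                   \
          std::abort();                                                      \
        }                                                                    \
      } while (0)

With such a wrapper, call sites read the same on both builds, which is exactly the effect the renamed macro has in the hunks here.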
- PADDLE_ENFORCE_CUDA_SUCCESS( - platform::dynload::cudnnRNNForwardTrainingEx( - handle, rnn.rnn_desc(), rnn.x_seq_desc(), x_data, - rnn.init_h_desc(), init_h_data, rnn.init_c_desc(), init_c_data, - rnn.weight_desc(), w_data, rnn.y_seq_desc(), out_data, - rnn.last_h_desc(), last_h_data, rnn.last_c_desc(), last_c_data, - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, - nullptr, workspace_data_.data(), workspace_size, - reserve_data, reserve_size)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnRNNForwardTrainingEx( + handle, rnn.rnn_desc(), rnn.x_seq_desc(), x_data, rnn.init_h_desc(), + init_h_data, rnn.init_c_desc(), init_c_data, rnn.weight_desc(), + w_data, rnn.y_seq_desc(), out_data, rnn.last_h_desc(), last_h_data, + rnn.last_c_desc(), last_c_data, nullptr, nullptr, nullptr, nullptr, + nullptr, nullptr, nullptr, nullptr, workspace_data_.data(), + workspace_size, reserve_data, reserve_size)); #else PADDLE_THROW(platform::errors::Unavailable( "The padded input is supported by " @@ -433,7 +431,7 @@ class CudnnLSTMGPUGradKernel : public framework::OpKernel { if (!has_seq_length) { // This interface is used when the input/output is unpadded. #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenRNNBackwardData( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenRNNBackwardData( handle, rnn.rnn_desc(), seq_length, rnn.y_descs(), out_data, rnn.y_descs(), out_grad_data, rnn.last_h_desc(), last_h_grad_data, rnn.last_c_desc(), last_c_grad_data, rnn.weight_desc(), weight_data, @@ -442,13 +440,13 @@ class CudnnLSTMGPUGradKernel : public framework::OpKernel { rnn.init_c_desc(), init_c_grad_data, workspace_data_.data(), workspace_size, const_cast(reserve_data), reserve_size)); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenRNNBackwardWeights( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenRNNBackwardWeights( handle, rnn.rnn_desc(), seq_length, rnn.x_descs(), input->data(), rnn.init_h_desc(), init_h->data(), rnn.y_descs(), out->data(), rnn.weight_desc(), weight_grad_data, workspace_data_.data(), workspace_size, const_cast(reserve_data), reserve_size)); #else - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnRNNBackwardData( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnRNNBackwardData( handle, rnn.rnn_desc(), seq_length, rnn.y_descs(), out_data, rnn.y_descs(), out_grad_data, rnn.last_h_desc(), last_h_grad_data, rnn.last_c_desc(), last_c_grad_data, rnn.weight_desc(), weight_data, @@ -457,7 +455,7 @@ class CudnnLSTMGPUGradKernel : public framework::OpKernel { rnn.init_c_desc(), init_c_grad_data, workspace_data_.data(), workspace_size, const_cast(reserve_data), reserve_size)); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnRNNBackwardWeights( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnRNNBackwardWeights( handle, rnn.rnn_desc(), seq_length, rnn.x_descs(), input->data(), rnn.init_h_desc(), init_h->data(), rnn.y_descs(), out->data(), workspace_data_.data(), workspace_size, rnn.weight_desc(), @@ -467,7 +465,7 @@ class CudnnLSTMGPUGradKernel : public framework::OpKernel { #if !defined(PADDLE_WITH_HIP) && CUDNN_VERSION >= 7201 // for train // This interface is used when the input/output is padded. 
- PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnRNNBackwardDataEx( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnRNNBackwardDataEx( handle, rnn.rnn_desc(), rnn.y_seq_desc(), out_data, rnn.y_seq_desc(), out_grad_data, nullptr, nullptr, rnn.last_h_desc(), last_h_grad_data, rnn.last_c_desc(), last_c_grad_data, rnn.weight_desc(), weight_data, @@ -477,7 +475,7 @@ class CudnnLSTMGPUGradKernel : public framework::OpKernel { workspace_data_.data(), workspace_size, const_cast(reserve_data), reserve_size)); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnRNNBackwardWeightsEx( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnRNNBackwardWeightsEx( handle, rnn.rnn_desc(), rnn.x_seq_desc(), input->data(), rnn.init_h_desc(), init_h->data(), rnn.y_seq_desc(), out->data(), workspace_data_.data(), workspace_size, diff --git a/paddle/fluid/operators/cudnn_rnn_cache.h b/paddle/fluid/operators/cudnn_rnn_cache.h index a6a23a91c76c02ec656ad1d13aa41c1d3d93c8fd..6c059257b94e8b6e77e0bcc76de592ea44827779 100644 --- a/paddle/fluid/operators/cudnn_rnn_cache.h +++ b/paddle/fluid/operators/cudnn_rnn_cache.h @@ -16,7 +16,7 @@ limitations under the License. */ #include #include "paddle/fluid/framework/tensor.h" -#include "paddle/fluid/platform/cudnn_helper.h" +#include "paddle/fluid/platform/device/gpu/gpu_dnn.h" namespace paddle { namespace operators { @@ -92,15 +92,15 @@ struct CudnnRNNCache { std::vector strides_y = {hidden_size_ * numDirections, 1, 1}; for (size_t i = 0; i < seq_length_; ++i) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&x_desc_[i])); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&y_desc_[i])); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( x_desc_[i], cudnn_type, 3, dims.data(), strides.data())); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( y_desc_[i], cudnn_type, 3, dims_y.data(), strides_y.data())); } @@ -108,78 +108,78 @@ struct CudnnRNNCache { hidden_size_}; std::vector strides_hx = {hidden_size_ * batch_size_, hidden_size_, 1}; - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&hx_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&cx_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&hy_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&cy_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&dhx_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&dcx_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&dhy_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&dcy_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( hx_desc_, cudnn_type, 3, dims_hx.data(), strides_hx.data())); - 
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( cx_desc_, cudnn_type, 3, dims_hx.data(), strides_hx.data())); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( hy_desc_, cudnn_type, 3, dims_hx.data(), strides_hx.data())); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( cy_desc_, cudnn_type, 3, dims_hx.data(), strides_hx.data())); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( dhx_desc_, cudnn_type, 3, dims_hx.data(), strides_hx.data())); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( dcx_desc_, cudnn_type, 3, dims_hx.data(), strides_hx.data())); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( dhy_desc_, cudnn_type, 3, dims_hx.data(), strides_hx.data())); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( dcy_desc_, cudnn_type, 3, dims_hx.data(), strides_hx.data())); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateDropoutDescriptor(&dropout_desc_)); size_t state_size; if (!initialized) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDropoutGetStatesSize(handle, &state_size)); dropout_state_->Resize({static_cast(state_size)}); uint8_t *dropout_state_data = dropout_state_->mutable_data(place); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetDropoutDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetDropoutDescriptor( dropout_desc_, handle, dropout_prob_, dropout_state_data, state_size, seed_)); } else { uint8_t *dropout_state_data = dropout_state_->data(); auto dropout_state_dims = dropout_state_->dims(); state_size = dropout_state_dims[0]; - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnRestoreDropoutDescriptor( dropout_desc_, handle, dropout_prob_, dropout_state_data, state_size, 0)); } - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateRNNDescriptor(&rnn_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetRNNDescriptor_v6( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetRNNDescriptor_v6( handle, rnn_desc_, hidden_size_, num_layers_, dropout_desc_, CUDNN_LINEAR_INPUT, is_bidirec_ ? 
CUDNN_BIDIRECTIONAL : CUDNN_UNIDIRECTIONAL, CUDNN_LSTM, CUDNN_RNN_ALGO_STANDARD, cudnn_type)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateFilterDescriptor(&w_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateFilterDescriptor(&dw_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnGetRNNParamsSize( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnGetRNNParamsSize( handle, rnn_desc_, x_desc_[0], &weights_size_, cudnn_type)); PADDLE_ENFORCE_EQ( @@ -191,14 +191,14 @@ struct CudnnRNNCache { dim_w[0] = weights_size_ / cudnn_size; dim_w[1] = 1; dim_w[2] = 1; - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetFilterNdDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetFilterNdDescriptor( w_desc_, cudnn_type, CUDNN_TENSOR_NCHW, 3, dim_w)); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetFilterNdDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetFilterNdDescriptor( dw_desc_, cudnn_type, CUDNN_TENSOR_NCHW, 3, dim_w)); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnGetRNNWorkspaceSize( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnGetRNNWorkspaceSize( handle, rnn_desc_, seq_length_, x_desc_, &workspace_size_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnGetRNNTrainingReserveSize( handle, rnn_desc_, seq_length_, x_desc_, reserve_size_)); @@ -208,40 +208,40 @@ struct CudnnRNNCache { void release() { for (size_t i = 0; i < seq_length_; ++i) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(x_desc_[i])); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(y_desc_[i])); } delete[] x_desc_; delete[] y_desc_; - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(hx_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(cx_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(hy_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(cy_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(dhx_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(dcx_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(dhy_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(dcy_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyDropoutDescriptor(dropout_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyRNNDescriptor(rnn_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyFilterDescriptor(w_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyFilterDescriptor(dw_desc_)); } }; diff --git a/paddle/fluid/operators/cumsum_op.cu b/paddle/fluid/operators/cumsum_op.cu index d9e19eb7f61a6af10606763e0311bbc3468a7175..977e301f13663b82918dfd6814fbd85583644813 100644 --- a/paddle/fluid/operators/cumsum_op.cu +++ 
b/paddle/fluid/operators/cumsum_op.cu @@ -24,7 +24,7 @@ limitations under the License. */ namespace cub = hipcub; #endif #include "paddle/fluid/operators/cum_op.h" -#include "paddle/fluid/platform/gpu_launch_config.h" +#include "paddle/fluid/platform/device/gpu/gpu_launch_config.h" using Tensor = paddle::framework::Tensor; using LoDTensor = paddle::framework::LoDTensor; diff --git a/paddle/fluid/operators/cvm_op.cu b/paddle/fluid/operators/cvm_op.cu index 75976c968c9e8b7dafb172d55168a297ec875238..ad96dc24b9206c0e7c6bc172180cec829230dde1 100644 --- a/paddle/fluid/operators/cvm_op.cu +++ b/paddle/fluid/operators/cvm_op.cu @@ -16,7 +16,7 @@ limitations under the License. */ #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/cvm_op.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/data_norm_op.cu b/paddle/fluid/operators/data_norm_op.cu index 1043faa56f01bc337bd680625bed10024d760422..5d157a77b3dd16dcb031c6fc79a773568194f1f4 100644 --- a/paddle/fluid/operators/data_norm_op.cu +++ b/paddle/fluid/operators/data_norm_op.cu @@ -16,10 +16,10 @@ limitations under the License. */ #include #include "paddle/fluid/framework/data_layout.h" #include "paddle/fluid/operators/data_norm_op.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) #include "paddle/fluid/platform/collective_helper.h" -#include "paddle/fluid/platform/nccl_helper.h" +#include "paddle/fluid/platform/device/gpu/nccl_helper.h" #endif namespace paddle { @@ -176,23 +176,19 @@ class DataNormGradKernel if (need_sync_stats) { #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) auto comm = platform::NCCLCommContext::Instance().Get(0, ctx.GetPlace()); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclAllReduce( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce( reinterpret_cast(d_batch_size), reinterpret_cast(d_batch_size), C, platform::ToNCCLDataType(x->type()), ncclSum, comm->comm(), stream)); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclAllReduce( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce( reinterpret_cast(d_batch_sum), reinterpret_cast(d_batch_sum), C, platform::ToNCCLDataType(x->type()), ncclSum, comm->comm(), stream)); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclAllReduce( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce( reinterpret_cast(d_batch_square_sum), reinterpret_cast(d_batch_square_sum), C, platform::ToNCCLDataType(x->type()), ncclSum, comm->comm(), stream)); -#ifdef PADDLE_WITH_RCCL - PADDLE_ENFORCE_CUDA_SUCCESS(hipStreamSynchronize(stream)); -#else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamSynchronize(stream)); -#endif + platform::GpuStreamSync(stream); #else PADDLE_THROW(platform::errors::PreconditionNotMet( "PaddlePaddle should compile with GPU, and need_sync_stats connot be " diff --git a/paddle/fluid/operators/deformable_conv_op.cu b/paddle/fluid/operators/deformable_conv_op.cu index 67f5ee332eeb2f16356b7274bf5462543c03553d..924adafa4b8d80631ba540cd7051ddcd0d687114 100644 --- a/paddle/fluid/operators/deformable_conv_op.cu +++ b/paddle/fluid/operators/deformable_conv_op.cu @@ -27,7 +27,7 @@ #include "paddle/fluid/operators/deformable_conv_op.h" #include "paddle/fluid/operators/math/blas.h" #include 
"paddle/fluid/operators/math/math_function.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/deformable_conv_v1_op.cu b/paddle/fluid/operators/deformable_conv_v1_op.cu index e399a1fafdb71dacd2b4097f30684948fbbd7432..c252700528c492e8963b5d7f2d39659e71575bc6 100644 --- a/paddle/fluid/operators/deformable_conv_v1_op.cu +++ b/paddle/fluid/operators/deformable_conv_v1_op.cu @@ -30,7 +30,7 @@ #include "paddle/fluid/operators/deformable_conv_v1_op.h" #include "paddle/fluid/operators/math/blas.h" #include "paddle/fluid/operators/math/math_function.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/deformable_psroi_pooling_op.cu b/paddle/fluid/operators/deformable_psroi_pooling_op.cu index c1d4cc9d17ab4bfad80457964963c35595ff6a14..6489c1f9784cf3bd8385011ed44ae70df31c6b39 100644 --- a/paddle/fluid/operators/deformable_psroi_pooling_op.cu +++ b/paddle/fluid/operators/deformable_psroi_pooling_op.cu @@ -32,7 +32,7 @@ #include "paddle/fluid/operators/deformable_psroi_pooling_op.h" #include "paddle/fluid/operators/math/blas.h" #include "paddle/fluid/operators/math/math_function.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/dequantize_log_op.cu b/paddle/fluid/operators/dequantize_log_op.cu index 9f63f8ed6f52019a8d15d2a4ecc3ec0ecc85e165..39f4fdb71b69dd111cdde673d8117c8fad58dc1b 100644 --- a/paddle/fluid/operators/dequantize_log_op.cu +++ b/paddle/fluid/operators/dequantize_log_op.cu @@ -14,7 +14,7 @@ limitations under the License. */ #include "paddle/fluid/operators/dequantize_log_op.h" #include "paddle/fluid/operators/math.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/fluid/platform/hostdevice.h" namespace paddle { diff --git a/paddle/fluid/operators/detection/bbox_util.cu.h b/paddle/fluid/operators/detection/bbox_util.cu.h index 725983f8153e4f14f41552ae26800d363a863ec1..6f5137be620110bb1fabd9c03b6a61a5719dae78 100644 --- a/paddle/fluid/operators/detection/bbox_util.cu.h +++ b/paddle/fluid/operators/detection/bbox_util.cu.h @@ -18,15 +18,14 @@ limitations under the License. */ #include #ifdef __NVCC__ #include "cub/cub.cuh" -#include "paddle/fluid/platform/cudnn_helper.h" #endif #ifdef __HIPCC__ #include -#include "paddle/fluid/platform/miopen_helper.h" namespace cub = hipcub; #endif #include "paddle/fluid/operators/gather.cu.h" #include "paddle/fluid/operators/math/math_function.h" +#include "paddle/fluid/platform/device/gpu/gpu_dnn.h" #include "paddle/fluid/platform/for_range.h" namespace paddle { diff --git a/paddle/fluid/operators/detection/box_clip_op.cu b/paddle/fluid/operators/detection/box_clip_op.cu index e02f99a613c01915dfe78a82ce8a6877d97c9178..17013efcc98b7f66cd6f5ddfe352281cf17ad4fb 100644 --- a/paddle/fluid/operators/detection/box_clip_op.cu +++ b/paddle/fluid/operators/detection/box_clip_op.cu @@ -15,7 +15,7 @@ limitations under the License. 
*/ #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/detection/box_clip_op.h" #include "paddle/fluid/operators/math/math_function.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/fluid/platform/hostdevice.h" namespace paddle { diff --git a/paddle/fluid/operators/detection/box_coder_op.cu b/paddle/fluid/operators/detection/box_coder_op.cu index 0693029eaea9c2fa4cbf4268b9c0ef00aa3f0849..6e5fa1e293353336d8241fe49cdd12cb2b988e70 100644 --- a/paddle/fluid/operators/detection/box_coder_op.cu +++ b/paddle/fluid/operators/detection/box_coder_op.cu @@ -13,7 +13,7 @@ limitations under the License. */ #include #include "paddle/fluid/memory/memory.h" #include "paddle/fluid/operators/detection/box_coder_op.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/detection/box_decoder_and_assign_op.cu b/paddle/fluid/operators/detection/box_decoder_and_assign_op.cu index 70767f1d7b11527e228c1f3f521b50ebd5c8383f..ed97559aa8bb56a350ec1a9c79011968a1722291 100644 --- a/paddle/fluid/operators/detection/box_decoder_and_assign_op.cu +++ b/paddle/fluid/operators/detection/box_decoder_and_assign_op.cu @@ -11,7 +11,7 @@ limitations under the License. */ #include "paddle/fluid/memory/memcpy.h" #include "paddle/fluid/operators/detection/box_decoder_and_assign_op.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/detection/collect_fpn_proposals_op.cu b/paddle/fluid/operators/detection/collect_fpn_proposals_op.cu index ffd9ac6b2af806ed92bea6484a077cc83de7af3c..bd5703022db9005abf7f6252dcf5cd62045a8140 100644 --- a/paddle/fluid/operators/detection/collect_fpn_proposals_op.cu +++ b/paddle/fluid/operators/detection/collect_fpn_proposals_op.cu @@ -26,7 +26,7 @@ namespace cub = hipcub; #include "paddle/fluid/operators/gather.cu.h" #include "paddle/fluid/operators/math/concat_and_split.h" #include "paddle/fluid/operators/strided_memcpy.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/fluid/platform/for_range.h" namespace paddle { diff --git a/paddle/fluid/operators/detection/distribute_fpn_proposals_op.cu b/paddle/fluid/operators/detection/distribute_fpn_proposals_op.cu index 7ccb354e1773a35957f9df97d26d79d50cbd4fd6..1df7dcbe670c0513e5b57ca9acae8d65d2c74f6c 100644 --- a/paddle/fluid/operators/detection/distribute_fpn_proposals_op.cu +++ b/paddle/fluid/operators/detection/distribute_fpn_proposals_op.cu @@ -26,7 +26,7 @@ namespace cub = hipcub; #include "paddle/fluid/operators/detection/distribute_fpn_proposals_op.h" #include "paddle/fluid/operators/gather.cu.h" #include "paddle/fluid/operators/math/math_function.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/fluid/platform/for_range.h" namespace paddle { diff --git a/paddle/fluid/operators/detection/polygon_box_transform_op.cu b/paddle/fluid/operators/detection/polygon_box_transform_op.cu index 5977a434a6023f4463c9fb7ab1067f038747e44c..5ff479eac8df0eb02701e21d2f2c61af2f7567c3 100644 --- a/paddle/fluid/operators/detection/polygon_box_transform_op.cu +++ b/paddle/fluid/operators/detection/polygon_box_transform_op.cu @@ 
-13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/framework/op_registry.h" -#include "paddle/fluid/platform/cuda_primitives.h" -#include "paddle/fluid/platform/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/detection/roi_perspective_transform_op.cu b/paddle/fluid/operators/detection/roi_perspective_transform_op.cu index 7b34e197ffe214c80af85003600e05e0a392962d..2ddcc7a06f6797027da79749381f3b63f4469865 100644 --- a/paddle/fluid/operators/detection/roi_perspective_transform_op.cu +++ b/paddle/fluid/operators/detection/roi_perspective_transform_op.cu @@ -15,7 +15,7 @@ limitations under the License. */ #include #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/math/math_function.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/fluid/platform/float16.h" using paddle::platform::PADDLE_CUDA_NUM_THREADS; diff --git a/paddle/fluid/operators/detection/sigmoid_focal_loss_op.cu b/paddle/fluid/operators/detection/sigmoid_focal_loss_op.cu index ed1676200dc470a931f4e1d9bb218842a473f716..10c402e5a4078ad651543d15118c584fd2e72eff 100644 --- a/paddle/fluid/operators/detection/sigmoid_focal_loss_op.cu +++ b/paddle/fluid/operators/detection/sigmoid_focal_loss_op.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/detection/sigmoid_focal_loss_op.h" #include "paddle/fluid/operators/math.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/fluid/platform/hostdevice.h" namespace paddle { diff --git a/paddle/fluid/operators/detection/yolo_box_op.cu b/paddle/fluid/operators/detection/yolo_box_op.cu index 83a0eb87d02dd549521b68a112c5d9eea6055159..23bd6af6bd2e805295fb467865dbe297991cbdbd 100644 --- a/paddle/fluid/operators/detection/yolo_box_op.cu +++ b/paddle/fluid/operators/detection/yolo_box_op.cu @@ -15,7 +15,7 @@ limitations under the License. */ #include "paddle/fluid/memory/malloc.h" #include "paddle/fluid/operators/detection/yolo_box_op.h" #include "paddle/fluid/operators/math/math_function.h" -#include "paddle/fluid/platform/gpu_launch_config.h" +#include "paddle/fluid/platform/device/gpu/gpu_launch_config.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/diagonal_op.cu b/paddle/fluid/operators/diagonal_op.cu index e2b5f24d6619e1dc70a3d84256ec1aeb18b90589..b1268e903df190f7413d59a785b17c8cb058ae22 100644 --- a/paddle/fluid/operators/diagonal_op.cu +++ b/paddle/fluid/operators/diagonal_op.cu @@ -14,7 +14,7 @@ limitations under the License. */ #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/diagonal_op.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/dropout_impl.cu.h b/paddle/fluid/operators/dropout_impl.cu.h index bd4d690577a6fac5be79fb5086d3b5203c5616fb..c97a523caa7673c4acc3fe7cf9022b22071e26f4 100644 --- a/paddle/fluid/operators/dropout_impl.cu.h +++ b/paddle/fluid/operators/dropout_impl.cu.h @@ -33,7 +33,7 @@ limitations under the License. 
*/ #include "paddle/fluid/operators/dropout_impl_util.h" #include "paddle/fluid/operators/dropout_op.h" #include "paddle/fluid/platform/aligned_vector.h" -#include "paddle/fluid/platform/gpu_launch_config.h" +#include "paddle/fluid/platform/device/gpu/gpu_launch_config.h" namespace paddle { namespace operators { @@ -167,14 +167,14 @@ void DropoutFwGPUKernelDriver(const platform::CUDADeviceContext& dev_ctx, auto* y_data = y->data(); if (dropout_prob == 1.0f) { #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( hipMemsetAsync(y_data, 0, x_numel * sizeof(T), stream)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( hipMemsetAsync(mask_data, 0, x_numel * sizeof(*mask_data), stream)); #else - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( cudaMemsetAsync(y_data, 0, x_numel * sizeof(T), stream)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( cudaMemsetAsync(mask_data, 0, x_numel * sizeof(*mask_data), stream)); #endif return; diff --git a/paddle/fluid/operators/edit_distance_op.cu b/paddle/fluid/operators/edit_distance_op.cu index 80490af33a1f9589ebc992bb556144deb085ea00..f28fa4d6338d7037b7b382a6f1a33e7504b75410 100644 --- a/paddle/fluid/operators/edit_distance_op.cu +++ b/paddle/fluid/operators/edit_distance_op.cu @@ -17,8 +17,8 @@ limitations under the License. */ #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/edit_distance_op.h" #include "paddle/fluid/operators/math/math_function.h" -#include "paddle/fluid/platform/cuda_primitives.h" -#include "paddle/fluid/platform/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/elementwise/elementwise_op_function.h b/paddle/fluid/operators/elementwise/elementwise_op_function.h index d3ab8ad9d698585fcde0a042b9717fa4f4052e2e..ad5a55aede751ec0411a42dd52c85b8ebbbfa02b 100644 --- a/paddle/fluid/operators/elementwise/elementwise_op_function.h +++ b/paddle/fluid/operators/elementwise/elementwise_op_function.h @@ -26,7 +26,7 @@ limitations under the License. */ #include "paddle/fluid/framework/operator.h" #include "paddle/fluid/memory/malloc.h" #include "paddle/fluid/operators/elementwise/elementwise_functor.h" -#include "paddle/fluid/platform/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" #include "paddle/fluid/platform/transform.h" // only can include the headers in paddle/pten/include dirs @@ -43,8 +43,8 @@ limitations under the License. 
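The dropout_impl.cu.h hunk above still selects hipMemsetAsync or cudaMemsetAsync under #ifdef PADDLE_WITH_HIP; the same aliasing trick used elsewhere in this change could fold that branch into one call site. A hedged sketch (gpuMemsetAsync is a local alias for illustration, not an API this patch introduces):

    #include <cstddef>
    #ifdef PADDLE_WITH_HIP
    #include <hip/hip_runtime.h>
    // ROCm build: forward to the HIP runtime.
    inline hipError_t gpuMemsetAsync(void* dst, int value, std::size_t bytes,
                                     hipStream_t stream) {
      return hipMemsetAsync(dst, value, bytes, stream);
    }
    #else
    #include <cuda_runtime.h>
    // CUDA build: forward to the CUDA runtime.
    inline cudaError_t gpuMemsetAsync(void* dst, int value, std::size_t bytes,
                                      cudaStream_t stream) {
      return cudaMemsetAsync(dst, value, bytes, stream);
    }
    #endif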
*/ #include #include "paddle/fluid/operators/elementwise/elementwise_op_broadcast.cu.h" -#include "paddle/fluid/platform/cuda_device_function.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_device_function.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #ifdef __HIPCC__ constexpr int ELEMWISE_MAX_BLOCK_DIM = 256; diff --git a/paddle/fluid/operators/elementwise/test_elementwise_add_op_inplace.cc b/paddle/fluid/operators/elementwise/test_elementwise_add_op_inplace.cc index ab45b6f4de276b504b0a8958cbce3c4ab284cb35..706475bc82fadef0eaf864d69fe3ceccb087d6f2 100644 --- a/paddle/fluid/operators/elementwise/test_elementwise_add_op_inplace.cc +++ b/paddle/fluid/operators/elementwise/test_elementwise_add_op_inplace.cc @@ -30,10 +30,9 @@ namespace operators { static void Memcpy(void *dst, const void *src, size_t n, bool copy_to_gpu) { if (copy_to_gpu) { #ifdef PADDLE_WITH_CUDA - PADDLE_ENFORCE_CUDA_SUCCESS( - cudaMemcpy(dst, src, n, cudaMemcpyHostToDevice)); + PADDLE_ENFORCE_GPU_SUCCESS(cudaMemcpy(dst, src, n, cudaMemcpyHostToDevice)); #elif defined(PADDLE_WITH_HIP) - PADDLE_ENFORCE_CUDA_SUCCESS(hipMemcpy(dst, src, n, hipMemcpyHostToDevice)); + PADDLE_ENFORCE_GPU_SUCCESS(hipMemcpy(dst, src, n, hipMemcpyHostToDevice)); #else PADDLE_THROW( platform::errors::InvalidArgument("Check your paddle version, current " diff --git a/paddle/fluid/operators/fake_quantize_op.cu b/paddle/fluid/operators/fake_quantize_op.cu index 8f2235c7e3d21f3627105591dd58d8e01927aac1..b95bbc775a0d764b49fbf1d556970450b29dac7f 100644 --- a/paddle/fluid/operators/fake_quantize_op.cu +++ b/paddle/fluid/operators/fake_quantize_op.cu @@ -15,7 +15,7 @@ limitations under the License. */ #include #include "paddle/fluid/memory/memcpy.h" #include "paddle/fluid/operators/fake_quantize_op.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/fused/attn_bias_add.cu.h b/paddle/fluid/operators/fused/attn_bias_add.cu.h index f7478364cdfc5177780f7576f0667cc07d18e701..990ac8dbc81216e9d1a7d92891676482b7b9ab15 100644 --- a/paddle/fluid/operators/fused/attn_bias_add.cu.h +++ b/paddle/fluid/operators/fused/attn_bias_add.cu.h @@ -22,11 +22,7 @@ limitations under the License. */ namespace cub = hipcub; #endif -#ifdef PADDLE_WITH_HIP -#include "paddle/fluid/platform/miopen_helper.h" -#else -#include "paddle/fluid/platform/cudnn_helper.h" -#endif +#include "paddle/fluid/platform/device/gpu/gpu_dnn.h" #ifdef __HIPCC__ #define LAUNCH_BOUNDS(BlockDim) __launch_bounds__(BlockDim) diff --git a/paddle/fluid/operators/fused/conv_fusion_op.cc b/paddle/fluid/operators/fused/conv_fusion_op.cc index 6b94f4ea5bdd2ffc1cdf7684568fe1a7b2928278..f2ce0bccd2fb57eb9c8d29c2bf178dc6bf174904 100644 --- a/paddle/fluid/operators/fused/conv_fusion_op.cc +++ b/paddle/fluid/operators/fused/conv_fusion_op.cc @@ -15,9 +15,7 @@ limitations under the License. 
*/ #include #include #include "paddle/fluid/operators/conv_op.h" -#ifdef PADDLE_WITH_CUDA -#include "paddle/fluid/platform/cudnn_helper.h" -#endif +#include "paddle/fluid/platform/device/gpu/gpu_dnn.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/fused/conv_fusion_op.cu b/paddle/fluid/operators/fused/conv_fusion_op.cu index f5ee7f559918457c600324bf2d24daa247c938da..38326e7560c0df893d6414ffcbb710fe91073e5b 100644 --- a/paddle/fluid/operators/fused/conv_fusion_op.cu +++ b/paddle/fluid/operators/fused/conv_fusion_op.cu @@ -18,11 +18,7 @@ limitations under the License. */ #include "paddle/fluid/operators/conv_cudnn_op_cache.h" #include "paddle/fluid/operators/conv_op.h" #include "paddle/fluid/operators/math/padding.h" -#ifdef PADDLE_WITH_HIP -#include "paddle/fluid/platform/miopen_helper.h" -#else -#include "paddle/fluid/platform/cudnn_helper.h" -#endif +#include "paddle/fluid/platform/device/gpu/gpu_dnn.h" DECLARE_int64(cudnn_exhaustive_search_times); @@ -169,7 +165,7 @@ class CUDNNConvFusionOpKernel : public framework::OpKernel { #ifdef PADDLE_WITH_HIP miopenConvolutionDescriptor_t cudnn_conv_desc = conv_desc.descriptor(padding_common, strides, dilations); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenSetConvolutionGroupCount(cudnn_conv_desc, groups)); // Now only support NCHW @@ -194,14 +190,14 @@ class CUDNNConvFusionOpKernel : public framework::OpKernel { auto f_dims = framework::vectorize(filter->dims()); size_t workspace_size = 0; - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenConvolutionForwardGetWorkSpaceSize( handle, cudnn_filter_desc, cudnn_input_desc, cudnn_conv_desc, cudnn_output_desc, &workspace_size)); int find_count; miopenConvAlgoPerf_t find_result; auto cudnn_find_func = [&](void* cudnn_workspace_ptr) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenFindConvolutionForwardAlgorithm( handle, cudnn_input_desc, input_data, cudnn_filter_desc, filter_data, cudnn_conv_desc, cudnn_output_desc, output_data, @@ -215,23 +211,23 @@ class CUDNNConvFusionOpKernel : public framework::OpKernel { { ScalingParamType alpha = 1.0f, beta = 0.0f; auto cudnn_func = [&](void* cudnn_workspace) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenConvolutionForward( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenConvolutionForward( handle, &alpha, cudnn_input_desc, input_data, cudnn_filter_desc, filter_data, cudnn_conv_desc, algo, &beta, cudnn_output_desc, output_data, cudnn_workspace, workspace_size)); }; workspace_handle.RunFunc(cudnn_func, workspace_size); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenConvolutionForwardBias( handle, &alpha, cudnn_bias_desc, bias_data, &beta, cudnn_output_desc, output_data)); if (activation != "identity") { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenActivationForward( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenActivationForward( handle, cudnn_act_desc, &alpha, cudnn_output_desc, output_data, &beta, cudnn_output_desc, output_data)); } if (residual) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenOpTensor( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenOpTensor( handle, miopenTensorOpAdd, &alpha, cudnn_output_desc, output_data, &alpha, cudnn_output_desc, residual_data, &beta, cudnn_output_desc, output_data)); @@ -240,9 +236,8 @@ class CUDNNConvFusionOpKernel : public framework::OpKernel { #else // PADDLE_WITH_HIP 
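Several files in this change (attn_bias_add.cu.h and conv_fusion_op.cc/.cu above, the cudnn_* fused-op headers below) drop the #ifdef PADDLE_WITH_HIP choice between miopen_helper.h and cudnn_helper.h in favour of the single gpu_dnn.h include. The unified header presumably exposes backend-neutral names; a hypothetical sketch of that kind of aliasing, not the actual contents of gpu_dnn.h:

    #ifdef PADDLE_WITH_HIP
    #include <miopen/miopen.h>
    // ROCm build: the DNN library is MIOpen.
    using gpuDnnHandle_t = miopenHandle_t;
    using gpuDnnTensorDesc_t = miopenTensorDescriptor_t;
    #else
    #include <cudnn.h>
    // CUDA build: the DNN library is cuDNN.
    using gpuDnnHandle_t = cudnnHandle_t;
    using gpuDnnTensorDesc_t = cudnnTensorDescriptor_t;
    #endif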
cudnnConvolutionDescriptor_t cudnn_conv_desc = conv_desc.descriptor(padding_common, strides, dilations); - PADDLE_ENFORCE_CUDA_SUCCESS( - platform::dynload::cudnnSetConvolutionGroupCount(cudnn_conv_desc, - groups)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetConvolutionGroupCount( + cudnn_conv_desc, groups)); cudnnTensorDescriptor_t cudnn_input_desc = input_desc.descriptor( layout, framework::vectorize(transformed_input.dims())); @@ -273,13 +268,12 @@ class CUDNNConvFusionOpKernel : public framework::OpKernel { auto handle = dev_ctx.cudnn_handle(); auto workspace_handle = dev_ctx.cudnn_workspace_handle(); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetConvolutionMathType( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetConvolutionMathType( cudnn_conv_desc, CUDNN_DEFAULT_MATH)); #if CUDA_VERSION >= 11000 && CUDNN_VERSION >= 8000 if (!platform::allow_tf32_cudnn) { - PADDLE_ENFORCE_CUDA_SUCCESS( - platform::dynload::cudnnSetConvolutionMathType(cudnn_conv_desc, - CUDNN_FMA_MATH)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetConvolutionMathType( + cudnn_conv_desc, CUDNN_FMA_MATH)); } #endif // CUDA_VERSION >= 11000 && CUDNN_VERSION >= 8000 @@ -292,20 +286,20 @@ class CUDNNConvFusionOpKernel : public framework::OpKernel { size_t tmp_size = 0; std::unique_ptr perf_results( new cudnnConvolutionFwdAlgoPerf_t[kNUM_CUDNN_FWD_ALGS]); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnGetConvolutionForwardAlgorithm_v7( handle, cudnn_input_desc, cudnn_filter_desc, cudnn_conv_desc, cudnn_output_desc, kNUM_CUDNN_FWD_ALGS, &perf_count, perf_results.get())); algo = (perf_results.get())[best_algo_idx].algo; - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnGetConvolutionForwardWorkspaceSize( handle, cudnn_input_desc, cudnn_filter_desc, cudnn_conv_desc, cudnn_output_desc, algo, &workspace_size_in_bytes)); if (workspace_size_in_bytes > workspace_size_limit) workspace_size_limit = workspace_size_in_bytes; #else - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnGetConvolutionForwardAlgorithm( handle, cudnn_input_desc, cudnn_filter_desc, cudnn_conv_desc, cudnn_output_desc, CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT, @@ -319,7 +313,7 @@ class CUDNNConvFusionOpKernel : public framework::OpKernel { std::array fwd_perf_stat; auto cudnn_find_func = [&](void* cudnn_workspace) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnFindConvolutionForwardAlgorithmEx( handle, cudnn_input_desc, input_data, cudnn_filter_desc, filter_data, cudnn_conv_desc, cudnn_output_desc, output_data, @@ -355,7 +349,7 @@ class CUDNNConvFusionOpKernel : public framework::OpKernel { VLOG(3) << "choose algo " << algo; } - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnGetConvolutionForwardWorkspaceSize( handle, cudnn_input_desc, cudnn_filter_desc, cudnn_conv_desc, cudnn_output_desc, algo, &workspace_size_in_bytes)); @@ -375,13 +369,13 @@ class CUDNNConvFusionOpKernel : public framework::OpKernel { // ------------- cudnn conv forward and bias add --------------------- ScalingParamType alpha = 1.0f, beta = 0.0f; auto cudnn_func = [&](void* cudnn_workspace) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnConvolutionForward( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnConvolutionForward( handle, &alpha, cudnn_input_desc, input_data, cudnn_filter_desc, filter_data, cudnn_conv_desc, algo, cudnn_workspace, 
workspace_size_in_bytes, &beta, cudnn_output_desc, output_data)); }; workspace_handle.RunFunc(cudnn_func, workspace_size_in_bytes); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnAddTensor( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnAddTensor( handle, &alpha, cudnn_bias_desc, bias_data, &alpha, cudnn_output_desc, output_data)); } else { @@ -392,7 +386,7 @@ class CUDNNConvFusionOpKernel : public framework::OpKernel { ScalingParamType alpha1 = 1.0f; ScalingParamType alpha2 = residual ? 1.0f : 0.0f; auto cudnn_func = [&](void* cudnn_workspace) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnConvolutionBiasActivationForward( handle, &alpha1, cudnn_input_desc, input_data, cudnn_filter_desc, filter_data, cudnn_conv_desc, algo, diff --git a/paddle/fluid/operators/fused/cudnn_bn_stats_finalize.cu.h b/paddle/fluid/operators/fused/cudnn_bn_stats_finalize.cu.h index dc703f9a822b5b7ef73485c7225491b91bfdfe4b..913772fb65050b1ee7e4baf0bdbfa7c714c0574a 100644 --- a/paddle/fluid/operators/fused/cudnn_bn_stats_finalize.cu.h +++ b/paddle/fluid/operators/fused/cudnn_bn_stats_finalize.cu.h @@ -15,8 +15,7 @@ limitations under the License. */ #pragma once #include "paddle/fluid/operators/fused/cudnn_fusion_helper.h" -#include "paddle/fluid/platform/cudnn_desc.h" -#include "paddle/fluid/platform/cudnn_helper.h" +#include "paddle/fluid/platform/device/gpu/gpu_dnn.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/fused/cudnn_fusion_helper.h b/paddle/fluid/operators/fused/cudnn_fusion_helper.h index 1de64cf5ad947d9b5e4185fcf79eedb5a612eca9..13fad0b7cbb3d2b87e1403bc63fca0b8486c72c6 100644 --- a/paddle/fluid/operators/fused/cudnn_fusion_helper.h +++ b/paddle/fluid/operators/fused/cudnn_fusion_helper.h @@ -31,19 +31,19 @@ class CudnnFusionOp { public: explicit CudnnFusionOp(cudnnFusedOps_t op_id) : plan_created_(false) { // New 'fused op' descriptor creation - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnCreateFusedOpsPlan(&op_, op_id)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnCreateFusedOpsPlan(&op_, op_id)); + PADDLE_ENFORCE_GPU_SUCCESS( dynload::cudnnCreateFusedOpsConstParamPack(&op_const_params_, op_id)); - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnCreateFusedOpsVariantParamPack( + PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnCreateFusedOpsVariantParamPack( &op_variant_params_, op_id)); } ~CudnnFusionOp() PADDLE_MAY_THROW { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( dynload::cudnnDestroyFusedOpsVariantParamPack(op_variant_params_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( dynload::cudnnDestroyFusedOpsConstParamPack(op_const_params_)); - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnDestroyFusedOpsPlan(op_)); + PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnDestroyFusedOpsPlan(op_)); } // Execute fused op @@ -53,7 +53,7 @@ class CudnnFusionOp { platform::errors::Fatal( "CudnnFusionOp exec requested without a valid 'plan', need: " ", GetWorkspaceSizeBytes(), Execute().")); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( dynload::cudnnFusedOpsExecute(cudnn_handle, op_, op_variant_params_)); } @@ -61,9 +61,8 @@ class CudnnFusionOp { template void SetOpConstParamDesc(cudnnFusedOpsConstParamLabel_t param_label, T *param_ptr) { - PADDLE_ENFORCE_CUDA_SUCCESS( - dynload::cudnnSetFusedOpsConstParamPackAttribute( - op_const_params_, param_label, param_ptr)); + PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnSetFusedOpsConstParamPackAttribute( + op_const_params_, param_label, 
param_ptr)); plan_created_ = false; } @@ -81,9 +80,8 @@ class CudnnFusionOp { template void SetOpConstParamAttr(cudnnFusedOpsConstParamLabel_t param_label, T param) { - PADDLE_ENFORCE_CUDA_SUCCESS( - dynload::cudnnSetFusedOpsConstParamPackAttribute(op_const_params_, - param_label, &param)); + PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnSetFusedOpsConstParamPackAttribute( + op_const_params_, param_label, &param)); plan_created_ = false; } @@ -101,7 +99,7 @@ class CudnnFusionOp { template void SetOpVariantParamAttrPtr(cudnnFusedOpsVariantParamLabel_t param_label, T *param_ptr) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( dynload::cudnnSetFusedOpsVariantParamPackAttribute( op_variant_params_, param_label, param_ptr)); } @@ -120,7 +118,7 @@ class CudnnFusionOp { size_t GetWorkspaceSizeInBytes(cudnnHandle_t cudnn_handle) { if (!plan_created_) { workspace_bytes_ = 0U; - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnMakeFusedOpsPlan( + PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnMakeFusedOpsPlan( cudnn_handle, op_, op_const_params_, &workspace_bytes_)); plan_created_ = true; } diff --git a/paddle/fluid/operators/fused/cudnn_norm_conv.cu.h b/paddle/fluid/operators/fused/cudnn_norm_conv.cu.h index 9b9328a5ca62083e8cc16e218ed75ff97a4ec79b..c8871388dd45070468c812244ea00973a56e7d25 100644 --- a/paddle/fluid/operators/fused/cudnn_norm_conv.cu.h +++ b/paddle/fluid/operators/fused/cudnn_norm_conv.cu.h @@ -15,8 +15,7 @@ limitations under the License. */ #pragma once #include "paddle/fluid/operators/fused/cudnn_fusion_helper.h" -#include "paddle/fluid/platform/cudnn_desc.h" -#include "paddle/fluid/platform/cudnn_helper.h" +#include "paddle/fluid/platform/device/gpu/gpu_dnn.h" namespace paddle { namespace operators { @@ -320,7 +319,7 @@ class CudnnNormConvolutionGrad { ScalingParamType beta = use_addto ? 1.0f : 0.0f; ctx.cudnn_workspace_handle().RunFunc( [&](void *cudnn_workspace_ptr) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnConvolutionBackwardData( cudnn_handle, &alpha, args_.filter_desc.desc(), filter_ptr, args_.out_desc.desc(), output_grad_ptr, @@ -370,7 +369,7 @@ class CudnnNormConvolutionGrad { size_t GetWorkspaceSizeBwdData(const platform::CUDADeviceContext &ctx) { size_t workspace_size = 0U; auto handle = ctx.cudnn_handle(); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnGetConvolutionBackwardDataWorkspaceSize( handle, args_.filter_desc.desc(), args_.out_desc.desc(), args_.conv_desc.desc(), args_.in_desc.desc(), dgrad_algo_, diff --git a/paddle/fluid/operators/fused/cudnn_scale_bias_add_relu.cu.h b/paddle/fluid/operators/fused/cudnn_scale_bias_add_relu.cu.h index 5166ff27234f237f74136862cb6a29c79860ea70..d0205208acc474baf0f0faa1d58a604fff18c76c 100644 --- a/paddle/fluid/operators/fused/cudnn_scale_bias_add_relu.cu.h +++ b/paddle/fluid/operators/fused/cudnn_scale_bias_add_relu.cu.h @@ -15,8 +15,7 @@ limitations under the License.
*/ #pragma once #include "paddle/fluid/operators/fused/cudnn_fusion_helper.h" -#include "paddle/fluid/platform/cudnn_desc.h" -#include "paddle/fluid/platform/cudnn_helper.h" +#include "paddle/fluid/platform/device/gpu/gpu_dnn.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/fused/fused_attention_op.cu b/paddle/fluid/operators/fused/fused_attention_op.cu index 9f6d6e2270673d1a915d7ea71c7f218a9ddf35ea..173ef48b83dc2d2737938cb395501803a575bd58 100644 --- a/paddle/fluid/operators/fused/fused_attention_op.cu +++ b/paddle/fluid/operators/fused/fused_attention_op.cu @@ -16,8 +16,8 @@ limitations under the License. */ #include #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/operator.h" -#include "paddle/fluid/platform/cuda_device_function.h" -#include "paddle/fluid/platform/cudnn_helper.h" +#include "paddle/fluid/platform/device/gpu/gpu_device_function.h" +#include "paddle/fluid/platform/device/gpu/gpu_dnn.h" #include "paddle/fluid/operators/elementwise/elementwise_add_op.h" #include "paddle/fluid/operators/math/math_function.h" diff --git a/paddle/fluid/operators/fused/fused_bn_activation_op.cu b/paddle/fluid/operators/fused/fused_bn_activation_op.cu index 9339ae8e470de897749c1795a98baeb41ddddaba..83328caf3844fc797a0e45acec6b2928d75f4ee5 100644 --- a/paddle/fluid/operators/fused/fused_bn_activation_op.cu +++ b/paddle/fluid/operators/fused/fused_bn_activation_op.cu @@ -22,7 +22,7 @@ #include "paddle/fluid/operators/fused/fused_bn_activation_op.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/norm_utils.h" -#include "paddle/fluid/platform/cudnn_helper.h" +#include "paddle/fluid/platform/device/gpu/gpu_dnn.h" #include "paddle/fluid/platform/float16.h" DECLARE_bool(cudnn_batchnorm_spatial_persistent); @@ -107,22 +107,21 @@ class FusedBatchNormActKernel cudnnTensorDescriptor_t bn_param_desc_; cudnnBatchNormMode_t mode_ = CUDNN_BATCHNORM_SPATIAL_PERSISTENT; - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&data_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&bn_param_desc_)); VLOG(3) << "Setting descriptors."; std::vector dims = {N, C, H, W, D}; std::vector strides = {H * W * D * C, 1, W * D * C, D * C, C}; - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( data_desc_, CudnnDataType::type, x_dims.size() > 3 ? x_dims.size() : 4, dims.data(), strides.data())); - PADDLE_ENFORCE_CUDA_SUCCESS( - platform::dynload::cudnnDeriveBNTensorDescriptor(bn_param_desc_, - data_desc_, mode_)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnDeriveBNTensorDescriptor( + bn_param_desc_, data_desc_, mode_)); double this_factor = 1. 
- momentum; cudnnBatchNormOps_t bnOps_ = CUDNN_BATCHNORM_OPS_BN_ACTIVATION; @@ -144,7 +143,7 @@ class FusedBatchNormActKernel "The argument ReserveSpace of batch_norm op is not found.")); // --------------- cudnn batchnorm workspace --------------- - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload:: cudnnGetBatchNormalizationForwardTrainingExWorkspaceSize( /*handle=*/handle, @@ -158,7 +157,7 @@ class FusedBatchNormActKernel /*sizeInBytes=*/&workspace_size)); // -------------- cudnn batchnorm reserve space -------------- - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnGetBatchNormalizationTrainingExReserveSpaceSize( /*handle=*/handle, /*mode=*/mode_, @@ -171,7 +170,7 @@ class FusedBatchNormActKernel reserve_space_size); workspace_ptr = workspace_tensor.mutable_data(ctx.GetPlace(), x->type(), workspace_size); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnBatchNormalizationForwardTrainingEx( handle, mode_, bnOps_, CudnnDataType::kOne(), CudnnDataType::kZero(), data_desc_, x->template data(), @@ -190,9 +189,9 @@ class FusedBatchNormActKernel reserve_space_size)); // clean when exit. - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(data_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(bn_param_desc_)); } }; @@ -271,9 +270,9 @@ class FusedBatchNormActGradKernel cudnnTensorDescriptor_t bn_param_desc_; cudnnBatchNormMode_t mode_ = CUDNN_BATCHNORM_SPATIAL_PERSISTENT; - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&data_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&bn_param_desc_)); if (epsilon <= CUDNN_BN_MIN_EPSILON - FLT_EPSILON) { LOG(ERROR) << "Provided epsilon is smaller than " @@ -282,12 +281,11 @@ class FusedBatchNormActGradKernel } epsilon = std::max(epsilon, CUDNN_BN_MIN_EPSILON); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( data_desc_, CudnnDataType::type, x_dims.size() > 3 ? x_dims.size() : 4, dims.data(), strides.data())); - PADDLE_ENFORCE_CUDA_SUCCESS( - platform::dynload::cudnnDeriveBNTensorDescriptor(bn_param_desc_, - data_desc_, mode_)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnDeriveBNTensorDescriptor( + bn_param_desc_, data_desc_, mode_)); const auto *saved_mean = ctx.Input("SavedMean"); const auto *saved_var = ctx.Input("SavedVariance"); @@ -305,7 +303,7 @@ class FusedBatchNormActGradKernel cudnnActivationDescriptor_t activation_desc_ = scope_act_desc.descriptor(act_type); // --------------- cudnn batchnorm workspace --------------- - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnGetBatchNormalizationBackwardExWorkspaceSize( /*handle=*/dev_ctx.cudnn_handle(), /*mode=*/mode_, @@ -322,7 +320,7 @@ class FusedBatchNormActGradKernel workspace_ptr = workspace_tensor.mutable_data(ctx.GetPlace(), x->type(), workspace_size); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnBatchNormalizationBackwardEx( /*handle=*/dev_ctx.cudnn_handle(), /*mode=*/mode_, @@ -358,9 +356,9 @@ class FusedBatchNormActGradKernel /*reserveSpaceSizeInBytes=*/reserve_space_size)); // clean when exit. 
- PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(data_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(bn_param_desc_)); } }; diff --git a/paddle/fluid/operators/fused/fused_bn_add_activation_op.cu b/paddle/fluid/operators/fused/fused_bn_add_activation_op.cu index c92b13b5f58473d6fc07adcd6e257121bc07be9b..7c124a0d6b66120ba83e1fa8d7a54060014dbb0b 100644 --- a/paddle/fluid/operators/fused/fused_bn_add_activation_op.cu +++ b/paddle/fluid/operators/fused/fused_bn_add_activation_op.cu @@ -21,7 +21,7 @@ #include "paddle/fluid/operators/fused/fused_bn_add_activation_op.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/norm_utils.h" -#include "paddle/fluid/platform/cudnn_helper.h" +#include "paddle/fluid/platform/device/gpu/gpu_dnn.h" #include "paddle/fluid/platform/float16.h" DECLARE_bool(cudnn_batchnorm_spatial_persistent); @@ -87,20 +87,19 @@ class FusedBatchNormAddActKernel cudnnTensorDescriptor_t bn_param_desc_; cudnnBatchNormMode_t mode_ = CUDNN_BATCHNORM_SPATIAL_PERSISTENT; - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&data_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&bn_param_desc_)); std::vector dims = {N, C, H, W, D}; std::vector strides = {H * W * D * C, 1, W * D * C, D * C, C}; - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( data_desc_, CudnnDataType::type, in_dims.size() > 3 ? in_dims.size() : 4, dims.data(), strides.data())); - PADDLE_ENFORCE_CUDA_SUCCESS( - platform::dynload::cudnnDeriveBNTensorDescriptor(bn_param_desc_, - data_desc_, mode_)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnDeriveBNTensorDescriptor( + bn_param_desc_, data_desc_, mode_)); double this_factor = 1. - momentum; cudnnBatchNormOps_t bnOps_ = CUDNN_BATCHNORM_OPS_BN_ADD_ACTIVATION; @@ -122,7 +121,7 @@ class FusedBatchNormAddActKernel "The argument ReserveSpace of batch_norm op is not found.")); // --------------- cudnn batchnorm workspace --------------- - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload:: cudnnGetBatchNormalizationForwardTrainingExWorkspaceSize( /*handle=*/handle, @@ -136,7 +135,7 @@ class FusedBatchNormAddActKernel /*sizeInBytes=*/&workspace_size)); // -------------- cudnn batchnorm reserve space -------------- - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnGetBatchNormalizationTrainingExReserveSpaceSize( /*handle=*/handle, /*mode=*/mode_, @@ -149,7 +148,7 @@ class FusedBatchNormAddActKernel reserve_space_size); workspace_ptr = workspace_tensor.mutable_data(ctx.GetPlace(), x->type(), workspace_size); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnBatchNormalizationForwardTrainingEx( handle, mode_, bnOps_, CudnnDataType::kOne(), CudnnDataType::kZero(), data_desc_, x->template data(), @@ -169,9 +168,9 @@ class FusedBatchNormAddActKernel reserve_space_size)); // clean when exit. 
- PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(data_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(bn_param_desc_)); } }; @@ -231,9 +230,9 @@ class FusedBatchNormAddActGradKernel cudnnTensorDescriptor_t bn_param_desc_; cudnnBatchNormMode_t mode_ = CUDNN_BATCHNORM_SPATIAL_PERSISTENT; - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&data_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&bn_param_desc_)); if (epsilon <= CUDNN_BN_MIN_EPSILON - FLT_EPSILON) { LOG(ERROR) << "Provided epsilon is smaller than " @@ -242,12 +241,11 @@ class FusedBatchNormAddActGradKernel } epsilon = std::max(epsilon, CUDNN_BN_MIN_EPSILON); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( data_desc_, CudnnDataType::type, in_dims.size() > 3 ? in_dims.size() : 4, dims.data(), strides.data())); - PADDLE_ENFORCE_CUDA_SUCCESS( - platform::dynload::cudnnDeriveBNTensorDescriptor(bn_param_desc_, - data_desc_, mode_)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnDeriveBNTensorDescriptor( + bn_param_desc_, data_desc_, mode_)); const auto *saved_mean = ctx.Input("SavedMean"); const auto *saved_var = ctx.Input("SavedVariance"); @@ -265,7 +263,7 @@ class FusedBatchNormAddActGradKernel cudnnActivationDescriptor_t activation_desc_ = scope_act_desc.descriptor(act_type); // --------------- cudnn batchnorm workspace --------------- - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnGetBatchNormalizationBackwardExWorkspaceSize( /*handle=*/dev_ctx.cudnn_handle(), /*mode=*/mode_, @@ -281,7 +279,7 @@ class FusedBatchNormAddActGradKernel workspace_ptr = workspace_tensor.mutable_data(ctx.GetPlace(), x->type(), workspace_size); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnBatchNormalizationBackwardEx( /*handle=*/dev_ctx.cudnn_handle(), /*mode=*/mode_, @@ -315,9 +313,9 @@ class FusedBatchNormAddActGradKernel /*reserveSpaceSizeInBytes=*/reserve_space_size)); // clean when exit. - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(data_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(bn_param_desc_)); } }; diff --git a/paddle/fluid/operators/fused/fused_dropout_common.h b/paddle/fluid/operators/fused/fused_dropout_common.h index 049c37f1ea0c44e9d699413e2525cf10e166009c..eb651e4ea7b4fc7bd156f0915edec87175d44047 100644 --- a/paddle/fluid/operators/fused/fused_dropout_common.h +++ b/paddle/fluid/operators/fused/fused_dropout_common.h @@ -23,10 +23,10 @@ limitations under the License. 
*/ #include "paddle/fluid/operators/layer_norm_kernel.cu.h" #include "paddle/fluid/operators/math/functors.h" #include "paddle/fluid/platform/aligned_vector.h" -#include "paddle/fluid/platform/cuda_device_function.h" +#include "paddle/fluid/platform/device/gpu/gpu_device_function.h" +#include "paddle/fluid/platform/device/gpu/gpu_launch_config.h" #include "paddle/fluid/platform/device_context.h" #include "paddle/fluid/platform/float16.h" -#include "paddle/fluid/platform/gpu_launch_config.h" namespace paddle { namespace operators { @@ -93,7 +93,7 @@ __forceinline__ __device__ void RandVec<8>(curandStatePhilox4_32_10_t *state, template inline void SetZero(const platform::CUDADeviceContext &ctx, T *ptr, const size_t size) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( cudaMemsetAsync(ptr, 0, size * sizeof(T), ctx.stream())); } diff --git a/paddle/fluid/operators/fused/fused_fc_elementwise_layernorm_op.cu b/paddle/fluid/operators/fused/fused_fc_elementwise_layernorm_op.cu index dc068e02be4ecf457b9156f392aa945a07a6a74b..c5b1fd939295043f669c1cbe83f77d5f425b9ca0 100644 --- a/paddle/fluid/operators/fused/fused_fc_elementwise_layernorm_op.cu +++ b/paddle/fluid/operators/fused/fused_fc_elementwise_layernorm_op.cu @@ -22,7 +22,7 @@ namespace cub = hipcub; #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/math/blas.h" -#include "paddle/fluid/platform/cuda_device_function.h" +#include "paddle/fluid/platform/device/gpu/gpu_device_function.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/fused/fused_layernorm_residual_dropout_bias.h b/paddle/fluid/operators/fused/fused_layernorm_residual_dropout_bias.h index f257d3efa433e6a817de713e18d60ced9da5acbd..1827e137c15f183ae97552c795df85556ef0cd1a 100644 --- a/paddle/fluid/operators/fused/fused_layernorm_residual_dropout_bias.h +++ b/paddle/fluid/operators/fused/fused_layernorm_residual_dropout_bias.h @@ -169,7 +169,7 @@ void LaunchLayernormResidualDropoutBias( auto cuda_place = BOOST_GET_CONST(platform::CUDAPlace, ctx.GetPlace()); memory::Copy(cuda_place, dst, cuda_place, residual, rows * cols * sizeof(T), ctx.stream()); - PADDLE_ENFORCE_CUDA_SUCCESS(cudaMemsetAsync( + PADDLE_ENFORCE_GPU_SUCCESS(cudaMemsetAsync( mask_data, 0, rows * cols * sizeof(MaskType), ctx.stream())); // call layernorm forward diff --git a/paddle/fluid/operators/fused/fusion_conv_inception_op.cc b/paddle/fluid/operators/fused/fusion_conv_inception_op.cc index ea1e9512ca519fd614cddd0ab771ca0918108061..eeeb004003c9c405ad5b11889228caf85b2114a6 100644 --- a/paddle/fluid/operators/fused/fusion_conv_inception_op.cc +++ b/paddle/fluid/operators/fused/fusion_conv_inception_op.cc @@ -16,7 +16,7 @@ limitations under the License. */ #include #include "paddle/fluid/framework/op_registry.h" #ifdef PADDLE_WITH_CUDA -#include "paddle/fluid/platform/cudnn_helper.h" +#include "paddle/fluid/platform/device/gpu/gpu_dnn.h" #endif #include "paddle/fluid/platform/cudnn_workspace_helper.h" diff --git a/paddle/fluid/operators/fused/fusion_conv_inception_op.cu b/paddle/fluid/operators/fused/fusion_conv_inception_op.cu index b3796f1df5fdf207b26bebfd89704d4387f0d256..44312be79739853232e908437043e0e6ab14c0a7 100644 --- a/paddle/fluid/operators/fused/fusion_conv_inception_op.cu +++ b/paddle/fluid/operators/fused/fusion_conv_inception_op.cu @@ -14,7 +14,7 @@ limitations under the License. 
*/ #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/conv_cudnn_op_cache.h" -#include "paddle/fluid/platform/cudnn_helper.h" +#include "paddle/fluid/platform/device/gpu/gpu_dnn.h" DECLARE_uint64(conv_workspace_size_limit); @@ -95,15 +95,15 @@ class CUDNNConvInceptionFusionOpKernel : public framework::OpKernel { cudnnConvolutionDescriptor_t* conv_desc = new cudnnConvolutionDescriptor_t[4]; for (int i = 0; i < 4; ++i) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateFilterDescriptor(&filter_desc[i])); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&bias_desc[i])); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&in_desc[i])); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&out_desc[i])); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateConvolutionDescriptor(&conv_desc[i])); } @@ -127,11 +127,11 @@ class CUDNNConvInceptionFusionOpKernel : public framework::OpKernel { for (int i = 0; i < 4; ++i) { filter_dims.push_back(framework::vectorize(filters[i]->dims())); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetFilterNdDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetFilterNdDescriptor( filter_desc[i], cudnn_dtype, format, 4, filter_dims[i].data())); bias_dims.push_back({1, filter_dims[i][0], 1, 1}); bias_strides.push_back({filter_dims[i][0], 1, 1, 1}); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( bias_desc[i], cudnn_dtype, 4, bias_dims[i].data(), bias_strides[i].data())); in_dims.push_back({n, filter_dims[i][1], h, w}); @@ -140,22 +140,21 @@ class CUDNNConvInceptionFusionOpKernel : public framework::OpKernel { out_strides.push_back({oc * h * w, h * w, w, 1}); if (i < 2) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnSetConvolutionNdDescriptor( conv_desc[i], 2, k0x0.data(), k1x1.data(), k1x1.data(), CUDNN_CROSS_CORRELATION, compute_type)); } else { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnSetConvolutionNdDescriptor( conv_desc[i], 2, k1x1.data(), k1x1.data(), k1x1.data(), CUDNN_CROSS_CORRELATION, compute_type)); } - PADDLE_ENFORCE_CUDA_SUCCESS( - platform::dynload::cudnnSetConvolutionMathType(conv_desc[i], - CUDNN_DEFAULT_MATH)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetConvolutionMathType( + conv_desc[i], CUDNN_DEFAULT_MATH)); #if CUDA_VERSION >= 11000 && CUDNN_VERSION >= 8000 if (!platform::allow_tf32_cudnn) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnSetConvolutionMathType(conv_desc[i], CUDNN_FMA_MATH)); } @@ -165,7 +164,7 @@ class CUDNNConvInceptionFusionOpKernel : public framework::OpKernel { in_strides[2][0] = oc * h * w; out_strides[2][0] = filter_dims[2][0] * h * w; // this out is continuous. 
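
Elsewhere in this patch (for example the index_select_op.cu hunks below), the hand-written stream-synchronize #ifdef is collapsed into a single call to platform::GpuStreamSync. A hedged sketch of what that helper is assumed to do; its actual definition lives in the new device/gpu platform code and is not shown here:

    // Assumed behaviour of the device-neutral sync helper: forward to the
    // CUDA or HIP runtime call that callers previously wrote by hand.
    void GpuStreamSync(gpuStream_t stream) {
    #ifdef PADDLE_WITH_HIP
      PADDLE_ENFORCE_GPU_SUCCESS(hipStreamSynchronize(stream));
    #else
      PADDLE_ENFORCE_GPU_SUCCESS(cudaStreamSynchronize(stream));
    #endif
    }
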
in_strides[3][0] = filter_dims[2][0] * h * w; - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnSetConvolutionGroupCount(conv_desc[2], 2)); cudnnConvolutionFwdAlgo_t algo[4]; @@ -181,9 +180,9 @@ class CUDNNConvInceptionFusionOpKernel : public framework::OpKernel { } for (int i = 0; i < 4; ++i) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( in_desc[i], cudnn_dtype, 4, in_dims[i].data(), in_strides[i].data())); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( out_desc[i], cudnn_dtype, 4, out_dims[i].data(), out_strides[i].data())); @@ -192,13 +191,13 @@ class CUDNNConvInceptionFusionOpKernel : public framework::OpKernel { size_t tmp_size = 0; std::unique_ptr perf_results( new cudnnConvolutionFwdAlgoPerf_t[kNUM_CUDNN_FWD_ALGS]); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnGetConvolutionForwardAlgorithm_v7( handle, in_desc[i], filter_desc[i], conv_desc[i], out_desc[i], kNUM_CUDNN_FWD_ALGS, &perf_count, perf_results.get())); algo[i] = (perf_results.get())[best_algo_idx].algo; - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnGetConvolutionForwardWorkspaceSize( handle, in_desc[i], filter_desc[i], conv_desc[i], out_desc[i], algo[i], &tmp_size)); @@ -215,7 +214,7 @@ class CUDNNConvInceptionFusionOpKernel : public framework::OpKernel { // branch1: pool + 1x1 conv ScalingParamType alpha = 1.0f, beta = 0.0f; - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnPoolingForward( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnPoolingForward( handle, cudnn_pool_desc, &alpha, cudnn_input_desc, input_data, &beta, pool_out_desc, temp_data)); @@ -237,7 +236,7 @@ class CUDNNConvInceptionFusionOpKernel : public framework::OpKernel { for (int i = 0; i < 4; ++i) { auto func = [&](void* cudnn_workspace) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnConvolutionBiasActivationForward( handle, &alpha, in_desc[i], in_datas[i], filter_desc[i], static_cast(filters[i]->data()), conv_desc[i], @@ -252,34 +251,34 @@ class CUDNNConvInceptionFusionOpKernel : public framework::OpKernel { cudnnTensorDescriptor_t x_desc; cudnnTensorDescriptor_t y_desc; - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&x_desc)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&y_desc)); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( x_desc, cudnn_dtype, 4, out_dims[3].data(), out_strides[2].data())); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( y_desc, cudnn_dtype, 4, out_dims[3].data(), out_strides[3].data())); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnTransformTensor( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnTransformTensor( handle, CudnnDataType::kOne(), x_desc, static_cast(out_datas[2]), CudnnDataType::kZero(), y_desc, static_cast(output_data + (oc0 + oc1) * h * w))); for (int i = 0; i < 4; ++i) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(in_desc[i])); - 
PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(out_desc[i])); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyFilterDescriptor(filter_desc[i])); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(bias_desc[i])); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyConvolutionDescriptor(conv_desc[i])); } - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(x_desc)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(y_desc)); } }; diff --git a/paddle/fluid/operators/fused/fusion_transpose_flatten_concat_op.cu.cc b/paddle/fluid/operators/fused/fusion_transpose_flatten_concat_op.cu.cc index 37a442a78157166764b786215d97e11d661df894..1fa4225934d394c1eef50dfb4f0a0a3f6a54f589 100644 --- a/paddle/fluid/operators/fused/fusion_transpose_flatten_concat_op.cu.cc +++ b/paddle/fluid/operators/fused/fusion_transpose_flatten_concat_op.cu.cc @@ -14,7 +14,7 @@ limitations under the License. */ #include "paddle/fluid/operators/fused/fusion_transpose_flatten_concat_op.h" #include "paddle/fluid/framework/op_registry.h" -#include "paddle/fluid/platform/cudnn_helper.h" +#include "paddle/fluid/platform/device/gpu/gpu_dnn.h" namespace paddle { namespace platform { @@ -50,9 +50,9 @@ class TransposeFlattenConcatFusionKernel : public framework::OpKernel { cudnnTensorDescriptor_t in_desc; cudnnTensorDescriptor_t out_desc; - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&in_desc)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&out_desc)); cudnnDataType_t cudnn_dtype = CudnnDataType::type; @@ -92,12 +92,12 @@ class TransposeFlattenConcatFusionKernel : public framework::OpKernel { dims_y[i] = 1; } - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( in_desc, cudnn_dtype, max_dim, dims_y.data(), stride_x.data())); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( out_desc, cudnn_dtype, max_dim, dims_y.data(), stride_y.data())); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnTransformTensor( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnTransformTensor( handle, CudnnDataType::kOne(), in_desc, static_cast(ins[k]->data()), CudnnDataType::kZero(), out_desc, static_cast(odata))); @@ -108,9 +108,9 @@ class TransposeFlattenConcatFusionKernel : public framework::OpKernel { odata += flat_shape[1]; } } - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(in_desc)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(out_desc)); } }; diff --git a/paddle/fluid/operators/gather.cu.h b/paddle/fluid/operators/gather.cu.h index 05af4ff150f3991ee0b26757291a2648644129b5..700de8074ff8a6dfc5f2d49d201261e8dfa1726c 100644 --- a/paddle/fluid/operators/gather.cu.h +++ b/paddle/fluid/operators/gather.cu.h @@ -19,8 +19,8 @@ limitations under the License. 
*/ #include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/memory/malloc.h" #include "paddle/fluid/operators/math/math_function.h" -#include "paddle/fluid/platform/cuda_primitives.h" -#include "paddle/fluid/platform/gpu_launch_config.h" +#include "paddle/fluid/platform/device/gpu/gpu_launch_config.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/fluid/platform/place.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/graph_send_recv_op.cu b/paddle/fluid/operators/graph_send_recv_op.cu index d9f56ec4dc0388d0ec4d375ab33eeb7761e717bb..6e5e203e2d9434699333cc8974769623d132876f 100644 --- a/paddle/fluid/operators/graph_send_recv_op.cu +++ b/paddle/fluid/operators/graph_send_recv_op.cu @@ -18,7 +18,7 @@ limitations under the License. */ #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/operators/graph_send_recv_op.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/fluid/platform/place.h" namespace paddle { diff --git a/paddle/fluid/operators/grid_sampler_cudnn_op.cu.cc b/paddle/fluid/operators/grid_sampler_cudnn_op.cu.cc index d2002b487ca3372c599e44c8673736a9db325c51..080dadeacaae71302b0105bd17f38e250f221240 100644 --- a/paddle/fluid/operators/grid_sampler_cudnn_op.cu.cc +++ b/paddle/fluid/operators/grid_sampler_cudnn_op.cu.cc @@ -16,7 +16,7 @@ limitations under the License. */ // HIP not support cudnnSpatialTfGridGeneratorForward #include "paddle/fluid/framework/op_registry.h" -#include "paddle/fluid/platform/cudnn_helper.h" +#include "paddle/fluid/platform/device/gpu/gpu_dnn.h" namespace paddle { namespace framework { @@ -70,7 +70,7 @@ class CUDNNGridSampleOpKernel : public framework::OpKernel { cudnnTensorDescriptor_t cudnn_output_desc = output_desc.descriptor( DataLayout::kNCHW, framework::vectorize(output->dims())); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSpatialTfSamplerForward( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSpatialTfSamplerForward( handle, cudnn_st_desc, CudnnDataType::kOne(), cudnn_input_desc, input_data, grid_data, CudnnDataType::kZero(), cudnn_output_desc, output_data)); @@ -123,13 +123,12 @@ class CUDNNGridSampleGradOpKernel : public framework::OpKernel { output_grad_desc.descriptor( DataLayout::kNCHW, framework::vectorize(output_grad->dims())); - PADDLE_ENFORCE_CUDA_SUCCESS( - platform::dynload::cudnnSpatialTfSamplerBackward( - handle, cudnn_st_dest, CudnnDataType::kOne(), cudnn_input_desc, - input_data, CudnnDataType::kZero(), cudnn_input_grad_desc, - input_grad_data, CudnnDataType::kOne(), cudnn_output_grad_desc, - output_grad_data, grid_data, CudnnDataType::kZero(), - grid_grad_data)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSpatialTfSamplerBackward( + handle, cudnn_st_dest, CudnnDataType::kOne(), cudnn_input_desc, + input_data, CudnnDataType::kZero(), cudnn_input_grad_desc, + input_grad_data, CudnnDataType::kOne(), cudnn_output_grad_desc, + output_grad_data, grid_data, CudnnDataType::kZero(), + grid_grad_data)); } }; diff --git a/paddle/fluid/operators/grid_sampler_op.cc b/paddle/fluid/operators/grid_sampler_op.cc index 0b410f07fcb576e1f68f65c2a1684e9fe0552d63..04aa6a3e10f6e3f55f9845d1b4b6bd6aa762c016 100644 --- a/paddle/fluid/operators/grid_sampler_op.cc +++ b/paddle/fluid/operators/grid_sampler_op.cc @@ -17,12 +17,7 @@ limitations under the License. 
*/ #include #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/op_version_registry.h" -#ifdef PADDLE_WITH_CUDA -#include "paddle/fluid/platform/cudnn_helper.h" -#endif -#ifdef PADDLE_WITH_HIP -#include "paddle/fluid/platform/miopen_helper.h" -#endif +#include "paddle/fluid/platform/device/gpu/gpu_dnn.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/grid_sampler_op.cu b/paddle/fluid/operators/grid_sampler_op.cu index 762d14096a5ab4d094894ad7c0ec822f5cc25d3b..8e9f445f3b1169f9536821a507ab0b71d51138fb 100644 --- a/paddle/fluid/operators/grid_sampler_op.cu +++ b/paddle/fluid/operators/grid_sampler_op.cu @@ -15,9 +15,9 @@ limitations under the License. */ #include #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/grid_sampler_op.h" -#include "paddle/fluid/platform/cuda_device_function.h" -#include "paddle/fluid/platform/cuda_primitives.h" -#include "paddle/fluid/platform/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_device_function.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/group_norm_op.cu b/paddle/fluid/operators/group_norm_op.cu index e029c84090af19e186e30be63f28b01270ef94c5..055fd791af5a3e9dfa6f3df6b7843b2a001a02a6 100644 --- a/paddle/fluid/operators/group_norm_op.cu +++ b/paddle/fluid/operators/group_norm_op.cu @@ -21,8 +21,8 @@ namespace cub = hipcub; #endif #include "paddle/fluid/operators/group_norm_op.h" -#include "paddle/fluid/platform/cuda_device_function.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_device_function.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/histogram_op.cu b/paddle/fluid/operators/histogram_op.cu index 6a9183a8b465b7526f956b84b23b3d2be6c0f141..b9419cbcc57b58fc295a42f5139a56116345d7f5 100644 --- a/paddle/fluid/operators/histogram_op.cu +++ b/paddle/fluid/operators/histogram_op.cu @@ -14,8 +14,8 @@ limitations under the License. 
*/ #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/operators/histogram_op.h" -#include "paddle/fluid/platform/cuda_primitives.h" -#include "paddle/fluid/platform/gpu_launch_config.h" +#include "paddle/fluid/platform/device/gpu/gpu_launch_config.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/fluid/platform/hostdevice.h" namespace paddle { diff --git a/paddle/fluid/operators/index_sample_op.cu b/paddle/fluid/operators/index_sample_op.cu index 46dd91fed6cbc17487e7e49c14003953fc8772c2..40a968b8a397d5097e32aae3443a381aa50528d9 100644 --- a/paddle/fluid/operators/index_sample_op.cu +++ b/paddle/fluid/operators/index_sample_op.cu @@ -15,8 +15,8 @@ #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/index_sample_op.h" #include "paddle/fluid/operators/math/math_function.h" -#include "paddle/fluid/platform/cuda_device_function.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_device_function.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/index_select_op.cu b/paddle/fluid/operators/index_select_op.cu index 2353781daaa399aa050ea2e24ac5952d4e67410f..acf959896f949996f2f428c049a19fe082843c2a 100644 --- a/paddle/fluid/operators/index_select_op.cu +++ b/paddle/fluid/operators/index_select_op.cu @@ -15,7 +15,7 @@ #pragma once #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/index_select_op.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { @@ -110,22 +110,14 @@ class IndexSelectCUDAKernel : public framework::OpKernel { (numel + PADDLE_CUDA_NUM_THREADS - 1) / PADDLE_CUDA_NUM_THREADS, PADDLE_CUDA_NUM_THREADS, 0, stream>>>(in_data, out_data, index_data, numel, stride, size, delta); -#ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(hipStreamSynchronize(stream)); -#else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamSynchronize(stream)); -#endif + platform::GpuStreamSync(stream); } else { const int* index_data = index->data(); index_select_cuda_kernel<<<(numel + PADDLE_CUDA_NUM_THREADS - 1) / PADDLE_CUDA_NUM_THREADS, PADDLE_CUDA_NUM_THREADS, 0, stream>>>( in_data, out_data, index_data, numel, stride, size, delta); -#ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(hipStreamSynchronize(stream)); -#else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamSynchronize(stream)); -#endif + platform::GpuStreamSync(stream); } } }; @@ -181,11 +173,7 @@ class IndexSelectGradCUDAKernel : public framework::OpKernel { PADDLE_CUDA_NUM_THREADS, 0, stream>>>(output_grad_data, in_grad_data, index_data, index_nums, out_nums, stride, size, delta); -#ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(hipStreamSynchronize(stream)); -#else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamSynchronize(stream)); -#endif + platform::GpuStreamSync(stream); } else { const int* index_data = index->data(); index_select_grad_cuda_kernel<<< @@ -193,11 +181,7 @@ class IndexSelectGradCUDAKernel : public framework::OpKernel { PADDLE_CUDA_NUM_THREADS, 0, stream>>>(output_grad_data, in_grad_data, index_data, index_nums, out_nums, stride, size, delta); -#ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(hipStreamSynchronize(stream)); -#else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamSynchronize(stream)); -#endif + platform::GpuStreamSync(stream); } } }; diff --git a/paddle/fluid/operators/instance_norm_op.cu 
b/paddle/fluid/operators/instance_norm_op.cu index affd0b7e1edd70b34e2838e2d8b8599dbc9568e2..e0401366693b1b46b13fccc29c3dbda3ee60c4d3 100644 --- a/paddle/fluid/operators/instance_norm_op.cu +++ b/paddle/fluid/operators/instance_norm_op.cu @@ -26,12 +26,7 @@ namespace cub = hipcub; #include "paddle/fluid/framework/data_layout.h" #include "paddle/fluid/operators/instance_norm_op.h" #include "paddle/fluid/operators/math/math_function.h" -#ifdef PADDLE_WITH_CUDA -#include "paddle/fluid/platform/cudnn_helper.h" -#endif -#ifdef PADDLE_WITH_HIP -#include "paddle/fluid/platform/miopen_helper.h" -#endif +#include "paddle/fluid/platform/device/gpu/gpu_dnn.h" namespace paddle { namespace operators { @@ -114,17 +109,17 @@ class InstanceNormKernel miopenTensorDescriptor_t data_desc_; miopenTensorDescriptor_t in_param_desc_; - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenCreateTensorDescriptor(&data_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenCreateTensorDescriptor(&in_param_desc_)); #else cudnnTensorDescriptor_t data_desc_; cudnnTensorDescriptor_t in_param_desc_; - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&data_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&in_param_desc_)); #endif if (epsilon <= CUDNN_BN_MIN_EPSILON - FLT_EPSILON) { @@ -143,20 +138,19 @@ class InstanceNormKernel auto &dev_ctx = ctx.template device_context(); #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenSetTensorDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenSetTensorDescriptor( data_desc_, CudnnDataType::type, x_dims.size() > 3 ? x_dims.size() : 4, const_cast(dims.data()), const_cast(strides.data()))); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenDeriveBNTensorDescriptor( in_param_desc_, data_desc_, miopenBNSpatial)); #else - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( data_desc_, CudnnDataType::type, x_dims.size() > 3 ? 
x_dims.size() : 4, dims.data(), strides.data())); - PADDLE_ENFORCE_CUDA_SUCCESS( - platform::dynload::cudnnDeriveBNTensorDescriptor( - in_param_desc_, data_desc_, CUDNN_BATCHNORM_SPATIAL)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnDeriveBNTensorDescriptor( + in_param_desc_, data_desc_, CUDNN_BATCHNORM_SPATIAL)); #endif const auto *scale = ctx.Input("Scale"); @@ -202,7 +196,7 @@ class InstanceNormKernel functor(dev_ctx, saved_variance, static_cast>(0)); #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenBatchNormalizationForwardTraining( handle, miopenBNSpatial, const_cast( @@ -225,12 +219,12 @@ class InstanceNormKernel saved_variance->template mutable_data>( ctx.GetPlace())))); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenDestroyTensorDescriptor(data_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenDestroyTensorDescriptor(in_param_desc_)); #else - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnBatchNormalizationForwardTraining( handle, CUDNN_BATCHNORM_SPATIAL, CudnnDataType::kOne(), CudnnDataType::kZero(), data_desc_, x_tmp.template data(), @@ -243,9 +237,9 @@ class InstanceNormKernel saved_variance->template mutable_data>( ctx.GetPlace()))); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(data_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(in_param_desc_)); #endif } @@ -396,17 +390,17 @@ class InstanceNormGradKernel miopenTensorDescriptor_t data_desc_; miopenTensorDescriptor_t in_param_desc_; - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenCreateTensorDescriptor(&data_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenCreateTensorDescriptor(&in_param_desc_)); #else cudnnTensorDescriptor_t data_desc_; cudnnTensorDescriptor_t in_param_desc_; - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&data_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&in_param_desc_)); #endif @@ -418,20 +412,19 @@ class InstanceNormGradKernel epsilon = std::max(epsilon, CUDNN_BN_MIN_EPSILON); #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenSetTensorDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenSetTensorDescriptor( data_desc_, CudnnDataType::type, x_dims.size() > 3 ? x_dims.size() : 4, const_cast(dims.data()), const_cast(strides.data()))); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenDeriveBNTensorDescriptor( in_param_desc_, data_desc_, miopenBNSpatial)); #else - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( data_desc_, CudnnDataType::type, x_dims.size() > 3 ? 
x_dims.size() : 4, dims.data(), strides.data())); - PADDLE_ENFORCE_CUDA_SUCCESS( - platform::dynload::cudnnDeriveBNTensorDescriptor( - in_param_desc_, data_desc_, CUDNN_BATCHNORM_SPATIAL)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnDeriveBNTensorDescriptor( + in_param_desc_, data_desc_, CUDNN_BATCHNORM_SPATIAL)); #endif const auto *saved_mean = ctx.Input("SavedMean"); @@ -442,7 +435,7 @@ class InstanceNormGradKernel saved_var->template data>(); if (d_scale && d_bias) { #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenBatchNormalizationBackward( dev_ctx.cudnn_handle(), miopenBNSpatial, CudnnDataType::kOne(), CudnnDataType::kZero(), CudnnDataType::kOne(), @@ -456,7 +449,7 @@ class InstanceNormGradKernel ctx.GetPlace()), epsilon, saved_mean_data, saved_var_data)); #else - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnBatchNormalizationBackward( dev_ctx.cudnn_handle(), CUDNN_BATCHNORM_SPATIAL, CudnnDataType::kOne(), CudnnDataType::kZero(), @@ -487,14 +480,14 @@ class InstanceNormGradKernel } #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenDestroyTensorDescriptor(data_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenDestroyTensorDescriptor(in_param_desc_)); #else - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(data_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(in_param_desc_)); #endif } diff --git a/paddle/fluid/operators/interpolate_op.cu b/paddle/fluid/operators/interpolate_op.cu index 6be7dbdc110d526048c46f9f0b900ccd45d9dd62..3c857eb326ace4d3afd3b89f150ed24215d0094a 100644 --- a/paddle/fluid/operators/interpolate_op.cu +++ b/paddle/fluid/operators/interpolate_op.cu @@ -12,8 +12,8 @@ #include #include #include "paddle/fluid/operators/interpolate_op.h" -#include "paddle/fluid/platform/cuda_primitives.h" -#include "paddle/fluid/platform/gpu_launch_config.h" +#include "paddle/fluid/platform/device/gpu/gpu_launch_config.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/interpolate_v2_op.cu b/paddle/fluid/operators/interpolate_v2_op.cu index fe9228135606dc4265e3c4c88e4e46078296ef70..bc1ab704aafe3a31bc4f9ebc55a963ba9288035d 100644 --- a/paddle/fluid/operators/interpolate_v2_op.cu +++ b/paddle/fluid/operators/interpolate_v2_op.cu @@ -13,9 +13,9 @@ #include #include "paddle/fluid/operators/interpolate_v2_op.h" #include "paddle/fluid/operators/math/math_cuda_utils.h" -#include "paddle/fluid/platform/cuda_device_function.h" -#include "paddle/fluid/platform/cuda_primitives.h" -#include "paddle/fluid/platform/gpu_launch_config.h" +#include "paddle/fluid/platform/device/gpu/gpu_device_function.h" +#include "paddle/fluid/platform/device/gpu/gpu_launch_config.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/kernel_primitives/compute_primitives.h b/paddle/fluid/operators/kernel_primitives/compute_primitives.h index 73316d66b6cf26a08e9a19105a1c38fb1875c123..2320b9e0b2fbf47610365155558b869bd5d77b38 100644 --- a/paddle/fluid/operators/kernel_primitives/compute_primitives.h +++ b/paddle/fluid/operators/kernel_primitives/compute_primitives.h @@ -21,7 +21,7 @@ #include #endif -#include 
"paddle/fluid/platform/cuda_device_function.h" +#include "paddle/fluid/platform/device/gpu/gpu_device_function.h" #include "paddle/fluid/platform/float16.h" namespace paddle { diff --git a/paddle/fluid/operators/layer_norm_kernel.cu.h b/paddle/fluid/operators/layer_norm_kernel.cu.h index 4280c86ca99ab80754a0adbd29cb339b05916787..3656bd1a181671bcdc853267135b623df3238f20 100644 --- a/paddle/fluid/operators/layer_norm_kernel.cu.h +++ b/paddle/fluid/operators/layer_norm_kernel.cu.h @@ -23,13 +23,8 @@ namespace cub = hipcub; #endif #include "paddle/fluid/framework/ddim.h" -#include "paddle/fluid/platform/cuda_device_function.h" -#ifdef PADDLE_WITH_CUDA -#include "paddle/fluid/platform/cudnn_helper.h" -#endif -#ifdef PADDLE_WITH_HIP -#include "paddle/fluid/platform/miopen_helper.h" -#endif +#include "paddle/fluid/platform/device/gpu/gpu_device_function.h" +#include "paddle/fluid/platform/device/gpu/gpu_dnn.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/linspace_op.cu b/paddle/fluid/operators/linspace_op.cu index a4f0693323297c286d24b169f1120e4017992a9b..4bf2a7cb372cb72fa4172091a5cba1d8c343029d 100644 --- a/paddle/fluid/operators/linspace_op.cu +++ b/paddle/fluid/operators/linspace_op.cu @@ -15,7 +15,7 @@ limitations under the License. */ #include "paddle/fluid/framework/data_type_transform.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/linspace_op.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/lite/lite_engine_op.h b/paddle/fluid/operators/lite/lite_engine_op.h index ec9f5dd95d4d0f83f131fee915369941e8e55f4d..5d2a1683d381b40f9344a379618a61afc3e728d3 100644 --- a/paddle/fluid/operators/lite/lite_engine_op.h +++ b/paddle/fluid/operators/lite/lite_engine_op.h @@ -26,7 +26,7 @@ limitations under the License. */ #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/operator.h" #include "paddle/fluid/inference/analysis/helper.h" -#include "paddle/fluid/platform/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" #include "paddle/fluid/inference/lite/engine.h" #include "paddle/fluid/inference/lite/tensor_utils.h" diff --git a/paddle/fluid/operators/log_softmax_op.cu b/paddle/fluid/operators/log_softmax_op.cu index 7c47ad90502ebd1f1aa0524110c501f38034b936..6676cde1cafcabfcaee325bafe3be3703fe1a0a2 100644 --- a/paddle/fluid/operators/log_softmax_op.cu +++ b/paddle/fluid/operators/log_softmax_op.cu @@ -16,7 +16,7 @@ #include "paddle/fluid/operators/amp/fp16_type_traits.h" #include "paddle/fluid/operators/log_softmax_op.h" #include "paddle/fluid/operators/math/functors.h" -#include "paddle/fluid/platform/cuda_device_function.h" +#include "paddle/fluid/platform/device/gpu/gpu_device_function.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/lookup_table_op.cu b/paddle/fluid/operators/lookup_table_op.cu index 3edea025b2a0440f4fe4f64be7959df3332f3b99..5aa546cbcc21aead148dba6cd21e42673f5fb9b3 100644 --- a/paddle/fluid/operators/lookup_table_op.cu +++ b/paddle/fluid/operators/lookup_table_op.cu @@ -15,7 +15,7 @@ limitations under the License. 
*/ #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/lookup_table_op.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/fluid/platform/float16.h" namespace paddle { diff --git a/paddle/fluid/operators/lookup_table_v2_op.cu b/paddle/fluid/operators/lookup_table_v2_op.cu index 493966ecda7bda2d968b1516cebdafdef58d8830..317f9eeb94f39ac1bb51dc82bec970b7aa41675e 100644 --- a/paddle/fluid/operators/lookup_table_v2_op.cu +++ b/paddle/fluid/operators/lookup_table_v2_op.cu @@ -15,7 +15,7 @@ limitations under the License. */ #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/lookup_table_v2_op.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/fluid/platform/float16.h" namespace paddle { diff --git a/paddle/fluid/operators/margin_cross_entropy_op.cu b/paddle/fluid/operators/margin_cross_entropy_op.cu index 7c5e64d2afa46a9a57fb8411c0592cd567d7cd8d..1deaa3ef1ee7c6d212c63bcc06fefade4714d8a2 100644 --- a/paddle/fluid/operators/margin_cross_entropy_op.cu +++ b/paddle/fluid/operators/margin_cross_entropy_op.cu @@ -30,7 +30,7 @@ namespace cub = hipcub; #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) #include "paddle/fluid/platform/collective_helper.h" -#include "paddle/fluid/platform/nccl_helper.h" +#include "paddle/fluid/platform/device/gpu/nccl_helper.h" #endif namespace paddle { @@ -69,7 +69,7 @@ void GetClassInterval(const gpuStream_t& stream, const platform::Place& place, platform::DeviceContextPool::Instance().Get(place)) ->stream(); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclAllReduce( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce( num_classes_per_device_ptr, num_classes_per_device_ptr, num_classes_per_device.numel(), platform::ToNCCLDataType(num_classes_per_device.type()), ncclSum, @@ -314,7 +314,7 @@ class MarginCrossEntropyOpCUDAKernel : public framework::OpKernel { #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) if (nranks > 1) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclAllReduce( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce( logits_max_buff, logits_max_buff, logits_max.numel(), platform::ToNCCLDataType(logits_max.type()), ncclMax, comm->comm(), stream)); @@ -335,7 +335,7 @@ class MarginCrossEntropyOpCUDAKernel : public framework::OpKernel { #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) if (nranks > 1) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclAllReduce( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce( sum_exp_logits_buff, sum_exp_logits_buff, sum_exp_logits.numel(), platform::ToNCCLDataType(sum_exp_logits.type()), ncclSum, comm->comm(), stream)); @@ -368,7 +368,7 @@ class MarginCrossEntropyOpCUDAKernel : public framework::OpKernel { #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) if (nranks > 1) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclAllReduce( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce( loss_ptr, loss_ptr, loss->numel(), platform::ToNCCLDataType(loss->type()), ncclSum, comm->comm(), stream)); diff --git a/paddle/fluid/operators/math/beam_search.cu b/paddle/fluid/operators/math/beam_search.cu index ed3ead47d171efb4128a294c7d7a24324c7187b7..0cc552d34c5872165728c9186ff8fcb7d496627c 100644 --- 
a/paddle/fluid/operators/math/beam_search.cu +++ b/paddle/fluid/operators/math/beam_search.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/math/beam_search.h" -#include "paddle/fluid/platform/cuda_device_function.h" +#include "paddle/fluid/platform/device/gpu/gpu_device_function.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/blas_impl.cu.h b/paddle/fluid/operators/math/blas_impl.cu.h index 70c6cf9dcab03619a5ed8c57036b3f03365da7a7..92162e639ff860d504c07f594c9b3b23a6ac437e 100644 --- a/paddle/fluid/operators/math/blas_impl.cu.h +++ b/paddle/fluid/operators/math/blas_impl.cu.h @@ -17,7 +17,7 @@ #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/platform/dynload/cublas.h" -#include "paddle/fluid/platform/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" DECLARE_bool(enable_cublas_tensor_op_math); @@ -32,33 +32,33 @@ template <> struct CUBlas { template static void GEMM(ARGS... args) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cublasSgemm(args...)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasSgemm(args...)); } template static void AXPY(ARGS... args) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cublasSaxpy(args...)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasSaxpy(args...)); } template static void SCAL(ARGS... args) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cublasSscal(args...)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasSscal(args...)); } template static void VCOPY(ARGS... args) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cublasScopy(args...)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasScopy(args...)); } template static void GEMV(ARGS... args) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cublasSgemv(args...)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasSgemv(args...)); } template static void GEMM_STRIDED_BATCH(ARGS... args) { #if CUDA_VERSION >= 8000 - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cublasSgemmStridedBatched(args...)); #else PADDLE_THROW(platform::errors::Unimplemented( @@ -82,7 +82,7 @@ struct CUBlas { VLOG(5) << "use_tensor_op_math: " << (dev_ctx->tensor_core_available() ? "True" : "False"); dev_ctx->TensorCoreCublasCallIfAvailable([&](cublasHandle_t handle) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cublasSgemmEx( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasSgemmEx( handle, transa, transb, m, n, k, alpha, A, Atype, lda, B, Btype, ldb, beta, C, Ctype, ldc)); }); @@ -94,36 +94,33 @@ struct CUBlas { template static void TRSM(ARGS... args) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cublasStrsm(args...)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasStrsm(args...)); } template static void GETRF_BATCH(ARGS... args) { - PADDLE_ENFORCE_CUDA_SUCCESS( - platform::dynload::cublasSgetrfBatched(args...)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasSgetrfBatched(args...)); } template static void GETRI_BATCH(ARGS... args) { - PADDLE_ENFORCE_CUDA_SUCCESS( - platform::dynload::cublasSgetriBatched(args...)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasSgetriBatched(args...)); } template static void MATINV_BATCH(ARGS... args) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cublasSmatinvBatched(args...)); } template static void GETRS_BATCH(ARGS... 
args) { - PADDLE_ENFORCE_CUDA_SUCCESS( - platform::dynload::cublasSgetrsBatched(args...)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasSgetrsBatched(args...)); } template static void TRSM_BATCH(ARGS... args) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cublasStrsmBatched(args...)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasStrsmBatched(args...)); } }; @@ -131,33 +128,33 @@ template <> struct CUBlas { template static void GEMM(ARGS... args) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cublasDgemm(args...)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasDgemm(args...)); } template static void AXPY(ARGS... args) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cublasDaxpy(args...)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasDaxpy(args...)); } template static void SCAL(ARGS... args) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cublasDscal(args...)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasDscal(args...)); } template static void VCOPY(ARGS... args) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cublasDcopy(args...)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasDcopy(args...)); } template static void GEMV(ARGS... args) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cublasDgemv(args...)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasDgemv(args...)); } template static void GEMM_STRIDED_BATCH(ARGS... args) { #if CUDA_VERSION >= 8000 - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cublasDgemmStridedBatched(args...)); #else PADDLE_THROW(platform::errors::Unimplemented( @@ -173,36 +170,33 @@ struct CUBlas { template static void TRSM(ARGS... args) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cublasDtrsm(args...)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasDtrsm(args...)); } template static void GETRF_BATCH(ARGS... args) { - PADDLE_ENFORCE_CUDA_SUCCESS( - platform::dynload::cublasDgetrfBatched(args...)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasDgetrfBatched(args...)); } template static void GETRI_BATCH(ARGS... args) { - PADDLE_ENFORCE_CUDA_SUCCESS( - platform::dynload::cublasDgetriBatched(args...)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasDgetriBatched(args...)); } template static void MATINV_BATCH(ARGS... args) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cublasDmatinvBatched(args...)); } template static void GETRS_BATCH(ARGS... args) { - PADDLE_ENFORCE_CUDA_SUCCESS( - platform::dynload::cublasDgetrsBatched(args...)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasDgetrsBatched(args...)); } template static void TRSM_BATCH(ARGS... 
args) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cublasDtrsmBatched(args...)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasDtrsmBatched(args...)); } }; @@ -215,7 +209,7 @@ struct CUBlas { const float16 *alpha, const float16 *A, int lda, const float16 *B, int ldb, const float16 *beta, float16 *C, int ldc) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cublasHgemm(handle, transa, transb, m, n, k, reinterpret_cast(alpha), reinterpret_cast(A), lda, @@ -235,7 +229,7 @@ struct CUBlas { long long int strideC, // NOLINT int batchCount) { #if CUDA_VERSION >= 8000 - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cublasHgemmStridedBatched( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasHgemmStridedBatched( handle, transa, transb, m, n, k, reinterpret_cast(alpha), reinterpret_cast(A), lda, strideA, @@ -270,7 +264,7 @@ struct CUBlas { #endif // CUDA_VERSION >= 9000 dev_ctx->TensorCoreCublasCallIfAvailable([&](cublasHandle_t handle) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cublasGemmEx( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasGemmEx( handle, transa, transb, m, n, k, alpha, A, Atype, lda, B, Btype, ldb, beta, C, Ctype, ldc, computeType, algo)); }); @@ -289,7 +283,7 @@ struct CUBlas> { const platform::complex *B, int ldb, const platform::complex *beta, platform::complex *C, int ldc) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cublasCgemv( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasCgemv( handle, transa, m, n, reinterpret_cast(alpha), reinterpret_cast(A), lda, reinterpret_cast(B), ldb, @@ -301,7 +295,7 @@ struct CUBlas> { const platform::complex *alpha, const platform::complex *X, const int incX, platform::complex *Y, const int incY) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cublasCaxpy( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasCaxpy( handle, n, reinterpret_cast(alpha), reinterpret_cast(X), incX, reinterpret_cast(Y), incY)); @@ -320,7 +314,7 @@ struct CUBlas> { long long int strideC, // NOLINT int batchCount) { #if CUDA_VERSION >= 8000 - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cublasCgemmStridedBatched( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasCgemmStridedBatched( handle, transa, transb, m, n, k, reinterpret_cast(alpha), reinterpret_cast(A), lda, strideA, @@ -340,7 +334,7 @@ struct CUBlas> { const platform::complex *B, int ldb, const platform::complex *beta, platform::complex *C, int ldc) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cublasCgemm( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasCgemm( handle, transa, transb, m, n, k, reinterpret_cast(alpha), reinterpret_cast(A), lda, @@ -355,7 +349,7 @@ struct CUBlas> { const paddle::platform::complex *alpha, const paddle::platform::complex *A, int lda, paddle::platform::complex *B, int ldb) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cublasCtrsm( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasCtrsm( handle, side, uplo, transa, diag, m, n, reinterpret_cast(alpha), reinterpret_cast(A), lda, @@ -384,7 +378,7 @@ struct CUBlas> { #endif // CUDA_VERSION >= 9000 dev_ctx->TensorCoreCublasCallIfAvailable([&](cublasHandle_t handle) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cublasGemmEx( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasGemmEx( handle, transa, transb, m, n, k, alpha, A, Atype, lda, B, Btype, ldb, beta, C, Ctype, ldc, computeType, algo)); }); @@ -401,7 +395,7 @@ struct CUBlas> { const paddle::platform::complex **A, int lda, paddle::platform::complex **B, int ldb, int 
batch_size) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cublasCtrsmBatched( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasCtrsmBatched( handle, side, uplo, transa, diag, m, n, reinterpret_cast(alpha), reinterpret_cast(A), lda, @@ -417,7 +411,7 @@ struct CUBlas> { const platform::complex *B, int ldb, const platform::complex *beta, platform::complex *C, int ldc) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cublasZgemv( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasZgemv( handle, transa, m, n, reinterpret_cast(alpha), reinterpret_cast(A), lda, reinterpret_cast(B), ldb, @@ -429,7 +423,7 @@ struct CUBlas> { const platform::complex *alpha, const platform::complex *X, const int incX, platform::complex *Y, const int incY) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cublasZaxpy( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasZaxpy( handle, n, reinterpret_cast(alpha), reinterpret_cast(X), incX, reinterpret_cast(Y), incY)); @@ -448,7 +442,7 @@ struct CUBlas> { long long int strideC, // NOLINT int batchCount) { #if CUDA_VERSION >= 8000 - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cublasZgemmStridedBatched( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasZgemmStridedBatched( handle, transa, transb, m, n, k, reinterpret_cast(alpha), reinterpret_cast(A), lda, strideA, @@ -468,7 +462,7 @@ struct CUBlas> { const platform::complex *B, int ldb, const platform::complex *beta, platform::complex *C, int ldc) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cublasZgemm( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasZgemm( handle, transa, transb, m, n, k, reinterpret_cast(alpha), reinterpret_cast(A), lda, @@ -483,7 +477,7 @@ struct CUBlas> { const paddle::platform::complex *alpha, const paddle::platform::complex *A, int lda, paddle::platform::complex *B, int ldb) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cublasZtrsm( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasZtrsm( handle, side, uplo, transa, diag, m, n, reinterpret_cast(alpha), reinterpret_cast(A), lda, @@ -497,7 +491,7 @@ struct CUBlas> { const paddle::platform::complex **A, int lda, paddle::platform::complex **B, int ldb, int batch_size) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cublasZtrsmBatched( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasZtrsmBatched( handle, side, uplo, transa, diag, m, n, reinterpret_cast(alpha), reinterpret_cast(A), lda, @@ -526,7 +520,7 @@ struct CUBlas> { #endif // CUDA_VERSION >= 9000 dev_ctx->TensorCoreCublasCallIfAvailable([&](cublasHandle_t handle) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cublasGemmEx( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasGemmEx( handle, transa, transb, m, n, k, alpha, A, Atype, lda, B, Btype, ldb, beta, C, Ctype, ldc, computeType, algo)); }); @@ -842,7 +836,7 @@ void Blas::BatchedGEMM( auto fp = std::is_same::value ? 
CUDA_R_32F : CUDA_R_16F; context_.TensorCoreCublasCallIfAvailable([&](cublasHandle_t handle) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cublasGemmStridedBatchedEx( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasGemmStridedBatchedEx( handle, cuTransB, cuTransA, N, M, K, &alpha, B, fp, ldb, strideB, A, fp, lda, strideA, &beta, C, fp, ldc, strideC, batchCount, fp, algo)); }); diff --git a/paddle/fluid/operators/math/blas_impl.hip.h b/paddle/fluid/operators/math/blas_impl.hip.h index f972d38adda5fbb2e507af4936546a19de4cdd41..32479189eea58d59752e47c75669c3aa1f700949 100644 --- a/paddle/fluid/operators/math/blas_impl.hip.h +++ b/paddle/fluid/operators/math/blas_impl.hip.h @@ -15,8 +15,8 @@ #pragma once #include "paddle/fluid/operators/math/math_function.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" #include "paddle/fluid/platform/dynload/rocblas.h" -#include "paddle/fluid/platform/gpu_info.h" DECLARE_bool(enable_cublas_tensor_op_math); @@ -31,32 +31,32 @@ template <> struct CUBlas { template static void GEMM(ARGS... args) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::rocblas_sgemm(args...)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::rocblas_sgemm(args...)); } template static void AXPY(ARGS... args) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::rocblas_saxpy(args...)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::rocblas_saxpy(args...)); } template static void SCAL(ARGS... args) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::rocblas_sscal(args...)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::rocblas_sscal(args...)); } template static void VCOPY(ARGS... args) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::rocblas_scopy(args...)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::rocblas_scopy(args...)); } template static void GEMV(ARGS... args) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::rocblas_sgemv(args...)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::rocblas_sgemv(args...)); } template static void GEMM_STRIDED_BATCH(ARGS... args) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::rocblas_sgemm_strided_batched(args...)); } @@ -70,7 +70,7 @@ struct CUBlas { template static void TRSM(ARGS... args) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::rocblas_strsm(args...)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::rocblas_strsm(args...)); } template @@ -102,32 +102,32 @@ template <> struct CUBlas { template static void GEMM(ARGS... args) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::rocblas_dgemm(args...)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::rocblas_dgemm(args...)); } template static void AXPY(ARGS... args) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::rocblas_daxpy(args...)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::rocblas_daxpy(args...)); } template static void SCAL(ARGS... args) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::rocblas_dscal(args...)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::rocblas_dscal(args...)); } template static void VCOPY(ARGS... args) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::rocblas_dcopy(args...)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::rocblas_dcopy(args...)); } template static void GEMV(ARGS... args) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::rocblas_dgemv(args...)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::rocblas_dgemv(args...)); } template static void GEMM_STRIDED_BATCH(ARGS... 
args) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::rocblas_dgemm_strided_batched(args...)); } @@ -139,7 +139,7 @@ struct CUBlas { template static void TRSM(ARGS... args) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::rocblas_dtrsm(args...)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::rocblas_dtrsm(args...)); } template @@ -176,7 +176,7 @@ struct CUBlas { const float16 *alpha, const float16 *A, int lda, const float16 *B, int ldb, const float16 *beta, float16 *C, int ldc) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::rocblas_hgemm( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::rocblas_hgemm( handle, transa, transb, m, n, k, reinterpret_cast(alpha), reinterpret_cast(A), lda, @@ -195,14 +195,13 @@ struct CUBlas { const float16 *beta, float16 *C, int ldc, long long int strideC, // NOLINT int batchCount) { - PADDLE_ENFORCE_CUDA_SUCCESS( - platform::dynload::rocblas_hgemm_strided_batched( - handle, transa, transb, m, n, k, - reinterpret_cast(alpha), - reinterpret_cast(A), lda, strideA, - reinterpret_cast(B), ldb, strideB, - reinterpret_cast(beta), - reinterpret_cast(C), ldc, strideC, batchCount)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::rocblas_hgemm_strided_batched( + handle, transa, transb, m, n, k, + reinterpret_cast(alpha), + reinterpret_cast(A), lda, strideA, + reinterpret_cast(B), ldb, strideB, + reinterpret_cast(beta), + reinterpret_cast(C), ldc, strideC, batchCount)); } // NOTES: GEMM_EX can use Tensor Core to accelerate matrix multiply. @@ -217,7 +216,7 @@ struct CUBlas { rocblas_datatype computeType) { rocblas_gemm_algo algo = rocblas_gemm_algo_standard; dev_ctx->TensorCoreCublasCallIfAvailable([&](rocblas_handle handle) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::rocblas_gemm_ex( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::rocblas_gemm_ex( handle, transa, transb, m, n, k, alpha, A, Atype, lda, B, Btype, ldb, beta, C, Ctype, ldc, C, Ctype, ldc, computeType, algo, 0, 0)); }); @@ -232,7 +231,7 @@ struct CUBlas> { const platform::complex *B, int ldb, const platform::complex *beta, platform::complex *C, int ldc) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::rocblas_cgemv( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::rocblas_cgemv( handle, transa, m, n, reinterpret_cast(alpha), reinterpret_cast(A), lda, @@ -245,7 +244,7 @@ struct CUBlas> { const platform::complex *alpha, const platform::complex *X, const int incX, platform::complex *Y, const int incY) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::rocblas_caxpy( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::rocblas_caxpy( handle, n, reinterpret_cast(alpha), reinterpret_cast(X), incX, reinterpret_cast(Y), incY)); @@ -263,15 +262,14 @@ struct CUBlas> { platform::complex *C, int ldc, long long int strideC, // NOLINT int batchCount) { - PADDLE_ENFORCE_CUDA_SUCCESS( - platform::dynload::rocblas_cgemm_strided_batched( - handle, transa, transb, m, n, k, - reinterpret_cast(alpha), - reinterpret_cast(A), lda, strideA, - reinterpret_cast(B), ldb, strideB, - reinterpret_cast(beta), - reinterpret_cast(C), ldc, strideC, - batchCount)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::rocblas_cgemm_strided_batched( + handle, transa, transb, m, n, k, + reinterpret_cast(alpha), + reinterpret_cast(A), lda, strideA, + reinterpret_cast(B), ldb, strideB, + reinterpret_cast(beta), + reinterpret_cast(C), ldc, strideC, + batchCount)); } static void GEMM(rocblas_handle handle, rocblas_operation transa, @@ -281,7 +279,7 @@ struct CUBlas> { const platform::complex *B, int ldb, const 
platform::complex *beta, platform::complex *C, int ldc) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::rocblas_cgemm( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::rocblas_cgemm( handle, transa, transb, m, n, k, reinterpret_cast(alpha), reinterpret_cast(A), lda, @@ -302,7 +300,7 @@ struct CUBlas> { rocblas_datatype computeType) { rocblas_gemm_algo algo = rocblas_gemm_algo_standard; dev_ctx->TensorCoreCublasCallIfAvailable([&](rocblas_handle handle) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::rocblas_gemm_ex( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::rocblas_gemm_ex( handle, transa, transb, m, n, k, alpha, A, Atype, lda, B, Btype, ldb, beta, C, Ctype, ldc, C, Ctype, ldc, computeType, algo, 0, 0)); }); @@ -317,7 +315,7 @@ struct CUBlas> { const platform::complex *B, int ldb, const platform::complex *beta, platform::complex *C, int ldc) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::rocblas_zgemv( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::rocblas_zgemv( handle, transa, m, n, reinterpret_cast(alpha), reinterpret_cast(A), lda, @@ -330,7 +328,7 @@ struct CUBlas> { const platform::complex *alpha, const platform::complex *X, const int incX, platform::complex *Y, const int incY) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::rocblas_zaxpy( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::rocblas_zaxpy( handle, n, reinterpret_cast(alpha), reinterpret_cast(X), incX, reinterpret_cast(Y), incY)); @@ -348,15 +346,14 @@ struct CUBlas> { platform::complex *C, int ldc, long long int strideC, // NOLINT int batchCount) { - PADDLE_ENFORCE_CUDA_SUCCESS( - platform::dynload::rocblas_zgemm_strided_batched( - handle, transa, transb, m, n, k, - reinterpret_cast(alpha), - reinterpret_cast(A), lda, strideA, - reinterpret_cast(B), ldb, strideB, - reinterpret_cast(beta), - reinterpret_cast(C), ldc, strideC, - batchCount)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::rocblas_zgemm_strided_batched( + handle, transa, transb, m, n, k, + reinterpret_cast(alpha), + reinterpret_cast(A), lda, strideA, + reinterpret_cast(B), ldb, strideB, + reinterpret_cast(beta), + reinterpret_cast(C), ldc, strideC, + batchCount)); } static void GEMM(rocblas_handle handle, rocblas_operation transa, @@ -366,7 +363,7 @@ struct CUBlas> { const platform::complex *B, int ldb, const platform::complex *beta, platform::complex *C, int ldc) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::rocblas_zgemm( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::rocblas_zgemm( handle, transa, transb, m, n, k, reinterpret_cast(alpha), reinterpret_cast(A), lda, @@ -387,7 +384,7 @@ struct CUBlas> { rocblas_datatype computeType) { rocblas_gemm_algo algo = rocblas_gemm_algo_standard; dev_ctx->TensorCoreCublasCallIfAvailable([&](rocblas_handle handle) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::rocblas_gemm_ex( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::rocblas_gemm_ex( handle, transa, transb, m, n, k, alpha, A, Atype, lda, B, Btype, ldb, beta, C, Ctype, ldc, C, Ctype, ldc, computeType, algo, 0, 0)); }); diff --git a/paddle/fluid/operators/math/concat_and_split.cu b/paddle/fluid/operators/math/concat_and_split.cu index 614ae93d9fa826413c3f0057aa81c8edca598298..32bb479e00517e91a4c61ae698e03b8e3a03bca4 100644 --- a/paddle/fluid/operators/math/concat_and_split.cu +++ b/paddle/fluid/operators/math/concat_and_split.cu @@ -19,7 +19,7 @@ limitations under the License. 
*/ #include "paddle/fluid/memory/malloc.h" #include "paddle/fluid/operators/math/concat_and_split.h" #include "paddle/fluid/platform/cuda_graph_with_memory_pool.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/fluid/platform/float16.h" namespace paddle { diff --git a/paddle/fluid/operators/math/cos_sim_functor.cu b/paddle/fluid/operators/math/cos_sim_functor.cu index 537c7e47155fe9a12196869ceaed84fca198335b..56ba145da1cadea8b74168b010427112ae5e6033 100644 --- a/paddle/fluid/operators/math/cos_sim_functor.cu +++ b/paddle/fluid/operators/math/cos_sim_functor.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/math/cos_sim_functor.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/cross_entropy.cu b/paddle/fluid/operators/math/cross_entropy.cu index 55662e1d0aad7a1edee800bc2875ea5574b45e6e..3e80e40f3577c3bf0b8fe4462665e11b85ea3ca7 100644 --- a/paddle/fluid/operators/math/cross_entropy.cu +++ b/paddle/fluid/operators/math/cross_entropy.cu @@ -14,8 +14,8 @@ limitations under the License. */ #include "paddle/fluid/operators/math.h" #include "paddle/fluid/operators/math/cross_entropy.h" -#include "paddle/fluid/platform/cuda_device_function.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_device_function.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/depthwise_conv.cu b/paddle/fluid/operators/math/depthwise_conv.cu index 6da1bfb964f24ff94aa3137053cdcab8a57726de..6ff2ddaa338df9e82724751562ff6f920be58ee3 100644 --- a/paddle/fluid/operators/math/depthwise_conv.cu +++ b/paddle/fluid/operators/math/depthwise_conv.cu @@ -23,8 +23,8 @@ namespace cub = hipcub; #endif #include "paddle/fluid/operators/math/depthwise_conv.h" #include "paddle/fluid/operators/math/math_function.h" -#include "paddle/fluid/platform/cuda_device_function.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_device_function.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/detail/gru_gpu_kernel.h b/paddle/fluid/operators/math/detail/gru_gpu_kernel.h index 62c45f4dc098ba53a2a077e3051a9465a3722e75..75d4809a462cb776083ef16a60bd0f45bad6c33d 100644 --- a/paddle/fluid/operators/math/detail/gru_gpu_kernel.h +++ b/paddle/fluid/operators/math/detail/gru_gpu_kernel.h @@ -16,7 +16,7 @@ limitations under the License. */ #include #include "paddle/fluid/operators/math/detail/activation_functions.h" #include "paddle/fluid/operators/math/gru_compute.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/fluid/platform/device_context.h" namespace paddle { diff --git a/paddle/fluid/operators/math/detail/lstm_gpu_kernel.h b/paddle/fluid/operators/math/detail/lstm_gpu_kernel.h index 24885d37020dc94a67063ff4a9d142550904a97b..851a62dbe9a48d601ab0b26106664abff7b1bb91 100644 --- a/paddle/fluid/operators/math/detail/lstm_gpu_kernel.h +++ b/paddle/fluid/operators/math/detail/lstm_gpu_kernel.h @@ -17,7 +17,7 @@ limitations under the License. 
*/ #include "paddle/fluid/operators/math/detail/activation_functions.h" #include "paddle/fluid/operators/math/lstm_compute.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/fluid/platform/device_context.h" namespace paddle { diff --git a/paddle/fluid/operators/math/eigen_values_vectors.h b/paddle/fluid/operators/math/eigen_values_vectors.h index 01f05530e34e64acfef4c0caca3930d39b2e1134..b24f5d40e8dca7c76a7e1bc09154419eab342880 100644 --- a/paddle/fluid/operators/math/eigen_values_vectors.h +++ b/paddle/fluid/operators/math/eigen_values_vectors.h @@ -184,14 +184,12 @@ struct MatrixEighFunctor { values_stride >= 32 && values_stride <= 512); syevjInfo_t syevj_params; if (use_syevj) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cusolverDnCreateSyevjInfo(&syevj_params)); - PADDLE_ENFORCE_CUDA_SUCCESS( - platform::dynload::cusolverDnSsyevj_bufferSize( - dev_ctx.cusolver_dn_handle(), jobz, uplo, n, - reinterpret_cast(input_vector), lda, - reinterpret_cast(out_value), &lwork, - syevj_params)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cusolverDnSsyevj_bufferSize( + dev_ctx.cusolver_dn_handle(), jobz, uplo, n, + reinterpret_cast(input_vector), lda, + reinterpret_cast(out_value), &lwork, syevj_params)); } else { EvdBuffer(dev_ctx.cusolver_dn_handle(), jobz, uplo, n, input_vector, lda, out_value, &lwork); @@ -203,7 +201,7 @@ struct MatrixEighFunctor { auto *value_data = out_value + i * values_stride; auto handle = dev_ctx.cusolver_dn_handle(); if (use_syevj) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cusolverDnSsyevj( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cusolverDnSsyevj( handle, jobz, uplo, n, reinterpret_cast(input_data), lda, reinterpret_cast(value_data), reinterpret_cast(work_ptr), lwork, info_ptr, @@ -220,7 +218,7 @@ struct MatrixEighFunctor { } if (use_syevj) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cusolverDnDestroySyevjInfo(syevj_params)); } if (has_vectors) { @@ -255,7 +253,7 @@ struct MatrixEighFunctor { cusolverDnHandle_t handle, cusolverEigMode_t jobz, \ cublasFillMode_t uplo, int n, const T *A, int lda, const ValueType *W, \ int *lwork) const { \ - PADDLE_ENFORCE_CUDA_SUCCESS( \ + PADDLE_ENFORCE_GPU_SUCCESS( \ platform::dynload::cusolverDn##C##evd_bufferSize( \ handle, jobz, uplo, n, reinterpret_cast(A), lda, \ W, lwork)); \ @@ -269,7 +267,7 @@ FUNC_WITH_TYPES(EVDBUFFER_INSTANCE); cusolverDnHandle_t handle, cusolverEigMode_t jobz, \ cublasFillMode_t uplo, int n, T *A, int lda, ValueType *W, T *work, \ int lwork, int *devInfo) const { \ - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cusolverDn##C##evd( \ + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cusolverDn##C##evd( \ handle, jobz, uplo, n, reinterpret_cast(A), lda, W, \ reinterpret_cast(work), lwork, devInfo)); \ } diff --git a/paddle/fluid/operators/math/im2col.cu b/paddle/fluid/operators/math/im2col.cu index 3eadaa2677ab4f6cb69f4163079c2d891717eec1..f616e116d0aee7ef0c450f00e5d14dc57d6f7438 100644 --- a/paddle/fluid/operators/math/im2col.cu +++ b/paddle/fluid/operators/math/im2col.cu @@ -15,8 +15,8 @@ limitations under the License. 
*/ #include #include #include "paddle/fluid/operators/math/im2col.h" -#include "paddle/fluid/platform/cuda_primitives.h" -#include "paddle/fluid/platform/gpu_launch_config.h" +#include "paddle/fluid/platform/device/gpu/gpu_launch_config.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/inclusive_scan.h b/paddle/fluid/operators/math/inclusive_scan.h index 71080bf424a01218f264451f1ee5984addb2717c..54a37db1df71a1ad26cdfd10125fe33a1d376178 100644 --- a/paddle/fluid/operators/math/inclusive_scan.h +++ b/paddle/fluid/operators/math/inclusive_scan.h @@ -42,7 +42,7 @@ static void CubInclusiveScan(InputIterator x_iter, OutputIterator y_iter, void *temp_storage = nullptr; size_t temp_storage_bytes = 0; for (size_t i = 0; i < 2; ++i) { - PADDLE_ENFORCE_CUDA_SUCCESS(cub::DeviceScan::InclusiveScan( + PADDLE_ENFORCE_GPU_SUCCESS(cub::DeviceScan::InclusiveScan( temp_storage, temp_storage_bytes, x_iter, y_iter, op, static_cast(n), // Maybe overflow? dev_ctx.stream())); diff --git a/paddle/fluid/operators/math/maxouting.cu b/paddle/fluid/operators/math/maxouting.cu index 8b134a29d81cf684b93a82dac2542fa029d493da..1856fb4eb48c73f96d4f6428ba890c821a61292c 100644 --- a/paddle/fluid/operators/math/maxouting.cu +++ b/paddle/fluid/operators/math/maxouting.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/math/maxouting.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/pooling.cu b/paddle/fluid/operators/math/pooling.cu index 84a970a9a26067eaf143188370c1cfe14c7fc72e..076d3aa3361f0f2cab8126d800dffae3804b44ab 100644 --- a/paddle/fluid/operators/math/pooling.cu +++ b/paddle/fluid/operators/math/pooling.cu @@ -16,10 +16,10 @@ limitations under the License. */ #include #include "paddle/fluid/operators/math/pooling.h" -#include "paddle/fluid/platform/cuda_device_function.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_device_function.h" +#include "paddle/fluid/platform/device/gpu/gpu_launch_config.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/fluid/platform/fast_divmod.h" -#include "paddle/fluid/platform/gpu_launch_config.h" #ifdef __HIPCC__ #define POOLING_BLOCK_SIZE 256 diff --git a/paddle/fluid/operators/math/prelu.h b/paddle/fluid/operators/math/prelu.h index dc1e3c1c3ded1015b8f6bea7724a66c6b378c4a6..70aae2ba59e2ca164006c669622d6d15026c7eec 100644 --- a/paddle/fluid/operators/math/prelu.h +++ b/paddle/fluid/operators/math/prelu.h @@ -16,11 +16,7 @@ limitations under the License. 
*/ #include #include "paddle/fluid/operators/math/math_function.h" -#ifdef PADDLE_WITH_HIP -#include "paddle/fluid/platform/miopen_helper.h" -#else -#include "paddle/fluid/platform/cudnn_helper.h" -#endif +#include "paddle/fluid/platform/device/gpu/gpu_dnn.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/sample_prob.cu b/paddle/fluid/operators/math/sample_prob.cu index 446acc033eb7ff196f224621ab2f4addf6dd51c2..f596c1bc3dcf38c2e32d599037553055852ebc46 100644 --- a/paddle/fluid/operators/math/sample_prob.cu +++ b/paddle/fluid/operators/math/sample_prob.cu @@ -144,13 +144,13 @@ void GPUSampleWithProb::operator()( VLOG(1) << "num_tries: " << num_tries; #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(hipMemcpy(samples_data + num_true, s_data, - sizeof(int64_t) * num_samples, - hipMemcpyHostToDevice)); + PADDLE_ENFORCE_GPU_SUCCESS(hipMemcpy(samples_data + num_true, s_data, + sizeof(int64_t) * num_samples, + hipMemcpyHostToDevice)); #else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaMemcpy(samples_data + num_true, s_data, - sizeof(int64_t) * num_samples, - cudaMemcpyHostToDevice)); + PADDLE_ENFORCE_GPU_SUCCESS(cudaMemcpy(samples_data + num_true, s_data, + sizeof(int64_t) * num_samples, + cudaMemcpyHostToDevice)); #endif int threads = 512; diff --git a/paddle/fluid/operators/math/segment_pooling.cu b/paddle/fluid/operators/math/segment_pooling.cu index b49b5036ac42e2359a2840f48ab0a42ced6bc406..67cf3162460073100574e28dadfa655264e07a99 100644 --- a/paddle/fluid/operators/math/segment_pooling.cu +++ b/paddle/fluid/operators/math/segment_pooling.cu @@ -16,8 +16,8 @@ limitations under the License. */ #include "paddle/fluid/operators/gather.cu.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/segment_pooling.h" -#include "paddle/fluid/platform/cuda_primitives.h" -#include "paddle/fluid/platform/gpu_launch_config.h" +#include "paddle/fluid/platform/device/gpu/gpu_launch_config.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/selected_rows_functor.cu b/paddle/fluid/operators/math/selected_rows_functor.cu index f3ef537a31b44c70000020f8d1a54c63ba156bc6..0e04c37ed2b12cedf664da9c599d6d84eafbcac6 100644 --- a/paddle/fluid/operators/math/selected_rows_functor.cu +++ b/paddle/fluid/operators/math/selected_rows_functor.cu @@ -17,7 +17,7 @@ limitations under the License. */ #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/selected_rows_functor.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/fluid/platform/float16.h" namespace paddle { diff --git a/paddle/fluid/operators/math/sequence_pooling.cu b/paddle/fluid/operators/math/sequence_pooling.cu index cba8dd935ef1b3625f5f68578e411aec81eaa4f4..b3e1922e1065744e4d7d680768681a6ecc21f25d 100644 --- a/paddle/fluid/operators/math/sequence_pooling.cu +++ b/paddle/fluid/operators/math/sequence_pooling.cu @@ -16,7 +16,7 @@ limitations under the License. 
*/ #include #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/sequence_pooling.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/fluid/platform/macros.h" namespace paddle { diff --git a/paddle/fluid/operators/math/sequence_scale.cu b/paddle/fluid/operators/math/sequence_scale.cu index 5578f1f0138c424f091fc9283fe7939ee01cf528..1807c77e37ca16967d24c423a1bebac779f59ce5 100644 --- a/paddle/fluid/operators/math/sequence_scale.cu +++ b/paddle/fluid/operators/math/sequence_scale.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/math/sequence_scale.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/softmax.cu b/paddle/fluid/operators/math/softmax.cu index 9e9fe5b9c1020db9800b187c7d7fc8b156f66b21..bc32e068f566d2878c8ab9e59058dc2296b90273 100644 --- a/paddle/fluid/operators/math/softmax.cu +++ b/paddle/fluid/operators/math/softmax.cu @@ -16,11 +16,7 @@ limitations under the License. */ #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/softmax.h" #include "paddle/fluid/operators/math/softmax_impl.h" -#ifdef PADDLE_WITH_HIP -#include "paddle/fluid/platform/miopen_helper.h" -#else -#include "paddle/fluid/platform/cudnn_helper.h" -#endif +#include "paddle/fluid/platform/device/gpu/gpu_dnn.h" namespace paddle { namespace operators { @@ -54,7 +50,7 @@ void SoftmaxCUDNNFunctor::operator()( xDesc.descriptor(layout, cudnn_tensor_dims); miopenTensorDescriptor_t cudnn_y_desc = xDesc.descriptor(layout, cudnn_tensor_dims); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenSoftmaxForward_V2( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenSoftmaxForward_V2( context.cudnn_handle(), CudnnDataType::kOne(), cudnn_x_desc, X->data(), CudnnDataType::kZero(), cudnn_y_desc, Y->mutable_data(context.GetPlace()), MIOPEN_SOFTMAX_ACCURATE, @@ -64,7 +60,7 @@ void SoftmaxCUDNNFunctor::operator()( xDesc.descriptor(layout, cudnn_tensor_dims); cudnnTensorDescriptor_t cudnn_y_desc = xDesc.descriptor(layout, cudnn_tensor_dims); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSoftmaxForward( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSoftmaxForward( context.cudnn_handle(), CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_INSTANCE, CudnnDataType::kOne(), cudnn_x_desc, X->data(), CudnnDataType::kZero(), cudnn_y_desc, @@ -97,7 +93,7 @@ void SoftmaxGradCUDNNFunctor::operator()( dxDesc.descriptor(layout, cudnn_tensor_dims); miopenTensorDescriptor_t cudnn_ygrad_desc = dyDesc.descriptor(layout, cudnn_tensor_dims); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenSoftmaxBackward_V2( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenSoftmaxBackward_V2( context.cudnn_handle(), CudnnDataType::kOne(), cudnn_y_desc, Y->data(), cudnn_ygrad_desc, YGrad->data(), CudnnDataType::kZero(), cudnn_xgrad_desc, @@ -110,7 +106,7 @@ void SoftmaxGradCUDNNFunctor::operator()( dxDesc.descriptor(layout, cudnn_tensor_dims); cudnnTensorDescriptor_t cudnn_ygrad_desc = dyDesc.descriptor(layout, cudnn_tensor_dims); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSoftmaxBackward( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSoftmaxBackward( context.cudnn_handle(), CUDNN_SOFTMAX_ACCURATE, 
CUDNN_SOFTMAX_MODE_INSTANCE, CudnnDataType::kOne(), cudnn_y_desc, Y->data(), cudnn_ygrad_desc, YGrad->data(), diff --git a/paddle/fluid/operators/math/unpooling.cu b/paddle/fluid/operators/math/unpooling.cu index ad23892f37903aafc6cee4cd25ac035bd3d39277..dbb3d64350caea41630c91eec299b8d08f0373d5 100644 --- a/paddle/fluid/operators/math/unpooling.cu +++ b/paddle/fluid/operators/math/unpooling.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/math/unpooling.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/vol2col.cu b/paddle/fluid/operators/math/vol2col.cu index d83b5b0fe3afb390009b1153c8e8c6175abe415f..d9c757544a9c6abbac12e0f09278987db854a694 100644 --- a/paddle/fluid/operators/math/vol2col.cu +++ b/paddle/fluid/operators/math/vol2col.cu @@ -15,8 +15,8 @@ limitations under the License. */ #include #include #include "paddle/fluid/operators/math/vol2col.h" -#include "paddle/fluid/platform/cuda_primitives.h" -#include "paddle/fluid/platform/gpu_launch_config.h" +#include "paddle/fluid/platform/device/gpu/gpu_launch_config.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/matrix_rank_op.cu b/paddle/fluid/operators/matrix_rank_op.cu index d85a262b5e910a403620e2ac947e2c930b3fd2b5..757c780b4ea53e1a345e863df275a03013d9008e 100644 --- a/paddle/fluid/operators/matrix_rank_op.cu +++ b/paddle/fluid/operators/matrix_rank_op.cu @@ -162,9 +162,9 @@ void MatrixRankGPUKernel::GesvdjBatched( int ldt = n; int lwork = 0; auto handle = dev_ctx.cusolver_dn_handle(); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cusolverDnCreateGesvdjInfo(&gesvdj_params)); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cusolverDnSgesvdj_bufferSize( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cusolverDnSgesvdj_bufferSize( handle, jobz, thin_UV, m, n, A, lda, S, U, ldu, V, ldt, &lwork, gesvdj_params)); auto workspace = memory::Alloc(dev_ctx, lwork * sizeof(float)); @@ -173,7 +173,7 @@ void MatrixRankGPUKernel::GesvdjBatched( int stride_U = ldu * (thin_UV ? k : m); int stride_V = ldt * (thin_UV ? k : n); for (int i = 0; i < batchSize; i++) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cusolverDnSgesvdj( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cusolverDnSgesvdj( handle, jobz, thin_UV, m, n, A + stride_A * i, lda, S + k * i, U + stride_U * i, ldu, V + stride_V * i, ldt, workspace_ptr, lwork, info, gesvdj_params)); @@ -186,7 +186,7 @@ void MatrixRankGPUKernel::GesvdjBatched( platform::errors::PreconditionNotMet( "For batch [%d]: CUSolver SVD is not zero. 
[%d]", i, error_info)); } - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cusolverDnDestroyGesvdjInfo(gesvdj_params)); } @@ -203,9 +203,9 @@ void MatrixRankGPUKernel::GesvdjBatched( int ldt = n; int lwork = 0; auto handle = dev_ctx.cusolver_dn_handle(); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cusolverDnCreateGesvdjInfo(&gesvdj_params)); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cusolverDnDgesvdj_bufferSize( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cusolverDnDgesvdj_bufferSize( handle, jobz, thin_UV, m, n, A, lda, S, U, ldu, V, ldt, &lwork, gesvdj_params)); auto workspace = memory::Alloc(dev_ctx, lwork * sizeof(double)); @@ -214,7 +214,7 @@ void MatrixRankGPUKernel::GesvdjBatched( int stride_U = ldu * (thin_UV ? k : m); int stride_V = ldt * (thin_UV ? k : n); for (int i = 0; i < batchSize; ++i) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cusolverDnDgesvdj( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cusolverDnDgesvdj( handle, jobz, thin_UV, m, n, A + stride_A * i, lda, S + k * i, U + stride_U * i, ldu, V + stride_V * i, ldt, workspace_ptr, lwork, info, gesvdj_params)); @@ -228,7 +228,7 @@ void MatrixRankGPUKernel::GesvdjBatched( platform::errors::PreconditionNotMet( "For batch [%d]: CUSolver SVD is not zero. [%d]", i, error_info)); } - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cusolverDnDestroyGesvdjInfo(gesvdj_params)); } @@ -247,14 +247,14 @@ void MatrixRankGPUKernel::SyevjBatched( int stride_A = lda * n; int lwork = 0; syevjInfo_t params = NULL; - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cusolverDnCreateSyevjInfo(¶ms)); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cusolverDnSsyevj_bufferSize( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cusolverDnSsyevj_bufferSize( handle, jobz, uplo, n, A, lda, W, &lwork, params)); auto workspace = memory::Alloc(dev_ctx, lwork * sizeof(float)); float* workspace_ptr = reinterpret_cast(workspace->ptr()); for (int i = 0; i < batchSize; i++) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cusolverDnSsyevj( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cusolverDnSsyevj( handle, jobz, uplo, n, A + stride_A * i, lda, W + n * i, workspace_ptr, lwork, info, params)); @@ -268,7 +268,7 @@ void MatrixRankGPUKernel::SyevjBatched( "For batch [%d]: CUSolver eigenvalues is not zero. [%d]", i, error_info)); } - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cusolverDnDestroySyevjInfo(params)); } @@ -285,15 +285,15 @@ void MatrixRankGPUKernel::SyevjBatched( int stride_A = lda * n; int lwork = 0; syevjInfo_t params = NULL; - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cusolverDnCreateSyevjInfo(¶ms)); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cusolverDnDsyevj_bufferSize( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cusolverDnDsyevj_bufferSize( handle, jobz, uplo, n, A, lda, W, &lwork, params)); auto workspace = memory::Alloc(dev_ctx, lwork * sizeof(double)); double* workspace_ptr = reinterpret_cast(workspace->ptr()); for (int i = 0; i < batchSize; i++) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cusolverDnDsyevj( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cusolverDnDsyevj( handle, jobz, uplo, n, A + stride_A * i, lda, W + n * i, workspace_ptr, lwork, info, params)); int error_info; @@ -306,7 +306,7 @@ void MatrixRankGPUKernel::SyevjBatched( "For batch [%d]: CUSolver eigenvalues is not zero. 
[%d]", i, error_info)); } - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cusolverDnDestroySyevjInfo(params)); } diff --git a/paddle/fluid/operators/mean_iou_op.cu b/paddle/fluid/operators/mean_iou_op.cu index 7098a720cc3a03d1dc033d810aa2e36d6552adce..79aff52a16fa975b6dd5f34b4446c3688ae6a5a3 100644 --- a/paddle/fluid/operators/mean_iou_op.cu +++ b/paddle/fluid/operators/mean_iou_op.cu @@ -15,8 +15,8 @@ limitations under the License. */ #include "paddle/fluid/memory/malloc.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/mean_iou_op.h" -#include "paddle/fluid/platform/cuda_primitives.h" -#include "paddle/fluid/platform/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/mean_op.cu b/paddle/fluid/operators/mean_op.cu index 430036bc67de70fb46171f3e9379f252ef8a13a8..1a10b7033f69e9fe587d779bc6b64229f9262317 100644 --- a/paddle/fluid/operators/mean_op.cu +++ b/paddle/fluid/operators/mean_op.cu @@ -19,7 +19,7 @@ limitations under the License. */ namespace cub = hipcub; #endif #include "paddle/fluid/operators/mean_op.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/fluid/platform/float16.h" namespace paddle { @@ -65,14 +65,14 @@ class MeanCUDAKernel : public framework::OpKernel { auto err = cub::DeviceReduce::Sum(nullptr, temp_storage_bytes, trans_x, out_data, size_prob, stream); - PADDLE_ENFORCE_CUDA_SUCCESS(err); + PADDLE_ENFORCE_GPU_SUCCESS(err); framework::Tensor tmp; auto* temp_storage = tmp.mutable_data( framework::make_ddim({static_cast(temp_storage_bytes)}), context.GetPlace()); err = cub::DeviceReduce::Sum(temp_storage, temp_storage_bytes, trans_x, out_data, size_prob, stream); - PADDLE_ENFORCE_CUDA_SUCCESS(err); + PADDLE_ENFORCE_GPU_SUCCESS(err); } }; diff --git a/paddle/fluid/operators/metrics/accuracy_op.cu b/paddle/fluid/operators/metrics/accuracy_op.cu index 3d22fc60993c7b1cd9b78b6ab175d0b3621d0949..6f19100fa9d37e2efedad60a982bf19b09cac736 100644 --- a/paddle/fluid/operators/metrics/accuracy_op.cu +++ b/paddle/fluid/operators/metrics/accuracy_op.cu @@ -15,9 +15,9 @@ limitations under the License. */ #include #include #include "paddle/fluid/operators/metrics/accuracy_op.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/fluid/platform/float16.h" -#include "paddle/fluid/platform/gpu_info.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/metrics/auc_op.cu b/paddle/fluid/operators/metrics/auc_op.cu index 40609381c17aee7d8dce52b30b9110ff8b39860d..1cb7eba8775e814b1150929de4a341c466ee4583 100644 --- a/paddle/fluid/operators/metrics/auc_op.cu +++ b/paddle/fluid/operators/metrics/auc_op.cu @@ -16,7 +16,7 @@ limitations under the License. 
*/ #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/metrics/auc_op.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/miopen_lstm_cache.h b/paddle/fluid/operators/miopen_lstm_cache.h index a357e6e5af6af0895e07e993bcbba98a33239978..c307218baa406ed70b6930e007ecdf01973452c1 100644 --- a/paddle/fluid/operators/miopen_lstm_cache.h +++ b/paddle/fluid/operators/miopen_lstm_cache.h @@ -16,7 +16,7 @@ limitations under the License. */ #include #include "paddle/fluid/framework/tensor.h" -#include "paddle/fluid/platform/miopen_helper.h" +#include "paddle/fluid/platform/device/gpu/gpu_dnn.h" namespace paddle { namespace operators { @@ -66,7 +66,7 @@ class ScopedRNNBase { // ------------------- miopen dropout descriptors --------------------- size_t state_size; if (!initialized_) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenDropoutGetStatesSize(handle, &state_size)); dropout_state->mutable_data({static_cast(state_size)}, place); @@ -75,7 +75,7 @@ class ScopedRNNBase { dropout_state, seed_, state_size); // ------------------- miopen rnn descriptors --------------------- - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenSetRNNDescriptor_V2( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenSetRNNDescriptor_V2( rnn_desc_.desc(), hidden_size_, num_layers_, dropout_desc_.desc(), miopenRNNlinear, is_bidirec_ ? miopenRNNbidirection : miopenRNNunidirection, miopenLSTM, @@ -83,7 +83,7 @@ class ScopedRNNBase { // ------------------- miopen weights_size --------------------- size_t weights_size_; - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenGetRNNParamsSize( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenGetRNNParamsSize( handle, rnn_desc_.desc(), x_descs_[0], &weights_size_, miopen_type)); PADDLE_ENFORCE_EQ( weights_size_, sizeof(T) * weight_numel_, @@ -95,10 +95,10 @@ class ScopedRNNBase { std::vector dim_w = {dim_tmp, 1, 1}; weight_desc_.descriptor(layout, dim_w); // ------------------- miopen workspace, reserve size --------------------- - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenGetRNNWorkspaceSize( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenGetRNNWorkspaceSize( handle, rnn_desc_.desc(), seq_length_, x_descs_.data(), workspace_size)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenGetRNNTrainingReserveSize( handle, rnn_desc_.desc(), seq_length_, x_descs_.data(), reserve_size)); diff --git a/paddle/fluid/operators/miopen_rnn_cache.h b/paddle/fluid/operators/miopen_rnn_cache.h index 97d608331ccb5115ae16b8bc848f256dd6270b93..38cea39abd5dedea99cd1b3b1088b24ca349f32f 100644 --- a/paddle/fluid/operators/miopen_rnn_cache.h +++ b/paddle/fluid/operators/miopen_rnn_cache.h @@ -16,7 +16,7 @@ limitations under the License. 
*/ #include #include "paddle/fluid/framework/tensor.h" -#include "paddle/fluid/platform/miopen_helper.h" +#include "paddle/fluid/platform/device/gpu/gpu_dnn.h" namespace paddle { namespace operators { @@ -95,16 +95,16 @@ struct CudnnRNNCache { std::vector strides_y = {hidden_size_ * numDirections, 1, 1}; for (size_t i = 0; i < seq_length_; ++i) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenCreateTensorDescriptor(&x_desc_[i])); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenCreateTensorDescriptor(&y_desc_[i])); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenSetTensorDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenSetTensorDescriptor( x_desc_[i], miopen_type, 3, const_cast(dims.data()), const_cast(strides.data()))); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenSetTensorDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenSetTensorDescriptor( y_desc_[i], miopen_type, 3, const_cast(dims_y.data()), const_cast(strides_y.data()))); } @@ -113,85 +113,85 @@ struct CudnnRNNCache { hidden_size_}; std::vector strides_hx = {hidden_size_ * batch_size_, hidden_size_, 1}; - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenCreateTensorDescriptor(&hx_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenCreateTensorDescriptor(&cx_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenCreateTensorDescriptor(&hy_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenCreateTensorDescriptor(&cy_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenCreateTensorDescriptor(&dhx_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenCreateTensorDescriptor(&dcx_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenCreateTensorDescriptor(&dhy_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenCreateTensorDescriptor(&dcy_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenSetTensorDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenSetTensorDescriptor( hx_desc_, miopen_type, 3, const_cast(dims_hx.data()), const_cast(strides_hx.data()))); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenSetTensorDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenSetTensorDescriptor( cx_desc_, miopen_type, 3, const_cast(dims_hx.data()), const_cast(strides_hx.data()))); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenSetTensorDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenSetTensorDescriptor( hy_desc_, miopen_type, 3, const_cast(dims_hx.data()), const_cast(strides_hx.data()))); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenSetTensorDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenSetTensorDescriptor( cy_desc_, miopen_type, 3, const_cast(dims_hx.data()), const_cast(strides_hx.data()))); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenSetTensorDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenSetTensorDescriptor( dhx_desc_, miopen_type, 3, const_cast(dims_hx.data()), const_cast(strides_hx.data()))); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenSetTensorDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenSetTensorDescriptor( dcx_desc_, miopen_type, 3, 
const_cast(dims_hx.data()), const_cast(strides_hx.data()))); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenSetTensorDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenSetTensorDescriptor( dhy_desc_, miopen_type, 3, const_cast(dims_hx.data()), const_cast(strides_hx.data()))); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenSetTensorDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenSetTensorDescriptor( dcy_desc_, miopen_type, 3, const_cast(dims_hx.data()), const_cast(strides_hx.data()))); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenCreateDropoutDescriptor(&dropout_desc_)); size_t state_size; if (!initialized) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenDropoutGetStatesSize(handle, &state_size)); dropout_state_->Resize({static_cast(state_size)}); uint8_t *dropout_state_data = dropout_state_->mutable_data(place); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenSetDropoutDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenSetDropoutDescriptor( dropout_desc_, handle, dropout_prob_, dropout_state_data, state_size, seed_, false, false, MIOPEN_RNG_PSEUDO_XORWOW)); } else { uint8_t *dropout_state_data = dropout_state_->data(); auto dropout_state_dims = dropout_state_->dims(); state_size = dropout_state_dims[0]; - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenRestoreDropoutDescriptor( dropout_desc_, handle, dropout_prob_, dropout_state_data, state_size, 0, false, false, MIOPEN_RNG_PSEUDO_XORWOW)); } - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenCreateRNNDescriptor(&rnn_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenSetRNNDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenSetRNNDescriptor( rnn_desc_, hidden_size_, num_layers_, miopenRNNlinear, is_bidirec_ ? 
miopenRNNbidirection : miopenRNNunidirection, miopenLSTM, miopenRNNNoBias, miopenRNNdefault, miopen_type)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenCreateTensorDescriptor(&w_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenCreateTensorDescriptor(&dw_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenGetRNNParamsSize( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenGetRNNParamsSize( handle, rnn_desc_, x_desc_[0], &weights_size_, miopen_type)); PADDLE_ENFORCE_EQ( @@ -208,14 +208,14 @@ struct CudnnRNNCache { dim_s[1] = 1; dim_s[0] = dim_w[1]; - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenSetTensorDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenSetTensorDescriptor( w_desc_, miopen_type, 3, dim_w, dim_s)); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenSetTensorDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenSetTensorDescriptor( dw_desc_, miopen_type, 3, dim_w, dim_s)); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenGetRNNWorkspaceSize( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenGetRNNWorkspaceSize( handle, rnn_desc_, seq_length_, x_desc_, &workspace_size_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenGetRNNTrainingReserveSize( handle, rnn_desc_, seq_length_, x_desc_, reserve_size_)); @@ -225,40 +225,40 @@ struct CudnnRNNCache { void release() { for (size_t i = 0; i < seq_length_; ++i) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenDestroyTensorDescriptor(x_desc_[i])); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenDestroyTensorDescriptor(y_desc_[i])); } delete[] x_desc_; delete[] y_desc_; - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenDestroyTensorDescriptor(hx_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenDestroyTensorDescriptor(cx_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenDestroyTensorDescriptor(hy_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenDestroyTensorDescriptor(cy_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenDestroyTensorDescriptor(dhx_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenDestroyTensorDescriptor(dcx_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenDestroyTensorDescriptor(dhy_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenDestroyTensorDescriptor(dcy_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenDestroyDropoutDescriptor(dropout_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenDestroyRNNDescriptor(rnn_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenDestroyTensorDescriptor(w_desc_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenDestroyTensorDescriptor(dw_desc_)); } }; diff --git a/paddle/fluid/operators/mish_op.cu b/paddle/fluid/operators/mish_op.cu index 6513e5d95e4acc9884a824743cbd2faedd223d41..4ca07b650c80a6881c2d48b10a27171f957a5b98 100644 --- a/paddle/fluid/operators/mish_op.cu +++ b/paddle/fluid/operators/mish_op.cu @@ 
-11,8 +11,8 @@ limitations under the License. */ #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/mish_op.h" -#include "paddle/fluid/platform/cuda_primitives.h" -#include "paddle/fluid/platform/gpu_launch_config.h" +#include "paddle/fluid/platform/device/gpu/gpu_launch_config.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/mv_op.cu b/paddle/fluid/operators/mv_op.cu index ee638ede22b640b36df56fd67d48aa7adc9bfb3d..cec17f1324313461492768acc32f90167cee0123 100644 --- a/paddle/fluid/operators/mv_op.cu +++ b/paddle/fluid/operators/mv_op.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/mv_op.h" -#include "paddle/fluid/platform/gpu_launch_config.h" +#include "paddle/fluid/platform/device/gpu/gpu_launch_config.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/nccl/nccl_gpu_common.cc b/paddle/fluid/operators/nccl/nccl_gpu_common.cc index 169af47e95acdcc9450a892cd1dddc0d3ec9de1e..bcbc96ea1b6d1041e43498c7c6e32ec1be2029f3 100644 --- a/paddle/fluid/operators/nccl/nccl_gpu_common.cc +++ b/paddle/fluid/operators/nccl/nccl_gpu_common.cc @@ -50,7 +50,7 @@ void Communicator::InitAll(const std::vector& gpus) { for (size_t i = 0; i < gpus.size(); ++i) { (*comm_id_map)[gpus[i]] = i; } - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( dynload::ncclCommInitAll(global_comms->data(), gpus.size(), gpus.data())); inited = true; } diff --git a/paddle/fluid/operators/nccl/nccl_op.cu.cc b/paddle/fluid/operators/nccl/nccl_op.cu.cc index 9a4a036077f58784a53d029eeccbc89972f7bbed..f319ce159f6dd013ef76ae1deaf2486a5ee9a8b2 100644 --- a/paddle/fluid/operators/nccl/nccl_op.cu.cc +++ b/paddle/fluid/operators/nccl/nccl_op.cu.cc @@ -74,7 +74,7 @@ class NCCLAllReduceKernel : public framework::OpKernel { VLOG(3) << "gpu : " << " invoke allreduce. send " << x->numel() << " recv " << out->numel(); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclAllReduce( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce( x->data(), out->mutable_data(ctx.GetPlace()), out->numel(), NCCLTypeWrapper::type, reduction_op_, comm->comms().at(idx), ctx.cuda_device_context().stream())); @@ -111,7 +111,7 @@ class NCCLReduceKernel : public framework::OpKernel { } VLOG(3) << "gpu : " << gpu_id << " invoke reduce. send " << x->numel() << " recv " << out->numel(); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclReduce( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclReduce( x->data(), recvbuffer, x->numel(), NCCLTypeWrapper::type, reduction_op_, root, comm->comms().at(idx), ctx.cuda_device_context().stream())); @@ -136,7 +136,7 @@ class NCCLBcastKernel : public framework::OpKernel { if (idx == root) { auto* x = ctx.Input("X"); VLOG(3) << "gpu : " << gpu_id << " invoke Bcast. send " << x->numel(); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclBcast( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclBcast( reinterpret_cast(const_cast(x->data())), x->numel(), NCCLTypeWrapper::type, root, comm->comms().at(idx), ctx.cuda_device_context().stream())); @@ -145,7 +145,7 @@ class NCCLBcastKernel : public framework::OpKernel { auto* out = ctx.Output("Out"); VLOG(3) << "gpu : " << gpu_id << " invoke Bcast. 
recv buffer " << framework::product(out->dims()); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclBcast( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclBcast( out->mutable_data(ctx.GetPlace()), out->numel(), NCCLTypeWrapper::type, root, comm->comms().at(idx), ctx.cuda_device_context().stream())); diff --git a/paddle/fluid/operators/nccl/nccl_op_test.cu.cc b/paddle/fluid/operators/nccl/nccl_op_test.cu.cc index 6c7fba8d4ac78927946764512045815a72e15dcc..41c1b4d7a8f815a07e14751b90547d5e6271c266 100644 --- a/paddle/fluid/operators/nccl/nccl_op_test.cu.cc +++ b/paddle/fluid/operators/nccl/nccl_op_test.cu.cc @@ -23,9 +23,9 @@ limitations under the License. */ #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/program_desc.h" #include "paddle/fluid/operators/nccl/nccl_gpu_common.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" #include "paddle/fluid/platform/device_context.h" #include "paddle/fluid/platform/enforce.h" -#include "paddle/fluid/platform/gpu_info.h" #include "paddle/fluid/platform/init.h" #include "paddle/fluid/platform/place.h" @@ -44,7 +44,7 @@ const f::DDim kDims = {20, 20}; class NCCLTester : public ::testing::Test { public: void SetUp() override { - int count = p::GetCUDADeviceCount(); + int count = p::GetGPUDeviceCount(); if (count <= 0) { LOG(WARNING) << "Cannot test gpu nccl, because the CUDA device count is " << count; diff --git a/paddle/fluid/operators/nll_loss_op.cu b/paddle/fluid/operators/nll_loss_op.cu index b6e7cd256e18d46fc2cab11c5816be185e6d65d7..03af45634149df94a2547b9df7653d81f9b6be13 100644 --- a/paddle/fluid/operators/nll_loss_op.cu +++ b/paddle/fluid/operators/nll_loss_op.cu @@ -13,7 +13,7 @@ limitations under the License. */ #include #include "paddle/fluid/operators/math.h" #include "paddle/fluid/operators/nll_loss_op.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/fluid/platform/hostdevice.h" namespace paddle { diff --git a/paddle/fluid/operators/norm_utils.cu.h b/paddle/fluid/operators/norm_utils.cu.h index 843736833f81563af6fbda18ca684bf97116ec72..241c634e3fc98a7c157aeeb3811c06772d355c32 100644 --- a/paddle/fluid/operators/norm_utils.cu.h +++ b/paddle/fluid/operators/norm_utils.cu.h @@ -26,11 +26,7 @@ namespace cub = hipcub; #endif #include "paddle/fluid/framework/data_layout.h" #include "paddle/fluid/operators/math/math_function.h" -#ifdef PADDLE_WITH_HIP -#include "paddle/fluid/platform/miopen_helper.h" -#else -#include "paddle/fluid/platform/cudnn_helper.h" -#endif +#include "paddle/fluid/platform/device/gpu/gpu_dnn.h" #ifdef __HIPCC__ #define LAUNCH_BOUNDS(BlockDim) __launch_bounds__(BlockDim) diff --git a/paddle/fluid/operators/one_hot_op.cu b/paddle/fluid/operators/one_hot_op.cu index bffd1d5305127cab29f39ac1f51566325ab20612..3da7a3afcc93dc90b8f5a17d3de1fe52478079d4 100644 --- a/paddle/fluid/operators/one_hot_op.cu +++ b/paddle/fluid/operators/one_hot_op.cu @@ -13,8 +13,8 @@ // limitations under the License. 
#include "paddle/fluid/operators/one_hot_op.h" -#include "paddle/fluid/platform/cuda_primitives.h" -#include "paddle/fluid/platform/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/one_hot_v2_op.cu b/paddle/fluid/operators/one_hot_v2_op.cu index 2366f1422244e34deaf9ba019eba1fde2620f7ad..22eb6c81845d15701742aeb578fe2e85114a8cc3 100644 --- a/paddle/fluid/operators/one_hot_v2_op.cu +++ b/paddle/fluid/operators/one_hot_v2_op.cu @@ -13,8 +13,8 @@ // limitations under the License. #include "paddle/fluid/operators/one_hot_v2_op.h" -#include "paddle/fluid/platform/cuda_primitives.h" -#include "paddle/fluid/platform/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/optimizers/adagrad_op.cu b/paddle/fluid/operators/optimizers/adagrad_op.cu index 5043468d4c5f721ae0906b1a319eb3ec10b26580..8b939b7c6b3ba275ef050abc08636ea9c8740621 100644 --- a/paddle/fluid/operators/optimizers/adagrad_op.cu +++ b/paddle/fluid/operators/optimizers/adagrad_op.cu @@ -14,7 +14,7 @@ limitations under the License. */ #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/selected_rows_functor.h" #include "paddle/fluid/operators/optimizers/adagrad_op.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/optimizers/sgd_op.cu b/paddle/fluid/operators/optimizers/sgd_op.cu index a5d9ad271f23a4d6a17f07a6c5b6a7d57aa7a3c5..3582e939f30ac7e19d430cb6863182019acccdb1 100644 --- a/paddle/fluid/operators/optimizers/sgd_op.cu +++ b/paddle/fluid/operators/optimizers/sgd_op.cu @@ -14,7 +14,7 @@ limitations under the License. */ #include #include "paddle/fluid/operators/optimizers/sgd_op.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/optimizers/sparse_momentum_op.h b/paddle/fluid/operators/optimizers/sparse_momentum_op.h index f1516320ec5733a4ce3b1f7ccabea3409fc05bb7..23e37ea27b54f704fb25e2455545912a7ea78960 100644 --- a/paddle/fluid/operators/optimizers/sparse_momentum_op.h +++ b/paddle/fluid/operators/optimizers/sparse_momentum_op.h @@ -445,12 +445,12 @@ class SparseMomentumOpKernel : public framework::OpKernel { for_range_index(range_functor); size_t temp_storage_bytes = 0; - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( (cub::DeviceRadixSort::SortPairs( nullptr, temp_storage_bytes, nullptr, nullptr, nullptr, nullptr, static_cast(num_index)))); auto d_temp_storage = memory::Alloc(ctx.GetPlace(), temp_storage_bytes); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( (cub::DeviceRadixSort::SortPairs( d_temp_storage->ptr(), temp_storage_bytes, index->data(), sorted_index_ptr, sort_value_ptr, grad_index_ptr, diff --git a/paddle/fluid/operators/pad2d_op.cu b/paddle/fluid/operators/pad2d_op.cu index a77d0a5650ef3271ce0f5a46e0e5c6d2e1fef37d..a854fa6091ab4cbe8bc4d25709cf770d11ea4f67 100644 --- a/paddle/fluid/operators/pad2d_op.cu +++ b/paddle/fluid/operators/pad2d_op.cu @@ -15,8 +15,8 @@ limitations under the License. 
*/ #include #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/math/math_function.h" -#include "paddle/fluid/platform/cuda_primitives.h" -#include "paddle/fluid/platform/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/pad3d_op.cu b/paddle/fluid/operators/pad3d_op.cu index f243a78e5578bb4f00c929733ff3ba3dc28d847f..1567251236550d296ea2b7852d7cb9f7f2379164 100644 --- a/paddle/fluid/operators/pad3d_op.cu +++ b/paddle/fluid/operators/pad3d_op.cu @@ -15,8 +15,8 @@ limitations under the License. */ #include #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/math/math_function.h" -#include "paddle/fluid/platform/cuda_primitives.h" -#include "paddle/fluid/platform/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/pool_cudnn_op.cu.cc b/paddle/fluid/operators/pool_cudnn_op.cu.cc index 8fcd40a9a2df40ce2af5fb98421844e892c8d3b1..bbe31740129478f90d4a1398835c0b236d3bbb1e 100644 --- a/paddle/fluid/operators/pool_cudnn_op.cu.cc +++ b/paddle/fluid/operators/pool_cudnn_op.cu.cc @@ -16,14 +16,11 @@ limitations under the License. */ #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/pool_op.h" -#ifdef PADDLE_WITH_CUDA -#include "paddle/fluid/platform/cudnn_helper.h" -#endif #ifdef PADDLE_WITH_HIP #include "paddle/fluid/framework/data_type.h" #include "paddle/fluid/framework/operator.h" -#include "paddle/fluid/platform/miopen_helper.h" #endif +#include "paddle/fluid/platform/device/gpu/gpu_dnn.h" namespace paddle { namespace operators { @@ -204,17 +201,17 @@ class PoolCUDNNOpKernel : public framework::OpKernel { #ifdef PADDLE_WITH_HIP char *pool_workspace; size_t pool_worksize = 0; - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenPoolingGetWorkSpaceSizeV2( cudnn_pool_desc, cudnn_output_desc, &pool_worksize)); - PADDLE_ENFORCE_CUDA_SUCCESS(hipMalloc(&pool_workspace, pool_worksize)); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenPoolingForward( + PADDLE_ENFORCE_GPU_SUCCESS(hipMalloc(&pool_workspace, pool_worksize)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenPoolingForward( handle, cudnn_pool_desc, &alpha, cudnn_input_desc, tranformed_input_data, &beta, cudnn_output_desc, tranformed_output_data, false, pool_workspace, pool_worksize)); - PADDLE_ENFORCE_CUDA_SUCCESS(hipFree(pool_workspace)); + PADDLE_ENFORCE_GPU_SUCCESS(hipFree(pool_workspace)); #else - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnPoolingForward( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnPoolingForward( handle, cudnn_pool_desc, &alpha, cudnn_input_desc, tranformed_input_data, &beta, cudnn_output_desc, tranformed_output_data)); @@ -468,17 +465,17 @@ class PoolCUDNNGradOpKernel : public framework::OpKernel { #ifdef PADDLE_WITH_HIP char *pool_workspace; size_t pool_worksize = 0; - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenPoolingGetWorkSpaceSizeV2( cudnn_pool_desc, cudnn_output_desc, &pool_worksize)); - PADDLE_ENFORCE_CUDA_SUCCESS(hipMalloc(&pool_workspace, pool_worksize)); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenPoolingBackward( + 
PADDLE_ENFORCE_GPU_SUCCESS(hipMalloc(&pool_workspace, pool_worksize)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenPoolingBackward( handle, cudnn_pool_desc, &alpha, cudnn_output_desc, output_data, cudnn_output_desc, output_grad_data, cudnn_input_desc, input_data, &beta, cudnn_input_desc, input_grad_data, pool_workspace)); - PADDLE_ENFORCE_CUDA_SUCCESS(hipFree(pool_workspace)); + PADDLE_ENFORCE_GPU_SUCCESS(hipFree(pool_workspace)); #else - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnPoolingBackward( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnPoolingBackward( handle, cudnn_pool_desc, &alpha, cudnn_output_desc, output_data, cudnn_output_desc, output_grad_data, cudnn_input_desc, input_data, &beta, cudnn_input_desc, input_grad_data)); diff --git a/paddle/fluid/operators/pool_op.cc b/paddle/fluid/operators/pool_op.cc index 9d8f086ce0f187ba335885986f797a5c7775b9bf..fa98e76e39338f6e8585159a25db4086f24326a6 100644 --- a/paddle/fluid/operators/pool_op.cc +++ b/paddle/fluid/operators/pool_op.cc @@ -15,12 +15,7 @@ limitations under the License. */ #include "paddle/fluid/operators/pool_op.h" #include -#ifdef PADDLE_WITH_CUDA -#include "paddle/fluid/platform/cudnn_helper.h" -#endif -#ifdef PADDLE_WITH_HIP -#include "paddle/fluid/platform/miopen_helper.h" -#endif +#include "paddle/fluid/platform/device/gpu/gpu_dnn.h" #ifdef PADDLE_WITH_MKLDNN #include "paddle/fluid/platform/mkldnn_helper.h" #endif diff --git a/paddle/fluid/operators/prelu_op.cu b/paddle/fluid/operators/prelu_op.cu index ce3f5969cef49d183fb6fc3115fe564c34ce804c..06cc9ed7a96e53586d06209db46652c43bbe2601 100644 --- a/paddle/fluid/operators/prelu_op.cu +++ b/paddle/fluid/operators/prelu_op.cu @@ -16,7 +16,7 @@ limitations under the License. */ #include "paddle/fluid/operators/math/prelu.h" #include "paddle/fluid/operators/prelu_op.h" #include "paddle/fluid/operators/reduce_ops/cub_reduce.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/prroi_pool_op.h b/paddle/fluid/operators/prroi_pool_op.h index f9e2b78d5d31a2a24ae22e29c96efcf1d778030e..38f8d6542ac32c689a009528c2b2123ed8fc3f90 100644 --- a/paddle/fluid/operators/prroi_pool_op.h +++ b/paddle/fluid/operators/prroi_pool_op.h @@ -17,7 +17,7 @@ limitations under the License. */ #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/math/math_function.h" #if defined(__NVCC__) || defined(__HIPCC__) -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #endif namespace paddle { diff --git a/paddle/fluid/operators/psroi_pool_op.cu b/paddle/fluid/operators/psroi_pool_op.cu index f69edfc1fcfec947e4e8a33bd3af45b36865f2d3..5a0d1a700417cb1e2c56ee843bd6fc225e53c62e 100644 --- a/paddle/fluid/operators/psroi_pool_op.cu +++ b/paddle/fluid/operators/psroi_pool_op.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. 
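// Note: every hunk in this patch replaces PADDLE_ENFORCE_CUDA_SUCCESS with
// PADDLE_ENFORCE_GPU_SUCCESS so a single macro serves both the CUDA and the
// ROCm builds (and, as the calls above show, library statuses from cuDNN and
// MIOpen as well). The sketch below is only a guess at the general shape of
// such a check for runtime errors, not Paddle's actual definition, which
// presumably raises a Paddle enforce error instead of aborting:

#include <cstdio>
#include <cstdlib>
#ifdef PADDLE_WITH_HIP
#include <hip/hip_runtime.h>
using gpuError_t = hipError_t;
inline bool GpuOk(gpuError_t e) { return e == hipSuccess; }
inline const char* GpuErrStr(gpuError_t e) { return hipGetErrorString(e); }
#else
#include <cuda_runtime.h>
using gpuError_t = cudaError_t;
inline bool GpuOk(gpuError_t e) { return e == cudaSuccess; }
inline const char* GpuErrStr(gpuError_t e) { return cudaGetErrorString(e); }
#endif

// Hypothetical stand-in for the unified macro: evaluate once, report and abort
// on failure.
#define GPU_SUCCESS_CHECK(expr)                                          \
  do {                                                                   \
    gpuError_t err__ = (expr);                                           \
    if (!GpuOk(err__)) {                                                 \
      std::fprintf(stderr, "%s failed: %s\n", #expr, GpuErrStr(err__));  \
      std::abort();                                                      \
    }                                                                    \
  } while (0)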
*/ #include "paddle/fluid/operators/psroi_pool_op.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/pull_box_extended_sparse_op.cu b/paddle/fluid/operators/pull_box_extended_sparse_op.cu index 5bde6bc2e5cbbd332847cb868806ca44616c40e1..26a02ea622479f3c44e4b5b1c8fd4095a77c8576 100644 --- a/paddle/fluid/operators/pull_box_extended_sparse_op.cu +++ b/paddle/fluid/operators/pull_box_extended_sparse_op.cu @@ -13,8 +13,8 @@ // limitations under the License. #include "paddle/fluid/operators/pull_box_extended_sparse_op.h" -#include "paddle/fluid/platform/cuda_primitives.h" -#include "paddle/fluid/platform/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/pull_box_sparse_op.cu b/paddle/fluid/operators/pull_box_sparse_op.cu index 8bba9db5426b7055dce03ee2f5e87c11a38aef1b..96a1b1c08b79c2b564a701484bf06cf3d9c3a2bc 100644 --- a/paddle/fluid/operators/pull_box_sparse_op.cu +++ b/paddle/fluid/operators/pull_box_sparse_op.cu @@ -13,8 +13,8 @@ // limitations under the License. #include "paddle/fluid/operators/pull_box_sparse_op.h" -#include "paddle/fluid/platform/cuda_primitives.h" -#include "paddle/fluid/platform/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/qr_op.cu b/paddle/fluid/operators/qr_op.cu index 992df172ace0c7404e80e98ac1bd2ec37d60dc17..3eb5f72b5b117edd6a9b8a0f9e883fd57fc5b76a 100644 --- a/paddle/fluid/operators/qr_op.cu +++ b/paddle/fluid/operators/qr_op.cu @@ -167,7 +167,7 @@ void QrGPUKernel::BatchedGeqrf( int lwork = 0; auto handle = dev_ctx.cusolver_dn_handle(); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cusolverDnSgeqrf_bufferSize( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cusolverDnSgeqrf_bufferSize( handle, m, n, a, lda, &lwork)); auto workspace = memory::Alloc(dev_ctx, lwork * sizeof(float)); float* workspace_ptr = reinterpret_cast(workspace->ptr()); @@ -178,7 +178,7 @@ void QrGPUKernel::BatchedGeqrf( float* a_working_ptr = &a[i * a_stride]; float* tau_working_ptr = &tau[i * tau_stride]; // compute geqrf - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cusolverDnSgeqrf( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cusolverDnSgeqrf( handle, m, n, a_working_ptr, lda, tau_working_ptr, workspace_ptr, lwork, info_d)); // Do we need synchronized here? @@ -201,7 +201,7 @@ void QrGPUKernel::BatchedGeqrf( int lwork = 0; auto handle = dev_ctx.cusolver_dn_handle(); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cusolverDnDgeqrf_bufferSize( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cusolverDnDgeqrf_bufferSize( handle, m, n, a, lda, &lwork)); auto workspace = memory::Alloc(dev_ctx, lwork * sizeof(double)); double* workspace_ptr = reinterpret_cast(workspace->ptr()); @@ -212,7 +212,7 @@ void QrGPUKernel::BatchedGeqrf( double* a_working_ptr = &a[i * a_stride]; double* tau_working_ptr = &tau[i * tau_stride]; // compute geqrf - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cusolverDnDgeqrf( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cusolverDnDgeqrf( handle, m, n, a_working_ptr, lda, tau_working_ptr, workspace_ptr, lwork, info_d)); // Do we need synchronized here? 
@@ -235,7 +235,7 @@ void QrGPUKernel::BatchedOrgqr( int lwork = 0; auto handle = dev_ctx.cusolver_dn_handle(); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cusolverDnSorgqr_bufferSize( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cusolverDnSorgqr_bufferSize( handle, m, n, k, a, lda, tau, &lwork)); auto workspace = memory::Alloc(dev_ctx, lwork * sizeof(float)); float* workspace_ptr = reinterpret_cast(workspace->ptr()); @@ -246,7 +246,7 @@ void QrGPUKernel::BatchedOrgqr( float* a_working_ptr = &a[i * a_stride]; float* tau_working_ptr = &tau[i * tau_stride]; // compute orggr - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cusolverDnSorgqr( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cusolverDnSorgqr( handle, m, n, k, a_working_ptr, lda, tau_working_ptr, workspace_ptr, lwork, info_d)); // Do we need synchronized here? @@ -270,7 +270,7 @@ void QrGPUKernel::BatchedOrgqr( int lwork = 0; auto handle = dev_ctx.cusolver_dn_handle(); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cusolverDnDorgqr_bufferSize( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cusolverDnDorgqr_bufferSize( handle, m, n, k, a, lda, tau, &lwork)); auto workspace = memory::Alloc(dev_ctx, lwork * sizeof(double)); double* workspace_ptr = reinterpret_cast(workspace->ptr()); @@ -281,7 +281,7 @@ void QrGPUKernel::BatchedOrgqr( double* a_working_ptr = &a[i * a_stride]; double* tau_working_ptr = &tau[i * tau_stride]; // compute orggr - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cusolverDnDorgqr( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cusolverDnDorgqr( handle, m, n, k, a_working_ptr, lda, tau_working_ptr, workspace_ptr, lwork, info_d)); // Do we need synchronized here? diff --git a/paddle/fluid/operators/range_op.cu b/paddle/fluid/operators/range_op.cu index 6250d68730e138f30cf14d664e51bbe7a506dbc2..23a0f2d0a24e38794fb9e406b068358b0ebff106 100644 --- a/paddle/fluid/operators/range_op.cu +++ b/paddle/fluid/operators/range_op.cu @@ -16,7 +16,7 @@ limitations under the License. */ #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/range_op.h" #include "paddle/fluid/operators/utils.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/rank_attention_op.cu b/paddle/fluid/operators/rank_attention_op.cu index aaa4eec7c1bf321be6c8ea10f051e66baa0e3817..23b4475e1f7c18568f871469037acfd9c72b12b2 100644 --- a/paddle/fluid/operators/rank_attention_op.cu +++ b/paddle/fluid/operators/rank_attention_op.cu @@ -17,8 +17,8 @@ limitations under the License. 
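// Note: the qr_op.cu hunks above follow cuSOLVER's standard two-step workflow:
// query the workspace size with the *_bufferSize call, allocate that many
// elements, then run the factorization with the workspace and a device-side
// info flag. A single-matrix sketch (illustrative; status checks and the final
// read of d_info are omitted):

#include <cuda_runtime.h>
#include <cusolverDn.h>

// QR factorization (geqrf) of one m x n column-major matrix d_A on the device.
void GeqrfOneMatrix(cusolverDnHandle_t handle, int m, int n, float* d_A,
                    int lda, float* d_tau, int* d_info) {
  int lwork = 0;
  // Step 1: ask cuSOLVER how much device workspace the call needs.
  cusolverDnSgeqrf_bufferSize(handle, m, n, d_A, lda, &lwork);
  // Step 2: allocate the workspace and run the factorization.
  float* d_work = nullptr;
  cudaMalloc(&d_work, sizeof(float) * lwork);
  cusolverDnSgeqrf(handle, m, n, d_A, lda, d_tau, d_work, lwork, d_info);
  cudaDeviceSynchronize();  // ensure the call finished before freeing d_work
  cudaFree(d_work);
}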
*/ #include "paddle/fluid/operators/math/blas.h" #include "paddle/fluid/operators/rank_attention.cu.h" #include "paddle/fluid/operators/rank_attention_op.h" -#include "paddle/fluid/platform/cuda_primitives.h" -#include "paddle/fluid/platform/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/reader/buffered_reader.cc b/paddle/fluid/operators/reader/buffered_reader.cc index 58af6309e3d28bfdd8fc09e174dfd542c5cea448..6c28daa7eac72904520f611b7caa6d53da5f64c2 100644 --- a/paddle/fluid/operators/reader/buffered_reader.cc +++ b/paddle/fluid/operators/reader/buffered_reader.cc @@ -161,14 +161,14 @@ void BufferedReader::ReadAsync(size_t i) { platform::SetDeviceId( BOOST_GET_CONST(platform::CUDAPlace, place_).device); #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( hipEventRecord(events_[i].get(), compute_stream_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( hipStreamWaitEvent(stream_.get(), events_[i].get(), 0)); #else - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( cudaEventRecord(events_[i].get(), compute_stream_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( cudaStreamWaitEvent(stream_.get(), events_[i].get(), 0)); #endif @@ -199,19 +199,12 @@ void BufferedReader::ReadAsync(size_t i) { memory::Copy(BOOST_GET_CONST(platform::CUDAPlace, place_), gpu_ptr, cuda_pinned_place, cuda_pinned_ptr, size, stream_.get()); -#ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(hipStreamSynchronize(stream_.get())); -#else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamSynchronize(stream_.get())); -#endif + + platform::GpuStreamSync(stream_.get()); } cuda[i].set_lod(cpu[i].lod()); } -#ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(hipStreamSynchronize(stream_.get())); -#else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamSynchronize(stream_.get())); -#endif + platform::GpuStreamSync(stream_.get()); } } #endif diff --git a/paddle/fluid/operators/reader/buffered_reader.h b/paddle/fluid/operators/reader/buffered_reader.h index c433cac56a43103d5984a10ffcbaf26220a063ce..3d42486c6df8815aaab8e55e29898700bb74d953 100644 --- a/paddle/fluid/operators/reader/buffered_reader.h +++ b/paddle/fluid/operators/reader/buffered_reader.h @@ -22,8 +22,8 @@ #include "ThreadPool.h" #include "paddle/fluid/framework/reader.h" #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) -#include "paddle/fluid/platform/cuda_resource_pool.h" -#include "paddle/fluid/platform/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_resource_pool.h" #endif #ifdef PADDLE_WITH_ASCEND_CL #include "paddle/fluid/platform/device/npu/npu_info.h" diff --git a/paddle/fluid/operators/reduce_ops/reduce_op.cu.h b/paddle/fluid/operators/reduce_ops/reduce_op.cu.h index 6b3b4843200188f81f38826bb5991c5c62de872d..9c348477963b4e6223a18930b3758bd087609d9f 100644 --- a/paddle/fluid/operators/reduce_ops/reduce_op.cu.h +++ b/paddle/fluid/operators/reduce_ops/reduce_op.cu.h @@ -36,7 +36,8 @@ namespace cub = hipcub; #include "paddle/fluid/operators/amp/fp16_type_traits.h" #include "paddle/fluid/operators/cast_op.h" #include "paddle/fluid/operators/kernel_primitives/kernel_primitives.h" -#include "paddle/fluid/platform/cuda_device_function.h" +#include "paddle/fluid/platform/device/gpu/gpu_device_function.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" #include 
"paddle/fluid/platform/fast_divmod.h" // Reduce split or not, Whether to use ReduceHigherDim @@ -464,9 +465,9 @@ struct ReduceConfig { reduce_num_per_thread = details::AlignUp(reduce_num, block_dim->y); } int device_id = platform::GetCurrentDeviceId(); - int max_mp = platform::GetCUDAMultiProcessors(device_id); + int max_mp = platform::GetGPUMultiProcessors(device_id); int max_threads_per_mp = - platform::GetCUDAMaxThreadsPerMultiProcessor(device_id); + platform::GetGPUMaxThreadsPerMultiProcessor(device_id); int max_threads = max_threads_per_mp * max_mp; int num_threads = block_dim->x * block_dim->y; int max_num_blocks = max_threads / num_threads; @@ -506,9 +507,9 @@ struct ReduceConfig { left_num = last_dim_num; grid_dim->z = grid_z; int device_id = platform::GetCurrentDeviceId(); - int max_mp = platform::GetCUDAMultiProcessors(device_id); + int max_mp = platform::GetGPUMultiProcessors(device_id); int max_threads_per_mp = - platform::GetCUDAMaxThreadsPerMultiProcessor(device_id); + platform::GetGPUMaxThreadsPerMultiProcessor(device_id); int max_threads = max_threads_per_mp * max_mp; // init int num_block = (max_threads / left_num); diff --git a/paddle/fluid/operators/rnn_op.cu.cc b/paddle/fluid/operators/rnn_op.cu.cc index 07329a9175e525cfb023737b340400e0400b5ff9..de4847ddc45903ec781825585b7827f16475ffa4 100644 --- a/paddle/fluid/operators/rnn_op.cu.cc +++ b/paddle/fluid/operators/rnn_op.cu.cc @@ -16,12 +16,7 @@ limitations under the License. */ #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/utils.h" -#ifdef PADDLE_WITH_CUDA -#include "paddle/fluid/platform/cudnn_helper.h" -#endif -#ifdef PADDLE_WITH_HIP -#include "paddle/fluid/platform/miopen_helper.h" -#endif +#include "paddle/fluid/platform/device/gpu/gpu_dnn.h" namespace paddle { namespace operators { @@ -97,12 +92,12 @@ class RNNDescriptors { bool is_initialized = dropout_state->IsInitialized(); if (!is_test_ && !is_initialized) { #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenDropoutGetStatesSize(handle, &state_size)); dropout_state->mutable_data({static_cast(state_size)}, place); #else - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDropoutGetStatesSize(handle, &state_size)); dropout_state->mutable_data({static_cast(state_size)}, place); @@ -114,19 +109,19 @@ class RNNDescriptors { // ------------------- cudnn rnn descriptors --------------------- #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenSetRNNDescriptor_V2( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenSetRNNDescriptor_V2( rnn_desc_.desc(), hidden_size_, num_layers_, dropout_desc_.desc(), miopenRNNlinear, is_bidirec_ ? miopenRNNbidirection : miopenRNNunidirection, mode_, miopenRNNwithBias, miopenRNNdefault, cudnn_type)); #elif CUDNN_VERSION >= 6000 - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetRNNDescriptor_v6( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetRNNDescriptor_v6( handle, rnn_desc_.desc(), hidden_size_, num_layers_, dropout_desc_.desc(), CUDNN_LINEAR_INPUT, is_bidirec_ ? CUDNN_BIDIRECTIONAL : CUDNN_UNIDIRECTIONAL, mode_, CUDNN_RNN_ALGO_STANDARD, cudnn_type)); #else - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetRNNDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetRNNDescriptor( rnn_desc_.desc(), hidden_size_, num_layers_, dropout_desc_.desc(), CUDNN_LINEAR_INPUT, is_bidirec_ ? 
CUDNN_BIDIRECTIONAL : CUDNN_UNIDIRECTIONAL, mode_, @@ -135,7 +130,7 @@ class RNNDescriptors { #if defined(PADDLE_WITH_CUDA) && CUDNN_VERSION >= 7201 if (!sequence_length.empty()) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetRNNPaddingMode( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetRNNPaddingMode( rnn_desc_.desc(), CUDNN_RNN_PADDED_IO_ENABLED)); } #endif @@ -143,10 +138,10 @@ class RNNDescriptors { // ------------------- cudnn weights_size --------------------- size_t weights_size_; #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenGetRNNParamsSize( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenGetRNNParamsSize( handle, rnn_desc_.desc(), x_descs_[0], &weights_size_, cudnn_type)); #else - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnGetRNNParamsSize( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnGetRNNParamsSize( handle, rnn_desc_.desc(), x_descs_[0], &weights_size_, cudnn_type)); #endif PADDLE_ENFORCE_EQ( @@ -160,18 +155,18 @@ class RNNDescriptors { weight_desc_.descriptor(layout, dim_w); // ------------------- cudnn workspace, reserve size --------------------- #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenGetRNNWorkspaceSize( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenGetRNNWorkspaceSize( handle, rnn_desc_.desc(), seq_length_, x_descs_.data(), workspace_size)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenGetRNNTrainingReserveSize( handle, rnn_desc_.desc(), seq_length_, x_descs_.data(), reserve_size)); #else - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnGetRNNWorkspaceSize( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnGetRNNWorkspaceSize( handle, rnn_desc_.desc(), seq_length_, x_descs_.data(), workspace_size)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnGetRNNTrainingReserveSize( handle, rnn_desc_.desc(), seq_length_, x_descs_.data(), reserve_size)); @@ -557,7 +552,7 @@ class RNNCudnnKernel : public framework::OpKernel { // for train // This interface is used when the input/output is unpadded. #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenRNNForwardTraining( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenRNNForwardTraining( handle, rnn.rnn_desc(), seq_length, rnn.x_descs(), x_data, rnn.init_h_desc(), init_h_data, rnn.init_c_desc(), init_c_data, rnn.weight_desc(), w_data, rnn.y_descs(), out_data, @@ -565,7 +560,7 @@ class RNNCudnnKernel : public framework::OpKernel { workspace_data_.data(), workspace_size, reserve_data, reserve_size)); #else - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnRNNForwardTraining( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnRNNForwardTraining( handle, rnn.rnn_desc(), seq_length, rnn.x_descs(), x_data, rnn.init_h_desc(), init_h_data, rnn.init_c_desc(), init_c_data, rnn.weight_desc(), w_data, rnn.y_descs(), out_data, @@ -577,15 +572,13 @@ class RNNCudnnKernel : public framework::OpKernel { #if defined(PADDLE_WITH_CUDA) && CUDNN_VERSION >= 7201 // for train // This interface is used when the input/output is padded. 
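// Note: further up, buffered_reader.cc collapses the paired #ifdef stream
// synchronizations into one platform::GpuStreamSync(stream) call, and
// reduce_op.cu.h switches to the renamed GetGPUMultiProcessors /
// GetGPUMaxThreadsPerMultiProcessor queries to cap its grid size. The helpers
// below are assumptions about what those wrappers reduce to, not Paddle's
// actual definitions:

#ifdef PADDLE_WITH_HIP
#include <hip/hip_runtime.h>
using gpuStream_t = hipStream_t;
#else
#include <cuda_runtime.h>
using gpuStream_t = cudaStream_t;
#endif

// Presumed shape of platform::GpuStreamSync: one call site, no per-backend
// #ifdef at every use.
inline void GpuStreamSyncSketch(gpuStream_t stream) {
#ifdef PADDLE_WITH_HIP
  hipStreamSynchronize(stream);
#else
  cudaStreamSynchronize(stream);
#endif
}

// Presumed use of the device queries in ReduceConfig: bound the number of
// blocks by the device-wide thread capacity.
inline int MaxNumBlocksSketch(int sm_count, int max_threads_per_sm,
                              int threads_per_block) {
  int max_threads = sm_count * max_threads_per_sm;
  return max_threads / threads_per_block;
}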
- PADDLE_ENFORCE_CUDA_SUCCESS( - platform::dynload::cudnnRNNForwardTrainingEx( - handle, rnn.rnn_desc(), rnn.x_seq_desc(), x_data, - rnn.init_h_desc(), init_h_data, rnn.init_c_desc(), init_c_data, - rnn.weight_desc(), w_data, rnn.y_seq_desc(), out_data, - rnn.last_h_desc(), last_h_data, rnn.last_c_desc(), last_c_data, - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, - nullptr, workspace_data_.data(), workspace_size, - reserve_data, reserve_size)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnRNNForwardTrainingEx( + handle, rnn.rnn_desc(), rnn.x_seq_desc(), x_data, rnn.init_h_desc(), + init_h_data, rnn.init_c_desc(), init_c_data, rnn.weight_desc(), + w_data, rnn.y_seq_desc(), out_data, rnn.last_h_desc(), last_h_data, + rnn.last_c_desc(), last_c_data, nullptr, nullptr, nullptr, nullptr, + nullptr, nullptr, nullptr, nullptr, workspace_data_.data(), + workspace_size, reserve_data, reserve_size)); #else PADDLE_THROW(platform::errors::Unavailable( "The padded input is supported by " @@ -606,14 +599,14 @@ class RNNCudnnKernel : public framework::OpKernel { // for inference // This interface is used when the input/output is unpadded. #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenRNNForwardInference( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenRNNForwardInference( handle, rnn->rnn_desc(), seq_length, rnn->x_descs(), x_data, rnn->init_h_desc(), init_h_data, rnn->init_c_desc(), init_c_data, rnn->weight_desc(), w_data, rnn->y_descs(), out_data, rnn->last_h_desc(), last_h_data, rnn->last_c_desc(), last_c_data, workspace_data->data(), workspace_size)); #else - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnRNNForwardInference( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnRNNForwardInference( handle, rnn->rnn_desc(), seq_length, rnn->x_descs(), x_data, rnn->init_h_desc(), init_h_data, rnn->init_c_desc(), init_c_data, rnn->weight_desc(), w_data, rnn->y_descs(), out_data, @@ -624,7 +617,7 @@ class RNNCudnnKernel : public framework::OpKernel { #if defined(PADDLE_WITH_CUDA) && CUDNN_VERSION >= 7201 // for inference // This interface is used when the input/output is padded. - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnRNNForwardInferenceEx( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnRNNForwardInferenceEx( handle, rnn->rnn_desc(), rnn->x_seq_desc(), x_data, rnn->init_h_desc(), init_h_data, rnn->init_c_desc(), init_c_data, rnn->weight_desc(), w_data, rnn->y_seq_desc(), out_data, @@ -831,7 +824,7 @@ class RNNGradCudnnKernel : public framework::OpKernel { if (!has_seq_length) { if (in_grad) { #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenRNNBackwardData( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenRNNBackwardData( handle, rnn.rnn_desc(), seq_length, rnn.y_descs(), out_data, rnn.y_descs(), out_grad_data, rnn.last_h_desc(), last_h_grad_data, rnn.last_c_desc(), last_c_grad_data, rnn.weight_desc(), weight_data, @@ -842,7 +835,7 @@ class RNNGradCudnnKernel : public framework::OpKernel { const_cast(reserve_data), reserve_size)); #else // This interface is used when the input/output is unpadded. 
- PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnRNNBackwardData( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnRNNBackwardData( handle, rnn.rnn_desc(), seq_length, rnn.y_descs(), out_data, rnn.y_descs(), out_grad_data, rnn.last_h_desc(), last_h_grad_data, rnn.last_c_desc(), last_c_grad_data, rnn.weight_desc(), weight_data, @@ -855,7 +848,7 @@ class RNNGradCudnnKernel : public framework::OpKernel { } if (!weight_grad_list.empty()) { #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenRNNBackwardWeights( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenRNNBackwardWeights( handle, rnn.rnn_desc(), seq_length, rnn.x_descs(), input->data(), rnn.init_h_desc(), init_h_data, rnn.y_descs(), out->data(), rnn.weight_desc(), weight_grad_data, @@ -865,7 +858,7 @@ class RNNGradCudnnKernel : public framework::OpKernel { tensor_to_permuted_weight(place, stream, weight_grad, &weight_grad_list, rnn_mode, is_bidirec); #else - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnRNNBackwardWeights( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnRNNBackwardWeights( handle, rnn.rnn_desc(), seq_length, rnn.x_descs(), input->data(), rnn.init_h_desc(), init_h_data, rnn.y_descs(), out->data(), workspace_data_.data(), workspace_size, rnn.weight_desc(), @@ -878,7 +871,7 @@ class RNNGradCudnnKernel : public framework::OpKernel { // for train // This interface is used when the input/output is padded. if (in_grad) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnRNNBackwardDataEx( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnRNNBackwardDataEx( handle, rnn.rnn_desc(), rnn.y_seq_desc(), out_data, rnn.y_seq_desc(), out_grad_data, nullptr, nullptr, rnn.last_h_desc(), last_h_grad_data, rnn.last_c_desc(), @@ -891,13 +884,12 @@ class RNNGradCudnnKernel : public framework::OpKernel { } if (!weight_grad_list.empty()) { - PADDLE_ENFORCE_CUDA_SUCCESS( - platform::dynload::cudnnRNNBackwardWeightsEx( - handle, rnn.rnn_desc(), rnn.x_seq_desc(), input->data(), - rnn.init_h_desc(), init_h_data, rnn.y_seq_desc(), - out->data(), workspace_data_.data(), workspace_size, - rnn.weight_desc(), weight_grad_data, - const_cast(reserve_data), reserve_size)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnRNNBackwardWeightsEx( + handle, rnn.rnn_desc(), rnn.x_seq_desc(), input->data(), + rnn.init_h_desc(), init_h_data, rnn.y_seq_desc(), out->data(), + workspace_data_.data(), workspace_size, rnn.weight_desc(), + weight_grad_data, const_cast(reserve_data), + reserve_size)); } #else PADDLE_THROW(platform::errors::Unavailable( diff --git a/paddle/fluid/operators/roi_align_op.cu b/paddle/fluid/operators/roi_align_op.cu index 111828005222bb52990032be7a98b8bb6fb7367a..a08339d776ff1ac5b5b90a5a93cb3c611cdf2a65 100644 --- a/paddle/fluid/operators/roi_align_op.cu +++ b/paddle/fluid/operators/roi_align_op.cu @@ -15,8 +15,8 @@ limitations under the License. 
*/ #include #include "paddle/fluid/memory/memory.h" #include "paddle/fluid/operators/roi_align_op.h" -#include "paddle/fluid/platform/cuda_primitives.h" -#include "paddle/fluid/platform/gpu_launch_config.h" +#include "paddle/fluid/platform/device/gpu/gpu_launch_config.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/roi_pool_op.cu b/paddle/fluid/operators/roi_pool_op.cu index 562ff8d576b7d6603b3b63a88f473a5cea5ed33d..0a4a076c6caaef3cca5165d560e486dbfe97a296 100644 --- a/paddle/fluid/operators/roi_pool_op.cu +++ b/paddle/fluid/operators/roi_pool_op.cu @@ -14,7 +14,7 @@ limitations under the License. */ #include #include "paddle/fluid/memory/memory.h" #include "paddle/fluid/operators/roi_pool_op.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/roll_op.cu b/paddle/fluid/operators/roll_op.cu index d70bd58887f8469295bba3761d0b0a658e501709..57986d262820d02aba275b9e1a08dcfe49b62017 100644 --- a/paddle/fluid/operators/roll_op.cu +++ b/paddle/fluid/operators/roll_op.cu @@ -17,7 +17,7 @@ #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/roll_op.h" #include "paddle/fluid/platform/complex.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/row_conv_op.cu b/paddle/fluid/operators/row_conv_op.cu index a712878854298bc2eb372be155e1bd512aba7037..586cf3239b57526e3eefaed6ff410a0aea74bd46 100644 --- a/paddle/fluid/operators/row_conv_op.cu +++ b/paddle/fluid/operators/row_conv_op.cu @@ -13,7 +13,7 @@ limitations under the License. */ #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/row_conv_op.h" -#include "paddle/fluid/platform/cuda_device_function.h" +#include "paddle/fluid/platform/device/gpu/gpu_device_function.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/scatter.cu.h b/paddle/fluid/operators/scatter.cu.h index e3791351cefb3aa092164bcfe3f92fe4e5889a23..6c7a0a8886ef0c13ee0cdc0187975da1dca5098b 100644 --- a/paddle/fluid/operators/scatter.cu.h +++ b/paddle/fluid/operators/scatter.cu.h @@ -18,7 +18,7 @@ limitations under the License. */ #include "math/math_function.h" #include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/memory/malloc.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/fluid/platform/place.h" namespace paddle { diff --git a/paddle/fluid/operators/segment_pool_op.cu b/paddle/fluid/operators/segment_pool_op.cu index 379a07a26dd5c3901c6f6455a3b06e280c0c7204..4e20844dc3275f840ff93029abb222e2ef02e0fa 100644 --- a/paddle/fluid/operators/segment_pool_op.cu +++ b/paddle/fluid/operators/segment_pool_op.cu @@ -14,8 +14,8 @@ limitations under the License. 
*/ #include "paddle/fluid/operators/gather.cu.h" #include "paddle/fluid/operators/segment_pool_op.h" -#include "paddle/fluid/platform/cuda_primitives.h" -#include "paddle/fluid/platform/gpu_launch_config.h" +#include "paddle/fluid/platform/device/gpu/gpu_launch_config.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/segment_pool_op.h b/paddle/fluid/operators/segment_pool_op.h index 5f9635c8ae111c8b0fa90bbea63562d496b3c343..307bf4010f7ffec1e0d6fb3bf883a646590878bc 100644 --- a/paddle/fluid/operators/segment_pool_op.h +++ b/paddle/fluid/operators/segment_pool_op.h @@ -72,11 +72,11 @@ void SegmentKernelLaunchHelper(const framework::ExecutionContext& context) { const IndexT* segment_ids = segment->data(); #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( hipMemcpy(length_data, segment_ids + num_indices - 1, sizeof(IndexT), hipMemcpyDeviceToHost)); #else - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( cudaMemcpy(length_data, segment_ids + num_indices - 1, sizeof(IndexT), cudaMemcpyDeviceToHost)); #endif diff --git a/paddle/fluid/operators/sequence_ops/sequence_enumerate_op.cu b/paddle/fluid/operators/sequence_ops/sequence_enumerate_op.cu index 6d8f60ce932abbe7e23f0f3bbf2c20218a39fe45..8092a40d19b195828c3742854e9b3656424feee7 100644 --- a/paddle/fluid/operators/sequence_ops/sequence_enumerate_op.cu +++ b/paddle/fluid/operators/sequence_ops/sequence_enumerate_op.cu @@ -15,7 +15,7 @@ #include #include #include "paddle/fluid/operators/sequence_ops/sequence_enumerate_op.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/sequence_ops/sequence_erase_op.cu b/paddle/fluid/operators/sequence_ops/sequence_erase_op.cu index bacaaeadbf5765e27cda451837c4b2e004c69af7..bb928cf401c3307b76160387e5108264cd5dbb89 100644 --- a/paddle/fluid/operators/sequence_ops/sequence_erase_op.cu +++ b/paddle/fluid/operators/sequence_ops/sequence_erase_op.cu @@ -15,7 +15,7 @@ limitations under the License. */ #include #include #include "paddle/fluid/operators/sequence_ops/sequence_erase_op.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/sequence_ops/sequence_expand_as_op.cu b/paddle/fluid/operators/sequence_ops/sequence_expand_as_op.cu index c8b6156881c96ff37fd3208dad789d8fc1589ddd..1c4265a71d4eacf7c0d1eb3b98c1043b064fcd70 100644 --- a/paddle/fluid/operators/sequence_ops/sequence_expand_as_op.cu +++ b/paddle/fluid/operators/sequence_ops/sequence_expand_as_op.cu @@ -14,7 +14,7 @@ limitations under the License. */ #include #include "paddle/fluid/operators/sequence_ops/sequence_expand_as_op.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/sequence_ops/sequence_expand_op.cu b/paddle/fluid/operators/sequence_ops/sequence_expand_op.cu index d4f4051c3a4602d7006630434c27ddb1b917cc82..f63fa5be7f496cef2ba723f8e45c46718f60c474 100644 --- a/paddle/fluid/operators/sequence_ops/sequence_expand_op.cu +++ b/paddle/fluid/operators/sequence_ops/sequence_expand_op.cu @@ -15,7 +15,7 @@ limitations under the License. 
*/ #include #include "paddle/fluid/memory/memcpy.h" #include "paddle/fluid/operators/sequence_ops/sequence_expand_op.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/shard_index_op.cu b/paddle/fluid/operators/shard_index_op.cu index f2800c60c33048f9dbaf2d48d42a9078eb060558..115b3f47d664ba00228343d221d5be70d13a7ff1 100644 --- a/paddle/fluid/operators/shard_index_op.cu +++ b/paddle/fluid/operators/shard_index_op.cu @@ -13,8 +13,8 @@ // limitations under the License. #include "paddle/fluid/operators/shard_index_op.h" -#include "paddle/fluid/platform/cuda_primitives.h" -#include "paddle/fluid/platform/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/shuffle_channel_op.cu b/paddle/fluid/operators/shuffle_channel_op.cu index dbc3e1a7ebe26ffccd24d1749093d014751d866f..582d1ea0f26af3ec57510a80717436e61b3b252e 100644 --- a/paddle/fluid/operators/shuffle_channel_op.cu +++ b/paddle/fluid/operators/shuffle_channel_op.cu @@ -10,8 +10,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/shuffle_channel_op.h" -#include "paddle/fluid/platform/cuda_primitives.h" -#include "paddle/fluid/platform/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.cu b/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.cu index 8611249a29f636c07f915cfb2eda7069bbc99a38..cc012230c1062968da2cd4f2833508795a27b079 100644 --- a/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.cu +++ b/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.cu @@ -21,7 +21,7 @@ namespace cub = hipcub; #include "paddle/fluid/memory/malloc.h" #include "paddle/fluid/operators/math.h" #include "paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/fluid/platform/hostdevice.h" namespace paddle { diff --git a/paddle/fluid/operators/softmax_cudnn_op.cu.h b/paddle/fluid/operators/softmax_cudnn_op.cu.h index 68b694a59f47d9bfcbaf1d3b6a9b03fa85b4672b..c538fbade8ae8fe0d272bfeaaf94227e5467b7e5 100644 --- a/paddle/fluid/operators/softmax_cudnn_op.cu.h +++ b/paddle/fluid/operators/softmax_cudnn_op.cu.h @@ -18,12 +18,8 @@ limitations under the License. */ #include "paddle/fluid/operators/kernel_primitives/kernel_primitives.h" #include "paddle/fluid/operators/math/math_cuda_utils.h" #include "paddle/fluid/operators/softmax_op.h" -#include "paddle/fluid/platform/cuda_device_function.h" -#ifdef PADDLE_WITH_HIP -#include "paddle/fluid/platform/miopen_helper.h" -#else -#include "paddle/fluid/platform/cudnn_helper.h" -#endif +#include "paddle/fluid/platform/device/gpu/gpu_device_function.h" +#include "paddle/fluid/platform/device/gpu/gpu_dnn.h" namespace paddle { namespace operators { @@ -503,12 +499,12 @@ void SoftmaxForwardCUDAKernelDriver(const platform::CUDADeviceContext& dev_ctx, auto mode = axis == rank - 1 ? 
MIOPEN_SOFTMAX_MODE_INSTANCE : MIOPEN_SOFTMAX_MODE_CHANNEL; if (LogMode) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenSoftmaxForward_V2( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenSoftmaxForward_V2( handle, platform::CudnnDataType::kOne(), desc_, x.data(), platform::CudnnDataType::kZero(), desc_, out_data, MIOPEN_SOFTMAX_LOG, mode)); } else { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenSoftmaxForward_V2( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenSoftmaxForward_V2( handle, platform::CudnnDataType::kOne(), desc_, x.data(), platform::CudnnDataType::kZero(), desc_, out_data, MIOPEN_SOFTMAX_ACCURATE, mode)); @@ -517,12 +513,12 @@ void SoftmaxForwardCUDAKernelDriver(const platform::CUDADeviceContext& dev_ctx, auto mode = axis == rank - 1 ? CUDNN_SOFTMAX_MODE_INSTANCE : CUDNN_SOFTMAX_MODE_CHANNEL; if (LogMode) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSoftmaxForward( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSoftmaxForward( handle, CUDNN_SOFTMAX_LOG, mode, platform::CudnnDataType::kOne(), desc_, x.data(), platform::CudnnDataType::kZero(), desc_, out_data)); } else { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSoftmaxForward( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSoftmaxForward( handle, CUDNN_SOFTMAX_ACCURATE, mode, platform::CudnnDataType::kOne(), desc_, x.data(), platform::CudnnDataType::kZero(), desc_, out_data)); @@ -591,12 +587,12 @@ void SoftmaxBackwardCUDAKernelDriver(const platform::CUDADeviceContext& dev_ctx, auto mode = axis == rank - 1 ? MIOPEN_SOFTMAX_MODE_INSTANCE : MIOPEN_SOFTMAX_MODE_CHANNEL; if (LogMode) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenSoftmaxBackward_V2( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenSoftmaxBackward_V2( handle, platform::CudnnDataType::kOne(), desc_, out.data(), desc_, dout.data(), platform::CudnnDataType::kZero(), desc_, dx_data, MIOPEN_SOFTMAX_LOG, mode)); } else { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenSoftmaxBackward_V2( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenSoftmaxBackward_V2( handle, platform::CudnnDataType::kOne(), desc_, out.data(), desc_, dout.data(), platform::CudnnDataType::kZero(), desc_, dx_data, MIOPEN_SOFTMAX_ACCURATE, mode)); @@ -605,12 +601,12 @@ void SoftmaxBackwardCUDAKernelDriver(const platform::CUDADeviceContext& dev_ctx, auto mode = axis == rank - 1 ? CUDNN_SOFTMAX_MODE_INSTANCE : CUDNN_SOFTMAX_MODE_CHANNEL; if (LogMode) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSoftmaxBackward( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSoftmaxBackward( handle, CUDNN_SOFTMAX_LOG, mode, platform::CudnnDataType::kOne(), desc_, out.data(), desc_, dout.data(), platform::CudnnDataType::kZero(), desc_, dx_data)); } else { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSoftmaxBackward( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSoftmaxBackward( handle, CUDNN_SOFTMAX_ACCURATE, mode, platform::CudnnDataType::kOne(), desc_, out.data(), desc_, dout.data(), platform::CudnnDataType::kZero(), desc_, dx_data)); diff --git a/paddle/fluid/operators/softmax_op.cc b/paddle/fluid/operators/softmax_op.cc index 3b1753b49b11d10969ea293cf8fa48adbb25c27e..cb97a0bb27cb5c459bf7f2ccd53374759643133f 100644 --- a/paddle/fluid/operators/softmax_op.cc +++ b/paddle/fluid/operators/softmax_op.cc @@ -18,13 +18,7 @@ limitations under the License. 
*/ #include #include -#ifdef PADDLE_WITH_CUDA -#include "paddle/fluid/platform/cudnn_helper.h" -#endif - -#ifdef PADDLE_WITH_HIP -#include "paddle/fluid/platform/miopen_helper.h" -#endif +#include "paddle/fluid/platform/device/gpu/gpu_dnn.h" #ifdef PADDLE_WITH_MKLDNN #include "paddle/fluid/platform/mkldnn_helper.h" diff --git a/paddle/fluid/operators/softmax_with_cross_entropy_op.cu b/paddle/fluid/operators/softmax_with_cross_entropy_op.cu index 6a9dca9fe2a6ae8f8c7859c938f83696d74791fc..520c95b6f3484d9beb0a68be41b6b2906b2d72f3 100644 --- a/paddle/fluid/operators/softmax_with_cross_entropy_op.cu +++ b/paddle/fluid/operators/softmax_with_cross_entropy_op.cu @@ -20,12 +20,8 @@ namespace cub = hipcub; #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/softmax_cudnn_op.cu.h" #include "paddle/fluid/operators/softmax_with_cross_entropy_op.h" +#include "paddle/fluid/platform/device/gpu/gpu_dnn.h" #include "paddle/fluid/platform/for_range.h" -#ifdef PADDLE_WITH_HIP -#include "paddle/fluid/platform/miopen_helper.h" -#else -#include "paddle/fluid/platform/cudnn_helper.h" -#endif namespace paddle { namespace operators { @@ -453,14 +449,14 @@ static void SoftmaxWithCrossEntropyHardLabel( #ifdef PADDLE_WITH_HIP auto mode = axis == rank - 1 ? MIOPEN_SOFTMAX_MODE_INSTANCE : MIOPEN_SOFTMAX_MODE_CHANNEL; - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenSoftmaxForward_V2( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenSoftmaxForward_V2( handle, platform::CudnnDataType::kOne(), descp, logits_data, platform::CudnnDataType::kZero(), descp, softmax_data, MIOPEN_SOFTMAX_LOG, mode)); #else auto mode = axis == rank - 1 ? CUDNN_SOFTMAX_MODE_INSTANCE : CUDNN_SOFTMAX_MODE_CHANNEL; - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSoftmaxForward( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSoftmaxForward( handle, CUDNN_SOFTMAX_LOG, mode, platform::CudnnDataType::kOne(), descp, logits_data, platform::CudnnDataType::kZero(), descp, softmax_data)); diff --git a/paddle/fluid/operators/spectral_helper.h b/paddle/fluid/operators/spectral_helper.h index 924ec7cd52d50d91796d0b694037a7d85d27b463..39639768241d4986af75455b7bb5d91b444be3e0 100644 --- a/paddle/fluid/operators/spectral_helper.h +++ b/paddle/fluid/operators/spectral_helper.h @@ -66,7 +66,7 @@ class CuFFTHandle { public: CuFFTHandle() { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cufftCreate(&handle_)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cufftCreate(&handle_)); } CuFFTHandle(const CuFFTHandle& other) = delete; @@ -79,7 +79,7 @@ class CuFFTHandle { const ::cufftHandle& get() const { return handle_; } ~CuFFTHandle() { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cufftDestroy(handle_)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cufftDestroy(handle_)); } }; @@ -136,12 +136,12 @@ class FFTConfig { } // disable auto allocation of workspace to use allocator from the framework - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cufftSetAutoAllocation( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cufftSetAutoAllocation( plan(), /* autoAllocate */ 0)); size_t ws_size_t; - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cufftXtMakePlanMany( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cufftXtMakePlanMany( plan(), signal_ndim, signal_sizes.data(), /* inembed */ nullptr, /* base_istride */ 1, /* idist */ 1, itype, /* onembed */ nullptr, /* base_ostride */ 1, /* odist */ 1, otype, @@ -176,7 +176,7 @@ class HIPFFTHandle { public: HIPFFTHandle() { - 
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::hipfftCreate(&handle_)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::hipfftCreate(&handle_)); } HIPFFTHandle(const HIPFFTHandle& other) = delete; @@ -189,7 +189,7 @@ class HIPFFTHandle { const ::hipfftHandle& get() const { return handle_; } ~HIPFFTHandle() { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::hipfftDestroy(handle_)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::hipfftDestroy(handle_)); } }; using plan_size_type = int; @@ -248,12 +248,12 @@ class FFTConfig { }(); // disable auto allocation of workspace to use allocator from the framework - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::hipfftSetAutoAllocation( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::hipfftSetAutoAllocation( plan(), /* autoAllocate */ 0)); size_t ws_size_t; - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::hipfftMakePlanMany( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::hipfftMakePlanMany( plan(), signal_ndim, signal_sizes.data(), /* inembed */ nullptr, /* base_istride */ 1, /* idist */ 1, /* onembed */ nullptr, /* base_ostride */ 1, /* odist */ 1, exec_type, diff --git a/paddle/fluid/operators/spectral_op.cu b/paddle/fluid/operators/spectral_op.cu index e97af7cea7e08787c6864fa80c85af085326b7de..4ad99724fd6224f377488c268aa38d70e1b973a7 100644 --- a/paddle/fluid/operators/spectral_op.cu +++ b/paddle/fluid/operators/spectral_op.cu @@ -96,7 +96,7 @@ static void exec_cufft_plan_raw(const FFTConfig& config, void* in_data, void* out_data, bool forward) { auto& plan = config.plan(); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cufftXtExec( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cufftXtExec( plan, in_data, out_data, forward ? CUFFT_FORWARD : CUFFT_INVERSE)); } @@ -167,20 +167,20 @@ static void exec_hipfft_plan_raw(const FFTConfig& config, void* in_data, if (value_type == framework::proto::VarType::FP32) { switch (config.transform_type()) { case FFTTransformType::C2C: { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::hipfftExecC2C( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::hipfftExecC2C( plan, static_cast(in_data), static_cast(out_data), forward ? HIPFFT_FORWARD : HIPFFT_BACKWARD)); return; } case FFTTransformType::R2C: { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::hipfftExecR2C( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::hipfftExecR2C( plan, static_cast(in_data), static_cast(out_data))); return; } case FFTTransformType::C2R: { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::hipfftExecC2R( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::hipfftExecC2R( plan, static_cast(in_data), static_cast(out_data))); return; @@ -189,20 +189,20 @@ static void exec_hipfft_plan_raw(const FFTConfig& config, void* in_data, } else if (value_type == framework::proto::VarType::FP64) { switch (config.transform_type()) { case FFTTransformType::C2C: { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::hipfftExecZ2Z( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::hipfftExecZ2Z( plan, static_cast(in_data), static_cast(out_data), forward ? 
HIPFFT_FORWARD : HIPFFT_BACKWARD)); return; } case FFTTransformType::R2C: { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::hipfftExecD2Z( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::hipfftExecD2Z( plan, static_cast(in_data), static_cast(out_data))); return; } case FFTTransformType::C2R: { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::hipfftExecZ2D( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::hipfftExecZ2D( plan, static_cast(in_data), static_cast(out_data))); return; @@ -332,11 +332,11 @@ void exec_fft(const DeviceContext& ctx, const Tensor* X, Tensor* out, } // prepare cufft for execution - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cufftSetStream(config->plan(), ctx.stream())); framework::Tensor workspace_tensor; workspace_tensor.mutable_data(tensor_place, config->workspace_size()); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cufftSetWorkArea( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cufftSetWorkArea( config->plan(), workspace_tensor.data())); // execute transform plan exec_cufft_plan(ctx, *config, &collapsed_input, @@ -355,11 +355,11 @@ void exec_fft(const DeviceContext& ctx, const Tensor* X, Tensor* out, config = &(plan_cache.lookup(key)); // prepare cufft for execution - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::hipfftSetStream(config->plan(), ctx.stream())); framework::Tensor workspace_tensor; workspace_tensor.mutable_data(tensor_place, config->workspace_size()); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::hipfftSetWorkArea( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::hipfftSetWorkArea( config->plan(), workspace_tensor.data())); // execute transform plan exec_hipfft_plan(ctx, *config, &collapsed_input, diff --git a/paddle/fluid/operators/stack_op.cu b/paddle/fluid/operators/stack_op.cu index 9e5e45f4d22d919e9fd037b7d32e1408a5e092dc..5b3f03445d35251e5053b709f7d2c2da920f8077 100644 --- a/paddle/fluid/operators/stack_op.cu +++ b/paddle/fluid/operators/stack_op.cu @@ -16,7 +16,7 @@ #include #include #include "paddle/fluid/operators/stack_op.h" -#include "paddle/fluid/platform/gpu_launch_config.h" +#include "paddle/fluid/platform/device/gpu/gpu_launch_config.h" namespace plat = paddle::platform; namespace ops = paddle::operators; diff --git a/paddle/fluid/operators/svd_op.cu b/paddle/fluid/operators/svd_op.cu index ade7496d646221ba7afa4c8c0bd75d52d2c481c5..0a7ed093ad0b842e169a4b0f31c50575ea53b14c 100644 --- a/paddle/fluid/operators/svd_op.cu +++ b/paddle/fluid/operators/svd_op.cu @@ -91,9 +91,9 @@ void SvdGPUKernel::GesvdjBatched( int ldt = n; int lwork = 0; auto handle = dev_ctx.cusolver_dn_handle(); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cusolverDnCreateGesvdjInfo(&gesvdj_params)); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cusolverDnSgesvdj_bufferSize( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cusolverDnSgesvdj_bufferSize( handle, jobz, thin_UV, m, n, A, lda, S, U, ldu, V, ldt, &lwork, gesvdj_params)); auto workspace = memory::Alloc(dev_ctx, lwork * sizeof(float)); @@ -102,7 +102,7 @@ void SvdGPUKernel::GesvdjBatched( int stride_U = ldu * (thin_UV ? k : m); int stride_V = ldt * (thin_UV ? 
k : n); for (int i = 0; i < batchSize; ++i) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cusolverDnSgesvdj( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cusolverDnSgesvdj( handle, jobz, thin_UV, m, n, A + stride_A * i, lda, S + k * i, U + stride_U * i, ldu, V + stride_V * i, ldt, workspace_ptr, lwork, info, gesvdj_params)); @@ -116,7 +116,7 @@ void SvdGPUKernel::GesvdjBatched( platform::errors::PreconditionNotMet( "For batch [%d]: CUSolver SVD is not zero. [%d]", i, error_info)); } - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cusolverDnDestroyGesvdjInfo(gesvdj_params)); } @@ -134,9 +134,9 @@ void SvdGPUKernel::GesvdjBatched( int ldt = n; int lwork = 0; auto handle = dev_ctx.cusolver_dn_handle(); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cusolverDnCreateGesvdjInfo(&gesvdj_params)); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cusolverDnDgesvdj_bufferSize( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cusolverDnDgesvdj_bufferSize( handle, jobz, thin_UV, m, n, A, lda, S, U, ldu, V, ldt, &lwork, gesvdj_params)); auto workspace = memory::Alloc(dev_ctx, lwork * sizeof(double)); @@ -145,7 +145,7 @@ void SvdGPUKernel::GesvdjBatched( int stride_U = ldu * (thin_UV ? k : m); int stride_V = ldt * (thin_UV ? k : n); for (int i = 0; i < batchSize; ++i) { - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cusolverDnDgesvdj( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cusolverDnDgesvdj( handle, jobz, thin_UV, m, n, A + stride_A * i, lda, S + k * i, U + stride_U * i, ldu, V + stride_V * i, ldt, workspace_ptr, lwork, info, gesvdj_params)); @@ -159,7 +159,7 @@ void SvdGPUKernel::GesvdjBatched( platform::errors::PreconditionNotMet( "For batch [%d]: CUSolver SVD is not zero. [%d]", i, error_info)); } - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cusolverDnDestroyGesvdjInfo(gesvdj_params)); } diff --git a/paddle/fluid/operators/sync_batch_norm_op.cu.h b/paddle/fluid/operators/sync_batch_norm_op.cu.h index 69617b7e208a880a649fb73a54830dd3def1c939..201de5ac1a428506babafdf8b135ea7d6e9f3dc8 100644 --- a/paddle/fluid/operators/sync_batch_norm_op.cu.h +++ b/paddle/fluid/operators/sync_batch_norm_op.cu.h @@ -21,19 +21,18 @@ limitations under the License. 
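// Note: the spectral_helper.h / spectral_op.cu hunks above keep cuFFT's
// create -> configure -> execute flow: create the plan, disable automatic
// workspace allocation, bind the framework-provided work area and stream,
// then execute. A self-contained sketch of the same flow using the simpler
// cufftMakePlan1d API (illustrative only; error checks omitted):

#include <cuda_runtime.h>
#include <cufft.h>

// One in-place forward C2C FFT of length n, with a caller-owned workspace.
void ForwardFFT(cufftComplex* d_data, int n, cudaStream_t stream) {
  cufftHandle plan;
  cufftCreate(&plan);
  cufftSetAutoAllocation(plan, 0);  // the caller allocates the work area
  size_t ws_size = 0;
  cufftMakePlan1d(plan, n, CUFFT_C2C, /*batch=*/1, &ws_size);
  void* d_workspace = nullptr;
  cudaMalloc(&d_workspace, ws_size);
  cufftSetWorkArea(plan, d_workspace);
  cufftSetStream(plan, stream);  // run on the caller's stream
  cufftExecC2C(plan, d_data, d_data, CUFFT_FORWARD);
  cudaStreamSynchronize(stream);
  cudaFree(d_workspace);
  cufftDestroy(plan);
}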
*/ #include #ifdef __NVCC__ #include "cub/cub.cuh" -#include "paddle/fluid/platform/cudnn_helper.h" #endif #ifdef __HIPCC__ #include namespace cub = hipcub; -#include "paddle/fluid/platform/miopen_helper.h" #endif #include "paddle/fluid/framework/data_layout.h" #include "paddle/fluid/memory/malloc.h" #include "paddle/fluid/operators/batch_norm_op.h" #include "paddle/fluid/operators/norm_utils.h" +#include "paddle/fluid/platform/device/gpu/gpu_dnn.h" +#include "paddle/fluid/platform/device/gpu/nccl_helper.h" #include "paddle/fluid/platform/float16.h" -#include "paddle/fluid/platform/nccl_helper.h" namespace paddle { namespace operators { @@ -192,7 +191,7 @@ void SyncBatchNormFunctor(const framework::ExecutionContext &ctx, if (comm) { int dtype = platform::ToNCCLDataType(mean_out->type()); // In-place operation - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclAllReduce( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce( stats, stats, 2 * C + 1, static_cast(dtype), ncclSum, comm, stream)); } @@ -466,7 +465,7 @@ void SyncBatchNormGradFunctor( if (comm) { int dtype = platform::ToNCCLDataType(scale->type()); // In-place operation - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclAllReduce( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce( stats, stats, 2 * C + 1, static_cast(dtype), ncclSum, comm, stream)); } diff --git a/paddle/fluid/operators/temporal_shift_op.cu b/paddle/fluid/operators/temporal_shift_op.cu index cb1ff5335cdf04db3f6a6920ee28254e330cf432..eb5a78f9dc0ecef0f526ce1c5f00096f056b6bd6 100644 --- a/paddle/fluid/operators/temporal_shift_op.cu +++ b/paddle/fluid/operators/temporal_shift_op.cu @@ -10,8 +10,8 @@ limitations under the License. */ #include "paddle/fluid/operators/temporal_shift_op.h" -#include "paddle/fluid/platform/cuda_primitives.h" -#include "paddle/fluid/platform/gpu_launch_config.h" +#include "paddle/fluid/platform/device/gpu/gpu_launch_config.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/top_k_function_cuda.h b/paddle/fluid/operators/top_k_function_cuda.h index 07749f90ebaa29c3f618a5850ad2d72942035e95..05ae5c9188cebc176389fdc7cd8c4b2b8f83f3e7 100644 --- a/paddle/fluid/operators/top_k_function_cuda.h +++ b/paddle/fluid/operators/top_k_function_cuda.h @@ -24,7 +24,7 @@ limitations under the License. */ #endif #include "paddle/fluid/operators/eigen/eigen_function.h" #include "paddle/fluid/operators/top_k_op.h" -#include "paddle/fluid/platform/cuda_device_function.h" +#include "paddle/fluid/platform/device/gpu/gpu_device_function.h" #include "paddle/fluid/platform/float16.h" #ifdef __HIPCC__ diff --git a/paddle/fluid/operators/transpose_op.cu.h b/paddle/fluid/operators/transpose_op.cu.h index 784d97b543fbd6e81e2aadcdb7dcf225c13c4dc8..6c637effee2cbab059aedf0eaf54bec2b75dea4b 100644 --- a/paddle/fluid/operators/transpose_op.cu.h +++ b/paddle/fluid/operators/transpose_op.cu.h @@ -16,8 +16,8 @@ limitations under the License. 
*/ #include "paddle/fluid/framework/gpu_utils.h" #include "paddle/fluid/operators/transpose_op.h" -#include "paddle/fluid/platform/cuda_primitives.h" -#include "paddle/fluid/platform/gpu_launch_config.h" +#include "paddle/fluid/platform/device/gpu/gpu_launch_config.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/trunc_op.cu b/paddle/fluid/operators/trunc_op.cu index a284e0ea6e393910c35f11a64039e6b58f2f67a2..68d8c608f6338802067fbba3141a58087dcafe62 100644 --- a/paddle/fluid/operators/trunc_op.cu +++ b/paddle/fluid/operators/trunc_op.cu @@ -12,8 +12,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/trunc_op.h" -#include "paddle/fluid/platform/cuda_primitives.h" -#include "paddle/fluid/platform/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/warpctc_op.cc b/paddle/fluid/operators/warpctc_op.cc index f38f5d9f7235795d5460d6a436c6c7dc397e52da..1426c799007a021b400520cc7801c4d6b81c9777 100644 --- a/paddle/fluid/operators/warpctc_op.cc +++ b/paddle/fluid/operators/warpctc_op.cc @@ -16,12 +16,7 @@ limitations under the License. */ #include -#ifdef PADDLE_WITH_HIP -#include "paddle/fluid/platform/miopen_helper.h" -#endif -#ifdef PADDLE_WITH_CUDA -#include "paddle/fluid/platform/cudnn_helper.h" -#endif +#include "paddle/fluid/platform/device/gpu/gpu_dnn.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/where_index_op.cu b/paddle/fluid/operators/where_index_op.cu index b1cd172923ee6dc421cc09b27163422207ea099c..feb8e83864e84ea903d251fb1ca944dca1e5c074 100644 --- a/paddle/fluid/operators/where_index_op.cu +++ b/paddle/fluid/operators/where_index_op.cu @@ -24,7 +24,7 @@ namespace cub = hipcub; #include "paddle/fluid/framework/ddim.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/where_index_op.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/fluid/platform/for_range.h" namespace paddle { diff --git a/paddle/fluid/operators/where_op.cu b/paddle/fluid/operators/where_op.cu index 721c6e5390e85d747f5daa119ec61479ee57c3f2..54b0d5b69086cda3ebdefa76636aff734d1a150c 100644 --- a/paddle/fluid/operators/where_op.cu +++ b/paddle/fluid/operators/where_op.cu @@ -13,7 +13,7 @@ // limitations under the License. 
#include "paddle/fluid/operators/where_op.h" -#include "paddle/fluid/platform/gpu_launch_config.h" +#include "paddle/fluid/platform/device/gpu/gpu_launch_config.h" namespace platform = paddle::platform; diff --git a/paddle/fluid/platform/CMakeLists.txt b/paddle/fluid/platform/CMakeLists.txt index 280674f9ab14781f41288616429bbbba20144da4..4f3c70f5ea0505af7ee6037d1b7fa1dc2827e5c1 100644 --- a/paddle/fluid/platform/CMakeLists.txt +++ b/paddle/fluid/platform/CMakeLists.txt @@ -47,18 +47,11 @@ cc_library(cpu_info SRCS cpu_info.cc DEPS ${CPU_INFO_DEPS}) cc_test(cpu_info_test SRCS cpu_info_test.cc DEPS cpu_info) IF(WITH_GPU) - nv_library(cuda_graph SRCS cuda_graph.cc DEPS enforce allocator_facade) - nv_library(gpu_info SRCS gpu_info.cc DEPS gflags glog enforce monitor dynload_cuda) - nv_library(cuda_profiler SRCS cuda_profiler.cc DEPS enforce) nv_library(cuda_graph_with_memory_pool SRCS cuda_graph_with_memory_pool.cc DEPS device_context allocator_facade cuda_graph) ELSE() cc_library(cuda_graph_with_memory_pool SRCS cuda_graph_with_memory_pool.cc DEPS device_context allocator_facade) ENDIF() -IF(WITH_ROCM) - hip_library(gpu_info SRCS gpu_info.cc DEPS gflags glog enforce monitor dynload_cuda) -ENDIF() - cc_library(place SRCS place.cc DEPS enforce boost) cc_test(place_test SRCS place_test.cc DEPS place glog gflags) @@ -125,8 +118,7 @@ if(WITH_ASCEND_CL) endif() if(WITH_GPU OR WITH_ROCM) - cc_library(cuda_resource_pool SRCS cuda_resource_pool.cc DEPS gpu_info) - target_link_libraries(device_context cuda_resource_pool) + target_link_libraries(device_context gpu_resource_pool) endif() if(WITH_ASCEND_CL) @@ -147,8 +139,6 @@ if(WITH_GPU) nv_test(device_event_test SRCS device_event_test.cc DEPS device_event_gpu) nv_test(device_context_test SRCS device_context_test.cu DEPS device_context gpu_info) - nv_test(cudnn_helper_test SRCS cudnn_helper_test.cc DEPS dynload_cuda pten) - nv_test(cudnn_desc_test SRCS cudnn_desc_test.cc DEPS dynload_cuda) nv_test(transform_test SRCS transform_test.cu DEPS memory place device_context) endif() @@ -158,8 +148,6 @@ if(WITH_ROCM) hip_test(device_event_test SRCS device_event_test.cc DEPS device_event_gpu) hip_test(device_context_test SRCS device_context_test.cu DEPS device_context gpu_info) - hip_test(miopen_helper_test SRCS miopen_helper_test.cc DEPS dynload_cuda) - hip_test(cudnn_desc_test SRCS cudnn_desc_test.cc DEPS dynload_cuda tensor) hip_test(transform_test SRCS transform_test.cu DEPS memory place device_context) endif() @@ -172,11 +160,9 @@ cc_test(lodtensor_printer_test SRCS lodtensor_printer_test.cc DEPS lodtensor_pri cc_library(device_tracer SRCS device_tracer.cc DEPS boost profiler_proto framework_proto ${GPU_CTX_DEPS}) if(WITH_GPU) nv_library(profiler SRCS profiler.cc profiler.cu DEPS device_tracer gpu_info enforce dynload_cuda) - nv_test(cuda_helper_test SRCS cuda_helper_test.cu) nv_library(device_memory_aligment SRCS device_memory_aligment.cc DEPS cpu_info gpu_info place) elseif(WITH_ROCM) hip_library(profiler SRCS profiler.cc profiler.cu DEPS device_tracer gpu_info enforce) - hip_test(cuda_helper_test SRCS cuda_helper_test.cu) hip_library(device_memory_aligment SRCS device_memory_aligment.cc DEPS cpu_info gpu_info place) else() cc_library(profiler SRCS profiler.cc DEPS device_tracer enforce) diff --git a/paddle/fluid/platform/collective_helper.cc b/paddle/fluid/platform/collective_helper.cc index 03359d932b5ab9f34f842b4f9d7fb4252ec5953c..25f8f3ed9f3d8eba7f746c5b38d793d7cc2b0d39 100644 --- a/paddle/fluid/platform/collective_helper.cc +++ 
b/paddle/fluid/platform/collective_helper.cc @@ -15,7 +15,7 @@ #include "paddle/fluid/platform/collective_helper.h" #include -#include "paddle/fluid/platform/cuda_resource_pool.h" +#include "paddle/fluid/platform/device/gpu/gpu_resource_pool.h" namespace paddle { namespace platform { @@ -96,7 +96,7 @@ NCCLComm* NCCLCommContext::CreateComm(ncclUniqueId* nccl_id, int nranks, ncclComm_t comm = nullptr; SetDeviceId(dev_id); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::ncclCommInitRank(&comm, nranks, *nccl_id, rank)); auto* comm_wrapper = AssignNCCLComm(comm, nranks, rank, dev_id, ring_id); @@ -121,7 +121,7 @@ void NCCLCommContext::CreateAllNCCLComms(const std::vector& dev_ids, const int kDevices = dev_ids.size(); ncclComm_t comms[kDevices]; - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclCommInitAll( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclCommInitAll( comms, dev_ids.size(), dev_ids.data())); PADDLE_ENFORCE_EQ(comm_map_.count(ring_id), 0, @@ -153,18 +153,18 @@ void NCCLCommContext::CreateNCCLCommMultiTrainer( << ", rind_id: " << ring_id; ncclComm_t comms[kDevices]; { - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::ncclGroupStart()); + PADDLE_ENFORCE_GPU_SUCCESS(dynload::ncclGroupStart()); for (int i = 0; i < kDevices; i++) { #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(hipSetDevice(i)); + PADDLE_ENFORCE_GPU_SUCCESS(hipSetDevice(i)); #else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaSetDevice(i)); + PADDLE_ENFORCE_GPU_SUCCESS(cudaSetDevice(i)); #endif platform::dynload::ncclCommInitRank(comms + i, kDevices * ntrainers, *nccl_id, train_id * kDevices + i); VLOG(1) << "ncclCommInitRank: " << i; } - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::ncclGroupEnd()); + PADDLE_ENFORCE_GPU_SUCCESS(dynload::ncclGroupEnd()); VLOG(1) << "nccl group end seccessss"; } PADDLE_ENFORCE_EQ(comm_map_.count(ring_id), 0, diff --git a/paddle/fluid/platform/cuda_device_guard.h b/paddle/fluid/platform/cuda_device_guard.h index a85ebf4b8136630712d39d98e2341ee919cf6e45..40204c0ed83f94da9de11378cf49c652e4f63962 100644 --- a/paddle/fluid/platform/cuda_device_guard.h +++ b/paddle/fluid/platform/cuda_device_guard.h @@ -13,7 +13,7 @@ // limitations under the License. 
#pragma once -#include "paddle/fluid/platform/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" namespace paddle { namespace platform { diff --git a/paddle/fluid/platform/cuda_graph_with_memory_pool.h b/paddle/fluid/platform/cuda_graph_with_memory_pool.h index 6586146c5aefbe78c6124c032a1d245c37c27801..fe082c850aa4d278d5d8c05d6c506dac22485676 100644 --- a/paddle/fluid/platform/cuda_graph_with_memory_pool.h +++ b/paddle/fluid/platform/cuda_graph_with_memory_pool.h @@ -17,7 +17,7 @@ #include "paddle/fluid/platform/enforce.h" #include "paddle/fluid/platform/place.h" #ifdef PADDLE_WITH_CUDA -#include "paddle/fluid/platform/cuda_graph.h" +#include "paddle/fluid/platform/device/gpu/cuda/cuda_graph.h" #endif namespace paddle { diff --git a/paddle/fluid/platform/device/CMakeLists.txt b/paddle/fluid/platform/device/CMakeLists.txt index 515453afb63bedde1de8fb65d6d1b0c5bb83765b..5b67473b77eaddfd78245467d1d7d7048f04292e 100644 --- a/paddle/fluid/platform/device/CMakeLists.txt +++ b/paddle/fluid/platform/device/CMakeLists.txt @@ -2,6 +2,9 @@ IF(WITH_XPU) add_subdirectory(xpu) ENDIF() +IF(WITH_GPU OR WITH_ROCM) + add_subdirectory(gpu) +ENDIF() # NPU IF(WITH_ASCEND OR WITH_ASCEND_CL) diff --git a/paddle/fluid/platform/device/gpu/CMakeLists.txt b/paddle/fluid/platform/device/gpu/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..5cf2258204fdabf07004c438491c6dbc22381124 --- /dev/null +++ b/paddle/fluid/platform/device/gpu/CMakeLists.txt @@ -0,0 +1,15 @@ +IF(WITH_GPU) + add_subdirectory(cuda) + nv_library(gpu_info SRCS gpu_info.cc DEPS cuda_info gflags glog enforce monitor dynload_cuda) + + nv_test(cuda_helper_test SRCS cuda_helper_test.cu) + nv_test(cudnn_desc_test SRCS cudnn_desc_test.cc DEPS dynload_cuda) +ELSEIF(WITH_ROCM) + add_subdirectory(rocm) + hip_library(gpu_info SRCS gpu_info.cc DEPS rocm_info gflags glog enforce monitor dynload_cuda) + + hip_test(cuda_helper_test SRCS cuda_helper_test.cu) + hip_test(cudnn_desc_test SRCS cudnn_desc_test.cc DEPS dynload_cuda) +ENDIF() + +cc_library(gpu_resource_pool SRCS gpu_resource_pool.cc DEPS gpu_info) diff --git a/paddle/fluid/platform/device/gpu/cuda/CMakeLists.txt b/paddle/fluid/platform/device/gpu/cuda/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..5df1de1b00fac72fbfa7f7328f89b6c5e9a0cbdc --- /dev/null +++ b/paddle/fluid/platform/device/gpu/cuda/CMakeLists.txt @@ -0,0 +1,5 @@ +nv_library(cuda_info SRCS cuda_info.cc DEPS gflags glog enforce monitor dynload_cuda) +nv_library(cuda_graph SRCS cuda_graph.cc DEPS enforce allocator_facade) +nv_library(cuda_profiler SRCS cuda_profiler.cc DEPS enforce) + +nv_test(cudnn_helper_test SRCS cudnn_helper_test.cc DEPS dynload_cuda pten) diff --git a/paddle/fluid/platform/cuda_device_function.h b/paddle/fluid/platform/device/gpu/cuda/cuda_device_function.h similarity index 67% rename from paddle/fluid/platform/cuda_device_function.h rename to paddle/fluid/platform/device/gpu/cuda/cuda_device_function.h index 352143302388a9f8169a40a14ccea9bae647cfc6..e7d807573957f57c0c3ccb8928422649eda5bdad 100644 --- a/paddle/fluid/platform/cuda_device_function.h +++ b/paddle/fluid/platform/device/gpu/cuda/cuda_device_function.h @@ -22,16 +22,11 @@ limitations under the License. 
*/ namespace paddle { namespace platform { -#ifdef PADDLE_WITH_HIP -#define CREATE_SHFL_MASK(mask, predicate) mask = __ballot((predicate)) -#else #define FULL_WARP_MASK 0xFFFFFFFF #define CREATE_SHFL_MASK(mask, predicate) \ mask = __ballot_sync(FULL_WARP_MASK, (predicate)) -#endif inline static int RoundToPowerOfTwo(int dim) { -#ifdef PADDLE_WITH_CUDA if (dim > 512) { return 1024; } else if (dim > 256) { @@ -45,17 +40,6 @@ inline static int RoundToPowerOfTwo(int dim) { } else { return 32; } -#else // HIP results in error or nan if > 256 - if (dim > 128) { - return 256; - } else if (dim > 64) { - return 128; - } else if (dim > 32) { - return 64; - } else { - return 32; - } -#endif } #define CUDA_LAUNCH_KERNEL_BASE(dim, ...) \ @@ -76,71 +60,15 @@ template __forceinline__ __device__ T CudaShuffleDownSync(unsigned mask, T val, int delta, int width = warpSize) { -#if defined(PADDLE_WITH_HIP) - return __shfl_down(val, delta, width); -#else return __shfl_down_sync(mask, val, static_cast(delta), width); -#endif } template __forceinline__ __device__ T CudaShuffleXorSync(unsigned mask, T val, int width = warpSize) { -#if defined(PADDLE_WITH_HIP) - return __shfl_xor(val, width); -#else return __shfl_xor_sync(mask, val, width); -#endif -} - -#if defined(PADDLE_WITH_HIP) -template <> -__forceinline__ __device__ float16 CudaShuffleDownSync(unsigned mask, - float16 val, int delta, - int width) { - return float16(__shfl_down(static_cast(val), - static_cast(delta), width)); } -template <> -__forceinline__ __device__ paddle::platform::complex CudaShuffleDownSync( - unsigned mask, paddle::platform::complex val, int delta, int width) { - float real = __shfl_down(val.real, delta, width); - float imag = __shfl_down(val.imag, delta, width); - return paddle::platform::complex(real, imag); -} - -template <> -__forceinline__ __device__ paddle::platform::complex -CudaShuffleDownSync(unsigned mask, paddle::platform::complex val, - int delta, int width) { - double real = __shfl_down(val.real, delta, width); - double imag = __shfl_down(val.imag, delta, width); - return paddle::platform::complex(real, imag); -} - -template <> -__forceinline__ __device__ float16 CudaShuffleXorSync(unsigned mask, - float16 val, int width) { - return float16(__shfl_xor(static_cast(val), width)); -} - -template <> -__forceinline__ __device__ paddle::platform::complex CudaShuffleXorSync( - unsigned mask, paddle::platform::complex val, int width) { - float real = __shfl_xor(val.real, width); - float imag = __shfl_xor(val.imag, width); - return paddle::platform::complex(real, imag); -} - -template <> -__forceinline__ __device__ paddle::platform::complex CudaShuffleXorSync( - unsigned mask, paddle::platform::complex val, int width) { - double real = __shfl_xor(val.real, width); - double imag = __shfl_xor(val.imag, width); - return paddle::platform::complex(real, imag); -} -#else template <> __forceinline__ __device__ float16 CudaShuffleDownSync(unsigned mask, float16 val, int delta, @@ -197,16 +125,11 @@ __forceinline__ __device__ paddle::platform::complex CudaShuffleXorSync( __shfl_xor_sync(mask, static_cast(val.imag), width)); return paddle::platform::complex(real, imag); } -#endif template __forceinline__ __device__ T CudaShuffleSync(unsigned mask, T val, int src_line, int width = 32) { -#if defined(PADDLE_WITH_HIP) - return __shfl(val, src_line, width); -#else return __shfl_sync(mask, val, src_line, width); -#endif } template @@ -216,17 +139,13 @@ HOSTDEVICE T Infinity() { template __device__ T reduceSum(T val, int tid, int len) { -// 
NOTE(zcd): The warp size should be taken from the -// parameters of the GPU but not specified as 32 simply. -// To make the reduceSum more efficiently, -// I use Warp-Level Parallelism and assume the Warp size -// is 32 which may be different for different GPU, -// but most card's warp size is 32. -#ifdef PADDLE_WITH_HIP - const int warpSize = 64; -#else + // NOTE(zcd): The warp size should be taken from the + // parameters of the GPU but not specified as 32 simply. + // To make the reduceSum more efficiently, + // I use Warp-Level Parallelism and assume the Warp size + // is 32 which may be different for different GPU, + // but most card's warp size is 32. const int warpSize = 32; -#endif __shared__ T shm[warpSize]; unsigned mask = 0u; CREATE_SHFL_MASK(mask, tid < len); diff --git a/paddle/fluid/platform/cuda_graph.cc b/paddle/fluid/platform/device/gpu/cuda/cuda_graph.cc similarity index 90% rename from paddle/fluid/platform/cuda_graph.cc rename to paddle/fluid/platform/device/gpu/cuda/cuda_graph.cc index 6f3d452ef5c5031883d88f46a2a4d79143f3f884..3970acf82d3ea3f4b3465e6f561160ab03fcac7c 100644 --- a/paddle/fluid/platform/cuda_graph.cc +++ b/paddle/fluid/platform/device/gpu/cuda/cuda_graph.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/fluid/platform/cuda_graph.h" +#include "paddle/fluid/platform/device/gpu/cuda/cuda_graph.h" namespace paddle { namespace platform { @@ -23,11 +23,11 @@ void CUDAGraph::Reset() { if (is_reset_) return; #if CUDA_VERSION >= 10010 for (auto graph : graphs_) { - PADDLE_ENFORCE_CUDA_SUCCESS(cudaGraphDestroy(graph)); + PADDLE_ENFORCE_GPU_SUCCESS(cudaGraphDestroy(graph)); } graphs_.clear(); for (auto exec_graph : exec_graphs_) { - PADDLE_ENFORCE_CUDA_SUCCESS(cudaGraphExecDestroy(exec_graph)); + PADDLE_ENFORCE_GPU_SUCCESS(cudaGraphExecDestroy(exec_graph)); } exec_graphs_.clear(); #endif @@ -46,7 +46,7 @@ void CUDAGraph::Replay() { errors::PermissionDenied( "Cannot replay the CUDA Graph after reset is called.")); for (auto exec_graph : exec_graphs_) { - PADDLE_ENFORCE_CUDA_SUCCESS(cudaGraphLaunch(exec_graph, stream_)); + PADDLE_ENFORCE_GPU_SUCCESS(cudaGraphLaunch(exec_graph, stream_)); } #endif } @@ -58,7 +58,7 @@ void CUDAGraph::BeginSegmentCapture() { IsCapturing(), true, errors::PermissionDenied("BeginSegmentCapture should be called when CUDA " "Graph is capturing.")); - PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamBeginCapture( + PADDLE_ENFORCE_GPU_SUCCESS(cudaStreamBeginCapture( capturing_graph_->stream_, capturing_graph_->capture_mode_)); PADDLE_ENFORCE_EQ(IsValidCapturing(), true, platform::errors::PermissionDenied( @@ -92,19 +92,19 @@ void CUDAGraph::EndSegmentCapture() { PADDLE_ENFORCE_EQ(IsCapturing(), true, errors::PermissionDenied("No CUDA Graph is capturing.")); cudaGraph_t graph; - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( cudaStreamEndCapture(capturing_graph_->stream_, &graph)); auto num_nodes = static_cast(-1); - PADDLE_ENFORCE_CUDA_SUCCESS(cudaGraphGetNodes(graph, nullptr, &num_nodes)); + PADDLE_ENFORCE_GPU_SUCCESS(cudaGraphGetNodes(graph, nullptr, &num_nodes)); if (num_nodes == 0) { - PADDLE_ENFORCE_CUDA_SUCCESS(cudaGraphDestroy(graph)); + PADDLE_ENFORCE_GPU_SUCCESS(cudaGraphDestroy(graph)); VLOG(10) << "Skip empty CUDA Graph with ID " << capturing_graph_->id_ << ", segment id " << capturing_graph_->graphs_.size(); return; } cudaGraphExec_t exec_graph; - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( cudaGraphInstantiate(&exec_graph, 
graph, nullptr, nullptr, 0)); VLOG(10) << "End to capture CUDA Graph with ID " << capturing_graph_->id_ << ", segment id " << capturing_graph_->graphs_.size(); @@ -123,7 +123,7 @@ bool CUDAGraph::IsValidCapturing() { if (!IsCapturing()) return false; cudaStreamCaptureStatus status; CUDAGraphID id; - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( cudaStreamGetCaptureInfo(capturing_graph_->stream_, &status, &id)); return status == cudaStreamCaptureStatusActive; #else @@ -154,7 +154,7 @@ void CUDAGraph::PrintToDotFiles(const std::string &dirname, ConcatPath(dirname, "segment_" + std::to_string(i) + ".dot"); VLOG(10) << "Save the " << i << "-th segment of graph " << id_ << " to " << filename; - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( cudaGraphDebugDotPrint(graphs_[i], filename.c_str(), flags)); } #else diff --git a/paddle/fluid/platform/cuda_graph.h b/paddle/fluid/platform/device/gpu/cuda/cuda_graph.h similarity index 96% rename from paddle/fluid/platform/cuda_graph.h rename to paddle/fluid/platform/device/gpu/cuda/cuda_graph.h index f70a66f76242fbf59603fb20ae701ed02342cc3c..0856e0fad1900a8f5a7c586f083acd3ec25e480f 100644 --- a/paddle/fluid/platform/cuda_graph.h +++ b/paddle/fluid/platform/device/gpu/cuda/cuda_graph.h @@ -21,7 +21,7 @@ #include #include "cuda.h" // NOLINT #include "cuda_runtime.h" // NOLINT -#include "paddle/fluid/platform/type_defs.h" +#include "paddle/fluid/platform/device/gpu/gpu_types.h" #include "paddle/fluid/platform/enforce.h" #include "paddle/fluid/platform/macros.h" @@ -129,7 +129,7 @@ class CUDAGraphCaptureModeGuard { explicit CUDAGraphCaptureModeGuard( cudaStreamCaptureMode mode = cudaStreamCaptureModeRelaxed) { if (UNLIKELY(CUDAGraph::IsCapturing())) { - PADDLE_ENFORCE_CUDA_SUCCESS(cudaThreadExchangeStreamCaptureMode(&mode)); + PADDLE_ENFORCE_GPU_SUCCESS(cudaThreadExchangeStreamCaptureMode(&mode)); // After cudaThreadExchangeStreamCaptureMode is called, // the variable "mode" would be set to the old capturing mode. 
old_mode_ = mode; @@ -138,7 +138,7 @@ class CUDAGraphCaptureModeGuard { ~CUDAGraphCaptureModeGuard() PADDLE_MAY_THROW { if (UNLIKELY(CUDAGraph::IsCapturing())) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( cudaThreadExchangeStreamCaptureMode(&old_mode_)); } } diff --git a/paddle/fluid/platform/cuda_helper.h b/paddle/fluid/platform/device/gpu/cuda/cuda_helper.h similarity index 78% rename from paddle/fluid/platform/cuda_helper.h rename to paddle/fluid/platform/device/gpu/cuda/cuda_helper.h index 202be920c559535eeea813b3b27dd3fa48011048..3199af9c9752057842fef0cae406ba319dc28adc 100644 --- a/paddle/fluid/platform/cuda_helper.h +++ b/paddle/fluid/platform/device/gpu/cuda/cuda_helper.h @@ -16,12 +16,7 @@ #include // NOLINT -#ifdef PADDLE_WITH_CUDA #include "paddle/fluid/platform/dynload/cublas.h" -#endif -#ifdef PADDLE_WITH_HIP -#include "paddle/fluid/platform/dynload/rocblas.h" -#endif #include "paddle/fluid/platform/enforce.h" #include "paddle/fluid/platform/macros.h" @@ -72,28 +67,13 @@ namespace platform { * */ -#ifdef __HIPCC__ -#define CUDA_KERNEL_LOOP_TYPE(i, num, index_type) \ - int64_t __index__ = hipBlockIdx_x * hipBlockDim_x + hipThreadIdx_x; \ - for (index_type i = __index__; __index__ < (num); \ - __index__ += hipBlockDim_x * hipGridDim_x, i = __index__) -#else #define CUDA_KERNEL_LOOP_TYPE(i, num, index_type) \ int64_t __index__ = blockIdx.x * blockDim.x + threadIdx.x; \ for (index_type i = __index__; __index__ < (num); \ __index__ += blockDim.x * gridDim.x, i = __index__) -#endif - -#define CUDA_KERNEL_LOOP(i, num) CUDA_KERNEL_LOOP_TYPE(i, num, int) class CublasHandleHolder { public: -#ifdef PADDLE_WITH_HIP - explicit CublasHandleHolder(hipStream_t stream) { - PADDLE_RETRY_CUDA_SUCCESS(dynload::rocblas_create_handle(&handle_)); - PADDLE_RETRY_CUDA_SUCCESS(dynload::rocblas_set_stream(handle_, stream)); - } -#else CublasHandleHolder(cudaStream_t stream, cublasMath_t math_type) { PADDLE_RETRY_CUDA_SUCCESS(dynload::cublasCreate(&handle_)); PADDLE_RETRY_CUDA_SUCCESS(dynload::cublasSetStream(handle_, stream)); @@ -109,20 +89,11 @@ class CublasHandleHolder { } #endif // CUDA_VERSION >= 9000 } -#endif -#ifdef PADDLE_WITH_HIP - const rocblas_handle& GetCublasHandle() const { return handle_; } -#else const cublasHandle_t& GetCublasHandle() const { return handle_; } -#endif ~CublasHandleHolder() PADDLE_MAY_THROW { -#ifdef PADDLE_WITH_HIP - PADDLE_RETRY_CUDA_SUCCESS(dynload::rocblas_destroy_handle(handle_)); -#else PADDLE_RETRY_CUDA_SUCCESS(dynload::cublasDestroy(handle_)); -#endif } template @@ -134,11 +105,7 @@ class CublasHandleHolder { private: DISABLE_COPY_AND_ASSIGN(CublasHandleHolder); -#ifdef PADDLE_WITH_HIP - rocblas_handle handle_; -#else cublasHandle_t handle_; -#endif mutable std::mutex mtx_; }; diff --git a/paddle/fluid/platform/device/gpu/cuda/cuda_info.cc b/paddle/fluid/platform/device/gpu/cuda/cuda_info.cc new file mode 100644 index 0000000000000000000000000000000000000000..6109ed65543189d2a3931a0c69dcd7ba26e6a292 --- /dev/null +++ b/paddle/fluid/platform/device/gpu/cuda/cuda_info.cc @@ -0,0 +1,268 @@ +// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/platform/device/gpu/gpu_info.h" +#include "paddle/fluid/platform/enforce.h" +#include "paddle/fluid/platform/lock_guard_ptr.h" +#include "paddle/fluid/platform/macros.h" +#include "paddle/fluid/platform/monitor.h" +#include "paddle/fluid/platform/place.h" +#include "paddle/fluid/string/split.h" + +static std::once_flag g_device_props_size_init_flag; +static std::vector> g_device_props_init_flags; +static std::vector g_device_props; + +namespace paddle { +namespace platform { +int DnnVersion() { + if (!dynload::HasCUDNN()) return -1; + return dynload::cudnnGetVersion(); +} + +static int GetGPUDeviceCountImpl() { + int driverVersion = 0; + cudaError_t status = cudaDriverGetVersion(&driverVersion); + + if (!(status == gpuSuccess && driverVersion != 0)) { + // No GPU driver + VLOG(2) << "GPU Driver Version can't be detected. No GPU driver!"; + return 0; + } + + const auto *cuda_visible_devices = std::getenv("CUDA_VISIBLE_DEVICES"); + + if (cuda_visible_devices != nullptr) { + std::string cuda_visible_devices_str(cuda_visible_devices); + if (!cuda_visible_devices_str.empty()) { + cuda_visible_devices_str.erase( + 0, cuda_visible_devices_str.find_first_not_of('\'')); + cuda_visible_devices_str.erase( + cuda_visible_devices_str.find_last_not_of('\'') + 1); + cuda_visible_devices_str.erase( + 0, cuda_visible_devices_str.find_first_not_of('\"')); + cuda_visible_devices_str.erase( + cuda_visible_devices_str.find_last_not_of('\"') + 1); + } + if (std::all_of(cuda_visible_devices_str.begin(), + cuda_visible_devices_str.end(), + [](char ch) { return ch == ' '; })) { + VLOG(2) << "CUDA_VISIBLE_DEVICES is set to be " + "empty. No GPU detected."; + return 0; + } + } + int count; + PADDLE_ENFORCE_GPU_SUCCESS(cudaGetDeviceCount(&count)); + return count; +} + +int GetGPUDeviceCount() { + // cache the count + static auto dev_cnt = GetGPUDeviceCountImpl(); + return dev_cnt; +} + +int GetGPUComputeCapability(int id) { + PADDLE_ENFORCE_LT(id, GetGPUDeviceCount(), + platform::errors::InvalidArgument( + "Device id must be less than GPU count, " + "but received id is: %d. GPU count is: %d.", + id, GetGPUDeviceCount())); + int major, minor; + auto major_error_code = + cudaDeviceGetAttribute(&major, cudaDevAttrComputeCapabilityMajor, id); + auto minor_error_code = + cudaDeviceGetAttribute(&minor, cudaDevAttrComputeCapabilityMinor, id); + + PADDLE_ENFORCE_GPU_SUCCESS(major_error_code); + PADDLE_ENFORCE_GPU_SUCCESS(minor_error_code); + return major * 10 + minor; +} + +int GetGPURuntimeVersion(int id) { + PADDLE_ENFORCE_LT(id, GetGPUDeviceCount(), + platform::errors::InvalidArgument( + "Device id must be less than GPU count, " + "but received id is: %d. GPU count is: %d.", + id, GetGPUDeviceCount())); + int runtime_version = 0; + PADDLE_ENFORCE_GPU_SUCCESS(cudaRuntimeGetVersion(&runtime_version)); + return runtime_version; +} + +int GetGPUDriverVersion(int id) { + PADDLE_ENFORCE_LT(id, GetGPUDeviceCount(), + platform::errors::InvalidArgument( + "Device id must be less than GPU count, " + "but received id is: %d. 
GPU count is: %d.", + id, GetGPUDeviceCount())); + int driver_version = 0; + PADDLE_ENFORCE_GPU_SUCCESS(cudaDriverGetVersion(&driver_version)); + return driver_version; +} + +bool TensorCoreAvailable() { + int device = GetCurrentDeviceId(); + int driver_version = GetGPUComputeCapability(device); + return driver_version >= 70; +} + +int GetGPUMultiProcessors(int id) { + PADDLE_ENFORCE_LT(id, GetGPUDeviceCount(), + platform::errors::InvalidArgument( + "Device id must be less than GPU count, " + "but received id is: %d. GPU count is: %d.", + id, GetGPUDeviceCount())); + int count; + PADDLE_ENFORCE_GPU_SUCCESS( + cudaDeviceGetAttribute(&count, cudaDevAttrMultiProcessorCount, id)); + return count; +} + +int GetGPUMaxThreadsPerMultiProcessor(int id) { + PADDLE_ENFORCE_LT(id, GetGPUDeviceCount(), + platform::errors::InvalidArgument( + "Device id must be less than GPU count, " + "but received id is: %d. GPU count is: %d.", + id, GetGPUDeviceCount())); + int count; + PADDLE_ENFORCE_GPU_SUCCESS(cudaDeviceGetAttribute( + &count, cudaDevAttrMaxThreadsPerMultiProcessor, id)); + + return count; +} + +int GetGPUMaxThreadsPerBlock(int id) { + PADDLE_ENFORCE_LT(id, GetGPUDeviceCount(), + platform::errors::InvalidArgument( + "Device id must be less than GPU count, " + "but received id is: %d. GPU count is: %d.", + id, GetGPUDeviceCount())); + int count; + PADDLE_ENFORCE_GPU_SUCCESS( + cudaDeviceGetAttribute(&count, cudaDevAttrMaxThreadsPerBlock, id)); + return count; +} + +int GetCurrentDeviceId() { + int device_id; + PADDLE_ENFORCE_GPU_SUCCESS(cudaGetDevice(&device_id)); + return device_id; +} + +dim3 GetGpuMaxGridDimSize(int id) { + PADDLE_ENFORCE_LT(id, GetGPUDeviceCount(), + platform::errors::InvalidArgument( + "Device id must be less than GPU count, " + "but received id is: %d. GPU count is: %d.", + id, GetGPUDeviceCount())); + dim3 ret; + int size; + auto error_code_x = cudaDeviceGetAttribute(&size, cudaDevAttrMaxGridDimX, id); + PADDLE_ENFORCE_GPU_SUCCESS(error_code_x); + ret.x = size; + + auto error_code_y = cudaDeviceGetAttribute(&size, cudaDevAttrMaxGridDimY, id); + PADDLE_ENFORCE_GPU_SUCCESS(error_code_y); + ret.y = size; + + auto error_code_z = cudaDeviceGetAttribute(&size, cudaDevAttrMaxGridDimZ, id); + PADDLE_ENFORCE_GPU_SUCCESS(error_code_z); + ret.z = size; + return ret; +} + +const gpuDeviceProp &GetDeviceProperties(int id) { + std::call_once(g_device_props_size_init_flag, [&] { + int gpu_num = 0; + gpu_num = platform::GetGPUDeviceCount(); + g_device_props_init_flags.resize(gpu_num); + g_device_props.resize(gpu_num); + for (int i = 0; i < gpu_num; ++i) { + g_device_props_init_flags[i] = std::make_unique(); + } + }); + + if (id == -1) { + id = platform::GetCurrentDeviceId(); + } + + if (id < 0 || id >= static_cast(g_device_props.size())) { + PADDLE_THROW(platform::errors::OutOfRange( + "The device id %d is out of range [0, %d), where %d is the number of " + "devices on this machine. Because the device id should be greater than " + "or equal to zero and smaller than the number of gpus. 
Please input " + "appropriate device again!", + id, static_cast(g_device_props.size()), + static_cast(g_device_props.size()))); + } + + std::call_once(*(g_device_props_init_flags[id]), [&] { + PADDLE_ENFORCE_GPU_SUCCESS( + cudaGetDeviceProperties(&g_device_props[id], id)); + }); + + return g_device_props[id]; +} + +void SetDeviceId(int id) { + // TODO(qijun): find a better way to cache the cuda device count + PADDLE_ENFORCE_LT(id, GetGPUDeviceCount(), + platform::errors::InvalidArgument( + "Device id must be less than GPU count, " + "but received id is: %d. GPU count is: %d.", + id, GetGPUDeviceCount())); + PADDLE_RETRY_CUDA_SUCCESS(cudaSetDevice(id)); +} + +void GpuMemcpyAsync(void *dst, const void *src, size_t count, + gpuMemcpyKind kind, gpuStream_t stream) { + PADDLE_ENFORCE_GPU_SUCCESS(cudaMemcpyAsync(dst, src, count, kind, stream)); +} + +void GpuMemcpySync(void *dst, const void *src, size_t count, + gpuMemcpyKind kind) { + PADDLE_ENFORCE_GPU_SUCCESS(cudaMemcpy(dst, src, count, kind)); +} + +void GpuMemcpyPeerAsync(void *dst, int dst_device, const void *src, + int src_device, size_t count, gpuStream_t stream) { + PADDLE_ENFORCE_GPU_SUCCESS( + cudaMemcpyPeerAsync(dst, dst_device, src, src_device, count, stream)); +} + +void GpuMemcpyPeerSync(void *dst, int dst_device, const void *src, + int src_device, size_t count) { + PADDLE_ENFORCE_GPU_SUCCESS( + cudaMemcpyPeer(dst, dst_device, src, src_device, count)); +} + +void GpuMemsetAsync(void *dst, int value, size_t count, gpuStream_t stream) { + PADDLE_ENFORCE_GPU_SUCCESS(cudaMemsetAsync(dst, value, count, stream)); +} + +void GpuStreamSync(gpuStream_t stream) { + PADDLE_ENFORCE_GPU_SUCCESS(cudaStreamSynchronize(stream)); +} + +void GpuDestroyStream(gpuStream_t stream) { + PADDLE_ENFORCE_GPU_SUCCESS(cudaStreamDestroy(stream)); +} + +void GpuDeviceSync() { PADDLE_ENFORCE_GPU_SUCCESS(cudaDeviceSynchronize()); } + +gpuError_t GpuGetLastError() { return cudaGetLastError(); } +} // namespace platform +} // namespace paddle diff --git a/paddle/fluid/platform/cuda_profiler.cc b/paddle/fluid/platform/device/gpu/cuda/cuda_profiler.cc similarity index 85% rename from paddle/fluid/platform/cuda_profiler.cc rename to paddle/fluid/platform/device/gpu/cuda/cuda_profiler.cc index 998dd80dc5e7df6e2b745860237ce5f3cdb81208..42351fe097a9df3f2d34b65d9f1d07ac5d7931a8 100644 --- a/paddle/fluid/platform/cuda_profiler.cc +++ b/paddle/fluid/platform/device/gpu/cuda/cuda_profiler.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/fluid/platform/cuda_profiler.h" +#include "paddle/fluid/platform/device/gpu/cuda/cuda_profiler.h" namespace paddle { namespace platform { @@ -25,13 +25,13 @@ void CudaProfilerInit(std::string output_file, std::string output_mode, "`csv`, but received `%s`.", output_mode)); cudaOutputMode_t mode = output_mode == "csv" ? 
cudaCSV : cudaKeyValuePair; - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( cudaProfilerInitialize(config_file.c_str(), output_file.c_str(), mode)); } -void CudaProfilerStart() { PADDLE_ENFORCE_CUDA_SUCCESS(cudaProfilerStart()); } +void CudaProfilerStart() { PADDLE_ENFORCE_GPU_SUCCESS(cudaProfilerStart()); } -void CudaProfilerStop() { PADDLE_ENFORCE_CUDA_SUCCESS(cudaProfilerStop()); } +void CudaProfilerStop() { PADDLE_ENFORCE_GPU_SUCCESS(cudaProfilerStop()); } #ifndef _WIN32 void CudaNvtxRangePush(std::string name) { diff --git a/paddle/fluid/platform/cuda_profiler.h b/paddle/fluid/platform/device/gpu/cuda/cuda_profiler.h similarity index 100% rename from paddle/fluid/platform/cuda_profiler.h rename to paddle/fluid/platform/device/gpu/cuda/cuda_profiler.h diff --git a/paddle/fluid/platform/cudnn_desc.h b/paddle/fluid/platform/device/gpu/cuda/cudnn_desc.h similarity index 84% rename from paddle/fluid/platform/cudnn_desc.h rename to paddle/fluid/platform/device/gpu/cuda/cudnn_desc.h index 318c85ee484bef17833e48b068bf01f014eccd79..7bff2c69381e6989a18cd795bee15c26cc8a75ef 100644 --- a/paddle/fluid/platform/cudnn_desc.h +++ b/paddle/fluid/platform/device/gpu/cuda/cudnn_desc.h @@ -23,7 +23,7 @@ #include #include -#include "paddle/fluid/platform/cudnn_helper.h" +#include "paddle/fluid/platform/device/gpu/cuda/cudnn_helper.h" #include "paddle/fluid/platform/device_context.h" namespace paddle { @@ -99,7 +99,7 @@ class ActivationDescriptor { struct Deleter { void operator()(T* t) { if (t != nullptr) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( dynload::cudnnDestroyActivationDescriptor(t)); t = nullptr; } @@ -107,13 +107,13 @@ class ActivationDescriptor { }; ActivationDescriptor() { T* raw_ptr; - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( dynload::cudnnCreateActivationDescriptor(&raw_ptr)); desc_.reset(raw_ptr); } template void set(cudnnActivationMode_t mode, const T& coef) { - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnSetActivationDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnSetActivationDescriptor( desc_.get(), mode, CUDNN_NOT_PROPAGATE_NAN, static_cast(coef))); } @@ -130,14 +130,14 @@ class TensorDescriptor { struct Deleter { void operator()(T* t) { if (t != nullptr) { - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnDestroyTensorDescriptor(t)); + PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnDestroyTensorDescriptor(t)); t = nullptr; } } }; TensorDescriptor() { T* raw_ptr; - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnCreateTensorDescriptor(&raw_ptr)); + PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnCreateTensorDescriptor(&raw_ptr)); desc_.reset(raw_ptr); } T* desc() { return desc_.get(); } @@ -153,7 +153,7 @@ class TensorDescriptor { if (groups > 1) { dims_with_group[1] = dims_with_group[1] / groups; } - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnSetTensorNdDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnSetTensorNdDescriptor( desc_.get(), ToCudnnDataType(tensor.type()), dims_with_group.size(), dims_with_group.data(), strides.data())); } @@ -166,7 +166,7 @@ class TensorDescriptor { } else { transformed_dims = dims; } - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnSetTensorNdDescriptorEx( + PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnSetTensorNdDescriptorEx( desc_.get(), format, dtype, transformed_dims.size(), transformed_dims.data())); } @@ -187,14 +187,14 @@ class FilterDescriptor { struct Deleter { void operator()(T* t) { if (t != nullptr) { - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnDestroyFilterDescriptor(t)); + 
PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnDestroyFilterDescriptor(t)); t = nullptr; } } }; FilterDescriptor() { T* raw_ptr; - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnCreateFilterDescriptor(&raw_ptr)); + PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnCreateFilterDescriptor(&raw_ptr)); desc_.reset(raw_ptr); } T* desc() { return desc_.get(); } @@ -211,7 +211,7 @@ class FilterDescriptor { if (groups > 1) { transformed_dims[1] = transformed_dims[1] / groups; } - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnSetFilterNdDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnSetFilterNdDescriptor( desc_.get(), dtype, format, transformed_dims.size(), transformed_dims.data())); } @@ -233,7 +233,7 @@ class ConvolutionDescriptor { struct Deleter { void operator()(T* t) { if (t != nullptr) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( dynload::cudnnDestroyConvolutionDescriptor(t)); t = nullptr; } @@ -241,7 +241,7 @@ class ConvolutionDescriptor { }; ConvolutionDescriptor() { T* raw_ptr; - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( dynload::cudnnCreateConvolutionDescriptor(&raw_ptr)); desc_.reset(raw_ptr); } @@ -255,28 +255,26 @@ class ConvolutionDescriptor { cudnnDataType_t compute_type = (dtype == CUDNN_DATA_DOUBLE) ? CUDNN_DATA_DOUBLE : CUDNN_DATA_FLOAT; T* desc = desc_.get(); - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnSetConvolutionNdDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnSetConvolutionNdDescriptor( desc, pads.size(), pads.data(), strides.data(), dilations.data(), CUDNN_CROSS_CORRELATION, compute_type)); #if CUDNN_VERSION_MIN(7, 0, 1) - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnSetConvolutionGroupCount(desc, groups)); #if CUDA_VERSION >= 9000 && CUDNN_VERSION_MIN(7, 0, 1) - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetConvolutionMathType( + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetConvolutionMathType( desc, CUDNN_DEFAULT_MATH)); if (dtype == CUDNN_DATA_HALF) { - PADDLE_ENFORCE_CUDA_SUCCESS( - platform::dynload::cudnnSetConvolutionMathType(desc, - CUDNN_TENSOR_OP_MATH)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetConvolutionMathType( + desc, CUDNN_TENSOR_OP_MATH)); #if CUDA_VERSION >= 11000 #if CUDNN_VERSION_MIN(8, 1, 0) } else if (dtype == CUDNN_DATA_BFLOAT16) { - PADDLE_ENFORCE_CUDA_SUCCESS( - platform::dynload::cudnnSetConvolutionMathType(desc, - CUDNN_TENSOR_OP_MATH)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetConvolutionMathType( + desc, CUDNN_TENSOR_OP_MATH)); #endif // CUDNN_VERSION_MIN(8,1,0) } else if (dtype == CUDNN_DATA_FLOAT && !allow_tf32) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnSetConvolutionMathType(desc, CUDNN_FMA_MATH)); #endif // CUDA_VERSION >= 11000 } diff --git a/paddle/fluid/platform/cudnn_helper.h b/paddle/fluid/platform/device/gpu/cuda/cudnn_helper.h similarity index 88% rename from paddle/fluid/platform/cudnn_helper.h rename to paddle/fluid/platform/device/gpu/cuda/cudnn_helper.h index 65dd69a37d37f8116deee0e63ab89d9249f908ba..2bcdbaa201889b945cba578f3fc7f4ed13c9d8b8 100644 --- a/paddle/fluid/platform/cudnn_helper.h +++ b/paddle/fluid/platform/device/gpu/cuda/cudnn_helper.h @@ -191,10 +191,10 @@ inline cudnnTensorFormat_t GetCudnnTensorFormat( class ScopedTensorDescriptor { public: ScopedTensorDescriptor() { - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnCreateTensorDescriptor(&desc_)); + PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnCreateTensorDescriptor(&desc_)); } ~ScopedTensorDescriptor() 
PADDLE_MAY_THROW { - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnDestroyTensorDescriptor(desc_)); + PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnDestroyTensorDescriptor(desc_)); } inline cudnnTensorDescriptor_t descriptor(const cudnnTensorFormat_t format, @@ -216,20 +216,20 @@ class ScopedTensorDescriptor { if (dims.size() == 4) { if (format == CUDNN_TENSOR_NCHW) { - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnSetTensorNdDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnSetTensorNdDescriptor( desc_, type, dims_with_group.size(), dims_with_group.data(), strides.data())); } else { // CUDNN_TENSOR_NHWC - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnSetTensor4dDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnSetTensor4dDescriptor( desc_, format, type, dims[0], dims[3], dims[1], dims[2])); } } else if (dims.size() == 5) { if (format == CUDNN_TENSOR_NCHW) { - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnSetTensorNdDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnSetTensorNdDescriptor( desc_, type, dims_with_group.size(), dims_with_group.data(), strides.data())); } else { // CUDNN_TENSOR_NHWC - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnSetTensorNdDescriptorEx( + PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnSetTensorNdDescriptorEx( desc_, format, type, dims.size(), dims.data())); } } @@ -247,7 +247,7 @@ class ScopedTensorDescriptor { inline cudnnTensorDescriptor_t descriptor(const cudnnDataType_t cudnn_type, const std::vector& dim, const std::vector& stride) { - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnSetTensorNdDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnSetTensorNdDescriptor( desc_, cudnn_type, dim.size(), dim.data(), stride.data())); return desc_; } @@ -269,11 +269,11 @@ class ScopedTensorDescriptor { class ScopedRNNTensorDescriptor { public: ScopedRNNTensorDescriptor() { - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnCreateRNNDataDescriptor(&desc_)); + PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnCreateRNNDataDescriptor(&desc_)); } ~ScopedRNNTensorDescriptor() PADDLE_MAY_THROW { - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnDestroyRNNDataDescriptor(desc_)); + PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnDestroyRNNDataDescriptor(desc_)); } inline cudnnRNNDataDescriptor_t descriptor( @@ -288,7 +288,7 @@ class ScopedRNNTensorDescriptor { layout = CUDNN_RNN_DATA_LAYOUT_BATCH_MAJOR_UNPACKED; } - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnSetRNNDataDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnSetRNNDataDescriptor( desc_, cudnn_type, layout, max_seq_length, batch_size, input_size, seq_length.data(), static_cast(&padding_fill))); @@ -314,10 +314,10 @@ class ScopedRNNTensorDescriptor { class ScopedDropoutDescriptor { public: ScopedDropoutDescriptor() { - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnCreateDropoutDescriptor(&desc_)); + PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnCreateDropoutDescriptor(&desc_)); } ~ScopedDropoutDescriptor() PADDLE_MAY_THROW { - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnDestroyDropoutDescriptor(desc_)); + PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnDestroyDropoutDescriptor(desc_)); } inline cudnnDropoutDescriptor_t descriptor(const cudnnHandle_t& handle, @@ -327,19 +327,19 @@ class ScopedDropoutDescriptor { framework::Tensor* dropout_state_, int seed, size_t state_size) { if (dropout_state_ == nullptr) { // for no dropout or test - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnSetDropoutDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnSetDropoutDescriptor( desc_, handle, 0 /* dropout */, nullptr, 0 /* state_size */, 0 /* seed */)); return desc_; } auto* 
dropout_state_data = dropout_state_->data(); if (!initialized) { - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnSetDropoutDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnSetDropoutDescriptor( desc_, handle, dropout_prob_, dropout_state_data, state_size, seed)); } else { auto dropout_state_dims = dropout_state_->dims(); state_size = dropout_state_dims[0]; - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnRestoreDropoutDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnRestoreDropoutDescriptor( desc_, handle, dropout_prob_, dropout_state_data, state_size, 0)); } return desc_; @@ -354,10 +354,10 @@ class ScopedDropoutDescriptor { class ScopedRNNDescriptor { public: ScopedRNNDescriptor() { - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnCreateRNNDescriptor(&desc_)); + PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnCreateRNNDescriptor(&desc_)); } ~ScopedRNNDescriptor() PADDLE_MAY_THROW { - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnDestroyRNNDescriptor(desc_)); + PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnDestroyRNNDescriptor(desc_)); } inline cudnnRNNDescriptor_t desc() { return desc_; } @@ -370,10 +370,10 @@ class ScopedRNNDescriptor { class ScopedFilterDescriptor { public: ScopedFilterDescriptor() { - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnCreateFilterDescriptor(&desc_)); + PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnCreateFilterDescriptor(&desc_)); } ~ScopedFilterDescriptor() PADDLE_MAY_THROW { - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnDestroyFilterDescriptor(desc_)); + PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnDestroyFilterDescriptor(desc_)); } inline cudnnFilterDescriptor_t descriptor(const cudnnTensorFormat_t format, @@ -389,7 +389,7 @@ class ScopedFilterDescriptor { kernel_with_group[0] /= groups; // NOTE: input filter(C) of the filter is already asserted to be C/groups. } - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnSetFilterNdDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnSetFilterNdDescriptor( desc_, type, format, kernel_with_group.size(), kernel_with_group.data())); return desc_; @@ -413,11 +413,11 @@ class ScopedFilterDescriptor { class ScopedConvolutionDescriptor { public: ScopedConvolutionDescriptor() { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( dynload::cudnnCreateConvolutionDescriptor(&desc_)); } ~ScopedConvolutionDescriptor() PADDLE_MAY_THROW { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( dynload::cudnnDestroyConvolutionDescriptor(desc_)); } @@ -438,7 +438,7 @@ class ScopedConvolutionDescriptor { cudnnDataType_t compute_type = (type == CUDNN_DATA_DOUBLE) ? CUDNN_DATA_DOUBLE : CUDNN_DATA_FLOAT; - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnSetConvolutionNdDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnSetConvolutionNdDescriptor( desc_, pads.size(), pads.data(), strides.data(), dilations.data(), CUDNN_CROSS_CORRELATION, compute_type)); return desc_; @@ -459,10 +459,10 @@ class ScopedConvolutionDescriptor { class ScopedPoolingDescriptor { public: ScopedPoolingDescriptor() { - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnCreatePoolingDescriptor(&desc_)); + PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnCreatePoolingDescriptor(&desc_)); } ~ScopedPoolingDescriptor() PADDLE_MAY_THROW { - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnDestroyPoolingDescriptor(desc_)); + PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnDestroyPoolingDescriptor(desc_)); } inline cudnnPoolingDescriptor_t descriptor(const PoolingMode& mode, @@ -480,7 +480,7 @@ class ScopedPoolingDescriptor { "The size of kernel and strides should be equal. 
But " "received size of kernel is %d, size of strides is %d.", kernel.size(), strides.size())); - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnSetPoolingNdDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnSetPoolingNdDescriptor( desc_, (GetPoolingMode(mode)), CUDNN_PROPAGATE_NAN, // Always propagate nans. kernel.size(), kernel.data(), pads.data(), strides.data())); @@ -495,18 +495,18 @@ class ScopedPoolingDescriptor { class ScopedSpatialTransformerDescriptor { public: ScopedSpatialTransformerDescriptor() { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( dynload::cudnnCreateSpatialTransformerDescriptor(&desc_)); } ~ScopedSpatialTransformerDescriptor() PADDLE_MAY_THROW { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( dynload::cudnnDestroySpatialTransformerDescriptor(desc_)); } template inline cudnnSpatialTransformerDescriptor_t descriptor(const int nbDims, const int dimA[]) { - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnSetSpatialTransformerNdDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnSetSpatialTransformerNdDescriptor( desc_, CUDNN_SAMPLER_BILINEAR, CudnnDataType::type, nbDims, dimA)); return desc_; } @@ -519,11 +519,11 @@ class ScopedSpatialTransformerDescriptor { class ScopedActivationDescriptor { public: ScopedActivationDescriptor() { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( dynload::cudnnCreateActivationDescriptor(&desc_)); } ~ScopedActivationDescriptor() PADDLE_MAY_THROW { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( dynload::cudnnDestroyActivationDescriptor(desc_)); } @@ -561,7 +561,7 @@ class ScopedActivationDescriptor { "Unrecognized CUDNN activation mode: %d.", static_cast(activation_mode))); } - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnSetActivationDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnSetActivationDescriptor( desc_, mode, CUDNN_NOT_PROPAGATE_NAN, relu_ceiling)); return desc_; } @@ -587,15 +587,15 @@ inline bool CanCUDNNBeUsed(const framework::ExecutionContext& ctx) { class ScopedCTCLossDescriptor { public: ScopedCTCLossDescriptor() { - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnCreateCTCLossDescriptor(&desc_)); + PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnCreateCTCLossDescriptor(&desc_)); } ~ScopedCTCLossDescriptor() PADDLE_MAY_THROW { - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnDestroyCTCLossDescriptor(desc_)); + PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnDestroyCTCLossDescriptor(desc_)); } template inline cudnnCTCLossDescriptor_t descriptor() { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( dynload::cudnnSetCTCLossDescriptor(desc_, CudnnDataType::type)); return desc_; } diff --git a/paddle/fluid/platform/cudnn_helper_test.cc b/paddle/fluid/platform/device/gpu/cuda/cudnn_helper_test.cc similarity index 98% rename from paddle/fluid/platform/cudnn_helper_test.cc rename to paddle/fluid/platform/device/gpu/cuda/cudnn_helper_test.cc index 98ec2be87755cde318bda3912c51d1532d4934a8..851d0d18c604cdc593c00fd1463d7f87d7c8dcfc 100644 --- a/paddle/fluid/platform/cudnn_helper_test.cc +++ b/paddle/fluid/platform/device/gpu/cuda/cudnn_helper_test.cc @@ -15,7 +15,7 @@ limitations under the License. 
*/ #define GLOG_NO_ABBREVIATED_SEVERITIES #define GOOGLE_GLOG_DLL_DECL -#include "paddle/fluid/platform/cudnn_helper.h" +#include "paddle/fluid/platform/device/gpu/gpu_dnn.h" #include diff --git a/paddle/fluid/platform/cuda_helper_test.cu b/paddle/fluid/platform/device/gpu/cuda_helper_test.cu similarity index 98% rename from paddle/fluid/platform/cuda_helper_test.cu rename to paddle/fluid/platform/device/gpu/cuda_helper_test.cu index fd46aa239340351b5e5464af4a19dd7a04296914..ab8bb2cad8c512d442d80a5ca89dad1dce853cf6 100644 --- a/paddle/fluid/platform/cuda_helper_test.cu +++ b/paddle/fluid/platform/device/gpu/cuda_helper_test.cu @@ -21,11 +21,11 @@ #include #define PADDLE_CUDA_FP16 -#include "paddle/fluid/platform/cuda_device_function.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/device/gpu/gpu_device_function.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/fluid/platform/float16.h" -#include "paddle/fluid/platform/cuda_helper.h" +#include "paddle/fluid/platform/device/gpu/gpu_helper.h" using paddle::platform::PADDLE_CUDA_NUM_THREADS; using paddle::platform::float16; diff --git a/paddle/fluid/platform/cudnn_desc_test.cc b/paddle/fluid/platform/device/gpu/cudnn_desc_test.cc similarity index 90% rename from paddle/fluid/platform/cudnn_desc_test.cc rename to paddle/fluid/platform/device/gpu/cudnn_desc_test.cc index db5362f5cb1f5d4c84d6816c1672340c7f22cb2f..8ea30027e8adeda56657eb441b414c616b223849 100644 --- a/paddle/fluid/platform/cudnn_desc_test.cc +++ b/paddle/fluid/platform/device/gpu/cudnn_desc_test.cc @@ -12,11 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#ifdef PADDLE_WITH_HIP -#include "paddle/fluid/platform/miopen_desc.h" -#else -#include "paddle/fluid/platform/cudnn_desc.h" -#endif +#include "paddle/fluid/platform/device/gpu/gpu_dnn.h" #include diff --git a/paddle/fluid/platform/type_defs.h b/paddle/fluid/platform/device/gpu/gpu_device_function.h similarity index 50% rename from paddle/fluid/platform/type_defs.h rename to paddle/fluid/platform/device/gpu/gpu_device_function.h index 88a2d16472fa706fdd38c2ca96a92734feaefa9f..a8daa5e87fdc38ebe3efd4de767d35869e607eff 100644 --- a/paddle/fluid/platform/type_defs.h +++ b/paddle/fluid/platform/device/gpu/gpu_device_function.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -13,28 +13,12 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #pragma once +#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) #ifdef PADDLE_WITH_HIP -#include +#include "paddle/fluid/platform/device/gpu/rocm/rocm_device_function.h" #else -#include +#include "paddle/fluid/platform/device/gpu/cuda/cuda_device_function.h" #endif -namespace paddle { - -#ifdef PADDLE_WITH_HIP -#define gpuSuccess hipSuccess -using gpuStream_t = hipStream_t; -using gpuError_t = hipError_t; -using gpuEvent_t = hipEvent_t; -using gpuDeviceProp = hipDeviceProp_t; -#else -#define gpuSuccess cudaSuccess -using gpuStream_t = cudaStream_t; -using gpuError_t = cudaError_t; -using gpuEvent_t = cudaEvent_t; -using gpuDeviceProp = cudaDeviceProp; #endif - -using CUDAGraphID = unsigned long long; // NOLINT -} // namespace paddle diff --git a/paddle/fluid/platform/device/gpu/gpu_dnn.h b/paddle/fluid/platform/device/gpu/gpu_dnn.h new file mode 100644 index 0000000000000000000000000000000000000000..3f9bc5e6de80b5d05261863b0557f863fe223943 --- /dev/null +++ b/paddle/fluid/platform/device/gpu/gpu_dnn.h @@ -0,0 +1,27 @@ +// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) + +#ifdef PADDLE_WITH_HIP +#include "paddle/fluid/platform/device/gpu/rocm/miopen_desc.h" +#include "paddle/fluid/platform/device/gpu/rocm/miopen_helper.h" +#else // CUDA +#include "paddle/fluid/platform/device/gpu/cuda/cudnn_desc.h" +#include "paddle/fluid/platform/device/gpu/cuda/cudnn_helper.h" +#endif + +#endif diff --git a/paddle/fluid/platform/device/gpu/gpu_helper.h b/paddle/fluid/platform/device/gpu/gpu_helper.h new file mode 100644 index 0000000000000000000000000000000000000000..6077a7b625d250af8018a3bb92b87618bb150098 --- /dev/null +++ b/paddle/fluid/platform/device/gpu/gpu_helper.h @@ -0,0 +1,26 @@ +// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#pragma once +#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) + +#ifdef PADDLE_WITH_HIP +#include "paddle/fluid/platform/device/gpu/rocm/rocm_helper.h" +#else +#include "paddle/fluid/platform/device/gpu/cuda/cuda_helper.h" +#endif + +#define CUDA_KERNEL_LOOP(i, num) CUDA_KERNEL_LOOP_TYPE(i, num, int) + +#endif diff --git a/paddle/fluid/platform/device/gpu/gpu_info.cc b/paddle/fluid/platform/device/gpu/gpu_info.cc new file mode 100644 index 0000000000000000000000000000000000000000..e68277cc37b381a80e33860543c241f5e137c5be --- /dev/null +++ b/paddle/fluid/platform/device/gpu/gpu_info.cc @@ -0,0 +1,356 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/platform/device/gpu/gpu_info.h" +#include +#include +#include + +#include "gflags/gflags.h" +#include "paddle/fluid/platform/cuda_device_guard.h" +#ifdef PADDLE_WITH_HIP +#include "paddle/fluid/platform/dynload/miopen.h" +#else +#include "paddle/fluid/platform/device/gpu/cuda/cuda_graph.h" +#include "paddle/fluid/platform/dynload/cudnn.h" +#endif +#include "paddle/fluid/memory/malloc.h" +#ifdef PADDLE_WITH_CUDA +#if CUDA_VERSION >= 10020 +#include "paddle/fluid/platform/dynload/cuda_driver.h" +#endif +#endif +#include "paddle/fluid/platform/enforce.h" +#include "paddle/fluid/platform/lock_guard_ptr.h" +#include "paddle/fluid/platform/macros.h" +#include "paddle/fluid/platform/monitor.h" +#include "paddle/fluid/platform/place.h" +#include "paddle/fluid/string/split.h" + +DECLARE_double(fraction_of_gpu_memory_to_use); +DECLARE_uint64(initial_gpu_memory_in_mb); +DECLARE_uint64(reallocate_gpu_memory_in_mb); +DECLARE_bool(enable_cublas_tensor_op_math); +DECLARE_string(selected_gpus); +DECLARE_uint64(gpu_memory_limit_mb); + +constexpr static float fraction_reserve_gpu_memory = 0.05f; + +USE_GPU_MEM_STAT; +namespace paddle { +namespace platform { +//! Get a list of device ids from environment variable or use all. +std::vector GetSelectedDevices() { + // use user specified GPUs in single-node multi-process mode. 
+ std::vector devices; + if (!FLAGS_selected_gpus.empty()) { + auto devices_str = paddle::string::Split(FLAGS_selected_gpus, ','); + for (auto id : devices_str) { + devices.push_back(atoi(id.c_str())); + } + } else { + int count = GetGPUDeviceCount(); + for (int i = 0; i < count; ++i) { + devices.push_back(i); + } + } + return devices; +} + +void GpuMemoryUsage(size_t *available, size_t *total) { + size_t actual_available, actual_total; + RecordedGpuMemGetInfo(available, total, &actual_available, &actual_total, + platform::GetCurrentDeviceId()); +} + +size_t GpuAvailableMemToAlloc() { + size_t total = 0; + size_t available = 0; + GpuMemoryUsage(&available, &total); + size_t reserving = + static_cast(fraction_reserve_gpu_memory * available); + // If available size is less than minimum chunk size, no usable memory exists + size_t available_to_alloc = available - reserving; + size_t min_chunk_size = GpuMinChunkSize(); + if (available_to_alloc < min_chunk_size) { + available_to_alloc = 0; + } + VLOG(10) << "GPU usage " << (available >> 20) << "M/" << (total >> 20) + << "M, " << (available_to_alloc >> 20) << "M available to allocate"; + return available_to_alloc; +} + +size_t GpuMaxAllocSize() { + return std::max(GpuInitAllocSize(), GpuReallocSize()); +} + +static size_t GpuAllocSize(bool realloc) { + size_t available_to_alloc = GpuAvailableMemToAlloc(); + PADDLE_ENFORCE_GT( + available_to_alloc, 0, + platform::errors::ResourceExhausted("Not enough available GPU memory.")); + // If FLAGS_initial_gpu_memory_in_mb is 0, then initial memory will be + // allocated by fraction + size_t flag_mb = realloc ? FLAGS_reallocate_gpu_memory_in_mb + : FLAGS_initial_gpu_memory_in_mb; + size_t alloc_bytes = + (flag_mb > 0ul ? flag_mb << 20 : available_to_alloc * + FLAGS_fraction_of_gpu_memory_to_use); + PADDLE_ENFORCE_GE( + available_to_alloc, alloc_bytes, + platform::errors::ResourceExhausted("Not enough available GPU memory.")); + VLOG(10) << "Alloc size is " << (alloc_bytes >> 20) + << " MiB, is it Re-alloc: " << realloc; + return alloc_bytes; +} + +size_t GpuInitAllocSize() { return GpuAllocSize(/* realloc = */ false); } + +size_t GpuReallocSize() { return GpuAllocSize(/* realloc = */ true); } + +size_t GpuMinChunkSize() { + // Allow to allocate the minimum chunk size is 256 bytes. 
+ return 1 << 8; +} + +size_t GpuMaxChunkSize() { + size_t max_chunk_size = GpuMaxAllocSize(); + VLOG(10) << "Max chunk size " << (max_chunk_size >> 20) << "M"; + return max_chunk_size; +} + +static void RaiseNonOutOfMemoryError(gpuError_t *status) { + if (*status == gpuErrorOutOfMemory) { + *status = gpuSuccess; + } + PADDLE_ENFORCE_GPU_SUCCESS(*status); + + *status = platform::GpuGetLastError(); + if (*status == gpuErrorOutOfMemory) { + *status = gpuSuccess; + } + PADDLE_ENFORCE_GPU_SUCCESS(*status); +} + +class RecordedGpuMallocHelper { + private: + explicit RecordedGpuMallocHelper(int dev_id, uint64_t limit_size = 0) + : dev_id_(dev_id), limit_size_(limit_size) { + if (NeedRecord()) { + mtx_.reset(new std::mutex()); + } + } + + DISABLE_COPY_AND_ASSIGN(RecordedGpuMallocHelper); + + public: + static RecordedGpuMallocHelper *Instance(int dev_id) { + std::call_once(once_flag_, [] { + int dev_cnt = GetGPUDeviceCount(); + instances_.reserve(dev_cnt); + for (int i = 0; i < dev_cnt; ++i) { + instances_.emplace_back( + new RecordedGpuMallocHelper(i, FLAGS_gpu_memory_limit_mb << 20)); + } + }); + + PADDLE_ENFORCE_GE( + dev_id, 0, + platform::errors::OutOfRange( + "Device id must be not less than 0, but got %d.", dev_id)); + PADDLE_ENFORCE_LT( + dev_id, instances_.size(), + platform::errors::OutOfRange("Device id %d exceeds gpu card number %d.", + dev_id, instances_.size())); + return instances_[dev_id].get(); + } + + /** + * Try to allocate `size` gpu memory. Only cudaErrorMemoryAllocation + * or cudaSuccess would be returned, and the cudaGetLastError() flag + * would be clear. + */ + gpuError_t Malloc(void **ptr, size_t size) { + LockGuardPtr lock(mtx_); + if (UNLIKELY(NeedRecord() && cur_size_.load() + size > limit_size_)) { + return gpuErrorOutOfMemory; + } + + CUDADeviceGuard guard(dev_id_); +#ifdef PADDLE_WITH_HIP + auto result = hipMalloc(ptr, size); +#else + CUDAGraphCaptureModeGuard capture_mode_guard; + auto result = cudaMalloc(ptr, size); +#endif + if (result == gpuSuccess) { + cur_size_.fetch_add(size); + STAT_INT_ADD("STAT_gpu" + std::to_string(dev_id_) + "_mem_size", size); + return gpuSuccess; + } else { + RaiseNonOutOfMemoryError(&result); + // Non out of memory error would be raised inside + // RaiseNonOutOfMemoryError. Therefore, we can + // return cudaErrorMemoryAllocation directly here. + return gpuErrorOutOfMemory; + } + } + + /** + * Free gpu memory. Usually, free is not allowed to raise error. + * If it does raise error, the process should be crashed. + */ + void Free(void *ptr, size_t size) { + // Purposefully allow cudaErrorCudartUnloading, because + // that is returned if you ever call cudaFree after the + // driver has already shutdown. This happens only if the + // process is terminating, in which case we don't care if + // cudaFree succeeds. 
+ CUDADeviceGuard guard(dev_id_); +#ifdef PADDLE_WITH_HIP + auto err = hipFree(ptr); + if (err != hipErrorDeinitialized) { +#else + auto err = cudaFree(ptr); + if (err != cudaErrorCudartUnloading) { +#endif + PADDLE_ENFORCE_GPU_SUCCESS(err); + cur_size_.fetch_sub(size); + STAT_INT_SUB("STAT_gpu" + std::to_string(dev_id_) + "_mem_size", size); + } else { + platform::GpuGetLastError(); // clear the error flag when + // cudaErrorCudartUnloading / + // hipErrorDeinitialized + } + } + + bool GetMemInfo(size_t *avail, size_t *total, size_t *actual_avail, + size_t *actual_total) { + { + CUDADeviceGuard guard(dev_id_); +#ifdef PADDLE_WITH_HIP + auto result = hipMemGetInfo(actual_avail, actual_total); +#else + auto result = cudaMemGetInfo(actual_avail, actual_total); +#endif + if (result != gpuSuccess) { + *actual_avail = 0; + } + RaiseNonOutOfMemoryError(&result); + } + + if (NeedRecord()) { + std::lock_guard guard(*mtx_); + *avail = std::min(*actual_avail, limit_size_ - cur_size_.load()); + *total = std::min(*actual_total, limit_size_); + return *total < *actual_total; + } else { + *avail = *actual_avail; + *total = *actual_total; + return false; + } + } + + inline bool NeedRecord() const { return limit_size_ != 0; } + + uint64_t RecordedSize() const { return cur_size_.load(); } + + uint64_t LimitSize() const { return limit_size_; } + +#ifdef PADDLE_WITH_CUDA +#if CUDA_VERSION >= 10020 + CUresult MemCreate(CUmemGenericAllocationHandle *handle, size_t size, + const CUmemAllocationProp *prop, + unsigned long long flags) { // NOLINT + auto result = + paddle::platform::dynload::cuMemCreate(handle, size, prop, flags); + if (result == CUDA_SUCCESS) { + cur_size_.fetch_add(size); + } + return result; + } + + CUresult MemRelease(CUmemGenericAllocationHandle handle, size_t size) { + auto result = paddle::platform::dynload::cuMemRelease(handle); + if (result == CUDA_SUCCESS) { + cur_size_.fetch_sub(size); + } + return result; + } + +#endif +#endif + + private: + const int dev_id_; + const uint64_t limit_size_; + std::atomic cur_size_{0}; + + mutable std::unique_ptr mtx_; + + static std::once_flag once_flag_; + static std::vector> instances_; +}; // NOLINT + +std::once_flag RecordedGpuMallocHelper::once_flag_; +std::vector> + RecordedGpuMallocHelper::instances_; + +gpuError_t RecordedGpuMalloc(void **ptr, size_t size, int dev_id) { + return RecordedGpuMallocHelper::Instance(dev_id)->Malloc(ptr, size); +} + +void RecordedGpuFree(void *p, size_t size, int dev_id) { + return RecordedGpuMallocHelper::Instance(dev_id)->Free(p, size); +} + +#ifdef PADDLE_WITH_CUDA +#if CUDA_VERSION >= 10020 +CUresult RecordedGpuMemCreate(CUmemGenericAllocationHandle *handle, size_t size, + const CUmemAllocationProp *prop, + unsigned long long flags, int dev_id) { // NOLINT + return RecordedGpuMallocHelper::Instance(dev_id)->MemCreate(handle, size, + prop, flags); +} + +CUresult RecordedGpuMemRelease(CUmemGenericAllocationHandle handle, size_t size, + int dev_id) { + return RecordedGpuMallocHelper::Instance(dev_id)->MemRelease(handle, size); +} +#endif +#endif + +bool RecordedGpuMemGetInfo(size_t *avail, size_t *total, size_t *actual_avail, + size_t *actual_total, int dev_id) { + return RecordedGpuMallocHelper::Instance(dev_id)->GetMemInfo( + avail, total, actual_avail, actual_total); +} + +uint64_t RecordedGpuMallocSize(int dev_id) { + return RecordedGpuMallocHelper::Instance(dev_id)->RecordedSize(); +} + +bool IsGpuMallocRecorded(int dev_id) { + return RecordedGpuMallocHelper::Instance(dev_id)->NeedRecord(); +} + +void 
EmptyCache(void) { + std::vector devices = GetSelectedDevices(); + for (auto device : devices) { + memory::Release(CUDAPlace(device)); + } +} + +} // namespace platform +} // namespace paddle diff --git a/paddle/fluid/platform/gpu_info.h b/paddle/fluid/platform/device/gpu/gpu_info.h similarity index 70% rename from paddle/fluid/platform/gpu_info.h rename to paddle/fluid/platform/device/gpu/gpu_info.h index 93e787fcf36f504567828bf4a32a77076be058c5..18e6ac83295f89108ea21555a35d2d7f66b3f3dd 100644 --- a/paddle/fluid/platform/gpu_info.h +++ b/paddle/fluid/platform/device/gpu/gpu_info.h @@ -1,11 +1,8 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. - Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -14,49 +11,42 @@ limitations under the License. */ #pragma once -#ifdef PADDLE_WITH_CUDA -#include -#endif - -#ifdef PADDLE_WITH_HIP -#include -#endif - #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) -// Note: this header for simplify HIP and CUDA type string + #include #include #include -#include "paddle/fluid/platform/type_defs.h" + +#include "paddle/fluid/platform/device/gpu/gpu_types.h" namespace paddle { namespace platform { -//! Get the version of cudnn -int CudnnVersion(); +//! Get the version of dnn +int DnnVersion(); //! Get the total number of GPU devices in system. -int GetCUDADeviceCount(); +int GetGPUDeviceCount(); //! Get the compute capability of the ith GPU (format: major * 10 + minor) -int GetCUDAComputeCapability(int i); +int GetGPUComputeCapability(int id); //! Get the runtime version of the ith GPU -int GetCUDARuntimeVersion(int id); +int GetGPURuntimeVersion(int id); //! Get the driver version of the ith GPU -int GetCUDADriverVersion(int id); +int GetGPUDriverVersion(int id); //! Wheter the current device support TensorCore bool TensorCoreAvailable(); //! Get the MultiProcessors of the ith GPU. -int GetCUDAMultiProcessors(int i); +int GetGPUMultiProcessors(int id); //! Get the MaxThreads of each MultiProcessor of the ith GPU. -int GetCUDAMaxThreadsPerMultiProcessor(int i); +int GetGPUMaxThreadsPerMultiProcessor(int id); //! Get the MaxThreads of each block of the ith GPU. -int GetCUDAMaxThreadsPerBlock(int i); +int GetGPUMaxThreadsPerBlock(int id); //! Get the current GPU device id in system. int GetCurrentDeviceId(); @@ -97,19 +87,11 @@ size_t GpuMaxChunkSize(); //! Copy memory from address src to dst asynchronously. void GpuMemcpyAsync(void *dst, const void *src, size_t count, -#ifdef PADDLE_WITH_HIP - enum hipMemcpyKind kind, hipStream_t stream); -#else - enum cudaMemcpyKind kind, cudaStream_t stream); -#endif + gpuMemcpyKind kind, gpuStream_t stream); //! Copy memory from address src to dst synchronously. void GpuMemcpySync(void *dst, const void *src, size_t count, -#ifdef PADDLE_WITH_HIP - enum hipMemcpyKind kind); -#else - enum cudaMemcpyKind kind); -#endif + gpuMemcpyKind kind); //! Copy memory from one device to another device asynchronously. void GpuMemcpyPeerAsync(void *dst, int dst_device, const void *src, @@ -125,34 +107,40 @@ void GpuMemsetAsync(void *dst, int value, size_t count, gpuStream_t stream); //! Blocks until stream has completed all operations. 
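[Editor's note] The renamed Recorded* entry points declared here keep the behaviour of the old Cuda-prefixed ones: per-device byte counters plus the optional FLAGS_gpu_memory_limit_mb cap. A minimal caller-side sketch built only on the declarations above (illustrative, not from the patch):

    #include "paddle/fluid/platform/device/gpu/gpu_info.h"

    namespace plat = paddle::platform;

    void AllocOneMiBOnDevice0() {
      void* ptr = nullptr;
      constexpr size_t kBytes = 1 << 20;
      // Returns gpuSuccess or gpuErrorOutOfMemory; the per-device counter is updated on success.
      if (plat::RecordedGpuMalloc(&ptr, kBytes, /*dev_id=*/0) == paddle::gpuSuccess) {
        // ... use the buffer ...
        plat::RecordedGpuFree(ptr, kBytes, /*dev_id=*/0);
      }
      // plat::RecordedGpuMallocSize(0) reports the bytes still tracked for device 0.
    }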
void GpuStreamSync(gpuStream_t stream); +void GpuDestroyStream(gpuStream_t stream); + +// ! Blocks until device has completed all operations. +void GpuDeviceync(); + //! CudaMalloc with recorded info -gpuError_t RecordedCudaMalloc(void **ptr, size_t size, int dev_id); +gpuError_t RecordedGpuMalloc(void **ptr, size_t size, int dev_id); //! CudaFree with recorded info -void RecordedCudaFree(void *p, size_t size, int dev_id); +void RecordedGpuFree(void *p, size_t size, int dev_id); + +gpuError_t GpuGetLastError(); #ifdef PADDLE_WITH_CUDA #if CUDA_VERSION >= 10020 - //! cuMemCreate with recorded info -CUresult RecordedCuMemCreate(CUmemGenericAllocationHandle *handle, size_t size, - const CUmemAllocationProp *prop, - unsigned long long flags, int dev_id); // NOLINT +CUresult RecordedGpuMemCreate(CUmemGenericAllocationHandle *handle, size_t size, + const CUmemAllocationProp *prop, + unsigned long long flags, int dev_id); // NOLINT //! cuMemRelease with recorded info -CUresult RecordedCuMemRelease(CUmemGenericAllocationHandle handle, size_t size, - int dev_id); +CUresult RecordedGpuMemRelease(CUmemGenericAllocationHandle handle, size_t size, + int dev_id); #endif #endif //! Get available and total gpu memory with considering limitation -bool RecordedCudaMemGetInfo(size_t *avail, size_t *total, size_t *actual_avail, - size_t *actual_total, int dev_id); +bool RecordedGpuMemGetInfo(size_t *avail, size_t *total, size_t *actual_avail, + size_t *actual_total, int dev_id); //! Get recorded cudaMalloc size. If record is disabled, return 0. -uint64_t RecordedCudaMallocSize(int dev_id); +uint64_t RecordedGpuMallocSize(int dev_id); -bool IsCudaMallocRecorded(int dev_id); +bool IsGpuMallocRecorded(int dev_id); //! Empty idle cached memory held by the allocator. void EmptyCache(void); diff --git a/paddle/fluid/platform/gpu_launch_config.h b/paddle/fluid/platform/device/gpu/gpu_launch_config.h similarity index 98% rename from paddle/fluid/platform/gpu_launch_config.h rename to paddle/fluid/platform/device/gpu/gpu_launch_config.h index 399f1dbaa03e1f1325cec670e9dabfcfedeab6d4..55f4c8eb4cd55ee42f2095f697724493cc35909b 100644 --- a/paddle/fluid/platform/gpu_launch_config.h +++ b/paddle/fluid/platform/device/gpu/gpu_launch_config.h @@ -28,6 +28,7 @@ #include #include #include +#include "paddle/fluid/platform/device_context.h" namespace paddle { namespace platform { diff --git a/paddle/fluid/platform/cuda_primitives.h b/paddle/fluid/platform/device/gpu/gpu_primitives.h similarity index 100% rename from paddle/fluid/platform/cuda_primitives.h rename to paddle/fluid/platform/device/gpu/gpu_primitives.h diff --git a/paddle/fluid/platform/cuda_resource_pool.cc b/paddle/fluid/platform/device/gpu/gpu_resource_pool.cc similarity index 84% rename from paddle/fluid/platform/cuda_resource_pool.cc rename to paddle/fluid/platform/device/gpu/gpu_resource_pool.cc index 70d2ec55057988eb72f92362c235e9a3879b3579..2c55eb972b765739efdaf116baaba52241f3b4ce 100644 --- a/paddle/fluid/platform/cuda_resource_pool.cc +++ b/paddle/fluid/platform/device/gpu/gpu_resource_pool.cc @@ -13,24 +13,24 @@ // limitations under the License. 
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) -#include "paddle/fluid/platform/cuda_resource_pool.h" -#include "paddle/fluid/platform/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_resource_pool.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" namespace paddle { namespace platform { CudaStreamResourcePool::CudaStreamResourcePool() { - int dev_cnt = platform::GetCUDADeviceCount(); + int dev_cnt = platform::GetGPUDeviceCount(); pool_.reserve(dev_cnt); for (int dev_idx = 0; dev_idx < dev_cnt; ++dev_idx) { auto creator = [dev_idx] { platform::SetDeviceId(dev_idx); gpuStream_t stream; #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( hipStreamCreateWithFlags(&stream, hipStreamNonBlocking)); #else - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking)); #endif return stream; @@ -39,9 +39,9 @@ CudaStreamResourcePool::CudaStreamResourcePool() { auto deleter = [dev_idx](gpuStream_t stream) { platform::SetDeviceId(dev_idx); #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(hipStreamDestroy(stream)); + PADDLE_ENFORCE_GPU_SUCCESS(hipStreamDestroy(stream)); #else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamDestroy(stream)); + PADDLE_ENFORCE_GPU_SUCCESS(cudaStreamDestroy(stream)); #endif }; @@ -69,17 +69,17 @@ std::shared_ptr CudaStreamResourcePool::New(int dev_idx) { } CudaEventResourcePool::CudaEventResourcePool() { - int dev_cnt = platform::GetCUDADeviceCount(); + int dev_cnt = platform::GetGPUDeviceCount(); pool_.reserve(dev_cnt); for (int dev_idx = 0; dev_idx < dev_cnt; ++dev_idx) { auto creator = [dev_idx] { platform::SetDeviceId(dev_idx); gpuEvent_t event; #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( hipEventCreateWithFlags(&event, hipEventDisableTiming)); #else - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( cudaEventCreateWithFlags(&event, cudaEventDisableTiming)); #endif return event; @@ -88,9 +88,9 @@ CudaEventResourcePool::CudaEventResourcePool() { auto deleter = [dev_idx](gpuEvent_t event) { platform::SetDeviceId(dev_idx); #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(hipEventDestroy(event)); + PADDLE_ENFORCE_GPU_SUCCESS(hipEventDestroy(event)); #else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaEventDestroy(event)); + PADDLE_ENFORCE_GPU_SUCCESS(cudaEventDestroy(event)); #endif }; diff --git a/paddle/fluid/platform/cuda_resource_pool.h b/paddle/fluid/platform/device/gpu/gpu_resource_pool.h similarity index 100% rename from paddle/fluid/platform/cuda_resource_pool.h rename to paddle/fluid/platform/device/gpu/gpu_resource_pool.h diff --git a/paddle/fluid/platform/device/gpu/gpu_types.h b/paddle/fluid/platform/device/gpu/gpu_types.h new file mode 100644 index 0000000000000000000000000000000000000000..d7362fe9cbd81d65299987da32067c2ad54084ce --- /dev/null +++ b/paddle/fluid/platform/device/gpu/gpu_types.h @@ -0,0 +1,94 @@ +// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) + +#ifdef PADDLE_WITH_HIP +#include +#include "paddle/fluid/platform/dynload/miopen.h" +#include "paddle/fluid/platform/dynload/rocblas.h" + +#else +#include +#include "paddle/fluid/platform/dynload/cublas.h" +#include "paddle/fluid/platform/dynload/cudnn.h" +#endif + +namespace paddle { + +#ifdef PADDLE_WITH_HIP +#define DECLARE_TYPE_FOR_GPU(GPU_TYPE, CUDA_TYPE, ROCM_TYPE) \ + using GPU_TYPE = ROCM_TYPE; +#else // CDUA + +#define DECLARE_TYPE_FOR_GPU(GPU_TYPE, CUDA_TYPE, ROCM_TYPE) \ + using GPU_TYPE = CUDA_TYPE; +#endif + +DECLARE_TYPE_FOR_GPU(gpuStream_t, cudaStream_t, hipStream_t); +DECLARE_TYPE_FOR_GPU(gpuError_t, cudaError_t, hipError_t); +DECLARE_TYPE_FOR_GPU(gpuEvent_t, cudaEvent_t, hipEvent_t); +DECLARE_TYPE_FOR_GPU(gpuMemcpyKind, cudaMemcpyKind, hipMemcpyKind); +DECLARE_TYPE_FOR_GPU(gpuDeviceProp, cudaDeviceProp, hipDeviceProp_t); + +DECLARE_TYPE_FOR_GPU(dnnDataType_t, cudnnDataType_t, miopenDataType_t); +DECLARE_TYPE_FOR_GPU(dnnActivationDescriptor, cudnnActivationStruct, + miopenActivationDescriptor); +DECLARE_TYPE_FOR_GPU(dnnActivationMode_t, cudnnActivationMode_t, + miopenActivationMode_t); +DECLARE_TYPE_FOR_GPU(dnnTensorDescriptor, cudnnTensorStruct, + miopenTensorDescriptor); +DECLARE_TYPE_FOR_GPU(dnnTensorFormat_t, cudnnTensorFormat_t, + miopenTensorFormat_t); +DECLARE_TYPE_FOR_GPU(dnnFilterDescriptor, cudnnFilterStruct, + miopenTensorDescriptor); +DECLARE_TYPE_FOR_GPU(dnnFilterDescriptor_t, cudnnFilterDescriptor_t, + miopenTensorDescriptor_t); +DECLARE_TYPE_FOR_GPU(dnnConvolutionDescriptor, cudnnConvolutionStruct, + miopenConvolutionDescriptor); +DECLARE_TYPE_FOR_GPU(dnnConvolutionDescriptor_t, cudnnConvolutionDescriptor_t, + miopenConvolutionDescriptor_t); +DECLARE_TYPE_FOR_GPU(dnnPoolingDescriptor_t, cudnnPoolingDescriptor_t, + miopenPoolingDescriptor_t); +DECLARE_TYPE_FOR_GPU(dnnPoolingMode_t, cudnnPoolingMode_t, miopenPoolingMode_t); +DECLARE_TYPE_FOR_GPU(dnnDropoutDescriptor_t, cudnnDropoutDescriptor_t, + miopenDropoutDescriptor_t); +DECLARE_TYPE_FOR_GPU(dnnHandle_t, cudnnHandle_t, miopenHandle_t); + +DECLARE_TYPE_FOR_GPU(blasHandle_t, cublasHandle_t, rocblas_handle); + +using CUDAGraphID = unsigned long long; // NOLINT + +#undef DECLARE_TYPE_FOR_GPU + +#ifdef PADDLE_WITH_HIP +#define DECLARE_CONSTANT_FOR_GPU(GPU_CV, CUDA_CV, ROCM_CV) \ + constexpr auto GPU_CV = ROCM_CV; +#else // CDUA + +#define DECLARE_CONSTANT_FOR_GPU(GPU_CV, CUDA_CV, ROCM_CV) \ + constexpr auto GPU_CV = CUDA_CV; +#endif + +DECLARE_CONSTANT_FOR_GPU(gpuErrorOutOfMemory, cudaErrorMemoryAllocation, + hipErrorOutOfMemory); +DECLARE_CONSTANT_FOR_GPU(gpuErrorNotReady, cudaErrorNotReady, hipErrorNotReady); +DECLARE_CONSTANT_FOR_GPU(gpuSuccess, cudaSuccess, hipSuccess); + +#undef DECLARE_CONSTANT_FOR_GPU +} // namespace paddle + +#endif diff --git a/paddle/fluid/platform/nccl_helper.h b/paddle/fluid/platform/device/gpu/nccl_helper.h similarity index 99% rename from paddle/fluid/platform/nccl_helper.h rename to paddle/fluid/platform/device/gpu/nccl_helper.h index e297e7203c6988c88a04f21e97b9c63e35baf375..f26116749077e26c6a782c70dabf40315e8b8456 100644 --- a/paddle/fluid/platform/nccl_helper.h +++ b/paddle/fluid/platform/device/gpu/nccl_helper.h @@ -70,11 +70,11 @@ class NCCLGroupGuard { inline NCCLGroupGuard() { NCCLMutex().lock(); - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::ncclGroupStart()); + 
PADDLE_ENFORCE_GPU_SUCCESS(dynload::ncclGroupStart()); } inline ~NCCLGroupGuard() PADDLE_MAY_THROW { - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::ncclGroupEnd()); + PADDLE_ENFORCE_GPU_SUCCESS(dynload::ncclGroupEnd()); NCCLMutex().unlock(); } }; diff --git a/paddle/fluid/platform/device/gpu/rocm/CMakeLists.txt b/paddle/fluid/platform/device/gpu/rocm/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..86b9ecd5f5445ca3acc7ff7409bb34c5d39c79f6 --- /dev/null +++ b/paddle/fluid/platform/device/gpu/rocm/CMakeLists.txt @@ -0,0 +1,3 @@ +hip_library(rocm_info SRCS rocm_info.cc DEPS gflags glog enforce monitor dynload_cuda) + +hip_test(miopen_helper_test SRCS miopen_helper_test.cc DEPS dynload_cuda) diff --git a/paddle/fluid/platform/miopen_desc.h b/paddle/fluid/platform/device/gpu/rocm/miopen_desc.h similarity index 88% rename from paddle/fluid/platform/miopen_desc.h rename to paddle/fluid/platform/device/gpu/rocm/miopen_desc.h index c82e61ceb122c033bb5bd800b41bd2c399020a69..d2389ba409e5eba078c01d2d7b409318e7bb7786 100644 --- a/paddle/fluid/platform/miopen_desc.h +++ b/paddle/fluid/platform/device/gpu/rocm/miopen_desc.h @@ -23,8 +23,8 @@ #include #include +#include "paddle/fluid/platform/device/gpu/rocm/miopen_helper.h" #include "paddle/fluid/platform/device_context.h" -#include "paddle/fluid/platform/miopen_helper.h" namespace paddle { namespace framework { @@ -88,7 +88,7 @@ class ActivationDescriptor { struct Deleter { void operator()(T* t) { if (t != nullptr) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( dynload::miopenDestroyActivationDescriptor(t)); t = nullptr; } @@ -96,13 +96,13 @@ class ActivationDescriptor { }; ActivationDescriptor() { T* raw_ptr; - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( dynload::miopenCreateActivationDescriptor(&raw_ptr)); desc_.reset(raw_ptr); } template void set(miopenActivationMode_t mode, const T& coef) { - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::miopenSetActivationDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenSetActivationDescriptor( desc_.get(), mode, static_cast(coef), 0.0, 0.0)); } @@ -119,15 +119,14 @@ class TensorDescriptor { struct Deleter { void operator()(T* t) { if (t != nullptr) { - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::miopenDestroyTensorDescriptor(t)); + PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenDestroyTensorDescriptor(t)); t = nullptr; } } }; TensorDescriptor() { T* raw_ptr; - PADDLE_ENFORCE_CUDA_SUCCESS( - dynload::miopenCreateTensorDescriptor(&raw_ptr)); + PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenCreateTensorDescriptor(&raw_ptr)); desc_.reset(raw_ptr); } T* desc() { return desc_.get(); } @@ -144,7 +143,7 @@ class TensorDescriptor { if (groups > 1) { dims_with_group[1] = dims_with_group[1] / groups; } - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::miopenSetTensorDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenSetTensorDescriptor( (miopenTensorDescriptor_t)(desc_.get()), ToCudnnDataType(tensor.type()), static_cast(dims_with_group.size()), const_cast(dims_with_group.data()), @@ -166,7 +165,7 @@ class TensorDescriptor { if (groups > 1) { dims_with_group[1] = dims_with_group[1] / groups; } - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::miopenSetTensorDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenSetTensorDescriptor( (miopenTensorDescriptor_t)(desc_.get()), ToCudnnDataType(tensor.type()), static_cast(dims_with_group.size()), const_cast(dims_with_group.data()), @@ -183,15 +182,14 @@ class FilterDescriptor { struct Deleter { void operator()(T* t) { if (t != nullptr) { - 
PADDLE_ENFORCE_CUDA_SUCCESS(dynload::miopenDestroyTensorDescriptor(t)); + PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenDestroyTensorDescriptor(t)); t = nullptr; } } }; FilterDescriptor() { T* raw_ptr; - PADDLE_ENFORCE_CUDA_SUCCESS( - dynload::miopenCreateTensorDescriptor(&raw_ptr)); + PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenCreateTensorDescriptor(&raw_ptr)); desc_.reset(raw_ptr); } T* desc() { return desc_.get(); } @@ -212,7 +210,7 @@ class FilterDescriptor { if (groups > 1) { dims_with_group[1] = dims_with_group[1] / groups; } - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::miopenSetTensorDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenSetTensorDescriptor( (miopenTensorDescriptor_t)(desc_.get()), ToCudnnDataType(tensor.type()), static_cast(dims_with_group.size()), const_cast(dims_with_group.data()), @@ -229,7 +227,7 @@ class ConvolutionDescriptor { struct Deleter { void operator()(T* t) { if (t != nullptr) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( dynload::miopenDestroyConvolutionDescriptor(t)); t = nullptr; } @@ -237,7 +235,7 @@ class ConvolutionDescriptor { }; ConvolutionDescriptor() { T* raw_ptr; - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( dynload::miopenCreateConvolutionDescriptor(&raw_ptr)); desc_.reset(raw_ptr); } @@ -247,12 +245,12 @@ class ConvolutionDescriptor { void set(miopenDataType_t dtype, const std::vector& pads, const std::vector& strides, const std::vector& dilations, bool allow_tf32, const int groups = 1) { - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::miopenInitConvolutionNdDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenInitConvolutionNdDescriptor( (miopenConvolutionDescriptor_t)desc_.get(), static_cast(pads.size()), const_cast(pads.data()), const_cast(strides.data()), const_cast(dilations.data()), miopenConvolution)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::miopenSetConvolutionGroupCount( (miopenConvolutionDescriptor_t)desc_.get(), groups)); } diff --git a/paddle/fluid/platform/miopen_helper.h b/paddle/fluid/platform/device/gpu/rocm/miopen_helper.h similarity index 89% rename from paddle/fluid/platform/miopen_helper.h rename to paddle/fluid/platform/device/gpu/rocm/miopen_helper.h index 46c7da839704196952deda6de6d09c7009ee6a78..bd8d05f8124a1944bcc11051b4000bdce0229fb7 100644 --- a/paddle/fluid/platform/miopen_helper.h +++ b/paddle/fluid/platform/device/gpu/rocm/miopen_helper.h @@ -18,6 +18,7 @@ limitations under the License. 
*/ #include #include "paddle/fluid/framework/operator.h" +#include "paddle/fluid/platform/device/gpu/gpu_types.h" #include "paddle/fluid/platform/dynload/miopen.h" #include "paddle/fluid/platform/enforce.h" #include "paddle/fluid/platform/float16.h" @@ -36,13 +37,6 @@ DECLARE_bool(cudnn_deterministic); namespace paddle { namespace platform { - -// MIOPEN only support NCHW, just for compatibility with CUDNN API -typedef enum { - MIOPEN_TENSOR_NCHW = 0, - MIOPEN_TENSOR_NHWC = 1, -} miopenTensorFormat_t; - inline const char* miopenGetErrorString(miopenStatus_t status) { switch (status) { case miopenStatusSuccess: @@ -188,10 +182,10 @@ inline miopenTensorFormat_t GetCudnnTensorFormat(const DataLayout& order) { class ScopedTensorDescriptor { public: ScopedTensorDescriptor() { - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::miopenCreateTensorDescriptor(&desc_)); + PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenCreateTensorDescriptor(&desc_)); } ~ScopedTensorDescriptor() PADDLE_MAY_THROW { - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::miopenDestroyTensorDescriptor(desc_)); + PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenDestroyTensorDescriptor(desc_)); } inline miopenTensorDescriptor_t descriptor(const miopenTensorFormat_t format, @@ -216,12 +210,12 @@ class ScopedTensorDescriptor { platform::errors::InvalidArgument( "format should ONLY be NCHW in MIOPEN.")); if (dims.size() == 4) { - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::miopenSetTensorDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenSetTensorDescriptor( desc_, type, dims_with_group.size(), const_cast(dims_with_group.data()), const_cast(strides.data()))); } else if (dims.size() == 5) { - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::miopenSetTensorDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenSetTensorDescriptor( desc_, type, dims_with_group.size(), const_cast(dims_with_group.data()), const_cast(strides.data()))); @@ -240,7 +234,7 @@ class ScopedTensorDescriptor { inline miopenTensorDescriptor_t descriptor(const miopenDataType_t miopen_type, const std::vector& dim, const std::vector& stride) { - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::miopenSetTensorDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenSetTensorDescriptor( desc_, miopen_type, dim.size(), const_cast(dim.data()), const_cast(stride.data()))); return desc_; @@ -262,10 +256,10 @@ class ScopedTensorDescriptor { class ScopedDropoutDescriptor { public: ScopedDropoutDescriptor() { - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::miopenCreateDropoutDescriptor(&desc_)); + PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenCreateDropoutDescriptor(&desc_)); } ~ScopedDropoutDescriptor() PADDLE_MAY_THROW { - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::miopenDestroyDropoutDescriptor(desc_)); + PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenDestroyDropoutDescriptor(desc_)); } inline miopenDropoutDescriptor_t descriptor(const miopenHandle_t& handle, @@ -275,20 +269,20 @@ class ScopedDropoutDescriptor { framework::Tensor* dropout_state_, int seed, size_t state_size) { if (dropout_state_ == nullptr) { // for no dropout or test - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::miopenSetDropoutDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenSetDropoutDescriptor( desc_, handle, 0 /* dropout */, nullptr, 0 /* state_size */, 0 /* seed */, false, false, MIOPEN_RNG_PSEUDO_XORWOW)); return desc_; } auto* dropout_state_data = dropout_state_->data(); if (!initialized) { - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::miopenSetDropoutDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenSetDropoutDescriptor( desc_, handle, dropout_prob_, dropout_state_data, 
state_size, seed, false, false, MIOPEN_RNG_PSEUDO_XORWOW)); } else { auto dropout_state_dims = dropout_state_->dims(); state_size = dropout_state_dims[0]; - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::miopenRestoreDropoutDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenRestoreDropoutDescriptor( desc_, handle, dropout_prob_, dropout_state_data, state_size, 0, false, false, MIOPEN_RNG_PSEUDO_XORWOW)); } @@ -304,10 +298,10 @@ class ScopedDropoutDescriptor { class ScopedRNNDescriptor { public: ScopedRNNDescriptor() { - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::miopenCreateRNNDescriptor(&desc_)); + PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenCreateRNNDescriptor(&desc_)); } ~ScopedRNNDescriptor() PADDLE_MAY_THROW { - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::miopenDestroyRNNDescriptor(desc_)); + PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenDestroyRNNDescriptor(desc_)); } inline miopenRNNDescriptor_t desc() { return desc_; } @@ -320,10 +314,10 @@ class ScopedRNNDescriptor { class ScopedFilterDescriptor { public: ScopedFilterDescriptor() { - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::miopenCreateTensorDescriptor(&desc_)); + PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenCreateTensorDescriptor(&desc_)); } ~ScopedFilterDescriptor() PADDLE_MAY_THROW { - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::miopenDestroyTensorDescriptor(desc_)); + PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenDestroyTensorDescriptor(desc_)); } inline miopenTensorDescriptor_t descriptor(const miopenTensorFormat_t format, @@ -344,7 +338,7 @@ class ScopedFilterDescriptor { for (int k = kernel_with_group.size() - 2; k >= 0; k--) { stride_dim[k] = stride_dim[k + 1] * kernel_with_group[k + 1]; } - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::miopenSetTensorDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenSetTensorDescriptor( desc_, type, kernel_with_group.size(), const_cast(kernel_with_group.data()), const_cast(stride_dim.data()))); @@ -369,11 +363,11 @@ class ScopedFilterDescriptor { class ScopedConvolutionDescriptor { public: ScopedConvolutionDescriptor() { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( dynload::miopenCreateConvolutionDescriptor(&desc_)); } ~ScopedConvolutionDescriptor() PADDLE_MAY_THROW { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( dynload::miopenDestroyConvolutionDescriptor(desc_)); } @@ -391,7 +385,7 @@ class ScopedConvolutionDescriptor { "The size of pads and dilations should be equal. But received size " "of pads is %d, size of dilations is %d.", pads.size(), dilations.size())); - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::miopenInitConvolutionNdDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenInitConvolutionNdDescriptor( desc_, pads.size(), const_cast(pads.data()), const_cast(strides.data()), const_cast(dilations.data()), miopenConvolution)); @@ -413,10 +407,10 @@ class ScopedConvolutionDescriptor { class ScopedPoolingDescriptor { public: ScopedPoolingDescriptor() { - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::miopenCreatePoolingDescriptor(&desc_)); + PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenCreatePoolingDescriptor(&desc_)); } ~ScopedPoolingDescriptor() PADDLE_MAY_THROW { - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::miopenDestroyPoolingDescriptor(desc_)); + PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenDestroyPoolingDescriptor(desc_)); } inline miopenPoolingDescriptor_t descriptor(const PoolingMode& mode, @@ -434,7 +428,7 @@ class ScopedPoolingDescriptor { "The size of kernel and strides should be equal. 
But " "received size of kernel is %d, size of strides is %d.", kernel.size(), strides.size())); - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::miopenSetNdPoolingDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenSetNdPoolingDescriptor( desc_, GetPoolingMode(mode), kernel.size(), const_cast(kernel.data()), const_cast(pads.data()), const_cast(strides.data()))); @@ -449,11 +443,11 @@ class ScopedPoolingDescriptor { class ScopedActivationDescriptor { public: ScopedActivationDescriptor() { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( dynload::miopenCreateActivationDescriptor(&desc_)); } ~ScopedActivationDescriptor() PADDLE_MAY_THROW { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( dynload::miopenDestroyActivationDescriptor(desc_)); } @@ -489,7 +483,7 @@ class ScopedActivationDescriptor { "Unrecognized MIOPEN activation mode: %d.", static_cast(activation_mode))); } - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::miopenSetActivationDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenSetActivationDescriptor( desc_, mode, relu_ceiling, 0.0, 0.0)); return desc_; } @@ -514,15 +508,15 @@ inline bool CanCUDNNBeUsed(const framework::ExecutionContext& ctx) { class ScopedCTCLossDescriptor { public: ScopedCTCLossDescriptor() { - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::miopenCreateCTCLossDescriptor(&desc_)); + PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenCreateCTCLossDescriptor(&desc_)); } ~ScopedCTCLossDescriptor() PADDLE_MAY_THROW { - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::miopenDestroyCTCLossDescriptor(desc_)); + PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenDestroyCTCLossDescriptor(desc_)); } template inline miopenCTCLossDescriptor_t descriptor() { - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::miopenSetCTCLossDescriptor( + PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenSetCTCLossDescriptor( desc_, CudnnDataType::type, 0, false)); return desc_; } diff --git a/paddle/fluid/platform/miopen_helper_test.cc b/paddle/fluid/platform/device/gpu/rocm/miopen_helper_test.cc similarity index 98% rename from paddle/fluid/platform/miopen_helper_test.cc rename to paddle/fluid/platform/device/gpu/rocm/miopen_helper_test.cc index e201f4893f5778e9c5f975877d698740ea505774..13cf52dc2c6a30892297153065031228817e35b5 100644 --- a/paddle/fluid/platform/miopen_helper_test.cc +++ b/paddle/fluid/platform/device/gpu/rocm/miopen_helper_test.cc @@ -15,7 +15,7 @@ limitations under the License. */ #define GLOG_NO_ABBREVIATED_SEVERITIES #define GOOGLE_GLOG_DLL_DECL -#include "paddle/fluid/platform/miopen_helper.h" +#include "paddle/fluid/platform/device/gpu/gpu_dnn.h" #include diff --git a/paddle/fluid/platform/device/gpu/rocm/rocm_device_function.h b/paddle/fluid/platform/device/gpu/rocm/rocm_device_function.h new file mode 100644 index 0000000000000000000000000000000000000000..2263383f8fabb0c6b4d8cd30af4cf491f08520eb --- /dev/null +++ b/paddle/fluid/platform/device/gpu/rocm/rocm_device_function.h @@ -0,0 +1,160 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#pragma once + +// NOTE(): support float16 to half in header file. +#define PADDLE_CUDA_FP16 +#include "paddle/fluid/platform/complex.h" +#include "paddle/fluid/platform/float16.h" + +namespace paddle { +namespace platform { + +#define CREATE_SHFL_MASK(mask, predicate) mask = __ballot((predicate)) + +inline static int RoundToPowerOfTwo(int dim) { + // HIP results in error or nan if > 256 + if (dim > 128) { + return 256; + } else if (dim > 64) { + return 128; + } else if (dim > 32) { + return 64; + } else { + return 32; + } +} + +#define CUDA_LAUNCH_KERNEL_BASE(dim, ...) \ + case (dim): { \ + constexpr auto kPowerOfTwoDim = (dim); \ + __VA_ARGS__; \ + } break + +#define CUDA_LAUNCH_KERNEL_HELPER(...) \ + CUDA_LAUNCH_KERNEL_BASE(1024, ##__VA_ARGS__); \ + CUDA_LAUNCH_KERNEL_BASE(512, ##__VA_ARGS__); \ + CUDA_LAUNCH_KERNEL_BASE(256, ##__VA_ARGS__); \ + CUDA_LAUNCH_KERNEL_BASE(128, ##__VA_ARGS__); \ + CUDA_LAUNCH_KERNEL_BASE(64, ##__VA_ARGS__); \ + CUDA_LAUNCH_KERNEL_BASE(32, ##__VA_ARGS__); + +template +__forceinline__ __device__ T CudaShuffleDownSync(unsigned mask, T val, + int delta, + int width = warpSize) { + return __shfl_down(val, delta, width); +} + +template +__forceinline__ __device__ T CudaShuffleXorSync(unsigned mask, T val, + int width = warpSize) { + return __shfl_xor(val, width); +} + +template <> +__forceinline__ __device__ float16 CudaShuffleDownSync(unsigned mask, + float16 val, int delta, + int width) { + return float16(__shfl_down(static_cast(val), + static_cast(delta), width)); +} + +template <> +__forceinline__ __device__ paddle::platform::complex CudaShuffleDownSync( + unsigned mask, paddle::platform::complex val, int delta, int width) { + float real = __shfl_down(val.real, delta, width); + float imag = __shfl_down(val.imag, delta, width); + return paddle::platform::complex(real, imag); +} + +template <> +__forceinline__ __device__ paddle::platform::complex +CudaShuffleDownSync(unsigned mask, paddle::platform::complex val, + int delta, int width) { + double real = __shfl_down(val.real, delta, width); + double imag = __shfl_down(val.imag, delta, width); + return paddle::platform::complex(real, imag); +} + +template <> +__forceinline__ __device__ float16 CudaShuffleXorSync(unsigned mask, + float16 val, int width) { + return float16(__shfl_xor(static_cast(val), width)); +} + +template <> +__forceinline__ __device__ paddle::platform::complex CudaShuffleXorSync( + unsigned mask, paddle::platform::complex val, int width) { + float real = __shfl_xor(val.real, width); + float imag = __shfl_xor(val.imag, width); + return paddle::platform::complex(real, imag); +} + +template <> +__forceinline__ __device__ paddle::platform::complex CudaShuffleXorSync( + unsigned mask, paddle::platform::complex val, int width) { + double real = __shfl_xor(val.real, width); + double imag = __shfl_xor(val.imag, width); + return paddle::platform::complex(real, imag); +} + +template +__forceinline__ __device__ T CudaShuffleSync(unsigned mask, T val, int src_line, + int width = 32) { + return __shfl(val, src_line, width); +} + +template +HOSTDEVICE T Infinity() { + return INFINITY; +} + +template +__device__ T reduceSum(T val, int tid, int len) { + // NOTE(zcd): The warp size should be taken from the + // parameters of the GPU but not specified as 32 simply. + // To make the reduceSum more efficiently, + // I use Warp-Level Parallelism and assume the Warp size + // is 32 which may be different for different GPU, + // but most card's warp size is 32. 
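[Editor's note] RoundToPowerOfTwo and the CUDA_LAUNCH_KERNEL_* macros above are intended to expand inside a switch so that a runtime dimension selects a compile-time block size. A hedged sketch of that dispatch pattern (the kernel name and its arguments are made up for illustration):

    // Hypothetical kernel templated on a compile-time block size.
    template <int BlockDim, typename T>
    __global__ void RowReduceKernel(const T* in, T* out, int n);

    template <typename T>
    void DispatchByDim(const T* in, T* out, int n, int dim, paddle::gpuStream_t stream) {
      switch (paddle::platform::RoundToPowerOfTwo(dim)) {
        // Each case binds kPowerOfTwoDim to a constant usable as a template argument.
        CUDA_LAUNCH_KERNEL_HELPER(
            RowReduceKernel<kPowerOfTwoDim, T><<<1, kPowerOfTwoDim, 0, stream>>>(in, out, n));
      }
    }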
+ const int warpSize = 32; + __shared__ T shm[warpSize]; + unsigned mask = 0u; + CREATE_SHFL_MASK(mask, tid < len); + + for (int offset = warpSize / 2; offset > 0; offset /= 2) + val += platform::CudaShuffleDownSync(mask, val, offset); + + if (tid < warpSize) shm[tid] = 0; + __syncthreads(); + + if (tid % warpSize == 0) { + shm[tid / warpSize] = val; + } + __syncthreads(); + + CREATE_SHFL_MASK(mask, tid < warpSize); + + if (tid < warpSize) { + val = shm[tid]; + for (int offset = warpSize / 2; offset > 0; offset /= 2) + val += platform::CudaShuffleDownSync(mask, val, offset); + } + return val; +} + +} // namespace platform +} // namespace paddle diff --git a/paddle/fluid/platform/device/gpu/rocm/rocm_helper.h b/paddle/fluid/platform/device/gpu/rocm/rocm_helper.h new file mode 100644 index 0000000000000000000000000000000000000000..a0f3fb0f73ba5e0100d471f8bd104aadb3932b5c --- /dev/null +++ b/paddle/fluid/platform/device/gpu/rocm/rocm_helper.h @@ -0,0 +1,102 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include // NOLINT + +#include "paddle/fluid/platform/dynload/rocblas.h" +#include "paddle/fluid/platform/enforce.h" +#include "paddle/fluid/platform/macros.h" + +namespace paddle { +namespace platform { + +/* + * Summary: Grid stride looping macro in CUDA kernel + * + * [ Why need this macro? ] + * + * The original looping in CUDA kernel is: + * + * `for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ + * i += blockDim.x * gridDim.x)` + * + * This for condition is risky. The value of `blockIdx.x * blockDim.x` + * may be large, such as over 1GB, the first iteration is no problem here, + * but when `i += blockDim.x * gridDim.x` is executed, the value of i + * will greater than INT_MAX and overflow becomes negative value, at + * this time, the cycle condition `i < (n)` is still satisfied, so it + * will cause illegal access to cuda memory. + * + * Here is a real example in ERINE, it will trigger above error. + * The related data are: + * - blockIdx.x = 2172938 + * - blockDim.x = 512 + * - blockIdx.x * blockDim.x = 1112543864 + * - INT_MAX = 2147483647 + * + * So we polish the for condition as follow, the int64_t __index__ will + * prevent overflow in the loop increment. 
+ * + * Parameters: + * - i: loop index + * - num: total element numbers + * + * Examples: + * template + * __global__ void Scale(T* logit_grad, const T* loss_grad, const int num, + * const int d, const int remain) { + * CUDA_KERNEL_LOOP(index, num) { + * int idx_n = index / d; + * int idx_remain = index % remain; + * logit_grad[index] *= loss_grad[idx_n * remain + idx_remain]; + * } + * } + * +*/ + +#define CUDA_KERNEL_LOOP_TYPE(i, num, index_type) \ + int64_t __index__ = hipBlockIdx_x * hipBlockDim_x + hipThreadIdx_x; \ + for (index_type i = __index__; __index__ < (num); \ + __index__ += hipBlockDim_x * hipGridDim_x, i = __index__) + +class CublasHandleHolder { + public: + explicit CublasHandleHolder(hipStream_t stream) { + PADDLE_RETRY_CUDA_SUCCESS(dynload::rocblas_create_handle(&handle_)); + PADDLE_RETRY_CUDA_SUCCESS(dynload::rocblas_set_stream(handle_, stream)); + } + + const rocblas_handle& GetCublasHandle() const { return handle_; } + + ~CublasHandleHolder() PADDLE_MAY_THROW { + PADDLE_RETRY_CUDA_SUCCESS(dynload::rocblas_destroy_handle(handle_)); + } + + template + inline void Call(Callback&& callback) const { + std::lock_guard guard(mtx_); + callback(handle_); + } + + private: + DISABLE_COPY_AND_ASSIGN(CublasHandleHolder); + + rocblas_handle handle_; + mutable std::mutex mtx_; +}; + +} // namespace platform +} // namespace paddle diff --git a/paddle/fluid/platform/device/gpu/rocm/rocm_info.cc b/paddle/fluid/platform/device/gpu/rocm/rocm_info.cc new file mode 100644 index 0000000000000000000000000000000000000000..06dba8ce423ef7df4b1d47bfd1b6994abc9fbb98 --- /dev/null +++ b/paddle/fluid/platform/device/gpu/rocm/rocm_info.cc @@ -0,0 +1,269 @@ +// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/platform/device/gpu/gpu_info.h" +#include "paddle/fluid/platform/enforce.h" +#include "paddle/fluid/platform/lock_guard_ptr.h" +#include "paddle/fluid/platform/macros.h" +#include "paddle/fluid/platform/monitor.h" +#include "paddle/fluid/platform/place.h" +#include "paddle/fluid/string/split.h" + +static std::once_flag g_device_props_size_init_flag; +static std::vector> g_device_props_init_flags; +static std::vector g_device_props; + +namespace paddle { +namespace platform { +int DnnVersion() { + if (!dynload::HasCUDNN()) return -1; + size_t version_major, version_minor, version_patch; + PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenGetVersion( + &version_major, &version_minor, &version_patch)); + return version_major * 100 + version_minor * 10 + version_patch; +} + +static int GetGPUDeviceCountImpl() { + int driverVersion = 0; + hipError_t status = hipDriverGetVersion(&driverVersion); + + if (!(status == gpuSuccess && driverVersion != 0)) { + // No GPU driver + VLOG(2) << "GPU Driver Version can't be detected. 
No GPU driver!"; + return 0; + } + + const auto *cuda_visible_devices = std::getenv("HIP_VISIBLE_DEVICES"); + + if (cuda_visible_devices != nullptr) { + std::string cuda_visible_devices_str(cuda_visible_devices); + if (!cuda_visible_devices_str.empty()) { + cuda_visible_devices_str.erase( + 0, cuda_visible_devices_str.find_first_not_of('\'')); + cuda_visible_devices_str.erase( + cuda_visible_devices_str.find_last_not_of('\'') + 1); + cuda_visible_devices_str.erase( + 0, cuda_visible_devices_str.find_first_not_of('\"')); + cuda_visible_devices_str.erase( + cuda_visible_devices_str.find_last_not_of('\"') + 1); + } + if (std::all_of(cuda_visible_devices_str.begin(), + cuda_visible_devices_str.end(), + [](char ch) { return ch == ' '; })) { + VLOG(2) << "HIP_VISIBLE_DEVICES is set to be " + "empty. No GPU detected."; + return 0; + } + } + int count; + PADDLE_ENFORCE_GPU_SUCCESS(hipGetDeviceCount(&count)); + return count; +} + +int GetGPUDeviceCount() { + // cache the count + static auto dev_cnt = GetGPUDeviceCountImpl(); + return dev_cnt; +} + +int GetGPUComputeCapability(int id) { + PADDLE_ENFORCE_LT(id, GetGPUDeviceCount(), + platform::errors::InvalidArgument( + "Device id must be less than GPU count, " + "but received id is: %d. GPU count is: %d.", + id, GetGPUDeviceCount())); + int major, minor; + auto major_error_code = hipDeviceGetAttribute( + &major, hipDeviceAttributeComputeCapabilityMajor, id); + auto minor_error_code = hipDeviceGetAttribute( + &minor, hipDeviceAttributeComputeCapabilityMinor, id); + + PADDLE_ENFORCE_GPU_SUCCESS(major_error_code); + PADDLE_ENFORCE_GPU_SUCCESS(minor_error_code); + return major * 100 + minor; +} + +int GetGPURuntimeVersion(int id) { + PADDLE_ENFORCE_LT(id, GetGPUDeviceCount(), + platform::errors::InvalidArgument( + "Device id must be less than GPU count, " + "but received id is: %d. GPU count is: %d.", + id, GetGPUDeviceCount())); + int runtime_version = 0; + PADDLE_ENFORCE_GPU_SUCCESS(hipRuntimeGetVersion(&runtime_version)); + return runtime_version; +} + +int GetGPUDriverVersion(int id) { + PADDLE_ENFORCE_LT(id, GetGPUDeviceCount(), + platform::errors::InvalidArgument( + "Device id must be less than GPU count, " + "but received id is: %d. GPU count is: %d.", + id, GetGPUDeviceCount())); + int driver_version = 0; + PADDLE_ENFORCE_GPU_SUCCESS(hipDriverGetVersion(&driver_version)); + return driver_version; +} + +bool TensorCoreAvailable() { return false; } + +int GetGPUMultiProcessors(int id) { + PADDLE_ENFORCE_LT(id, GetGPUDeviceCount(), + platform::errors::InvalidArgument( + "Device id must be less than GPU count, " + "but received id is: %d. GPU count is: %d.", + id, GetGPUDeviceCount())); + int count; + PADDLE_ENFORCE_GPU_SUCCESS( + hipDeviceGetAttribute(&count, hipDeviceAttributeMultiprocessorCount, id)); + return count; +} + +int GetGPUMaxThreadsPerMultiProcessor(int id) { + PADDLE_ENFORCE_LT(id, GetGPUDeviceCount(), + platform::errors::InvalidArgument( + "Device id must be less than GPU count, " + "but received id is: %d. GPU count is: %d.", + id, GetGPUDeviceCount())); + int count; + PADDLE_ENFORCE_GPU_SUCCESS(hipDeviceGetAttribute( + &count, hipDeviceAttributeMaxThreadsPerMultiProcessor, id)); + + return count; +} + +int GetGPUMaxThreadsPerBlock(int id) { + PADDLE_ENFORCE_LT(id, GetGPUDeviceCount(), + platform::errors::InvalidArgument( + "Device id must be less than GPU count, " + "but received id is: %d. 
GPU count is: %d.", + id, GetGPUDeviceCount())); + int count; + PADDLE_ENFORCE_GPU_SUCCESS( + hipDeviceGetAttribute(&count, hipDeviceAttributeMaxThreadsPerBlock, id)); + return count; +} + +int GetCurrentDeviceId() { + int device_id; + PADDLE_ENFORCE_GPU_SUCCESS(hipGetDevice(&device_id)); + return device_id; +} + +dim3 GetGpuMaxGridDimSize(int id) { + PADDLE_ENFORCE_LT(id, GetGPUDeviceCount(), + platform::errors::InvalidArgument( + "Device id must be less than GPU count, " + "but received id is: %d. GPU count is: %d.", + id, GetGPUDeviceCount())); + dim3 ret; + int size; + auto error_code_x = + hipDeviceGetAttribute(&size, hipDeviceAttributeMaxGridDimX, id); + PADDLE_ENFORCE_GPU_SUCCESS(error_code_x); + ret.x = size; + + auto error_code_y = + hipDeviceGetAttribute(&size, hipDeviceAttributeMaxGridDimY, id); + PADDLE_ENFORCE_GPU_SUCCESS(error_code_y); + ret.y = size; + + auto error_code_z = + hipDeviceGetAttribute(&size, hipDeviceAttributeMaxGridDimZ, id); + PADDLE_ENFORCE_GPU_SUCCESS(error_code_z); + ret.z = size; + return ret; +} + +const gpuDeviceProp &GetDeviceProperties(int id) { + std::call_once(g_device_props_size_init_flag, [&] { + int gpu_num = 0; + gpu_num = platform::GetGPUDeviceCount(); + g_device_props_init_flags.resize(gpu_num); + g_device_props.resize(gpu_num); + for (int i = 0; i < gpu_num; ++i) { + g_device_props_init_flags[i] = std::make_unique(); + } + }); + + if (id == -1) { + id = platform::GetCurrentDeviceId(); + } + + if (id < 0 || id >= static_cast(g_device_props.size())) { + PADDLE_THROW(platform::errors::OutOfRange( + "The device id %d is out of range [0, %d), where %d is the number of " + "devices on this machine. Because the device id should be greater than " + "or equal to zero and smaller than the number of gpus. Please input " + "appropriate device again!", + id, static_cast(g_device_props.size()), + static_cast(g_device_props.size()))); + } + + std::call_once(*(g_device_props_init_flags[id]), [&] { + PADDLE_ENFORCE_GPU_SUCCESS(hipGetDeviceProperties(&g_device_props[id], id)); + }); + + return g_device_props[id]; +} + +void SetDeviceId(int id) { + // TODO(qijun): find a better way to cache the cuda device count + PADDLE_ENFORCE_LT(id, GetGPUDeviceCount(), + platform::errors::InvalidArgument( + "Device id must be less than GPU count, " + "but received id is: %d. 
GPU count is: %d.", + id, GetGPUDeviceCount())); + PADDLE_RETRY_CUDA_SUCCESS(hipSetDevice(id)); +} + +void GpuMemcpyAsync(void *dst, const void *src, size_t count, + gpuMemcpyKind kind, gpuStream_t stream) { + PADDLE_ENFORCE_GPU_SUCCESS(hipMemcpyAsync(dst, src, count, kind, stream)); +} + +void GpuMemcpySync(void *dst, const void *src, size_t count, + gpuMemcpyKind kind) { + PADDLE_ENFORCE_GPU_SUCCESS(hipMemcpy(dst, src, count, kind)); +} + +void GpuMemcpyPeerAsync(void *dst, int dst_device, const void *src, + int src_device, size_t count, gpuStream_t stream) { + PADDLE_ENFORCE_GPU_SUCCESS( + hipMemcpyPeerAsync(dst, dst_device, src, src_device, count, stream)); +} + +void GpuMemcpyPeerSync(void *dst, int dst_device, const void *src, + int src_device, size_t count) { + PADDLE_ENFORCE_GPU_SUCCESS( + hipMemcpyPeer(dst, dst_device, src, src_device, count)); +} + +void GpuMemsetAsync(void *dst, int value, size_t count, gpuStream_t stream) { + PADDLE_ENFORCE_GPU_SUCCESS(hipMemsetAsync(dst, value, count, stream)); +} + +void GpuStreamSync(gpuStream_t stream) { + PADDLE_ENFORCE_GPU_SUCCESS(hipStreamSynchronize(stream)); +} + +void GpuDestroyStream(gpuStream_t stream) { + PADDLE_ENFORCE_GPU_SUCCESS(hipStreamDestroy(stream)); +} + +void GpuDeviceSync() { PADDLE_ENFORCE_GPU_SUCCESS(hipDeviceSynchronize()); } + +gpuError_t GpuGetLastError() { return hipGetLastError(); } +} // namespace platform +} // namespace paddle diff --git a/paddle/fluid/platform/device/npu/hccl_helper.h b/paddle/fluid/platform/device/npu/hccl_helper.h index f1ef8650be4c16cb56970139b23a42192f71ef60..69cea31446680e886611f1be6b38a761724ce6b9 100644 --- a/paddle/fluid/platform/device/npu/hccl_helper.h +++ b/paddle/fluid/platform/device/npu/hccl_helper.h @@ -66,11 +66,11 @@ inline HcclDataType ToHCCLDataType(framework::proto::VarType::Type type) { // inline HCCLGroupGuard() { // HCCLMutex().lock(); -// PADDLE_ENFORCE_CUDA_SUCCESS(dynload::ncclGroupStart()); +// PADDLE_ENFORCE_GPU_SUCCESS(dynload::ncclGroupStart()); // } // inline ~HCCLGroupGuard() PADDLE_MAY_THROW { -// PADDLE_ENFORCE_CUDA_SUCCESS(dynload::ncclGroupEnd()); +// PADDLE_ENFORCE_GPU_SUCCESS(dynload::ncclGroupEnd()); // HCCLMutex().unlock(); // } // }; diff --git a/paddle/fluid/platform/device_context.cc b/paddle/fluid/platform/device_context.cc index c2dc60a29fe427a719c15236fef1d3050c1fa2e9..a0c9ff09460aff63d4b41142b7b37635ac17def5 100644 --- a/paddle/fluid/platform/device_context.cc +++ b/paddle/fluid/platform/device_context.cc @@ -370,10 +370,10 @@ class EigenCudaStreamDevice : public Eigen::StreamInterface { char* scratch = static_cast(scratchpad()) + Eigen::kGpuScratchSize; semaphore_ = reinterpret_cast(scratch); #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( hipMemsetAsync(semaphore_, 0, sizeof(unsigned int), *stream_)); #else - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( cudaMemsetAsync(semaphore_, 0, sizeof(unsigned int), *stream_)); #endif } @@ -439,14 +439,14 @@ CUDAContext::~CUDAContext() { CUDADeviceContext::CUDADeviceContext(CUDAPlace place) : place_(place) { CUDADeviceGuard guard(place_.device); - compute_capability_ = GetCUDAComputeCapability(place_.device); - multi_process_ = GetCUDAMultiProcessors(place_.device); - max_threads_per_mp_ = GetCUDAMaxThreadsPerMultiProcessor(place_.device); + compute_capability_ = GetGPUComputeCapability(place_.device); + multi_process_ = GetGPUMultiProcessors(place_.device); + max_threads_per_mp_ = GetGPUMaxThreadsPerMultiProcessor(place_.device); max_grid_dim_size_ = 
GetGpuMaxGridDimSize(place_.device); - max_threads_per_block_ = GetCUDAMaxThreadsPerBlock(place_.device); + max_threads_per_block_ = GetGPUMaxThreadsPerBlock(place_.device); - driver_version_ = GetCUDADriverVersion(place_.device); - runtime_version_ = GetCUDARuntimeVersion(place_.device); + driver_version_ = GetGPUDriverVersion(place_.device); + runtime_version_ = GetGPURuntimeVersion(place_.device); LOG_FIRST_N(WARNING, 1) << "Please NOTE: device: " << place_.device << ", GPU Compute Capability: " @@ -459,7 +459,7 @@ CUDADeviceContext::CUDADeviceContext(CUDAPlace place) : place_(place) { << (runtime_version_ % 100) / 10; #ifdef PADDLE_WITH_HIP size_t version_major, version_minor, version_patch; - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::miopenGetVersion( + PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenGetVersion( &version_major, &version_minor, &version_patch)); LOG_FIRST_N(WARNING, 1) << "device: " << place_.device << ", MIOpen Version: " << version_major << "." @@ -499,7 +499,7 @@ CUDADeviceContext::~CUDADeviceContext() { SetDeviceId(place_.device); #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) if (nccl_comm_) { - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::ncclCommDestroy(nccl_comm_)); + PADDLE_ENFORCE_GPU_SUCCESS(dynload::ncclCommDestroy(nccl_comm_)); } #endif } diff --git a/paddle/fluid/platform/device_context.h b/paddle/fluid/platform/device_context.h index 73232994516b61c28dc7d00be541a64d19ec55e8..552d8f1a8c4ffbe3f5dab275a12ef8c746f2c0b1 100644 --- a/paddle/fluid/platform/device_context.h +++ b/paddle/fluid/platform/device_context.h @@ -20,7 +20,7 @@ limitations under the License. */ #include "paddle/fluid/memory/malloc.h" #ifdef PADDLE_WITH_CUDA -#include "paddle/fluid/platform/cuda_helper.h" +#include "paddle/fluid/platform/device/gpu/gpu_helper.h" #include "paddle/fluid/platform/dynload/cublas.h" #include "paddle/fluid/platform/dynload/cudnn.h" #include "paddle/fluid/platform/dynload/cusolver.h" @@ -28,17 +28,17 @@ limitations under the License. 
*/ #if !defined(__APPLE__) && defined(PADDLE_WITH_NCCL) #include "paddle/fluid/platform/dynload/nccl.h" #endif -#include "paddle/fluid/platform/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" #endif #ifdef PADDLE_WITH_HIP -#include "paddle/fluid/platform/cuda_helper.h" // NOLINT +#include "paddle/fluid/platform/device/gpu/gpu_helper.h" // NOLINT #include "paddle/fluid/platform/dynload/miopen.h" #include "paddle/fluid/platform/dynload/rocblas.h" #if !defined(__APPLE__) && defined(PADDLE_WITH_RCCL) #include "paddle/fluid/platform/dynload/rccl.h" #endif -#include "paddle/fluid/platform/gpu_info.h" // NOLINT +#include "paddle/fluid/platform/device/gpu/gpu_info.h" // NOLINT #endif #if defined(PADDLE_WITH_XPU_BKCL) @@ -371,7 +371,7 @@ class CUDAContext { if (dynload::HasCUDNN()) { #ifdef PADDLE_WITH_HIP size_t miopen_major, miopen_minor, miopen_patch; - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::miopenGetVersion( + PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenGetVersion( &miopen_major, &miopen_minor, &miopen_patch)); auto local_miopen_version = (miopen_major * 1000 + miopen_minor * 10 + miopen_patch) / 10; @@ -388,8 +388,8 @@ class CUDAContext { << "Please recompile or reinstall Paddle with compatible MIOPEN " "version."; } - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::miopenCreate(&cudnn_handle_)); - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenCreate(&cudnn_handle_)); + PADDLE_ENFORCE_GPU_SUCCESS( dynload::miopenSetStream(cudnn_handle_, RawStream())); #else auto local_cudnn_version = dynload::cudnnGetVersion() / 100; @@ -425,9 +425,9 @@ class CUDAContext { void DestoryCuDNNContext() { if (cudnn_handle_) { #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::miopenDestroy(cudnn_handle_)); + PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenDestroy(cudnn_handle_)); #else - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnDestroy(cudnn_handle_)); + PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnDestroy(cudnn_handle_)); #endif } cudnn_handle_ = nullptr; @@ -442,7 +442,7 @@ class CUDAContext { #ifndef PADDLE_WITH_HIP void DestoryCuSolverContext() { if (cusolver_dn_handle_) { - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( dynload::cusolverDnDestroy(cusolver_dn_handle_)); } } diff --git a/paddle/fluid/platform/device_context_test.cu b/paddle/fluid/platform/device_context_test.cu index 2f9413c4f3ea7e72e8c3690d985329065570c223..cf617a478eb719d41ddc0e8043fb1f35113fb456 100644 --- a/paddle/fluid/platform/device_context_test.cu +++ b/paddle/fluid/platform/device_context_test.cu @@ -23,7 +23,7 @@ TEST(Device, Init) { using paddle::platform::CUDADeviceContext; using paddle::platform::CUDAPlace; - int count = paddle::platform::GetCUDADeviceCount(); + int count = paddle::platform::GetGPUDeviceCount(); for (int i = 0; i < count; i++) { CUDADeviceContext* device_context = new CUDADeviceContext(CUDAPlace(i)); Eigen::GpuDevice* gpu_device = device_context->eigen_device(); @@ -36,7 +36,7 @@ TEST(Device, CUDADeviceContext) { using paddle::platform::CUDADeviceContext; using paddle::platform::CUDAPlace; - int count = paddle::platform::GetCUDADeviceCount(); + int count = paddle::platform::GetGPUDeviceCount(); for (int i = 0; i < count; i++) { CUDADeviceContext* device_context = new CUDADeviceContext(CUDAPlace(i)); Eigen::GpuDevice* gpu_device = device_context->eigen_device(); @@ -70,7 +70,7 @@ TEST(Device, DeviceContextPool) { ASSERT_EQ(cpu_dev_ctx2, cpu_dev_ctx1); std::vector gpu_places; - int count = paddle::platform::GetCUDADeviceCount(); + int count = 
paddle::platform::GetGPUDeviceCount(); for (int i = 0; i < count; ++i) { auto dev_ctx = pool.Get(CUDAPlace(i)); ASSERT_NE(dev_ctx, nullptr); diff --git a/paddle/fluid/platform/device_memory_aligment.h b/paddle/fluid/platform/device_memory_aligment.h index f42eb7ece1a72be1b7fbc277cd266df6cb255293..a3f88592b7649fcf4acaf01b498901abb8f4a1d7 100644 --- a/paddle/fluid/platform/device_memory_aligment.h +++ b/paddle/fluid/platform/device_memory_aligment.h @@ -17,12 +17,10 @@ limitations under the License. */ #include "paddle/fluid/platform/cpu_info.h" #include "paddle/fluid/platform/place.h" -#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) -#include "paddle/fluid/platform/gpu_info.h" -#endif #if defined(PADDLE_WITH_ASCEND_CL) #include "paddle/fluid/platform/device/npu/npu_info.h" #endif +#include "paddle/fluid/platform/device/gpu/gpu_info.h" namespace paddle { namespace platform { diff --git a/paddle/fluid/platform/dynload/miopen.h b/paddle/fluid/platform/dynload/miopen.h index f72eb6731f6276c049b2fe397cda660fd61c1def..34845f24ff50dd6484962103894009c2b58c2eed 100644 --- a/paddle/fluid/platform/dynload/miopen.h +++ b/paddle/fluid/platform/dynload/miopen.h @@ -25,6 +25,12 @@ limitations under the License. */ (MIOPEN_VERSION_MAJOR * 1000 + MIOPEN_VERSION_MINOR * 10 + \ MIOPEN_VERSION_PATCH) // NOLINT +// MIOPEN only support NCHW, just for compatibility with CUDNN API +typedef enum { + MIOPEN_TENSOR_NCHW = 0, + MIOPEN_TENSOR_NHWC = 1, +} miopenTensorFormat_t; + namespace paddle { namespace platform { namespace dynload { diff --git a/paddle/fluid/platform/enforce.h b/paddle/fluid/platform/enforce.h index 86f71fdf64fba14d45dc5391ae6d150f27866234..530ae6ba79889ff6a91de65088dfedc660e65ba3 100644 --- a/paddle/fluid/platform/enforce.h +++ b/paddle/fluid/platform/enforce.h @@ -96,7 +96,7 @@ limitations under the License. 
*/ #include "paddle/fluid/imperative/type_defs.h" // Note: this header for simplify HIP and CUDA type string #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) -#include "paddle/fluid/platform/type_defs.h" +#include "paddle/fluid/platform/device/gpu/gpu_types.h" #endif #include "paddle/fluid/platform/flags.h" @@ -944,7 +944,7 @@ inline std::string build_nvidia_error_msg(ncclResult_t nccl_result) { } #endif // not(__APPLE__) and PADDLE_WITH_NCCL -#define PADDLE_ENFORCE_CUDA_SUCCESS(COND) \ +#define PADDLE_ENFORCE_GPU_SUCCESS(COND) \ do { \ auto __cond__ = (COND); \ using __CUDA_STATUS_TYPE__ = decltype(__cond__); \ @@ -1150,7 +1150,7 @@ DEFINE_EXTERNAL_API_TYPE(ncclResult_t, ncclSuccess); } // namespace details -#define PADDLE_ENFORCE_CUDA_SUCCESS(COND) \ +#define PADDLE_ENFORCE_GPU_SUCCESS(COND) \ do { \ auto __cond__ = (COND); \ using __CUDA_STATUS_TYPE__ = decltype(__cond__); \ diff --git a/paddle/fluid/platform/enforce_test.cc b/paddle/fluid/platform/enforce_test.cc index 6ff9e6ea903cd3c32e21389b4eeec5432c72a44b..b9e423929916992fc718df495327e90b89f45a9c 100644 --- a/paddle/fluid/platform/enforce_test.cc +++ b/paddle/fluid/platform/enforce_test.cc @@ -294,14 +294,14 @@ TEST(EOF_EXCEPTION, THROW_EOF) { #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) template bool CheckCudaStatusSuccess(T value, const std::string& msg = "success") { - PADDLE_ENFORCE_CUDA_SUCCESS(value); + PADDLE_ENFORCE_GPU_SUCCESS(value); return true; } template bool CheckCudaStatusFailure(T value, const std::string& msg) { try { - PADDLE_ENFORCE_CUDA_SUCCESS(value); + PADDLE_ENFORCE_GPU_SUCCESS(value); return false; } catch (paddle::platform::EnforceNotMet& error) { std::string ex_msg = error.what(); diff --git a/paddle/fluid/platform/event.h b/paddle/fluid/platform/event.h index 2b11de48a1ec70528f7aeddbe530869f10a779b9..136dc2d725208312398faef46a9e014be35274a6 100644 --- a/paddle/fluid/platform/event.h +++ b/paddle/fluid/platform/event.h @@ -148,9 +148,9 @@ class CudaEvent { void Record(const paddle::platform::stream::CUDAStream& stream) { #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(hipEventRecord(event_, stream.raw_stream())); + PADDLE_ENFORCE_GPU_SUCCESS(hipEventRecord(event_, stream.raw_stream())); #else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaEventRecord(event_, stream.raw_stream())); + PADDLE_ENFORCE_GPU_SUCCESS(cudaEventRecord(event_, stream.raw_stream())); #endif } @@ -172,15 +172,15 @@ class CudaEvent { return false; } #endif - PADDLE_ENFORCE_CUDA_SUCCESS(err); + PADDLE_ENFORCE_GPU_SUCCESS(err); return false; } void Synchronize() { #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(hipEventSynchronize(event_)); + PADDLE_ENFORCE_GPU_SUCCESS(hipEventSynchronize(event_)); #else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaEventSynchronize(event_)); + PADDLE_ENFORCE_GPU_SUCCESS(cudaEventSynchronize(event_)); #endif } gpuEvent_t GetRawCudaEvent() { return event_; } diff --git a/paddle/fluid/platform/for_range.h b/paddle/fluid/platform/for_range.h index 6e5c7f4e9166093a3263ef4b1ef7e5374343a889..5518dabbf92a4790630461e207d30dafbaaaf2ac 100644 --- a/paddle/fluid/platform/for_range.h +++ b/paddle/fluid/platform/for_range.h @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #pragma once +#include "paddle/fluid/platform/device/gpu/gpu_launch_config.h" #include "paddle/fluid/platform/device_context.h" -#include "paddle/fluid/platform/gpu_launch_config.h" namespace paddle { namespace platform { diff --git a/paddle/fluid/platform/gpu_info.cc b/paddle/fluid/platform/gpu_info.cc deleted file mode 100644 index 9dc6254234a979bba32a5ce7cba075656ba284c7..0000000000000000000000000000000000000000 --- a/paddle/fluid/platform/gpu_info.cc +++ /dev/null @@ -1,734 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#include "paddle/fluid/platform/gpu_info.h" -#include -#include -#include - -#include "gflags/gflags.h" -#include "paddle/fluid/platform/cuda_device_guard.h" -#ifdef PADDLE_WITH_HIP -#include "paddle/fluid/platform/dynload/miopen.h" -#else -#include "paddle/fluid/platform/cuda_graph.h" -#include "paddle/fluid/platform/dynload/cudnn.h" -#endif -#include "paddle/fluid/memory/malloc.h" -#ifdef PADDLE_WITH_CUDA -#if CUDA_VERSION >= 10020 -#include "paddle/fluid/platform/dynload/cuda_driver.h" -#endif -#endif -#include "paddle/fluid/platform/enforce.h" -#include "paddle/fluid/platform/lock_guard_ptr.h" -#include "paddle/fluid/platform/macros.h" -#include "paddle/fluid/platform/monitor.h" -#include "paddle/fluid/platform/place.h" -#include "paddle/fluid/string/split.h" - -DECLARE_double(fraction_of_gpu_memory_to_use); -DECLARE_uint64(initial_gpu_memory_in_mb); -DECLARE_uint64(reallocate_gpu_memory_in_mb); -DECLARE_bool(enable_cublas_tensor_op_math); -DECLARE_string(selected_gpus); -DECLARE_uint64(gpu_memory_limit_mb); - -constexpr static float fraction_reserve_gpu_memory = 0.05f; - -static std::once_flag g_device_props_size_init_flag; -static std::vector> g_device_props_init_flags; -static std::vector g_device_props; - -USE_GPU_MEM_STAT; -namespace paddle { -namespace platform { - -int CudnnVersion() { - if (!dynload::HasCUDNN()) return -1; - -#ifdef PADDLE_WITH_HIP - size_t version_major, version_minor, version_patch; - PADDLE_ENFORCE_CUDA_SUCCESS(dynload::miopenGetVersion( - &version_major, &version_minor, &version_patch)); - return version_major * 100 + version_minor * 10 + version_patch; -#else - return dynload::cudnnGetVersion(); -#endif -} -static int GetCUDADeviceCountImpl() { - int driverVersion = 0; -#ifdef PADDLE_WITH_HIP - hipError_t status = hipDriverGetVersion(&driverVersion); -#else - cudaError_t status = cudaDriverGetVersion(&driverVersion); -#endif - - if (!(status == gpuSuccess && driverVersion != 0)) { - // No GPU driver - VLOG(2) << "GPU Driver Version can't be detected. 
No GPU driver!"; - return 0; - } - -#ifdef PADDLE_WITH_HIP - const auto *cuda_visible_devices = std::getenv("HIP_VISIBLE_DEVICES"); -#else - const auto *cuda_visible_devices = std::getenv("CUDA_VISIBLE_DEVICES"); -#endif - if (cuda_visible_devices != nullptr) { - std::string cuda_visible_devices_str(cuda_visible_devices); - if (!cuda_visible_devices_str.empty()) { - cuda_visible_devices_str.erase( - 0, cuda_visible_devices_str.find_first_not_of('\'')); - cuda_visible_devices_str.erase( - cuda_visible_devices_str.find_last_not_of('\'') + 1); - cuda_visible_devices_str.erase( - 0, cuda_visible_devices_str.find_first_not_of('\"')); - cuda_visible_devices_str.erase( - cuda_visible_devices_str.find_last_not_of('\"') + 1); - } - if (std::all_of(cuda_visible_devices_str.begin(), - cuda_visible_devices_str.end(), - [](char ch) { return ch == ' '; })) { - VLOG(2) << "CUDA_VISIBLE_DEVICES or HIP_VISIBLE_DEVICES is set to be " - "empty. No GPU detected."; - return 0; - } - } - int count; -#ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(hipGetDeviceCount(&count)); -#else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaGetDeviceCount(&count)); -#endif - return count; -} - -int GetCUDADeviceCount() { - // cache the count - static auto dev_cnt = GetCUDADeviceCountImpl(); - return dev_cnt; -} - -/* Here is a very simple CUDA “pro tip”: cudaDeviceGetAttribute() is a much -faster way to query device properties. You can see details in -https://devblogs.nvidia.com/cuda-pro-tip-the-fast-way-to-query-device-properties/ -*/ -int GetCUDAComputeCapability(int id) { - PADDLE_ENFORCE_LT(id, GetCUDADeviceCount(), - platform::errors::InvalidArgument( - "Device id must be less than GPU count, " - "but received id is: %d. GPU count is: %d.", - id, GetCUDADeviceCount())); - int major, minor; - -#ifdef PADDLE_WITH_HIP - auto major_error_code = hipDeviceGetAttribute( - &major, hipDeviceAttributeComputeCapabilityMajor, id); - auto minor_error_code = hipDeviceGetAttribute( - &minor, hipDeviceAttributeComputeCapabilityMinor, id); -#else - auto major_error_code = - cudaDeviceGetAttribute(&major, cudaDevAttrComputeCapabilityMajor, id); - auto minor_error_code = - cudaDeviceGetAttribute(&minor, cudaDevAttrComputeCapabilityMinor, id); -#endif - PADDLE_ENFORCE_CUDA_SUCCESS(major_error_code); - PADDLE_ENFORCE_CUDA_SUCCESS(minor_error_code); -#ifdef PADDLE_WITH_HIP - return major * 100 + minor; -#else - return major * 10 + minor; -#endif -} - -dim3 GetGpuMaxGridDimSize(int id) { - PADDLE_ENFORCE_LT(id, GetCUDADeviceCount(), - platform::errors::InvalidArgument( - "Device id must be less than GPU count, " - "but received id is: %d. 
GPU count is: %d.", - id, GetCUDADeviceCount())); - dim3 ret; - int size; -#ifdef PADDLE_WITH_HIP - auto error_code_x = - hipDeviceGetAttribute(&size, hipDeviceAttributeMaxGridDimX, id); -#else - auto error_code_x = cudaDeviceGetAttribute(&size, cudaDevAttrMaxGridDimX, id); -#endif - PADDLE_ENFORCE_CUDA_SUCCESS(error_code_x); - ret.x = size; - -#ifdef PADDLE_WITH_HIP - auto error_code_y = - hipDeviceGetAttribute(&size, hipDeviceAttributeMaxGridDimY, id); -#else - auto error_code_y = cudaDeviceGetAttribute(&size, cudaDevAttrMaxGridDimY, id); -#endif - PADDLE_ENFORCE_CUDA_SUCCESS(error_code_y); - ret.y = size; - -#ifdef PADDLE_WITH_HIP - auto error_code_z = - hipDeviceGetAttribute(&size, hipDeviceAttributeMaxGridDimZ, id); -#else - auto error_code_z = cudaDeviceGetAttribute(&size, cudaDevAttrMaxGridDimZ, id); -#endif - PADDLE_ENFORCE_CUDA_SUCCESS(error_code_z); - ret.z = size; - return ret; -} - -int GetCUDARuntimeVersion(int id) { - PADDLE_ENFORCE_LT(id, GetCUDADeviceCount(), - platform::errors::InvalidArgument( - "Device id must be less than GPU count, " - "but received id is: %d. GPU count is: %d.", - id, GetCUDADeviceCount())); - int runtime_version = 0; -#ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(hipRuntimeGetVersion(&runtime_version)); -#else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaRuntimeGetVersion(&runtime_version)); -#endif - return runtime_version; -} - -int GetCUDADriverVersion(int id) { - PADDLE_ENFORCE_LT(id, GetCUDADeviceCount(), - platform::errors::InvalidArgument( - "Device id must be less than GPU count, " - "but received id is: %d. GPU count is: %d.", - id, GetCUDADeviceCount())); - int driver_version = 0; -#ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(hipDriverGetVersion(&driver_version)); -#else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaDriverGetVersion(&driver_version)); -#endif - return driver_version; -} - -bool TensorCoreAvailable() { -#if !defined(PADDLE_WITH_HIP) && CUDA_VERSION >= 9000 - int device = GetCurrentDeviceId(); - int driver_version = GetCUDAComputeCapability(device); - return driver_version >= 70; -#else - return false; -#endif -} - -int GetCUDAMultiProcessors(int id) { - PADDLE_ENFORCE_LT(id, GetCUDADeviceCount(), - platform::errors::InvalidArgument( - "Device id must be less than GPU count, " - "but received id is: %d. GPU count is: %d.", - id, GetCUDADeviceCount())); - int count; -#ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS( - hipDeviceGetAttribute(&count, hipDeviceAttributeMultiprocessorCount, id)); -#else - PADDLE_ENFORCE_CUDA_SUCCESS( - cudaDeviceGetAttribute(&count, cudaDevAttrMultiProcessorCount, id)); -#endif - return count; -} - -int GetCUDAMaxThreadsPerMultiProcessor(int id) { - PADDLE_ENFORCE_LT(id, GetCUDADeviceCount(), - platform::errors::InvalidArgument( - "Device id must be less than GPU count, " - "but received id is: %d. GPU count is: %d.", - id, GetCUDADeviceCount())); - int count; -#ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(hipDeviceGetAttribute( - &count, hipDeviceAttributeMaxThreadsPerMultiProcessor, id)); -#else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaDeviceGetAttribute( - &count, cudaDevAttrMaxThreadsPerMultiProcessor, id)); -#endif - return count; -} - -int GetCUDAMaxThreadsPerBlock(int id) { - PADDLE_ENFORCE_LT(id, GetCUDADeviceCount(), - platform::errors::InvalidArgument( - "Device id must be less than GPU count, " - "but received id is: %d. 
GPU count is: %d.", - id, GetCUDADeviceCount())); - int count; -#ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS( - hipDeviceGetAttribute(&count, hipDeviceAttributeMaxThreadsPerBlock, id)); -#else - PADDLE_ENFORCE_CUDA_SUCCESS( - cudaDeviceGetAttribute(&count, cudaDevAttrMaxThreadsPerBlock, id)); -#endif - return count; -} - -int GetCurrentDeviceId() { - int device_id; -#ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(hipGetDevice(&device_id)); -#else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaGetDevice(&device_id)); -#endif - return device_id; -} - -//! Get a list of device ids from environment variable or use all. -std::vector GetSelectedDevices() { - // use user specified GPUs in single-node multi-process mode. - std::vector devices; - if (!FLAGS_selected_gpus.empty()) { - auto devices_str = paddle::string::Split(FLAGS_selected_gpus, ','); - for (auto id : devices_str) { - devices.push_back(atoi(id.c_str())); - } - } else { - int count = GetCUDADeviceCount(); - for (int i = 0; i < count; ++i) { - devices.push_back(i); - } - } - return devices; -} - -const gpuDeviceProp &GetDeviceProperties(int id) { - std::call_once(g_device_props_size_init_flag, [&] { - int gpu_num = 0; - gpu_num = platform::GetCUDADeviceCount(); - g_device_props_init_flags.resize(gpu_num); - g_device_props.resize(gpu_num); - for (int i = 0; i < gpu_num; ++i) { - g_device_props_init_flags[i] = std::make_unique(); - } - }); - - if (id == -1) { - id = platform::GetCurrentDeviceId(); - } - - if (id < 0 || id >= static_cast(g_device_props.size())) { - PADDLE_THROW(platform::errors::OutOfRange( - "The device id %d is out of range [0, %d), where %d is the number of " - "devices on this machine. Because the device id should be greater than " - "or equal to zero and smaller than the number of gpus. Please input " - "appropriate device again!", - id, static_cast(g_device_props.size()), - static_cast(g_device_props.size()))); - } - - std::call_once(*(g_device_props_init_flags[id]), [&] { -#ifdef PADDLE_WITH_CUDA - PADDLE_ENFORCE_CUDA_SUCCESS( - cudaGetDeviceProperties(&g_device_props[id], id)); -#else - PADDLE_ENFORCE_CUDA_SUCCESS( - hipGetDeviceProperties(&g_device_props[id], id)); -#endif - }); - - return g_device_props[id]; -} - -void SetDeviceId(int id) { - // TODO(qijun): find a better way to cache the cuda device count - PADDLE_ENFORCE_LT(id, GetCUDADeviceCount(), - platform::errors::InvalidArgument( - "Device id must be less than GPU count, " - "but received id is: %d. 
GPU count is: %d.", - id, GetCUDADeviceCount())); -#ifdef PADDLE_WITH_HIP - PADDLE_RETRY_CUDA_SUCCESS(hipSetDevice(id)); -#else - PADDLE_RETRY_CUDA_SUCCESS(cudaSetDevice(id)); -#endif -} - -void GpuMemoryUsage(size_t *available, size_t *total) { - size_t actual_available, actual_total; - RecordedCudaMemGetInfo(available, total, &actual_available, &actual_total, - platform::GetCurrentDeviceId()); -} - -size_t GpuAvailableMemToAlloc() { - size_t total = 0; - size_t available = 0; - GpuMemoryUsage(&available, &total); - size_t reserving = - static_cast(fraction_reserve_gpu_memory * available); - // If available size is less than minimum chunk size, no usable memory exists - size_t available_to_alloc = available - reserving; - size_t min_chunk_size = GpuMinChunkSize(); - if (available_to_alloc < min_chunk_size) { - available_to_alloc = 0; - } - VLOG(10) << "GPU usage " << (available >> 20) << "M/" << (total >> 20) - << "M, " << (available_to_alloc >> 20) << "M available to allocate"; - return available_to_alloc; -} - -size_t GpuMaxAllocSize() { - return std::max(GpuInitAllocSize(), GpuReallocSize()); -} - -static size_t GpuAllocSize(bool realloc) { - size_t available_to_alloc = GpuAvailableMemToAlloc(); - PADDLE_ENFORCE_GT( - available_to_alloc, 0, - platform::errors::ResourceExhausted("Not enough available GPU memory.")); - // If FLAGS_initial_gpu_memory_in_mb is 0, then initial memory will be - // allocated by fraction - size_t flag_mb = realloc ? FLAGS_reallocate_gpu_memory_in_mb - : FLAGS_initial_gpu_memory_in_mb; - size_t alloc_bytes = - (flag_mb > 0ul ? flag_mb << 20 : available_to_alloc * - FLAGS_fraction_of_gpu_memory_to_use); - PADDLE_ENFORCE_GE( - available_to_alloc, alloc_bytes, - platform::errors::ResourceExhausted("Not enough available GPU memory.")); - VLOG(10) << "Alloc size is " << (alloc_bytes >> 20) - << " MiB, is it Re-alloc: " << realloc; - return alloc_bytes; -} - -size_t GpuInitAllocSize() { return GpuAllocSize(/* realloc = */ false); } - -size_t GpuReallocSize() { return GpuAllocSize(/* realloc = */ true); } - -size_t GpuMinChunkSize() { - // Allow to allocate the minimum chunk size is 256 bytes. 
- return 1 << 8; -} - -size_t GpuMaxChunkSize() { - size_t max_chunk_size = GpuMaxAllocSize(); - VLOG(10) << "Max chunk size " << (max_chunk_size >> 20) << "M"; - return max_chunk_size; -} - -#ifdef PADDLE_WITH_HIP -void GpuMemcpyAsync(void *dst, const void *src, size_t count, - enum hipMemcpyKind kind, hipStream_t stream) { - PADDLE_ENFORCE_CUDA_SUCCESS(hipMemcpyAsync(dst, src, count, kind, stream)); -} -#else -void GpuMemcpyAsync(void *dst, const void *src, size_t count, - enum cudaMemcpyKind kind, cudaStream_t stream) { - PADDLE_ENFORCE_CUDA_SUCCESS(cudaMemcpyAsync(dst, src, count, kind, stream)); -} -#endif - -#ifdef PADDLE_WITH_HIP -void GpuMemcpySync(void *dst, const void *src, size_t count, - enum hipMemcpyKind kind) { - PADDLE_ENFORCE_CUDA_SUCCESS(hipMemcpy(dst, src, count, kind)); -} -#else -void GpuMemcpySync(void *dst, const void *src, size_t count, - enum cudaMemcpyKind kind) { - PADDLE_ENFORCE_CUDA_SUCCESS(cudaMemcpy(dst, src, count, kind)); -} -#endif - -void GpuMemcpyPeerAsync(void *dst, int dst_device, const void *src, - int src_device, size_t count, gpuStream_t stream) { -#ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS( - hipMemcpyPeerAsync(dst, dst_device, src, src_device, count, stream)); -#else - PADDLE_ENFORCE_CUDA_SUCCESS( - cudaMemcpyPeerAsync(dst, dst_device, src, src_device, count, stream)); -#endif -} - -void GpuMemcpyPeerSync(void *dst, int dst_device, const void *src, - int src_device, size_t count) { -#ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS( - hipMemcpyPeer(dst, dst_device, src, src_device, count)); -#else - PADDLE_ENFORCE_CUDA_SUCCESS( - cudaMemcpyPeer(dst, dst_device, src, src_device, count)); -#endif -} - -void GpuMemsetAsync(void *dst, int value, size_t count, gpuStream_t stream) { -#ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(hipMemsetAsync(dst, value, count, stream)); -#else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaMemsetAsync(dst, value, count, stream)); -#endif -} - -void GpuStreamSync(gpuStream_t stream) { -#ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(hipStreamSynchronize(stream)); -#else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamSynchronize(stream)); -#endif -} - -static void RaiseNonOutOfMemoryError(gpuError_t *status) { -#ifdef PADDLE_WITH_HIP - if (*status == hipErrorOutOfMemory) { - *status = hipSuccess; - } -#else - if (*status == cudaErrorMemoryAllocation) { - *status = cudaSuccess; - } -#endif - PADDLE_ENFORCE_CUDA_SUCCESS(*status); - -#ifdef PADDLE_WITH_HIP - *status = hipGetLastError(); - if (*status == hipErrorOutOfMemory) { - *status = hipSuccess; - } -#else - *status = cudaGetLastError(); - if (*status == cudaErrorMemoryAllocation) { - *status = cudaSuccess; - } -#endif - PADDLE_ENFORCE_CUDA_SUCCESS(*status); -} - -class RecordedCudaMallocHelper { - private: - explicit RecordedCudaMallocHelper(int dev_id, uint64_t limit_size = 0) - : dev_id_(dev_id), limit_size_(limit_size) { - if (NeedRecord()) { - mtx_.reset(new std::mutex()); - } - } - - DISABLE_COPY_AND_ASSIGN(RecordedCudaMallocHelper); - - public: - static RecordedCudaMallocHelper *Instance(int dev_id) { - std::call_once(once_flag_, [] { - int dev_cnt = GetCUDADeviceCount(); - instances_.reserve(dev_cnt); - for (int i = 0; i < dev_cnt; ++i) { - instances_.emplace_back( - new RecordedCudaMallocHelper(i, FLAGS_gpu_memory_limit_mb << 20)); - } - }); - - PADDLE_ENFORCE_GE( - dev_id, 0, - platform::errors::OutOfRange( - "Device id must be not less than 0, but got %d.", dev_id)); - PADDLE_ENFORCE_LT( - dev_id, instances_.size(), - 
platform::errors::OutOfRange("Device id %d exceeds gpu card number %d.", - dev_id, instances_.size())); - return instances_[dev_id].get(); - } - - /** - * Try to allocate `size` gpu memory. Only cudaErrorMemoryAllocation - * or cudaSuccess would be returned, and the cudaGetLastError() flag - * would be clear. - */ - gpuError_t Malloc(void **ptr, size_t size) { - LockGuardPtr lock(mtx_); - if (UNLIKELY(NeedRecord() && cur_size_.load() + size > limit_size_)) { -#ifdef PADDLE_WITH_HIP - return hipErrorOutOfMemory; -#else - return cudaErrorMemoryAllocation; -#endif - } - - CUDADeviceGuard guard(dev_id_); -#ifdef PADDLE_WITH_HIP - auto result = hipMalloc(ptr, size); -#else - CUDAGraphCaptureModeGuard capture_mode_guard; - auto result = cudaMalloc(ptr, size); -#endif - if (result == gpuSuccess) { - cur_size_.fetch_add(size); - STAT_INT_ADD("STAT_gpu" + std::to_string(dev_id_) + "_mem_size", size); - return gpuSuccess; - } else { - RaiseNonOutOfMemoryError(&result); -// Non out of memory error would be raised inside -// RaiseNonOutOfMemoryError. Therefore, we can -// return cudaErrorMemoryAllocation directly here. -#ifdef PADDLE_WITH_HIP - return hipErrorOutOfMemory; -#else - return cudaErrorMemoryAllocation; -#endif - } - } - - /** - * Free gpu memory. Usually, free is not allowed to raise error. - * If it does raise error, the process should be crashed. - */ - void Free(void *ptr, size_t size) { - // Purposefully allow cudaErrorCudartUnloading, because - // that is returned if you ever call cudaFree after the - // driver has already shutdown. This happens only if the - // process is terminating, in which case we don't care if - // cudaFree succeeds. - CUDADeviceGuard guard(dev_id_); -#ifdef PADDLE_WITH_HIP - auto err = hipFree(ptr); - if (err != hipErrorDeinitialized) { -#else - auto err = cudaFree(ptr); - if (err != cudaErrorCudartUnloading) { -#endif - PADDLE_ENFORCE_CUDA_SUCCESS(err); - cur_size_.fetch_sub(size); - STAT_INT_SUB("STAT_gpu" + std::to_string(dev_id_) + "_mem_size", size); - } else { -#ifdef PADDLE_WITH_HIP - hipGetLastError(); // clear the error flag when hipErrorDeinitialized -#else - cudaGetLastError(); // clear the error flag when cudaErrorCudartUnloading -#endif - } - } - - bool GetMemInfo(size_t *avail, size_t *total, size_t *actual_avail, - size_t *actual_total) { - { - CUDADeviceGuard guard(dev_id_); -#ifdef PADDLE_WITH_HIP - auto result = hipMemGetInfo(actual_avail, actual_total); -#else - auto result = cudaMemGetInfo(actual_avail, actual_total); -#endif - if (result != gpuSuccess) { - *actual_avail = 0; - } - RaiseNonOutOfMemoryError(&result); - } - - if (NeedRecord()) { - std::lock_guard guard(*mtx_); - *avail = std::min(*actual_avail, limit_size_ - cur_size_.load()); - *total = std::min(*actual_total, limit_size_); - return *total < *actual_total; - } else { - *avail = *actual_avail; - *total = *actual_total; - return false; - } - } - - inline bool NeedRecord() const { return limit_size_ != 0; } - - uint64_t RecordedSize() const { return cur_size_.load(); } - - uint64_t LimitSize() const { return limit_size_; } - -#ifdef PADDLE_WITH_CUDA -#if CUDA_VERSION >= 10020 - CUresult MemCreate(CUmemGenericAllocationHandle *handle, size_t size, - const CUmemAllocationProp *prop, - unsigned long long flags) { // NOLINT - auto result = - paddle::platform::dynload::cuMemCreate(handle, size, prop, flags); - if (result == CUDA_SUCCESS) { - cur_size_.fetch_add(size); - } - return result; - } - - CUresult MemRelease(CUmemGenericAllocationHandle handle, size_t size) { - auto result = 
paddle::platform::dynload::cuMemRelease(handle); - if (result == CUDA_SUCCESS) { - cur_size_.fetch_sub(size); - } - return result; - } - -#endif -#endif - - private: - const int dev_id_; - const uint64_t limit_size_; - std::atomic cur_size_{0}; - - mutable std::unique_ptr mtx_; - - static std::once_flag once_flag_; - static std::vector> instances_; -}; // NOLINT - -std::once_flag RecordedCudaMallocHelper::once_flag_; -std::vector> - RecordedCudaMallocHelper::instances_; - -gpuError_t RecordedCudaMalloc(void **ptr, size_t size, int dev_id) { - return RecordedCudaMallocHelper::Instance(dev_id)->Malloc(ptr, size); -} - -void RecordedCudaFree(void *p, size_t size, int dev_id) { - return RecordedCudaMallocHelper::Instance(dev_id)->Free(p, size); -} - -#ifdef PADDLE_WITH_CUDA -#if CUDA_VERSION >= 10020 -CUresult RecordedCuMemCreate(CUmemGenericAllocationHandle *handle, size_t size, - const CUmemAllocationProp *prop, - unsigned long long flags, int dev_id) { // NOLINT - return RecordedCudaMallocHelper::Instance(dev_id)->MemCreate(handle, size, - prop, flags); -} - -CUresult RecordedCuMemRelease(CUmemGenericAllocationHandle handle, size_t size, - int dev_id) { - return RecordedCudaMallocHelper::Instance(dev_id)->MemRelease(handle, size); -} -#endif -#endif - -bool RecordedCudaMemGetInfo(size_t *avail, size_t *total, size_t *actual_avail, - size_t *actual_total, int dev_id) { - return RecordedCudaMallocHelper::Instance(dev_id)->GetMemInfo( - avail, total, actual_avail, actual_total); -} - -uint64_t RecordedCudaMallocSize(int dev_id) { - return RecordedCudaMallocHelper::Instance(dev_id)->RecordedSize(); -} - -bool IsCudaMallocRecorded(int dev_id) { - return RecordedCudaMallocHelper::Instance(dev_id)->NeedRecord(); -} - -void EmptyCache(void) { - std::vector devices = GetSelectedDevices(); - for (auto device : devices) { - memory::Release(CUDAPlace(device)); - } -} - -} // namespace platform -} // namespace paddle diff --git a/paddle/fluid/platform/init_test.cc b/paddle/fluid/platform/init_test.cc index 965fe7b6db45cc5334633a28dd9dc08a7ea1f87c..dbca7d154954617d9c603ecff5a10858777f8879 100644 --- a/paddle/fluid/platform/init_test.cc +++ b/paddle/fluid/platform/init_test.cc @@ -32,7 +32,7 @@ TEST(InitDevices, CUDA) { using paddle::platform::DeviceContextPool; #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) - int count = paddle::platform::GetCUDADeviceCount(); + int count = paddle::platform::GetGPUDeviceCount(); InitDevices(); DeviceContextPool& pool = DeviceContextPool::Instance(); ASSERT_EQ(pool.size(), 2U + static_cast(count)); diff --git a/paddle/fluid/platform/profiler.cu b/paddle/fluid/platform/profiler.cu index 02930627d41e34625ba1bd1d8e39130669e9390d..5d1caffd45326d7acb22105e61f10a24fa39181e 100644 --- a/paddle/fluid/platform/profiler.cu +++ b/paddle/fluid/platform/profiler.cu @@ -29,7 +29,7 @@ __global__ void DummyKernel(int *a) { a[0] = 0; } static void ForEachDevice(std::function func) { auto original_device = platform::GetCurrentDeviceId(); - int count = platform::GetCUDADeviceCount(); + int count = platform::GetGPUDeviceCount(); for (int i = 0; i < count; i++) { platform::SetDeviceId(i); func(i); @@ -43,13 +43,13 @@ void DummyKernelAndEvent() { ForEachDevice([](int d) { platform::SetDeviceId(d); hipStream_t stream; - PADDLE_ENFORCE_CUDA_SUCCESS(hipStreamCreate(&stream)); + PADDLE_ENFORCE_GPU_SUCCESS(hipStreamCreate(&stream)); Mark("_cuda_startup_"); int *ptr; - PADDLE_ENFORCE_CUDA_SUCCESS(hipMalloc(&ptr, sizeof(int))); + PADDLE_ENFORCE_GPU_SUCCESS(hipMalloc(&ptr, sizeof(int))); 
hipLaunchKernelGGL(DummyKernel, dim3(1), dim3(1), 0, stream, ptr); - PADDLE_ENFORCE_CUDA_SUCCESS(hipStreamSynchronize(stream)); - PADDLE_ENFORCE_CUDA_SUCCESS(hipFree(ptr)); + PADDLE_ENFORCE_GPU_SUCCESS(hipStreamSynchronize(stream)); + PADDLE_ENFORCE_GPU_SUCCESS(hipFree(ptr)); }); } #else @@ -57,13 +57,13 @@ void DummyKernelAndEvent() { ForEachDevice([](int d) { platform::SetDeviceId(d); cudaStream_t stream; - PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamCreate(&stream)); + PADDLE_ENFORCE_GPU_SUCCESS(cudaStreamCreate(&stream)); Mark("_cuda_startup_"); int *ptr; - PADDLE_ENFORCE_CUDA_SUCCESS(cudaMalloc(&ptr, sizeof(int))); + PADDLE_ENFORCE_GPU_SUCCESS(cudaMalloc(&ptr, sizeof(int))); DummyKernel<<<1, 1, 0, stream>>>(ptr); - PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamSynchronize(stream)); - PADDLE_ENFORCE_CUDA_SUCCESS(cudaFree(ptr)); + PADDLE_ENFORCE_GPU_SUCCESS(cudaStreamSynchronize(stream)); + PADDLE_ENFORCE_GPU_SUCCESS(cudaFree(ptr)); }); } #endif diff --git a/paddle/fluid/platform/profiler.h b/paddle/fluid/platform/profiler.h index de814faec2523e9896e041ff3844d494c92073f1..317991160b79882079b0f9b4436f11de8acf9564 100644 --- a/paddle/fluid/platform/profiler.h +++ b/paddle/fluid/platform/profiler.h @@ -31,7 +31,7 @@ limitations under the License. */ #include "paddle/fluid/platform/profiler.pb.h" #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) -#include "paddle/fluid/platform/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" #endif namespace paddle { diff --git a/paddle/fluid/platform/profiler_helper.h b/paddle/fluid/platform/profiler_helper.h index 3408971efa411560cef0c0ddfd6d36e1310f820c..4277f7d4dc63e29383db3145772ee3aa98094447 100644 --- a/paddle/fluid/platform/profiler_helper.h +++ b/paddle/fluid/platform/profiler_helper.h @@ -121,17 +121,17 @@ std::vector> GetMemEvents() { void SynchronizeAllDevice() { #ifdef PADDLE_WITH_CUDA - int count = GetCUDADeviceCount(); + int count = GetGPUDeviceCount(); for (int i = 0; i < count; i++) { SetDeviceId(i); - PADDLE_ENFORCE_CUDA_SUCCESS(cudaDeviceSynchronize()); + PADDLE_ENFORCE_GPU_SUCCESS(cudaDeviceSynchronize()); } #endif #ifdef PADDLE_WITH_HIP - int count = GetCUDADeviceCount(); + int count = GetGPUDeviceCount(); for (int i = 0; i < count; i++) { SetDeviceId(i); - PADDLE_ENFORCE_CUDA_SUCCESS(hipDeviceSynchronize()); + PADDLE_ENFORCE_GPU_SUCCESS(hipDeviceSynchronize()); } #endif } diff --git a/paddle/fluid/platform/stream/cuda_stream.cc b/paddle/fluid/platform/stream/cuda_stream.cc index 212d99f6a78ed51b8ea6767ebf706259b7784938..dafb61fe0aaf4379099522e8f270166b56349eae 100644 --- a/paddle/fluid/platform/stream/cuda_stream.cc +++ b/paddle/fluid/platform/stream/cuda_stream.cc @@ -30,18 +30,18 @@ bool CUDAStream::Init(const Place& place, const Priority& priority, CUDADeviceGuard guard(BOOST_GET_CONST(CUDAPlace, place_).device); if (priority == Priority::kHigh) { #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(hipStreamCreateWithPriority( + PADDLE_ENFORCE_GPU_SUCCESS(hipStreamCreateWithPriority( &stream_, static_cast(flag), -1)); #else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamCreateWithPriority( + PADDLE_ENFORCE_GPU_SUCCESS(cudaStreamCreateWithPriority( &stream_, static_cast(flag), -1)); #endif } else if (priority == Priority::kNormal) { #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(hipStreamCreateWithPriority( + PADDLE_ENFORCE_GPU_SUCCESS(hipStreamCreateWithPriority( &stream_, static_cast(flag), 0)); #else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamCreateWithPriority( + PADDLE_ENFORCE_GPU_SUCCESS(cudaStreamCreateWithPriority( 
&stream_, static_cast(flag), 0)); #endif } @@ -58,9 +58,9 @@ void CUDAStream::Destroy() { WaitCallback(); if (stream_) { #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(hipStreamDestroy(stream_)); + PADDLE_ENFORCE_GPU_SUCCESS(hipStreamDestroy(stream_)); #else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamDestroy(stream_)); + PADDLE_ENFORCE_GPU_SUCCESS(cudaStreamDestroy(stream_)); #endif } stream_ = nullptr; @@ -89,7 +89,7 @@ void CUDAStream::Wait() const { #endif #endif // PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(e_sync); + PADDLE_ENFORCE_GPU_SUCCESS(e_sync); } CUDAStream* get_current_stream(int deviceId) { diff --git a/paddle/fluid/platform/stream/cuda_stream.h b/paddle/fluid/platform/stream/cuda_stream.h index 472d6bbab0c6cb02fb8d22ba144efca925786197..36f31c46673b2f0a6dcec5c01dcc12099798a6d5 100644 --- a/paddle/fluid/platform/stream/cuda_stream.h +++ b/paddle/fluid/platform/stream/cuda_stream.h @@ -17,7 +17,7 @@ limitations under the License. */ #include #include -#include "paddle/fluid/platform/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" #include "paddle/fluid/platform/macros.h" #include "paddle/fluid/platform/place.h" #include "paddle/fluid/platform/stream_callback_manager.h" @@ -64,32 +64,32 @@ class CUDAStream final { #ifdef PADDLE_WITH_HIP void RecordEvent(hipEvent_t ev, Callback callback) const { callback(); - PADDLE_ENFORCE_CUDA_SUCCESS(hipEventRecord(ev, stream_)); + PADDLE_ENFORCE_GPU_SUCCESS(hipEventRecord(ev, stream_)); } #else void RecordEvent(cudaEvent_t ev, Callback callback) const { callback(); - PADDLE_ENFORCE_CUDA_SUCCESS(cudaEventRecord(ev, stream_)); + PADDLE_ENFORCE_GPU_SUCCESS(cudaEventRecord(ev, stream_)); } #endif #ifdef PADDLE_WITH_HIP void RecordEvent(hipEvent_t ev) const { - PADDLE_ENFORCE_CUDA_SUCCESS(hipEventRecord(ev, stream_)); + PADDLE_ENFORCE_GPU_SUCCESS(hipEventRecord(ev, stream_)); } #else void RecordEvent(cudaEvent_t ev) const { - PADDLE_ENFORCE_CUDA_SUCCESS(cudaEventRecord(ev, stream_)); + PADDLE_ENFORCE_GPU_SUCCESS(cudaEventRecord(ev, stream_)); } #endif #ifdef PADDLE_WITH_HIP void WaitEvent(hipEvent_t ev) const { - PADDLE_ENFORCE_CUDA_SUCCESS(hipStreamWaitEvent(stream_, ev, 0)); + PADDLE_ENFORCE_GPU_SUCCESS(hipStreamWaitEvent(stream_, ev, 0)); } #else void WaitEvent(cudaEvent_t ev) const { - PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamWaitEvent(stream_, ev, 0)); + PADDLE_ENFORCE_GPU_SUCCESS(cudaStreamWaitEvent(stream_, ev, 0)); } #endif @@ -122,17 +122,11 @@ class CUDAStream final { } #endif - PADDLE_ENFORCE_CUDA_SUCCESS(err); + PADDLE_ENFORCE_GPU_SUCCESS(err); return false; } - void Synchronize() const { -#ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(hipStreamSynchronize(stream_)); -#else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamSynchronize(stream_)); -#endif - } + void Synchronize() const { platform::GpuStreamSync(stream_); } const Place& GetPlace() const { return place_; } diff --git a/paddle/fluid/platform/stream_callback_manager.cc b/paddle/fluid/platform/stream_callback_manager.cc index 3f0c5ace900d1162737af2819bfe0fa057a626a5..28aa022fe2f13280c80cbcadf72851e114301295 100644 --- a/paddle/fluid/platform/stream_callback_manager.cc +++ b/paddle/fluid/platform/stream_callback_manager.cc @@ -13,9 +13,9 @@ // limitations under the License. 
#include "paddle/fluid/platform/stream_callback_manager.h" -#ifdef PADDLE_WITH_ASCEND_CL +#include "paddle/fluid/platform/device/gpu/gpu_info.h" #include "paddle/fluid/platform/device/npu/npu_info.h" -#endif +#include "paddle/fluid/platform/enforce.h" namespace paddle { namespace platform { @@ -59,15 +59,15 @@ void StreamCallbackManager::AddCallback( }); #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( hipStreamAddCallback(stream_, StreamCallbackFunc, func, 0)); #endif #ifdef PADDLE_WITH_CUDA #if CUDA_VERSION >= 10000 - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( cudaLaunchHostFunc(stream_, StreamCallbackFunc, func)); #else - PADDLE_ENFORCE_CUDA_SUCCESS( + PADDLE_ENFORCE_GPU_SUCCESS( cudaStreamAddCallback(stream_, StreamCallbackFunc, func, 0)); #endif #endif @@ -81,11 +81,8 @@ void StreamCallbackManager::AddCallback( template void StreamCallbackManager::Wait() const { -#ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(hipStreamSynchronize(stream_)); -#endif -#ifdef PADDLE_WITH_CUDA - PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamSynchronize(stream_)); +#if defined(PADDLE_WITH_HIP) || defined(PADDLE_WITH_CUDA) + platform::GpuStreamSync(stream_); #endif #ifdef PADDLE_WITH_ASCEND_CL NPUStreamSync(stream_); diff --git a/paddle/fluid/platform/test_limit_gpu_memory.cu b/paddle/fluid/platform/test_limit_gpu_memory.cu index 81b766182337fc6684c7fca9e6e68a0c18d895b7..684cb780735514de24de3df16bdd97e8f63650bf 100644 --- a/paddle/fluid/platform/test_limit_gpu_memory.cu +++ b/paddle/fluid/platform/test_limit_gpu_memory.cu @@ -15,7 +15,7 @@ #include "gflags/gflags.h" #include "gtest/gtest.h" #include "paddle/fluid/platform/cuda_device_guard.h" -#include "paddle/fluid/platform/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" DECLARE_uint64(gpu_memory_limit_mb); @@ -30,32 +30,24 @@ TEST(test_record_malloc, test_limit_gpu_memory) { size_t limit = FLAGS_gpu_memory_limit_mb << 20; { - ASSERT_TRUE(IsCudaMallocRecorded(DEVICE_ID)); - ASSERT_EQ(RecordedCudaMallocSize(DEVICE_ID), 0UL); + ASSERT_TRUE(IsGpuMallocRecorded(DEVICE_ID)); + ASSERT_EQ(RecordedGpuMallocSize(DEVICE_ID), 0UL); } size_t avail, total; { size_t actual_avail, actual_total; - RecordedCudaMemGetInfo(&avail, &total, &actual_avail, &actual_total, - DEVICE_ID); + RecordedGpuMemGetInfo(&avail, &total, &actual_avail, &actual_total, + DEVICE_ID); ASSERT_EQ(total, limit); -#ifdef PADDLE_WITH_HIP - ASSERT_EQ(hipGetLastError(), gpuSuccess); -#else - ASSERT_EQ(cudaGetLastError(), gpuSuccess); -#endif + ASSERT_EQ(paddle::platform::GpuGetLastError(), gpuSuccess); } { CUDADeviceGuard guard(DEVICE_ID); GpuMemoryUsage(&avail, &total); ASSERT_EQ(total, limit); -#ifdef PADDLE_WITH_HIP - ASSERT_EQ(hipGetLastError(), gpuSuccess); -#else - ASSERT_EQ(cudaGetLastError(), gpuSuccess); -#endif + ASSERT_EQ(paddle::platform::GpuGetLastError(), gpuSuccess); } gpuError_t err = gpuSuccess; @@ -63,54 +55,41 @@ TEST(test_record_malloc, test_limit_gpu_memory) { void *p1 = nullptr; size_t size1 = limit / 4 * 3; { - err = platform::RecordedCudaMalloc(&p1, size1, DEVICE_ID); + err = platform::RecordedGpuMalloc(&p1, size1, DEVICE_ID); ASSERT_EQ(err, gpuSuccess); -#ifdef PADDLE_WITH_HIP - ASSERT_EQ(hipGetLastError(), gpuSuccess); -#else - ASSERT_EQ(cudaGetLastError(), gpuSuccess); -#endif + ASSERT_EQ(paddle::platform::GpuGetLastError(), gpuSuccess); ASSERT_NE(p1, nullptr); - ASSERT_EQ(RecordedCudaMallocSize(DEVICE_ID), size1); + ASSERT_EQ(RecordedGpuMallocSize(DEVICE_ID), size1); } void *p2 = nullptr; size_t size2 = 
limit / 2; { - err = platform::RecordedCudaMalloc(&p2, size2, DEVICE_ID); -#ifdef PADDLE_WITH_HIP - ASSERT_EQ(err, hipErrorOutOfMemory); - ASSERT_EQ(hipGetLastError(), gpuSuccess); -#else - ASSERT_EQ(err, cudaErrorMemoryAllocation); - ASSERT_EQ(cudaGetLastError(), gpuSuccess); -#endif + err = platform::RecordedGpuMalloc(&p2, size2, DEVICE_ID); + ASSERT_EQ(err, gpuErrorOutOfMemory); + ASSERT_EQ(paddle::platform::GpuGetLastError(), gpuSuccess); ASSERT_EQ(p2, nullptr); - ASSERT_EQ(RecordedCudaMallocSize(DEVICE_ID), size1); + ASSERT_EQ(RecordedGpuMallocSize(DEVICE_ID), size1); } { - platform::RecordedCudaFree(p1, size1, DEVICE_ID); - ASSERT_EQ(RecordedCudaMallocSize(DEVICE_ID), 0UL); + platform::RecordedGpuFree(p1, size1, DEVICE_ID); + ASSERT_EQ(RecordedGpuMallocSize(DEVICE_ID), 0UL); } { - err = platform::RecordedCudaMalloc(&p2, size2, DEVICE_ID); + err = platform::RecordedGpuMalloc(&p2, size2, DEVICE_ID); ASSERT_EQ(err, gpuSuccess); -#ifdef PADDLE_WITH_HIP - ASSERT_EQ(hipGetLastError(), hipSuccess); -#else - ASSERT_EQ(cudaGetLastError(), cudaSuccess); -#endif + ASSERT_EQ(paddle::platform::GpuGetLastError(), gpuSuccess); ASSERT_NE(p2, nullptr); - ASSERT_EQ(RecordedCudaMallocSize(DEVICE_ID), size2); + ASSERT_EQ(RecordedGpuMallocSize(DEVICE_ID), size2); } { - platform::RecordedCudaFree(p2, size2, DEVICE_ID); - ASSERT_EQ(RecordedCudaMallocSize(DEVICE_ID), 0UL); + platform::RecordedGpuFree(p2, size2, DEVICE_ID); + ASSERT_EQ(RecordedGpuMallocSize(DEVICE_ID), 0UL); } } diff --git a/paddle/fluid/pybind/cuda_streams_py.cc b/paddle/fluid/pybind/cuda_streams_py.cc index 311fb872ac10394fbb29c4ee2c90801c2816269d..21571e17a2b48bcd5a25cff9178a363f54ba60f2 100644 --- a/paddle/fluid/pybind/cuda_streams_py.cc +++ b/paddle/fluid/pybind/cuda_streams_py.cc @@ -61,9 +61,9 @@ void BindCudaStream(py::module *m_ptr) { int curr_device_id = paddle::platform::GetCurrentDeviceId(); paddle::platform::SetDeviceId(device_id); #ifdef PADDLE_WITH_HIP - PADDLE_ENFORCE_CUDA_SUCCESS(hipDeviceSynchronize()); + PADDLE_ENFORCE_GPU_SUCCESS(hipDeviceSynchronize()); #else - PADDLE_ENFORCE_CUDA_SUCCESS(cudaDeviceSynchronize()); + PADDLE_ENFORCE_GPU_SUCCESS(cudaDeviceSynchronize()); #endif paddle::platform::SetDeviceId(curr_device_id); #else @@ -264,7 +264,7 @@ void BindCudaStream(py::module *m_ptr) { auto stream_flag = paddle::platform::stream::StreamFlag::kStreamNonBlocking; - int device_count = platform::GetCUDADeviceCount(); + int device_count = platform::GetGPUDeviceCount(); if (device < 0) { device = platform::GetCurrentDeviceId(); } diff --git a/paddle/fluid/pybind/imperative.cc b/paddle/fluid/pybind/imperative.cc index cad0c1e70cc03c46c6c9f0f0fbd7326b2b74c0e3..f03acc3808468d9352b48bb6aa197fbf944b99dd 100644 --- a/paddle/fluid/pybind/imperative.cc +++ b/paddle/fluid/pybind/imperative.cc @@ -1744,7 +1744,7 @@ void BindImperative(py::module *m_ptr) { "Cannot copy this Tensor to GPU in CPU version Paddle, " "Please recompile or reinstall Paddle with CUDA support.")); #else - int device_count = platform::GetCUDADeviceCount(); + int device_count = platform::GetGPUDeviceCount(); int device_id = 0; if (handle == py::none()) { if (platform::is_gpu_place(self->Place())) { diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc index 5fc1f27eff36f68f0cd4e1f464ea4c5118b1d7af..9ff9377abb2624e4aef9de98518c780443383fd8 100644 --- a/paddle/fluid/pybind/pybind.cc +++ b/paddle/fluid/pybind/pybind.cc @@ -114,9 +114,9 @@ limitations under the License. 
*/ #include "paddle/fluid/operators/nccl/nccl_gpu_common.h" #endif #ifndef PADDLE_WITH_HIP -#include "paddle/fluid/platform/cuda_profiler.h" +#include "paddle/fluid/platform/device/gpu/cuda/cuda_profiler.h" #endif -#include "paddle/fluid/platform/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" #endif #ifdef PADDLE_WITH_ASCEND_CL @@ -507,7 +507,7 @@ static void AssertStaticGraphAndDygraphGradMakerNoDiff() { static int GetNCCLVersion() { #if NCCL_VERSION_CODE >= 2304 int ver; - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclGetVersion(&ver)); + PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclGetVersion(&ver)); return ver; #else PADDLE_THROW(platform::errors::External( @@ -556,7 +556,7 @@ PYBIND11_MODULE(core_noavx, m) { m.def("disable_signal_handler", &DisableSignalHandler); #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) - m.def("cudnn_version", &platform::CudnnVersion); + m.def("cudnn_version", &platform::DnnVersion); m.def("gpu_memory_available", []() { size_t available = 0; size_t total = 0; @@ -564,6 +564,7 @@ PYBIND11_MODULE(core_noavx, m) { return available; }); #endif + #ifdef PADDLE_WITH_NCCL m.def("nccl_version", &GetNCCLVersion); #endif @@ -1646,8 +1647,8 @@ All parameter, weight, gradient are variables in Paddle. std::exit(-1); } - if (UNLIKELY(dev_id >= platform::GetCUDADeviceCount())) { - if (platform::GetCUDADeviceCount() == 0) { + if (UNLIKELY(dev_id >= platform::GetGPUDeviceCount())) { + if (platform::GetGPUDeviceCount() == 0) { LOG(ERROR) << "Cannot use GPU because there is no GPU " "detected on your " "machine."; @@ -1656,8 +1657,8 @@ All parameter, weight, gradient are variables in Paddle. LOG(ERROR) << string::Sprintf( "Invalid CUDAPlace(%d), must inside [0, %d), because GPU " "number on your machine is %d", - dev_id, platform::GetCUDADeviceCount(), - platform::GetCUDADeviceCount()); + dev_id, platform::GetGPUDeviceCount(), + platform::GetGPUDeviceCount()); std::exit(-1); } } @@ -2239,7 +2240,7 @@ All parameter, weight, gradient are variables in Paddle. #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) m.def("is_float16_supported", [](const platform::CUDAPlace &place) -> bool { // Only GPUs with Compute Capability >= 53 support float16 - return platform::GetCUDAComputeCapability(place.device) >= 53; + return platform::GetGPUComputeCapability(place.device) >= 53; }); #endif @@ -2419,7 +2420,7 @@ All parameter, weight, gradient are variables in Paddle. m.def("op_support_gpu", OpSupportGPU); #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) - m.def("get_cuda_device_count", platform::GetCUDADeviceCount); + m.def("get_cuda_device_count", platform::GetGPUDeviceCount); m.def("cuda_empty_cache", [] { for (int dev_id : platform::GetSelectedDevices()) { auto *dev_ctx = platform::DeviceContextPool::Instance().GetByPlace( diff --git a/paddle/pten/api/lib/ext_compat_utils.cc b/paddle/pten/api/lib/ext_compat_utils.cc index b7250d15794319e2ebf59de0f32f9bc177ee7bd8..791a8526f3847a8772cec7eabca9301d8aadda3f 100644 --- a/paddle/pten/api/lib/ext_compat_utils.cc +++ b/paddle/pten/api/lib/ext_compat_utils.cc @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "paddle/pten/api/lib/ext_compat_utils.h" -#include "paddle/fluid/platform/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" namespace paddle { namespace experimental { diff --git a/paddle/pten/core/convert_utils.cc b/paddle/pten/core/convert_utils.cc index 8b54813eadf327c8400bcf9575eed1f1fe091fd7..e457c57d59e55cdac8a5101e10abfe0c5815c6a6 100644 --- a/paddle/pten/core/convert_utils.cc +++ b/paddle/pten/core/convert_utils.cc @@ -16,7 +16,7 @@ limitations under the License. */ #include "paddle/fluid/pybind/tensor_py.h" // See Note [ Why still include the fluid headers? ] -#include "paddle/fluid/platform/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" namespace pten { diff --git a/paddle/pten/kernels/functions/cuda/cast_kernel_impl.h b/paddle/pten/kernels/functions/cuda/cast_kernel_impl.h index 435da644356f9467930f4954837a53ed6454529a..1bf5bb288e83200a7e2618d86ff1ae4dd9856821 100644 --- a/paddle/pten/kernels/functions/cuda/cast_kernel_impl.h +++ b/paddle/pten/kernels/functions/cuda/cast_kernel_impl.h @@ -13,12 +13,12 @@ // limitations under the License. #pragma once -#include "paddle/fluid/platform/cuda_helper.h" +#include "paddle/fluid/platform/device/gpu/gpu_helper.h" #include "paddle/fluid/platform/float16.h" #include "paddle/pten/core/dense_tensor.h" #include "paddle/fluid/platform/aligned_vector.h" -#include "paddle/fluid/platform/gpu_launch_config.h" +#include "paddle/fluid/platform/device/gpu/gpu_launch_config.h" namespace pten { namespace detail { using CUDAContext = paddle::platform::CUDADeviceContext; diff --git a/paddle/pten/kernels/functions/cuda/reduce/reduce_cuda_impl.h b/paddle/pten/kernels/functions/cuda/reduce/reduce_cuda_impl.h index e292a56d08a8db8a7c645268d6e7e98ebeba6ce2..21663ee0388c0b1561de2b06974aa8f4e3e2d8a9 100644 --- a/paddle/pten/kernels/functions/cuda/reduce/reduce_cuda_impl.h +++ b/paddle/pten/kernels/functions/cuda/reduce/reduce_cuda_impl.h @@ -36,7 +36,7 @@ namespace cub = hipcub; #include "paddle/fluid/operators/amp/fp16_type_traits.h" #include "paddle/fluid/operators/cast_op.h" #include "paddle/fluid/operators/kernel_primitives/kernel_primitives.h" -#include "paddle/fluid/platform/cuda_device_function.h" +#include "paddle/fluid/platform/device/gpu/gpu_device_function.h" #include "paddle/fluid/platform/fast_divmod.h" #include "paddle/fluid/operators/kernel_primitives/compute_primitives.h" @@ -479,9 +479,9 @@ struct ReduceConfig { reduce_num_per_thread = details::AlignUp(reduce_num, block_dim->y); } int device_id = paddle::platform::GetCurrentDeviceId(); - int max_mp = paddle::platform::GetCUDAMultiProcessors(device_id); + int max_mp = paddle::platform::GetGPUMultiProcessors(device_id); int max_threads_per_mp = - paddle::platform::GetCUDAMaxThreadsPerMultiProcessor(device_id); + paddle::platform::GetGPUMaxThreadsPerMultiProcessor(device_id); int max_threads = max_threads_per_mp * max_mp; int num_threads = block_dim->x * block_dim->y; int max_num_blocks = max_threads / num_threads; @@ -521,9 +521,9 @@ struct ReduceConfig { left_num = last_dim_num; grid_dim->z = grid_z; int device_id = paddle::platform::GetCurrentDeviceId(); - int max_mp = paddle::platform::GetCUDAMultiProcessors(device_id); + int max_mp = paddle::platform::GetGPUMultiProcessors(device_id); int max_threads_per_mp = - paddle::platform::GetCUDAMaxThreadsPerMultiProcessor(device_id); + paddle::platform::GetGPUMaxThreadsPerMultiProcessor(device_id); int max_threads = max_threads_per_mp * max_mp; // init int num_block = (max_threads / left_num); diff 
--git a/tools/check_file_diff_approvals.sh b/tools/check_file_diff_approvals.sh index e77b71e4da1c9c938fea8673f6d140243860b65c..482e65a726ef54668ae5d48f89ee28dd288b498f 100644 --- a/tools/check_file_diff_approvals.sh +++ b/tools/check_file_diff_approvals.sh @@ -225,7 +225,7 @@ if [ "${HAS_MODIFIED_DEMO_CMAKE}" != "" ] && [ "${GIT_PR_ID}" != "" ]; then ALL_PADDLE_ENFORCE=`git diff -U0 upstream/$BRANCH |grep "^+" |grep -zoE "PADDLE_ENFORCE\(.[^,\);]+.[^;]*\);\s" || true` if [ "${ALL_PADDLE_ENFORCE}" != "" ] && [ "${GIT_PR_ID}" != "" ]; then - echo_line="PADDLE_ENFORCE is not recommended. Please use PADDLE_ENFORCE_EQ/NE/GT/GE/LT/LE or PADDLE_ENFORCE_NOT_NULL or PADDLE_ENFORCE_CUDA_SUCCESS instead, see [ https://github.com/PaddlePaddle/Paddle/wiki/PADDLE_ENFORCE-Rewriting-Specification ] for details.\nYou must have one RD (chenwhql (Recommend) , luotao1 (Recommend) or lanxianghit) approval for the usage (either add or delete) of PADDLE_ENFORCE.\n${ALL_PADDLE_ENFORCE}\n" + echo_line="PADDLE_ENFORCE is not recommended. Please use PADDLE_ENFORCE_EQ/NE/GT/GE/LT/LE or PADDLE_ENFORCE_NOT_NULL or PADDLE_ENFORCE_GPU_SUCCESS instead, see [ https://github.com/PaddlePaddle/Paddle/wiki/PADDLE_ENFORCE-Rewriting-Specification ] for details.\nYou must have one RD (chenwhql (Recommend) , luotao1 (Recommend) or lanxianghit) approval for the usage (either add or delete) of PADDLE_ENFORCE.\n${ALL_PADDLE_ENFORCE}\n" check_approval 1 6836917 47554610 22561442 fi
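For reference, a minimal sketch (not part of the patch) of how a call site reads after these renames. It assumes only symbols that appear in the hunks above: the relocated header paddle/fluid/platform/device/gpu/gpu_info.h, the GetGPUDeviceCount/SetDeviceId/GpuGetLastError wrappers, and the PADDLE_ENFORCE_GPU_SUCCESS macro; the helper function name itself is illustrative.

#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#include "paddle/fluid/platform/enforce.h"

namespace platform = paddle::platform;

// Hypothetical helper, for illustration only: touch every visible GPU once
// and surface any pending error through the renamed enforce macro.
void CheckAllGpusExample() {
  int count = platform::GetGPUDeviceCount();  // was GetCUDADeviceCount()
  for (int i = 0; i < count; ++i) {
    platform::SetDeviceId(i);
    // GpuGetLastError() hides the cudaGetLastError()/hipGetLastError() split,
    // so the call site no longer needs PADDLE_WITH_HIP branching.
    PADDLE_ENFORCE_GPU_SUCCESS(platform::GpuGetLastError());
  }
}

The same pattern holds for the other renamed wrappers seen above (GpuStreamSync, GetGPUComputeCapability, RecordedGpuMalloc/RecordedGpuFree): each call site uses one spelling instead of paired CUDA/HIP #ifdef branches.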