diff --git a/paddle/fluid/framework/rw_lock.h b/paddle/fluid/framework/rw_lock.h
index 4f1d9adbfc5d534b1b8f4780c9a0ade26fb53ecc..da163835e8652ae479121bd67f2eed77332b2740 100644
--- a/paddle/fluid/framework/rw_lock.h
+++ b/paddle/fluid/framework/rw_lock.h
@@ -71,6 +71,9 @@ class RWLockGuard {
         WRLock();
         break;
       }
+      case Status::kUnLock: {
+        break;
+      }
     }
   }
 
@@ -78,6 +81,7 @@ class RWLockGuard {
     switch (status_) {
       case Status::kUnLock: {
         lock_->WRLock();
+        status_ = Status::kWRLock;
         break;
       }
       case Status::kWRLock: {
@@ -95,6 +99,7 @@ class RWLockGuard {
     switch (status_) {
      case Status::kUnLock: {
        lock_->RDLock();
+        status_ = Status::kRDLock;
        break;
      }
      case Status::kRDLock: {
@@ -111,6 +116,7 @@ class RWLockGuard {
   void UnLock() {
     if (status_ != Status::kUnLock) {
       lock_->UNLock();
+      status_ = Status::kUnLock;
     }
   }
 
diff --git a/paddle/fluid/operators/conv_transpose_cudnn_op.cu.cc b/paddle/fluid/operators/conv_transpose_cudnn_op.cu.cc
index 2376212f5091468f0b361528dfbb68e7ff76c922..73831611d01b8c5b8d2d9f7f15634a0094e4a608 100644
--- a/paddle/fluid/operators/conv_transpose_cudnn_op.cu.cc
+++ b/paddle/fluid/operators/conv_transpose_cudnn_op.cu.cc
@@ -230,7 +230,7 @@ class CUDNNConvTransposeGradOpKernel : public framework::OpKernel {
     // Because beta is zero, it is unnecessary to reset filter_grad.
     // Gradient with respect to the filter
     for (int g = 0; g < groups; g++) {
-      auto cudnn_func = [&](void* cudnn_func) {
+      auto cudnn_func = [&](void* cudnn_workspace) {
         CUDNN_ENFORCE(platform::dynload::cudnnConvolutionBackwardFilter(
             handle, &alpha, cudnn_output_desc,
             output_grad_data + output_grad_offset * g, cudnn_input_desc,
diff --git a/paddle/fluid/platform/device_context.cc b/paddle/fluid/platform/device_context.cc
index ec297ea9a6365f82819d329eb63754ba42393d61..3d416bb528a531b7ceaa63b3f651e9955ecb6a4d 100644
--- a/paddle/fluid/platform/device_context.cc
+++ b/paddle/fluid/platform/device_context.cc
@@ -176,7 +176,7 @@ class CudnnHolder {
     if (required_workspace_len <= workspace_len_) {
       return;
     }
-    void* new_workspace = paddle::memory::Alloc(place_, required_len);
+    void* new_workspace = paddle::memory::Alloc(place_, required_workspace_len);
     if (workspace_ != nullptr) {
       // Maybe someone is using the current workspace
       PADDLE_ENFORCE(cudaStreamSynchronize(*stream_));
@@ -184,7 +184,7 @@ class CudnnHolder {
       paddle::memory::Free(place_, workspace_);
     }
     workspace_ = new_workspace;
-    workspace_len_ = required_len;
+    workspace_len_ = required_workspace_len;
   }
 
   cudnnHandle_t cudnn_handle_;
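
The rw_lock.h hunks all restore the same invariant: `status_` must mirror the state of the underlying lock, otherwise `UnLock()` and the guard's destructor act on stale state (the extra `kUnLock` case in the constructor just makes the switch exhaustive). Below is a minimal self-contained sketch of that pattern — a hypothetical `RWLockGuardSketch` over a raw `pthread_rwlock_t`, not Paddle's class; the `status_ = ...` lines correspond to the ones the patch adds:

```cpp
#include <pthread.h>

class RWLockGuardSketch {
 public:
  enum class Status { kUnLock, kWRLock, kRDLock };

  explicit RWLockGuardSketch(pthread_rwlock_t* lock) : lock_(lock) {}
  ~RWLockGuardSketch() { UnLock(); }  // relies on status_ being accurate

  void WRLock() {
    if (status_ == Status::kUnLock) {
      pthread_rwlock_wrlock(lock_);
      status_ = Status::kWRLock;  // the update the patch adds
    }
  }

  void RDLock() {
    if (status_ == Status::kUnLock) {
      pthread_rwlock_rdlock(lock_);
      status_ = Status::kRDLock;  // the update the patch adds
    }
  }

  void UnLock() {
    if (status_ != Status::kUnLock) {
      pthread_rwlock_unlock(lock_);
      status_ = Status::kUnLock;  // the update the patch adds
    }
  }

 private:
  pthread_rwlock_t* lock_;
  Status status_{Status::kUnLock};
};

int main() {
  pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
  RWLockGuardSketch guard(&lock);
  guard.WRLock();
  guard.UnLock();  // without the status_ update, this would be a no-op
  guard.RDLock();  // ...and this would deadlock on the still-held write lock
  return 0;        // destructor releases the read lock via UnLock()
}
```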
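The device_context.cc hunks fix a mismatched identifier: the `Alloc()` call and the cached length must both use the same `required_workspace_len` request. A hedged sketch of that grow-only workspace pattern, with a hypothetical class name and plain `malloc`/`free` standing in for `paddle::memory::Alloc`/`Free`:

```cpp
#include <cstdlib>

class WorkspaceSketch {
 public:
  ~WorkspaceSketch() { std::free(workspace_); }

  void Reallocate(size_t required_workspace_len) {
    if (required_workspace_len <= workspace_len_) {
      return;  // the current buffer is already large enough
    }
    void* new_workspace = std::malloc(required_workspace_len);
    if (workspace_ != nullptr) {
      // In the CUDA version, the stream is synchronized at this point
      // because a kernel may still be reading the old buffer.
      std::free(workspace_);
    }
    workspace_ = new_workspace;
    workspace_len_ = required_workspace_len;  // must match the alloc size
  }

 private:
  void* workspace_ = nullptr;
  size_t workspace_len_ = 0;
};
```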