diff --git a/paddle/fluid/operators/affine_grid_cudnn_op.cu.cc b/paddle/fluid/operators/affine_grid_cudnn_op.cu.cc
index ed71594ba5781590f3291d56c4ba1a4443003bd5..a2027e35063cba3d060c8979a46452ee71ca0e33 100644
--- a/paddle/fluid/operators/affine_grid_cudnn_op.cu.cc
+++ b/paddle/fluid/operators/affine_grid_cudnn_op.cu.cc
@@ -26,8 +26,11 @@ template <typename T>
 class CUDNNAffineGridOpKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
-    PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
-                   "It must use CUDAPlace.");
+    PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true,
+                      platform::errors::InvalidArgument(
+                          "Only support for CUDAPlace. Please switch your "
+                          "context from CPUPlace to CUDAPlace or update "
+                          "your cudnn."));
     auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
     auto handle = dev_ctx.cudnn_handle();
     auto* theta = ctx.Input<Tensor>("Theta");
@@ -56,8 +59,10 @@ class CUDNNAffineGridOpKernel : public framework::OpKernel<T> {
     cudnnSpatialTransformerDescriptor_t cudnn_st_desc =
         st_desc.descriptor<T>(4, h_size_data);
 
-    PADDLE_ENFORCE(platform::dynload::cudnnSpatialTfGridGeneratorForward(
-        handle, cudnn_st_desc, theta_data, output_data));
+    PADDLE_ENFORCE_CUDA_SUCCESS(
+        platform::dynload::cudnnSpatialTfGridGeneratorForward(
+            handle, cudnn_st_desc, theta_data,
+            output_data));
   }
 };
 
@@ -65,8 +70,11 @@ template <typename T>
 class CUDNNAffineGridGradOpKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
-    PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
-                   "It must use CUDAPlace.");
+    PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true,
+                      platform::errors::InvalidArgument(
+                          "Only support for CUDAPlace. Please switch your "
+                          "context from CPUPlace to CUDAPlace or update "
+                          "your cudnn."));
     auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
     auto handle = dev_ctx.cudnn_handle();
     auto output_grad = ctx.Input<Tensor>(framework::GradVarName("Output"));
@@ -95,8 +103,10 @@ class CUDNNAffineGridGradOpKernel : public framework::OpKernel<T> {
     const T* output_grad_data = output_grad->data<T>();
     T* theta_grad_data = theta_grad->mutable_data<T>(ctx.GetPlace());
 
-    PADDLE_ENFORCE(platform::dynload::cudnnSpatialTfGridGeneratorBackward(
-        handle, cudnn_st_desc, output_grad_data, theta_grad_data));
+    PADDLE_ENFORCE_CUDA_SUCCESS(
+        platform::dynload::cudnnSpatialTfGridGeneratorBackward(
+            handle, cudnn_st_desc, output_grad_data,
+            theta_grad_data));
   }
 };
 
diff --git a/paddle/fluid/operators/detection/generate_proposals_op.cu b/paddle/fluid/operators/detection/generate_proposals_op.cu
index 485136d8e2f7ab66f6b1c58deb09036ea5d4e1ec..71323ea966a6cffe6a1750d96083a68f0a686bfb 100644
--- a/paddle/fluid/operators/detection/generate_proposals_op.cu
+++ b/paddle/fluid/operators/detection/generate_proposals_op.cu
@@ -247,8 +247,11 @@ static void NMS(const platform::CUDADeviceContext &ctx, const Tensor &proposals,
                 const Tensor &sorted_indices, const T nms_threshold,
                 Tensor *keep_out) {
   int boxes_num = proposals.dims()[0];
-  PADDLE_ENFORCE_EQ(boxes_num, sorted_indices.dims()[0]);
-
+  PADDLE_ENFORCE_EQ(boxes_num, sorted_indices.dims()[0],
+                    platform::errors::InvalidArgument(
+                        "The number of proposal boxes must equal the number "
+                        "of sorted indices in NMS."));
+
   const int col_blocks = DIVUP(boxes_num, kThreadsPerBlock);
   dim3 blocks(DIVUP(boxes_num, kThreadsPerBlock),
               DIVUP(boxes_num, kThreadsPerBlock));