diff --git a/paddle/fluid/operators/conv_cudnn_op.cu b/paddle/fluid/operators/conv_cudnn_op.cu
index 00af724ac7fce64b9a210bf43a150acf20f34dce..f8b76f387cc1954cc4329daf04a6bdcab5c0775f 100644
--- a/paddle/fluid/operators/conv_cudnn_op.cu
+++ b/paddle/fluid/operators/conv_cudnn_op.cu
@@ -50,8 +50,9 @@ class CUDNNConvOpKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
     auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
-    PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true,
-                      "It must use CUDAPlace.");
+    PADDLE_ENFORCE_EQ(
+        platform::is_gpu_place(ctx.GetPlace()), true,
+        paddle::platform::errors::PreconditionNotMet("It must use CUDAPlace."));
     const Tensor* input = ctx.Input<Tensor>("Input");
     auto* filter = ctx.Input<Tensor>("Filter");
     auto* output = ctx.Output<Tensor>("Output");
@@ -60,14 +61,16 @@ class CUDNNConvOpKernel : public framework::OpKernel<T> {
     std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
     std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
     int groups = ctx.Attr<int>("groups");
+    bool exhaustive_search = FLAGS_cudnn_exhaustive_search ||
+                             ctx.Attr<bool>("exhaustive_search");
+    bool deterministic = FLAGS_cudnn_deterministic;
+    auto exhaustive_deterministic = exhaustive_search && deterministic;
+    PADDLE_ENFORCE_EQ(exhaustive_deterministic, false,
+                      platform::errors::InvalidArgument(
+                          "Can't set exhaustive_search True and "
+                          "FLAGS_cudnn_deterministic True at the same time."));
-    if (exhaustive_search && FLAGS_cudnn_deterministic) {
-      PADDLE_THROW(
-          "Cann't set exhaustive_search True and "
-          "FLAGS_cudnn_deterministic True at same time.");
-    }
     const std::string padding_algorithm =
         ctx.Attr<std::string>("padding_algorithm");
     const std::string data_format = ctx.Attr<std::string>("data_format");
@@ -197,7 +200,8 @@ class CUDNNConvOpKernel : public framework::OpKernel<T> {
               &transformed_input);
         } break;
         default:
-          PADDLE_THROW("ConvOp only support tensors with 4 or 5 dimensions.");
+          PADDLE_THROW(platform::errors::InvalidArgument(
+              "ConvOp only supports tensors with 4 or 5 dimensions."));
       }
     } else {
@@ -317,8 +321,9 @@ class CUDNNConvGradOpKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
     auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
-    PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true,
-                      "It must use CUDAPlace.");
+    PADDLE_ENFORCE_EQ(
+        platform::is_gpu_place(ctx.GetPlace()), true,
+        paddle::platform::errors::PreconditionNotMet("It must use CUDAPlace."));
     auto input = ctx.Input<Tensor>("Input");
     auto filter = ctx.Input<Tensor>("Filter");
     auto output_grad = ctx.Input<Tensor>(framework::GradVarName("Output"));
@@ -337,14 +342,16 @@ class CUDNNConvGradOpKernel : public framework::OpKernel<T> {
     std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
     std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");
     int groups = ctx.Attr<int>("groups");
+    bool exhaustive_search = FLAGS_cudnn_exhaustive_search ||
+                             ctx.Attr<bool>("exhaustive_search");
     bool deterministic = FLAGS_cudnn_deterministic;
-    if (exhaustive_search && deterministic) {
-      PADDLE_THROW(
-          "Can't set exhaustive_search True and "
-          "FLAGS_cudnn_deterministic True at same time.");
-    }
+    auto exhaustive_deterministic = exhaustive_search && deterministic;
+    PADDLE_ENFORCE_EQ(exhaustive_deterministic, false,
+                      platform::errors::InvalidArgument(
+                          "Can't set exhaustive_search True and "
+                          "FLAGS_cudnn_deterministic True at the same time."));
+
     const std::string data_format = ctx.Attr<std::string>("data_format");
     const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC");
@@ -495,7 +502,8 @@ class CUDNNConvGradOpKernel : public framework::OpKernel<T> {
               &transformed_input);
         } break;
         default:
-          PADDLE_THROW("ConvOp only support tensors with 4 or 5 dimensions.");
+          PADDLE_THROW(platform::errors::InvalidArgument(
+              "ConvOp only supports tensors with 4 or 5 dimensions."));
       }
     } else {
       transformed_input.ShareDataWith(transformed_input_channel);
@@ -701,8 +709,9 @@ class CUDNNConvDoubleGradOpKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
     auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
-    PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true,
-                      "It must use CUDAPlace.");
+    PADDLE_ENFORCE_EQ(
+        platform::is_gpu_place(ctx.GetPlace()), true,
+        paddle::platform::errors::PreconditionNotMet("It must use CUDAPlace."));
     auto X = ctx.Input<Tensor>("Input");
     auto W = ctx.Input<Tensor>("Filter");
     auto dO = ctx.Input<Tensor>("DOutput");
@@ -736,14 +745,16 @@ class CUDNNConvDoubleGradOpKernel : public framework::OpKernel<T> {
     const std::vector<int>& strides = ctx.Attr<std::vector<int>>("strides");
     std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
     int groups = ctx.Attr<int>("groups");
+    bool exhaustive_search = FLAGS_cudnn_exhaustive_search ||
+                             ctx.Attr<bool>("exhaustive_search");
     bool deterministic = FLAGS_cudnn_deterministic;
-    if (exhaustive_search && deterministic) {
-      PADDLE_THROW(
-          "Can't set exhaustive_search True and "
-          "FLAGS_cudnn_deterministic True at same time.");
-    }
+    auto exhaustive_deterministic = exhaustive_search && deterministic;
+    PADDLE_ENFORCE_EQ(exhaustive_deterministic, false,
+                      platform::errors::InvalidArgument(
+                          "Can't set exhaustive_search True and "
+                          "FLAGS_cudnn_deterministic True at the same time."));
+
     std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
     std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");
@@ -878,7 +889,8 @@ class CUDNNConvDoubleGradOpKernel : public framework::OpKernel<T> {
           }
         } break;
         default:
-          PADDLE_THROW("ConvOp only support tensors with 4 or 5 dimensions.");
+          PADDLE_THROW(platform::errors::InvalidArgument(
+              "ConvOp only supports tensors with 4 or 5 dimensions."));
       }
     } else {
diff --git a/paddle/fluid/operators/conv_op.h b/paddle/fluid/operators/conv_op.h
index 8a5345e3cf8d9f1c657fe2996015af4dc038a1bf..662fac9e77e023d2e1b173caa5a9769b56eaf0c4 100644
--- a/paddle/fluid/operators/conv_op.h
+++ b/paddle/fluid/operators/conv_op.h
@@ -685,8 +685,9 @@ class GemmConvDoubleGradKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
     auto& dev_ctx = ctx.template device_context<platform::CPUDeviceContext>();
-    PADDLE_ENFORCE_EQ(platform::is_cpu_place(ctx.GetPlace()), true,
-                      "It must use CPUPlace.");
+    PADDLE_ENFORCE_EQ(
+        platform::is_cpu_place(ctx.GetPlace()), true,
+        paddle::platform::errors::PreconditionNotMet("It must use CPUPlace."));
     const Tensor* X = ctx.Input<Tensor>("Input");
     const Tensor* dY = ctx.Input<Tensor>("DOutput");
     const Tensor* ddX = ctx.Input<Tensor>("DDInput");
@@ -982,11 +983,20 @@ class DepthwiseConvKernel : public framework::OpKernel<T> {
       PADDLE_ENFORCE_EQ(
           output->dims()[output->dims().size() - 1] %
               input->dims()[input->dims().size() - 1],
-          0, "The output channels must be a multiple of the input channels");
+          0, platform::errors::InvalidArgument(
+                 "ShapeError: The output channels must be a multiple of the "
+                 "input channels. But received output channel number is %d "
+                 "and input channel number is %d",
+                 output->dims()[output->dims().size() - 1],
+                 input->dims()[input->dims().size() - 1]));
     } else {
       PADDLE_ENFORCE_EQ(
           output->dims()[1] % input->dims()[1], 0,
-          "The output channels must be a multiple of the input channels");
+          platform::errors::InvalidArgument(
+              "ShapeError: The output channels must be a multiple of the "
+              "input channels. But received output channel number is %d "
+              "and input channel number is %d",
+              output->dims()[1], input->dims()[1]));
     }
     // transform tensor
     Tensor transformed_input(input->type());
diff --git a/paddle/fluid/operators/conv_transpose_cudnn_op.cu b/paddle/fluid/operators/conv_transpose_cudnn_op.cu
index 99ec1e048101b281e71005f6fde328c664ba66be..5249264b1c9bcf13c5ee8227828087659de5254b 100644
--- a/paddle/fluid/operators/conv_transpose_cudnn_op.cu
+++ b/paddle/fluid/operators/conv_transpose_cudnn_op.cu
@@ -51,8 +51,9 @@ template <typename T>
 class CUDNNConvTransposeOpKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
-    PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true,
-                      "It must use CUDAPlace.");
+    PADDLE_ENFORCE_EQ(
+        platform::is_gpu_place(ctx.GetPlace()), true,
+        paddle::platform::errors::PreconditionNotMet("It must use CUDAPlace."));
     auto* input = ctx.Input<Tensor>("Input");
     auto* filter = ctx.Input<Tensor>("Filter");
     auto* output = ctx.Output<Tensor>("Output");
@@ -145,9 +146,8 @@ class CUDNNConvTransposeOpKernel : public framework::OpKernel<T> {
           ctx, input_pad, input_transpose, pad_value, &transformed_input);
         } break;
         default:
-          PADDLE_ENFORCE_EQ(
-              rank == 4 || rank == 5, true,
-              "Op(ConvTranspose) only supports 4-D or 5-D input Tensor.");
+          PADDLE_THROW(platform::errors::InvalidArgument(
+              "Op(ConvTranspose) only supports 4-D or 5-D input Tensor."));
       }
     } else {
       transformed_input = input_transpose;
@@ -290,8 +290,9 @@ template <typename T>
 class CUDNNConvTransposeGradOpKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
-    PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
-                   "It must use CUDAPlace.");
+    PADDLE_ENFORCE_EQ(
+        platform::is_gpu_place(ctx.GetPlace()), true,
+        paddle::platform::errors::PreconditionNotMet("It must use CUDAPlace."));
     auto input = ctx.Input<Tensor>("Input");
     auto filter = ctx.Input<Tensor>("Filter");
     auto output_grad = ctx.Input<Tensor>(framework::GradVarName("Output"));
@@ -393,9 +394,8 @@ class CUDNNConvTransposeGradOpKernel : public framework::OpKernel<T> {
           &transformed_output_grad);
         } break;
         default:
-          PADDLE_ENFORCE_EQ(
-              rank == 4 || rank == 5, true,
-              "Op(ConvTranspose) only supports 4-D or 5-D input Tensor.");
+          PADDLE_THROW(platform::errors::InvalidArgument(
+              "Op(ConvTranspose) only supports 4-D or 5-D input Tensor."));
       }
     } else {
       transformed_output_grad = output_grad_transpose;
diff --git a/paddle/fluid/operators/conv_transpose_op.h b/paddle/fluid/operators/conv_transpose_op.h
index 59b3677acc41658936dc678d9810c923a80bf6e1..1ea869e002af3ac8157321c66616b82517e4fabc 100644
--- a/paddle/fluid/operators/conv_transpose_op.h
+++ b/paddle/fluid/operators/conv_transpose_op.h
@@ -580,7 +580,12 @@ class DepthwiseConvTransposeKernel : public framework::OpKernel<T> {
     output->mutable_data<T>(context.GetPlace());
     int groups = context.Attr<int>("groups");
-    PADDLE_ENFORCE_EQ(groups, filter.dims()[0]);
+    PADDLE_ENFORCE_EQ(
+        groups, filter.dims()[0],
+        platform::errors::InvalidArgument(
+            "groups should be equal to the 1st dimension of filter. But "
But " + "received groups is %d and filter dimension[0] is %d", + groups, filter.dims()[0])); std::vector strides = context.Attr>("strides"); std::vector paddings = context.Attr>("paddings"); @@ -588,7 +593,10 @@ class DepthwiseConvTransposeKernel : public framework::OpKernel { std::string padding_algorithm = context.Attr("padding_algorithm"); for (auto v : dilations) { - PADDLE_ENFORCE_EQ(v, 1); + PADDLE_ENFORCE_EQ(v, 1, platform::errors::InvalidArgument( + "dilations should be 1 in depthwise conv. " + "But received dilations is %d", + v)); } auto in_dims = input->dims();