diff --git a/paddle/fluid/operators/unfold_op.cc b/paddle/fluid/operators/unfold_op.cc
index 6500eda638628a71d6619c8022175c0c2b674bfa..fd592bf35a834064dddaaa16f8ec1274b6f838e6 100644
--- a/paddle/fluid/operators/unfold_op.cc
+++ b/paddle/fluid/operators/unfold_op.cc
@@ -61,10 +61,12 @@ class UnfoldOp : public framework::OperatorWithKernel {
  public:
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of UnfoldOp should not be null");
-    PADDLE_ENFORCE(ctx->HasOutput("Y"),
-                   "Output(Y) of UnfoldOp should not be null");
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput("X"), true,
+        platform::errors::NotFound("Input(X) of UnfoldOp should not be null"));
+    PADDLE_ENFORCE_EQ(
+        ctx->HasOutput("Y"), true,
+        platform::errors::NotFound("Output(Y) of UnfoldOp should not be null"));
     auto in_dims = ctx->GetInputDim("X");
     std::vector<int> kernel_sizes =
         ctx->Attrs().Get<std::vector<int>>("kernel_sizes");
@@ -74,31 +76,36 @@ class UnfoldOp : public framework::OperatorWithKernel {
         ctx->Attrs().Get<std::vector<int>>("dilations");
 
     // Only [N, C, H, W] input supported now
-    PADDLE_ENFORCE(
-        in_dims.size() == 4,
-        "Input should be 4-D tensor of format [N, C, H, W], but get %u",
-        in_dims.size());
-    PADDLE_ENFORCE(
-        in_dims.size() - kernel_sizes.size() == 2U,
-        "The dims of X should be larger than that of kernel_sizes "
-        "by a number of 2, due to the batch size and input channel dim. "
-        "But recieved dims(X:%u) - dims(kernel_sizes:%u) != 2",
-        in_dims.size(), kernel_sizes.size());
+    PADDLE_ENFORCE_EQ(
+        in_dims.size(), 4,
+        platform::errors::InvalidArgument(
+            "Input should be 4-D tensor of format [N, C, H, W], but get %u",
+            in_dims.size()));
+    PADDLE_ENFORCE_EQ(
+        in_dims.size() - kernel_sizes.size(), 2U,
+        platform::errors::InvalidArgument(
+            "The dims of X should be larger than that of kernel_sizes "
+            "by a number of 2, due to the batch size and input channel dim. "
+            "But received dims(X:%u) - dims(kernel_sizes:%u) != 2",
+            in_dims.size(), kernel_sizes.size()));
     PADDLE_ENFORCE_EQ(
         strides.size(), kernel_sizes.size(),
-        "The dims of strides should be the same with that of kernel_sizes. "
-        "But recieved dims(strides: %u) != dims(kernel_sizes: %u).",
-        strides.size(), kernel_sizes.size());
+        platform::errors::InvalidArgument(
+            "The dims of strides should be the same with that of kernel_sizes. "
+            "But received dims(strides: %u) != dims(kernel_sizes: %u).",
+            strides.size(), kernel_sizes.size()));
     PADDLE_ENFORCE_EQ(
         paddings.size(), 2 * strides.size(),
-        "The dims of paddings should be 2 times of that of strides. "
-        "But recieved dims(paddings: %u) != 2*dims(strides: %u).",
-        paddings.size(), strides.size());
+        platform::errors::InvalidArgument(
+            "The dims of paddings should be 2 times of that of strides. "
+            "But received dims(paddings: %u) != 2*dims(strides: %u).",
+            paddings.size(), strides.size()));
     PADDLE_ENFORCE_EQ(
         strides.size(), dilations.size(),
-        "The dims of strides should be the same with that of dilations. "
-        "But recieved dims(strides: %u) != dims(dilations: %u).",
-        strides.size(), dilations.size());
+        platform::errors::InvalidArgument(
+            "The dims of strides should be the same with that of dilations. "
+            "But received dims(strides: %u) != dims(dilations: %u).",
+            strides.size(), dilations.size()));
 
     std::vector<int> out_dims;
     out_dims.push_back(in_dims[0]);
@@ -131,11 +138,15 @@ class UnfoldGradOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Y")),
-                   "The gradient of Y should not be null");
-    PADDLE_ENFORCE(ctx->HasInput("X"), "The input X should not be null");
-    PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")),
-                   "The gradient of X should not be null");
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput(framework::GradVarName("Y")), true,
+        platform::errors::NotFound("The gradient of Y should not be null"));
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput("X"), true,
+        platform::errors::NotFound("The input X should not be null"));
+    PADDLE_ENFORCE_EQ(
+        ctx->HasOutput(framework::GradVarName("X")), true,
+        platform::errors::NotFound("The gradient of X should not be null"));
     ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
   }
 
diff --git a/paddle/fluid/operators/unfold_op.h b/paddle/fluid/operators/unfold_op.h
index 97e8143bc052b346a85b50ab26bb8563e48f30d9..f22559f1f38c2cbd1ffd86cdf0bf015c43ba9062 100644
--- a/paddle/fluid/operators/unfold_op.h
+++ b/paddle/fluid/operators/unfold_op.h
@@ -29,12 +29,14 @@ inline int CalcOutputSize(int input_size, int filter_size, int dilation,
                           int padding1, int padding2, int stride) {
   const int dkernel = dilation * (filter_size - 1) + 1;
   int output_size = (input_size + padding1 + padding2 - dkernel) / stride + 1;
-  PADDLE_ENFORCE(output_size > 0,
-                 "Due to the settings of padding(%d, %d), filter_size(%d), "
-                 "dilation(%d) and "
-                 "stride(%d), the output size is less than 0, please check "
-                 "again. Input_size:%d",
-                 padding1, padding2, filter_size, dilation, stride, input_size);
+  PADDLE_ENFORCE_GT(
+      output_size, 0,
+      platform::errors::InvalidArgument(
+          "Due to the settings of padding(%d, %d), filter_size(%d), "
+          "dilation(%d) and "
+          "stride(%d), the output size is less than 0, please check "
+          "again. Input_size:%d",
+          padding1, padding2, filter_size, dilation, stride, input_size));
   return output_size;
 }
 
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 2436e5f2beccb0508ef111919dc2e65f61fd7aff..ff33031c73e6ea09459720199dc35027c7f19849 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -13990,6 +13990,8 @@ def unfold(x, kernel_sizes, strides=1, paddings=0, dilations=1, name=None):
 
     helper = LayerHelper("unfold", **locals())
 
+    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'unfold')
+
     assert len(x.shape) == 4, \
         "input should be the format of [N, C, H, W]"