diff --git a/paddle/fluid/operators/conv_op.cc b/paddle/fluid/operators/conv_op.cc index c8e45f9ec4ee8378560ad5b93838476a68f8727a..8a85ad83851fb8cbc505decdca6a7b9b83b9b4c0 100644 --- a/paddle/fluid/operators/conv_op.cc +++ b/paddle/fluid/operators/conv_op.cc @@ -50,30 +50,28 @@ void ConvOp::InferShape(framework::InferShapeContext* ctx) const { const std::string data_format = ctx->Attrs().Get("data_format"); const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC"); - PADDLE_ENFORCE_EQ(in_dims.size() == 4 || in_dims.size() == 5, true, - "ShapeError: Conv input should be 4-D or 5-D tensor. But " - "received: %u-D Tensor," - "the shape of Conv input is [%s]", - in_dims.size(), in_dims); + PADDLE_ENFORCE_EQ( + in_dims.size() == 4 || in_dims.size() == 5, true, + "ShapeError: the input of Op(conv) should be 4-D or 5-D Tensor. But " + "received: %u-D Tensor, the shape of input is [%s].", + in_dims.size(), in_dims); PADDLE_ENFORCE_EQ( in_dims.size(), filter_dims.size(), - "ShapeError: Conv input dimension and filter dimension should be the " - "equal." - "But received: the shape of Conv input is [%s], input dimension of Conv " - "input is [%d]," - "the shape of filter is [%s], the filter dimension of Conv is [%d]", + "ShapeError: the input's dimension size and filter's dimension size of " + "Op(conv) should be equal. But received: the shape of input is [%s], " + "the dimension size of input is [%d], the shape of filter is [%s], " + "the dimension size of filter is [%d].", in_dims, in_dims.size(), filter_dims, filter_dims.size()); int in_sub_stride_size = in_dims.size() - strides.size(); PADDLE_ENFORCE_EQ(in_dims.size() - strides.size() == 2U, true, - "ShapeError: the dimension of input minus the dimension of " - "stride must be euqal to 2." 
- "But received: the dimension of input minus the dimension " - "of stride is [%d], the" - "input dimension of Conv is [%d], the shape of Conv input " - "is [%s], the stride" - "dimension of Conv is [%d]", + "ShapeError: the dimension size of input minus the size of " + "Attr(stride) must be euqal to 2 for Op(conv)." + "But received: the dimension size of input minus the size " + "of Attr(stride) is [%d], the " + "input's dimension size is [%d], the shape of input " + "is [%s], the Attr(stride)'s size is [%d].", in_sub_stride_size, in_dims.size(), in_dims, strides.size()); @@ -83,16 +81,19 @@ void ConvOp::InferShape(framework::InferShapeContext* ctx) const { PADDLE_ENFORCE_EQ( input_channels, filter_dims[1] * groups, "ShapeError: The number of input channels should be equal to filter " - "channels * groups. But received: the input channels is [%d], the shape" - "of input is [%s], the filter channel is [%d], the shape of filter is " - "[%s]," - "the groups is [%d]", - in_dims[1], in_dims, filter_dims[1], filter_dims, groups); + "channels * groups for Op(conv). But received: the input's channels is " + "[%d], the shape " + "of input is [%s], the filter's channel is [%d], the shape of filter is " + "[%s], the groups is [%d], the data_format is %s. The error may come " + "from wrong data_format setting.", + input_channels, in_dims, filter_dims[1], filter_dims, groups, + data_format); PADDLE_ENFORCE_EQ( filter_dims[0] % groups, 0, - "ShapeError: The number of output channels should be divided by groups." - "But received: the output channels is [%d], the shape of filter is [%s]" - "(the first dimension of filter is output channel), the groups is [%d]", + "ShapeError: The number of output channels of Op(conv) should be divided " + "by groups. 
" + "But received: the output channels is [%d], the shape of filter is [%s] " + "(the first dimension of filter is output channel), the groups is [%d].", filter_dims[0], filter_dims, groups); framework::DDim in_data_dims; diff --git a/paddle/fluid/operators/conv_transpose_op.cc b/paddle/fluid/operators/conv_transpose_op.cc index aead007367efe07f51519d8fcc54e8872d6bd153..930d48735405015f2f5aefd876e7590837348cc4 100644 --- a/paddle/fluid/operators/conv_transpose_op.cc +++ b/paddle/fluid/operators/conv_transpose_op.cc @@ -46,30 +46,48 @@ void ConvTransposeOp::InferShape(framework::InferShapeContext* ctx) const { int groups = ctx->Attrs().Get("groups"); std::string padding_algorithm = ctx->Attrs().Get("padding_algorithm"); - const DataLayout data_layout = framework::StringToDataLayout( - ctx->Attrs().Get("data_format")); + const std::string data_layout_str = + ctx->Attrs().Get("data_format"); + const framework::DataLayout data_layout = + framework::StringToDataLayout(data_layout_str); PADDLE_ENFORCE_EQ(in_dims.size() == 4 || in_dims.size() == 5, true, - "ConvTransposeOp intput should be 4-D or 5-D tensor."); - PADDLE_ENFORCE_EQ(in_dims.size(), filter_dims.size(), - "ConvTransposeOp input dimension and filter dimension " - "should be the same."); + "ShapeError: input of Op(conv_transpose) should be 4-D or " + "5-D Tensor. But received: %u-D Tensor, " + "the shape of input is [%s]", + in_dims.size(), in_dims); + PADDLE_ENFORCE_EQ( + in_dims.size(), filter_dims.size(), + "ShapeError: the input's dimension size and filter's dimension size of " + "Op (conv_transpose) should be equal. But received: the shape of input " + "is [%s], the dimension size of input is [%d], the shape of filter is " + "[%s], the dimension size of filter is [%d]. 
", + in_dims, in_dims.size(), filter_dims, filter_dims.size()); + int in_sub_stride_size = in_dims.size() - strides.size(); PADDLE_ENFORCE_EQ( in_dims.size() - strides.size(), 2U, - "ConvTransposeOp input dimension and strides dimension should " - "be consistent."); + "ShapeError: the input's dimension size minus Attr(stride)'s size must " + "be euqal to 2 for Op(conv_transpose). But received: [%d], the " + "input's dimension size is [%d], the shape of input " + "is [%s], the Attr(stride)'s size is [%d].", + in_sub_stride_size, in_dims.size(), in_dims, strides.size()); if (output_size.size()) - PADDLE_ENFORCE_EQ(output_size.size(), strides.size(), - "ConvTransposeOp output_size dimension and strides " - "dimension should be the same."); + PADDLE_ENFORCE_EQ( + output_size.size(), strides.size(), + "The Attr(output_size) and Attr(stride) of Op(conv_transpose) " + "should be the same."); const int64_t C = (data_layout != DataLayout::kNHWC ? in_dims[1] : in_dims[in_dims.size() - 1]); PADDLE_ENFORCE_EQ( C, filter_dims[0], - "The number of input channels of Op(ConvTransposeOp) should " - "be equal to the number of filter's channels."); + "ShapeError: The number of input channels should be equal to filter " + "channels for Op(conv_transpose). But received: the input's channels is " + "[%d], the shape of input is [%s], the filter's channels is [%d], the " + "shape of filter is [%s]. The data_format is %s." 
+ "The error may come from wrong data_format setting.", + C, in_dims, filter_dims[0], filter_dims, data_layout_str); framework::DDim in_data_dims; if (data_layout != DataLayout::kNHWC) { diff --git a/paddle/fluid/operators/group_norm_op.cc b/paddle/fluid/operators/group_norm_op.cc index 7cb85d636fbf0a856878588cfb1e86eb0c74a678..1ab6fbafd7332b98c8405d8b2c87c4bf1e0e2002 100644 --- a/paddle/fluid/operators/group_norm_op.cc +++ b/paddle/fluid/operators/group_norm_op.cc @@ -39,24 +39,57 @@ class GroupNormOp : public framework::OperatorWithKernel { PADDLE_ENFORCE(ctx->HasOutput("Variance"), "Output(Variance) of GroupNormOp should not be null."); auto x_dim = ctx->GetInputDim("X"); - const DataLayout data_layout = framework::StringToDataLayout( - ctx->Attrs().Get("data_layout")); + const std::string data_layout_str = + ctx->Attrs().Get("data_layout"); + const framework::DataLayout data_layout = + framework::StringToDataLayout(data_layout_str); const int64_t channel_num = (data_layout == DataLayout::kNCHW ? x_dim[1] : x_dim[x_dim.size() - 1]); auto batch_size = x_dim[0]; auto groups = ctx->Attrs().Get("groups"); PADDLE_ENFORCE_LE( groups, channel_num, - "'groups' must be less equal than the number of channels."); - PADDLE_ENFORCE_GE(groups, 1, "'groups' must be greater equal than 1."); + "ValueError: the Attr(groups) of Op(group_norm) must be less than or " + "equal to the number of channels. " + "But received: groups is [%s], channels is [%s], the Attr(data_layout) " + "is [%s]. The error may come from wrong data_layout setting.", + groups, channel_num, data_layout_str); + PADDLE_ENFORCE_GE( + groups, 1, + "ValueError: the Attr(groups) of Op(group_norm) must be " + "greater than or equal to 1. 
But received: groups is [%s].", + groups); if (ctx->HasInput("Scale")) { - PADDLE_ENFORCE_EQ(ctx->GetInputDim("Scale").size(), 1UL); - PADDLE_ENFORCE_EQ(ctx->GetInputDim("Scale")[0], channel_num); + PADDLE_ENFORCE_EQ( + ctx->GetInputDim("Scale").size(), 1UL, + "ShapeError: the Input(Scale) of Op(group_norm) should be 1-D " + "Tensor. " + "But received: %u-D Tensor, the shape of Input(Scale) is [%s].", + ctx->GetInputDim("Scale").size(), ctx->GetInputDim("Scale")); + PADDLE_ENFORCE_EQ( + ctx->GetInputDim("Scale")[0], channel_num, + "ShapeError: the Input(Scale)'s first dimension size of " + "Op(group_norm) must be equal to the number of channels. " + "But received: the Input(Scale)'s first dimension size is [%s], the " + "channels is [%s], the Attr(data_layout) is [%s]. The error may come " + "from wrong data_layout setting.", + ctx->GetInputDim("Scale")[0], channel_num, data_layout_str); } if (ctx->HasInput("Bias")) { - PADDLE_ENFORCE_EQ(ctx->GetInputDim("Bias").size(), 1UL); - PADDLE_ENFORCE_EQ(ctx->GetInputDim("Bias")[0], channel_num); + PADDLE_ENFORCE_EQ( + ctx->GetInputDim("Bias").size(), 1UL, + "ShapeError: the Input(Bias) of Op(group_norm) should be 1-D Tensor. " + "But received: %u-D Tensor, the shape of Input(Bias) is [%s].", + ctx->GetInputDim("Bias").size(), ctx->GetInputDim("Bias")); + PADDLE_ENFORCE_EQ( + ctx->GetInputDim("Bias")[0], channel_num, + "ShapeError: the Input(Bias)'s first dimension size of " + "Op(group_norm) must be equal to the number of channels. " + "But received: the Input(Bias)'s first dimension size is [%s], the " + "channels is [%s], the Attr(data_layout) is [%s]. 
The error may come " + "from wrong data_layout setting.", + ctx->GetInputDim("Bias")[0], channel_num, data_layout_str); } ctx->SetOutputDim("Y", ctx->GetInputDim("X")); diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index e5691cd8c659bcc7b703a7a24c65fd7a422a060f..5757a9af113a3ba391be15a3588420900bdeafcb 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -1213,7 +1213,8 @@ def conv2d(input, name(str|None): For detailed information, please refer to :ref:`api_guide_Name`. Usually name is no need to set and None by default. - data_format (str): The data format of the input and output data. An optional string from: `"NCHW"`, `"NHWC"`. + data_format (str, optional): Specify the data format of the input, and the data format of the output + will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`. The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of: `[batch_size, input_channels, input_height, input_width]`. @@ -1223,6 +1224,19 @@ def conv2d(input, result, and if act is not None, the tensor variable storing convolution and non-linearity activation result. + Raises: + ValueError: If the type of `use_cudnn` is not bool. + ValueError: If `data_format` is not "NCHW" or "NHWC". + ValueError: If the channel dimmention of the input is less than or equal to zero. + ValueError: If `padding` is a string, but not "SAME" or "VALID". + ValueError: If `padding` is a tuple, but the element corresponding to the input's batch size is not 0 + or the element corresponding to the input's channel is not 0. + ShapeError: If the input is not 4-D Tensor. + ShapeError: If the input's dimension size and filter's dimension size not equal. + ShapeError: If the dimension size of input minus the size of `stride` is not 2. + ShapeError: If the number of input channels is not equal to filter's channels * groups. 
+ ShapeError: If the number of output channels is not divisible by groups. + + Examples: .. code-block:: python @@ -1467,9 +1481,10 @@ def conv3d(input, name(str|None): For detailed information, please refer to :ref:`api_guide_Name`. Usually name is no need to set and None by default. - data_format (str): The data format of the input and output data. An optional string from: `"NCDHW"`, `"NDHWC"`. - The default is `"NCDHW"`. When it is `"NCDHW"`, the data is stored in the order of: - `[batch_size, input_channels, input_depth, input_height, input_width]`. + data_format (str, optional): Specify the data format of the input, and the data format of the output + will be consistent with that of the input. An optional string from: `"NCDHW"`, `"NDHWC"`. + The default is `"NCDHW"`. When it is `"NCDHW"`, the data is stored in the order of: + `[batch_size, input_channels, input_depth, input_height, input_width]`. Returns: A Variable holding Tensor representing the conv3d, whose data type is convolution result, and if act is not None, the tensor variable storing convolution and non-linearity activation result. + Raises: + ValueError: If the type of `use_cudnn` is not bool. + ValueError: If `data_format` is not "NCDHW" or "NDHWC". + ValueError: If the channel dimension of the input is less than or equal to zero. + ValueError: If `padding` is a string, but not "SAME" or "VALID". + ValueError: If `padding` is a tuple, but the element corresponding to the input's batch size is not 0 + or the element corresponding to the input's channel is not 0. + ShapeError: If the input is not 5-D Tensor. + ShapeError: If the input's dimension size and filter's dimension size not equal. + ShapeError: If the dimension size of input minus the size of `stride` is not 2. + ShapeError: If the number of input channels is not equal to filter's channels * groups. + ShapeError: If the number of output channels is not divisible by groups. + Examples: .. 
code-block:: python @@ -2426,7 +2454,10 @@ def batch_norm(input, will create ParamAttr as bias_attr, the name of bias can be set in ParamAttr. If the Initializer of the bias_attr is not set, the bias is initialized zero. Default: None. - data_layout(str, default NCHW): the data_layout of input, is NCHW or NHWC. + data_layout (str, optional): Specify the data format of the input, and the data format of the output + will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`. + The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of: + `[batch_size, input_channels, input_height, input_width]`. in_place(bool, Default False): Make the input and output of batch norm reuse memory. name(str|None): For detailed information, please refer to :ref:`api_guide_Name`. Usually name is no need to set and None by default. @@ -2700,7 +2731,10 @@ def data_norm(input, act(string, Default None): Activation type, linear|relu|prelu|... epsilon(float, Default 1e-05): param_attr(ParamAttr): The parameter attribute for Parameter `scale`. - data_layout(string, default NCHW): NCHW|NHWC + data_layout (str, optional): Specify the data format of the input, and the data format of the output + will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`. + The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of: + `[batch_size, input_channels, input_height, input_width]`. in_place(bool, Default False): Make the input and output of batch norm reuse memory. name(string, Default None): A name for this layer(optional). If set None, the layer will be named automatically. @@ -2944,9 +2978,10 @@ def group_norm(input, Default: None, the default bias parameter attribute is used. For more information, please refer to :ref:`api_guide_ParamAttr` . act(str, optional): Activation to be applied to the output of group normalizaiton. - data_layout(str, optional): The data format of the input and output data. 
An optional string - from: `"NCHW"`, `"NHWC"`. When it is `"NCHW"`, the data is stored in the order of: - `[batch_size, channels, height, width]`. Default: "NCHW". + data_layout(str, optional): Specify the data format of the input, and the data format of the output + will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`. + The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of: + `[batch_size, input_channels, input_height, input_width]`. name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` . @@ -2955,6 +2990,12 @@ def group_norm(input, Raises: ValueError: If `data_layout` is neither 'NCHW' nor 'NHWC'. + ValueError: If `groups` is greater than the number of input channels. + ValueError: If `groups` is less than 1. + ShapeError: If the param_attr(Scale) is not 1-D Tensor. + ShapeError: If the param_attr(Scale)'s first dimension size is not equal to the input channels. + ShapeError: If the bias_attr(Bias) is not 1-D Tensor. + ShapeError: If the bias_attr(Bias)'s first dimension size is not equal to the input channels. Examples: .. code-block:: python @@ -3240,9 +3281,10 @@ def conv2d_transpose(input, name(str, optional): For detailed information, please refer to :ref:`api_guide_Name`. Usually name is no need to set and None by default. - data_format(str, optional): The data format of the input and output data. An optional string - from: `"NCHW"`, `"NHWC"`. When it is `"NCHW"`, the data is stored in the order of: - `[batch_size, input_channels, input_height, input_width]`. Default: 'NCHW'. + data_format (str, optional): Specify the data format of the input, and the data format of the output + will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`. + The default is `"NCHW"`. 
When it is `"NCHW"`, the data is stored in the order of: + `[batch_size, input_channels, input_height, input_width]`. Returns: A Variable holding Tensor representing the conv2d_transpose, whose @@ -3253,8 +3295,17 @@ def conv2d_transpose(input, result. Raises: - ValueError: If the shapes of output, input, filter_size, stride, padding and - groups mismatch. + ValueError: If the type of `use_cudnn` is not bool. + ValueError: If `data_format` is not "NCHW" or "NHWC". + ValueError: If `padding` is a string, but not "SAME" or "VALID". + ValueError: If `padding` is a tuple, but the element corresponding to the input's batch size is not 0 + or the element corresponding to the input's channel is not 0. + ValueError: If `output_size` and filter_size are None at the same time. + ShapeError: If the input is not 4-D Tensor. + ShapeError: If the input's dimension size and filter's dimension size not equal. + ShapeError: If the dimension size of input minus the size of `stride` is not 2. + ShapeError: If the number of input channels is not equal to filter's channels. + ShapeError: If the size of `output_size` is not equal to that of `stride`. Examples: .. code-block:: python @@ -3519,9 +3570,10 @@ def conv3d_transpose(input, name(str, optional): For detailed information, please refer to :ref:`api_guide_Name`. Usually name is no need to set and None by default. - data_format(str, optional):The data format of the input and output data. An optional string from: `"NCHW"`, `"NHWC"`. - When it is `"NCHW"`, the data is stored in the order of: `[batch_size, input_channels, input_height, input_width]`. - Default: 'NCDHW'. + data_format (str, optional): Specify the data format of the input, and the data format of the output + will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`. + The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of: + `[batch_size, input_channels, input_height, input_width]`. 
Returns: A Variable holding Tensor representing the conv3d_transpose, whose data @@ -3531,8 +3583,17 @@ def conv3d_transpose(input, variable storing transposed convolution and non-linearity activation result. Raises: - ValueError: If the shapes of output, input, filter_size, stride, padding and - groups mismatch. + ValueError: If the type of `use_cudnn` is not bool. + ValueError: If `data_format` is not "NCDHW" or "NDHWC". + ValueError: If `padding` is a string, but not "SAME" or "VALID". + ValueError: If `padding` is a tuple, but the element corresponding to the input's batch size is not 0 + or the element corresponding to the input's channel is not 0. + ValueError: If `output_size` and filter_size are None at the same time. + ShapeError: If the input is not 5-D Tensor. + ShapeError: If the input's dimension size and filter's dimension size not equal. + ShapeError: If the dimension size of input minus the size of `stride` is not 2. + ShapeError: If the number of input channels is not equal to filter's channels. + ShapeError: If the size of `output_size` is not equal to that of `stride`. Examples: .. code-block:: python @@ -5742,9 +5803,11 @@ def lrn(input, n=5, k=1.0, alpha=1e-4, beta=0.75, name=None, beta (float, optional): The exponent, positive. Default:0.75 name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` - data_format(str, optional): The data format of the input and output data. An optional string - from: `"NCHW"`, `"NHWC"`. When it is `"NCHW"`, the data is stored in the order of: - `[batch_size, input_channels, input_height, input_width]`. Default: 'NCHW'. + data_format (str, optional): Specify the data format of the input, and the data format of the output + will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`. + The default is `"NCHW"`. 
When it is `"NCHW"`, the data is stored in the order of: + `[batch_size, input_channels, input_height, input_width]`. + Returns: Variable: A tensor variable storing the transformation result with the same shape and data type as input. @@ -6376,11 +6439,11 @@ def image_resize(input, align_mode(int) : An optional for bilinear interpolation. can be \'0\' for src_idx = scale*(dst_indx+0.5)-0.5 , can be \'1\' for src_idx = scale*dst_index. - data_format(str, optional): NCHW(num_batches, channels, height, width) or - NHWC(num_batches, height, width, channels) for 4-D Tensor, - NCDHW(num_batches, channels, depth, height, width) or - NDHWC(num_batches, depth, height, width, channels) for 5-D Tensor. - Default: 'NCHW'. + data_format (str, optional): Specify the data format of the input, and the data format of the output + will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`, `"NCDHW"`, + `"NDHWC"`. The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of: + `[batch_size, input_channels, input_height, input_width]`. When it is `"NCHW"`, the data is stored + in the order of: `[batch_size, input_channels, input_depth, input_height, input_width]`. Returns: A 4-D Tensor of the shape (num_batches, channels, out_h, out_w) or (num_batches, out_h, out_w, channels), @@ -6696,8 +6759,10 @@ def resize_bilinear(input, Default: None align_corners(bool): ${align_corners_comment} align_mode(bool): ${align_mode_comment} - data_format(str, optional): NCHW(num_batches, channels, height, width) or - NHWC(num_batches, height, width, channels). Default: 'NCHW'. + data_format (str, optional): Specify the data format of the input, and the data format of the output + will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`. + The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of: + `[batch_size, input_channels, input_height, input_width]`. 
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` Returns: @@ -6858,9 +6923,10 @@ def resize_trilinear(input, Default: None align_corners(bool): ${align_corners_comment} align_mode(bool): ${align_mode_comment} - data_format(str, optional): NCDHW(num_batches, channels, depth, height, width) or - NDHWC(num_batches, depth, height, width, channels). - Default: 'NCDHW'. + data_format (str, optional): Specify the data format of the input, and the data format of the output + will be consistent with that of the input. An optional string from: `"NCDHW"`, `"NDHWC"`. + The default is `"NCDHW"`. When it is `"NCDHW"`, the data is stored in the order of: + `[batch_size, input_channels, input_depth, input_height, input_width]`. Returns: Variable: A 5-D Tensor(NCDHW or NDHWC) @@ -7010,9 +7076,10 @@ def resize_nearest(input, errors would be occured in graph constructing stage. Default: None align_corners(bool): ${align_corners_comment} - data_format(str, optional): NCHW(num_batches, channels, height, width) or - NHWC(num_batches, height, width, channels). - Default: 'NCHW'. + data_format (str, optional): Specify the data format of the input, and the data format of the output + will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`. + The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of: + `[batch_size, input_channels, input_height, input_width]`. Returns: Variable: 4-D tensor(NCHW or NHWC). @@ -11104,6 +11171,7 @@ def maxout(x, groups, name=None, axis=1): Raises: ValueError: If `axis` is not 1, -1 or 3. + ValueError: If the number of input channels can not be divisible by `groups`. Examples: .. code-block:: python @@ -11264,8 +11332,11 @@ def affine_channel(x, bias (Variable): 1D input of shape (C), the c-th element is the bias of the affine transformation for the c-th channel of the input. 
The data type is float32 or float64. - data_layout (str, default NCHW): NCHW or NHWC. If input is 2D - tensor, you can ignore data_layout. + data_layout (str, optional): Specify the data format of the input, and the data format of the output + will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`. + The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of: + `[batch_size, input_channels, input_height, input_width]`. If input is 2D Tensor, you can ignore + data_layout. name (str, default None): The name of this layer. For more information, please refer to :ref:`api_guide_Name` . act (str, default None): Activation to be applied to the output of this layer.