From 9cbe7bccba0f932c7fdf54c5dce5299dc24798a3 Mon Sep 17 00:00:00 2001
From: Zhang Ting <709968123@qq.com>
Date: Mon, 18 Nov 2019 14:36:20 +0800
Subject: [PATCH] modified error message and API doc for channel_last supported
 Op (#21002)

* modified error message for conv and conv_transpose, test=develop

* modified doc of conv and conv_transpose op, test=develop

* modified the expression for error message, test=develop

* modified error message for group_norm op, test=develop

* modified detail of Attr(data_format) or Attr(data_layout)

* add ValueError in API doc for maxout op, test=develop
---
 paddle/fluid/operators/conv_op.cc           |  51 +++----
 paddle/fluid/operators/conv_transpose_op.cc |  44 ++++--
 paddle/fluid/operators/group_norm_op.cc     |  49 +++++--
 python/paddle/fluid/layers/nn.py            | 145 +++++++++++++++-----
 4 files changed, 206 insertions(+), 83 deletions(-)

diff --git a/paddle/fluid/operators/conv_op.cc b/paddle/fluid/operators/conv_op.cc
index c8e45f9ec4..8a85ad8385 100644
--- a/paddle/fluid/operators/conv_op.cc
+++ b/paddle/fluid/operators/conv_op.cc
@@ -50,30 +50,28 @@ void ConvOp::InferShape(framework::InferShapeContext* ctx) const {
   const std::string data_format = ctx->Attrs().Get<std::string>("data_format");
   const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC");
 
-  PADDLE_ENFORCE_EQ(in_dims.size() == 4 || in_dims.size() == 5, true,
-                    "ShapeError: Conv input should be 4-D or 5-D tensor. But "
-                    "received: %u-D Tensor,"
-                    "the shape of Conv input is [%s]",
-                    in_dims.size(), in_dims);
+  PADDLE_ENFORCE_EQ(
+      in_dims.size() == 4 || in_dims.size() == 5, true,
+      "ShapeError: the input of Op(conv) should be 4-D or 5-D Tensor. But "
+      "received: %u-D Tensor, the shape of input is [%s].",
+      in_dims.size(), in_dims);
 
   PADDLE_ENFORCE_EQ(
       in_dims.size(), filter_dims.size(),
-      "ShapeError: Conv input dimension and filter dimension should be the "
-      "equal."
-      "But received: the shape of Conv input is [%s], input dimension of Conv "
-      "input is [%d],"
-      "the shape of filter is [%s], the filter dimension of Conv is [%d]",
+      "ShapeError: the input's dimension size and filter's dimension size of "
+      "Op(conv) should be equal. But received: the shape of input is [%s], "
+      "the dimension size of input is [%d], the shape of filter is [%s], "
+      "the dimension size of filter is [%d].",
      in_dims, in_dims.size(), filter_dims, filter_dims.size());
 
   int in_sub_stride_size = in_dims.size() - strides.size();
   PADDLE_ENFORCE_EQ(in_dims.size() - strides.size() == 2U, true,
-                    "ShapeError: the dimension of input minus the dimension of "
-                    "stride must be euqal to 2."
-                    "But received: the dimension of input minus the dimension "
-                    "of stride is [%d], the"
-                    "input dimension of Conv is [%d], the shape of Conv input "
-                    "is [%s], the stride"
-                    "dimension of Conv is [%d]",
+                    "ShapeError: the dimension size of input minus the size of "
+                    "Attr(stride) must be equal to 2 for Op(conv). "
+                    "But received: the dimension size of input minus the size "
+                    "of Attr(stride) is [%d], the "
+                    "input's dimension size is [%d], the shape of input "
+                    "is [%s], the Attr(stride)'s size is [%d].",
                     in_sub_stride_size, in_dims.size(), in_dims,
                     strides.size());
 
@@ -83,16 +81,19 @@ void ConvOp::InferShape(framework::InferShapeContext* ctx) const {
   PADDLE_ENFORCE_EQ(
       input_channels, filter_dims[1] * groups,
       "ShapeError: The number of input channels should be equal to filter "
-      "channels * groups. But received: the input channels is [%d], the shape"
-      "of input is [%s], the filter channel is [%d], the shape of filter is "
-      "[%s],"
-      "the groups is [%d]",
-      in_dims[1], in_dims, filter_dims[1], filter_dims, groups);
+      "channels * groups for Op(conv). But received: the input's channels is "
+      "[%d], the shape "
+      "of input is [%s], the filter's channel is [%d], the shape of filter is "
+      "[%s], the groups is [%d], the data_format is %s. The error may come "
+      "from wrong data_format setting.",
+      input_channels, in_dims, filter_dims[1], filter_dims, groups,
+      data_format);
   PADDLE_ENFORCE_EQ(
       filter_dims[0] % groups, 0,
-      "ShapeError: The number of output channels should be divided by groups."
-      "But received: the output channels is [%d], the shape of filter is [%s]"
-      "(the first dimension of filter is output channel), the groups is [%d]",
+      "ShapeError: The number of output channels of Op(conv) should be "
+      "divisible by groups. "
+      "But received: the output channels is [%d], the shape of filter is [%s] "
+      "(the first dimension of filter is output channel), the groups is [%d].",
       filter_dims[0], filter_dims, groups);
 
   framework::DDim in_data_dims;
diff --git a/paddle/fluid/operators/conv_transpose_op.cc b/paddle/fluid/operators/conv_transpose_op.cc
index aead007367..930d487354 100644
--- a/paddle/fluid/operators/conv_transpose_op.cc
+++ b/paddle/fluid/operators/conv_transpose_op.cc
@@ -46,30 +46,48 @@ void ConvTransposeOp::InferShape(framework::InferShapeContext* ctx) const {
   int groups = ctx->Attrs().Get<int>("groups");
   std::string padding_algorithm =
       ctx->Attrs().Get<std::string>("padding_algorithm");
-  const DataLayout data_layout = framework::StringToDataLayout(
-      ctx->Attrs().Get<std::string>("data_format"));
+  const std::string data_layout_str =
+      ctx->Attrs().Get<std::string>("data_format");
+  const framework::DataLayout data_layout =
+      framework::StringToDataLayout(data_layout_str);
 
   PADDLE_ENFORCE_EQ(in_dims.size() == 4 || in_dims.size() == 5, true,
-                    "ConvTransposeOp intput should be 4-D or 5-D tensor.");
-  PADDLE_ENFORCE_EQ(in_dims.size(), filter_dims.size(),
-                    "ConvTransposeOp input dimension and filter dimension "
-                    "should be the same.");
+                    "ShapeError: input of Op(conv_transpose) should be 4-D or "
+                    "5-D Tensor. But received: %u-D Tensor, "
+                    "the shape of input is [%s].",
+                    in_dims.size(), in_dims);
+  PADDLE_ENFORCE_EQ(
+      in_dims.size(), filter_dims.size(),
+      "ShapeError: the input's dimension size and filter's dimension size of "
+      "Op(conv_transpose) should be equal. But received: the shape of input "
+      "is [%s], the dimension size of input is [%d], the shape of filter is "
+      "[%s], the dimension size of filter is [%d].",
+      in_dims, in_dims.size(), filter_dims, filter_dims.size());
+  int in_sub_stride_size = in_dims.size() - strides.size();
   PADDLE_ENFORCE_EQ(
       in_dims.size() - strides.size(), 2U,
-      "ConvTransposeOp input dimension and strides dimension should "
-      "be consistent.");
+      "ShapeError: the input's dimension size minus Attr(stride)'s size must "
+      "be equal to 2 for Op(conv_transpose). But received: [%d], the "
+      "input's dimension size is [%d], the shape of input "
+      "is [%s], the Attr(stride)'s size is [%d].",
+      in_sub_stride_size, in_dims.size(), in_dims, strides.size());
 
   if (output_size.size())
-    PADDLE_ENFORCE_EQ(output_size.size(), strides.size(),
-                      "ConvTransposeOp output_size dimension and strides "
-                      "dimension should be the same.");
+    PADDLE_ENFORCE_EQ(
+        output_size.size(), strides.size(),
+        "The Attr(output_size) and Attr(stride) of Op(conv_transpose) "
+        "should have the same size.");
 
   const int64_t C =
       (data_layout != DataLayout::kNHWC ? in_dims[1]
                                         : in_dims[in_dims.size() - 1]);
   PADDLE_ENFORCE_EQ(
       C, filter_dims[0],
-      "The number of input channels of Op(ConvTransposeOp) should "
-      "be equal to the number of filter's channels.");
+      "ShapeError: The number of input channels should be equal to filter "
+      "channels for Op(conv_transpose). But received: the input's channels is "
+      "[%d], the shape of input is [%s], the filter's channels is [%d], the "
+      "shape of filter is [%s]. The data_format is %s. "
+      "The error may come from wrong data_format setting.",
+      C, in_dims, filter_dims[0], filter_dims, data_layout_str);
 
   framework::DDim in_data_dims;
   if (data_layout != DataLayout::kNHWC) {
diff --git a/paddle/fluid/operators/group_norm_op.cc b/paddle/fluid/operators/group_norm_op.cc
index 7cb85d636f..1ab6fbafd7 100644
--- a/paddle/fluid/operators/group_norm_op.cc
+++ b/paddle/fluid/operators/group_norm_op.cc
@@ -39,24 +39,57 @@ class GroupNormOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE(ctx->HasOutput("Variance"),
                    "Output(Variance) of GroupNormOp should not be null.");
     auto x_dim = ctx->GetInputDim("X");
-    const DataLayout data_layout = framework::StringToDataLayout(
-        ctx->Attrs().Get<std::string>("data_layout"));
+    const std::string data_layout_str =
+        ctx->Attrs().Get<std::string>("data_layout");
+    const framework::DataLayout data_layout =
+        framework::StringToDataLayout(data_layout_str);
     const int64_t channel_num =
         (data_layout == DataLayout::kNCHW ? x_dim[1] : x_dim[x_dim.size() - 1]);
     auto batch_size = x_dim[0];
     auto groups = ctx->Attrs().Get<int>("groups");
     PADDLE_ENFORCE_LE(
         groups, channel_num,
-        "'groups' must be less equal than the number of channels.");
-    PADDLE_ENFORCE_GE(groups, 1, "'groups' must be greater equal than 1.");
+        "ValueError: the Attr(groups) of Op(group_norm) must be less than or "
+        "equal to the number of channels. "
+        "But received: groups is [%s], channels is [%s], the Attr(data_layout) "
+        "is [%s]. The error may come from wrong data_layout setting.",
+        groups, channel_num, data_layout_str);
+    PADDLE_ENFORCE_GE(
+        groups, 1,
+        "ValueError: the Attr(groups) of Op(group_norm) must be "
+        "greater than or equal to 1. But received: groups is [%s].",
+        groups);
 
     if (ctx->HasInput("Scale")) {
-      PADDLE_ENFORCE_EQ(ctx->GetInputDim("Scale").size(), 1UL);
-      PADDLE_ENFORCE_EQ(ctx->GetInputDim("Scale")[0], channel_num);
+      PADDLE_ENFORCE_EQ(
+          ctx->GetInputDim("Scale").size(), 1UL,
+          "ShapeError: the Input(Scale) of Op(group_norm) should be 1-D "
+          "Tensor. "
+          "But received: %u-D Tensor, the shape of Input(Scale) is [%s].",
+          ctx->GetInputDim("Scale").size(), ctx->GetInputDim("Scale"));
+      PADDLE_ENFORCE_EQ(
+          ctx->GetInputDim("Scale")[0], channel_num,
+          "ShapeError: the Input(Scale)'s first dimension size of "
+          "Op(group_norm) must be equal to the number of channels. "
+          "But received: the Input(Scale)'s first dimension size is [%s], the "
+          "channels is [%s], the Attr(data_layout) is [%s]. The error may come "
+          "from wrong data_layout setting.",
+          ctx->GetInputDim("Scale")[0], channel_num, data_layout_str);
     }
     if (ctx->HasInput("Bias")) {
-      PADDLE_ENFORCE_EQ(ctx->GetInputDim("Bias").size(), 1UL);
-      PADDLE_ENFORCE_EQ(ctx->GetInputDim("Bias")[0], channel_num);
+      PADDLE_ENFORCE_EQ(
+          ctx->GetInputDim("Bias").size(), 1UL,
+          "ShapeError: the Input(Bias) of Op(group_norm) should be 1-D Tensor. "
+          "But received: %u-D Tensor, the shape of Input(Bias) is [%s].",
+          ctx->GetInputDim("Bias").size(), ctx->GetInputDim("Bias"));
+      PADDLE_ENFORCE_EQ(
+          ctx->GetInputDim("Bias")[0], channel_num,
+          "ShapeError: the Input(Bias)'s first dimension size of "
+          "Op(group_norm) must be equal to the number of channels. "
+          "But received: the Input(Bias)'s first dimension size is [%s], the "
+          "channels is [%s], the Attr(data_layout) is [%s]. The error may come "
+          "from wrong data_layout setting.",
+          ctx->GetInputDim("Bias")[0], channel_num, data_layout_str);
     }
 
     ctx->SetOutputDim("Y", ctx->GetInputDim("X"));
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index e5691cd8c6..5757a9af11 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -1213,7 +1213,8 @@ def conv2d(input,
         name(str|None): For detailed information, please refer to
            :ref:`api_guide_Name`. Usually name is no need to set and
            None by default.
-        data_format (str): The data format of the input and output data. An optional string from: `"NCHW"`, `"NHWC"`.
+        data_format (str, optional): Specify the data format of the input, and the data format of the output
+            will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
             The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
             `[batch_size, input_channels, input_height, input_width]`.
 
@@ -1223,6 +1224,19 @@ def conv2d(input,
         result, and if act is not None, the tensor variable storing convolution
         and non-linearity activation result.
 
+    Raises:
+        ValueError: If the type of `use_cudnn` is not bool.
+        ValueError: If `data_format` is not "NCHW" or "NHWC".
+        ValueError: If the channel dimension of the input is less than or equal to zero.
+        ValueError: If `padding` is a string, but not "SAME" or "VALID".
+        ValueError: If `padding` is a tuple, but the element corresponding to the input's batch size is not 0
+            or the element corresponding to the input's channel is not 0.
+        ShapeError: If the input is not 4-D Tensor.
+        ShapeError: If the input's dimension size and filter's dimension size are not equal.
+        ShapeError: If the dimension size of input minus the size of `stride` is not 2.
+        ShapeError: If the number of input channels is not equal to filter's channels * groups.
+        ShapeError: If the number of output channels is not divisible by groups.
+
     Examples:
         .. code-block:: python
 
@@ -1467,9 +1481,10 @@ def conv3d(input,
         name(str|None): For detailed information, please refer to
            :ref:`api_guide_Name`. Usually name is no need to set and
            None by default.
-        data_format (str): The data format of the input and output data. An optional string from: `"NCDHW"`, `"NDHWC"`.
-            The default is `"NCDHW"`. When it is `"NCDHW"`, the data is stored in the order of:
-            `[batch_size, input_channels, input_depth, input_height, input_width]`.
+        data_format (str, optional): Specify the data format of the input, and the data format of the output
+            will be consistent with that of the input. An optional string from: `"NCDHW"`, `"NDHWC"`.
+            The default is `"NCDHW"`. When it is `"NCDHW"`, the data is stored in the order of:
When it is `"NCHW"`, the data is stored in the order of: + `[batch_size, input_channels, input_height, input_width]`. Returns: A Variable holding Tensor representing the conv3d, whose data type is @@ -1477,6 +1492,19 @@ def conv3d(input, convolution result, and if act is not None, the tensor variable storing convolution and non-linearity activation result. + Raises: + ValueError: If the type of `use_cudnn` is not bool. + ValueError: If `data_format` is not "NCDHW" or "NDHWC". + ValueError: If the channel dimmention of the input is less than or equal to zero. + ValueError: If `padding` is a string, but not "SAME" or "VALID". + ValueError: If `padding` is a tuple, but the element corresponding to the input's batch size is not 0 + or the element corresponding to the input's channel is not 0. + ShapeError: If the input is not 5-D Tensor. + ShapeError: If the input's dimension size and filter's dimension size not equal. + ShapeError: If the dimension size of input minus the size of `stride` is not 2. + ShapeError: If the number of input channels is not equal to filter's channels * groups. + ShapeError: If the number of output channels is not be divided by groups. + Examples: .. code-block:: python @@ -2426,7 +2454,10 @@ def batch_norm(input, will create ParamAttr as bias_attr, the name of bias can be set in ParamAttr. If the Initializer of the bias_attr is not set, the bias is initialized zero. Default: None. - data_layout(str, default NCHW): the data_layout of input, is NCHW or NHWC. + data_layout (str, optional): Specify the data format of the input, and the data format of the output + will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`. + The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of: + `[batch_size, input_channels, input_height, input_width]`. in_place(bool, Default False): Make the input and output of batch norm reuse memory. name(str|None): For detailed information, please refer to :ref:`api_guide_Name`. Usually name is no need to set and None by default. @@ -2700,7 +2731,10 @@ def data_norm(input, act(string, Default None): Activation type, linear|relu|prelu|... epsilon(float, Default 1e-05): param_attr(ParamAttr): The parameter attribute for Parameter `scale`. - data_layout(string, default NCHW): NCHW|NHWC + data_layout (str, optional): Specify the data format of the input, and the data format of the output + will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`. + The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of: + `[batch_size, input_channels, input_height, input_width]`. in_place(bool, Default False): Make the input and output of batch norm reuse memory. name(string, Default None): A name for this layer(optional). If set None, the layer will be named automatically. @@ -2944,9 +2978,10 @@ def group_norm(input, Default: None, the default bias parameter attribute is used. For more information, please refer to :ref:`api_guide_ParamAttr` . act(str, optional): Activation to be applied to the output of group normalizaiton. - data_layout(str, optional): The data format of the input and output data. An optional string - from: `"NCHW"`, `"NHWC"`. When it is `"NCHW"`, the data is stored in the order of: - `[batch_size, channels, height, width]`. Default: "NCHW". + data_layout(str, optional): Specify the data format of the input, and the data format of the output + will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`. 
+           The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
+           `[batch_size, input_channels, input_height, input_width]`.
        name (str, optional): The default value is None. Normally there is no need for user to set this
            property. For more information, please refer to :ref:`api_guide_Name` .
 
@@ -2955,6 +2990,12 @@ def group_norm(input,
     Raises:
         ValueError: If `data_layout` is neither 'NCHW' nor 'NHWC'.
+        ValueError: If `groups` is greater than the number of input channels.
+        ValueError: If `groups` is less than 1.
+        ShapeError: If the param_attr(Scale) is not 1-D Tensor.
+        ShapeError: If the param_attr(Scale)'s first dimension size is not equal to the input channels.
+        ShapeError: If the bias_attr(Bias) is not 1-D Tensor.
+        ShapeError: If the bias_attr(Bias)'s first dimension size is not equal to the input channels.
 
     Examples:
        .. code-block:: python
 
@@ -3240,9 +3281,10 @@ def conv2d_transpose(input,
         name(str, optional): For detailed information, please refer to
             :ref:`api_guide_Name`. Usually name is no need to set and
             None by default.
-        data_format(str, optional): The data format of the input and output data. An optional string
-            from: `"NCHW"`, `"NHWC"`. When it is `"NCHW"`, the data is stored in the order of:
-            `[batch_size, input_channels, input_height, input_width]`. Default: 'NCHW'.
+        data_format (str, optional): Specify the data format of the input, and the data format of the output
+            will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
+            The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
+            `[batch_size, input_channels, input_height, input_width]`.
 
     Returns:
         A Variable holding Tensor representing the conv2d_transpose, whose
@@ -3253,8 +3295,17 @@ def conv2d_transpose(input,
         result.
 
     Raises:
-        ValueError: If the shapes of output, input, filter_size, stride, padding and
-                    groups mismatch.
+        ValueError: If the type of `use_cudnn` is not bool.
+        ValueError: If `data_format` is not "NCHW" or "NHWC".
+        ValueError: If `padding` is a string, but not "SAME" or "VALID".
+        ValueError: If `padding` is a tuple, but the element corresponding to the input's batch size is not 0
+            or the element corresponding to the input's channel is not 0.
+        ValueError: If `output_size` and filter_size are None at the same time.
+        ShapeError: If the input is not 4-D Tensor.
+        ShapeError: If the input's dimension size and filter's dimension size are not equal.
+        ShapeError: If the dimension size of input minus the size of `stride` is not 2.
+        ShapeError: If the number of input channels is not equal to filter's channels.
+        ShapeError: If the size of `output_size` is not equal to that of `stride`.
 
     Examples:
        .. code-block:: python
 
@@ -3519,9 +3570,10 @@ def conv3d_transpose(input,
         name(str, optional): For detailed information, please refer to
             :ref:`api_guide_Name`. Usually name is no need to set and
             None by default.
-        data_format(str, optional):The data format of the input and output data. An optional string from: `"NCHW"`, `"NHWC"`.
-            When it is `"NCHW"`, the data is stored in the order of: `[batch_size, input_channels, input_height, input_width]`.
-            Default: 'NCDHW'.
+        data_format (str, optional): Specify the data format of the input, and the data format of the output
+            will be consistent with that of the input. An optional string from: `"NCDHW"`, `"NDHWC"`.
+            The default is `"NCDHW"`. When it is `"NCDHW"`, the data is stored in the order of:
+            `[batch_size, input_channels, input_depth, input_height, input_width]`.
 
     Returns:
         A Variable holding Tensor representing the conv3d_transpose, whose data
@@ -3531,8 +3583,17 @@ def conv3d_transpose(input,
         variable storing transposed convolution and non-linearity activation result.
 
     Raises:
-        ValueError: If the shapes of output, input, filter_size, stride, padding and
-                    groups mismatch.
+        ValueError: If the type of `use_cudnn` is not bool.
+        ValueError: If `data_format` is not "NCDHW" or "NDHWC".
+        ValueError: If `padding` is a string, but not "SAME" or "VALID".
+        ValueError: If `padding` is a tuple, but the element corresponding to the input's batch size is not 0
+            or the element corresponding to the input's channel is not 0.
+        ValueError: If `output_size` and filter_size are None at the same time.
+        ShapeError: If the input is not 5-D Tensor.
+        ShapeError: If the input's dimension size and filter's dimension size are not equal.
+        ShapeError: If the dimension size of input minus the size of `stride` is not 2.
+        ShapeError: If the number of input channels is not equal to filter's channels.
+        ShapeError: If the size of `output_size` is not equal to that of `stride`.
 
     Examples:
        .. code-block:: python
 
@@ -5742,9 +5803,11 @@ def lrn(input, n=5, k=1.0, alpha=1e-4, beta=0.75, name=None,
        beta (float, optional): The exponent, positive. Default:0.75
        name (str, optional): The default value is None. Normally there is no need for user to set
            this property. For more information, please refer to :ref:`api_guide_Name`
-       data_format(str, optional): The data format of the input and output data. An optional string
-           from: `"NCHW"`, `"NHWC"`. When it is `"NCHW"`, the data is stored in the order of:
-           `[batch_size, input_channels, input_height, input_width]`. Default: 'NCHW'.
+       data_format (str, optional): Specify the data format of the input, and the data format of the output
+           will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
+           The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
+           `[batch_size, input_channels, input_height, input_width]`.
+
     Returns:
        Variable: A tensor variable storing the transformation result with the same shape and data type as input.
 
@@ -6376,11 +6439,11 @@ def image_resize(input,
        align_mode(int)  :  An optional for bilinear interpolation. can be \'0\'
                            for src_idx = scale*(dst_indx+0.5)-0.5 , can be \'1\' for
                            src_idx = scale*dst_index.
-       data_format(str, optional): NCHW(num_batches, channels, height, width) or
-                                   NHWC(num_batches, height, width, channels) for 4-D Tensor,
-                                   NCDHW(num_batches, channels, depth, height, width) or
-                                   NDHWC(num_batches, depth, height, width, channels) for 5-D Tensor.
-                                   Default: 'NCHW'.
+       data_format (str, optional): Specify the data format of the input, and the data format of the output
+           will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`, `"NCDHW"`,
+           `"NDHWC"`. The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
+           `[batch_size, input_channels, input_height, input_width]`. When it is `"NCDHW"`, the data is stored
+           in the order of: `[batch_size, input_channels, input_depth, input_height, input_width]`.
 
     Returns:
        A 4-D Tensor of the shape (num_batches, channels, out_h, out_w) or (num_batches, out_h, out_w, channels),
@@ -6696,8 +6759,10 @@ def resize_bilinear(input,
                            Default: None
        align_corners(bool): ${align_corners_comment}
        align_mode(bool): ${align_mode_comment}
-       data_format(str, optional): NCHW(num_batches, channels, height, width) or
-                                   NHWC(num_batches, height, width, channels). Default: 'NCHW'.
+       data_format (str, optional): Specify the data format of the input, and the data format of the output
+           will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
+           The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
+           `[batch_size, input_channels, input_height, input_width]`.
        name(str, optional): The default value is None. Normally there is no need for user to set this property.
            For more information, please refer to :ref:`api_guide_Name`
     Returns:
@@ -6858,9 +6923,10 @@ def resize_trilinear(input,
                            Default: None
        align_corners(bool): ${align_corners_comment}
        align_mode(bool): ${align_mode_comment}
-       data_format(str, optional): NCDHW(num_batches, channels, depth, height, width) or
-                                   NDHWC(num_batches, depth, height, width, channels).
-                                   Default: 'NCDHW'.
+       data_format (str, optional): Specify the data format of the input, and the data format of the output
+           will be consistent with that of the input. An optional string from: `"NCDHW"`, `"NDHWC"`.
+           The default is `"NCDHW"`. When it is `"NCDHW"`, the data is stored in the order of:
+           `[batch_size, input_channels, input_depth, input_height, input_width]`.
 
     Returns:
        Variable: A 5-D Tensor(NCDHW or NDHWC)
@@ -7010,9 +7076,10 @@ def resize_nearest(input,
                            errors would be occured in graph constructing stage.
                            Default: None
        align_corners(bool): ${align_corners_comment}
-       data_format(str, optional): NCHW(num_batches, channels, height, width) or
-                                   NHWC(num_batches, height, width, channels).
-                                   Default: 'NCHW'.
+       data_format (str, optional): Specify the data format of the input, and the data format of the output
+           will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
+           The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
+           `[batch_size, input_channels, input_height, input_width]`.
 
     Returns:
        Variable: 4-D tensor(NCHW or NHWC).
@@ -11104,6 +11171,7 @@ def maxout(x, groups, name=None, axis=1):
     Raises:
         ValueError: If `axis` is not 1, -1 or 3.
+        ValueError: If the number of input channels is not divisible by `groups`.
 
     Examples:
         .. code-block:: python
@@ -11264,8 +11332,11 @@ def affine_channel(x,
        bias (Variable): 1D input of shape (C), the c-th element is the bias
            of the affine transformation for the c-th channel of the input.
            The data type is float32 or float64.
-       data_layout (str, default NCHW): NCHW or NHWC. If input is 2D
-           tensor, you can ignore data_layout.
+       data_layout (str, optional): Specify the data format of the input, and the data format of the output
+           will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
+           The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
+           `[batch_size, input_channels, input_height, input_width]`. If input is 2D Tensor, you can ignore
+           data_layout.
        name (str, default None): The name of this layer. For more information,
            please refer to :ref:`api_guide_Name` .
        act (str, default None): Activation to be applied to the output of this layer.
-- 
GitLab
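
Usage note (not part of the patch itself): a minimal sketch of the channel-last behaviour the updated conv2d docs describe, assuming the PaddlePaddle 1.6-era `paddle.fluid` API; the tensor name and shapes below are illustrative assumptions only.

    import paddle.fluid as fluid

    # With data_format='NHWC' the channel axis is last, so a 4-D image batch
    # is laid out as [batch_size, height, width, channels].
    img = fluid.data(name='img', shape=[None, 32, 32, 3], dtype='float32')
    out = fluid.layers.conv2d(input=img, num_filters=8, filter_size=3,
                              data_format='NHWC')

    # A layout string other than 'NCHW' or 'NHWC' is rejected while the program
    # is being built, raising the ValueError now listed in the docstring.
    try:
        fluid.layers.conv2d(input=img, num_filters=8, filter_size=3,
                            data_format='NCWH')
    except ValueError as e:
        print(e)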