From 3833b511a62460ee65594eded6951bf8b729cf9e Mon Sep 17 00:00:00 2001 From: Kaipeng Deng Date: Thu, 10 Oct 2019 16:48:38 +0800 Subject: [PATCH] refine en API doc (#20206) * refine en doc. test=develop. test=document_fix --- paddle/fluid/API.spec | 22 +- paddle/fluid/operators/kldiv_loss_op.cc | 9 +- paddle/fluid/operators/pool_op.cc | 68 ++--- paddle/fluid/operators/spectral_norm_op.cc | 7 +- paddle/fluid/operators/temporal_shift_op.cc | 6 +- .../fluid/layers/learning_rate_scheduler.py | 57 ++-- python/paddle/fluid/layers/nn.py | 253 +++++++++++++----- 7 files changed, 288 insertions(+), 134 deletions(-) diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec index 3374a85e3fa..530f0a0a031 100644 --- a/paddle/fluid/API.spec +++ b/paddle/fluid/API.spec @@ -147,10 +147,10 @@ paddle.fluid.layers.conv3d (ArgSpec(args=['input', 'num_filters', 'filter_size', paddle.fluid.layers.sequence_pool (ArgSpec(args=['input', 'pool_type', 'is_test', 'pad_value'], varargs=None, keywords=None, defaults=(False, 0.0)), ('document', '5a709f7ef3fdb8fc819d09dc4fbada9a')) paddle.fluid.layers.sequence_softmax (ArgSpec(args=['input', 'use_cudnn', 'name'], varargs=None, keywords=None, defaults=(False, None)), ('document', 'eaa9d0bbd3d4e017c8bc4ecdac483711')) paddle.fluid.layers.softmax (ArgSpec(args=['input', 'use_cudnn', 'name', 'axis'], varargs=None, keywords=None, defaults=(False, None, -1)), ('document', '7ccaea1b93fe4f7387a6036692986c6b')) -paddle.fluid.layers.pool2d (ArgSpec(args=['input', 'pool_size', 'pool_type', 'pool_stride', 'pool_padding', 'global_pooling', 'use_cudnn', 'ceil_mode', 'name', 'exclusive', 'data_format'], varargs=None, keywords=None, defaults=(-1, 'max', 1, 0, False, True, False, None, True, 'NCHW')), ('document', '630cae697d46b4b575b15d56cf8be25a')) -paddle.fluid.layers.pool3d (ArgSpec(args=['input', 'pool_size', 'pool_type', 'pool_stride', 'pool_padding', 'global_pooling', 'use_cudnn', 'ceil_mode', 'name', 'exclusive', 'data_format'], varargs=None, keywords=None, defaults=(-1, 'max', 1, 0, False, True, False, None, True, 'NCDHW')), ('document', 'db0035a3132b1dfb12e53c57591fb9f6')) -paddle.fluid.layers.adaptive_pool2d (ArgSpec(args=['input', 'pool_size', 'pool_type', 'require_index', 'name'], varargs=None, keywords=None, defaults=('max', False, None)), ('document', '52343203de40afe29607397e13aaf0d2')) -paddle.fluid.layers.adaptive_pool3d (ArgSpec(args=['input', 'pool_size', 'pool_type', 'require_index', 'name'], varargs=None, keywords=None, defaults=('max', False, None)), ('document', '55db6ae7275fb9678a6814aebab81a9c')) +paddle.fluid.layers.pool2d (ArgSpec(args=['input', 'pool_size', 'pool_type', 'pool_stride', 'pool_padding', 'global_pooling', 'use_cudnn', 'ceil_mode', 'name', 'exclusive', 'data_format'], varargs=None, keywords=None, defaults=(-1, 'max', 1, 0, False, True, False, None, True, 'NCHW')), ('document', 'daf9ae55b2d54bd5f35acb397fd1e1b5')) +paddle.fluid.layers.pool3d (ArgSpec(args=['input', 'pool_size', 'pool_type', 'pool_stride', 'pool_padding', 'global_pooling', 'use_cudnn', 'ceil_mode', 'name', 'exclusive', 'data_format'], varargs=None, keywords=None, defaults=(-1, 'max', 1, 0, False, True, False, None, True, 'NCDHW')), ('document', 'df8edcb8dd020fdddf778c9f613dc650')) +paddle.fluid.layers.adaptive_pool2d (ArgSpec(args=['input', 'pool_size', 'pool_type', 'require_index', 'name'], varargs=None, keywords=None, defaults=('max', False, None)), ('document', 'd873fdd73bcd74f9203d347cfb90de75')) +paddle.fluid.layers.adaptive_pool3d (ArgSpec(args=['input', 'pool_size', 
'pool_type', 'require_index', 'name'], varargs=None, keywords=None, defaults=('max', False, None)), ('document', 'a94ed07bf4828e318aaaedb8b037579a')) paddle.fluid.layers.batch_norm (ArgSpec(args=['input', 'act', 'is_test', 'momentum', 'epsilon', 'param_attr', 'bias_attr', 'data_layout', 'in_place', 'name', 'moving_mean_name', 'moving_variance_name', 'do_model_average_for_mean_and_var', 'fuse_with_relu', 'use_global_stats'], varargs=None, keywords=None, defaults=(None, False, 0.9, 1e-05, None, None, 'NCHW', False, None, None, None, False, False, False)), ('document', '1400433bae7876d0407ae205be39b7a1')) paddle.fluid.layers.instance_norm (ArgSpec(args=['input', 'epsilon', 'param_attr', 'bias_attr', 'name'], varargs=None, keywords=None, defaults=(1e-05, None, None, None)), ('document', '23d6fba8ad8495f67a66d8878be5b0be')) paddle.fluid.layers.data_norm (ArgSpec(args=['input', 'act', 'epsilon', 'param_attr', 'data_layout', 'in_place', 'name', 'moving_mean_name', 'moving_variance_name', 'do_model_average_for_mean_and_var'], varargs=None, keywords=None, defaults=(None, 1e-05, None, 'NCHW', False, None, None, None, False)), ('document', '5ba4cdb4ea5c03382da545335ffc05b7')) @@ -191,7 +191,7 @@ paddle.fluid.layers.row_conv (ArgSpec(args=['input', 'future_context_size', 'par paddle.fluid.layers.multiplex (ArgSpec(args=['inputs', 'index'], varargs=None, keywords=None, defaults=None), ('document', '2c4d1ae83da6ed35e3b36ba1b3b51d23')) paddle.fluid.layers.layer_norm (ArgSpec(args=['input', 'scale', 'shift', 'begin_norm_axis', 'epsilon', 'param_attr', 'bias_attr', 'act', 'name'], varargs=None, keywords=None, defaults=(True, True, 1, 1e-05, None, None, None, None)), ('document', '79797f827d89ae72c77960e9696883a9')) paddle.fluid.layers.group_norm (ArgSpec(args=['input', 'groups', 'epsilon', 'param_attr', 'bias_attr', 'act', 'data_layout', 'name'], varargs=None, keywords=None, defaults=(1e-05, None, None, None, 'NCHW', None)), ('document', '87dd4b818f102bc1a780e1804c28bd38')) -paddle.fluid.layers.spectral_norm (ArgSpec(args=['weight', 'dim', 'power_iters', 'eps', 'name'], varargs=None, keywords=None, defaults=(0, 1, 1e-12, None)), ('document', '9461e67095a6fc5d568fb2ce8fef66ff')) +paddle.fluid.layers.spectral_norm (ArgSpec(args=['weight', 'dim', 'power_iters', 'eps', 'name'], varargs=None, keywords=None, defaults=(0, 1, 1e-12, None)), ('document', '7b3d14d6707d878923847ec617d7d521')) paddle.fluid.layers.softmax_with_cross_entropy (ArgSpec(args=['logits', 'label', 'soft_label', 'ignore_index', 'numeric_stable_mode', 'return_softmax', 'axis'], varargs=None, keywords=None, defaults=(False, -100, True, False, -1)), ('document', '54e1675aa0364f4a78fa72804ec0f413')) paddle.fluid.layers.smooth_l1 (ArgSpec(args=['x', 'y', 'inside_weight', 'outside_weight', 'sigma'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', 'cbe8940643ac80ef75e1abdfbdb09e88')) paddle.fluid.layers.one_hot (ArgSpec(args=['input', 'depth', 'allow_out_of_range'], varargs=None, keywords=None, defaults=(False,)), ('document', 'cdf5dc2078f1e20dc61dd0bec7e28a29')) @@ -284,7 +284,7 @@ paddle.fluid.layers.sequence_reverse (ArgSpec(args=['x', 'name'], varargs=None, paddle.fluid.layers.affine_channel (ArgSpec(args=['x', 'scale', 'bias', 'data_layout', 'name', 'act'], varargs=None, keywords=None, defaults=(None, None, 'NCHW', None, None)), ('document', 'ecc4b1323028bde0518d666882d03515')) paddle.fluid.layers.similarity_focus (ArgSpec(args=['input', 'axis', 'indexes', 'name'], varargs=None, keywords=None, defaults=(None,)), 
('document', '18ec2e3afeb90e70c8b73d2b71c40fdb')) paddle.fluid.layers.hash (ArgSpec(args=['input', 'hash_size', 'num_hash', 'name'], varargs=None, keywords=None, defaults=(1, None)), ('document', 'a0b73c21be618cec0281e7903039e5e3')) -paddle.fluid.layers.grid_sampler (ArgSpec(args=['x', 'grid', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '5d16663e096d7f04954c70ce1cc5e195')) +paddle.fluid.layers.grid_sampler (ArgSpec(args=['x', 'grid', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '90c74742f48c70b103f1fbb9eb129066')) paddle.fluid.layers.log_loss (ArgSpec(args=['input', 'label', 'epsilon', 'name'], varargs=None, keywords=None, defaults=(0.0001, None)), ('document', 'e3993a477c94729526040ff65d95728e')) paddle.fluid.layers.add_position_encoding (ArgSpec(args=['input', 'alpha', 'beta', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'e399f9436fed5f7ff480d8532e42c937')) paddle.fluid.layers.bilinear_tensor_product (ArgSpec(args=['x', 'y', 'size', 'act', 'name', 'param_attr', 'bias_attr'], varargs=None, keywords=None, defaults=(None, None, None, None)), ('document', '45fc3652a8e1aeffbe4eba371c54f756')) @@ -292,13 +292,13 @@ paddle.fluid.layers.merge_selected_rows (ArgSpec(args=['x', 'name'], varargs=Non paddle.fluid.layers.get_tensor_from_selected_rows (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '2c568321feb4d16c41a83df43f95089d')) paddle.fluid.layers.lstm (ArgSpec(args=['input', 'init_h', 'init_c', 'max_len', 'hidden_size', 'num_layers', 'dropout_prob', 'is_bidirec', 'is_test', 'name', 'default_initializer', 'seed'], varargs=None, keywords=None, defaults=(0.0, False, False, None, None, -1)), ('document', 'baa7327ed89df6b7bdd32f9ffdb62f63')) paddle.fluid.layers.shuffle_channel (ArgSpec(args=['x', 'group', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '276a1213dd431228cefa33c3146df34a')) -paddle.fluid.layers.temporal_shift (ArgSpec(args=['x', 'seg_num', 'shift_ratio', 'name'], varargs=None, keywords=None, defaults=(0.25, None)), ('document', '13b1cdcb01f5ffdc26591ff9a2ec4669')) +paddle.fluid.layers.temporal_shift (ArgSpec(args=['x', 'seg_num', 'shift_ratio', 'name'], varargs=None, keywords=None, defaults=(0.25, None)), ('document', 'd5945431cdcae3cda21914db5bbf383e')) paddle.fluid.layers.py_func (ArgSpec(args=['func', 'x', 'out', 'backward_func', 'skip_vars_in_backward_input'], varargs=None, keywords=None, defaults=(None, None)), ('document', '8404e472ac12b4a30a505d3d3a3e5fdb')) paddle.fluid.layers.psroi_pool (ArgSpec(args=['input', 'rois', 'output_channels', 'spatial_scale', 'pooled_height', 'pooled_width', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '42d5155374f69786300d90d751956998')) paddle.fluid.layers.prroi_pool (ArgSpec(args=['input', 'rois', 'output_channels', 'spatial_scale', 'pooled_height', 'pooled_width', 'name'], varargs=None, keywords=None, defaults=(1.0, 1, 1, None)), ('document', '454c7ea8c73313dd41513929d7526303')) paddle.fluid.layers.teacher_student_sigmoid_loss (ArgSpec(args=['input', 'label', 'soft_max_up_bound', 'soft_max_lower_bound'], varargs=None, keywords=None, defaults=(15.0, -15.0)), ('document', 'b0e07aa41caae04b07a8e8217cc96020')) paddle.fluid.layers.huber_loss (ArgSpec(args=['input', 'label', 'delta'], varargs=None, keywords=None, defaults=None), ('document', '9d93ee81f7a3e526d68bb280bc695d6c')) -paddle.fluid.layers.kldiv_loss (ArgSpec(args=['x', 'target', 'reduction', 'name'], varargs=None, 
keywords=None, defaults=('mean', None)), ('document', '18bc95c62d3300456c3c7da5278b47bb')) +paddle.fluid.layers.kldiv_loss (ArgSpec(args=['x', 'target', 'reduction', 'name'], varargs=None, keywords=None, defaults=('mean', None)), ('document', '45f3ebbcb766fca84cb2fe6307086573')) paddle.fluid.layers.npair_loss (ArgSpec(args=['anchor', 'positive', 'labels', 'l2_reg'], varargs=None, keywords=None, defaults=(0.002,)), ('document', '3828c4bd81c25af0ab955f52d453c587')) paddle.fluid.layers.pixel_shuffle (ArgSpec(args=['x', 'upscale_factor'], varargs=None, keywords=None, defaults=None), ('document', '7e5cac851fd9bad344230e1044b6a565')) paddle.fluid.layers.fsp_matrix (ArgSpec(args=['x', 'y'], varargs=None, keywords=None, defaults=None), ('document', '3a4eb7cce366f5fd8bc38b42b6af5ba1')) @@ -440,9 +440,9 @@ paddle.fluid.layers.box_decoder_and_assign (ArgSpec(args=['prior_box', 'prior_bo paddle.fluid.layers.collect_fpn_proposals (ArgSpec(args=['multi_rois', 'multi_scores', 'min_level', 'max_level', 'post_nms_top_n', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'ff4a651d65a9a9f9da71349ba6a2dc1f')) paddle.fluid.layers.accuracy (ArgSpec(args=['input', 'label', 'k', 'correct', 'total'], varargs=None, keywords=None, defaults=(1, None, None)), ('document', 'b691b7be425e281bd36897b514b2b064')) paddle.fluid.layers.auc (ArgSpec(args=['input', 'label', 'curve', 'num_thresholds', 'topk', 'slide_steps'], varargs=None, keywords=None, defaults=('ROC', 4095, 1, 1)), ('document', 'c36ac7125da977c2bd1b192bee301f75')) -paddle.fluid.layers.exponential_decay (ArgSpec(args=['learning_rate', 'decay_steps', 'decay_rate', 'staircase'], varargs=None, keywords=None, defaults=(False,)), ('document', 'eaf430c5a0380fb11bfe9a8922cd6295')) -paddle.fluid.layers.natural_exp_decay (ArgSpec(args=['learning_rate', 'decay_steps', 'decay_rate', 'staircase'], varargs=None, keywords=None, defaults=(False,)), ('document', 'aa3146f64d5d508e4e50687603aa7b15')) -paddle.fluid.layers.inverse_time_decay (ArgSpec(args=['learning_rate', 'decay_steps', 'decay_rate', 'staircase'], varargs=None, keywords=None, defaults=(False,)), ('document', 'ea37a3a8a0b3ce2254e7bc49a0951dbe')) +paddle.fluid.layers.exponential_decay (ArgSpec(args=['learning_rate', 'decay_steps', 'decay_rate', 'staircase'], varargs=None, keywords=None, defaults=(False,)), ('document', '48c7b2563a6fc11f23030cde8d7a5c80')) +paddle.fluid.layers.natural_exp_decay (ArgSpec(args=['learning_rate', 'decay_steps', 'decay_rate', 'staircase'], varargs=None, keywords=None, defaults=(False,)), ('document', '63edb712ab4ca837049f24a9421dfe30')) +paddle.fluid.layers.inverse_time_decay (ArgSpec(args=['learning_rate', 'decay_steps', 'decay_rate', 'staircase'], varargs=None, keywords=None, defaults=(False,)), ('document', 'ff553aa6546eeb1bc692fadb3df78370')) paddle.fluid.layers.polynomial_decay (ArgSpec(args=['learning_rate', 'decay_steps', 'end_learning_rate', 'power', 'cycle'], varargs=None, keywords=None, defaults=(0.0001, 1.0, False)), ('document', 'a343254c36c2e89512cd8cd8a1960ead')) paddle.fluid.layers.piecewise_decay (ArgSpec(args=['boundaries', 'values'], varargs=None, keywords=None, defaults=None), ('document', 'd9f654117542c6b702963dda107a247f')) paddle.fluid.layers.noam_decay (ArgSpec(args=['d_model', 'warmup_steps'], varargs=None, keywords=None, defaults=None), ('document', 'fd57228fb76195e66bbcc8d8e42c494d')) diff --git a/paddle/fluid/operators/kldiv_loss_op.cc b/paddle/fluid/operators/kldiv_loss_op.cc index a7c5d6305b0..983ab3dba6e 100644 --- 
a/paddle/fluid/operators/kldiv_loss_op.cc +++ b/paddle/fluid/operators/kldiv_loss_op.cc @@ -69,10 +69,12 @@ class KLDivLossOpMaker : public framework::OpProtoAndCheckerMaker { AddInput("X", "The input tensor of KL divergence loss operator. " "This is a tensor with shape of [N, *], where N is the " - "batch size, * means any number of additional dimensions."); + "batch size, * means any number of additional dimensions. " + "The data type is float32 or float64."); AddInput("Target", "The tensor of KL divergence loss operator. " - "This is a tensor with shape of Input(X)."); + "This is a tensor with shape of Input(X). " + "The data type is the same as Input(X)."); AddOutput( "Loss", "The output KL divergence loss tensor. if Attr(reduction) is " @@ -90,7 +92,8 @@ class KLDivLossOpMaker : public framework::OpProtoAndCheckerMaker { AddComment(R"DOC( This operator calculates the Kullback-Leibler divergence loss - between Input(X) and Input(Target). + between Input(X) and Input(Target). Note that Input(X) is the + log-probability and Input(Target) is the probability. KL divergence loss is calculated as follows:
diff --git a/paddle/fluid/operators/pool_op.cc b/paddle/fluid/operators/pool_op.cc index 67093e6c6da..f19433115a7 100644 --- a/paddle/fluid/operators/pool_op.cc +++ b/paddle/fluid/operators/pool_op.cc @@ -200,8 +200,9 @@ void Pool2dOpMaker::Make() { // TypedAttrChecker don't support vector type.) AddAttr( "global_pooling", - "(bool, default false) Whether to use the global pooling. " - "If global_pooling = true, kernel size and paddings will be ignored.") + "(bool) Whether to use the global pooling. " + "If global_pooling = true, kernel size and paddings will be ignored. " + "Default False.") .SetDefault(false); AddAttr>("strides", "(vector, default {1, 1}), strides(height, " @@ -217,36 +218,38 @@ void Pool2dOpMaker::Make() { .SetDefault({0, 0}); AddAttr( "exclusive", - "(bool, default True) When true, will exclude the zero-padding in the " + "(bool) When true, will exclude the zero-padding in the " "averaging calculating, otherwise, include the zero-padding. Note, it " - "is only used when pooling_type is avg. The default is True.") + "is only used when pooling_type is avg. " + "Default True.") .SetDefault(true); AddAttr( "adaptive", - "(bool, default False) When true, will perform adaptive pooling instead, " + "(bool) When true, will perform adaptive pooling instead, " "output shape in H and W dimensions will be same as ksize, input data " "will be divided into grids specify by ksize averagely and perform " - "pooling in each grid area to get output pooling value.") + "pooling in each grid area to get output pooling value. " + "Default False.") .SetDefault(false); AddAttr( "use_cudnn", - "(bool, default false) Only used in cudnn kernel, need install cudnn.") + "(bool) Only used in cudnn kernel; cudnn needs to be installed. Default False.") .SetDefault(false); AddAttr( "ceil_mode", - "(bool, default false) Whether to use the ceil function to calculate " + "(bool) Whether to use the ceil function to calculate " "output height and width. False is the default. If it is set to False, " - "the floor function will be used.") + "the floor function will be used. Default False.") .SetDefault(false); AddAttr("use_mkldnn", - "(bool, default false) Only used in mkldnn kernel.") + "(bool) Only used in mkldnn kernel. Default False.") .SetDefault(false); AddAttr("use_quantizer", - "(bool, default false) " + "(bool) " "Set to true for operators that should be quantized and use " "int8 kernel. 
" - "Only used on CPU.") + "Only used on CPU. Default False") .SetDefault(false); AddAttr( "data_format", @@ -269,11 +272,11 @@ void Pool2dOpMaker::Make() { // TODO(dzhwinter): need to registered layout transform function AddComment(R"DOC( -The pooling2d operation calculates the output based on -the input, pooling_type and ksize, strides, paddings parameters. -Input(X) and output(Out) are in NCHW or NHWC format, where N is batch size, C is the +This operation calculates the pooling output based on +the input, pooling_type and pool_size, pool_stride, pool_padding parameters. +Input(X) and Output(Out) are in NCHW or NHWC format, where N is batch size, C is the number of channels, H is the height of the feature, and W is the width of the feature. -Parameters(ksize, strides, paddings) are two elements. +Parameters(pool_size, pool_stride, pool_padding) hold two integer elements. These two elements represent height and width, respectively. The input(X) size and output(Out) size may be different. @@ -393,8 +396,9 @@ void Pool3dOpMaker::Make() { // TypedAttrChecker don't support vector type.) AddAttr( "global_pooling", - "(bool, default false) Whether to use the global pooling. " - "If global_pooling = true, kernel size and paddings will be ignored.") + "(bool) Whether to use the global pooling. " + "If global_pooling = true, kernel size and paddings will be ignored. " + "Default False") .SetDefault(false); AddAttr>( "strides", @@ -413,30 +417,32 @@ void Pool3dOpMaker::Make() { // TypedAttrChecker don't support vector type.) AddAttr( "exclusive", - "(bool, default True) When true, will exclude the zero-padding in the " + "(bool) When true, will exclude the zero-padding in the " "averaging calculating, otherwise, include the zero-padding. Note, it " - "is only used when pooling_type is avg. The default is True.") + "is only used when pooling_type is avg. The default is True. " + "Default True") .SetDefault(true); AddAttr( "adaptive", - "(bool, default False) When true, will perform adaptive pooling instead, " + "(bool) When true, will perform adaptive pooling instead, " "output shape in H and W dimensions will be same as ksize, input data " "will be divided into grids specify by ksize averagely and perform " - "pooling in each grid area to get output pooling value.") + "pooling in each grid area to get output pooling value. " + "Default False") .SetDefault(false); AddAttr( "use_cudnn", - "(bool, default false) Only used in cudnn kernel, need install cudnn.") + "(bool) Only used in cudnn kernel, need install cudnn. Default False") .SetDefault(false); AddAttr( "ceil_mode", - "(bool, default false) Whether to use the ceil function to calculate " + "(bool) Whether to use the ceil function to calculate " "output height and width. False is the default. If it is set to False, " - "the floor function will be used.") + "the floor function will be used. Default False") .SetDefault(false); AddAttr("use_mkldnn", - "(bool, default false) Only used in mkldnn kernel") + "(bool) Only used in mkldnn kernel. Default False") .SetDefault(false); AddAttr( "data_format", @@ -454,14 +460,12 @@ void Pool3dOpMaker::Make() { // TODO(dzhwinter): need to registered layout transform function AddComment(R"DOC( -Pool3d Operator. - -The pooling3d operation calculates the output based on -the input, pooling_type, ksize, strides, and paddings parameters. +This operation calculates the output based on +the input, pooling_type, pool_size, pool_stride, and pool_padding parameters. 
Input(X) and output(Out) are in NCDHW or NDHWC format, where N is batch size, C is the number of channels, and D, H and W are the depth, height and -width of the feature, respectively. Parameters(ksize, strides, paddings) -are three elements. These three elements represent depth, height and +width of the feature, respectively. Parameters(pool_size, pool_stride, pool_padding) +hold three integer elements. These three elements represent depth, height and width, respectively. The input(X) size and output(Out) size may be different. Example:
diff --git a/paddle/fluid/operators/spectral_norm_op.cc b/paddle/fluid/operators/spectral_norm_op.cc index ec5ee487729..5690265573f 100644 --- a/paddle/fluid/operators/spectral_norm_op.cc +++ b/paddle/fluid/operators/spectral_norm_op.cc @@ -88,7 +88,8 @@ class SpectralNormOpMaker : public framework::OpProtoAndCheckerMaker { AddInput("Weight", "The input weight tensor of spectral_norm operator, " "This can be a 2-D, 3-D, 4-D, 5-D tensor which is the " - "weights of fc, conv1d, conv2d, conv3d layer."); + "weights of fc, conv1d, conv2d, conv3d layer. " + "The data type is float32 or float64."); AddInput("U", "The weight_u tensor of spectral_norm operator, " "This can be a 1-D tensor in shape [H, 1]," @@ -123,7 +124,9 @@ class SpectralNormOpMaker : public framework::OpProtoAndCheckerMaker { .SetDefault(1); AddAttr("eps", "epsilon for numerical stability in " - "calculating norms") + "calculating norms. It will be added to " + "the denominator to avoid division by zero. " + "Default 1e-12.") .SetDefault(1e-12); AddComment(R"DOC(
diff --git a/paddle/fluid/operators/temporal_shift_op.cc b/paddle/fluid/operators/temporal_shift_op.cc index f2a8ae9a411..a438832b5dc 100644 --- a/paddle/fluid/operators/temporal_shift_op.cc +++ b/paddle/fluid/operators/temporal_shift_op.cc @@ -69,7 +69,8 @@ class TemporalShiftOpMaker : public framework::OpProtoAndCheckerMaker { "This is a 4-D tensor with shape of [N*T, C, H, W]. " "While N is the batch size, T is the temporal segment " "number, C is the channel number, H is the height of " - "features and W is the width of features."); + "features and W is the width of features. " + "The data type is float32 or float64."); AddOutput("Out", "The output tensor of temporal shift operator. " "This is a 4-D tensor in the same shape with Input(X)."); @@ -82,7 +83,8 @@ class TemporalShiftOpMaker : public framework::OpProtoAndCheckerMaker { "The shift ratio of the channels, the first :attr:`shift_ratio` part " "of channels will be shifted by -1 along the temporal dimension, " "and the second :attr:`shift_ratio` part of channels will be shifted " - "by 1 along the temporal dimension. Default 0.25.") + "by 1 along the temporal dimension. :attr:`shift_ratio` should be in " + "the range [0, 0.5]. Default 0.25.") .SetDefault(0.25); AddComment(R"DOC(
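A note on the shift semantics documented above: viewing the input as [N, T, C, H, W], the first :attr:`shift_ratio` of the channels is shifted by -1 along T and the second :attr:`shift_ratio` by +1, with the remaining channels left in place. A minimal NumPy sketch of that behavior (the helper and its names are illustrative, not part of the operator):

.. code-block:: python

    import numpy as np

    def temporal_shift_ref(x, seg_num, shift_ratio=0.25):
        # x: [N*T, C, H, W] -> view as [N, T, C, H, W]
        nt, c, h, w = x.shape
        n, t = nt // seg_num, seg_num
        x5 = x.reshape(n, t, c, h, w)
        out = np.zeros_like(x5)
        c1 = int(c * shift_ratio)        # first part: shifted by -1 along T
        c2 = int(c * 2 * shift_ratio)    # second part: shifted by +1 along T
        out[:, :-1, :c1] = x5[:, 1:, :c1]
        out[:, 1:, c1:c2] = x5[:, :-1, c1:c2]
        out[:, :, c2:] = x5[:, :, c2:]   # remaining channels: unshifted
        return out.reshape(nt, c, h, w)

    out = temporal_shift_ref(np.random.rand(8, 4, 2, 2).astype('float32'), seg_num=2)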
diff --git a/python/paddle/fluid/layers/learning_rate_scheduler.py b/python/paddle/fluid/layers/learning_rate_scheduler.py index e80f5d01207..2e397412767 100644 --- a/python/paddle/fluid/layers/learning_rate_scheduler.py +++ b/python/paddle/fluid/layers/learning_rate_scheduler.py @@ -109,20 +109,25 @@ def exponential_decay(learning_rate, decay_steps, decay_rate, staircase=False): training progresses. By using this function, the learning rate will be decayed by 'decay_rate' every 'decay_steps' steps. + The decayed learning rate is calculated as follows: + >>> if staircase == True: >>> decayed_learning_rate = learning_rate * decay_rate ^ floor(global_step / decay_steps) >>> else: >>> decayed_learning_rate = learning_rate * decay_rate ^ (global_step / decay_steps) Args: - learning_rate(Variable|float): The initial learning rate. - decay_steps(int): See the decay computation above. - decay_rate(float): The decay rate. See the decay computation above. - staircase(Boolean): If True, decay the learning rate at discrete intervals. - Default: False + learning_rate(Variable|float): The initial learning rate. It should be a Variable + or a float. + decay_steps(int): The learning rate decay steps. See the decay computation above. + decay_rate(float): The learning rate decay rate. See the decay computation above. + staircase(bool): If True, decay the learning rate at discrete intervals, which + means the learning rate will be decayed by `decay_rate` every + `decay_steps`. If False, the learning rate will be decayed continuously, + following the formula above. Default: False Returns: - Variable: The decayed learning rate + Variable: The decayed learning rate. The data type is float32. Examples: .. code-block:: python
@@ -156,20 +161,29 @@ def exponential_decay(learning_rate, decay_steps, decay_rate, staircase=False): def natural_exp_decay(learning_rate, decay_steps, decay_rate, staircase=False): """Applies natural exponential decay to the initial learning rate. + When training a model, it is often recommended to lower the learning rate as the + training progresses. By using this function, the learning rate will be decayed by + natural exponential power 'decay_rate' every 'decay_steps' steps. + + The decayed learning rate is calculated as follows: + >>> if not staircase: >>> decayed_learning_rate = learning_rate * exp(- decay_rate * (global_step / decay_steps)) >>> else: >>> decayed_learning_rate = learning_rate * exp(- decay_rate * floor(global_step / decay_steps)) Args: - learning_rate: A scalar float32 value or a Variable. This - will be the initial learning rate during training - decay_steps: A Python `int32` number. - decay_rate: A Python `float` number. - staircase: Boolean. If set true, decay the learning rate every decay_steps. + learning_rate(Variable|float): The initial learning rate. It should be a Variable + or a float. + decay_steps(int): The learning rate decay steps. See the decay computation above. + decay_rate(float): The learning rate decay rate. See the decay computation above. + staircase(bool): If True, decay the learning rate at discrete intervals, which + means the learning rate will be decayed by natural exponential power + `decay_rate` every `decay_steps`. If False, the learning rate will be + decayed continuously, following the formula above. Default: False Returns: - The decayed learning rate + The decayed learning rate. The data type is float32. Examples: .. code-block:: python
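Both schedules reduce to a few lines of arithmetic; a plain-Python sanity check of the formulas quoted above (the helper names are ad hoc, not Paddle API):

.. code-block:: python

    import math

    def exponential_decay_ref(lr, global_step, decay_steps, decay_rate, staircase=False):
        exponent = global_step / float(decay_steps)
        if staircase:
            exponent = math.floor(exponent)
        return lr * decay_rate ** exponent

    def natural_exp_decay_ref(lr, global_step, decay_steps, decay_rate, staircase=False):
        exponent = global_step / float(decay_steps)
        if staircase:
            exponent = math.floor(exponent)
        return lr * math.exp(-decay_rate * exponent)

    print(exponential_decay_ref(0.1, 5000, 10000, 0.5))        # continuous: 0.1 * 0.5**0.5
    print(exponential_decay_ref(0.1, 5000, 10000, 0.5, True))  # staircase: 0.1 * 0.5**0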
@@ -208,20 +222,25 @@ def inverse_time_decay(learning_rate, decay_steps, decay_rate, staircase=False): training progresses. By using this function, an inverse decay function will be applied to the initial learning rate. + The decayed learning rate is calculated as follows: + >>> if staircase == True: >>> decayed_learning_rate = learning_rate / (1 + decay_rate * floor(global_step / decay_step)) >>> else: >>> decayed_learning_rate = learning_rate / (1 + decay_rate * global_step / decay_step) Args: - learning_rate(Variable|float): The initial learning rate. - decay_steps(int): See the decay computation above. - decay_rate(float): The decay rate. See the decay computation above. - staircase(Boolean): If True, decay the learning rate at discrete intervals. - Default: False + learning_rate(Variable|float): The initial learning rate. It should be a Variable + or a float. + decay_steps(int): The learning rate decay steps. See the decay computation above. + decay_rate(float): The learning rate decay rate. See the decay computation above. + staircase(bool): If True, decay the learning rate at discrete intervals, which + means the learning rate will be decayed by `decay_rate` times + every `decay_steps`. If False, the learning rate will be decayed + continuously, following the formula above. Default: False Returns: - Variable: The decayed learning rate + Variable: The decayed learning rate. The data type is float32. Examples: .. code-block:: python @@ -229,7 +248,7 @@ def inverse_time_decay(learning_rate, decay_steps, decay_rate, staircase=False): import paddle.fluid as fluid base_lr = 0.1 sgd_optimizer = fluid.optimizer.SGD( - learning_rate=fluid.layers.natural_exp_decay( + learning_rate=fluid.layers.inverse_time_decay( learning_rate=base_lr, decay_steps=10000, decay_rate=0.5,
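The same kind of sanity check for inverse time decay (ad-hoc helper mirroring the formula above):

.. code-block:: python

    import math

    def inverse_time_decay_ref(lr, global_step, decay_steps, decay_rate, staircase=False):
        ratio = global_step / float(decay_steps)
        if staircase:
            ratio = math.floor(ratio)
        return lr / (1 + decay_rate * ratio)

    print(inverse_time_decay_ref(0.1, 10000, 10000, 0.5))  # 0.1 / (1 + 0.5)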
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index 1854f4cdf84..4baaf7d2b20 100755 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -3286,10 +3286,11 @@ def pool2d(input, ${comment} Args: - input (Variable): The input tensor of pooling operator. The format of - input tensor is `"NCHW"` or `"NHWC"`, where `N` is batch size, `C` is - the number of channels, `H` is the height of the - feature, and `W` is the width of the feature. + input (Variable): The input tensor of pooling operator which is a 4-D tensor with + shape [N, C, H, W]. The format of input tensor is `"NCHW"` or + `"NHWC"`, where `N` is batch size, `C` is the number of channels, + `H` is the height of the feature, and `W` is the width of the + feature. The data type is float32 or float64. pool_size (int|list|tuple): The pool kernel size. If pool kernel size is a tuple or list, it must contain two integers, (pool_size_Height, pool_size_Width). Otherwise, the pool kernel size will be a square of an int. @@ -3308,8 +3309,9 @@ def pool2d(input, global_pooling (bool): ${global_pooling_comment} use_cudnn (bool): ${use_cudnn_comment} ceil_mode (bool): ${ceil_mode_comment} - name (str|None): A name for this layer(optional). If set None, the - layer will be named automatically. + name(str, optional): For detailed information, please refer + to :ref:`api_guide_Name`. Usually name does not need to be set, and + it is None by default. exclusive (bool): Whether to exclude padding points in average pooling mode, default is `true`. data_format (string): The data format of the input and output data. An optional string from: `"NCHW"`, `"NDHW"`. The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of: `[batch_size, input_channels, input_height, input_width]`. @@ -3317,7 +3319,7 @@ def pool2d(input, Returns: - Variable: The pooling result. + Variable: The output tensor of pooling result. The data type is the same as the input tensor. 
Raises: ValueError: If `pool_type` is not "max" nor "avg" @@ -3330,10 +3332,32 @@ def pool2d(input, import paddle.fluid as fluid - data = fluid.layers.data( - name='data', shape=[10, 3, 32, 32], append_batch_size=False, dtype='float32') + data = fluid.data(name='data', shape=[None, 3, 32, 32], dtype='float32') + + # max pool2d + pool2d = fluid.layers.pool2d( + input = data, + pool_size = 2, + pool_type = "max", + pool_stride = 1, + global_pooling=False) + + # average pool2d + pool2d = fluid.layers.pool2d( + input = data, + pool_size = 2, + pool_type = "avg", + pool_stride = 1, + global_pooling=False) + + # global average pool2d + pool2d = fluid.layers.pool2d( + input = data, + pool_size = 2, + pool_type = "avg", + pool_stride = 1, + global_pooling=True) - # example 1: # Attr(pool_padding) is a list with 4 elements, Attr(data_format) is "NCHW". out_1 = fluid.layers.pool2d( input = data, @@ -3343,7 +3367,6 @@ def pool2d(input, pool_padding = [1, 2, 1, 0], data_format = "NCHW") - # example 2: # Attr(pool_padding) is a string, Attr(data_format) is "NCHW". out_2 = fluid.layers.pool2d( input = data,
@@ -3465,7 +3488,8 @@ def pool3d(input, ${comment} Args: - input (Variable): The input tensor of pooling operator. The format of + input (Variable): The input tensor of pooling operator, which is a 5-D tensor with + shape [N, C, D, H, W]. The format of input tensor is `"NCDHW"` or `"NDHWC"`, where `N` is batch size, `C` is the number of channels, `D` is the depth of the feature, `H` is the height of the feature, and `W` is the width of the feature. @@ -3489,8 +3513,9 @@ def pool3d(input, global_pooling (bool): ${global_pooling_comment} use_cudnn (bool): ${use_cudnn_comment} ceil_mode (bool): ${ceil_mode_comment} - name (str): A name for this layer(optional). If set None, the layer - will be named automatically. + name(str, optional): For detailed information, please refer + to :ref:`api_guide_Name`. Usually name does not need to be set, and + it is None by default. exclusive (bool): Whether to exclude padding points in average pooling mode, default is true. data_format (string): The data format of the input and output data. An optional string from: `"NCDHW"`, `"NDHWC"`. The default is `"NCDHW"`. When it is `"NCDHW"`, the data is stored in the order of: `[batch_size, input_channels, input_depth, input_height, input_width]`. @@ -3498,7 +3523,7 @@ def pool3d(input, Returns: - Variable: output of pool3d layer. + Variable: The output tensor of pooling result. The data type is the same as the input tensor. Examples: @@ -3506,8 +3531,31 @@ def pool3d(input, .. code-block:: python - data = fluid.layers.data( - name='data', shape=[10, 3, 32, 32, 32], append_batch_size=False, dtype='float32') + data = fluid.data(name='data', shape=[None, 3, 32, 32, 32], dtype='float32') + + # max pool3d + pool3d = fluid.layers.pool3d( + input = data, + pool_size = 2, + pool_type = "max", + pool_stride = 1, + global_pooling=False) + + # average pool3d + pool3d = fluid.layers.pool3d( + input = data, + pool_size = 2, + pool_type = "avg", + pool_stride = 1, + global_pooling=False) + + # global average pool3d + pool3d = fluid.layers.pool3d( + input = data, + pool_size = 2, + pool_type = "avg", + pool_stride = 1, + global_pooling=True) # example 1: # Attr(pool_padding) is a list with 6 elements, Attr(data_format) is "NCDHW".
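The op comments above note that the input and output sizes may differ; the relationship that Attr(ceil_mode) toggles can be sketched with the standard pooling arithmetic (an illustrative helper that assumes symmetric integer padding, not the Paddle kernel):

.. code-block:: python

    import math

    def pool_output_size(in_size, ksize, stride, pad, ceil_mode=False):
        # ceil_mode=True uses ceil instead of floor, as Attr(ceil_mode) describes.
        rnd = math.ceil if ceil_mode else math.floor
        return int(rnd((in_size + 2 * pad - ksize) / float(stride))) + 1

    print(pool_output_size(32, 3, 2, 0))                  # 15 (floor)
    print(pool_output_size(32, 3, 2, 0, ceil_mode=True))  # 16 (ceil)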
Input(X) and output(Out) are in NCHW format, where N is batch size, C is the number of channels, H is the height of the feature, and W is the width of the feature. Parameters(pool_size) should contain two elements which represent height and width, respectively. Also the H and W dimensions of output(Out) - is same as Parameter(pool_size). + is same as Parameter(pool_size). The output tensor shape will be [N, C, pool_size[0], pool_size[1]] For average adaptive pool2d: @@ -3662,20 +3709,23 @@ def adaptive_pool2d(input, Output(i ,j) &= \\frac{sum(Input[hstart:hend, wstart:wend])}{(hend - hstart) * (wend - wstart)} Args: - input (Variable): The input tensor of pooling operator. The format of - input tensor is NCHW, where N is batch size, C is - the number of channels, H is the height of the - feature, and W is the width of the feature. + input (Variable): The input tensor of pooling operator, which is a 4-D tensor + with shape [N, C, H, W]. The format of input tensor is NCHW, + where N is batch size, C is the number of channels, H is the + height of the feature, and W is the width of the feature. + The data type is float32 or float64. pool_size (int|list|tuple): The pool kernel size. If pool kernel size is a tuple or list, it must contain two integers, (pool_size_Height, pool_size_Width). pool_type: ${pooling_type_comment} require_index (bool): If true, the index of max pooling point will be returned along - with outputs. It cannot be set in average pooling type. - name (str|None): A name for this layer(optional). If set None, the - layer will be named automatically. + with outputs. It cannot be set in average pooling type. Default False. + name(str, optional): For detailed information, please refer + to :ref:`api_guide_Name`. Usually name is no need to set and + None by default. Returns: - Variable: The pooling result. + Variable: The output tensor of adaptive pooling result. The data type is same + as input tensor. Raises: ValueError: 'pool_type' is not 'max' nor 'avg'. @@ -3685,6 +3735,7 @@ def adaptive_pool2d(input, Examples: .. code-block:: python + # average adaptive pool2d # suppose input data in shape of [N, C, H, W], `pool_size` is [m, n], # output shape is [N, C, m, n], adaptive pool divide H and W dimentions # of input data into m * n grids averagely and performs poolings in each @@ -3700,12 +3751,33 @@ def adaptive_pool2d(input, # output[:, :, i, j] = avg(input[:, :, hstart: hend, wstart: wend]) # import paddle.fluid as fluid - data = fluid.layers.data( - name='data', shape=[3, 32, 32], dtype='float32') + data = fluid.data(name='data', shape=[None, 3, 32, 32], dtype='float32') pool_out = fluid.layers.adaptive_pool2d( input=data, pool_size=[3, 3], pool_type='avg') + + # max adaptive pool2d + # suppose input data in shape of [N, C, H, W], `pool_size` is [m, n], + # output shape is [N, C, m, n], adaptive pool divide H and W dimentions + # of input data into m * n grids averagely and performs poolings in each + # grid to get output. 
+ # adaptive max pool performs calculations as follows: + # + # for i in range(m): + # for j in range(n): + # hstart = floor(i * H / m) + # hend = ceil((i + 1) * H / m) + # wstart = floor(j * W / n) + # wend = ceil((j + 1) * W / n) + # output[:, :, i, j] = max(input[:, :, hstart: hend, wstart: wend]) + # + import paddle.fluid as fluid + data = fluid.data(name='data', shape=[None, 3, 32, 32], dtype='float32') + pool_out = fluid.layers.adaptive_pool2d( + input=data, + pool_size=[3, 3], + pool_type='max') """ if pool_type not in ["max", "avg"]: raise ValueError(
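The grid formulas repeated in these docstrings translate directly to NumPy; a minimal sketch of the 2-D average case (an illustrative helper, not the Paddle kernel):

.. code-block:: python

    import math
    import numpy as np

    def adaptive_avg_pool2d_ref(x, out_h, out_w):
        # x: [N, C, H, W]; bin edges follow the hstart/hend, wstart/wend formulas.
        n, c, h, w = x.shape
        out = np.zeros((n, c, out_h, out_w), dtype=x.dtype)
        for i in range(out_h):
            hs, he = math.floor(i * h / out_h), math.ceil((i + 1) * h / out_h)
            for j in range(out_w):
                ws, we = math.floor(j * w / out_w), math.ceil((j + 1) * w / out_w)
                out[:, :, i, j] = x[:, :, hs:he, ws:we].mean(axis=(2, 3))
        return out

    x = np.random.rand(1, 3, 32, 32).astype('float32')
    print(adaptive_avg_pool2d_ref(x, 3, 3).shape)  # (1, 3, 3, 3)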
@@ -3752,13 +3824,13 @@ def adaptive_pool3d(input, require_index=False, name=None): """ - **Adaptive Pool3d Operator** - The adaptive_pool3d operation calculates the output based on the input, pool_size, + This operation calculates the output based on the input, pool_size, pool_type parameters. Input(X) and output(Out) are in NCDHW format, where N is batch size, C is the number of channels, D is the depth of the feature, H is the height of the feature, and W is the width of the feature. Parameters(pool_size) should contain three elements which represent height and width, respectively. Also the D, H and W - dimensions of output(Out) is same as Parameter(pool_size). + dimensions of output(Out) is the same as Parameter(pool_size). The output tensor shape + will be [N, C, pool_size[0], pool_size[1], pool_size[2]]. For average adaptive pool3d: .. math:: dstart &= floor(i * D_{in} / D_{out}) dend &= ceil((i + 1) * D_{in} / D_{out}) hstart &= floor(j * H_{in} / H_{out}) hend &= ceil((j + 1) * H_{in} / H_{out}) wstart &= floor(k * W_{in} / W_{out}) wend &= ceil((k + 1) * W_{in} / W_{out}) @@ -3779,20 +3851,22 @@ Output(i ,j, k) &= \\frac{sum(Input[dstart:dend, hstart:hend, wstart:wend])}{(dend - dstart) * (hend - hstart) * (wend - wstart)} Args: - input (Variable): The input tensor of pooling operator. The format of - input tensor is NCDHW, where N is batch size, C is - the number of channels, D is the depth of the feature, + input (Variable): The input tensor of pooling operator, which is a 5-D tensor with + shape [N, C, D, H, W]. The format of input tensor is NCDHW, where + N is batch size, C is the number of channels, D is the depth of the feature, H is the height of the feature, and W is the width of the feature. + The data type is float32 or float64. pool_size (int|list|tuple): The pool kernel size. If pool kernel size is a tuple or list, it must contain three integers, (Depth, Height, Width). pool_type: ${pooling_type_comment} require_index (bool): If true, the index of max pooling point will be returned along - with outputs. It cannot be set in average pooling type. - name (str|None): A name for this layer(optional). If set None, the - layer will be named automatically. + with outputs. It cannot be set in average pooling type. Default False. + name(str, optional): For detailed information, please refer + to :ref:`api_guide_Name`. Usually name does not need to be set, and + it is None by default. Returns: - Variable: The pooling result. + Variable: The output tensor of adaptive pooling result. The data type is the same as the input tensor. Raises: ValueError: 'pool_type' is not 'max' nor 'avg'. @@ -3802,6 +3876,7 @@ Examples: .. code-block:: python + # average adaptive pool3d # suppose input data in shape of [N, C, D, H, W], `pool_size` is [l, m, n], # output shape is [N, C, l, m, n], adaptive pool divide D, H and W dimentions # of input data into l * m * n grids averagely and performs poolings in each # grid to get output. # adaptive average pool performs calculations as follow: # # for i in range(l): # for j in range(m): # for k in range(n): # dstart = floor(i * D / l) # dend = ceil((i + 1) * D / l) # hstart = floor(j * H / m) # hend = ceil((j + 1) * H / m) # wstart = floor(k * W / n) # wend = ceil((k + 1) * W / n) # output[:, :, i, j, k] = # avg(input[:, :, dstart:dend, hstart: hend, wstart: wend]) # import paddle.fluid as fluid @@ -3823,12 +3898,41 @@ def adaptive_pool3d(input, - data = fluid.layers.data( - name='data', shape=[3, 32, 32, 32], dtype='float32') + data = fluid.data( + name='data', shape=[None, 3, 32, 32, 32], dtype='float32') pool_out = fluid.layers.adaptive_pool3d( input=data, pool_size=[3, 3, 3], pool_type='avg') + + # max adaptive pool3d + # suppose input data in shape of [N, C, D, H, W], `pool_size` is [l, m, n], + # output shape is [N, C, l, m, n], adaptive pool divides D, H and W dimensions + # of input data into l * m * n grids evenly and performs poolings in each + # grid to get output. + # adaptive max pool performs calculations as follows: + # + # for i in range(l): + # for j in range(m): + # for k in range(n): + # dstart = floor(i * D / l) + # dend = ceil((i + 1) * D / l) + # hstart = floor(j * H / m) + # hend = ceil((j + 1) * H / m) + # wstart = floor(k * W / n) + # wend = ceil((k + 1) * W / n) + # output[:, :, i, j, k] = + # max(input[:, :, dstart:dend, hstart: hend, wstart: wend]) + # + + import paddle.fluid as fluid + + data = fluid.data( + name='data', shape=[None, 3, 32, 32, 32], dtype='float32') + pool_out = fluid.layers.adaptive_pool3d( + input=data, + pool_size=[3, 3, 3], + pool_type='max') """ if pool_type not in ["max", "avg"]: raise ValueError(
@@ -4538,9 +4642,10 @@ def spectral_norm(weight, dim=0, power_iters=1, eps=1e-12, name=None): """ **Spectral Normalization Layer** - This layer calculates the spectral normalization value of weight parameters of + This operation calculates the spectral normalization value of weight parameters of fc, conv1d, conv2d, conv3d layers which should be 2-D, 3-D, 4-D, 5-D - Parameters. Calculations are showed as follows. + Parameters. The output tensor will be in the same shape as the input tensor. + Calculations are shown as follows. Step 1: Generate vector U in shape of [H], and V in shape of [W]. @@ -4549,7 +4654,8 @@ Step 2: :attr:`power_iters` shoule be a positive interger, do following - calculations with U and V for :attr:`power_iters` rounds. + calculations with U and V for :attr:`power_iters` rounds. Calculations + as follows: .. math:: @@ -4574,18 +4680,20 @@ def spectral_norm(weight, dim=0, power_iters=1, eps=1e-12, name=None): dim(int): ${dim_comment} power_iters(int): ${power_iters_comment} eps(float): ${eps_comment} - name (str): The name of this layer. It is optional. + name(str, optional): For detailed information, please refer + to :ref:`api_guide_Name`. Usually name does not need to be set, and + it is None by default. Returns: Variable: A tensor variable of weight parameters after spectral normalization. + The data type and shape are the same as the input tensor. Examples: .. 
code-block:: python import paddle.fluid as fluid - weight = fluid.layers.data(name='weight', shape=[2, 8, 32, 32], - append_batch_size=False, dtype='float32') + weight = fluid.data(name='weight', shape=[2, 8, 32, 32], dtype='float32') x = fluid.layers.spectral_norm(weight=weight, dim=1, power_iters=2) """ helper = LayerHelper('spectral_norm', **locals())
@@ -14064,19 +14172,22 @@ def grid_sampler(x, grid, name=None): """ This operation samples input X by using bilinear interpolation based on flow field grid, which is usually gennerated by :code:`affine_grid` . The grid of - shape [N, H, W, 2] is the concatenation of (grid_x, grid_y) coordinates - with shape [N, H, W] each, where grid_x is indexing the 4th dimension - (in width dimension) of input data x and grid_y is indexng the 3rd + shape [N, H, W, 2] is the concatenation of (x, y) coordinates + with shape [N, H, W] each, where x is indexing the 4th dimension + (in width dimension) of input data x and y is indexing the 3rd dimention (in height dimension), finally results is the bilinear - interpolation value of 4 nearest corner points. + interpolation value of 4 nearest corner points. The output tensor + shape will be [N, C, H, W]. .. code-block:: text Step 1: Get (x, y) grid coordinates and scale to [0, H-1/W-1]. - grid_x = 0.5 * (grid[:, :, :, 0] + 1) * (W - 1) - grid_y = 0.5 * (grid[:, :, :, 1] + 1) * (H - 1) + .. code-block:: text + + grid_x = 0.5 * (grid[:, :, :, 0] + 1) * (W - 1) + grid_y = 0.5 * (grid[:, :, :, 1] + 1) * (H - 1) Step 2: Indices input data X with grid (x, y) in each [H, W] area, and bilinear @@ -14111,13 +14222,20 @@ def grid_sampler(x, grid, name=None): + ws * d_e * d_n + es * d_w * d_n Args: - x(Variable): Input data of shape [N, C, H, W]. - grid(Variable): Input grid tensor of shape [N, H, W, 2]. - name (str, default None): The name of this layer. + x(Variable): The input tensor, which is a 4-D tensor with shape + [N, C, H, W], N is the batch size, C is the channel + number, H and W are the feature height and width. + The data type is float32 or float64. + grid(Variable): Input grid tensor of shape [N, H, W, 2]. The + data type is float32 or float64. + name(str, optional): For detailed information, please refer + to :ref:`api_guide_Name`. Usually name does not need to be set, and + it is None by default. Returns: Variable: Output of shape [N, C, H, W] data samples input X - using bilnear interpolation based on input grid. + using bilinear interpolation based on input grid. + The data type is the same as the input tensor. Examples: @@ -14125,7 +14243,8 @@ def grid_sampler(x, grid, name=None): .. code-block:: python import paddle.fluid as fluid - x = fluid.layers.data(name='x', shape=[10, 32, 32], dtype='float32') + # use with affine_grid + x = fluid.data(name='x', shape=[None, 10, 32, 32], dtype='float32') theta = fluid.layers.data(name='theta', shape=[2, 3], dtype='float32') grid = fluid.layers.affine_grid(theta=theta, out_shape=[3, 10, 32, 32]) out = fluid.layers.grid_sampler(x=x, grid=grid)
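Steps 1 and 2 of the grid_sampler documentation amount to the following NumPy sketch (simplified: corner indices are clamped, rather than handling borders exactly as the kernel does):

.. code-block:: python

    import numpy as np

    def grid_sample_ref(x, grid):
        # x: [N, C, H, W]; grid: [N, H, W, 2] with values in [-1, 1].
        n_, c_, h_, w_ = x.shape
        gx = 0.5 * (grid[..., 0] + 1) * (w_ - 1)  # Step 1: scale x to [0, W-1]
        gy = 0.5 * (grid[..., 1] + 1) * (h_ - 1)  # Step 1: scale y to [0, H-1]
        x0 = np.clip(np.floor(gx).astype(int), 0, w_ - 2)
        y0 = np.clip(np.floor(gy).astype(int), 0, h_ - 2)
        dx, dy = gx - x0, gy - y0                 # fractional offsets
        out = np.zeros_like(x)
        for n in range(n_):
            for c in range(c_):
                p = x[n, c]
                # Step 2: bilinear blend of the 4 nearest corner points
                out[n, c] = (p[y0[n], x0[n]] * (1 - dx[n]) * (1 - dy[n])
                             + p[y0[n], x0[n] + 1] * dx[n] * (1 - dy[n])
                             + p[y0[n] + 1, x0[n]] * (1 - dx[n]) * dy[n]
                             + p[y0[n] + 1, x0[n] + 1] * dx[n] * dy[n])
        return out

    x = np.random.rand(1, 2, 4, 4).astype('float32')
    grid = np.random.uniform(-1, 1, (1, 4, 4, 2)).astype('float32')
    print(grid_sample_ref(x, grid).shape)  # (1, 2, 4, 4)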
@@ -14509,11 +14628,13 @@ def temporal_shift(x, seg_num, shift_ratio=0.25, name=None): x(Variable): ${x_comment} seg_num(int): ${seg_num_comment} shift_ratio(float): ${shift_ratio_comment} - name (str, default None): The name of this layer. + name(str, optional): For detailed information, please refer + to :ref:`api_guide_Name`. Usually name does not need to be set, and + it is None by default. Returns: out(Variable): The temporal shifting result is a tensor variable with the - same shape and same type as the input. + same shape and same data type as the input. 
Raises: TypeError: seg_num must be int type. @@ -14522,7 +14643,7 @@ def temporal_shift(x, seg_num, shift_ratio=0.25, name=None): .. code-block:: python import paddle.fluid as fluid - input = fluid.layers.data(name='input', shape=[4,2,2], dtype='float32') + input = fluid.data(name='input', shape=[None,4,2,2], dtype='float32') out = fluid.layers.temporal_shift(x=input, seg_num=2, shift_ratio=0.2) """ helper = LayerHelper("temporal_shift", **locals())
@@ -14953,16 +15074,18 @@ def kldiv_loss(x, target, reduction='mean', name=None): x (Variable): ${x_comment} target (Variable): ${target_comment} reduction (Variable): ${reduction_comment} - name (str, default None): The name of this layer. + name(str, optional): For detailed information, please refer + to :ref:`api_guide_Name`. Usually name does not need to be set, and + it is None by default. Returns: - kldiv\_loss (Variable): The KL divergence loss. + Variable(Tensor): The KL divergence loss. The data type is the same as the input tensor. Examples: .. code-block:: python import paddle.fluid as fluid - x = fluid.layers.data(name='x', shape=[4,2,2], dtype='float32') + x = fluid.data(name='x', shape=[None,4,2,2], dtype='float32') target = fluid.layers.data(name='target', shape=[4,2,2], dtype='float32') loss = fluid.layers.kldiv_loss(x=x, target=target, reduction='batchmean') """
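To close the loop on kldiv_loss: assuming x holds log-probabilities, target holds probabilities, and reduction is one of 'none', 'batchmean', 'mean' or 'sum' (the 'batchmean' case matches the example above), a hedged NumPy sketch of the semantics:

.. code-block:: python

    import numpy as np

    def kldiv_loss_ref(x, target, reduction='mean'):
        loss = target * (np.log(target) - x)   # elementwise KL term
        if reduction == 'none':
            return loss
        if reduction == 'batchmean':
            return loss.sum() / x.shape[0]     # summed, then divided by batch size
        if reduction == 'sum':
            return loss.sum()
        return loss.mean()                     # 'mean'

    x = np.log(np.random.dirichlet(np.ones(4), size=3)).astype('float32')
    target = np.random.dirichlet(np.ones(4), size=3).astype('float32')
    print(kldiv_loss_ref(x, target, reduction='batchmean'))

-- GitLab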