From 534cf892ff9ce1d8cc2189b0797242464337e968 Mon Sep 17 00:00:00 2001
From: zhongpu <2013000149@qq.com>
Date: Fri, 11 Oct 2019 14:35:20 +0800
Subject: [PATCH] Cherry pick en doc, test=release/1.6, test=document_fix
(#20481)
---
paddle/fluid/API.spec | 34 +-
python/paddle/fluid/dygraph/base.py | 14 +-
python/paddle/fluid/dygraph/nn.py | 754 ++++++++++++++++------------
python/paddle/fluid/layers/nn.py | 284 ++++++-----
4 files changed, 627 insertions(+), 459 deletions(-)
diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec
index 1fea11413cc..4a6629e603b 100644
--- a/paddle/fluid/API.spec
+++ b/paddle/fluid/API.spec
@@ -141,7 +141,7 @@ paddle.fluid.layers.conv2d (ArgSpec(args=['input', 'num_filters', 'filter_size',
paddle.fluid.layers.conv3d (ArgSpec(args=['input', 'num_filters', 'filter_size', 'stride', 'padding', 'dilation', 'groups', 'param_attr', 'bias_attr', 'use_cudnn', 'act', 'name', 'data_format'], varargs=None, keywords=None, defaults=(1, 0, 1, None, None, None, True, None, None, 'NCDHW')), ('document', 'feff9c8ebb4d4d0be5345f9042f57c8e'))
paddle.fluid.layers.sequence_pool (ArgSpec(args=['input', 'pool_type', 'is_test', 'pad_value'], varargs=None, keywords=None, defaults=(False, 0.0)), ('document', '5a709f7ef3fdb8fc819d09dc4fbada9a'))
paddle.fluid.layers.sequence_softmax (ArgSpec(args=['input', 'use_cudnn', 'name'], varargs=None, keywords=None, defaults=(False, None)), ('document', 'eaa9d0bbd3d4e017c8bc4ecdac483711'))
-paddle.fluid.layers.softmax (ArgSpec(args=['input', 'use_cudnn', 'name', 'axis'], varargs=None, keywords=None, defaults=(False, None, -1)), ('document', '7ccaea1b93fe4f7387a6036692986c6b'))
+paddle.fluid.layers.softmax (ArgSpec(args=['input', 'use_cudnn', 'name', 'axis'], varargs=None, keywords=None, defaults=(False, None, -1)), ('document', 'f7d6a5173c92c23f9a25cbc58a0eb577'))
paddle.fluid.layers.pool2d (ArgSpec(args=['input', 'pool_size', 'pool_type', 'pool_stride', 'pool_padding', 'global_pooling', 'use_cudnn', 'ceil_mode', 'name', 'exclusive', 'data_format'], varargs=None, keywords=None, defaults=(-1, 'max', 1, 0, False, True, False, None, True, 'NCHW')), ('document', 'daf9ae55b2d54bd5f35acb397fd1e1b5'))
paddle.fluid.layers.pool3d (ArgSpec(args=['input', 'pool_size', 'pool_type', 'pool_stride', 'pool_padding', 'global_pooling', 'use_cudnn', 'ceil_mode', 'name', 'exclusive', 'data_format'], varargs=None, keywords=None, defaults=(-1, 'max', 1, 0, False, True, False, None, True, 'NCDHW')), ('document', 'df8edcb8dd020fdddf778c9f613dc650'))
paddle.fluid.layers.adaptive_pool2d (ArgSpec(args=['input', 'pool_size', 'pool_type', 'require_index', 'name'], varargs=None, keywords=None, defaults=('max', False, None)), ('document', 'd873fdd73bcd74f9203d347cfb90de75'))
@@ -184,10 +184,10 @@ paddle.fluid.layers.hsigmoid (ArgSpec(args=['input', 'label', 'num_classes', 'pa
paddle.fluid.layers.beam_search (ArgSpec(args=['pre_ids', 'pre_scores', 'ids', 'scores', 'beam_size', 'end_id', 'level', 'is_accumulated', 'name', 'return_parent_idx'], varargs=None, keywords=None, defaults=(0, True, None, False)), ('document', '1270395ce97a4e1b556104abbb14f096'))
paddle.fluid.layers.row_conv (ArgSpec(args=['input', 'future_context_size', 'param_attr', 'act'], varargs=None, keywords=None, defaults=(None, None)), ('document', 'a6477957b44907787b3c74157400b80c'))
paddle.fluid.layers.multiplex (ArgSpec(args=['inputs', 'index'], varargs=None, keywords=None, defaults=None), ('document', '2c4d1ae83da6ed35e3b36ba1b3b51d23'))
-paddle.fluid.layers.layer_norm (ArgSpec(args=['input', 'scale', 'shift', 'begin_norm_axis', 'epsilon', 'param_attr', 'bias_attr', 'act', 'name'], varargs=None, keywords=None, defaults=(True, True, 1, 1e-05, None, None, None, None)), ('document', '79797f827d89ae72c77960e9696883a9'))
+paddle.fluid.layers.layer_norm (ArgSpec(args=['input', 'scale', 'shift', 'begin_norm_axis', 'epsilon', 'param_attr', 'bias_attr', 'act', 'name'], varargs=None, keywords=None, defaults=(True, True, 1, 1e-05, None, None, None, None)), ('document', '678de6d6d0c93da74189990b039daae8'))
paddle.fluid.layers.group_norm (ArgSpec(args=['input', 'groups', 'epsilon', 'param_attr', 'bias_attr', 'act', 'data_layout', 'name'], varargs=None, keywords=None, defaults=(1e-05, None, None, None, 'NCHW', None)), ('document', '87dd4b818f102bc1a780e1804c28bd38'))
paddle.fluid.layers.spectral_norm (ArgSpec(args=['weight', 'dim', 'power_iters', 'eps', 'name'], varargs=None, keywords=None, defaults=(0, 1, 1e-12, None)), ('document', '7b3d14d6707d878923847ec617d7d521'))
-paddle.fluid.layers.softmax_with_cross_entropy (ArgSpec(args=['logits', 'label', 'soft_label', 'ignore_index', 'numeric_stable_mode', 'return_softmax', 'axis'], varargs=None, keywords=None, defaults=(False, -100, True, False, -1)), ('document', '54e1675aa0364f4a78fa72804ec0f413'))
+paddle.fluid.layers.softmax_with_cross_entropy (ArgSpec(args=['logits', 'label', 'soft_label', 'ignore_index', 'numeric_stable_mode', 'return_softmax', 'axis'], varargs=None, keywords=None, defaults=(False, -100, True, False, -1)), ('document', '6992e4140d667fdf816d0617648b5c00'))
paddle.fluid.layers.smooth_l1 (ArgSpec(args=['x', 'y', 'inside_weight', 'outside_weight', 'sigma'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', 'ecb75c1b00c4c76c98b482f633b7a10c'))
paddle.fluid.layers.one_hot (ArgSpec(args=['input', 'depth', 'allow_out_of_range'], varargs=None, keywords=None, defaults=(False,)), ('document', 'cdf5dc2078f1e20dc61dd0bec7e28a29'))
paddle.fluid.layers.autoincreased_step_counter (ArgSpec(args=['counter_name', 'begin', 'step'], varargs=None, keywords=None, defaults=(None, 1, 1)), ('document', 'd016c137beb9a4528b7378b437d00151'))
@@ -221,7 +221,7 @@ paddle.fluid.layers.selu (ArgSpec(args=['x', 'scale', 'alpha', 'name'], varargs=
paddle.fluid.layers.log (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '02f668664e3bfc4df6c00d7363467140'))
paddle.fluid.layers.crop (ArgSpec(args=['x', 'shape', 'offsets', 'name'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '32196a194f757b4da114a595a5bc6414'))
paddle.fluid.layers.crop_tensor (ArgSpec(args=['x', 'shape', 'offsets', 'name'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', 'd460aaf35afbbeb9beea4789aa6e4343'))
-paddle.fluid.layers.rank_loss (ArgSpec(args=['label', 'left', 'right', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '8eb36596bb43d7a907d3397c7aedbdb3'))
+paddle.fluid.layers.rank_loss (ArgSpec(args=['label', 'left', 'right', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '6d49ba251e23f32cb09df54a851bb960'))
paddle.fluid.layers.margin_rank_loss (ArgSpec(args=['label', 'left', 'right', 'margin', 'name'], varargs=None, keywords=None, defaults=(0.1, None)), ('document', '1a177f30e5013fae7ee6c45860cf4946'))
paddle.fluid.layers.elu (ArgSpec(args=['x', 'alpha', 'name'], varargs=None, keywords=None, defaults=(1.0, None)), ('document', '9af1926c06711eacef9e82d7a9e4d308'))
paddle.fluid.layers.relu6 (ArgSpec(args=['x', 'threshold', 'name'], varargs=None, keywords=None, defaults=(6.0, None)), ('document', '538fc860b2a1734e118b94e4a1a3ee67'))
@@ -586,8 +586,8 @@ paddle.fluid.dygraph.Layer.sublayers (ArgSpec(args=['self', 'include_sublayers']
paddle.fluid.dygraph.Layer.train (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.__impl__ (ArgSpec(args=['func'], varargs=None, keywords=None, defaults=()), ('document', '75d1d3afccc8b39cdebf05cb1f5969f9'))
paddle.fluid.dygraph.guard (ArgSpec(args=['place'], varargs=None, keywords=None, defaults=(None,)), ('document', '7071320ffe2eec9aacdae574951278c6'))
-paddle.fluid.dygraph.to_variable (ArgSpec(args=['value', 'block', 'name'], varargs=None, keywords=None, defaults=(None, None)), ('document', '0e69fa3666f15dd01b6e3e270b9371cd'))
-paddle.fluid.dygraph.Conv2D ('paddle.fluid.dygraph.nn.Conv2D', ('document', '0b6acb9cc7fbb4f5b129e1f6dd985581'))
+paddle.fluid.dygraph.to_variable (ArgSpec(args=['value', 'block', 'name'], varargs=None, keywords=None, defaults=(None, None)), ('document', '7df6297d66295bdc933e3982caa6f1a8'))
+paddle.fluid.dygraph.Conv2D ('paddle.fluid.dygraph.nn.Conv2D', ('document', '10915f3c643e232d9c6789ce20a96869'))
paddle.fluid.dygraph.Conv2D.__init__ (ArgSpec(args=['self', 'name_scope', 'num_filters', 'filter_size', 'stride', 'padding', 'dilation', 'groups', 'param_attr', 'bias_attr', 'use_cudnn', 'act', 'dtype'], varargs=None, keywords=None, defaults=(1, 0, 1, None, None, None, True, None, 'float32')), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.Conv2D.add_parameter (ArgSpec(args=['self', 'name', 'parameter'], varargs=None, keywords=None, defaults=None), ('document', 'f35ab374c7d5165c3daf3bd64a5a2ec1'))
paddle.fluid.dygraph.Conv2D.add_sublayer (ArgSpec(args=['self', 'name', 'sublayer'], varargs=None, keywords=None, defaults=None), ('document', '839ff3c0534677ba6ad8735c3fd4e995'))
@@ -621,7 +621,7 @@ paddle.fluid.dygraph.Conv3D.set_dict (ArgSpec(args=['self', 'stat_dict', 'includ
paddle.fluid.dygraph.Conv3D.state_dict (ArgSpec(args=['self', 'destination', 'include_sublayers'], varargs=None, keywords=None, defaults=(None, True)), ('document', '9d689f44592cd22812c7ec06a9654eac'))
paddle.fluid.dygraph.Conv3D.sublayers (ArgSpec(args=['self', 'include_sublayers'], varargs=None, keywords=None, defaults=(True,)), ('document', '00a881005ecbc96578faf94513bf0d62'))
paddle.fluid.dygraph.Conv3D.train (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
-paddle.fluid.dygraph.Pool2D ('paddle.fluid.dygraph.nn.Pool2D', ('document', '50e6fd200e42859daf2924ecb0561ada'))
+paddle.fluid.dygraph.Pool2D ('paddle.fluid.dygraph.nn.Pool2D', ('document', 'ea0b4ef5fd7befb7841cc2f17e66007a'))
paddle.fluid.dygraph.Pool2D.__init__ (ArgSpec(args=['self', 'name_scope', 'pool_size', 'pool_type', 'pool_stride', 'pool_padding', 'global_pooling', 'use_cudnn', 'ceil_mode', 'exclusive', 'dtype'], varargs=None, keywords=None, defaults=(-1, 'max', 1, 0, False, True, False, True, VarType.FP32)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.Pool2D.add_parameter (ArgSpec(args=['self', 'name', 'parameter'], varargs=None, keywords=None, defaults=None), ('document', 'f35ab374c7d5165c3daf3bd64a5a2ec1'))
paddle.fluid.dygraph.Pool2D.add_sublayer (ArgSpec(args=['self', 'name', 'sublayer'], varargs=None, keywords=None, defaults=None), ('document', '839ff3c0534677ba6ad8735c3fd4e995'))
@@ -638,7 +638,7 @@ paddle.fluid.dygraph.Pool2D.set_dict (ArgSpec(args=['self', 'stat_dict', 'includ
paddle.fluid.dygraph.Pool2D.state_dict (ArgSpec(args=['self', 'destination', 'include_sublayers'], varargs=None, keywords=None, defaults=(None, True)), ('document', '9d689f44592cd22812c7ec06a9654eac'))
paddle.fluid.dygraph.Pool2D.sublayers (ArgSpec(args=['self', 'include_sublayers'], varargs=None, keywords=None, defaults=(True,)), ('document', '00a881005ecbc96578faf94513bf0d62'))
paddle.fluid.dygraph.Pool2D.train (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
-paddle.fluid.dygraph.FC ('paddle.fluid.dygraph.nn.FC', ('document', '2f73ae00e57c67454c6aa7e911d9bfd6'))
+paddle.fluid.dygraph.FC ('paddle.fluid.dygraph.nn.FC', ('document', '6f4d1855a05f99f5500e042212e5c605'))
paddle.fluid.dygraph.FC.__init__ (ArgSpec(args=['self', 'name_scope', 'size', 'num_flatten_dims', 'param_attr', 'bias_attr', 'act', 'is_test', 'dtype'], varargs=None, keywords=None, defaults=(1, None, None, None, False, 'float32')), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.FC.add_parameter (ArgSpec(args=['self', 'name', 'parameter'], varargs=None, keywords=None, defaults=None), ('document', 'f35ab374c7d5165c3daf3bd64a5a2ec1'))
paddle.fluid.dygraph.FC.add_sublayer (ArgSpec(args=['self', 'name', 'sublayer'], varargs=None, keywords=None, defaults=None), ('document', '839ff3c0534677ba6ad8735c3fd4e995'))
@@ -655,7 +655,7 @@ paddle.fluid.dygraph.FC.set_dict (ArgSpec(args=['self', 'stat_dict', 'include_su
paddle.fluid.dygraph.FC.state_dict (ArgSpec(args=['self', 'destination', 'include_sublayers'], varargs=None, keywords=None, defaults=(None, True)), ('document', '9d689f44592cd22812c7ec06a9654eac'))
paddle.fluid.dygraph.FC.sublayers (ArgSpec(args=['self', 'include_sublayers'], varargs=None, keywords=None, defaults=(True,)), ('document', '00a881005ecbc96578faf94513bf0d62'))
paddle.fluid.dygraph.FC.train (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
-paddle.fluid.dygraph.BatchNorm ('paddle.fluid.dygraph.nn.BatchNorm', ('document', '390fb9b986423ec6680731ffc7cf24ab'))
+paddle.fluid.dygraph.BatchNorm ('paddle.fluid.dygraph.nn.BatchNorm', ('document', 'f26599d75e3eba36c5dd3224a33009d8'))
paddle.fluid.dygraph.BatchNorm.__init__ (ArgSpec(args=['self', 'name_scope', 'num_channels', 'act', 'is_test', 'momentum', 'epsilon', 'param_attr', 'bias_attr', 'dtype', 'data_layout', 'in_place', 'moving_mean_name', 'moving_variance_name', 'do_model_average_for_mean_and_var', 'fuse_with_relu', 'use_global_stats', 'trainable_statistics'], varargs=None, keywords=None, defaults=(None, False, 0.9, 1e-05, None, None, 'float32', 'NCHW', False, None, None, False, False, False, False)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.BatchNorm.add_parameter (ArgSpec(args=['self', 'name', 'parameter'], varargs=None, keywords=None, defaults=None), ('document', 'f35ab374c7d5165c3daf3bd64a5a2ec1'))
paddle.fluid.dygraph.BatchNorm.add_sublayer (ArgSpec(args=['self', 'name', 'sublayer'], varargs=None, keywords=None, defaults=None), ('document', '839ff3c0534677ba6ad8735c3fd4e995'))
@@ -672,7 +672,7 @@ paddle.fluid.dygraph.BatchNorm.set_dict (ArgSpec(args=['self', 'stat_dict', 'inc
paddle.fluid.dygraph.BatchNorm.state_dict (ArgSpec(args=['self', 'destination', 'include_sublayers'], varargs=None, keywords=None, defaults=(None, True)), ('document', '9d689f44592cd22812c7ec06a9654eac'))
paddle.fluid.dygraph.BatchNorm.sublayers (ArgSpec(args=['self', 'include_sublayers'], varargs=None, keywords=None, defaults=(True,)), ('document', '00a881005ecbc96578faf94513bf0d62'))
paddle.fluid.dygraph.BatchNorm.train (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
-paddle.fluid.dygraph.Embedding ('paddle.fluid.dygraph.nn.Embedding', ('document', 'b1b1ed9dc2125c3e16ee08113605fcb4'))
+paddle.fluid.dygraph.Embedding ('paddle.fluid.dygraph.nn.Embedding', ('document', 'dec90fba70f93ff5dac2f4ed0704dbdd'))
paddle.fluid.dygraph.Embedding.__init__ (ArgSpec(args=['self', 'name_scope', 'size', 'is_sparse', 'is_distributed', 'padding_idx', 'param_attr', 'dtype'], varargs=None, keywords=None, defaults=(False, False, None, None, 'float32')), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.Embedding.add_parameter (ArgSpec(args=['self', 'name', 'parameter'], varargs=None, keywords=None, defaults=None), ('document', 'f35ab374c7d5165c3daf3bd64a5a2ec1'))
paddle.fluid.dygraph.Embedding.add_sublayer (ArgSpec(args=['self', 'name', 'sublayer'], varargs=None, keywords=None, defaults=None), ('document', '839ff3c0534677ba6ad8735c3fd4e995'))
@@ -706,7 +706,7 @@ paddle.fluid.dygraph.GRUUnit.set_dict (ArgSpec(args=['self', 'stat_dict', 'inclu
paddle.fluid.dygraph.GRUUnit.state_dict (ArgSpec(args=['self', 'destination', 'include_sublayers'], varargs=None, keywords=None, defaults=(None, True)), ('document', '9d689f44592cd22812c7ec06a9654eac'))
paddle.fluid.dygraph.GRUUnit.sublayers (ArgSpec(args=['self', 'include_sublayers'], varargs=None, keywords=None, defaults=(True,)), ('document', '00a881005ecbc96578faf94513bf0d62'))
paddle.fluid.dygraph.GRUUnit.train (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
-paddle.fluid.dygraph.LayerNorm ('paddle.fluid.dygraph.nn.LayerNorm', ('document', '8bc39f59fe2d3713bc143fdf1222a63b'))
+paddle.fluid.dygraph.LayerNorm ('paddle.fluid.dygraph.nn.LayerNorm', ('document', '0d4e428afdc5a3c989ec3270967c3263'))
paddle.fluid.dygraph.LayerNorm.__init__ (ArgSpec(args=['self', 'name_scope', 'scale', 'shift', 'begin_norm_axis', 'epsilon', 'param_attr', 'bias_attr', 'act'], varargs=None, keywords=None, defaults=(True, True, 1, 1e-05, None, None, None)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.LayerNorm.add_parameter (ArgSpec(args=['self', 'name', 'parameter'], varargs=None, keywords=None, defaults=None), ('document', 'f35ab374c7d5165c3daf3bd64a5a2ec1'))
paddle.fluid.dygraph.LayerNorm.add_sublayer (ArgSpec(args=['self', 'name', 'sublayer'], varargs=None, keywords=None, defaults=None), ('document', '839ff3c0534677ba6ad8735c3fd4e995'))
@@ -723,7 +723,7 @@ paddle.fluid.dygraph.LayerNorm.set_dict (ArgSpec(args=['self', 'stat_dict', 'inc
paddle.fluid.dygraph.LayerNorm.state_dict (ArgSpec(args=['self', 'destination', 'include_sublayers'], varargs=None, keywords=None, defaults=(None, True)), ('document', '9d689f44592cd22812c7ec06a9654eac'))
paddle.fluid.dygraph.LayerNorm.sublayers (ArgSpec(args=['self', 'include_sublayers'], varargs=None, keywords=None, defaults=(True,)), ('document', '00a881005ecbc96578faf94513bf0d62'))
paddle.fluid.dygraph.LayerNorm.train (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
-paddle.fluid.dygraph.NCE ('paddle.fluid.dygraph.nn.NCE', ('document', '993aeea9be436e9c709a758795cb23e9'))
+paddle.fluid.dygraph.NCE ('paddle.fluid.dygraph.nn.NCE', ('document', '148e58ba1698e0cd60a3490fd4188d04'))
paddle.fluid.dygraph.NCE.__init__ (ArgSpec(args=['self', 'name_scope', 'num_total_classes', 'sample_weight', 'param_attr', 'bias_attr', 'num_neg_samples', 'sampler', 'custom_dist', 'seed', 'is_sparse'], varargs=None, keywords=None, defaults=(None, None, None, None, 'uniform', None, 0, False)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.NCE.add_parameter (ArgSpec(args=['self', 'name', 'parameter'], varargs=None, keywords=None, defaults=None), ('document', 'f35ab374c7d5165c3daf3bd64a5a2ec1'))
paddle.fluid.dygraph.NCE.add_sublayer (ArgSpec(args=['self', 'name', 'sublayer'], varargs=None, keywords=None, defaults=None), ('document', '839ff3c0534677ba6ad8735c3fd4e995'))
@@ -740,7 +740,7 @@ paddle.fluid.dygraph.NCE.set_dict (ArgSpec(args=['self', 'stat_dict', 'include_s
paddle.fluid.dygraph.NCE.state_dict (ArgSpec(args=['self', 'destination', 'include_sublayers'], varargs=None, keywords=None, defaults=(None, True)), ('document', '9d689f44592cd22812c7ec06a9654eac'))
paddle.fluid.dygraph.NCE.sublayers (ArgSpec(args=['self', 'include_sublayers'], varargs=None, keywords=None, defaults=(True,)), ('document', '00a881005ecbc96578faf94513bf0d62'))
paddle.fluid.dygraph.NCE.train (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
-paddle.fluid.dygraph.PRelu ('paddle.fluid.dygraph.nn.PRelu', ('document', 'da956af1676b08bf15553751a3643b55'))
+paddle.fluid.dygraph.PRelu ('paddle.fluid.dygraph.nn.PRelu', ('document', '58141577833fedf619f2f324eea57e00'))
paddle.fluid.dygraph.PRelu.__init__ (ArgSpec(args=['self', 'name_scope', 'mode', 'param_attr'], varargs=None, keywords=None, defaults=(None,)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.PRelu.add_parameter (ArgSpec(args=['self', 'name', 'parameter'], varargs=None, keywords=None, defaults=None), ('document', 'f35ab374c7d5165c3daf3bd64a5a2ec1'))
paddle.fluid.dygraph.PRelu.add_sublayer (ArgSpec(args=['self', 'name', 'sublayer'], varargs=None, keywords=None, defaults=None), ('document', '839ff3c0534677ba6ad8735c3fd4e995'))
@@ -774,7 +774,7 @@ paddle.fluid.dygraph.BilinearTensorProduct.set_dict (ArgSpec(args=['self', 'stat
paddle.fluid.dygraph.BilinearTensorProduct.state_dict (ArgSpec(args=['self', 'destination', 'include_sublayers'], varargs=None, keywords=None, defaults=(None, True)), ('document', '9d689f44592cd22812c7ec06a9654eac'))
paddle.fluid.dygraph.BilinearTensorProduct.sublayers (ArgSpec(args=['self', 'include_sublayers'], varargs=None, keywords=None, defaults=(True,)), ('document', '00a881005ecbc96578faf94513bf0d62'))
paddle.fluid.dygraph.BilinearTensorProduct.train (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
-paddle.fluid.dygraph.Conv2DTranspose ('paddle.fluid.dygraph.nn.Conv2DTranspose', ('document', 'cf23c905abc00b07603dfa71a432d6f7'))
+paddle.fluid.dygraph.Conv2DTranspose ('paddle.fluid.dygraph.nn.Conv2DTranspose', ('document', 'cb5e718ff190ee82e9bd144585dd4707'))
paddle.fluid.dygraph.Conv2DTranspose.__init__ (ArgSpec(args=['self', 'name_scope', 'num_filters', 'output_size', 'filter_size', 'padding', 'stride', 'dilation', 'groups', 'param_attr', 'bias_attr', 'use_cudnn', 'act'], varargs=None, keywords=None, defaults=(None, None, 0, 1, 1, None, None, None, True, None)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.Conv2DTranspose.add_parameter (ArgSpec(args=['self', 'name', 'parameter'], varargs=None, keywords=None, defaults=None), ('document', 'f35ab374c7d5165c3daf3bd64a5a2ec1'))
paddle.fluid.dygraph.Conv2DTranspose.add_sublayer (ArgSpec(args=['self', 'name', 'sublayer'], varargs=None, keywords=None, defaults=None), ('document', '839ff3c0534677ba6ad8735c3fd4e995'))
@@ -808,7 +808,7 @@ paddle.fluid.dygraph.Conv3DTranspose.set_dict (ArgSpec(args=['self', 'stat_dict'
paddle.fluid.dygraph.Conv3DTranspose.state_dict (ArgSpec(args=['self', 'destination', 'include_sublayers'], varargs=None, keywords=None, defaults=(None, True)), ('document', '9d689f44592cd22812c7ec06a9654eac'))
paddle.fluid.dygraph.Conv3DTranspose.sublayers (ArgSpec(args=['self', 'include_sublayers'], varargs=None, keywords=None, defaults=(True,)), ('document', '00a881005ecbc96578faf94513bf0d62'))
paddle.fluid.dygraph.Conv3DTranspose.train (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
-paddle.fluid.dygraph.GroupNorm ('paddle.fluid.dygraph.nn.GroupNorm', ('document', '72c125b07bdd1e612607dc77039b2722'))
+paddle.fluid.dygraph.GroupNorm ('paddle.fluid.dygraph.nn.GroupNorm', ('document', 'fb75d41f9f6aa895557caf5315d876cc'))
paddle.fluid.dygraph.GroupNorm.__init__ (ArgSpec(args=['self', 'name_scope', 'groups', 'epsilon', 'param_attr', 'bias_attr', 'act', 'data_layout'], varargs=None, keywords=None, defaults=(1e-05, None, None, None, 'NCHW')), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.GroupNorm.add_parameter (ArgSpec(args=['self', 'name', 'parameter'], varargs=None, keywords=None, defaults=None), ('document', 'f35ab374c7d5165c3daf3bd64a5a2ec1'))
paddle.fluid.dygraph.GroupNorm.add_sublayer (ArgSpec(args=['self', 'name', 'sublayer'], varargs=None, keywords=None, defaults=None), ('document', '839ff3c0534677ba6ad8735c3fd4e995'))
@@ -825,7 +825,7 @@ paddle.fluid.dygraph.GroupNorm.set_dict (ArgSpec(args=['self', 'stat_dict', 'inc
paddle.fluid.dygraph.GroupNorm.state_dict (ArgSpec(args=['self', 'destination', 'include_sublayers'], varargs=None, keywords=None, defaults=(None, True)), ('document', '9d689f44592cd22812c7ec06a9654eac'))
paddle.fluid.dygraph.GroupNorm.sublayers (ArgSpec(args=['self', 'include_sublayers'], varargs=None, keywords=None, defaults=(True,)), ('document', '00a881005ecbc96578faf94513bf0d62'))
paddle.fluid.dygraph.GroupNorm.train (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
-paddle.fluid.dygraph.SpectralNorm ('paddle.fluid.dygraph.nn.SpectralNorm', ('document', '8f5cfbc431a8b4b44b605cde8b0381ef'))
+paddle.fluid.dygraph.SpectralNorm ('paddle.fluid.dygraph.nn.SpectralNorm', ('document', '20a09e11c24d6a96fbb98bce3800bebb'))
paddle.fluid.dygraph.SpectralNorm.__init__ (ArgSpec(args=['self', 'name_scope', 'dim', 'power_iters', 'eps', 'name'], varargs=None, keywords=None, defaults=(0, 1, 1e-12, None)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.SpectralNorm.add_parameter (ArgSpec(args=['self', 'name', 'parameter'], varargs=None, keywords=None, defaults=None), ('document', 'f35ab374c7d5165c3daf3bd64a5a2ec1'))
paddle.fluid.dygraph.SpectralNorm.add_sublayer (ArgSpec(args=['self', 'name', 'sublayer'], varargs=None, keywords=None, defaults=None), ('document', '839ff3c0534677ba6ad8735c3fd4e995'))
@@ -842,7 +842,7 @@ paddle.fluid.dygraph.SpectralNorm.set_dict (ArgSpec(args=['self', 'stat_dict', '
paddle.fluid.dygraph.SpectralNorm.state_dict (ArgSpec(args=['self', 'destination', 'include_sublayers'], varargs=None, keywords=None, defaults=(None, True)), ('document', '9d689f44592cd22812c7ec06a9654eac'))
paddle.fluid.dygraph.SpectralNorm.sublayers (ArgSpec(args=['self', 'include_sublayers'], varargs=None, keywords=None, defaults=(True,)), ('document', '00a881005ecbc96578faf94513bf0d62'))
paddle.fluid.dygraph.SpectralNorm.train (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
-paddle.fluid.dygraph.TreeConv ('paddle.fluid.dygraph.nn.TreeConv', ('document', '6e175a7bf2a43ae6c0f3a8a54bd69afe'))
+paddle.fluid.dygraph.TreeConv ('paddle.fluid.dygraph.nn.TreeConv', ('document', 'cd21d9c15581cf7339ad04794beaa85e'))
paddle.fluid.dygraph.TreeConv.__init__ (ArgSpec(args=['self', 'name_scope', 'output_size', 'num_filters', 'max_depth', 'act', 'param_attr', 'bias_attr', 'name'], varargs=None, keywords=None, defaults=(1, 2, 'tanh', None, None, None)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.TreeConv.add_parameter (ArgSpec(args=['self', 'name', 'parameter'], varargs=None, keywords=None, defaults=None), ('document', 'f35ab374c7d5165c3daf3bd64a5a2ec1'))
paddle.fluid.dygraph.TreeConv.add_sublayer (ArgSpec(args=['self', 'name', 'sublayer'], varargs=None, keywords=None, defaults=None), ('document', '839ff3c0534677ba6ad8735c3fd4e995'))
diff --git a/python/paddle/fluid/dygraph/base.py b/python/paddle/fluid/dygraph/base.py
index 762b65b551e..6e8dbc3b8bf 100644
--- a/python/paddle/fluid/dygraph/base.py
+++ b/python/paddle/fluid/dygraph/base.py
@@ -150,15 +150,15 @@ def _print_debug_msg(limit=5, is_test=False):
@framework.dygraph_only
def to_variable(value, block=None, name=None):
"""
- This function will create a variable from ndarray
+ This API creates a ``Variable`` object from a numpy\.ndarray or an existing Variable object.
- Args:
- value(ndarray): the numpy value need to be convert
- block(fluid.Block|None): which block this variable will be in
- name(str|None): Name of Variable
+ Parameters:
+ value(ndarray): The numpy\.ndarray object that needs to be converted. It can be multi-dimensional, and the data type is one of numpy\.{float16, float32, float64, int16, int32, int64, uint8, uint16}.
+ block(fluid.Block, optional): Which block this variable will be in. Default: None.
+ name(str, optional): The default value is None. Normally there is no need for the user to set this property. For more information, please refer to :ref:`api_guide_Name` .
- return:
- Variable: The variable created from given numpy
+ Returns:
+ Variable: ``Tensor`` created from the specified numpy\.ndarray object; its data type and shape are the same as ``value`` .
Examples:
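A minimal usage sketch for ``to_variable`` (assuming dygraph mode, consistent with the signature documented above):

.. code-block:: python

    import numpy as np
    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        x = np.ones([2, 2], np.float32)
        y = fluid.dygraph.to_variable(x)  # a Tensor with the same shape and dtype as x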
diff --git a/python/paddle/fluid/dygraph/nn.py b/python/paddle/fluid/dygraph/nn.py
index 7802dd3cb2f..6cce4727e2a 100644
--- a/python/paddle/fluid/dygraph/nn.py
+++ b/python/paddle/fluid/dygraph/nn.py
@@ -34,16 +34,18 @@ __all__ = [
class Conv2D(layers.Layer):
"""
+ This interface is used to construct a callable object of the ``Conv2D`` class.
+ For more details, refer to code examples.
The convolution2D layer calculates the output based on the input, filter
and strides, paddings, dilations, groups parameters. Input and
Output are in NCHW format, where N is batch size, C is the number of
- channels, H is the height of the feature, and W is the width of the feature.
- Filter is in MCHW format, where M is the number of output image channels,
- C is the number of input image channels, H is the height of the filter,
+ feature maps, H is the height of the feature map, and W is the width of the feature map.
+ Filter's shape is [MCHW], where M is the number of output feature maps,
+ C is the number of input feature maps, H is the height of the filter,
and W is the width of the filter. If the groups is greater than 1,
- C will equal the number of input image channels divided by the groups.
+ C will equal the number of input feature maps divided by the groups.
Please refer to UFLDL's `convolution
- `
+ <http://ufldl.stanford.edu/tutorial/supervised/FeatureExtractionUsingConvolution/>`_
for more details.
If bias attribution and activation type are provided, bias is added to the
output of the convolution, and the corresponding activation function is
@@ -53,14 +55,14 @@ class Conv2D(layers.Layer):
.. math::
- Out = \sigma (W \\ast X + b)
+ Out = \\sigma (W \\ast X + b)
Where:
- * :math:`X`: Input value, a tensor with NCHW format.
- * :math:`W`: Filter value, a tensor with MCHW format.
+ * :math:`X`: Input value, a ``Tensor`` with NCHW format.
+ * :math:`W`: Filter value, a ``Tensor`` with shape [MCHW] .
* :math:`\\ast`: Convolution operation.
- * :math:`b`: Bias value, a 2-D tensor with shape [M, 1].
+ * :math:`b`: Bias value, a 2-D ``Tensor`` with shape [M, 1].
* :math:`\\sigma`: Activation function.
* :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.
@@ -84,48 +86,52 @@ class Conv2D(layers.Layer):
W_{out}&= \\frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (W_f - 1) + 1))}{strides[1]} + 1
Parameters:
- name_scope(str) : The name for this class.
+ name_scope(str): The name for this class.
num_filters(int): The number of filter. It is as same as the output
- image channel.
- filter_size (int|tuple|None): The filter size. If filter_size is a tuple,
+ feature map.
+ filter_size (int or tuple): The filter size. If filter_size is a tuple,
it must contain two integers, (filter_size_H, filter_size_W).
Otherwise, the filter will be a square.
- stride (int|tuple): The stride size. If stride is a tuple, it must
+ stride (int or tuple, optional): The stride size. If stride is a tuple, it must
contain two integers, (stride_H, stride_W). Otherwise, the
- stride_H = stride_W = stride. Default: stride = 1.
- padding (int|tuple): The padding size. If padding is a tuple, it must
+ stride_H = stride_W = stride. Default: 1.
+ padding (int or tuple, optional): The padding size. If padding is a tuple, it must
contain two integers, (padding_H, padding_W). Otherwise, the
- padding_H = padding_W = padding. Default: padding = 0.
- dilation (int|tuple): The dilation size. If dilation is a tuple, it must
+ padding_H = padding_W = padding. Default: 0.
+ dilation (int or tuple, optional): The dilation size. If dilation is a tuple, it must
contain two integers, (dilation_H, dilation_W). Otherwise, the
- dilation_H = dilation_W = dilation. Default: dilation = 1.
- groups (int): The groups number of the Conv2d Layer. According to grouped
+ dilation_H = dilation_W = dilation. Default: 1.
+ groups (int, optional): The groups number of the Conv2d Layer. According to grouped
convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
the first half of the filters is only connected to the first half
of the input channels, while the second half of the filters is only
- connected to the second half of the input channels. Default: groups=1.
- param_attr (ParamAttr|None): The parameter attribute for learnable parameters/weights
+ connected to the second half of the input channels. Default: 1.
+ param_attr (ParamAttr, optional): The parameter attribute for learnable weights(Parameter)
of conv2d. If it is set to None or one attribute of ParamAttr, conv2d
will create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with :math:`Normal(0.0, std)`,
and the :math:`std` is :math:`(\\frac{2.0 }{filter\_elem\_num})^{0.5}`. Default: None.
- bias_attr (ParamAttr|bool|None): The parameter attribute for the bias of conv2d.
+ bias_attr (ParamAttr or bool, optional): The attribute for the bias of conv2d.
If it is set to False, no bias will be added to the output units.
If it is set to None or one attribute of ParamAttr, conv2d
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
- use_cudnn (bool): Use cudnn kernel or not, it is valid only when the cudnn
- library is installed. Default: True
- act (str): Activation type, if it is set to None, activation is not appended.
- Default: None
+ use_cudnn (bool, optional): Use cudnn kernel or not, it is valid only when the cudnn
+ library is installed. Default: True.
+ act (str, optional): Activation type, if it is set to None, activation is not appended.
+ Default: None.
+ dtype (str, optional): Data type, it can be "float32" or "float64". Default: "float32".
- Attributes:
- weight (Parameter): the learnable weights of filter of this layer.
- bias (Parameter|None): the learnable bias of this layer.
+ Attribute:
+ **weight** (Parameter): the learnable weights of filter of this layer.
+
+ **bias** (Parameter or None): the learnable bias of this layer.
+ Returns:
+ None
+
Raises:
- ValueError: If the shapes of input, filter_size, stride, padding and
- groups mismatch.
+ ValueError: if ``use_cudnn`` is not a bool value.
Examples:
.. code-block:: python
@@ -135,11 +141,11 @@ class Conv2D(layers.Layer):
from paddle.fluid.dygraph import Conv2D
import numpy as np
- data = np.random.uniform( -1, 1, [10, 3, 32, 32] ).astype('float32')
+ data = np.random.uniform(-1, 1, [10, 3, 32, 32]).astype('float32')
with fluid.dygraph.guard():
- conv2d = Conv2D( "conv2d", 2, 3)
- data = to_variable( data )
- conv = conv2d( data )
+ conv2d = Conv2D("conv2d", 2, 3)
+ data = to_variable(data)
+ conv = conv2d(data)
"""
@@ -723,33 +729,82 @@ class Conv3DTranspose(layers.Layer):
class Pool2D(layers.Layer):
"""
- The pooling2d operation calculates the output based on the input, pooling_type and ksize, strides,
- paddings parameters.Input(X) and output(Out) are in NCHW format, where N is batch size, C is the number of channels,
- H is the height of the feature, and W is the width of the feature.
+ This interface is used to construct a callable object of the ``Pool2D`` class.
+ For more details, refer to code examples.
+ The pooling2d operation calculates the output based on the input, pool_type and pool_size, pool_stride,
+ pool_padding parameters. Input and output are in NCHW format, where N is batch size, C is the number of feature maps,
+ H is the height of the feature map, and W is the width of the feature map.
Parameters (ksize, strides, paddings) each contain two elements, which represent height and width, respectively.
The input(X) size and output(Out) size may be different.
+ Example:
+
+ - Input:
+
+ Input shape: :math:`(N, C, H_{in}, W_{in})`
+
+ - Output:
+
+ Output shape: :math:`(N, C, H_{out}, W_{out})`
+
+ If ``ceil_mode`` = False:
+
+ .. math::
+
+ H_{out} = \\frac{(H_{in} - ksize[0] + 2 * paddings[0])}{strides[0]} + 1 \\\\
+ W_{out} = \\frac{(W_{in} - ksize[1] + 2 * paddings[1])}{strides[1]} + 1
+
+ If ``ceil_mode`` = True:
+
+ .. math::
+
+ H_{out} = \\frac{(H_{in} - ksize[0] + 2 * paddings[0] + strides[0] - 1)}{strides[0]} + 1 \\\\
+ W_{out} = \\frac{(W_{in} - ksize[1] + 2 * paddings[1] + strides[1] - 1)}{strides[1]} + 1
+
+ If ``exclusive`` = False:
+
+ .. math::
+
+ hstart &= i * strides[0] - paddings[0] \\\\
+ hend &= hstart + ksize[0] \\\\
+ wstart &= j * strides[1] - paddings[1] \\\\
+ wend &= wstart + ksize[1] \\\\
+ Output(i ,j) &= \\frac{sum(Input[hstart:hend, wstart:wend])}{ksize[0] * ksize[1]}
+
+ If ``exclusive`` = True:
+
+ .. math::
+
+ hstart &= max(0, i * strides[0] - paddings[0])\\\\
+ hend &= min(H, hstart + ksize[0]) \\\\
+ wstart &= max(0, j * strides[1] - paddings[1]) \\\\
+ wend & = min(W, wstart + ksize[1]) \\\\
+ Output(i ,j) & = \\frac{sum(Input[hstart:hend, wstart:wend])}{(hend - hstart) * (wend - wstart)}
+
Parameters:
name_scope(str) : The name of this class.
- pool_size (int|list|tuple): The pool kernel size. If pool kernel size is a tuple or list,
+ pool_size (int or list or tuple, optional): The pool kernel size. If pool kernel size is a tuple or list,
it must contain two integers, (pool_size_Height, pool_size_Width).
- Otherwise, the pool kernel size will be a square of an int. Default: -1
- pool_type(str) : The pooling type, can be "max" for max-pooling and "avg" for average-pooling. Default: max
- pool_stride (int|list|tuple): The pool stride size. If pool stride size is a tuple or list,
+ Otherwise, the pool kernel size will be a square of an int. Default: -1.
+ pool_type(str, optional): The pooling type, can be "max" for max-pooling and "avg" for average-pooling.
+ Default: max.
+ pool_stride (int or list or tuple, optional): The pool stride size. If pool stride size is a tuple or list,
it must contain two integers, (pool_stride_Height, pool_stride_Width). Otherwise,
- the pool stride size will be a square of an int. Default: 1
- pool_padding (int|list|tuple): The pool padding size. If pool padding size is a tuple,
+ the pool stride size will be a square of an int. Default: 1.
+ pool_padding (int or list or tuple, optional): The padding size for pooling operation.
+ If ``pool_padding`` is a tuple,
it must contain two integers, (pool_padding_on_Height, pool_padding_on_Width).
- Otherwise, the pool padding size will be a square of an int. Default: 0
- global_pooling (bool): Whether to use the global pooling. If global_pooling = true,
- kernel size and paddings will be ignored. Default: False
- use_cudnn (bool): Only used in cudnn kernel, need install cudnn. Default: True
- ceil_mode (bool): Whether to use the ceil function to calculate output height and width.
- False is the default. If it is set to False, the floor function will be used. Default: False
- exclusive (bool): Whether to exclude padding points in average pooling mode. Default: True
+ Otherwise, the padding size for pooling operation will be a square of an int. Default: 0.
+ global_pooling (bool, optional): Whether to use the global pooling. If global_pooling = true,
+ kernel size and paddings will be ignored. Default: False.
+ use_cudnn (bool, optional): Only used in cudnn kernel, need install cudnn. Default: True.
+ ceil_mode (bool, optional): Whether to use the ceil function to calculate output height and width.
+ False is the default. If it is set to False, the floor function will be used. Default: False.
+ exclusive (bool, optional): Whether to exclude padding points in average pooling mode. Default: True.
+ dtype (str, optional): Data type, it can be "float32" or "float64". Default: "float32".
Returns:
- Variable: The pooling result.
+ None
Raises:
ValueError: If 'pool_type' is not "max" nor "avg"
@@ -761,16 +816,16 @@ class Pool2D(layers.Layer):
.. code-block:: python
import paddle.fluid as fluid
- import numpy
+ from paddle.fluid.dygraph.base import to_variable
+ import numpy as np
with fluid.dygraph.guard():
- data = numpy.random.random((3, 32, 32)).astype('float32')
-
+ data = np.random.random((3, 32, 32, 5)).astype('float32')
pool2d = fluid.dygraph.Pool2D("pool2d",pool_size=2,
pool_type='max',
pool_stride=1,
global_pooling=False)
- pool2d_res = pool2d(data)
+ pool2d_res = pool2d(to_variable(data))
"""
@@ -834,26 +889,25 @@ class Pool2D(layers.Layer):
class FC(layers.Layer):
"""
- **Fully Connected Layer**
-
- This function creates a fully connected layer in the network. It can take
- one or multiple tensors as its inputs(input can be a list of Variable, see
- Args in detail). It creates a variable called weights for each input tensor,
+ This interface is used to construct a callable object of the ``FC`` class.
+ For more details, refer to code examples.
+ It creates a fully connected layer in the network. It can take
+ one or multiple ``Tensor`` as its inputs. It creates a Variable called weights for each input tensor,
which represents a fully connected weight matrix from each input unit to
each output unit. The fully connected layer multiplies each input tensor
- with its corresponding weight to produce an output Tensor with shape [M, `size`],
- where M is batch size. If multiple input tensors are given, the results of
- multiple output tensors with shape [M, `size`] will be summed up. If bias_attr
+ with its corresponding weight to produce an output Tensor with shape [N, `size`],
+ where N is batch size. If multiple input tensors are given, the results of
+ multiple output tensors with shape [N, `size`] will be summed up. If ``bias_attr``
is not None, a bias variable will be created and added to the output.
- Finally, if activation is not None, it will be applied to the output as well.
+ Finally, if ``act`` is not None, it will be applied to the output as well.
- When the input is single tensor:
+ When the input is a single ``Tensor`` :
.. math::
Out = Act({XW + b})
- When the input are multiple tensors:
+ When the inputs are multiple ``Tensor`` :
.. math::
@@ -861,36 +915,36 @@ class FC(layers.Layer):
In the above equation:
- * :math:`N`: Number of the input. N equals to len(input) if input is list of Variable.
- * :math:`X_i`: The i-th input tensor.
+ * :math:`N`: Number of the inputs. N equals len(input) if input is a list of ``Tensor`` .
+ * :math:`X_i`: The i-th input ``Tensor`` .
* :math:`W_i`: The i-th weights matrix corresponding i-th input tensor.
* :math:`b`: The bias parameter created by this layer (if needed).
* :math:`Act`: The activation function.
- * :math:`Out`: The output tensor.
+ * :math:`Out`: The output ``Tensor`` .
See below for an example.
.. code-block:: text
Given:
- data_1.data = [[[0.1, 0.2],
- [0.3, 0.4]]]
- data_1.shape = (1, 2, 2) # 1 is batch_size
+ data_1.data = [[[0.1, 0.2]]]
+ data_1.shape = (1, 1, 2) # 1 is batch_size
- data_2 = [[[0.1, 0.2, 0.3]]]
- data_2.shape = (1, 1, 3)
+ data_2.data = [[[0.1, 0.2, 0.3]]]
+ data_2.shape = (1, 1, 3) # 1 is batch_size
- out = fluid.layers.fc(input=[data_1, data_2], size=2)
+ fc = FC("fc", 2, num_flatten_dims=2)
+ out = fc(input=[data_1, data_2])
Then:
- out.data = [[0.18669507, 0.1893476]]
- out.shape = (1, 2)
+ out.data = [[[0.182996, -0.474117]]]
+ out.shape = (1, 1, 2)
Parameters:
name_scope(str): The name of this class.
size(int): The number of output units in this layer.
- num_flatten_dims (int): The fc layer can accept an input tensor with more than
- two dimensions. If this happens, the multidimensional tensor will first be flattened
+ num_flatten_dims (int, optional): The fc layer can accept an input tensor with more than
+ two dimensions. If this happens, the multi-dimensional tensor will first be flattened
into a 2-dimensional matrix. The parameter `num_flatten_dims` determines how the input
tensor is flattened: the first `num_flatten_dims` (inclusive, index starts from 1)
dimensions will be flatten to form the first dimension of the final matrix (height of
@@ -898,22 +952,23 @@ class FC(layers.Layer):
form the second dimension of the final matrix (width of the matrix). For example, suppose
`X` is a 5-dimensional tensor with a shape [2, 3, 4, 5, 6], and `num_flatten_dims` = 3.
Then, the flattened matrix will have a shape [2 x 3 x 4, 5 x 6] = [24, 30]. Default: 1
- param_attr (ParamAttr|list of ParamAttr|None): The parameter attribute for learnable
- parameters/weights of this layer.
- bias_attr (ParamAttr|list of ParamAttr, default None): The parameter attribute for the bias
+ param_attr (ParamAttr or list of ParamAttr, optional): The parameter attribute for learnable
+ weights(Parameter) of this layer. Default: None.
+ bias_attr (ParamAttr or list of ParamAttr, optional): The attribute for the bias
of this layer. If it is set to False, no bias will be added to the output units.
If it is set to None, the bias is initialized zero. Default: None.
- act (str|None): Activation to be applied to the output of this layer.
- is_test(bool): A flag indicating whether execution is in test phase. Default: False
- dtype(str): Dtype used for weight
+ act (str, optional): Activation to be applied to the output of this layer. Default: None.
+ is_test(bool, optional): A flag indicating whether execution is in test phase. Default: False.
+ dtype(str, optional): Dtype used for weight, it can be "float32" or "float64". Default: "float32".
- Attributes:
- weight (list of Parameter): the learnable weights of this layer.
- bias (Parameter|None): the learnable bias of this layer.
+ Attribute:
+ **weight** (list of Parameter): the learnable weights of this layer.
- Raises:
- ValueError: If rank of the input tensor is less than 2.
+ **bias** (Parameter or None): the learnable bias of this layer.
+ Returns:
+ None
+
Examples:
.. code-block:: python
@@ -922,11 +977,11 @@ class FC(layers.Layer):
from paddle.fluid.dygraph import FC
import numpy as np
- data = np.random.uniform( -1, 1, [30, 10, 32] ).astype('float32')
+ data = np.random.uniform(-1, 1, [30, 10, 32]).astype('float32')
with fluid.dygraph.guard():
- fc = FC( "fc", 64, num_flatten_dims=2)
- data = to_variable( data )
- conv = fc( data )
+ fc = FC("fc", 64, num_flatten_dims=2)
+ data = to_variable(data)
+ conv = fc(data)
"""
@@ -1049,20 +1104,18 @@ class FC(layers.Layer):
class BatchNorm(layers.Layer):
"""
- **Batch Normalization Layer**
-
- Can be used as a normalizer function for conv2d and fully_connected operations.
- The required data format for this layer is one of the following:
-
- 1. NHWC `[batch, in_height, in_width, in_channels]`
-
- 2. NCHW `[batch, in_channels, in_height, in_width]`
-
+ This interface is used to construct a callable object of the ``BatchNorm`` class.
+ For more details, refer to code examples.
+ It implements the function of the Batch Normalization Layer and can be used
+ as a normalizer function for conv2d and fully connected operations.
+ The data is normalized by the mean and variance of the channel based on the current batch data.
Refer to `Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift <https://arxiv.org/abs/1502.03167>`_
for more details.
- :math:`input` is the input features over a mini-batch.
+ When use_global_stats = False, the :math:`\\mu_{\\beta}`
+ and :math:`\\sigma_{\\beta}^{2}` are the statistics of one mini-batch.
+ Calculated as follows:
.. math::
@@ -1070,70 +1123,79 @@ class BatchNorm(layers.Layer):
\ mini-batch\ mean \\\\
\\sigma_{\\beta}^{2} &\\gets \\frac{1}{m} \\sum_{i=1}^{m}(x_i - \\
\\mu_{\\beta})^2 \\qquad &//\ mini-batch\ variance \\\\
- \\hat{x_i} &\\gets \\frac{x_i - \\mu_\\beta} {\\sqrt{\\
- \\sigma_{\\beta}^{2} + \\epsilon}} \\qquad &//\ normalize \\\\
- y_i &\\gets \\gamma \\hat{x_i} + \\beta \\qquad &//\ scale\ and\ shift
+ - :math:`x` : mini-batch data
+ - :math:`m` : the size of the mini-batch data
When use_global_stats = True, the :math:`\\mu_{\\beta}`
and :math:`\\sigma_{\\beta}^{2}` are not the statistics of one mini-batch.
- They are global (or running) statistics. (It usually got from the
- pre-trained model.)
- The training and testing (or inference) have the same behavior:
+ They are global or running statistics (moving_mean and moving_variance), which are usually obtained from a
+ pre-trained model. They are calculated as follows:
+ .. math::
+ moving\_mean = moving\_mean * momentum + \\mu_{\\beta} * (1. - momentum) \\quad &//\ global\ mean \\\\
+ moving\_variance = moving\_variance * momentum + \\sigma_{\\beta}^{2} * (1. - momentum) \\quad &//\ global\ variance \\\\
+
+ The normalization function formula is as follows:
+
.. math::
\\hat{x_i} &\\gets \\frac{x_i - \\mu_\\beta} {\\sqrt{\\
- \\sigma_{\\beta}^{2} + \\epsilon}} \\\\
- y_i &\\gets \\gamma \\hat{x_i} + \\beta
+ \\sigma_{\\beta}^{2} + \\epsilon}} \\qquad &//\ normalize \\\\
+ y_i &\\gets \\gamma \\hat{x_i} + \\beta \\qquad &//\ scale\ and\ shift
+
+ - :math:`\\epsilon` : the small value added to the variance to prevent division by zero
+ - :math:`\\gamma` : trainable scale parameter
+ - :math:`\\beta` : trainable shift parameter
Parameters:
name_scope(str): The name of this class.
- act(str|None): Activation type, linear|relu|prelu|...
- is_test (bool): A flag indicating whether it is in
- test phrase or not. Default: False
- momentum(float): The value used for the moving_mean and
- moving_var computation. The updated formula is:
- :math:`moving\_mean = moving\_mean * momentum + new\_mean * (1. - momentum)`
- :math:`moving\_var = moving\_var * momentum + new\_var * (1. - momentum)`
- Default is 0.9.
- epsilon(float): A value added to the denominator for
- numerical stability. Default is 1e-5.
- param_attr(ParamAttr|None): The parameter attribute for Parameter `scale`
+ num_channels(int): Indicates the number of channels of the input ``Tensor``.
+ act(str, optional): Activation to be applied to the output of batch normalization. Default: None.
+ is_test (bool, optional): A flag indicating whether it is in test phase or not. Default: False.
+ momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
+ epsilon(float, optional): The small value added to the variance to prevent division by zero. Default: 1e-5.
+ param_attr(ParamAttr, optional): The parameter attribute for Parameter `scale`
of batch_norm. If it is set to None or one attribute of ParamAttr, batch_norm
will create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with Xavier. Default: None.
- bias_attr(ParamAttr|None): The parameter attribute for the bias of batch_norm.
+ bias_attr(ParamAttr, optional): The parameter attribute for the bias of batch_norm.
If it is set to None or one attribute of ParamAttr, batch_norm
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
- data_layout(string): NCHW|NHWC. Default: NCHW
- in_place(bool): Make the input and output of batch norm reuse memory. Default: False
- moving_mean_name(string|None): The name of moving_mean which store the global Mean. Default: None
- moving_variance_name(string, Default None): The name of the moving_variance which store the global Variance.
- do_model_average_for_mean_and_var(bool, Default False): Do model average for mean and variance or not.
- fuse_with_relu (bool): if True, this OP performs relu after batch norm. Default: False
- use_global_stats(bool): Whether to use global mean and
+ dtype(str, optional): Indicates the data type of the input ``Tensor``,
+ which can be float32 or float64. Default: float32.
+ data_layout(str, optional): Specify the input data format, the data format can be "NCHW" or "NHWC". Default: NCHW.
+ in_place(bool, optional): Make the input and output of batch norm reuse memory. Default: False.
+ moving_mean_name(str, optional): The name of moving_mean which store the global Mean. Default: None.
+ moving_variance_name(str, optional): The name of the moving_variance which store the global Variance. Default: None.
+ do_model_average_for_mean_and_var(bool, optional): Do model average for mean and variance or not. Default: False.
+ fuse_with_relu (bool, optional): When setting fuse_with_relu True, this OP performs relu after batch norm.
+ Default: False.
+ use_global_stats(bool, optional): Whether to use global mean and
variance. In inference or test mode, set use_global_stats to true
or is_test to true, and the behavior is equivalent.
In train mode, when setting use_global_stats True, the global mean
- and variance are also used during train period. Default: False
- trainable_statistics(bool): Whether to calculate mean and var in eval mode. In eval mode, when
- setting trainable_statistics True, mean and variance will be calculated by current batch statistics.Default: False
+ and variance are also used during train period. Default: False.
+ trainable_statistics(bool, optional): Whether to calculate mean and var in eval mode. In eval mode, when
+ setting trainable_statistics True, mean and variance will be calculated by current batch statistics.
+ Default: False.
Returns:
- Variable: A tensor variable which is the result after applying batch normalization on the input.
+ None
Examples:
.. code-block:: python
import paddle.fluid as fluid
+ from paddle.fluid.dygraph.base import to_variable
+ import numpy as np
+ x = np.random.random(size=(3, 10, 3, 7)).astype('float32')
with fluid.dygraph.guard():
- fc = fluid.FC('fc', size=200, param_attr='fc1.w')
- hidden1 = fc(x)
+ x = to_variable(x)
batch_norm = fluid.BatchNorm("batch_norm", 10)
- hidden2 = batch_norm(hidden1)
+ hidden1 = batch_norm(x)
"""
def __init__(self,
@@ -1266,29 +1328,72 @@ class Embedding(layers.Layer):
"""
**Embedding Layer**
- This layer is used to lookup embeddings of IDs, provided by :attr:`input`, in
- a lookup table. The result of this lookup is the embedding of each ID in the
- :attr:`input`.
- All the input variables are passed in as local variables to the LayerHelper constructor
+ This interface is used to construct a callable object of the ``Embedding`` class.
+ For specific usage, refer to code examples. It implements the function of the Embedding Layer.
+ This layer is used to look up the embedding vector of the ids provided by :attr:`input` .
+ It automatically constructs a 2D embedding matrix based on the
+ input :attr:`size` (vocab_size, emb_size) and :attr:`dtype` .
+
+ This layer requires the last dimension of the input Tensor shape to be equal to 1. The shape
+ of output Tensor is generated by replacing the last dimension of the input Tensor shape
+ with emb_size.
+
+ The id in :attr:`input` must satisfy :math:`0 <= id < size[0]` ,
+ otherwise the program will throw an exception and exit.
+
+ .. code-block:: text
+
+ Case 1:
+
+ input is a Tensor. padding_idx = -1
+ input.data = [[[1], [3]], [[2], [4]], [[4], [127]]]
+ input.shape = [3, 2, 1]
+ Given size = [128, 16]
+ output is a Tensor:
+ out.shape = [3, 2, 16]
+ out.data = [[[0.129435295, 0.244512452, ..., 0.436322452],
+ [0.345421456, 0.524563927, ..., 0.144534654]],
+
+ [[0.345249859, 0.124939536, ..., 0.194353745],
+ [0.945345345, 0.435394634, ..., 0.435345365]],
+
+ [[0.945345345, 0.435394634, ..., 0.435345365],
+ [0.0, 0.0, ..., 0.0 ]]] # padding data
+ Since the input padding_idx is less than 0, it is automatically converted to padding_idx = -1 + 128 = 127.
+ It will pad all-zero data whenever the id is 127.
Parameters:
name_scope(str): The name of this class.
size(tuple|list): The shape of the look up table parameter. It should have two elements which indicate the size
of the dictionary of embeddings and the size of each embedding vector respectively.
- is_sparse(bool): The flag indicating whether to use sparse update. Default: False
- is_distributed(bool): Whether to run lookup table from remote parameter server. Default: False.
- padding_idx(int|long|None): If :attr:`None`, it makes no effect to lookup.
- Otherwise the given :attr:`padding_idx` indicates padding the output with zeros whenever lookup encounters
- it in :attr:`input`. If :math:`padding_idx < 0`, the :attr:`padding_idx` to use in lookup is :math:`size[0] + dim`. Default: None.
- param_attr(ParamAttr): Parameters for this layer. Default: None.
- dtype(np.dtype|core.VarDesc.VarType|str): The type of data : float32, float_16, int etc. Default: 'float32'.
-
- Attributes:
- weight (Parameter): the learnable weights of this layer.
+ is_sparse(bool): The flag indicating whether to use sparse update. This parameter only
+ affects the performance of the backwards gradient update. It is recommended to set
+ it to True because sparse update is faster. But some optimizers do not support sparse update,
+ such as :ref:`api_fluid_optimizer_AdadeltaOptimizer` , :ref:`api_fluid_optimizer_AdamaxOptimizer` ,
+ :ref:`api_fluid_optimizer_DecayedAdagradOptimizer` , :ref:`api_fluid_optimizer_FtrlOptimizer` ,
+ :ref:`api_fluid_optimizer_LambOptimizer` and :ref:`api_fluid_optimizer_LarsMomentumOptimizer` .
+ In these cases, is_sparse must be False. Default: False.
+ is_distributed(bool): Whether to store the embedding matrix in a distributed manner. Only used
+ in multi-machine distributed CPU training. Default: False.
+ padding_idx(int|long|None): padding_idx needs to be in the interval [-vocab_size, vocab_size).
+ If :math:`padding\_idx < 0`, the :math:`padding\_idx` will automatically be converted
+ to :math:`vocab\_size + padding\_idx` . It will output all-zero padding data whenever lookup
+ encounters :math:`padding\_idx` in id. And the padding data will not be updated while training.
+ If set to None, it has no effect on the output. Default: None.
+ param_attr(ParamAttr): To specify the weight parameter property. Default: None, which means the
+ default weight parameter property is used. See usage for details in :ref:`api_fluid_ParamAttr` . In addition,
+ user-defined or pre-trained word vectors can be loaded with the :attr:`param_attr` parameter.
+ The local word vector needs to be transformed into numpy format, and the shape of local word
+ vector should be consistent with :attr:`size` . Then :ref:`api_fluid_initializer_NumpyArrayInitializer`
+ is used to load custom or pre-trained word vectors. See code example 2 for details.
+ dtype(np.dtype|core.VarDesc.VarType|str): It refers to the data type of output Tensor.
+ It must be "float32" or "float64". Default: "float32".
+
+ Attribute:
+ **weight** (Parameter): the learnable weights of this layer.
Returns:
- Variable: The tensor variable storing the embeddings of the \
- supplied inputs.
+ Variable: Embedding Tensor or LoDTensor mapped by input. The data type is the same as :attr:`dtype` .
Examples:
@@ -1298,6 +1403,7 @@ class Embedding(layers.Layer):
import paddle.fluid.dygraph.base as base
import numpy as np
+ # example 1
inp_word = np.array([[[1]]]).astype('int64')
dict_size = 20
with fluid.dygraph.guard():
@@ -1307,6 +1413,21 @@ class Embedding(layers.Layer):
param_attr='emb.w',
is_sparse=False)
static_rlt3 = emb(base.to_variable(inp_word))
+
+ # example 2: load custom or pre-trained word vectors
+ weight_data = np.random.random(size=(128, 100)) # word vectors with numpy format
+ w_param_attrs = fluid.ParamAttr(
+ name="emb_weight",
+ learning_rate=0.5,
+ initializer=fluid.initializer.NumpyArrayInitializer(weight_data),
+ trainable=True)
+ with fluid.dygraph.guard():
+ emb = fluid.dygraph.Embedding(
+ name_scope='embedding',
+ size=[128, 100],
+ param_attr=w_param_attrs,
+ is_sparse=False)
+ static_rlt3 = emb(base.to_variable(inp_word))
"""
def __init__(self,
@@ -1363,70 +1484,66 @@ class Embedding(layers.Layer):
class LayerNorm(layers.Layer):
"""
- Assume feature vectors exist on dimensions
- `begin_norm_axis ... rank(input)` and calculate the moment statistics along these dimensions for each feature
- vector `a` with size `H`, then normalize each feature vector using the corresponding
- statistics. After that, apply learnable gain and bias on the normalized
- tensor to scale and shift if `scale` and `shift` are set.
-
+ This interface is used to construct a callable object of the ``LayerNorm`` class.
+ For more details, refer to code examples.
+ It implements the function of the Layer Normalization Layer and can be applied to mini-batch input data.
Refer to `Layer Normalization `_
The formula is as follows:
.. math::
- \\mu & = \\frac{1}{H}\\sum_{i=1}^{H} a_i
-
- \\sigma & = \\sqrt{\\frac{1}{H}\sum_{i=1}^{H}(a_i - \\mu)^2}
-
- h & = f(\\frac{g}{\\sigma}(a - \\mu) + b)
-
- * :math:`a`: the vector representation of the summed inputs to the neurons in that layer.
+ \\mu & = \\frac{1}{H}\\sum_{i=1}^{H} x_i
- * :math:`H`: the number of hidden units in a layers
+ \\sigma & = \\sqrt{\\frac{1}{H}\sum_{i=1}^{H}{(x_i - \\mu)^2} + \\epsilon}
- * :math:`g`: the trainable scale parameter.
+ y & = f(\\frac{g}{\\sigma}(x - \\mu) + b)
- * :math:`b`: the trainable bias parameter.
+ - :math:`x`: the vector representation of the summed inputs to the neurons in that layer.
+ - :math:`H`: the number of hidden units in a layer
+ - :math:`\\epsilon`: the small value added to the variance to prevent division by zero.
+ - :math:`g`: the trainable scale parameter.
+ - :math:`b`: the trainable bias parameter.
Parameters:
name_scope(str): The name of this class.
- scale(bool): Whether to learn the adaptive gain :math:`g` after
+ scale(bool, optional): Whether to learn the adaptive gain :math:`g` after
normalization. Default: True.
- shift(bool): Whether to learn the adaptive bias :math:`b` after
+ shift(bool, optional): Whether to learn the adaptive bias :math:`b` after
normalization. Default: True.
- begin_norm_axis(int): The normalization will be performed along
+ begin_norm_axis(int, optional): The normalization will be performed along
dimensions from :attr:`begin_norm_axis` to :attr:`rank(input)`.
Default: 1.
- epsilon(float): The small value added to the variance to prevent
+ epsilon(float, optional): The small value added to the variance to prevent
division by zero. Default: 1e-05.
- param_attr(ParamAttr|None): The parameter attribute for the learnable
+ param_attr(ParamAttr, optional): The parameter attribute for the learnable
gain :math:`g`. If :attr:`scale` is False, :attr:`param_attr` is
omitted. If :attr:`scale` is True and :attr:`param_attr` is None,
a default :code:`ParamAttr` would be added as scale. The
:attr:`param_attr` is initialized as 1 if it is added. Default: None.
- bias_attr(ParamAttr|None): The parameter attribute for the learnable
+ bias_attr(ParamAttr, optional): The parameter attribute for the learnable
bias :math:`b`. If :attr:`shift` is False, :attr:`bias_attr` is
omitted. If :attr:`shift` is True and :attr:`param_attr` is None,
a default :code:`ParamAttr` would be added as bias. The
:attr:`bias_attr` is initialized as 0 if it is added. Default: None.
- act(str): Activation to be applied to the output of layer normalizaiton.
+ act(str, optional): Activation to be applied to the output of layer normalization.
Default: None.
Returns:
- Result after normalization
+ None
Examples:
.. code-block:: python
import paddle.fluid as fluid
+ from paddle.fluid.dygraph.base import to_variable
import numpy
+ x = numpy.random.random((3, 32, 32)).astype('float32')
with fluid.dygraph.guard():
- x = numpy.random.random((3, 32, 32)).astype('float32')
- layerNorm = fluid.dygraph.nn.LayerNorm(
- 'LayerNorm', begin_norm_axis=1)
- ret = layerNorm(fluid.dygraph.base.to_variable(x))
+ x = to_variable(x)
+ layerNorm = fluid.LayerNorm('LayerNorm', begin_norm_axis=1)
+ ret = layerNorm(x)
"""
@@ -1683,39 +1800,43 @@ class GRUUnit(layers.Layer):
class NCE(layers.Layer):
"""
- Compute and return the noise-contrastive estimation training loss. See
+ This interface is used to construct a callable object of the ``NCE`` class.
+ For more details, refer to code examples.
+ It implements the ``NCE`` loss function.
+ By default this function uses a uniform distribution for sampling, and it
+ computes and returns the noise-contrastive estimation training loss. See
`Noise-contrastive estimation: A new estimation principle for unnormalized statistical models `_ .
- By default this operator uses a uniform distribution for sampling.
Parameters:
name_scope(str): The name of this class.
num_total_classes (int): Total number of classes in all samples
- param_attr (ParamAttr|None): The parameter attribute for learnable parameters/weights
+ param_attr (ParamAttr, optional): The parameter attribute for learnable weights(Parameter)
of nce. If it is set to None or one attribute of ParamAttr, nce
will create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with Xavier. Default: None.
- bias_attr (ParamAttr|bool|None): The parameter attribute for the bias of nce.
+ bias_attr (ParamAttr or bool, optional): The attribute for the bias of nce.
If it is set to False, no bias will be added to the output units.
If it is set to None or one attribute of ParamAttr, nce
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
- num_neg_samples (int): The number of negative classes. The default value is 10.
- sampler (str): The sampler used to sample class from negtive classes.
+ num_neg_samples (int, optional): The number of negative classes. The default value is 10.
+ sampler (str, optional): The sampler used to sample classes from negative classes.
It can be 'uniform', 'log_uniform' or 'custom_dist'.
default: 'uniform'.
- custom_dist (float[]|None): A float[] with size=num_total_classes.
+ custom_dist (float[], optional): A float[] with size=num_total_classes.
It is used when sampler is set to 'custom_dist'.
custom_dist[i] is the probability of i-th class to be sampled.
Default: None.
- seed (int): The seed used in sampler. Default: 0.
- is_sparse(bool): The flag indicating whether to use sparse update, the weight@GRAD and bias@GRAD will be changed to SelectedRows. Default: False.
+ seed (int, optional): The seed used in sampler. Default: 0.
+ is_sparse(bool, optional): The flag indicating whether to use sparse update. If is_sparse is True, the weight@GRAD and bias@GRAD will be changed to SelectedRows. Default: False.
- Attributes:
- weight (Parameter): the learnable weights of this layer.
- bias (Parameter|None): the learnable bias of this layer.
+ Attribute:
+ **weight** (Parameter): the learnable weights of this layer.
+ **bias** (Parameter or None): the learnable bias of this layer.
+
Returns:
- Variable: The output nce loss.
+ None
Examples:
.. code-block:: python
@@ -1932,6 +2053,10 @@ class NCE(layers.Layer):
class PRelu(layers.Layer):
"""
+ This interface is used to construct a callable object of the ``PRelu`` class.
+ For more details, refer to code examples.
+ It implements the ``PRelu`` activation function with three modes of weight sharing: all, channel and element.
+
Equation:
.. math::
@@ -1943,30 +2068,32 @@ class PRelu(layers.Layer):
and element. all: all elements share same weight
channel: elements in a channel share same weight
element: each element has a weight
- param_attr(ParamAttr|None): The parameter attribute for the learnable
- weight (alpha).
-
- Attributes:
- weight (Parameter): the learnable weights of this layer.
+ param_attr(ParamAttr, optional): The parameter attribute for the learnable
+ weight (alpha). Default: None.
+ Attribute:
+ **weight** (Parameter): the learnable weights of this layer.
+
Returns:
- Variable: The output tensor with the same shape as input.
+ None
Examples:
.. code-block:: python
import paddle.fluid as fluid
+ from paddle.fluid.dygraph.base import to_variable
import numpy as np
inp_np = np.ones([5, 200, 100, 100]).astype('float32')
with fluid.dygraph.guard():
+ inp_np = to_variable(inp_np)
mode = 'channel'
prelu = fluid.PRelu(
'prelu',
mode=mode,
param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant(1.0)))
- dy_rlt = prelu(fluid.dygraph.base.to_variable(inp_np))
+ dy_rlt = prelu(inp_np)
"""
@@ -2134,19 +2261,21 @@ class BilinearTensorProduct(layers.Layer):
class Conv2DTranspose(layers.Layer):
"""
- **Convlution2D transpose layer**
-
+ This interface is used to construct a callable object of the ``Conv2DTranspose`` class.
+ For more details, refer to code examples.
The convolution2D transpose layer calculates the output based on the input,
- filter, and dilations, strides, paddings. Input(Input) and output(Output)
- are in NCHW format. Where N is batch size, C is the number of channels,
- H is the height of the feature, and W is the width of the feature.
- Parameters(dilations, strides, paddings) are two elements. These two elements
- represent height and width, respectively. The details of convolution transpose
- layer, please refer to the following explanation and references
- `therein `_.
+ filter, and dilations, strides, paddings. Input and output
+ are in NCHW format, where N is batch size, C is the number of feature maps,
+ H is the height of the feature map, and W is the width of the feature map.
+ Filter's shape is [MCHW] , where M is the number of output feature maps,
+ C is the number of input feature maps, H is the height of the filter,
+ and W is the width of the filter. If groups is greater than 1,
+ C will equal the number of input feature maps divided by groups.
If bias attribution and activation type are provided, bias is added to
the output of the convolution, and the corresponding activation function
is applied to the final result.
+ For the details of the convolution transpose layer, please refer to the following explanation and references:
+ `conv2dtranspose `_ .
For each input :math:`X`, the equation is:
@@ -2156,10 +2285,10 @@ class Conv2DTranspose(layers.Layer):
Where:
- * :math:`X`: Input value, a tensor with NCHW format.
- * :math:`W`: Filter value, a tensor with MCHW format.
+ * :math:`X`: Input value, a ``Tensor`` with NCHW format.
+ * :math:`W`: Filter value, a ``Tensor`` with shape [MCHW] .
* :math:`\\ast`: Convolution operation.
- * :math:`b`: Bias value, a 2-D tensor with shape [M, 1].
+ * :math:`b`: Bias value, a 2-D ``Tensor`` with shape [M, 1].
* :math:`\\sigma`: Activation function.
* :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.
@@ -2187,64 +2316,61 @@ class Conv2DTranspose(layers.Layer):
Parameters:
name_scope(str): The name of this class.
num_filters(int): The number of filters. It is the same as the number of output
- image channel.
- output_size(int|tuple|None): The output image size. If output size is a
+ feature maps.
+ output_size(int or tuple, optional): The output image size. If output size is a
tuple, it must contain two integers, (image_H, image_W). None if use
filter_size, padding, and stride to calculate output_size.
If output_size and filter_size are specified at the same time, they
should follow the formula above. Default: None.
- filter_size(int|tuple|None): The filter size. If filter_size is a tuple,
+ filter_size(int or tuple, optional): The filter size. If filter_size is a tuple,
it must contain two integers, (filter_size_H, filter_size_W).
Otherwise, the filter will be a square. None if use output size to
calculate filter_size. Default: None.
- padding(int|tuple): The padding size. If padding is a tuple, it must
+ padding(int or tuple, optional): The padding size. If padding is a tuple, it must
contain two integers, (padding_H, padding_W). Otherwise, the
- padding_H = padding_W = padding. Default: padding = 0.
- stride(int|tuple): The stride size. If stride is a tuple, it must
+ padding_H = padding_W = padding. Default: 0.
+ stride(int or tuple, optional): The stride size. If stride is a tuple, it must
contain two integers, (stride_H, stride_W). Otherwise, the
- stride_H = stride_W = stride. Default: stride = 1.
- dilation(int|tuple): The dilation size. If dilation is a tuple, it must
+ stride_H = stride_W = stride. Default: 1.
+ dilation(int or tuple, optional): The dilation size. If dilation is a tuple, it must
contain two integers, (dilation_H, dilation_W). Otherwise, the
- dilation_H = dilation_W = dilation. Default: dilation = 1.
- groups(int): The groups number of the Conv2d transpose layer. Inspired by
+ dilation_H = dilation_W = dilation. Default: 1.
+ groups(int, optional): The groups number of the Conv2d transpose layer. Inspired by
grouped convolution in Alex Krizhevsky's Deep CNN paper, in which
when group=2, the first half of the filters is only connected to the
first half of the input channels, while the second half of the
filters is only connected to the second half of the input channels.
- Default: groups = 1.
- param_attr (ParamAttr|None): The parameter attribute for learnable parameters/weights
+ Default: 1.
+ param_attr (ParamAttr, optional): The parameter attribute for learnable weights(Parameter)
of conv2d_transpose. If it is set to None or one attribute of ParamAttr, conv2d_transpose
will create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with Xavier. Default: None.
- bias_attr (ParamAttr|bool|None): The parameter attribute for the bias of conv2d_transpose.
+ bias_attr (ParamAttr or bool, optional): The attribute for the bias of conv2d_transpose.
If it is set to False, no bias will be added to the output units.
If it is set to None or one attribute of ParamAttr, conv2d_transpose
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
- use_cudnn(bool): Use cudnn kernel or not, it is valid only when the cudnn
+ use_cudnn(bool, optional): Use cudnn kernel or not, it is valid only when the cudnn
library is installed. Default: True.
- act (str): Activation type, if it is set to None, activation is not appended.
+ act (str, optional): Activation type, if it is set to None, activation is not appended.
Default: None.
- Attributes:
- weight (Parameter): the learnable weights of filters of this layer.
- bias (Parameter|None): the learnable bias of this layer.
+ Attribute:
+ **weight** (Parameter): the learnable weights of filters of this layer.
- Returns:
- Variable: The tensor variable storing the convolution transpose result.
+ **bias** (Parameter or None): the learnable bias of this layer.
- Raises:
- ValueError: If the shapes of input, filter_size, stride, padding and
- groups mismatch.
+ Returns:
+ None
Examples:
.. code-block:: python
import paddle.fluid as fluid
- import numpy
+ import numpy as np
with fluid.dygraph.guard():
- data = numpy.random.random((3, 32, 32)).astype('float32')
+ data = np.random.random((3, 32, 32, 5)).astype('float32')
conv2DTranspose = fluid.dygraph.nn.Conv2DTranspose(
'Conv2DTranspose', num_filters=2, filter_size=3)
ret = conv2DTranspose(fluid.dygraph.base.to_variable(data))
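As a rough check of how output_size relates to filter_size, stride, padding and dilation ("the formula above" in the docstring), here is a sketch of the standard transposed-convolution output-size computation, assuming no extra output padding:

.. code-block:: python

    def conv2d_transpose_out_size(in_size, filter_size, stride=1, padding=0, dilation=1):
        # Standard transposed-convolution spatial size (no output padding).
        return (in_size - 1) * stride - 2 * padding + dilation * (filter_size - 1) + 1

    # For the example above: H_in = 32, filter_size = 3, stride = 1, padding = 0.
    print(conv2d_transpose_out_size(32, 3))   # 34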
@@ -2562,37 +2688,38 @@ class RowConv(layers.Layer):
class GroupNorm(layers.Layer):
"""
- **Group Normalization Layer**
-
- Refer to `Group Normalization `_ .
-
- Parameters:
- name_scope(str): The name of this class.
- groups(int): The number of groups that divided from channels.
- epsilon(float): The small value added to the variance to prevent
- division by zero. Default: 1e-05.
- param_attr(ParamAttr|None): The parameter attribute for the learnable
- scale :math:`g`. If it is set to False, no scale will be added to the output units.
- If it is set to None, the bias is initialized one. Default: None.
- bias_attr(ParamAttr|None): The parameter attribute for the learnable
- bias :math:`b`. If it is set to False, no bias will be added to the output units.
- If it is set to None, the bias is initialized zero. Default: None.
- act(str): Activation to be applied to the output of group normalizaiton.
- data_layout(string|NCHW): Only NCHW is supported.
-
- Returns:
- Variable: A tensor variable which is the result after applying group normalization on the input.
-
- Examples:
- .. code-block:: python
-
- import paddle.fluid as fluid
- import numpy
-
- with fluid.dygraph.guard():
- x = numpy.random.random((8, 32, 32)).astype('float32')
- groupNorm = fluid.dygraph.nn.GroupNorm('GroupNorm', groups=4)
- ret = groupNorm(fluid.dygraph.base.to_variable(x))
+ This interface is used to construct a callable object of the ``GroupNorm`` class.
+ For more details, refer to code examples.
+ It implements the function of the Group Normalization Layer.
+ Refer to `Group Normalization `_ .
+
+ Parameters:
+ name_scope(str): The name of this class.
+ groups(int): The number of groups that divided from channels.
+ epsilon(float, optional): The small value added to the variance to prevent
+ division by zero. Default: 1e-05.
+ param_attr(ParamAttr, optional): The parameter attribute for the learnable
+ scale :math:`g`. If it is set to False, no scale will be added to the output units.
+ If it is set to None, the scale is initialized to one. Default: None.
+ bias_attr(ParamAttr, optional): The parameter attribute for the learnable
+ bias :math:`b`. If it is set to False, no bias will be added to the output units.
+ If it is set to None, the bias is initialized to zero. Default: None.
+ act(str, optional): Activation to be applied to the output of group normalization. Default: None.
+ data_layout(str, optional): Specify the input data format. Only NCHW is supported. Default: NCHW.
+
+ Returns:
+ None
+
+ Examples:
+ .. code-block:: python
+
+ import paddle.fluid as fluid
+ import numpy as np
+
+ with fluid.dygraph.guard():
+ x = np.random.random((8, 32, 32)).astype('float32')
+ groupNorm = fluid.dygraph.nn.GroupNorm('GroupNorm', groups=4)
+ ret = groupNorm(fluid.dygraph.base.to_variable(x))
"""
@@ -2661,8 +2788,8 @@ class GroupNorm(layers.Layer):
class SpectralNorm(layers.Layer):
"""
- **Spectral Normalization Layer**
-
+ This interface is used to construct a callable object of the ``SpectralNorm`` class.
+ For more details, refer to code examples. It implements the function of the Spectral Normalization Layer.
This layer calculates the spectral normalization value of weight parameters of
fc, conv1d, conv2d, conv3d layers which should be 2-D, 3-D, 4-D, 5-D
Parameters. Calculations are shown as follows.
@@ -2696,22 +2823,22 @@ class SpectralNorm(layers.Layer):
Parameters:
name_scope(str): The name of this class.
- dim(int): The index of dimension which should be permuted to the first before reshaping Input(Weight) to matrix, it should be set as 0 if Input(Weight) is the weight of fc layer, and should be set as 1 if Input(Weight) is the weight of conv layer. Default: 0.
- power_iters(int): The number of power iterations to calculate spectral norm. Default: 1.
- eps(float): The epsilon for numerical stability in calculating norms. Default: 1e-12.
- name (str): The name of this layer. It is optional.
+ dim(int, optional): The index of dimension which should be permuted to the first before reshaping Input(Weight) to matrix, it should be set as 0 if Input(Weight) is the weight of fc layer, and should be set as 1 if Input(Weight) is the weight of conv layer. Default: 0.
+ power_iters(int, optional): The number of power iterations to calculate spectral norm. Default: 1.
+ eps(float, optional): The epsilon for numerical stability in calculating norms. Default: 1e-12.
+ name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
Returns:
- Variable: A tensor variable of weight parameters after spectral normalization.
+ None
Examples:
.. code-block:: python
import paddle.fluid as fluid
- import numpy
+ import numpy as np
with fluid.dygraph.guard():
- x = numpy.random.random((2, 8, 32, 32)).astype('float32')
+ x = np.random.random((2, 8, 32, 32)).astype('float32')
spectralNorm = fluid.dygraph.nn.SpectralNorm('SpectralNorm', dim=1, power_iters=2)
ret = spectralNorm(fluid.dygraph.base.to_variable(x))
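The power iteration that the ``power_iters`` argument refers to can be sketched in a few lines of numpy; this is an illustration of the general procedure only (Paddle keeps the ``u``/``v`` vectors as persistent buffers and reshapes the weight according to ``dim``), and the matrix here is a random placeholder:

.. code-block:: python

    import numpy as np

    W = np.random.randn(24, 6).astype('float32')   # weight already reshaped to a 2-D matrix
    eps, power_iters = 1e-12, 2

    u = np.random.randn(W.shape[0]).astype('float32')
    for _ in range(power_iters):
        v = W.T @ u
        v /= (np.linalg.norm(v) + eps)
        u = W @ v
        u /= (np.linalg.norm(u) + eps)

    sigma = u @ W @ v                    # estimate of the largest singular value
    W_sn = W / sigma                     # spectrally normalized weight
    print(np.linalg.norm(W_sn, ord=2))   # close to 1.0; improves with more iterations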
@@ -2761,47 +2888,46 @@ class SpectralNorm(layers.Layer):
class TreeConv(layers.Layer):
"""
- ***Tree-Based Convolution Operator***
-
- Tree-Based Convolution is a kind of convolution based on tree structure.
- Tree-Based Convolution is a part of Tree-Based Convolution Neural Network(TBCNN),
- which is used to classify tree structures, such as Abstract Syntax Tree.
- Tree-Based Convolution proposed a kind of data structure called continuous binary tree,
- which regards multiway tree as binary tree.
- The paper of Tree-Based Convolution Operator is here: https://arxiv.org/abs/1409.5718v1
-
-
- Parameters:
- name_scope(str): The name of this class.
- output_size(int): output feature width
- num_filters(int): number of filters, Default: 1.
- max_depth(int): max depth of filters, Default: 2.
- act(str): activation function, Default: tanh.
- param_attr(ParamAttr): the parameter attribute for the filters, Default: None.
- bias_attr(ParamAttr): the parameter attribute for the bias of this layer, Default: None.
- name(str): a name of this layer(optional). If set None, the layer will be named automatically, Default: None.
+ This interface is used to construct a callable object of the ``TreeConv`` class.
+ For more details, refer to code examples.
+ Tree-Based Convolution is a kind of convolution based on tree structure.
+ Tree-Based Convolution is a part of Tree-Based Convolution Neural Network(TBCNN),
+ which is used to classify tree structures, such as Abstract Syntax Tree.
+ Tree-Based Convolution proposed a kind of data structure called continuous binary tree,
+ which regards multiway tree as binary tree.
+ The paper of Tree-Based Convolution Operator is here: `tree-based convolution `_ .
+
+ Parameters:
+ name_scope(str): The name of this class.
+ output_size(int): output feature width.
+ num_filters(int, optional): number of filters, Default: 1.
+ max_depth(int, optional): max depth of filters, Default: 2.
+ act(str, optional): activation function, Default: tanh.
+ param_attr(ParamAttr, optional): the parameter attribute for the filters, Default: None.
+ bias_attr(ParamAttr, optional): the parameter attribute for the bias of this layer, Default: None.
+ name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
- Attributes:
- weight (Parameter): the learnable weights of filters of this layer.
- bias (Parameter|None): the learnable bias of this layer.
+ Attribute:
+ **weight** (Parameter): the learnable weights of filters of this layer.
- Returns:
- out(Variable): (Tensor) The feature vector of subtrees. The shape of the output tensor is [max_tree_node_size, output_size, num_filters]. The output tensor could be a new feature vector for next tree convolution layers
+ **bias** (Parameter or None): the learnable bias of this layer.
- Examples:
+ Returns:
+ None
- .. code-block:: python
+ Examples:
- import paddle.fluid as fluid
- import numpy
+ .. code-block:: python
- with fluid.dygraph.guard():
- nodes_vector = numpy.random.random((1, 10, 5)).astype('float32')
- edge_set = numpy.random.random((1, 9, 2)).astype('int32')
- treeConv = fluid.dygraph.nn.TreeConv(
- 'TreeConv', output_size=6, num_filters=1, max_depth=2)
- ret = treeConv(fluid.dygraph.base.to_variable(nodes_vector), fluid.dygraph.base.to_variable(edge_set))
+ import paddle.fluid as fluid
+ import numpy
+ with fluid.dygraph.guard():
+ nodes_vector = numpy.random.random((1, 10, 5)).astype('float32')
+ edge_set = numpy.random.random((1, 9, 2)).astype('int32')
+ treeConv = fluid.dygraph.nn.TreeConv(
+ 'TreeConv', output_size=6, num_filters=1, max_depth=2)
+ ret = treeConv(fluid.dygraph.base.to_variable(nodes_vector), fluid.dygraph.base.to_variable(edge_set))
"""
def __init__(self,
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index b950afd52ca..5fa7b0933fd 100755
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -2324,11 +2324,11 @@ def sequence_softmax(input, use_cudnn=False, name=None):
def softmax(input, use_cudnn=False, name=None, axis=-1):
"""
- The input of the softmax operator is a tensor of any rank. The output tensor
- has the same shape as the input.
+ This operator implements the softmax layer. The calculation process is as follows:
- The dimension :attr:`axis` of the input tensor will be permuted to the last.
- Then the input tensor will be logically flattened to a 2-D matrix. The matrix's
+ 1. The dimension :attr:`axis` of the ``input`` will be permuted to the last.
+
+ 2. Then the input tensor will be logically flattened to a 2-D matrix. The matrix's
second dimension(row length) is the same as the dimension :attr:`axis` of the input
tensor, and the first dimension(column length) is the product of all other
dimensions of the input tensor. For each row of the matrix, the softmax operator
@@ -2336,6 +2336,9 @@ def softmax(input, use_cudnn=False, name=None, axis=-1):
of the input tensor's dimension :attr:`axis`) vector of arbitrary real values to a
K-dimensional vector of real values in the range [0, 1] that add up to 1.
+ 3. After the softmax operation is completed, the inverse operations of steps 1 and 2
+ are performed to restore the two-dimensional matrix to the original shape of the ``input`` .
+
It computes the exponential of the given dimension and the sum of exponential
values of all the other dimensions in the K-dimensional vector input.
Then the ratio of the exponential of the given dimension and the sum of
@@ -2348,20 +2351,66 @@ def softmax(input, use_cudnn=False, name=None, axis=-1):
Out[i, j] = \\frac{\exp(X[i, j])}{\sum_j \exp(X[i, j])}
+ Example:
+
+ .. code-block:: text
+
+ Case 1:
+ Input:
+ X.shape = [2, 3, 4]
+ X.data = [[[2.0, 3.0, 4.0, 5.0],
+ [3.0, 4.0, 5.0, 6.0],
+ [7.0, 8.0, 8.0, 9.0]],
+ [[1.0, 2.0, 3.0, 4.0],
+ [5.0, 6.0, 7.0, 8.0],
+ [6.0, 7.0, 8.0, 9.0]]]
+
+ Attrs:
+ axis = -1
+
+ Output:
+ Out.shape = [2, 3, 4]
+ Out.data = [[[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
+ [0.0320586 , 0.08714432, 0.23688282, 0.64391426],
+ [0.07232949, 0.19661193, 0.19661193, 0.53444665]],
+ [[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
+ [0.0320586 , 0.08714432, 0.23688282, 0.64391426],
+ [0.0320586 , 0.08714432, 0.23688282, 0.64391426]]]
+
+ Case 2:
+ Input:
+ X.shape = [2, 3, 4]
+ X.data = [[[2.0, 3.0, 4.0, 5.0],
+ [3.0, 4.0, 5.0, 6.0],
+ [7.0, 8.0, 8.0, 9.0]],
+ [[1.0, 2.0, 3.0, 4.0],
+ [5.0, 6.0, 7.0, 8.0],
+ [6.0, 7.0, 8.0, 9.0]]]
+ Attrs:
+ axis = 1
+
+ Output:
+ Out.shape = [2, 3, 4]
+ Out.data = [[[0.00657326, 0.00657326, 0.01714783, 0.01714783],
+ [0.01786798, 0.01786798, 0.04661262, 0.04661262],
+ [0.97555875, 0.97555875, 0.93623955, 0.93623955]],
+ [[0.00490169, 0.00490169, 0.00490169, 0.00490169],
+ [0.26762315, 0.26762315, 0.26762315, 0.26762315],
+ [0.72747516, 0.72747516, 0.72747516, 0.72747516]]]
+
Args:
- input (Variable): The input variable. A LoDTensor or Tensor with type
- float32, float64.
- use_cudnn (bool): Use cudnn kernel or not, it is valid only when the cudnn \
+ input (Variable): The input variable. A multi-dimension ``Tensor`` with type float32 or float64.
+ use_cudnn (bool, optional): Use cudnn kernel or not, it is valid only when the cudnn \
library is installed. To improve numerical stability, set use_cudnn to \
- False by default. Default: False
- name (str|None): A name for this layer(optional). If set None, the layer
+ False by default.
+ name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` . Default: None.
- will be named automatically. Default: None.
- axis (int): The index of dimension to perform softmax calculations, it should
+ axis (int, optional): The index of dimension to perform softmax calculations, it should
be in range :math:`[-1, rank - 1]`, while :math:`rank` is the rank of
input variable. Default: -1. -1 means the last dimension.
Returns:
- Variable: output of softmax. A Tensor with type float32, float64.
+ Variable: ``Tensor`` indicating the output of softmax. The data type and shape are the same as ``input`` .
Examples:
@@ -2379,7 +2428,6 @@ def softmax(input, use_cudnn=False, name=None, axis=-1):
output= exe.run(feed={"input": x},
fetch_list=[result[0]])
print(output)
- #array([0.22595254, 0.39276356, 0.38128382], dtype=float32)]
"""
helper = LayerHelper('softmax', **locals())
if not isinstance(input, Variable):
@@ -4435,62 +4483,69 @@ def layer_norm(input,
act=None,
name=None):
"""
- ${comment}
+ **Layer Normalization Layer**
+
+ The API implements the function of the Layer Normalization Layer and can be applied to mini-batch input data.
+ Refer to `Layer Normalization `_
The formula is as follows:
.. math::
- \\mu & = \\frac{1}{H}\\sum_{i=1}^{H} a_i
-
- \\sigma & = \\sqrt{\\frac{1}{H}\sum_{i=1}^{H}(a_i - \\mu)^2}
+ \\mu & = \\frac{1}{H}\\sum_{i=1}^{H} x_i
- h & = f(\\frac{g}{\\sigma}(a - \\mu) + b)
+ \\sigma & = \\sqrt{\\frac{1}{H}\sum_{i=1}^{H}{(x_i - \\mu)^2} + \\epsilon}
- * :math:`a`: the vector representation of the summed inputs to the neurons
- in that layer.
+ y & = f(\\frac{g}{\\sigma}(x - \\mu) + b)
- * :math:`H`: the number of hidden units in a layers
-
- * :math:`g`: the trainable scale parameter.
-
- * :math:`b`: the trainable bias parameter.
+ - :math:`x`: the vector representation of the summed inputs to the neurons in that layer.
+ - :math:`H`: the number of hidden units in a layer
+ - :math:`\\epsilon`: the small value added to the variance to prevent division by zero.
+ - :math:`g`: the trainable scale parameter.
+ - :math:`b`: the trainable bias parameter.
Args:
- input(Variable): The input tensor variable.
- scale(bool): Whether to learn the adaptive gain :math:`g` after
- normalization. Default True.
- shift(bool): Whether to learn the adaptive bias :math:`b` after
- normalization. Default True.
- begin_norm_axis(int): The normalization will be performed along
+ input(Variable): A multi-dimension ``Tensor`` , and the data type is float32 or float64.
+ scale(bool, optional): Whether to learn the adaptive gain :math:`g` after
+ normalization. Default: True.
+ shift(bool, optional): Whether to learn the adaptive bias :math:`b` after
+ normalization. Default: True.
+ begin_norm_axis(int, optional): The normalization will be performed along
dimensions from :attr:`begin_norm_axis` to :attr:`rank(input)`.
- Default 1.
- epsilon(float): The small value added to the variance to prevent
- division by zero. Default 1e-05.
- param_attr(ParamAttr|None): The parameter attribute for the learnable
+ Default: 1.
+ epsilon(float, optional): The small value added to the variance to prevent
+ division by zero. Default: 1e-05.
+ param_attr(ParamAttr, optional): The parameter attribute for the learnable
gain :math:`g`. If :attr:`scale` is False, :attr:`param_attr` is
omitted. If :attr:`scale` is True and :attr:`param_attr` is None,
a default :code:`ParamAttr` would be added as scale. The
- :attr:`param_attr` is initialized as 1 if it is added. Default None.
- bias_attr(ParamAttr|None): The parameter attribute for the learnable
+ :attr:`param_attr` is initialized as 1 if it is added. Default: None.
+ bias_attr(ParamAttr, optional): The parameter attribute for the learnable
bias :math:`b`. If :attr:`shift` is False, :attr:`bias_attr` is
omitted. If :attr:`shift` is True and :attr:`param_attr` is None,
a default :code:`ParamAttr` would be added as bias. The
- :attr:`bias_attr` is initialized as 0 if it is added. Default None.
- act(str): Activation to be applied to the output of layer normalizaiton.
- Default None.
- name(str): The name of this layer. It is optional. Default None, and a
- unique name would be generated automatically.
+ :attr:`bias_attr` is initialized as 0 if it is added. Default: None.
+ act(str, optional): Activation to be applied to the output of layer normalization.
+ Default: None.
+ name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
Returns:
- ${y_comment}
+ Variable: ``Tensor`` indicating the normalized result, the data type is the same as ``input`` , and the return dimension is the same as ``input`` .
Examples:
- >>> import paddle.fluid as fluid
- >>> data = fluid.layers.data(name='data', shape=[3, 32, 32],
- >>> dtype='float32')
- >>> x = fluid.layers.layer_norm(input=data, begin_norm_axis=1)
+ .. code-block:: python
+
+ import paddle.fluid as fluid
+ import numpy as np
+ x = fluid.data(name='x', shape=[-1, 32, 32], dtype='float32')
+ hidden1 = fluid.layers.layer_norm(input=x, begin_norm_axis=1)
+ place = fluid.CPUPlace()
+ exe = fluid.Executor(place)
+ exe.run(fluid.default_startup_program())
+ np_x = np.random.random(size=(8, 32, 32)).astype('float32')
+ output = exe.run(feed={"x": np_x}, fetch_list = [hidden1])
+ print(output)
"""
assert in_dygraph_mode(
) is not True, "please use FC instead of fc in dygraph mode!"
@@ -7794,12 +7849,9 @@ def softmax_with_cross_entropy(logits,
return_softmax=False,
axis=-1):
"""
- **Softmax With Cross Entropy Operator.**
-
- Cross entropy loss with softmax is used as the output layer extensively. This
- operator computes the softmax normalized values for dimension :attr:`axis` of
- the input tensor, after which cross-entropy loss is computed. This provides
- a more numerically stable gradient.
+ This operator implements the cross entropy loss function with softmax. This function
+ combines the calculation of the softmax operation and the cross entropy loss function
+ to provide a more numerically stable gradient.
Because this operator performs a softmax on logits internally, it expects
unscaled logits. This operator should not be used with the output of
@@ -7816,72 +7868,71 @@ def softmax_with_cross_entropy(logits,
.. math::
- loss_j = -\\text{logit}_{label_j} +
- \\log\\left(\\sum_{i=0}^{K}\\exp(\\text{logit}_i)\\right), j = 1,..., K
+ loss_j = -\\text{logits}_{label_j} +
+ \\log\\left(\\sum_{i=0}^{K}\\exp(\\text{logits}_i)\\right), j = 1,..., K
2) Soft label (each sample can have a distribution over all classes)
.. math::
loss_j = -\\sum_{i=0}^{K}\\text{label}_i
- \\left(\\text{logit}_i - \\log\\left(\\sum_{i=0}^{K}
- \\exp(\\text{logit}_i)\\right)\\right), j = 1,...,K
+ \\left(\\text{logits}_i - \\log\\left(\\sum_{i=0}^{K}
+ \\exp(\\text{logits}_i)\\right)\\right), j = 1,...,K
- 3) If :attr:`numeric_stable_mode` is :attr:`True`, softmax is calculated
- first by:
+ 3) If :attr:`numeric_stable_mode` is :attr:`True`, softmax is calculated first by:
.. math::
- max_j &= \\max_{i=0}^{K}{\\text{logit}_i}
+ max_j &= \\max_{i=0}^{K}{\\text{logits}_i}
- log\\_max\\_sum_j &= \\log\\sum_{i=0}^{K}\\exp(logit_i - max_j)
+ log\\_max\\_sum_j &= \\log\\sum_{i=0}^{K}\\exp(logits_i - max_j)
- softmax_j &= \\exp(logit_j - max_j - {log\\_max\\_sum}_j)
+ softmax_j &= \\exp(logits_j - max_j - {log\\_max\\_sum}_j)
and then cross entropy loss is calculated by softmax and label.
Args:
- logits (Variable): The input tensor of unscaled log probabilities.
- label (Variable): The ground truth tensor. If :attr:`soft_label`
- is set to :attr:`True`, Label is a Tensor in the
- same shape with :attr:`logits`. If :attr:`soft_label` is set to
- :attr:`True`, Label is a Tensor in the same shape with
- :attr:`logits` expect shape in dimension :attr:`axis` as 1.
- soft_label (bool): A flag to indicate whether to interpretate the given
+ logits (Variable): A multi-dimension ``Tensor`` , and the data type is float32 or float64. The input tensor of unscaled log probabilities.
+ label (Variable): The ground truth ``Tensor`` , data type is the same
+ as the ``logits`` . If :attr:`soft_label` is set to :attr:`True`,
+ Label is a ``Tensor`` in the same shape as :attr:`logits`.
+ If :attr:`soft_label` is set to :attr:`False`, Label is a ``Tensor``
+ in the same shape as :attr:`logits` except that the size of dimension :attr:`axis` is 1.
+ soft_label (bool, optional): A flag to indicate whether to interpret the given
labels as soft labels. Default False.
- ignore_index (int): Specifies a target value that is ignored and does
- not contribute to the input gradient. Only valid
- if :attr:`soft_label` is set to :attr:`False`.
- Default: kIgnoreIndex
- numeric_stable_mode (bool): A flag to indicate whether to use a more
- numerically stable algorithm. Only valid
- when :attr:`soft_label` is :attr:`False`
- and GPU is used. When :attr:`soft_label`
- is :attr:`True` or CPU is used, the
- algorithm is always numerically stable.
- Note that the speed may be slower when use
- stable algorithm. Default: True
- return_softmax (bool): A flag indicating whether to return the softmax
- along with the cross entropy loss. Default: False
- axis (int): The index of dimension to perform softmax calculations. It
- should be in range :math:`[-1, rank - 1]`, while :math:`rank`
- is the rank of input :attr:`logits`. Default: -1.
+ ignore_index (int, optional): Specifies a target value that is ignored and does
+ not contribute to the input gradient. Only valid
+ if :attr:`soft_label` is set to :attr:`False`.
+ Default: kIgnoreIndex(-100).
+ numeric_stable_mode (bool, optional): A flag to indicate whether to use a more
+ numerically stable algorithm. Only valid
+ when :attr:`soft_label` is :attr:`False`
+ and GPU is used. When :attr:`soft_label`
+ is :attr:`True` or CPU is used, the
+ algorithm is always numerically stable.
+ Note that the speed may be slower when using the
+ stable algorithm. Default: True.
+ return_softmax (bool, optional): A flag indicating whether to return the softmax
+ along with the cross entropy loss. Default: False.
+ axis (int, optional): The index of dimension to perform softmax calculations. It
+ should be in range :math:`[-1, rank - 1]`, while :math:`rank`
+ is the rank of input :attr:`logits`. Default: -1.
Returns:
- Variable or Tuple of two Variables: Return the cross entropy loss if \
- `return_softmax` is False, otherwise the tuple \
- (loss, softmax), softmax is in the same shape \
- with input logits and cross entropy loss is in \
- the same shape with input logits except shape \
- in dimension :attr:`axis` as 1.
+ ``Variable`` or Tuple of two ``Variable`` : Return the cross entropy loss if \
+ `return_softmax` is False, otherwise the tuple \
+ (loss, softmax), softmax is in the same shape \
+ with input logits and cross entropy loss is in \
+ the same shape with input logits except shape \
+ in dimension :attr:`axis` as 1.
Examples:
.. code-block:: python
import paddle.fluid as fluid
- data = fluid.layers.data(name='data', shape=[128], dtype='float32')
- label = fluid.layers.data(name='label', shape=[1], dtype='int64')
+ data = fluid.data(name='data', shape=[-1, 128], dtype='float32')
+ label = fluid.data(name='label', shape=[-1, 1], dtype='int64')
fc = fluid.layers.fc(input=data, size=100)
out = fluid.layers.softmax_with_cross_entropy(
logits=fc, label=label)
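For the hard-label case, the numerically stable formulation (3) above amounts to a shifted log-sum-exp; a compact numpy sketch of that computation follows (illustrative only; the shapes and the random inputs are placeholders):

.. code-block:: python

    import numpy as np

    batch, num_classes = 4, 100
    logits = np.random.randn(batch, num_classes).astype('float32')
    label = np.random.randint(0, num_classes, size=(batch, 1))

    max_j = logits.max(axis=1, keepdims=True)
    log_max_sum = np.log(np.exp(logits - max_j).sum(axis=1, keepdims=True))
    softmax = np.exp(logits - max_j - log_max_sum)

    # loss_j = -logits_label + log(sum_i exp(logits_i)), computed in the shifted form
    log_probs = logits - max_j - log_max_sum
    loss = -log_probs[np.arange(batch), label.ravel()].reshape(batch, 1)
    print(loss.shape, softmax.shape)   # (4, 1) (4, 100)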
@@ -11005,54 +11056,45 @@ def affine_grid(theta, out_shape, name=None):
def rank_loss(label, left, right, name=None):
"""
-
- **Rank loss layer for RankNet**
-
+ This operator implements the rank loss layer in the RankNet model. RankNet is a pairwise ranking model
+ with a training sample consisting of a pair of documents (A and B). The label (P)
+ indicates whether A is ranked higher than B or not. For more details, please refer to:
`RankNet `_
- is a pairwise ranking model with a training sample consisting of a pair
- of documents, A and B. Label P indicates whether A is ranked higher than B
- or not:
-
- P = {0, 1} or {0, 0.5, 1}, where 0.5 means that there is no information
- about the rank of the input pair.
Rank loss layer takes three inputs: left ( :math:`o_i` ), right ( :math:`o_j` ) and
label ( :math:`P_{i,j}` ). The inputs respectively represent RankNet's output scores
- for documents A and B and the value of label P. The following equation
- computes rank loss C_{i,j} from the inputs:
+ for documents A and B and the value of label P. The rank loss layer takes batch inputs
+ of size batch_size (batch_size >= 1). P = {0, 1} or {0, 0.5, 1},
+ where 0.5 means that there is no information about the rank of the input pair.
+ The following equation computes rank loss C_{i,j} from the inputs:
.. math::
-
C_{i,j} &= -\\tilde{P_{ij}} * o_{i,j} + \log(1 + e^{o_{i,j}}) \\\\
-
+ .. math::
o_{i,j} &= o_i - o_j \\\\
-
+ .. math::
\\tilde{P_{i,j}} &= \\left \{0, 0.5, 1 \\right \} \ or \ \\left \{0, 1 \\right \}
-
- Rank loss layer takes batch inputs with size batch_size (batch_size >= 1).
-
- Args:
- label (Variable): Indicats whether A ranked higher than B or not.
- left (Variable): RankNet's output score for doc A.
- right (Variable): RankNet's output score for doc B.
- name(str|None): A name for this layer(optional). If set None, the layer
- will be named automatically.
+ Parameters:
+ label (Variable): 2-D ``Tensor`` with the shape of :math:`[batch,1]`, the data type is float32, batch indicates the size of the data. Indicates whether A ranked higher than B or not.
+ left (Variable): 2-D ``Tensor`` with the shape of :math:`[batch,1]`, the data type is float32. RankNet's output score for doc A.
+ right (Variable): 2-D ``Tensor`` with the shape of :math:`[batch,1]`, the data type is float32. RankNet's output score for doc B.
+ name(str|None): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
Returns:
- list: The value of rank loss.
+ Variable: ``Tensor`` indicating the output value of the rank loss layer, the data type is float32, and the return value's shape is :math:`[batch,1]` .
Raises:
- ValueError: Any of label, left, and right is not a variable.
+ ValueError: Any of label, left, and right is not a ``Variable`` .
Examples:
.. code-block:: python
import paddle.fluid as fluid
- label = fluid.layers.data(name="label", shape=[-1, 1], dtype="float32")
- left = fluid.layers.data(name="left", shape=[-1, 1], dtype="float32")
- right = fluid.layers.data(name="right", shape=[-1, 1], dtype="float32")
+ label = fluid.data(name="label", shape=[-1, 1], dtype="float32")
+ left = fluid.data(name="left", shape=[-1, 1], dtype="float32")
+ right = fluid.data(name="right", shape=[-1, 1], dtype="float32")
out = fluid.layers.rank_loss(label, left, right)
"""
--
GitLab