Commit 76a58197 authored by DuYao, committed by hong

update English document (#20330)

* update English document, test=document_fix

* update api.spec, test=document_fix

* update api.spec, test=document_fix

* update, test=document_fix
Parent e34fccbc
@@ -199,7 +199,7 @@ paddle.fluid.layers.lod_append (ArgSpec(args=['x', 'level'], varargs=None, keywo
paddle.fluid.layers.lrn (ArgSpec(args=['input', 'n', 'k', 'alpha', 'beta', 'name'], varargs=None, keywords=None, defaults=(5, 1.0, 0.0001, 0.75, None)), ('document', 'fa565b65fb98d3ca82361c79f41b06b2'))
paddle.fluid.layers.pad (ArgSpec(args=['x', 'paddings', 'pad_value', 'name'], varargs=None, keywords=None, defaults=(0.0, None)), ('document', '46b3ada86dd2c79042dca90a55e08f66'))
paddle.fluid.layers.pad_constant_like (ArgSpec(args=['x', 'y', 'pad_value', 'name'], varargs=None, keywords=None, defaults=(0.0, None)), ('document', '89aa122a50dc20ee116ae49d66854d20'))
- paddle.fluid.layers.label_smooth (ArgSpec(args=['label', 'prior_dist', 'epsilon', 'dtype', 'name'], varargs=None, keywords=None, defaults=(None, 0.1, 'float32', None)), ('document', '214f1dfbe95a628600bbe99e836319cf'))
+ paddle.fluid.layers.label_smooth (ArgSpec(args=['label', 'prior_dist', 'epsilon', 'dtype', 'name'], varargs=None, keywords=None, defaults=(None, 0.1, 'float32', None)), ('document', '70b6f4ab59e60650231b1ead4ad46222'))
paddle.fluid.layers.roi_pool (ArgSpec(args=['input', 'rois', 'pooled_height', 'pooled_width', 'spatial_scale'], varargs=None, keywords=None, defaults=(1, 1, 1.0)), ('document', '6fc9bae94518bbf3e1a9e479f38f6537'))
paddle.fluid.layers.roi_align (ArgSpec(args=['input', 'rois', 'pooled_height', 'pooled_width', 'spatial_scale', 'sampling_ratio', 'name'], varargs=None, keywords=None, defaults=(1, 1, 1.0, -1, None)), ('document', '3885fd76e122ac0563fa8369bcab7363'))
paddle.fluid.layers.dice_loss (ArgSpec(args=['input', 'label', 'epsilon', 'name'], varargs=None, keywords=None, defaults=(1e-05, None)), ('document', '08d94daffbea3935178810bdc1633f07'))
@@ -604,7 +604,7 @@ paddle.fluid.dygraph.Conv2D.set_dict (ArgSpec(args=['self', 'stat_dict', 'includ
paddle.fluid.dygraph.Conv2D.state_dict (ArgSpec(args=['self', 'destination', 'include_sublayers'], varargs=None, keywords=None, defaults=(None, True)), ('document', '9d689f44592cd22812c7ec06a9654eac'))
paddle.fluid.dygraph.Conv2D.sublayers (ArgSpec(args=['self', 'include_sublayers'], varargs=None, keywords=None, defaults=(True,)), ('document', '00a881005ecbc96578faf94513bf0d62'))
paddle.fluid.dygraph.Conv2D.train (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
- paddle.fluid.dygraph.Conv3D ('paddle.fluid.dygraph.nn.Conv3D', ('document', '50412bd3fbf3557a8ef48e25c6517025'))
+ paddle.fluid.dygraph.Conv3D ('paddle.fluid.dygraph.nn.Conv3D', ('document', 'f81dee6781d6c18d0e7f5ca66b2fb010'))
paddle.fluid.dygraph.Conv3D.__init__ (ArgSpec(args=['self', 'name_scope', 'num_filters', 'filter_size', 'stride', 'padding', 'dilation', 'groups', 'param_attr', 'bias_attr', 'use_cudnn', 'act'], varargs=None, keywords=None, defaults=(1, 0, 1, None, None, None, True, None)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.Conv3D.add_parameter (ArgSpec(args=['self', 'name', 'parameter'], varargs=None, keywords=None, defaults=None), ('document', 'f35ab374c7d5165c3daf3bd64a5a2ec1'))
paddle.fluid.dygraph.Conv3D.add_sublayer (ArgSpec(args=['self', 'name', 'sublayer'], varargs=None, keywords=None, defaults=None), ('document', '839ff3c0534677ba6ad8735c3fd4e995'))
@@ -689,7 +689,7 @@ paddle.fluid.dygraph.Embedding.set_dict (ArgSpec(args=['self', 'stat_dict', 'inc
paddle.fluid.dygraph.Embedding.state_dict (ArgSpec(args=['self', 'destination', 'include_sublayers'], varargs=None, keywords=None, defaults=(None, True)), ('document', '9d689f44592cd22812c7ec06a9654eac'))
paddle.fluid.dygraph.Embedding.sublayers (ArgSpec(args=['self', 'include_sublayers'], varargs=None, keywords=None, defaults=(True,)), ('document', '00a881005ecbc96578faf94513bf0d62'))
paddle.fluid.dygraph.Embedding.train (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
- paddle.fluid.dygraph.GRUUnit ('paddle.fluid.dygraph.nn.GRUUnit', ('document', '389e860e455b67aab1f4d472ac9d7e49'))
+ paddle.fluid.dygraph.GRUUnit ('paddle.fluid.dygraph.nn.GRUUnit', ('document', 'f0e648f0a8d3389f755698dde488dc93'))
paddle.fluid.dygraph.GRUUnit.__init__ (ArgSpec(args=['self', 'name_scope', 'size', 'param_attr', 'bias_attr', 'activation', 'gate_activation', 'origin_mode', 'dtype'], varargs=None, keywords=None, defaults=(None, None, 'tanh', 'sigmoid', False, 'float32')), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.GRUUnit.add_parameter (ArgSpec(args=['self', 'name', 'parameter'], varargs=None, keywords=None, defaults=None), ('document', 'f35ab374c7d5165c3daf3bd64a5a2ec1'))
paddle.fluid.dygraph.GRUUnit.add_sublayer (ArgSpec(args=['self', 'name', 'sublayer'], varargs=None, keywords=None, defaults=None), ('document', '839ff3c0534677ba6ad8735c3fd4e995'))
@@ -757,7 +757,7 @@ paddle.fluid.dygraph.PRelu.set_dict (ArgSpec(args=['self', 'stat_dict', 'include
paddle.fluid.dygraph.PRelu.state_dict (ArgSpec(args=['self', 'destination', 'include_sublayers'], varargs=None, keywords=None, defaults=(None, True)), ('document', '9d689f44592cd22812c7ec06a9654eac'))
paddle.fluid.dygraph.PRelu.sublayers (ArgSpec(args=['self', 'include_sublayers'], varargs=None, keywords=None, defaults=(True,)), ('document', '00a881005ecbc96578faf94513bf0d62'))
paddle.fluid.dygraph.PRelu.train (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
- paddle.fluid.dygraph.BilinearTensorProduct ('paddle.fluid.dygraph.nn.BilinearTensorProduct', ('document', 'be70d0f6d43729d9cb80c9a34ed5f26b'))
+ paddle.fluid.dygraph.BilinearTensorProduct ('paddle.fluid.dygraph.nn.BilinearTensorProduct', ('document', 'ddea5bc0668a636ded7db09538511c20'))
paddle.fluid.dygraph.BilinearTensorProduct.__init__ (ArgSpec(args=['self', 'name_scope', 'size', 'name', 'act', 'param_attr', 'bias_attr'], varargs=None, keywords=None, defaults=(None, None, None, None)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.BilinearTensorProduct.add_parameter (ArgSpec(args=['self', 'name', 'parameter'], varargs=None, keywords=None, defaults=None), ('document', 'f35ab374c7d5165c3daf3bd64a5a2ec1'))
paddle.fluid.dygraph.BilinearTensorProduct.add_sublayer (ArgSpec(args=['self', 'name', 'sublayer'], varargs=None, keywords=None, defaults=None), ('document', '839ff3c0534677ba6ad8735c3fd4e995'))
@@ -791,7 +791,7 @@ paddle.fluid.dygraph.Conv2DTranspose.set_dict (ArgSpec(args=['self', 'stat_dict'
paddle.fluid.dygraph.Conv2DTranspose.state_dict (ArgSpec(args=['self', 'destination', 'include_sublayers'], varargs=None, keywords=None, defaults=(None, True)), ('document', '9d689f44592cd22812c7ec06a9654eac'))
paddle.fluid.dygraph.Conv2DTranspose.sublayers (ArgSpec(args=['self', 'include_sublayers'], varargs=None, keywords=None, defaults=(True,)), ('document', '00a881005ecbc96578faf94513bf0d62'))
paddle.fluid.dygraph.Conv2DTranspose.train (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
- paddle.fluid.dygraph.Conv3DTranspose ('paddle.fluid.dygraph.nn.Conv3DTranspose', ('document', '91ba132bc690eaf76eabdbde8f87e4a0'))
+ paddle.fluid.dygraph.Conv3DTranspose ('paddle.fluid.dygraph.nn.Conv3DTranspose', ('document', '0ef981fd6a74aaff21673f9925736ac7'))
paddle.fluid.dygraph.Conv3DTranspose.__init__ (ArgSpec(args=['self', 'name_scope', 'num_filters', 'output_size', 'filter_size', 'padding', 'stride', 'dilation', 'groups', 'param_attr', 'bias_attr', 'use_cudnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, None, 0, 1, 1, None, None, None, True, None, None)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.Conv3DTranspose.add_parameter (ArgSpec(args=['self', 'name', 'parameter'], varargs=None, keywords=None, defaults=None), ('document', 'f35ab374c7d5165c3daf3bd64a5a2ec1'))
paddle.fluid.dygraph.Conv3DTranspose.add_sublayer (ArgSpec(args=['self', 'name', 'sublayer'], varargs=None, keywords=None, defaults=None), ('document', '839ff3c0534677ba6ad8735c3fd4e995'))
@@ -870,31 +870,31 @@ paddle.fluid.dygraph.Tracer.train_mode (ArgSpec(args=['self'], varargs=None, key
paddle.fluid.dygraph.prepare_context (ArgSpec(args=['strategy'], varargs=None, keywords=None, defaults=(None,)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.save_dygraph (ArgSpec(args=['state_dict', 'model_path'], varargs=None, keywords=None, defaults=None), ('document', '7c2bd58a69f9bca3b884f44154c84569'))
paddle.fluid.dygraph.load_dygraph (ArgSpec(args=['model_path'], varargs=None, keywords=None, defaults=None), ('document', 'd6d98002c39d2484835f4748e35b761c'))
- paddle.fluid.dygraph.NoamDecay ('paddle.fluid.dygraph.learning_rate_scheduler.NoamDecay', ('document', '9ccfea97dbf15134d406a23aae1e1fa2'))
+ paddle.fluid.dygraph.NoamDecay ('paddle.fluid.dygraph.learning_rate_scheduler.NoamDecay', ('document', '3441619381487db8d1929a205f3c6d41'))
paddle.fluid.dygraph.NoamDecay.__init__ (ArgSpec(args=['self', 'd_model', 'warmup_steps', 'begin', 'step', 'dtype'], varargs=None, keywords=None, defaults=(1, 1, 'float32')), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.NoamDecay.create_lr_var (ArgSpec(args=['self', 'lr'], varargs=None, keywords=None, defaults=None), ('document', '013bc233558149d0757b3df57845b866'))
paddle.fluid.dygraph.NoamDecay.step (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
- paddle.fluid.dygraph.PiecewiseDecay ('paddle.fluid.dygraph.learning_rate_scheduler.PiecewiseDecay', ('document', '8f4d37eaad4e2f5b12850f3663856758'))
+ paddle.fluid.dygraph.PiecewiseDecay ('paddle.fluid.dygraph.learning_rate_scheduler.PiecewiseDecay', ('document', '0fccf303b94a13ae670fb3dd51931f73'))
paddle.fluid.dygraph.PiecewiseDecay.__init__ (ArgSpec(args=['self', 'boundaries', 'values', 'begin', 'step', 'dtype'], varargs=None, keywords=None, defaults=(1, 'float32')), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.PiecewiseDecay.create_lr_var (ArgSpec(args=['self', 'lr'], varargs=None, keywords=None, defaults=None), ('document', '013bc233558149d0757b3df57845b866'))
paddle.fluid.dygraph.PiecewiseDecay.step (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
- paddle.fluid.dygraph.NaturalExpDecay ('paddle.fluid.dygraph.learning_rate_scheduler.NaturalExpDecay', ('document', '94bed58b392a5a71b6d1abd39eed7111'))
+ paddle.fluid.dygraph.NaturalExpDecay ('paddle.fluid.dygraph.learning_rate_scheduler.NaturalExpDecay', ('document', '5fef27468d49ca8ca6c6a9635ad0f5c1'))
paddle.fluid.dygraph.NaturalExpDecay.__init__ (ArgSpec(args=['self', 'learning_rate', 'decay_steps', 'decay_rate', 'staircase', 'begin', 'step', 'dtype'], varargs=None, keywords=None, defaults=(False, 0, 1, 'float32')), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.NaturalExpDecay.create_lr_var (ArgSpec(args=['self', 'lr'], varargs=None, keywords=None, defaults=None), ('document', '013bc233558149d0757b3df57845b866'))
paddle.fluid.dygraph.NaturalExpDecay.step (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
- paddle.fluid.dygraph.ExponentialDecay ('paddle.fluid.dygraph.learning_rate_scheduler.ExponentialDecay', ('document', 'a259689c649c5f82636536386ce2ef19'))
+ paddle.fluid.dygraph.ExponentialDecay ('paddle.fluid.dygraph.learning_rate_scheduler.ExponentialDecay', ('document', '846eb564df136d8a8917bf16b5b8ac9b'))
paddle.fluid.dygraph.ExponentialDecay.__init__ (ArgSpec(args=['self', 'learning_rate', 'decay_steps', 'decay_rate', 'staircase', 'begin', 'step', 'dtype'], varargs=None, keywords=None, defaults=(False, 0, 1, 'float32')), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.ExponentialDecay.create_lr_var (ArgSpec(args=['self', 'lr'], varargs=None, keywords=None, defaults=None), ('document', '013bc233558149d0757b3df57845b866'))
paddle.fluid.dygraph.ExponentialDecay.step (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
- paddle.fluid.dygraph.InverseTimeDecay ('paddle.fluid.dygraph.learning_rate_scheduler.InverseTimeDecay', ('document', '6a868b2c7cc0f09f57ef71902bbc93ca'))
+ paddle.fluid.dygraph.InverseTimeDecay ('paddle.fluid.dygraph.learning_rate_scheduler.InverseTimeDecay', ('document', '1a74f0370e2e64f9e786d3c336526e6d'))
paddle.fluid.dygraph.InverseTimeDecay.__init__ (ArgSpec(args=['self', 'learning_rate', 'decay_steps', 'decay_rate', 'staircase', 'begin', 'step', 'dtype'], varargs=None, keywords=None, defaults=(False, 0, 1, 'float32')), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.InverseTimeDecay.create_lr_var (ArgSpec(args=['self', 'lr'], varargs=None, keywords=None, defaults=None), ('document', '013bc233558149d0757b3df57845b866'))
paddle.fluid.dygraph.InverseTimeDecay.step (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
- paddle.fluid.dygraph.PolynomialDecay ('paddle.fluid.dygraph.learning_rate_scheduler.PolynomialDecay', ('document', 'bb90314cee58952f13522dcd571ca832'))
+ paddle.fluid.dygraph.PolynomialDecay ('paddle.fluid.dygraph.learning_rate_scheduler.PolynomialDecay', ('document', 'e222a066a2bcf31bc52a14271048e034'))
paddle.fluid.dygraph.PolynomialDecay.__init__ (ArgSpec(args=['self', 'learning_rate', 'decay_steps', 'end_learning_rate', 'power', 'cycle', 'begin', 'step', 'dtype'], varargs=None, keywords=None, defaults=(0.0001, 1.0, False, 0, 1, 'float32')), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.PolynomialDecay.create_lr_var (ArgSpec(args=['self', 'lr'], varargs=None, keywords=None, defaults=None), ('document', '013bc233558149d0757b3df57845b866'))
paddle.fluid.dygraph.PolynomialDecay.step (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
- paddle.fluid.dygraph.CosineDecay ('paddle.fluid.dygraph.learning_rate_scheduler.CosineDecay', ('document', '46dadadee1a8a92d70bd277d9345bfb0'))
+ paddle.fluid.dygraph.CosineDecay ('paddle.fluid.dygraph.learning_rate_scheduler.CosineDecay', ('document', '0d7fe2b87492a0eb5cde60dbe268ea17'))
paddle.fluid.dygraph.CosineDecay.__init__ (ArgSpec(args=['self', 'learning_rate', 'step_each_epoch', 'epochs', 'begin', 'step', 'dtype'], varargs=None, keywords=None, defaults=(0, 1, 'float32')), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.CosineDecay.create_lr_var (ArgSpec(args=['self', 'lr'], varargs=None, keywords=None, defaults=None), ('document', '013bc233558149d0757b3df57845b866'))
paddle.fluid.dygraph.CosineDecay.step (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
...
@@ -69,7 +69,7 @@ class LearningRateDecay(object):
class PiecewiseDecay(LearningRateDecay):
    """
-   piecewise decay scheduler
+   Piecewise decay scheduler.

    The algorithm can be described as the code below.

@@ -77,22 +77,25 @@ class PiecewiseDecay(LearningRateDecay):
    boundaries = [10000, 20000]
    values = [1.0, 0.5, 0.1]
-   if step < 10000:
+   if global_step < 10000:
        learning_rate = 1.0
-   elif 10000 <= step < 20000:
+   elif 10000 <= global_step < 20000:
        learning_rate = 0.5
    else:
        learning_rate = 0.1

-   Args:
-       boundaries: A list of steps numbers.
-       values: A list of learning rate values that will be picked during
-           different step boundaries.
-       begin: The begin step to initilize the self.step_num
-       step: The step_size using when calculate the new step_num (Defalult is 1)
-       dtype: The dtype used to create the learning rate variable
+   Parameters:
+       boundaries(list): A list of step numbers. The type of element in the list is python int.
+       values(list): A list of learning rate values that will be picked during
+           different step boundaries. The type of element in the list is python float.
+       begin(int): The begin step to initialize the global_step in the description above.
+       step(int, optional): The step size used to calculate the new global_step in the description above.
+           The default value is 1.
+       dtype(str, optional): The data type used to create the learning rate variable. The data type can be set as
+           'float32', 'float64'. The default value is 'float32'.

    Returns:
-       The decayed learning rate.
+       None.

    Examples:
        .. code-block:: python
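For reference, a minimal Python sketch of the piecewise rule described above (the function and variable names are illustrative, not part of the layer's API):

    .. code-block:: python

        # Illustrative sketch of the piecewise schedule described above.
        def piecewise_lr(global_step, boundaries, values):
            # values must have one more entry than boundaries
            for i, b in enumerate(boundaries):
                if global_step < b:
                    return values[i]
            return values[-1]

        assert piecewise_lr(5000, [10000, 20000], [1.0, 0.5, 0.1]) == 1.0
        assert piecewise_lr(15000, [10000, 20000], [1.0, 0.5, 0.1]) == 0.5
        assert piecewise_lr(25000, [10000, 20000], [1.0, 0.5, 0.1]) == 0.1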
@@ -125,25 +128,40 @@ class NaturalExpDecay(LearningRateDecay):
    """
    Applies natural exponential decay to the initial learning rate.

-   .. code-block:: python
-
-       if not staircase:
-           decayed_learning_rate = learning_rate * exp(- decay_rate * (global_step / decay_steps))
-       else:
-           decayed_learning_rate = learning_rate * exp(- decay_rate * (global_step / decay_steps))
-
-   Args:
-       learning_rate: A scalar float32 value or a Variable. This
-           will be the initial learning rate during training
-       decay_steps: A Python `int32` number.
-       decay_rate: A Python `float` number.
-       staircase: Boolean. If set true, decay the learning rate every decay_steps.
-       begin: A Python 'int32' number, the begin step (Default is 0)
-       step: A Python 'int32' number, the step size (Default is 1)
-       dtype: A Python 'str', the dtype used to create learning rate variable (Default is 'float32')
+   The algorithm can be described as following.
+
+   .. math::
+
+       decayed\_learning\_rate = learning\_rate * e^{y}
+
+   If staircase is set to False, then:
+
+   .. math::
+
+       y = - decay\_rate * \\frac{global\_step}{decay\_steps}
+
+   If staircase is set to True, then:
+
+   .. math::
+
+       y = - decay\_rate * math.floor(\\frac{global\_step}{decay\_steps})
+
+   Parameters:
+       learning_rate(Variable|float): The initial learning rate. If the type
+           is Variable, it's a tensor with shape [1], the data type can be
+           float32 or float64. It also can be set to python int number.
+       decay_steps(int): The decay step size. It determines the decay cycle.
+       decay_rate(float): The decay rate.
+       staircase(bool, optional): If set to True, decay the learning rate at discrete intervals. The
+           default value is False.
+       begin(int, optional): The begin step. The initial value of global_step described above. The default value is 0.
+       step(int, optional): The step size used to calculate the new global_step in the description above.
+           The default value is 1.
+       dtype(str, optional): The data type used to create the learning rate variable. The data type can be set as
+           'float32', 'float64'. The default value is 'float32'.

    Returns:
-       The decayed learning rate.
+       None.

    Examples:
        .. code-block:: python
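For reference, a minimal sketch of the natural exponential decay formula above (illustrative names only; this is not the layer's implementation):

    .. code-block:: python

        import math

        # lr = lr0 * exp(-decay_rate * y), with y = global_step / decay_steps,
        # optionally floored when staircase is True.
        def natural_exp_lr(lr0, global_step, decay_steps, decay_rate, staircase=False):
            y = global_step / float(decay_steps)
            if staircase:
                y = math.floor(y)
            return lr0 * math.exp(-decay_rate * y)

        print(natural_exp_lr(1.0, 10000, 10000, 0.5))        # ~0.6065
        print(natural_exp_lr(1.0, 15000, 10000, 0.5, True))  # ~0.6065, since floor(1.5) == 1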
@@ -189,29 +207,41 @@ class ExponentialDecay(LearningRateDecay):
    """
    Applies exponential decay to the learning rate.

-   When training a model, it is often recommended to lower the learning rate as the
-   training progresses. By using this function, the learning rate will be decayed by
-   'decay_rate' every 'decay_steps' steps.
-
-   .. code-block:: python
-
-       if staircase == True:
-           decayed_learning_rate = learning_rate * decay_rate ^ floor(global_step / decay_steps)
-       else:
-           decayed_learning_rate = learning_rate * decay_rate ^ (global_step / decay_steps)
-
-   Args:
-       learning_rate(Variable|float): The initial learning rate.
-       decay_steps(int): See the decay computation above.
-       decay_rate(float): The decay rate. See the decay computation above.
-       staircase(Boolean): If True, decay the learning rate at discrete intervals.
-           Default: False
-       begin(int): The begin step (default is 0)
-       step(int): The step size (default is 1)
-       dtype(str): The dtype used to create learning rate (default is 'float32')
+   The algorithm can be described as following.
+
+   .. math::
+
+       decayed\_learning\_rate = learning\_rate * decay\_rate ^ y
+
+   If staircase is set to False, then:
+
+   .. math::
+
+       y = \\frac{global\_step}{decay\_steps}
+
+   If staircase is set to True, then:
+
+   .. math::
+
+       y = math.floor(\\frac{global\_step}{decay\_steps})
+
+   Parameters:
+       learning_rate(Variable|float): The initial learning rate. If the type
+           is Variable, it's a tensor with shape [1], the data type can be
+           float32 or float64. It also can be set to python int number.
+       decay_steps(int): The decay step size. It determines the decay cycle.
+       decay_rate(float): The decay rate.
+       staircase(bool, optional): If set to True, decay the learning rate at discrete intervals. The
+           default value is False.
+       begin(int, optional): The begin step. The initial value of global_step described above. The default value is 0.
+       step(int, optional): The step size used to calculate the new global_step in the description above.
+           The default value is 1.
+       dtype(str, optional): The data type used to create the learning rate variable. The data type can be set as
+           'float32', 'float64'. The default value is 'float32'.

    Returns:
-       The decayed learning rate.
+       None.

    Examples:
        .. code-block:: python
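For reference, a minimal sketch of the exponential decay formula above (illustrative names only, not the layer's implementation):

    .. code-block:: python

        import math

        # lr = lr0 * decay_rate ** y, with y = global_step / decay_steps,
        # optionally floored when staircase is True.
        def exponential_lr(lr0, global_step, decay_steps, decay_rate, staircase=False):
            y = global_step / float(decay_steps)
            if staircase:
                y = math.floor(y)
            return lr0 * decay_rate ** y

        print(exponential_lr(0.1, 5000, 10000, 0.5))        # 0.1 * 0.5 ** 0.5 ~ 0.0707
        print(exponential_lr(0.1, 5000, 10000, 0.5, True))  # 0.1 * 0.5 ** 0   = 0.1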
@@ -257,27 +287,35 @@ class InverseTimeDecay(LearningRateDecay):
    """
    Applies inverse time decay to the initial learning rate.

-   When training a model, it is often recommended to lower the learning rate as the
-   training progresses. By using this function, an inverse decay function will be
-   applied to the initial learning rate.
-
-   >>> if staircase == True:
-   >>>     decayed_learning_rate = learning_rate / (1 + decay_rate * floor(global_step / decay_step))
-   >>> else:
-   >>>     decayed_learning_rate = learning_rate / (1 + decay_rate * global_step / decay_step)
-
-   Args:
-       learning_rate(Variable|float): The initial learning rate.
-       decay_steps(int): See the decay computation above.
-       decay_rate(float): The decay rate. See the decay computation above.
-       staircase(Boolean): If True, decay the learning rate at discrete intervals.
-           Default: False
-       begin(int): The begin step (default is 0)
-       step(int): The step size (default is 1)
-       dtype(str): The dtype used to create learning rate (default is 'float32')
+   The algorithm can be described as following.
+   If staircase is set to False, then:
+
+   .. math::
+
+       decayed\_learning\_rate = \\frac{learning\_rate}{1 + decay\_rate * \\frac{global\_step}{decay\_step}}
+
+   If staircase is set to True, then:
+
+   .. math::
+
+       decayed\_learning\_rate = \\frac{learning\_rate}{1 + decay\_rate * math.floor(\\frac{global\_step}{decay\_step})}
+
+   Parameters:
+       learning_rate(Variable|float): The initial learning rate. If the type
+           is Variable, it's a tensor with shape [1], the data type can be
+           float32 or float64. It also can be set to python int number.
+       decay_steps(int): The decay step size. It determines the decay cycle.
+       decay_rate(float): The decay rate.
+       staircase(bool, optional): If set to True, decay the learning rate at discrete intervals. The
+           default value is False.
+       begin(int, optional): The begin step. The initial value of global_step described above. The default value is 0.
+       step(int, optional): The step size used to calculate the new global_step in the description above.
+           The default value is 1.
+       dtype(str, optional): The data type used to create the learning rate variable. The data type can be
+           'float32', 'float64'. The default value is 'float32'.

    Returns:
-       The decayed learning rate.
+       None.

    Examples:
        .. code-block:: python
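For reference, a minimal sketch of the inverse time decay formula above (illustrative names only, not the layer's implementation):

    .. code-block:: python

        import math

        # lr = lr0 / (1 + decay_rate * y), with y = global_step / decay_steps,
        # optionally floored when staircase is True.
        def inverse_time_lr(lr0, global_step, decay_steps, decay_rate, staircase=False):
            y = global_step / float(decay_steps)
            if staircase:
                y = math.floor(y)
            return lr0 / (1.0 + decay_rate * y)

        print(inverse_time_lr(0.1, 10000, 10000, 0.5))  # 0.1 / 1.5 ~ 0.0667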
@@ -323,28 +361,40 @@ class PolynomialDecay(LearningRateDecay):
    """
    Applies polynomial decay to the initial learning rate.

-   .. code-block:: text
-
-       if cycle:
-           decay_steps = decay_steps * ceil(global_step / decay_steps)
-       else:
-           global_step = min(global_step, decay_steps)
-       decayed_learning_rate = (learning_rate - end_learning_rate) *
-           (1 - global_step / decay_steps) ^ power + end_learning_rate
-
-   Args:
-       learning_rate(Variable|float32): A scalar float32 value or a Variable. This
-           will be the initial learning rate during training.
-       decay_steps(int32): A Python `int32` number.
-       end_learning_rate(float): A Python `float` number.
-       power(float): A Python `float` number.
-       cycle(bool): If set true, decay the learning rate every decay_steps.
-       begin(int): The begin step (default is 0)
-       step(int): The step size (default is 1)
-       dtype(str): The dtype used to create learning rate (default is 'float32')
+   The algorithm can be described as following.
+
+   If cycle is set to True, then:
+
+   .. math::
+
+       decay\_steps & = decay\_steps * math.ceil(\\frac{global\_step}{decay\_steps})
+
+       decayed\_learning\_rate & = (learning\_rate-end\_learning\_rate)*(1-\\frac{global\_step}{decay\_steps})^{power}+end\_learning\_rate
+
+   If cycle is set to False, then:
+
+   .. math::
+
+       global\_step & = min(global\_step, decay\_steps)
+
+       decayed\_learning\_rate & = (learning\_rate-end\_learning\_rate)*(1-\\frac{global\_step}{decay\_steps})^{power}+end\_learning\_rate
+
+   Parameters:
+       learning_rate(Variable|float): The initial learning rate. If the type
+           is Variable, it's a tensor with shape [1], the data type can be
+           float32 or float64. It also can be set to python int number.
+       decay_steps(int32): The decay step size. It determines the decay cycle.
+       end_learning_rate(float, optional): The minimum final learning rate. The default value is 0.0001.
+       power(float, optional): Power of polynomial. The default value is 1.0.
+       cycle(bool, optional): If set true, decay the learning rate every decay_steps. The default value is False.
+       begin(int, optional): The begin step. The initial value of global_step described above. The default value is 0.
+       step(int, optional): The step size used to calculate the new global_step in the description above.
+           The default value is 1.
+       dtype(str, optional): The data type used to create the learning rate variable. The data type can be set as
+           'float32', 'float64'. The default value is 'float32'.

    Returns:
-       The decayed learning rate.
+       None.

    Examples:
        .. code-block:: python
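For reference, a minimal sketch of the polynomial decay rule above (illustrative names; the ``max(1, ...)`` guard for ``global_step == 0`` in the cycle branch is an assumption, not taken from the original text):

    .. code-block:: python

        import math

        def polynomial_lr(lr0, global_step, decay_steps,
                          end_lr=0.0001, power=1.0, cycle=False):
            if cycle:
                # stretch decay_steps to the next multiple that covers global_step
                decay_steps = decay_steps * max(1, math.ceil(global_step / float(decay_steps)))
            else:
                global_step = min(global_step, decay_steps)
            return (lr0 - end_lr) * (1 - global_step / float(decay_steps)) ** power + end_lr

        print(polynomial_lr(0.1, 5000, 10000))   # ~0.05005
        print(polynomial_lr(0.1, 15000, 10000))  # clamps to decay_steps -> 0.0001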
@@ -401,24 +451,26 @@ class CosineDecay(LearningRateDecay):
    """
    Applies cosine decay to the learning rate.

-   when training a model, it is often recommended to lower the learning rate as the
-   training progresses. By using this function, the learning rate will be decayed by
-   following cosine decay strategy.
+   The algorithm can be described as following.

    .. math::

-       decayed\_lr = learning\_rate * 0.5 * (math.cos * (epoch * \\frac{math.pi}{epochs} ) + 1)
+       decayed\_learning\_rate = learning\_rate * 0.5 * (math.cos(global\_step * \\frac{math.pi}{step\_each\_epoch} ) + 1)

-   Args:
-       learning_rate(Variable|float): The initial learning rate.
-       step_each_epoch(int): the number of steps in an epoch.
-       epochs(int): the number of epochs.
-       begin(int): The begin step (default is 0).
-       step(int): The step size (default is 1).
-       dtype(str): The dtype used to create learning rate (default is 'float32').
+   Parameters:
+       learning_rate(Variable|float): The initial learning rate. If the type
+           is Variable, it's a tensor with shape [1], the data type can be
+           float32 or float64. It also can be set to python int number.
+       step_each_epoch(int): The number of steps in an epoch.
+       epochs(int): The number of epochs.
+       begin(int, optional): The begin step. The initial value of global_step described above. The default value is 0.
+       step(int, optional): The step size used to calculate the new global_step in the description above.
+           The default value is 1.
+       dtype(str, optional): The data type used to create the learning rate variable. The data type can be set as
+           'float32', 'float64'. The default value is 'float32'.

    Returns:
-       The decayed learning rate.
+       None.

    Examples:
        .. code-block:: python
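For reference, a minimal sketch that mirrors the cosine formula as written above (illustrative names; note that ``epochs`` does not appear in that expression even though the layer accepts it):

    .. code-block:: python

        import math

        def cosine_lr(lr0, global_step, step_each_epoch):
            # Direct transcription of the documented formula.
            return lr0 * 0.5 * (math.cos(global_step * math.pi / step_each_epoch) + 1)

        print(cosine_lr(0.1, 0, 10000))      # 0.1 at the start
        print(cosine_lr(0.1, 10000, 10000))  # ~0.0 after step_each_epoch steps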
@@ -453,33 +505,29 @@ class CosineDecay(LearningRateDecay):
class NoamDecay(LearningRateDecay):
    """
-   Noam decay method. The numpy implementation of noam decay as follows.
-
-   .. code-block:: python
-
-       import numpy as np
-       # set hyper parameters
-       d_model = 2
-       current_steps = 20
-       warmup_steps = 200
-       # compute
-       lr_value = np.power(d_model, -0.5) * np.min([
-               np.power(current_steps, -0.5),
-               np.power(warmup_steps, -1.5) * current_steps])
-
-   Please reference `attention is all you need
-   <https://arxiv.org/pdf/1706.03762.pdf>`_.
-
-   Args:
-       d_model(Variable): The dimensionality of input and output of model.
-       warmup_steps(Variable): A super parameter.
-       begin(int): The begin step (default is 0)
-       step(int): The step size (default is 1)
-       dtype(str): The dtype used to create learning rate (default is 'float32')
+   Applies Noam decay to the initial learning rate.
+
+   The algorithm can be described as following.
+
+   .. math::
+
+       decayed\_learning\_rate = d_{model}^{-0.5} * min(global\_step^{-0.5}, global\_step * warmup\_steps^{-1.5})
+
+   Please reference `attention is all you need <https://arxiv.org/pdf/1706.03762.pdf>`_
+
+   Parameters:
+       d_model(Variable|int): The dimensionality of input and output feature vector of model. If type is Variable,
+           it's a tensor with shape [1] and the data type can be int32 or int64. The type can also be python int.
+       warmup_steps(Variable|int): The number of warmup steps. A super parameter. If type is Variable,
+           it's a tensor with shape [1] and the data type can be int32 or int64. The type can also be python int.
+       begin(int, optional): The begin step. The initial value of global_step described above. The default value is 0.
+       step(int, optional): The step size used to calculate the new global_step in the description above.
+           The default value is 1.
+       dtype(str, optional): The data type used to create the learning rate variable. The data type can be set as
+           'float32', 'float64'. The default value is 'float32'.

    Returns:
-       The decayed learning rate.
+       None.

    Examples:
        .. code-block:: python
...
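For reference, a minimal NumPy sketch of the Noam formula above, in the spirit of the snippet removed from the old docstring (illustrative names only):

    .. code-block:: python

        import numpy as np

        def noam_lr(d_model, global_step, warmup_steps):
            # d_model ** -0.5 * min(step ** -0.5, step * warmup_steps ** -1.5)
            return np.power(d_model, -0.5) * min(
                np.power(global_step, -0.5),
                np.power(warmup_steps, -1.5) * global_step)

        print(noam_lr(512, 100, 4000))   # warm-up phase: grows roughly linearly with the step
        print(noam_lr(512, 8000, 4000))  # decay phase: shrinks like step ** -0.5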
@@ -273,7 +273,8 @@ class Conv3D(layers.Layer):
    The convolution3D layer calculates the output based on the input, filter
    and strides, paddings, dilations, groups parameters. Input(Input) and
-   Output(Output) are in NCDHW format. Where N is batch size C is the number of
+   Output(Output) are multidimensional tensors with a shape of
+   :math:`[N, C, D, H, W]` . Where N is batch size, C is the number of
    channels, D is the depth of the feature, H is the height of the feature,
    and W is the width of the feature. Convolution3D is similar with Convolution2D
    but adds one dimension(depth). If bias attribution and activation type are
@@ -288,7 +289,7 @@ class Conv3D(layers.Layer):
    In the above equation:

-   * :math:`X`: Input value, a tensor with NCDHW format.
+   * :math:`X`: Input value, a tensor with NCDHW or NDHWC format.
    * :math:`W`: Filter value, a tensor with MCDHW format.
    * :math:`\\ast`: Convolution operation.
    * :math:`b`: Bias value, a 2-D tensor with shape [M, 1].
@@ -317,45 +318,46 @@ class Conv3D(layers.Layer):
    Parameters:
        name_scope(str) : The name for this class.
        num_filters(int): The number of filter. It is as same as the output image channel.
-       filter_size (int|tuple|None): The filter size. If filter_size is a tuple,
+       filter_size (int|tuple, optional): The filter size. If filter_size is a tuple,
            it must contain three integers, (filter_size_D, filter_size_H, filter_size_W).
-           Otherwise, the filter will be a square.
-       stride (int|tuple): The stride size. If stride is a tuple, it must
+           Otherwise, the filter will be a square, filter_size_depth = filter_size_height
+           = filter_size_width = filter_size.
+       stride (int|tuple, optional): The stride size. If stride is a tuple, it must
            contain three integers, (stride_D, stride_H, stride_W). Otherwise, the
-           stride_D = stride_H = stride_W = stride. Default: stride = 1.
-       padding (int|tuple): The padding size. If padding is a tuple, it must
+           stride_D = stride_H = stride_W = stride. The default value is 1.
+       padding (int|tuple, optional): The padding size. If padding is a tuple, it must
            contain three integers, (padding_D, padding_H, padding_W). Otherwise, the
-           padding_D = padding_H = padding_W = padding. Default: padding = 0.
-       dilation (int|tuple): The dilation size. If dilation is a tuple, it must
+           padding_D = padding_H = padding_W = padding. The default value is 0.
+       dilation (int|tuple, optional): The dilation size. If dilation is a tuple, it must
            contain three integers, (dilation_D, dilation_H, dilation_W). Otherwise, the
-           dilation_D = dilation_H = dilation_W = dilation. Default: dilation = 1.
-       groups (int): The groups number of the Conv3d Layer. According to grouped
+           dilation_D = dilation_H = dilation_W = dilation. The default value is 1.
+       groups (int, optional): The groups number of the Conv3d Layer. According to grouped
            convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
            the first half of the filters is only connected to the first half
            of the input channels, while the second half of the filters is only
-           connected to the second half of the input channels. Default: groups=1
-       param_attr (ParamAttr|None): The parameter attribute for learnable parameters/weights
+           connected to the second half of the input channels. The default value is 1.
+       param_attr (ParamAttr, optional): The parameter attribute for learnable parameters/weights
            of conv3d. If it is set to None or one attribute of ParamAttr, conv3d
            will create ParamAttr as param_attr. If it is set to None, the parameter
            is initialized with :math:`Normal(0.0, std)`, and the :math:`std` is
-           :math:`(\\frac{2.0 }{filter\_elem\_num})^{0.5}`. Default: None.
-       bias_attr (ParamAttr|bool|None): The parameter attribute for the bias of conv3d.
+           :math:`(\\frac{2.0 }{filter\_elem\_num})^{0.5}`. The default value is None.
+       bias_attr (ParamAttr|bool, optional): The parameter attribute for the bias of conv3d.
            If it is set to False, no bias will be added to the output units.
            If it is set to None or one attribute of ParamAttr, conv3d
            will create ParamAttr as bias_attr. If the Initializer of the bias_attr
-           is not set, the bias is initialized zero. Default: None.
-       use_cudnn (bool): Use cudnn kernel or not, it is valid only when the cudnn
-           library is installed. Default: True
-       act (str): Activation type, if it is set to None, activation is not appended.
-           Default: None.
+           is not set, the bias is initialized zero. The default value is None.
+       use_cudnn (bool, optional): Use cudnn kernel or not, it is valid only when the cudnn
+           library is installed. The default value is True.
+       act (str, optional): Activation type, if it is set to None, activation is not appended.
+           The default value is None.

-   Attributes:
-       weight (Parameter): the learnable weights of filters of this layer.
-       bias (Parameter|None): the learnable bias of this layer.
+   Attribute:
+       **weight** (Parameter): the learnable weights of filters of this layer.
+
+       **bias** (Parameter): the learnable bias of this layer.

    Returns:
-       Variable: The tensor variable storing the convolution and \
-           non-linearity activation result.
+       None.

    Raises:
        ValueError: If the shapes of input, filter_size, stride, padding and
...@@ -533,58 +535,85 @@ class Conv3DTranspose(layers.Layer): ...@@ -533,58 +535,85 @@ class Conv3DTranspose(layers.Layer):
.. math:: .. math::
D_{out} &= (D_{in} - 1) * strides[0] - 2 * paddings[0] + dilations[0] * (D_f - 1) + 1 \\\\ D^\prime_{out} &= (D_{in} - 1) * strides[0] - 2 * paddings[0] + dilations[0] * (D_f - 1) + 1 \\\\
H_{out} &= (H_{in} - 1) * strides[1] - 2 * paddings[1] + dilations[1] * (H_f - 1) + 1 \\\\ H^\prime_{out} &= (H_{in} - 1) * strides[1] - 2 * paddings[1] + dilations[1] * (H_f - 1) + 1 \\\\
W_{out} &= (W_{in} - 1) * strides[2] - 2 * paddings[2] + dilations[2] * (W_f - 1) + 1 W^\prime_{out} &= (W_{in} - 1) * strides[2] - 2 * paddings[2] + dilations[2] * (W_f - 1) + 1 \\\\
D_{out} &\in [ D^\prime_{out}, D^\prime_{out} + strides[0] ] \\\\
H_{out} &\in [ H^\prime_{out}, H^\prime_{out} + strides[1] ] \\\\
**Note**:
The conv3d_transpose can be seen as the backward of the conv3d. For conv3d,
when stride > 1, conv3d maps multiple input shape to the same output shape,
so for conv3d_transpose, when stride > 1, input shape maps multiple output shape.
If output_size is None, :math:`H_{out} = H^\prime_{out}, :math:`H_{out} = \
H^\prime_{out}, W_{out} = W^\prime_{out}`; else, the :math:`D_{out}` of the output
size must between :math:`D^\prime_{out}` and :math:`D^\prime_{out} + strides[0]`,
the :math:`H_{out}` of the output size must between :math:`H^\prime_{out}`
and :math:`H^\prime_{out} + strides[1]`, and the :math:`W_{out}` of the output size must
between :math:`W^\prime_{out}` and :math:`W^\prime_{out} + strides[2]`,
conv3d_transpose can compute the kernel size automatically.
Parameters: Parameters:
name_scope(str) : The name for this class. name_scope(str) : The name for this class.
num_filters(int): The number of the filter. It is as same as the output num_filters(int): The number of the filter. It is as same as the output
image channel. image channel.
output_size(int|tuple|None): The output image size. If output size is a output_size(int|tuple, optional): The output image size. If output size is a
tuple, it must contain three integers, (image_D, image_H, image_W). This tuple, it must contain three integers, (image_depth, image_height, image_width). This
parameter only works when filter_size is None. parameter only works when filter_size is None. If output_size and filter_size are
filter_size(int|tuple|None): The filter size. If filter_size is a tuple, specified at the same time, They should follow the formula above. The default value is None.
Output_size and filter_size should not be None at the same time.
filter_size(int|tuple, optional): The filter size. If filter_size is a tuple,
it must contain three integers, (filter_size_D, filter_size_H, filter_size_W). it must contain three integers, (filter_size_D, filter_size_H, filter_size_W).
Otherwise, the filter will be a square. None if use output size to Otherwise, the filter will be a square. None if use output size to
calculate filter_size. calculate filter_size. The default value is None.
padding(int|tuple): The padding size. If padding is a tuple, it must padding(int|tuple, optional): The padding size. The padding argument effectively
contain three integers, (padding_D, padding_H, padding_W). Otherwise, the adds `dilation * (kernel - 1)` amount of zero-padding on both sides of input. If `padding` is a string,
padding_D = padding_H = padding_W = padding. Default: padding = 0. either 'VALID' or 'SAME' supported, which is the padding algorithm. If `padding`
stride(int|tuple): The stride size. If stride is a tuple, it must is a tuple or list, it could be in three forms: `[pad_depth, pad_height, pad_width]` or
contain three integers, (stride_D, stride_H, stride_W). Otherwise, the `[pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`,
stride_D = stride_H = stride_W = stride. Default: stride = 1. and when `data_format` is `'NCDHW'`, `padding` can be in the form
dilation(int|tuple): The dilation size. If dilation is a tuple, it must `[[0,0], [0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`.
when `data_format` is `'NDHWC'`, `padding` can be in the form
`[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
The default value is 0.
stride(int|tuple, optional): The stride size. It means the stride in transposed convolution.
If stride is a tuple, it must contain three integers, (stride_depth, stride_height,
stride_width). Otherwise, stride_depth = stride_height = stride_width = stride.
The default value is 1.
dilation(int|tuple, optional): The dilation size. If dilation is a tuple, it must
contain three integers, (dilation_D, dilation_H, dilation_W). Otherwise, the contain three integers, (dilation_D, dilation_H, dilation_W). Otherwise, the
dilation_D = dilation_H = dilation_W = dilation. Default: dilation = 1. dilation_D = dilation_H = dilation_W = dilation. The default value is 1.
groups(int): The groups number of the Conv3d transpose layer. Inspired by groups(int, optional): The groups number of the Conv3d transpose layer. Inspired by
grouped convolution in Alex Krizhevsky's Deep CNN paper, in which grouped convolution in Alex Krizhevsky's Deep CNN paper, in which
when group=2, the first half of the filters is only connected to the when group=2, the first half of the filters is only connected to the
first half of the input channels, while the second half of the first half of the input channels, while the second half of the
filters is only connected to the second half of the input channels. filters is only connected to the second half of the input channels.
Default: groups=1 The default value is 1.
param_attr (ParamAttr|None): The parameter attribute for learnable parameters/weights param_attr (ParamAttr, optional): The parameter attribute for learnable parameters/weights
of conv3d_transpose. If it is set to None or one attribute of ParamAttr, conv3d_transpose of conv3d_transpose. If it is set to None or one attribute of ParamAttr, conv3d_transpose
will create ParamAttr as param_attr. If the Initializer of the param_attr will create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with Xavier. Default: None. is not set, the parameter is initialized with Xavier. The default value is None.
bias_attr (ParamAttr|bool|None): The parameter attribute for the bias of conv3d_transpose. bias_attr (ParamAttr|bool, optional): The parameter attribute for the bias of conv3d_transpose.
If it is set to False, no bias will be added to the output units. If it is set to False, no bias will be added to the output units.
If it is set to None or one attribute of ParamAttr, conv3d_transpose If it is set to None or one attribute of ParamAttr, conv3d_transpose
will create ParamAttr as bias_attr. If the Initializer of the bias_attr will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None. is not set, the bias is initialized zero. The default value is None.
use_cudnn(bool): Use cudnn kernel or not, it is valid only when the cudnn use_cudnn(bool, optional): Use cudnn kernel or not, it is valid only when the cudnn
library is installed. Default: True library is installed. The default value is True.
act (str): Activation type, if it is set to None, activation is not appended. act (str, optional): Activation type, if it is set to None, activation is not appended.
Default: None. The default value is None.
name(str|None): A name for this layer(optional). If set None, the layer name(str, optional): The default value is None. Normally there is no need for user
will be named automatically. to set this property. For more information, please refer to :ref:`api_guide_Name`.
Attribute:
**weight** (Parameter): the learnable weights of filters of this layer.
**bias** (Parameter): the learnable bias of this layer.
Returns:
None.
Raises:
ValueError: If the shapes of input, filter_size, stride, padding and
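To make the parameters above concrete, here is a minimal dygraph usage sketch. It assumes that the constructor also accepts num_filters and filter_size (not shown in this excerpt) and that fluid.dygraph.Conv3DTranspose is importable like the other fluid dygraph layers:

    import numpy as np
    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        # A 5-D NCDHW input: batch=2, channels=3, depth=height=width=8.
        data = np.random.rand(2, 3, 8, 8, 8).astype('float32')
        # Hypothetical construction; num_filters and filter_size are assumed here.
        conv3d_trans = fluid.dygraph.Conv3DTranspose(
            'conv3d_transpose', num_filters=12, filter_size=3,
            stride=2, dilation=1, groups=1, use_cudnn=False)
        out = conv3d_trans(fluid.dygraph.to_variable(data))

With stride=2 the spatial dimensions roughly double, which is the usual reason to pick a transposed convolution.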
...@@ -1568,9 +1597,10 @@ class GRUUnit(layers.Layer):
"""
**GRU unit layer**
It creates a callable object from the GRUUnit class.
If origin_mode is True, then the equation of a GRU step is from the paper
`Learning Phrase Representations using RNN Encoder-Decoder for Statistical
Machine Translation <https://arxiv.org/pdf/1406.1078.pdf>`_
.. math::
u_t & = actGate(xu_{t} + W_u h_{t-1} + b_u)
...@@ -1581,7 +1611,7 @@ class GRUUnit(layers.Layer):
h_t & = dot(u_t, h_{t-1}) + dot((1-u_t), m_t)
If origin_mode is False, then the equation of a GRU step is from the paper
`Empirical Evaluation of Gated Recurrent Neural Networks on Sequence
Modeling <https://arxiv.org/pdf/1412.3555.pdf>`_
...@@ -1610,39 +1640,46 @@ class GRUUnit(layers.Layer):
Parameters:
name_scope(str): The name of this class.
size (int): The input dimension value.
param_attr(ParamAttr, optional): The parameter attribute for the learnable
hidden-hidden weight matrix.
**Note**:
1. The shape of the weight matrix is :math:`[D, 3*D]`, where D is the hidden size.
2. All elements in the weight matrix can be divided into two parts. The first
part contains the weights of the update gate and reset gate, with shape :math:`[D, 2*D]`,
and the second part contains the weights for the candidate hidden state, with shape :math:`[D, D]`.
If it is set to None or one attribute of ParamAttr, gru_unit will
create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with Xavier. The default
value is None.
bias_attr (ParamAttr|bool, optional): The parameter attribute for the bias
of GRU. Note that the bias, with shape :math:`[1, 3*D]`, concatenates
the bias in the update gate, reset gate and candidate calculations.
If it is set to False, no bias will be applied to the update gate,
reset gate and candidate calculations. If it is set to None or one
attribute of ParamAttr, gru_unit will create ParamAttr as
bias_attr. If the Initializer of the bias_attr is not set, the bias
is initialized to zero. The default value is None.
activation (str): The activation type for cell (actNode).
The default value is 'tanh'.
gate_activation (str): The activation type for gates (actGate).
The default value is 'sigmoid'.
dtype(str): The dtype of the layers. The data type can be set as
'float32' or 'float64'. The default value is 'float32'.
Attribute:
**weight** (Parameter): the learnable weights of this layer.
**bias** (Parameter): the learnable bias of this layer.
Returns:
tuple: The hidden value, reset-hidden value and gate values. The hidden value
is a 2-D tensor with shape :math:`[T, D]`. The reset-hidden value is a
2-D tensor with shape :math:`[T, D]`. The gate value is a 2-D tensor with
shape :math:`[T, 3*D]`.
Examples:
...@@ -1656,6 +1693,7 @@ class GRUUnit(layers.Layer):
D = 5
T = sum(lod[0])
input = numpy.random.rand(T, 3 * D).astype('float32')
hidden_input = numpy.random.rand(T, D).astype('float32')
with fluid.dygraph.guard():
    x = numpy.random.random((3, 32, 32)).astype('float32')
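    # A plausible continuation (a sketch, not the original example): it assumes
    # that lod and the numpy / paddle.fluid imports are defined above this excerpt,
    # that GRUUnit takes (name_scope, size) with size = 3 * D, and that its
    # forward call returns the (hidden, reset-hidden, gate) tuple described above.
    gru = fluid.dygraph.GRUUnit('gru', size=3 * D)
    hidden, reset_hidden_pre, gate = gru(
        fluid.dygraph.to_variable(input),
        fluid.dygraph.to_variable(hidden_input))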
...@@ -2085,22 +2123,24 @@ class BilinearTensorProduct(layers.Layer):
- :math:`y`: the second input contains N elements, shape is [batch_size, N].
- :math:`W_{i}`: the i-th learned weight, shape is [M, N]
- :math:`out_{i}`: the i-th element of out, shape is [batch_size, size].
- :math:`y^\mathrm{T}`: the transpose of :math:`y`.
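For reference, these symbols combine in the standard bilinear tensor product form:
.. math::
out_{i} = x * W_{i} * {y^\mathrm{T}}, i = 0, 1, ..., size - 1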
Parameters:
name_scope(str): The name of this class.
size (int): The dimension of this layer.
name (str): The default value is None. Normally there is no need for user
to set this property. For more information, please refer to :ref:`api_guide_Name`.
act (str, optional): Activation to be applied to the output of this layer. The default value is None.
param_attr (ParamAttr, optional): The parameter attribute for the learnable weight w of
this layer. The default value is None.
bias_attr (ParamAttr, optional): The parameter attribute for the bias
of this layer. If it is set to False, no bias will be added to the output units.
If it is set to None, the bias is initialized to zero. The default value is None.
Attribute:
**weight** (Parameter): the learnable weights of this layer.
**bias** (Parameter): the learnable bias of this layer.
Returns:
Variable: A 2-D Tensor of shape [batch_size, size].
......
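A minimal dygraph usage sketch for the layer described above. It assumes the constructor takes (name_scope, size) as documented and that the layer is called with the two inputs x and y:

    import numpy as np
    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        x = np.random.random((5, 3)).astype('float32')  # [batch_size, M]
        y = np.random.random((5, 4)).astype('float32')  # [batch_size, N]
        btp = fluid.dygraph.BilinearTensorProduct('btp', size=6)
        out = btp(fluid.dygraph.to_variable(x),
                  fluid.dygraph.to_variable(y))          # shape [5, 6]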
...@@ -8937,8 +8937,8 @@ def label_smooth(label,
dtype="float32",
name=None):
"""
Label smoothing is a mechanism to regularize the classifier layer and is called
label-smoothing regularization (LSR).
Label smoothing is proposed to encourage the model to be less confident,
since optimizing the log-likelihood of the correct label directly may
...@@ -8957,19 +8957,23 @@ def label_smooth(label,
See more details about label smoothing in https://arxiv.org/abs/1512.00567.
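In its common form, the smoothing mixes the one-hot label :math:`y` with a prior distribution :math:`\mu` (uniform over the classes when prior_dist is not given), weighted by :math:`\epsilon`:
.. math::
\tilde{y_k} = (1 - \epsilon) * y_k + \epsilon * \mu_k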
Parameters:
label(Variable): The input variable containing the label data. The
label data should use one-hot representation. It's
a multidimensional tensor with a shape of
:math:`[N_1, ..., Depth]`, where Depth is the class number.
prior_dist(Variable, optional): The prior distribution to be used to smooth
labels. If not provided, a uniform distribution
is used. It's a multidimensional tensor with a shape of
:math:`[1, class\_num]`. The default value is None.
epsilon(float, optional): The weight used to mix up the original ground-truth
distribution and the fixed distribution. The default value is
0.1.
dtype(np.dtype|core.VarDesc.VarType|str, optional): The data type can be set
as 'float32' or 'float64'. The default value is 'float32'.
name(str, optional): The default value is None. Normally there is no need for user
to set this property. For more information, please refer to
:ref:`api_guide_Name`.
Returns:
Variable: The tensor variable containing the smoothed labels.
......
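As a quick illustration of the parameters above, a static-graph usage sketch (the one_hot depth of 10 is an arbitrary class count chosen for the example):

    import paddle.fluid as fluid
    import paddle.fluid.layers as layers

    # An int64 class-index label, converted to one-hot before smoothing.
    label = layers.data(name="label", shape=[1], dtype="int64")
    one_hot_label = layers.one_hot(input=label, depth=10)
    smooth_label = layers.label_smooth(
        label=one_hot_label, epsilon=0.1, dtype="float32")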