From d576d6ddebf4bf45997801a1b5bc03b66501bccb Mon Sep 17 00:00:00 2001
From: Chen Long <1300851984@qq.com>
Date: Fri, 27 Nov 2020 17:33:30 +0800
Subject: [PATCH] fix some docs test=develop;test=document_fix (#29159)

---
 .../distributed/fleet/base/util_factory.py    |  1 +
 .../fleet/data_generator/data_generator.py    |  5 ++
 python/paddle/fluid/layers/nn.py              | 49 +++++--------
 3 files changed, 18 insertions(+), 37 deletions(-)

diff --git a/python/paddle/distributed/fleet/base/util_factory.py b/python/paddle/distributed/fleet/base/util_factory.py
index bbb7d60ed9..d982f14eaa 100644
--- a/python/paddle/distributed/fleet/base/util_factory.py
+++ b/python/paddle/distributed/fleet/base/util_factory.py
@@ -116,6 +116,7 @@ class UtilBase(object):
 
         Examples:
             .. code-block:: python
+
                 # Save the following code in `train.py` , and then execute the command `fleetrun --server_num 2 --worker_num 2 train.py` .
                 import paddle.distributed.fleet as fleet
 
diff --git a/python/paddle/distributed/fleet/data_generator/data_generator.py b/python/paddle/distributed/fleet/data_generator/data_generator.py
index 0b204a270d..669d2ea24a 100644
--- a/python/paddle/distributed/fleet/data_generator/data_generator.py
+++ b/python/paddle/distributed/fleet/data_generator/data_generator.py
@@ -36,6 +36,7 @@ class DataGenerator(object):
 
         Example:
             .. code-block:: python
+
                 import paddle.distributed.fleet.data_generator as dg
 
                 class MyData(dg.DataGenerator):
@@ -62,6 +63,7 @@ class DataGenerator(object):
 
         Example:
             .. code-block:: python
+
                 import paddle.distributed.fleet.data_generator as dg
 
                 class MyData(dg.DataGenerator):
@@ -100,6 +102,7 @@ class DataGenerator(object):
 
         Example:
             .. code-block:: python
+
                 import paddle.distributed.fleet.data_generator as dg
 
                 class MyData(dg.DataGenerator):
@@ -171,6 +174,7 @@ class DataGenerator(object):
 
         Example:
             .. code-block:: python
+
                 import paddle.distributed.fleet.data_generator as dg
 
                 class MyData(dg.DataGenerator):
@@ -202,6 +206,7 @@ class DataGenerator(object):
 
         Example:
             .. code-block:: python
+
                 import paddle.distributed.fleet.data_generator as dg
 
                 class MyData(dg.DataGenerator):
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 804e4e6d46..aa709ff4e1 100755
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -2701,13 +2701,13 @@ def batch_norm(input,
     `is_test = True` can only be used in test program and inference program, `is_test` CANNOT be set to True in train program, if you want to use global status from pre_train model in train program, please set `use_global_stats = True`.
 
     Args:
-        input(Variable): The rank of input variable can be 2, 3, 4, 5. The data type
+        input(Tensor): The rank of input Tensor can be 2, 3, 4, 5. The data type
             is float16 or float32 or float64.
         act(string, Default None): Activation type, linear|relu|prelu|...
         is_test (bool, Default False): A flag indicating whether it is in
             test phrase or not.
-        momentum(float|Variable, Default 0.9): The value used for the moving_mean and
-            moving_var computation. This should be a float number or a Variable with
+        momentum(float|Tensor, Default 0.9): The value used for the moving_mean and
+            moving_var computation. This should be a float number or a Tensor with
             shape [1] and data type as float32. The updated formula is:
             :math:`moving\_mean = moving\_mean * momentum + new\_mean * (1. - momentum)`
             :math:`moving\_var = moving\_var * momentum + new\_var * (1. - momentum)`
@@ -2745,48 +2745,23 @@ def batch_norm(input,
         In train mode, when setting use_global_stats True, the global
         mean and variance are also used during train period.
 
     Returns:
-        A Variable holding Tensor which is the result after applying batch normalization on the input,
+        A Tensor which is the result after applying batch normalization on the input,
         has same shape and data type with input.
 
     Examples:
         .. code-block:: python
 
-            import paddle.fluid as fluid
-            import paddle
-            paddle.enable_static()
-            x = fluid.data(name='x', shape=[3, 7, 3, 7], dtype='float32')
-            hidden1 = fluid.layers.fc(input=x, size=200, param_attr='fc1.w')
-            hidden2 = fluid.layers.batch_norm(input=hidden1)
-
-        .. code-block:: python
-
-            # batch_norm with momentum as Variable
-            import paddle.fluid as fluid
-            import paddle.fluid.layers.learning_rate_scheduler as lr_scheduler
             import paddle
+            paddle.enable_static()
-
-            def get_decay_momentum(momentum_init, decay_steps, decay_rate):
-                global_step = lr_scheduler._decay_step_counter()
-                momentum = fluid.layers.create_global_var(
-                    shape=[1],
-                    value=float(momentum_init),
-                    dtype='float32',
-                    # set persistable for save checkpoints and resume
-                    persistable=True,
-                    name="momentum")
-                div_res = global_step / decay_steps
-                decayed_momentum = momentum_init * (decay_rate**div_res)
-                fluid.layers.assign(decayed_momentum, momentum)
-
-                return momentum
-
-            x = fluid.data(name='x', shape=[3, 7, 3, 7], dtype='float32')
-            hidden1 = fluid.layers.fc(input=x, size=200, param_attr='fc1.w')
-            momentum = get_decay_momentum(0.9, 1e5, 0.9)
-            hidden2 = fluid.layers.batch_norm(input=hidden1, momentum=momentum)
-
+            x = paddle.static.data(name='x', shape=[3, 7, 3, 7], dtype='float32')
+            hidden1 = paddle.static.nn.fc(x=x, size=200)
+            print(hidden1.shape)
+            # [3, 200]
+            hidden2 = paddle.static.nn.batch_norm(input=hidden1)
+            print(hidden2.shape)
+            # [3, 200]
     """
     assert bias_attr is not False, "bias_attr should not be False in batch_norm."
     helper = LayerHelper('batch_norm', **locals())
-- 
GitLab
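
The momentum formulas restated by this patch describe an exponential moving average of the per-channel batch statistics. A minimal sketch of that update rule in plain NumPy, with illustrative shapes matching the [3, 200] example above; this shows only the documented formula, not Paddle's internal implementation:

    .. code-block:: python

        import numpy as np

        momentum = 0.9
        # Running statistics, one entry per channel; shapes are illustrative.
        moving_mean = np.zeros(200, dtype=np.float32)
        moving_var = np.ones(200, dtype=np.float32)

        # Statistics of the current mini-batch (3 samples, 200 channels).
        batch = np.random.randn(3, 200).astype(np.float32)
        new_mean = batch.mean(axis=0)
        new_var = batch.var(axis=0)

        # The update rule documented in the batch_norm docstring:
        #   moving_mean = moving_mean * momentum + new_mean * (1. - momentum)
        #   moving_var  = moving_var  * momentum + new_var  * (1. - momentum)
        moving_mean = moving_mean * momentum + new_mean * (1.0 - momentum)
        moving_var = moving_var * momentum + new_var * (1.0 - momentum)

A momentum close to 1.0 makes the running statistics change slowly across mini-batches, which is why `use_global_stats = True` depends on well-converged moving averages from a pre-trained model.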