diff --git a/python/paddle/fluid/layers/control_flow.py b/python/paddle/fluid/layers/control_flow.py
index feac42d94e62c171f5eb46fca01197884114c4eb..5354582aaa73c171d0f989e7ac321e51afe8dc30 100644
--- a/python/paddle/fluid/layers/control_flow.py
+++ b/python/paddle/fluid/layers/control_flow.py
@@ -76,13 +76,13 @@ def split_lod_tensor(input, mask, level=0):
     Examples:
         .. code-block:: python
 
-          x = layers.data(name='x', shape=[1])
+          x = fluid.layers.data(name='x', shape=[1])
           x.persistable = True
 
-          y = layers.data(name='y', shape=[1])
+          y = fluid.layers.data(name='y', shape=[1])
           y.persistable = True
 
-          out_true, out_false = layers.split_lod_tensor(
+          out_true, out_false = fluid.layers.split_lod_tensor(
                 input=x, mask=y, level=level)
 
     """
@@ -891,7 +891,7 @@ def array_write(x, i, array=None):
 
 def create_array(dtype):
     """
-    **Create LoDTensor Array**
+    **Create LoDTensorArray**
 
     This function creates an array of LOD_TENSOR_ARRAY . It is mainly used to
     implement RNN with array_write, array_read and While.
@@ -989,7 +989,8 @@ def array_read(array, i):
     Returns:
         Variable: The tensor type variable that has the data written to it.
     Examples:
-        .. code-block::python
+        .. code-block:: python
+
           tmp = fluid.layers.zeros(shape=[10], dtype='int32')
           i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)
           arr = layers.array_read(tmp, i=i)
@@ -1027,7 +1028,7 @@ def shrink_memory(x, i, table):
 
 def array_length(array):
     """
-    **Get the length of Input LoDTensorArray**
+    **Get the Length of Input LoDTensorArray**
 
     This function performs the operation to find the length of the input
     LOD_TENSOR_ARRAY.
@@ -1042,12 +1043,13 @@
         Variable: The length of the input LoDTensorArray.
 
     Examples:
-        .. code-block::python
+        .. code-block:: python
 
           tmp = fluid.layers.zeros(shape=[10], dtype='int32')
           i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)
           arr = fluid.layers.array_write(tmp, i=i)
           arr_len = fluid.layers.array_length(arr)
+
     """
     helper = LayerHelper('array_length', **locals())
     tmp = helper.create_tmp_variable(dtype='int64')
diff --git a/python/paddle/fluid/layers/learning_rate_scheduler.py b/python/paddle/fluid/layers/learning_rate_scheduler.py
index 2e5cff74c1dc1ddb4346c0c5bc6417f5293db14a..2dbc51c23fe940c3c42629eae27323712244c552 100644
--- a/python/paddle/fluid/layers/learning_rate_scheduler.py
+++ b/python/paddle/fluid/layers/learning_rate_scheduler.py
@@ -163,11 +163,11 @@ def polynomial_decay(learning_rate,
                      power=1.0,
                      cycle=False):
     """
-    **polynomial_decay**
+    **Polynomial Decay**
 
     Applies polynomial decay to the initial learning rate.
 
-    .. code-block::python
+    .. code-block:: python
 
      if cycle:
        decay_steps = decay_steps * ceil(global_step / decay_steps)
@@ -180,9 +180,9 @@ def polynomial_decay(learning_rate,
         learning_rate(Variable|float32): A scalar float32 value or a Variable. This
           will be the initial learning rate during training
         decay_steps(int32): A Python `int32` number.
-        end_learning_rate(float): A Python `float` number.
-        power(float): A Python `float` number
-        cycle(bool, Default False): Boolean. If set true, decay the learning rate every decay_steps.
+        end_learning_rate(float, Default: 0.0001): A Python `float` number.
+        power(float, Default: 1.0): A Python `float` number
+        cycle(bool, Default: False): Boolean. If set true, decay the learning rate every decay_steps.
 
     Returns:
         The decayed learning rate
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 3f3b7e20efe57c1e4ce07a24afe1ca312cf84577..7c4393c4d94787274314dfc249e6148f271103e8 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -1615,7 +1615,9 @@ def batch_norm(input,
     Can be used as a normalizer function for conv2d and fully_connected operations.
     The required data format for this layer is one of the following:
+
     1. NHWC `[batch, in_height, in_width, in_channels]`
+
     2. NCHW `[batch, in_channels, in_height, in_width]`
 
     Refer to `Batch Normalization: Accelerating Deep Network Training by Reducing
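
For reference, a minimal usage sketch of the LoDTensorArray ops whose docstring
examples are corrected above. Only the `fluid.layers` calls come from the patched
docstrings; the surrounding variable names are illustrative assumptions.

.. code-block:: python

    import paddle.fluid as fluid

    # Write a tensor into a LoDTensorArray at position i, read it back,
    # and query the array's length (array_write / array_read / array_length).
    tmp = fluid.layers.zeros(shape=[10], dtype='int32')
    i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)
    arr = fluid.layers.array_write(tmp, i=i)
    item = fluid.layers.array_read(arr, i=i)
    arr_len = fluid.layers.array_length(arr)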
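
Similarly, a sketch of `polynomial_decay` with the defaults spelled out in the
docstring fix above. The concrete values (0.01, 10000) and the optimizer hookup
are illustrative assumptions, not part of the patch.

.. code-block:: python

    import paddle.fluid as fluid

    # Decays the rate from 0.01 toward end_learning_rate over decay_steps
    # steps; with cycle=True, decay_steps is scaled up by
    # ceil(global_step / decay_steps), per the docstring above.
    lr = fluid.layers.polynomial_decay(learning_rate=0.01,
                                       decay_steps=10000,
                                       end_learning_rate=0.0001,
                                       power=1.0,
                                       cycle=False)

    # Illustrative assumption: feed the decayed rate to an optimizer.
    sgd = fluid.optimizer.SGD(learning_rate=lr)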