未验证 提交 dadbe454 编写于 作者: X Xin Pan 提交者: GitHub

Merge pull request #11511 from panyx0718/doc2

Add doc for while and piecewise_decay op
@@ -654,6 +654,29 @@ class WhileGuard(BlockGuard):
class While(object):
"""
while loop control flow.
Args:
cond (Variable): condition used to compare.
name (str): The name of this layer.
Examples:
.. code-block:: python
d0 = layers.data("d0", shape=[10], dtype='float32')
data_array = layers.array_write(x=d0, i=i)
array_len = layers.fill_constant(shape=[1],dtype='int64', value=3)
cond = layers.less_than(x=i, y=array_len)
while_op = layers.While(cond=cond)
with while_op.block():
d = layers.array_read(array=data_array, i=i)
i = layers.increment(x=i, in_place=True)
layers.array_write(result, i=i, array=d)
layers.less_than(x=i, y=array_len, cond=cond)
"""
BEFORE_WHILE_BLOCK = 0
IN_WHILE_BLOCK = 1
AFTER_WHILE_BLOCK = 2
......
@@ -209,15 +209,27 @@ def polynomial_decay(learning_rate,
def piecewise_decay(boundaries, values):
"""Applies piecewise decay to the initial learning rate. """Applies piecewise decay to the initial learning rate.
>>> boundaries = [10000, 20000] The algorithm can be described as the code below.
>>> values = [1.0, 0.5, 0.1]
>>> .. code-block:: python
>>> if step < 10000:
>>> learning_rate = 1.0 boundaries = [10000, 20000]
>>> elif 10000 <= step < 20000: values = [1.0, 0.5, 0.1]
>>> learning_rate = 0.5 if step < 10000:
>>> else: learning_rate = 1.0
>>> learning_rate = 0.1 elif 10000 <= step < 20000:
learning_rate = 0.5
else:
learning_rate = 0.1
Args:
boundaries: A list of steps numbers.
values: A list of learning rate values that will be picked during
different step boundaries.
Returns:
The decayed learning rate.
""" """
if len(values) - len(boundaries) != 1: if len(values) - len(boundaries) != 1:
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册