Unverified commit f82e4d75 authored by liuwei1031, committed by GitHub

improve the doc of paddle.fluid.memory_optimize, test=develop (#17473)

* improve the doc of paddle.fluid.memory_optimize, test=develop

* fix typo, test=develop
Parent 32da5e9c
@@ -27,7 +27,7 @@ paddle.fluid.DistributeTranspiler.get_pserver_programs (ArgSpec(args=['self', 'e
paddle.fluid.DistributeTranspiler.get_startup_program (ArgSpec(args=['self', 'endpoint', 'pserver_program', 'startup_program'], varargs=None, keywords=None, defaults=(None, None)), ('document', 'd796fc0c8d51503b556fcf6dc15c4f0c'))
paddle.fluid.DistributeTranspiler.get_trainer_program (ArgSpec(args=['self', 'wait_port'], varargs=None, keywords=None, defaults=(True,)), ('document', '736330e31a7a54abccc0c7fd9119d9ff'))
paddle.fluid.DistributeTranspiler.transpile (ArgSpec(args=['self', 'trainer_id', 'program', 'pservers', 'trainers', 'sync_mode', 'startup_program', 'current_endpoint'], varargs=None, keywords=None, defaults=(None, '127.0.0.1:6174', 1, True, None, '127.0.0.1:6174')), ('document', '06ce55338dfe96311ad1078235ab3bf4'))
paddle.fluid.memory_optimize (ArgSpec(args=['input_program', 'skip_opt_set', 'print_log', 'level', 'skip_grads'], varargs=None, keywords=None, defaults=(None, False, 0, False)), ('document', '3f11d536c8039c7b5fd57970078c344b'))
paddle.fluid.memory_optimize (ArgSpec(args=['input_program', 'skip_opt_set', 'print_log', 'level', 'skip_grads'], varargs=None, keywords=None, defaults=(None, False, 0, False)), ('document', '97fdf61cf1ed4fb8f6a4b58aebce26a2'))
paddle.fluid.release_memory (ArgSpec(args=['input_program', 'skip_opt_set'], varargs=None, keywords=None, defaults=(None,)), ('document', 'b524b73e10f9ccdfa6d189e2e535fc17'))
paddle.fluid.DistributeTranspilerConfig.__init__
paddle.fluid.ParallelExecutor.__init__ (ArgSpec(args=['self', 'use_cuda', 'loss_name', 'main_program', 'share_vars_from', 'exec_strategy', 'build_strategy', 'num_trainers', 'trainer_id', 'scope'], varargs=None, keywords=None, defaults=(None, None, None, None, None, 1, 0, None)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
@@ -428,7 +428,7 @@ paddle.fluid.transpiler.DistributeTranspiler.get_pserver_programs (ArgSpec(args=
paddle.fluid.transpiler.DistributeTranspiler.get_startup_program (ArgSpec(args=['self', 'endpoint', 'pserver_program', 'startup_program'], varargs=None, keywords=None, defaults=(None, None)), ('document', 'd796fc0c8d51503b556fcf6dc15c4f0c'))
paddle.fluid.transpiler.DistributeTranspiler.get_trainer_program (ArgSpec(args=['self', 'wait_port'], varargs=None, keywords=None, defaults=(True,)), ('document', '736330e31a7a54abccc0c7fd9119d9ff'))
paddle.fluid.transpiler.DistributeTranspiler.transpile (ArgSpec(args=['self', 'trainer_id', 'program', 'pservers', 'trainers', 'sync_mode', 'startup_program', 'current_endpoint'], varargs=None, keywords=None, defaults=(None, '127.0.0.1:6174', 1, True, None, '127.0.0.1:6174')), ('document', '06ce55338dfe96311ad1078235ab3bf4'))
paddle.fluid.transpiler.memory_optimize (ArgSpec(args=['input_program', 'skip_opt_set', 'print_log', 'level', 'skip_grads'], varargs=None, keywords=None, defaults=(None, False, 0, False)), ('document', '3f11d536c8039c7b5fd57970078c344b'))
paddle.fluid.transpiler.memory_optimize (ArgSpec(args=['input_program', 'skip_opt_set', 'print_log', 'level', 'skip_grads'], varargs=None, keywords=None, defaults=(None, False, 0, False)), ('document', '97fdf61cf1ed4fb8f6a4b58aebce26a2'))
paddle.fluid.transpiler.release_memory (ArgSpec(args=['input_program', 'skip_opt_set'], varargs=None, keywords=None, defaults=(None,)), ('document', 'b524b73e10f9ccdfa6d189e2e535fc17'))
paddle.fluid.transpiler.HashName.__init__ (ArgSpec(args=['self', 'pserver_endpoints'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.transpiler.HashName.dispatch (ArgSpec(args=['self', 'varlist'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
@@ -498,28 +498,54 @@ def memory_optimize(input_program,
print_log=False,
level=0,
skip_grads=False):
"""Optimize memory by reusing var memory.
Note: it doesn't not support subblock nested in subblock.
"""
| Legacy memory optimization strategy, which reduces total memory consumption by reusing variable memory across different operators.
| A simple example to explain the algorithm:

.. code-block:: python

    c = a + b  # assume this is the last time a is used
    d = b * c
| Since **a** will not be used anymore after **"c = a + b"**, and the sizes of **a** and **d** are the same,
we can use variable **a** to replace variable **d**, so the code above can actually be optimized to:

.. code-block:: python

    c = a + b
    a = b * c
| Please note that in this legacy design, variable **a** directly replaces **d**, which means that after you
call this API some variables may disappear and others may hold unexpected values; in the case above,
**a** actually holds the value of **d** after execution.
| To protect important variables from being reused or removed by the optimization, the skip_opt_set argument
lets you specify a variable whitelist (see the usage sketch after the diff).
Variables in skip_opt_set are not affected by the memory_optimize API.
Note:
| **This API is deprecated; please avoid using it in new code.**
| It does not support operators that create sub-blocks, such as While and IfElse.
Args:
input_program(Program): Input Program.
skip_opt_set(set): Names of variables that will be skipped during memory optimization.
print_log(bool): whether to print debug log.
level(int): If level=0, reuse if the shape is completely equal, o
level(int): 0 or 1. 0 means we replace a with b only when a.size == b.size; 1 means we can replace a with b whenever a.size <= b.size.
Returns:
None
Examples:
.. code-block:: python

    import paddle.fluid as fluid

    # build network
    # ...

    # deprecated API
    fluid.memory_optimize(fluid.default_main_program())
"""
sys.stderr.write('memory_optimize is deprecated. '
'Use CompiledProgram and Executor\n')
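For readers of this diff, here is a minimal usage sketch of the skip_opt_set and level arguments described in the docstring above. It assumes the fluid 1.x API shown in this commit; the toy network and the choice to protect the loss variable are illustrative assumptions, not part of the commit.

```python
import paddle.fluid as fluid

# Toy network (illustrative assumption, not part of this commit).
image = fluid.layers.data(name='image', shape=[784], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
pred = fluid.layers.fc(input=image, size=10, act='softmax')
loss = fluid.layers.mean(fluid.layers.cross_entropy(input=pred, label=label))
fluid.optimizer.SGD(learning_rate=0.01).minimize(loss)

# Deprecated API: rewrite the program in place so later variables reuse
# the memory of earlier, no-longer-needed ones. Variables named in
# skip_opt_set keep their own storage, so fetching `loss` afterwards
# still returns the expected value. level=0 reuses only buffers of
# exactly equal size.
fluid.memory_optimize(fluid.default_main_program(),
                      skip_opt_set=set([loss.name]),
                      print_log=False,
                      level=0)
```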
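The deprecation message above points to CompiledProgram and Executor as the replacement. Below is a minimal sketch of that path, assuming a fluid 1.x release in which BuildStrategy exposes a boolean memory_optimize flag; the toy network, batch size, and CPU place are assumptions for illustration only.

```python
import numpy as np
import paddle.fluid as fluid

# Same toy network as in the previous sketch.
image = fluid.layers.data(name='image', shape=[784], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
pred = fluid.layers.fc(input=image, size=10, act='softmax')
loss = fluid.layers.mean(fluid.layers.cross_entropy(input=pred, label=label))
fluid.optimizer.SGD(learning_rate=0.01).minimize(loss)

place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())

# Graph-level memory reuse is requested through BuildStrategy instead of
# calling the deprecated memory_optimize() on the program.
build_strategy = fluid.BuildStrategy()
build_strategy.memory_optimize = True
compiled_prog = fluid.CompiledProgram(
    fluid.default_main_program()).with_data_parallel(
        loss_name=loss.name, build_strategy=build_strategy)

# Run one step with random data to show the compiled program in use.
feed = {
    'image': np.random.random((8, 784)).astype('float32'),
    'label': np.random.randint(0, 10, size=(8, 1)).astype('int64'),
}
loss_val, = exe.run(compiled_prog, feed=feed, fetch_list=[loss.name])
print(loss_val)
```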