From 199da9689c5c7fddd956375cc5ee4f6e32068658 Mon Sep 17 00:00:00 2001
From: Chen Weihang
Date: Tue, 29 Sep 2020 13:35:58 +0800
Subject: [PATCH] Polish api Program/CompiledProgram/ParallelEnv doc & code
 example (#27656)

* polish Program api doc & example
* polish CompiledProgram api doc & example
* polish ParallelEnv api doc & examples
* polish details, test=document_fix
* polish program doc details, test=document_fix
* polish details, test=document_fix
* fix note format error, test=document_fix
* add lost example, test=document_fix
* fix lost example, test=document_fix
---
 python/paddle/fluid/compiler.py         | 143 ++++++-----
 python/paddle/fluid/dygraph/parallel.py |  78 +++---
 python/paddle/fluid/framework.py        | 318 +++++++++++++-----------
 3 files changed, 280 insertions(+), 259 deletions(-)

diff --git a/python/paddle/fluid/compiler.py b/python/paddle/fluid/compiler.py
index 3923620379d..31cacf075b7 100644
--- a/python/paddle/fluid/compiler.py
+++ b/python/paddle/fluid/compiler.py
@@ -93,16 +93,16 @@ class CompiledProgram(object):
    for example, the operators' fusion in the computation graph, memory
    optimization during the execution of the computation graph, etc.
    For more information about build_strategy, please refer to
-    :code:`fluid.BuildStrategy`.
+    :code:`paddle.static.BuildStrategy`.

    Args:
-        program_or_graph (Graph|Program): This parameter is the Program or Graph
+        program_or_graph (Graph|Program): This argument is the Program or Graph
            being executed.
-        build_strategy(BuildStrategy): This parameter is used to compile the
+        build_strategy(BuildStrategy): This argument is used to compile the
            program or graph with the specified options, such as operators' fusion
            in the computational graph and memory optimization during the execution
            of the computational graph. For more information about build_strategy,
-            please refer to :code:`fluid.BuildStrategy`. The default is None.
+            please refer to :code:`paddle.static.BuildStrategy`. The default is None.

    Returns:
        CompiledProgram

    Example:
        .. code-block:: python

-            import paddle.fluid as fluid
-            import numpy
+            import numpy
+            import paddle
+            import paddle.static as static

-            place = fluid.CUDAPlace(0) # fluid.CPUPlace()
-            exe = fluid.Executor(place)
+            paddle.enable_static()

-            data = fluid.data(name='X', shape=[None, 1], dtype='float32')
-            hidden = fluid.layers.fc(input=data, size=10)
-            loss = fluid.layers.mean(hidden)
-            fluid.optimizer.SGD(learning_rate=0.01).minimize(loss)
+            place = paddle.CUDAPlace(0) # paddle.CPUPlace()
+            exe = static.Executor(place)

-            exe.run(fluid.default_startup_program())
-            compiled_prog = fluid.CompiledProgram(
-                fluid.default_main_program())
+            data = static.data(name='X', shape=[None, 1], dtype='float32')
+            hidden = static.nn.fc(input=data, size=10)
+            loss = paddle.mean(hidden)
+            paddle.optimizer.SGD(learning_rate=0.01).minimize(loss)

-            x = numpy.random.random(size=(10, 1)).astype('float32')
-            loss_data, = exe.run(compiled_prog,
-                                 feed={"X": x},
-                                 fetch_list=[loss.name])
+            exe.run(static.default_startup_program())
+            compiled_prog = static.CompiledProgram(
+                static.default_main_program())
+
+            x = numpy.random.random(size=(10, 1)).astype('float32')
+            loss_data, = exe.run(compiled_prog,
+                                 feed={"X": x},
+                                 fetch_list=[loss.name])
    """

    def __init__(self, program_or_graph, build_strategy=None):
@@ -169,13 +172,16 @@ class CompiledProgram(object):
        exec_strategy to set some optimizations that can be applied during the construction
        and computation of the Graph, such as reducing the number of AllReduce operations,
        specifying the size of the thread pool used in the computation Graph running the model,
-        and so on. **Note: If build_strategy is specified when building CompiledProgram and calling
-        with_data_parallel, build_strategy in CompiledProgram will be overwritten, therefore,
-        if it is data parallel training, it is recommended to set build_strategy when calling
-        with_data_parallel interface.**
+        and so on.
+
+        .. note::
+            If build_strategy is specified when building CompiledProgram and calling
+            with_data_parallel, the build_strategy in CompiledProgram will be overwritten; therefore,
+            for data parallel training, it is recommended to set build_strategy when calling
+            the with_data_parallel interface.

        Args:
-            loss_name (str): This parameter is the name of the loss variable of the model.
+            loss_name (str): This parameter is the name of the loss Tensor of the model.
                **Note: If it is model training, you must set loss_name, otherwise the
                result may be problematic**. The default is None.
            build_strategy(BuildStrategy): This parameter is used to compile the
@@ -192,7 +198,7 @@ class CompiledProgram(object):
            specified by share_vars_from. This parameter needs to be set when model testing
            is required during model training, and the data parallel mode is used for
            training and testing. Since CompiledProgram will only distribute parameter
-            variables to other devices when it is first executed, the CompiledProgram
+            Tensors to other devices when it is first executed, the CompiledProgram
            specified by share_vars_from must be run before the current CompiledProgram.
            The default is None.
            places(list(CUDAPlace)|list(CPUPlace)|None): This parameter specifies the device
@@ -214,50 +220,53 @@

        Example:
            .. code-block:: python

-                import paddle.fluid as fluid
-                import numpy
-                import os
-
-                use_cuda = True
-                place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
-                parallel_places = [fluid.CUDAPlace(0), fluid.CUDAPlace(1)] if use_cuda else [fluid.CPUPlace()] * 2
-
-                # NOTE: If you use CPU to run the program, you need
-                # to specify the CPU_NUM, otherwise, fluid will use
-                # all the number of the logic core as the CPU_NUM,
-                # in that case, the batch size of the input should be
-                # greater than CPU_NUM, if not, the process will be
-                # failed by an exception.
-                if not use_cuda:
-                    os.environ['CPU_NUM'] = str(2)
-
-                exe = fluid.Executor(place)
-
-                data = fluid.data(name='X', shape=[None, 1], dtype='float32')
-                hidden = fluid.layers.fc(input=data, size=10)
-                loss = fluid.layers.mean(hidden)
-
-                test_program = fluid.default_main_program().clone(for_test=True)
-                fluid.optimizer.SGD(learning_rate=0.01).minimize(loss)
-
-                exe.run(fluid.default_startup_program())
-                compiled_train_prog = fluid.CompiledProgram(
-                    fluid.default_main_program()).with_data_parallel(
-                            loss_name=loss.name, places=parallel_places)
-                # NOTE: if not set share_vars_from=compiled_train_prog,
-                # the parameters used in test process are different with
-                # the parameters used by train process
-                compiled_test_prog = fluid.CompiledProgram(
-                    test_program).with_data_parallel(
-                            share_vars_from=compiled_train_prog,
-                            places=parallel_places)
-
-                train_data = numpy.random.random(size=(10, 1)).astype('float32')
-                loss_data, = exe.run(compiled_train_prog,
+                import numpy
+                import os
+                import paddle
+                import paddle.static as static
+
+                paddle.enable_static()
+
+                use_cuda = True
+                place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
+                parallel_places = [paddle.CUDAPlace(0), paddle.CUDAPlace(1)] if use_cuda else [paddle.CPUPlace()] * 2
+
+                # NOTE: If you use CPU to run the program, you need
+                # to specify the CPU_NUM, otherwise, paddle will use
+                # all the logical cores as the CPU_NUM,
+                # in that case, the batch size of the input should be
+                # greater than CPU_NUM; if not, the process will
+                # fail with an exception.
+                if not use_cuda:
+                    os.environ['CPU_NUM'] = str(2)
+
+                exe = static.Executor(place)
+
+                data = static.data(name='X', shape=[None, 1], dtype='float32')
+                hidden = static.nn.fc(input=data, size=10)
+                loss = paddle.mean(hidden)
+
+                test_program = static.default_main_program().clone(for_test=True)
+                paddle.optimizer.SGD(learning_rate=0.01).minimize(loss)
+
+                exe.run(static.default_startup_program())
+                compiled_train_prog = static.CompiledProgram(
+                    static.default_main_program()).with_data_parallel(
+                            loss_name=loss.name, places=parallel_places)
+                # NOTE: if share_vars_from=compiled_train_prog is not set,
+                # the parameters used in the test process are different from
+                # the parameters used by the train process
+                compiled_test_prog = static.CompiledProgram(
+                    test_program).with_data_parallel(
+                            share_vars_from=compiled_train_prog,
+                            places=parallel_places)
+
+                train_data = numpy.random.random(size=(10, 1)).astype('float32')
+                loss_data, = exe.run(compiled_train_prog,
                                 feed={"X": train_data},
                                 fetch_list=[loss.name])
-                test_data = numpy.random.random(size=(10, 1)).astype('float32')
-                loss_data, = exe.run(compiled_test_prog,
+                test_data = numpy.random.random(size=(10, 1)).astype('float32')
+                loss_data, = exe.run(compiled_test_prog,
                                 feed={"X": test_data},
                                 fetch_list=[loss.name])
        """
diff --git a/python/paddle/fluid/dygraph/parallel.py b/python/paddle/fluid/dygraph/parallel.py
index 344ae55ee9d..30918113be1 100644
--- a/python/paddle/fluid/dygraph/parallel.py
+++ b/python/paddle/fluid/dygraph/parallel.py
@@ -61,60 +61,44 @@ def prepare_context(strategy=None):
 class ParallelEnv(object):
    """
-    **Notes**:
-        **The old class name was Env and will be deprecated. Please use new class name ParallelEnv.**
+    .. note::
+        This API is not recommended. If you need to get the rank and world_size,
+        it is recommended to use ``paddle.distributed.get_rank()`` and
+        ``paddle.distributed.get_world_size()`` .

    This class is used to obtain the environment variables required for
-    the parallel execution of dynamic graph model.
+    the parallel execution of ``paddle.nn.Layer`` in dynamic mode.

-    The dynamic graph parallel mode needs to be started using paddle.distributed.launch.
-    By default, the related environment variable is automatically configured by this module.
-
-    This class is generally used in with `fluid.dygraph.DataParallel` to configure dynamic graph models
-    to run in parallel.
+    The parallel execution in dynamic mode needs to be started using ``paddle.distributed.launch``
+    or ``paddle.distributed.spawn`` .

    Examples:
      .. code-block:: python

-        # This example needs to run with paddle.distributed.launch, The usage is:
-        #   python -m paddle.distributed.launch --selected_gpus=0,1 example.py
-        # And the content of `example.py` is the code of following example.
-
-        import numpy as np
-        import paddle.fluid as fluid
-        import paddle.fluid.dygraph as dygraph
-        from paddle.fluid.optimizer import AdamOptimizer
-        from paddle.fluid.dygraph.nn import Linear
-        from paddle.fluid.dygraph.base import to_variable
-
-        place = fluid.CUDAPlace(fluid.dygraph.ParallelEnv().dev_id)
-        with fluid.dygraph.guard(place=place):
-
-            # prepare the data parallel context
-            strategy=dygraph.prepare_context()
-
-            linear = Linear(1, 10, act="softmax")
-            adam = fluid.optimizer.AdamOptimizer()
-
-            # make the module become the data parallelism module
-            linear = dygraph.DataParallel(linear, strategy)
-
-            x_data = np.random.random(size=[10, 1]).astype(np.float32)
-            data = to_variable(x_data)
-
-            hidden = linear(data)
-            avg_loss = fluid.layers.mean(hidden)
-
-            # scale the loss according to the number of trainers.
-            avg_loss = linear.scale_loss(avg_loss)
-
-            avg_loss.backward()
-
-            # collect the gradients of trainers.
-            linear.apply_collective_grads()
-
-            adam.minimize(avg_loss)
-            linear.clear_gradients()
+        import paddle
+        import paddle.distributed as dist
+
+        def train():
+            # 1. initialize parallel environment
+            dist.init_parallel_env()
+
+            # 2. get current ParallelEnv
+            parallel_env = dist.ParallelEnv()
+            print("rank: ", parallel_env.rank)
+            print("world_size: ", parallel_env.world_size)
+
+            # print result in process 1:
+            # rank: 0
+            # world_size: 2
+            # print result in process 2:
+            # rank: 1
+            # world_size: 2
+
+        if __name__ == '__main__':
+            # 1. start by ``paddle.distributed.spawn`` (default)
+            dist.spawn(train, nprocs=2)
+            # 2. start by ``paddle.distributed.launch``
+            #    train()
    """

    def __init__(self):
diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py
index efd15c193ac..61ffb60b110 100644
--- a/python/paddle/fluid/framework.py
+++ b/python/paddle/fluid/framework.py
@@ -3951,7 +3951,7 @@ class IrGraph(object):
 class Program(object):
    """
    Create Python Program. It has at least one :ref:`api_guide_Block_en`, when the
-    control flow op like conditional_block, while :ref:`api_fluid_layers_While` is included,
+    control flow op like conditional_block, while :ref:`api_paddle_fluid_layers_While` is included,
    it will contain nested block.
    Please reference the
@@ -3968,9 +3968,9 @@ class Program(object):
    backward ops and vars.

    **Notes**:
-        **we have** :ref:`api_fluid_default_startup_program` **and** :ref:`api_fluid_default_main_program`
-        **by default, a pair of them will shared the parameters. The** :ref:`api_fluid_default_startup_program` **only run once to initialize parameters,**
-        :ref:`api_fluid_default_main_program` **run in every mini batch and adjust the weights.**
+        **we have** :ref:`api_paddle_fluid_framework_default_startup_program` **and** :ref:`api_paddle_fluid_framework_default_main_program`
+        **by default, a pair of them will share the parameters. The** :ref:`api_paddle_fluid_framework_default_startup_program` **runs only once to initialize parameters,**
+        :ref:`api_paddle_fluid_framework_default_main_program` **runs in every mini batch and adjusts the weights.**

    Returns:
        Program: An empty Program.

    Examples:
        .. code-block:: python

-            import paddle.fluid as fluid
+            import paddle
+            import paddle.static as static

-            main_program = fluid.Program()
-            startup_program = fluid.Program()
-            with fluid.program_guard(main_program=main_program, startup_program=startup_program):
-                x = fluid.layers.data(name="x", shape=[-1, 784], dtype='float32')
-                y = fluid.layers.data(name="y", shape=[-1, 1], dtype='int32')
-                z = fluid.layers.fc(name="fc", input=x, size=10, act="relu")
+            paddle.enable_static()
+
+            main_program = static.Program()
+            startup_program = static.Program()
+            with static.program_guard(main_program=main_program, startup_program=startup_program):
+                x = static.data(name="x", shape=[-1, 784], dtype='float32')
+                y = static.data(name="y", shape=[-1, 1], dtype='int32')
+                z = static.nn.fc(name="fc", input=x, size=10, act="relu")

            print("main program is: {}".format(main_program))
            print("start up program is: {}".format(startup_program))
@@ -4053,15 +4056,18 @@ class Program(object):

        Examples:
            .. code-block:: python

-                import paddle.fluid as fluid
+                import paddle
+                import paddle.static as static

-                prog = fluid.default_main_program()
+                paddle.enable_static()
+
+                prog = static.default_main_program()
                print(prog.random_seed)
                ## 0
                ## the default random seed is 0

                prog.global_seed(102)
-                prog1 = fluid.default_main_program()
+                prog1 = static.default_main_program()
                print(prog1.random_seed)
                ## 102
                ## the random seed is 102
@@ -4254,11 +4260,14 @@ class Program(object):

        Examples:
            .. code-block:: python

-                import paddle.fluid as fluid
+                import paddle
+                import paddle.static as static

-                prog = fluid.default_main_program()
-                x = fluid.layers.data(name="X", shape=[2,3], dtype="float32", append_batch_size=False)
-                pred = fluid.layers.fc(x, size=3)
+                paddle.enable_static()
+
+                prog = static.default_main_program()
+                x = static.data(name="X", shape=[2,3], dtype="float32")
+                pred = static.nn.fc(x, size=3)
                prog_string = prog.to_string(throw_on_error=True, with_details=False)
                prog_string_with_details = prog.to_string(throw_on_error=False, with_details=True)
                print("program string without detail: {}".format(prog_string))
@@ -4299,17 +4308,15 @@ class Program(object):

    def clone(self, for_test=False):
        """
-        **Notes**:
-            **1.** :code:`Program.clone()` **method DOES NOT clone** :ref:`api_fluid_io_DataLoader` .
-
-            **2. Recommend you to use** :code:`clone` **before using** :code:`Opimizer.minimize`.
-
-            **3. This API has no effect in Dygraph Mode**
+        .. note::
+            1. :code:`Program.clone()` method DOES NOT clone :ref:`api_paddle_io_DataLoader` .
+            2. It is recommended to use :code:`clone` before using :code:`Optimizer.minimize` .
+            3. This API has no effect in Dygraph Mode.

        Create a new Program with forward content of original one when ``for_test=True``.
        Create a new Program as same as the original one when ``for_test=False``.

-        Some operators, e.g., :ref:`api_fluid_layers_batch_norm` , behave differently between
+        Some operators, e.g., :ref:`api_paddle_fluid_layers_batch_norm` , behave differently between
        training and testing. They have an attribute, :code:`is_test`, to control this behaviour.
        This method will change the :code:`is_test` attribute of them to :code:`True` when
        :code:`for_test=True`.
@@ -4323,13 +4330,17 @@ class Program(object):
        For Example:
            ::

-                import paddle.fluid as fluid
-                img = fluid.layers.data(name='image', shape=[784])
-                pred = fluid.layers.fc(input=img, size=10, act='relu')
-                loss = fluid.layers.mean(pred)
+                import paddle
+                import paddle.static as static
+
+                paddle.enable_static()
+
+                img = static.data(name='image', shape=[None, 784])
+                pred = static.nn.fc(input=img, size=10, act='relu')
+                loss = paddle.mean(pred)
                # Here we use clone before Momentum
-                test_program = fluid.default_main_program().clone(for_test=True)
-                optimizer = fluid.optimizer.Momentum(learning_rate=0.01, momentum=0.9)
+                test_program = static.default_main_program().clone(for_test=True)
+                optimizer = paddle.optimizer.Momentum(learning_rate=0.01, momentum=0.9)
                optimizer.minimize(loss)

        Args:
@@ -4343,14 +4354,15 @@ class Program(object):

        Examples:

-            **Notes: The Program's order maybe different after** :code:`clone` **and
-            this will not affect your training or testing progress. In the following
-            example we give you an simple method** :code:`print_prog(program)` **to
-            print Program Descs inorder to make sure you have same print result
-            after** :code:`clone`:
+            .. note::
+                The Program's order may be different after :code:`clone` and
+                this will not affect your training or testing progress. In the following
+                example we give you a simple method :code:`print_prog(program)` to
+                print Program Descs in order to make sure you have the same print result
+                after :code:`clone`:
+
            .. code-block:: python

-                import paddle.fluid as fluid
                import six

                def print_prog(prog):
                    for name, value in sorted(six.iteritems(prog.block(0).vars)):
                        print(value)
                    for op in prog.block(0).ops:
                        print("op type is {}".format(op.type))
                        print("op inputs are {}".format(op.input_arg_names))
                        print("op outputs are {}".format(op.output_arg_names))
                        for key, value in sorted(six.iteritems(op.all_attrs())):
                            if key not in ['op_callstack', 'op_role_var']:
                                print(" [ attrs: {}: {} ]".format(key, value))

-        1. To clone a test program, the sample code is:
+            1. To clone a test program, the sample code is:
                .. code-block:: python

-                import paddle.fluid as fluid
                    import six
+                    import paddle
+                    import paddle.static as static
+                    import paddle.utils as utils
+                    import paddle.nn.functional as F
+
+                    paddle.enable_static()

                    def print_prog(prog):
                        for name, value in sorted(six.iteritems(prog.block(0).vars)):
                            print(value)
                        for op in prog.block(0).ops:
                            print("op type is {}".format(op.type))
                            print("op inputs are {}".format(op.input_arg_names))
                            print("op outputs are {}".format(op.output_arg_names))
                            for key, value in sorted(six.iteritems(op.all_attrs())):
                                if key not in ['op_callstack', 'op_role_var']:
                                    print(" [ attrs: {}: {} ]".format(key, value))

-                train_program = fluid.Program()
-                startup_program = fluid.Program()
+                    train_program = static.Program()
+                    startup_program = static.Program()
                    # startup_program is used to do some parameter init work,
                    # and main program is used to hold the network
-                with fluid.program_guard(train_program, startup_program):
-                    with fluid.unique_name.guard():
-                        img = fluid.layers.data(name='image', shape=[784])
-                        hidden = fluid.layers.fc(input=img, size=200, act='relu')
-                        hidden = fluid.layers.dropout(hidden, dropout_prob=0.5)
-                        loss = fluid.layers.cross_entropy(
-                            input=fluid.layers.fc(hidden, size=10, act='softmax'),
-                            label=fluid.layers.data(name='label', shape=[1], dtype='int64'))
-                        avg_loss = fluid.layers.mean(loss)
+                    with static.program_guard(train_program, startup_program):
+                        with utils.unique_name.guard():
+                            img = static.data(name='image', shape=[None, 784])
+                            hidden = static.nn.fc(input=img, size=200, act='relu')
+                            hidden = F.dropout(hidden, p=0.5)
+                            loss = F.cross_entropy(
+                                input=static.nn.fc(hidden, size=10, act='softmax'),
+                                label=static.data(name='label', shape=[1], dtype='int64'))
+                            avg_loss = paddle.mean(loss)
                    test_program = train_program.clone(for_test=True)
                    print_prog(test_program)
@@ -4407,17 +4424,22 @@ class Program(object):
                    # that's why we need to use startup program of train. And for startup program of test, it has nothing,
                    # since it is a new program.
-                with fluid.program_guard(train_program, startup_program):
-                    with fluid.unique_name.guard():
-                        sgd = fluid.optimizer.SGD(learning_rate=1e-3)
+                    with static.program_guard(train_program, startup_program):
+                        with utils.unique_name.guard():
+                            sgd = paddle.optimizer.SGD(learning_rate=1e-3)
                            sgd.minimize(avg_loss)

-        2. The clone method can be avoid if you create program for training and program for testing individually.
+            2. The clone method can be avoided if you create the program for training and the program for testing individually.
                .. code-block:: python

-                import paddle.fluid as fluid
                    import six
+                    import paddle
+                    import paddle.static as static
+                    import paddle.utils as utils
+                    import paddle.nn.functional as F
+
+                    paddle.enable_static()

                    def print_prog(prog):
                        for name, value in sorted(six.iteritems(prog.block(0).vars)):
                            print(value)
                        for op in prog.block(0).ops:
                            print("op type is {}".format(op.type))
                            print("op inputs are {}".format(op.input_arg_names))
                            print("op outputs are {}".format(op.output_arg_names))
@@ -4429,32 +4451,32 @@ class Program(object):
                            for key, value in sorted(six.iteritems(op.all_attrs())):
                                if key not in ['op_callstack', 'op_role_var']:
                                    print(" [ attrs: {}: {} ]".format(key, value))
-
+
                    def network():
-                        img = fluid.layers.data(name='image', shape=[784])
-                        hidden = fluid.layers.fc(input=img, size=200, act='relu')
-                        hidden = fluid.layers.dropout(hidden, dropout_prob=0.5)
-                        loss = fluid.layers.cross_entropy(
-                            input=fluid.layers.fc(hidden, size=10, act='softmax'),
-                            label=fluid.layers.data(name='label', shape=[1], dtype='int64'))
-                        avg_loss = fluid.layers.mean(loss)
+                        img = static.data(name='image', shape=[None, 784])
+                        hidden = static.nn.fc(input=img, size=200, act='relu')
+                        hidden = F.dropout(hidden, p=0.5)
+                        loss = F.cross_entropy(
+                            input=static.nn.fc(hidden, size=10, act='softmax'),
+                            label=static.data(name='label', shape=[1], dtype='int64'))
+                        avg_loss = paddle.mean(loss)
                        return avg_loss

-                train_program_2 = fluid.Program()
-                startup_program_2 = fluid.Program()
-                test_program_2 = fluid.Program()
-                with fluid.program_guard(train_program_2, startup_program_2):
-                    with fluid.unique_name.guard():
+                    train_program_2 = static.Program()
+                    startup_program_2 = static.Program()
+                    test_program_2 = static.Program()
+                    with static.program_guard(train_program_2, startup_program_2):
+                        with utils.unique_name.guard():
                            avg_loss = network()
-                    sgd = fluid.optimizer.SGD(learning_rate=1e-3)
+                            sgd = paddle.optimizer.SGD(learning_rate=1e-3)
                            sgd.minimize(avg_loss)
                    # the test startup program is not used.
-                with fluid.program_guard(test_program_2, startup_program_2):
-                    with fluid.unique_name.guard():
+                    with static.program_guard(test_program_2, startup_program_2):
+                        with utils.unique_name.guard():
                            avg_loss = network()
                    print_prog(test_program_2)

-        The two code snippets above will generate and print same programs.
+            The two code snippets above will generate and print the same programs.
        """

        #NOTE(zhiqiu): we sync the original program first, since its program may diff with
@@ -4661,10 +4683,9 @@ class Program(object):
    @staticmethod
    def parse_from_string(binary_str):
        """
-        **Notes**:
-            **1. All information about parameters will be lost after serialization**
-
-            **2. This API has no effect in Dygraph mode**
+        .. note::
+            1. All information about parameters will be lost after serialization;
+            2. This API has no effect in Dygraph mode.

        Deserialize a Program from `protobuf `_ binary string.
        This method always use to save and load model

        Examples:
            .. code-block:: python

-                import paddle.fluid as fluid
+                import paddle
+                import paddle.static as static
+
+                paddle.enable_static()

-                startup_prog = fluid.Program()
-                main_prog = fluid.Program()
-                with fluid.program_guard(startup_prog, main_prog):
-                    x = fluid.layers.data(
-                        name='X', shape=[1000, 784], dtype='float32', append_batch_size=False)
+                startup_prog = static.Program()
+                main_prog = static.Program()
+                with static.program_guard(startup_prog, main_prog):
+                    x = static.data(name='X', shape=[1000, 784], dtype='float32')

-                    y = fluid.layers.data(
-                        name='Y', shape=[784, 100], dtype='float32', append_batch_size=False)
+                    y = static.data(name='Y', shape=[784, 100], dtype='float32')

-                    z = fluid.layers.mul(x=x, y=y)
+                    z = paddle.matmul(x=x, y=y)

-                binary_str = fluid.default_main_program().desc.serialize_to_string()
-                prog_restored = fluid.default_main_program().parse_from_string(binary_str)
+                binary_str = static.default_main_program().desc.serialize_to_string()
+                prog_restored = static.default_main_program().parse_from_string(binary_str)

-                print(fluid.default_main_program())
+                print(static.default_main_program())
                print(prog_restored)
        """
        p = Program()
@@ -4727,7 +4749,8 @@ class Program(object):
        The default random seed for random operators in Program. ``0`` means get
        the random seed from random device.

-        **Notes: It must be set before the operators have been added.**
+        .. note::
+            It must be set before the operators have been added.

        Returns:
            int64: Random seed in current Program

        Examples:
            .. code-block:: python

-                import paddle.fluid as fluid
+                import paddle
+                import paddle.static as static
+                import paddle.nn.functional as F

-                prog = fluid.default_main_program()
+                paddle.enable_static()
+
+                prog = static.default_main_program()
                random_seed = prog.random_seed
-                x_var = fluid.layers.data(name="X", shape=[3,3], dtype="float32", append_batch_size=False)
+                x_var = static.data(name="X", shape=[3,3], dtype="float32")
                print(random_seed)
                ## 0
                ## the default random seed is 0

                # Here we need to set random seed before we use fluid.layers.dropout
                prog.random_seed = 1
-                z_var = fluid.layers.dropout(x_var, 0.7)
+                z_var = F.dropout(x_var, 0.7)

                print(prog.random_seed)
                ## 1
@@ -4760,7 +4787,8 @@ class Program(object):
        """
        The number of :ref:`api_guide_Block_en` in this Program.

-        **Notes: This API has no effect in Dygraph mode**
+        .. note::
+            This API has no effect in Dygraph mode.

        Returns:
            int(Platform-dependent size): num of :ref:`api_guide_Block_en` in current Program

        Examples:
            .. code-block:: python

-                import paddle.fluid as fluid
+                import paddle
+                import paddle.static as static

-                prog = fluid.default_main_program()
+                paddle.enable_static()
+
+                prog = static.default_main_program()
                num_blocks = prog.num_blocks
                print(num_blocks)
-
+
+                # print result:
+                # 1
        """
        return self.desc.num_blocks()
@@ -4792,8 +4824,8 @@ class Program(object):
    def global_block(self):
        """
-        **Notes**:
-            **This API has no effect in Dygraph mode**
+        .. note::
+            This API has no effect in Dygraph mode.

        Get the first :ref:`api_guide_Block_en` of this Program.

        Returns:
            :ref:`api_guide_Block_en`: The first :ref:`api_guide_Block_en` of this Program.

        Examples:
            .. code-block:: python

-                import paddle.fluid as fluid
+                import paddle
+                import paddle.static as static

-                prog = fluid.default_main_program()
+                paddle.enable_static()
+
+                prog = static.default_main_program()
                gb_block = prog.global_block()
                print(gb_block)
        """
@@ -4815,8 +4850,8 @@ class Program(object):
    def block(self, index):
        """
-        **Notes**:
-            **This API has no effect in Dygraph mode**
+        .. note::
+            This API has no effect in Dygraph mode.

        Get the :code:`index` :ref:`api_guide_Block_en` of this Program

        Args:
            index (int) - The index of :ref:`api_guide_Block_en` to get

        Returns:
            :ref:`api_guide_Block_en`: The :code:`index` block

        Examples:
            .. code-block:: python

-                import paddle.fluid as fluid
+                import paddle
+                import paddle.static as static

-                prog = fluid.default_main_program()
+                paddle.enable_static()
+
+                prog = static.default_main_program()
                block_0 = prog.block(0)
                print(block_0)
        """
@@ -4839,8 +4877,8 @@ class Program(object):
    def current_block(self):
        """
-        **Notes**:
-            **This API has no effect in Dygraph mode**
+        .. note::
+            This API has no effect in Dygraph mode.

        Get the current :ref:`api_guide_Block_en` . The :code:`current` :ref:`api_guide_Block_en`
        is the :ref:`api_guide_Block_en` to append operators.

        Returns:
            :ref:`api_guide_Block_en`: The :code:`current` :ref:`api_guide_Block_en`

        Examples:
            .. code-block:: python

-                import paddle.fluid as fluid
+                import paddle
+                import paddle.static as static

-                prog = fluid.default_main_program()
+                paddle.enable_static()
+
+                prog = static.default_main_program()
                current_blk = prog.current_block()
                print(current_blk)
        """
@@ -4987,21 +5028,27 @@ class Program(object):
    def list_vars(self):
        """
-        Get all :ref:`api_guide_Variable_en` from this Program. A iterable object is returned.
+        Get all Tensors from this Program. An iterable object is returned.

        Returns:
-            iterable :ref:`api_guide_Variable_en`: The Generator will yield every variable in this program.
+            iterable Tensors: The Generator will yield every Tensor in this program.

        Examples:
            .. code-block:: python

-                import paddle.fluid as fluid
+                import paddle
+                import paddle.static as static

-                prog = fluid.default_main_program()
-                img = fluid.layers.data(name='img', shape=[1,28,28], dtype='float32')
-                label = fluid.layers.data(name='label', shape=[128,1], dtype='int64')
+                paddle.enable_static()
+
+                prog = static.default_main_program()
+                img = static.data(name='img', shape=[None, 1,28,28], dtype='float32')
+                label = static.data(name='label', shape=[None,1], dtype='int64')
                for var in prog.list_vars():
                    print(var)
+
+                # var img : fluid.VarType.LOD_TENSOR.shape(-1, 1, 28, 28).astype(VarType.FP32)
+                # var label : fluid.VarType.LOD_TENSOR.shape(-1, 1).astype(VarType.INT64)
        """
        for each_block in self.blocks:
            for each_var in list(each_block.vars.values()):
@@ -5017,13 +5064,16 @@

        Examples:
            .. code-block:: python

-                import paddle.fluid as fluid
+                import paddle
+                import paddle.static as static
+
+                paddle.enable_static()

-                program = fluid.default_main_program()
-                data = fluid.data(name='x', shape=[None, 13], dtype='float32')
-                hidden = fluid.layers.fc(input=data, size=10)
-                loss = fluid.layers.mean(hidden)
-                fluid.optimizer.SGD(learning_rate=0.01).minimize(loss)
+                program = static.default_main_program()
+                data = static.data(name='x', shape=[None, 13], dtype='float32')
+                hidden = static.nn.fc(input=data, size=10)
+                loss = paddle.mean(hidden)
+                paddle.optimizer.SGD(learning_rate=0.01).minimize(loss)

                for param in program.all_parameters():
                    print(param)

                # Here will print all parameters in current program, in this example,
                # the result is like:
                #
-                # name: "fc_0.w_0"
-                # type {
-                #   type: LOD_TENSOR
-                #   lod_tensor {
-                #     tensor {
-                #       data_type: FP32
-                #       dims: 13
-                #       dims: 10
-                #     }
-                #   }
-                # }
-                # persistable: true
-                #
-                # name: "fc_0.b_0"
-                # type {
-                #   type: LOD_TENSOR
-                #   lod_tensor {
-                #     tensor {
-                #       data_type: FP32
-                #       dims: 10
-                #     }
-                #   }
-                # }
-                # persistable: true
+                # persist trainable param fc_0.w_0 : fluid.VarType.LOD_TENSOR.shape(13, 10).astype(VarType.FP32)
+                # persist trainable param fc_0.b_0 : fluid.VarType.LOD_TENSOR.shape(10,).astype(VarType.FP32)
                #
                # Here print(param) will print out all the properties of a parameter,
                # including name, type and persistable, you can access to specific
--
GitLab
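
For reference, a minimal sketch (not part of the patch) of the ``paddle.distributed.get_rank()`` / ``paddle.distributed.get_world_size()`` usage that the new ParallelEnv note above recommends; it assumes the same ``paddle.distributed.spawn`` launch path used in the patched ParallelEnv example.

.. code-block:: python

    import paddle
    import paddle.distributed as dist

    def train():
        # initialize the parallel environment, as in the patched ParallelEnv example
        dist.init_parallel_env()

        # recommended replacements for ParallelEnv().rank / ParallelEnv().world_size
        rank = dist.get_rank()              # 0 .. world_size - 1
        world_size = dist.get_world_size()
        print("rank: {}, world_size: {}".format(rank, world_size))

    if __name__ == '__main__':
        # assumed launch path: two processes started via paddle.distributed.spawn
        dist.spawn(train, nprocs=2)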