diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec
index 0369f496be3e08288f27418f44dae5448ee32190..fdca632d14ec49ea80f5e8edfa93dec7c8be23ba 100644
--- a/paddle/fluid/API.spec
+++ b/paddle/fluid/API.spec
@@ -63,7 +63,7 @@ paddle.fluid.CompiledProgram.__init__ (ArgSpec(args=['self', 'program_or_graph',
 paddle.fluid.CompiledProgram.with_data_parallel (ArgSpec(args=['self', 'loss_name', 'build_strategy', 'exec_strategy', 'share_vars_from', 'places'], varargs=None, keywords=None, defaults=(None, None, None, None, None)), ('document', '3b61147fc4f54e1724aa9ead8a1d5f26'))
 paddle.fluid.ExecutionStrategy ('paddle.fluid.core_avx.ExecutionStrategy', ('document', '535ce28c4671176386e3cd283a764084'))
 paddle.fluid.ExecutionStrategy.__init__ __init__(self: paddle.fluid.core_avx.ParallelExecutor.ExecutionStrategy) -> None
-paddle.fluid.BuildStrategy ('paddle.fluid.core_avx.BuildStrategy', ('document', 'eec64b9b7cba58b0a63687b4c34ffe56'))
+paddle.fluid.BuildStrategy ('paddle.fluid.core_avx.BuildStrategy', ('document', '9c7ee090a0ab6896f5de996d59a2f645'))
 paddle.fluid.BuildStrategy.GradientScaleStrategy ('paddle.fluid.core_avx.GradientScaleStrategy', ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
 paddle.fluid.BuildStrategy.GradientScaleStrategy.__init__ __init__(self: paddle.fluid.core_avx.ParallelExecutor.BuildStrategy.GradientScaleStrategy, arg0: int) -> None
 paddle.fluid.BuildStrategy.ReduceStrategy ('paddle.fluid.core_avx.ReduceStrategy', ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc
index 66780034a072b3a91dccba8865fc9b2f10724b44..987e088084ed3290dd8a99a9535795fd108e0d3c 100644
--- a/paddle/fluid/pybind/pybind.cc
+++ b/paddle/fluid/pybind/pybind.cc
@@ -1510,9 +1510,26 @@ All parameter, weight, gradient are variables in Paddle.
     Examples:
         .. code-block:: python
 
+            import os
+            import numpy as np
             import paddle.fluid as fluid
+
+            os.environ["CPU_NUM"] = '2'
+            places = fluid.cpu_places()
+
+            data = fluid.layers.data(name="x", shape=[1], dtype="float32")
+            hidden = fluid.layers.fc(input=data, size=10)
+            loss = fluid.layers.mean(hidden)
+            fluid.optimizer.SGD(learning_rate=0.01).minimize(loss)
+
             build_strategy = fluid.BuildStrategy()
+            build_strategy.enable_inplace = True
+            build_strategy.memory_optimize = True
             build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce
+            program = fluid.compiler.CompiledProgram(fluid.default_main_program())
+            program = program.with_data_parallel(loss_name=loss.name,
+                                                 build_strategy=build_strategy,
+                                                 places=places)
 )DOC");
 
   py::enum_<BuildStrategy::ReduceStrategy>(build_strategy, "ReduceStrategy")
@@ -1534,13 +1551,13 @@ All parameter, weight, gradient are variables in Paddle.
                            "BuildStrategy is finlaized.");
             self.reduce_ = strategy;
           },
-          R"DOC(The type is fluid.BuildStrategy.ReduceStrategy, there are two reduce
+          R"DOC((fluid.BuildStrategy.ReduceStrategy, optional): there are two reduce
                 strategies in ParallelExecutor, AllReduce and Reduce. If you want
                 that all the parameters' optimization are done on all devices independently,
-                you should choose AllReduce; if you choose Reduce, all the parameters'
+                you should choose AllReduce; otherwise, if you choose Reduce, all the parameters'
                 optimization will be evenly distributed to different devices, and then
                 broadcast the optimized parameter to other devices.
-                Default 'AllReduce'.
+                Default is 'AllReduce'.
 
                 Examples:
                     .. code-block:: python
@@ -1558,11 +1575,11 @@ All parameter, weight, gradient are variables in Paddle.
"BuildStrategy is finalized."); self.gradient_scale_ = strategy; }, - R"DOC(The type is fluid.BuildStrategy.GradientScaleStrategy, there are three - ways of defining :math:`loss@grad` in ParallelExecutor, CoeffNumDevice, + R"DOC((fluid.BuildStrategy.GradientScaleStrategy, optional): there are three + ways of defining :math:`loss@grad` in ParallelExecutor, that is, CoeffNumDevice, One and Customized. By default, ParallelExecutor sets the :math:`loss@grad` according to the number of devices. If you want to customize :math:`loss@grad`, - you can choose Customized. Default 'CoeffNumDevice'. + you can choose Customized. Default is 'CoeffNumDevice'. Examples: .. code-block:: python @@ -1620,9 +1637,9 @@ All parameter, weight, gradient are variables in Paddle. "BuildStrategy is finlaized."); self.debug_graphviz_path_ = path; }, - R"DOC(The type is STR, debug_graphviz_path indicates the path that + R"DOC((str, optional): debug_graphviz_path indicates the path that writing the SSA Graph to file in the form of graphviz. - It is useful for debugging. Default "" + It is useful for debugging. Default is empty string, that is, "" Examples: .. code-block:: python @@ -1642,8 +1659,8 @@ All parameter, weight, gradient are variables in Paddle. "BuildStrategy is finlaized."); self.enable_sequential_execution_ = b; }, - R"DOC(The type is BOOL. If set True, the execution order of ops would - be the same as what is in the program. Default False. + R"DOC((bool, optional): If set True, the execution order of ops would + be the same as what is in the program. Default is False. Examples: .. code-block:: python @@ -1662,8 +1679,8 @@ All parameter, weight, gradient are variables in Paddle. "BuildStrategy is finlaized."); self.remove_unnecessary_lock_ = b; }, - R"DOC(The type is BOOL. If set True, some locks in GPU ops would be - released and ParallelExecutor would run faster. Default True. + R"DOC((bool, optional): If set True, some locks in GPU ops would be + released and ParallelExecutor would run faster. Default is True. Examples: .. code-block:: python @@ -1724,9 +1741,9 @@ All parameter, weight, gradient are variables in Paddle. "BuildStrategy is finlaized."); self.fuse_elewise_add_act_ops_ = b; }, - R"DOC(The type is BOOL, fuse_elewise_add_act_ops indicate whether + R"DOC((bool, optional): fuse_elewise_add_act_ops indicate whether to fuse elementwise_add_op and activation_op, - it may make the execution faster. Default False + it may make the execution faster. Default is False. Examples: .. code-block:: python @@ -1745,11 +1762,11 @@ All parameter, weight, gradient are variables in Paddle. "BuildStrategy is finlaized."); self.fuse_relu_depthwise_conv_ = b; }, - R"DOC(The type is BOOL, fuse_relu_depthwise_conv indicate whether + R"DOC((bool, optional): fuse_relu_depthwise_conv indicate whether to fuse relu and depthwise_conv2d, it will save GPU memory and may make the execution faster. This options is only available in GPU devices. - Default False. + Default is False. Examples: .. code-block:: python @@ -1768,12 +1785,20 @@ All parameter, weight, gradient are variables in Paddle. "BuildStrategy is finlaized."); self.fuse_broadcast_ops_ = b; }, - R"DOC(The type is BOOL, fuse_broadcast_op indicates whether + R"DOC((bool, optional): fuse_broadcast_op indicates whether to fuse the broadcast ops. Note that, in Reduce mode, fusing broadcast ops may make the program faster. 
                Because fusing broadcast OP equals delaying the execution of all
                 broadcast Ops, in this case, all nccl streams are used only
-                for NCCLReduce operations for a period of time. Default False.)DOC")
+                for NCCLReduce operations for a period of time. Default is False.
+
+                Examples:
+                    .. code-block:: python
+
+                        import paddle.fluid as fluid
+                        build_strategy = fluid.BuildStrategy()
+                        build_strategy.fuse_broadcast_ops = True
+                )DOC")
       .def_property("fuse_all_optimizer_ops",
                     [](const BuildStrategy &self) {
                       return self.fuse_all_optimizer_ops_ == true ||
@@ -1792,14 +1817,12 @@ All parameter, weight, gradient are variables in Paddle.
                            "BuildStrategy is finlaized.");
             self.sync_batch_norm_ = b;
           },
-          R"DOC(The type is BOOL, sync_batch_norm indicates whether to use
+          R"DOC((bool, optional): sync_batch_norm indicates whether to use
                 synchronous batch normalization which synchronizes the mean
                 and variance through multi-devices in training phase.
-
                 Current implementation doesn't support FP16 training and CPU.
-                And only synchronous on one machine, not all machines.
-
-                Default False
+                And only synchronous on one machine, not all machines.
+                Default is False.
 
                 Examples:
                     .. code-block:: python
@@ -1829,13 +1852,13 @@ All parameter, weight, gradient are variables in Paddle.
                            "True");
                 }
               },
-          R"DOC(The type is BOOL or None, memory opitimize aims to save total memory
+          R"DOC((bool, optional): memory optimize aims to save total memory
                 consumption, set to True to enable it.
 
                 Default None. None means framework would choose to use or not use
                 this strategy automatically. Currently, None means that it is
                 enabled when GC is disabled, and disabled when GC is enabled.
-                True means enabling and False means disabling. Default None.)DOC")
+                True means enabling and False means disabling. Default is None.)DOC")
       .def_property(
           "is_distribution",
           [](const BuildStrategy &self) { return self.is_distribution_; },