diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec
index 6feaa7b484cfc81451d23e9a52942e54edb8f5be..0434a90909a53e181184dac83e00d6538071ddee 100644
--- a/paddle/fluid/API.spec
+++ b/paddle/fluid/API.spec
@@ -11,10 +11,10 @@ paddle.fluid.default_startup_program (ArgSpec(args=[], varargs=None, keywords=No
 paddle.fluid.default_main_program (ArgSpec(args=[], varargs=None, keywords=None, defaults=None), ('document', '853718df675e59aea7104f3d61bbf11d'))
 paddle.fluid.program_guard (ArgSpec(args=['main_program', 'startup_program'], varargs=None, keywords=None, defaults=(None,)), ('document', '78fb5c7f70ef76bcf4a1862c3f6b8191'))
 paddle.fluid.name_scope (ArgSpec(args=['prefix'], varargs=None, keywords=None, defaults=(None,)), ('document', '917d313881ff990de5fb18d98a9c7b42'))
-paddle.fluid.cuda_places (ArgSpec(args=['device_ids'], varargs=None, keywords=None, defaults=(None,)), ('document', '1f2bb6ece651e44117652d2d7bedecf5'))
-paddle.fluid.cpu_places (ArgSpec(args=['device_count'], varargs=None, keywords=None, defaults=(None,)), ('document', '956bab564ebc69ffd17195c08cc8ffa0'))
-paddle.fluid.cuda_pinned_places (ArgSpec(args=['device_count'], varargs=None, keywords=None, defaults=(None,)), ('document', 'c2562241744aabe3fff1b59af22dd281'))
-paddle.fluid.in_dygraph_mode (ArgSpec(args=[], varargs=None, keywords=None, defaults=None), ('document', '301bae0d8e02cc9eec5be02f052f11c6'))
+paddle.fluid.cuda_places (ArgSpec(args=['device_ids'], varargs=None, keywords=None, defaults=(None,)), ('document', 'ab9bd2079536114aa7c1488a489ee87f'))
+paddle.fluid.cpu_places (ArgSpec(args=['device_count'], varargs=None, keywords=None, defaults=(None,)), ('document', 'a7352a3dd39308fde4fbbf6421a4193d'))
+paddle.fluid.cuda_pinned_places (ArgSpec(args=['device_count'], varargs=None, keywords=None, defaults=(None,)), ('document', '567ac29567716fd8e7432b533337d529'))
+paddle.fluid.in_dygraph_mode (ArgSpec(args=[], varargs=None, keywords=None, defaults=None), ('document', 'df1f4d1ed7e1eefe04f6361efda6b75a'))
 paddle.fluid.is_compiled_with_cuda (ArgSpec(args=[], varargs=None, keywords=None, defaults=None), ('document', '60c7f107a5050aeb58bb74eb175672b5'))
 paddle.fluid.Variable ('paddle.fluid.framework.Variable', ('document', '65ff735c2b96673d7131f5ff6b0db40c'))
 paddle.fluid.Variable.__init__ (ArgSpec(args=['self', 'block', 'type', 'name', 'shape', 'dtype', 'lod_level', 'capacity', 'persistable', 'error_clip', 'stop_gradient', 'is_data', 'need_check_feed'], varargs=None, keywords='kwargs', defaults=(VarType.LOD_TENSOR, None, None, None, None, None, None, None, False, False, False)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
@@ -34,7 +34,7 @@ paddle.fluid.Executor.infer_from_dataset (ArgSpec(args=['self', 'program', 'data
 paddle.fluid.Executor.run (ArgSpec(args=['self', 'program', 'feed', 'fetch_list', 'feed_var_name', 'fetch_var_name', 'scope', 'return_numpy', 'use_program_cache'], varargs=None, keywords=None, defaults=(None, None, None, 'feed', 'fetch', None, True, False)), ('document', '4cfcd9c15b766a51b584cc46d38f1ad8'))
 paddle.fluid.Executor.train_from_dataset (ArgSpec(args=['self', 'program', 'dataset', 'scope', 'thread', 'debug', 'fetch_list', 'fetch_info', 'print_period', 'fetch_handler'], varargs=None, keywords=None, defaults=(None, None, None, 0, False, None, None, 100, None)), ('document', '73024c79f46b4f14f1060edeaa4919c8'))
 paddle.fluid.global_scope (ArgSpec(args=[], varargs=None, keywords=None, defaults=None), ('document', 'f65788d9ead293ada47551339df12203'))
-paddle.fluid.scope_guard (ArgSpec(args=['scope'], varargs=None, keywords=None, defaults=None), ('document', 'e6c073ed237001aaba7bff976b62b122'))
+paddle.fluid.scope_guard (ArgSpec(args=['scope'], varargs=None, keywords=None, defaults=None), ('document', '02fcfc1eda07c03a84ed62422366239c'))
 paddle.fluid.DistributeTranspiler ('paddle.fluid.transpiler.distribute_transpiler.DistributeTranspiler', ('document', 'b2b19821c5dffcd11473d6a4eef089af'))
 paddle.fluid.DistributeTranspiler.__init__ (ArgSpec(args=['self', 'config'], varargs=None, keywords=None, defaults=(None,)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
 paddle.fluid.DistributeTranspiler.get_pserver_program (ArgSpec(args=['self', 'endpoint'], varargs=None, keywords=None, defaults=None), ('document', 'b1951949c6d21698290aa8ac69afee32'))
@@ -1041,7 +1041,7 @@ paddle.fluid.optimizer.RecomputeOptimizer.backward (ArgSpec(args=['self', 'loss'
 paddle.fluid.optimizer.RecomputeOptimizer.get_opti_var_name_list (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
 paddle.fluid.optimizer.RecomputeOptimizer.load (ArgSpec(args=['self', 'stat_dict'], varargs=None, keywords=None, defaults=None), ('document', '7b2b8ae72011bc4decb67e97623f2c56'))
 paddle.fluid.optimizer.RecomputeOptimizer.minimize (ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'grad_clip'], varargs=None, keywords=None, defaults=(None, None, None, None)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
-paddle.fluid.backward.append_backward (ArgSpec(args=['loss', 'parameter_list', 'no_grad_set', 'callbacks', 'checkpoints'], varargs=None, keywords=None, defaults=(None, None, None, None)), ('document', '52488008103886c793843a3828bacd5e'))
+paddle.fluid.backward.append_backward (ArgSpec(args=['loss', 'parameter_list', 'no_grad_set', 'callbacks', 'checkpoints'], varargs=None, keywords=None, defaults=(None, None, None, None)), ('document', 'c68fe1cb95d90762b57c309cae9b99d9'))
 paddle.fluid.backward.gradients (ArgSpec(args=['targets', 'inputs', 'target_gradients', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None)), ('document', 'e2097e1e0ed84ae44951437bfe269a1b'))
 paddle.fluid.regularizer.L1DecayRegularizer ('paddle.fluid.regularizer.L1DecayRegularizer', ('document', '34603757e70974d2fcc730643b382925'))
 paddle.fluid.regularizer.L1DecayRegularizer.__init__ (ArgSpec(args=['self', 'regularization_coeff'], varargs=None, keywords=None, defaults=(0.0,)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
@@ -1058,11 +1058,11 @@ paddle.fluid.LoDTensor.shape shape(self: paddle.fluid.core_avx.Tensor) -> List[i
 paddle.fluid.LoDTensorArray ('paddle.fluid.core_avx.LoDTensorArray', ('document', 'e9895b67ba54438b9c0f7053e18966f5'))
 paddle.fluid.LoDTensorArray.__init__ __init__(self: paddle.fluid.core_avx.LoDTensorArray) -> None
 paddle.fluid.LoDTensorArray.append append(self: paddle.fluid.core_avx.LoDTensorArray, tensor: paddle.fluid.core_avx.LoDTensor) -> None
-paddle.fluid.CPUPlace ('paddle.fluid.core_avx.CPUPlace', ('document', '6014005ef2649045b77d502aeb6cd7f9'))
+paddle.fluid.CPUPlace ('paddle.fluid.core_avx.CPUPlace', ('document', 'd269ec68ce9b102ab10610e89ffa06e1'))
 paddle.fluid.CPUPlace.__init__ __init__(self: paddle.fluid.core_avx.CPUPlace) -> None
-paddle.fluid.CUDAPlace ('paddle.fluid.core_avx.CUDAPlace', ('document', '6a6cd8ed607beb951692c4b066d08c94'))
+paddle.fluid.CUDAPlace ('paddle.fluid.core_avx.CUDAPlace', ('document', 'f862cb3e5596a3920102f1b1238c223b'))
 paddle.fluid.CUDAPlace.__init__ __init__(self: paddle.fluid.core_avx.CUDAPlace, arg0: int) -> None
-paddle.fluid.CUDAPinnedPlace ('paddle.fluid.core_avx.CUDAPinnedPlace', ('document', 'afd58ea5d390b5ea06ca70291a266d45'))
+paddle.fluid.CUDAPinnedPlace ('paddle.fluid.core_avx.CUDAPinnedPlace', ('document', '1320ef739c81c95385330dab3fe6e80b'))
 paddle.fluid.CUDAPinnedPlace.__init__ __init__(self: paddle.fluid.core_avx.CUDAPinnedPlace) -> None
 paddle.fluid.ParamAttr ('paddle.fluid.param_attr.ParamAttr', ('document', '7b5bfe856689036b8fffb71af1558e5c'))
 paddle.fluid.ParamAttr.__init__ (ArgSpec(args=['self', 'name', 'initializer', 'learning_rate', 'regularizer', 'trainable', 'gradient_clip', 'do_model_average'], varargs=None, keywords=None, defaults=(None, None, 1.0, None, True, None, True)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc
index ad375aebd0f391e7064595b85cacdb52efdcf869..88999dcdda72ba7f4f4dfb00a4916f8c439c21e8 100644
--- a/paddle/fluid/pybind/pybind.cc
+++ b/paddle/fluid/pybind/pybind.cc
@@ -832,9 +832,23 @@ All parameter, weight, gradient are variables in Paddle.
   py::class_<Communicator>(m, "Communicator").def(py::init<>());
 #endif
   py::class_<platform::CUDAPlace>(m, "CUDAPlace", R"DOC(
-    CUDAPlace is a descriptor of a device. It represents a GPU, and each CUDAPlace
-    has a dev_id to indicate the number of cards represented by the current CUDAPlace.
+    **Note**:
+    For multi-card tasks, please use the `FLAGS_selected_gpus` environment variable to set the visible GPU devices.
+    The next version will fix the problem with the `CUDA_VISIBLE_DEVICES` environment variable.
+
+    CUDAPlace is a descriptor of a device.
+    It represents a GPU device allocated or to be allocated with Tensor or LoDTensor.
+    Each CUDAPlace has a dev_id to indicate the graphics card ID represented by the current CUDAPlace,
+    starting from 0. Memory on a CUDAPlace is not accessible from a CUDAPlace with a different dev_id.
+    Numbering here refers to the logical ID of the visible graphics card, not the actual ID of the graphics card.
+    You can set visible GPU devices by setting the `CUDA_VISIBLE_DEVICES` environment variable.
+    When the program starts, visible GPU devices will be numbered from 0.
+    If `CUDA_VISIBLE_DEVICES` is not set, all devices are visible by default,
+    and the logical ID is the same as the actual ID.
+
+    Parameters:
+        id (int): GPU device ID.
 
     Examples:
         .. code-block:: python
@@ -892,14 +906,14 @@ All parameter, weight, gradient are variables in Paddle.
       .def("__str__", string::to_string<const platform::CUDAPlace &>);
   py::class_<platform::CPUPlace>(m, "CPUPlace", R"DOC(
-    CPUPlace is a descriptor of a device. It represents a CPU, and the memory
-    CPUPlace can be accessed by CPU.
+    CPUPlace is a descriptor of a device.
+    It represents a CPU device allocated or to be allocated with Tensor or LoDTensor.
 
     Examples:
         .. code-block:: python
 
           import paddle.fluid as fluid
           cpu_place = fluid.CPUPlace()
 
     )DOC")
       .def(py::init<>())
@@ -912,8 +926,12 @@ All parameter, weight, gradient are variables in Paddle.
       .def("__str__", string::to_string<const platform::CPUPlace &>);
   py::class_<platform::CUDAPinnedPlace>(m, "CUDAPinnedPlace", R"DOC(
-    CUDAPinnedPlace is a descriptor of a device. The memory of CUDAPinnedPlace
-    can be accessed by GPU and CPU.
+    CUDAPinnedPlace is a descriptor of a device.
+    It refers to the page-locked host memory allocated by the CUDA function `cudaHostAlloc()`.
+    The host operating system will not page out or swap this memory.
+    It can be accessed through direct memory access (DMA) to speed up data copies between the host and the GPU.
+    For more information on CUDA data transfer and `pinned memory`,
+    please refer to the `official document `_ .
 
     Examples:
         .. code-block:: python
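For orientation, here is a minimal sketch of how the three place descriptors documented above are constructed; it assumes a fluid 1.x install, and the two CUDA lines require a GPU-enabled build:

.. code-block:: python

    import paddle.fluid as fluid

    cpu_place = fluid.CPUPlace()                # ordinary host memory
    if fluid.is_compiled_with_cuda():
        gpu_place = fluid.CUDAPlace(0)          # logical GPU 0, numbered after CUDA_VISIBLE_DEVICES is applied
        pinned_place = fluid.CUDAPinnedPlace()  # page-locked host memory for fast host-GPU copies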
diff --git a/python/paddle/fluid/backward.py b/python/paddle/fluid/backward.py
index 07d69fadb95ba5a17749bb9fecee95e30c1633b3..21a690a4328fc6c49be3ee18eb5bd8fcc70f5714 100644
--- a/python/paddle/fluid/backward.py
+++ b/python/paddle/fluid/backward.py
@@ -919,29 +919,31 @@ def append_backward(loss,
                     callbacks=None,
                     checkpoints=None):
     """
-    Append backward part to main_program.
+    This function appends the backward part to main_program.
 
     A complete neural network training is made up of forward and backward
     propagation. However, when we configure a network, we only need to
-    specify its forwrd part. The backward part is generated automatically
-    according to the forward part by this function.
+    specify its forward part. This function uses the chain rule to automatically
+    generate the backward part according to the forward part.
 
-    In most cases, users do not need to invoke this function manually. It
-    will be automatically invoked by the optimizer's `minimize` function.
+    In most cases, users do not need to invoke this function manually.
+    It will be automatically invoked by the optimizer's `minimize` function.
 
-    Args:
-        loss(Variable): The loss variable of the network.
-        parameter_list(list[string]|None): Names of parameters that need
+    Parameters:
+        loss( :ref:`api_guide_Variable_en` ): The loss variable of the network.
+        parameter_list(list of str, optional): Names of parameters that need
            to be updated by optimizers.
            If it is None, all parameters
            will be updated.
-           Default: None
-        no_grad_set(set|None): Variables in the Block 0 whose gradients
+           Default: None.
+        no_grad_set(set of str, optional): Variable names in the :ref:`api_guide_Block_en` 0 whose gradients
           should be ignored. All variables with
           `stop_gradient=True` from all blocks will
           be automatically added into this set.
-           Default: None
-        callbacks(list[callable object]|None): The callbacks are used for
+           If this parameter is not None, the names in this set will be added to the default set.
+           Default: None.
+        callbacks(list of callable object, optional): List of callback functions.
+           The callbacks are used for
           doing some custom jobs during
           backward part building. All
           callable objects in it will
@@ -950,23 +952,23 @@
           into the program. The callable
           object must have two input
           parameters: 'block' and 'context'.
-           The 'block' is the block which
+           The 'block' is the :ref:`api_guide_Block_en` which
           the new gradient operator will
           be added to. The 'context' is a
           map, whose keys are gradient
           variable names and values are
-           corresponding original variables.
+           corresponding original :ref:`api_guide_Variable_en` .
           In addition to this, the 'context'
           has another special key-value pair:
           the key is string '__current_op_desc__'
           and the value is the op_desc of the
           gradient operator who has just
           triggered the callable object.
+           Default: None.
 
     Returns:
-        list[(Variable,Variable)]: Pairs of parameter and its
-        corresponding gradients. The key is the parameter and the
-        value is gradient variable.
+        list of tuple ( :ref:`api_guide_Variable_en` , :ref:`api_guide_Variable_en` ): Pairs of parameters and their corresponding gradients.
+        The key is the parameter and the value is the gradient variable.
 
     Raises:
         AssertionError: If `loss` is not an instance of Variable.
@@ -974,17 +976,20 @@
     Examples:
         .. code-block:: python
 
-            # network configuration code
-            # loss from ...
             import paddle.fluid as fluid
-            x = fluid.layers.data(name='x', shape=[13], dtype='float32')
-            y = fluid.layers.data(name='y', shape=[1], dtype='float32')
+            x = fluid.data(name='x', shape=[None, 13], dtype='float32')
+            y = fluid.data(name='y', shape=[None, 1], dtype='float32')
 
             y_predict = fluid.layers.fc(input=x, size=1, act=None)
             loss = fluid.layers.square_error_cost(input=y_predict, label=y)
             avg_loss = fluid.layers.mean(loss)
             param_grad_list = fluid.backward.append_backward(loss=avg_loss)
+            p_g_list1 = fluid.backward.append_backward(loss=avg_loss)  # len(p_g_list1) == 2
+            p_g_list2 = fluid.backward.append_backward(loss=avg_loss, parameter_list=[p_g_list1[0][0].name])  # len(p_g_list2) == 1
+            p_g_list3 = fluid.backward.append_backward(loss=avg_loss, no_grad_set=set([p_g_list1[0][0].name]))  # len(p_g_list3) == 1
+            p_g_list4 = fluid.backward.append_backward(loss=avg_loss, parameter_list=[p_g_list1[0][0].name], no_grad_set=set([p_g_list1[0][0].name]))  # len(p_g_list4) == 0
+
     """
     assert isinstance(loss, framework.Variable)
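The counts asserted in the example's comments follow from the network having exactly two trainable parameters (the fc weight and bias). Below is a hedged sketch of inspecting the returned pairs; printed names such as fc_0.w_0 are typical defaults and depend on how many layers were created beforehand:

.. code-block:: python

    import paddle.fluid as fluid

    x = fluid.data(name='x', shape=[None, 13], dtype='float32')
    y = fluid.data(name='y', shape=[None, 1], dtype='float32')
    y_predict = fluid.layers.fc(input=x, size=1, act=None)
    avg_loss = fluid.layers.mean(fluid.layers.square_error_cost(input=y_predict, label=y))

    # one (parameter, gradient) pair per trainable parameter
    for param, grad in fluid.backward.append_backward(loss=avg_loss):
        print(param.name, '->', grad.name)  # e.g. fc_0.w_0 -> fc_0.w_0@GRAD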
diff --git a/python/paddle/fluid/executor.py b/python/paddle/fluid/executor.py
index 6e8d07e2836f382f7482e5e97d9db1d45c411419..bb4d5bf3342196eaf93495eb244db46d0be4e9d1 100644
--- a/python/paddle/fluid/executor.py
+++ b/python/paddle/fluid/executor.py
@@ -67,11 +67,21 @@ def _switch_scope(scope):
 @signature_safe_contextmanager
 def scope_guard(scope):
     """
-    Change the global/default scope instance by Python `with` statement. All
-    variable in runtime will assigned to the new scope.
+    This function switches scope through the Python `with` statement.
+    Scope records the mapping between variable names and variables ( :ref:`api_guide_Variable` ),
+    similar to the scope delimited by braces in programming languages.
+    If this function is not invoked, all variables and variable names are recorded in the default global scope.
+    When users need to create variables with the same name,
+    they need to switch scopes through this function
+    if they do not want the mapping of variables with the same name to be overwritten.
+    After switching through the `with` statement,
+    all variables created in the `with` block will be assigned to a new scope.
+
+    Parameters:
+        scope (Scope): The new scope.
-    Args:
-        scope: The new global/default scope.
+
+    Returns:
+        None
 
     Examples:
         .. code-block:: python
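A runnable sketch of the scope switch described above; numpy is used only to give the variable a concrete value:

.. code-block:: python

    import numpy
    import paddle.fluid as fluid

    new_scope = fluid.Scope()
    with fluid.scope_guard(new_scope):
        # inside the `with` block the global scope is new_scope,
        # so the variable "data" is recorded there
        fluid.global_scope().var("data").get_tensor().set(
            numpy.ones((2, 2), dtype='float32'), fluid.CPUPlace())
    # after the block the previous global scope is restored,
    # but "data" is still findable in new_scope
    print(numpy.array(new_scope.find_var("data").get_tensor()))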
diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py
index b1f2b6df091ab12ab65aa673d0645f44012c64ae..f69963683823a222b76d8ebee5ad852d4245783f 100644
--- a/python/paddle/fluid/framework.py
+++ b/python/paddle/fluid/framework.py
@@ -62,17 +62,20 @@ _dygraph_current_expected_place_ = None
 def in_dygraph_mode():
     """
-    Check program status(tracer), Whether it runs in dygraph mode or not
+    This function checks whether the program runs in dynamic graph mode or not.
+    You can turn on dynamic graph mode with the :ref:`api_fluid_dygraph_guard` API.
 
     Returns:
-        out (boolean): True if the program is running in dynamic graph mode
+        bool: Whether the program is running in dynamic graph mode.
 
     Examples:
         .. code-block:: python
 
            import paddle.fluid as fluid
            if fluid.in_dygraph_mode():
-                pass
+                print('running in dygraph mode')
+            else:
+                print('not running in dygraph mode')
 
     """
     return _dygraph_tracer_ is not None
@@ -149,25 +152,29 @@ def is_compiled_with_cuda():
 def cuda_places(device_ids=None):
     """
-    Create a list of :code:`fluid.CUDAPlace` objects.
+    **Note**:
+    For multi-card tasks, please use the `FLAGS_selected_gpus` environment variable to set the visible GPU devices.
+    The next version will fix the problem with the `CUDA_VISIBLE_DEVICES` environment variable.
+
+    This function creates a list of :code:`fluid.CUDAPlace` objects.
 
     If :code:`device_ids` is None, environment variable of
-    :code:`FLAGS_selected_gpus` would be checked first. If
+    :code:`FLAGS_selected_gpus` would be checked first. For example, if
     :code:`FLAGS_selected_gpus=0,1,2`, the returned list would
     be [fluid.CUDAPlace(0), fluid.CUDAPlace(1), fluid.CUDAPlace(2)].
     If :code:`FLAGS_selected_gpus` is not set, all visible
-    gpu places would be returned.
+    gpu places would be returned according to the :code:`CUDA_VISIBLE_DEVICES` environment variable.
 
     If :code:`device_ids` is not None, it should be the device
-    ids of gpus. For example, if :code:`device_ids=[0,1,2]`,
+    ids of GPUs. For example, if :code:`device_ids=[0,1,2]`,
     the returned list would be
     [fluid.CUDAPlace(0), fluid.CUDAPlace(1), fluid.CUDAPlace(2)].
 
-    Args:
-        device_ids (None|list(int)|tuple(int)): gpu device id list.
+    Parameters:
+        device_ids (list or tuple of int, optional): list of GPU device ids. Default: None.
 
     Returns:
-        out (list(fluid.CUDAPlace)): gpu place list.
+        list of fluid.CUDAPlace: Created GPU place list.
 
     Examples:
         .. code-block:: python
@@ -187,18 +194,20 @@ def cpu_places(device_count=None):
     """
-    Create a list of :code:`fluid.CPUPlace` objects.
+    This function creates a list of :code:`fluid.CPUPlace` objects, and returns the created list.
 
     If :code:`device_count` is None, the device count would
     be determined by environment variable :code:`CPU_NUM`.
     If :code:`CPU_NUM` is not set, the default value is 1,
     i.e. CPU_NUM=1.
+    :code:`CPU_NUM` indicates the number of devices used in the current task.
+    The running of the program can be accelerated if :code:`CPU_NUM` is the same as the number of physical cores.
 
-    Args:
-        device_count (None|int): device number.
+    Parameters:
+        device_count (int, optional): the number of devices. Default: None.
 
     Returns:
-        out (list(fluid.CPUPlace)): cpu place list.
+        list of fluid.CPUPlace: Created list of CPU places.
 
     Examples:
         .. code-block:: python
@@ -214,18 +223,20 @@ def cuda_pinned_places(device_count=None):
     """
-    Create a list of :code:`fluid.CUDAPinnedPlace` objects.
+    This function creates a list of :code:`fluid.CUDAPinnedPlace` objects.
 
     If :code:`device_count` is None, the device count would
     be determined by environment variable :code:`CPU_NUM`.
-    If :code:`CPU_NUM` is not set, the device count would
-    be determined by :code:`multiprocessing.cpu_count()`.
+    If :code:`CPU_NUM` is not set, the default value is 1,
+    i.e. CPU_NUM=1.
+    :code:`CPU_NUM` indicates the number of devices used in the current task.
+    The running of the program can be accelerated if :code:`CPU_NUM` is the same as the number of physical cores.
 
-    Args:
-        device_count (None|int): device number.
+    Parameters:
+        device_count (int, optional): the number of devices. Default: None.
 
     Returns:
-        out (list(fluid.CUDAPinnedPlace)): cuda pinned place list.
+        list of fluid.CUDAPinnedPlace: Created list of CUDA pinned places.
 
     Examples:
         .. code-block:: python
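            # Illustrative sketch (assumes a CUDA-enabled build of fluid):
            import paddle.fluid as fluid
            # with no argument, the place count is taken from CPU_NUM (default 1)
            pinned_places = fluid.cuda_pinned_places()
            # or request an explicit number of pinned-memory places
            more_pinned_places = fluid.cuda_pinned_places(device_count=2)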