Unverified commit f2fa3f73, authored by Zeng Jinle, committed by GitHub

fix api doc,test=develop (#17241)

Parent 4f859408
@@ -32,8 +32,8 @@ paddle.fluid.release_memory (ArgSpec(args=['input_program', 'skip_opt_set'], var
 paddle.fluid.DistributeTranspilerConfig.__init__
 paddle.fluid.ParallelExecutor.__init__ (ArgSpec(args=['self', 'use_cuda', 'loss_name', 'main_program', 'share_vars_from', 'exec_strategy', 'build_strategy', 'num_trainers', 'trainer_id', 'scope'], varargs=None, keywords=None, defaults=(None, None, None, None, None, 1, 0, None)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
 paddle.fluid.ParallelExecutor.run (ArgSpec(args=['self', 'fetch_list', 'feed', 'feed_dict', 'return_numpy'], varargs=None, keywords=None, defaults=(None, None, True)), ('document', '2cb4bd74481861345c70228a0f57620c'))
-paddle.fluid.create_lod_tensor (ArgSpec(args=['data', 'recursive_seq_lens', 'place'], varargs=None, keywords=None, defaults=None), ('document', '8e7bb21e83ff4604f5b379672e285b94'))
-paddle.fluid.create_random_int_lodtensor (ArgSpec(args=['recursive_seq_lens', 'base_shape', 'place', 'low', 'high'], varargs=None, keywords=None, defaults=None), ('document', '368f638b99f1dfe59e9b02aa6f077752'))
+paddle.fluid.create_lod_tensor (ArgSpec(args=['data', 'recursive_seq_lens', 'place'], varargs=None, keywords=None, defaults=None), ('document', 'b82ea20e2dc5ff2372e0643169ca47ff'))
+paddle.fluid.create_random_int_lodtensor (ArgSpec(args=['recursive_seq_lens', 'base_shape', 'place', 'low', 'high'], varargs=None, keywords=None, defaults=None), ('document', '74dc6d23185d90a7a50fbac19f5b65fb'))
 paddle.fluid.DataFeedDesc.__init__ (ArgSpec(args=['self', 'proto_file'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
 paddle.fluid.DataFeedDesc.desc (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '4294493e31c4bc9fc4bd48753044235f'))
 paddle.fluid.DataFeedDesc.set_batch_size (ArgSpec(args=['self', 'batch_size'], varargs=None, keywords=None, defaults=None), ('document', '8d9f44601e0a99dd431f14fd9250cd21'))
...
@@ -383,17 +383,18 @@ PYBIND11_MODULE(core, m) {
 LoD is short for Level of Details and is usually used for varied sequence
 length. You can skip the following comment if you don't need optional LoD.
-For example:
-A LoDTensor X can look like the example below. It contains 2 sequences.
-The first has length 2 and the second has length 3, as described by x.lod.
+For example, a LoDTensor X can look like the example below. It contains
+2 sequences. The first has length 2 and the second has length 3, as
+described by x.lod.
 The first tensor dimension 5=2+3 is calculated from LoD if it's available.
 It means the total number of sequence elements. In X, each element has 2
 columns, hence [5, 2].
 x.lod = [[2, 3]]
-x.data = [[1, 2], [3, 4],
-          [5, 6], [7, 8], [9, 10]]
+x.data = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]
 x.shape = [5, 2]
 LoD can have multiple levels (for example, a paragraph can have multiple
@@ -404,8 +405,16 @@ PYBIND11_MODULE(core, m) {
 respectively. And the second sequence's 1 sub-sequence has length 3.
 y.lod = [[2 1], [2 2 3]]
 y.shape = [2+2+3, ...]
+
+Examples:
+    .. code-block:: python
+
+        import paddle.fluid as fluid
+
+        t = fluid.LoDTensor()
+
 Note:
 In above description, LoD is length-based. In Paddle internal
 implementation, lod is offset-based. Hence, internally,
@@ -416,7 +425,6 @@ PYBIND11_MODULE(core, m) {
 self-explanatory. In this case, it must be length-based. Due to history
 reasons, when LoD is called lod in public API, it might be offset-based.
 Users should be careful about it.
 )DOC")
 .def("__array__", [](Tensor &self) { return TensorToPyArray(self); })
 .def("__init__",
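The tensor X and the length/offset distinction described in the docstring above can be reproduced from Python with the methods documented in this change. A minimal sketch, assuming a CPU place (the int64 data is only for illustration):

.. code-block:: python

    import numpy as np
    import paddle.fluid as fluid

    # X from the docstring above: 2 sequences of lengths 2 and 3,
    # each element has 2 columns, hence shape [5, 2].
    x = fluid.LoDTensor()
    x.set(np.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]], dtype='int64'),
          fluid.CPUPlace())

    # The length-based description [[2, 3]] ...
    x.set_recursive_sequence_lengths([[2, 3]])

    # ... is stored internally in the offset-based form.
    print(x.lod())                         # [[0, 2, 5]]
    print(x.recursive_sequence_lengths())  # [[2, 3]]
    print(np.array(x).shape)               # (5, 2)

Calling set_lod([[0, 2, 5]]) instead of set_recursive_sequence_lengths([[2, 3]]) leaves the tensor in the same state; the offset form is what lod() reports.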
@@ -454,6 +462,16 @@ PYBIND11_MODULE(core, m) {
 Args:
     lod (List[List[int]]): the lod to be set.
+
+Examples:
+    .. code-block:: python
+
+        import paddle.fluid as fluid
+        import numpy as np
+
+        t = fluid.LoDTensor()
+        t.set(np.ndarray([5, 30]), fluid.CPUPlace())
+        t.set_lod([[0, 2, 5]])
 )DOC")
 .def("set_recursive_sequence_lengths",
 [](LoDTensor &self, const std::vector<std::vector<size_t>>
@@ -480,6 +498,16 @@ PYBIND11_MODULE(core, m) {
 Args:
     recursive_sequence_lengths (List[List[int]]): sequence lengths.
+
+Examples:
+    .. code-block:: python
+
+        import paddle.fluid as fluid
+        import numpy as np
+
+        t = fluid.LoDTensor()
+        t.set(np.ndarray([5, 30]), fluid.CPUPlace())
+        t.set_recursive_sequence_lengths([[2, 3]])
 )DOC")
 .def("lod",
 [](LoDTensor &self) -> std::vector<std::vector<size_t>> {
@@ -495,6 +523,17 @@ PYBIND11_MODULE(core, m) {
 Returns:
     out (List[List[int]]): the lod of the LoDTensor.
+
+Examples:
+    .. code-block:: python
+
+        import paddle.fluid as fluid
+        import numpy as np
+
+        t = fluid.LoDTensor()
+        t.set(np.ndarray([5, 30]), fluid.CPUPlace())
+        t.set_lod([[0, 2, 5]])
+        print(t.lod())  # [[0, 2, 5]]
 )DOC")
 // Set above comments of set_lod.
 .def("recursive_sequence_lengths",
@@ -511,6 +550,17 @@ PYBIND11_MODULE(core, m) {
 Returns:
     out (List[List[int]]): the sequence lengths.
+
+Examples:
+    .. code-block:: python
+
+        import paddle.fluid as fluid
+        import numpy as np
+
+        t = fluid.LoDTensor()
+        t.set(np.ndarray([5, 30]), fluid.CPUPlace())
+        t.set_recursive_sequence_lengths([[2, 3]])
+        print(t.recursive_sequence_lengths())  # [[2, 3]]
 )DOC")
 .def("has_valid_recursive_sequence_lengths",
 [](LoDTensor &self) -> bool {
@@ -523,6 +573,17 @@ PYBIND11_MODULE(core, m) {
 Returns:
     out (bool): whether the lod is valid.
+
+Examples:
+    .. code-block:: python
+
+        import paddle.fluid as fluid
+        import numpy as np
+
+        t = fluid.LoDTensor()
+        t.set(np.ndarray([5, 30]), fluid.CPUPlace())
+        t.set_recursive_sequence_lengths([[2, 3]])
+        print(t.has_valid_recursive_sequence_lengths())  # True
 )DOC")
 .def("__getitem__", PySliceTensor, py::return_value_policy::reference,
 R"DOC(
@@ -985,7 +1046,16 @@ All parameter, weight, gradient are variables in Paddle.
 return res;
 });
-py::class_<LoDTensorArray>(m, "LoDTensorArray")
+py::class_<LoDTensorArray>(m, "LoDTensorArray", R"DOC(
+Array of LoDTensor.
+
+Examples:
+    .. code-block:: python
+
+        import paddle.fluid as fluid
+
+        arr = fluid.LoDTensorArray()
+)DOC")
 .def("__init__",
 [](LoDTensorArray &instance) { new (&instance) LoDTensorArray(); })
 .def("__getitem__",
@@ -1004,7 +1074,20 @@ All parameter, weight, gradient are variables in Paddle.
 self.back().ShareDataWith(t);
 self.back().set_lod(t.lod());
 },
-py::arg("tensor"), "Append a LoDTensor to LoDTensorArray.");
+py::arg("tensor"), R"DOC(
+Append a LoDTensor to LoDTensorArray.
+
+Examples:
+    .. code-block:: python
+
+        import paddle.fluid as fluid
+        import numpy as np
+
+        arr = fluid.LoDTensorArray()
+        t = fluid.LoDTensor()
+        t.set(np.ndarray([5, 30]), fluid.CPUPlace())
+        arr.append(t)
+)DOC");
 m.def("IsInplace",
 [](std::string op) -> bool { return operators::IsInplace(op); });
...
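To round out the new LoDTensorArray docstrings in the hunks above, here is a slightly fuller usage sketch. It assumes a CPU place and that the __getitem__ binding shown above returns the stored LoDTensor:

.. code-block:: python

    import numpy as np
    import paddle.fluid as fluid

    arr = fluid.LoDTensorArray()

    t = fluid.LoDTensor()
    t.set(np.random.random([5, 30]).astype('float32'), fluid.CPUPlace())
    t.set_recursive_sequence_lengths([[2, 3]])

    # append() shares the tensor's data and copies its lod into the array
    # (ShareDataWith / set_lod in the binding above).
    arr.append(t)

    first = arr[0]
    print(first.recursive_sequence_lengths())  # [[2, 3]]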
@@ -47,6 +47,13 @@ def create_lod_tensor(data, recursive_seq_lens, place):
 sentence. This length-based :code:`recursive_seq_lens` [[2, 3]] will be converted to
 offset-based LoD [[0, 2, 5]] inside the function call.
+
+    .. code-block:: python
+
+        import paddle.fluid as fluid
+        import numpy as np
+
+        t = fluid.create_lod_tensor(np.ndarray([5, 30]), [[2, 3]], fluid.CPUPlace())
 Please reference :ref:`api_guide_low_level_lod_tensor` for more details
 regarding LoD.
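The length-to-offset conversion described above can be observed directly. A minimal sketch, assuming a CPU place (the random data is only a placeholder):

.. code-block:: python

    import numpy as np
    import paddle.fluid as fluid

    # Two sequences of lengths 2 and 3 over the 5 rows of data.
    t = fluid.create_lod_tensor(np.random.random([5, 30]), [[2, 3]], fluid.CPUPlace())

    print(t.has_valid_recursive_sequence_lengths())  # True
    print(t.lod())  # [[0, 2, 5]] -- the offset-based form of [[2, 3]]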
@@ -127,6 +134,14 @@ def create_random_int_lodtensor(recursive_seq_lens, base_shape, place, low,
 Returns:
     A fluid LoDTensor object with tensor data and recursive_seq_lens info.
+
+Examples:
+    .. code-block:: python
+
+        import paddle.fluid as fluid
+
+        t = fluid.create_random_int_lodtensor(recursive_seq_lens=[[2, 3]],
+                base_shape=[30], place=fluid.CPUPlace(), low=0, high=10)
 """
 assert isinstance(base_shape, list), "base_shape should be a list"
 # append the total number of basic elements to the front of its shape
...
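As the in-function comment above notes, the total number of basic elements (2 + 3 = 5 here) is prepended to base_shape. A minimal sketch of the resulting shape, assuming a CPU place:

.. code-block:: python

    import numpy as np
    import paddle.fluid as fluid

    t = fluid.create_random_int_lodtensor(recursive_seq_lens=[[2, 3]],
                                          base_shape=[30],
                                          place=fluid.CPUPlace(),
                                          low=0, high=10)

    # 5 basic elements, each of base_shape [30] -> overall shape [5, 30].
    print(np.array(t).shape)               # (5, 30)
    print(t.recursive_sequence_lengths())  # [[2, 3]]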
@@ -48,6 +48,8 @@ class ParamAttr(object):
 Examples:
     .. code-block:: python
+        import paddle.fluid as fluid
+
         w_param_attrs = fluid.ParamAttr(name="fc_weight",
                                         learning_rate=0.5,
                                         regularizer=fluid.regularizer.L2Decay(1.0),
...
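The ParamAttr example in the hunk above is cut off by the diff; for context, such an attribute is typically passed to a layer. A hedged sketch (the fluid.layers.data and fluid.layers.fc calls are standard fluid 1.x API but are not part of this change):

.. code-block:: python

    import paddle.fluid as fluid

    w_param_attrs = fluid.ParamAttr(name="fc_weight",
                                    learning_rate=0.5,
                                    regularizer=fluid.regularizer.L2Decay(1.0))

    # Use the attribute to configure the weight parameter of a layer.
    x = fluid.layers.data(name='x', shape=[10], dtype='float32')
    y_predict = fluid.layers.fc(input=x, size=10, param_attr=w_param_attrs)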