Unverified · Commit 1a15a351 authored by 小飞猪, committed by GitHub

[xdoctest][task 184-185] reformat example code with google style in `distributed/auto_parallel/static/*` (#56666)

* [Doctest]fix No.184,185, test=docs_preview

* add env skip

* fix @staticmethod

* fix

* add xdoctest for v2

* fix
Parent: 8746e230
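For context, a minimal sketch of the xdoctest/Google style this commit applies to the docstring examples (the function below is purely illustrative and not part of the patch): statements carry `>>> ` and `... ` prompts, expected output follows on unprefixed lines, and multi-card examples are gated behind `# doctest: +REQUIRES(env:DISTRIBUTED)` so they are skipped unless that environment variable is set.

    # Illustrative only: a docstring example written in the style this PR applies.
    def scaled(values, factor):
        """Return ``values`` with every element multiplied by ``factor``.

        Examples:
            .. code-block:: python

                >>> # doctest: +REQUIRES(env:DISTRIBUTED)
                >>> scaled([1, 2, 3], factor=2)
                [2, 4, 6]
        """
        return [v * factor for v in values]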
@@ -58,14 +58,15 @@ class DeviceMesh(core.DeviceMesh):
     Examples:
         .. code-block:: python
 
-            import paddle
-            import paddle.distributed as dist
-
-            paddle.enable_static()
-            mesh = dist.DeviceMesh([[2, 4, 5], [0, 1, 3]])
-            assert mesh.shape == [2, 3]
-            assert mesh.device_ids == [2, 4, 5, 0, 1, 3]
+            >>> # doctest: +REQUIRES(env:DISTRIBUTED)
+            >>> import paddle
+            >>> import paddle.distributed as dist
+            >>> paddle.enable_static()
+            >>> mesh = dist.DeviceMesh([[2, 4, 5], [0, 1, 3]])
+            >>> assert mesh.shape == [2, 3]
+            >>> assert mesh.device_ids == [2, 4, 5, 0, 1, 3]
     """
@@ -101,28 +101,30 @@ class Converter:
     Examples:
         .. code-block:: python
 
-            import numpy as np
-            complete_tensors = np.arange(4).reshape([2, 2])
-            partitial_tensors = np.split(complete_tensors, 2, axis=0)
-            name = "tmp_0"
-            tensors_dict = {name: partitial_tensors}
-            strategy_1 = {
-                name: {
-                    "process_shape": [2],
-                    "process_group": [0, 1],
-                    "dims_mapping": [0, -1]
-                }
-            }
-            strategy_2 = {
-                name: {
-                    "process_shape": [2],
-                    "process_group": [0, 1],
-                    "dims_mapping": [-1, -1]
-                }
-            }
-            converter = Converter(tensors_dict, strategy_1, strategy_2)
-            result = converter.convert()
-            # the result's value is equal to `complete_tensors`
+            >>> # doctest: +REQUIRES(env:DISTRIBUTED)
+            >>> import numpy as np
+            >>> from paddle.distributed.auto_parallel.static.converter import Converter
+            >>> complete_tensors = np.arange(4).reshape([2, 2])
+            >>> partitial_tensors = np.split(complete_tensors, 2, axis=0)
+            >>> name = "tmp_0"
+            >>> tensors_dict = {name: partitial_tensors}
+            >>> strategy_1 = {
+            ...     name: {
+            ...         "process_shape": [2],
+            ...         "process_group": [0, 1],
+            ...         "dims_mapping": [0, -1]
+            ...     }
+            ... }
+            >>> strategy_2 = {
+            ...     name: {
+            ...         "process_shape": [2],
+            ...         "process_group": [0, 1],
+            ...         "dims_mapping": [-1, -1]
+            ...     }
+            ... }
+            >>> converter = Converter(tensors_dict, strategy_1, strategy_2)
+            >>> result = converter.convert()
+            >>> # the result's value is equal to `complete_tensors`
     """
     tensors_dict = {}
     # the name which is in cur_process but not in pre_process
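An aside for readers, not part of the patch: in plain NumPy terms the example above converts "tmp_0" from strategy_1 (sharded along axis 0 across ranks 0 and 1, dims_mapping [0, -1]) to strategy_2 (fully replicated, dims_mapping [-1, -1]), which amounts to stitching the row shards back together; a rough sketch of that reading:

    # Sketch only, not Paddle's implementation: re-assembling the row shards
    # is what makes the converted result equal to `complete_tensors`.
    import numpy as np

    complete_tensors = np.arange(4).reshape([2, 2])
    partitial_tensors = np.split(complete_tensors, 2, axis=0)   # shards held by ranks 0 and 1
    restored = np.concatenate(partitial_tensors, axis=0)        # replicated view on every rank
    assert (restored == complete_tensors).all()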
@@ -352,13 +354,18 @@ class Converter:
     Examples:
         .. code-block:: python
 
-            import numpy as np
-            partition_tensor_list = [(np.array([[[1.11, 1.12]]]), [[0,1],[0,1],[0,2]])]
-            tensor = np.array([[[1.13, 1.14]]])
-            partition_index = [[0,1],[0,1],[2,4]]
-
-            _merge_tensor(partition_tensor_list, tensor, partition_index)
-            # partition_tensor_list: [(np.array([[[1.11, 1.12, 1.13, 1.14]]]), [[0,1],[0,1],[0,4]])]
+            >>> # doctest: +REQUIRES(env:DISTRIBUTED)
+            >>> import numpy as np
+            >>> import paddle
+            >>> from paddle.distributed.auto_parallel.static.converter import Converter
+            >>> partition_tensor_list = [(np.array([[[1.11, 1.12]]]), [[0,1],[0,1],[0,2]])]
+            >>> tensor = np.array([[[1.13, 1.14]]])
+            >>> partition_index = [[0,1],[0,1],[2,4]]
+            >>> complete_shape = [3, 2]
+            >>> Converter.merge(partition_tensor_list, tensor, partition_index, complete_shape)
+            >>> print(partition_tensor_list)
+            [(array([[[1.11, 1.12, 1.13, 1.14]]]), [[0, 1], [0, 1], [0, 4]])]
     """
     from .reshard import Resharder
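As an aside (not part of the patch), the merge shown above can be read as plain block concatenation: the two partitions agree on axes 0 and 1 and their index ranges are adjacent on axis 2 (0-2 followed by 2-4), so the blocks join along that axis and the merged range widens to [0, 4]. A NumPy-only sketch of that reading:

    # Illustration only, not the Converter.merge implementation.
    import numpy as np

    left  = (np.array([[[1.11, 1.12]]]), [[0, 1], [0, 1], [0, 2]])
    right = (np.array([[[1.13, 1.14]]]), [[0, 1], [0, 1], [2, 4]])

    merged = np.concatenate([left[0], right[0]], axis=2)   # adjacent ranges on axis 2
    merged_index = [[0, 1], [0, 1], [0, 4]]
    print(merged)          # [[[1.11 1.12 1.13 1.14]]]
    print(merged_index)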
@@ -416,16 +423,19 @@ class Converter:
     Examples:
         .. code-block:: python
 
-            import numpy as np
-            complete_tensor = np.array([[[1.11, 1.12, 1.13, 1.14, 1.15, 1.16]]])
-            rank = 2
-            complete_shape = [1, 1, 6]
-            dims_mapping = [-1, -1, 0]
-            process_shape = [3]
-            process_group = [0, 1, 2]
-
-            sliced_tensor_list = split(complete_tensor, [[], [], [2, 4]], 3)
-            # [array([[[1.11, 1.12]]]), array([[[1.13, 1.14]]]), array([[[1.15, 1.16]]])]
+            >>> # doctest: +REQUIRES(env:DISTRIBUTED)
+            >>> import numpy as np
+            >>> from paddle.distributed.auto_parallel.static.converter import Converter
+            >>> complete_tensor = np.array([[[1.11, 1.12, 1.13, 1.14, 1.15, 1.16]]])
+            >>> rank = 2
+            >>> complete_shape = [1, 1, 6]
+            >>> dims_mapping = [-1, -1, 0]
+            >>> process_shape = [3]
+            >>> process_group = [0, 1, 2]
+            >>> sliced_tensor_list = Converter.split(complete_tensor, [[], [], [2, 4]], 3)
+            >>> print(sliced_tensor_list)
+            [array([[[1.11, 1.12]]]), array([[[1.13, 1.14]]]), array([[[1.15, 1.16]]])]
     """
     sliced_tensor_list = []
     axis = len(complete_tensor.shape) - length
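Again as an aside, the expected output of Converter.split above matches cutting the array with NumPy at the per-axis indices [[], [], [2, 4]], i.e. splitting only the last axis at positions 2 and 4:

    # Illustration only: NumPy equivalent of the split shown in the example.
    import numpy as np

    complete_tensor = np.array([[[1.11, 1.12, 1.13, 1.14, 1.15, 1.16]]])
    print(np.split(complete_tensor, [2, 4], axis=2))
    # [array([[[1.11, 1.12]]]), array([[[1.13, 1.14]]]), array([[[1.15, 1.16]]])]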
@@ -453,15 +463,18 @@ class Converter:
     Examples:
         .. code-block:: python
 
-            import numpy as np
-            complete_tensor = np.array([[[1.11, 1.12, 1.13, 1.14, 1.15, 1.16]]])
-            complete_shape = [1, 1, 6]
-            dims_mapping = [-1, -1, 0]
-            process_shape = [3]
-            process_group = [0, 1, 2]
-
-            index = _get_split_indices(complete_shape, dims_mapping, process_shape, process_group)
-            # index: [[], [], [2, 4]]
+            >>> # doctest: +REQUIRES(env:DISTRIBUTED)
+            >>> import numpy as np
+            >>> from paddle.distributed.auto_parallel.static.utils import _get_split_indices
+            >>> complete_tensor = np.array([[[1.11, 1.12, 1.13, 1.14, 1.15, 1.16]]])
+            >>> complete_shape = [1, 1, 6]
+            >>> dims_mapping = [-1, -1, 0]
+            >>> process_shape = [3]
+            >>> process_group = [0, 1, 2]
+            >>> index = _get_split_indices(complete_shape, dims_mapping, process_shape, process_group)
+            >>> print(index)
+            [[], [], [2, 4]]
     """
     from .reshard import Resharder
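For intuition (this is not the Paddle implementation, which goes through Resharder): only the last axis has a dims_mapping entry other than -1, so only that axis is cut, into process_shape[0] == 3 equal pieces of a length-6 dimension, giving interior boundaries at 2 and 4; unsplit axes contribute empty lists. A rough sketch of that computation:

    # Sketch only: recomputing the expected [[], [], [2, 4]] by hand.
    complete_shape = [1, 1, 6]
    dims_mapping = [-1, -1, 0]
    process_shape = [3]

    indices = []
    for size, dim in zip(complete_shape, dims_mapping):
        if dim == -1:
            indices.append([])                    # axis is not partitioned
        else:
            parts = process_shape[dim]            # number of shards along this axis
            step = size // parts
            indices.append([step * i for i in range(1, parts)])  # interior cut points
    print(indices)  # [[], [], [2, 4]]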
@@ -502,21 +515,20 @@ class Converter:
     Examples:
         .. code-block:: python
 
-            import numpy as np
-            complete_tensor = np.array([[[1.11, 1.12, 1.13, 1.14, 1.15, 1.16]]])
-            rank = 2
-            complete_shape = [1, 1, 6]
-            dims_mapping = [-1, -1, 0]
-            process_shape = [3]
-            process_group = [0, 1, 2]
-
-            slice_tensor = _slice_tensor(complete_tensor, [[], [], [2, 4]], 3)
-            # slice_tensor:
-            # [array([[[1.11, 1.12]]]), array([[[1.13, 1.14]]]), array([[[1.15, 1.16]]])]
-
-            index = _get_sliced_index(rank, complete_shape, dims_mapping
-                                      process_shape, process_group)
-            # index: 2
+            >>> # doctest: +REQUIRES(env:DISTRIBUTED)
+            >>> import numpy as np
+            >>> from paddle.distributed.auto_parallel.static.converter import Converter
+            >>> complete_tensor = np.array([[[1.11, 1.12, 1.13, 1.14, 1.15, 1.16]]])
+            >>> rank = 2
+            >>> complete_shape = [1, 1, 6]
+            >>> dims_mapping = [-1, -1, 0]
+            >>> process_shape = [3]
+            >>> process_group = [0, 1, 2]
+            >>> index = Converter._get_sliced_index(rank, complete_shape, dims_mapping,
+            ...                                     process_shape, process_group)
+            >>> print(index)
+            2
     """
     from .reshard import Resharder
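A minimal sketch of how examples in this style can be exercised locally, assuming xdoctest and a Paddle build with these modules are installed; setting the DISTRIBUTED variable is intended to satisfy the `# doctest: +REQUIRES(env:DISTRIBUTED)` guards added in this patch:

    # Sketch only: run the converter module's docstring examples through xdoctest.
    import os
    import xdoctest

    os.environ["DISTRIBUTED"] = "1"   # enable examples gated on env:DISTRIBUTED

    # Collect and execute every >>> example in the module's docstrings.
    xdoctest.doctest_module(
        "paddle.distributed.auto_parallel.static.converter", command="all"
    )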