Unverified commit c958ba74 authored by 小飞猪, committed by GitHub


[xdoctest][task 248-249,266-267,269] reformat example code with google style in `incubate/distributed/fleet/*`,`incubate/nn/layer/*` (#56772)

* [Doctest]fix No.248-249,266-267,269, test=docs_preview

* fix style

* fix

* add env:DISTRIBUTED
Parent e9364a38
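For reference, the Google-style doctest format this PR applies prefixes every statement with `>>> ` (continuation lines with `... `), moves expected output onto the following unprefixed line so xdoctest can verify it, and replaces the old `# required: gpu` comment with an executable `+REQUIRES` directive. A minimal before/after sketch, illustrative only and not taken from any one file in this diff:

    Before (plain code the doc checker cannot execute):

        Examples:
            .. code-block:: python

                # required: gpu
                import paddle
                print(paddle.ones([2]).shape) # [2]

    After (Google style, runnable and checked under xdoctest):

        Examples:
            .. code-block:: python

                >>> # doctest: +REQUIRES(env:GPU)
                >>> import paddle
                >>> print(paddle.ones([2]).shape)
                [2]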
@@ -55,11 +55,13 @@ class HashName(PSDispatcher):
     Examples:
         .. code-block:: python
 
-            pserver_endpoints = ["127.0.0.1:6007", "127.0.0.1:6008"]
-            vars = ["var1","var2","var3","var4","var5"]
-            rr = RoundRobin(pserver_endpoints)
-            rr.dispatch(vars)
+            >>> from paddle.incubate.distributed.fleet.parameter_server.ir.ps_dispatcher import HashName
+            >>> pserver_endpoints = ["127.0.0.1:6007", "127.0.0.1:6008"]
+            >>> vars = ["var1","var2","var3","var4","var5"]
+            >>> rr = HashName(pserver_endpoints)
+            >>> rr.dispatch(vars)
 
     """
@@ -95,11 +97,13 @@ class RoundRobin(PSDispatcher):
     Examples:
         .. code-block:: python
 
-            pserver_endpoints = ["127.0.0.1:6007", "127.0.0.1:6008"]
-            vars = ["var1","var2","var3","var4","var5"]
-            rr = RoundRobin(pserver_endpoints)
-            rr.dispatch(vars)
+            >>> from paddle.incubate.distributed.fleet.parameter_server.ir.ps_dispatcher import RoundRobin
+            >>> pserver_endpoints = ["127.0.0.1:6007", "127.0.0.1:6008"]
+            >>> vars = ["var1","var2","var3","var4","var5"]
+            >>> rr = RoundRobin(pserver_endpoints)
+            >>> rr.dispatch(vars)
 
     """
......
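Background for the two hunks above: both dispatchers map a list of variables onto the parameter-server endpoints. HashName picks an endpoint from a hash of each variable's name, while RoundRobin simply cycles through the endpoints in order. A standalone sketch of the two policies, illustrative only (the real classes in ps_dispatcher.py operate on variable objects, not bare name strings):

    import zlib

    # Hypothetical stand-ins for HashName.dispatch and RoundRobin.dispatch.
    def hash_name_dispatch(endpoints, names):
        # a stable hash of each variable's name selects its endpoint
        return [endpoints[zlib.crc32(n.encode()) % len(endpoints)] for n in names]

    def round_robin_dispatch(endpoints, names):
        # variables are spread over the endpoints in arrival order
        return [endpoints[i % len(endpoints)] for i in range(len(names))]

    endpoints = ["127.0.0.1:6007", "127.0.0.1:6008"]
    names = ["var1", "var2", "var3", "var4", "var5"]
    print(round_robin_dispatch(endpoints, names))
    # ['127.0.0.1:6007', '127.0.0.1:6008', '127.0.0.1:6007',
    #  '127.0.0.1:6008', '127.0.0.1:6007']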
@@ -46,15 +46,17 @@ class FusedEcMoe(Layer):
     Examples:
         .. code-block:: python
 
-            # required: gpu
-            import paddle
-            from paddle.incubate.nn.layer.fused_ec_moe import FusedEcMoe
-
-            x = paddle.randn([10, 128, 1024]) # [bsz, seq_len, d_model]
-            gate = paddle.randn([10, 128, 8]) # [bsz, seq_len, num_experts]
-            moe = FusedEcMoe(1024, 4096, 8, act_type="gelu")
-            y = moe(x, gate)
-            print(y.shape) # [10, 128, 1024]
+            >>> # doctest: +REQUIRES(env:GPU)
+            >>> import paddle
+            >>> paddle.device.set_device('gpu')
+            >>> from paddle.incubate.nn.layer.fused_ec_moe import FusedEcMoe
+
+            >>> x = paddle.randn([10, 128, 1024]) # [bsz, seq_len, d_model]
+            >>> gate = paddle.randn([10, 128, 8]) # [bsz, seq_len, num_experts]
+            >>> moe = FusedEcMoe(1024, 4096, 8, act_type="gelu")
+            >>> y = moe(x, gate)
+            >>> print(y.shape)
+            [10, 128, 1024]
     """
 
     def __init__(
......
@@ -56,14 +56,16 @@ class FusedLinear(Layer):
     Examples:
         .. code-block:: python
 
-            # required: gpu
-            import paddle
-            from paddle.incubate.nn import FusedLinear
-
-            x = paddle.randn([3, 4])
-            linear = FusedLinear(4, 5)
-            y = linear(x)
-            print(y.shape) # [3, 5]
+            >>> # doctest: +REQUIRES(env:GPU)
+            >>> import paddle
+            >>> paddle.device.set_device('gpu')
+            >>> from paddle.incubate.nn import FusedLinear
+
+            >>> x = paddle.randn([3, 4])
+            >>> linear = FusedLinear(4, 5)
+            >>> y = linear(x)
+            >>> print(y.shape)
+            [3, 5]
     """
 
     def __init__(
......
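A note on the two GPU hunks above: the `# doctest: +REQUIRES(env:GPU)` directive only makes xdoctest skip the example when the GPU environment variable is unset, while the added `paddle.device.set_device('gpu')` call is what actually places the tensors on the GPU. A hedged sketch of exercising such a gated example with stock xdoctest (the file name and invocation are illustrative; Paddle's CI drives this through its own doc checker):

    # demo_gated.py -- hypothetical module containing one gated example
    def ones_shape():
        """
        Examples:
            >>> # doctest: +REQUIRES(env:GPU)
            >>> import paddle
            >>> paddle.device.set_device('gpu')
            >>> print(paddle.ones([2]).shape)
            [2]
        """

    # Run with:   GPU=1 python -m xdoctest demo_gated.py
    # Without GPU set in the environment, xdoctest skips the example.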
@@ -55,21 +55,25 @@ class ListenAndServ:
     Examples:
         .. code-block:: python
 
-            import paddle.fluid as fluid
-            import paddle
-            with fluid.program_guard(main):
-                serv = layers.ListenAndServ(
-                    "127.0.0.1:6170", ["X"], optimizer_mode=False)
-                with serv.do():
-                    x = paddle.static.data(
-                        shape=[32, 32],
-                        dtype='float32',
-                        name="X")
-                    paddle.nn.initializer.Constant(value=1.0)(x, main.global_block())
-                    paddle.scale(x=x, scale=10.0, out=out_var)
-
-            exe = fluid.Executor(place)
-            exe.run(main)
+            >>> # doctest: +REQUIRES(env:DISTRIBUTED)
+            >>> from paddle.incubate.nn.layer.io import ListenAndServ
+            >>> import paddle
+            >>> paddle.enable_static()
+            >>> place = paddle.CPUPlace()
+            >>> main = paddle.static.Program()
+            >>> with paddle.static.program_guard(main):
+            ...     serv = ListenAndServ(
+            ...         "127.0.0.1:6170", ["X"], optimizer_mode=False)
+            ...     with serv.do():
+            ...         x = paddle.static.data(
+            ...             shape=[32, 32],
+            ...             dtype='float32',
+            ...             name="X")
+            ...         paddle.nn.initializer.Constant(value=1.0)(x, main.global_block())
+            ...         paddle.scale(x=x, scale=10.0)
+            >>> exe = paddle.static.Executor(place)
+            >>> exe.run(main)
     """
 
     def __init__(self, endpoint, inputs, fan_in=1, optimizer_mode=True):
@@ -115,7 +119,9 @@ class ListenAndServ:
         return parent_block
 
     def complete_op(self):
-        from paddle.incubate.fleet.parameter_server.mode import DistributedMode
+        from paddle.incubate.distributed.fleet.parameter_server.mode import (
+            DistributedMode,
+        )
         main_program = self.helper.main_program
         current_block = main_program.current_block()
......