Unverified commit 14b1374f, authored by cyberslack_lee, committed by GitHub

[xdoctest] reformat example code with google style in No.65-68 (#55953)

* test=docs_preview

* test=docs_preview

* test=docs_preview

* test=docs_preview

* fix indent in math.py

* test=docs_preview

* test=docs_preview

* test=docs_preview

* test=docs_preview

---------
Co-authored-by: SigureMo <sigure.qaq@gmail.com>
Parent f16e1869
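The target convention here is the Google-style doctest format that xdoctest consumes: each statement in an `Examples:` block carries a `>>> ` prompt, continuation lines use `... `, expected output follows bare, and `# doctest: +SKIP` / `# doctest: -SKIP` fence output that is not stable across runs. A minimal sketch of the convention, on a hypothetical `scale` helper that is not part of this commit:

.. code-block:: python

    def scale(x, factor=2.0):
        """Scale a tensor by a constant.

        Examples:
            .. code-block:: python

                >>> import paddle
                >>> x = paddle.to_tensor([1.0, 2.0])
                >>> # prompted lines are executed; bare lines are expected output
                >>> print(scale(x).numpy())
                [2. 4.]
        """
        return x * factor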
@@ -45,63 +45,68 @@ def flops(net, input_size, custom_ops=None, print_detail=False):
    Examples:
        .. code-block:: python

            >>> import paddle
            >>> import paddle.nn as nn

            >>> class LeNet(nn.Layer):
            ...     def __init__(self, num_classes=10):
            ...         super().__init__()
            ...         self.num_classes = num_classes
            ...         self.features = nn.Sequential(
            ...             nn.Conv2D(1, 6, 3, stride=1, padding=1),
            ...             nn.ReLU(),
            ...             nn.MaxPool2D(2, 2),
            ...             nn.Conv2D(6, 16, 5, stride=1, padding=0),
            ...             nn.ReLU(),
            ...             nn.MaxPool2D(2, 2))
            ...
            ...         if num_classes > 0:
            ...             self.fc = nn.Sequential(
            ...                 nn.Linear(400, 120),
            ...                 nn.Linear(120, 84),
            ...                 nn.Linear(84, 10))
            ...
            ...     def forward(self, inputs):
            ...         x = self.features(inputs)
            ...
            ...         if self.num_classes > 0:
            ...             x = paddle.flatten(x, 1)
            ...             x = self.fc(x)
            ...         return x
            ...
            >>> lenet = LeNet()
            >>> # m is the instance of nn.Layer, x is the input of the layer, y is the output of the layer.
            >>> def count_leaky_relu(m, x, y):
            ...     x = x[0]
            ...     nelements = x.numel()
            ...     m.total_ops += int(nelements)
            ...
            >>> FLOPs = paddle.flops(lenet,
            ...                      [1, 1, 28, 28],
            ...                      custom_ops={nn.LeakyReLU: count_leaky_relu},
            ...                      print_detail=True)
            >>> # doctest: +SKIP
            >>> print(FLOPs)
            <class 'paddle.nn.layer.conv.Conv2D'>'s flops has been counted
            <class 'paddle.nn.layer.activation.ReLU'>'s flops has been counted
            Cannot find suitable count function for <class 'paddle.nn.layer.pooling.MaxPool2D'>. Treat it as zero FLOPs.
            <class 'paddle.nn.layer.common.Linear'>'s flops has been counted
            +--------------+-----------------+-----------------+--------+--------+
            |  Layer Name  |   Input Shape   |   Output Shape  | Params | Flops  |
            +--------------+-----------------+-----------------+--------+--------+
            |   conv2d_0   |  [1, 1, 28, 28] |  [1, 6, 28, 28] |   60   | 47040  |
            |   re_lu_0    |  [1, 6, 28, 28] |  [1, 6, 28, 28] |   0    |   0    |
            | max_pool2d_0 |  [1, 6, 28, 28] |  [1, 6, 14, 14] |   0    |   0    |
            |   conv2d_1   |  [1, 6, 14, 14] | [1, 16, 10, 10] |  2416  | 241600 |
            |   re_lu_1    | [1, 16, 10, 10] | [1, 16, 10, 10] |   0    |   0    |
            | max_pool2d_1 | [1, 16, 10, 10] |  [1, 16, 5, 5]  |   0    |   0    |
            |   linear_0   |     [1, 400]    |     [1, 120]    | 48120  | 48000  |
            |   linear_1   |     [1, 120]    |     [1, 84]     | 10164  | 10080  |
            |   linear_2   |     [1, 84]     |     [1, 10]     |  850   |  840   |
            +--------------+-----------------+-----------------+--------+--------+
            Total Flops: 347560     Total Params: 61610
            347560
            >>> # doctest: -SKIP
    """
    if isinstance(net, nn.Layer):
        # If net is a dy2stat model, net.forward is StaticFunction instance,
...
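The `custom_ops` hook above generalizes to any layer type: the counter receives the layer instance `m`, the input tuple `x`, and the output `y`, and accumulates into `m.total_ops`. A hedged sketch of the same pattern applied to `nn.Sigmoid` (this example is illustrative and not part of this commit):

.. code-block:: python

    >>> import paddle
    >>> import paddle.nn as nn

    >>> def count_sigmoid(m, x, y):
    ...     x = x[0]                       # inputs arrive as a tuple; take the tensor
    ...     m.total_ops += int(x.numel())  # assume one op per element
    ...
    >>> net = nn.Sequential(nn.Linear(10, 10), nn.Sigmoid())
    >>> FLOPs = paddle.flops(net, [1, 10],
    ...                      custom_ops={nn.Sigmoid: count_sigmoid})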
@@ -130,7 +130,7 @@ def _get_cache_or_reload(repo, force_reload, verbose=True, source='github'):
    _remove_if_exists(cached_file)
    _remove_if_exists(repo_dir)

    # Rename the repo
    shutil.move(extracted_repo, repo_dir)

    return repo_dir

@@ -177,24 +177,24 @@ def list(repo_dir, source='github', force_reload=False):
    List all entrypoints available in `github` hubconf.

    Args:
        repo_dir(str): Github or local path.

            - github path (str): A string with format "repo_owner/repo_name[:tag_name]" with an optional
              tag/branch. The default branch is `main` if not specified.
            - local path (str): Local repo path.

        source (str): `github` | `gitee` | `local`. Default is `github`.
        force_reload (bool, optional): Whether to discard the existing cache and force a fresh download. Default is `False`.

    Returns:
        entrypoints: A list of available entrypoint names.

    Examples:
        .. code-block:: python

            >>> import paddle
            >>> paddle.hub.list('lyuwenyu/paddlehub_demo:main', source='github', force_reload=False)

    """
    if source not in ('github', 'gitee', 'local'):

@@ -225,25 +225,25 @@ def help(repo_dir, model, source='github', force_reload=False):
    Show help information of model

    Args:
        repo_dir(str): Github or local path.

            - github path (str): A string with format "repo_owner/repo_name[:tag_name]" with an optional
              tag/branch. The default branch is `main` if not specified.
            - local path (str): Local repo path.

        model (str): Model name.
        source (str): `github` | `gitee` | `local`. Default is `github`.
        force_reload (bool, optional): Default is `False`.

    Returns:
        docs

    Examples:
        .. code-block:: python

            >>> import paddle
            >>> paddle.hub.help('lyuwenyu/paddlehub_demo:main', model='MM', source='github')

    """
    if source not in ('github', 'gitee', 'local'):

@@ -270,24 +270,25 @@ def load(repo_dir, model, source='github', force_reload=False, **kwargs):
    Load model

    Args:
        repo_dir(str): Github or local path.

            - github path (str): A string with format "repo_owner/repo_name[:tag_name]" with an optional
              tag/branch. The default branch is `main` if not specified.
            - local path (str): Local repo path.

        model (str): Model name.
        source (str): `github` | `gitee` | `local`. Default is `github`.
        force_reload (bool, optional): Default is `False`.
        **kwargs: Parameters used for the model.

    Returns:
        paddle model.

    Examples:
        .. code-block:: python

            >>> import paddle
            >>> paddle.hub.load('lyuwenyu/paddlehub_demo:main', model='MM', source='github')

    """
    if source not in ('github', 'gitee', 'local'):
...
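Taken together, the three hub entry points compose into a short discovery-to-load workflow. A hedged sketch against the same demo repo used in the docstrings above:

.. code-block:: python

    >>> import paddle

    >>> repo = 'lyuwenyu/paddlehub_demo:main'
    >>> entrypoints = paddle.hub.list(repo, source='github')        # discover model names
    >>> docs = paddle.hub.help(repo, model='MM', source='github')   # read a model's docstring
    >>> model = paddle.hub.load(repo, model='MM', source='github')  # instantiate the model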
This diff is collapsed.
@@ -30,108 +30,200 @@ def summary(net, input_size=None, dtypes=None, input=None):
    """Prints a string summary of the network.

    Args:
        net (Layer): The network which must be a subinstance of Layer.
        input_size (tuple|InputSpec|list[tuple|InputSpec], optional): Size of the input tensor. If the model
                    has only one input, input_size can be a tuple or an InputSpec. If the model
                    has multiple inputs, input_size must be a list which contains
                    every input's shape. Note that only the batch_size dim of
                    input_size can be None or -1. Default: None. Note that
                    input_size and input cannot be None at the same time.
        dtypes (str, optional): If dtypes is None, 'float32' will be used. Default: None.
        input (Tensor, optional): If input is given, input_size and dtype will be ignored. Default: None.

    Returns:
        Dict: A summary of the network including total params and total trainable params.

    Examples:
        .. code-block:: python

            >>> import paddle
            >>> import paddle.nn as nn
            >>> paddle.seed(2023)

            >>> class LeNet(nn.Layer):
            ...     def __init__(self, num_classes=10):
            ...         super().__init__()
            ...         self.num_classes = num_classes
            ...         self.features = nn.Sequential(
            ...             nn.Conv2D(1, 6, 3, stride=1, padding=1),
            ...             nn.ReLU(),
            ...             nn.MaxPool2D(2, 2),
            ...             nn.Conv2D(6, 16, 5, stride=1, padding=0),
            ...             nn.ReLU(),
            ...             nn.MaxPool2D(2, 2))
            ...
            ...         if num_classes > 0:
            ...             self.fc = nn.Sequential(
            ...                 nn.Linear(400, 120),
            ...                 nn.Linear(120, 84),
            ...                 nn.Linear(84, 10))
            ...
            ...     def forward(self, inputs):
            ...         x = self.features(inputs)
            ...
            ...         if self.num_classes > 0:
            ...             x = paddle.flatten(x, 1)
            ...             x = self.fc(x)
            ...         return x
            ...
            >>> lenet = LeNet()

            >>> params_info = paddle.summary(lenet, (1, 1, 28, 28))
            >>> # doctest: +SKIP
            >>> print(params_info)
            ---------------------------------------------------------------------------
             Layer (type)       Input Shape          Output Shape         Param #
            ===========================================================================
               Conv2D-1      [[1, 1, 28, 28]]      [1, 6, 28, 28]           60
                ReLU-1       [[1, 6, 28, 28]]      [1, 6, 28, 28]            0
             MaxPool2D-1     [[1, 6, 28, 28]]      [1, 6, 14, 14]            0
               Conv2D-2      [[1, 6, 14, 14]]     [1, 16, 10, 10]         2,416
                ReLU-2      [[1, 16, 10, 10]]     [1, 16, 10, 10]            0
             MaxPool2D-2    [[1, 16, 10, 10]]      [1, 16, 5, 5]             0
               Linear-1         [[1, 400]]            [1, 120]           48,120
               Linear-2         [[1, 120]]            [1, 84]            10,164
               Linear-3         [[1, 84]]             [1, 10]              850
            ===========================================================================
            Total params: 61,610
            Trainable params: 61,610
            Non-trainable params: 0
            ---------------------------------------------------------------------------
            Input size (MB): 0.00
            Forward/backward pass size (MB): 0.11
            Params size (MB): 0.24
            Estimated Total Size (MB): 0.35
            ---------------------------------------------------------------------------
            {'total_params': 61610, 'trainable_params': 61610}
            >>> # doctest: -SKIP

            >>> # multi input demo
            >>> class LeNetMultiInput(LeNet):
            ...     def forward(self, inputs, y):
            ...         x = self.features(inputs)
            ...
            ...         if self.num_classes > 0:
            ...             x = paddle.flatten(x, 1)
            ...             x = self.fc(x + y)
            ...         return x
            ...
            >>> lenet_multi_input = LeNetMultiInput()
            >>> params_info = paddle.summary(lenet_multi_input,
            ...                              [(1, 1, 28, 28), (1, 400)],
            ...                              dtypes=['float32', 'float32'])
            >>> # doctest: +SKIP
            >>> print(params_info)
            ---------------------------------------------------------------------------
             Layer (type)       Input Shape          Output Shape         Param #
            ===========================================================================
               Conv2D-3      [[1, 1, 28, 28]]      [1, 6, 28, 28]           60
                ReLU-3       [[1, 6, 28, 28]]      [1, 6, 28, 28]            0
             MaxPool2D-3     [[1, 6, 28, 28]]      [1, 6, 14, 14]            0
               Conv2D-4      [[1, 6, 14, 14]]     [1, 16, 10, 10]         2,416
                ReLU-4      [[1, 16, 10, 10]]     [1, 16, 10, 10]            0
             MaxPool2D-4    [[1, 16, 10, 10]]      [1, 16, 5, 5]             0
               Linear-4         [[1, 400]]            [1, 120]           48,120
               Linear-5         [[1, 120]]            [1, 84]            10,164
               Linear-6         [[1, 84]]             [1, 10]              850
            ===========================================================================
            Total params: 61,610
            Trainable params: 61,610
            Non-trainable params: 0
            ---------------------------------------------------------------------------
            Input size (MB): 0.00
            Forward/backward pass size (MB): 0.11
            Params size (MB): 0.24
            Estimated Total Size (MB): 0.35
            ---------------------------------------------------------------------------
            {'total_params': 61610, 'trainable_params': 61610}
            >>> # doctest: -SKIP

            >>> # list input demo
            >>> class LeNetListInput(LeNet):
            ...     def forward(self, inputs):
            ...         x = self.features(inputs[0])
            ...
            ...         if self.num_classes > 0:
            ...             x = paddle.flatten(x, 1)
            ...             x = self.fc(x + inputs[1])
            ...         return x
            ...
            >>> lenet_list_input = LeNetListInput()
            >>> input_data = [paddle.rand([1, 1, 28, 28]), paddle.rand([1, 400])]
            >>> params_info = paddle.summary(lenet_list_input, input=input_data)
            >>> # doctest: +SKIP
            >>> print(params_info)
            ---------------------------------------------------------------------------
             Layer (type)       Input Shape          Output Shape         Param #
            ===========================================================================
               Conv2D-5      [[1, 1, 28, 28]]      [1, 6, 28, 28]           60
                ReLU-5       [[1, 6, 28, 28]]      [1, 6, 28, 28]            0
             MaxPool2D-5     [[1, 6, 28, 28]]      [1, 6, 14, 14]            0
               Conv2D-6      [[1, 6, 14, 14]]     [1, 16, 10, 10]         2,416
                ReLU-6      [[1, 16, 10, 10]]     [1, 16, 10, 10]            0
             MaxPool2D-6    [[1, 16, 10, 10]]      [1, 16, 5, 5]             0
               Linear-7         [[1, 400]]            [1, 120]           48,120
               Linear-8         [[1, 120]]            [1, 84]            10,164
               Linear-9         [[1, 84]]             [1, 10]              850
            ===========================================================================
            Total params: 61,610
            Trainable params: 61,610
            Non-trainable params: 0
            ---------------------------------------------------------------------------
            Input size (MB): 0.00
            Forward/backward pass size (MB): 0.11
            Params size (MB): 0.24
            Estimated Total Size (MB): 0.35
            ---------------------------------------------------------------------------
            {'total_params': 61610, 'trainable_params': 61610}
            >>> # doctest: -SKIP

            >>> # dict input demo
            >>> class LeNetDictInput(LeNet):
            ...     def forward(self, inputs):
            ...         x = self.features(inputs['x1'])
            ...
            ...         if self.num_classes > 0:
            ...             x = paddle.flatten(x, 1)
            ...             x = self.fc(x + inputs['x2'])
            ...         return x
            ...
            >>> lenet_dict_input = LeNetDictInput()
            >>> input_data = {'x1': paddle.rand([1, 1, 28, 28]),
            ...               'x2': paddle.rand([1, 400])}
            >>> params_info = paddle.summary(lenet_dict_input, input=input_data)
            >>> # doctest: +SKIP
            >>> print(params_info)
            ---------------------------------------------------------------------------
             Layer (type)       Input Shape          Output Shape         Param #
            ===========================================================================
               Conv2D-7      [[1, 1, 28, 28]]      [1, 6, 28, 28]           60
                ReLU-7       [[1, 6, 28, 28]]      [1, 6, 28, 28]            0
             MaxPool2D-7     [[1, 6, 28, 28]]      [1, 6, 14, 14]            0
               Conv2D-8      [[1, 6, 14, 14]]     [1, 16, 10, 10]         2,416
                ReLU-8      [[1, 16, 10, 10]]     [1, 16, 10, 10]            0
             MaxPool2D-8    [[1, 16, 10, 10]]      [1, 16, 5, 5]             0
              Linear-10         [[1, 400]]            [1, 120]           48,120
              Linear-11         [[1, 120]]            [1, 84]            10,164
              Linear-12         [[1, 84]]             [1, 10]              850
            ===========================================================================
            Total params: 61,610
            Trainable params: 61,610
            Non-trainable params: 0
            ---------------------------------------------------------------------------
            Input size (MB): 0.00
            Forward/backward pass size (MB): 0.11
            Params size (MB): 0.24
            Estimated Total Size (MB): 0.35
            ---------------------------------------------------------------------------
            {'total_params': 61610, 'trainable_params': 61610}
            >>> # doctest: -SKIP
    """
    if input_size is None and input is None:
...
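As the Args section above notes, `input_size` may also be a single `InputSpec` whose batch dimension is left as None or -1. A minimal sketch of that variant; the two-layer net below is illustrative and not taken from the diff:

.. code-block:: python

    >>> import paddle
    >>> import paddle.nn as nn
    >>> from paddle.static import InputSpec

    >>> net = nn.Sequential(nn.Flatten(), nn.Linear(784, 10))
    >>> spec = InputSpec([None, 1, 28, 28], 'float32', 'image')  # batch dim left as None
    >>> params_info = paddle.summary(net, input_size=spec)
    >>> # expected totals: 784 * 10 weights + 10 biases = 7850 params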
@@ -1617,7 +1617,7 @@ def nansum(x, axis=None, dtype=None, keepdim=False, name=None):
    Examples:
        .. code-block:: python

            >>> import paddle
            >>> # x is a Tensor with following elements:
            >>> # [[nan, 0.3, 0.5, 0.9]
...
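For reference, `nansum` treats NaN entries as zero before reducing. A hedged sketch of the semantics the truncated example above illustrates:

.. code-block:: python

    >>> import paddle

    >>> x = paddle.to_tensor([[float('nan'), 0.3, 0.5, 0.9],
    ...                       [0.1, 0.2, float('-nan'), 0.7]])
    >>> out1 = paddle.nansum(x)          # 0.3 + 0.5 + 0.9 + 0.1 + 0.2 + 0.7 = 2.7
    >>> out2 = paddle.nansum(x, axis=0)  # [0.1, 0.5, 0.5, 1.6], NaNs counted as zero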