Unverified commit 14b1374f authored by cyberslack_lee, committed by GitHub

[xdoctest] reformat example code with google style in No.65-68 (#55953)

* test=docs_preview

* test=docs_preview

* test=docs_preview

* test=docs_preview

* fix indent in math.py

* test=docs_preview

* test=docs_preview

* test=docs_preview

* test=docs_preview

---------
Co-authored-by: SigureMo <sigure.qaq@gmail.com>
Parent f16e1869
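Note: the google-style (xdoctest) format this PR converts to marks statements with ``>>>``, continuations with ``...``, and places expected output unprefixed on the lines that follow. A minimal sketch of the convention (the snippet below is illustrative, not part of the diff):

.. code-block:: python

    >>> # statements carry the ">>>" prompt; multi-line constructs continue with "..."
    >>> values = [1, 2, 3]
    >>> total = sum(
    ...     v * 2 for v in values)
    >>> print(total)  # the unprefixed line below is checked against stdout
    12
    >>> # directives such as "# doctest: +SKIP" fence off output that varies between runs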
@@ -45,63 +45,68 @@ def flops(net, input_size, custom_ops=None, print_detail=False):

    Examples:
        .. code-block:: python

            >>> import paddle
            >>> import paddle.nn as nn

            >>> class LeNet(nn.Layer):
            ...     def __init__(self, num_classes=10):
            ...         super().__init__()
            ...         self.num_classes = num_classes
            ...         self.features = nn.Sequential(
            ...             nn.Conv2D(1, 6, 3, stride=1, padding=1),
            ...             nn.ReLU(),
            ...             nn.MaxPool2D(2, 2),
            ...             nn.Conv2D(6, 16, 5, stride=1, padding=0),
            ...             nn.ReLU(),
            ...             nn.MaxPool2D(2, 2))
            ...
            ...         if num_classes > 0:
            ...             self.fc = nn.Sequential(
            ...                 nn.Linear(400, 120),
            ...                 nn.Linear(120, 84),
            ...                 nn.Linear(84, 10))
            ...
            ...     def forward(self, inputs):
            ...         x = self.features(inputs)
            ...
            ...         if self.num_classes > 0:
            ...             x = paddle.flatten(x, 1)
            ...             x = self.fc(x)
            ...         return x
            ...
            >>> lenet = LeNet()
            >>> # m is the instance of nn.Layer, x is the input of layer, y is the output of layer.
            >>> def count_leaky_relu(m, x, y):
            ...     x = x[0]
            ...     nelements = x.numel()
            ...     m.total_ops += int(nelements)
            ...
            >>> FLOPs = paddle.flops(lenet,
            ...                      [1, 1, 28, 28],
            ...                      custom_ops={nn.LeakyReLU: count_leaky_relu},
            ...                      print_detail=True)
            >>> # doctest: +SKIP
            >>> print(FLOPs)
            <class 'paddle.nn.layer.conv.Conv2D'>'s flops has been counted
            <class 'paddle.nn.layer.activation.ReLU'>'s flops has been counted
            Cannot find suitable count function for <class 'paddle.nn.layer.pooling.MaxPool2D'>. Treat it as zero FLOPs.
            <class 'paddle.nn.layer.common.Linear'>'s flops has been counted
            +--------------+-----------------+-----------------+--------+--------+
            |  Layer Name  |   Input Shape   |   Output Shape  | Params | Flops  |
            +--------------+-----------------+-----------------+--------+--------+
            |   conv2d_0   |  [1, 1, 28, 28] |  [1, 6, 28, 28] |   60   | 47040  |
            |   re_lu_0    |  [1, 6, 28, 28] |  [1, 6, 28, 28] |   0    |   0    |
            | max_pool2d_0 |  [1, 6, 28, 28] |  [1, 6, 14, 14] |   0    |   0    |
            |   conv2d_1   |  [1, 6, 14, 14] | [1, 16, 10, 10] |  2416  | 241600 |
            |   re_lu_1    | [1, 16, 10, 10] | [1, 16, 10, 10] |   0    |   0    |
            | max_pool2d_1 | [1, 16, 10, 10] |  [1, 16, 5, 5]  |   0    |   0    |
            |   linear_0   |     [1, 400]    |     [1, 120]    | 48120  | 48000  |
            |   linear_1   |     [1, 120]    |     [1, 84]     | 10164  | 10080  |
            |   linear_2   |     [1, 84]     |     [1, 10]     |  850   |  840   |
            +--------------+-----------------+-----------------+--------+--------+
            Total Flops: 347560     Total Params: 61610
            347560
            >>> # doctest: -SKIP
    """
    if isinstance(net, nn.Layer):
        # If net is a dy2stat model, net.forward is StaticFunction instance,
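For a quicker smoke test than the full LeNet example above, a minimal ``paddle.flops`` call without ``custom_ops`` might look like the following sketch (the layer sizes here are illustrative, not from the diff):

.. code-block:: python

    >>> import paddle
    >>> import paddle.nn as nn

    >>> # A single Linear(10 -> 5): 10*5 weights + 5 biases = 55 params.
    >>> net = nn.Sequential(nn.Linear(10, 5))
    >>> # input_size includes the batch dimension, as in the example above.
    >>> FLOPs = paddle.flops(net, [1, 10], print_detail=False)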
@@ -130,7 +130,7 @@ def _get_cache_or_reload(repo, force_reload, verbose=True, source='github'):
        _remove_if_exists(cached_file)
        _remove_if_exists(repo_dir)

        # Rename the repo
        shutil.move(extracted_repo, repo_dir)

    return repo_dir
@@ -177,24 +177,24 @@ def list(repo_dir, source='github', force_reload=False):
    List all entrypoints available in `github` hubconf.

    Args:
        repo_dir(str): Github or local path.

            - github path (str): A string with format "repo_owner/repo_name[:tag_name]" with an optional
              tag/branch. The default branch is `main` if not specified.
            - local path (str): Local repo path.

        source (str): `github` | `gitee` | `local`. Default is `github`.
        force_reload (bool, optional): Whether to discard the existing cache and force a fresh download. Default is `False`.

    Returns:
        entrypoints: A list of available entrypoint names.

    Examples:
        .. code-block:: python

            >>> import paddle
            >>> paddle.hub.list('lyuwenyu/paddlehub_demo:main', source='github', force_reload=False)

    """
    if source not in ('github', 'gitee', 'local'):
@@ -225,25 +225,25 @@ def help(repo_dir, model, source='github', force_reload=False):
    Show help information of model

    Args:
        repo_dir(str): Github or local path.

            - github path (str): A string with format "repo_owner/repo_name[:tag_name]" with an optional
              tag/branch. The default branch is `main` if not specified.
            - local path (str): Local repo path.

        model (str): Model name.
        source (str): `github` | `gitee` | `local`. Default is `github`.
        force_reload (bool, optional): Default is `False`.

    Returns:
        docs

    Examples:
        .. code-block:: python

            >>> import paddle
            >>> paddle.hub.help('lyuwenyu/paddlehub_demo:main', model='MM', source='github')

    """
    if source not in ('github', 'gitee', 'local'):
@@ -270,24 +270,25 @@ def load(repo_dir, model, source='github', force_reload=False, **kwargs):
    Load model

    Args:
        repo_dir(str): Github or local path.

            - github path (str): A string with format "repo_owner/repo_name[:tag_name]" with an optional
              tag/branch. The default branch is `main` if not specified.
            - local path (str): Local repo path.

        model (str): Model name.
        source (str): `github` | `gitee` | `local`. Default is `github`.
        force_reload (bool, optional): Default is `False`.
        **kwargs: Parameters passed to the model.

    Returns:
        The loaded paddle model.

    Examples:
        .. code-block:: python

            >>> import paddle
            >>> paddle.hub.load('lyuwenyu/paddlehub_demo:main', model='MM', source='github')

    """
    if source not in ('github', 'gitee', 'local'):
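The three hub entry points above naturally chain into a discover-inspect-load workflow; a sketch, assuming the same demo repo used in the docstrings and network access:

.. code-block:: python

    >>> import paddle

    >>> repo = 'lyuwenyu/paddlehub_demo:main'
    >>> # Discover what the repo's hubconf exposes, read one entrypoint's
    >>> # docstring, then instantiate it.
    >>> entrypoints = paddle.hub.list(repo, source='github')
    >>> docs = paddle.hub.help(repo, model='MM', source='github')
    >>> model = paddle.hub.load(repo, model='MM', source='github')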
@@ -385,7 +385,7 @@ class StaticGraphAdapter:
            _save(optim, optim_path)

    # TODO: Support save/load scaler state in static graph
    def load(self, param_state_pairs, optim_state):
        if self._executor is None:
            executor = fluid.Executor(fluid.CPUPlace())._default_executor
@@ -1084,78 +1084,79 @@ class Model:

    1. A common example

    .. code-block:: python
        :name: code-example1

        >>> import paddle
        >>> import paddle.nn as nn
        >>> import paddle.vision.transforms as T
        >>> from paddle.static import InputSpec

        >>> device = paddle.set_device('cpu')  # or 'gpu'

        >>> net = nn.Sequential(
        ...     nn.Flatten(1),
        ...     nn.Linear(784, 200),
        ...     nn.Tanh(),
        ...     nn.Linear(200, 10))

        >>> # inputs and labels are not required for dynamic graph.
        >>> input = InputSpec([None, 784], 'float32', 'x')
        >>> label = InputSpec([None, 1], 'int64', 'label')

        >>> model = paddle.Model(net, input, label)
        >>> optim = paddle.optimizer.SGD(learning_rate=1e-3,
        ...                              parameters=model.parameters())

        >>> model.prepare(optim,
        ...               paddle.nn.CrossEntropyLoss(),
        ...               paddle.metric.Accuracy())

        >>> transform = T.Compose([
        ...     T.Transpose(),
        ...     T.Normalize([127.5], [127.5])
        ... ])
        >>> data = paddle.vision.datasets.MNIST(mode='train', transform=transform)
        >>> model.fit(data, epochs=2, batch_size=32, verbose=1)

    2. An example using mixed precision training.

    .. code-block:: python
        :name: code-example2

        >>> # doctest: +REQUIRES(env:GPU)
        >>> import paddle
        >>> paddle.device.set_device('gpu')
        >>> import paddle.nn as nn
        >>> import paddle.vision.transforms as T

        >>> def run_example_code():
        ...     device = paddle.set_device('gpu')
        ...
        ...     net = nn.Sequential(nn.Flatten(1), nn.Linear(784, 200), nn.Tanh(),
        ...                         nn.Linear(200, 10))
        ...
        ...     model = paddle.Model(net)
        ...     optim = paddle.optimizer.SGD(learning_rate=1e-3, parameters=model.parameters())
        ...
        ...     amp_configs = {
        ...         "level": "O1",
        ...         "custom_white_list": {'conv2d'},
        ...         "use_dynamic_loss_scaling": True
        ...     }
        ...     model.prepare(optim,
        ...                   paddle.nn.CrossEntropyLoss(),
        ...                   paddle.metric.Accuracy(),
        ...                   amp_configs=amp_configs)
        ...
        ...     transform = T.Compose([T.Transpose(), T.Normalize([127.5], [127.5])])
        ...     data = paddle.vision.datasets.MNIST(mode='train', transform=transform)
        ...     model.fit(data, epochs=2, batch_size=32, verbose=1)
        ...
        >>> # mixed precision training is only supported on GPU now.
        >>> if paddle.is_compiled_with_cuda():
        ...     run_example_code()

    """

    def __init__(self, network, inputs=None, labels=None):
@@ -1214,28 +1215,29 @@ class Model:

        .. code-block:: python

            >>> import paddle
            >>> import paddle.nn as nn
            >>> from paddle.static import InputSpec
            >>> paddle.seed(2023)

            >>> device = paddle.set_device('cpu')  # or 'gpu'

            >>> net = nn.Sequential(
            ...     nn.Linear(784, 200),
            ...     nn.Tanh(),
            ...     nn.Linear(200, 10))
            ...
            >>> input = InputSpec([None, 784], 'float32', 'x')
            >>> label = InputSpec([None, 1], 'int64', 'label')
            >>> model = paddle.Model(net, input, label)
            >>> optim = paddle.optimizer.SGD(learning_rate=1e-3,
            ...                              parameters=model.parameters())
            >>> model.prepare(optim, paddle.nn.CrossEntropyLoss())
            >>> data = paddle.rand((4, 784), dtype="float32")
            >>> label = paddle.randint(0, 10, (4, 1), dtype="int64")
            >>> loss = model.train_batch([data], [label])
            >>> print(loss)
            [array(3.0039132, dtype=float32)]

        """
        loss = self._adapter.train_batch(inputs, labels, update)
@@ -1267,29 +1269,31 @@ class Model:

        .. code-block:: python

            >>> import paddle
            >>> import paddle.nn as nn
            >>> from paddle.static import InputSpec
            >>> paddle.seed(2023)

            >>> device = paddle.set_device('cpu')  # or 'gpu'

            >>> net = nn.Sequential(
            ...     nn.Linear(784, 200),
            ...     nn.Tanh(),
            ...     nn.Linear(200, 10))
            ...
            >>> input = InputSpec([None, 784], 'float32', 'x')
            >>> label = InputSpec([None, 1], 'int64', 'label')
            >>> model = paddle.Model(net, input, label)
            >>> optim = paddle.optimizer.SGD(learning_rate=1e-3,
            ...                              parameters=model.parameters())
            >>> model.prepare(optim,
            ...               paddle.nn.CrossEntropyLoss(),
            ...               metrics=paddle.metric.Accuracy())
            >>> data = paddle.rand((4, 784), dtype="float32")
            >>> label = paddle.randint(0, 10, (4, 1), dtype="int64")
            >>> loss, acc = model.eval_batch([data], [label])
            >>> print(loss, acc)
            [array(3.0039132, dtype=float32)] [0.0]

        """
        loss = self._adapter.eval_batch(inputs, labels)
@@ -1316,29 +1320,30 @@ class Model:

        .. code-block:: python

            >>> import paddle
            >>> import paddle.nn as nn
            >>> from paddle.static import InputSpec
            >>> paddle.seed(2023)

            >>> device = paddle.set_device('cpu')  # or 'gpu'

            >>> input = InputSpec([None, 784], 'float32', 'x')
            >>> label = InputSpec([None, 1], 'int64', 'label')

            >>> net = nn.Sequential(
            ...     nn.Linear(784, 200),
            ...     nn.Tanh(),
            ...     nn.Linear(200, 10),
            ...     nn.Softmax())
            ...
            >>> model = paddle.Model(net, input, label)
            >>> model.prepare()
            >>> data = paddle.rand((1, 784), dtype="float32")
            >>> out = model.predict_batch([data])
            >>> print(out)
            [array([[0.10844935, 0.04650883, 0.11790176, 0.04962315, 0.10899059,
                     0.08197589, 0.03125402, 0.03232312, 0.3786293 , 0.04434395]],
                   dtype=float32)]

        """
        loss = self._adapter.predict_batch(inputs)
@@ -1377,45 +1382,33 @@ class Model:

        .. code-block:: python

            >>> import paddle
            >>> import paddle.nn as nn
            >>> import paddle.vision.transforms as T
            >>> from paddle.static import InputSpec
            >>> from paddle.vision.datasets import MNIST

            >>> dynamic = True  # False
            >>> # If use static graph, do not set
            >>> if not dynamic:
            ...     paddle.enable_static()

            >>> transform = T.Compose([T.Transpose(),
            ...                        T.Normalize([127.5], [127.5])])
            >>> train_dataset = MNIST(mode='train', transform=transform)
            >>> train_loader = paddle.io.DataLoader(train_dataset, batch_size=64)
            >>> val_dataset = MNIST(mode='test', transform=transform)
            >>> val_loader = paddle.io.DataLoader(val_dataset, batch_size=64)

            >>> input = InputSpec([None, 1, 28, 28], 'float32', 'image')
            >>> label = InputSpec([None, 1], 'int64', 'label')

            >>> model = paddle.Model(paddle.vision.models.LeNet(), input, label)
            >>> optim = paddle.optimizer.Adam(learning_rate=0.001, parameters=model.parameters())
            >>> model.prepare(optim, paddle.nn.CrossEntropyLoss(), paddle.metric.Accuracy(topk=(1, 2)))
            >>> model.fit(train_loader, val_loader, epochs=2, verbose=0)
            >>> model.save('checkpoint/test')  # save for training
            >>> model.save('inference_model', False)  # save for inference

        """
@@ -1459,22 +1452,22 @@ class Model:

        .. code-block:: python

            >>> import paddle
            >>> import paddle.nn as nn
            >>> from paddle.static import InputSpec

            >>> device = paddle.set_device('cpu')
            >>> input = InputSpec([None, 784], 'float32', 'x')

            >>> model = paddle.Model(nn.Sequential(
            ...     nn.Linear(784, 200),
            ...     nn.Tanh(),
            ...     nn.Linear(200, 10),
            ...     nn.Softmax()), input)
            ...
            >>> model.save('checkpoint/test')
            >>> model.load('checkpoint/test')

        """
@@ -1553,18 +1546,36 @@ class Model:

        .. code-block:: python

            >>> import paddle
            >>> import paddle.nn as nn
            >>> from paddle.static import InputSpec
            >>> paddle.seed(2023)

            >>> input = InputSpec([None, 784], 'float32', 'x')
            >>> model = paddle.Model(nn.Sequential(
            ...     nn.Linear(784, 200),
            ...     nn.Tanh(),
            ...     nn.Linear(200, 10)), input)
            ...
            >>> params = model.parameters()
            >>> print(params)
            [Parameter containing:
            Tensor(shape=[784, 200], dtype=float32, place=Place(cpu), stop_gradient=False,
                   [[ 0.05713400,  0.00314646, -0.03754271, ..., -0.02529256,
                      0.04872842, -0.06670858],
                    ...,
                    [ 0.06268418,  0.06550254, -0.02103353, ...,  0.06395906,
                      0.05509177, -0.06355451]]), Parameter containing:
            Tensor(shape=[200], dtype=float32, place=Place(cpu), stop_gradient=False,
                   [0., 0., 0., ..., 0., 0.]), Parameter containing:
            Tensor(shape=[200, 10], dtype=float32, place=Place(cpu), stop_gradient=False,
                   [[ 0.12933084,  0.07726504,  0.05336720, ...,  0.10865459,
                      0.06605886,  0.13684085],
                    ...,
                    [-0.10171061, -0.01649965, -0.13420501, ...,  0.11190581,
                     -0.12700224,  0.02916957]]), Parameter containing:
            Tensor(shape=[10], dtype=float32, place=Place(cpu), stop_gradient=False,
                   [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.])]

        """
        return self._adapter.parameters()
@@ -1812,84 +1823,80 @@ class Model:

        How to make a batch is done internally.

        .. code-block:: python
            :name: code-example3

            >>> import paddle
            >>> import paddle.vision.transforms as T
            >>> from paddle.vision.datasets import MNIST
            >>> from paddle.static import InputSpec

            >>> dynamic = True
            >>> if not dynamic:
            ...     paddle.enable_static()

            >>> transform = T.Compose([T.Transpose(),
            ...                        T.Normalize([127.5], [127.5])])
            >>> train_dataset = MNIST(mode='train', transform=transform)
            >>> val_dataset = MNIST(mode='test', transform=transform)

            >>> input = InputSpec([None, 1, 28, 28], 'float32', 'image')
            >>> label = InputSpec([None, 1], 'int64', 'label')

            >>> model = paddle.Model(
            ...     paddle.vision.models.LeNet(),
            ...     input, label)
            >>> optim = paddle.optimizer.Adam(
            ...     learning_rate=0.001, parameters=model.parameters())
            >>> model.prepare(
            ...     optim,
            ...     paddle.nn.CrossEntropyLoss(),
            ...     paddle.metric.Accuracy(topk=(1, 2)))
            >>> model.fit(train_dataset,
            ...           val_dataset,
            ...           epochs=2,
            ...           batch_size=64,
            ...           save_dir='mnist_checkpoint')

        2. An example using DataLoader; batch size and shuffle are set in the
           DataLoader.

        .. code-block:: python
            :name: code-example4

            >>> import paddle
            >>> import paddle.vision.transforms as T
            >>> from paddle.vision.datasets import MNIST
            >>> from paddle.static import InputSpec

            >>> dynamic = True
            >>> if not dynamic:
            ...     paddle.enable_static()

            >>> transform = T.Compose([T.Transpose(),
            ...                        T.Normalize([127.5], [127.5])])
            >>> train_dataset = MNIST(mode='train', transform=transform)
            >>> train_loader = paddle.io.DataLoader(train_dataset,
            ...                                     batch_size=64)
            >>> val_dataset = MNIST(mode='test', transform=transform)
            >>> val_loader = paddle.io.DataLoader(val_dataset,
            ...                                   batch_size=64)

            >>> input = InputSpec([None, 1, 28, 28], 'float32', 'image')
            >>> label = InputSpec([None, 1], 'int64', 'label')

            >>> model = paddle.Model(
            ...     paddle.vision.models.LeNet(), input, label)
            >>> optim = paddle.optimizer.Adam(
            ...     learning_rate=0.001, parameters=model.parameters())
            >>> model.prepare(
            ...     optim,
            ...     paddle.nn.CrossEntropyLoss(),
            ...     paddle.metric.Accuracy(topk=(1, 2)))
            >>> model.fit(train_loader,
            ...           val_loader,
            ...           epochs=2,
            ...           save_dir='mnist_checkpoint')

        """
        assert train_data is not None, "train_data must be given!"
@@ -2028,26 +2035,25 @@ class Model:

        Examples:

            .. code-block:: python

                >>> # doctest: +SKIP("Each step's acc and elapsed time vary between repeated runs")
                >>> import paddle
                >>> import paddle.vision.transforms as T
                >>> from paddle.static import InputSpec

                >>> # declarative mode
                >>> transform = T.Compose([T.Transpose(),
                ...                        T.Normalize([127.5], [127.5])])
                >>> val_dataset = paddle.vision.datasets.MNIST(mode='test', transform=transform)

                >>> input = InputSpec([-1, 1, 28, 28], 'float32', 'image')
                >>> label = InputSpec([None, 1], 'int64', 'label')
                >>> model = paddle.Model(paddle.vision.models.LeNet(), input, label)
                >>> model.prepare(metrics=paddle.metric.Accuracy())
                >>> result = model.evaluate(val_dataset, batch_size=64)
                >>> print(result)
                {'acc': 0.0699}

        """
        if eval_data is not None and isinstance(eval_data, Dataset):
@@ -2136,46 +2142,44 @@ class Model:

        Examples:

            .. code-block:: python

                >>> import numpy as np
                >>> import paddle
                >>> from paddle.static import InputSpec

                >>> class MnistDataset(paddle.vision.datasets.MNIST):
                ...     def __init__(self, mode, return_label=True):
                ...         super().__init__(mode=mode)
                ...         self.return_label = return_label
                ...
                ...     def __getitem__(self, idx):
                ...         img = np.reshape(self.images[idx], [1, 28, 28])
                ...         if self.return_label:
                ...             return img, np.array(self.labels[idx]).astype('int64')
                ...         return img
                ...
                ...     def __len__(self):
                ...         return len(self.images)
                ...
                >>> test_dataset = MnistDataset(mode='test', return_label=False)

                >>> # imperative mode
                >>> input = InputSpec([-1, 1, 28, 28], 'float32', 'image')
                >>> model = paddle.Model(paddle.vision.models.LeNet(), input)
                >>> model.prepare()
                >>> result = model.predict(test_dataset, batch_size=64)
                >>> print(len(result[0]), result[0][0].shape)
                157 (64, 10)

                >>> # declarative mode
                >>> device = paddle.set_device('cpu')
                >>> paddle.enable_static()
                >>> input = InputSpec([-1, 1, 28, 28], 'float32', 'image')
                >>> model = paddle.Model(paddle.vision.models.LeNet(), input)
                >>> model.prepare()
                >>> result = model.predict(test_dataset, batch_size=64)
                >>> print(len(result[0]), result[0][0].shape)
                157 (64, 10)

        """
        if test_data is not None and isinstance(test_data, Dataset):
@@ -2244,7 +2248,7 @@ class Model:
            paddle.jit.save(layer, path, input_spec=self._inputs)
        else:
            # Path check
            file_prefix = os.path.basename(path)
            if file_prefix == "":
                raise ValueError(
@@ -2288,7 +2292,7 @@ class Model:
        ):
            outputs = []
            for step, data in enumerate(data_loader):
                # Data might come from different types of data_loader and have
                # different format, as following:
                # 1. DataLoader in static graph:
                #    [[input1, input2, ..., label1, label2, ...]]
@@ -2372,36 +2376,53 @@ class Model:
        """Prints a string summary of the network.

        Args:
            input_size (tuple|InputSpec|list[tuple|InputSpec], optional): Size of the input tensor.
                If not set, input_size will be taken from ``self._inputs`` if the network has only
                one input; input_size can be a tuple or an InputSpec. If the model has multiple
                inputs, input_size must be a list containing every input's shape. Default: None.
            dtype (str, optional): If dtype is None, 'float32' will be used. Default: None.

        Returns:
            Dict: A summary of the network including total params and total trainable params.

        Examples:
            .. code-block:: python

                >>> import paddle
                >>> from paddle.static import InputSpec

                >>> input = InputSpec([None, 1, 28, 28], 'float32', 'image')
                >>> label = InputSpec([None, 1], 'int64', 'label')

                >>> model = paddle.Model(paddle.vision.models.LeNet(), input, label)
                >>> optim = paddle.optimizer.Adam(learning_rate=0.001, parameters=model.parameters())
                >>> model.prepare(optim, paddle.nn.CrossEntropyLoss())
                >>> params_info = model.summary()
                >>> # doctest: +SKIP
                >>> print(params_info)
                ---------------------------------------------------------------------------
                 Layer (type)       Input Shape          Output Shape         Param #
                ===========================================================================
                   Conv2D-1       [[1, 1, 28, 28]]     [1, 6, 28, 28]           60
                    ReLU-1        [[1, 6, 28, 28]]     [1, 6, 28, 28]            0
                  MaxPool2D-1     [[1, 6, 28, 28]]     [1, 6, 14, 14]            0
                   Conv2D-2       [[1, 6, 14, 14]]    [1, 16, 10, 10]         2,416
                    ReLU-2       [[1, 16, 10, 10]]    [1, 16, 10, 10]            0
                  MaxPool2D-2    [[1, 16, 10, 10]]     [1, 16, 5, 5]             0
                   Linear-1          [[1, 400]]           [1, 120]           48,120
                   Linear-2          [[1, 120]]            [1, 84]           10,164
                   Linear-3          [[1, 84]]             [1, 10]              850
                ===========================================================================
                Total params: 61,610
                Trainable params: 61,610
                Non-trainable params: 0
                ---------------------------------------------------------------------------
                Input size (MB): 0.00
                Forward/backward pass size (MB): 0.11
                Params size (MB): 0.24
                Estimated Total Size (MB): 0.35
                ---------------------------------------------------------------------------
                {'total_params': 61610, 'trainable_params': 61610}
                >>> # doctest: -SKIP

        """
        assert (
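If the ``Model`` was constructed without ``InputSpec``s, the shape can instead be supplied directly through ``input_size``; a sketch (note the leading batch dimension, per the Args above):

.. code-block:: python

    >>> import paddle

    >>> model = paddle.Model(paddle.vision.models.LeNet())
    >>> # No InputSpec was given to paddle.Model, so pass the shape here.
    >>> params_info = model.summary(input_size=(1, 1, 28, 28))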
@@ -30,108 +30,200 @@ def summary(net, input_size=None, dtypes=None, input=None):
    """Prints a string summary of the network.

    Args:
        net (Layer): The network which must be a subinstance of Layer.
        input_size (tuple|InputSpec|list[tuple|InputSpec], optional): Size of the input tensor. If the model
            has only one input, input_size can be a tuple or an InputSpec. If the model
            has multiple inputs, input_size must be a list containing
            every input's shape. Note that only the batch_size dim of
            input_size can be None or -1. Default: None. Note that
            input_size and input cannot be None at the same time.
        dtypes (str, optional): If dtypes is None, 'float32' will be used. Default: None.
        input (Tensor, optional): If input is given, input_size and dtype will be ignored. Default: None.

    Returns:
        Dict: A summary of the network including total params and total trainable params.

    Examples:
        .. code-block:: python

            >>> import paddle
            >>> import paddle.nn as nn
            >>> paddle.seed(2023)

            >>> class LeNet(nn.Layer):
            ...     def __init__(self, num_classes=10):
            ...         super().__init__()
            ...         self.num_classes = num_classes
            ...         self.features = nn.Sequential(
            ...             nn.Conv2D(1, 6, 3, stride=1, padding=1),
            ...             nn.ReLU(),
            ...             nn.MaxPool2D(2, 2),
            ...             nn.Conv2D(6, 16, 5, stride=1, padding=0),
            ...             nn.ReLU(),
            ...             nn.MaxPool2D(2, 2))
            ...
            ...         if num_classes > 0:
            ...             self.fc = nn.Sequential(
            ...                 nn.Linear(400, 120),
            ...                 nn.Linear(120, 84),
            ...                 nn.Linear(84, 10))
            ...
            ...     def forward(self, inputs):
            ...         x = self.features(inputs)
            ...
            ...         if self.num_classes > 0:
            ...             x = paddle.flatten(x, 1)
            ...             x = self.fc(x)
            ...         return x
            ...
            >>> lenet = LeNet()

            >>> params_info = paddle.summary(lenet, (1, 1, 28, 28))
            >>> # doctest: +SKIP
            >>> print(params_info)
            ---------------------------------------------------------------------------
             Layer (type)       Input Shape          Output Shape         Param #
            ===========================================================================
               Conv2D-1       [[1, 1, 28, 28]]     [1, 6, 28, 28]           60
                ReLU-1        [[1, 6, 28, 28]]     [1, 6, 28, 28]            0
              MaxPool2D-1     [[1, 6, 28, 28]]     [1, 6, 14, 14]            0
               Conv2D-2       [[1, 6, 14, 14]]    [1, 16, 10, 10]         2,416
                ReLU-2       [[1, 16, 10, 10]]    [1, 16, 10, 10]            0
              MaxPool2D-2    [[1, 16, 10, 10]]     [1, 16, 5, 5]             0
               Linear-1          [[1, 400]]           [1, 120]           48,120
               Linear-2          [[1, 120]]            [1, 84]           10,164
               Linear-3          [[1, 84]]             [1, 10]              850
            ===========================================================================
            Total params: 61,610
            Trainable params: 61,610
            Non-trainable params: 0
            ---------------------------------------------------------------------------
            Input size (MB): 0.00
            Forward/backward pass size (MB): 0.11
            Params size (MB): 0.24
            Estimated Total Size (MB): 0.35
            ---------------------------------------------------------------------------
            {'total_params': 61610, 'trainable_params': 61610}
            >>> # doctest: -SKIP

            >>> # multi input demo
            >>> class LeNetMultiInput(LeNet):
            ...     def forward(self, inputs, y):
            ...         x = self.features(inputs)
            ...
            ...         if self.num_classes > 0:
            ...             x = paddle.flatten(x, 1)
            ...             x = self.fc(x + y)
            ...         return x
            ...
            >>> lenet_multi_input = LeNetMultiInput()

            >>> params_info = paddle.summary(lenet_multi_input,
            ...                              [(1, 1, 28, 28), (1, 400)],
            ...                              dtypes=['float32', 'float32'])
            >>> # doctest: +SKIP
            >>> print(params_info)
            ---------------------------------------------------------------------------
             Layer (type)       Input Shape          Output Shape         Param #
            ===========================================================================
               Conv2D-3       [[1, 1, 28, 28]]     [1, 6, 28, 28]           60
                ReLU-3        [[1, 6, 28, 28]]     [1, 6, 28, 28]            0
              MaxPool2D-3     [[1, 6, 28, 28]]     [1, 6, 14, 14]            0
               Conv2D-4       [[1, 6, 14, 14]]    [1, 16, 10, 10]         2,416
                ReLU-4       [[1, 16, 10, 10]]    [1, 16, 10, 10]            0
              MaxPool2D-4    [[1, 16, 10, 10]]     [1, 16, 5, 5]             0
               Linear-4          [[1, 400]]           [1, 120]           48,120
               Linear-5          [[1, 120]]            [1, 84]           10,164
               Linear-6          [[1, 84]]             [1, 10]              850
            ===========================================================================
            Total params: 61,610
            Trainable params: 61,610
            Non-trainable params: 0
            ---------------------------------------------------------------------------
            Input size (MB): 0.00
            Forward/backward pass size (MB): 0.11
            Params size (MB): 0.24
            Estimated Total Size (MB): 0.35
            ---------------------------------------------------------------------------
            {'total_params': 61610, 'trainable_params': 61610}
            >>> # doctest: -SKIP

            >>> # list input demo
            >>> class LeNetListInput(LeNet):
            ...     def forward(self, inputs):
            ...         x = self.features(inputs[0])
            ...
            ...         if self.num_classes > 0:
            ...             x = paddle.flatten(x, 1)
            ...             x = self.fc(x + inputs[1])
            ...         return x
            ...
            >>> lenet_list_input = LeNetListInput()
            >>> input_data = [paddle.rand([1, 1, 28, 28]), paddle.rand([1, 400])]
            >>> params_info = paddle.summary(lenet_list_input, input=input_data)
            >>> # doctest: +SKIP
            >>> print(params_info)
            ---------------------------------------------------------------------------
             Layer (type)       Input Shape          Output Shape         Param #
            ===========================================================================
               Conv2D-5       [[1, 1, 28, 28]]     [1, 6, 28, 28]           60
                ReLU-5        [[1, 6, 28, 28]]     [1, 6, 28, 28]            0
              MaxPool2D-5     [[1, 6, 28, 28]]     [1, 6, 14, 14]            0
               Conv2D-6       [[1, 6, 14, 14]]    [1, 16, 10, 10]         2,416
                ReLU-6       [[1, 16, 10, 10]]    [1, 16, 10, 10]            0
              MaxPool2D-6    [[1, 16, 10, 10]]     [1, 16, 5, 5]             0
               Linear-7          [[1, 400]]           [1, 120]           48,120
               Linear-8          [[1, 120]]            [1, 84]           10,164
               Linear-9          [[1, 84]]             [1, 10]              850
            ===========================================================================
            Total params: 61,610
            Trainable params: 61,610
            Non-trainable params: 0
            ---------------------------------------------------------------------------
            Input size (MB): 0.00
            Forward/backward pass size (MB): 0.11
            Params size (MB): 0.24
            Estimated Total Size (MB): 0.35
            ---------------------------------------------------------------------------
            {'total_params': 61610, 'trainable_params': 61610}
            >>> # doctest: -SKIP

            >>> # dict input demo
            >>> class LeNetDictInput(LeNet):
            ...     def forward(self, inputs):
            ...         x = self.features(inputs['x1'])
            ...
            ...         if self.num_classes > 0:
            ...             x = paddle.flatten(x, 1)
            ...             x = self.fc(x + inputs['x2'])
            ...         return x
            ...
            >>> lenet_dict_input = LeNetDictInput()
            >>> input_data = {'x1': paddle.rand([1, 1, 28, 28]),
            ...               'x2': paddle.rand([1, 400])}
            >>> params_info = paddle.summary(lenet_dict_input, input=input_data)
            >>> # doctest: +SKIP
            >>> print(params_info)
            ---------------------------------------------------------------------------
             Layer (type)       Input Shape          Output Shape         Param #
            ===========================================================================
               Conv2D-7       [[1, 1, 28, 28]]     [1, 6, 28, 28]           60
                ReLU-7        [[1, 6, 28, 28]]     [1, 6, 28, 28]            0
              MaxPool2D-7     [[1, 6, 28, 28]]     [1, 6, 14, 14]            0
               Conv2D-8       [[1, 6, 14, 14]]    [1, 16, 10, 10]         2,416
                ReLU-8       [[1, 16, 10, 10]]    [1, 16, 10, 10]            0
              MaxPool2D-8    [[1, 16, 10, 10]]     [1, 16, 5, 5]             0
               Linear-10         [[1, 400]]           [1, 120]           48,120
               Linear-11         [[1, 120]]            [1, 84]           10,164
               Linear-12         [[1, 84]]             [1, 10]              850
            ===========================================================================
            Total params: 61,610
            Trainable params: 61,610
            Non-trainable params: 0
            ---------------------------------------------------------------------------
            Input size (MB): 0.00
            Forward/backward pass size (MB): 0.11
            Params size (MB): 0.24
            Estimated Total Size (MB): 0.35
            ---------------------------------------------------------------------------
            {'total_params': 61610, 'trainable_params': 61610}
            >>> # doctest: -SKIP
    """
    if input_size is None and input is None:
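Besides plain tuples, the Args above also accept an ``InputSpec`` for ``input_size``; a sketch of that variant with a small standalone net:

.. code-block:: python

    >>> import paddle
    >>> import paddle.nn as nn
    >>> from paddle.static import InputSpec

    >>> net = nn.Sequential(nn.Flatten(1), nn.Linear(784, 10))
    >>> # Only the batch dim may be None or -1, per the Args above.
    >>> spec = InputSpec([None, 1, 28, 28], 'float32', 'image')
    >>> params_info = paddle.summary(net, input_size=spec)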
@@ -1617,7 +1617,7 @@ def nansum(x, axis=None, dtype=None, keepdim=False, name=None):

    Examples:
        .. code-block:: python

            >>> import paddle
            >>> # x is a Tensor with following elements:
            >>> #     [[nan, 0.3, 0.5, 0.9]
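The ``nansum`` hunk is truncated here to its example header; for context, the documented behavior (nan entries contribute zero to the sum) can be sketched as:

.. code-block:: python

    >>> import paddle

    >>> x = paddle.to_tensor([[float('nan'), 0.3, 0.5, 0.9],
    ...                       [0.1, 0.2, float('-nan'), 0.7]])
    >>> # nan elements are treated as zero: 0.3+0.5+0.9+0.1+0.2+0.7 = 2.7
    >>> out = paddle.nansum(x)
    >>> per_col = paddle.nansum(x, axis=0)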