Unverified commit 14b1374f authored by cyberslack_lee, committed by GitHub

[xdoctest] reformat example code with google style in No.65-68 (#55953)

* test=docs_preview

* test=docs_preview

* test=docs_preview

* test=docs_preview

* fix indent in math.py

* test=docs_preview

* test=docs_preview

* test=docs_preview

* test=docs_preview

---------
Co-authored-by: SigureMo <sigure.qaq@gmail.com>
Parent f16e1869
@@ -45,63 +45,68 @@ def flops(net, input_size, custom_ops=None, print_detail=False):
Examples:
.. code-block:: python
import paddle
import paddle.nn as nn
class LeNet(nn.Layer):
def __init__(self, num_classes=10):
super().__init__()
self.num_classes = num_classes
self.features = nn.Sequential(
nn.Conv2D(
1, 6, 3, stride=1, padding=1),
nn.ReLU(),
nn.MaxPool2D(2, 2),
nn.Conv2D(
6, 16, 5, stride=1, padding=0),
nn.ReLU(),
nn.MaxPool2D(2, 2))
if num_classes > 0:
self.fc = nn.Sequential(
nn.Linear(400, 120),
nn.Linear(120, 84),
nn.Linear(
84, 10))
def forward(self, inputs):
x = self.features(inputs)
if self.num_classes > 0:
x = paddle.flatten(x, 1)
x = self.fc(x)
return x
lenet = LeNet()
# m is the instance of nn.Layer, x is the input of the layer, y is the output of the layer.
def count_leaky_relu(m, x, y):
x = x[0]
nelements = x.numel()
m.total_ops += int(nelements)
FLOPs = paddle.flops(lenet, [1, 1, 28, 28], custom_ops={nn.LeakyReLU: count_leaky_relu},
print_detail=True)
print(FLOPs)
#+--------------+-----------------+-----------------+--------+--------+
#| Layer Name | Input Shape | Output Shape | Params | Flops |
#+--------------+-----------------+-----------------+--------+--------+
#| conv2d_2 | [1, 1, 28, 28] | [1, 6, 28, 28] | 60 | 47040 |
#| re_lu_2 | [1, 6, 28, 28] | [1, 6, 28, 28] | 0 | 0 |
#| max_pool2d_2 | [1, 6, 28, 28] | [1, 6, 14, 14] | 0 | 0 |
#| conv2d_3 | [1, 6, 14, 14] | [1, 16, 10, 10] | 2416 | 241600 |
#| re_lu_3 | [1, 16, 10, 10] | [1, 16, 10, 10] | 0 | 0 |
#| max_pool2d_3 | [1, 16, 10, 10] | [1, 16, 5, 5] | 0 | 0 |
#| linear_0 | [1, 400] | [1, 120] | 48120 | 48000 |
#| linear_1 | [1, 120] | [1, 84] | 10164 | 10080 |
#| linear_2 | [1, 84] | [1, 10] | 850 | 840 |
#+--------------+-----------------+-----------------+--------+--------+
#Total Flops: 347560 Total Params: 61610
>>> import paddle
>>> import paddle.nn as nn
>>> class LeNet(nn.Layer):
... def __init__(self, num_classes=10):
... super().__init__()
... self.num_classes = num_classes
... self.features = nn.Sequential(
... nn.Conv2D(1, 6, 3, stride=1, padding=1),
... nn.ReLU(),
... nn.MaxPool2D(2, 2),
... nn.Conv2D(6, 16, 5, stride=1, padding=0),
... nn.ReLU(),
... nn.MaxPool2D(2, 2))
...
... if num_classes > 0:
... self.fc = nn.Sequential(
... nn.Linear(400, 120),
... nn.Linear(120, 84),
... nn.Linear(84, 10))
...
... def forward(self, inputs):
... x = self.features(inputs)
...
... if self.num_classes > 0:
... x = paddle.flatten(x, 1)
... x = self.fc(x)
... return x
...
>>> lenet = LeNet()
>>> # m is the instance of nn.Layer, x is the input of the layer, y is the output of the layer.
>>> def count_leaky_relu(m, x, y):
... x = x[0]
... nelements = x.numel()
... m.total_ops += int(nelements)
...
>>> FLOPs = paddle.flops(lenet,
... [1, 1, 28, 28],
... custom_ops={nn.LeakyReLU: count_leaky_relu},
... print_detail=True)
>>> # doctest: +SKIP
>>> print(FLOPs)
<class 'paddle.nn.layer.conv.Conv2D'>'s flops has been counted
<class 'paddle.nn.layer.activation.ReLU'>'s flops has been counted
Cannot find suitable count function for <class 'paddle.nn.layer.pooling.MaxPool2D'>. Treat it as zero FLOPs.
<class 'paddle.nn.layer.common.Linear'>'s flops has been counted
+--------------+-----------------+-----------------+--------+--------+
| Layer Name | Input Shape | Output Shape | Params | Flops |
+--------------+-----------------+-----------------+--------+--------+
| conv2d_0 | [1, 1, 28, 28] | [1, 6, 28, 28] | 60 | 47040 |
| re_lu_0 | [1, 6, 28, 28] | [1, 6, 28, 28] | 0 | 0 |
| max_pool2d_0 | [1, 6, 28, 28] | [1, 6, 14, 14] | 0 | 0 |
| conv2d_1 | [1, 6, 14, 14] | [1, 16, 10, 10] | 2416 | 241600 |
| re_lu_1 | [1, 16, 10, 10] | [1, 16, 10, 10] | 0 | 0 |
| max_pool2d_1 | [1, 16, 10, 10] | [1, 16, 5, 5] | 0 | 0 |
| linear_0 | [1, 400] | [1, 120] | 48120 | 48000 |
| linear_1 | [1, 120] | [1, 84] | 10164 | 10080 |
| linear_2 | [1, 84] | [1, 10] | 850 | 840 |
+--------------+-----------------+-----------------+--------+--------+
Total Flops: 347560 Total Params: 61610
347560
>>> # doctest: -SKIP
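>>> # The table above treats MaxPool2D as zero FLOPs because no built-in
>>> # count function exists for it. A hedged sketch of a custom hook, reusing
>>> # the (m, x, y) signature of count_leaky_relu above; counting one op per
>>> # input element is an assumed cost model, not Paddle's own rule.
>>> def count_max_pool2d(m, x, y):
...     x = x[0]
...     m.total_ops += int(x.numel())
...
>>> FLOPs = paddle.flops(lenet, [1, 1, 28, 28],
...                      custom_ops={nn.MaxPool2D: count_max_pool2d})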
"""
if isinstance(net, nn.Layer):
# If net is a dy2stat model, net.forward is StaticFunction instance,
@@ -130,7 +130,7 @@ def _get_cache_or_reload(repo, force_reload, verbose=True, source='github'):
_remove_if_exists(cached_file)
_remove_if_exists(repo_dir)
# rename the repo
# Rename the repo
shutil.move(extracted_repo, repo_dir)
return repo_dir
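# Note: `_remove_if_exists` is referenced above but lies outside this hunk.
# A minimal sketch of such a helper, assuming `os` and `shutil` are imported
# at module level (the `shutil.move` call above suggests they are):
def _remove_if_exists(path):
    # Remove a file or an entire directory tree if present; otherwise no-op.
    if os.path.isfile(path):
        os.remove(path)
    elif os.path.isdir(path):
        shutil.rmtree(path)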
@@ -177,24 +177,24 @@ def list(repo_dir, source='github', force_reload=False):
List all entrypoints available in the `github` hubconf.
Args:
repo_dir(str): github or local path.
repo_dir(str): Github or local path.
github path (str): a str with format "repo_owner/repo_name[:tag_name]" with an optional
tag/branch. The default branch is `main` if not specified.
- github path (str): A string with format "repo_owner/repo_name[:tag_name]" with an optional
tag/branch. The default branch is `main` if not specified.
- local path (str): Local repo path.
local path (str): local repo path
source (str): `github` | `gitee` | `local`. Default is `github`.
force_reload (bool, optional): Whether to discard the existing cache and force a fresh download. Default is `False`.
source (str): `github` | `gitee` | `local`, default is `github`.
force_reload (bool, optional): whether to discard the existing cache and force a fresh download, default is `False`.
Returns:
entrypoints: a list of available entrypoint names
entrypoints: A list of available entrypoint names.
Example:
Examples:
.. code-block:: python
import paddle
>>> import paddle
paddle.hub.list('lyuwenyu/paddlehub_demo:main', source='github', force_reload=False)
>>> paddle.hub.list('lyuwenyu/paddlehub_demo:main', source='github', force_reload=False)
"""
if source not in ('github', 'gitee', 'local'):
@@ -225,25 +225,25 @@ def help(repo_dir, model, source='github', force_reload=False):
Show the help information of a model.
Args:
repo_dir(str): github or local path.
repo_dir(str): Github or local path.
github path (str): a str with format "repo_owner/repo_name[:tag_name]" with an optional
tag/branch. The default branch is `main` if not specified.
- github path (str): A string with format "repo_owner/repo_name[:tag_name]" with an optional
tag/branch. The default branch is `main` if not specified.
local path (str): Local repo path.
local path (str): local repo path.
model (str): Model name.
source (str): `github` | `gitee` | `local`. Default is `github`.
force_reload (bool, optional): Default is `False`.
model (str): model name.
source (str): `github` | `gitee` | `local`, default is `github`.
force_reload (bool, optional): default is `False`.
Return:
Returns:
docs
Example:
Examples:
.. code-block:: python
import paddle
>>> import paddle
paddle.hub.help('lyuwenyu/paddlehub_demo:main', model='MM', source='github')
>>> paddle.hub.help('lyuwenyu/paddlehub_demo:main', model='MM', source='github')
"""
if source not in ('github', 'gitee', 'local'):
@@ -270,24 +270,25 @@ def load(repo_dir, model, source='github', force_reload=False, **kwargs):
Load a model.
Args:
repo_dir(str): github or local path.
repo_dir(str): Github or local path.
- github path (str): A string with format "repo_owner/repo_name[:tag_name]" with an optional
tag/branch. The default branch is `main` if not specified.
- local path (str): Local repo path.
github path (str): a str with format "repo_owner/repo_name[:tag_name]" with an optional
tag/branch. The default branch is `main` if not specified.
model (str): Model name.
source (str): `github` | `gitee` | `local`. Default is `github`.
force_reload (bool, optional): Default is `False`.
**kwargs: Parameters used for the model.
local path (str): local repo path.
Returns:
paddle model.
model (str): model name.
source (str): `github` | `gitee` | `local`, default is `github`.
force_reload (bool, optional): default is `False`.
**kwargs: parameters used for the model
Return:
paddle model
Example:
Examples:
.. code-block:: python
import paddle
paddle.hub.load('lyuwenyu/paddlehub_demo:main', model='MM', source='github')
>>> import paddle
>>> paddle.hub.load('lyuwenyu/paddlehub_demo:main', model='MM', source='github')
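>>> # A hedged end-to-end sketch combining the three hub APIs in this file:
>>> # discover entrypoints, inspect one, then load it. The repo and model
>>> # name reuse the values from the examples above.
>>> entrypoints = paddle.hub.list('lyuwenyu/paddlehub_demo:main', source='github')
>>> docs = paddle.hub.help('lyuwenyu/paddlehub_demo:main', model='MM', source='github')
>>> model = paddle.hub.load('lyuwenyu/paddlehub_demo:main', model='MM', source='github')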
"""
if source not in ('github', 'gitee', 'local'):
@@ -385,7 +385,7 @@ class StaticGraphAdapter:
_save(optim, optim_path)
# TODO: support save/load scaler state in static graph
# TODO: Support save/load scaler state in static graph
def load(self, param_state_pairs, optim_state):
if self._executor is None:
executor = fluid.Executor(fluid.CPUPlace())._default_executor
@@ -1084,78 +1084,79 @@ class Model:
1. A common example
.. code-block:: python
:name: code-example1
import paddle
import paddle.nn as nn
import paddle.vision.transforms as T
from paddle.static import InputSpec
device = paddle.set_device('cpu') # or 'gpu'
net = nn.Sequential(
nn.Flatten(1),
nn.Linear(784, 200),
nn.Tanh(),
nn.Linear(200, 10))
# inputs and labels are not required for dynamic graph.
input = InputSpec([None, 784], 'float32', 'x')
label = InputSpec([None, 1], 'int64', 'label')
model = paddle.Model(net, input, label)
optim = paddle.optimizer.SGD(learning_rate=1e-3,
parameters=model.parameters())
model.prepare(optim,
paddle.nn.CrossEntropyLoss(),
paddle.metric.Accuracy())
transform = T.Compose([
T.Transpose(),
T.Normalize([127.5], [127.5])
])
data = paddle.vision.datasets.MNIST(mode='train', transform=transform)
model.fit(data, epochs=2, batch_size=32, verbose=1)
:name: code-example1
>>> import paddle
>>> import paddle.nn as nn
>>> import paddle.vision.transforms as T
>>> from paddle.static import InputSpec
>>> device = paddle.set_device('cpu') # or 'gpu'
>>> net = nn.Sequential(
... nn.Flatten(1),
... nn.Linear(784, 200),
... nn.Tanh(),
... nn.Linear(200, 10))
...
>>> # inputs and labels are not required for dynamic graph.
>>> input = InputSpec([None, 784], 'float32', 'x')
>>> label = InputSpec([None, 1], 'int64', 'label')
>>> model = paddle.Model(net, input, label)
>>> optim = paddle.optimizer.SGD(learning_rate=1e-3,
... parameters=model.parameters())
...
>>> model.prepare(optim,
... paddle.nn.CrossEntropyLoss(),
... paddle.metric.Accuracy())
...
>>> transform = T.Compose([
... T.Transpose(),
... T.Normalize([127.5], [127.5])
... ])
>>> data = paddle.vision.datasets.MNIST(mode='train', transform=transform)
>>> model.fit(data, epochs=2, batch_size=32, verbose=1)
2. An example using mixed precision training.
.. code-block:: python
:name: code-example2
# required: gpu
import paddle
import paddle.nn as nn
import paddle.vision.transforms as T
def run_example_code():
device = paddle.set_device('gpu')
net = nn.Sequential(nn.Flatten(1), nn.Linear(784, 200), nn.Tanh(),
nn.Linear(200, 10))
model = paddle.Model(net)
optim = paddle.optimizer.SGD(learning_rate=1e-3, parameters=model.parameters())
amp_configs = {
"level": "O1",
"custom_white_list": {'conv2d'},
"use_dynamic_loss_scaling": True
}
model.prepare(optim,
paddle.nn.CrossEntropyLoss(),
paddle.metric.Accuracy(),
amp_configs=amp_configs)
transform = T.Compose([T.Transpose(), T.Normalize([127.5], [127.5])])
data = paddle.vision.datasets.MNIST(mode='train', transform=transform)
model.fit(data, epochs=2, batch_size=32, verbose=1)
# mixed precision training is only supported on GPU now.
if paddle.is_compiled_with_cuda():
run_example_code()
:name: code-example2
>>> # doctest: +REQUIRES(env:GPU)
>>> import paddle
>>> paddle.device.set_device('gpu')
>>> import paddle.nn as nn
>>> import paddle.vision.transforms as T
>>> def run_example_code():
... device = paddle.set_device('gpu')
...
... net = nn.Sequential(nn.Flatten(1), nn.Linear(784, 200), nn.Tanh(),
... nn.Linear(200, 10))
...
... model = paddle.Model(net)
... optim = paddle.optimizer.SGD(learning_rate=1e-3, parameters=model.parameters())
...
... amp_configs = {
... "level": "O1",
... "custom_white_list": {'conv2d'},
... "use_dynamic_loss_scaling": True
... }
... model.prepare(optim,
... paddle.nn.CrossEntropyLoss(),
... paddle.metric.Accuracy(),
... amp_configs=amp_configs)
...
... transform = T.Compose([T.Transpose(), T.Normalize([127.5], [127.5])])
... data = paddle.vision.datasets.MNIST(mode='train', transform=transform)
... model.fit(data, epochs=2, batch_size=32, verbose=1)
...
>>> # mixed precision training is only supported on GPU now.
>>> if paddle.is_compiled_with_cuda():
... run_example_code()
...
"""
def __init__(self, network, inputs=None, labels=None):
@@ -1214,28 +1215,29 @@ class Model:
.. code-block:: python
import paddle
import paddle.nn as nn
from paddle.static import InputSpec
device = paddle.set_device('cpu') # or 'gpu'
net = nn.Sequential(
nn.Linear(784, 200),
nn.Tanh(),
nn.Linear(200, 10))
input = InputSpec([None, 784], 'float32', 'x')
label = InputSpec([None, 1], 'int64', 'label')
model = paddle.Model(net, input, label)
optim = paddle.optimizer.SGD(learning_rate=1e-3,
parameters=model.parameters())
model.prepare(optim, paddle.nn.CrossEntropyLoss())
data = paddle.rand((4, 784), dtype="float32")
label = paddle.randint(0, 10, (4, 1), dtype="int64")
loss = model.train_batch([data], [label])
print(loss)
# [array([2.192784], dtype=float32)]
>>> import paddle
>>> import paddle.nn as nn
>>> from paddle.static import InputSpec
>>> paddle.seed(2023)
>>> device = paddle.set_device('cpu') # or 'gpu'
>>> net = nn.Sequential(
... nn.Linear(784, 200),
... nn.Tanh(),
... nn.Linear(200, 10))
...
>>> input = InputSpec([None, 784], 'float32', 'x')
>>> label = InputSpec([None, 1], 'int64', 'label')
>>> model = paddle.Model(net, input, label)
>>> optim = paddle.optimizer.SGD(learning_rate=1e-3,
... parameters=model.parameters())
>>> model.prepare(optim, paddle.nn.CrossEntropyLoss())
>>> data = paddle.rand((4, 784), dtype="float32")
>>> label = paddle.randint(0, 10, (4, 1), dtype="int64")
>>> loss = model.train_batch([data], [label])
>>> print(loss)
[array(3.0039132, dtype=float32)]
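>>> # A hedged variant: `update=False` is assumed to run the forward and
>>> # backward passes but skip the optimizer step (the adapter call below
>>> # passes `update` through), allowing gradients to accumulate over batches.
>>> loss = model.train_batch([data], [label], update=False)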
"""
loss = self._adapter.train_batch(inputs, labels, update)
@@ -1267,29 +1269,31 @@ class Model:
.. code-block:: python
import paddle
import paddle.nn as nn
from paddle.static import InputSpec
device = paddle.set_device('cpu') # or 'gpu'
net = nn.Sequential(
nn.Linear(784, 200),
nn.Tanh(),
nn.Linear(200, 10))
input = InputSpec([None, 784], 'float32', 'x')
label = InputSpec([None, 1], 'int64', 'label')
model = paddle.Model(net, input, label)
optim = paddle.optimizer.SGD(learning_rate=1e-3,
parameters=model.parameters())
model.prepare(optim,
paddle.nn.CrossEntropyLoss(), metrics=paddle.metric.Accuracy())
data = paddle.rand((4, 784), dtype="float32")
label = paddle.randint(0, 10, (4, 1), dtype="int64")
loss, acc = model.eval_batch([data], [label])
print(loss, acc)
# [array([2.8825705], dtype=float32)] [0.0]
>>> import paddle
>>> import paddle.nn as nn
>>> from paddle.static import InputSpec
>>> paddle.seed(2023)
>>> device = paddle.set_device('cpu') # or 'gpu'
>>> net = nn.Sequential(
... nn.Linear(784, 200),
... nn.Tanh(),
... nn.Linear(200, 10))
...
>>> input = InputSpec([None, 784], 'float32', 'x')
>>> label = InputSpec([None, 1], 'int64', 'label')
>>> model = paddle.Model(net, input, label)
>>> optim = paddle.optimizer.SGD(learning_rate=1e-3,
... parameters=model.parameters())
>>> model.prepare(optim,
... paddle.nn.CrossEntropyLoss(),
... metrics=paddle.metric.Accuracy())
>>> data = paddle.rand((4, 784), dtype="float32")
>>> label = paddle.randint(0, 10, (4, 1), dtype="int64")
>>> loss, acc = model.eval_batch([data], [label])
>>> print(loss, acc)
[array(3.0039132, dtype=float32)] [0.0]
"""
loss = self._adapter.eval_batch(inputs, labels)
@@ -1316,29 +1320,30 @@ class Model:
.. code-block:: python
import paddle
import paddle.nn as nn
from paddle.static import InputSpec
device = paddle.set_device('cpu') # or 'gpu'
input = InputSpec([None, 784], 'float32', 'x')
label = InputSpec([None, 1], 'int64', 'label')
net = nn.Sequential(
nn.Linear(784, 200),
nn.Tanh(),
nn.Linear(200, 10),
nn.Softmax())
model = paddle.Model(net, input, label)
model.prepare()
data = paddle.rand((1, 784), dtype="float32")
out = model.predict_batch([data])
print(out)
# [array([[0.08189095, 0.16740078, 0.06889386, 0.05085445, 0.10729759,
# 0.02217775, 0.14518553, 0.1591538 , 0.01808308, 0.17906217]],
# dtype=float32)]
>>> import paddle
>>> import paddle.nn as nn
>>> from paddle.static import InputSpec
>>> paddle.seed(2023)
>>> device = paddle.set_device('cpu') # or 'gpu'
>>> input = InputSpec([None, 784], 'float32', 'x')
>>> label = InputSpec([None, 1], 'int64', 'label')
>>> net = nn.Sequential(
... nn.Linear(784, 200),
... nn.Tanh(),
... nn.Linear(200, 10),
... nn.Softmax())
...
>>> model = paddle.Model(net, input, label)
>>> model.prepare()
>>> data = paddle.rand((1, 784), dtype="float32")
>>> out = model.predict_batch([data])
>>> print(out)
[array([[0.10844935, 0.04650883, 0.11790176, 0.04962315, 0.10899059,
0.08197589, 0.03125402, 0.03232312, 0.3786293 , 0.04434395]],
dtype=float32)]
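>>> # The outputs are numpy arrays, one per model output. A hedged follow-up
>>> # picking the most probable class from the softmax scores printed above:
>>> import numpy as np
>>> print(np.argmax(out[0], axis=-1))
[8]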
"""
loss = self._adapter.predict_batch(inputs)
@@ -1377,45 +1382,33 @@ class Model:
.. code-block:: python
import paddle
import paddle.nn as nn
import paddle.vision.transforms as T
from paddle.static import InputSpec
class Mnist(nn.Layer):
def __init__(self):
super().__init__()
self.net = nn.Sequential(
nn.Flatten(1),
nn.Linear(784, 200),
nn.Tanh(),
nn.Linear(200, 10),
nn.Softmax())
def forward(self, x):
return self.net(x)
dynamic = True # False
# if use static graph, do not set
if not dynamic:
paddle.enable_static()
input = InputSpec([None, 784], 'float32', 'x')
label = InputSpec([None, 1], 'int64', 'label')
model = paddle.Model(Mnist(), input, label)
optim = paddle.optimizer.SGD(learning_rate=1e-3,
parameters=model.parameters())
model.prepare(optim, paddle.nn.CrossEntropyLoss())
transform = T.Compose([
T.Transpose(),
T.Normalize([127.5], [127.5])
])
data = paddle.vision.datasets.MNIST(mode='train', transform=transform)
model.fit(data, epochs=1, batch_size=32, verbose=0)
model.save('checkpoint/test') # save for training
model.save('inference_model', False) # save for inference
>>> import paddle
>>> import paddle.nn as nn
>>> import paddle.vision.transforms as T
>>> from paddle.static import InputSpec
>>> from paddle.vision.datasets import MNIST
>>> dynamic = True # False
>>> # To use the static graph, set dynamic to False
>>> if not dynamic:
... paddle.enable_static()
>>> transform = T.Compose([T.Transpose(),
... T.Normalize([127.5], [127.5])])
>>> train_dataset = MNIST(mode='train', transform=transform)
>>> train_loader = paddle.io.DataLoader(train_dataset, batch_size=64)
>>> val_dataset = MNIST(mode='test', transform=transform)
>>> val_loader = paddle.io.DataLoader(val_dataset, batch_size=64)
>>> input = InputSpec([None, 1, 28, 28], 'float32', 'image')
>>> label = InputSpec([None, 1], 'int64', 'label')
>>> model = paddle.Model(paddle.vision.models.LeNet(), input, label)
>>> optim = paddle.optimizer.Adam(learning_rate=0.001, parameters=model.parameters())
>>> model.prepare(optim, paddle.nn.CrossEntropyLoss(), paddle.metric.Accuracy(topk=(1, 2)))
>>> model.fit(train_loader, val_loader, epochs=2, verbose=0)
>>> model.save('checkpoint/test') # save for training
>>> model.save('inference_model', False) # save for inference
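>>> # A hedged follow-up, assuming the inference artifact saved above can be
>>> # reloaded as a translated static layer via the matching path prefix:
>>> loaded = paddle.jit.load('inference_model')
>>> out = loaded(paddle.rand((1, 1, 28, 28), dtype='float32'))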
"""
@@ -1459,22 +1452,22 @@ class Model:
.. code-block:: python
import paddle
import paddle.nn as nn
from paddle.static import InputSpec

device = paddle.set_device('cpu')

input = InputSpec([None, 784], 'float32', 'x')

model = paddle.Model(nn.Sequential(
    nn.Linear(784, 200),
    nn.Tanh(),
    nn.Linear(200, 10),
    nn.Softmax()), input)

model.save('checkpoint/test')
model.load('checkpoint/test')

>>> import paddle
>>> import paddle.nn as nn
>>> from paddle.static import InputSpec

>>> device = paddle.set_device('cpu')

>>> input = InputSpec([None, 784], 'float32', 'x')

>>> model = paddle.Model(nn.Sequential(
...     nn.Linear(784, 200),
...     nn.Tanh(),
...     nn.Linear(200, 10),
...     nn.Softmax()), input)
...
>>> model.save('checkpoint/test')
>>> model.load('checkpoint/test')
"""
@@ -1553,18 +1546,36 @@ class Model:
.. code-block:: python
import paddle
import paddle.nn as nn
from paddle.static import InputSpec
input = InputSpec([None, 784], 'float32', 'x')
model = paddle.Model(nn.Sequential(
nn.Linear(784, 200),
nn.Tanh(),
nn.Linear(200, 10)), input)
params = model.parameters()
>>> import paddle
>>> import paddle.nn as nn
>>> from paddle.static import InputSpec
>>> paddle.seed(2023)
>>> input = InputSpec([None, 784], 'float32', 'x')
>>> model = paddle.Model(nn.Sequential(
... nn.Linear(784, 200),
... nn.Tanh(),
... nn.Linear(200, 10)), input)
...
>>> params = model.parameters()
>>> print(params)
[Parameter containing:
Tensor(shape=[784, 200], dtype=float32, place=Place(cpu), stop_gradient=False,
[[ 0.05713400, 0.00314646, -0.03754271, ..., -0.02529256,
0.04872842, -0.06670858],
...,
[ 0.06268418, 0.06550254, -0.02103353, ..., 0.06395906,
0.05509177, -0.06355451]]), Parameter containing:
Tensor(shape=[200], dtype=float32, place=Place(cpu), stop_gradient=False,
[0., 0., 0., ..., 0., 0.]), Parameter containing:
Tensor(shape=[200, 10], dtype=float32, place=Place(cpu), stop_gradient=False,
[[ 0.12933084, 0.07726504, 0.05336720, ..., 0.10865459,
0.06605886, 0.13684085],
...,
[-0.10171061, -0.01649965, -0.13420501, ..., 0.11190581,
-0.12700224, 0.02916957]]), Parameter containing:
Tensor(shape=[10], dtype=float32, place=Place(cpu), stop_gradient=False,
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.])]
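>>> # A hedged follow-up: the four parameter tensors printed above can be
>>> # counted directly; 784*200 + 200 + 200*10 + 10 = 159010.
>>> print(sum(int(p.numel()) for p in params))
159010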
"""
return self._adapter.parameters()
@@ -1812,84 +1823,80 @@ class Model:
Batching is handled internally.
.. code-block:: python
:name: code-example3
import paddle
import paddle.vision.transforms as T
from paddle.vision.datasets import MNIST
from paddle.static import InputSpec
dynamic = True
if not dynamic:
paddle.enable_static()
transform = T.Compose([
T.Transpose(),
T.Normalize([127.5], [127.5])
])
train_dataset = MNIST(mode='train', transform=transform)
val_dataset = MNIST(mode='test', transform=transform)
input = InputSpec([None, 1, 28, 28], 'float32', 'image')
label = InputSpec([None, 1], 'int64', 'label')
model = paddle.Model(
paddle.vision.models.LeNet(),
input, label)
optim = paddle.optimizer.Adam(
learning_rate=0.001, parameters=model.parameters())
model.prepare(
optim,
paddle.nn.CrossEntropyLoss(),
paddle.metric.Accuracy(topk=(1, 2)))
model.fit(train_dataset,
val_dataset,
epochs=2,
batch_size=64,
save_dir='mnist_checkpoint')
:name: code-example3
>>> import paddle
>>> import paddle.vision.transforms as T
>>> from paddle.vision.datasets import MNIST
>>> from paddle.static import InputSpec
>>> dynamic = True
>>> if not dynamic:
... paddle.enable_static()
...
>>> transform = T.Compose([T.Transpose(),
... T.Normalize([127.5], [127.5])])
>>> train_dataset = MNIST(mode='train', transform=transform)
>>> val_dataset = MNIST(mode='test', transform=transform)
>>> input = InputSpec([None, 1, 28, 28], 'float32', 'image')
>>> label = InputSpec([None, 1], 'int64', 'label')
>>> model = paddle.Model(
... paddle.vision.models.LeNet(),
... input, label)
>>> optim = paddle.optimizer.Adam(
... learning_rate=0.001, parameters=model.parameters())
>>> model.prepare(
... optim,
... paddle.nn.CrossEntropyLoss(),
... paddle.metric.Accuracy(topk=(1, 2)))
>>> model.fit(train_dataset,
... val_dataset,
... epochs=2,
... batch_size=64,
... save_dir='mnist_checkpoint')
...
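>>> # A hedged follow-up to the run above: the fitted model can then be
>>> # scored on the validation set with the metrics set up in prepare().
>>> result = model.evaluate(val_dataset, batch_size=64, verbose=0)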
2. An example using DataLoader; batch size and shuffle are set in the
DataLoader.
.. code-block:: python
:name: code-example4
import paddle
import paddle.vision.transforms as T
from paddle.vision.datasets import MNIST
from paddle.static import InputSpec
dynamic = True
if not dynamic:
paddle.enable_static()
transform = T.Compose([
T.Transpose(),
T.Normalize([127.5], [127.5])
])
train_dataset = MNIST(mode='train', transform=transform)
train_loader = paddle.io.DataLoader(train_dataset,
batch_size=64)
val_dataset = MNIST(mode='test', transform=transform)
val_loader = paddle.io.DataLoader(val_dataset,
batch_size=64)
input = InputSpec([None, 1, 28, 28], 'float32', 'image')
label = InputSpec([None, 1], 'int64', 'label')
model = paddle.Model(
paddle.vision.models.LeNet(), input, label)
optim = paddle.optimizer.Adam(
learning_rate=0.001, parameters=model.parameters())
model.prepare(
optim,
paddle.nn.CrossEntropyLoss(),
paddle.metric.Accuracy(topk=(1, 2)))
model.fit(train_loader,
val_loader,
epochs=2,
save_dir='mnist_checkpoint')
:name: code-example4
>>> import paddle
>>> import paddle.vision.transforms as T
>>> from paddle.vision.datasets import MNIST
>>> from paddle.static import InputSpec
>>> dynamic = True
>>> if not dynamic:
... paddle.enable_static()
...
>>> transform = T.Compose([T.Transpose(),
... T.Normalize([127.5], [127.5])])
>>> train_dataset = MNIST(mode='train', transform=transform)
>>> train_loader = paddle.io.DataLoader(train_dataset,
... batch_size=64)
>>> val_dataset = MNIST(mode='test', transform=transform)
>>> val_loader = paddle.io.DataLoader(val_dataset,
... batch_size=64)
...
>>> input = InputSpec([None, 1, 28, 28], 'float32', 'image')
>>> label = InputSpec([None, 1], 'int64', 'label')
>>> model = paddle.Model(
... paddle.vision.models.LeNet(), input, label)
>>> optim = paddle.optimizer.Adam(
... learning_rate=0.001, parameters=model.parameters())
>>> model.prepare(
... optim,
... paddle.nn.CrossEntropyLoss(),
... paddle.metric.Accuracy(topk=(1, 2)))
>>> model.fit(train_loader,
... val_loader,
... epochs=2,
... save_dir='mnist_checkpoint')
...
"""
assert train_data is not None, "train_data must be given!"
@@ -2028,26 +2035,25 @@ class Model:
Examples:
.. code-block:: python
import paddle
import paddle.vision.transforms as T
from paddle.static import InputSpec
# declarative mode
transform = T.Compose([
T.Transpose(),
T.Normalize([127.5], [127.5])
])
val_dataset = paddle.vision.datasets.MNIST(mode='test', transform=transform)
input = InputSpec([-1, 1, 28, 28], 'float32', 'image')
label = InputSpec([None, 1], 'int64', 'label')
model = paddle.Model(paddle.vision.models.LeNet(), input, label)
model.prepare(metrics=paddle.metric.Accuracy())
result = model.evaluate(val_dataset, batch_size=64)
print(result)
# {'acc': 0.0699}
.. code-block:: python
>>> # doctest: +SKIP("per-step accuracy and elapsed time vary between runs")
>>> import paddle
>>> import paddle.vision.transforms as T
>>> from paddle.static import InputSpec
>>> # declarative mode
>>> transform = T.Compose([T.Transpose(),
... T.Normalize([127.5], [127.5])])
>>> val_dataset = paddle.vision.datasets.MNIST(mode='test', transform=transform)
>>> input = InputSpec([-1, 1, 28, 28], 'float32', 'image')
>>> label = InputSpec([None, 1], 'int64', 'label')
>>> model = paddle.Model(paddle.vision.models.LeNet(), input, label)
>>> model.prepare(metrics=paddle.metric.Accuracy())
>>> result = model.evaluate(val_dataset, batch_size=64)
>>> print(result)
{'acc': 0.0699}
"""
if eval_data is not None and isinstance(eval_data, Dataset):
@@ -2136,46 +2142,44 @@ class Model:
Examples:
.. code-block:: python
import numpy as np
import paddle
from paddle.static import InputSpec
class MnistDataset(paddle.vision.datasets.MNIST):
def __init__(self, mode, return_label=True):
super().__init__(mode=mode)
self.return_label = return_label
def __getitem__(self, idx):
img = np.reshape(self.images[idx], [1, 28, 28])
if self.return_label:
return img, np.array(self.labels[idx]).astype('int64')
return img,
def __len__(self):
return len(self.images)
test_dataset = MnistDataset(mode='test', return_label=False)
# imperative mode
input = InputSpec([-1, 1, 28, 28], 'float32', 'image')
model = paddle.Model(paddle.vision.models.LeNet(), input)
model.prepare()
result = model.predict(test_dataset, batch_size=64)
print(len(result[0]), result[0][0].shape)
# 157 (64, 10)
# declarative mode
device = paddle.set_device('cpu')
paddle.enable_static()
input = InputSpec([-1, 1, 28, 28], 'float32', 'image')
model = paddle.Model(paddle.vision.models.LeNet(), input)
model.prepare()
result = model.predict(test_dataset, batch_size=64)
print(len(result[0]), result[0][0].shape)
# 157 (64, 10)
.. code-block:: python
>>> import numpy as np
>>> import paddle
>>> from paddle.static import InputSpec
>>> class MnistDataset(paddle.vision.datasets.MNIST):
... def __init__(self, mode, return_label=True):
... super().__init__(mode=mode)
... self.return_label = return_label
...
... def __getitem__(self, idx):
... img = np.reshape(self.images[idx], [1, 28, 28])
... if self.return_label:
... return img, np.array(self.labels[idx]).astype('int64')
... return img,
...
... def __len__(self):
... return len(self.images)
...
>>> test_dataset = MnistDataset(mode='test', return_label=False)
>>> # imperative mode
>>> input = InputSpec([-1, 1, 28, 28], 'float32', 'image')
>>> model = paddle.Model(paddle.vision.models.LeNet(), input)
>>> model.prepare()
>>> result = model.predict(test_dataset, batch_size=64)
>>> print(len(result[0]), result[0][0].shape)
157 (64, 10)
>>> # declarative mode
>>> device = paddle.set_device('cpu')
>>> paddle.enable_static()
>>> input = InputSpec([-1, 1, 28, 28], 'float32', 'image')
>>> model = paddle.Model(paddle.vision.models.LeNet(), input)
>>> model.prepare()
>>> result = model.predict(test_dataset, batch_size=64)
>>> print(len(result[0]), result[0][0].shape)
157 (64, 10)
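>>> # A hedged follow-up: the per-batch outputs can be stitched into a single
>>> # array covering the whole test set (156 batches of 64 plus one of 16):
>>> all_outputs = np.concatenate(result[0], axis=0)
>>> print(all_outputs.shape)
(10000, 10)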
"""
if test_data is not None and isinstance(test_data, Dataset):
@@ -2244,7 +2248,7 @@ class Model:
paddle.jit.save(layer, path, input_spec=self._inputs)
else:
# path check
# Path check
file_prefix = os.path.basename(path)
if file_prefix == "":
raise ValueError(
@@ -2288,7 +2292,7 @@ class Model:
):
outputs = []
for step, data in enumerate(data_loader):
# data might come from different types of data_loader and have
# Data might come from different types of data_loader and have
# different format, as following:
# 1. DataLoader in static graph:
# [[input1, input2, ..., label1, label2, ...]]
@@ -2372,36 +2376,53 @@ class Model:
"""Prints a string summary of the network.
Args:
input_size (tuple|InputSpec|list[tuple|InputSpec], optional): size of input tensor.
if not set, input_size will get from ``self._inputs`` if network only have
one input, input_size can be tuple or InputSpec. if model have multiple
input, input_size must be a list which contain every input's shape.
Default: None.
dtype (str, optional): if dtype is None, 'float32' will be used, Default: None.
input_size (tuple|InputSpec|list[tuple|InputSpec], optional): Size of the input tensor.
If not set, input_size will be taken from ``self._inputs``. If the network has
only one input, input_size can be a tuple or an InputSpec; if the model has
multiple inputs, input_size must be a list containing every input's shape. Default: None.
dtype (str, optional): If dtype is None, 'float32' will be used. Default: None.
Returns:
Dict: a summary of the network including total params and total trainable params.
Dict: A summary of the network including total params and total trainable params.
Examples:
.. code-block:: python
import paddle
from paddle.static import InputSpec
input = InputSpec([None, 1, 28, 28], 'float32', 'image')
label = InputSpec([None, 1], 'int64', 'label')
model = paddle.Model(paddle.vision.models.LeNet(),
input, label)
optim = paddle.optimizer.Adam(
learning_rate=0.001, parameters=model.parameters())
model.prepare(
optim,
paddle.nn.CrossEntropyLoss())
params_info = model.summary()
print(params_info)
# {'total_params': 61610, 'trainable_params': 61610}
>>> import paddle
>>> from paddle.static import InputSpec
>>> input = InputSpec([None, 1, 28, 28], 'float32', 'image')
>>> label = InputSpec([None, 1], 'int64', 'label')
>>> model = paddle.Model(paddle.vision.models.LeNet(), input, label)
>>> optim = paddle.optimizer.Adam(learning_rate=0.001, parameters=model.parameters())
>>> model.prepare(optim, paddle.nn.CrossEntropyLoss())
>>> params_info = model.summary()
>>> # doctest: +SKIP
>>> print(params_info)
---------------------------------------------------------------------------
Layer (type) Input Shape Output Shape Param #
===========================================================================
Conv2D-1 [[1, 1, 28, 28]] [1, 6, 28, 28] 60
ReLU-1 [[1, 6, 28, 28]] [1, 6, 28, 28] 0
MaxPool2D-1 [[1, 6, 28, 28]] [1, 6, 14, 14] 0
Conv2D-2 [[1, 6, 14, 14]] [1, 16, 10, 10] 2,416
ReLU-2 [[1, 16, 10, 10]] [1, 16, 10, 10] 0
MaxPool2D-2 [[1, 16, 10, 10]] [1, 16, 5, 5] 0
Linear-1 [[1, 400]] [1, 120] 48,120
Linear-2 [[1, 120]] [1, 84] 10,164
Linear-3 [[1, 84]] [1, 10] 850
===========================================================================
Total params: 61,610
Trainable params: 61,610
Non-trainable params: 0
---------------------------------------------------------------------------
Input size (MB): 0.00
Forward/backward pass size (MB): 0.11
Params size (MB): 0.24
Estimated Total Size (MB): 0.35
---------------------------------------------------------------------------
{'total_params': 61610, 'trainable_params': 61610}
>>> # doctest: -SKIP
"""
assert (
@@ -30,108 +30,200 @@ def summary(net, input_size=None, dtypes=None, input=None):
"""Prints a string summary of the network.
Args:
net (Layer): the network which must be a subinstance of Layer.
input_size (tuple|InputSpec|list[tuple|InputSpec], optional): size of input tensor. if model only
net (Layer): The network, which must be an instance of a Layer subclass.
input_size (tuple|InputSpec|list[tuple|InputSpec], optional): Size of the input tensor. If the model
has only one input, input_size can be a tuple or an InputSpec. If the model
has multiple inputs, input_size must be a list containing every input's
shape. Note that in input_size only the batch_size dimension may be
None or -1. Default: None. Note that input_size and input cannot
both be None at the same time.
dtypes (str, optional): if dtypes is None, 'float32' will be used, Default: None.
input: the input tensor. if input is given, input_size and dtype will be ignored, Default: None.
dtypes (str, optional): If dtypes is None, 'float32' will be used. Default: None.
input (Tensor, optional): If input is given, input_size and dtype will be ignored. Default: None.
Returns:
Dict: a summary of the network including total params and total trainable params.
Dict: A summary of the network including total params and total trainable params.
Examples:
.. code-block:: python
import paddle
import paddle.nn as nn
class LeNet(nn.Layer):
def __init__(self, num_classes=10):
super().__init__()
self.num_classes = num_classes
self.features = nn.Sequential(
nn.Conv2D(
1, 6, 3, stride=1, padding=1),
nn.ReLU(),
nn.MaxPool2D(2, 2),
nn.Conv2D(
6, 16, 5, stride=1, padding=0),
nn.ReLU(),
nn.MaxPool2D(2, 2))
if num_classes > 0:
self.fc = nn.Sequential(
nn.Linear(400, 120),
nn.Linear(120, 84),
nn.Linear(
84, 10))
def forward(self, inputs):
x = self.features(inputs)
if self.num_classes > 0:
x = paddle.flatten(x, 1)
x = self.fc(x)
return x
lenet = LeNet()
params_info = paddle.summary(lenet, (1, 1, 28, 28))
print(params_info)
# multi input demo
class LeNetMultiInput(LeNet):
def forward(self, inputs, y):
x = self.features(inputs)
if self.num_classes > 0:
x = paddle.flatten(x, 1)
x = self.fc(x + y)
return x
lenet_multi_input = LeNetMultiInput()
params_info = paddle.summary(lenet_multi_input, [(1, 1, 28, 28), (1, 400)],
dtypes=['float32', 'float32'])
print(params_info)
# list input demo
class LeNetListInput(LeNet):
def forward(self, inputs):
x = self.features(inputs[0])
if self.num_classes > 0:
x = paddle.flatten(x, 1)
x = self.fc(x + inputs[1])
return x
lenet_list_input = LeNetListInput()
input_data = [paddle.rand([1, 1, 28, 28]), paddle.rand([1, 400])]
params_info = paddle.summary(lenet_list_input, input=input_data)
print(params_info)
# dict input demo
class LeNetDictInput(LeNet):
def forward(self, inputs):
x = self.features(inputs['x1'])
if self.num_classes > 0:
x = paddle.flatten(x, 1)
x = self.fc(x + inputs['x2'])
return x
lenet_dict_input = LeNetDictInput()
input_data = {'x1': paddle.rand([1, 1, 28, 28]),
'x2': paddle.rand([1, 400])}
params_info = paddle.summary(lenet_dict_input, input=input_data)
print(params_info)
>>> import paddle
>>> import paddle.nn as nn
>>> paddle.seed(2023)
>>> class LeNet(nn.Layer):
... def __init__(self, num_classes=10):
... super().__init__()
... self.num_classes = num_classes
... self.features = nn.Sequential(
... nn.Conv2D(1, 6, 3, stride=1, padding=1),
... nn.ReLU(),
... nn.MaxPool2D(2, 2),
... nn.Conv2D(6, 16, 5, stride=1, padding=0),
... nn.ReLU(),
... nn.MaxPool2D(2, 2))
...
... if num_classes > 0:
... self.fc = nn.Sequential(
... nn.Linear(400, 120),
... nn.Linear(120, 84),
... nn.Linear(84, 10))
...
... def forward(self, inputs):
... x = self.features(inputs)
...
... if self.num_classes > 0:
... x = paddle.flatten(x, 1)
... x = self.fc(x)
... return x
...
>>> lenet = LeNet()
>>> params_info = paddle.summary(lenet, (1, 1, 28, 28))
>>> # doctest: +SKIP
>>> print(params_info)
---------------------------------------------------------------------------
Layer (type) Input Shape Output Shape Param #
===========================================================================
Conv2D-1 [[1, 1, 28, 28]] [1, 6, 28, 28] 60
ReLU-1 [[1, 6, 28, 28]] [1, 6, 28, 28] 0
MaxPool2D-1 [[1, 6, 28, 28]] [1, 6, 14, 14] 0
Conv2D-2 [[1, 6, 14, 14]] [1, 16, 10, 10] 2,416
ReLU-2 [[1, 16, 10, 10]] [1, 16, 10, 10] 0
MaxPool2D-2 [[1, 16, 10, 10]] [1, 16, 5, 5] 0
Linear-1 [[1, 400]] [1, 120] 48,120
Linear-2 [[1, 120]] [1, 84] 10,164
Linear-3 [[1, 84]] [1, 10] 850
===========================================================================
Total params: 61,610
Trainable params: 61,610
Non-trainable params: 0
---------------------------------------------------------------------------
Input size (MB): 0.00
Forward/backward pass size (MB): 0.11
Params size (MB): 0.24
Estimated Total Size (MB): 0.35
---------------------------------------------------------------------------
{'total_params': 61610, 'trainable_params': 61610}
>>> # doctest: -SKIP
>>> # multi input demo
>>> class LeNetMultiInput(LeNet):
... def forward(self, inputs, y):
... x = self.features(inputs)
...
... if self.num_classes > 0:
... x = paddle.flatten(x, 1)
... x = self.fc(x + y)
... return x
...
>>> lenet_multi_input = LeNetMultiInput()
>>> params_info = paddle.summary(lenet_multi_input,
... [(1, 1, 28, 28), (1, 400)],
... dtypes=['float32', 'float32'])
>>> # doctest: +SKIP
>>> print(params_info)
---------------------------------------------------------------------------
Layer (type) Input Shape Output Shape Param #
===========================================================================
Conv2D-3 [[1, 1, 28, 28]] [1, 6, 28, 28] 60
ReLU-3 [[1, 6, 28, 28]] [1, 6, 28, 28] 0
MaxPool2D-3 [[1, 6, 28, 28]] [1, 6, 14, 14] 0
Conv2D-4 [[1, 6, 14, 14]] [1, 16, 10, 10] 2,416
ReLU-4 [[1, 16, 10, 10]] [1, 16, 10, 10] 0
MaxPool2D-4 [[1, 16, 10, 10]] [1, 16, 5, 5] 0
Linear-4 [[1, 400]] [1, 120] 48,120
Linear-5 [[1, 120]] [1, 84] 10,164
Linear-6 [[1, 84]] [1, 10] 850
===========================================================================
Total params: 61,610
Trainable params: 61,610
Non-trainable params: 0
---------------------------------------------------------------------------
Input size (MB): 0.00
Forward/backward pass size (MB): 0.11
Params size (MB): 0.24
Estimated Total Size (MB): 0.35
---------------------------------------------------------------------------
{'total_params': 61610, 'trainable_params': 61610}
>>> # doctest: -SKIP
>>> # list input demo
>>> class LeNetListInput(LeNet):
... def forward(self, inputs):
... x = self.features(inputs[0])
...
... if self.num_classes > 0:
... x = paddle.flatten(x, 1)
... x = self.fc(x + inputs[1])
... return x
...
>>> lenet_list_input = LeNetListInput()
>>> input_data = [paddle.rand([1, 1, 28, 28]), paddle.rand([1, 400])]
>>> params_info = paddle.summary(lenet_list_input, input=input_data)
>>> # doctest: +SKIP
>>> print(params_info)
---------------------------------------------------------------------------
Layer (type) Input Shape Output Shape Param #
===========================================================================
Conv2D-5 [[1, 1, 28, 28]] [1, 6, 28, 28] 60
ReLU-5 [[1, 6, 28, 28]] [1, 6, 28, 28] 0
MaxPool2D-5 [[1, 6, 28, 28]] [1, 6, 14, 14] 0
Conv2D-6 [[1, 6, 14, 14]] [1, 16, 10, 10] 2,416
ReLU-6 [[1, 16, 10, 10]] [1, 16, 10, 10] 0
MaxPool2D-6 [[1, 16, 10, 10]] [1, 16, 5, 5] 0
Linear-7 [[1, 400]] [1, 120] 48,120
Linear-8 [[1, 120]] [1, 84] 10,164
Linear-9 [[1, 84]] [1, 10] 850
===========================================================================
Total params: 61,610
Trainable params: 61,610
Non-trainable params: 0
---------------------------------------------------------------------------
Input size (MB): 0.00
Forward/backward pass size (MB): 0.11
Params size (MB): 0.24
Estimated Total Size (MB): 0.35
---------------------------------------------------------------------------
{'total_params': 61610, 'trainable_params': 61610}
>>> # doctest: -SKIP
>>> # dict input demo
>>> class LeNetDictInput(LeNet):
... def forward(self, inputs):
... x = self.features(inputs['x1'])
...
... if self.num_classes > 0:
... x = paddle.flatten(x, 1)
... x = self.fc(x + inputs['x2'])
... return x
...
>>> lenet_dict_input = LeNetDictInput()
>>> input_data = {'x1': paddle.rand([1, 1, 28, 28]),
... 'x2': paddle.rand([1, 400])}
>>> params_info = paddle.summary(lenet_dict_input, input=input_data)
>>> # doctest: +SKIP
>>> print(params_info)
---------------------------------------------------------------------------
Layer (type) Input Shape Output Shape Param #
===========================================================================
Conv2D-7 [[1, 1, 28, 28]] [1, 6, 28, 28] 60
ReLU-7 [[1, 6, 28, 28]] [1, 6, 28, 28] 0
MaxPool2D-7 [[1, 6, 28, 28]] [1, 6, 14, 14] 0
Conv2D-8 [[1, 6, 14, 14]] [1, 16, 10, 10] 2,416
ReLU-8 [[1, 16, 10, 10]] [1, 16, 10, 10] 0
MaxPool2D-8 [[1, 16, 10, 10]] [1, 16, 5, 5] 0
Linear-10 [[1, 400]] [1, 120] 48,120
Linear-11 [[1, 120]] [1, 84] 10,164
Linear-12 [[1, 84]] [1, 10] 850
===========================================================================
Total params: 61,610
Trainable params: 61,610
Non-trainable params: 0
---------------------------------------------------------------------------
Input size (MB): 0.00
Forward/backward pass size (MB): 0.11
Params size (MB): 0.24
Estimated Total Size (MB): 0.35
---------------------------------------------------------------------------
{'total_params': 61610, 'trainable_params': 61610}
>>> # doctest: -SKIP
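>>> # Per the Args above, input_size may also be an InputSpec; a hedged
>>> # sketch mirroring the tuple form used in the first demo:
>>> from paddle.static import InputSpec
>>> input_spec = InputSpec([1, 1, 28, 28], 'float32', 'image')
>>> params_info = paddle.summary(lenet, input_spec)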
"""
if input_size is None and input is None:
@@ -1617,7 +1617,7 @@ def nansum(x, axis=None, dtype=None, keepdim=False, name=None):
Examples:
.. code-block:: python
>>> import paddle
>>> # x is a Tensor with the following elements:
>>> # [[nan, 0.3, 0.5, 0.9]
......