Unverified commit 4c3969fa authored by Nyakku Shigure, committed by GitHub

fix paddle.Model en docs (#43537)

* add copy-from label for code examples, test=document_fix

* refine docs, test=document_fix

* add some output for code example, test=document_fix

* add `optional`, test=document_fix

* add missing parameters, test=document_fix

* add missing links for `ProgBarLogger` and `ModelCheckpoint`, test=document_fix

* update eval_batch example, test=document_fix

* fix typos in stack_outputs, test=document_fix

* np.random -> paddle.random, test=document_fix
Parent 3d232b29
@@ -934,89 +934,91 @@ class Model(object):
Args:
network (paddle.nn.Layer): The network is an instance of
paddle.nn.Layer.
inputs (InputSpec|list|tuple|dict|None, optional): `inputs`, entry points of network,
    could be an InputSpec instance, or list/tuple of InputSpec instances,
    or dict ({name: InputSpec}), and it couldn't be None in static
    graph. Default: None.
labels (InputSpec|list|tuple|None, optional): `labels`, entry points of network,
    could be an InputSpec instance or list/tuple of InputSpec instances,
    or None. For static graph, if labels is required in loss,
    labels must be set. Otherwise, it could be None. Default: None.
Examples:
1. A common example
.. code-block:: python
:name: code-example1
import paddle
import paddle.nn as nn
import paddle.vision.transforms as T
from paddle.static import InputSpec

device = paddle.set_device('cpu') # or 'gpu'

net = nn.Sequential(
    nn.Flatten(1),
    nn.Linear(784, 200),
    nn.Tanh(),
    nn.Linear(200, 10))

# inputs and labels are not required for dynamic graph.
input = InputSpec([None, 784], 'float32', 'x')
label = InputSpec([None, 1], 'int64', 'label')

model = paddle.Model(net, input, label)
optim = paddle.optimizer.SGD(learning_rate=1e-3,
    parameters=model.parameters())

model.prepare(optim,
    paddle.nn.CrossEntropyLoss(),
    paddle.metric.Accuracy())

transform = T.Compose([
    T.Transpose(),
    T.Normalize([127.5], [127.5])
])
data = paddle.vision.datasets.MNIST(mode='train', transform=transform)

model.fit(data, epochs=2, batch_size=32, verbose=1)
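The same network can also be used in static graph mode. A minimal sketch
(note that `inputs` must be given here, since it can't be None in static
graph, and labels are required by the loss):

.. code-block:: python

    import paddle
    import paddle.nn as nn
    from paddle.static import InputSpec

    paddle.enable_static()

    # in static graph mode the input spec is mandatory
    input = InputSpec([None, 784], 'float32', 'x')
    label = InputSpec([None, 1], 'int64', 'label')

    net = nn.Sequential(
        nn.Flatten(1),
        nn.Linear(784, 200),
        nn.Tanh(),
        nn.Linear(200, 10))

    model = paddle.Model(net, input, label)
    optim = paddle.optimizer.SGD(learning_rate=1e-3,
        parameters=model.parameters())
    model.prepare(optim, paddle.nn.CrossEntropyLoss())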
2. An example using mixed precision training.
.. code-block:: python
:name: code-example2

# required: gpu
import paddle
import paddle.nn as nn
import paddle.vision.transforms as T

def run_example_code():
    device = paddle.set_device('gpu')

    net = nn.Sequential(nn.Flatten(1), nn.Linear(784, 200), nn.Tanh(),
                        nn.Linear(200, 10))

    model = paddle.Model(net)
    optim = paddle.optimizer.SGD(learning_rate=1e-3, parameters=model.parameters())

    amp_configs = {
        "level": "O1",
        "custom_white_list": {'conv2d'},
        "use_dynamic_loss_scaling": True
    }
    model.prepare(optim,
        paddle.nn.CrossEntropyLoss(),
        paddle.metric.Accuracy(),
        amp_configs=amp_configs)

    transform = T.Compose([T.Transpose(), T.Normalize([127.5], [127.5])])
    data = paddle.vision.datasets.MNIST(mode='train', transform=transform)
    model.fit(data, epochs=2, batch_size=32, verbose=1)

# mixed precision training is only supported on GPU now.
if paddle.is_compiled_with_cuda():
    run_example_code()
"""
@@ -1059,12 +1061,12 @@ class Model(object):
inputs (numpy.ndarray|Tensor|list): Batch of input data. It could
be a numpy array or paddle.Tensor, or a list of arrays or
tensors (in case the model has multiple inputs).
labels (numpy.ndarray|Tensor|list, optional): Batch of labels. It could be
    a numpy array or paddle.Tensor, or a list of arrays or tensors
    (in case the model has multiple labels). If the model has no labels,
    set to None. Default: None.
update (bool, optional): Whether to update parameters after loss.backward() computing.
    Set it to False to accumulate gradients (see the accumulation sketch
    after the example below). Default: True.
Returns:
A list of scalar training loss if the model has no metrics,
@@ -1074,29 +1076,30 @@ class Model(object):
Examples:
.. code-block:: python
:name: code-example-train-batch
import paddle
import paddle.nn as nn
from paddle.static import InputSpec

device = paddle.set_device('cpu') # or 'gpu'

net = nn.Sequential(
    nn.Linear(784, 200),
    nn.Tanh(),
    nn.Linear(200, 10))

input = InputSpec([None, 784], 'float32', 'x')
label = InputSpec([None, 1], 'int64', 'label')
model = paddle.Model(net, input, label)
optim = paddle.optimizer.SGD(learning_rate=1e-3,
    parameters=model.parameters())
model.prepare(optim, paddle.nn.CrossEntropyLoss())
data = paddle.rand((4, 784), dtype="float32")
label = paddle.randint(0, 10, (4, 1), dtype="int64")
loss = model.train_batch([data], [label])
print(loss)
# [array([2.192784], dtype=float32)]
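A minimal sketch of gradient accumulation with ``update=False``, reusing
``model``, ``data`` and ``label`` from the example above; the optimizer only
steps on the final micro-batch:

.. code-block:: python

    # accumulate gradients over 4 micro-batches, update parameters once
    for i in range(4):
        loss = model.train_batch([data], [label], update=(i == 3))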
"""
loss = self._adapter.train_batch(inputs, labels, update)
if fluid._non_static_mode() and self._input_info is None:
@@ -1112,10 +1115,10 @@ class Model(object):
inputs (numpy.ndarray|Tensor|list): Batch of input data. It could
be a numpy array or paddle.Tensor, or a list of arrays or
tensors (in case the model has multiple inputs).
labels (numpy.ndarray|Tensor|list, optional): Batch of labels. It could be
    a numpy array or paddle.Tensor, or a list of arrays or tensors
    (in case the model has multiple labels). If the model has no labels,
    set to None. Default: None.
Returns:
A list of scalar testing loss if the model has no metrics,
@@ -1125,30 +1128,31 @@ class Model(object):
Examples:
.. code-block:: python
:name: code-example-eval-batch

import paddle
import paddle.nn as nn
from paddle.static import InputSpec

device = paddle.set_device('cpu') # or 'gpu'

net = nn.Sequential(
    nn.Linear(784, 200),
    nn.Tanh(),
    nn.Linear(200, 10))

input = InputSpec([None, 784], 'float32', 'x')
label = InputSpec([None, 1], 'int64', 'label')
model = paddle.Model(net, input, label)
optim = paddle.optimizer.SGD(learning_rate=1e-3,
    parameters=model.parameters())
model.prepare(optim,
    paddle.nn.CrossEntropyLoss(), metrics=paddle.metric.Accuracy())
data = paddle.rand((4, 784), dtype="float32")
label = paddle.randint(0, 10, (4, 1), dtype="int64")
loss, acc = model.eval_batch([data], [label])
print(loss, acc)
# [array([2.8825705], dtype=float32)] [0.0]
"""
loss = self._adapter.eval_batch(inputs, labels)
if fluid._non_static_mode() and self._input_info is None:
@@ -1172,28 +1176,31 @@ class Model(object):
Examples:
.. code-block:: python
:name: code-example-predict-batch

import paddle
import paddle.nn as nn
from paddle.static import InputSpec

device = paddle.set_device('cpu') # or 'gpu'

input = InputSpec([None, 784], 'float32', 'x')
label = InputSpec([None, 1], 'int64', 'label')

net = nn.Sequential(
    nn.Linear(784, 200),
    nn.Tanh(),
    nn.Linear(200, 10),
    nn.Softmax())

model = paddle.Model(net, input, label)
model.prepare()
data = paddle.rand((1, 784), dtype="float32")
out = model.predict_batch([data])
print(out)
# [array([[0.08189095, 0.16740078, 0.06889386, 0.05085445, 0.10729759,
#          0.02217775, 0.14518553, 0.1591538 , 0.01808308, 0.17906217]],
#        dtype=float32)]
"""
loss = self._adapter.predict_batch(inputs)
if fluid._non_static_mode() and self._input_info is None:
@@ -1229,6 +1236,7 @@ class Model(object):
Examples:
.. code-block:: python
:name: code-example-save
import paddle
import paddle.nn as nn
@@ -1259,7 +1267,7 @@ class Model(object):
optim = paddle.optimizer.SGD(learning_rate=1e-3,
parameters=model.parameters())
model.prepare(optim, paddle.nn.CrossEntropyLoss())
transform = T.Compose([
T.Transpose(),
T.Normalize([127.5], [127.5])
@@ -1294,14 +1302,14 @@ class Model(object):
optimizer states. The files would be `path.pdparams` and
`path.pdopt` separately, and the latter is not necessary
when there is no need to restore it.
skip_mismatch (bool, optional): Whether to skip the loading of a mismatched
    parameter or raise an error when a mismatch happens (the parameter
    is not found in the file storing model states, or it receives a
    mismatched shape). Default: False.
reset_optimizer (bool, optional): If True, ignore the provided file storing
    optimizer states and initialize optimizer states from scratch.
    Otherwise, restore optimizer states from `path.pdopt` if
    an optimizer has been set to the model (see the variant sketch after
    the example below). Default: False.
Returns:
None
@@ -1309,23 +1317,24 @@ class Model(object):
Examples:
.. code-block:: python
:name: code-example-load

import paddle
import paddle.nn as nn
from paddle.static import InputSpec

device = paddle.set_device('cpu')

input = InputSpec([None, 784], 'float32', 'x')

model = paddle.Model(nn.Sequential(
    nn.Linear(784, 200),
    nn.Tanh(),
    nn.Linear(200, 10),
    nn.Softmax()), input)

model.save('checkpoint/test')
model.load('checkpoint/test')
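A variant sketch reusing the checkpoint above: tolerate parameter shape
mismatches and re-initialize optimizer states instead of restoring them.

.. code-block:: python

    # assumes 'checkpoint/test' was saved as in the example above
    model.load('checkpoint/test', skip_mismatch=True, reset_optimizer=True)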
"""
def _load_state_from_path(path):
@@ -1395,19 +1404,20 @@ class Model(object):
Examples:
.. code-block:: python
:name: code-example-parameters
import paddle
import paddle.nn as nn
from paddle.static import InputSpec

input = InputSpec([None, 784], 'float32', 'x')

model = paddle.Model(nn.Sequential(
    nn.Linear(784, 200),
    nn.Tanh(),
    nn.Linear(200, 10)), input)

params = model.parameters()
"""
return self._adapter.parameters()
@@ -1501,16 +1511,16 @@ class Model(object):
Configures the model before running.
Args:
optimizer (Optimizer|None, optional): Optimizer must be set in training
    and should be an Optimizer instance. It can be None in eval
    and test mode. Default: None.
loss (Loss|Callable|None, optional): Loss function can
    be a `paddle.nn.Layer` instance or any callable function
    that takes the predicted values and ground truth values as input.
    It can be None when there is no loss. Default: None.
metrics (Metric|list[Metric]|None, optional): If metrics is set, all
    metrics will be calculated and output in train/eval mode. Default: None.
amp_configs (str|dict|None, optional): AMP configurations. If AMP or pure
    float16 training is used, the key 'level' of 'amp_configs'
    should be set to 'O1' or 'O2' respectively. Otherwise, the
    value of 'level' defaults to 'O0', which means float32
@@ -1526,6 +1536,7 @@ class Model(object):
for details. For convenience, 'amp_configs' could be set to
'O1' or 'O2' if no more parameters are needed. 'amp_configs'
could be None in float32 training. Default: None.
Returns:
None
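Examples:
    A minimal usage sketch, mirroring the constructor examples above (for
    AMP training, 'amp_configs' could simply be set to 'O1' or 'O2', as
    described in the Args section):

    .. code-block:: python

        import paddle
        import paddle.nn as nn

        model = paddle.Model(nn.Sequential(nn.Flatten(1), nn.Linear(784, 10)))
        optim = paddle.optimizer.SGD(learning_rate=1e-3,
            parameters=model.parameters())
        model.prepare(optim,
            paddle.nn.CrossEntropyLoss(),
            paddle.metric.Accuracy())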
"""
@@ -1587,133 +1598,133 @@ class Model(object):
evaluation will be done at the end of each epoch.
Args:
train_data (Dataset|DataLoader, optional): An iterable data loader is used for
    train. An instance of paddle.io.Dataset or
    paddle.io.Dataloader is recommended. Default: None.
eval_data (Dataset|DataLoader, optional): An iterable data loader is used for
    evaluation at the end of epoch. If None, will not do evaluation.
    An instance of paddle.io.Dataset or paddle.io.Dataloader
    is recommended. Default: None.
batch_size (int, optional): The batch size of train_data and eval_data. When
    train_data and eval_data are both the instance of Dataloader, this
    parameter will be ignored. Default: 1.
epochs (int, optional): The number of epochs to train the model. Default: 1.
eval_freq (int, optional): The frequency, in number of epochs, an evaluation
    is performed. Default: 1.
log_freq (int, optional): The frequency, in number of steps, the training logs
    are printed. Default: 10.
save_dir (str|None, optional): The directory to save checkpoint during training.
    If None, will not save checkpoint. Default: None.
save_freq (int, optional): The frequency, in number of epochs, to save
    checkpoint. Default: 1.
verbose (int, optional): The verbosity mode, should be 0, 1, or 2. 0 = silent,
    1 = progress bar, 2 = one line per epoch. Default: 2.
drop_last (bool, optional): Whether to drop the last incomplete batch of
    train_data when dataset size is not divisible by the batch size.
    When train_data is an instance of Dataloader, this parameter
    will be ignored. Default: False.
shuffle (bool, optional): Whether to shuffle train_data. When train_data is
    an instance of Dataloader, this parameter will be ignored.
    Default: True.
num_workers (int, optional): The number of subprocesses to load data, 0 for no
    subprocess used and loading data in main process.
    When train_data and eval_data are both the instance of
    Dataloader, this parameter will be ignored. Default: 0.
callbacks (Callback|None, optional): A list of `Callback` instances to apply
    during training. If None, :ref:`api_paddle_callbacks_ProgBarLogger` and
    :ref:`api_paddle_callbacks_ModelCheckpoint` are automatically inserted. Default: None.
accumulate_grad_batches (int, optional): The number of batches to accumulate gradients
    during training process before optimizer updates. It can mimic large batch
    size (see the sketch after the examples below). Default: 1.
num_iters (int|None, optional): The number of iterations to train the model.
    If None, follow `epochs` to train the model, otherwise, train the model
    `num_iters` times. Default: None.
Returns:
None
Examples:
1. An example using a Dataset, with batch size and shuffle set in fit.
Batching is done internally.
.. code-block:: python
:name: code-example-fit-1
import paddle
import paddle.vision.transforms as T
from paddle.vision.datasets import MNIST
from paddle.static import InputSpec

dynamic = True
if not dynamic:
    paddle.enable_static()

transform = T.Compose([
    T.Transpose(),
    T.Normalize([127.5], [127.5])
])
train_dataset = MNIST(mode='train', transform=transform)
val_dataset = MNIST(mode='test', transform=transform)

input = InputSpec([None, 1, 28, 28], 'float32', 'image')
label = InputSpec([None, 1], 'int64', 'label')

model = paddle.Model(
    paddle.vision.models.LeNet(),
    input, label)
optim = paddle.optimizer.Adam(
    learning_rate=0.001, parameters=model.parameters())
model.prepare(
    optim,
    paddle.nn.CrossEntropyLoss(),
    paddle.metric.Accuracy(topk=(1, 2)))
model.fit(train_dataset,
          val_dataset,
          epochs=2,
          batch_size=64,
          save_dir='mnist_checkpoint')
2. An example using a DataLoader, with batch size and shuffle set in the
DataLoader.
.. code-block:: python
:name: code-example-fit-2
import paddle
import paddle.vision.transforms as T
from paddle.vision.datasets import MNIST
from paddle.static import InputSpec

dynamic = True
if not dynamic:
    paddle.enable_static()

transform = T.Compose([
    T.Transpose(),
    T.Normalize([127.5], [127.5])
])
train_dataset = MNIST(mode='train', transform=transform)
train_loader = paddle.io.DataLoader(train_dataset,
    batch_size=64)
val_dataset = MNIST(mode='test', transform=transform)
val_loader = paddle.io.DataLoader(val_dataset,
    batch_size=64)

input = InputSpec([None, 1, 28, 28], 'float32', 'image')
label = InputSpec([None, 1], 'int64', 'label')

model = paddle.Model(
    paddle.vision.models.LeNet(), input, label)
optim = paddle.optimizer.Adam(
    learning_rate=0.001, parameters=model.parameters())
model.prepare(
    optim,
    paddle.nn.CrossEntropyLoss(),
    paddle.metric.Accuracy(topk=(1, 2)))
model.fit(train_loader,
          val_loader,
          epochs=2,
          save_dir='mnist_checkpoint')
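3. A minimal sketch of gradient accumulation in fit, reusing the
``train_loader`` and ``model`` built in the example above; parameters
update once every 4 batches, mimicking a 4x larger batch size.

.. code-block:: python

    # effective batch size is 64 * 4 = 256
    model.fit(train_loader, epochs=1, accumulate_grad_batches=4)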
"""
assert train_data is not None, \
"train_data must be given!"
@@ -1809,23 +1820,23 @@ class Model(object):
eval_data (Dataset|DataLoader): An iterable data loader is used for
    evaluation. An instance of paddle.io.Dataset or
    paddle.io.Dataloader is recommended.
batch_size (int, optional): The batch size of train_data and eval_data.
    When eval_data is the instance of Dataloader, this argument will be
    ignored. Default: 1.
log_freq (int, optional): The frequency, in number of steps, the eval logs
    are printed. Default: 10.
verbose (int, optional): The verbosity mode, should be 0, 1, or 2. 0 = silent,
    1 = progress bar, 2 = one line per epoch. Default: 2.
num_workers (int, optional): The number of subprocesses to load data,
    0 for no subprocess used and loading data in main process. When
    train_data and eval_data are both the instance of Dataloader,
    this parameter will be ignored. Default: 0.
callbacks (Callback|None, optional): A list of `Callback` instances to apply
    during evaluation. If None, `ProgBarLogger` and `ModelCheckpoint`
    are automatically inserted. Default: None.
num_iters (int|None, optional): The number of iterations to evaluate the model.
    If None, evaluate on whole input dataset, otherwise, evaluate `num_iters`
    times (see the sketch after the example below). Default: None.
Returns:
dict: Result of metrics. The keys are the names of the Metrics;
the values are scalars or numpy arrays.
@@ -1833,24 +1844,26 @@ class Model(object):
Examples:
.. code-block:: python
:name: code-example-evaluate
import paddle
import paddle.vision.transforms as T
from paddle.static import InputSpec

# declarative mode
transform = T.Compose([
    T.Transpose(),
    T.Normalize([127.5], [127.5])
])
val_dataset = paddle.vision.datasets.MNIST(mode='test', transform=transform)

input = InputSpec([-1, 1, 28, 28], 'float32', 'image')
label = InputSpec([None, 1], 'int64', 'label')
model = paddle.Model(paddle.vision.models.LeNet(), input, label)
model.prepare(metrics=paddle.metric.Accuracy())
result = model.evaluate(val_dataset, batch_size=64)
print(result)
# {'acc': 0.0699}
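A sketch of a quick partial evaluation via ``num_iters``, reusing
``val_dataset`` and ``model`` from above; only 10 batches are evaluated:

.. code-block:: python

    result = model.evaluate(val_dataset, batch_size=64, num_iters=10)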
"""
if eval_data is not None and isinstance(eval_data, Dataset):
@@ -1912,21 +1925,20 @@ class Model(object):
test_data (Dataset|DataLoader): An iterable data loader is used for
    predict. An instance of paddle.io.Dataset or paddle.io.Dataloader
    is recommended.
batch_size (int, optional): The batch size of test_data. When test_data is the
    instance of Dataloader, this argument will be ignored. Default: 1.
num_workers (int, optional): The number of subprocesses to load data, 0 for no subprocess
    used and loading data in main process. When test_data is the instance of Dataloader,
    this argument will be ignored. Default: 0.
stack_outputs (bool, optional): Whether to stack output fields like a batch:
    if an output field of a sample is in shape [X, Y] and test_data contains
    N samples, the predicted output field will be in shape [N, X, Y] if
    stack_outputs is True, and will be a length-N list in shape
    [[X, Y], [X, Y], ..., [X, Y]] if stack_outputs is False. stack_outputs
    as False is used for LoDTensor output situations; it is recommended to
    set it to True if the outputs contain no LoDTensor (see the sketch after
    the example below). Default: False.
verbose (int, optional): The verbosity mode, should be 0, 1, or 2. 0 = silent,
    1 = progress bar, 2 = one line per batch. Default: 1.
callbacks (Callback, optional): A Callback instance. Default: None.
Returns:
list: output of models.
@@ -1934,43 +1946,46 @@ class Model(object):
Examples:
.. code-block:: python
:name: code-example-predict
import numpy as np
import paddle
from paddle.static import InputSpec

class MnistDataset(paddle.vision.datasets.MNIST):
    def __init__(self, mode, return_label=True):
        super(MnistDataset, self).__init__(mode=mode)
        self.return_label = return_label

    def __getitem__(self, idx):
        img = np.reshape(self.images[idx], [1, 28, 28])
        if self.return_label:
            return img, np.array(self.labels[idx]).astype('int64')
        return img,

    def __len__(self):
        return len(self.images)

test_dataset = MnistDataset(mode='test', return_label=False)

# imperative mode
input = InputSpec([-1, 1, 28, 28], 'float32', 'image')
model = paddle.Model(paddle.vision.models.LeNet(), input)
model.prepare()
result = model.predict(test_dataset, batch_size=64)
print(len(result[0]), result[0][0].shape)
# 157 (64, 10)

# declarative mode
device = paddle.set_device('cpu')
paddle.enable_static()
input = InputSpec([-1, 1, 28, 28], 'float32', 'image')
model = paddle.Model(paddle.vision.models.LeNet(), input)
model.prepare()
result = model.predict(test_dataset, batch_size=64)
print(len(result[0]), result[0][0].shape)
# 157 (64, 10)
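A sketch of the ``stack_outputs`` switch, reusing ``test_dataset`` and
``model`` from above: with ``stack_outputs=True`` each output field is
stacked into a single array along the batch dimension, instead of a
length-157 list of per-batch arrays.

.. code-block:: python

    result = model.predict(test_dataset, batch_size=64, stack_outputs=True)
    print(result[0].shape)
    # expected: (10000, 10), since the MNIST test set has 10000 samples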
"""
if test_data is not None and isinstance(test_data, Dataset):
@@ -2164,23 +2179,25 @@ class Model(object):
Examples:
.. code-block:: python
:name: code-example-summary
import paddle
from paddle.static import InputSpec

input = InputSpec([None, 1, 28, 28], 'float32', 'image')
label = InputSpec([None, 1], 'int64', 'label')

model = paddle.Model(paddle.vision.models.LeNet(),
    input, label)
optim = paddle.optimizer.Adam(
    learning_rate=0.001, parameters=model.parameters())
model.prepare(
    optim,
    paddle.nn.CrossEntropyLoss())

params_info = model.summary()
print(params_info)
# {'total_params': 61610, 'trainable_params': 61610}
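A variant sketch passing an explicit ``input_size`` (a hypothetical value;
the assertion in the implementation below suggests that either
``input_size`` or pre-declared inputs must be available):

.. code-block:: python

    # assumed input_size format: (batch, channels, height, width)
    params_info = model.summary(input_size=(1, 1, 28, 28))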
"""
assert (input_size is not None or self._inputs