未验证 提交 220b676d 编写于 作者: O ooooo-create 提交者: GitHub

[xdoctest][task 60-64] reformat example code with google style in...

[xdoctest][task 60-64] reformat example code with google style in `geometric/*` ,`hapi/callbacks.py` (#55919)

* [Doctest]fix No.21, test=docs_preview

* Revert "[Doctest]fix No.21, test=docs_preview"

This reverts commit 76bcdb280e254d682be6fc6f85588f1940bb1ade.

* [Doctest]fix No.60-64, test=docs_preview
上级 801a8655
......@@ -43,11 +43,13 @@ def segment_sum(data, segment_ids, name=None):
Examples:
.. code-block:: python
import paddle
data = paddle.to_tensor([[1, 2, 3], [3, 2, 1], [4, 5, 6]], dtype='float32')
segment_ids = paddle.to_tensor([0, 0, 1], dtype='int32')
out = paddle.geometric.segment_sum(data, segment_ids)
#Outputs: [[4., 4., 4.], [4., 5., 6.]]
>>> import paddle
>>> data = paddle.to_tensor([[1, 2, 3], [3, 2, 1], [4, 5, 6]], dtype='float32')
>>> segment_ids = paddle.to_tensor([0, 0, 1], dtype='int32')
>>> out = paddle.geometric.segment_sum(data, segment_ids)
>>> print(out.numpy())
[[4. 4. 4.]
[4. 5. 6.]]
"""
if in_dynamic_mode():
......@@ -99,11 +101,13 @@ def segment_mean(data, segment_ids, name=None):
Examples:
.. code-block:: python
import paddle
data = paddle.to_tensor([[1, 2, 3], [3, 2, 1], [4, 5, 6]], dtype='float32')
segment_ids = paddle.to_tensor([0, 0, 1], dtype='int32')
out = paddle.geometric.segment_mean(data, segment_ids)
#Outputs: [[2., 2., 2.], [4., 5., 6.]]
>>> import paddle
>>> data = paddle.to_tensor([[1, 2, 3], [3, 2, 1], [4, 5, 6]], dtype='float32')
>>> segment_ids = paddle.to_tensor([0, 0, 1], dtype='int32')
>>> out = paddle.geometric.segment_mean(data, segment_ids)
>>> print(out.numpy())
[[2. 2. 2.]
[4. 5. 6.]]
"""
......@@ -155,11 +159,13 @@ def segment_min(data, segment_ids, name=None):
Examples:
.. code-block:: python
import paddle
data = paddle.to_tensor([[1, 2, 3], [3, 2, 1], [4, 5, 6]], dtype='float32')
segment_ids = paddle.to_tensor([0, 0, 1], dtype='int32')
out = paddle.geometric.segment_min(data, segment_ids)
#Outputs: [[1., 2., 1.], [4., 5., 6.]]
>>> import paddle
>>> data = paddle.to_tensor([[1, 2, 3], [3, 2, 1], [4, 5, 6]], dtype='float32')
>>> segment_ids = paddle.to_tensor([0, 0, 1], dtype='int32')
>>> out = paddle.geometric.segment_min(data, segment_ids)
>>> print(out.numpy())
[[1. 2. 1.]
[4. 5. 6.]]
"""
......@@ -211,11 +217,13 @@ def segment_max(data, segment_ids, name=None):
Examples:
.. code-block:: python
import paddle
data = paddle.to_tensor([[1, 2, 3], [3, 2, 1], [4, 5, 6]], dtype='float32')
segment_ids = paddle.to_tensor([0, 0, 1], dtype='int32')
out = paddle.geometric.segment_max(data, segment_ids)
#Outputs: [[3., 2., 3.], [4., 5., 6.]]
>>> import paddle
>>> data = paddle.to_tensor([[1, 2, 3], [3, 2, 1], [4, 5, 6]], dtype='float32')
>>> segment_ids = paddle.to_tensor([0, 0, 1], dtype='int32')
>>> out = paddle.geometric.segment_max(data, segment_ids)
>>> print(out.numpy())
[[3. 2. 3.]
[4. 5. 6.]]
"""
......
......@@ -88,26 +88,34 @@ def send_u_recv(
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([[0, 2, 3], [1, 4, 5], [2, 6, 7]], dtype="float32")
indexes = paddle.to_tensor([[0, 1], [1, 2], [2, 1], [0, 0]], dtype="int32")
src_index, dst_index = indexes[:, 0], indexes[:, 1]
out = paddle.geometric.send_u_recv(x, src_index, dst_index, reduce_op="sum")
# Outputs: [[0., 2., 3.], [2., 8., 10.], [1., 4., 5.]]
x = paddle.to_tensor([[0, 2, 3], [1, 4, 5], [2, 6, 7]], dtype="float32")
indexes = paddle.to_tensor([[0, 1], [2, 1], [0, 0]], dtype="int32")
src_index, dst_index = indexes[:, 0], indexes[:, 1]
out_size = paddle.max(dst_index) + 1
out = paddle.geometric.send_u_recv(x, src_index, dst_index, reduce_op="sum", out_size=out_size)
# Outputs: [[0., 2., 3.], [[2., 8., 10.]]]
x = paddle.to_tensor([[0, 2, 3], [1, 4, 5], [2, 6, 7]], dtype="float32")
indexes = paddle.to_tensor([[0, 1], [2, 1], [0, 0]], dtype="int32")
src_index, dst_index = indexes[:, 0], indexes[:, 1]
out = paddle.geometric.send_u_recv(x, src_index, dst_index, reduce_op="sum")
# Outputs: [[0., 2., 3.], [2., 8., 10.], [0., 0., 0.]]
>>> import paddle
>>> x = paddle.to_tensor([[0, 2, 3], [1, 4, 5], [2, 6, 7]], dtype="float32")
>>> indexes = paddle.to_tensor([[0, 1], [1, 2], [2, 1], [0, 0]], dtype="int32")
>>> src_index, dst_index = indexes[:, 0], indexes[:, 1]
>>> out = paddle.geometric.send_u_recv(x, src_index, dst_index, reduce_op="sum")
>>> print(out.numpy())
[[ 0. 2. 3.]
[ 2. 8. 10.]
[ 1. 4. 5.]]
>>> x = paddle.to_tensor([[0, 2, 3], [1, 4, 5], [2, 6, 7]], dtype="float32")
>>> indexes = paddle.to_tensor([[0, 1], [2, 1], [0, 0]], dtype="int32")
>>> src_index, dst_index = indexes[:, 0], indexes[:, 1]
>>> out_size = paddle.max(dst_index) + 1
>>> out = paddle.geometric.send_u_recv(x, src_index, dst_index, reduce_op="sum", out_size=out_size)
>>> print(out.numpy())
[[ 0. 2. 3.]
[ 2. 8. 10.]]
>>> x = paddle.to_tensor([[0, 2, 3], [1, 4, 5], [2, 6, 7]], dtype="float32")
>>> indexes = paddle.to_tensor([[0, 1], [2, 1], [0, 0]], dtype="int32")
>>> src_index, dst_index = indexes[:, 0], indexes[:, 1]
>>> out = paddle.geometric.send_u_recv(x, src_index, dst_index, reduce_op="sum")
>>> print(out.numpy())
[[ 0. 2. 3.]
[ 2. 8. 10.]
[ 0. 0. 0.]]
"""
......@@ -247,29 +255,37 @@ def send_ue_recv(
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([[0, 2, 3], [1, 4, 5], [2, 6, 7]], dtype="float32")
y = paddle.to_tensor([1, 1, 1, 1], dtype="float32")
indexes = paddle.to_tensor([[0, 1], [1, 2], [2, 1], [0, 0]], dtype="int32")
src_index, dst_index = indexes[:, 0], indexes[:, 1]
out = paddle.geometric.send_ue_recv(x, y, src_index, dst_index, message_op="add", reduce_op="sum")
# Outputs: [[1., 3., 4.], [4., 10., 12.], [2., 5., 6.]]
x = paddle.to_tensor([[0, 2, 3], [1, 4, 5], [2, 6, 7]], dtype="float32")
y = paddle.to_tensor([1, 1, 1], dtype="float32")
indexes = paddle.to_tensor([[0, 1], [2, 1], [0, 0]], dtype="int32")
src_index, dst_index = indexes[:, 0], indexes[:, 1]
out_size = paddle.max(dst_index) + 1
out = paddle.geometric.send_ue_recv(x, y, src_index, dst_index, message_op="add", reduce_op="sum", out_size=out_size)
# Outputs: [[1., 3., 4.], [[4., 10., 12.]]]
x = paddle.to_tensor([[0, 2, 3], [1, 4, 5], [2, 6, 7]], dtype="float32")
y = paddle.to_tensor([1, 1, 1], dtype="float32")
indexes = paddle.to_tensor([[0, 1], [2, 1], [0, 0]], dtype="int32")
src_index, dst_index = indexes[:, 0], indexes[:, 1]
out = paddle.geometric.send_ue_recv(x, y, src_index, dst_index, message_op="add", reduce_op="sum")
# Outputs: [[1., 3., 4.], [4., 10., 12.], [0., 0., 0.]]
>>> import paddle
>>> x = paddle.to_tensor([[0, 2, 3], [1, 4, 5], [2, 6, 7]], dtype="float32")
>>> y = paddle.to_tensor([1, 1, 1, 1], dtype="float32")
>>> indexes = paddle.to_tensor([[0, 1], [1, 2], [2, 1], [0, 0]], dtype="int32")
>>> src_index, dst_index = indexes[:, 0], indexes[:, 1]
>>> out = paddle.geometric.send_ue_recv(x, y, src_index, dst_index, message_op="add", reduce_op="sum")
>>> print(out.numpy())
[[ 1. 3. 4.]
[ 4. 10. 12.]
[ 2. 5. 6.]]
>>> x = paddle.to_tensor([[0, 2, 3], [1, 4, 5], [2, 6, 7]], dtype="float32")
>>> y = paddle.to_tensor([1, 1, 1], dtype="float32")
>>> indexes = paddle.to_tensor([[0, 1], [2, 1], [0, 0]], dtype="int32")
>>> src_index, dst_index = indexes[:, 0], indexes[:, 1]
>>> out_size = paddle.max(dst_index) + 1
>>> out = paddle.geometric.send_ue_recv(x, y, src_index, dst_index, message_op="add", reduce_op="sum", out_size=out_size)
>>> print(out.numpy())
[[ 1. 3. 4.]
[ 4. 10. 12.]]
>>> x = paddle.to_tensor([[0, 2, 3], [1, 4, 5], [2, 6, 7]], dtype="float32")
>>> y = paddle.to_tensor([1, 1, 1], dtype="float32")
>>> indexes = paddle.to_tensor([[0, 1], [2, 1], [0, 0]], dtype="int32")
>>> src_index, dst_index = indexes[:, 0], indexes[:, 1]
>>> out = paddle.geometric.send_ue_recv(x, y, src_index, dst_index, message_op="add", reduce_op="sum")
>>> print(out.numpy())
[[ 1. 3. 4.]
[ 4. 10. 12.]
[ 0. 0. 0.]]
"""
......@@ -425,15 +441,19 @@ def send_uv(x, y, src_index, dst_index, message_op="add", name=None):
.. code-block:: python
import paddle
x = paddle.to_tensor([[0, 2, 3], [1, 4, 5], [2, 6, 7]], dtype="float32")
y = paddle.to_tensor([[0, 1, 2], [2, 3, 4], [4, 5, 6]], dtype="float32")
indexes = paddle.to_tensor([[0, 1], [1, 2], [2, 1], [0, 0]], dtype="int32")
src_index = indexes[:, 0]
dst_index = indexes[:, 1]
out = paddle.geometric.send_uv(x, y, src_index, dst_index, message_op="add")
# Outputs: [[2., 5., 7.], [5., 9., 11.], [4., 9., 11.], [0., 3., 5.]]
>>> import paddle
>>> x = paddle.to_tensor([[0, 2, 3], [1, 4, 5], [2, 6, 7]], dtype="float32")
>>> y = paddle.to_tensor([[0, 1, 2], [2, 3, 4], [4, 5, 6]], dtype="float32")
>>> indexes = paddle.to_tensor([[0, 1], [1, 2], [2, 1], [0, 0]], dtype="int32")
>>> src_index = indexes[:, 0]
>>> dst_index = indexes[:, 1]
>>> out = paddle.geometric.send_uv(x, y, src_index, dst_index, message_op="add")
>>> print(out.numpy())
[[ 2. 5. 7.]
[ 5. 9. 11.]
[ 4. 9. 11.]
[ 0. 3. 5.]]
"""
......
......@@ -69,18 +69,21 @@ def reindex_graph(
Examples:
.. code-block:: python
import paddle
x = [0, 1, 2]
neighbors = [8, 9, 0, 4, 7, 6, 7]
count = [2, 3, 2]
x = paddle.to_tensor(x, dtype="int64")
neighbors = paddle.to_tensor(neighbors, dtype="int64")
count = paddle.to_tensor(count, dtype="int32")
reindex_src, reindex_dst, out_nodes = paddle.geometric.reindex_graph(x, neighbors, count)
# reindex_src: [3, 4, 0, 5, 6, 7, 6]
# reindex_dst: [0, 0, 1, 1, 1, 2, 2]
# out_nodes: [0, 1, 2, 8, 9, 4, 7, 6]
>>> import paddle
>>> x = [0, 1, 2]
>>> neighbors = [8, 9, 0, 4, 7, 6, 7]
>>> count = [2, 3, 2]
>>> x = paddle.to_tensor(x, dtype="int64")
>>> neighbors = paddle.to_tensor(neighbors, dtype="int64")
>>> count = paddle.to_tensor(count, dtype="int32")
>>> reindex_src, reindex_dst, out_nodes = paddle.geometric.reindex_graph(x, neighbors, count)
>>> print(reindex_src.numpy())
[3 4 0 5 6 7 6]
>>> print(reindex_dst.numpy())
[0 0 1 1 1 2 2]
>>> print(out_nodes.numpy())
[0 1 2 8 9 4 7 6]
"""
use_buffer_hashtable = (
......@@ -182,24 +185,27 @@ def reindex_heter_graph(
Examples:
.. code-block:: python
import paddle
x = [0, 1, 2]
neighbors_a = [8, 9, 0, 4, 7, 6, 7]
count_a = [2, 3, 2]
x = paddle.to_tensor(x, dtype="int64")
neighbors_a = paddle.to_tensor(neighbors_a, dtype="int64")
count_a = paddle.to_tensor(count_a, dtype="int32")
neighbors_b = [0, 2, 3, 5, 1]
count_b = [1, 3, 1]
neighbors_b = paddle.to_tensor(neighbors_b, dtype="int64")
count_b = paddle.to_tensor(count_b, dtype="int32")
neighbors = [neighbors_a, neighbors_b]
count = [count_a, count_b]
reindex_src, reindex_dst, out_nodes = paddle.geometric.reindex_heter_graph(x, neighbors, count)
# reindex_src: [3, 4, 0, 5, 6, 7, 6, 0, 2, 8, 9, 1]
# reindex_dst: [0, 0, 1, 1, 1, 2, 2, 0, 1, 1, 1, 2]
# out_nodes: [0, 1, 2, 8, 9, 4, 7, 6, 3, 5]
>>> import paddle
>>> x = [0, 1, 2]
>>> neighbors_a = [8, 9, 0, 4, 7, 6, 7]
>>> count_a = [2, 3, 2]
>>> x = paddle.to_tensor(x, dtype="int64")
>>> neighbors_a = paddle.to_tensor(neighbors_a, dtype="int64")
>>> count_a = paddle.to_tensor(count_a, dtype="int32")
>>> neighbors_b = [0, 2, 3, 5, 1]
>>> count_b = [1, 3, 1]
>>> neighbors_b = paddle.to_tensor(neighbors_b, dtype="int64")
>>> count_b = paddle.to_tensor(count_b, dtype="int32")
>>> neighbors = [neighbors_a, neighbors_b]
>>> count = [count_a, count_b]
>>> reindex_src, reindex_dst, out_nodes = paddle.geometric.reindex_heter_graph(x, neighbors, count)
>>> print(reindex_src.numpy())
[3 4 0 5 6 7 6 0 2 8 9 1]
>>> print(reindex_dst.numpy())
[0 0 1 1 1 2 2 0 1 1 1 2]
>>> print(out_nodes.numpy())
[0 1 2 8 9 4 7 6 3 5]
"""
use_buffer_hashtable = (
......
......@@ -77,18 +77,18 @@ def sample_neighbors(
Examples:
.. code-block:: python
import paddle
# edges: (3, 0), (7, 0), (0, 1), (9, 1), (1, 2), (4, 3), (2, 4),
# (9, 5), (3, 5), (9, 6), (1, 6), (9, 8), (7, 8)
row = [3, 7, 0, 9, 1, 4, 2, 9, 3, 9, 1, 9, 7]
colptr = [0, 2, 4, 5, 6, 7, 9, 11, 11, 13, 13]
nodes = [0, 8, 1, 2]
sample_size = 2
row = paddle.to_tensor(row, dtype="int64")
colptr = paddle.to_tensor(colptr, dtype="int64")
nodes = paddle.to_tensor(nodes, dtype="int64")
out_neighbors, out_count = paddle.geometric.sample_neighbors(row, colptr, nodes, sample_size=sample_size)
>>> import paddle
>>> # edges: (3, 0), (7, 0), (0, 1), (9, 1), (1, 2), (4, 3), (2, 4),
>>> # (9, 5), (3, 5), (9, 6), (1, 6), (9, 8), (7, 8)
>>> row = [3, 7, 0, 9, 1, 4, 2, 9, 3, 9, 1, 9, 7]
>>> colptr = [0, 2, 4, 5, 6, 7, 9, 11, 11, 13, 13]
>>> nodes = [0, 8, 1, 2]
>>> sample_size = 2
>>> row = paddle.to_tensor(row, dtype="int64")
>>> colptr = paddle.to_tensor(colptr, dtype="int64")
>>> nodes = paddle.to_tensor(nodes, dtype="int64")
>>> out_neighbors, out_count = paddle.geometric.sample_neighbors(row, colptr, nodes, sample_size=sample_size)
"""
......@@ -228,20 +228,20 @@ def weighted_sample_neighbors(
Examples:
.. code-block:: python
import paddle
# edges: (3, 0), (7, 0), (0, 1), (9, 1), (1, 2), (4, 3), (2, 4),
# (9, 5), (3, 5), (9, 6), (1, 6), (9, 8), (7, 8)
row = [3, 7, 0, 9, 1, 4, 2, 9, 3, 9, 1, 9, 7]
colptr = [0, 2, 4, 5, 6, 7, 9, 11, 11, 13, 13]
weight = [0.1, 0.5, 0.2, 0.5, 0.9, 1.9, 2.0, 2.1, 0.01, 0.9, 0,12, 0.59, 0.67]
nodes = [0, 8, 1, 2]
sample_size = 2
row = paddle.to_tensor(row, dtype="int64")
colptr = paddle.to_tensor(colptr, dtype="int64")
weight = paddle.to_tensor(weight, dtype="float32")
nodes = paddle.to_tensor(nodes, dtype="int64")
out_neighbors, out_count = paddle.geometric.weighted_sample_neighbors(row, colptr, weight, nodes, sample_size=sample_size)
>>> import paddle
>>> # edges: (3, 0), (7, 0), (0, 1), (9, 1), (1, 2), (4, 3), (2, 4),
>>> # (9, 5), (3, 5), (9, 6), (1, 6), (9, 8), (7, 8)
>>> row = [3, 7, 0, 9, 1, 4, 2, 9, 3, 9, 1, 9, 7]
>>> colptr = [0, 2, 4, 5, 6, 7, 9, 11, 11, 13, 13]
>>> weight = [0.1, 0.5, 0.2, 0.5, 0.9, 1.9, 2.0, 2.1, 0.01, 0.9, 0.12, 0.59, 0.67]
>>> nodes = [0, 8, 1, 2]
>>> sample_size = 2
>>> row = paddle.to_tensor(row, dtype="int64")
>>> colptr = paddle.to_tensor(colptr, dtype="int64")
>>> weight = paddle.to_tensor(weight, dtype="float32")
>>> nodes = paddle.to_tensor(nodes, dtype="int64")
>>> out_neighbors, out_count = paddle.geometric.weighted_sample_neighbors(row, colptr, weight, nodes, sample_size=sample_size)
"""
......
......@@ -137,19 +137,19 @@ class Callback:
.. code-block:: python
import paddle
# build a simple model checkpoint callback
class ModelCheckpoint(paddle.callbacks.Callback):
def __init__(self, save_freq=1, save_dir=None):
self.save_freq = save_freq
self.save_dir = save_dir
def on_epoch_end(self, epoch, logs=None):
if self.model is not None and epoch % self.save_freq == 0:
path = '{}/{}'.format(self.save_dir, epoch)
print('save checkpoint at {}'.format(path))
self.model.save(path)
>>> import paddle
>>> # build a simple model checkpoint callback
>>> class ModelCheckpoint(paddle.callbacks.Callback):
... def __init__(self, save_freq=1, save_dir=None):
... self.save_freq = save_freq
... self.save_dir = save_dir
...
... def on_epoch_end(self, epoch, logs=None):
... if self.model is not None and epoch % self.save_freq == 0:
... path = '{}/{}'.format(self.save_dir, epoch)
... print('save checkpoint at {}'.format(path))
... self.model.save(path)
"""
......@@ -314,31 +314,31 @@ class ProgBarLogger(Callback):
Examples:
.. code-block:: python
import paddle
import paddle.vision.transforms as T
from paddle.vision.datasets import MNIST
from paddle.static import InputSpec
>>> import paddle
>>> import paddle.vision.transforms as T
>>> from paddle.vision.datasets import MNIST
>>> from paddle.static import InputSpec
inputs = [InputSpec([-1, 1, 28, 28], 'float32', 'image')]
labels = [InputSpec([None, 1], 'int64', 'label')]
>>> inputs = [InputSpec([-1, 1, 28, 28], 'float32', 'image')]
>>> labels = [InputSpec([None, 1], 'int64', 'label')]
transform = T.Compose([
T.Transpose(),
T.Normalize([127.5], [127.5])
])
train_dataset = MNIST(mode='train', transform=transform)
>>> transform = T.Compose([
... T.Transpose(),
... T.Normalize([127.5], [127.5])
... ])
>>> train_dataset = MNIST(mode='train', transform=transform)
lenet = paddle.vision.models.LeNet()
model = paddle.Model(lenet,
inputs, labels)
>>> lenet = paddle.vision.models.LeNet()
>>> model = paddle.Model(lenet,
... inputs, labels)
optim = paddle.optimizer.Adam(0.001, parameters=lenet.parameters())
model.prepare(optimizer=optim,
loss=paddle.nn.CrossEntropyLoss(),
metrics=paddle.metric.Accuracy())
>>> optim = paddle.optimizer.Adam(0.001, parameters=lenet.parameters())
>>> model.prepare(optimizer=optim,
... loss=paddle.nn.CrossEntropyLoss(),
... metrics=paddle.metric.Accuracy())
callback = paddle.callbacks.ProgBarLogger(log_freq=10)
model.fit(train_dataset, batch_size=64, callbacks=callback)
>>> callback = paddle.callbacks.ProgBarLogger(log_freq=10)
>>> model.fit(train_dataset, batch_size=64, callbacks=callback)
"""
def __init__(self, log_freq=1, verbose=2):
......@@ -562,31 +562,31 @@ class ModelCheckpoint(Callback):
Examples:
.. code-block:: python
import paddle
import paddle.vision.transforms as T
from paddle.vision.datasets import MNIST
from paddle.static import InputSpec
>>> import paddle
>>> import paddle.vision.transforms as T
>>> from paddle.vision.datasets import MNIST
>>> from paddle.static import InputSpec
inputs = [InputSpec([-1, 1, 28, 28], 'float32', 'image')]
labels = [InputSpec([None, 1], 'int64', 'label')]
>>> inputs = [InputSpec([-1, 1, 28, 28], 'float32', 'image')]
>>> labels = [InputSpec([None, 1], 'int64', 'label')]
transform = T.Compose([
T.Transpose(),
T.Normalize([127.5], [127.5])
])
train_dataset = MNIST(mode='train', transform=transform)
>>> transform = T.Compose([
... T.Transpose(),
... T.Normalize([127.5], [127.5])
... ])
>>> train_dataset = MNIST(mode='train', transform=transform)
lenet = paddle.vision.models.LeNet()
model = paddle.Model(lenet,
inputs, labels)
>>> lenet = paddle.vision.models.LeNet()
>>> model = paddle.Model(lenet,
... inputs, labels)
optim = paddle.optimizer.Adam(0.001, parameters=lenet.parameters())
model.prepare(optimizer=optim,
loss=paddle.nn.CrossEntropyLoss(),
metrics=paddle.metric.Accuracy())
>>> optim = paddle.optimizer.Adam(0.001, parameters=lenet.parameters())
>>> model.prepare(optimizer=optim,
... loss=paddle.nn.CrossEntropyLoss(),
... metrics=paddle.metric.Accuracy())
callback = paddle.callbacks.ModelCheckpoint(save_dir='./temp')
model.fit(train_dataset, batch_size=64, callbacks=callback)
>>> callback = paddle.callbacks.ModelCheckpoint(save_dir='./temp')
>>> model.fit(train_dataset, batch_size=64, callbacks=callback)
"""
def __init__(self, save_freq=1, save_dir=None):
......@@ -628,58 +628,58 @@ class LRScheduler(Callback):
Examples:
.. code-block:: python
import paddle
import paddle.vision.transforms as T
from paddle.static import InputSpec
inputs = [InputSpec([-1, 1, 28, 28], 'float32', 'image')]
labels = [InputSpec([None, 1], 'int64', 'label')]
transform = T.Compose([
T.Transpose(),
T.Normalize([127.5], [127.5])
])
train_dataset = paddle.vision.datasets.MNIST(mode='train', transform=transform)
lenet = paddle.vision.models.LeNet()
model = paddle.Model(lenet,
inputs, labels)
base_lr = 1e-3
boundaries = [5, 8]
wamup_steps = 4
def make_optimizer(parameters=None):
momentum = 0.9
weight_decay = 5e-4
values = [base_lr * (0.1**i) for i in range(len(boundaries) + 1)]
learning_rate = paddle.optimizer.lr.PiecewiseDecay(
boundaries=boundaries, values=values)
learning_rate = paddle.optimizer.lr.LinearWarmup(
learning_rate=learning_rate,
warmup_steps=wamup_steps,
start_lr=base_lr / 5.,
end_lr=base_lr,
verbose=True)
optimizer = paddle.optimizer.Momentum(
learning_rate=learning_rate,
weight_decay=weight_decay,
momentum=momentum,
parameters=parameters)
return optimizer
optim = make_optimizer(parameters=lenet.parameters())
model.prepare(optimizer=optim,
loss=paddle.nn.CrossEntropyLoss(),
metrics=paddle.metric.Accuracy())
# if LRScheduler callback not set, an instance LRScheduler update by step
# will be created auto.
model.fit(train_dataset, batch_size=64)
# create a learning rate scheduler update by epoch
callback = paddle.callbacks.LRScheduler(by_step=False, by_epoch=True)
model.fit(train_dataset, batch_size=64, callbacks=callback)
>>> import paddle
>>> import paddle.vision.transforms as T
>>> from paddle.static import InputSpec
>>> inputs = [InputSpec([-1, 1, 28, 28], 'float32', 'image')]
>>> labels = [InputSpec([None, 1], 'int64', 'label')]
>>> transform = T.Compose([
... T.Transpose(),
... T.Normalize([127.5], [127.5])
... ])
>>> train_dataset = paddle.vision.datasets.MNIST(mode='train', transform=transform)
>>> lenet = paddle.vision.models.LeNet()
>>> model = paddle.Model(lenet,
... inputs, labels)
>>> base_lr = 1e-3
>>> boundaries = [5, 8]
>>> warmup_steps = 4
>>> def make_optimizer(parameters=None):
... momentum = 0.9
... weight_decay = 5e-4
... values = [base_lr * (0.1**i) for i in range(len(boundaries) + 1)]
... learning_rate = paddle.optimizer.lr.PiecewiseDecay(
... boundaries=boundaries, values=values)
... learning_rate = paddle.optimizer.lr.LinearWarmup(
... learning_rate=learning_rate,
... warmup_steps=warmup_steps,
... start_lr=base_lr / 5.,
... end_lr=base_lr,
... verbose=True)
... optimizer = paddle.optimizer.Momentum(
... learning_rate=learning_rate,
... weight_decay=weight_decay,
... momentum=momentum,
... parameters=parameters)
... return optimizer
>>> optim = make_optimizer(parameters=lenet.parameters())
>>> model.prepare(optimizer=optim,
... loss=paddle.nn.CrossEntropyLoss(),
... metrics=paddle.metric.Accuracy())
>>> # if LRScheduler callback not set, an instance LRScheduler update by step
>>> # will be created auto.
>>> model.fit(train_dataset, batch_size=64)
>>> # create a learning rate scheduler update by epoch
>>> callback = paddle.callbacks.LRScheduler(by_step=False, by_epoch=True)
>>> model.fit(train_dataset, batch_size=64, callbacks=callback)
"""
def __init__(self, by_step=True, by_epoch=False):
......@@ -744,50 +744,50 @@ class EarlyStopping(Callback):
Examples:
.. code-block:: python
import paddle
from paddle import Model
from paddle.static import InputSpec
from paddle.vision.models import LeNet
from paddle.vision.datasets import MNIST
from paddle.metric import Accuracy
from paddle.nn import CrossEntropyLoss
import paddle.vision.transforms as T
device = paddle.set_device('cpu')
sample_num = 200
save_dir = './best_model_checkpoint'
transform = T.Compose(
[T.Transpose(), T.Normalize([127.5], [127.5])])
train_dataset = MNIST(mode='train', transform=transform)
val_dataset = MNIST(mode='test', transform=transform)
net = LeNet()
optim = paddle.optimizer.Adam(
learning_rate=0.001, parameters=net.parameters())
inputs = [InputSpec([None, 1, 28, 28], 'float32', 'x')]
labels = [InputSpec([None, 1], 'int64', 'label')]
model = Model(net, inputs=inputs, labels=labels)
model.prepare(
optim,
loss=CrossEntropyLoss(reduction="sum"),
metrics=[Accuracy()])
callbacks = paddle.callbacks.EarlyStopping(
'loss',
mode='min',
patience=1,
verbose=1,
min_delta=0,
baseline=None,
save_best_model=True)
model.fit(train_dataset,
val_dataset,
batch_size=64,
log_freq=200,
save_freq=10,
save_dir=save_dir,
epochs=20,
callbacks=[callbacks])
>>> import paddle
>>> from paddle import Model
>>> from paddle.static import InputSpec
>>> from paddle.vision.models import LeNet
>>> from paddle.vision.datasets import MNIST
>>> from paddle.metric import Accuracy
>>> from paddle.nn import CrossEntropyLoss
>>> import paddle.vision.transforms as T
>>> device = paddle.set_device('cpu')
>>> sample_num = 200
>>> save_dir = './best_model_checkpoint'
>>> transform = T.Compose(
... [T.Transpose(), T.Normalize([127.5], [127.5])])
>>> train_dataset = MNIST(mode='train', transform=transform)
>>> val_dataset = MNIST(mode='test', transform=transform)
>>> net = LeNet()
>>> optim = paddle.optimizer.Adam(
... learning_rate=0.001, parameters=net.parameters())
>>> inputs = [InputSpec([None, 1, 28, 28], 'float32', 'x')]
>>> labels = [InputSpec([None, 1], 'int64', 'label')]
>>> model = Model(net, inputs=inputs, labels=labels)
>>> model.prepare(
... optim,
... loss=CrossEntropyLoss(reduction="sum"),
... metrics=[Accuracy()])
>>> callbacks = paddle.callbacks.EarlyStopping(
... 'loss',
... mode='min',
... patience=1,
... verbose=1,
... min_delta=0,
... baseline=None,
... save_best_model=True)
>>> model.fit(train_dataset,
... val_dataset,
... batch_size=64,
... log_freq=200,
... save_freq=10,
... save_dir=save_dir,
... epochs=20,
... callbacks=[callbacks])
"""
def __init__(
......@@ -890,31 +890,31 @@ class VisualDL(Callback):
Examples:
.. code-block:: python
import paddle
import paddle.vision.transforms as T
from paddle.static import InputSpec
>>> import paddle
>>> import paddle.vision.transforms as T
>>> from paddle.static import InputSpec
inputs = [InputSpec([-1, 1, 28, 28], 'float32', 'image')]
labels = [InputSpec([None, 1], 'int64', 'label')]
>>> inputs = [InputSpec([-1, 1, 28, 28], 'float32', 'image')]
>>> labels = [InputSpec([None, 1], 'int64', 'label')]
transform = T.Compose([
T.Transpose(),
T.Normalize([127.5], [127.5])
])
train_dataset = paddle.vision.datasets.MNIST(mode='train', transform=transform)
eval_dataset = paddle.vision.datasets.MNIST(mode='test', transform=transform)
>>> transform = T.Compose([
... T.Transpose(),
... T.Normalize([127.5], [127.5])
... ])
>>> train_dataset = paddle.vision.datasets.MNIST(mode='train', transform=transform)
>>> eval_dataset = paddle.vision.datasets.MNIST(mode='test', transform=transform)
net = paddle.vision.models.LeNet()
model = paddle.Model(net, inputs, labels)
>>> net = paddle.vision.models.LeNet()
>>> model = paddle.Model(net, inputs, labels)
optim = paddle.optimizer.Adam(0.001, parameters=net.parameters())
model.prepare(optimizer=optim,
loss=paddle.nn.CrossEntropyLoss(),
metrics=paddle.metric.Accuracy())
>>> optim = paddle.optimizer.Adam(0.001, parameters=net.parameters())
>>> model.prepare(optimizer=optim,
... loss=paddle.nn.CrossEntropyLoss(),
... metrics=paddle.metric.Accuracy())
## uncomment following lines to fit model with visualdl callback function
# callback = paddle.callbacks.VisualDL(log_dir='visualdl_log_dir')
# model.fit(train_dataset, eval_dataset, batch_size=64, callbacks=callback)
>>> ## uncomment following lines to fit model with visualdl callback function
>>> # callback = paddle.callbacks.VisualDL(log_dir='visualdl_log_dir')
>>> # model.fit(train_dataset, eval_dataset, batch_size=64, callbacks=callback)
"""
......@@ -1019,31 +1019,31 @@ class WandbCallback(Callback):
Examples:
.. code-block:: python
import paddle
import paddle.vision.transforms as T
from paddle.static import InputSpec
>>> import paddle
>>> import paddle.vision.transforms as T
>>> from paddle.static import InputSpec
inputs = [InputSpec([-1, 1, 28, 28], 'float32', 'image')]
labels = [InputSpec([None, 1], 'int64', 'label')]
>>> inputs = [InputSpec([-1, 1, 28, 28], 'float32', 'image')]
>>> labels = [InputSpec([None, 1], 'int64', 'label')]
transform = T.Compose([
T.Transpose(),
T.Normalize([127.5], [127.5])
])
train_dataset = paddle.vision.datasets.MNIST(mode='train', transform=transform)
eval_dataset = paddle.vision.datasets.MNIST(mode='test', transform=transform)
>>> transform = T.Compose([
... T.Transpose(),
... T.Normalize([127.5], [127.5])
... ])
>>> train_dataset = paddle.vision.datasets.MNIST(mode='train', transform=transform)
>>> eval_dataset = paddle.vision.datasets.MNIST(mode='test', transform=transform)
net = paddle.vision.models.LeNet()
model = paddle.Model(net, inputs, labels)
>>> net = paddle.vision.models.LeNet()
>>> model = paddle.Model(net, inputs, labels)
optim = paddle.optimizer.Adam(0.001, parameters=net.parameters())
model.prepare(optimizer=optim,
loss=paddle.nn.CrossEntropyLoss(),
metrics=paddle.metric.Accuracy())
>>> optim = paddle.optimizer.Adam(0.001, parameters=net.parameters())
>>> model.prepare(optimizer=optim,
... loss=paddle.nn.CrossEntropyLoss(),
... metrics=paddle.metric.Accuracy())
## uncomment following lines to fit model with wandb callback function
# callback = paddle.callbacks.WandbCallback(project='paddle_mnist')
# model.fit(train_dataset, eval_dataset, batch_size=64, callbacks=callback)
>>> ## uncomment following lines to fit model with wandb callback function
>>> # callback = paddle.callbacks.WandbCallback(project='paddle_mnist')
>>> # model.fit(train_dataset, eval_dataset, batch_size=64, callbacks=callback)
"""
......@@ -1200,37 +1200,37 @@ class ReduceLROnPlateau(Callback):
Examples:
.. code-block:: python
import paddle
from paddle import Model
from paddle.static import InputSpec
from paddle.vision.models import LeNet
from paddle.vision.datasets import MNIST
from paddle.metric import Accuracy
from paddle.nn.layer.loss import CrossEntropyLoss
import paddle.vision.transforms as T
sample_num = 200
transform = T.Compose(
[T.Transpose(), T.Normalize([127.5], [127.5])])
train_dataset = MNIST(mode='train', transform=transform)
val_dataset = MNIST(mode='test', transform=transform)
net = LeNet()
optim = paddle.optimizer.Adam(
learning_rate=0.001, parameters=net.parameters())
inputs = [InputSpec([None, 1, 28, 28], 'float32', 'x')]
labels = [InputSpec([None, 1], 'int64', 'label')]
model = Model(net, inputs=inputs, labels=labels)
model.prepare(
optim,
loss=CrossEntropyLoss(),
metrics=[Accuracy()])
callbacks = paddle.callbacks.ReduceLROnPlateau(patience=3, verbose=1)
model.fit(train_dataset,
val_dataset,
batch_size=64,
log_freq=200,
save_freq=10,
epochs=20,
callbacks=[callbacks])
>>> import paddle
>>> from paddle import Model
>>> from paddle.static import InputSpec
>>> from paddle.vision.models import LeNet
>>> from paddle.vision.datasets import MNIST
>>> from paddle.metric import Accuracy
>>> from paddle.nn.layer.loss import CrossEntropyLoss
>>> import paddle.vision.transforms as T
>>> sample_num = 200
>>> transform = T.Compose(
... [T.Transpose(), T.Normalize([127.5], [127.5])])
>>> train_dataset = MNIST(mode='train', transform=transform)
>>> val_dataset = MNIST(mode='test', transform=transform)
>>> net = LeNet()
>>> optim = paddle.optimizer.Adam(
... learning_rate=0.001, parameters=net.parameters())
>>> inputs = [InputSpec([None, 1, 28, 28], 'float32', 'x')]
>>> labels = [InputSpec([None, 1], 'int64', 'label')]
>>> model = Model(net, inputs=inputs, labels=labels)
>>> model.prepare(
... optim,
... loss=CrossEntropyLoss(),
... metrics=[Accuracy()])
>>> callbacks = paddle.callbacks.ReduceLROnPlateau(patience=3, verbose=1)
>>> model.fit(train_dataset,
... val_dataset,
... batch_size=64,
... log_freq=200,
... save_freq=10,
... epochs=20,
... callbacks=[callbacks])
"""
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册