Unverified commit 220b676d, authored by ooooo-create, committed by GitHub

[xdoctest][task 60-64] reformat example code with google style in `geometric/*`, `hapi/callbacks.py` (#55919)

* [Doctest]fix No.21, test=docs_preview

* Revert "[Doctest]fix No.21, test=docs_preview"

This reverts commit 76bcdb280e254d682be6fc6f85588f1940bb1ade.

* [Doctest]fix No.60-64, test=docs_preview
Parent commit: 801a8655
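For reference, the Google style targeted by these tasks is ordinary doctest syntax: statements carry a `>>> ` prompt, continuations use `... `, and the expected output follows without a prompt, so the examples can be executed and checked mechanically. A minimal, self-contained sketch using the standard `doctest` module (xdoctest, which this task series targets, accepts the same prompt syntax; the toy function below is not Paddle code):

```python
import doctest


def column_sums():
    """Toy example written in the doctest style adopted by this PR.

    >>> data = [[1, 2, 3], [3, 2, 1], [4, 5, 6]]
    >>> [sum(col) for col in zip(*data[:2])]   # column sums of the first two rows
    [4, 4, 4]
    >>> [sum(col) for col in zip(*data[2:])]   # the last row forms its own group
    [4, 5, 6]
    """


if __name__ == "__main__":
    # failed=0 means every expected output above matched what the code printed.
    print(doctest.testmod())
```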
@@ -43,11 +43,13 @@ def segment_sum(data, segment_ids, name=None):
    Examples:
        .. code-block:: python

            >>> import paddle
            >>> data = paddle.to_tensor([[1, 2, 3], [3, 2, 1], [4, 5, 6]], dtype='float32')
            >>> segment_ids = paddle.to_tensor([0, 0, 1], dtype='int32')
            >>> out = paddle.geometric.segment_sum(data, segment_ids)
            >>> print(out.numpy())
            [[4. 4. 4.]
             [4. 5. 6.]]
    """
    if in_dynamic_mode():
@@ -99,11 +101,13 @@ def segment_mean(data, segment_ids, name=None):
    Examples:
        .. code-block:: python

            >>> import paddle
            >>> data = paddle.to_tensor([[1, 2, 3], [3, 2, 1], [4, 5, 6]], dtype='float32')
            >>> segment_ids = paddle.to_tensor([0, 0, 1], dtype='int32')
            >>> out = paddle.geometric.segment_mean(data, segment_ids)
            >>> print(out.numpy())
            [[2. 2. 2.]
             [4. 5. 6.]]
    """
@@ -155,11 +159,13 @@ def segment_min(data, segment_ids, name=None):
    Examples:
        .. code-block:: python

            >>> import paddle
            >>> data = paddle.to_tensor([[1, 2, 3], [3, 2, 1], [4, 5, 6]], dtype='float32')
            >>> segment_ids = paddle.to_tensor([0, 0, 1], dtype='int32')
            >>> out = paddle.geometric.segment_min(data, segment_ids)
            >>> print(out.numpy())
            [[1. 2. 1.]
             [4. 5. 6.]]
    """
@@ -211,11 +217,13 @@ def segment_max(data, segment_ids, name=None):
    Examples:
        .. code-block:: python

            >>> import paddle
            >>> data = paddle.to_tensor([[1, 2, 3], [3, 2, 1], [4, 5, 6]], dtype='float32')
            >>> segment_ids = paddle.to_tensor([0, 0, 1], dtype='int32')
            >>> out = paddle.geometric.segment_max(data, segment_ids)
            >>> print(out.numpy())
            [[3. 2. 3.]
             [4. 5. 6.]]
    """
...
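The four expected outputs above are easy to verify by hand; here is a small numpy sketch of what the segment reductions compute for this data (an illustration only, not Paddle's implementation):

```python
import numpy as np

data = np.array([[1, 2, 3], [3, 2, 1], [4, 5, 6]], dtype=np.float32)
segment_ids = np.array([0, 0, 1])  # rows 0 and 1 form segment 0, row 2 forms segment 1

def segment_reduce(op):
    # Apply `op` column-wise over the rows that share each segment id.
    num_segments = segment_ids.max() + 1
    return np.stack([op(data[segment_ids == i], axis=0) for i in range(num_segments)])

print(segment_reduce(np.sum))   # segment sums:  [[4. 4. 4.], [4. 5. 6.]]
print(segment_reduce(np.mean))  # segment means: [[2. 2. 2.], [4. 5. 6.]]
print(segment_reduce(np.min))   # segment mins:  [[1. 2. 1.], [4. 5. 6.]]
print(segment_reduce(np.max))   # segment maxes: [[3. 2. 3.], [4. 5. 6.]]
```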
@@ -88,26 +88,34 @@ def send_u_recv(
    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([[0, 2, 3], [1, 4, 5], [2, 6, 7]], dtype="float32")
            >>> indexes = paddle.to_tensor([[0, 1], [1, 2], [2, 1], [0, 0]], dtype="int32")
            >>> src_index, dst_index = indexes[:, 0], indexes[:, 1]
            >>> out = paddle.geometric.send_u_recv(x, src_index, dst_index, reduce_op="sum")
            >>> print(out.numpy())
            [[ 0.  2.  3.]
             [ 2.  8. 10.]
             [ 1.  4.  5.]]

            >>> x = paddle.to_tensor([[0, 2, 3], [1, 4, 5], [2, 6, 7]], dtype="float32")
            >>> indexes = paddle.to_tensor([[0, 1], [2, 1], [0, 0]], dtype="int32")
            >>> src_index, dst_index = indexes[:, 0], indexes[:, 1]
            >>> out_size = paddle.max(dst_index) + 1
            >>> out = paddle.geometric.send_u_recv(x, src_index, dst_index, reduce_op="sum", out_size=out_size)
            >>> print(out.numpy())
            [[ 0.  2.  3.]
             [ 2.  8. 10.]]

            >>> x = paddle.to_tensor([[0, 2, 3], [1, 4, 5], [2, 6, 7]], dtype="float32")
            >>> indexes = paddle.to_tensor([[0, 1], [2, 1], [0, 0]], dtype="int32")
            >>> src_index, dst_index = indexes[:, 0], indexes[:, 1]
            >>> out = paddle.geometric.send_u_recv(x, src_index, dst_index, reduce_op="sum")
            >>> print(out.numpy())
            [[ 0.  2.  3.]
             [ 2.  8. 10.]
             [ 0.  0.  0.]]
    """
@@ -247,29 +255,37 @@ def send_ue_recv(
    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([[0, 2, 3], [1, 4, 5], [2, 6, 7]], dtype="float32")
            >>> y = paddle.to_tensor([1, 1, 1, 1], dtype="float32")
            >>> indexes = paddle.to_tensor([[0, 1], [1, 2], [2, 1], [0, 0]], dtype="int32")
            >>> src_index, dst_index = indexes[:, 0], indexes[:, 1]
            >>> out = paddle.geometric.send_ue_recv(x, y, src_index, dst_index, message_op="add", reduce_op="sum")
            >>> print(out.numpy())
            [[ 1.  3.  4.]
             [ 4. 10. 12.]
             [ 2.  5.  6.]]

            >>> x = paddle.to_tensor([[0, 2, 3], [1, 4, 5], [2, 6, 7]], dtype="float32")
            >>> y = paddle.to_tensor([1, 1, 1], dtype="float32")
            >>> indexes = paddle.to_tensor([[0, 1], [2, 1], [0, 0]], dtype="int32")
            >>> src_index, dst_index = indexes[:, 0], indexes[:, 1]
            >>> out_size = paddle.max(dst_index) + 1
            >>> out = paddle.geometric.send_ue_recv(x, y, src_index, dst_index, message_op="add", reduce_op="sum", out_size=out_size)
            >>> print(out.numpy())
            [[ 1.  3.  4.]
             [ 4. 10. 12.]]

            >>> x = paddle.to_tensor([[0, 2, 3], [1, 4, 5], [2, 6, 7]], dtype="float32")
            >>> y = paddle.to_tensor([1, 1, 1], dtype="float32")
            >>> indexes = paddle.to_tensor([[0, 1], [2, 1], [0, 0]], dtype="int32")
            >>> src_index, dst_index = indexes[:, 0], indexes[:, 1]
            >>> out = paddle.geometric.send_ue_recv(x, y, src_index, dst_index, message_op="add", reduce_op="sum")
            >>> print(out.numpy())
            [[ 1.  3.  4.]
             [ 4. 10. 12.]
             [ 0.  0.  0.]]
    """
@@ -425,15 +441,19 @@ def send_uv(x, y, src_index, dst_index, message_op="add", name=None):
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([[0, 2, 3], [1, 4, 5], [2, 6, 7]], dtype="float32")
            >>> y = paddle.to_tensor([[0, 1, 2], [2, 3, 4], [4, 5, 6]], dtype="float32")
            >>> indexes = paddle.to_tensor([[0, 1], [1, 2], [2, 1], [0, 0]], dtype="int32")
            >>> src_index = indexes[:, 0]
            >>> dst_index = indexes[:, 1]
            >>> out = paddle.geometric.send_uv(x, y, src_index, dst_index, message_op="add")
            >>> print(out.numpy())
            [[ 2.  5.  7.]
             [ 5.  9. 11.]
             [ 4.  9. 11.]
             [ 0.  3.  5.]]
    """
...
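The expected outputs in the three message-passing examples above all come from the same gather / combine / scatter pattern. A small numpy sketch, given purely as an illustration of what the first output of each example computes (not how Paddle implements these ops):

```python
import numpy as np

x = np.array([[0, 2, 3], [1, 4, 5], [2, 6, 7]], dtype=np.float32)
y_edge = np.array([1, 1, 1, 1], dtype=np.float32)                       # edge features (send_ue_recv)
y_node = np.array([[0, 1, 2], [2, 3, 4], [4, 5, 6]], dtype=np.float32)  # node features (send_uv)
src_index = np.array([0, 1, 2, 0])   # indexes[:, 0] in the first example of each op
dst_index = np.array([1, 2, 1, 0])   # indexes[:, 1]

# send_u_recv, reduce_op="sum": gather source features, scatter-add to destinations.
out = np.zeros_like(x)
np.add.at(out, dst_index, x[src_index])
print(out)   # [[0, 2, 3], [2, 8, 10], [1, 4, 5]]

# send_ue_recv, message_op="add": combine with the edge feature before the reduction.
out = np.zeros_like(x)
np.add.at(out, dst_index, x[src_index] + y_edge[:, None])
print(out)   # [[1, 3, 4], [4, 10, 12], [2, 5, 6]]

# send_uv, message_op="add": one row per edge, no reduction onto nodes.
print(x[src_index] + y_node[dst_index])   # [[2, 5, 7], [5, 9, 11], [4, 9, 11], [0, 3, 5]]
```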
@@ -69,18 +69,21 @@ def reindex_graph(
    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = [0, 1, 2]
            >>> neighbors = [8, 9, 0, 4, 7, 6, 7]
            >>> count = [2, 3, 2]
            >>> x = paddle.to_tensor(x, dtype="int64")
            >>> neighbors = paddle.to_tensor(neighbors, dtype="int64")
            >>> count = paddle.to_tensor(count, dtype="int32")
            >>> reindex_src, reindex_dst, out_nodes = paddle.geometric.reindex_graph(x, neighbors, count)
            >>> print(reindex_src.numpy())
            [3 4 0 5 6 7 6]
            >>> print(reindex_dst.numpy())
            [0 0 1 1 1 2 2]
            >>> print(out_nodes.numpy())
            [0 1 2 8 9 4 7 6]
    """
    use_buffer_hashtable = (
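The three expected outputs follow from a simple relabelling rule: the input nodes keep ids `0..len(x)-1`, and previously unseen neighbors are appended in first-seen order. A pure-Python sketch of that rule for the data above (an illustration, not the buffer/hashtable implementation Paddle uses):

```python
x = [0, 1, 2]
neighbors = [8, 9, 0, 4, 7, 6, 7]
count = [2, 3, 2]   # node 0 has 2 sampled neighbors, node 1 has 3, node 2 has 2

# Input nodes first, then unseen neighbors in order of first appearance.
out_nodes = list(x)
for n in neighbors:
    if n not in out_nodes:
        out_nodes.append(n)
new_id = {n: i for i, n in enumerate(out_nodes)}

reindex_src = [new_id[n] for n in neighbors]
reindex_dst = [new_id[v] for v, c in zip(x, count) for _ in range(c)]
print(out_nodes)    # [0, 1, 2, 8, 9, 4, 7, 6]
print(reindex_src)  # [3, 4, 0, 5, 6, 7, 6]
print(reindex_dst)  # [0, 0, 1, 1, 1, 2, 2]
```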
@@ -182,24 +185,27 @@ def reindex_heter_graph(
    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = [0, 1, 2]
            >>> neighbors_a = [8, 9, 0, 4, 7, 6, 7]
            >>> count_a = [2, 3, 2]
            >>> x = paddle.to_tensor(x, dtype="int64")
            >>> neighbors_a = paddle.to_tensor(neighbors_a, dtype="int64")
            >>> count_a = paddle.to_tensor(count_a, dtype="int32")
            >>> neighbors_b = [0, 2, 3, 5, 1]
            >>> count_b = [1, 3, 1]
            >>> neighbors_b = paddle.to_tensor(neighbors_b, dtype="int64")
            >>> count_b = paddle.to_tensor(count_b, dtype="int32")
            >>> neighbors = [neighbors_a, neighbors_b]
            >>> count = [count_a, count_b]
            >>> reindex_src, reindex_dst, out_nodes = paddle.geometric.reindex_heter_graph(x, neighbors, count)
            >>> print(reindex_src.numpy())
            [3 4 0 5 6 7 6 0 2 8 9 1]
            >>> print(reindex_dst.numpy())
            [0 0 1 1 1 2 2 0 1 1 1 2]
            >>> print(out_nodes.numpy())
            [0 1 2 8 9 4 7 6 3 5]
    """
    use_buffer_hashtable = (
...
@@ -77,18 +77,18 @@ def sample_neighbors(
    Examples:
        .. code-block:: python

            >>> import paddle

            >>> # edges: (3, 0), (7, 0), (0, 1), (9, 1), (1, 2), (4, 3), (2, 4),
            >>> #        (9, 5), (3, 5), (9, 6), (1, 6), (9, 8), (7, 8)
            >>> row = [3, 7, 0, 9, 1, 4, 2, 9, 3, 9, 1, 9, 7]
            >>> colptr = [0, 2, 4, 5, 6, 7, 9, 11, 11, 13, 13]
            >>> nodes = [0, 8, 1, 2]
            >>> sample_size = 2
            >>> row = paddle.to_tensor(row, dtype="int64")
            >>> colptr = paddle.to_tensor(colptr, dtype="int64")
            >>> nodes = paddle.to_tensor(nodes, dtype="int64")
            >>> out_neighbors, out_count = paddle.geometric.sample_neighbors(row, colptr, nodes, sample_size=sample_size)
    """
@@ -228,20 +228,20 @@ def weighted_sample_neighbors(
    Examples:
        .. code-block:: python

            >>> import paddle

            >>> # edges: (3, 0), (7, 0), (0, 1), (9, 1), (1, 2), (4, 3), (2, 4),
            >>> #        (9, 5), (3, 5), (9, 6), (1, 6), (9, 8), (7, 8)
            >>> row = [3, 7, 0, 9, 1, 4, 2, 9, 3, 9, 1, 9, 7]
            >>> colptr = [0, 2, 4, 5, 6, 7, 9, 11, 11, 13, 13]
            >>> weight = [0.1, 0.5, 0.2, 0.5, 0.9, 1.9, 2.0, 2.1, 0.01, 0.9, 0.12, 0.59, 0.67]
            >>> nodes = [0, 8, 1, 2]
            >>> sample_size = 2
            >>> row = paddle.to_tensor(row, dtype="int64")
            >>> colptr = paddle.to_tensor(colptr, dtype="int64")
            >>> weight = paddle.to_tensor(weight, dtype="float32")
            >>> nodes = paddle.to_tensor(nodes, dtype="int64")
            >>> out_neighbors, out_count = paddle.geometric.weighted_sample_neighbors(row, colptr, weight, nodes, sample_size=sample_size)
    """
...
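Neither sampling example prints output because the drawn neighbors are random, but the fixed CSC inputs they share can be read back directly to recover the edge list given in the comments (a plain-Python illustration, not Paddle code):

```python
row = [3, 7, 0, 9, 1, 4, 2, 9, 3, 9, 1, 9, 7]
colptr = [0, 2, 4, 5, 6, 7, 9, 11, 11, 13, 13]

def in_neighbors(v):
    # colptr[v]:colptr[v + 1] delimits node v's incoming neighbors in `row`.
    return row[colptr[v]:colptr[v + 1]]

print(in_neighbors(0))  # [3, 7]  -> edges (3, 0) and (7, 0)
print(in_neighbors(8))  # [9, 7]  -> edges (9, 8) and (7, 8)
print(in_neighbors(7))  # []      -> node 7 has no incoming edges
```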
@@ -137,19 +137,19 @@ class Callback:
        .. code-block:: python

            >>> import paddle

            >>> # build a simple model checkpoint callback
            >>> class ModelCheckpoint(paddle.callbacks.Callback):
            ...     def __init__(self, save_freq=1, save_dir=None):
            ...         self.save_freq = save_freq
            ...         self.save_dir = save_dir
            ...
            ...     def on_epoch_end(self, epoch, logs=None):
            ...         if self.model is not None and epoch % self.save_freq == 0:
            ...             path = '{}/{}'.format(self.save_dir, epoch)
            ...             print('save checkpoint at {}'.format(path))
            ...             self.model.save(path)
    """
@@ -314,31 +314,31 @@ class ProgBarLogger(Callback):
    Examples:
        .. code-block:: python

            >>> import paddle
            >>> import paddle.vision.transforms as T
            >>> from paddle.vision.datasets import MNIST
            >>> from paddle.static import InputSpec

            >>> inputs = [InputSpec([-1, 1, 28, 28], 'float32', 'image')]
            >>> labels = [InputSpec([None, 1], 'int64', 'label')]

            >>> transform = T.Compose([
            ...     T.Transpose(),
            ...     T.Normalize([127.5], [127.5])
            ... ])
            >>> train_dataset = MNIST(mode='train', transform=transform)

            >>> lenet = paddle.vision.models.LeNet()
            >>> model = paddle.Model(lenet,
            ...     inputs, labels)

            >>> optim = paddle.optimizer.Adam(0.001, parameters=lenet.parameters())
            >>> model.prepare(optimizer=optim,
            ...     loss=paddle.nn.CrossEntropyLoss(),
            ...     metrics=paddle.metric.Accuracy())

            >>> callback = paddle.callbacks.ProgBarLogger(log_freq=10)
            >>> model.fit(train_dataset, batch_size=64, callbacks=callback)
    """

    def __init__(self, log_freq=1, verbose=2):
@@ -562,31 +562,31 @@ class ModelCheckpoint(Callback):
    Examples:
        .. code-block:: python

            >>> import paddle
            >>> import paddle.vision.transforms as T
            >>> from paddle.vision.datasets import MNIST
            >>> from paddle.static import InputSpec

            >>> inputs = [InputSpec([-1, 1, 28, 28], 'float32', 'image')]
            >>> labels = [InputSpec([None, 1], 'int64', 'label')]

            >>> transform = T.Compose([
            ...     T.Transpose(),
            ...     T.Normalize([127.5], [127.5])
            ... ])
            >>> train_dataset = MNIST(mode='train', transform=transform)

            >>> lenet = paddle.vision.models.LeNet()
            >>> model = paddle.Model(lenet,
            ...     inputs, labels)

            >>> optim = paddle.optimizer.Adam(0.001, parameters=lenet.parameters())
            >>> model.prepare(optimizer=optim,
            ...     loss=paddle.nn.CrossEntropyLoss(),
            ...     metrics=paddle.metric.Accuracy())

            >>> callback = paddle.callbacks.ModelCheckpoint(save_dir='./temp')
            >>> model.fit(train_dataset, batch_size=64, callbacks=callback)
    """

    def __init__(self, save_freq=1, save_dir=None):
@@ -628,58 +628,58 @@ class LRScheduler(Callback):
    Examples:
        .. code-block:: python

            >>> import paddle
            >>> import paddle.vision.transforms as T
            >>> from paddle.static import InputSpec

            >>> inputs = [InputSpec([-1, 1, 28, 28], 'float32', 'image')]
            >>> labels = [InputSpec([None, 1], 'int64', 'label')]

            >>> transform = T.Compose([
            ...     T.Transpose(),
            ...     T.Normalize([127.5], [127.5])
            ... ])
            >>> train_dataset = paddle.vision.datasets.MNIST(mode='train', transform=transform)

            >>> lenet = paddle.vision.models.LeNet()
            >>> model = paddle.Model(lenet,
            ...     inputs, labels)

            >>> base_lr = 1e-3
            >>> boundaries = [5, 8]
            >>> warmup_steps = 4

            >>> def make_optimizer(parameters=None):
            ...     momentum = 0.9
            ...     weight_decay = 5e-4
            ...     values = [base_lr * (0.1**i) for i in range(len(boundaries) + 1)]
            ...     learning_rate = paddle.optimizer.lr.PiecewiseDecay(
            ...         boundaries=boundaries, values=values)
            ...     learning_rate = paddle.optimizer.lr.LinearWarmup(
            ...         learning_rate=learning_rate,
            ...         warmup_steps=warmup_steps,
            ...         start_lr=base_lr / 5.,
            ...         end_lr=base_lr,
            ...         verbose=True)
            ...     optimizer = paddle.optimizer.Momentum(
            ...         learning_rate=learning_rate,
            ...         weight_decay=weight_decay,
            ...         momentum=momentum,
            ...         parameters=parameters)
            ...     return optimizer

            >>> optim = make_optimizer(parameters=lenet.parameters())
            >>> model.prepare(optimizer=optim,
            ...     loss=paddle.nn.CrossEntropyLoss(),
            ...     metrics=paddle.metric.Accuracy())

            >>> # if the LRScheduler callback is not set, an instance that updates
            >>> # the learning rate by step will be created automatically.
            >>> model.fit(train_dataset, batch_size=64)

            >>> # create a learning rate scheduler that updates by epoch
            >>> callback = paddle.callbacks.LRScheduler(by_step=False, by_epoch=True)
            >>> model.fit(train_dataset, batch_size=64, callbacks=callback)
    """

    def __init__(self, by_step=True, by_epoch=False):
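The `make_optimizer` helper above mainly exists to build a warmup-plus-piecewise-decay schedule. Isolated from training, the schedule itself can be inspected step by step; a short sketch reusing the same `paddle.optimizer.lr` classes as the example (`get_lr()`/`step()` are assumed to behave as in other `LRScheduler` usage):

```python
import paddle

base_lr = 1e-3
boundaries = [5, 8]
values = [base_lr * (0.1 ** i) for i in range(len(boundaries) + 1)]

# Piecewise decay wrapped in a 4-step linear warmup, as in the example above.
sched = paddle.optimizer.lr.LinearWarmup(
    learning_rate=paddle.optimizer.lr.PiecewiseDecay(boundaries=boundaries, values=values),
    warmup_steps=4,
    start_lr=base_lr / 5.,
    end_lr=base_lr)

for step in range(10):
    # Warmup ramps 2e-4 -> 1e-3 over the first 4 steps, then the piecewise decay takes over.
    print(step, sched.get_lr())
    sched.step()
```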
@@ -744,50 +744,50 @@ class EarlyStopping(Callback):
    Examples:
        .. code-block:: python

            >>> import paddle
            >>> from paddle import Model
            >>> from paddle.static import InputSpec
            >>> from paddle.vision.models import LeNet
            >>> from paddle.vision.datasets import MNIST
            >>> from paddle.metric import Accuracy
            >>> from paddle.nn import CrossEntropyLoss
            >>> import paddle.vision.transforms as T

            >>> device = paddle.set_device('cpu')
            >>> sample_num = 200
            >>> save_dir = './best_model_checkpoint'
            >>> transform = T.Compose(
            ...     [T.Transpose(), T.Normalize([127.5], [127.5])])
            >>> train_dataset = MNIST(mode='train', transform=transform)
            >>> val_dataset = MNIST(mode='test', transform=transform)
            >>> net = LeNet()
            >>> optim = paddle.optimizer.Adam(
            ...     learning_rate=0.001, parameters=net.parameters())

            >>> inputs = [InputSpec([None, 1, 28, 28], 'float32', 'x')]
            >>> labels = [InputSpec([None, 1], 'int64', 'label')]

            >>> model = Model(net, inputs=inputs, labels=labels)
            >>> model.prepare(
            ...     optim,
            ...     loss=CrossEntropyLoss(reduction="sum"),
            ...     metrics=[Accuracy()])
            >>> callbacks = paddle.callbacks.EarlyStopping(
            ...     'loss',
            ...     mode='min',
            ...     patience=1,
            ...     verbose=1,
            ...     min_delta=0,
            ...     baseline=None,
            ...     save_best_model=True)
            >>> model.fit(train_dataset,
            ...     val_dataset,
            ...     batch_size=64,
            ...     log_freq=200,
            ...     save_freq=10,
            ...     save_dir=save_dir,
            ...     epochs=20,
            ...     callbacks=[callbacks])
    """

    def __init__(
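For readers unfamiliar with the knobs configured above (`mode='min'`, `patience`, `min_delta`), the stopping rule they describe is small; one common formulation, sketched in plain Python as an illustration rather than Paddle's exact implementation:

```python
def should_stop(history, patience=1, min_delta=0):
    # Stop once the monitored value has not improved (decreased by more than
    # min_delta, i.e. mode='min') for more than `patience` consecutive epochs.
    best = float("inf")
    wait = 0
    for value in history:
        if value < best - min_delta:   # improvement: reset the counter
            best, wait = value, 0
        else:
            wait += 1
            if wait > patience:
                return True
    return False

print(should_stop([0.9, 0.7, 0.71, 0.72]))  # True: two epochs without improvement
print(should_stop([0.9, 0.7, 0.6, 0.5]))    # False: still improving
```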
@@ -890,31 +890,31 @@ class VisualDL(Callback):
    Examples:
        .. code-block:: python

            >>> import paddle
            >>> import paddle.vision.transforms as T
            >>> from paddle.static import InputSpec

            >>> inputs = [InputSpec([-1, 1, 28, 28], 'float32', 'image')]
            >>> labels = [InputSpec([None, 1], 'int64', 'label')]

            >>> transform = T.Compose([
            ...     T.Transpose(),
            ...     T.Normalize([127.5], [127.5])
            ... ])
            >>> train_dataset = paddle.vision.datasets.MNIST(mode='train', transform=transform)
            >>> eval_dataset = paddle.vision.datasets.MNIST(mode='test', transform=transform)

            >>> net = paddle.vision.models.LeNet()
            >>> model = paddle.Model(net, inputs, labels)

            >>> optim = paddle.optimizer.Adam(0.001, parameters=net.parameters())
            >>> model.prepare(optimizer=optim,
            ...     loss=paddle.nn.CrossEntropyLoss(),
            ...     metrics=paddle.metric.Accuracy())

            >>> # uncomment the following lines to fit the model with the VisualDL callback
            >>> # callback = paddle.callbacks.VisualDL(log_dir='visualdl_log_dir')
            >>> # model.fit(train_dataset, eval_dataset, batch_size=64, callbacks=callback)
    """
@@ -1019,31 +1019,31 @@ class WandbCallback(Callback):
    Examples:
        .. code-block:: python

            >>> import paddle
            >>> import paddle.vision.transforms as T
            >>> from paddle.static import InputSpec

            >>> inputs = [InputSpec([-1, 1, 28, 28], 'float32', 'image')]
            >>> labels = [InputSpec([None, 1], 'int64', 'label')]

            >>> transform = T.Compose([
            ...     T.Transpose(),
            ...     T.Normalize([127.5], [127.5])
            ... ])
            >>> train_dataset = paddle.vision.datasets.MNIST(mode='train', transform=transform)
            >>> eval_dataset = paddle.vision.datasets.MNIST(mode='test', transform=transform)

            >>> net = paddle.vision.models.LeNet()
            >>> model = paddle.Model(net, inputs, labels)

            >>> optim = paddle.optimizer.Adam(0.001, parameters=net.parameters())
            >>> model.prepare(optimizer=optim,
            ...     loss=paddle.nn.CrossEntropyLoss(),
            ...     metrics=paddle.metric.Accuracy())

            >>> # uncomment the following lines to fit the model with the wandb callback
            >>> # callback = paddle.callbacks.WandbCallback(project='paddle_mnist')
            >>> # model.fit(train_dataset, eval_dataset, batch_size=64, callbacks=callback)
    """
@@ -1198,39 +1198,39 @@ class ReduceLROnPlateau(Callback):
        min_lr(float, optional): lower bound on the learning rate. Default: 0.

    Examples:
        .. code-block:: python

            >>> import paddle
            >>> from paddle import Model
            >>> from paddle.static import InputSpec
            >>> from paddle.vision.models import LeNet
            >>> from paddle.vision.datasets import MNIST
            >>> from paddle.metric import Accuracy
            >>> from paddle.nn.layer.loss import CrossEntropyLoss
            >>> import paddle.vision.transforms as T

            >>> sample_num = 200
            >>> transform = T.Compose(
            ...     [T.Transpose(), T.Normalize([127.5], [127.5])])
            >>> train_dataset = MNIST(mode='train', transform=transform)
            >>> val_dataset = MNIST(mode='test', transform=transform)
            >>> net = LeNet()
            >>> optim = paddle.optimizer.Adam(
            ...     learning_rate=0.001, parameters=net.parameters())

            >>> inputs = [InputSpec([None, 1, 28, 28], 'float32', 'x')]
            >>> labels = [InputSpec([None, 1], 'int64', 'label')]

            >>> model = Model(net, inputs=inputs, labels=labels)
            >>> model.prepare(
            ...     optim,
            ...     loss=CrossEntropyLoss(),
            ...     metrics=[Accuracy()])
            >>> callbacks = paddle.callbacks.ReduceLROnPlateau(patience=3, verbose=1)
            >>> model.fit(train_dataset,
            ...     val_dataset,
            ...     batch_size=64,
            ...     log_freq=200,
            ...     save_freq=10,
            ...     epochs=20,
            ...     callbacks=[callbacks])
    """
...