Unverified commit ff7e6ec5, authored by megemini, committed by GitHub

[Fix]fix cleandoc with a first blank line (#55052)

* [Fix]fix cleandoc with a first blank line

* [Fix]fix metrics.py code-block

* [Fix]fix metrics.py code-block indent
Parent ef19521c
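
Why prepend a newline before calling `inspect.cleandoc`? `cleandoc` strips all leading whitespace from the first line of a string, but removes only the common margin from the lines that follow, so a joined code block whose first line is less indented than its body loses the relative indentation. The last hunk of this diff works around that in `extract_code_blocks_from_docstr` by prepending `"\n"`. A minimal illustration of the standard-library behaviour (not code from this PR):

    import inspect

    # Lines as they might be collected from a docstring code block, where the
    # first line is less indented than the lines that follow it.
    lines = [
        "    def compute(pred, label):",
        "        return pred == label",
    ]

    # Without a leading blank line, cleandoc strips ALL leading whitespace from
    # the first line but only the common 8-space margin from the rest, so the
    # nesting between the two lines is lost.
    print(inspect.cleandoc("\n".join(lines)))
    # def compute(pred, label):
    # return pred == label

    # With a leading "\n" (the change made in the last hunk below), every real
    # line takes part in the margin computation and the indentation survives.
    print(inspect.cleandoc("\n" + "\n".join(lines)))
    # def compute(pred, label):
    #     return pred == label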
@@ -82,31 +82,33 @@ class Metric(metaclass=abc.ABCMeta):
     prediction of each sample like follows, while the correct prediction
     matrix shape is [N, 5].
 
-    .. code-block:: text
+    .. code-block:: python
+        :name: code-compute-example
 
         def compute(pred, label):
             # sort prediction and slice the top-5 scores
             pred = paddle.argsort(pred, descending=True)[:, :5]
             # calculate whether the predictions are correct
             correct = pred == label
             return paddle.cast(correct, dtype='float32')
 
     With the :code:`compute`, we split some calculations to OPs (which
     may run on GPU devices, will be faster), and only fetch 1 tensor with
     shape as [N, 5] instead of 2 tensors with shapes as [N, 10] and [N, 1].
     :code:`update` can be define as follows:
 
-    .. code-block:: text
+    .. code-block:: python
+        :name: code-update-example
 
         def update(self, correct):
             accs = []
             for i, k in enumerate(self.topk):
                 num_corrects = correct[:, :k].sum()
                 num_samples = len(correct)
                 accs.append(float(num_corrects) / num_samples)
                 self.total[i] += num_corrects
                 self.count[i] += num_samples
             return accs
     """
 
     def __init__(self):
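
The `compute`/`update` fragments in the hunk above come from the `Metric` base-class docstring. As a point of reference, a minimal sketch of how they fit into a complete subclass, assuming the `paddle.metric.Metric` interface (`reset`, `update`, `accumulate`, `name`) that the same docstring describes; the class name `TopKAccuracy` is made up for illustration and is not part of this diff:

    import paddle
    from paddle.metric import Metric

    class TopKAccuracy(Metric):
        # Illustrative top-k accuracy metric, a simplified variant of
        # paddle.metric.Accuracy; not code from this PR.
        def __init__(self, topk=(1, 5), name='top_k_acc'):
            super().__init__()
            self.topk = topk
            self._name = name
            self.reset()

        def compute(self, pred, label):
            # Runs as part of the graph: keep only the top-k predictions so
            # update() only has to handle a small [N, k] correctness matrix.
            pred = paddle.argsort(pred, descending=True)[:, :max(self.topk)]
            return paddle.cast(pred == label, dtype='float32')

        def update(self, correct):
            # Runs on the host: accumulate per-k correct/sample counts.
            accs = []
            for i, k in enumerate(self.topk):
                num_corrects = float(correct[:, :k].sum())
                num_samples = len(correct)
                accs.append(num_corrects / num_samples)
                self.total[i] += num_corrects
                self.count[i] += num_samples
            return accs

        def reset(self):
            self.total = [0.0] * len(self.topk)
            self.count = [0] * len(self.topk)

        def accumulate(self):
            return [t / c if c else 0.0 for t, c in zip(self.total, self.count)]

        def name(self):
            return self._name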
@@ -195,50 +197,48 @@ class Accuracy(Metric):
         name (str, optional): String name of the metric instance. Default
             is `acc`.
 
-    Example by standalone:
+    Examples:
 
         .. code-block:: python
+            :name: code-standalone-example
 
             import numpy as np
             import paddle
 
             x = paddle.to_tensor(np.array([
                 [0.1, 0.2, 0.3, 0.4],
                 [0.1, 0.4, 0.3, 0.2],
                 [0.1, 0.2, 0.4, 0.3],
                 [0.1, 0.2, 0.3, 0.4]]))
             y = paddle.to_tensor(np.array([[0], [1], [2], [3]]))
             m = paddle.metric.Accuracy()
             correct = m.compute(x, y)
             m.update(correct)
             res = m.accumulate()
             print(res) # 0.75
 
-
-    Example with Model API:
-
         .. code-block:: python
+            :name: code-model-api-example
 
             import paddle
             from paddle.static import InputSpec
             import paddle.vision.transforms as T
             from paddle.vision.datasets import MNIST
 
             input = InputSpec([None, 1, 28, 28], 'float32', 'image')
             label = InputSpec([None, 1], 'int64', 'label')
             transform = T.Compose([T.Transpose(), T.Normalize([127.5], [127.5])])
             train_dataset = MNIST(mode='train', transform=transform)
 
             model = paddle.Model(paddle.vision.models.LeNet(), input, label)
             optim = paddle.optimizer.Adam(
                 learning_rate=0.001, parameters=model.parameters())
             model.prepare(
                 optim,
                 loss=paddle.nn.CrossEntropyLoss(),
                 metrics=paddle.metric.Accuracy())
 
             model.fit(train_dataset, batch_size=64)
     """
@@ -349,57 +349,55 @@ class Precision(Metric):
         name (str, optional): String name of the metric instance.
             Default is `precision`.
 
-    Example by standalone:
+    Examples:
 
         .. code-block:: python
+            :name: code-standalone-example
 
             import numpy as np
             import paddle
 
             x = np.array([0.1, 0.5, 0.6, 0.7])
             y = np.array([0, 1, 1, 1])
             m = paddle.metric.Precision()
             m.update(x, y)
             res = m.accumulate()
             print(res) # 1.0
 
-
-    Example with Model API:
-
         .. code-block:: python
+            :name: code-model-api-example
 
             import numpy as np
             import paddle
             import paddle.nn as nn
 
             class Data(paddle.io.Dataset):
                 def __init__(self):
                     super().__init__()
                     self.n = 1024
                     self.x = np.random.randn(self.n, 10).astype('float32')
                     self.y = np.random.randint(2, size=(self.n, 1)).astype('float32')
 
                 def __getitem__(self, idx):
                     return self.x[idx], self.y[idx]
 
                 def __len__(self):
                     return self.n
 
             model = paddle.Model(nn.Sequential(
                 nn.Linear(10, 1),
                 nn.Sigmoid()
             ))
             optim = paddle.optimizer.Adam(
                 learning_rate=0.001, parameters=model.parameters())
             model.prepare(
                 optim,
                 loss=nn.BCELoss(),
                 metrics=paddle.metric.Precision())
 
             data = Data()
             model.fit(data, batch_size=16)
     """
 
     def __init__(self, name='precision', *args, **kwargs):
@@ -482,57 +480,55 @@ class Recall(Metric):
         name (str, optional): String name of the metric instance.
             Default is `recall`.
 
-    Example by standalone:
+    Examples:
 
         .. code-block:: python
+            :name: code-standalone-example
 
             import numpy as np
             import paddle
 
             x = np.array([0.1, 0.5, 0.6, 0.7])
             y = np.array([1, 0, 1, 1])
             m = paddle.metric.Recall()
             m.update(x, y)
             res = m.accumulate()
             print(res) # 2.0 / 3.0
 
-
-    Example with Model API:
-
         .. code-block:: python
+            :name: code-model-api-example
 
             import numpy as np
             import paddle
             import paddle.nn as nn
 
             class Data(paddle.io.Dataset):
                 def __init__(self):
                     super().__init__()
                     self.n = 1024
                     self.x = np.random.randn(self.n, 10).astype('float32')
                     self.y = np.random.randint(2, size=(self.n, 1)).astype('float32')
 
                 def __getitem__(self, idx):
                     return self.x[idx], self.y[idx]
 
                 def __len__(self):
                     return self.n
 
             model = paddle.Model(nn.Sequential(
                 nn.Linear(10, 1),
                 nn.Sigmoid()
             ))
             optim = paddle.optimizer.Adam(
                 learning_rate=0.001, parameters=model.parameters())
             model.prepare(
                 optim,
                 loss=nn.BCELoss(),
                 metrics=[paddle.metric.Precision(), paddle.metric.Recall()])
 
             data = Data()
             model.fit(data, batch_size=16)
     """
 
     def __init__(self, name='recall', *args, **kwargs):
@@ -624,61 +620,60 @@ class Auc(Metric):
         "NOTE: only implement the ROC curve type via Python now."
 
-    Example by standalone:
+    Examples:
 
         .. code-block:: python
+            :name: code-standalone-example
 
             import numpy as np
             import paddle
 
             m = paddle.metric.Auc()
             n = 8
             class0_preds = np.random.random(size = (n, 1))
             class1_preds = 1 - class0_preds
             preds = np.concatenate((class0_preds, class1_preds), axis=1)
             labels = np.random.randint(2, size = (n, 1))
             m.update(preds=preds, labels=labels)
             res = m.accumulate()
 
-
-    Example with Model API:
-
         .. code-block:: python
+            :name: code-model-api-example
 
             import numpy as np
             import paddle
             import paddle.nn as nn
 
             class Data(paddle.io.Dataset):
                 def __init__(self):
                     super().__init__()
                     self.n = 1024
                     self.x = np.random.randn(self.n, 10).astype('float32')
                     self.y = np.random.randint(2, size=(self.n, 1)).astype('int64')
 
                 def __getitem__(self, idx):
                     return self.x[idx], self.y[idx]
 
                 def __len__(self):
                     return self.n
 
             model = paddle.Model(nn.Sequential(
                 nn.Linear(10, 2), nn.Softmax())
             )
             optim = paddle.optimizer.Adam(
                 learning_rate=0.001, parameters=model.parameters())
 
             def loss(x, y):
                 return nn.functional.nll_loss(paddle.log(x), y)
 
             model.prepare(
                 optim,
                 loss=loss,
                 metrics=paddle.metric.Auc())
             data = Data()
             model.fit(data, batch_size=16)
     """
 
     def __init__(
......
@@ -151,7 +151,7 @@ def extract_code_blocks_from_docstr(docstr):
         # nonlocal code_blocks, cb_cur, cb_cur_name, cb_cur_seq_id, cb_required
         code_blocks.append(
             {
-                'codes': inspect.cleandoc("\n".join(cb_info['cb_cur'])),
+                'codes': inspect.cleandoc("\n" + "\n".join(cb_info['cb_cur'])),
                 'name': cb_info['cb_cur_name'],
                 'id': cb_info['cb_cur_seq_id'],
                 'required': cb_info['cb_required'],
......