Commit 8bbedc23
Authored Nov 25, 2020 by qingqing01; committed via GitHub on Nov 25, 2020.
Parent: a1486091

Fix doc format for callbacks, metrics and Model (#28638)

* Fix doc format for callbacks, metrics and Model
* Fix code sample and doc
Showing 4 changed files with 261 additions and 251 deletions (+261 -251):

    python/paddle/fluid/dataloader/batch_sampler.py   +2    -2
    python/paddle/hapi/callbacks.py                   +21   -15
    python/paddle/hapi/model.py                       +53   -57
    python/paddle/metric/metrics.py                   +185  -177
python/paddle/fluid/dataloader/batch_sampler.py

@@ -180,11 +180,11 @@ class DistributedBatchSampler(BatchSampler):
         batch_size(int): sample indice number in a mini-batch indices.
         num_replicas(int, optional): porcess number in distributed training.
             If :attr:`num_replicas` is None, :attr:`num_replicas` will be
-            retrieved from :code:`paddle.fluid.dygraph.parallel.ParallenEnv`.
+            retrieved from :code:`paddle.distributed.ParallenEnv`.
             Default None.
         rank(int, optional): the rank of the current process among :attr:`num_replicas`
             processes. If :attr:`rank` is None, :attr:`rank` is retrieved from
-            :code:`paddle.fluid.dygraph.parallel.ParallenEnv`. Default None.
+            :code:`paddle.distributed.ParallenEnv`. Default None.
         shuffle(bool): whther to shuffle indices order before genrating
             batch indices. Default False.
         drop_last(bool): whether drop the last incomplete batch dataset size
...
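For reference (not part of this commit), a minimal sketch of how the sampler documented above is typically wired into a DataLoader; the toy dataset, batch size and flags are arbitrary assumptions:

import numpy as np
import paddle
from paddle.io import Dataset, DataLoader, DistributedBatchSampler

class RandomDataset(Dataset):
    # toy dataset, only to make the sketch self-contained
    def __init__(self, n=128):
        self.x = np.random.randn(n, 10).astype('float32')
        self.y = np.random.randint(0, 2, (n, 1)).astype('int64')
    def __getitem__(self, idx):
        return self.x[idx], self.y[idx]
    def __len__(self):
        return len(self.x)

dataset = RandomDataset()
# num_replicas and rank default to the values reported by paddle.distributed.ParallelEnv
sampler = DistributedBatchSampler(dataset, batch_size=16, shuffle=True, drop_last=False)
loader = DataLoader(dataset, batch_sampler=sampler)
for batch_x, batch_y in loader:
    pass  # one training step per mini-batch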
python/paddle/hapi/callbacks.py

@@ -161,10 +161,8 @@ class Callback(object):
          - 'batch_size': an integer. Number of samples per batch.
          - 'epochs': an integer. Number of epochs.
          - 'steps': an integer. Number of steps of one epoch.
-         - 'verbose': an integer. Verbose mode is 0, 1 or 2.
-                    0 = silent, 1 = progress bar, 2 = one line per epoch.
-         - 'metrics': a list of str. Names of metrics, including 'loss'
-                    and the names of paddle.metric.Metric.
+         - 'verbose': an integer. Verbose mode is 0, 1 or 2. 0 = silent, 1 = progress bar, 2 = one line per epoch.
+         - 'metrics': a list of str. Names of metrics, including 'loss' and the names of paddle.metric.Metric.
        """
        self.params = params
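For reference (not part of this commit), a minimal sketch of a user-defined callback that reads the `params` dictionary described above; the callback name and print format are illustrative only:

import paddle
import paddle.vision.transforms as T
from paddle.vision.datasets import MNIST
from paddle.static import InputSpec

class EpochSummary(paddle.callbacks.Callback):
    # hypothetical callback: report progress from self.params at the end of each epoch
    def on_epoch_end(self, epoch, logs=None):
        # self.params carries 'epochs', 'steps', 'verbose', 'metrics', ...
        print('epoch {}/{}: {}'.format(epoch + 1, self.params['epochs'], logs))

transform = T.Compose([T.Transpose(), T.Normalize([127.5], [127.5])])
train_dataset = MNIST(mode='train', transform=transform)
model = paddle.Model(paddle.vision.LeNet(),
                     InputSpec([None, 1, 28, 28], 'float32', 'image'),
                     InputSpec([None, 1], 'int64', 'label'))
model.prepare(paddle.optimizer.Adam(parameters=model.parameters()),
              paddle.nn.CrossEntropyLoss(),
              paddle.metric.Accuracy())
model.fit(train_dataset, epochs=1, batch_size=64, callbacks=[EpochSummary()])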
...
@@ -298,18 +296,21 @@ class Callback(object):
 class ProgBarLogger(Callback):
-    """Logger callback function
+    """
+    Logger callback function.

     Args:
-        log_freq (int): The frequency, in number of steps, the logs such as `loss`, `metrics` are printed. Default: 1.
+        log_freq (int): The frequency, in number of steps, the logs such as loss, metrics are printed. Default: 1.
         verbose (int): The verbosity mode, should be 0, 1, or 2.
             0 = silent, 1 = progress bar, 2 = one line per epoch. Default: 2.

     Examples:
         .. code-block:: python

             import paddle
             import paddle.vision.transforms as T
+            from paddle.vision.datasets import MNIST
             from paddle.static import InputSpec

             inputs = [InputSpec([-1, 1, 28, 28], 'float32', 'image')]
...
@@ -319,7 +320,7 @@ class ProgBarLogger(Callback):
                 T.Transpose(),
                 T.Normalize([127.5], [127.5])
             ])
-            train_dataset = paddle.vision.datasets.MNIST(mode='train', transform=transform)
+            train_dataset = MNIST(mode='train', transform=transform)
             lenet = paddle.vision.LeNet()
             model = paddle.Model(lenet,
...
@@ -439,18 +440,21 @@ class ProgBarLogger(Callback):
 class ModelCheckpoint(Callback):
-    """Model checkpoint callback function
+    """
+    Model checkpoint callback function.

     Args:
         save_freq(int): The frequency, in number of epochs, the model checkpoint
             are saved. Default: 1.
         save_dir(str|None): The directory to save checkpoint during training.
             If None, will not save checkpoint. Default: None.

     Examples:
         .. code-block:: python

             import paddle
             import paddle.vision.transforms as T
+            from paddle.vision.datasets import MNIST
             from paddle.static import InputSpec

             inputs = [InputSpec([-1, 1, 28, 28], 'float32', 'image')]
...
@@ -460,7 +464,7 @@ class ModelCheckpoint(Callback):
                 T.Transpose(),
                 T.Normalize([127.5], [127.5])
             ])
-            train_dataset = paddle.vision.datasets.MNIST(mode='train', transform=transform)
+            train_dataset = MNIST(mode='train', transform=transform)
             lenet = paddle.vision.LeNet()
             model = paddle.Model(lenet,
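The docstring examples above are cut off by the diff context. For reference (not part of this commit), a minimal sketch of how such an example typically continues, passing both callbacks to fit; the directory name and frequencies are arbitrary:

import paddle
import paddle.vision.transforms as T
from paddle.vision.datasets import MNIST
from paddle.static import InputSpec

inputs = [InputSpec([-1, 1, 28, 28], 'float32', 'image')]
labels = [InputSpec([None, 1], 'int64', 'label')]
transform = T.Compose([T.Transpose(), T.Normalize([127.5], [127.5])])
train_dataset = MNIST(mode='train', transform=transform)

model = paddle.Model(paddle.vision.LeNet(), inputs, labels)
model.prepare(paddle.optimizer.Adam(parameters=model.parameters()),
              paddle.nn.CrossEntropyLoss(),
              paddle.metric.Accuracy())
# log every 200 steps; write a checkpoint to ./checkpoints after every epoch
callbacks = [paddle.callbacks.ProgBarLogger(log_freq=200, verbose=2),
             paddle.callbacks.ModelCheckpoint(save_freq=1, save_dir='./checkpoints')]
model.fit(train_dataset, epochs=2, batch_size=64, callbacks=callbacks)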
...
@@ -740,7 +744,9 @@ class EarlyStopping(Callback):
 class VisualDL(Callback):
-    """VisualDL callback function
+    """
+    VisualDL callback function.

     Args:
         log_dir (str): The directory to save visualdl log file.
...
python/paddle/hapi/model.py

@@ -808,7 +808,7 @@ class Model(object):
     """
     An Model object is network with training and inference features.
     Dynamic graph and static graph are supported at the same time,
-    switched by `paddle.disable_static()`. The usage is as follows.
+    switched by `paddle.enable_static()`. The usage is as follows.
     But note, the switching between dynamic and static should be before
     instantiating a Model. The input description, i.e, paddle.static.InputSpec,
     must be required for static graph.
...
@@ -829,36 +829,36 @@ class Model(object):
     Examples:
         .. code-block:: python

             import paddle
             import paddle.nn as nn
             import paddle.vision.transforms as T
             from paddle.static import InputSpec

             device = paddle.set_device('cpu') # or 'gpu'

             net = nn.Sequential(
                 nn.Flatten(1),
                 nn.Linear(784, 200),
                 nn.Tanh(),
                 nn.Linear(200, 10))

             # inputs and labels are not required for dynamic graph.
             input = InputSpec([None, 784], 'float32', 'x')
             label = InputSpec([None, 1], 'int64', 'label')

             model = paddle.Model(net, input, label)
             optim = paddle.optimizer.SGD(learning_rate=1e-3,
                 parameters=model.parameters())
             model.prepare(optim,
                           paddle.nn.CrossEntropyLoss(),
                           paddle.metric.Accuracy())

             transform = T.Compose([
                 T.Transpose(),
                 T.Normalize([127.5], [127.5])
             ])
             data = paddle.vision.datasets.MNIST(mode='train', transform=transform)
             model.fit(data, epochs=2, batch_size=32, verbose=1)
     """

     def __init__(self, network, inputs=None, labels=None):
...
@@ -1052,9 +1052,9 @@ class Model(object):
         If `training` is set to False, only inference model will be saved.

         Args:
-            path (str): The file prefix to save model. The format is
-                'dirname/file_prefix' or 'file_prefix'. if empty str. A exception
-                will be raised.
+            path (str): The file prefix to save model. The format
+                is 'dirname/file_prefix' or 'file_prefix'. if empty str.
+                A exception will be raised.
             training (bool, optional): Whether to save for training. If not, save
                 for inference only. Default: True.
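For reference (not part of this commit), a minimal sketch of the two save modes described above; the path prefixes are arbitrary:

import paddle
from paddle.static import InputSpec

input = InputSpec([None, 1, 28, 28], 'float32', 'image')
label = InputSpec([None, 1], 'int64', 'label')
model = paddle.Model(paddle.vision.LeNet(), input, label)
model.prepare(paddle.optimizer.Adam(parameters=model.parameters()),
              paddle.nn.CrossEntropyLoss())

model.save('checkpoint/mnist')                 # training=True: parameters and optimizer state
model.save('inference_model', training=False)  # inference-only model for deployment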
...
@@ -1084,9 +1084,9 @@ class Model(object):
                     return self.net(x)

             dynamic = True  # False
             device = paddle.set_device('cpu')
             # if use static graph, do not set
-            paddle.disable_static(device) if dynamic else None
+            if not dynamic:
+                paddle.enable_static()

             input = InputSpec([None, 784], 'float32', 'x')
             label = InputSpec([None, 1], 'int64', 'label')
...
@@ -1361,18 +1361,19 @@ class Model(object):
             import paddle
             import paddle.vision.transforms as T
+            from paddle.vision.datasets import MNIST
             from paddle.static import InputSpec

             dynamic = True
             device = paddle.set_device('cpu') # or 'gpu'
-            paddle.disable_static(device) if dynamic else None
+            if not dynamic:
+                paddle.enable_static()

             transform = T.Compose([
                 T.Transpose(),
                 T.Normalize([127.5], [127.5])
             ])
-            train_dataset = paddle.vision.datasets.MNIST(mode='train', transform=transform)
-            val_dataset = paddle.vision.datasets.MNIST(mode='test', transform=transform)
+            train_dataset = MNIST(mode='train', transform=transform)
+            val_dataset = MNIST(mode='test', transform=transform)

             input = InputSpec([None, 1, 28, 28], 'float32', 'image')
             label = InputSpec([None, 1], 'int64', 'label')
...
@@ -1399,22 +1400,23 @@ class Model(object):
             import paddle
             import paddle.vision.transforms as T
+            from paddle.vision.datasets import MNIST
             from paddle.static import InputSpec

             dynamic = True
             device = paddle.set_device('cpu') # or 'gpu'
-            paddle.disable_static(device) if dynamic else None
+            if not dynamic:
+                paddle.enable_static()

             transform = T.Compose([
                 T.Transpose(),
                 T.Normalize([127.5], [127.5])
             ])
-            train_dataset = paddle.vision.datasets.MNIST(mode='train', transform=transform)
+            train_dataset = MNIST(mode='train', transform=transform)
             train_loader = paddle.io.DataLoader(train_dataset,
                 places=device,
                 batch_size=64)
-            val_dataset = paddle.vision.datasets.MNIST(mode='test', transform=transform)
+            val_dataset = MNIST(mode='test', transform=transform)
             val_loader = paddle.io.DataLoader(val_dataset,
                 places=device,
                 batch_size=64)

             input = InputSpec([None, 1, 28, 28], 'float32', 'image')
             label = InputSpec([None, 1], 'int64', 'label')
...
@@ -1540,7 +1542,8 @@ class Model(object):
             value is a scalar or numpy.array.

         Examples:
             .. code-block:: python

                 import paddle
                 import paddle.vision.transforms as T
...
@@ -1559,14 +1562,6 @@ class Model(object):
             model.prepare(metrics=paddle.metric.Accuracy())
             result = model.evaluate(val_dataset, batch_size=64)
             print(result)
-
-            # imperative mode
-            paddle.disable_static()
-            model = paddle.Model(paddle.vision.models.LeNet(), input, label)
-            model.prepare(metrics=paddle.metric.Accuracy())
-            result = model.evaluate(val_dataset, batch_size=64)
-            print(result)
         """
         if eval_data is not None and isinstance(eval_data, Dataset):
...
@@ -1637,7 +1632,8 @@ class Model(object):
             list: output of models.

         Examples:
             .. code-block:: python

                 import numpy as np
                 import paddle
...
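The predict example above is truncated by the diff context. For reference (not part of this commit), a minimal sketch of a typical call, using a hypothetical label-free dataset:

import numpy as np
import paddle
from paddle.static import InputSpec

class FakeImages(paddle.io.Dataset):
    # hypothetical dataset that yields images only, since predict expects no labels
    def __init__(self, n=64):
        self.x = np.random.randn(n, 1, 28, 28).astype('float32')
    def __getitem__(self, idx):
        return (self.x[idx],)
    def __len__(self):
        return len(self.x)

input = InputSpec([None, 1, 28, 28], 'float32', 'image')
model = paddle.Model(paddle.vision.LeNet(), input)
model.prepare()
result = model.predict(FakeImages(), batch_size=32)
# result is a list with one entry per model output; each entry holds per-batch numpy arrays
print(len(result), len(result[0]), result[0][0].shape)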
python/paddle/metric/metrics.py

@@ -38,11 +38,13 @@ class Metric(object):
     r"""
     Base class for metric, encapsulates metric logic and APIs
     Usage:
-        m = SomeMetric()
-        for prediction, label in ...:
-            m.update(prediction, label)
-        m.accumulate()
+        .. code-block:: text
+
+            m = SomeMetric()
+            for prediction, label in ...:
+                m.update(prediction, label)
+            m.accumulate()

     Advanced usage for :code:`compute`:
...
@@ -52,6 +54,9 @@ class Metric(object):
     call :code:`update` with states in NumPy format.
     Metric calculated as follows (operations in Model and Metric are
     indicated with curly brackets, while data nodes not):

+    .. code-block:: text
+
         inputs & labels          ||    ------------------
                |                 ||
             {model}              ||
...
@@ -67,8 +72,9 @@ class Metric(object):
         metric states(numpy)     ||    numpy data
                |                 ||
         {Metric.update}          \/    ------------------

     Examples:
         For :code:`Accuracy` metric, which takes :code:`pred` and :code:`label`
         as inputs, we can calculate the correct prediction matrix between
         :code:`pred` and :code:`label` in :code:`compute`.
...
@@ -79,29 +85,31 @@ class Metric(object):
     prediction of each sample like follows, while the correct prediction
     matrix shape is [N, 5].

-    .. code-block:: python
+    .. code-block:: text

         def compute(pred, label):
             # sort prediction and slice the top-5 scores
             pred = paddle.argsort(pred, descending=True)[:, :5]
             # calculate whether the predictions are correct
             correct = pred == label
             return paddle.cast(correct, dtype='float32')

     With the :code:`compute`, we split some calculations to OPs (which
     may run on GPU devices, will be faster), and only fetch 1 tensor with
     shape as [N, 5] instead of 2 tensors with shapes as [N, 10] and [N, 1].
     :code:`update` can be define as follows:

-    .. code-block:: python
+    .. code-block:: text

         def update(self, correct):
             accs = []
             for i, k in enumerate(self.topk):
                 num_corrects = correct[:, :k].sum()
                 num_samples = len(correct)
                 accs.append(float(num_corrects) / num_samples)
                 self.total[i] += num_corrects
                 self.count[i] += num_samples
             return accs
     """

     def __init__(self):
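For reference (not part of this commit), a minimal sketch of a user-defined metric that follows the compute/update/accumulate/reset contract described above; the metric itself (mean absolute error on binary predictions) and its name are only illustrative:

import numpy as np
import paddle

class MeanError(paddle.metric.Metric):
    def __init__(self, name='mean_error'):
        super(MeanError, self).__init__()
        self._name = name
        self.reset()

    def compute(self, pred, label):
        # runs as part of the network; only the per-sample absolute error is fetched
        return paddle.abs(paddle.cast(pred, 'float32') - paddle.cast(label, 'float32'))

    def update(self, error):
        # receives the output of compute converted to numpy
        self.total += float(np.sum(error))
        self.count += error.shape[0]
        return self.total / self.count

    def accumulate(self):
        return self.total / self.count if self.count > 0 else 0.

    def reset(self):
        self.total = 0.
        self.count = 0

    def name(self):
        return self._name

# standalone usage
m = MeanError()
err = m.compute(paddle.to_tensor([[0.9], [0.2]]), paddle.to_tensor([[1.0], [0.0]]))
m.update(err.numpy())
print(m.accumulate())  # approximately 0.15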
...
@@ -183,43 +191,46 @@ class Accuracy(Metric):
         .. code-block:: python

             import numpy as np
             import paddle

             x = paddle.to_tensor(np.array([
                 [0.1, 0.2, 0.3, 0.4],
                 [0.1, 0.4, 0.3, 0.2],
                 [0.1, 0.2, 0.4, 0.3],
                 [0.1, 0.2, 0.3, 0.4]]))
             y = paddle.to_tensor(np.array([[0], [1], [2], [3]]))

             m = paddle.metric.Accuracy()
             correct = m.compute(x, y)
             m.update(correct)
             res = m.accumulate()
             print(res) # 0.75

     Example with Model API:

         .. code-block:: python

             import paddle
             from paddle.static import InputSpec
+            import paddle.vision.transforms as T
+            from paddle.vision.datasets import MNIST

             input = InputSpec([None, 1, 28, 28], 'float32', 'image')
             label = InputSpec([None, 1], 'int64', 'label')
-            train_dataset = paddle.vision.datasets.MNIST(mode='train')
+            transform = T.Compose([T.Transpose(), T.Normalize([127.5], [127.5])])
+            train_dataset = MNIST(mode='train', transform=transform)

             model = paddle.Model(paddle.vision.LeNet(), input, label)
             optim = paddle.optimizer.Adam(
                 learning_rate=0.001, parameters=model.parameters())
             model.prepare(
                 optim,
                 loss=paddle.nn.CrossEntropyLoss(),
                 metrics=paddle.metric.Accuracy())
             model.fit(train_dataset, batch_size=64)
     """
...
@@ -321,54 +332,53 @@ class Precision(Metric):
         .. code-block:: python

             import numpy as np
             import paddle

             x = np.array([0.1, 0.5, 0.6, 0.7])
             y = np.array([0, 1, 1, 1])

             m = paddle.metric.Precision()
             m.update(x, y)
             res = m.accumulate()
             print(res) # 1.0

     Example with Model API:

         .. code-block:: python

             import numpy as np
             import paddle
             import paddle.nn as nn

             class Data(paddle.io.Dataset):
                 def __init__(self):
                     super(Data, self).__init__()
                     self.n = 1024
                     self.x = np.random.randn(self.n, 10).astype('float32')
                     self.y = np.random.randint(2, size=(self.n, 1)).astype('float32')

                 def __getitem__(self, idx):
                     return self.x[idx], self.y[idx]

                 def __len__(self):
                     return self.n

-            paddle.disable_static()
             model = paddle.Model(nn.Sequential(
                 nn.Linear(10, 1),
                 nn.Sigmoid()
             ))
             optim = paddle.optimizer.Adam(
                 learning_rate=0.001, parameters=model.parameters())
             model.prepare(
                 optim,
                 loss=nn.BCELoss(),
                 metrics=paddle.metric.Precision())

             data = Data()
             model.fit(data, batch_size=16)
     """

     def __init__(self, name='precision', *args, **kwargs):
...
@@ -455,54 +465,53 @@ class Recall(Metric):
         .. code-block:: python

             import numpy as np
             import paddle

             x = np.array([0.1, 0.5, 0.6, 0.7])
             y = np.array([1, 0, 1, 1])

             m = paddle.metric.Recall()
             m.update(x, y)
             res = m.accumulate()
             print(res) # 2.0 / 3.0

     Example with Model API:

         .. code-block:: python

             import numpy as np
             import paddle
             import paddle.nn as nn

             class Data(paddle.io.Dataset):
                 def __init__(self):
                     super(Data, self).__init__()
                     self.n = 1024
                     self.x = np.random.randn(self.n, 10).astype('float32')
                     self.y = np.random.randint(2, size=(self.n, 1)).astype('float32')

                 def __getitem__(self, idx):
                     return self.x[idx], self.y[idx]

                 def __len__(self):
                     return self.n

-            paddle.disable_static()
             model = paddle.Model(nn.Sequential(
                 nn.Linear(10, 1),
                 nn.Sigmoid()
             ))
             optim = paddle.optimizer.Adam(
                 learning_rate=0.001, parameters=model.parameters())
             model.prepare(
                 optim,
                 loss=nn.BCELoss(),
                 metrics=[paddle.metric.Precision(), paddle.metric.Recall()])

             data = Data()
             model.fit(data, batch_size=16)
     """

     def __init__(self, name='recall', *args, **kwargs):
...
@@ -597,59 +606,58 @@ class Auc(Metric):
     Example by standalone:

         .. code-block:: python

             import numpy as np
             import paddle

             m = paddle.metric.Auc()

             n = 8
             class0_preds = np.random.random(size = (n, 1))
             class1_preds = 1 - class0_preds

             preds = np.concatenate((class0_preds, class1_preds), axis=1)
             labels = np.random.randint(2, size = (n, 1))

             m.update(preds=preds, labels=labels)
             res = m.accumulate()

     Example with Model API:

         .. code-block:: python

             import numpy as np
             import paddle
             import paddle.nn as nn

             class Data(paddle.io.Dataset):
                 def __init__(self):
                     super(Data, self).__init__()
                     self.n = 1024
                     self.x = np.random.randn(self.n, 10).astype('float32')
                     self.y = np.random.randint(2, size=(self.n, 1)).astype('int64')

                 def __getitem__(self, idx):
                     return self.x[idx], self.y[idx]

                 def __len__(self):
                     return self.n

-            paddle.disable_static()
             model = paddle.Model(nn.Sequential(
                 nn.Linear(10, 2), nn.Softmax())
             )
             optim = paddle.optimizer.Adam(
                 learning_rate=0.001, parameters=model.parameters())

             def loss(x, y):
                 return nn.functional.nll_loss(paddle.log(x), y)

             model.prepare(
                 optim,
                 loss=loss,
                 metrics=paddle.metric.Auc())
             data = Data()
             model.fit(data, batch_size=16)
     """

     def __init__(self,
...