Commit 3dd26cff authored by qingqing01

Add doc and unit testing

Parent d9a42a7d
......@@ -185,6 +185,9 @@ class ProgBarLogger(Callback):
self.verbose = verbose
self.log_freq = log_freq
def _is_print(self):
return self.verbose and ParallelEnv().local_rank == 0
def on_train_begin(self, logs=None):
self.epochs = self.params['epochs']
assert self.epochs
......@@ -195,7 +198,7 @@ class ProgBarLogger(Callback):
self.steps = self.params['steps']
self.epoch = epoch
self.train_step = 0
if self.verbose and self.epochs and ParallelEnv().local_rank == 0:
if self.epochs and self._is_print():
print('Epoch %d/%d' % (epoch + 1, self.epochs))
self.train_progbar = ProgressBar(num=self.steps, verbose=self.verbose)
......@@ -213,15 +216,13 @@ class ProgBarLogger(Callback):
logs = logs or {}
self.train_step += 1
if self.train_step % self.log_freq == 0 and self.verbose and ParallelEnv(
).local_rank == 0:
if self._is_print() and self.train_step % self.log_freq == 0:
if self.steps is None or self.train_step < self.steps:
self._updates(logs, 'train')
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
if self.train_step % self.log_freq != 0 and self.verbose and ParallelEnv(
).local_rank == 0:
if self._is_print() and (self.steps is not None):
self._updates(logs, 'train')
def on_eval_begin(self, logs=None):
......@@ -231,7 +232,7 @@ class ProgBarLogger(Callback):
self.evaled_samples = 0
self.eval_progbar = ProgressBar(
num=self.eval_steps, verbose=self.verbose)
if ParallelEnv().local_rank == 0:
if self._is_print():
print('Eval begin...')
def on_eval_batch_end(self, step, logs=None):
......@@ -240,16 +241,14 @@ class ProgBarLogger(Callback):
samples = logs.get('batch_size', 1)
self.evaled_samples += samples
if self.eval_step % self.log_freq == 0 and self.verbose and ParallelEnv(
).local_rank == 0:
if self._is_print() and self.eval_step % self.log_freq == 0:
if self.eval_steps is None or self.eval_step < self.eval_steps:
self._updates(logs, 'eval')
def on_eval_end(self, logs=None):
logs = logs or {}
if self.verbose and ParallelEnv().local_rank == 0:
if self.eval_step % self.log_freq != 0:
self._updates(logs, 'eval')
if self._is_print() and (self.eval_steps is not None):
self._updates(logs, 'eval')
print('Eval samples: %d' % (self.evaled_samples))
......
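The callbacks.py change above consolidates the repeated `self.verbose and ParallelEnv().local_rank == 0` checks into a single `_is_print()` helper, so that under multi-GPU data parallelism only the process on device 0 writes progress logs. A minimal sketch of the resulting pattern (assuming the `ParallelEnv` import from `paddle.fluid.dygraph.parallel` used elsewhere in hapi; the callback body is simplified, not the full ProgBarLogger):

from paddle.fluid.dygraph.parallel import ParallelEnv

class LoggerSketch(object):
    def __init__(self, log_freq=1, verbose=2):
        self.log_freq = log_freq
        self.verbose = verbose
        self.train_step = 0

    def _is_print(self):
        # Log only when verbose is set and this is the rank-0 process,
        # so multi-GPU jobs do not print one progress bar per device.
        return self.verbose and ParallelEnv().local_rank == 0

    def on_train_batch_end(self, logs=None):
        self.train_step += 1
        if self._is_print() and self.train_step % self.log_freq == 0:
            print('step %d: %s' % (self.train_step, logs or {}))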
......@@ -66,7 +66,7 @@ class CrossEntropy(Loss):
"""
def __init__(self, average=True):
super(CrossEntropy, self).__init__()
super(CrossEntropy, self).__init__(average)
def forward(self, outputs, labels):
return [
......@@ -88,7 +88,7 @@ class SoftmaxWithCrossEntropy(Loss):
"""
def __init__(self, average=True):
super(SoftmaxWithCrossEntropy, self).__init__()
super(SoftmaxWithCrossEntropy, self).__init__(average)
def forward(self, outputs, labels):
return [
......
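In loss.py, the fix forwards the `average` argument to the `Loss` base class instead of dropping it, so `CrossEntropy(average=False)` (as used by the new tests below) actually produces a summed rather than averaged loss. A hedged sketch of the behavior (the base-class attribute and the reduction logic here are illustrative assumptions, not the exact hapi implementation):

import paddle.fluid as fluid

class LossSketch(object):
    def __init__(self, average=True):
        # Before the fix, subclasses called __init__() with no argument,
        # so this flag silently stayed at its default of True.
        self.average = average

class CrossEntropySketch(LossSketch):
    def __init__(self, average=True):
        super(CrossEntropySketch, self).__init__(average)

    def forward(self, outputs, labels):
        loss = fluid.layers.cross_entropy(outputs[0], labels[0])
        if self.average:
            return [fluid.layers.reduce_mean(loss)]
        return [fluid.layers.reduce_sum(loss)]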
......@@ -653,28 +653,30 @@ class Model(fluid.dygraph.Layer):
import paddle
import paddle.fluid as fluid
#import paddle.incubate.hapi as hapi
import hapi as hapi
from hapi import Model, Input, set_device
from hapi.loss import CrossEntropy
from hapi.dataset import MNIST
class MyModel(hapi.model.Model):
class MyModel(Model):
def __init__(self):
super(MyModel, self).__init__()
self._fc = fluid.dygraph.Linear(784, 10, act='softmax')
def forward(self, x):
y = self._fc(x)
return y
device = hapi.model.set_device('gpu')
device = set_device('gpu')
# if use static graph, do not set
fluid.enable_dygraph(device)
model = MyModel()
optim = fluid.optimizer.SGD(learning_rate=1e-3,
parameter_list=model.parameters())
inputs = [hapi.model.Input([None, 784], 'float32', name='x')]
labels = [hapi.model.Input([None, 1], 'int64', name='label')]
inputs = [Input([None, 784], 'float32', name='x')]
labels = [Input([None, 1], 'int64', name='label')]
mnist_data = hapi.datasets.MNIST(mode='train')
mnist_data = MNIST(mode='train')
model.prepare(optim,
hapi.model.CrossEntropy(),
CrossEntropy(average=True),
hapi.metrics.Accuracy(),
inputs,
labels,
......@@ -721,9 +723,9 @@ class Model(fluid.dygraph.Layer):
import numpy as np
import paddle.fluid as fluid
import paddle.incubate.hapi as hapi
from hapi import Model, Input, set_device
class MyModel(hapi.Model):
class MyModel(Model):
def __init__(self):
super(MyModel, self).__init__()
self._fc = fluid.dygraph.Linear(784, 1, act='softmax')
......@@ -738,10 +740,10 @@ class Model(fluid.dygraph.Layer):
optim = fluid.optimizer.SGD(learning_rate=1e-3,
parameter_list=model.parameters())
inputs = [hapi.model.Input([None, 784], 'float32', name='x')]
labels = [hapi.model.Input([None, 1], 'int64', name='label')]
inputs = [Input([None, 784], 'float32', name='x')]
labels = [Input([None, 1], 'int64', name='label')]
model.prepare(optim,
hapi.model.CrossEntropy(),
CrossEntropy(average=True),
inputs=inputs,
labels=labels,
device=device)
......@@ -750,7 +752,7 @@ class Model(fluid.dygraph.Layer):
loss = model.train_batch([data], [label])
print(loss)
"""
return self._adapter.train_batch(*args, **kwargs)
return self._adapter.train_batch(inputs, labels)
def eval_batch(self, inputs, labels=None):
"""
......@@ -773,9 +775,9 @@ class Model(fluid.dygraph.Layer):
import numpy as np
import paddle.fluid as fluid
import paddle.incubate.hapi as hapi
from hapi import Model, Input, set_device
class MyModel(hapi.model.Model):
class MyModel(Model):
def __init__(self):
super(MyModel, self).__init__()
self._fc = fluid.dygraph.Linear(784, 1, act='softmax')
......@@ -783,17 +785,17 @@ class Model(fluid.dygraph.Layer):
y = self._fc(x)
return y
device = hapi.model.set_device('gpu')
device = set_device('gpu')
fluid.enable_dygraph(device)
model = MyModel()
optim = fluid.optimizer.SGD(learning_rate=1e-3,
parameter_list=model.parameters())
inputs = [hapi.model.Input([None, 784], 'float32', name='x')]
labels = [hapi.model.Input([None, 1], 'int64', name='label')]
inputs = [Input([None, 784], 'float32', name='x')]
labels = [Input([None, 1], 'int64', name='label')]
model.prepare(optim,
hapi.model.CrossEntropy(),
CrossEntropy(average=True),
inputs=inputs,
labels=labels,
device=device)
......@@ -802,7 +804,7 @@ class Model(fluid.dygraph.Layer):
loss = model.eval_batch([data], [label])
print(loss)
"""
return self._adapter.eval_batch(*args, **kwargs)
return self._adapter.eval_batch(inputs, labels)
def test_batch(self, inputs):
"""
......@@ -822,9 +824,9 @@ class Model(fluid.dygraph.Layer):
import numpy as np
import paddle.fluid as fluid
import paddle.incubate.hapi as hapi
from hapi import Model, Input, set_device
class MyModel(hapi.model.Model):
class MyModel(Model):
def __init__(self):
super(MyModel, self).__init__()
self._fc = fluid.dygraph.Linear(784, 1, act='softmax')
......@@ -832,18 +834,18 @@ class Model(fluid.dygraph.Layer):
y = self._fc(x)
return y
device = hapi.model.set_device('gpu')
device = set_device('gpu')
fluid.enable_dygraph(device)
model = MyModel()
inputs = [hapi.model.Input([None, 784], 'float32', name='x')]
inputs = [Input([None, 784], 'float32', name='x')]
model.prepare(inputs=inputs,
device=device)
data = np.random.random(size=(4,784)).astype(np.float32)
out = model.test_batch([data])
print(out)
"""
return self._adapter.test_batch(*args, **kwargs)
return self._adapter.test_batch(inputs)
def save(self, path):
"""
......@@ -872,9 +874,9 @@ class Model(fluid.dygraph.Layer):
.. code-block:: python
import paddle.fluid as fluid
import hapi as hapi
from hapi import Model, set_device
class MyModel(hapi.model.Model):
class MyModel(Model):
def __init__(self):
super(MyModel, self).__init__()
self._fc = fluid.dygraph.Linear(784, 1, act='softmax')
......@@ -882,13 +884,13 @@ class Model(fluid.dygraph.Layer):
y = self._fc(x)
return y
device = hapi.model.set_device('cpu')
device = set_device('cpu')
fluid.enable_dygraph(device)
model = MyModel()
model.save('checkpoint/test')
"""
if ParallelEnv().local_rank == 0:
return self._adapter.save(path)
self._adapter.save(path)
def load(self, path, skip_mismatch=False, reset_optimizer=False):
"""
......@@ -924,9 +926,9 @@ class Model(fluid.dygraph.Layer):
.. code-block:: python
import paddle.fluid as fluid
import hapi as hapi
from hapi import Model, set_device
class MyModel(hapi.model.Model):
class MyModel(Model):
def __init__(self):
super(MyModel, self).__init__()
self._fc = fluid.dygraph.Linear(784, 1, act='softmax')
......@@ -934,7 +936,7 @@ class Model(fluid.dygraph.Layer):
y = self._fc(x)
return y
device = hapi.model.set_device('cpu')
device = set_device('cpu')
fluid.enable_dygraph(device)
model = MyModel()
model.load('checkpoint/test')
......@@ -987,7 +989,14 @@ class Model(fluid.dygraph.Layer):
return self._adapter.load(matched_param_state, optim_state)
def parameters(self, *args, **kwargs):
return self._adapter.parameters(*args, **kwargs)
"""
Returns a list of parameters of the model.
Returns:
list of :ref:`api_guide_Variable_en` : a list of parameters.
"""
return self._adapter.parameters()
def prepare(self,
optimizer=None,
......
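The model.py docstrings are rewritten to import `Model`, `Input`, and `set_device` directly from `hapi` instead of going through `hapi.model.*`, and `train_batch`/`eval_batch`/`test_batch` now delegate to the adapter with explicit `inputs`/`labels` arguments rather than opaque `*args, **kwargs`. Condensing the updated train_batch docstring into one runnable sketch (shapes and hyperparameters are the docstring's own; `set_device('cpu')` is substituted for the docstring's 'gpu' so the sketch runs anywhere, and it assumes the standalone hapi package from this repo is importable):

import numpy as np
import paddle.fluid as fluid
from hapi import Model, Input, set_device
from hapi.loss import CrossEntropy

class MyModel(Model):
    def __init__(self):
        super(MyModel, self).__init__()
        self._fc = fluid.dygraph.Linear(784, 10, act='softmax')

    def forward(self, x):
        return self._fc(x)

device = set_device('cpu')
fluid.enable_dygraph(device)  # omit this call to run in static graph mode

model = MyModel()
optim = fluid.optimizer.SGD(learning_rate=1e-3,
                            parameter_list=model.parameters())
inputs = [Input([None, 784], 'float32', name='x')]
labels = [Input([None, 1], 'int64', name='label')]
model.prepare(optim, CrossEntropy(average=True),
              inputs=inputs, labels=labels, device=device)

data = np.random.random(size=(4, 784)).astype(np.float32)
label = np.random.randint(0, 10, size=(4, 1)).astype(np.int64)
loss = model.train_batch([data], [label])
print(loss)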
......@@ -17,34 +17,25 @@ from __future__ import print_function
import unittest
import os
import cv2
import numpy as np
import shutil
import tempfile
import paddle
from paddle import fluid
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, Linear
from paddle.fluid.dygraph.container import Sequential
from paddle.io import BatchSampler, DataLoader
from paddle.io import DataLoader
from paddle.fluid.dygraph.base import to_variable
from hapi.model import Model, Input, set_device
from hapi.loss import Loss
from hapi.loss import CrossEntropy
from hapi.metrics import Accuracy
from hapi.datasets import MNIST
from hapi.vision.models import LeNet
from hapi.download import get_weights_path_from_url
class LeNetDygraph(fluid.dygraph.Layer):
"""LeNet model from
`"LeCun Y, Bottou L, Bengio Y, et al. Gradient-based learning applied to document recognition[J]. Proceedings of the IEEE, 1998, 86(11): 2278-2324.`_
Args:
num_classes (int): output dim of last fc layer. If num_classes <=0, last fc layer
will not be defined. Default: 10.
classifier_activation (str): activation for the last fc layer. Default: 'softmax'.
"""
def __init__(self, num_classes=10, classifier_activation='softmax'):
super(LeNetDygraph, self).__init__()
self.num_classes = num_classes
......@@ -73,9 +64,12 @@ class LeNetDygraph(fluid.dygraph.Layer):
class MnistDataset(MNIST):
def __init__(self, mode, return_label=True):
def __init__(self, mode, return_label=True, sample_num=None):
super(MnistDataset, self).__init__(mode=mode)
self.return_label = return_label
if sample_num:
self.images = self.images[:sample_num]
self.labels = self.labels[:sample_num]
def __getitem__(self, idx):
img = np.reshape(self.images[idx], [1, 28, 28])
......@@ -87,15 +81,14 @@ class MnistDataset(MNIST):
return len(self.images)
def get_predict_accuracy(pred, gt):
def compute_acc(pred, label):
pred = np.argmax(pred, -1)
gt = np.array(gt)
correct = pred[:, np.newaxis] == gt
label = np.array(label)
correct = pred[:, np.newaxis] == label
return np.sum(correct) / correct.shape[0]
def low_level_lenet_dygraph_train(model, dataloader):
def dynamic_train(model, dataloader):
optim = fluid.optimizer.Adam(
learning_rate=0.001, parameter_list=model.parameters())
model.train()
......@@ -108,7 +101,7 @@ def low_level_lenet_dygraph_train(model, dataloader):
model.clear_gradients()
def low_level_dynamic_evaluate(model, dataloader):
def dynamic_evaluate(model, dataloader):
with fluid.dygraph.no_grad():
model.eval()
cnt = 0
......@@ -121,108 +114,210 @@ def low_level_dynamic_evaluate(model, dataloader):
return cnt / len(dataloader.dataset)
class TestEvaluatePredict(unittest.TestCase):
def setUp(self):
self.device = set_device('gpu')
self.train_dataset = MnistDataset(mode='train')
self.val_dataset = MnistDataset(mode='test')
self.test_dataset = MnistDataset(mode='test', return_label=False)
class TestModel(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.device = set_device('gpu')
fluid.enable_dygraph(cls.device)
fluid.enable_dygraph(self.device)
train_dataloader = fluid.io.DataLoader(
self.train_dataset, places=self.device, batch_size=64)
val_dataloader = fluid.io.DataLoader(
self.val_dataset, places=self.device, batch_size=64)
self.lenet_dygraph = LeNetDygraph()
low_level_lenet_dygraph_train(self.lenet_dygraph, train_dataloader)
self.acc1 = low_level_dynamic_evaluate(self.lenet_dygraph,
val_dataloader)
sp_num = 1280
cls.train_dataset = MnistDataset(mode='train', sample_num=sp_num)
cls.val_dataset = MnistDataset(mode='test', sample_num=sp_num)
cls.test_dataset = MnistDataset(
mode='test', return_label=False, sample_num=sp_num)
def evaluate(self, dynamic):
fluid.enable_dygraph(self.device) if dynamic else None
cls.train_loader = fluid.io.DataLoader(
cls.train_dataset, places=cls.device, batch_size=64)
cls.val_loader = fluid.io.DataLoader(
cls.val_dataset, places=cls.device, batch_size=64)
cls.test_loader = fluid.io.DataLoader(
cls.test_dataset, places=cls.device, batch_size=64)
inputs = [Input([-1, 1, 28, 28], 'float32', name='image')]
labels = [Input([None, 1], 'int64', name='label')]
if fluid.in_dygraph_mode():
feed_list = None
else:
feed_list = [x.forward() for x in inputs + labels]
self.train_dataloader = fluid.io.DataLoader(
self.train_dataset,
places=self.device,
batch_size=64,
feed_list=feed_list)
self.val_dataloader = fluid.io.DataLoader(
self.val_dataset,
places=self.device,
batch_size=64,
feed_list=feed_list)
self.test_dataloader = fluid.io.DataLoader(
self.test_dataset,
places=self.device,
batch_size=64,
feed_list=feed_list)
seed = 333
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
model = LeNet()
model.load_dict(self.lenet_dygraph.state_dict())
model.prepare(metrics=Accuracy(), inputs=inputs, labels=labels)
dy_lenet = LeNetDygraph()
cls.init_param = dy_lenet.state_dict()
dynamic_train(dy_lenet, cls.train_loader)
result = model.evaluate(self.val_dataloader)
cls.trained_param = dy_lenet.state_dict()
np.testing.assert_allclose(result['acc'], self.acc1)
cls.acc1 = dynamic_evaluate(dy_lenet, cls.val_loader)
cls.inputs = [Input([-1, 1, 28, 28], 'float32', name='image')]
cls.labels = [Input([None, 1], 'int64', name='label')]
fluid.disable_dygraph()
def predict(self, dynamic):
fluid.enable_dygraph(self.device) if dynamic else None
def test_fit_dygraph(self):
self.fit(True)
inputs = [Input([-1, 1, 28, 28], 'float32', name='image')]
labels = [Input([None, 1], 'int64', name='label')]
if fluid.in_dygraph_mode():
feed_list = None
else:
feed_list = [x.forward() for x in inputs + labels]
self.train_dataloader = fluid.io.DataLoader(
self.train_dataset,
places=self.device,
batch_size=64,
feed_list=feed_list)
self.val_dataloader = fluid.io.DataLoader(
self.val_dataset,
places=self.device,
batch_size=64,
feed_list=feed_list)
self.test_dataloader = fluid.io.DataLoader(
self.test_dataset,
places=self.device,
batch_size=64,
feed_list=feed_list)
def test_fit_static(self):
self.fit(False)
model = LeNet()
model.load_dict(self.lenet_dygraph.state_dict())
model.prepare(metrics=Accuracy(), inputs=inputs, labels=labels)
def not_test_evaluate_dygraph(self):
self.evaluate(True)
output = model.predict(self.test_dataloader, stack_outputs=True)
def not_test_evaluate_static(self):
self.evaluate(False)
np.testing.assert_equal(output[0].shape[0], len(self.test_dataset))
def not_test_predict_dygraph(self):
self.predict(True)
acc = get_predict_accuracy(output[0], self.val_dataset.labels)
def not_test_predict_static(self):
self.predict(False)
np.testing.assert_allclose(acc, self.acc1)
def fit(self, dynamic):
fluid.enable_dygraph(self.device) if dynamic else None
def test_evaluate_dygraph(self):
self.evaluate(True)
seed = 333
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
def test_evaluate_static(self):
self.evaluate(False)
model = LeNet()
optim_new = fluid.optimizer.Adam(
learning_rate=0.001, parameter_list=model.parameters())
model.prepare(
optim_new,
loss_function=CrossEntropy(average=False),
metrics=Accuracy(),
inputs=self.inputs,
labels=self.labels)
model.fit(self.train_dataset, batch_size=64, shuffle=False)
result = model.evaluate(self.val_dataset, batch_size=64)
np.testing.assert_allclose(result['acc'], self.acc1)
fluid.disable_dygraph() if dynamic else None
def test_predict_dygraph(self):
self.predict(True)
def evaluate(self, dynamic):
fluid.enable_dygraph(self.device) if dynamic else None
model = LeNet()
model.prepare(
metrics=Accuracy(), inputs=self.inputs, labels=self.labels)
model.load_dict(self.trained_param)
result = model.evaluate(self.val_dataset, batch_size=64)
np.testing.assert_allclose(result['acc'], self.acc1)
fluid.disable_dygraph() if dynamic else None
def test_predict_static(self):
self.predict(False)
def predict(self, dynamic):
fluid.enable_dygraph(self.device) if dynamic else None
model = LeNet()
model.prepare(inputs=self.inputs)
model.load_dict(self.trained_param)
output = model.predict(
self.test_dataset, batch_size=64, stack_outputs=True)
np.testing.assert_equal(output[0].shape[0], len(self.test_dataset))
acc = compute_acc(output[0], self.val_dataset.labels)
np.testing.assert_allclose(acc, self.acc1)
fluid.disable_dygraph() if dynamic else None
class MyModel(Model):
def __init__(self):
super(MyModel, self).__init__()
self._fc = Linear(20, 10, act='softmax')
def forward(self, x):
y = self._fc(x)
return y
class TestModelFunction(unittest.TestCase):
def set_seed(self, seed=1024):
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
def test_train_batch(self, dynamic=True):
dim = 20
data = np.random.random(size=(4, dim)).astype(np.float32)
label = np.random.randint(0, 10, size=(4, 1)).astype(np.int64)
def get_expect():
fluid.enable_dygraph(fluid.CPUPlace())
self.set_seed()
m = MyModel()
optim = fluid.optimizer.SGD(learning_rate=0.001,
parameter_list=m.parameters())
m.train()
output = m(to_variable(data))
l = to_variable(label)
loss = fluid.layers.cross_entropy(output, l)
avg_loss = fluid.layers.reduce_sum(loss)
avg_loss.backward()
optim.minimize(avg_loss)
m.clear_gradients()
fluid.disable_dygraph()
return avg_loss.numpy()
ref = get_expect()
for dynamic in [True, False]:
device = set_device('cpu')
fluid.enable_dygraph(device) if dynamic else None
self.set_seed()
model = MyModel()
optim2 = fluid.optimizer.SGD(learning_rate=0.001,
parameter_list=model.parameters())
inputs = [Input([None, dim], 'float32', name='x')]
labels = [Input([None, 1], 'int64', name='label')]
model.prepare(
optim2,
loss_function=CrossEntropy(average=False),
inputs=inputs,
labels=labels,
device=device)
loss, = model.train_batch([data], [label])
print(loss, ref)
np.testing.assert_allclose(loss.flatten(), ref.flatten())
fluid.disable_dygraph() if dynamic else None
def not_test_test_batch(self, dynamic=True):
dim = 20
data = np.random.random(size=(4, dim)).astype(np.float32)
def get_expect():
fluid.enable_dygraph(fluid.CPUPlace())
self.set_seed()
m = MyModel()
m.eval()
output = m(to_variable(data))
fluid.disable_dygraph()
return output.numpy()
ref = get_expect()
for dynamic in [True, False]:
self.set_seed()
device = set_device('cpu')
fluid.enable_dygraph(device) if dynamic else None
model = MyModel()
inputs = [Input([None, dim], 'float32', name='x')]
model.prepare(inputs=inputs, device=device)
out, = model.test_batch([data])
np.testing.assert_allclose(out, ref)
fluid.disable_dygraph() if dynamic else None
def not_test_save_load(self):
path = tempfile.mkdtemp()
for dynamic in [True, False]:
device = set_device('cpu')
fluid.enable_dygraph(device) if dynamic else None
model = MyModel()
model.save(path + '/test')
model.load(path + '/test')
shutil.rmtree(path)
fluid.disable_dygraph() if dynamic else None
def not_test_parameters(self):
for dynamic in [True, False]:
device = set_device('cpu')
fluid.enable_dygraph(device) if dynamic else None
model = MyModel()
params = model.parameters()
self.assertTrue(params[0].shape == [20, 10])
fluid.disable_dygraph() if dynamic else None
if __name__ == '__main__':
......
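The rewritten test_model.py trains the reference `LeNetDygraph` once in `setUpClass`, caches `init_param`/`trained_param`, and then replays `fit`, `evaluate`, and `predict` through the high-level `Model` API in both dygraph and static graph mode, asserting the same accuracy `acc1` each time. The mode toggle every test relies on reduces to this pattern (a minimal sketch; `run_case` is a hypothetical stand-in for any of the shared test bodies):

import paddle.fluid as fluid
from hapi.model import set_device

def run_in_both_modes(run_case):
    device = set_device('cpu')
    for dynamic in [True, False]:
        if dynamic:
            fluid.enable_dygraph(device)  # imperative execution
        run_case()                        # same test body in both modes
        if dynamic:
            fluid.disable_dygraph()       # back to static graph mode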