Commit 5ac55a4d authored by LielinJiang

add unit test

Parent 254fbcc7
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
from __future__ import print_function
import unittest
import os
import numpy as np
import contextlib
import paddle
from paddle import fluid
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, Linear
from paddle.io import BatchSampler, DataLoader
from hapi.model import Model, Input, Loss, set_device
from hapi.loss import CrossEntropy
from hapi.metrics import Accuracy
from hapi.callbacks import ProgBarLogger
from hapi.datasets import MNIST as MnistDataset
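# A small conv + pool building block used by the MNIST model under test.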
class SimpleImgConvPool(fluid.dygraph.Layer):
def __init__(self,
num_channels,
num_filters,
filter_size,
pool_size,
pool_stride,
pool_padding=0,
pool_type='max',
global_pooling=False,
conv_stride=1,
conv_padding=0,
conv_dilation=1,
conv_groups=None,
act=None,
use_cudnn=False,
param_attr=None,
bias_attr=None):
super(SimpleImgConvPool, self).__init__('SimpleConv')
        self._conv2d = Conv2D(
            num_channels=num_channels,
            num_filters=num_filters,
            filter_size=filter_size,
            stride=conv_stride,
            padding=conv_padding,
            dilation=conv_dilation,
            groups=conv_groups,
            param_attr=param_attr,
            bias_attr=bias_attr,
            act=act,
            use_cudnn=use_cudnn)
self._pool2d = Pool2D(
pool_size=pool_size,
pool_type=pool_type,
pool_stride=pool_stride,
pool_padding=pool_padding,
global_pooling=global_pooling,
use_cudnn=use_cudnn)
def forward(self, inputs):
x = self._conv2d(inputs)
x = self._pool2d(x)
return x
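# LeNet-style MNIST classifier: two conv-pool blocks followed by a softmax fully connected layer.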
class MNIST(Model):
def __init__(self):
super(MNIST, self).__init__()
self._simple_img_conv_pool_1 = SimpleImgConvPool(
1, 20, 5, 2, 2, act="relu")
self._simple_img_conv_pool_2 = SimpleImgConvPool(
20, 50, 5, 2, 2, act="relu")
pool_2_shape = 50 * 4 * 4
SIZE = 10
scale = (2.0 / (pool_2_shape**2 * SIZE))**0.5
self._fc = Linear(
            pool_2_shape,
            SIZE,
param_attr=fluid.param_attr.ParamAttr(
initializer=fluid.initializer.NormalInitializer(
loc=0.0, scale=scale)),
act="softmax")
def forward(self, inputs):
inputs = fluid.layers.reshape(inputs, [-1, 1, 28, 28])
x = self._simple_img_conv_pool_1(inputs)
x = self._simple_img_conv_pool_2(x)
x = fluid.layers.flatten(x, axis=1)
x = self._fc(x)
return x
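# MNIST test split that yields images only (no labels), used to exercise Model.predict.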
class TestMnistDataset(MnistDataset):
def __init__(self):
super(TestMnistDataset, self).__init__(mode='test')
def __getitem__(self, idx):
return self.images[idx],
def __len__(self):
return len(self.images)
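# Compute top-1 accuracy from raw prediction scores and ground-truth labels.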
def get_predict_accuracy(pred, gt):
pred = np.argmax(pred, -1)
gt = np.array(gt)
correct = pred[:, np.newaxis] == gt
return np.sum(correct) / correct.shape[0]
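# End-to-end check of Model.fit/evaluate/predict on MNIST in both static graph and dygraph mode.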
class TestModel(unittest.TestCase):
def fit(self, dynamic):
device = set_device('gpu')
        if dynamic:
            fluid.enable_dygraph(device)
im_shape = (-1, 784)
batch_size = 128
inputs = [Input(im_shape, 'float32', name='image')]
labels = [Input([None, 1], 'int64', name='label')]
train_dataset = MnistDataset(mode='train')
val_dataset = MnistDataset(mode='test')
test_dataset = TestMnistDataset()
model = MNIST()
optim = fluid.optimizer.Momentum(
learning_rate=0.01, momentum=.9, parameter_list=model.parameters())
loss = CrossEntropy()
model.prepare(optim, loss, Accuracy(), inputs, labels, device=device)
cbk = ProgBarLogger(50)
model.fit(train_dataset,
val_dataset,
epochs=2,
batch_size=batch_size,
callbacks=cbk)
eval_result = model.evaluate(val_dataset, batch_size=batch_size)
output = model.predict(
test_dataset, batch_size=batch_size, stack_outputs=True)
np.testing.assert_equal(output[0].shape[0], len(test_dataset))
acc = get_predict_accuracy(output[0], val_dataset.labels)
np.testing.assert_allclose(acc, eval_result['acc'])
    def test_fit_static(self):
self.fit(False)
    def test_fit_dygraph(self):
self.fit(True)
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import os
import numpy as np
import tempfile
import shutil
import cv2
from hapi.datasets import *
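# Build a temporary class-per-subfolder image tree and make sure DatasetFolder can iterate over it.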
class TestFolderDatasets(unittest.TestCase):
def makedata(self):
self.data_dir = tempfile.mkdtemp()
for i in range(2):
sub_dir = os.path.join(self.data_dir, 'class_' + str(i))
if not os.path.exists(sub_dir):
os.makedirs(sub_dir)
for j in range(2):
fake_img = (np.random.random(
(32, 32, 3)) * 255).astype('uint8')
cv2.imwrite(os.path.join(sub_dir, str(j) + '.jpg'), fake_img)
def test_dataset(self):
self.makedata()
dataset_folder = DatasetFolder(self.data_dir)
for _ in dataset_folder:
pass
assert len(dataset_folder) == 4
assert len(dataset_folder.classes) == 2
shutil.rmtree(self.data_dir)
class TestMNISTTest(unittest.TestCase):
def test_main(self):
mnist = MNIST(mode='test')
self.assertTrue(len(mnist) == 10000)
for i in range(len(mnist)):
image, label = mnist[i]
self.assertTrue(image.shape[0] == 784)
self.assertTrue(label.shape[0] == 1)
self.assertTrue(0 <= int(label) <= 9)
class TestMNISTTrain(unittest.TestCase):
def test_main(self):
mnist = MNIST(mode='train')
self.assertTrue(len(mnist) == 60000)
for i in range(len(mnist)):
image, label = mnist[i]
self.assertTrue(image.shape[0] == 784)
self.assertTrue(label.shape[0] == 1)
self.assertTrue(0 <= int(label) <= 9)
class TestFlowersTrain(unittest.TestCase):
def test_main(self):
flowers = Flowers(mode='train')
self.assertTrue(len(flowers) == 6149)
        # traversing the whole dataset may take a
        # long time, so randomly check 1 sample
idx = np.random.randint(0, 6149)
image, label = flowers[idx]
self.assertTrue(len(image.shape) == 3)
self.assertTrue(image.shape[2] == 3)
self.assertTrue(label.shape[0] == 1)
class TestFlowersValid(unittest.TestCase):
def test_main(self):
flowers = Flowers(mode='valid')
self.assertTrue(len(flowers) == 1020)
        # traversing the whole dataset may take a
        # long time, so randomly check 1 sample
idx = np.random.randint(0, 1020)
image, label = flowers[idx]
self.assertTrue(len(image.shape) == 3)
self.assertTrue(image.shape[2] == 3)
self.assertTrue(label.shape[0] == 1)
class TestFlowersTest(unittest.TestCase):
def test_main(self):
flowers = Flowers(mode='test')
self.assertTrue(len(flowers) == 1020)
        # traversing the whole dataset may take a
        # long time, so randomly check 1 sample
idx = np.random.randint(0, 1020)
image, label = flowers[idx]
self.assertTrue(len(image.shape) == 3)
self.assertTrue(image.shape[2] == 3)
self.assertTrue(label.shape[0] == 1)
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import os
import time
import six
import copy
from argparse import ArgumentParser, REMAINDER
import paddle
import paddle.fluid as fluid
from paddle.distributed.utils import *
import paddle.distributed.cloud_utils as cloud_utils
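# Build a single-node cluster description on localhost with one free port per selected GPU.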
def get_cluster_from_args(selected_gpus):
cluster_node_ips = '127.0.0.1'
node_ip = '127.0.0.1'
use_paddlecloud = False
started_port = None
node_ips = [x.strip() for x in cluster_node_ips.split(',')]
node_rank = node_ips.index(node_ip)
logger.debug("parsed from args:node_ips:{} node_ip:{} node_rank:{}".format(
node_ips, node_ip, node_rank))
free_ports = None
if not use_paddlecloud and len(node_ips) <= 1 and started_port is None:
free_ports = find_free_ports(len(selected_gpus))
if free_ports is not None:
free_ports = list(free_ports)
else:
started_port = 6070
free_ports = [
x for x in range(started_port, started_port + len(selected_gpus))
]
return get_cluster(node_ips, node_ip, free_ports, selected_gpus)
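# Resolve the selected GPU ids; when CUDA_VISIBLE_DEVICES is set, map them to indices within it.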
def get_gpus(selected_gpus):
if selected_gpus is None:
gpus_num = fluid.core.get_cuda_device_count()
selected_gpus = [str(x) for x in range(0, gpus_num)]
else:
cuda_visible_devices = os.getenv("CUDA_VISIBLE_DEVICES")
if cuda_visible_devices is None or cuda_visible_devices == "":
selected_gpus = [x.strip() for x in selected_gpus.split(',')]
else:
cuda_visible_devices_list = cuda_visible_devices.split(',')
for x in selected_gpus.split(','):
assert x in cuda_visible_devices_list, "Can't find "\
"your selected_gpus %s in CUDA_VISIBLE_DEVICES[%s]."\
% (x, cuda_visible_devices)
selected_gpus = [
cuda_visible_devices_list.index(x.strip())
for x in selected_gpus.split(',')
]
return selected_gpus
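# Launch dist_mnist.py on two local GPUs and wait until every trainer process exits
# (skipped when no CUDA device is available).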
class TestMultipleGpus(unittest.TestCase):
def test_mnist_2gpu(self):
if fluid.core.get_cuda_device_count() == 0:
return
selected_gpus = get_gpus('0,1')
cluster = None
pod = None
cluster, pod = get_cluster_from_args(selected_gpus)
procs = start_local_trainers(
cluster,
pod,
training_script='dist_mnist.py',
training_script_args=[])
while True:
alive = watch_local_trainers(procs, cluster.trainers_nranks())
if not alive:
logger.info("Local procs complete, POD info:{}".format(pod))
break
time.sleep(3)
if __name__ == "__main__":
unittest.main()
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
from __future__ import print_function
import unittest
import os
import numpy as np
import shutil
import tempfile
from hapi.logger import setup_logger
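# setup_logger should work with no output, with a directory, and with a file path.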
class TestSetupLogger(unittest.TestCase):
def setUp(self):
self.save_dir = tempfile.mkdtemp()
self.save_file = os.path.join(self.save_dir, 'logger.txt')
def tearDown(self):
shutil.rmtree(self.save_dir)
def logger(self, output=None):
setup_logger(output=output)
def test_logger_no_output(self):
self.logger()
def test_logger_dir(self):
self.logger(self.save_dir)
def test_logger_file(self):
self.logger(self.save_file)
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
from __future__ import print_function
import unittest
import os
import six
import numpy as np
import shutil
import copy
import paddle
from paddle import fluid
from hapi.model import Model, Input
from hapi.vision.models import resnet18
from hapi.loss import CrossEntropy, SoftmaxWithCrossEntropy
def stable_softmax(x):
"""Compute the softmax of vector x in a numerically stable way."""
    # clip shiftx, otherwise, when calculating the loss with
    # log(exp(shiftx)), we may get log(0) = -INF
shiftx = (x - np.max(x)).clip(-64.)
exps = np.exp(shiftx)
return exps / np.sum(exps)
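# Generate random per-sample class distributions: every row is positive and sums to 1.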
def randomize_probability(batch_size, class_num, dtype='float32'):
prob = np.random.uniform(
0.1, 1.0, size=(batch_size, class_num)).astype(dtype)
prob_sum = prob.sum(axis=1)
for i in six.moves.xrange(len(prob)):
prob[i] /= prob_sum[i]
return prob
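# Reference cross-entropy loss computed in NumPy, used as ground truth for CrossEntropy.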
def numpy_ce(x, label):
return np.asmatrix(
[[-np.log(x[i][label[i][0]])] for i in range(x.shape[0])],
dtype="float32").mean()
class TestLoss(unittest.TestCase):
def test_cross_entropy(self):
class_num = 100
batch_size = 128
inputs = [randomize_probability(128, class_num) for _ in range(2)]
labels = [
np.random.randint(
0, class_num, (batch_size, 1), dtype="int64") for _ in range(2)
]
gt_out = [numpy_ce(inputs[i], labels[i]) for i in range(2)]
fluid.enable_dygraph()
cross_entropy = CrossEntropy()
out = cross_entropy(
[fluid.dygraph.to_variable(x) for x in inputs],
[fluid.dygraph.to_variable(label) for label in labels])
out = [o.numpy() for o in out]
for o, g in zip(out, gt_out):
np.testing.assert_allclose(o, g, atol=1e-5)
    def test_soft_cross_entropy(self):
class_num = 100
batch_size = 128
inputs = [randomize_probability(128, class_num) for _ in range(2)]
labels = [
np.random.randint(
0, class_num, (batch_size, 1), dtype="int64") for _ in range(2)
]
fluid.enable_dygraph()
softmax_cross_entropy = SoftmaxWithCrossEntropy()
softmax_cross_entropy(
[fluid.dygraph.to_variable(x) for x in inputs],
[fluid.dygraph.to_variable(label) for label in labels])
softmax_cross_entropy = SoftmaxWithCrossEntropy(average=False)
inputs = [randomize_probability(128, class_num)]
labels = [
np.random.randint(
0, class_num, (batch_size, 1), dtype="int64")
]
softmax_cross_entropy([fluid.dygraph.to_variable(x) for x in inputs],
fluid.dygraph.to_variable(labels[0]))
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
from __future__ import print_function
import unittest
import os
import numpy as np
import shutil
import tempfile
import paddle
from paddle import fluid
from hapi.model import Model, Input
from hapi.vision.models import resnet18
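# Export resnet18 via save_inference_model, reload it with an Executor, and check the
# inference results against the test_batch outputs.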
class TestSaveInferenceModel(unittest.TestCase):
def tearDown(self):
shutil.rmtree(self.save_dir)
def export_deploy_model(self):
model = resnet18()
inputs = [Input([None, 3, 224, 224], 'float32', name='image')]
model.prepare(inputs=inputs)
self.save_dir = tempfile.mkdtemp()
if not os.path.exists(self.save_dir):
os.makedirs(self.save_dir)
# tensor_img = np.array(np.random.random((1, 3, 224, 224)), dtype=np.float32)
# ori_results = model.test_batch(tensor_img)
model.save_inference_model(self.save_dir)
place = fluid.CPUPlace() if not fluid.is_compiled_with_cuda(
) else fluid.CUDAPlace(0)
exe = fluid.Executor(place)
[inference_program, feed_target_names, fetch_targets] = (
fluid.io.load_inference_model(
dirname=self.save_dir, executor=exe))
tensor_img = np.array(
np.random.random((1, 3, 224, 224)), dtype=np.float32)
ori_results = model.test_batch(tensor_img)
results = exe.run(inference_program,
feed={feed_target_names[0]: tensor_img},
fetch_list=fetch_targets)
np.testing.assert_allclose(results, ori_results)
def test_save_inference_model(self):
self.export_deploy_model()
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# When testing, add the hapi root path to PYTHONPATH, e.g.
# export PYTHONPATH=PATH_TO_HAPI:$PYTHONPATH
import unittest
import os
import tempfile
import cv2
import shutil
import numpy as np
from hapi.datasets import DatasetFolder
import hapi.vision.transforms as transforms
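# Exercise the vision transforms over a small temporary DatasetFolder of random images.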
class TestTransforms(unittest.TestCase):
def setUp(self):
self.data_dir = tempfile.mkdtemp()
for i in range(2):
sub_dir = os.path.join(self.data_dir, 'class_' + str(i))
if not os.path.exists(sub_dir):
os.makedirs(sub_dir)
for j in range(2):
if j == 0:
fake_img = (np.random.random(
(280, 350, 3)) * 255).astype('uint8')
else:
fake_img = (np.random.random(
(400, 300, 3)) * 255).astype('uint8')
cv2.imwrite(os.path.join(sub_dir, str(j) + '.jpg'), fake_img)
def tearDown(self):
shutil.rmtree(self.data_dir)
def do_transform(self, trans):
dataset_folder = DatasetFolder(self.data_dir, transform=trans)
for _ in dataset_folder:
pass
def test_trans_all(self):
normalize = transforms.Normalize(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.120, 57.375])
trans = transforms.Compose([
transforms.RandomResizedCrop(224), transforms.GaussianNoise(),
transforms.ColorJitter(
brightness=0.4, contrast=0.4, saturation=0.4,
hue=0.4), transforms.RandomHorizontalFlip(),
transforms.Permute(mode='CHW'), normalize
])
self.do_transform(trans)
def test_trans_resize(self):
trans = transforms.Compose([
transforms.Resize(300, [0, 1]),
transforms.RandomResizedCrop((280, 280)),
transforms.Resize(280, [0, 1]),
transforms.Resize((256, 200)),
transforms.Resize((180, 160)),
transforms.CenterCrop(128),
transforms.CenterCrop((128, 128)),
])
self.do_transform(trans)
def test_trans_centerCrop(self):
trans = transforms.Compose([
transforms.CenterCropResize(224),
transforms.CenterCropResize(128, 160),
])
self.do_transform(trans)
def test_flip(self):
trans = transforms.Compose([
transforms.RandomHorizontalFlip(1.0),
transforms.RandomHorizontalFlip(0.0),
transforms.RandomVerticalFlip(0.0),
transforms.RandomVerticalFlip(1.0),
])
self.do_transform(trans)
def test_color_jitter(self):
trans = transforms.BatchCompose([
transforms.BrightnessTransform(0.0),
transforms.HueTransform(0.0),
transforms.SaturationTransform(0.0),
transforms.ContrastTransform(0.0),
])
self.do_transform(trans)
def test_exception(self):
trans = transforms.Compose([transforms.Resize(-1)])
trans_batch = transforms.BatchCompose([transforms.Resize(-1)])
with self.assertRaises(Exception):
self.do_transform(trans)
with self.assertRaises(Exception):
self.do_transform(trans_batch)
with self.assertRaises(ValueError):
transforms.ContrastTransform(-1.0)
with self.assertRaises(ValueError):
            transforms.SaturationTransform(-1.0)
with self.assertRaises(ValueError):
transforms.HueTransform(-1.0)
with self.assertRaises(ValueError):
transforms.BrightnessTransform(-1.0)
def test_info(self):
str(transforms.Compose([transforms.Resize((224, 224))]))
str(transforms.BatchCompose([transforms.Resize((224, 224))]))
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import hapi.vision.models as models
from hapi.model import Input
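# Instantiate each model zoo architecture and run one forward pass on random input.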
class TestVisionModels(unittest.TestCase):
def models_infer(self, arch, pretrained=False, batch_norm=False):
x = np.array(np.random.random((2, 3, 224, 224)), dtype=np.float32)
if batch_norm:
model = models.__dict__[arch](pretrained=pretrained,
batch_norm=True)
else:
model = models.__dict__[arch](pretrained=pretrained)
inputs = [Input([None, 3, 224, 224], 'float32', name='image')]
model.prepare(inputs=inputs)
model.test_batch(x)
def test_mobilenetv2_pretrained(self):
self.models_infer('mobilenet_v2', pretrained=True)
def test_mobilenetv1(self):
self.models_infer('mobilenet_v1')
def test_vgg11(self):
self.models_infer('vgg11')
def test_vgg13(self):
self.models_infer('vgg13')
def test_vgg16(self):
self.models_infer('vgg16')
def test_vgg16_bn(self):
self.models_infer('vgg16', batch_norm=True)
def test_vgg19(self):
self.models_infer('vgg19')
def test_resnet18(self):
self.models_infer('resnet18')
def test_resnet34(self):
self.models_infer('resnet34')
def test_resnet50(self):
self.models_infer('resnet50')
    def test_resnet101(self):
self.models_infer('resnet101')
    def test_resnet152(self):
self.models_infer('resnet152')
def test_darknet53(self):
self.models_infer('darknet53')
def test_lenet(self):
lenet = models.__dict__['LeNet']()
inputs = [Input([None, 1, 28, 28], 'float32', name='x')]
lenet.prepare(inputs=inputs)
x = np.array(np.random.random((2, 1, 28, 28)), dtype=np.float32)
lenet.test_batch(x)
if __name__ == '__main__':
unittest.main()