Commit c1f19c95 authored by Lucas Meneghel Rodrigues

Merge pull request #963 from apahim/tests_move

avocado.tests Remove tests moved to avocado-misc-tests repo
#!/usr/bin/env python
import os
import shutil
from avocado import Test
from avocado import main
from avocado.utils import process
class Aiostress(Test):

    """
    aio-stress is a basic utility for testing the Linux kernel AIO api
    """

    def setUp(self):
        """
        Build 'aiostress'.

        Source:
         https://oss.oracle.com/~mason/aio-stress/aio-stress.c
        """
        source_name = self.params.get('aiostress_c', default='aio-stress.c')
        source_path = self.get_data_path(source_name)
        shutil.copy(source_path, self.srcdir)
        os.chdir(self.srcdir)
        # libaio development headers (libaio.h) must be installed for the
        # build to succeed
        compile_cmd = 'gcc -Wall -laio -lpthread -o aio-stress %s' % source_name
        process.run(compile_cmd)

    def test(self):
        """
        Run aiostress
        """
        os.chdir(self.srcdir)
        # 'foo' is the scratch file aio-stress performs its I/O against
        process.run('./aio-stress foo')


if __name__ == "__main__":
    main()
This diff has been collapsed.
#!/usr/bin/env python
import os
from avocado import Test
from avocado import main
from avocado.utils import archive
from avocado.utils import build
from avocado.utils import process
class Bonnie(Test):

    """
    Bonnie++ is a benchmark suite that is aimed at performing a number
    of simple tests of hard drive and file system performance.
    """

    def setUp(self):
        """
        Build bonnie++

        Source:
         http://www.coker.com.au/bonnie++/experimental/bonnie++-1.96.tgz
        """
        tarball_name = self.params.get('bonnie_tarball',
                                       default='bonnie++-1.96.tgz')
        tarball_path = self.get_data_path(tarball_name)
        archive.extract(tarball_path, self.srcdir)
        # the tarball unpacks into a directory named after its basename
        version_dir = tarball_name.split('.tgz')[0]
        self.srcdir = os.path.join(self.srcdir, version_dir)
        os.chdir(self.srcdir)
        process.run('./configure')
        build.make(self.srcdir)

    def test(self):
        """
        Run 'bonnie' with its arguments
        """
        scratch_dir = self.params.get('scratch-dir', default=self.srcdir)
        uid_to_use = self.params.get('uid-to-use', default=None)
        number_to_stat = self.params.get('number-to-stat', default=2048)
        options = ['-d %s' % scratch_dir,
                   '-n %s' % number_to_stat]
        # bonnie++ refuses to run as root unless a uid is supplied
        if uid_to_use is not None:
            options.append('-u %s' % uid_to_use)
        process.run('%s/bonnie++ %s' % (self.srcdir, " ".join(options)))


if __name__ == "__main__":
    main()
# Bonnie options:
# bonnie++ [-d scratch-dir] [-c concurrency] [-s size(MiB)[:chunk-size(b)]]
# [-n number-to-stat[:max-size[:min-size][:num-directories[:chunk-size]]]]
# [-m machine-name] [-r ram-size-in-MiB]
# [-x number-of-tests] [-u uid-to-use:gid-to-use] [-g gid-to-use]
# [-q] [-f] [-b] [-p processes | -y] [-z seed | -Z random-file]
# [-D]
# Valid options in avocado test are below:
setup:
scratch-dir: null
uid-to-use: null
number-to-stat: null
#!/usr/bin/env python
import os
from avocado import Test
from avocado import main
from avocado.utils import archive
from avocado.utils import process
class Compilebench(Test):

    """
    Compilebench tries to age a filesystem by simulating some of the
    disk IO common in creating, compiling, patching, stating and
    reading kernel trees.
    """

    def setUp(self):
        """
        Extract compilebench

        Source:
         https://oss.oracle.com/~mason/compilebench/compilebench-0.6.tar.bz2
        """
        tarball_name = self.params.get('cb_tarball',
                                       default='compilebench-0.6.tar.bz2')
        tarball_path = self.get_data_path(tarball_name)
        archive.extract(tarball_path, self.srcdir)
        # the tarball unpacks into a directory named after its basename
        version_dir = tarball_name.split('.tar.')[0]
        self.srcdir = os.path.join(self.srcdir, version_dir)

    def test(self):
        """
        Run 'compilebench' with its arguments
        """
        initial_dirs = self.params.get('INITIAL_DIRS', default=10)
        runs = self.params.get('RUNS', default=30)
        options = ['-D %s ' % self.srcdir,
                   '-s %s ' % self.srcdir,
                   '-i %d ' % initial_dirs,
                   '-r %d ' % runs]
        # Invoke through 'python' explicitly because compilebench ships
        # with a python2.4 shebang
        process.run('python %s/compilebench %s' % (self.srcdir,
                                                   " ".join(options)))


if __name__ == "__main__":
    main()
# Usage: compilebench [options]
# version: 0.6
#
# Options:
# -h, --help show this help message and exit
# -b BUFFER_SIZE, --buffer-size=BUFFER_SIZE
# buffer size (bytes)
# -i INITIAL_DIRS, --initial-dirs=INITIAL_DIRS
# number of dirs initially created
# -r RUNS, --runs=RUNS number of rand op runs
# -D DIRECTORY, --directory=DIRECTORY
# working directory
# -s SOURCES, --sources=SOURCES
# data set source file directory
# -t TRACE, --trace=TRACE
# blktrace output file
# -d DEVICE, --device=DEVICE
# blktrace device
# -m, --makej simulate a make -j on the initial dirs and exit
# -n, --no-sync don't sync and drop caches between each iteration
# Valid options in avocado test are below:
setup:
trees: !mux
default:
INITIAL_DIRS: null
quick:
INITIAL_DIRS: 5
runs: !mux
default:
RUNS: null
minimal:
RUNS: 1
#!/usr/bin/env python
import os
from avocado import Test
from avocado import main
from avocado.utils import archive
from avocado.utils import build
from avocado.utils import process
class FioTest(Test):

    """
    fio is an I/O tool meant to be used both for benchmark and
    stress/hardware verification.

    :see: http://freecode.com/projects/fio

    :param fio_tarball: name of the fio suite tarball located in deps path
    :param fio_job: config defining set of executed tests located in deps path
    """

    def setUp(self):
        """
        Build 'fio'.
        """
        tarball_name = self.params.get('fio_tarball',
                                      default='fio-2.1.10.tar.bz2')
        tarball_path = self.get_data_path(tarball_name)
        archive.extract(tarball_path, self.srcdir)
        # the tarball unpacks into a directory named after its basename
        version_dir = tarball_name.split('.tar.')[0]
        self.srcdir = os.path.join(self.srcdir, version_dir)
        build.make(self.srcdir)

    def test(self):
        """
        Execute 'fio' with appropriate parameters.
        """
        os.chdir(self.srcdir)
        job_file = self.params.get('fio_job', default='fio-mixed.job')
        process.system('./fio %s' % self.get_data_path(job_file))


if __name__ == "__main__":
    main()
; fio-mixed.job
[global]
name=fio-sync
;directory=tmpfiles
rw=randrw
rwmixread=67
rwmixwrite=33
bsrange=16K-256K
direct=0
end_fsync=1
verify=crc32
;ioscheduler=x
numjobs=4
[file1]
size=100M
ioengine=sync
mem=malloc
[file2]
stonewall
size=100M
ioengine=posixaio
mem=shm
iodepth=4
[file3]
stonewall
size=100M
ioengine=mmap
mem=mmap
direct=1
[file4]
stonewall
size=100M
ioengine=splice
mem=malloc
direct=1
#!/usr/bin/env python
import os
import multiprocessing
from avocado import Test
from avocado import main
from avocado.utils import archive
from avocado.utils import disk
from avocado.utils import build
from avocado.utils import memory
from avocado.utils import process
class Stress(Test):

    """
    Calls stress, a simple program which aims to impose certain types of
    computing stress on the target machine.

    @author: Yi Yang (yang.y.yi@gmail.com)
    """

    def setUp(self):
        """
        Build 'stress'.

        Source:
         http://people.seas.harvard.edu/~apw/stress/stress-1.0.4.tar.gz
        """
        tarball_name = self.params.get('stress_tarball',
                                       default='stress-1.0.4.tar.gz')
        tarball_path = self.get_data_path(tarball_name)
        archive.extract(tarball_path, self.srcdir)
        # the tarball unpacks into a directory named after its basename
        version_dir = tarball_name.split('.tar.')[0]
        self.srcdir = os.path.join(self.srcdir, version_dir)
        os.chdir(self.srcdir)
        process.run('./configure')
        build.make(self.srcdir)

    def test(self):
        """
        Execute 'stress' with proper arguments.
        """
        # NOTE(review): 'stress_lenght' (sic) is the established parameter
        # key; renaming it would break existing multiplexer configs.
        length = self.params.get('stress_lenght', default=60)
        workers = self.params.get('threads', default=None)
        mem_per_worker = self.params.get('memory_per_thread', default=None)
        file_size = self.params.get('file_size_per_thread',
                                    default=None)

        if workers is None:
            # Two workers of each type per detected CPU
            workers = 2 * multiprocessing.cpu_count()

        if mem_per_worker is None:
            # The default memory used by each memory worker (256 M) can
            # drive the machine OOM; size each worker from what is free
            # instead.  Values are presumably kB from /proc/meminfo, so
            # *1024 converts to bytes -- TODO confirm against avocado.utils.
            avail = (memory.freememtotal() +
                     memory.read_from_meminfo('SwapFree') / 2)
            mem_per_worker = (avail * 1024) / workers

        if file_size is None:
            # Prevent allocating more disk than this machine actually has
            # under srcdir (cap total usage at 90 % of the free space)
            free_disk = disk.freespace(self.srcdir)
            file_size = 1024 ** 2
            if (0.9 * free_disk) < file_size * workers:
                file_size = (0.9 * free_disk) / workers

        args = '--cpu %d ' % workers            # workers spinning on sqrt()
        args += '--io %d ' % workers            # workers spinning on sync()
        args += '--vm %d ' % workers            # workers on malloc()/free()
        args += '--vm-bytes %d ' % mem_per_worker
        args += '--hdd %d ' % workers           # workers on write()/unlink()
        args += '--hdd-bytes %d ' % file_size
        args += '--timeout %d ' % length        # total run time
        args += '--verbose'
        os.chdir(self.srcdir)
        process.run('./src/stress %s' % args)


if __name__ == "__main__":
    main()
setup:
duration: !mux
default:
stress_lenght: 60
quick:
stress_lenght: 5
workers: !mux
default:
threads: null
minimal:
threads: 1
memory:
memory_per_thread: null
files:
file_size_per_thread: null
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
To comment, please register.