Commit 586cc564 authored by Lucas Meneghel Rodrigues, committed by Lucas Meneghel Rodrigues

Merge pull request #60 from avocado-framework/multiplex

Initial multiplex of configuration
...@@ -12,16 +12,8 @@ install: ...@@ -12,16 +12,8 @@ install:
- pip install -r requirements.txt - pip install -r requirements.txt
script: script:
- ./unittests/runtests.py -c .nose.cfg
- inspekt lint - inspekt lint
- inspekt style - inspekt style
- make -C docs html 2>&1 | grep -E '(ERROR|WARNING)' || test $? -eq 1 - ./selftests/run -v selftests/all/doc
- ./scripts/avocado run "sleeptest sleeptest" - ./selftests/run -v selftests/all/functional
- ./scripts/avocado run "sleeptest failtest sleeptest" || test $? -eq 1 - ./selftests/run selftests/all/unit -c selftests/.nose.cfg
- ./scripts/avocado run "bogustest" || test $? -ne 3
- export PYTHONPATH=$PYTHONPATH:.
- ./tests/sleeptest/sleeptest.py
- ./tests/skiptest/skiptest.py
- ./tests/failtest/failtest.py || test $? -eq 1
- ./tests/errortest/errortest.py || test $? -eq 1
- ./tests/warntest/warntest.py || test $? -eq 1
...@@ -196,6 +196,7 @@ def get_job_logs_dir(args=None): ...@@ -196,6 +196,7 @@ def get_job_logs_dir(args=None):
:param args: :class:`argparse.Namespace` instance with cmdline arguments :param args: :class:`argparse.Namespace` instance with cmdline arguments
(optional). (optional).
:rtype: basestring
""" """
start_time = time.strftime('%Y-%m-%d-%H.%M.%S') start_time = time.strftime('%Y-%m-%d-%H.%M.%S')
if args is not None: if args is not None:
......
...@@ -27,6 +27,7 @@ from avocado.core import output ...@@ -27,6 +27,7 @@ from avocado.core import output
from avocado.core import status from avocado.core import status
from avocado.core import exceptions from avocado.core import exceptions
from avocado.core import error_codes from avocado.core import error_codes
from avocado import multiplex_config
from avocado import test from avocado import test
from avocado import result from avocado import result
...@@ -52,15 +53,24 @@ class Job(object): ...@@ -52,15 +53,24 @@ class Job(object):
self.debuglog = os.path.join(self.debugdir, "debug.log") self.debuglog = os.path.join(self.debugdir, "debug.log")
if self.args is not None: if self.args is not None:
self.loglevel = args.log_level or logging.DEBUG self.loglevel = args.log_level or logging.DEBUG
self.multiplex_file = args.multiplex_file
else: else:
self.loglevel = logging.DEBUG self.loglevel = logging.DEBUG
self.multiplex_file = None
self.test_dir = data_dir.get_test_dir() self.test_dir = data_dir.get_test_dir()
self.test_index = 1 self.test_index = 1
self.status = "RUNNING" self.status = "RUNNING"
self.output_manager = output.OutputManager() self.output_manager = output.OutputManager()
def _load_test_instance(self, url): def _load_test_instance(self, params):
"""
Find the test url from the first component of the test shortname, and load the url.
:param params: Dictionary with test params.
"""
shortname = params.get('shortname')
url = shortname.split('.')[0]
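# e.g. a shortname such as 'sleeptest.short' maps to the test url 'sleeptest'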
path_attempt = os.path.abspath(url) path_attempt = os.path.abspath(url)
if os.path.exists(path_attempt): if os.path.exists(path_attempt):
test_class = test.DropinTest test_class = test.DropinTest
...@@ -79,31 +89,33 @@ class Job(object): ...@@ -79,31 +89,33 @@ class Job(object):
finally: finally:
test_instance = test_class(name=url, test_instance = test_class(name=url,
base_logdir=self.debugdir, base_logdir=self.debugdir,
params=params,
job=self) job=self)
return test_instance return test_instance
def run_test(self, url): def run_test(self, params):
""" """
Run a single test. Run a single test.
:param url: test URL. :param params: Dictionary with test params.
:type params: dict
:return: an instance of :class:`avocado.test.Test`. :return: an instance of :class:`avocado.test.Test`.
""" """
test_instance = self._load_test_instance(url) test_instance = self._load_test_instance(params)
test_instance.run_avocado() test_instance.run_avocado()
return test_instance return test_instance
def test_runner(self, urls, test_result): def test_runner(self, params_list, test_result):
""" """
Run one or more tests and report with test result. Run one or more tests and report with test result.
:param urls: a list of tests URLs. :param params_list: a list of param dicts.
:param test_result: An instance of :class:`avocado.result.TestResult`. :param test_result: An instance of :class:`avocado.result.TestResult`.
:return: a list of test failures. :return: a list of test failures.
""" """
failures = [] failures = []
for url in urls: for params in params_list:
test_instance = self.run_test(url) test_instance = self.run_test(params)
test_result.check_test(test_instance) test_result.check_test(test_instance)
if not status.mapping[test_instance.status]: if not status.mapping[test_instance.status]:
failures.append(test_instance.name) failures.append(test_instance.name)
...@@ -128,11 +140,12 @@ class Job(object): ...@@ -128,11 +140,12 @@ class Job(object):
self.args) self.args)
return test_result return test_result
def _run(self, urls=None): def _run(self, urls=None, multiplex_file=None):
""" """
Unhandled job method. Runs a list of test URLs to its completion. Unhandled job method. Runs a list of test URLs to its completion.
:param urls: String with tests to run. :param urls: String with tests to run.
:param multiplex_file: File that multiplexes a given test url.
:return: Integer with overall job status. See :return: Integer with overall job status. See
:mod:`avocado.core.error_codes` for more information. :mod:`avocado.core.error_codes` for more information.
...@@ -140,14 +153,45 @@ class Job(object): ...@@ -140,14 +153,45 @@ class Job(object):
:class:`avocado.core.exceptions.JobBaseException` errors, :class:`avocado.core.exceptions.JobBaseException` errors,
that configure a job failure. that configure a job failure.
""" """
params_list = []
if urls is None: if urls is None:
urls = self.args.url.split() if self.args and self.args.url is not None:
urls = self.args.url.split()
else:
if isinstance(urls, str):
urls = urls.split()
if urls is not None:
for url in urls:
params_list.append({'shortname': url})
test_runner = self._make_test_runner() if multiplex_file is None:
test_result = self._make_test_result(urls) if self.args and self.args.multiplex_file is not None:
multiplex_file = os.path.abspath(self.args.multiplex_file)
else:
multiplex_file = os.path.abspath(multiplex_file)
if multiplex_file is not None:
params_list = []
if urls is not None:
for url in urls:
parser = multiplex_config.Parser(multiplex_file)
parser.only_filter(url)
dcts = [d for d in parser.get_dicts()]
if dcts:
for dct in dcts:
params_list.append(dct)
else:
params_list.append({'shortname': url})
else:
parser = multiplex_config.Parser(multiplex_file)
for dct in parser.get_dicts():
params_list.append(dct)
test_result = self._make_test_result(params_list)
test_result.start_tests() test_result.start_tests()
failures = test_runner(urls, test_result) failures = self.test_runner(params_list, test_result)
test_result.end_tests() test_result.end_tests()
# If it's all good so far, set job status to 'PASS' # If it's all good so far, set job status to 'PASS'
if self.status == 'RUNNING': if self.status == 'RUNNING':
...@@ -161,17 +205,26 @@ class Job(object): ...@@ -161,17 +205,26 @@ class Job(object):
else: else:
return error_codes.numeric_status['AVOCADO_TESTS_FAIL'] return error_codes.numeric_status['AVOCADO_TESTS_FAIL']
def run(self, urls=None): def run(self, urls=None, multiplex_file=None):
""" """
Handled main job method. Runs a list of test URLs to its completion. Handled main job method. Runs a list of test URLs to its completion.
Note that the behavior is as follows:
* If urls is provided alone, just make a simple list with no specific params (all tests use default params).
* If urls and multiplex_file are provided, multiplex provides params and variants to all tests it can.
* If multiplex_file is provided alone, just use the matrix produced by the file.
With an empty urls list, the test runner figures out which tests to run by assuming the
first component of each shortname is the test url.
:param urls: String with tests to run. :param urls: String with tests to run.
:param multiplex_file: File that multiplexes a given test url.
:return: Integer with overall job status. See :return: Integer with overall job status. See
:mod:`avocado.core.error_codes` for more information. :mod:`avocado.core.error_codes` for more information.
""" """
try: try:
return self._run(urls) return self._run(urls, multiplex_file)
except exceptions.JobBaseException, details: except exceptions.JobBaseException, details:
self.status = details.status self.status = details.status
fail_class = details.__class__.__name__ fail_class = details.__class__.__name__
......
...@@ -689,7 +689,7 @@ class AptBackend(DpkgBackend): ...@@ -689,7 +689,7 @@ class AptBackend(DpkgBackend):
""" """
repo_file = open(self.repo_file_path, 'r') repo_file = open(self.repo_file_path, 'r')
new_file_contents = [] new_file_contents = []
for line in repo_file.readlines: for line in repo_file.readlines():
if not line == repo: if not line == repo:
new_file_contents.append(line) new_file_contents.append(line)
repo_file.close() repo_file.close()
......
This diff is collapsed.
...@@ -27,7 +27,8 @@ Builtins = [('avocado.plugins.runner', 'TestLister'), ...@@ -27,7 +27,8 @@ Builtins = [('avocado.plugins.runner', 'TestLister'),
('avocado.plugins.xunit', 'XUnit'), ('avocado.plugins.xunit', 'XUnit'),
('avocado.plugins.lister', 'PluginsList'), ('avocado.plugins.lister', 'PluginsList'),
('avocado.plugins.journal', 'Journal'), ('avocado.plugins.journal', 'Journal'),
('avocado.plugins.datadir', 'DataDirList')] ('avocado.plugins.datadir', 'DataDirList'),
('avocado.plugins.multiplexer', 'Multiplexer')]
def load_builtins(set_globals=True): def load_builtins(set_globals=True):
......
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: Red Hat Inc. 2013-2014
# Author: Lucas Meneghel Rodrigues <lmr@redhat.com>
import os
import sys
from avocado.plugins import plugin
from avocado.core import output
from avocado.core import error_codes
from avocado import multiplex_config
class Multiplexer(plugin.Plugin):
"""
Implements the avocado 'multiplex' functionality.
"""
name = 'multiplexer'
enabled = True
def configure(self, app_parser, cmd_parser):
myparser = cmd_parser.add_parser('multiplex',
help='Generate a list of dictionaries with params from a multiplex file')
myparser.add_argument('multiplex_file', type=str,
help='Path to a multiplex file ',
nargs='?', default=None)
myparser.add_argument('-c', '--contents', action='store_true',
help=('Show the contents of the generated '
'dictionaries, not only their shortnames'), default=False)
myparser.set_defaults(func=self.multiplex)
self.configured = True
def multiplex(self, args):
bcolors = output.colors
pipe = output.get_paginator()
multiplex_file = os.path.abspath(args.multiplex_file)
if not os.path.isfile(multiplex_file):
pipe.write(bcolors.fail_header_str('Invalid multiplex file %s' % multiplex_file))
sys.exit(error_codes.numeric_status['AVOCADO_JOB_FAIL'])
try:
parser = multiplex_config.Parser(filename=multiplex_file)
except multiplex_config.ParserError, details:
fail_class = details.__class__.__name__
pipe.write(bcolors.fail_header_str("Multiplex file '%s' has a syntax error\n" % multiplex_file))
pipe.write('%s: %s\n' % (fail_class, details))
pipe.write(bcolors.fail_header_str('Aborting...'))
sys.exit(error_codes.numeric_status['AVOCADO_JOB_FAIL'])
pipe.write(bcolors.header_str('Dictionaries generated:'))
pipe.write('\n')
for (index, dct) in enumerate(parser.get_dicts()):
pipe.write(' dict %s: %s\n' % (index+1, dct.get('shortname')))
if args.contents:
for key in sorted(dct.keys()):
pipe.write(' %s = %s\n' % (key, dct.get(key)))
sys.exit(error_codes.numeric_status['AVOCADO_ALL_OK'])
...@@ -82,7 +82,12 @@ class TestRunner(plugin.Plugin): ...@@ -82,7 +82,12 @@ class TestRunner(plugin.Plugin):
myparser.add_argument('url', type=str, myparser.add_argument('url', type=str,
help=('Test module names or paths to dropin tests ' help=('Test module names or paths to dropin tests '
'(space separated)'), '(space separated)'),
nargs='?', default='') nargs='?', default=None)
myparser.add_argument('-m', '--multiplex-file', type=str,
help=('Path to an avocado multiplex '
'(.mplx) file'),
nargs='?', default=None)
myparser.add_argument('--keep-tmp-files', action='store_true', myparser.add_argument('--keep-tmp-files', action='store_true',
help=('Keep temporary files generated by tests. ' help=('Keep temporary files generated by tests. '
......
...@@ -62,12 +62,40 @@ class ConfigFileNotFound(SettingsError): ...@@ -62,12 +62,40 @@ class ConfigFileNotFound(SettingsError):
self.path_list) self.path_list)
def convert_value_type(key, section, value, value_type): def convert_value_type(value, value_type):
""" """
Convert a string to another data type. Convert a string value to a given value type.
:param value: Value we want to convert.
:type value: str.
:param value_type: Type of the value we want to convert.
:type value_type: str or type.
:return: Converted value type.
:rtype: Dependent on value_type.
:raise: TypeError, in case it was not possible to convert values.
""" """
# strip off leading and trailing white space # strip off leading and trailing white space
sval = value.strip() try:
sval = value.strip()
except:
sval = value
if isinstance(value_type, str):
if value_type == 'str':
value_type = str
elif value_type == 'bool':
value_type = bool
elif value_type == 'int':
value_type = int
elif value_type == 'float':
value_type = float
elif value_type == 'list':
value_type = list
if value_type is None:
value_type = str
# if length of string is zero then return None # if length of string is zero then return None
if len(sval) == 0: if len(sval) == 0:
...@@ -94,13 +122,8 @@ def convert_value_type(key, section, value, value_type): ...@@ -94,13 +122,8 @@ def convert_value_type(key, section, value, value_type):
# Split the string using ',' and return a list # Split the string using ',' and return a list
return [val.strip() for val in sval.split(',')] return [val.strip() for val in sval.split(',')]
try: conv_val = value_type(sval)
conv_val = value_type(sval) return conv_val
return conv_val
except Exception:
msg = ("Could not convert %s value %r in section %s to type %s" %
(key, sval, section, value_type))
raise SettingsValueError(msg)
class Settings(object): class Settings(object):
...@@ -183,7 +206,12 @@ class Settings(object): ...@@ -183,7 +206,12 @@ class Settings(object):
if not val.strip() and not allow_blank: if not val.strip() and not allow_blank:
return self._handle_no_value(section, key, default) return self._handle_no_value(section, key, default)
return convert_value_type(key, section, val, key_type) try:
return convert_value_type(val, key_type)
except Exception, details:
raise SettingsValueError("Could not convert value %r to type %s "
"(settings key %s, section %s): %s" %
(val, key_type, key, section, details))
settings = Settings() settings = Settings()
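As an illustrative sketch of how the reworked helper behaves (hypothetical values; string type names are resolved to real types before the conversion is applied):

from avocado import settings

print settings.convert_value_type('1', 'float')    # -> 1.0
print settings.convert_value_type('a, b', 'list')  # -> ['a', 'b']
print settings.convert_value_type('10G', None)     # -> '10G' (value_type defaults to str)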
...@@ -27,6 +27,7 @@ import unittest ...@@ -27,6 +27,7 @@ import unittest
from avocado.core import data_dir from avocado.core import data_dir
from avocado.core import exceptions from avocado.core import exceptions
from avocado.utils import process from avocado.utils import process
from avocado.utils.params import Params
from avocado import sysinfo from avocado import sysinfo
...@@ -38,9 +39,10 @@ class Test(unittest.TestCase): ...@@ -38,9 +39,10 @@ class Test(unittest.TestCase):
You'll inherit from this to write your own tests. Typically you'll want You'll inherit from this to write your own tests. Typically you'll want
to implement setup(), action() and cleanup() methods on your own tests. to implement setup(), action() and cleanup() methods on your own tests.
""" """
default_params = {}
def __init__(self, methodName='runTest', name=None, base_logdir=None, def __init__(self, methodName='runTest', name=None, params=None,
tag=None, job=None): base_logdir=None, tag=None, job=None):
""" """
Initializes the test. Initializes the test.
...@@ -63,7 +65,17 @@ class Test(unittest.TestCase): ...@@ -63,7 +65,17 @@ class Test(unittest.TestCase):
else: else:
self.name = self.__class__.__name__ self.name = self.__class__.__name__
self.tag = tag if params is None:
params = {}
self.params = Params(params)
shortname = self.params.get('shortname')
s_tag = None
if shortname:
split_shortname = shortname.split('.')
if len(split_shortname) > 1:
s_tag = ".".join(split_shortname[1:])
self.tag = tag or s_tag
self.job = job self.job = job
self.basedir = os.path.join(data_dir.get_test_dir(), self.name) self.basedir = os.path.join(data_dir.get_test_dir(), self.name)
self.depsdir = os.path.join(self.basedir, 'deps') self.depsdir = os.path.join(self.basedir, 'deps')
...@@ -84,6 +96,28 @@ class Test(unittest.TestCase): ...@@ -84,6 +96,28 @@ class Test(unittest.TestCase):
self.log = logging.getLogger("avocado.test") self.log = logging.getLogger("avocado.test")
self.log.info('START %s', self.tagged_name)
self.log.debug('')
self.log.debug('Test instance parameters:')
# Set the helper set_default to the params object
setattr(self.params, 'set_default', self._set_default)
# Apply what comes from the params dict
for key in sorted(self.params.keys()):
self.log.debug(' %s = %s', key, self.params.get(key))
setattr(self.params, key, self.params.get(key))
self.log.debug('')
# Apply what comes from the default_params dict
self.log.debug('Default parameters:')
for key in sorted(self.default_params.keys()):
self.log.debug(' %s = %s', key, self.default_params.get(key))
self.params.set_default(key, self.default_params[key])
self.log.debug('')
self.log.debug('Test instance params override defaults whenever available')
self.log.debug('')
self.debugdir = None self.debugdir = None
self.resultsdir = None self.resultsdir = None
self.status = None self.status = None
...@@ -101,6 +135,13 @@ class Test(unittest.TestCase): ...@@ -101,6 +135,13 @@ class Test(unittest.TestCase):
def __repr__(self): def __repr__(self):
return "Test(%r)" % self.tagged_name return "Test(%r)" % self.tagged_name
def _set_default(self, key, default):
try:
self.params[key]
except Exception:
self.params[key] = default
setattr(self.params, key, default)
def get_deps_path(self, basename): def get_deps_path(self, basename):
""" """
Find a test dependency path inside the test depsdir. Find a test dependency path inside the test depsdir.
...@@ -245,6 +286,7 @@ class Test(unittest.TestCase): ...@@ -245,6 +286,7 @@ class Test(unittest.TestCase):
end_time = time.time() end_time = time.time()
self.time_elapsed = end_time - start_time self.time_elapsed = end_time - start_time
self.report() self.report()
self.log.info("")
with open(self.logfile, 'r') as log_file_obj: with open(self.logfile, 'r') as log_file_obj:
self.text_output = log_file_obj.read() self.text_output = log_file_obj.read()
self.stop_logging() self.stop_logging()
...@@ -270,12 +312,12 @@ class DropinTest(Test): ...@@ -270,12 +312,12 @@ class DropinTest(Test):
Run an arbitrary command that returns either 0 (PASS) or !=0 (FAIL). Run an arbitrary command that returns either 0 (PASS) or !=0 (FAIL).
""" """
def __init__(self, path, base_logdir, tag=None, job=None): def __init__(self, path, params=None, base_logdir=None, tag=None, job=None):
basename = os.path.basename(path) basename = os.path.basename(path)
name = basename.split(".")[0] name = basename.split(".")[0]
self.path = os.path.abspath(path) self.path = os.path.abspath(path)
super(DropinTest, self).__init__(name=name, base_logdir=base_logdir, super(DropinTest, self).__init__(name=name, base_logdir=base_logdir,
tag=tag, job=job) params=params, tag=tag, job=job)
def _log_detailed_cmd_info(self, result): def _log_detailed_cmd_info(self, result):
""" """
...@@ -305,7 +347,8 @@ class MissingTest(Test): ...@@ -305,7 +347,8 @@ class MissingTest(Test):
Handle when there is no such test module in the test directory. Handle when there is no such test module in the test directory.
""" """
def __init__(self, name=None, base_logdir=None, tag=None, job=None): def __init__(self, name=None, params=None, base_logdir=None, tag=None,
job=None):
super(MissingTest, self).__init__(name=name, super(MissingTest, self).__init__(name=name,
base_logdir=base_logdir, base_logdir=base_logdir,
tag=tag, job=job) tag=tag, job=job)
......
import UserDict
from threading import Lock
from avocado.core import exceptions
from avocado import settings
class ParamNotFound(exceptions.TestError):
pass
class ParamInvalidType(exceptions.TestError):
pass
class Params(UserDict.IterableUserDict):
"""
A dict-like object passed to every test.
"""
lock = Lock()
def __getitem__(self, key):
""" overrides the error messages of missing params[$key] """
try:
value = UserDict.IterableUserDict.__getitem__(self, key)
vtype = UserDict.IterableUserDict.get(self, "%s_type" % key)
return settings.convert_value_type(value, vtype)
except KeyError:
raise ParamNotFound("Mandatory parameter '%s' is missing. "
"Check your cfg files for typos/mistakes" %
key)
except Exception, details:
raise ParamInvalidType("Parameter '%s' value '%r' failed to "
"convert to %s: %s" %
(key, value, vtype, details))
def objects(self, key):
"""
Return the names of objects defined using a given key.
:param key: The name of the key whose value lists the objects
(e.g. 'nics').
"""
return self.get(key, "").split()
def object_params(self, obj_name):
"""
Return a dict-like object containing the parameters of an individual
object.
This method behaves as follows: the suffix '_' + obj_name is removed
from all key names that have it. Other key names are left unchanged.
The values of keys with the suffix overwrite the values of their
suffixless versions.
:param obj_name: The name of the object (objects are listed by the
objects() method).
"""
suffix = "_" + obj_name
self.lock.acquire()
new_dict = self.copy()
self.lock.release()
for key in new_dict.keys():
if key.endswith(suffix):
new_key = key.split(suffix)[0]
new_dict[new_key] = new_dict[key]
return new_dict
def object_counts(self, count_key, base_name):
"""
This is a generator method: given the name of a count key and a base_name,
it yields (key, value) pairs for the keys base_name1 .. base_nameN, where N
is the value of count_key.
"""
count = self.get(count_key, 1)
# Protect in case original is modified for some reason
cpy = self.copy()
for number in xrange(1, int(count) + 1):
key = "%s%s" % (base_name, number)
yield (key, cpy.get(key))
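To make the behavior of this dict-like object concrete, a minimal usage sketch with hypothetical keys (assuming the module lands at avocado.utils.params, as the unit test below imports it):

from avocado.utils.params import Params

p = Params({'images': 'image1 stg',
            'image_size': '10G',
            'image_size_stg': '20G',
            'timeout': '60',
            'timeout_type': 'int'})

print p.objects('images')                    # -> ['image1', 'stg']
print p['timeout']                           # -> 60, converted via the 'timeout_type' hint
print p.object_params('stg')['image_size']   # -> '20G', the '_stg' suffixed value wins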
...@@ -92,3 +92,6 @@ native tests and dropin tests:: ...@@ -92,3 +92,6 @@ native tests and dropin tests::
TOTAL SKIPPED: 0 TOTAL SKIPPED: 0
TOTAL WARNED: 0 TOTAL WARNED: 0
ELAPSED TIME: 5.67 s ELAPSED TIME: 5.67 s
Some more involved functionality of the avocado runner is discussed as appropriate,
alongside the introduction of the relevant concepts.
.. _multiplex_configuration:
=======================
Multiplex Configuration
=======================
Multiplex Configuration is a specialized way of providing lists
of key/value pairs within combinations of various categories,
which will be passed to the avocado test as parameters in a dictionary
called ``params``. The format simplifies and condenses complex
multidimensional arrays of test parameters into a flat list. The
combinatorial result can be filtered and adjusted prior to testing,
with filters, dependencies, and key/value substitutions.
The parser relies on indentation, and is very sensitive to misplacement
of tab and space characters. It's highly recommended to edit/view
Multiplex Configuration files in an editor capable of collapsing tab
characters into four space characters. Improper attention to column
spacing can drastically affect output.
.. _keys_and_values:
Keys and values
===============
Keys and values are the most basic useful facility provided by the
format. A statement in the form ``<key> = <value>`` sets ``<key>`` to
``<value>``. Values are strings, terminated by a linefeed, with
surrounding quotes completely optional (but honored). A reference of
descriptions for most keys is included in section Configuration Parameter
Reference.
The key will become part of all lower-level (i.e. further indented) variant
stanzas (see section variants_). However, key precedence is evaluated in
top-down or ``last defined`` order. In other words, the last parsed key has
precedence over earlier definitions.
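As a quick illustration with a hypothetical ``timeout`` key, both assignments below apply to
the same variant, and the one parsed last wins, so the resulting dictionary carries
``timeout = 60``::

    timeout = 30
    timeout = 60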
.. _variants:
Variants
========
A ``variants`` stanza is opened by a ``variants:`` statement. The contents
of the stanza must be indented further left than the ``variants:``
statement. Each variant stanza or block defines a single dimension of
the output array. When a Multiplex Configuration file contains
two variants stanzas, the output will be all possible combinations of
both variant contents. Variants may be nested within other variants,
effectively nesting arbitrarily complex arrays within the cells of
outside arrays. For example::
    variants:
        - one:
            key1 = Hello
        - two:
            key2 = World
        - three:

    variants:
        - four:
            key3 = foo
        - five:
            key3 = bar
        - six:
            key1 = foo
            key2 = bar
While combining, the parser forms names for each outcome based on
prepending each variant onto a list. In other words, the first variant
name parsed will appear as the leftmost name component. These names can
become quite long, and since they serve as keys to distinguish between
results, a 'short-name' key is also used.
Avocado comes equipped with a plugin to parse multiplex files. The appropriate
subcommand is::
avocado multiplex /path/to/multiplex.mplx [-c]
Note that a multiplex file does not require any particular extension, although
using one helps with organization. The optional -c param makes the plugin print
the contents of the generated dictionaries, not only their shortnames.
``avocado multiplex`` against the content above produces the following
combinations and names::
Dictionaries generated:
dict 1: four.one
dict 2: four.two
dict 3: four.three
dict 4: five.one
dict 5: five.two
dict 6: five.three
dict 7: six.one
dict 8: six.two
dict 9: six.three
Variant shortnames represent the ``<TESTNAME>`` value used when results are
recorded (see section Job Names and Tags). For convenience,
variants whose name begins with a ``@`` do not prepend their name to
``shortname``, only to ``name``. This allows creating ``shortcuts`` for
specifying multiple sets or changes to key/value pairs without changing
the results directory name. For example, this is often convenient for
providing a collection of related pre-configured tests based on a
combination of others.
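For instance, a hypothetical sketch::

    variants:
        - @extra_debug:
            debug = yes

Combined with the other variants, ``extra_debug`` contributes its ``debug`` key and appears in
the full ``name``, but is not prepended to the resulting ``shortname``.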
.. _filters:
Filters
=======
Filter statements allow modifying the resultant set of keys based on the
name of the variant set (see section variants_). Filters can be used in 3 ways:

* Limiting the set to include only combination names matching a pattern.
* Limiting the set to exclude all combination names not matching a pattern.
* Modifying the set or contents of key/value pairs within a matching
  combination name.
Names are matched by pairing a variant name component with the
character(s) ``,`` meaning ``OR``, ``..`` meaning ``AND``, and ``.`` meaning
``IMMEDIATELY-FOLLOWED-BY``. When used alone, they permit modifying the list
of key/values previously defined. For example:
::
    Linux..OpenSuse:
        initrd = initrd
Modifies all variants containing ``Linux`` followed anywhere thereafter
with ``OpenSuse``, such that the ``initrd`` key is created or overwritten
with the value ``initrd``.
When a filter is preceded by the keyword ``only`` or ``no``, it limits the
selection of variant combinations. This is used where a particular set
of one or more variant combinations should be considered selectively or
exclusively. When given an extremely large matrix of variants, the
``only`` keyword is convenient to limit the result set to only those
matching the filter, whereas the ``no`` keyword can be used to remove
particular conflicting key/value sets under other variant combination
names. For example:
::
only Linux..Fedora..64
Would reduce an arbitrarily large matrix to only those variants whose
names contain Linux, Fedora, and 64 in them.
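The ``,`` (OR) operator combines with these keywords as well; for instance, a hypothetical::

    no Windows,BSD

would drop every combination whose name contains either ``Windows`` or ``BSD``.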
However, note that any of these filters may be used within named
variants as well. In this application, they are only evaluated when that
variant name is selected for inclusion (implicitly or explicitly) by a
higher-order variant. For example:
::
    variants:
        - one:
            key1 = Hello

    variants:
        - two:
            key2 = Complicated
        - three: one two
            key3 = World

    variants:
        - default:
            only three
            key2 =

    only default
Results in the following outcome (using -c):
::
Dictionaries generated:
dict 1: default.three.one
_name_map_file = {'docs.mplx': 'default.three.one'}
_short_name_map_file = {'docs.mplx': 'default.three.one'}
dep = ['default.one', 'default.two']
key1 = Hello
key2 =
key3 = World
name = default.three.one
shortname = default.three.one
.. _value_substitutions:
Value Substitutions
===================
Value substitution allows for selectively overriding precedence and
defining part or all of a future key's value. Using a previously defined
key, its value may be substituted in or as another key's value. The
syntax is exactly the same as in the bash shell, where a key's value
is substituted in wherever that key's name appears following a ``$``
character. When nesting a key within other non-key-name text, the name
should also be surrounded by ``{`` and ``}`` characters.

Replacement is context-sensitive: if a key is redefined within the
same or a higher-order block, that value will be used for future
substitutions. If a key is referenced for substitution, but hasn't yet
been defined, no action is taken. In other words, the $key or ${key}
string will appear literally as or within the value. Nesting of
references is not supported (i.e. key substitutions within other
substitutions).
For example, if ``one = 1``, ``two = 2``, and ``three = 3``, then
``order = ${one}${two}${three}`` results in ``order = 123``. This is
particularly handy for rooting an arbitrary complex directory tree
within a predefined top-level directory.
An example of context-sensitivity,
::
    key1 = default value
    key2 = default value
    sub = "key1: ${key1}; key2: ${key2};"

    variants:
        - one:
            key1 = Hello
            sub = "key1: ${key1}; key2: ${key2};"
        - two: one
            key2 = World
            sub = "key1: ${key1}; key2: ${key2};"
        - three: one two
            sub = "key1: ${key1}; key2: ${key2};"
Results in the following (using -c)
::
Dictionaries generated:
dict 1: one
_name_map_file = {'docs.mplx': 'one'}
_short_name_map_file = {'docs.mplx': 'one'}
dep = []
key1 = Hello
key2 = default value
name = one
shortname = one
sub = key1: Hello; key2: default value;
dict 2: two
_name_map_file = {'docs.mplx': 'two'}
_short_name_map_file = {'docs.mplx': 'two'}
dep = ['one']
key1 = default value
key2 = World
name = two
shortname = two
sub = key1: default value; key2: World;
dict 3: three
_name_map_file = {'docs.mplx': 'three'}
_short_name_map_file = {'docs.mplx': 'three'}
dep = ['one', 'two']
key1 = default value
key2 = default value
name = three
shortname = three
sub = key1: default value; key2: default value;
With Keys, Values, Variants, Filters and Value Substitutions, you have most of what
you actually need to construct multiplex files. The format also has some extra features
that you can find in :doc:`MultiplexConfigAdvanced`, should you need them.
This diff is collapsed.
.. _writing-tests: .. _writing-tests:
=====================
Writing Avocado Tests Writing Avocado Tests
===================== =====================
...@@ -8,13 +9,12 @@ test module, which is a python file with a class that inherits from ...@@ -8,13 +9,12 @@ test module, which is a python file with a class that inherits from
:class:`avocado.test.Test`. This class only really needs to implement a method :class:`avocado.test.Test`. This class only really needs to implement a method
called `action`, which represents the actual test payload. called `action`, which represents the actual test payload.
Super simple example - sleeptest Simple example
-------------------------------- ==============
Let's re-create an old time favorite, sleeptest, which is a functional
test for autotest. It does nothing but `time.sleep([number-seconds])`:
:: Let's re-create an old time favorite, ``sleeptest``, which is a functional
test for avocado (old because we also use such a test for autotest). It does
nothing but ``time.sleep([number-seconds])``::
#!/usr/bin/python #!/usr/bin/python
...@@ -29,29 +29,105 @@ test for autotest. It does nothing but `time.sleep([number-seconds])`: ...@@ -29,29 +29,105 @@ test for autotest. It does nothing but `time.sleep([number-seconds])`:
""" """
Example test for avocado. Example test for avocado.
""" """
default_params = {'sleep_length': 1.0}
def action(self, length=1): def action(self):
""" """
Sleep for length seconds. Sleep for length seconds.
""" """
self.log.debug("Sleeping for %d seconds", length) self.log.debug("Sleeping for %.2f seconds", self.params.sleep_length)
time.sleep(length) time.sleep(self.params.sleep_length)
if __name__ == "__main__": if __name__ == "__main__":
job.main() job.main()
This is about the simplest test you can write for avocado (at least, one using This is about the simplest test you can write for avocado (at least, one using
the avocado APIs). Note that the test object provides you with a number of the avocado APIs). Note that the test object provides you with a number of
convenience attributes, such as `self.log`, that lets you log debug, info, error convenience attributes, such as ``self.log``, that lets you log debug, info, error
and warning messages. and warning messages. Also, we note the parameter passing system that avocado provides:
We frequently want to pass parameters to tests, and we can do that through what
we call a `multiplex file`, which is a configuration file that not only allows you
to provide params to your test, but also lets you easily create a validation matrix
in a concise way. You can find more about the multiplex file format on :doc:`MultiplexConfig`.
Accessing test parameters
=========================
Each test has a set of parameters that can be accessed through ``self.params.[param-name]``.
Avocado finds and populates ``self.params`` with all parameters you define on a Multiplex
Config file (see :doc:`MultiplexConfig`), making them available as attributes,
not just dict keys. This has the advantage of reducing the boilerplate code necessary to
access those parameters. As an example, consider the following multiplex file for sleeptest::
    variants:
        - sleeptest:
            sleep_length_type = float
            variants:
                - short:
                    sleep_length = 0.5
                - medium:
                    sleep_length = 1
                - long:
                    sleep_length = 5
You may notice some things here: there is one test param for sleeptest, called ``sleep_length``. We could have named it
``length``, really, but I prefer to create a param namespace of sorts here. Then, I defined
``sleep_length_type``, which is used by the config system to convert a value (by default a
:class:`basestring`) to an appropriate value type (in this case, we need to pass a :class:`float`
to :func:`time.sleep`). Note that this is an optional feature, and you can always use
:func:`float` to convert the string value coming from the configuration yourself.
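To make the conversion concrete, a hypothetical pair of keys such as::

    sleep_length = 5
    sleep_length_type = float

would reach the test as ``self.params.sleep_length == 5.0``, already converted to a :class:`float`.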
Another important design detail is that sometimes we might not want to use the config system
at all (for example, when we run an avocado test as a standalone test). To account for this
case, we have to specify a ``default_params`` dictionary that contains the default values
for when we are not providing config from a multiplex file.
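In that standalone mode the test module is simply executed as a script (the selftests below do
exactly this)::

    $ ./tests/sleeptest/sleeptest.py

and ``sleep_length`` falls back to the value defined in ``default_params``.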
Using a multiplex file
======================
You may use the avocado runner with a multiplex file to provide params and matrix
generation for sleeptest, like this::
$ avocado run sleeptest --multiplex tests/sleeptest/sleeptest.mplx
DEBUG LOG: /home/lmr/avocado/logs/run-2014-05-13-15.44.54/debug.log
TOTAL TESTS: 3
(1/3) sleeptest.short: PASS (0.64 s)
(2/3) sleeptest.medium: PASS (1.11 s)
(3/3) sleeptest.long: PASS (5.12 s)
TOTAL PASSED: 3
TOTAL ERROR: 0
TOTAL FAILED: 0
TOTAL SKIPPED: 0
TOTAL WARNED: 0
ELAPSED TIME: 6.87 s
Note that, as your multiplex file specifies all parameters for sleeptest, you can simply
leave the test url list empty, such as::
$ avocado run --multiplex tests/sleeptest/sleeptest.mplx
If you want to run some tests that don't require params set by the multiplex file, you can::
$ avocado run "sleeptest synctest" --multiplex tests/sleeptest/sleeptest.mplx
DEBUG LOG: /home/lmr/avocado/logs/run-2014-05-13-15.47.55/debug.log
TOTAL TESTS: 4
(1/4) sleeptest.short: PASS (0.61 s)
(2/4) sleeptest.medium: PASS (1.11 s)
(3/4) sleeptest.long: PASS (5.11 s)
(4/4) synctest.1: PASS (1.85 s)
TOTAL PASSED: 4
TOTAL ERROR: 0
TOTAL FAILED: 0
TOTAL SKIPPED: 0
TOTAL WARNED: 0
ELAPSED TIME: 8.69 s
Avocado tests are also unittests Avocado tests are also unittests
-------------------------------- ================================
Since avocado tests inherit from :class:`unittest.TestCase`, you can use all Since avocado tests inherit from :class:`unittest.TestCase`, you can use all
the ``assert`` class methods on your tests. Some silly examples:: the :func:`assert` class methods on your tests. Some silly examples::
class random_examples(test.Test): class random_examples(test.Test):
def action(self): def action(self):
...@@ -86,7 +162,7 @@ Executing an avocado test gives:: ...@@ -86,7 +162,7 @@ Executing an avocado test gives::
ELAPSED TIME: 1.11 s ELAPSED TIME: 1.11 s
Running tests with nosetests Running tests with nosetests
---------------------------- ============================
`nose <https://nose.readthedocs.org/>`__ is a python testing framework with `nose <https://nose.readthedocs.org/>`__ is a python testing framework with
similar goals as avocado, except that avocado also intends to provide tools to similar goals as avocado, except that avocado also intends to provide tools to
...@@ -102,22 +178,24 @@ cass, you can run them with the ``nosetests`` application:: ...@@ -102,22 +178,24 @@ cass, you can run them with the ``nosetests`` application::
OK OK
Setup and cleanup methods Setup and cleanup methods
------------------------- =========================
If you need to perform setup actions before/after your test, you may do so If you need to perform setup actions before/after your test, you may do so
in the ``setup`` and ``cleanup`` methods, respectively. We'll give examples in the ``setup`` and ``cleanup`` methods, respectively. We'll give examples
in the following section. in the following section.
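As a minimal sketch of the overall structure (the method bodies here are hypothetical placeholders)::

    class example(test.Test):

        def setup(self):
            # runs before action(); prepare whatever the test needs
            self.compressed_file = self.get_deps_path('example.tar.bz2')

        def action(self):
            # the actual test payload
            pass

        def cleanup(self):
            # runs after action(); undo whatever setup() did
            pass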
Building and executing 3rd party test suites Running third party test suites
-------------------------------------------- ===============================
It is very common in test automation workloads to use test suites developed It is very common in test automation workloads to use test suites developed
by 3rd parties. By wrapping the execution code inside an avocado test module, by third parties. By wrapping the execution code inside an avocado test module,
you gain access to the facilities and API provided by the framework. Let's you gain access to the facilities and API provided by the framework. Let's
say you want to pick up a test suite written in C that it is in a tarball, say you want to pick up a test suite written in C that it is in a tarball,
uncompress it, compile the suite code, and then executing the test. Here's uncompress it, compile the suite code, and then executing the test. Here's
an example that does that:: an example that does that::
#!/usr/bin/python
import os import os
from avocado import test from avocado import test
...@@ -132,17 +210,34 @@ an example that does that:: ...@@ -132,17 +210,34 @@ an example that does that::
""" """
Execute the synctest test suite. Execute the synctest test suite.
""" """
default_params = {'sync_tarball': 'synctest.tar.bz2',
'sync_length': 100,
'sync_loop': 10}
def setup(self, tarball='synctest.tar.bz2'): def setup(self):
tarball_path = self.get_deps_path(tarball) """
Set default params and build the synctest suite.
"""
# Build the synctest suite
self.cwd = os.getcwd()
tarball_path = self.get_deps_path(self.params.sync_tarball)
archive.extract(tarball_path, self.srcdir) archive.extract(tarball_path, self.srcdir)
self.srcdir = os.path.join(self.srcdir, 'synctest') self.srcdir = os.path.join(self.srcdir, 'synctest')
build.make(self.srcdir) build.make(self.srcdir)
def action(self, length=100, loop=10): def action(self):
"""
Execute synctest with the appropriate params.
"""
os.chdir(self.srcdir) os.chdir(self.srcdir)
cmd = './synctest %s %s' % (length, loop) cmd = ('./synctest %s %s' %
(self.params.sync_length, self.params.sync_loop))
process.system(cmd) process.system(cmd)
os.chdir(self.cwd)
if __name__ == "__main__":
job.main()
Here we have an example of the ``setup`` method in action: Here we get the Here we have an example of the ``setup`` method in action: Here we get the
location of the test suite code (tarball) through location of the test suite code (tarball) through
...@@ -156,9 +251,10 @@ and executes the ``./synctest`` command, with appropriate parameters, using ...@@ -156,9 +251,10 @@ and executes the ``./synctest`` command, with appropriate parameters, using
:func:`avocado.utils.process.system`. :func:`avocado.utils.process.system`.
Wrap Up Wrap Up
------- =======
While there are certainly other resources that can be used to build your tests, While there are certainly other resources that can be used to build your tests,
we recommend you take a look at the example tests present in the ``tests`` we recommend you take a look at the example tests present in the ``tests``
directory to take some inspiration. It is also recommended that you take a directory, which contains a few samples to draw inspiration from. It is also
look at the :doc:`API documentation <api/modules>` for more possibilities. recommended that you take a look at the :doc:`API documentation <api/modules>`
for more possibilities.
...@@ -16,6 +16,8 @@ Contents: ...@@ -16,6 +16,8 @@ Contents:
GetStartedGuide GetStartedGuide
DataDir DataDir
WritingTests WritingTests
MultiplexConfig
MultiplexConfigAdvanced
Plugins Plugins
OutputPlugins OutputPlugins
api/modules api/modules
......
#!/usr/bin/python
"""
Build documentation and report whether we had warning/error messages.
This is geared towards documentation build regression testing.
"""
import os
import sys
# simple magic for using scripts within a source tree
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', '..')
basedir = os.path.abspath(basedir)
if os.path.isdir(os.path.join(basedir, 'avocado')):
sys.path.append(basedir)
from avocado.utils import process
class DocBuildError(Exception):
pass
def test_build_docs():
"""
Build avocado HTML docs, reporting failures
"""
ignore_list = []
failure_lines = []
doc_dir = os.path.join(basedir, 'docs')
process.run('make -C %s clean' % doc_dir)
result = process.run('make -C %s html' % doc_dir)
stdout = result.stdout.splitlines()
stderr = result.stderr.splitlines()
output_lines = stdout + stderr
for line in output_lines:
ignore_msg = False
for ignore in ignore_list:
if ignore in line:
print 'Expected warning ignored: %s' % line
ignore_msg = True
if ignore_msg:
continue
if 'ERROR' in line:
failure_lines.append(line)
if 'WARNING' in line:
failure_lines.append(line)
if failure_lines:
e_msg = ('%s ERRORS and/or WARNINGS detected while building the html docs:\n' %
len(failure_lines))
for (index, failure_line) in enumerate(failure_lines):
e_msg += "%s) %s\n" % (index + 1, failure_line)
e_msg += 'Please check the output and fix your docstrings/.rst docs'
raise DocBuildError(e_msg)
if __name__ == '__main__':
test_build_docs()
#!/usr/bin/env python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: Red Hat Inc. 2013-2014
# Author: Lucas Meneghel Rodrigues <lmr@redhat.com>
import unittest
import os
import sys
# simple magic for using scripts within a source tree
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', '..', '..')
basedir = os.path.abspath(basedir)
if os.path.isdir(os.path.join(basedir, 'avocado')):
sys.path.append(basedir)
from avocado.utils import process
class RunnerOperationTest(unittest.TestCase):
def test_runner_all_ok(self):
os.chdir(basedir)
cmd_line = './scripts/avocado run "sleeptest sleeptest"'
process.run(cmd_line)
def test_runner_tests_fail(self):
os.chdir(basedir)
cmd_line = './scripts/avocado run "sleeptest failtest sleeptest"'
result = process.run(cmd_line, ignore_status=True)
expected_rc = 1
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" % (expected_rc, result))
def test_runner_nonexistent_test(self):
os.chdir(basedir)
cmd_line = './scripts/avocado run bogustest'
result = process.run(cmd_line, ignore_status=True)
expected_rc = 1
unexpected_rc = 3
self.assertNotEqual(result.exit_status, unexpected_rc,
"Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" % (expected_rc, result))
if __name__ == '__main__':
unittest.main()
#!/usr/bin/env python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: Red Hat Inc. 2013-2014
# Author: Lucas Meneghel Rodrigues <lmr@redhat.com>
import unittest
import os
import sys
# simple magic for using scripts within a source tree
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', '..', '..')
basedir = os.path.abspath(basedir)
if os.path.isdir(os.path.join(basedir, 'avocado')):
sys.path.append(basedir)
from avocado.utils import process
class MultiplexTests(unittest.TestCase):
def run_and_check(self, cmd_line, expected_rc):
os.chdir(basedir)
result = process.run(cmd_line, ignore_status=True)
self.assertEqual(result.exit_status, expected_rc,
"Command %s did not return rc "
"%d:\n%s" % (cmd_line, expected_rc, result))
def test_mplex_plugin(self):
cmd_line = './scripts/avocado multiplex tests/sleeptest/sleeptest.mplx'
expected_rc = 0
self.run_and_check(cmd_line, expected_rc)
def test_mplex_plugin_nonexistent(self):
cmd_line = './scripts/avocado multiplex nonexist'
expected_rc = 2
self.run_and_check(cmd_line, expected_rc)
def test_run_mplex_sleeptest(self):
cmd_line = './scripts/avocado run sleeptest --multiplex tests/sleeptest/sleeptest.mplx'
expected_rc = 0
self.run_and_check(cmd_line, expected_rc)
def test_run_mplex_doublesleep(self):
cmd_line = './scripts/avocado run "sleeptest sleeptest" --multiplex tests/sleeptest/sleeptest.mplx'
expected_rc = 0
self.run_and_check(cmd_line, expected_rc)
def test_run_mplex_failtest(self):
cmd_line = './scripts/avocado run "sleeptest failtest" --multiplex tests/sleeptest/sleeptest.mplx'
expected_rc = 1
self.run_and_check(cmd_line, expected_rc)
if __name__ == '__main__':
unittest.main()
#!/usr/bin/env python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: Red Hat Inc. 2013-2014
# Author: Lucas Meneghel Rodrigues <lmr@redhat.com>
import unittest
import os
import sys
# simple magic for using scripts within a source tree
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', '..', '..')
basedir = os.path.abspath(basedir)
if os.path.isdir(os.path.join(basedir, 'avocado')):
sys.path.append(basedir)
from avocado.utils import process
class StandaloneTests(unittest.TestCase):
def setUp(self):
self.original_pypath = os.environ.get('PYTHONPATH')
if self.original_pypath is not None:
os.environ['PYTHONPATH'] = '%s:%s' % (basedir, self.original_pypath)
else:
os.environ['PYTHONPATH'] = '%s' % basedir
def run_and_check(self, cmd_line, expected_rc, tstname):
os.chdir(basedir)
result = process.run(cmd_line, ignore_status=True)
self.assertEqual(result.exit_status, expected_rc,
"Stand alone %s did not return rc "
"%d:\n%s" % (tstname, expected_rc, result))
def test_sleeptest(self):
cmd_line = './tests/sleeptest/sleeptest.py'
expected_rc = 0
self.run_and_check(cmd_line, expected_rc, 'sleeptest')
def test_skiptest(self):
cmd_line = './tests/skiptest/skiptest.py'
expected_rc = 0
self.run_and_check(cmd_line, expected_rc, 'skiptest')
def test_failtest(self):
cmd_line = './tests/failtest/failtest.py'
expected_rc = 1
self.run_and_check(cmd_line, expected_rc, 'failtest')
def test_errortest(self):
cmd_line = './tests/errortest/errortest.py'
expected_rc = 1
self.run_and_check(cmd_line, expected_rc, 'errortest')
def test_warntest(self):
cmd_line = './tests/warntest/warntest.py'
expected_rc = 1
self.run_and_check(cmd_line, expected_rc, 'warntest')
if __name__ == '__main__':
unittest.main()
This diff is collapsed.
#!/usr/bin/python
import unittest
from avocado.utils import params
BASE_DICT = {
'image_boot': 'yes',
'image_boot_stg': 'no',
'image_chain': '',
'image_clone_command': 'cp --reflink=auto %s %s',
'image_format': 'qcow2',
'image_format_stg': 'qcow2',
'image_name': 'images/f18-64',
'image_name_stg': 'enospc',
'image_raw_device': 'no',
'image_remove_command': 'rm -rf %s',
'image_size': '10G',
'image_snapshot_stg': 'no',
'image_unbootable_pattern': 'Hard Disk.*not a bootable disk',
'image_verify_bootable': 'yes',
'images': 'image1 stg',
}
CORRECT_RESULT_MAPPING = {"image1": {'image_boot_stg': 'no',
'image_snapshot_stg': 'no',
'image_chain': '',
'image_unbootable_pattern': 'Hard Disk.*not a bootable disk',
'image_name': 'images/f18-64',
'image_remove_command': 'rm -rf %s',
'image_name_stg': 'enospc',
'image_clone_command': 'cp --reflink=auto %s %s',
'image_size': '10G', 'images': 'image1 stg',
'image_raw_device': 'no',
'image_format': 'qcow2',
'image_boot': 'yes',
'image_verify_bootable': 'yes',
'image_format_stg': 'qcow2'},
"stg": {'image_snapshot': 'no',
'image_boot_stg': 'no',
'image_snapshot_stg': 'no',
'image_chain': '',
'image_unbootable_pattern': 'Hard Disk.*not a bootable disk',
'image_name': 'enospc',
'image_remove_command': 'rm -rf %s',
'image_name_stg': 'enospc',
'image_clone_command': 'cp --reflink=auto %s %s',
'image_size': '10G',
'images': 'image1 stg',
'image_raw_device': 'no',
'image_format': 'qcow2',
'image_boot': 'no',
'image_verify_bootable': 'yes',
'image_format_stg': 'qcow2'}}
class TestParams(unittest.TestCase):
def setUp(self):
self.params = params.Params(BASE_DICT)
def testObjects(self):
self.assertEquals(self.params.objects("images"), ['image1', 'stg'])
def testObjectsParams(self):
for key in CORRECT_RESULT_MAPPING.keys():
self.assertEquals(self.params.object_params(key),
CORRECT_RESULT_MAPPING[key])
def testGetItemMissing(self):
try:
self.params['bogus']
raise ValueError("Did not get a ParamNotFound error when trying "
"to access a non-existing param")
# pylint: disable=E0712
except params.ParamNotFound:
pass
def testGetItem(self):
self.assertEqual(self.params['image_size'], "10G")
if __name__ == "__main__":
unittest.main()
...@@ -42,7 +42,7 @@ class AvocadoTestSelector(Selector): ...@@ -42,7 +42,7 @@ class AvocadoTestSelector(Selector):
return True return True
def wantFile(self, filename): def wantFile(self, filename):
if not filename.endswith('_unittest.py'): if not filename.endswith('.py'):
return False return False
skip_tests = [] skip_tests = []
...@@ -82,17 +82,8 @@ class AvocadoTestRunner(Plugin): ...@@ -82,17 +82,8 @@ class AvocadoTestRunner(Plugin):
def prepareTestLoader(self, loader): def prepareTestLoader(self, loader):
loader.selector = AvocadoTestSelector(loader.config) loader.selector = AvocadoTestSelector(loader.config)
if __name__ == '__main__':
def run_test():
nose.main(addplugins=[AvocadoTestRunner(), nose.main(addplugins=[AvocadoTestRunner(),
AttributeSelector(), AttributeSelector(),
Xunit(), Xunit(),
Coverage()]) Coverage()])
def main():
run_test()
if __name__ == '__main__':
main()
variants:
    - sleeptest:
        sleep_length_type = float
        variants:
            - short:
                sleep_length = 0.5
            - medium:
                sleep_length = 1
            - long:
                sleep_length = 5
...@@ -26,13 +26,14 @@ class sleeptest(test.Test): ...@@ -26,13 +26,14 @@ class sleeptest(test.Test):
""" """
Example test for avocado. Example test for avocado.
""" """
default_params = {'sleep_length': 1.0}
def action(self, length=1): def action(self):
""" """
Sleep for length seconds. Sleep for length seconds.
""" """
self.log.debug("Sleeping for %d seconds", length) self.log.debug("Sleeping for %.2f seconds", self.params.sleep_length)
time.sleep(length) time.sleep(self.params.sleep_length)
if __name__ == "__main__": if __name__ == "__main__":
......
variants:
    - synctest:
        sync_tarball = synctest.tar.bz2
        sync_length_type = int
        sync_loop_type = int
        variants:
            - loop_short:
                sync_loop = 10
            - loop_medium:
                sync_loop = 50
            - loop_long:
                sync_loop = 100
        variants:
            - length_short:
                sync_length = 100
            - length_medium:
                sync_length = 500
            - length_long:
                sync_length = 1000
...@@ -14,7 +14,6 @@ ...@@ -14,7 +14,6 @@
# Copyright: Red Hat Inc. 2013-2014 # Copyright: Red Hat Inc. 2013-2014
# Author: Lucas Meneghel Rodrigues <lmr@redhat.com> # Author: Lucas Meneghel Rodrigues <lmr@redhat.com>
import os import os
from avocado import test from avocado import test
...@@ -29,17 +28,27 @@ class synctest(test.Test): ...@@ -29,17 +28,27 @@ class synctest(test.Test):
""" """
Execute the synctest test suite. Execute the synctest test suite.
""" """
default_params = {'sync_tarball': 'synctest.tar.bz2',
def setup(self, tarball='synctest.tar.bz2'): 'sync_length': 100,
'sync_loop': 10}
def setup(self):
"""
Build the synctest suite.
"""
self.cwd = os.getcwd() self.cwd = os.getcwd()
tarball_path = self.get_deps_path(tarball) tarball_path = self.get_deps_path(self.params.sync_tarball)
archive.extract(tarball_path, self.srcdir) archive.extract(tarball_path, self.srcdir)
self.srcdir = os.path.join(self.srcdir, 'synctest') self.srcdir = os.path.join(self.srcdir, 'synctest')
build.make(self.srcdir) build.make(self.srcdir)
def action(self, length=100, loop=10): def action(self):
"""
Execute synctest with the appropriate params.
"""
os.chdir(self.srcdir) os.chdir(self.srcdir)
cmd = './synctest %s %s' % (length, loop) cmd = ('./synctest %s %s' %
(self.params.sync_length, self.params.sync_loop))
process.system(cmd) process.system(cmd)
os.chdir(self.cwd) os.chdir(self.cwd)
......