diff --git a/.travis.yml b/.travis.yml index e5a3dc52e9918229114315d50e395e3cdfc4ae3d..0484ac287848e69bc6901bf7162006b36062457e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -12,16 +12,8 @@ install: - pip install -r requirements.txt script: - - ./unittests/runtests.py -c .nose.cfg - inspekt lint - inspekt style - - make -C docs html 2>&1 | grep -E '(ERROR|WARNING)' || test $? -eq 1 - - ./scripts/avocado run "sleeptest sleeptest" - - ./scripts/avocado run "sleeptest failtest sleeptest" || test $? -eq 1 - - ./scripts/avocado run "bogustest" || test $? -ne 3 - - export PYTHONPATH=$PYTHONPATH:. - - ./tests/sleeptest/sleeptest.py - - ./tests/skiptest/skiptest.py - - ./tests/failtest/failtest.py || test $? -eq 1 - - ./tests/errortest/errortest.py || test $? -eq 1 - - ./tests/warntest/warntest.py || test $? -eq 1 + - ./selftests/run -v selftests/all/doc + - ./selftests/run -v selftests/all/functional + - ./selftests/run selftests/all/unit -c selftests/.nose.cfg diff --git a/avocado/core/data_dir.py b/avocado/core/data_dir.py index 58934638abc933fd0b623058e73d0137ddb74c02..b8452427505b94c6a5ffb0040bad1e4cf9110458 100755 --- a/avocado/core/data_dir.py +++ b/avocado/core/data_dir.py @@ -196,6 +196,7 @@ def get_job_logs_dir(args=None): :param args: :class:`argparse.Namespace` instance with cmdline arguments (optional). 
+ :rtype: basestring """ start_time = time.strftime('%Y-%m-%d-%H.%M.%S') if args is not None: diff --git a/avocado/job.py b/avocado/job.py index 929d6b9b425d4b66e9e793666a5cb30272b9c1db..2ea6ccce0ea32336457f1a240ca0252158e00cc6 100644 --- a/avocado/job.py +++ b/avocado/job.py @@ -27,6 +27,7 @@ from avocado.core import output from avocado.core import status from avocado.core import exceptions from avocado.core import error_codes +from avocado import multiplex_config from avocado import test from avocado import result @@ -52,15 +53,24 @@ class Job(object): self.debuglog = os.path.join(self.debugdir, "debug.log") if self.args is not None: self.loglevel = args.log_level or logging.DEBUG + self.multiplex_file = args.multiplex_file else: self.loglevel = logging.DEBUG + self.multiplex_file = None self.test_dir = data_dir.get_test_dir() self.test_index = 1 self.status = "RUNNING" self.output_manager = output.OutputManager() - def _load_test_instance(self, url): + def _load_test_instance(self, params): + """ + Find the test url from the first component of the test shortname, and load the url. + + :param params: Dictionary with test params. + """ + shortname = params.get('shortname') + url = shortname.split('.')[0] path_attempt = os.path.abspath(url) if os.path.exists(path_attempt): test_class = test.DropinTest @@ -79,31 +89,33 @@ class Job(object): finally: test_instance = test_class(name=url, base_logdir=self.debugdir, + params=params, job=self) return test_instance - def run_test(self, url): + def run_test(self, params): """ Run a single test. - :param url: test URL. + :param params: Dictionary with test params. + :type params: dict :return: an instance of :class:`avocado.test.Test`. 
""" - test_instance = self._load_test_instance(url) + test_instance = self._load_test_instance(params) test_instance.run_avocado() return test_instance - def test_runner(self, urls, test_result): + def test_runner(self, params_list, test_result): """ Run one or more tests and report with test result. - :param urls: a list of tests URLs. + :param params_list: a list of param dicts. :param test_result: An instance of :class:`avocado.result.TestResult`. :return: a list of test failures. """ failures = [] - for url in urls: - test_instance = self.run_test(url) + for params in params_list: + test_instance = self.run_test(params) test_result.check_test(test_instance) if not status.mapping[test_instance.status]: failures.append(test_instance.name) @@ -128,11 +140,12 @@ class Job(object): self.args) return test_result - def _run(self, urls=None): + def _run(self, urls=None, multiplex_file=None): """ Unhandled job method. Runs a list of test URLs to its completion. :param urls: String with tests to run. + :param multiplex_file: File that multiplexes a given test url. :return: Integer with overall job status. See :mod:`avocado.core.error_codes` for more information. @@ -140,14 +153,45 @@ class Job(object): :class:`avocado.core.exceptions.JobBaseException` errors, that configure a job failure. 
""" + params_list = [] if urls is None: - urls = self.args.url.split() + if self.args and self.args.url is not None: + urls = self.args.url.split() + else: + if isinstance(urls, str): + urls = urls.split() + + if urls is not None: + for url in urls: + params_list.append({'shortname': url}) - test_runner = self._make_test_runner() - test_result = self._make_test_result(urls) + if multiplex_file is None: + if self.args and self.args.multiplex_file is not None: + multiplex_file = os.path.abspath(self.args.multiplex_file) + else: + multiplex_file = os.path.abspath(multiplex_file) + + if multiplex_file is not None: + params_list = [] + if urls is not None: + for url in urls: + parser = multiplex_config.Parser(multiplex_file) + parser.only_filter(url) + dcts = [d for d in parser.get_dicts()] + if dcts: + for dct in dcts: + params_list.append(dct) + else: + params_list.append({'shortname': url}) + else: + parser = multiplex_config.Parser(multiplex_file) + for dct in parser.get_dicts(): + params_list.append(dct) + + test_result = self._make_test_result(params_list) test_result.start_tests() - failures = test_runner(urls, test_result) + failures = self.test_runner(params_list, test_result) test_result.end_tests() # If it's all good so far, set job status to 'PASS' if self.status == 'RUNNING': @@ -161,17 +205,26 @@ class Job(object): else: return error_codes.numeric_status['AVOCADO_TESTS_FAIL'] - def run(self, urls=None): + def run(self, urls=None, multiplex_file=None): """ Handled main job method. Runs a list of test URLs to its completion. + Note that the behavior is as follows: + * If urls is provided alone, just make a simple list with no specific params (all tests use default params). + * If urls and multiplex_file are provided, multiplex provides params and variants to all tests it can. 
+ * If multiplex_file is provided alone, just use the matrix produced by the file + + The test runner figures out which tests need to be run on an empty urls list by assuming the first component + of the shortname is the test url. + :param urls: String with tests to run. + :param multiplex_file: File that multiplexes a given test url. :return: Integer with overall job status. See :mod:`avocado.core.error_codes` for more information. """ try: - return self._run(urls) + return self._run(urls, multiplex_file) except exceptions.JobBaseException, details: self.status = details.status fail_class = details.__class__.__name__ diff --git a/avocado/linux/software_manager.py b/avocado/linux/software_manager.py index e4a9ce881586be95442de2b8b1407ced8aed5c33..044c21e70bf813866a575b40a785ba87030fd144 100644 --- a/avocado/linux/software_manager.py +++ b/avocado/linux/software_manager.py @@ -689,7 +689,7 @@ class AptBackend(DpkgBackend): """ repo_file = open(self.repo_file_path, 'r') new_file_contents = [] - for line in repo_file.readlines: + for line in repo_file.readlines(): if not line == repo: new_file_contents.append(line) repo_file.close() diff --git a/avocado/multiplex_config.py b/avocado/multiplex_config.py new file mode 100644 index 0000000000000000000000000000000000000000..3ed89ac0c1cd1e2452622bb81bc81c8862fb95bd --- /dev/null +++ b/avocado/multiplex_config.py @@ -0,0 +1,2031 @@ +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; specifically version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# +# See LICENSE for more details. +# +# Copyright: Red Hat Inc. +# Author: Michael Goldish +# Author: Jiri Zupka + +""" +Multiplex configuration file parser. 
+ +This file format allows you to specify a test matrix in a compact fashion. + +Filter syntax: + +* ``,`` means ``OR`` +* ``..`` means ``AND`` +* ``.`` means ``IMMEDIATELY-FOLLOWED-BY`` +* ``(xx=yy)`` where ``xx=VARIANT_NAME`` and ``yy=VARIANT_VALUE`` + +Example: + +:: + + qcow2..(guest_os=Fedora).14, RHEL.6..raw..boot, smp2..qcow2..migrate..ide + +means match all dicts whose names have: + +:: + + (qcow2 AND ((guest_os=Fedora) IMMEDIATELY-FOLLOWED-BY 14)) OR + ((RHEL IMMEDIATELY-FOLLOWED-BY 6) AND raw AND boot) OR + (smp2 AND qcow2 AND migrate AND ide) + +Note: + +* ``qcow2..Fedora.14`` is equivalent to ``Fedora.14..qcow2``. +* ``qcow2..Fedora.14`` is not equivalent to ``qcow2..14.Fedora``. +* ``ide, scsi`` is equivalent to ``scsi, ide``. + +Filters can be used in 3 ways: + +:: + + only + no + : + +The last one starts a conditional block. + +Formal definition: Regexp come from `python `__. +They're not deterministic, but more readable for people. Spaces between +terminals and non terminals are only for better reading of definitions. + +The base of the definitions come verbatim as follows: + + +:: + + E = {\\n, #, :, "-", =, +=, <=, ?=, ?+=, ?<=, !, < , del, @, variants, include, only, no, name, value} + + N = {S, DEL, FILTER, FILTER_NAME, FILTER_GROUP, PN_FILTER_GROUP, STAT, VARIANT, VAR-TYPE, VAR-NAME, VAR-NAME-F, + VAR, COMMENT, TEXT, DEPS, DEPS-NAME-F, META-DATA, IDENTIFIER}`` + + + I = I^n | n in N // indentation from start of line + // where n is indentation length. 
+ I = I^n+x | n,x in N // indentation with shift + + start symbol = S + end symbol = eps + + S -> I^0+x STATV | eps + + I^n STATV + I^n STATV + + I^n STATV -> I^n STATV \\n I^n STATV | I^n STAT | I^n variants VARIANT + I^n STAT -> I^n STAT \\n I^n STAT | I^n COMMENT | I^n include INC + I^n STAT -> I^n del DEL | I^n FILTER + + DEL -> name \\n + + I^n STAT -> I^n name = VALUE | I^n name += VALUE | I^n name <= VALUE + I^n STAT -> I^n name ?= VALUE | I^n name ?+= VALUE | I^n name ?<= VALUE + + VALUE -> TEXT \\n | 'TEXT' \\n | "TEXT" \\n + + COMMENT_BLOCK -> #TEXT | //TEXT + COMMENT -> COMMENT_BLOCK\\n + COMMENT -> COMMENT_BLOCK\\n + + TEXT = [^\\n] TEXT //python format regexp + + I^n variants VAR #comments: add possibility for comment + I^n+x VAR-NAME: DEPS + I^n+x+x2 STATV + I^n VAR-NAME: + + IDENTIFIER -> [A-Za-z0-9][A-Za-z0-9_-]* + + VARIANT -> VAR COMMENT_BLOCK\\n I^n+x VAR-NAME + VAR -> VAR-TYPE: | VAR-TYPE META-DATA: | : // Named | unnamed variant + + VAR-TYPE -> IDENTIFIER + + variants _name_ [xxx] [zzz=yyy] [uuu]: + + META-DATA -> [IDENTIFIER] | [IDENTIFIER=TEXT] | META-DATA META-DATA + + I^n VAR-NAME -> I^n VAR-NAME \\n I^n VAR-NAME | I^n VAR-NAME-N \\n I^n+x STATV + VAR-NAME-N -> - @VAR-NAME-F: DEPS | - VAR-NAME-F: DEPS + VAR-NAME-F -> [a-zA-Z0-9\\._-]+ // Python regexp + + DEPS -> DEPS-NAME-F | DEPS-NAME-F,DEPS + DEPS-NAME-F -> [a-zA-Z0-9\\._- ]+ // Python regexp + + INC -> name \\n + + + FILTER_GROUP: STAT + STAT + + I^n STAT -> I^n PN_FILTER_GROUP | I^n ! 
PN_FILTER_GROUP + + PN_FILTER_GROUP -> FILTER_GROUP: \\n I^n+x STAT + PN_FILTER_GROUP -> FILTER_GROUP: STAT \\n I^n+x STAT + + only FILTER_GROUP + no FILTER_GROUP + + FILTER -> only FILTER_GROUP \\n | no FILTER_GROUP \\n + + FILTER_GROUP -> FILTER_NAME + FILTER_GROUP -> FILTER_GROUP..FILTER_GROUP + FILTER_GROUP -> FILTER_GROUP,FILTER_GROUP + + FILTER_NAME -> FILTER_NAME.FILTER_NAME + FILTER_NAME -> VAR-NAME-F | (VAR-NAME-F=VAR-NAME-F) + +:copyright: Red Hat 2008-2013 +""" + +import os +import collections +import logging +import re +import sys + +_reserved_keys = {"name", "shortname", "dep"} + +num_failed_cases = 5 + + +class ParserError(Exception): + + def __init__(self, msg, line=None, filename=None, linenum=None): + """ + Error parsing a file or string. + + :param msg: Error message. + :param line: Contents of line where the error happened. + :param filename: File parsed. + :param linenum: Line number. + """ + Exception.__init__(self) + self.msg = msg + self.line = line + self.filename = filename + self.linenum = linenum + + def __str__(self): + if self.line: + return "%s: %r (%s:%s)" % (self.msg, self.line, + self.filename, self.linenum) + else: + return "%s (%s:%s)" % (self.msg, self.filename, self.linenum) + + +class LexerError(ParserError): + pass + + +class MissingIncludeError(Exception): + + def __init__(self, line, filename, linenum): + Exception.__init__(self) + self.line = line + self.filename = filename + self.linenum = linenum + + def __str__(self): + return ("%r (%s:%s): file does not exist or it's not a regular " + "file" % (self.line, self.filename, self.linenum)) + + +if sys.version_info[0] == 2 and sys.version_info[1] < 6: + def enum(iterator, start_pos=0): + for i in iterator: + yield start_pos, i + start_pos += 1 +else: + enum = enumerate + + +def _match_adjacent(block, ctx, ctx_set): + """ + It try to match as many blocks as possible from context. + + :return: Count of matched blocks. 
+ """ + if block[0] not in ctx_set: + return 0 + if len(block) == 1: + return 1 # First match and length is 1. + if block[1] not in ctx_set: + return int(ctx[-1] == block[0]) # Check match with last from ctx. + k = 0 + i = ctx.index(block[0]) + while i < len(ctx): # Try to match all of blocks. + if k > 0 and ctx[i] != block[k]: # Block not match + i -= k - 1 + k = 0 # Start from first block in next ctx. + if ctx[i] == block[k]: + k += 1 + if k >= len(block): # match all of blocks + break + if block[k] not in ctx_set: # block in not in whole ctx. + break + i += 1 + return k + + +def _might_match_adjacent(block, ctx, ctx_set, descendant_labels): + matched = _match_adjacent(block, ctx, ctx_set) + for elem in block[matched:]: # Try to find rest of blocks in subtree + if elem not in descendant_labels: + # print "Can't match %s, ctx %s" % (block, ctx) + return False + return True + + +# Filter must inherit from object (otherwise type() won't work) +class Filter(object): + __slots__ = ["filter"] + + def __init__(self, lfilter): + """ + Set an lfilter. + + :param lfilter: :class:`avocado.multiplex_config.LFilter` instance. + """ + self.filter = lfilter + + def match(self, ctx, ctx_set): + for word in self.filter: # Go through , + for block in word: # Go through .. + if _match_adjacent(block, ctx, ctx_set) != len(block): + break + else: + return True # All match + return False + + def might_match(self, ctx, ctx_set, descendant_labels): + # There is some possibility to match in children blocks. 
+ for word in self.filter: + for block in word: + if not _might_match_adjacent(block, ctx, ctx_set, + descendant_labels): + break + else: + return True + return False + + +class NoOnlyFilter(Filter): + __slots__ = ("line",) + + def __init__(self, lfilter, line): + super(NoOnlyFilter, self).__init__(lfilter) + self.line = line + + def __eq__(self, o): + if isinstance(o, self.__class__): + if self.filter == o.filter: + return True + + return False + + +class OnlyFilter(NoOnlyFilter): + # pylint: disable=W0613 + # noinspection PyUnusedLocal + + def is_irrelevant(self, ctx, ctx_set, descendant_labels): + # Matched in this tree. + return self.match(ctx, ctx_set) + + def requires_action(self, ctx, ctx_set, descendant_labels): + # Impossible to match in this tree. + return not self.might_match(ctx, ctx_set, descendant_labels) + + def might_pass(self, failed_ctx, failed_ctx_set, ctx, ctx_set, + descendant_labels): + for word in self.filter: + for block in word: + if (_match_adjacent(block, ctx, ctx_set) > + _match_adjacent(block, failed_ctx, failed_ctx_set)): + return self.might_match(ctx, ctx_set, descendant_labels) + return False + + def __str__(self): + return "Only %s" % self.filter + + def __repr__(self): + return "Only %s" % self.filter + + +class NoFilter(NoOnlyFilter): + + def is_irrelevant(self, ctx, ctx_set, descendant_labels): + return not self.might_match(ctx, ctx_set, descendant_labels) + + # pylint: disable=W0613 + # noinspection PyUnusedLocal + def requires_action(self, ctx, ctx_set, descendant_labels): + """ + Verify if filter requires action. + + :param ctx: Filter context. + :param ctx_set: Filter context set. + :param descendant_labels: Descendant labels. + :return: Boolean of whether filter requires action. + """ + return self.match(ctx, ctx_set) + + # pylint: disable=W0613 + # noinspection PyUnusedLocal + def might_pass(self, failed_ctx, failed_ctx_set, ctx, ctx_set, + descendant_labels): + """ + Verify if filter might pass. 
+ + :param failed_ctx: Failed context. + :param failed_ctx_set: Failed context set. + :param ctx: Context. + :param ctx_set: Context set. + :param descendant_labels: Descendant labels + :return: Boolean as to whether filter might pass. + """ + for word in self.filter: + for block in word: + if (_match_adjacent(block, ctx, ctx_set) < + _match_adjacent(block, failed_ctx, failed_ctx_set)): + return not self.match(ctx, ctx_set) + return False + + def __str__(self): + return "No %s" % self.filter + + def __repr__(self): + return "No %s" % self.filter + + +class BlockFilter(object): + __slots__ = ["blocked"] + + def __init__(self, blocked): + self.blocked = blocked + + def apply_to_dict(self, d): + pass + + +class Condition(NoFilter): + __slots__ = ["content"] + + # pylint: disable=W0231 + def __init__(self, lfilter, line): + super(Condition, self).__init__(lfilter, line) + self.content = [] + + def __str__(self): + return "Condition %s:%s" % (self.filter, self.content) + + def __repr__(self): + return "Condition %s:%s" % (self.filter, self.content) + + +class NegativeCondition(OnlyFilter): + __slots__ = ["content"] + + # pylint: disable=W0231 + def __init__(self, lfilter, line): + super(NegativeCondition, self).__init__(lfilter, line) + self.content = [] + + def __str__(self): + return "NotCond %s:%s" % (self.filter, self.content) + + def __repr__(self): + return "NotCond %s:%s" % (self.filter, self.content) + + +class StrReader(object): + + """ + Preprocess an input string for easy reading. + """ + + def __init__(self, s): + """ + Initialize the reader. + + :param s: The string to parse. 
+ """ + self.filename = "" + self._lines = [] + self._line_index = 0 + self._stored_line = None + for linenum, line in enumerate(s.splitlines()): + line = line.rstrip().expandtabs() + stripped_line = line.lstrip() + indent = len(line) - len(stripped_line) + if (not stripped_line + or stripped_line.startswith("#") + or stripped_line.startswith("//")): + continue + self._lines.append((stripped_line, indent, linenum + 1)) + + def get_next_line(self, prev_indent): + """ + Get the next line in the current block. + + :param prev_indent: The indentation level of the previous block. + :return: (line, indent, linenum), where indent is the line's + indentation level. If no line is available, (None, -1, -1) is + returned. + """ + if self._stored_line: + ret = self._stored_line + self._stored_line = None + return ret + if self._line_index >= len(self._lines): + return None, -1, -1 + line, indent, linenum = self._lines[self._line_index] + if indent <= prev_indent: + return None, indent, linenum + self._line_index += 1 + return line, indent, linenum + + def set_next_line(self, line, indent, linenum): + """ + Make the next call to get_next_line() return the given line instead of + the real next line. + """ + line = line.strip() + if line: + self._stored_line = line, indent, linenum + + +class FileReader(StrReader): + + """ + Preprocess an input file for easy reading. + """ + + def __init__(self, filename): + """ + Initialize the reader. + + :parse filename: The name of the input file. 
+ """ + StrReader.__init__(self, open(filename).read()) + self.filename = filename + + +class Label(object): + __slots__ = ["name", "var_name", "long_name", "hash_val", "hash_var"] + + def __init__(self, name, next_name=None): + if next_name is None: + self.name = name + self.var_name = None + else: + self.name = next_name + self.var_name = name + + if self.var_name is None: + self.long_name = "%s" % self.name + else: + self.long_name = "(%s=%s)" % (self.var_name, self.name) + + self.hash_val = self.hash_name() + self.hash_var = None + if self.var_name: + self.hash_var = self.hash_variant() + + def __str__(self): + return self.long_name + + def __repr__(self): + return self.long_name + + def __eq__(self, o): + """ + The comparison is asymmetric due to optimization. + """ + if o.var_name: + if self.long_name == o.long_name: + return True + else: + if self.name == o.name: + return True + return False + + def __ne__(self, o): + """ + The comparison is asymmetric due to optimization. + """ + if o.var_name: + if self.long_name != o.long_name: + return True + else: + if self.name != o.name: + return True + return False + + def __hash__(self): + return self.hash_val + + def hash_name(self): + return sum([i + 1 * ord(x) for i, x in enumerate(self.name)]) + + def hash_variant(self): + return sum([i + 1 * ord(x) for i, x in enumerate(str(self))]) + + +class Node(object): + __slots__ = ["var_name", "name", "filename", "dep", "content", "children", + "labels", "append_to_shortname", "failed_cases", "default", + "q_dict"] + + def __init__(self): + self.var_name = [] + self.name = [] + self.filename = "" + self.dep = [] + self.content = [] + self.children = [] + self.labels = set() + self.append_to_shortname = False + self.failed_cases = collections.deque() + self.default = False + + def dump(self, indent, recurse=False): + print("%s%s" % (" " * indent, self.name)) + print("%s%s" % (" " * indent, self.var_name)) + print("%s%s" % (" " * indent, self)) + print("%s%s" % (" " * 
indent, self.content)) + print("%s%s" % (" " * indent, self.failed_cases)) + if recurse: + for child in self.children: + child.dump(indent + 3, recurse) + + +match_substitute = re.compile("\$\{(.+?)\}") + + +# noinspection PyBroadException +def _substitution(value, d): + """ + Only optimization string Template substitute is quite expensive operation. + + :param value: String where could be $string for substitution. + :param d: Dictionary from which should be value substituted to value. + + :return: Substituted string + """ + if "$" in value: + start = 0 + st = "" + try: + match = match_substitute.search(value, start) + while match: + val = eval(match.group(1), None, d) + st += value[start:match.start()] + str(val) + start = match.end() + match = match_substitute.search(value, start) + except Exception: + pass + st += value[start:len(value)] + return st + else: + return value + + +class Token(object): + __slots__ = [] + identifier = "" + + @property + def __str__(self): + """ + Return the string representation for the token. + + :return: Token identifier. + """ + return self.identifier + + def __repr__(self): + return "'%s'" % self.identifier + + def __ne__(self, o): + """ + The comparison is asymmetric due to optimization. 
+ """ + if o.identifier != self.identifier: + return True + return False + + +class LIndent(Token): + __slots__ = ["length"] + identifier = "indent" + + def __init__(self, length): + self.length = length + + def __str__(self): + return "%s %s" % (self.identifier, self.length) + + def __repr__(self): + return "%s %s" % (self.identifier, self.length) + + +class LEndL(Token): + __slots__ = [] + identifier = "endl" + + +class LEndBlock(LIndent): + __slots__ = [] + pass + + +class LIdentifier(str): + __slots__ = [] + identifier = "Identifier re([A-Za-z0-9][A-Za-z0-9_-]*)" + + def __str__(self): + return super(LIdentifier, self).__str__() + + def __repr__(self): + return "'%s'" % self + + # noinspection PyTypeChecker + def check_char(self, chars): + """ + Check if string contains given chars. + + :type self: object + :param chars: List of chars. + :return: :raise ParserError: + """ + for t in self: + if not (t in chars): + raise ParserError("Wrong char %s in %s" % (t, self)) + return self + + def check_alpha(self): + """ + Check if string contain only chars + """ + if not self.isalpha(): + raise ParserError("Some of chars is not alpha in %s" % self) + return self + + def check_numbers(self): + """ + Check if string contain only chars + """ + if not self.isdigit(): + raise ParserError("Some of chars is not digit in %s" % self) + return self + + def check_char_alpha(self, chars): + """ + Check if string contain only chars + """ + for t in self: + if not (t in chars or t.isalpha()): + raise ParserError("Char %s is not alpha or one of special" + "chars [%s] in %s" % (t, chars, self)) + return self + + def check_char_alpha_num(self, chars): + """ + Check if string contain only chars + """ + for t in self: + if not (t in chars or t.isalnum()): + raise ParserError("Char %s is not alphanum or one of special" + "chars [%s] in %s" % (t, chars, self)) + return self + + def check_char_numeric(self, chars): + """ + Check if string contain only chars + """ + for t in self: + if not (t 
in chars or t.isdigit()): + raise ParserError("Char %s is not digit or one of special" + "chars [%s] in %s" % (t, chars, self)) + return self + + +class LWhite(LIdentifier): + __slots__ = [] + identifier = "WhiteSpace re(\\s)" + + +class LString(LIdentifier): + __slots__ = [] + identifier = "String re(.+)" + + +class LColon(Token): + __slots__ = [] + identifier = ":" + + +class LVariants(Token): + __slots__ = [] + identifier = "variants" + + +class LDot(Token): + __slots__ = [] + identifier = "." + + +class LVariant(Token): + __slots__ = [] + identifier = "-" + + +class LDefault(Token): + __slots__ = [] + identifier = "@" + + +class LOnly(Token): + __slots__ = [] + identifier = "only" + + +class LNo(Token): + __slots__ = [] + identifier = "no" + + +class LCond(Token): + __slots__ = [] + identifier = "" + + +class LNotCond(Token): + __slots__ = [] + identifier = "!" + + +class LOr(Token): + __slots__ = [] + identifier = "," + + +class LAnd(Token): + __slots__ = [] + identifier = ".." + + +class LCoc(Token): + __slots__ = [] + identifier = "." 
+ + +class LComa(Token): + __slots__ = [] + identifier = "," + + +class LLBracket(Token): + __slots__ = [] + identifier = "[" + + +class LRBracket(Token): + __slots__ = [] + identifier = "]" + + +class LLRBracket(Token): + __slots__ = [] + identifier = "(" + + +class LRRBracket(Token): + __slots__ = [] + identifier = ")" + + +class LRegExpStart(Token): + __slots__ = [] + identifier = "${" + + +class LRegExpStop(Token): + __slots__ = [] + identifier = "}" + + +class LInclude(Token): + __slots__ = [] + identifier = "include" + + +class LOperators(Token): + __slots__ = ["name", "value"] + identifier = "" + function = None + + # noinspection PyAttributeOutsideInit + def set_operands(self, name, value): + # pylint: disable=W0201 + self.name = str(name) + # pylint: disable=W0201 + self.value = str(value) + return self + + +class LSet(LOperators): + __slots__ = [] + identifier = "=" + + def apply_to_dict(self, d): + """ + :param d: Dictionary for apply value + """ + if self.name not in _reserved_keys: + d[self.name] = _substitution(self.value, d) + + +class LAppend(LOperators): + __slots__ = [] + identifier = "+=" + + def apply_to_dict(self, d): + if self.name not in _reserved_keys: + d[self.name] = d.get(self.name, "") + _substitution(self.value, d) + + +class LPrepend(LOperators): + __slots__ = [] + identifier = "<=" + + def apply_to_dict(self, d): + if self.name not in _reserved_keys: + d[self.name] = _substitution(self.value, d) + d.get(self.name, "") + + +class LRegExpSet(LOperators): + __slots__ = [] + identifier = "?=" + + def apply_to_dict(self, d): + exp = re.compile("%s$" % self.name) + value = _substitution(self.value, d) + for key in d: + if key not in _reserved_keys and exp.match(key): + d[key] = value + + +class LRegExpAppend(LOperators): + __slots__ = [] + identifier = "?+=" + + def apply_to_dict(self, d): + exp = re.compile("%s$" % self.name) + value = _substitution(self.value, d) + for key in d: + if key not in _reserved_keys and exp.match(key): + d[key] 
+= value + + +class LRegExpPrepend(LOperators): + __slots__ = [] + identifier = "?<=" + + def apply_to_dict(self, d): + exp = re.compile("%s$" % self.name) + value = _substitution(self.value, d) + for key in d: + if key not in _reserved_keys and exp.match(key): + d[key] = value + d[key] + + +class LDel(LOperators): + __slots__ = [] + identifier = "del" + + def apply_to_dict(self, d): + exp = re.compile("%s$" % self.name) + keys_to_del = collections.deque() + for key in d: + if key not in _reserved_keys and exp.match(key): + keys_to_del.append(key) + for key in keys_to_del: + del d[key] + + +class LApplyPreDict(LOperators): + __slots__ = [] + identifier = "apply_pre_dict" + + # noinspection PyAttributeOutsideInit + def set_operands(self, name, value): + # pylint: disable=W0201 + self.name = name + # pylint: disable=W0201 + self.value = value + return self + + def apply_to_dict(self, d): + d.update(self.value) + + def __str__(self): + return "Apply_pre_dict: %s" % self.value + + def __repr__(self): + return "Apply_pre_dict: %s" % self.value + + +class LUpdateFileMap(LOperators): + __slots__ = ["shortname", "dest"] + identifier = "update_file_map" + + # noinspection PyAttributeOutsideInit + def set_operands(self, filename, name, dest="_name_map_file"): + # pylint: disable=W0201 + self.name = name + # pylint: disable=W0201 + if filename == "": + self.shortname = filename + else: + self.shortname = os.path.basename(filename) + + self.dest = dest + return self + + def apply_to_dict(self, d): + dest = self.dest + if dest not in d: + d[dest] = {} + + if self.shortname in d[dest]: + old_name = d[dest][self.shortname] + d[dest][self.shortname] = "%s.%s" % (self.name, old_name) + else: + d[dest][self.shortname] = self.name + + +spec_iden = "_-" +spec_oper = "+ m.end()): + chars = "" + yield LIdentifier(line[:m.start()].rstrip()) + yield tokens_oper[m.group()[:-1]]() + yield LString(line[m.end():].lstrip()) + else: + li = enum(line[pos:], pos) + for pos, char in li: + if 
char.isalnum() or char in spec_iden: # alphanum+_- + chars += char + elif char in spec_oper: # <+?= + if chars: + yield LIdentifier(chars) + oper = "" + chars = "" + oper += char + else: + if chars: + yield LIdentifier(chars) + chars = "" + if char.isspace(): # Whitespace + # noinspection PyAssignmentToLoopOrWithParameter + for pos, char in li: + if not char.isspace(): + if not self.ignore_white: + yield LWhite() + break + if char.isalnum() or char in spec_iden: + chars += char + elif char == "=": + if oper in tokens_oper: + yield tokens_oper[oper]() + else: + raise LexerError("Unexpected character %s on" + " pos %s" % (char, pos), + self.line, self.filename, + self.linenum) + oper = "" + elif char in tokens_map: + token = tokens_map[char]() + elif char == "\"": + chars = "" + pos, char = li.next() + while char != "\"": + chars += char + pos, char = li.next() + yield LString(chars) + elif char == "#": + break + elif char in spec_oper: + oper += char + else: + raise LexerError("Unexpected character %s on" + " pos %s. 
Special chars are allowed" + " only in variable assignation" + " statement" % (char, pos), line, + self.filename, self.linenum) + if token is not None: + yield token + token = None + if self.rest_as_string: + self.rest_as_string = False + yield LString(line[pos + 1:].lstrip()) + break + if chars: + yield LIdentifier(chars) + yield LEndL() + + def get_lexer(self): + cr = self.reader + while True: + (self.line, indent, + self.linenum) = cr.get_next_line(self.prev_indent) + + if not self.line: + yield LEndBlock(indent) + continue + + yield LIndent(indent) + for token in self.match(self.line, 0): + yield token + + def get_until_gen(self, end_tokens=None): + if end_tokens is None: + end_tokens = [LEndL] + token = self.generator.next() + while type(token) not in end_tokens: + yield token + token = self.generator.next() + yield token + + def get_until(self, end_tokens=None): + if end_tokens is None: + end_tokens = [LEndL] + return [x for x in self.get_until_gen(end_tokens)] + + def flush_until(self, end_tokens=None): + if end_tokens is None: + end_tokens = [LEndL] + for _ in self.get_until_gen(end_tokens): + pass + + def get_until_check(self, ltype, end_tokens=None): + """ + Read tokens from iterator until get end_tokens or type of token not + match ltype + + :param ltype: List of allowed tokens + :param end_tokens: List of tokens for end reading + :return: List of read tokens. + """ + if end_tokens is None: + end_tokens = [LEndL] + tokens = [] + ltype = ltype + end_tokens + for token in self.get_until_gen(end_tokens): + if type(token) in ltype: + tokens.append(token) + else: + raise ParserError("Expected %s got %s" % (ltype, type(token)), + self.line, self.filename, self.linenum) + return tokens + + def get_until_no_white(self, end_tokens=None): + """ + Read tokens from iterator until get one of end_tokens and strip LWhite + + :param end_tokens: List of tokens for end reading + :return: List of read tokens. 
+ """ + if end_tokens is None: + end_tokens = [LEndL] + return [x for x in self.get_until_gen(end_tokens) if type(x) != LWhite] + + def rest_line_gen(self): + token = self.generator.next() + while type(token) != LEndL: + yield token + token = self.generator.next() + + def rest_line(self): + return [x for x in self.rest_line_gen()] + + def rest_line_no_white(self): + return [x for x in self.rest_line_gen() if type(x) != LWhite] + + def rest_line_as_lstring(self): + self.rest_as_string = True + lstr = self.generator.next() + self.generator.next() + return lstr + + def get_next_check(self, ltype): + token = self.generator.next() + if type(token) in ltype: + return type(token), token + else: + raise ParserError("Expected %s got ['%s']=[%s]" % + ([x.identifier for x in ltype], + token.identifier, token), + self.line, self.filename, self.linenum) + + def get_next_check_nw(self, ltype): + token = self.generator.next() + while type(token) == LWhite: + token = self.generator.next() + if type(token) in ltype: + return type(token), token + else: + raise ParserError("Expected %s got ['%s']" % + ([x.identifier for x in ltype], + token.identifier), + self.line, self.filename, self.linenum) + + def check_token(self, token, ltype): + if type(token) in ltype: + return type(token), token + else: + raise ParserError("Expected %s got ['%s']" % + ([x.identifier for x in ltype], + token.identifier), + self.line, self.filename, self.linenum) + + +def next_nw(gener): + token = gener.next() + while type(token) == LWhite: + token = gener.next() + return token + + +def cmd_tokens(tokens1, tokens2): + for x, y in zip(tokens1, tokens2): + if x != y: + return False + else: + return True + + +def apply_predict(lexer, node, pre_dict): + predict = LApplyPreDict().set_operands(None, pre_dict) + node.content += [(lexer.filename, lexer.linenum, predict)] + return {} + + +def parse_filter(lexer, tokens): + """ + :return: Parsed filter + """ + or_filters = [] + tokens = iter(tokens + [LEndL()]) + 
typet, token = lexer.check_token(tokens.next(), [LIdentifier, LLRBracket, + LEndL, LWhite]) + and_filter = [] + con_filter = [] + dots = 1 + while typet not in [LEndL]: + if typet in [LIdentifier, LLRBracket]: # join identifier + if typet == LLRBracket: # (xxx=ttt) + _, indent = lexer.check_token(next_nw(tokens), + [LIdentifier]) # (iden + typet, _ = lexer.check_token(next_nw(tokens), + [LSet, LRRBracket]) # = + if typet == LRRBracket: # (xxx) + token = Label(str(indent)) + elif typet == LSet: # (xxx = yyyy) + _, value = lexer.check_token(next_nw(tokens), + [LIdentifier, LString]) + lexer.check_token(next_nw(tokens), [LRRBracket]) + token = Label(str(indent), str(value)) + else: + token = Label(token) + if dots == 1: + con_filter.append(token) + elif dots == 2: + and_filter.append(con_filter) + con_filter = [token] + elif dots == 0 or dots > 2: + raise ParserError("Syntax Error expected \".\" between" + " Identifier.", lexer.line, lexer.filename, + lexer.linenum) + + dots = 0 + elif typet == LDot: # xxx.xxxx or xxx..xxxx + dots += 1 + elif typet in [LComa, LWhite]: + if dots > 0: + raise ParserError("Syntax Error expected identifier between" + " \".\" and \",\".", lexer.line, + lexer.filename, lexer.linenum) + if and_filter: + if con_filter: + and_filter.append(con_filter) + con_filter = [] + or_filters.append(and_filter) + and_filter = [] + elif con_filter: + or_filters.append([con_filter]) + con_filter = [] + elif typet == LIdentifier: + or_filters.append([[Label(token)]]) + else: + raise ParserError("Syntax Error expected \",\" between" + " Identifier.", lexer.line, lexer.filename, + lexer.linenum) + dots = 1 + token = tokens.next() + while type(token) == LWhite: + token = tokens.next() + typet, token = lexer.check_token(token, [LIdentifier, + LComa, LDot, + LLRBracket, LEndL]) + continue + typet, token = lexer.check_token(tokens.next(), [LIdentifier, LComa, + LDot, LLRBracket, + LEndL, LWhite]) + if and_filter: + if con_filter: + and_filter.append(con_filter) + 
con_filter = [] + or_filters.append(and_filter) + if con_filter: + or_filters.append([con_filter]) + return or_filters + + +class Parser(object): + # pylint: disable=W0102 + + # noinspection PyDefaultArgument + + def __init__(self, filename=None, defaults=False, expand_defaults=[], + debug=False): + self.node = Node() + self.debug = debug + self.defaults = defaults + self.expand_defaults = [LIdentifier(x) for x in expand_defaults] + + self.filename = filename + if self.filename: + self.parse_file(self.filename) + + self.only_filters = [] + self.no_filters = [] + self.assignments = [] + + def _debug(self, s, *args): + if self.debug: + logging.debug(s, *args) + + @staticmethod + def _warn(s, *args): + logging.warn(s, *args) + + def parse_file(self, filename): + """ + Parse a file. + + :param filename: Path of the configuration file. + """ + self.node.filename = filename + self.node = self._parse(Lexer(FileReader(filename)), self.node) + self.filename = filename + + def parse_string(self, s): + """ + Parse a string. + + :param s: String to parse. + """ + self.node.filename = StrReader("").filename + self.node = self._parse(Lexer(StrReader(s)), self.node) + + def only_filter(self, variant): + """ + Apply a only filter programatically and keep track of it. + + Equivalent to parse a "only variant" line. + + :param variant: String with the variant name. + """ + string = "only %s" % variant + self.only_filters.append(string) + self.parse_string(string) + + def no_filter(self, variant): + """ + Apply a only filter programatically and keep track of it. + + Equivalent to parse a "no variant" line. + + :param variant: String with the variant name. + """ + string = "no %s" % variant + self.only_filters.append(string) + self.parse_string(string) + + def assign(self, key, value): + """ + Apply a only filter programatically and keep track of it. + + Equivalent to parse a "key = value" line. + + :param key: String with the variant name. + :param value: Filter value. 
+ """ + string = "%s = %s" % (key, value) + self.assignments.append(string) + self.parse_string(string) + + def _parse(self, lexer, node=None, prev_indent=-1): + if not node: + node = self.node + block_allowed = [LVariants, LIdentifier, LOnly, + LNo, LInclude, LDel, LNotCond] + + variants_allowed = [LVariant] + + identifier_allowed = [LSet, LAppend, LPrepend, + LRegExpSet, LRegExpAppend, + LRegExpPrepend, LColon, + LEndL] + + variants_allowed_in = [LLBracket, LColon, LIdentifier, LEndL] + indent_allowed = [LIndent, LEndBlock] + + allowed = block_allowed + var_indent = 0 + var_name = "" + # meta contains variants meta-data + meta = {} + # pre_dict contains block of operation without collision with + # others block or operation. Increase speed almost twice. + pre_dict = {} + lexer.set_fast() + try: + while True: + lexer.set_prev_indent(prev_indent) + typet, token = lexer.get_next_check(indent_allowed) + if typet == LEndBlock: + if pre_dict: + # flush pre_dict to node content. + apply_predict(lexer, node, pre_dict) + return node + + indent = token.length + typet, token = lexer.get_next_check(allowed) + + if typet == LIdentifier: + # Parse: + # identifier ..... + identifier = lexer.get_until_no_white(identifier_allowed) + if isinstance(identifier[-1], LOperators): # operand = <= + # Parse: + # identifier = xxx + # identifier <= xxx + # identifier ?= xxx + # etc.. + op = identifier[-1] + if len(identifier) == 1: + identifier = token + else: + identifier = [token] + identifier[:-1] + identifier = "".join([str(x) for x in identifier]) + _, value = lexer.get_next_check([LString]) + if value and (value[0] == value[-1] == '"' or + value[0] == value[-1] == "'"): + value = value[1:-1] + + op.set_operands(identifier, value) + d_nin_val = "$" not in value + if type(op) == LSet and d_nin_val: # Optimization + op.apply_to_dict(pre_dict) + else: + if pre_dict: + # flush pre_dict to node content. + # If block already contain xxx = yyyy + # then operation xxx +=, <=, .... are safe. 
+ if op.name in pre_dict and d_nin_val: + op.apply_to_dict(pre_dict) + lexer.get_next_check([LEndL]) + continue + else: + pre_dict = apply_predict(lexer, node, + pre_dict) + + node.content += [(lexer.filename, + lexer.linenum, + op)] + lexer.get_next_check([LEndL]) + + elif type(identifier[-1]) == LColon: # condition: + # Parse: + # xxx.yyy.(aaa=bbb): + identifier = [token] + identifier[:-1] + cfilter = parse_filter(lexer, identifier + [LEndL()]) + next_line = lexer.rest_line_as_lstring() + if next_line != "": + lexer.reader.set_next_line(next_line, indent + 1, + lexer.linenum) + cond = Condition(cfilter, lexer.line) + self._parse(lexer, cond, prev_indent=indent) + + pre_dict = apply_predict(lexer, node, pre_dict) + node.content += [(lexer.filename, lexer.linenum, cond)] + else: + raise ParserError("Syntax ERROR expected \":\" or" + " operand", lexer.line, + lexer.filename, lexer.linenum) + + elif typet == LVariant: + # Parse + # - var1: depend1, depend2 + # block1 + # - var2: + # block2 + if pre_dict: + pre_dict = apply_predict(lexer, node, pre_dict) + already_default = False + meta_with_default = False + if "default" in meta: + meta_with_default = True + meta_in_expand_defaults = False + if var_name not in self.expand_defaults: + meta_in_expand_defaults = True + node4 = Node() + while True: + lexer.set_prev_indent(var_indent) + # Get token from lexer and check syntax. 
+ typet, token = lexer.get_next_check_nw([LIdentifier, + LDefault, + LIndent, + LEndBlock]) + if typet == LEndBlock: + break + + if typet == LIndent: + lexer.get_next_check_nw([LVariant]) + typet, token = lexer.get_next_check_nw( + [LIdentifier, + LDefault]) + + if typet == LDefault: # @ + is_default = True + name = lexer.get_until_check([LIdentifier, LDot], + [LColon]) + else: # identifier + is_default = False + name = [token] + lexer.get_until_check( + [LIdentifier, LDot], + [LColon]) + + if len(name) == 2: + name = [name[0]] + raw_name = name + else: + raw_name = [x for x in name[:-1]] + name = [x for x in name[:-1] + if type(x) == LIdentifier] + + token = lexer.generator.next() + while type(token) == LWhite: + token = lexer.generator.next() + if type(token) != LEndL: + tokens = [token] + lexer.get_until([LEndL]) + deps = parse_filter(lexer, tokens) + else: + deps = [] + + # Prepare data for dict generator. + node2 = Node() + node2.children = [node] + node2.labels = node.labels + + if var_name: + op = LSet().set_operands(var_name, + ".".join([str(n) for n in name])) + node2.content += [(lexer.filename, + lexer.linenum, + op)] + + node3 = self._parse(lexer, node2, prev_indent=indent) + + if var_name: + node3.var_name = var_name + node3.name = [Label(var_name, str(n)) + for n in name] + else: + node3.name = [Label(str(n)) for n in name] + + # Update mapping name to file + + node3.dep = deps + + if meta_with_default: + for wd in meta["default"]: + if cmd_tokens(wd, raw_name): + is_default = True + meta["default"].remove(wd) + + if (is_default and not already_default and + meta_in_expand_defaults): + node3.default = True + already_default = True + + node3.append_to_shortname = not is_default + + op = LUpdateFileMap() + op.set_operands(lexer.filename, + ".".join(str(x) + for x in node3.name)) + node3.content += [(lexer.filename, + lexer.linenum, + op)] + + op = LUpdateFileMap() + op.set_operands(lexer.filename, + ".".join(str(x.name) + for x in node3.name), + 
"_short_name_map_file") + node3.content += [(lexer.filename, + lexer.linenum, + op)] + + if node3.default and self.defaults: + # Move default variant in front of rest + # of all variants. + # Speed optimization. + node4.children.insert(0, node3) + else: + node4.children += [node3] + node4.labels.update(node3.labels) + node4.labels.update(node3.name) + + if "default" in meta and meta["default"]: + raise ParserError("Missing default variant %s" % + (meta["default"]), lexer.line, + lexer.filename, lexer.linenum) + allowed = block_allowed + node = node4 + + elif typet == LVariants: # _name_ [meta1=xxx] [yyy] [xxx] + # Parse + # variants _name_ [meta1] [meta2]: + if type(node) in [Condition, NegativeCondition]: + raise ParserError("'variants' is not allowed inside a " + "conditional block", lexer.line, + lexer.reader.filename, lexer.linenum) + + lexer.set_strict() + tokens = lexer.get_until_no_white([LLBracket, LColon, + LIdentifier, LEndL]) + vtypet = type(tokens[-1]) + var_name = "" + meta.clear() + # [meta1=xxx] [yyy] [xxx] + while vtypet not in [LColon, LEndL]: + if vtypet == LIdentifier: + if var_name != "": + raise ParserError("Syntax ERROR expected" + " \"[\" or \":\"", + lexer.line, lexer.filename, + lexer.linenum) + var_name = tokens[0] + elif vtypet == LLBracket: # [ + _, ident = lexer.get_next_check_nw([LIdentifier]) + typet, _ = lexer.get_next_check_nw([LSet, + LRBracket]) + if typet == LRBracket: # [xxx] + if ident not in meta: + meta[ident] = [] + meta[ident].append(True) + elif typet == LSet: # [xxx = yyyy] + tokens = lexer.get_until_no_white([LRBracket, + LEndL]) + if type(tokens[-1]) == LRBracket: + if ident not in meta: + meta[ident] = [] + meta[ident].append(tokens[:-1]) + else: + raise ParserError("Syntax ERROR" + " expected \"]\"", + lexer.line, + lexer.filename, + lexer.linenum) + + tokens = lexer.get_next_check_nw(variants_allowed_in) + vtypet = type(tokens[-1]) + + if "default" in meta: + for wd in meta["default"]: + if type(wd) != list: + raise 
ParserError("Syntax ERROR expected " + "[default=xxx]", + lexer.line, + lexer.filename, + lexer.linenum) + + if vtypet == LEndL: + raise ParserError("Syntax ERROR expected \":\"", + lexer.line, lexer.filename, + lexer.linenum) + lexer.get_next_check_nw([LEndL]) + allowed = variants_allowed + var_indent = indent + + elif typet in [LNo, LOnly]: + # Parse: + # only/no (filter=text)..aaa.bbb, xxxx + lfilter = parse_filter(lexer, lexer.rest_line()) + + pre_dict = apply_predict(lexer, node, pre_dict) + if typet == LOnly: + node.content += [(lexer.filename, lexer.linenum, + OnlyFilter(lfilter, lexer.line))] + else: # LNo + node.content += [(lexer.filename, lexer.linenum, + NoFilter(lfilter, lexer.line))] + + elif typet == LInclude: + # Parse: + # include relative file patch to working directory. + path = lexer.rest_line_as_lstring() + filename = os.path.expanduser(path) + if (isinstance(lexer.reader, FileReader) and + not os.path.isabs(filename)): + filename = os.path.join( + os.path.dirname(lexer.filename), + filename) + if not os.path.isfile(filename): + raise MissingIncludeError(lexer.line, lexer.filename, + lexer.linenum) + pre_dict = apply_predict(lexer, node, pre_dict) + lch = Lexer(FileReader(filename)) + node = self._parse(lch, node, -1) + lexer.set_prev_indent(prev_indent) + + elif typet == LDel: + # Parse: + # del operand + _, to_del = lexer.get_next_check_nw([LIdentifier]) + lexer.get_next_check_nw([LEndL]) + token.set_operands(to_del, None) + + pre_dict = apply_predict(lexer, node, pre_dict) + node.content += [(lexer.filename, lexer.linenum, + token)] + + elif typet == LNotCond: + # Parse: + # !xxx.yyy.(aaa=bbb): vvv + lfilter = parse_filter(lexer, + lexer.get_until_no_white( + [LColon, LEndL])[:-1]) + next_line = lexer.rest_line_as_lstring() + if next_line != "": + lexer.reader.set_next_line(next_line, indent + 1, + lexer.linenum) + cond = NegativeCondition(lfilter, lexer.line) + self._parse(lexer, cond, prev_indent=indent) + 
lexer.set_prev_indent(prev_indent) + + pre_dict = apply_predict(lexer, node, pre_dict) + node.content += [(lexer.filename, lexer.linenum, cond)] + else: + raise ParserError("Syntax ERROR expected", lexer.line, + lexer.filename, lexer.linenum) + except Exception: + self._debug("%s %s: %s" % (lexer.filename, lexer.linenum, + lexer.line)) + raise + + # noinspection PyDefaultArgument + def get_dicts(self, node=None, ctx=[], content=[], shortname=[], dep=[]): + """ + Generate dictionaries from the code parsed so far. This should + be called after parsing something. + + :return: A dict generator. + """ + # noinspection PyShadowingNames + def process_content(content, failed_filters): + """ + Process dict contents. + + 1. Check that the filters in content are OK with the current + context (ctx). + 2. Move the parts of content that are still relevant into + new_content and unpack conditional blocks if appropriate. + For example, if an 'only' statement fully matches ctx, it + becomes irrelevant and is not appended to new_content. + If a conditional block fully matches, its contents are + unpacked into new_content. + 3. Move failed filters into failed_filters, so that next time we + reach this node or one of its ancestors, we'll check those + filters first. + + :rtype : Boolean + :param content: Content dict + :param failed_filters: Failed filters. 
+ """ + blocked_filters = [] + for t in content: + filename, linenum, obj = t + if isinstance(obj, LOperators): + new_content.append(t) + continue + # obj is an OnlyFilter/NoFilter/Condition/NegativeCondition + if obj.requires_action(ctx, ctx_set, labels): + # This filter requires action now + if type(obj) is OnlyFilter or type(obj) is NoFilter: + if obj not in blocked_filters: + self._debug(" filter did not pass: %r (%s:%s)", + obj.line, filename, linenum) + failed_filters.append(t) + return False + else: + continue + else: + self._debug(" conditional block matches:" + " %r (%s:%s)", obj.line, filename, linenum) + # Check and unpack the content inside this Condition + # object (note: the failed filters should go into + # new_internal_filters because we don't expect them to + # come from outside this node, even if the Condition + # itself was external) + if not process_content(obj.content, + new_internal_filters): + failed_filters.append(t) + return False + continue + elif obj.is_irrelevant(ctx, ctx_set, labels): + # This filter is no longer relevant and can be removed + continue + else: + # Keep the filter and check it again later + new_content.append(t) + return True + + # noinspection PyShadowingNames + def might_pass(failed_ctx, + failed_ctx_set, + failed_external_filters, + failed_internal_filters): + all_content = content + node.content + for t in failed_external_filters + failed_internal_filters: + if t not in all_content: + return True + for t in failed_external_filters: + _, _, external_filter = t + if not external_filter.might_pass(failed_ctx, + failed_ctx_set, + ctx, ctx_set, + labels): + return False + for t in failed_internal_filters: + if t not in node.content: + return True + + for t in failed_internal_filters: + _, _, internal_filter = t + if not internal_filter.might_pass(failed_ctx, + failed_ctx_set, + ctx, ctx_set, + labels): + return False + return True + + def add_failed_case(): + node.failed_cases.appendleft((ctx, ctx_set, + 
new_external_filters, + new_internal_filters)) + if len(node.failed_cases) > num_failed_cases: + node.failed_cases.pop() + + node = node or self.node + # if self.debug: #Print dict on which is working now. + # node.dump(0) + # Update dep + for d in node.dep: + for dd in d: + dep = dep + [".".join([str(label) for label in ctx + dd])] + # Update ctx + ctx = ctx + node.name + ctx_set = set(ctx) + labels = node.labels + # Get the current name + name = ".".join([str(label) for label in ctx]) + + if node.name: + self._debug("checking out %r", name) + + # Check previously failed filters + for i, failed_case in enumerate(node.failed_cases): + # pylint: disable=W0142 + if not might_pass(*failed_case): + self._debug("\n* this subtree has failed before %s\n" + " content: %s\n" + " failed_case:%s\n", + name, content + node.content, failed_case) + del node.failed_cases[i] + node.failed_cases.appendleft(failed_case) + return + # Check content and unpack it into new_content + new_content = [] + new_external_filters = [] + new_internal_filters = [] + if (not process_content(node.content, new_internal_filters) or + not process_content(content, new_external_filters)): + add_failed_case() + self._debug("Failed_cases %s", node.failed_cases) + return + + # Update shortname + if node.append_to_shortname: + shortname = shortname + node.name + + # Recurse into children + count = 0 + if self.defaults and node.var_name not in self.expand_defaults: + for n in node.children: + for d in self.get_dicts(n, ctx, new_content, shortname, dep): + count += 1 + yield d + if n.default and count: + break + else: + for n in node.children: + for d in self.get_dicts(n, ctx, new_content, shortname, dep): + count += 1 + yield d + # Reached leaf? 
+ if not node.children: + self._debug(" reached leaf, returning it") + d = {"name": name, "dep": dep, + "shortname": ".".join([str(sn.name) for sn in shortname])} + for _, _, op in new_content: + op.apply_to_dict(d) + yield d + # If this node did not produce any dicts, remember the failed filters + # of its descendants + elif not count: + new_external_filters = [] + new_internal_filters = [] + for n in node.children: + (_, _, + failed_external_filters, + failed_internal_filters) = n.failed_cases[0] + for obj in failed_internal_filters: + if obj not in new_internal_filters: + new_internal_filters.append(obj) + for obj in failed_external_filters: + if obj in content: + if obj not in new_external_filters: + new_external_filters.append(obj) + else: + if obj not in new_internal_filters: + new_internal_filters.append(obj) + add_failed_case() + + +def print_dicts_default(options, dicts): + """Print dictionaries in the default mode""" + for i, d in enumerate(dicts): + if options.fullname: + print "dict %4d: %s" % (i + 1, d["name"]) + else: + print "dict %4d: %s" % (i + 1, d["shortname"]) + if options.contents: + keys = d.keys() + keys.sort() + for key in keys: + print " %s = %s" % (key, d[key]) + + +# pylint: disable=W0613 +# noinspection PyUnusedLocal +def print_dicts_repr(options, dicts): + import pprint + print "[" + for d in dicts: + print "%s," % (pprint.pformat(d)) + print "]" + + +def print_dicts(options, dicts): + if options.repr_mode: + print_dicts_repr(options, dicts) + else: + print_dicts_default(options, dicts) diff --git a/avocado/plugins/builtin.py b/avocado/plugins/builtin.py index 718babb51a96abff7a67a1a6b2aa6e1fb1e6bb6c..5f097205265efb26860c89b471f1f7480f8d8e6b 100644 --- a/avocado/plugins/builtin.py +++ b/avocado/plugins/builtin.py @@ -27,7 +27,8 @@ Builtins = [('avocado.plugins.runner', 'TestLister'), ('avocado.plugins.xunit', 'XUnit'), ('avocado.plugins.lister', 'PluginsList'), ('avocado.plugins.journal', 'Journal'), - ('avocado.plugins.datadir', 
'DataDirList')] + ('avocado.plugins.datadir', 'DataDirList'), + ('avocado.plugins.multiplexer', 'Multiplexer')] def load_builtins(set_globals=True): diff --git a/avocado/plugins/multiplexer.py b/avocado/plugins/multiplexer.py new file mode 100644 index 0000000000000000000000000000000000000000..758cd6e813852e5ef0a9cbd01acc0a47deb2e84a --- /dev/null +++ b/avocado/plugins/multiplexer.py @@ -0,0 +1,73 @@ +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# +# See LICENSE for more details. +# +# Copyright: Red Hat Inc. 2013-2014 +# Author: Lucas Meneghel Rodrigues + +import os +import sys + +from avocado.plugins import plugin +from avocado.core import output +from avocado.core import error_codes +from avocado import multiplex_config + + +class Multiplexer(plugin.Plugin): + + """ + Implements the avocado 'multiplex' functionality. + """ + + name = 'plugins_list' + enabled = True + + def configure(self, app_parser, cmd_parser): + myparser = cmd_parser.add_parser('multiplex', + help='Generate a list of dictionaries with params from a multiplex file') + + myparser.add_argument('multiplex_file', type=str, + help='Path to a multiplex file ', + nargs='?', default=None) + + myparser.add_argument('-c', '--contents', action='store_true', + help=('Keep temporary files generated by tests. 
' + 'Default: %(defaults)'), default=False) + + myparser.set_defaults(func=self.multiplex) + self.configured = True + + def multiplex(self, args): + bcolors = output.colors + pipe = output.get_paginator() + multiplex_file = os.path.abspath(args.multiplex_file) + + if not os.path.isfile(multiplex_file): + pipe.write(bcolors.fail_header_str('Invalid multiplex file %s' % multiplex_file)) + sys.exit(error_codes.numeric_status['AVOCADO_JOB_FAIL']) + + try: + parser = multiplex_config.Parser(filename=multiplex_file) + except multiplex_config.ParserError, details: + fail_class = details.__class__.__name__ + pipe.write(bcolors.fail_header_str("Multiplex file '%s' has a syntax error\n" % multiplex_file)) + pipe.write('%s: %s\n' % (fail_class, details)) + pipe.write(bcolors.fail_header_str('Aborting...')) + sys.exit(error_codes.numeric_status['AVOCADO_JOB_FAIL']) + + pipe.write(bcolors.header_str('Dictionaries generated:')) + pipe.write('\n') + for (index, dct) in enumerate(parser.get_dicts()): + pipe.write(' dict %s: %s\n' % (index+1, dct.get('shortname'))) + if args.contents: + for key in sorted(dct.keys()): + pipe.write(' %s = %s\n' % (key, dct.get(key))) + sys.exit(error_codes.numeric_status['AVOCADO_ALL_OK']) diff --git a/avocado/plugins/runner.py b/avocado/plugins/runner.py index 0de664b44444ab56dae6c17453746907e4269b72..b957a7a7e6ea35c68ff2665298e4a21b4c438c8c 100644 --- a/avocado/plugins/runner.py +++ b/avocado/plugins/runner.py @@ -82,7 +82,12 @@ class TestRunner(plugin.Plugin): myparser.add_argument('url', type=str, help=('Test module names or paths to dropin tests ' '(space separated)'), - nargs='?', default='') + nargs='?', default=None) + + myparser.add_argument('-m', '--multiplex-file', type=str, + help=('Path to an avocado multiplex ' + '(.mplex) file '), + nargs='?', default=None) myparser.add_argument('--keep-tmp-files', action='store_true', help=('Keep temporary files generated by tests. 
' diff --git a/avocado/settings.py b/avocado/settings.py index 9052cdf0e053d85208e3f6a484d83b8294365f52..0d3b620e9141e18fd53539cb65703ddc91a69451 100644 --- a/avocado/settings.py +++ b/avocado/settings.py @@ -62,12 +62,40 @@ class ConfigFileNotFound(SettingsError): self.path_list) -def convert_value_type(key, section, value, value_type): +def convert_value_type(value, value_type): """ - Convert a string to another data type. + Convert a string value to a given value type. + + :param value: Value we want to convert. + :type value: str. + :param value_type: Type of the value we want to convert. + :type value_type: str or type. + + :return: Converted value type. + :rtype: Dependent on value_type. + + :raise: TypeError, in case it was not possible to convert values. """ # strip off leading and trailing white space - sval = value.strip() + try: + sval = value.strip() + except: + sval = value + + if isinstance(value_type, str): + if value_type == 'str': + value_type = str + elif value_type == 'bool': + value_type = bool + elif value_type == 'int': + value_type = int + elif value_type == 'float': + value_type = float + elif value_type == 'list': + value_type = list + + if value_type is None: + value_type = str # if length of string is zero then return None if len(sval) == 0: @@ -94,13 +122,8 @@ def convert_value_type(key, section, value, value_type): # Split the string using ',' and return a list return [val.strip() for val in sval.split(',')] - try: - conv_val = value_type(sval) - return conv_val - except Exception: - msg = ("Could not convert %s value %r in section %s to type %s" % - (key, sval, section, value_type)) - raise SettingsValueError(msg) + conv_val = value_type(sval) + return conv_val class Settings(object): @@ -183,7 +206,12 @@ class Settings(object): if not val.strip() and not allow_blank: return self._handle_no_value(section, key, default) - return convert_value_type(key, section, val, key_type) + try: + return convert_value_type(val, key_type) + except 
Exception, details: + raise SettingsValueError("Could not convert value %r to type %s " + "(settings key %s, section %s): %s" % + (val, key_type, key, section, details)) settings = Settings() diff --git a/avocado/test.py b/avocado/test.py index a223689327d5d9b4df92db2b21e2823906aa819d..91de0eed05bd312c86458c4c9bf93ad9c14bdc8f 100644 --- a/avocado/test.py +++ b/avocado/test.py @@ -27,6 +27,7 @@ import unittest from avocado.core import data_dir from avocado.core import exceptions from avocado.utils import process +from avocado.utils.params import Params from avocado import sysinfo @@ -38,9 +39,10 @@ class Test(unittest.TestCase): You'll inherit from this to write your own tests. Tipically you'll want to implement setup(), action() and cleanup() methods on your own tests. """ + default_params = {} - def __init__(self, methodName='runTest', name=None, base_logdir=None, - tag=None, job=None): + def __init__(self, methodName='runTest', name=None, params=None, + base_logdir=None, tag=None, job=None): """ Initializes the test. 
@@ -63,7 +65,17 @@ class Test(unittest.TestCase): else: self.name = self.__class__.__name__ - self.tag = tag + if params is None: + params = {} + self.params = Params(params) + + shortname = self.params.get('shortname') + s_tag = None + if shortname: + split_shortname = shortname.split('.') + if len(split_shortname) > 1: + s_tag = ".".join(split_shortname[1:]) + self.tag = tag or s_tag self.job = job self.basedir = os.path.join(data_dir.get_test_dir(), self.name) self.depsdir = os.path.join(self.basedir, 'deps') @@ -84,6 +96,28 @@ class Test(unittest.TestCase): self.log = logging.getLogger("avocado.test") + self.log.info('START %s', self.tagged_name) + self.log.debug('') + self.log.debug('Test instance parameters:') + + # Set the helper set_default to the params object + setattr(self.params, 'set_default', self._set_default) + + # Apply what comes from the params dict + for key in sorted(self.params.keys()): + self.log.debug(' %s = %s', key, self.params.get(key)) + setattr(self.params, key, self.params.get(key)) + self.log.debug('') + + # Apply what comes from the default_params dict + self.log.debug('Default parameters:') + for key in sorted(self.default_params.keys()): + self.log.debug(' %s = %s', key, self.default_params.get(key)) + self.params.set_default(key, self.default_params[key]) + self.log.debug('') + self.log.debug('Test instance params override defaults whenever available') + self.log.debug('') + self.debugdir = None self.resultsdir = None self.status = None @@ -101,6 +135,13 @@ class Test(unittest.TestCase): def __repr__(self): return "Test(%r)" % self.tagged_name + def _set_default(self, key, default): + try: + self.params[key] + except Exception: + self.params[key] = default + setattr(self.params, key, default) + def get_deps_path(self, basename): """ Find a test dependency path inside the test depsdir. 
@@ -245,6 +286,7 @@ class Test(unittest.TestCase): end_time = time.time() self.time_elapsed = end_time - start_time self.report() + self.log.info("") with open(self.logfile, 'r') as log_file_obj: self.text_output = log_file_obj.read() self.stop_logging() @@ -270,12 +312,12 @@ class DropinTest(Test): Run an arbitrary command that returns either 0 (PASS) or !=0 (FAIL). """ - def __init__(self, path, base_logdir, tag=None, job=None): + def __init__(self, path, params=None, base_logdir=None, tag=None, job=None): basename = os.path.basename(path) name = basename.split(".")[0] self.path = os.path.abspath(path) super(DropinTest, self).__init__(name=name, base_logdir=base_logdir, - tag=tag, job=job) + params=params, tag=tag, job=job) def _log_detailed_cmd_info(self, result): """ @@ -305,7 +347,8 @@ class MissingTest(Test): Handle when there is no such test module in the test directory. """ - def __init__(self, name=None, base_logdir=None, tag=None, job=None): + def __init__(self, name=None, params=None, base_logdir=None, tag=None, + job=None): super(MissingTest, self).__init__(name=name, base_logdir=base_logdir, tag=tag, job=job) diff --git a/avocado/utils/params.py b/avocado/utils/params.py new file mode 100644 index 0000000000000000000000000000000000000000..52cb6b5066a142dabc5f55e97e7d28d1ca92454b --- /dev/null +++ b/avocado/utils/params.py @@ -0,0 +1,80 @@ +import UserDict +from threading import Lock + +from avocado.core import exceptions +from avocado import settings + + +class ParamNotFound(exceptions.TestError): + pass + + +class ParamInvalidType(exceptions.TestError): + pass + + +class Params(UserDict.IterableUserDict): + + """ + A dict-like object passed to every test. 
+ """ + lock = Lock() + + def __getitem__(self, key): + """ overrides the error messages of missing params[$key] """ + try: + value = UserDict.IterableUserDict.__getitem__(self, key) + vtype = UserDict.IterableUserDict.get(self, "%s_type" % key) + return settings.convert_value_type(value, vtype) + except KeyError: + raise ParamNotFound("Mandatory parameter '%s' is missing. " + "Check your cfg files for typos/mistakes" % + key) + except Exception, details: + raise ParamInvalidType("Parameter '%s' value '%r' failed to " + "convert to %s: %s" % + (key, value, vtype, details)) + + def objects(self, key): + """ + Return the names of objects defined using a given key. + + :param key: The name of the key whose value lists the objects + (e.g. 'nics'). + """ + return self.get(key, "").split() + + def object_params(self, obj_name): + """ + Return a dict-like object containing the parameters of an individual + object. + + This method behaves as follows: the suffix '_' + obj_name is removed + from all key names that have it. Other key names are left unchanged. + The values of keys with the suffix overwrite the values of their + suffixless versions. + + :param obj_name: The name of the object (objects are listed by the + objects() method). 
+ """ + suffix = "_" + obj_name + self.lock.acquire() + new_dict = self.copy() + self.lock.release() + for key in new_dict.keys(): + if key.endswith(suffix): + new_key = key.split(suffix)[0] + new_dict[new_key] = new_dict[key] + return new_dict + + def object_counts(self, count_key, base_name): + """ + This is a generator method: to give it the name of a count key and a + base_name, and it returns an iterator over all the values from params + """ + count = self.get(count_key, 1) + # Protect in case original is modified for some reason + cpy = self.copy() + for number in xrange(1, int(count) + 1): + key = "%s%s" % (base_name, number) + yield (key, cpy.get(key)) diff --git a/docs/source/GetStartedGuide.rst b/docs/source/GetStartedGuide.rst index db8220fb376991fee29039444bbabd282d29e717..726e9cd8d6e5be414f49280ea10f4abc8ab2692b 100644 --- a/docs/source/GetStartedGuide.rst +++ b/docs/source/GetStartedGuide.rst @@ -92,3 +92,6 @@ native tests and dropin tests:: TOTAL SKIPPED: 0 TOTAL WARNED: 0 ELAPSED TIME: 5.67 s + +Some more involved functionalities for the avocado runner are discussed as appropriate, during +the introduction of important concepts. diff --git a/docs/source/MultiplexConfig.rst b/docs/source/MultiplexConfig.rst new file mode 100644 index 0000000000000000000000000000000000000000..45e5e5d820e008428c3f20ab71d7604d9bdfb04d --- /dev/null +++ b/docs/source/MultiplexConfig.rst @@ -0,0 +1,264 @@ +.. _multiplex_configuration: + +======================= +Multiplex Configuration +======================= + +Multiplex Configuration is a specialized way of providing lists +of key/value pairs within combination's of various categories, +that will be passed to avocado test as parameters in a dictionary +called ``params``. The format simplifies and condenses complex +multidimensional arrays of test parameters into a flat list. The +combinatorial result can be filtered and adjusted prior to testing, +with filters, dependencies, and key/value substitutions. 
+ +The parser relies on indentation, and is very sensitive to misplacement +of tab and space characters. It's highly recommended to edit/view +Multiplex Configuration files in an editor capable of collapsing tab +characters into four space characters. Improper attention to column +spacing can drastically affect output. + +.. _keys_and_values: + +Keys and values +=============== + +Keys and values are the most basic useful facility provided by the +format. A statement in the form `` = `` sets ```` to +````. Values are strings, terminated by a linefeed, with +surrounding quotes completely optional (but honored). A reference of +descriptions for most keys is included in section Configuration Parameter +Reference. + +The key will become part of all lower-level (i.e. further indented) variant +stanzas (see section variants_). However, key precedence is evaluated in +top-down or ``last defined`` order. In other words, the last parsed key has +precedence over earlier definitions. + +.. _variants: + +Variants +======== + +A ``variants`` stanza is opened by a ``variants:`` statement. The contents +of the stanza must be indented further left than the ``variants:`` +statement. Each variant stanza or block defines a single dimension of +the output array. When a Multiplex Configuration file contains +two variants stanzas, the output will be all possible combination's of +both variant contents. Variants may be nested within other variants, +effectively nesting arbitrarily complex arrays within the cells of +outside arrays. For example:: + + variants: + - one: + key1 = Hello + - two: + key2 = World + - three: + variants: + - four: + key3 = foo + - five: + key3 = bar + - six: + key1 = foo + key2 = bar + +While combining, the parser forms names for each outcome based on +prepending each variant onto a list. In other words, the first variant +name parsed will appear as the left most name component. 
These names can +become quite long, and since they contain keys to distinguishing between +results, a 'short-name' key is also used. + +Avocado comes equipped with a plugin to parse multiplex files. The appropriate +subcommand is:: + + avocado multiplex /path/to/multiplex.mplx [-c] + +Note that there's no need to put extensions to a multiplex file, although +doing so helps with organization. The optional -c param is used to provide +the contents of the dictionaries generated, not only their shortnames. + +``avocado multiplex`` against the content above produces the following +combinations and names:: + + Dictionaries generated: + dict 1: four.one + dict 2: four.two + dict 3: four.three + dict 4: five.one + dict 5: five.two + dict 6: five.three + dict 7: six.one + dict 8: six.two + dict 9: six.three + +Variant shortnames represent the ```` value used when results are +recorded (see section Job Names and Tags). For convenience +variants whose name begins with a ``@`` do not prepend their name to +``shortname``, only 'name'. This allows creating ``shortcuts`` for +specifying multiple sets or changes to key/value pairs without changing +the results directory name. For example, this is often convenient for +providing a collection of related pre-configured tests based on a +combination of others. + +.. _filters: + +Filters +======= + +Filter statements allow modifying the resultant set of keys based on the +name of the variant set (see section variants_). Filters can be used in 3 ways: +Limiting the set to include only combination names matching a pattern. +Limiting the set to exclude all combination names not matching a +pattern. Modifying the set or contents of key/value pairs within a +matching combination name. + +Names are matched by pairing a variant name component with the +character(s) ``,`` meaning ``OR``, ``..`` meaning ``AND``, and ``.`` meaning +``IMMEDIATELY-FOLLOWED-BY``. When used alone, they permit modifying the list +of key/values previously defined. 
For example: + +:: + + Linux..OpenSuse: + initrd = initrd + +Modifies all variants containing ``Linux`` followed anywhere thereafter +with ``OpenSuse``, such that the ``initrd`` key is created or overwritten +with the value ``initrd``. + +When a filter is preceded by the keyword ``only`` or ``no``, it limits the +selection of variant combination's This is used where a particular set +of one or more variant combination's should be considered selectively or +exclusively. When given an extremely large matrix of variants, the +``only`` keyword is convenient to limit the result set to only those +matching the filter. Whereas the ``no`` keyword could be used to remove +particular conflicting key/value sets under other variant combination +names. For example: + +:: + + only Linux..Fedora..64 + +Would reduce an arbitrarily large matrix to only those variants whose +names contain Linux, Fedora, and 64 in them. + +However, note that any of these filters may be used within named +variants as well. In this application, they are only evaluated when that +variant name is selected for inclusion (implicitly or explicitly) by a +higher-order. For example: + +:: + + variants: + - one: + key1 = Hello + variants: + - two: + key2 = Complicated + - three: one two + key3 = World + variants: + - default: + only three + key2 = + + only default + +Results in the following outcome (using -c): + +:: + + Dictionaries generated: + dict 1: default.three.one + _name_map_file = {'docs.mplx': 'default.three.one'} + _short_name_map_file = {'docs.mplx': 'default.three.one'} + dep = ['default.one', 'default.two'] + key1 = Hello + key2 = + key3 = World + name = default.three.one + shortname = default.three.one + +.. _value_substitutions: + +Value Substitutions +=================== + +Value substitution allows for selectively overriding precedence and +defining part or all of a future key's value. Using a previously defined +key, it's value may be substituted in or as a another key's value. 
The +syntax is exactly the same as in the bash shell, where as a key's value +is substituted in wherever that key's name appears following a ``$`` +character. When nesting a key within other non-key-name text, the name +should also be surrounded by ``{``, and ``}`` characters. + +Replacement is context-sensitive, thereby if a key is redefined within +the same, or, higher-order block, that value will be used for future +substitutions. If a key is referenced for substitution, but hasn``t yet +been defined, no action is taken. In other words, the $key or ${key} +string will appear literally as or within the value. Nesting of +references is not supported (i.e. key substitutions within other +substitutions. + +For example, if ``one = 1``, ``two = 2``, and ``three = 3``; then, +``order = ${one}${two}${three}`` results in ``order = 123``. This is +particularly handy for rooting an arbitrary complex directory tree +within a predefined top-level directory. + +An example of context-sensitivity, + +:: + + key1 = default value + key2 = default value + + sub = "key1: ${key1}; key2: ${key2};" + + variants: + - one: + key1 = Hello + sub = "key1: ${key1}; key2: ${key2};" + - two: one + key2 = World + sub = "key1: ${key1}; key2: ${key2};" + - three: one two + sub = "key1: ${key1}; key2: ${key2};" + +Results in the following (using -c) + +:: + + Dictionaries generated: + dict 1: one + _name_map_file = {'docs.mplx': 'one'} + _short_name_map_file = {'docs.mplx': 'one'} + dep = [] + key1 = Hello + key2 = default value + name = one + shortname = one + sub = key1: Hello; key2: default value; + dict 2: two + _name_map_file = {'docs.mplx': 'two'} + _short_name_map_file = {'docs.mplx': 'two'} + dep = ['one'] + key1 = default value + key2 = World + name = two + shortname = two + sub = key1: default value; key2: World; + dict 3: three + _name_map_file = {'docs.mplx': 'three'} + _short_name_map_file = {'docs.mplx': 'three'} + dep = ['one', 'two'] + key1 = default value + key2 = default value + 
name = three + shortname = three + sub = key1: default value; key2: default value; + +With Keys, Values, Variants, Filters and Value Substitutions, we have most of what you +actually need to construct most multiplex files. The format also has some extra features +that you can find in :doc:`MultiplexConfigAdvanced` should you need them. diff --git a/docs/source/MultiplexConfigAdvanced.rst b/docs/source/MultiplexConfigAdvanced.rst new file mode 100644 index 0000000000000000000000000000000000000000..503e94f343e42cfccf228fcaef3caf1e08c2e564 --- /dev/null +++ b/docs/source/MultiplexConfigAdvanced.rst @@ -0,0 +1,702 @@ +.. _multiplex_configuration_advanced: + +================================ +Multiplex Configuration Advanced +================================ + +The features discussed in the previous section should be enough to get you going +to do a lot of testing work. If you are planning to do more, then you can check +here for some more features of the format. + +Named Variants +============== + +Named variants allow assigning a parseable name to a variant set. This enables +an entire variant set to be used in filters_. All output combinations will +inherit the named variant key, along with the specific variant name. 
For example:: + + variants var1_name: + - one: + key1 = Hello + - two: + key2 = World + - three: + variants var2_name: + - one: + key3 = Hello2 + - two: + key4 = World2 + - three: + +Using:: + + only (var2_name=one).(var1_name=two) + +Results in the following outcome when parsed with ``avocado multiplex [file] -c``:: + + Dictionaries generated: + dict 1: one.two + _name_map_file = {'docs.mplx': '(var2_name=one).(var1_name=two)'} + _short_name_map_file = {'docs.mplx': 'one.two'} + dep = [] + key2 = World + key3 = Hello2 + name = (var2_name=one).(var1_name=two) + shortname = one.two + var1_name = two + var2_name = one + +Named variants could also be used as normal variables.:: + + variants guest_os: + - fedora: + - ubuntu: + variants disk_interface: + - virtio: + - hda: + +Which then results in the following:: + + Dictionaries generated: + dict 1: virtio.fedora + _name_map_file = {'docs.mplx': '(disk_interface=virtio).(guest_os=fedora)'} + _short_name_map_file = {'docs.mplx': 'virtio.fedora'} + dep = [] + disk_interface = virtio + guest_os = fedora + name = (disk_interface=virtio).(guest_os=fedora) + shortname = virtio.fedora + dict 2: virtio.ubuntu + _name_map_file = {'docs.mplx': '(disk_interface=virtio).(guest_os=ubuntu)'} + _short_name_map_file = {'docs.mplx': 'virtio.ubuntu'} + dep = [] + disk_interface = virtio + guest_os = ubuntu + name = (disk_interface=virtio).(guest_os=ubuntu) + shortname = virtio.ubuntu + dict 3: hda.fedora + _name_map_file = {'docs.mplx': '(disk_interface=hda).(guest_os=fedora)'} + _short_name_map_file = {'docs.mplx': 'hda.fedora'} + dep = [] + disk_interface = hda + guest_os = fedora + name = (disk_interface=hda).(guest_os=fedora) + shortname = hda.fedora + dict 4: hda.ubuntu + _name_map_file = {'docs.mplx': '(disk_interface=hda).(guest_os=ubuntu)'} + _short_name_map_file = {'docs.mplx': 'hda.ubuntu'} + dep = [] + disk_interface = hda + guest_os = ubuntu + name = (disk_interface=hda).(guest_os=ubuntu) + shortname = hda.ubuntu + + +.. 
_key_sub_arrays: + +Key Sub Arrays +============== + +Parameters for objects like VM's utilize arrays of keys specific to a +particular object instance. In this way, values specific to an object +instance can be addressed. For example, a parameter ``vms`` lists the VM +objects' names to instantiate in the current frame's test. Values +specific to one of the named instances should be prefixed to the name: + +:: + + vms = vm1 second_vm another_vm + mem = 128 + mem_vm1 = 512 + mem_second_vm = 1024 + +The result would be that three virtual machine objects are created. The third +one ``another_vm`` receives the default ``mem`` value of 128. The first two +receive specialized values based on their name. + +The order in which these statements are written in a configuration file +is not important; statements addressing a single object always override +statements addressing all objects. Note: This is contrary to the way the +Multiplex Configuration file as a whole is parsed (top-down). + +.. _include_statements: + +Include Statements +================== + +The ``include`` statement is utilized within a Multiplex Configuration +file to better organize related content. When parsing, the contents of +any referenced files will be evaluated as soon as the parser encounters +the ``include`` statement. The order in which files are included is +relevant, and will carry through any key/value substitutions +(see section key_sub_arrays_) as if parsing a complete, flat file. + +.. _combinatorial_outcome: + +Combinatorial outcome +===================== + +The output of parsing a multiplex file will be just the names of the +combinatorial result set items (see short-names, section Variants). However, +the ``--contents`` (short ``-c``) parameter may be specified to examine the output in +more depth. Internally, the key/value data is stored/accessed similar to +a python dictionary instance, with the collection of dictionaries all +being part of a python list-like object. 
Irrespective of the internals, +running this module from the command-line is an excellent tool for both +reviewing and learning about the Multiplex Configuration format. + +In general, each individual combination of the defined variants provides +the parameters for a single test. Testing proceeds in order, through +each result, passing the set of keys and values through to the harness +and test code. When examining Multiplex Configuration files, It's +helpful to consider the earliest key definitions as “defaults”, then +look to the end of the file for other top-level override to those +values. If in doubt of where to define or set a key, placing it at the +top indentation level, at the end of the file, will guarantee it is +used. + +.. _formal_definition: + +Formal Definition +================= + +A list of dictionaries is referred to as a frame. The parser +produces a list of dictionaries (dicts). Each dictionary +contains a set of key-value pairs. + +Each dict contains at least three keys: ``name``, ``shortname`` and ``depend``. +The values of name and ``shortname`` are strings, and the value of depend +is a list of strings. + +The initial frame contains a single dict, whose ``name`` and ``shortname`` +are empty strings, and whose depend is an empty list. + +Parsing dict contents +--------------------- + +The dict parser operates on a frame, referred to as the current frame. + +A statement of the form `` = `` sets the value of ```` to +```` in all dicts of the current frame. If a dict lacks ````, +it will be created. + +A statement of the form `` += `` appends ```` to the +value of ```` in all dicts of the current frame. If a dict lacks +````, it will be created. + +A statement of the form `` <= `` pre-pends ```` to the +value of ```` in all dicts of the current frame. If a dict lacks +````, it will be created. + +A statement of the form `` ?= `` sets the value of ```` +to ````, in all dicts of the current frame, but only if ```` +exists in the dict. 
The operators ``?+=`` and ``?<=`` are also supported. + +A statement of the form ``no `` removes from the current frame +all dicts whose name field matches ````. + +A statement of the form ``only `` removes from the current +frame all dicts whose name field does not match ````. + +Content exceptions +------------------ + +Single line exceptions have the format ``: `` +where ```` is any of the operators listed above +(e.g. ``=``, ``+=``, ``?<=``). The statement following the +regular expression ```` will apply only to the dicts in +the current frame whose name partially matches ```` (i.e. +contains a substring that matches ````). + +A multi-line exception block is opened by a line of the format +``:``. The text following this line should be indented. The +statements in a multi-line exception block may be assignment +statements (such as `` = ``) or no or only statements. +Nested multi-line exceptions are allowed. + +Parsing Variants +---------------- + +A variants block is opened by a ``variants:`` statement. The indentation +level of the statement places the following set within the outer-most +context-level when nested within other ``variant:`` blocks. The contents +of the ``variants:`` block must be further indented. + +A variant-name may optionally follow the ``variants`` keyword, before +the ``:`` character. That name will be inherited by and decorate all +block content as the key for each variant contained in its +block. + +The names of the variants are specified as ``- variant_name:``. +Each name is pre-pended to the name field of each dict of the variant's +frame, along with a separator dot ('.'). + +The contents of each variant may use the format `` ``. +They may also contain further ``variants:`` statements. + +If the name of the variant is not preceded by a ``@`` (i.e. +``-@:``), it is pre-pended to the ``shortname`` field of +each dict of the variant's frame. 
In other words, if a variant's +name is preceeded by a ``@``, it is omitted from the shortname field. + +Each variant in a variants block inherits a copy of the frame in +which the ``variants:`` statement appears. The current frame, which +may be modified by the dict parser, becomes this copy. + +The frames of the variants defined in the block are +joined into a single frame. The contents of frame replace the +contents of the outer containing frame (if there is one). + +Filters +------- + +Filters can be used in 3 ways: + +* ``only `` +* ``no `` +* ``:`` + +That last one starts a conditional block, see _filters. + +Here ``..`` means ``AND`` and ``.`` means ``IMMEDIATELY-FOLLOWED-BY``. Example:: + + qcow2..Fedora.14, RHEL.6..raw..boot, smp2..qcow2..migrate..ide + +This means `match all dicts whose names have` +``(qcow2 AND (Fedora IMMEDIATELY-FOLLOWED-BY 14))`` ``OR`` +``((RHEL IMMEDIATELY-FOLLOWED-BY 6) AND raw AND boot)`` ``OR`` +``(smp2 AND qcow2 AND migrate AND ide)``. Note that:: + + qcow2..Fedora.14 + +is equivalent to:: + + Fedora.14..qcow2 + +But:: + + qcow2..Fedora.14 + +is not equivalent to:: + + qcow2..14.Fedora + +``ide, scsi`` is equivalent to ``scsi, ide``. + +.. 
_examples_multiplex: + +Examples +======== + +A file with no variants, just assignments:: + + key1 = value1 + key2 = value2 + key3 = value3 + +Results in the following:: + + Dictionaries generated: + dict 1: + dep = [] + key1 = value1 + key2 = value2 + key3 = value3 + name = + shortname = + +Adding a variants block:: + + key1 = value1 + key2 = value2 + key3 = value3 + + variants: + - one: + - two: + - three: + +Results in the following:: + + Dictionaries generated: + dict 1: one + _name_map_file = {'docs.mplx': 'one'} + _short_name_map_file = {'docs.mplx': 'one'} + dep = [] + key1 = value1 + key2 = value2 + key3 = value3 + name = one + shortname = one + dict 2: two + _name_map_file = {'docs.mplx': 'two'} + _short_name_map_file = {'docs.mplx': 'two'} + dep = [] + key1 = value1 + key2 = value2 + key3 = value3 + name = two + shortname = two + dict 3: three + _name_map_file = {'docs.mplx': 'three'} + _short_name_map_file = {'docs.mplx': 'three'} + dep = [] + key1 = value1 + key2 = value2 + key3 = value3 + name = three + shortname = three + +Modifying dictionaries inside a variant:: + + key1 = value1 + key2 = value2 + key3 = value3 + + variants: + - one: + key1 = Hello World + key2 <= some_prefix_ + - two: + key2 <= another_prefix_ + - three: + +Results in the following:: + + Dictionaries generated: + dict 1: one + _name_map_file = {'docs.mplx': 'one'} + _short_name_map_file = {'docs.mplx': 'one'} + dep = [] + key1 = Hello World + key2 = some_prefix_value2 + key3 = value3 + name = one + shortname = one + dict 2: two + _name_map_file = {'docs.mplx': 'two'} + _short_name_map_file = {'docs.mplx': 'two'} + dep = [] + key1 = value1 + key2 = another_prefix_value2 + key3 = value3 + name = two + shortname = two + dict 3: three + _name_map_file = {'docs.mplx': 'three'} + _short_name_map_file = {'docs.mplx': 'three'} + dep = [] + key1 = value1 + key2 = value2 + key3 = value3 + name = three + shortname = three + +Adding dependencies:: + + key1 = value1 + key2 = value2 + key3 = 
value3 + + variants: + - one: + key1 = Hello World + key2 <= some_prefix_ + - two: one + key2 <= another_prefix_ + - three: one two + +Results in the following:: + + Dictionaries generated: + dict 1: one + _name_map_file = {'docs.mplx': 'one'} + _short_name_map_file = {'docs.mplx': 'one'} + dep = [] + key1 = Hello World + key2 = some_prefix_value2 + key3 = value3 + name = one + shortname = one + dict 2: two + _name_map_file = {'docs.mplx': 'two'} + _short_name_map_file = {'docs.mplx': 'two'} + dep = ['one'] + key1 = value1 + key2 = another_prefix_value2 + key3 = value3 + name = two + shortname = two + dict 3: three + _name_map_file = {'docs.mplx': 'three'} + _short_name_map_file = {'docs.mplx': 'three'} + dep = ['one', 'two'] + key1 = value1 + key2 = value2 + key3 = value3 + name = three + shortname = three + +Multiple variant blocks:: + + key1 = value1 + key2 = value2 + key3 = value3 + + variants: + - one: + key1 = Hello World + key2 <= some_prefix_ + - two: one + key2 <= another_prefix_ + - three: one two + + variants: + - A: + - B: + +Results in the following:: + + Dictionaries generated: + dict 1: A.one + _name_map_file = {'docs.mplx': 'A.one'} + _short_name_map_file = {'docs.mplx': 'A.one'} + dep = [] + key1 = Hello World + key2 = some_prefix_value2 + key3 = value3 + name = A.one + shortname = A.one + dict 2: A.two + _name_map_file = {'docs.mplx': 'A.two'} + _short_name_map_file = {'docs.mplx': 'A.two'} + dep = ['A.one'] + key1 = value1 + key2 = another_prefix_value2 + key3 = value3 + name = A.two + shortname = A.two + dict 3: A.three + _name_map_file = {'docs.mplx': 'A.three'} + _short_name_map_file = {'docs.mplx': 'A.three'} + dep = ['A.one', 'A.two'] + key1 = value1 + key2 = value2 + key3 = value3 + name = A.three + shortname = A.three + dict 4: B.one + _name_map_file = {'docs.mplx': 'B.one'} + _short_name_map_file = {'docs.mplx': 'B.one'} + dep = [] + key1 = Hello World + key2 = some_prefix_value2 + key3 = value3 + name = B.one + shortname = B.one + dict 
5: B.two + _name_map_file = {'docs.mplx': 'B.two'} + _short_name_map_file = {'docs.mplx': 'B.two'} + dep = ['B.one'] + key1 = value1 + key2 = another_prefix_value2 + key3 = value3 + name = B.two + shortname = B.two + dict 6: B.three + _name_map_file = {'docs.mplx': 'B.three'} + _short_name_map_file = {'docs.mplx': 'B.three'} + dep = ['B.one', 'B.two'] + key1 = value1 + key2 = value2 + key3 = value3 + name = B.three + shortname = B.three + +Filters, ``no`` and ``only``:: + + key1 = value1 + key2 = value2 + key3 = value3 + + variants: + - one: + key1 = Hello World + key2 <= some_prefix_ + - two: one + key2 <= another_prefix_ + - three: one two + + variants: + - A: + no one + - B: + only one,three + +Results in the following:: + + Dictionaries generated: + dict 1: A.two + _name_map_file = {'docs.mplx': 'A.two'} + _short_name_map_file = {'docs.mplx': 'A.two'} + dep = ['A.one'] + key1 = value1 + key2 = another_prefix_value2 + key3 = value3 + name = A.two + shortname = A.two + dict 2: A.three + _name_map_file = {'docs.mplx': 'A.three'} + _short_name_map_file = {'docs.mplx': 'A.three'} + dep = ['A.one', 'A.two'] + key1 = value1 + key2 = value2 + key3 = value3 + name = A.three + shortname = A.three + dict 3: B.one + _name_map_file = {'docs.mplx': 'B.one'} + _short_name_map_file = {'docs.mplx': 'B.one'} + dep = [] + key1 = Hello World + key2 = some_prefix_value2 + key3 = value3 + name = B.one + shortname = B.one + dict 4: B.three + _name_map_file = {'docs.mplx': 'B.three'} + _short_name_map_file = {'docs.mplx': 'B.three'} + dep = ['B.one', 'B.two'] + key1 = value1 + key2 = value2 + key3 = value3 + name = B.three + shortname = B.three + +Some short names:: + + key1 = value1 + key2 = value2 + key3 = value3 + + variants: + - one: + key1 = Hello World + key2 <= some_prefix_ + - two: one + key2 <= another_prefix_ + - three: one two + + variants: + - @A: + no one + - B: + only one,three + +Results in the following:: + + Dictionaries generated: + dict 1: two + _name_map_file = 
{'docs.mplx': 'A.two'} + _short_name_map_file = {'docs.mplx': 'A.two'} + dep = ['A.one'] + key1 = value1 + key2 = another_prefix_value2 + key3 = value3 + name = A.two + shortname = two + dict 2: three + _name_map_file = {'docs.mplx': 'A.three'} + _short_name_map_file = {'docs.mplx': 'A.three'} + dep = ['A.one', 'A.two'] + key1 = value1 + key2 = value2 + key3 = value3 + name = A.three + shortname = three + dict 3: B.one + _name_map_file = {'docs.mplx': 'B.one'} + _short_name_map_file = {'docs.mplx': 'B.one'} + dep = [] + key1 = Hello World + key2 = some_prefix_value2 + key3 = value3 + name = B.one + shortname = B.one + dict 4: B.three + _name_map_file = {'docs.mplx': 'B.three'} + _short_name_map_file = {'docs.mplx': 'B.three'} + dep = ['B.one', 'B.two'] + key1 = value1 + key2 = value2 + key3 = value3 + name = B.three + shortname = B.three + +Exceptions:: + + key1 = value1 + key2 = value2 + key3 = value3 + + variants: + - one: + key1 = Hello World + key2 <= some_prefix_ + - two: one + key2 <= another_prefix_ + - three: one two + + variants: + - @A: + no one + - B: + only one,three + + three: key4 = some_value + + A: + no two + key5 = yet_another_value + +Results in the following:: + + Dictionaries generated: + dict 1: three + _name_map_file = {'docs.mplx': 'A.three'} + _short_name_map_file = {'docs.mplx': 'A.three'} + dep = ['A.one', 'A.two'] + key1 = value1 + key2 = value2 + key3 = value3 + key4 = some_value + key5 = yet_another_value + name = A.three + shortname = three + dict 2: B.one + _name_map_file = {'docs.mplx': 'B.one'} + _short_name_map_file = {'docs.mplx': 'B.one'} + dep = [] + key1 = Hello World + key2 = some_prefix_value2 + key3 = value3 + name = B.one + shortname = B.one + dict 3: B.three + _name_map_file = {'docs.mplx': 'B.three'} + _short_name_map_file = {'docs.mplx': 'B.three'} + dep = ['B.one', 'B.two'] + key1 = value1 + key2 = value2 + key3 = value3 + key4 = some_value + name = B.three + shortname = B.three + +Wrap Up +======= + +The multiplex 
config provides you with a way to specify parameters for your tests, +and also to specify complex test matrices in a concise way. You don't need to use +multiplex files as long as you are mindful to provide sensible defaults for any +params in your tests. diff --git a/docs/source/WritingTests.rst b/docs/source/WritingTests.rst index 4eb2e5f64c4012e919466cba2b606eb3f77abdbb..9ae7b24921ddabd0c834d6b69189123220ad43d2 100644 --- a/docs/source/WritingTests.rst +++ b/docs/source/WritingTests.rst @@ -1,5 +1,6 @@ .. _writing-tests: +===================== Writing Avocado Tests ===================== @@ -8,13 +9,12 @@ test module, which is a python file with a class that inherits from :class:`avocado.test.Test`. This class only really needs to implement a method called `action`, which represents the actual test payload. -Super simple example - sleeptest --------------------------------- - -Let's re-create an old time favorite, sleeptest, which is a functional -test for autotest. It does nothing but `time.sleep([number-seconds])`: +Simple example +============== -:: +Let's re-create an old time favorite, ``sleeptest``, which is a functional +test for avocado (old because we also use such a test for autotest). It does +nothing but ``time.sleep([number-seconds])``:: #!/usr/bin/python @@ -29,29 +29,105 @@ test for autotest. It does nothing but `time.sleep([number-seconds])`: """ Example test for avocado. """ + default_params = {'sleep_length': 1.0} - def action(self, length=1): + def action(self): """ Sleep for length seconds. """ - self.log.debug("Sleeping for %d seconds", length) - time.sleep(length) + self.log.debug("Sleeping for %.2f seconds", self.params.sleep_length) + time.sleep(self.params.sleep_length) if __name__ == "__main__": job.main() - This is about the simplest test you can write for avocado (at least, one using the avocado APIs). 
Note that the test object provides you with a number of -convenience attributes, such as `self.log`, that lets you log debug, info, error -and warning messages. +convenience attributes, such as ``self.log``, that lets you log debug, info, error +and warning messages. Also, we note the parameter passing system that avocado provides: +We frequently want to pass parameters to tests, and we can do that through what +we call a `multiplex file`, which is a configuration file that not only allows you +to provide params to your test, but also easily create a validation matrix in a +concise way. You can find more about the multiplex file format on :doc:`MultiplexConfig`. + +Accessing test parameters +========================= + +Each test has a set of parameters that can be accessed through ``self.params.[param-name]``. +Avocado finds and populates ``self.params`` with all parameters you define on a Multiplex +Config file (see :doc:`MultiplexConfig`), in a way that they are available as attributes, +not just dict keys. This has the advantage of reducing the boilerplate code necessary to +access those parameters. As an example, consider the following multiplex file for sleeptest:: + + variants: + - sleeptest: + sleep_length_type = float + variants: + - short: + sleep_length = 0.5 + - medium: + sleep_length = 1 + - long: + sleep_length = 5 + +You may notice some things here: there is one test param to sleeptest, called ``sleep_length``. We could have named it +``length`` really, but I prefer to create a param namespace of sorts here. Then, I defined +``sleep_length_type``, that is used by the config system to convert a value (by default a +:class:`basestring`) to an appropriate value type (in this case, we need to pass a :class:`float` +to :func:`time.sleep` anyway). Note that this is an optional feature, and you can always use +:func:`float` to convert the string value coming from the configuration anyway. 
+ +Another important design detail is that sometimes we might not want to use the config system +at all (for example, when we run an avocado test as a stand alone test). To account for this +case, we have to specify a ``default_params`` dictionary that contains the default values +for when we are not providing config from a multiplex file. + +Using a multiplex file +====================== + +You may use the avocado runner with a multiplex file to provide params and matrix +generation for sleeptest just like:: + + $ avocado run sleeptest --multiplex tests/sleeptest/sleeptest.mplx + DEBUG LOG: /home/lmr/avocado/logs/run-2014-05-13-15.44.54/debug.log + TOTAL TESTS: 3 + (1/3) sleeptest.short: PASS (0.64 s) + (2/3) sleeptest.medium: PASS (1.11 s) + (3/3) sleeptest.long: PASS (5.12 s) + TOTAL PASSED: 3 + TOTAL ERROR: 0 + TOTAL FAILED: 0 + TOTAL SKIPPED: 0 + TOTAL WARNED: 0 + ELAPSED TIME: 6.87 s + +Note that, as your multiplex file specifies all parameters for sleeptest, you can simply +leave the test url list empty, such as:: + + $ avocado run --multiplex tests/sleeptest/sleeptest.mplx + +If you want to run some tests that don't require params set by the multiplex file, you can:: + + $ avocado run "sleeptest synctest" --multiplex tests/sleeptest/sleeptest.mplx + DEBUG LOG: /home/lmr/avocado/logs/run-2014-05-13-15.47.55/debug.log + TOTAL TESTS: 4 + (1/4) sleeptest.short: PASS (0.61 s) + (2/4) sleeptest.medium: PASS (1.11 s) + (3/4) sleeptest.long: PASS (5.11 s) + (4/4) synctest.1: PASS (1.85 s) + TOTAL PASSED: 4 + TOTAL ERROR: 0 + TOTAL FAILED: 0 + TOTAL SKIPPED: 0 + TOTAL WARNED: 0 + ELAPSED TIME: 8.69 s Avocado tests are also unittests --------------------------------- +================================ Since avocado tests inherit from :class:`unittest.TestCase`, you can use all -the ``assert`` class methods on your tests. Some silly examples:: +the :func:`assert` class methods on your tests. 
Some silly examples:: class random_examples(test.Test): def action(self): @@ -86,7 +162,7 @@ Executing an avocado test gives:: ELAPSED TIME: 1.11 s Running tests with nosetests ----------------------------- +============================ `nose `__ is a python testing framework with similar goals as avocado, except that avocado also intends to provide tools to @@ -102,22 +178,24 @@ cass, you can run them with the ``nosetests`` application:: OK Setup and cleanup methods -------------------------- +========================= If you need to perform setup actions before/after your test, you may do so in the ``setup`` and ``cleanup`` methods, respectively. We'll give examples in the following section. -Building and executing 3rd party test suites --------------------------------------------- +Running third party test suites +=============================== It is very common in test automation workloads to use test suites developed -by 3rd parties. By wrapping the execution code inside an avocado test module, +by third parties. By wrapping the execution code inside an avocado test module, you gain access to the facilities and API provided by the framework. Let's say you want to pick up a test suite written in C that it is in a tarball, uncompress it, compile the suite code, and then executing the test. Here's an example that does that:: + #!/usr/bin/python + import os from avocado import test @@ -132,17 +210,34 @@ an example that does that:: """ Execute the synctest test suite. """ + default_params = {'sync_tarball': 'synctest.tar.bz2', + 'sync_length': 100, + 'sync_loop': 10} - def setup(self, tarball='synctest.tar.bz2'): - tarball_path = self.get_deps_path(tarball) + def setup(self): + """ + Set default params and build the synctest suite. 
+ """ + # Build the synctest suite + self.cwd = os.getcwd() + tarball_path = self.get_deps_path(self.params.sync_tarball) archive.extract(tarball_path, self.srcdir) self.srcdir = os.path.join(self.srcdir, 'synctest') build.make(self.srcdir) - def action(self, length=100, loop=10): + def action(self): + """ + Execute synctest with the appropriate params. + """ os.chdir(self.srcdir) - cmd = './synctest %s %s' % (length, loop) + cmd = ('./synctest %s %s' % + (self.params.sync_length, self.params.sync_loop)) process.system(cmd) + os.chdir(self.cwd) + + + if __name__ == "__main__": + job.main() Here we have an example of the ``setup`` method in action: Here we get the location of the test suite code (tarball) through @@ -156,9 +251,10 @@ and executes the ``./synctest`` command, with appropriate parameters, using :func:`avocado.utils.process.system`. Wrap Up -------- +======= While there are certainly other resources that can be used to build your tests, we recommend you take a look at the example tests present in the ``tests`` -directory to take some inspiration. It is also recommended that you take a -look at the :doc:`API documentation ` for more possibilities. +directory, that contains a few samples to take some inspiration. It is also +recommended that you take a look at the :doc:`API documentation ` +for more possibilities. 
diff --git a/docs/source/index.rst b/docs/source/index.rst index c1cf1add5e03c82901a3f097e7e6ad66d6b7b47c..0c2a549ce7469f649da20c40198331a90e67d2ca 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -16,6 +16,8 @@ Contents: GetStartedGuide DataDir WritingTests + MultiplexConfig + MultiplexConfigAdvanced Plugins OutputPlugins api/modules diff --git a/.nose.cfg b/selftests/.nose.cfg similarity index 100% rename from .nose.cfg rename to selftests/.nose.cfg diff --git a/selftests/all/doc/doc_build_test.py b/selftests/all/doc/doc_build_test.py new file mode 100755 index 0000000000000000000000000000000000000000..883f846a59dbbe9a7d62a1e87d0bfc3c3a48bde0 --- /dev/null +++ b/selftests/all/doc/doc_build_test.py @@ -0,0 +1,56 @@ +#!/usr/bin/python +""" +Build documentation and report whether we had warning/error messages. + +This is geared towards documentation build regression testing. +""" +import os +import sys + +# simple magic for using scripts within a source tree +basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', '..') +basedir = os.path.abspath(basedir) +if os.path.isdir(os.path.join(basedir, 'avocado')): + sys.path.append(basedir) + +from avocado.utils import process + + +class DocBuildError(Exception): + pass + + +def test_build_docs(): + """ + Build avocado HTML docs, reporting failures + """ + ignore_list = [] + failure_lines = [] + doc_dir = os.path.join(basedir, 'docs') + process.run('make -C %s clean' % doc_dir) + result = process.run('make -C %s html' % doc_dir) + stdout = result.stdout.splitlines() + stderr = result.stderr.splitlines() + output_lines = stdout + stderr + for line in output_lines: + ignore_msg = False + for ignore in ignore_list: + if ignore in line: + print 'Expected warning ignored: %s' % line + ignore_msg = True + if ignore_msg: + continue + if 'ERROR' in line: + failure_lines.append(line) + if 'WARNING' in line: + failure_lines.append(line) + if failure_lines: + e_msg = ('%s ERRORS and/or WARNINGS 
detected while building the html docs:\n' % + len(failure_lines)) + for (index, failure_line) in enumerate(failure_lines): + e_msg += "%s) %s\n" % (index + 1, failure_line) + e_msg += 'Please check the output and fix your docstrings/.rst docs' + raise DocBuildError(e_msg) + +if __name__ == '__main__': + test_build_docs() diff --git a/selftests/all/functional/avocado/basic_tests.py b/selftests/all/functional/avocado/basic_tests.py new file mode 100644 index 0000000000000000000000000000000000000000..1c41a8c9d3d4a699a1d18c989fd7840de1bae61f --- /dev/null +++ b/selftests/all/functional/avocado/basic_tests.py @@ -0,0 +1,57 @@ +#!/usr/bin/env python + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# +# See LICENSE for more details. +# +# Copyright: Red Hat Inc. 
2013-2014 +# Author: Lucas Meneghel Rodrigues + +import unittest +import os +import sys + +# simple magic for using scripts within a source tree +basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', '..', '..') +basedir = os.path.abspath(basedir) +if os.path.isdir(os.path.join(basedir, 'avocado')): + sys.path.append(basedir) + +from avocado.utils import process + + +class RunnerOperationTest(unittest.TestCase): + + def test_runner_all_ok(self): + os.chdir(basedir) + cmd_line = './scripts/avocado run "sleeptest sleeptest"' + process.run(cmd_line) + + def test_runner_tests_fail(self): + os.chdir(basedir) + cmd_line = './scripts/avocado run "sleeptest failtest sleeptest"' + result = process.run(cmd_line, ignore_status=True) + expected_rc = 1 + self.assertEqual(result.exit_status, expected_rc, + "Avocado did not return rc %d:\n%s" % (expected_rc, result)) + + def test_runner_nonexistent_test(self): + os.chdir(basedir) + cmd_line = './scripts/avocado run bogustest' + result = process.run(cmd_line, ignore_status=True) + expected_rc = 1 + unexpected_rc = 3 + self.assertNotEqual(result.exit_status, unexpected_rc, + "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result)) + self.assertEqual(result.exit_status, expected_rc, + "Avocado did not return rc %d:\n%s" % (expected_rc, result)) + +if __name__ == '__main__': + unittest.main() diff --git a/selftests/all/functional/avocado/multiplex_tests.py b/selftests/all/functional/avocado/multiplex_tests.py new file mode 100644 index 0000000000000000000000000000000000000000..d5d5cf3f07017376752b4226429c7a9da3de5dd5 --- /dev/null +++ b/selftests/all/functional/avocado/multiplex_tests.py @@ -0,0 +1,65 @@ +#!/usr/bin/env python + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# +# See LICENSE for more details. +# +# Copyright: Red Hat Inc. 2013-2014 +# Author: Lucas Meneghel Rodrigues + +import unittest +import os +import sys + +# simple magic for using scripts within a source tree +basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', '..', '..') +basedir = os.path.abspath(basedir) +if os.path.isdir(os.path.join(basedir, 'avocado')): + sys.path.append(basedir) + +from avocado.utils import process + + +class MultiplexTests(unittest.TestCase): + + def run_and_check(self, cmd_line, expected_rc): + os.chdir(basedir) + result = process.run(cmd_line, ignore_status=True) + self.assertEqual(result.exit_status, expected_rc, + "Command %s did not return rc " + "%d:\n%s" % (cmd_line, expected_rc, result)) + + def test_mplex_plugin(self): + cmd_line = './scripts/avocado multiplex tests/sleeptest/sleeptest.mplx' + expected_rc = 0 + self.run_and_check(cmd_line, expected_rc) + + def test_mplex_plugin_nonexistent(self): + cmd_line = './scripts/avocado multiplex nonexist' + expected_rc = 2 + self.run_and_check(cmd_line, expected_rc) + + def test_run_mplex_sleeptest(self): + cmd_line = './scripts/avocado run sleeptest --multiplex tests/sleeptest/sleeptest.mplx' + expected_rc = 0 + self.run_and_check(cmd_line, expected_rc) + + def test_run_mplex_doublesleep(self): + cmd_line = './scripts/avocado run "sleeptest sleeptest" --multiplex tests/sleeptest/sleeptest.mplx' + expected_rc = 0 + self.run_and_check(cmd_line, expected_rc) + + def test_run_mplex_failtest(self): + cmd_line = './scripts/avocado run "sleeptest failtest" --multiplex tests/sleeptest/sleeptest.mplx' + expected_rc = 1 + self.run_and_check(cmd_line, expected_rc) + +if __name__ == '__main__': + unittest.main() diff --git a/selftests/all/functional/avocado/standalone_tests.py 
b/selftests/all/functional/avocado/standalone_tests.py new file mode 100644 index 0000000000000000000000000000000000000000..a6b7c605dd4846f547acd7c8459bd705453e3151 --- /dev/null +++ b/selftests/all/functional/avocado/standalone_tests.py @@ -0,0 +1,72 @@ +#!/usr/bin/env python + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# +# See LICENSE for more details. +# +# Copyright: Red Hat Inc. 2013-2014 +# Author: Lucas Meneghel Rodrigues + +import unittest +import os +import sys + +# simple magic for using scripts within a source tree +basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', '..', '..') +basedir = os.path.abspath(basedir) +if os.path.isdir(os.path.join(basedir, 'avocado')): + sys.path.append(basedir) + +from avocado.utils import process + + +class StandaloneTests(unittest.TestCase): + + def setUp(self): + self.original_pypath = os.environ.get('PYTHONPATH') + if self.original_pypath is not None: + os.environ['PYTHONPATH'] = '%s:%s' % (basedir, self.original_pypath) + else: + os.environ['PYTHONPATH'] = '%s' % basedir + + def run_and_check(self, cmd_line, expected_rc, tstname): + os.chdir(basedir) + result = process.run(cmd_line, ignore_status=True) + self.assertEqual(result.exit_status, expected_rc, + "Stand alone %s did not return rc " + "%d:\n%s" % (tstname, expected_rc, result)) + + def test_sleeptest(self): + cmd_line = './tests/sleeptest/sleeptest.py' + expected_rc = 0 + self.run_and_check(cmd_line, expected_rc, 'sleeptest') + + def test_skiptest(self): + cmd_line = './tests/skiptest/skiptest.py' + expected_rc = 0 + 
self.run_and_check(cmd_line, expected_rc, 'skiptest') + + def test_failtest(self): + cmd_line = './tests/failtest/failtest.py' + expected_rc = 1 + self.run_and_check(cmd_line, expected_rc, 'failtest') + + def test_errortest(self): + cmd_line = './tests/errortest/errortest.py' + expected_rc = 1 + self.run_and_check(cmd_line, expected_rc, 'errortest') + + def test_warntest(self): + cmd_line = './tests/warntest/warntest.py' + expected_rc = 1 + self.run_and_check(cmd_line, expected_rc, 'warntest') + +if __name__ == '__main__': + unittest.main() diff --git a/unittests/avocado/datadir_unittest.py b/selftests/all/unit/avocado/datadir_unittest.py similarity index 100% rename from unittests/avocado/datadir_unittest.py rename to selftests/all/unit/avocado/datadir_unittest.py diff --git a/selftests/all/unit/avocado/multiplex_config_unittest.py b/selftests/all/unit/avocado/multiplex_config_unittest.py new file mode 100644 index 0000000000000000000000000000000000000000..235111c91c1c6018cf4556d6a9847f59a4dd11cf --- /dev/null +++ b/selftests/all/unit/avocado/multiplex_config_unittest.py @@ -0,0 +1,753 @@ +#!/usr/bin/python + +import unittest +import os +import gzip +import sys + +# simple magic for using scripts within a source tree +basedir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +basedir = os.path.dirname(basedir) +if os.path.isdir(os.path.join(basedir, 'avocado')): + sys.path.append(basedir) + +from avocado import multiplex_config + +mydir = os.path.dirname(__file__) +testdatadir = os.path.join(mydir, 'unittest_data') + + +class CartesianConfigTest(unittest.TestCase): + + def _checkDictionaries(self, parser, reference): + result = list(parser.get_dicts()) + # as the dictionary list is very large, test each item individually: + self.assertEquals(len(result), len(reference)) + for resdict, refdict in zip(result, reference): + # checking the dict name first should make some errors more visible + self.assertEquals(resdict.get('name'), refdict.get('name')) + 
self.assertEquals(resdict, refdict) + + def _checkConfigDump(self, config, dump): + """Check if the parser output matches a config file dump""" + configpath = os.path.join(testdatadir, config) + dumppath = os.path.join(testdatadir, dump) + + if dumppath.endswith('.gz'): + df = gzip.GzipFile(dumppath, 'r') + else: + df = open(dumppath, 'r') + # we could have used pickle, but repr()-based dumps are easier to + # enerate, debug, and edit + dumpdata = eval(df.read()) + + p = multiplex_config.Parser(configpath) + self._checkDictionaries(p, dumpdata) + + def _checkStringConfig(self, string, reference): + p = multiplex_config.Parser() + p.parse_string(string) + self._checkDictionaries(p, reference) + + def _checkStringDump(self, string, dump, defaults=False): + p = multiplex_config.Parser(defaults=defaults) + p.parse_string(string) + + self._checkDictionaries(p, dump) + + def testSimpleVariant(self): + self._checkStringConfig(""" + c = abc + variants: + - a: + x = va + - b: + x = vb + """, + [ + {'_name_map_file': {'': 'a'}, + '_short_name_map_file': {'': 'a'}, + 'c': 'abc', + 'dep': [], + 'name': 'a', + 'shortname': 'a', + 'x': 'va'}, + {'_name_map_file': {'': 'b'}, + '_short_name_map_file': {'': 'b'}, + 'c': 'abc', + 'dep': [], + 'name': 'b', + 'shortname': 'b', + 'x': 'vb'}, + ]) + + def testFilterMixing(self): + self._checkStringDump(""" + variants: + - unknown_qemu: + - rhel64: + only unknown_qemu + variants: + - kvm: + - nokvm: + variants: + - testA: + nokvm: + no unknown_qemu + - testB: + """, + [ + {'_name_map_file': {'': 'testA.kvm.unknown_qemu'}, + '_short_name_map_file': {'': 'testA.kvm.unknown_qemu'}, + 'dep': [], + 'name': 'testA.kvm.unknown_qemu', + 'shortname': 'testA.kvm.unknown_qemu'}, + {'_name_map_file': {'': 'testB.kvm.unknown_qemu'}, + '_short_name_map_file': {'': 'testB.kvm.unknown_qemu'}, + 'dep': [], + 'name': 'testB.kvm.unknown_qemu', + 'shortname': 'testB.kvm.unknown_qemu'}, + {'_name_map_file': {'': 'testB.nokvm.unknown_qemu'}, + 
'_short_name_map_file': {'': 'testB.nokvm.unknown_qemu'}, + 'dep': [], + 'name': 'testB.nokvm.unknown_qemu', + 'shortname': 'testB.nokvm.unknown_qemu'}, + ]) + + def testNameVariant(self): + self._checkStringDump(""" + variants tests: # All tests in configuration + - wait: + run = "wait" + variants: + - long: + time = short_time + - short: long + time = logn_time + - test2: + run = "test1" + + variants virt_system: + - @linux: + - windows: + + variants host_os: + - linux: + image = linux + - windows: + image = windows + + only (host_os=linux) + """, + [ + {'_name_map_file': {'': '(host_os=linux).(virt_system=linux).(tests=wait).long'}, + '_short_name_map_file': {'': 'linux.linux.wait.long'}, + 'dep': [], + 'host_os': 'linux', + 'image': 'linux', + 'name': '(host_os=linux).(virt_system=linux).(tests=wait).long', + 'run': 'wait', + 'shortname': 'linux.wait.long', + 'tests': 'wait', + 'time': 'short_time', + 'virt_system': 'linux'}, + {'_name_map_file': {'': '(host_os=linux).(virt_system=linux).(tests=wait).short'}, + '_short_name_map_file': {'': 'linux.linux.wait.short'}, + 'dep': ['(host_os=linux).(virt_system=linux).(tests=wait).long'], + 'host_os': 'linux', + 'image': 'linux', + 'name': '(host_os=linux).(virt_system=linux).(tests=wait).short', + 'run': 'wait', + 'shortname': 'linux.wait.short', + 'tests': 'wait', + 'time': 'logn_time', + 'virt_system': 'linux'}, + {'_name_map_file': {'': '(host_os=linux).(virt_system=linux).(tests=test2)'}, + '_short_name_map_file': {'': 'linux.linux.test2'}, + 'dep': [], + 'host_os': 'linux', + 'image': 'linux', + 'name': '(host_os=linux).(virt_system=linux).(tests=test2)', + 'run': 'test1', + 'shortname': 'linux.test2', + 'tests': 'test2', + 'virt_system': 'linux'}, + {'_name_map_file': {'': '(host_os=linux).(virt_system=windows).(tests=wait).long'}, + '_short_name_map_file': {'': 'linux.windows.wait.long'}, + 'dep': [], + 'host_os': 'linux', + 'image': 'linux', + 'name': 
'(host_os=linux).(virt_system=windows).(tests=wait).long', + 'run': 'wait', + 'shortname': 'linux.windows.wait.long', + 'tests': 'wait', + 'time': 'short_time', + 'virt_system': 'windows'}, + {'_name_map_file': {'': '(host_os=linux).(virt_system=windows).(tests=wait).short'}, + '_short_name_map_file': {'': 'linux.windows.wait.short'}, + 'dep': ['(host_os=linux).(virt_system=windows).(tests=wait).long'], + 'host_os': 'linux', + 'image': 'linux', + 'name': '(host_os=linux).(virt_system=windows).(tests=wait).short', + 'run': 'wait', + 'shortname': 'linux.windows.wait.short', + 'tests': 'wait', + 'time': 'logn_time', + 'virt_system': 'windows'}, + {'_name_map_file': {'': '(host_os=linux).(virt_system=windows).(tests=test2)'}, + '_short_name_map_file': {'': 'linux.windows.test2'}, + 'dep': [], + 'host_os': 'linux', + 'image': 'linux', + 'name': '(host_os=linux).(virt_system=windows).(tests=test2)', + 'run': 'test1', + 'shortname': 'linux.windows.test2', + 'tests': 'test2', + 'virt_system': 'windows'}, + ] + ) + + def testDefaults(self): + self._checkStringDump(""" + variants tests: + - wait: + run = "wait" + variants: + - long: + time = short_time + - short: long + time = logn_time + - test2: + run = "test1" + + variants virt_system [ default= linux ]: + - linux: + - @windows: + + variants host_os: + - linux: + image = linux + - @windows: + image = windows + """, + [ + {'_name_map_file': {'': '(host_os=windows).(virt_system=linux).(tests=wait).long'}, + '_short_name_map_file': {'': 'windows.linux.wait.long'}, + 'dep': [], + 'host_os': 'windows', + 'image': 'windows', + 'name': '(host_os=windows).(virt_system=linux).(tests=wait).long', + 'run': 'wait', + 'shortname': 'wait.long', + 'tests': 'wait', + 'time': 'short_time', + 'virt_system': 'linux'}, + {'_name_map_file': {'': '(host_os=windows).(virt_system=linux).(tests=wait).short'}, + '_short_name_map_file': {'': 'windows.linux.wait.short'}, + 'dep': ['(host_os=windows).(virt_system=linux).(tests=wait).long'], + 
'host_os': 'windows', + 'image': 'windows', + 'name': '(host_os=windows).(virt_system=linux).(tests=wait).short', + 'run': 'wait', + 'shortname': 'wait.short', + 'tests': 'wait', + 'time': 'logn_time', + 'virt_system': 'linux'}, + {'_name_map_file': {'': '(host_os=windows).(virt_system=linux).(tests=test2)'}, + '_short_name_map_file': {'': 'windows.linux.test2'}, + 'dep': [], + 'host_os': 'windows', + 'image': 'windows', + 'name': '(host_os=windows).(virt_system=linux).(tests=test2)', + 'run': 'test1', + 'shortname': 'test2', + 'tests': 'test2', + 'virt_system': 'linux'}, + ], + True) + + self.assertRaises(multiplex_config.ParserError, + self._checkStringDump, """ + variants tests [default=system2]: + - system1: + """, + [], + True) + + def testDel(self): + self._checkStringDump(""" + variants tests: + - wait: + run = "wait" + variants: + - long: + time = short_time + - short: long + time = logn_time + - test2: + run = "test1" + """, + [ + {'_name_map_file': {'': '(tests=wait).long'}, + '_short_name_map_file': {'': 'wait.long'}, + 'dep': [], + 'name': '(tests=wait).long', + 'run': 'wait', + 'shortname': 'wait.long', + 'tests': 'wait', + 'time': 'short_time'}, + {'_name_map_file': {'': '(tests=wait).short'}, + '_short_name_map_file': {'': 'wait.short'}, + 'dep': ['(tests=wait).long'], + 'name': '(tests=wait).short', + 'run': 'wait', + 'shortname': 'wait.short', + 'tests': 'wait', + 'time': 'logn_time'}, + {'_name_map_file': {'': '(tests=test2)'}, + '_short_name_map_file': {'': 'test2'}, + 'dep': [], + 'name': '(tests=test2)', + 'run': 'test1', + 'shortname': 'test2', + 'tests': 'test2'}, + ], + True) + + self._checkStringDump(""" + variants tests: + - wait: + run = "wait" + variants: + - long: + time = short_time + - short: long + time = logn_time + - test2: + run = "test1" + + del time + """, + [ + {'_name_map_file': {'': '(tests=wait).long'}, + '_short_name_map_file': {'': 'wait.long'}, + 'dep': [], + 'name': '(tests=wait).long', + 'run': 'wait', + 'shortname': 
'wait.long', + 'tests': 'wait'}, + {'_name_map_file': {'': '(tests=wait).short'}, + '_short_name_map_file': {'': 'wait.short'}, + 'dep': ['(tests=wait).long'], + 'name': '(tests=wait).short', + 'run': 'wait', + 'shortname': 'wait.short', + 'tests': 'wait'}, + {'_name_map_file': {'': '(tests=test2)'}, + '_short_name_map_file': {'': 'test2'}, + 'dep': [], + 'name': '(tests=test2)', + 'run': 'test1', + 'shortname': 'test2', + 'tests': 'test2'}, + ], + True) + + def testError1(self): + self.assertRaises(multiplex_config.ParserError, + self._checkStringDump, """ + variants tests: + wait: + run = "wait" + variants: + - long: + time = short_time + - short: long + time = logn_time + - test2: + run = "test1" + """, + [], + True) + + def testMissingInclude(self): + self.assertRaises(multiplex_config.MissingIncludeError, + self._checkStringDump, """ + include xxxxxxxxx/xxxxxxxxxxx + """, + [], + True) + + def testVariableAssignment(self): + self._checkStringDump(""" + variants tests: + -system1: + var = 1 + var = 2 + var += a + var <= b + system = 2 + ddd = ${tests + str(int(system) + 3)}4 + error = ${tests + str(system + 3)}4 + s.* ?= ${tests + "ahoj"}4 + s.* ?+= c + s.* ?<= d + system += 4 + var += "test" + """, + [ + {'_name_map_file': {'': '(tests=system1)'}, + '_short_name_map_file': {'': 'system1'}, + 'ddd': 'system154', + 'dep': [], + 'error': '${tests + str(system + 3)}4', + 'name': '(tests=system1)', + 'shortname': 'system1', + 'system': 'dsystem1ahoj4c4', + 'tests': 'system1', + 'var': 'b2atest'}, + ], + True) + + def testCondition(self): + self._checkStringDump(""" + variants tests [meta1]: + - wait: + run = "wait" + variants: + - long: + time = short_time + - short: long + time = logn_time + - test2: + run = "test1" + + test2: bbb = aaaa + aaa = 1 + """, + [ + {'_name_map_file': {'': '(tests=wait).long'}, + '_short_name_map_file': {'': 'wait.long'}, + 'dep': [], + 'name': '(tests=wait).long', + 'run': 'wait', + 'shortname': 'wait.long', + 'tests': 'wait', + 
'time': 'short_time'}, + {'_name_map_file': {'': '(tests=wait).short'}, + '_short_name_map_file': {'': 'wait.short'}, + 'dep': ['(tests=wait).long'], + 'name': '(tests=wait).short', + 'run': 'wait', + 'shortname': 'wait.short', + 'tests': 'wait', + 'time': 'logn_time'}, + {'_name_map_file': {'': '(tests=test2)'}, + '_short_name_map_file': {'': 'test2'}, + 'aaa': '1', + 'bbb': 'aaaa', + 'dep': [], + 'name': '(tests=test2)', + 'run': 'test1', + 'shortname': 'test2', + 'tests': 'test2'}, + ], + True) + self._checkStringDump(""" + variants: + - a: + foo = foo + c: + foo = bar + - b: + foo = foob + variants: + - c: + bala = lalalala + a: + bala = balabala + - d: + """, + [ + {'_name_map_file': {'': 'c.a'}, + '_short_name_map_file': {'': 'c.a'}, + 'bala': 'balabala', + 'dep': [], + 'foo': 'bar', + 'name': 'c.a', + 'shortname': 'c.a'}, + {'_name_map_file': {'': 'c.b'}, + '_short_name_map_file': {'': 'c.b'}, + 'bala': 'lalalala', + 'dep': [], + 'foo': 'foob', + 'name': 'c.b', + 'shortname': 'c.b'}, + {'_name_map_file': {'': 'd.a'}, + '_short_name_map_file': {'': 'd.a'}, + 'dep': [], + 'foo': 'foo', + 'name': 'd.a', + 'shortname': 'd.a'}, + {'_name_map_file': {'': 'd.b'}, + '_short_name_map_file': {'': 'd.b'}, + 'dep': [], + 'foo': 'foob', + 'name': 'd.b', + 'shortname': 'd.b'}, + ], + True) + + def testNegativeCondition(self): + self._checkStringDump(""" + variants tests [meta1]: + - wait: + run = "wait" + variants: + - long: + time = short_time + - short: long + time = logn_time + - test2: + run = "test1" + + !test2: bbb = aaaa + aaa = 1 + """, + [ + {'_name_map_file': {'': '(tests=wait).long'}, + '_short_name_map_file': {'': 'wait.long'}, + 'aaa': '1', + 'bbb': 'aaaa', + 'dep': [], + 'name': '(tests=wait).long', + 'run': 'wait', + 'shortname': 'wait.long', + 'tests': 'wait', + 'time': 'short_time'}, + {'_name_map_file': {'': '(tests=wait).short'}, + '_short_name_map_file': {'': 'wait.short'}, + 'aaa': '1', + 'bbb': 'aaaa', + 'dep': ['(tests=wait).long'], + 'name': 
'(tests=wait).short', + 'run': 'wait', + 'shortname': 'wait.short', + 'tests': 'wait', + 'time': 'logn_time'}, + {'_name_map_file': {'': '(tests=test2)'}, + '_short_name_map_file': {'': 'test2'}, + 'dep': [], + 'name': '(tests=test2)', + 'run': 'test1', + 'shortname': 'test2', + 'tests': 'test2'}, + ], + True) + + def testSyntaxErrors(self): + self.assertRaises(multiplex_config.LexerError, + self._checkStringDump, """ + variants tests$: + - system1: + var = 1 + var = 2 + var += a + var <= b + system = 2 + s.* ?= ${tests}4 + s.* ?+= c + s.* ?<= d + system += 4 + """, + [], + True) + + self.assertRaises(multiplex_config.LexerError, + self._checkStringDump, """ + variants tests [defaul$$$$t=system1]: + - system1: + """, + [], + True) + + self.assertRaises(multiplex_config.ParserError, + self._checkStringDump, """ + variants tests [default=system1] wrong: + - system1: + """, + [], + True) + + self.assertRaises(multiplex_config.ParserError, + self._checkStringDump, """ + only xxx...yyy + """, + [], + True) + + self.assertRaises(multiplex_config.ParserError, + self._checkStringDump, """ + only xxx..,yyy + """, + [], + True) + + self.assertRaises(multiplex_config.ParserError, + self._checkStringDump, """ + aaabbbb.ddd + """, + [], + True) + + self.assertRaises(multiplex_config.ParserError, + self._checkStringDump, """ + aaa.bbb: + variants test: + -sss: + """, + [], + True) + + self.assertRaises(multiplex_config.ParserError, + self._checkStringDump, """ + variants test [sss = bbb: + -sss: + """, + [], + True) + + self.assertRaises(multiplex_config.ParserError, + self._checkStringDump, """ + variants test [default]: + -sss: + """, + [], + True) + + self.assertRaises(multiplex_config.ParserError, + self._checkStringDump, """ + variants test [default] ddd: + -sss: + """, + [], + True) + + self.assertRaises(multiplex_config.ParserError, + self._checkStringDump, """ + variants test [default] ddd + """, + [], + True) + + def testComplicatedFilter(self): + 
self._checkStringDump(""" + variants tests: + - wait: + run = "wait" + variants: + - long: + time = short_time + - short: long + time = logn_time + only (host_os=linux), ( guest_os = linux ) + - test2: + run = "test1" + + variants guest_os: + - linux: + install = linux + no (tests=wait)..short + - windows: + install = windows + only test2 + + variants host_os: + - linux: + start = linux + - windows: + start = windows + only test2 + """, + [ + {'_name_map_file': {'': '(host_os=linux).(guest_os=linux).(tests=wait).long'}, + '_short_name_map_file': {'': 'linux.linux.wait.long'}, + 'dep': [], + 'guest_os': 'linux', + 'host_os': 'linux', + 'install': 'linux', + 'name': '(host_os=linux).(guest_os=linux).(tests=wait).long', + 'run': 'wait', + 'shortname': 'linux.linux.wait.long', + 'start': 'linux', + 'tests': 'wait', + 'time': 'short_time'}, + {'_name_map_file': {'': '(host_os=linux).(guest_os=linux).(tests=test2)'}, + '_short_name_map_file': {'': 'linux.linux.test2'}, + 'dep': [], + 'guest_os': 'linux', + 'host_os': 'linux', + 'install': 'linux', + 'name': '(host_os=linux).(guest_os=linux).(tests=test2)', + 'run': 'test1', + 'shortname': 'linux.linux.test2', + 'start': 'linux', + 'tests': 'test2'}, + {'_name_map_file': {'': '(host_os=linux).(guest_os=windows).(tests=test2)'}, + '_short_name_map_file': {'': 'linux.windows.test2'}, + 'dep': [], + 'guest_os': 'windows', + 'host_os': 'linux', + 'install': 'windows', + 'name': '(host_os=linux).(guest_os=windows).(tests=test2)', + 'run': 'test1', + 'shortname': 'linux.windows.test2', + 'start': 'linux', + 'tests': 'test2'}, + {'_name_map_file': {'': '(host_os=windows).(guest_os=linux).(tests=test2)'}, + '_short_name_map_file': {'': 'windows.linux.test2'}, + 'dep': [], + 'guest_os': 'linux', + 'host_os': 'windows', + 'install': 'linux', + 'name': '(host_os=windows).(guest_os=linux).(tests=test2)', + 'run': 'test1', + 'shortname': 'windows.linux.test2', + 'start': 'windows', + 'tests': 'test2'}, + {'_name_map_file': {'': 
'(host_os=windows).(guest_os=windows).(tests=test2)'}, + '_short_name_map_file': {'': 'windows.windows.test2'}, + 'dep': [], + 'guest_os': 'windows', + 'host_os': 'windows', + 'install': 'windows', + 'name': '(host_os=windows).(guest_os=windows).(tests=test2)', + 'run': 'test1', + 'shortname': 'windows.windows.test2', + 'start': 'windows', + 'tests': 'test2'}, + ], + True) + + f = "only xxx.yyy..(xxx=333).aaa, ddd (eeee) rrr.aaa" + + self._checkStringDump(f, [], True) + + lexer = multiplex_config.Lexer(multiplex_config.StrReader(f)) + lexer.set_prev_indent(-1) + lexer.get_next_check([multiplex_config.LIndent]) + lexer.get_next_check([multiplex_config.LOnly]) + p_filter = multiplex_config.parse_filter(lexer, lexer.rest_line()) + self.assertEquals(p_filter, + [[[multiplex_config.Label("xxx"), + multiplex_config.Label("yyy")], + [multiplex_config.Label("xxx", "333"), + multiplex_config.Label("aaa")]], + [[multiplex_config.Label("ddd")]], + [[multiplex_config.Label("eeee")]], + [[multiplex_config.Label("rrr"), + multiplex_config.Label("aaa")]]], + "Failed to parse filter.") + +if __name__ == '__main__': + unittest.main() diff --git a/unittests/avocado/settings_unittest.py b/selftests/all/unit/avocado/settings_unittest.py similarity index 100% rename from unittests/avocado/settings_unittest.py rename to selftests/all/unit/avocado/settings_unittest.py diff --git a/unittests/avocado/sysinfo_unittest.py b/selftests/all/unit/avocado/sysinfo_unittest.py similarity index 100% rename from unittests/avocado/sysinfo_unittest.py rename to selftests/all/unit/avocado/sysinfo_unittest.py diff --git a/unittests/avocado/test_unittest.py b/selftests/all/unit/avocado/test_unittest.py similarity index 100% rename from unittests/avocado/test_unittest.py rename to selftests/all/unit/avocado/test_unittest.py diff --git a/selftests/all/unit/avocado/utils_params_unittest.py b/selftests/all/unit/avocado/utils_params_unittest.py new file mode 100755 index 
0000000000000000000000000000000000000000..eef8a9a7801587572df2e916ed7037c065f37718 --- /dev/null +++ b/selftests/all/unit/avocado/utils_params_unittest.py @@ -0,0 +1,84 @@ +#!/usr/bin/python + +import unittest + +from avocado.utils import params + +BASE_DICT = { + 'image_boot': 'yes', + 'image_boot_stg': 'no', + 'image_chain': '', + 'image_clone_command': 'cp --reflink=auto %s %s', + 'image_format': 'qcow2', + 'image_format_stg': 'qcow2', + 'image_name': 'images/f18-64', + 'image_name_stg': 'enospc', + 'image_raw_device': 'no', + 'image_remove_command': 'rm -rf %s', + 'image_size': '10G', + 'image_snapshot_stg': 'no', + 'image_unbootable_pattern': 'Hard Disk.*not a bootable disk', + 'image_verify_bootable': 'yes', + 'images': 'image1 stg', +} + +CORRECT_RESULT_MAPPING = {"image1": {'image_boot_stg': 'no', + 'image_snapshot_stg': 'no', + 'image_chain': '', + 'image_unbootable_pattern': 'Hard Disk.*not a bootable disk', + 'image_name': 'images/f18-64', + 'image_remove_command': 'rm -rf %s', + 'image_name_stg': 'enospc', + 'image_clone_command': 'cp --reflink=auto %s %s', + 'image_size': '10G', 'images': 'image1 stg', + 'image_raw_device': 'no', + 'image_format': 'qcow2', + 'image_boot': 'yes', + 'image_verify_bootable': 'yes', + 'image_format_stg': 'qcow2'}, + "stg": {'image_snapshot': 'no', + 'image_boot_stg': 'no', + 'image_snapshot_stg': 'no', + 'image_chain': '', + 'image_unbootable_pattern': 'Hard Disk.*not a bootable disk', + 'image_name': 'enospc', + 'image_remove_command': 'rm -rf %s', + 'image_name_stg': 'enospc', + 'image_clone_command': 'cp --reflink=auto %s %s', + 'image_size': '10G', + 'images': 'image1 stg', + 'image_raw_device': 'no', + 'image_format': 'qcow2', + 'image_boot': 'no', + 'image_verify_bootable': 'yes', + 'image_format_stg': 'qcow2'}} + + +class TestParams(unittest.TestCase): + + def setUp(self): + self.params = params.Params(BASE_DICT) + + def testObjects(self): + self.assertEquals(self.params.objects("images"), ['image1', 'stg']) + + def 
testObjectsParams(self): + for key in CORRECT_RESULT_MAPPING.keys(): + self.assertEquals(self.params.object_params(key), + CORRECT_RESULT_MAPPING[key]) + + def testGetItemMissing(self): + try: + self.params['bogus'] + raise ValueError("Did not get a ParamNotFound error when trying " + "to access a non-existing param") + # pylint: disable=E0712 + except params.ParamNotFound: + pass + + def testGetItem(self): + self.assertEqual(self.params['image_size'], "10G") + + +if __name__ == "__main__": + unittest.main() diff --git a/unittests/avocado/xunit_unittest.py b/selftests/all/unit/avocado/xunit_unittest.py similarity index 100% rename from unittests/avocado/xunit_unittest.py rename to selftests/all/unit/avocado/xunit_unittest.py diff --git a/unittests/runtests.py b/selftests/run similarity index 95% rename from unittests/runtests.py rename to selftests/run index f176575e0b5e6581c48fb749dab425bf0db33aa8..14966a451bc4474ed31e18f270607dad6e188440 100755 --- a/unittests/runtests.py +++ b/selftests/run @@ -42,7 +42,7 @@ class AvocadoTestSelector(Selector): return True def wantFile(self, filename): - if not filename.endswith('_unittest.py'): + if not filename.endswith('.py'): return False skip_tests = [] @@ -82,17 +82,8 @@ class AvocadoTestRunner(Plugin): def prepareTestLoader(self, loader): loader.selector = AvocadoTestSelector(loader.config) - -def run_test(): +if __name__ == '__main__': nose.main(addplugins=[AvocadoTestRunner(), AttributeSelector(), Xunit(), Coverage()]) - - -def main(): - run_test() - - -if __name__ == '__main__': - main() diff --git a/tests/sleeptest/sleeptest.mplx b/tests/sleeptest/sleeptest.mplx new file mode 100644 index 0000000000000000000000000000000000000000..7ccaa8b27c884ca9c697623cf91571af39ff9013 --- /dev/null +++ b/tests/sleeptest/sleeptest.mplx @@ -0,0 +1,10 @@ +variants: + - sleeptest: + sleep_length_type = float + variants: + - short: + sleep_length = 0.5 + - medium: + sleep_length = 1 + - long: + sleep_length = 5 diff --git 
a/tests/sleeptest/sleeptest.py b/tests/sleeptest/sleeptest.py index 22667060ecdba05cd5a3628b542f6293cb8bc4aa..c828056b4e32b4badb138b1dcff420ddd4de2133 100755 --- a/tests/sleeptest/sleeptest.py +++ b/tests/sleeptest/sleeptest.py @@ -26,13 +26,14 @@ class sleeptest(test.Test): """ Example test for avocado. """ + default_params = {'sleep_length': 1.0} - def action(self, length=1): + def action(self): """ Sleep for length seconds. """ - self.log.debug("Sleeping for %d seconds", length) - time.sleep(length) + self.log.debug("Sleeping for %.2f seconds", self.params.sleep_length) + time.sleep(self.params.sleep_length) if __name__ == "__main__": diff --git a/tests/synctest/synctest.mplx b/tests/synctest/synctest.mplx new file mode 100644 index 0000000000000000000000000000000000000000..31c036b224e6a1f54b41006a5a2db823e639c4ea --- /dev/null +++ b/tests/synctest/synctest.mplx @@ -0,0 +1,20 @@ +variants: + - synctest: + sync_tarball = synctest.tar.bz2 + sync_length_type = int + sync_loop_type = int + variants: + - loop_short: + sync_loop = 10 + - loop_medium: + sync_loop = 50 + - loop_long: + sync_loop = 100 + variants: + - length_short: + sync_length = 100 + - length_medium: + sync_length = 500 + - length_long: + sync_length = 1000 + diff --git a/tests/synctest/synctest.py b/tests/synctest/synctest.py index 52ff8ce3092ced28e533ab2df74147a0cb2e7074..7d58199694abfca97a85a58a694554420174c466 100755 --- a/tests/synctest/synctest.py +++ b/tests/synctest/synctest.py @@ -14,7 +14,6 @@ # Copyright: Red Hat Inc. 2013-2014 # Author: Lucas Meneghel Rodrigues - import os from avocado import test @@ -29,17 +28,27 @@ class synctest(test.Test): """ Execute the synctest test suite. """ - - def setup(self, tarball='synctest.tar.bz2'): + default_params = {'sync_tarball': 'synctest.tar.bz2', + 'sync_length': 100, + 'sync_loop': 10} + + def setup(self): + """ + Build the synctest suite. 
+ """ self.cwd = os.getcwd() - tarball_path = self.get_deps_path(tarball) + tarball_path = self.get_deps_path(self.params.sync_tarball) archive.extract(tarball_path, self.srcdir) self.srcdir = os.path.join(self.srcdir, 'synctest') build.make(self.srcdir) - def action(self, length=100, loop=10): + def action(self): + """ + Execute synctest with the appropriate params. + """ os.chdir(self.srcdir) - cmd = './synctest %s %s' % (length, loop) + cmd = ('./synctest %s %s' % + (self.params.sync_length, self.params.sync_loop)) process.system(cmd) os.chdir(self.cwd)