Commit 5dca35fe authored by Cleber Rosa

Merge remote-tracking branch 'lmr/py26-compat-v3'

language: python
python:
  - "2.7"
  - "2.7_with_system_site_packages"
  - "2.6"
branches:
  only:
    - master
virtualenv:
  system_site_packages: true
before_install:
  - sudo apt-get update
  - sudo apt-get -y --force-yes install python-libvirt python-lzma python-yaml
  - sudo apt-get -y --force-yes install python-libvirt python-lzma libyaml-dev
install:
  - pip install -r requirements-travis.txt
  - if [ $TRAVIS_PYTHON_VERSION == '2.6' ]; then pip install -r requirements-travis-python26.txt; fi
script:
  - inspekt lint
......
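The install step above gates the 2.6-only backport requirements on $TRAVIS_PYTHON_VERSION. For illustration, a minimal Python sketch of the same version gate (the requirements file names match the repository; the helper itself is hypothetical):

import subprocess
import sys

def install_requirements():
    """Install base requirements, plus the 2.6 backports when needed."""
    req_files = ['requirements-travis.txt']
    if sys.version_info[:2] == (2, 6):  # same check the shell one-liner makes
        req_files.append('requirements-travis-python26.txt')
    for req in req_files:
        subprocess.check_call(['pip', 'install', '-r', req])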
@@ -6,9 +6,15 @@ License: GPLv2
Group: Development/Tools
URL: http://avocado-framework.github.io/
Source: avocado-%{version}.tar.gz
BuildRequires: python2-devel, python-docutils, python-yaml
BuildArch: noarch
Requires: python, python-requests, python-yaml, fabric, pyliblzma
%if "%{?dist}" == ".el6"
Requires: python, python-requests, fabric, pyliblzma, libvirt-python, pystache, PyYAML, python-argparse, python-unittest2, python-logutils, python-importlib
BuildRequires: python2-devel, python-docutils, PyYAML, python-logutils
%else
Requires: python, python-requests, fabric, pyliblzma, libvirt-python, pystache, python-yaml
BuildRequires: python2-devel, python-docutils, python-yaml
%endif
%description
Avocado is a set of tools and libraries (what people call
@@ -19,8 +25,13 @@ these days a framework) to perform automated testing.
%build
%{__python} setup.py build
%if "%{?dist}" == ".el6"
%{__python} /usr/bin/rst2man man/avocado.rst man/avocado.1
%{__python} /usr/bin/rst2man man/avocado-rest-client.rst man/avocado-rest-client.1
%else
%{__python2} /usr/bin/rst2man man/avocado.rst man/avocado.1
%{__python2} /usr/bin/rst2man man/avocado-rest-client.rst man/avocado-rest-client.1
%endif
%install
%{__python} setup.py install --root %{buildroot} --skip-build
@@ -35,13 +46,15 @@ these days a framework) to perform automated testing.
%dir /etc/avocado/conf.d
%config(noreplace)/etc/avocado/avocado.conf
%config(noreplace)/etc/avocado/conf.d/README
%{python_sitelib}/avocado*
%{_bindir}/avocado
%{_bindir}/avocado-rest-client
%exclude %{python_sitelib}/avocado/plugins/htmlresult.py*
%exclude %{python_sitelib}/avocado/plugins/resources/htmlresult/*
%{python_sitelib}/avocado*
%{_mandir}/man1/avocado.1.gz
%{_mandir}/man1/avocado-rest-client.1.gz
%{_docdir}/avocado/avocado.rst
%{_docdir}/avocado/avocado-rest-client.rst
%exclude %{python_sitelib}/avocado/plugins/htmlresult.py*
%exclude %{python_sitelib}/avocado/plugins/resources/htmlresult/*
%package plugins-output-html
Summary: Avocado HTML report plugin
@@ -77,7 +90,7 @@ examples of how to write tests on your own.
* Fri Feb 6 2015 Lucas Meneghel Rodrigues <lmr@redhat.com> - 0.20.1-1
- Update to upstream version 0.20.1
* Mon Feb 3 2015 Lucas Meneghel Rodrigues <lmr@redhat.com> - 0.20.0-1
* Tue Feb 3 2015 Lucas Meneghel Rodrigues <lmr@redhat.com> - 0.20.0-1
- Update to upstream version 0.20.0
* Mon Dec 15 2014 Lucas Meneghel Rodrigues <lmr@redhat.com> - 0.17.0-1
......
@@ -12,6 +12,12 @@
# Copyright: Red Hat Inc. 2013-2014
# Author: Lucas Meneghel Rodrigues <lmr@redhat.com>
import logging

if hasattr(logging, 'NullHandler'):
    NULL_HANDLER = 'logging.NullHandler'
else:
    NULL_HANDLER = 'logutils.NullHandler'

DEFAULT_LOGGING = {
    'version': 1,
@@ -32,7 +38,7 @@ DEFAULT_LOGGING = {
    'handlers': {
        'null': {
            'level': 'INFO',
            'class': 'logging.NullHandler',
            'class': NULL_HANDLER,
        },
        'console': {
            'level': 'INFO',
@@ -97,4 +103,11 @@ DEFAULT_LOGGING = {
}

from logging import config

config.dictConfig(DEFAULT_LOGGING)
if not hasattr(config, 'dictConfig'):
    from logutils import dictconfig
    cfg_func = dictconfig.dictConfig
else:
    cfg_func = config.dictConfig

cfg_func(DEFAULT_LOGGING)
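Python 2.6's logging module has neither NullHandler nor logging.config.dictConfig; both appeared in 2.7, and the logutils backport (pinned in requirements-travis-python26.txt below) fills the gap. A standalone sketch of the same fallback pattern, assuming logutils is installed on 2.6:

import logging
import logging.config

try:
    NullHandler = logging.NullHandler            # available on Python 2.7+
except AttributeError:
    from logutils import NullHandler             # logutils backport for 2.6

try:
    dict_config = logging.config.dictConfig      # available on Python 2.7+
except AttributeError:
    from logutils.dictconfig import dictConfig as dict_config  # 2.6 backport

# A library logger stays silent unless the application configures logging.
logging.getLogger('avocado').addHandler(NullHandler())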
@@ -32,9 +32,10 @@ class Parser(object):

    def __init__(self):
        self.application = argparse.ArgumentParser(
            prog='avocado',
            version='Avocado %s' % VERSION,
            add_help=False,  # see parent parsing
            description='Avocado Test Runner')

        self.application.add_argument('-v', '--version', action='version',
                                      version='Avocado %s' % VERSION)
        self.application.add_argument('--logdir', action='store',
                                      help='Alternate logs directory',
                                      dest='logdir', default='')
......
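The version= keyword to ArgumentParser was deprecated in Python 2.7 and rejected by newer releases of the standalone argparse package (the kind pinned for 2.6 in requirements-travis-python26.txt), so the version string moves to an explicit action='version' argument, as the hunk above shows. A minimal sketch of the replacement pattern; VERSION here is a stand-in for the value the real code imports:

import argparse

VERSION = '0.0'  # placeholder; avocado imports its real version string

parser = argparse.ArgumentParser(prog='avocado',
                                 description='Avocado Test Runner')
parser.add_argument('-v', '--version', action='version',
                    version='Avocado %s' % VERSION)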
@@ -50,6 +50,13 @@ class ReportModel(object):
        self.relative_links = relative_links
        self.html_output = html_output

    def get(self, key, default):
        value = getattr(self, key, default)
        if callable(value):
            return value()
        else:
            return value

    def job_id(self):
        return self.json['job_id']
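The added get() lets older pystache, which resolves context values through a dict-like get() call, pull attributes off the model and invoke the ones that are methods (like job_id above). A self-contained sketch of that lookup behavior, under the assumption that this is the protocol older pystache expects:

class Model(object):
    """Context object whose methods are resolved to plain values via get()."""

    def __init__(self, job_id):
        self._job_id = job_id

    def job_id(self):
        return self._job_id

    def get(self, key, default=None):
        value = getattr(self, key, default)
        # Resolve zero-argument methods so templates see plain values.
        return value() if callable(value) else value

print(Model('run-2014').get('job_id'))          # -> run-2014
print(Model('run-2014').get('missing', 'n/a'))  # -> n/a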
@@ -238,10 +245,18 @@ class HTMLTestResult(TestResult):
        context = ReportModel(json_input=self.json, html_output=self.output,
                              relative_links=relative_links)
        renderer = pystache.Renderer('utf-8', 'utf-8')
        html = HTML()
        template = html.get_resource_path('templates', 'report.mustache')
        report_contents = renderer.render(open(template, 'r').read(), context)
        # pylint: disable=E0611
        if hasattr(pystache, 'Renderer'):
            renderer = pystache.Renderer('utf-8', 'utf-8')
            report_contents = renderer.render(open(template, 'r').read(), context)
        else:
            from pystache import view
            v = view.View(open(template, 'r').read(), context)
            report_contents = v.render('utf8')
        static_basedir = html.get_resource_path('static')
        output_dir = os.path.dirname(os.path.abspath(self.output))
        utils_path.init_dir(output_dir)
......
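The plugin now feature-detects the pystache API: the Renderer class exists in pystache 0.5.x (the version pinned in requirements-travis.txt), while older releases expose pystache.view.View, which the else branch calls exactly as above. A condensed sketch of the same detection; the template string and dict context are illustrative:

import pystache

def render(template_text, context):
    """Render a mustache template with whichever pystache API exists."""
    if hasattr(pystache, 'Renderer'):                 # pystache >= 0.5
        return pystache.Renderer('utf-8', 'utf-8').render(template_text,
                                                          context)
    from pystache import view                         # older pystache releases
    return view.View(template_text, context).render('utf8')

print(render('Hello {{name}}!', {'name': 'avocado'}))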
@@ -99,7 +99,8 @@ class RemoteTestRunner(TestRunner):
        for t_dict in json_result['tests']:
            logdir = os.path.dirname(self.result.stream.debuglog)
            logdir = os.path.join(logdir, 'test-results')
            logdir = os.path.join(logdir, os.path.relpath(t_dict['url'], '/'))
            relative_path = t_dict['url'].lstrip('/')
            logdir = os.path.join(logdir, relative_path)
            t_dict['logdir'] = logdir
            t_dict['logfile'] = os.path.join(logdir, 'debug.log')
......
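The remote runner previously mapped an absolute test URL into the local results tree with os.path.relpath(url, '/'); stripping the leading slash directly produces the same join without going through relpath. A small check of the equivalence on POSIX-style paths (the example paths are illustrative):

import os.path

url = '/sleeptest.1'
logdir = os.path.join('/local/logs', 'test-results')

# Both forms drop the leading separator before joining:
assert os.path.relpath(url, '/') == url.lstrip('/') == 'sleeptest.1'
print(os.path.join(logdir, url.lstrip('/')))
# -> /local/logs/test-results/sleeptest.1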
@@ -24,7 +24,11 @@ import pipes
import shutil
import sys
import time

import unittest
if sys.version_info[:2] == (2, 6):
    import unittest2 as unittest
else:
    import unittest

from avocado import sysinfo
from avocado.core import data_dir
@@ -222,7 +226,7 @@ class Test(unittest.TestCase):
                         'traceback', 'workdir', 'whiteboard', 'time_start',
                         'time_end', 'running', 'paused', 'paused_msg',
                         'fail_class']
        state = {key: self.__dict__.get(key) for key in preserve_attr}
        state = dict([(key, self.__dict__.get(key)) for key in preserve_attr])
        state['params'] = dict(self.__dict__['params'])
        state['class_name'] = self.__class__.__name__
        state['job_logdir'] = self.job.logdir
@@ -548,8 +552,8 @@ class SimpleTest(Test):
        Run the executable, and log its detailed execution.
        """
        try:
            test_params = {str(key): str(val)
                           for key, val in self.params.iteritems()}
            test_params = dict([(str(key), str(val)) for key, val in
                                self.params.iteritems()])
            # process.run uses shlex.split(), the self.path needs to be escaped
            result = process.run(pipes.quote(self.path), verbose=True,
                                 env=test_params)
......
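Both hunks above rewrite dict comprehensions, which are 2.7+ syntax, into dict() over an iterable of pairs, which Python 2.6 also parses; the unittest2 fallback at the top of the file serves the same goal for the test API. A side-by-side sketch of the 2.6-safe form, with an illustrative params dict:

params = {'sleep_length': 1, 'timeout': 60}

# 2.7+ only: {str(k): str(v) for k, v in params.items()}
# 2.6-compatible equivalent:
test_params = dict((str(key), str(val)) for key, val in params.items())
print(sorted(test_params.items()))
# -> [('sleep_length', '1'), ('timeout', '60')]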
# All python 2.6 specific requirements (backports)
argparse==1.3.0
logutils==0.3.3
importlib==1.0.3
unittest2==1.0.0
@@ -4,5 +4,7 @@ nose==1.3.4
pystache==0.5.4
Sphinx==1.3b1
flexmock==0.9.7
inspektor==0.1.14
inspektor==0.1.15
pep8==1.6.2
requests==1.2.3
PyYAML==3.11
import os
import sys
import glob
import unittest
if sys.version_info[:2] == (2, 6):
    import unittest2 as unittest
else:
    import unittest
# simple magic for using scripts within a source tree
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', '..', '..')
......
import json
import unittest
import os
import shutil
import time
@@ -7,6 +6,11 @@ import sys
import tempfile
import xml.dom.minidom
if sys.version_info[:2] == (2, 6):
    import unittest2 as unittest
else:
    import unittest
# simple magic for using scripts within a source tree
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', '..', '..')
basedir = os.path.abspath(basedir)
@@ -354,7 +358,8 @@ class PluginsTest(unittest.TestCase):
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)
        # version_info[:2] is a 2-tuple, so compare against (2, 7);
        # (2, 7, 0) would never match and the assertion would never run
        if sys.version_info[:2] >= (2, 7):
            self.assertNotIn('Disabled', output)

    def test_config_plugin(self):
        os.chdir(basedir)
......
import os
import sys
import unittest
if sys.version_info[:2] == (2, 6):
    import unittest2 as unittest
else:
    import unittest
# simple magic for using scripts within a source tree
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', '..', '..')
......
#!/usr/bin/env python
import unittest
import os
import sys
if sys.version_info[:2] == (2, 6):
    import unittest2 as unittest
else:
    import unittest
# simple magic for using scripts within a source tree
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..',
                       '..', '..')
......
import os
import sys
import unittest
if sys.version_info[:2] == (2, 6):
    import unittest2 as unittest
else:
    import unittest
# simple magic for using scripts within a source tree
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', '..', '..')
......
import json
import tempfile
import unittest
import os
import sys
import shutil
from xml.dom import minidom
if sys.version_info[:2] == (2, 6):
    import unittest2 as unittest
else:
    import unittest
# simple magic for using scripts within a source tree
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', '..', '..')
basedir = os.path.abspath(basedir)
@@ -59,21 +63,6 @@ class OutputPluginTest(unittest.TestCase):
                      "Missing excerpt error message from output:\n%s" % output)

    def test_output_incompatible_setup_2(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --vm-domain aaa '
                    '--vm-hostname host --json - passtest')
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = 2
        output = result.stdout + result.stderr
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        error_excerpt = ("Options --json --vm-domain are trying to use "
                         "stdout simultaneously")
        self.assertIn(error_excerpt, output,
                      "Missing excerpt error message from output:\n%s" % output)

    def test_output_incompatible_setup_3(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --html - passtest'
        result = process.run(cmd_line, ignore_status=True)
......
import unittest
import os
import sys
if sys.version_info[:2] == (2, 6):
    import unittest2 as unittest
else:
    import unittest
# simple magic for using scripts within a source tree
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', '..', '..')
basedir = os.path.abspath(basedir)
......
import os
import sys
import unittest
if sys.version_info[:2] == (2, 6):
    import unittest2 as unittest
else:
    import unittest
# simple magic for using scripts within a source tree
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', '..', '..')
......
import os
import sys
import unittest
import time
import tempfile
if sys.version_info[:2] == (2, 6):
    import unittest2 as unittest
else:
    import unittest
# simple magic for using scripts within a source tree
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', '..', '..')
basedir = os.path.abspath(basedir)
......
import unittest
import itertools
import sys
if sys.version_info[:2] == (2, 6):
    import unittest2 as unittest
else:
    import unittest
from avocado import multiplexer
from avocado.core import tree
......
@@ -18,10 +18,10 @@ JSON_RESULTS = ('Something other than json\n'
                '"status": "PASS", "time": 1.23, "start": 0, "end": 1.23}],'
                '"debuglog": "/home/user/avocado/logs/run-2014-05-26-15.45.'
                '37/debug.log", "errors": 0, "skip": 0, "time": 1.4, '
                '"logdir": "/local/path/test-results%s/sleeptest", '
                '"logdir": "/local/path/test-results%s/sleeptest", '
                '"logdir": "/local/path/test-results/sleeptest", '
                '"logdir": "/local/path/test-results/sleeptest", '
                '"start": 0, "end": 1.4, "pass": 1, "failures": 0, "total": '
                '1}\nAdditional stuff other than json' % (cwd, cwd))
                '1}\nAdditional stuff other than json')
class RemoteTestRunnerTest(unittest.TestCase):
@@ -63,9 +63,8 @@ class RemoteTestRunnerTest(unittest.TestCase):
                'tagged_name': u'sleeptest.1', 'time_elapsed': 1.23,
                'fail_class': 'Not supported yet', 'job_unique_id': '',
                'fail_reason': 'None',
                'logdir': '/local/path/test-results%s/sleeptest' % cwd,
                'logfile': '/local/path/test-results%s/sleeptest/debug.log' %
                           cwd}
                'logdir': '/local/path/test-results/sleeptest',
                'logfile': '/local/path/test-results/sleeptest/debug.log'}
        Results.should_receive('start_test').once().with_args(args).ordered()
        Results.should_receive('check_test').once().with_args(args).ordered()
        (Remote.should_receive('receive_files')
......
import unittest
import os
import sys
import tempfile
if sys.version_info[:2] == (2, 6):
    import unittest2 as unittest
else:
    import unittest
# simple magic for using scripts within a source tree
basedir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
basedir = os.path.dirname(basedir)
......
import os
import sys
import unittest
import tempfile
if sys.version_info[:2] == (2, 6):
    import unittest2 as unittest
else:
    import unittest
# simple magic for using scripts within a source tree
basedir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
basedir = os.path.dirname(basedir)
......
import unittest
import os
import shutil
import sys
import tempfile
if sys.version_info[:2] == (2, 6):
    import unittest2 as unittest
else:
    import unittest
# simple magic for using scripts within a source tree
basedir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
basedir = os.path.dirname(basedir)
......
import copy
import unittest
import sys
if sys.version_info[:2] == (2, 6):
    import unittest2 as unittest
else:
    import unittest
from avocado.core import tree
......
import unittest
import sys
if sys.version_info[:2] == (2, 6):
    import unittest2 as unittest
else:
    import unittest
from avocado import runtime
from avocado.utils import process
......
@@ -66,5 +66,12 @@ class AvocadoTestRunner(Plugin):
    loader.selector = AvocadoTestSelector(loader.config)

if __name__ == '__main__':
    nose.main(addplugins=[AvocadoTestRunner(),
                          AttributeSelector()])
    if 'addplugins' in nose.main.__init__.func_code.co_varnames:
        nose.main(addplugins=[AvocadoTestRunner(),
                              AttributeSelector()])
    elif 'plugins' in nose.main.__init__.func_code.co_varnames:
        nose.main(plugins=[AvocadoTestRunner(),
                           AttributeSelector()])
    else:
        print("Unsupported nose API, can't proceed with testing...")
        sys.exit(1)
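Different nose releases spell the plugin-list keyword differently, so the script above inspects the code object of nose.main's __init__ for whichever name it accepts before calling it. A standalone sketch of that Python 2 introspection idiom (func_code is the Python 2 spelling of __code__; the main function here is a stand-in, not the nose API):

def main(addplugins=None, config=None):
    """Stand-in for nose.main, just to demonstrate the introspection."""
    return addplugins

accepted = main.func_code.co_varnames  # Python 2; use __code__ on Python 3
if 'addplugins' in accepted:
    main(addplugins=[])
elif 'plugins' in accepted:
    main(plugins=[])
else:
    raise SystemExit("Unsupported nose API, can't proceed with testing...")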