Unverified commit 2974776f authored by Cleber Rosa

Merge remote-tracking branch 'ldoktor/release'

Signed-off-by: Cleber Rosa <crosa@redhat.com>
@@ -7,17 +7,17 @@
# Settings used for build from snapshots.
%if 0%{?rel_build}
%global gittar %{srcname}-%{version}.tar.gz
%global gittar %{srcname}-%{version}.tar.gz
%else
%if ! 0%{?commit:1}
%global commit ef2b6f3b14716ef76912bd71feb582f0226fa217
%global commit ef2b6f3b14716ef76912bd71feb582f0226fa217
%endif
%if ! 0%{?commit_date:1}
%global commit_date 20180227
%global commit_date 20180227
%endif
%global shortcommit %(c=%{commit};echo ${c:0:8})
%global gitrel .%{commit_date}git%{shortcommit}
%global gittar %{srcname}-%{shortcommit}.tar.gz
%global shortcommit %(c=%{commit};echo ${c:0:8})
%global gitrel .%{commit_date}git%{shortcommit}
%global gittar %{srcname}-%{shortcommit}.tar.gz
%endif
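
For context on the hunk above: when not building from a release tarball, the spec derives a short commit hash, a snapshot release suffix and the source tarball name from the full commit ID and commit date. A minimal Python sketch of that derivation follows; the srcname value is an assumption for illustration, not taken from this diff.

# Illustrative re-implementation of the %global macros above (not part of the spec).
commit = "ef2b6f3b14716ef76912bd71feb582f0226fa217"
commit_date = "20180227"
srcname = "avocado"                  # assumption: what %{srcname} is expected to expand to

shortcommit = commit[:8]             # mirrors %(c=%{commit}; echo ${c:0:8})
gitrel = ".%sgit%s" % (commit_date, shortcommit)
gittar = "%s-%s.tar.gz" % (srcname, shortcommit)

print(shortcommit, gitrel, gittar)   # ef2b6f3b .20180227gitef2b6f3b avocado-ef2b6f3b.tar.gz
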
# Selftests are provided but may need to be skipped because many of
@@ -433,6 +433,7 @@ AVOCADO_CHECK_LEVEL=0 %{__python3} selftests/run
%exclude %{python2_sitelib}/avocado_framework_plugin_result_upload*
%exclude %{python2_sitelib}/avocado_framework_plugin_glib*
%exclude %{python2_sitelib}/avocado/libexec*
%exclude %{python2_sitelib}/tests*
%if %{with_python3}
%files -n python3-%{srcname}
@@ -455,6 +456,7 @@ AVOCADO_CHECK_LEVEL=0 %{__python3} selftests/run
%exclude %{python3_sitelib}/avocado_framework_plugin_golang*
%exclude %{python3_sitelib}/avocado_framework_plugin_result_upload*
%exclude %{python3_sitelib}/avocado_framework_plugin_glib*
%exclude %{python3_sitelib}/tests*
%endif
%package common
@@ -567,7 +569,7 @@ server.
%package -n python2-%{srcname}-plugins-varianter-yaml-to-mux
Summary: Avocado plugin to generate variants out of yaml files
Requires: %{name} == %{version}
Requires: python2-%{srcname} == %{version}
%if 0%{?rhel}
Requires: PyYAML
%else
@@ -598,7 +600,7 @@ defined in a yaml file(s).
%package -n python2-%{srcname}-plugins-loader-yaml
Summary: Avocado Plugin that loads tests from YAML files
Requires: %{name}-plugins-varianter-yaml-to-mux == %{version}
Requires: python2-%{srcname}-plugins-varianter-yaml-to-mux == %{version}
%description -n python2-%{srcname}-plugins-loader-yaml
Can be used to produce a test suite from definitions in a YAML file,
@@ -706,7 +708,7 @@ a dedicated sever.
%package -n python2-%{srcname}-plugins-glib
Summary: Avocado Plugin for Execution of GLib Test Framework tests
Requires: %{name} == %{version}
Requires: python2-%{srcname} == %{version}
%description -n python2-%{srcname}-plugins-glib
This optional plugin is intended to list and run tests written in the
@@ -719,7 +721,7 @@ GLib Test Framework.
%if %{with_python3}
%package -n python3-%{srcname}-plugins-glib
Summary: Avocado Plugin for Execution of GLib Test Framework tests
Requires: %{name} == %{version}
Requires: python3-%{srcname} == %{version}
%description -n python3-%{srcname}-plugins-glib
This optional plugin is intended to list and run tests written in the
......
@@ -38,7 +38,8 @@ from avocado.utils import path as utils_path
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)
AVOCADO = os.environ.get("UNITTEST_AVOCADO_CMD", "./scripts/avocado")
AVOCADO = os.environ.get("UNITTEST_AVOCADO_CMD",
"%s ./scripts/avocado" % sys.executable)
LOCAL_IMPORT_TEST_CONTENTS = '''
from avocado import Test
@@ -790,8 +791,9 @@ class RunnerSimpleTest(unittest.TestCase):
test_base_dir = os.path.dirname(self.pass_script.path)
os.chdir(test_base_dir)
test_file_name = os.path.basename(self.pass_script.path)
cmd_line = ('%s run --job-results-dir %s --sysinfo=off'
' "%s"' % (avocado_path, self.tmpdir, test_file_name))
cmd_line = ('%s %s run --job-results-dir %s --sysinfo=off'
' "%s"' % (sys.executable, avocado_path, self.tmpdir,
test_file_name))
result = process.run(cmd_line, ignore_status=True)
expected_rc = exit_codes.AVOCADO_ALL_OK
self.assertEqual(result.exit_status, expected_rc,
@@ -877,7 +879,7 @@ class RunnerSimpleTestStatus(unittest.TestCase):
self.tmpdir, warn_script.path))
result = process.run(cmd_line, ignore_status=True)
json_results = json.loads(result.stdout_text)
self.assertEquals(json_results['tests'][0]['status'], 'WARN')
self.assertEqual(json_results['tests'][0]['status'], 'WARN')
warn_script.remove()
# Skip in STDOUT should be handled because of config
skip_script = script.TemporaryScript('avocado_skip.sh',
@@ -890,7 +892,7 @@ class RunnerSimpleTestStatus(unittest.TestCase):
self.tmpdir, skip_script.path))
result = process.run(cmd_line, ignore_status=True)
json_results = json.loads(result.stdout_text)
self.assertEquals(json_results['tests'][0]['status'], 'SKIP')
self.assertEqual(json_results['tests'][0]['status'], 'SKIP')
skip_script.remove()
# STDERR skip should not be handled
skip2_script = script.TemporaryScript('avocado_skip.sh',
@@ -903,7 +905,7 @@ class RunnerSimpleTestStatus(unittest.TestCase):
self.tmpdir, skip2_script.path))
result = process.run(cmd_line, ignore_status=True)
json_results = json.loads(result.stdout_text)
self.assertEquals(json_results['tests'][0]['status'], 'PASS')
self.assertEqual(json_results['tests'][0]['status'], 'PASS')
skip2_script.remove()
def tearDown(self):
......
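
The functional test hunks above share one pattern: the default avocado command and the command lines passed to process.run() are now prefixed with sys.executable, so the child process runs under the same Python interpreter as the test suite instead of relying on the script's shebang or on whatever "python" resolves to. A minimal sketch of that pattern follows; the results directory and test file name are placeholders, not values from the diff.

# Sketch of the sys.executable prefix pattern used in the hunks above.
import os
import sys

AVOCADO = os.environ.get("UNITTEST_AVOCADO_CMD",
                         "%s ./scripts/avocado" % sys.executable)

# Placeholder job directory and test reference, for illustration only.
cmd_line = ('%s run --job-results-dir %s --sysinfo=off "%s"'
            % (AVOCADO, "/tmp/avocado-results", "passtest.py"))
print(cmd_line)
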
@@ -10,6 +10,9 @@ basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)
PY_CMD = sys.executable
class StandaloneTests(unittest.TestCase):
def setUp(self):
@@ -28,22 +31,22 @@ class StandaloneTests(unittest.TestCase):
return result
def test_passtest(self):
cmd_line = './examples/tests/passtest.py -r'
cmd_line = '%s ./examples/tests/passtest.py -r' % PY_CMD
expected_rc = exit_codes.AVOCADO_ALL_OK
self.run_and_check(cmd_line, expected_rc, 'passtest')
def test_warntest(self):
cmd_line = './examples/tests/warntest.py -r'
cmd_line = '%s ./examples/tests/warntest.py -r' % PY_CMD
expected_rc = exit_codes.AVOCADO_ALL_OK
self.run_and_check(cmd_line, expected_rc, 'warntest')
def test_failtest(self):
cmd_line = './examples/tests/failtest.py -r'
cmd_line = '%s ./examples/tests/failtest.py -r' % PY_CMD
expected_rc = exit_codes.AVOCADO_TESTS_FAIL
self.run_and_check(cmd_line, expected_rc, 'failtest')
def test_errortest_nasty(self):
cmd_line = './examples/tests/errortest_nasty.py -r'
cmd_line = '%s ./examples/tests/errortest_nasty.py -r' % PY_CMD
expected_rc = exit_codes.AVOCADO_TESTS_FAIL
result = self.run_and_check(cmd_line, expected_rc, 'errortest_nasty')
if sys.version_info[0] == 3:
@@ -56,14 +59,14 @@ class StandaloneTests(unittest.TestCase):
"exception details." % (exc))
def test_errortest_nasty2(self):
cmd_line = './examples/tests/errortest_nasty2.py -r'
cmd_line = '%s ./examples/tests/errortest_nasty2.py -r' % PY_CMD
expected_rc = exit_codes.AVOCADO_TESTS_FAIL
result = self.run_and_check(cmd_line, expected_rc, 'errortest_nasty2')
self.assertIn(b"Exception: Unable to get exception, check the traceback"
b" for details.", result.stdout)
def test_errortest_nasty3(self):
cmd_line = './examples/tests/errortest_nasty3.py -r'
cmd_line = '%s ./examples/tests/errortest_nasty3.py -r' % PY_CMD
expected_rc = exit_codes.AVOCADO_TESTS_FAIL
result = self.run_and_check(cmd_line, expected_rc, 'errortest_nasty3')
if sys.version_info[0] == 3:
@@ -73,7 +76,7 @@ class StandaloneTests(unittest.TestCase):
self.assertIn(exc, result.stdout)
def test_errortest(self):
cmd_line = './examples/tests/errortest.py -r'
cmd_line = '%s ./examples/tests/errortest.py -r' % PY_CMD
expected_rc = exit_codes.AVOCADO_TESTS_FAIL
self.run_and_check(cmd_line, expected_rc, 'errortest')
......
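
The standalone test hunks apply the same idea: each example script is executed as "%s <script> -r" % PY_CMD, with PY_CMD set to sys.executable, rather than through the script's shebang. The real run_and_check() helper lives in the elided part of the file; the sketch below is only a hypothetical stand-in for the pattern of running the script and comparing its exit status with the expected Avocado exit code.

# Hypothetical stand-in for the elided run_and_check() helper (not the project's code).
import subprocess
import sys

def run_and_check(script, expected_rc):
    cmd = [sys.executable, script, "-r"]
    result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    assert result.returncode == expected_rc, (
        "%s exited with %s, expected %s" % (script, result.returncode, expected_rc))
    return result

# Example call, assuming the avocado source tree is the working directory:
# run_and_check("./examples/tests/passtest.py", 0)   # 0 is exit_codes.AVOCADO_ALL_OK
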
@@ -49,10 +49,10 @@ class JobTest(unittest.TestCase):
self.assertIsNone(self.job.test_suite)
self.assertIsNone(self.job.tmpdir)
self.assertFalse(self.job._Job__remove_tmpdir)
self.assertEquals(self.job.args, args)
self.assertEquals(self.job.exitcode, exit_codes.AVOCADO_ALL_OK)
self.assertEquals(self.job.references, [])
self.assertEquals(self.job.status, "RUNNING")
self.assertEqual(self.job.args, args)
self.assertEqual(self.job.exitcode, exit_codes.AVOCADO_ALL_OK)
self.assertEqual(self.job.references, [])
self.assertEqual(self.job.status, "RUNNING")
uid = self.job.unique_id
# Job with setup called
@@ -62,8 +62,8 @@ class JobTest(unittest.TestCase):
self.assertIsNotNone(self.job.result)
self.assertIsNotNone(self.job.tmpdir)
self.assertTrue(self.job._Job__remove_tmpdir)
self.assertEquals(uid, self.job.unique_id)
self.assertEquals(self.job.status, "RUNNING")
self.assertEqual(uid, self.job.unique_id)
self.assertEqual(self.job.status, "RUNNING")
# Calling setup twice
self.assertRaises(AssertionError, self.job.setup)
......
@@ -55,7 +55,7 @@ class TestRunnerQueue(unittest.TestCase):
'base_logdir': self.tmpdir}]
msg = self._run_test(factory)
self.assertEquals(msg['whiteboard'], 'ZGVmYXVsdCB3\n')
self.assertEqual(msg['whiteboard'], 'ZGVmYXVsdCB3\n')
def tearDown(self):
shutil.rmtree(self.tmpdir)
......
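
Finally, the assertEquals to assertEqual replacements throughout the tests are a cleanup: assertEquals is a deprecated alias in unittest and triggers a DeprecationWarning on current Python versions. A minimal illustration of the preferred spelling:

# assertEqual is the canonical unittest method; assertEquals is only a deprecated alias.
import unittest

class AliasExample(unittest.TestCase):
    def test_equal(self):
        self.assertEqual(1 + 1, 2)   # same check as assertEquals, without the warning

if __name__ == "__main__":
    unittest.main()
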