summaryrefslogtreecommitdiffstats
path: root/util/testrunner
diff options
context:
space:
mode:
Diffstat (limited to 'util/testrunner')
-rw-r--r--util/testrunner/README8
-rwxr-xr-xutil/testrunner/qt-testrunner.py34
-rw-r--r--util/testrunner/tests/qt_mock_test-log.xml50
-rwxr-xr-xutil/testrunner/tests/qt_mock_test.py212
-rwxr-xr-xutil/testrunner/tests/tst_testrunner.py551
5 files changed, 29 insertions, 826 deletions
diff --git a/util/testrunner/README b/util/testrunner/README
index 5758e325140..cb6722ac807 100644
--- a/util/testrunner/README
+++ b/util/testrunner/README
@@ -15,10 +15,4 @@ It offers the following functionality
The script itself has a testsuite that is simply run by invoking
-qtbase/util/testrunner/tests/tst_testrunner.py
-
-Please *run this manually* before submitting a change to qt-testrunner and
-make sure it's passing. The reason it does not run automatically during the
-usual qtbase test run, is because
-+ the test run should not depend on Python
-+ we don't want to wrap the testrunner tests with testrunner.
+qtbase/tests/auto/util/testrunner/tst_qt_testrunner.py
diff --git a/util/testrunner/qt-testrunner.py b/util/testrunner/qt-testrunner.py
index 6437c3fe7f1..1573534cee9 100755
--- a/util/testrunner/qt-testrunner.py
+++ b/util/testrunner/qt-testrunner.py
@@ -4,10 +4,9 @@
# !!!IMPORTANT!!! If you change anything to this script, run the testsuite
-# manually and make sure it still passes, as it doesn't run automatically.
-# Just execute the command line as such:
+# and make sure it still passes:
#
-# ./util/testrunner/tests/tst_testrunner.py -v [--debug]
+# qtbase/tests/auto/util/testrunner/tst_qt_testrunner.py -v [--debug]
#
# ======== qt-testrunner ========
#
@@ -92,6 +91,8 @@ class WhatFailed(NamedTuple):
class ReRunCrash(Exception):
pass
+class BadXMLCrash(Exception):
+ pass
# In the last test re-run, we add special verbosity arguments, in an attempt
@@ -255,7 +256,7 @@ def parse_log(results_file) -> WhatFailed:
root = tree.getroot()
if root.tag != "TestCase":
- raise AssertionError(
+ raise BadXMLCrash(
f"The XML test log must have <TestCase> as root tag, but has: <{root.tag}>")
failures = []
@@ -299,6 +300,12 @@ def parse_log(results_file) -> WhatFailed:
def run_test(arg_list: List[str], **kwargs):
+ if (os.environ.get("QT_TESTRUNNER_TESTING", "0") == "1"
+ and os.name == "nt"
+ and arg_list[0].endswith(".py")
+ ):
+ # For executing qt_mock_test.py under the same Python interpreter when testing.
+ arg_list = [ sys.executable ] + arg_list
L.debug("Running test command line: %s", arg_list)
proc = subprocess.run(arg_list, **kwargs)
L.info("Test process exited with code: %d", proc.returncode)
@@ -380,8 +387,19 @@ def rerun_failed_testcase(test_basename, testargs: List[str], output_dir: str,
proc = run_test(testargs + output_args + VERBOSE_ARGS + [failed_arg],
timeout=timeout,
env={**os.environ, **VERBOSE_ENV})
+ # There are platforms that run tests wrapped with some test-runner
+ # script that can possibly fail to extract a process exit code.
+ # Because of these cases, we *also* parse the XML file and signify
+ # CRASH in case of QFATAL/empty/corrupt result.
+ what_failed = parse_log(f"{pathname_stem}.xml")
+ if what_failed.qfatal_message:
+ raise ReRunCrash(f"CRASH! returncode:{proc.returncode} "
+ f"QFATAL:'{what_failed.qfatal_message}'")
if proc.returncode < 0 or proc.returncode >= 128:
raise ReRunCrash(f"CRASH! returncode:{proc.returncode}")
+ if proc.returncode == 0 and len(what_failed.failed_tests) > 0:
+ raise ReRunCrash("CRASH! returncode:0 but failures were found: "
+ f"{what_failed.failed_tests}")
if proc.returncode == 0:
n_passes += 1
if n_passes == passes_needed:
@@ -451,6 +469,8 @@ def main():
assert len(failed_functions) > 0 and retcode != 0
break # all is fine, goto re-running individual failed testcases
+ except AssertionError:
+ raise
except Exception as e:
L.error("exception:%s %s", type(e).__name__, e)
L.error("The test executable probably crashed, see above for details")
@@ -466,9 +486,11 @@ def main():
ret = rerun_failed_testcase(args.test_basename, args.testargs, args.log_dir,
test_result, args.max_repeats, args.passes_needed,
dryrun=args.dry_run, timeout=args.timeout)
- except ReRunCrash as e:
+ except AssertionError:
+ raise
+ except Exception as e:
L.error("exception:%s", e)
- L.error("The testcase re-run crashed, giving up")
+ L.error("The testcase re-run probably crashed, giving up")
sys.exit(3) # Test re-run CRASH
if not ret:
diff --git a/util/testrunner/tests/qt_mock_test-log.xml b/util/testrunner/tests/qt_mock_test-log.xml
deleted file mode 100644
index a164bec9f9c..00000000000
--- a/util/testrunner/tests/qt_mock_test-log.xml
+++ /dev/null
@@ -1,50 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<TestCase name="qt_mock_test">
- <Environment>
- <QtVersion>MOCK</QtVersion>
- <QtBuild>MOCK</QtBuild>
- <QTestVersion>6.3.0</QTestVersion>
- </Environment>
- <TestFunction name="initTestCase">
- <Incident type="{{initTestCase_result}}" file="" line="0" />
- <Duration msecs="0.00004"/>
- </TestFunction>
- <TestFunction name="always_pass">
- <Incident type="{{always_pass_result}}" file="" line="0" />
- <Duration msecs="0.71704"/>
- </TestFunction>
- <TestFunction name="always_fail">
- <Incident type="{{always_fail_result}}" file="" line="0" />
- <Duration msecs="0.828272"/>
- </TestFunction>
- <TestFunction name="always_crash">
- <Incident type="{{always_crash_result}}" file="" line="0" />
- <Duration msecs="0.828272"/>
- </TestFunction>
-
- <!-- The strings like this one "{{fail_then_pass:2_result}}"
- are just template strings that will be replaced by the test driver
- before each test. The colon doesn't have a special meaning.
- The datatags in the following tests are just "2", "5", "6".
- We don't strictly need datatags because the tests don't include
- specific testing for datatags. It's just that adding a couple
- of datatags to this XML template, complicates it a bit and
- tests somewhat that functionality as a side-effect.
- -->
- <TestFunction name="fail_then_pass">
- <Incident type="{{fail_then_pass:2_result}}" file="" line="0">
- <DataTag><![CDATA[2]]></DataTag>
- </Incident>
- <Incident type="{{fail_then_pass:5_result}}" file="" line="0">
- <DataTag><![CDATA[5]]></DataTag>
- </Incident>
- <Incident type="{{fail_then_pass:6_result}}" file="" line="0">
- <DataTag><![CDATA[6]]></DataTag>
- </Incident>
- </TestFunction>
- <TestFunction name="fail_then_crash">
- <Incident type="{{fail_then_crash_result}}" file="" line="0" />
- <Duration msecs="0.828272"/>
- </TestFunction>
- <Duration msecs="1904.9"/>
-</TestCase>
diff --git a/util/testrunner/tests/qt_mock_test.py b/util/testrunner/tests/qt_mock_test.py
deleted file mode 100755
index eb6e33727f8..00000000000
--- a/util/testrunner/tests/qt_mock_test.py
+++ /dev/null
@@ -1,212 +0,0 @@
-#!/usr/bin/env python3
-# Copyright (C) 2021 The Qt Company Ltd.
-# SPDX-License-Identifier: LicenseRef-Qt-Commercial OR GPL-3.0-only WITH Qt-GPL-exception-1.0
-
-
-# This is an artificial test, mimicking the Qt tests, for example tst_whatever.
-# Its purpose is to assist in testing qt-testrunner.py.
-#
-# Mode A:
-#
-# If invoked with a test function argument, it runs that test function.
-#
-# Usage:
-#
-# $0 always_pass
-# $0 always_fail
-# $0 always_crash
-# $0 fail_then_pass:N # where N is the number of failing runs before passing
-#
-# Needs environment variable:
-# + QT_MOCK_TEST_STATE_FILE :: points to a unique filename, to be written
-# for keeping the state of the fail_then_pass:N tests.
-#
-# Mode B:
-#
-# If invoked without any argument, it runs the tests listed in the
-# variable QT_MOCK_TEST_RUN_LIST. If variable is empty it just runs
-# the always_pass test. It also understands qtestlib's `-o outfile.xml,xml`
-# option for writing a mock testlog in a file. Requires environment variables:
-# + QT_MOCK_TEST_STATE_FILE :: See above
-# + QT_MOCK_TEST_XML_TEMPLATE_FILE :: may point to the template XML file
-# located in the same source directory. Without this variable, the
-# option `-o outfile.xml,xml` will be ignored.
-# + QT_MOCK_TEST_RUN_LIST :: may contain a comma-separated list of test
-# that should run.
-# + QT_MOCK_TEST_CRASH_CLEANLY :: if set to 1, then the executable will
-# crash (exit with a high exit code)
-# after successfully running the given tests and writing the XML logfile.
-
-
-
-import sys
-import os
-import traceback
-from tst_testrunner import write_xml_log
-
-
-MY_NAME = os.path.basename(sys.argv[0])
-STATE_FILE = None
-XML_TEMPLATE = None
-XML_OUTPUT_FILE = None
-CRASH_CLEANLY = False
-
-
-def crash():
- sys.exit(131)
-
-def put_failure(test_name):
- with open(STATE_FILE, "a") as f:
- f.write(test_name + "\n")
-def get_failures(test_name):
- n = 0
- try:
- with open(STATE_FILE) as f:
- for line in f:
- if line.strip() == test_name:
- n += 1
- except FileNotFoundError:
- return 0
- return n
-
-# Only care about the XML log output file.
-def parse_output_argument(a):
- global XML_OUTPUT_FILE
- if a.endswith(",xml"):
- XML_OUTPUT_FILE = a[:-4]
-
-# Strip qtestlib specific arguments.
-# Only care about the "-o ...,xml" argument.
-def clean_cmdline():
- args = []
- prev_arg = None
- skip_next_arg = True # Skip argv[0]
- for a in sys.argv:
- if skip_next_arg:
- if prev_arg == "-o":
- parse_output_argument(a)
- prev_arg = None
- skip_next_arg = False
- continue
- if a in ("-o", "-maxwarnings"):
- skip_next_arg = True
- prev_arg = a
- continue
- if a in ("-v1", "-v2", "-vs"):
- print("VERBOSE RUN")
- if "QT_LOGGING_RULES" in os.environ:
- print("Environment has QT_LOGGING_RULES:",
- os.environ["QT_LOGGING_RULES"])
- continue
- args.append(a)
- return args
-
-
-def log_test(testcase, result,
- testsuite=MY_NAME.rpartition(".")[0]):
- print("%-7s: %s::%s()" % (result, testsuite, testcase))
-
-# Return the exit code
-def run_test(testname):
- if testname == "initTestCase":
- exit_code = 1 # specifically test that initTestCase fails
- elif testname == "always_pass":
- exit_code = 0
- elif testname == "always_fail":
- exit_code = 1
- elif testname == "always_crash":
- exit_code = 131
- elif testname == "fail_then_crash":
- previous_fails = get_failures(testname)
- if previous_fails == 0:
- put_failure(testname)
- exit_code = 1
- else:
- exit_code = 131
- elif testname.startswith("fail_then_pass"):
- wanted_fails = int(testname.partition(":")[2])
- previous_fails = get_failures(testname)
- if previous_fails < wanted_fails:
- put_failure(testname)
- exit_code = 1
- else:
- exit_code = 0
- else:
- assert False, "Unknown argument: %s" % testname
-
- if exit_code == 0:
- log_test(testname, "PASS")
- elif exit_code == 1:
- log_test(testname, "FAIL!")
- else:
- log_test(testname, "CRASH!")
-
- return exit_code
-
-def no_args_run():
- try:
- run_list = os.environ["QT_MOCK_TEST_RUN_LIST"].split(",")
- except KeyError:
- run_list = ["always_pass"]
-
- total_result = True
- fail_list = []
- for test in run_list:
- test_exit_code = run_test(test)
- if test_exit_code not in (0, 1):
- crash()
- if test_exit_code != 0:
- fail_list.append(test)
- total_result = total_result and (test_exit_code == 0)
-
- if XML_OUTPUT_FILE:
- if XML_TEMPLATE:
- write_xml_log(XML_OUTPUT_FILE, failure=fail_list)
- # If the template is an empty file, then write an empty output file
- elif XML_TEMPLATE == "":
- with open(XML_OUTPUT_FILE, "w"):
- pass
-
- if CRASH_CLEANLY:
- # Crash despite all going well and writing all output files cleanly.
- crash()
-
- if total_result:
- sys.exit(0)
- else:
- sys.exit(1)
-
-
-def main():
- global STATE_FILE
- # Will fail if env var is not set.
- STATE_FILE = os.environ["QT_MOCK_TEST_STATE_FILE"]
-
- global XML_TEMPLATE
- if "QT_MOCK_TEST_XML_TEMPLATE_FILE" in os.environ:
- with open(os.environ["QT_MOCK_TEST_XML_TEMPLATE_FILE"]) as f:
- XML_TEMPLATE = f.read()
-
- global CRASH_CLEANLY
- if ("QT_MOCK_TEST_CRASH_CLEANLY" in os.environ
- and os.environ["QT_MOCK_TEST_CRASH_CLEANLY"] == "1"
- ):
- CRASH_CLEANLY = True
-
- args = clean_cmdline()
-
- if len(args) == 0:
- no_args_run()
- assert False, "Unreachable!"
- else:
- sys.exit(run_test(args[0]))
-
-
-# TODO write XPASS test that does exit(1)
-
-if __name__ == "__main__":
- try:
- main()
- except Exception as e:
- traceback.print_exc()
- exit(128) # Something went wrong with this script
diff --git a/util/testrunner/tests/tst_testrunner.py b/util/testrunner/tests/tst_testrunner.py
deleted file mode 100755
index 79aa0a824fc..00000000000
--- a/util/testrunner/tests/tst_testrunner.py
+++ /dev/null
@@ -1,551 +0,0 @@
-#!/usr/bin/env python3
-# Copyright (C) 2021 The Qt Company Ltd.
-# SPDX-License-Identifier: LicenseRef-Qt-Commercial OR GPL-3.0-only WITH Qt-GPL-exception-1.0
-
-
-import sys
-import os
-import re
-import glob
-import subprocess
-
-from subprocess import STDOUT, PIPE
-from tempfile import TemporaryDirectory, mkstemp
-
-MY_NAME = os.path.basename(__file__)
-my_dir = os.path.dirname(__file__)
-testrunner = os.path.join(my_dir, "..", "qt-testrunner.py")
-mock_test = os.path.join(my_dir, "qt_mock_test.py")
-xml_log_template = os.path.join(my_dir, "qt_mock_test-log.xml")
-
-with open(xml_log_template) as f:
- XML_TEMPLATE = f.read()
-
-
-import unittest
-
-def setUpModule():
- global TEMPDIR
- TEMPDIR = TemporaryDirectory(prefix="tst_testrunner-")
-
- global EMPTY_FILE
- EMPTY_FILE = os.path.join(TEMPDIR.name, "EMPTY")
- with open(EMPTY_FILE, "w") as f:
- pass
-
- filename = os.path.join(TEMPDIR.name, "file_1")
- print("setUpModule(): setting up temporary directory and env var"
- " QT_MOCK_TEST_STATE_FILE=" + filename + " and"
- " QT_MOCK_TEST_XML_TEMPLATE_FILE=" + xml_log_template)
-
- os.environ["QT_MOCK_TEST_STATE_FILE"] = filename
- os.environ["QT_MOCK_TEST_XML_TEMPLATE_FILE"] = xml_log_template
-
-def tearDownModule():
- print("\ntearDownModule(): Cleaning up temporary directory:",
- TEMPDIR.name)
- del os.environ["QT_MOCK_TEST_STATE_FILE"]
- TEMPDIR.cleanup()
-
-
-# Helper to run a command and always capture output
-def run(*args, **kwargs):
- if DEBUG:
- print("Running: ", args, flush=True)
- proc = subprocess.run(*args, stdout=PIPE, stderr=STDOUT, **kwargs)
- if DEBUG and proc.stdout:
- print(proc.stdout.decode(), flush=True)
- return proc
-
-# Helper to run qt-testrunner.py with proper testing arguments.
-def run_testrunner(xml_filename=None, testrunner_args=None,
- wrapper_script=None, wrapper_args=None,
- qttest_args=None, env=None):
-
- args = [ testrunner ]
- if xml_filename:
- args += [ "--parse-xml-testlog", xml_filename ]
- if testrunner_args:
- args += testrunner_args
-
- if wrapper_script:
- args += [ wrapper_script ]
- if wrapper_args:
- args += wrapper_args
-
- args += [ mock_test ]
- if qttest_args:
- args += qttest_args
-
- return run(args, env=env)
-
-# Write the XML_TEMPLATE to filename, replacing the templated results.
-def write_xml_log(filename, failure=None, inject_message=None):
- data = XML_TEMPLATE
- if failure is None:
- failure = []
- elif isinstance(failure, str):
- failure = [ failure ]
- # Replace what was asked to fail with "fail"
- for x in failure:
- data = data.replace("{{" + x + "_result}}", "fail")
- # Replace the rest with "pass"
- data = re.sub(r"{{[^}]+}}", "pass", data)
- # Inject possible <Message> tags inside the first <TestFunction>
- if inject_message:
- i = data.index("</TestFunction>")
- data = data[:i] + inject_message + data[i:]
- with open(filename, "w") as f:
- f.write(data)
-
-
-# Test that qt_mock_test.py behaves well. This is necessary to properly
-# test qt-testrunner.
-class Test_qt_mock_test(unittest.TestCase):
- def setUp(self):
- state_file = os.environ["QT_MOCK_TEST_STATE_FILE"]
- if os.path.exists(state_file):
- os.remove(state_file)
- def assertProcessCrashed(self, proc):
- if DEBUG:
- print("process returncode is:", proc.returncode)
- self.assertTrue(proc.returncode < 0 or
- proc.returncode >= 128)
-
- def test_always_pass(self):
- proc = run([mock_test, "always_pass"])
- self.assertEqual(proc.returncode, 0)
- def test_always_fail(self):
- proc = run([mock_test, "always_fail"])
- self.assertEqual(proc.returncode, 1)
- def test_fail_then_pass_2(self):
- proc = run([mock_test, "fail_then_pass:2"])
- self.assertEqual(proc.returncode, 1)
- proc = run([mock_test, "fail_then_pass:2"])
- self.assertEqual(proc.returncode, 1)
- proc = run([mock_test, "fail_then_pass:2"])
- self.assertEqual(proc.returncode, 0)
- def test_fail_then_pass_1(self):
- proc = run([mock_test, "fail_then_pass:1"])
- self.assertEqual(proc.returncode, 1)
- proc = run([mock_test, "fail_then_pass:1"])
- self.assertEqual(proc.returncode, 0)
- def test_fail_then_pass_many_tests(self):
- proc = run([mock_test, "fail_then_pass:1"])
- self.assertEqual(proc.returncode, 1)
- proc = run([mock_test, "fail_then_pass:2"])
- self.assertEqual(proc.returncode, 1)
- proc = run([mock_test, "fail_then_pass:1"])
- self.assertEqual(proc.returncode, 0)
- proc = run([mock_test, "fail_then_pass:2"])
- self.assertEqual(proc.returncode, 1)
- proc = run([mock_test, "fail_then_pass:2"])
- self.assertEqual(proc.returncode, 0)
- def test_fail_then_crash(self):
- proc = run([mock_test, "fail_then_crash"])
- self.assertEqual(proc.returncode, 1)
- proc = run([mock_test, "fail_then_crash"])
- self.assertProcessCrashed(proc)
- def test_xml_file_is_written(self):
- filename = os.path.join(TEMPDIR.name, "testlog.xml")
- proc = run([mock_test, "-o", filename+",xml"])
- self.assertEqual(proc.returncode, 0)
- self.assertTrue(os.path.exists(filename))
- self.assertGreater(os.path.getsize(filename), 0)
- os.remove(filename)
- # Test it will write an empty XML file if template is empty
- def test_empty_xml_file_is_written(self):
- my_env = {
- "QT_MOCK_TEST_STATE_FILE": os.environ["QT_MOCK_TEST_STATE_FILE"],
- "QT_MOCK_TEST_XML_TEMPLATE_FILE": EMPTY_FILE
- }
- filename = os.path.join(TEMPDIR.name, "testlog.xml")
- proc = run([mock_test, "-o", filename+",xml"],
- env=my_env)
- self.assertEqual(proc.returncode, 0)
- self.assertTrue(os.path.exists(filename))
- self.assertEqual(os.path.getsize(filename), 0)
- os.remove(filename)
- def test_crash_cleanly(self):
- proc = run(mock_test,
- env= os.environ | {"QT_MOCK_TEST_CRASH_CLEANLY":"1"} )
- if DEBUG:
- print("returncode:", proc.returncode)
- self.assertProcessCrashed(proc)
-
-
-# Test regular invocations of qt-testrunner.
-class Test_testrunner(unittest.TestCase):
- def setUp(self):
- state_file = os.environ["QT_MOCK_TEST_STATE_FILE"]
- if os.path.exists(state_file):
- os.remove(state_file)
- # The mock_test honors only the XML output arguments, the rest are ignored.
- old_logfiles = glob.glob(os.path.basename(mock_test) + "*.xml",
- root_dir=TEMPDIR.name)
- for fname in old_logfiles:
- os.remove(os.path.join(TEMPDIR.name, fname))
- self.env = dict()
- self.env["QT_MOCK_TEST_XML_TEMPLATE_FILE"] = os.environ["QT_MOCK_TEST_XML_TEMPLATE_FILE"]
- self.env["QT_MOCK_TEST_STATE_FILE"] = state_file
- self.testrunner_args = [ "--log-dir", TEMPDIR.name ]
- def prepare_env(self, run_list=None):
- if run_list is not None:
- self.env['QT_MOCK_TEST_RUN_LIST'] = ",".join(run_list)
- def run2(self):
- return run_testrunner(testrunner_args=self.testrunner_args, env=self.env)
-
- def test_simple_invocation(self):
- # All tests pass.
- proc = self.run2()
- self.assertEqual(proc.returncode, 0)
- def test_always_pass(self):
- self.prepare_env(run_list=["always_pass"])
- proc = self.run2()
- self.assertEqual(proc.returncode, 0)
- def test_output_files_are_generated(self):
- proc = self.run2()
- xml_output_files = glob.glob(os.path.basename(mock_test) + "-*[0-9].xml",
- root_dir=TEMPDIR.name)
- if DEBUG:
- print("Output files found: ",
- xml_output_files)
- self.assertEqual(len(xml_output_files), 1)
- def test_always_fail(self):
- self.prepare_env(run_list=["always_fail"])
- proc = self.run2()
- # TODO verify that re-runs==max_repeats
- self.assertEqual(proc.returncode, 2)
- def test_flaky_pass_1(self):
- self.prepare_env(run_list=["always_pass,fail_then_pass:1"])
- proc = self.run2()
- self.assertEqual(proc.returncode, 0)
- def test_flaky_pass_5(self):
- self.prepare_env(run_list=["always_pass,fail_then_pass:1,fail_then_pass:5"])
- proc = self.run2()
- self.assertEqual(proc.returncode, 0)
- def test_flaky_fail(self):
- self.prepare_env(run_list=["always_pass,fail_then_pass:6"])
- proc = self.run2()
- self.assertEqual(proc.returncode, 2)
- def test_flaky_pass_fail(self):
- self.prepare_env(run_list=["always_pass,fail_then_pass:1,fail_then_pass:6"])
- proc = self.run2()
- # TODO verify that one func was re-run and passed but the other failed.
- self.assertEqual(proc.returncode, 2)
- def test_initTestCase_fail_crash(self):
- self.prepare_env(run_list=["initTestCase,always_pass"])
- proc = self.run2()
- self.assertEqual(proc.returncode, 3)
- def test_fail_then_crash(self):
- self.prepare_env(run_list=["fail_then_crash"])
- proc = self.run2()
- self.assertEqual(proc.returncode, 3)
-
- # If no XML file is found by qt-testrunner, it is usually considered a
- # CRASH and the whole test is re-run. Even when the return code is zero.
- # It is a PASS only if the test is not capable of XML output (see no_extra_args, TODO test it).
- def test_no_xml_log_written_pass_crash(self):
- del self.env["QT_MOCK_TEST_XML_TEMPLATE_FILE"]
- self.prepare_env(run_list=["always_pass"])
- proc = self.run2()
- self.assertEqual(proc.returncode, 3)
- # On the 2nd iteration of the full test, both of the tests pass.
- # Still it's a CRASH because no XML file was found.
- def test_no_xml_log_written_fail_then_pass_crash(self):
- del self.env["QT_MOCK_TEST_XML_TEMPLATE_FILE"]
- self.prepare_env(run_list=["always_pass,fail_then_pass:1"])
- proc = self.run2()
- # TODO verify that the whole test has run twice.
- self.assertEqual(proc.returncode, 3)
- # Even after 2 iterations of the full test we still get failures but no XML file,
- # and this is considered a CRASH.
- def test_no_xml_log_written_crash(self):
- del self.env["QT_MOCK_TEST_XML_TEMPLATE_FILE"]
- self.prepare_env(run_list=["fail_then_pass:2"])
- proc = self.run2()
- self.assertEqual(proc.returncode, 3)
-
- def test_empty_xml_crash_1(self):
- self.env["QT_MOCK_TEST_XML_TEMPLATE_FILE"] = EMPTY_FILE
- self.prepare_env(run_list=["always_pass"])
- proc = self.run2()
- self.assertEqual(proc.returncode, 3)
- def test_empty_xml_crash_2(self):
- self.env["QT_MOCK_TEST_XML_TEMPLATE_FILE"] = EMPTY_FILE
- self.prepare_env(run_list=["always_fail"])
- proc = self.run2()
- self.assertEqual(proc.returncode, 3)
-
- # test qFatal should be a crash in all cases.
- def test_qfatal_crash_1(self):
- fatal_xml_message = """
- <Message type="qfatal" file="" line="0">
- <DataTag><![CDATA[modal]]></DataTag>
- <Description><![CDATA[Failed to initialize graphics backend for OpenGL.]]></Description>
- </Message>
- """
- logfile = os.path.join(TEMPDIR.name, os.path.basename(mock_test) + ".xml")
- write_xml_log(logfile, failure=None, inject_message=fatal_xml_message)
- del self.env["QT_MOCK_TEST_XML_TEMPLATE_FILE"]
- self.env["QT_TESTRUNNER_DEBUG_NO_UNIQUE_OUTPUT_FILENAME"] = "1"
- self.prepare_env(run_list=["always_pass"])
- proc = self.run2()
- self.assertEqual(proc.returncode, 3)
- def test_qfatal_crash_2(self):
- fatal_xml_message = """
- <Message type="qfatal" file="" line="0">
- <DataTag><![CDATA[modal]]></DataTag>
- <Description><![CDATA[Failed to initialize graphics backend for OpenGL.]]></Description>
- </Message>
- """
- logfile = os.path.join(TEMPDIR.name, os.path.basename(mock_test) + ".xml")
- write_xml_log(logfile, failure="always_fail", inject_message=fatal_xml_message)
- del self.env["QT_MOCK_TEST_XML_TEMPLATE_FILE"]
- self.env["QT_TESTRUNNER_DEBUG_NO_UNIQUE_OUTPUT_FILENAME"] = "1"
- self.prepare_env(run_list=["always_pass,always_fail"])
- proc = self.run2()
- self.assertEqual(proc.returncode, 3)
-
- def test_qwarn_is_ignored_1(self):
- qwarn_xml_message = """
- <Message type="qwarn" file="" line="0">
- <DataTag><![CDATA[modal]]></DataTag>
- <Description><![CDATA[Failed to create RHI (backend 2)]]></Description>
- </Message>
- """
- logfile = os.path.join(TEMPDIR.name, os.path.basename(mock_test) + ".xml")
- write_xml_log(logfile, failure=None, inject_message=qwarn_xml_message)
- del self.env["QT_MOCK_TEST_XML_TEMPLATE_FILE"]
- self.env["QT_TESTRUNNER_DEBUG_NO_UNIQUE_OUTPUT_FILENAME"] = "1"
- self.prepare_env(run_list=["always_pass"])
- proc = self.run2()
- self.assertEqual(proc.returncode, 0)
- def test_qwarn_is_ignored_2(self):
- fatal_xml_message = """
- <Message type="qfatal" file="" line="0">
- <DataTag><![CDATA[modal]]></DataTag>
- <Description><![CDATA[Failed to initialize graphics backend for OpenGL.]]></Description>
- </Message>
- <Message type="qwarn" file="" line="0">
- <DataTag><![CDATA[modal]]></DataTag>
- <Description><![CDATA[Failed to create RHI (backend 2)]]></Description>
- </Message>
- """
- logfile = os.path.join(TEMPDIR.name, os.path.basename(mock_test) + ".xml")
- write_xml_log(logfile, failure=None, inject_message=fatal_xml_message)
- del self.env["QT_MOCK_TEST_XML_TEMPLATE_FILE"]
- self.env["QT_TESTRUNNER_DEBUG_NO_UNIQUE_OUTPUT_FILENAME"] = "1"
- self.prepare_env(run_list=["always_pass"])
- proc = self.run2()
- self.assertEqual(proc.returncode, 3)
-
- # If a test returns success but XML contains failures, it's a CRASH.
- def test_wrong_xml_log_written_1_crash(self):
- logfile = os.path.join(TEMPDIR.name, os.path.basename(mock_test) + ".xml")
- write_xml_log(logfile, failure="always_fail")
- del self.env["QT_MOCK_TEST_XML_TEMPLATE_FILE"]
- self.env["QT_TESTRUNNER_DEBUG_NO_UNIQUE_OUTPUT_FILENAME"] = "1"
- self.prepare_env(run_list=["always_pass"])
- proc = self.run2()
- self.assertEqual(proc.returncode, 3)
- # If a test returns failure but XML contains only pass, it's a CRASH.
- def test_wrong_xml_log_written_2_crash(self):
- logfile = os.path.join(TEMPDIR.name, os.path.basename(mock_test) + ".xml")
- write_xml_log(logfile)
- del self.env["QT_MOCK_TEST_XML_TEMPLATE_FILE"]
- self.env["QT_TESTRUNNER_DEBUG_NO_UNIQUE_OUTPUT_FILENAME"] = "1"
- self.prepare_env(run_list=["always_fail"])
- proc = self.run2()
- self.assertEqual(proc.returncode, 3)
-
- def create_wrapper(self, filename, content=None):
- if not content:
- content='exec "$@"'
- filename = os.path.join(TEMPDIR.name, filename)
- with open(filename, "w") as f:
- f.write(f'#!/bin/sh\n{content}\n')
- self.wrapper_script = f.name
- os.chmod(self.wrapper_script, 0o700)
-
- # Test that it re-runs the full executable in case of crash, even if the
- # XML file is valid and shows one specific test failing.
- def test_crash_reruns_full_QTQAINFRA_5226(self):
- self.env["QT_MOCK_TEST_RUN_LIST"] = "always_fail"
- # Tell qt_mock_test to crash after writing a proper XML file.
- self.env["QT_MOCK_TEST_CRASH_CLEANLY"] = "1"
- proc = self.run2()
- # Verify qt-testrunner exited with 3 which means CRASH.
- self.assertEqual(proc.returncode, 3)
- # Verify that a full executable re-run happened that re-runs 2 times,
- # instead of individual functions that re-run 5 times.
- xml_output_files = glob.glob(os.path.basename(mock_test) + "-*[0-9].xml",
- root_dir=TEMPDIR.name)
- if DEBUG:
- print("XML output files found: ", xml_output_files)
- self.assertEqual(len(xml_output_files), 2)
-
- # Test that qt-testrunner detects the correct executable name even if we
- # use a special wrapper script, and that it uses that in the XML log filename.
- @unittest.skipUnless(os.name == "posix", "Wrapper script needs POSIX shell")
- def test_wrapper(self):
- self.create_wrapper("coin_vxworks_qemu_runner.sh")
- proc = run_testrunner(wrapper_script=self.wrapper_script,
- testrunner_args=["--log-dir",TEMPDIR.name],
- env=self.env)
- self.assertEqual(proc.returncode, 0)
- xml_output_files = glob.glob(os.path.basename(mock_test) + "-*[0-9].xml",
- root_dir=TEMPDIR.name)
- if DEBUG:
- print("XML output files found: ", xml_output_files)
- self.assertEqual(len(xml_output_files), 1)
-
- # The "androidtestrunner" wrapper is special. It expects the QTest arguments after "--".
- # So our mock androidtestrunner wrapper ignores everything before "--"
- # and executes our hardcoded mock_test with the arguments that follow.
- def create_mock_anroidtestrunner_wrapper(self):
- self.create_wrapper("androidtestrunner", content=
- 'while [ "$1" != "--" ]; do shift; done; shift; exec {} "$@"'.format(mock_test))
-
- @unittest.skipUnless(os.name == "posix", "Wrapper script needs POSIX shell")
- def test_androidtestrunner_with_aab(self):
- self.create_mock_anroidtestrunner_wrapper()
- # Copied from our CI logs. The only relevant option is --aab.
- androidtestrunner_args= [
- '--path', '/home/qt/work/qt/qtdeclarative_standalone_tests/tests/auto/quickcontrols/qquickpopup/android-build-tst_qquickpopup',
- '--adb', '/opt/android/sdk/platform-tools/adb', '--skip-install-root',
- '--ndk-stack', '/opt/android/android-ndk-r27c/ndk-stack',
- '--manifest', '/home/qt/work/qt/qtdeclarative_standalone_tests/tests/auto/quickcontrols/qquickpopup/android-build-tst_qquickpopup/app/AndroidManifest.xml',
- '--make', '"/opt/cmake-3.30.5/bin/cmake" --build /home/qt/work/qt/qtdeclarative_standalone_tests --target tst_qquickpopup_make_aab',
- '--aab', '/home/qt/work/qt/qtdeclarative_standalone_tests/tests/auto/quickcontrols/qquickpopup/android-build-tst_qquickpopup/tst_qquickpopup.aab',
- '--bundletool', '/opt/bundletool/bundletool', '--timeout', '1425'
- ]
- # In COIN CI, TESTRUNNER="qt-testrunner.py --". That's why we append "--".
- proc = run_testrunner(testrunner_args=["--log-dir", TEMPDIR.name, "--"],
- wrapper_script=self.wrapper_script,
- wrapper_args=androidtestrunner_args,
- env=self.env)
- self.assertEqual(proc.returncode, 0)
- xml_output_files = glob.glob("tst_qquickpopup-*[0-9].xml",
- root_dir=TEMPDIR.name)
- if DEBUG:
- print("XML output files found: ", xml_output_files)
- self.assertEqual(len(xml_output_files), 1)
- # similar to above but with "--apk"
- @unittest.skipUnless(os.name == "posix", "Wrapper script needs POSIX shell")
- def test_androidtestrunner_with_apk(self):
- self.create_mock_anroidtestrunner_wrapper()
- androidtestrunner_args= ['--blah', '--apk', '/whatever/waza.apk', 'blue']
- proc = run_testrunner(testrunner_args=["--log-dir", TEMPDIR.name, "--"],
- wrapper_script=self.wrapper_script,
- wrapper_args=androidtestrunner_args,
- env=self.env)
- self.assertEqual(proc.returncode, 0)
- xml_output_files = glob.glob("waza-*[0-9].xml",
- root_dir=TEMPDIR.name)
- if DEBUG:
- print("XML output files found: ", xml_output_files)
- self.assertEqual(len(xml_output_files), 1)
- # similar to above but with neither "--apk" nor "--aab". qt-testrunner throws error.
- @unittest.skipUnless(os.name == "posix", "Wrapper script needs POSIX shell")
- def test_androidtestrunner_fail_to_detect_filename(self):
- self.create_mock_anroidtestrunner_wrapper()
- androidtestrunner_args= ['--blah', '--argh', '/whatever/waza.apk', 'waza.aab']
- proc = run_testrunner(testrunner_args=["--log-dir", TEMPDIR.name, "--"],
- wrapper_script=self.wrapper_script,
- wrapper_args=androidtestrunner_args,
- env=self.env)
- self.assertEqual(proc.returncode, 1)
- xml_output_files = glob.glob("waza-*[0-9].xml",
- root_dir=TEMPDIR.name)
- if DEBUG:
- print("XML output files found: ", xml_output_files)
- self.assertEqual(len(xml_output_files), 0)
-
-
-# Test qt-testrunner script with an existing XML log file:
-# qt-testrunner.py qt_mock_test.py --parse-xml-testlog file.xml
-# qt-testrunner should repeat the testcases that are logged as
-# failures and fail or pass depending on how the testcases behave.
-# Different XML files are generated for the following test cases.
-# + No failure logged. qt-testrunner should exit(0)
-# + The "always_pass" test has failed. qt-testrunner should exit(0).
-# + The "always_fail" test has failed. qt-testrunner should exit(2).
-# + The "always_crash" test has failed. qt-testrunner should exit(3)
-# since the re-run will crash.
-# + The "fail_then_pass:2" test failed. qt-testrunner should exit(0).
-# + The "fail_then_pass:5" test failed. qt-testrunner should exit(2).
-# + The "initTestCase" failed which is listed as NO_RERUN thus
-# qt-testrunner should exit(3).
-class Test_testrunner_with_xml_logfile(unittest.TestCase):
- # Runs before every single test function, creating a unique temp file.
- def setUp(self):
- (_handle, self.xml_file) = mkstemp(
- suffix=".xml", prefix="qt_mock_test-log-",
- dir=TEMPDIR.name)
- if os.path.exists(os.environ["QT_MOCK_TEST_STATE_FILE"]):
- os.remove(os.environ["QT_MOCK_TEST_STATE_FILE"])
- def tearDown(self):
- os.remove(self.xml_file)
- del self.xml_file
-
- def test_no_failure(self):
- write_xml_log(self.xml_file, failure=None)
- proc = run_testrunner(self.xml_file)
- self.assertEqual(proc.returncode, 0)
- def test_always_pass_failed(self):
- write_xml_log(self.xml_file, failure="always_pass")
- proc = run_testrunner(self.xml_file)
- self.assertEqual(proc.returncode, 0)
- def test_always_pass_failed_max_repeats_0(self):
- write_xml_log(self.xml_file, failure="always_pass")
- proc = run_testrunner(self.xml_file,
- testrunner_args=["--max-repeats", "0"])
- self.assertEqual(proc.returncode, 2)
- def test_always_fail_failed(self):
- write_xml_log(self.xml_file, failure="always_fail")
- proc = run_testrunner(self.xml_file)
- self.assertEqual(proc.returncode, 2)
- # Assert that one of the re-runs was in verbose mode
- matches = re.findall("VERBOSE RUN",
- proc.stdout.decode())
- self.assertEqual(len(matches), 1)
- # Assert that the environment was altered too
- self.assertIn("QT_LOGGING_RULES", proc.stdout.decode())
- def test_always_crash_crashed(self):
- write_xml_log(self.xml_file, failure="always_crash")
- proc = run_testrunner(self.xml_file)
- self.assertEqual(proc.returncode, 3)
- def test_fail_then_pass_2_failed(self):
- write_xml_log(self.xml_file, failure="fail_then_pass:2")
- proc = run_testrunner(self.xml_file)
- self.assertEqual(proc.returncode, 0)
- def test_fail_then_pass_5_failed(self):
- write_xml_log(self.xml_file, failure="fail_then_pass:5")
- proc = run_testrunner(self.xml_file)
- self.assertEqual(proc.returncode, 2)
- def test_with_two_failures(self):
- write_xml_log(self.xml_file,
- failure=["always_pass", "fail_then_pass:2"])
- proc = run_testrunner(self.xml_file)
- self.assertEqual(proc.returncode, 0)
- # Check that test output is properly interleaved with qt-testrunner's logging.
- matches = re.findall(r"(PASS|FAIL!).*\n.*Test process exited with code",
- proc.stdout.decode())
- self.assertEqual(len(matches), 4)
- def test_initTestCase_fail_crash(self):
- write_xml_log(self.xml_file, failure="initTestCase")
- proc = run_testrunner(self.xml_file)
- self.assertEqual(proc.returncode, 3)
-
-
-if __name__ == "__main__":
-
- DEBUG = False
- if "--debug" in sys.argv:
- sys.argv.remove("--debug")
- DEBUG = True
-
- # We set failfast=True as we do not want the test suite to continue if the
- # tests of qt_mock_test failed. The next ones depend on it.
- unittest.main(failfast=True)