diff options
Diffstat (limited to 'util')
 -rw-r--r--  util/testrunner/README                      |   8
 -rwxr-xr-x  util/testrunner/qt-testrunner.py            | 155
 -rw-r--r--  util/testrunner/tests/qt_mock_test-log.xml  |  36
 -rwxr-xr-x  util/testrunner/tests/qt_mock_test.py       | 182
 -rwxr-xr-x  util/testrunner/tests/tst_testrunner.py     | 401
5 files changed, 119 insertions, 663 deletions
diff --git a/util/testrunner/README b/util/testrunner/README index 5758e325140..cb6722ac807 100644 --- a/util/testrunner/README +++ b/util/testrunner/README @@ -15,10 +15,4 @@ It offers the following functionality The script itself has a testsuite that is simply run by invoking -qtbase/util/testrunner/tests/tst_testrunner.py - -Please *run this manually* before submitting a change to qt-testrunner and -make sure it's passing. The reason it does not run automatically during the -usual qtbase test run, is because -+ the test run should not depend on Python -+ we don't want to wrap the testrunner tests with testrunner. +qtbase/tests/auto/util/testrunner/tst_qt_testrunner.py diff --git a/util/testrunner/qt-testrunner.py b/util/testrunner/qt-testrunner.py index 41e81e83122..1573534cee9 100755 --- a/util/testrunner/qt-testrunner.py +++ b/util/testrunner/qt-testrunner.py @@ -4,10 +4,9 @@ # !!!IMPORTANT!!! If you change anything to this script, run the testsuite -# manually and make sure it still passes, as it doesn't run automatically. -# Just execute the command line as such: +# and make sure it still passes: # -# ./util/testrunner/tests/tst_testrunner.py -v [--debug] +# qtbase/tests/auto/util/testrunner/tst_qt_testrunner.py -v [--debug] # # ======== qt-testrunner ======== # @@ -15,24 +14,44 @@ # tst_whatever, and tries to iron out unpredictable test failures. # In particular: # -# + Appends output argument to it: "-o tst_whatever.xml,xml" -# + Checks the exit code. If it is zero, the script exits with zero, -# otherwise proceeds. -# + Reads the XML test log and Understands exactly which function -# of the test failed. -# + If no XML file is found or was invalid, the test executable -# probably CRASHed, so we *re-run the full test once again*. -# + If some testcases failed it executes only those individually -# until they pass, or until max-repeats times is reached. +# + Append output argument to it: "-o tst_whatever.xml,xml" and +# execute it. +# + Save the exit code. 
+# - If it is <0 or >=128 (see NOTE_2), mark the test run as CRASH. +# + Read the XML test log and find exactly which functions +# of the test FAILed. +# + Mark the test run as CRASH, if: +# - no XML file is found, +# - or an invalid XML file is found, +# - or the XML contains a QFatal message: <Message type="qfatal"> +# - or no test FAILures are listed in the XML but the saved +# exit code is not 0. +# + If, based on the rules above, the test run is marked as CRASH, +# then *re-run the full test once again* and start this logic over. +# If we are on the 2nd run and CRASH happens again, then exit(3). +# + Examine the saved exit code: +# if it is 0, then exit(0) (success, all tests have PASSed). +# + Otherwise, some testcases failed, so execute only those individually +# until they pass, or until max-repeats (default: 5) times is reached. # # The regular way to use is to set the environment variable TESTRUNNER to -# point to this script before invoking ctest. +# point to this script before invoking ctest. In COIN CI it is set as +# TESTRUNNER="qt-testrunner.py --" to stop it from parsing further args. # # NOTE: this script is crafted specifically for use with Qt tests and for # using it in Qt's CI. For example it detects and acts specially if test # executable is "tst_selftests" or "androidtestrunner". It also detects # env var "COIN_CTEST_RESULTSDIR" and uses it as log-dir. # +# NOTE_2: Why is qt-testrunner considering exit code outside [0,127] as CRASH? +# On Linux, Python subprocess module returns positive `returncode` +# (255 for example), even if the child does exit(-1 for example). It +# returns negative `returncode` only if the child is killed by a signal. +# Qt-testrunner wants to catch both of these cases as CRASH. +# On Windows, a crash is usually accompanied by exitcode >= 0xC0000000. +# Finally, QTest is limiting itself to exit codes in [0,127] +# so anything outside that range is abnormal, thus treated as CRASH. +# # TODO implement --dry-run. 
# Exit codes of this script: @@ -63,9 +82,17 @@ from pprint import pprint from typing import NamedTuple, Tuple, List, Optional # Define a custom type for returning a fail incident -class WhatFailed(NamedTuple): +class TestResult(NamedTuple): func: str tag: Optional[str] = None +class WhatFailed(NamedTuple): + qfatal_message: Optional[str] = None + failed_tests: List[TestResult] = [] + +class ReRunCrash(Exception): + pass +class BadXMLCrash(Exception): + pass # In the last test re-run, we add special verbosity arguments, in an attempt @@ -83,9 +110,11 @@ NO_RERUN_FUNCTIONS = { # not try to append "-o" to their command-line or re-run failed testcases. # Only add tests here if absolutely necessary! NON_XML_GENERATING_TESTS = { - "tst_selftests", # qtestlib's selftests are using an external test framework (Catch) that does not support -o argument - "tst_QDoc", # Some of QDoc's tests are using an external test framework (Catch) that does not support -o argument - "tst_QDoc_Catch_Generators", # Some of QDoc's tests are using an external test framework (Catch) that does not support -o argument + # These tests use an external test framework (Catch) that doesn't support + # QtTest's -o argument. + "tst_selftests", + "tst_QDoc", + "tst_QDoc_Catch_Generators", } # These are scripts that are used to wrap test execution for special platforms. # They need special handling (most times just skipping the wrapper name in argv[]). @@ -131,6 +160,9 @@ Default flags: --max-repeats 5 --passes-needed 1 " -o log_file.xml -v2 -vs. This will disable some functionality like the" " failed test repetition and the verbose output on failure. This is" " activated by default when TESTARGS is tst_selftests.") + # TODO parser.parse_args(args=sys.argv[0:cmd_index]). + # Where cmd_index is either the first positional argument, or the argument right after "--". + # This way it won't interpet arguments after the first positional arg. 
args = parser.parse_args() args.self_name = os.path.basename(sys.argv[0]) args.specific_extra_args = [] @@ -198,11 +230,13 @@ Default flags: --max-repeats 5 --passes-needed 1 return args -def parse_log(results_file) -> List[WhatFailed]: - """Parse the XML test log file. Return the failed testcases, if any. +def parse_log(results_file) -> WhatFailed: + """ + Parse the XML test log file. Return the failed testcases, if any, + and the first qfatal message possibly printed. Failures are considered the "fail" and "xpass" incidents. - A testcase is a function with an optional data tag.""" + """ start_timer = timeit.default_timer() try: @@ -222,10 +256,12 @@ def parse_log(results_file) -> List[WhatFailed]: root = tree.getroot() if root.tag != "TestCase": - raise AssertionError( + raise BadXMLCrash( f"The XML test log must have <TestCase> as root tag, but has: <{root.tag}>") failures = [] + qfatal_message = None + n_passes = 0 for e1 in root: if e1.tag == "TestFunction": @@ -233,23 +269,43 @@ def parse_log(results_file) -> List[WhatFailed]: if e2.tag == "Incident": if e2.attrib["type"] in ("fail", "xpass"): func = e1.attrib["name"] + datatag = None e3 = e2.find("DataTag") # every <Incident> might have a <DataTag> if e3 is not None: - failures.append(WhatFailed(func, tag=e3.text)) - else: - failures.append(WhatFailed(func)) + datatag = e3.text + failures.append(TestResult(func, datatag)) else: n_passes += 1 + # Use iter() here to _recursively_ search root for <Message>, + # as we don't trust that messages are always at the same depth. 
+ for message_tag in root.iter(tag="Message"): + messagetype = message_tag.get("type") + if messagetype == "qfatal": + message_desc = message_tag.find("Description") + if message_desc is not None: + qfatal_message = message_desc.text + else: + qfatal_message = "--EMPTY QFATAL--" + L.warning("qFatal message ('%s') found in the XML, treating this run as a CRASH!", + qfatal_message) + break + end_timer = timeit.default_timer() t = end_timer - start_timer L.info(f"Parsed XML file {results_file} in {t:.3f} seconds") L.info(f"Found {n_passes} passes and {len(failures)} failures") - return failures + return WhatFailed(qfatal_message, failures) def run_test(arg_list: List[str], **kwargs): + if (os.environ.get("QT_TESTRUNNER_TESTING", "0") == "1" + and os.name == "nt" + and arg_list[0].endswith(".py") + ): + # For executing qt_mock_test.py under the same Python interpreter when testing. + arg_list = [ sys.executable ] + arg_list L.debug("Running test command line: %s", arg_list) proc = subprocess.run(arg_list, **kwargs) L.info("Test process exited with code: %d", proc.returncode) @@ -257,6 +313,11 @@ def run_test(arg_list: List[str], **kwargs): return proc def unique_filename(test_basename: str) -> str: + + # Hidden env var for testing, enforcing a predictable, non-unique filename. + if os.environ.get("QT_TESTRUNNER_DEBUG_NO_UNIQUE_OUTPUT_FILENAME"): + return f"{test_basename}" + timestamp = round(time.time() * 1000) return f"{test_basename}-{timestamp}" @@ -291,18 +352,19 @@ def run_full_test(test_basename, testargs: List[str], output_dir: str, def rerun_failed_testcase(test_basename, testargs: List[str], output_dir: str, - what_failed: WhatFailed, + testcase: TestResult, max_repeats, passes_needed, dryrun=False, timeout=None) -> bool: """Run a specific function:tag of a test, until it passes enough times, or until max_repeats is reached. Return True if it passes eventually, False if it fails. + Raise ReRunCrash Exception if it crashes. 
""" assert passes_needed <= max_repeats - failed_arg = what_failed.func - if what_failed.tag: - failed_arg += ":" + what_failed.tag + failed_arg = testcase.func + if testcase.tag: + failed_arg += ":" + testcase.tag n_passes = 0 @@ -325,6 +387,19 @@ def rerun_failed_testcase(test_basename, testargs: List[str], output_dir: str, proc = run_test(testargs + output_args + VERBOSE_ARGS + [failed_arg], timeout=timeout, env={**os.environ, **VERBOSE_ENV}) + # There are platforms that run tests wrapped with some test-runner + # script, that can possibly fail to extract a process exit code. + # Because of these cases, we *also* parse the XML file and signify + # CRASH in case of QFATAL/empty/corrupt result. + what_failed = parse_log(f"{pathname_stem}.xml") + if what_failed.qfatal_message: + raise ReRunCrash(f"CRASH! returncode:{proc.returncode} " + f"QFATAL:'{what_failed.qfatal_message}'") + if proc.returncode < 0 or proc.returncode >= 128: + raise ReRunCrash(f"CRASH! returncode:{proc.returncode}") + if proc.returncode == 0 and len(what_failed.failed_tests) > 0: + raise ReRunCrash("CRASH! 
returncode:0 but failures were found: " + + what_failed.failed_tests) if proc.returncode == 0: n_passes += 1 if n_passes == passes_needed: @@ -354,20 +429,22 @@ def main(): try: results_file = None - failed_functions = [] + what_failed = WhatFailed() if args.parse_xml_testlog: # do not run test, just parse file - failed_functions = parse_log(args.parse_xml_testlog) + what_failed = parse_log(args.parse_xml_testlog) # Pretend the test returned correct exit code - retcode = len(failed_functions) + retcode = len(what_failed.failed_tests) else: # normal invocation, run test (retcode, results_file) = \ run_full_test(args.test_basename, args.testargs, args.log_dir, args.no_extra_args, args.dry_run, args.timeout, args.specific_extra_args) if results_file: - failed_functions = parse_log(results_file) + what_failed = parse_log(results_file) + + failed_functions = what_failed.failed_tests - if retcode < 0: + if retcode < 0 or retcode >= 128 or what_failed.qfatal_message: L.warning("CRASH detected, re-running the whole executable") continue if retcode == 0: @@ -392,6 +469,8 @@ def main(): assert len(failed_functions) > 0 and retcode != 0 break # all is fine, goto re-running individual failed testcases + except AssertionError: + raise except Exception as e: L.error("exception:%s %s", type(e).__name__, e) L.error("The test executable probably crashed, see above for details") @@ -402,13 +481,15 @@ def main(): L.info("Some tests failed, will re-run at most %d times.\n", args.max_repeats) - for what_failed in failed_functions: + for test_result in failed_functions: try: ret = rerun_failed_testcase(args.test_basename, args.testargs, args.log_dir, - what_failed, args.max_repeats, args.passes_needed, + test_result, args.max_repeats, args.passes_needed, dryrun=args.dry_run, timeout=args.timeout) + except AssertionError: + raise except Exception as e: - L.error("exception:%s %s", type(e).__name__, e) + L.error("exception:%s", e) L.error("The testcase re-run probably crashed, giving up") 
sys.exit(3) # Test re-run CRASH diff --git a/util/testrunner/tests/qt_mock_test-log.xml b/util/testrunner/tests/qt_mock_test-log.xml deleted file mode 100644 index 62e93bb8dcc..00000000000 --- a/util/testrunner/tests/qt_mock_test-log.xml +++ /dev/null @@ -1,36 +0,0 @@ -<?xml version="1.0" encoding="UTF-8"?> -<TestCase name="qt_mock_test"> - <Environment> - <QtVersion>MOCK</QtVersion> - <QtBuild>MOCK</QtBuild> - <QTestVersion>6.3.0</QTestVersion> - </Environment> - <TestFunction name="initTestCase"> - <Incident type="{{initTestCase_result}}" file="" line="0" /> - <Duration msecs="0.00004"/> - </TestFunction> - <TestFunction name="always_pass"> - <Incident type="{{always_pass_result}}" file="" line="0" /> - <Duration msecs="0.71704"/> - </TestFunction> - <TestFunction name="always_fail"> - <Incident type="{{always_fail_result}}" file="" line="0" /> - <Duration msecs="0.828272"/> - </TestFunction> - <TestFunction name="always_crash"> - <Incident type="{{always_crash_result}}" file="" line="0" /> - <Duration msecs="0.828272"/> - </TestFunction> - <TestFunction name="fail_then_pass"> - <Incident type="{{fail_then_pass:2_result}}" file="" line="0"> - <DataTag><![CDATA[2]]></DataTag> - </Incident> - <Incident type="{{fail_then_pass:5_result}}" file="" line="0"> - <DataTag><![CDATA[5]]></DataTag> - </Incident> - <Incident type="{{fail_then_pass:6_result}}" file="" line="0"> - <DataTag><![CDATA[6]]></DataTag> - </Incident> - </TestFunction> - <Duration msecs="1904.9"/> -</TestCase> diff --git a/util/testrunner/tests/qt_mock_test.py b/util/testrunner/tests/qt_mock_test.py deleted file mode 100755 index a7adb8804af..00000000000 --- a/util/testrunner/tests/qt_mock_test.py +++ /dev/null @@ -1,182 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (C) 2021 The Qt Company Ltd. -# SPDX-License-Identifier: LicenseRef-Qt-Commercial OR GPL-3.0-only WITH Qt-GPL-exception-1.0 - - -# This is an artificial test, mimicking the Qt tests, for example tst_whatever. 
-# Its purpose is to assist in testing qt-testrunner.py. -# -# Mode A: -# -# If invoked with a test function argument, it runs that test function. -# -# Usage: -# -# $0 always_pass -# $0 always_fail -# $0 always_crash -# $0 fail_then_pass:N # where N is the number of failing runs before passing -# -# Needs environment variable: -# + QT_MOCK_TEST_STATE_FILE :: points to a unique filename, to be written -# for keeping the state of the fail_then_pass:N tests. -# -# Mode B: -# -# If invoked without any argument, it runs the tests listed in the -# variable QT_MOCK_TEST_FAIL_LIST. If variable is empty it just runs -# the always_pass test. It also understands qtestlib's `-o outfile.xml,xml` -# option for writing a mock testlog in a file. Requires environment variables: -# + QT_MOCK_TEST_STATE_FILE :: See above -# + QT_MOCK_TEST_XML_TEMPLATE_FILE :: may point to the template XML file -# located in the same source directory. Without this variable, the -# option `-o outfile.xml,xml` will be ignored. -# + QT_MOCK_TEST_FAIL_LIST :: may contain a comma-separated list of test -# that should run. - - -import sys -import os -import traceback -from tst_testrunner import write_xml_log - - -MY_NAME = os.path.basename(sys.argv[0]) -STATE_FILE = None -XML_TEMPLATE = None -XML_OUTPUT_FILE = None - - -def put_failure(test_name): - with open(STATE_FILE, "a") as f: - f.write(test_name + "\n") -def get_failures(test_name): - n = 0 - try: - with open(STATE_FILE) as f: - for line in f: - if line.strip() == test_name: - n += 1 - except FileNotFoundError: - return 0 - return n - -# Only care about the XML log output file. -def parse_output_argument(a): - global XML_OUTPUT_FILE - if a.endswith(",xml"): - XML_OUTPUT_FILE = a[:-4] - -# Strip qtestlib specific arguments. -# Only care about the "-o ...,xml" argument. 
-def clean_cmdline(): - args = [] - prev_arg = None - skip_next_arg = True # Skip argv[0] - for a in sys.argv: - if skip_next_arg: - if prev_arg == "-o": - parse_output_argument(a) - prev_arg = None - skip_next_arg = False - continue - if a in ("-o", "-maxwarnings"): - skip_next_arg = True - prev_arg = a - continue - if a in ("-v1", "-v2", "-vs"): - print("VERBOSE RUN") - if "QT_LOGGING_RULES" in os.environ: - print("Environment has QT_LOGGING_RULES:", - os.environ["QT_LOGGING_RULES"]) - continue - args.append(a) - return args - - -def log_test(testcase, result, - testsuite=MY_NAME.rpartition(".")[0]): - print("%-7s: %s::%s()" % (result, testsuite, testcase)) - -# Return the exit code -def run_test(testname): - if testname == "initTestCase": - exit_code = 1 # specifically test that initTestCase fails - elif testname == "always_pass": - exit_code = 0 - elif testname == "always_fail": - exit_code = 1 - elif testname == "always_crash": - exit_code = 130 - elif testname.startswith("fail_then_pass"): - wanted_fails = int(testname.partition(":")[2]) - previous_fails = get_failures(testname) - if previous_fails < wanted_fails: - put_failure(testname) - exit_code = 1 - else: - exit_code = 0 - else: - assert False, "Unknown argument: %s" % testname - - if exit_code == 0: - log_test(testname, "PASS") - elif exit_code == 1: - log_test(testname, "FAIL!") - else: - log_test(testname, "CRASH!") - - return exit_code - -def no_args_run(): - try: - run_list = os.environ["QT_MOCK_TEST_RUN_LIST"].split(",") - except KeyError: - run_list = ["always_pass"] - - total_result = True - fail_list = [] - for test in run_list: - test_exit_code = run_test(test) - if test_exit_code not in (0, 1): - sys.exit(130) # CRASH! 
- if test_exit_code != 0: - fail_list.append(test) - total_result = total_result and (test_exit_code == 0) - - if XML_TEMPLATE and XML_OUTPUT_FILE: - write_xml_log(XML_OUTPUT_FILE, failure=fail_list) - - if total_result: - sys.exit(0) - else: - sys.exit(1) - - -def main(): - global STATE_FILE - # Will fail if env var is not set. - STATE_FILE = os.environ["QT_MOCK_TEST_STATE_FILE"] - - global XML_TEMPLATE - if "QT_MOCK_TEST_XML_TEMPLATE_FILE" in os.environ: - with open(os.environ["QT_MOCK_TEST_XML_TEMPLATE_FILE"]) as f: - XML_TEMPLATE = f.read() - - args = clean_cmdline() - - if len(args) == 0: - no_args_run() - assert False, "Unreachable!" - else: - sys.exit(run_test(args[0])) - - -# TODO write XPASS test that does exit(1) - -if __name__ == "__main__": - try: - main() - except Exception as e: - traceback.print_exc() - exit(128) # Something went wrong with this script diff --git a/util/testrunner/tests/tst_testrunner.py b/util/testrunner/tests/tst_testrunner.py deleted file mode 100755 index 4a3d92e167b..00000000000 --- a/util/testrunner/tests/tst_testrunner.py +++ /dev/null @@ -1,401 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (C) 2021 The Qt Company Ltd. 
-# SPDX-License-Identifier: LicenseRef-Qt-Commercial OR GPL-3.0-only WITH Qt-GPL-exception-1.0 - - -import sys -import os -import re -import glob -import subprocess - -from subprocess import STDOUT, PIPE -from tempfile import TemporaryDirectory, mkstemp - -MY_NAME = os.path.basename(__file__) -my_dir = os.path.dirname(__file__) -testrunner = os.path.join(my_dir, "..", "qt-testrunner.py") -mock_test = os.path.join(my_dir, "qt_mock_test.py") -xml_log_template = os.path.join(my_dir, "qt_mock_test-log.xml") - -with open(xml_log_template) as f: - XML_TEMPLATE = f.read() - - -import unittest - -def setUpModule(): - global TEMPDIR - TEMPDIR = TemporaryDirectory(prefix="tst_testrunner-") - - filename = os.path.join(TEMPDIR.name, "file_1") - print("setUpModule(): setting up temporary directory and env var" - " QT_MOCK_TEST_STATE_FILE=" + filename + " and" - " QT_MOCK_TEST_XML_TEMPLATE_FILE=" + xml_log_template) - - os.environ["QT_MOCK_TEST_STATE_FILE"] = filename - os.environ["QT_MOCK_TEST_XML_TEMPLATE_FILE"] = xml_log_template - -def tearDownModule(): - print("\ntearDownModule(): Cleaning up temporary directory:", - TEMPDIR.name) - del os.environ["QT_MOCK_TEST_STATE_FILE"] - TEMPDIR.cleanup() - - -# Helper to run a command and always capture output -def run(*args, **kwargs): - if DEBUG: - print("Running: ", args, flush=True) - proc = subprocess.run(*args, stdout=PIPE, stderr=STDOUT, **kwargs) - if DEBUG and proc.stdout: - print(proc.stdout.decode(), flush=True) - return proc - -# Helper to run qt-testrunner.py with proper testing arguments. 
-def run_testrunner(xml_filename=None, testrunner_args=None, - wrapper_script=None, wrapper_args=None, - qttest_args=None, env=None): - - args = [ testrunner ] - if xml_filename: - args += [ "--parse-xml-testlog", xml_filename ] - if testrunner_args: - args += testrunner_args - - if wrapper_script: - args += [ wrapper_script ] - if wrapper_args: - args += wrapper_args - - args += [ mock_test ] - if qttest_args: - args += qttest_args - - return run(args, env=env) - -# Write the XML_TEMPLATE to filename, replacing the templated results. -def write_xml_log(filename, failure=None): - data = XML_TEMPLATE - # Replace what was asked to fail with "fail" - if type(failure) in (list, tuple): - for template in failure: - data = data.replace("{{"+template+"_result}}", "fail") - elif type(failure) is str: - data = data.replace("{{"+failure+"_result}}", "fail") - # Replace the rest with "pass" - data = re.sub(r"{{[^}]+}}", "pass", data) - with open(filename, "w") as f: - f.write(data) - - -# Test that qt_mock_test.py behaves well. This is necessary to properly -# test qt-testrunner. 
-class Test_qt_mock_test(unittest.TestCase): - def setUp(self): - state_file = os.environ["QT_MOCK_TEST_STATE_FILE"] - if os.path.exists(state_file): - os.remove(state_file) - def test_always_pass(self): - proc = run([mock_test, "always_pass"]) - self.assertEqual(proc.returncode, 0) - def test_always_fail(self): - proc = run([mock_test, "always_fail"]) - self.assertEqual(proc.returncode, 1) - def test_fail_then_pass_2(self): - proc = run([mock_test, "fail_then_pass:2"]) - self.assertEqual(proc.returncode, 1) - proc = run([mock_test, "fail_then_pass:2"]) - self.assertEqual(proc.returncode, 1) - proc = run([mock_test, "fail_then_pass:2"]) - self.assertEqual(proc.returncode, 0) - def test_fail_then_pass_1(self): - proc = run([mock_test, "fail_then_pass:1"]) - self.assertEqual(proc.returncode, 1) - proc = run([mock_test, "fail_then_pass:1"]) - self.assertEqual(proc.returncode, 0) - def test_fail_then_pass_many_tests(self): - proc = run([mock_test, "fail_then_pass:1"]) - self.assertEqual(proc.returncode, 1) - proc = run([mock_test, "fail_then_pass:2"]) - self.assertEqual(proc.returncode, 1) - proc = run([mock_test, "fail_then_pass:1"]) - self.assertEqual(proc.returncode, 0) - proc = run([mock_test, "fail_then_pass:2"]) - self.assertEqual(proc.returncode, 1) - proc = run([mock_test, "fail_then_pass:2"]) - self.assertEqual(proc.returncode, 0) - def test_xml_file_is_written(self): - filename = os.path.join(TEMPDIR.name, "testlog.xml") - proc = run([mock_test, "-o", filename+",xml"]) - self.assertEqual(proc.returncode, 0) - self.assertTrue(os.path.exists(filename)) - self.assertGreater(os.path.getsize(filename), 0) - os.remove(filename) - -# Test regular invocations of qt-testrunner. -class Test_testrunner(unittest.TestCase): - def setUp(self): - state_file = os.environ["QT_MOCK_TEST_STATE_FILE"] - if os.path.exists(state_file): - os.remove(state_file) - # The mock_test honors only the XML output arguments, the rest are ignored. 
- old_logfiles = glob.glob(os.path.basename(mock_test) + "*.xml", - root_dir=TEMPDIR.name) - for fname in old_logfiles: - os.remove(os.path.join(TEMPDIR.name, fname)) - self.env = dict() - self.env["QT_MOCK_TEST_XML_TEMPLATE_FILE"] = os.environ["QT_MOCK_TEST_XML_TEMPLATE_FILE"] - self.env["QT_MOCK_TEST_STATE_FILE"] = state_file - self.testrunner_args = [ "--log-dir", TEMPDIR.name ] - def prepare_env(self, run_list=None): - if run_list is not None: - self.env['QT_MOCK_TEST_RUN_LIST'] = ",".join(run_list) - def run2(self): - return run_testrunner(testrunner_args=self.testrunner_args, env=self.env) - def test_simple_invocation(self): - # All tests pass. - proc = self.run2() - self.assertEqual(proc.returncode, 0) - def test_always_pass(self): - self.prepare_env(run_list=["always_pass"]) - proc = self.run2() - self.assertEqual(proc.returncode, 0) - def test_output_files_are_generated(self): - proc = self.run2() - xml_output_files = glob.glob(os.path.basename(mock_test) + "-*[0-9].xml", - root_dir=TEMPDIR.name) - if DEBUG: - print("Output files found: ", - xml_output_files) - self.assertEqual(len(xml_output_files), 1) - def test_always_fail(self): - self.prepare_env(run_list=["always_fail"]) - proc = self.run2() - # TODO verify that re-runs==max_repeats - self.assertEqual(proc.returncode, 2) - def test_flaky_pass_1(self): - self.prepare_env(run_list=["always_pass,fail_then_pass:1"]) - proc = self.run2() - self.assertEqual(proc.returncode, 0) - def test_flaky_pass_5(self): - self.prepare_env(run_list=["always_pass,fail_then_pass:1,fail_then_pass:5"]) - proc = self.run2() - self.assertEqual(proc.returncode, 0) - def test_flaky_fail(self): - self.prepare_env(run_list=["always_pass,fail_then_pass:6"]) - proc = self.run2() - self.assertEqual(proc.returncode, 2) - def test_flaky_pass_fail(self): - self.prepare_env(run_list=["always_pass,fail_then_pass:1,fail_then_pass:6"]) - proc = self.run2() - # TODO verify that one func was re-run and passed but the other failed. 
- self.assertEqual(proc.returncode, 2) - def test_initTestCase_fail_crash(self): - self.prepare_env(run_list=["initTestCase,always_pass"]) - proc = self.run2() - self.assertEqual(proc.returncode, 3) - - # If no XML file is found by qt-testrunner, it is usually considered a - # CRASH and the whole test is re-run. Even when the return code is zero. - # It is a PASS only if the test is not capable of XML output (see no_extra_args, TODO test it). - def test_no_xml_log_written_pass_crash(self): - del self.env["QT_MOCK_TEST_XML_TEMPLATE_FILE"] - self.prepare_env(run_list=["always_pass"]) - proc = self.run2() - self.assertEqual(proc.returncode, 3) - # On the 2nd iteration of the full test, both of the tests pass. - # Still it's a CRASH because no XML file was found. - def test_no_xml_log_written_fail_then_pass_crash(self): - del self.env["QT_MOCK_TEST_XML_TEMPLATE_FILE"] - self.prepare_env(run_list=["always_pass,fail_then_pass:1"]) - proc = self.run2() - # TODO verify that the whole test has run twice. - self.assertEqual(proc.returncode, 3) - # Even after 2 iterations of the full test we still get failures but no XML file, - # and this is considered a CRASH. - def test_no_xml_log_written_crash(self): - del self.env["QT_MOCK_TEST_XML_TEMPLATE_FILE"] - self.prepare_env(run_list=["fail_then_pass:2"]) - proc = self.run2() - self.assertEqual(proc.returncode, 3) - - # If a test returns success but XML contains failures, it's a CRASH. - def test_wrong_xml_log_written_1_crash(self): - logfile = os.path.join(TEMPDIR.name, os.path.basename(mock_test) + ".xml") - write_xml_log(logfile, failure="always_fail") - del self.env["QT_MOCK_TEST_XML_TEMPLATE_FILE"] - self.prepare_env(run_list=["always_pass"]) - proc = self.run2() - self.assertEqual(proc.returncode, 3) - # If a test returns failure but XML contains only pass, it's a CRASH. 
- def test_wrong_xml_log_written_2_crash(self): - logfile = os.path.join(TEMPDIR.name, os.path.basename(mock_test) + ".xml") - write_xml_log(logfile) - del self.env["QT_MOCK_TEST_XML_TEMPLATE_FILE"] - self.prepare_env(run_list=["always_fail"]) - proc = self.run2() - self.assertEqual(proc.returncode, 3) - - def create_wrapper(self, filename, content=None): - if not content: - content='exec "$@"' - filename = os.path.join(TEMPDIR.name, filename) - # if os.path.exists(filename): - # os.remove(filename) - with open(filename, "w") as f: - f.write(f'#!/bin/sh\n{content}\n') - self.wrapper_script = f.name - os.chmod(self.wrapper_script, 0o700) - - # Test that qt-testrunner detects the correct executable name even if we - # use a special wrapper script, and that it uses that in the XML log filename. - def test_wrapper(self): - self.create_wrapper("coin_vxworks_qemu_runner.sh") - proc = run_testrunner(wrapper_script=self.wrapper_script, - testrunner_args=["--log-dir",TEMPDIR.name], - env=self.env) - self.assertEqual(proc.returncode, 0) - xml_output_files = glob.glob(os.path.basename(mock_test) + "-*[0-9].xml", - root_dir=TEMPDIR.name) - if DEBUG: - print("XML output files found: ", xml_output_files) - self.assertEqual(len(xml_output_files), 1) - - # The "androidtestrunner" wrapper is special. It expects the QTest arguments after "--". - # So our mock androidtestrunner wrapper ignores everything before "--" - # and executes our hardcoded mock_test with the arguments that follow. - def create_mock_anroidtestrunner_wrapper(self): - self.create_wrapper("androidtestrunner", content= - 'while [ "$1" != "--" ]; do shift; done; shift; exec {} "$@"'.format(mock_test)) - - def test_androidtestrunner_with_aab(self): - self.create_mock_anroidtestrunner_wrapper() - # Copied verbatim from our CI logs. The only relevant option is --aab. 
- androidtestrunner_args= ['--path', '/home/qt/work/qt/qtdeclarative_standalone_tests/tests/auto/quickcontrols/qquickpopup/android-build-tst_qquickpopup', '--adb', '/opt/android/sdk/platform-tools/adb', '--skip-install-root', '--ndk-stack', '/opt/android/android-ndk-r27c/ndk-stack', '--manifest', '/home/qt/work/qt/qtdeclarative_standalone_tests/tests/auto/quickcontrols/qquickpopup/android-build-tst_qquickpopup/app/AndroidManifest.xml', '--make', '"/opt/cmake-3.30.5/bin/cmake" --build /home/qt/work/qt/qtdeclarative_standalone_tests --target tst_qquickpopup_make_aab', '--aab', '/home/qt/work/qt/qtdeclarative_standalone_tests/tests/auto/quickcontrols/qquickpopup/android-build-tst_qquickpopup/tst_qquickpopup.aab', '--bundletool', '/opt/bundletool/bundletool', '--timeout', '1425'] - # In COIN CI, TESTRUNNER="qt-testrunner.py --". That's why we append "--". - proc = run_testrunner(testrunner_args=["--log-dir", TEMPDIR.name, "--"], - wrapper_script=self.wrapper_script, - wrapper_args=androidtestrunner_args, - env=self.env) - self.assertEqual(proc.returncode, 0) - xml_output_files = glob.glob("tst_qquickpopup-*[0-9].xml", - root_dir=TEMPDIR.name) - if DEBUG: - print("XML output files found: ", xml_output_files) - self.assertEqual(len(xml_output_files), 1) - # similar to above but with "--apk" - def test_androidtestrunner_with_apk(self): - self.create_mock_anroidtestrunner_wrapper() - androidtestrunner_args= ['--blah', '--apk', '/whatever/waza.apk', 'blue'] - proc = run_testrunner(testrunner_args=["--log-dir", TEMPDIR.name, "--"], - wrapper_script=self.wrapper_script, - wrapper_args=androidtestrunner_args, - env=self.env) - self.assertEqual(proc.returncode, 0) - xml_output_files = glob.glob("waza-*[0-9].xml", - root_dir=TEMPDIR.name) - if DEBUG: - print("XML output files found: ", xml_output_files) - self.assertEqual(len(xml_output_files), 1) - # similar to above but with neither "--apk" nor "--aab". qt-testrunner throws error. 
- def test_androidtestrunner_fail_to_detect_filename(self): - self.create_mock_anroidtestrunner_wrapper() - androidtestrunner_args= ['--blah', '--argh', '/whatever/waza.apk', 'waza.aab'] - proc = run_testrunner(testrunner_args=["--log-dir", TEMPDIR.name, "--"], - wrapper_script=self.wrapper_script, - wrapper_args=androidtestrunner_args, - env=self.env) - self.assertEqual(proc.returncode, 1) - xml_output_files = glob.glob("waza-*[0-9].xml", - root_dir=TEMPDIR.name) - if DEBUG: - print("XML output files found: ", xml_output_files) - self.assertEqual(len(xml_output_files), 0) - - -# Test qt-testrunner script with an existing XML log file: -# qt-testrunner.py qt_mock_test.py --parse-xml-testlog file.xml -# qt-testrunner should repeat the testcases that are logged as -# failures and fail or pass depending on how the testcases behave. -# Different XML files are generated for the following test cases. -# + No failure logged. qt-testrunner should exit(0) -# + The "always_pass" test has failed. qt-testrunner should exit(0). -# + The "always_fail" test has failed. qt-testrunner should exit(2). -# + The "always_crash" test has failed. qt-testrunner should exit(2). -# + The "fail_then_pass:2" test failed. qt-testrunner should exit(0). -# + The "fail_then_pass:5" test failed. qt-testrunner should exit(2). -# + The "initTestCase" failed which is listed as NO_RERUN thus -# qt-testrunner should exit(3). -class Test_testrunner_with_xml_logfile(unittest.TestCase): - # Runs before every single test function, creating a unique temp file. 
- def setUp(self): - (_handle, self.xml_file) = mkstemp( - suffix=".xml", prefix="qt_mock_test-log-", - dir=TEMPDIR.name) - if os.path.exists(os.environ["QT_MOCK_TEST_STATE_FILE"]): - os.remove(os.environ["QT_MOCK_TEST_STATE_FILE"]) - def tearDown(self): - os.remove(self.xml_file) - del self.xml_file - - def test_no_failure(self): - write_xml_log(self.xml_file, failure=None) - proc = run_testrunner(self.xml_file) - self.assertEqual(proc.returncode, 0) - def test_always_pass_failed(self): - write_xml_log(self.xml_file, failure="always_pass") - proc = run_testrunner(self.xml_file) - self.assertEqual(proc.returncode, 0) - def test_always_pass_failed_max_repeats_0(self): - write_xml_log(self.xml_file, failure="always_pass") - proc = run_testrunner(self.xml_file, - testrunner_args=["--max-repeats", "0"]) - self.assertEqual(proc.returncode, 2) - def test_always_fail_failed(self): - write_xml_log(self.xml_file, failure="always_fail") - proc = run_testrunner(self.xml_file) - self.assertEqual(proc.returncode, 2) - # Assert that one of the re-runs was in verbose mode - matches = re.findall("VERBOSE RUN", - proc.stdout.decode()) - self.assertEqual(len(matches), 1) - # Assert that the environment was altered too - self.assertIn("QT_LOGGING_RULES", proc.stdout.decode()) - def test_always_crash_failed(self): - write_xml_log(self.xml_file, failure="always_crash") - proc = run_testrunner(self.xml_file) - self.assertEqual(proc.returncode, 2) - def test_fail_then_pass_2_failed(self): - write_xml_log(self.xml_file, failure="fail_then_pass:2") - proc = run_testrunner(self.xml_file) - self.assertEqual(proc.returncode, 0) - def test_fail_then_pass_5_failed(self): - write_xml_log(self.xml_file, failure="fail_then_pass:5") - proc = run_testrunner(self.xml_file) - self.assertEqual(proc.returncode, 2) - def test_with_two_failures(self): - write_xml_log(self.xml_file, - failure=["always_pass", "fail_then_pass:2"]) - proc = run_testrunner(self.xml_file) - self.assertEqual(proc.returncode, 0) 
- # Check that test output is properly interleaved with qt-testrunner's logging. - matches = re.findall(r"(PASS|FAIL!).*\n.*Test process exited with code", - proc.stdout.decode()) - self.assertEqual(len(matches), 4) - def test_initTestCase_fail_crash(self): - write_xml_log(self.xml_file, failure="initTestCase") - proc = run_testrunner(self.xml_file) - self.assertEqual(proc.returncode, 3) - - -if __name__ == "__main__": - - DEBUG = False - if "--debug" in sys.argv: - sys.argv.remove("--debug") - DEBUG = True - - # We set failfast=True as we do not want the test suite to continue if the - # tests of qt_mock_test failed. The next ones depend on it. - unittest.main(failfast=True) |
