Diffstat (limited to 'tests')
-rw-r--r--  tests/auto/CMakeLists.txt | 3
-rw-r--r--  tests/auto/cmake/mockplugins/.cmake.conf | 2
-rw-r--r--  tests/auto/cmake/test_generating_cpp_exports/.cmake.conf | 2
-rw-r--r--  tests/auto/cmake/test_static_resources/.cmake.conf | 2
-rw-r--r--  tests/auto/corelib/itemmodels/qrangemodeladapter/tst_qrangemodeladapter.cpp | 8
-rw-r--r--  tests/auto/corelib/thread/qreadwritelock/tst_qreadwritelock.cpp | 106
-rw-r--r--  tests/auto/network/access/qhttp2connection/tst_qhttp2connection.cpp | 55
-rw-r--r--  tests/auto/tools/rcc/data/legal/rcc_legal.cpp | 2
-rw-r--r--  tests/auto/util/testrunner/CMakeLists.txt | 14
-rw-r--r--  tests/auto/util/testrunner/qt_mock_test-log.xml | 50
-rwxr-xr-x  tests/auto/util/testrunner/qt_mock_test.py | 221
-rwxr-xr-x  tests/auto/util/testrunner/tst_qt_testrunner.py | 581
12 files changed, 1038 insertions, 8 deletions
diff --git a/tests/auto/CMakeLists.txt b/tests/auto/CMakeLists.txt
index ac8aece707b..7dd9340f51b 100644
--- a/tests/auto/CMakeLists.txt
+++ b/tests/auto/CMakeLists.txt
@@ -4,6 +4,9 @@
# Order by dependency [*], then alphabetic. [*] If bugs in part A of
# our source would break tests of part B, then test A before B.
+
+add_subdirectory(util/testrunner)
+
set(run_dbus_tests OFF)
if (QT_FEATURE_dbus)
set(run_dbus_tests ON)
diff --git a/tests/auto/cmake/mockplugins/.cmake.conf b/tests/auto/cmake/mockplugins/.cmake.conf
index be788d10f8e..846c4f3b923 100644
--- a/tests/auto/cmake/mockplugins/.cmake.conf
+++ b/tests/auto/cmake/mockplugins/.cmake.conf
@@ -1 +1 @@
-set(QT_REPO_MODULE_VERSION "6.11.0")
+set(QT_REPO_MODULE_VERSION "6.12.0")
diff --git a/tests/auto/cmake/test_generating_cpp_exports/.cmake.conf b/tests/auto/cmake/test_generating_cpp_exports/.cmake.conf
index be788d10f8e..846c4f3b923 100644
--- a/tests/auto/cmake/test_generating_cpp_exports/.cmake.conf
+++ b/tests/auto/cmake/test_generating_cpp_exports/.cmake.conf
@@ -1 +1 @@
-set(QT_REPO_MODULE_VERSION "6.11.0")
+set(QT_REPO_MODULE_VERSION "6.12.0")
diff --git a/tests/auto/cmake/test_static_resources/.cmake.conf b/tests/auto/cmake/test_static_resources/.cmake.conf
index be788d10f8e..846c4f3b923 100644
--- a/tests/auto/cmake/test_static_resources/.cmake.conf
+++ b/tests/auto/cmake/test_static_resources/.cmake.conf
@@ -1 +1 @@
-set(QT_REPO_MODULE_VERSION "6.11.0")
+set(QT_REPO_MODULE_VERSION "6.12.0")
diff --git a/tests/auto/corelib/itemmodels/qrangemodeladapter/tst_qrangemodeladapter.cpp b/tests/auto/corelib/itemmodels/qrangemodeladapter/tst_qrangemodeladapter.cpp
index ef4a535dcac..29e26f99bdd 100644
--- a/tests/auto/corelib/itemmodels/qrangemodeladapter/tst_qrangemodeladapter.cpp
+++ b/tests/auto/corelib/itemmodels/qrangemodeladapter/tst_qrangemodeladapter.cpp
@@ -220,8 +220,8 @@ API_TEST(moveRows, moveRows(0, 0, 0))
API_TEST(moveTreeRows, moveRows(QList<int>{0, 0}, 0, QList<int>{0, 0}))
API_TEST(insertColumn, insertColumn(0))
-API_TEST(insertColumnWithData, insertColumn(0, {}))
-API_TEST(insertColumns, insertColumns(0, std::declval<Range&>()))
+API_TEST(insertColumnWithData, insertColumn(0, QList<int>{0}))
+API_TEST(insertColumns, insertColumns(0, QList<int>{0}))
API_TEST(removeColumn, removeColumn(0))
API_TEST(removeColumns, removeColumns(0, 0))
API_TEST(moveColumn, moveColumn(0, 0))
@@ -849,7 +849,7 @@ void tst_QRangeModelAdapter::insertColumn_API()
static_assert(has_insertColumnWithData(d.tableOfNumbers));
static_assert(!has_insertColumnWithData(d.constTableOfNumbers));
- static_assert(has_insertColumnWithData(d.tableOfPointers));
+ static_assert(!has_insertColumnWithData(d.tableOfPointers));
}
void tst_QRangeModelAdapter::insertColumns_API()
@@ -863,7 +863,7 @@ void tst_QRangeModelAdapter::insertColumns_API()
static_assert(has_insertColumns(d.tableOfNumbers));
static_assert(!has_insertColumns(d.constTableOfNumbers));
- static_assert(has_insertColumns(d.tableOfPointers));
+ static_assert(!has_insertColumns(d.tableOfPointers));
static_assert(!has_insertColumns(d.tableOfRowPointers));
static_assert(!has_insertColumns(d.listOfNamedRoles));
static_assert(!has_insertColumns(d.m_tree));
diff --git a/tests/auto/corelib/thread/qreadwritelock/tst_qreadwritelock.cpp b/tests/auto/corelib/thread/qreadwritelock/tst_qreadwritelock.cpp
index 86dfa5faffc..4c089091f8d 100644
--- a/tests/auto/corelib/thread/qreadwritelock/tst_qreadwritelock.cpp
+++ b/tests/auto/corelib/thread/qreadwritelock/tst_qreadwritelock.cpp
@@ -57,6 +57,7 @@ private slots:
void multipleReadersLoop();
void multipleWritersLoop();
void multipleReadersWritersLoop();
+ void heavyLoadLocks();
void countingTest();
void limitedReaders();
void deleteOnUnlock();
@@ -603,6 +604,111 @@ public:
}
};
+class HeavyLoadLockThread : public QThread
+{
+public:
+ QReadWriteLock &testRwlock;
+ const qsizetype iterations;
+ const int numThreads;
+ inline HeavyLoadLockThread(QReadWriteLock &l, qsizetype iters, int numThreads, QVector<QAtomicInt *> &counters):
+ testRwlock(l),
+ iterations(iters),
+ numThreads(numThreads),
+ counters(counters)
+ { }
+
+private:
+ QVector<QAtomicInt *> &counters;
+ QAtomicInt *getCounter(qsizetype index)
+ {
+ QReadLocker locker(&testRwlock);
+ /*
+ The index increases monotonically, so the requested
+ index should always be within or at the end of the
+ counters vector.
+ */
+ Q_ASSERT(index <= counters.size());
+ if (counters.size() <= index || counters[index] == nullptr) {
+ locker.unlock();
+ QWriteLocker wlocker(&testRwlock);
+ if (counters.size() <= index)
+ counters.resize(index + 1, nullptr);
+ if (counters[index] == nullptr)
+ counters[index] = new QAtomicInt(0);
+ return counters[index];
+ }
+ return counters[index];
+ }
+ void releaseCounter(qsizetype index)
+ {
+ QWriteLocker locker(&testRwlock);
+ delete counters[index];
+ counters[index] = nullptr;
+ }
+
+public:
+ void run() override
+ {
+ for (qsizetype i = 0; i < iterations; ++i) {
+ QAtomicInt *counter = getCounter(i);
+ /*
+ Here each counter is accessed by each thread
+ and incremented only once. As a result, when the
+ counter reaches numThreads, i.e. the fetched
+ value before the increment is numThreads-1,
+ we know all threads have accessed this counter
+ and we can delete it safely.
+ */
+ int prev = counter->fetchAndAddRelaxed(1);
+ if (prev == numThreads - 1) {
+#ifdef QT_BUILDING_UNDER_TSAN
+ /*
+ Under TSAN, deleting and freeing an object
+ will trigger a write operation on the memory
+ of the object. Since we used fetchAndAddRelaxed
+ to update the counter, TSAN will report a data
+ race when deleting the counter here. To avoid
+ the false positive, we simply reset the counter
+ to 0 here, with ordered semantics to establish
+ the sequence to ensure that the freeing operation
+ happens after all fetchAndAddRelaxed operations
+ in other threads.
+
+ When not building under TSAN, deleting the counter
+ will not result in any data read or written to the
+ memory region of the counter, so no data race will
+ happen.
+ */
+ counter->fetchAndStoreOrdered(0);
+#endif
+ releaseCounter(i);
+ }
+ }
+ }
+};
+
+/*
+ Multiple threads race to acquire and release locks
+ on the same indices.
+*/
+
+void tst_QReadWriteLock::heavyLoadLocks()
+{
+ constexpr qsizetype iterations = 65536 * 4;
+ constexpr int numThreads = 8;
+ QVector<QAtomicInt *> counters;
+ QReadWriteLock testLock;
+ std::array<std::unique_ptr<HeavyLoadLockThread>, numThreads> threads;
+ for (auto &thread : threads)
+ thread = std::make_unique<HeavyLoadLockThread>(testLock, iterations, numThreads, counters);
+ for (auto &thread : threads)
+ thread->start();
+ for (auto &thread : threads)
+ thread->wait();
+ QVERIFY(counters.size() == iterations);
+ for (qsizetype i = 0; i < iterations; ++i)
+ QVERIFY(counters[i] == nullptr);
+}
/*
A writer acquires a read-lock, a reader locks
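
As an aside, the essential shape of getCounter() above is: look the slot up under the read lock, and only if it is missing release the read lock, take the write lock, and check again before creating it. Below is a minimal Python sketch of that re-check structure, not part of the patch: a plain threading.Lock stands in for both modes of QReadWriteLock, and a one-element list stands in for QAtomicInt; only the structure of the two critical sections is the point.

    import threading

    counters = []                      # grows on demand, like the QVector in the test
    counters_lock = threading.Lock()   # stand-in for QReadWriteLock (both modes)

    def get_counter(index):
        # "Read" phase -- a QReadLocker in the C++ test.
        with counters_lock:
            if index < len(counters) and counters[index] is not None:
                return counters[index]
        # "Write" phase -- a QWriteLocker in the C++ test. The slot must be
        # checked again: another thread may have created it between the two
        # critical sections.
        with counters_lock:
            if len(counters) <= index:
                counters.extend([None] * (index + 1 - len(counters)))
            if counters[index] is None:
                counters[index] = [0]
            return counters[index]
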
diff --git a/tests/auto/network/access/qhttp2connection/tst_qhttp2connection.cpp b/tests/auto/network/access/qhttp2connection/tst_qhttp2connection.cpp
index 417655c31d9..22aa9d44262 100644
--- a/tests/auto/network/access/qhttp2connection/tst_qhttp2connection.cpp
+++ b/tests/auto/network/access/qhttp2connection/tst_qhttp2connection.cpp
@@ -21,6 +21,8 @@ class tst_QHttp2Connection : public QObject
private slots:
void construct();
void constructStream();
+ void streamConfiguration_data();
+ void streamConfiguration();
void testSETTINGSFrame();
void maxHeaderTableSize();
void testPING();
@@ -204,6 +206,59 @@ void tst_QHttp2Connection::constructStream()
QCOMPARE(stream->isUploadingDATA(), false);
}
+void tst_QHttp2Connection::streamConfiguration_data()
+{
+ QTest::addColumn<bool>("useDownloadBuffer");
+
+ QTest::addRow("useDownloadBuffer=true") << true;
+ QTest::addRow("useDownloadBuffer=false") << false;
+}
+
+void tst_QHttp2Connection::streamConfiguration()
+{
+ QFETCH(const bool, useDownloadBuffer);
+
+ auto [client, server] = makeFakeConnectedSockets();
+ auto *clientConnection = makeHttp2Connection(client.get(), {}, Client);
+ auto *serverConnection = makeHttp2Connection(server.get(), {}, Server);
+
+ QHttp2Stream::Configuration config;
+ config.useDownloadBuffer = useDownloadBuffer;
+
+ QHttp2Stream *clientStream = clientConnection->createStream(config).unwrap();
+ QVERIFY(clientStream);
+ QCOMPARE(clientStream->configuration().useDownloadBuffer, useDownloadBuffer);
+ QVERIFY(waitForSettingsExchange(clientConnection, serverConnection));
+
+ QSignalSpy newIncomingStreamSpy{ serverConnection, &QHttp2Connection::newIncomingStream };
+ QSignalSpy clientDataReceivedSpy{ clientStream, &QHttp2Stream::dataReceived };
+
+ HPack::HttpHeader headers = getRequiredHeaders();
+ clientStream->sendHEADERS(headers, false);
+
+ QVERIFY(newIncomingStreamSpy.wait());
+ auto *serverStream = newIncomingStreamSpy.front().front().value<QHttp2Stream *>();
+ QVERIFY(serverStream);
+
+ const HPack::HttpHeader responseHeaders{ { ":status", "200" } };
+ serverStream->sendHEADERS(responseHeaders, false);
+
+ const QByteArray testData = "Hello World"_ba.repeated(100);
+ serverStream->sendDATA(testData, true);
+
+ QVERIFY(clientDataReceivedSpy.wait());
+ QCOMPARE(clientDataReceivedSpy.count(), 1);
+
+ const QByteArray receivedData = clientDataReceivedSpy.front().front().value<QByteArray>();
+ QCOMPARE(receivedData, testData);
+
+ if (useDownloadBuffer) {
+ QCOMPARE(clientStream->downloadBuffer().byteAmount(), testData.size());
+ } else {
+ QVERIFY(clientStream->downloadBuffer().isEmpty());
+ }
+}
+
void tst_QHttp2Connection::testSETTINGSFrame()
{
constexpr qint32 PrefaceLength = 24;
diff --git a/tests/auto/tools/rcc/data/legal/rcc_legal.cpp b/tests/auto/tools/rcc/data/legal/rcc_legal.cpp
index 248ab2e3b48..96f87d192e7 100644
--- a/tests/auto/tools/rcc/data/legal/rcc_legal.cpp
+++ b/tests/auto/tools/rcc/data/legal/rcc_legal.cpp
@@ -3,7 +3,7 @@
** Copyright (C) 2024 Intel Corporation.
** SPDX-License-Identifier: LicenseRef-Qt-Commercial OR GPL-3.0-only
**
-** Created by: The Resource Compiler for Qt version 6.11.0
+** Created by: The Resource Compiler for Qt version 6.12.0
**
** WARNING! All changes made in this file will be lost!
*****************************************************************************/
diff --git a/tests/auto/util/testrunner/CMakeLists.txt b/tests/auto/util/testrunner/CMakeLists.txt
new file mode 100644
index 00000000000..5ca8406f854
--- /dev/null
+++ b/tests/auto/util/testrunner/CMakeLists.txt
@@ -0,0 +1,14 @@
+# Copyright (C) 2025 The Qt Company Ltd.
+# SPDX-License-Identifier: BSD-3-Clause
+
+
+# Run the qt-testrunner test only inside the CI.
+if(DEFINED ENV{COIN_UNIQUE_JOB_ID} AND NOT IOS)
+ qt_internal_create_test_script(
+ NAME tst_qt_testrunner
+ COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tst_qt_testrunner.py" ARGS -v
+ WORKING_DIRECTORY "${test_working_dir}"
+ OUTPUT_FILE "${CMAKE_CURRENT_BINARY_DIR}/tst_qt_testrunner_Wrapper$<CONFIG>.cmake"
+ ENVIRONMENT "TESTRUNNER" ""
+ )
+endif()
diff --git a/tests/auto/util/testrunner/qt_mock_test-log.xml b/tests/auto/util/testrunner/qt_mock_test-log.xml
new file mode 100644
index 00000000000..a164bec9f9c
--- /dev/null
+++ b/tests/auto/util/testrunner/qt_mock_test-log.xml
@@ -0,0 +1,50 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<TestCase name="qt_mock_test">
+ <Environment>
+ <QtVersion>MOCK</QtVersion>
+ <QtBuild>MOCK</QtBuild>
+ <QTestVersion>6.3.0</QTestVersion>
+ </Environment>
+ <TestFunction name="initTestCase">
+ <Incident type="{{initTestCase_result}}" file="" line="0" />
+ <Duration msecs="0.00004"/>
+ </TestFunction>
+ <TestFunction name="always_pass">
+ <Incident type="{{always_pass_result}}" file="" line="0" />
+ <Duration msecs="0.71704"/>
+ </TestFunction>
+ <TestFunction name="always_fail">
+ <Incident type="{{always_fail_result}}" file="" line="0" />
+ <Duration msecs="0.828272"/>
+ </TestFunction>
+ <TestFunction name="always_crash">
+ <Incident type="{{always_crash_result}}" file="" line="0" />
+ <Duration msecs="0.828272"/>
+ </TestFunction>
+
+ <!-- The strings like this one "{{fail_then_pass:2_result}}"
+ are just template strings that will be replaced by the test driver
+ before each test. The colon doesn't have a special meaning.
+ The datatags in the following tests are just "2", "5", "6".
+ We don't strictly need datatags because the tests don't check
+ datatag handling specifically. Adding a couple of datatags to
+ this XML template just complicates it a bit and exercises that
+ functionality as a side effect.
+ -->
+ <TestFunction name="fail_then_pass">
+ <Incident type="{{fail_then_pass:2_result}}" file="" line="0">
+ <DataTag><![CDATA[2]]></DataTag>
+ </Incident>
+ <Incident type="{{fail_then_pass:5_result}}" file="" line="0">
+ <DataTag><![CDATA[5]]></DataTag>
+ </Incident>
+ <Incident type="{{fail_then_pass:6_result}}" file="" line="0">
+ <DataTag><![CDATA[6]]></DataTag>
+ </Incident>
+ </TestFunction>
+ <TestFunction name="fail_then_crash">
+ <Incident type="{{fail_then_crash_result}}" file="" line="0" />
+ <Duration msecs="0.828272"/>
+ </TestFunction>
+ <Duration msecs="1904.9"/>
+</TestCase>
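
For reference, the {{name_result}} placeholders in this template are filled in by write_xml_log() in tst_qt_testrunner.py, added further down in this patch: every test function listed as a failure is substituted with "fail", and whatever placeholders remain become "pass". The substitution boils down to the following condensed sketch of that helper (same logic, file writing and message injection left out):

    import re

    def fill_template(template, failures):
        # Mark the requested test functions as failed...
        for name in failures:
            template = template.replace("{{" + name + "_result}}", "fail")
        # ...and turn every remaining placeholder into a pass.
        return re.sub(r"{{[^}]+}}", "pass", template)

    # Example: fill_template(xml, ["fail_then_pass:2"]) turns
    # {{fail_then_pass:2_result}} into "fail" and every other
    # {{..._result}} placeholder into "pass".
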
diff --git a/tests/auto/util/testrunner/qt_mock_test.py b/tests/auto/util/testrunner/qt_mock_test.py
new file mode 100755
index 00000000000..af8fdf24509
--- /dev/null
+++ b/tests/auto/util/testrunner/qt_mock_test.py
@@ -0,0 +1,221 @@
+#!/usr/bin/env python3
+# Copyright (C) 2021 The Qt Company Ltd.
+# SPDX-License-Identifier: LicenseRef-Qt-Commercial OR GPL-3.0-only WITH Qt-GPL-exception-1.0
+
+
+# This is an artificial test, mimicking the Qt tests, for example tst_whatever.
+# Its purpose is to assist in testing qt-testrunner.py.
+#
+# Mode A:
+#
+# If invoked with a test function argument, it runs that test function.
+#
+# Usage:
+#
+# $0 always_pass
+# $0 always_fail
+# $0 always_crash
+# $0 fail_then_pass:N # where N is the number of failing runs before passing
+#
+# Needs environment variable:
+# + QT_MOCK_TEST_STATE_FILE :: points to a unique filename, to be written
+# for keeping the state of the fail_then_pass:N tests.
+#
+# Mode B:
+#
+# If invoked without any argument, it runs the tests listed in the
+# variable QT_MOCK_TEST_RUN_LIST. If variable is empty it just runs
+# the always_pass test. It also understands qtestlib's `-o outfile.xml,xml`
+# option for writing a mock testlog in a file. Requires environment variables:
+# + QT_MOCK_TEST_STATE_FILE :: See above
+# + QT_MOCK_TEST_XML_TEMPLATE_FILE :: may point to the template XML file
+# located in the same source directory. Without this variable, the
+# option `-o outfile.xml,xml` will be ignored.
+# + QT_MOCK_TEST_RUN_LIST :: may contain a comma-separated list of tests
+# that should run.
+# + QT_MOCK_TEST_CRASH_CLEANLY :: if set to 1, then the executable will
+# crash (exit with a high exit code)
+# after successfully running the given tests and writing the XML logfile.
+
+
+
+import sys
+import os
+import traceback
+from tst_qt_testrunner import write_xml_log
+
+
+MY_NAME = os.path.basename(sys.argv[0])
+STATE_FILE = None
+XML_TEMPLATE = None
+XML_OUTPUT_FILE = None
+CRASH_CLEANLY = False
+
+
+def crash():
+ sys.exit(131)
+
+def put_failure(test_name):
+ with open(STATE_FILE, "a") as f:
+ f.write(test_name + "\n")
+def get_failures(test_name):
+ n = 0
+ try:
+ with open(STATE_FILE) as f:
+ for line in f:
+ if line.strip() == test_name:
+ n += 1
+ except FileNotFoundError:
+ return 0
+ return n
+
+# Only care about the XML log output file.
+def parse_output_argument(a):
+ global XML_OUTPUT_FILE
+ if a.endswith(",xml"):
+ XML_OUTPUT_FILE = a[:-4]
+
+# Strip qtestlib specific arguments.
+# Only care about the "-o ...,xml" argument.
+def clean_cmdline():
+ args = []
+ prev_arg = None
+ skip_next_arg = True # Skip argv[0]
+ for a in sys.argv:
+ if skip_next_arg:
+ if prev_arg == "-o":
+ parse_output_argument(a)
+ prev_arg = None
+ skip_next_arg = False
+ continue
+ if a in ("-o", "-maxwarnings"):
+ skip_next_arg = True
+ prev_arg = a
+ continue
+ if a in ("-v1", "-v2", "-vs"):
+ print("VERBOSE RUN")
+ if "QT_LOGGING_RULES" in os.environ:
+ print("Environment has QT_LOGGING_RULES:",
+ os.environ["QT_LOGGING_RULES"])
+ continue
+ args.append(a)
+ return args
+
+
+def log_test(testcase, result,
+ testsuite=MY_NAME.rpartition(".")[0]):
+ print("%-7s: %s::%s()" % (result, testsuite, testcase))
+
+def log_xml(fail_list):
+ if XML_OUTPUT_FILE and XML_TEMPLATE is not None:
+ if XML_TEMPLATE == "":
+ # If the template is an empty file, then write an empty output file
+ with open(XML_OUTPUT_FILE, "w"):
+ pass
+ else:
+ write_xml_log(XML_OUTPUT_FILE, failure=fail_list)
+
+# Return the exit code
+def run_test(testname):
+ if testname == "initTestCase":
+ exit_code = 1 # specifically test that initTestCase fails
+ elif testname == "always_pass":
+ exit_code = 0
+ elif testname == "always_fail":
+ exit_code = 1
+ elif testname == "always_crash":
+ exit_code = 131
+ elif testname == "fail_then_crash":
+ previous_fails = get_failures(testname)
+ if previous_fails == 0:
+ put_failure(testname)
+ exit_code = 1
+ else:
+ exit_code = 131
+ elif testname.startswith("fail_then_pass"):
+ wanted_fails = int(testname.partition(":")[2])
+ previous_fails = get_failures(testname)
+ if previous_fails < wanted_fails:
+ put_failure(testname)
+ exit_code = 1
+ else:
+ exit_code = 0
+ else:
+ assert False, "Unknown argument: %s" % testname
+
+ if exit_code == 0:
+ log_test(testname, "PASS")
+ elif exit_code == 1:
+ log_test(testname, "FAIL!")
+ else:
+ log_test(testname, "CRASH!")
+
+ return exit_code
+
+def no_args_run():
+ try:
+ run_list = os.environ["QT_MOCK_TEST_RUN_LIST"].split(",")
+ except KeyError:
+ run_list = ["always_pass"]
+
+ total_result = True
+ fail_list = []
+ for test in run_list:
+ test_exit_code = run_test(test)
+ if test_exit_code not in (0, 1):
+ crash()
+ if test_exit_code != 0:
+ fail_list.append(test)
+ total_result = total_result and (test_exit_code == 0)
+
+ log_xml(fail_list)
+
+ if CRASH_CLEANLY:
+ # Crash despite all going well and writing all output files cleanly.
+ crash()
+
+ if total_result:
+ sys.exit(0)
+ else:
+ sys.exit(1)
+
+
+def main():
+ global STATE_FILE
+ # Will fail if env var is not set.
+ STATE_FILE = os.environ["QT_MOCK_TEST_STATE_FILE"]
+
+ global XML_TEMPLATE
+ if "QT_MOCK_TEST_XML_TEMPLATE_FILE" in os.environ:
+ with open(os.environ["QT_MOCK_TEST_XML_TEMPLATE_FILE"]) as f:
+ XML_TEMPLATE = f.read()
+
+ global CRASH_CLEANLY
+ if ("QT_MOCK_TEST_CRASH_CLEANLY" in os.environ
+ and os.environ["QT_MOCK_TEST_CRASH_CLEANLY"] == "1"
+ ):
+ CRASH_CLEANLY = True
+
+ args = clean_cmdline()
+
+ if len(args) == 0:
+ no_args_run()
+ assert False, "Unreachable!"
+ else: # run single test function
+ exit_code = run_test(args[0])
+ # Write "fail" in the XML log only if the specific run has failed.
+ if exit_code != 0:
+ log_xml([args[0]])
+ else:
+ log_xml([])
+ sys.exit(exit_code)
+
+
+# TODO write XPASS test that does exit(1)
+
+if __name__ == "__main__":
+ try:
+ main()
+ except Exception as e:
+ traceback.print_exc()
+ exit(128) # Something went wrong with this script
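
A short illustration of the state-file mechanism behind fail_then_pass:N (and fail_then_crash): every failing run appends the test name to the file named by QT_MOCK_TEST_STATE_FILE, and the test starts passing once N occurrences have been recorded. The snippet below mirrors what Test_qt_mock_test.test_fail_then_pass_2 in tst_qt_testrunner.py asserts; it assumes it is run from this directory, and the temporary state file is just a stand-in for the one the test driver normally provides.

    import os, subprocess, sys, tempfile

    state = tempfile.NamedTemporaryFile(suffix=".state", delete=False)
    state.close()
    env = {**os.environ, "QT_MOCK_TEST_STATE_FILE": state.name}

    # Two failing runs append "fail_then_pass:2" to the state file (exit code 1);
    # the third run finds two recorded failures and passes (exit code 0).
    for expected in (1, 1, 0):
        proc = subprocess.run([sys.executable, "qt_mock_test.py", "fail_then_pass:2"],
                              env=env)
        assert proc.returncode == expected
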
diff --git a/tests/auto/util/testrunner/tst_qt_testrunner.py b/tests/auto/util/testrunner/tst_qt_testrunner.py
new file mode 100755
index 00000000000..1134fa0427f
--- /dev/null
+++ b/tests/auto/util/testrunner/tst_qt_testrunner.py
@@ -0,0 +1,581 @@
+#!/usr/bin/env python3
+# Copyright (C) 2021 The Qt Company Ltd.
+# SPDX-License-Identifier: LicenseRef-Qt-Commercial OR GPL-3.0-only WITH Qt-GPL-exception-1.0
+
+
+import sys
+import os
+import re
+import glob
+import subprocess
+
+from subprocess import STDOUT, PIPE
+from tempfile import TemporaryDirectory, mkstemp
+
+MY_NAME = os.path.basename(__file__)
+my_dir = os.path.dirname(__file__)
+testrunner = os.path.join(my_dir, "..", "..", "..", "..",
+ "util", "testrunner", "qt-testrunner.py")
+mock_test = os.path.join(my_dir, "qt_mock_test.py")
+xml_log_template = os.path.join(my_dir, "qt_mock_test-log.xml")
+
+with open(xml_log_template) as f:
+ XML_TEMPLATE = f.read()
+
+
+import unittest
+
+def setUpModule():
+ global TEMPDIR
+ TEMPDIR = TemporaryDirectory(prefix="tst_qt_testrunner-")
+
+ global EMPTY_FILE
+ EMPTY_FILE = os.path.join(TEMPDIR.name, "EMPTY")
+ with open(EMPTY_FILE, "w") as f:
+ pass
+
+ filename = os.path.join(TEMPDIR.name, "file_1")
+ print("setUpModule(): setting up temporary directory and env var"
+ " QT_MOCK_TEST_STATE_FILE=" + filename + " and"
+ " QT_MOCK_TEST_XML_TEMPLATE_FILE=" + xml_log_template)
+
+ os.environ["QT_MOCK_TEST_STATE_FILE"] = filename
+ os.environ["QT_MOCK_TEST_XML_TEMPLATE_FILE"] = xml_log_template
+ os.environ["QT_TESTRUNNER_TESTING"] = "1"
+
+def tearDownModule():
+ print("\ntearDownModule(): Cleaning up temporary directory:",
+ TEMPDIR.name)
+ del os.environ["QT_MOCK_TEST_STATE_FILE"]
+ TEMPDIR.cleanup()
+
+
+# Helper to run a command and always capture output
+def run(args : list, **kwargs):
+ if args[0].endswith(".py"):
+ # Make sure we run python executables with the same python version.
+ # It also helps on Windows, where .py files are not directly executable.
+ args = [ sys.executable, *args ]
+ if DEBUG:
+ print("Running: ", args, flush=True)
+ proc = subprocess.run(args, stdout=PIPE, stderr=STDOUT, **kwargs)
+ if DEBUG and proc.stdout:
+ print(proc.stdout.decode(), flush=True)
+ return proc
+
+# Helper to run qt-testrunner.py with proper testing arguments.
+# Always append --log-dir=TEMPDIR unless specifically told not to.
+def run_testrunner(xml_filename=None, log_dir=None,
+ testrunner_args=None,
+ wrapper_script=None, wrapper_args=None,
+ qttest_args=None, env=None):
+
+ args = [ testrunner ]
+ if xml_filename:
+ args += [ "--parse-xml-testlog", xml_filename ]
+ if log_dir == None:
+ args += [ "--log-dir", TEMPDIR.name ]
+ elif log_dir != "":
+ args += [ "--log-dir", log_dir ]
+ if testrunner_args:
+ args += testrunner_args
+
+ if wrapper_script:
+ args += [ wrapper_script ]
+ if wrapper_args:
+ args += wrapper_args
+
+ args += [ mock_test ]
+ if qttest_args:
+ args += qttest_args
+
+ return run(args, env=env)
+
+# Write the XML_TEMPLATE to filename, replacing the templated results.
+def write_xml_log(filename, failure=None, inject_message=None):
+ data = XML_TEMPLATE
+ if failure is None:
+ failure = []
+ elif isinstance(failure, str):
+ failure = [ failure ]
+ # Replace what was asked to fail with "fail"
+ for x in failure:
+ data = data.replace("{{" + x + "_result}}", "fail")
+ # Replace the rest with "pass"
+ data = re.sub(r"{{[^}]+}}", "pass", data)
+ # Inject possible <Message> tags inside the first <TestFunction>
+ if inject_message:
+ i = data.index("</TestFunction>")
+ data = data[:i] + inject_message + data[i:]
+ with open(filename, "w") as f:
+ f.write(data)
+
+
+# Test that qt_mock_test.py behaves well. This is necessary to properly
+# test qt-testrunner.
+class Test_qt_mock_test(unittest.TestCase):
+ def setUp(self):
+ state_file = os.environ["QT_MOCK_TEST_STATE_FILE"]
+ if os.path.exists(state_file):
+ os.remove(state_file)
+ def assertProcessCrashed(self, proc):
+ if DEBUG:
+ print("process returncode is:", proc.returncode)
+ self.assertTrue(proc.returncode < 0 or
+ proc.returncode >= 128)
+
+ def test_always_pass(self):
+ proc = run([mock_test, "always_pass"])
+ self.assertEqual(proc.returncode, 0)
+ def test_always_fail(self):
+ proc = run([mock_test, "always_fail"])
+ self.assertEqual(proc.returncode, 1)
+ def test_fail_then_pass_2(self):
+ proc = run([mock_test, "fail_then_pass:2"])
+ self.assertEqual(proc.returncode, 1)
+ proc = run([mock_test, "fail_then_pass:2"])
+ self.assertEqual(proc.returncode, 1)
+ proc = run([mock_test, "fail_then_pass:2"])
+ self.assertEqual(proc.returncode, 0)
+ def test_fail_then_pass_1(self):
+ proc = run([mock_test, "fail_then_pass:1"])
+ self.assertEqual(proc.returncode, 1)
+ proc = run([mock_test, "fail_then_pass:1"])
+ self.assertEqual(proc.returncode, 0)
+ def test_fail_then_pass_many_tests(self):
+ proc = run([mock_test, "fail_then_pass:1"])
+ self.assertEqual(proc.returncode, 1)
+ proc = run([mock_test, "fail_then_pass:2"])
+ self.assertEqual(proc.returncode, 1)
+ proc = run([mock_test, "fail_then_pass:1"])
+ self.assertEqual(proc.returncode, 0)
+ proc = run([mock_test, "fail_then_pass:2"])
+ self.assertEqual(proc.returncode, 1)
+ proc = run([mock_test, "fail_then_pass:2"])
+ self.assertEqual(proc.returncode, 0)
+ def test_fail_then_crash(self):
+ proc = run([mock_test, "fail_then_crash"])
+ self.assertEqual(proc.returncode, 1)
+ proc = run([mock_test, "fail_then_crash"])
+ self.assertProcessCrashed(proc)
+ def test_xml_file_is_written(self):
+ filename = os.path.join(TEMPDIR.name, "testlog.xml")
+ proc = run([mock_test, "-o", filename+",xml"])
+ self.assertEqual(proc.returncode, 0)
+ self.assertTrue(os.path.exists(filename))
+ self.assertGreater(os.path.getsize(filename), 0)
+ os.remove(filename)
+ # Test it will write an empty XML file if template is empty
+ def test_empty_xml_file_is_written(self):
+ my_env = {
+ **os.environ,
+ "QT_MOCK_TEST_XML_TEMPLATE_FILE": EMPTY_FILE
+ }
+ filename = os.path.join(TEMPDIR.name, "testlog.xml")
+ proc = run([mock_test, "-o", filename+",xml"],
+ env=my_env)
+ self.assertEqual(proc.returncode, 0)
+ self.assertTrue(os.path.exists(filename))
+ self.assertEqual(os.path.getsize(filename), 0)
+ os.remove(filename)
+ def test_crash_cleanly(self):
+ proc = run([mock_test],
+ env={ **os.environ, "QT_MOCK_TEST_CRASH_CLEANLY":"1" })
+ if DEBUG:
+ print("returncode:", proc.returncode)
+ self.assertProcessCrashed(proc)
+
+
+# Find in @path, files that start with @testname and end with @pattern,
+# where @pattern is a glob-like string.
+def find_test_logs(testname=None, path=None, pattern="-*[0-9].xml"):
+ if testname is None:
+ testname = os.path.basename(mock_test)
+ if path is None:
+ path = TEMPDIR.name
+ pattern = os.path.join(path, testname + pattern)
+ logfiles = glob.glob(pattern)
+ if DEBUG:
+ print(f"Test ({testname}) logfiles found: ", logfiles)
+ return logfiles
+
+# Test regular invocations of qt-testrunner.
+class Test_testrunner(unittest.TestCase):
+ def setUp(self):
+ state_file = os.environ["QT_MOCK_TEST_STATE_FILE"]
+ if os.path.exists(state_file):
+ os.remove(state_file)
+ # The mock_test honors only the XML output arguments; the rest are ignored.
+ old_logfiles = find_test_logs(pattern="*.xml")
+ for fname in old_logfiles:
+ os.remove(os.path.join(TEMPDIR.name, fname))
+ self.env = dict(os.environ)
+ self.testrunner_args = []
+ def prepare_env(self, run_list=None):
+ if run_list is not None:
+ self.env['QT_MOCK_TEST_RUN_LIST'] = ",".join(run_list)
+ def run2(self):
+ return run_testrunner(testrunner_args=self.testrunner_args, env=self.env)
+
+ def test_simple_invocation(self):
+ # All tests pass.
+ proc = self.run2()
+ self.assertEqual(proc.returncode, 0)
+ def test_always_pass(self):
+ self.prepare_env(run_list=["always_pass"])
+ proc = self.run2()
+ self.assertEqual(proc.returncode, 0)
+ def test_output_files_are_generated(self):
+ proc = self.run2()
+ xml_output_files = find_test_logs()
+ self.assertEqual(len(xml_output_files), 1)
+ def test_always_fail(self):
+ self.prepare_env(run_list=["always_fail"])
+ proc = self.run2()
+ # TODO verify that re-runs==max_repeats
+ self.assertEqual(proc.returncode, 2)
+ def test_flaky_pass_1(self):
+ self.prepare_env(run_list=["always_pass,fail_then_pass:1"])
+ proc = self.run2()
+ self.assertEqual(proc.returncode, 0)
+ def test_flaky_pass_5(self):
+ self.prepare_env(run_list=["always_pass,fail_then_pass:1,fail_then_pass:5"])
+ proc = self.run2()
+ self.assertEqual(proc.returncode, 0)
+ def test_flaky_fail(self):
+ self.prepare_env(run_list=["always_pass,fail_then_pass:6"])
+ proc = self.run2()
+ self.assertEqual(proc.returncode, 2)
+ def test_flaky_pass_fail(self):
+ self.prepare_env(run_list=["always_pass,fail_then_pass:1,fail_then_pass:6"])
+ proc = self.run2()
+ # TODO verify that one func was re-run and passed but the other failed.
+ self.assertEqual(proc.returncode, 2)
+ def test_initTestCase_fail_crash(self):
+ self.prepare_env(run_list=["initTestCase,always_pass"])
+ proc = self.run2()
+ self.assertEqual(proc.returncode, 3)
+ def test_fail_then_crash(self):
+ self.prepare_env(run_list=["fail_then_crash"])
+ proc = self.run2()
+ self.assertEqual(proc.returncode, 3)
+
+ # By testing --no-extra-args, we ensure qt-testrunner works for
+ # tst_selftests and the other NON_XML_GENERATING_TESTS.
+ def test_no_extra_args_pass(self):
+ self.testrunner_args += ["--no-extra-args"]
+ proc = self.run2()
+ self.assertEqual(proc.returncode, 0)
+ def test_no_extra_args_fail(self):
+ self.prepare_env(run_list=["always_fail"])
+ self.testrunner_args += ["--no-extra-args"]
+ proc = self.run2()
+ self.assertEqual(proc.returncode, 3)
+ def test_no_extra_args_reruns_only_once_1(self):
+ self.prepare_env(run_list=["fail_then_pass:1"])
+ self.testrunner_args += ["--no-extra-args"]
+ proc = self.run2()
+ # The 1st rerun PASSed.
+ self.assertEqual(proc.returncode, 0)
+ def test_no_extra_args_reruns_only_once_2(self):
+ self.prepare_env(run_list=["fail_then_pass:2"])
+ self.testrunner_args += ["--no-extra-args"]
+ proc = self.run2()
+ # We never re-run more than once, so the exit code shows FAIL.
+ self.assertEqual(proc.returncode, 3)
+
+ # If no XML file is found by qt-testrunner, it is usually considered a
+ # CRASH and the whole test is re-run. Even when the return code is zero.
+ # It is a PASS only if the test is not capable of XML output (see no_extra_args above).
+ def test_no_xml_log_written_pass_crash(self):
+ del self.env["QT_MOCK_TEST_XML_TEMPLATE_FILE"]
+ self.prepare_env(run_list=["always_pass"])
+ proc = self.run2()
+ self.assertEqual(proc.returncode, 3)
+ # On the 2nd iteration of the full test, both of the tests pass.
+ # Still it's a CRASH because no XML file was found.
+ def test_no_xml_log_written_fail_then_pass_crash(self):
+ del self.env["QT_MOCK_TEST_XML_TEMPLATE_FILE"]
+ self.prepare_env(run_list=["always_pass,fail_then_pass:1"])
+ proc = self.run2()
+ # TODO verify that the whole test has run twice.
+ self.assertEqual(proc.returncode, 3)
+ # Even after 2 iterations of the full test we still get failures but no XML file,
+ # and this is considered a CRASH.
+ def test_no_xml_log_written_crash(self):
+ del self.env["QT_MOCK_TEST_XML_TEMPLATE_FILE"]
+ self.prepare_env(run_list=["fail_then_pass:2"])
+ proc = self.run2()
+ self.assertEqual(proc.returncode, 3)
+
+ def test_empty_xml_crash_1(self):
+ self.env["QT_MOCK_TEST_XML_TEMPLATE_FILE"] = EMPTY_FILE
+ self.prepare_env(run_list=["always_pass"])
+ proc = self.run2()
+ self.assertEqual(proc.returncode, 3)
+ def test_empty_xml_crash_2(self):
+ self.env["QT_MOCK_TEST_XML_TEMPLATE_FILE"] = EMPTY_FILE
+ self.prepare_env(run_list=["always_fail"])
+ proc = self.run2()
+ self.assertEqual(proc.returncode, 3)
+
+ # A qFatal message should be treated as a crash in all cases.
+ def test_qfatal_crash_1(self):
+ fatal_xml_message = """
+ <Message type="qfatal" file="" line="0">
+ <DataTag><![CDATA[modal]]></DataTag>
+ <Description><![CDATA[Failed to initialize graphics backend for OpenGL.]]></Description>
+ </Message>
+ """
+ logfile = os.path.join(TEMPDIR.name, os.path.basename(mock_test) + ".xml")
+ write_xml_log(logfile, failure=None, inject_message=fatal_xml_message)
+ del self.env["QT_MOCK_TEST_XML_TEMPLATE_FILE"]
+ self.env["QT_TESTRUNNER_DEBUG_NO_UNIQUE_OUTPUT_FILENAME"] = "1"
+ self.prepare_env(run_list=["always_pass"])
+ proc = self.run2()
+ self.assertEqual(proc.returncode, 3)
+ def test_qfatal_crash_2(self):
+ fatal_xml_message = """
+ <Message type="qfatal" file="" line="0">
+ <DataTag><![CDATA[modal]]></DataTag>
+ <Description><![CDATA[Failed to initialize graphics backend for OpenGL.]]></Description>
+ </Message>
+ """
+ logfile = os.path.join(TEMPDIR.name, os.path.basename(mock_test) + ".xml")
+ write_xml_log(logfile, failure="always_fail", inject_message=fatal_xml_message)
+ del self.env["QT_MOCK_TEST_XML_TEMPLATE_FILE"]
+ self.env["QT_TESTRUNNER_DEBUG_NO_UNIQUE_OUTPUT_FILENAME"] = "1"
+ self.prepare_env(run_list=["always_pass,always_fail"])
+ proc = self.run2()
+ self.assertEqual(proc.returncode, 3)
+
+ def test_qwarn_is_ignored_1(self):
+ qwarn_xml_message = """
+ <Message type="qwarn" file="" line="0">
+ <DataTag><![CDATA[modal]]></DataTag>
+ <Description><![CDATA[Failed to create RHI (backend 2)]]></Description>
+ </Message>
+ """
+ logfile = os.path.join(TEMPDIR.name, os.path.basename(mock_test) + ".xml")
+ write_xml_log(logfile, failure=None, inject_message=qwarn_xml_message)
+ del self.env["QT_MOCK_TEST_XML_TEMPLATE_FILE"]
+ self.env["QT_TESTRUNNER_DEBUG_NO_UNIQUE_OUTPUT_FILENAME"] = "1"
+ self.prepare_env(run_list=["always_pass"])
+ proc = self.run2()
+ self.assertEqual(proc.returncode, 0)
+ def test_qwarn_is_ignored_2(self):
+ fatal_xml_message = """
+ <Message type="qfatal" file="" line="0">
+ <DataTag><![CDATA[modal]]></DataTag>
+ <Description><![CDATA[Failed to initialize graphics backend for OpenGL.]]></Description>
+ </Message>
+ <Message type="qwarn" file="" line="0">
+ <DataTag><![CDATA[modal]]></DataTag>
+ <Description><![CDATA[Failed to create RHI (backend 2)]]></Description>
+ </Message>
+ """
+ logfile = os.path.join(TEMPDIR.name, os.path.basename(mock_test) + ".xml")
+ write_xml_log(logfile, failure=None, inject_message=fatal_xml_message)
+ del self.env["QT_MOCK_TEST_XML_TEMPLATE_FILE"]
+ self.env["QT_TESTRUNNER_DEBUG_NO_UNIQUE_OUTPUT_FILENAME"] = "1"
+ self.prepare_env(run_list=["always_pass"])
+ proc = self.run2()
+ self.assertEqual(proc.returncode, 3)
+
+ # If a test returns success but XML contains failures, it's a CRASH.
+ def test_wrong_xml_log_written_1_crash(self):
+ logfile = os.path.join(TEMPDIR.name, os.path.basename(mock_test) + ".xml")
+ write_xml_log(logfile, failure="always_fail")
+ del self.env["QT_MOCK_TEST_XML_TEMPLATE_FILE"]
+ self.env["QT_TESTRUNNER_DEBUG_NO_UNIQUE_OUTPUT_FILENAME"] = "1"
+ self.prepare_env(run_list=["always_pass"])
+ proc = self.run2()
+ self.assertEqual(proc.returncode, 3)
+ # If a test returns failure but XML contains only pass, it's a CRASH.
+ def test_wrong_xml_log_written_2_crash(self):
+ logfile = os.path.join(TEMPDIR.name, os.path.basename(mock_test) + ".xml")
+ write_xml_log(logfile)
+ del self.env["QT_MOCK_TEST_XML_TEMPLATE_FILE"]
+ self.env["QT_TESTRUNNER_DEBUG_NO_UNIQUE_OUTPUT_FILENAME"] = "1"
+ self.prepare_env(run_list=["always_fail"])
+ proc = self.run2()
+ self.assertEqual(proc.returncode, 3)
+
+ def create_wrapper(self, filename, content=None):
+ if not content:
+ content='exec "$@"'
+ filename = os.path.join(TEMPDIR.name, filename)
+ with open(filename, "w") as f:
+ f.write(f'#!/bin/sh\n{content}\n')
+ self.wrapper_script = f.name
+ os.chmod(self.wrapper_script, 0o700)
+
+ # Test that it re-runs the full executable in case of crash, even if the
+ # XML file is valid and shows one specific test failing.
+ def test_crash_reruns_full_QTQAINFRA_5226(self):
+ self.env["QT_MOCK_TEST_RUN_LIST"] = "always_fail"
+ # Tell qt_mock_test to crash after writing a proper XML file.
+ self.env["QT_MOCK_TEST_CRASH_CLEANLY"] = "1"
+ proc = self.run2()
+ # Verify qt-testrunner exited with 3 which means CRASH.
+ self.assertEqual(proc.returncode, 3)
+ # Verify that the full executable was re-run (2 runs in total),
+ # instead of individual functions being re-run up to 5 times.
+ xml_output_files = find_test_logs()
+ self.assertEqual(len(xml_output_files), 2)
+
+ # Test that qt-testrunner detects the correct executable name even if we
+ # use a special wrapper script, and that it uses that in the XML log filename.
+ @unittest.skipUnless(os.name == "posix", "Wrapper script needs POSIX shell")
+ def test_wrapper(self):
+ self.create_wrapper("coin_vxworks_qemu_runner.sh")
+ proc = run_testrunner(wrapper_script=self.wrapper_script,
+ env=self.env)
+ self.assertEqual(proc.returncode, 0)
+ xml_output_files = find_test_logs()
+ self.assertEqual(len(xml_output_files), 1)
+
+ # The "androidtestrunner" wrapper is special. It expects the QTest arguments after "--".
+ # So our mock androidtestrunner wrapper ignores everything before "--"
+ # and executes our hardcoded mock_test with the arguments that follow.
+ def create_mock_anroidtestrunner_wrapper(self):
+ self.create_wrapper("androidtestrunner", content=
+ 'while [ "$1" != "--" ]; do shift; done; shift; exec {} "$@"'.format(mock_test))
+
+ @unittest.skipUnless(os.name == "posix", "Wrapper script needs POSIX shell")
+ def test_androidtestrunner_with_aab(self):
+ self.create_mock_anroidtestrunner_wrapper()
+ # Copied from our CI logs. The only relevant option is --aab.
+ androidtestrunner_args= [
+ '--path', '/home/qt/work/qt/qtdeclarative_standalone_tests/tests/auto/quickcontrols/qquickpopup/android-build-tst_qquickpopup',
+ '--adb', '/opt/android/sdk/platform-tools/adb', '--skip-install-root',
+ '--ndk-stack', '/opt/android/android-ndk-r27c/ndk-stack',
+ '--manifest', '/home/qt/work/qt/qtdeclarative_standalone_tests/tests/auto/quickcontrols/qquickpopup/android-build-tst_qquickpopup/app/AndroidManifest.xml',
+ '--make', '"/opt/cmake-3.30.5/bin/cmake" --build /home/qt/work/qt/qtdeclarative_standalone_tests --target tst_qquickpopup_make_aab',
+ '--aab', '/home/qt/work/qt/qtdeclarative_standalone_tests/tests/auto/quickcontrols/qquickpopup/android-build-tst_qquickpopup/tst_qquickpopup.aab',
+ '--bundletool', '/opt/bundletool/bundletool', '--timeout', '1425'
+ ]
+ # In COIN CI, TESTRUNNER="qt-testrunner.py --". That's why we append "--".
+ proc = run_testrunner(testrunner_args=["--"],
+ wrapper_script=self.wrapper_script,
+ wrapper_args=androidtestrunner_args,
+ env=self.env)
+ self.assertEqual(proc.returncode, 0)
+ xml_output_files = find_test_logs("tst_qquickpopup")
+ self.assertEqual(len(xml_output_files), 1)
+ # similar to above but with "--apk"
+ @unittest.skipUnless(os.name == "posix", "Wrapper script needs POSIX shell")
+ def test_androidtestrunner_with_apk(self):
+ self.create_mock_anroidtestrunner_wrapper()
+ androidtestrunner_args= ['--blah', '--apk', '/whatever/waza.apk', 'blue']
+ proc = run_testrunner(testrunner_args=["--"],
+ wrapper_script=self.wrapper_script,
+ wrapper_args=androidtestrunner_args,
+ env=self.env)
+ self.assertEqual(proc.returncode, 0)
+ xml_output_files = find_test_logs("waza")
+ self.assertEqual(len(xml_output_files), 1)
+ # Similar to the above, but with neither "--apk" nor "--aab"; qt-testrunner exits with an error.
+ @unittest.skipUnless(os.name == "posix", "Wrapper script needs POSIX shell")
+ def test_androidtestrunner_fail_to_detect_filename(self):
+ self.create_mock_anroidtestrunner_wrapper()
+ androidtestrunner_args= ['--blah', '--argh', '/whatever/waza.apk', 'waza.aab']
+ proc = run_testrunner(testrunner_args=["--"],
+ wrapper_script=self.wrapper_script,
+ wrapper_args=androidtestrunner_args,
+ env=self.env)
+ self.assertEqual(proc.returncode, 1)
+ xml_output_files = find_test_logs("waza")
+ self.assertEqual(len(xml_output_files), 0)
+
+
+# Test qt-testrunner script with an existing XML log file:
+# qt-testrunner.py qt_mock_test.py --parse-xml-testlog file.xml
+# qt-testrunner should repeat the testcases that are logged as
+# failures and fail or pass depending on how the testcases behave.
+# Different XML files are generated for the following test cases.
+# + No failure logged. qt-testrunner should exit(0)
+# + The "always_pass" test has failed. qt-testrunner should exit(0).
+# + The "always_fail" test has failed. qt-testrunner should exit(2).
+# + The "always_crash" test has failed. qt-testrunner should exit(3)
+# since the re-run will crash.
+# + The "fail_then_pass:2" test failed. qt-testrunner should exit(0).
+# + The "fail_then_pass:5" test failed. qt-testrunner should exit(2).
+# + The "initTestCase" failed which is listed as NO_RERUN thus
+# qt-testrunner should exit(3).
+class Test_testrunner_with_xml_logfile(unittest.TestCase):
+ # Runs before every single test function, creating a unique temp file.
+ def setUp(self):
+ (_handle, self.xml_file) = mkstemp(
+ suffix=".xml", prefix="qt_mock_test-log-",
+ dir=TEMPDIR.name)
+ os.close(_handle)
+ if os.path.exists(os.environ["QT_MOCK_TEST_STATE_FILE"]):
+ os.remove(os.environ["QT_MOCK_TEST_STATE_FILE"])
+ def tearDown(self):
+ os.remove(self.xml_file)
+ del self.xml_file
+ # Run testrunner specifically for the tests here, with --parse-xml-testlog.
+ def run3(self, testrunner_args=None):
+ return run_testrunner(self.xml_file,
+ testrunner_args=testrunner_args)
+
+ def test_no_failure(self):
+ write_xml_log(self.xml_file, failure=None)
+ proc = self.run3()
+ self.assertEqual(proc.returncode, 0)
+ def test_always_pass_failed(self):
+ write_xml_log(self.xml_file, failure="always_pass")
+ proc = self.run3()
+ self.assertEqual(proc.returncode, 0)
+ def test_always_pass_failed_max_repeats_0(self):
+ write_xml_log(self.xml_file, failure="always_pass")
+ proc = self.run3(testrunner_args=["--max-repeats", "0"])
+ self.assertEqual(proc.returncode, 2)
+ def test_always_fail_failed(self):
+ write_xml_log(self.xml_file, failure="always_fail")
+ proc = self.run3()
+ self.assertEqual(proc.returncode, 2)
+ # Assert that one of the re-runs was in verbose mode
+ matches = re.findall("VERBOSE RUN",
+ proc.stdout.decode())
+ self.assertEqual(len(matches), 1)
+ # Assert that the environment was altered too
+ self.assertIn("QT_LOGGING_RULES", proc.stdout.decode())
+ def test_always_crash_crashed(self):
+ write_xml_log(self.xml_file, failure="always_crash")
+ proc = self.run3()
+ self.assertEqual(proc.returncode, 3)
+ def test_fail_then_pass_2_failed(self):
+ write_xml_log(self.xml_file, failure="fail_then_pass:2")
+ proc = self.run3()
+ self.assertEqual(proc.returncode, 0)
+ def test_fail_then_pass_5_failed(self):
+ write_xml_log(self.xml_file, failure="fail_then_pass:5")
+ proc = self.run3()
+ self.assertEqual(proc.returncode, 2)
+ def test_with_two_failures(self):
+ write_xml_log(self.xml_file,
+ failure=["always_pass", "fail_then_pass:2"])
+ proc = self.run3()
+ self.assertEqual(proc.returncode, 0)
+ # Check that test output is properly interleaved with qt-testrunner's logging.
+ matches = re.findall(r"(PASS|FAIL!).*\n.*Test process exited with code",
+ proc.stdout.decode())
+ self.assertEqual(len(matches), 4)
+ def test_initTestCase_fail_crash(self):
+ write_xml_log(self.xml_file, failure="initTestCase")
+ proc = self.run3()
+ self.assertEqual(proc.returncode, 3)
+
+
+if __name__ == "__main__":
+
+ DEBUG = False
+ if "--debug" in sys.argv:
+ sys.argv.remove("--debug")
+ DEBUG = True
+
+ # We set failfast=True as we do not want the test suite to continue if the
+ # tests of qt_mock_test failed. The next ones depend on it.
+ unittest.main(failfast=True)
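
Taken together, the assertions in this suite pin down the exit-code convention that qt-testrunner is expected to follow. The mapping below is just a reading aid distilled from the test cases above, not a normative specification; the suite itself needs nothing beyond Python 3 and can be run by hand with `python3 tst_qt_testrunner.py -v` (the arguments the CMake wrapper uses), adding `--debug` to echo every sub-command and its output.

    # Exit codes asserted throughout this suite (distilled, not normative):
    QT_TESTRUNNER_EXIT_CODES = {
        0: "all test functions passed, possibly after re-running flaky ones",
        1: "qt-testrunner error, e.g. the test log filename could not be detected",
        2: "a test function kept failing after the allowed re-runs",
        3: "crash, qFatal, or a missing/inconsistent XML log (whole executable re-run)",
    }
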