Diffstat (limited to 'src')
-rw-r--r--  src/qml/memory/design.md  |  28
-rw-r--r--  src/qml/memory/qv4mm.cpp  | 193
-rw-r--r--  src/qml/memory/qv4mm_p.h  |   8
3 files changed, 170 insertions, 59 deletions
diff --git a/src/qml/memory/design.md b/src/qml/memory/design.md
index f75cd7d7f9..1b4eb8eac4 100644
--- a/src/qml/memory/design.md
+++ b/src/qml/memory/design.md
@@ -5,6 +5,7 @@ ChangeLog
---------
- < 6.8: There was little documentation, and the gc was STW mark&sweep
- 6.8: The gc became incremental (with a stop-the-world sweep phase)
+- 6.8: Sweep was made incremental, too
Glossary:
@@ -28,7 +29,7 @@ Glossary:
Overview:
---------
-Since Qt 6.7, V4 uses an incremental, precise mark-and-sweep gc algorithm. It is neither generational nor moving.
+Since Qt 6.8, V4 uses an incremental, precise mark-and-sweep gc algorithm. It is neither generational nor moving.
In the mark phase, each heap-item can be in one of three states:
1. unvisited ("white"): The gc has not seen this item at all
@@ -72,22 +73,15 @@ To facilitate incremental garbage collection, the gc algorithm is divided into t
7. markWeakValues: An interruptible phase which takes care of marking the QObjectWrappers
8. markDrain: An interruptible phase. While the MarkStack is not empty, the marking algorithm runs.
9. markReady: An atomic phase which currently does nothing, but could be used for e.g. logging statistics
-10. sweepPhase: An atomic phase, in which the stack is rescanned, the MarkStack is drained once more, and then the actual sweep algorithm is running, freeing dead objects.
-11. invalid, the "not-running" stage of the state machine.
-
-The transitions between the states look as following (D == done, T == can stay in state if there's a timeout, NW == No work):
-```
- NW __->-__ _____ NW
- / \ __ / |
- D D D | D D v / D v D D D
-1 -> 2 -> 3 -> 4--> 5 -->6---->7----->8---->9--->10---->11
- ^ T T T |
- \___ __ | restart gc
- \ |
- ----------------------------------------------/
-
-```
-
+10. initCallDestroyObjects: An atomic phase in which the stack is rescanned and the MarkStack is drained once more. This ensures that all live objects are really marked.
+    Afterwards, the iteration over all the QObjectWrappers is prepared.
+11. callDestroyObject: An interruptible phase, where we call destroyObject on all non-marked QObjectWrappers.
+12. freeWeakMaps: An atomic phase in which we remove references to dead objects from live weak maps.
+13. freeWeakSets: Same as the last phase, but for weak sets.
+14. handleQObjectWrappers: An atomic phase in which pending references to QObjectWrappers are cleared.
+15. multiple sweep phases: Atomic phases in which we do the actual sweeping to free up memory. Note that this will also call destroy on objects marked with `V4_NEEDS_DESTROY`. There is one phase for the various allocators (identifier table, block allocator, huge item allocator, IC allocator).
+16. updateMetaData: Updates the black bitmaps and the usage statistics, and marks the gc cycle as done.
+17. invalid: The "not-running" state of the state machine.
To avoid constantly having to query the timer, even interruptible phases run for a fixed number of steps before checking whether there's a timeout.
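
As a rough illustration of that batching idea, here is a minimal, self-contained sketch — not the actual V4 code; `WorkQueue`, `batchSize`, and `runInterruptiblePhase` are made-up names — of how an interruptible phase can avoid querying the clock on every step:

```cpp
// Minimal sketch of a batched, interruptible gc phase: the deadline is only
// consulted once per batch of work, not on every single item.
#include <chrono>
#include <deque>

struct WorkQueue { std::deque<int> items; };   // stand-in for pending gc work

enum class PhaseResult { Done, Interrupted };

PhaseResult runInterruptiblePhase(WorkQueue &queue,
                                  std::chrono::steady_clock::time_point deadline)
{
    constexpr int batchSize = 1024;            // fixed number of steps per batch
    while (!queue.items.empty()) {
        for (int i = 0; i < batchSize && !queue.items.empty(); ++i)
            queue.items.pop_front();           // one unit of gc work
        if (std::chrono::steady_clock::now() >= deadline)
            return PhaseResult::Interrupted;   // resume in the next gc slice
    }
    return PhaseResult::Done;
}
```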
diff --git a/src/qml/memory/qv4mm.cpp b/src/qml/memory/qv4mm.cpp
index adea75b198..c553aae17e 100644
--- a/src/qml/memory/qv4mm.cpp
+++ b/src/qml/memory/qv4mm.cpp
@@ -784,21 +784,105 @@ GCState markDrain(GCStateMachine *that, ExtraData &)
GCState markReady(GCStateMachine *, ExtraData &)
{
//Possibility to do some clean up, stat printing, etc...
- return Sweep;
+ return InitCallDestroyObjects;
}
-GCState sweepPhase(GCStateMachine *that, ExtraData &)
+/*! \internal
+Collects new references from the stack, then drains the mark stack again.
+*/
+void redrain(GCStateMachine *that)
{
- // if we don't have a deletion barrier, then we need to rescan
that->mm->collectFromJSStack(that->mm->markStack());
that->mm->m_markStack->drain();
- if (!that->mm->gcCollectorStats) {
- that->mm->sweep();
- } else {
- that->mm->sweep(false, increaseFreedCountForClass);
+}
+
+GCState initCallDestroyObjects(GCStateMachine *that, ExtraData &stateData)
+{
+ // as we don't have a deletion barrier, we need to rescan the stack
+ redrain(that);
+ if (!that->mm->m_weakValues)
+ return FreeWeakMaps; // no need to call destroy objects
+ stateData = GCIteratorStorage { that->mm->m_weakValues->begin() };
+ return CallDestroyObjects;
+}
+GCState callDestroyObject(GCStateMachine *, ExtraData &stateData)
+{
+ PersistentValueStorage::Iterator& it = get<GCIteratorStorage>(stateData).it;
+ // avoid hitting the timer constantly by batching iterations
+ for (int i = 0; i < markLoopIterationCount; ++i) {
+ if (!it.p)
+ return FreeWeakMaps;
+ Managed *m = (*it).managed();
+ ++it;
+ if (!m || m->markBit())
+ continue;
+ // we need to call destroyObject on QObjectWrappers now, so that they can emit the destroyed
+ // signal before we start sweeping the heap
+ if (QObjectWrapper *qobjectWrapper = m->as<QObjectWrapper>())
+ qobjectWrapper->destroyObject(/*lastSweep =*/false);
+ }
+ return CallDestroyObjects;
+}
+
+void freeWeakMaps(MemoryManager *mm)
+{
+ for (auto [map, lastMap] = std::tuple {mm->weakMaps, &mm->weakMaps }; map; map = map->nextWeakMap) {
+ if (!map->isMarked())
+ continue;
+ map->removeUnmarkedKeys();
+ *lastMap = map;
+ lastMap = &map->nextWeakMap;
}
- that->mm->m_markStack.reset();
- that->mm->engine->isGCOngoing = false;
+}
+
+GCState freeWeakMaps(GCStateMachine *that, ExtraData &)
+{
+ freeWeakMaps(that->mm);
+ return FreeWeakSets;
+}
+
+void freeWeakSets(MemoryManager *mm)
+{
+ for (auto [set, lastSet] = std::tuple {mm->weakSets, &mm->weakSets}; set; set = set->nextWeakSet) {
+
+ if (!set->isMarked())
+ continue;
+ set->removeUnmarkedKeys();
+ *lastSet = set;
+ lastSet = &set->nextWeakSet;
+ }
+}
+
+GCState freeWeakSets(GCStateMachine *that, ExtraData &)
+{
+ freeWeakSets(that->mm);
+ return HandleQObjectWrappers;
+}
+
+GCState handleQObjectWrappers(GCStateMachine *that, ExtraData &)
+{
+ that->mm->cleanupDeletedQObjectWrappersInSweep();
+ return DoSweep;
+}
+
+GCState doSweep(GCStateMachine *that, ExtraData &)
+{
+ auto mm = that->mm;
+
+ mm->engine->identifierTable->sweep();
+ mm->blockAllocator.sweep();
+ mm->hugeItemAllocator.sweep(mm->gcCollectorStats ? increaseFreedCountForClass : nullptr);
+ mm->icAllocator.sweep();
+
+ // reset all black bits
+ mm->blockAllocator.resetBlackBits();
+ mm->hugeItemAllocator.resetBlackBits();
+ mm->icAllocator.resetBlackBits();
+
+ mm->usedSlotsAfterLastFullSweep = mm->blockAllocator.usedSlotsAfterLastSweep + mm->icAllocator.usedSlotsAfterLastSweep;
+ mm->gcBlocked = MemoryManager::Unblocked;
+ mm->m_markStack.reset();
+ mm->engine->isGCOngoing = false;
return Invalid;
}
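
The freeWeakMaps/freeWeakSets helpers above prune an intrusive singly linked list in place by carrying a pointer to the previous link. A standalone sketch of that pointer-to-pointer pattern — `Node` and `alive` are illustrative stand-ins, not the V4 weak map/set types:

```cpp
// Pointer-to-pointer pruning of an intrusive singly linked list, similar in
// spirit to the loops in freeWeakMaps()/freeWeakSets() above: dead nodes are
// skipped by never linking them into the surviving chain.
struct Node {
    bool alive = false;
    Node *next = nullptr;
};

void pruneDead(Node *&head)
{
    Node **lastLink = &head;     // slot that should point at the next survivor
    for (Node *node = head; node; node = node->next) {
        if (!node->alive)
            continue;            // dead: leave *lastLink alone, node is skipped
        *lastLink = node;        // previous survivor (or head) now points here
        lastLink = &node->next;
    }
    *lastLink = nullptr;         // terminate the pruned list
}
```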
@@ -862,11 +946,30 @@ MemoryManager::MemoryManager(ExecutionEngine *engine)
};
gcStateMachine->stateInfoMap[GCState::MarkReady] = {
markReady,
- true, //Break after this step, so that Sweep is only executed the next time GC is allowed
- // as sweep is not concurrent, we want to provide as much time as possible to it
+ false,
};
- gcStateMachine->stateInfoMap[GCState::Sweep] = {
- sweepPhase,
+ gcStateMachine->stateInfoMap[GCState::InitCallDestroyObjects] = {
+ initCallDestroyObjects,
+ false,
+ };
+ gcStateMachine->stateInfoMap[GCState::CallDestroyObjects] = {
+ callDestroyObject,
+ false,
+ };
+ gcStateMachine->stateInfoMap[GCState::FreeWeakMaps] = {
+ freeWeakMaps,
+ false,
+ };
+ gcStateMachine->stateInfoMap[GCState::FreeWeakSets] = {
+ freeWeakSets,
+ true, // ensure that handleQObjectWrappers runs in isolation
+ };
+ gcStateMachine->stateInfoMap[GCState::HandleQObjectWrappers] = {
+ handleQObjectWrappers,
+ false,
+ };
+ gcStateMachine->stateInfoMap[GCState::DoSweep] = {
+ doSweep,
false,
};
}
@@ -1014,23 +1117,34 @@ void MemoryManager::sweep(bool lastSweep, ClassDestroyStatsCallback classCountPt
}
}
- for (auto [map, lastMap] = std::tuple {weakMaps, &weakMaps }; map; map = map->nextWeakMap) {
- if (!map->isMarked())
- continue;
- map->removeUnmarkedKeys();
- *lastMap = map;
- lastMap = &map->nextWeakMap;
- }
+ freeWeakMaps(this);
+ freeWeakSets(this);
- for (auto [set, lastSet] = std::tuple {weakSets, &weakSets}; set; set = set->nextWeakSet) {
+ cleanupDeletedQObjectWrappersInSweep();
- if (!set->isMarked())
- continue;
- set->removeUnmarkedKeys();
- *lastSet = set;
- lastSet = &set->nextWeakSet;
+ if (!lastSweep) {
+ engine->identifierTable->sweep();
+ blockAllocator.sweep(/*classCountPtr*/);
+ hugeItemAllocator.sweep(classCountPtr);
+ icAllocator.sweep(/*classCountPtr*/);
}
+ // reset all black bits
+ blockAllocator.resetBlackBits();
+ hugeItemAllocator.resetBlackBits();
+ icAllocator.resetBlackBits();
+
+ usedSlotsAfterLastFullSweep = blockAllocator.usedSlotsAfterLastSweep + icAllocator.usedSlotsAfterLastSweep;
+ gcBlocked = false;
+}
+
+/*!
+ \internal
+ Helper function used during sweep to clean up the (to-be-freed) QObjectWrappers.
+ Used both in MemoryManager::sweep and in the corresponding gc state machine phase.
+*/
+void MemoryManager::cleanupDeletedQObjectWrappersInSweep()
+{
// onDestruction handlers may have accessed other QObject wrappers and reset their value, so ensure
// that they are all set to undefined.
for (PersistentValueStorage::Iterator it = m_weakValues->begin(); it != m_weakValues->end(); ++it) {
@@ -1063,21 +1177,6 @@ void MemoryManager::sweep(bool lastSweep, ClassDestroyStatsCallback classCountPt
++it;
}
}
-
- if (!lastSweep) {
- engine->identifierTable->sweep();
- blockAllocator.sweep(/*classCountPtr*/);
- hugeItemAllocator.sweep(classCountPtr);
- icAllocator.sweep(/*classCountPtr*/);
- }
-
- // reset all black bits
- blockAllocator.resetBlackBits();
- hugeItemAllocator.resetBlackBits();
- icAllocator.resetBlackBits();
-
- usedSlotsAfterLastFullSweep = blockAllocator.usedSlotsAfterLastSweep + icAllocator.usedSlotsAfterLastSweep;
- gcBlocked = false;
}
bool MemoryManager::shouldRunGC() const
@@ -1318,6 +1417,18 @@ void GCStateMachine::transition() {
deadline = QDeadlineTimer(timeLimit);
bool deadlineExpired = false;
while (!(deadlineExpired = deadline.hasExpired()) && state != GCState::Invalid) {
+ if (state > GCState::InitCallDestroyObjects) {
+ /* InitCallDestroyObjects is the last phase that drains the mark
+ stack by default. However, our write barrier might still put objects
+ on the markStack which reference other objects, especially when we
+ call user code triggered by Component.onDestruction, but also when
+ we run into a timeout. Therefore we redrain here.
+ We don't redrain before InitCallDestroyObjects, as that would
+ potentially lead to useless busy-work (e.g., if the last references
+ to objects are removed while the mark phase is running).
+ */
+ redrain(this);
+ }
GCStateInfo& stateInfo = stateInfoMap[int(state)];
state = stateInfo.execute(this, stateData);
if (stateInfo.breakAfter)
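
For context on how such a state machine is typically driven, here is a simplified, self-contained sketch of a deadline-bounded driver loop in the spirit of GCStateMachine::transition(); the types and names (`Driver`, `StateInfo`, ...) are illustrative and not the Qt API:

```cpp
// Simplified sketch of a deadline-driven state machine loop: each phase
// returns the next state, and a phase marked breakAfter yields control so the
// following phase starts with a fresh time slice.
#include <array>
#include <chrono>
#include <cstddef>
#include <functional>

enum class State { Mark, Sweep, Invalid, Count };

struct StateInfo {
    std::function<State()> execute;   // runs one (possibly partial) phase
    bool breakAfter = false;          // yield after this phase completes
};

struct Driver {
    std::array<StateInfo, std::size_t(State::Count)> stateInfoMap{};
    State state = State::Invalid;

    void transition(std::chrono::milliseconds timeLimit)
    {
        const auto deadline = std::chrono::steady_clock::now() + timeLimit;
        while (state != State::Invalid &&
               std::chrono::steady_clock::now() < deadline) {
            StateInfo &info = stateInfoMap[std::size_t(state)];
            state = info.execute();   // the phase decides the follow-up state
            if (info.breakAfter)
                break;                // give the next phase its own slice later
        }
        // if we ran out of time, transition() is simply called again later
    }
};
```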
diff --git a/src/qml/memory/qv4mm_p.h b/src/qml/memory/qv4mm_p.h
index a16ea580b1..1d9b03600b 100644
--- a/src/qml/memory/qv4mm_p.h
+++ b/src/qml/memory/qv4mm_p.h
@@ -38,7 +38,12 @@ enum GCState {
MarkWeakValues,
MarkDrain,
MarkReady,
- Sweep,
+ InitCallDestroyObjects,
+ CallDestroyObjects,
+ FreeWeakMaps,
+ FreeWeakSets,
+ HandleQObjectWrappers,
+ DoSweep,
Invalid,
Count,
};
@@ -346,6 +351,7 @@ private:
public:
void collectFromJSStack(MarkStack *markStack) const;
void sweep(bool lastSweep = false, ClassDestroyStatsCallback classCountPtr = nullptr);
+ void cleanupDeletedQObjectWrappersInSweep();
private:
bool shouldRunGC() const;
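
Finally, note that the `state > GCState::InitCallDestroyObjects` check in transition() relies on the enumerators above being declared in execution order. A toy illustration of that idea, with made-up enumerator names:

```cpp
// Illustration only: ordered enumerators let a single comparison select all
// phases that run after the last guaranteed mark-stack drain.
enum SketchState {
    Mark,
    InitDestroy,   // last phase that drains the mark stack unconditionally
    Destroy,
    Sweep,
    Invalid
};

constexpr bool needsRedrain(SketchState s)
{
    return s > InitDestroy;   // every later phase must redrain first
}

static_assert(!needsRedrain(Mark), "marking itself never needs a redrain");
static_assert(needsRedrain(Destroy), "later phases redrain before running");
static_assert(needsRedrain(Sweep), "later phases redrain before running");
```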