Chromium Code Reviews
| Index: third_party/WebKit/Source/platform/heap/HeapCompact.cpp |
| diff --git a/third_party/WebKit/Source/platform/heap/HeapCompact.cpp b/third_party/WebKit/Source/platform/heap/HeapCompact.cpp |
| new file mode 100644 |
| index 0000000000000000000000000000000000000000..cd9fa498c3441d4a55272bd6d04c7730f8c49178 |
| --- /dev/null |
| +++ b/third_party/WebKit/Source/platform/heap/HeapCompact.cpp |
| @@ -0,0 +1,545 @@ |
| +// Copyright 2016 Opera Software AS. All rights reserved. |
| +// Use of this source code is governed by a BSD-style license that can be |
| +// found in the LICENSE file. |
| + |
| +#include "platform/heap/HeapCompact.h" |
| + |
| +#include "platform/RuntimeEnabledFeatures.h" |
| +#include "platform/heap/Heap.h" |
| +#include "platform/heap/SparseHeapBitmap.h" |
| +#include "wtf/CurrentTime.h" |
| +#include "wtf/HashMap.h" |
| +#include "wtf/HashSet.h" |
| +#include "wtf/Vector.h" |
| + |
| +namespace blink { |
| + |
| +bool HeapCompact::s_forceCompactionGC = false; |
| + |
| +// The real worker behind heap compaction, recording references to movable |
| +// objects ("slots".) When the objects end up being compacted and moved, |
| +// relocate() will adjust the slots to point to the new location of the |
| +// object along with handling fixups for interior pointers. |
| +// |
| +// The "fixups" object is created and maintained for the lifetime of one |
| +// heap compaction-enhanced GC. |
| +class HeapCompact::MovableObjectFixups final { |
| + public: |
| + static std::unique_ptr<MovableObjectFixups> create() { |
| + return wrapUnique(new MovableObjectFixups); |
| + } |
| + |
| + ~MovableObjectFixups() {} |
| + |
| + // For the compactable arenas, record all pages belonging to them. |
| + // This is needed to handle 'interior slots', pointers that themselves |
| + // can move (independently from the reference the slot points to.) |
| + void addCompactablePage(BasePage* page) { |
| + DCHECK(!page->isLargeObjectPage()); |
| + if (!HeapCompact::isCompactableArena(page->arena()->arenaIndex())) |
| + return; |
| + |
| + m_relocatablePages.add(page); |
| + } |
| + |
| + void add(MovableReference* slot) { |
| + MovableReference reference = *slot; |
| + BasePage* refPage = pageFromObject(reference); |
| + // Nothing to compact on a large object's page. |
| + if (refPage->isLargeObjectPage()) |
| + return; |
| + |
| +#if DCHECK_IS_ON() |
| + auto it = m_fixups.find(reference); |
| + DCHECK(it == m_fixups.end() || it->value == slot); |
| +#endif |
| + m_fixups.add(reference, slot); |
| + |
| + Address slotAddress = reinterpret_cast<Address>(slot); |
| + BasePage* slotPage = reinterpret_cast<BasePage*>( |
| + blinkPageAddress(slotAddress) + blinkGuardPageSize); |
| + if (!m_relocatablePages.contains(slotPage)) |
|
haraken
2016/12/06 13:30:39
Add LIKELY (or a comment) for documentation purpos
sof
2016/12/06 21:39:34
Yes, LIKELY() is warranted. Added.
|
| + return; |
| +#if ENABLE(ASSERT) |
|
haraken
2016/12/06 13:30:38
Remove?
sof
2016/12/06 21:39:34
The ENABLE(ASSERT) is needed to unbreak CrOS bots
haraken
2016/12/07 08:55:11
Then can we use #if DCHECK_IS_ON()? I guess ENABLE
sof
2016/12/07 10:45:08
That would be redundant; the situation is a relati
|
| + DCHECK(slotPage->contains(slotAddress)); |
| +#endif |
| + // Slot resides on a compactable arena's page. |
| + // => It is an 'interior slot' (interior to a movable backing store.) |
| + // Record it as an interior slot, which entails: |
| + // |
| + // - Storing it in the interior map, which maps the slot to |
| + // its (eventual) location. Initially nullptr. |
| + // - Mark it as being interior pointer within the page's |
| + // "interior" bitmap. This bitmap is used when moving a backing |
| + // store, quickly/ier checking if interior slots will have to |
| + // be additionally redirected. |
| + addInteriorFixup(slotAddress, slot); |
| + } |
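For readers new to the scheme, the "interior slot" distinction above is the subtle part: such a slot itself lives inside a movable backing store, so its own address can change before the object it points to is moved. A minimal sketch of the classification step in plain standard C++ (kToyPageSize, pageFromAddress and isInteriorSlot are illustrative names, not Blink APIs; the real code also offsets past a guard page):

  #include <cstdint>
  #include <unordered_set>

  // Toy model of the page lookup above: heap pages are fixed-size, aligned
  // blocks, so the page an address lives on can be computed by masking it.
  constexpr uintptr_t kToyPageSize = 1 << 17;  // stand-in for blinkPageSize

  uintptr_t pageFromAddress(uintptr_t address) {
    return address & ~(kToyPageSize - 1);
  }

  // |movablePages| plays the role of m_relocatablePages: the pages of all
  // compactable arenas. A slot whose own page is movable is an interior slot.
  bool isInteriorSlot(const std::unordered_set<uintptr_t>& movablePages,
                      void** slot) {
    return movablePages.count(
               pageFromAddress(reinterpret_cast<uintptr_t>(slot))) > 0;
  }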
| + |
| + void addFixupCallback(MovableReference reference, |
| + MovingObjectCallback callback, |
| + void* callbackData) { |
| + DCHECK(!m_fixupCallbacks.contains(reference)); |
| + m_fixupCallbacks.add(reference, std::pair<void*, MovingObjectCallback>( |
| + callbackData, callback)); |
| + } |
| + |
| + void relocateInteriorFixups(Address from, Address to, size_t size) { |
| + SparseHeapBitmap* range = m_interiors->hasRange(from, size); |
| + if (LIKELY(!range)) |
| + return; |
| + |
| + // Scan through the payload, looking for interior pointer slots |
| + // to adjust. If the backing store of such an interior slot hasn't |
| + // been moved already, update the slot -> real location mapping. |
| + // When the backing store is eventually moved, it'll use that location. |
| + // |
| + for (size_t offset = 0; offset < size; offset += sizeof(void*)) { |
| + if (!range->isSet(from + offset)) |
| + continue; |
| + MovableReference* slot = |
| + reinterpret_cast<MovableReference*>(from + offset); |
| + auto it = m_interiorFixups.find(slot); |
| + if (it == m_interiorFixups.end()) |
| + continue; |
| + |
| + // TODO: with the right sparse bitmap representation, it could be possible |
| + // to quickly determine if we've now stepped past the last address |
| + // that needed fixup in [address, address + size). Breaking out of this |
| + // loop might be worth doing for hash table backing stores with a very |
| + // low load factor. But interior fixups are rare. |
| + |
| + // If |slot|'s mapping is set, then the slot has been adjusted already. |
| + if (it->value) |
|
haraken
2016/12/06 13:30:39
Maybe this should not happen if we apply my commen
|
| + continue; |
| + Address fixup = to + offset; |
| + LOG_HEAP_COMPACTION("Range interior fixup: %p %p %p\n", from + offset, |
| + it->value, fixup); |
| + // Fill in the relocated location of the original slot at |slot|. |
| + // When the backing store corresponding to |slot| is eventually |
| + // moved/compacted, it'll update |to + offset| with a pointer to the |
| + // moved backing store. |
| + m_interiorFixups.set(slot, fixup); |
|
haraken
2016/12/06 13:30:38
If we apply my comment at line 131, we should call
|
| + } |
| + } |
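The loop above visits the moved payload one pointer-sized word at a time and consults the sparse bitmap before paying for a hash lookup. A sketch of the same pattern, with a plain std::vector<bool> standing in for SparseHeapBitmap and a flat map playing the role of m_interiorFixups (all names and types below are illustrative):

  #include <cstddef>
  #include <cstdint>
  #include <unordered_map>
  #include <vector>

  // |interiorBits| holds one bit per pointer-sized word of the old payload;
  // set bits mark words that were registered as interior slots.
  // |pendingFixups| maps old slot addresses to their resolved new locations
  // (0 while unresolved).
  void relocateInteriorSlots(
      uintptr_t from,
      uintptr_t to,
      size_t size,
      const std::vector<bool>& interiorBits,
      std::unordered_map<uintptr_t, uintptr_t>& pendingFixups) {
    for (size_t offset = 0; offset < size; offset += sizeof(void*)) {
      if (!interiorBits[offset / sizeof(void*)])
        continue;  // Word was never registered as an interior slot.
      auto it = pendingFixups.find(from + offset);
      if (it == pendingFixups.end() || it->second)
        continue;  // Unknown slot, or already resolved.
      // Record where the slot now lives; when the slot's target is moved
      // later, the new object address will be written to |to + offset|.
      it->second = to + offset;
    }
  }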
| + |
| + void relocate(Address from, Address to) { |
|
haraken
2016/12/06 13:30:39
Can we add an assert to check that |from| is in m_
sof
2016/12/06 21:39:34
Done.
|
| + auto it = m_fixups.find(from); |
| + DCHECK(it != m_fixups.end()); |
| + MovableReference* slot = reinterpret_cast<MovableReference*>(it->value); |
| + auto interior = m_interiorFixups.find(slot); |
| + if (interior != m_interiorFixups.end()) { |
|
haraken
2016/12/06 13:30:38
Hmm, I still don't quite understand this. Can we r
sof
2016/12/06 21:39:34
We can't enforce an object graph heap ordering. i.
haraken
2016/12/07 08:55:11
If a backing store A is contained in a backing sto
sof
2016/12/07 10:45:08
I don't think your assumptions hold, we're compact
haraken
2016/12/07 12:44:33
Ah, thanks, now I understand!
|
| + MovableReference* slotLocation = |
| + reinterpret_cast<MovableReference*>(interior->value); |
| + if (!slotLocation) { |
| + m_interiorFixups.set(slot, to); |
| + } else { |
| + LOG_HEAP_COMPACTION("Redirected slot: %p => %p\n", slot, slotLocation); |
| + slot = slotLocation; |
| + } |
| + } |
| + // If the slot has subsequently been updated (a prefinalizer or a |
| + // destructor may have mutated and expanded/shrunk the collection), do |
| + // not update and relocate the slot -- |from| is no longer valid and |
| + // referenced. |
| + // |
| + // The slot's contents may also have been cleared during weak processing; |
| + // no work to be done in that case either. |
| + if (UNLIKELY(*slot != from)) { |
|
haraken
2016/12/06 13:30:38
Just in case, can we add the following assert?
sof
2016/12/06 21:39:34
I think so -- a non-null *slot must be pointing to
|
| + LOG_HEAP_COMPACTION( |
| + "No relocation: slot = %p, *slot = %p, from = %p, to = %p\n", slot, |
| + *slot, from, to); |
| + return; |
| + } |
| + *slot = to; |
| + |
| + size_t size = 0; |
| + auto callback = m_fixupCallbacks.find(from); |
| + if (UNLIKELY(callback != m_fixupCallbacks.end())) { |
| + size = HeapObjectHeader::fromPayload(to)->payloadSize(); |
| + callback->value.second(callback->value.first, from, to, size); |
| + } |
| + |
| + if (LIKELY(!m_interiors)) |
|
haraken
2016/12/06 13:30:38
I'm just curious but is this really LIKELY? I gues
sof
2016/12/06 21:39:34
You're right, the (un)LIKELY() is used one level t
|
| + return; |
| + |
| + if (!size) |
| + size = HeapObjectHeader::fromPayload(to)->payloadSize(); |
| + relocateInteriorFixups(from, to, size); |
| + } |
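Taken together with add() above, the basic record, move, fix-up cycle can be illustrated with a self-contained toy (plain C++ containers, no interior slots or callbacks); the point of interest is the final guard, which leaves a slot alone once its contents no longer refer to |from|:

  #include <cassert>
  #include <cstring>
  #include <unordered_map>

  struct ToyFixups {
    // Object address -> slot that points to it (one slot per object here).
    std::unordered_map<void*, void**> fixups;

    void add(void** slot) { fixups[*slot] = slot; }

    void relocate(void* from, void* to) {
      auto it = fixups.find(from);
      assert(it != fixups.end());
      void** slot = it->second;
      // The collection may have been mutated, shrunk or weakly cleared after
      // the slot was recorded; in that case |*slot| no longer refers to
      // |from| and must not be overwritten.
      if (*slot != from)
        return;
      *slot = to;
    }
  };

  int main() {
    char oldBacking[16] = "payload";
    char newBacking[16];
    void* slot = oldBacking;  // the movable reference

    ToyFixups fixups;
    fixups.add(&slot);                        // during marking
    std::memcpy(newBacking, oldBacking, 16);  // compaction: move the object
    fixups.relocate(oldBacking, newBacking);  // then fix up the slot
    assert(slot == newBacking);
    return 0;
  }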
| + |
| + void addInteriorFixup(Address interior, MovableReference* slot) { |
|
haraken
2016/12/06 13:30:38
Move this function up to just below add().
haraken
2016/12/06 13:30:38
Maybe can we drop the |interior| parameter? |inter
sof
2016/12/06 21:39:34
Ah yes, quite redundant to pass that separately.
|
| + auto it = m_interiorFixups.find(slot); |
| + // Ephemeron fixpoint iterations may cause repeated |
| + // registrations. |
| + DCHECK(it == m_interiorFixups.end() || !it->value); |
| + if (UNLIKELY(it != m_interiorFixups.end() && !it->value)) |
| + return; |
| + m_interiorFixups.add(slot, nullptr); |
| + LOG_HEAP_COMPACTION("Interior: %p\n", interior); |
| + if (!m_interiors) { |
| + m_interiors = SparseHeapBitmap::create(interior); |
| + return; |
| + } |
| + m_interiors->add(interior); |
| + } |
| + |
| + void addRelocation(MovableReference* slot) { |
| + MovableReference reference = *slot; |
| + if (!m_fixups.contains(reference)) { |
| + // Record the interior pointer. |
| + addInteriorFixup(reinterpret_cast<Address>(reference), slot); |
| + } |
| + |
| + BasePage* heapPage = pageFromObject(reference); |
| + DCHECK(heapPage); |
| + DCHECK(!heapPage->isLargeObjectPage()); |
| + // For now, the heap objects we're adding relocations for are assumed |
| + // to be residing in a compactable heap. There's no reason why it must be |
| + // so, just a sanity checking assert while phasing in this extra set of |
| + // relocations. |
| + DCHECK(m_relocatablePages.contains(heapPage)); |
| + |
| + NormalPage* normalPage = static_cast<NormalPage*>(heapPage); |
| + auto perHeap = m_externalRelocations.find(normalPage->arenaForNormalPage()); |
| + if (perHeap == m_externalRelocations.end()) { |
| + Vector<MovableReference*> relocations; |
| + relocations.append(slot); |
| + ExternalRelocations table; |
| + table.add(*slot, relocations); |
| + m_externalRelocations.add(normalPage->arenaForNormalPage(), table); |
| + return; |
| + } |
| + auto entry = perHeap->value.find(*slot); |
| + if (entry == perHeap->value.end()) { |
| + Vector<MovableReference*> relocations; |
| + relocations.append(slot); |
| + perHeap->value.add(*slot, relocations); |
| + return; |
| + } |
| + entry->value.append(slot); |
| + } |
| + |
| + void fixupExternalRelocations(NormalPageArena* arena) { |
| + auto perHeap = m_externalRelocations.find(arena); |
| + if (LIKELY(perHeap == m_externalRelocations.end())) |
| + return; |
| + for (const auto& entry : perHeap->value) { |
| + MovableReference heapObject = entry.key; |
| + // |heapObject| will either be in |m_fixups| or have been recorded as |
| + // an internal fixup. |
| + auto heapEntry = m_fixups.find(heapObject); |
| + if (heapEntry != m_fixups.end()) { |
| + for (auto slot : entry.value) |
| + *slot = reinterpret_cast<MovableReference>(heapEntry->value); |
| + continue; |
| + } |
| + // The movement of the containing object will have moved the |
| + // interior slot. |
| + auto it = m_interiorFixups.find( |
| + reinterpret_cast<MovableReference*>(heapObject)); |
| + DCHECK(it != m_interiorFixups.end()); |
| + for (auto slot : entry.value) |
| + *slot = reinterpret_cast<MovableReference>(it->value); |
| + } |
| + } |
| + |
| +#if DEBUG_HEAP_COMPACTION |
| + void dumpDebugStats() { |
| + LOG_HEAP_COMPACTION( |
| + "Fixups: pages=%u objects=%u callbacks=%u interior-size=%zu" |
| + " interiors-f=%u externals=%u\n", |
| + m_relocatablePages.size(), m_fixups.size(), m_fixupCallbacks.size(), |
| + m_interiors ? m_interiors->intervalCount() : 0, m_interiorFixups.size(), |
| + m_externalRelocations.size()); |
| + } |
| +#endif |
| + |
| + private: |
| + MovableObjectFixups() {} |
| + |
| + // Tracking movable and updatable references. For now, we keep a |
| + // map which, for each movable object, records the slot that |
| + // points to it. Upon moving the object, that slot needs to be |
| + // updated. |
| + // |
| + // (TODO: consider in-place updating schemes.) |
| + HashMap<MovableReference, MovableReference*> m_fixups; |
| + |
| + // Map from movable reference to callbacks that need to be invoked |
| + // when the object moves. |
| + HashMap<MovableReference, std::pair<void*, MovingObjectCallback>> |
| + m_fixupCallbacks; |
| + |
| + // Slot => relocated slot/final location. |
| + HashMap<MovableReference*, Address> m_interiorFixups; |
| + |
| + // All pages that are being compacted. |
| + HashSet<BasePage*> m_relocatablePages; |
| + |
| + std::unique_ptr<SparseHeapBitmap> m_interiors; |
| + |
| + // Each heap/arena may have additional slots pointing into it, |
| + // which must be fixed up & relocated after compaction has happened. |
| + // |
| + // This is currently not needed for Blink, but functionality is kept |
| + // around to be able to support this, should the need arise. |
| + using ExternalRelocations = |
| + HashMap<MovableReference, Vector<MovableReference*>>; |
| + |
| + HashMap<NormalPageArena*, ExternalRelocations> m_externalRelocations; |
| +}; |
| + |
| +HeapCompact::HeapCompact() |
| + : m_doCompact(false), |
| + m_gcCountSinceLastCompaction(0), |
| + m_threadCount(0), |
| + m_freeListSize(0), |
| + m_compactableArenas(0u), |
| + m_freedPages(0), |
| + m_freedSize(0) |
| +#if DEBUG_LOG_HEAP_COMPACTION_RUNNING_TIME |
| + , |
| + m_startCompaction(0), |
| + m_startCompactionTimeMS(0) |
| +#endif |
| +{ |
| +} |
| + |
| +HeapCompact::~HeapCompact() {} |
| + |
| +HeapCompact::MovableObjectFixups& HeapCompact::fixups() { |
| + if (!m_fixups) |
| + m_fixups = MovableObjectFixups::create(); |
| + return *m_fixups; |
| +} |
| + |
| +// shouldCompact() is called when a GC is initiated |
| +// (by ThreadState::collectGarbage()), checking if there's sufficient |
| +// reason to do a compaction pass on completion of the GC (but before |
| +// lazy sweeping), and that this can be safely done (i.e., it is not a |
| +// conservative GC.) |
| +// |
| +// TODO(sof): reconsider what is an effective policy for when compaction |
| +// is required. Both in terms of frequency and freelist residency. |
| +bool HeapCompact::shouldCompact(ThreadState* state, |
| + BlinkGC::GCType gcType, |
| + BlinkGC::GCReason reason) { |
| +#if !ENABLE_HEAP_COMPACTION |
| + return false; |
| +#else |
| + if (!RuntimeEnabledFeatures::heapCompactionEnabled()) |
| + return false; |
| + |
| + LOG_HEAP_COMPACTION("shouldCompact(): gc=%s count=%zu free=%zu\n", |
| + ThreadState::gcReasonString(reason), |
| + m_gcCountSinceLastCompaction, m_freeListSize); |
| + m_gcCountSinceLastCompaction++; |
| + // It is only safe to compact during non-conservative GCs. |
| + // TODO: for the main thread, limit this further to only idle GCs. |
| + if (reason != BlinkGC::IdleGC && reason != BlinkGC::PreciseGC && |
| + reason != BlinkGC::ForcedGC) |
| + return false; |
| + |
| + const ThreadHeap& heap = state->heap(); |
| + // If any of the participating threads require a stack scan, |
| + // do not compact. |
| + // |
| + // Why? Should the stack contain an iterator pointing into its |
| + // associated backing store, its references wouldn't be |
| + // correctly relocated. |
| + for (ThreadState* state : heap.threads()) { |
| + if (state->stackState() == BlinkGC::HeapPointersOnStack) { |
| + return false; |
| + } |
| + } |
| + |
| + // Compaction enable rules: |
| + // - It's been a while since the last time. |
| + // - "Considerable" amount of heap memory is bound up in freelist |
| + // allocations. For now, use a fixed limit irrespective of heap |
| + // size. |
| + // |
| + // As this isn't compacting all arenas, the cost of doing compaction |
| + // isn't a worry as it will additionally only be done by idle GCs. |
| + // TODO: add some form of compaction overhead estimate to the marking |
| + // time estimate. |
| + |
| + updateHeapResidency(state); |
|
haraken
2016/12/06 13:30:39
I still think that updateHeapResidency should be a
sof
2016/12/06 21:39:34
I'm keeping it here; ThreadState shouldn't keep co
|
| + |
| +#if STRESS_TEST_HEAP_COMPACTION |
| + // Exercise the handling of object movement by compacting as |
| + // often as possible. |
| + return true; |
| +#else |
| + return s_forceCompactionGC || |
| + (m_gcCountSinceLastCompaction > kGCCountSinceLastCompactionThreshold && |
| + m_freeListSize > kFreeListSizeThreshold); |
| +#endif |
| +#endif |
| +} |
| + |
| +// checkIfCompacting() is called when a GC is initiated |
| +// (by ThreadState::collectGarbage()), checking if there's sufficient |
| +// reason to do a compaction pass on completion of the GC (but before |
| +// lazy sweeping), and that this can be safely done (i.e., it is not a |
| +// conservative GC.) |
| +// |
| +// TODO(sof): reconsider what is an effective policy for when compaction |
| +// is required. Both in terms of frequency and freelist residency. |
|
haraken
2016/12/06 13:30:38
Remove the comment.
sof
2016/12/06 21:39:34
Done.
|
| +BlinkGC::GCType HeapCompact::initialize(ThreadState* state) { |
| + DCHECK(RuntimeEnabledFeatures::heapCompactionEnabled()); |
| + LOG_HEAP_COMPACTION("Compacting: free=%zu\n", m_freeListSize); |
| + m_doCompact = true; |
| + m_freedPages = 0; |
| + m_freedSize = 0; |
| + m_threadCount = state->heap().threads().size(); |
| + m_fixups.reset(); |
| + m_gcCountSinceLastCompaction = 0; |
| + s_forceCompactionGC = false; |
| + return BlinkGC::GCWithSweepCompaction; |
| +} |
| + |
| +void HeapCompact::registerMovingObjectReference(MovableReference* slot) { |
| + if (!m_doCompact) |
| + return; |
| + |
| + fixups().add(slot); |
| +} |
| + |
| +void HeapCompact::registerMovingObjectCallback(MovableReference reference, |
| + MovingObjectCallback callback, |
| + void* callbackData) { |
| + if (!m_doCompact) |
| + return; |
| + |
| + fixups().addFixupCallback(reference, callback, callbackData); |
| +} |
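Judging from the invocation in MovableObjectFixups::relocate() above, a registered callback receives (callbackData, from, to, size) when its backing store is moved. A hedged sketch of what a caller-side callback might look like; IntBuffer and didMoveBacking are hypothetical, and the two using-declarations are stand-ins for the real typedefs in HeapCompact.h:

  #include <cstddef>
  #include <cstdio>

  // Stand-ins for the Blink typedefs; see HeapCompact.h for the real ones.
  using MovableReference = void*;
  using MovingObjectCallback = void (*)(void* callbackData,
                                        MovableReference from,
                                        MovableReference to,
                                        size_t size);

  // Hypothetical object that caches a raw pointer into its backing store and
  // therefore needs to be told when that backing store is compacted.
  struct IntBuffer {
    int* m_data = nullptr;

    static void didMoveBacking(void* callbackData,
                               MovableReference from,
                               MovableReference to,
                               size_t size) {
      auto* self = static_cast<IntBuffer*>(callbackData);
      if (self->m_data == from)
        self->m_data = static_cast<int*>(to);
      std::printf("backing store moved: %p -> %p (%zu bytes)\n", from, to,
                  size);
    }
  };

  // Registration would then be along the lines of:
  //   compactor->registerMovingObjectCallback(buffer.m_data,
  //                                           IntBuffer::didMoveBacking,
  //                                           &buffer);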
| + |
| +void HeapCompact::registerRelocation(MovableReference* slot) { |
| + if (!m_doCompact) |
| + return; |
| + |
| + fixups().addRelocation(slot); |
| +} |
| + |
| +void HeapCompact::updateHeapResidency(ThreadState* threadState) { |
| + // The heap compaction implementation assumes the contiguous range, |
| + // |
| + // [Vector1ArenaIndex, HashTableArenaIndex] |
| + // |
| + // in a few spots. Use static asserts here to not have that assumption |
| + // be silently invalidated by ArenaIndices changes. |
| + static_assert(BlinkGC::Vector1ArenaIndex + 3 == BlinkGC::Vector4ArenaIndex, |
| + "unexpected ArenaIndices ordering"); |
| + static_assert( |
| + BlinkGC::Vector4ArenaIndex + 1 == BlinkGC::InlineVectorArenaIndex, |
| + "unexpected ArenaIndices ordering"); |
| + static_assert( |
| + BlinkGC::InlineVectorArenaIndex + 1 == BlinkGC::HashTableArenaIndex, |
| + "unexpected ArenaIndices ordering"); |
| + |
| + size_t totalArenaSize = 0; |
| + size_t totalFreeListSize = 0; |
| + |
| + m_compactableArenas = 0; |
| +#if DEBUG_HEAP_FREELIST |
| + LOG_HEAP_FREELIST("Arena residencies: {"); |
| +#endif |
| + for (int i = BlinkGC::Vector1ArenaIndex; i <= BlinkGC::HashTableArenaIndex; |
| + ++i) { |
| + NormalPageArena* arena = |
| + static_cast<NormalPageArena*>(threadState->arena(i)); |
| + size_t arenaSize = arena->arenaSize(); |
| + size_t freeListSize = arena->freeListSize(); |
| + totalArenaSize += arenaSize; |
| + totalFreeListSize += freeListSize; |
| + LOG_HEAP_FREELIST("%d: [%zu, %zu], ", i, arenaSize, freeListSize); |
| + // TODO: be more discriminating and consider arena |
| + // load factor, effectiveness of past compactions etc. |
| + if (!arenaSize) |
| + continue; |
| + // Mark the arena as compactable; |i| is already the arena index, so it |
| + // is used directly as the bit position. |
| + m_compactableArenas |= (0x1u << i); |
| + } |
| + LOG_HEAP_FREELIST("}\nTotal = %zu, Free = %zu\n", totalArenaSize, |
| + totalFreeListSize); |
| + |
| + // TODO(sof): consider smoothing the reported sizes. |
| + m_freeListSize = totalFreeListSize; |
| +} |
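m_compactableArenas ends up holding one bit per arena index in the [Vector1ArenaIndex, HashTableArenaIndex] range, with the arena index used directly as the bit position. The header is not part of this diff, but a mask like this is typically consumed with a plain bit test, roughly as sketched below (illustrative only, not the actual HeapCompact.h):

  #include <cstdint>

  struct ArenaMask {
    uint32_t bits = 0;

    // Record arena |arenaIndex| as compactable (mirrors the |= in the loop).
    void markCompactable(int arenaIndex) { bits |= 0x1u << arenaIndex; }

    // Query used when sweeping/compacting to decide whether pages of a given
    // arena take part in compaction.
    bool isCompactable(int arenaIndex) const {
      return (bits & (0x1u << arenaIndex)) != 0;
    }
  };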
| + |
| +void HeapCompact::finishedArenaCompaction(NormalPageArena* arena, |
| + size_t freedPages, |
| + size_t freedSize) { |
| + if (!m_doCompact) |
| + return; |
| + |
| + fixups().fixupExternalRelocations(arena); |
| + m_freedPages += freedPages; |
| + m_freedSize += freedSize; |
| +} |
| + |
| +void HeapCompact::relocate(Address from, Address to) { |
| + DCHECK(m_fixups); |
| + m_fixups->relocate(from, to); |
| +} |
| + |
| +void HeapCompact::startThreadCompaction(ThreadState*) { |
| + if (!m_doCompact) |
| + return; |
| +#if DEBUG_LOG_HEAP_COMPACTION_RUNNING_TIME |
| + if (!atomicTestAndSetToOne(&m_startCompaction)) |
| + m_startCompactionTimeMS = WTF::currentTimeMS(); |
| +#endif |
| +} |
| + |
| +void HeapCompact::finishedThreadCompaction(ThreadState*) { |
| + if (!m_doCompact) |
| + return; |
| + |
| + MutexLocker locker(m_mutex); |
| + // The last thread to finish clears out the shared compaction state. |
| + if (!--m_threadCount) { |
| +#if DEBUG_HEAP_COMPACTION |
| + if (m_fixups) |
| + m_fixups->dumpDebugStats(); |
| +#endif |
| + m_fixups.reset(); |
| + m_doCompact = false; |
| +#if DEBUG_LOG_HEAP_COMPACTION_RUNNING_TIME |
| + double end = WTF::currentTimeMS(); |
| + LOG_HEAP_COMPACTION_INTERNAL( |
| + "Compaction stats: time=%gms, pages freed=%zu, size=%zu\n", |
| + end - m_startCompactionTimeMS, m_freedPages, m_freedSize); |
| + m_startCompaction = 0; |
| + m_startCompactionTimeMS = 0; |
| +#else |
| + LOG_HEAP_COMPACTION("Compaction stats: freed pages=%zu size=%zu\n", |
| + m_freedPages, m_freedSize); |
| +#endif |
| + // All compaction has completed, all participating threads may now |
| + // proceed. |
| + m_finished.broadcast(); |
| + } else { |
| + // Letting a thread return to leave GC and become a "mutator" again |
| + // runs the risk of it accessing heaps of other threads that are |
| + // still being compacted. Consequently, all GC-participating threads |
| + // must complete compaction together. |
| + m_finished.wait(m_mutex); |
| + } |
| +} |
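The wait/broadcast pairing above is effectively a latch: each participating thread counts down under the mutex, and only the last one to finish releases the rest. A self-contained sketch of the same pattern using standard C++ primitives (CompactionLatch is an illustrative name, not a Blink class):

  #include <condition_variable>
  #include <mutex>
  #include <thread>
  #include <vector>

  class CompactionLatch {
   public:
    explicit CompactionLatch(int threadCount) : m_remaining(threadCount) {}

    // Called by each GC-participating thread when its compaction work is done.
    void finishedThreadCompaction() {
      std::unique_lock<std::mutex> locker(m_mutex);
      if (!--m_remaining) {
        // The last finisher clears shared state, then releases everyone.
        m_finished.notify_all();
        return;
      }
      // Returning early would let this thread mutate heaps that other
      // threads are still compacting, so wait for the last finisher.
      m_finished.wait(locker, [this] { return m_remaining == 0; });
    }

   private:
    std::mutex m_mutex;
    std::condition_variable m_finished;
    int m_remaining;
  };

  int main() {
    CompactionLatch latch(4);
    std::vector<std::thread> threads;
    for (int i = 0; i < 4; ++i)
      threads.emplace_back([&latch] { latch.finishedThreadCompaction(); });
    for (auto& t : threads)
      t.join();
    return 0;
  }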
| + |
| +void HeapCompact::addCompactablePage(BasePage* page) { |
| + if (!m_doCompact) |
| + return; |
| + fixups().addCompactablePage(page); |
| +} |
| + |
| +bool HeapCompact::scheduleCompactionGCForTesting(bool value) { |
| + bool current = s_forceCompactionGC; |
| + s_forceCompactionGC = value; |
| + return current; |
| +} |
| + |
| +} // namespace blink |