Chromium Code Reviews

Unified Diff: third_party/WebKit/Source/platform/heap/HeapCompact.cpp

Issue 2531973002: Simple BlinkGC heap compaction. (Closed)
Patch Set: add pointer alignment handling to SparseHeapBitmap Created 4 years ago
Index: third_party/WebKit/Source/platform/heap/HeapCompact.cpp
diff --git a/third_party/WebKit/Source/platform/heap/HeapCompact.cpp b/third_party/WebKit/Source/platform/heap/HeapCompact.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..57f28f6b7ea373c2d422ea9156afd48307243e56
--- /dev/null
+++ b/third_party/WebKit/Source/platform/heap/HeapCompact.cpp
@@ -0,0 +1,471 @@
+// Copyright 2016 Opera Software AS. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "platform/heap/HeapCompact.h"
+
+#include "platform/RuntimeEnabledFeatures.h"
+#include "platform/heap/Heap.h"
+#include "platform/heap/SparseHeapBitmap.h"
+#include "wtf/CurrentTime.h"
+#include "wtf/HashMap.h"
+#include "wtf/HashSet.h"
+#include "wtf/Vector.h"
+
+namespace blink {
+
+bool HeapCompact::s_forceCompactionGC = false;
+
+// The real worker behind heap compaction, recording references to movable
+// objects ("slots".) When the objects end up being compacted and moved,
+// relocate() will adjust the slots to point to the new location of the
+// object along with handling fixups for interior pointers.
+//
+// The "fixups" object is created and maintained for the lifetime of one
+// heap compaction-enhanced GC.
+class HeapCompact::MovableObjectFixups final {
+ public:
+ static std::unique_ptr<MovableObjectFixups> create() {
+ return wrapUnique(new MovableObjectFixups);
+ }
+
+ ~MovableObjectFixups() {}
+
+ // For the compactable arenas, record all pages belonging to them.
+ // This is needed to handle 'interior slots', pointers that themselves
+ // can move (independently from the reference the slot points to.)
+ void addCompactablePage(BasePage* page) {
+ DCHECK(!page->isLargeObjectPage());
+ if (!HeapCompact::isCompactableArena(page->arena()->arenaIndex()))
haraken 2016/12/09 07:25:55 Maybe do you want to mean isCompact"ing"Arena? Al
sof 2016/12/09 21:44:03 I've converted the assumption into a DCHECK() (not
+ return;
+
+ m_relocatablePages.add(page);
+ }
+
+ void add(MovableReference* slot) {
+ MovableReference reference = *slot;
+ BasePage* refPage = pageFromObject(reference);
+ // Nothing to compact on a large object's page.
+ if (refPage->isLargeObjectPage())
haraken 2016/12/09 07:25:55 Maybe we also want to check if the refPage is in a
sof 2016/12/09 21:44:04 I think that's too fine grained a check, i.e., we
+ return;
+
+#if DCHECK_IS_ON()
+ auto it = m_fixups.find(reference);
+ DCHECK(it == m_fixups.end() || it->value == slot);
+#endif
+ m_fixups.add(reference, slot);
+
+ Address slotAddress = reinterpret_cast<Address>(slot);
+ BasePage* slotPage = reinterpret_cast<BasePage*>(
+ blinkPageAddress(slotAddress) + blinkGuardPageSize);
haraken 2016/12/09 07:25:55 It might be better to add a helper function to Hea
sof 2016/12/09 21:44:03 pageFromObject() does just this already, but this
+ if (LIKELY(!m_relocatablePages.contains(slotPage)))
+ return;
+#if ENABLE(ASSERT)
+ DCHECK(slotPage->contains(slotAddress));
+#endif
+ // Unlikely case, the slot resides on a compactable arena's page.
haraken 2016/12/09 07:25:55 compact"ing"
sof 2016/12/09 21:44:04 Done.
+ // => It is an 'interior slot' (interior to a movable backing store.)
+ // Record it as an interior slot, which entails:
+ //
+ // - Storing it in the interior map, which maps the slot to
+ // its (eventual) location. Initially nullptr.
+ // - Marking it as an interior pointer in the "interior" bitmap.
+ // This bitmap is consulted when moving a backing store, to
+ // quickly check whether any interior slots will have to be
+ // additionally redirected.
+ addInteriorFixup(slot);
+ }
+
+ void addFixupCallback(MovableReference reference,
+ MovingObjectCallback callback,
+ void* callbackData) {
+ DCHECK(!m_fixupCallbacks.contains(reference));
+ m_fixupCallbacks.add(reference, std::pair<void*, MovingObjectCallback>(
+ callbackData, callback));
+ }
+
+ void relocateInteriorFixups(Address from, Address to, size_t size) {
+ SparseHeapBitmap* range = m_interiors->hasRange(from, size);
+ if (LIKELY(!range))
+ return;
+
+ // Scan through the payload, looking for interior pointer slots
+ // to adjust. If the backing store of such an interior slot hasn't
+ // been moved already, update the slot -> real location mapping.
+ // When the backing store is eventually moved, it'll use that location.
+ //
+ for (size_t offset = 0; offset < size; offset += sizeof(void*)) {
+ if (!range->isSet(from + offset))
+ continue;
+ MovableReference* slot =
+ reinterpret_cast<MovableReference*>(from + offset);
+ auto it = m_interiorFixups.find(slot);
+ if (it == m_interiorFixups.end())
+ continue;
+
+ // TODO: with the right sparse bitmap representation, it could be possible
+ // to quickly determine if we've now stepped past the last address
+ // that needed fixup in [address, address + size). Breaking out of this
+ // loop might be worth doing for hash table backing stores with a very
+ // low load factor. But interior fixups are rare.
+
+ // If |slot|'s mapping is set, then the slot has been adjusted already.
+ if (it->value)
+ continue;
+ Address fixup = to + offset;
+ LOG_HEAP_COMPACTION("Range interior fixup: %p %p %p\n", from + offset,
+ it->value, fixup);
+ // Record the relocated location of the original slot at |slot|.
+ // When the backing store that |slot| points to is eventually
+ // moved/compacted, relocate() will update |to + offset| with a
+ // pointer to the moved backing store.
+ m_interiorFixups.set(slot, fixup);
+ }
+ }
+
+ void relocate(Address from, Address to) {
+ auto it = m_fixups.find(from);
+ DCHECK(it != m_fixups.end());
+#if DCHECK_IS_ON()
+ BasePage* fromPage = reinterpret_cast<BasePage*>(blinkPageAddress(from) +
+ blinkGuardPageSize);
+ DCHECK(m_relocatablePages.contains(fromPage));
haraken 2016/12/09 07:25:55 Add DCHECK(m_relocatablePages.contains(toPage));
sof 2016/12/09 21:44:04 Hmm, that's a bit unnatural a check - why would we
+#endif
+ MovableReference* slot = reinterpret_cast<MovableReference*>(it->value);
+ auto interior = m_interiorFixups.find(slot);
+ if (interior != m_interiorFixups.end()) {
+ MovableReference* slotLocation =
+ reinterpret_cast<MovableReference*>(interior->value);
+ if (!slotLocation) {
+ m_interiorFixups.set(slot, to);
haraken 2016/12/09 07:25:55 I'll stop asking a question about this, but I stil
sof 2016/12/09 21:44:03 We're here if an interior slot's backing store is
+ } else {
+ LOG_HEAP_COMPACTION("Redirected slot: %p => %p\n", slot, slotLocation);
+ slot = slotLocation;
+ }
+ }
+ // If the slot has subsequently been updated (e.g., a prefinalizer
+ // or a destructor mutated and expanded/shrunk the collection), do
+ // not update and relocate the slot; |from| is no longer valid and
+ // referenced.
+ //
+ // The slot's contents may also have been cleared during weak processing;
+ // no work to be done in that case either.
+ if (UNLIKELY(*slot != from)) {
+ LOG_HEAP_COMPACTION(
+ "No relocation: slot = %p, *slot = %p, from = %p, to = %p\n", slot,
+ *slot, from, to);
+#if DCHECK_IS_ON()
+ // Verify that the already updated slot is valid, meaning:
+ // - has been cleared.
+ // - has been updated & expanded with a large object backing store.
+ // - has been updated with a larger, freshly allocated backing store.
+ // (on a fresh page in a compactable arena that is not being
+ // compacted.)
+ BasePage* refPage = reinterpret_cast<BasePage*>(
haraken 2016/12/09 07:25:55 refPage => slotPage
sof 2016/12/09 21:44:03 Done.
+ blinkPageAddress(reinterpret_cast<Address>(*slot)) +
+ blinkGuardPageSize);
+ DCHECK(!*slot || refPage->isLargeObjectPage() ||
+ (HeapCompact::isCompactableArena(refPage->arena()->arenaIndex()) &&
+ m_relocatablePages.contains(refPage)));
haraken 2016/12/09 07:25:54 Shouldn't this be: DCHECK(!*slot || !m_relocata
sof 2016/12/09 21:44:03 Indeed, thanks - it should check what the comment
+#endif
+ return;
+ }
+ *slot = to;
+
+ size_t size = 0;
+ auto callback = m_fixupCallbacks.find(from);
+ if (UNLIKELY(callback != m_fixupCallbacks.end())) {
+ size = HeapObjectHeader::fromPayload(to)->payloadSize();
+ callback->value.second(callback->value.first, from, to, size);
+ }
+
+ if (!m_interiors)
+ return;
+
+ if (!size)
+ size = HeapObjectHeader::fromPayload(to)->payloadSize();
+ relocateInteriorFixups(from, to, size);
+ }
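Illustrative note (not part of the patch): the two-step interior-slot handling performed by relocateInteriorFixups() and relocate() above can be modelled with plain std containers. The sketch below uses hypothetical standalone names and raw new[] buffers in place of Blink's movable backing stores, purely to show the redirection order.

#include <cassert>
#include <cstring>
#include <map>

int main() {
  // |inner| is the backing store an interior slot points at.
  char* inner = new char[8];

  // |outerA| is a movable store whose payload itself holds that pointer;
  // the address &outerA[0] is therefore an "interior slot".
  char** outerA = new char*[1]{inner};
  std::map<char**, char**> interiorFixups;
  interiorFixups[&outerA[0]] = nullptr;  // registered via addInteriorFixup()

  // Step 1 (cf. relocateInteriorFixups): the containing store moves from
  // outerA to outerB, so record that the slot now lives at &outerB[0].
  char** outerB = new char*[1];
  std::memcpy(outerB, outerA, sizeof(char*));
  interiorFixups[&outerA[0]] = &outerB[0];

  // Step 2 (cf. relocate): |inner| moves; the updated pointer is written
  // through the redirected slot location, not the stale one.
  char* innerMoved = new char[8];
  std::memcpy(innerMoved, inner, 8);
  char** slot = &outerA[0];
  if (interiorFixups.count(slot) && interiorFixups[slot])
    slot = interiorFixups[slot];
  *slot = innerMoved;

  assert(outerB[0] == innerMoved);
  delete[] inner;
  delete[] innerMoved;
  delete[] outerA;
  delete[] outerB;
  return 0;
}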
+
+ void addInteriorFixup(MovableReference* slot) {
haraken 2016/12/09 07:25:55 Move this method up to just below add().
sof 2016/12/09 21:44:03 Done.
+ auto it = m_interiorFixups.find(slot);
+ // Ephemeron fixpoint iterations may cause repeated registrations.
+ if (UNLIKELY(it != m_interiorFixups.end())) {
+ DCHECK(!it->value);
+ return;
+ }
+ m_interiorFixups.add(slot, nullptr);
+ LOG_HEAP_COMPACTION("Interior slot: %p\n", slot);
+ Address slotAddress = reinterpret_cast<Address>(slot);
+ if (!m_interiors) {
+ m_interiors = SparseHeapBitmap::create(slotAddress);
+ return;
+ }
+ m_interiors->add(slotAddress);
+ }
+
+#if DEBUG_HEAP_COMPACTION
+ void dumpDebugStats() {
+ LOG_HEAP_COMPACTION(
+ "Fixups: pages=%u objects=%u callbacks=%u interior-size=%zu"
+ " interiors-f=%u\n",
+ m_relocatablePages.size(), m_fixups.size(), m_fixupCallbacks.size(),
+ m_interiors ? m_interiors->intervalCount() : 0,
+ m_interiorFixups.size());
+ }
+#endif
+
+ private:
+ MovableObjectFixups() {}
+
+ // Tracking movable and updatable references. For now, we keep a
+ // map which, for each movable object, records the slot that
+ // points to it. Upon moving the object, that slot needs to be
+ // updated.
+ //
+ // (TODO: consider in-place updating schemes.)
+ HashMap<MovableReference, MovableReference*> m_fixups;
+
+ // Map from movable reference to callbacks that need to be invoked
+ // when the object moves.
+ HashMap<MovableReference, std::pair<void*, MovingObjectCallback>>
+ m_fixupCallbacks;
+
+ // Slot => relocated slot/final location.
+ HashMap<MovableReference*, Address> m_interiorFixups;
+
+ // All pages that are being compacted.
+ HashSet<BasePage*> m_relocatablePages;
+
+ std::unique_ptr<SparseHeapBitmap> m_interiors;
+};
haraken 2016/12/09 07:25:55 Would you add an assert to ScriptWrappableVisitor
sof 2016/12/09 21:44:04 Done, but should I be concerned about perf fallout
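Illustrative note (not part of the patch): for the common, non-interior case, the class above boils down to "remember the one slot per movable object, patch it on move". A minimal standalone sketch of that contract, using std containers and hypothetical names rather than the WTF types used here:

#include <cassert>
#include <cstring>
#include <unordered_map>

// Sketch of m_fixups: movable object -> the single slot referencing it.
class FixupsSketch {
 public:
  void add(void** slot) { fixups_[*slot] = slot; }

  void relocate(void* from, void* to) {
    auto it = fixups_.find(from);
    assert(it != fixups_.end());
    void** slot = it->second;
    // Mirrors the "*slot != from" guard above: a cleared or freshly
    // reallocated slot must not be stomped on.
    if (*slot != from)
      return;
    *slot = to;
  }

 private:
  std::unordered_map<void*, void**> fixups_;
};

int main() {
  char* store = new char[16];
  void* field = store;  // the "slot" holding the movable backing store
  FixupsSketch fixups;
  fixups.add(&field);

  char* moved = new char[16];  // compaction copies the payload...
  std::memcpy(moved, store, 16);
  fixups.relocate(store, moved);  // ...and the registered slot is patched.
  assert(field == moved);

  delete[] store;
  delete[] moved;
  return 0;
}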
+
+HeapCompact::HeapCompact()
+ : m_doCompact(false),
+ m_gcCountSinceLastCompaction(0),
+ m_threadCount(0),
+ m_freeListSize(0),
+ m_compactableArenas(0u),
+ m_freedPages(0),
+ m_freedSize(0)
+#if DEBUG_LOG_HEAP_COMPACTION_RUNNING_TIME
+ ,
+ m_startCompaction(0),
+ m_startCompactionTimeMS(0)
+#endif
+{
+}
+
+HeapCompact::~HeapCompact() {}
+
+HeapCompact::MovableObjectFixups& HeapCompact::fixups() {
+ if (!m_fixups)
+ m_fixups = MovableObjectFixups::create();
haraken 2016/12/09 07:25:55 Nit: It wouldn't make much sense to create the fix
+ return *m_fixups;
+}
+
+bool HeapCompact::shouldCompact(ThreadState* state,
+ BlinkGC::GCType gcType,
+ BlinkGC::GCReason reason) {
+#if !ENABLE_HEAP_COMPACTION
+ return false;
+#else
+ if (!RuntimeEnabledFeatures::heapCompactionEnabled())
+ return false;
+
+ LOG_HEAP_COMPACTION("shouldCompact(): gc=%s count=%zu free=%zu\n",
+ ThreadState::gcReasonString(reason),
+ m_gcCountSinceLastCompaction, m_freeListSize);
+ m_gcCountSinceLastCompaction++;
+ // It is only safe to compact during non-conservative GCs.
+ // TODO: for the main thread, limit this further to only idle GCs.
+ if (reason != BlinkGC::IdleGC && reason != BlinkGC::PreciseGC &&
+ reason != BlinkGC::ForcedGC)
haraken 2016/12/09 07:25:55 Do we need this check? I guess the following Blink
sof 2016/12/09 21:44:03 I think it is fine to have it here; if a page navi
+ return false;
+
+ const ThreadHeap& heap = state->heap();
+ // If any of the participating threads require a stack scan,
+ // do not compact.
+ //
+ // Why? Should the stack contain an iterator pointing into its
+ // associated backing store, its references wouldn't be
+ // correctly relocated.
+ for (ThreadState* threadState : heap.threads()) {
+ if (threadState->stackState() == BlinkGC::HeapPointersOnStack) {
+ return false;
+ }
+ }
+
+ // Compaction enable rules:
+ // - It's been a while since the last time.
+ // - "Considerable" amount of heap memory is bound up in freelist
+ // allocations. For now, use a fixed limit irrespective of heap
+ // size.
+ //
+ // As this isn't compacting all arenas, the cost of doing compaction
+ // isn't a worry; additionally, it will mostly be done by idle GCs.
+ // TODO: add some form of compaction overhead estimate to the marking
+ // time estimate.
+
+ updateHeapResidency(state);
+
+#if STRESS_TEST_HEAP_COMPACTION
+ // Exercise the handling of object movement by compacting as
+ // often as possible.
+ return true;
+#else
+ return s_forceCompactionGC ||
+ (m_gcCountSinceLastCompaction > kGCCountSinceLastCompactionThreshold &&
+ m_freeListSize > kFreeListSizeThreshold);
+#endif
+#endif
+}
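Illustrative note (not part of the patch): the enabling rules above reduce to a small predicate. The sketch below restates it standalone; the threshold names and values are hypothetical stand-ins, since the real constants live in HeapCompact.h, which isn't part of this diff.

#include <cassert>
#include <cstddef>

// Hypothetical thresholds, for illustration only.
constexpr size_t kGCCountThreshold = 10;
constexpr size_t kFreeListBytesThreshold = 512 * 1024;

// Compact when explicitly forced, or when compaction hasn't run for a
// while and a sizable amount of heap memory sits on freelists.
bool shouldCompactSketch(bool forced,
                         size_t gcsSinceLastCompaction,
                         size_t freeListBytes) {
  return forced || (gcsSinceLastCompaction > kGCCountThreshold &&
                    freeListBytes > kFreeListBytesThreshold);
}

int main() {
  assert(shouldCompactSketch(true, 0, 0));
  assert(shouldCompactSketch(false, 11, 600 * 1024));
  assert(!shouldCompactSketch(false, 2, 600 * 1024));
  return 0;
}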
+
+BlinkGC::GCType HeapCompact::initialize(ThreadState* state) {
+ DCHECK(RuntimeEnabledFeatures::heapCompactionEnabled());
+ LOG_HEAP_COMPACTION("Compacting: free=%zu\n", m_freeListSize);
+ m_doCompact = true;
+ m_freedPages = 0;
+ m_freedSize = 0;
+ m_threadCount = state->heap().threads().size();
+ m_fixups.reset();
+ m_gcCountSinceLastCompaction = 0;
+ s_forceCompactionGC = false;
+ return BlinkGC::GCWithSweepCompaction;
haraken 2016/12/09 07:25:55 It doesn't really make sense to return GCWithSweep
sof 2016/12/09 21:44:04 The caller shouldn't have to know such details, I
+}
+
+void HeapCompact::registerMovingObjectReference(MovableReference* slot) {
+ if (!m_doCompact)
+ return;
+
+ fixups().add(slot);
+}
+
+void HeapCompact::registerMovingObjectCallback(MovableReference reference,
+ MovingObjectCallback callback,
+ void* callbackData) {
+ if (!m_doCompact)
+ return;
+
+ fixups().addFixupCallback(reference, callback, callbackData);
+}
+
+void HeapCompact::updateHeapResidency(ThreadState* threadState) {
+ // In a few spots, the heap compaction implementation assumes that the
+ // arena indices
+ //
+ // [Vector1ArenaIndex, HashTableArenaIndex]
+ //
+ // form a contiguous range. Use static asserts here so that the
+ // assumption isn't silently invalidated by changes to the ArenaIndices.
+ static_assert(BlinkGC::Vector1ArenaIndex + 3 == BlinkGC::Vector4ArenaIndex,
+ "unexpected ArenaIndices ordering");
+ static_assert(
+ BlinkGC::Vector4ArenaIndex + 1 == BlinkGC::InlineVectorArenaIndex,
+ "unexpected ArenaIndices ordering");
+ static_assert(
+ BlinkGC::InlineVectorArenaIndex + 1 == BlinkGC::HashTableArenaIndex,
+ "unexpected ArenaIndices ordering");
+
+ size_t totalArenaSize = 0;
+ size_t totalFreeListSize = 0;
+
+ m_compactableArenas = 0;
+#if DEBUG_HEAP_FREELIST
+ LOG_HEAP_FREELIST("Arena residencies: {");
+#endif
+ for (int i = BlinkGC::Vector1ArenaIndex; i <= BlinkGC::HashTableArenaIndex;
+ ++i) {
+ NormalPageArena* arena =
+ static_cast<NormalPageArena*>(threadState->arena(i));
+ size_t arenaSize = arena->arenaSize();
+ size_t freeListSize = arena->freeListSize();
+ totalArenaSize += arenaSize;
+ totalFreeListSize += freeListSize;
+ LOG_HEAP_FREELIST("%d: [%zu, %zu], ", i, arenaSize, freeListSize);
+ // TODO: be more discriminating and consider arena
+ // load factor, effectiveness of past compactions etc.
+ if (!arenaSize)
haraken 2016/12/09 07:25:55 Actually this is not doing a meaningful check... a
sof 2016/12/09 21:44:04 Some of the vector arenas do run into this, when o
+ continue;
+ // Mark the arena as compactable.
+ m_compactableArenas |= (0x1u << (BlinkGC::Vector1ArenaIndex + i));
+ }
+ LOG_HEAP_FREELIST("}\nTotal = %zu, Free = %zu\n", totalArenaSize,
+ totalFreeListSize);
+
+ // TODO(sof): consider smoothing the reported sizes.
+ m_freeListSize = totalFreeListSize;
+}
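Illustrative note (not part of the patch): the per-arena bitmask built above is only useful if the setter and the (unshown) tester in HeapCompact.h agree on the bit position used for a given arena index. A small standalone illustration of that pairing, with hypothetical index values:

#include <cassert>
#include <cstdint>

// Hypothetical arena indices; the real values come from BlinkGC's enum.
constexpr int kVector1ArenaIndex = 7;
constexpr int kHashTableArenaIndex = 13;

int main() {
  uint32_t compactableArenas = 0;

  // Setter: mark an arena as participating in compaction.
  auto markCompactable = [&](int arenaIndex) {
    compactableArenas |= 0x1u << arenaIndex;
  };
  // Tester: must shift by the same amount as the setter.
  auto isCompactingArena = [&](int arenaIndex) {
    return (compactableArenas & (0x1u << arenaIndex)) != 0;
  };

  for (int i = kVector1ArenaIndex; i <= kHashTableArenaIndex; ++i)
    markCompactable(i);
  assert(isCompactingArena(kVector1ArenaIndex));
  assert(isCompactingArena(kHashTableArenaIndex));
  assert(!isCompactingArena(kVector1ArenaIndex - 1));
  return 0;
}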
+
+void HeapCompact::finishedArenaCompaction(NormalPageArena* arena,
+ size_t freedPages,
+ size_t freedSize) {
+ if (!m_doCompact)
+ return;
+
+ m_freedPages += freedPages;
+ m_freedSize += freedSize;
+}
+
+void HeapCompact::relocate(Address from, Address to) {
+ DCHECK(m_fixups);
+ m_fixups->relocate(from, to);
+}
+
+void HeapCompact::startThreadCompaction(ThreadState*) {
haraken 2016/12/09 07:25:54 Drop ThreadState*.
sof 2016/12/09 21:44:03 Done.
+ if (!m_doCompact)
+ return;
+#if DEBUG_LOG_HEAP_COMPACTION_RUNNING_TIME
+ if (!atomicTestAndSetToOne(&m_startCompaction))
+ m_startCompactionTimeMS = WTF::currentTimeMS();
haraken 2016/12/09 07:25:55 Slightly simpler: MutexLocker locker; if (!m_star
sof 2016/12/09 21:44:03 Done. I thought I'd sneak in the first user of ato
+#endif
+}
+
+void HeapCompact::finishedThreadCompaction(ThreadState*) {
haraken 2016/12/09 07:25:55 Drop ThreadState*.
sof 2016/12/09 21:44:04 Done.
+ if (!m_doCompact)
+ return;
+
+ MutexLocker locker(m_mutex);
+ // The last thread to finish clears out the shared compaction state.
+ if (!--m_threadCount) {
+#if DEBUG_HEAP_COMPACTION
+ if (m_fixups)
+ m_fixups->dumpDebugStats();
+#endif
+ m_fixups.reset();
+ m_doCompact = false;
+#if DEBUG_LOG_HEAP_COMPACTION_RUNNING_TIME
+ double end = WTF::currentTimeMS();
+ LOG_HEAP_COMPACTION_INTERNAL(
+ "Compaction stats: time=%gms, pages freed=%zu, size=%zu\n",
+ end - m_startCompactionTimeMS, m_freedPages, m_freedSize);
+ m_startCompaction = 0;
+ m_startCompactionTimeMS = 0;
+#else
+ LOG_HEAP_COMPACTION("Compaction stats: freed pages=%zu size=%zu\n",
+ m_freedPages, m_freedSize);
+#endif
+ // All compaction has completed, all participating threads may now
+ // proceed.
+ m_finished.broadcast();
+ } else {
+ // Letting a thread leave the GC and become a "mutator" again runs
+ // the risk of it accessing the heaps of other threads that are
+ // still being compacted. Consequently, all GC-participating threads
+ // must complete compaction together.
+ m_finished.wait(m_mutex);
+ }
+}
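Illustrative note (not part of the patch): finishedThreadCompaction() above is a "last thread out broadcasts" barrier. A minimal standalone sketch of the same pattern using std primitives instead of WTF's Mutex/ThreadCondition:

#include <condition_variable>
#include <mutex>
#include <thread>
#include <vector>

// Every participating thread calls finished(); all of them block until the
// last one arrives, which also performs any final cleanup.
class CompactionBarrierSketch {
 public:
  explicit CompactionBarrierSketch(int threadCount) : remaining_(threadCount) {}

  void finished() {
    std::unique_lock<std::mutex> lock(mutex_);
    if (--remaining_ == 0) {
      // Final thread: clean up shared state here, then release everyone.
      done_.notify_all();
    } else {
      done_.wait(lock, [this] { return remaining_ == 0; });
    }
  }

 private:
  std::mutex mutex_;
  std::condition_variable done_;
  int remaining_;
};

int main() {
  CompactionBarrierSketch barrier(4);
  std::vector<std::thread> threads;
  for (int i = 0; i < 4; ++i)
    threads.emplace_back([&barrier] { barrier.finished(); });
  for (auto& t : threads)
    t.join();
  return 0;
}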
+
+void HeapCompact::addCompactablePage(BasePage* page) {
+ if (!m_doCompact)
+ return;
+ fixups().addCompactablePage(page);
+}
+
+bool HeapCompact::scheduleCompactionGCForTesting(bool value) {
+ bool current = s_forceCompactionGC;
+ s_forceCompactionGC = value;
+ return current;
+}
+
+} // namespace blink
