Chromium Code Reviews

Unified Diff: third_party/WebKit/Source/platform/heap/HeapCompact.cpp

Issue 2531973002: Simple BlinkGC heap compaction. (Closed)
Patch Set: synchronize on compaction finish Created 4 years ago
Index: third_party/WebKit/Source/platform/heap/HeapCompact.cpp
diff --git a/third_party/WebKit/Source/platform/heap/HeapCompact.cpp b/third_party/WebKit/Source/platform/heap/HeapCompact.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..f1078246d6feb7882d97319d23cdb23bbe712027
--- /dev/null
+++ b/third_party/WebKit/Source/platform/heap/HeapCompact.cpp
@@ -0,0 +1,548 @@
+// Copyright 2016 Opera Software AS. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "platform/heap/HeapCompact.h"
+
+#include "platform/RuntimeEnabledFeatures.h"
+#include "platform/heap/Heap.h"
+#include "platform/heap/SparseHeapBitmap.h"
+#include "wtf/CurrentTime.h"
+#include "wtf/HashMap.h"
+#include "wtf/HashSet.h"
+#include "wtf/Vector.h"
+
+namespace blink {
+
+bool HeapCompact::s_forceCompactionGC = false;
+
+// The real worker behind heap compaction, recording references ("slots")
+// to movable objects. When the objects end up being compacted and moved,
+// relocate() adjusts the slots to point to the new location of each
+// object, and also handles fixups of interior pointers.
+//
+// The "fixups" object is created and maintained for the lifetime of one
+// heap compaction-enhanced GC.
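+//
+// A rough sketch of the expected call sequence, inferred from this file
+// (not a normative contract):
+//
+//   checkIfCompacting()                  - decide whether this GC compacts
+//   registerMovingObjectReference(slot)  - fixups().add(slot), during marking
+//   registerMovingObjectCallback(...)    - fixups().addFixupCallback(...)
+//   movedObject(from, to)                - fixups().relocate(from, to), as
+//                                          objects are moved
+//   finishedCompacting()                 - last thread resets m_fixups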
+class HeapCompact::MovableObjectFixups final {
+ public:
+ static std::unique_ptr<MovableObjectFixups> create() {
+ return std::unique_ptr<MovableObjectFixups>(new MovableObjectFixups);
+ }
+
+ ~MovableObjectFixups() {}
+
+ void addCompactablePage(BasePage* p) {
+    // Record |p| as relocatable; the caller adds every page belonging to a
+    // compactable arena.
+ m_relocatablePages.add(p);
+ }
+
+ void add(MovableReference* slot) {
+ MovableReference reference = *slot;
+ BasePage* refPage = pageFromObject(reference);
+ // Nothing to compact on a large object's page.
+ if (refPage->isLargeObjectPage())
+ return;
+
+#if DCHECK_IS_ON()
+ auto it = m_fixups.find(reference);
+ DCHECK(it == m_fixups.end() || it->value == slot);
+#endif
+ Address slotAddress = reinterpret_cast<Address>(slot);
+ BasePage* slotPage = reinterpret_cast<BasePage*>(
+ blinkPageAddress(slotAddress) + blinkGuardPageSize);
+ if (m_relocatablePages.contains(slotPage)) {
+      // Slot resides on a compactable heap's page.
+      // => It is an interior slot (interior to some other backing store.)
+      // Record it as an interior slot, which entails:
+      //
+      // - Storing it in the interior map, which maps the slot to
+      //   its (eventual) location. Initially nullptr.
+      // - Marking it as an interior pointer in the page's "interior"
+      //   bitmap. This bitmap is used when moving a backing store,
+      //   making it quicker to check whether any interior slots will
+      //   have to be redirected.
+
+      // Large object pages aren't compactable by definition, so we shouldn't
+      // encounter any here.
+ DCHECK(!slotPage->isLargeObjectPage());
+ if (HeapCompact::isCompactableArena(slotPage->arena()->arenaIndex()))
+ addInteriorFixup(slotAddress, slot);
+ }
+ m_fixups.add(reference, slot);
+ }
+
+ void addFixupCallback(MovableReference reference,
+ MovingObjectCallback callback,
+ void* callbackData) {
+ DCHECK(!m_fixupCallbacks.contains(reference));
+ m_fixupCallbacks.add(reference, std::pair<void*, MovingObjectCallback>(
+ callbackData, callback));
+ }
+
+ size_t size() const { return m_fixups.size(); }
+
+ void relocateInteriorFixups(Address from, Address to, size_t size) {
+ SparseHeapBitmap* range = m_interiors->hasRange(from, size);
+ if (LIKELY(!range))
+ return;
+
+ // Scan through the payload, looking for interior pointer slots
+ // to adjust. If the backing store of such an interior slot hasn't
+ // been moved already, update the slot -> real location mapping.
+ // When the backing store is eventually moved, it'll use that location.
+ //
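+    // An illustrative example (a sketch only, not tied to a specific type):
+    // a backing store A on a compactable page may itself hold a slot that
+    // refers to another backing store B. When A moves from |from| to |to|,
+    // that slot moves from |from + i| to |to + i|; recording |to + i| in
+    // m_interiorFixups lets B's eventual move write its new address into
+    // the relocated slot rather than into the stale |from + i|.
+    //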
+ for (size_t i = 0; i < size; i += sizeof(void*)) {
+ if (!range->isSet(from + i))
+ continue;
+ MovableReference* fromRef = reinterpret_cast<MovableReference*>(from + i);
+ auto it = m_interiorFixups.find(fromRef);
+ if (it == m_interiorFixups.end())
+ continue;
+
+      // TODO: with the right sparse bitmap representation, it could be possible
+      // to quickly determine if we've now stepped past the last address
+      // that needed fixup in [from, from + size). Breaking out of this
+ // loop might be worth doing for hash table backing stores with a very
+ // low load factor. But interior fixups are rare.
+
+      // If |fromRef|'s mapping is set, then the slot has been adjusted already.
+ if (it->value)
+ continue;
+ LOG_HEAP_COMPACTION("Range interior fixup: %p %p %p\n", from + i,
+ it->value, to + i);
+ Address fixup = to + i;
+ // Fill in the relocated location of the original slot at |from + i|;
+ // when the backing store corresponding to |from + i| is eventually
+ // moved/compacted, it'll update |to + i| with a pointer to the
+ // moved backing store.
+ m_interiorFixups.set(fromRef, fixup);
+ }
+ }
+
+ void relocate(Address from, Address to) {
+ auto it = m_fixups.find(from);
+ DCHECK(it != m_fixups.end());
+ MovableReference* slot = reinterpret_cast<MovableReference*>(it->value);
+ auto interior = m_interiorFixups.find(slot);
+ if (interior != m_interiorFixups.end()) {
+ MovableReference* slotLocation =
+ reinterpret_cast<MovableReference*>(interior->value);
+ if (!slotLocation) {
+ m_interiorFixups.set(slot, to);
+ } else {
+ LOG_HEAP_COMPACTION("Redirected slot: %p => %p\n", slot, slotLocation);
+ slot = slotLocation;
+ }
+ }
+    // If the slot has subsequently been updated (e.g., a prefinalizer or
+    // a destructor mutated the collection and expanded/shrunk its backing
+    // store), do not update and relocate the slot: |from| is no longer the
+    // object it references.
+    //
+    // The slot's contents may also have been cleared during weak processing;
+    // there is no work to be done in that case either.
+ if (UNLIKELY(*slot != from)) {
+ LOG_HEAP_COMPACTION(
+ "No relocation: slot = %p, *slot = %p, from = %p, to = %p\n", slot,
+ *slot, from, to);
+ return;
+ }
+ *slot = to;
+
+ size_t size = 0;
+ auto callback = m_fixupCallbacks.find(from);
+ if (UNLIKELY(callback != m_fixupCallbacks.end())) {
+ size = HeapObjectHeader::fromPayload(to)->payloadSize();
+ callback->value.second(callback->value.first, from, to, size);
+ }
+
+ if (LIKELY(!m_interiors))
+ return;
+
+ if (!size)
+ size = HeapObjectHeader::fromPayload(to)->payloadSize();
+ relocateInteriorFixups(from, to, size);
+ }
+
+ void addInteriorFixup(Address interior, MovableReference* slot) {
+ auto it = m_interiorFixups.find(slot);
+ // Ephemeron fixpoint iterations may cause repeated
+ // registrations.
+ DCHECK(it == m_interiorFixups.end() || !it->value);
+ if (UNLIKELY(it != m_interiorFixups.end() && !it->value))
+ return;
+ m_interiorFixups.add(slot, nullptr);
+ addInteriorMapping(interior);
+ }
+
+ void addInteriorMapping(Address interior) {
+ LOG_HEAP_COMPACTION("Interior: %p\n", interior);
+ if (!m_interiors) {
+ m_interiors = SparseHeapBitmap::create(interior);
+ return;
+ }
+ m_interiors->add(interior);
+ }
+
+ void addRelocation(MovableReference* slot) {
+ MovableReference reference = *slot;
+ if (!m_fixups.contains(reference)) {
+ // Record the interior pointer.
+ addInteriorFixup(reinterpret_cast<Address>(reference), slot);
+ }
+
+ BasePage* heapPage = pageFromObject(reference);
+ DCHECK(heapPage);
+ DCHECK(!heapPage->isLargeObjectPage());
+ // For now, the heap objects we're adding relocations for are assumed
+ // to be residing in a compactable heap. There's no reason why it must be
+ // so, just a sanity checking assert while phasing in this extra set of
+ // relocations.
+ DCHECK(m_relocatablePages.contains(heapPage));
+
+ NormalPage* normalPage = static_cast<NormalPage*>(heapPage);
+ auto perHeap = m_externalRelocations.find(normalPage->arenaForNormalPage());
+ if (perHeap == m_externalRelocations.end()) {
+ Vector<MovableReference*> relocations;
+ relocations.append(slot);
+ ExternalRelocations table;
+ table.add(*slot, relocations);
+ m_externalRelocations.add(normalPage->arenaForNormalPage(), table);
+ return;
+ }
+ auto entry = perHeap->value.find(*slot);
+ if (entry == perHeap->value.end()) {
+ Vector<MovableReference*> relocations;
+ relocations.append(slot);
+ perHeap->value.add(*slot, relocations);
+ return;
+ }
+ entry->value.append(slot);
+ }
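+
+  // The net effect of addRelocation() is a two-level table; a sketch of its
+  // shape (arena, object and slot names are made up for illustration):
+  //
+  //   m_externalRelocations:
+  //     arenaA -> { objectX -> [slot1, slot2], objectY -> [slot3] }
+  //     arenaB -> { objectZ -> [slot4] }
+  //
+  // fixupExternalRelocations() below walks one arena's table and rewrites
+  // each recorded slot with the object's post-compaction address.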
+
+ void fixupExternalRelocations(NormalPageArena* arena) {
+ auto perHeap = m_externalRelocations.find(arena);
+ if (LIKELY(perHeap == m_externalRelocations.end()))
+ return;
+ for (const auto& entry : perHeap->value) {
+ MovableReference heapObject = entry.key;
+ // |heapObject| will either be in |m_fixups| or have been recorded as
+ // an internal fixup.
+ auto heapEntry = m_fixups.find(heapObject);
+ if (heapEntry != m_fixups.end()) {
+ for (auto slot : entry.value)
+ *slot = reinterpret_cast<MovableReference>(heapEntry->value);
+ continue;
+ }
+ // The movement of the containing object will have moved the
+ // interior slot.
+ auto it = m_interiorFixups.find(
+ reinterpret_cast<MovableReference*>(heapObject));
+ DCHECK(it != m_interiorFixups.end());
+ for (auto slot : entry.value)
+ *slot = reinterpret_cast<MovableReference>(it->value);
+ }
+ }
+
+#if DEBUG_HEAP_COMPACTION
+ void dumpDebugStats() {
+ LOG_HEAP_COMPACTION(
+ "Fixups: pages=%u objects=%u callbacks=%u interior-size=%zu"
+ " interiors-f=%u externals=%u\n",
+ m_relocatablePages.size(), m_fixups.size(), m_fixupCallbacks.size(),
+ m_interiors ? m_interiors->intervalCount() : 0, m_interiorFixups.size(),
+ m_externalRelocations.size());
+ }
+#endif
+
+ private:
+ MovableObjectFixups() {}
+
+  // Tracking movable and updatable references. For now, we keep a
+  // map which, for each movable object, records the slot that
+  // points to it. Upon moving the object, that slot needs to be
+  // updated.
+  //
+  // (TODO: consider in-place updating schemes.)
+ HashMap<MovableReference, MovableReference*> m_fixups;
+
+ // Map from movable reference to callbacks that need to be invoked
+ // when the object moves.
+ HashMap<MovableReference, std::pair<void*, MovingObjectCallback>>
+ m_fixupCallbacks;
+
+ // Slot => relocated slot/final location.
+ HashMap<MovableReference*, Address> m_interiorFixups;
+
+ // All pages that are being compacted.
+ HashSet<BasePage*> m_relocatablePages;
+
+ std::unique_ptr<SparseHeapBitmap> m_interiors;
+
+  // Each heap/arena may have additional slots pointing into it,
+  // which must be fixed up & relocated after compaction has happened.
+  //
+  // This is currently not needed for Blink, but the functionality is kept
+  // around to support it, should the need arise.
+ using ExternalRelocations =
+ HashMap<MovableReference, Vector<MovableReference*>>;
+
+ HashMap<NormalPageArena*, ExternalRelocations> m_externalRelocations;
+};
+
+#if DEBUG_HEAP_COMPACTION
+namespace {
+
+const char* gcReasonString(BlinkGC::GCReason reason) {
+ switch (reason) {
+    case BlinkGC::IdleGC:
+ return "IdleGC";
+ case BlinkGC::PreciseGC:
+ return "PreciseGC";
+ case BlinkGC::ConservativeGC:
+ return "ConservativeGC";
+ case BlinkGC::ForcedGC:
+ return "ForcedGC";
+ case BlinkGC::MemoryPressureGC:
+ return "MemoryPressureGC";
+ case BlinkGC::PageNavigationGC:
+ return "PageNavigationGC";
+ default:
+ NOTREACHED();
+ }
+ return "<Unknown>";
+}
+
+} // namespace
+#endif
+
+HeapCompact::HeapCompact()
+ : m_doCompact(false),
+ m_gcCountSinceLastCompaction(0),
+ m_threadCount(0),
+ m_freeListAllocations(0),
+ m_compactableHeaps(0u),
+ m_freedPages(0),
+ m_freedSize(0)
+#if DEBUG_LOG_HEAP_COMPACTION_RUNNING_TIME
+ ,
+ m_startCompaction(0),
+ m_startCompactionTimeMS(0)
+#endif
+{
+}
+
+HeapCompact::~HeapCompact() {}
+
+HeapCompact::MovableObjectFixups& HeapCompact::fixups() {
+ if (!m_fixups)
+ m_fixups = MovableObjectFixups::create();
+ return *m_fixups;
+}
+
+// checkIfCompacting() is called when a GC is initiated
+// (by ThreadState::collectGarbage()). It checks whether there is sufficient
+// reason to do a compaction pass on completion of the GC (but before
+// lazy sweeping), and whether it can be done safely (i.e., the GC is not a
+// conservative one.)
+//
+// TODO(sof): reconsider what is an effective policy for when compaction
+// is required. Both in terms of frequency and freelist residency.
+void HeapCompact::checkIfCompacting(ThreadHeap* heap,
+ Visitor* visitor,
+ BlinkGC::GCType gcType,
+ BlinkGC::GCReason reason) {
+#if ENABLE_HEAP_COMPACTION
+ if (!RuntimeEnabledFeatures::heapCompactionEnabled())
+ return;
+
+ m_doCompact = false;
+ LOG_HEAP_COMPACTION("check if compacting: gc=%s count=%zu free=%zu\n",
+ gcReasonString(reason), m_gcCountSinceLastCompaction,
+ m_freeListAllocations);
+ m_gcCountSinceLastCompaction++;
+ // It is only safe to compact during non-conservative GCs.
+ if (reason != BlinkGC::IdleGC && reason != BlinkGC::PreciseGC &&
+ reason != BlinkGC::ForcedGC)
+ return;
+
+ // If any of the participating threads require a stack scan,
+ // do not compact.
+ //
+ // Why? Should the stack contain an iterator pointing into its
+ // associated backing store, its references wouldn't be
+ // correctly relocated.
+ for (ThreadState* state : heap->threads()) {
+ if (state->stackState() == BlinkGC::HeapPointersOnStack) {
+ return;
+ }
+ }
+
+ m_freedPages = 0;
+ m_freedSize = 0;
+
+// Compaction enable rules:
+// - It's been a while since the last time.
+// - "Considerable" amount of heap bound up in freelist allocations.
+// For now, use a fixed limit irrespective of heap size.
+//
+// As this does not compact all heaps/arenas, and compaction will
+// additionally only be done by idle GCs, its cost isn't a worry.
+// TODO: add some form of compaction overhead estimate to the marking
+// time estimate.
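+//
+// Restated as a predicate (kCompactIntervalThreshold and kFreeThreshold
+// are defined elsewhere; this just mirrors the expression below):
+//
+//   doCompact = s_forceCompactionGC ||
+//               (gcCountSinceLastCompaction > kCompactIntervalThreshold &&
+//                freeListAllocations > kFreeThreshold)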
+
+#if STRESS_TEST_HEAP_COMPACTION
+ // Exercise the handling of object movement by compacting as
+ // often as possible.
+ m_doCompact = true;
+#else
+ m_doCompact = s_forceCompactionGC ||
+ (m_gcCountSinceLastCompaction > kCompactIntervalThreshold &&
+ m_freeListAllocations > kFreeThreshold);
+#endif
+ if (m_doCompact) {
+ LOG_HEAP_COMPACTION("Compacting: free=%zu\n", m_freeListAllocations);
+ m_threadCount = heap->threads().size();
+ visitor->setMarkCompactionMode();
+ m_fixups.reset();
+ m_gcCountSinceLastCompaction = 0;
+ s_forceCompactionGC = false;
+ }
+#endif // ENABLE_HEAP_COMPACTION
+}
+
+void HeapCompact::registerMovingObjectReference(MovableReference* slot) {
+ if (!m_doCompact)
+ return;
+
+ fixups().add(slot);
+}
+
+void HeapCompact::registerMovingObjectCallback(MovableReference reference,
+ MovingObjectCallback callback,
+ void* callbackData) {
+ if (!m_doCompact)
+ return;
+
+ fixups().addFixupCallback(reference, callback, callbackData);
+}
+
+void HeapCompact::registerRelocation(MovableReference* slot) {
+ if (!m_doCompact)
+ return;
+
+ if (!*slot)
+ return;
+
+ fixups().addRelocation(slot);
+}
+
+void HeapCompact::setHeapResidency(
+ size_t liveSize,
+ size_t freeSize,
+ const Vector<std::pair<size_t, size_t>>& heapResidencies) {
+#if DEBUG_HEAP_FREELIST
+ LOG_HEAP_FREELIST("Heap residencies: {");
+  for (size_t i = 0; i < heapResidencies.size(); ++i) {
+    LOG_HEAP_FREELIST("%zu: [%zu, %zu], ", i, heapResidencies[i].first,
+                      heapResidencies[i].second);
+ }
+ LOG_HEAP_FREELIST("}\nFree + live size: %zu %zu\n", freeSize, liveSize);
+#endif
+ // Mark the sub heaps that are viable compaction candidates;
+ // or, rather, not mark those that aren't.
+ size_t subHeapCount =
+ BlinkGC::HashTableArenaIndex - BlinkGC::Vector1ArenaIndex + 1;
+ DCHECK(heapResidencies.size() == subHeapCount);
+ m_compactableHeaps = 0;
+ for (size_t i = 0; i < subHeapCount; ++i) {
+ // TODO: be more discriminating and consider sub-heap
+ // load factor, effectiveness of past compactions etc.
+ if (heapResidencies[i].first == 0) {
+ if (m_doCompact) {
+ LOG_HEAP_COMPACTION("Not compacting heap: %zu\n",
+ BlinkGC::Vector1ArenaIndex + i);
+ }
+ continue;
+ }
+ m_compactableHeaps |= (0x1u << (BlinkGC::Vector1ArenaIndex + i));
+ }
+ if (m_doCompact) {
+ // Reset the total freelist allocation if we're about to compact.
+ // TODO(sof): re-record the actual (but very low) freelist size
+ // after the compaction has completed.
+ m_freeListAllocations = 0;
+ return;
+ }
+ // TODO(sof): consider smoothing the reported sizes.
+ m_freeListAllocations = freeSize;
+}
+
+void HeapCompact::finishedArenaCompaction(NormalPageArena* arena,
+ size_t freedPages,
+ size_t freedSize) {
+ if (!m_doCompact)
+ return;
+
+ fixups().fixupExternalRelocations(arena);
+ m_freedPages += freedPages;
+ m_freedSize += freedSize;
+}
+
+void HeapCompact::movedObject(Address from, Address to) {
+ DCHECK(m_fixups);
+ m_fixups->relocate(from, to);
+}
+
+void HeapCompact::startCompacting(ThreadState*) {
+#if DEBUG_LOG_HEAP_COMPACTION_RUNNING_TIME
haraken 2016/12/02 12:43:19 if (!m_doCompact) return; ?
sof 2016/12/04 14:55:37 Added, along with renaming the method to startThre
+  // Record the start time once, on whichever thread gets here first.
+  if (atomicTestAndSetToOne(&m_startCompaction))
+    m_startCompactionTimeMS = WTF::currentTimeMS();
+#endif
+}
+
+void HeapCompact::finishedCompacting(ThreadState*) {
+ if (!m_doCompact)
+ return;
+
+ MutexLocker locker(m_mutex);
+  // The last thread to finish clears the compaction state; the others
+  // wait below for it to do so.
+ if (!--m_threadCount) {
+#if DEBUG_HEAP_COMPACTION
+ if (m_fixups)
+ m_fixups->dumpDebugStats();
+#endif
+ m_fixups.reset();
+ m_doCompact = false;
+#if DEBUG_LOG_HEAP_COMPACTION_RUNNING_TIME
+ double end = WTF::currentTimeMS();
+ LOG_HEAP_COMPACTION_INTERNAL(
+ "Compaction stats: time=%gms, pages=%zu, size=%zu\n",
+ end - m_startCompactionTimeMS, m_freedPages, m_freedSize);
+ m_startCompaction = 0;
+ m_startCompactionTimeMS = 0;
+#else
+ LOG_HEAP_COMPACTION("Compaction stats: freed pages=%zu size=%zu\n",
+ m_freedPages, m_freedSize);
+#endif
+ m_finished.broadcast();
+ } else {
+ // See comment next to the |m_finished| declaration for why
+ // this synchronization is needed.
+ m_finished.wait(m_mutex);
+ }
+}
+
+void HeapCompact::addCompactablePage(BasePage* page) {
+ if (!m_doCompact)
+ return;
+ fixups().addCompactablePage(page);
+}
+
+bool HeapCompact::scheduleCompactionGCForTesting(bool value) {
+ bool current = s_forceCompactionGC;
+ s_forceCompactionGC = value;
+ return current;
+}
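+
+// Example test usage (a sketch; the exact way tests trigger a GC differs):
+//
+//   bool previous = HeapCompact::scheduleCompactionGCForTesting(true);
+//   // ... trigger a non-conservative GC; checkIfCompacting() will then
+//   // force a compaction pass ...
+//   HeapCompact::scheduleCompactionGCForTesting(previous);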
+
+} // namespace blink
