Chromium Code Reviews

Unified Diff: third_party/WebKit/Source/platform/heap/HeapCompact.cpp

Issue 2531973002: Simple BlinkGC heap compaction. (Closed)
Patch Set: cros compile fix Created 4 years ago
Index: third_party/WebKit/Source/platform/heap/HeapCompact.cpp
diff --git a/third_party/WebKit/Source/platform/heap/HeapCompact.cpp b/third_party/WebKit/Source/platform/heap/HeapCompact.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..122d5f0c90e569c89fe5a4b29ea9b6c01d4d62bb
--- /dev/null
+++ b/third_party/WebKit/Source/platform/heap/HeapCompact.cpp
@@ -0,0 +1,564 @@
+// Copyright 2016 Opera Software AS. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "platform/heap/HeapCompact.h"
+
+#include "platform/RuntimeEnabledFeatures.h"
+#include "platform/heap/Heap.h"
+#include "platform/heap/SparseHeapBitmap.h"
+#include "wtf/CurrentTime.h"
+#include "wtf/HashMap.h"
+#include "wtf/HashSet.h"
+#include "wtf/Vector.h"
+
+namespace blink {
+
+bool HeapCompact::s_forceCompactionGC = false;
+
+// The real worker behind heap compaction, recording references to movable
+// objects ("slots".) When the objects end up being compacted and moved,
+// relocate() will adjust the slots to point to the new location of the
+// object along with handling fixups for interior pointers.
+//
+// The "fixups" object is created and maintained for the lifetime of one
+// heap compaction-enhanced GC.
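+//
+// Rough call flow during a compaction-enabled GC (an illustrative sketch
+// only; the precise call sites live in ThreadState/ThreadHeap and the
+// arena compaction code):
+//
+//   HeapCompact::registerMovingObjectReference(slot)
+//       -> fixups().add(slot)              (while tracing backing stores)
+//   HeapCompact::movedObject(from, to)
+//       -> fixups().relocate(from, to)     (as objects are moved)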
+class HeapCompact::MovableObjectFixups final {
+ public:
+ static std::unique_ptr<MovableObjectFixups> create() {
+ return std::unique_ptr<MovableObjectFixups>(new MovableObjectFixups);
haraken 2016/12/05 11:27:46 wrapUnique
sof 2016/12/05 19:30:06 Done.
+ }
+
+ ~MovableObjectFixups() {}
+
+ void addCompactablePage(BasePage* p) {
+ // Add all pages belonging to the arena to the set of relocatable pages.
+ m_relocatablePages.add(p);
+ }
+
+ void add(MovableReference* slot) {
+ MovableReference reference = *slot;
+ BasePage* refPage = pageFromObject(reference);
+ // Nothing to compact on a large object's page.
+ if (refPage->isLargeObjectPage())
+ return;
+
+#if DCHECK_IS_ON()
+ auto it = m_fixups.find(reference);
+ DCHECK(it == m_fixups.end() || it->value == slot);
+#endif
+ Address slotAddress = reinterpret_cast<Address>(slot);
+ BasePage* slotPage = reinterpret_cast<BasePage*>(
+ blinkPageAddress(slotAddress) + blinkGuardPageSize);
+ if (m_relocatablePages.contains(slotPage)) {
+ // Slot resides on a compactable heap's page.
haraken 2016/12/05 11:27:46 Add DCHECK(slotPage->contains(slotAddress)).
sof 2016/12/05 19:30:05 Done.
+ // => It is an interior slot (interior to some other backing store.)
+ // Record it as an interior slot, which entails:
+ //
+ // - Storing it in the interior map, which maps the slot to
+ // its (eventual) location. Initially nullptr.
+ // - Marking it as an interior pointer within the page's
+ // "interior" bitmap. This bitmap is used when moving a backing
+ // store, to more quickly check if interior slots will have to
+ // be redirected.
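+ //
+ // Illustrative example (not specific to any one caller): for a
+ // HeapHashMap<K, HeapVector<V>>, the vector objects live inside the
+ // hash table's backing store, so each vector's pointer to its own
+ // backing store is an interior slot -- it resides on a compactable
+ // page and will itself move if the hash table backing is compacted.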
+
+ // Large object pages aren't compactable by definition, so we shouldn't
+ // encounter any of them here.
+ DCHECK(!slotPage->isLargeObjectPage());
+ if (HeapCompact::isCompactableArena(slotPage->arena()->arenaIndex()))
haraken 2016/12/05 11:27:46 When can this return false? m_relocatablePages con
sof 2016/12/05 19:30:06 addRelocatablePage() doesn't currently take m_comp
+ addInteriorFixup(slotAddress, slot);
+ }
+ m_fixups.add(reference, slot);
+ }
+
+ void addFixupCallback(MovableReference reference,
haraken 2016/12/05 11:27:47 I think you're assuming that addFixupCallback() is
sof 2016/12/05 19:30:06 Not readily as that map is over slots, this is a r
haraken 2016/12/06 13:30:38 Hmm, what do you mean? DCHECK(m_fixups.contains(re
sof 2016/12/06 21:39:34 Sorry, thought of wrong map. It doesn't hold in ge
+ MovingObjectCallback callback,
+ void* callbackData) {
+ DCHECK(!m_fixupCallbacks.contains(reference));
+ m_fixupCallbacks.add(reference, std::pair<void*, MovingObjectCallback>(
+ callbackData, callback));
+ }
+
+ size_t size() const { return m_fixups.size(); }
haraken 2016/12/05 11:27:46 size() => numberOfFixups()
sof 2016/12/05 19:30:05 A generic name like that makes it harder to see th
+
+ void relocateInteriorFixups(Address from, Address to, size_t size) {
haraken 2016/12/05 11:27:47 size => payloadSize
sof 2016/12/05 19:30:05 "size" is accurate as-is; we're not using "payload
+ SparseHeapBitmap* range = m_interiors->hasRange(from, size);
+ if (LIKELY(!range))
+ return;
+
+ // Scan through the payload, looking for interior pointer slots
+ // to adjust. If the backing store of such an interior slot hasn't
+ // been moved already, update the slot -> real location mapping.
+ // When the backing store is eventually moved, it'll use that location.
+ //
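+ // Note that there is no fixed order between compacting the backing
+ // store that contains an interior slot and compacting the backing
+ // store it points to; m_interiorFixups mediates both orders (see
+ // relocate().)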
+ for (size_t i = 0; i < size; i += sizeof(void*)) {
haraken 2016/12/05 11:27:47 i => offset
sof 2016/12/05 19:30:05 Renamed.
+ if (!range->isSet(from + i))
+ continue;
+ MovableReference* fromRef = reinterpret_cast<MovableReference*>(from + i);
haraken 2016/12/05 11:27:47 fromRef => slot This is not a reference.
sof 2016/12/05 19:30:06 Done.
+ auto it = m_interiorFixups.find(fromRef);
+ if (it == m_interiorFixups.end())
+ continue;
+
+ // TODO: with the right sparse bitmap representation, it could be possible
+ // to quickly determine if we've now stepped past the last address
+ // that needed fixup in [address, address + size). Breaking out of this
+ // loop might be worth doing for hash table backing stores with a very
+ // low load factor. But interior fixups are rare.
+
+ // If |slot|'s mapping is set, then the slot has been adjusted already.
+ if (it->value)
+ continue;
haraken 2016/12/05 11:27:46 Can we check that it->value is equal to |to+i|?
sof 2016/12/05 19:30:06 No, it->value would contain the (relocated) refere
+ LOG_HEAP_COMPACTION("Range interior fixup: %p %p %p\n", from + i,
+ it->value, to + i);
+ Address fixup = to + i;
+ // Fill in the relocated location of the original slot at |from + i|;
+ // when the backing store corresponding to |from + i| is eventually
+ // moved/compacted, it'll update |to + i| with a pointer to the
+ // moved backing store.
+ m_interiorFixups.set(fromRef, fixup);
+ }
+ }
+
+ void relocate(Address from, Address to) {
+ auto it = m_fixups.find(from);
+ DCHECK(it != m_fixups.end());
+ MovableReference* slot = reinterpret_cast<MovableReference*>(it->value);
+ auto interior = m_interiorFixups.find(slot);
+ if (interior != m_interiorFixups.end()) {
+ MovableReference* slotLocation =
+ reinterpret_cast<MovableReference*>(interior->value);
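+ // A non-null mapping means the backing store containing this interior
+ // slot has already been compacted (see relocateInteriorFixups()); the
+ // slot itself now resides at |slotLocation|. A null mapping means the
+ // containing store hasn't moved yet; setting the mapping to |to| below
+ // marks the slot as adjusted, so relocateInteriorFixups() will leave
+ // it alone when that store does move.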
+ if (!slotLocation) {
+ m_interiorFixups.set(slot, to);
haraken 2016/12/05 11:27:46 Why do we need to update m_interiorFixups here? I
sof 2016/12/05 19:30:05 No, we don't know which is processed first -- the
+ } else {
+ LOG_HEAP_COMPACTION("Redirected slot: %p => %p\n", slot, slotLocation);
haraken 2016/12/05 11:27:46 Can we add DCHECK(slotLocation == to)? In the fir
sof 2016/12/05 19:30:05 That wouldn't be appropriate; if m_interiorFixups
+ slot = slotLocation;
+ }
+ }
+ // If the slot has subsequently been updated (e.g., by a prefinalizer or
+ // a destructor having mutated and expanded/shrunk the collection), do
+ // not update and relocate the slot -- |from| is no longer valid
+ // and referenced.
haraken 2016/12/05 11:27:46 Specifically, how is it possible that destructor o
sof 2016/12/05 19:30:06 The slot contains the pointer to the backing store
+ //
+ // The slot's contents may also have been cleared during weak processing;
+ // no work to be done in that case either.
+ if (UNLIKELY(*slot != from)) {
haraken 2016/12/05 11:27:46 Does '*slot != from' happen only when the *slot is
sof 2016/12/05 19:30:05 The first half of the comment block explains why t
+ LOG_HEAP_COMPACTION(
+ "No relocation: slot = %p, *slot = %p, from = %p, to = %p\n", slot,
+ *slot, from, to);
+ return;
+ }
+ *slot = to;
+
+ size_t size = 0;
haraken 2016/12/05 11:27:46 size => payloadSize
sof 2016/12/05 19:30:06 "size" is fine as-is.
+ auto callback = m_fixupCallbacks.find(from);
+ if (UNLIKELY(callback != m_fixupCallbacks.end())) {
+ size = HeapObjectHeader::fromPayload(to)->payloadSize();
+ callback->value.second(callback->value.first, from, to, size);
+ }
+
+ if (LIKELY(!m_interiors))
+ return;
+
+ if (!size)
+ size = HeapObjectHeader::fromPayload(to)->payloadSize();
+ relocateInteriorFixups(from, to, size);
+ }
+
+ void addInteriorFixup(Address interior, MovableReference* slot) {
+ auto it = m_interiorFixups.find(slot);
+ // Ephemeron fixpoint iterations may cause repeated
+ // registrations.
+ DCHECK(it == m_interiorFixups.end() || !it->value);
+ if (UNLIKELY(it != m_interiorFixups.end() && !it->value))
haraken 2016/12/05 11:27:46 Would you help me understand this branch? I don't
sof 2016/12/05 19:30:06 nullptr is what we initially add (see line just be
haraken 2016/12/06 13:30:38 So what we really want to do is: ... // Drop the
sof 2016/12/06 21:39:34 No, ephemeron fix pointing is quite valid.
haraken 2016/12/07 08:55:11 Ephemeron fix pointing is valid, but how is it pos
sof 2016/12/07 10:45:08 Tuned the assert logic.
+ return;
+ m_interiorFixups.add(slot, nullptr);
+ addInteriorMapping(interior);
haraken 2016/12/05 11:27:46 You can inline the method.
sof 2016/12/05 19:30:06 Done.
+ }
+
+ void addInteriorMapping(Address interior) {
+ LOG_HEAP_COMPACTION("Interior: %p\n", interior);
+ if (!m_interiors) {
+ m_interiors = SparseHeapBitmap::create(interior);
+ return;
+ }
+ m_interiors->add(interior);
+ }
+
+ void addRelocation(MovableReference* slot) {
haraken 2016/12/05 11:27:46 I'm wondering why we need to have so different cod
sof 2016/12/05 19:30:05 Your assumption is correct, backing stores are "li
+ MovableReference reference = *slot;
+ if (!m_fixups.contains(reference)) {
+ // Record the interior pointer.
+ addInteriorFixup(reinterpret_cast<Address>(reference), slot);
+ }
+
+ BasePage* heapPage = pageFromObject(reference);
+ DCHECK(heapPage);
+ DCHECK(!heapPage->isLargeObjectPage());
+ // For now, the heap objects we're adding relocations for are assumed
+ // to be residing in a compactable heap. There's no reason why it must be
+ // so, just a sanity checking assert while phasing in this extra set of
+ // relocations.
+ DCHECK(m_relocatablePages.contains(heapPage));
+
+ NormalPage* normalPage = static_cast<NormalPage*>(heapPage);
+ auto perHeap = m_externalRelocations.find(normalPage->arenaForNormalPage());
+ if (perHeap == m_externalRelocations.end()) {
+ Vector<MovableReference*> relocations;
+ relocations.append(slot);
+ ExternalRelocations table;
+ table.add(*slot, relocations);
+ m_externalRelocations.add(normalPage->arenaForNormalPage(), table);
+ return;
+ }
+ auto entry = perHeap->value.find(*slot);
+ if (entry == perHeap->value.end()) {
+ Vector<MovableReference*> relocations;
+ relocations.append(slot);
+ perHeap->value.add(*slot, relocations);
+ return;
+ }
+ entry->value.append(slot);
+ }
+
+ void fixupExternalRelocations(NormalPageArena* arena) {
+ auto perHeap = m_externalRelocations.find(arena);
+ if (LIKELY(perHeap == m_externalRelocations.end()))
+ return;
+ for (const auto& entry : perHeap->value) {
+ MovableReference heapObject = entry.key;
+ // |heapObject| will either be in |m_fixups| or have been recorded as
+ // an internal fixup.
+ auto heapEntry = m_fixups.find(heapObject);
+ if (heapEntry != m_fixups.end()) {
+ for (auto slot : entry.value)
+ *slot = reinterpret_cast<MovableReference>(heapEntry->value);
+ continue;
+ }
+ // The movement of the containing object will have moved the
+ // interior slot.
+ auto it = m_interiorFixups.find(
+ reinterpret_cast<MovableReference*>(heapObject));
+ DCHECK(it != m_interiorFixups.end());
+ for (auto slot : entry.value)
+ *slot = reinterpret_cast<MovableReference>(it->value);
+ }
+ }
+
+#if DEBUG_HEAP_COMPACTION
+ void dumpDebugStats() {
+ LOG_HEAP_COMPACTION(
+ "Fixups: pages=%u objects=%u callbacks=%u interior-size=%zu"
+ " interiors-f=%u externals=%u\n",
+ m_relocatablePages.size(), m_fixups.size(), m_fixupCallbacks.size(),
+ m_interiors ? m_interiors->intervalCount() : 0, m_interiorFixups.size(),
+ m_externalRelocations.size());
+ }
+#endif
+
+ private:
+ MovableObjectFixups() {}
+
+ // Tracking movable and updatable references. For now, we keep a
+ // map which, for each movable object, records the slot that
+ // points to it. Upon moving the object, that slot needs to be
+ // updated.
+ //
+ // (TODO: consider in-place updating schemes.)
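+ //
+ // (Keys are the objects' pre-move addresses; values are the slots
+ // holding the references, as recorded by add().)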
+ HashMap<MovableReference, MovableReference*> m_fixups;
+
+ // Map from movable reference to callbacks that need to be invoked
+ // when the object moves.
+ HashMap<MovableReference, std::pair<void*, MovingObjectCallback>>
+ m_fixupCallbacks;
+
+ // Slot => relocated slot/final location.
+ HashMap<MovableReference*, Address> m_interiorFixups;
+
+ // All pages that are being compacted.
+ HashSet<BasePage*> m_relocatablePages;
+
+ std::unique_ptr<SparseHeapBitmap> m_interiors;
+
+ // Each heap/arena may have additional slots pointing into it,
+ // which must be fixed up & relocated after compaction has happened.
+ //
+ // This is currently not needed for Blink, but the functionality is
+ // kept around to be able to support it should the need arise.
haraken 2016/12/05 11:27:46 You can say it's needed in Opera. Otherwise, someo
sof 2016/12/05 19:30:06 It's not, no one uses it. I'm fine with letting it
haraken 2016/12/06 13:30:38 If it's unused, I'd prefer not landing the code un
sof 2016/12/06 21:39:34 ok, the normal procedure for Blink. Removed.
+ using ExternalRelocations =
+ HashMap<MovableReference, Vector<MovableReference*>>;
+
+ HashMap<NormalPageArena*, ExternalRelocations> m_externalRelocations;
+};
+
+#if DEBUG_HEAP_COMPACTION
+namespace {
+
+const char* gcReasonString(BlinkGC::GCReason reason) {
haraken 2016/12/05 11:27:46 We should share this function with ThreadState.cpp
sof 2016/12/05 19:30:05 Re-added, it was dropped there as a public method
+ switch (reason) {
+ case BlinkGC::IdleGC:
+ return "IdleGC";
+ case BlinkGC::PreciseGC:
+ return "PreciseGC";
+ case BlinkGC::ConservativeGC:
+ return "ConservativeGC";
+ case BlinkGC::ForcedGC:
+ return "ForcedGC";
+ case BlinkGC::MemoryPressureGC:
+ return "MemoryPressureGC";
+ case BlinkGC::PageNavigationGC:
+ return "PageNavigationGC";
+ default:
+ NOTREACHED();
+ }
+ return "<Unknown>";
+}
+
+} // namespace
+#endif
+
+HeapCompact::HeapCompact()
+ : m_doCompact(false),
+ m_gcCountSinceLastCompaction(0),
+ m_threadCount(0),
+ m_freeListSize(0),
+ m_compactableArenas(0u),
+ m_freedPages(0),
+ m_freedSize(0)
+#if DEBUG_LOG_HEAP_COMPACTION_RUNNING_TIME
+ ,
+ m_startCompaction(0),
+ m_startCompactionTimeMS(0)
+#endif
+{
+}
+
+HeapCompact::~HeapCompact() {}
+
+HeapCompact::MovableObjectFixups& HeapCompact::fixups() {
+ if (!m_fixups)
+ m_fixups = MovableObjectFixups::create();
+ return *m_fixups;
+}
+
+// checkIfCompacting() is called when a GC is initiated
+// (by ThreadState::collectGarbage()), checking if there's sufficient
+// reason to do a compaction pass on completion of the GC (but before
+// lazy sweeping), and whether it can safely be done (i.e., it is not a
+// conservative GC.)
+//
+// TODO(sof): reconsider what is an effective policy for when compaction
+// is required. Both in terms of frequency and freelist residency.
+BlinkGC::GCType HeapCompact::checkIfCompacting(ThreadState* state,
+ BlinkGC::GCType gcType,
+ BlinkGC::GCReason reason) {
+#if ENABLE_HEAP_COMPACTION
+ if (!RuntimeEnabledFeatures::heapCompactionEnabled())
+ return gcType;
+
+ m_doCompact = false;
+ LOG_HEAP_COMPACTION("check if compacting: gc=%s count=%zu free=%zu\n",
+ gcReasonString(reason), m_gcCountSinceLastCompaction,
+ m_freeListSize);
+ m_gcCountSinceLastCompaction++;
+ // It is only safe to compact during non-conservative GCs.
+ // TODO: for the main thread, limit this further to only idle GCs.
+ if (reason != BlinkGC::IdleGC && reason != BlinkGC::PreciseGC &&
+ reason != BlinkGC::ForcedGC)
+ return gcType;
+
+ const ThreadHeap& heap = state->heap();
+ // If any of the participating threads require a stack scan,
+ // do not compact.
+ //
+ // Why? Should the stack contain an iterator pointing into its
+ // associated backing store, its references wouldn't be
+ // correctly relocated.
+ for (ThreadState* state : heap.threads()) {
+ if (state->stackState() == BlinkGC::HeapPointersOnStack) {
+ return gcType;
+ }
+ }
+
+ updateHeapResidency(state);
+
+ // Compaction enable rules:
+ // - It's been a while since the last time.
+ // - "Considerable" amount of heap memory is bound up in freelist
+ // allocations. For now, use a fixed limit irrespective of heap
+ // size.
+ //
+ // As this isn't compacting all arenas, the cost of doing compaction
+ // isn't a worry; additionally, it will only be done by idle GCs.
+ // TODO: add some form of compaction overhead estimate to the marking
+ // time estimate.
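+ //
+ // In other words (roughly): compact when more than
+ // kGCCountSinceLastCompactionThreshold GCs have passed since the last
+ // compaction and more than kFreeListSizeThreshold bytes sit on the
+ // freelists of the compactable arenas, or when a test has forced a
+ // compaction via scheduleCompactionGCForTesting().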
+ m_freedPages = 0;
+ m_freedSize = 0;
+
+#if STRESS_TEST_HEAP_COMPACTION
+ // Exercise the handling of object movement by compacting as
+ // often as possible.
+ m_doCompact = true;
+#else
+ m_doCompact =
+ s_forceCompactionGC ||
+ (m_gcCountSinceLastCompaction > kGCCountSinceLastCompactionThreshold &&
+ m_freeListSize > kFreeListSizeThreshold);
haraken 2016/12/05 11:27:46 Is it enough to look at only m_freeListSize? Even
sof 2016/12/05 19:30:06 It is possible to have a different & more discrimi
+#endif
+ if (m_doCompact) {
+ LOG_HEAP_COMPACTION("Compacting: free=%zu\n", m_freeListSize);
+ m_threadCount = heap.threads().size();
+ m_fixups.reset();
+ m_gcCountSinceLastCompaction = 0;
+ s_forceCompactionGC = false;
+ return BlinkGC::GCWithSweepCompaction;
+ }
+#endif // ENABLE_HEAP_COMPACTION
+ return gcType;
+}
+
+void HeapCompact::registerMovingObjectReference(MovableReference* slot) {
+ if (!m_doCompact)
+ return;
+
+ fixups().add(slot);
+}
+
+void HeapCompact::registerMovingObjectCallback(MovableReference reference,
+ MovingObjectCallback callback,
+ void* callbackData) {
+ if (!m_doCompact)
+ return;
+
+ fixups().addFixupCallback(reference, callback, callbackData);
+}
+
+void HeapCompact::registerRelocation(MovableReference* slot) {
+ if (!m_doCompact)
+ return;
+
+ if (!*slot)
haraken 2016/12/05 11:27:46 This should not happen, right?
sof 2016/12/05 19:30:06 No, we now insist - removed.
+ return;
+
+ fixups().addRelocation(slot);
+}
+
+void HeapCompact::updateHeapResidency(ThreadState* threadState) {
+ // The heap compaction implementation assumes the contiguous range,
+ //
+ // [Vector1ArenaIndex, HashTableArenaIndex]
+ //
+ // in a few spots. Use static asserts here to not have that assumption
+ // be silently invalidated by ArenaIndices changes.
+ static_assert(BlinkGC::Vector1ArenaIndex + 3 == BlinkGC::Vector4ArenaIndex,
+ "unexpected ArenaIndices ordering");
+ static_assert(
+ BlinkGC::Vector4ArenaIndex + 1 == BlinkGC::InlineVectorArenaIndex,
+ "unexpected ArenaIndices ordering");
+ static_assert(
+ BlinkGC::InlineVectorArenaIndex + 1 == BlinkGC::HashTableArenaIndex,
+ "unexpected ArenaIndices ordering");
+
+ size_t totalArenaSize = 0;
+ size_t freeArenaSize = 0;
haraken 2016/12/05 11:27:46 totalFreeListSize
sof 2016/12/05 19:30:06 Done, but there's arguably over-focus on naming he
+
+ m_compactableArenas = 0;
+#if DEBUG_HEAP_FREELIST
+ LOG_HEAP_FREELIST("Arena residencies: {");
+#endif
+ for (int i = BlinkGC::Vector1ArenaIndex; i <= BlinkGC::HashTableArenaIndex;
haraken 2016/12/05 11:27:46 I'd prefer encapsulating the details of arenas in
sof 2016/12/05 19:30:06 This loop & sampling was moved from ThreadState in
haraken 2016/12/06 13:30:38 In the first review, I just wanted to say that we
+ ++i) {
+ NormalPageArena* arena =
+ static_cast<NormalPageArena*>(threadState->arena(i));
+ size_t arenaSize = arena->arenaSize();
haraken 2016/12/05 11:27:46 It looks like arenaSize is unused except debug pri
sof 2016/12/05 19:30:06 ? Not so, it is added to totalArenaSize as well.
haraken 2016/12/06 13:30:38 But totalArenaSize is only for debug printing. You
+ size_t freeSize = arena->freeListSize();
haraken 2016/12/05 11:27:46 freeListSize
sof 2016/12/05 19:30:05 Alright.
+ totalArenaSize += arenaSize;
+ freeArenaSize += freeSize;
+ LOG_HEAP_FREELIST("%d: [%zu, %zu], ", i, arenaSize, freeSize);
+ // TODO: be more discriminating and consider arena
+ // load factor, effectiveness of past compactions etc.
+ if (arenaSize == 0)
+ continue;
+ // Mark the arena as compactable.
+ m_compactableArenas |= (0x1u << i);
+ }
+ LOG_HEAP_FREELIST("}\nTotal = %zu, Free = %zu\n", totalArenaSize,
+ freeArenaSize);
+
+ // TODO(sof): consider smoothing the reported sizes.
+ m_freeListSize = freeArenaSize;
haraken 2016/12/05 11:27:47 Instead of making m_freeListSize a member variable
sof 2016/12/05 19:30:05 I would prefer to keep it, see above TODO and rema
+}
+
+void HeapCompact::finishedArenaCompaction(NormalPageArena* arena,
+ size_t freedPages,
+ size_t freedSize) {
+ if (!m_doCompact)
+ return;
+
+ fixups().fixupExternalRelocations(arena);
+ m_freedPages += freedPages;
+ m_freedSize += freedSize;
+}
+
+void HeapCompact::movedObject(Address from, Address to) {
+ DCHECK(m_fixups);
+ m_fixups->relocate(from, to);
+}
+
+void HeapCompact::startThreadCompaction(ThreadState*) {
+ if (!m_doCompact)
+ return;
+#if DEBUG_LOG_HEAP_COMPACTION_RUNNING_TIME
+ if (!atomicTestAndSetToOne(&m_startCompaction))
+ m_startCompactionTimeMS = WTF::currentTimeMS();
+#endif
+}
+
+void HeapCompact::finishedThreadCompaction(ThreadState*) {
+ if (!m_doCompact)
+ return;
+
+ MutexLocker locker(m_mutex);
+ // The final thread to finish clears the shared compaction state.
+ if (!--m_threadCount) {
+#if DEBUG_HEAP_COMPACTION
+ if (m_fixups)
+ m_fixups->dumpDebugStats();
+#endif
+ m_fixups.reset();
+ m_doCompact = false;
+#if DEBUG_LOG_HEAP_COMPACTION_RUNNING_TIME
+ double end = WTF::currentTimeMS();
+ LOG_HEAP_COMPACTION_INTERNAL(
+ "Compaction stats: time=%gms, pages freed=%zu, size=%zu\n",
+ end - m_startCompactionTimeMS, m_freedPages, m_freedSize);
+ m_startCompaction = 0;
+ m_startCompactionTimeMS = 0;
+#else
+ LOG_HEAP_COMPACTION("Compaction stats: freed pages=%zu size=%zu\n",
+ m_freedPages, m_freedSize);
+#endif
+ // All compaction has completed, all participating threads may now
+ // proceed.
+ m_finished.broadcast();
+ } else {
+ // Letting a thread return to leave GC and become a "mutator" again
+ // runs the risk of it accessing heaps of other threads that are
+ // still being compacted. Consequently, all GC-participating threads
+ // must complete compaction together.
+ m_finished.wait(m_mutex);
+ }
+}
+
+void HeapCompact::addCompactablePage(BasePage* page) {
+ if (!m_doCompact)
+ return;
+ fixups().addCompactablePage(page);
+}
+
+bool HeapCompact::scheduleCompactionGCForTesting(bool value) {
+ bool current = s_forceCompactionGC;
+ s_forceCompactionGC = value;
+ return current;
+}
+
+} // namespace blink
