Chromium Code Reviews| Index: third_party/WebKit/Source/platform/heap/ThreadState.cpp |
| diff --git a/third_party/WebKit/Source/platform/heap/ThreadState.cpp b/third_party/WebKit/Source/platform/heap/ThreadState.cpp |
| index 9057cec1faf2a469839695d299c41d01ff2d33ec..097330f8d57ec9f4cabecfb0e36983e087b3180c 100644 |
| --- a/third_party/WebKit/Source/platform/heap/ThreadState.cpp |
| +++ b/third_party/WebKit/Source/platform/heap/ThreadState.cpp |
| @@ -38,6 +38,7 @@ |
| #include "platform/heap/CallbackStack.h" |
| #include "platform/heap/Handle.h" |
| #include "platform/heap/Heap.h" |
| +#include "platform/heap/HeapCompact.h" |
| #include "platform/heap/PagePool.h" |
| #include "platform/heap/SafePoint.h" |
| #include "platform/heap/Visitor.h" |
| @@ -1034,12 +1035,54 @@ void ThreadState::flushHeapDoesNotContainCacheIfNeeded() { |
| } |
| void ThreadState::makeConsistentForGC() { |
| + if (isMainThread()) { |
|
haraken
2016/12/02 12:43:20
Maybe we can move this code into checkIfCompacting()?
sof
2016/12/04 14:55:38
Moved the size sampling into the heap compaction c
|
| + // For the main thread, report heap + freelist residency to the |
| + // heap compactor. It uses the data to determine if compactions |
| + // is now worthwhile for one or more of the sub heaps/arenas it can |
| + // compact. |
| + size_t heapSize = 0; |
| + size_t freeSize = 0; |
|
haraken
2016/12/02 12:43:21
heapSize => totalArenaSize
freeSize => totalFreeListSize
sof
2016/12/04 14:55:38
Done.
|
| + using Residency = std::pair<size_t, size_t>; |
| + Vector<Residency> residencies; |
| + NormalPageArena* arena; |
| + for (int i = BlinkGC::Vector1ArenaIndex; i <= BlinkGC::HashTableArenaIndex; |
| + ++i) { |
|
haraken
2016/12/02 12:43:20
Add an assertion somewhere to check that all backi
sof
2016/12/04 14:55:38
Added (in HeapCompact::updateHeapResidency())
|
| + arena = static_cast<NormalPageArena*>(m_arenas[i]); |
| + heapSize += arena->arenaSize(); |
| + freeSize += arena->freeListSize(); |
| + residencies.append(Residency(arena->arenaSize(), arena->freeListSize())); |
| + } |
| + heap().compaction()->setHeapResidency(heapSize, freeSize, residencies); |
| + } |
| ASSERT(isInGC()); |
| TRACE_EVENT0("blink_gc", "ThreadState::makeConsistentForGC"); |
| for (int i = 0; i < BlinkGC::NumberOfArenas; ++i) |
| m_arenas[i]->makeConsistentForGC(); |
| } |
| +void ThreadState::compact() { |
|
haraken
2016/12/02 12:43:20
Add a TODO that the heap compaction should probabl
sof
2016/12/04 14:55:38
checkIfCompacting() is what decides this; added a TODO.
|
| + if (!heap().compaction()->isCompacting()) |
|
haraken
2016/12/02 12:43:20
When can this happen?
sof
2016/12/04 14:55:38
Most of the time? :) compact() is called unconditionally.
|
| + return; |
| + |
|
haraken
2016/12/02 12:43:20
Let's enter SweepForbiddenScope and ScriptForbiddenScope here.
sof
2016/12/04 14:55:38
Makes good sense, like the other sweeping passes.
|
| + // Compaction is done eagerly and before the mutator threads get |
| + // to run again. Doing it lazily is problematic, as the mutator's |
| + // references to live objects could suddenly be invalidated by |
| + // compaction of a page/heap. We do know all the references to |
| + // the relocating objects just after marking, but won't later. |
| + // (e.g., stack references could have been created, new objects |
| + // created which refer to old collection objects, and so on.) |
| + // |
| + heap().compaction()->startCompacting(this); |
| + // TODO: implement bail out wrt any overall deadline, not |
| + // compacting heaps if the time budget has been exceeded. |
| + static_cast<NormalPageArena*>(m_arenas[BlinkGC::HashTableArenaIndex]) |
| + ->sweepCompact(); |
|
haraken
2016/12/02 12:43:20
Can we move BlinkGC::HashTableArenaIndex into the
sof
2016/12/04 14:55:38
Done.
|
| + for (int i = BlinkGC::Vector1ArenaIndex; i <= BlinkGC::InlineVectorArenaIndex; |
| + ++i) |
| + static_cast<NormalPageArena*>(m_arenas[i])->sweepCompact(); |
| + heap().compaction()->finishedCompacting(this); |
| +} |
| + |
| void ThreadState::makeConsistentForMutator() { |
| ASSERT(isInGC()); |
| for (int i = 0; i < BlinkGC::NumberOfArenas; ++i) |
| @@ -1123,9 +1166,12 @@ void ThreadState::preSweep() { |
| eagerSweep(); |
| + compact(); |
| + |
| #if defined(ADDRESS_SANITIZER) |
| poisonAllHeaps(); |
| #endif |
| + |
| if (previousGCState == EagerSweepScheduled) { |
| // Eager sweeping should happen only in testing. |
| completeSweep(); |
| @@ -1685,6 +1731,9 @@ void ThreadState::collectGarbage(BlinkGC::StackState stackState, |
| if (!parkThreadsScope.parkThreads()) |
| return; |
| + heap().compaction()->checkIfCompacting(&heap(), visitor.get(), gcType, |
| + reason); |
| + |
| ScriptForbiddenIfMainThreadScope scriptForbidden; |
| TRACE_EVENT2("blink_gc,devtools.timeline", "BlinkGCMarking", "lazySweeping", |