Chromium Code Reviews
Side by Side Diff: third_party/WebKit/Source/platform/heap/HeapCompact.cpp

Issue 2531973002: Simple BlinkGC heap compaction. (Closed)
Patch Set: tweak assert Created 4 years ago
// Copyright 2016 Opera Software AS. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "platform/heap/HeapCompact.h"

#include "platform/RuntimeEnabledFeatures.h"
#include "platform/heap/Heap.h"
#include "platform/heap/SparseHeapBitmap.h"
#include "wtf/CurrentTime.h"
#include "wtf/HashMap.h"
#include "wtf/HashSet.h"
#include "wtf/Vector.h"

namespace blink {

bool HeapCompact::s_forceCompactionGC = false;

// The real worker behind heap compaction, recording references to movable
// objects ("slots".) When the objects end up being compacted and moved,
// relocate() will adjust the slots to point to the new location of the
// object, along with handling fixups for interior pointers.
//
// The "fixups" object is created and maintained for the lifetime of one
// heap compaction-enhanced GC.
class HeapCompact::MovableObjectFixups final {
 public:
  static std::unique_ptr<MovableObjectFixups> create() {
    return WTF::wrapUnique(new MovableObjectFixups);
  }

  ~MovableObjectFixups() {}

  // For the arenas being compacted, record all pages belonging to them.
  // This is needed to handle 'interior slots', pointers that can
  // themselves move (independently of the object they point to.)
  void addCompactingPage(BasePage* page) {
    DCHECK(!page->isLargeObjectPage());
    m_relocatablePages.add(page);
  }

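  // Record an interior slot: a slot that itself resides within a movable
  // backing store and may therefore be relocated when that backing store
  // is moved.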
  void addInteriorFixup(MovableReference* slot) {
    auto it = m_interiorFixups.find(slot);
    // Ephemeron fixpoint iterations may cause repeated registrations.
    if (UNLIKELY(it != m_interiorFixups.end())) {
      DCHECK(!it->value);
      return;
    }
    m_interiorFixups.add(slot, nullptr);
    LOG_HEAP_COMPACTION("Interior slot: %p\n", slot);
    Address slotAddress = reinterpret_cast<Address>(slot);
    if (!m_interiors) {
      m_interiors = SparseHeapBitmap::create(slotAddress);
      return;
    }
    m_interiors->add(slotAddress);
  }

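  // Record |slot|, a reference to a movable object, so that the slot can
  // be updated when the object it refers to is relocated. If the slot
  // itself resides on a page being compacted, it is additionally
  // registered as an interior slot.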
  void add(MovableReference* slot) {
    MovableReference reference = *slot;
    BasePage* refPage = pageFromObject(reference);
    // Nothing to compact on a large object's page.
    if (refPage->isLargeObjectPage())
      return;

#if DCHECK_IS_ON()
    DCHECK(HeapCompact::isCompactableArena(refPage->arena()->arenaIndex()));
    auto it = m_fixups.find(reference);
    DCHECK(it == m_fixups.end() || it->value == slot);
#endif

    // TODO: when updateHeapResidency() becomes more discriminating about
    // leaving out arenas that aren't worth compacting, a check for
    // isCompactingArena() would be appropriate here, leaving early if
    // |refPage|'s arena isn't in the set.

    m_fixups.add(reference, slot);

    // Note: |slot| will reside outside the Oilpan heap if it is a
    // PersistentHeapCollectionBase. Hence pageFromObject() cannot be
    // used, as it sanity checks the |BasePage| it returns. Simply
    // derive the raw BasePage address here and check if it is a member
    // of the compactable and relocatable page address set.
    Address slotAddress = reinterpret_cast<Address>(slot);
    BasePage* slotPage = reinterpret_cast<BasePage*>(
        blinkPageAddress(slotAddress) + blinkGuardPageSize);
    if (LIKELY(!m_relocatablePages.contains(slotPage)))
      return;
#if ENABLE(ASSERT)
    DCHECK(slotPage->contains(slotAddress));
#endif
    // Unlikely case: the slot resides on a compacting arena's page.
    // => It is an 'interior slot' (interior to a movable backing store.)
    // Record it as an interior slot, which entails:
    //
    //  - Storing it in the interior map, which maps the slot to
    //    its (eventual) location. Initially nullptr.
    //  - Marking it as an interior pointer in the page's "interior"
    //    bitmap. This bitmap is used when moving a backing store,
    //    to quickly check if any interior slots will have to be
    //    additionally redirected.
    addInteriorFixup(slot);
  }

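  // Register a callback to be invoked when the object referred to by
  // |reference| is moved; see relocate() below for how it is invoked.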
  void addFixupCallback(MovableReference reference,
                        MovingObjectCallback callback,
                        void* callbackData) {
    DCHECK(!m_fixupCallbacks.contains(reference));
    m_fixupCallbacks.add(reference, std::pair<void*, MovingObjectCallback>(
                                        callbackData, callback));
  }

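  // After moving a backing store from |from| to |to|, scan its payload for
  // registered interior slots and record their relocated addresses.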
  void relocateInteriorFixups(Address from, Address to, size_t size) {
    SparseHeapBitmap* range = m_interiors->hasRange(from, size);
    if (LIKELY(!range))
      return;

    // Scan through the payload, looking for interior pointer slots
    // to adjust. If the backing store of such an interior slot hasn't
    // been moved already, update the slot -> real location mapping.
    // When the backing store is eventually moved, it'll use that location.
    //
    for (size_t offset = 0; offset < size; offset += sizeof(void*)) {
      if (!range->isSet(from + offset))
        continue;
      MovableReference* slot =
          reinterpret_cast<MovableReference*>(from + offset);
      auto it = m_interiorFixups.find(slot);
      if (it == m_interiorFixups.end())
        continue;

      // TODO: with the right sparse bitmap representation, it could be
      // possible to quickly determine if we've now stepped past the last
      // address that needed fixup in [address, address + size). Breaking
      // out of this loop might be worth doing for hash table backing
      // stores with a very low load factor. But interior fixups are rare.

      // If |slot|'s mapping is set, then the slot has been adjusted already.
      if (it->value)
        continue;
      Address fixup = to + offset;
      LOG_HEAP_COMPACTION("Range interior fixup: %p %p %p\n", from + offset,
                          it->value, fixup);
      // Fill in the relocated location of the original slot at |slot|.
      // When the backing store corresponding to |slot| is eventually
      // moved/compacted, it'll update |to + offset| with a pointer to the
      // moved backing store.
      m_interiorFixups.set(slot, fixup);
    }
  }

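  // The object at |from| has been moved to |to|: update the slot that
  // references it, run any registered moving-object callback, and adjust
  // the interior slots residing within the moved backing store.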
  void relocate(Address from, Address to) {
    auto it = m_fixups.find(from);
    DCHECK(it != m_fixups.end());
#if DCHECK_IS_ON()
    BasePage* fromPage = pageFromObject(from);
    DCHECK(m_relocatablePages.contains(fromPage));
#endif
    MovableReference* slot = reinterpret_cast<MovableReference*>(it->value);
    auto interior = m_interiorFixups.find(slot);
    if (interior != m_interiorFixups.end()) {
      MovableReference* slotLocation =
          reinterpret_cast<MovableReference*>(interior->value);
      if (!slotLocation) {
        m_interiorFixups.set(slot, to);
      } else {
        LOG_HEAP_COMPACTION("Redirected slot: %p => %p\n", slot, slotLocation);
        slot = slotLocation;
      }
    }
    // If the slot has subsequently been updated (a prefinalizer or a
    // destructor having mutated and expanded/shrunk the collection),
    // do not update and relocate the slot -- |from| is no longer valid
    // nor referenced.
    //
    // The slot's contents may also have been cleared during weak processing;
    // no work to be done in that case either.
    if (UNLIKELY(*slot != from)) {
      LOG_HEAP_COMPACTION(
          "No relocation: slot = %p, *slot = %p, from = %p, to = %p\n", slot,
          *slot, from, to);
#if DCHECK_IS_ON()
      // Verify that the already-updated slot is valid, meaning it either:
      //  - has been cleared.
      //  - has been updated & expanded with a large object backing store.
      //  - has been updated with a larger, freshly allocated backing store.
      //    (on a fresh page in a compactable arena that is not being
      //    compacted.)
      if (!*slot)
        return;
      BasePage* slotPage = pageFromObject(*slot);
      DCHECK(
          slotPage->isLargeObjectPage() ||
          (HeapCompact::isCompactableArena(slotPage->arena()->arenaIndex()) &&
           !m_relocatablePages.contains(slotPage)));
#endif
      return;
    }
    *slot = to;

    size_t size = 0;
    auto callback = m_fixupCallbacks.find(from);
    if (UNLIKELY(callback != m_fixupCallbacks.end())) {
      size = HeapObjectHeader::fromPayload(to)->payloadSize();
      callback->value.second(callback->value.first, from, to, size);
    }

    if (!m_interiors)
      return;

    if (!size)
      size = HeapObjectHeader::fromPayload(to)->payloadSize();
    relocateInteriorFixups(from, to, size);
  }

#if DEBUG_HEAP_COMPACTION
  void dumpDebugStats() {
    LOG_HEAP_COMPACTION(
        "Fixups: pages=%u objects=%u callbacks=%u interior-size=%zu"
        " interiors-f=%u\n",
        m_relocatablePages.size(), m_fixups.size(), m_fixupCallbacks.size(),
        m_interiors ? m_interiors->intervalCount() : 0,
        m_interiorFixups.size());
  }
#endif

 private:
  MovableObjectFixups() {}

  // Tracking movable and updatable references. For now, we keep a
  // map which, for each movable object, records the slot that
  // points to it. Upon moving the object, that slot needs to be
  // updated.
  //
  // (TODO: consider in-place updating schemes.)
  HashMap<MovableReference, MovableReference*> m_fixups;

  // Map from movable reference to callbacks that need to be invoked
  // when the object moves.
  HashMap<MovableReference, std::pair<void*, MovingObjectCallback>>
      m_fixupCallbacks;

  // Slot => relocated slot/final location.
  HashMap<MovableReference*, Address> m_interiorFixups;

  // All pages that are being compacted.
  HashSet<BasePage*> m_relocatablePages;

  // Sparse bitmap recording the addresses of registered interior slots.
  std::unique_ptr<SparseHeapBitmap> m_interiors;
};

HeapCompact::HeapCompact()
    : m_doCompact(false),
      m_gcCountSinceLastCompaction(0),
      m_threadCount(0),
      m_freeListSize(0),
      m_compactableArenas(0u),
      m_freedPages(0),
      m_freedSize(0)
#if DEBUG_LOG_HEAP_COMPACTION_RUNNING_TIME
      ,
      m_startCompactionTimeMS(0)
#endif
{
}

HeapCompact::~HeapCompact() {}

HeapCompact::MovableObjectFixups& HeapCompact::fixups() {
  if (!m_fixups)
    m_fixups = MovableObjectFixups::create();
  return *m_fixups;
}

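// Determine whether the upcoming GC should also compact the heap, based on
// the GC reason, the stack state of all participating threads, and the
// amount of free-list memory held by the compactable arenas.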
bool HeapCompact::shouldCompact(ThreadState* state,
                                BlinkGC::GCType gcType,
                                BlinkGC::GCReason reason) {
#if !ENABLE_HEAP_COMPACTION
  return false;
#else
  if (!RuntimeEnabledFeatures::heapCompactionEnabled())
    return false;

  LOG_HEAP_COMPACTION("shouldCompact(): gc=%s count=%zu free=%zu\n",
                      ThreadState::gcReasonString(reason),
                      m_gcCountSinceLastCompaction, m_freeListSize);
  m_gcCountSinceLastCompaction++;
  // It is only safe to compact during non-conservative GCs.
  // TODO: for the main thread, limit this further to only idle GCs.
  if (reason != BlinkGC::IdleGC && reason != BlinkGC::PreciseGC &&
      reason != BlinkGC::ForcedGC)
    return false;

  const ThreadHeap& heap = state->heap();
  // If any of the participating threads require a stack scan,
  // do not compact.
  //
  // Why? Should the stack contain an iterator pointing into its
  // associated backing store, its references wouldn't be
  // correctly relocated.
  for (ThreadState* state : heap.threads()) {
    if (state->stackState() == BlinkGC::HeapPointersOnStack) {
      return false;
    }
  }

  // Compaction enable rules:
  //  - It's been a while since the last time.
  //  - A "considerable" amount of heap memory is bound up in freelist
  //    allocations. For now, use a fixed limit irrespective of heap
  //    size.
  //
  // As compaction doesn't cover all arenas, its cost isn't a worry;
  // additionally, it will only be performed by idle GCs.
  // TODO: add some form of compaction overhead estimate to the marking
  // time estimate.

  updateHeapResidency(state);

#if STRESS_TEST_HEAP_COMPACTION
  // Exercise the handling of object movement by compacting as
  // often as possible.
  return true;
#else
  return s_forceCompactionGC ||
         (m_gcCountSinceLastCompaction > kGCCountSinceLastCompactionThreshold &&
          m_freeListSize > kFreeListSizeThreshold);
#endif
#endif
}

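// Set up the compaction pass for this GC: reset the per-compaction
// bookkeeping and record how many threads will be participating.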
BlinkGC::GCType HeapCompact::initialize(ThreadState* state) {
  DCHECK(RuntimeEnabledFeatures::heapCompactionEnabled());
  LOG_HEAP_COMPACTION("Compacting: free=%zu\n", m_freeListSize);
  m_doCompact = true;
  m_freedPages = 0;
  m_freedSize = 0;
  m_threadCount = state->heap().threads().size();
  m_fixups.reset();
  m_gcCountSinceLastCompaction = 0;
  s_forceCompactionGC = false;
  return BlinkGC::GCWithSweepCompaction;
}

void HeapCompact::registerMovingObjectReference(MovableReference* slot) {
  if (!m_doCompact)
    return;

  fixups().add(slot);
}

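// Register a callback to be run once the object identified by |reference|
// has been moved; relocate() invokes it as
//
//   callback(callbackData, from, to, size)
//
// where |from| and |to| are the old and new object addresses and |size| is
// the payload size of the moved object.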
void HeapCompact::registerMovingObjectCallback(MovableReference reference,
                                               MovingObjectCallback callback,
                                               void* callbackData) {
  if (!m_doCompact)
    return;

  fixups().addFixupCallback(reference, callback, callbackData);
}

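// Sample the residency of the compactable arenas: sum up their total and
// free-list sizes, and mark which of them are candidates for compaction.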
void HeapCompact::updateHeapResidency(ThreadState* threadState) {
  // The heap compaction implementation assumes, in a few spots, that the
  // arena indices
  //
  //   [Vector1ArenaIndex, HashTableArenaIndex]
  //
  // form a contiguous range. Use static_asserts here so that assumption
  // isn't silently invalidated by changes to ArenaIndices.
  static_assert(BlinkGC::Vector1ArenaIndex + 3 == BlinkGC::Vector4ArenaIndex,
                "unexpected ArenaIndices ordering");
  static_assert(
      BlinkGC::Vector4ArenaIndex + 1 == BlinkGC::InlineVectorArenaIndex,
      "unexpected ArenaIndices ordering");
  static_assert(
      BlinkGC::InlineVectorArenaIndex + 1 == BlinkGC::HashTableArenaIndex,
      "unexpected ArenaIndices ordering");

  size_t totalArenaSize = 0;
  size_t totalFreeListSize = 0;

  m_compactableArenas = 0;
#if DEBUG_HEAP_FREELIST
  LOG_HEAP_FREELIST("Arena residencies: {");
#endif
  for (int i = BlinkGC::Vector1ArenaIndex; i <= BlinkGC::HashTableArenaIndex;
       ++i) {
    NormalPageArena* arena =
        static_cast<NormalPageArena*>(threadState->arena(i));
    size_t arenaSize = arena->arenaSize();
    size_t freeListSize = arena->freeListSize();
    totalArenaSize += arenaSize;
    totalFreeListSize += freeListSize;
    LOG_HEAP_FREELIST("%d: [%zu, %zu], ", i, arenaSize, freeListSize);
    // TODO: be more discriminating and consider arena
    // load factor, effectiveness of past compactions etc.
    if (!arenaSize)
      continue;
    // Mark the arena as compactable. |i| is already an absolute arena
    // index, so it is used directly as the bit position.
    m_compactableArenas |= (0x1u << i);
  }
  LOG_HEAP_FREELIST("}\nTotal = %zu, Free = %zu\n", totalArenaSize,
                    totalFreeListSize);

  // TODO(sof): consider smoothing the reported sizes.
  m_freeListSize = totalFreeListSize;
}

void HeapCompact::finishedArenaCompaction(NormalPageArena* arena,
                                          size_t freedPages,
                                          size_t freedSize) {
  if (!m_doCompact)
    return;

  m_freedPages += freedPages;
  m_freedSize += freedSize;
}

void HeapCompact::relocate(Address from, Address to) {
  DCHECK(m_fixups);
  m_fixups->relocate(from, to);
}

void HeapCompact::startThreadCompaction() {
  if (!m_doCompact)
    return;
#if DEBUG_LOG_HEAP_COMPACTION_RUNNING_TIME
  MutexLocker locker(m_mutex);
  if (!m_startCompactionTimeMS)
    m_startCompactionTimeMS = WTF::currentTimeMS();
#endif
}

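// Called by each participating thread once it has finished compacting its
// arenas; the last thread to finish logs the compaction stats, clears the
// shared state, and releases the other waiting threads.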
void HeapCompact::finishThreadCompaction() {
  if (!m_doCompact)
    return;

  MutexLocker locker(m_mutex);
  // The last thread to finish clears out the shared compaction state.
  if (!--m_threadCount) {
#if DEBUG_HEAP_COMPACTION
    if (m_fixups)
      m_fixups->dumpDebugStats();
#endif
    m_fixups.reset();
    m_doCompact = false;
#if DEBUG_LOG_HEAP_COMPACTION_RUNNING_TIME
    double end = WTF::currentTimeMS();
    LOG_HEAP_COMPACTION_INTERNAL(
        "Compaction stats: time=%gms, pages freed=%zu, size=%zu\n",
        end - m_startCompactionTimeMS, m_freedPages, m_freedSize);
    m_startCompactionTimeMS = 0;
#else
    LOG_HEAP_COMPACTION("Compaction stats: freed pages=%zu size=%zu\n",
                        m_freedPages, m_freedSize);
#endif
    // All compaction has completed; all participating threads may now
    // proceed.
    m_finished.broadcast();
  } else {
    // Letting a thread return here, leave the GC, and become a "mutator"
    // again runs the risk of it accessing heaps of other threads that are
    // still being compacted. Consequently, all GC-participating threads
    // must complete compaction together.
    m_finished.wait(m_mutex);
  }
}

void HeapCompact::addCompactingPage(BasePage* page) {
  DCHECK(m_doCompact);
  DCHECK(isCompactingArena(page->arena()->arenaIndex()));
  fixups().addCompactingPage(page);
}

bool HeapCompact::scheduleCompactionGCForTesting(bool value) {
  bool current = s_forceCompactionGC;
  s_forceCompactionGC = value;
  return current;
}

}  // namespace blink
