// Copyright 2016 Opera Software AS. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "platform/heap/HeapCompact.h"

#include "platform/RuntimeEnabledFeatures.h"
#include "platform/heap/Heap.h"
#include "platform/heap/SparseHeapBitmap.h"
#include "wtf/CurrentTime.h"
#include "wtf/HashMap.h"
#include "wtf/HashSet.h"
#include "wtf/Vector.h"

namespace blink {

bool HeapCompact::s_forceCompactionGC = false;

// The real worker behind heap compaction, recording references to movable
// objects ("slots".) When the objects end up being compacted and moved,
// relocate() will adjust the slots to point to the new location of the
// object along with handling fixups for interior pointers.
//
// The "fixups" object is created and maintained for the lifetime of one
// heap compaction-enhanced GC.
class HeapCompact::MovableObjectFixups final {
 public:
  static std::unique_ptr<MovableObjectFixups> create() {
    return std::unique_ptr<MovableObjectFixups>(new MovableObjectFixups);
  }

  ~MovableObjectFixups() {}

  void addCompactablePage(BasePage* p) {
    // Register the page as relocatable; this is done for every page
    // belonging to a compactable arena.
    m_relocatablePages.add(p);
  }

  void add(MovableReference* slot) {
    MovableReference reference = *slot;
    BasePage* refPage = pageFromObject(reference);
    // Nothing to compact on a large object's page.
    if (refPage->isLargeObjectPage())
      return;

#if DCHECK_IS_ON()
    auto it = m_fixups.find(reference);
    DCHECK(it == m_fixups.end() || it->value == slot);
#endif
    Address slotAddress = reinterpret_cast<Address>(slot);
    BasePage* slotPage = reinterpret_cast<BasePage*>(
        blinkPageAddress(slotAddress) + blinkGuardPageSize);
    if (m_relocatablePages.contains(slotPage)) {
haraken  2016/11/30 06:29:53
  Does the interior pointer problem happen only in L…
sof  2016/11/30 06:52:43
  Interior pointers can happen in a number of places…
haraken  2016/11/30 07:09:49
  I think I'm failing at understanding the problem :…
sof  2016/11/30 07:26:38
  Consider HeapHashMap<int, HeapVector<Member<someth…
haraken  2016/11/30 08:51:19
  But no matter what order the backing store is trac…
sof  2016/11/30 09:22:14
  Yes, but where are the relocatable slots - have th…
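// (A concretization of the thread above; an illustrative sketch, not part
// of the change. |SomeObject| and the layout drawing are assumptions.)
// Given HeapHashMap<int, HeapVector<Member<SomeObject>>>, each HeapVector
// header lives inline in the map's backing store, so the vector's pointer
// to its own backing store is a slot that itself resides on a compactable
// page:
//
//   map backing store M (movable)         vector backing store V (movable)
//   +-----------------------------+       +---------------------------+
//   | key | HeapVector { buffer --+-----> | Member<SomeObject>, ...   |
//   +-----------------------------+       +---------------------------+
//
// The buffer slot is "interior": when M moves, the slot's own address
// changes, which is what m_interiorFixups and the per-page interior
// bitmap below compensate for.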
      // Slot resides on a compactable heap's page.
      // => It is an interior slot (interior to some other backing store.)
      // Record it as an interior slot, which entails:
      //
      //  - Storing it in the interior map, which maps the slot to
      //    its (eventual) location. Initially nullptr.
      //  - Marking it as an interior pointer in the page's "interior"
      //    bitmap. This bitmap is used when moving a backing store, to
      //    more quickly check if any interior slots will have to be
      //    redirected.

      // Large object pages aren't compactable by definition, so we
      // shouldn't encounter any interior slots residing on them here.
      DCHECK(!slotPage->isLargeObjectPage());
      if (HeapCompact::isCompactableArena(slotPage->arena()->arenaIndex()))
        addInteriorFixup(slotAddress, slot);
    }
    m_fixups.add(reference, slot);
  }

  void addFixupCallback(MovableReference reference,
                        MovingObjectCallback callback,
                        void* callbackData) {
    DCHECK(!m_fixupCallbacks.contains(reference));
    m_fixupCallbacks.add(reference, std::pair<void*, MovingObjectCallback>(
                                        callbackData, callback));
  }

  size_t size() const { return m_fixups.size(); }

  void relocateInteriorFixups(Address from, Address to, size_t size) {
    SparseHeapBitmap* range = m_interiors->hasRange(from, size);
    if (LIKELY(!range))
      return;

    // Scan through the payload, looking for interior pointer slots
    // to adjust. If the backing store of such an interior slot hasn't
    // been moved already, update the slot -> real location mapping.
    // When the backing store is eventually moved, it'll use that location.
    for (size_t i = 0; i < size; i += sizeof(void*)) {
      if (!range->isSet(from + i))
        continue;
      MovableReference* fromRef = reinterpret_cast<MovableReference*>(from + i);
      auto it = m_interiorFixups.find(fromRef);
      if (it == m_interiorFixups.end())
        continue;

      // TODO: with the right sparse bitmap representation, it could be
      // possible to quickly determine if we've now stepped past the last
      // address that needed fixup in [address, address + size). Breaking
      // out of this loop might be worth doing for hash table backing stores
      // with a very low load factor. But interior fixups are rare.

      // If |fromRef|'s mapping is set, then the slot has been adjusted
      // already.
      if (it->value)
        continue;
      LOG_HEAP_COMPACTION("Range interior fixup: %p %p %p\n", from + i,
                          it->value, to + i);
      Address fixup = to + i;
      // Fill in the relocated location of the original slot at |from + i|;
      // when the backing store corresponding to |from + i| is eventually
      // moved/compacted, it'll update |to + i| with a pointer to the
      // moved backing store.
      m_interiorFixups.set(fromRef, fixup);
    }
  }

  void relocate(Address from, Address to) {
    auto it = m_fixups.find(from);
    DCHECK(it != m_fixups.end());
    MovableReference* slot = reinterpret_cast<MovableReference*>(it->value);
    auto interior = m_interiorFixups.find(slot);
    if (interior != m_interiorFixups.end()) {
      MovableReference* slotLocation =
          reinterpret_cast<MovableReference*>(interior->value);
      if (!slotLocation) {
        m_interiorFixups.set(slot, to);
      } else {
        LOG_HEAP_COMPACTION("Redirected slot: %p => %p\n", slot, slotLocation);
        slot = slotLocation;
      }
    }
    // If the slot has subsequently been updated -- say, a prefinalizer or
    // a destructor having mutated and expanded/shrunk the collection --
    // do not update and relocate the slot; |from| is no longer valid
    // and referenced.
    //
    // The slot's contents may also have been cleared during weak processing;
    // no work to be done in that case either.
    if (UNLIKELY(*slot != from)) {
      LOG_HEAP_COMPACTION(
          "No relocation: slot = %p, *slot = %p, from = %p, to = %p\n", slot,
          *slot, from, to);
      return;
    }
    *slot = to;

    size_t size = 0;
    auto callback = m_fixupCallbacks.find(from);
    if (UNLIKELY(callback != m_fixupCallbacks.end())) {
      size = HeapObjectHeader::fromPayload(to)->payloadSize();
      callback->value.second(callback->value.first, from, to, size);
    }

    if (LIKELY(!m_interiors))
      return;

    if (!size)
      size = HeapObjectHeader::fromPayload(to)->payloadSize();
    relocateInteriorFixups(from, to, size);
  }
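  // (Illustrative walk-through, not part of the change; addresses are
  // invented.) How relocate() and relocateInteriorFixups() interleave for
  // an interior slot sA, living inside map backing store M and pointing
  // at vector backing store A:
  //
  //  - M moves first: relocateInteriorFixups(M, M') records the slot's
  //    post-move address, m_interiorFixups[sA] = sA'. When A later moves,
  //    relocate(A, A') follows the redirect and writes A' into sA'.
  //  - A moves first: the mapping is still nullptr, so relocate(A, A')
  //    writes A' through sA and sets m_interiorFixups[sA] = A'. When M
  //    moves afterwards, the already-updated slot is copied along with
  //    M's payload, and relocateInteriorFixups() skips it (non-null
  //    mapping).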

  void addInteriorFixup(Address interior, MovableReference* slot) {
    auto it = m_interiorFixups.find(slot);
    // Ephemeron fixpoint iterations may cause repeated
    // registrations.
    DCHECK(it == m_interiorFixups.end() || !it->value);
    if (UNLIKELY(it != m_interiorFixups.end() && !it->value))
      return;
    m_interiorFixups.add(slot, nullptr);
    addInteriorMapping(interior);
  }

  void addInteriorMapping(Address interior) {
    LOG_HEAP_COMPACTION("Interior: %p\n", interior);
    if (!m_interiors) {
      m_interiors = SparseHeapBitmap::create(interior);
      return;
    }
    m_interiors->add(interior);
  }

  void addRelocation(MovableReference* slot) {
    MovableReference reference = *slot;
    if (!m_fixups.contains(reference)) {
      // Record the interior pointer.
      addInteriorFixup(reinterpret_cast<Address>(reference), slot);
    }

    BasePage* heapPage = pageFromObject(reference);
    DCHECK(heapPage);
    DCHECK(!heapPage->isLargeObjectPage());
    // For now, the heap objects we're adding relocations for are assumed
    // to reside in a compactable heap. There's no reason why that must be
    // so; this is just a sanity-checking assert while phasing in this
    // extra set of relocations.
    DCHECK(m_relocatablePages.contains(heapPage));

    NormalPage* normalPage = static_cast<NormalPage*>(heapPage);
    auto perHeap = m_externalRelocations.find(normalPage->arenaForNormalPage());
    if (perHeap == m_externalRelocations.end()) {
      Vector<MovableReference*> relocations;
      relocations.append(slot);
      ExternalRelocations table;
      table.add(*slot, relocations);
      m_externalRelocations.add(normalPage->arenaForNormalPage(), table);
      return;
    }
    auto entry = perHeap->value.find(*slot);
    if (entry == perHeap->value.end()) {
      Vector<MovableReference*> relocations;
      relocations.append(slot);
      perHeap->value.add(*slot, relocations);
      return;
    }
    entry->value.append(slot);
  }
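  // (Illustration, not part of the change; contents assumed.) The
  // structure built by addRelocation() nests as arena -> object -> slots:
  //
  //   m_externalRelocations:
  //     arenaA -> { objX -> [slotP, slotQ], objY -> [slotR] }
  //
  // Once arenaA has been compacted, fixupExternalRelocations(arenaA)
  // rewrites each registered slot with the object's new address, taken
  // from m_fixups or, for interior objects, from m_interiorFixups.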

  void fixupExternalRelocations(NormalPageArena* arena) {
    auto perHeap = m_externalRelocations.find(arena);
    if (LIKELY(perHeap == m_externalRelocations.end()))
      return;
    for (const auto& entry : perHeap->value) {
      MovableReference heapObject = entry.key;
      // |heapObject| will either be in |m_fixups| or have been recorded as
      // an internal fixup.
      auto heapEntry = m_fixups.find(heapObject);
      if (heapEntry != m_fixups.end()) {
        for (auto slot : entry.value)
          *slot = reinterpret_cast<MovableReference>(heapEntry->value);
        continue;
      }
      // The movement of the containing object will have moved the
      // interior slot.
      auto it = m_interiorFixups.find(
          reinterpret_cast<MovableReference*>(heapObject));
      DCHECK(it != m_interiorFixups.end());
      for (auto slot : entry.value)
        *slot = reinterpret_cast<MovableReference>(it->value);
    }
  }

#if DEBUG_HEAP_COMPACTION
  void dumpDebugStats() {
    LOG_HEAP_COMPACTION(
        "Fixups: pages=%u objects=%u callbacks=%u interior-size=%zu"
        " interiors-f=%u externals=%u\n",
        m_relocatablePages.size(), m_fixups.size(), m_fixupCallbacks.size(),
        m_interiors ? m_interiors->intervalCount() : 0, m_interiorFixups.size(),
        m_externalRelocations.size());
  }
#endif

 private:
  MovableObjectFixups() {}

  // Tracking movable and updatable references. For now, we keep a
  // map which, for each movable object, records the slot that
  // points to it. Upon moving the object, that slot needs to be
  // updated.
  //
  // (TODO: consider in-place updating schemes.)
  HashMap<MovableReference, MovableReference*> m_fixups;

  // Map from movable reference to callbacks that need to be invoked
  // when the object moves.
  HashMap<MovableReference, std::pair<void*, MovingObjectCallback>>
      m_fixupCallbacks;

  // Slot => relocated slot/final location.
  HashMap<MovableReference*, Address> m_interiorFixups;

  // All pages that are being compacted.
  HashSet<BasePage*> m_relocatablePages;

  std::unique_ptr<SparseHeapBitmap> m_interiors;

  // Each heap/arena may have additional slots pointing into it,
  // which must be fixed up & relocated after compaction has happened.
  //
  // This is currently not needed for Blink, but the functionality is kept
  // around to be able to support it should the need arise.
  using ExternalRelocations =
      HashMap<MovableReference, Vector<MovableReference*>>;

  HashMap<NormalPageArena*, ExternalRelocations> m_externalRelocations;
};
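// (Orientation sketch, not part of the change: the expected driver
// sequence for one compaction-enhanced GC. The call sites shown are
// assumptions about the surrounding ThreadState/ThreadHeap code.)
//
//   compact->checkIfCompacting(heap, visitor, gcType, reason);
//   // Marking: tracing backing stores registers their movable slots:
//   compact->registerMovingObjectReference(slot);
//   // Compaction: each moved backing store reports its old/new payload:
//   compact->movedObject(oldPayload, newPayload);
//   // Each participating thread signals completion; the last one tears
//   // down the per-GC fixup state:
//   compact->finishedCompacting(threadState);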

#if DEBUG_HEAP_COMPACTION
namespace {

const char* gcReasonString(BlinkGC::GCReason reason) {
  switch (reason) {
    case BlinkGC::IdleGC:
      return "IdleGC";
    case BlinkGC::PreciseGC:
      return "PreciseGC";
    case BlinkGC::ConservativeGC:
      return "ConservativeGC";
    case BlinkGC::ForcedGC:
      return "ForcedGC";
    case BlinkGC::MemoryPressureGC:
      return "MemoryPressureGC";
    case BlinkGC::PageNavigationGC:
      return "PageNavigationGC";
    default:
      NOTREACHED();
  }
  return "<Unknown>";
}

}  // namespace
#endif

HeapCompact::HeapCompact()
    : m_doCompact(false),
      m_gcCountSinceLastCompaction(0),
      m_threadCount(0),
      m_freeListAllocations(0),
      m_compactableHeaps(0u),
      m_freedPages(0),
      m_freedSize(0)
#if DEBUG_LOG_HEAP_COMPACTION_RUNNING_TIME
      ,
      m_startCompaction(0),
      m_startCompactionTimeMS(0)
#endif
{
}

HeapCompact::~HeapCompact() {}

HeapCompact::MovableObjectFixups& HeapCompact::fixups() {
  if (!m_fixups)
    m_fixups = MovableObjectFixups::create();
  return *m_fixups;
}

// checkIfCompacting() is called when a GC is initiated (by
// ThreadState::collectGarbage()), checking if there's sufficient reason to
// do a compaction pass on completion of the GC (but before lazy sweeping),
// and whether it can be safely done (i.e., the GC is not a conservative
// one.)
//
// TODO(sof): reconsider what is an effective policy for when compaction
// is required. Both in terms of frequency and freelist residency.
void HeapCompact::checkIfCompacting(ThreadHeap* heap,
                                    Visitor* visitor,
                                    BlinkGC::GCType gcType,
                                    BlinkGC::GCReason reason) {
#if ENABLE_HEAP_COMPACTION
  if (!RuntimeEnabledFeatures::heapCompactionEnabled())
    return;

  m_doCompact = false;
  LOG_HEAP_COMPACTION("check if compacting: gc=%s count=%zu free=%zu\n",
                      gcReasonString(reason), m_gcCountSinceLastCompaction,
                      m_freeListAllocations);
  m_gcCountSinceLastCompaction++;
  // It is only safe to compact during non-conservative GCs.
  if (reason != BlinkGC::IdleGC && reason != BlinkGC::PreciseGC &&
      reason != BlinkGC::ForcedGC)
    return;

  // If any of the participating threads require a stack scan,
  // do not compact.
  //
  // Why? Should the stack contain an iterator pointing into its
  // associated backing store, its references wouldn't be
  // correctly relocated.
  for (ThreadState* state : heap->threads()) {
    if (state->stackState() == BlinkGC::HeapPointersOnStack) {
      return;
    }
  }

  m_freedPages = 0;
  m_freedSize = 0;

  // Compaction enable rules:
  //  - It's been a while since the last time.
  //  - A "considerable" amount of heap is bound up in freelist
  //    allocations. For now, a fixed limit irrespective of heap size.
  //
  // As this isn't compacting all heaps/arenas, the cost of doing compaction
  // isn't a worry, and it will additionally mostly be done by idle GCs.
  // TODO: add some form of compaction overhead estimate to the marking
  // time estimate.

#if STRESS_TEST_HEAP_COMPACTION
  // Exercise the handling of object movement by compacting as
  // often as possible.
  m_doCompact = true;
#else
  m_doCompact = s_forceCompactionGC ||
                (m_gcCountSinceLastCompaction > kCompactIntervalThreshold &&
                 m_freeListAllocations > kFreeThreshold);
#endif
  if (m_doCompact) {
    LOG_HEAP_COMPACTION("Compacting: free=%zu\n", m_freeListAllocations);
    m_threadCount = heap->threads().size();
    visitor->setMarkCompactionMode();
    m_fixups.reset();
    m_gcCountSinceLastCompaction = 0;
    s_forceCompactionGC = false;
  }
#endif  // ENABLE_HEAP_COMPACTION
}
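// (Sketch, not part of the change: the enabling policy of
// checkIfCompacting() above, restated as a standalone predicate for
// readability. The threshold parameters stand in for the
// kCompactIntervalThreshold and kFreeThreshold constants declared in
// HeapCompact.h.)
static bool shouldCompact(BlinkGC::GCReason reason,
                          size_t gcsSinceLastCompaction,
                          size_t freeListedSpace,
                          size_t gcCountThreshold,
                          size_t freeSpaceThreshold) {
  // Only non-conservative GC types may compact; a conservative GC may have
  // unadjustable references into the heap from the stack.
  if (reason != BlinkGC::IdleGC && reason != BlinkGC::PreciseGC &&
      reason != BlinkGC::ForcedGC)
    return false;
  // Compact when it has been a while since the last compaction and a
  // "considerable" amount of heap is bound up in freelist allocations.
  return gcsSinceLastCompaction > gcCountThreshold &&
         freeListedSpace > freeSpaceThreshold;
}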

void HeapCompact::registerMovingObjectReference(MovableReference* slot) {
  if (!m_doCompact)
    return;

  fixups().add(slot);
}

void HeapCompact::registerMovingObjectCallback(MovableReference reference,
                                               MovingObjectCallback callback,
                                               void* callbackData) {
  if (!m_doCompact)
    return;

  fixups().addFixupCallback(reference, callback, callbackData);
}

void HeapCompact::registerRelocation(MovableReference* slot) {
  if (!m_doCompact)
    return;

  if (!*slot)
    return;

  fixups().addRelocation(slot);
}

void HeapCompact::setHeapResidency(
    size_t liveSize,
    size_t freeSize,
    const Vector<std::pair<size_t, size_t>>& heapResidencies) {
#if DEBUG_HEAP_FREELIST
  LOG_HEAP_FREELIST("Heap residencies: {");
  for (size_t i = 0; i < heapResidencies.size(); ++i) {
    LOG_HEAP_FREELIST("%zu: [%zu, %zu], ", i, heapResidencies[i].first,
                      heapResidencies[i].second);
  }
  LOG_HEAP_FREELIST("}\nFree + live size: %zu %zu\n", freeSize, liveSize);
#endif
  // Mark the sub heaps that are viable compaction candidates;
  // or, rather, don't mark those that aren't.
  size_t subHeapCount =
      BlinkGC::HashTableArenaIndex - BlinkGC::Vector1ArenaIndex + 1;
  DCHECK(heapResidencies.size() == subHeapCount);
  m_compactableHeaps = 0;
  for (size_t i = 0; i < subHeapCount; ++i) {
    // TODO: be more discriminating and consider sub-heap
    // load factor, effectiveness of past compactions etc.
    if (heapResidencies[i].first == 0) {
      if (m_doCompact) {
        LOG_HEAP_COMPACTION("Not compacting heap: %zu\n",
                            BlinkGC::Vector1ArenaIndex + i);
      }
      continue;
    }
    m_compactableHeaps |= (0x1u << (BlinkGC::Vector1ArenaIndex + i));
  }
  if (m_doCompact) {
    // Reset the total freelist allocation if we're about to compact.
    // TODO(sof): re-record the actual (but very low) freelist size
    // after the compaction has completed.
    m_freeListAllocations = 0;
    return;
  }
  // TODO(sof): consider smoothing the reported sizes.
  m_freeListAllocations = freeSize;
}
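// (Sketch, not part of the change: how the m_compactableHeaps bitmask
// built above is meant to be consulted. The corresponding helper lives in
// HeapCompact.h, so a free-standing version is shown here.)
static bool arenaIsCompactable(unsigned compactableHeaps, int arenaIndex) {
  // Each compactable arena contributes one bit, set in setHeapResidency().
  return (compactableHeaps & (0x1u << arenaIndex)) != 0;
}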

void HeapCompact::finishedArenaCompaction(NormalPageArena* arena,
                                          size_t freedPages,
                                          size_t freedSize) {
  if (!m_doCompact)
    return;

  fixups().fixupExternalRelocations(arena);
  m_freedPages += freedPages;
  m_freedSize += freedSize;
}

void HeapCompact::movedObject(Address from, Address to) {
  DCHECK(m_fixups);
  m_fixups->relocate(from, to);
}

void HeapCompact::startCompacting(ThreadState*) {
#if DEBUG_LOG_HEAP_COMPACTION_RUNNING_TIME
  // Only the first thread to arrive records the start time.
  if (!atomicTestAndSetToOne(&m_startCompaction))
    m_startCompactionTimeMS = WTF::currentTimeMS();
#endif
}

void HeapCompact::finishedCompacting(ThreadState*) {
  if (!m_doCompact)
    return;

  // The final thread to finish clears out the compaction state.
  if (!atomicDecrement(&m_threadCount)) {
#if DEBUG_HEAP_COMPACTION
    if (m_fixups)
      m_fixups->dumpDebugStats();
#endif
    m_fixups.reset();
    m_doCompact = false;
#if DEBUG_LOG_HEAP_COMPACTION_RUNNING_TIME
    double end = WTF::currentTimeMS();
    LOG_HEAP_COMPACTION_INTERNAL(
        "Compaction stats: time=%gms, pages=%zu, size=%zu\n",
        end - m_startCompactionTimeMS, m_freedPages, m_freedSize);
    m_startCompaction = 0;
    m_startCompactionTimeMS = 0;
#else
    LOG_HEAP_COMPACTION("Compaction stats: freed pages=%zu size=%zu\n",
                        m_freedPages, m_freedSize);
#endif
  }
}

void HeapCompact::addCompactablePage(BasePage* page) {
  if (!m_doCompact)
    return;
  fixups().addCompactablePage(page);
}

bool HeapCompact::scheduleCompactionGCForTesting(bool value) {
  bool current = s_forceCompactionGC;
  s_forceCompactionGC = value;
  return current;
}
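// (Usage sketch, not part of the change: how a test would force a
// compaction GC. |preciseGC()| is a hypothetical test-harness helper.)
//
//   bool previous = HeapCompact::scheduleCompactionGCForTesting(true);
//   preciseGC();  // This GC runs with compaction forced on.
//   HeapCompact::scheduleCompactionGCForTesting(previous);  // Restore.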

}  // namespace blink