Chromium Code Reviews
| 1 // Copyright 2016 Opera Software AS. All rights reserved. | |
| 2 // Use of this source code is governed by a BSD-style license that can be | |
| 3 // found in the LICENSE file. | |
| 4 | |
| 5 #include "platform/heap/HeapCompact.h" | |
| 6 | |
| 7 #include "platform/RuntimeEnabledFeatures.h" | |
| 8 #include "platform/heap/Heap.h" | |
| 9 #include "platform/heap/SparseHeapBitmap.h" | |
| 10 #include "wtf/CurrentTime.h" | |
| 11 #include "wtf/HashMap.h" | |
| 12 #include "wtf/HashSet.h" | |
| 13 #include "wtf/Vector.h" | |
| 14 | |
| 15 namespace blink { | |
| 16 | |
| 17 bool HeapCompact::s_forceCompactionGC = false; | |
| 18 | |
| 19 // The real worker behind heap compaction, recording references to movable | |
| 20 // objects ("slots".) When the objects end up being compacted and moved, | |
| 21 // relocate() will adjust the slots to point to the new location of the | |
| 22 // object along with handling fixups for interior pointers. | |
| 23 // | |
| 24 // The "fixups" object is created and maintained for the lifetime of one | |
| 25 // heap compaction-enhanced GC. | |
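To make the slot-fixup bookkeeping described above concrete, here is a minimal, self-contained C++ sketch. It uses toy types (raw void pointers and std::unordered_map rather than Blink's MovableReference/Address) and only illustrates the object-to-slot map plus the relocation step, not the actual implementation that follows:

    #include <cassert>
    #include <unordered_map>

    // Toy stand-in for MovableObjectFixups: remember, per movable object,
    // the single slot that points to it; patch that slot once the object
    // has been copied to its new location.
    class ToyFixups {
     public:
      void add(void** slot) { fixups_[*slot] = slot; }

      void relocate(void* from, void* to) {
        auto it = fixups_.find(from);
        assert(it != fixups_.end());
        void** slot = it->second;
        if (*slot != from)  // Slot was overwritten after registration; skip.
          return;
        *slot = to;
      }

     private:
      std::unordered_map<void*, void**> fixups_;  // object -> slot holding it
    };

    int main() {
      char old_store[16], new_store[16];
      void* slot = old_store;  // some field holding the backing store pointer
      ToyFixups fixups;
      fixups.add(&slot);
      fixups.relocate(old_store, new_store);  // "compaction" copied the object
      assert(slot == new_store);
      return 0;
    }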
| 26 class HeapCompact::MovableObjectFixups final { | |
| 27 public: | |
| 28 static std::unique_ptr<MovableObjectFixups> create() { | |
| 29 return std::unique_ptr<MovableObjectFixups>(new MovableObjectFixups); | |
|
haraken 2016/12/05 11:27:46: wrapUnique
sof 2016/12/05 19:30:06: Done.
| 30 } | |
| 31 | |
| 32 ~MovableObjectFixups() {} | |
| 33 | |
| 34 void addCompactablePage(BasePage* p) { | |
| 35 // Add all pages belonging to the arena to the set of relocatable pages. | |
| 36 m_relocatablePages.add(p); | |
| 37 } | |
| 38 | |
| 39 void add(MovableReference* slot) { | |
| 40 MovableReference reference = *slot; | |
| 41 BasePage* refPage = pageFromObject(reference); | |
| 42 // Nothing to compact on a large object's page. | |
| 43 if (refPage->isLargeObjectPage()) | |
| 44 return; | |
| 45 | |
| 46 #if DCHECK_IS_ON() | |
| 47 auto it = m_fixups.find(reference); | |
| 48 DCHECK(it == m_fixups.end() || it->value == slot); | |
| 49 #endif | |
| 50 Address slotAddress = reinterpret_cast<Address>(slot); | |
| 51 BasePage* slotPage = reinterpret_cast<BasePage*>( | |
| 52 blinkPageAddress(slotAddress) + blinkGuardPageSize); | |
| 53 if (m_relocatablePages.contains(slotPage)) { | |
| 54 // Slot resides on a compactable heap's page. | |
|
haraken 2016/12/05 11:27:46: Add DCHECK(slotPage->contains(slotAddress)).
sof 2016/12/05 19:30:05: Done.
| 55 // => It is an interior slot (interior to some other backing store.) | |
| 56 // Record it as an interior slot, which entails: | |
| 57 // | |
| 58 // - Storing it in the interior map, which maps the slot to | |
| 59 // its (eventual) location. Initially nullptr. | |
| 60 // - Marking it as an interior pointer within the page's | |
| 61 // "interior" bitmap. This bitmap is used when moving a backing | |
| 62 // store, to more quickly check whether any interior slots will | |
| 63 // have to be redirected. | |
| 64 | |
| 65 // Large object pages aren't compactable by definition, so we shouldn't | |
| 66 // encounter any here. | |
| 67 DCHECK(!slotPage->isLargeObjectPage()); | |
| 68 if (HeapCompact::isCompactableArena(slotPage->arena()->arenaIndex())) | |
|
haraken 2016/12/05 11:27:46: When can this return false? m_relocatablePages con
sof 2016/12/05 19:30:06: addRelocatablePage() doesn't currently take m_comp
| 69 addInteriorFixup(slotAddress, slot); | |
| 70 } | |
| 71 m_fixups.add(reference, slot); | |
| 72 } | |
| 73 | |
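For readers unfamiliar with the term, the "interior slot" case handled above has a plain-C++ analogue (this is not Blink code, just an illustration): a slot that itself lives inside another movable backing store, such as an inner container's buffer pointer stored within an outer container's buffer.

    #include <vector>

    int main() {
      // The outer vector's heap buffer holds the inner vectors' control
      // blocks; each control block contains a pointer to that inner
      // vector's own buffer.
      std::vector<std::vector<int>> outer(3, std::vector<int>(4, 0));

      // &outer[1] lies inside outer's (movable) buffer, so the pointer it
      // holds is an "interior slot": if outer's buffer moves, the slot moves
      // with it, which is what the interior map and bitmap keep track of.
      std::vector<int>* control_block_inside_outer_buffer = &outer[1];
      (void)control_block_inside_outer_buffer;
      return 0;
    }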
| 74 void addFixupCallback(MovableReference reference, | |
|
haraken 2016/12/05 11:27:47: I think you're assuming that addFixupCallback() is
sof 2016/12/05 19:30:06: Not readily as that map is over slots, this is a r
haraken 2016/12/06 13:30:38: Hmm, what do you mean? DCHECK(m_fixups.contains(re
sof 2016/12/06 21:39:34: Sorry, thought of wrong map. It doesn't hold in ge
| 75 MovingObjectCallback callback, | |
| 76 void* callbackData) { | |
| 77 DCHECK(!m_fixupCallbacks.contains(reference)); | |
| 78 m_fixupCallbacks.add(reference, std::pair<void*, MovingObjectCallback>( | |
| 79 callbackData, callback)); | |
| 80 } | |
| 81 | |
| 82 size_t size() const { return m_fixups.size(); } | |
|
haraken 2016/12/05 11:27:46: size() => numberOfFixups()
sof 2016/12/05 19:30:05: A generic name like that makes it harder to see th
| 83 | |
| 84 void relocateInteriorFixups(Address from, Address to, size_t size) { | |
|
haraken 2016/12/05 11:27:47: size => payloadSize
sof 2016/12/05 19:30:05: "size" is accurate as-is; we're not using "payload
| 85 SparseHeapBitmap* range = m_interiors->hasRange(from, size); | |
| 86 if (LIKELY(!range)) | |
| 87 return; | |
| 88 | |
| 89 // Scan through the payload, looking for interior pointer slots | |
| 90 // to adjust. If the backing store of such an interior slot hasn't | |
| 91 // been moved already, update the slot -> real location mapping. | |
| 92 // When the backing store is eventually moved, it'll use that location. | |
| 93 // | |
| 94 for (size_t i = 0; i < size; i += sizeof(void*)) { | |
|
haraken 2016/12/05 11:27:47: i => offset
sof 2016/12/05 19:30:05: Renamed.
| 95 if (!range->isSet(from + i)) | |
| 96 continue; | |
| 97 MovableReference* fromRef = reinterpret_cast<MovableReference*>(from + i); | |
|
haraken 2016/12/05 11:27:47: fromRef => slot. This is not a reference.
sof 2016/12/05 19:30:06: Done.
| 98 auto it = m_interiorFixups.find(fromRef); | |
| 99 if (it == m_interiorFixups.end()) | |
| 100 continue; | |
| 101 | |
| 102 // TODO: with the right sparse bitmap representation, it could be possible | |
| 103 // to quickly determine if we've now stepped past the last address | |
| 104 // that needed fixup in [address, address + size). Breaking out of this | |
| 105 // loop might be worth doing for hash table backing stores with a very | |
| 106 // low load factor. But interior fixups are rare. | |
| 107 | |
| 108 // If |slot|'s mapping is set, then the slot has been adjusted already. | |
| 109 if (it->value) | |
| 110 continue; | |
|
haraken 2016/12/05 11:27:46: Can we check that it->value is equal to |to+i|?
sof 2016/12/05 19:30:06: No, it->value would contain the (relocated) refere
| 111 LOG_HEAP_COMPACTION("Range interior fixup: %p %p %p\n", from + i, | |
| 112 it->value, to + i); | |
| 113 Address fixup = to + i; | |
| 114 // Fill in the relocated location of the original slot at |from + i|; | |
| 115 // when the backing store corresponding to |from + i| is eventually | |
| 116 // moved/compacted, it'll update |to + i| with a pointer to the | |
| 117 // moved backing store. | |
| 118 m_interiorFixups.set(fromRef, fixup); | |
| 119 } | |
| 120 } | |
| 121 | |
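The scan above can be pictured with this small sketch in plain C++, where std::set stands in for the sparse "interior" bitmap and std::map for m_interiorFixups; it is illustrative only, not the Blink implementation:

    #include <cstddef>
    #include <map>
    #include <set>

    // After the containing backing store is copied from |from| to |to|,
    // point every unresolved interior-slot entry at its new home.
    void relocateInteriorSlots(char* from, char* to, std::size_t size,
                               const std::set<char*>& interior_marks,
                               std::map<char*, char*>& interior_fixups) {
      for (std::size_t offset = 0; offset < size; offset += sizeof(void*)) {
        if (!interior_marks.count(from + offset))
          continue;  // No interior slot recorded at this word.
        auto it = interior_fixups.find(from + offset);
        if (it == interior_fixups.end() || it->second)
          continue;  // Unknown slot, or its new location is already resolved.
        // The slot travelled with its container; remember where it lives now
        // so a later move of the slot's pointee updates |to| + offset.
        it->second = to + offset;
      }
    }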
| 122 void relocate(Address from, Address to) { | |
| 123 auto it = m_fixups.find(from); | |
| 124 DCHECK(it != m_fixups.end()); | |
| 125 MovableReference* slot = reinterpret_cast<MovableReference*>(it->value); | |
| 126 auto interior = m_interiorFixups.find(slot); | |
| 127 if (interior != m_interiorFixups.end()) { | |
| 128 MovableReference* slotLocation = | |
| 129 reinterpret_cast<MovableReference*>(interior->value); | |
| 130 if (!slotLocation) { | |
| 131 m_interiorFixups.set(slot, to); | |
|
haraken 2016/12/05 11:27:46: Why do we need to update m_interiorFixups here? I
sof 2016/12/05 19:30:05: No, we don't know which is processed first -- the
| 132 } else { | |
| 133 LOG_HEAP_COMPACTION("Redirected slot: %p => %p\n", slot, slotLocation); | |
|
haraken 2016/12/05 11:27:46: Can we add DCHECK(slotLocation == to)? In the fir
sof 2016/12/05 19:30:05: That wouldn't be appropriate; if m_interiorFixups
| 134 slot = slotLocation; | |
| 135 } | |
| 136 } | |
| 137 // If the slot has since been updated -- a prefinalizer or a destructor | |
| 138 // having mutated the collection and expanded/shrunk its backing store -- | |
| 139 // do not update and relocate the slot: |from| is no longer the backing | |
| 140 // store that it references. | |
|
haraken 2016/12/05 11:27:46: Specifically, how is it possible that destructor o
sof 2016/12/05 19:30:06: The slot contains the pointer to the backing store
| 141 // | |
| 142 // The slot's contents may also have been cleared during weak processing; | |
| 143 // no work to be done in that case either. | |
| 144 if (UNLIKELY(*slot != from)) { | |
|
haraken 2016/12/05 11:27:46: Does '*slot != from' happen only when the *slot is
sof 2016/12/05 19:30:05: The first half of the comment block explains why t
| 145 LOG_HEAP_COMPACTION( | |
| 146 "No relocation: slot = %p, *slot = %p, from = %p, to = %p\n", slot, | |
| 147 *slot, from, to); | |
| 148 return; | |
| 149 } | |
| 150 *slot = to; | |
| 151 | |
| 152 size_t size = 0; | |
|
haraken 2016/12/05 11:27:46: size => payloadSize
sof 2016/12/05 19:30:06: "size" is fine as-is.
| 153 auto callback = m_fixupCallbacks.find(from); | |
| 154 if (UNLIKELY(callback != m_fixupCallbacks.end())) { | |
| 155 size = HeapObjectHeader::fromPayload(to)->payloadSize(); | |
| 156 callback->value.second(callback->value.first, from, to, size); | |
| 157 } | |
| 158 | |
| 159 if (LIKELY(!m_interiors)) | |
| 160 return; | |
| 161 | |
| 162 if (!size) | |
| 163 size = HeapObjectHeader::fromPayload(to)->payloadSize(); | |
| 164 relocateInteriorFixups(from, to, size); | |
| 165 } | |
| 166 | |
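The moving-object callback path taken in relocate() can be sketched as follows; the (data, from, to, size) shape mirrors the call made above, while the names and the global map are purely illustrative:

    #include <cstddef>
    #include <cstdio>
    #include <map>
    #include <utility>

    using ToyMovingCallback = void (*)(void* data, void* from, void* to,
                                       std::size_t size);

    // object -> (callback data, callback); consulted once the object moved.
    std::map<void*, std::pair<void*, ToyMovingCallback>> g_callbacks;

    void notifyMoved(void* from, void* to, std::size_t size) {
      auto it = g_callbacks.find(from);
      if (it == g_callbacks.end())
        return;
      it->second.second(it->second.first, from, to, size);
    }

    int main() {
      char old_buf[8], new_buf[8];
      int invocations = 0;
      g_callbacks[old_buf] = {&invocations,
                              [](void* data, void*, void*, std::size_t) {
                                ++*static_cast<int*>(data);
                              }};
      notifyMoved(old_buf, new_buf, sizeof(old_buf));
      std::printf("callback ran %d time(s)\n", invocations);  // prints 1
      return 0;
    }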
| 167 void addInteriorFixup(Address interior, MovableReference* slot) { | |
| 168 auto it = m_interiorFixups.find(slot); | |
| 169 // Ephemeron fixpoint iterations may cause repeated | |
| 170 // registrations. | |
| 171 DCHECK(it == m_interiorFixups.end() || !it->value); | |
| 172 if (UNLIKELY(it != m_interiorFixups.end() && !it->value)) | |
|
haraken 2016/12/05 11:27:46: Would you help me understand this branch? I don't
sof 2016/12/05 19:30:06: nullptr is what we initially add (see line just be
haraken 2016/12/06 13:30:38: So what we really want to do is: ... // Drop the
sof 2016/12/06 21:39:34: No, ephemeron fix pointing is quite valid.
haraken 2016/12/07 08:55:11: Ephemeron fix pointing is valid, but how is it pos
sof 2016/12/07 10:45:08: Tuned the assert logic.
| 173 return; | |
| 174 m_interiorFixups.add(slot, nullptr); | |
| 175 addInteriorMapping(interior); | |
|
haraken 2016/12/05 11:27:46: You can inline the method.
sof 2016/12/05 19:30:06: Done.
| 176 } | |
| 177 | |
| 178 void addInteriorMapping(Address interior) { | |
| 179 LOG_HEAP_COMPACTION("Interior: %p\n", interior); | |
| 180 if (!m_interiors) { | |
| 181 m_interiors = SparseHeapBitmap::create(interior); | |
| 182 return; | |
| 183 } | |
| 184 m_interiors->add(interior); | |
| 185 } | |
| 186 | |
| 187 void addRelocation(MovableReference* slot) { | |
|
haraken 2016/12/05 11:27:46: I'm wondering why we need to have so different cod
sof 2016/12/05 19:30:05: Your assumption is correct, backing stores are "li
| 188 MovableReference reference = *slot; | |
| 189 if (!m_fixups.contains(reference)) { | |
| 190 // Record the interior pointer. | |
| 191 addInteriorFixup(reinterpret_cast<Address>(reference), slot); | |
| 192 } | |
| 193 | |
| 194 BasePage* heapPage = pageFromObject(reference); | |
| 195 DCHECK(heapPage); | |
| 196 DCHECK(!heapPage->isLargeObjectPage()); | |
| 197 // For now, the heap objects we're adding relocations for are assumed | |
| 198 // to be residing in a compactable heap. There's no reason why it must be | |
| 199 // so, just a sanity checking assert while phasing in this extra set of | |
| 200 // relocations. | |
| 201 DCHECK(m_relocatablePages.contains(heapPage)); | |
| 202 | |
| 203 NormalPage* normalPage = static_cast<NormalPage*>(heapPage); | |
| 204 auto perHeap = m_externalRelocations.find(normalPage->arenaForNormalPage()); | |
| 205 if (perHeap == m_externalRelocations.end()) { | |
| 206 Vector<MovableReference*> relocations; | |
| 207 relocations.append(slot); | |
| 208 ExternalRelocations table; | |
| 209 table.add(*slot, relocations); | |
| 210 m_externalRelocations.add(normalPage->arenaForNormalPage(), table); | |
| 211 return; | |
| 212 } | |
| 213 auto entry = perHeap->value.find(*slot); | |
| 214 if (entry == perHeap->value.end()) { | |
| 215 Vector<MovableReference*> relocations; | |
| 216 relocations.append(slot); | |
| 217 perHeap->value.add(*slot, relocations); | |
| 218 return; | |
| 219 } | |
| 220 entry->value.append(slot); | |
| 221 } | |
| 222 | |
| 223 void fixupExternalRelocations(NormalPageArena* arena) { | |
| 224 auto perHeap = m_externalRelocations.find(arena); | |
| 225 if (LIKELY(perHeap == m_externalRelocations.end())) | |
| 226 return; | |
| 227 for (const auto& entry : perHeap->value) { | |
| 228 MovableReference heapObject = entry.key; | |
| 229 // |heapObject| will either be in |m_fixups| or have been recorded as | |
| 230 // an interior fixup. | |
| 231 auto heapEntry = m_fixups.find(heapObject); | |
| 232 if (heapEntry != m_fixups.end()) { | |
| 233 for (auto slot : entry.value) | |
| 234 *slot = reinterpret_cast<MovableReference>(heapEntry->value); | |
| 235 continue; | |
| 236 } | |
| 237 // The movement of the containing object will have moved the | |
| 238 // interior slot. | |
| 239 auto it = m_interiorFixups.find( | |
| 240 reinterpret_cast<MovableReference*>(heapObject)); | |
| 241 DCHECK(it != m_interiorFixups.end()); | |
| 242 for (auto slot : entry.value) | |
| 243 *slot = reinterpret_cast<MovableReference>(it->value); | |
| 244 } | |
| 245 } | |
| 246 | |
| 247 #if DEBUG_HEAP_COMPACTION | |
| 248 void dumpDebugStats() { | |
| 249 LOG_HEAP_COMPACTION( | |
| 250 "Fixups: pages=%u objects=%u callbacks=%u interior-size=%zu" | |
| 251 " interiors-f=%u externals=%u\n", | |
| 252 m_relocatablePages.size(), m_fixups.size(), m_fixupCallbacks.size(), | |
| 253 m_interiors ? m_interiors->intervalCount() : 0, m_interiorFixups.size(), | |
| 254 m_externalRelocations.size()); | |
| 255 } | |
| 256 #endif | |
| 257 | |
| 258 private: | |
| 259 MovableObjectFixups() {} | |
| 260 | |
| 261 // Tracking movable and updatable references. For now, we keep a | |
| 262 // map which, for each movable object, records the slot that | |
| 263 // points to it. Upon moving the object, that slot needs to be | |
| 264 // updated. | |
| 265 // | |
| 266 // (TODO: consider in-place updating schemes.) | |
| 267 HashMap<MovableReference, MovableReference*> m_fixups; | |
| 268 | |
| 269 // Map from movable reference to callbacks that need to be invoked | |
| 270 // when the object moves. | |
| 271 HashMap<MovableReference, std::pair<void*, MovingObjectCallback>> | |
| 272 m_fixupCallbacks; | |
| 273 | |
| 274 // Slot => relocated slot/final location. | |
| 275 HashMap<MovableReference*, Address> m_interiorFixups; | |
| 276 | |
| 277 // All pages that are being compacted. | |
| 278 HashSet<BasePage*> m_relocatablePages; | |
| 279 | |
| 280 std::unique_ptr<SparseHeapBitmap> m_interiors; | |
| 281 | |
| 282 // Each heap/arena may have additional slots pointing into it, | |
| 283 // which must be fixed up & relocated after compaction has happened. | |
| 284 // | |
| 285 // This is currently not needed for Blink, but the functionality is kept | |
| 286 // around to be able to support it should the need arise. | |
|
haraken 2016/12/05 11:27:46: You can say it's needed in Opera. Otherwise, someo
sof 2016/12/05 19:30:06: It's not, no one uses it. I'm fine with letting it
haraken 2016/12/06 13:30:38: If it's unused, I'd prefer not landing the code un
sof 2016/12/06 21:39:34: ok, the normal procedure for Blink. Removed.
| 287 using ExternalRelocations = | |
| 288 HashMap<MovableReference, Vector<MovableReference*>>; | |
| 289 | |
| 290 HashMap<NormalPageArena*, ExternalRelocations> m_externalRelocations; | |
| 291 }; | |
| 292 | |
| 293 #if DEBUG_HEAP_COMPACTION | |
| 294 namespace { | |
| 295 | |
| 296 const char* gcReasonString(BlinkGC::GCReason reason) { | |
|
haraken 2016/12/05 11:27:46: We should share this function with ThreadState.cpp
sof 2016/12/05 19:30:05: Re-added, it was dropped there as a public method
| 297 switch (reason) { | |
| 298 case blink::BlinkGC::IdleGC: | |
| 299 return "IdleGC"; | |
| 300 case BlinkGC::PreciseGC: | |
| 301 return "PreciseGC"; | |
| 302 case BlinkGC::ConservativeGC: | |
| 303 return "ConservativeGC"; | |
| 304 case BlinkGC::ForcedGC: | |
| 305 return "ForcedGC"; | |
| 306 case BlinkGC::MemoryPressureGC: | |
| 307 return "MemoryPressureGC"; | |
| 308 case BlinkGC::PageNavigationGC: | |
| 309 return "PageNavigationGC"; | |
| 310 default: | |
| 311 NOTREACHED(); | |
| 312 } | |
| 313 return "<Unknown>"; | |
| 314 } | |
| 315 | |
| 316 } // namespace | |
| 317 #endif | |
| 318 | |
| 319 HeapCompact::HeapCompact() | |
| 320 : m_doCompact(false), | |
| 321 m_gcCountSinceLastCompaction(0), | |
| 322 m_threadCount(0), | |
| 323 m_freeListSize(0), | |
| 324 m_compactableArenas(0u), | |
| 325 m_freedPages(0), | |
| 326 m_freedSize(0) | |
| 327 #if DEBUG_LOG_HEAP_COMPACTION_RUNNING_TIME | |
| 328 , | |
| 329 m_startCompaction(0), | |
| 330 m_startCompactionTimeMS(0) | |
| 331 #endif | |
| 332 { | |
| 333 } | |
| 334 | |
| 335 HeapCompact::~HeapCompact() {} | |
| 336 | |
| 337 HeapCompact::MovableObjectFixups& HeapCompact::fixups() { | |
| 338 if (!m_fixups) | |
| 339 m_fixups = MovableObjectFixups::create(); | |
| 340 return *m_fixups; | |
| 341 } | |
| 342 | |
| 343 // checkIfCompacting() is called when a GC is initiated | |
| 344 // (by ThreadState::collectGarbage()). It checks whether there is | |
| 345 // sufficient reason to do a compaction pass on completion of the GC | |
| 346 // (but before lazy sweeping), and whether this can be done safely | |
| 347 // (i.e., it is not a conservative GC.) | |
| 348 // | |
| 349 // TODO(sof): reconsider what is an effective policy for when compaction | |
| 350 // is required. Both in terms of frequency and freelist residency. | |
| 351 BlinkGC::GCType HeapCompact::checkIfCompacting(ThreadState* state, | |
| 352 BlinkGC::GCType gcType, | |
| 353 BlinkGC::GCReason reason) { | |
| 354 #if ENABLE_HEAP_COMPACTION | |
| 355 if (!RuntimeEnabledFeatures::heapCompactionEnabled()) | |
| 356 return gcType; | |
| 357 | |
| 358 m_doCompact = false; | |
| 359 LOG_HEAP_COMPACTION("check if compacting: gc=%s count=%zu free=%zu\n", | |
| 360 gcReasonString(reason), m_gcCountSinceLastCompaction, | |
| 361 m_freeListSize); | |
| 362 m_gcCountSinceLastCompaction++; | |
| 363 // It is only safe to compact during non-conservative GCs. | |
| 364 // TODO: for the main thread, limit this further to only idle GCs. | |
| 365 if (reason != BlinkGC::IdleGC && reason != BlinkGC::PreciseGC && | |
| 366 reason != BlinkGC::ForcedGC) | |
| 367 return gcType; | |
| 368 | |
| 369 const ThreadHeap& heap = state->heap(); | |
| 370 // If any of the participating threads require a stack scan, | |
| 371 // do not compact. | |
| 372 // | |
| 373 // Why? Should the stack contain an iterator pointing into its | |
| 374 // associated backing store, its references wouldn't be | |
| 375 // correctly relocated. | |
| 376 for (ThreadState* state : heap.threads()) { | |
| 377 if (state->stackState() == BlinkGC::HeapPointersOnStack) { | |
| 378 return gcType; | |
| 379 } | |
| 380 } | |
| 381 | |
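The stack-scan restriction above is easiest to see with a plain-C++ sketch (no Blink types): a pointer held only on the stack and aimed into a movable buffer cannot be fixed up, so it would be left dangling if that buffer were compacted.

    #include <cstdio>
    #include <cstring>

    int main() {
      int* backing = new int[4]{1, 2, 3, 4};
      int* iterator = backing + 2;  // stack-held pointer into the buffer

      // "Compaction" copies the store elsewhere. A heap slot holding
      // |backing| could be patched to |moved|, but nothing records that
      // |iterator| should become |moved| + 2, and a conservative stack scan
      // cannot reliably identify it as such an interior pointer.
      int* moved = new int[4];
      std::memcpy(moved, backing, 4 * sizeof(int));

      std::printf("stale: %d, relocated copy: %d\n", *iterator, moved[2]);
      delete[] backing;
      delete[] moved;
      return 0;
    }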
| 382 updateHeapResidency(state); | |
| 383 | |
| 384 // Compaction enable rules: | |
| 385 // - It's been a while since the last time. | |
| 386 // - "Considerable" amount of heap memory is bound up in freelist | |
| 387 // allocations. For now, use a fixed limit irrespective of heap | |
| 388 // size. | |
| 389 // | |
| 390 // As this isn't compacting all arenas, the cost of doing compaction | |
| 391 // isn't a worry as it will additionally only be done by idle GCs. | |
| 392 // TODO: add some form of compaction overhead estimate to the marking | |
| 393 // time estimate. | |
| 394 m_freedPages = 0; | |
| 395 m_freedSize = 0; | |
| 396 | |
| 397 #if STRESS_TEST_HEAP_COMPACTION | |
| 398 // Exercise the handling of object movement by compacting as | |
| 399 // often as possible. | |
| 400 m_doCompact = true; | |
| 401 #else | |
| 402 m_doCompact = | |
| 403 s_forceCompactionGC || | |
| 404 (m_gcCountSinceLastCompaction > kGCCountSinceLastCompactionThreshold && | |
| 405 m_freeListSize > kFreeListSizeThreshold); | |
|
haraken 2016/12/05 11:27:46: Is it enough to look at only m_freeListSize? Even
sof 2016/12/05 19:30:06: It is possible to have a different & more discrimi
| 406 #endif | |
| 407 if (m_doCompact) { | |
| 408 LOG_HEAP_COMPACTION("Compacting: free=%zu\n", m_freeListSize); | |
| 409 m_threadCount = heap.threads().size(); | |
| 410 m_fixups.reset(); | |
| 411 m_gcCountSinceLastCompaction = 0; | |
| 412 s_forceCompactionGC = false; | |
| 413 return BlinkGC::GCWithSweepCompaction; | |
| 414 } | |
| 415 #endif // ENABLE_HEAP_COMPACTION | |
| 416 return gcType; | |
| 417 } | |
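The trigger policy implemented above boils down to a small predicate. The sketch below restates it with placeholder threshold values; the real kGCCountSinceLastCompactionThreshold and kFreeListSizeThreshold are defined elsewhere in Blink and are not shown in this file:

    #include <cstddef>

    enum class ToyGCReason { kIdle, kPrecise, kForced, kConservative };

    // Compact only for GC reasons that guarantee no conservative stack scan,
    // and only when both the GC-count and free-list-size thresholds are met
    // (or a compaction has been forced for testing).
    bool shouldCompact(ToyGCReason reason,
                       std::size_t gcsSinceLastCompaction,
                       std::size_t freeListBytes,
                       bool forceCompaction) {
      const std::size_t kGCCountThreshold = 10;               // placeholder
      const std::size_t kFreeListByteThreshold = 512 * 1024;  // placeholder
      if (reason != ToyGCReason::kIdle && reason != ToyGCReason::kPrecise &&
          reason != ToyGCReason::kForced)
        return false;
      return forceCompaction ||
             (gcsSinceLastCompaction > kGCCountThreshold &&
              freeListBytes > kFreeListByteThreshold);
    }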
| 418 | |
| 419 void HeapCompact::registerMovingObjectReference(MovableReference* slot) { | |
| 420 if (!m_doCompact) | |
| 421 return; | |
| 422 | |
| 423 fixups().add(slot); | |
| 424 } | |
| 425 | |
| 426 void HeapCompact::registerMovingObjectCallback(MovableReference reference, | |
| 427 MovingObjectCallback callback, | |
| 428 void* callbackData) { | |
| 429 if (!m_doCompact) | |
| 430 return; | |
| 431 | |
| 432 fixups().addFixupCallback(reference, callback, callbackData); | |
| 433 } | |
| 434 | |
| 435 void HeapCompact::registerRelocation(MovableReference* slot) { | |
| 436 if (!m_doCompact) | |
| 437 return; | |
| 438 | |
| 439 if (!*slot) | |
|
haraken 2016/12/05 11:27:46: This should not happen, right?
sof 2016/12/05 19:30:06: No, we now insist - removed.
| 440 return; | |
| 441 | |
| 442 fixups().addRelocation(slot); | |
| 443 } | |
| 444 | |
| 445 void HeapCompact::updateHeapResidency(ThreadState* threadState) { | |
| 446 // The heap compaction implementation assumes the contiguous arena range | |
| 447 // | |
| 448 // [Vector1ArenaIndex, HashTableArenaIndex] | |
| 449 // | |
| 450 // in a few spots. Use static asserts here so that this assumption isn't | |
| 451 // silently invalidated by changes to ArenaIndices. | |
| 452 static_assert(BlinkGC::Vector1ArenaIndex + 3 == BlinkGC::Vector4ArenaIndex, | |
| 453 "unexpected ArenaIndices ordering"); | |
| 454 static_assert( | |
| 455 BlinkGC::Vector4ArenaIndex + 1 == BlinkGC::InlineVectorArenaIndex, | |
| 456 "unexpected ArenaIndices ordering"); | |
| 457 static_assert( | |
| 458 BlinkGC::InlineVectorArenaIndex + 1 == BlinkGC::HashTableArenaIndex, | |
| 459 "unexpected ArenaIndices ordering"); | |
| 460 | |
| 461 size_t totalArenaSize = 0; | |
| 462 size_t freeArenaSize = 0; | |
|
haraken 2016/12/05 11:27:46: totalFreeListSize
sof 2016/12/05 19:30:06: Done, but there's arguably over-focus on naming he
| 463 | |
| 464 m_compactableArenas = 0; | |
| 465 #if DEBUG_HEAP_FREELIST | |
| 466 LOG_HEAP_FREELIST("Arena residencies: {"); | |
| 467 #endif | |
| 468 for (int i = BlinkGC::Vector1ArenaIndex; i <= BlinkGC::HashTableArenaIndex; | |
|
haraken 2016/12/05 11:27:46: I'd prefer encapsulating the details of arenas in
sof 2016/12/05 19:30:06: This loop & sampling was moved from ThreadState in
haraken 2016/12/06 13:30:38: In the first review, I just wanted to say that we
| 469 ++i) { | |
| 470 NormalPageArena* arena = | |
| 471 static_cast<NormalPageArena*>(threadState->arena(i)); | |
| 472 size_t arenaSize = arena->arenaSize(); | |
|
haraken 2016/12/05 11:27:46: It looks like arenaSize is unused except debug pri
sof 2016/12/05 19:30:06: ? Not so, it is added to totalArenaSize as well.
haraken 2016/12/06 13:30:38: But totalArenaSize is only for debug printing. You
| 473 size_t freeSize = arena->freeListSize(); | |
|
haraken 2016/12/05 11:27:46: freeListSize
sof 2016/12/05 19:30:05: Alright.
| 474 totalArenaSize += arenaSize; | |
| 475 freeArenaSize += freeSize; | |
| 476 LOG_HEAP_FREELIST("%d: [%zu, %zu], ", i, arenaSize, freeSize); | |
| 477 // TODO: be more discriminating and consider arena | |
| 478 // load factor, effectiveness of past compactions etc. | |
| 479 if (arenaSize == 0) | |
| 480 continue; | |
| 481 // Mark the arena as compactable. | |
| 482 m_compactableArenas |= (0x1u << (BlinkGC::Vector1ArenaIndex + i)); | |
| 483 } | |
| 484 LOG_HEAP_FREELIST("}\nTotal = %zu, Free = %zu\n", totalArenaSize, | |
| 485 freeArenaSize); | |
| 486 | |
| 487 // TODO(sof): consider smoothing the reported sizes. | |
| 488 m_freeListSize = freeArenaSize; | |
|
haraken 2016/12/05 11:27:47: Instead of making m_freeListSize a member variable
sof 2016/12/05 19:30:05: I would prefer to keep it, see above TODO and rema
| 489 } | |
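m_compactableArenas is a per-arena bitmask; the isCompactableArena() predicate used earlier in this file presumably tests the corresponding bit (its definition lives in HeapCompact.h and is not shown here). A generic sketch of the idea, using a plain integer index rather than Blink's ArenaIndices:

    #include <cstdint>

    struct ToyCompactableArenaSet {
      std::uint32_t mask = 0;

      // Mark the arena with the given index as participating in compaction.
      void markCompactable(unsigned arenaIndex) { mask |= 1u << arenaIndex; }

      // Membership test: a single AND against that arena's bit.
      bool isCompactable(unsigned arenaIndex) const {
        return (mask & (1u << arenaIndex)) != 0;
      }
    };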
| 490 | |
| 491 void HeapCompact::finishedArenaCompaction(NormalPageArena* arena, | |
| 492 size_t freedPages, | |
| 493 size_t freedSize) { | |
| 494 if (!m_doCompact) | |
| 495 return; | |
| 496 | |
| 497 fixups().fixupExternalRelocations(arena); | |
| 498 m_freedPages += freedPages; | |
| 499 m_freedSize += freedSize; | |
| 500 } | |
| 501 | |
| 502 void HeapCompact::movedObject(Address from, Address to) { | |
| 503 DCHECK(m_fixups); | |
| 504 m_fixups->relocate(from, to); | |
| 505 } | |
| 506 | |
| 507 void HeapCompact::startThreadCompaction(ThreadState*) { | |
| 508 if (!m_doCompact) | |
| 509 return; | |
| 510 #if DEBUG_LOG_HEAP_COMPACTION_RUNNING_TIME | |
| 511 if (!atomicTestAndSetToOne(&m_startCompaction)) | |
| 512 m_startCompactionTimeMS = WTF::currentTimeMS(); | |
| 513 #endif | |
| 514 } | |
| 515 | |
| 516 void HeapCompact::finishedThreadCompaction(ThreadState*) { | |
| 517 if (!m_doCompact) | |
| 518 return; | |
| 519 | |
| 520 MutexLocker locker(m_mutex); | |
| 521 // The last thread to finish clears out the per-compaction state. | |
| 522 if (!--m_threadCount) { | |
| 523 #if DEBUG_HEAP_COMPACTION | |
| 524 if (m_fixups) | |
| 525 m_fixups->dumpDebugStats(); | |
| 526 #endif | |
| 527 m_fixups.reset(); | |
| 528 m_doCompact = false; | |
| 529 #if DEBUG_LOG_HEAP_COMPACTION_RUNNING_TIME | |
| 530 double end = WTF::currentTimeMS(); | |
| 531 LOG_HEAP_COMPACTION_INTERNAL( | |
| 532 "Compaction stats: time=%gms, pages freed=%zu, size=%zu\n", | |
| 533 end - m_startCompactionTimeMS, m_freedPages, m_freedSize); | |
| 534 m_startCompaction = 0; | |
| 535 m_startCompactionTimeMS = 0; | |
| 536 #else | |
| 537 LOG_HEAP_COMPACTION("Compaction stats: freed pages=%zu size=%zu\n", | |
| 538 m_freedPages, m_freedSize); | |
| 539 #endif | |
| 540 // All compaction has completed, all participating threads may now | |
| 541 // proceed. | |
| 542 m_finished.broadcast(); | |
| 543 } else { | |
| 544 // Letting a thread return to leave GC and become a "mutator" again | |
| 545 // runs the risk of it accessing heaps of other threads that are | |
| 546 // still being compacted. Consequently, all GC-participating threads | |
| 547 // must complete compaction together. | |
| 548 m_finished.wait(m_mutex); | |
| 549 } | |
| 550 } | |
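The "all GC-participating threads leave compaction together" synchronization above follows a common last-one-out barrier pattern. Below is a self-contained sketch using std::mutex and std::condition_variable in place of WTF's MutexLocker and ThreadCondition; names and structure are illustrative, not Blink's:

    #include <condition_variable>
    #include <mutex>
    #include <thread>
    #include <vector>

    class ToyCompactionBarrier {
     public:
      explicit ToyCompactionBarrier(int threads) : remaining_(threads) {}

      // Called by each participating thread once it has finished compacting
      // its own arenas; no thread returns until every thread has finished.
      void finishedThreadCompaction() {
        std::unique_lock<std::mutex> lock(mutex_);
        if (--remaining_ == 0) {
          // Last one out: per-compaction state would be torn down here.
          finished_.notify_all();
        } else {
          finished_.wait(lock, [this] { return remaining_ == 0; });
        }
      }

     private:
      std::mutex mutex_;
      std::condition_variable finished_;
      int remaining_;
    };

    int main() {
      ToyCompactionBarrier barrier(4);
      std::vector<std::thread> threads;
      for (int i = 0; i < 4; ++i)
        threads.emplace_back([&barrier] { barrier.finishedThreadCompaction(); });
      for (auto& t : threads)
        t.join();
      return 0;
    }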
| 551 | |
| 552 void HeapCompact::addCompactablePage(BasePage* page) { | |
| 553 if (!m_doCompact) | |
| 554 return; | |
| 555 fixups().addCompactablePage(page); | |
| 556 } | |
| 557 | |
| 558 bool HeapCompact::scheduleCompactionGCForTesting(bool value) { | |
| 559 bool current = s_forceCompactionGC; | |
| 560 s_forceCompactionGC = value; | |
| 561 return current; | |
| 562 } | |
| 563 | |
| 564 } // namespace blink | |