// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "platform/heap/HeapCompact.h"

#include "platform/RuntimeEnabledFeatures.h"
#include "platform/heap/Heap.h"
#include "platform/heap/SparseHeapBitmap.h"
#include "wtf/CurrentTime.h"
#include "wtf/HashMap.h"
#include "wtf/HashSet.h"
#include "wtf/Vector.h"

namespace blink {

bool HeapCompact::s_forceCompactionGC = false;

// The real worker behind heap compaction, recording references to movable
// objects ("slots".) When the objects end up being compacted and moved,
// relocate() will adjust the slots to point to the new location of the
// object along with handling fixups for interior pointers.
//
// The "fixups" object is created and maintained for the lifetime of one
// heap compaction-enhanced GC.
class HeapCompact::MovableObjectFixups final {
 public:
  static std::unique_ptr<MovableObjectFixups> create() {
    return WTF::wrapUnique(new MovableObjectFixups);
  }

  ~MovableObjectFixups() {}

  // For the arenas being compacted, record all pages belonging to them.
  // This is needed to handle 'interior slots', pointers that themselves
  // can move (independently from the reference the slot points to.)
  void addCompactingPage(BasePage* page) {
    DCHECK(!page->isLargeObjectPage());
    m_relocatablePages.add(page);
  }

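  // Record an 'interior' slot, i.e. a slot that itself resides inside a
  // movable backing store on one of the compacted arenas' pages.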
  void addInteriorFixup(MovableReference* slot) {
    auto it = m_interiorFixups.find(slot);
    // Ephemeron fixpoint iterations may cause repeated registrations.
    if (UNLIKELY(it != m_interiorFixups.end())) {
      DCHECK(!it->value);
      return;
    }
    m_interiorFixups.add(slot, nullptr);
    LOG_HEAP_COMPACTION("Interior slot: %p\n", slot);
    Address slotAddress = reinterpret_cast<Address>(slot);
    if (!m_interiors) {
      m_interiors = SparseHeapBitmap::create(slotAddress);
      return;
    }
    m_interiors->add(slotAddress);
  }

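  // Record the slot |slot|, which points to a movable backing store object.
  // If the slot itself resides on a page being compacted, it is additionally
  // recorded as an interior slot (see addInteriorFixup() above.)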
  void add(MovableReference* slot) {
    MovableReference reference = *slot;
    BasePage* refPage = pageFromObject(reference);
    // Nothing to compact on a large object's page.
    if (refPage->isLargeObjectPage())
      return;

#if DCHECK_IS_ON()
    DCHECK(HeapCompact::isCompactableArena(refPage->arena()->arenaIndex()));
    auto it = m_fixups.find(reference);
    DCHECK(it == m_fixups.end() || it->value == slot);
#endif

    // TODO: when updateHeapResidency() becomes more discriminating about
    // leaving out arenas that aren't worth compacting, a check for
    // isCompactingArena() would be appropriate here, leaving early if
    // |refPage|'s arena isn't in the set.

    m_fixups.add(reference, slot);

    // Note: |slot| will reside outside the Oilpan heap if it is a
    // PersistentHeapCollectionBase. Hence pageFromObject() cannot be
    // used, as it sanity checks the |BasePage| it returns. Simply
    // derive the raw BasePage address here and check if it is a member
    // of the compactable and relocatable page address set.
    Address slotAddress = reinterpret_cast<Address>(slot);
    BasePage* slotPage = reinterpret_cast<BasePage*>(
        blinkPageAddress(slotAddress) + blinkGuardPageSize);
    if (LIKELY(!m_relocatablePages.contains(slotPage)))
      return;
#if ENABLE(ASSERT)
    DCHECK(slotPage->contains(slotAddress));
#endif
    // Unlikely case: the slot resides on a compacting arena's page.
    // => It is an 'interior slot' (interior to a movable backing store.)
    // Record it as an interior slot, which entails:
    //
    //  - Storing it in the interior map, which maps the slot to
    //    its (eventual) location. Initially nullptr.
    //  - Marking it as an interior pointer within the page's
    //    "interior" bitmap. This bitmap is used when moving a backing
    //    store, to quickly check if any interior slots will have to be
    //    additionally redirected.
    addInteriorFixup(slot);
  }

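  // Register a callback to be invoked when the backing store |reference| is
  // moved.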
  void addFixupCallback(MovableReference reference,
                        MovingObjectCallback callback,
                        void* callbackData) {
    DCHECK(!m_fixupCallbacks.contains(reference));
    m_fixupCallbacks.add(reference, std::pair<void*, MovingObjectCallback>(
                                        callbackData, callback));
  }

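  // After the backing store at |from| has been moved to |to|, adjust any
  // interior slots recorded within its |size|-byte payload.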
  void relocateInteriorFixups(Address from, Address to, size_t size) {
    SparseHeapBitmap* range = m_interiors->hasRange(from, size);
    if (LIKELY(!range))
      return;

    // Scan through the payload, looking for interior pointer slots
    // to adjust. If the backing store of such an interior slot hasn't
    // been moved already, update the slot -> real location mapping.
    // When the backing store is eventually moved, it'll use that location.
    //
    for (size_t offset = 0; offset < size; offset += sizeof(void*)) {
      if (!range->isSet(from + offset))
        continue;
      MovableReference* slot =
          reinterpret_cast<MovableReference*>(from + offset);
      auto it = m_interiorFixups.find(slot);
      if (it == m_interiorFixups.end())
        continue;

      // TODO: with the right sparse bitmap representation, it could be
      // possible to quickly determine if we've now stepped past the last
      // address that needed fixup in [from, from + size). Breaking out of
      // this loop might be worth doing for hash table backing stores with
      // a very low load factor. But interior fixups are rare.

      // If |slot|'s mapping is set, then the slot has been adjusted already.
      if (it->value)
        continue;
      Address fixup = to + offset;
      LOG_HEAP_COMPACTION("Range interior fixup: %p %p %p\n", from + offset,
                          it->value, fixup);
      // Fill in the relocated location of the original slot at |slot|.
      // When the backing store corresponding to |slot| is eventually
      // moved/compacted, it'll update |to + offset| with a pointer to the
      // moved backing store.
      m_interiorFixups.set(slot, fixup);
    }
  }

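  // The object at |from| has been moved to |to|: update the slot recorded
  // for it (following any interior-slot redirection) to point at the new
  // location, then invoke a registered move callback, if any.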
  void relocate(Address from, Address to) {
    auto it = m_fixups.find(from);
    DCHECK(it != m_fixups.end());
#if DCHECK_IS_ON()
    BasePage* fromPage = pageFromObject(from);
    DCHECK(m_relocatablePages.contains(fromPage));
#endif
    MovableReference* slot = reinterpret_cast<MovableReference*>(it->value);
    auto interior = m_interiorFixups.find(slot);
    if (interior != m_interiorFixups.end()) {
      MovableReference* slotLocation =
          reinterpret_cast<MovableReference*>(interior->value);
      if (!slotLocation) {
        m_interiorFixups.set(slot, to);
      } else {
        LOG_HEAP_COMPACTION("Redirected slot: %p => %p\n", slot, slotLocation);
        slot = slotLocation;
      }
    }
    // If the slot has subsequently been updated (e.g., by a prefinalizer or
    // a destructor having mutated and expanded/shrunk the collection), do
    // not update and relocate the slot -- |from| is no longer valid and
    // referenced.
    //
    // The slot's contents may also have been cleared during weak processing;
    // no work to be done in that case either.
    if (UNLIKELY(*slot != from)) {
      LOG_HEAP_COMPACTION(
          "No relocation: slot = %p, *slot = %p, from = %p, to = %p\n", slot,
          *slot, from, to);
#if DCHECK_IS_ON()
      // Verify that the already updated slot is valid, meaning:
      //  - has been cleared.
      //  - has been updated & expanded with a large object backing store.
      //  - has been updated with a larger, freshly allocated backing store.
      //    (on a fresh page in a compactable arena that is not being
      //    compacted.)
      if (!*slot)
        return;
      BasePage* slotPage = pageFromObject(*slot);
      DCHECK(
          slotPage->isLargeObjectPage() ||
          (HeapCompact::isCompactableArena(slotPage->arena()->arenaIndex()) &&
           !m_relocatablePages.contains(slotPage)));
#endif
      return;
    }
    *slot = to;

    size_t size = 0;
    auto callback = m_fixupCallbacks.find(from);
    if (UNLIKELY(callback != m_fixupCallbacks.end())) {
      size = HeapObjectHeader::fromPayload(to)->payloadSize();
      callback->value.second(callback->value.first, from, to, size);
    }

    if (!m_interiors)
      return;

    if (!size)
      size = HeapObjectHeader::fromPayload(to)->payloadSize();
    relocateInteriorFixups(from, to, size);
  }

#if DEBUG_HEAP_COMPACTION
  void dumpDebugStats() {
    LOG_HEAP_COMPACTION(
        "Fixups: pages=%u objects=%u callbacks=%u interior-size=%zu"
        " interiors-f=%u\n",
        m_relocatablePages.size(), m_fixups.size(), m_fixupCallbacks.size(),
        m_interiors ? m_interiors->intervalCount() : 0,
        m_interiorFixups.size());
  }
#endif

 private:
  MovableObjectFixups() {}

  // Tracking movable and updatable references. For now, we keep a
  // map which, for each movable object, records the slot that
  // points to it. Upon moving the object, that slot needs to be
  // updated.
  //
  // (TODO: consider in-place updating schemes.)
  HashMap<MovableReference, MovableReference*> m_fixups;

  // Map from movable reference to callbacks that need to be invoked
  // when the object moves.
  HashMap<MovableReference, std::pair<void*, MovingObjectCallback>>
      m_fixupCallbacks;

  // Slot => relocated slot/final location.
  HashMap<MovableReference*, Address> m_interiorFixups;

  // All pages that are being compacted.
  HashSet<BasePage*> m_relocatablePages;

  std::unique_ptr<SparseHeapBitmap> m_interiors;
};

HeapCompact::HeapCompact()
    : m_doCompact(false),
      m_gcCountSinceLastCompaction(0),
      m_threadCount(0),
      m_freeListSize(0),
      m_compactableArenas(0u),
      m_freedPages(0),
      m_freedSize(0)
#if DEBUG_LOG_HEAP_COMPACTION_RUNNING_TIME
      ,
      m_startCompactionTimeMS(0)
#endif
{
  // The heap compaction implementation assumes the contiguous range,
  //
  //   [Vector1ArenaIndex, HashTableArenaIndex]
  //
  // in a few places. Use static asserts here to not have that assumption
  // be silently invalidated by ArenaIndices changes.
  static_assert(BlinkGC::Vector1ArenaIndex + 3 == BlinkGC::Vector4ArenaIndex,
                "unexpected ArenaIndices ordering");
  static_assert(
      BlinkGC::Vector4ArenaIndex + 1 == BlinkGC::InlineVectorArenaIndex,
      "unexpected ArenaIndices ordering");
  static_assert(
      BlinkGC::InlineVectorArenaIndex + 1 == BlinkGC::HashTableArenaIndex,
      "unexpected ArenaIndices ordering");
}

HeapCompact::~HeapCompact() {}

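// Lazily create the fixups object; it is only needed by GCs that compact.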
HeapCompact::MovableObjectFixups& HeapCompact::fixups() {
  if (!m_fixups)
    m_fixups = MovableObjectFixups::create();
  return *m_fixups;
}

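// Decide if the upcoming GC should also compact. Only non-conservative
// (no stack scanning) GCs are eligible, and only when enough free-list
// memory has accumulated or a compaction has been explicitly forced.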
bool HeapCompact::shouldCompact(ThreadState* state,
                                BlinkGC::GCType gcType,
                                BlinkGC::GCReason reason) {
#if !ENABLE_HEAP_COMPACTION
  return false;
#else
  if (!RuntimeEnabledFeatures::heapCompactionEnabled())
    return false;

  LOG_HEAP_COMPACTION("shouldCompact(): gc=%s count=%zu free=%zu\n",
                      ThreadState::gcReasonString(reason),
                      m_gcCountSinceLastCompaction, m_freeListSize);
  m_gcCountSinceLastCompaction++;
  // It is only safe to compact during non-conservative GCs.
  // TODO: for the main thread, limit this further to only idle GCs.
  if (reason != BlinkGC::IdleGC && reason != BlinkGC::PreciseGC &&
      reason != BlinkGC::ForcedGC)
    return false;

  const ThreadHeap& heap = state->heap();
  // If any of the participating threads require a stack scan,
  // do not compact.
  //
  // Why? Should the stack contain an iterator pointing into its
  // associated backing store, its references wouldn't be
  // correctly relocated.
  for (ThreadState* state : heap.threads()) {
    if (state->stackState() == BlinkGC::HeapPointersOnStack) {
      return false;
    }
  }

  // Compaction enable rules:
  //  - It's been a while since the last time.
  //  - "Considerable" amount of heap memory is bound up in freelist
  //    allocations. For now, use a fixed limit irrespective of heap
  //    size.
  //
  // As this isn't compacting all arenas, the cost of doing compaction
  // isn't a worry as it will additionally only be done by idle GCs.
  // TODO: add some form of compaction overhead estimate to the marking
  // time estimate.

  updateHeapResidency(state);

#if STRESS_TEST_HEAP_COMPACTION
  // Exercise the handling of object movement by compacting as
  // often as possible.
  return true;
#else
  return s_forceCompactionGC ||
         (m_gcCountSinceLastCompaction > kGCCountSinceLastCompactionThreshold &&
          m_freeListSize > kFreeListSizeThreshold);
#endif
#endif
}

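// Set up the per-GC compaction state; returns the GC type to use for the
// compacting collection.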
BlinkGC::GCType HeapCompact::initialize(ThreadState* state) {
  DCHECK(RuntimeEnabledFeatures::heapCompactionEnabled());
  LOG_HEAP_COMPACTION("Compacting: free=%zu\n", m_freeListSize);
  m_doCompact = true;
  m_freedPages = 0;
  m_freedSize = 0;
  m_threadCount = state->heap().threads().size();
  m_fixups.reset();
  m_gcCountSinceLastCompaction = 0;
  s_forceCompactionGC = false;
  return BlinkGC::GCWithSweepCompaction;
}

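// Record a slot that refers to a movable backing store. A no-op unless a
// compacting GC is in progress.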
void HeapCompact::registerMovingObjectReference(MovableReference* slot) {
  if (!m_doCompact)
    return;

  fixups().add(slot);
}

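// Register a callback to invoke when |reference|'s backing store is moved.
// A no-op unless a compacting GC is in progress.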
void HeapCompact::registerMovingObjectCallback(MovableReference reference,
                                               MovingObjectCallback callback,
                                               void* callbackData) {
  if (!m_doCompact)
    return;

  fixups().addFixupCallback(reference, callback, callbackData);
}

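// Sample the current size and free-list size of each compactable arena,
// recording which arenas are candidates for compaction; shouldCompact()
// uses the total free-list size when deciding.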
void HeapCompact::updateHeapResidency(ThreadState* threadState) {
  size_t totalArenaSize = 0;
  size_t totalFreeListSize = 0;

  m_compactableArenas = 0;
#if DEBUG_HEAP_FREELIST
  LOG_HEAP_FREELIST("Arena residencies: {");
#endif
  for (int i = BlinkGC::Vector1ArenaIndex; i <= BlinkGC::HashTableArenaIndex;
       ++i) {
    NormalPageArena* arena =
        static_cast<NormalPageArena*>(threadState->arena(i));
    size_t arenaSize = arena->arenaSize();
    size_t freeListSize = arena->freeListSize();
    totalArenaSize += arenaSize;
    totalFreeListSize += freeListSize;
    LOG_HEAP_FREELIST("%d: [%zu, %zu], ", i, arenaSize, freeListSize);
    // TODO: be more discriminating and consider arena
    // load factor, effectiveness of past compactions etc.
    if (!arenaSize)
      continue;
    // Mark the arena as compactable.
    m_compactableArenas |= (0x1u << i);
  }
  LOG_HEAP_FREELIST("}\nTotal = %zu, Free = %zu\n", totalArenaSize,
                    totalFreeListSize);

  // TODO(sof): consider smoothing the reported sizes.
  m_freeListSize = totalFreeListSize;
}

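// Accumulate the number of pages and bytes freed by compacting |arena|.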
void HeapCompact::finishedArenaCompaction(NormalPageArena* arena,
                                          size_t freedPages,
                                          size_t freedSize) {
  if (!m_doCompact)
    return;

  m_freedPages += freedPages;
  m_freedSize += freedSize;
}

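// Forward the move of the object at |from| to |to| to the fixups object,
// which adjusts the recorded slots.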
void HeapCompact::relocate(Address from, Address to) {
  DCHECK(m_fixups);
  m_fixups->relocate(from, to);
}

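// Called as a participating thread starts compacting; only used to record
// the start time when timing instrumentation is enabled.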
void HeapCompact::startThreadCompaction() {
  if (!m_doCompact)
    return;
#if DEBUG_LOG_HEAP_COMPACTION_RUNNING_TIME
  MutexLocker locker(m_mutex);
  if (!m_startCompactionTimeMS)
    m_startCompactionTimeMS = WTF::currentTimeMS();
#endif
}

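// Called by each participating thread when it has finished compacting; the
// last thread to finish clears the per-GC state and wakes up the others,
// which wait here so that no thread resumes before compaction has completed
// everywhere.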
void HeapCompact::finishThreadCompaction() {
  if (!m_doCompact)
    return;

  MutexLocker locker(m_mutex);
  // Final one clears out.
  if (!--m_threadCount) {
#if DEBUG_HEAP_COMPACTION
    if (m_fixups)
      m_fixups->dumpDebugStats();
#endif
    m_fixups.reset();
    m_doCompact = false;
#if DEBUG_LOG_HEAP_COMPACTION_RUNNING_TIME
    double end = WTF::currentTimeMS();
    LOG_HEAP_COMPACTION_INTERNAL(
        "Compaction stats: time=%gms, pages freed=%zu, size=%zu\n",
        end - m_startCompactionTimeMS, m_freedPages, m_freedSize);
    m_startCompactionTimeMS = 0;
#else
    LOG_HEAP_COMPACTION("Compaction stats: freed pages=%zu size=%zu\n",
                        m_freedPages, m_freedSize);
#endif
    // Compaction has been completed by all participating threads; unblock
    // them all.
    m_finished.broadcast();
  } else {
    // Letting a thread return here and exit its safe point opens up the
    // possibility of it accessing heaps of other threads that are still
    // being compacted. The heap is not in a valid state until objects have
    // all been moved together, hence all GC-participating threads must
    // complete compaction together. Grab the condition variable and wait.
    m_finished.wait(m_mutex);
  }
}

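// Register a page belonging to a compacted arena, so that slots residing on
// it can later be recognized as interior slots.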
void HeapCompact::addCompactingPage(BasePage* page) {
  DCHECK(m_doCompact);
  DCHECK(isCompactingArena(page->arena()->arenaIndex()));
  fixups().addCompactingPage(page);
}

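// Testing hook: force the next eligible GC to compact. Returns the previous
// setting.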
bool HeapCompact::scheduleCompactionGCForTesting(bool value) {
  bool current = s_forceCompactionGC;
  s_forceCompactionGC = value;
  return current;
}

}  // namespace blink