Chromium Code Reviews

platform/heap/HeapCompact.cpp
// Copyright 2016 Opera Software AS. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "platform/heap/HeapCompact.h"

#include "platform/RuntimeEnabledFeatures.h"
#include "platform/heap/Heap.h"
#include "platform/heap/SparseHeapBitmap.h"
#include "wtf/CurrentTime.h"
#include "wtf/HashMap.h"
#include "wtf/HashSet.h"
#include "wtf/Vector.h"

namespace blink {

bool HeapCompact::s_forceCompactionGC = false;

// The real worker behind heap compaction, recording references to movable
// objects ("slots".) When the objects end up being compacted and moved,
// relocate() will adjust the slots to point to the new location of the
// object along with handling fixups for interior pointers.
//
// The "fixups" object is created and maintained for the lifetime of one
// heap compaction-enhanced GC.
class HeapCompact::MovableObjectFixups final {
 public:
  static std::unique_ptr<MovableObjectFixups> create() {
    return wrapUnique(new MovableObjectFixups);
  }

  ~MovableObjectFixups() {}

  // For the compactable arenas, record all pages belonging to them.
  // This is needed to handle 'interior slots', pointers that themselves
  // can move (independently from the reference the slot points to.)
  void addCompactablePage(BasePage* page) {
    DCHECK(!page->isLargeObjectPage());
    if (!HeapCompact::isCompactableArena(page->arena()->arenaIndex()))

haraken 2016/12/09 07:25:55: Maybe do you want to mean isCompact"ing"Arena? Al…
sof 2016/12/09 21:44:03: I've converted the assumption into a DCHECK() (not…

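A minimal sketch of the DCHECK() conversion mentioned in the reply above
(hypothetical; the landed change may differ):

    // Pages handed to addCompactablePage() are expected to belong to a
    // compactable (compacting) arena.
    DCHECK(HeapCompact::isCompactableArena(page->arena()->arenaIndex()));
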
      return;

    m_relocatablePages.add(page);
  }

  void add(MovableReference* slot) {
    MovableReference reference = *slot;
    BasePage* refPage = pageFromObject(reference);
    // Nothing to compact on a large object's page.
    if (refPage->isLargeObjectPage())

haraken 2016/12/09 07:25:55: Maybe we also want to check if the refPage is in a…
sof 2016/12/09 21:44:04: I think that's too fine grained a check, i.e., we…

      return;

#if DCHECK_IS_ON()
    auto it = m_fixups.find(reference);
    DCHECK(it == m_fixups.end() || it->value == slot);
#endif
    m_fixups.add(reference, slot);

    Address slotAddress = reinterpret_cast<Address>(slot);
    BasePage* slotPage = reinterpret_cast<BasePage*>(
        blinkPageAddress(slotAddress) + blinkGuardPageSize);

haraken 2016/12/09 07:25:55: It might be better to add a helper function to Hea…
sof 2016/12/09 21:44:03: pageFromObject() does just this already, but this…

    if (LIKELY(!m_relocatablePages.contains(slotPage)))
      return;
#if ENABLE(ASSERT)
    DCHECK(slotPage->contains(slotAddress));
#endif
    // Unlikely case, the slot resides on a compactable arena's page.

haraken 2016/12/09 07:25:55: compact"ing"
sof 2016/12/09 21:44:04: Done.

    // => It is an 'interior slot' (interior to a movable backing store.)
    // Record it as an interior slot, which entails:
    //
    //  - Storing it in the interior map, which maps the slot to
    //    its (eventual) location. Initially nullptr.
    //  - Marking it as an interior pointer within the page's
    //    "interior" bitmap. This bitmap is used when moving a backing
    //    store, to quickly check if interior slots will have to
    //    be additionally redirected.
    addInteriorFixup(slot);
  }

  void addFixupCallback(MovableReference reference,
                        MovingObjectCallback callback,
                        void* callbackData) {
    DCHECK(!m_fixupCallbacks.contains(reference));
    m_fixupCallbacks.add(reference, std::pair<void*, MovingObjectCallback>(
                                        callbackData, callback));
  }

  void relocateInteriorFixups(Address from, Address to, size_t size) {
    SparseHeapBitmap* range = m_interiors->hasRange(from, size);
    if (LIKELY(!range))
      return;

    // Scan through the payload, looking for interior pointer slots
    // to adjust. If the backing store of such an interior slot hasn't
    // been moved already, update the slot -> real location mapping.
    // When the backing store is eventually moved, it'll use that location.
    //
    for (size_t offset = 0; offset < size; offset += sizeof(void*)) {
      if (!range->isSet(from + offset))
        continue;
      MovableReference* slot =
          reinterpret_cast<MovableReference*>(from + offset);
      auto it = m_interiorFixups.find(slot);
      if (it == m_interiorFixups.end())
        continue;

      // TODO: with the right sparse bitmap representation, it could be
      // possible to quickly determine if we've now stepped past the last
      // address that needed fixup in [address, address + size). Breaking
      // out of this loop might be worth doing for hash table backing stores
      // with a very low load factor. But interior fixups are rare.

      // If |slot|'s mapping is set, then the slot has been adjusted already.
      if (it->value)
        continue;
      Address fixup = to + offset;
      LOG_HEAP_COMPACTION("Range interior fixup: %p %p %p\n", from + offset,
                          it->value, fixup);
      // Fill in the relocated location of the original slot at |slot|.
      // When the backing store corresponding to |slot| is eventually
      // moved/compacted, it'll update |to + offset| with a pointer to the
      // moved backing store.
      m_interiorFixups.set(slot, fixup);
    }
  }
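
The loop above relies on SparseHeapBitmap to skip payload words that were never
registered as interior slots. A self-contained sketch of the same bookkeeping,
with standard containers standing in for Blink's types (all names here are
illustrative, not Blink APIs):

    #include <cstdint>
    #include <cstdio>
    #include <unordered_map>
    #include <unordered_set>

    // Toy model: addresses are plain integers, the "bitmap" is a set of
    // flagged word addresses, and the interior-fixup table maps a flagged
    // slot address to its pending relocation target (0 = not yet relocated).
    using ToyAddress = std::uintptr_t;

    void relocateInteriorRange(
        ToyAddress from,
        ToyAddress to,
        std::size_t size,
        const std::unordered_set<ToyAddress>& interiorBitmap,
        std::unordered_map<ToyAddress, ToyAddress>& fixups) {
      for (std::size_t offset = 0; offset < size; offset += sizeof(void*)) {
        if (!interiorBitmap.count(from + offset))
          continue;  // Word was never registered as an interior slot.
        auto it = fixups.find(from + offset);
        if (it == fixups.end() || it->second)
          continue;  // Unknown slot, or already adjusted.
        // Record where this interior slot will live once its backing store
        // moves.
        it->second = to + offset;
      }
    }

    int main() {
      std::unordered_set<ToyAddress> bitmap = {0x1008};
      std::unordered_map<ToyAddress, ToyAddress> fixups = {{0x1008, 0}};
      relocateInteriorRange(0x1000, 0x2000, 0x20, bitmap, fixups);
      std::printf("interior slot 0x1008 -> %#zx\n",
                  static_cast<std::size_t>(fixups[0x1008]));
      return 0;
    }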

  void relocate(Address from, Address to) {
    auto it = m_fixups.find(from);
    DCHECK(it != m_fixups.end());
#if DCHECK_IS_ON()
    BasePage* fromPage = reinterpret_cast<BasePage*>(blinkPageAddress(from) +
                                                     blinkGuardPageSize);
    DCHECK(m_relocatablePages.contains(fromPage));

haraken 2016/12/09 07:25:55: Add DCHECK(m_relocatablePages.contains(toPage));
sof 2016/12/09 21:44:04: Hmm, that's a bit unnatural a check - why would we…

#endif
    MovableReference* slot = reinterpret_cast<MovableReference*>(it->value);
    auto interior = m_interiorFixups.find(slot);
    if (interior != m_interiorFixups.end()) {
      MovableReference* slotLocation =
          reinterpret_cast<MovableReference*>(interior->value);
      if (!slotLocation) {
        m_interiorFixups.set(slot, to);

haraken 2016/12/09 07:25:55: I'll stop asking a question about this, but I stil…
sof 2016/12/09 21:44:03: We're here if an interior slot's backing store is…

      } else {
        LOG_HEAP_COMPACTION("Redirected slot: %p => %p\n", slot, slotLocation);
        slot = slotLocation;
      }
    }
    // If the slot has subsequently been updated (a prefinalizer or
    // a destructor having mutated and expanded/shrunk the collection),
    // do not update and relocate the slot -- |from| is no longer valid
    // and referenced.
    //
    // The slot's contents may also have been cleared during weak processing;
    // no work to be done in that case either.
    if (UNLIKELY(*slot != from)) {
      LOG_HEAP_COMPACTION(
          "No relocation: slot = %p, *slot = %p, from = %p, to = %p\n", slot,
          *slot, from, to);
#if DCHECK_IS_ON()
      // Verify that the already updated slot is valid, meaning:
      //  - has been cleared.
      //  - has been updated & expanded with a large object backing store.
      //  - has been updated with a larger, freshly allocated backing store.
      //    (on a fresh page in a compactable arena that is not being
      //    compacted.)
      BasePage* refPage = reinterpret_cast<BasePage*>(

haraken 2016/12/09 07:25:55: refPage => slotPage
sof 2016/12/09 21:44:03: Done.

          blinkPageAddress(reinterpret_cast<Address>(*slot)) +
          blinkGuardPageSize);
      DCHECK(!*slot || refPage->isLargeObjectPage() ||
             (HeapCompact::isCompactableArena(refPage->arena()->arenaIndex()) &&
              m_relocatablePages.contains(refPage)));

haraken 2016/12/09 07:25:54: Shouldn't this be: DCHECK(!*slot || !m_relocata…
sof 2016/12/09 21:44:03: Indeed, thanks - it should check what the comment…

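Per the exchange above, the assert is corrected in a later patch set to check
what the preceding comment describes. A hedged sketch of that corrected check
(not the code under review; it assumes that large object pages are never added
to m_relocatablePages, which addCompactablePage() enforces):

      // The already updated slot must have been cleared, or must now point
      // outside the set of pages being compacted.
      DCHECK(!*slot || !m_relocatablePages.contains(refPage));
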
#endif
      return;
    }
    *slot = to;

    size_t size = 0;
    auto callback = m_fixupCallbacks.find(from);
    if (UNLIKELY(callback != m_fixupCallbacks.end())) {
      size = HeapObjectHeader::fromPayload(to)->payloadSize();
      callback->value.second(callback->value.first, from, to, size);
    }

    if (!m_interiors)
      return;

    if (!size)
      size = HeapObjectHeader::fromPayload(to)->payloadSize();
    relocateInteriorFixups(from, to, size);
  }

  void addInteriorFixup(MovableReference* slot) {

haraken 2016/12/09 07:25:55: Move this method up to just below add().
sof 2016/12/09 21:44:03: Done.

    auto it = m_interiorFixups.find(slot);
    // Ephemeron fixpoint iterations may cause repeated registrations.
    if (UNLIKELY(it != m_interiorFixups.end())) {
      DCHECK(!it->value);
      return;
    }
    m_interiorFixups.add(slot, nullptr);
    LOG_HEAP_COMPACTION("Interior slot: %p\n", slot);
    Address slotAddress = reinterpret_cast<Address>(slot);
    if (!m_interiors) {
      m_interiors = SparseHeapBitmap::create(slotAddress);
      return;
    }
    m_interiors->add(slotAddress);
  }

#if DEBUG_HEAP_COMPACTION
  void dumpDebugStats() {
    LOG_HEAP_COMPACTION(
        "Fixups: pages=%u objects=%u callbacks=%u interior-size=%zu"
        " interiors-f=%u\n",
        m_relocatablePages.size(), m_fixups.size(), m_fixupCallbacks.size(),
        m_interiors ? m_interiors->intervalCount() : 0,
        m_interiorFixups.size());
  }
#endif

 private:
  MovableObjectFixups() {}

  // Tracking movable and updatable references. For now, we keep a
  // map which, for each movable object, records the slot that
  // points to it. Upon moving the object, that slot needs to be
  // updated.
  //
  // (TODO: consider in-place updating schemes.)
  HashMap<MovableReference, MovableReference*> m_fixups;

  // Map from movable reference to callbacks that need to be invoked
  // when the object moves.
  HashMap<MovableReference, std::pair<void*, MovingObjectCallback>>
      m_fixupCallbacks;

  // Slot => relocated slot/final location.
  HashMap<MovableReference*, Address> m_interiorFixups;

  // All pages that are being compacted.
  HashSet<BasePage*> m_relocatablePages;

  std::unique_ptr<SparseHeapBitmap> m_interiors;
};

haraken 2016/12/09 07:25:55: Would you add an assert to ScriptWrappableVisitor…
sof 2016/12/09 21:44:04: Done, but should I be concerned about perf fallout…

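MovableObjectFixups above is the core bookkeeping: a slot map filled during
marking, an interior-slot table plus bitmap, and relocate() patching slots as
backing stores move. A compressed, self-contained model of the basic slot
patching, using standard containers (illustrative only; the names and
simplifications are not Blink's):

    #include <cassert>
    #include <cstdio>
    #include <unordered_map>

    // Toy model of the slot-fixup bookkeeping: objects and slots are just
    // pointers; "moving" an object means copying it and telling the fixups
    // table about the move.
    struct ToyFixups {
      // object -> slot that points to it (the real code also tracks interior
      // slots and per-page bitmaps; omitted here).
      std::unordered_map<void*, void**> slots;

      void add(void** slot) { slots[*slot] = slot; }

      void relocate(void* from, void* to) {
        auto it = slots.find(from);
        assert(it != slots.end());
        void** slot = it->second;
        if (*slot != from)
          return;  // Slot was cleared or re-pointed since registration.
        *slot = to;  // Patch the reference to the object's new location.
      }
    };

    int main() {
      int backingA = 1, backingB = 2;
      int* slot = &backingA;  // A "member" pointing at a movable store.

      ToyFixups fixups;
      fixups.add(reinterpret_cast<void**>(&slot));

      // Pretend the compactor moved backingA's contents to backingB.
      backingB = backingA;
      fixups.relocate(&backingA, &backingB);

      std::printf("slot now points at %d via %p\n", *slot,
                  static_cast<void*>(slot));
      return 0;
    }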

HeapCompact::HeapCompact()
    : m_doCompact(false),
      m_gcCountSinceLastCompaction(0),
      m_threadCount(0),
      m_freeListSize(0),
      m_compactableArenas(0u),
      m_freedPages(0),
      m_freedSize(0)
#if DEBUG_LOG_HEAP_COMPACTION_RUNNING_TIME
      ,
      m_startCompaction(0),
      m_startCompactionTimeMS(0)
#endif
{
}

HeapCompact::~HeapCompact() {}

HeapCompact::MovableObjectFixups& HeapCompact::fixups() {
  if (!m_fixups)
    m_fixups = MovableObjectFixups::create();

haraken 2016/12/09 07:25:55: Nit: It wouldn't make much sense to create the fix…

  return *m_fixups;
}

bool HeapCompact::shouldCompact(ThreadState* state,
                                BlinkGC::GCType gcType,
                                BlinkGC::GCReason reason) {
#if !ENABLE_HEAP_COMPACTION
  return false;
#else
  if (!RuntimeEnabledFeatures::heapCompactionEnabled())
    return false;

  LOG_HEAP_COMPACTION("shouldCompact(): gc=%s count=%zu free=%zu\n",
                      ThreadState::gcReasonString(reason),
                      m_gcCountSinceLastCompaction, m_freeListSize);
  m_gcCountSinceLastCompaction++;
  // It is only safe to compact during non-conservative GCs.
  // TODO: for the main thread, limit this further to only idle GCs.
  if (reason != BlinkGC::IdleGC && reason != BlinkGC::PreciseGC &&
      reason != BlinkGC::ForcedGC)

haraken 2016/12/09 07:25:55: Do we need this check? I guess the following Blink…
sof 2016/12/09 21:44:03: I think it is fine to have it here; if a page navi…

    return false;

  const ThreadHeap& heap = state->heap();
  // If any of the participating threads require a stack scan,
  // do not compact.
  //
  // Why? Should the stack contain an iterator pointing into its
  // associated backing store, its references wouldn't be
  // correctly relocated.
  for (ThreadState* state : heap.threads()) {
    if (state->stackState() == BlinkGC::HeapPointersOnStack) {
      return false;
    }
  }

  // Compaction enable rules:
  //  - It's been a while since the last time.
  //  - A "considerable" amount of heap memory is bound up in freelist
  //    allocations. For now, use a fixed limit irrespective of heap
  //    size.
  //
  // As this isn't compacting all arenas, the cost of doing compaction
  // isn't a worry, as it will additionally only be done by idle GCs.
  // TODO: add some form of compaction overhead estimate to the marking
  // time estimate.

  updateHeapResidency(state);

#if STRESS_TEST_HEAP_COMPACTION
  // Exercise the handling of object movement by compacting as
  // often as possible.
  return true;
#else
  return s_forceCompactionGC ||
         (m_gcCountSinceLastCompaction > kGCCountSinceLastCompactionThreshold &&
          m_freeListSize > kFreeListSizeThreshold);
#endif
#endif
}
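
The return expression above boils down to two thresholds: enough GCs since the
last compaction, and enough memory parked on freelists. A standalone sketch of
that heuristic with made-up threshold values (the real constants are declared
elsewhere in the heap code):

    #include <cstddef>
    #include <cstdio>

    // Illustrative stand-ins for the real thresholds (values invented here).
    constexpr std::size_t kToyGCCountThreshold = 10;  // GCs since last run.
    constexpr std::size_t kToyFreeListSizeThreshold = 512 * 1024;  // Bytes.

    bool shouldCompactToy(bool forced,
                          std::size_t gcsSinceLastCompaction,
                          std::size_t freeListBytes) {
      // Compact when explicitly forced (testing), or when it has been a while
      // and a considerable amount of memory sits unused on freelists.
      return forced || (gcsSinceLastCompaction > kToyGCCountThreshold &&
                        freeListBytes > kToyFreeListSizeThreshold);
    }

    int main() {
      std::printf("%d\n", shouldCompactToy(false, 12, 1024 * 1024));  // 1
      std::printf("%d\n", shouldCompactToy(false, 3, 1024 * 1024));   // 0
      return 0;
    }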

BlinkGC::GCType HeapCompact::initialize(ThreadState* state) {
  DCHECK(RuntimeEnabledFeatures::heapCompactionEnabled());
  LOG_HEAP_COMPACTION("Compacting: free=%zu\n", m_freeListSize);
  m_doCompact = true;
  m_freedPages = 0;
  m_freedSize = 0;
  m_threadCount = state->heap().threads().size();
  m_fixups.reset();
  m_gcCountSinceLastCompaction = 0;
  s_forceCompactionGC = false;
  return BlinkGC::GCWithSweepCompaction;

haraken 2016/12/09 07:25:55: It doesn't really make sense to return GCWithSweep…
sof 2016/12/09 21:44:04: The caller shouldn't have to know such details, I…

}

void HeapCompact::registerMovingObjectReference(MovableReference* slot) {
  if (!m_doCompact)
    return;

  fixups().add(slot);
}

void HeapCompact::registerMovingObjectCallback(MovableReference reference,
                                               MovingObjectCallback callback,
                                               void* callbackData) {
  if (!m_doCompact)
    return;

  fixups().addFixupCallback(reference, callback, callbackData);
}

void HeapCompact::updateHeapResidency(ThreadState* threadState) {
  // The heap compaction implementation assumes that the range
  //
  //   [Vector1ArenaIndex, HashTableArenaIndex]
  //
  // is contiguous in a few spots. Use static asserts here so that this
  // assumption isn't silently invalidated by ArenaIndices changes.
  static_assert(BlinkGC::Vector1ArenaIndex + 3 == BlinkGC::Vector4ArenaIndex,
                "unexpected ArenaIndices ordering");
  static_assert(
      BlinkGC::Vector4ArenaIndex + 1 == BlinkGC::InlineVectorArenaIndex,
      "unexpected ArenaIndices ordering");
  static_assert(
      BlinkGC::InlineVectorArenaIndex + 1 == BlinkGC::HashTableArenaIndex,
      "unexpected ArenaIndices ordering");

  size_t totalArenaSize = 0;
  size_t totalFreeListSize = 0;

  m_compactableArenas = 0;
#if DEBUG_HEAP_FREELIST
  LOG_HEAP_FREELIST("Arena residencies: {");
#endif
  for (int i = BlinkGC::Vector1ArenaIndex; i <= BlinkGC::HashTableArenaIndex;
       ++i) {
    NormalPageArena* arena =
        static_cast<NormalPageArena*>(threadState->arena(i));
    size_t arenaSize = arena->arenaSize();
    size_t freeListSize = arena->freeListSize();
    totalArenaSize += arenaSize;
    totalFreeListSize += freeListSize;
    LOG_HEAP_FREELIST("%d: [%zu, %zu], ", i, arenaSize, freeListSize);
    // TODO: be more discriminating and consider arena
    // load factor, effectiveness of past compactions etc.
    if (!arenaSize)

haraken 2016/12/09 07:25:55: Actually this is not doing a meaningful check... a…
sof 2016/12/09 21:44:04: Some of the vector arenas do run into this, when o…

      continue;
    // Mark the arena as compactable.
    m_compactableArenas |= (0x1u << (BlinkGC::Vector1ArenaIndex + i));
  }
  LOG_HEAP_FREELIST("}\nTotal = %zu, Free = %zu\n", totalArenaSize,
                    totalFreeListSize);

  // TODO(sof): consider smoothing the reported sizes.
  m_freeListSize = totalFreeListSize;
}
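
m_compactableArenas is a small bitmask with one bit per arena considered for
compaction. A minimal sketch of marking and querying such a mask, with
illustrative indices and helper names (not Blink's):

    #include <cstdint>
    #include <cstdio>

    // Illustrative arena indices (not Blink's actual values).
    enum ToyArenaIndex : unsigned {
      kVector1 = 3, kVector2, kVector3, kVector4, kInlineVector, kHashTable
    };

    // Set the bit for an arena that should be compacted.
    inline void markCompactable(std::uint32_t& mask, unsigned arenaIndex) {
      mask |= 1u << arenaIndex;
    }

    // Query whether an arena was marked for compaction.
    inline bool isCompacting(std::uint32_t mask, unsigned arenaIndex) {
      return mask & (1u << arenaIndex);
    }

    int main() {
      std::uint32_t mask = 0;
      markCompactable(mask, kHashTable);
      std::printf("hash table arena: %d\n", isCompacting(mask, kHashTable));
      std::printf("vector1 arena: %d\n", isCompacting(mask, kVector1));
      return 0;
    }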

void HeapCompact::finishedArenaCompaction(NormalPageArena* arena,
                                          size_t freedPages,
                                          size_t freedSize) {
  if (!m_doCompact)
    return;

  m_freedPages += freedPages;
  m_freedSize += freedSize;
}

void HeapCompact::relocate(Address from, Address to) {
  DCHECK(m_fixups);
  m_fixups->relocate(from, to);
}

void HeapCompact::startThreadCompaction(ThreadState*) {

haraken 2016/12/09 07:25:54: Drop ThreadState*.
sof 2016/12/09 21:44:03: Done.

  if (!m_doCompact)
    return;
#if DEBUG_LOG_HEAP_COMPACTION_RUNNING_TIME
  if (!atomicTestAndSetToOne(&m_startCompaction))
    m_startCompactionTimeMS = WTF::currentTimeMS();

haraken 2016/12/09 07:25:55: Slightly simpler: MutexLocker locker; if (!m_star…
sof 2016/12/09 21:44:03: Done. I thought I'd sneak in the first user of ato…

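A hedged sketch of the simpler mutex-guarded form suggested above (the exact
member and lock used in the landed patch may differ):

    MutexLocker locker(m_mutex);
    if (!m_startCompaction)
      m_startCompactionTimeMS = WTF::currentTimeMS();
    m_startCompaction = 1;
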
#endif
}

void HeapCompact::finishedThreadCompaction(ThreadState*) {

haraken 2016/12/09 07:25:55: Drop ThreadState*.
sof 2016/12/09 21:44:04: Done.

  if (!m_doCompact)
    return;

  MutexLocker locker(m_mutex);
  // Final one clears out.
  if (!--m_threadCount) {
#if DEBUG_HEAP_COMPACTION
    if (m_fixups)
      m_fixups->dumpDebugStats();
#endif
    m_fixups.reset();
    m_doCompact = false;
#if DEBUG_LOG_HEAP_COMPACTION_RUNNING_TIME
    double end = WTF::currentTimeMS();
    LOG_HEAP_COMPACTION_INTERNAL(
        "Compaction stats: time=%gms, pages freed=%zu, size=%zu\n",
        end - m_startCompactionTimeMS, m_freedPages, m_freedSize);
    m_startCompaction = 0;
    m_startCompactionTimeMS = 0;
#else
    LOG_HEAP_COMPACTION("Compaction stats: freed pages=%zu size=%zu\n",
                        m_freedPages, m_freedSize);
#endif
    // All compaction has completed; all participating threads may now
    // proceed.
    m_finished.broadcast();
  } else {
    // Letting a thread leave the GC and become a "mutator" again
    // runs the risk of it accessing heaps of other threads that are
    // still being compacted. Consequently, all GC-participating threads
    // must complete compaction together.
    m_finished.wait(m_mutex);
  }
}
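
finishedThreadCompaction() acts as a barrier: the last thread to finish
broadcasts, the others wait. A self-contained model of that rendezvous using
standard-library primitives in place of WTF's MutexLocker/ThreadCondition
(purely illustrative):

    #include <condition_variable>
    #include <cstdio>
    #include <mutex>
    #include <thread>
    #include <vector>

    std::mutex gMutex;
    std::condition_variable gFinished;
    int gThreadCount = 4;  // Number of participating threads, set up front.

    void finishedThreadCompactionToy(int id) {
      std::unique_lock<std::mutex> lock(gMutex);
      if (!--gThreadCount) {
        // Final one clears out and releases everyone.
        std::printf("thread %d: last out, broadcasting\n", id);
        gFinished.notify_all();
      } else {
        // Others must not resume as mutators while compaction is running.
        gFinished.wait(lock, [] { return gThreadCount == 0; });
      }
    }

    int main() {
      std::vector<std::thread> threads;
      for (int i = 0; i < 4; ++i)
        threads.emplace_back(finishedThreadCompactionToy, i);
      for (auto& t : threads)
        t.join();
      std::printf("all threads resumed\n");
      return 0;
    }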

void HeapCompact::addCompactablePage(BasePage* page) {
  if (!m_doCompact)
    return;
  fixups().addCompactablePage(page);
}

bool HeapCompact::scheduleCompactionGCForTesting(bool value) {
  bool current = s_forceCompactionGC;
  s_forceCompactionGC = value;
  return current;
}

} // namespace blink