1 // Copyright 2016 Opera Software AS. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "platform/heap/HeapCompact.h"
6
7 #include "platform/RuntimeEnabledFeatures.h"
8 #include "platform/heap/Heap.h"
9 #include "platform/heap/SparseHeapBitmap.h"
10 #include "wtf/CurrentTime.h"
11 #include "wtf/HashMap.h"
12 #include "wtf/HashSet.h"
13 #include "wtf/Vector.h"
14
15 namespace blink {
16
17 bool HeapCompact::s_forceCompactionGC = false;
18
19 // The real worker behind heap compaction, recording references to movable
20 // objects ("slots".) When the objects end up being compacted and moved,
21 // relocate() will adjust the slots to point to the new location of the
22 // object along with handling fixups for interior pointers.
23 //
24 // The "fixups" object is created and maintained for the lifetime of one
25 // heap compaction-enhanced GC.
26 class HeapCompact::MovableObjectFixups final {
27  public:
28   static std::unique_ptr<MovableObjectFixups> create() {
29     return wrapUnique(new MovableObjectFixups);
30   }
31
32   ~MovableObjectFixups() {}
33
34   // For the compactable arenas, record all pages belonging to them.
35   // This is needed to handle 'interior slots', pointers that themselves
36   // can move (independently from the reference the slot points to.)
37   void addCompactablePage(BasePage* page) {
38     DCHECK(!page->isLargeObjectPage());
39     if (!HeapCompact::isCompactableArena(page->arena()->arenaIndex()))
40       return;
41
42     m_relocatablePages.add(page);
43   }
44
45   void add(MovableReference* slot) {
46     MovableReference reference = *slot;
47     BasePage* refPage = pageFromObject(reference);
48     // Nothing to compact on a large object's page.
49     if (refPage->isLargeObjectPage())
50       return;
51
52 #if DCHECK_IS_ON()
53     auto it = m_fixups.find(reference);
54     DCHECK(it == m_fixups.end() || it->value == slot);
55 #endif
56     m_fixups.add(reference, slot);
57
58     Address slotAddress = reinterpret_cast<Address>(slot);
59     BasePage* slotPage = reinterpret_cast<BasePage*>(
60         blinkPageAddress(slotAddress) + blinkGuardPageSize);
61     if (!m_relocatablePages.contains(slotPage))
haraken 2016/12/06 13:30:39: Add LIKELY (or a comment) for documentation purposes.
sof 2016/12/06 21:39:34: Yes, LIKELY() is warranted. Added.
62       return;
63 #if ENABLE(ASSERT)
haraken 2016/12/06 13:30:38: Remove?
sof 2016/12/06 21:39:34: The ENABLE(ASSERT) is needed to unbreak CrOS bots.
haraken 2016/12/07 08:55:11: Then can we use #if DCHECK_IS_ON()? I guess ENABLE(ASSERT)…
sof 2016/12/07 10:45:08: That would be redundant; the situation is a relati…
64     DCHECK(slotPage->contains(slotAddress));
65 #endif
66     // Slot resides on a compactable arena's page.
67     // => It is an 'interior slot' (interior to a movable backing store.)
68     // Record it as an interior slot, which entails:
69     //
70     //  - Storing it in the interior map, which maps the slot to
71     //    its (eventual) location. Initially nullptr.
72     //  - Marking it as an interior pointer within the page's
73     //    "interior" bitmap. This bitmap is used when moving a backing
74     //    store, to more quickly check if interior slots will have to
75     //    be additionally redirected.
76     addInteriorFixup(slotAddress, slot);
77   }
78
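To make the interior-slot bookkeeping above concrete, here is a minimal standalone sketch (hypothetical names; a std::map stands in for m_interiorFixups, and no Blink API is used). It shows the ordering in which the containing backing store moves first, so the recorded slot location is what the later object move must write through:

#include <cassert>
#include <cstring>
#include <map>

int main() {
  char inner[16] = {};   // A movable object.
  void* outer[4] = {};   // A movable backing store.
  outer[2] = inner;      // An interior slot: it lives inside |outer|.
  void** slot = &outer[2];

  // Analogue of m_interiorFixups: slot -> relocated slot location.
  std::map<void**, void**> interiorFixups;
  interiorFixups[slot] = nullptr;  // Registered; location not yet known.

  // The containing store moves first: the payload is copied and the slot's
  // new address is recorded (what relocateInteriorFixups() does above).
  void* outerMoved[4];
  std::memcpy(outerMoved, outer, sizeof(outer));
  interiorFixups[slot] = &outerMoved[2];

  // The referenced object moves next: the fixup is applied at the slot's
  // *relocated* address (the redirection step in relocate() below).
  char innerMoved[16] = {};
  void** slotLocation = interiorFixups[slot];
  *slotLocation = innerMoved;

  assert(outerMoved[2] == innerMoved);
}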
79   void addFixupCallback(MovableReference reference,
80                         MovingObjectCallback callback,
81                         void* callbackData) {
82     DCHECK(!m_fixupCallbacks.contains(reference));
83     m_fixupCallbacks.add(reference, std::pair<void*, MovingObjectCallback>(
84                                         callbackData, callback));
85   }
86
87   void relocateInteriorFixups(Address from, Address to, size_t size) {
88     SparseHeapBitmap* range = m_interiors->hasRange(from, size);
89     if (LIKELY(!range))
90       return;
91
92     // Scan through the payload, looking for interior pointer slots
93     // to adjust. If the backing store of such an interior slot hasn't
94     // been moved already, update the slot -> real location mapping.
95     // When the backing store is eventually moved, it'll use that location.
96     //
97     for (size_t offset = 0; offset < size; offset += sizeof(void*)) {
98       if (!range->isSet(from + offset))
99         continue;
100       MovableReference* slot =
101           reinterpret_cast<MovableReference*>(from + offset);
102       auto it = m_interiorFixups.find(slot);
103       if (it == m_interiorFixups.end())
104         continue;
105
106       // TODO: with the right sparse bitmap representation, it could be
107       // possible to quickly determine if we've now stepped past the last
108       // address that needed fixup in [address, address + size). Breaking
109       // out of this loop might be worth doing for hash table backing stores
110       // with a very low load factor. But interior fixups are rare.
111
112       // If |slot|'s mapping is set, then the slot has been adjusted already.
113       if (it->value)
haraken 2016/12/06 13:30:39: Maybe this should not happen if we apply my comment…
114         continue;
115       Address fixup = to + offset;
116       LOG_HEAP_COMPACTION("Range interior fixup: %p %p %p\n", from + offset,
117                           it->value, fixup);
118       // Fill in the relocated location of the original slot at |slot|.
119       // When the backing store corresponding to |slot| is eventually
120       // moved/compacted, it'll update |to + offset| with a pointer to the
121       // moved backing store.
122       m_interiorFixups.set(slot, fixup);
haraken 2016/12/06 13:30:38: If we apply my comment at line 131, we should call…
123     }
124   }
125
126   void relocate(Address from, Address to) {
haraken 2016/12/06 13:30:39: Can we add an assert to check that |from| is in m_…
sof 2016/12/06 21:39:34: Done.
127     auto it = m_fixups.find(from);
128     DCHECK(it != m_fixups.end());
129     MovableReference* slot = reinterpret_cast<MovableReference*>(it->value);
130     auto interior = m_interiorFixups.find(slot);
131     if (interior != m_interiorFixups.end()) {
haraken 2016/12/06 13:30:38: Hmm, I still don't quite understand this. Can we r…
sof 2016/12/06 21:39:34: We can't enforce an object graph heap ordering, i.e.…
haraken 2016/12/07 08:55:11: If a backing store A is contained in a backing store…
sof 2016/12/07 10:45:08: I don't think your assumptions hold, we're compacting…
haraken 2016/12/07 12:44:33: Ah, thanks, now I understand!
132       MovableReference* slotLocation =
133           reinterpret_cast<MovableReference*>(interior->value);
134       if (!slotLocation) {
135         m_interiorFixups.set(slot, to);
136       } else {
137         LOG_HEAP_COMPACTION("Redirected slot: %p => %p\n", slot, slotLocation);
138         slot = slotLocation;
139       }
140     }
141     // If the slot has subsequently been updated -- a prefinalizer or
142     // a destructor having mutated and expanded/shrunk the collection --
143     // do not update and relocate the slot; |from| is no longer valid
144     // and referenced.
145     //
146     // The slot's contents may also have been cleared during weak processing;
147     // no work to be done in that case either.
148     if (UNLIKELY(*slot != from)) {
haraken 2016/12/06 13:30:38: Just in case, can we add the following assert?
sof 2016/12/06 21:39:34: I think so -- a non-null *slot must be pointing to…
149       LOG_HEAP_COMPACTION(
150           "No relocation: slot = %p, *slot = %p, from = %p, to = %p\n", slot,
151           *slot, from, to);
152       return;
153     }
154     *slot = to;
155
156     size_t size = 0;
157     auto callback = m_fixupCallbacks.find(from);
158     if (UNLIKELY(callback != m_fixupCallbacks.end())) {
159       size = HeapObjectHeader::fromPayload(to)->payloadSize();
160       callback->value.second(callback->value.first, from, to, size);
161     }
162
163     if (LIKELY(!m_interiors))
haraken 2016/12/06 13:30:38: I'm just curious but is this really LIKELY? I guess…
sof 2016/12/06 21:39:34: You're right, the (un)LIKELY() is used one level t…
164       return;
165
166     if (!size)
167       size = HeapObjectHeader::fromPayload(to)->payloadSize();
168     relocateInteriorFixups(from, to, size);
169   }
170
171   void addInteriorFixup(Address interior, MovableReference* slot) {
haraken 2016/12/06 13:30:38: Move this function up to just below add().
haraken 2016/12/06 13:30:38: Maybe can we drop the |interior| parameter? |interior|…
sof 2016/12/06 21:39:34: Ah yes, quite redundant to pass that separately.
172     auto it = m_interiorFixups.find(slot);
173     // Ephemeron fixpoint iterations may cause repeated
174     // registrations.
175     DCHECK(it == m_interiorFixups.end() || !it->value);
176     if (UNLIKELY(it != m_interiorFixups.end() && !it->value))
177       return;
178     m_interiorFixups.add(slot, nullptr);
179     LOG_HEAP_COMPACTION("Interior: %p\n", interior);
180     if (!m_interiors) {
181       m_interiors = SparseHeapBitmap::create(interior);
182       return;
183     }
184     m_interiors->add(interior);
185   }
186
187   void addRelocation(MovableReference* slot) {
188     MovableReference reference = *slot;
189     if (!m_fixups.contains(reference)) {
190       // Record the interior pointer.
191       addInteriorFixup(reinterpret_cast<Address>(reference), slot);
192     }
193
194     BasePage* heapPage = pageFromObject(reference);
195     DCHECK(heapPage);
196     DCHECK(!heapPage->isLargeObjectPage());
197     // For now, the heap objects we're adding relocations for are assumed
198     // to be residing in a compactable heap. There's no reason why it must be
199     // so, just a sanity checking assert while phasing in this extra set of
200     // relocations.
201     DCHECK(m_relocatablePages.contains(heapPage));
202
203     NormalPage* normalPage = static_cast<NormalPage*>(heapPage);
204     auto perHeap = m_externalRelocations.find(normalPage->arenaForNormalPage());
205     if (perHeap == m_externalRelocations.end()) {
206       Vector<MovableReference*> relocations;
207       relocations.append(slot);
208       ExternalRelocations table;
209       table.add(*slot, relocations);
210       m_externalRelocations.add(normalPage->arenaForNormalPage(), table);
211       return;
212     }
213     auto entry = perHeap->value.find(*slot);
214     if (entry == perHeap->value.end()) {
215       Vector<MovableReference*> relocations;
216       relocations.append(slot);
217       perHeap->value.add(*slot, relocations);
218       return;
219     }
220     entry->value.append(slot);
221   }
222
223   void fixupExternalRelocations(NormalPageArena* arena) {
224     auto perHeap = m_externalRelocations.find(arena);
225     if (LIKELY(perHeap == m_externalRelocations.end()))
226       return;
227     for (const auto& entry : perHeap->value) {
228       MovableReference heapObject = entry.key;
229       // |heapObject| will either be in |m_fixups| or have been recorded as
230       // an internal fixup.
231       auto heapEntry = m_fixups.find(heapObject);
232       if (heapEntry != m_fixups.end()) {
233         for (auto slot : entry.value)
234           *slot = reinterpret_cast<MovableReference>(heapEntry->value);
235         continue;
236       }
237       // The movement of the containing object will have moved the
238       // interior slot.
239       auto it = m_interiorFixups.find(
240           reinterpret_cast<MovableReference*>(heapObject));
241       DCHECK(it != m_interiorFixups.end());
242       for (auto slot : entry.value)
243         *slot = reinterpret_cast<MovableReference>(it->value);
244     }
245   }
246
247 #if DEBUG_HEAP_COMPACTION
248   void dumpDebugStats() {
249     LOG_HEAP_COMPACTION(
250         "Fixups: pages=%u objects=%u callbacks=%u interior-size=%zu"
251         " interiors-f=%u externals=%u\n",
252         m_relocatablePages.size(), m_fixups.size(), m_fixupCallbacks.size(),
253         m_interiors ? m_interiors->intervalCount() : 0, m_interiorFixups.size(),
254         m_externalRelocations.size());
255   }
256 #endif
257
258  private:
259   MovableObjectFixups() {}
260
261   // Tracking movable and updatable references. For now, we keep a
262   // map which, for each movable object, records the slot that
263   // points to it. Upon moving the object, that slot needs to be
264   // updated.
265   //
266   // (TODO: consider in-place updating schemes.)
267   HashMap<MovableReference, MovableReference*> m_fixups;
268
269   // Map from movable reference to callbacks that need to be invoked
270   // when the object moves.
271   HashMap<MovableReference, std::pair<void*, MovingObjectCallback>>
272       m_fixupCallbacks;
273
274   // Slot => relocated slot/final location.
275   HashMap<MovableReference*, Address> m_interiorFixups;
276
277   // All pages that are being compacted.
278   HashSet<BasePage*> m_relocatablePages;
279
280   std::unique_ptr<SparseHeapBitmap> m_interiors;
281
282   // Each heap/arena may have additional slots pointing into it,
283   // which must be fixed up & relocated after compaction has happened.
284   //
285   // This is currently not needed for Blink, but functionality is kept
286   // around to be able to support this should the need arise.
287   using ExternalRelocations =
288       HashMap<MovableReference, Vector<MovableReference*>>;
289
290   HashMap<NormalPageArena*, ExternalRelocations> m_externalRelocations;
291 };
292
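As a summary of the lifecycle MovableObjectFixups implements, a hedged sketch in standalone C++ (FixupsSketch and its members are hypothetical; in the real code, registration happens via registerMovingObjectReference() during marking and relocation during compaction, as seen below):

#include <cassert>
#include <map>

// Hypothetical stand-in for MovableObjectFixups, reduced to the regular
// (non-interior) slot case.
class FixupsSketch {
 public:
  void add(void** slot) { fixups_[*slot] = slot; }  // Object -> its slot.
  void relocate(void* from, void* to) {
    auto it = fixups_.find(from);
    assert(it != fixups_.end());
    void** slot = it->second;
    if (*slot != from)
      return;    // Slot cleared or mutated since registration; skip.
    *slot = to;  // Point the slot at the object's new location.
  }

 private:
  std::map<void*, void**> fixups_;
};

int main() {
  char backing[32] = {};
  void* slot = backing;  // A reference to a movable backing store.
  FixupsSketch fixups;
  fixups.add(&slot);                       // Registered during marking.
  char movedBacking[32] = {};
  fixups.relocate(backing, movedBacking);  // Compaction moved the store.
  assert(slot == movedBacking);
}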
293 HeapCompact::HeapCompact()
294     : m_doCompact(false),
295       m_gcCountSinceLastCompaction(0),
296       m_threadCount(0),
297       m_freeListSize(0),
298       m_compactableArenas(0u),
299       m_freedPages(0),
300       m_freedSize(0)
301 #if DEBUG_LOG_HEAP_COMPACTION_RUNNING_TIME
302       ,
303       m_startCompaction(0),
304       m_startCompactionTimeMS(0)
305 #endif
306 {
307 }
308
309 HeapCompact::~HeapCompact() {}
310
311 HeapCompact::MovableObjectFixups& HeapCompact::fixups() {
312   if (!m_fixups)
313     m_fixups = MovableObjectFixups::create();
314   return *m_fixups;
315 }
316
317 // shouldCompact() is called when a GC is initiated
318 // (by ThreadState::collectGarbage()), checking if there's sufficient
319 // reason to do a compaction pass on completion of the GC (but before
320 // lazy sweeping), and that this can be safely done (i.e., it is not a
321 // conservative GC.)
322 //
323 // TODO(sof): reconsider what is an effective policy for when compaction
324 // is required. Both in terms of frequency and freelist residency.
325 bool HeapCompact::shouldCompact(ThreadState* state,
326                                 BlinkGC::GCType gcType,
327                                 BlinkGC::GCReason reason) {
328 #if !ENABLE_HEAP_COMPACTION
329   return false;
330 #else
331   if (!RuntimeEnabledFeatures::heapCompactionEnabled())
332     return false;
333
334   LOG_HEAP_COMPACTION("shouldCompact(): gc=%s count=%zu free=%zu\n",
335                       ThreadState::gcReasonString(reason),
336                       m_gcCountSinceLastCompaction, m_freeListSize);
337   m_gcCountSinceLastCompaction++;
338   // It is only safe to compact during non-conservative GCs.
339   // TODO: for the main thread, limit this further to only idle GCs.
340   if (reason != BlinkGC::IdleGC && reason != BlinkGC::PreciseGC &&
341       reason != BlinkGC::ForcedGC)
342     return false;
343
344   const ThreadHeap& heap = state->heap();
345   // If any of the participating threads require a stack scan,
346   // do not compact.
347   //
348   // Why? Should the stack contain an iterator pointing into its
349   // associated backing store, its references wouldn't be
350   // correctly relocated.
351   for (ThreadState* state : heap.threads()) {
352     if (state->stackState() == BlinkGC::HeapPointersOnStack) {
353       return false;
354     }
355   }
356
357   // Compaction enable rules:
358   //  - It's been a while since the last time.
359   //  - "Considerable" amount of heap memory is bound up in freelist
360   //    allocations. For now, use a fixed limit irrespective of heap
361   //    size.
362   //
363   // As this isn't compacting all arenas, the cost of doing compaction
364   // isn't a worry as it will additionally only be done by idle GCs.
365   // TODO: add some form of compaction overhead estimate to the marking
366   // time estimate.
367
368   updateHeapResidency(state);
haraken 2016/12/06 13:30:39: I still think that updateHeapResidency should be a…
sof 2016/12/06 21:39:34: I'm keeping it here; ThreadState shouldn't keep co…
369
370 #if STRESS_TEST_HEAP_COMPACTION
371   // Exercise the handling of object movement by compacting as
372   // often as possible.
373   return true;
374 #else
375   return s_forceCompactionGC ||
376          (m_gcCountSinceLastCompaction > kGCCountSinceLastCompactionThreshold &&
377           m_freeListSize > kFreeListSizeThreshold);
378 #endif
379 #endif
380 }
381
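On the stack-scan restriction above: a pointer like |it| below lives on the stack (or in a register), so no fixup table knows about it; if compaction moved the vector's buffer mid-iteration, the pointer would be left dangling. A hypothetical illustration, not Blink code:

#include <vector>

int sumSketch(const std::vector<int>& v) {
  int sum = 0;
  // If a compacting GC moved the buffer behind v.data() while this loop
  // runs, |it| would still point at the old buffer: unlike a registered
  // slot, a stack-held pointer cannot be found and rewritten.
  for (const int* it = v.data(); it != v.data() + v.size(); ++it)
    sum += *it;
  return sum;
}

int main() {
  std::vector<int> v = {1, 2, 3};
  return sumSketch(v) == 6 ? 0 : 1;
}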
382 // checkIfCompacting() is called when a GC is initiated
383 // (by ThreadState::collectGarbage()), checking if there's sufficient
384 // reason to do a compaction pass on completion of the GC (but before
385 // lazy sweeping), and that this can be safely done (i.e., it is not a
386 // conservative GC.)
387 //
388 // TODO(sof): reconsider what is an effective policy for when compaction
389 // is required. Both in terms of frequency and freelist residency.
haraken 2016/12/06 13:30:38: Remove the comment.
sof 2016/12/06 21:39:34: Done.
390 BlinkGC::GCType HeapCompact::initialize(ThreadState* state) {
391   DCHECK(RuntimeEnabledFeatures::heapCompactionEnabled());
392   LOG_HEAP_COMPACTION("Compacting: free=%zu\n", m_freeListSize);
393   m_doCompact = true;
394   m_freedPages = 0;
395   m_freedSize = 0;
396   m_threadCount = state->heap().threads().size();
397   m_fixups.reset();
398   m_gcCountSinceLastCompaction = 0;
399   s_forceCompactionGC = false;
400   return BlinkGC::GCWithSweepCompaction;
401 }
402
403 void HeapCompact::registerMovingObjectReference(MovableReference* slot) {
404   if (!m_doCompact)
405     return;
406
407   fixups().add(slot);
408 }
409
410 void HeapCompact::registerMovingObjectCallback(MovableReference reference,
411                                                MovingObjectCallback callback,
412                                                void* callbackData) {
413   if (!m_doCompact)
414     return;
415
416   fixups().addFixupCallback(reference, callback, callbackData);
417 }
418
419 void HeapCompact::registerRelocation(MovableReference* slot) {
420   if (!m_doCompact)
421     return;
422
423   fixups().addRelocation(slot);
424 }
425
426 void HeapCompact::updateHeapResidency(ThreadState* threadState) {
427   // The heap compaction implementation assumes the contiguous range,
428   //
429   //   [Vector1ArenaIndex, HashTableArenaIndex]
430   //
431   // in a few spots. Use static asserts here to not have that assumption
432   // be silently invalidated by ArenaIndices changes.
433   static_assert(BlinkGC::Vector1ArenaIndex + 3 == BlinkGC::Vector4ArenaIndex,
434                 "unexpected ArenaIndices ordering");
435   static_assert(
436       BlinkGC::Vector4ArenaIndex + 1 == BlinkGC::InlineVectorArenaIndex,
437       "unexpected ArenaIndices ordering");
438   static_assert(
439       BlinkGC::InlineVectorArenaIndex + 1 == BlinkGC::HashTableArenaIndex,
440       "unexpected ArenaIndices ordering");
441
442   size_t totalArenaSize = 0;
443   size_t totalFreeListSize = 0;
444
445   m_compactableArenas = 0;
446 #if DEBUG_HEAP_FREELIST
447   LOG_HEAP_FREELIST("Arena residencies: {");
448 #endif
449   for (int i = BlinkGC::Vector1ArenaIndex; i <= BlinkGC::HashTableArenaIndex;
450        ++i) {
451     NormalPageArena* arena =
452         static_cast<NormalPageArena*>(threadState->arena(i));
453     size_t arenaSize = arena->arenaSize();
454     size_t freeListSize = arena->freeListSize();
455     totalArenaSize += arenaSize;
456     totalFreeListSize += freeListSize;
457     LOG_HEAP_FREELIST("%d: [%zu, %zu], ", i, arenaSize, freeListSize);
458     // TODO: be more discriminating and consider arena
459     // load factor, effectiveness of past compactions etc.
460     if (!arenaSize)
461       continue;
462     // Mark the arena as compactable.
463     m_compactableArenas |= (0x1u << (BlinkGC::Vector1ArenaIndex + i));
464   }
465   LOG_HEAP_FREELIST("}\nTotal = %zu, Free = %zu\n", totalArenaSize,
466                     totalFreeListSize);
467
468   // TODO(sof): consider smoothing the reported sizes.
469   m_freeListSize = totalFreeListSize;
470 }
471
472 void HeapCompact::finishedArenaCompaction(NormalPageArena* arena,
473                                           size_t freedPages,
474                                           size_t freedSize) {
475   if (!m_doCompact)
476     return;
477
478   fixups().fixupExternalRelocations(arena);
479   m_freedPages += freedPages;
480   m_freedSize += freedSize;
481 }
482
483 void HeapCompact::relocate(Address from, Address to) {
484   DCHECK(m_fixups);
485   m_fixups->relocate(from, to);
486 }
487
488 void HeapCompact::startThreadCompaction(ThreadState*) {
489   if (!m_doCompact)
490     return;
491 #if DEBUG_LOG_HEAP_COMPACTION_RUNNING_TIME
492   if (!atomicTestAndSetToOne(&m_startCompaction))
493     m_startCompactionTimeMS = WTF::currentTimeMS();
494 #endif
495 }
496
497 void HeapCompact::finishedThreadCompaction(ThreadState*) {
498   if (!m_doCompact)
499     return;
500
501   MutexLocker locker(m_mutex);
502   // Final one clears out.
503   if (!--m_threadCount) {
504 #if DEBUG_HEAP_COMPACTION
505     if (m_fixups)
506       m_fixups->dumpDebugStats();
507 #endif
508     m_fixups.reset();
509     m_doCompact = false;
510 #if DEBUG_LOG_HEAP_COMPACTION_RUNNING_TIME
511     double end = WTF::currentTimeMS();
512     LOG_HEAP_COMPACTION_INTERNAL(
513         "Compaction stats: time=%gms, pages freed=%zu, size=%zu\n",
514         end - m_startCompactionTimeMS, m_freedPages, m_freedSize);
515     m_startCompaction = 0;
516     m_startCompactionTimeMS = 0;
517 #else
518     LOG_HEAP_COMPACTION("Compaction stats: freed pages=%zu size=%zu\n",
519                         m_freedPages, m_freedSize);
520 #endif
521     // All compaction has completed, all participating threads may now
522     // proceed.
523     m_finished.broadcast();
524   } else {
525     // Letting a thread return to leave GC and become a "mutator" again
526     // runs the risk of it accessing heaps of other threads that are
527     // still being compacted. Consequently, all GC-participating threads
528     // must complete compaction together.
529     m_finished.wait(m_mutex);
530   }
531 }
532
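The synchronization in finishedThreadCompaction() is a countdown barrier: the last thread through clears the compaction state and releases everyone else. A hedged sketch of the same shape using standard C++ primitives (std::mutex and std::condition_variable in place of WTF's MutexLocker and ThreadCondition; the wait predicate additionally guards against spurious wakeups):

#include <condition_variable>
#include <mutex>
#include <thread>
#include <vector>

class CompactionBarrierSketch {
 public:
  explicit CompactionBarrierSketch(int threads) : count_(threads) {}

  // Mirrors finishedThreadCompaction(): the final thread does the cleanup
  // and broadcasts; the others block until it has.
  void finishedThreadCompaction() {
    std::unique_lock<std::mutex> lock(mutex_);
    if (!--count_) {
      // ...reset fixups, log stats...
      finished_.notify_all();
    } else {
      finished_.wait(lock, [this] { return count_ == 0; });
    }
  }

 private:
  std::mutex mutex_;
  std::condition_variable finished_;
  int count_;
};

int main() {
  CompactionBarrierSketch barrier(4);
  std::vector<std::thread> threads;
  for (int i = 0; i < 4; ++i)
    threads.emplace_back([&] { barrier.finishedThreadCompaction(); });
  for (auto& t : threads)
    t.join();
}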
533 void HeapCompact::addCompactablePage(BasePage* page) {
534   if (!m_doCompact)
535     return;
536   fixups().addCompactablePage(page);
537 }
538
539 bool HeapCompact::scheduleCompactionGCForTesting(bool value) {
540   bool current = s_forceCompactionGC;
541   s_forceCompactionGC = value;
542   return current;
543 }
544
545 }  // namespace blink