OLD | NEW |
1 // Copyright 2016 The Chromium Authors. All rights reserved. | 1 // Copyright 2016 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "platform/heap/HeapCompact.h" | 5 #include "platform/heap/HeapCompact.h" |
6 | 6 |
7 #include "platform/Histogram.h" | 7 #include "platform/Histogram.h" |
8 #include "platform/RuntimeEnabledFeatures.h" | 8 #include "platform/RuntimeEnabledFeatures.h" |
9 #include "platform/heap/Heap.h" | 9 #include "platform/heap/Heap.h" |
10 #include "platform/heap/SparseHeapBitmap.h" | 10 #include "platform/heap/SparseHeapBitmap.h" |
(...skipping 148 matching lines...)
159 DCHECK(relocatable_pages_.Contains(from_page)); | 159 DCHECK(relocatable_pages_.Contains(from_page)); |
160 #endif | 160 #endif |
161 MovableReference* slot = reinterpret_cast<MovableReference*>(it->value); | 161 MovableReference* slot = reinterpret_cast<MovableReference*>(it->value); |
162 auto interior = interior_fixups_.find(slot); | 162 auto interior = interior_fixups_.find(slot); |
163 if (interior != interior_fixups_.end()) { | 163 if (interior != interior_fixups_.end()) { |
164 MovableReference* slot_location = | 164 MovableReference* slot_location = |
165 reinterpret_cast<MovableReference*>(interior->value); | 165 reinterpret_cast<MovableReference*>(interior->value); |
166 if (!slot_location) { | 166 if (!slot_location) { |
167 interior_fixups_.Set(slot, to); | 167 interior_fixups_.Set(slot, to); |
168 } else { | 168 } else { |
169 LOG_HEAP_COMPACTION("Redirected slot: %p => %p\n", slot, slotLocation); | 169 LOG_HEAP_COMPACTION("Redirected slot: %p => %p\n", slot, slot_location); |
170 slot = slot_location; | 170 slot = slot_location; |
171 } | 171 } |
172 } | 172 } |
173 // If the slot has subsequently been updated, a prefinalizer or | 173 // If the slot has subsequently been updated (a prefinalizer or |
174 // a destructor having mutated and expanded/shrunk the collection, | 174 // a destructor having mutated and expanded/shrunk the collection), |
175 // do not update and relocate the slot -- |from| is no longer valid | 175 // do not update or relocate the slot; |from| is no longer valid |
176 // and referenced. | 176 // or referenced. |
177 // | 177 // |
178 // The slot's contents may also have been cleared during weak processing; | 178 // The slot's contents may also have been cleared during weak processing; |
179 // no work to be done in that case either. | 179 // no work to be done in that case either. |
(...skipping 33 matching lines...)
213 if (!size) | 213 if (!size) |
214 size = HeapObjectHeader::FromPayload(to)->PayloadSize(); | 214 size = HeapObjectHeader::FromPayload(to)->PayloadSize(); |
215 RelocateInteriorFixups(from, to, size); | 215 RelocateInteriorFixups(from, to, size); |
216 } | 216 } |
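The comment at new lines 173-179 carries the safety argument for this step: between registration and relocation, a prefinalizer, destructor, or weak-processing pass may have retargeted or cleared the slot, so Relocate() must re-validate it before writing. The guard itself sits in the lines elided above; a minimal sketch of the idea, reusing |slot|, |from|, and |to| from Relocate() (an approximation, not the verbatim elided code):

    // Sketch only: relocate the reference iff the slot still points at the
    // old backing store; a cleared or retargeted slot is left untouched.
    // Casts between Address and MovableReference are elided for brevity.
    if (*slot != from)
      return;
    *slot = to;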
217 | 217 |
218 #if DEBUG_HEAP_COMPACTION | 218 #if DEBUG_HEAP_COMPACTION |
219 void dumpDebugStats() { | 219 void dumpDebugStats() { |
220 LOG_HEAP_COMPACTION( | 220 LOG_HEAP_COMPACTION( |
221 "Fixups: pages=%u objects=%u callbacks=%u interior-size=%zu" | 221 "Fixups: pages=%u objects=%u callbacks=%u interior-size=%zu" |
222 " interiors-f=%u\n", | 222 " interiors-f=%u\n", |
223 m_relocatablePages.size(), m_fixups.size(), m_fixupCallbacks.size(), | 223 relocatable_pages_.size(), fixups_.size(), fixup_callbacks_.size(), |
224 m_interiors ? m_interiors->intervalCount() : 0, | 224 interiors_ ? interiors_->IntervalCount() : 0, interior_fixups_.size()); |
225 m_interiorFixups.size()); | |
226 } | 225 } |
227 #endif | 226 #endif |
228 | 227 |
229 private: | 228 private: |
230 MovableObjectFixups() {} | 229 MovableObjectFixups() {} |
231 | 230 |
232 // Tracking movable and updatable references. For now, we keep a | 231 // Tracking movable and updatable references. For now, we keep a |
233 // map which for each movable object, recording the slot that | 232 // map which, for each movable object, records the slot that |
234 // points to it. Upon moving the object, that slot needs to be | 233 // points to it. Upon moving the object, that slot needs to be |
235 // updated. | 234 // updated. |
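The declarations this comment introduces are elided from the diff, but the names fixups_ and interior_fixups_ are used throughout the patch. A hedged approximation of their shape, consistent with how they are accessed above (the exact WTF container types are an assumption):

    // Sketch, not the elided declarations: one registered slot per movable
    // backing store, plus interior slots resolved lazily during compaction.
    HashMap<MovableReference, MovableReference*> fixups_;
    HashMap<MovableReference*, Address> interior_fixups_;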
(...skipping 52 matching lines...)
288 bool HeapCompact::ShouldCompact(ThreadState* state, | 287 bool HeapCompact::ShouldCompact(ThreadState* state, |
289 BlinkGC::GCType gc_type, | 288 BlinkGC::GCType gc_type, |
290 BlinkGC::GCReason reason) { | 289 BlinkGC::GCReason reason) { |
291 #if !ENABLE_HEAP_COMPACTION | 290 #if !ENABLE_HEAP_COMPACTION |
292 return false; | 291 return false; |
293 #else | 292 #else |
294 if (!RuntimeEnabledFeatures::heapCompactionEnabled()) | 293 if (!RuntimeEnabledFeatures::heapCompactionEnabled()) |
295 return false; | 294 return false; |
296 | 295 |
297 LOG_HEAP_COMPACTION("shouldCompact(): gc=%s count=%zu free=%zu\n", | 296 LOG_HEAP_COMPACTION("shouldCompact(): gc=%s count=%zu free=%zu\n", |
298 ThreadState::gcReasonString(reason), | 297 ThreadState::GcReasonString(reason), |
299 m_gcCountSinceLastCompaction, m_freeListSize); | 298 gc_count_since_last_compaction_, free_list_size_); |
300 gc_count_since_last_compaction_++; | 299 gc_count_since_last_compaction_++; |
301 // It is only safe to compact during non-conservative GCs. | 300 // It is only safe to compact during non-conservative GCs. |
302 // TODO: for the main thread, limit this further to only idle GCs. | 301 // TODO: for the main thread, limit this further to only idle GCs. |
303 if (reason != BlinkGC::kIdleGC && reason != BlinkGC::kPreciseGC && | 302 if (reason != BlinkGC::kIdleGC && reason != BlinkGC::kPreciseGC && |
304 reason != BlinkGC::kForcedGC) | 303 reason != BlinkGC::kForcedGC) |
305 return false; | 304 return false; |
306 | 305 |
307 // If the GCing thread requires a stack scan, do not compact. | 306 // If the GCing thread requires a stack scan, do not compact. |
308 // Why? Should the stack contain an iterator pointing into its | 307 // Why? Should the stack contain an iterator pointing into its |
309 // associated backing store, its references wouldn't be | 308 // associated backing store, its references wouldn't be |
(...skipping 21 matching lines...)
331 #else | 330 #else |
332 return force_compaction_gc_ || (gc_count_since_last_compaction_ > | 331 return force_compaction_gc_ || (gc_count_since_last_compaction_ > |
333 kGCCountSinceLastCompactionThreshold && | 332 kGCCountSinceLastCompactionThreshold && |
334 free_list_size_ > kFreeListSizeThreshold); | 333 free_list_size_ > kFreeListSizeThreshold); |
335 #endif | 334 #endif |
336 #endif | 335 #endif |
337 } | 336 } |
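Taken together, the checks above reduce to: compact only for idle, precise, or forced GCs on threads that need no stack scan, and then only when a compaction has been explicitly forced or enough GCs have passed with a sufficiently large free list. Restated as a standalone predicate, with the two threshold constants treated as assumptions (their definitions are elided from this diff):

    // Hedged restatement of the trigger condition; the threshold values
    // below are assumptions for illustration, not taken from the patch.
    constexpr int kGCCountSinceLastCompactionThreshold = 10;  // assumed
    constexpr size_t kFreeListSizeThreshold = 512 * 1024;     // assumed

    bool ShouldTriggerCompaction(bool forced, int gc_count, size_t free_bytes) {
      return forced || (gc_count > kGCCountSinceLastCompactionThreshold &&
                        free_bytes > kFreeListSizeThreshold);
    }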
338 | 337 |
339 void HeapCompact::Initialize(ThreadState* state) { | 338 void HeapCompact::Initialize(ThreadState* state) { |
340 DCHECK(RuntimeEnabledFeatures::heapCompactionEnabled()); | 339 DCHECK(RuntimeEnabledFeatures::heapCompactionEnabled()); |
341 LOG_HEAP_COMPACTION("Compacting: free=%zu\n", m_freeListSize); | 340 LOG_HEAP_COMPACTION("Compacting: free=%zu\n", free_list_size_); |
342 do_compact_ = true; | 341 do_compact_ = true; |
343 freed_pages_ = 0; | 342 freed_pages_ = 0; |
344 freed_size_ = 0; | 343 freed_size_ = 0; |
345 fixups_.reset(); | 344 fixups_.reset(); |
346 gc_count_since_last_compaction_ = 0; | 345 gc_count_since_last_compaction_ = 0; |
347 force_compaction_gc_ = false; | 346 force_compaction_gc_ = false; |
348 } | 347 } |
349 | 348 |
350 void HeapCompact::RegisterMovingObjectReference(MovableReference* slot) { | 349 void HeapCompact::RegisterMovingObjectReference(MovableReference* slot) { |
351 if (!do_compact_) | 350 if (!do_compact_) |
(...skipping 20 matching lines...)
372 LOG_HEAP_FREELIST("Arena residencies: {"); | 371 LOG_HEAP_FREELIST("Arena residencies: {"); |
373 #endif | 372 #endif |
374 for (int i = BlinkGC::kVector1ArenaIndex; i <= BlinkGC::kHashTableArenaIndex; | 373 for (int i = BlinkGC::kVector1ArenaIndex; i <= BlinkGC::kHashTableArenaIndex; |
375 ++i) { | 374 ++i) { |
376 NormalPageArena* arena = | 375 NormalPageArena* arena = |
377 static_cast<NormalPageArena*>(thread_state->Arena(i)); | 376 static_cast<NormalPageArena*>(thread_state->Arena(i)); |
378 size_t arena_size = arena->ArenaSize(); | 377 size_t arena_size = arena->ArenaSize(); |
379 size_t free_list_size = arena->FreeListSize(); | 378 size_t free_list_size = arena->FreeListSize(); |
380 total_arena_size += arena_size; | 379 total_arena_size += arena_size; |
381 total_free_list_size += free_list_size; | 380 total_free_list_size += free_list_size; |
382 LOG_HEAP_FREELIST("%d: [%zu, %zu], ", i, arenaSize, freeListSize); | 381 LOG_HEAP_FREELIST("%d: [%zu, %zu], ", i, arena_size, free_list_size); |
383 // TODO: be more discriminating and consider arena | 382 // TODO: be more discriminating and consider arena |
384 // load factor, effectiveness of past compactions etc. | 383 // load factor, effectiveness of past compactions etc. |
385 if (!arena_size) | 384 if (!arena_size) |
386 continue; | 385 continue; |
387 // Mark the arena as compactable. | 386 // Mark the arena as compactable. |
388 compactable_arenas_ |= (0x1u << (BlinkGC::kVector1ArenaIndex + i)); | 387 compactable_arenas_ |= (0x1u << i); |
389 } | 388 } |
390 LOG_HEAP_FREELIST("}\nTotal = %zu, Free = %zu\n", totalArenaSize, | 389 LOG_HEAP_FREELIST("}\nTotal = %zu, Free = %zu\n", total_arena_size, |
391 totalFreeListSize); | 390 total_free_list_size); |
392 | 391 |
393 // TODO(sof): consider smoothing the reported sizes. | 392 // TODO(sof): consider smoothing the reported sizes. |
394 free_list_size_ = total_free_list_size; | 393 free_list_size_ = total_free_list_size; |
395 } | 394 } |
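UpdateHeapResidency() records each compactable arena as a bit in compactable_arenas_, keyed by the absolute arena index (|i| already starts at BlinkGC::kVector1ArenaIndex); AddCompactingPage() below DCHECKs the same property via IsCompactingArena(page->Arena()->ArenaIndex()). The accessor is elided from this diff; a sketch consistent with that bit layout:

    // Sketch of the elided bit test: an arena participates in compaction
    // iff the bit for its absolute arena index was set by UpdateHeapResidency.
    bool IsCompactingArena(int arena_index) const {
      return compactable_arenas_ & (0x1u << arena_index);
    }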
396 | 395 |
397 void HeapCompact::FinishedArenaCompaction(NormalPageArena* arena, | 396 void HeapCompact::FinishedArenaCompaction(NormalPageArena* arena, |
398 size_t freed_pages, | 397 size_t freed_pages, |
399 size_t freed_size) { | 398 size_t freed_size) { |
400 if (!do_compact_) | 399 if (!do_compact_) |
401 return; | 400 return; |
(...skipping 13 matching lines...)
415 | 414 |
416 if (!start_compaction_time_ms_) | 415 if (!start_compaction_time_ms_) |
417 start_compaction_time_ms_ = WTF::CurrentTimeMS(); | 416 start_compaction_time_ms_ = WTF::CurrentTimeMS(); |
418 } | 417 } |
419 | 418 |
420 void HeapCompact::FinishThreadCompaction() { | 419 void HeapCompact::FinishThreadCompaction() { |
421 if (!do_compact_) | 420 if (!do_compact_) |
422 return; | 421 return; |
423 | 422 |
424 #if DEBUG_HEAP_COMPACTION | 423 #if DEBUG_HEAP_COMPACTION |
425 if (m_fixups) | 424 if (fixups_) |
426 m_fixups->dumpDebugStats(); | 425 fixups_->dumpDebugStats(); |
427 #endif | 426 #endif |
428 fixups_.reset(); | 427 fixups_.reset(); |
429 do_compact_ = false; | 428 do_compact_ = false; |
430 | 429 |
431 double time_for_heap_compaction = | 430 double time_for_heap_compaction = |
432 WTF::CurrentTimeMS() - start_compaction_time_ms_; | 431 WTF::CurrentTimeMS() - start_compaction_time_ms_; |
433 DEFINE_THREAD_SAFE_STATIC_LOCAL( | 432 DEFINE_THREAD_SAFE_STATIC_LOCAL( |
434 CustomCountHistogram, time_for_heap_compaction_histogram, | 433 CustomCountHistogram, time_for_heap_compaction_histogram, |
435 new CustomCountHistogram("BlinkGC.TimeForHeapCompaction", 1, 10 * 1000, | 434 new CustomCountHistogram("BlinkGC.TimeForHeapCompaction", 1, 10 * 1000, |
436 50)); | 435 50)); |
437 time_for_heap_compaction_histogram.Count(time_for_heap_compaction); | 436 time_for_heap_compaction_histogram.Count(time_for_heap_compaction); |
438 start_compaction_time_ms_ = 0; | 437 start_compaction_time_ms_ = 0; |
439 | 438 |
440 DEFINE_THREAD_SAFE_STATIC_LOCAL( | 439 DEFINE_THREAD_SAFE_STATIC_LOCAL( |
441 CustomCountHistogram, object_size_freed_by_heap_compaction, | 440 CustomCountHistogram, object_size_freed_by_heap_compaction, |
442 new CustomCountHistogram("BlinkGC.ObjectSizeFreedByHeapCompaction", 1, | 441 new CustomCountHistogram("BlinkGC.ObjectSizeFreedByHeapCompaction", 1, |
443 4 * 1024 * 1024, 50)); | 442 4 * 1024 * 1024, 50)); |
444 object_size_freed_by_heap_compaction.Count(freed_size_ / 1024); | 443 object_size_freed_by_heap_compaction.Count(freed_size_ / 1024); |
445 | 444 |
446 #if DEBUG_LOG_HEAP_COMPACTION_RUNNING_TIME | 445 #if DEBUG_LOG_HEAP_COMPACTION_RUNNING_TIME |
447 LOG_HEAP_COMPACTION_INTERNAL( | 446 LOG_HEAP_COMPACTION_INTERNAL( |
448 "Compaction stats: time=%gms, pages freed=%zu, size=%zu\n", | 447 "Compaction stats: time=%gms, pages freed=%zu, size=%zu\n", |
449 timeForHeapCompaction, m_freedPages, m_freedSize); | 448 time_for_heap_compaction, freed_pages_, freed_size_); |
450 #else | 449 #else |
451 LOG_HEAP_COMPACTION("Compaction stats: freed pages=%zu size=%zu\n", | 450 LOG_HEAP_COMPACTION("Compaction stats: freed pages=%zu size=%zu\n", |
452 m_freedPages, m_freedSize); | 451 freed_pages_, freed_size_); |
453 #endif | 452 #endif |
454 } | 453 } |
455 | 454 |
456 void HeapCompact::AddCompactingPage(BasePage* page) { | 455 void HeapCompact::AddCompactingPage(BasePage* page) { |
457 DCHECK(do_compact_); | 456 DCHECK(do_compact_); |
458 DCHECK(IsCompactingArena(page->Arena()->ArenaIndex())); | 457 DCHECK(IsCompactingArena(page->Arena()->ArenaIndex())); |
459 Fixups().AddCompactingPage(page); | 458 Fixups().AddCompactingPage(page); |
460 } | 459 } |
461 | 460 |
462 bool HeapCompact::ScheduleCompactionGCForTesting(bool value) { | 461 bool HeapCompact::ScheduleCompactionGCForTesting(bool value) { |
463 bool current = force_compaction_gc_; | 462 bool current = force_compaction_gc_; |
464 force_compaction_gc_ = value; | 463 force_compaction_gc_ = value; |
465 return current; | 464 return current; |
466 } | 465 } |
467 | 466 |
468 } // namespace blink | 467 } // namespace blink |