Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 99 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 110 | 110 |
| 111 void ProcessHeap::init() | 111 void ProcessHeap::init() |
| 112 { | 112 { |
| 113 s_shutdownComplete = false; | 113 s_shutdownComplete = false; |
| 114 s_totalAllocatedSpace = 0; | 114 s_totalAllocatedSpace = 0; |
| 115 s_totalAllocatedObjectSize = 0; | 115 s_totalAllocatedObjectSize = 0; |
| 116 s_totalMarkedObjectSize = 0; | 116 s_totalMarkedObjectSize = 0; |
| 117 s_isLowEndDevice = base::SysInfo::IsLowEndDevice(); | 117 s_isLowEndDevice = base::SysInfo::IsLowEndDevice(); |
| 118 | 118 |
| 119 GCInfoTable::init(); | 119 GCInfoTable::init(); |
| 120 CallbackStackMemoryPool::instance().initialize(); | |
| 120 } | 121 } |
| 121 | 122 |
| 122 void ProcessHeap::resetHeapCounters() | 123 void ProcessHeap::resetHeapCounters() |
| 123 { | 124 { |
| 124 s_totalAllocatedObjectSize = 0; | 125 s_totalAllocatedObjectSize = 0; |
| 125 s_totalMarkedObjectSize = 0; | 126 s_totalMarkedObjectSize = 0; |
| 126 } | 127 } |
| 127 | 128 |
| 128 void ProcessHeap::shutdown() | 129 void ProcessHeap::shutdown() |
| 129 { | 130 { |
| 130 ASSERT(!s_shutdownComplete); | 131 ASSERT(!s_shutdownComplete); |
| 131 | 132 |
| 132 { | 133 { |
| 133 // The main thread must be the last thread that gets detached. | 134 // The main thread must be the last thread that gets detached. |
| 134 MutexLocker locker(ThreadHeap::allHeapsMutex()); | 135 MutexLocker locker(ThreadHeap::allHeapsMutex()); |
| 135 RELEASE_ASSERT(ThreadHeap::allHeaps().isEmpty()); | 136 RELEASE_ASSERT(ThreadHeap::allHeaps().isEmpty()); |
| 136 } | 137 } |
| 137 | 138 |
| 139 CallbackStackMemoryPool::instance().shutdown(); | |
| 138 GCInfoTable::shutdown(); | 140 GCInfoTable::shutdown(); |
| 139 ASSERT(ProcessHeap::totalAllocatedSpace() == 0); | 141 ASSERT(ProcessHeap::totalAllocatedSpace() == 0); |
| 140 s_shutdownComplete = true; | 142 s_shutdownComplete = true; |
| 141 } | 143 } |
| 142 | 144 |
| 143 CrossThreadPersistentRegion& ProcessHeap::crossThreadPersistentRegion() | 145 CrossThreadPersistentRegion& ProcessHeap::crossThreadPersistentRegion() |
| 144 { | 146 { |
| 145 DEFINE_THREAD_SAFE_STATIC_LOCAL(CrossThreadPersistentRegion, persistentRegion, new CrossThreadPersistentRegion()); | 147 DEFINE_THREAD_SAFE_STATIC_LOCAL(CrossThreadPersistentRegion, persistentRegion, new CrossThreadPersistentRegion()); |
| 146 return persistentRegion; | 148 return persistentRegion; |
| 147 } | 149 } |
| (...skipping 70 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 218 atomicSubtract(&m_allocatedSpace, static_cast<long>(delta)); | 220 atomicSubtract(&m_allocatedSpace, static_cast<long>(delta)); |
| 219 ProcessHeap::decreaseTotalAllocatedSpace(delta); | 221 ProcessHeap::decreaseTotalAllocatedSpace(delta); |
| 220 } | 222 } |
| 221 | 223 |
| 222 ThreadHeap::ThreadHeap() | 224 ThreadHeap::ThreadHeap() |
| 223 : m_regionTree(wrapUnique(new RegionTree())) | 225 : m_regionTree(wrapUnique(new RegionTree())) |
| 224 , m_heapDoesNotContainCache(wrapUnique(new HeapDoesNotContainCache)) | 226 , m_heapDoesNotContainCache(wrapUnique(new HeapDoesNotContainCache)) |
| 225 , m_safePointBarrier(wrapUnique(new SafePointBarrier())) | 227 , m_safePointBarrier(wrapUnique(new SafePointBarrier())) |
| 226 , m_freePagePool(wrapUnique(new FreePagePool)) | 228 , m_freePagePool(wrapUnique(new FreePagePool)) |
| 227 , m_orphanedPagePool(wrapUnique(new OrphanedPagePool)) | 229 , m_orphanedPagePool(wrapUnique(new OrphanedPagePool)) |
| 228 , m_markingStack(wrapUnique(new CallbackStack())) | 230 , m_markingStack(wrapUnique(new CallbackStack())) |
|
sof
2016/07/05 14:19:02
Let's add CallbackStack::create() ?
| |
| 229 , m_postMarkingCallbackStack(wrapUnique(new CallbackStack())) | 231 , m_postMarkingCallbackStack(wrapUnique(new CallbackStack())) |
| 230 , m_globalWeakCallbackStack(wrapUnique(new CallbackStack())) | 232 , m_globalWeakCallbackStack(wrapUnique(new CallbackStack())) |
| 231 , m_ephemeronStack(wrapUnique(new CallbackStack(CallbackStack::kMinimalBlockSize))) | 233 , m_ephemeronStack(wrapUnique(new CallbackStack())) |
| 232 { | 234 { |
| 233 if (ThreadState::current()->isMainThread()) | 235 if (ThreadState::current()->isMainThread()) |
| 234 s_mainThreadHeap = this; | 236 s_mainThreadHeap = this; |
| 235 | 237 |
| 236 MutexLocker locker(ThreadHeap::allHeapsMutex()); | 238 MutexLocker locker(ThreadHeap::allHeapsMutex()); |
| 237 allHeaps().add(this); | 239 allHeaps().add(this); |
| 238 } | 240 } |
| 239 | 241 |
| 240 ThreadHeap::~ThreadHeap() | 242 ThreadHeap::~ThreadHeap() |
| 241 { | 243 { |
| (...skipping 188 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 430 } | 432 } |
| 431 | 433 |
| 432 #if ENABLE(ASSERT) | 434 #if ENABLE(ASSERT) |
| 433 bool ThreadHeap::weakTableRegistered(const void* table) | 435 bool ThreadHeap::weakTableRegistered(const void* table) |
| 434 { | 436 { |
| 435 ASSERT(m_ephemeronStack); | 437 ASSERT(m_ephemeronStack); |
| 436 return m_ephemeronStack->hasCallbackForObject(table); | 438 return m_ephemeronStack->hasCallbackForObject(table); |
| 437 } | 439 } |
| 438 #endif | 440 #endif |
| 439 | 441 |
| 442 void ThreadHeap::commitCallbackStacks() | |
| 443 { | |
| 444 m_markingStack->commit(); | |
| 445 m_postMarkingCallbackStack->commit(); | |
| 446 m_globalWeakCallbackStack->commit(); | |
| 447 m_ephemeronStack->commit(); | |
| 448 } | |
| 449 | |
| 440 void ThreadHeap::decommitCallbackStacks() | 450 void ThreadHeap::decommitCallbackStacks() |
| 441 { | 451 { |
| 442 m_markingStack->decommit(); | 452 m_markingStack->decommit(); |
| 443 m_postMarkingCallbackStack->decommit(); | 453 m_postMarkingCallbackStack->decommit(); |
| 444 m_globalWeakCallbackStack->decommit(); | 454 m_globalWeakCallbackStack->decommit(); |
| 445 m_ephemeronStack->decommit(); | 455 m_ephemeronStack->decommit(); |
| 446 } | 456 } |
| 447 | 457 |
| 448 void ThreadHeap::preGC() | 458 void ThreadHeap::preGC() |
| 449 { | 459 { |
| (...skipping 58 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 508 TRACE_EVENT_SCOPED_SAMPLING_STATE("blink_gc", "BlinkGC"); | 518 TRACE_EVENT_SCOPED_SAMPLING_STATE("blink_gc", "BlinkGC"); |
| 509 double startTime = WTF::currentTimeMS(); | 519 double startTime = WTF::currentTimeMS(); |
| 510 | 520 |
| 511 if (gcType == BlinkGC::TakeSnapshot) | 521 if (gcType == BlinkGC::TakeSnapshot) |
| 512 BlinkGCMemoryDumpProvider::instance()->clearProcessDumpForCurrentGC(); | 522 BlinkGCMemoryDumpProvider::instance()->clearProcessDumpForCurrentGC(); |
| 513 | 523 |
| 514 // Disallow allocation during garbage collection (but not during the | 524 // Disallow allocation during garbage collection (but not during the |
| 515 // finalization that happens when the visitorScope is torn down). | 525 // finalization that happens when the visitorScope is torn down). |
| 516 ThreadState::NoAllocationScope noAllocationScope(state); | 526 ThreadState::NoAllocationScope noAllocationScope(state); |
| 517 | 527 |
| 528 state->heap().commitCallbackStacks(); | |
| 518 state->heap().preGC(); | 529 state->heap().preGC(); |
| 519 | 530 |
| 520 StackFrameDepthScope stackDepthScope; | 531 StackFrameDepthScope stackDepthScope; |
| 521 | 532 |
| 522 size_t totalObjectSize = state->heap().heapStats().allocatedObjectSize() + state->heap().heapStats().markedObjectSize(); | 533 size_t totalObjectSize = state->heap().heapStats().allocatedObjectSize() + state->heap().heapStats().markedObjectSize(); |
| 523 if (gcType != BlinkGC::TakeSnapshot) | 534 if (gcType != BlinkGC::TakeSnapshot) |
| 524 state->heap().resetHeapCounters(); | 535 state->heap().resetHeapCounters(); |
| 525 | 536 |
| 526 { | 537 { |
| 527 // Access to the CrossThreadPersistentRegion has to be prevented while | 538 // Access to the CrossThreadPersistentRegion has to be prevented while |
| (...skipping 51 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 579 { | 590 { |
| 580 { | 591 { |
| 581 // A thread-specific termination GC must not allow other global GCs to go | 592 // A thread-specific termination GC must not allow other global GCs to go |
| 582 // ahead while it is running, hence the termination GC does not enter a | 593 // ahead while it is running, hence the termination GC does not enter a |
| 583 // safepoint. VisitorScope will not enter also a safepoint scope for | 594 // safepoint. VisitorScope will not enter also a safepoint scope for |
| 584 // ThreadTerminationGC. | 595 // ThreadTerminationGC. |
| 585 std::unique_ptr<Visitor> visitor = Visitor::create(state, BlinkGC::ThreadTerminationGC); | 596 std::unique_ptr<Visitor> visitor = Visitor::create(state, BlinkGC::ThreadTerminationGC); |
| 586 | 597 |
| 587 ThreadState::NoAllocationScope noAllocationScope(state); | 598 ThreadState::NoAllocationScope noAllocationScope(state); |
| 588 | 599 |
| 600 state->heap().commitCallbackStacks(); | |
| 589 state->preGC(); | 601 state->preGC(); |
| 590 | 602 |
| 591 // 1. Trace the thread local persistent roots. For thread local GCs we | 603 // 1. Trace the thread local persistent roots. For thread local GCs we |
| 592 // don't trace the stack (ie. no conservative scanning) since this is | 604 // don't trace the stack (ie. no conservative scanning) since this is |
| 593 // only called during thread shutdown where there should be no objects | 605 // only called during thread shutdown where there should be no objects |
| 594 // on the stack. | 606 // on the stack. |
| 595 // We also assume that orphaned pages have no objects reachable from | 607 // We also assume that orphaned pages have no objects reachable from |
| 596 // persistent handles on other threads or CrossThreadPersistents. The | 608 // persistent handles on other threads or CrossThreadPersistents. The |
| 597 // only cases where this could happen is if a subsequent conservative | 609 // only cases where this could happen is if a subsequent conservative |
| 598 // global GC finds a "pointer" on the stack or due to a programming | 610 // global GC finds a "pointer" on the stack or due to a programming |
| (...skipping 198 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 797 ProcessHeap::decreaseTotalMarkedObjectSize(m_stats.markedObjectSize()); | 809 ProcessHeap::decreaseTotalMarkedObjectSize(m_stats.markedObjectSize()); |
| 798 | 810 |
| 799 m_stats.reset(); | 811 m_stats.reset(); |
| 800 for (ThreadState* state : m_threads) | 812 for (ThreadState* state : m_threads) |
| 801 state->resetHeapCounters(); | 813 state->resetHeapCounters(); |
| 802 } | 814 } |
| 803 | 815 |
| 804 ThreadHeap* ThreadHeap::s_mainThreadHeap = nullptr; | 816 ThreadHeap* ThreadHeap::s_mainThreadHeap = nullptr; |
| 805 | 817 |
| 806 } // namespace blink | 818 } // namespace blink |
| OLD | NEW |