Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 50 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 61 | 61 |
| 62 ~GCForbiddenScope() | 62 ~GCForbiddenScope() |
| 63 { | 63 { |
| 64 m_state->leaveGCForbiddenScope(); | 64 m_state->leaveGCForbiddenScope(); |
| 65 } | 65 } |
| 66 | 66 |
| 67 private: | 67 private: |
| 68 ThreadState* m_state; | 68 ThreadState* m_state; |
| 69 }; | 69 }; |
| 70 | 70 |
| 71 class GCScope final { | 71 GCData::GCData(ThreadState* state, BlinkGC::StackState stackState, BlinkGC::GCTy pe gcType) |
| 72 : m_state(state) | |
| 73 , m_markingStack(adoptPtr(new CallbackStack())) | |
| 74 , m_postMarkingCallbackStack(adoptPtr(new CallbackStack())) | |
| 75 , m_globalWeakCallbackStack(adoptPtr(new CallbackStack())) | |
| 76 , m_ephemeronStack(adoptPtr(new CallbackStack())) | |
| 77 { | |
| 78 ASSERT(m_state->checkThread()); | |
| 79 switch (gcType) { | |
| 80 case BlinkGC::GCWithSweep: | |
| 81 case BlinkGC::GCWithoutSweep: | |
| 82 m_visitor = adoptPtr(new MarkingVisitor<Visitor::GlobalMarking>(this)); | |
| 83 break; | |
| 84 case BlinkGC::TakeSnapshot: | |
| 85 m_visitor = adoptPtr(new MarkingVisitor<Visitor::SnapshotMarking>(this)) ; | |
| 86 break; | |
| 87 case BlinkGC::ThreadTerminationGC: | |
| 88 m_visitor = adoptPtr(new MarkingVisitor<Visitor::ThreadLocalMarking>(thi s)); | |
| 89 break; | |
| 90 default: | |
| 91 ASSERT_NOT_REACHED(); | |
| 92 } | |
| 93 } | |
| 94 | |
| 95 bool GCData::parkAllThreads(BlinkGC::StackState stackState, BlinkGC::GCType gcTy pe) | |
| 96 { | |
| 97 TRACE_EVENT0("blink_gc", "Heap::GCScope"); | |
| 98 const char* samplingState = TRACE_EVENT_GET_SAMPLING_STATE(); | |
| 99 if (m_state->isMainThread()) | |
| 100 TRACE_EVENT_SET_SAMPLING_STATE("blink_gc", "BlinkGCWaiting"); | |
| 101 | |
| 102 // TODO(haraken): In an unlikely coincidence that two threads decide | |
| 103 // to collect garbage at the same time, avoid doing two GCs in | |
| 104 // a row and return false. | |
| 105 double startTime = WTF::currentTimeMS(); | |
| 106 bool allParked = gcType != BlinkGC::ThreadTerminationGC && m_state->gcGroup( )->park(); | |
| 107 double timeForStoppingThreads = WTF::currentTimeMS() - startTime; | |
| 108 Platform::current()->histogramCustomCounts("BlinkGC.TimeForStoppingThreads", timeForStoppingThreads, 1, 1000, 50); | |
| 109 | |
| 110 if (m_state->isMainThread()) | |
| 111 TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(samplingState); | |
| 112 | |
| 113 return allParked; | |
| 114 } | |
| 115 | |
| 116 class GCScope final : public GCData { | |
|
haraken
2016/01/28 15:52:49
Is there any reason you want to distinguish GCScop
| |
| 72 public: | 117 public: |
| 73 GCScope(ThreadState* state, BlinkGC::StackState stackState, BlinkGC::GCType gcType) | 118 GCScope(ThreadState* state, BlinkGC::StackState stackState, BlinkGC::GCType gcType) |
| 74 : m_state(state) | 119 : GCData(state, stackState, gcType) |
| 75 , m_gcForbiddenScope(state) | 120 , m_gcForbiddenScope(state) |
| 76 { | 121 { |
| 77 ASSERT(m_state->checkThread()); | |
| 78 | |
| 79 switch (gcType) { | |
| 80 case BlinkGC::GCWithSweep: | |
| 81 case BlinkGC::GCWithoutSweep: | |
| 82 m_visitor = adoptPtr(new MarkingVisitor<Visitor::GlobalMarking>()); | |
| 83 break; | |
| 84 case BlinkGC::TakeSnapshot: | |
| 85 m_visitor = adoptPtr(new MarkingVisitor<Visitor::SnapshotMarking>()) ; | |
| 86 break; | |
| 87 case BlinkGC::ThreadTerminationGC: | |
| 88 m_visitor = adoptPtr(new MarkingVisitor<Visitor::ThreadLocalMarking> ()); | |
| 89 break; | |
| 90 default: | |
| 91 ASSERT_NOT_REACHED(); | |
| 92 } | |
| 93 } | |
| 94 | |
| 95 ~GCScope() | |
| 96 { | |
| 97 } | 122 } |
| 98 | 123 |
| 99 bool parkAllThreads(BlinkGC::StackState stackState, BlinkGC::GCType gcType) | |
| 100 { | |
| 101 TRACE_EVENT0("blink_gc", "Heap::GCScope"); | |
| 102 const char* samplingState = TRACE_EVENT_GET_SAMPLING_STATE(); | |
| 103 if (m_state->isMainThread()) | |
| 104 TRACE_EVENT_SET_SAMPLING_STATE("blink_gc", "BlinkGCWaiting"); | |
| 105 | |
| 106 // TODO(haraken): In an unlikely coincidence that two threads decide | |
| 107 // to collect garbage at the same time, avoid doing two GCs in | |
| 108 // a row and return false. | |
| 109 double startTime = WTF::currentTimeMS(); | |
| 110 bool allParked = gcType != BlinkGC::ThreadTerminationGC && ThreadState:: stopThreads(); | |
| 111 double timeForStoppingThreads = WTF::currentTimeMS() - startTime; | |
| 112 Platform::current()->histogramCustomCounts("BlinkGC.TimeForStoppingThrea ds", timeForStoppingThreads, 1, 1000, 50); | |
| 113 | |
| 114 if (m_state->isMainThread()) | |
| 115 TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(samplingState); | |
| 116 | |
| 117 return allParked; | |
| 118 } | |
| 119 | |
| 120 Visitor* visitor() const { return m_visitor.get(); } | |
| 121 | |
| 122 private: | 124 private: |
| 123 ThreadState* m_state; | |
| 124 // See ThreadState::runScheduledGC() why we need to already be in a | 125 // See ThreadState::runScheduledGC() why we need to already be in a |
| 125 // GCForbiddenScope before any safe point is entered. | 126 // GCForbiddenScope before any safe point is entered. |
| 126 GCForbiddenScope m_gcForbiddenScope; | 127 GCForbiddenScope m_gcForbiddenScope; |
| 127 OwnPtr<Visitor> m_visitor; | |
| 128 }; | 128 }; |
| 129 | 129 |
| 130 class ResumeThreadScope { | 130 class ResumeThreadScope { |
| 131 public: | 131 public: |
| 132 explicit ResumeThreadScope(BlinkGC::GCType gcType) | 132 explicit ResumeThreadScope(BlinkGC::GCType gcType, GCGroup* gcGroup) |
|
haraken
2016/01/28 15:52:49
Remove explicit.
| |
| 133 : m_resumeThreads(gcType != BlinkGC::ThreadTerminationGC) | 133 : m_resumeThreads(gcType != BlinkGC::ThreadTerminationGC) |
| 134 , m_gcGroup(gcGroup) | |
| 134 { | 135 { |
| 135 } | 136 } |
| 136 ~ResumeThreadScope() | 137 ~ResumeThreadScope() |
| 137 { | 138 { |
| 138 // Only cleanup if we parked all threads in which case the GC happened | 139 // Only cleanup if we parked all threads in which case the GC happened |
| 139 // and we need to resume the other threads. | 140 // and we need to resume the other threads. |
| 140 if (m_resumeThreads) | 141 if (m_resumeThreads) |
| 141 ThreadState::resumeThreads(); | 142 m_gcGroup->resume(); |
| 142 } | 143 } |
| 143 private: | 144 private: |
| 144 bool m_resumeThreads; | 145 bool m_resumeThreads; |
| 146 GCGroup* m_gcGroup; | |
| 145 }; | 147 }; |
| 146 | 148 |
| 147 void Heap::flushHeapDoesNotContainCache() | |
| 148 { | |
| 149 s_heapDoesNotContainCache->flush(); | |
| 150 } | |
| 151 | |
| 152 void Heap::init() | 149 void Heap::init() |
| 153 { | 150 { |
| 154 ThreadState::init(); | 151 ThreadState::init(); |
| 155 s_markingStack = new CallbackStack(); | |
| 156 s_postMarkingCallbackStack = new CallbackStack(); | |
| 157 s_globalWeakCallbackStack = new CallbackStack(); | |
| 158 s_ephemeronStack = new CallbackStack(); | |
| 159 s_heapDoesNotContainCache = new HeapDoesNotContainCache(); | |
| 160 s_freePagePool = new FreePagePool(); | |
| 161 s_orphanedPagePool = new OrphanedPagePool(); | |
| 162 s_allocatedSpace = 0; | |
| 163 s_allocatedObjectSize = 0; | |
| 164 s_objectSizeAtLastGC = 0; | |
| 165 s_markedObjectSize = 0; | |
| 166 s_markedObjectSizeAtLastCompleteSweep = 0; | |
| 167 s_wrapperCount = 0; | |
| 168 s_wrapperCountAtLastGC = 0; | |
| 169 s_collectedWrapperCount = 0; | |
| 170 s_partitionAllocSizeAtLastGC = WTF::Partitions::totalSizeOfCommittedPages(); | |
| 171 s_estimatedMarkingTimePerByte = 0.0; | |
| 172 #if ENABLE(ASSERT) | |
| 173 s_gcGeneration = 1; | |
| 174 #endif | |
| 175 | 152 |
| 176 GCInfoTable::init(); | 153 GCInfoTable::init(); |
| 177 | 154 |
| 178 if (Platform::current() && Platform::current()->currentThread()) | 155 if (Platform::current() && Platform::current()->currentThread()) |
| 179 Platform::current()->registerMemoryDumpProvider(BlinkGCMemoryDumpProvide r::instance(), "BlinkGC"); | 156 Platform::current()->registerMemoryDumpProvider(BlinkGCMemoryDumpProvide r::instance(), "BlinkGC"); |
| 180 } | 157 } |
| 181 | 158 |
| 182 void Heap::shutdown() | 159 void Heap::shutdown() |
| 183 { | 160 { |
| 184 if (Platform::current() && Platform::current()->currentThread()) | 161 if (Platform::current() && Platform::current()->currentThread()) |
| 185 Platform::current()->unregisterMemoryDumpProvider(BlinkGCMemoryDumpProvi der::instance()); | 162 Platform::current()->unregisterMemoryDumpProvider(BlinkGCMemoryDumpProvi der::instance()); |
| 186 s_shutdownCalled = true; | 163 s_shutdownCalled = true; |
| 187 ThreadState::shutdownHeapIfNecessary(); | 164 if (ThreadState::current() && ThreadState::current()->gcGroup()) |
| 165 ThreadState::current()->gcGroup()->shutdownIfNecessary(); | |
|
haraken
2016/01/28 15:52:49
I guess this is doing something wrong. Heap::shutd
keishi
2016/02/29 06:02:32
I had some mistakes in the new CL but
Heap::shutdo
| |
| 188 } | 166 } |
| 189 | 167 |
| 190 void Heap::doShutdown() | 168 void Heap::doShutdown() |
| 191 { | 169 { |
| 192 // We don't want to call doShutdown() twice. | 170 // We don't want to call doShutdown() twice. |
| 193 if (!s_markingStack) | 171 if (!s_doShutdownDone) |
| 194 return; | 172 return; |
| 173 s_doShutdownDone = true; | |
| 195 | 174 |
| 196 ASSERT(!ThreadState::attachedThreads().size()); | 175 // ASSERT(!ThreadState::attachedThreads().size()); |
| 197 delete s_heapDoesNotContainCache; | |
| 198 s_heapDoesNotContainCache = nullptr; | |
| 199 delete s_freePagePool; | |
| 200 s_freePagePool = nullptr; | |
| 201 delete s_orphanedPagePool; | |
| 202 s_orphanedPagePool = nullptr; | |
| 203 delete s_globalWeakCallbackStack; | |
| 204 s_globalWeakCallbackStack = nullptr; | |
| 205 delete s_postMarkingCallbackStack; | |
| 206 s_postMarkingCallbackStack = nullptr; | |
| 207 delete s_markingStack; | |
| 208 s_markingStack = nullptr; | |
| 209 delete s_ephemeronStack; | |
| 210 s_ephemeronStack = nullptr; | |
| 211 delete s_regionTree; | |
| 212 s_regionTree = nullptr; | |
| 213 GCInfoTable::shutdown(); | 176 GCInfoTable::shutdown(); |
| 214 ThreadState::shutdown(); | 177 ThreadState::shutdown(); |
| 215 ASSERT(Heap::allocatedSpace() == 0); | 178 // ASSERT(Heap::allocatedSpace() == 0); |
|
haraken
2016/01/28 15:52:49
We want to keep this ASSERT somehow. This ASSERT i
keishi
2016/02/29 06:02:32
Done.
| |
| 216 } | 179 } |
| 217 | 180 |
| 218 CrossThreadPersistentRegion& Heap::crossThreadPersistentRegion() | 181 CrossThreadPersistentRegion& Heap::crossThreadPersistentRegion() |
| 219 { | 182 { |
| 220 DEFINE_THREAD_SAFE_STATIC_LOCAL(CrossThreadPersistentRegion, persistentRegio n, new CrossThreadPersistentRegion()); | 183 DEFINE_THREAD_SAFE_STATIC_LOCAL(CrossThreadPersistentRegion, persistentRegio n, new CrossThreadPersistentRegion()); |
| 221 return persistentRegion; | 184 return persistentRegion; |
| 222 } | 185 } |
| 223 | 186 |
| 224 #if ENABLE(ASSERT) | 187 #if ENABLE(ASSERT) |
| 225 BasePage* Heap::findPageFromAddress(Address address) | 188 BasePage* Heap::findPageFromAddress(Address address) |
|
haraken
2016/01/28 15:52:49
This method should be moved to GCGroup. I think wh
keishi
2016/02/29 06:02:32
Removed
| |
| 226 { | 189 { |
| 227 MutexLocker lock(ThreadState::threadAttachMutex()); | 190 for (GCGroup* gcGroup : GCGroup::all()) { |
|
haraken
2016/01/28 15:52:49
In general, it is wrong to replace:
for (Thread
| |
| 228 for (ThreadState* state : ThreadState::attachedThreads()) { | 191 if (BasePage* page = gcGroup->findPageFromAddress(address)) |
| 229 if (BasePage* page = state->findPageFromAddress(address)) | |
| 230 return page; | 192 return page; |
| 231 } | 193 } |
| 232 return nullptr; | 194 return nullptr; |
| 233 } | 195 } |
| 234 #endif | 196 #endif |
| 235 | 197 |
| 236 Address Heap::checkAndMarkPointer(Visitor* visitor, Address address) | 198 Address Heap::checkAndMarkPointer(Visitor* visitor, Address address) |
| 237 { | 199 { |
| 238 ASSERT(ThreadState::current()->isInGC()); | 200 ASSERT(ThreadState::current()->isInGC()); |
| 239 | 201 |
| 240 #if !ENABLE(ASSERT) | 202 #if !ENABLE(ASSERT) |
| 241 if (s_heapDoesNotContainCache->lookup(address)) | 203 if (visitor->gcData()->threadState()->gcGroup()->heapDoesNotContainCache()-> lookup(address)) |
|
haraken
2016/01/28 15:52:49
It's nasty that we have to write visitor->gcData()
keishi
2016/02/29 06:02:32
Done.
| |
| 242 return nullptr; | 204 return nullptr; |
| 243 #endif | 205 #endif |
| 244 | 206 |
| 245 if (BasePage* page = lookup(address)) { | 207 // TODO: use Visitor::threadState |
| 208 if (BasePage* page = visitor->gcData()->threadState()->gcGroup()->lookupPage ForAddress(address)) { | |
| 246 ASSERT(page->contains(address)); | 209 ASSERT(page->contains(address)); |
| 247 ASSERT(!page->orphaned()); | 210 ASSERT(!page->orphaned()); |
| 248 ASSERT(!s_heapDoesNotContainCache->lookup(address)); | 211 ASSERT(!visitor->gcData()->threadState()->gcGroup()->heapDoesNotContainC ache()->lookup(address)); |
| 249 page->checkAndMarkPointer(visitor, address); | 212 page->checkAndMarkPointer(visitor, address); |
| 250 return address; | 213 return address; |
| 251 } | 214 } |
| 252 | 215 |
| 253 #if !ENABLE(ASSERT) | 216 #if !ENABLE(ASSERT) |
| 254 s_heapDoesNotContainCache->addEntry(address); | 217 visitor->gcData()->threadState()->gcGroup()->heapDoesNotContainCache()->addE ntry(address); |
| 255 #else | 218 #else |
| 256 if (!s_heapDoesNotContainCache->lookup(address)) | 219 if (!visitor->gcData()->threadState()->gcGroup()->heapDoesNotContainCache()- >lookup(address)) |
| 257 s_heapDoesNotContainCache->addEntry(address); | 220 visitor->gcData()->threadState()->gcGroup()->heapDoesNotContainCache()-> addEntry(address); |
|
haraken
2016/01/28 15:52:49
Cache the gcGroup into a local variable and avoid
| |
| 258 #endif | 221 #endif |
| 259 return nullptr; | 222 return nullptr; |
| 260 } | 223 } |
| 261 | 224 |
| 262 void Heap::pushTraceCallback(void* object, TraceCallback callback) | 225 void Heap::pushTraceCallback(void* object, TraceCallback callback, GCData* gcDat a) |
| 263 { | 226 { |
| 264 ASSERT(ThreadState::current()->isInGC()); | 227 ASSERT(ThreadState::current()->isInGC()); |
| 265 | 228 |
| 266 // Trace should never reach an orphaned page. | 229 // Trace should never reach an orphaned page. |
| 267 ASSERT(!Heap::orphanedPagePool()->contains(object)); | 230 ASSERT(!gcData->threadState()->gcGroup()->orphanedPagePool()->contains(objec t)); |
| 268 CallbackStack::Item* slot = s_markingStack->allocateEntry(); | 231 CallbackStack::Item* slot = gcData->markingStack()->allocateEntry(); |
| 269 *slot = CallbackStack::Item(object, callback); | 232 *slot = CallbackStack::Item(object, callback); |
| 270 } | 233 } |
| 271 | 234 |
| 272 bool Heap::popAndInvokeTraceCallback(Visitor* visitor) | 235 bool Heap::popAndInvokeTraceCallback(Visitor* visitor) |
|
haraken
2016/01/28 15:52:49
It's inconsistent that you sometimes pass Visitor
| |
| 273 { | 236 { |
| 274 CallbackStack::Item* item = s_markingStack->pop(); | 237 CallbackStack::Item* item = visitor->gcData()->markingStack()->pop(); |
| 275 if (!item) | 238 if (!item) |
| 276 return false; | 239 return false; |
| 277 item->call(visitor); | 240 item->call(visitor); |
| 278 return true; | 241 return true; |
| 279 } | 242 } |
| 280 | 243 |
| 281 void Heap::pushPostMarkingCallback(void* object, TraceCallback callback) | 244 void Heap::pushPostMarkingCallback(void* object, TraceCallback callback, GCData* gcData) |
| 282 { | 245 { |
| 283 ASSERT(ThreadState::current()->isInGC()); | 246 ASSERT(ThreadState::current()->isInGC()); |
| 284 | 247 |
| 285 // Trace should never reach an orphaned page. | 248 // Trace should never reach an orphaned page. |
| 286 ASSERT(!Heap::orphanedPagePool()->contains(object)); | 249 ASSERT(!gcData->threadState()->gcGroup()->orphanedPagePool()->contains(objec t)); |
| 287 CallbackStack::Item* slot = s_postMarkingCallbackStack->allocateEntry(); | 250 CallbackStack::Item* slot = gcData->postMarkingCallbackStack()->allocateEntr y(); |
| 288 *slot = CallbackStack::Item(object, callback); | 251 *slot = CallbackStack::Item(object, callback); |
| 289 } | 252 } |
| 290 | 253 |
| 291 bool Heap::popAndInvokePostMarkingCallback(Visitor* visitor) | 254 bool Heap::popAndInvokePostMarkingCallback(Visitor* visitor) |
| 292 { | 255 { |
| 293 if (CallbackStack::Item* item = s_postMarkingCallbackStack->pop()) { | 256 if (CallbackStack::Item* item = visitor->gcData()->postMarkingCallbackStack( )->pop()) { |
| 294 item->call(visitor); | 257 item->call(visitor); |
| 295 return true; | 258 return true; |
| 296 } | 259 } |
| 297 return false; | 260 return false; |
| 298 } | 261 } |
| 299 | 262 |
| 300 void Heap::pushGlobalWeakCallback(void** cell, WeakCallback callback) | 263 void Heap::pushGlobalWeakCallback(void** cell, WeakCallback callback, GCData* gc Data) |
| 301 { | 264 { |
| 302 ASSERT(ThreadState::current()->isInGC()); | 265 ASSERT(ThreadState::current()->isInGC()); |
| 303 | 266 |
| 304 // Trace should never reach an orphaned page. | 267 // Trace should never reach an orphaned page. |
| 305 ASSERT(!Heap::orphanedPagePool()->contains(cell)); | 268 ASSERT(!gcData->threadState()->gcGroup()->orphanedPagePool()->contains(cell) ); |
| 306 CallbackStack::Item* slot = s_globalWeakCallbackStack->allocateEntry(); | 269 CallbackStack::Item* slot = gcData->globalWeakCallbackStack()->allocateEntry (); |
| 307 *slot = CallbackStack::Item(cell, callback); | 270 *slot = CallbackStack::Item(cell, callback); |
| 308 } | 271 } |
| 309 | 272 |
| 310 void Heap::pushThreadLocalWeakCallback(void* closure, void* object, WeakCallback callback) | 273 void Heap::pushThreadLocalWeakCallback(void* closure, void* object, WeakCallback callback) |
|
haraken
2016/01/28 15:52:49
We should pass in Visitor.
keishi
2016/02/29 06:02:32
Why? I don't think we can't use Visitor for anythi
| |
| 311 { | 274 { |
| 312 ASSERT(ThreadState::current()->isInGC()); | 275 ASSERT(ThreadState::current()->isInGC()); |
| 313 | 276 |
| 277 ThreadState* state = pageFromObject(object)->heap()->threadState(); | |
| 314 // Trace should never reach an orphaned page. | 278 // Trace should never reach an orphaned page. |
| 315 ASSERT(!Heap::orphanedPagePool()->contains(object)); | 279 ASSERT(!state->gcGroup()->orphanedPagePool()->contains(object)); |
| 316 ThreadState* state = pageFromObject(object)->heap()->threadState(); | |
| 317 state->pushThreadLocalWeakCallback(closure, callback); | 280 state->pushThreadLocalWeakCallback(closure, callback); |
| 318 } | 281 } |
| 319 | 282 |
| 320 bool Heap::popAndInvokeGlobalWeakCallback(Visitor* visitor) | 283 bool Heap::popAndInvokeGlobalWeakCallback(Visitor* visitor) |
| 321 { | 284 { |
| 322 if (CallbackStack::Item* item = s_globalWeakCallbackStack->pop()) { | 285 if (CallbackStack::Item* item = visitor->gcData()->globalWeakCallbackStack() ->pop()) { |
| 323 item->call(visitor); | 286 item->call(visitor); |
| 324 return true; | 287 return true; |
| 325 } | 288 } |
| 326 return false; | 289 return false; |
| 327 } | 290 } |
| 328 | 291 |
| 329 void Heap::registerWeakTable(void* table, EphemeronCallback iterationCallback, E phemeronCallback iterationDoneCallback) | 292 void Heap::registerWeakTable(void* table, EphemeronCallback iterationCallback, E phemeronCallback iterationDoneCallback, GCData* gcData) |
| 330 { | 293 { |
| 331 ASSERT(ThreadState::current()->isInGC()); | 294 ASSERT(ThreadState::current()->isInGC()); |
| 332 | 295 |
| 333 // Trace should never reach an orphaned page. | 296 // Trace should never reach an orphaned page. |
| 334 ASSERT(!Heap::orphanedPagePool()->contains(table)); | 297 ASSERT(!gcData->threadState()->gcGroup()->orphanedPagePool()->contains(table )); |
| 335 CallbackStack::Item* slot = s_ephemeronStack->allocateEntry(); | 298 CallbackStack::Item* slot = gcData->ephemeronStack()->allocateEntry(); |
| 336 *slot = CallbackStack::Item(table, iterationCallback); | 299 *slot = CallbackStack::Item(table, iterationCallback); |
| 337 | 300 |
| 338 // Register a post-marking callback to tell the tables that | 301 // Register a post-marking callback to tell the tables that |
| 339 // ephemeron iteration is complete. | 302 // ephemeron iteration is complete. |
| 340 pushPostMarkingCallback(table, iterationDoneCallback); | 303 pushPostMarkingCallback(table, iterationDoneCallback, gcData); |
| 341 } | 304 } |
| 342 | 305 |
| 343 #if ENABLE(ASSERT) | 306 #if ENABLE(ASSERT) |
| 344 bool Heap::weakTableRegistered(const void* table) | 307 bool Heap::weakTableRegistered(const void* table, GCData* gcData) |
| 345 { | 308 { |
| 346 ASSERT(s_ephemeronStack); | 309 ASSERT(gcData->ephemeronStack()); |
| 347 return s_ephemeronStack->hasCallbackForObject(table); | 310 return gcData->ephemeronStack()->hasCallbackForObject(table); |
| 348 } | 311 } |
| 349 #endif | 312 #endif |
| 350 | 313 |
| 351 void Heap::preGC() | 314 void Heap::preGC() |
| 352 { | 315 { |
|
haraken
2016/01/28 15:52:49
Is this helper function still useful?
keishi
2016/02/29 06:02:32
Removed
| |
| 353 ASSERT(!ThreadState::current()->isInGC()); | 316 ASSERT(!ThreadState::current()->isInGC()); |
| 354 for (ThreadState* state : ThreadState::attachedThreads()) | 317 ThreadState::current()->gcGroup()->preGC(); |
| 355 state->preGC(); | |
| 356 } | 318 } |
| 357 | 319 |
| 358 void Heap::postGC(BlinkGC::GCType gcType) | 320 void Heap::postGC(BlinkGC::GCType gcType) |
|
haraken
2016/01/28 15:52:49
Ditto.
keishi
2016/02/29 06:02:32
Removed
| |
| 359 { | 321 { |
| 360 ASSERT(ThreadState::current()->isInGC()); | 322 ASSERT(ThreadState::current()->isInGC()); |
| 361 for (ThreadState* state : ThreadState::attachedThreads()) | 323 ThreadState::current()->gcGroup()->postGC(gcType); |
| 362 state->postGC(gcType); | |
| 363 } | 324 } |
| 364 | 325 |
| 365 const char* Heap::gcReasonString(BlinkGC::GCReason reason) | 326 const char* Heap::gcReasonString(BlinkGC::GCReason reason) |
| 366 { | 327 { |
| 367 switch (reason) { | 328 switch (reason) { |
| 368 case BlinkGC::IdleGC: | 329 case BlinkGC::IdleGC: |
| 369 return "IdleGC"; | 330 return "IdleGC"; |
| 370 case BlinkGC::PreciseGC: | 331 case BlinkGC::PreciseGC: |
| 371 return "PreciseGC"; | 332 return "PreciseGC"; |
| 372 case BlinkGC::ConservativeGC: | 333 case BlinkGC::ConservativeGC: |
| (...skipping 20 matching lines...) Expand all Loading... | |
| 393 GCScope gcScope(state, stackState, gcType); | 354 GCScope gcScope(state, stackState, gcType); |
| 394 // See collectGarbageForTerminatingThread() comment on why a | 355 // See collectGarbageForTerminatingThread() comment on why a |
| 395 // safepoint scope isn't entered for it. | 356 // safepoint scope isn't entered for it. |
| 396 SafePointScope safePointScope(stackState, gcType != BlinkGC::ThreadTerminati onGC ? state : nullptr); | 357 SafePointScope safePointScope(stackState, gcType != BlinkGC::ThreadTerminati onGC ? state : nullptr); |
| 397 | 358 |
| 398 // Try to park the other threads. If we're unable to, bail out of the GC. | 359 // Try to park the other threads. If we're unable to, bail out of the GC. |
| 399 if (!gcScope.parkAllThreads(stackState, gcType)) | 360 if (!gcScope.parkAllThreads(stackState, gcType)) |
| 400 return; | 361 return; |
| 401 | 362 |
| 402 // Resume all parked threads upon leaving this scope. | 363 // Resume all parked threads upon leaving this scope. |
| 403 ResumeThreadScope resumeThreads(gcType); | 364 ResumeThreadScope resumeThreads(gcType, state->gcGroup()); |
| 404 ScriptForbiddenIfMainThreadScope scriptForbidden; | 365 ScriptForbiddenIfMainThreadScope scriptForbidden; |
| 405 | 366 |
| 406 TRACE_EVENT2("blink_gc", "Heap::collectGarbage", | 367 TRACE_EVENT2("blink_gc", "Heap::collectGarbage", |
| 407 "lazySweeping", gcType == BlinkGC::GCWithoutSweep, | 368 "lazySweeping", gcType == BlinkGC::GCWithoutSweep, |
| 408 "gcReason", gcReasonString(reason)); | 369 "gcReason", gcReasonString(reason)); |
| 409 TRACE_EVENT_SCOPED_SAMPLING_STATE("blink_gc", "BlinkGC"); | 370 TRACE_EVENT_SCOPED_SAMPLING_STATE("blink_gc", "BlinkGC"); |
| 410 double startTime = WTF::currentTimeMS(); | 371 double startTime = WTF::currentTimeMS(); |
| 411 | 372 |
| 412 if (gcType == BlinkGC::TakeSnapshot) | 373 if (gcType == BlinkGC::TakeSnapshot) |
| 413 BlinkGCMemoryDumpProvider::instance()->clearProcessDumpForCurrentGC(); | 374 BlinkGCMemoryDumpProvider::instance()->clearProcessDumpForCurrentGC(); |
| 414 | 375 |
| 415 // Disallow allocation during garbage collection (but not during the | 376 // Disallow allocation during garbage collection (but not during the |
| 416 // finalization that happens when the gcScope is torn down). | 377 // finalization that happens when the gcScope is torn down). |
| 417 ThreadState::NoAllocationScope noAllocationScope(state); | 378 ThreadState::NoAllocationScope noAllocationScope(state); |
| 418 | 379 |
| 419 preGC(); | 380 preGC(); |
| 420 | 381 |
| 421 StackFrameDepthScope stackDepthScope; | 382 StackFrameDepthScope stackDepthScope; |
| 422 | 383 |
| 423 size_t totalObjectSize = Heap::allocatedObjectSize() + Heap::markedObjectSiz e(); | 384 size_t totalObjectSize = state->gcGroup()->heapStats().allocatedObjectSize() + ThreadState::current()->gcGroup()->heapStats().markedObjectSize(); |
| 424 if (gcType != BlinkGC::TakeSnapshot) | 385 if (gcType != BlinkGC::TakeSnapshot) { |
| 425 Heap::resetHeapCounters(); | 386 Heap::reportMemoryUsageForTracing(); |
| 387 state->gcGroup()->heapStats().reset(); | |
| 388 } | |
| 426 | 389 |
| 427 // 1. Trace persistent roots. | 390 // 1. Trace persistent roots. |
| 428 ThreadState::visitPersistentRoots(gcScope.visitor()); | 391 state->gcGroup()->visitPersistentRoots(gcScope.visitor()); |
| 429 | 392 |
| 430 // 2. Trace objects reachable from the stack. We do this independent of the | 393 // 2. Trace objects reachable from the stack. We do this independent of the |
| 431 // given stackState since other threads might have a different stack state. | 394 // given stackState since other threads might have a different stack state. |
| 432 ThreadState::visitStackRoots(gcScope.visitor()); | 395 state->gcGroup()->visitStackRoots(gcScope.visitor()); |
| 433 | 396 |
| 434 // 3. Transitive closure to trace objects including ephemerons. | 397 // 3. Transitive closure to trace objects including ephemerons. |
| 435 processMarkingStack(gcScope.visitor()); | 398 processMarkingStack(gcScope.visitor()); |
| 436 | 399 |
| 437 postMarkingProcessing(gcScope.visitor()); | 400 postMarkingProcessing(gcScope.visitor()); |
| 438 globalWeakProcessing(gcScope.visitor()); | 401 globalWeakProcessing(gcScope.visitor()); |
| 439 | 402 |
| 440 // Now we can delete all orphaned pages because there are no dangling | 403 // Now we can delete all orphaned pages because there are no dangling |
| 441 // pointers to the orphaned pages. (If we have such dangling pointers, | 404 // pointers to the orphaned pages. (If we have such dangling pointers, |
| 442 // we should have crashed during marking before getting here.) | 405 // we should have crashed during marking before getting here.) |
| 443 orphanedPagePool()->decommitOrphanedPages(); | 406 state->gcGroup()->orphanedPagePool()->decommitOrphanedPages(); |
| 444 | 407 |
| 445 double markingTimeInMilliseconds = WTF::currentTimeMS() - startTime; | 408 double markingTimeInMilliseconds = WTF::currentTimeMS() - startTime; |
| 446 s_estimatedMarkingTimePerByte = totalObjectSize ? (markingTimeInMilliseconds / 1000 / totalObjectSize) : 0; | 409 state->gcGroup()->heapStats().setEstimatedMarkingTimePerByte(totalObjectSize ? (markingTimeInMilliseconds / 1000 / totalObjectSize) : 0); |
| 447 | 410 |
| 448 #if PRINT_HEAP_STATS | 411 #if PRINT_HEAP_STATS |
| 449 dataLogF("Heap::collectGarbage (gcReason=%s, lazySweeping=%d, time=%.1lfms)\ n", gcReasonString(reason), gcType == BlinkGC::GCWithoutSweep, markingTimeInMill iseconds); | 412 dataLogF("Heap::collectGarbage (gcReason=%s, lazySweeping=%d, time=%.1lfms)\ n", gcReasonString(reason), gcType == BlinkGC::GCWithoutSweep, markingTimeInMill iseconds); |
| 450 #endif | 413 #endif |
| 451 | 414 |
| 452 Platform::current()->histogramCustomCounts("BlinkGC.CollectGarbage", marking TimeInMilliseconds, 0, 10 * 1000, 50); | 415 Platform::current()->histogramCustomCounts("BlinkGC.CollectGarbage", marking TimeInMilliseconds, 0, 10 * 1000, 50); |
| 453 Platform::current()->histogramCustomCounts("BlinkGC.TotalObjectSpace", Heap: :allocatedObjectSize() / 1024, 0, 4 * 1024 * 1024, 50); | 416 Platform::current()->histogramCustomCounts("BlinkGC.TotalObjectSpace", Heap: :totalAllocatedObjectSize() / 1024, 0, 4 * 1024 * 1024, 50); |
| 454 Platform::current()->histogramCustomCounts("BlinkGC.TotalAllocatedSpace", He ap::allocatedSpace() / 1024, 0, 4 * 1024 * 1024, 50); | 417 Platform::current()->histogramCustomCounts("BlinkGC.TotalAllocatedSpace", He ap::totalAllocatedSpace() / 1024, 0, 4 * 1024 * 1024, 50); |
| 455 Platform::current()->histogramEnumeration("BlinkGC.GCReason", reason, BlinkG C::NumberOfGCReason); | 418 Platform::current()->histogramEnumeration("BlinkGC.GCReason", reason, BlinkG C::NumberOfGCReason); |
| 456 Heap::reportMemoryUsageHistogram(); | 419 Heap::reportMemoryUsageHistogram(); |
| 457 WTF::Partitions::reportMemoryUsageHistogram(); | 420 WTF::Partitions::reportMemoryUsageHistogram(); |
| 458 | 421 |
| 459 postGC(gcType); | 422 postGC(gcType); |
| 460 | |
| 461 #if ENABLE(ASSERT) | |
| 462 // 0 is used to figure non-assigned area, so avoid to use 0 in s_gcGeneratio n. | |
| 463 if (++s_gcGeneration == 0) { | |
| 464 s_gcGeneration = 1; | |
| 465 } | |
| 466 #endif | |
| 467 } | 423 } |
| 468 | 424 |
| 469 void Heap::collectGarbageForTerminatingThread(ThreadState* state) | 425 void Heap::collectGarbageForTerminatingThread(ThreadState* state) |
| 470 { | 426 { |
| 471 { | 427 { |
| 472 // A thread-specific termination GC must not allow other global GCs to g o | 428 // A thread-specific termination GC must not allow other global GCs to g o |
| 473 // ahead while it is running, hence the termination GC does not enter a | 429 // ahead while it is running, hence the termination GC does not enter a |
| 474 // safepoint. GCScope will not enter also a safepoint scope for | 430 // safepoint. GCScope will not enter also a safepoint scope for |
| 475 // ThreadTerminationGC. | 431 // ThreadTerminationGC. |
| 476 GCScope gcScope(state, BlinkGC::NoHeapPointersOnStack, BlinkGC::ThreadTe rminationGC); | 432 GCScope gcScope(state, BlinkGC::NoHeapPointersOnStack, BlinkGC::ThreadTe rminationGC); |
| (...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 511 // Iteratively mark all objects that are reachable from the objects | 467 // Iteratively mark all objects that are reachable from the objects |
| 512 // currently pushed onto the marking stack. | 468 // currently pushed onto the marking stack. |
| 513 TRACE_EVENT0("blink_gc", "Heap::processMarkingStackSingleThreaded"); | 469 TRACE_EVENT0("blink_gc", "Heap::processMarkingStackSingleThreaded"); |
| 514 while (popAndInvokeTraceCallback(visitor)) { } | 470 while (popAndInvokeTraceCallback(visitor)) { } |
| 515 } | 471 } |
| 516 | 472 |
| 517 { | 473 { |
| 518 // Mark any strong pointers that have now become reachable in | 474 // Mark any strong pointers that have now become reachable in |
| 519 // ephemeron maps. | 475 // ephemeron maps. |
| 520 TRACE_EVENT0("blink_gc", "Heap::processEphemeronStack"); | 476 TRACE_EVENT0("blink_gc", "Heap::processEphemeronStack"); |
| 521 s_ephemeronStack->invokeEphemeronCallbacks(visitor); | 477 visitor->gcData()->ephemeronStack()->invokeEphemeronCallbacks(visito r); |
| 522 } | 478 } |
| 523 | 479 |
| 524 // Rerun loop if ephemeron processing queued more objects for tracing. | 480 // Rerun loop if ephemeron processing queued more objects for tracing. |
| 525 } while (!s_markingStack->isEmpty()); | 481 } while (!visitor->gcData()->markingStack()->isEmpty()); |
| 526 } | 482 } |
| 527 | 483 |
| 528 void Heap::postMarkingProcessing(Visitor* visitor) | 484 void Heap::postMarkingProcessing(Visitor* visitor) |
| 529 { | 485 { |
| 530 TRACE_EVENT0("blink_gc", "Heap::postMarkingProcessing"); | 486 TRACE_EVENT0("blink_gc", "Heap::postMarkingProcessing"); |
| 531 // Call post-marking callbacks including: | 487 // Call post-marking callbacks including: |
| 532 // 1. the ephemeronIterationDone callbacks on weak tables to do cleanup | 488 // 1. the ephemeronIterationDone callbacks on weak tables to do cleanup |
| 533 // (specifically to clear the queued bits for weak hash tables), and | 489 // (specifically to clear the queued bits for weak hash tables), and |
| 534 // 2. the markNoTracing callbacks on collection backings to mark them | 490 // 2. the markNoTracing callbacks on collection backings to mark them |
| 535 // if they are only reachable from their front objects. | 491 // if they are only reachable from their front objects. |
| 536 while (popAndInvokePostMarkingCallback(visitor)) { } | 492 while (popAndInvokePostMarkingCallback(visitor)) { } |
| 537 | 493 |
| 538 s_ephemeronStack->clear(); | 494 visitor->gcData()->ephemeronStack()->clear(); |
| 539 | 495 |
| 540 // Post-marking callbacks should not trace any objects and | 496 // Post-marking callbacks should not trace any objects and |
| 541 // therefore the marking stack should be empty after the | 497 // therefore the marking stack should be empty after the |
| 542 // post-marking callbacks. | 498 // post-marking callbacks. |
| 543 ASSERT(s_markingStack->isEmpty()); | 499 ASSERT(visitor->gcData()->markingStack()->isEmpty()); |
| 544 } | 500 } |
| 545 | 501 |
| 546 void Heap::globalWeakProcessing(Visitor* visitor) | 502 void Heap::globalWeakProcessing(Visitor* visitor) |
| 547 { | 503 { |
| 548 TRACE_EVENT0("blink_gc", "Heap::globalWeakProcessing"); | 504 TRACE_EVENT0("blink_gc", "Heap::globalWeakProcessing"); |
| 549 double startTime = WTF::currentTimeMS(); | 505 double startTime = WTF::currentTimeMS(); |
| 550 | 506 |
| 551 // Call weak callbacks on objects that may now be pointing to dead objects. | 507 // Call weak callbacks on objects that may now be pointing to dead objects. |
| 552 while (popAndInvokeGlobalWeakCallback(visitor)) { } | 508 while (popAndInvokeGlobalWeakCallback(visitor)) { } |
| 553 | 509 |
| 554 // It is not permitted to trace pointers of live objects in the weak | 510 // It is not permitted to trace pointers of live objects in the weak |
| 555 // callback phase, so the marking stack should still be empty here. | 511 // callback phase, so the marking stack should still be empty here. |
| 556 ASSERT(s_markingStack->isEmpty()); | 512 ASSERT(visitor->gcData()->markingStack()->isEmpty()); |
| 557 | 513 |
| 558 double timeForGlobalWeakProcessing = WTF::currentTimeMS() - startTime; | 514 double timeForGlobalWeakProcessing = WTF::currentTimeMS() - startTime; |
| 559 Platform::current()->histogramCustomCounts("BlinkGC.TimeForGlobalWeakPrcessi ng", timeForGlobalWeakProcessing, 1, 10 * 1000, 50); | 515 Platform::current()->histogramCustomCounts("BlinkGC.TimeForGlobalWeakPrcessi ng", timeForGlobalWeakProcessing, 1, 10 * 1000, 50); |
| 560 } | 516 } |
| 561 | 517 |
| 562 void Heap::collectAllGarbage() | 518 void Heap::collectAllGarbage() |
| 563 { | 519 { |
| 564 // We need to run multiple GCs to collect a chain of persistent handles. | 520 // We need to run multiple GCs to collect a chain of persistent handles. |
| 565 size_t previousLiveObjects = 0; | 521 size_t previousLiveObjects = 0; |
| 566 for (int i = 0; i < 5; ++i) { | 522 for (int i = 0; i < 5; ++i) { |
| 567 collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithSweep, Bli nkGC::ForcedGC); | 523 collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithSweep, Bli nkGC::ForcedGC); |
| 568 size_t liveObjects = Heap::markedObjectSize(); | 524 size_t liveObjects = ThreadState::current()->gcGroup()->heapStats().mark edObjectSize(); |
| 569 if (liveObjects == previousLiveObjects) | 525 if (liveObjects == previousLiveObjects) |
| 570 break; | 526 break; |
| 571 previousLiveObjects = liveObjects; | 527 previousLiveObjects = liveObjects; |
| 572 } | 528 } |
| 573 } | 529 } |
| 574 | 530 |
| 575 double Heap::estimatedMarkingTime() | |
| 576 { | |
| 577 ASSERT(ThreadState::current()->isMainThread()); | |
| 578 | |
| 579 // Use 8 ms as initial estimated marking time. | |
| 580 // 8 ms is long enough for low-end mobile devices to mark common | |
| 581 // real-world object graphs. | |
| 582 if (s_estimatedMarkingTimePerByte == 0) | |
| 583 return 0.008; | |
| 584 | |
| 585 // Assuming that the collection rate of this GC will be mostly equal to | |
| 586 // the collection rate of the last GC, estimate the marking time of this GC. | |
| 587 return s_estimatedMarkingTimePerByte * (Heap::allocatedObjectSize() + Heap:: markedObjectSize()); | |
| 588 } | |
| 589 | |
| 590 void Heap::reportMemoryUsageHistogram() | 531 void Heap::reportMemoryUsageHistogram() |
| 591 { | 532 { |
| 592 static size_t supportedMaxSizeInMB = 4 * 1024; | 533 static size_t supportedMaxSizeInMB = 4 * 1024; |
| 593 static size_t observedMaxSizeInMB = 0; | 534 static size_t observedMaxSizeInMB = 0; |
| 594 | 535 |
| 595 // We only report the memory in the main thread. | 536 // We only report the memory in the main thread. |
| 596 if (!isMainThread()) | 537 if (!isMainThread()) |
| 597 return; | 538 return; |
| 598 // +1 is for rounding up the sizeInMB. | 539 // +1 is for rounding up the sizeInMB. |
| 599 size_t sizeInMB = Heap::allocatedSpace() / 1024 / 1024 + 1; | 540 size_t sizeInMB = ThreadState::current()->gcGroup()->heapStats().allocatedSp ace() / 1024 / 1024 + 1; |
| 600 if (sizeInMB >= supportedMaxSizeInMB) | 541 if (sizeInMB >= supportedMaxSizeInMB) |
| 601 sizeInMB = supportedMaxSizeInMB - 1; | 542 sizeInMB = supportedMaxSizeInMB - 1; |
| 602 if (sizeInMB > observedMaxSizeInMB) { | 543 if (sizeInMB > observedMaxSizeInMB) { |
| 603 // Send a UseCounter only when we see the highest memory usage | 544 // Send a UseCounter only when we see the highest memory usage |
| 604 // we've ever seen. | 545 // we've ever seen. |
| 605 Platform::current()->histogramEnumeration("BlinkGC.CommittedSize", sizeI nMB, supportedMaxSizeInMB); | 546 Platform::current()->histogramEnumeration("BlinkGC.CommittedSize", sizeI nMB, supportedMaxSizeInMB); |
| 606 observedMaxSizeInMB = sizeInMB; | 547 observedMaxSizeInMB = sizeInMB; |
| 607 } | 548 } |
| 608 } | 549 } |
| 609 | 550 |
| 610 void Heap::reportMemoryUsageForTracing() | 551 void Heap::reportMemoryUsageForTracing() |
| 611 { | 552 { |
| 612 #if PRINT_HEAP_STATS | 553 #if PRINT_HEAP_STATS |
| 613 // dataLogF("allocatedSpace=%ldMB, allocatedObjectSize=%ldMB, markedObjectSi ze=%ldMB, partitionAllocSize=%ldMB, wrapperCount=%ld, collectedWrapperCount=%ld\ n", Heap::allocatedSpace() / 1024 / 1024, Heap::allocatedObjectSize() / 1024 / 1 024, Heap::markedObjectSize() / 1024 / 1024, WTF::Partitions::totalSizeOfCommitt edPages() / 1024 / 1024, Heap::wrapperCount(), Heap::collectedWrapperCount()); | 554 // dataLogF("allocatedSpace=%ldMB, allocatedObjectSize=%ldMB, markedObjectSi ze=%ldMB, partitionAllocSize=%ldMB, wrapperCount=%ld, collectedWrapperCount=%ld\ n", Heap::allocatedSpace() / 1024 / 1024, Heap::allocatedObjectSize() / 1024 / 1 024, Heap::markedObjectSize() / 1024 / 1024, WTF::Partitions::totalSizeOfCommitt edPages() / 1024 / 1024, Heap::wrapperCount(), Heap::collectedWrapperCount()); |
| 614 #endif | 555 #endif |
| 615 | 556 |
| 616 bool gcTracingEnabled; | 557 bool gcTracingEnabled; |
| 617 TRACE_EVENT_CATEGORY_GROUP_ENABLED("blink_gc", &gcTracingEnabled); | 558 TRACE_EVENT_CATEGORY_GROUP_ENABLED("blink_gc", &gcTracingEnabled); |
| 618 if (!gcTracingEnabled) | 559 if (!gcTracingEnabled) |
| 619 return; | 560 return; |
| 620 | 561 |
| 562 GCGroup* gcGroup = ThreadState::current()->gcGroup(); | |
| 563 | |
| 621 // These values are divided by 1024 to avoid overflow in practical cases (TR ACE_COUNTER values are 32-bit ints). | 564 // These values are divided by 1024 to avoid overflow in practical cases (TR ACE_COUNTER values are 32-bit ints). |
| 622 // They are capped to INT_MAX just in case. | 565 // They are capped to INT_MAX just in case. |
| 623 TRACE_COUNTER1("blink_gc", "Heap::allocatedObjectSizeKB", std::min(Heap::all ocatedObjectSize() / 1024, static_cast<size_t>(INT_MAX))); | 566 TRACE_COUNTER1("blink_gc", "Heap::allocatedObjectSizeKB", std::min(gcGroup-> heapStats().allocatedObjectSize() / 1024, static_cast<size_t>(INT_MAX))); |
| 624 TRACE_COUNTER1("blink_gc", "Heap::markedObjectSizeKB", std::min(Heap::marked ObjectSize() / 1024, static_cast<size_t>(INT_MAX))); | 567 TRACE_COUNTER1("blink_gc", "Heap::markedObjectSizeKB", std::min(gcGroup->hea pStats().markedObjectSize() / 1024, static_cast<size_t>(INT_MAX))); |
| 625 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::markedObjectSiz eAtLastCompleteSweepKB", std::min(Heap::markedObjectSizeAtLastCompleteSweep() / 1024, static_cast<size_t>(INT_MAX))); | 568 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::markedObjectSiz eAtLastCompleteSweepKB", std::min(gcGroup->heapStats().markedObjectSizeAtLastCom pleteSweep() / 1024, static_cast<size_t>(INT_MAX))); |
| 626 TRACE_COUNTER1("blink_gc", "Heap::allocatedSpaceKB", std::min(Heap::allocate dSpace() / 1024, static_cast<size_t>(INT_MAX))); | 569 TRACE_COUNTER1("blink_gc", "Heap::allocatedSpaceKB", std::min(Heap::totalAll ocatedSpace() / 1024, static_cast<size_t>(INT_MAX))); |
| 627 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::objectSizeAtLas tGCKB", std::min(Heap::objectSizeAtLastGC() / 1024, static_cast<size_t>(INT_MAX) )); | 570 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::objectSizeAtLas tGCKB", std::min(gcGroup->heapStats().objectSizeAtLastGC() / 1024, static_cast<s ize_t>(INT_MAX))); |
| 628 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::wrapperCount", std::min(Heap::wrapperCount(), static_cast<size_t>(INT_MAX))); | 571 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::wrapperCount", std::min(gcGroup->heapStats().wrapperCount(), static_cast<size_t>(INT_MAX))); |
| 629 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::wrapperCountAtL astGC", std::min(Heap::wrapperCountAtLastGC(), static_cast<size_t>(INT_MAX))); | 572 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::wrapperCountAtL astGC", std::min(gcGroup->heapStats().wrapperCountAtLastGC(), static_cast<size_t >(INT_MAX))); |
| 630 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::collectedWrappe rCount", std::min(Heap::collectedWrapperCount(), static_cast<size_t>(INT_MAX))); | 573 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::collectedWrappe rCount", std::min(gcGroup->heapStats().collectedWrapperCount(), static_cast<size _t>(INT_MAX))); |
| 631 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::partitionAllocS izeAtLastGCKB", std::min(Heap::partitionAllocSizeAtLastGC() / 1024, static_cast< size_t>(INT_MAX))); | 574 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::partitionAllocS izeAtLastGCKB", std::min(gcGroup->heapStats().partitionAllocSizeAtLastGC() / 102 4, static_cast<size_t>(INT_MAX))); |
| 632 TRACE_COUNTER1("blink_gc", "Partitions::totalSizeOfCommittedPagesKB", std::m in(WTF::Partitions::totalSizeOfCommittedPages() / 1024, static_cast<size_t>(INT_ MAX))); | 575 TRACE_COUNTER1("blink_gc", "Partitions::totalSizeOfCommittedPagesKB", std::m in(WTF::Partitions::totalSizeOfCommittedPages() / 1024, static_cast<size_t>(INT_ MAX))); |
| 633 } | 576 } |
| 634 | 577 |
| 635 size_t Heap::objectPayloadSizeForTesting() | 578 size_t Heap::objectPayloadSizeForTesting() |
| 636 { | 579 { |
| 637 size_t objectPayloadSize = 0; | 580 size_t objectPayloadSize = 0; |
| 638 for (ThreadState* state : ThreadState::attachedThreads()) { | 581 for (GCGroup* gcGroup : GCGroup::all()) { |
|
haraken
2016/01/28 15:52:49
This should not iterate all GCGroups. Just return:
keishi
2016/02/29 06:02:32
Removed and used GCGroup::objectPayloadSizeForTest
| |
| 639 state->setGCState(ThreadState::GCRunning); | 582 objectPayloadSize += gcGroup->objectPayloadSizeForTesting(); |
| 640 state->makeConsistentForGC(); | |
| 641 objectPayloadSize += state->objectPayloadSizeForTesting(); | |
| 642 state->setGCState(ThreadState::EagerSweepScheduled); | |
| 643 state->setGCState(ThreadState::Sweeping); | |
| 644 state->setGCState(ThreadState::NoGCScheduled); | |
| 645 } | 583 } |
| 646 return objectPayloadSize; | 584 return objectPayloadSize; |
| 647 } | 585 } |
| 648 | 586 |
| 649 BasePage* Heap::lookup(Address address) | |
| 650 { | |
| 651 ASSERT(ThreadState::current()->isInGC()); | |
| 652 if (!s_regionTree) | |
| 653 return nullptr; | |
| 654 if (PageMemoryRegion* region = s_regionTree->lookup(address)) { | |
| 655 BasePage* page = region->pageFromAddress(address); | |
| 656 return page && !page->orphaned() ? page : nullptr; | |
| 657 } | |
| 658 return nullptr; | |
| 659 } | |
| 660 | |
| 661 static Mutex& regionTreeMutex() | |
| 662 { | |
| 663 DEFINE_THREAD_SAFE_STATIC_LOCAL(Mutex, mutex, new Mutex); | |
| 664 return mutex; | |
| 665 } | |
| 666 | |
| 667 void Heap::removePageMemoryRegion(PageMemoryRegion* region) | |
| 668 { | |
| 669 // Deletion of large objects (and thus their regions) can happen | |
| 670 // concurrently on sweeper threads. Removal can also happen during thread | |
| 671 // shutdown, but that case is safe. Regardless, we make all removals | |
| 672 // mutually exclusive. | |
| 673 MutexLocker locker(regionTreeMutex()); | |
| 674 RegionTree::remove(region, &s_regionTree); | |
| 675 } | |
| 676 | |
| 677 void Heap::addPageMemoryRegion(PageMemoryRegion* region) | |
| 678 { | |
| 679 MutexLocker locker(regionTreeMutex()); | |
| 680 RegionTree::add(new RegionTree(region), &s_regionTree); | |
| 681 } | |
| 682 | |
| 683 void Heap::resetHeapCounters() | 587 void Heap::resetHeapCounters() |
| 684 { | 588 { |
| 685 ASSERT(ThreadState::current()->isInGC()); | 589 ASSERT(ThreadState::current()->isInGC()); |
| 686 | 590 |
| 687 Heap::reportMemoryUsageForTracing(); | 591 Heap::reportMemoryUsageForTracing(); |
| 688 | 592 |
| 689 s_objectSizeAtLastGC = s_allocatedObjectSize + s_markedObjectSize; | 593 ThreadState::current()->gcGroup()->heapStats().reset(); |
| 690 s_partitionAllocSizeAtLastGC = WTF::Partitions::totalSizeOfCommittedPages(); | |
| 691 s_allocatedObjectSize = 0; | |
| 692 s_markedObjectSize = 0; | |
| 693 s_wrapperCountAtLastGC = s_wrapperCount; | |
| 694 s_collectedWrapperCount = 0; | |
| 695 } | 594 } |
| 696 | 595 |
| 697 CallbackStack* Heap::s_markingStack; | |
| 698 CallbackStack* Heap::s_postMarkingCallbackStack; | |
| 699 CallbackStack* Heap::s_globalWeakCallbackStack; | |
| 700 CallbackStack* Heap::s_ephemeronStack; | |
| 701 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache; | |
| 702 bool Heap::s_shutdownCalled = false; | 596 bool Heap::s_shutdownCalled = false; |
| 703 FreePagePool* Heap::s_freePagePool; | 597 bool Heap::s_doShutdownDone = false; |
| 704 OrphanedPagePool* Heap::s_orphanedPagePool; | 598 size_t Heap::s_totalAllocatedSpace = 0; |
| 705 RegionTree* Heap::s_regionTree = nullptr; | 599 size_t Heap::s_totalAllocatedObjectSize = 0; |
| 706 size_t Heap::s_allocatedSpace = 0; | 600 size_t Heap::s_totalMarkedObjectSize = 0; |
| 707 size_t Heap::s_allocatedObjectSize = 0; | |
| 708 size_t Heap::s_objectSizeAtLastGC = 0; | |
| 709 size_t Heap::s_markedObjectSize = 0; | |
| 710 size_t Heap::s_markedObjectSizeAtLastCompleteSweep = 0; | |
| 711 size_t Heap::s_wrapperCount = 0; | |
| 712 size_t Heap::s_wrapperCountAtLastGC = 0; | |
| 713 size_t Heap::s_collectedWrapperCount = 0; | |
| 714 size_t Heap::s_partitionAllocSizeAtLastGC = 0; | |
| 715 double Heap::s_estimatedMarkingTimePerByte = 0.0; | |
| 716 #if ENABLE(ASSERT) | |
| 717 uint16_t Heap::s_gcGeneration = 0; | |
| 718 #endif | |
| 719 | 601 |
| 720 } // namespace blink | 602 } // namespace blink |
| OLD | NEW |