1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 12 matching lines...)
23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | 23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | 24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | 25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
29 */ | 29 */ |
30 | 30 |
31 #include "platform/heap/Heap.h" | 31 #include "platform/heap/Heap.h" |
32 | 32 |
33 #include "base/debug/alias.h" | |
34 #include "base/sys_info.h" | 33 #include "base/sys_info.h" |
35 #include "platform/Histogram.h" | 34 #include "platform/Histogram.h" |
36 #include "platform/ScriptForbiddenScope.h" | 35 #include "platform/ScriptForbiddenScope.h" |
37 #include "platform/TraceEvent.h" | 36 #include "platform/TraceEvent.h" |
38 #include "platform/heap/BlinkGCMemoryDumpProvider.h" | 37 #include "platform/heap/BlinkGCMemoryDumpProvider.h" |
39 #include "platform/heap/CallbackStack.h" | 38 #include "platform/heap/CallbackStack.h" |
40 #include "platform/heap/MarkingVisitor.h" | 39 #include "platform/heap/MarkingVisitor.h" |
41 #include "platform/heap/PageMemory.h" | 40 #include "platform/heap/PageMemory.h" |
42 #include "platform/heap/PagePool.h" | 41 #include "platform/heap/PagePool.h" |
43 #include "platform/heap/SafePoint.h" | 42 #include "platform/heap/SafePoint.h" |
(...skipping 15 matching lines...)
59 class ParkThreadsScope final { | 58 class ParkThreadsScope final { |
60 STACK_ALLOCATED(); | 59 STACK_ALLOCATED(); |
61 public: | 60 public: |
62 ParkThreadsScope() | 61 ParkThreadsScope() |
63 : m_shouldResumeThreads(false) | 62 : m_shouldResumeThreads(false) |
64 { | 63 { |
65 } | 64 } |
66 | 65 |
67 bool parkThreads(ThreadState* state) | 66 bool parkThreads(ThreadState* state) |
68 { | 67 { |
69 TRACE_EVENT0("blink_gc", "Heap::ParkThreadsScope"); | 68 TRACE_EVENT0("blink_gc", "ThreadHeap::ParkThreadsScope"); |
70 const char* samplingState = TRACE_EVENT_GET_SAMPLING_STATE(); | 69 const char* samplingState = TRACE_EVENT_GET_SAMPLING_STATE(); |
71 if (state->isMainThread()) | 70 if (state->isMainThread()) |
72 TRACE_EVENT_SET_SAMPLING_STATE("blink_gc", "BlinkGCWaiting"); | 71 TRACE_EVENT_SET_SAMPLING_STATE("blink_gc", "BlinkGCWaiting"); |
73 | 72 |
74 // TODO(haraken): In the unlikely event that two threads decide | 73 // TODO(haraken): In the unlikely event that two threads decide |
75 // to collect garbage at the same time, avoid doing two GCs in | 74 // to collect garbage at the same time, avoid doing two GCs in |
76 // a row and return false. | 75 // a row and return false. |
77 double startTime = WTF::currentTimeMS(); | 76 double startTime = WTF::currentTimeMS(); |
78 | 77 |
79 m_shouldResumeThreads = ThreadState::stopThreads(); | 78 m_shouldResumeThreads = ThreadState::stopThreads(); |
(...skipping 12 matching lines...)
92 // Only clean up if we parked all threads, in which case the GC happened | 91 // Only clean up if we parked all threads, in which case the GC happened |
93 // and we need to resume the other threads. | 92 // and we need to resume the other threads. |
94 if (m_shouldResumeThreads) | 93 if (m_shouldResumeThreads) |
95 ThreadState::resumeThreads(); | 94 ThreadState::resumeThreads(); |
96 } | 95 } |
97 | 96 |
98 private: | 97 private: |
99 bool m_shouldResumeThreads; | 98 bool m_shouldResumeThreads; |
100 }; | 99 }; |
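ParkThreadsScope is a plain RAII guard: parkThreads() records whether all other threads were successfully stopped, and the destructor resumes them only in that case, so every early return after a failed park in collectGarbage() stays safe. A minimal sketch of the same idiom, where StopAll()/ResumeAll() are hypothetical placeholders for ThreadState::stopThreads()/resumeThreads(), not Blink API:

    // Sketch only, assuming placeholder park/resume primitives.
    inline bool StopAll() { return true; }  // pretend every other thread parked
    inline void ResumeAll() {}

    class ParkScope {
    public:
        ParkScope() : m_shouldResume(false) {}

        bool park()
        {
            // Record whether parking succeeded; the destructor keys off this.
            m_shouldResume = StopAll();
            return m_shouldResume;
        }

        ~ParkScope()
        {
            // Resume the other threads only if we actually parked them.
            if (m_shouldResume)
                ResumeAll();
        }

    private:
        bool m_shouldResume;
    };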
101 | 100 |
102 void Heap::flushHeapDoesNotContainCache() | 101 void ThreadHeap::flushHeapDoesNotContainCache() |
103 { | 102 { |
104 s_heapDoesNotContainCache->flush(); | 103 s_heapDoesNotContainCache->flush(); |
105 } | 104 } |
106 | 105 |
107 void ProcessHeap::init() | 106 void ProcessHeap::init() |
108 { | 107 { |
109 s_totalAllocatedSpace = 0; | 108 s_totalAllocatedSpace = 0; |
110 s_totalAllocatedObjectSize = 0; | 109 s_totalAllocatedObjectSize = 0; |
111 s_totalMarkedObjectSize = 0; | 110 s_totalMarkedObjectSize = 0; |
112 s_isLowEndDevice = base::SysInfo::IsLowEndDevice(); | 111 s_isLowEndDevice = base::SysInfo::IsLowEndDevice(); |
113 } | 112 } |
114 | 113 |
115 void ProcessHeap::resetHeapCounters() | 114 void ProcessHeap::resetHeapCounters() |
116 { | 115 { |
117 s_totalAllocatedObjectSize = 0; | 116 s_totalAllocatedObjectSize = 0; |
118 s_totalMarkedObjectSize = 0; | 117 s_totalMarkedObjectSize = 0; |
119 } | 118 } |
120 | 119 |
121 void Heap::init() | 120 void ThreadHeap::init() |
122 { | 121 { |
123 ThreadState::init(); | 122 ThreadState::init(); |
124 ProcessHeap::init(); | 123 ProcessHeap::init(); |
125 s_markingStack = new CallbackStack(); | 124 s_markingStack = new CallbackStack(); |
126 s_postMarkingCallbackStack = new CallbackStack(); | 125 s_postMarkingCallbackStack = new CallbackStack(); |
127 s_globalWeakCallbackStack = new CallbackStack(); | 126 s_globalWeakCallbackStack = new CallbackStack(); |
128 // Use smallest supported block size for ephemerons. | 127 // Use smallest supported block size for ephemerons. |
129 s_ephemeronStack = new CallbackStack(CallbackStack::kMinimalBlockSize); | 128 s_ephemeronStack = new CallbackStack(CallbackStack::kMinimalBlockSize); |
130 s_heapDoesNotContainCache = new HeapDoesNotContainCache(); | 129 s_heapDoesNotContainCache = new HeapDoesNotContainCache(); |
131 s_freePagePool = new FreePagePool(); | 130 s_freePagePool = new FreePagePool(); |
132 s_orphanedPagePool = new OrphanedPagePool(); | 131 s_orphanedPagePool = new OrphanedPagePool(); |
133 s_lastGCReason = BlinkGC::NumberOfGCReason; | 132 s_lastGCReason = BlinkGC::NumberOfGCReason; |
134 | 133 |
135 GCInfoTable::init(); | 134 GCInfoTable::init(); |
136 | 135 |
137 if (Platform::current() && Platform::current()->currentThread()) | 136 if (Platform::current() && Platform::current()->currentThread()) |
138 Platform::current()->registerMemoryDumpProvider(BlinkGCMemoryDumpProvider::instance(), "BlinkGC"); | 137 Platform::current()->registerMemoryDumpProvider(BlinkGCMemoryDumpProvider::instance(), "BlinkGC"); |
139 } | 138 } |
140 | 139 |
141 void Heap::shutdown() | 140 void ThreadHeap::shutdown() |
142 { | 141 { |
143 ASSERT(s_markingStack); | 142 ASSERT(s_markingStack); |
144 | 143 |
145 if (Platform::current() && Platform::current()->currentThread()) | 144 if (Platform::current() && Platform::current()->currentThread()) |
146 Platform::current()->unregisterMemoryDumpProvider(BlinkGCMemoryDumpProvider::instance()); | 145 Platform::current()->unregisterMemoryDumpProvider(BlinkGCMemoryDumpProvider::instance()); |
147 | 146 |
148 // The main thread must be the last thread that gets detached. | 147 // The main thread must be the last thread that gets detached. |
149 RELEASE_ASSERT(ThreadState::attachedThreads().size() == 0); | 148 RELEASE_ASSERT(ThreadState::attachedThreads().size() == 0); |
150 | 149 |
151 delete s_heapDoesNotContainCache; | 150 delete s_heapDoesNotContainCache; |
152 s_heapDoesNotContainCache = nullptr; | 151 s_heapDoesNotContainCache = nullptr; |
153 delete s_freePagePool; | 152 delete s_freePagePool; |
154 s_freePagePool = nullptr; | 153 s_freePagePool = nullptr; |
155 delete s_orphanedPagePool; | 154 delete s_orphanedPagePool; |
156 s_orphanedPagePool = nullptr; | 155 s_orphanedPagePool = nullptr; |
157 delete s_globalWeakCallbackStack; | 156 delete s_globalWeakCallbackStack; |
158 s_globalWeakCallbackStack = nullptr; | 157 s_globalWeakCallbackStack = nullptr; |
159 delete s_postMarkingCallbackStack; | 158 delete s_postMarkingCallbackStack; |
160 s_postMarkingCallbackStack = nullptr; | 159 s_postMarkingCallbackStack = nullptr; |
161 delete s_markingStack; | 160 delete s_markingStack; |
162 s_markingStack = nullptr; | 161 s_markingStack = nullptr; |
163 delete s_ephemeronStack; | 162 delete s_ephemeronStack; |
164 s_ephemeronStack = nullptr; | 163 s_ephemeronStack = nullptr; |
165 GCInfoTable::shutdown(); | 164 GCInfoTable::shutdown(); |
166 ThreadState::shutdown(); | 165 ThreadState::shutdown(); |
167 ASSERT(Heap::heapStats().allocatedSpace() == 0); | 166 ASSERT(ThreadHeap::heapStats().allocatedSpace() == 0); |
168 } | 167 } |
169 | 168 |
170 CrossThreadPersistentRegion& ProcessHeap::crossThreadPersistentRegion() | 169 CrossThreadPersistentRegion& ProcessHeap::crossThreadPersistentRegion() |
171 { | 170 { |
172 DEFINE_THREAD_SAFE_STATIC_LOCAL(CrossThreadPersistentRegion, persistentRegion, new CrossThreadPersistentRegion()); | 171 DEFINE_THREAD_SAFE_STATIC_LOCAL(CrossThreadPersistentRegion, persistentRegion, new CrossThreadPersistentRegion()); |
173 return persistentRegion; | 172 return persistentRegion; |
174 } | 173 } |
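DEFINE_THREAD_SAFE_STATIC_LOCAL builds a construct-on-first-use singleton that is deliberately leaked, which sidesteps destruction-order problems at process shutdown. A sketch of that shape in standard C++ (the macro's real expansion differs; this only illustrates the idiom):

    #include <utility>

    // Construct on first use, never destroy. C++11 guarantees the
    // initialization of the function-local static below is thread-safe.
    template <typename T, typename... Args>
    T& leakySingleton(Args&&... args)
    {
        static T* instance = new T(std::forward<Args>(args)...);
        return *instance;
    }

Under this sketch, crossThreadPersistentRegion() would reduce to return leakySingleton<CrossThreadPersistentRegion>();.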
175 | 174 |
176 bool ProcessHeap::s_isLowEndDevice = false; | 175 bool ProcessHeap::s_isLowEndDevice = false; |
177 size_t ProcessHeap::s_totalAllocatedSpace = 0; | 176 size_t ProcessHeap::s_totalAllocatedSpace = 0; |
(...skipping 61 matching lines...)
239 ProcessHeap::increaseTotalAllocatedSpace(delta); | 238 ProcessHeap::increaseTotalAllocatedSpace(delta); |
240 } | 239 } |
241 | 240 |
242 void ThreadHeapStats::decreaseAllocatedSpace(size_t delta) | 241 void ThreadHeapStats::decreaseAllocatedSpace(size_t delta) |
243 { | 242 { |
244 atomicSubtract(&m_allocatedSpace, static_cast<long>(delta)); | 243 atomicSubtract(&m_allocatedSpace, static_cast<long>(delta)); |
245 ProcessHeap::decreaseTotalAllocatedSpace(delta); | 244 ProcessHeap::decreaseTotalAllocatedSpace(delta); |
246 } | 245 } |
247 | 246 |
248 #if ENABLE(ASSERT) | 247 #if ENABLE(ASSERT) |
249 BasePage* Heap::findPageFromAddress(Address address) | 248 BasePage* ThreadHeap::findPageFromAddress(Address address) |
250 { | 249 { |
251 MutexLocker lock(ThreadState::threadAttachMutex()); | 250 MutexLocker lock(ThreadState::threadAttachMutex()); |
252 for (ThreadState* state : ThreadState::attachedThreads()) { | 251 for (ThreadState* state : ThreadState::attachedThreads()) { |
253 if (BasePage* page = state->findPageFromAddress(address)) | 252 if (BasePage* page = state->findPageFromAddress(address)) |
254 return page; | 253 return page; |
255 } | 254 } |
256 return nullptr; | 255 return nullptr; |
257 } | 256 } |
258 #endif | 257 #endif |
259 | 258 |
260 Address Heap::checkAndMarkPointer(Visitor* visitor, Address address) | 259 Address ThreadHeap::checkAndMarkPointer(Visitor* visitor, Address address) |
261 { | 260 { |
262 ASSERT(ThreadState::current()->isInGC()); | 261 ASSERT(ThreadState::current()->isInGC()); |
263 | 262 |
264 #if !ENABLE(ASSERT) | 263 #if !ENABLE(ASSERT) |
265 if (s_heapDoesNotContainCache->lookup(address)) | 264 if (s_heapDoesNotContainCache->lookup(address)) |
266 return nullptr; | 265 return nullptr; |
267 #endif | 266 #endif |
268 | 267 |
269 if (BasePage* page = lookup(address)) { | 268 if (BasePage* page = lookup(address)) { |
270 ASSERT(page->contains(address)); | 269 ASSERT(page->contains(address)); |
271 ASSERT(!page->orphaned()); | 270 ASSERT(!page->orphaned()); |
272 ASSERT(!s_heapDoesNotContainCache->lookup(address)); | 271 ASSERT(!s_heapDoesNotContainCache->lookup(address)); |
273 page->checkAndMarkPointer(visitor, address); | 272 page->checkAndMarkPointer(visitor, address); |
274 return address; | 273 return address; |
275 } | 274 } |
276 | 275 |
277 #if !ENABLE(ASSERT) | 276 #if !ENABLE(ASSERT) |
278 s_heapDoesNotContainCache->addEntry(address); | 277 s_heapDoesNotContainCache->addEntry(address); |
279 #else | 278 #else |
280 if (!s_heapDoesNotContainCache->lookup(address)) | 279 if (!s_heapDoesNotContainCache->lookup(address)) |
281 s_heapDoesNotContainCache->addEntry(address); | 280 s_heapDoesNotContainCache->addEntry(address); |
282 #endif | 281 #endif |
283 return nullptr; | 282 return nullptr; |
284 } | 283 } |
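checkAndMarkPointer() is the conservative-scanning hot path: in release builds (the !ENABLE(ASSERT) branches) a "heap does not contain" cache filters out stack words that previously failed the page lookup before the region tree is consulted, and flushHeapDoesNotContainCache() above empties it when the answer may change. A simplified stand-in for that negative cache; the real HeapDoesNotContainCache is a fixed-size hash cache, so the container choice here is an assumption:

    #include <cstdint>
    #include <unordered_set>

    // Remembers addresses proven to lie outside the managed heap so repeated
    // stack words can skip the expensive page lookup.
    class NotInHeapCache {
    public:
        bool lookup(uintptr_t address) const { return m_misses.count(address) != 0; }
        void addEntry(uintptr_t address) { m_misses.insert(address); }
        void flush() { m_misses.clear(); }  // entries go stale when heap pages are added
    private:
        std::unordered_set<uintptr_t> m_misses;
    };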
285 | 284 |
286 void Heap::pushTraceCallback(void* object, TraceCallback callback) | 285 void ThreadHeap::pushTraceCallback(void* object, TraceCallback callback) |
287 { | 286 { |
288 ASSERT(ThreadState::current()->isInGC()); | 287 ASSERT(ThreadState::current()->isInGC()); |
289 | 288 |
290 // Trace should never reach an orphaned page. | 289 // Trace should never reach an orphaned page. |
291 ASSERT(!Heap::getOrphanedPagePool()->contains(object)); | 290 ASSERT(!ThreadHeap::getOrphanedPagePool()->contains(object)); |
292 CallbackStack::Item* slot = s_markingStack->allocateEntry(); | 291 CallbackStack::Item* slot = s_markingStack->allocateEntry(); |
293 *slot = CallbackStack::Item(object, callback); | 292 *slot = CallbackStack::Item(object, callback); |
294 } | 293 } |
295 | 294 |
296 bool Heap::popAndInvokeTraceCallback(Visitor* visitor) | 295 bool ThreadHeap::popAndInvokeTraceCallback(Visitor* visitor) |
297 { | 296 { |
298 CallbackStack::Item* item = s_markingStack->pop(); | 297 CallbackStack::Item* item = s_markingStack->pop(); |
299 if (!item) | 298 if (!item) |
300 return false; | 299 return false; |
301 item->call(visitor); | 300 item->call(visitor); |
302 return true; | 301 return true; |
303 } | 302 } |
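The marking-stack protocol above is: push an (object, callback) pair, then pop and invoke until the stack is drained. A self-contained sketch of that contract; the real CallbackStack is segmented and growable, so the vector here is a simplification:

    #include <vector>

    struct SketchVisitor;  // stands in for blink::Visitor
    using SketchTraceCallback = void (*)(SketchVisitor*, void*);

    class SketchCallbackStack {
    public:
        void push(void* object, SketchTraceCallback callback)
        {
            m_items.push_back({object, callback});
        }

        // Returns false once drained, mirroring popAndInvokeTraceCallback().
        bool popAndInvoke(SketchVisitor* visitor)
        {
            if (m_items.empty())
                return false;
            Item item = m_items.back();
            m_items.pop_back();
            item.callback(visitor, item.object);
            return true;
        }

    private:
        struct Item {
            void* object;
            SketchTraceCallback callback;
        };
        std::vector<Item> m_items;
    };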
304 | 303 |
305 void Heap::pushPostMarkingCallback(void* object, TraceCallback callback) | 304 void ThreadHeap::pushPostMarkingCallback(void* object, TraceCallback callback) |
306 { | 305 { |
307 ASSERT(ThreadState::current()->isInGC()); | 306 ASSERT(ThreadState::current()->isInGC()); |
308 | 307 |
309 // Trace should never reach an orphaned page. | 308 // Trace should never reach an orphaned page. |
310 ASSERT(!Heap::getOrphanedPagePool()->contains(object)); | 309 ASSERT(!ThreadHeap::getOrphanedPagePool()->contains(object)); |
311 CallbackStack::Item* slot = s_postMarkingCallbackStack->allocateEntry(); | 310 CallbackStack::Item* slot = s_postMarkingCallbackStack->allocateEntry(); |
312 *slot = CallbackStack::Item(object, callback); | 311 *slot = CallbackStack::Item(object, callback); |
313 } | 312 } |
314 | 313 |
315 bool Heap::popAndInvokePostMarkingCallback(Visitor* visitor) | 314 bool ThreadHeap::popAndInvokePostMarkingCallback(Visitor* visitor) |
316 { | 315 { |
317 if (CallbackStack::Item* item = s_postMarkingCallbackStack->pop()) { | 316 if (CallbackStack::Item* item = s_postMarkingCallbackStack->pop()) { |
318 item->call(visitor); | 317 item->call(visitor); |
319 return true; | 318 return true; |
320 } | 319 } |
321 return false; | 320 return false; |
322 } | 321 } |
323 | 322 |
324 void Heap::pushGlobalWeakCallback(void** cell, WeakCallback callback) | 323 void ThreadHeap::pushGlobalWeakCallback(void** cell, WeakCallback callback) |
325 { | 324 { |
326 ASSERT(ThreadState::current()->isInGC()); | 325 ASSERT(ThreadState::current()->isInGC()); |
327 | 326 |
328 // Trace should never reach an orphaned page. | 327 // Trace should never reach an orphaned page. |
329 ASSERT(!Heap::getOrphanedPagePool()->contains(cell)); | 328 ASSERT(!ThreadHeap::getOrphanedPagePool()->contains(cell)); |
330 CallbackStack::Item* slot = s_globalWeakCallbackStack->allocateEntry(); | 329 CallbackStack::Item* slot = s_globalWeakCallbackStack->allocateEntry(); |
331 *slot = CallbackStack::Item(cell, callback); | 330 *slot = CallbackStack::Item(cell, callback); |
332 } | 331 } |
333 | 332 |
334 void Heap::pushThreadLocalWeakCallback(void* closure, void* object, WeakCallback callback) | 333 void ThreadHeap::pushThreadLocalWeakCallback(void* closure, void* object, WeakCallback callback) |
335 { | 334 { |
336 ASSERT(ThreadState::current()->isInGC()); | 335 ASSERT(ThreadState::current()->isInGC()); |
337 | 336 |
338 // Trace should never reach an orphaned page. | 337 // Trace should never reach an orphaned page. |
339 ASSERT(!Heap::getOrphanedPagePool()->contains(object)); | 338 ASSERT(!ThreadHeap::getOrphanedPagePool()->contains(object)); |
340 ThreadState* state = pageFromObject(object)->arena()->getThreadState(); | 339 ThreadState* state = pageFromObject(object)->arena()->getThreadState(); |
341 state->pushThreadLocalWeakCallback(closure, callback); | 340 state->pushThreadLocalWeakCallback(closure, callback); |
342 } | 341 } |
343 | 342 |
344 bool Heap::popAndInvokeGlobalWeakCallback(Visitor* visitor) | 343 bool ThreadHeap::popAndInvokeGlobalWeakCallback(Visitor* visitor) |
345 { | 344 { |
346 if (CallbackStack::Item* item = s_globalWeakCallbackStack->pop()) { | 345 if (CallbackStack::Item* item = s_globalWeakCallbackStack->pop()) { |
347 item->call(visitor); | 346 item->call(visitor); |
348 return true; | 347 return true; |
349 } | 348 } |
350 return false; | 349 return false; |
351 } | 350 } |
352 | 351 |
353 void Heap::registerWeakTable(void* table, EphemeronCallback iterationCallback, EphemeronCallback iterationDoneCallback) | 352 void ThreadHeap::registerWeakTable(void* table, EphemeronCallback iterationCallback, EphemeronCallback iterationDoneCallback) |
354 { | 353 { |
355 ASSERT(ThreadState::current()->isInGC()); | 354 ASSERT(ThreadState::current()->isInGC()); |
356 | 355 |
357 // Trace should never reach an orphaned page. | 356 // Trace should never reach an orphaned page. |
358 ASSERT(!Heap::getOrphanedPagePool()->contains(table)); | 357 ASSERT(!ThreadHeap::getOrphanedPagePool()->contains(table)); |
359 CallbackStack::Item* slot = s_ephemeronStack->allocateEntry(); | 358 CallbackStack::Item* slot = s_ephemeronStack->allocateEntry(); |
360 *slot = CallbackStack::Item(table, iterationCallback); | 359 *slot = CallbackStack::Item(table, iterationCallback); |
361 | 360 |
362 // Register a post-marking callback to tell the tables that | 361 // Register a post-marking callback to tell the tables that |
363 // ephemeron iteration is complete. | 362 // ephemeron iteration is complete. |
364 pushPostMarkingCallback(table, iterationDoneCallback); | 363 pushPostMarkingCallback(table, iterationDoneCallback); |
365 } | 364 } |
366 | 365 |
367 #if ENABLE(ASSERT) | 366 #if ENABLE(ASSERT) |
368 bool Heap::weakTableRegistered(const void* table) | 367 bool ThreadHeap::weakTableRegistered(const void* table) |
369 { | 368 { |
370 ASSERT(s_ephemeronStack); | 369 ASSERT(s_ephemeronStack); |
371 return s_ephemeronStack->hasCallbackForObject(table); | 370 return s_ephemeronStack->hasCallbackForObject(table); |
372 } | 371 } |
373 #endif | 372 #endif |
374 | 373 |
375 void Heap::decommitCallbackStacks() | 374 void ThreadHeap::decommitCallbackStacks() |
376 { | 375 { |
377 s_markingStack->decommit(); | 376 s_markingStack->decommit(); |
378 s_postMarkingCallbackStack->decommit(); | 377 s_postMarkingCallbackStack->decommit(); |
379 s_globalWeakCallbackStack->decommit(); | 378 s_globalWeakCallbackStack->decommit(); |
380 s_ephemeronStack->decommit(); | 379 s_ephemeronStack->decommit(); |
381 } | 380 } |
382 | 381 |
383 void Heap::preGC() | 382 void ThreadHeap::preGC() |
384 { | 383 { |
385 ASSERT(!ThreadState::current()->isInGC()); | 384 ASSERT(!ThreadState::current()->isInGC()); |
386 for (ThreadState* state : ThreadState::attachedThreads()) | 385 for (ThreadState* state : ThreadState::attachedThreads()) |
387 state->preGC(); | 386 state->preGC(); |
388 } | 387 } |
389 | 388 |
390 void Heap::postGC(BlinkGC::GCType gcType) | 389 void ThreadHeap::postGC(BlinkGC::GCType gcType) |
391 { | 390 { |
392 ASSERT(ThreadState::current()->isInGC()); | 391 ASSERT(ThreadState::current()->isInGC()); |
393 for (ThreadState* state : ThreadState::attachedThreads()) | 392 for (ThreadState* state : ThreadState::attachedThreads()) |
394 state->postGC(gcType); | 393 state->postGC(gcType); |
395 } | 394 } |
396 | 395 |
397 const char* Heap::gcReasonString(BlinkGC::GCReason reason) | 396 const char* ThreadHeap::gcReasonString(BlinkGC::GCReason reason) |
398 { | 397 { |
399 switch (reason) { | 398 switch (reason) { |
400 case BlinkGC::IdleGC: | 399 case BlinkGC::IdleGC: |
401 return "IdleGC"; | 400 return "IdleGC"; |
402 case BlinkGC::PreciseGC: | 401 case BlinkGC::PreciseGC: |
403 return "PreciseGC"; | 402 return "PreciseGC"; |
404 case BlinkGC::ConservativeGC: | 403 case BlinkGC::ConservativeGC: |
405 return "ConservativeGC"; | 404 return "ConservativeGC"; |
406 case BlinkGC::ForcedGC: | 405 case BlinkGC::ForcedGC: |
407 return "ForcedGC"; | 406 return "ForcedGC"; |
408 case BlinkGC::MemoryPressureGC: | 407 case BlinkGC::MemoryPressureGC: |
409 return "MemoryPressureGC"; | 408 return "MemoryPressureGC"; |
410 case BlinkGC::PageNavigationGC: | 409 case BlinkGC::PageNavigationGC: |
411 return "PageNavigationGC"; | 410 return "PageNavigationGC"; |
412 default: | 411 default: |
413 ASSERT_NOT_REACHED(); | 412 ASSERT_NOT_REACHED(); |
414 } | 413 } |
415 return "<Unknown>"; | 414 return "<Unknown>"; |
416 } | 415 } |
417 | 416 |
418 void Heap::collectGarbage(BlinkGC::StackState stackState, BlinkGC::GCType gcType, BlinkGC::GCReason reason) | 417 void ThreadHeap::collectGarbage(BlinkGC::StackState stackState, BlinkGC::GCType gcType, BlinkGC::GCReason reason) |
419 { | 418 { |
420 ASSERT(gcType != BlinkGC::ThreadTerminationGC); | 419 ASSERT(gcType != BlinkGC::ThreadTerminationGC); |
421 | 420 |
422 ThreadState* state = ThreadState::current(); | 421 ThreadState* state = ThreadState::current(); |
423 // Nested collectGarbage() invocations aren't supported. | 422 // Nested collectGarbage() invocations aren't supported. |
424 RELEASE_ASSERT(!state->isGCForbidden()); | 423 RELEASE_ASSERT(!state->isGCForbidden()); |
425 state->completeSweep(); | 424 state->completeSweep(); |
426 | 425 |
427 size_t debugAllocatedObjectSize = Heap::heapStats().allocatedObjectSize(); | |
428 base::debug::Alias(&debugAllocatedObjectSize); | |
429 size_t debugWrapperCount = Heap::heapStats().wrapperCount(); | |
430 base::debug::Alias(&debugWrapperCount); | |
431 | |
432 OwnPtr<Visitor> visitor = Visitor::create(state, gcType); | 426 OwnPtr<Visitor> visitor = Visitor::create(state, gcType); |
433 | 427 |
434 SafePointScope safePointScope(stackState, state); | 428 SafePointScope safePointScope(stackState, state); |
435 | 429 |
436 // Resume all parked threads upon leaving this scope. | 430 // Resume all parked threads upon leaving this scope. |
437 ParkThreadsScope parkThreadsScope; | 431 ParkThreadsScope parkThreadsScope; |
438 | 432 |
439 // Try to park the other threads. If we're unable to, bail out of the GC. | 433 // Try to park the other threads. If we're unable to, bail out of the GC. |
440 if (!parkThreadsScope.parkThreads(state)) | 434 if (!parkThreadsScope.parkThreads(state)) |
441 return; | 435 return; |
442 | 436 |
443 ScriptForbiddenIfMainThreadScope scriptForbidden; | 437 ScriptForbiddenIfMainThreadScope scriptForbidden; |
444 | 438 |
445 TRACE_EVENT2("blink_gc,devtools.timeline", "Heap::collectGarbage", | 439 TRACE_EVENT2("blink_gc,devtools.timeline", "BlinkGCMarking", |
446 "lazySweeping", gcType == BlinkGC::GCWithoutSweep, | 440 "lazySweeping", gcType == BlinkGC::GCWithoutSweep, |
447 "gcReason", gcReasonString(reason)); | 441 "gcReason", gcReasonString(reason)); |
448 TRACE_EVENT_SCOPED_SAMPLING_STATE("blink_gc", "BlinkGC"); | 442 TRACE_EVENT_SCOPED_SAMPLING_STATE("blink_gc", "BlinkGC"); |
449 double startTime = WTF::currentTimeMS(); | 443 double startTime = WTF::currentTimeMS(); |
450 | 444 |
451 if (gcType == BlinkGC::TakeSnapshot) | 445 if (gcType == BlinkGC::TakeSnapshot) |
452 BlinkGCMemoryDumpProvider::instance()->clearProcessDumpForCurrentGC(); | 446 BlinkGCMemoryDumpProvider::instance()->clearProcessDumpForCurrentGC(); |
453 | 447 |
454 // Disallow allocation during garbage collection (but not during the | 448 // Disallow allocation during garbage collection (but not during the |
455 // finalization that happens when the visitorScope is torn down). | 449 // finalization that happens when the visitorScope is torn down). |
456 ThreadState::NoAllocationScope noAllocationScope(state); | 450 ThreadState::NoAllocationScope noAllocationScope(state); |
457 | 451 |
458 preGC(); | 452 preGC(); |
459 | 453 |
460 StackFrameDepthScope stackDepthScope; | 454 StackFrameDepthScope stackDepthScope; |
461 | 455 |
462 size_t totalObjectSize = Heap::heapStats().allocatedObjectSize() + Heap::heapStats().markedObjectSize(); | 456 size_t totalObjectSize = ThreadHeap::heapStats().allocatedObjectSize() + ThreadHeap::heapStats().markedObjectSize(); |
463 if (gcType != BlinkGC::TakeSnapshot) | 457 if (gcType != BlinkGC::TakeSnapshot) |
464 Heap::resetHeapCounters(); | 458 ThreadHeap::resetHeapCounters(); |
465 | 459 |
466 // 1. Trace persistent roots. | 460 // 1. Trace persistent roots. |
467 ThreadState::visitPersistentRoots(visitor.get()); | 461 ThreadState::visitPersistentRoots(visitor.get()); |
468 | 462 |
469 // 2. Trace objects reachable from the stack. We do this independent of the | 463 // 2. Trace objects reachable from the stack. We do this independent of the |
470 // given stackState since other threads might have a different stack state. | 464 // given stackState since other threads might have a different stack state. |
471 ThreadState::visitStackRoots(visitor.get()); | 465 ThreadState::visitStackRoots(visitor.get()); |
472 | 466 |
473 // 3. Transitive closure to trace objects including ephemerons. | 467 // 3. Transitive closure to trace objects including ephemerons. |
474 processMarkingStack(visitor.get()); | 468 processMarkingStack(visitor.get()); |
475 | 469 |
476 postMarkingProcessing(visitor.get()); | 470 postMarkingProcessing(visitor.get()); |
477 globalWeakProcessing(visitor.get()); | 471 globalWeakProcessing(visitor.get()); |
478 | 472 |
479 // Now we can delete all orphaned pages because there are no dangling | 473 // Now we can delete all orphaned pages because there are no dangling |
480 // pointers to the orphaned pages. (If we have such dangling pointers, | 474 // pointers to the orphaned pages. (If we have such dangling pointers, |
481 // we should have crashed during marking before getting here.) | 475 // we should have crashed during marking before getting here.) |
482 getOrphanedPagePool()->decommitOrphanedPages(); | 476 getOrphanedPagePool()->decommitOrphanedPages(); |
483 | 477 |
484 double markingTimeInMilliseconds = WTF::currentTimeMS() - startTime; | 478 double markingTimeInMilliseconds = WTF::currentTimeMS() - startTime; |
485 Heap::heapStats().setEstimatedMarkingTimePerByte(totalObjectSize ? (markingTimeInMilliseconds / 1000 / totalObjectSize) : 0); | 479 ThreadHeap::heapStats().setEstimatedMarkingTimePerByte(totalObjectSize ? (markingTimeInMilliseconds / 1000 / totalObjectSize) : 0); |
486 | 480 |
487 #if PRINT_HEAP_STATS | 481 #if PRINT_HEAP_STATS |
488 dataLogF("Heap::collectGarbage (gcReason=%s, lazySweeping=%d, time=%.1lfms)\n", gcReasonString(reason), gcType == BlinkGC::GCWithoutSweep, markingTimeInMilliseconds); | 482 dataLogF("ThreadHeap::collectGarbage (gcReason=%s, lazySweeping=%d, time=%.1lfms)\n", gcReasonString(reason), gcType == BlinkGC::GCWithoutSweep, markingTimeInMilliseconds); |
489 #endif | 483 #endif |
490 | 484 |
491 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, markingTimeHistogram, new CustomCountHistogram("BlinkGC.CollectGarbage", 0, 10 * 1000, 50)); | 485 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, markingTimeHistogram, new CustomCountHistogram("BlinkGC.CollectGarbage", 0, 10 * 1000, 50)); |
492 markingTimeHistogram.count(markingTimeInMilliseconds); | 486 markingTimeHistogram.count(markingTimeInMilliseconds); |
493 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, totalObjectSpaceHistogram, new CustomCountHistogram("BlinkGC.TotalObjectSpace", 0, 4 * 1024 * 1024, 50)); | 487 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, totalObjectSpaceHistogram, new CustomCountHistogram("BlinkGC.TotalObjectSpace", 0, 4 * 1024 * 1024, 50)); |
494 totalObjectSpaceHistogram.count(ProcessHeap::totalAllocatedObjectSize() / 1024); | 488 totalObjectSpaceHistogram.count(ProcessHeap::totalAllocatedObjectSize() / 1024); |
495 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, totalAllocatedSpaceHistogram, new CustomCountHistogram("BlinkGC.TotalAllocatedSpace", 0, 4 * 1024 * 1024, 50)); | 489 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, totalAllocatedSpaceHistogram, new CustomCountHistogram("BlinkGC.TotalAllocatedSpace", 0, 4 * 1024 * 1024, 50)); |
496 totalAllocatedSpaceHistogram.count(ProcessHeap::totalAllocatedSpace() / 1024); | 490 totalAllocatedSpaceHistogram.count(ProcessHeap::totalAllocatedSpace() / 1024); |
497 DEFINE_THREAD_SAFE_STATIC_LOCAL(EnumerationHistogram, gcReasonHistogram, new EnumerationHistogram("BlinkGC.GCReason", BlinkGC::NumberOfGCReason)); | 491 DEFINE_THREAD_SAFE_STATIC_LOCAL(EnumerationHistogram, gcReasonHistogram, new EnumerationHistogram("BlinkGC.GCReason", BlinkGC::NumberOfGCReason)); |
498 gcReasonHistogram.count(reason); | 492 gcReasonHistogram.count(reason); |
499 | 493 |
500 s_lastGCReason = reason; | 494 s_lastGCReason = reason; |
501 | 495 |
502 Heap::reportMemoryUsageHistogram(); | 496 ThreadHeap::reportMemoryUsageHistogram(); |
503 WTF::Partitions::reportMemoryUsageHistogram(); | 497 WTF::Partitions::reportMemoryUsageHistogram(); |
504 | 498 |
505 postGC(gcType); | 499 postGC(gcType); |
506 Heap::decommitCallbackStacks(); | 500 ThreadHeap::decommitCallbackStacks(); |
507 } | 501 } |
508 | 502 |
509 void Heap::collectGarbageForTerminatingThread(ThreadState* state) | 503 void ThreadHeap::collectGarbageForTerminatingThread(ThreadState* state) |
510 { | 504 { |
511 { | 505 { |
512 // A thread-specific termination GC must not allow other global GCs to go | 506 // A thread-specific termination GC must not allow other global GCs to go |
513 // ahead while it is running, hence the termination GC does not enter a | 507 // ahead while it is running, hence the termination GC does not enter a |
514 // safepoint. VisitorScope will also not enter a safepoint scope for | 508 // safepoint. VisitorScope will also not enter a safepoint scope for |
515 // ThreadTerminationGC. | 509 // ThreadTerminationGC. |
516 OwnPtr<Visitor> visitor = Visitor::create(state, BlinkGC::ThreadTerminationGC); | 510 OwnPtr<Visitor> visitor = Visitor::create(state, BlinkGC::ThreadTerminationGC); |
517 | 511 |
518 ThreadState::NoAllocationScope noAllocationScope(state); | 512 ThreadState::NoAllocationScope noAllocationScope(state); |
519 | 513 |
(...skipping 12 matching lines...)
532 state->visitPersistents(visitor.get()); | 526 state->visitPersistents(visitor.get()); |
533 | 527 |
534 // 2. Trace objects reachable from the thread's persistent roots | 528 // 2. Trace objects reachable from the thread's persistent roots |
535 // including ephemerons. | 529 // including ephemerons. |
536 processMarkingStack(visitor.get()); | 530 processMarkingStack(visitor.get()); |
537 | 531 |
538 postMarkingProcessing(visitor.get()); | 532 postMarkingProcessing(visitor.get()); |
539 globalWeakProcessing(visitor.get()); | 533 globalWeakProcessing(visitor.get()); |
540 | 534 |
541 state->postGC(BlinkGC::GCWithSweep); | 535 state->postGC(BlinkGC::GCWithSweep); |
542 Heap::decommitCallbackStacks(); | 536 ThreadHeap::decommitCallbackStacks(); |
543 } | 537 } |
544 state->preSweep(); | 538 state->preSweep(); |
545 } | 539 } |
546 | 540 |
547 void Heap::processMarkingStack(Visitor* visitor) | 541 void ThreadHeap::processMarkingStack(Visitor* visitor) |
548 { | 542 { |
549 // Ephemeron fixed point loop. | 543 // Ephemeron fixed point loop. |
550 do { | 544 do { |
551 { | 545 { |
552 // Iteratively mark all objects that are reachable from the objects | 546 // Iteratively mark all objects that are reachable from the objects |
553 // currently pushed onto the marking stack. | 547 // currently pushed onto the marking stack. |
554 TRACE_EVENT0("blink_gc", "Heap::processMarkingStackSingleThreaded"); | 548 TRACE_EVENT0("blink_gc", "ThreadHeap::processMarkingStackSingleThreaded"); |
555 while (popAndInvokeTraceCallback(visitor)) { } | 549 while (popAndInvokeTraceCallback(visitor)) { } |
556 } | 550 } |
557 | 551 |
558 { | 552 { |
559 // Mark any strong pointers that have now become reachable in | 553 // Mark any strong pointers that have now become reachable in |
560 // ephemeron maps. | 554 // ephemeron maps. |
561 TRACE_EVENT0("blink_gc", "Heap::processEphemeronStack"); | 555 TRACE_EVENT0("blink_gc", "ThreadHeap::processEphemeronStack"); |
562 s_ephemeronStack->invokeEphemeronCallbacks(visitor); | 556 s_ephemeronStack->invokeEphemeronCallbacks(visitor); |
563 } | 557 } |
564 | 558 |
565 // Rerun loop if ephemeron processing queued more objects for tracing. | 559 // Rerun loop if ephemeron processing queued more objects for tracing. |
566 } while (!s_markingStack->isEmpty()); | 560 } while (!s_markingStack->isEmpty()); |
567 } | 561 } |
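The loop above is a classic ephemeron fixed point: draining the marking stack can make new ephemeron keys live, and firing the ephemeron callbacks can push new tracing work, so both steps repeat until the marking stack stays empty. Schematically, with std::function stand-ins for the real callback types:

    #include <functional>
    #include <vector>

    void processToFixedPoint(std::vector<std::function<void()>>& markingStack,
                             const std::vector<std::function<void()>>& ephemeronCallbacks)
    {
        do {
            // 1. Trace everything currently known to be reachable.
            while (!markingStack.empty()) {
                auto work = markingStack.back();
                markingStack.pop_back();
                work();  // may push more entries onto markingStack
            }
            // 2. Re-run ephemeron callbacks; newly marked keys may enqueue
            //    their values for tracing, restarting the loop.
            for (const auto& callback : ephemeronCallbacks)
                callback();
        } while (!markingStack.empty());
    }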
568 | 562 |
569 void Heap::postMarkingProcessing(Visitor* visitor) | 563 void ThreadHeap::postMarkingProcessing(Visitor* visitor) |
570 { | 564 { |
571 TRACE_EVENT0("blink_gc", "Heap::postMarkingProcessing"); | 565 TRACE_EVENT0("blink_gc", "ThreadHeap::postMarkingProcessing"); |
572 // Call post-marking callbacks including: | 566 // Call post-marking callbacks including: |
573 // 1. the ephemeronIterationDone callbacks on weak tables to do cleanup | 567 // 1. the ephemeronIterationDone callbacks on weak tables to do cleanup |
574 // (specifically to clear the queued bits for weak hash tables), and | 568 // (specifically to clear the queued bits for weak hash tables), and |
575 // 2. the markNoTracing callbacks on collection backings to mark them | 569 // 2. the markNoTracing callbacks on collection backings to mark them |
576 // if they are only reachable from their front objects. | 570 // if they are only reachable from their front objects. |
577 while (popAndInvokePostMarkingCallback(visitor)) { } | 571 while (popAndInvokePostMarkingCallback(visitor)) { } |
578 | 572 |
579 // Post-marking callbacks should not trace any objects and | 573 // Post-marking callbacks should not trace any objects and |
580 // therefore the marking stack should be empty after the | 574 // therefore the marking stack should be empty after the |
581 // post-marking callbacks. | 575 // post-marking callbacks. |
582 ASSERT(s_markingStack->isEmpty()); | 576 ASSERT(s_markingStack->isEmpty()); |
583 } | 577 } |
584 | 578 |
585 void Heap::globalWeakProcessing(Visitor* visitor) | 579 void ThreadHeap::globalWeakProcessing(Visitor* visitor) |
586 { | 580 { |
587 TRACE_EVENT0("blink_gc", "Heap::globalWeakProcessing"); | 581 TRACE_EVENT0("blink_gc", "ThreadHeap::globalWeakProcessing"); |
588 double startTime = WTF::currentTimeMS(); | 582 double startTime = WTF::currentTimeMS(); |
589 | 583 |
590 // Call weak callbacks on objects that may now be pointing to dead objects. | 584 // Call weak callbacks on objects that may now be pointing to dead objects. |
591 while (popAndInvokeGlobalWeakCallback(visitor)) { } | 585 while (popAndInvokeGlobalWeakCallback(visitor)) { } |
592 | 586 |
593 // It is not permitted to trace pointers of live objects in the weak | 587 // It is not permitted to trace pointers of live objects in the weak |
594 // callback phase, so the marking stack should still be empty here. | 588 // callback phase, so the marking stack should still be empty here. |
595 ASSERT(s_markingStack->isEmpty()); | 589 ASSERT(s_markingStack->isEmpty()); |
596 | 590 |
597 double timeForGlobalWeakProcessing = WTF::currentTimeMS() - startTime; | 591 double timeForGlobalWeakProcessing = WTF::currentTimeMS() - startTime; |
598 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, globalWeakTimeHistogram, new CustomCountHistogram("BlinkGC.TimeForGlobalWeakProcessing", 1, 10 * 1000, 50)); | 592 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, globalWeakTimeHistogram, new CustomCountHistogram("BlinkGC.TimeForGlobalWeakProcessing", 1, 10 * 1000, 50)); |
599 globalWeakTimeHistogram.count(timeForGlobalWeakProcessing); | 593 globalWeakTimeHistogram.count(timeForGlobalWeakProcessing); |
600 } | 594 } |
601 | 595 |
602 void Heap::collectAllGarbage() | 596 void ThreadHeap::collectAllGarbage() |
603 { | 597 { |
604 // We need to run multiple GCs to collect a chain of persistent handles. | 598 // We need to run multiple GCs to collect a chain of persistent handles. |
605 size_t previousLiveObjects = 0; | 599 size_t previousLiveObjects = 0; |
606 for (int i = 0; i < 5; ++i) { | 600 for (int i = 0; i < 5; ++i) { |
607 collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithSweep, BlinkGC::ForcedGC); | 601 collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithSweep, BlinkGC::ForcedGC); |
608 size_t liveObjects = Heap::heapStats().markedObjectSize(); | 602 size_t liveObjects = ThreadHeap::heapStats().markedObjectSize(); |
609 if (liveObjects == previousLiveObjects) | 603 if (liveObjects == previousLiveObjects) |
610 break; | 604 break; |
611 previousLiveObjects = liveObjects; | 605 previousLiveObjects = liveObjects; |
612 } | 606 } |
613 } | 607 } |
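collectAllGarbage() iterates because a chain of persistent handles can need one forced GC per link; the loop stops early once the marked size stops changing, with five rounds as a safety cap. The convergence test in isolation, where runForcedGC is a placeholder returning the marked object size after one forced collection:

    #include <cstddef>
    #include <functional>

    // Run forced GCs until the live size stabilizes or maxRounds is hit.
    size_t collectUntilStable(const std::function<size_t()>& runForcedGC, int maxRounds = 5)
    {
        size_t previousLiveObjects = 0;
        for (int i = 0; i < maxRounds; ++i) {
            size_t liveObjects = runForcedGC();
            if (liveObjects == previousLiveObjects)
                break;  // nothing more was freed; we have converged
            previousLiveObjects = liveObjects;
        }
        return previousLiveObjects;
    }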
614 | 608 |
615 void Heap::reportMemoryUsageHistogram() | 609 void ThreadHeap::reportMemoryUsageHistogram() |
616 { | 610 { |
617 static size_t supportedMaxSizeInMB = 4 * 1024; | 611 static size_t supportedMaxSizeInMB = 4 * 1024; |
618 static size_t observedMaxSizeInMB = 0; | 612 static size_t observedMaxSizeInMB = 0; |
619 | 613 |
620 // We only report the memory in the main thread. | 614 // We only report the memory in the main thread. |
621 if (!isMainThread()) | 615 if (!isMainThread()) |
622 return; | 616 return; |
623 // +1 is for rounding up the sizeInMB. | 617 // +1 is for rounding up the sizeInMB. |
624 size_t sizeInMB = Heap::heapStats().allocatedSpace() / 1024 / 1024 + 1; | 618 size_t sizeInMB = ThreadHeap::heapStats().allocatedSpace() / 1024 / 1024 + 1; |
625 if (sizeInMB >= supportedMaxSizeInMB) | 619 if (sizeInMB >= supportedMaxSizeInMB) |
626 sizeInMB = supportedMaxSizeInMB - 1; | 620 sizeInMB = supportedMaxSizeInMB - 1; |
627 if (sizeInMB > observedMaxSizeInMB) { | 621 if (sizeInMB > observedMaxSizeInMB) { |
628 // Send a UseCounter only when we see the highest memory usage | 622 // Send a UseCounter only when we see the highest memory usage |
629 // we've ever seen. | 623 // we've ever seen. |
630 DEFINE_THREAD_SAFE_STATIC_LOCAL(EnumerationHistogram, commitedSizeHistogram, new EnumerationHistogram("BlinkGC.CommittedSize", supportedMaxSizeInMB)); | 624 DEFINE_THREAD_SAFE_STATIC_LOCAL(EnumerationHistogram, commitedSizeHistogram, new EnumerationHistogram("BlinkGC.CommittedSize", supportedMaxSizeInMB)); |
631 commitedSizeHistogram.count(sizeInMB); | 625 commitedSizeHistogram.count(sizeInMB); |
632 observedMaxSizeInMB = sizeInMB; | 626 observedMaxSizeInMB = sizeInMB; |
633 } | 627 } |
634 } | 628 } |
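The reporting policy above is a high-water mark: convert committed bytes to MB rounded up, clamp into the histogram's supported range, and emit a sample only when a new maximum is observed. The same arithmetic in isolation; emitSample is a placeholder for the histogram count call:

    #include <cstddef>

    void maybeReportCommittedSize(size_t allocatedBytes, size_t& observedMaxInMB,
                                  void (*emitSample)(size_t sizeInMB))
    {
        const size_t supportedMaxInMB = 4 * 1024;
        size_t sizeInMB = allocatedBytes / 1024 / 1024 + 1;  // +1 rounds up
        if (sizeInMB >= supportedMaxInMB)
            sizeInMB = supportedMaxInMB - 1;  // clamp into histogram range
        if (sizeInMB > observedMaxInMB) {
            emitSample(sizeInMB);  // report only a new high-water mark
            observedMaxInMB = sizeInMB;
        }
    }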
635 | 629 |
636 void Heap::reportMemoryUsageForTracing() | 630 void ThreadHeap::reportMemoryUsageForTracing() |
637 { | 631 { |
638 #if PRINT_HEAP_STATS | 632 #if PRINT_HEAP_STATS |
639 // dataLogF("allocatedSpace=%ldMB, allocatedObjectSize=%ldMB, markedObjectSize=%ldMB, partitionAllocSize=%ldMB, wrapperCount=%ld, collectedWrapperCount=%ld\n", Heap::allocatedSpace() / 1024 / 1024, Heap::allocatedObjectSize() / 1024 / 1024, Heap::markedObjectSize() / 1024 / 1024, WTF::Partitions::totalSizeOfCommittedPages() / 1024 / 1024, Heap::wrapperCount(), Heap::collectedWrapperCount()); | 633 // dataLogF("allocatedSpace=%ldMB, allocatedObjectSize=%ldMB, markedObjectSize=%ldMB, partitionAllocSize=%ldMB, wrapperCount=%ld, collectedWrapperCount=%ld\n", ThreadHeap::allocatedSpace() / 1024 / 1024, ThreadHeap::allocatedObjectSize() / 1024 / 1024, ThreadHeap::markedObjectSize() / 1024 / 1024, WTF::Partitions::totalSizeOfCommittedPages() / 1024 / 1024, ThreadHeap::wrapperCount(), ThreadHeap::collectedWrapperCount()); |
640 #endif | 634 #endif |
641 | 635 |
642 bool gcTracingEnabled; | 636 bool gcTracingEnabled; |
643 TRACE_EVENT_CATEGORY_GROUP_ENABLED("blink_gc", &gcTracingEnabled); | 637 TRACE_EVENT_CATEGORY_GROUP_ENABLED("blink_gc", &gcTracingEnabled); |
644 if (!gcTracingEnabled) | 638 if (!gcTracingEnabled) |
645 return; | 639 return; |
646 | 640 |
647 // These values are divided by 1024 to avoid overflow in practical cases (TRACE_COUNTER values are 32-bit ints). | 641 // These values are divided by 1024 to avoid overflow in practical cases (TRACE_COUNTER values are 32-bit ints). |
648 // They are capped to INT_MAX just in case. | 642 // They are capped to INT_MAX just in case. |
649 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::allocatedObjectSizeKB", std::min(Heap::heapStats().allocatedObjectSize() / 1024, static_cast<size_t>(INT_MAX))); | 643 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::allocatedObjectSizeKB", std::min(ThreadHeap::heapStats().allocatedObjectSize() / 1024, static_cast<size_t>(INT_MAX))); |
650 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::markedObjectSizeKB", std::min(Heap::heapStats().markedObjectSize() / 1024, static_cast<size_t>(INT_MAX))); | 644 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::markedObjectSizeKB", std::min(ThreadHeap::heapStats().markedObjectSize() / 1024, static_cast<size_t>(INT_MAX))); |
651 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::markedObjectSizeAtLastCompleteSweepKB", std::min(Heap::heapStats().markedObjectSizeAtLastCompleteSweep() / 1024, static_cast<size_t>(INT_MAX))); | 645 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::markedObjectSizeAtLastCompleteSweepKB", std::min(ThreadHeap::heapStats().markedObjectSizeAtLastCompleteSweep() / 1024, static_cast<size_t>(INT_MAX))); |
652 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::allocatedSpaceKB", std::min(Heap::heapStats().allocatedSpace() / 1024, static_cast<size_t>(INT_MAX))); | 646 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::allocatedSpaceKB", std::min(ThreadHeap::heapStats().allocatedSpace() / 1024, static_cast<size_t>(INT_MAX))); |
653 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::objectSizeAtLastGCKB", std::min(Heap::heapStats().objectSizeAtLastGC() / 1024, static_cast<size_t>(INT_MAX))); | 647 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::objectSizeAtLastGCKB", std::min(ThreadHeap::heapStats().objectSizeAtLastGC() / 1024, static_cast<size_t>(INT_MAX))); |
654 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::wrapperCount", std::min(Heap::heapStats().wrapperCount(), static_cast<size_t>(INT_MAX))); | 648 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::wrapperCount", std::min(ThreadHeap::heapStats().wrapperCount(), static_cast<size_t>(INT_MAX))); |
655 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::heapStats().wrapperCountAtLastGC", std::min(Heap::heapStats().wrapperCountAtLastGC(), static_cast<size_t>(INT_MAX))); | 649 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::heapStats().wrapperCountAtLastGC", std::min(ThreadHeap::heapStats().wrapperCountAtLastGC(), static_cast<size_t>(INT_MAX))); |
656 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::collectedWrapperCount", std::min(Heap::heapStats().collectedWrapperCount(), static_cast<size_t>(INT_MAX))); | 650 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::collectedWrapperCount", std::min(ThreadHeap::heapStats().collectedWrapperCount(), static_cast<size_t>(INT_MAX))); |
657 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::partitionAllocSizeAtLastGCKB", std::min(Heap::heapStats().partitionAllocSizeAtLastGC() / 1024, static_cast<size_t>(INT_MAX))); | 651 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::partitionAllocSizeAtLastGCKB", std::min(ThreadHeap::heapStats().partitionAllocSizeAtLastGC() / 1024, static_cast<size_t>(INT_MAX))); |
658 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Partitions::totalSizeOfCommittedPagesKB", std::min(WTF::Partitions::totalSizeOfCommittedPages() / 1024, static_cast<size_t>(INT_MAX))); | 652 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Partitions::totalSizeOfCommittedPagesKB", std::min(WTF::Partitions::totalSizeOfCommittedPages() / 1024, static_cast<size_t>(INT_MAX))); |
659 } | 653 } |
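Every TRACE_COUNTER1 call above applies the same transform: divide by 1024 so realistic heap sizes fit, then cap at INT_MAX because trace counter values are 32-bit ints. Factored out as a hypothetical helper, not part of the patch:

    #include <algorithm>
    #include <climits>
    #include <cstddef>

    // Convert a byte count to a KB value that is safe to feed a 32-bit counter.
    int toTraceCounterKB(size_t bytes)
    {
        return static_cast<int>(std::min(bytes / 1024, static_cast<size_t>(INT_MAX)));
    }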
660 | 654 |
661 size_t Heap::objectPayloadSizeForTesting() | 655 size_t ThreadHeap::objectPayloadSizeForTesting() |
662 { | 656 { |
663 size_t objectPayloadSize = 0; | 657 size_t objectPayloadSize = 0; |
664 for (ThreadState* state : ThreadState::attachedThreads()) { | 658 for (ThreadState* state : ThreadState::attachedThreads()) { |
665 state->setGCState(ThreadState::GCRunning); | 659 state->setGCState(ThreadState::GCRunning); |
666 state->makeConsistentForGC(); | 660 state->makeConsistentForGC(); |
667 objectPayloadSize += state->objectPayloadSizeForTesting(); | 661 objectPayloadSize += state->objectPayloadSizeForTesting(); |
668 state->setGCState(ThreadState::EagerSweepScheduled); | 662 state->setGCState(ThreadState::EagerSweepScheduled); |
669 state->setGCState(ThreadState::Sweeping); | 663 state->setGCState(ThreadState::Sweeping); |
670 state->setGCState(ThreadState::NoGCScheduled); | 664 state->setGCState(ThreadState::NoGCScheduled); |
671 } | 665 } |
672 return objectPayloadSize; | 666 return objectPayloadSize; |
673 } | 667 } |
674 | 668 |
675 RegionTree* Heap::getRegionTree() | 669 RegionTree* ThreadHeap::getRegionTree() |
676 { | 670 { |
677 DEFINE_THREAD_SAFE_STATIC_LOCAL(RegionTree, tree, new RegionTree); | 671 DEFINE_THREAD_SAFE_STATIC_LOCAL(RegionTree, tree, new RegionTree); |
678 return &tree; | 672 return &tree; |
679 } | 673 } |
680 | 674 |
681 BasePage* Heap::lookup(Address address) | 675 BasePage* ThreadHeap::lookup(Address address) |
682 { | 676 { |
683 ASSERT(ThreadState::current()->isInGC()); | 677 ASSERT(ThreadState::current()->isInGC()); |
684 if (PageMemoryRegion* region = Heap::getRegionTree()->lookup(address)) { | 678 if (PageMemoryRegion* region = ThreadHeap::getRegionTree()->lookup(address)) { |
685 BasePage* page = region->pageFromAddress(address); | 679 BasePage* page = region->pageFromAddress(address); |
686 return page && !page->orphaned() ? page : nullptr; | 680 return page && !page->orphaned() ? page : nullptr; |
687 } | 681 } |
688 return nullptr; | 682 return nullptr; |
689 } | 683 } |
690 | 684 |
691 void Heap::resetHeapCounters() | 685 void ThreadHeap::resetHeapCounters() |
692 { | 686 { |
693 ASSERT(ThreadState::current()->isInGC()); | 687 ASSERT(ThreadState::current()->isInGC()); |
694 | 688 |
695 Heap::reportMemoryUsageForTracing(); | 689 ThreadHeap::reportMemoryUsageForTracing(); |
696 | 690 |
697 ProcessHeap::resetHeapCounters(); | 691 ProcessHeap::resetHeapCounters(); |
698 Heap::heapStats().reset(); | 692 ThreadHeap::heapStats().reset(); |
699 for (ThreadState* state : ThreadState::attachedThreads()) | 693 for (ThreadState* state : ThreadState::attachedThreads()) |
700 state->resetHeapCounters(); | 694 state->resetHeapCounters(); |
701 } | 695 } |
702 | 696 |
703 // TODO(keishi): Make this a member of ThreadHeap. | 697 ThreadHeapStats& ThreadHeap::heapStats() |
704 ThreadHeapStats& Heap::heapStats() | |
705 { | 698 { |
706 DEFINE_THREAD_SAFE_STATIC_LOCAL(ThreadHeapStats, stats, new ThreadHeapStats()); | 699 DEFINE_THREAD_SAFE_STATIC_LOCAL(ThreadHeapStats, stats, new ThreadHeapStats()); |
707 return stats; | 700 return stats; |
708 } | 701 } |
709 | 702 |
710 CallbackStack* Heap::s_markingStack; | 703 CallbackStack* ThreadHeap::s_markingStack; |
711 CallbackStack* Heap::s_postMarkingCallbackStack; | 704 CallbackStack* ThreadHeap::s_postMarkingCallbackStack; |
712 CallbackStack* Heap::s_globalWeakCallbackStack; | 705 CallbackStack* ThreadHeap::s_globalWeakCallbackStack; |
713 CallbackStack* Heap::s_ephemeronStack; | 706 CallbackStack* ThreadHeap::s_ephemeronStack; |
714 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache; | 707 HeapDoesNotContainCache* ThreadHeap::s_heapDoesNotContainCache; |
715 FreePagePool* Heap::s_freePagePool; | 708 FreePagePool* ThreadHeap::s_freePagePool; |
716 OrphanedPagePool* Heap::s_orphanedPagePool; | 709 OrphanedPagePool* ThreadHeap::s_orphanedPagePool; |
717 | 710 |
718 BlinkGC::GCReason Heap::s_lastGCReason = BlinkGC::NumberOfGCReason; | 711 BlinkGC::GCReason ThreadHeap::s_lastGCReason = BlinkGC::NumberOfGCReason; |
719 | 712 |
720 } // namespace blink | 713 } // namespace blink |