Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(29)

Side by Side Diff: third_party/WebKit/Source/platform/heap/Heap.cpp

Issue 1892713003: Prepare for multiple ThreadHeaps (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Rebase Created 4 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 40 matching lines...) Expand 10 before | Expand all | Expand 10 after
51 #include "wtf/allocator/Partitions.h" 51 #include "wtf/allocator/Partitions.h"
52 52
53 namespace blink { 53 namespace blink {
54 54
55 HeapAllocHooks::AllocationHook* HeapAllocHooks::m_allocationHook = nullptr; 55 HeapAllocHooks::AllocationHook* HeapAllocHooks::m_allocationHook = nullptr;
56 HeapAllocHooks::FreeHook* HeapAllocHooks::m_freeHook = nullptr; 56 HeapAllocHooks::FreeHook* HeapAllocHooks::m_freeHook = nullptr;
57 57
58 class ParkThreadsScope final { 58 class ParkThreadsScope final {
59 STACK_ALLOCATED(); 59 STACK_ALLOCATED();
60 public: 60 public:
61 ParkThreadsScope() 61 explicit ParkThreadsScope(ThreadState* state)
62 : m_shouldResumeThreads(false) 62 : m_state(state)
63 , m_shouldResumeThreads(false)
63 { 64 {
64 } 65 }
65 66
66 bool parkThreads(ThreadState* state) 67 bool parkThreads()
67 { 68 {
68 TRACE_EVENT0("blink_gc", "ThreadHeap::ParkThreadsScope"); 69 TRACE_EVENT0("blink_gc", "ThreadHeap::ParkThreadsScope");
69 const char* samplingState = TRACE_EVENT_GET_SAMPLING_STATE(); 70 const char* samplingState = TRACE_EVENT_GET_SAMPLING_STATE();
70 if (state->isMainThread()) 71 if (m_state->isMainThread())
71 TRACE_EVENT_SET_SAMPLING_STATE("blink_gc", "BlinkGCWaiting"); 72 TRACE_EVENT_SET_SAMPLING_STATE("blink_gc", "BlinkGCWaiting");
72 73
73 // TODO(haraken): In an unlikely coincidence that two threads decide 74 // TODO(haraken): In an unlikely coincidence that two threads decide
74 // to collect garbage at the same time, avoid doing two GCs in 75 // to collect garbage at the same time, avoid doing two GCs in
75 // a row and return false. 76 // a row and return false.
76 double startTime = WTF::currentTimeMS(); 77 double startTime = WTF::currentTimeMS();
77 78
78 m_shouldResumeThreads = ThreadState::stopThreads(); 79 m_shouldResumeThreads = m_state->heap().park();
79 80
80 double timeForStoppingThreads = WTF::currentTimeMS() - startTime; 81 double timeForStoppingThreads = WTF::currentTimeMS() - startTime;
81 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, timeToStopThreadsHistogram, new CustomCountHistogram("BlinkGC.TimeForStoppingThreads", 1, 1000, 50)); 82 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, timeToStopThreadsHistogram, new CustomCountHistogram("BlinkGC.TimeForStoppingThreads", 1, 1000, 50));
82 timeToStopThreadsHistogram.count(timeForStoppingThreads); 83 timeToStopThreadsHistogram.count(timeForStoppingThreads);
83 84
84 if (state->isMainThread()) 85 if (m_state->isMainThread())
85 TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(samplingState); 86 TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(samplingState);
86 return m_shouldResumeThreads; 87 return m_shouldResumeThreads;
87 } 88 }
88 89
89 ~ParkThreadsScope() 90 ~ParkThreadsScope()
90 { 91 {
91 // Only cleanup if we parked all threads in which case the GC happened 92 // Only cleanup if we parked all threads in which case the GC happened
92 // and we need to resume the other threads. 93 // and we need to resume the other threads.
93 if (m_shouldResumeThreads) 94 if (m_shouldResumeThreads)
94 ThreadState::resumeThreads(); 95 m_state->heap().resume();
95 } 96 }
96 97
97 private: 98 private:
99 ThreadState* m_state;
98 bool m_shouldResumeThreads; 100 bool m_shouldResumeThreads;
99 }; 101 };
100 102
101 void ThreadHeap::flushHeapDoesNotContainCache() 103 void ThreadHeap::flushHeapDoesNotContainCache()
102 { 104 {
103 s_heapDoesNotContainCache->flush(); 105 m_heapDoesNotContainCache->flush();
104 } 106 }
105 107
106 void ProcessHeap::init() 108 void ProcessHeap::init()
107 { 109 {
110 s_shutdownComplete = false;
108 s_totalAllocatedSpace = 0; 111 s_totalAllocatedSpace = 0;
109 s_totalAllocatedObjectSize = 0; 112 s_totalAllocatedObjectSize = 0;
110 s_totalMarkedObjectSize = 0; 113 s_totalMarkedObjectSize = 0;
111 s_isLowEndDevice = base::SysInfo::IsLowEndDevice(); 114 s_isLowEndDevice = base::SysInfo::IsLowEndDevice();
115
116 GCInfoTable::init();
117
118 if (Platform::current() && Platform::current()->currentThread())
119 Platform::current()->registerMemoryDumpProvider(BlinkGCMemoryDumpProvider::instance(), "BlinkGC");
112 } 120 }
113 121
114 void ProcessHeap::resetHeapCounters() 122 void ProcessHeap::resetHeapCounters()
115 { 123 {
116 s_totalAllocatedObjectSize = 0; 124 s_totalAllocatedObjectSize = 0;
117 s_totalMarkedObjectSize = 0; 125 s_totalMarkedObjectSize = 0;
118 } 126 }
119 127
120 void ThreadHeap::init() 128 void ProcessHeap::shutdown()
121 { 129 {
122 ThreadState::init(); 130 ASSERT(!s_shutdownComplete);
123 ProcessHeap::init();
124 s_markingStack = new CallbackStack();
125 s_postMarkingCallbackStack = new CallbackStack();
126 s_globalWeakCallbackStack = new CallbackStack();
127 // Use smallest supported block size for ephemerons.
128 s_ephemeronStack = new CallbackStack(CallbackStack::kMinimalBlockSize);
129 s_heapDoesNotContainCache = new HeapDoesNotContainCache();
130 s_freePagePool = new FreePagePool();
131 s_orphanedPagePool = new OrphanedPagePool();
132 s_lastGCReason = BlinkGC::NumberOfGCReason;
133
134 GCInfoTable::init();
135
136 if (Platform::current() && Platform::current()->currentThread())
137 Platform::current()->registerMemoryDumpProvider(BlinkGCMemoryDumpProvider::instance(), "BlinkGC");
138 }
139
140 void ThreadHeap::shutdown()
141 {
142 ASSERT(s_markingStack);
143 131
144 if (Platform::current() && Platform::current()->currentThread()) 132 if (Platform::current() && Platform::current()->currentThread())
145 Platform::current()->unregisterMemoryDumpProvider(BlinkGCMemoryDumpProvider::instance()); 133 Platform::current()->unregisterMemoryDumpProvider(BlinkGCMemoryDumpProvider::instance());
146 134
147 // The main thread must be the last thread that gets detached. 135 {
148 RELEASE_ASSERT(ThreadState::attachedThreads().size() == 0); 136 // The main thread must be the last thread that gets detached.
137 MutexLocker locker(ThreadHeap::allHeapsMutex());
138 RELEASE_ASSERT(ThreadHeap::allHeaps().isEmpty());
139 }
149 140
150 delete s_heapDoesNotContainCache;
151 s_heapDoesNotContainCache = nullptr;
152 delete s_freePagePool;
153 s_freePagePool = nullptr;
154 delete s_orphanedPagePool;
155 s_orphanedPagePool = nullptr;
156 delete s_globalWeakCallbackStack;
157 s_globalWeakCallbackStack = nullptr;
158 delete s_postMarkingCallbackStack;
159 s_postMarkingCallbackStack = nullptr;
160 delete s_markingStack;
161 s_markingStack = nullptr;
162 delete s_ephemeronStack;
163 s_ephemeronStack = nullptr;
164 GCInfoTable::shutdown(); 141 GCInfoTable::shutdown();
165 ThreadState::shutdown(); 142 ASSERT(ProcessHeap::totalAllocatedSpace() == 0);
166 ASSERT(ThreadHeap::heapStats().allocatedSpace() == 0); 143 s_shutdownComplete = true;
167 } 144 }
168 145
169 CrossThreadPersistentRegion& ProcessHeap::crossThreadPersistentRegion() 146 CrossThreadPersistentRegion& ProcessHeap::crossThreadPersistentRegion()
170 { 147 {
171 DEFINE_THREAD_SAFE_STATIC_LOCAL(CrossThreadPersistentRegion, persistentRegion, new CrossThreadPersistentRegion()); 148 DEFINE_THREAD_SAFE_STATIC_LOCAL(CrossThreadPersistentRegion, persistentRegion, new CrossThreadPersistentRegion());
172 return persistentRegion; 149 return persistentRegion;
173 } 150 }
174 151
152 bool ProcessHeap::s_shutdownComplete = false;
175 bool ProcessHeap::s_isLowEndDevice = false; 153 bool ProcessHeap::s_isLowEndDevice = false;
176 size_t ProcessHeap::s_totalAllocatedSpace = 0; 154 size_t ProcessHeap::s_totalAllocatedSpace = 0;
177 size_t ProcessHeap::s_totalAllocatedObjectSize = 0; 155 size_t ProcessHeap::s_totalAllocatedObjectSize = 0;
178 size_t ProcessHeap::s_totalMarkedObjectSize = 0; 156 size_t ProcessHeap::s_totalMarkedObjectSize = 0;
179 157
180 ThreadHeapStats::ThreadHeapStats() 158 ThreadHeapStats::ThreadHeapStats()
181 : m_allocatedSpace(0) 159 : m_allocatedSpace(0)
182 , m_allocatedObjectSize(0) 160 , m_allocatedObjectSize(0)
183 , m_objectSizeAtLastGC(0) 161 , m_objectSizeAtLastGC(0)
184 , m_markedObjectSize(0) 162 , m_markedObjectSize(0)
(...skipping 52 matching lines...) Expand 10 before | Expand all | Expand 10 after
237 atomicAdd(&m_allocatedSpace, static_cast<long>(delta)); 215 atomicAdd(&m_allocatedSpace, static_cast<long>(delta));
238 ProcessHeap::increaseTotalAllocatedSpace(delta); 216 ProcessHeap::increaseTotalAllocatedSpace(delta);
239 } 217 }
240 218
241 void ThreadHeapStats::decreaseAllocatedSpace(size_t delta) 219 void ThreadHeapStats::decreaseAllocatedSpace(size_t delta)
242 { 220 {
243 atomicSubtract(&m_allocatedSpace, static_cast<long>(delta)); 221 atomicSubtract(&m_allocatedSpace, static_cast<long>(delta));
244 ProcessHeap::decreaseTotalAllocatedSpace(delta); 222 ProcessHeap::decreaseTotalAllocatedSpace(delta);
245 } 223 }
246 224
225 ThreadHeap::ThreadHeap()
226 : m_regionTree(adoptPtr(new RegionTree()))
227 , m_heapDoesNotContainCache(adoptPtr(new HeapDoesNotContainCache))
228 , m_safePointBarrier(adoptPtr(new SafePointBarrier()))
229 , m_freePagePool(adoptPtr(new FreePagePool))
230 , m_orphanedPagePool(adoptPtr(new OrphanedPagePool))
231 , m_markingStack(adoptPtr(new CallbackStack()))
232 , m_postMarkingCallbackStack(adoptPtr(new CallbackStack()))
233 , m_globalWeakCallbackStack(adoptPtr(new CallbackStack()))
234 , m_ephemeronStack(adoptPtr(new CallbackStack(CallbackStack::kMinimalBlockSize)))
235 {
236 if (ThreadState::current()->isMainThread())
237 s_mainThreadHeap = this;
238
239 MutexLocker locker(ThreadHeap::allHeapsMutex());
240 allHeaps().add(this);
241 }
242
243 ThreadHeap::~ThreadHeap()
244 {
245 MutexLocker locker(ThreadHeap::allHeapsMutex());
246 allHeaps().remove(this);
247 }
248
249 RecursiveMutex& ThreadHeap::allHeapsMutex()
250 {
251 DEFINE_THREAD_SAFE_STATIC_LOCAL(RecursiveMutex, mutex, (new RecursiveMutex));
252 return mutex;
253 }
254
255 HashSet<ThreadHeap*>& ThreadHeap::allHeaps()
256 {
257 DEFINE_STATIC_LOCAL(HashSet<ThreadHeap*>, heaps, ());
258 return heaps;
259 }
260
261 void ThreadHeap::attach(ThreadState* thread)
262 {
263 MutexLocker locker(m_threadAttachMutex);
264 m_threads.add(thread);
265 }
266
267 void ThreadHeap::detach(ThreadState* thread)
268 {
269 ASSERT(ThreadState::current() == thread);
270 {
271 // Grab the threadAttachMutex to ensure only one thread can shutdown at
272 // a time and that no other thread can do a global GC. It also allows
273 // safe iteration of the m_threads set which happens as part of
274 // thread local GC asserts. We enter a safepoint while waiting for the
275 // lock to avoid a dead-lock where another thread has already requested
276 // GC.
277 SafePointAwareMutexLocker locker(m_threadAttachMutex, BlinkGC::NoHeapPointersOnStack);
278 thread->runTerminationGC();
279 ASSERT(m_threads.contains(thread));
280 m_threads.remove(thread);
281 }
282 // The main thread must be the last thread that gets detached.
283 ASSERT(!thread->isMainThread() || m_threads.isEmpty());
284 if (thread->isMainThread()) {
285 ASSERT(heapStats().allocatedSpace() == 0);
286 delete this;
287 }
288 }
289
290 bool ThreadHeap::park()
291 {
292 return m_safePointBarrier->parkOthers();
293 }
294
295 void ThreadHeap::resume()
296 {
297 m_safePointBarrier->resumeOthers();
298 }
299
247 #if ENABLE(ASSERT) 300 #if ENABLE(ASSERT)
248 BasePage* ThreadHeap::findPageFromAddress(Address address) 301 BasePage* ThreadHeap::findPageFromAddress(Address address)
249 { 302 {
250 MutexLocker lock(ThreadState::threadAttachMutex()); 303 MutexLocker locker(m_threadAttachMutex);
251 for (ThreadState* state : ThreadState::attachedThreads()) { 304 for (ThreadState* state : m_threads) {
252 if (BasePage* page = state->findPageFromAddress(address)) 305 if (BasePage* page = state->findPageFromAddress(address))
253 return page; 306 return page;
254 } 307 }
255 return nullptr; 308 return nullptr;
256 } 309 }
310
311 bool ThreadHeap::isAtSafePoint()
312 {
313 MutexLocker locker(m_threadAttachMutex);
314 for (ThreadState* state : m_threads) {
315 if (!state->isAtSafePoint())
316 return false;
317 }
318 return true;
319 }
257 #endif 320 #endif
258 321
259 Address ThreadHeap::checkAndMarkPointer(Visitor* visitor, Address address) 322 Address ThreadHeap::checkAndMarkPointer(Visitor* visitor, Address address)
260 { 323 {
261 ASSERT(ThreadState::current()->isInGC()); 324 ASSERT(ThreadState::current()->isInGC());
262 325
263 #if !ENABLE(ASSERT) 326 #if !ENABLE(ASSERT)
264 if (s_heapDoesNotContainCache->lookup(address)) 327 if (m_heapDoesNotContainCache->lookup(address))
265 return nullptr; 328 return nullptr;
266 #endif 329 #endif
267 330
268 if (BasePage* page = lookup(address)) { 331 if (BasePage* page = lookupPageForAddress(address)) {
269 ASSERT(page->contains(address)); 332 ASSERT(page->contains(address));
270 ASSERT(!page->orphaned()); 333 ASSERT(!page->orphaned());
271 ASSERT(!s_heapDoesNotContainCache->lookup(address)); 334 ASSERT(!m_heapDoesNotContainCache->lookup(address));
272 page->checkAndMarkPointer(visitor, address); 335 page->checkAndMarkPointer(visitor, address);
273 return address; 336 return address;
274 } 337 }
275 338
276 #if !ENABLE(ASSERT) 339 #if !ENABLE(ASSERT)
277 s_heapDoesNotContainCache->addEntry(address); 340 m_heapDoesNotContainCache->addEntry(address);
278 #else 341 #else
279 if (!s_heapDoesNotContainCache->lookup(address)) 342 if (!m_heapDoesNotContainCache->lookup(address))
280 s_heapDoesNotContainCache->addEntry(address); 343 m_heapDoesNotContainCache->addEntry(address);
281 #endif 344 #endif
282 return nullptr; 345 return nullptr;
283 } 346 }
284 347
285 void ThreadHeap::pushTraceCallback(void* object, TraceCallback callback) 348 void ThreadHeap::pushTraceCallback(void* object, TraceCallback callback)
286 { 349 {
287 ASSERT(ThreadState::current()->isInGC()); 350 ASSERT(ThreadState::current()->isInGC());
288 351
289 // Trace should never reach an orphaned page. 352 // Trace should never reach an orphaned page.
290 ASSERT(!ThreadHeap::getOrphanedPagePool()->contains(object)); 353 ASSERT(!getOrphanedPagePool()->contains(object));
291 CallbackStack::Item* slot = s_markingStack->allocateEntry(); 354 CallbackStack::Item* slot = m_markingStack->allocateEntry();
292 *slot = CallbackStack::Item(object, callback); 355 *slot = CallbackStack::Item(object, callback);
293 } 356 }
294 357
295 bool ThreadHeap::popAndInvokeTraceCallback(Visitor* visitor) 358 bool ThreadHeap::popAndInvokeTraceCallback(Visitor* visitor)
296 { 359 {
297 CallbackStack::Item* item = s_markingStack->pop(); 360 CallbackStack::Item* item = m_markingStack->pop();
298 if (!item) 361 if (!item)
299 return false; 362 return false;
300 item->call(visitor); 363 item->call(visitor);
301 return true; 364 return true;
302 } 365 }
303 366
304 void ThreadHeap::pushPostMarkingCallback(void* object, TraceCallback callback) 367 void ThreadHeap::pushPostMarkingCallback(void* object, TraceCallback callback)
305 { 368 {
306 ASSERT(ThreadState::current()->isInGC()); 369 ASSERT(ThreadState::current()->isInGC());
307 370
308 // Trace should never reach an orphaned page. 371 // Trace should never reach an orphaned page.
309 ASSERT(!ThreadHeap::getOrphanedPagePool()->contains(object)); 372 ASSERT(!getOrphanedPagePool()->contains(object));
310 CallbackStack::Item* slot = s_postMarkingCallbackStack->allocateEntry(); 373 CallbackStack::Item* slot = m_postMarkingCallbackStack->allocateEntry();
311 *slot = CallbackStack::Item(object, callback); 374 *slot = CallbackStack::Item(object, callback);
312 } 375 }
313 376
314 bool ThreadHeap::popAndInvokePostMarkingCallback(Visitor* visitor) 377 bool ThreadHeap::popAndInvokePostMarkingCallback(Visitor* visitor)
315 { 378 {
316 if (CallbackStack::Item* item = s_postMarkingCallbackStack->pop()) { 379 if (CallbackStack::Item* item = m_postMarkingCallbackStack->pop()) {
317 item->call(visitor); 380 item->call(visitor);
318 return true; 381 return true;
319 } 382 }
320 return false; 383 return false;
321 } 384 }
322 385
323 void ThreadHeap::pushGlobalWeakCallback(void** cell, WeakCallback callback) 386 void ThreadHeap::pushGlobalWeakCallback(void** cell, WeakCallback callback)
324 { 387 {
325 ASSERT(ThreadState::current()->isInGC()); 388 ASSERT(ThreadState::current()->isInGC());
326 389
327 // Trace should never reach an orphaned page. 390 // Trace should never reach an orphaned page.
328 ASSERT(!ThreadHeap::getOrphanedPagePool()->contains(cell)); 391 ASSERT(!getOrphanedPagePool()->contains(cell));
329 CallbackStack::Item* slot = s_globalWeakCallbackStack->allocateEntry(); 392 CallbackStack::Item* slot = m_globalWeakCallbackStack->allocateEntry();
330 *slot = CallbackStack::Item(cell, callback); 393 *slot = CallbackStack::Item(cell, callback);
331 } 394 }
332 395
333 void ThreadHeap::pushThreadLocalWeakCallback(void* closure, void* object, WeakCallback callback) 396 void ThreadHeap::pushThreadLocalWeakCallback(void* closure, void* object, WeakCallback callback)
334 { 397 {
335 ASSERT(ThreadState::current()->isInGC()); 398 ASSERT(ThreadState::current()->isInGC());
336 399
337 // Trace should never reach an orphaned page. 400 // Trace should never reach an orphaned page.
338 ASSERT(!ThreadHeap::getOrphanedPagePool()->contains(object)); 401 ASSERT(!getOrphanedPagePool()->contains(object));
339 ThreadState* state = pageFromObject(object)->arena()->getThreadState(); 402 ThreadState* state = pageFromObject(object)->arena()->getThreadState();
340 state->pushThreadLocalWeakCallback(closure, callback); 403 state->pushThreadLocalWeakCallback(closure, callback);
341 } 404 }
342 405
343 bool ThreadHeap::popAndInvokeGlobalWeakCallback(Visitor* visitor) 406 bool ThreadHeap::popAndInvokeGlobalWeakCallback(Visitor* visitor)
344 { 407 {
345 if (CallbackStack::Item* item = s_globalWeakCallbackStack->pop()) { 408 if (CallbackStack::Item* item = m_globalWeakCallbackStack->pop()) {
346 item->call(visitor); 409 item->call(visitor);
347 return true; 410 return true;
348 } 411 }
349 return false; 412 return false;
350 } 413 }
351 414
352 void ThreadHeap::registerWeakTable(void* table, EphemeronCallback iterationCallback, EphemeronCallback iterationDoneCallback) 415 void ThreadHeap::registerWeakTable(void* table, EphemeronCallback iterationCallback, EphemeronCallback iterationDoneCallback)
353 { 416 {
354 ASSERT(ThreadState::current()->isInGC()); 417 ASSERT(ThreadState::current()->isInGC());
355 418
356 // Trace should never reach an orphaned page. 419 // Trace should never reach an orphaned page.
357 ASSERT(!ThreadHeap::getOrphanedPagePool()->contains(table)); 420 ASSERT(!getOrphanedPagePool()->contains(table));
358 CallbackStack::Item* slot = s_ephemeronStack->allocateEntry(); 421 CallbackStack::Item* slot = m_ephemeronStack->allocateEntry();
359 *slot = CallbackStack::Item(table, iterationCallback); 422 *slot = CallbackStack::Item(table, iterationCallback);
360 423
361 // Register a post-marking callback to tell the tables that 424 // Register a post-marking callback to tell the tables that
362 // ephemeron iteration is complete. 425 // ephemeron iteration is complete.
363 pushPostMarkingCallback(table, iterationDoneCallback); 426 pushPostMarkingCallback(table, iterationDoneCallback);
364 } 427 }
365 428
366 #if ENABLE(ASSERT) 429 #if ENABLE(ASSERT)
367 bool ThreadHeap::weakTableRegistered(const void* table) 430 bool ThreadHeap::weakTableRegistered(const void* table)
368 { 431 {
369 ASSERT(s_ephemeronStack); 432 ASSERT(m_ephemeronStack);
370 return s_ephemeronStack->hasCallbackForObject(table); 433 return m_ephemeronStack->hasCallbackForObject(table);
371 } 434 }
372 #endif 435 #endif
373 436
374 void ThreadHeap::decommitCallbackStacks() 437 void ThreadHeap::decommitCallbackStacks()
375 { 438 {
376 s_markingStack->decommit(); 439 m_markingStack->decommit();
377 s_postMarkingCallbackStack->decommit(); 440 m_postMarkingCallbackStack->decommit();
378 s_globalWeakCallbackStack->decommit(); 441 m_globalWeakCallbackStack->decommit();
379 s_ephemeronStack->decommit(); 442 m_ephemeronStack->decommit();
380 } 443 }
381 444
382 void ThreadHeap::preGC() 445 void ThreadHeap::preGC()
383 { 446 {
384 ASSERT(!ThreadState::current()->isInGC()); 447 ASSERT(!ThreadState::current()->isInGC());
385 for (ThreadState* state : ThreadState::attachedThreads()) 448 for (ThreadState* state : m_threads) {
386 state->preGC(); 449 state->preGC();
450 }
387 } 451 }
388 452
389 void ThreadHeap::postGC(BlinkGC::GCType gcType) 453 void ThreadHeap::postGC(BlinkGC::GCType gcType)
390 { 454 {
391 ASSERT(ThreadState::current()->isInGC()); 455 ASSERT(ThreadState::current()->isInGC());
392 for (ThreadState* state : ThreadState::attachedThreads()) 456 for (ThreadState* state : m_threads) {
393 state->postGC(gcType); 457 state->postGC(gcType);
458 }
394 } 459 }
395 460
396 const char* ThreadHeap::gcReasonString(BlinkGC::GCReason reason) 461 const char* ThreadHeap::gcReasonString(BlinkGC::GCReason reason)
397 { 462 {
398 switch (reason) { 463 switch (reason) {
399 case BlinkGC::IdleGC: 464 case BlinkGC::IdleGC:
400 return "IdleGC"; 465 return "IdleGC";
401 case BlinkGC::PreciseGC: 466 case BlinkGC::PreciseGC:
402 return "PreciseGC"; 467 return "PreciseGC";
403 case BlinkGC::ConservativeGC: 468 case BlinkGC::ConservativeGC:
(...skipping 17 matching lines...) Expand all
421 ThreadState* state = ThreadState::current(); 486 ThreadState* state = ThreadState::current();
422 // Nested collectGarbage() invocations aren't supported. 487 // Nested collectGarbage() invocations aren't supported.
423 RELEASE_ASSERT(!state->isGCForbidden()); 488 RELEASE_ASSERT(!state->isGCForbidden());
424 state->completeSweep(); 489 state->completeSweep();
425 490
426 OwnPtr<Visitor> visitor = Visitor::create(state, gcType); 491 OwnPtr<Visitor> visitor = Visitor::create(state, gcType);
427 492
428 SafePointScope safePointScope(stackState, state); 493 SafePointScope safePointScope(stackState, state);
429 494
430 // Resume all parked threads upon leaving this scope. 495 // Resume all parked threads upon leaving this scope.
431 ParkThreadsScope parkThreadsScope; 496 ParkThreadsScope parkThreadsScope(state);
432 497
433 // Try to park the other threads. If we're unable to, bail out of the GC. 498 // Try to park the other threads. If we're unable to, bail out of the GC.
434 if (!parkThreadsScope.parkThreads(state)) 499 if (!parkThreadsScope.parkThreads())
435 return; 500 return;
436 501
437 ScriptForbiddenIfMainThreadScope scriptForbidden; 502 ScriptForbiddenIfMainThreadScope scriptForbidden;
438 503
439 TRACE_EVENT2("blink_gc,devtools.timeline", "BlinkGCMarking", 504 TRACE_EVENT2("blink_gc,devtools.timeline", "BlinkGCMarking",
440 "lazySweeping", gcType == BlinkGC::GCWithoutSweep, 505 "lazySweeping", gcType == BlinkGC::GCWithoutSweep,
441 "gcReason", gcReasonString(reason)); 506 "gcReason", gcReasonString(reason));
442 TRACE_EVENT_SCOPED_SAMPLING_STATE("blink_gc", "BlinkGC"); 507 TRACE_EVENT_SCOPED_SAMPLING_STATE("blink_gc", "BlinkGC");
443 double startTime = WTF::currentTimeMS(); 508 double startTime = WTF::currentTimeMS();
444 509
445 if (gcType == BlinkGC::TakeSnapshot) 510 if (gcType == BlinkGC::TakeSnapshot)
446 BlinkGCMemoryDumpProvider::instance()->clearProcessDumpForCurrentGC(); 511 BlinkGCMemoryDumpProvider::instance()->clearProcessDumpForCurrentGC();
447 512
448 // Disallow allocation during garbage collection (but not during the 513 // Disallow allocation during garbage collection (but not during the
449 // finalization that happens when the visitorScope is torn down). 514 // finalization that happens when the visitorScope is torn down).
450 ThreadState::NoAllocationScope noAllocationScope(state); 515 ThreadState::NoAllocationScope noAllocationScope(state);
451 516
452 preGC(); 517 state->heap().preGC();
453 518
454 StackFrameDepthScope stackDepthScope; 519 StackFrameDepthScope stackDepthScope;
455 520
456 size_t totalObjectSize = ThreadHeap::heapStats().allocatedObjectSize() + ThreadHeap::heapStats().markedObjectSize(); 521 size_t totalObjectSize = state->heap().heapStats().allocatedObjectSize() + state->heap().heapStats().markedObjectSize();
457 if (gcType != BlinkGC::TakeSnapshot) 522 if (gcType != BlinkGC::TakeSnapshot)
458 ThreadHeap::resetHeapCounters(); 523 state->heap().resetHeapCounters();
459 524
460 // 1. Trace persistent roots. 525 // 1. Trace persistent roots.
461 ThreadState::visitPersistentRoots(visitor.get()); 526 state->heap().visitPersistentRoots(visitor.get());
462 527
463 // 2. Trace objects reachable from the stack. We do this independent of the 528 // 2. Trace objects reachable from the stack. We do this independent of the
464 // given stackState since other threads might have a different stack state. 529 // given stackState since other threads might have a different stack state.
465 ThreadState::visitStackRoots(visitor.get()); 530 state->heap().visitStackRoots(visitor.get());
466 531
467 // 3. Transitive closure to trace objects including ephemerons. 532 // 3. Transitive closure to trace objects including ephemerons.
468 processMarkingStack(visitor.get()); 533 state->heap().processMarkingStack(visitor.get());
469 534
470 postMarkingProcessing(visitor.get()); 535 state->heap().postMarkingProcessing(visitor.get());
471 globalWeakProcessing(visitor.get()); 536 state->heap().globalWeakProcessing(visitor.get());
472 537
473 // Now we can delete all orphaned pages because there are no dangling 538 // Now we can delete all orphaned pages because there are no dangling
474 // pointers to the orphaned pages. (If we have such dangling pointers, 539 // pointers to the orphaned pages. (If we have such dangling pointers,
475 // we should have crashed during marking before getting here.) 540 // we should have crashed during marking before getting here.)
476 getOrphanedPagePool()->decommitOrphanedPages(); 541 state->heap().getOrphanedPagePool()->decommitOrphanedPages();
477 542
478 double markingTimeInMilliseconds = WTF::currentTimeMS() - startTime; 543 double markingTimeInMilliseconds = WTF::currentTimeMS() - startTime;
479 ThreadHeap::heapStats().setEstimatedMarkingTimePerByte(totalObjectSize ? (markingTimeInMilliseconds / 1000 / totalObjectSize) : 0); 544 state->heap().heapStats().setEstimatedMarkingTimePerByte(totalObjectSize ? (markingTimeInMilliseconds / 1000 / totalObjectSize) : 0);
480 545
481 #if PRINT_HEAP_STATS 546 #if PRINT_HEAP_STATS
482 dataLogF("ThreadHeap::collectGarbage (gcReason=%s, lazySweeping=%d, time=%.1lfms)\n", gcReasonString(reason), gcType == BlinkGC::GCWithoutSweep, markingTimeInMilliseconds); 547 dataLogF("ThreadHeap::collectGarbage (gcReason=%s, lazySweeping=%d, time=%.1lfms)\n", gcReasonString(reason), gcType == BlinkGC::GCWithoutSweep, markingTimeInMilliseconds);
483 #endif 548 #endif
484 549
485 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, markingTimeHistogram, new CustomCountHistogram("BlinkGC.CollectGarbage", 0, 10 * 1000, 50)); 550 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, markingTimeHistogram, new CustomCountHistogram("BlinkGC.CollectGarbage", 0, 10 * 1000, 50));
486 markingTimeHistogram.count(markingTimeInMilliseconds); 551 markingTimeHistogram.count(markingTimeInMilliseconds);
487 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, totalObjectSpaceHistogram, new CustomCountHistogram("BlinkGC.TotalObjectSpace", 0, 4 * 1024 * 1024, 50)); 552 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, totalObjectSpaceHistogram, new CustomCountHistogram("BlinkGC.TotalObjectSpace", 0, 4 * 1024 * 1024, 50));
488 totalObjectSpaceHistogram.count(ProcessHeap::totalAllocatedObjectSize() / 1024); 553 totalObjectSpaceHistogram.count(ProcessHeap::totalAllocatedObjectSize() / 1024);
489 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, totalAllocatedSpaceHistogram, new CustomCountHistogram("BlinkGC.TotalAllocatedSpace", 0, 4 * 1024 * 1024, 50)); 554 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, totalAllocatedSpaceHistogram, new CustomCountHistogram("BlinkGC.TotalAllocatedSpace", 0, 4 * 1024 * 1024, 50));
490 totalAllocatedSpaceHistogram.count(ProcessHeap::totalAllocatedSpace() / 1024); 555 totalAllocatedSpaceHistogram.count(ProcessHeap::totalAllocatedSpace() / 1024);
491 DEFINE_THREAD_SAFE_STATIC_LOCAL(EnumerationHistogram, gcReasonHistogram, new EnumerationHistogram("BlinkGC.GCReason", BlinkGC::NumberOfGCReason)); 556 DEFINE_THREAD_SAFE_STATIC_LOCAL(EnumerationHistogram, gcReasonHistogram, new EnumerationHistogram("BlinkGC.GCReason", BlinkGC::NumberOfGCReason));
492 gcReasonHistogram.count(reason); 557 gcReasonHistogram.count(reason);
493 558
494 s_lastGCReason = reason; 559 state->heap().m_lastGCReason = reason;
495 560
496 ThreadHeap::reportMemoryUsageHistogram(); 561 ThreadHeap::reportMemoryUsageHistogram();
497 WTF::Partitions::reportMemoryUsageHistogram(); 562 WTF::Partitions::reportMemoryUsageHistogram();
498 563
499 postGC(gcType); 564 state->heap().postGC(gcType);
500 ThreadHeap::decommitCallbackStacks(); 565 state->heap().decommitCallbackStacks();
501 } 566 }
502 567
503 void ThreadHeap::collectGarbageForTerminatingThread(ThreadState* state) 568 void ThreadHeap::collectGarbageForTerminatingThread(ThreadState* state)
504 { 569 {
505 { 570 {
506 // A thread-specific termination GC must not allow other global GCs to go 571 // A thread-specific termination GC must not allow other global GCs to go
507 // ahead while it is running, hence the termination GC does not enter a 572 // ahead while it is running, hence the termination GC does not enter a
508 // safepoint. VisitorScope will not enter also a safepoint scope for 573 // safepoint. VisitorScope will not enter also a safepoint scope for
509 // ThreadTerminationGC. 574 // ThreadTerminationGC.
510 OwnPtr<Visitor> visitor = Visitor::create(state, BlinkGC::ThreadTerminationGC); 575 OwnPtr<Visitor> visitor = Visitor::create(state, BlinkGC::ThreadTerminationGC);
511 576
512 ThreadState::NoAllocationScope noAllocationScope(state); 577 ThreadState::NoAllocationScope noAllocationScope(state);
513 578
514 state->preGC(); 579 state->preGC();
515 580
516 // 1. Trace the thread local persistent roots. For thread local GCs we 581 // 1. Trace the thread local persistent roots. For thread local GCs we
517 // don't trace the stack (ie. no conservative scanning) since this is 582 // don't trace the stack (ie. no conservative scanning) since this is
518 // only called during thread shutdown where there should be no objects 583 // only called during thread shutdown where there should be no objects
519 // on the stack. 584 // on the stack.
520 // We also assume that orphaned pages have no objects reachable from 585 // We also assume that orphaned pages have no objects reachable from
521 // persistent handles on other threads or CrossThreadPersistents. The 586 // persistent handles on other threads or CrossThreadPersistents. The
522 // only cases where this could happen is if a subsequent conservative 587 // only cases where this could happen is if a subsequent conservative
523 // global GC finds a "pointer" on the stack or due to a programming 588 // global GC finds a "pointer" on the stack or due to a programming
524 // error where an object has a dangling cross-thread pointer to an 589 // error where an object has a dangling cross-thread pointer to an
525 // object on this heap. 590 // object on this heap.
526 state->visitPersistents(visitor.get()); 591 state->visitPersistents(visitor.get());
527 592
528 // 2. Trace objects reachable from the thread's persistent roots 593 // 2. Trace objects reachable from the thread's persistent roots
529 // including ephemerons. 594 // including ephemerons.
530 processMarkingStack(visitor.get()); 595 state->heap().processMarkingStack(visitor.get());
531 596
532 postMarkingProcessing(visitor.get()); 597 state->heap().postMarkingProcessing(visitor.get());
533 globalWeakProcessing(visitor.get()); 598 state->heap().globalWeakProcessing(visitor.get());
534 599
535 state->postGC(BlinkGC::GCWithSweep); 600 state->postGC(BlinkGC::GCWithSweep);
536 ThreadHeap::decommitCallbackStacks(); 601 state->heap().decommitCallbackStacks();
537 } 602 }
538 state->preSweep(); 603 state->preSweep();
539 } 604 }
540 605
541 void ThreadHeap::processMarkingStack(Visitor* visitor) 606 void ThreadHeap::processMarkingStack(Visitor* visitor)
542 { 607 {
543 // Ephemeron fixed point loop. 608 // Ephemeron fixed point loop.
544 do { 609 do {
545 { 610 {
546 // Iteratively mark all objects that are reachable from the objects 611 // Iteratively mark all objects that are reachable from the objects
547 // currently pushed onto the marking stack. 612 // currently pushed onto the marking stack.
548 TRACE_EVENT0("blink_gc", "ThreadHeap::processMarkingStackSingleThrea ded"); 613 TRACE_EVENT0("blink_gc", "ThreadHeap::processMarkingStackSingleThrea ded");
549 while (popAndInvokeTraceCallback(visitor)) { } 614 while (popAndInvokeTraceCallback(visitor)) { }
550 } 615 }
551 616
552 { 617 {
553 // Mark any strong pointers that have now become reachable in 618 // Mark any strong pointers that have now become reachable in
554 // ephemeron maps. 619 // ephemeron maps.
555 TRACE_EVENT0("blink_gc", "ThreadHeap::processEphemeronStack"); 620 TRACE_EVENT0("blink_gc", "ThreadHeap::processEphemeronStack");
556 s_ephemeronStack->invokeEphemeronCallbacks(visitor); 621 m_ephemeronStack->invokeEphemeronCallbacks(visitor);
557 } 622 }
558 623
559 // Rerun loop if ephemeron processing queued more objects for tracing. 624 // Rerun loop if ephemeron processing queued more objects for tracing.
560 } while (!s_markingStack->isEmpty()); 625 } while (!m_markingStack->isEmpty());
561 } 626 }
562 627
563 void ThreadHeap::postMarkingProcessing(Visitor* visitor) 628 void ThreadHeap::postMarkingProcessing(Visitor* visitor)
564 { 629 {
565 TRACE_EVENT0("blink_gc", "ThreadHeap::postMarkingProcessing"); 630 TRACE_EVENT0("blink_gc", "ThreadHeap::postMarkingProcessing");
566 // Call post-marking callbacks including: 631 // Call post-marking callbacks including:
567 // 1. the ephemeronIterationDone callbacks on weak tables to do cleanup 632 // 1. the ephemeronIterationDone callbacks on weak tables to do cleanup
568 // (specifically to clear the queued bits for weak hash tables), and 633 // (specifically to clear the queued bits for weak hash tables), and
569 // 2. the markNoTracing callbacks on collection backings to mark them 634 // 2. the markNoTracing callbacks on collection backings to mark them
570 // if they are only reachable from their front objects. 635 // if they are only reachable from their front objects.
571 while (popAndInvokePostMarkingCallback(visitor)) { } 636 while (popAndInvokePostMarkingCallback(visitor)) { }
572 637
573 // Post-marking callbacks should not trace any objects and 638 // Post-marking callbacks should not trace any objects and
574 // therefore the marking stack should be empty after the 639 // therefore the marking stack should be empty after the
575 // post-marking callbacks. 640 // post-marking callbacks.
576 ASSERT(s_markingStack->isEmpty()); 641 ASSERT(m_markingStack->isEmpty());
577 } 642 }
578 643
579 void ThreadHeap::globalWeakProcessing(Visitor* visitor) 644 void ThreadHeap::globalWeakProcessing(Visitor* visitor)
580 { 645 {
581 TRACE_EVENT0("blink_gc", "ThreadHeap::globalWeakProcessing"); 646 TRACE_EVENT0("blink_gc", "ThreadHeap::globalWeakProcessing");
582 double startTime = WTF::currentTimeMS(); 647 double startTime = WTF::currentTimeMS();
583 648
584 // Call weak callbacks on objects that may now be pointing to dead objects. 649 // Call weak callbacks on objects that may now be pointing to dead objects.
585 while (popAndInvokeGlobalWeakCallback(visitor)) { } 650 while (popAndInvokeGlobalWeakCallback(visitor)) { }
586 651
587 // It is not permitted to trace pointers of live objects in the weak 652 // It is not permitted to trace pointers of live objects in the weak
588 // callback phase, so the marking stack should still be empty here. 653 // callback phase, so the marking stack should still be empty here.
589 ASSERT(s_markingStack->isEmpty()); 654 ASSERT(m_markingStack->isEmpty());
590 655
591 double timeForGlobalWeakProcessing = WTF::currentTimeMS() - startTime; 656 double timeForGlobalWeakProcessing = WTF::currentTimeMS() - startTime;
592 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, globalWeakTimeHistogra m, new CustomCountHistogram("BlinkGC.TimeForGlobalWeakProcessing", 1, 10 * 1000, 50)); 657 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, globalWeakTimeHistogra m, new CustomCountHistogram("BlinkGC.TimeForGlobalWeakProcessing", 1, 10 * 1000, 50));
593 globalWeakTimeHistogram.count(timeForGlobalWeakProcessing); 658 globalWeakTimeHistogram.count(timeForGlobalWeakProcessing);
594 } 659 }
595 660
596 void ThreadHeap::collectAllGarbage() 661 void ThreadHeap::collectAllGarbage()
597 { 662 {
598 // We need to run multiple GCs to collect a chain of persistent handles. 663 // We need to run multiple GCs to collect a chain of persistent handles.
599 size_t previousLiveObjects = 0; 664 size_t previousLiveObjects = 0;
665 ThreadState* state = ThreadState::current();
600 for (int i = 0; i < 5; ++i) { 666 for (int i = 0; i < 5; ++i) {
601 collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithSweep, Bli nkGC::ForcedGC); 667 collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithSweep, Bli nkGC::ForcedGC);
602 size_t liveObjects = ThreadHeap::heapStats().markedObjectSize(); 668 size_t liveObjects = state->heap().heapStats().markedObjectSize();
603 if (liveObjects == previousLiveObjects) 669 if (liveObjects == previousLiveObjects)
604 break; 670 break;
605 previousLiveObjects = liveObjects; 671 previousLiveObjects = liveObjects;
606 } 672 }
607 } 673 }
608 674
609 void ThreadHeap::reportMemoryUsageHistogram() 675 void ThreadHeap::reportMemoryUsageHistogram()
610 { 676 {
611 static size_t supportedMaxSizeInMB = 4 * 1024; 677 static size_t supportedMaxSizeInMB = 4 * 1024;
612 static size_t observedMaxSizeInMB = 0; 678 static size_t observedMaxSizeInMB = 0;
613 679
614 // We only report the memory in the main thread. 680 // We only report the memory in the main thread.
615 if (!isMainThread()) 681 if (!isMainThread())
616 return; 682 return;
617 // +1 is for rounding up the sizeInMB. 683 // +1 is for rounding up the sizeInMB.
618 size_t sizeInMB = ThreadHeap::heapStats().allocatedSpace() / 1024 / 1024 + 1 ; 684 size_t sizeInMB = ThreadState::current()->heap().heapStats().allocatedSpace( ) / 1024 / 1024 + 1;
619 if (sizeInMB >= supportedMaxSizeInMB) 685 if (sizeInMB >= supportedMaxSizeInMB)
620 sizeInMB = supportedMaxSizeInMB - 1; 686 sizeInMB = supportedMaxSizeInMB - 1;
621 if (sizeInMB > observedMaxSizeInMB) { 687 if (sizeInMB > observedMaxSizeInMB) {
622 // Send a UseCounter only when we see the highest memory usage 688 // Send a UseCounter only when we see the highest memory usage
623 // we've ever seen. 689 // we've ever seen.
624 DEFINE_THREAD_SAFE_STATIC_LOCAL(EnumerationHistogram, commitedSizeHistog ram, new EnumerationHistogram("BlinkGC.CommittedSize", supportedMaxSizeInMB)); 690 DEFINE_THREAD_SAFE_STATIC_LOCAL(EnumerationHistogram, commitedSizeHistog ram, new EnumerationHistogram("BlinkGC.CommittedSize", supportedMaxSizeInMB));
625 commitedSizeHistogram.count(sizeInMB); 691 commitedSizeHistogram.count(sizeInMB);
626 observedMaxSizeInMB = sizeInMB; 692 observedMaxSizeInMB = sizeInMB;
627 } 693 }
628 } 694 }
629 695
630 void ThreadHeap::reportMemoryUsageForTracing() 696 void ThreadHeap::reportMemoryUsageForTracing()
631 { 697 {
632 #if PRINT_HEAP_STATS 698 #if PRINT_HEAP_STATS
633 // dataLogF("allocatedSpace=%ldMB, allocatedObjectSize=%ldMB, markedObjectSi ze=%ldMB, partitionAllocSize=%ldMB, wrapperCount=%ld, collectedWrapperCount=%ld\ n", ThreadHeap::allocatedSpace() / 1024 / 1024, ThreadHeap::allocatedObjectSize( ) / 1024 / 1024, ThreadHeap::markedObjectSize() / 1024 / 1024, WTF::Partitions:: totalSizeOfCommittedPages() / 1024 / 1024, ThreadHeap::wrapperCount(), ThreadHea p::collectedWrapperCount()); 699 // dataLogF("allocatedSpace=%ldMB, allocatedObjectSize=%ldMB, markedObjectSi ze=%ldMB, partitionAllocSize=%ldMB, wrapperCount=%ld, collectedWrapperCount=%ld\ n", ThreadHeap::allocatedSpace() / 1024 / 1024, ThreadHeap::allocatedObjectSize( ) / 1024 / 1024, ThreadHeap::markedObjectSize() / 1024 / 1024, WTF::Partitions:: totalSizeOfCommittedPages() / 1024 / 1024, ThreadHeap::wrapperCount(), ThreadHea p::collectedWrapperCount());
634 #endif 700 #endif
635 701
636 bool gcTracingEnabled; 702 bool gcTracingEnabled;
637 TRACE_EVENT_CATEGORY_GROUP_ENABLED("blink_gc", &gcTracingEnabled); 703 TRACE_EVENT_CATEGORY_GROUP_ENABLED("blink_gc", &gcTracingEnabled);
638 if (!gcTracingEnabled) 704 if (!gcTracingEnabled)
639 return; 705 return;
640 706
707 ThreadHeap& heap = ThreadState::current()->heap();
641 // These values are divided by 1024 to avoid overflow in practical cases (TR ACE_COUNTER values are 32-bit ints). 708 // These values are divided by 1024 to avoid overflow in practical cases (TR ACE_COUNTER values are 32-bit ints).
642 // They are capped to INT_MAX just in case. 709 // They are capped to INT_MAX just in case.
643 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::allocated ObjectSizeKB", std::min(ThreadHeap::heapStats().allocatedObjectSize() / 1024, st atic_cast<size_t>(INT_MAX))); 710 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::allocated ObjectSizeKB", std::min(heap.heapStats().allocatedObjectSize() / 1024, static_ca st<size_t>(INT_MAX)));
644 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::markedObj ectSizeKB", std::min(ThreadHeap::heapStats().markedObjectSize() / 1024, static_c ast<size_t>(INT_MAX))); 711 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::markedObj ectSizeKB", std::min(heap.heapStats().markedObjectSize() / 1024, static_cast<siz e_t>(INT_MAX)));
645 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::markedObj ectSizeAtLastCompleteSweepKB", std::min(ThreadHeap::heapStats().markedObjectSize AtLastCompleteSweep() / 1024, static_cast<size_t>(INT_MAX))); 712 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::markedObj ectSizeAtLastCompleteSweepKB", std::min(heap.heapStats().markedObjectSizeAtLastC ompleteSweep() / 1024, static_cast<size_t>(INT_MAX)));
646 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::allocated SpaceKB", std::min(ThreadHeap::heapStats().allocatedSpace() / 1024, static_cast< size_t>(INT_MAX))); 713 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::allocated SpaceKB", std::min(heap.heapStats().allocatedSpace() / 1024, static_cast<size_t> (INT_MAX)));
647 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::objectSiz eAtLastGCKB", std::min(ThreadHeap::heapStats().objectSizeAtLastGC() / 1024, stat ic_cast<size_t>(INT_MAX))); 714 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::objectSiz eAtLastGCKB", std::min(heap.heapStats().objectSizeAtLastGC() / 1024, static_cast <size_t>(INT_MAX)));
648 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::wrapperCo unt", std::min(ThreadHeap::heapStats().wrapperCount(), static_cast<size_t>(INT_M AX))); 715 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::wrapperCo unt", std::min(heap.heapStats().wrapperCount(), static_cast<size_t>(INT_MAX)));
649 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::heapStats ().wrapperCountAtLastGC", std::min(ThreadHeap::heapStats().wrapperCountAtLastGC( ), static_cast<size_t>(INT_MAX))); 716 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::wrapperCo untAtLastGC", std::min(heap.heapStats().wrapperCountAtLastGC(), static_cast<size _t>(INT_MAX)));
650 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::collected WrapperCount", std::min(ThreadHeap::heapStats().collectedWrapperCount(), static_ cast<size_t>(INT_MAX))); 717 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::collected WrapperCount", std::min(heap.heapStats().collectedWrapperCount(), static_cast<si ze_t>(INT_MAX)));
651 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::partition AllocSizeAtLastGCKB", std::min(ThreadHeap::heapStats().partitionAllocSizeAtLastG C() / 1024, static_cast<size_t>(INT_MAX))); 718 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::partition AllocSizeAtLastGCKB", std::min(heap.heapStats().partitionAllocSizeAtLastGC() / 1 024, static_cast<size_t>(INT_MAX)));
652 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Partitions::totalSize OfCommittedPagesKB", std::min(WTF::Partitions::totalSizeOfCommittedPages() / 102 4, static_cast<size_t>(INT_MAX))); 719 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Partitions::totalSize OfCommittedPagesKB", std::min(WTF::Partitions::totalSizeOfCommittedPages() / 102 4, static_cast<size_t>(INT_MAX)));
653 } 720 }
654 721
655 size_t ThreadHeap::objectPayloadSizeForTesting() 722 size_t ThreadHeap::objectPayloadSizeForTesting()
656 { 723 {
724 // MEMO: is threadAttachMutex locked?
657 size_t objectPayloadSize = 0; 725 size_t objectPayloadSize = 0;
658 for (ThreadState* state : ThreadState::attachedThreads()) { 726 for (ThreadState* state : m_threads) {
659 state->setGCState(ThreadState::GCRunning); 727 state->setGCState(ThreadState::GCRunning);
660 state->makeConsistentForGC(); 728 state->makeConsistentForGC();
661 objectPayloadSize += state->objectPayloadSizeForTesting(); 729 objectPayloadSize += state->objectPayloadSizeForTesting();
662 state->setGCState(ThreadState::EagerSweepScheduled); 730 state->setGCState(ThreadState::EagerSweepScheduled);
663 state->setGCState(ThreadState::Sweeping); 731 state->setGCState(ThreadState::Sweeping);
664 state->setGCState(ThreadState::NoGCScheduled); 732 state->setGCState(ThreadState::NoGCScheduled);
665 } 733 }
666 return objectPayloadSize; 734 return objectPayloadSize;
667 } 735 }
668 736
669 RegionTree* ThreadHeap::getRegionTree() 737 void ThreadHeap::visitPersistentRoots(Visitor* visitor)
670 { 738 {
671 DEFINE_THREAD_SAFE_STATIC_LOCAL(RegionTree, tree, new RegionTree); 739 ASSERT(ThreadState::current()->isInGC());
672 return &tree; 740 TRACE_EVENT0("blink_gc", "ThreadHeap::visitPersistentRoots");
741 ProcessHeap::crossThreadPersistentRegion().tracePersistentNodes(visitor);
742
743 for (ThreadState* state : m_threads) {
744 state->visitPersistents(visitor);
745 }
673 } 746 }
674 747
675 BasePage* ThreadHeap::lookup(Address address) 748 void ThreadHeap::visitStackRoots(Visitor* visitor)
676 { 749 {
677 ASSERT(ThreadState::current()->isInGC()); 750 ASSERT(ThreadState::current()->isInGC());
678 if (PageMemoryRegion* region = ThreadHeap::getRegionTree()->lookup(address)) { 751 TRACE_EVENT0("blink_gc", "ThreadHeap::visitStackRoots");
752 for (ThreadState* state : m_threads) {
753 state->visitStack(visitor);
754 }
755 }
756
757 void ThreadHeap::checkAndPark(ThreadState* threadState, SafePointAwareMutexLocke r* locker)
758 {
759 m_safePointBarrier->checkAndPark(threadState, locker);
760 }
761
762 void ThreadHeap::enterSafePoint(ThreadState* threadState)
763 {
764 m_safePointBarrier->enterSafePoint(threadState);
765 }
766
767 void ThreadHeap::leaveSafePoint(ThreadState* threadState, SafePointAwareMutexLoc ker* locker)
768 {
769 m_safePointBarrier->leaveSafePoint(threadState, locker);
770 }
771
772 BasePage* ThreadHeap::lookupPageForAddress(Address address)
773 {
774 ASSERT(ThreadState::current()->isInGC());
775 if (PageMemoryRegion* region = m_regionTree->lookup(address)) {
679 BasePage* page = region->pageFromAddress(address); 776 BasePage* page = region->pageFromAddress(address);
680 return page && !page->orphaned() ? page : nullptr; 777 return page && !page->orphaned() ? page : nullptr;
681 } 778 }
682 return nullptr; 779 return nullptr;
683 } 780 }
684 781
685 void ThreadHeap::resetHeapCounters() 782 void ThreadHeap::resetHeapCounters()
686 { 783 {
687 ASSERT(ThreadState::current()->isInGC()); 784 ASSERT(ThreadState::current()->isInGC());
688 785
689 ThreadHeap::reportMemoryUsageForTracing(); 786 ThreadHeap::reportMemoryUsageForTracing();
690 787
691 ProcessHeap::resetHeapCounters(); 788 ProcessHeap::decreaseTotalAllocatedObjectSize(m_stats.allocatedObjectSize()) ;
692 ThreadHeap::heapStats().reset(); 789 ProcessHeap::decreaseTotalMarkedObjectSize(m_stats.markedObjectSize());
693 for (ThreadState* state : ThreadState::attachedThreads()) 790
791 m_stats.reset();
792 for (ThreadState* state : m_threads)
694 state->resetHeapCounters(); 793 state->resetHeapCounters();
695 } 794 }
696 795
697 ThreadHeapStats& ThreadHeap::heapStats() 796 ThreadHeap* ThreadHeap::s_mainThreadHeap = nullptr;
698 {
699 DEFINE_THREAD_SAFE_STATIC_LOCAL(ThreadHeapStats, stats, new ThreadHeapStats( ));
700 return stats;
701 }
702
703 CallbackStack* ThreadHeap::s_markingStack;
704 CallbackStack* ThreadHeap::s_postMarkingCallbackStack;
705 CallbackStack* ThreadHeap::s_globalWeakCallbackStack;
706 CallbackStack* ThreadHeap::s_ephemeronStack;
707 HeapDoesNotContainCache* ThreadHeap::s_heapDoesNotContainCache;
708 FreePagePool* ThreadHeap::s_freePagePool;
709 OrphanedPagePool* ThreadHeap::s_orphanedPagePool;
710
711 BlinkGC::GCReason ThreadHeap::s_lastGCReason = BlinkGC::NumberOfGCReason;
712 797
713 } // namespace blink 798 } // namespace blink
OLDNEW
« no previous file with comments | « third_party/WebKit/Source/platform/heap/Heap.h ('k') | third_party/WebKit/Source/platform/heap/HeapPage.cpp » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698