Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(60)

Side by Side Diff: third_party/WebKit/Source/platform/heap/Heap.cpp

Issue 1892713003: Prepare for multiple ThreadHeaps (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 4 years, 8 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 40 matching lines...) Expand 10 before | Expand all | Expand 10 after
51 #include "wtf/allocator/Partitions.h" 51 #include "wtf/allocator/Partitions.h"
52 52
53 namespace blink { 53 namespace blink {
54 54
55 HeapAllocHooks::AllocationHook* HeapAllocHooks::m_allocationHook = nullptr; 55 HeapAllocHooks::AllocationHook* HeapAllocHooks::m_allocationHook = nullptr;
56 HeapAllocHooks::FreeHook* HeapAllocHooks::m_freeHook = nullptr; 56 HeapAllocHooks::FreeHook* HeapAllocHooks::m_freeHook = nullptr;
57 57
58 class ParkThreadsScope final { 58 class ParkThreadsScope final {
59 STACK_ALLOCATED(); 59 STACK_ALLOCATED();
60 public: 60 public:
61 ParkThreadsScope() 61 ParkThreadsScope(ThreadState* state)
haraken 2016/04/21 11:48:25 Add explicit.
keishi 2016/04/22 06:09:58 Done.
62 : m_shouldResumeThreads(false) 62 : m_state(state)
63 , m_shouldResumeThreads(false)
63 { 64 {
64 } 65 }
65 66
66 bool parkThreads(ThreadState* state) 67 bool parkThreads()
67 { 68 {
68 TRACE_EVENT0("blink_gc", "ThreadHeap::ParkThreadsScope"); 69 TRACE_EVENT0("blink_gc", "ThreadHeap::ParkThreadsScope");
69 const char* samplingState = TRACE_EVENT_GET_SAMPLING_STATE(); 70 const char* samplingState = TRACE_EVENT_GET_SAMPLING_STATE();
70 if (state->isMainThread()) 71 if (m_state->isMainThread())
71 TRACE_EVENT_SET_SAMPLING_STATE("blink_gc", "BlinkGCWaiting"); 72 TRACE_EVENT_SET_SAMPLING_STATE("blink_gc", "BlinkGCWaiting");
72 73
73 // TODO(haraken): In an unlikely coincidence that two threads decide 74 // TODO(haraken): In an unlikely coincidence that two threads decide
74 // to collect garbage at the same time, avoid doing two GCs in 75 // to collect garbage at the same time, avoid doing two GCs in
75 // a row and return false. 76 // a row and return false.
76 double startTime = WTF::currentTimeMS(); 77 double startTime = WTF::currentTimeMS();
77 78
78 m_shouldResumeThreads = ThreadState::stopThreads(); 79 m_shouldResumeThreads = m_state->heap().park();
79 80
80 double timeForStoppingThreads = WTF::currentTimeMS() - startTime; 81 double timeForStoppingThreads = WTF::currentTimeMS() - startTime;
81 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, timeToStopThreadsH istogram, new CustomCountHistogram("BlinkGC.TimeForStoppingThreads", 1, 1000, 50 )); 82 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, timeToStopThreadsH istogram, new CustomCountHistogram("BlinkGC.TimeForStoppingThreads", 1, 1000, 50 ));
82 timeToStopThreadsHistogram.count(timeForStoppingThreads); 83 timeToStopThreadsHistogram.count(timeForStoppingThreads);
83 84
84 if (state->isMainThread()) 85 if (m_state->isMainThread())
85 TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(samplingState); 86 TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(samplingState);
86 return m_shouldResumeThreads; 87 return m_shouldResumeThreads;
87 } 88 }
88 89
89 ~ParkThreadsScope() 90 ~ParkThreadsScope()
90 { 91 {
91 // Only cleanup if we parked all threads in which case the GC happened 92 // Only cleanup if we parked all threads in which case the GC happened
92 // and we need to resume the other threads. 93 // and we need to resume the other threads.
93 if (m_shouldResumeThreads) 94 if (m_shouldResumeThreads)
94 ThreadState::resumeThreads(); 95 m_state->heap().resume();
95 } 96 }
96 97
97 private: 98 private:
99 ThreadState* m_state;
98 bool m_shouldResumeThreads; 100 bool m_shouldResumeThreads;
99 }; 101 };
100 102
101 void ThreadHeap::flushHeapDoesNotContainCache() 103 void ThreadHeap::flushHeapDoesNotContainCache()
102 { 104 {
103 s_heapDoesNotContainCache->flush(); 105 m_heapDoesNotContainCache->flush();
104 } 106 }
105 107
106 void ProcessHeap::init() 108 void ProcessHeap::init()
107 { 109 {
110 ThreadState::init();
111 s_shutdownComplete = false;
108 s_totalAllocatedSpace = 0; 112 s_totalAllocatedSpace = 0;
109 s_totalAllocatedObjectSize = 0; 113 s_totalAllocatedObjectSize = 0;
110 s_totalMarkedObjectSize = 0; 114 s_totalMarkedObjectSize = 0;
111 s_isLowEndDevice = base::SysInfo::IsLowEndDevice(); 115 s_isLowEndDevice = base::SysInfo::IsLowEndDevice();
116
117 GCInfoTable::init();
118
119 if (Platform::current() && Platform::current()->currentThread())
120 Platform::current()->registerMemoryDumpProvider(BlinkGCMemoryDumpProvide r::instance(), "BlinkGC");
112 } 121 }
113 122
114 void ProcessHeap::resetHeapCounters() 123 void ProcessHeap::resetHeapCounters()
115 { 124 {
116 s_totalAllocatedObjectSize = 0; 125 s_totalAllocatedObjectSize = 0;
117 s_totalMarkedObjectSize = 0; 126 s_totalMarkedObjectSize = 0;
118 } 127 }
119 128
120 void ThreadHeap::init() 129 void ProcessHeap::shutdown()
121 { 130 {
122 ThreadState::init(); 131 ASSERT(!s_shutdownComplete);
123 ProcessHeap::init();
124 s_markingStack = new CallbackStack();
125 s_postMarkingCallbackStack = new CallbackStack();
126 s_globalWeakCallbackStack = new CallbackStack();
127 // Use smallest supported block size for ephemerons.
128 s_ephemeronStack = new CallbackStack(CallbackStack::kMinimalBlockSize);
129 s_heapDoesNotContainCache = new HeapDoesNotContainCache();
130 s_freePagePool = new FreePagePool();
131 s_orphanedPagePool = new OrphanedPagePool();
132 s_lastGCReason = BlinkGC::NumberOfGCReason;
133
134 GCInfoTable::init();
135
136 if (Platform::current() && Platform::current()->currentThread())
137 Platform::current()->registerMemoryDumpProvider(BlinkGCMemoryDumpProvide r::instance(), "BlinkGC");
138 }
139
140 void ThreadHeap::shutdown()
141 {
142 ASSERT(s_markingStack);
143 132
144 if (Platform::current() && Platform::current()->currentThread()) 133 if (Platform::current() && Platform::current()->currentThread())
145 Platform::current()->unregisterMemoryDumpProvider(BlinkGCMemoryDumpProvi der::instance()); 134 Platform::current()->unregisterMemoryDumpProvider(BlinkGCMemoryDumpProvi der::instance());
146 135
147 // The main thread must be the last thread that gets detached. 136 {
148 RELEASE_ASSERT(ThreadState::attachedThreads().size() == 0); 137 // The main thread must be the last thread that gets detached.
138 MutexLocker locker(ThreadHeap::allHeapsMutex());
139 RELEASE_ASSERT(ThreadHeap::allHeaps().isEmpty());
140 }
149 141
150 delete s_heapDoesNotContainCache;
151 s_heapDoesNotContainCache = nullptr;
152 delete s_freePagePool;
153 s_freePagePool = nullptr;
154 delete s_orphanedPagePool;
155 s_orphanedPagePool = nullptr;
156 delete s_globalWeakCallbackStack;
157 s_globalWeakCallbackStack = nullptr;
158 delete s_postMarkingCallbackStack;
159 s_postMarkingCallbackStack = nullptr;
160 delete s_markingStack;
161 s_markingStack = nullptr;
162 delete s_ephemeronStack;
163 s_ephemeronStack = nullptr;
164 GCInfoTable::shutdown(); 142 GCInfoTable::shutdown();
165 ThreadState::shutdown(); 143 ASSERT(ProcessHeap::totalAllocatedSpace() == 0);
166 ASSERT(ThreadHeap::heapStats().allocatedSpace() == 0); 144 s_shutdownComplete = true;
167 } 145 }
168 146
169 CrossThreadPersistentRegion& ProcessHeap::crossThreadPersistentRegion() 147 CrossThreadPersistentRegion& ProcessHeap::crossThreadPersistentRegion()
170 { 148 {
171 DEFINE_THREAD_SAFE_STATIC_LOCAL(CrossThreadPersistentRegion, persistentRegio n, new CrossThreadPersistentRegion()); 149 DEFINE_THREAD_SAFE_STATIC_LOCAL(CrossThreadPersistentRegion, persistentRegio n, new CrossThreadPersistentRegion());
172 return persistentRegion; 150 return persistentRegion;
173 } 151 }
174 152
153 bool ProcessHeap::s_shutdownComplete = false;
175 bool ProcessHeap::s_isLowEndDevice = false; 154 bool ProcessHeap::s_isLowEndDevice = false;
176 size_t ProcessHeap::s_totalAllocatedSpace = 0; 155 size_t ProcessHeap::s_totalAllocatedSpace = 0;
177 size_t ProcessHeap::s_totalAllocatedObjectSize = 0; 156 size_t ProcessHeap::s_totalAllocatedObjectSize = 0;
178 size_t ProcessHeap::s_totalMarkedObjectSize = 0; 157 size_t ProcessHeap::s_totalMarkedObjectSize = 0;
179 158
180 ThreadHeapStats::ThreadHeapStats() 159 ThreadHeapStats::ThreadHeapStats()
181 : m_allocatedSpace(0) 160 : m_allocatedSpace(0)
182 , m_allocatedObjectSize(0) 161 , m_allocatedObjectSize(0)
183 , m_objectSizeAtLastGC(0) 162 , m_objectSizeAtLastGC(0)
184 , m_markedObjectSize(0) 163 , m_markedObjectSize(0)
(...skipping 52 matching lines...) Expand 10 before | Expand all | Expand 10 after
237 atomicAdd(&m_allocatedSpace, static_cast<long>(delta)); 216 atomicAdd(&m_allocatedSpace, static_cast<long>(delta));
238 ProcessHeap::increaseTotalAllocatedSpace(delta); 217 ProcessHeap::increaseTotalAllocatedSpace(delta);
239 } 218 }
240 219
241 void ThreadHeapStats::decreaseAllocatedSpace(size_t delta) 220 void ThreadHeapStats::decreaseAllocatedSpace(size_t delta)
242 { 221 {
243 atomicSubtract(&m_allocatedSpace, static_cast<long>(delta)); 222 atomicSubtract(&m_allocatedSpace, static_cast<long>(delta));
244 ProcessHeap::decreaseTotalAllocatedSpace(delta); 223 ProcessHeap::decreaseTotalAllocatedSpace(delta);
245 } 224 }
246 225
226 ThreadHeap::ThreadHeap()
227 : m_regionTree(adoptPtr(new RegionTree()))
228 , m_heapDoesNotContainCache(adoptPtr(new HeapDoesNotContainCache))
229 , m_safePointBarrier(adoptPtr(new SafePointBarrier()))
230 , m_freePagePool(adoptPtr(new FreePagePool))
231 , m_orphanedPagePool(adoptPtr(new OrphanedPagePool))
232 , m_markingStack(adoptPtr(new CallbackStack()))
233 , m_postMarkingCallbackStack(adoptPtr(new CallbackStack()))
234 , m_globalWeakCallbackStack(adoptPtr(new CallbackStack()))
235 , m_ephemeronStack(adoptPtr(new CallbackStack(CallbackStack::kMinimalBlockSi ze)))
236 {
237 if (ThreadState::current()->isMainThread())
238 s_mainThreadHeap = this;
239
240 MutexLocker locker(ThreadHeap::allHeapsMutex());
241 allHeaps().add(this);
242 }
243
244 ThreadHeap::~ThreadHeap()
245 {
246 MutexLocker locker(ThreadHeap::allHeapsMutex());
247 allHeaps().remove(this);
248 }
249
250 RecursiveMutex& ThreadHeap::allHeapsMutex()
251 {
252 DEFINE_THREAD_SAFE_STATIC_LOCAL(RecursiveMutex, mutex, (new RecursiveMutex)) ;
253 return mutex;
254 }
255
256 HashSet<ThreadHeap*>& ThreadHeap::allHeaps()
257 {
258 DEFINE_STATIC_LOCAL(HashSet<ThreadHeap*>, heaps, ());
259 return heaps;
260 }
261
262 void ThreadHeap::attach(ThreadState* thread)
263 {
264 MutexLocker locker(m_threadAttachMutex);
265 m_threads.add(thread);
266 }
267
268 void ThreadHeap::detach(ThreadState* thread)
269 {
270 ASSERT(ThreadState::current() == thread);
271 {
272 // Grab the threadAttachMutex to ensure only one thread can shutdown at
273 // a time and that no other thread can do a global GC. It also allows
274 // safe iteration of the m_threads set which happens as part of
275 // thread local GC asserts. We enter a safepoint while waiting for the
276 // lock to avoid a dead-lock where another thread has already requested
277 // GC.
278 SafePointAwareMutexLocker locker(m_threadAttachMutex, BlinkGC::NoHeapPoi ntersOnStack);
279 thread->cleanup();
280 ASSERT(m_threads.contains(thread));
281 m_threads.remove(thread);
282 }
283 // The main thread must be the last thread that gets detached.
284 ASSERT(!thread->isMainThread() || m_threads.isEmpty());
285 if (m_threads.isEmpty()) {
haraken 2016/04/21 11:48:25 m_threads.isEmpty() => thread->isMainThread()
keishi 2016/04/22 06:09:58 Done.
286 ASSERT(heapStats().allocatedSpace() == 0);
287 delete this;
288 }
289 }
290
291 bool ThreadHeap::park()
292 {
293 return m_safePointBarrier->parkOthers();
294 }
295
296 void ThreadHeap::resume()
297 {
298 m_safePointBarrier->resumeOthers();
299 }
300
247 #if ENABLE(ASSERT) 301 #if ENABLE(ASSERT)
248 BasePage* ThreadHeap::findPageFromAddress(Address address) 302 BasePage* ThreadHeap::findPageFromAddress(Address address)
249 { 303 {
250 MutexLocker lock(ThreadState::threadAttachMutex()); 304 MutexLocker locker(m_threadAttachMutex);
251 for (ThreadState* state : ThreadState::attachedThreads()) { 305 for (ThreadState* state : m_threads) {
252 if (BasePage* page = state->findPageFromAddress(address)) 306 if (BasePage* page = state->findPageFromAddress(address))
253 return page; 307 return page;
254 } 308 }
255 return nullptr; 309 return nullptr;
256 } 310 }
311
312 bool ThreadHeap::isAtSafePoint()
313 {
314 MutexLocker locker(m_threadAttachMutex);
315 for (ThreadState* state : m_threads) {
316 if (!state->isAtSafePoint())
317 return false;
318 }
319 return true;
320 }
257 #endif 321 #endif
258 322
259 Address ThreadHeap::checkAndMarkPointer(Visitor* visitor, Address address) 323 Address ThreadHeap::checkAndMarkPointer(Visitor* visitor, Address address)
260 { 324 {
261 ASSERT(ThreadState::current()->isInGC()); 325 ASSERT(ThreadState::current()->isInGC());
262 326
263 #if !ENABLE(ASSERT) 327 #if !ENABLE(ASSERT)
264 if (s_heapDoesNotContainCache->lookup(address)) 328 if (m_heapDoesNotContainCache->lookup(address))
265 return nullptr; 329 return nullptr;
266 #endif 330 #endif
267 331
268 if (BasePage* page = lookup(address)) { 332 if (BasePage* page = lookupPageForAddress(address)) {
269 ASSERT(page->contains(address)); 333 ASSERT(page->contains(address));
270 ASSERT(!page->orphaned()); 334 ASSERT(!page->orphaned());
271 ASSERT(!s_heapDoesNotContainCache->lookup(address)); 335 ASSERT(!m_heapDoesNotContainCache->lookup(address));
272 page->checkAndMarkPointer(visitor, address); 336 page->checkAndMarkPointer(visitor, address);
273 return address; 337 return address;
274 } 338 }
275 339
276 #if !ENABLE(ASSERT) 340 #if !ENABLE(ASSERT)
277 s_heapDoesNotContainCache->addEntry(address); 341 m_heapDoesNotContainCache->addEntry(address);
278 #else 342 #else
279 if (!s_heapDoesNotContainCache->lookup(address)) 343 if (!m_heapDoesNotContainCache->lookup(address))
280 s_heapDoesNotContainCache->addEntry(address); 344 m_heapDoesNotContainCache->addEntry(address);
281 #endif 345 #endif
282 return nullptr; 346 return nullptr;
283 } 347 }
284 348
285 void ThreadHeap::pushTraceCallback(void* object, TraceCallback callback) 349 void ThreadHeap::pushTraceCallback(void* object, TraceCallback callback)
286 { 350 {
287 ASSERT(ThreadState::current()->isInGC()); 351 ASSERT(ThreadState::current()->isInGC());
288 352
289 // Trace should never reach an orphaned page. 353 // Trace should never reach an orphaned page.
290 ASSERT(!ThreadHeap::getOrphanedPagePool()->contains(object)); 354 ASSERT(!getOrphanedPagePool()->contains(object));
291 CallbackStack::Item* slot = s_markingStack->allocateEntry(); 355 CallbackStack::Item* slot = m_markingStack->allocateEntry();
292 *slot = CallbackStack::Item(object, callback); 356 *slot = CallbackStack::Item(object, callback);
293 } 357 }
294 358
295 bool ThreadHeap::popAndInvokeTraceCallback(Visitor* visitor) 359 bool ThreadHeap::popAndInvokeTraceCallback(Visitor* visitor)
296 { 360 {
297 CallbackStack::Item* item = s_markingStack->pop(); 361 CallbackStack::Item* item = m_markingStack->pop();
298 if (!item) 362 if (!item)
299 return false; 363 return false;
300 item->call(visitor); 364 item->call(visitor);
301 return true; 365 return true;
302 } 366 }
303 367
304 void ThreadHeap::pushPostMarkingCallback(void* object, TraceCallback callback) 368 void ThreadHeap::pushPostMarkingCallback(void* object, TraceCallback callback)
305 { 369 {
306 ASSERT(ThreadState::current()->isInGC()); 370 ASSERT(ThreadState::current()->isInGC());
307 371
308 // Trace should never reach an orphaned page. 372 // Trace should never reach an orphaned page.
309 ASSERT(!ThreadHeap::getOrphanedPagePool()->contains(object)); 373 ASSERT(!getOrphanedPagePool()->contains(object));
310 CallbackStack::Item* slot = s_postMarkingCallbackStack->allocateEntry(); 374 CallbackStack::Item* slot = m_postMarkingCallbackStack->allocateEntry();
311 *slot = CallbackStack::Item(object, callback); 375 *slot = CallbackStack::Item(object, callback);
312 } 376 }
313 377
314 bool ThreadHeap::popAndInvokePostMarkingCallback(Visitor* visitor) 378 bool ThreadHeap::popAndInvokePostMarkingCallback(Visitor* visitor)
315 { 379 {
316 if (CallbackStack::Item* item = s_postMarkingCallbackStack->pop()) { 380 if (CallbackStack::Item* item = m_postMarkingCallbackStack->pop()) {
317 item->call(visitor); 381 item->call(visitor);
318 return true; 382 return true;
319 } 383 }
320 return false; 384 return false;
321 } 385 }
322 386
323 void ThreadHeap::pushGlobalWeakCallback(void** cell, WeakCallback callback) 387 void ThreadHeap::pushGlobalWeakCallback(void** cell, WeakCallback callback)
324 { 388 {
325 ASSERT(ThreadState::current()->isInGC()); 389 ASSERT(ThreadState::current()->isInGC());
326 390
327 // Trace should never reach an orphaned page. 391 // Trace should never reach an orphaned page.
328 ASSERT(!ThreadHeap::getOrphanedPagePool()->contains(cell)); 392 ASSERT(!getOrphanedPagePool()->contains(cell));
329 CallbackStack::Item* slot = s_globalWeakCallbackStack->allocateEntry(); 393 CallbackStack::Item* slot = m_globalWeakCallbackStack->allocateEntry();
330 *slot = CallbackStack::Item(cell, callback); 394 *slot = CallbackStack::Item(cell, callback);
331 } 395 }
332 396
333 void ThreadHeap::pushThreadLocalWeakCallback(void* closure, void* object, WeakCa llback callback) 397 void ThreadHeap::pushThreadLocalWeakCallback(void* closure, void* object, WeakCa llback callback)
334 { 398 {
335 ASSERT(ThreadState::current()->isInGC()); 399 ASSERT(ThreadState::current()->isInGC());
336 400
337 // Trace should never reach an orphaned page. 401 // Trace should never reach an orphaned page.
338 ASSERT(!ThreadHeap::getOrphanedPagePool()->contains(object)); 402 ASSERT(!getOrphanedPagePool()->contains(object));
339 ThreadState* state = pageFromObject(object)->arena()->getThreadState(); 403 ThreadState* state = pageFromObject(object)->arena()->getThreadState();
340 state->pushThreadLocalWeakCallback(closure, callback); 404 state->pushThreadLocalWeakCallback(closure, callback);
341 } 405 }
342 406
343 bool ThreadHeap::popAndInvokeGlobalWeakCallback(Visitor* visitor) 407 bool ThreadHeap::popAndInvokeGlobalWeakCallback(Visitor* visitor)
344 { 408 {
345 if (CallbackStack::Item* item = s_globalWeakCallbackStack->pop()) { 409 if (CallbackStack::Item* item = m_globalWeakCallbackStack->pop()) {
346 item->call(visitor); 410 item->call(visitor);
347 return true; 411 return true;
348 } 412 }
349 return false; 413 return false;
350 } 414 }
351 415
352 void ThreadHeap::registerWeakTable(void* table, EphemeronCallback iterationCallb ack, EphemeronCallback iterationDoneCallback) 416 void ThreadHeap::registerWeakTable(void* table, EphemeronCallback iterationCallb ack, EphemeronCallback iterationDoneCallback)
353 { 417 {
354 ASSERT(ThreadState::current()->isInGC()); 418 ASSERT(ThreadState::current()->isInGC());
355 419
356 // Trace should never reach an orphaned page. 420 // Trace should never reach an orphaned page.
357 ASSERT(!ThreadHeap::getOrphanedPagePool()->contains(table)); 421 ASSERT(!getOrphanedPagePool()->contains(table));
358 CallbackStack::Item* slot = s_ephemeronStack->allocateEntry(); 422 CallbackStack::Item* slot = m_ephemeronStack->allocateEntry();
359 *slot = CallbackStack::Item(table, iterationCallback); 423 *slot = CallbackStack::Item(table, iterationCallback);
360 424
361 // Register a post-marking callback to tell the tables that 425 // Register a post-marking callback to tell the tables that
362 // ephemeron iteration is complete. 426 // ephemeron iteration is complete.
363 pushPostMarkingCallback(table, iterationDoneCallback); 427 pushPostMarkingCallback(table, iterationDoneCallback);
364 } 428 }
365 429
366 #if ENABLE(ASSERT) 430 #if ENABLE(ASSERT)
367 bool ThreadHeap::weakTableRegistered(const void* table) 431 bool ThreadHeap::weakTableRegistered(const void* table)
368 { 432 {
369 ASSERT(s_ephemeronStack); 433 ASSERT(m_ephemeronStack);
370 return s_ephemeronStack->hasCallbackForObject(table); 434 return m_ephemeronStack->hasCallbackForObject(table);
371 } 435 }
372 #endif 436 #endif
373 437
374 void ThreadHeap::decommitCallbackStacks() 438 void ThreadHeap::decommitCallbackStacks()
375 { 439 {
376 s_markingStack->decommit(); 440 m_markingStack->decommit();
377 s_postMarkingCallbackStack->decommit(); 441 m_postMarkingCallbackStack->decommit();
378 s_globalWeakCallbackStack->decommit(); 442 m_globalWeakCallbackStack->decommit();
379 s_ephemeronStack->decommit(); 443 m_ephemeronStack->decommit();
380 } 444 }
381 445
382 void ThreadHeap::preGC() 446 void ThreadHeap::preGC()
383 { 447 {
384 ASSERT(!ThreadState::current()->isInGC()); 448 ASSERT(!ThreadState::current()->isInGC());
385 for (ThreadState* state : ThreadState::attachedThreads()) 449 for (ThreadState* state : m_threads) {
386 state->preGC(); 450 state->preGC();
451 }
387 } 452 }
388 453
389 void ThreadHeap::postGC(BlinkGC::GCType gcType) 454 void ThreadHeap::postGC(BlinkGC::GCType gcType)
390 { 455 {
391 ASSERT(ThreadState::current()->isInGC()); 456 ASSERT(ThreadState::current()->isInGC());
392 for (ThreadState* state : ThreadState::attachedThreads()) 457 for (ThreadState* state : m_threads) {
393 state->postGC(gcType); 458 state->postGC(gcType);
459 }
394 } 460 }
395 461
396 const char* ThreadHeap::gcReasonString(BlinkGC::GCReason reason) 462 const char* ThreadHeap::gcReasonString(BlinkGC::GCReason reason)
397 { 463 {
398 switch (reason) { 464 switch (reason) {
399 case BlinkGC::IdleGC: 465 case BlinkGC::IdleGC:
400 return "IdleGC"; 466 return "IdleGC";
401 case BlinkGC::PreciseGC: 467 case BlinkGC::PreciseGC:
402 return "PreciseGC"; 468 return "PreciseGC";
403 case BlinkGC::ConservativeGC: 469 case BlinkGC::ConservativeGC:
(...skipping 17 matching lines...) Expand all
421 ThreadState* state = ThreadState::current(); 487 ThreadState* state = ThreadState::current();
422 // Nested collectGarbage() invocations aren't supported. 488 // Nested collectGarbage() invocations aren't supported.
423 RELEASE_ASSERT(!state->isGCForbidden()); 489 RELEASE_ASSERT(!state->isGCForbidden());
424 state->completeSweep(); 490 state->completeSweep();
425 491
426 OwnPtr<Visitor> visitor = Visitor::create(state, gcType); 492 OwnPtr<Visitor> visitor = Visitor::create(state, gcType);
427 493
428 SafePointScope safePointScope(stackState, state); 494 SafePointScope safePointScope(stackState, state);
429 495
430 // Resume all parked threads upon leaving this scope. 496 // Resume all parked threads upon leaving this scope.
431 ParkThreadsScope parkThreadsScope; 497 ParkThreadsScope parkThreadsScope(state);
432 498
433 // Try to park the other threads. If we're unable to, bail out of the GC. 499 // Try to park the other threads. If we're unable to, bail out of the GC.
434 if (!parkThreadsScope.parkThreads(state)) 500 if (!parkThreadsScope.parkThreads())
435 return; 501 return;
436 502
437 ScriptForbiddenIfMainThreadScope scriptForbidden; 503 ScriptForbiddenIfMainThreadScope scriptForbidden;
438 504
439 TRACE_EVENT2("blink_gc,devtools.timeline", "BlinkGCMarking", 505 TRACE_EVENT2("blink_gc,devtools.timeline", "BlinkGCMarking",
440 "lazySweeping", gcType == BlinkGC::GCWithoutSweep, 506 "lazySweeping", gcType == BlinkGC::GCWithoutSweep,
441 "gcReason", gcReasonString(reason)); 507 "gcReason", gcReasonString(reason));
442 TRACE_EVENT_SCOPED_SAMPLING_STATE("blink_gc", "BlinkGC"); 508 TRACE_EVENT_SCOPED_SAMPLING_STATE("blink_gc", "BlinkGC");
443 double startTime = WTF::currentTimeMS(); 509 double startTime = WTF::currentTimeMS();
444 510
445 if (gcType == BlinkGC::TakeSnapshot) 511 if (gcType == BlinkGC::TakeSnapshot)
446 BlinkGCMemoryDumpProvider::instance()->clearProcessDumpForCurrentGC(); 512 BlinkGCMemoryDumpProvider::instance()->clearProcessDumpForCurrentGC();
447 513
448 // Disallow allocation during garbage collection (but not during the 514 // Disallow allocation during garbage collection (but not during the
449 // finalization that happens when the visitorScope is torn down). 515 // finalization that happens when the visitorScope is torn down).
450 ThreadState::NoAllocationScope noAllocationScope(state); 516 ThreadState::NoAllocationScope noAllocationScope(state);
451 517
452 preGC(); 518 state->heap().preGC();
453 519
454 StackFrameDepthScope stackDepthScope; 520 StackFrameDepthScope stackDepthScope;
455 521
456 size_t totalObjectSize = ThreadHeap::heapStats().allocatedObjectSize() + Thr eadHeap::heapStats().markedObjectSize(); 522 size_t totalObjectSize = state->heap().heapStats().allocatedObjectSize() + s tate->heap().heapStats().markedObjectSize();
457 if (gcType != BlinkGC::TakeSnapshot) 523 if (gcType != BlinkGC::TakeSnapshot)
458 ThreadHeap::resetHeapCounters(); 524 state->heap().resetHeapCounters();
459 525
460 // 1. Trace persistent roots. 526 // 1. Trace persistent roots.
461 ThreadState::visitPersistentRoots(visitor.get()); 527 state->heap().visitPersistentRoots(visitor.get());
462 528
463 // 2. Trace objects reachable from the stack. We do this independent of the 529 // 2. Trace objects reachable from the stack. We do this independent of the
464 // given stackState since other threads might have a different stack state. 530 // given stackState since other threads might have a different stack state.
465 ThreadState::visitStackRoots(visitor.get()); 531 state->heap().visitStackRoots(visitor.get());
466 532
467 // 3. Transitive closure to trace objects including ephemerons. 533 // 3. Transitive closure to trace objects including ephemerons.
468 processMarkingStack(visitor.get()); 534 state->heap().processMarkingStack(visitor.get());
469 535
470 postMarkingProcessing(visitor.get()); 536 state->heap().postMarkingProcessing(visitor.get());
471 globalWeakProcessing(visitor.get()); 537 state->heap().globalWeakProcessing(visitor.get());
472 538
473 // Now we can delete all orphaned pages because there are no dangling 539 // Now we can delete all orphaned pages because there are no dangling
474 // pointers to the orphaned pages. (If we have such dangling pointers, 540 // pointers to the orphaned pages. (If we have such dangling pointers,
475 // we should have crashed during marking before getting here.) 541 // we should have crashed during marking before getting here.)
476 getOrphanedPagePool()->decommitOrphanedPages(); 542 state->heap().getOrphanedPagePool()->decommitOrphanedPages();
477 543
478 double markingTimeInMilliseconds = WTF::currentTimeMS() - startTime; 544 double markingTimeInMilliseconds = WTF::currentTimeMS() - startTime;
479 ThreadHeap::heapStats().setEstimatedMarkingTimePerByte(totalObjectSize ? (ma rkingTimeInMilliseconds / 1000 / totalObjectSize) : 0); 545 state->heap().heapStats().setEstimatedMarkingTimePerByte(totalObjectSize ? ( markingTimeInMilliseconds / 1000 / totalObjectSize) : 0);
480 546
481 #if PRINT_HEAP_STATS 547 #if PRINT_HEAP_STATS
482 dataLogF("ThreadHeap::collectGarbage (gcReason=%s, lazySweeping=%d, time=%.1 lfms)\n", gcReasonString(reason), gcType == BlinkGC::GCWithoutSweep, markingTime InMilliseconds); 548 dataLogF("ThreadHeap::collectGarbage (gcReason=%s, lazySweeping=%d, time=%.1 lfms)\n", gcReasonString(reason), gcType == BlinkGC::GCWithoutSweep, markingTime InMilliseconds);
483 #endif 549 #endif
484 550
485 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, markingTimeHistogram, new CustomCountHistogram("BlinkGC.CollectGarbage", 0, 10 * 1000, 50)); 551 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, markingTimeHistogram, new CustomCountHistogram("BlinkGC.CollectGarbage", 0, 10 * 1000, 50));
486 markingTimeHistogram.count(markingTimeInMilliseconds); 552 markingTimeHistogram.count(markingTimeInMilliseconds);
487 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, totalObjectSpaceHistog ram, new CustomCountHistogram("BlinkGC.TotalObjectSpace", 0, 4 * 1024 * 1024, 50 )); 553 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, totalObjectSpaceHistog ram, new CustomCountHistogram("BlinkGC.TotalObjectSpace", 0, 4 * 1024 * 1024, 50 ));
488 totalObjectSpaceHistogram.count(ProcessHeap::totalAllocatedObjectSize() / 10 24); 554 totalObjectSpaceHistogram.count(ProcessHeap::totalAllocatedObjectSize() / 10 24);
489 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, totalAllocatedSpaceHis togram, new CustomCountHistogram("BlinkGC.TotalAllocatedSpace", 0, 4 * 1024 * 10 24, 50)); 555 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, totalAllocatedSpaceHis togram, new CustomCountHistogram("BlinkGC.TotalAllocatedSpace", 0, 4 * 1024 * 10 24, 50));
490 totalAllocatedSpaceHistogram.count(ProcessHeap::totalAllocatedSpace() / 1024 ); 556 totalAllocatedSpaceHistogram.count(ProcessHeap::totalAllocatedSpace() / 1024 );
491 DEFINE_THREAD_SAFE_STATIC_LOCAL(EnumerationHistogram, gcReasonHistogram, new EnumerationHistogram("BlinkGC.GCReason", BlinkGC::NumberOfGCReason)); 557 DEFINE_THREAD_SAFE_STATIC_LOCAL(EnumerationHistogram, gcReasonHistogram, new EnumerationHistogram("BlinkGC.GCReason", BlinkGC::NumberOfGCReason));
492 gcReasonHistogram.count(reason); 558 gcReasonHistogram.count(reason);
493 559
494 s_lastGCReason = reason; 560 state->heap().m_lastGCReason = reason;
495 561
496 ThreadHeap::reportMemoryUsageHistogram(); 562 ThreadHeap::reportMemoryUsageHistogram();
497 WTF::Partitions::reportMemoryUsageHistogram(); 563 WTF::Partitions::reportMemoryUsageHistogram();
498 564
499 postGC(gcType); 565 state->heap().postGC(gcType);
500 ThreadHeap::decommitCallbackStacks(); 566 state->heap().decommitCallbackStacks();
501 } 567 }
502 568
503 void ThreadHeap::collectGarbageForTerminatingThread(ThreadState* state) 569 void ThreadHeap::collectGarbageForTerminatingThread(ThreadState* state)
504 { 570 {
505 { 571 {
506 // A thread-specific termination GC must not allow other global GCs to g o 572 // A thread-specific termination GC must not allow other global GCs to g o
507 // ahead while it is running, hence the termination GC does not enter a 573 // ahead while it is running, hence the termination GC does not enter a
508 // safepoint. VisitorScope will not enter also a safepoint scope for 574 // safepoint. VisitorScope will not enter also a safepoint scope for
509 // ThreadTerminationGC. 575 // ThreadTerminationGC.
510 OwnPtr<Visitor> visitor = Visitor::create(state, BlinkGC::ThreadTerminat ionGC); 576 OwnPtr<Visitor> visitor = Visitor::create(state, BlinkGC::ThreadTerminat ionGC);
511 577
512 ThreadState::NoAllocationScope noAllocationScope(state); 578 ThreadState::NoAllocationScope noAllocationScope(state);
513 579
514 state->preGC(); 580 state->preGC();
515 581
516 // 1. Trace the thread local persistent roots. For thread local GCs we 582 // 1. Trace the thread local persistent roots. For thread local GCs we
517 // don't trace the stack (ie. no conservative scanning) since this is 583 // don't trace the stack (ie. no conservative scanning) since this is
518 // only called during thread shutdown where there should be no objects 584 // only called during thread shutdown where there should be no objects
519 // on the stack. 585 // on the stack.
520 // We also assume that orphaned pages have no objects reachable from 586 // We also assume that orphaned pages have no objects reachable from
521 // persistent handles on other threads or CrossThreadPersistents. The 587 // persistent handles on other threads or CrossThreadPersistents. The
522 // only cases where this could happen is if a subsequent conservative 588 // only cases where this could happen is if a subsequent conservative
523 // global GC finds a "pointer" on the stack or due to a programming 589 // global GC finds a "pointer" on the stack or due to a programming
524 // error where an object has a dangling cross-thread pointer to an 590 // error where an object has a dangling cross-thread pointer to an
525 // object on this heap. 591 // object on this heap.
526 state->visitPersistents(visitor.get()); 592 state->visitPersistents(visitor.get());
527 593
528 // 2. Trace objects reachable from the thread's persistent roots 594 // 2. Trace objects reachable from the thread's persistent roots
529 // including ephemerons. 595 // including ephemerons.
530 processMarkingStack(visitor.get()); 596 state->heap().processMarkingStack(visitor.get());
531 597
532 postMarkingProcessing(visitor.get()); 598 state->heap().postMarkingProcessing(visitor.get());
533 globalWeakProcessing(visitor.get()); 599 state->heap().globalWeakProcessing(visitor.get());
534 600
535 state->postGC(BlinkGC::GCWithSweep); 601 state->postGC(BlinkGC::GCWithSweep);
536 ThreadHeap::decommitCallbackStacks(); 602 state->heap().decommitCallbackStacks();
537 } 603 }
538 state->preSweep(); 604 state->preSweep();
539 } 605 }
540 606
541 void ThreadHeap::processMarkingStack(Visitor* visitor) 607 void ThreadHeap::processMarkingStack(Visitor* visitor)
542 { 608 {
543 // Ephemeron fixed point loop. 609 // Ephemeron fixed point loop.
544 do { 610 do {
545 { 611 {
546 // Iteratively mark all objects that are reachable from the objects 612 // Iteratively mark all objects that are reachable from the objects
547 // currently pushed onto the marking stack. 613 // currently pushed onto the marking stack.
548 TRACE_EVENT0("blink_gc", "ThreadHeap::processMarkingStackSingleThreaded"); 614 TRACE_EVENT0("blink_gc", "ThreadHeap::processMarkingStackSingleThreaded");
549 while (popAndInvokeTraceCallback(visitor)) { } 615 while (popAndInvokeTraceCallback(visitor)) { }
550 } 616 }
551 617
552 { 618 {
553 // Mark any strong pointers that have now become reachable in 619 // Mark any strong pointers that have now become reachable in
554 // ephemeron maps. 620 // ephemeron maps.
555 TRACE_EVENT0("blink_gc", "ThreadHeap::processEphemeronStack"); 621 TRACE_EVENT0("blink_gc", "ThreadHeap::processEphemeronStack");
556 s_ephemeronStack->invokeEphemeronCallbacks(visitor); 622 m_ephemeronStack->invokeEphemeronCallbacks(visitor);
557 } 623 }
558 624
559 // Rerun loop if ephemeron processing queued more objects for tracing. 625 // Rerun loop if ephemeron processing queued more objects for tracing.
560 } while (!s_markingStack->isEmpty()); 626 } while (!m_markingStack->isEmpty());
561 } 627 }
562 628
563 void ThreadHeap::postMarkingProcessing(Visitor* visitor) 629 void ThreadHeap::postMarkingProcessing(Visitor* visitor)
564 { 630 {
565 TRACE_EVENT0("blink_gc", "ThreadHeap::postMarkingProcessing"); 631 TRACE_EVENT0("blink_gc", "ThreadHeap::postMarkingProcessing");
566 // Call post-marking callbacks including: 632 // Call post-marking callbacks including:
567 // 1. the ephemeronIterationDone callbacks on weak tables to do cleanup 633 // 1. the ephemeronIterationDone callbacks on weak tables to do cleanup
568 // (specifically to clear the queued bits for weak hash tables), and 634 // (specifically to clear the queued bits for weak hash tables), and
569 // 2. the markNoTracing callbacks on collection backings to mark them 635 // 2. the markNoTracing callbacks on collection backings to mark them
570 // if they are only reachable from their front objects. 636 // if they are only reachable from their front objects.
571 while (popAndInvokePostMarkingCallback(visitor)) { } 637 while (popAndInvokePostMarkingCallback(visitor)) { }
572 638
573 // Post-marking callbacks should not trace any objects and 639 // Post-marking callbacks should not trace any objects and
574 // therefore the marking stack should be empty after the 640 // therefore the marking stack should be empty after the
575 // post-marking callbacks. 641 // post-marking callbacks.
576 ASSERT(s_markingStack->isEmpty()); 642 ASSERT(m_markingStack->isEmpty());
577 } 643 }
578 644
579 void ThreadHeap::globalWeakProcessing(Visitor* visitor) 645 void ThreadHeap::globalWeakProcessing(Visitor* visitor)
580 { 646 {
581 TRACE_EVENT0("blink_gc", "ThreadHeap::globalWeakProcessing"); 647 TRACE_EVENT0("blink_gc", "ThreadHeap::globalWeakProcessing");
582 double startTime = WTF::currentTimeMS(); 648 double startTime = WTF::currentTimeMS();
583 649
584 // Call weak callbacks on objects that may now be pointing to dead objects. 650 // Call weak callbacks on objects that may now be pointing to dead objects.
585 while (popAndInvokeGlobalWeakCallback(visitor)) { } 651 while (popAndInvokeGlobalWeakCallback(visitor)) { }
586 652
587 // It is not permitted to trace pointers of live objects in the weak 653 // It is not permitted to trace pointers of live objects in the weak
588 // callback phase, so the marking stack should still be empty here. 654 // callback phase, so the marking stack should still be empty here.
589 ASSERT(s_markingStack->isEmpty()); 655 ASSERT(m_markingStack->isEmpty());
590 656
591 double timeForGlobalWeakProcessing = WTF::currentTimeMS() - startTime; 657 double timeForGlobalWeakProcessing = WTF::currentTimeMS() - startTime;
592 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, globalWeakTimeHistogram, new CustomCountHistogram("BlinkGC.TimeForGlobalWeakProcessing", 1, 10 * 1000, 50)); 658 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, globalWeakTimeHistogram, new CustomCountHistogram("BlinkGC.TimeForGlobalWeakProcessing", 1, 10 * 1000, 50));
593 globalWeakTimeHistogram.count(timeForGlobalWeakProcessing); 659 globalWeakTimeHistogram.count(timeForGlobalWeakProcessing);
594 } 660 }
595 661
596 void ThreadHeap::collectAllGarbage() 662 void ThreadHeap::collectAllGarbage()
597 { 663 {
598 // We need to run multiple GCs to collect a chain of persistent handles. 664 // We need to run multiple GCs to collect a chain of persistent handles.
599 size_t previousLiveObjects = 0; 665 size_t previousLiveObjects = 0;
666 ThreadState* state = ThreadState::current();
600 for (int i = 0; i < 5; ++i) { 667 for (int i = 0; i < 5; ++i) {
601 collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithSweep, BlinkGC::ForcedGC); 668 collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithSweep, BlinkGC::ForcedGC);
602 size_t liveObjects = ThreadHeap::heapStats().markedObjectSize(); 669 size_t liveObjects = state->heap().heapStats().markedObjectSize();
603 if (liveObjects == previousLiveObjects) 670 if (liveObjects == previousLiveObjects)
604 break; 671 break;
605 previousLiveObjects = liveObjects; 672 previousLiveObjects = liveObjects;
606 } 673 }
607 } 674 }
608 675
609 void ThreadHeap::reportMemoryUsageHistogram() 676 void ThreadHeap::reportMemoryUsageHistogram()
610 { 677 {
611 static size_t supportedMaxSizeInMB = 4 * 1024; 678 static size_t supportedMaxSizeInMB = 4 * 1024;
612 static size_t observedMaxSizeInMB = 0; 679 static size_t observedMaxSizeInMB = 0;
613 680
614 // We only report the memory in the main thread. 681 // We only report the memory in the main thread.
615 if (!isMainThread()) 682 if (!isMainThread())
616 return; 683 return;
617 // +1 is for rounding up the sizeInMB. 684 // +1 is for rounding up the sizeInMB.
618 size_t sizeInMB = ThreadHeap::heapStats().allocatedSpace() / 1024 / 1024 + 1; 685 size_t sizeInMB = ThreadState::current()->heap().heapStats().allocatedSpace() / 1024 / 1024 + 1;
619 if (sizeInMB >= supportedMaxSizeInMB) 686 if (sizeInMB >= supportedMaxSizeInMB)
620 sizeInMB = supportedMaxSizeInMB - 1; 687 sizeInMB = supportedMaxSizeInMB - 1;
621 if (sizeInMB > observedMaxSizeInMB) { 688 if (sizeInMB > observedMaxSizeInMB) {
622 // Send a UseCounter only when we see the highest memory usage 689 // Send a UseCounter only when we see the highest memory usage
623 // we've ever seen. 690 // we've ever seen.
624 DEFINE_THREAD_SAFE_STATIC_LOCAL(EnumerationHistogram, commitedSizeHistogram, new EnumerationHistogram("BlinkGC.CommittedSize", supportedMaxSizeInMB)); 691 DEFINE_THREAD_SAFE_STATIC_LOCAL(EnumerationHistogram, commitedSizeHistogram, new EnumerationHistogram("BlinkGC.CommittedSize", supportedMaxSizeInMB));
625 commitedSizeHistogram.count(sizeInMB); 692 commitedSizeHistogram.count(sizeInMB);
626 observedMaxSizeInMB = sizeInMB; 693 observedMaxSizeInMB = sizeInMB;
627 } 694 }
628 } 695 }
629 696
630 void ThreadHeap::reportMemoryUsageForTracing() 697 void ThreadHeap::reportMemoryUsageForTracing()
631 { 698 {
632 #if PRINT_HEAP_STATS 699 #if PRINT_HEAP_STATS
633 // dataLogF("allocatedSpace=%ldMB, allocatedObjectSize=%ldMB, markedObjectSize=%ldMB, partitionAllocSize=%ldMB, wrapperCount=%ld, collectedWrapperCount=%ld\n", ThreadHeap::allocatedSpace() / 1024 / 1024, ThreadHeap::allocatedObjectSize() / 1024 / 1024, ThreadHeap::markedObjectSize() / 1024 / 1024, WTF::Partitions::totalSizeOfCommittedPages() / 1024 / 1024, ThreadHeap::wrapperCount(), ThreadHeap::collectedWrapperCount()); 700 // dataLogF("allocatedSpace=%ldMB, allocatedObjectSize=%ldMB, markedObjectSize=%ldMB, partitionAllocSize=%ldMB, wrapperCount=%ld, collectedWrapperCount=%ld\n", ThreadHeap::allocatedSpace() / 1024 / 1024, ThreadHeap::allocatedObjectSize() / 1024 / 1024, ThreadHeap::markedObjectSize() / 1024 / 1024, WTF::Partitions::totalSizeOfCommittedPages() / 1024 / 1024, ThreadHeap::wrapperCount(), ThreadHeap::collectedWrapperCount());
634 #endif 701 #endif
635 702
636 bool gcTracingEnabled; 703 bool gcTracingEnabled;
637 TRACE_EVENT_CATEGORY_GROUP_ENABLED("blink_gc", &gcTracingEnabled); 704 TRACE_EVENT_CATEGORY_GROUP_ENABLED("blink_gc", &gcTracingEnabled);
638 if (!gcTracingEnabled) 705 if (!gcTracingEnabled)
639 return; 706 return;
640 707
708 ThreadHeap& heap = ThreadState::current()->heap();
641 // These values are divided by 1024 to avoid overflow in practical cases (TRACE_COUNTER values are 32-bit ints). 709 // These values are divided by 1024 to avoid overflow in practical cases (TRACE_COUNTER values are 32-bit ints).
642 // They are capped to INT_MAX just in case. 710 // They are capped to INT_MAX just in case.
643 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::allocatedObjectSizeKB", std::min(ThreadHeap::heapStats().allocatedObjectSize() / 1024, static_cast<size_t>(INT_MAX))); 711 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::allocatedObjectSizeKB", std::min(heap.heapStats().allocatedObjectSize() / 1024, static_cast<size_t>(INT_MAX)));
644 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::markedObjectSizeKB", std::min(ThreadHeap::heapStats().markedObjectSize() / 1024, static_cast<size_t>(INT_MAX))); 712 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::markedObjectSizeKB", std::min(heap.heapStats().markedObjectSize() / 1024, static_cast<size_t>(INT_MAX)));
645 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::markedObjectSizeAtLastCompleteSweepKB", std::min(ThreadHeap::heapStats().markedObjectSizeAtLastCompleteSweep() / 1024, static_cast<size_t>(INT_MAX))); 713 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::markedObjectSizeAtLastCompleteSweepKB", std::min(heap.heapStats().markedObjectSizeAtLastCompleteSweep() / 1024, static_cast<size_t>(INT_MAX)));
646 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::allocatedSpaceKB", std::min(ThreadHeap::heapStats().allocatedSpace() / 1024, static_cast<size_t>(INT_MAX))); 714 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::allocatedSpaceKB", std::min(heap.heapStats().allocatedSpace() / 1024, static_cast<size_t>(INT_MAX)));
647 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::objectSizeAtLastGCKB", std::min(ThreadHeap::heapStats().objectSizeAtLastGC() / 1024, static_cast<size_t>(INT_MAX))); 715 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::objectSizeAtLastGCKB", std::min(heap.heapStats().objectSizeAtLastGC() / 1024, static_cast<size_t>(INT_MAX)));
648 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::wrapperCount", std::min(ThreadHeap::heapStats().wrapperCount(), static_cast<size_t>(INT_MAX))); 716 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::wrapperCount", std::min(heap.heapStats().wrapperCount(), static_cast<size_t>(INT_MAX)));
649 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::heapStats().wrapperCountAtLastGC", std::min(ThreadHeap::heapStats().wrapperCountAtLastGC(), static_cast<size_t>(INT_MAX))); 717 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::wrapperCountAtLastGC", std::min(heap.heapStats().wrapperCountAtLastGC(), static_cast<size_t>(INT_MAX)));
650 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::collectedWrapperCount", std::min(ThreadHeap::heapStats().collectedWrapperCount(), static_cast<size_t>(INT_MAX))); 718 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::collectedWrapperCount", std::min(heap.heapStats().collectedWrapperCount(), static_cast<size_t>(INT_MAX)));
651 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::partitionAllocSizeAtLastGCKB", std::min(ThreadHeap::heapStats().partitionAllocSizeAtLastGC() / 1024, static_cast<size_t>(INT_MAX))); 719 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::partitionAllocSizeAtLastGCKB", std::min(heap.heapStats().partitionAllocSizeAtLastGC() / 1024, static_cast<size_t>(INT_MAX)));
652 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Partitions::totalSizeOfCommittedPagesKB", std::min(WTF::Partitions::totalSizeOfCommittedPages() / 1024, static_cast<size_t>(INT_MAX))); 720 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Partitions::totalSizeOfCommittedPagesKB", std::min(WTF::Partitions::totalSizeOfCommittedPages() / 1024, static_cast<size_t>(INT_MAX)));
653 } 721 }
654 722
655 size_t ThreadHeap::objectPayloadSizeForTesting() 723 size_t ThreadHeap::objectPayloadSizeForTesting()
656 { 724 {
725 // MEMO: is threadAttachMutex locked?
657 size_t objectPayloadSize = 0; 726 size_t objectPayloadSize = 0;
658 for (ThreadState* state : ThreadState::attachedThreads()) { 727 for (ThreadState* state : m_threads) {
659 state->setGCState(ThreadState::GCRunning); 728 state->setGCState(ThreadState::GCRunning);
660 state->makeConsistentForGC(); 729 state->makeConsistentForGC();
661 objectPayloadSize += state->objectPayloadSizeForTesting(); 730 objectPayloadSize += state->objectPayloadSizeForTesting();
662 state->setGCState(ThreadState::EagerSweepScheduled); 731 state->setGCState(ThreadState::EagerSweepScheduled);
663 state->setGCState(ThreadState::Sweeping); 732 state->setGCState(ThreadState::Sweeping);
664 state->setGCState(ThreadState::NoGCScheduled); 733 state->setGCState(ThreadState::NoGCScheduled);
665 } 734 }
666 return objectPayloadSize; 735 return objectPayloadSize;
667 } 736 }
668 737
669 RegionTree* ThreadHeap::getRegionTree() 738 void ThreadHeap::visitPersistentRoots(Visitor* visitor)
670 { 739 {
haraken 2016/04/21 11:48:25 Add ASSERT(ThreadState::current()->isInGC()).
keishi 2016/04/22 06:09:58 Done.
671 DEFINE_THREAD_SAFE_STATIC_LOCAL(RegionTree, tree, new RegionTree); 740 TRACE_EVENT0("blink_gc", "ThreadHeap::visitPersistentRoots");
672 return &tree; 741 ProcessHeap::crossThreadPersistentRegion().tracePersistentNodes(visitor);
742
743 for (ThreadState* state : m_threads) {
744 state->visitPersistents(visitor);
745 }
673 } 746 }
674 747
675 BasePage* ThreadHeap::lookup(Address address) 748 void ThreadHeap::visitStackRoots(Visitor* visitor)
749 {
haraken 2016/04/21 11:48:25 Add ASSERT(ThreadState::current()->isInGC()).
keishi 2016/04/22 06:09:58 Done.
750 TRACE_EVENT0("blink_gc", "ThreadHeap::visitStackRoots");
751 for (ThreadState* state : m_threads) {
752 state->visitStack(visitor);
753 }
754 }
755
756 void ThreadHeap::checkAndPark(ThreadState* threadState, SafePointAwareMutexLocker* locker)
757 {
758 m_safePointBarrier->checkAndPark(threadState, locker);
759 }
760
761 void ThreadHeap::enterSafePoint(ThreadState* threadState)
762 {
763 m_safePointBarrier->enterSafePoint(threadState);
764 }
765
766 void ThreadHeap::leaveSafePoint(ThreadState* threadState, SafePointAwareMutexLocker* locker)
767 {
768 m_safePointBarrier->leaveSafePoint(threadState, locker);
769 }
770
771 BasePage* ThreadHeap::lookupPageForAddress(Address address)
676 { 772 {
677 ASSERT(ThreadState::current()->isInGC()); 773 ASSERT(ThreadState::current()->isInGC());
678 if (PageMemoryRegion* region = ThreadHeap::getRegionTree()->lookup(address)) { 774 if (!m_regionTree)
haraken 2016/04/21 11:48:25 Do we need this check?
keishi 2016/04/22 06:09:58 Removed.
775 return nullptr;
776 if (PageMemoryRegion* region = m_regionTree->lookup(address)) {
679 BasePage* page = region->pageFromAddress(address); 777 BasePage* page = region->pageFromAddress(address);
680 return page && !page->orphaned() ? page : nullptr; 778 return page && !page->orphaned() ? page : nullptr;
681 } 779 }
682 return nullptr; 780 return nullptr;
683 } 781 }
684 782
685 void ThreadHeap::resetHeapCounters() 783 void ThreadHeap::resetHeapCounters()
686 { 784 {
687 ASSERT(ThreadState::current()->isInGC()); 785 ASSERT(ThreadState::current()->isInGC());
688 786
689 ThreadHeap::reportMemoryUsageForTracing(); 787 ThreadHeap::reportMemoryUsageForTracing();
690 788
789 // MEMO: when should we do this?
691 ProcessHeap::resetHeapCounters(); 790 ProcessHeap::resetHeapCounters();
haraken 2016/04/21 11:48:25 This is TODO. We should not reset the process-wide
keishi 2016/04/22 06:09:58 Sorry. Meant to deal with this later and forgot. D
692 ThreadHeap::heapStats().reset(); 791 m_stats.reset();
693 for (ThreadState* state : ThreadState::attachedThreads()) 792 {
694 state->resetHeapCounters(); 793 MutexLocker locker(m_threadAttachMutex);
haraken 2016/04/21 11:48:25 This lock wouldn't be needed because we're in GC h
keishi 2016/04/22 06:09:58 Done.
794 for (ThreadState* state : m_threads)
795 state->resetHeapCounters();
796 }
695 } 797 }
696 798
697 ThreadHeapStats& ThreadHeap::heapStats() 799 ThreadHeap* ThreadHeap::s_mainThreadHeap = nullptr;
698 {
699 DEFINE_THREAD_SAFE_STATIC_LOCAL(ThreadHeapStats, stats, new ThreadHeapStats());
700 return stats;
701 }
702
703 CallbackStack* ThreadHeap::s_markingStack;
704 CallbackStack* ThreadHeap::s_postMarkingCallbackStack;
705 CallbackStack* ThreadHeap::s_globalWeakCallbackStack;
706 CallbackStack* ThreadHeap::s_ephemeronStack;
707 HeapDoesNotContainCache* ThreadHeap::s_heapDoesNotContainCache;
708 FreePagePool* ThreadHeap::s_freePagePool;
709 OrphanedPagePool* ThreadHeap::s_orphanedPagePool;
710
711 BlinkGC::GCReason ThreadHeap::s_lastGCReason = BlinkGC::NumberOfGCReason;
712 800
713 } // namespace blink 801 } // namespace blink
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698