Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(70)

Side by Side Diff: third_party/WebKit/Source/platform/heap/Heap.cpp

Issue 1477023003: Refactor the Heap into ThreadHeap to prepare for per thread heaps Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 4 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 41 matching lines...) Expand 10 before | Expand all | Expand 10 after
52 #include "wtf/Partitions.h" 52 #include "wtf/Partitions.h"
53 53
54 namespace blink { 54 namespace blink {
55 55
56 HeapAllocHooks::AllocationHook* HeapAllocHooks::m_allocationHook = nullptr; 56 HeapAllocHooks::AllocationHook* HeapAllocHooks::m_allocationHook = nullptr;
57 HeapAllocHooks::FreeHook* HeapAllocHooks::m_freeHook = nullptr; 57 HeapAllocHooks::FreeHook* HeapAllocHooks::m_freeHook = nullptr;
58 58
59 class ParkThreadsScope final { 59 class ParkThreadsScope final {
60 STACK_ALLOCATED(); 60 STACK_ALLOCATED();
61 public: 61 public:
62 ParkThreadsScope() 62 ParkThreadsScope(ThreadState* state)
63 : m_shouldResumeThreads(false) 63 : m_state(state)
64 , m_shouldResumeThreads(false)
64 { 65 {
65 } 66 }
66 67
67 bool parkThreads(ThreadState* state) 68 bool parkThreads()
68 { 69 {
69 TRACE_EVENT0("blink_gc", "Heap::ParkThreadsScope"); 70 TRACE_EVENT0("blink_gc", "ThreadHeap::ParkThreadsScope");
70 const char* samplingState = TRACE_EVENT_GET_SAMPLING_STATE(); 71 const char* samplingState = TRACE_EVENT_GET_SAMPLING_STATE();
71 if (state->isMainThread()) 72 if (m_state->isMainThread())
72 TRACE_EVENT_SET_SAMPLING_STATE("blink_gc", "BlinkGCWaiting"); 73 TRACE_EVENT_SET_SAMPLING_STATE("blink_gc", "BlinkGCWaiting");
73 74
74 // TODO(haraken): In an unlikely coincidence that two threads decide 75 // TODO(haraken): In an unlikely coincidence that two threads decide
75 // to collect garbage at the same time, avoid doing two GCs in 76 // to collect garbage at the same time, avoid doing two GCs in
76 // a row and return false. 77 // a row and return false.
77 double startTime = WTF::currentTimeMS(); 78 double startTime = WTF::currentTimeMS();
78 79
79 m_shouldResumeThreads = ThreadState::stopThreads(); 80 m_shouldResumeThreads = m_state->heap().park();
80 81
81 double timeForStoppingThreads = WTF::currentTimeMS() - startTime; 82 double timeForStoppingThreads = WTF::currentTimeMS() - startTime;
82 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, timeToStopThreadsH istogram, new CustomCountHistogram("BlinkGC.TimeForStoppingThreads", 1, 1000, 50 )); 83 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, timeToStopThreadsH istogram, new CustomCountHistogram("BlinkGC.TimeForStoppingThreads", 1, 1000, 50 ));
83 timeToStopThreadsHistogram.count(timeForStoppingThreads); 84 timeToStopThreadsHistogram.count(timeForStoppingThreads);
84 85
85 if (state->isMainThread()) 86 if (m_state->isMainThread())
86 TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(samplingState); 87 TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(samplingState);
87 return m_shouldResumeThreads; 88 return m_shouldResumeThreads;
88 } 89 }
89 90
90 ~ParkThreadsScope() 91 ~ParkThreadsScope()
91 { 92 {
92 // Only cleanup if we parked all threads in which case the GC happened 93 // Only cleanup if we parked all threads in which case the GC happened
93 // and we need to resume the other threads. 94 // and we need to resume the other threads.
94 if (m_shouldResumeThreads) 95 if (m_shouldResumeThreads)
95 ThreadState::resumeThreads(); 96 m_state->heap().resume();
96 } 97 }
97 98
98 private: 99 private:
100 ThreadState* m_state;
99 bool m_shouldResumeThreads; 101 bool m_shouldResumeThreads;
100 }; 102 };
101 103
102 void Heap::flushHeapDoesNotContainCache() 104 void ThreadHeap::flushHeapDoesNotContainCache()
103 { 105 {
104 s_heapDoesNotContainCache->flush(); 106 m_heapDoesNotContainCache->flush();
105 } 107 }
106 108
107 void Heap::init() 109 void ProcessHeap::init()
108 { 110 {
109 ThreadState::init(); 111 ThreadState::init();
110 s_markingStack = new CallbackStack(); 112 s_shutdownComplete = false;
111 s_postMarkingCallbackStack = new CallbackStack(); 113 s_isLowEndDevice = false;
112 s_globalWeakCallbackStack = new CallbackStack(); 114 s_totalAllocatedSpace = 0;
113 // Use smallest supported block size for ephemerons. 115 s_totalAllocatedObjectSize = 0;
114 s_ephemeronStack = new CallbackStack(CallbackStack::kMinimalBlockSize); 116 s_totalMarkedObjectSize = 0;
115 s_heapDoesNotContainCache = new HeapDoesNotContainCache();
116 s_freePagePool = new FreePagePool();
117 s_orphanedPagePool = new OrphanedPagePool();
118 s_allocatedSpace = 0;
119 s_allocatedObjectSize = 0;
120 s_objectSizeAtLastGC = 0;
121 s_markedObjectSize = 0;
122 s_markedObjectSizeAtLastCompleteSweep = 0;
123 s_wrapperCount = 0;
124 s_wrapperCountAtLastGC = 0;
125 s_collectedWrapperCount = 0;
126 s_partitionAllocSizeAtLastGC = WTF::Partitions::totalSizeOfCommittedPages();
127 s_estimatedMarkingTimePerByte = 0.0;
128 s_isLowEndDevice = base::SysInfo::IsLowEndDevice(); 117 s_isLowEndDevice = base::SysInfo::IsLowEndDevice();
129 s_lastGCReason = BlinkGC::NumberOfGCReason;
130 #if ENABLE(ASSERT)
131 s_gcGeneration = 1;
132 #endif
133 118
134 GCInfoTable::init(); 119 GCInfoTable::init();
135 120
136 if (Platform::current() && Platform::current()->currentThread()) 121 if (Platform::current() && Platform::current()->currentThread())
137 Platform::current()->registerMemoryDumpProvider(BlinkGCMemoryDumpProvide r::instance(), "BlinkGC"); 122 Platform::current()->registerMemoryDumpProvider(BlinkGCMemoryDumpProvide r::instance(), "BlinkGC");
138 } 123 }
139 124
140 void Heap::shutdown() 125 void ProcessHeap::shutdown()
141 { 126 {
142 ASSERT(s_markingStack); 127 ASSERT(!s_shutdownComplete);
143 128
144 if (Platform::current() && Platform::current()->currentThread()) 129 if (Platform::current() && Platform::current()->currentThread())
145 Platform::current()->unregisterMemoryDumpProvider(BlinkGCMemoryDumpProvi der::instance()); 130 Platform::current()->unregisterMemoryDumpProvider(BlinkGCMemoryDumpProvi der::instance());
146 131
147 // The main thread must be the last thread that gets detached. 132 {
148 RELEASE_ASSERT(ThreadState::attachedThreads().size() == 0); 133 // The main thread must be the last thread that gets detached.
149 134 MutexLocker locker(ThreadHeap::allHeapsMutex());
150 delete s_heapDoesNotContainCache; 135 RELEASE_ASSERT(ThreadHeap::allHeaps().isEmpty());
151 s_heapDoesNotContainCache = nullptr; 136 }
152 delete s_freePagePool; 137
153 s_freePagePool = nullptr;
154 delete s_orphanedPagePool;
155 s_orphanedPagePool = nullptr;
156 delete s_globalWeakCallbackStack;
157 s_globalWeakCallbackStack = nullptr;
158 delete s_postMarkingCallbackStack;
159 s_postMarkingCallbackStack = nullptr;
160 delete s_markingStack;
161 s_markingStack = nullptr;
162 delete s_ephemeronStack;
163 s_ephemeronStack = nullptr;
164 GCInfoTable::shutdown(); 138 GCInfoTable::shutdown();
165 ThreadState::shutdown(); 139 ASSERT(ProcessHeap::totalAllocatedSpace() == 0);
166 ASSERT(Heap::allocatedSpace() == 0); 140 s_shutdownComplete = true;
167 } 141 }
168 142
169 CrossThreadPersistentRegion& Heap::crossThreadPersistentRegion() 143 CrossThreadPersistentRegion& ProcessHeap::crossThreadPersistentRegion()
170 { 144 {
171 DEFINE_THREAD_SAFE_STATIC_LOCAL(CrossThreadPersistentRegion, persistentRegio n, new CrossThreadPersistentRegion()); 145 DEFINE_THREAD_SAFE_STATIC_LOCAL(CrossThreadPersistentRegion, persistentRegio n, new CrossThreadPersistentRegion());
172 return persistentRegion; 146 return persistentRegion;
173 } 147 }
174 148
149 bool ProcessHeap::s_shutdownComplete = false;
150 bool ProcessHeap::s_isLowEndDevice = false;
151 size_t ProcessHeap::s_totalAllocatedSpace = 0;
152 size_t ProcessHeap::s_totalAllocatedObjectSize = 0;
153 size_t ProcessHeap::s_totalMarkedObjectSize = 0;
154
155 ThreadHeapStats::ThreadHeapStats()
156 : m_allocatedSpace(0)
157 , m_allocatedObjectSize(0)
158 , m_objectSizeAtLastGC(0)
159 , m_markedObjectSize(0)
160 , m_markedObjectSizeAtLastCompleteSweep(0)
161 , m_wrapperCount(0)
162 , m_wrapperCountAtLastGC(0)
163 , m_collectedWrapperCount(0)
164 , m_partitionAllocSizeAtLastGC(WTF::Partitions::totalSizeOfCommittedPages())
165 , m_estimatedMarkingTimePerByte(0.0)
166 {
167 }
168
169 double ThreadHeapStats::estimatedMarkingTime()
170 {
171 // Use 8 ms as initial estimated marking time.
172 // 8 ms is long enough for low-end mobile devices to mark common
173 // real-world object graphs.
174 if (m_estimatedMarkingTimePerByte == 0)
175 return 0.008;
176
177 // Assuming that the collection rate of this GC will be mostly equal to
178 // the collection rate of the last GC, estimate the marking time of this GC.
179 return m_estimatedMarkingTimePerByte * (allocatedObjectSize() + markedObject Size());
180 }
181
182 void ThreadHeapStats::reset()
183 {
184 m_objectSizeAtLastGC = m_allocatedObjectSize + m_markedObjectSize;
185 m_partitionAllocSizeAtLastGC = WTF::Partitions::totalSizeOfCommittedPages();
186 m_allocatedObjectSize = 0;
187 m_markedObjectSize = 0;
188 m_wrapperCountAtLastGC = m_wrapperCount;
189 m_collectedWrapperCount = 0;
190 }
191
192 void ThreadHeapStats::increaseAllocatedObjectSize(size_t delta)
193 {
194 atomicAdd(&m_allocatedObjectSize, static_cast<long>(delta));
195 ProcessHeap::increaseTotalAllocatedObjectSize(delta);
196 }
197
198 void ThreadHeapStats::decreaseAllocatedObjectSize(size_t delta)
199 {
200 atomicSubtract(&m_allocatedObjectSize, static_cast<long>(delta));
201 ProcessHeap::decreaseTotalAllocatedObjectSize(delta);
202 }
203
204 void ThreadHeapStats::increaseMarkedObjectSize(size_t delta)
205 {
206 atomicAdd(&m_markedObjectSize, static_cast<long>(delta));
207 ProcessHeap::increaseTotalMarkedObjectSize(delta);
208 }
209
210 void ThreadHeapStats::increaseAllocatedSpace(size_t delta)
211 {
212 atomicAdd(&m_allocatedSpace, static_cast<long>(delta));
213 ProcessHeap::increaseTotalAllocatedSpace(delta);
214 }
215
216 void ThreadHeapStats::decreaseAllocatedSpace(size_t delta)
217 {
218 atomicSubtract(&m_allocatedSpace, static_cast<long>(delta));
219 ProcessHeap::decreaseTotalAllocatedSpace(delta);
220 }
221
222 ThreadHeap::ThreadHeap()
223 : m_regionTree(adoptPtr(new RegionTree()))
224 , m_heapDoesNotContainCache(adoptPtr(new HeapDoesNotContainCache))
225 , m_safePointBarrier(adoptPtr(new SafePointBarrier()))
226 , m_freePagePool(adoptPtr(new FreePagePool))
227 , m_orphanedPagePool(adoptPtr(new OrphanedPagePool))
228 , m_markingStack(adoptPtr(new CallbackStack()))
229 , m_postMarkingCallbackStack(adoptPtr(new CallbackStack()))
230 , m_globalWeakCallbackStack(adoptPtr(new CallbackStack()))
231 , m_ephemeronStack(adoptPtr(new CallbackStack(CallbackStack::kMinimalBlockSi ze)))
232 {
233 MutexLocker locker(ThreadHeap::allHeapsMutex());
234 allHeaps().add(this);
235 }
236
237 ThreadHeap::~ThreadHeap()
238 {
239 MutexLocker locker(ThreadHeap::allHeapsMutex());
240 allHeaps().remove(this);
241 }
242
243 RecursiveMutex& ThreadHeap::allHeapsMutex()
244 {
245 DEFINE_THREAD_SAFE_STATIC_LOCAL(RecursiveMutex, mutex, (new RecursiveMutex)) ;
246 return mutex;
247 }
248
249 HashSet<ThreadHeap*>& ThreadHeap::allHeaps()
250 {
251 DEFINE_STATIC_LOCAL(HashSet<ThreadHeap*>, heaps, ());
252 return heaps;
253 }
254
255 void ThreadHeap::attach(ThreadState* thread)
256 {
257 MutexLocker locker(m_threadAttachMutex);
258 m_threads.add(thread);
259 }
260
261 void ThreadHeap::detach(ThreadState* thread)
262 {
263 ASSERT(ThreadState::current() == thread);
264 {
265 // Grab the threadAttachMutex to ensure only one thread can shutdown at
266 // a time and that no other thread can do a global GC. It also allows
267 // safe iteration of the m_threads set which happens as part of
268 // thread local GC asserts. We enter a safepoint while waiting for the
269 // lock to avoid a dead-lock where another thread has already requested
270 // GC.
271 SafePointAwareMutexLocker locker(m_threadAttachMutex, BlinkGC::NoHeapPoi ntersOnStack);
272 thread->runThreadTerminationGC();
273 ASSERT(m_threads.contains(thread));
274 m_threads.remove(thread);
275 }
276 ASSERT(!thread->isMainThread() || m_threads.isEmpty());
277 if (m_threads.isEmpty()) {
278 ASSERT(heapStats().allocatedSpace() == 0);
279 delete this;
280 }
281 }
282
283 bool ThreadHeap::park()
284 {
285 return m_safePointBarrier->parkOthers();
286 }
287
288 void ThreadHeap::resume()
289 {
290 m_safePointBarrier->resumeOthers();
291 }
292
175 #if ENABLE(ASSERT) 293 #if ENABLE(ASSERT)
176 BasePage* Heap::findPageFromAddress(Address address) 294 BasePage* ThreadHeap::findPageFromAddress(Address address)
177 { 295 {
178 MutexLocker lock(ThreadState::threadAttachMutex()); 296 MutexLocker locker(m_threadAttachMutex); // MEMO: Added
179 for (ThreadState* state : ThreadState::attachedThreads()) { 297 for (ThreadState* state : m_threads) {
180 if (BasePage* page = state->findPageFromAddress(address)) 298 if (BasePage* page = state->findPageFromAddress(address))
181 return page; 299 return page;
182 } 300 }
183 return nullptr; 301 return nullptr;
184 } 302 }
303
304 bool ThreadHeap::isAtSafePoint()
305 {
306 MutexLocker locker(m_threadAttachMutex); // MEMO: Added
307 for (ThreadState* state : m_threads) {
308 if (!state->isAtSafePoint())
309 return false;
310 }
311 return true;
312 }
185 #endif 313 #endif
186 314
187 Address Heap::checkAndMarkPointer(Visitor* visitor, Address address) 315 Address ThreadHeap::checkAndMarkPointer(Visitor* visitor, Address address)
188 { 316 {
189 ASSERT(ThreadState::current()->isInGC()); 317 ASSERT(ThreadState::current()->isInGC());
190
191 #if !ENABLE(ASSERT) 318 #if !ENABLE(ASSERT)
192 if (s_heapDoesNotContainCache->lookup(address)) 319 if (m_heapDoesNotContainCache->lookup(address))
193 return nullptr; 320 return nullptr;
194 #endif 321 #endif
195 322
196 if (BasePage* page = lookup(address)) { 323 if (BasePage* page = lookupPageForAddress(address)) {
197 ASSERT(page->contains(address)); 324 ASSERT(page->contains(address));
198 ASSERT(!page->orphaned()); 325 ASSERT(!page->orphaned());
199 ASSERT(!s_heapDoesNotContainCache->lookup(address)); 326 ASSERT(!m_heapDoesNotContainCache->lookup(address));
200 page->checkAndMarkPointer(visitor, address); 327 page->checkAndMarkPointer(visitor, address);
201 return address; 328 return address;
202 } 329 }
203 330
204 #if !ENABLE(ASSERT) 331 #if !ENABLE(ASSERT)
205 s_heapDoesNotContainCache->addEntry(address); 332 m_heapDoesNotContainCache->addEntry(address);
206 #else 333 #else
207 if (!s_heapDoesNotContainCache->lookup(address)) 334 if (!m_heapDoesNotContainCache->lookup(address))
208 s_heapDoesNotContainCache->addEntry(address); 335 m_heapDoesNotContainCache->addEntry(address);
209 #endif 336 #endif
210 return nullptr; 337 return nullptr;
211 } 338 }
212 339
213 void Heap::pushTraceCallback(void* object, TraceCallback callback) 340 void ThreadHeap::pushTraceCallback(void* object, TraceCallback callback)
214 { 341 {
215 ASSERT(ThreadState::current()->isInGC()); 342 ASSERT(ThreadState::current()->isInGC());
216 343
217 // Trace should never reach an orphaned page. 344 // Trace should never reach an orphaned page.
218 ASSERT(!Heap::getOrphanedPagePool()->contains(object)); 345 ASSERT(!getOrphanedPagePool()->contains(object));
219 CallbackStack::Item* slot = s_markingStack->allocateEntry(); 346 CallbackStack::Item* slot = m_markingStack->allocateEntry();
220 *slot = CallbackStack::Item(object, callback); 347 *slot = CallbackStack::Item(object, callback);
221 } 348 }
222 349
223 bool Heap::popAndInvokeTraceCallback(Visitor* visitor) 350 bool ThreadHeap::popAndInvokeTraceCallback(Visitor* visitor)
224 { 351 {
225 CallbackStack::Item* item = s_markingStack->pop(); 352 CallbackStack::Item* item = m_markingStack->pop();
226 if (!item) 353 if (!item)
227 return false; 354 return false;
228 item->call(visitor); 355 item->call(visitor);
229 return true; 356 return true;
230 } 357 }
231 358
232 void Heap::pushPostMarkingCallback(void* object, TraceCallback callback) 359 void ThreadHeap::pushPostMarkingCallback(void* object, TraceCallback callback)
233 { 360 {
234 ASSERT(ThreadState::current()->isInGC()); 361 ASSERT(ThreadState::current()->isInGC());
235 362
236 // Trace should never reach an orphaned page. 363 // Trace should never reach an orphaned page.
237 ASSERT(!Heap::getOrphanedPagePool()->contains(object)); 364 ASSERT(!getOrphanedPagePool()->contains(object));
238 CallbackStack::Item* slot = s_postMarkingCallbackStack->allocateEntry(); 365 CallbackStack::Item* slot = m_postMarkingCallbackStack->allocateEntry();
239 *slot = CallbackStack::Item(object, callback); 366 *slot = CallbackStack::Item(object, callback);
240 } 367 }
241 368
242 bool Heap::popAndInvokePostMarkingCallback(Visitor* visitor) 369 bool ThreadHeap::popAndInvokePostMarkingCallback(Visitor* visitor)
243 { 370 {
244 if (CallbackStack::Item* item = s_postMarkingCallbackStack->pop()) { 371 if (CallbackStack::Item* item = m_postMarkingCallbackStack->pop()) {
245 item->call(visitor); 372 item->call(visitor);
246 return true; 373 return true;
247 } 374 }
248 return false; 375 return false;
249 } 376 }
250 377
251 void Heap::pushGlobalWeakCallback(void** cell, WeakCallback callback) 378 void ThreadHeap::pushGlobalWeakCallback(void** cell, WeakCallback callback)
252 { 379 {
253 ASSERT(ThreadState::current()->isInGC()); 380 ASSERT(ThreadState::current()->isInGC());
254 381
255 // Trace should never reach an orphaned page. 382 // Trace should never reach an orphaned page.
256 ASSERT(!Heap::getOrphanedPagePool()->contains(cell)); 383 ASSERT(!getOrphanedPagePool()->contains(cell));
257 CallbackStack::Item* slot = s_globalWeakCallbackStack->allocateEntry(); 384 CallbackStack::Item* slot = m_globalWeakCallbackStack->allocateEntry();
258 *slot = CallbackStack::Item(cell, callback); 385 *slot = CallbackStack::Item(cell, callback);
259 } 386 }
260 387
261 void Heap::pushThreadLocalWeakCallback(void* closure, void* object, WeakCallback callback) 388 void ThreadHeap::pushThreadLocalWeakCallback(void* closure, void* object, WeakCa llback callback)
262 { 389 {
263 ASSERT(ThreadState::current()->isInGC()); 390 ASSERT(ThreadState::current()->isInGC());
264 391
265 // Trace should never reach an orphaned page. 392 // Trace should never reach an orphaned page.
266 ASSERT(!Heap::getOrphanedPagePool()->contains(object)); 393 ASSERT(!getOrphanedPagePool()->contains(object));
267 ThreadState* state = pageFromObject(object)->arena()->getThreadState(); 394 ThreadState* state = pageFromObject(object)->arena()->getThreadState();
268 state->pushThreadLocalWeakCallback(closure, callback); 395 state->pushThreadLocalWeakCallback(closure, callback);
269 } 396 }
270 397
271 bool Heap::popAndInvokeGlobalWeakCallback(Visitor* visitor) 398 bool ThreadHeap::popAndInvokeGlobalWeakCallback(Visitor* visitor)
272 { 399 {
273 if (CallbackStack::Item* item = s_globalWeakCallbackStack->pop()) { 400 if (CallbackStack::Item* item = m_globalWeakCallbackStack->pop()) {
274 item->call(visitor); 401 item->call(visitor);
275 return true; 402 return true;
276 } 403 }
277 return false; 404 return false;
278 } 405 }
279 406
280 void Heap::registerWeakTable(void* table, EphemeronCallback iterationCallback, E phemeronCallback iterationDoneCallback) 407 void ThreadHeap::registerWeakTable(void* table, EphemeronCallback iterationCallb ack, EphemeronCallback iterationDoneCallback)
281 { 408 {
282 ASSERT(ThreadState::current()->isInGC()); 409 ASSERT(ThreadState::current()->isInGC());
283 410
284 // Trace should never reach an orphaned page. 411 // Trace should never reach an orphaned page.
285 ASSERT(!Heap::getOrphanedPagePool()->contains(table)); 412 ASSERT(!getOrphanedPagePool()->contains(table));
286 CallbackStack::Item* slot = s_ephemeronStack->allocateEntry(); 413 CallbackStack::Item* slot = m_ephemeronStack->allocateEntry();
287 *slot = CallbackStack::Item(table, iterationCallback); 414 *slot = CallbackStack::Item(table, iterationCallback);
288 415
289 // Register a post-marking callback to tell the tables that 416 // Register a post-marking callback to tell the tables that
290 // ephemeron iteration is complete. 417 // ephemeron iteration is complete.
291 pushPostMarkingCallback(table, iterationDoneCallback); 418 pushPostMarkingCallback(table, iterationDoneCallback);
292 } 419 }
293 420
294 #if ENABLE(ASSERT) 421 #if ENABLE(ASSERT)
295 bool Heap::weakTableRegistered(const void* table) 422 bool ThreadHeap::weakTableRegistered(const void* table)
296 { 423 {
297 ASSERT(s_ephemeronStack); 424 ASSERT(m_ephemeronStack);
298 return s_ephemeronStack->hasCallbackForObject(table); 425 return m_ephemeronStack->hasCallbackForObject(table);
299 } 426 }
300 #endif 427 #endif
301 428
302 void Heap::decommitCallbackStacks() 429 void ThreadHeap::decommitCallbackStacks()
303 { 430 {
304 s_markingStack->decommit(); 431 m_markingStack->decommit();
305 s_postMarkingCallbackStack->decommit(); 432 m_postMarkingCallbackStack->decommit();
306 s_globalWeakCallbackStack->decommit(); 433 m_globalWeakCallbackStack->decommit();
307 s_ephemeronStack->decommit(); 434 m_ephemeronStack->decommit();
308 } 435 }
309 436
310 void Heap::preGC() 437 void ThreadHeap::preGC()
311 { 438 {
312 ASSERT(!ThreadState::current()->isInGC()); 439 ASSERT(!ThreadState::current()->isInGC());
313 for (ThreadState* state : ThreadState::attachedThreads()) 440 for (ThreadState* state : m_threads) {
314 state->preGC(); 441 state->preGC();
442 }
315 } 443 }
316 444
317 void Heap::postGC(BlinkGC::GCType gcType) 445 void ThreadHeap::postGC(BlinkGC::GCType gcType)
318 { 446 {
319 ASSERT(ThreadState::current()->isInGC()); 447 ASSERT(ThreadState::current()->isInGC());
320 for (ThreadState* state : ThreadState::attachedThreads()) 448 for (ThreadState* state : m_threads) {
321 state->postGC(gcType); 449 state->postGC(gcType);
450 }
322 } 451 }
323 452
324 const char* Heap::gcReasonString(BlinkGC::GCReason reason) 453 const char* ThreadHeap::gcReasonString(BlinkGC::GCReason reason)
325 { 454 {
326 switch (reason) { 455 switch (reason) {
327 case BlinkGC::IdleGC: 456 case BlinkGC::IdleGC:
328 return "IdleGC"; 457 return "IdleGC";
329 case BlinkGC::PreciseGC: 458 case BlinkGC::PreciseGC:
330 return "PreciseGC"; 459 return "PreciseGC";
331 case BlinkGC::ConservativeGC: 460 case BlinkGC::ConservativeGC:
332 return "ConservativeGC"; 461 return "ConservativeGC";
333 case BlinkGC::ForcedGC: 462 case BlinkGC::ForcedGC:
334 return "ForcedGC"; 463 return "ForcedGC";
335 case BlinkGC::MemoryPressureGC: 464 case BlinkGC::MemoryPressureGC:
336 return "MemoryPressureGC"; 465 return "MemoryPressureGC";
337 case BlinkGC::PageNavigationGC: 466 case BlinkGC::PageNavigationGC:
338 return "PageNavigationGC"; 467 return "PageNavigationGC";
339 default: 468 default:
340 ASSERT_NOT_REACHED(); 469 ASSERT_NOT_REACHED();
341 } 470 }
342 return "<Unknown>"; 471 return "<Unknown>";
343 } 472 }
344 473
345 void Heap::collectGarbage(BlinkGC::StackState stackState, BlinkGC::GCType gcType , BlinkGC::GCReason reason) 474 void ThreadHeap::collectGarbage(BlinkGC::StackState stackState, BlinkGC::GCType gcType, BlinkGC::GCReason reason)
346 { 475 {
347 ASSERT(gcType != BlinkGC::ThreadTerminationGC); 476 ASSERT(gcType != BlinkGC::ThreadTerminationGC);
348 477
349 ThreadState* state = ThreadState::current(); 478 ThreadState* state = ThreadState::current();
350 // Nested collectGarbage() invocations aren't supported. 479 // Nested collectGarbage() invocations aren't supported.
351 RELEASE_ASSERT(!state->isGCForbidden()); 480 RELEASE_ASSERT(!state->isGCForbidden());
352 state->completeSweep(); 481 state->completeSweep();
353 482
354 OwnPtr<Visitor> visitor = Visitor::create(state, gcType); 483 OwnPtr<Visitor> visitor = Visitor::create(state, gcType);
355 484
356 SafePointScope safePointScope(stackState, state); 485 SafePointScope safePointScope(stackState, state);
357 486
358 // Resume all parked threads upon leaving this scope. 487 // Resume all parked threads upon leaving this scope.
359 ParkThreadsScope parkThreadsScope; 488 ParkThreadsScope parkThreadsScope(state);
360 489
361 // Try to park the other threads. If we're unable to, bail out of the GC. 490 // Try to park the other threads. If we're unable to, bail out of the GC.
362 if (!parkThreadsScope.parkThreads(state)) 491 if (!parkThreadsScope.parkThreads())
363 return; 492 return;
364 493
365 ScriptForbiddenIfMainThreadScope scriptForbidden; 494 ScriptForbiddenIfMainThreadScope scriptForbidden;
366 495
367 TRACE_EVENT2("blink_gc,devtools.timeline", "Heap::collectGarbage", 496 TRACE_EVENT2("blink_gc,devtools.timeline", "ThreadHeap::collectGarbage",
368 "lazySweeping", gcType == BlinkGC::GCWithoutSweep, 497 "lazySweeping", gcType == BlinkGC::GCWithoutSweep,
369 "gcReason", gcReasonString(reason)); 498 "gcReason", gcReasonString(reason));
370 TRACE_EVENT_SCOPED_SAMPLING_STATE("blink_gc", "BlinkGC"); 499 TRACE_EVENT_SCOPED_SAMPLING_STATE("blink_gc", "BlinkGC");
371 double startTime = WTF::currentTimeMS(); 500 double startTime = WTF::currentTimeMS();
372 501
373 if (gcType == BlinkGC::TakeSnapshot) 502 if (gcType == BlinkGC::TakeSnapshot)
374 BlinkGCMemoryDumpProvider::instance()->clearProcessDumpForCurrentGC(); 503 BlinkGCMemoryDumpProvider::instance()->clearProcessDumpForCurrentGC();
375 504
376 // Disallow allocation during garbage collection (but not during the 505 // Disallow allocation during garbage collection (but not during the
377 // finalization that happens when the visitorScope is torn down). 506 // finalization that happens when the visitorScope is torn down).
378 ThreadState::NoAllocationScope noAllocationScope(state); 507 ThreadState::NoAllocationScope noAllocationScope(state);
379 508
380 preGC(); 509 state->heap().preGC();
381 510
382 StackFrameDepthScope stackDepthScope; 511 StackFrameDepthScope stackDepthScope;
383 512
384 size_t totalObjectSize = Heap::allocatedObjectSize() + Heap::markedObjectSiz e(); 513 size_t totalObjectSize = state->heap().heapStats().allocatedObjectSize() + s tate->heap().heapStats().markedObjectSize();
385 if (gcType != BlinkGC::TakeSnapshot) 514 if (gcType != BlinkGC::TakeSnapshot)
386 Heap::resetHeapCounters(); 515 state->heap().resetHeapCounters();
387 516
388 // 1. Trace persistent roots. 517 // 1. Trace persistent roots.
389 ThreadState::visitPersistentRoots(visitor.get()); 518 state->heap().visitPersistentRoots(visitor.get());
390 519
391 // 2. Trace objects reachable from the stack. We do this independent of the 520 // 2. Trace objects reachable from the stack. We do this independent of the
392 // given stackState since other threads might have a different stack state. 521 // given stackState since other threads might have a different stack state.
393 ThreadState::visitStackRoots(visitor.get()); 522 state->heap().visitStackRoots(visitor.get());
394 523
395 // 3. Transitive closure to trace objects including ephemerons. 524 // 3. Transitive closure to trace objects including ephemerons.
396 processMarkingStack(visitor.get()); 525 state->heap().processMarkingStack(visitor.get());
397 526
398 postMarkingProcessing(visitor.get()); 527 state->heap().postMarkingProcessing(visitor.get());
399 globalWeakProcessing(visitor.get()); 528 state->heap().globalWeakProcessing(visitor.get());
400 529
401 // Now we can delete all orphaned pages because there are no dangling 530 // Now we can delete all orphaned pages because there are no dangling
402 // pointers to the orphaned pages. (If we have such dangling pointers, 531 // pointers to the orphaned pages. (If we have such dangling pointers,
403 // we should have crashed during marking before getting here.) 532 // we should have crashed during marking before getting here.)
404 getOrphanedPagePool()->decommitOrphanedPages(); 533 state->heap().getOrphanedPagePool()->decommitOrphanedPages();
405 534
406 double markingTimeInMilliseconds = WTF::currentTimeMS() - startTime; 535 double markingTimeInMilliseconds = WTF::currentTimeMS() - startTime;
407 s_estimatedMarkingTimePerByte = totalObjectSize ? (markingTimeInMilliseconds / 1000 / totalObjectSize) : 0; 536 state->heap().heapStats().setEstimatedMarkingTimePerByte(totalObjectSize ? ( markingTimeInMilliseconds / 1000 / totalObjectSize) : 0);
408 537
409 #if PRINT_HEAP_STATS 538 #if PRINT_HEAP_STATS
410 dataLogF("Heap::collectGarbage (gcReason=%s, lazySweeping=%d, time=%.1lfms)\ n", gcReasonString(reason), gcType == BlinkGC::GCWithoutSweep, markingTimeInMill iseconds); 539 dataLogF("ThreadHeap::collectGarbage (gcReason=%s, lazySweeping=%d, time=%.1 lfms)\n", gcReasonString(reason), gcType == BlinkGC::GCWithoutSweep, markingTime InMilliseconds);
411 #endif 540 #endif
412 541
413 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, markingTimeHistogram, new CustomCountHistogram("BlinkGC.CollectGarbage", 0, 10 * 1000, 50)); 542 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, markingTimeHistogram, new CustomCountHistogram("BlinkGC.CollectGarbage", 0, 10 * 1000, 50));
414 markingTimeHistogram.count(markingTimeInMilliseconds); 543 markingTimeHistogram.count(markingTimeInMilliseconds);
415 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, totalObjectSpaceHistog ram, new CustomCountHistogram("BlinkGC.TotalObjectSpace", 0, 4 * 1024 * 1024, 50 )); 544 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, totalObjectSpaceHistog ram, new CustomCountHistogram("BlinkGC.TotalObjectSpace", 0, 4 * 1024 * 1024, 50 ));
416 totalObjectSpaceHistogram.count(Heap::allocatedObjectSize() / 1024); 545 totalObjectSpaceHistogram.count(ProcessHeap::totalAllocatedObjectSize() / 10 24);
417 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, totalAllocatedSpaceHis togram, new CustomCountHistogram("BlinkGC.TotalAllocatedSpace", 0, 4 * 1024 * 10 24, 50)); 546 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, totalAllocatedSpaceHis togram, new CustomCountHistogram("BlinkGC.TotalAllocatedSpace", 0, 4 * 1024 * 10 24, 50));
418 totalAllocatedSpaceHistogram.count(Heap::allocatedSpace() / 1024); 547 totalAllocatedSpaceHistogram.count(ProcessHeap::totalAllocatedSpace() / 1024 );
419 DEFINE_THREAD_SAFE_STATIC_LOCAL(EnumerationHistogram, gcReasonHistogram, new EnumerationHistogram("BlinkGC.GCReason", BlinkGC::NumberOfGCReason)); 548 DEFINE_THREAD_SAFE_STATIC_LOCAL(EnumerationHistogram, gcReasonHistogram, new EnumerationHistogram("BlinkGC.GCReason", BlinkGC::NumberOfGCReason));
420 gcReasonHistogram.count(reason); 549 gcReasonHistogram.count(reason);
421 550
422 s_lastGCReason = reason; 551 state->heap().m_lastGCReason = reason;
423 552
424 Heap::reportMemoryUsageHistogram(); 553 ThreadHeap::reportMemoryUsageHistogram();
425 WTF::Partitions::reportMemoryUsageHistogram(); 554 WTF::Partitions::reportMemoryUsageHistogram();
426 555
427 postGC(gcType); 556 state->heap().postGC(gcType);
428 Heap::decommitCallbackStacks(); 557 state->heap().decommitCallbackStacks();
429
430 #if ENABLE(ASSERT)
431 // 0 is used to figure non-assigned area, so avoid to use 0 in s_gcGeneratio n.
432 if (++s_gcGeneration == 0) {
433 s_gcGeneration = 1;
434 }
435 #endif
436 } 558 }
437 559
438 void Heap::collectGarbageForTerminatingThread(ThreadState* state) 560 void ThreadHeap::collectGarbageForTerminatingThread(ThreadState* state)
439 { 561 {
440 { 562 {
441 // A thread-specific termination GC must not allow other global GCs to g o 563 // A thread-specific termination GC must not allow other global GCs to g o
442 // ahead while it is running, hence the termination GC does not enter a 564 // ahead while it is running, hence the termination GC does not enter a
443 // safepoint. VisitorScope will not enter also a safepoint scope for 565 // safepoint. VisitorScope will not enter also a safepoint scope for
444 // ThreadTerminationGC. 566 // ThreadTerminationGC.
445 OwnPtr<Visitor> visitor = Visitor::create(state, BlinkGC::ThreadTerminat ionGC); 567 OwnPtr<Visitor> visitor = Visitor::create(state, BlinkGC::ThreadTerminat ionGC);
446 568
447 ThreadState::NoAllocationScope noAllocationScope(state); 569 ThreadState::NoAllocationScope noAllocationScope(state);
448 570
449 state->preGC(); 571 state->preGC();
450 572
451 // 1. Trace the thread local persistent roots. For thread local GCs we 573 // 1. Trace the thread local persistent roots. For thread local GCs we
452 // don't trace the stack (ie. no conservative scanning) since this is 574 // don't trace the stack (ie. no conservative scanning) since this is
453 // only called during thread shutdown where there should be no objects 575 // only called during thread shutdown where there should be no objects
454 // on the stack. 576 // on the stack.
455 // We also assume that orphaned pages have no objects reachable from 577 // We also assume that orphaned pages have no objects reachable from
456 // persistent handles on other threads or CrossThreadPersistents. The 578 // persistent handles on other threads or CrossThreadPersistents. The
457 // only cases where this could happen is if a subsequent conservative 579 // only cases where this could happen is if a subsequent conservative
458 // global GC finds a "pointer" on the stack or due to a programming 580 // global GC finds a "pointer" on the stack or due to a programming
459 // error where an object has a dangling cross-thread pointer to an 581 // error where an object has a dangling cross-thread pointer to an
460 // object on this heap. 582 // object on this heap.
461 state->visitPersistents(visitor.get()); 583 state->visitPersistents(visitor.get());
462 584
463 // 2. Trace objects reachable from the thread's persistent roots 585 // 2. Trace objects reachable from the thread's persistent roots
464 // including ephemerons. 586 // including ephemerons.
465 processMarkingStack(visitor.get()); 587 state->heap().processMarkingStack(visitor.get());
466 588
467 postMarkingProcessing(visitor.get()); 589 state->heap().postMarkingProcessing(visitor.get());
468 globalWeakProcessing(visitor.get()); 590 state->heap().globalWeakProcessing(visitor.get());
469 591
470 state->postGC(BlinkGC::GCWithSweep); 592 state->postGC(BlinkGC::GCWithSweep);
471 Heap::decommitCallbackStacks(); 593 state->heap().decommitCallbackStacks();
472 } 594 }
473 state->preSweep(); 595 state->preSweep();
474 } 596 }
475 597
476 void Heap::processMarkingStack(Visitor* visitor) 598 void ThreadHeap::processMarkingStack(Visitor* visitor)
477 { 599 {
478 // Ephemeron fixed point loop. 600 // Ephemeron fixed point loop.
479 do { 601 do {
480 { 602 {
481 // Iteratively mark all objects that are reachable from the objects 603 // Iteratively mark all objects that are reachable from the objects
482 // currently pushed onto the marking stack. 604 // currently pushed onto the marking stack.
483 TRACE_EVENT0("blink_gc", "Heap::processMarkingStackSingleThreaded"); 605 TRACE_EVENT0("blink_gc", "ThreadHeap::processMarkingStackSingleThrea ded");
484 while (popAndInvokeTraceCallback(visitor)) { } 606 while (popAndInvokeTraceCallback(visitor)) { }
485 } 607 }
486 608
487 { 609 {
488 // Mark any strong pointers that have now become reachable in 610 // Mark any strong pointers that have now become reachable in
489 // ephemeron maps. 611 // ephemeron maps.
490 TRACE_EVENT0("blink_gc", "Heap::processEphemeronStack"); 612 TRACE_EVENT0("blink_gc", "ThreadHeap::processEphemeronStack");
491 s_ephemeronStack->invokeEphemeronCallbacks(visitor); 613 m_ephemeronStack->invokeEphemeronCallbacks(visitor);
492 } 614 }
493 615
494 // Rerun loop if ephemeron processing queued more objects for tracing. 616 // Rerun loop if ephemeron processing queued more objects for tracing.
495 } while (!s_markingStack->isEmpty()); 617 } while (!m_markingStack->isEmpty());
496 } 618 }
497 619
498 void Heap::postMarkingProcessing(Visitor* visitor) 620 void ThreadHeap::postMarkingProcessing(Visitor* visitor)
499 { 621 {
500 TRACE_EVENT0("blink_gc", "Heap::postMarkingProcessing"); 622 TRACE_EVENT0("blink_gc", "ThreadHeap::postMarkingProcessing");
501 // Call post-marking callbacks including: 623 // Call post-marking callbacks including:
502 // 1. the ephemeronIterationDone callbacks on weak tables to do cleanup 624 // 1. the ephemeronIterationDone callbacks on weak tables to do cleanup
503 // (specifically to clear the queued bits for weak hash tables), and 625 // (specifically to clear the queued bits for weak hash tables), and
504 // 2. the markNoTracing callbacks on collection backings to mark them 626 // 2. the markNoTracing callbacks on collection backings to mark them
505 // if they are only reachable from their front objects. 627 // if they are only reachable from their front objects.
506 while (popAndInvokePostMarkingCallback(visitor)) { } 628 while (popAndInvokePostMarkingCallback(visitor)) { }
507 629
508 // Post-marking callbacks should not trace any objects and 630 // Post-marking callbacks should not trace any objects and
509 // therefore the marking stack should be empty after the 631 // therefore the marking stack should be empty after the
510 // post-marking callbacks. 632 // post-marking callbacks.
511 ASSERT(s_markingStack->isEmpty()); 633 ASSERT(m_markingStack->isEmpty());
512 } 634 }
513 635
514 void Heap::globalWeakProcessing(Visitor* visitor) 636 void ThreadHeap::globalWeakProcessing(Visitor* visitor)
515 { 637 {
516 TRACE_EVENT0("blink_gc", "Heap::globalWeakProcessing"); 638 TRACE_EVENT0("blink_gc", "ThreadHeap::globalWeakProcessing");
517 double startTime = WTF::currentTimeMS(); 639 double startTime = WTF::currentTimeMS();
518 640
519 // Call weak callbacks on objects that may now be pointing to dead objects. 641 // Call weak callbacks on objects that may now be pointing to dead objects.
520 while (popAndInvokeGlobalWeakCallback(visitor)) { } 642 while (popAndInvokeGlobalWeakCallback(visitor)) { }
521 643
522 // It is not permitted to trace pointers of live objects in the weak 644 // It is not permitted to trace pointers of live objects in the weak
523 // callback phase, so the marking stack should still be empty here. 645 // callback phase, so the marking stack should still be empty here.
524 ASSERT(s_markingStack->isEmpty()); 646 ASSERT(m_markingStack->isEmpty());
525 647
526 double timeForGlobalWeakProcessing = WTF::currentTimeMS() - startTime; 648 double timeForGlobalWeakProcessing = WTF::currentTimeMS() - startTime;
527 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, globalWeakTimeHistogram, new CustomCountHistogram("BlinkGC.TimeForGlobalWeakProcessing", 1, 10 * 1000, 50)); 649 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, globalWeakTimeHistogram, new CustomCountHistogram("BlinkGC.TimeForGlobalWeakProcessing", 1, 10 * 1000, 50));
528 globalWeakTimeHistogram.count(timeForGlobalWeakProcessing); 650 globalWeakTimeHistogram.count(timeForGlobalWeakProcessing);
529 } 651 }
530 652
531 void Heap::collectAllGarbage() 653 void ThreadHeap::collectAllGarbage()
532 { 654 {
533 // We need to run multiple GCs to collect a chain of persistent handles. 655 // We need to run multiple GCs to collect a chain of persistent handles.
534 size_t previousLiveObjects = 0; 656 size_t previousLiveObjects = 0;
535 for (int i = 0; i < 5; ++i) { 657 for (int i = 0; i < 5; ++i) {
536 collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithSweep, Bli nkGC::ForcedGC); 658 collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithSweep, Bli nkGC::ForcedGC);
537 size_t liveObjects = Heap::markedObjectSize(); 659 size_t liveObjects = ThreadState::current()->heap().heapStats().markedOb jectSize();
538 if (liveObjects == previousLiveObjects) 660 if (liveObjects == previousLiveObjects)
539 break; 661 break;
540 previousLiveObjects = liveObjects; 662 previousLiveObjects = liveObjects;
541 } 663 }
542 } 664 }
543 665
544 double Heap::estimatedMarkingTime() 666 void ThreadHeap::reportMemoryUsageHistogram()
545 {
546 ASSERT(ThreadState::current()->isMainThread());
547
548 // Use 8 ms as initial estimated marking time.
549 // 8 ms is long enough for low-end mobile devices to mark common
550 // real-world object graphs.
551 if (s_estimatedMarkingTimePerByte == 0)
552 return 0.008;
553
554 // Assuming that the collection rate of this GC will be mostly equal to
555 // the collection rate of the last GC, estimate the marking time of this GC.
556 return s_estimatedMarkingTimePerByte * (Heap::allocatedObjectSize() + Heap:: markedObjectSize());
557 }
558
559 void Heap::reportMemoryUsageHistogram()
560 { 667 {
561 static size_t supportedMaxSizeInMB = 4 * 1024; 668 static size_t supportedMaxSizeInMB = 4 * 1024;
562 static size_t observedMaxSizeInMB = 0; 669 static size_t observedMaxSizeInMB = 0;
563 670
564 // We only report the memory in the main thread. 671 // We only report the memory in the main thread.
565 if (!isMainThread()) 672 if (!isMainThread())
566 return; 673 return;
567 // +1 is for rounding up the sizeInMB. 674 // +1 is for rounding up the sizeInMB.
568 size_t sizeInMB = Heap::allocatedSpace() / 1024 / 1024 + 1; 675 size_t sizeInMB = ThreadState::current()->heap().heapStats().allocatedSpace( ) / 1024 / 1024 + 1;
569 if (sizeInMB >= supportedMaxSizeInMB) 676 if (sizeInMB >= supportedMaxSizeInMB)
570 sizeInMB = supportedMaxSizeInMB - 1; 677 sizeInMB = supportedMaxSizeInMB - 1;
571 if (sizeInMB > observedMaxSizeInMB) { 678 if (sizeInMB > observedMaxSizeInMB) {
572 // Send a UseCounter only when we see the highest memory usage 679 // Send a UseCounter only when we see the highest memory usage
573 // we've ever seen. 680 // we've ever seen.
574 DEFINE_THREAD_SAFE_STATIC_LOCAL(EnumerationHistogram, commitedSizeHistog ram, new EnumerationHistogram("BlinkGC.CommittedSize", supportedMaxSizeInMB)); 681 DEFINE_THREAD_SAFE_STATIC_LOCAL(EnumerationHistogram, commitedSizeHistog ram, new EnumerationHistogram("BlinkGC.CommittedSize", supportedMaxSizeInMB));
575 commitedSizeHistogram.count(sizeInMB); 682 commitedSizeHistogram.count(sizeInMB);
576 observedMaxSizeInMB = sizeInMB; 683 observedMaxSizeInMB = sizeInMB;
577 } 684 }
578 } 685 }
579 686
580 void Heap::reportMemoryUsageForTracing() 687 void ThreadHeap::reportMemoryUsageForTracing()
581 { 688 {
582 #if PRINT_HEAP_STATS 689 #if PRINT_HEAP_STATS
583 // dataLogF("allocatedSpace=%ldMB, allocatedObjectSize=%ldMB, markedObjectSi ze=%ldMB, partitionAllocSize=%ldMB, wrapperCount=%ld, collectedWrapperCount=%ld\ n", Heap::allocatedSpace() / 1024 / 1024, Heap::allocatedObjectSize() / 1024 / 1 024, Heap::markedObjectSize() / 1024 / 1024, WTF::Partitions::totalSizeOfCommitt edPages() / 1024 / 1024, Heap::wrapperCount(), Heap::collectedWrapperCount()); 690 // dataLogF("allocatedSpace=%ldMB, allocatedObjectSize=%ldMB, markedObjectSi ze=%ldMB, partitionAllocSize=%ldMB, wrapperCount=%ld, collectedWrapperCount=%ld\ n", ThreadHeap::allocatedSpace() / 1024 / 1024, ThreadHeap::allocatedObjectSize( ) / 1024 / 1024, ThreadHeap::markedObjectSize() / 1024 / 1024, WTF::Partitions:: totalSizeOfCommittedPages() / 1024 / 1024, ThreadHeap::wrapperCount(), ThreadHea p::collectedWrapperCount());
584 #endif 691 #endif
585 692
586 bool gcTracingEnabled; 693 bool gcTracingEnabled;
587 TRACE_EVENT_CATEGORY_GROUP_ENABLED("blink_gc", &gcTracingEnabled); 694 TRACE_EVENT_CATEGORY_GROUP_ENABLED("blink_gc", &gcTracingEnabled);
588 if (!gcTracingEnabled) 695 if (!gcTracingEnabled)
589 return; 696 return;
590 697
698 ThreadHeap& heap = ThreadState::current()->heap();
591 // These values are divided by 1024 to avoid overflow in practical cases (TR ACE_COUNTER values are 32-bit ints). 699 // These values are divided by 1024 to avoid overflow in practical cases (TR ACE_COUNTER values are 32-bit ints).
592 // They are capped to INT_MAX just in case. 700 // They are capped to INT_MAX just in case.
593 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::allocatedObject SizeKB", std::min(Heap::allocatedObjectSize() / 1024, static_cast<size_t>(INT_MA X))); 701 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::allocated ObjectSizeKB", std::min(heap.heapStats().allocatedObjectSize() / 1024, static_ca st<size_t>(INT_MAX)));
594 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::markedObjectSiz eKB", std::min(Heap::markedObjectSize() / 1024, static_cast<size_t>(INT_MAX))); 702 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::markedObj ectSizeKB", std::min(heap.heapStats().markedObjectSize() / 1024, static_cast<siz e_t>(INT_MAX)));
595 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::markedObjectSiz eAtLastCompleteSweepKB", std::min(Heap::markedObjectSizeAtLastCompleteSweep() / 1024, static_cast<size_t>(INT_MAX))); 703 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::markedObj ectSizeAtLastCompleteSweepKB", std::min(heap.heapStats().markedObjectSizeAtLastC ompleteSweep() / 1024, static_cast<size_t>(INT_MAX)));
596 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::allocatedSpaceK B", std::min(Heap::allocatedSpace() / 1024, static_cast<size_t>(INT_MAX))); 704 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::allocated SpaceKB", std::min(heap.heapStats().allocatedSpace() / 1024, static_cast<size_t> (INT_MAX)));
597 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::objectSizeAtLas tGCKB", std::min(Heap::objectSizeAtLastGC() / 1024, static_cast<size_t>(INT_MAX) )); 705 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::objectSiz eAtLastGCKB", std::min(heap.heapStats().objectSizeAtLastGC() / 1024, static_cast <size_t>(INT_MAX)));
598 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::wrapperCount", std::min(Heap::wrapperCount(), static_cast<size_t>(INT_MAX))); 706 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::wrapperCo unt", std::min(heap.heapStats().wrapperCount(), static_cast<size_t>(INT_MAX)));
599 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::wrapperCountAtL astGC", std::min(Heap::wrapperCountAtLastGC(), static_cast<size_t>(INT_MAX))); 707 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::wrapperCo untAtLastGC", std::min(heap.heapStats().wrapperCountAtLastGC(), static_cast<size _t>(INT_MAX)));
600 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::collectedWrappe rCount", std::min(Heap::collectedWrapperCount(), static_cast<size_t>(INT_MAX))); 708 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::collected WrapperCount", std::min(heap.heapStats().collectedWrapperCount(), static_cast<si ze_t>(INT_MAX)));
601 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::partitionAllocS izeAtLastGCKB", std::min(Heap::partitionAllocSizeAtLastGC() / 1024, static_cast< size_t>(INT_MAX))); 709 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::partition AllocSizeAtLastGCKB", std::min(heap.heapStats().partitionAllocSizeAtLastGC() / 1 024, static_cast<size_t>(INT_MAX)));
602 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Partitions::totalSize OfCommittedPagesKB", std::min(WTF::Partitions::totalSizeOfCommittedPages() / 102 4, static_cast<size_t>(INT_MAX)));
603 } 710 }
604 711
605 size_t Heap::objectPayloadSizeForTesting() 712 size_t ThreadHeap::objectPayloadSizeForTesting()
606 { 713 {
607 size_t objectPayloadSize = 0; 714 size_t objectPayloadSize = 0;
608 for (ThreadState* state : ThreadState::attachedThreads()) { 715 for (ThreadState* state : m_threads) {
609 state->setGCState(ThreadState::GCRunning); 716 state->setGCState(ThreadState::GCRunning);
610 state->makeConsistentForGC(); 717 state->makeConsistentForGC();
611 objectPayloadSize += state->objectPayloadSizeForTesting(); 718 objectPayloadSize += state->objectPayloadSizeForTesting();
612 state->setGCState(ThreadState::EagerSweepScheduled); 719 state->setGCState(ThreadState::EagerSweepScheduled);
613 state->setGCState(ThreadState::Sweeping); 720 state->setGCState(ThreadState::Sweeping);
614 state->setGCState(ThreadState::NoGCScheduled); 721 state->setGCState(ThreadState::NoGCScheduled);
615 } 722 }
616 return objectPayloadSize; 723 return objectPayloadSize;
617 } 724 }
618 725
619 RegionTree* Heap::getRegionTree() 726 void ThreadHeap::resetHeapCounters()
620 { 727 {
621 DEFINE_THREAD_SAFE_STATIC_LOCAL(RegionTree, tree, new RegionTree); 728 ASSERT(ThreadState::current()->isInGC());
622 return &tree; 729
730 ThreadHeap::reportMemoryUsageForTracing();
731
732 m_stats.reset();
733 {
734 MutexLocker locker(m_threadAttachMutex);
735 for (ThreadState* state : m_threads)
736 state->resetHeapCounters();
737 }
623 } 738 }
624 739
625 BasePage* Heap::lookup(Address address) 740 void ThreadHeap::visitPersistentRoots(Visitor* visitor)
741 {
742 TRACE_EVENT0("blink_gc", "ThreadHeap::visitPersistentRoots");
743 ProcessHeap::crossThreadPersistentRegion().tracePersistentNodes(visitor);
744
745 for (ThreadState* state : m_threads) {
746 state->visitPersistents(visitor);
747 }
748 }
749
750 void ThreadHeap::visitStackRoots(Visitor* visitor)
751 {
752 TRACE_EVENT0("blink_gc", "ThreadHeap::visitStackRoots");
753 for (ThreadState* state : m_threads) {
754 state->visitStack(visitor);
755 }
756 }
757
758 void ThreadHeap::checkAndPark(ThreadState* threadState, SafePointAwareMutexLocke r* locker)
759 {
760 m_safePointBarrier->checkAndPark(threadState, locker);
761 }
762
763 void ThreadHeap::enterSafePoint(ThreadState* threadState)
764 {
765 m_safePointBarrier->enterSafePoint(threadState);
766 }
767
768 void ThreadHeap::leaveSafePoint(ThreadState* threadState, SafePointAwareMutexLoc ker* locker)
769 {
770 m_safePointBarrier->leaveSafePoint(threadState, locker);
771 }
772
773 BasePage* ThreadHeap::lookupPageForAddress(Address address)
626 { 774 {
627 ASSERT(ThreadState::current()->isInGC()); 775 ASSERT(ThreadState::current()->isInGC());
628 if (PageMemoryRegion* region = Heap::getRegionTree()->lookup(address)) { 776 if (!m_regionTree)
777 return nullptr;
778 if (PageMemoryRegion* region = m_regionTree->lookup(address)) {
629 BasePage* page = region->pageFromAddress(address); 779 BasePage* page = region->pageFromAddress(address);
630 return page && !page->orphaned() ? page : nullptr; 780 return page && !page->orphaned() ? page : nullptr;
631 } 781 }
632 return nullptr; 782 return nullptr;
633 } 783 }
634 784
635 void Heap::resetHeapCounters()
636 {
637 ASSERT(ThreadState::current()->isInGC());
638
639 Heap::reportMemoryUsageForTracing();
640
641 s_objectSizeAtLastGC = s_allocatedObjectSize + s_markedObjectSize;
642 s_partitionAllocSizeAtLastGC = WTF::Partitions::totalSizeOfCommittedPages();
643 s_allocatedObjectSize = 0;
644 s_markedObjectSize = 0;
645 s_wrapperCountAtLastGC = s_wrapperCount;
646 s_collectedWrapperCount = 0;
647 for (ThreadState* state : ThreadState::attachedThreads())
648 state->resetHeapCounters();
649 }
650
651 CallbackStack* Heap::s_markingStack;
652 CallbackStack* Heap::s_postMarkingCallbackStack;
653 CallbackStack* Heap::s_globalWeakCallbackStack;
654 CallbackStack* Heap::s_ephemeronStack;
655 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache;
656 FreePagePool* Heap::s_freePagePool;
657 OrphanedPagePool* Heap::s_orphanedPagePool;
658 size_t Heap::s_allocatedSpace = 0;
659 size_t Heap::s_allocatedObjectSize = 0;
660 size_t Heap::s_objectSizeAtLastGC = 0;
661 size_t Heap::s_markedObjectSize = 0;
662 size_t Heap::s_markedObjectSizeAtLastCompleteSweep = 0;
663 size_t Heap::s_wrapperCount = 0;
664 size_t Heap::s_wrapperCountAtLastGC = 0;
665 size_t Heap::s_collectedWrapperCount = 0;
666 size_t Heap::s_partitionAllocSizeAtLastGC = 0;
667 double Heap::s_estimatedMarkingTimePerByte = 0.0;
668 bool Heap::s_isLowEndDevice = false;
669 BlinkGC::GCReason Heap::s_lastGCReason = BlinkGC::NumberOfGCReason;
670 #if ENABLE(ASSERT)
671 uint16_t Heap::s_gcGeneration = 0;
672 #endif
673
674 } // namespace blink 785 } // namespace blink
OLDNEW
« no previous file with comments | « third_party/WebKit/Source/platform/heap/Heap.h ('k') | third_party/WebKit/Source/platform/heap/HeapAllocator.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698