Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(14)

Side by Side Diff: third_party/WebKit/Source/platform/heap/ThreadState.cpp

Issue 1477023003: Refactor the Heap into ThreadHeap to prepare for per thread heaps Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 4 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 54 matching lines...) Expand 10 before | Expand all | Expand 10 after
65 #endif 65 #endif
66 66
67 #include <v8.h> 67 #include <v8.h>
68 68
69 namespace blink { 69 namespace blink {
70 70
71 WTF::ThreadSpecific<ThreadState*>* ThreadState::s_threadSpecific = nullptr; 71 WTF::ThreadSpecific<ThreadState*>* ThreadState::s_threadSpecific = nullptr;
72 uintptr_t ThreadState::s_mainThreadStackStart = 0; 72 uintptr_t ThreadState::s_mainThreadStackStart = 0;
73 uintptr_t ThreadState::s_mainThreadUnderestimatedStackSize = 0; 73 uintptr_t ThreadState::s_mainThreadUnderestimatedStackSize = 0;
74 uint8_t ThreadState::s_mainThreadStateStorage[sizeof(ThreadState)]; 74 uint8_t ThreadState::s_mainThreadStateStorage[sizeof(ThreadState)];
75 SafePointBarrier* ThreadState::s_safePointBarrier = nullptr;
76
77 RecursiveMutex& ThreadState::threadAttachMutex()
78 {
79 DEFINE_THREAD_SAFE_STATIC_LOCAL(RecursiveMutex, mutex, (new RecursiveMutex)) ;
80 return mutex;
81 }
82 75
83 ThreadState::ThreadState() 76 ThreadState::ThreadState()
84 : m_thread(currentThread()) 77 : m_thread(currentThread())
85 , m_persistentRegion(adoptPtr(new PersistentRegion())) 78 , m_persistentRegion(adoptPtr(new PersistentRegion()))
86 #if OS(WIN) && COMPILER(MSVC) 79 #if OS(WIN) && COMPILER(MSVC)
87 , m_threadStackSize(0) 80 , m_threadStackSize(0)
88 #endif 81 #endif
89 , m_startOfStack(reinterpret_cast<intptr_t*>(StackFrameDepth::getStackStart( ))) 82 , m_startOfStack(reinterpret_cast<intptr_t*>(StackFrameDepth::getStackStart( )))
90 , m_endOfStack(reinterpret_cast<intptr_t*>(StackFrameDepth::getStackStart()) ) 83 , m_endOfStack(reinterpret_cast<intptr_t*>(StackFrameDepth::getStackStart()) )
91 , m_safePointScopeMarker(nullptr) 84 , m_safePointScopeMarker(nullptr)
92 , m_atSafePoint(false) 85 , m_atSafePoint(false)
93 , m_interruptors() 86 , m_interruptors()
94 , m_sweepForbidden(false) 87 , m_sweepForbidden(false)
95 , m_noAllocationCount(0) 88 , m_noAllocationCount(0)
96 , m_gcForbiddenCount(0) 89 , m_gcForbiddenCount(0)
97 , m_accumulatedSweepingTime(0) 90 , m_accumulatedSweepingTime(0)
98 , m_vectorBackingHeapIndex(BlinkGC::Vector1HeapIndex) 91 , m_vectorBackingArenaIndex(BlinkGC::Vector1ArenaIndex)
99 , m_currentHeapAges(0) 92 , m_currentHeapAges(0)
100 , m_isTerminating(false) 93 , m_isTerminating(false)
101 , m_gcMixinMarker(nullptr) 94 , m_gcMixinMarker(nullptr)
102 , m_shouldFlushHeapDoesNotContainCache(false) 95 , m_shouldFlushHeapDoesNotContainCache(false)
103 , m_gcState(NoGCScheduled) 96 , m_gcState(NoGCScheduled)
104 , m_isolate(nullptr) 97 , m_isolate(nullptr)
105 , m_traceDOMWrappers(nullptr) 98 , m_traceDOMWrappers(nullptr)
106 #if defined(ADDRESS_SANITIZER) 99 #if defined(ADDRESS_SANITIZER)
107 , m_asanFakeStack(__asan_get_current_fake_stack()) 100 , m_asanFakeStack(__asan_get_current_fake_stack())
108 #endif 101 #endif
109 #if defined(LEAK_SANITIZER) 102 #if defined(LEAK_SANITIZER)
110 , m_disabledStaticPersistentsRegistration(0) 103 , m_disabledStaticPersistentsRegistration(0)
111 #endif 104 #endif
112 , m_allocatedObjectSize(0) 105 , m_allocatedObjectSize(0)
113 , m_markedObjectSize(0) 106 , m_markedObjectSize(0)
114 , m_reportedMemoryToV8(0) 107 , m_reportedMemoryToV8(0)
115 { 108 {
116 ASSERT(checkThread()); 109 ASSERT(checkThread());
117 ASSERT(!**s_threadSpecific); 110 ASSERT(!**s_threadSpecific);
118 **s_threadSpecific = this; 111 **s_threadSpecific = this;
119 112
120 if (isMainThread()) { 113 if (isMainThread()) {
121 s_mainThreadStackStart = reinterpret_cast<uintptr_t>(m_startOfStack) - s izeof(void*); 114 s_mainThreadStackStart = reinterpret_cast<uintptr_t>(m_startOfStack) - s izeof(void*);
122 size_t underestimatedStackSize = StackFrameDepth::getUnderestimatedStack Size(); 115 size_t underestimatedStackSize = StackFrameDepth::getUnderestimatedStack Size();
123 if (underestimatedStackSize > sizeof(void*)) 116 if (underestimatedStackSize > sizeof(void*))
124 s_mainThreadUnderestimatedStackSize = underestimatedStackSize - size of(void*); 117 s_mainThreadUnderestimatedStackSize = underestimatedStackSize - size of(void*);
118 m_heap = new Heap();
119 } else {
120 m_heap = &ThreadState::mainThreadState()->heap();
125 } 121 }
122 ASSERT(m_heap);
123 m_heap->attach(this);
126 124
127 for (int heapIndex = 0; heapIndex < BlinkGC::LargeObjectHeapIndex; heapIndex ++) 125 for (int arenaIndex = 0; arenaIndex < BlinkGC::LargeObjectArenaIndex; arenaI ndex++)
128 m_heaps[heapIndex] = new NormalPageHeap(this, heapIndex); 126 m_arenas[arenaIndex] = new NormalPageArena(this, arenaIndex);
129 m_heaps[BlinkGC::LargeObjectHeapIndex] = new LargeObjectHeap(this, BlinkGC:: LargeObjectHeapIndex); 127 m_arenas[BlinkGC::LargeObjectArenaIndex] = new LargeObjectArena(this, BlinkG C::LargeObjectArenaIndex);
130 128
131 m_likelyToBePromptlyFreed = adoptArrayPtr(new int[likelyToBePromptlyFreedArr aySize]); 129 m_likelyToBePromptlyFreed = adoptArrayPtr(new int[likelyToBePromptlyFreedArr aySize]);
132 clearHeapAges(); 130 clearHeapAges();
133 131
134 m_threadLocalWeakCallbackStack = new CallbackStack(); 132 m_threadLocalWeakCallbackStack = new CallbackStack();
135 } 133 }
136 134
137 ThreadState::~ThreadState() 135 ThreadState::~ThreadState()
138 { 136 {
139 ASSERT(checkThread()); 137 ASSERT(checkThread());
140 delete m_threadLocalWeakCallbackStack; 138 delete m_threadLocalWeakCallbackStack;
141 m_threadLocalWeakCallbackStack = nullptr; 139 m_threadLocalWeakCallbackStack = nullptr;
142 for (int i = 0; i < BlinkGC::NumberOfHeaps; ++i) 140 for (int i = 0; i < BlinkGC::NumberOfArenas; ++i)
143 delete m_heaps[i]; 141 delete m_arenas[i];
144 142
145 **s_threadSpecific = nullptr; 143 **s_threadSpecific = nullptr;
146 if (isMainThread()) { 144 if (isMainThread()) {
147 s_mainThreadStackStart = 0; 145 s_mainThreadStackStart = 0;
148 s_mainThreadUnderestimatedStackSize = 0; 146 s_mainThreadUnderestimatedStackSize = 0;
149 } 147 }
150 } 148 }
151 149
152 void ThreadState::init() 150 void ThreadState::init()
153 { 151 {
154 s_threadSpecific = new WTF::ThreadSpecific<ThreadState*>(); 152 s_threadSpecific = new WTF::ThreadSpecific<ThreadState*>();
155 s_safePointBarrier = new SafePointBarrier;
156 } 153 }
157 154
158 void ThreadState::shutdown() 155 void ThreadState::shutdown()
159 { 156 {
160 delete s_safePointBarrier;
161 s_safePointBarrier = nullptr;
162
163 // Thread-local storage shouldn't be disposed, so we don't call ~ThreadSpeci fic(). 157 // Thread-local storage shouldn't be disposed, so we don't call ~ThreadSpeci fic().
164 } 158 }
165 159
166 #if OS(WIN) && COMPILER(MSVC) 160 #if OS(WIN) && COMPILER(MSVC)
167 size_t ThreadState::threadStackSize() 161 size_t ThreadState::threadStackSize()
168 { 162 {
169 if (m_threadStackSize) 163 if (m_threadStackSize)
170 return m_threadStackSize; 164 return m_threadStackSize;
171 165
172 // Notice that we cannot use the TIB's StackLimit for the stack end, as it 166 // Notice that we cannot use the TIB's StackLimit for the stack end, as it
(...skipping 21 matching lines...) Expand all
194 // explains the details. 188 // explains the details.
195 RELEASE_ASSERT(m_threadStackSize > 4 * 0x1000); 189 RELEASE_ASSERT(m_threadStackSize > 4 * 0x1000);
196 m_threadStackSize -= 4 * 0x1000; 190 m_threadStackSize -= 4 * 0x1000;
197 return m_threadStackSize; 191 return m_threadStackSize;
198 } 192 }
199 #endif 193 #endif
200 194
201 void ThreadState::attachMainThread() 195 void ThreadState::attachMainThread()
202 { 196 {
203 RELEASE_ASSERT(!Heap::s_shutdownCalled); 197 RELEASE_ASSERT(!Heap::s_shutdownCalled);
204 MutexLocker locker(threadAttachMutex()); 198 new(s_mainThreadStateStorage) ThreadState();
205 ThreadState* state = new(s_mainThreadStateStorage) ThreadState(); 199 }
206 attachedThreads().add(state); 200
201 void ThreadState::attach()
202 {
203 RELEASE_ASSERT(!Heap::s_shutdownCalled);
204 new ThreadState();
205 }
206
207 void ThreadState::cleanupPages()
208 {
209 ASSERT(checkThread());
210 for (int i = 0; i < BlinkGC::NumberOfArenas; ++i)
211 m_arenas[i]->cleanupPages();
212 }
213
214 void ThreadState::cleanup()
haraken 2016/02/29 11:17:45 cleanup => runThreadTerminationGC
keishi 2016/03/02 06:01:03 Done.
215 {
216 if (isMainThread()) {
217 cleanupPages();
218 return;
219 }
220 ASSERT(checkThread());
221
222 // Grab the threadAttachMutex to ensure only one thread can shutdown at
223 // a time and that no other thread can do a global GC. It also allows
224 // safe iteration of the attachedThreads set which happens as part of
225 // thread local GC asserts. We enter a safepoint while waiting for the
226 // lock to avoid a dead-lock where another thread has already requested
227 // GC.
228 // MEMO: move comment
229
230 // Finish sweeping.
231 completeSweep();
232
233 // From here on ignore all conservatively discovered
234 // pointers into the heap owned by this thread.
235 m_isTerminating = true;
236
237 // Set the terminate flag on all heap pages of this thread. This is used to
238 // ensure we don't trace pages on other threads that are not part of the
239 // thread local GC.
240 prepareForThreadStateTermination();
241
242 Heap::crossThreadPersistentRegion().prepareForThreadStateTermination(this);
243
244 // Do thread local GC's as long as the count of thread local Persistents
245 // changes and is above zero.
246 int oldCount = -1;
247 int currentCount = persistentRegion()->numberOfPersistents();
248 ASSERT(currentCount >= 0);
249 while (currentCount != oldCount) {
250 Heap::collectGarbageForTerminatingThread(this);
251 oldCount = currentCount;
252 currentCount = persistentRegion()->numberOfPersistents();
253 }
254 // We should not have any persistents left when getting to this point,
255 // if we have it is probably a bug so adding a debug ASSERT to catch this.
256 ASSERT(!currentCount);
257 // All of pre-finalizers should be consumed.
258 ASSERT(m_orderedPreFinalizers.isEmpty());
259 RELEASE_ASSERT(gcState() == NoGCScheduled);
260
261 // Add pages to the orphaned page pool to ensure any global GCs from this po int
262 // on will not trace objects on this thread's heaps.
263 cleanupPages();
207 } 264 }
208 265
209 void ThreadState::detachMainThread() 266 void ThreadState::detachMainThread()
210 { 267 {
211 // Enter a safe point before trying to acquire threadAttachMutex 268 // Enter a safe point before trying to acquire threadAttachMutex
212 // to avoid dead lock if another thread is preparing for GC, has acquired 269 // to avoid dead lock if another thread is preparing for GC, has acquired
213 // threadAttachMutex and waiting for other threads to pause or reach a 270 // threadAttachMutex and waiting for other threads to pause or reach a
214 // safepoint. 271 // safepoint.
215 ThreadState* state = mainThreadState(); 272 ThreadState* state = mainThreadState();
216 273
217 // 1. Finish sweeping. 274 // 1. Finish sweeping.
218 state->completeSweep(); 275 state->completeSweep();
219 { 276 state->detach();
220 SafePointAwareMutexLocker locker(threadAttachMutex(), BlinkGC::NoHeapPoi ntersOnStack);
221
222 // 2. Add the main thread's heap pages to the orphaned pool.
223 state->cleanupPages();
224
225 // 3. Detach the main thread.
226 ASSERT(attachedThreads().contains(state));
227 attachedThreads().remove(state);
228 state->~ThreadState();
229 }
230 shutdownHeapIfNecessary();
231 } 277 }
232 278
233 void ThreadState::shutdownHeapIfNecessary() 279 void ThreadState::detachCurrentThread()
234 { 280 {
235 // We don't need to enter a safe point before acquiring threadAttachMutex 281 current()->detach();
236 // because this thread is already detached.
237
238 MutexLocker locker(threadAttachMutex());
239 // We start shutting down the heap if there is no running thread
240 // and Heap::shutdown() is already called.
241 if (!attachedThreads().size() && Heap::s_shutdownCalled)
242 Heap::doShutdown();
243 }
244
245 void ThreadState::attach()
246 {
247 RELEASE_ASSERT(!Heap::s_shutdownCalled);
248 MutexLocker locker(threadAttachMutex());
249 ThreadState* state = new ThreadState();
250 attachedThreads().add(state);
251 }
252
253 void ThreadState::cleanupPages()
254 {
255 ASSERT(checkThread());
256 for (int i = 0; i < BlinkGC::NumberOfHeaps; ++i)
257 m_heaps[i]->cleanupPages();
258 }
259
260 void ThreadState::cleanup()
261 {
262 ASSERT(checkThread());
263 {
264 // Grab the threadAttachMutex to ensure only one thread can shutdown at
265 // a time and that no other thread can do a global GC. It also allows
266 // safe iteration of the attachedThreads set which happens as part of
267 // thread local GC asserts. We enter a safepoint while waiting for the
268 // lock to avoid a dead-lock where another thread has already requested
269 // GC.
270 SafePointAwareMutexLocker locker(threadAttachMutex(), BlinkGC::NoHeapPoi ntersOnStack);
271
272 // Finish sweeping.
273 completeSweep();
274
275 // From here on ignore all conservatively discovered
276 // pointers into the heap owned by this thread.
277 m_isTerminating = true;
278
279 // Set the terminate flag on all heap pages of this thread. This is used to
280 // ensure we don't trace pages on other threads that are not part of the
281 // thread local GC.
282 prepareForThreadStateTermination();
283
284 Heap::crossThreadPersistentRegion().prepareForThreadStateTermination(thi s);
285
286 // Do thread local GC's as long as the count of thread local Persistents
287 // changes and is above zero.
288 int oldCount = -1;
289 int currentCount = persistentRegion()->numberOfPersistents();
290 ASSERT(currentCount >= 0);
291 while (currentCount != oldCount) {
292 Heap::collectGarbageForTerminatingThread(this);
293 oldCount = currentCount;
294 currentCount = persistentRegion()->numberOfPersistents();
295 }
296 // We should not have any persistents left when getting to this point,
297 // if we have it is probably a bug so adding a debug ASSERT to catch thi s.
298 ASSERT(!currentCount);
299 // All of pre-finalizers should be consumed.
300 ASSERT(m_orderedPreFinalizers.isEmpty());
301 RELEASE_ASSERT(gcState() == NoGCScheduled);
302
303 // Add pages to the orphaned page pool to ensure any global GCs from thi s point
304 // on will not trace objects on this thread's heaps.
305 cleanupPages();
306
307 ASSERT(attachedThreads().contains(this));
308 attachedThreads().remove(this);
309 }
310 } 282 }
311 283
312 void ThreadState::detach() 284 void ThreadState::detach()
313 { 285 {
314 ThreadState* state = current(); 286 m_heap->detach(this);
315 state->cleanup(); 287 RELEASE_ASSERT(gcState() == ThreadState::NoGCScheduled);
316 RELEASE_ASSERT(state->gcState() == ThreadState::NoGCScheduled); 288 if (isMainThread())
317 delete state; 289 this->~ThreadState();
haraken 2016/02/29 11:17:45 Is this different from 'delete this'?
keishi 2016/03/02 06:01:03 Yes. mainThreadState is stored in s_mainThreadStateStorage (a static buffer, constructed with placement new), so it was never heap-allocated and 'delete this' would be invalid; we only run the destructor explicitly.
318 shutdownHeapIfNecessary(); 290 else
319 } 291 delete this;
320
321 void ThreadState::visitPersistentRoots(Visitor* visitor)
322 {
323 TRACE_EVENT0("blink_gc", "ThreadState::visitPersistentRoots");
324 Heap::crossThreadPersistentRegion().tracePersistentNodes(visitor);
325
326 for (ThreadState* state : attachedThreads())
327 state->visitPersistents(visitor);
328 }
329
330 void ThreadState::visitStackRoots(Visitor* visitor)
331 {
332 TRACE_EVENT0("blink_gc", "ThreadState::visitStackRoots");
333 for (ThreadState* state : attachedThreads())
334 state->visitStack(visitor);
335 } 292 }
336 293
337 NO_SANITIZE_ADDRESS 294 NO_SANITIZE_ADDRESS
338 void ThreadState::visitAsanFakeStackForPointer(Visitor* visitor, Address ptr) 295 void ThreadState::visitAsanFakeStackForPointer(Visitor* visitor, Address ptr)
339 { 296 {
340 #if defined(ADDRESS_SANITIZER) 297 #if defined(ADDRESS_SANITIZER)
341 Address* start = reinterpret_cast<Address*>(m_startOfStack); 298 Address* start = reinterpret_cast<Address*>(m_startOfStack);
342 Address* end = reinterpret_cast<Address*>(m_endOfStack); 299 Address* end = reinterpret_cast<Address*>(m_endOfStack);
343 Address* fakeFrameStart = nullptr; 300 Address* fakeFrameStart = nullptr;
344 Address* fakeFrameEnd = nullptr; 301 Address* fakeFrameEnd = nullptr;
345 Address* maybeFakeFrame = reinterpret_cast<Address*>(ptr); 302 Address* maybeFakeFrame = reinterpret_cast<Address*>(ptr);
346 Address* realFrameForFakeFrame = 303 Address* realFrameForFakeFrame =
347 reinterpret_cast<Address*>( 304 reinterpret_cast<Address*>(
348 __asan_addr_is_in_fake_stack( 305 __asan_addr_is_in_fake_stack(
349 m_asanFakeStack, maybeFakeFrame, 306 m_asanFakeStack, maybeFakeFrame,
350 reinterpret_cast<void**>(&fakeFrameStart), 307 reinterpret_cast<void**>(&fakeFrameStart),
351 reinterpret_cast<void**>(&fakeFrameEnd))); 308 reinterpret_cast<void**>(&fakeFrameEnd)));
352 if (realFrameForFakeFrame) { 309 if (realFrameForFakeFrame) {
353 // This is a fake frame from the asan fake stack. 310 // This is a fake frame from the asan fake stack.
354 if (realFrameForFakeFrame > end && start > realFrameForFakeFrame) { 311 if (realFrameForFakeFrame > end && start > realFrameForFakeFrame) {
355 // The real stack address for the asan fake frame is 312 // The real stack address for the asan fake frame is
356 // within the stack range that we need to scan so we need 313 // within the stack range that we need to scan so we need
357 // to visit the values in the fake frame. 314 // to visit the values in the fake frame.
358 for (Address* p = fakeFrameStart; p < fakeFrameEnd; ++p) 315 for (Address* p = fakeFrameStart; p < fakeFrameEnd; ++p)
359 Heap::checkAndMarkPointer(visitor, *p); 316 m_heap->checkAndMarkPointer(visitor, *p);
360 } 317 }
361 } 318 }
362 #endif 319 #endif
363 } 320 }
364 321
365 NO_SANITIZE_ADDRESS 322 NO_SANITIZE_ADDRESS
366 void ThreadState::visitStack(Visitor* visitor) 323 void ThreadState::visitStack(Visitor* visitor)
367 { 324 {
368 if (m_stackState == BlinkGC::NoHeapPointersOnStack) 325 if (m_stackState == BlinkGC::NoHeapPointersOnStack)
369 return; 326 return;
(...skipping 15 matching lines...) Expand all
385 for (; current < start; ++current) { 342 for (; current < start; ++current) {
386 Address ptr = *current; 343 Address ptr = *current;
387 #if defined(MEMORY_SANITIZER) 344 #if defined(MEMORY_SANITIZER)
388 // |ptr| may be uninitialized by design. Mark it as initialized to keep 345 // |ptr| may be uninitialized by design. Mark it as initialized to keep
389 // MSan from complaining. 346 // MSan from complaining.
390 // Note: it may be tempting to get rid of |ptr| and simply use |current| 347 // Note: it may be tempting to get rid of |ptr| and simply use |current|
391 // here, but that would be incorrect. We intentionally use a local 348 // here, but that would be incorrect. We intentionally use a local
392 // variable because we don't want to unpoison the original stack. 349 // variable because we don't want to unpoison the original stack.
393 __msan_unpoison(&ptr, sizeof(ptr)); 350 __msan_unpoison(&ptr, sizeof(ptr));
394 #endif 351 #endif
395 Heap::checkAndMarkPointer(visitor, ptr); 352 m_heap->checkAndMarkPointer(visitor, ptr);
396 visitAsanFakeStackForPointer(visitor, ptr); 353 visitAsanFakeStackForPointer(visitor, ptr);
397 } 354 }
398 355
399 for (Address ptr : m_safePointStackCopy) { 356 for (Address ptr : m_safePointStackCopy) {
400 #if defined(MEMORY_SANITIZER) 357 #if defined(MEMORY_SANITIZER)
401 // See the comment above. 358 // See the comment above.
402 __msan_unpoison(&ptr, sizeof(ptr)); 359 __msan_unpoison(&ptr, sizeof(ptr));
403 #endif 360 #endif
404 Heap::checkAndMarkPointer(visitor, ptr); 361 m_heap->checkAndMarkPointer(visitor, ptr);
405 visitAsanFakeStackForPointer(visitor, ptr); 362 visitAsanFakeStackForPointer(visitor, ptr);
406 } 363 }
407 } 364 }
408 365
409 void ThreadState::visitPersistents(Visitor* visitor) 366 void ThreadState::visitPersistents(Visitor* visitor)
410 { 367 {
411 m_persistentRegion->tracePersistentNodes(visitor); 368 m_persistentRegion->tracePersistentNodes(visitor);
412 if (m_traceDOMWrappers) { 369 if (m_traceDOMWrappers) {
413 TRACE_EVENT0("blink_gc", "V8GCController::traceDOMWrappers"); 370 TRACE_EVENT0("blink_gc", "V8GCController::traceDOMWrappers");
414 m_traceDOMWrappers(m_isolate, visitor); 371 m_traceDOMWrappers(m_isolate, visitor);
(...skipping 80 matching lines...) Expand 10 before | Expand all | Expand 10 after
495 452
496 if (isMainThread()) { 453 if (isMainThread()) {
497 double timeForThreadLocalWeakProcessing = WTF::currentTimeMS() - startTi me; 454 double timeForThreadLocalWeakProcessing = WTF::currentTimeMS() - startTi me;
498 DEFINE_STATIC_LOCAL(CustomCountHistogram, timeForWeakHistogram, ("BlinkG C.timeForThreadLocalWeakProcessing", 1, 10 * 1000, 50)); 455 DEFINE_STATIC_LOCAL(CustomCountHistogram, timeForWeakHistogram, ("BlinkG C.timeForThreadLocalWeakProcessing", 1, 10 * 1000, 50));
499 timeForWeakHistogram.count(timeForThreadLocalWeakProcessing); 456 timeForWeakHistogram.count(timeForThreadLocalWeakProcessing);
500 } 457 }
501 } 458 }
502 459
503 size_t ThreadState::totalMemorySize() 460 size_t ThreadState::totalMemorySize()
504 { 461 {
505 return Heap::allocatedObjectSize() + Heap::markedObjectSize() + WTF::Partiti ons::totalSizeOfCommittedPages(); 462 return m_heap->heapStats().allocatedObjectSize() + m_heap->heapStats().marke dObjectSize() + WTF::Partitions::totalSizeOfCommittedPages();
506 } 463 }
507 464
508 size_t ThreadState::estimatedLiveSize(size_t estimationBaseSize, size_t sizeAtLa stGC) 465 size_t ThreadState::estimatedLiveSize(size_t estimationBaseSize, size_t sizeAtLa stGC)
509 { 466 {
510 if (Heap::wrapperCountAtLastGC() == 0) { 467 if (m_heap->heapStats().wrapperCountAtLastGC() == 0) {
511 // We'll reach here only before hitting the first GC. 468 // We'll reach here only before hitting the first GC.
512 return 0; 469 return 0;
513 } 470 }
514 471
515 // (estimated size) = (estimation base size) - (heap size at the last GC) / (# of persistent handles at the last GC) * (# of persistent handles collected si nce the last GC); 472 // (estimated size) = (estimation base size) - (heap size at the last GC) / (# of persistent handles at the last GC) * (# of persistent handles collected si nce the last GC);
516 size_t sizeRetainedByCollectedPersistents = static_cast<size_t>(1.0 * sizeAt LastGC / Heap::wrapperCountAtLastGC() * Heap::collectedWrapperCount()); 473 size_t sizeRetainedByCollectedPersistents = static_cast<size_t>(1.0 * sizeAt LastGC / m_heap->heapStats().wrapperCountAtLastGC() * m_heap->heapStats().collec tedWrapperCount());
517 if (estimationBaseSize < sizeRetainedByCollectedPersistents) 474 if (estimationBaseSize < sizeRetainedByCollectedPersistents)
518 return 0; 475 return 0;
519 return estimationBaseSize - sizeRetainedByCollectedPersistents; 476 return estimationBaseSize - sizeRetainedByCollectedPersistents;
520 } 477 }
521 478
522 double ThreadState::heapGrowingRate() 479 double ThreadState::heapGrowingRate()
523 { 480 {
524 size_t currentSize = Heap::allocatedObjectSize() + Heap::markedObjectSize(); 481 size_t currentSize = m_heap->heapStats().allocatedObjectSize() + m_heap->hea pStats().markedObjectSize();
525 size_t estimatedSize = estimatedLiveSize(Heap::markedObjectSizeAtLastComplet eSweep(), Heap::markedObjectSizeAtLastCompleteSweep()); 482 size_t estimatedSize = estimatedLiveSize(m_heap->heapStats().markedObjectSiz eAtLastCompleteSweep(), m_heap->heapStats().markedObjectSizeAtLastCompleteSweep( ));
526 483
527 // If the estimatedSize is 0, we set a high growing rate to trigger a GC. 484 // If the estimatedSize is 0, we set a high growing rate to trigger a GC.
528 double growingRate = estimatedSize > 0 ? 1.0 * currentSize / estimatedSize : 100; 485 double growingRate = estimatedSize > 0 ? 1.0 * currentSize / estimatedSize : 100;
529 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::heapEsti matedSizeKB", std::min(estimatedSize / 1024, static_cast<size_t>(INT_MAX))); 486 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::heapEsti matedSizeKB", std::min(estimatedSize / 1024, static_cast<size_t>(INT_MAX)));
530 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::heapGrow ingRate", static_cast<int>(100 * growingRate)); 487 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::heapGrow ingRate", static_cast<int>(100 * growingRate));
531 return growingRate; 488 return growingRate;
532 } 489 }
533 490
534 double ThreadState::partitionAllocGrowingRate() 491 double ThreadState::partitionAllocGrowingRate()
535 { 492 {
536 size_t currentSize = WTF::Partitions::totalSizeOfCommittedPages(); 493 size_t currentSize = WTF::Partitions::totalSizeOfCommittedPages();
537 size_t estimatedSize = estimatedLiveSize(currentSize, Heap::partitionAllocSi zeAtLastGC()); 494 size_t estimatedSize = estimatedLiveSize(currentSize, m_heap->heapStats().pa rtitionAllocSizeAtLastGC());
538 495
539 // If the estimatedSize is 0, we set a high growing rate to trigger a GC. 496 // If the estimatedSize is 0, we set a high growing rate to trigger a GC.
540 double growingRate = estimatedSize > 0 ? 1.0 * currentSize / estimatedSize : 100; 497 double growingRate = estimatedSize > 0 ? 1.0 * currentSize / estimatedSize : 100;
541 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::partitio nAllocEstimatedSizeKB", std::min(estimatedSize / 1024, static_cast<size_t>(INT_M AX))); 498 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::partitio nAllocEstimatedSizeKB", std::min(estimatedSize / 1024, static_cast<size_t>(INT_M AX)));
542 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::partitio nAllocGrowingRate", static_cast<int>(100 * growingRate)); 499 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::partitio nAllocGrowingRate", static_cast<int>(100 * growingRate));
543 return growingRate; 500 return growingRate;
544 } 501 }
545 502
546 // TODO(haraken): We should improve the GC heuristics. The heuristics affect 503 // TODO(haraken): We should improve the GC heuristics. The heuristics affect
547 // performance significantly. 504 // performance significantly.
548 bool ThreadState::judgeGCThreshold(size_t totalMemorySizeThreshold, double heapG rowingRateThreshold) 505 bool ThreadState::judgeGCThreshold(size_t totalMemorySizeThreshold, double heapG rowingRateThreshold)
549 { 506 {
550 // If the allocated object size or the total memory size is small, don't tri gger a GC. 507 // If the allocated object size or the total memory size is small, don't tri gger a GC.
551 if (Heap::allocatedObjectSize() < 100 * 1024 || totalMemorySize() < totalMem orySizeThreshold) 508 if (m_heap->heapStats().allocatedObjectSize() < 100 * 1024 || totalMemorySiz e() < totalMemorySizeThreshold)
552 return false; 509 return false;
553 // If the growing rate of Oilpan's heap or PartitionAlloc is high enough, 510 // If the growing rate of Oilpan's heap or PartitionAlloc is high enough,
554 // trigger a GC. 511 // trigger a GC.
555 #if PRINT_HEAP_STATS 512 #if PRINT_HEAP_STATS
556 dataLogF("heapGrowingRate=%.1lf, partitionAllocGrowingRate=%.1lf\n", heapGro wingRate(), partitionAllocGrowingRate()); 513 dataLogF("heapGrowingRate=%.1lf, partitionAllocGrowingRate=%.1lf\n", heapGro wingRate(), partitionAllocGrowingRate());
557 #endif 514 #endif
558 return heapGrowingRate() >= heapGrowingRateThreshold || partitionAllocGrowin gRate() >= heapGrowingRateThreshold; 515 return heapGrowingRate() >= heapGrowingRateThreshold || partitionAllocGrowin gRate() >= heapGrowingRateThreshold;
559 } 516 }
560 517
561 bool ThreadState::shouldScheduleIdleGC() 518 bool ThreadState::shouldScheduleIdleGC()
(...skipping 162 matching lines...) Expand 10 before | Expand all | Expand 10 after
724 } 681 }
725 if (shouldScheduleIdleGC()) { 682 if (shouldScheduleIdleGC()) {
726 #if PRINT_HEAP_STATS 683 #if PRINT_HEAP_STATS
727 dataLogF("Scheduled IdleGC\n"); 684 dataLogF("Scheduled IdleGC\n");
728 #endif 685 #endif
729 scheduleIdleGC(); 686 scheduleIdleGC();
730 return; 687 return;
731 } 688 }
732 } 689 }
733 690
691 ThreadState* ThreadState::fromObject(const void* object)
692 {
693 if (!object)
694 return nullptr;
695 BasePage* page = pageFromObject(object);
696 ASSERT(page);
697 ASSERT(page->arena());
698 return page->arena()->threadState();
699 }
700
734 void ThreadState::performIdleGC(double deadlineSeconds) 701 void ThreadState::performIdleGC(double deadlineSeconds)
735 { 702 {
736 ASSERT(checkThread()); 703 ASSERT(checkThread());
737 ASSERT(isMainThread()); 704 ASSERT(isMainThread());
738 ASSERT(Platform::current()->currentThread()->scheduler()); 705 ASSERT(Platform::current()->currentThread()->scheduler());
739 706
740 if (gcState() != IdleGCScheduled) 707 if (gcState() != IdleGCScheduled)
741 return; 708 return;
742 709
743 double idleDeltaInSeconds = deadlineSeconds - monotonicallyIncreasingTime(); 710 double idleDeltaInSeconds = deadlineSeconds - monotonicallyIncreasingTime();
744 TRACE_EVENT2("blink_gc", "ThreadState::performIdleGC", "idleDeltaInSeconds", idleDeltaInSeconds, "estimatedMarkingTime", Heap::estimatedMarkingTime()); 711 TRACE_EVENT2("blink_gc", "ThreadState::performIdleGC", "idleDeltaInSeconds", idleDeltaInSeconds, "estimatedMarkingTime", m_heap->heapStats().estimatedMarkin gTime());
745 if (idleDeltaInSeconds <= Heap::estimatedMarkingTime() && !Platform::current ()->currentThread()->scheduler()->canExceedIdleDeadlineIfRequired()) { 712 if (idleDeltaInSeconds <= m_heap->heapStats().estimatedMarkingTime() && !Pla tform::current()->currentThread()->scheduler()->canExceedIdleDeadlineIfRequired( )) {
746 // If marking is estimated to take longer than the deadline and we can't 713 // If marking is estimated to take longer than the deadline and we can't
747 // exceed the deadline, then reschedule for the next idle period. 714 // exceed the deadline, then reschedule for the next idle period.
748 scheduleIdleGC(); 715 scheduleIdleGC();
749 return; 716 return;
750 } 717 }
751 718
752 Heap::collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithoutSweep , BlinkGC::IdleGC); 719 Heap::collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithoutSweep , BlinkGC::IdleGC);
753 } 720 }
754 721
755 void ThreadState::performIdleLazySweep(double deadlineSeconds) 722 void ThreadState::performIdleLazySweep(double deadlineSeconds)
(...skipping 12 matching lines...) Expand all
768 return; 735 return;
769 736
770 TRACE_EVENT1("blink_gc,devtools.timeline", "ThreadState::performIdleLazySwee p", "idleDeltaInSeconds", deadlineSeconds - monotonicallyIncreasingTime()); 737 TRACE_EVENT1("blink_gc,devtools.timeline", "ThreadState::performIdleLazySwee p", "idleDeltaInSeconds", deadlineSeconds - monotonicallyIncreasingTime());
771 738
772 bool sweepCompleted = true; 739 bool sweepCompleted = true;
773 SweepForbiddenScope scope(this); 740 SweepForbiddenScope scope(this);
774 { 741 {
775 double startTime = WTF::currentTimeMS(); 742 double startTime = WTF::currentTimeMS();
776 ScriptForbiddenIfMainThreadScope scriptForbiddenScope; 743 ScriptForbiddenIfMainThreadScope scriptForbiddenScope;
777 744
778 for (int i = 0; i < BlinkGC::NumberOfHeaps; i++) { 745 for (int i = 0; i < BlinkGC::NumberOfArenas; i++) {
779 // lazySweepWithDeadline() won't check the deadline until it sweeps 746 // lazySweepWithDeadline() won't check the deadline until it sweeps
780 // 10 pages. So we give a small slack for safety. 747 // 10 pages. So we give a small slack for safety.
781 double slack = 0.001; 748 double slack = 0.001;
782 double remainingBudget = deadlineSeconds - slack - monotonicallyIncr easingTime(); 749 double remainingBudget = deadlineSeconds - slack - monotonicallyIncr easingTime();
783 if (remainingBudget <= 0 || !m_heaps[i]->lazySweepWithDeadline(deadl ineSeconds)) { 750 if (remainingBudget <= 0 || !m_arenas[i]->lazySweepWithDeadline(dead lineSeconds)) {
784 // We couldn't finish the sweeping within the deadline. 751 // We couldn't finish the sweeping within the deadline.
785 // We request another idle task for the remaining sweeping. 752 // We request another idle task for the remaining sweeping.
786 scheduleIdleLazySweep(); 753 scheduleIdleLazySweep();
787 sweepCompleted = false; 754 sweepCompleted = false;
788 break; 755 break;
789 } 756 }
790 } 757 }
791 758
792 accumulateSweepingTime(WTF::currentTimeMS() - startTime); 759 accumulateSweepingTime(WTF::currentTimeMS() - startTime);
793 } 760 }
(...skipping 143 matching lines...) Expand 10 before | Expand all | Expand 10 after
937 // Idle time GC will be scheduled by Blink Scheduler. 904 // Idle time GC will be scheduled by Blink Scheduler.
938 break; 905 break;
939 default: 906 default:
940 break; 907 break;
941 } 908 }
942 } 909 }
943 910
944 void ThreadState::flushHeapDoesNotContainCacheIfNeeded() 911 void ThreadState::flushHeapDoesNotContainCacheIfNeeded()
945 { 912 {
946 if (m_shouldFlushHeapDoesNotContainCache) { 913 if (m_shouldFlushHeapDoesNotContainCache) {
947 Heap::flushHeapDoesNotContainCache(); 914 m_heap->flushHeapDoesNotContainCache();
948 m_shouldFlushHeapDoesNotContainCache = false; 915 m_shouldFlushHeapDoesNotContainCache = false;
949 } 916 }
950 } 917 }
951 918
952 void ThreadState::makeConsistentForGC() 919 void ThreadState::makeConsistentForGC()
953 { 920 {
954 ASSERT(isInGC()); 921 ASSERT(isInGC());
955 TRACE_EVENT0("blink_gc", "ThreadState::makeConsistentForGC"); 922 TRACE_EVENT0("blink_gc", "ThreadState::makeConsistentForGC");
956 for (int i = 0; i < BlinkGC::NumberOfHeaps; ++i) 923 for (int i = 0; i < BlinkGC::NumberOfArenas; ++i)
957 m_heaps[i]->makeConsistentForGC(); 924 m_arenas[i]->makeConsistentForGC();
958 } 925 }
959 926
960 void ThreadState::makeConsistentForMutator() 927 void ThreadState::makeConsistentForMutator()
961 { 928 {
962 ASSERT(isInGC()); 929 ASSERT(isInGC());
963 for (int i = 0; i < BlinkGC::NumberOfHeaps; ++i) 930 for (int i = 0; i < BlinkGC::NumberOfArenas; ++i)
964 m_heaps[i]->makeConsistentForMutator(); 931 m_arenas[i]->makeConsistentForMutator();
965 } 932 }
966 933
967 void ThreadState::preGC() 934 void ThreadState::preGC()
968 { 935 {
969 ASSERT(!isInGC()); 936 ASSERT(!isInGC());
970 setGCState(GCRunning); 937 setGCState(GCRunning);
971 makeConsistentForGC(); 938 makeConsistentForGC();
972 flushHeapDoesNotContainCacheIfNeeded(); 939 flushHeapDoesNotContainCacheIfNeeded();
973 clearHeapAges(); 940 clearHeapAges();
974 } 941 }
975 942
976 void ThreadState::postGC(BlinkGC::GCType gcType) 943 void ThreadState::postGC(BlinkGC::GCType gcType)
977 { 944 {
978 ASSERT(isInGC()); 945 ASSERT(isInGC());
979 for (int i = 0; i < BlinkGC::NumberOfHeaps; i++) 946 for (int i = 0; i < BlinkGC::NumberOfArenas; i++)
980 m_heaps[i]->prepareForSweep(); 947 m_arenas[i]->prepareForSweep();
981 948
982 if (gcType == BlinkGC::GCWithSweep) { 949 if (gcType == BlinkGC::GCWithSweep) {
983 setGCState(EagerSweepScheduled); 950 setGCState(EagerSweepScheduled);
984 } else if (gcType == BlinkGC::GCWithoutSweep) { 951 } else if (gcType == BlinkGC::GCWithoutSweep) {
985 setGCState(LazySweepScheduled); 952 setGCState(LazySweepScheduled);
986 } else { 953 } else {
987 takeSnapshot(SnapshotType::HeapSnapshot); 954 takeSnapshot(SnapshotType::HeapSnapshot);
988 955
989 // This unmarks all marked objects and marks all unmarked objects dead. 956 // This unmarks all marked objects and marks all unmarked objects dead.
990 makeConsistentForMutator(); 957 makeConsistentForMutator();
(...skipping 44 matching lines...) Expand 10 before | Expand all | Expand 10 after
1035 } 1002 }
1036 1003
1037 #if defined(ADDRESS_SANITIZER) 1004 #if defined(ADDRESS_SANITIZER)
1038 void ThreadState::poisonAllHeaps() 1005 void ThreadState::poisonAllHeaps()
1039 { 1006 {
1040 // TODO(Oilpan): enable the poisoning always. 1007 // TODO(Oilpan): enable the poisoning always.
1041 #if ENABLE(OILPAN) 1008 #if ENABLE(OILPAN)
1042 // Unpoison the live objects remaining in the eager heaps.. 1009 // Unpoison the live objects remaining in the eager heaps..
1043 poisonEagerHeap(BlinkGC::ClearPoison); 1010 poisonEagerHeap(BlinkGC::ClearPoison);
1044 // ..along with poisoning all unmarked objects in the other heaps. 1011 // ..along with poisoning all unmarked objects in the other heaps.
1045 for (int i = 1; i < BlinkGC::NumberOfHeaps; i++) 1012 for (int i = 1; i < BlinkGC::NumberOfArenas; i++)
1046 m_heaps[i]->poisonHeap(BlinkGC::UnmarkedOnly, BlinkGC::SetPoison); 1013 m_arenas[i]->poisonHeap(BlinkGC::UnmarkedOnly, BlinkGC::SetPoison);
1047 #endif 1014 #endif
1048 } 1015 }
1049 1016
1050 void ThreadState::poisonEagerHeap(BlinkGC::Poisoning poisoning) 1017 void ThreadState::poisonEagerHeap(BlinkGC::Poisoning poisoning)
1051 { 1018 {
1052 // TODO(Oilpan): enable the poisoning always. 1019 // TODO(Oilpan): enable the poisoning always.
1053 #if ENABLE(OILPAN) 1020 #if ENABLE(OILPAN)
1054 m_heaps[BlinkGC::EagerSweepHeapIndex]->poisonHeap(BlinkGC::MarkedAndUnmarked , poisoning); 1021 m_arenas[BlinkGC::EagerSweepArenaIndex]->poisonHeap(BlinkGC::MarkedAndUnmark ed, poisoning);
1055 #endif 1022 #endif
1056 } 1023 }
1057 #endif 1024 #endif
1058 1025
1059 void ThreadState::eagerSweep() 1026 void ThreadState::eagerSweep()
1060 { 1027 {
1061 ASSERT(checkThread()); 1028 ASSERT(checkThread());
1062 // Some objects need to be finalized promptly and cannot be handled 1029 // Some objects need to be finalized promptly and cannot be handled
1063 // by lazy sweeping. Keep those in a designated heap and sweep it 1030 // by lazy sweeping. Keep those in a designated heap and sweep it
1064 // eagerly. 1031 // eagerly.
1065 ASSERT(isSweepingInProgress()); 1032 ASSERT(isSweepingInProgress());
1066 1033
1067 // Mirroring the completeSweep() condition; see its comment. 1034 // Mirroring the completeSweep() condition; see its comment.
1068 if (sweepForbidden()) 1035 if (sweepForbidden())
1069 return; 1036 return;
1070 1037
1071 SweepForbiddenScope scope(this); 1038 SweepForbiddenScope scope(this);
1072 ScriptForbiddenIfMainThreadScope scriptForbiddenScope; 1039 ScriptForbiddenIfMainThreadScope scriptForbiddenScope;
1073 1040
1074 double startTime = WTF::currentTimeMS(); 1041 double startTime = WTF::currentTimeMS();
1075 m_heaps[BlinkGC::EagerSweepHeapIndex]->completeSweep(); 1042 m_arenas[BlinkGC::EagerSweepArenaIndex]->completeSweep();
1076 accumulateSweepingTime(WTF::currentTimeMS() - startTime); 1043 accumulateSweepingTime(WTF::currentTimeMS() - startTime);
1077 } 1044 }
1078 1045
1079 void ThreadState::completeSweep() 1046 void ThreadState::completeSweep()
1080 { 1047 {
1081 ASSERT(checkThread()); 1048 ASSERT(checkThread());
1082 // If we are not in a sweeping phase, there is nothing to do here. 1049 // If we are not in a sweeping phase, there is nothing to do here.
1083 if (!isSweepingInProgress()) 1050 if (!isSweepingInProgress())
1084 return; 1051 return;
1085 1052
1086 // completeSweep() can be called recursively if finalizers can allocate 1053 // completeSweep() can be called recursively if finalizers can allocate
1087 // memory and the allocation triggers completeSweep(). This check prevents 1054 // memory and the allocation triggers completeSweep(). This check prevents
1088 // the sweeping from being executed recursively. 1055 // the sweeping from being executed recursively.
1089 if (sweepForbidden()) 1056 if (sweepForbidden())
1090 return; 1057 return;
1091 1058
1092 SweepForbiddenScope scope(this); 1059 SweepForbiddenScope scope(this);
1093 { 1060 {
1094 ScriptForbiddenIfMainThreadScope scriptForbiddenScope; 1061 ScriptForbiddenIfMainThreadScope scriptForbiddenScope;
1095 1062
1096 TRACE_EVENT0("blink_gc,devtools.timeline", "ThreadState::completeSweep") ; 1063 TRACE_EVENT0("blink_gc,devtools.timeline", "ThreadState::completeSweep") ;
1097 double startTime = WTF::currentTimeMS(); 1064 double startTime = WTF::currentTimeMS();
1098 1065
1099 static_assert(BlinkGC::EagerSweepHeapIndex == 0, "Eagerly swept heaps mu st be processed first."); 1066 static_assert(BlinkGC::EagerSweepArenaIndex == 0, "Eagerly swept heaps m ust be processed first.");
1100 for (int i = 0; i < BlinkGC::NumberOfHeaps; i++) 1067 for (int i = 0; i < BlinkGC::NumberOfArenas; i++)
1101 m_heaps[i]->completeSweep(); 1068 m_arenas[i]->completeSweep();
1102 1069
1103 double timeForCompleteSweep = WTF::currentTimeMS() - startTime; 1070 double timeForCompleteSweep = WTF::currentTimeMS() - startTime;
1104 accumulateSweepingTime(timeForCompleteSweep); 1071 accumulateSweepingTime(timeForCompleteSweep);
1105 1072
1106 if (isMainThread()) { 1073 if (isMainThread()) {
1107 DEFINE_STATIC_LOCAL(CustomCountHistogram, completeSweepHistogram, (" BlinkGC.CompleteSweep", 1, 10 * 1000, 50)); 1074 DEFINE_STATIC_LOCAL(CustomCountHistogram, completeSweepHistogram, (" BlinkGC.CompleteSweep", 1, 10 * 1000, 50));
1108 completeSweepHistogram.count(timeForCompleteSweep); 1075 completeSweepHistogram.count(timeForCompleteSweep);
1109 } 1076 }
1110 } 1077 }
1111 1078
1112 postSweep(); 1079 postSweep();
1113 } 1080 }
1114 1081
1115 void ThreadState::postSweep() 1082 void ThreadState::postSweep()
1116 { 1083 {
1117 ASSERT(checkThread()); 1084 ASSERT(checkThread());
1118 Heap::reportMemoryUsageForTracing(); 1085 Heap::reportMemoryUsageForTracing();
1119 1086
1120 if (isMainThread()) { 1087 if (isMainThread()) {
1121 double collectionRate = 0; 1088 double collectionRate = 0;
1122 if (Heap::objectSizeAtLastGC() > 0) 1089 if (m_heap->heapStats().objectSizeAtLastGC() > 0)
1123 collectionRate = 1 - 1.0 * Heap::markedObjectSize() / Heap::objectSi zeAtLastGC(); 1090 collectionRate = 1 - 1.0 * m_heap->heapStats().markedObjectSize() / m_heap->heapStats().objectSizeAtLastGC();
1124 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::coll ectionRate", static_cast<int>(100 * collectionRate)); 1091 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::coll ectionRate", static_cast<int>(100 * collectionRate));
1125 1092
1126 #if PRINT_HEAP_STATS 1093 #if PRINT_HEAP_STATS
1127 dataLogF("ThreadState::postSweep (collectionRate=%d%%)\n", static_cast<i nt>(100 * collectionRate)); 1094 dataLogF("ThreadState::postSweep (collectionRate=%d%%)\n", static_cast<i nt>(100 * collectionRate));
1128 #endif 1095 #endif
1129 1096
1130 // Heap::markedObjectSize() may be underestimated here if any other 1097 // Heap::markedObjectSize() may be underestimated here if any other
1131 // thread has not yet finished lazy sweeping. 1098 // thread has not yet finished lazy sweeping.
1132 Heap::setMarkedObjectSizeAtLastCompleteSweep(Heap::markedObjectSize()); 1099 m_heap->heapStats().setMarkedObjectSizeAtLastCompleteSweep(m_heap->heapS tats().markedObjectSize());
1133 1100
1134 DEFINE_STATIC_LOCAL(CustomCountHistogram, objectSizeBeforeGCHistogram, ( "BlinkGC.ObjectSizeBeforeGC", 1, 4 * 1024 * 1024, 50)); 1101 DEFINE_STATIC_LOCAL(CustomCountHistogram, objectSizeBeforeGCHistogram, ( "BlinkGC.ObjectSizeBeforeGC", 1, 4 * 1024 * 1024, 50));
1135 objectSizeBeforeGCHistogram.count(Heap::objectSizeAtLastGC() / 1024); 1102 objectSizeBeforeGCHistogram.count(m_heap->heapStats().objectSizeAtLastGC () / 1024);
1136 DEFINE_STATIC_LOCAL(CustomCountHistogram, objectSizeAfterGCHistogram, (" BlinkGC.ObjectSizeAfterGC", 1, 4 * 1024 * 1024, 50)); 1103 DEFINE_STATIC_LOCAL(CustomCountHistogram, objectSizeAfterGCHistogram, (" BlinkGC.ObjectSizeAfterGC", 1, 4 * 1024 * 1024, 50));
1137 objectSizeAfterGCHistogram.count(Heap::markedObjectSize() / 1024); 1104 objectSizeAfterGCHistogram.count(m_heap->heapStats().markedObjectSize() / 1024);
1138 DEFINE_STATIC_LOCAL(CustomCountHistogram, collectionRateHistogram, ("Bli nkGC.CollectionRate", 1, 100, 20)); 1105 DEFINE_STATIC_LOCAL(CustomCountHistogram, collectionRateHistogram, ("Bli nkGC.CollectionRate", 1, 100, 20));
1139 collectionRateHistogram.count(static_cast<int>(100 * collectionRate)); 1106 collectionRateHistogram.count(static_cast<int>(100 * collectionRate));
1140 DEFINE_STATIC_LOCAL(CustomCountHistogram, timeForSweepHistogram, ("Blink GC.TimeForSweepingAllObjects", 1, 10 * 1000, 50)); 1107 DEFINE_STATIC_LOCAL(CustomCountHistogram, timeForSweepHistogram, ("Blink GC.TimeForSweepingAllObjects", 1, 10 * 1000, 50));
1141 timeForSweepHistogram.count(m_accumulatedSweepingTime); 1108 timeForSweepHistogram.count(m_accumulatedSweepingTime);
1142 } 1109 }
1143 1110
1144 switch (gcState()) { 1111 switch (gcState()) {
1145 case Sweeping: 1112 case Sweeping:
1146 setGCState(NoGCScheduled); 1113 setGCState(NoGCScheduled);
1147 break; 1114 break;
1148 case SweepingAndPreciseGCScheduled: 1115 case SweepingAndPreciseGCScheduled:
1149 setGCState(PreciseGCScheduled); 1116 setGCState(PreciseGCScheduled);
1150 break; 1117 break;
1151 case SweepingAndIdleGCScheduled: 1118 case SweepingAndIdleGCScheduled:
1152 setGCState(NoGCScheduled); 1119 setGCState(NoGCScheduled);
1153 scheduleIdleGC(); 1120 scheduleIdleGC();
1154 break; 1121 break;
1155 default: 1122 default:
1156 ASSERT_NOT_REACHED(); 1123 ASSERT_NOT_REACHED();
1157 } 1124 }
1158 } 1125 }
1159 1126
1160 void ThreadState::prepareForThreadStateTermination() 1127 void ThreadState::prepareForThreadStateTermination()
1161 { 1128 {
1162 ASSERT(checkThread()); 1129 ASSERT(checkThread());
1163 for (int i = 0; i < BlinkGC::NumberOfHeaps; ++i) 1130 for (int i = 0; i < BlinkGC::NumberOfArenas; ++i)
1164 m_heaps[i]->prepareHeapForTermination(); 1131 m_arenas[i]->prepareHeapForTermination();
1165 } 1132 }
1166 1133
1167 #if ENABLE(ASSERT) 1134 #if ENABLE(ASSERT)
1168 BasePage* ThreadState::findPageFromAddress(Address address) 1135 BasePage* ThreadState::findPageFromAddress(Address address)
1169 { 1136 {
1170 for (int i = 0; i < BlinkGC::NumberOfHeaps; ++i) { 1137 for (int i = 0; i < BlinkGC::NumberOfArenas; ++i) {
1171 if (BasePage* page = m_heaps[i]->findPageFromAddress(address)) 1138 if (BasePage* page = m_arenas[i]->findPageFromAddress(address))
1172 return page; 1139 return page;
1173 } 1140 }
1174 return nullptr; 1141 return nullptr;
1175 } 1142 }
1176 #endif 1143 #endif
1177 1144
1178 size_t ThreadState::objectPayloadSizeForTesting() 1145 size_t ThreadState::objectPayloadSizeForTesting()
1179 { 1146 {
1180 size_t objectPayloadSize = 0; 1147 size_t objectPayloadSize = 0;
1181 for (int i = 0; i < BlinkGC::NumberOfHeaps; ++i) 1148 for (int i = 0; i < BlinkGC::NumberOfArenas; ++i)
1182 objectPayloadSize += m_heaps[i]->objectPayloadSizeForTesting(); 1149 objectPayloadSize += m_arenas[i]->objectPayloadSizeForTesting();
1183 return objectPayloadSize; 1150 return objectPayloadSize;
1184 } 1151 }
1185 1152
1186 bool ThreadState::stopThreads()
1187 {
1188 return s_safePointBarrier->parkOthers();
1189 }
1190
1191 void ThreadState::resumeThreads()
1192 {
1193 s_safePointBarrier->resumeOthers();
1194 }
1195
1196 void ThreadState::safePoint(BlinkGC::StackState stackState) 1153 void ThreadState::safePoint(BlinkGC::StackState stackState)
1197 { 1154 {
1198 ASSERT(checkThread()); 1155 ASSERT(checkThread());
1199 Heap::reportMemoryUsageForTracing(); 1156 Heap::reportMemoryUsageForTracing();
1200 1157
1201 runScheduledGC(stackState); 1158 runScheduledGC(stackState);
1202 ASSERT(!m_atSafePoint); 1159 ASSERT(!m_atSafePoint);
1203 m_stackState = stackState; 1160 m_stackState = stackState;
1204 m_atSafePoint = true; 1161 m_atSafePoint = true;
1205 s_safePointBarrier->checkAndPark(this); 1162 m_heap->checkAndPark(this, nullptr);
1206 m_atSafePoint = false; 1163 m_atSafePoint = false;
1207 m_stackState = BlinkGC::HeapPointersOnStack; 1164 m_stackState = BlinkGC::HeapPointersOnStack;
1208 preSweep(); 1165 preSweep();
1209 } 1166 }
1210 1167
1211 #ifdef ADDRESS_SANITIZER 1168 #ifdef ADDRESS_SANITIZER
1212 // When we are running under AddressSanitizer with detect_stack_use_after_return =1 1169 // When we are running under AddressSanitizer with detect_stack_use_after_return =1
1213 // then stack marker obtained from SafePointScope will point into a fake stack. 1170 // then stack marker obtained from SafePointScope will point into a fake stack.
1214 // Detect this case by checking if it falls in between current stack frame 1171 // Detect this case by checking if it falls in between current stack frame
1215 // and stack start and use an arbitrary high enough value for it. 1172 // and stack start and use an arbitrary high enough value for it.
(...skipping 23 matching lines...) Expand all
1239 #ifdef ADDRESS_SANITIZER 1196 #ifdef ADDRESS_SANITIZER
1240 if (stackState == BlinkGC::HeapPointersOnStack) 1197 if (stackState == BlinkGC::HeapPointersOnStack)
1241 scopeMarker = adjustScopeMarkerForAdressSanitizer(scopeMarker); 1198 scopeMarker = adjustScopeMarkerForAdressSanitizer(scopeMarker);
1242 #endif 1199 #endif
1243 ASSERT(stackState == BlinkGC::NoHeapPointersOnStack || scopeMarker); 1200 ASSERT(stackState == BlinkGC::NoHeapPointersOnStack || scopeMarker);
1244 runScheduledGC(stackState); 1201 runScheduledGC(stackState);
1245 ASSERT(!m_atSafePoint); 1202 ASSERT(!m_atSafePoint);
1246 m_atSafePoint = true; 1203 m_atSafePoint = true;
1247 m_stackState = stackState; 1204 m_stackState = stackState;
1248 m_safePointScopeMarker = scopeMarker; 1205 m_safePointScopeMarker = scopeMarker;
1249 s_safePointBarrier->enterSafePoint(this); 1206 m_heap->enterSafePoint(this);
1250 } 1207 }
1251 1208
1252 void ThreadState::leaveSafePoint(SafePointAwareMutexLocker* locker) 1209 void ThreadState::leaveSafePoint(SafePointAwareMutexLocker* locker)
1253 { 1210 {
1254 ASSERT(checkThread()); 1211 ASSERT(checkThread());
1255 ASSERT(m_atSafePoint); 1212 ASSERT(m_atSafePoint);
1256 s_safePointBarrier->leaveSafePoint(this, locker); 1213 m_heap->leaveSafePoint(this, locker);
1257 m_atSafePoint = false; 1214 m_atSafePoint = false;
1258 m_stackState = BlinkGC::HeapPointersOnStack; 1215 m_stackState = BlinkGC::HeapPointersOnStack;
1259 clearSafePointScopeMarker(); 1216 clearSafePointScopeMarker();
1260 preSweep(); 1217 preSweep();
1261 } 1218 }
1262 1219
1263 void ThreadState::reportMemoryToV8() 1220 void ThreadState::reportMemoryToV8()
1264 { 1221 {
1265 if (!m_isolate) 1222 if (!m_isolate)
1266 return; 1223 return;
1267 1224
1268 size_t currentHeapSize = m_allocatedObjectSize + m_markedObjectSize; 1225 size_t currentHeapSize = m_allocatedObjectSize + m_markedObjectSize;
1269 int64_t diff = static_cast<int64_t>(currentHeapSize) - static_cast<int64_t>( m_reportedMemoryToV8); 1226 int64_t diff = static_cast<int64_t>(currentHeapSize) - static_cast<int64_t>( m_reportedMemoryToV8);
1270 m_isolate->AdjustAmountOfExternalAllocatedMemory(diff); 1227 m_isolate->AdjustAmountOfExternalAllocatedMemory(diff);
1271 m_reportedMemoryToV8 = currentHeapSize; 1228 m_reportedMemoryToV8 = currentHeapSize;
1272 } 1229 }
1273 1230
1274 void ThreadState::resetHeapCounters() 1231 void ThreadState::resetHeapCounters()
1275 { 1232 {
1276 m_allocatedObjectSize = 0; 1233 m_allocatedObjectSize = 0;
1277 m_markedObjectSize = 0; 1234 m_markedObjectSize = 0;
1278 } 1235 }
1279 1236
1280 void ThreadState::increaseAllocatedObjectSize(size_t delta) 1237 void ThreadState::increaseAllocatedObjectSize(size_t delta)
1281 { 1238 {
1282 m_allocatedObjectSize += delta; 1239 m_allocatedObjectSize += delta;
1283 Heap::increaseAllocatedObjectSize(delta); 1240 m_heap->heapStats().increaseAllocatedObjectSize(delta);
1284 } 1241 }
1285 1242
1286 void ThreadState::decreaseAllocatedObjectSize(size_t delta) 1243 void ThreadState::decreaseAllocatedObjectSize(size_t delta)
1287 { 1244 {
1288 m_allocatedObjectSize -= delta; 1245 m_allocatedObjectSize -= delta;
1289 Heap::decreaseAllocatedObjectSize(delta); 1246 m_heap->heapStats().decreaseAllocatedObjectSize(delta);
1290 } 1247 }
1291 1248
1292 void ThreadState::increaseMarkedObjectSize(size_t delta) 1249 void ThreadState::increaseMarkedObjectSize(size_t delta)
1293 { 1250 {
1294 m_markedObjectSize += delta; 1251 m_markedObjectSize += delta;
1295 Heap::increaseMarkedObjectSize(delta); 1252 m_heap->heapStats().increaseMarkedObjectSize(delta);
1296 } 1253 }
1297 1254
1298 void ThreadState::copyStackUntilSafePointScope() 1255 void ThreadState::copyStackUntilSafePointScope()
1299 { 1256 {
1300 if (!m_safePointScopeMarker || m_stackState == BlinkGC::NoHeapPointersOnStac k) 1257 if (!m_safePointScopeMarker || m_stackState == BlinkGC::NoHeapPointersOnStac k)
1301 return; 1258 return;
1302 1259
1303 Address* to = reinterpret_cast<Address*>(m_safePointScopeMarker); 1260 Address* to = reinterpret_cast<Address*>(m_safePointScopeMarker);
1304 Address* from = reinterpret_cast<Address*>(m_endOfStack); 1261 Address* from = reinterpret_cast<Address*>(m_endOfStack);
1305 RELEASE_ASSERT(from < to); 1262 RELEASE_ASSERT(from < to);
(...skipping 13 matching lines...) Expand all
1319 for (size_t i = 0; i < slotCount; ++i) { 1276 for (size_t i = 0; i < slotCount; ++i) {
1320 m_safePointStackCopy[i] = from[i]; 1277 m_safePointStackCopy[i] = from[i];
1321 } 1278 }
1322 } 1279 }
1323 1280
1324 void ThreadState::addInterruptor(PassOwnPtr<BlinkGCInterruptor> interruptor) 1281 void ThreadState::addInterruptor(PassOwnPtr<BlinkGCInterruptor> interruptor)
1325 { 1282 {
1326 ASSERT(checkThread()); 1283 ASSERT(checkThread());
1327 SafePointScope scope(BlinkGC::HeapPointersOnStack); 1284 SafePointScope scope(BlinkGC::HeapPointersOnStack);
1328 { 1285 {
1329 MutexLocker locker(threadAttachMutex()); 1286 MutexLocker locker(m_heap->threadAttachMutex());
1330 m_interruptors.append(interruptor); 1287 m_interruptors.append(interruptor);
1331 } 1288 }
1332 } 1289 }
1333 1290
1334 void ThreadState::removeInterruptor(BlinkGCInterruptor* interruptor) 1291 void ThreadState::removeInterruptor(BlinkGCInterruptor* interruptor)
1335 { 1292 {
1336 ASSERT(checkThread()); 1293 ASSERT(checkThread());
1337 SafePointScope scope(BlinkGC::HeapPointersOnStack); 1294 SafePointScope scope(BlinkGC::HeapPointersOnStack);
1338 { 1295 {
1339 MutexLocker locker(threadAttachMutex()); 1296 MutexLocker locker(m_heap->threadAttachMutex());
1340 size_t index = m_interruptors.find(interruptor); 1297 size_t index = m_interruptors.find(interruptor);
1341 RELEASE_ASSERT(index != kNotFound); 1298 RELEASE_ASSERT(index != kNotFound);
1342 m_interruptors.remove(index); 1299 m_interruptors.remove(index);
1343 } 1300 }
1344 } 1301 }
1345 1302
1346 #if defined(LEAK_SANITIZER) 1303 #if defined(LEAK_SANITIZER)
1347 void ThreadState::registerStaticPersistentNode(PersistentNode* node) 1304 void ThreadState::registerStaticPersistentNode(PersistentNode* node)
1348 { 1305 {
1349 if (m_disabledStaticPersistentsRegistration) 1306 if (m_disabledStaticPersistentsRegistration)
(...skipping 16 matching lines...) Expand all
1366 m_disabledStaticPersistentsRegistration++; 1323 m_disabledStaticPersistentsRegistration++;
1367 } 1324 }
1368 1325
1369 void ThreadState::leaveStaticReferenceRegistrationDisabledScope() 1326 void ThreadState::leaveStaticReferenceRegistrationDisabledScope()
1370 { 1327 {
1371 ASSERT(m_disabledStaticPersistentsRegistration); 1328 ASSERT(m_disabledStaticPersistentsRegistration);
1372 m_disabledStaticPersistentsRegistration--; 1329 m_disabledStaticPersistentsRegistration--;
1373 } 1330 }
1374 #endif 1331 #endif
1375 1332
1376 ThreadState::AttachedThreadStateSet& ThreadState::attachedThreads()
1377 {
1378 DEFINE_STATIC_LOCAL(AttachedThreadStateSet, threads, ());
1379 return threads;
1380 }
1381
1382 void ThreadState::lockThreadAttachMutex()
1383 {
1384 threadAttachMutex().lock();
1385 }
1386
1387 void ThreadState::unlockThreadAttachMutex()
1388 {
1389 threadAttachMutex().unlock();
1390 }
1391
1392 void ThreadState::invokePreFinalizers() 1333 void ThreadState::invokePreFinalizers()
1393 { 1334 {
1394 ASSERT(checkThread()); 1335 ASSERT(checkThread());
1395 ASSERT(!sweepForbidden()); 1336 ASSERT(!sweepForbidden());
1396 TRACE_EVENT0("blink_gc", "ThreadState::invokePreFinalizers"); 1337 TRACE_EVENT0("blink_gc", "ThreadState::invokePreFinalizers");
1397 1338
1398 double startTime = WTF::currentTimeMS(); 1339 double startTime = WTF::currentTimeMS();
1399 if (!m_orderedPreFinalizers.isEmpty()) { 1340 if (!m_orderedPreFinalizers.isEmpty()) {
1400 SweepForbiddenScope sweepForbidden(this); 1341 SweepForbiddenScope sweepForbidden(this);
1401 ScriptForbiddenIfMainThreadScope scriptForbidden; 1342 ScriptForbiddenIfMainThreadScope scriptForbidden;
(...skipping 17 matching lines...) Expand all
1419 } 1360 }
1420 if (isMainThread()) { 1361 if (isMainThread()) {
1421 double timeForInvokingPreFinalizers = WTF::currentTimeMS() - startTime; 1362 double timeForInvokingPreFinalizers = WTF::currentTimeMS() - startTime;
1422 DEFINE_STATIC_LOCAL(CustomCountHistogram, preFinalizersHistogram, ("Blin kGC.TimeForInvokingPreFinalizers", 1, 10 * 1000, 50)); 1363 DEFINE_STATIC_LOCAL(CustomCountHistogram, preFinalizersHistogram, ("Blin kGC.TimeForInvokingPreFinalizers", 1, 10 * 1000, 50));
1423 preFinalizersHistogram.count(timeForInvokingPreFinalizers); 1364 preFinalizersHistogram.count(timeForInvokingPreFinalizers);
1424 } 1365 }
1425 } 1366 }
1426 1367
1427 void ThreadState::clearHeapAges() 1368 void ThreadState::clearHeapAges()
1428 { 1369 {
1429 memset(m_heapAges, 0, sizeof(size_t) * BlinkGC::NumberOfHeaps); 1370 memset(m_arenaAges, 0, sizeof(size_t) * BlinkGC::NumberOfArenas);
1430 memset(m_likelyToBePromptlyFreed.get(), 0, sizeof(int) * likelyToBePromptlyF reedArraySize); 1371 memset(m_likelyToBePromptlyFreed.get(), 0, sizeof(int) * likelyToBePromptlyF reedArraySize);
1431 m_currentHeapAges = 0; 1372 m_currentHeapAges = 0;
1432 } 1373 }
1433 1374
1434 int ThreadState::heapIndexOfVectorHeapLeastRecentlyExpanded(int beginHeapIndex, int endHeapIndex) 1375 int ThreadState::arenaIndexOfVectorHeapLeastRecentlyExpanded(int beginArenaIndex , int endArenaIndex)
1435 { 1376 {
1436 size_t minHeapAge = m_heapAges[beginHeapIndex]; 1377 size_t minHeapAge = m_arenaAges[beginArenaIndex];
1437 int heapIndexWithMinHeapAge = beginHeapIndex; 1378 int arenaIndexWithMinHeapAge = beginArenaIndex;
1438 for (int heapIndex = beginHeapIndex + 1; heapIndex <= endHeapIndex; heapInde x++) { 1379 for (int arenaIndex = beginArenaIndex + 1; arenaIndex <= endArenaIndex; aren aIndex++) {
1439 if (m_heapAges[heapIndex] < minHeapAge) { 1380 if (m_arenaAges[arenaIndex] < minHeapAge) {
1440 minHeapAge = m_heapAges[heapIndex]; 1381 minHeapAge = m_arenaAges[arenaIndex];
1441 heapIndexWithMinHeapAge = heapIndex; 1382 arenaIndexWithMinHeapAge = arenaIndex;
1442 } 1383 }
1443 } 1384 }
1444 ASSERT(isVectorHeapIndex(heapIndexWithMinHeapAge)); 1385 ASSERT(isVectorArenaIndex(arenaIndexWithMinHeapAge));
1445 return heapIndexWithMinHeapAge; 1386 return arenaIndexWithMinHeapAge;
1446 } 1387 }
1447 1388
1448 BaseHeap* ThreadState::expandedVectorBackingHeap(size_t gcInfoIndex) 1389 BaseArena* ThreadState::expandedVectorBackingArena(size_t gcInfoIndex)
1449 { 1390 {
1450 ASSERT(checkThread()); 1391 ASSERT(checkThread());
1451 size_t entryIndex = gcInfoIndex & likelyToBePromptlyFreedArrayMask; 1392 size_t entryIndex = gcInfoIndex & likelyToBePromptlyFreedArrayMask;
1452 --m_likelyToBePromptlyFreed[entryIndex]; 1393 --m_likelyToBePromptlyFreed[entryIndex];
1453 int heapIndex = m_vectorBackingHeapIndex; 1394 int arenaIndex = m_vectorBackingArenaIndex;
1454 m_heapAges[heapIndex] = ++m_currentHeapAges; 1395 m_arenaAges[arenaIndex] = ++m_currentHeapAges;
1455 m_vectorBackingHeapIndex = heapIndexOfVectorHeapLeastRecentlyExpanded(BlinkG C::Vector1HeapIndex, BlinkGC::Vector4HeapIndex); 1396 m_vectorBackingArenaIndex = arenaIndexOfVectorHeapLeastRecentlyExpanded(Blin kGC::Vector1ArenaIndex, BlinkGC::Vector4ArenaIndex);
1456 return m_heaps[heapIndex]; 1397 return m_arenas[arenaIndex];
1457 } 1398 }
1458 1399
1459 void ThreadState::allocationPointAdjusted(int heapIndex) 1400 void ThreadState::allocationPointAdjusted(int arenaIndex)
1460 { 1401 {
1461 m_heapAges[heapIndex] = ++m_currentHeapAges; 1402 m_arenaAges[arenaIndex] = ++m_currentHeapAges;
1462 if (m_vectorBackingHeapIndex == heapIndex) 1403 if (m_vectorBackingArenaIndex == arenaIndex)
1463 m_vectorBackingHeapIndex = heapIndexOfVectorHeapLeastRecentlyExpanded(Bl inkGC::Vector1HeapIndex, BlinkGC::Vector4HeapIndex); 1404 m_vectorBackingArenaIndex = arenaIndexOfVectorHeapLeastRecentlyExpanded( BlinkGC::Vector1ArenaIndex, BlinkGC::Vector4ArenaIndex);
1464 } 1405 }
1465 1406
1466 void ThreadState::promptlyFreed(size_t gcInfoIndex) 1407 void ThreadState::promptlyFreed(size_t gcInfoIndex)
1467 { 1408 {
1468 ASSERT(checkThread()); 1409 ASSERT(checkThread());
1469 size_t entryIndex = gcInfoIndex & likelyToBePromptlyFreedArrayMask; 1410 size_t entryIndex = gcInfoIndex & likelyToBePromptlyFreedArrayMask;
1470 // See the comment in vectorBackingHeap() for why this is +3. 1411 // See the comment in vectorBackingArena() for why this is +3.
1471 m_likelyToBePromptlyFreed[entryIndex] += 3; 1412 m_likelyToBePromptlyFreed[entryIndex] += 3;
1472 } 1413 }
1473 1414
1474 void ThreadState::takeSnapshot(SnapshotType type) 1415 void ThreadState::takeSnapshot(SnapshotType type)
1475 { 1416 {
1476 ASSERT(isInGC()); 1417 ASSERT(isInGC());
1477 1418
1478 // 0 is used as index for freelist entries. Objects are indexed 1 to 1419 // 0 is used as index for freelist entries. Objects are indexed 1 to
1479 // gcInfoIndex. 1420 // gcInfoIndex.
1480 GCSnapshotInfo info(GCInfoTable::gcInfoIndex() + 1); 1421 GCSnapshotInfo info(GCInfoTable::gcInfoIndex() + 1);
1481 String threadDumpName = String::format("blink_gc/thread_%lu", static_cast<un signed long>(m_thread)); 1422 String threadDumpName = String::format("blink_gc/thread_%lu", static_cast<un signed long>(m_thread));
1482 const String heapsDumpName = threadDumpName + "/heaps"; 1423 const String heapsDumpName = threadDumpName + "/heaps";
1483 const String classesDumpName = threadDumpName + "/classes"; 1424 const String classesDumpName = threadDumpName + "/classes";
1484 1425
1485 int numberOfHeapsReported = 0; 1426 int numberOfHeapsReported = 0;
1486 #define SNAPSHOT_HEAP(HeapType) \ 1427 #define SNAPSHOT_HEAP(HeapType) \
1487 { \ 1428 { \
1488 numberOfHeapsReported++; \ 1429 numberOfHeapsReported++; \
1489 switch (type) { \ 1430 switch (type) { \
1490 case SnapshotType::HeapSnapshot: \ 1431 case SnapshotType::HeapSnapshot: \
1491 m_heaps[BlinkGC::HeapType##HeapIndex]->takeSnapshot(heapsDumpName + "/" #HeapType, info); \ 1432 m_arenas[BlinkGC::HeapType##ArenaIndex]->takeSnapshot(heapsDumpName + "/" #HeapType, info); \
1492 break; \ 1433 break; \
1493 case SnapshotType::FreelistSnapshot: \ 1434 case SnapshotType::FreelistSnapshot: \
1494 m_heaps[BlinkGC::HeapType##HeapIndex]->takeFreelistSnapshot(heapsDum pName + "/" #HeapType); \ 1435 m_arenas[BlinkGC::HeapType##ArenaIndex]->takeFreelistSnapshot(heapsD umpName + "/" #HeapType); \
1495 break; \ 1436 break; \
1496 default: \ 1437 default: \
1497 ASSERT_NOT_REACHED(); \ 1438 ASSERT_NOT_REACHED(); \
1498 } \ 1439 } \
1499 } 1440 }
1500 1441
1501 SNAPSHOT_HEAP(NormalPage1); 1442 SNAPSHOT_HEAP(NormalPage1);
1502 SNAPSHOT_HEAP(NormalPage2); 1443 SNAPSHOT_HEAP(NormalPage2);
1503 SNAPSHOT_HEAP(NormalPage3); 1444 SNAPSHOT_HEAP(NormalPage3);
1504 SNAPSHOT_HEAP(NormalPage4); 1445 SNAPSHOT_HEAP(NormalPage4);
1505 SNAPSHOT_HEAP(EagerSweep); 1446 SNAPSHOT_HEAP(EagerSweep);
1506 SNAPSHOT_HEAP(Vector1); 1447 SNAPSHOT_HEAP(Vector1);
1507 SNAPSHOT_HEAP(Vector2); 1448 SNAPSHOT_HEAP(Vector2);
1508 SNAPSHOT_HEAP(Vector3); 1449 SNAPSHOT_HEAP(Vector3);
1509 SNAPSHOT_HEAP(Vector4); 1450 SNAPSHOT_HEAP(Vector4);
1510 SNAPSHOT_HEAP(InlineVector); 1451 SNAPSHOT_HEAP(InlineVector);
1511 SNAPSHOT_HEAP(HashTable); 1452 SNAPSHOT_HEAP(HashTable);
1512 SNAPSHOT_HEAP(LargeObject); 1453 SNAPSHOT_HEAP(LargeObject);
1513 FOR_EACH_TYPED_HEAP(SNAPSHOT_HEAP); 1454 FOR_EACH_TYPED_HEAP(SNAPSHOT_HEAP);
1514 1455
1515 ASSERT(numberOfHeapsReported == BlinkGC::NumberOfHeaps); 1456 ASSERT(numberOfHeapsReported == BlinkGC::NumberOfArenas);
1516 1457
1517 #undef SNAPSHOT_HEAP 1458 #undef SNAPSHOT_HEAP
1518 1459
1519 if (type == SnapshotType::FreelistSnapshot) 1460 if (type == SnapshotType::FreelistSnapshot)
1520 return; 1461 return;
1521 1462
1522 size_t totalLiveCount = 0; 1463 size_t totalLiveCount = 0;
1523 size_t totalDeadCount = 0; 1464 size_t totalDeadCount = 0;
1524 size_t totalLiveSize = 0; 1465 size_t totalLiveSize = 0;
1525 size_t totalDeadSize = 0; 1466 size_t totalDeadSize = 0;
1526 for (size_t gcInfoIndex = 1; gcInfoIndex <= GCInfoTable::gcInfoIndex(); ++gc InfoIndex) { 1467 for (size_t gcInfoIndex = 1; gcInfoIndex <= GCInfoTable::gcInfoIndex(); ++gc InfoIndex) {
1527 totalLiveCount += info.liveCount[gcInfoIndex]; 1468 totalLiveCount += info.liveCount[gcInfoIndex];
1528 totalDeadCount += info.deadCount[gcInfoIndex]; 1469 totalDeadCount += info.deadCount[gcInfoIndex];
1529 totalLiveSize += info.liveSize[gcInfoIndex]; 1470 totalLiveSize += info.liveSize[gcInfoIndex];
1530 totalDeadSize += info.deadSize[gcInfoIndex]; 1471 totalDeadSize += info.deadSize[gcInfoIndex];
1531 } 1472 }
1532 1473
1533 WebMemoryAllocatorDump* threadDump = BlinkGCMemoryDumpProvider::instance()-> createMemoryAllocatorDumpForCurrentGC(threadDumpName); 1474 WebMemoryAllocatorDump* threadDump = BlinkGCMemoryDumpProvider::instance()-> createMemoryAllocatorDumpForCurrentGC(threadDumpName);
1534 threadDump->addScalar("live_count", "objects", totalLiveCount); 1475 threadDump->addScalar("live_count", "objects", totalLiveCount);
1535 threadDump->addScalar("dead_count", "objects", totalDeadCount); 1476 threadDump->addScalar("dead_count", "objects", totalDeadCount);
1536 threadDump->addScalar("live_size", "bytes", totalLiveSize); 1477 threadDump->addScalar("live_size", "bytes", totalLiveSize);
1537 threadDump->addScalar("dead_size", "bytes", totalDeadSize); 1478 threadDump->addScalar("dead_size", "bytes", totalDeadSize);
1538 1479
1539 WebMemoryAllocatorDump* heapsDump = BlinkGCMemoryDumpProvider::instance()->c reateMemoryAllocatorDumpForCurrentGC(heapsDumpName); 1480 WebMemoryAllocatorDump* heapsDump = BlinkGCMemoryDumpProvider::instance()->c reateMemoryAllocatorDumpForCurrentGC(heapsDumpName);
1540 WebMemoryAllocatorDump* classesDump = BlinkGCMemoryDumpProvider::instance()- >createMemoryAllocatorDumpForCurrentGC(classesDumpName); 1481 WebMemoryAllocatorDump* classesDump = BlinkGCMemoryDumpProvider::instance()- >createMemoryAllocatorDumpForCurrentGC(classesDumpName);
1541 BlinkGCMemoryDumpProvider::instance()->currentProcessMemoryDump()->addOwners hipEdge(classesDump->guid(), heapsDump->guid()); 1482 BlinkGCMemoryDumpProvider::instance()->currentProcessMemoryDump()->addOwners hipEdge(classesDump->guid(), heapsDump->guid());
1542 } 1483 }
1543 1484
1544 } // namespace blink 1485 } // namespace blink
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698