OLD | NEW |
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 159 matching lines...)
170 m_invalidateDeadObjectsInWrappersMarkingDeque(nullptr), | 170 m_invalidateDeadObjectsInWrappersMarkingDeque(nullptr), |
171 #if defined(ADDRESS_SANITIZER) | 171 #if defined(ADDRESS_SANITIZER) |
172 m_asanFakeStack(__asan_get_current_fake_stack()), | 172 m_asanFakeStack(__asan_get_current_fake_stack()), |
173 #endif | 173 #endif |
174 #if defined(LEAK_SANITIZER) | 174 #if defined(LEAK_SANITIZER) |
175 m_disabledStaticPersistentsRegistration(0), | 175 m_disabledStaticPersistentsRegistration(0), |
176 #endif | 176 #endif |
177 m_allocatedObjectSize(0), | 177 m_allocatedObjectSize(0), |
178 m_markedObjectSize(0), | 178 m_markedObjectSize(0), |
179 m_reportedMemoryToV8(0) { | 179 m_reportedMemoryToV8(0) { |
180 ASSERT(checkThread()); | 180 DCHECK(checkThread()); |
181 ASSERT(!**s_threadSpecific); | 181 DCHECK(!**s_threadSpecific); |
182 **s_threadSpecific = this; | 182 **s_threadSpecific = this; |
183 | 183 |
184 switch (m_threadHeapMode) { | 184 switch (m_threadHeapMode) { |
185 case BlinkGC::MainThreadHeapMode: | 185 case BlinkGC::MainThreadHeapMode: |
186 if (isMainThread()) { | 186 if (isMainThread()) { |
187 s_mainThreadStackStart = | 187 s_mainThreadStackStart = |
188 reinterpret_cast<uintptr_t>(m_startOfStack) - sizeof(void*); | 188 reinterpret_cast<uintptr_t>(m_startOfStack) - sizeof(void*); |
189 size_t underestimatedStackSize = | 189 size_t underestimatedStackSize = |
190 StackFrameDepth::getUnderestimatedStackSize(); | 190 StackFrameDepth::getUnderestimatedStackSize(); |
191 if (underestimatedStackSize > sizeof(void*)) | 191 if (underestimatedStackSize > sizeof(void*)) |
192 s_mainThreadUnderestimatedStackSize = | 192 s_mainThreadUnderestimatedStackSize = |
193 underestimatedStackSize - sizeof(void*); | 193 underestimatedStackSize - sizeof(void*); |
194 m_heap = new ThreadHeap(); | 194 m_heap = new ThreadHeap(); |
195 } else { | 195 } else { |
196 m_heap = &ThreadState::mainThreadState()->heap(); | 196 m_heap = &ThreadState::mainThreadState()->heap(); |
197 } | 197 } |
198 break; | 198 break; |
199 case BlinkGC::PerThreadHeapMode: | 199 case BlinkGC::PerThreadHeapMode: |
200 m_heap = new ThreadHeap(); | 200 m_heap = new ThreadHeap(); |
201 break; | 201 break; |
202 } | 202 } |
203 ASSERT(m_heap); | 203 DCHECK(m_heap); |
204 m_heap->attach(this); | 204 m_heap->attach(this); |
205 | 205 |
206 for (int arenaIndex = 0; arenaIndex < BlinkGC::LargeObjectArenaIndex; | 206 for (int arenaIndex = 0; arenaIndex < BlinkGC::LargeObjectArenaIndex; |
207 arenaIndex++) | 207 arenaIndex++) |
208 m_arenas[arenaIndex] = new NormalPageArena(this, arenaIndex); | 208 m_arenas[arenaIndex] = new NormalPageArena(this, arenaIndex); |
209 m_arenas[BlinkGC::LargeObjectArenaIndex] = | 209 m_arenas[BlinkGC::LargeObjectArenaIndex] = |
210 new LargeObjectArena(this, BlinkGC::LargeObjectArenaIndex); | 210 new LargeObjectArena(this, BlinkGC::LargeObjectArenaIndex); |
211 | 211 |
212 m_likelyToBePromptlyFreed = | 212 m_likelyToBePromptlyFreed = |
213 wrapArrayUnique(new int[likelyToBePromptlyFreedArraySize]); | 213 wrapArrayUnique(new int[likelyToBePromptlyFreedArraySize]); |
214 clearArenaAges(); | 214 clearArenaAges(); |
215 } | 215 } |
216 | 216 |
217 ThreadState::~ThreadState() { | 217 ThreadState::~ThreadState() { |
218 ASSERT(checkThread()); | 218 DCHECK(checkThread()); |
219 for (int i = 0; i < BlinkGC::NumberOfArenas; ++i) | 219 for (int i = 0; i < BlinkGC::NumberOfArenas; ++i) |
220 delete m_arenas[i]; | 220 delete m_arenas[i]; |
221 | 221 |
222 **s_threadSpecific = nullptr; | 222 **s_threadSpecific = nullptr; |
223 if (isMainThread()) { | 223 if (isMainThread()) { |
224 s_mainThreadStackStart = 0; | 224 s_mainThreadStackStart = 0; |
225 s_mainThreadUnderestimatedStackSize = 0; | 225 s_mainThreadUnderestimatedStackSize = 0; |
226 } | 226 } |
227 } | 227 } |
228 | 228 |
229 #if OS(WIN) && COMPILER(MSVC) | 229 #if OS(WIN) && COMPILER(MSVC) |
230 size_t ThreadState::threadStackSize() { | 230 size_t ThreadState::threadStackSize() { |
231 if (m_threadStackSize) | 231 if (m_threadStackSize) |
232 return m_threadStackSize; | 232 return m_threadStackSize; |
233 | 233 |
234 // Notice that we cannot use the TIB's StackLimit for the stack end, as it | 234 // Notice that we cannot use the TIB's StackLimit for the stack end, as it |
235 // tracks the end of the committed range. We're after the end of the reserved | 235 // tracks the end of the committed range. We're after the end of the reserved |
236 // stack area (most of which will be uncommitted, most times.) | 236 // stack area (most of which will be uncommitted, most times.) |
237 MEMORY_BASIC_INFORMATION stackInfo; | 237 MEMORY_BASIC_INFORMATION stackInfo; |
238 memset(&stackInfo, 0, sizeof(MEMORY_BASIC_INFORMATION)); | 238 memset(&stackInfo, 0, sizeof(MEMORY_BASIC_INFORMATION)); |
239 size_t resultSize = | 239 size_t resultSize = |
240 VirtualQuery(&stackInfo, &stackInfo, sizeof(MEMORY_BASIC_INFORMATION)); | 240 VirtualQuery(&stackInfo, &stackInfo, sizeof(MEMORY_BASIC_INFORMATION)); |
241 DCHECK_GE(resultSize, sizeof(MEMORY_BASIC_INFORMATION)); | 241 DCHECK_GE(resultSize, sizeof(MEMORY_BASIC_INFORMATION)); |
242 Address stackEnd = reinterpret_cast<Address>(stackInfo.AllocationBase); | 242 Address stackEnd = reinterpret_cast<Address>(stackInfo.AllocationBase); |
243 | 243 |
244 Address stackStart = | 244 Address stackStart = |
245 reinterpret_cast<Address>(StackFrameDepth::getStackStart()); | 245 reinterpret_cast<Address>(StackFrameDepth::getStackStart()); |
246 RELEASE_ASSERT(stackStart && stackStart > stackEnd); | 246 CHECK(stackStart && stackStart > stackEnd); |
247 m_threadStackSize = static_cast<size_t>(stackStart - stackEnd); | 247 m_threadStackSize = static_cast<size_t>(stackStart - stackEnd); |
248 // When the third last page of the reserved stack is accessed as a | 248 // When the third last page of the reserved stack is accessed as a |
249 // guard page, the second last page will be committed (along with removing | 249 // guard page, the second last page will be committed (along with removing |
250 // the guard bit on the third last) _and_ a stack overflow exception | 250 // the guard bit on the third last) _and_ a stack overflow exception |
251 // is raised. | 251 // is raised. |
252 // | 252 // |
253 // We have zero interest in running into stack overflow exceptions while | 253 // We have zero interest in running into stack overflow exceptions while |
254 // marking objects, so simply consider the last three pages + one above | 254 // marking objects, so simply consider the last three pages + one above |
255 // as off-limits and adjust the reported stack size accordingly. | 255 // as off-limits and adjust the reported stack size accordingly. |
256 // | 256 // |
257 // http://blogs.msdn.com/b/satyem/archive/2012/08/13/thread-s-stack-memory-management.aspx | 257 // http://blogs.msdn.com/b/satyem/archive/2012/08/13/thread-s-stack-memory-management.aspx |
258 // explains the details. | 258 // explains the details. |
259 RELEASE_ASSERT(m_threadStackSize > 4 * 0x1000); | 259 CHECK(m_threadStackSize > 4 * 0x1000); |
260 m_threadStackSize -= 4 * 0x1000; | 260 m_threadStackSize -= 4 * 0x1000; |
261 return m_threadStackSize; | 261 return m_threadStackSize; |
262 } | 262 } |
263 #endif | 263 #endif |
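The reserved-versus-committed distinction in threadStackSize() above is easy to probe in isolation. A minimal standalone sketch of the same VirtualQuery technique (Windows-only; the function name and the use of a local variable's address as the stack top are illustrative assumptions):

    #include <windows.h>
    #include <cstdint>

    // VirtualQuery on any stack address reports AllocationBase == the low
    // (reserved) end of the stack; the address of a local approximates the top.
    static size_t reservedStackSizeBelowHere() {
      MEMORY_BASIC_INFORMATION info = {};
      VirtualQuery(&info, &info, sizeof(info));  // query the region holding `info`
      uintptr_t stackEnd = reinterpret_cast<uintptr_t>(info.AllocationBase);
      uintptr_t here = reinterpret_cast<uintptr_t>(&info);
      return static_cast<size_t>(here - stackEnd);
    }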
264 | 264 |
265 void ThreadState::attachMainThread() { | 265 void ThreadState::attachMainThread() { |
266 RELEASE_ASSERT(!ProcessHeap::s_shutdownComplete); | 266 CHECK(!ProcessHeap::s_shutdownComplete); |
267 s_threadSpecific = new WTF::ThreadSpecific<ThreadState*>(); | 267 s_threadSpecific = new WTF::ThreadSpecific<ThreadState*>(); |
268 new (s_mainThreadStateStorage) ThreadState(BlinkGC::MainThreadHeapMode); | 268 new (s_mainThreadStateStorage) ThreadState(BlinkGC::MainThreadHeapMode); |
269 } | 269 } |
270 | 270 |
271 void ThreadState::attachCurrentThread(BlinkGC::ThreadHeapMode threadHeapMode) { | 271 void ThreadState::attachCurrentThread(BlinkGC::ThreadHeapMode threadHeapMode) { |
272 RELEASE_ASSERT(!ProcessHeap::s_shutdownComplete); | 272 CHECK(!ProcessHeap::s_shutdownComplete); |
273 new ThreadState(threadHeapMode); | 273 new ThreadState(threadHeapMode); |
274 } | 274 } |
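attachMainThread() above constructs the main thread's ThreadState with placement new into s_mainThreadStateStorage rather than plain new, so the object never lives on the heap and is torn down explicitly (detachMainThread() invokes the destructor by hand, as seen further down). The idiom in isolation, with a hypothetical Widget type standing in for ThreadState:

    #include <cstdint>
    #include <new>

    struct Widget {
      explicit Widget(int) {}
    };

    // Raw, suitably aligned static storage; nothing is constructed at startup.
    alignas(Widget) static uint8_t storage[sizeof(Widget)];

    void attach() { new (storage) Widget(42); }                       // construct in place
    void detach() { reinterpret_cast<Widget*>(storage)->~Widget(); }  // destroy by hand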
275 | 275 |
276 void ThreadState::cleanupPages() { | 276 void ThreadState::cleanupPages() { |
277 ASSERT(checkThread()); | 277 DCHECK(checkThread()); |
278 for (int i = 0; i < BlinkGC::NumberOfArenas; ++i) | 278 for (int i = 0; i < BlinkGC::NumberOfArenas; ++i) |
279 m_arenas[i]->cleanupPages(); | 279 m_arenas[i]->cleanupPages(); |
280 } | 280 } |
281 | 281 |
282 void ThreadState::runTerminationGC() { | 282 void ThreadState::runTerminationGC() { |
283 if (isMainThread()) { | 283 if (isMainThread()) { |
284 cleanupPages(); | 284 cleanupPages(); |
285 return; | 285 return; |
286 } | 286 } |
287 ASSERT(checkThread()); | 287 DCHECK(checkThread()); |
288 | 288 |
289 // Finish sweeping. | 289 // Finish sweeping. |
290 completeSweep(); | 290 completeSweep(); |
291 | 291 |
292 releaseStaticPersistentNodes(); | 292 releaseStaticPersistentNodes(); |
293 | 293 |
294 // From here on ignore all conservatively discovered | 294 // From here on ignore all conservatively discovered |
295 // pointers into the heap owned by this thread. | 295 // pointers into the heap owned by this thread. |
296 m_isTerminating = true; | 296 m_isTerminating = true; |
297 | 297 |
298 // Set the terminate flag on all heap pages of this thread. This is used to | 298 // Set the terminate flag on all heap pages of this thread. This is used to |
299 // ensure we don't trace pages on other threads that are not part of the | 299 // ensure we don't trace pages on other threads that are not part of the |
300 // thread local GC. | 300 // thread local GC. |
301 prepareForThreadStateTermination(); | 301 prepareForThreadStateTermination(); |
302 | 302 |
303 ProcessHeap::crossThreadPersistentRegion().prepareForThreadStateTermination( | 303 ProcessHeap::crossThreadPersistentRegion().prepareForThreadStateTermination( |
304 this); | 304 this); |
305 | 305 |
306 // Do thread-local GCs as long as the count of thread-local Persistents | 306 // Do thread-local GCs as long as the count of thread-local Persistents |
307 // changes and is above zero. | 307 // changes and is above zero. |
308 int oldCount = -1; | 308 int oldCount = -1; |
309 int currentCount = getPersistentRegion()->numberOfPersistents(); | 309 int currentCount = getPersistentRegion()->numberOfPersistents(); |
310 ASSERT(currentCount >= 0); | 310 DCHECK_GE(currentCount, 0); |
311 while (currentCount != oldCount) { | 311 while (currentCount != oldCount) { |
312 collectGarbageForTerminatingThread(); | 312 collectGarbageForTerminatingThread(); |
313 // Release the thread-local static persistents that were | 313 // Release the thread-local static persistents that were |
314 // instantiated while running the termination GC. | 314 // instantiated while running the termination GC. |
315 releaseStaticPersistentNodes(); | 315 releaseStaticPersistentNodes(); |
316 oldCount = currentCount; | 316 oldCount = currentCount; |
317 currentCount = getPersistentRegion()->numberOfPersistents(); | 317 currentCount = getPersistentRegion()->numberOfPersistents(); |
318 } | 318 } |
319 // We should not have any persistents left when getting to this point; | 319 // We should not have any persistents left when getting to this point; |
320 // if we do, it is probably a bug, so add a debug ASSERT to catch this. | 320 // if we do, it is probably a bug, so add a debug DCHECK to catch this. |
321 ASSERT(!currentCount); | 321 DCHECK(!currentCount); |
322 // All pre-finalizers should have been consumed. | 322 // All pre-finalizers should have been consumed. |
323 ASSERT(m_orderedPreFinalizers.isEmpty()); | 323 DCHECK(m_orderedPreFinalizers.isEmpty()); |
324 RELEASE_ASSERT(gcState() == NoGCScheduled); | 324 CHECK(gcState() == NoGCScheduled); |
325 | 325 |
326 // Add pages to the orphaned page pool to ensure any global GCs from this | 326 // Add pages to the orphaned page pool to ensure any global GCs from this |
327 // point on will not trace objects on this thread's arenas. | 327 // point on will not trace objects on this thread's arenas. |
328 cleanupPages(); | 328 cleanupPages(); |
329 } | 329 } |
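The persistents loop in runTerminationGC() is a fixed-point iteration: a termination GC can run destructors that release or instantiate further thread-local static persistents, so a single pass is not necessarily enough. The pattern in isolation (a standalone sketch, not Blink code):

    #include <functional>

    // Repeat `action` until `count` stops changing between iterations.
    void runUntilStable(const std::function<int()>& count,
                        const std::function<void()>& action) {
      int oldCount = -1;
      int currentCount = count();
      while (currentCount != oldCount) {
        action();
        oldCount = currentCount;
        currentCount = count();
      }
    }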
330 | 330 |
331 void ThreadState::cleanupMainThread() { | 331 void ThreadState::cleanupMainThread() { |
332 ASSERT(isMainThread()); | 332 DCHECK(isMainThread()); |
333 | 333 |
334 releaseStaticPersistentNodes(); | 334 releaseStaticPersistentNodes(); |
335 | 335 |
336 // Finish sweeping before shutting down V8. Otherwise, some destructor | 336 // Finish sweeping before shutting down V8. Otherwise, some destructor |
337 // may access V8 and cause crashes. | 337 // may access V8 and cause crashes. |
338 completeSweep(); | 338 completeSweep(); |
339 | 339 |
340 // It is unsafe to trigger GCs after this point because some | 340 // It is unsafe to trigger GCs after this point because some |
341 // destructor may access already-detached V8 and cause crashes. | 341 // destructor may access already-detached V8 and cause crashes. |
342 // Also it is useless. So we forbid GCs. | 342 // Also it is useless. So we forbid GCs. |
343 enterGCForbiddenScope(); | 343 enterGCForbiddenScope(); |
344 } | 344 } |
345 | 345 |
346 void ThreadState::detachMainThread() { | 346 void ThreadState::detachMainThread() { |
347 // Enter a safe point before trying to acquire threadAttachMutex | 347 // Enter a safe point before trying to acquire threadAttachMutex |
348 // to avoid deadlock if another thread is preparing for GC, has acquired | 348 // to avoid deadlock if another thread is preparing for GC, has acquired |
349 // threadAttachMutex and is waiting for other threads to pause or reach a | 349 // threadAttachMutex and is waiting for other threads to pause or reach a |
350 // safepoint. | 350 // safepoint. |
351 ThreadState* state = mainThreadState(); | 351 ThreadState* state = mainThreadState(); |
352 ASSERT(!state->isSweepingInProgress()); | 352 DCHECK(!state->isSweepingInProgress()); |
353 | 353 |
354 state->heap().detach(state); | 354 state->heap().detach(state); |
355 state->~ThreadState(); | 355 state->~ThreadState(); |
356 } | 356 } |
357 | 357 |
358 void ThreadState::detachCurrentThread() { | 358 void ThreadState::detachCurrentThread() { |
359 ThreadState* state = current(); | 359 ThreadState* state = current(); |
360 state->heap().detach(state); | 360 state->heap().detach(state); |
361 RELEASE_ASSERT(state->gcState() == ThreadState::NoGCScheduled); | 361 CHECK(state->gcState() == ThreadState::NoGCScheduled); |
362 delete state; | 362 delete state; |
363 } | 363 } |
364 | 364 |
365 NO_SANITIZE_ADDRESS | 365 NO_SANITIZE_ADDRESS |
366 void ThreadState::visitAsanFakeStackForPointer(Visitor* visitor, Address ptr) { | 366 void ThreadState::visitAsanFakeStackForPointer(Visitor* visitor, Address ptr) { |
367 #if defined(ADDRESS_SANITIZER) | 367 #if defined(ADDRESS_SANITIZER) |
368 Address* start = reinterpret_cast<Address*>(m_startOfStack); | 368 Address* start = reinterpret_cast<Address*>(m_startOfStack); |
369 Address* end = reinterpret_cast<Address*>(m_endOfStack); | 369 Address* end = reinterpret_cast<Address*>(m_endOfStack); |
370 Address* fakeFrameStart = nullptr; | 370 Address* fakeFrameStart = nullptr; |
371 Address* fakeFrameEnd = nullptr; | 371 Address* fakeFrameEnd = nullptr; |
(...skipping 77 matching lines...)
449 liveSize(Vector<size_t>(numObjectTypes)), | 449 liveSize(Vector<size_t>(numObjectTypes)), |
450 deadSize(Vector<size_t>(numObjectTypes)) {} | 450 deadSize(Vector<size_t>(numObjectTypes)) {} |
451 | 451 |
452 void ThreadState::pushThreadLocalWeakCallback(void* object, | 452 void ThreadState::pushThreadLocalWeakCallback(void* object, |
453 WeakCallback callback) { | 453 WeakCallback callback) { |
454 CallbackStack::Item* slot = m_threadLocalWeakCallbackStack->allocateEntry(); | 454 CallbackStack::Item* slot = m_threadLocalWeakCallbackStack->allocateEntry(); |
455 *slot = CallbackStack::Item(object, callback); | 455 *slot = CallbackStack::Item(object, callback); |
456 } | 456 } |
457 | 457 |
458 bool ThreadState::popAndInvokeThreadLocalWeakCallback(Visitor* visitor) { | 458 bool ThreadState::popAndInvokeThreadLocalWeakCallback(Visitor* visitor) { |
459 ASSERT(checkThread()); | 459 DCHECK(checkThread()); |
460 // For weak processing we should never reach orphaned pages since orphaned | 460 // For weak processing we should never reach orphaned pages since orphaned |
461 // pages are not traced and thus objects on those pages are never | 461 // pages are not traced and thus objects on those pages are never |
462 // registered as objects on orphaned pages. We cannot assert this here since | 462 // registered as objects on orphaned pages. We cannot assert this here since |
463 // we might have an off-heap collection. We assert it in | 463 // we might have an off-heap collection. We assert it in |
464 // ThreadHeap::pushThreadLocalWeakCallback. | 464 // ThreadHeap::pushThreadLocalWeakCallback. |
465 if (CallbackStack::Item* item = m_threadLocalWeakCallbackStack->pop()) { | 465 if (CallbackStack::Item* item = m_threadLocalWeakCallbackStack->pop()) { |
466 item->call(visitor); | 466 item->call(visitor); |
467 return true; | 467 return true; |
468 } | 468 } |
469 return false; | 469 return false; |
470 } | 470 } |
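pushThreadLocalWeakCallback() and popAndInvokeThreadLocalWeakCallback() form a producer/consumer pair over the CallbackStack: pop returns false once the stack is empty, so a caller drains it with a plain loop. A sketch of the expected call pattern (assuming a ThreadState* and Visitor* in scope):

    // Drain and invoke every pending thread-local weak callback.
    while (threadState->popAndInvokeThreadLocalWeakCallback(visitor)) {
    }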
471 | 471 |
472 void ThreadState::threadLocalWeakProcessing() { | 472 void ThreadState::threadLocalWeakProcessing() { |
473 ASSERT(checkThread()); | 473 DCHECK(checkThread()); |
474 ASSERT(!sweepForbidden()); | 474 DCHECK(!sweepForbidden()); |
475 TRACE_EVENT0("blink_gc", "ThreadState::threadLocalWeakProcessing"); | 475 TRACE_EVENT0("blink_gc", "ThreadState::threadLocalWeakProcessing"); |
476 double startTime = WTF::currentTimeMS(); | 476 double startTime = WTF::currentTimeMS(); |
477 | 477 |
478 SweepForbiddenScope sweepForbiddenScope(this); | 478 SweepForbiddenScope sweepForbiddenScope(this); |
479 ScriptForbiddenIfMainThreadScope scriptForbiddenScope; | 479 ScriptForbiddenIfMainThreadScope scriptForbiddenScope; |
480 | 480 |
481 // Disallow allocation during weak processing. | 481 // Disallow allocation during weak processing. |
482 // It would be technically safe to allow allocations, but it is unsafe | 482 // It would be technically safe to allow allocations, but it is unsafe |
483 // to mutate an object graph in a way in which a dead object gets | 483 // to mutate an object graph in a way in which a dead object gets |
484 // resurrected or mutate a HashTable (because HashTable's weak processing | 484 // resurrected or mutate a HashTable (because HashTable's weak processing |
(...skipping 129 matching lines...)
614 | 614 |
615 // If we're consuming too much memory, trigger a conservative GC | 615 // If we're consuming too much memory, trigger a conservative GC |
616 // aggressively. This is a safeguard to avoid OOM. | 616 // aggressively. This is a safeguard to avoid OOM. |
617 bool ThreadState::shouldForceMemoryPressureGC() { | 617 bool ThreadState::shouldForceMemoryPressureGC() { |
618 if (totalMemorySize() < 300 * 1024 * 1024) | 618 if (totalMemorySize() < 300 * 1024 * 1024) |
619 return false; | 619 return false; |
620 return judgeGCThreshold(0, 0, 1.5); | 620 return judgeGCThreshold(0, 0, 1.5); |
621 } | 621 } |
622 | 622 |
623 void ThreadState::scheduleV8FollowupGCIfNeeded(BlinkGC::V8GCType gcType) { | 623 void ThreadState::scheduleV8FollowupGCIfNeeded(BlinkGC::V8GCType gcType) { |
624 ASSERT(checkThread()); | 624 DCHECK(checkThread()); |
625 ThreadHeap::reportMemoryUsageForTracing(); | 625 ThreadHeap::reportMemoryUsageForTracing(); |
626 | 626 |
627 #if PRINT_HEAP_STATS | 627 #if PRINT_HEAP_STATS |
628 dataLogF("ThreadState::scheduleV8FollowupGCIfNeeded (gcType=%s)\n", | 628 dataLogF("ThreadState::scheduleV8FollowupGCIfNeeded (gcType=%s)\n", |
629 gcType == BlinkGC::V8MajorGC ? "MajorGC" : "MinorGC"); | 629 gcType == BlinkGC::V8MajorGC ? "MajorGC" : "MinorGC"); |
630 #endif | 630 #endif |
631 | 631 |
632 if (isGCForbidden()) | 632 if (isGCForbidden()) |
633 return; | 633 return; |
634 | 634 |
635 // This completeSweep() will do nothing in common cases since we've | 635 // This completeSweep() will do nothing in common cases since we've |
636 // called completeSweep() before V8 starts minor/major GCs. | 636 // called completeSweep() before V8 starts minor/major GCs. |
637 completeSweep(); | 637 completeSweep(); |
638 ASSERT(!isSweepingInProgress()); | 638 DCHECK(!isSweepingInProgress()); |
639 ASSERT(!sweepForbidden()); | 639 DCHECK(!sweepForbidden()); |
640 | 640 |
641 if ((gcType == BlinkGC::V8MajorGC && shouldForceMemoryPressureGC()) || | 641 if ((gcType == BlinkGC::V8MajorGC && shouldForceMemoryPressureGC()) || |
642 shouldScheduleV8FollowupGC()) { | 642 shouldScheduleV8FollowupGC()) { |
643 #if PRINT_HEAP_STATS | 643 #if PRINT_HEAP_STATS |
644 dataLogF("Scheduled PreciseGC\n"); | 644 dataLogF("Scheduled PreciseGC\n"); |
645 #endif | 645 #endif |
646 schedulePreciseGC(); | 646 schedulePreciseGC(); |
647 return; | 647 return; |
648 } | 648 } |
649 if (gcType == BlinkGC::V8MajorGC && shouldScheduleIdleGC()) { | 649 if (gcType == BlinkGC::V8MajorGC && shouldScheduleIdleGC()) { |
(...skipping 12 matching lines...)
662 // TODO(haraken): It's a bit too late for a major GC to schedule | 662 // TODO(haraken): It's a bit too late for a major GC to schedule |
663 // completeSweep() here, because gcPrologue for a major GC is called | 663 // completeSweep() here, because gcPrologue for a major GC is called |
664 // not at the point where the major GC started but at the point where | 664 // not at the point where the major GC started but at the point where |
665 // the major GC requests object grouping. | 665 // the major GC requests object grouping. |
666 if (gcType == BlinkGC::V8MajorGC) | 666 if (gcType == BlinkGC::V8MajorGC) |
667 completeSweep(); | 667 completeSweep(); |
668 } | 668 } |
669 | 669 |
670 void ThreadState::schedulePageNavigationGCIfNeeded( | 670 void ThreadState::schedulePageNavigationGCIfNeeded( |
671 float estimatedRemovalRatio) { | 671 float estimatedRemovalRatio) { |
672 ASSERT(checkThread()); | 672 DCHECK(checkThread()); |
673 ThreadHeap::reportMemoryUsageForTracing(); | 673 ThreadHeap::reportMemoryUsageForTracing(); |
674 | 674 |
675 #if PRINT_HEAP_STATS | 675 #if PRINT_HEAP_STATS |
676 dataLogF( | 676 dataLogF( |
677 "ThreadState::schedulePageNavigationGCIfNeeded " | 677 "ThreadState::schedulePageNavigationGCIfNeeded " |
678 "(estimatedRemovalRatio=%.2lf)\n", | 678 "(estimatedRemovalRatio=%.2lf)\n", |
679 estimatedRemovalRatio); | 679 estimatedRemovalRatio); |
680 #endif | 680 #endif |
681 | 681 |
682 if (isGCForbidden()) | 682 if (isGCForbidden()) |
683 return; | 683 return; |
684 | 684 |
685 // Finish ongoing lazy sweeping. | 685 // Finish ongoing lazy sweeping. |
686 // TODO(haraken): It might not make sense to force completeSweep() for all | 686 // TODO(haraken): It might not make sense to force completeSweep() for all |
687 // page navigations. | 687 // page navigations. |
688 completeSweep(); | 688 completeSweep(); |
689 ASSERT(!isSweepingInProgress()); | 689 DCHECK(!isSweepingInProgress()); |
690 ASSERT(!sweepForbidden()); | 690 DCHECK(!sweepForbidden()); |
691 | 691 |
692 if (shouldForceMemoryPressureGC()) { | 692 if (shouldForceMemoryPressureGC()) { |
693 #if PRINT_HEAP_STATS | 693 #if PRINT_HEAP_STATS |
694 dataLogF("Scheduled MemoryPressureGC\n"); | 694 dataLogF("Scheduled MemoryPressureGC\n"); |
695 #endif | 695 #endif |
696 collectGarbage(BlinkGC::HeapPointersOnStack, BlinkGC::GCWithoutSweep, | 696 collectGarbage(BlinkGC::HeapPointersOnStack, BlinkGC::GCWithoutSweep, |
697 BlinkGC::MemoryPressureGC); | 697 BlinkGC::MemoryPressureGC); |
698 return; | 698 return; |
699 } | 699 } |
700 if (shouldSchedulePageNavigationGC(estimatedRemovalRatio)) { | 700 if (shouldSchedulePageNavigationGC(estimatedRemovalRatio)) { |
701 #if PRINT_HEAP_STATS | 701 #if PRINT_HEAP_STATS |
702 dataLogF("Scheduled PageNavigationGC\n"); | 702 dataLogF("Scheduled PageNavigationGC\n"); |
703 #endif | 703 #endif |
704 schedulePageNavigationGC(); | 704 schedulePageNavigationGC(); |
705 } | 705 } |
706 } | 706 } |
707 | 707 |
708 void ThreadState::schedulePageNavigationGC() { | 708 void ThreadState::schedulePageNavigationGC() { |
709 ASSERT(checkThread()); | 709 DCHECK(checkThread()); |
710 ASSERT(!isSweepingInProgress()); | 710 DCHECK(!isSweepingInProgress()); |
711 setGCState(PageNavigationGCScheduled); | 711 setGCState(PageNavigationGCScheduled); |
712 } | 712 } |
713 | 713 |
714 void ThreadState::scheduleGCIfNeeded() { | 714 void ThreadState::scheduleGCIfNeeded() { |
715 ASSERT(checkThread()); | 715 DCHECK(checkThread()); |
716 ThreadHeap::reportMemoryUsageForTracing(); | 716 ThreadHeap::reportMemoryUsageForTracing(); |
717 | 717 |
718 #if PRINT_HEAP_STATS | 718 #if PRINT_HEAP_STATS |
719 dataLogF("ThreadState::scheduleGCIfNeeded\n"); | 719 dataLogF("ThreadState::scheduleGCIfNeeded\n"); |
720 #endif | 720 #endif |
721 | 721 |
722 // Allocation is allowed during sweeping, but those allocations should not | 722 // Allocation is allowed during sweeping, but those allocations should not |
723 // trigger nested GCs. | 723 // trigger nested GCs. |
724 if (isGCForbidden()) | 724 if (isGCForbidden()) |
725 return; | 725 return; |
726 | 726 |
727 if (isSweepingInProgress()) | 727 if (isSweepingInProgress()) |
728 return; | 728 return; |
729 ASSERT(!sweepForbidden()); | 729 DCHECK(!sweepForbidden()); |
730 | 730 |
731 reportMemoryToV8(); | 731 reportMemoryToV8(); |
732 | 732 |
733 if (shouldForceMemoryPressureGC()) { | 733 if (shouldForceMemoryPressureGC()) { |
734 completeSweep(); | 734 completeSweep(); |
735 if (shouldForceMemoryPressureGC()) { | 735 if (shouldForceMemoryPressureGC()) { |
736 #if PRINT_HEAP_STATS | 736 #if PRINT_HEAP_STATS |
737 dataLogF("Scheduled MemoryPressureGC\n"); | 737 dataLogF("Scheduled MemoryPressureGC\n"); |
738 #endif | 738 #endif |
739 collectGarbage(BlinkGC::HeapPointersOnStack, BlinkGC::GCWithoutSweep, | 739 collectGarbage(BlinkGC::HeapPointersOnStack, BlinkGC::GCWithoutSweep, |
(...skipping 16 matching lines...)
756 if (shouldScheduleIdleGC()) { | 756 if (shouldScheduleIdleGC()) { |
757 #if PRINT_HEAP_STATS | 757 #if PRINT_HEAP_STATS |
758 dataLogF("Scheduled IdleGC\n"); | 758 dataLogF("Scheduled IdleGC\n"); |
759 #endif | 759 #endif |
760 scheduleIdleGC(); | 760 scheduleIdleGC(); |
761 return; | 761 return; |
762 } | 762 } |
763 } | 763 } |
764 | 764 |
765 ThreadState* ThreadState::fromObject(const void* object) { | 765 ThreadState* ThreadState::fromObject(const void* object) { |
766 ASSERT(object); | 766 DCHECK(object); |
767 BasePage* page = pageFromObject(object); | 767 BasePage* page = pageFromObject(object); |
768 ASSERT(page); | 768 DCHECK(page); |
769 ASSERT(page->arena()); | 769 DCHECK(page->arena()); |
770 return page->arena()->getThreadState(); | 770 return page->arena()->getThreadState(); |
771 } | 771 } |
772 | 772 |
773 void ThreadState::performIdleGC(double deadlineSeconds) { | 773 void ThreadState::performIdleGC(double deadlineSeconds) { |
774 ASSERT(checkThread()); | 774 DCHECK(checkThread()); |
775 ASSERT(isMainThread()); | 775 DCHECK(isMainThread()); |
776 ASSERT(Platform::current()->currentThread()->scheduler()); | 776 DCHECK(Platform::current()->currentThread()->scheduler()); |
777 | 777 |
778 if (gcState() != IdleGCScheduled) | 778 if (gcState() != IdleGCScheduled) |
779 return; | 779 return; |
780 | 780 |
781 if (isGCForbidden()) { | 781 if (isGCForbidden()) { |
782 // If GC is forbidden at this point, try again. | 782 // If GC is forbidden at this point, try again. |
783 scheduleIdleGC(); | 783 scheduleIdleGC(); |
784 return; | 784 return; |
785 } | 785 } |
786 | 786 |
(...skipping 10 matching lines...)
797 } | 797 } |
798 | 798 |
799 TRACE_EVENT2("blink_gc", "ThreadState::performIdleGC", "idleDeltaInSeconds", | 799 TRACE_EVENT2("blink_gc", "ThreadState::performIdleGC", "idleDeltaInSeconds", |
800 idleDeltaInSeconds, "estimatedMarkingTime", | 800 idleDeltaInSeconds, "estimatedMarkingTime", |
801 m_heap->heapStats().estimatedMarkingTime()); | 801 m_heap->heapStats().estimatedMarkingTime()); |
802 collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithoutSweep, | 802 collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithoutSweep, |
803 BlinkGC::IdleGC); | 803 BlinkGC::IdleGC); |
804 } | 804 } |
805 | 805 |
806 void ThreadState::performIdleLazySweep(double deadlineSeconds) { | 806 void ThreadState::performIdleLazySweep(double deadlineSeconds) { |
807 ASSERT(checkThread()); | 807 DCHECK(checkThread()); |
808 ASSERT(isMainThread()); | 808 DCHECK(isMainThread()); |
809 | 809 |
810 // If we are not in a sweeping phase, there is nothing to do here. | 810 // If we are not in a sweeping phase, there is nothing to do here. |
811 if (!isSweepingInProgress()) | 811 if (!isSweepingInProgress()) |
812 return; | 812 return; |
813 | 813 |
814 // This check is here to prevent performIdleLazySweep() from being called | 814 // This check is here to prevent performIdleLazySweep() from being called |
815 // recursively. I'm not sure if it can happen but it would be safer to have | 815 // recursively. I'm not sure if it can happen but it would be safer to have |
816 // the check just in case. | 816 // the check just in case. |
817 if (sweepForbidden()) | 817 if (sweepForbidden()) |
818 return; | 818 return; |
(...skipping 59 matching lines...)
878 // Some threads (e.g. PPAPI thread) don't have a scheduler. | 878 // Some threads (e.g. PPAPI thread) don't have a scheduler. |
879 if (!Platform::current()->currentThread()->scheduler()) | 879 if (!Platform::current()->currentThread()->scheduler()) |
880 return; | 880 return; |
881 | 881 |
882 Platform::current()->currentThread()->scheduler()->postIdleTask( | 882 Platform::current()->currentThread()->scheduler()->postIdleTask( |
883 BLINK_FROM_HERE, | 883 BLINK_FROM_HERE, |
884 WTF::bind(&ThreadState::performIdleLazySweep, WTF::unretained(this))); | 884 WTF::bind(&ThreadState::performIdleLazySweep, WTF::unretained(this))); |
885 } | 885 } |
886 | 886 |
887 void ThreadState::schedulePreciseGC() { | 887 void ThreadState::schedulePreciseGC() { |
888 ASSERT(checkThread()); | 888 DCHECK(checkThread()); |
889 if (isSweepingInProgress()) { | 889 if (isSweepingInProgress()) { |
890 setGCState(SweepingAndPreciseGCScheduled); | 890 setGCState(SweepingAndPreciseGCScheduled); |
891 return; | 891 return; |
892 } | 892 } |
893 | 893 |
894 setGCState(PreciseGCScheduled); | 894 setGCState(PreciseGCScheduled); |
895 } | 895 } |
896 | 896 |
897 namespace { | 897 namespace { |
898 | 898 |
899 #define UNEXPECTED_GCSTATE(s) \ | 899 #define UNEXPECTED_GCSTATE(s) \ |
900 case ThreadState::s: \ | 900 case ThreadState::s: \ |
901 LOG(FATAL) << "Unexpected transition while in GCState " #s; \ | 901 LOG(FATAL) << "Unexpected transition while in GCState " #s; \ |
902 return | 902 return |
903 | 903 |
904 void unexpectedGCState(ThreadState::GCState gcState) { | 904 void unexpectedGCState(ThreadState::GCState gcState) { |
905 switch (gcState) { | 905 switch (gcState) { |
906 UNEXPECTED_GCSTATE(NoGCScheduled); | 906 UNEXPECTED_GCSTATE(NoGCScheduled); |
907 UNEXPECTED_GCSTATE(IdleGCScheduled); | 907 UNEXPECTED_GCSTATE(IdleGCScheduled); |
908 UNEXPECTED_GCSTATE(PreciseGCScheduled); | 908 UNEXPECTED_GCSTATE(PreciseGCScheduled); |
909 UNEXPECTED_GCSTATE(FullGCScheduled); | 909 UNEXPECTED_GCSTATE(FullGCScheduled); |
910 UNEXPECTED_GCSTATE(GCRunning); | 910 UNEXPECTED_GCSTATE(GCRunning); |
911 UNEXPECTED_GCSTATE(EagerSweepScheduled); | 911 UNEXPECTED_GCSTATE(EagerSweepScheduled); |
912 UNEXPECTED_GCSTATE(LazySweepScheduled); | 912 UNEXPECTED_GCSTATE(LazySweepScheduled); |
913 UNEXPECTED_GCSTATE(Sweeping); | 913 UNEXPECTED_GCSTATE(Sweeping); |
914 UNEXPECTED_GCSTATE(SweepingAndIdleGCScheduled); | 914 UNEXPECTED_GCSTATE(SweepingAndIdleGCScheduled); |
915 UNEXPECTED_GCSTATE(SweepingAndPreciseGCScheduled); | 915 UNEXPECTED_GCSTATE(SweepingAndPreciseGCScheduled); |
916 default: | 916 default: |
917 ASSERT_NOT_REACHED(); | 917 NOTREACHED(); |
918 return; | 918 return; |
919 } | 919 } |
920 } | 920 } |
921 | 921 |
922 #undef UNEXPECTED_GCSTATE | 922 #undef UNEXPECTED_GCSTATE |
923 | 923 |
924 } // namespace | 924 } // namespace |
925 | 925 |
926 #define VERIFY_STATE_TRANSITION(condition) \ | 926 #define VERIFY_STATE_TRANSITION(condition) \ |
927 if (UNLIKELY(!(condition))) \ | 927 if (UNLIKELY(!(condition))) { \ |
928 unexpectedGCState(m_gcState) | 928 unexpectedGCState(m_gcState); \ |
| 929 } |
929 | 930 |
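The braces added to VERIFY_STATE_TRANSITION on the new side are not cosmetic: a statement-like macro that expands to a bare `if` silently captures a following `else`. An illustrative hazard (not code from this patch):

    #define BAD_CHECK(condition) if (UNLIKELY(!(condition))) handleFailure()

    if (shouldVerify)
      BAD_CHECK(ok);  // expands to: if (UNLIKELY(!(ok))) handleFailure();
    else
      doFallback();   // binds to the macro's `if`, not to `if (shouldVerify)`

With the braced form, the same misuse fails to compile instead of silently changing control flow.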
930 void ThreadState::setGCState(GCState gcState) { | 931 void ThreadState::setGCState(GCState gcState) { |
931 switch (gcState) { | 932 switch (gcState) { |
932 case NoGCScheduled: | 933 case NoGCScheduled: |
933 ASSERT(checkThread()); | 934 DCHECK(checkThread()); |
934 VERIFY_STATE_TRANSITION(m_gcState == Sweeping || | 935 VERIFY_STATE_TRANSITION(m_gcState == Sweeping || |
935 m_gcState == SweepingAndIdleGCScheduled); | 936 m_gcState == SweepingAndIdleGCScheduled); |
936 break; | 937 break; |
937 case IdleGCScheduled: | 938 case IdleGCScheduled: |
938 case PreciseGCScheduled: | 939 case PreciseGCScheduled: |
939 case FullGCScheduled: | 940 case FullGCScheduled: |
940 case PageNavigationGCScheduled: | 941 case PageNavigationGCScheduled: |
941 ASSERT(checkThread()); | 942 DCHECK(checkThread()); |
942 VERIFY_STATE_TRANSITION( | 943 VERIFY_STATE_TRANSITION( |
943 m_gcState == NoGCScheduled || m_gcState == IdleGCScheduled || | 944 m_gcState == NoGCScheduled || m_gcState == IdleGCScheduled || |
944 m_gcState == PreciseGCScheduled || m_gcState == FullGCScheduled || | 945 m_gcState == PreciseGCScheduled || m_gcState == FullGCScheduled || |
945 m_gcState == PageNavigationGCScheduled || m_gcState == Sweeping || | 946 m_gcState == PageNavigationGCScheduled || m_gcState == Sweeping || |
946 m_gcState == SweepingAndIdleGCScheduled || | 947 m_gcState == SweepingAndIdleGCScheduled || |
947 m_gcState == SweepingAndPreciseGCScheduled); | 948 m_gcState == SweepingAndPreciseGCScheduled); |
948 completeSweep(); | 949 completeSweep(); |
949 break; | 950 break; |
950 case GCRunning: | 951 case GCRunning: |
951 ASSERT(!isInGC()); | 952 DCHECK(!isInGC()); |
952 VERIFY_STATE_TRANSITION(m_gcState != GCRunning); | 953 VERIFY_STATE_TRANSITION(m_gcState != GCRunning); |
953 break; | 954 break; |
954 case EagerSweepScheduled: | 955 case EagerSweepScheduled: |
955 case LazySweepScheduled: | 956 case LazySweepScheduled: |
956 ASSERT(isInGC()); | 957 DCHECK(isInGC()); |
957 VERIFY_STATE_TRANSITION(m_gcState == GCRunning); | 958 VERIFY_STATE_TRANSITION(m_gcState == GCRunning); |
958 break; | 959 break; |
959 case Sweeping: | 960 case Sweeping: |
960 ASSERT(checkThread()); | 961 DCHECK(checkThread()); |
961 VERIFY_STATE_TRANSITION(m_gcState == EagerSweepScheduled || | 962 VERIFY_STATE_TRANSITION(m_gcState == EagerSweepScheduled || |
962 m_gcState == LazySweepScheduled); | 963 m_gcState == LazySweepScheduled); |
963 break; | 964 break; |
964 case SweepingAndIdleGCScheduled: | 965 case SweepingAndIdleGCScheduled: |
965 case SweepingAndPreciseGCScheduled: | 966 case SweepingAndPreciseGCScheduled: |
966 ASSERT(checkThread()); | 967 DCHECK(checkThread()); |
967 VERIFY_STATE_TRANSITION(m_gcState == Sweeping || | 968 VERIFY_STATE_TRANSITION(m_gcState == Sweeping || |
968 m_gcState == SweepingAndIdleGCScheduled || | 969 m_gcState == SweepingAndIdleGCScheduled || |
969 m_gcState == SweepingAndPreciseGCScheduled); | 970 m_gcState == SweepingAndPreciseGCScheduled); |
970 break; | 971 break; |
971 default: | 972 default: |
972 ASSERT_NOT_REACHED(); | 973 NOTREACHED(); |
973 } | 974 } |
974 m_gcState = gcState; | 975 m_gcState = gcState; |
975 } | 976 } |
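Read together, the VERIFY_STATE_TRANSITION checks above pin down the legal state machine: the scheduled states (Idle/Precise/Full/PageNavigation) are reachable from NoGCScheduled, from one another, and from the sweeping states; GCRunning from any state but itself; EagerSweepScheduled and LazySweepScheduled only from GCRunning; Sweeping only from one of the *SweepScheduled states; and the SweepingAnd*GCScheduled states only from Sweeping or themselves.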
976 | 977 |
977 #undef VERIFY_STATE_TRANSITION | 978 #undef VERIFY_STATE_TRANSITION |
978 | 979 |
979 void ThreadState::runScheduledGC(BlinkGC::StackState stackState) { | 980 void ThreadState::runScheduledGC(BlinkGC::StackState stackState) { |
980 ASSERT(checkThread()); | 981 DCHECK(checkThread()); |
981 if (stackState != BlinkGC::NoHeapPointersOnStack) | 982 if (stackState != BlinkGC::NoHeapPointersOnStack) |
982 return; | 983 return; |
983 | 984 |
984 // If a safe point is entered while initiating a GC, we clearly do | 985 // If a safe point is entered while initiating a GC, we clearly do |
985 // not want to do another as part of that -- the safe point is only | 986 // not want to do another as part of that -- the safe point is only |
986 // entered after checking if a scheduled GC ought to run first. | 987 // entered after checking if a scheduled GC ought to run first. |
987 // Prevent that from happening by marking GCs as forbidden while | 988 // Prevent that from happening by marking GCs as forbidden while |
988 // one is initiated and later running. | 989 // one is initiated and later running. |
989 if (isGCForbidden()) | 990 if (isGCForbidden()) |
990 return; | 991 return; |
(...skipping 19 matching lines...)
1010 } | 1011 } |
1011 | 1012 |
1012 void ThreadState::flushHeapDoesNotContainCacheIfNeeded() { | 1013 void ThreadState::flushHeapDoesNotContainCacheIfNeeded() { |
1013 if (m_shouldFlushHeapDoesNotContainCache) { | 1014 if (m_shouldFlushHeapDoesNotContainCache) { |
1014 m_heap->flushHeapDoesNotContainCache(); | 1015 m_heap->flushHeapDoesNotContainCache(); |
1015 m_shouldFlushHeapDoesNotContainCache = false; | 1016 m_shouldFlushHeapDoesNotContainCache = false; |
1016 } | 1017 } |
1017 } | 1018 } |
1018 | 1019 |
1019 void ThreadState::makeConsistentForGC() { | 1020 void ThreadState::makeConsistentForGC() { |
1020 ASSERT(isInGC()); | 1021 DCHECK(isInGC()); |
1021 TRACE_EVENT0("blink_gc", "ThreadState::makeConsistentForGC"); | 1022 TRACE_EVENT0("blink_gc", "ThreadState::makeConsistentForGC"); |
1022 for (int i = 0; i < BlinkGC::NumberOfArenas; ++i) | 1023 for (int i = 0; i < BlinkGC::NumberOfArenas; ++i) |
1023 m_arenas[i]->makeConsistentForGC(); | 1024 m_arenas[i]->makeConsistentForGC(); |
1024 } | 1025 } |
1025 | 1026 |
1026 void ThreadState::compact() { | 1027 void ThreadState::compact() { |
1027 if (!heap().compaction()->isCompacting()) | 1028 if (!heap().compaction()->isCompacting()) |
1028 return; | 1029 return; |
1029 | 1030 |
1030 SweepForbiddenScope scope(this); | 1031 SweepForbiddenScope scope(this); |
(...skipping 14 matching lines...)
1045 // TODO: implement bail out wrt any overall deadline, not compacting | 1046 // TODO: implement bail out wrt any overall deadline, not compacting |
1046 // the remaining arenas if the time budget has been exceeded. | 1047 // the remaining arenas if the time budget has been exceeded. |
1047 heap().compaction()->startThreadCompaction(); | 1048 heap().compaction()->startThreadCompaction(); |
1048 for (int i = BlinkGC::HashTableArenaIndex; i >= BlinkGC::Vector1ArenaIndex; | 1049 for (int i = BlinkGC::HashTableArenaIndex; i >= BlinkGC::Vector1ArenaIndex; |
1049 --i) | 1050 --i) |
1050 static_cast<NormalPageArena*>(m_arenas[i])->sweepAndCompact(); | 1051 static_cast<NormalPageArena*>(m_arenas[i])->sweepAndCompact(); |
1051 heap().compaction()->finishThreadCompaction(); | 1052 heap().compaction()->finishThreadCompaction(); |
1052 } | 1053 } |
1053 | 1054 |
1054 void ThreadState::makeConsistentForMutator() { | 1055 void ThreadState::makeConsistentForMutator() { |
1055 ASSERT(isInGC()); | 1056 DCHECK(isInGC()); |
1056 for (int i = 0; i < BlinkGC::NumberOfArenas; ++i) | 1057 for (int i = 0; i < BlinkGC::NumberOfArenas; ++i) |
1057 m_arenas[i]->makeConsistentForMutator(); | 1058 m_arenas[i]->makeConsistentForMutator(); |
1058 } | 1059 } |
1059 | 1060 |
1060 void ThreadState::preGC() { | 1061 void ThreadState::preGC() { |
1061 if (RuntimeEnabledFeatures::traceWrappablesEnabled() && m_isolate && | 1062 if (RuntimeEnabledFeatures::traceWrappablesEnabled() && m_isolate && |
1062 m_performCleanup) | 1063 m_performCleanup) |
1063 m_performCleanup(m_isolate); | 1064 m_performCleanup(m_isolate); |
1064 | 1065 |
1065 ASSERT(!isInGC()); | 1066 DCHECK(!isInGC()); |
1066 setGCState(GCRunning); | 1067 setGCState(GCRunning); |
1067 makeConsistentForGC(); | 1068 makeConsistentForGC(); |
1068 flushHeapDoesNotContainCacheIfNeeded(); | 1069 flushHeapDoesNotContainCacheIfNeeded(); |
1069 clearArenaAges(); | 1070 clearArenaAges(); |
1070 | 1071 |
1071 // It is possible, albeit rare, for a thread to be kept | 1072 // It is possible, albeit rare, for a thread to be kept |
1072 // at a safepoint across multiple GCs, as resuming all attached | 1073 // at a safepoint across multiple GCs, as resuming all attached |
1073 // threads after the "global" GC phases will contend for the shared | 1074 // threads after the "global" GC phases will contend for the shared |
1074 // safepoint barrier mutexes etc, which can additionally delay | 1075 // safepoint barrier mutexes etc, which can additionally delay |
1075 // a thread. Enough so that another thread may initiate | 1076 // a thread. Enough so that another thread may initiate |
1076 // a new GC before this has happened. | 1077 // a new GC before this has happened. |
1077 // | 1078 // |
1078 // In which case the parked thread's ThreadState will have unprocessed | 1079 // In which case the parked thread's ThreadState will have unprocessed |
1079 // entries on its local weak callback stack when that later GC goes | 1080 // entries on its local weak callback stack when that later GC goes |
1080 // ahead. Clear out and invalidate the stack now, as the thread | 1081 // ahead. Clear out and invalidate the stack now, as the thread |
1081 // should only process callbacks found to be reachable by | 1082 // should only process callbacks found to be reachable by |
1082 // the latest GC, when it eventually gets to perform | 1083 // the latest GC, when it eventually gets to perform |
1083 // thread-local weak processing. | 1084 // thread-local weak processing. |
1084 m_threadLocalWeakCallbackStack->decommit(); | 1085 m_threadLocalWeakCallbackStack->decommit(); |
1085 m_threadLocalWeakCallbackStack->commit(); | 1086 m_threadLocalWeakCallbackStack->commit(); |
1086 } | 1087 } |
1087 | 1088 |
1088 void ThreadState::postGC(BlinkGC::GCType gcType) { | 1089 void ThreadState::postGC(BlinkGC::GCType gcType) { |
1089 if (RuntimeEnabledFeatures::traceWrappablesEnabled() && | 1090 if (RuntimeEnabledFeatures::traceWrappablesEnabled() && |
1090 m_invalidateDeadObjectsInWrappersMarkingDeque) { | 1091 m_invalidateDeadObjectsInWrappersMarkingDeque) { |
1091 m_invalidateDeadObjectsInWrappersMarkingDeque(m_isolate); | 1092 m_invalidateDeadObjectsInWrappersMarkingDeque(m_isolate); |
1092 } | 1093 } |
1093 | 1094 |
1094 ASSERT(isInGC()); | 1095 DCHECK(isInGC()); |
1095 for (int i = 0; i < BlinkGC::NumberOfArenas; i++) | 1096 for (int i = 0; i < BlinkGC::NumberOfArenas; i++) |
1096 m_arenas[i]->prepareForSweep(); | 1097 m_arenas[i]->prepareForSweep(); |
1097 | 1098 |
1098 if (gcType == BlinkGC::GCWithSweep) { | 1099 if (gcType == BlinkGC::GCWithSweep) { |
1099 setGCState(EagerSweepScheduled); | 1100 setGCState(EagerSweepScheduled); |
1100 } else if (gcType == BlinkGC::GCWithoutSweep) { | 1101 } else if (gcType == BlinkGC::GCWithoutSweep) { |
1101 setGCState(LazySweepScheduled); | 1102 setGCState(LazySweepScheduled); |
1102 } else { | 1103 } else { |
1103 takeSnapshot(SnapshotType::HeapSnapshot); | 1104 takeSnapshot(SnapshotType::HeapSnapshot); |
1104 | 1105 |
1105 // This unmarks all marked objects and marks all unmarked objects dead. | 1106 // This unmarks all marked objects and marks all unmarked objects dead. |
1106 makeConsistentForMutator(); | 1107 makeConsistentForMutator(); |
1107 | 1108 |
1108 takeSnapshot(SnapshotType::FreelistSnapshot); | 1109 takeSnapshot(SnapshotType::FreelistSnapshot); |
1109 | 1110 |
1110 // Force setting NoGCScheduled to circumvent checkThread() | 1111 // Force setting NoGCScheduled to circumvent checkThread() |
1111 // in setGCState(). | 1112 // in setGCState(). |
1112 m_gcState = NoGCScheduled; | 1113 m_gcState = NoGCScheduled; |
1113 } | 1114 } |
1114 } | 1115 } |
1115 | 1116 |
1116 void ThreadState::preSweep() { | 1117 void ThreadState::preSweep() { |
1117 ASSERT(checkThread()); | 1118 DCHECK(checkThread()); |
1118 if (gcState() != EagerSweepScheduled && gcState() != LazySweepScheduled) | 1119 if (gcState() != EagerSweepScheduled && gcState() != LazySweepScheduled) |
1119 return; | 1120 return; |
1120 | 1121 |
1121 threadLocalWeakProcessing(); | 1122 threadLocalWeakProcessing(); |
1122 | 1123 |
1123 GCState previousGCState = gcState(); | 1124 GCState previousGCState = gcState(); |
1124 // We have to set the GCState to Sweeping before calling pre-finalizers | 1125 // We have to set the GCState to Sweeping before calling pre-finalizers |
1125 // to disallow a GC during the pre-finalizers. | 1126 // to disallow a GC during the pre-finalizers. |
1126 setGCState(Sweeping); | 1127 setGCState(Sweeping); |
1127 | 1128 |
(...skipping 49 matching lines...)
1177 // threads (e.g. in CrossThreadPersistentRegion::shouldTracePersistent) and | 1178 // threads (e.g. in CrossThreadPersistentRegion::shouldTracePersistent) and |
1178 // that would be fine. | 1179 // that would be fine. |
1179 ProcessHeap::crossThreadPersistentRegion().unpoisonCrossThreadPersistents(); | 1180 ProcessHeap::crossThreadPersistentRegion().unpoisonCrossThreadPersistents(); |
1180 } | 1181 } |
1181 #endif | 1182 #endif |
1182 | 1183 |
1183 void ThreadState::eagerSweep() { | 1184 void ThreadState::eagerSweep() { |
1184 #if defined(ADDRESS_SANITIZER) | 1185 #if defined(ADDRESS_SANITIZER) |
1185 poisonEagerArena(); | 1186 poisonEagerArena(); |
1186 #endif | 1187 #endif |
1187 ASSERT(checkThread()); | 1188 DCHECK(checkThread()); |
1188 // Some objects need to be finalized promptly and cannot be handled | 1189 // Some objects need to be finalized promptly and cannot be handled |
1189 // by lazy sweeping. Keep those in a designated heap and sweep it | 1190 // by lazy sweeping. Keep those in a designated heap and sweep it |
1190 // eagerly. | 1191 // eagerly. |
1191 ASSERT(isSweepingInProgress()); | 1192 DCHECK(isSweepingInProgress()); |
1192 | 1193 |
1193 // Mirroring the completeSweep() condition; see its comment. | 1194 // Mirroring the completeSweep() condition; see its comment. |
1194 if (sweepForbidden()) | 1195 if (sweepForbidden()) |
1195 return; | 1196 return; |
1196 | 1197 |
1197 SweepForbiddenScope scope(this); | 1198 SweepForbiddenScope scope(this); |
1198 ScriptForbiddenIfMainThreadScope scriptForbiddenScope; | 1199 ScriptForbiddenIfMainThreadScope scriptForbiddenScope; |
1199 | 1200 |
1200 double startTime = WTF::currentTimeMS(); | 1201 double startTime = WTF::currentTimeMS(); |
1201 m_arenas[BlinkGC::EagerSweepArenaIndex]->completeSweep(); | 1202 m_arenas[BlinkGC::EagerSweepArenaIndex]->completeSweep(); |
1202 accumulateSweepingTime(WTF::currentTimeMS() - startTime); | 1203 accumulateSweepingTime(WTF::currentTimeMS() - startTime); |
1203 } | 1204 } |
1204 | 1205 |
1205 void ThreadState::completeSweep() { | 1206 void ThreadState::completeSweep() { |
1206 ASSERT(checkThread()); | 1207 DCHECK(checkThread()); |
1207 // If we are not in a sweeping phase, there is nothing to do here. | 1208 // If we are not in a sweeping phase, there is nothing to do here. |
1208 if (!isSweepingInProgress()) | 1209 if (!isSweepingInProgress()) |
1209 return; | 1210 return; |
1210 | 1211 |
1211 // completeSweep() can be called recursively if finalizers can allocate | 1212 // completeSweep() can be called recursively if finalizers can allocate |
1212 // memory and the allocation triggers completeSweep(). This check prevents | 1213 // memory and the allocation triggers completeSweep(). This check prevents |
1213 // the sweeping from being executed recursively. | 1214 // the sweeping from being executed recursively. |
1214 if (sweepForbidden()) | 1215 if (sweepForbidden()) |
1215 return; | 1216 return; |
1216 | 1217 |
(...skipping 16 matching lines...)
1233 DEFINE_STATIC_LOCAL(CustomCountHistogram, completeSweepHistogram, | 1234 DEFINE_STATIC_LOCAL(CustomCountHistogram, completeSweepHistogram, |
1234 ("BlinkGC.CompleteSweep", 1, 10 * 1000, 50)); | 1235 ("BlinkGC.CompleteSweep", 1, 10 * 1000, 50)); |
1235 completeSweepHistogram.count(timeForCompleteSweep); | 1236 completeSweepHistogram.count(timeForCompleteSweep); |
1236 } | 1237 } |
1237 } | 1238 } |
1238 | 1239 |
1239 postSweep(); | 1240 postSweep(); |
1240 } | 1241 } |
1241 | 1242 |
1242 void ThreadState::postSweep() { | 1243 void ThreadState::postSweep() { |
1243 ASSERT(checkThread()); | 1244 DCHECK(checkThread()); |
1244 ThreadHeap::reportMemoryUsageForTracing(); | 1245 ThreadHeap::reportMemoryUsageForTracing(); |
1245 | 1246 |
1246 if (isMainThread()) { | 1247 if (isMainThread()) { |
1247 double collectionRate = 0; | 1248 double collectionRate = 0; |
1248 if (m_heap->heapStats().objectSizeAtLastGC() > 0) | 1249 if (m_heap->heapStats().objectSizeAtLastGC() > 0) |
1249 collectionRate = 1 - | 1250 collectionRate = 1 - |
1250 1.0 * m_heap->heapStats().markedObjectSize() / | 1251 1.0 * m_heap->heapStats().markedObjectSize() / |
1251 m_heap->heapStats().objectSizeAtLastGC(); | 1252 m_heap->heapStats().objectSizeAtLastGC(); |
1252 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), | 1253 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), |
1253 "ThreadState::collectionRate", | 1254 "ThreadState::collectionRate", |
(...skipping 50 matching lines...)
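A worked example for the collection-rate formula above (illustrative numbers, not measurements): with objectSizeAtLastGC() = 100 MB and markedObjectSize() = 60 MB, collectionRate = 1 - 60/100 = 0.40, i.e. 40% of the Blink heap turned out to be garbage since the last GC.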
1304 setGCState(NoGCScheduled); | 1305 setGCState(NoGCScheduled); |
1305 break; | 1306 break; |
1306 case SweepingAndPreciseGCScheduled: | 1307 case SweepingAndPreciseGCScheduled: |
1307 setGCState(PreciseGCScheduled); | 1308 setGCState(PreciseGCScheduled); |
1308 break; | 1309 break; |
1309 case SweepingAndIdleGCScheduled: | 1310 case SweepingAndIdleGCScheduled: |
1310 setGCState(NoGCScheduled); | 1311 setGCState(NoGCScheduled); |
1311 scheduleIdleGC(); | 1312 scheduleIdleGC(); |
1312 break; | 1313 break; |
1313 default: | 1314 default: |
1314 ASSERT_NOT_REACHED(); | 1315 NOTREACHED(); |
1315 } | 1316 } |
1316 } | 1317 } |
1317 | 1318 |
1318 void ThreadState::prepareForThreadStateTermination() { | 1319 void ThreadState::prepareForThreadStateTermination() { |
1319 ASSERT(checkThread()); | 1320 DCHECK(checkThread()); |
1320 for (int i = 0; i < BlinkGC::NumberOfArenas; ++i) | 1321 for (int i = 0; i < BlinkGC::NumberOfArenas; ++i) |
1321 m_arenas[i]->prepareHeapForTermination(); | 1322 m_arenas[i]->prepareHeapForTermination(); |
1322 } | 1323 } |
1323 | 1324 |
1324 #if ENABLE(ASSERT) | 1325 #if DCHECK_IS_ON() |
1325 BasePage* ThreadState::findPageFromAddress(Address address) { | 1326 BasePage* ThreadState::findPageFromAddress(Address address) { |
1326 for (int i = 0; i < BlinkGC::NumberOfArenas; ++i) { | 1327 for (int i = 0; i < BlinkGC::NumberOfArenas; ++i) { |
1327 if (BasePage* page = m_arenas[i]->findPageFromAddress(address)) | 1328 if (BasePage* page = m_arenas[i]->findPageFromAddress(address)) |
1328 return page; | 1329 return page; |
1329 } | 1330 } |
1330 return nullptr; | 1331 return nullptr; |
1331 } | 1332 } |
1332 #endif | 1333 #endif |
1333 | 1334 |
1334 size_t ThreadState::objectPayloadSizeForTesting() { | 1335 size_t ThreadState::objectPayloadSizeForTesting() { |
1335 size_t objectPayloadSize = 0; | 1336 size_t objectPayloadSize = 0; |
1336 for (int i = 0; i < BlinkGC::NumberOfArenas; ++i) | 1337 for (int i = 0; i < BlinkGC::NumberOfArenas; ++i) |
1337 objectPayloadSize += m_arenas[i]->objectPayloadSizeForTesting(); | 1338 objectPayloadSize += m_arenas[i]->objectPayloadSizeForTesting(); |
1338 return objectPayloadSize; | 1339 return objectPayloadSize; |
1339 } | 1340 } |
1340 | 1341 |
1341 void ThreadState::safePoint(BlinkGC::StackState stackState) { | 1342 void ThreadState::safePoint(BlinkGC::StackState stackState) { |
1342 ASSERT(checkThread()); | 1343 DCHECK(checkThread()); |
1343 ThreadHeap::reportMemoryUsageForTracing(); | 1344 ThreadHeap::reportMemoryUsageForTracing(); |
1344 | 1345 |
1345 runScheduledGC(stackState); | 1346 runScheduledGC(stackState); |
1346 ASSERT(!m_atSafePoint); | 1347 DCHECK(!m_atSafePoint); |
1347 m_stackState = stackState; | 1348 m_stackState = stackState; |
1348 m_atSafePoint = true; | 1349 m_atSafePoint = true; |
1349 m_heap->checkAndPark(this, nullptr); | 1350 m_heap->checkAndPark(this, nullptr); |
1350 m_atSafePoint = false; | 1351 m_atSafePoint = false; |
1351 m_stackState = BlinkGC::HeapPointersOnStack; | 1352 m_stackState = BlinkGC::HeapPointersOnStack; |
1352 preSweep(); | 1353 preSweep(); |
1353 } | 1354 } |
1354 | 1355 |
1355 #ifdef ADDRESS_SANITIZER | 1356 #ifdef ADDRESS_SANITIZER |
1356 // When we are running under AddressSanitizer with | 1357 // When we are running under AddressSanitizer with |
1357 // detect_stack_use_after_return=1 then stack marker obtained from | 1358 // detect_stack_use_after_return=1 then stack marker obtained from |
1358 // SafePointScope will point into a fake stack. Detect this case by checking if | 1359 // SafePointScope will point into a fake stack. Detect this case by checking if |
1359 // it falls in between current stack frame and stack start and use an arbitrary | 1360 // it falls in between current stack frame and stack start and use an arbitrary |
1360 // high enough value for it. Don't adjust stack marker in any other case to | 1361 // high enough value for it. Don't adjust stack marker in any other case to |
1361 // match behavior of code running without AddressSanitizer. | 1362 // match behavior of code running without AddressSanitizer. |
1362 NO_SANITIZE_ADDRESS static void* adjustScopeMarkerForAdressSanitizer( | 1363 NO_SANITIZE_ADDRESS static void* adjustScopeMarkerForAdressSanitizer( |
1363 void* scopeMarker) { | 1364 void* scopeMarker) { |
1364 Address start = reinterpret_cast<Address>(StackFrameDepth::getStackStart()); | 1365 Address start = reinterpret_cast<Address>(StackFrameDepth::getStackStart()); |
1365 Address end = reinterpret_cast<Address>(&start); | 1366 Address end = reinterpret_cast<Address>(&start); |
1366 RELEASE_ASSERT(end < start); | 1367 CHECK(end < start); |
1367 | 1368 |
1368 if (end <= scopeMarker && scopeMarker < start) | 1369 if (end <= scopeMarker && scopeMarker < start) |
1369 return scopeMarker; | 1370 return scopeMarker; |
1370 | 1371 |
1371 // 256 is as good an approximation as any other. | 1372 // 256 is as good an approximation as any other. |
1372 const size_t bytesToCopy = sizeof(Address) * 256; | 1373 const size_t bytesToCopy = sizeof(Address) * 256; |
1373 if (static_cast<size_t>(start - end) < bytesToCopy) | 1374 if (static_cast<size_t>(start - end) < bytesToCopy) |
1374 return start; | 1375 return start; |
1375 | 1376 |
1376 return end + bytesToCopy; | 1377 return end + bytesToCopy; |
1377 } | 1378 } |
1378 #endif | 1379 #endif |
1379 | 1380 |
1380 void ThreadState::enterSafePoint(BlinkGC::StackState stackState, | 1381 void ThreadState::enterSafePoint(BlinkGC::StackState stackState, |
1381 void* scopeMarker) { | 1382 void* scopeMarker) { |
1382 ASSERT(checkThread()); | 1383 DCHECK(checkThread()); |
1383 #ifdef ADDRESS_SANITIZER | 1384 #ifdef ADDRESS_SANITIZER |
1384 if (stackState == BlinkGC::HeapPointersOnStack) | 1385 if (stackState == BlinkGC::HeapPointersOnStack) |
1385 scopeMarker = adjustScopeMarkerForAdressSanitizer(scopeMarker); | 1386 scopeMarker = adjustScopeMarkerForAdressSanitizer(scopeMarker); |
1386 #endif | 1387 #endif |
1387 ASSERT(stackState == BlinkGC::NoHeapPointersOnStack || scopeMarker); | 1388 DCHECK(stackState == BlinkGC::NoHeapPointersOnStack || scopeMarker); |
1388 runScheduledGC(stackState); | 1389 runScheduledGC(stackState); |
1389 ASSERT(!m_atSafePoint); | 1390 DCHECK(!m_atSafePoint); |
1390 m_atSafePoint = true; | 1391 m_atSafePoint = true; |
1391 m_stackState = stackState; | 1392 m_stackState = stackState; |
1392 m_safePointScopeMarker = scopeMarker; | 1393 m_safePointScopeMarker = scopeMarker; |
1393 m_heap->enterSafePoint(this); | 1394 m_heap->enterSafePoint(this); |
1394 } | 1395 } |
1395 | 1396 |
1396 void ThreadState::leaveSafePoint(SafePointAwareMutexLocker* locker) { | 1397 void ThreadState::leaveSafePoint(SafePointAwareMutexLocker* locker) { |
1397 ASSERT(checkThread()); | 1398 DCHECK(checkThread()); |
1398 ASSERT(m_atSafePoint); | 1399 DCHECK(m_atSafePoint); |
1399 m_heap->leaveSafePoint(this, locker); | 1400 m_heap->leaveSafePoint(this, locker); |
1400 m_atSafePoint = false; | 1401 m_atSafePoint = false; |
1401 m_stackState = BlinkGC::HeapPointersOnStack; | 1402 m_stackState = BlinkGC::HeapPointersOnStack; |
1402 clearSafePointScopeMarker(); | 1403 clearSafePointScopeMarker(); |
1403 preSweep(); | 1404 preSweep(); |
1404 } | 1405 } |
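enterSafePoint() and leaveSafePoint() are designed to be driven by an RAII scope. A minimal sketch of the pairing, assuming the scope object's own address serves as the stack marker; Blink's actual SafePointScope is declared in ThreadState.h and may differ in detail:

// Sketch only, not Blink's real SafePointScope.
class SafePointScopeSketch {
 public:
  SafePointScopeSketch(BlinkGC::StackState stackState, ThreadState* state)
      : m_state(state) {
    // Using the scope's address as the marker bounds how far up the stack
    // the GC must conservatively scan.
    m_state->enterSafePoint(stackState, this);
  }
  ~SafePointScopeSketch() { m_state->leaveSafePoint(nullptr); }

 private:
  ThreadState* m_state;
};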
1405 | 1406 |
1406 void ThreadState::reportMemoryToV8() { | 1407 void ThreadState::reportMemoryToV8() { |
1407 if (!m_isolate) | 1408 if (!m_isolate) |
1408 return; | 1409 return; |
(...skipping 24 matching lines...) |
1433 m_markedObjectSize += delta; | 1434 m_markedObjectSize += delta; |
1434 m_heap->heapStats().increaseMarkedObjectSize(delta); | 1435 m_heap->heapStats().increaseMarkedObjectSize(delta); |
1435 } | 1436 } |
1436 | 1437 |
1437 void ThreadState::copyStackUntilSafePointScope() { | 1438 void ThreadState::copyStackUntilSafePointScope() { |
1438 if (!m_safePointScopeMarker || m_stackState == BlinkGC::NoHeapPointersOnStack) | 1439 if (!m_safePointScopeMarker || m_stackState == BlinkGC::NoHeapPointersOnStack) |
1439 return; | 1440 return; |
1440 | 1441 |
1441 Address* to = reinterpret_cast<Address*>(m_safePointScopeMarker); | 1442 Address* to = reinterpret_cast<Address*>(m_safePointScopeMarker); |
1442 Address* from = reinterpret_cast<Address*>(m_endOfStack); | 1443 Address* from = reinterpret_cast<Address*>(m_endOfStack); |
1443 RELEASE_ASSERT(from < to); | 1444 CHECK(from < to); |
1444 RELEASE_ASSERT(to <= reinterpret_cast<Address*>(m_startOfStack)); | 1445 CHECK(to <= reinterpret_cast<Address*>(m_startOfStack)); |
1445 size_t slotCount = static_cast<size_t>(to - from); | 1446 size_t slotCount = static_cast<size_t>(to - from); |
1446 // Catch potential performance issues. | 1447 // Catch potential performance issues. |
1447 #if defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) | 1448 #if defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) |
1448 // ASan/LSan use more stack space, so the allowed stack copy is | 1449 // ASan/LSan use more stack space, so the allowed stack copy is |
1449 // larger for those builds. | 1450 // larger for those builds. |
1450 ASSERT(slotCount < 2048); | 1451 DCHECK_LT(slotCount, 2048UL); |
1451 #else | 1452 #else |
1452 ASSERT(slotCount < 1024); | 1453 DCHECK_LT(slotCount, 1024UL); |
1453 #endif | 1454 #endif |
1454 | 1455 |
1455 ASSERT(!m_safePointStackCopy.size()); | 1456 DCHECK(!m_safePointStackCopy.size()); |
1456 m_safePointStackCopy.resize(slotCount); | 1457 m_safePointStackCopy.resize(slotCount); |
1457 for (size_t i = 0; i < slotCount; ++i) { | 1458 for (size_t i = 0; i < slotCount; ++i) { |
1458 m_safePointStackCopy[i] = from[i]; | 1459 m_safePointStackCopy[i] = from[i]; |
1459 } | 1460 } |
1460 } | 1461 } |
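The copy exists so the GC can scan a stable snapshot of the stack segment below the safepoint scope while the thread itself keeps running past the safepoint; the slot arithmetic reduces to copying the half-open range [from, to). A self-contained sketch of the same operation:

#include <cstddef>
#include <vector>

// Copies stack slots in [from, to) into a side buffer, mirroring the loop
// in copyStackUntilSafePointScope() with standalone types. |from| is the
// lower (deeper) bound, |to| the safepoint scope marker.
std::vector<void*> copyStackSegment(void* const* from, void* const* to) {
  std::vector<void*> copy(static_cast<size_t>(to - from));
  for (size_t i = 0; i < copy.size(); ++i)
    copy[i] = from[i];
  return copy;
}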
1461 | 1462 |
1462 void ThreadState::addInterruptor( | 1463 void ThreadState::addInterruptor( |
1463 std::unique_ptr<BlinkGCInterruptor> interruptor) { | 1464 std::unique_ptr<BlinkGCInterruptor> interruptor) { |
1464 ASSERT(checkThread()); | 1465 DCHECK(checkThread()); |
1465 SafePointScope scope(BlinkGC::HeapPointersOnStack); | 1466 SafePointScope scope(BlinkGC::HeapPointersOnStack); |
1466 { | 1467 { |
1467 MutexLocker locker(m_heap->threadAttachMutex()); | 1468 MutexLocker locker(m_heap->threadAttachMutex()); |
1468 m_interruptors.append(std::move(interruptor)); | 1469 m_interruptors.append(std::move(interruptor)); |
1469 } | 1470 } |
1470 } | 1471 } |
1471 | 1472 |
1472 void ThreadState::registerStaticPersistentNode( | 1473 void ThreadState::registerStaticPersistentNode( |
1473 PersistentNode* node, | 1474 PersistentNode* node, |
1474 PersistentClearCallback callback) { | 1475 PersistentClearCallback callback) { |
1475 #if defined(LEAK_SANITIZER) | 1476 #if defined(LEAK_SANITIZER) |
1476 if (m_disabledStaticPersistentsRegistration) | 1477 if (m_disabledStaticPersistentsRegistration) |
1477 return; | 1478 return; |
1478 #endif | 1479 #endif |
1479 | 1480 |
1480 ASSERT(!m_staticPersistents.contains(node)); | 1481 DCHECK(!m_staticPersistents.contains(node)); |
1481 m_staticPersistents.add(node, callback); | 1482 m_staticPersistents.add(node, callback); |
1482 } | 1483 } |
1483 | 1484 |
1484 void ThreadState::releaseStaticPersistentNodes() { | 1485 void ThreadState::releaseStaticPersistentNodes() { |
1485 HashMap<PersistentNode*, ThreadState::PersistentClearCallback> | 1486 HashMap<PersistentNode*, ThreadState::PersistentClearCallback> |
1486 staticPersistents; | 1487 staticPersistents; |
1487 staticPersistents.swap(m_staticPersistents); | 1488 staticPersistents.swap(m_staticPersistents); |
1488 | 1489 |
1489 PersistentRegion* persistentRegion = getPersistentRegion(); | 1490 PersistentRegion* persistentRegion = getPersistentRegion(); |
1490 for (const auto& it : staticPersistents) | 1491 for (const auto& it : staticPersistents) |
1491 persistentRegion->releasePersistentNode(it.key, it.value); | 1492 persistentRegion->releasePersistentNode(it.key, it.value); |
1492 } | 1493 } |
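The swap into a local map before iterating is a deliberate drain idiom: by the time a clear callback frees its node, the member map is already empty, which also keeps the DCHECK in freePersistentNode() below satisfied during release. The idiom in miniature, with illustrative standard-library types:

#include <unordered_map>

// Drain-by-swap sketch: |live| can safely be inspected or repopulated by
// |release| callbacks because iteration happens over the local copy.
template <typename K, typename V, typename Release>
void drain(std::unordered_map<K, V>& live, Release release) {
  std::unordered_map<K, V> draining;
  draining.swap(live);
  for (auto& entry : draining)
    release(entry.first, entry.second);
}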
1493 | 1494 |
1494 void ThreadState::freePersistentNode(PersistentNode* persistentNode) { | 1495 void ThreadState::freePersistentNode(PersistentNode* persistentNode) { |
1495 PersistentRegion* persistentRegion = getPersistentRegion(); | 1496 PersistentRegion* persistentRegion = getPersistentRegion(); |
1496 persistentRegion->freePersistentNode(persistentNode); | 1497 persistentRegion->freePersistentNode(persistentNode); |
1497 // Do not allow static persistents to be freed before | 1498 // Do not allow static persistents to be freed before |
1498 // they're all released in releaseStaticPersistentNodes(). | 1499 // they're all released in releaseStaticPersistentNodes(). |
1499 // | 1500 // |
1500 // There's no fundamental reason why this couldn't be supported, | 1501 // There's no fundamental reason why this couldn't be supported, |
1501 // but no known use for it. | 1502 // but no known use for it. |
1502 ASSERT(!m_staticPersistents.contains(persistentNode)); | 1503 DCHECK(!m_staticPersistents.contains(persistentNode)); |
1503 } | 1504 } |
1504 | 1505 |
1505 #if defined(LEAK_SANITIZER) | 1506 #if defined(LEAK_SANITIZER) |
1506 void ThreadState::enterStaticReferenceRegistrationDisabledScope() { | 1507 void ThreadState::enterStaticReferenceRegistrationDisabledScope() { |
1507 m_disabledStaticPersistentsRegistration++; | 1508 m_disabledStaticPersistentsRegistration++; |
1508 } | 1509 } |
1509 | 1510 |
1510 void ThreadState::leaveStaticReferenceRegistrationDisabledScope() { | 1511 void ThreadState::leaveStaticReferenceRegistrationDisabledScope() { |
1511 ASSERT(m_disabledStaticPersistentsRegistration); | 1512 DCHECK(m_disabledStaticPersistentsRegistration); |
1512 m_disabledStaticPersistentsRegistration--; | 1513 m_disabledStaticPersistentsRegistration--; |
1513 } | 1514 } |
1514 #endif | 1515 #endif |
1515 | 1516 |
1516 void ThreadState::lockThreadAttachMutex() { | 1517 void ThreadState::lockThreadAttachMutex() { |
1517 m_heap->threadAttachMutex().lock(); | 1518 m_heap->threadAttachMutex().lock(); |
1518 } | 1519 } |
1519 | 1520 |
1520 void ThreadState::unlockThreadAttachMutex() { | 1521 void ThreadState::unlockThreadAttachMutex() { |
1521 m_heap->threadAttachMutex().unlock(); | 1522 m_heap->threadAttachMutex().unlock(); |
1522 } | 1523 } |
1523 | 1524 |
1524 void ThreadState::invokePreFinalizers() { | 1525 void ThreadState::invokePreFinalizers() { |
1525 ASSERT(checkThread()); | 1526 DCHECK(checkThread()); |
1526 ASSERT(!sweepForbidden()); | 1527 DCHECK(!sweepForbidden()); |
1527 TRACE_EVENT0("blink_gc", "ThreadState::invokePreFinalizers"); | 1528 TRACE_EVENT0("blink_gc", "ThreadState::invokePreFinalizers"); |
1528 | 1529 |
1529 double startTime = WTF::currentTimeMS(); | 1530 double startTime = WTF::currentTimeMS(); |
1530 if (!m_orderedPreFinalizers.isEmpty()) { | 1531 if (!m_orderedPreFinalizers.isEmpty()) { |
1531 SweepForbiddenScope sweepForbidden(this); | 1532 SweepForbiddenScope sweepForbidden(this); |
1532 ScriptForbiddenIfMainThreadScope scriptForbidden; | 1533 ScriptForbiddenIfMainThreadScope scriptForbidden; |
1533 | 1534 |
1534 // Call the prefinalizers in the opposite order to their registration. | 1535 // Call the prefinalizers in the opposite order to their registration. |
1535 // | 1536 // |
1536 // The prefinalizer callback wrapper returns |true| when its associated | 1537 // The prefinalizer callback wrapper returns |true| when its associated |
(...skipping 32 matching lines...) |
1569 int endArenaIndex) { | 1570 int endArenaIndex) { |
1570 size_t minArenaAge = m_arenaAges[beginArenaIndex]; | 1571 size_t minArenaAge = m_arenaAges[beginArenaIndex]; |
1571 int arenaIndexWithMinArenaAge = beginArenaIndex; | 1572 int arenaIndexWithMinArenaAge = beginArenaIndex; |
1572 for (int arenaIndex = beginArenaIndex + 1; arenaIndex <= endArenaIndex; | 1573 for (int arenaIndex = beginArenaIndex + 1; arenaIndex <= endArenaIndex; |
1573 arenaIndex++) { | 1574 arenaIndex++) { |
1574 if (m_arenaAges[arenaIndex] < minArenaAge) { | 1575 if (m_arenaAges[arenaIndex] < minArenaAge) { |
1575 minArenaAge = m_arenaAges[arenaIndex]; | 1576 minArenaAge = m_arenaAges[arenaIndex]; |
1576 arenaIndexWithMinArenaAge = arenaIndex; | 1577 arenaIndexWithMinArenaAge = arenaIndex; |
1577 } | 1578 } |
1578 } | 1579 } |
1579 ASSERT(isVectorArenaIndex(arenaIndexWithMinArenaAge)); | 1580 DCHECK(isVectorArenaIndex(arenaIndexWithMinArenaAge)); |
1580 return arenaIndexWithMinArenaAge; | 1581 return arenaIndexWithMinArenaAge; |
1581 } | 1582 } |
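This helper is a plain argmin over m_arenaAges in the inclusive window [beginArenaIndex, endArenaIndex], keeping the first minimum. The same computation via the standard library, shown only for comparison, not as a proposed change:

#include <algorithm>
#include <cstddef>

// Equivalent argmin over an inclusive index window; the hand-rolled loop
// above does the same work without iterator machinery.
int indexOfMinAge(const size_t* ages, int begin, int end) {
  return static_cast<int>(std::min_element(ages + begin, ages + end + 1) -
                          ages);
}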
1582 | 1583 |
1583 BaseArena* ThreadState::expandedVectorBackingArena(size_t gcInfoIndex) { | 1584 BaseArena* ThreadState::expandedVectorBackingArena(size_t gcInfoIndex) { |
1584 ASSERT(checkThread()); | 1585 DCHECK(checkThread()); |
1585 size_t entryIndex = gcInfoIndex & likelyToBePromptlyFreedArrayMask; | 1586 size_t entryIndex = gcInfoIndex & likelyToBePromptlyFreedArrayMask; |
1586 --m_likelyToBePromptlyFreed[entryIndex]; | 1587 --m_likelyToBePromptlyFreed[entryIndex]; |
1587 int arenaIndex = m_vectorBackingArenaIndex; | 1588 int arenaIndex = m_vectorBackingArenaIndex; |
1588 m_arenaAges[arenaIndex] = ++m_currentArenaAges; | 1589 m_arenaAges[arenaIndex] = ++m_currentArenaAges; |
1589 m_vectorBackingArenaIndex = arenaIndexOfVectorArenaLeastRecentlyExpanded( | 1590 m_vectorBackingArenaIndex = arenaIndexOfVectorArenaLeastRecentlyExpanded( |
1590 BlinkGC::Vector1ArenaIndex, BlinkGC::Vector4ArenaIndex); | 1591 BlinkGC::Vector1ArenaIndex, BlinkGC::Vector4ArenaIndex); |
1591 return m_arenas[arenaIndex]; | 1592 return m_arenas[arenaIndex]; |
1592 } | 1593 } |
1593 | 1594 |
1594 void ThreadState::allocationPointAdjusted(int arenaIndex) { | 1595 void ThreadState::allocationPointAdjusted(int arenaIndex) { |
1595 m_arenaAges[arenaIndex] = ++m_currentArenaAges; | 1596 m_arenaAges[arenaIndex] = ++m_currentArenaAges; |
1596 if (m_vectorBackingArenaIndex == arenaIndex) | 1597 if (m_vectorBackingArenaIndex == arenaIndex) |
1597 m_vectorBackingArenaIndex = arenaIndexOfVectorArenaLeastRecentlyExpanded( | 1598 m_vectorBackingArenaIndex = arenaIndexOfVectorArenaLeastRecentlyExpanded( |
1598 BlinkGC::Vector1ArenaIndex, BlinkGC::Vector4ArenaIndex); | 1599 BlinkGC::Vector1ArenaIndex, BlinkGC::Vector4ArenaIndex); |
1599 } | 1600 } |
1600 | 1601 |
1601 void ThreadState::promptlyFreed(size_t gcInfoIndex) { | 1602 void ThreadState::promptlyFreed(size_t gcInfoIndex) { |
1602 ASSERT(checkThread()); | 1603 DCHECK(checkThread()); |
1603 size_t entryIndex = gcInfoIndex & likelyToBePromptlyFreedArrayMask; | 1604 size_t entryIndex = gcInfoIndex & likelyToBePromptlyFreedArrayMask; |
1604 // See the comment in vectorBackingArena() for why this is +3. | 1605 // See the comment in vectorBackingArena() for why this is +3. |
1605 m_likelyToBePromptlyFreed[entryIndex] += 3; | 1606 m_likelyToBePromptlyFreed[entryIndex] += 3; |
1606 } | 1607 } |
1607 | 1608 |
1608 void ThreadState::takeSnapshot(SnapshotType type) { | 1609 void ThreadState::takeSnapshot(SnapshotType type) { |
1609 ASSERT(isInGC()); | 1610 DCHECK(isInGC()); |
1610 | 1611 |
1611 // Index 0 is reserved for freelist entries; objects are indexed 1 to | 1612 // Index 0 is reserved for freelist entries; objects are indexed 1 to |
1612 // gcInfoIndex. | 1613 // gcInfoIndex. |
1613 GCSnapshotInfo info(GCInfoTable::gcInfoIndex() + 1); | 1614 GCSnapshotInfo info(GCInfoTable::gcInfoIndex() + 1); |
1614 String threadDumpName = String::format("blink_gc/thread_%lu", | 1615 String threadDumpName = String::format("blink_gc/thread_%lu", |
1615 static_cast<unsigned long>(m_thread)); | 1616 static_cast<unsigned long>(m_thread)); |
1616 const String heapsDumpName = threadDumpName + "/heaps"; | 1617 const String heapsDumpName = threadDumpName + "/heaps"; |
1617 const String classesDumpName = threadDumpName + "/classes"; | 1618 const String classesDumpName = threadDumpName + "/classes"; |
1618 | 1619 |
1619 int numberOfHeapsReported = 0; | 1620 int numberOfHeapsReported = 0; |
1620 #define SNAPSHOT_HEAP(ArenaType) \ | 1621 #define SNAPSHOT_HEAP(ArenaType) \ |
1621 { \ | 1622 { \ |
1622 numberOfHeapsReported++; \ | 1623 numberOfHeapsReported++; \ |
1623 switch (type) { \ | 1624 switch (type) { \ |
1624 case SnapshotType::HeapSnapshot: \ | 1625 case SnapshotType::HeapSnapshot: \ |
1625 m_arenas[BlinkGC::ArenaType##ArenaIndex]->takeSnapshot( \ | 1626 m_arenas[BlinkGC::ArenaType##ArenaIndex]->takeSnapshot( \ |
1626 heapsDumpName + "/" #ArenaType, info); \ | 1627 heapsDumpName + "/" #ArenaType, info); \ |
1627 break; \ | 1628 break; \ |
1628 case SnapshotType::FreelistSnapshot: \ | 1629 case SnapshotType::FreelistSnapshot: \ |
1629 m_arenas[BlinkGC::ArenaType##ArenaIndex]->takeFreelistSnapshot( \ | 1630 m_arenas[BlinkGC::ArenaType##ArenaIndex]->takeFreelistSnapshot( \ |
1630 heapsDumpName + "/" #ArenaType); \ | 1631 heapsDumpName + "/" #ArenaType); \ |
1631 break; \ | 1632 break; \ |
1632 default: \ | 1633 default: \ |
1633 ASSERT_NOT_REACHED(); \ | 1634 NOTREACHED(); \ |
1634 } \ | 1635 } \ |
1635 } | 1636 } |
1636 | 1637 |
1637 SNAPSHOT_HEAP(NormalPage1); | 1638 SNAPSHOT_HEAP(NormalPage1); |
1638 SNAPSHOT_HEAP(NormalPage2); | 1639 SNAPSHOT_HEAP(NormalPage2); |
1639 SNAPSHOT_HEAP(NormalPage3); | 1640 SNAPSHOT_HEAP(NormalPage3); |
1640 SNAPSHOT_HEAP(NormalPage4); | 1641 SNAPSHOT_HEAP(NormalPage4); |
1641 SNAPSHOT_HEAP(EagerSweep); | 1642 SNAPSHOT_HEAP(EagerSweep); |
1642 SNAPSHOT_HEAP(Vector1); | 1643 SNAPSHOT_HEAP(Vector1); |
1643 SNAPSHOT_HEAP(Vector2); | 1644 SNAPSHOT_HEAP(Vector2); |
1644 SNAPSHOT_HEAP(Vector3); | 1645 SNAPSHOT_HEAP(Vector3); |
1645 SNAPSHOT_HEAP(Vector4); | 1646 SNAPSHOT_HEAP(Vector4); |
1646 SNAPSHOT_HEAP(InlineVector); | 1647 SNAPSHOT_HEAP(InlineVector); |
1647 SNAPSHOT_HEAP(HashTable); | 1648 SNAPSHOT_HEAP(HashTable); |
1648 SNAPSHOT_HEAP(LargeObject); | 1649 SNAPSHOT_HEAP(LargeObject); |
1649 FOR_EACH_TYPED_ARENA(SNAPSHOT_HEAP); | 1650 FOR_EACH_TYPED_ARENA(SNAPSHOT_HEAP); |
1650 | 1651 |
1651 ASSERT(numberOfHeapsReported == BlinkGC::NumberOfArenas); | 1652 DCHECK_EQ(numberOfHeapsReported, BlinkGC::NumberOfArenas); |
1652 | 1653 |
1653 #undef SNAPSHOT_HEAP | 1654 #undef SNAPSHOT_HEAP |
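For reference, SNAPSHOT_HEAP(Vector1) expands to the block below (abridged to the HeapSnapshot arm); the DCHECK_EQ above then verifies that exactly one such block ran per arena:

{
  numberOfHeapsReported++;
  switch (type) {
    case SnapshotType::HeapSnapshot:
      // "/" "Vector1" concatenates to "/Vector1" at compile time.
      m_arenas[BlinkGC::Vector1ArenaIndex]->takeSnapshot(
          heapsDumpName + "/" "Vector1", info);
      break;
    // ... FreelistSnapshot and NOTREACHED() arms elided ...
  }
}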
1654 | 1655 |
1655 if (type == SnapshotType::FreelistSnapshot) | 1656 if (type == SnapshotType::FreelistSnapshot) |
1656 return; | 1657 return; |
1657 | 1658 |
1658 size_t totalLiveCount = 0; | 1659 size_t totalLiveCount = 0; |
1659 size_t totalDeadCount = 0; | 1660 size_t totalDeadCount = 0; |
1660 size_t totalLiveSize = 0; | 1661 size_t totalLiveSize = 0; |
1661 size_t totalDeadSize = 0; | 1662 size_t totalDeadSize = 0; |
(...skipping 21 matching lines...) |
1683 ->createMemoryAllocatorDumpForCurrentGC(classesDumpName); | 1684 ->createMemoryAllocatorDumpForCurrentGC(classesDumpName); |
1684 BlinkGCMemoryDumpProvider::instance() | 1685 BlinkGCMemoryDumpProvider::instance() |
1685 ->currentProcessMemoryDump() | 1686 ->currentProcessMemoryDump() |
1686 ->AddOwnershipEdge(classesDump->guid(), heapsDump->guid()); | 1687 ->AddOwnershipEdge(classesDump->guid(), heapsDump->guid()); |
1687 } | 1688 } |
1688 | 1689 |
1689 void ThreadState::collectGarbage(BlinkGC::StackState stackState, | 1690 void ThreadState::collectGarbage(BlinkGC::StackState stackState, |
1690 BlinkGC::GCType gcType, | 1691 BlinkGC::GCType gcType, |
1691 BlinkGC::GCReason reason) { | 1692 BlinkGC::GCReason reason) { |
1692 // Nested collectGarbage() invocations aren't supported. | 1693 // Nested collectGarbage() invocations aren't supported. |
1693 RELEASE_ASSERT(!isGCForbidden()); | 1694 CHECK(!isGCForbidden()); |
1694 completeSweep(); | 1695 completeSweep(); |
1695 | 1696 |
1696 GCForbiddenScope gcForbiddenScope(this); | 1697 GCForbiddenScope gcForbiddenScope(this); |
1697 | 1698 |
1698 SafePointScope safePointScope(stackState, this); | 1699 SafePointScope safePointScope(stackState, this); |
1699 | 1700 |
1700 // Resume all parked threads upon leaving this scope. | 1701 // Resume all parked threads upon leaving this scope. |
1701 ParkThreadsScope parkThreadsScope(this); | 1702 ParkThreadsScope parkThreadsScope(this); |
1702 | 1703 |
1703 // Try to park the other threads. If we're unable to, bail out of the GC. | 1704 // Try to park the other threads. If we're unable to, bail out of the GC. |
(...skipping 154 matching lines...) |
1858 collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithSweep, | 1859 collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithSweep, |
1859 BlinkGC::ForcedGC); | 1860 BlinkGC::ForcedGC); |
1860 size_t liveObjects = heap().heapStats().markedObjectSize(); | 1861 size_t liveObjects = heap().heapStats().markedObjectSize(); |
1861 if (liveObjects == previousLiveObjects) | 1862 if (liveObjects == previousLiveObjects) |
1862 break; | 1863 break; |
1863 previousLiveObjects = liveObjects; | 1864 previousLiveObjects = liveObjects; |
1864 } | 1865 } |
1865 } | 1866 } |
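The loop above drives collection to a fixed point: a full GC can run prefinalizers and destructors that drop the last references to further objects, so passes repeat until the marked size stops shrinking (within whatever pass bound the elided loop header imposes). The pattern in isolation, with hypothetical runFullGC()/measureLiveBytes() standing in for the real calls:

#include <cstddef>
#include <cstdint>

size_t measureLiveBytes();  // Hypothetical stand-in for markedObjectSize().
void runFullGC();           // Hypothetical stand-in for collectGarbage().

// Repeat full collections until a pass stops shrinking the live set.
void collectUntilStable(int maxPasses) {
  size_t previous = SIZE_MAX;
  for (int i = 0; i < maxPasses; ++i) {
    runFullGC();
    size_t live = measureLiveBytes();
    if (live == previous)
      break;  // Fixed point: nothing further was freed.
    previous = live;
  }
}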
1866 | 1867 |
1867 } // namespace blink | 1868 } // namespace blink |