OLD | NEW |
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 116 matching lines...)
127 #ifdef _WIN64 | 127 #ifdef _WIN64 |
128 return __readgsqword(offsetof(NT_TIB64, StackBase)) - __readgsqword(offsetof(NT_TIB64, StackLimit)); | 128 return __readgsqword(offsetof(NT_TIB64, StackBase)) - __readgsqword(offsetof(NT_TIB64, StackLimit)); |
129 #else | 129 #else |
130 return __readfsdword(offsetof(NT_TIB, StackBase)) - __readfsdword(offsetof(NT_TIB, StackLimit)); | 130 return __readfsdword(offsetof(NT_TIB, StackBase)) - __readfsdword(offsetof(NT_TIB, StackLimit)); |
131 #endif | 131 #endif |
132 #else | 132 #else |
133 return 0; | 133 return 0; |
134 #endif | 134 #endif |
135 } | 135 } |
136 | 136 |
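A side note on the Windows branch just above: NT_TIB (NT_TIB64 on x64) is the Thread Information Block, and its StackBase/StackLimit fields bound the committed stack of the calling thread, so their difference gives a safe underestimate of the usable stack. A minimal, self-contained sketch of the same computation, assuming an MSVC build; the function name here is illustrative and not part of the patch:

    #include <windows.h>  // NT_TIB / NT_TIB64
    #include <intrin.h>   // __readfsdword / __readgsqword
    #include <stddef.h>   // offsetof

    // Committed stack size of the calling thread, read out of the TIB.
    static size_t committedStackSize()
    {
    #ifdef _WIN64
        // On x64 the TIB is reached through the GS segment register.
        return __readgsqword(offsetof(NT_TIB64, StackBase)) - __readgsqword(offsetof(NT_TIB64, StackLimit));
    #else
        // On x86 the TIB is reached through the FS segment register.
        return __readfsdword(offsetof(NT_TIB, StackBase)) - __readfsdword(offsetof(NT_TIB, StackLimit));
    #endif
    }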
137 // The maximum number of WrapperPersistentRegions to keep around in the | |
138 // m_pooledWrapperPersistentRegions pool. | |
139 static const size_t MaxPooledWrapperPersistentRegionCount = 2; | |
140 | |
141 WTF::ThreadSpecific<ThreadState*>* ThreadState::s_threadSpecific = 0; | 137 WTF::ThreadSpecific<ThreadState*>* ThreadState::s_threadSpecific = 0; |
142 uintptr_t ThreadState::s_mainThreadStackStart = 0; | 138 uintptr_t ThreadState::s_mainThreadStackStart = 0; |
143 uintptr_t ThreadState::s_mainThreadUnderestimatedStackSize = 0; | 139 uintptr_t ThreadState::s_mainThreadUnderestimatedStackSize = 0; |
144 uint8_t ThreadState::s_mainThreadStateStorage[sizeof(ThreadState)]; | 140 uint8_t ThreadState::s_mainThreadStateStorage[sizeof(ThreadState)]; |
145 SafePointBarrier* ThreadState::s_safePointBarrier = 0; | 141 SafePointBarrier* ThreadState::s_safePointBarrier = 0; |
146 bool ThreadState::s_inGC = false; | 142 bool ThreadState::s_inGC = false; |
147 | 143 |
148 static Mutex& threadAttachMutex() | 144 static Mutex& threadAttachMutex() |
149 { | 145 { |
150 AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex); | 146 AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex); |
(...skipping 167 matching lines...)
318 InitializeHeaps<index>::init(heaps, state); | 314 InitializeHeaps<index>::init(heaps, state); |
319 heaps[index] = new typename HeapIndexTrait<index>::HeapType(state, index); | 315 heaps[index] = new typename HeapIndexTrait<index>::HeapType(state, index); |
320 } | 316 } |
321 }; | 317 }; |
322 template<> struct InitializeHeaps<0> { | 318 template<> struct InitializeHeaps<0> { |
323 static void init(BaseHeap** heaps, ThreadState* state) { } | 319 static void init(BaseHeap** heaps, ThreadState* state) { } |
324 }; | 320 }; |
325 | 321 |
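The InitializeHeaps helper above (its head is elided by the diff) is the usual recursive-template initializer: each instantiation recurses to fill the lower indices before constructing its own heap, and the <0> specialization terminates the recursion, so InitializeHeaps<NumberOfHeaps>::init(m_heaps, this) constructs heaps 0 through NumberOfHeaps - 1 in order. A stripped-down, self-contained illustration of the pattern; the names and element type here are placeholders, not Blink's:

    // Init<Count>::run(slots) fills slots[0] .. slots[Count - 1] in order.
    template<int count> struct Init {
        static void run(int* slots)
        {
            Init<count - 1>::run(slots);  // initialize all lower slots first
            slots[count - 1] = count - 1; // then this one
        }
    };
    template<> struct Init<0> {
        static void run(int*) { }         // recursion terminator
    };

    // Usage: int slots[4]; Init<4>::run(slots);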
326 ThreadState::ThreadState() | 322 ThreadState::ThreadState() |
327 : m_thread(currentThread()) | 323 : m_thread(currentThread()) |
328 , m_liveWrapperPersistents(new WrapperPersistentRegion()) | |
329 , m_pooledWrapperPersistents(0) | |
330 , m_pooledWrapperPersistentRegionCount(0) | |
331 , m_persistents(adoptPtr(new PersistentAnchor())) | 324 , m_persistents(adoptPtr(new PersistentAnchor())) |
332 , m_startOfStack(reinterpret_cast<intptr_t*>(getStackStart())) | 325 , m_startOfStack(reinterpret_cast<intptr_t*>(getStackStart())) |
333 , m_endOfStack(reinterpret_cast<intptr_t*>(getStackStart())) | 326 , m_endOfStack(reinterpret_cast<intptr_t*>(getStackStart())) |
334 , m_safePointScopeMarker(0) | 327 , m_safePointScopeMarker(0) |
335 , m_atSafePoint(false) | 328 , m_atSafePoint(false) |
336 , m_interruptors() | 329 , m_interruptors() |
337 , m_gcRequested(false) | 330 , m_gcRequested(false) |
338 , m_forcePreciseGCForTesting(false) | 331 , m_forcePreciseGCForTesting(false) |
339 , m_sweepRequested(0) | 332 , m_sweepRequested(0) |
340 , m_sweepInProgress(false) | 333 , m_sweepInProgress(false) |
341 , m_noAllocationCount(0) | 334 , m_noAllocationCount(0) |
342 , m_inGC(false) | 335 , m_inGC(false) |
343 , m_isTerminating(false) | 336 , m_isTerminating(false) |
344 , m_shouldFlushHeapDoesNotContainCache(false) | 337 , m_shouldFlushHeapDoesNotContainCache(false) |
345 , m_lowCollectionRate(false) | 338 , m_lowCollectionRate(false) |
346 , m_numberOfSweeperTasks(0) | 339 , m_numberOfSweeperTasks(0) |
| 340 , m_traceDOMWrappers(0) |
347 #if defined(ADDRESS_SANITIZER) | 341 #if defined(ADDRESS_SANITIZER) |
348 , m_asanFakeStack(__asan_get_current_fake_stack()) | 342 , m_asanFakeStack(__asan_get_current_fake_stack()) |
349 #endif | 343 #endif |
350 { | 344 { |
351 ASSERT(!**s_threadSpecific); | 345 ASSERT(!**s_threadSpecific); |
352 **s_threadSpecific = this; | 346 **s_threadSpecific = this; |
353 | 347 |
354 if (isMainThread()) { | 348 if (isMainThread()) { |
355 s_mainThreadStackStart = reinterpret_cast<uintptr_t>(m_startOfStack) - sizeof(void*); | 349 s_mainThreadStackStart = reinterpret_cast<uintptr_t>(m_startOfStack) - sizeof(void*); |
356 s_mainThreadUnderestimatedStackSize = getUnderestimatedStackSize() - sizeof(void*); | 350 s_mainThreadUnderestimatedStackSize = getUnderestimatedStackSize() - sizeof(void*); |
357 } | 351 } |
358 | 352 |
359 InitializeHeaps<NumberOfHeaps>::init(m_heaps, this); | 353 InitializeHeaps<NumberOfHeaps>::init(m_heaps, this); |
360 | 354 |
361 m_weakCallbackStack = new CallbackStack(); | 355 m_weakCallbackStack = new CallbackStack(); |
362 | 356 |
363 if (blink::Platform::current()) | 357 if (blink::Platform::current()) |
364 m_sweeperThread = adoptPtr(blink::Platform::current()->createThread("Blink GC Sweeper Thread")); | 358 m_sweeperThread = adoptPtr(blink::Platform::current()->createThread("Blink GC Sweeper Thread")); |
365 } | 359 } |
366 | 360 |
367 ThreadState::~ThreadState() | 361 ThreadState::~ThreadState() |
368 { | 362 { |
369 checkThread(); | 363 checkThread(); |
370 delete m_weakCallbackStack; | 364 delete m_weakCallbackStack; |
371 m_weakCallbackStack = 0; | 365 m_weakCallbackStack = 0; |
372 for (int i = 0; i < NumberOfHeaps; i++) | 366 for (int i = 0; i < NumberOfHeaps; i++) |
373 delete m_heaps[i]; | 367 delete m_heaps[i]; |
374 deleteAllValues(m_interruptors); | 368 deleteAllValues(m_interruptors); |
375 while (m_liveWrapperPersistents) { | |
376 WrapperPersistentRegion* region = WrapperPersistentRegion::removeHead(&m_liveWrapperPersistents); | |
377 delete region; | |
378 } | |
379 while (m_pooledWrapperPersistents) { | |
380 WrapperPersistentRegion* region = WrapperPersistentRegion::removeHead(&m_pooledWrapperPersistents); | |
381 delete region; | |
382 } | |
383 **s_threadSpecific = 0; | 369 **s_threadSpecific = 0; |
384 s_mainThreadStackStart = 0; | 370 s_mainThreadStackStart = 0; |
385 s_mainThreadUnderestimatedStackSize = 0; | 371 s_mainThreadUnderestimatedStackSize = 0; |
386 } | 372 } |
387 | 373 |
388 void ThreadState::init() | 374 void ThreadState::init() |
389 { | 375 { |
390 s_threadSpecific = new WTF::ThreadSpecific<ThreadState*>(); | 376 s_threadSpecific = new WTF::ThreadSpecific<ThreadState*>(); |
391 s_safePointBarrier = new SafePointBarrier; | 377 s_safePointBarrier = new SafePointBarrier; |
392 } | 378 } |
(...skipping 217 matching lines...)
610 __msan_unpoison(&ptr, sizeof(ptr)); | 596 __msan_unpoison(&ptr, sizeof(ptr)); |
611 #endif | 597 #endif |
612 Heap::checkAndMarkPointer(visitor, ptr); | 598 Heap::checkAndMarkPointer(visitor, ptr); |
613 visitAsanFakeStackForPointer(visitor, ptr); | 599 visitAsanFakeStackForPointer(visitor, ptr); |
614 } | 600 } |
615 } | 601 } |
616 | 602 |
617 void ThreadState::visitPersistents(Visitor* visitor) | 603 void ThreadState::visitPersistents(Visitor* visitor) |
618 { | 604 { |
619 m_persistents->trace(visitor); | 605 m_persistents->trace(visitor); |
620 { | 606 if (m_traceDOMWrappers) { |
621 TRACE_EVENT0("blink_gc", "WrapperPersistentRegion::trace"); | 607 TRACE_EVENT0("blink_gc", "V8GCController::traceDOMWrappers"); |
622 WrapperPersistentRegion::trace(m_liveWrapperPersistents, visitor); | 608 m_traceDOMWrappers(m_isolate, visitor); |
623 } | 609 } |
624 } | 610 } |
625 | 611 |
626 #if ENABLE(GC_PROFILE_MARKING) | 612 #if ENABLE(GC_PROFILE_MARKING) |
627 const GCInfo* ThreadState::findGCInfo(Address address) | 613 const GCInfo* ThreadState::findGCInfo(Address address) |
628 { | 614 { |
629 BaseHeapPage* page = heapPageFromAddress(address); | 615 BaseHeapPage* page = heapPageFromAddress(address); |
630 if (page) { | 616 if (page) { |
631 return page->findGCInfo(address); | 617 return page->findGCInfo(address); |
632 } | 618 } |
(...skipping 88 matching lines...)
721 // registered as objects on orphaned pages. We cannot assert this here since | 707 // registered as objects on orphaned pages. We cannot assert this here since |
722 // we might have an off-heap collection. We assert it in | 708 // we might have an off-heap collection. We assert it in |
723 // Heap::pushWeakObjectPointerCallback. | 709 // Heap::pushWeakObjectPointerCallback. |
724 if (CallbackStack::Item* item = m_weakCallbackStack->pop()) { | 710 if (CallbackStack::Item* item = m_weakCallbackStack->pop()) { |
725 item->call(visitor); | 711 item->call(visitor); |
726 return true; | 712 return true; |
727 } | 713 } |
728 return false; | 714 return false; |
729 } | 715 } |
730 | 716 |
731 WrapperPersistentRegion* ThreadState::takeWrapperPersistentRegion() | |
732 { | |
733 WrapperPersistentRegion* region; | |
734 if (m_pooledWrapperPersistentRegionCount) { | |
735 region = WrapperPersistentRegion::removeHead(&m_pooledWrapperPersistents); | |
736 m_pooledWrapperPersistentRegionCount--; | |
737 } else { | |
738 region = new WrapperPersistentRegion(); | |
739 } | |
740 ASSERT(region); | |
741 WrapperPersistentRegion::insertHead(&m_liveWrapperPersistents, region); | |
742 return region; | |
743 } | |
744 | |
745 void ThreadState::freeWrapperPersistentRegion(WrapperPersistentRegion* region) | |
746 { | |
747 if (!region->removeIfNotLast(&m_liveWrapperPersistents)) | |
748 return; | |
749 | |
750 // Region was removed, ie. it was not the last region in the list. | |
751 if (m_pooledWrapperPersistentRegionCount < MaxPooledWrapperPersistentRegionCount) { | |
752 WrapperPersistentRegion::insertHead(&m_pooledWrapperPersistents, region); | |
753 m_pooledWrapperPersistentRegionCount++; | |
754 } else { | |
755 delete region; | |
756 } | |
757 } | |
758 | |
759 PersistentNode* ThreadState::globalRoots() | 717 PersistentNode* ThreadState::globalRoots() |
760 { | 718 { |
761 AtomicallyInitializedStatic(PersistentNode*, anchor = new PersistentAnchor); | 719 AtomicallyInitializedStatic(PersistentNode*, anchor = new PersistentAnchor); |
762 return anchor; | 720 return anchor; |
763 } | 721 } |
764 | 722 |
765 Mutex& ThreadState::globalRootsMutex() | 723 Mutex& ThreadState::globalRootsMutex() |
766 { | 724 { |
767 AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex); | 725 AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex); |
768 return mutex; | 726 return mutex; |
(...skipping 546 matching lines...)
1315 return gcInfo; | 1273 return gcInfo; |
1316 } | 1274 } |
1317 } | 1275 } |
1318 if (needLockForIteration) | 1276 if (needLockForIteration) |
1319 threadAttachMutex().unlock(); | 1277 threadAttachMutex().unlock(); |
1320 return 0; | 1278 return 0; |
1321 } | 1279 } |
1322 #endif | 1280 #endif |
1323 | 1281 |
1324 } | 1282 } |