Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 92 matching lines...) | |
| 103 #ifdef _WIN64 | 103 #ifdef _WIN64 |
| 104 return reinterpret_cast<void*>(__readgsqword(offsetof(NT_TIB64, StackBase))); | 104 return reinterpret_cast<void*>(__readgsqword(offsetof(NT_TIB64, StackBase))); |
| 105 #else | 105 #else |
| 106 return reinterpret_cast<void*>(__readfsdword(offsetof(NT_TIB, StackBase))); | 106 return reinterpret_cast<void*>(__readfsdword(offsetof(NT_TIB, StackBase))); |
| 107 #endif | 107 #endif |
| 108 #else | 108 #else |
| 109 #error Unsupported getStackStart on this platform. | 109 #error Unsupported getStackStart on this platform. |
| 110 #endif | 110 #endif |
| 111 } | 111 } |
| 112 | 112 |
| 113 // The maximum number of WrapperPersistentRegions to keep around in the | |
| 114 // m_pooledWrapperPersistentRegions pool. | |
| 115 static const size_t MaxPooledWrapperPersistentRegionCount = 2; | |
| 116 | |
| 117 WTF::ThreadSpecific<ThreadState*>* ThreadState::s_threadSpecific = 0; | 113 WTF::ThreadSpecific<ThreadState*>* ThreadState::s_threadSpecific = 0; |
| 118 uint8_t ThreadState::s_mainThreadStateStorage[sizeof(ThreadState)]; | 114 uint8_t ThreadState::s_mainThreadStateStorage[sizeof(ThreadState)]; |
| 119 SafePointBarrier* ThreadState::s_safePointBarrier = 0; | 115 SafePointBarrier* ThreadState::s_safePointBarrier = 0; |
| 120 bool ThreadState::s_inGC = false; | 116 bool ThreadState::s_inGC = false; |
| 121 | 117 |
| 122 static Mutex& threadAttachMutex() | 118 static Mutex& threadAttachMutex() |
| 123 { | 119 { |
| 124 AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex); | 120 AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex); |
| 125 return mutex; | 121 return mutex; |
| 126 } | 122 } |
| (...skipping 165 matching lines...) | |
| 292 InitializeHeaps<index>::init(heaps, state); | 288 InitializeHeaps<index>::init(heaps, state); |
| 293 heaps[index] = new typename HeapIndexTrait<index>::HeapType(state, index); | 289 heaps[index] = new typename HeapIndexTrait<index>::HeapType(state, index); |
| 294 } | 290 } |
| 295 }; | 291 }; |
| 296 template<> struct InitializeHeaps<0> { | 292 template<> struct InitializeHeaps<0> { |
| 297 static void init(BaseHeap** heaps, ThreadState* state) { } | 293 static void init(BaseHeap** heaps, ThreadState* state) { } |
| 298 }; | 294 }; |
| 299 | 295 |
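The InitializeHeaps template above (its opening lines fall inside the skipped region) fills the m_heaps array by recursing on a compile-time index, with the InitializeHeaps<0> specialization terminating the recursion. A minimal standalone sketch of that pattern, using simplified stand-in types rather than Blink's BaseHeap and HeapIndexTrait:

```cpp
#include <cassert>

// Stand-in for Blink's BaseHeap hierarchy; only records which slot it occupies.
struct BaseHeap {
    explicit BaseHeap(int index) : index(index) { }
    int index;
};

const int NumberOfHeaps = 4;

// Generic case: initialize all lower slots first, then slot index - 1.
template<int index>
struct InitializeHeaps {
    static void init(BaseHeap** heaps)
    {
        InitializeHeaps<index - 1>::init(heaps);
        heaps[index - 1] = new BaseHeap(index - 1);
    }
};

// Specialization that terminates the compile-time recursion.
template<>
struct InitializeHeaps<0> {
    static void init(BaseHeap**) { }
};

int main()
{
    BaseHeap* heaps[NumberOfHeaps];
    InitializeHeaps<NumberOfHeaps>::init(heaps);
    for (int i = 0; i < NumberOfHeaps; ++i) {
        assert(heaps[i]->index == i);
        delete heaps[i];
    }
    return 0;
}
```

The recursion unrolls entirely at compile time, so the single call in the ThreadState constructor expands to one assignment per heap slot.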
| 300 ThreadState::ThreadState() | 296 ThreadState::ThreadState() |
| 301 : m_thread(currentThread()) | 297 : m_thread(currentThread()) |
| 302 , m_liveWrapperPersistents(new WrapperPersistentRegion()) | |
| 303 , m_pooledWrapperPersistents(0) | |
| 304 , m_pooledWrapperPersistentRegionCount(0) | |
| 305 , m_persistents(adoptPtr(new PersistentAnchor())) | 298 , m_persistents(adoptPtr(new PersistentAnchor())) |
| 306 , m_startOfStack(reinterpret_cast<intptr_t*>(getStackStart())) | 299 , m_startOfStack(reinterpret_cast<intptr_t*>(getStackStart())) |
| 307 , m_endOfStack(reinterpret_cast<intptr_t*>(getStackStart())) | 300 , m_endOfStack(reinterpret_cast<intptr_t*>(getStackStart())) |
| 308 , m_safePointScopeMarker(0) | 301 , m_safePointScopeMarker(0) |
| 309 , m_atSafePoint(false) | 302 , m_atSafePoint(false) |
| 310 , m_interruptors() | 303 , m_interruptors() |
| 311 , m_gcRequested(false) | 304 , m_gcRequested(false) |
| 312 , m_forcePreciseGCForTesting(false) | 305 , m_forcePreciseGCForTesting(false) |
| 313 , m_sweepRequested(0) | 306 , m_sweepRequested(0) |
| 314 , m_sweepInProgress(false) | 307 , m_sweepInProgress(false) |
| 315 , m_noAllocationCount(0) | 308 , m_noAllocationCount(0) |
| 316 , m_inGC(false) | 309 , m_inGC(false) |
| 317 , m_isTerminating(false) | 310 , m_isTerminating(false) |
| 318 , m_shouldFlushHeapDoesNotContainCache(false) | 311 , m_shouldFlushHeapDoesNotContainCache(false) |
| 319 , m_lowCollectionRate(false) | 312 , m_lowCollectionRate(false) |
| 320 , m_numberOfSweeperTasks(0) | 313 , m_numberOfSweeperTasks(0) |
| | 314 , m_traceDOMWrappers(0) |
tkent 2014/10/15 03:48:35:
nit: We can use |nullptr|.
| 321 #if defined(ADDRESS_SANITIZER) | 315 #if defined(ADDRESS_SANITIZER) |
| 322 , m_asanFakeStack(__asan_get_current_fake_stack()) | 316 , m_asanFakeStack(__asan_get_current_fake_stack()) |
| 323 #endif | 317 #endif |
| 324 { | 318 { |
| 325 ASSERT(!**s_threadSpecific); | 319 ASSERT(!**s_threadSpecific); |
| 326 **s_threadSpecific = this; | 320 **s_threadSpecific = this; |
| 327 | 321 |
| 328 InitializeHeaps<NumberOfHeaps>::init(m_heaps, this); | 322 InitializeHeaps<NumberOfHeaps>::init(m_heaps, this); |
| 329 | 323 |
| 330 m_weakCallbackStack = new CallbackStack(); | 324 m_weakCallbackStack = new CallbackStack(); |
| 331 | 325 |
| 332 if (blink::Platform::current()) | 326 if (blink::Platform::current()) |
| 333 m_sweeperThread = adoptPtr(blink::Platform::current()->createThread("Blink GC Sweeper")); | 327 m_sweeperThread = adoptPtr(blink::Platform::current()->createThread("Blink GC Sweeper")); |
| 334 } | 328 } |
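tkent's nit above asks for |nullptr| rather than the literal 0 when initializing the new m_traceDOMWrappers member. A minimal standalone sketch of what that suggestion amounts to, with simplified stand-in types (not Blink's real Visitor or ThreadState):

```cpp
// Hypothetical, simplified shape of the patch with tkent's nit applied.
class Visitor;
typedef void (*TraceDOMWrappersCallback)(Visitor*);

class ThreadStateSketch {
public:
    ThreadStateSketch()
        : m_traceDOMWrappers(nullptr) // rather than m_traceDOMWrappers(0)
    {
    }

    void visitPersistents(Visitor* visitor)
    {
        // The nullptr-initialized callback makes this guard read naturally.
        if (m_traceDOMWrappers)
            m_traceDOMWrappers(visitor);
    }

private:
    TraceDOMWrappersCallback m_traceDOMWrappers;
};

int main()
{
    ThreadStateSketch state;
    state.visitPersistents(nullptr); // no callback registered, so nothing is called
    return 0;
}
```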
| 335 | 329 |
| 336 ThreadState::~ThreadState() | 330 ThreadState::~ThreadState() |
| 337 { | 331 { |
| 338 checkThread(); | 332 checkThread(); |
| 339 delete m_weakCallbackStack; | 333 delete m_weakCallbackStack; |
| 340 m_weakCallbackStack = 0; | 334 m_weakCallbackStack = 0; |
| 341 for (int i = 0; i < NumberOfHeaps; i++) | 335 for (int i = 0; i < NumberOfHeaps; i++) |
| 342 delete m_heaps[i]; | 336 delete m_heaps[i]; |
| 343 deleteAllValues(m_interruptors); | 337 deleteAllValues(m_interruptors); |
| 344 while (m_liveWrapperPersistents) { | |
| 345 WrapperPersistentRegion* region = WrapperPersistentRegion::removeHead(&m_liveWrapperPersistents); | |
| 346 delete region; | |
| 347 } | |
| 348 while (m_pooledWrapperPersistents) { | |
| 349 WrapperPersistentRegion* region = WrapperPersistentRegion::removeHead(&m_pooledWrapperPersistents); | |
| 350 delete region; | |
| 351 } | |
| 352 **s_threadSpecific = 0; | 338 **s_threadSpecific = 0; |
| 353 } | 339 } |
| 354 | 340 |
| 355 void ThreadState::init() | 341 void ThreadState::init() |
| 356 { | 342 { |
| 357 s_threadSpecific = new WTF::ThreadSpecific<ThreadState*>(); | 343 s_threadSpecific = new WTF::ThreadSpecific<ThreadState*>(); |
| 358 s_safePointBarrier = new SafePointBarrier; | 344 s_safePointBarrier = new SafePointBarrier; |
| 359 } | 345 } |
| 360 | 346 |
| 361 void ThreadState::shutdown() | 347 void ThreadState::shutdown() |
| (...skipping 215 matching lines...) | |
| 577 __msan_unpoison(&ptr, sizeof(ptr)); | 563 __msan_unpoison(&ptr, sizeof(ptr)); |
| 578 #endif | 564 #endif |
| 579 Heap::checkAndMarkPointer(visitor, ptr); | 565 Heap::checkAndMarkPointer(visitor, ptr); |
| 580 visitAsanFakeStackForPointer(visitor, ptr); | 566 visitAsanFakeStackForPointer(visitor, ptr); |
| 581 } | 567 } |
| 582 } | 568 } |
| 583 | 569 |
| 584 void ThreadState::visitPersistents(Visitor* visitor) | 570 void ThreadState::visitPersistents(Visitor* visitor) |
| 585 { | 571 { |
| 586 m_persistents->trace(visitor); | 572 m_persistents->trace(visitor); |
| 587 { | 573 if (m_traceDOMWrappers) { |
| 588 TRACE_EVENT0("blink_gc", "WrapperPersistentRegion::trace"); | 574 TRACE_EVENT0("blink_gc", "V8GCController::traceDOMWrappers"); |
sof 2014/10/12 20:40:43:
Push this down into V8GCController::traceDOMWrappers.
| 589 WrapperPersistentRegion::trace(m_liveWrapperPersistents, visitor); | 575 m_traceDOMWrappers(visitor); |
| 590 } | 576 } |
| 591 } | 577 } |
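sof's comment above suggests moving the TRACE_EVENT0 scope out of ThreadState::visitPersistents and into V8GCController::traceDOMWrappers itself. A hedged, self-contained sketch of that shape; the macro, Visitor and V8GCController below are mocked purely for illustration, since the real V8GCController implementation is not part of this diff:

```cpp
#include <cstdio>

// Stand-in for Blink's scoped TRACE_EVENT0 macro, reduced to a single print.
#define TRACE_EVENT0(category, name) std::printf("trace: %s %s\n", category, name)

class Visitor { };

struct V8GCController {
    static void traceDOMWrappers(Visitor* visitor)
    {
        // Per the suggestion, the trace scope lives next to the work it measures.
        TRACE_EVENT0("blink_gc", "V8GCController::traceDOMWrappers");
        (void)visitor; // ... trace V8 wrapper objects using |visitor| ...
    }
};

typedef void (*TraceDOMWrappersCallback)(Visitor*);

// The ThreadState caller then no longer needs its own TRACE_EVENT0 block.
void visitPersistents(TraceDOMWrappersCallback traceDOMWrappers, Visitor* visitor)
{
    if (traceDOMWrappers)
        traceDOMWrappers(visitor);
}

int main()
{
    Visitor visitor;
    visitPersistents(&V8GCController::traceDOMWrappers, &visitor);
    return 0;
}
```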
| 592 | 578 |
| 593 #if ENABLE(GC_PROFILE_MARKING) | 579 #if ENABLE(GC_PROFILE_MARKING) |
| 594 const GCInfo* ThreadState::findGCInfo(Address address) | 580 const GCInfo* ThreadState::findGCInfo(Address address) |
| 595 { | 581 { |
| 596 BaseHeapPage* page = heapPageFromAddress(address); | 582 BaseHeapPage* page = heapPageFromAddress(address); |
| 597 if (page) { | 583 if (page) { |
| 598 return page->findGCInfo(address); | 584 return page->findGCInfo(address); |
| 599 } | 585 } |
| (...skipping 88 matching lines...) | |
| 688 // registered as objects on orphaned pages. We cannot assert this here since | 674 // registered as objects on orphaned pages. We cannot assert this here since |
| 689 // we might have an off-heap collection. We assert it in | 675 // we might have an off-heap collection. We assert it in |
| 690 // Heap::pushWeakObjectPointerCallback. | 676 // Heap::pushWeakObjectPointerCallback. |
| 691 if (CallbackStack::Item* item = m_weakCallbackStack->pop()) { | 677 if (CallbackStack::Item* item = m_weakCallbackStack->pop()) { |
| 692 item->call(visitor); | 678 item->call(visitor); |
| 693 return true; | 679 return true; |
| 694 } | 680 } |
| 695 return false; | 681 return false; |
| 696 } | 682 } |
| 697 | 683 |
| 698 WrapperPersistentRegion* ThreadState::takeWrapperPersistentRegion() | |
| 699 { | |
| 700 WrapperPersistentRegion* region; | |
| 701 if (m_pooledWrapperPersistentRegionCount) { | |
| 702 region = WrapperPersistentRegion::removeHead(&m_pooledWrapperPersistents); | |
| 703 m_pooledWrapperPersistentRegionCount--; | |
| 704 } else { | |
| 705 region = new WrapperPersistentRegion(); | |
| 706 } | |
| 707 ASSERT(region); | |
| 708 WrapperPersistentRegion::insertHead(&m_liveWrapperPersistents, region); | |
| 709 return region; | |
| 710 } | |
| 711 | |
| 712 void ThreadState::freeWrapperPersistentRegion(WrapperPersistentRegion* region) | |
| 713 { | |
| 714 if (!region->removeIfNotLast(&m_liveWrapperPersistents)) | |
| 715 return; | |
| 716 | |
| 717 // Region was removed, ie. it was not the last region in the list. | |
| 718 if (m_pooledWrapperPersistentRegionCount < MaxPooledWrapperPersistentRegionCount) { | |
| 719 WrapperPersistentRegion::insertHead(&m_pooledWrapperPersistents, region); | |
| 720 m_pooledWrapperPersistentRegionCount++; | |
| 721 } else { | |
| 722 delete region; | |
| 723 } | |
| 724 } | |
| 725 | |
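The two functions deleted above implemented a small bounded pool of WrapperPersistentRegions: taking a region preferred a pooled one over a fresh allocation, and freeing a region kept at most MaxPooledWrapperPersistentRegionCount of them around for reuse. A standalone sketch of the pooling half of that pattern, with the live-region list omitted and simplified stand-in types (not Blink's real WrapperPersistentRegion API):

```cpp
#include <cassert>
#include <cstddef>

// Simplified stand-ins for WrapperPersistentRegion and its list helpers.
struct Region {
    Region* next = nullptr;
};

static Region* removeHead(Region** head)
{
    Region* region = *head;
    if (region)
        *head = region->next;
    return region;
}

static void insertHead(Region** head, Region* region)
{
    region->next = *head;
    *head = region;
}

class RegionPool {
public:
    ~RegionPool()
    {
        while (Region* region = removeHead(&m_pooled))
            delete region;
    }

    Region* take()
    {
        // Reuse a pooled region if one is available, otherwise allocate.
        if (m_pooledCount) {
            --m_pooledCount;
            return removeHead(&m_pooled);
        }
        return new Region();
    }

    void free(Region* region)
    {
        // Keep at most kMaxPooled regions around; delete the rest.
        if (m_pooledCount < kMaxPooled) {
            insertHead(&m_pooled, region);
            ++m_pooledCount;
        } else {
            delete region;
        }
    }

private:
    static const size_t kMaxPooled = 2; // mirrors MaxPooledWrapperPersistentRegionCount
    Region* m_pooled = nullptr;
    size_t m_pooledCount = 0;
};

int main()
{
    RegionPool pool;
    Region* a = pool.take(); // allocated fresh: the pool starts empty
    pool.free(a);            // returned to the pool, not deleted
    Region* b = pool.take(); // reused from the pool
    assert(a == b);
    pool.free(b);
    return 0;
}
```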
| 726 PersistentNode* ThreadState::globalRoots() | 684 PersistentNode* ThreadState::globalRoots() |
| 727 { | 685 { |
| 728 AtomicallyInitializedStatic(PersistentNode*, anchor = new PersistentAnchor); | 686 AtomicallyInitializedStatic(PersistentNode*, anchor = new PersistentAnchor); |
| 729 return anchor; | 687 return anchor; |
| 730 } | 688 } |
| 731 | 689 |
| 732 Mutex& ThreadState::globalRootsMutex() | 690 Mutex& ThreadState::globalRootsMutex() |
| 733 { | 691 { |
| 734 AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex); | 692 AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex); |
| 735 return mutex; | 693 return mutex; |
| (...skipping 546 matching lines...) | |
| 1282 return gcInfo; | 1240 return gcInfo; |
| 1283 } | 1241 } |
| 1284 } | 1242 } |
| 1285 if (needLockForIteration) | 1243 if (needLockForIteration) |
| 1286 threadAttachMutex().unlock(); | 1244 threadAttachMutex().unlock(); |
| 1287 return 0; | 1245 return 0; |
| 1288 } | 1246 } |
| 1289 #endif | 1247 #endif |
| 1290 | 1248 |
| 1291 } | 1249 } |