OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 77 matching lines...) | |
88 #ifdef _WIN64 | 88 #ifdef _WIN64 |
89 return reinterpret_cast<void*>(__readgsqword(offsetof(NT_TIB64, StackBase))); | 89 return reinterpret_cast<void*>(__readgsqword(offsetof(NT_TIB64, StackBase))); |
90 #else | 90 #else |
91 return reinterpret_cast<void*>(__readfsdword(offsetof(NT_TIB, StackBase))); | 91 return reinterpret_cast<void*>(__readfsdword(offsetof(NT_TIB, StackBase))); |
92 #endif | 92 #endif |
93 #else | 93 #else |
94 #error Unsupported getStackStart on this platform. | 94 #error Unsupported getStackStart on this platform. |
95 #endif | 95 #endif |
96 } | 96 } |
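A note on the Windows branch above: the __readfsdword/__readgsqword intrinsics read StackBase straight out of the Thread Information Block (fs segment on x86, gs on x64). For illustration only, the same lookup can be written through the NtCurrentTeb() accessor; a minimal sketch assuming <windows.h> (not what this patch compiles to, just the readable equivalent):

    // Sketch: the same stack-base lookup via NtCurrentTeb(), for illustration
    // only. Assumes <windows.h>; the patch itself uses raw segment reads.
    #include <windows.h>

    static void* stackBaseViaTeb()
    {
        // The TEB begins with an NT_TIB, whose StackBase member is the highest
        // address of the current thread's stack.
        return reinterpret_cast<NT_TIB*>(NtCurrentTeb())->StackBase;
    }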
97 | 97 |
98 // The maximum number of WrapperPersistentRegions to keep around in the | |
99 // m_pooledWrapperPersistentRegions pool. | |
100 static const size_t MaxPooledWrapperPersistentRegionCount = 2; | |
98 | 101 |
99 WTF::ThreadSpecific<ThreadState*>* ThreadState::s_threadSpecific = 0; | 102 WTF::ThreadSpecific<ThreadState*>* ThreadState::s_threadSpecific = 0; |
100 uint8_t ThreadState::s_mainThreadStateStorage[sizeof(ThreadState)]; | 103 uint8_t ThreadState::s_mainThreadStateStorage[sizeof(ThreadState)]; |
101 SafePointBarrier* ThreadState::s_safePointBarrier = 0; | 104 SafePointBarrier* ThreadState::s_safePointBarrier = 0; |
102 bool ThreadState::s_inGC = false; | 105 bool ThreadState::s_inGC = false; |
103 | 106 |
104 static Mutex& threadAttachMutex() | 107 static Mutex& threadAttachMutex() |
105 { | 108 { |
106 AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex); | 109 AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex); |
107 return mutex; | 110 return mutex; |
(...skipping 150 matching lines...) | |
258 barrier->doEnterSafePoint(state, stackEnd); | 261 barrier->doEnterSafePoint(state, stackEnd); |
259 } | 262 } |
260 | 263 |
261 volatile int m_canResume; | 264 volatile int m_canResume; |
262 volatile int m_unparkedThreadCount; | 265 volatile int m_unparkedThreadCount; |
263 Mutex m_mutex; | 266 Mutex m_mutex; |
264 ThreadCondition m_parked; | 267 ThreadCondition m_parked; |
265 ThreadCondition m_resume; | 268 ThreadCondition m_resume; |
266 }; | 269 }; |
267 | 270 |
268 | |
269 BaseHeapPage::BaseHeapPage(PageMemory* storage, const GCInfo* gcInfo, ThreadState* state) | 271 BaseHeapPage::BaseHeapPage(PageMemory* storage, const GCInfo* gcInfo, ThreadState* state) |
270 : m_storage(storage) | 272 : m_storage(storage) |
271 , m_gcInfo(gcInfo) | 273 , m_gcInfo(gcInfo) |
272 , m_threadState(state) | 274 , m_threadState(state) |
273 , m_terminating(false) | 275 , m_terminating(false) |
274 , m_tracedAfterOrphaned(false) | 276 , m_tracedAfterOrphaned(false) |
275 { | 277 { |
276 ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this))); | 278 ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this))); |
277 } | 279 } |
278 | 280 |
279 // Statically unfold the heap initialization loop so the compiler statically | 281 // Statically unfold the heap initialization loop so the compiler statically |
280 // knows the heap index when using HeapIndexTrait. | 282 // knows the heap index when using HeapIndexTrait. |
281 template<int num> struct InitializeHeaps { | 283 template<int num> struct InitializeHeaps { |
282 static const int index = num - 1; | 284 static const int index = num - 1; |
283 static void init(BaseHeap** heaps, ThreadState* state) | 285 static void init(BaseHeap** heaps, ThreadState* state) |
284 { | 286 { |
285 InitializeHeaps<index>::init(heaps, state); | 287 InitializeHeaps<index>::init(heaps, state); |
286 heaps[index] = new typename HeapIndexTrait<index>::HeapType(state, index); | 288 heaps[index] = new typename HeapIndexTrait<index>::HeapType(state, index); |
287 } | 289 } |
288 }; | 290 }; |
289 template<> struct InitializeHeaps<0> { | 291 template<> struct InitializeHeaps<0> { |
290 static void init(BaseHeap** heaps, ThreadState* state) { } | 292 static void init(BaseHeap** heaps, ThreadState* state) { } |
291 }; | 293 }; |
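The InitializeHeaps recursion above is the usual compile-time loop-unrolling idiom: each instantiation handles one slot with index as a constant expression, so HeapIndexTrait<index>::HeapType can resolve to a different concrete heap type per slot, which a runtime for-loop could not express. A minimal self-contained sketch of the pattern with illustrative names (not Blink code):

    // Sketch of recursive-template unrolling: fill table[i] = i * i with the
    // index known at compile time in every instantiation.
    template<int N> struct FillSquares {
        static void fill(int* table)
        {
            FillSquares<N - 1>::fill(table); // recurse toward the base case first
            table[N - 1] = (N - 1) * (N - 1); // N - 1 is a compile-time constant
        }
    };
    template<> struct FillSquares<0> {
        static void fill(int*) { } // base case terminates the recursion
    };

    // Usage: FillSquares<4>::fill(table) expands to four constant-index stores.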
292 | 294 |
293 ThreadState::ThreadState() | 295 ThreadState::ThreadState() |
294 : m_thread(currentThread()) | 296 : m_thread(currentThread()) |
297 , m_liveWrapperPersistents(new WrapperPersistentRegion()) | |
298 , m_pooledWrapperPersistents(0) | |
299 , m_pooledWrapperPersistentRegionCount(0) | |
295 , m_persistents(adoptPtr(new PersistentAnchor())) | 300 , m_persistents(adoptPtr(new PersistentAnchor())) |
296 , m_startOfStack(reinterpret_cast<intptr_t*>(getStackStart())) | 301 , m_startOfStack(reinterpret_cast<intptr_t*>(getStackStart())) |
297 , m_endOfStack(reinterpret_cast<intptr_t*>(getStackStart())) | 302 , m_endOfStack(reinterpret_cast<intptr_t*>(getStackStart())) |
298 , m_safePointScopeMarker(0) | 303 , m_safePointScopeMarker(0) |
299 , m_atSafePoint(false) | 304 , m_atSafePoint(false) |
300 , m_interruptors() | 305 , m_interruptors() |
301 , m_gcRequested(false) | 306 , m_gcRequested(false) |
302 , m_forcePreciseGCForTesting(false) | 307 , m_forcePreciseGCForTesting(false) |
303 , m_sweepRequested(0) | 308 , m_sweepRequested(0) |
304 , m_sweepInProgress(false) | 309 , m_sweepInProgress(false) |
(...skipping 18 matching lines...) | |
323 m_sweeperThread = adoptPtr(blink::Platform::current()->createThread("Blink GC Sweeper")); | 328 m_sweeperThread = adoptPtr(blink::Platform::current()->createThread("Blink GC Sweeper")); |
324 } | 329 } |
325 | 330 |
326 ThreadState::~ThreadState() | 331 ThreadState::~ThreadState() |
327 { | 332 { |
328 checkThread(); | 333 checkThread(); |
329 CallbackStack::shutdown(&m_weakCallbackStack); | 334 CallbackStack::shutdown(&m_weakCallbackStack); |
330 for (int i = 0; i < NumberOfHeaps; i++) | 335 for (int i = 0; i < NumberOfHeaps; i++) |
331 delete m_heaps[i]; | 336 delete m_heaps[i]; |
332 deleteAllValues(m_interruptors); | 337 deleteAllValues(m_interruptors); |
338 while (m_liveWrapperPersistents) { | |
339 WrapperPersistentRegion* region = WrapperPersistentRegion::removeHead(&m_liveWrapperPersistents); | |
340 delete region; | |
341 } | |
342 while (m_pooledWrapperPersistents) { | |
343 WrapperPersistentRegion* region = WrapperPersistentRegion::removeHead(&m_pooledWrapperPersistents); | |
344 delete region; | |
345 } | |
333 **s_threadSpecific = 0; | 346 **s_threadSpecific = 0; |
334 } | 347 } |
335 | 348 |
336 void ThreadState::init() | 349 void ThreadState::init() |
337 { | 350 { |
338 s_threadSpecific = new WTF::ThreadSpecific<ThreadState*>(); | 351 s_threadSpecific = new WTF::ThreadSpecific<ThreadState*>(); |
339 s_safePointBarrier = new SafePointBarrier; | 352 s_safePointBarrier = new SafePointBarrier; |
340 } | 353 } |
341 | 354 |
342 void ThreadState::shutdown() | 355 void ThreadState::shutdown() |
(...skipping 211 matching lines...) | |
554 __msan_unpoison(&ptr, sizeof(ptr)); | 567 __msan_unpoison(&ptr, sizeof(ptr)); |
555 #endif | 568 #endif |
556 Heap::checkAndMarkPointer(visitor, ptr); | 569 Heap::checkAndMarkPointer(visitor, ptr); |
557 visitAsanFakeStackForPointer(visitor, ptr); | 570 visitAsanFakeStackForPointer(visitor, ptr); |
558 } | 571 } |
559 } | 572 } |
560 | 573 |
561 void ThreadState::visitPersistents(Visitor* visitor) | 574 void ThreadState::visitPersistents(Visitor* visitor) |
562 { | 575 { |
563 m_persistents->trace(visitor); | 576 m_persistents->trace(visitor); |
577 WrapperPersistentRegion::trace(m_liveWrapperPersistents, visitor); | |
564 } | 578 } |
565 | 579 |
566 bool ThreadState::checkAndMarkPointer(Visitor* visitor, Address address) | 580 bool ThreadState::checkAndMarkPointer(Visitor* visitor, Address address) |
567 { | 581 { |
568 // If thread is terminating, ignore conservative pointers. | 582 // If thread is terminating, ignore conservative pointers. |
569 if (m_isTerminating) | 583 if (m_isTerminating) |
570 return false; | 584 return false; |
571 | 585 |
572 // This checks for normal pages and for large objects which span the extent | 586 // This checks for normal pages and for large objects which span the extent |
573 // of several normal pages. | 587 // of several normal pages. |
(...skipping 99 matching lines...) | |
673 { | 687 { |
674 CallbackStack::Item* slot = m_weakCallbackStack->allocateEntry(&m_weakCallbackStack); | 688 CallbackStack::Item* slot = m_weakCallbackStack->allocateEntry(&m_weakCallbackStack); |
675 *slot = CallbackStack::Item(object, callback); | 689 *slot = CallbackStack::Item(object, callback); |
676 } | 690 } |
677 | 691 |
678 bool ThreadState::popAndInvokeWeakPointerCallback(Visitor* visitor) | 692 bool ThreadState::popAndInvokeWeakPointerCallback(Visitor* visitor) |
679 { | 693 { |
680 return m_weakCallbackStack->popAndInvokeCallback<WeaknessProcessing>(&m_weakCallbackStack, visitor); | 694 return m_weakCallbackStack->popAndInvokeCallback<WeaknessProcessing>(&m_weakCallbackStack, visitor); |
681 } | 695 } |
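Together, pushWeakPointerCallback and popAndInvokeWeakPointerCallback treat m_weakCallbackStack as a work list: marking pushes (object, callback) pairs, and weakness processing later pops and invokes them until the stack is drained. A simplified model of that contract (the real CallbackStack is a chunked structure grown through allocateEntry; a std::vector stands in here for illustration):

    // Simplified model of the weak-callback work list: push (object, callback)
    // pairs during marking, then pop and invoke them until the list is empty.
    #include <vector>

    class Visitor;
    typedef void (*WeakPointerCallback)(Visitor*, void*);

    class SimpleCallbackStack {
    public:
        void push(void* object, WeakPointerCallback callback)
        {
            Item item = { object, callback };
            m_items.push_back(item);
        }
        // Returns false once drained, mirroring popAndInvokeCallback's contract.
        bool popAndInvoke(Visitor* visitor)
        {
            if (m_items.empty())
                return false;
            Item item = m_items.back();
            m_items.pop_back();
            item.callback(visitor, item.object);
            return true;
        }
    private:
        struct Item { void* object; WeakPointerCallback callback; };
        std::vector<Item> m_items;
    };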
682 | 696 |
697 WrapperPersistentRegion* ThreadState::addWrapperPersistentRegion() | |
haraken 2014/09/02 14:53:51: takeWrapperPersistentRegion & freeWrapperPersistentRegion
wibling-chromium 2014/09/03 07:51:42: Done.
698 { | |
699 WrapperPersistentRegion* region; | |
700 if (m_pooledWrapperPersistentRegionCount) { | |
701 region = WrapperPersistentRegion::removeHead(&m_pooledWrapperPersistents); | |
702 m_pooledWrapperPersistentRegionCount--; | |
703 } else { | |
704 region = new WrapperPersistentRegion(); | |
705 } | |
706 ASSERT(region); | |
707 WrapperPersistentRegion::insertHead(&m_liveWrapperPersistents, region); | |
708 return region; | |
709 } | |
710 | |
711 void ThreadState::removeWrapperPersistentRegion(WrapperPersistentRegion* region) | |
712 { | |
713 if (!region->removeIfNotLast(&m_liveWrapperPersistents)) | |
714 return; | |
715 | |
716 // Region was removed, i.e. it was not the last region in the list. | |
717 if (m_pooledWrapperPersistentRegionCount < MaxPooledWrapperPersistentRegionCount) { | |
718 WrapperPersistentRegion::insertHead(&m_pooledWrapperPersistents, region); | |
719 m_pooledWrapperPersistentRegionCount++; | |
720 } else { | |
721 delete region; | |
722 } | |
723 } | |
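addWrapperPersistentRegion and removeWrapperPersistentRegion above form a small bounded free list: a freed region is parked on m_pooledWrapperPersistents, capped at MaxPooledWrapperPersistentRegionCount (two), so rapid create/drop cycles reuse a region instead of hitting the allocator while the memory held by idle regions stays bounded. A generic sketch of the same bounded-pool idea, using a hypothetical Region type rather than the Blink classes:

    // Generic bounded free-list pool: take() prefers a pooled node, free()
    // keeps at most kMaxPooled nodes around and deletes the rest.
    #include <cstddef>

    struct Region { Region* next; };

    class BoundedRegionPool {
    public:
        BoundedRegionPool() : m_free(0), m_count(0) { }
        Region* take()
        {
            if (!m_free)
                return new Region(); // pool empty: fall back to the allocator
            Region* region = m_free;
            m_free = region->next;
            --m_count;
            return region;
        }
        void free(Region* region)
        {
            if (m_count >= kMaxPooled) { // cap the memory held by idle regions
                delete region;
                return;
            }
            region->next = m_free;
            m_free = region;
            ++m_count;
        }
    private:
        static const size_t kMaxPooled = 2;
        Region* m_free;
        size_t m_count;
    };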
724 | |
683 PersistentNode* ThreadState::globalRoots() | 725 PersistentNode* ThreadState::globalRoots() |
684 { | 726 { |
685 AtomicallyInitializedStatic(PersistentNode*, anchor = new PersistentAnchor); | 727 AtomicallyInitializedStatic(PersistentNode*, anchor = new PersistentAnchor); |
686 return anchor; | 728 return anchor; |
687 } | 729 } |
688 | 730 |
689 Mutex& ThreadState::globalRootsMutex() | 731 Mutex& ThreadState::globalRootsMutex() |
690 { | 732 { |
691 AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex); | 733 AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex); |
692 return mutex; | 734 return mutex; |
(...skipping 501 matching lines...) | |
1194 return gcInfo; | 1236 return gcInfo; |
1195 } | 1237 } |
1196 } | 1238 } |
1197 if (needLockForIteration) | 1239 if (needLockForIteration) |
1198 threadAttachMutex().unlock(); | 1240 threadAttachMutex().unlock(); |
1199 return 0; | 1241 return 0; |
1200 } | 1242 } |
1201 #endif | 1243 #endif |
1202 | 1244 |
1203 } | 1245 } |