Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 247 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 258 barrier->doEnterSafePoint(state, stackEnd); | 258 barrier->doEnterSafePoint(state, stackEnd); |
| 259 } | 259 } |
| 260 | 260 |
| 261 volatile int m_canResume; | 261 volatile int m_canResume; |
| 262 volatile int m_unparkedThreadCount; | 262 volatile int m_unparkedThreadCount; |
| 263 Mutex m_mutex; | 263 Mutex m_mutex; |
| 264 ThreadCondition m_parked; | 264 ThreadCondition m_parked; |
| 265 ThreadCondition m_resume; | 265 ThreadCondition m_resume; |
| 266 }; | 266 }; |
| 267 | 267 |
| 268 | |
| 269 BaseHeapPage::BaseHeapPage(PageMemory* storage, const GCInfo* gcInfo, ThreadState* state) | 268 BaseHeapPage::BaseHeapPage(PageMemory* storage, const GCInfo* gcInfo, ThreadState* state) |
| 270 : m_storage(storage) | 269 : m_storage(storage) |
| 271 , m_gcInfo(gcInfo) | 270 , m_gcInfo(gcInfo) |
| 272 , m_threadState(state) | 271 , m_threadState(state) |
| 273 , m_terminating(false) | 272 , m_terminating(false) |
| 274 , m_tracedAfterOrphaned(false) | 273 , m_tracedAfterOrphaned(false) |
| 275 { | 274 { |
| 276 ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this))); | 275 ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this))); |
| 277 } | 276 } |
| 278 | 277 |
| 279 // Statically unfold the heap initialization loop so the compiler statically | 278 // Statically unfold the heap initialization loop so the compiler statically |
| 280 // knows the heap index when using HeapIndexTrait. | 279 // knows the heap index when using HeapIndexTrait. |
| 281 template<int num> struct InitializeHeaps { | 280 template<int num> struct InitializeHeaps { |
| 282 static const int index = num - 1; | 281 static const int index = num - 1; |
| 283 static void init(BaseHeap** heaps, ThreadState* state) | 282 static void init(BaseHeap** heaps, ThreadState* state) |
| 284 { | 283 { |
| 285 InitializeHeaps<index>::init(heaps, state); | 284 InitializeHeaps<index>::init(heaps, state); |
| 286 heaps[index] = new typename HeapIndexTrait<index>::HeapType(state, index); | 285 heaps[index] = new typename HeapIndexTrait<index>::HeapType(state, index); |
| 287 } | 286 } |
| 288 }; | 287 }; |
| 289 template<> struct InitializeHeaps<0> { | 288 template<> struct InitializeHeaps<0> { |
| 290 static void init(BaseHeap** heaps, ThreadState* state) { } | 289 static void init(BaseHeap** heaps, ThreadState* state) { } |
| 291 }; | 290 }; |
| 292 | 291 |
| 293 ThreadState::ThreadState() | 292 ThreadState::ThreadState() |
| 294 : m_thread(currentThread()) | 293 : m_thread(currentThread()) |
| 294 , m_liveWrapperPersistents(new WrapperPersistentRegion()) | |
| 295 , m_pooledWrapperPersistents(0) | |
| 296 , m_numPooledWrapperPersistentRegions(0) | |
| 295 , m_persistents(adoptPtr(new PersistentAnchor())) | 297 , m_persistents(adoptPtr(new PersistentAnchor())) |
| 296 , m_startOfStack(reinterpret_cast<intptr_t*>(getStackStart())) | 298 , m_startOfStack(reinterpret_cast<intptr_t*>(getStackStart())) |
| 297 , m_endOfStack(reinterpret_cast<intptr_t*>(getStackStart())) | 299 , m_endOfStack(reinterpret_cast<intptr_t*>(getStackStart())) |
| 298 , m_safePointScopeMarker(0) | 300 , m_safePointScopeMarker(0) |
| 299 , m_atSafePoint(false) | 301 , m_atSafePoint(false) |
| 300 , m_interruptors() | 302 , m_interruptors() |
| 301 , m_gcRequested(false) | 303 , m_gcRequested(false) |
| 302 , m_forcePreciseGCForTesting(false) | 304 , m_forcePreciseGCForTesting(false) |
| 303 , m_sweepRequested(0) | 305 , m_sweepRequested(0) |
| 304 , m_sweepInProgress(false) | 306 , m_sweepInProgress(false) |
| (...skipping 18 matching lines...) Expand all Loading... | |
| 323 m_sweeperThread = adoptPtr(blink::Platform::current()->createThread("Blink GC Sweeper")); | 325 m_sweeperThread = adoptPtr(blink::Platform::current()->createThread("Blink GC Sweeper")); |
| 324 } | 326 } |
| 325 | 327 |
| 326 ThreadState::~ThreadState() | 328 ThreadState::~ThreadState() |
| 327 { | 329 { |
| 328 checkThread(); | 330 checkThread(); |
| 329 CallbackStack::shutdown(&m_weakCallbackStack); | 331 CallbackStack::shutdown(&m_weakCallbackStack); |
| 330 for (int i = 0; i < NumberOfHeaps; i++) | 332 for (int i = 0; i < NumberOfHeaps; i++) |
| 331 delete m_heaps[i]; | 333 delete m_heaps[i]; |
| 332 deleteAllValues(m_interruptors); | 334 deleteAllValues(m_interruptors); |
| 335 while (m_liveWrapperPersistents) { | |
| 336 WrapperPersistentRegion* region = WrapperPersistentRegion::removeHead(&m_liveWrapperPersistents); | |
| 337 delete region; | |
| 338 } | |
| 339 while (m_pooledWrapperPersistents) { | |
| 340 WrapperPersistentRegion* region = WrapperPersistentRegion::removeHead(&m_pooledWrapperPersistents); | |
| 341 delete region; | |
| 342 } | |
| 333 **s_threadSpecific = 0; | 343 **s_threadSpecific = 0; |
| 334 } | 344 } |
| 335 | 345 |
| 336 void ThreadState::init() | 346 void ThreadState::init() |
| 337 { | 347 { |
| 338 s_threadSpecific = new WTF::ThreadSpecific<ThreadState*>(); | 348 s_threadSpecific = new WTF::ThreadSpecific<ThreadState*>(); |
| 339 s_safePointBarrier = new SafePointBarrier; | 349 s_safePointBarrier = new SafePointBarrier; |
| 340 } | 350 } |
| 341 | 351 |
| 342 void ThreadState::shutdown() | 352 void ThreadState::shutdown() |
| (...skipping 211 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 554 __msan_unpoison(&ptr, sizeof(ptr)); | 564 __msan_unpoison(&ptr, sizeof(ptr)); |
| 555 #endif | 565 #endif |
| 556 Heap::checkAndMarkPointer(visitor, ptr); | 566 Heap::checkAndMarkPointer(visitor, ptr); |
| 557 visitAsanFakeStackForPointer(visitor, ptr); | 567 visitAsanFakeStackForPointer(visitor, ptr); |
| 558 } | 568 } |
| 559 } | 569 } |
| 560 | 570 |
| 561 void ThreadState::visitPersistents(Visitor* visitor) | 571 void ThreadState::visitPersistents(Visitor* visitor) |
| 562 { | 572 { |
| 563 m_persistents->trace(visitor); | 573 m_persistents->trace(visitor); |
| 574 WrapperPersistentRegion::trace(m_liveWrapperPersistents, visitor); | |
| 564 } | 575 } |
| 565 | 576 |
| 566 bool ThreadState::checkAndMarkPointer(Visitor* visitor, Address address) | 577 bool ThreadState::checkAndMarkPointer(Visitor* visitor, Address address) |
| 567 { | 578 { |
| 568 // If thread is terminating ignore conservative pointers. | 579 // If thread is terminating ignore conservative pointers. |
| 569 if (m_isTerminating) | 580 if (m_isTerminating) |
| 570 return false; | 581 return false; |
| 571 | 582 |
| 572 // This checks for normal pages and for large objects which span the extent | 583 // This checks for normal pages and for large objects which span the extent |
| 573 // of several normal pages. | 584 // of several normal pages. |
| (...skipping 99 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 673 { | 684 { |
| 674 CallbackStack::Item* slot = m_weakCallbackStack->allocateEntry(&m_weakCallbackStack); | 685 CallbackStack::Item* slot = m_weakCallbackStack->allocateEntry(&m_weakCallbackStack); |
| 675 *slot = CallbackStack::Item(object, callback); | 686 *slot = CallbackStack::Item(object, callback); |
| 676 } | 687 } |
| 677 | 688 |
| 678 bool ThreadState::popAndInvokeWeakPointerCallback(Visitor* visitor) | 689 bool ThreadState::popAndInvokeWeakPointerCallback(Visitor* visitor) |
| 679 { | 690 { |
| 680 return m_weakCallbackStack->popAndInvokeCallback<WeaknessProcessing>(&m_weakCallbackStack, visitor); | 691 return m_weakCallbackStack->popAndInvokeCallback<WeaknessProcessing>(&m_weakCallbackStack, visitor); |
| 681 } | 692 } |
| 682 | 693 |
| 694 WrapperPersistentRegion* ThreadState::addWrapperPersistentRegion() | |
| 695 { | |
| 696 WrapperPersistentRegion* region; | |
| 697 if (m_numPooledWrapperPersistentRegions) { | |
| 698 region = WrapperPersistentRegion::removeHead(&m_pooledWrapperPersistents); | |
| 699 m_numPooledWrapperPersistentRegions--; | |
| 700 } else { | |
| 701 region = new WrapperPersistentRegion(); | |
| 702 } | |
| 703 ASSERT(region); | |
| 704 WrapperPersistentRegion::insertHead(&m_liveWrapperPersistents, region); | |
| 705 return region; | |
| 706 } | |
| 707 | |
| 708 void ThreadState::removeWrapperPersistentRegion(WrapperPersistentRegion* region) | |
| 709 { | |
| 710 if (!region->removeIfNotLast(&m_liveWrapperPersistents)) | |
|
haraken
2014/09/02 13:00:37
Can we avoid introducing removeIfNotLast and clean
wibling-chromium
2014/09/02 13:55:33
No, I did try having an anchor and it doesn't simp
| |
| 711 return; | |
| 712 | |
| 713 // Region was removed, ie. it was not the last region in the list. | |
| 714 if (m_numPooledWrapperPersistentRegions < 2) { | |
|
haraken
2014/09/02 13:00:37
Add a comment about '< 2' or use a static constant
wibling-chromium
2014/09/02 13:55:33
Added a constant.
| |
| 715 WrapperPersistentRegion::insertHead(&m_pooledWrapperPersistents, region); | |
| 716 m_numPooledWrapperPersistentRegions++; | |
| 717 } else { | |
| 718 delete region; | |
| 719 } | |
| 720 } | |
| 721 | |
| 683 PersistentNode* ThreadState::globalRoots() | 722 PersistentNode* ThreadState::globalRoots() |
| 684 { | 723 { |
| 685 AtomicallyInitializedStatic(PersistentNode*, anchor = new PersistentAnchor); | 724 AtomicallyInitializedStatic(PersistentNode*, anchor = new PersistentAnchor); |
| 686 return anchor; | 725 return anchor; |
| 687 } | 726 } |
| 688 | 727 |
| 689 Mutex& ThreadState::globalRootsMutex() | 728 Mutex& ThreadState::globalRootsMutex() |
| 690 { | 729 { |
| 691 AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex); | 730 AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex); |
| 692 return mutex; | 731 return mutex; |
| (...skipping 501 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1194 return gcInfo; | 1233 return gcInfo; |
| 1195 } | 1234 } |
| 1196 } | 1235 } |
| 1197 if (needLockForIteration) | 1236 if (needLockForIteration) |
| 1198 threadAttachMutex().unlock(); | 1237 threadAttachMutex().unlock(); |
| 1199 return 0; | 1238 return 0; |
| 1200 } | 1239 } |
| 1201 #endif | 1240 #endif |
| 1202 | 1241 |
| 1203 } | 1242 } |
| OLD | NEW |