Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 247 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 258 barrier->doEnterSafePoint(state, stackEnd); | 258 barrier->doEnterSafePoint(state, stackEnd); |
| 259 } | 259 } |
| 260 | 260 |
| 261 volatile int m_canResume; | 261 volatile int m_canResume; |
| 262 volatile int m_unparkedThreadCount; | 262 volatile int m_unparkedThreadCount; |
| 263 Mutex m_mutex; | 263 Mutex m_mutex; |
| 264 ThreadCondition m_parked; | 264 ThreadCondition m_parked; |
| 265 ThreadCondition m_resume; | 265 ThreadCondition m_resume; |
| 266 }; | 266 }; |
| 267 | 267 |
| 268 | |
// Constructs the shared page header for a heap page.
// |storage| is the PageMemory backing this page, |gcInfo| describes the
// objects stored on the page (presumably null/unused for pages mixing types
// -- confirm against callers), and |state| is the ThreadState of the owning
// thread. Pages start out live (not terminating, not orphan-traced).
BaseHeapPage::BaseHeapPage(PageMemory* storage, const GCInfo* gcInfo, ThreadState* state)
    : m_storage(storage)
    , m_gcInfo(gcInfo)
    , m_threadState(state)
    , m_terminating(false)
    , m_tracedAfterOrphaned(false)
{
    // Page headers must live at a recognizable in-page offset so that
    // conservative pointer scanning can find them.
    ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this)));
}
| 278 | 277 |
// Statically unfold the heap initialization loop so the compiler statically
// knows the heap index when using HeapIndexTrait.
template<int num> struct InitializeHeaps {
    static const int index = num - 1;
    static void init(BaseHeap** heaps, ThreadState* state)
    {
        // Recurse first so heaps[0 .. index-1] are constructed before
        // heaps[index]; the heap type for each slot comes from
        // HeapIndexTrait<index>.
        InitializeHeaps<index>::init(heaps, state);
        heaps[index] = new typename HeapIndexTrait<index>::HeapType(state, index);
    }
};
// Recursion base case: zero heaps left to initialize.
template<> struct InitializeHeaps<0> {
    static void init(BaseHeap** heaps, ThreadState* state) { }
};
| 292 | 291 |
| 293 ThreadState::ThreadState() | 292 ThreadState::ThreadState() |
| 294 : m_thread(currentThread()) | 293 : m_thread(currentThread()) |
| 294 , m_wrapperPersistents(new WrapperPersistentRegion()) | |
| 295 , m_wrapperPersistentRegionPool(0) | |
| 296 , m_wrapperPersistentRegionPoolSize(0) | |
| 295 , m_persistents(adoptPtr(new PersistentAnchor())) | 297 , m_persistents(adoptPtr(new PersistentAnchor())) |
| 296 , m_startOfStack(reinterpret_cast<intptr_t*>(getStackStart())) | 298 , m_startOfStack(reinterpret_cast<intptr_t*>(getStackStart())) |
| 297 , m_endOfStack(reinterpret_cast<intptr_t*>(getStackStart())) | 299 , m_endOfStack(reinterpret_cast<intptr_t*>(getStackStart())) |
| 298 , m_safePointScopeMarker(0) | 300 , m_safePointScopeMarker(0) |
| 299 , m_atSafePoint(false) | 301 , m_atSafePoint(false) |
| 300 , m_interruptors() | 302 , m_interruptors() |
| 301 , m_gcRequested(false) | 303 , m_gcRequested(false) |
| 302 , m_forcePreciseGCForTesting(false) | 304 , m_forcePreciseGCForTesting(false) |
| 303 , m_sweepRequested(0) | 305 , m_sweepRequested(0) |
| 304 , m_sweepInProgress(false) | 306 , m_sweepInProgress(false) |
| (...skipping 18 matching lines...) Expand all Loading... | |
| 323 m_sweeperThread = adoptPtr(blink::Platform::current()->createThread("Bli nk GC Sweeper")); | 325 m_sweeperThread = adoptPtr(blink::Platform::current()->createThread("Bli nk GC Sweeper")); |
| 324 } | 326 } |
| 325 | 327 |
| 326 ThreadState::~ThreadState() | 328 ThreadState::~ThreadState() |
| 327 { | 329 { |
| 328 checkThread(); | 330 checkThread(); |
| 329 CallbackStack::shutdown(&m_weakCallbackStack); | 331 CallbackStack::shutdown(&m_weakCallbackStack); |
| 330 for (int i = 0; i < NumberOfHeaps; i++) | 332 for (int i = 0; i < NumberOfHeaps; i++) |
| 331 delete m_heaps[i]; | 333 delete m_heaps[i]; |
| 332 deleteAllValues(m_interruptors); | 334 deleteAllValues(m_interruptors); |
| 335 while (m_wrapperPersistents) { | |
|
Mads Ager (chromium)
2014/09/01 13:52:46
How about regions that are in the pool? Should we
wibling-chromium
2014/09/02 11:19:38
That seems like a good idea:) Fixed.
| |
| 336 WrapperPersistentRegion* region = WrapperPersistentRegion::removeHead(&m _wrapperPersistents); | |
| 337 delete region; | |
| 338 } | |
| 333 **s_threadSpecific = 0; | 339 **s_threadSpecific = 0; |
| 334 } | 340 } |
| 335 | 341 |
// One-time, process-wide initialization of the thread-state machinery:
// allocates the TLS slot used to look up the current thread's ThreadState
// and the barrier used to park threads at GC safe points. Both are
// intentionally leaked singletons (torn down in shutdown(), if at all).
void ThreadState::init()
{
    s_threadSpecific = new WTF::ThreadSpecific<ThreadState*>();
    s_safePointBarrier = new SafePointBarrier;
}
| 341 | 347 |
| 342 void ThreadState::shutdown() | 348 void ThreadState::shutdown() |
| (...skipping 211 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 554 __msan_unpoison(&ptr, sizeof(ptr)); | 560 __msan_unpoison(&ptr, sizeof(ptr)); |
| 555 #endif | 561 #endif |
| 556 Heap::checkAndMarkPointer(visitor, ptr); | 562 Heap::checkAndMarkPointer(visitor, ptr); |
| 557 visitAsanFakeStackForPointer(visitor, ptr); | 563 visitAsanFakeStackForPointer(visitor, ptr); |
| 558 } | 564 } |
| 559 } | 565 } |
| 560 | 566 |
// Traces all persistent roots registered with this thread so the objects
// they reference survive garbage collection.
void ThreadState::visitPersistents(Visitor* visitor)
{
    m_persistents->trace(visitor);
    // NOTE(review): traced via the head region; presumably
    // WrapperPersistentRegion::trace walks the whole region list -- confirm.
    m_wrapperPersistents->trace(visitor);
}
| 565 | 572 |
| 566 bool ThreadState::checkAndMarkPointer(Visitor* visitor, Address address) | 573 bool ThreadState::checkAndMarkPointer(Visitor* visitor, Address address) |
| 567 { | 574 { |
| 568 // If thread is terminating ignore conservative pointers. | 575 // If thread is terminating ignore conservative pointers. |
| 569 if (m_isTerminating) | 576 if (m_isTerminating) |
| 570 return false; | 577 return false; |
| 571 | 578 |
| 572 // This checks for normal pages and for large objects which span the extent | 579 // This checks for normal pages and for large objects which span the extent |
| 573 // of several normal pages. | 580 // of several normal pages. |
| (...skipping 99 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 673 { | 680 { |
| 674 CallbackStack::Item* slot = m_weakCallbackStack->allocateEntry(&m_weakCallba ckStack); | 681 CallbackStack::Item* slot = m_weakCallbackStack->allocateEntry(&m_weakCallba ckStack); |
| 675 *slot = CallbackStack::Item(object, callback); | 682 *slot = CallbackStack::Item(object, callback); |
| 676 } | 683 } |
| 677 | 684 |
// Pops one entry off this thread's weak-callback stack and invokes it under
// WeaknessProcessing. Presumably returns false once the stack is empty,
// i.e. when weak processing for this thread is complete -- confirm against
// CallbackStack::popAndInvokeCallback.
bool ThreadState::popAndInvokeWeakPointerCallback(Visitor* visitor)
{
    return m_weakCallbackStack->popAndInvokeCallback<WeaknessProcessing>(&m_weakCallbackStack, visitor);
}
| 682 | 689 |
| 690 WrapperPersistentRegion* ThreadState::addWrapperPersistentRegion() | |
| 691 { | |
| 692 WrapperPersistentRegion* region; | |
| 693 if (m_wrapperPersistentRegionPoolSize) { | |
| 694 region = WrapperPersistentRegion::removeHead(&m_wrapperPersistentRegionP ool); | |
| 695 m_wrapperPersistentRegionPoolSize--; | |
| 696 } else { | |
| 697 region = new WrapperPersistentRegion(); | |
| 698 } | |
| 699 ASSERT(region); | |
| 700 WrapperPersistentRegion::insertHead(&m_wrapperPersistents, region); | |
| 701 return region; | |
| 702 } | |
| 703 | |
// Detaches an empty wrapper-persistent region from this thread's active
// list. Detached regions are cached in a small pool for reuse; beyond the
// pool bound they are freed.
void ThreadState::removeWrapperPersistentRegion(WrapperPersistentRegion* region)
{
    // Only regions with no live persistents may be removed.
    ASSERT(!region->count());
    // removeIfNotLast presumably keeps the final region attached so the
    // thread always has at least one active region -- in that case there is
    // nothing more to do.
    if (!region->removeIfNotLast(&m_wrapperPersistents))
        return;

    // Region was removed, i.e. it was not the last region in the list.
    // Keep at most two regions cached for reuse to bound the memory held by
    // the pool; any further detached regions are deleted outright.
    if (m_wrapperPersistentRegionPoolSize < 2) {
        WrapperPersistentRegion::insertHead(&m_wrapperPersistentRegionPool, region);
        m_wrapperPersistentRegionPoolSize++;
    } else {
        delete region;
    }
}
| 718 | |
// Returns the anchor node for persistents that are shared across all
// threads. The anchor is created once, with thread-safe initialization, and
// intentionally never freed.
PersistentNode* ThreadState::globalRoots()
{
    AtomicallyInitializedStatic(PersistentNode*, anchor = new PersistentAnchor);
    return anchor;
}
| 688 | 724 |
// Returns the mutex guarding access to the cross-thread persistent list
// returned by globalRoots(). Lazily created thread-safely; never freed.
Mutex& ThreadState::globalRootsMutex()
{
    AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex);
    return mutex;
}
| (...skipping 501 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1194 return gcInfo; | 1230 return gcInfo; |
| 1195 } | 1231 } |
| 1196 } | 1232 } |
| 1197 if (needLockForIteration) | 1233 if (needLockForIteration) |
| 1198 threadAttachMutex().unlock(); | 1234 threadAttachMutex().unlock(); |
| 1199 return 0; | 1235 return 0; |
| 1200 } | 1236 } |
| 1201 #endif | 1237 #endif |
| 1202 | 1238 |
| 1203 } | 1239 } |
| OLD | NEW |