Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(434)

Side by Side Diff: Source/platform/heap/ThreadState.cpp

Issue 371623002: [oilpan]: Make thread shutdown more robust. (Closed) Base URL: svn://svn.chromium.org/blink/trunk
Patch Set: review feedback Created 6 years, 5 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 85 matching lines...) Expand 10 before | Expand all | Expand 10 after
96 uint8_t ThreadState::s_mainThreadStateStorage[sizeof(ThreadState)]; 96 uint8_t ThreadState::s_mainThreadStateStorage[sizeof(ThreadState)];
97 SafePointBarrier* ThreadState::s_safePointBarrier = 0; 97 SafePointBarrier* ThreadState::s_safePointBarrier = 0;
98 bool ThreadState::s_inGC = false; 98 bool ThreadState::s_inGC = false;
99 99
100 static Mutex& threadAttachMutex() 100 static Mutex& threadAttachMutex()
101 { 101 {
102 AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex); 102 AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex);
103 return mutex; 103 return mutex;
104 } 104 }
105 105
106 // The threadShutdownMutex is used to synchronize thread shutdown
107 // since the thread local GC, as of now, cannot run in parallel
108 // with other thread local GCs since it is using the global marking
109 // stack. It can also not run in parallel with a global GC, but
110 // that is honored by not entering a safepoint while doing the
111 // thread local GC, meaning a request for a global GC would time
112 // out.
haraken 2014/07/09 05:17:48 Probably it might be a good idea to add NoSafePoin
wibling-chromium 2014/07/09 10:32:31 We already have an ASSERT ensuring that we cannot
113 static Mutex& threadShutdownMutex()
114 {
115 AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex);
116 return mutex;
117 }
118
106 static double lockingTimeout() 119 static double lockingTimeout()
107 { 120 {
108 // Wait time for parking all threads is at most 100 MS. 121 // Wait time for parking all threads is at most 100 MS.
109 return 0.100; 122 return 0.100;
110 } 123 }
111 124
112 125
113 typedef void (*PushAllRegistersCallback)(SafePointBarrier*, ThreadState*, intptr_t*); 126 typedef void (*PushAllRegistersCallback)(SafePointBarrier*, ThreadState*, intptr_t*);
114 extern "C" void pushAllRegisters(SafePointBarrier*, ThreadState*, PushAllRegistersCallback); 127 extern "C" void pushAllRegisters(SafePointBarrier*, ThreadState*, PushAllRegistersCallback);
115 128
(...skipping 138 matching lines...) Expand 10 before | Expand all | Expand 10 after
254 barrier->doEnterSafePoint(state, stackEnd); 267 barrier->doEnterSafePoint(state, stackEnd);
255 } 268 }
256 269
257 volatile int m_canResume; 270 volatile int m_canResume;
258 volatile int m_unparkedThreadCount; 271 volatile int m_unparkedThreadCount;
259 Mutex m_mutex; 272 Mutex m_mutex;
260 ThreadCondition m_parked; 273 ThreadCondition m_parked;
261 ThreadCondition m_resume; 274 ThreadCondition m_resume;
262 }; 275 };
263 276
277
278 BaseHeapPage::BaseHeapPage(PageMemory* storage, const GCInfo* gcInfo, ThreadState* state)
279 : m_storage(storage)
280 , m_gcInfo(gcInfo)
281 , m_threadState(state)
282 , m_shuttingDown(false)
283 , m_traced(false)
284 {
285 ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this)));
286 }
287
264 ThreadState::ThreadState() 288 ThreadState::ThreadState()
265 : m_thread(currentThread()) 289 : m_thread(currentThread())
266 , m_persistents(adoptPtr(new PersistentAnchor())) 290 , m_persistents(adoptPtr(new PersistentAnchor()))
267 , m_startOfStack(reinterpret_cast<intptr_t*>(getStackStart())) 291 , m_startOfStack(reinterpret_cast<intptr_t*>(getStackStart()))
268 , m_endOfStack(reinterpret_cast<intptr_t*>(getStackStart())) 292 , m_endOfStack(reinterpret_cast<intptr_t*>(getStackStart()))
269 , m_safePointScopeMarker(0) 293 , m_safePointScopeMarker(0)
270 , m_atSafePoint(false) 294 , m_atSafePoint(false)
271 , m_interruptors() 295 , m_interruptors()
272 , m_gcRequested(false) 296 , m_gcRequested(false)
273 , m_forcePreciseGCForTesting(false) 297 , m_forcePreciseGCForTesting(false)
274 , m_sweepRequested(0) 298 , m_sweepRequested(0)
275 , m_sweepInProgress(false) 299 , m_sweepInProgress(false)
276 , m_noAllocationCount(0) 300 , m_noAllocationCount(0)
277 , m_inGC(false) 301 , m_inGC(false)
278 , m_heapContainsCache(adoptPtr(new HeapContainsCache())) 302 , m_heapContainsCache(adoptPtr(new HeapContainsCache()))
279 , m_isCleaningUp(false) 303 , m_isCleaningUp(false)
280 #if defined(ADDRESS_SANITIZER) 304 #if defined(ADDRESS_SANITIZER)
281 , m_asanFakeStack(__asan_get_current_fake_stack()) 305 , m_asanFakeStack(__asan_get_current_fake_stack())
282 #endif 306 #endif
283 { 307 {
284 ASSERT(!**s_threadSpecific); 308 ASSERT(!**s_threadSpecific);
285 **s_threadSpecific = this; 309 **s_threadSpecific = this;
286 310
287 m_stats.clear(); 311 m_stats.clear();
288 m_statsAfterLastGC.clear(); 312 m_statsAfterLastGC.clear();
289 // First allocate the general heap, second iterate through to 313 // First allocate the general heap, second iterate through to
290 // allocate the type specific heaps 314 // allocate the type specific heaps
291 m_heaps[GeneralHeap] = new ThreadHeap<FinalizedHeapObjectHeader>(this); 315 m_heaps[GeneralHeap] = new ThreadHeap<FinalizedHeapObjectHeader>(this, GeneralHeap);
292 for (int i = GeneralHeap + 1; i < NumberOfHeaps; i++) 316 for (int i = GeneralHeap + 1; i < NumberOfHeaps; i++)
293 m_heaps[i] = new ThreadHeap<HeapObjectHeader>(this); 317 m_heaps[i] = new ThreadHeap<HeapObjectHeader>(this, i);
294 318
295 CallbackStack::init(&m_weakCallbackStack); 319 CallbackStack::init(&m_weakCallbackStack);
296 } 320 }
297 321
298 ThreadState::~ThreadState() 322 ThreadState::~ThreadState()
299 { 323 {
300 checkThread(); 324 checkThread();
301 CallbackStack::shutdown(&m_weakCallbackStack); 325 CallbackStack::shutdown(&m_weakCallbackStack);
302 for (int i = GeneralHeap; i < NumberOfHeaps; i++) 326 for (int i = GeneralHeap; i < NumberOfHeaps; i++)
303 delete m_heaps[i]; 327 delete m_heaps[i];
(...skipping 65 matching lines...) Expand 10 before | Expand all | Expand 10 after
369 393
370 void ThreadState::cleanup() 394 void ThreadState::cleanup()
371 { 395 {
372 // From here on ignore all conservatively discovered 396 // From here on ignore all conservatively discovered
373 // pointers into the heap owned by this thread. 397 // pointers into the heap owned by this thread.
374 m_isCleaningUp = true; 398 m_isCleaningUp = true;
375 399
376 for (size_t i = 0; i < m_cleanupTasks.size(); i++) 400 for (size_t i = 0; i < m_cleanupTasks.size(); i++)
377 m_cleanupTasks[i]->preCleanup(); 401 m_cleanupTasks[i]->preCleanup();
378 402
379 // After this GC we expect heap to be empty because 403 {
380 // preCleanup tasks should have cleared all persistent 404 // We enter a safepoint while waiting for the thread shutdown mutex.
381 // handles that were externally owned. 405 SafePointAwareMutexLocker locker(threadShutdownMutex(), NoHeapPointersOnStack);
382 Heap::collectAllGarbage();
383 406
384 // Verify that all heaps are empty now. 407 // Set flags on all heap pages of this thread to ensure we don't trace
385 for (int i = 0; i < NumberOfHeaps; i++) 408 // pages on other threads.
386 m_heaps[i]->assertEmpty(); 409 setupHeapsForShutdown();
410
411 // Do thread local GC's as long as the count of thread local Persistents
412 // changes and is above zero.
413 PersistentAnchor* anchor = static_cast<PersistentAnchor*>(m_persistents.get());
414 int oldCount = 0;
415 int currentCount = anchor->numberOfPersistents();
416 while (currentCount > 0 && currentCount != oldCount) {
417 Heap::collectGarbageForTerminatingThread(this);
418 oldCount = currentCount;
419 currentCount = anchor->numberOfPersistents();
420 }
421 // We should not have any persistents left when getting to this point,
422 // We should not have any persistents left when getting to this point; if we have, it is probably a bug, so adding a debug ASSERT to catch this.
423 ASSERT(currentCount == 0);
haraken 2014/07/09 05:17:48 ASSERT(!currentCount);
wibling-chromium 2014/07/09 10:32:31 Done.
424
425 // Do a final GC to finalize any objects pointed to by persistents
426 // collected in the last round of GC above.
haraken 2014/07/09 05:17:48 Why do we need the final GC? Isn't it already guar
wibling-chromium 2014/07/09 10:32:31 We need it since when the count of persistents rea
427 Heap::collectGarbageForTerminatingThread(this);
428 }
387 429
388 for (size_t i = 0; i < m_cleanupTasks.size(); i++) 430 for (size_t i = 0; i < m_cleanupTasks.size(); i++)
389 m_cleanupTasks[i]->postCleanup(); 431 m_cleanupTasks[i]->postCleanup();
390 432
391 m_cleanupTasks.clear(); 433 m_cleanupTasks.clear();
392 } 434 }
393 435
394 void ThreadState::detach() 436 void ThreadState::detach()
395 { 437 {
396 ThreadState* state = current(); 438 ThreadState* state = current();
397 state->cleanup(); 439 state->cleanup();
398 440
399 // Enter a safe point before trying to acquire threadAttachMutex
400 // to avoid dead lock if another thread is preparing for GC, has acquired
401 // threadAttachMutex and waiting for other threads to pause or reach a
402 // safepoint.
403 if (!state->isAtSafePoint())
404 state->enterSafePointWithoutPointers();
405
406 { 441 {
407 MutexLocker locker(threadAttachMutex()); 442 // Enter a safe point while trying to acquire threadAttachMutex
408 state->leaveSafePoint(); 443 // to avoid dead lock if another thread is preparing for GC, has acquired
444 // threadAttachMutex and waiting for other threads to pause or reach a
445 // safepoint.
446 SafePointAwareMutexLocker locker(threadAttachMutex(), NoHeapPointersOnStack);
409 ASSERT(attachedThreads().contains(state)); 447 ASSERT(attachedThreads().contains(state));
410 attachedThreads().remove(state); 448 attachedThreads().remove(state);
449 // Deleting the thread state also destroys the thread's heaps at which
450 // point all the thread's pages are added to the orphanedPagePool.
Mads Ager (chromium) 2014/07/11 05:48:06 This comment says that the pages are added to the
451 // Subsequently they are moved to the freePagePool once they are no
452 // longer traced by a global GC.
411 delete state; 453 delete state;
412 } 454 }
413 shutdownHeapIfNecessary(); 455 shutdownHeapIfNecessary();
414 } 456 }
415 457
416 void ThreadState::visitRoots(Visitor* visitor) 458 void ThreadState::visitRoots(Visitor* visitor)
417 { 459 {
418 { 460 {
419 // All threads are at safepoints so this is not strictly necessary. 461 // All threads are at safepoints so this is not strictly necessary.
420 // However we acquire the mutex to make mutation and traversal of this 462 // However we acquire the mutex to make mutation and traversal of this
421 // list symmetrical. 463 // list symmetrical.
422 MutexLocker locker(globalRootsMutex()); 464 MutexLocker locker(globalRootsMutex());
423 globalRoots()->trace(visitor); 465 globalRoots()->trace(visitor);
424 } 466 }
425 467
426 AttachedThreadStateSet& threads = attachedThreads(); 468 AttachedThreadStateSet& threads = attachedThreads();
427 for (AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) 469 for (AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it)
428 (*it)->trace(visitor); 470 (*it)->trace(visitor);
429 } 471 }
430 472
473 void ThreadState::visitLocalRoots(Visitor* visitor)
474 {
475 // We assume there are no CrossThreadPersistents to this threads heap. If
476 // there is, it will be handled in the same way as any other cross-thread
477 // pointer pointing to an orphaned page.
haraken 2014/07/09 05:17:48 This comment is a bit misleading. We can read it a
wibling-chromium 2014/07/09 10:32:31 Done. Rephrased it a bit.
478 m_persistents->trace(visitor);
479 }
480
431 NO_SANITIZE_ADDRESS 481 NO_SANITIZE_ADDRESS
432 void ThreadState::visitAsanFakeStackForPointer(Visitor* visitor, Address ptr) 482 void ThreadState::visitAsanFakeStackForPointer(Visitor* visitor, Address ptr)
433 { 483 {
434 #if defined(ADDRESS_SANITIZER) 484 #if defined(ADDRESS_SANITIZER)
435 Address* start = reinterpret_cast<Address*>(m_startOfStack); 485 Address* start = reinterpret_cast<Address*>(m_startOfStack);
436 Address* end = reinterpret_cast<Address*>(m_endOfStack); 486 Address* end = reinterpret_cast<Address*>(m_endOfStack);
437 Address* fakeFrameStart = 0; 487 Address* fakeFrameStart = 0;
438 Address* fakeFrameEnd = 0; 488 Address* fakeFrameEnd = 0;
439 Address* maybeFakeFrame = reinterpret_cast<Address*>(ptr); 489 Address* maybeFakeFrame = reinterpret_cast<Address*>(ptr);
440 Address* realFrameForFakeFrame = 490 Address* realFrameForFakeFrame =
(...skipping 102 matching lines...) Expand 10 before | Expand all | Expand 10 after
543 #endif 593 #endif
544 594
545 void ThreadState::pushWeakObjectPointerCallback(void* object, WeakPointerCallback callback) 595 void ThreadState::pushWeakObjectPointerCallback(void* object, WeakPointerCallback callback)
546 { 596 {
547 CallbackStack::Item* slot = m_weakCallbackStack->allocateEntry(&m_weakCallbackStack); 597 CallbackStack::Item* slot = m_weakCallbackStack->allocateEntry(&m_weakCallbackStack);
548 *slot = CallbackStack::Item(object, callback); 598 *slot = CallbackStack::Item(object, callback);
549 } 599 }
550 600
551 bool ThreadState::popAndInvokeWeakPointerCallback(Visitor* visitor) 601 bool ThreadState::popAndInvokeWeakPointerCallback(Visitor* visitor)
552 { 602 {
553 return m_weakCallbackStack->popAndInvokeCallback(&m_weakCallbackStack, visitor); 603 return m_weakCallbackStack->popAndInvokeCallback<GlobalGC>(&m_weakCallbackStack, visitor);
554 } 604 }
555 605
556 PersistentNode* ThreadState::globalRoots() 606 PersistentNode* ThreadState::globalRoots()
557 { 607 {
558 AtomicallyInitializedStatic(PersistentNode*, anchor = new PersistentAnchor); 608 AtomicallyInitializedStatic(PersistentNode*, anchor = new PersistentAnchor);
559 return anchor; 609 return anchor;
560 } 610 }
561 611
562 Mutex& ThreadState::globalRootsMutex() 612 Mutex& ThreadState::globalRootsMutex()
563 { 613 {
(...skipping 117 matching lines...) Expand 10 before | Expand all | Expand 10 after
681 { 731 {
682 for (int i = 0; i < NumberOfHeaps; i++) 732 for (int i = 0; i < NumberOfHeaps; i++)
683 m_heaps[i]->makeConsistentForGC(); 733 m_heaps[i]->makeConsistentForGC();
684 } 734 }
685 735
686 void ThreadState::prepareForGC() 736 void ThreadState::prepareForGC()
687 { 737 {
688 for (int i = 0; i < NumberOfHeaps; i++) { 738 for (int i = 0; i < NumberOfHeaps; i++) {
689 BaseHeap* heap = m_heaps[i]; 739 BaseHeap* heap = m_heaps[i];
690 heap->makeConsistentForGC(); 740 heap->makeConsistentForGC();
691 // If there are parked threads with outstanding sweep requests, clear their mark bits. 741 // If a new GC is requested before this thread got around to sweep, i.e. due to the
692 // This happens if a thread did not have time to wake up and sweep, 742 // thread doing a long running operation, we clear the mark bits and mark any of
693 // before the next GC arrived. 743 // the dead objects as dead. The latter is used to ensure the next GC marking does
744 // not trace already dead objects. If we trace a dead object we could end up tracing
745 // into garbage or the middle of another object via the newly conservatively found
746 // object.
694 if (sweepRequested()) 747 if (sweepRequested())
695 heap->clearMarks(); 748 heap->clearLiveAndMarkDead();
696 } 749 }
697 setSweepRequested(); 750 setSweepRequested();
698 } 751 }
699 752
753 void ThreadState::setupHeapsForShutdown()
754 {
755 for (int i = 0; i < NumberOfHeaps; i++) {
756 BaseHeap* heap = m_heaps[i];
757 heap->prepareHeapForShutdown();
758 }
759 }
760
700 BaseHeapPage* ThreadState::heapPageFromAddress(Address address) 761 BaseHeapPage* ThreadState::heapPageFromAddress(Address address)
701 { 762 {
702 BaseHeapPage* cachedPage = heapContainsCache()->lookup(address); 763 BaseHeapPage* cachedPage = heapContainsCache()->lookup(address);
703 #ifdef NDEBUG 764 #ifdef NDEBUG
704 if (cachedPage) 765 if (cachedPage)
705 return cachedPage; 766 return cachedPage;
706 #endif 767 #endif
707 768
708 for (int i = 0; i < NumberOfHeaps; i++) { 769 for (int i = 0; i < NumberOfHeaps; i++) {
709 BaseHeapPage* page = m_heaps[i]->heapPageFromAddress(address); 770 BaseHeapPage* page = m_heaps[i]->heapPageFromAddress(address);
(...skipping 208 matching lines...) Expand 10 before | Expand all | Expand 10 after
918 threadAttachMutex().unlock(); 979 threadAttachMutex().unlock();
919 return gcInfo; 980 return gcInfo;
920 } 981 }
921 } 982 }
922 if (needLockForIteration) 983 if (needLockForIteration)
923 threadAttachMutex().unlock(); 984 threadAttachMutex().unlock();
924 return 0; 985 return 0;
925 } 986 }
926 #endif 987 #endif
927 } 988 }
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698