Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(374)

Side by Side Diff: Source/platform/heap/ThreadState.cpp

Issue 371623002: [oilpan]: Make thread shutdown more robust. (Closed) Base URL: svn://svn.chromium.org/blink/trunk
Patch Set: Created 6 years, 5 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 84 matching lines...) Expand 10 before | Expand all | Expand 10 after
95 uint8_t ThreadState::s_mainThreadStateStorage[sizeof(ThreadState)]; 95 uint8_t ThreadState::s_mainThreadStateStorage[sizeof(ThreadState)];
96 SafePointBarrier* ThreadState::s_safePointBarrier = 0; 96 SafePointBarrier* ThreadState::s_safePointBarrier = 0;
97 bool ThreadState::s_inGC = false; 97 bool ThreadState::s_inGC = false;
98 98
99 static Mutex& threadAttachMutex() 99 static Mutex& threadAttachMutex()
100 { 100 {
101 AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex); 101 AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex);
102 return mutex; 102 return mutex;
103 } 103 }
104 104
105 // The threadShutdownMutex is used to synchronize thread shutdown
106 // since the thread local GC, as of now, cannot run in parallel
107 // with other thread local GCs since it is using the global marking
108 // stack. It can also not run in parallel with a global GC, but
109 // that is honored by not entering a safepoint while doing the
110 // thread local GC, meaning a request for a global GC would time
111 // out.
112 static Mutex& threadShutdownMutex()
113 {
114 AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex);
115 return mutex;
116 }
117
105 static double lockingTimeout() 118 static double lockingTimeout()
106 { 119 {
107 // Wait time for parking all threads is at most 100 MS. 120 // Wait time for parking all threads is at most 100 MS.
108 return 0.100; 121 return 0.100;
109 } 122 }
110 123
111 124
112 typedef void (*PushAllRegistersCallback)(SafePointBarrier*, ThreadState*, intptr_t*); 125 typedef void (*PushAllRegistersCallback)(SafePointBarrier*, ThreadState*, intptr_t*);
113 extern "C" void pushAllRegisters(SafePointBarrier*, ThreadState*, PushAllRegistersCallback); 126 extern "C" void pushAllRegisters(SafePointBarrier*, ThreadState*, PushAllRegistersCallback);
114 127
(...skipping 138 matching lines...) Expand 10 before | Expand all | Expand 10 after
253 barrier->doEnterSafePoint(state, stackEnd); 266 barrier->doEnterSafePoint(state, stackEnd);
254 } 267 }
255 268
256 volatile int m_canResume; 269 volatile int m_canResume;
257 volatile int m_unparkedThreadCount; 270 volatile int m_unparkedThreadCount;
258 Mutex m_mutex; 271 Mutex m_mutex;
259 ThreadCondition m_parked; 272 ThreadCondition m_parked;
260 ThreadCondition m_resume; 273 ThreadCondition m_resume;
261 }; 274 };
262 275
276
277 BaseHeapPage::BaseHeapPage(PageMemory* storage, const GCInfo* gcInfo, ThreadState* state)
278 : m_storage(storage)
279 , m_gcInfo(gcInfo)
280 , m_threadState(state)
281 , m_padding(0)
282 {
283 ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this)));
284 }
285
263 ThreadState::ThreadState() 286 ThreadState::ThreadState()
264 : m_thread(currentThread()) 287 : m_thread(currentThread())
265 , m_persistents(adoptPtr(new PersistentAnchor())) 288 , m_persistents(adoptPtr(new PersistentAnchor()))
266 , m_startOfStack(reinterpret_cast<intptr_t*>(getStackStart())) 289 , m_startOfStack(reinterpret_cast<intptr_t*>(getStackStart()))
267 , m_endOfStack(reinterpret_cast<intptr_t*>(getStackStart())) 290 , m_endOfStack(reinterpret_cast<intptr_t*>(getStackStart()))
268 , m_safePointScopeMarker(0) 291 , m_safePointScopeMarker(0)
269 , m_atSafePoint(false) 292 , m_atSafePoint(false)
270 , m_interruptors() 293 , m_interruptors()
271 , m_gcRequested(false) 294 , m_gcRequested(false)
272 , m_forcePreciseGCForTesting(false) 295 , m_forcePreciseGCForTesting(false)
273 , m_sweepRequested(0) 296 , m_sweepRequested(0)
274 , m_sweepInProgress(false) 297 , m_sweepInProgress(false)
275 , m_noAllocationCount(0) 298 , m_noAllocationCount(0)
276 , m_inGC(false) 299 , m_inGC(false)
277 , m_heapContainsCache(adoptPtr(new HeapContainsCache())) 300 , m_heapContainsCache(adoptPtr(new HeapContainsCache()))
278 , m_isCleaningUp(false) 301 , m_isCleaningUp(false)
279 #if defined(ADDRESS_SANITIZER) 302 #if defined(ADDRESS_SANITIZER)
280 , m_asanFakeStack(__asan_get_current_fake_stack()) 303 , m_asanFakeStack(__asan_get_current_fake_stack())
281 #endif 304 #endif
282 { 305 {
283 ASSERT(!**s_threadSpecific); 306 ASSERT(!**s_threadSpecific);
284 **s_threadSpecific = this; 307 **s_threadSpecific = this;
285 308
286 m_stats.clear(); 309 m_stats.clear();
287 m_statsAfterLastGC.clear(); 310 m_statsAfterLastGC.clear();
288 // First allocate the general heap, second iterate through to 311 // First allocate the general heap, second iterate through to
289 // allocate the type specific heaps 312 // allocate the type specific heaps
290 m_heaps[GeneralHeap] = new ThreadHeap<FinalizedHeapObjectHeader>(this); 313 m_heaps[GeneralHeap] = new ThreadHeap<FinalizedHeapObjectHeader>(this, GeneralHeap);
291 for (int i = GeneralHeap + 1; i < NumberOfHeaps; i++) 314 for (int i = GeneralHeap + 1; i < NumberOfHeaps; i++)
292 m_heaps[i] = new ThreadHeap<HeapObjectHeader>(this); 315 m_heaps[i] = new ThreadHeap<HeapObjectHeader>(this, i);
293 316
294 CallbackStack::init(&m_weakCallbackStack); 317 CallbackStack::init(&m_weakCallbackStack);
295 } 318 }
296 319
297 ThreadState::~ThreadState() 320 ThreadState::~ThreadState()
298 { 321 {
299 checkThread(); 322 checkThread();
300 CallbackStack::shutdown(&m_weakCallbackStack); 323 CallbackStack::shutdown(&m_weakCallbackStack);
301 for (int i = GeneralHeap; i < NumberOfHeaps; i++) 324 for (int i = GeneralHeap; i < NumberOfHeaps; i++)
302 delete m_heaps[i]; 325 delete m_heaps[i];
(...skipping 65 matching lines...) Expand 10 before | Expand all | Expand 10 after
368 391
369 void ThreadState::cleanup() 392 void ThreadState::cleanup()
370 { 393 {
371 // From here on ignore all conservatively discovered 394 // From here on ignore all conservatively discovered
372 // pointers into the heap owned by this thread. 395 // pointers into the heap owned by this thread.
373 m_isCleaningUp = true; 396 m_isCleaningUp = true;
374 397
375 for (size_t i = 0; i < m_cleanupTasks.size(); i++) 398 for (size_t i = 0; i < m_cleanupTasks.size(); i++)
376 m_cleanupTasks[i]->preCleanup(); 399 m_cleanupTasks[i]->preCleanup();
377 400
378 // After this GC we expect heap to be empty because 401 {
379 // preCleanup tasks should have cleared all persistent 402 // We enter a safepoint while waiting for the thread shutdown mutex.
380 // handles that were externally owned. 403 SafePointAwareMutexLocker locker(threadShutdownMutex());
381 Heap::collectAllGarbage();
382 404
383 // Verify that all heaps are empty now. 405 // Set a flag on all this thread's heap pages to ensure we don't trace
384 for (int i = 0; i < NumberOfHeaps; i++) 406 // outside this thread's heap pages.
385 m_heaps[i]->assertEmpty(); 407 setupHeapsForShutdown();
408
409 // Do thread local GC's as long as the count of thread local Persistents
410 // changes and is above zero.
411 PersistentAnchor* anchor = static_cast<PersistentAnchor*>(m_persistents.get());
412 int oldCount = 0;
413 int curCount = anchor->numberOfPersistents();
414 while (curCount > 0 && curCount != oldCount) {
415 Heap::collectGarbageForThread(this, false);
416 oldCount = curCount;
417 curCount = anchor->numberOfPersistents();
418 }
419
420 // Do a final sweep to finalize any objects pointed to by persistents
421 // collected in the last round of GC above.
422 Heap::collectGarbageForThread(this, true);
423 }
386 424
387 for (size_t i = 0; i < m_cleanupTasks.size(); i++) 425 for (size_t i = 0; i < m_cleanupTasks.size(); i++)
388 m_cleanupTasks[i]->postCleanup(); 426 m_cleanupTasks[i]->postCleanup();
389 427
390 m_cleanupTasks.clear(); 428 m_cleanupTasks.clear();
391 } 429 }
392 430
393 void ThreadState::detach() 431 void ThreadState::detach()
394 { 432 {
395 ThreadState* state = current(); 433 ThreadState* state = current();
396 state->cleanup(); 434 state->cleanup();
397 435
398 // Enter a safe point before trying to acquire threadAttachMutex 436 // Enter a safe point before trying to acquire threadAttachMutex
399 // to avoid dead lock if another thread is preparing for GC, has acquired 437 // to avoid dead lock if another thread is preparing for GC, has acquired
400 // threadAttachMutex and waiting for other threads to pause or reach a 438 // threadAttachMutex and waiting for other threads to pause or reach a
401 // safepoint. 439 // safepoint.
402 if (!state->isAtSafePoint()) 440 if (!state->isAtSafePoint())
403 state->enterSafePointWithoutPointers(); 441 state->enterSafePointWithoutPointers();
404 442
405 { 443 {
406 MutexLocker locker(threadAttachMutex()); 444 MutexLocker locker(threadAttachMutex());
407 state->leaveSafePoint(); 445 state->leaveSafePoint();
408 ASSERT(attachedThreads().contains(state)); 446 ASSERT(attachedThreads().contains(state));
409 attachedThreads().remove(state); 447 attachedThreads().remove(state);
448 // Deleting the thread state also destroys the thread's heaps at which
449 // point all the thread's pages are added to the orphanedPagePool.
450 // Subsequently they are promoted to the memoryPool once they are no
zerny-chromium 2014/07/07 12:11:56 s/promoted/moved
wibling-chromium 2014/07/07 13:50:07 Done.
451 // longer traced by a global GC.
410 delete state; 452 delete state;
411 } 453 }
412 shutdownHeapIfNecessary(); 454 shutdownHeapIfNecessary();
413 } 455 }
414 456
415 void ThreadState::visitRoots(Visitor* visitor) 457 void ThreadState::visitRoots(Visitor* visitor)
416 { 458 {
417 { 459 {
418 // All threads are at safepoints so this is not strictly necessary. 460 // All threads are at safepoints so this is not strictly necessary.
419 // However we acquire the mutex to make mutation and traversal of this 461 // However we acquire the mutex to make mutation and traversal of this
420 // list symmetrical. 462 // list symmetrical.
421 MutexLocker locker(globalRootsMutex()); 463 MutexLocker locker(globalRootsMutex());
422 globalRoots()->trace(visitor); 464 globalRoots()->trace(visitor);
423 } 465 }
424 466
425 AttachedThreadStateSet& threads = attachedThreads(); 467 AttachedThreadStateSet& threads = attachedThreads();
426 for (AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) 468 for (AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it)
427 (*it)->trace(visitor); 469 (*it)->trace(visitor);
428 } 470 }
429 471
472 void ThreadState::visitLocalRoots(Visitor* visitor)
473 {
474 // We assume there are no CrossThreadPersistents pointing to this thread when
475 // it is being destructed since it would typically have been in a refcounted
476 // object which should have been destructed prior to shutting down the thread.
477 // If there are any we treat it similar to when we find a dead object with
478 // a member pointing to this thread's heap and allow it being marked, but
479 // do nothing when popping the trace method.
zerny-chromium 2014/07/07 12:11:56 Simpler: We assume there are no CrossThreadPersis
wibling-chromium 2014/07/07 13:50:07 Done. Much better:)
480 m_persistents->trace(visitor);
481 }
482
430 NO_SANITIZE_ADDRESS 483 NO_SANITIZE_ADDRESS
431 void ThreadState::visitAsanFakeStackForPointer(Visitor* visitor, Address ptr) 484 void ThreadState::visitAsanFakeStackForPointer(Visitor* visitor, Address ptr)
432 { 485 {
433 #if defined(ADDRESS_SANITIZER) 486 #if defined(ADDRESS_SANITIZER)
434 Address* start = reinterpret_cast<Address*>(m_startOfStack); 487 Address* start = reinterpret_cast<Address*>(m_startOfStack);
435 Address* end = reinterpret_cast<Address*>(m_endOfStack); 488 Address* end = reinterpret_cast<Address*>(m_endOfStack);
436 Address* fakeFrameStart = 0; 489 Address* fakeFrameStart = 0;
437 Address* fakeFrameEnd = 0; 490 Address* fakeFrameEnd = 0;
438 Address* maybeFakeFrame = reinterpret_cast<Address*>(ptr); 491 Address* maybeFakeFrame = reinterpret_cast<Address*>(ptr);
439 Address* realFrameForFakeFrame = 492 Address* realFrameForFakeFrame =
(...skipping 102 matching lines...) Expand 10 before | Expand all | Expand 10 after
542 #endif 595 #endif
543 596
544 void ThreadState::pushWeakObjectPointerCallback(void* object, WeakPointerCallback callback) 597 void ThreadState::pushWeakObjectPointerCallback(void* object, WeakPointerCallback callback)
545 { 598 {
546 CallbackStack::Item* slot = m_weakCallbackStack->allocateEntry(&m_weakCallbackStack); 599 CallbackStack::Item* slot = m_weakCallbackStack->allocateEntry(&m_weakCallbackStack);
547 *slot = CallbackStack::Item(object, callback); 600 *slot = CallbackStack::Item(object, callback);
548 } 601 }
549 602
550 bool ThreadState::popAndInvokeWeakPointerCallback(Visitor* visitor) 603 bool ThreadState::popAndInvokeWeakPointerCallback(Visitor* visitor)
551 { 604 {
552 return m_weakCallbackStack->popAndInvokeCallback(&m_weakCallbackStack, visitor); 605 return m_weakCallbackStack->popAndInvokeCallback<false>(&m_weakCallbackStack, visitor);
zerny-chromium 2014/07/07 12:11:56 Nit: maybe we make a type/enum for ThreadLocal/Gl
wibling-chromium 2014/07/07 13:50:07 I prefer keeping it as a bool to keep the if in th
553 } 606 }
554 607
555 PersistentNode* ThreadState::globalRoots() 608 PersistentNode* ThreadState::globalRoots()
556 { 609 {
557 AtomicallyInitializedStatic(PersistentNode*, anchor = new PersistentAnchor); 610 AtomicallyInitializedStatic(PersistentNode*, anchor = new PersistentAnchor);
558 return anchor; 611 return anchor;
559 } 612 }
560 613
561 Mutex& ThreadState::globalRootsMutex() 614 Mutex& ThreadState::globalRootsMutex()
562 { 615 {
(...skipping 117 matching lines...) Expand 10 before | Expand all | Expand 10 after
680 { 733 {
681 for (int i = 0; i < NumberOfHeaps; i++) 734 for (int i = 0; i < NumberOfHeaps; i++)
682 m_heaps[i]->makeConsistentForGC(); 735 m_heaps[i]->makeConsistentForGC();
683 } 736 }
684 737
685 void ThreadState::prepareForGC() 738 void ThreadState::prepareForGC()
686 { 739 {
687 for (int i = 0; i < NumberOfHeaps; i++) { 740 for (int i = 0; i < NumberOfHeaps; i++) {
688 BaseHeap* heap = m_heaps[i]; 741 BaseHeap* heap = m_heaps[i];
689 heap->makeConsistentForGC(); 742 heap->makeConsistentForGC();
690 // If there are parked threads with outstanding sweep requests, clear their mark bits. 743 // If there are parked threads with outstanding sweep requests, clear their mark bits,
691 // This happens if a thread did not have time to wake up and sweep, 744 // and mark any of their dead objects as dead. The latter is used to ensure the next
692 // before the next GC arrived. 745 // GC marking does not revive already dead objects. If we revived a dead object we
693 if (sweepRequested()) 746 // could end up tracing into garbage or the middle of another object via the revived
694 heap->clearMarks(); 747 // object.
748 // The case of a thread missing a sweep happens if it did not have time to wake up
749 // and sweep, before the next GC arrived.
750 if (sweepRequested()) {
751 heap->clearLiveAndMarkDead();
752 }
zerny-chromium 2014/07/07 12:11:56 Nit: no curlies
wibling-chromium 2014/07/07 13:50:07 Done.
695 } 753 }
696 setSweepRequested(); 754 setSweepRequested();
697 } 755 }
698 756
757 void ThreadState::setupHeapsForShutdown()
758 {
759 for (int i = 0; i < NumberOfHeaps; i++) {
760 BaseHeap* heap = m_heaps[i];
761 heap->setShutdown();
762 }
763 }
764
699 BaseHeapPage* ThreadState::heapPageFromAddress(Address address) 765 BaseHeapPage* ThreadState::heapPageFromAddress(Address address)
700 { 766 {
701 BaseHeapPage* cachedPage = heapContainsCache()->lookup(address); 767 BaseHeapPage* cachedPage = heapContainsCache()->lookup(address);
702 #ifdef NDEBUG 768 #ifdef NDEBUG
703 if (cachedPage) 769 if (cachedPage)
704 return cachedPage; 770 return cachedPage;
705 #endif 771 #endif
706 772
707 for (int i = 0; i < NumberOfHeaps; i++) { 773 for (int i = 0; i < NumberOfHeaps; i++) {
708 BaseHeapPage* page = m_heaps[i]->heapPageFromAddress(address); 774 BaseHeapPage* page = m_heaps[i]->heapPageFromAddress(address);
(...skipping 206 matching lines...) Expand 10 before | Expand all | Expand 10 after
915 threadAttachMutex().unlock(); 981 threadAttachMutex().unlock();
916 return gcInfo; 982 return gcInfo;
917 } 983 }
918 } 984 }
919 if (needLockForIteration) 985 if (needLockForIteration)
920 threadAttachMutex().unlock(); 986 threadAttachMutex().unlock();
921 return 0; 987 return 0;
922 } 988 }
923 #endif 989 #endif
924 } 990 }
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698