Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(209)

Side by Side Diff: Source/platform/heap/ThreadState.h

Issue 1166793002: Oilpan: add assert to verify eager finalization. (Closed) Base URL: svn://svn.chromium.org/blink/trunk
Patch Set: Created 5 years, 6 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 220 matching lines...) Expand 10 before | Expand all | Expand 10 after
231 m_state->leaveNoAllocationScope(); 231 m_state->leaveNoAllocationScope();
232 } 232 }
233 private: 233 private:
234 ThreadState* m_state; 234 ThreadState* m_state;
235 }; 235 };
236 236
    // RAII scope that forbids (lazy) sweeping on the given thread for its
    // lifetime. The flag is read back via ThreadState::sweepForbidden().
    class SweepForbiddenScope final {
    public:
        explicit SweepForbiddenScope(ThreadState* state) : m_state(state)
        {
            // Nesting is not supported: the flag must not already be set.
            ASSERT(!m_state->m_isSweepForbidden);
            m_state->m_isSweepForbidden = true;
        }
        ~SweepForbiddenScope()
        {
            ASSERT(m_state->m_isSweepForbidden);
            m_state->m_isSweepForbidden = false;
        }
    private:
        ThreadState* m_state;
    };
252 252
253 // The set of ThreadStates for all threads attached to the Blink 253 // The set of ThreadStates for all threads attached to the Blink
254 // garbage collector. 254 // garbage collector.
255 using AttachedThreadStateSet = HashSet<ThreadState*>; 255 using AttachedThreadStateSet = HashSet<ThreadState*>;
256 static AttachedThreadStateSet& attachedThreads(); 256 static AttachedThreadStateSet& attachedThreads();
257 static RecursiveMutex& threadAttachMutex(); 257 static RecursiveMutex& threadAttachMutex();
(...skipping 109 matching lines...) Expand 10 before | Expand all | Expand 10 after
    // Heap allocation is disallowed while this thread is parked at a
    // safepoint or inside any (counted, nestable) NoAllocationScope.
    bool isAllocationAllowed() const { return !isAtSafePoint() && !m_noAllocationCount; }
    void enterNoAllocationScope() { m_noAllocationCount++; }
    void leaveNoAllocationScope() { m_noAllocationCount--; }
    // GC is forbidden while any GCForbiddenScope is active; the counter
    // makes the scope nestable.
    bool isGCForbidden() const { return m_gcForbiddenCount; }
    void enterGCForbiddenScope() { m_gcForbiddenCount++; }
    void leaveGCForbiddenScope()
    {
        // Must be balanced with a prior enterGCForbiddenScope().
        ASSERT(m_gcForbiddenCount > 0);
        m_gcForbiddenCount--;
    }
    // True while a SweepForbiddenScope is active on this thread.
    bool sweepForbidden() const { return m_isSweepForbidden; }
378
#if ENABLE(ASSERT)
    // Debug-only tracking of whether this thread is currently performing
    // the eager-sweep phase; presumably used by the assert this patch adds
    // to verify eager finalization ordering -- see the CL description.
    bool isEagerlySweeping() const { return m_isEagerlySweeping; }
    void enterEagerSweepScope()
    {
        // Eager-sweep scopes do not nest.
        ASSERT(!m_isEagerlySweeping);
        m_isEagerlySweeping = true;
    }
    void leaveEagerSweepScope()
    {
        ASSERT(m_isEagerlySweeping);
        m_isEagerlySweeping = false;
    }
#endif
378 392
379 void prepareRegionTree(); 393 void prepareRegionTree();
380 void flushHeapDoesNotContainCacheIfNeeded(); 394 void flushHeapDoesNotContainCacheIfNeeded();
381 395
382 // Safepoint related functionality. 396 // Safepoint related functionality.
383 // 397 //
384 // When a thread attempts to perform GC it needs to stop all other threads 398 // When a thread attempts to perform GC it needs to stop all other threads
385 // that use the heap or at least guarantee that they will not touch any 399 // that use the heap or at least guarantee that they will not touch any
386 // heap allocated object until GC is complete. 400 // heap allocated object until GC is complete.
387 // 401 //
(...skipping 314 matching lines...) Expand 10 before | Expand all | Expand 10 after
702 // and lazily construct ThreadState in it using placement new. 716 // and lazily construct ThreadState in it using placement new.
703 static uint8_t s_mainThreadStateStorage[]; 717 static uint8_t s_mainThreadStateStorage[];
704 718
705 ThreadIdentifier m_thread; 719 ThreadIdentifier m_thread;
706 OwnPtr<PersistentNode> m_persistents; 720 OwnPtr<PersistentNode> m_persistents;
707 StackState m_stackState; 721 StackState m_stackState;
708 intptr_t* m_startOfStack; 722 intptr_t* m_startOfStack;
709 intptr_t* m_endOfStack; 723 intptr_t* m_endOfStack;
710 void* m_safePointScopeMarker; 724 void* m_safePointScopeMarker;
711 Vector<Address> m_safePointStackCopy; 725 Vector<Address> m_safePointStackCopy;
712 bool m_atSafePoint;
713 Vector<Interruptor*> m_interruptors; 726 Vector<Interruptor*> m_interruptors;
714 bool m_sweepForbidden; 727
715 size_t m_noAllocationCount; 728 size_t m_noAllocationCount;
716 size_t m_gcForbiddenCount; 729 size_t m_gcForbiddenCount;
730
717 BaseHeap* m_heaps[NumberOfHeaps]; 731 BaseHeap* m_heaps[NumberOfHeaps];
718 732
719 int m_vectorBackingHeapIndex; 733 int m_vectorBackingHeapIndex;
720 size_t m_heapAges[NumberOfHeaps]; 734 size_t m_heapAges[NumberOfHeaps];
721 size_t m_currentHeapAges; 735 size_t m_currentHeapAges;
722 736
723 bool m_isTerminating;
724 GarbageCollectedMixinConstructorMarker* m_gcMixinMarker; 737 GarbageCollectedMixinConstructorMarker* m_gcMixinMarker;
725 738
726 bool m_shouldFlushHeapDoesNotContainCache; 739 bool m_shouldFlushHeapDoesNotContainCache;
727 GCState m_gcState; 740 GCState m_gcState;
728 741
729 CallbackStack* m_threadLocalWeakCallbackStack; 742 CallbackStack* m_threadLocalWeakCallbackStack;
730 HashMap<void*, bool (*)(void*, Visitor&)> m_preFinalizers; 743 HashMap<void*, bool (*)(void*, Visitor&)> m_preFinalizers;
731 744
732 v8::Isolate* m_isolate; 745 v8::Isolate* m_isolate;
733 void (*m_traceDOMWrappers)(v8::Isolate*, Visitor*); 746 void (*m_traceDOMWrappers)(v8::Isolate*, Visitor*);
734 747
735 #if defined(ADDRESS_SANITIZER) 748 #if defined(ADDRESS_SANITIZER)
736 void* m_asanFakeStack; 749 void* m_asanFakeStack;
737 #endif 750 #endif
738 751
739 Vector<PageMemoryRegion*> m_allocatedRegionsSinceLastGC; 752 Vector<PageMemoryRegion*> m_allocatedRegionsSinceLastGC;
740 753
741 #if ENABLE(GC_PROFILING) 754 #if ENABLE(GC_PROFILING)
742 double m_nextFreeListSnapshotTime; 755 double m_nextFreeListSnapshotTime;
743 #endif 756 #endif
744 // Ideally we want to allocate an array of size |gcInfoTableMax| but it will 757 // Ideally we want to allocate an array of size |gcInfoTableMax| but it will
745 // waste memory. Thus we limit the array size to 2^8 and share one entry 758 // waste memory. Thus we limit the array size to 2^8 and share one entry
746 // with multiple types of vectors. This won't be an issue in practice, 759 // with multiple types of vectors. This won't be an issue in practice,
747 // since there will be less than 2^8 types of objects in common cases. 760 // since there will be less than 2^8 types of objects in common cases.
748 static const int likelyToBePromptlyFreedArraySize = (1 << 8); 761 static const int likelyToBePromptlyFreedArraySize = (1 << 8);
 749 static const int likelyToBePromptlyFreedArrayMask = likelyToBePromptlyFreedArraySize - 1; 762 static const int likelyToBePromptlyFreedArrayMask = likelyToBePromptlyFreedArraySize - 1;
750 OwnPtr<int[]> m_likelyToBePromptlyFreed; 763 OwnPtr<int[]> m_likelyToBePromptlyFreed;
764
765 bool m_atSafePoint;
766 bool m_isTerminating;
767 bool m_isSweepForbidden;
768 #if ENABLE(ASSERT)
769 bool m_isEagerlySweeping;
770 #endif
751 }; 771 };
752 772
753 template<ThreadAffinity affinity> class ThreadStateFor; 773 template<ThreadAffinity affinity> class ThreadStateFor;
754 774
// Fast path for code statically known to run only on the main thread:
// returns the cached main-thread state without a thread-local lookup.
template<> class ThreadStateFor<MainThreadOnly> {
public:
    static ThreadState* state()
    {
        // This specialization must only be used from the main thread.
        ASSERT(ThreadState::current()->isMainThread());
        return ThreadState::mainThreadState();
    }
};
764 784
// General case: look up the calling thread's own ThreadState.
template<> class ThreadStateFor<AnyThread> {
public:
    static ThreadState* state() { return ThreadState::current(); }
};
769 789
770 } // namespace blink 790 } // namespace blink
771 791
772 #endif // ThreadState_h 792 #endif // ThreadState_h
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698