Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(180)

Side by Side Diff: third_party/WebKit/Source/platform/heap/ThreadState.h

Issue 1477023003: Refactor the Heap into ThreadHeap to prepare for per thread heaps Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Refactored Created 4 years, 10 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 43 matching lines...) Expand 10 before | Expand all | Expand 10 after
54 class CallbackStack; 54 class CallbackStack;
55 struct GCInfo; 55 struct GCInfo;
56 class GarbageCollectedMixinConstructorMarker; 56 class GarbageCollectedMixinConstructorMarker;
57 class HeapObjectHeader; 57 class HeapObjectHeader;
58 class PersistentNode; 58 class PersistentNode;
59 class PersistentRegion; 59 class PersistentRegion;
60 class BaseHeap; 60 class BaseHeap;
61 class SafePointAwareMutexLocker; 61 class SafePointAwareMutexLocker;
62 class SafePointBarrier; 62 class SafePointBarrier;
63 class ThreadState; 63 class ThreadState;
64 class OrphanedPagePool;
64 class Visitor; 65 class Visitor;
66 class PageMemoryRegion;
67 class RegionTree;
68 class FreePagePool;
69 class HeapDoesNotContainCache;
65 70
66 // Declare that a class has a pre-finalizer. The pre-finalizer is called 71 // Declare that a class has a pre-finalizer. The pre-finalizer is called
67 // before any object gets swept, so it is safe to touch on-heap objects 72 // before any object gets swept, so it is safe to touch on-heap objects
68 // that may be collected in the same GC cycle. If you cannot avoid touching 73 // that may be collected in the same GC cycle. If you cannot avoid touching
69 // on-heap objects in a destructor (which is not allowed), you can consider 74 // on-heap objects in a destructor (which is not allowed), you can consider
70 // using the pre-finalizer. The only restriction is that the pre-finalizer 75 // using the pre-finalizer. The only restriction is that the pre-finalizer
71 // must not resurrect dead objects (e.g., store unmarked objects into 76 // must not resurrect dead objects (e.g., store unmarked objects into
72 // Members etc). The pre-finalizer is called on the thread that registered 77 // Members etc). The pre-finalizer is called on the thread that registered
73 // the pre-finalizer. 78 // the pre-finalizer.
74 // 79 //
(...skipping 36 matching lines...) Expand 10 before | Expand all | Expand 10 after
111 return true; \ 116 return true; \
112 } \ 117 } \
113 using UsingPreFinalizerMacroNeedsTrailingSemiColon = char 118 using UsingPreFinalizerMacroNeedsTrailingSemiColon = char
114 119
115 #if ENABLE(OILPAN) 120 #if ENABLE(OILPAN)
116 #define WILL_BE_USING_PRE_FINALIZER(Class, method) USING_PRE_FINALIZER(Class, method) 121 #define WILL_BE_USING_PRE_FINALIZER(Class, method) USING_PRE_FINALIZER(Class, method)
117 #else 122 #else
118 #define WILL_BE_USING_PRE_FINALIZER(Class, method) 123 #define WILL_BE_USING_PRE_FINALIZER(Class, method)
119 #endif 124 #endif
120 125
126 using ThreadStateSet = HashSet<ThreadState*>;
127
128 // Stats for heap in a GCGroup.
129 class GCHeapStats {
130 public:
131 GCHeapStats();
132 void setMarkedObjectSizeAtLastCompleteSweep(size_t size) { releaseStore(&m_markedObjectSizeAtLastCompleteSweep, size); }
133 size_t markedObjectSizeAtLastCompleteSweep() { return acquireLoad(&m_markedObjectSizeAtLastCompleteSweep); }
134 void increaseAllocatedObjectSize(size_t delta);
135 void decreaseAllocatedObjectSize(size_t delta);
136 size_t allocatedObjectSize() { return acquireLoad(&m_allocatedObjectSize); }
137 void increaseMarkedObjectSize(size_t delta);
138 size_t markedObjectSize() { return acquireLoad(&m_markedObjectSize); }
139 void increaseAllocatedSpace(size_t delta);
140 void decreaseAllocatedSpace(size_t delta);
141 size_t allocatedSpace() { return acquireLoad(&m_allocatedSpace); }
142 size_t objectSizeAtLastGC() { return acquireLoad(&m_objectSizeAtLastGC); }
143 void increaseWrapperCount(size_t delta) { atomicAdd(&m_wrapperCount, static_cast<long>(delta)); }
144 void decreaseWrapperCount(size_t delta) { atomicSubtract(&m_wrapperCount, static_cast<long>(delta)); }
145 size_t wrapperCount() { return acquireLoad(&m_wrapperCount); }
146 size_t wrapperCountAtLastGC() { return acquireLoad(&m_wrapperCountAtLastGC); }
147 void increaseCollectedWrapperCount(size_t delta) { atomicAdd(&m_collectedWrapperCount, static_cast<long>(delta)); }
148 size_t collectedWrapperCount() { return acquireLoad(&m_collectedWrapperCount); }
149 size_t partitionAllocSizeAtLastGC() { return acquireLoad(&m_partitionAllocSizeAtLastGC); }
150 void setEstimatedMarkingTimePerByte(double estimatedMarkingTimePerByte) { m_estimatedMarkingTimePerByte = estimatedMarkingTimePerByte; }
151 double estimatedMarkingTimePerByte() const { return m_estimatedMarkingTimePerByte; }
152 double estimatedMarkingTime();
153 void reset();
154
155 private:
156 size_t m_allocatedSpace;
157 size_t m_allocatedObjectSize;
158 size_t m_objectSizeAtLastGC;
159 size_t m_markedObjectSize;
160 size_t m_markedObjectSizeAtLastCompleteSweep;
161 size_t m_wrapperCount;
162 size_t m_wrapperCountAtLastGC;
163 size_t m_collectedWrapperCount;
164 size_t m_partitionAllocSizeAtLastGC;
165 double m_estimatedMarkingTimePerByte;
166 };
167
168 class PLATFORM_EXPORT GCGroup {
169 public:
170 virtual ~GCGroup();
171 virtual void attach(ThreadState*) = 0;
172 virtual void detach(ThreadState*) = 0;
173 virtual bool park() = 0;
174 virtual void resume() = 0;
175 virtual bool isParked() const = 0;
176 virtual void lockThreadAttachMutex() = 0;
177 virtual void unlockThreadAttachMutex() = 0;
178 #if ENABLE(ASSERT)
179 virtual BasePage* findPageFromAddress(Address) = 0;
180 #endif
181 virtual void preGC() = 0;
182 virtual void postGC(BlinkGC::GCType) = 0;
183 virtual size_t objectPayloadSizeForTesting() = 0;
184 virtual size_t size() const = 0;
185 virtual void checkAndPark(ThreadState*, SafePointAwareMutexLocker*) = 0;
186 virtual void enterSafePoint(ThreadState*) = 0;
187 virtual void leaveSafePoint(ThreadState*, SafePointAwareMutexLocker*) = 0;
188 virtual void shutdownIfNecessary() = 0;
189
190 // Trace all persistent roots, called when marking the managed heap objects.
191 virtual void visitPersistentRoots(Visitor*) = 0;
192
193 // Trace all objects found on the stack, used when doing conservative GCs.
194 virtual void visitStackRoots(Visitor*) = 0;
195
196 void flushHeapDoesNotContainCache();
197 HeapDoesNotContainCache* heapDoesNotContainCache() { return m_heapDoesNotContainCache.get(); }
198
199 // This look-up uses the region search tree and a negative contains cache to
200 // provide an efficient mapping from arbitrary addresses to the containing
201 // heap-page if one exists.
202 BasePage* lookupPageForAddress(Address);
203 void addPageMemoryRegion(PageMemoryRegion*);
204 void removePageMemoryRegion(PageMemoryRegion*);
205 GCHeapStats& heapStats() { return m_stats; }
206
207 static HashSet<GCGroup*>& all();
208
209 protected:
210 GCGroup();
211
212 private:
213 GCHeapStats m_stats;
214 Mutex m_regionTreeMutex;
215 RegionTree* m_regionTree;
216 OwnPtr<HeapDoesNotContainCache> m_heapDoesNotContainCache;
217 };
218
219 class PLATFORM_EXPORT MultiThreadGCGroup : public GCGroup {
haraken 2016/02/12 11:28:53 Is it really worth distinguishing MultiThreadGCGro
keishi 2016/02/29 06:02:34 For per thread heap enabled threads I'm thinking o
220 public:
221 MultiThreadGCGroup();
222 ~MultiThreadGCGroup();
223 void attach(ThreadState*) override;
224 void detach(ThreadState*) override;
225 bool park() override;
226 void resume() override;
227 bool isParked() const override;
228 void lockThreadAttachMutex() override;
229 void unlockThreadAttachMutex() override;
230 #if ENABLE(ASSERT)
231 BasePage* findPageFromAddress(Address) override;
232 #endif
233 void preGC() override;
234 void postGC(BlinkGC::GCType) override;
235 size_t objectPayloadSizeForTesting() override;
236 size_t size() const override;
237 void visitPersistentRoots(Visitor*) override;
238 void visitStackRoots(Visitor*) override;
239 void checkAndPark(ThreadState*, SafePointAwareMutexLocker*) override;
240 void enterSafePoint(ThreadState*) override;
241 void leaveSafePoint(ThreadState*, SafePointAwareMutexLocker*) override;
242 void shutdownIfNecessary() override;
243 FreePagePool* freePagePool() { return m_freePagePool.get(); }
244 OrphanedPagePool* orphanedPagePool() { return m_orphanedPagePool.get(); }
245
246 const ThreadStateSet& threads() const { return m_threads; }
247 SafePointBarrier* safePointBarrier() { return m_safePointBarrier.get(); }
248 RecursiveMutex& threadAttachMutex() { return m_threadAttachMutex; }
249
250 private:
251 void shutdown();
252
253 RecursiveMutex m_threadAttachMutex;
254 ThreadStateSet m_threads;
255 OwnPtr<SafePointBarrier> m_safePointBarrier;
256 OwnPtr<FreePagePool> m_freePagePool;
257 OwnPtr<OrphanedPagePool> m_orphanedPagePool;
258 };
259
121 class PLATFORM_EXPORT ThreadState { 260 class PLATFORM_EXPORT ThreadState {
122 WTF_MAKE_NONCOPYABLE(ThreadState); 261 WTF_MAKE_NONCOPYABLE(ThreadState);
123 public: 262 public:
124 typedef std::pair<void*, PreFinalizerCallback> PreFinalizer; 263 typedef std::pair<void*, PreFinalizerCallback> PreFinalizer;
125 264
126 // See setGCState() for possible state transitions. 265 // See setGCState() for possible state transitions.
127 enum GCState { 266 enum GCState {
128 NoGCScheduled, 267 NoGCScheduled,
129 IdleGCScheduled, 268 IdleGCScheduled,
130 PreciseGCScheduled, 269 PreciseGCScheduled,
(...skipping 36 matching lines...) Expand 10 before | Expand all | Expand 10 after
167 m_state->m_sweepForbidden = false; 306 m_state->m_sweepForbidden = false;
168 } 307 }
169 private: 308 private:
170 ThreadState* m_state; 309 ThreadState* m_state;
171 }; 310 };
172 311
173 // The set of ThreadStates for all threads attached to the Blink 312 // The set of ThreadStates for all threads attached to the Blink
174 // garbage collector. 313 // garbage collector.
175 using AttachedThreadStateSet = HashSet<ThreadState*>; 314 using AttachedThreadStateSet = HashSet<ThreadState*>;
176 static AttachedThreadStateSet& attachedThreads(); 315 static AttachedThreadStateSet& attachedThreads();
177 static RecursiveMutex& threadAttachMutex();
178 static void lockThreadAttachMutex();
179 static void unlockThreadAttachMutex();
180 316
181 // Initialize threading infrastructure. Should be called from the main 317 // Initialize threading infrastructure. Should be called from the main
182 // thread. 318 // thread.
183 static void init(); 319 static void init();
184 static void shutdown(); 320 static void shutdown();
185 static void shutdownHeapIfNecessary();
186 bool isTerminating() { return m_isTerminating; } 321 bool isTerminating() { return m_isTerminating; }
187 322
188 static void attachMainThread(); 323 static void prepareForMainThread();
189 static void detachMainThread(); 324 static void detachMainThread();
190 325
191 // Trace all persistent roots, called when marking the managed heap objects.
192 static void visitPersistentRoots(Visitor*);
193
194 // Trace all objects found on the stack, used when doing conservative GCs.
195 static void visitStackRoots(Visitor*);
196
197 // Associate ThreadState object with the current thread. After this 326 // Associate ThreadState object with the current thread. After this
198 // call thread can start using the garbage collected heap infrastructure. 327 // call thread can start using the garbage collected heap infrastructure.
199 // It also has to periodically check for safepoints. 328 // It also has to periodically check for safepoints.
200 static void attach(); 329 static void prepareForCurrentThread(bool prepareForCurrentThread = false);
201 330
202 // Disassociate attached ThreadState from the current thread. The thread 331 // Disassociate attached ThreadState from the current thread. The thread
203 // can no longer use the garbage collected heap after this call. 332 // can no longer use the garbage collected heap after this call.
204 static void detach(); 333 static void detach();
205 334
206 static ThreadState* current() 335 static ThreadState* current()
207 { 336 {
208 #if defined(__GLIBC__) || OS(ANDROID) || OS(FREEBSD) 337 #if defined(__GLIBC__) || OS(ANDROID) || OS(FREEBSD)
209 // TLS lookup is fast in these platforms. 338 // TLS lookup is fast in these platforms.
210 return **s_threadSpecific; 339 return **s_threadSpecific;
(...skipping 11 matching lines...) Expand all
222 // TLS lookup is slow. 351 // TLS lookup is slow.
223 return **s_threadSpecific; 352 return **s_threadSpecific;
224 #endif 353 #endif
225 } 354 }
226 355
227 static ThreadState* mainThreadState() 356 static ThreadState* mainThreadState()
228 { 357 {
229 return reinterpret_cast<ThreadState*>(s_mainThreadStateStorage); 358 return reinterpret_cast<ThreadState*>(s_mainThreadStateStorage);
230 } 359 }
231 360
361 static ThreadState* forObject(const void*);
362
232 bool isMainThread() const { return this == mainThreadState(); } 363 bool isMainThread() const { return this == mainThreadState(); }
233 #if ENABLE(ASSERT) 364 #if ENABLE(ASSERT)
234 bool checkThread() const { return m_thread == currentThread(); } 365 bool checkThread() const { return m_thread == currentThread(); }
235 #endif 366 #endif
236 367
368 bool perThreadHeapEnabled() const { return m_perThreadHeapEnabled; }
369
370 // When ThreadState is detaching from non-main thread its
371 // heap is expected to be empty (because it is going away).
372 // Perform registered cleanup tasks and garbage collection
373 // to sweep away any objects that are left on this heap.
374 // We assert that nothing must remain after this cleanup.
375 // If assertion does not hold we crash as we are potentially
376 // in the dangling pointer situation.
377 void cleanupCallback();
378
237 void performIdleGC(double deadlineSeconds); 379 void performIdleGC(double deadlineSeconds);
238 void performIdleLazySweep(double deadlineSeconds); 380 void performIdleLazySweep(double deadlineSeconds);
239 381
240 void scheduleIdleGC(); 382 void scheduleIdleGC();
241 void scheduleIdleLazySweep(); 383 void scheduleIdleLazySweep();
242 void schedulePreciseGC(); 384 void schedulePreciseGC();
243 void scheduleV8FollowupGCIfNeeded(BlinkGC::V8GCType); 385 void scheduleV8FollowupGCIfNeeded(BlinkGC::V8GCType);
244 void schedulePageNavigationGCIfNeeded(float estimatedRemovalRatio); 386 void schedulePageNavigationGCIfNeeded(float estimatedRemovalRatio);
245 void schedulePageNavigationGC(); 387 void schedulePageNavigationGC();
246 void scheduleGCIfNeeded(); 388 void scheduleGCIfNeeded();
(...skipping 71 matching lines...) Expand 10 before | Expand all | Expand 10 after
318 // there is a GC in progress. 460 // there is a GC in progress.
319 // 461 //
320 // Each thread that has ThreadState attached must: 462 // Each thread that has ThreadState attached must:
321 // - periodically check if GC is requested from another thread by calling a safePoint() method; 463 // - periodically check if GC is requested from another thread by calling a safePoint() method;
322 // - use SafePointScope around long running loops that have no safePoint() invocation inside, 464 // - use SafePointScope around long running loops that have no safePoint() invocation inside,
323 // such loops must not touch any heap object; 465 // such loops must not touch any heap object;
324 // - register an BlinkGCInterruptor that can interrupt long running loops that have no calls to safePoint and 466 // - register an BlinkGCInterruptor that can interrupt long running loops that have no calls to safePoint and
325 // are not wrapped in a SafePointScope (e.g. BlinkGCInterruptor for JavaScript code) 467 // are not wrapped in a SafePointScope (e.g. BlinkGCInterruptor for JavaScript code)
326 // 468 //
327 469
328 // Request all other threads to stop. Must only be called if the current thread is at safepoint.
329 static bool stopThreads();
330 static void resumeThreads();
331
332 // Check if GC is requested by another thread and pause this thread if this is the case. 470 // Check if GC is requested by another thread and pause this thread if this is the case.
333 // Can only be called when current thread is in a consistent state. 471 // Can only be called when current thread is in a consistent state.
334 void safePoint(BlinkGC::StackState); 472 void safePoint(BlinkGC::StackState);
335 473
336 // Mark current thread as running inside safepoint. 474 // Mark current thread as running inside safepoint.
337 void enterSafePoint(BlinkGC::StackState, void*); 475 void enterSafePoint(BlinkGC::StackState, void*);
338 void leaveSafePoint(SafePointAwareMutexLocker* = nullptr); 476 void leaveSafePoint(SafePointAwareMutexLocker* = nullptr);
339 bool isAtSafePoint() const { return m_atSafePoint; } 477 bool isAtSafePoint() const { return m_atSafePoint; }
340 478
479 MultiThreadGCGroup* gcGroup() const { return m_gcGroup; }
480
341 void addInterruptor(PassOwnPtr<BlinkGCInterruptor>); 481 void addInterruptor(PassOwnPtr<BlinkGCInterruptor>);
342 void removeInterruptor(BlinkGCInterruptor*); 482 void removeInterruptor(BlinkGCInterruptor*);
343 483
344 void recordStackEnd(intptr_t* endOfStack) 484 void recordStackEnd(intptr_t* endOfStack)
345 { 485 {
346 m_endOfStack = endOfStack; 486 m_endOfStack = endOfStack;
347 } 487 }
348 488
349 // Get one of the heap structures for this thread. 489 // Get one of the heap structures for this thread.
350 // The thread heap is split into multiple heap parts based on object types 490 // The thread heap is split into multiple heap parts based on object types
(...skipping 163 matching lines...) Expand 10 before | Expand all | Expand 10 after
514 void enterStaticReferenceRegistrationDisabledScope(); 654 void enterStaticReferenceRegistrationDisabledScope();
515 void leaveStaticReferenceRegistrationDisabledScope(); 655 void leaveStaticReferenceRegistrationDisabledScope();
516 #endif 656 #endif
517 657
518 private: 658 private:
519 enum SnapshotType { 659 enum SnapshotType {
520 HeapSnapshot, 660 HeapSnapshot,
521 FreelistSnapshot 661 FreelistSnapshot
522 }; 662 };
523 663
524 ThreadState(); 664 ThreadState(bool perThreadHeapEnabled);
525 ~ThreadState(); 665 ~ThreadState();
526 666
527 NO_SANITIZE_ADDRESS void copyStackUntilSafePointScope(); 667 NO_SANITIZE_ADDRESS void copyStackUntilSafePointScope();
528 void clearSafePointScopeMarker() 668 void clearSafePointScopeMarker()
529 { 669 {
530 m_safePointStackCopy.clear(); 670 m_safePointStackCopy.clear();
531 m_safePointScopeMarker = nullptr; 671 m_safePointScopeMarker = nullptr;
532 } 672 }
533 673
534 // shouldSchedule{Precise,Idle}GC and shouldForceConservativeGC 674 // shouldSchedule{Precise,Idle}GC and shouldForceConservativeGC
(...skipping 33 matching lines...) Expand 10 before | Expand all | Expand 10 after
568 708
569 void runScheduledGC(BlinkGC::StackState); 709 void runScheduledGC(BlinkGC::StackState);
570 710
571 void eagerSweep(); 711 void eagerSweep();
572 712
573 #if defined(ADDRESS_SANITIZER) 713 #if defined(ADDRESS_SANITIZER)
574 void poisonEagerHeap(BlinkGC::Poisoning); 714 void poisonEagerHeap(BlinkGC::Poisoning);
575 void poisonAllHeaps(); 715 void poisonAllHeaps();
576 #endif 716 #endif
577 717
578 // When ThreadState is detaching from non-main thread its
579 // heap is expected to be empty (because it is going away).
580 // Perform registered cleanup tasks and garbage collection
581 // to sweep away any objects that are left on this heap.
582 // We assert that nothing must remain after this cleanup.
583 // If assertion does not hold we crash as we are potentially
584 // in the dangling pointer situation.
585 void cleanup();
586 void cleanupPages(); 718 void cleanupPages();
587 719
588 void prepareForThreadStateTermination(); 720 void prepareForThreadStateTermination();
589 721
590 void invokePreFinalizers(); 722 void invokePreFinalizers();
591 723
592 void takeSnapshot(SnapshotType); 724 void takeSnapshot(SnapshotType);
593 void clearHeapAges(); 725 void clearHeapAges();
594 int heapIndexOfVectorHeapLeastRecentlyExpanded(int beginHeapIndex, int endHeapIndex); 726 int heapIndexOfVectorHeapLeastRecentlyExpanded(int beginHeapIndex, int endHeapIndex);
595 727
596 // Should only be called under protection of threadAttachMutex(). 728 // Should only be called under protection of threadAttachMutex().
597 const Vector<OwnPtr<BlinkGCInterruptor>>& interruptors() const { return m_interruptors; } 729 const Vector<OwnPtr<BlinkGCInterruptor>>& interruptors() const { return m_interruptors; }
598 730
599 friend class SafePointAwareMutexLocker; 731 friend class SafePointAwareMutexLocker;
600 friend class SafePointBarrier; 732 friend class SafePointBarrier;
601 friend class SafePointScope; 733 friend class SafePointScope;
602 734
603 static WTF::ThreadSpecific<ThreadState*>* s_threadSpecific; 735 static WTF::ThreadSpecific<ThreadState*>* s_threadSpecific;
604 static uintptr_t s_mainThreadStackStart; 736 static uintptr_t s_mainThreadStackStart;
605 static uintptr_t s_mainThreadUnderestimatedStackSize; 737 static uintptr_t s_mainThreadUnderestimatedStackSize;
606 static SafePointBarrier* s_safePointBarrier;
607 738
608 // We can't create a static member of type ThreadState here 739 // We can't create a static member of type ThreadState here
609 // because it will introduce global constructor and destructor. 740 // because it will introduce global constructor and destructor.
610 // We would like to manage lifetime of the ThreadState attached 741 // We would like to manage lifetime of the ThreadState attached
611 // to the main thread explicitly instead and still use normal 742 // to the main thread explicitly instead and still use normal
612 // constructor and destructor for the ThreadState class. 743 // constructor and destructor for the ThreadState class.
613 // For this we reserve static storage for the main ThreadState 744 // For this we reserve static storage for the main ThreadState
614 // and lazily construct ThreadState in it using placement new. 745 // and lazily construct ThreadState in it using placement new.
615 static uint8_t s_mainThreadStateStorage[]; 746 static uint8_t s_mainThreadStateStorage[];
616 747
617 ThreadIdentifier m_thread; 748 ThreadIdentifier m_thread;
618 OwnPtr<PersistentRegion> m_persistentRegion; 749 OwnPtr<PersistentRegion> m_persistentRegion;
619 BlinkGC::StackState m_stackState; 750 BlinkGC::StackState m_stackState;
620 #if OS(WIN) && COMPILER(MSVC) 751 #if OS(WIN) && COMPILER(MSVC)
621 size_t m_threadStackSize; 752 size_t m_threadStackSize;
622 #endif 753 #endif
754 bool m_perThreadHeapEnabled;
623 intptr_t* m_startOfStack; 755 intptr_t* m_startOfStack;
624 intptr_t* m_endOfStack; 756 intptr_t* m_endOfStack;
625 757
626 void* m_safePointScopeMarker; 758 void* m_safePointScopeMarker;
627 Vector<Address> m_safePointStackCopy; 759 Vector<Address> m_safePointStackCopy;
628 bool m_atSafePoint; 760 bool m_atSafePoint;
629 Vector<OwnPtr<BlinkGCInterruptor>> m_interruptors; 761 Vector<OwnPtr<BlinkGCInterruptor>> m_interruptors;
630 bool m_sweepForbidden; 762 bool m_sweepForbidden;
631 size_t m_noAllocationCount; 763 size_t m_noAllocationCount;
632 size_t m_gcForbiddenCount; 764 size_t m_gcForbiddenCount;
(...skipping 13 matching lines...) Expand all
646 CallbackStack* m_threadLocalWeakCallbackStack; 778 CallbackStack* m_threadLocalWeakCallbackStack;
647 779
648 // Pre-finalizers are called in the reverse order in which they are 780 // Pre-finalizers are called in the reverse order in which they are
649 // registered by the constructors (including constructors of Mixin objects) 781 // registered by the constructors (including constructors of Mixin objects)
650 // for an object, by processing the m_orderedPreFinalizers back-to-front. 782 // for an object, by processing the m_orderedPreFinalizers back-to-front.
651 ListHashSet<PreFinalizer> m_orderedPreFinalizers; 783 ListHashSet<PreFinalizer> m_orderedPreFinalizers;
652 784
653 v8::Isolate* m_isolate; 785 v8::Isolate* m_isolate;
654 void (*m_traceDOMWrappers)(v8::Isolate*, Visitor*); 786 void (*m_traceDOMWrappers)(v8::Isolate*, Visitor*);
655 787
788 MultiThreadGCGroup* m_gcGroup;
789
656 #if defined(ADDRESS_SANITIZER) 790 #if defined(ADDRESS_SANITIZER)
657 void* m_asanFakeStack; 791 void* m_asanFakeStack;
658 #endif 792 #endif
659 793
660 #if defined(LEAK_SANITIZER) 794 #if defined(LEAK_SANITIZER)
661 // PersistentNodes that are stored in static references; 795 // PersistentNodes that are stored in static references;
662 // references we have to clear before initiating LSan's leak detection. 796 // references we have to clear before initiating LSan's leak detection.
663 HashSet<PersistentNode*> m_staticPersistents; 797 HashSet<PersistentNode*> m_staticPersistents;
664 798
665 // Count that controls scoped disabling of persistent registration. 799 // Count that controls scoped disabling of persistent registration.
(...skipping 22 matching lines...) Expand all
688 }; 822 };
689 823
690 template<> class ThreadStateFor<AnyThread> { 824 template<> class ThreadStateFor<AnyThread> {
691 public: 825 public:
692 static ThreadState* state() { return ThreadState::current(); } 826 static ThreadState* state() { return ThreadState::current(); }
693 }; 827 };
694 828
695 } // namespace blink 829 } // namespace blink
696 830
697 #endif // ThreadState_h 831 #endif // ThreadState_h
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698