Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(759)

Unified Diff: third_party/WebKit/Source/platform/heap/ThreadState.h

Issue 1477023003: Refactor the Heap into ThreadHeap to prepare for per thread heaps Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Refactored Created 4 years, 11 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
Index: third_party/WebKit/Source/platform/heap/ThreadState.h
diff --git a/third_party/WebKit/Source/platform/heap/ThreadState.h b/third_party/WebKit/Source/platform/heap/ThreadState.h
index b43767d0e928051384ed01a68e6466d4066d524a..f83cc0a95b8b577a432d286cdd5f3ccce6f88654 100644
--- a/third_party/WebKit/Source/platform/heap/ThreadState.h
+++ b/third_party/WebKit/Source/platform/heap/ThreadState.h
@@ -61,7 +61,12 @@ class BaseHeap;
class SafePointAwareMutexLocker;
class SafePointBarrier;
class ThreadState;
+class OrphanedPagePool;
class Visitor;
+class PageMemoryRegion;
+class RegionTree;
+class FreePagePool;
+class HeapDoesNotContainCache;
// Declare that a class has a pre-finalizer. The pre-finalizer is called
// before any object gets swept, so it is safe to touch on-heap objects
@@ -118,6 +123,140 @@ using UsingPreFinalizerMacroNeedsTrailingSemiColon = char
#define WILL_BE_USING_PRE_FINALIZER(Class, method)
#endif
+using ThreadStateSet = HashSet<ThreadState*>;
+
+// Stats for heap in a GCGroup.
+//
+// Counters are shared between the threads of the group: writers go through
+// releaseStore/atomicAdd/atomicSubtract and readers through acquireLoad, so a
+// single field can be read without a lock. NOTE(review): reads of different
+// fields are presumably not mutually consistent with each other — confirm
+// callers do not rely on a coherent snapshot.
+class GCHeapStats {
+public:
+ GCHeapStats();
+ // Marked-object size recorded at the end of the last complete sweep;
+ // published with release/acquire semantics.
+ void setMarkedObjectSizeAtLastCompleteSweep(size_t size) { releaseStore(&m_markedObjectSizeAtLastCompleteSweep, size); }
+ size_t markedObjectSizeAtLastCompleteSweep() { return acquireLoad(&m_markedObjectSizeAtLastCompleteSweep); }
+ // Object/space counters; increase/decrease are defined out-of-line.
+ void increaseAllocatedObjectSize(size_t delta);
+ void decreaseAllocatedObjectSize(size_t delta);
+ size_t allocatedObjectSize() { return acquireLoad(&m_allocatedObjectSize); }
+ void increaseMarkedObjectSize(size_t delta);
+ size_t markedObjectSize() { return acquireLoad(&m_markedObjectSize); }
+ void increaseAllocatedSpace(size_t delta);
+ void decreaseAllocatedSpace(size_t delta);
+ size_t allocatedSpace() { return acquireLoad(&m_allocatedSpace); }
+ size_t objectSizeAtLastGC() { return acquireLoad(&m_objectSizeAtLastGC); }
+ // Wrapper counters are updated with atomic add/subtract; the size_t delta
+ // is narrowed to long for the atomic ops — NOTE(review): confirm deltas
+ // never exceed LONG_MAX.
+ void increaseWrapperCount(size_t delta) { atomicAdd(&m_wrapperCount, static_cast<long>(delta)); }
+ void decreaseWrapperCount(size_t delta) { atomicSubtract(&m_wrapperCount, static_cast<long>(delta)); }
+ size_t wrapperCount() { return acquireLoad(&m_wrapperCount); }
+ size_t wrapperCountAtLastGC() { return acquireLoad(&m_wrapperCountAtLastGC); }
+ void increaseCollectedWrapperCount(size_t delta) { atomicAdd(&m_collectedWrapperCount, static_cast<long>(delta)); }
+ size_t collectedWrapperCount() { return acquireLoad(&m_collectedWrapperCount); }
+ size_t partitionAllocSizeAtLastGC() { return acquireLoad(&m_partitionAllocSizeAtLastGC); }
+ // Plain (non-atomic) accessors — NOTE(review): unlike the counters above,
+ // this field is read/written without atomics; confirm it is only touched
+ // from a single thread or under a lock.
+ void setEstimatedMarkingTimePerByte(double estimatedMarkingTimePerByte) { m_estimatedMarkingTimePerByte = estimatedMarkingTimePerByte; }
+ double estimatedMarkingTimePerByte() const { return m_estimatedMarkingTimePerByte; }
+ double estimatedMarkingTime();
+ void reset();
+
+private:
+ size_t m_allocatedSpace;
+ size_t m_allocatedObjectSize;
+ size_t m_objectSizeAtLastGC;
+ size_t m_markedObjectSize;
+ size_t m_markedObjectSizeAtLastCompleteSweep;
+ size_t m_wrapperCount;
+ size_t m_wrapperCountAtLastGC;
+ size_t m_collectedWrapperCount;
+ size_t m_partitionAllocSizeAtLastGC;
+ double m_estimatedMarkingTimePerByte;
+};
+
+// Abstract base for a group of threads that are garbage-collected together.
+// Pure-virtual hooks cover thread attach/detach, safe-point parking, and the
+// pre/post phases of a GC cycle; the base class itself owns the concrete
+// cross-group state: heap stats, the page-memory region tree, and the
+// negative "heap does not contain" cache.
+class PLATFORM_EXPORT GCGroup {
+public:
+ virtual ~GCGroup();
+ // Thread lifecycle within the group.
+ virtual void attach(ThreadState*) = 0;
+ virtual void detach(ThreadState*) = 0;
+ // Safe-point parking of the group's threads.
+ virtual bool park() = 0;
+ virtual void resume() = 0;
+ virtual bool isParked() const = 0;
+ virtual void lockThreadAttachMutex() = 0;
+ virtual void unlockThreadAttachMutex() = 0;
+#if ENABLE(ASSERT)
+ virtual BasePage* findPageFromAddress(Address) = 0;
+#endif
+ // GC cycle hooks.
+ virtual void preGC() = 0;
+ virtual void postGC(BlinkGC::GCType) = 0;
+ virtual size_t objectPayloadSizeForTesting() = 0;
+ // Number of threads in the group (presumably — confirm against the
+ // MultiThreadGCGroup implementation).
+ virtual size_t size() const = 0;
+ virtual void checkAndPark(ThreadState*, SafePointAwareMutexLocker*) = 0;
+ virtual void enterSafePoint(ThreadState*) = 0;
+ virtual void leaveSafePoint(ThreadState*, SafePointAwareMutexLocker*) = 0;
+ virtual void shutdownIfNecessary() = 0;
+
+ // Trace all persistent roots, called when marking the managed heap objects.
+ virtual void visitPersistentRoots(Visitor*) = 0;
+
+ // Trace all objects found on the stack, used when doing conservative GCs.
+ virtual void visitStackRoots(Visitor*) = 0;
+
+ // Negative contains-cache used to fast-reject addresses that are not in
+ // this group's heap.
+ void flushHeapDoesNotContainCache();
+ HeapDoesNotContainCache* heapDoesNotContainCache() { return m_heapDoesNotContainCache.get(); }
+
+ // This look-up uses the region search tree and a negative contains cache to
+ // provide an efficient mapping from arbitrary addresses to the containing
+ // heap-page if one exists.
+ BasePage* lookupPageForAddress(Address);
+ void addPageMemoryRegion(PageMemoryRegion*);
+ void removePageMemoryRegion(PageMemoryRegion*);
+ GCHeapStats& heapStats() { return m_stats; }
+
+ // Global registry of every live GCGroup.
+ static HashSet<GCGroup*>& all();
+
+protected:
+ GCGroup();
+
+private:
+ GCHeapStats m_stats;
+ // m_regionTreeMutex presumably guards m_regionTree — confirm in the
+ // add/remove/lookup implementations. The tree is a raw pointer; check
+ // who owns/frees it.
+ Mutex m_regionTreeMutex;
+ RegionTree* m_regionTree;
+ OwnPtr<HeapDoesNotContainCache> m_heapDoesNotContainCache;
+};
+
+// Concrete GCGroup coordinating multiple attached ThreadStates: keeps the
+// set of member threads, a recursive attach mutex, a safe-point barrier, and
+// the free/orphaned page pools shared by the group.
+class PLATFORM_EXPORT MultiThreadGCGroup : public GCGroup {
 haraken 2016/02/12 11:28:53 Is it really worth distinguishing MultiThreadGCGro
 keishi 2016/02/29 06:02:34 For per thread heap enabled threads I'm thinking o
+public:
+ MultiThreadGCGroup();
+ ~MultiThreadGCGroup();
+ // GCGroup interface (see base class for contracts).
+ void attach(ThreadState*) override;
+ void detach(ThreadState*) override;
+ bool park() override;
+ void resume() override;
+ bool isParked() const override;
+ void lockThreadAttachMutex() override;
+ void unlockThreadAttachMutex() override;
+#if ENABLE(ASSERT)
+ BasePage* findPageFromAddress(Address) override;
+#endif
+ void preGC() override;
+ void postGC(BlinkGC::GCType) override;
+ size_t objectPayloadSizeForTesting() override;
+ size_t size() const override;
+ void visitPersistentRoots(Visitor*) override;
+ void visitStackRoots(Visitor*) override;
+ void checkAndPark(ThreadState*, SafePointAwareMutexLocker*) override;
+ void enterSafePoint(ThreadState*) override;
+ void leaveSafePoint(ThreadState*, SafePointAwareMutexLocker*) override;
+ void shutdownIfNecessary() override;
+ // Page pools shared by all threads of the group.
+ FreePagePool* freePagePool() { return m_freePagePool.get(); }
+ OrphanedPagePool* orphanedPagePool() { return m_orphanedPagePool.get(); }
+
+ // NOTE(review): threads() exposes m_threads by const reference; confirm
+ // callers hold the thread-attach mutex while iterating, since attach/detach
+ // mutate the set.
+ const ThreadStateSet& threads() const { return m_threads; }
+ SafePointBarrier* safePointBarrier() { return m_safePointBarrier.get(); }
+ RecursiveMutex& threadAttachMutex() { return m_threadAttachMutex; }
+
+private:
+ void shutdown();
+
+ RecursiveMutex m_threadAttachMutex;
+ ThreadStateSet m_threads;
+ OwnPtr<SafePointBarrier> m_safePointBarrier;
+ OwnPtr<FreePagePool> m_freePagePool;
+ OwnPtr<OrphanedPagePool> m_orphanedPagePool;
+};
+
class PLATFORM_EXPORT ThreadState {
WTF_MAKE_NONCOPYABLE(ThreadState);
public:
@@ -174,30 +313,20 @@ public:
// garbage collector.
using AttachedThreadStateSet = HashSet<ThreadState*>;
static AttachedThreadStateSet& attachedThreads();
- static RecursiveMutex& threadAttachMutex();
- static void lockThreadAttachMutex();
- static void unlockThreadAttachMutex();
// Initialize threading infrastructure. Should be called from the main
// thread.
static void init();
static void shutdown();
- static void shutdownHeapIfNecessary();
bool isTerminating() { return m_isTerminating; }
- static void attachMainThread();
+ static void prepareForMainThread();
static void detachMainThread();
- // Trace all persistent roots, called when marking the managed heap objects.
- static void visitPersistentRoots(Visitor*);
-
- // Trace all objects found on the stack, used when doing conservative GCs.
- static void visitStackRoots(Visitor*);
-
// Associate ThreadState object with the current thread. After this
// call thread can start using the garbage collected heap infrastructure.
// It also has to periodically check for safepoints.
- static void attach();
+ static void prepareForCurrentThread(bool prepareForCurrentThread = false);
// Disassociate attached ThreadState from the current thread. The thread
// can no longer use the garbage collected heap after this call.
@@ -229,11 +358,24 @@ public:
return reinterpret_cast<ThreadState*>(s_mainThreadStateStorage);
}
+ static ThreadState* forObject(const void*);
+
bool isMainThread() const { return this == mainThreadState(); }
#if ENABLE(ASSERT)
bool checkThread() const { return m_thread == currentThread(); }
#endif
+ bool perThreadHeapEnabled() const { return m_perThreadHeapEnabled; }
+
+ // When ThreadState is detaching from non-main thread its
+ // heap is expected to be empty (because it is going away).
+ // Perform registered cleanup tasks and garbage collection
+ // to sweep away any objects that are left on this heap.
+ // We assert that nothing must remain after this cleanup.
+ // If assertion does not hold we crash as we are potentially
+ // in the dangling pointer situation.
+ void cleanupCallback();
+
void performIdleGC(double deadlineSeconds);
void performIdleLazySweep(double deadlineSeconds);
@@ -325,10 +467,6 @@ public:
// are not wrapped in a SafePointScope (e.g. BlinkGCInterruptor for JavaScript code)
//
- // Request all other threads to stop. Must only be called if the current thread is at safepoint.
- static bool stopThreads();
- static void resumeThreads();
-
// Check if GC is requested by another thread and pause this thread if this is the case.
// Can only be called when current thread is in a consistent state.
void safePoint(BlinkGC::StackState);
@@ -338,6 +476,8 @@ public:
void leaveSafePoint(SafePointAwareMutexLocker* = nullptr);
bool isAtSafePoint() const { return m_atSafePoint; }
+ MultiThreadGCGroup* gcGroup() const { return m_gcGroup; }
+
void addInterruptor(PassOwnPtr<BlinkGCInterruptor>);
void removeInterruptor(BlinkGCInterruptor*);
@@ -521,7 +661,7 @@ private:
FreelistSnapshot
};
- ThreadState();
+ ThreadState(bool perThreadHeapEnabled);
~ThreadState();
NO_SANITIZE_ADDRESS void copyStackUntilSafePointScope();
@@ -575,14 +715,6 @@ private:
void poisonAllHeaps();
#endif
- // When ThreadState is detaching from non-main thread its
- // heap is expected to be empty (because it is going away).
- // Perform registered cleanup tasks and garbage collection
- // to sweep away any objects that are left on this heap.
- // We assert that nothing must remain after this cleanup.
- // If assertion does not hold we crash as we are potentially
- // in the dangling pointer situation.
- void cleanup();
void cleanupPages();
void prepareForThreadStateTermination();
@@ -603,7 +735,6 @@ private:
static WTF::ThreadSpecific<ThreadState*>* s_threadSpecific;
static uintptr_t s_mainThreadStackStart;
static uintptr_t s_mainThreadUnderestimatedStackSize;
- static SafePointBarrier* s_safePointBarrier;
// We can't create a static member of type ThreadState here
// because it will introduce global constructor and destructor.
@@ -620,6 +751,7 @@ private:
#if OS(WIN) && COMPILER(MSVC)
size_t m_threadStackSize;
#endif
+ bool m_perThreadHeapEnabled;
intptr_t* m_startOfStack;
intptr_t* m_endOfStack;
@@ -653,6 +785,8 @@ private:
v8::Isolate* m_isolate;
void (*m_traceDOMWrappers)(v8::Isolate*, Visitor*);
+ MultiThreadGCGroup* m_gcGroup;
+
#if defined(ADDRESS_SANITIZER)
void* m_asanFakeStack;
#endif

Powered by Google App Engine
This is Rietveld 408576698