Chromium Code Reviews| Index: third_party/WebKit/Source/platform/heap/ThreadState.h |
| diff --git a/third_party/WebKit/Source/platform/heap/ThreadState.h b/third_party/WebKit/Source/platform/heap/ThreadState.h |
| index 2e8e081b2c4c245c7ad6f5b33d30c44f93feed40..487adc4700e2bc42d8ba4fb72d69fae62be308bc 100644 |
| --- a/third_party/WebKit/Source/platform/heap/ThreadState.h |
| +++ b/third_party/WebKit/Source/platform/heap/ThreadState.h |
| @@ -53,16 +53,21 @@ namespace blink { |
| class BasePage; |
| class CallbackStack; |
| class CrossThreadPersistentRegion; |
| +class FreePagePool; |
| struct GCInfo; |
| class GarbageCollectedMixinConstructorMarker; |
| +class HeapDoesNotContainCache; |
| class HeapObjectHeader; |
| +class OrphanedPagePool; |
| class PersistentNode; |
| class PersistentRegion; |
| +class XThreadPersistentRegion; |
| class BaseHeap; |
| class SafePointAwareMutexLocker; |
| class SafePointBarrier; |
| class ThreadState; |
| class Visitor; |
| +class PageMemoryRegion; |
| // Declare that a class has a pre-finalizer. The pre-finalizer is called |
| // before any object gets swept, so it is safe to touch on-heap objects |
| @@ -124,6 +129,77 @@ class PLATFORM_EXPORT ThreadState { |
| public: |
| typedef std::pair<void*, PreFinalizerCallback> PreFinalizer; |
| + // Heap stats that concern a GC. The main thread's GCHeapStats contains stats for all threads in Heap::attachedThreads(). |
| + class GCHeapStats { |
|
haraken
2016/01/07 08:06:22
Can we make this change (i.e., move a bunch of sta
|
| + public: |
| + GCHeapStats(); |
| + void setMarkedObjectSizeAtLastCompleteSweep(size_t size) { releaseStore(&m_markedObjectSizeAtLastCompleteSweep, size); } |
| + size_t markedObjectSizeAtLastCompleteSweep() { return acquireLoad(&m_markedObjectSizeAtLastCompleteSweep); } |
| + void increaseAllocatedObjectSize(size_t delta) { atomicAdd(&m_allocatedObjectSize, static_cast<long>(delta)); } |
| + void decreaseAllocatedObjectSize(size_t delta) { atomicSubtract(&m_allocatedObjectSize, static_cast<long>(delta)); } |
| + size_t allocatedObjectSize() { return acquireLoad(&m_allocatedObjectSize); } |
| + void increaseMarkedObjectSize(size_t delta) { atomicAdd(&m_markedObjectSize, static_cast<long>(delta)); } |
| + size_t markedObjectSize() { return acquireLoad(&m_markedObjectSize); } |
| + void increaseAllocatedSpace(size_t delta) { atomicAdd(&m_allocatedSpace, static_cast<long>(delta)); } |
| + void decreaseAllocatedSpace(size_t delta) { atomicSubtract(&m_allocatedSpace, static_cast<long>(delta)); } |
| + size_t allocatedSpace() { return acquireLoad(&m_allocatedSpace); } |
| + size_t objectSizeAtLastGC() { return acquireLoad(&m_objectSizeAtLastGC); } |
| + void increaseWrapperCount(size_t delta) { atomicAdd(&m_wrapperCount, static_cast<long>(delta)); } |
| + void decreaseWrapperCount(size_t delta) { atomicSubtract(&m_wrapperCount, static_cast<long>(delta)); } |
| + size_t wrapperCount() { return acquireLoad(&m_wrapperCount); } |
| + size_t wrapperCountAtLastGC() { return acquireLoad(&m_wrapperCountAtLastGC); } |
| + void increaseCollectedWrapperCount(size_t delta) { atomicAdd(&m_collectedWrapperCount, static_cast<long>(delta)); } |
| + size_t collectedWrapperCount() { return acquireLoad(&m_collectedWrapperCount); } |
| + size_t partitionAllocSizeAtLastGC() { return acquireLoad(&m_partitionAllocSizeAtLastGC); } |
| + void setEstimatedMarkingTimePerByte(double estimatedMarkingTimePerByte) { m_estimatedMarkingTimePerByte = estimatedMarkingTimePerByte; } |
| + double estimatedMarkingTimePerByte() const { return m_estimatedMarkingTimePerByte; } |
| + double estimatedMarkingTime(); |
| + void reset(); |
| + |
| +#if ENABLE(ASSERT) |
| + void incrementGcGeneration() |
| + { |
| + if (++m_gcGeneration == 0) |
| + m_gcGeneration = 1; |
| + } |
| + uint16_t gcGeneration() { return m_gcGeneration; } |
| +#endif |
| + |
| + private: |
| + size_t m_allocatedSpace; |
| + size_t m_allocatedObjectSize; |
| + size_t m_objectSizeAtLastGC; |
| + size_t m_markedObjectSize; |
| + size_t m_markedObjectSizeAtLastCompleteSweep; |
| + size_t m_wrapperCount; |
| + size_t m_wrapperCountAtLastGC; |
| + size_t m_collectedWrapperCount; |
| + size_t m_partitionAllocSizeAtLastGC; |
| + double m_estimatedMarkingTimePerByte; |
| +#if ENABLE(ASSERT) |
| + uint16_t m_gcGeneration; |
| +#endif |
| + }; |
| + |
| + // A RegionTree is a simple binary search tree of PageMemoryRegions sorted |
| + // by base addresses. |
| + class RegionTree { |
|
haraken
2016/01/07 08:06:22
Let's move out the RegionTree to a separate file.
|
| + public: |
| + explicit RegionTree(PageMemoryRegion* region) : m_region(region), m_left(nullptr), m_right(nullptr) { } |
| + ~RegionTree() |
| + { |
| + delete m_left; |
| + delete m_right; |
| + } |
| + PageMemoryRegion* lookup(Address); |
| + static void add(RegionTree*, RegionTree**); |
| + static void remove(PageMemoryRegion*, RegionTree**); |
| + private: |
| + PageMemoryRegion* m_region; |
| + RegionTree* m_left; |
| + RegionTree* m_right; |
| + }; |
| + |
| // See setGCState() for possible state transitions. |
| enum GCState { |
| NoGCScheduled, |
| @@ -139,6 +215,11 @@ public: |
| SweepingAndPreciseGCScheduled, |
| }; |
| + enum PerThreadHeapState { |
| + PerThreadHeapEnabled, |
| + PerThreadHeapDisabled |
| + }; |
| + |
| // The NoAllocationScope class is used in debug mode to catch unwanted |
| // allocations. E.g. allocations during GC. |
| class NoAllocationScope final { |
| @@ -171,8 +252,7 @@ public: |
| ThreadState* m_state; |
| }; |
| - // The set of ThreadStates for all threads attached to the Blink |
| - // garbage collector. |
| + // The set of ThreadStates that are bound to the main thread's GC. |
| using AttachedThreadStateSet = HashSet<ThreadState*>; |
| static AttachedThreadStateSet& attachedThreads(); |
| static RecursiveMutex& threadAttachMutex(); |
| @@ -198,7 +278,7 @@ public: |
| // Associate ThreadState object with the current thread. After this |
| // call thread can start using the garbage collected heap infrastructure. |
| // It also has to periodically check for safepoints. |
| - static void attach(); |
| + static void attach(PerThreadHeapState); |
| // Disassociate attached ThreadState from the current thread. The thread |
| // can no longer use the garbage collected heap after this call. |
| @@ -229,6 +309,80 @@ public: |
| { |
| return reinterpret_cast<ThreadState*>(s_mainThreadStateStorage); |
| } |
| + static ThreadState* forObject(const void*); |
| + |
| + GCHeapStats* heapStats() const { return m_heapStats; } |
| + |
| + CallbackStack* markingStack() const { return m_markingStack.get(); } |
|
haraken
2016/01/07 08:06:22
Can we move these methods before landing this CL?
|
| + CallbackStack* postMarkingCallbackStack() const { return m_postMarkingCallbackStack.get(); } |
| + CallbackStack* globalWeakCallbackStack() const { return m_globalWeakCallbackStack.get(); } |
| + CallbackStack* ephemeronStack() const { return m_ephemeronStack.get(); } |
| + |
| + static void increaseTotalAllocatedObjectSize(size_t delta) { atomicAdd(&s_totalAllocatedObjectSize, static_cast<long>(delta)); } |
| + static void decreaseTotalAllocatedObjectSize(size_t delta) { atomicSubtract(&s_totalAllocatedObjectSize, static_cast<long>(delta)); } |
| + static size_t totalAllocatedObjectSize() { return acquireLoad(&s_totalAllocatedObjectSize); } |
| + static void increaseTotalMarkedObjectSize(size_t delta) { atomicAdd(&s_totalMarkedObjectSize, static_cast<long>(delta)); } |
| + static size_t totalMarkedObjectSize() { return acquireLoad(&s_totalMarkedObjectSize); } |
| + static void increaseTotalAllocatedSpace(size_t delta) { atomicAdd(&s_totalAllocatedSpace, static_cast<long>(delta)); } |
| + static void decreaseTotalAllocatedSpace(size_t delta) { atomicSubtract(&s_totalAllocatedSpace, static_cast<long>(delta)); } |
| + static size_t totalAllocatedSpace() { return acquireLoad(&s_totalAllocatedSpace); } |
| + |
| + size_t markedObjectSizeAtLastCompleteSweep() { return m_heapStats->markedObjectSizeAtLastCompleteSweep(); } |
| + void increaseAllocatedObjectSize(size_t delta) |
| + { |
| + m_heapStats->increaseAllocatedObjectSize(delta); |
| + increaseTotalAllocatedObjectSize(delta); |
| + } |
| + void decreaseAllocatedObjectSize(size_t delta) |
| + { |
| + m_heapStats->decreaseAllocatedObjectSize(delta); |
| + decreaseTotalAllocatedObjectSize(delta); |
| + } |
| + size_t allocatedObjectSize() { return m_heapStats->allocatedObjectSize(); } |
| + void increaseMarkedObjectSize(size_t delta) |
| + { |
| + m_heapStats->increaseMarkedObjectSize(delta); |
| + increaseTotalMarkedObjectSize(delta); |
| + } |
| + size_t markedObjectSize() { return m_heapStats->markedObjectSize(); } |
| + void increaseAllocatedSpace(size_t delta) |
| + { |
| + m_heapStats->increaseAllocatedSpace(delta); |
| + increaseTotalAllocatedSpace(delta); |
| + } |
| + void decreaseAllocatedSpace(size_t delta) |
| + { |
| + m_heapStats->decreaseAllocatedSpace(delta); |
| + decreaseTotalAllocatedSpace(delta); |
| + } |
| + size_t allocatedSpace() { return m_heapStats->allocatedSpace(); } |
| + size_t objectSizeAtLastGC() { return m_heapStats->objectSizeAtLastGC(); } |
| + void increaseWrapperCount(size_t delta) { m_heapStats->increaseWrapperCount(delta); } |
| + void decreaseWrapperCount(size_t delta) { m_heapStats->decreaseWrapperCount(delta); } |
| + size_t wrapperCount() { return m_heapStats->wrapperCount(); } |
| + size_t wrapperCountAtLastGC() { return m_heapStats->wrapperCountAtLastGC(); } |
| + void increaseCollectedWrapperCount(size_t delta) { m_heapStats->increaseCollectedWrapperCount(delta); } |
| + size_t collectedWrapperCount() { return m_heapStats->collectedWrapperCount(); } |
| + size_t partitionAllocSizeAtLastGC() { return m_heapStats->partitionAllocSizeAtLastGC(); } |
| + void setEstimatedMarkingTimePerByte(double estimatedMarkingTimePerByte) { m_heapStats->setEstimatedMarkingTimePerByte(estimatedMarkingTimePerByte); } |
| + |
| +#if ENABLE(ASSERT) |
| + void incrementGcGeneration() { m_heapStats->incrementGcGeneration(); } |
| + uint16_t gcGeneration() { return m_heapStats->gcGeneration(); } |
| +#endif |
| + |
| + HeapDoesNotContainCache* heapDoesNotContainCache() const { return m_heapDoesNotContainCache.get(); } |
| + RegionTree* regionTree() const { return m_regionTree; } |
| + void setRegionTree(RegionTree* tree) { m_regionTree = tree; } |
| + |
| + double estimatedMarkingTime(); |
| + void reportMemoryUsageHistogram(); |
| + void reportMemoryUsageForTracing(); |
| + |
| + void flushHeapDoesNotContainCache(); |
| + |
| + // Reset counters that track live and allocated-since-last-GC sizes. |
| + void resetHeapCounters(); |
| bool isMainThread() const { return this == mainThreadState(); } |
| #if ENABLE(ASSERT) |
| @@ -367,6 +521,7 @@ public: |
| // A region of PersistentNodes allocated on the given thread. |
| PersistentRegion* persistentRegion() const { return m_persistentRegion.get(); } |
| + XThreadPersistentRegion* xThreadPersistentRegion() const { return m_xThreadPersistentRegion.get(); } |
| // A region of PersistentNodes not owned by any particular thread. |
| static CrossThreadPersistentRegion& crossThreadPersistentRegion(); |
|
haraken
2016/01/07 08:06:22
Not related to your CL, this should be moved to He
|
| @@ -509,6 +664,9 @@ public: |
| size_t threadStackSize(); |
| #endif |
| + bool perThreadHeapEnabled() { return m_perThreadHeapEnabled; } |
| + void removeFromRegionTree(PageMemoryRegion*); |
| + |
| #if defined(LEAK_SANITIZER) |
| void registerStaticPersistentNode(PersistentNode*); |
| void releaseStaticPersistentNodes(); |
| @@ -523,7 +681,7 @@ private: |
| FreelistSnapshot |
| }; |
| - ThreadState(); |
| + ThreadState(PerThreadHeapState); |
|
haraken
2016/01/07 08:06:22
Add explicit.
|
| ~ThreadState(); |
| NO_SANITIZE_ADDRESS void copyStackUntilSafePointScope(); |
| @@ -618,6 +776,7 @@ private: |
| ThreadIdentifier m_thread; |
| OwnPtr<PersistentRegion> m_persistentRegion; |
| + OwnPtr<XThreadPersistentRegion> m_xThreadPersistentRegion; |
| BlinkGC::StackState m_stackState; |
| #if OS(WIN) && COMPILER(MSVC) |
| size_t m_threadStackSize; |
| @@ -675,6 +834,22 @@ private: |
| static const int likelyToBePromptlyFreedArraySize = (1 << 8); |
| static const int likelyToBePromptlyFreedArrayMask = likelyToBePromptlyFreedArraySize - 1; |
| OwnPtr<int[]> m_likelyToBePromptlyFreed; |
| + |
| + // Per thread heap enabled ThreadStates will have their own GCHeapStats. |
| + // Others will use the main thread's. |
| + GCHeapStats* m_heapStats; |
|
haraken
2016/01/07 08:06:22
How about using an OwnPtr?
|
| + OwnPtr<CallbackStack> m_markingStack; |
| + OwnPtr<CallbackStack> m_postMarkingCallbackStack; |
| + OwnPtr<CallbackStack> m_globalWeakCallbackStack; |
| + OwnPtr<CallbackStack> m_ephemeronStack; |
| + OwnPtr<HeapDoesNotContainCache> m_heapDoesNotContainCache; |
| + RegionTree* m_regionTree; |
|
haraken
2016/01/07 08:06:22
How about using an OwnPtr?
|
| + bool m_perThreadHeapEnabled; |
| + |
| + // Stats for the entire Oilpan heap. |
| + static size_t s_totalAllocatedSpace; |
| + static size_t s_totalAllocatedObjectSize; |
| + static size_t s_totalMarkedObjectSize; |
|
haraken
2016/01/07 08:06:22
Given that these variables keep track of the stats
|
| }; |
| template<ThreadAffinity affinity> class ThreadStateFor; |