| Index: Source/platform/heap/ThreadState.cpp
|
| diff --git a/Source/platform/heap/ThreadState.cpp b/Source/platform/heap/ThreadState.cpp
|
| index 079ac1912d0ce202fbbc8a761bbc15343b422211..cab0bca3a86477bd54502970a5cf3a93285fe9a2 100644
|
| --- a/Source/platform/heap/ThreadState.cpp
|
| +++ b/Source/platform/heap/ThreadState.cpp
|
| @@ -83,8 +83,9 @@ RecursiveMutex& ThreadState::threadAttachMutex()
|
| return mutex;
|
| }
|
|
|
| -ThreadState::ThreadState()
|
| - : m_thread(currentThread())
|
| +ThreadState::ThreadState(bool shouldAttach)
|
| + : m_attached(shouldAttach)
|
| + , m_thread(currentThread())
|
| , m_persistentRegion(adoptPtr(new PersistentRegion()))
|
| , m_startOfStack(reinterpret_cast<intptr_t*>(StackFrameDepth::getStackStart()))
|
| , m_endOfStack(reinterpret_cast<intptr_t*>(StackFrameDepth::getStackStart()))
|
| @@ -97,6 +98,7 @@ ThreadState::ThreadState()
|
| , m_persistentAllocated(0)
|
| , m_persistentFreed(0)
|
| , m_vectorBackingHeapIndex(Vector1HeapIndex)
|
| + , m_hashTableBackingHeapIndex(HashTable1HeapIndex)
|
| , m_currentHeapAges(0)
|
| , m_isTerminating(false)
|
| , m_gcMixinMarker(nullptr)
|
| @@ -110,15 +112,17 @@ ThreadState::ThreadState()
|
| , m_nextFreeListSnapshotTime(-std::numeric_limits<double>::infinity())
|
| #endif
|
| {
|
| - ASSERT(checkThread());
|
| - ASSERT(!**s_threadSpecific);
|
| - **s_threadSpecific = this;
|
| -
|
| - if (isMainThread()) {
|
| - s_mainThreadStackStart = reinterpret_cast<uintptr_t>(m_startOfStack) - sizeof(void*);
|
| - size_t underestimatedStackSize = StackFrameDepth::getUnderestimatedStackSize();
|
| - if (underestimatedStackSize > sizeof(void*))
|
| - s_mainThreadUnderestimatedStackSize = underestimatedStackSize - sizeof(void*);
|
| + if (attached()) {
|
| + ASSERT(checkThread());
|
| + ASSERT(!**s_threadSpecific);
|
| + **s_threadSpecific = this;
|
| +
|
| + if (isMainThread()) {
|
| + s_mainThreadStackStart = reinterpret_cast<uintptr_t>(m_startOfStack) - sizeof(void*);
|
| + size_t underestimatedStackSize = StackFrameDepth::getUnderestimatedStackSize();
|
| + if (underestimatedStackSize > sizeof(void*))
|
| + s_mainThreadUnderestimatedStackSize = underestimatedStackSize - sizeof(void*);
|
| + }
|
| }
|
|
|
| for (int heapIndex = 0; heapIndex < LargeObjectHeapIndex; heapIndex++)
|
| @@ -133,16 +137,17 @@ ThreadState::ThreadState()
|
|
|
| ThreadState::~ThreadState()
|
| {
|
| - ASSERT(checkThread());
|
| delete m_threadLocalWeakCallbackStack;
|
| m_threadLocalWeakCallbackStack = nullptr;
|
| for (int i = 0; i < NumberOfHeaps; ++i)
|
| delete m_heaps[i];
|
|
|
| - **s_threadSpecific = nullptr;
|
| - if (isMainThread()) {
|
| - s_mainThreadStackStart = 0;
|
| - s_mainThreadUnderestimatedStackSize = 0;
|
| + if (attached()) {
|
| + **s_threadSpecific = nullptr;
|
| + if (isMainThread()) {
|
| + s_mainThreadStackStart = 0;
|
| + s_mainThreadUnderestimatedStackSize = 0;
|
| + }
|
| }
|
| }
|
|
|
| @@ -214,7 +219,6 @@ void ThreadState::attach()
|
|
|
| void ThreadState::cleanupPages()
|
| {
|
| - ASSERT(checkThread());
|
| for (int i = 0; i < NumberOfHeaps; ++i)
|
| m_heaps[i]->cleanupPages();
|
| }
|
| @@ -420,7 +424,11 @@ void ThreadState::snapshot()
|
| SNAPSHOT_HEAP(Vector3);
|
| SNAPSHOT_HEAP(Vector4);
|
| SNAPSHOT_HEAP(InlineVector);
|
| - SNAPSHOT_HEAP(HashTable);
|
| + SNAPSHOT_HEAP(HashTable1);
|
| + SNAPSHOT_HEAP(HashTable2);
|
| + SNAPSHOT_HEAP(HashTable3);
|
| + SNAPSHOT_HEAP(HashTable4);
|
| + SNAPSHOT_HEAP(BufferString);
|
| SNAPSHOT_HEAP(LargeObject);
|
| FOR_EACH_TYPED_HEAP(SNAPSHOT_HEAP);
|
| json->endArray();
|
| @@ -941,6 +949,7 @@ void ThreadState::makeConsistentForMutator()
|
|
|
| void ThreadState::preGC()
|
| {
|
| + ASSERT(attached());
|
| ASSERT(!isInGC());
|
| setGCState(GCRunning);
|
| makeConsistentForGC();
|
| @@ -951,6 +960,7 @@ void ThreadState::preGC()
|
|
|
| void ThreadState::postGC(GCType gcType)
|
| {
|
| + ASSERT(attached());
|
| ASSERT(isInGC());
|
|
|
| #if ENABLE(GC_PROFILING)
|
| @@ -1358,7 +1368,7 @@ void ThreadState::clearHeapAges()
|
| m_currentHeapAges = 0;
|
| }
|
|
|
| -int ThreadState::heapIndexOfVectorHeapLeastRecentlyExpanded(int beginHeapIndex, int endHeapIndex)
|
| +int ThreadState::heapIndexOfLeastRecentlyExpanded(int beginHeapIndex, int endHeapIndex)
|
| {
|
| size_t minHeapAge = m_heapAges[beginHeapIndex];
|
| int heapIndexWithMinHeapAge = beginHeapIndex;
|
| @@ -1368,18 +1378,40 @@ int ThreadState::heapIndexOfVectorHeapLeastRecentlyExpanded(int beginHeapIndex,
|
| heapIndexWithMinHeapAge = heapIndex;
|
| }
|
| }
|
| - ASSERT(isVectorHeapIndex(heapIndexWithMinHeapAge));
|
| return heapIndexWithMinHeapAge;
|
| }
|
|
|
| BaseHeap* ThreadState::expandedVectorBackingHeap(size_t gcInfoIndex)
|
| {
|
| - ASSERT(checkThread());
|
| size_t entryIndex = gcInfoIndex & likelyToBePromptlyFreedArrayMask;
|
| --m_likelyToBePromptlyFreed[entryIndex];
|
| int heapIndex = m_vectorBackingHeapIndex;
|
| m_heapAges[heapIndex] = ++m_currentHeapAges;
|
| - m_vectorBackingHeapIndex = heapIndexOfVectorHeapLeastRecentlyExpanded(Vector1HeapIndex, Vector4HeapIndex);
|
| + m_vectorBackingHeapIndex = heapIndexOfLeastRecentlyExpanded(Vector1HeapIndex, Vector4HeapIndex);
|
| + ASSERT(isVectorHeapIndex(m_vectorBackingHeapIndex));
|
| +#if BUFFER_ALLOCATOR_DEBUG
|
| + if (!attached() && heapIndex != m_vectorBackingHeapIndex)
|
| + fprintf(stderr, "heap index updated: %d => %d\n", heapIndex, m_vectorBackingHeapIndex);
|
| + if (!attached())
|
| + fprintf(stderr, "expandedVectorBackingHeap: heapIndex=%d, likely=%d\n", heapIndex, m_likelyToBePromptlyFreed[entryIndex]);
|
| +#endif
|
| + return m_heaps[heapIndex];
|
| +}
|
| +
|
| +BaseHeap* ThreadState::expandedHashTableBackingHeap(size_t gcInfoIndex)
|
| +{
|
| + size_t entryIndex = gcInfoIndex & likelyToBePromptlyFreedArrayMask;
|
| + --m_likelyToBePromptlyFreed[entryIndex];
|
| + int heapIndex = m_hashTableBackingHeapIndex;
|
| + m_heapAges[heapIndex] = ++m_currentHeapAges;
|
| + m_hashTableBackingHeapIndex = heapIndexOfLeastRecentlyExpanded(HashTable1HeapIndex, HashTable4HeapIndex);
|
| + ASSERT(isHashTableHeapIndex(m_hashTableBackingHeapIndex));
|
| +#if BUFFER_ALLOCATOR_DEBUG
|
| + if (!attached() && heapIndex != m_hashTableBackingHeapIndex)
|
| + fprintf(stderr, "heap index updated: %d => %d\n", heapIndex, m_hashTableBackingHeapIndex);
|
| + if (!attached())
|
| + fprintf(stderr, "expandedHashTableBackingHeap: heapIndex=%d, likely=%d\n", heapIndex, m_likelyToBePromptlyFreed[entryIndex]);
|
| +#endif
|
| return m_heaps[heapIndex];
|
| }
|
|
|
| @@ -1387,12 +1419,19 @@ void ThreadState::allocationPointAdjusted(int heapIndex)
|
| {
|
| m_heapAges[heapIndex] = ++m_currentHeapAges;
|
| if (m_vectorBackingHeapIndex == heapIndex)
|
| - m_vectorBackingHeapIndex = heapIndexOfVectorHeapLeastRecentlyExpanded(Vector1HeapIndex, Vector4HeapIndex);
|
| + m_vectorBackingHeapIndex = heapIndexOfLeastRecentlyExpanded(Vector1HeapIndex, Vector4HeapIndex);
|
| + else if (m_hashTableBackingHeapIndex == heapIndex)
|
| + m_hashTableBackingHeapIndex = heapIndexOfLeastRecentlyExpanded(HashTable1HeapIndex, HashTable4HeapIndex);
|
| +#if BUFFER_ALLOCATOR_DEBUG
|
| + if (!attached())
|
| + fprintf(stderr, "allocationPointAdjusted: heapIndex=%d\n", heapIndex);
|
| +    if (!attached() && heapIndex != m_vectorBackingHeapIndex && heapIndex != m_hashTableBackingHeapIndex)
|
| +        fprintf(stderr, "heap index updated: %d => vector=%d, hashTable=%d\n", heapIndex, m_vectorBackingHeapIndex, m_hashTableBackingHeapIndex);
|
| +#endif
|
| }
|
|
|
| void ThreadState::promptlyFreed(size_t gcInfoIndex)
|
| {
|
| - ASSERT(checkThread());
|
| size_t entryIndex = gcInfoIndex & likelyToBePromptlyFreedArrayMask;
|
| // See the comment in vectorBackingHeap() for why this is +3.
|
| m_likelyToBePromptlyFreed[entryIndex] += 3;
|
| @@ -1430,7 +1469,11 @@ void ThreadState::takeSnapshot(SnapshotType type)
|
| SNAPSHOT_HEAP(Vector3);
|
| SNAPSHOT_HEAP(Vector4);
|
| SNAPSHOT_HEAP(InlineVector);
|
| - SNAPSHOT_HEAP(HashTable);
|
| + SNAPSHOT_HEAP(HashTable1);
|
| + SNAPSHOT_HEAP(HashTable2);
|
| + SNAPSHOT_HEAP(HashTable3);
|
| + SNAPSHOT_HEAP(HashTable4);
|
| + SNAPSHOT_HEAP(BufferString);
|
| SNAPSHOT_HEAP(LargeObject);
|
| FOR_EACH_TYPED_HEAP(SNAPSHOT_HEAP);
|
|
|
|
|