OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 58 matching lines...) | |
69 #include <pthread_np.h> | 69 #include <pthread_np.h> |
70 #endif | 70 #endif |
71 | 71 |
72 namespace blink { | 72 namespace blink { |
73 | 73 |
74 WTF::ThreadSpecific<ThreadState*>* ThreadState::s_threadSpecific = nullptr; | 74 WTF::ThreadSpecific<ThreadState*>* ThreadState::s_threadSpecific = nullptr; |
75 uintptr_t ThreadState::s_mainThreadStackStart = 0; | 75 uintptr_t ThreadState::s_mainThreadStackStart = 0; |
76 uintptr_t ThreadState::s_mainThreadUnderestimatedStackSize = 0; | 76 uintptr_t ThreadState::s_mainThreadUnderestimatedStackSize = 0; |
77 uint8_t ThreadState::s_mainThreadStateStorage[sizeof(ThreadState)]; | 77 uint8_t ThreadState::s_mainThreadStateStorage[sizeof(ThreadState)]; |
78 SafePointBarrier* ThreadState::s_safePointBarrier = nullptr; | 78 SafePointBarrier* ThreadState::s_safePointBarrier = nullptr; |
79 #if ENABLE(ASSERT) | |
80 int ThreadState::s_selfKeepAliveAllocationsOnMainThread = 0; | |
81 #endif | |
79 | 82 |
80 RecursiveMutex& ThreadState::threadAttachMutex() | 83 RecursiveMutex& ThreadState::threadAttachMutex() |
81 { | 84 { |
82 AtomicallyInitializedStaticReference(RecursiveMutex, mutex, (new RecursiveMutex)); | 85 AtomicallyInitializedStaticReference(RecursiveMutex, mutex, (new RecursiveMutex)); |
83 return mutex; | 86 return mutex; |
84 } | 87 } |
85 | 88 |
86 ThreadState::ThreadState() | 89 ThreadState::ThreadState() |
87 : m_thread(currentThread()) | 90 : m_thread(currentThread()) |
88 , m_persistentRegion(adoptPtr(new PersistentRegion())) | 91 , m_persistentRegion(adoptPtr(new PersistentRegion())) |
(...skipping 70 matching lines...) | |
159 | 162 |
160 // Thread-local storage shouldn't be disposed, so we don't call ~ThreadSpecific(). | 163 // Thread-local storage shouldn't be disposed, so we don't call ~ThreadSpecific(). |
161 } | 164 } |
162 | 165 |
163 void ThreadState::attachMainThread() | 166 void ThreadState::attachMainThread() |
164 { | 167 { |
165 RELEASE_ASSERT(!Heap::s_shutdownCalled); | 168 RELEASE_ASSERT(!Heap::s_shutdownCalled); |
166 MutexLocker locker(threadAttachMutex()); | 169 MutexLocker locker(threadAttachMutex()); |
167 ThreadState* state = new(s_mainThreadStateStorage) ThreadState(); | 170 ThreadState* state = new(s_mainThreadStateStorage) ThreadState(); |
168 attachedThreads().add(state); | 171 attachedThreads().add(state); |
172 #if ENABLE(ASSERT) | |
173 s_selfKeepAliveAllocationsOnMainThread = 0; | |
174 #endif | |
169 } | 175 } |
170 | 176 |
171 void ThreadState::detachMainThread() | 177 void ThreadState::detachMainThread() |
172 { | 178 { |
173 // Enter a safe point before trying to acquire threadAttachMutex | 179 // Enter a safe point before trying to acquire threadAttachMutex |
174 // to avoid dead lock if another thread is preparing for GC, has acquired | 180 // to avoid dead lock if another thread is preparing for GC, has acquired |
175 // threadAttachMutex and waiting for other threads to pause or reach a | 181 // threadAttachMutex and waiting for other threads to pause or reach a |
176 // safepoint. | 182 // safepoint. |
177 ThreadState* state = mainThreadState(); | 183 ThreadState* state = mainThreadState(); |
178 | 184 |
179 // 1. Finish sweeping. | 185 // 1. Finish sweeping. |
180 state->completeSweep(); | 186 state->completeSweep(); |
181 { | 187 { |
182 SafePointAwareMutexLocker locker(threadAttachMutex(), NoHeapPointersOnStack); | 188 SafePointAwareMutexLocker locker(threadAttachMutex(), NoHeapPointersOnStack); |
183 | 189 |
184 // 2. Add the main thread's heap pages to the orphaned pool. | 190 // 2. Add the main thread's heap pages to the orphaned pool. |
185 state->cleanupPages(); | 191 state->cleanupPages(); |
186 | 192 |
187 // 3. Detach the main thread. | 193 // 3. Detach the main thread. |
188 ASSERT(attachedThreads().contains(state)); | 194 ASSERT(attachedThreads().contains(state)); |
189 attachedThreads().remove(state); | 195 attachedThreads().remove(state); |
190 state->~ThreadState(); | 196 state->~ThreadState(); |
191 } | 197 } |
198 // Catch out any self-referential leaks created by the main thread. | |
199 ASSERT(s_selfKeepAliveAllocationsOnMainThread == 0); | |
192 shutdownHeapIfNecessary(); | 200 shutdownHeapIfNecessary(); |
193 } | 201 } |
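As context for the new assert in detachMainThread(): a "self-keep-alive" is a persistent handle an object assigns to itself so it survives garbage collection while, say, an asynchronous operation is in flight; if the object never clears that handle, it leaks itself and everything it references. Below is a minimal sketch of the leak pattern the counter is meant to flag, assuming Oilpan's GarbageCollectedFinalized<T> and SelfKeepAlive<T> helpers; the Loader class and its methods are hypothetical, for illustration only.

// Hypothetical example of a self-referential leak (not part of this patch).
class Loader final : public GarbageCollectedFinalized<Loader> {
public:
    void start()
    {
        // Pin |this| for the duration of an async operation; on the main
        // thread this is where the new debug counter would be incremented.
        m_keepAlive = this;
    }

    void didFinish()
    {
        // Forgetting this clear() leaves the counter non-zero, so the
        // ASSERT in detachMainThread() fires at shutdown.
        m_keepAlive.clear();
    }

    DEFINE_INLINE_TRACE() { }

private:
    SelfKeepAlive<Loader> m_keepAlive;
};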
194 | 202 |
195 void ThreadState::shutdownHeapIfNecessary() | 203 void ThreadState::shutdownHeapIfNecessary() |
196 { | 204 { |
197 // We don't need to enter a safe point before acquiring threadAttachMutex | 205 // We don't need to enter a safe point before acquiring threadAttachMutex |
198 // because this thread is already detached. | 206 // because this thread is already detached. |
199 | 207 |
200 MutexLocker locker(threadAttachMutex()); | 208 MutexLocker locker(threadAttachMutex()); |
201 // We start shutting down the heap if there is no running thread | 209 // We start shutting down the heap if there is no running thread |
(...skipping 1328 matching lines...) | |
1530 for (ClassAgeCountsMap::const_iterator it = classAgeCounts.begin(), end = classAgeCounts.end(); it != end; ++it) { | 1538 for (ClassAgeCountsMap::const_iterator it = classAgeCounts.begin(), end = classAgeCounts.end(); it != end; ++it) { |
1531 json->beginArray(it->key.ascii().data()); | 1539 json->beginArray(it->key.ascii().data()); |
1532 for (size_t age = 0; age <= maxHeapObjectAge; ++age) | 1540 for (size_t age = 0; age <= maxHeapObjectAge; ++age) |
1533 json->pushInteger(it->value.ages[age]); | 1541 json->pushInteger(it->value.ages[age]); |
1534 json->endArray(); | 1542 json->endArray(); |
1535 } | 1543 } |
1536 TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(TRACE_DISABLED_BY_DEFAULT("blink_gc"), statsName, this, json.release()); | 1544 TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(TRACE_DISABLED_BY_DEFAULT("blink_gc"), statsName, this, json.release()); |
1537 } | 1545 } |
1538 #endif | 1546 #endif |
1539 | 1547 |
1548 #if ENABLE(ASSERT) | |
1549 void ThreadState::incrementSelfKeepAliveAllocations() | |
1550 { | |
1551 if (!ThreadState::current()->isMainThread()) | |
haraken 2015/08/06 10:30:59: Is there any reason we want to limit the check onl...
sof 2015/08/06 10:40:29: The reason is that there's protection for thread s...
haraken 2015/08/06 10:42:08: You could just add ThreadState::m_selfKeepAliveCou...
sof 2015/08/06 10:59:01: You certainly could, but I wonder if we should try...
haraken 2015/08/06 11:01:19: I don't have a strong opinion. I don't think it's...
sof 2015/08/06 12:21:15: I've added a debugger-friendly entry point to Pers...
| |
1552 return; | |
1553 | |
1554 s_selfKeepAliveAllocationsOnMainThread++; | |
1555 } | |
1556 | |
1557 void ThreadState::decrementSelfKeepAliveAllocations() | |
1558 { | |
1559 if (!ThreadState::current()->isMainThread()) | |
1560 return; | |
1561 | |
1562 ASSERT(s_selfKeepAliveAllocationsOnMainThread > 0); | |
1563 s_selfKeepAliveAllocationsOnMainThread--; | |
1564 } | |
1565 #endif | |
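The review thread above floats an alternative to the main-thread-only static: giving each ThreadState its own counter so leaks on any thread are caught when that thread detaches. A rough sketch of that variant, under the same ENABLE(ASSERT) guard; the member and method names are hypothetical and this is not what the patch as shown adds (it keeps the main-thread-only static).

#if ENABLE(ASSERT)
    // Per-instance counter, checked to be zero when the owning thread detaches.
    void incrementSelfKeepAliveCount() { ++m_selfKeepAliveCount; }
    void decrementSelfKeepAliveCount()
    {
        ASSERT(m_selfKeepAliveCount > 0);
        --m_selfKeepAliveCount;
    }
    int m_selfKeepAliveCount = 0;
#endif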
1566 | |
1540 } // namespace blink | 1567 } // namespace blink |