Chromium Code Reviews

Unified Diff: Source/platform/heap/ThreadState.cpp

Issue 1272573008: Revert of Oilpan: catch some self-referential leaks (main thread.) (Closed) Base URL: svn://svn.chromium.org/blink/trunk
Patch Set: Created 5 years, 4 months ago
 /*
  * Copyright (C) 2013 Google Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are
  * met:
  *
  *     * Redistributions of source code must retain the above copyright
  * notice, this list of conditions and the following disclaimer.
  *     * Redistributions in binary form must reproduce the above
(...skipping 58 matching lines...)
 #include <pthread_np.h>
 #endif

 namespace blink {

 WTF::ThreadSpecific<ThreadState*>* ThreadState::s_threadSpecific = nullptr;
 uintptr_t ThreadState::s_mainThreadStackStart = 0;
 uintptr_t ThreadState::s_mainThreadUnderestimatedStackSize = 0;
 uint8_t ThreadState::s_mainThreadStateStorage[sizeof(ThreadState)];
 SafePointBarrier* ThreadState::s_safePointBarrier = nullptr;
-#if ENABLE(ASSERT)
-int ThreadState::s_selfKeepAliveAllocationsOnMainThread = 0;
-#endif

 RecursiveMutex& ThreadState::threadAttachMutex()
 {
     AtomicallyInitializedStaticReference(RecursiveMutex, mutex, (new RecursiveMutex));
     return mutex;
 }

 ThreadState::ThreadState()
     : m_thread(currentThread())
     , m_persistentRegion(adoptPtr(new PersistentRegion()))
(...skipping 70 matching lines...)

     // Thread-local storage shouldn't be disposed, so we don't call ~ThreadSpecific().
 }

 void ThreadState::attachMainThread()
 {
     RELEASE_ASSERT(!Heap::s_shutdownCalled);
     MutexLocker locker(threadAttachMutex());
     ThreadState* state = new(s_mainThreadStateStorage) ThreadState();
     attachedThreads().add(state);
-#if ENABLE(ASSERT)
-    s_selfKeepAliveAllocationsOnMainThread = 0;
-#endif
 }

 void ThreadState::detachMainThread()
 {
     // Enter a safe point before trying to acquire threadAttachMutex
     // to avoid deadlock if another thread is preparing for GC, has acquired
     // threadAttachMutex and is waiting for other threads to pause or reach a
     // safepoint.
     ThreadState* state = mainThreadState();

     // 1. Finish sweeping.
     state->completeSweep();
     {
         SafePointAwareMutexLocker locker(threadAttachMutex(), NoHeapPointersOnStack);

         // 2. Add the main thread's heap pages to the orphaned pool.
         state->cleanupPages();

         // 3. Detach the main thread.
         ASSERT(attachedThreads().contains(state));
         attachedThreads().remove(state);
         state->~ThreadState();
     }
-    // Catch out any self-referential leaks created by the main thread.
-    ASSERT(s_selfKeepAliveAllocationsOnMainThread == 0);
     shutdownHeapIfNecessary();
 }

 void ThreadState::shutdownHeapIfNecessary()
 {
     // We don't need to enter a safe point before acquiring threadAttachMutex
     // because this thread is already detached.

     MutexLocker locker(threadAttachMutex());
     // We start shutting down the heap if there is no running thread
(...skipping 48 matching lines...)
         int oldCount = -1;
         int currentCount = persistentRegion()->numberOfPersistents();
         ASSERT(currentCount >= 0);
         while (currentCount != oldCount) {
             Heap::collectGarbageForTerminatingThread(this);
             oldCount = currentCount;
             currentCount = persistentRegion()->numberOfPersistents();
         }
         // We should not have any persistents left when getting to this point;
         // if we do, it is probably a bug, so we add a debug ASSERT to catch this.
-        // (debug tip: use persistentRegion()->dumpLivePersistents() to get a list of
-        // the remaining live Persistent<>s. In gdb, performing "info symbol" on the
-        // trace callback addresses printed should tell you what Persistent<T>s are leaking.)
-        ASSERT(!currentCount && "Persistent<>s leak on thread heap shutdown");
+        ASSERT(!currentCount);
         // All pre-finalizers should be consumed.
         ASSERT(m_orderedPreFinalizers.isEmpty());
         RELEASE_ASSERT(gcState() == NoGCScheduled);

         // Add pages to the orphaned page pool to ensure any global GCs from this point
         // on will not trace objects on this thread's heaps.
         cleanupPages();

         ASSERT(attachedThreads().contains(this));
         attachedThreads().remove(this);
(...skipping 1264 matching lines...)
     for (ClassAgeCountsMap::const_iterator it = classAgeCounts.begin(), end = classAgeCounts.end(); it != end; ++it) {
         json->beginArray(it->key.ascii().data());
         for (size_t age = 0; age <= maxHeapObjectAge; ++age)
             json->pushInteger(it->value.ages[age]);
         json->endArray();
     }
     TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(TRACE_DISABLED_BY_DEFAULT("blink_gc"), statsName, this, json.release());
 }
 #endif

-#if ENABLE(ASSERT)
-void ThreadState::incrementSelfKeepAliveAllocations()
-{
-    if (!ThreadState::current()->isMainThread())
-        return;
-
-    s_selfKeepAliveAllocationsOnMainThread++;
-}
-
-void ThreadState::decrementSelfKeepAliveAllocations()
-{
-    if (!ThreadState::current()->isMainThread())
-        return;
-
-    ASSERT(s_selfKeepAliveAllocationsOnMainThread > 0);
-    s_selfKeepAliveAllocationsOnMainThread--;
-}
-#endif
-
 } // namespace blink
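
For context on what the reverted instrumentation checked: a "self-referential leak" is a heap object that keeps itself alive through its own Persistent<> handle. Such an object is a GC root for itself, so the thread-local collections run at detach time can never reclaim it, and the removed ASSERT in detachMainThread() was meant to catch any such objects surviving on the main thread. The sketch below illustrates the pattern only; the class name is hypothetical, it is not code from this CL, and it assumes the Oilpan GarbageCollected<>/Persistent<>/DEFINE_INLINE_TRACE API of this era.

    // Hypothetical example of the leak pattern the reverted counter targeted.
    // While m_keepAlive points at the object itself, the Persistent<> acts as
    // a GC root, making the object unconditionally reachable, i.e. leaked
    // unless unpin() is called before the thread detaches.
    class SelfPinning : public GarbageCollected<SelfPinning> {
    public:
        void pin() { m_keepAlive = this; }      // object becomes its own root
        void unpin() { m_keepAlive = nullptr; } // must be called, or it leaks
        DEFINE_INLINE_TRACE() { }
    private:
        Persistent<SelfPinning> m_keepAlive;
    };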