Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(236)

Side by Side Diff: Source/platform/heap/ThreadState.cpp

Issue 1275863002: Oilpan: catch some self-referential leaks (main thread.) (Closed) Base URL: svn://svn.chromium.org/blink/trunk
Patch Set: add stdio.h include Created 5 years, 4 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « Source/platform/heap/ThreadState.h ('k') | no next file » | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 58 matching lines...) Expand 10 before | Expand all | Expand 10 after
69 #include <pthread_np.h> 69 #include <pthread_np.h>
70 #endif 70 #endif
71 71
72 namespace blink { 72 namespace blink {
73 73
74 WTF::ThreadSpecific<ThreadState*>* ThreadState::s_threadSpecific = nullptr; 74 WTF::ThreadSpecific<ThreadState*>* ThreadState::s_threadSpecific = nullptr;
75 uintptr_t ThreadState::s_mainThreadStackStart = 0; 75 uintptr_t ThreadState::s_mainThreadStackStart = 0;
76 uintptr_t ThreadState::s_mainThreadUnderestimatedStackSize = 0; 76 uintptr_t ThreadState::s_mainThreadUnderestimatedStackSize = 0;
77 uint8_t ThreadState::s_mainThreadStateStorage[sizeof(ThreadState)]; 77 uint8_t ThreadState::s_mainThreadStateStorage[sizeof(ThreadState)];
78 SafePointBarrier* ThreadState::s_safePointBarrier = nullptr; 78 SafePointBarrier* ThreadState::s_safePointBarrier = nullptr;
79 #if ENABLE(ASSERT)
80 int ThreadState::s_selfKeepAliveAllocationsOnMainThread = 0;
81 #endif
79 82
80 RecursiveMutex& ThreadState::threadAttachMutex() 83 RecursiveMutex& ThreadState::threadAttachMutex()
81 { 84 {
82 AtomicallyInitializedStaticReference(RecursiveMutex, mutex, (new RecursiveMutex)); 85 AtomicallyInitializedStaticReference(RecursiveMutex, mutex, (new RecursiveMutex));
83 return mutex; 86 return mutex;
84 } 87 }
85 88
86 ThreadState::ThreadState() 89 ThreadState::ThreadState()
87 : m_thread(currentThread()) 90 : m_thread(currentThread())
88 , m_persistentRegion(adoptPtr(new PersistentRegion())) 91 , m_persistentRegion(adoptPtr(new PersistentRegion()))
(...skipping 70 matching lines...) Expand 10 before | Expand all | Expand 10 after
159 162
160 // Thread-local storage shouldn't be disposed, so we don't call ~ThreadSpecific(). 163 // Thread-local storage shouldn't be disposed, so we don't call ~ThreadSpecific().
161 } 164 }
162 165
163 void ThreadState::attachMainThread() 166 void ThreadState::attachMainThread()
164 { 167 {
165 RELEASE_ASSERT(!Heap::s_shutdownCalled); 168 RELEASE_ASSERT(!Heap::s_shutdownCalled);
166 MutexLocker locker(threadAttachMutex()); 169 MutexLocker locker(threadAttachMutex());
167 ThreadState* state = new(s_mainThreadStateStorage) ThreadState(); 170 ThreadState* state = new(s_mainThreadStateStorage) ThreadState();
168 attachedThreads().add(state); 171 attachedThreads().add(state);
172 #if ENABLE(ASSERT)
173 s_selfKeepAliveAllocationsOnMainThread = 0;
174 #endif
169 } 175 }
170 176
171 void ThreadState::detachMainThread() 177 void ThreadState::detachMainThread()
172 { 178 {
173 // Enter a safe point before trying to acquire threadAttachMutex 179 // Enter a safe point before trying to acquire threadAttachMutex
174 // to avoid dead lock if another thread is preparing for GC, has acquired 180 // to avoid dead lock if another thread is preparing for GC, has acquired
175 // threadAttachMutex and waiting for other threads to pause or reach a 181 // threadAttachMutex and waiting for other threads to pause or reach a
176 // safepoint. 182 // safepoint.
177 ThreadState* state = mainThreadState(); 183 ThreadState* state = mainThreadState();
178 184
179 // 1. Finish sweeping. 185 // 1. Finish sweeping.
180 state->completeSweep(); 186 state->completeSweep();
181 { 187 {
182 SafePointAwareMutexLocker locker(threadAttachMutex(), NoHeapPointersOnStack); 188 SafePointAwareMutexLocker locker(threadAttachMutex(), NoHeapPointersOnStack);
183 189
184 // 2. Add the main thread's heap pages to the orphaned pool. 190 // 2. Add the main thread's heap pages to the orphaned pool.
185 state->cleanupPages(); 191 state->cleanupPages();
186 192
187 // 3. Detach the main thread. 193 // 3. Detach the main thread.
188 ASSERT(attachedThreads().contains(state)); 194 ASSERT(attachedThreads().contains(state));
189 attachedThreads().remove(state); 195 attachedThreads().remove(state);
190 state->~ThreadState(); 196 state->~ThreadState();
191 } 197 }
198 // Catch out any self-referential leaks created by the main thread.
199 ASSERT(s_selfKeepAliveAllocationsOnMainThread == 0);
192 shutdownHeapIfNecessary(); 200 shutdownHeapIfNecessary();
193 } 201 }
194 202
195 void ThreadState::shutdownHeapIfNecessary() 203 void ThreadState::shutdownHeapIfNecessary()
196 { 204 {
197 // We don't need to enter a safe point before acquiring threadAttachMutex 205 // We don't need to enter a safe point before acquiring threadAttachMutex
198 // because this thread is already detached. 206 // because this thread is already detached.
199 207
200 MutexLocker locker(threadAttachMutex()); 208 MutexLocker locker(threadAttachMutex());
201 // We start shutting down the heap if there is no running thread 209 // We start shutting down the heap if there is no running thread
(...skipping 48 matching lines...) Expand 10 before | Expand all | Expand 10 after
250 int oldCount = -1; 258 int oldCount = -1;
251 int currentCount = persistentRegion()->numberOfPersistents(); 259 int currentCount = persistentRegion()->numberOfPersistents();
252 ASSERT(currentCount >= 0); 260 ASSERT(currentCount >= 0);
253 while (currentCount != oldCount) { 261 while (currentCount != oldCount) {
254 Heap::collectGarbageForTerminatingThread(this); 262 Heap::collectGarbageForTerminatingThread(this);
255 oldCount = currentCount; 263 oldCount = currentCount;
256 currentCount = persistentRegion()->numberOfPersistents(); 264 currentCount = persistentRegion()->numberOfPersistents();
257 } 265 }
258 // We should not have any persistents left when getting to this point, 266 // We should not have any persistents left when getting to this point,
259 // if we have it is probably a bug so adding a debug ASSERT to catch this. 267 // if we have it is probably a bug so adding a debug ASSERT to catch this.
260 ASSERT(!currentCount); 268 // (debug tip: use persistentRegion()->dumpLivePersistents() to get a list of
269 // the remaining live Persistent<>s. In gdb, performing "info symbol" on the
270 // trace callback addresses printed should tell you what Persistent<T>s are leaking.)
271 ASSERT(!currentCount && "Persistent<>s leak on thread heap shutdown");
261 // All of pre-finalizers should be consumed. 272 // All of pre-finalizers should be consumed.
262 ASSERT(m_orderedPreFinalizers.isEmpty()); 273 ASSERT(m_orderedPreFinalizers.isEmpty());
263 RELEASE_ASSERT(gcState() == NoGCScheduled); 274 RELEASE_ASSERT(gcState() == NoGCScheduled);
264 275
265 // Add pages to the orphaned page pool to ensure any global GCs from this point 276 // Add pages to the orphaned page pool to ensure any global GCs from this point
266 // on will not trace objects on this thread's heaps. 277 // on will not trace objects on this thread's heaps.
267 cleanupPages(); 278 cleanupPages();
268 279
269 ASSERT(attachedThreads().contains(this)); 280 ASSERT(attachedThreads().contains(this));
270 attachedThreads().remove(this); 281 attachedThreads().remove(this);
(...skipping 1264 matching lines...) Expand 10 before | Expand all | Expand 10 after
1535 for (ClassAgeCountsMap::const_iterator it = classAgeCounts.begin(), end = classAgeCounts.end(); it != end; ++it) { 1546 for (ClassAgeCountsMap::const_iterator it = classAgeCounts.begin(), end = classAgeCounts.end(); it != end; ++it) {
1536 json->beginArray(it->key.ascii().data()); 1547 json->beginArray(it->key.ascii().data());
1537 for (size_t age = 0; age <= maxHeapObjectAge; ++age) 1548 for (size_t age = 0; age <= maxHeapObjectAge; ++age)
1538 json->pushInteger(it->value.ages[age]); 1549 json->pushInteger(it->value.ages[age]);
1539 json->endArray(); 1550 json->endArray();
1540 } 1551 }
1541 TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(TRACE_DISABLED_BY_DEFAULT("blink_gc"), statsName, this, json.release()); 1552 TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(TRACE_DISABLED_BY_DEFAULT("blink_gc"), statsName, this, json.release());
1542 } 1553 }
1543 #endif 1554 #endif
1544 1555
1556 #if ENABLE(ASSERT)
1557 void ThreadState::incrementSelfKeepAliveAllocations()
1558 {
1559 if (!ThreadState::current()->isMainThread())
1560 return;
1561
1562 s_selfKeepAliveAllocationsOnMainThread++;
1563 }
1564
1565 void ThreadState::decrementSelfKeepAliveAllocations()
1566 {
1567 if (!ThreadState::current()->isMainThread())
1568 return;
1569
1570 ASSERT(s_selfKeepAliveAllocationsOnMainThread > 0);
1571 s_selfKeepAliveAllocationsOnMainThread--;
1572 }
1573 #endif
1574
1545 } // namespace blink 1575 } // namespace blink
OLDNEW
« no previous file with comments | « Source/platform/heap/ThreadState.h ('k') | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698