OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 202 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
213 } | 213 } |
214 ASSERT(checkThread()); | 214 ASSERT(checkThread()); |
215 | 215 |
216 // Finish sweeping. | 216 // Finish sweeping. |
217 completeSweep(); | 217 completeSweep(); |
218 | 218 |
219 // From here on ignore all conservatively discovered | 219 // From here on ignore all conservatively discovered |
220 // pointers into the heap owned by this thread. | 220 // pointers into the heap owned by this thread. |
221 m_isTerminating = true; | 221 m_isTerminating = true; |
222 | 222 |
223 ThreadState::callThreadShutdownHooks(); | 223 releaseStaticPersistentNodes(); |
224 | 224 |
225 // Set the terminate flag on all heap pages of this thread. This is used to | 225 // Set the terminate flag on all heap pages of this thread. This is used to |
226 // ensure we don't trace pages on other threads that are not part of the | 226 // ensure we don't trace pages on other threads that are not part of the |
227 // thread local GC. | 227 // thread local GC. |
228 prepareForThreadStateTermination(); | 228 prepareForThreadStateTermination(); |
229 | 229 |
230 ProcessHeap::crossThreadPersistentRegion().prepareForThreadStateTermination(this); | 230 ProcessHeap::crossThreadPersistentRegion().prepareForThreadStateTermination(this); |
231 | 231 |
232 // Do thread local GC's as long as the count of thread local Persistents | 232 // Do thread local GC's as long as the count of thread local Persistents |
233 // changes and is above zero. | 233 // changes and is above zero. |
(...skipping 14 matching lines...) Expand all Loading... | |
248 | 248 |
249 // Add pages to the orphaned page pool to ensure any global GCs from this point | 249 // Add pages to the orphaned page pool to ensure any global GCs from this point |
250 // on will not trace objects on this thread's arenas. | 250 // on will not trace objects on this thread's arenas. |
251 cleanupPages(); | 251 cleanupPages(); |
252 } | 252 } |
253 | 253 |
254 void ThreadState::cleanupMainThread() | 254 void ThreadState::cleanupMainThread() |
255 { | 255 { |
256 ASSERT(isMainThread()); | 256 ASSERT(isMainThread()); |
257 | 257 |
258 releaseStaticPersistentNodes(); | |
259 | |
258 #if defined(LEAK_SANITIZER) | 260 #if defined(LEAK_SANITIZER) |
259 // If LSan is about to perform leak detection, release all the registered | 261 // If LSan is about to perform leak detection, after having released all |
260 // static Persistent<> root references to global caches that Blink keeps, | 262 // the registered static Persistent<> root references to global caches |
261 // followed by GCs to clear out all they referred to. | 263 // that Blink keeps, follow up with a round of GCs to clear out all |
264 // what they referred to. | |
262 // | 265 // |
263 // This is not needed for caches over non-Oilpan objects, as they're | 266 // This is not needed for caches over non-Oilpan objects, as they're |
264 // not scanned by LSan due to being held in non-global storage | 267 // not scanned by LSan due to being held in non-global storage |
265 // ("static" references inside functions/methods.) | 268 // ("static" references inside functions/methods.) |
266 releaseStaticPersistentNodes(); | |
267 ThreadHeap::collectAllGarbage(); | 269 ThreadHeap::collectAllGarbage(); |
268 #endif | 270 #endif |
269 | 271 |
270 // Finish sweeping before shutting down V8. Otherwise, some destructor | 272 // Finish sweeping before shutting down V8. Otherwise, some destructor |
271 // may access V8 and cause crashes. | 273 // may access V8 and cause crashes. |
272 completeSweep(); | 274 completeSweep(); |
273 | 275 |
274 // It is unsafe to trigger GCs after this point because some | 276 // It is unsafe to trigger GCs after this point because some |
275 // destructor may access already-detached V8 and cause crashes. | 277 // destructor may access already-detached V8 and cause crashes. |
276 // Also it is useless. So we forbid GCs. | 278 // Also it is useless. So we forbid GCs. |
277 enterGCForbiddenScope(); | 279 enterGCForbiddenScope(); |
278 } | 280 } |
279 | 281 |
280 void ThreadState::detachMainThread() | 282 void ThreadState::detachMainThread() |
281 { | 283 { |
282 // Enter a safe point before trying to acquire threadAttachMutex | 284 // Enter a safe point before trying to acquire threadAttachMutex |
283 // to avoid dead lock if another thread is preparing for GC, has acquired | 285 // to avoid dead lock if another thread is preparing for GC, has acquired |
284 // threadAttachMutex and waiting for other threads to pause or reach a | 286 // threadAttachMutex and waiting for other threads to pause or reach a |
285 // safepoint. | 287 // safepoint. |
286 ThreadState* state = mainThreadState(); | 288 ThreadState* state = mainThreadState(); |
287 ASSERT(!state->isSweepingInProgress()); | 289 ASSERT(!state->isSweepingInProgress()); |
288 | 290 |
289 state->heap().detach(state); | 291 state->heap().detach(state); |
290 state->~ThreadState(); | 292 state->~ThreadState(); |
291 } | 293 } |
292 | 294 |
293 void ThreadState::callThreadShutdownHooks() | |
294 { | |
295 // Invoke the cleanup hooks. This gives an opportunity to release any | |
296 // persistent handles that may exist, e.g. in thread-specific static | |
297 // locals. | |
298 for (const OwnPtr<SameThreadClosure>& hook : m_threadShutdownHooks) | |
299 (*hook)(); | |
300 } | |
301 | |
302 void ThreadState::detachCurrentThread() | 295 void ThreadState::detachCurrentThread() |
303 { | 296 { |
304 ThreadState* state = current(); | 297 ThreadState* state = current(); |
305 state->heap().detach(state); | 298 state->heap().detach(state); |
306 RELEASE_ASSERT(state->gcState() == ThreadState::NoGCScheduled); | 299 RELEASE_ASSERT(state->gcState() == ThreadState::NoGCScheduled); |
307 delete state; | 300 delete state; |
308 } | 301 } |
309 | 302 |
310 NO_SANITIZE_ADDRESS | 303 NO_SANITIZE_ADDRESS |
311 void ThreadState::visitAsanFakeStackForPointer(Visitor* visitor, Address ptr) | 304 void ThreadState::visitAsanFakeStackForPointer(Visitor* visitor, Address ptr) |
(...skipping 997 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
1309 void ThreadState::addInterruptor(PassOwnPtr<BlinkGCInterruptor> interruptor) | 1302 void ThreadState::addInterruptor(PassOwnPtr<BlinkGCInterruptor> interruptor) |
1310 { | 1303 { |
1311 ASSERT(checkThread()); | 1304 ASSERT(checkThread()); |
1312 SafePointScope scope(BlinkGC::HeapPointersOnStack); | 1305 SafePointScope scope(BlinkGC::HeapPointersOnStack); |
1313 { | 1306 { |
1314 MutexLocker locker(m_heap->threadAttachMutex()); | 1307 MutexLocker locker(m_heap->threadAttachMutex()); |
1315 m_interruptors.append(interruptor); | 1308 m_interruptors.append(interruptor); |
1316 } | 1309 } |
1317 } | 1310 } |
1318 | 1311 |
1319 void ThreadState::registerThreadShutdownHook(PassOwnPtr<SameThreadClosure> hook) | 1312 void ThreadState::registerStaticPersistentNode(PersistentNode* node, PersistentClearCallback callback) |
1320 { | 1313 { |
1321 ASSERT(checkThread()); | |
1322 ASSERT(!isTerminating()); | |
1323 m_threadShutdownHooks.append(hook); | |
1324 } | |
1325 | |
1326 #if defined(LEAK_SANITIZER) | 1314 #if defined(LEAK_SANITIZER) |
1327 void ThreadState::registerStaticPersistentNode(PersistentNode* node) | |
1328 { | |
1329 if (m_disabledStaticPersistentsRegistration) | 1315 if (m_disabledStaticPersistentsRegistration) |
1330 return; | 1316 return; |
1317 #endif | |
1331 | 1318 |
1332 ASSERT(!m_staticPersistents.contains(node)); | 1319 ASSERT(!m_staticPersistents.contains(node)); |
1333 m_staticPersistents.add(node); | 1320 m_staticPersistents.add(node, callback); |
1334 } | 1321 } |
1335 | 1322 |
1336 void ThreadState::releaseStaticPersistentNodes() | 1323 void ThreadState::releaseStaticPersistentNodes() |
1337 { | 1324 { |
1338 for (PersistentNode* node : m_staticPersistents) | 1325 HashMap<PersistentNode*, ThreadState::PersistentClearCallback> staticPersistents;
1339 getPersistentRegion()->freePersistentNode(node); | 1326 staticPersistents.swap(m_staticPersistents); |
1340 | 1327 |
1341 m_staticPersistents.clear(); | 1328 PersistentRegion* persistentRegion = getPersistentRegion(); |
1329 for (const auto& it : staticPersistents) | |
1330 persistentRegion->releasePersistentNode(it.key, it.value); | |
1342 } | 1331 } |
1343 | 1332 |
1333 void ThreadState::freePersistentNode(PersistentNode* persistentNode) | |
1334 { | |
1335 PersistentRegion* persistentRegion = getPersistentRegion(); | |
1336 persistentRegion->freePersistentNode(persistentNode); | |
1337 // Do not allow static persistents to be freed before | |
1338 // they're all released in releaseStaticPersistentNodes(). | |
1339 // | |
1340 // There's no fundamental reason why this couldn't be supported, | |
1341 // but no known use for it. | |
haraken
2016/04/25 08:39:23
Yeah, I prefer adding this restriction too. regist
sof
2016/04/25 08:53:05
Agreed; a thread needing to free a persistent whil
| |
1342 ASSERT(!m_staticPersistents.contains(persistentNode)); | |
1343 } | |
1344 | |
1345 #if defined(LEAK_SANITIZER) | |
1344 void ThreadState::enterStaticReferenceRegistrationDisabledScope() | 1346 void ThreadState::enterStaticReferenceRegistrationDisabledScope() |
1345 { | 1347 { |
1346 m_disabledStaticPersistentsRegistration++; | 1348 m_disabledStaticPersistentsRegistration++; |
1347 } | 1349 } |
1348 | 1350 |
1349 void ThreadState::leaveStaticReferenceRegistrationDisabledScope() | 1351 void ThreadState::leaveStaticReferenceRegistrationDisabledScope() |
1350 { | 1352 { |
1351 ASSERT(m_disabledStaticPersistentsRegistration); | 1353 ASSERT(m_disabledStaticPersistentsRegistration); |
1352 m_disabledStaticPersistentsRegistration--; | 1354 m_disabledStaticPersistentsRegistration--; |
1353 } | 1355 } |
(...skipping 155 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
1509 threadDump->addScalar("dead_count", "objects", totalDeadCount); | 1511 threadDump->addScalar("dead_count", "objects", totalDeadCount); |
1510 threadDump->addScalar("live_size", "bytes", totalLiveSize); | 1512 threadDump->addScalar("live_size", "bytes", totalLiveSize); |
1511 threadDump->addScalar("dead_size", "bytes", totalDeadSize); | 1513 threadDump->addScalar("dead_size", "bytes", totalDeadSize); |
1512 | 1514 |
1513 WebMemoryAllocatorDump* heapsDump = BlinkGCMemoryDumpProvider::instance()->createMemoryAllocatorDumpForCurrentGC(heapsDumpName); | 1515 WebMemoryAllocatorDump* heapsDump = BlinkGCMemoryDumpProvider::instance()->createMemoryAllocatorDumpForCurrentGC(heapsDumpName); |
1514 WebMemoryAllocatorDump* classesDump = BlinkGCMemoryDumpProvider::instance()->createMemoryAllocatorDumpForCurrentGC(classesDumpName); | 1516 WebMemoryAllocatorDump* classesDump = BlinkGCMemoryDumpProvider::instance()->createMemoryAllocatorDumpForCurrentGC(classesDumpName); |
1515 BlinkGCMemoryDumpProvider::instance()->currentProcessMemoryDump()->addOwnershipEdge(classesDump->guid(), heapsDump->guid()); | 1517 BlinkGCMemoryDumpProvider::instance()->currentProcessMemoryDump()->addOwnershipEdge(classesDump->guid(), heapsDump->guid()); |
1516 } | 1518 } |
1517 | 1519 |
1518 } // namespace blink | 1520 } // namespace blink |
OLD | NEW |