| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 67 matching lines...) |
| 78 SafePointBarrier* ThreadState::s_safePointBarrier = nullptr; | 78 SafePointBarrier* ThreadState::s_safePointBarrier = nullptr; |
| 79 | 79 |
| 80 RecursiveMutex& ThreadState::threadAttachMutex() | 80 RecursiveMutex& ThreadState::threadAttachMutex() |
| 81 { | 81 { |
| 82 AtomicallyInitializedStaticReference(RecursiveMutex, mutex, (new RecursiveMutex)); | 82 AtomicallyInitializedStaticReference(RecursiveMutex, mutex, (new RecursiveMutex)); |
| 83 return mutex; | 83 return mutex; |
| 84 } | 84 } |
| 85 | 85 |
| 86 ThreadState::ThreadState() | 86 ThreadState::ThreadState() |
| 87 : m_thread(currentThread()) | 87 : m_thread(currentThread()) |
| 88 , m_persistents(adoptPtr(new PersistentAnchor())) | 88 , m_persistentRegion(adoptPtr(new PersistentRegion())) |
| 89 , m_startOfStack(reinterpret_cast<intptr_t*>(StackFrameDepth::getStackStart())) | 89 , m_startOfStack(reinterpret_cast<intptr_t*>(StackFrameDepth::getStackStart())) |
| 90 , m_endOfStack(reinterpret_cast<intptr_t*>(StackFrameDepth::getStackStart())) | 90 , m_endOfStack(reinterpret_cast<intptr_t*>(StackFrameDepth::getStackStart())) |
| 91 , m_safePointScopeMarker(nullptr) | 91 , m_safePointScopeMarker(nullptr) |
| 92 , m_atSafePoint(false) | 92 , m_atSafePoint(false) |
| 93 , m_interruptors() | 93 , m_interruptors() |
| 94 , m_sweepForbidden(false) | 94 , m_sweepForbidden(false) |
| 95 , m_noAllocationCount(0) | 95 , m_noAllocationCount(0) |
| 96 , m_gcForbiddenCount(0) | 96 , m_gcForbiddenCount(0) |
| 97 , m_vectorBackingHeapIndex(Vector1HeapIndex) | 97 , m_vectorBackingHeapIndex(Vector1HeapIndex) |
| 98 , m_currentHeapAges(0) | 98 , m_currentHeapAges(0) |
| (...skipping 137 matching lines...) |
| 236 // pointers into the heap owned by this thread. | 236 // pointers into the heap owned by this thread. |
| 237 m_isTerminating = true; | 237 m_isTerminating = true; |
| 238 | 238 |
| 239 // Set the terminate flag on all heap pages of this thread. This is used to | 239 // Set the terminate flag on all heap pages of this thread. This is used to |
| 240 // ensure we don't trace pages on other threads that are not part of the | 240 // ensure we don't trace pages on other threads that are not part of the |
| 241 // thread local GC. | 241 // thread local GC. |
| 242 prepareHeapForTermination(); | 242 prepareHeapForTermination(); |
| 243 | 243 |
| 244 // Do thread-local GCs as long as the count of thread-local Persistents | 244 // Do thread-local GCs as long as the count of thread-local Persistents |
| 245 // changes and is above zero. | 245 // changes and is above zero. |
| 246 PersistentAnchor* anchor = m_persistents.get(); | |
| 247 int oldCount = -1; | 246 int oldCount = -1; |
| 248 int currentCount = anchor->numberOfPersistents(); | 247 int currentCount = persistentRegion()->numberOfPersistents(); |
| 249 ASSERT(currentCount >= 0); | 248 ASSERT(currentCount >= 0); |
| 250 while (currentCount != oldCount) { | 249 while (currentCount != oldCount) { |
| 251 Heap::collectGarbageForTerminatingThread(this); | 250 Heap::collectGarbageForTerminatingThread(this); |
| 252 oldCount = currentCount; | 251 oldCount = currentCount; |
| 253 currentCount = anchor->numberOfPersistents(); | 252 currentCount = persistentRegion()->numberOfPersistents(); |
| 254 } | 253 } |
| 255 // We should not have any persistents left when getting to this point; | 254 // We should not have any persistents left when getting to this point; |
| 256 // if we do, it is probably a bug, so add a debug ASSERT to catch it. | 255 // if we do, it is probably a bug, so add a debug ASSERT to catch it. |
| 257 ASSERT(!currentCount); | 256 ASSERT(!currentCount); |
| 258 // All pre-finalizers should have been consumed. | 257 // All pre-finalizers should have been consumed. |
| 259 ASSERT(m_preFinalizers.isEmpty()); | 258 ASSERT(m_preFinalizers.isEmpty()); |
| 260 RELEASE_ASSERT(gcState() == NoGCScheduled); | 259 RELEASE_ASSERT(gcState() == NoGCScheduled); |
| 261 | 260 |
| 262 // Add pages to the orphaned page pool to ensure any global GCs from this point | 261 // Add pages to the orphaned page pool to ensure any global GCs from this point |
| 263 // on will not trace objects on this thread's heaps. | 262 // on will not trace objects on this thread's heaps. |
| 264 cleanupPages(); | 263 cleanupPages(); |
| 265 | 264 |
| 266 ASSERT(attachedThreads().contains(this)); | 265 ASSERT(attachedThreads().contains(this)); |
| 267 attachedThreads().remove(this); | 266 attachedThreads().remove(this); |
| 268 } | 267 } |
| 269 } | 268 } |
| 270 | 269 |
| 271 void ThreadState::detach() | 270 void ThreadState::detach() |
| 272 { | 271 { |
| 273 ThreadState* state = current(); | 272 ThreadState* state = current(); |
| 274 state->cleanup(); | 273 state->cleanup(); |
| 275 RELEASE_ASSERT(state->gcState() == ThreadState::NoGCScheduled); | 274 RELEASE_ASSERT(state->gcState() == ThreadState::NoGCScheduled); |
| 276 delete state; | 275 delete state; |
| 277 shutdownHeapIfNecessary(); | 276 shutdownHeapIfNecessary(); |
| 278 } | 277 } |
| 279 | 278 |
| 280 void ThreadState::visitPersistentRoots(Visitor* visitor) | 279 void ThreadState::visitPersistentRoots(Visitor* visitor) |
| 281 { | 280 { |
| 282 TRACE_EVENT0("blink_gc", "ThreadState::visitPersistentRoots"); | 281 TRACE_EVENT0("blink_gc", "ThreadState::visitPersistentRoots"); |
| 283 { | 282 crossThreadPersistentRegion().tracePersistentNodes(visitor); |
| 284 // All threads are at safepoints, so this is not strictly necessary. | |
| 285 // However, we acquire the mutex to make mutation and traversal of this | |
| 286 // list symmetrical. | |
| 287 MutexLocker locker(globalRootsMutex()); | |
| 288 globalRoots().tracePersistentNodes(visitor); | |
| 289 } | |
| 290 | 283 |
| 291 for (ThreadState* state : attachedThreads()) | 284 for (ThreadState* state : attachedThreads()) |
| 292 state->visitPersistents(visitor); | 285 state->visitPersistents(visitor); |
| 293 } | 286 } |
| 294 | 287 |
| 295 void ThreadState::visitStackRoots(Visitor* visitor) | 288 void ThreadState::visitStackRoots(Visitor* visitor) |
| 296 { | 289 { |
| 297 TRACE_EVENT0("blink_gc", "ThreadState::visitStackRoots"); | 290 TRACE_EVENT0("blink_gc", "ThreadState::visitStackRoots"); |
| 298 for (ThreadState* state : attachedThreads()) | 291 for (ThreadState* state : attachedThreads()) |
| 299 state->visitStack(visitor); | 292 state->visitStack(visitor); |
| (...skipping 66 matching lines...) |
| 366 // See the comment above. | 359 // See the comment above. |
| 367 __msan_unpoison(&ptr, sizeof(ptr)); | 360 __msan_unpoison(&ptr, sizeof(ptr)); |
| 368 #endif | 361 #endif |
| 369 Heap::checkAndMarkPointer(visitor, ptr); | 362 Heap::checkAndMarkPointer(visitor, ptr); |
| 370 visitAsanFakeStackForPointer(visitor, ptr); | 363 visitAsanFakeStackForPointer(visitor, ptr); |
| 371 } | 364 } |
| 372 } | 365 } |
| 373 | 366 |
| 374 void ThreadState::visitPersistents(Visitor* visitor) | 367 void ThreadState::visitPersistents(Visitor* visitor) |
| 375 { | 368 { |
| 376 m_persistents->tracePersistentNodes(visitor); | 369 m_persistentRegion->tracePersistentNodes(visitor); |
| 377 if (m_traceDOMWrappers) { | 370 if (m_traceDOMWrappers) { |
| 378 TRACE_EVENT0("blink_gc", "V8GCController::traceDOMWrappers"); | 371 TRACE_EVENT0("blink_gc", "V8GCController::traceDOMWrappers"); |
| 379 m_traceDOMWrappers(m_isolate, visitor); | 372 m_traceDOMWrappers(m_isolate, visitor); |
| 380 } | 373 } |
| 381 } | 374 } |
| 382 | 375 |
| 383 #if ENABLE(GC_PROFILING) | 376 #if ENABLE(GC_PROFILING) |
| 384 const GCInfo* ThreadState::findGCInfo(Address address) | 377 const GCInfo* ThreadState::findGCInfo(Address address) |
| 385 { | 378 { |
| 386 if (BasePage* page = findPageFromAddress(address)) | 379 if (BasePage* page = findPageFromAddress(address)) |
| (...skipping 148 matching lines...) |
| 535 | 528 |
| 536 MarkingVisitor<Visitor::WeakProcessing> weakProcessingVisitor; | 529 MarkingVisitor<Visitor::WeakProcessing> weakProcessingVisitor; |
| 537 | 530 |
| 538 // Perform thread-specific weak processing. | 531 // Perform thread-specific weak processing. |
| 539 while (popAndInvokeThreadLocalWeakCallback(&weakProcessingVisitor)) { } | 532 while (popAndInvokeThreadLocalWeakCallback(&weakProcessingVisitor)) { } |
| 540 | 533 |
| 541 if (isMainThread()) | 534 if (isMainThread()) |
| 542 ScriptForbiddenScope::exit(); | 535 ScriptForbiddenScope::exit(); |
| 543 } | 536 } |
| 544 | 537 |
| 545 PersistentAnchor& ThreadState::globalRoots() | 538 CrossThreadPersistentRegion& ThreadState::crossThreadPersistentRegion() |
| 546 { | 539 { |
| 547 AtomicallyInitializedStaticReference(PersistentAnchor, anchor, new PersistentAnchor); | 540 AtomicallyInitializedStaticReference(CrossThreadPersistentRegion, persistentRegion, new CrossThreadPersistentRegion()); |
| 548 return anchor; | 541 return persistentRegion; |
| 549 } | |
| 550 | |
| 551 Mutex& ThreadState::globalRootsMutex() | |
| 552 { | |
| 553 AtomicallyInitializedStaticReference(Mutex, mutex, new Mutex); | |
| 554 return mutex; | |
| 555 } | 542 } |
| 556 | 543 |
| 557 bool ThreadState::shouldForceMemoryPressureGC() | 544 bool ThreadState::shouldForceMemoryPressureGC() |
| 558 { | 545 { |
| 559 // Avoid potential overflow by truncating to KB. | 546 // Avoid potential overflow by truncating to KB. |
| 560 size_t currentObjectSizeKb = (Heap::allocatedObjectSize() + Heap::markedObjectSize() + WTF::Partitions::totalSizeOfCommittedPages()) >> 10; | 547 size_t currentObjectSizeKb = (Heap::allocatedObjectSize() + Heap::markedObjectSize() + WTF::Partitions::totalSizeOfCommittedPages()) >> 10; |
| 561 if (currentObjectSizeKb < 300 * 1024) | 548 if (currentObjectSizeKb < 300 * 1024) |
| 562 return false; | 549 return false; |
| 563 | 550 |
| 564 size_t estimatedLiveObjectSizeKb = Heap::estimatedLiveObjectSize() >> 10; | 551 size_t estimatedLiveObjectSizeKb = Heap::estimatedLiveObjectSize() >> 10; |
| (...skipping 943 matching lines...) |
| 1508 json->beginArray(it->key.ascii().data()); | 1495 json->beginArray(it->key.ascii().data()); |
| 1509 for (size_t age = 0; age <= maxHeapObjectAge; ++age) | 1496 for (size_t age = 0; age <= maxHeapObjectAge; ++age) |
| 1510 json->pushInteger(it->value.ages[age]); | 1497 json->pushInteger(it->value.ages[age]); |
| 1511 json->endArray(); | 1498 json->endArray(); |
| 1512 } | 1499 } |
| 1513 TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(TRACE_DISABLED_BY_DEFAULT("blink_gc"), statsName, this, json.release()); | 1500 TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(TRACE_DISABLED_BY_DEFAULT("blink_gc"), statsName, this, json.release()); |
| 1514 } | 1501 } |
| 1515 #endif | 1502 #endif |
| 1516 | 1503 |
| 1517 } // namespace blink | 1504 } // namespace blink |
| OLD | NEW |
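
A note on the AtomicallyInitializedStaticReference uses in this diff (threadAttachMutex() at new lines 80-84 and the crossThreadPersistentRegion() accessor at new lines 538-542): the macro's job is exactly-once, thread-safe construction of an intentionally leaked singleton. Below is a minimal sketch of that same guarantee in standard C++, offered only as an analogue; the macro itself is WTF-internal and its real implementation differs.

    #include <mutex>

    // Sketch: exactly-once, thread-safe lazy initialization of a leaked
    // singleton, the guarantee AtomicallyInitializedStaticReference gives.
    // std::call_once stands in for WTF's atomics-based machinery.
    template <typename T>
    T& lazyLeakedSingleton()
    {
        static T* instance;
        static std::once_flag once;
        std::call_once(once, [] { instance = new T(); }); // never deleted, by design
        return *instance;
    }

threadAttachMutex() is this pattern with T = RecursiveMutex; leaking the object sidesteps destruction-order problems at process exit.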
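The central rename in the patch is PersistentAnchor to PersistentRegion for the thread-local list of Persistent<> handles (constructor, new line 88; visitPersistents(), new line 369). A reduced sketch of the shape the diff implies follows; only tracePersistentNodes() and numberOfPersistents() are taken from the diff itself, and the node layout here is an assumption for illustration.

    #include <vector>

    struct Visitor {};                                // stand-in for blink::Visitor
    using TraceCallback = void (*)(Visitor*, void*);

    // Sketch: a region owning the persistent handles created on one thread.
    // No locking is needed; only the owner thread mutates or traces it.
    class PersistentRegion {
    public:
        struct Node { void* self; TraceCallback trace; };

        void tracePersistentNodes(Visitor* visitor)
        {
            for (const Node& node : m_nodes)          // treat each handle as a GC root
                node.trace(visitor, node.self);
        }
        int numberOfPersistents() const { return static_cast<int>(m_nodes.size()); }

    private:
        std::vector<Node> m_nodes;                    // the real region manages node slots
    };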
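On the shutdown loop in cleanup() (new lines 244-253): a single thread-local GC is not enough, because sweeping can run destructors that in turn clear further Persistent<> handles, so the code iterates until the persistent count reaches a fixed point. A self-contained toy of the same shape, with a fake collector:

    #include <cstdio>

    // Toy model: each "GC" may release more persistents, so iterate until
    // the count stops changing rather than collecting once.
    int main()
    {
        int persistents = 3;                          // pretend three handles are live
        int oldCount = -1;
        int currentCount = persistents;
        while (currentCount != oldCount) {
            if (persistents > 0)                      // one handle freed per "GC" pass
                --persistents;
            oldCount = currentCount;
            currentCount = persistents;
        }
        std::printf("fixed point: %d persistents\n", currentCount); // prints 0
        return 0;
    }

The real loop terminates for the same reason: the count is non-negative and only re-runs while it keeps changing, and the trailing ASSERT(!currentCount) catches the case where the fixed point is nonzero.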
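The visitStack() fragment (new lines 359-364) is conservative stack scanning: each word of the stack is tested by Heap::checkAndMarkPointer(), and under MemorySanitizer the word is unpoisoned first so the read is well-defined even for uninitialized slots. A hedged sketch of the scanning loop only, with the pointer test stubbed out; the bounds handling is an assumption based on the m_startOfStack/m_endOfStack members above:

    #include <cstdint>

    struct Visitor {};                                // stand-in for blink::Visitor

    // Stub: the real Heap::checkAndMarkPointer() checks whether the value
    // points into a heap page and, if so, marks the containing object.
    static void checkAndMarkPointer(Visitor*, intptr_t) {}

    // Sketch: scan every word in [low, high) and treat anything that looks
    // like a heap pointer as a root. Assumes a downward-growing stack, so
    // the current stack position is the low bound and the recorded stack
    // start is the high bound.
    static void scanStackWords(Visitor* visitor, intptr_t* low, intptr_t* high)
    {
        for (intptr_t* slot = low; slot < high; ++slot)
            checkAndMarkPointer(visitor, *slot);
    }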
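The second half of the rename: globalRoots() plus the separate globalRootsMutex() (old lines 545-555) collapse into the single crossThreadPersistentRegion() accessor, and visitPersistentRoots() no longer takes a MutexLocker because the locking now lives inside the region. A sketch of that encapsulation, reusing the PersistentRegion shape from the earlier sketch:

    #include <mutex>

    struct Visitor {};                                // stand-in for blink::Visitor

    // Minimal stand-in for the PersistentRegion sketched earlier.
    class PersistentRegion {
    public:
        void tracePersistentNodes(Visitor*) {}
    };

    // Sketch: same storage as a thread-local region, but every operation
    // locks internally, replacing the external globalRootsMutex().
    class CrossThreadPersistentRegion {
    public:
        void tracePersistentNodes(Visitor* visitor)
        {
            std::lock_guard<std::mutex> locker(m_mutex);
            m_region.tracePersistentNodes(visitor);
        }

    private:
        std::mutex m_mutex;                           // was ThreadState::globalRootsMutex()
        PersistentRegion m_region;
    };

Callers can no longer forget to take the lock, which is the point of folding the mutex into the region.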
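On shouldForceMemoryPressureGC() (new lines 546-549): the >> 10 truncates byte counts to KB before the comparison, and the guard currentObjectSizeKb < 300 * 1024 therefore means the forced-GC path is only considered once the combined heap size passes 300 MB. A worked check with made-up sizes:

    #include <cstddef>

    // Worked example of the 300 MB floor; all three sizes are hypothetical.
    int main()
    {
        size_t allocated = 200u * 1024 * 1024;        // 200 MB of allocated objects
        size_t marked    =  80u * 1024 * 1024;        // 80 MB surviving the last mark
        size_t partition =  40u * 1024 * 1024;        // 40 MB of partition pages
        // >> 10 converts bytes to KB, matching the code above.
        size_t currentObjectSizeKb = (allocated + marked + partition) >> 10; // 327680 KB
        bool pastFloor = currentObjectSizeKb >= 300 * 1024;                  // 307200 KB floor
        return pastFloor ? 0 : 1;                     // 320 MB >= 300 MB, so returns 0
    }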