OLD | NEW |
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 261 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
272 | 272 |
273 // shouldGC and shouldForceConservativeGC implement the heuristics | 273 // shouldGC and shouldForceConservativeGC implement the heuristics |
274 // that are used to determine when to collect garbage. If | 274 // that are used to determine when to collect garbage. If |
275 // shouldForceConservativeGC returns true, we force the garbage | 275 // shouldForceConservativeGC returns true, we force the garbage |
276 // collection immediately. Otherwise, if shouldGC returns true, we | 276 // collection immediately. Otherwise, if shouldGC returns true, we |
277 // record that we should garbage collect the next time we return | 277 // record that we should garbage collect the next time we return |
278 // to the event loop. If both return false, we don't need to | 278 // to the event loop. If both return false, we don't need to |
279 // collect garbage at this point. | 279 // collect garbage at this point. |
280 bool shouldGC(); | 280 bool shouldGC(); |
281 bool shouldForceConservativeGC(); | 281 bool shouldForceConservativeGC(); |
| 282 bool increasedEnoughToGC(size_t, size_t); |
| 283 bool increasedEnoughToForceConservativeGC(size_t, size_t); |
282 | 284 |
283 // If gcRequested returns true when a thread returns to its event | 285 // If gcRequested returns true when a thread returns to its event |
284 // loop the thread will initiate a garbage collection. | 286 // loop the thread will initiate a garbage collection. |
285 bool gcRequested(); | 287 bool gcRequested(); |
286 void setGCRequested(); | 288 void setGCRequested(); |
287 void clearGCRequested(); | 289 void clearGCRequested(); |
288 | 290 |
289 // Was the last GC forced for testing? This is set when garbage collection | 291 // Was the last GC forced for testing? This is set when garbage collection |
290 // is forced for testing and there are pointers on the stack. It remains | 292 // is forced for testing and there are pointers on the stack. It remains |
291 // set until a garbage collection is triggered with no pointers on the stack
. | 293 // set until a garbage collection is triggered with no pointers on the stack
. |
(...skipping 245 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
537 // When ThreadState is detaching from non-main thread its | 539 // When ThreadState is detaching from non-main thread its |
538 // heap is expected to be empty (because it is going away). | 540 // heap is expected to be empty (because it is going away). |
539 // Perform registered cleanup tasks and garbage collection | 541 // Perform registered cleanup tasks and garbage collection |
540 // to sweep away any objects that are left on this heap. | 542 // to sweep away any objects that are left on this heap. |
541 // We assert that nothing must remain after this cleanup. | 543 // We assert that nothing must remain after this cleanup. |
542 // If assertion does not hold we crash as we are potentially | 544 // If assertion does not hold we crash as we are potentially |
543 // in the dangling pointer situation. | 545 // in the dangling pointer situation. |
544 void cleanup(); | 546 void cleanup(); |
545 void cleanupPages(); | 547 void cleanupPages(); |
546 | 548 |
| 549 void setLowCollectionRate(bool value) { m_lowCollectionRate = value; } |
| 550 |
547 static WTF::ThreadSpecific<ThreadState*>* s_threadSpecific; | 551 static WTF::ThreadSpecific<ThreadState*>* s_threadSpecific; |
548 static SafePointBarrier* s_safePointBarrier; | 552 static SafePointBarrier* s_safePointBarrier; |
549 | 553 |
550 // This variable is flipped to true after all threads are stopped | 554 // This variable is flipped to true after all threads are stopped |
551 // and outermost GC has started. | 555 // and outermost GC has started. |
552 static bool s_inGC; | 556 static bool s_inGC; |
553 | 557 |
554 // We can't create a static member of type ThreadState here | 558 // We can't create a static member of type ThreadState here |
555 // because it will introduce global constructor and destructor. | 559 // because it will introduce global constructor and destructor. |
556 // We would like to manage lifetime of the ThreadState attached | 560 // We would like to manage lifetime of the ThreadState attached |
(...skipping 19 matching lines...) Expand all Loading... |
576 size_t m_noAllocationCount; | 580 size_t m_noAllocationCount; |
577 bool m_inGC; | 581 bool m_inGC; |
578 BaseHeap* m_heaps[NumberOfHeaps]; | 582 BaseHeap* m_heaps[NumberOfHeaps]; |
579 OwnPtr<HeapContainsCache> m_heapContainsCache; | 583 OwnPtr<HeapContainsCache> m_heapContainsCache; |
580 HeapStats m_stats; | 584 HeapStats m_stats; |
581 HeapStats m_statsAfterLastGC; | 585 HeapStats m_statsAfterLastGC; |
582 | 586 |
583 Vector<OwnPtr<CleanupTask> > m_cleanupTasks; | 587 Vector<OwnPtr<CleanupTask> > m_cleanupTasks; |
584 bool m_isTerminating; | 588 bool m_isTerminating; |
585 | 589 |
| 590 bool m_lowCollectionRate; |
| 591 |
586 CallbackStack* m_weakCallbackStack; | 592 CallbackStack* m_weakCallbackStack; |
587 | 593 |
588 #if defined(ADDRESS_SANITIZER) | 594 #if defined(ADDRESS_SANITIZER) |
589 void* m_asanFakeStack; | 595 void* m_asanFakeStack; |
590 #endif | 596 #endif |
591 }; | 597 }; |
592 | 598 |
593 template<ThreadAffinity affinity> class ThreadStateFor; | 599 template<ThreadAffinity affinity> class ThreadStateFor; |
594 | 600 |
595 template<> class ThreadStateFor<MainThreadOnly> { | 601 template<> class ThreadStateFor<MainThreadOnly> { |
(...skipping 113 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
709 // HeapPage header. We use some of the bits to determine | 715 // HeapPage header. We use some of the bits to determine |
710 // whether the page is part of a terminating thread or | 716 // whether the page is part of a terminating thread or |
711 // if the page is traced after being terminated (orphaned). | 717 // if the page is traced after being terminated (orphaned). |
712 uintptr_t m_terminating : 1; | 718 uintptr_t m_terminating : 1; |
713 uintptr_t m_tracedAfterOrphaned : 1; | 719 uintptr_t m_tracedAfterOrphaned : 1; |
714 }; | 720 }; |
715 | 721 |
716 } | 722 } |
717 | 723 |
718 #endif // ThreadState_h | 724 #endif // ThreadState_h |
OLD | NEW |