OLD | NEW |
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 277 matching lines...)
288 // We don't have any type-based mappings to the CollectionBackingHeap. | 288 // We don't have any type-based mappings to the CollectionBackingHeap. |
289 | 289 |
290 // Each typed-heap maps the respective type to its heap. | 290 // Each typed-heap maps the respective type to its heap. |
291 #define DEFINE_TYPED_HEAP_TRAIT(Type) \ | 291 #define DEFINE_TYPED_HEAP_TRAIT(Type) \ |
292 class Type; \ | 292 class Type; \ |
293 template<> \ | 293 template<> \ |
294 struct HeapTypeTrait<class Type> : public HeapIndexTrait<Type##Heap> { }; | 294 struct HeapTypeTrait<class Type> : public HeapIndexTrait<Type##Heap> { }; |
295 FOR_EACH_TYPED_HEAP(DEFINE_TYPED_HEAP_TRAIT) | 295 FOR_EACH_TYPED_HEAP(DEFINE_TYPED_HEAP_TRAIT) |
296 #undef DEFINE_TYPED_HEAP_TRAIT | 296 #undef DEFINE_TYPED_HEAP_TRAIT |
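To make the X-macro concrete: assuming FOR_EACH_TYPED_HEAP lists a type Node (the actual list lives earlier in this header), the macro above would expand to roughly:

    class Node;
    template<>
    struct HeapTypeTrait<class Node> : public HeapIndexTrait<NodeHeap> { };

Each expansion forward-declares the type and maps it to its dedicated heap index.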
297 | 297 |
298 // A HeapStats structure keeps track of the amount of memory allocated | |
299 // for a Blink heap and how much of that memory is used for actual | |
300 // Blink objects. These stats are used in the heuristics to determine | |
301 // when to perform garbage collections. | |
302 class HeapStats { | |
303 public: | |
304 HeapStats() : m_totalObjectSpace(0), m_totalAllocatedSpace(0) { } | |
305 | |
306 size_t totalObjectSpace() const { return m_totalObjectSpace; } | |
307 size_t totalAllocatedSpace() const { return m_totalAllocatedSpace; } | |
308 | |
309 void add(HeapStats* other) | |
310 { | |
311 m_totalObjectSpace += other->m_totalObjectSpace; | |
312 m_totalAllocatedSpace += other->m_totalAllocatedSpace; | |
313 } | |
314 | |
315 inline void increaseObjectSpace(size_t newObjectSpace) | |
316 { | |
317 m_totalObjectSpace += newObjectSpace; | |
318 } | |
319 | |
320 inline void decreaseObjectSpace(size_t deadObjectSpace) | |
321 { | |
322 m_totalObjectSpace -= deadObjectSpace; | |
323 } | |
324 | |
325 inline void increaseAllocatedSpace(size_t newAllocatedSpace) | |
326 { | |
327 m_totalAllocatedSpace += newAllocatedSpace; | |
328 } | |
329 | |
330 inline void decreaseAllocatedSpace(size_t deadAllocatedSpace) | |
331 { | |
332 m_totalAllocatedSpace -= deadAllocatedSpace; | |
333 } | |
334 | |
335 void clear() | |
336 { | |
337 m_totalObjectSpace = 0; | |
338 m_totalAllocatedSpace = 0; | |
339 } | |
340 | |
341 bool operator==(const HeapStats& other) const | |
342 { | |
343 return m_totalAllocatedSpace == other.m_totalAllocatedSpace | |
344 && m_totalObjectSpace == other.m_totalObjectSpace; | |
345 } | |
346 | |
347 private: | |
348 size_t m_totalObjectSpace; // Actually contains objects that may be live, not including headers. | |
349 size_t m_totalAllocatedSpace; // Allocated from the OS. | |
350 | |
351 friend class HeapTester; | |
352 }; | |
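A minimal sketch of how these counters combined under the old API being removed here, assuming a hypothetical attachedThreads collection standing in for however callers enumerate ThreadStates:

    // Illustrative only: aggregate per-thread stats into a grand total.
    HeapStats total; // the constructor zero-initializes both counters
    for (ThreadState* state : attachedThreads)
        total.add(&state->stats());
    // total.totalObjectSpace() approximates live object bytes (headers
    // excluded); total.totalAllocatedSpace() is memory taken from the OS.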
353 | |
354 class PLATFORM_EXPORT ThreadState { | 298 class PLATFORM_EXPORT ThreadState { |
355 WTF_MAKE_NONCOPYABLE(ThreadState); | 299 WTF_MAKE_NONCOPYABLE(ThreadState); |
356 public: | 300 public: |
357 // When garbage collecting we need to know whether or not there | 301 // When garbage collecting we need to know whether or not there |
358 // can be pointers to Blink GC managed objects on the stack for | 302 // can be pointers to Blink GC managed objects on the stack for |
359 // each thread. When threads reach a safe point they record | 303 // each thread. When threads reach a safe point they record |
360 // whether or not they have pointers on the stack. | 304 // whether or not they have pointers on the stack. |
361 enum StackState { | 305 enum StackState { |
362 NoHeapPointersOnStack, | 306 NoHeapPointersOnStack, |
363 HeapPointersOnStack | 307 HeapPointersOnStack |
(...skipping 77 matching lines...)
441 return reinterpret_cast<ThreadState*>(s_mainThreadStateStorage); | 385 return reinterpret_cast<ThreadState*>(s_mainThreadStateStorage); |
442 } | 386 } |
443 | 387 |
444 bool isMainThread() const { return this == mainThreadState(); } | 388 bool isMainThread() const { return this == mainThreadState(); } |
445 inline bool checkThread() const | 389 inline bool checkThread() const |
446 { | 390 { |
447 ASSERT(m_thread == currentThread()); | 391 ASSERT(m_thread == currentThread()); |
448 return true; | 392 return true; |
449 } | 393 } |
450 | 394 |
| 395 // If gcRequested returns true when a thread returns to its event |
| 396 // loop, the thread will initiate a garbage collection. |
| 397 bool gcRequested(); |
| 398 void setGCRequested(); |
| 399 void clearGCRequested(); |
| 400 |
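A sketch of the intended call pattern at the event-loop boundary; the didRunEventLoopTask hook is hypothetical, not the actual scheduler code:

    void didRunEventLoopTask(ThreadState* state)
    {
        if (state->gcRequested()) {
            // At the loop boundary no heap pointers remain on the
            // stack, so a precise collection is safe.
            Heap::collectGarbage(ThreadState::NoHeapPointersOnStack);
        }
    }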
451 // shouldGC and shouldForceConservativeGC implement the heuristics | 401 // shouldGC and shouldForceConservativeGC implement the heuristics |
452 // that are used to determine when to collect garbage. If | 402 // that are used to determine when to collect garbage. If |
453 // shouldForceConservativeGC returns true, we force the garbage | 403 // shouldForceConservativeGC returns true, we force the garbage |
454 // collection immediately. Otherwise, if shouldGC returns true, we | 404 // collection immediately. Otherwise, if shouldGC returns true, we |
455 // record that we should garbage collect the next time we return | 405 // record that we should garbage collect the next time we return |
456 // to the event loop. If both return false, we don't need to | 406 // to the event loop. If both return false, we don't need to |
457 // collect garbage at this point. | 407 // collect garbage at this point. |
458 bool shouldGC(); | 408 bool shouldGC(); |
459 bool shouldForceConservativeGC(); | 409 bool shouldForceConservativeGC(); |
460 bool increasedEnoughToGC(size_t, size_t); | |
461 bool increasedEnoughToForceConservativeGC(size_t, size_t); | |
462 | |
463 // If gcRequested returns true when a thread returns to its event | |
464 // loop, the thread will initiate a garbage collection. | |
465 bool gcRequested(); | |
466 void setGCRequested(); | |
467 void clearGCRequested(); | |
468 | 410 |
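Putting the two heuristics together, a caller at an allocation site would look roughly like this (a sketch, not the actual allocator code):

    if (state->shouldForceConservativeGC()) {
        // Heap growth is drastic enough to collect immediately, even
        // though the stack may still hold heap pointers.
        Heap::collectGarbage(ThreadState::HeapPointersOnStack);
    } else if (state->shouldGC()) {
        // Defer: record the request and collect precisely at the next
        // return to the event loop (see gcRequested above).
        state->setGCRequested();
    }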
469 // Was the last GC forced for testing? This is set when garbage collection | 411 // Was the last GC forced for testing? This is set when garbage collection |
470 // is forced for testing and there are pointers on the stack. It remains | 412 // is forced for testing and there are pointers on the stack. It remains |
471 // set until a garbage collection is triggered with no pointers on the stack. | 413 // set until a garbage collection is triggered with no pointers on the stack. |
472 // This is used for layout tests that trigger GCs and check if objects are | 414 // This is used for layout tests that trigger GCs and check if objects are |
473 // dead at a given point in time. That only reliably works when we get | 415 // dead at a given point in time. That only reliably works when we get |
474 // precise GCs with no conservative stack scanning. | 416 // precise GCs with no conservative stack scanning. |
475 void setForcePreciseGCForTesting(bool); | 417 void setForcePreciseGCForTesting(bool); |
476 bool forcePreciseGCForTesting(); | 418 bool forcePreciseGCForTesting(); |
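A sketch of the check-and-retry pattern this flag enables in a test harness (illustrative only; the flag itself is set inside the GC machinery as described above):

    Heap::collectGarbage(ThreadState::HeapPointersOnStack); // forced, conservative
    if (state->forcePreciseGCForTesting()) {
        // The conservative scan may have kept dead objects alive;
        // collect again with no heap pointers on the stack so object
        // death is observed deterministically.
        Heap::collectGarbage(ThreadState::NoHeapPointersOnStack);
    }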
477 | 419 |
(...skipping 228 matching lines...)
706 | 648 |
707 size_t getClassTag(const GCInfo*); | 649 size_t getClassTag(const GCInfo*); |
708 }; | 650 }; |
709 | 651 |
710 void snapshot(); | 652 void snapshot(); |
711 #endif | 653 #endif |
712 | 654 |
713 void pushWeakObjectPointerCallback(void*, WeakPointerCallback); | 655 void pushWeakObjectPointerCallback(void*, WeakPointerCallback); |
714 bool popAndInvokeWeakPointerCallback(Visitor*); | 656 bool popAndInvokeWeakPointerCallback(Visitor*); |
715 | 657 |
716 void getStats(HeapStats&); | 658 size_t objectPayloadSizeForTesting(); |
717 void getStatsForTesting(HeapStats&); | |
718 HeapStats& stats() { return m_stats; } | |
719 | 659 |
720 void setupHeapsForTermination(); | 660 void setupHeapsForTermination(); |
721 | 661 |
722 void registerSweepingTask(); | 662 void registerSweepingTask(); |
723 void unregisterSweepingTask(); | 663 void unregisterSweepingTask(); |
724 | 664 |
725 // Request to call a pre-finalizer of the target object before the object | 665 // Request to call a pre-finalizer of the target object before the object |
726 // is destructed. The class T must have USING_PRE_FINALIZER(). The | 666 // is destructed. The class T must have USING_PRE_FINALIZER(). The |
727 // argument should be |*this|. Registering a lot of objects affects GC | 667 // argument should be |*this|. Registering a lot of objects affects GC |
728 // performance. We should register an object only if the object really | 668 // performance. We should register an object only if the object really |
(...skipping 54 matching lines...)
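A hypothetical user of the pre-finalizer machinery, following the registration rules described above (class and method names are illustrative):

    class SocketHolder : public GarbageCollectedFinalized<SocketHolder> {
        USING_PRE_FINALIZER(SocketHolder, dispose);
    public:
        SocketHolder()
        {
            ThreadState::current()->registerPreFinalizer(*this);
        }
        void dispose()
        {
            // Runs before sweeping destroys the object; on-heap objects
            // referenced from here may still be touched safely.
        }
        void trace(Visitor*) { }
    };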
783 // When ThreadState is detaching from a non-main thread, its | 723 // When ThreadState is detaching from a non-main thread, its |
784 // heap is expected to be empty (because it is going away). | 724 // heap is expected to be empty (because it is going away). |
785 // Perform registered cleanup tasks and garbage collection | 725 // Perform registered cleanup tasks and garbage collection |
786 // to sweep away any objects that are left on this heap. | 726 // to sweep away any objects that are left on this heap. |
787 // We assert that nothing remains after this cleanup. | 727 // We assert that nothing remains after this cleanup. |
788 // If the assertion does not hold, we crash as we are potentially | 728 // If the assertion does not hold, we crash as we are potentially |
789 // in a dangling-pointer situation. | 729 // in a dangling-pointer situation. |
790 void cleanup(); | 730 void cleanup(); |
791 void cleanupPages(); | 731 void cleanupPages(); |
792 | 732 |
793 void setLowCollectionRate(bool value) { m_lowCollectionRate = value; } | |
794 | |
795 void performPendingSweepInParallel(); | 733 void performPendingSweepInParallel(); |
796 void waitUntilSweepersDone(); | 734 void waitUntilSweepersDone(); |
797 void unregisterPreFinalizerInternal(void*); | 735 void unregisterPreFinalizerInternal(void*); |
798 void invokePreFinalizers(Visitor&); | 736 void invokePreFinalizers(Visitor&); |
799 | 737 |
800 static WTF::ThreadSpecific<ThreadState*>* s_threadSpecific; | 738 static WTF::ThreadSpecific<ThreadState*>* s_threadSpecific; |
801 static uintptr_t s_mainThreadStackStart; | 739 static uintptr_t s_mainThreadStackStart; |
802 static uintptr_t s_mainThreadUnderestimatedStackSize; | 740 static uintptr_t s_mainThreadUnderestimatedStackSize; |
803 static SafePointBarrier* s_safePointBarrier; | 741 static SafePointBarrier* s_safePointBarrier; |
804 | 742 |
(...skipping 19 matching lines...)
824 Vector<Address> m_safePointStackCopy; | 762 Vector<Address> m_safePointStackCopy; |
825 bool m_atSafePoint; | 763 bool m_atSafePoint; |
826 Vector<Interruptor*> m_interruptors; | 764 Vector<Interruptor*> m_interruptors; |
827 bool m_gcRequested; | 765 bool m_gcRequested; |
828 bool m_forcePreciseGCForTesting; | 766 bool m_forcePreciseGCForTesting; |
829 volatile int m_sweepRequested; | 767 volatile int m_sweepRequested; |
830 bool m_sweepInProgress; | 768 bool m_sweepInProgress; |
831 size_t m_noAllocationCount; | 769 size_t m_noAllocationCount; |
832 bool m_inGC; | 770 bool m_inGC; |
833 BaseHeap* m_heaps[NumberOfHeaps]; | 771 BaseHeap* m_heaps[NumberOfHeaps]; |
834 HeapStats m_stats; | |
835 HeapStats m_statsAfterLastGC; | |
836 | 772 |
837 Vector<OwnPtr<CleanupTask> > m_cleanupTasks; | 773 Vector<OwnPtr<CleanupTask> > m_cleanupTasks; |
838 bool m_isTerminating; | 774 bool m_isTerminating; |
839 | 775 |
840 bool m_shouldFlushHeapDoesNotContainCache; | 776 bool m_shouldFlushHeapDoesNotContainCache; |
841 bool m_lowCollectionRate; | 777 bool m_lowCollectionRate; |
842 | 778 |
843 OwnPtr<WebThread> m_sweeperThread; | 779 OwnPtr<WebThread> m_sweeperThread; |
844 int m_numberOfSweeperTasks; | 780 int m_numberOfSweeperTasks; |
845 Mutex m_sweepMutex; | 781 Mutex m_sweepMutex; |
(...skipping 129 matching lines...)
975 // whether the page is part of a terminating thread or | 911 // whether the page is part of a terminating thread or |
976 // if the page is traced after being terminated (orphaned). | 912 // if the page is traced after being terminated (orphaned). |
977 uintptr_t m_terminating : 1; | 913 uintptr_t m_terminating : 1; |
978 uintptr_t m_tracedAfterOrphaned : 1; | 914 uintptr_t m_tracedAfterOrphaned : 1; |
979 uintptr_t m_promptlyFreedSize : 17; // == blinkPageSizeLog2 | 915 uintptr_t m_promptlyFreedSize : 17; // == blinkPageSizeLog2 |
980 }; | 916 }; |
981 | 917 |
982 } // namespace blink | 918 } // namespace blink |
983 | 919 |
984 #endif // ThreadState_h | 920 #endif // ThreadState_h |