OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 383 matching lines...)
394 virtual size_t size() = 0; | 394 virtual size_t size() = 0; |
395 virtual bool isLargeObject() { return false; } | 395 virtual bool isLargeObject() { return false; } |
396 | 396 |
397 Address address() { return reinterpret_cast<Address>(this); } | 397 Address address() { return reinterpret_cast<Address>(this); } |
398 PageMemory* storage() const { return m_storage; } | 398 PageMemory* storage() const { return m_storage; } |
399 ThreadHeap* heap() const { return m_heap; } | 399 ThreadHeap* heap() const { return m_heap; } |
400 bool orphaned() { return !m_heap; } | 400 bool orphaned() { return !m_heap; } |
401 bool terminating() { return m_terminating; } | 401 bool terminating() { return m_terminating; } |
402 void setTerminating() { m_terminating = true; } | 402 void setTerminating() { m_terminating = true; } |
403 | 403 |
404 // Returns true if this page has been swept by the ongoing lazy/incremental sweep. | |
haraken, 2015/01/16 17:29:49: Nit: lazy/incremental => lazy
sof, 2015/01/16 19:30:17: Done.
| |
405 bool hasBeenSwept() const { return m_swept; } | |
406 | |
407 void markAsSwept() { m_swept = true; } | |
haraken, 2015/01/16 17:29:49: Can we add ASSERT(!m_swept)?
sof, 2015/01/16 19:30:17: Done.
| |
408 void markAsUnswept() { m_swept = false; } | |
haraken, 2015/01/16 17:29:49: Can we add ASSERT(m_swept)?
sof, 2015/01/16 19:30:17: Done.
| |
409 | |
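For reference, a minimal sketch (not part of the patch) of what these three members presumably look like once the ASSERT suggestions above are applied; ASSERT is Blink's debug-only assertion macro, and the comment wording follows haraken's nit:

    // Returns true if this page has been swept by the ongoing lazy sweep.
    bool hasBeenSwept() const { return m_swept; }

    void markAsSwept()
    {
        ASSERT(!m_swept); // A page must not be marked swept twice in one sweep cycle.
        m_swept = true;
    }

    void markAsUnswept()
    {
        ASSERT(m_swept); // Only an already-swept page can be reset for the next cycle.
        m_swept = false;
    }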
404 private: | 410 private: |
405 PageMemory* m_storage; | 411 PageMemory* m_storage; |
406 ThreadHeap* m_heap; | 412 ThreadHeap* m_heap; |
407 // Whether the page is part of a terminating thread or not. | 413 // Whether the page is part of a terminating thread or not. |
408 bool m_terminating; | 414 bool m_terminating; |
415 | |
416 protected: | |
haraken, 2015/01/16 17:29:49: Need to be protected?
sof, 2015/01/16 19:30:17: Done.
| |
417 // Track the sweeping state of a page. Set to true once | |
418 // the incremental/lazy sweep completes has processed it. | |
419 // | |
420 // Cleared at the start of a sweep. | |
haraken, 2015/01/16 17:29:49: Set to false at the start of a sweep and set to true…
sof, 2015/01/16 19:30:17: Done.
| |
421 bool m_swept; | |
409 }; | 422 }; |
410 | 423 |
411 class HeapPage final : public BaseHeapPage { | 424 class HeapPage final : public BaseHeapPage { |
412 public: | 425 public: |
413 HeapPage(PageMemory*, ThreadHeap*); | 426 HeapPage(PageMemory*, ThreadHeap*); |
414 | 427 |
415 Address payload() | 428 Address payload() |
416 { | 429 { |
417 return address() + sizeof(HeapPage) + headerPadding(); | 430 return address() + sizeof(HeapPage) + headerPadding(); |
418 } | 431 } |
(...skipping 357 matching lines...)
776 void allocatePage(); | 789 void allocatePage(); |
777 Address allocateLargeObject(size_t, size_t gcInfoIndex); | 790 Address allocateLargeObject(size_t, size_t gcInfoIndex); |
778 | 791 |
779 inline Address allocateObject(size_t allocationSize, size_t gcInfoIndex); | 792 inline Address allocateObject(size_t allocationSize, size_t gcInfoIndex); |
780 | 793 |
781 #if ENABLE(ASSERT) | 794 #if ENABLE(ASSERT) |
782 bool pagesToBeSweptContains(Address); | 795 bool pagesToBeSweptContains(Address); |
783 #endif | 796 #endif |
784 | 797 |
785 bool coalesce(); | 798 bool coalesce(); |
786 void markUnmarkedObjectsDead(); | 799 void preparePagesForSweeping(); |
787 | 800 |
788 Address m_currentAllocationPoint; | 801 Address m_currentAllocationPoint; |
789 size_t m_remainingAllocationSize; | 802 size_t m_remainingAllocationSize; |
790 size_t m_lastRemainingAllocationSize; | 803 size_t m_lastRemainingAllocationSize; |
791 | 804 |
792 HeapPage* m_firstPage; | 805 HeapPage* m_firstPage; |
793 LargeObject* m_firstLargeObject; | 806 LargeObject* m_firstLargeObject; |
794 HeapPage* m_firstUnsweptPage; | 807 HeapPage* m_firstUnsweptPage; |
795 LargeObject* m_firstUnsweptLargeObject; | 808 LargeObject* m_firstUnsweptLargeObject; |
796 | 809 |
797 ThreadState* m_threadState; | 810 ThreadState* m_threadState; |
798 | 811 |
799 FreeList m_freeList; | 812 FreeList m_freeList; |
800 | 813 |
801 // Index into the page pools. This is used to ensure that the pages of the | 814 // Index into the page pools. This is used to ensure that the pages of the |
802 // same type go into the correct page pool and thus avoid type confusion. | 815 // same type go into the correct page pool and thus avoid type confusion. |
803 int m_index; | 816 int m_index; |
804 | 817 |
805 // The size of promptly freed objects in the heap. | 818 // The size of promptly freed objects in the heap. |
806 size_t m_promptlyFreedSize; | 819 size_t m_promptlyFreedSize; |
807 }; | 820 }; |
808 | 821 |
822 // Mask an address down to the enclosing oilpan heap base page. All oilpan heap | |
823 // pages are aligned at blinkPageBase plus an OS page size. | |
824 // FIXME: Remove PLATFORM_EXPORT once we get a proper public interface to our | |
825 // typed heaps. This is only exported to enable tests in HeapTest.cpp. | |
826 PLATFORM_EXPORT inline BaseHeapPage* pageFromObject(const void* object) | |
827 { | |
828 Address address = reinterpret_cast<Address>(const_cast<void*>(object)); | |
829 BaseHeapPage* page = reinterpret_cast<BaseHeapPage*>(blinkPageAddress(address) + WTF::kSystemPageSize); | |
830 ASSERT(page->contains(address)); | |
831 return page; | |
832 } | |
833 | |
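To make the masking concrete: blinkPageAddress() is defined elsewhere in this header. A hedged sketch of the arithmetic it performs, where the 128 KB page size and the constant names are assumptions modeled on Blink's blink-page constants rather than quotes from this patch:

    // Assumed constants mirroring Blink's blink page layout (not part of this diff).
    const size_t blinkPageSizeLog2 = 17; // 128 KB blink pages.
    const size_t blinkPageSize = static_cast<size_t>(1) << blinkPageSizeLog2;
    const size_t blinkPageOffsetMask = blinkPageSize - 1;
    const size_t blinkPageBaseMask = ~blinkPageOffsetMask;

    // Round an interior object pointer down to the base of its blink page.
    inline Address blinkPageAddress(Address address)
    {
        return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address) & blinkPageBaseMask);
    }

pageFromObject() then steps past the leading OS page (WTF::kSystemPageSize) to reach the BaseHeapPage header, since heap pages are aligned at blinkPageBase plus an OS page size, as the comment above states.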
809 class PLATFORM_EXPORT Heap { | 834 class PLATFORM_EXPORT Heap { |
810 public: | 835 public: |
811 static void init(); | 836 static void init(); |
812 static void shutdown(); | 837 static void shutdown(); |
813 static void doShutdown(); | 838 static void doShutdown(); |
814 | 839 |
815 #if ENABLE(ASSERT) | 840 #if ENABLE(ASSERT) |
816 static BaseHeapPage* findPageFromAddress(Address); | 841 static BaseHeapPage* findPageFromAddress(Address); |
817 static BaseHeapPage* findPageFromAddress(void* pointer) { return findPageFromAddress(reinterpret_cast<Address>(pointer)); } | 842 static BaseHeapPage* findPageFromAddress(void* pointer) { return findPageFromAddress(reinterpret_cast<Address>(pointer)); } |
818 static bool containedInHeapOrOrphanedPage(void*); | 843 static bool containedInHeapOrOrphanedPage(void*); |
819 #endif | 844 #endif |
820 | 845 |
846 // Is the finalizable GC object still alive? If no GC is in progress, | |
847 // it must be true. If a lazy sweep is in progress, it will be true if | |
848 // the object hasn't been swept yet and it is marked, or it has | |
849 // been swept and it is still alive. | |
850 // | |
851 // isFinalizedObjectAlive() must not be used with already-finalized object | |
852 // references. | |
853 // | |
854 template<typename T> | |
855 static bool isFinalizedObjectAlive(const T* objectPointer) | |
856 { | |
857 static_assert(IsGarbageCollectedType<T>::value, "only objects deriving from GarbageCollected can be used."); | |
858 #if ENABLE(OILPAN) | |
859 BaseHeapPage* page = pageFromObject(objectPointer); | |
860 if (!page->heap()->threadState()->isSweepingInProgress()) | |
haraken, 2015/01/16 17:29:49: Do we need this check? I guess that the following…
sof, 2015/01/16 19:30:17: It depends on having all the page mark bits set up…
| |
861 return true; | |
862 | |
863 if (page->hasBeenSwept()) | |
864 return true; | |
865 | |
866 return ObjectAliveTrait<T>::isHeapObjectAlive(s_markingVisitor, const_cast<T*>(objectPointer)); | |
867 #else | |
868 // FIXME: remove when incremental sweeping is always on | |
haraken, 2015/01/16 17:29:49: Nit: incremental => lazy
sof, 2015/01/16 19:30:17: Done.
| |
869 // (cf. ThreadState::postGCProcessing()). | |
870 return true; | |
871 #endif | |
872 } | |
873 | |
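A hypothetical caller, to illustrate the contract (Observer, Peer, m_peer, and observerDestroyed are made-up names, not from this patch): a finalizer that runs during lazy sweeping must not touch another on-heap object unless that object is provably still alive:

    class Observer;

    class Peer : public GarbageCollected<Peer> {
    public:
        void observerDestroyed(Observer*) { /* drop the back-reference */ }
        void trace(Visitor*) { }
    };

    class Observer : public GarbageCollectedFinalized<Observer> {
    public:
        ~Observer()
        {
            // During a lazy sweep, m_peer may sit on a not-yet-swept page;
            // only dereference it if it is marked, or already on a swept page.
            if (m_peer && Heap::isFinalizedObjectAlive(m_peer))
                m_peer->observerDestroyed(this);
        }
        void trace(Visitor*) { }
    private:
        Peer* m_peer; // Raw back-reference, checked during finalization.
    };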
821 // Push a trace callback on the marking stack. | 874 // Push a trace callback on the marking stack. |
822 static void pushTraceCallback(void* containerObject, TraceCallback); | 875 static void pushTraceCallback(void* containerObject, TraceCallback); |
823 | 876 |
824 // Push a trace callback on the post-marking callback stack. These | 877 // Push a trace callback on the post-marking callback stack. These |
825 // callbacks are called after normal marking (including ephemeron | 878 // callbacks are called after normal marking (including ephemeron |
826 // iteration). | 879 // iteration). |
827 static void pushPostMarkingCallback(void*, TraceCallback); | 880 static void pushPostMarkingCallback(void*, TraceCallback); |
828 | 881 |
829 // Add a weak pointer callback to the weak callback work list. General | 882 // Add a weak pointer callback to the weak callback work list. General |
830 // object pointer callbacks are added to a thread local weak callback work | 883 // object pointer callbacks are added to a thread local weak callback work |
(...skipping 352 matching lines...)
1183 void* operator new(size_t, NotNullTag, void*) = delete; \ | 1236 void* operator new(size_t, NotNullTag, void*) = delete; \ |
1184 void* operator new(size_t, void*) = delete; | 1237 void* operator new(size_t, void*) = delete; |
1185 | 1238 |
1186 #define GC_PLUGIN_IGNORE(bug) \ | 1239 #define GC_PLUGIN_IGNORE(bug) \ |
1187 __attribute__((annotate("blink_gc_plugin_ignore"))) | 1240 __attribute__((annotate("blink_gc_plugin_ignore"))) |
1188 #else | 1241 #else |
1189 #define STACK_ALLOCATED() DISALLOW_ALLOCATION() | 1242 #define STACK_ALLOCATED() DISALLOW_ALLOCATION() |
1190 #define GC_PLUGIN_IGNORE(bug) | 1243 #define GC_PLUGIN_IGNORE(bug) |
1191 #endif | 1244 #endif |
1192 | 1245 |
1193 // Mask an address down to the enclosing oilpan heap base page. All oilpan heap | |
1194 // pages are aligned at blinkPageBase plus an OS page size. | |
1195 // FIXME: Remove PLATFORM_EXPORT once we get a proper public interface to our | |
1196 // typed heaps. This is only exported to enable tests in HeapTest.cpp. | |
1197 PLATFORM_EXPORT inline BaseHeapPage* pageFromObject(const void* object) | |
1198 { | |
1199 Address address = reinterpret_cast<Address>(const_cast<void*>(object)); | |
1200 BaseHeapPage* page = reinterpret_cast<BaseHeapPage*>(blinkPageAddress(address) + WTF::kSystemPageSize); | |
1201 ASSERT(page->contains(address)); | |
1202 return page; | |
1203 } | |
1204 | |
1205 NO_SANITIZE_ADDRESS inline | 1246 NO_SANITIZE_ADDRESS inline |
1206 size_t HeapObjectHeader::size() const | 1247 size_t HeapObjectHeader::size() const |
1207 { | 1248 { |
1208 size_t result = m_encoded & headerSizeMask; | 1249 size_t result = m_encoded & headerSizeMask; |
1209 // Large objects should not refer to header->size(). | 1250 // Large objects should not refer to header->size(). |
1210 // The actual size of a large object is stored in | 1251 // The actual size of a large object is stored in |
1211 // LargeObject::m_payloadSize. | 1252 // LargeObject::m_payloadSize. |
1212 ASSERT(result != largeObjectSizeInHeader); | 1253 ASSERT(result != largeObjectSizeInHeader); |
1213 ASSERT(!pageFromObject(this)->isLargeObject()); | 1254 ASSERT(!pageFromObject(this)->isLargeObject()); |
1214 return result; | 1255 return result; |
(...skipping 1181 matching lines...)
2396 template<typename T, size_t inlineCapacity> | 2437 template<typename T, size_t inlineCapacity> |
2397 struct GCInfoTrait<HeapVector<T, inlineCapacity>> : public GCInfoTrait<Vector<T, inlineCapacity, HeapAllocator>> { }; | 2438 struct GCInfoTrait<HeapVector<T, inlineCapacity>> : public GCInfoTrait<Vector<T, inlineCapacity, HeapAllocator>> { }; |
2398 template<typename T, size_t inlineCapacity> | 2439 template<typename T, size_t inlineCapacity> |
2399 struct GCInfoTrait<HeapDeque<T, inlineCapacity>> : public GCInfoTrait<Deque<T, inlineCapacity, HeapAllocator>> { }; | 2440 struct GCInfoTrait<HeapDeque<T, inlineCapacity>> : public GCInfoTrait<Deque<T, inlineCapacity, HeapAllocator>> { }; |
2400 template<typename T, typename U, typename V> | 2441 template<typename T, typename U, typename V> |
2401 struct GCInfoTrait<HeapHashCountedSet<T, U, V>> : public GCInfoTrait<HashCountedSet<T, U, V, HeapAllocator>> { }; | 2442 struct GCInfoTrait<HeapHashCountedSet<T, U, V>> : public GCInfoTrait<HashCountedSet<T, U, V, HeapAllocator>> { }; |
2402 | 2443 |
2403 } // namespace blink | 2444 } // namespace blink |
2404 | 2445 |
2405 #endif // Heap_h | 2446 #endif // Heap_h |