| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 383 matching lines...) | |
| 394 virtual size_t size() = 0; | 394 virtual size_t size() = 0; |
| 395 virtual bool isLargeObject() { return false; } | 395 virtual bool isLargeObject() { return false; } |
| 396 | 396 |
| 397 Address address() { return reinterpret_cast<Address>(this); } | 397 Address address() { return reinterpret_cast<Address>(this); } |
| 398 PageMemory* storage() const { return m_storage; } | 398 PageMemory* storage() const { return m_storage; } |
| 399 ThreadHeap* heap() const { return m_heap; } | 399 ThreadHeap* heap() const { return m_heap; } |
| 400 bool orphaned() { return !m_heap; } | 400 bool orphaned() { return !m_heap; } |
| 401 bool terminating() { return m_terminating; } | 401 bool terminating() { return m_terminating; } |
| 402 void setTerminating() { m_terminating = true; } | 402 void setTerminating() { m_terminating = true; } |
| 403 | 403 |
| 404 // Returns true if this page has been swept by the ongoing lazy sweep. | |
| 405 bool hasBeenSwept() const { return m_swept; } | |
| 406 | |
| 407 void markAsSwept() | |
| 408 { | |
| 409 ASSERT(!m_swept); | |
| 410 m_swept = true; | |
| 411 } | |
| 412 | |
| 413 void markAsUnswept() | |
| 414 { | |
| 415 ASSERT(m_swept); | |
| 416 m_swept = false; | |
| 417 } | |
| 418 | |
| 404 private: | 419 private: |
| 405 PageMemory* m_storage; | 420 PageMemory* m_storage; |
| 406 ThreadHeap* m_heap; | 421 ThreadHeap* m_heap; |
| 407 // Whether the page is part of a terminating thread or not. | 422 // Whether the page is part of a terminating thread or not. |
| 408 bool m_terminating; | 423 bool m_terminating; |
| 424 | |
| 425 // Track the sweeping state of a page. Set to true once | |
| 426 // the lazy sweep has processed it. | |
| 427 // | |
| 428 // Set to false at the start of a sweep, true upon completion | |
| 429 // of lazy sweeping. | |
| 430 bool m_swept; | |
| 409 }; | 431 }; |
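The new m_swept flag gives each page a strict two-phase sweep lifecycle: every (previously swept) page is flipped to unswept when a sweep cycle starts, and each page is flipped back exactly once as the lazy sweep reaches it. A minimal sketch of a driver for that protocol, assuming the BaseHeapPage definition above is in scope (the pages vector and the sweepPage() helper are illustrative, not part of this patch):

```cpp
#include <vector>

void sweepPage(BaseHeapPage*); // Hypothetical: finalize dead objects on a page.

// Sketch of the m_swept protocol. The ASSERTs in markAsSwept() and
// markAsUnswept() enforce that the two transitions strictly alternate.
void lazySweepSketch(std::vector<BaseHeapPage*>& pages)
{
    // A sweep cycle starts by marking every page unswept.
    for (BaseHeapPage* page : pages)
        page->markAsUnswept();

    // Lazy sweeping may run in increments; hasBeenSwept() lets a later
    // increment skip pages an earlier increment already processed.
    for (BaseHeapPage* page : pages) {
        if (page->hasBeenSwept())
            continue;
        sweepPage(page);
        page->markAsSwept();
    }
}
```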
| 410 | 432 |
| 411 class HeapPage final : public BaseHeapPage { | 433 class HeapPage final : public BaseHeapPage { |
| 412 public: | 434 public: |
| 413 HeapPage(PageMemory*, ThreadHeap*); | 435 HeapPage(PageMemory*, ThreadHeap*); |
| 414 | 436 |
| 415 Address payload() | 437 Address payload() |
| 416 { | 438 { |
| 417 return address() + sizeof(HeapPage) + headerPadding(); | 439 return address() + sizeof(HeapPage) + headerPadding(); |
| 418 } | 440 } |
| (...skipping 357 matching lines...) | |
| 776 void allocatePage(); | 798 void allocatePage(); |
| 777 Address allocateLargeObject(size_t, size_t gcInfoIndex); | 799 Address allocateLargeObject(size_t, size_t gcInfoIndex); |
| 778 | 800 |
| 779 inline Address allocateObject(size_t allocationSize, size_t gcInfoIndex); | 801 inline Address allocateObject(size_t allocationSize, size_t gcInfoIndex); |
| 780 | 802 |
| 781 #if ENABLE(ASSERT) | 803 #if ENABLE(ASSERT) |
| 782 bool pagesToBeSweptContains(Address); | 804 bool pagesToBeSweptContains(Address); |
| 783 #endif | 805 #endif |
| 784 | 806 |
| 785 bool coalesce(); | 807 bool coalesce(); |
| 786 void markUnmarkedObjectsDead(); | 808 void preparePagesForSweeping(); |
| 787 | 809 |
| 788 Address m_currentAllocationPoint; | 810 Address m_currentAllocationPoint; |
| 789 size_t m_remainingAllocationSize; | 811 size_t m_remainingAllocationSize; |
| 790 size_t m_lastRemainingAllocationSize; | 812 size_t m_lastRemainingAllocationSize; |
| 791 | 813 |
| 792 HeapPage* m_firstPage; | 814 HeapPage* m_firstPage; |
| 793 LargeObject* m_firstLargeObject; | 815 LargeObject* m_firstLargeObject; |
| 794 HeapPage* m_firstUnsweptPage; | 816 HeapPage* m_firstUnsweptPage; |
| 795 LargeObject* m_firstUnsweptLargeObject; | 817 LargeObject* m_firstUnsweptLargeObject; |
| 796 | 818 |
| 797 ThreadState* m_threadState; | 819 ThreadState* m_threadState; |
| 798 | 820 |
| 799 FreeList m_freeList; | 821 FreeList m_freeList; |
| 800 | 822 |
| 801 // Index into the page pools. This is used to ensure that the pages of the | 823 // Index into the page pools. This is used to ensure that the pages of the |
| 802 // same type go into the correct page pool and thus avoid type confusion. | 824 // same type go into the correct page pool and thus avoid type confusion. |
| 803 int m_index; | 825 int m_index; |
| 804 | 826 |
| 805 // The size of promptly freed objects in the heap. | 827 // The size of promptly freed objects in the heap. |
| 806 size_t m_promptlyFreedSize; | 828 size_t m_promptlyFreedSize; |
| 807 }; | 829 }; |
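The m_currentAllocationPoint / m_remainingAllocationSize pair above implements a bump-pointer fast path: most allocations just advance a pointer within the current page, and only fall back to the free list or a fresh page when the remainder runs out. A self-contained sketch of that fast path (BumpAllocator and outOfLineAllocate() are illustrative names, not the real ThreadHeap code, which also writes a HeapObjectHeader and consults the FreeList):

```cpp
#include <cstddef>
#include <cstdint>

using Address = uint8_t*;

// Illustrative bump-pointer allocator mirroring m_currentAllocationPoint
// and m_remainingAllocationSize above; the slow path is only declared.
class BumpAllocator {
public:
    Address allocate(size_t allocationSize)
    {
        if (allocationSize <= m_remainingAllocationSize) {
            Address result = m_currentAllocationPoint;
            m_currentAllocationPoint += allocationSize;
            m_remainingAllocationSize -= allocationSize;
            return result; // Fast path: just bump the pointer.
        }
        return outOfLineAllocate(allocationSize); // Slow path: free list / new page.
    }

private:
    Address outOfLineAllocate(size_t); // Hypothetical slow path.

    Address m_currentAllocationPoint = nullptr;
    size_t m_remainingAllocationSize = 0;
};
```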
| 808 | 830 |
| 831 // Mask an address down to the enclosing oilpan heap base page. All oilpan heap | |
| 832 // pages are aligned at blinkPageBase plus an OS page size. | |
| 833 // FIXME: Remove PLATFORM_EXPORT once we get a proper public interface to our | |
| 834 // typed heaps. This is only exported to enable tests in HeapTest.cpp. | |
| 835 PLATFORM_EXPORT inline BaseHeapPage* pageFromObject(const void* object) | |
| 836 { | |
| 837 Address address = reinterpret_cast<Address>(const_cast<void*>(object)); | |
| 838 BaseHeapPage* page = reinterpret_cast<BaseHeapPage*>(blinkPageAddress(address) + WTF::kSystemPageSize); | |
| 839 ASSERT(page->contains(address)); | |
| 840 return page; | |
| 841 } | |
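pageFromObject() works because every blink page lives at a power-of-two-aligned address: masking off the low bits of any interior pointer recovers the page base, and the page header sits one OS page above it. A worked sketch of the arithmetic, assuming a 128 KB blink page and a 4 KB system page (the real values come from blinkPageSize and WTF::kSystemPageSize):

```cpp
#include <cassert>
#include <cstdint>

// Assumed constants for illustration only.
constexpr uintptr_t kBlinkPageSize = 1 << 17;  // 128 KB, a power of two.
constexpr uintptr_t kSystemPageSize = 1 << 12; // 4 KB OS page.

// blinkPageAddress() is just a mask down to the enclosing 128 KB boundary.
uintptr_t blinkPageBaseOf(uintptr_t address)
{
    return address & ~(kBlinkPageSize - 1);
}

int main()
{
    // Every interior pointer within one blink page masks to the same base.
    uintptr_t object = 0x40021230;
    assert(blinkPageBaseOf(object) == 0x40020000);
    assert(blinkPageBaseOf(object + 8) == blinkPageBaseOf(object));

    // The BaseHeapPage header sits one OS page above the base, which is
    // exactly the address pageFromObject() reconstructs.
    uintptr_t header = blinkPageBaseOf(object) + kSystemPageSize;
    assert(header == 0x40021000);
    return 0;
}
```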
| 842 | |
| 809 class PLATFORM_EXPORT Heap { | 843 class PLATFORM_EXPORT Heap { |
| 810 public: | 844 public: |
| 811 static void init(); | 845 static void init(); |
| 812 static void shutdown(); | 846 static void shutdown(); |
| 813 static void doShutdown(); | 847 static void doShutdown(); |
| 814 | 848 |
| 815 #if ENABLE(ASSERT) | 849 #if ENABLE(ASSERT) |
| 816 static BaseHeapPage* findPageFromAddress(Address); | 850 static BaseHeapPage* findPageFromAddress(Address); |
| 817 static BaseHeapPage* findPageFromAddress(void* pointer) { return findPageFromAddress(reinterpret_cast<Address>(pointer)); } | 851 static BaseHeapPage* findPageFromAddress(void* pointer) { return findPageFromAddress(reinterpret_cast<Address>(pointer)); } |
| 818 static bool containedInHeapOrOrphanedPage(void*); | 852 static bool containedInHeapOrOrphanedPage(void*); |
| 819 #endif | 853 #endif |
| 820 | 854 |
| 855 // Is the finalizable GC object still alive? If no GC is in progress, | |
| 856 // it must be true. If a lazy sweep is in progress, it will be true if | |
| 857 // the object hasn't been swept yet and it is marked, or it has | |
| 858 // been swept and it is still alive. | |
| 859 // | |
| 860 // isFinalizedObjectAlive() must not be used with already-finalized object | |
| 861 // references. | |
| 862 // | |
| haraken 2015/01/19 05:35:49: A couple of suggestions to clean up the method and | |
| 863 template<typename T> | |
| 864 static bool isFinalizedObjectAlive(const T* objectPointer) | |
| 865 { | |
| 866 static_assert(IsGarbageCollectedType<T>::value, "only objects deriving from GarbageCollected can be used."); | |
| 867 #if ENABLE(OILPAN) | |
| 868 BaseHeapPage* page = pageFromObject(objectPointer); | |
| 869 if (page->hasBeenSwept()) | |
| 870 return true; | |
| 871 ASSERT(page->heap()->threadState()->isSweepingInProgress()); | |
| 872 | |
| 873 return ObjectAliveTrait<T>::isHeapObjectAlive(s_markingVisitor, const_cast<T*>(objectPointer)); | |
| 874 #else | |
| 875 // FIXME: remove when lazy sweeping is always on | |
| 876 // (cf. ThreadState::postGCProcessing()). | |
| 877 return true; | |
| 878 #endif | |
| 879 } | |
| 880 | |
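The intended consumer of isFinalizedObjectAlive() is a finalizer holding an untraced back reference that must not call into its peer if the peer was swept, or is about to be swept, in the same lazy sweep. A hedged sketch of that pattern; Registry and Observer are made-up types, not part of this patch:

```cpp
class Observer; // Hypothetical GarbageCollected peer with an unregister() method.

// Hypothetical finalizable class consulting Heap::isFinalizedObjectAlive()
// before touching a peer during lazy sweeping.
class Registry : public GarbageCollectedFinalized<Registry> {
public:
    ~Registry()
    {
        // During a lazy sweep m_peer may be dead but not yet finalized;
        // only call back into it if it is provably still alive.
        if (m_peer && Heap::isFinalizedObjectAlive(m_peer))
            m_peer->unregister(this);
    }

    void trace(Visitor*) { } // m_peer is deliberately left untraced.

private:
    Observer* m_peer; // Illustrative untraced back reference.
};
```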
| 821 // Push a trace callback on the marking stack. | 881 // Push a trace callback on the marking stack. |
| 822 static void pushTraceCallback(void* containerObject, TraceCallback); | 882 static void pushTraceCallback(void* containerObject, TraceCallback); |
| 823 | 883 |
| 824 // Push a trace callback on the post-marking callback stack. These | 884 // Push a trace callback on the post-marking callback stack. These |
| 825 // callbacks are called after normal marking (including ephemeron | 885 // callbacks are called after normal marking (including ephemeron |
| 826 // iteration). | 886 // iteration). |
| 827 static void pushPostMarkingCallback(void*, TraceCallback); | 887 static void pushPostMarkingCallback(void*, TraceCallback); |
| 828 | 888 |
| 829 // Add a weak pointer callback to the weak callback work list. General | 889 // Add a weak pointer callback to the weak callback work list. General |
| 830 // object pointer callbacks are added to a thread local weak callback work | 890 // object pointer callbacks are added to a thread local weak callback work |
| (...skipping 352 matching lines...) | |
| 1183 void* operator new(size_t, NotNullTag, void*) = delete; \ | 1243 void* operator new(size_t, NotNullTag, void*) = delete; \ |
| 1184 void* operator new(size_t, void*) = delete; | 1244 void* operator new(size_t, void*) = delete; |
| 1185 | 1245 |
| 1186 #define GC_PLUGIN_IGNORE(bug) \ | 1246 #define GC_PLUGIN_IGNORE(bug) \ |
| 1187 __attribute__((annotate("blink_gc_plugin_ignore"))) | 1247 __attribute__((annotate("blink_gc_plugin_ignore"))) |
| 1188 #else | 1248 #else |
| 1189 #define STACK_ALLOCATED() DISALLOW_ALLOCATION() | 1249 #define STACK_ALLOCATED() DISALLOW_ALLOCATION() |
| 1190 #define GC_PLUGIN_IGNORE(bug) | 1250 #define GC_PLUGIN_IGNORE(bug) |
| 1191 #endif | 1251 #endif |
| 1192 | 1252 |
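STACK_ALLOCATED() above combines two enforcement layers when the gc plugin is enabled: every form of operator new is deleted, and the annotate attribute lets blink_gc_plugin check the class statically; without the plugin it falls back to plain DISALLOW_ALLOCATION(). A small usage sketch (MarkingScope is a made-up class, not from this file):

```cpp
// Made-up example of a stack-only helper class. Heap-allocating it fails
// twice over: operator new is deleted, and blink_gc_plugin flags the
// annotation when the plugin is enabled.
class MarkingScope {
    STACK_ALLOCATED();
public:
    explicit MarkingScope(ThreadState* state) : m_state(state) { }

private:
    // Raw pointers to heap objects are acceptable in STACK_ALLOCATED
    // classes: conservative stack scanning keeps their referents alive.
    ThreadState* m_state;
};

// MarkingScope scope(state);   // OK: automatic (stack) storage.
// new MarkingScope(state);     // Compile error: operator new is deleted.
```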
| 1193 // Mask an address down to the enclosing oilpan heap base page. All oilpan heap | |
| 1194 // pages are aligned at blinkPageBase plus an OS page size. | |
| 1195 // FIXME: Remove PLATFORM_EXPORT once we get a proper public interface to our | |
| 1196 // typed heaps. This is only exported to enable tests in HeapTest.cpp. | |
| 1197 PLATFORM_EXPORT inline BaseHeapPage* pageFromObject(const void* object) | |
| 1198 { | |
| 1199 Address address = reinterpret_cast<Address>(const_cast<void*>(object)); | |
| 1200 BaseHeapPage* page = reinterpret_cast<BaseHeapPage*>(blinkPageAddress(address) + WTF::kSystemPageSize); | |
| 1201 ASSERT(page->contains(address)); | |
| 1202 return page; | |
| 1203 } | |
| 1204 | |
| 1205 NO_SANITIZE_ADDRESS inline | 1253 NO_SANITIZE_ADDRESS inline |
| 1206 size_t HeapObjectHeader::size() const | 1254 size_t HeapObjectHeader::size() const |
| 1207 { | 1255 { |
| 1208 size_t result = m_encoded & headerSizeMask; | 1256 size_t result = m_encoded & headerSizeMask; |
| 1209 // Large objects should not refer to header->size(). | 1257 // Large objects should not refer to header->size(). |
| 1210 // The actual size of a large object is stored in | 1258 // The actual size of a large object is stored in |
| 1211 // LargeObject::m_payloadSize. | 1259 // LargeObject::m_payloadSize. |
| 1212 ASSERT(result != largeObjectSizeInHeader); | 1260 ASSERT(result != largeObjectSizeInHeader); |
| 1213 ASSERT(!pageFromObject(this)->isLargeObject()); | 1261 ASSERT(!pageFromObject(this)->isLargeObject()); |
| 1214 return result; | 1262 return result; |
| (...skipping 1181 matching lines...) | |
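For context on HeapObjectHeader::size() above: the object size is packed into a bit field of m_encoded, and a reserved sentinel in that field marks large objects, whose true size lives in LargeObject::m_payloadSize instead. A sketch of such an encoding with assumed mask and sentinel values (the real constants are headerSizeMask and largeObjectSizeInHeader, whose values this patch does not show):

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>

// Assumed layout: allocation sizes are 8-byte aligned, so the low bits of
// the encoded word are free for flags. All values are illustrative only.
constexpr uint32_t kHeaderSizeMask = 0x3fff8;
constexpr uint32_t kLargeObjectSizeInHeader = 0; // Assumed sentinel value.
constexpr uint32_t kHeaderMarkBit = 0x1;         // Example flag bit.

struct ExampleHeader {
    uint32_t m_encoded;

    size_t size() const
    {
        size_t result = m_encoded & kHeaderSizeMask;
        // Large objects must consult LargeObject::m_payloadSize instead.
        assert(result != kLargeObjectSizeInHeader);
        return result;
    }
};

int main()
{
    ExampleHeader header = { 64 | kHeaderMarkBit }; // 64-byte object, marked.
    assert(header.size() == 64); // Flag bits fall outside the size mask.
    return 0;
}
```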
| 2396 template<typename T, size_t inlineCapacity> | 2444 template<typename T, size_t inlineCapacity> |
| 2397 struct GCInfoTrait<HeapVector<T, inlineCapacity>> : public GCInfoTrait<Vector<T, inlineCapacity, HeapAllocator>> { }; | 2445 struct GCInfoTrait<HeapVector<T, inlineCapacity>> : public GCInfoTrait<Vector<T, inlineCapacity, HeapAllocator>> { }; |
| 2398 template<typename T, size_t inlineCapacity> | 2446 template<typename T, size_t inlineCapacity> |
| 2399 struct GCInfoTrait<HeapDeque<T, inlineCapacity>> : public GCInfoTrait<Deque<T, inlineCapacity, HeapAllocator>> { }; | 2447 struct GCInfoTrait<HeapDeque<T, inlineCapacity>> : public GCInfoTrait<Deque<T, inlineCapacity, HeapAllocator>> { }; |
| 2400 template<typename T, typename U, typename V> | 2448 template<typename T, typename U, typename V> |
| 2401 struct GCInfoTrait<HeapHashCountedSet<T, U, V>> : public GCInfoTrait<HashCountedSet<T, U, V, HeapAllocator>> { }; | 2449 struct GCInfoTrait<HeapHashCountedSet<T, U, V>> : public GCInfoTrait<HashCountedSet<T, U, V, HeapAllocator>> { }; |
| 2402 | 2450 |
| 2403 } // namespace blink | 2451 } // namespace blink |
| 2404 | 2452 |
| 2405 #endif // Heap_h | 2453 #endif // Heap_h |