OLD | NEW |
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 113 matching lines...)
124 { | 124 { |
125 return (allocationGranularity - (sizeof(Header) % allocationGranularity)) % allocationGranularity; | 125 return (allocationGranularity - (sizeof(Header) % allocationGranularity)) % allocationGranularity; |
126 } | 126 } |
127 | 127 |
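
The modulo arithmetic above computes the padding inserted after a Header so that the payload lands on an allocation-granularity boundary; the outer % zeroes the padding when the header size is already a multiple of the granularity. A standalone sketch with illustrative sizes (the real Header types and allocationGranularity are defined elsewhere in this file):

    #include <cassert>
    #include <cstddef>

    // Padding after a header of headerSize bytes so the payload starts on a
    // granularity boundary; the outer % yields 0 for an already-aligned header.
    size_t headerPadding(size_t headerSize, size_t granularity)
    {
        return (granularity - (headerSize % granularity)) % granularity;
    }

    int main()
    {
        assert(headerPadding(16, 8) == 0); // aligned: no padding needed
        assert(headerPadding(20, 8) == 4); // 20 + 4 == 24, a multiple of 8
        assert(headerPadding(1, 8) == 7);
        return 0;
    }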
128 // Masks an address down to the enclosing blink page base address. | 128 // Masks an address down to the enclosing blink page base address. |
129 inline Address blinkPageAddress(Address address) | 129 inline Address blinkPageAddress(Address address) |
130 { | 130 { |
131 return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address) & blinkPageBaseMask); | 131 return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address) & blinkPageBaseMask); |
132 } | 132 } |
133 | 133 |
134 #ifndef NDEBUG | 134 #if ENABLE(ASSERT) |
135 | 135 |
136 // Sanity check for a page header address: the address of the page | 136 // Sanity check for a page header address: the address of the page |
137 // header should be OS page size away from being Blink page size | 137 // header should be OS page size away from being Blink page size |
138 // aligned. | 138 // aligned. |
139 inline bool isPageHeaderAddress(Address address) | 139 inline bool isPageHeaderAddress(Address address) |
140 { | 140 { |
141 return !((reinterpret_cast<uintptr_t>(address) & blinkPageOffsetMask) - osPageSize()); | 141 return !((reinterpret_cast<uintptr_t>(address) & blinkPageOffsetMask) - osPageSize()); |
142 } | 142 } |
143 #endif | 143 #endif |
144 | 144 |
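
Both helpers above are plain mask arithmetic: a Blink page base is recovered by clearing the offset bits, and a page header address is sane only if it sits exactly one OS page past a Blink page boundary. A self-contained sketch under assumed constants (128 KiB Blink pages, 4 KiB OS pages; the actual values come from elsewhere in this header):

    #include <cassert>
    #include <cstdint>

    const uintptr_t kBlinkPageSize = uintptr_t(1) << 17;      // assumed: 128 KiB
    const uintptr_t kBlinkPageOffsetMask = kBlinkPageSize - 1;
    const uintptr_t kBlinkPageBaseMask = ~kBlinkPageOffsetMask;
    const uintptr_t kOsPageSize = 4096;                       // assumed: 4 KiB

    // Clearing the offset bits masks any interior address down to its page base.
    uintptr_t blinkPageAddress(uintptr_t address) { return address & kBlinkPageBaseMask; }

    // A page header lives one OS page past the Blink page boundary, so its
    // offset within the Blink page must equal the OS page size exactly.
    bool isPageHeaderAddress(uintptr_t address)
    {
        return !((address & kBlinkPageOffsetMask) - kOsPageSize);
    }

    int main()
    {
        uintptr_t base = 5 * kBlinkPageSize; // some Blink-page-aligned base
        assert(blinkPageAddress(base + 12345) == base);
        assert(isPageHeaderAddress(base + kOsPageSize));
        assert(!isPageHeaderAddress(base));
        return 0;
    }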
(...skipping 131 matching lines...)
276 // | 276 // |
277 // Object memory layout: | 277 // Object memory layout: |
278 // [ LargeObjectHeader | ] [ FinalizedObjectHeader | ] HeapObjectHeader | payload | 278 // [ LargeObjectHeader | ] [ FinalizedObjectHeader | ] HeapObjectHeader | payload |
279 // The [ ] notation denotes that the LargeObjectHeader and the FinalizedObjectHeader | 279 // The [ ] notation denotes that the LargeObjectHeader and the FinalizedObjectHeader |
280 // are independently optional. | 280 // are independently optional. |
281 class PLATFORM_EXPORT HeapObjectHeader : public BasicObjectHeader { | 281 class PLATFORM_EXPORT HeapObjectHeader : public BasicObjectHeader { |
282 public: | 282 public: |
283 NO_SANITIZE_ADDRESS | 283 NO_SANITIZE_ADDRESS |
284 explicit HeapObjectHeader(size_t encodedSize) | 284 explicit HeapObjectHeader(size_t encodedSize) |
285 : BasicObjectHeader(encodedSize) | 285 : BasicObjectHeader(encodedSize) |
286 #ifndef NDEBUG | 286 #if ENABLE(ASSERT) |
287 , m_magic(magic) | 287 , m_magic(magic) |
288 #endif | 288 #endif |
289 { } | 289 { } |
290 | 290 |
291 NO_SANITIZE_ADDRESS | 291 NO_SANITIZE_ADDRESS |
292 HeapObjectHeader(size_t encodedSize, const GCInfo*) | 292 HeapObjectHeader(size_t encodedSize, const GCInfo*) |
293 : BasicObjectHeader(encodedSize) | 293 : BasicObjectHeader(encodedSize) |
294 #ifndef NDEBUG | 294 #if ENABLE(ASSERT) |
295 , m_magic(magic) | 295 , m_magic(magic) |
296 #endif | 296 #endif |
297 { } | 297 { } |
298 | 298 |
299 inline void checkHeader() const; | 299 inline void checkHeader() const; |
300 inline bool isMarked() const; | 300 inline bool isMarked() const; |
301 | 301 |
302 inline void mark(); | 302 inline void mark(); |
303 inline void unmark(); | 303 inline void unmark(); |
304 | 304 |
(...skipping 15 matching lines...)
320 static void finalize(const GCInfo*, Address, size_t); | 320 static void finalize(const GCInfo*, Address, size_t); |
321 static HeapObjectHeader* fromPayload(const void*); | 321 static HeapObjectHeader* fromPayload(const void*); |
322 | 322 |
323 static const intptr_t magic = 0xc0de247; | 323 static const intptr_t magic = 0xc0de247; |
324 static const intptr_t zappedMagic = 0xC0DEdead; | 324 static const intptr_t zappedMagic = 0xC0DEdead; |
325 // The zap value for vtables should be < 4K to ensure it cannot be | 325 // The zap value for vtables should be < 4K to ensure it cannot be |
326 // used for dispatch. | 326 // used for dispatch. |
327 static const intptr_t zappedVTable = 0xd0d; | 327 static const intptr_t zappedVTable = 0xd0d; |
328 | 328 |
329 private: | 329 private: |
330 #ifndef NDEBUG | 330 #if ENABLE(ASSERT) |
331 intptr_t m_magic; | 331 intptr_t m_magic; |
332 #endif | 332 #endif |
333 }; | 333 }; |
334 | 334 |
335 const size_t objectHeaderSize = sizeof(HeapObjectHeader); | 335 const size_t objectHeaderSize = sizeof(HeapObjectHeader); |
336 | 336 |
337 // Each object on the GeneralHeap needs to carry a pointer to its | 337 // Each object on the GeneralHeap needs to carry a pointer to its |
338 // own GCInfo structure for tracing and potential finalization. | 338 // own GCInfo structure for tracing and potential finalization. |
339 class PLATFORM_EXPORT FinalizedHeapObjectHeader : public HeapObjectHeader { | 339 class PLATFORM_EXPORT FinalizedHeapObjectHeader : public HeapObjectHeader { |
340 public: | 340 public: |
(...skipping 29 matching lines...)
370 | 370 |
371 const size_t finalizedHeaderSize = sizeof(FinalizedHeapObjectHeader); | 371 const size_t finalizedHeaderSize = sizeof(FinalizedHeapObjectHeader); |
372 | 372 |
373 class FreeListEntry : public HeapObjectHeader { | 373 class FreeListEntry : public HeapObjectHeader { |
374 public: | 374 public: |
375 NO_SANITIZE_ADDRESS | 375 NO_SANITIZE_ADDRESS |
376 explicit FreeListEntry(size_t size) | 376 explicit FreeListEntry(size_t size) |
377 : HeapObjectHeader(freeListEncodedSize(size)) | 377 : HeapObjectHeader(freeListEncodedSize(size)) |
378 , m_next(0) | 378 , m_next(0) |
379 { | 379 { |
380 #if !defined(NDEBUG) && !defined(ADDRESS_SANITIZER) | 380 #if ENABLE(ASSERT) && !defined(ADDRESS_SANITIZER) |
381 // Zap free area with asterisks, aka 0x2a2a2a2a. | 381 // Zap free area with asterisks, aka 0x2a2a2a2a. |
382 // For ASan don't zap since we keep accounting in the freelist entry. | 382 // For ASan don't zap since we keep accounting in the freelist entry. |
383 for (size_t i = sizeof(*this); i < size; i++) | 383 for (size_t i = sizeof(*this); i < size; i++) |
384 reinterpret_cast<Address>(this)[i] = freelistZapValue; | 384 reinterpret_cast<Address>(this)[i] = freelistZapValue; |
385 ASSERT(size >= objectHeaderSize); | 385 ASSERT(size >= objectHeaderSize); |
386 zapMagic(); | 386 zapMagic(); |
387 #endif | 387 #endif |
388 } | 388 } |
389 | 389 |
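
The zap value 0x2a is ASCII '*', which is why the comment calls it "asterisks": a zapped free range is immediately recognizable in a memory dump. A tiny standalone illustration (not Blink code):

    #include <cassert>
    #include <cstring>

    int main()
    {
        const unsigned char freelistZapValue = 42; // 0x2a == ASCII '*'
        char block[16];
        memset(block, freelistZapValue, sizeof(block));
        // The zapped area reads as "****..." in a debugger or hex dump.
        assert(memcmp(block, "****************", sizeof(block)) == 0);
        return 0;
    }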
390 Address address() { return reinterpret_cast<Address>(this); } | 390 Address address() { return reinterpret_cast<Address>(this); } |
(...skipping 316 matching lines...)
707 | 707 |
708 private: | 708 private: |
709 Mutex m_mutex[NumberOfHeaps]; | 709 Mutex m_mutex[NumberOfHeaps]; |
710 }; | 710 }; |
711 | 711 |
712 class OrphanedPagePool : public PagePool<BaseHeapPage> { | 712 class OrphanedPagePool : public PagePool<BaseHeapPage> { |
713 public: | 713 public: |
714 ~OrphanedPagePool(); | 714 ~OrphanedPagePool(); |
715 void addOrphanedPage(int, BaseHeapPage*); | 715 void addOrphanedPage(int, BaseHeapPage*); |
716 void decommitOrphanedPages(); | 716 void decommitOrphanedPages(); |
717 #ifndef NDEBUG | 717 #if ENABLE(ASSERT) |
718 bool contains(void*); | 718 bool contains(void*); |
719 #endif | 719 #endif |
720 private: | 720 private: |
721 void clearMemory(PageMemory*); | 721 void clearMemory(PageMemory*); |
722 }; | 722 }; |
723 | 723 |
724 // The CallbackStack contains all the visitor callbacks used to trace and mark | 724 // The CallbackStack contains all the visitor callbacks used to trace and mark |
725 // objects. A specific CallbackStack instance contains at most bufferSize elements. | 725 // objects. A specific CallbackStack instance contains at most bufferSize elements. |
726 // If more space is needed a new CallbackStack instance is created and chained | 726 // If more space is needed a new CallbackStack instance is created and chained |
727 // together with the former instance. I.e. a logical CallbackStack can be made of | 727 // together with the former instance. I.e. a logical CallbackStack can be made of |
728 // multiple chained CallbackStack object instances. | 728 // multiple chained CallbackStack object instances. |
729 // There are two logical callback stacks. One containing all the marking callbacks and | 729 // There are two logical callback stacks. One containing all the marking callbacks and |
730 // one containing the weak pointer callbacks. | 730 // one containing the weak pointer callbacks. |
731 class CallbackStack { | 731 class CallbackStack { |
732 public: | 732 public: |
733 CallbackStack(CallbackStack** first) | 733 CallbackStack(CallbackStack** first) |
734 : m_limit(&(m_buffer[bufferSize])) | 734 : m_limit(&(m_buffer[bufferSize])) |
735 , m_current(&(m_buffer[0])) | 735 , m_current(&(m_buffer[0])) |
736 , m_next(*first) | 736 , m_next(*first) |
737 { | 737 { |
738 #ifndef NDEBUG | 738 #if ENABLE(ASSERT) |
739 clearUnused(); | 739 clearUnused(); |
740 #endif | 740 #endif |
741 *first = this; | 741 *first = this; |
742 } | 742 } |
743 | 743 |
744 ~CallbackStack(); | 744 ~CallbackStack(); |
745 void clearUnused(); | 745 void clearUnused(); |
746 | 746 |
747 bool isEmpty(); | 747 bool isEmpty(); |
748 | 748 |
(...skipping 25 matching lines...)
774 template<CallbackInvocationMode Mode> bool popAndInvokeCallback(CallbackStack** first, Visitor*); | 774 template<CallbackInvocationMode Mode> bool popAndInvokeCallback(CallbackStack** first, Visitor*); |
775 static void invokeCallbacks(CallbackStack** first, Visitor*); | 775 static void invokeCallbacks(CallbackStack** first, Visitor*); |
776 | 776 |
777 Item* allocateEntry(CallbackStack** first) | 777 Item* allocateEntry(CallbackStack** first) |
778 { | 778 { |
779 if (m_current < m_limit) | 779 if (m_current < m_limit) |
780 return m_current++; | 780 return m_current++; |
781 return (new CallbackStack(first))->allocateEntry(first); | 781 return (new CallbackStack(first))->allocateEntry(first); |
782 } | 782 } |
783 | 783 |
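
allocateEntry above is the essence of the chained design described in the class comment: bump within the current buffer, and when it is full, link a fresh CallbackStack at the head of the chain and allocate there. A simplified sketch of that segmented-stack pattern (single-threaded demo with a made-up Item and a deliberately tiny buffer):

    #include <cassert>
    #include <cstddef>

    struct Item { void* object; void* callback; };

    class SegmentedStack {
    public:
        explicit SegmentedStack(SegmentedStack** first)
            : m_current(&m_buffer[0])
            , m_limit(&m_buffer[kBufferSize])
            , m_next(*first)
        {
            *first = this; // the new block becomes the head of the chain
        }

        // Bump-allocate in this block; grow the chain when full.
        Item* allocateEntry(SegmentedStack** first)
        {
            if (m_current < m_limit)
                return m_current++;
            return (new SegmentedStack(first))->allocateEntry(first);
        }

    private:
        static const size_t kBufferSize = 4; // tiny, to force chaining in the demo
        Item m_buffer[kBufferSize];
        Item* m_current;
        Item* m_limit;
        SegmentedStack* m_next;
    };

    int main()
    {
        SegmentedStack* head = 0;
        new SegmentedStack(&head); // initial block registers itself as head
        for (int i = 0; i < 10; ++i)
            assert(head->allocateEntry(&head));
        // 10 entries with kBufferSize == 4 forced two additional chained blocks.
        return 0; // blocks deliberately leaked; the real class has a destructor
    }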
784 #ifndef NDEBUG | 784 #if ENABLE(ASSERT) |
785 bool hasCallbackForObject(const void*); | 785 bool hasCallbackForObject(const void*); |
786 #endif | 786 #endif |
787 | 787 |
788 private: | 788 private: |
789 void invokeOldestCallbacks(Visitor*); | 789 void invokeOldestCallbacks(Visitor*); |
790 | 790 |
791 static const size_t bufferSize = 8000; | 791 static const size_t bufferSize = 8000; |
792 Item m_buffer[bufferSize]; | 792 Item m_buffer[bufferSize]; |
793 Item* m_limit; | 793 Item* m_limit; |
794 Item* m_current; | 794 Item* m_current; |
(...skipping 14 matching lines...)
809 #if ENABLE(GC_TRACING) | 809 #if ENABLE(GC_TRACING) |
810 virtual const GCInfo* findGCInfoOfLargeHeapObject(Address) = 0; | 810 virtual const GCInfo* findGCInfoOfLargeHeapObject(Address) = 0; |
811 #endif | 811 #endif |
812 | 812 |
813 // Sweep this part of the Blink heap. This finalizes dead objects | 813 // Sweep this part of the Blink heap. This finalizes dead objects |
814 // and builds freelists for all the unused memory. | 814 // and builds freelists for all the unused memory. |
815 virtual void sweep() = 0; | 815 virtual void sweep() = 0; |
816 | 816 |
817 virtual void clearFreeLists() = 0; | 817 virtual void clearFreeLists() = 0; |
818 virtual void clearLiveAndMarkDead() = 0; | 818 virtual void clearLiveAndMarkDead() = 0; |
819 #ifndef NDEBUG | 819 #if ENABLE(ASSERT) |
820 virtual void getScannedStats(HeapStats&) = 0; | 820 virtual void getScannedStats(HeapStats&) = 0; |
821 #endif | 821 #endif |
822 | 822 |
823 virtual void makeConsistentForGC() = 0; | 823 virtual void makeConsistentForGC() = 0; |
824 virtual bool isConsistentForGC() = 0; | 824 virtual bool isConsistentForGC() = 0; |
825 | 825 |
826 virtual void prepareHeapForTermination() = 0; | 826 virtual void prepareHeapForTermination() = 0; |
827 | 827 |
828 // Returns a bucket number for inserting a FreeListEntry of a | 828 // Returns a bucket number for inserting a FreeListEntry of a |
829 // given size. All FreeListEntries in the given bucket, n, have | 829 // given size. All FreeListEntries in the given bucket, n, have |
(...skipping 18 matching lines...)
848 virtual ~ThreadHeap(); | 848 virtual ~ThreadHeap(); |
849 virtual void cleanupPages(); | 849 virtual void cleanupPages(); |
850 | 850 |
851 virtual BaseHeapPage* heapPageFromAddress(Address); | 851 virtual BaseHeapPage* heapPageFromAddress(Address); |
852 #if ENABLE(GC_TRACING) | 852 #if ENABLE(GC_TRACING) |
853 virtual const GCInfo* findGCInfoOfLargeHeapObject(Address); | 853 virtual const GCInfo* findGCInfoOfLargeHeapObject(Address); |
854 #endif | 854 #endif |
855 virtual void sweep(); | 855 virtual void sweep(); |
856 virtual void clearFreeLists(); | 856 virtual void clearFreeLists(); |
857 virtual void clearLiveAndMarkDead(); | 857 virtual void clearLiveAndMarkDead(); |
858 #ifndef NDEBUG | 858 #if ENABLE(ASSERT) |
859 virtual void getScannedStats(HeapStats&); | 859 virtual void getScannedStats(HeapStats&); |
860 #endif | 860 #endif |
861 | 861 |
862 virtual void makeConsistentForGC(); | 862 virtual void makeConsistentForGC(); |
863 virtual bool isConsistentForGC(); | 863 virtual bool isConsistentForGC(); |
864 | 864 |
865 ThreadState* threadState() { return m_threadState; } | 865 ThreadState* threadState() { return m_threadState; } |
866 HeapStats& stats() { return m_threadState->stats(); } | 866 HeapStats& stats() { return m_threadState->stats(); } |
867 void flushHeapContainsCache() | 867 void flushHeapContainsCache() |
868 { | 868 { |
(...skipping 50 matching lines...)
919 | 919 |
920 class PLATFORM_EXPORT Heap { | 920 class PLATFORM_EXPORT Heap { |
921 public: | 921 public: |
922 static void init(); | 922 static void init(); |
923 static void shutdown(); | 923 static void shutdown(); |
924 static void doShutdown(); | 924 static void doShutdown(); |
925 | 925 |
926 static BaseHeapPage* contains(Address); | 926 static BaseHeapPage* contains(Address); |
927 static BaseHeapPage* contains(void* pointer) { return contains(reinterpret_cast<Address>(pointer)); } | 927 static BaseHeapPage* contains(void* pointer) { return contains(reinterpret_cast<Address>(pointer)); } |
928 static BaseHeapPage* contains(const void* pointer) { return contains(const_cast<void*>(pointer)); } | 928 static BaseHeapPage* contains(const void* pointer) { return contains(const_cast<void*>(pointer)); } |
929 #ifndef NDEBUG | 929 #if ENABLE(ASSERT) |
930 static bool containedInHeapOrOrphanedPage(void*); | 930 static bool containedInHeapOrOrphanedPage(void*); |
931 #endif | 931 #endif |
932 | 932 |
933 // Push a trace callback on the marking stack. | 933 // Push a trace callback on the marking stack. |
934 static void pushTraceCallback(void* containerObject, TraceCallback); | 934 static void pushTraceCallback(void* containerObject, TraceCallback); |
935 | 935 |
936 // Add a weak pointer callback to the weak callback work list. General | 936 // Add a weak pointer callback to the weak callback work list. General |
937 // object pointer callbacks are added to a thread local weak callback work | 937 // object pointer callbacks are added to a thread local weak callback work |
938 // list and the callback is called on the thread that owns the object, with | 938 // list and the callback is called on the thread that owns the object, with |
939 // the closure pointer as an argument. Most of the time, the closure and | 939 // the closure pointer as an argument. Most of the time, the closure and |
(...skipping 12 matching lines...)
952 // and the object. Returns false when there is nothing more to do. | 952 // and the object. Returns false when there is nothing more to do. |
953 template<CallbackInvocationMode Mode> static bool popAndInvokeTraceCallback(Visitor*); | 953 template<CallbackInvocationMode Mode> static bool popAndInvokeTraceCallback(Visitor*); |
954 | 954 |
955 // Remove an item from the weak callback work list and call the callback | 955 // Remove an item from the weak callback work list and call the callback |
956 // with the visitor and the closure pointer. Returns false when there is | 956 // with the visitor and the closure pointer. Returns false when there is |
957 // nothing more to do. | 957 // nothing more to do. |
958 static bool popAndInvokeWeakPointerCallback(Visitor*); | 958 static bool popAndInvokeWeakPointerCallback(Visitor*); |
959 | 959 |
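
Given the documented contract (returns false once the work list is empty), the expected call pattern is a drain loop during weak processing. A hedged sketch of such a call site; processWeakCallbacks is hypothetical, only Heap::popAndInvokeWeakPointerCallback is from this header:

    // Hypothetical call site: drain the weak callback work list until it
    // reports that there is nothing more to do.
    void processWeakCallbacks(Visitor* visitor)
    {
        while (Heap::popAndInvokeWeakPointerCallback(visitor)) {
            // Each iteration pops one entry and invokes its callback with
            // the visitor and the closure pointer.
        }
    }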
960 // Register an ephemeron table for fixed-point iteration. | 960 // Register an ephemeron table for fixed-point iteration. |
961 static void registerWeakTable(void* containerObject, EphemeronCallback, EphemeronCallback); | 961 static void registerWeakTable(void* containerObject, EphemeronCallback, EphemeronCallback); |
962 #ifndef NDEBUG | 962 #if ENABLE(ASSERT) |
963 static bool weakTableRegistered(const void*); | 963 static bool weakTableRegistered(const void*); |
964 #endif | 964 #endif |
965 | 965 |
966 template<typename T> static Address allocate(size_t); | 966 template<typename T> static Address allocate(size_t); |
967 template<typename T> static Address reallocate(void* previous, size_t); | 967 template<typename T> static Address reallocate(void* previous, size_t); |
968 | 968 |
969 static void collectGarbage(ThreadState::StackState); | 969 static void collectGarbage(ThreadState::StackState); |
970 static void collectGarbageForTerminatingThread(ThreadState*); | 970 static void collectGarbageForTerminatingThread(ThreadState*); |
971 static void collectAllGarbage(); | 971 static void collectAllGarbage(); |
972 template<CallbackInvocationMode Mode> static void traceRootsAndPerformGlobalWeakProcessing(); | 972 template<CallbackInvocationMode Mode> static void traceRootsAndPerformGlobalWeakProcessing(); |
(...skipping 347 matching lines...)
1320 #define GC_PLUGIN_IGNORE(bug) \ | 1320 #define GC_PLUGIN_IGNORE(bug) \ |
1321 __attribute__((annotate("blink_gc_plugin_ignore"))) | 1321 __attribute__((annotate("blink_gc_plugin_ignore"))) |
1322 #else | 1322 #else |
1323 #define STACK_ALLOCATED() DISALLOW_ALLOCATION() | 1323 #define STACK_ALLOCATED() DISALLOW_ALLOCATION() |
1324 #define GC_PLUGIN_IGNORE(bug) | 1324 #define GC_PLUGIN_IGNORE(bug) |
1325 #endif | 1325 #endif |
1326 | 1326 |
1327 NO_SANITIZE_ADDRESS | 1327 NO_SANITIZE_ADDRESS |
1328 void HeapObjectHeader::checkHeader() const | 1328 void HeapObjectHeader::checkHeader() const |
1329 { | 1329 { |
1330 #ifndef NDEBUG | 1330 #if ENABLE(ASSERT) |
1331 BaseHeapPage* page = pageHeaderFromObject(this); | 1331 BaseHeapPage* page = pageHeaderFromObject(this); |
1332 ASSERT(page->orphaned() || m_magic == magic); | 1332 ASSERT(page->orphaned() || m_magic == magic); |
1333 #endif | 1333 #endif |
1334 } | 1334 } |
1335 | 1335 |
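
checkHeader is representative of what this CL changes throughout the file: debug-only state and checks are now gated on ENABLE(ASSERT) rather than directly on NDEBUG, so assert support can be toggled independently of the build's NDEBUG setting. An illustrative stand-in for the macro plumbing (not the actual wtf/Assertions.h definitions):

    #include <cstddef>

    // Assumed, simplified macro scheme: asserts default off in NDEBUG builds
    // but can be forced on by predefining ENABLE_ASSERT=1, which a bare
    // "#ifndef NDEBUG" cannot express.
    #ifndef ENABLE_ASSERT
    #ifdef NDEBUG
    #define ENABLE_ASSERT 0
    #else
    #define ENABLE_ASSERT 1
    #endif
    #endif
    #define ENABLE(FEATURE) ENABLE_##FEATURE

    struct DemoHeader {
    #if ENABLE(ASSERT)
        int m_magic; // present exactly when asserts are enabled
    #endif
        size_t m_size;
    };

    int main()
    {
        DemoHeader header;
        header.m_size = 0;
        (void)header;
        return 0;
    }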
1336 Address HeapObjectHeader::payload() | 1336 Address HeapObjectHeader::payload() |
1337 { | 1337 { |
1338 return reinterpret_cast<Address>(this) + objectHeaderSize; | 1338 return reinterpret_cast<Address>(this) + objectHeaderSize; |
1339 } | 1339 } |
1340 | 1340 |
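
Since payload() places the object exactly objectHeaderSize bytes past its header, fromPayload (declared earlier) must be the inverse offset computation. A round-trip sketch under that layout assumption, with a stand-in header type:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    typedef unsigned char* Address;

    struct DemoHeader { uintptr_t encodedSize; };
    const size_t kHeaderSize = sizeof(DemoHeader);

    // Mirror of payload(): the object starts right after its header.
    Address payload(DemoHeader* header)
    {
        return reinterpret_cast<Address>(header) + kHeaderSize;
    }

    // Mirror of fromPayload(): step back over the header.
    DemoHeader* fromPayload(void* object)
    {
        return reinterpret_cast<DemoHeader*>(static_cast<Address>(object) - kHeaderSize);
    }

    int main()
    {
        unsigned char storage[64];
        DemoHeader* header = reinterpret_cast<DemoHeader*>(storage);
        assert(fromPayload(payload(header)) == header); // round-trips exactly
        return 0;
    }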
(...skipping 52 matching lines...)
1393 Address headerAddress = m_currentAllocationPoint; | 1393 Address headerAddress = m_currentAllocationPoint; |
1394 m_currentAllocationPoint += allocationSize; | 1394 m_currentAllocationPoint += allocationSize; |
1395 m_remainingAllocationSize -= allocationSize; | 1395 m_remainingAllocationSize -= allocationSize; |
1396 Header* header = new (NotNull, headerAddress) Header(allocationSize, gcInfo); | 1396 Header* header = new (NotNull, headerAddress) Header(allocationSize, gcInfo); |
1397 size_t payloadSize = allocationSize - sizeof(Header); | 1397 size_t payloadSize = allocationSize - sizeof(Header); |
1398 stats().increaseObjectSpace(payloadSize); | 1398 stats().increaseObjectSpace(payloadSize); |
1399 Address result = headerAddress + sizeof(*header); | 1399 Address result = headerAddress + sizeof(*header); |
1400 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); | 1400 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); |
1401 // Unpoison the memory used for the object (payload). | 1401 // Unpoison the memory used for the object (payload). |
1402 ASAN_UNPOISON_MEMORY_REGION(result, payloadSize); | 1402 ASAN_UNPOISON_MEMORY_REGION(result, payloadSize); |
1403 #if !defined(NDEBUG) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) | 1403 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) |
1404 memset(result, 0, payloadSize); | 1404 memset(result, 0, payloadSize); |
1405 #endif | 1405 #endif |
1406 ASSERT(heapPageFromAddress(headerAddress + allocationSize - 1)); | 1406 ASSERT(heapPageFromAddress(headerAddress + allocationSize - 1)); |
1407 return result; | 1407 return result; |
1408 } | 1408 } |
1409 | 1409 |
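
The allocation fast path above is a classic bump-pointer allocator: grab the current allocation point, advance it by the allocation size, placement-new the header, and hand back the address just past the header. A stripped-down sketch of the same sequence (no stats, ASan poisoning, or page lookup; DemoHeader is a made-up stand-in):

    #include <cassert>
    #include <cstddef>
    #include <new>

    typedef unsigned char* Address;

    struct DemoHeader {
        explicit DemoHeader(size_t size) : size(size) {}
        size_t size;
    };

    class BumpRegion {
    public:
        BumpRegion(Address start, size_t bytes)
            : m_current(start), m_remaining(bytes) {}

        // Returns the payload address, or 0 when the region is exhausted.
        Address allocate(size_t allocationSize)
        {
            if (allocationSize > m_remaining)
                return 0;
            Address headerAddress = m_current;
            m_current += allocationSize;            // bump the allocation point
            m_remaining -= allocationSize;
            new (headerAddress) DemoHeader(allocationSize);
            return headerAddress + sizeof(DemoHeader); // payload follows header
        }

    private:
        Address m_current;
        size_t m_remaining;
    };

    int main()
    {
        unsigned char arena[256];
        BumpRegion region(arena, sizeof(arena));
        Address first = region.allocate(32);
        Address second = region.allocate(32);
        assert(second == first + 32); // consecutive objects are contiguous
        return 0;
    }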
1410 // FIXME: Allocate objects that do not need finalization separately | 1410 // FIXME: Allocate objects that do not need finalization separately |
1411 // and use separate sweeping to not have to check for finalizers. | 1411 // and use separate sweeping to not have to check for finalizers. |
1412 template<typename T> | 1412 template<typename T> |
1413 Address Heap::allocate(size_t size) | 1413 Address Heap::allocate(size_t size) |
(...skipping 103 matching lines...)
1517 static void registerWeakMembers(Visitor* visitor, const void* closure, const void* object, WeakPointerCallback callback) | 1517 static void registerWeakMembers(Visitor* visitor, const void* closure, const void* object, WeakPointerCallback callback) |
1518 { | 1518 { |
1519 visitor->registerWeakMembers(closure, object, callback); | 1519 visitor->registerWeakMembers(closure, object, callback); |
1520 } | 1520 } |
1521 | 1521 |
1522 static void registerWeakTable(Visitor* visitor, const void* closure, EphemeronCallback iterationCallback, EphemeronCallback iterationDoneCallback) | 1522 static void registerWeakTable(Visitor* visitor, const void* closure, EphemeronCallback iterationCallback, EphemeronCallback iterationDoneCallback) |
1523 { | 1523 { |
1524 visitor->registerWeakTable(closure, iterationCallback, iterationDoneCallback); | 1524 visitor->registerWeakTable(closure, iterationCallback, iterationDoneCallback); |
1525 } | 1525 } |
1526 | 1526 |
1527 #ifndef NDEBUG | 1527 #if ENABLE(ASSERT) |
1528 static bool weakTableRegistered(Visitor* visitor, const void* closure) | 1528 static bool weakTableRegistered(Visitor* visitor, const void* closure) |
1529 { | 1529 { |
1530 return visitor->weakTableRegistered(closure); | 1530 return visitor->weakTableRegistered(closure); |
1531 } | 1531 } |
1532 #endif | 1532 #endif |
1533 | 1533 |
1534 template<typename T> | 1534 template<typename T> |
1535 struct ResultType { | 1535 struct ResultType { |
1536 typedef T* Type; | 1536 typedef T* Type; |
1537 }; | 1537 }; |
(...skipping 753 matching lines...)
2291 COMPILE_ASSERT(!WTF::IsWeak<T>::value, WeDontSupportWeaknessInHeapVectorsOrDeques); | 2291 COMPILE_ASSERT(!WTF::IsWeak<T>::value, WeDontSupportWeaknessInHeapVectorsOrDeques); |
2292 if (WTF::ShouldBeTraced<Traits>::value) | 2292 if (WTF::ShouldBeTraced<Traits>::value) |
2293 WTF::TraceInCollectionTrait<WTF::NoWeakHandlingInCollections, WTF::WeakPointersActWeak, HeapVectorBacking<T, Traits>, void>::trace(visitor, self); | 2293 WTF::TraceInCollectionTrait<WTF::NoWeakHandlingInCollections, WTF::WeakPointersActWeak, HeapVectorBacking<T, Traits>, void>::trace(visitor, self); |
2294 } | 2294 } |
2295 static void mark(Visitor* visitor, const Backing* backing) | 2295 static void mark(Visitor* visitor, const Backing* backing) |
2296 { | 2296 { |
2297 visitor->mark(backing, &trace); | 2297 visitor->mark(backing, &trace); |
2298 } | 2298 } |
2299 static void checkGCInfo(Visitor* visitor, const Backing* backing) | 2299 static void checkGCInfo(Visitor* visitor, const Backing* backing) |
2300 { | 2300 { |
2301 #ifndef NDEBUG | 2301 #if ENABLE(ASSERT) |
2302 visitor->checkGCInfo(const_cast<Backing*>(backing), GCInfoTrait<Backing>::get()); | 2302 visitor->checkGCInfo(const_cast<Backing*>(backing), GCInfoTrait<Backing>::get()); |
2303 #endif | 2303 #endif |
2304 } | 2304 } |
2305 }; | 2305 }; |
2306 | 2306 |
2307 // The trace trait for the heap hashtable backing is used when we find a | 2307 // The trace trait for the heap hashtable backing is used when we find a |
2308 // direct pointer to the backing from the conservative stack scanner. This | 2308 // direct pointer to the backing from the conservative stack scanner. This |
2309 // normally indicates that there is an ongoing iteration over the table, and so | 2309 // normally indicates that there is an ongoing iteration over the table, and so |
2310 // we disable weak processing of table entries. When the backing is found | 2310 // we disable weak processing of table entries. When the backing is found |
2311 // through the owning hash table we mark differently, in order to do weak | 2311 // through the owning hash table we mark differently, in order to do weak |
2312 // processing. | 2312 // processing. |
2313 template<typename Table> | 2313 template<typename Table> |
2314 struct TraceTrait<HeapHashTableBacking<Table> > { | 2314 struct TraceTrait<HeapHashTableBacking<Table> > { |
2315 typedef HeapHashTableBacking<Table> Backing; | 2315 typedef HeapHashTableBacking<Table> Backing; |
2316 typedef typename Table::ValueTraits Traits; | 2316 typedef typename Table::ValueTraits Traits; |
2317 static void trace(Visitor* visitor, void* self) | 2317 static void trace(Visitor* visitor, void* self) |
2318 { | 2318 { |
2319 if (WTF::ShouldBeTraced<Traits>::value || Traits::weakHandlingFlag == WTF::WeakHandlingInCollections) | 2319 if (WTF::ShouldBeTraced<Traits>::value || Traits::weakHandlingFlag == WTF::WeakHandlingInCollections) |
2320 WTF::TraceInCollectionTrait<WTF::NoWeakHandlingInCollections, WTF::WeakPointersActStrong, Backing, void>::trace(visitor, self); | 2320 WTF::TraceInCollectionTrait<WTF::NoWeakHandlingInCollections, WTF::WeakPointersActStrong, Backing, void>::trace(visitor, self); |
2321 } | 2321 } |
2322 static void mark(Visitor* visitor, const Backing* backing) | 2322 static void mark(Visitor* visitor, const Backing* backing) |
2323 { | 2323 { |
2324 if (WTF::ShouldBeTraced<Traits>::value || Traits::weakHandlingFlag == WTF::WeakHandlingInCollections) | 2324 if (WTF::ShouldBeTraced<Traits>::value || Traits::weakHandlingFlag == WTF::WeakHandlingInCollections) |
2325 visitor->mark(backing, &trace); | 2325 visitor->mark(backing, &trace); |
2326 else | 2326 else |
2327 visitor->markNoTracing(backing); // If we know the trace function will do nothing there is no need to call it. | 2327 visitor->markNoTracing(backing); // If we know the trace function will do nothing there is no need to call it. |
2328 } | 2328 } |
2329 static void checkGCInfo(Visitor* visitor, const Backing* backing) | 2329 static void checkGCInfo(Visitor* visitor, const Backing* backing) |
2330 { | 2330 { |
2331 #ifndef NDEBUG | 2331 #if ENABLE(ASSERT) |
2332 visitor->checkGCInfo(const_cast<Backing*>(backing), GCInfoTrait<Backing>::get()); | 2332 visitor->checkGCInfo(const_cast<Backing*>(backing), GCInfoTrait<Backing>::get()); |
2333 #endif | 2333 #endif |
2334 } | 2334 } |
2335 }; | 2335 }; |
2336 | 2336 |
2337 template<typename Table> | 2337 template<typename Table> |
2338 void HeapHashTableBacking<Table>::finalize(void* pointer) | 2338 void HeapHashTableBacking<Table>::finalize(void* pointer) |
2339 { | 2339 { |
2340 typedef typename Table::ValueType Value; | 2340 typedef typename Table::ValueType Value; |
2341 ASSERT(Table::ValueTraits::needsDestruction); | 2341 ASSERT(Table::ValueTraits::needsDestruction); |
(...skipping 33 matching lines...)
2375 }; | 2375 }; |
2376 | 2376 |
2377 template<typename T> | 2377 template<typename T> |
2378 struct IfWeakMember<WeakMember<T> > { | 2378 struct IfWeakMember<WeakMember<T> > { |
2379 static bool isDead(Visitor* visitor, const WeakMember<T>& t) { return !visitor->isAlive(t.get()); } | 2379 static bool isDead(Visitor* visitor, const WeakMember<T>& t) { return !visitor->isAlive(t.get()); } |
2380 }; | 2380 }; |
2381 | 2381 |
2382 } | 2382 } |
2383 | 2383 |
2384 #endif // Heap_h | 2384 #endif // Heap_h |