Chromium Code Reviews

Side by Side Diff: Source/platform/heap/Heap.h

Issue 717923005: Profile FreeList
Base URL: svn://svn.chromium.org/blink/trunk
Patch Set: Rebased (created 5 years, 11 months ago)
/*
 * Copyright (C) 2013 Google Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
(...skipping 74 matching lines...)
#else
#define FILL_ZERO_IF_PRODUCTION(address, size) memset((address), 0, (size))
#define FILL_ZERO_IF_NOT_PRODUCTION(address, size) do { } while (false)
#endif

class CallbackStack;
class PageMemory;
template<ThreadAffinity affinity> class ThreadLocalPersistents;
template<typename T, typename RootsAccessor = ThreadLocalPersistents<ThreadingTrait<T>::Affinity>> class Persistent;

-#if ENABLE(GC_PROFILE_HEAP)
+#if ENABLE(GC_PROFILE_HEAP) || ENABLE(GC_PROFILE_FREE_LIST)
class TracedValue;
#endif

// HeapObjectHeader is 4 byte (32 bit) that has the following layout:
//
// | gcInfoIndex (15 bit) | size (14 bit) | dead bit (1 bit) | freed bit (1 bit) | mark bit (1 bit) |
//
// - For non-large objects, 14 bit is enough for |size| because the blink
//   page size is 2^17 byte and each object is guaranteed to be aligned with
//   2^3 byte.
(...skipping 12 matching lines...)
// them via a conservatively found pointer. Tracing dead objects could lead to
// tracing of already finalized objects in another thread's heap which is a
// use-after-free situation.
const size_t headerDeadBitMask = 4;
// On free-list entries we reuse the dead bit to distinguish a normal free-list
// entry from one that has been promptly freed.
const size_t headerPromptlyFreedBitMask = headerFreedBitMask | headerDeadBitMask;
const size_t largeObjectSizeInHeader = 0;
const size_t gcInfoIndexForFreeListHeader = 0;
const size_t nonLargeObjectSizeMax = 1 << 17;
-#if ENABLE(GC_PROFILE_HEAP)
+//#if ENABLE(GC_PROFILE_HEAP)
const size_t maxHeapObjectAge = 7;
-#endif
+//#endif

static_assert(nonLargeObjectSizeMax >= blinkPageSize, "max size supported by HeapObjectHeader must at least be blinkPageSize");
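
For reference, a minimal sketch of how the 32-bit encoded field can be packed under the layout documented above. The bit positions are inferred from the layout comment and from headerDeadBitMask == 4 (mark bit 0, freed bit 1, dead bit 2, size in bits 3-16, gcInfoIndex in bits 17-31); the real encode/decode code lives in lines elided from this diff.

    // Illustrative only; not the shipped implementation.
    inline uint32_t encodeHeaderSketch(size_t size, size_t gcInfoIndex)
    {
        // size is 8-byte aligned, so its low 3 bits are zero and the three
        // flag bits can share the low end of the word; size < 2^17 keeps it
        // within bits 3..16.
        return static_cast<uint32_t>((gcInfoIndex << 17) | size);
    }

    inline size_t sizeFromEncodedSketch(uint32_t encoded)
    {
        // Mask off gcInfoIndex (bits 17..31) and the flag bits (bits 0..2).
        return (encoded & ((1u << 17) - 1)) & ~7u;
    }

This also makes the 14-bit claim above concrete: 2^17 bytes per page divided by the 2^3-byte alignment leaves at most 2^14 distinct size values.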

class PLATFORM_EXPORT HeapObjectHeader {
public:
    // If gcInfoIndex is 0, this header is interpreted as a free list header.
    NO_SANITIZE_ADDRESS
    HeapObjectHeader(size_t size, size_t gcInfoIndex)
    {
#if ENABLE(ASSERT)
(...skipping 46 matching lines...)
    // GC.
    void zapMagic();
#endif

    void finalize(Address, size_t);
    static HeapObjectHeader* fromPayload(const void*);

    static const uint16_t magic = 0xfff1;
    static const uint16_t zappedMagic = 0x4321;

-#if ENABLE(GC_PROFILE_HEAP)
+//#if ENABLE(GC_PROFILE_HEAP)
    NO_SANITIZE_ADDRESS
    size_t encodedSize() const { return m_encoded; }

    NO_SANITIZE_ADDRESS
    size_t age() const { return m_age; }

    NO_SANITIZE_ADDRESS
    void incAge()
    {
        if (m_age < maxHeapObjectAge)
            m_age++;
    }
-#endif
+//#endif

#if !ENABLE(ASSERT) && !ENABLE(GC_PROFILE_HEAP) && CPU(64BIT)
    // This method is needed just to avoid compilers from removing m_padding.
    uint64_t unusedMethod() const { return m_padding; }
#endif

private:
    uint32_t m_encoded;
#if ENABLE(ASSERT)
    uint16_t m_magic;
#endif
-#if ENABLE(GC_PROFILE_HEAP)
+//#if ENABLE(GC_PROFILE_HEAP)
    uint8_t m_age;
-#endif
+//#endif

    // In 64 bit architectures, we intentionally add 4 byte padding immediately
    // after the HeapHeaderObject. This is because:
    //
    // | HeapHeaderObject (4 byte) | padding (4 byte) | object payload (8 * n byte) |
    // ^8 byte aligned                                ^8 byte aligned
    //
    // is better than:
    //
    // | HeapHeaderObject (4 byte) | object payload (8 * n byte) | padding (4 byte) |
(...skipping 140 matching lines...)
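
A minimal sketch of the alignment argument made in the padding comment above; the struct name is illustrative, not from this file.

    #include <stdint.h>

    struct PaddedHeaderSketch {
        uint32_t m_encoded; // the 4-byte header
        uint32_t m_padding; // present on 64-bit builds
    };
    // The payload starts at offset 8, so an allocation that begins on an
    // 8-byte boundary also yields an 8-byte aligned payload.
    static_assert(sizeof(PaddedHeaderSketch) == 8, "payload offset stays 8-byte aligned");

With the padding placed after the payload instead, the payload would start at offset 4 and lose its 8-byte alignment.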
    // Check if the given address points to an object in this
    // heap page. If so, find the start of that object and mark it
    // using the given Visitor. Otherwise do nothing. The pointer must
    // be within the same aligned blinkPageSize as the this-pointer.
    //
    // This is used during conservative stack scanning to
    // conservatively mark all objects that could be referenced from
    // the stack.
    virtual void checkAndMarkPointer(Visitor*, Address) = 0;
    virtual void markOrphaned();
+
#if ENABLE(GC_PROFILE_MARKING)
    virtual const GCInfo* findGCInfo(Address) = 0;
#endif
#if ENABLE(GC_PROFILE_HEAP)
    virtual void snapshot(TracedValue*, ThreadState::SnapshotInfo*) = 0;
#endif
-#if ENABLE(ASSERT)
+#if ENABLE(ASSERT) || ENABLE(GC_PROFILE_MARKING)
    virtual bool contains(Address) = 0;
#endif
    virtual size_t size() = 0;
    virtual bool isLargeObject() { return false; }

    Address address() { return reinterpret_cast<Address>(this); }
    PageMemory* storage() const { return m_storage; }
    ThreadHeap* heap() const { return m_heap; }
    bool orphaned() { return !m_heap; }
    bool terminating() { return m_terminating; }
(...skipping 72 matching lines...)
#endif
        memset(payload(), orphanedZapValue, payloadSize());
        BaseHeapPage::markOrphaned();
    }
#if ENABLE(GC_PROFILE_MARKING)
    const GCInfo* findGCInfo(Address) override;
#endif
#if ENABLE(GC_PROFILE_HEAP)
    virtual void snapshot(TracedValue*, ThreadState::SnapshotInfo*);
#endif
-#if ENABLE(ASSERT)
+#if ENABLE(ASSERT) || ENABLE(GC_PROFILE_MARKING)
    // Returns true for the whole blinkPageSize page that the page is on, even
    // for the header, and the unmapped guard page at the start. That ensures
    // the result can be used to populate the negative page cache.
    virtual bool contains(Address addr) override
    {
        Address blinkPageStart = roundToBlinkPageStart(address());
        ASSERT(blinkPageStart == address() - WTF::kSystemPageSize); // Page is at aligned address plus guard page size.
        return blinkPageStart <= addr && addr < blinkPageStart + blinkPageSize;
    }
#endif
    virtual size_t size() override { return blinkPageSize; }

    HeapPage* next() { return m_next; }

    void clearObjectStartBitMap();

+    void countUnmarkedObjects();
+
#if defined(ADDRESS_SANITIZER)
    void poisonUnmarkedObjects();
#endif

    // This method is needed just to avoid compilers from removing m_padding.
    uint64_t unusedMethod() const { return m_padding; }

private:
    HeapObjectHeader* findHeaderFromAddress(Address);
    void populateObjectStartBitMap();
(...skipping 32 matching lines...)
    virtual void markUnmarkedObjectsDead() override;
    virtual void checkAndMarkPointer(Visitor*, Address) override;
    virtual void markOrphaned() override
    {
        // Zap the payload with a recognizable value to detect any incorrect
        // cross thread pointer usage.
        memset(payload(), orphanedZapValue, payloadSize());
        BaseHeapPage::markOrphaned();
    }
#if ENABLE(GC_PROFILE_MARKING)
-    virtual const GCInfo* findGCInfo(Address address) override
-    {
-        if (!objectContains(address))
-            return nullptr;
-        return gcInfo();
-    }
+    virtual const GCInfo* findGCInfo(Address);
#endif
#if ENABLE(GC_PROFILE_HEAP)
    virtual void snapshot(TracedValue*, ThreadState::SnapshotInfo*) override;
#endif
-#if ENABLE(ASSERT)
+#if ENABLE(ASSERT) || ENABLE(GC_PROFILE_MARKING)
    // Returns true for any address that is on one of the pages that this
    // large object uses. That ensures that we can use a negative result to
    // populate the negative page cache.
    virtual bool contains(Address object) override
    {
        return roundToBlinkPageStart(address()) <= object && object < roundToBlinkPageEnd(address() + size());
    }
#endif
    virtual size_t size()
    {
(...skipping 131 matching lines...)
private:
    void clearMemory(PageMemory*);
};

class FreeList {
public:
    FreeList();

    void addToFreeList(Address, size_t);
    void clear();
+    FreeListEntry* takeEntry(size_t allocationSize);
+
+#if ENABLE(GC_PROFILE_FREE_LIST)
+    void countBucketSizes(size_t[], size_t[], size_t* freeSize) const;
+#endif

    // Returns a bucket number for inserting a FreeListEntry of a given size.
    // All FreeListEntries in the given bucket, n, have size >= 2^n.
    static int bucketIndexForSize(size_t);
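
A plausible sketch of bucketIndexForSize consistent with the comment above (the shipped definition lives in Heap.cpp and may differ): the bucket index is the position of the highest set bit, so every entry in bucket n has size >= 2^n.

    int bucketIndexForSizeSketch(size_t size)
    {
        // A zero size has no bucket; the real code can assert size > 0.
        int index = -1;
        while (size) {
            size >>= 1;
            ++index;
        }
        return index; // e.g. size 96 (binary 1100000) lands in bucket 6, and 96 >= 2^6
    }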

private:
    int m_biggestFreeListIndex;

    // All FreeListEntries in the nth list have size >= 2^n.
    FreeListEntry* m_freeLists[blinkPageSizeLog2];
(...skipping 10 matching lines...)
// Each thread heap contains the functionality to allocate new objects
// (potentially adding new pages to the heap), to find and mark
// objects during conservative stack scanning and to sweep the set of
// pages after a GC.
class PLATFORM_EXPORT ThreadHeap final {
public:
    ThreadHeap(ThreadState*, int);
    ~ThreadHeap();
    void cleanupPages();

-#if ENABLE(ASSERT)
+#if ENABLE(ASSERT) || ENABLE(GC_PROFILE_MARKING)
    BaseHeapPage* findPageFromAddress(Address);
#endif
+#if ENABLE(GC_PROFILE_FREE_LIST)
+    virtual void snapshotFreeList(TracedValue*);
+#endif
#if ENABLE(GC_PROFILE_HEAP)
    void snapshot(TracedValue*, ThreadState::SnapshotInfo*);
#endif

    void clearFreeLists();
    void makeConsistentForSweeping();
#if ENABLE(ASSERT)
    bool isConsistentForSweeping();
#endif
    size_t objectPayloadSizeForTesting();
(...skipping 46 matching lines...)
    bool pagesToBeSweptContains(Address);
#endif

    bool coalesce();
    void preparePagesForSweeping();

    Address m_currentAllocationPoint;
    size_t m_remainingAllocationSize;
    size_t m_lastRemainingAllocationSize;

+    double m_totalAllocationSize;
+    size_t m_allocationCount;
+    size_t m_inlineAllocationCount;
+
    HeapPage* m_firstPage;
    LargeObject* m_firstLargeObject;
    HeapPage* m_firstUnsweptPage;
    LargeObject* m_firstUnsweptLargeObject;

    ThreadState* m_threadState;

    FreeList m_freeList;

    // Index into the page pools. This is used to ensure that the pages of the
    // same type go into the correct page pool and thus avoid type confusion.
    int m_index;

    // The size of promptly freed objects in the heap.
    size_t m_promptlyFreedSize;
+
+#if ENABLE(GC_PROFILE_FREE_LIST)
+    size_t m_allocationPointSizeSum = 0;
+    size_t m_setAllocationPointCount = 0;
+#endif
};

// Mask an address down to the enclosing oilpan heap base page. All oilpan heap
// pages are aligned at blinkPageBase plus an OS page size.
// FIXME: Remove PLATFORM_EXPORT once we get a proper public interface to our
// typed heaps. This is only exported to enable tests in HeapTest.cpp.
PLATFORM_EXPORT inline BaseHeapPage* pageFromObject(const void* object)
{
    Address address = reinterpret_cast<Address>(const_cast<void*>(object));
    BaseHeapPage* page = reinterpret_cast<BaseHeapPage*>(blinkPageAddress(address) + WTF::kSystemPageSize);
    ASSERT(page->contains(address));
    return page;
}
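
A worked example of the masking above, assuming blinkPageSize == 2^17 (stated earlier in this header) and a hypothetical 4 KB OS page; the concrete WTF::kSystemPageSize is platform dependent.

    // object address:           0x5000A1C8
    // blinkPageAddress(object): 0x50000000  (low 17 bits cleared)
    // BaseHeapPage* header:     0x50001000  (base + one system page, here 4 KB)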

class PLATFORM_EXPORT Heap {
public:
    static void init();
    static void shutdown();
    static void doShutdown();

-#if ENABLE(ASSERT)
+#if ENABLE(ASSERT) || ENABLE(GC_PROFILE_MARKING)
    static BaseHeapPage* findPageFromAddress(Address);
    static BaseHeapPage* findPageFromAddress(void* pointer) { return findPageFromAddress(reinterpret_cast<Address>(pointer)); }
    static bool containedInHeapOrOrphanedPage(void*);
#endif

    // Is the finalizable GC object still alive, but slated for lazy sweeping?
    // If a lazy sweep is in progress, returns true if the object was found
    // to be not reachable during the marking phase, but it has yet to be swept
    // and finalized. The predicate returns false in all other cases.
    //
(...skipping 70 matching lines...)
    static void collectAllGarbage();

    static void processMarkingStack(Visitor*);
    static void postMarkingProcessing(Visitor*);
    static void globalWeakProcessing(Visitor*);
    static void setForcePreciseGCForTesting();

    static void preGC();
    static void postGC(ThreadState::GCType);

+    static void reportSweepingStats();
+
    // Conservatively checks whether an address is a pointer in any of the
    // thread heaps. If so marks the object pointed to as live.
    static Address checkAndMarkPointer(Visitor*, Address);

#if ENABLE(GC_PROFILE_MARKING)
    // Dump the path to specified object on the next GC. This method is to be
    // invoked from GDB.
    static void dumpPathToObjectOnNextGC(void* p);

    // Forcibly find GCInfo of the object at Address. This is slow and should
(...skipping 387 matching lines...)

    // Add space for header.
    size_t allocationSize = size + sizeof(HeapObjectHeader);
    // Align size with allocation granularity.
    allocationSize = (allocationSize + allocationMask) & ~allocationMask;
    return allocationSize;
}
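
A worked example of the rounding above, assuming sizeof(HeapObjectHeader) == 4 (the 32-bit header documented earlier) and an 8-byte allocation granularity, i.e. allocationMask == 7:

    // size = 21  ->  21 + 4 = 25  ->  (25 + 7) & ~7 = 32
    // size = 20  ->  20 + 4 = 24  ->  (24 + 7) & ~7 = 24  (already a multiple of 8)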

Address ThreadHeap::allocateObject(size_t allocationSize, size_t gcInfoIndex)
{
+    m_totalAllocationSize += allocationSize;
+    m_allocationCount++;
    if (LIKELY(allocationSize <= m_remainingAllocationSize)) {
+        m_inlineAllocationCount++;
        Address headerAddress = m_currentAllocationPoint;
        m_currentAllocationPoint += allocationSize;
        m_remainingAllocationSize -= allocationSize;
        ASSERT(gcInfoIndex > 0);
        new (NotNull, headerAddress) HeapObjectHeader(allocationSize, gcInfoIndex);
        Address result = headerAddress + sizeof(HeapObjectHeader);
        ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask));

        // Unpoison the memory used for the object (payload).
        ASAN_UNPOISON_MEMORY_REGION(result, allocationSize - sizeof(HeapObjectHeader));
(...skipping 1076 matching lines...)
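
The three counters added above are enough to measure how often the bump-pointer fast path is taken. A hypothetical reporting helper, not part of this patch (the patch declares reportSweepingStats, whose body is not shown here), might look like:

    #include <stdio.h>
    #include <stddef.h>

    void dumpAllocationCountersSketch(const char* heapName, double totalSize,
        size_t count, size_t inlineCount)
    {
        // Inline ratio: fraction of allocations served straight from the
        // current allocation point, bypassing the free list.
        double inlineRatio = count ? 100.0 * inlineCount / count : 0.0;
        fprintf(stderr, "%s: %zu allocations (%.1f%% inline), %.0f bytes total\n",
            heapName, count, inlineRatio, totalSize);
    }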
template<typename T, size_t inlineCapacity>
struct GCInfoTrait<HeapVector<T, inlineCapacity>> : public GCInfoTrait<Vector<T, inlineCapacity, HeapAllocator>> { };
template<typename T, size_t inlineCapacity>
struct GCInfoTrait<HeapDeque<T, inlineCapacity>> : public GCInfoTrait<Deque<T, inlineCapacity, HeapAllocator>> { };
template<typename T, typename U, typename V>
struct GCInfoTrait<HeapHashCountedSet<T, U, V>> : public GCInfoTrait<HashCountedSet<T, U, V, HeapAllocator>> { };

} // namespace blink

#endif // Heap_h