OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 98 matching lines...) | |
109 #define CHECK_MEMORY_INACCESSIBLE(address, size) \ | 109 #define CHECK_MEMORY_INACCESSIBLE(address, size) \ |
110 ASAN_UNPOISON_MEMORY_REGION(address, size); \ | 110 ASAN_UNPOISON_MEMORY_REGION(address, size); \ |
111 FreeList::checkFreedMemoryIsZapped(address, size); \ | 111 FreeList::checkFreedMemoryIsZapped(address, size); \ |
112 ASAN_POISON_MEMORY_REGION(address, size) | 112 ASAN_POISON_MEMORY_REGION(address, size) |
113 #else | 113 #else |
114 #define SET_MEMORY_INACCESSIBLE(address, size) memset((address), 0, (size)) | 114 #define SET_MEMORY_INACCESSIBLE(address, size) memset((address), 0, (size)) |
115 #define SET_MEMORY_ACCESSIBLE(address, size) do { } while (false) | 115 #define SET_MEMORY_ACCESSIBLE(address, size) do { } while (false) |
116 #define CHECK_MEMORY_INACCESSIBLE(address, size) do { } while (false) | 116 #define CHECK_MEMORY_INACCESSIBLE(address, size) do { } while (false) |
117 #endif | 117 #endif |
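
For context on this macro block: in a non-ASan build, SET_MEMORY_INACCESSIBLE zeroes the freed range and CHECK_MEMORY_INACCESSIBLE is a no-op, while the ASan build above additionally poisons/unpoisons the region around the zap check. A minimal standalone model of the non-ASan zap-and-check pattern follows; setMemoryInaccessible and checkFreedMemoryIsZapped here are illustrative stand-ins, not the patch's code.

#include <cassert>
#include <cstddef>
#include <cstdlib>
#include <cstring>

// Hypothetical stand-in for SET_MEMORY_INACCESSIBLE in a non-ASan build:
// freed memory is zeroed so that any later write becomes detectable.
static void setMemoryInaccessible(void* address, size_t size)
{
    memset(address, 0, size);
}

// Hypothetical stand-in for FreeList::checkFreedMemoryIsZapped: verify that
// nothing has written to the range since it was made inaccessible.
static void checkFreedMemoryIsZapped(const void* address, size_t size)
{
    const unsigned char* bytes = static_cast<const unsigned char*>(address);
    for (size_t i = 0; i < size; ++i)
        assert(!bytes[i]); // a non-zero byte would indicate a use-after-free
}

int main()
{
    void* block = malloc(64);
    setMemoryInaccessible(block, 64);
    checkFreedMemoryIsZapped(block, 64); // passes: the range is still zapped
    free(block);
    return 0;
}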
118 | 118 |
119 #if !ENABLE(ASSERT) && CPU(64BIT) | |
120 #define USE_4BYTE_HEADER_PADDING 1 | |
121 #else | |
122 #define USE_4BYTE_HEADER_PADDING 0 | |
123 #endif | |
124 | |
125 class CallbackStack; | 119 class CallbackStack; |
126 class FreePagePool; | 120 class FreePagePool; |
127 class NormalPageHeap; | 121 class NormalPageHeap; |
128 class OrphanedPagePool; | 122 class OrphanedPagePool; |
129 class PageMemory; | 123 class PageMemory; |
130 class PageMemoryRegion; | 124 class PageMemoryRegion; |
131 class WebProcessMemoryDump; | 125 class WebProcessMemoryDump; |
132 | 126 |
133 // HeapObjectHeader is 4 bytes (32 bits) and has the following layout: | 127 // HeapObjectHeader is 4 bytes (32 bits) and has the following layout: |
134 // | 128 // |
(...skipping 26 matching lines...) | |
161 const size_t largeObjectSizeInHeader = 0; | 155 const size_t largeObjectSizeInHeader = 0; |
162 const size_t gcInfoIndexForFreeListHeader = 0; | 156 const size_t gcInfoIndexForFreeListHeader = 0; |
163 const size_t nonLargeObjectPageSizeMax = 1 << 17; | 157 const size_t nonLargeObjectPageSizeMax = 1 << 17; |
164 | 158 |
165 static_assert(nonLargeObjectPageSizeMax >= blinkPageSize, "max size supported by HeapObjectHeader must be at least blinkPageSize"); | 159 static_assert(nonLargeObjectPageSizeMax >= blinkPageSize, "max size supported by HeapObjectHeader must be at least blinkPageSize"); |
166 | 160 |
167 class PLATFORM_EXPORT HeapObjectHeader { | 161 class PLATFORM_EXPORT HeapObjectHeader { |
168 public: | 162 public: |
169 // If gcInfoIndex is 0, this header is interpreted as a free list header. | 163 // If gcInfoIndex is 0, this header is interpreted as a free list header. |
170 NO_SANITIZE_ADDRESS | 164 NO_SANITIZE_ADDRESS |
171 HeapObjectHeader(size_t size, size_t gcInfoIndex) | 165 HeapObjectHeader(size_t size, size_t gcInfoIndex, uint32_t generation) |
| 166 : m_gcGeneration(generation) |
172 { | 167 { |
173 #if ENABLE(ASSERT) | |
174 m_magic = magic; | |
175 #endif | |
176 // sizeof(HeapObjectHeader) must be equal to or smaller than | 168 // sizeof(HeapObjectHeader) must be equal to or smaller than |
177 // allocationGranularity, because HeapObjectHeader is used as a header | 169 // allocationGranularity, because HeapObjectHeader is used as a header |
178 // for a freed entry. Given that the smallest entry size is | 170 // for a freed entry. Given that the smallest entry size is |
179 // allocationGranularity, HeapObjectHeader must fit into the size. | 171 // allocationGranularity, HeapObjectHeader must fit into the size. |
180 static_assert(sizeof(HeapObjectHeader) <= allocationGranularity, "size of HeapObjectHeader must be smaller than allocationGranularity"); | 172 static_assert(sizeof(HeapObjectHeader) <= allocationGranularity, "size of HeapObjectHeader must be smaller than allocationGranularity"); |
181 #if CPU(64BIT) | 173 #if CPU(64BIT) |
182 static_assert(sizeof(HeapObjectHeader) == 8, "size of HeapObjectHeader must be 8 byte aligned"); | 174 static_assert(sizeof(HeapObjectHeader) == 8, "size of HeapObjectHeader must be 8 byte aligned"); |
183 #endif | 175 #endif |
184 | 176 |
185 ASSERT(gcInfoIndex < GCInfoTable::maxIndex); | 177 ASSERT(gcInfoIndex < GCInfoTable::maxIndex); |
(...skipping 19 matching lines...) Expand all Loading... | |
205 void unmark(); | 197 void unmark(); |
206 void markDead(); | 198 void markDead(); |
207 bool isDead() const; | 199 bool isDead() const; |
208 | 200 |
209 Address payload(); | 201 Address payload(); |
210 size_t payloadSize(); | 202 size_t payloadSize(); |
211 Address payloadEnd(); | 203 Address payloadEnd(); |
212 | 204 |
213 #if ENABLE(ASSERT) | 205 #if ENABLE(ASSERT) |
214 bool checkHeader() const; | 206 bool checkHeader() const; |
215 // Zap magic number with a new magic number that means there was once an | |
216 // object allocated here, but it was freed because nobody marked it during | |
217 // GC. | |
218 void zapMagic(); | |
219 #endif | 207 #endif |
| 208 NO_SANITIZE_ADDRESS |
| 209 uint32_t gcGeneration() const { return m_gcGeneration; } |
220 | 210 |
221 void finalize(Address, size_t); | 211 void finalize(Address, size_t); |
222 static HeapObjectHeader* fromPayload(const void*); | 212 static HeapObjectHeader* fromPayload(const void*); |
223 | 213 |
224 static const uint16_t magic = 0xfff1; | |
225 static const uint16_t zappedMagic = 0x4321; | |
226 | |
227 private: | 214 private: |
228 uint32_t m_encoded; | 215 uint32_t m_encoded; |
229 #if ENABLE(ASSERT) | 216 // m_gcGeneration keeps track of the number of GC cycle where the object gets |
haraken
2015/11/16 02:47:50
GC cycles
gets allocated
peria
2015/11/16 05:33:26
Done.
| |
230 uint16_t m_magic; | 217 // allocated. m_gcGeneration == 0 indicates that the object has already been freed. |
231 #endif | 218 uint32_t m_gcGeneration; |
232 | |
233 // In 64 bit architectures, we intentionally add 4 byte padding immediately | |
234 // after the HeapObjectHeader. This is because: | |
235 // | |
236 // | HeapObjectHeader (4 byte) | padding (4 byte) | object payload (8 * n byte) | | |
237 // ^8 byte aligned ^8 byte aligned | |
238 // | |
239 // is better than: | |
240 // | |
241 // | HeapObjectHeader (4 byte) | object payload (8 * n byte) | padding (4 byte) | | |
242 // ^4 byte aligned ^8 byte aligned ^4 byte aligned | |
243 // | |
244 // since the former layout aligns both header and payload to 8 bytes. | |
245 #if USE_4BYTE_HEADER_PADDING | |
246 public: | |
247 uint32_t m_padding; | |
248 #endif | |
249 }; | 219 }; |
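
Stepping back from the hunk above: the patch drops the debug-only m_magic field, the zap machinery, and the 64-bit padding trick, and instead stamps every header with a 32-bit GC generation. A rough standalone model of the resulting invariant follows; Header and currentGcGeneration are illustrative names, and generation 0 plays the role the old zappedMagic marker used to play.

#include <cassert>
#include <cstdint>

// Illustrative model: the header records the GC generation counter at
// allocation time; a stamp of 0 is reserved to mean "freed".
struct Header {
    explicit Header(uint32_t generation) : gcGeneration(generation) { }
    bool isFreed() const { return gcGeneration == 0; }
    uint32_t gcGeneration;
};

int main()
{
    uint32_t currentGcGeneration = 3;   // incremented once per GC cycle
    Header header(currentGcGeneration); // stamped when the object is allocated
    assert(!header.isFreed());
    header.gcGeneration = 0;            // the sweeper zaps the stamp on free
    assert(header.isFreed());
    return 0;
}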
250 | 220 |
251 class FreeListEntry final : public HeapObjectHeader { | 221 class FreeListEntry final : public HeapObjectHeader { |
252 public: | 222 public: |
253 NO_SANITIZE_ADDRESS | 223 NO_SANITIZE_ADDRESS |
254 explicit FreeListEntry(size_t size) | 224 explicit FreeListEntry(size_t size) |
255 : HeapObjectHeader(size, gcInfoIndexForFreeListHeader) | 225 : HeapObjectHeader(size, gcInfoIndexForFreeListHeader, 0) |
256 , m_next(nullptr) | 226 , m_next(nullptr) |
257 { | 227 { |
258 #if ENABLE(ASSERT) | 228 #if ENABLE(ASSERT) |
259 ASSERT(size >= sizeof(HeapObjectHeader)); | 229 ASSERT(size >= sizeof(HeapObjectHeader)); |
260 zapMagic(); | |
261 #endif | 230 #endif |
262 } | 231 } |
263 | 232 |
264 Address address() { return reinterpret_cast<Address>(this); } | 233 Address address() { return reinterpret_cast<Address>(this); } |
265 | 234 |
266 NO_SANITIZE_ADDRESS | 235 NO_SANITIZE_ADDRESS |
267 void unlink(FreeListEntry** prevNext) | 236 void unlink(FreeListEntry** prevNext) |
268 { | 237 { |
269 *prevNext = m_next; | 238 *prevNext = m_next; |
270 m_next = nullptr; | 239 m_next = nullptr; |
(...skipping 425 matching lines...) | |
696 ASSERT(findPageFromAddress(address + size - 1)); | 665 ASSERT(findPageFromAddress(address + size - 1)); |
697 m_freeList.addToFreeList(address, size); | 666 m_freeList.addToFreeList(address, size); |
698 } | 667 } |
699 void clearFreeLists() override; | 668 void clearFreeLists() override; |
700 #if ENABLE(ASSERT) | 669 #if ENABLE(ASSERT) |
701 bool isConsistentForGC() override; | 670 bool isConsistentForGC() override; |
702 bool pagesToBeSweptContains(Address); | 671 bool pagesToBeSweptContains(Address); |
703 #endif | 672 #endif |
704 void takeFreelistSnapshot(const String& dumpBaseName) override; | 673 void takeFreelistSnapshot(const String& dumpBaseName) override; |
705 | 674 |
706 Address allocateObject(size_t allocationSize, size_t gcInfoIndex); | 675 Address allocateObject(size_t allocationSize, size_t gcInfoIndex, uint32_t generation); |
707 | 676 |
708 void freePage(NormalPage*); | 677 void freePage(NormalPage*); |
709 | 678 |
710 bool coalesce(); | 679 bool coalesce(); |
711 void promptlyFreeObject(HeapObjectHeader*); | 680 void promptlyFreeObject(HeapObjectHeader*); |
712 bool expandObject(HeapObjectHeader*, size_t); | 681 bool expandObject(HeapObjectHeader*, size_t); |
713 bool shrinkObject(HeapObjectHeader*, size_t); | 682 bool shrinkObject(HeapObjectHeader*, size_t); |
714 void decreasePromptlyFreedSize(size_t size) { m_promptlyFreedSize -= size; } | 683 void decreasePromptlyFreedSize(size_t size) { m_promptlyFreedSize -= size; } |
715 | 684 |
716 bool isObjectAllocatedAtAllocationPoint(HeapObjectHeader* header) | 685 bool isObjectAllocatedAtAllocationPoint(HeapObjectHeader* header) |
(...skipping 59 matching lines...) | |
776 // LargeObjectPage::m_payloadSize. | 745 // LargeObjectPage::m_payloadSize. |
777 ASSERT(result != largeObjectSizeInHeader); | 746 ASSERT(result != largeObjectSizeInHeader); |
778 ASSERT(!pageFromObject(this)->isLargeObjectPage()); | 747 ASSERT(!pageFromObject(this)->isLargeObjectPage()); |
779 return result; | 748 return result; |
780 } | 749 } |
781 | 750 |
782 #if ENABLE(ASSERT) | 751 #if ENABLE(ASSERT) |
783 NO_SANITIZE_ADDRESS inline | 752 NO_SANITIZE_ADDRESS inline |
784 bool HeapObjectHeader::checkHeader() const | 753 bool HeapObjectHeader::checkHeader() const |
785 { | 754 { |
786 return !pageFromObject(this)->orphaned() && m_magic == magic; | 755 ASSERT(isFree() == (m_gcGeneration == 0)); |
haraken
2015/11/16 02:47:50
Looks much nicer than the predicate I suggested!
peria
2015/11/16 05:33:26
:)
| |
756 return !pageFromObject(this)->orphaned(); | |
787 } | 757 } |
788 #endif | 758 #endif |
789 | 759 |
790 inline Address HeapObjectHeader::payload() | 760 inline Address HeapObjectHeader::payload() |
791 { | 761 { |
792 return reinterpret_cast<Address>(this) + sizeof(HeapObjectHeader); | 762 return reinterpret_cast<Address>(this) + sizeof(HeapObjectHeader); |
793 } | 763 } |
794 | 764 |
795 inline Address HeapObjectHeader::payloadEnd() | 765 inline Address HeapObjectHeader::payloadEnd() |
796 { | 766 { |
(...skipping 51 matching lines...) | |
848 } | 818 } |
849 | 819 |
850 NO_SANITIZE_ADDRESS inline | 820 NO_SANITIZE_ADDRESS inline |
851 void HeapObjectHeader::markDead() | 821 void HeapObjectHeader::markDead() |
852 { | 822 { |
853 ASSERT(checkHeader()); | 823 ASSERT(checkHeader()); |
854 ASSERT(!isMarked()); | 824 ASSERT(!isMarked()); |
855 m_encoded |= headerDeadBitMask; | 825 m_encoded |= headerDeadBitMask; |
856 } | 826 } |
857 | 827 |
858 inline Address NormalPageHeap::allocateObject(size_t allocationSize, size_t gcInfoIndex) | 828 inline Address NormalPageHeap::allocateObject(size_t allocationSize, size_t gcInfoIndex, uint32_t generation) |
haraken
2015/11/16 02:47:50
Why do we need to pass the generation parameter to
peria
2015/11/16 05:33:26
This is just a dependency problem of .h files.
We
| |
859 { | 829 { |
860 if (LIKELY(allocationSize <= m_remainingAllocationSize)) { | 830 if (LIKELY(allocationSize <= m_remainingAllocationSize)) { |
861 Address headerAddress = m_currentAllocationPoint; | 831 Address headerAddress = m_currentAllocationPoint; |
862 m_currentAllocationPoint += allocationSize; | 832 m_currentAllocationPoint += allocationSize; |
863 m_remainingAllocationSize -= allocationSize; | 833 m_remainingAllocationSize -= allocationSize; |
864 ASSERT(gcInfoIndex > 0); | 834 ASSERT(gcInfoIndex > 0); |
865 new (NotNull, headerAddress) HeapObjectHeader(allocationSize, gcInfoIndex); | 835 new (NotNull, headerAddress) HeapObjectHeader(allocationSize, gcInfoIndex, generation); |
866 Address result = headerAddress + sizeof(HeapObjectHeader); | 836 Address result = headerAddress + sizeof(HeapObjectHeader); |
867 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); | 837 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); |
868 | 838 |
869 SET_MEMORY_ACCESSIBLE(result, allocationSize - sizeof(HeapObjectHeader)); | 839 SET_MEMORY_ACCESSIBLE(result, allocationSize - sizeof(HeapObjectHeader)); |
870 ASSERT(findPageFromAddress(headerAddress + allocationSize - 1)); | 840 ASSERT(findPageFromAddress(headerAddress + allocationSize - 1)); |
871 return result; | 841 return result; |
872 } | 842 } |
873 return outOfLineAllocate(allocationSize, gcInfoIndex); | 843 return outOfLineAllocate(allocationSize, gcInfoIndex); |
874 } | 844 } |
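
The fast path in allocateObject() above is plain bump-pointer allocation. A simplified, self-contained sketch of the same control flow follows; BumpAllocator and the nullptr fallback are illustrative, and the real code constructs a HeapObjectHeader in place and calls outOfLineAllocate on the slow path.

#include <cassert>
#include <cstddef>

static const size_t kHeaderSize = 8; // sizeof(HeapObjectHeader) on 64 bit

// Simplified model of the fast path: carve header + payload out of the
// current allocation point; fall back when the request doesn't fit.
struct BumpAllocator {
    char* currentAllocationPoint;
    size_t remainingAllocationSize;

    void* allocate(size_t allocationSize)
    {
        if (allocationSize > remainingAllocationSize)
            return nullptr; // stands in for outOfLineAllocate()
        char* headerAddress = currentAllocationPoint;
        currentAllocationPoint += allocationSize;
        remainingAllocationSize -= allocationSize;
        // The real code placement-news a HeapObjectHeader at headerAddress;
        // the payload returned to the caller starts right after it.
        return headerAddress + kHeaderSize;
    }
};

int main()
{
    static char arena[128];
    BumpAllocator allocator = { arena, sizeof(arena) };
    void* payload = allocator.allocate(32);
    assert(payload == arena + kHeaderSize);      // payload follows the header
    assert(allocator.remainingAllocationSize == 96);
    assert(!allocator.allocate(1024));           // too large: slow path would run
    return 0;
}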
875 | 845 |
876 } // namespace blink | 846 } // namespace blink |
877 | 847 |
878 #endif // HeapPage_h | 848 #endif // HeapPage_h |