Chromium Code Reviews

Side by Side Diff: third_party/WebKit/Source/platform/heap/HeapPage.h

Issue 1411603007: [Oilpan] Add use-after-free detector in Member<> Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Rebase Created 5 years ago
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 98 matching lines...)
109 #define CHECK_MEMORY_INACCESSIBLE(address, size) \ 109 #define CHECK_MEMORY_INACCESSIBLE(address, size) \
110 ASAN_UNPOISON_MEMORY_REGION(address, size); \ 110 ASAN_UNPOISON_MEMORY_REGION(address, size); \
111 FreeList::checkFreedMemoryIsZapped(address, size); \ 111 FreeList::checkFreedMemoryIsZapped(address, size); \
112 ASAN_POISON_MEMORY_REGION(address, size) 112 ASAN_POISON_MEMORY_REGION(address, size)
113 #else 113 #else
114 #define SET_MEMORY_INACCESSIBLE(address, size) memset((address), 0, (size)) 114 #define SET_MEMORY_INACCESSIBLE(address, size) memset((address), 0, (size))
115 #define SET_MEMORY_ACCESSIBLE(address, size) do { } while (false) 115 #define SET_MEMORY_ACCESSIBLE(address, size) do { } while (false)
116 #define CHECK_MEMORY_INACCESSIBLE(address, size) do { } while (false) 116 #define CHECK_MEMORY_INACCESSIBLE(address, size) do { } while (false)
117 #endif 117 #endif
118 118
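As a rough standalone sketch of what these macros accomplish, using the zero-fill of the non-ASan branch as the assumed zap pattern (the function names here are illustrative, not Blink's):

#include <cassert>
#include <cstddef>
#include <cstring>

// Freed memory is zapped (here: zeroed, as in the non-ASan branch of
// SET_MEMORY_INACCESSIBLE) and must still look zapped when re-checked.
void setMemoryInaccessible(void* address, size_t size)
{
    std::memset(address, 0, size);
}

void checkMemoryInaccessible(const unsigned char* address, size_t size)
{
    for (size_t i = 0; i < size; ++i)
        assert(!address[i]); // Any non-zero byte indicates a write-after-free.
}

Under ASan the same bracketing is done by unpoisoning the region, calling FreeList::checkFreedMemoryIsZapped, and poisoning it again, as the macros above show.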
119 #if !ENABLE(ASSERT) && CPU(64BIT)
120 #define USE_4BYTE_HEADER_PADDING 1
121 #else
122 #define USE_4BYTE_HEADER_PADDING 0
123 #endif
124
125 class CallbackStack; 119 class CallbackStack;
126 class FreePagePool; 120 class FreePagePool;
127 class NormalPageHeap; 121 class NormalPageHeap;
128 class OrphanedPagePool; 122 class OrphanedPagePool;
129 class PageMemory; 123 class PageMemory;
130 class PageMemoryRegion; 124 class PageMemoryRegion;
131 class WebProcessMemoryDump; 125 class WebProcessMemoryDump;
132 126
133 // HeapObjectHeader is 4 byte (32 bit) that has the following layout: 127 // HeapObjectHeader has two 4 byte (32 bit) members, and one of them has
128 // the following bit field layout:
134 // 129 //
135 // | gcInfoIndex (14 bit) | DOM mark bit (1 bit) | size (14 bit) | dead bit (1 bit) | freed bit (1 bit) | mark bit (1 bit) | 130 // | gcInfoIndex (14 bit) | DOM mark bit (1 bit) | size (14 bit) | dead bit (1 bit) | freed bit (1 bit) | mark bit (1 bit)
136 // 131 //
137 // - For non-large objects, 14 bit is enough for |size| because the blink 132 // - For non-large objects, 14 bit is enough for |size| because the blink
138 // page size is 2^17 byte and each object is guaranteed to be aligned with 133 // page size is 2^17 byte and each object is guaranteed to be aligned with
139 // 2^3 byte. 134 // 2^3 byte.
140 // - For large objects, |size| is 0. The actual size of a large object is 135 // - For large objects, |size| is 0. The actual size of a large object is
141 // stored in LargeObjectPage::m_payloadSize. 136 // stored in LargeObjectPage::m_payloadSize.
142 // - 1 bit used to mark DOM trees for V8. 137 // - 1 bit used to mark DOM trees for V8.
143 // - 14 bit is enough for gcInfoIndex because there are less than 2^14 types 138 // - 14 bit is enough for gcInfoIndex because there are less than 2^14 types
144 // in Blink. 139 // in Blink.
145 const size_t headerDOMMarkBitMask = 1u << 17; 140 const size_t headerDOMMarkBitMask = 1u << 17;
146 const size_t headerGCInfoIndexShift = 18; 141 const size_t headerGCInfoIndexShift = 18;
147 const size_t headerGCInfoIndexMask = (static_cast<size_t>((1 << 14) - 1)) << headerGCInfoIndexShift; 142 const size_t headerGCInfoIndexMask = (static_cast<size_t>((1 << 14) - 1)) << headerGCInfoIndexShift;
148 const size_t headerSizeMask = (static_cast<size_t>((1 << 14) - 1)) << 3; 143 const size_t headerSizeMask = (static_cast<size_t>((1 << 14) - 1)) << 3;
149 const size_t headerMarkBitMask = 1; 144 const size_t headerMarkBitMask = 1;
150 const size_t headerFreedBitMask = 2; 145 const size_t headerFreedBitMask = 2;
151 // The dead bit is used for objects that have gone through a GC marking, but did 146 // The dead bit is used for objects that have gone through a GC marking, but did
152 // not get swept before a new GC started. In that case we set the dead bit on 147 // not get swept before a new GC started. In that case we set the dead bit on
153 // objects that were not marked in the previous GC to ensure we are not tracing 148 // objects that were not marked in the previous GC to ensure we are not tracing
154 // them via a conservatively found pointer. Tracing dead objects could lead to 149 // them via a conservatively found pointer. Tracing dead objects could lead to
155 // tracing of already finalized objects in another thread's heap which is a 150 // tracing of already finalized objects in another thread's heap which is a
156 // use-after-free situation. 151 // use-after-free situation.
157 const size_t headerDeadBitMask = 4; 152 const size_t headerDeadBitMask = 4;
158 // On free-list entries we reuse the dead bit to distinguish a normal free-list 153 // On free-list entries we reuse the dead bit to distinguish a normal free-list
159 // entry from one that has been promptly freed. 154 // entry from one that has been promptly freed.
160 const size_t headerPromptlyFreedBitMask = headerFreedBitMask | headerDeadBitMask; 155 const size_t headerPromptlyFreedBitMask = headerFreedBitMask | headerDeadBitMask;
161 const size_t largeObjectSizeInHeader = 0; 156 const size_t largeObjectSizeInHeader = 0;
162 const size_t gcInfoIndexForFreeListHeader = 0; 157 const size_t gcInfoIndexForFreeListHeader = 0;
163 const size_t nonLargeObjectPageSizeMax = 1 << 17; 158 const size_t nonLargeObjectPageSizeMax = 1 << 17;
164 159
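A self-contained sketch of how these masks carve up the 32-bit encoded header word; the encode helper and main are illustrative, not Blink's actual accessors:

#include <cassert>
#include <cstddef>
#include <cstdint>

const size_t headerGCInfoIndexShift = 18;
const size_t headerGCInfoIndexMask = (static_cast<size_t>((1 << 14) - 1)) << headerGCInfoIndexShift;
const size_t headerSizeMask = (static_cast<size_t>((1 << 14) - 1)) << 3;

uint32_t encode(size_t size, size_t gcInfoIndex)
{
    // |size| is 8-byte aligned, so its low 3 bits are zero and are reused
    // as the mark/freed/dead flag bits.
    return static_cast<uint32_t>((gcInfoIndex << headerGCInfoIndexShift) | size);
}

int main()
{
    uint32_t encoded = encode(64, 42);
    assert((encoded & headerSizeMask) == 64);
    assert(((encoded & headerGCInfoIndexMask) >> headerGCInfoIndexShift) == 42);
    return 0;
}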
160 const uint32_t gcGenerationUnchecked = 0;
161 const uint32_t gcGenerationForFreeListEntry = 1;
162 const uint32_t gcGenerationStart = 2;
163
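These three sentinel values underpin the use-after-free detector this CL adds. A hypothetical sketch of the idea, assuming a Member<> remembers the generation its target carried when it was assigned (the actual detector lives in Member<>, elsewhere in this CL):

#include <cstdint>

const uint32_t gcGenerationUnchecked = 0;
const uint32_t gcGenerationForFreeListEntry = 1;

// Hypothetical check: |remembered| is the generation a Member<> saw at
// assignment time; |current| is what the header says now.
bool looksLikeUseAfterFree(uint32_t remembered, uint32_t current)
{
    if (current == gcGenerationUnchecked)
        return false; // Header is not participating in the check.
    if (current == gcGenerationForFreeListEntry)
        return true;  // The object's slot is now a free-list entry.
    return current != remembered; // Slot was reused by a newer allocation.
}

Since real objects are stamped starting at gcGenerationStart (2), the values 0 and 1 can never collide with a live allocation.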
165 static_assert(nonLargeObjectPageSizeMax >= blinkPageSize, "max size supported by HeapObjectHeader must at least be blinkPageSize"); 164 static_assert(nonLargeObjectPageSizeMax >= blinkPageSize, "max size supported by HeapObjectHeader must at least be blinkPageSize");
166 165
167 class PLATFORM_EXPORT HeapObjectHeader { 166 class PLATFORM_EXPORT HeapObjectHeader {
168 public: 167 public:
169 // If gcInfoIndex is 0, this header is interpreted as a free list header. 168 // If gcInfoIndex is 0, this header is interpreted as a free list header.
170 NO_SANITIZE_ADDRESS 169 NO_SANITIZE_ADDRESS
171 HeapObjectHeader(size_t size, size_t gcInfoIndex) 170 HeapObjectHeader(size_t size, size_t gcInfoIndex, uint32_t generation)
171 : m_gcGeneration(generation)
172 { 172 {
173 #if ENABLE(ASSERT)
174 m_magic = magic;
175 #endif
176 // sizeof(HeapObjectHeader) must be equal to or smaller than 173 // sizeof(HeapObjectHeader) must be equal to or smaller than
177 // allocationGranularity, because HeapObjectHeader is used as a header 174 // allocationGranularity, because HeapObjectHeader is used as a header
178 // for a freed entry. Given that the smallest entry size is 175 // for a freed entry. Given that the smallest entry size is
179 // allocationGranularity, HeapObjectHeader must fit into the size. 176 // allocationGranularity, HeapObjectHeader must fit into the size.
180 static_assert(sizeof(HeapObjectHeader) <= allocationGranularity, "size of HeapObjectHeader must be smaller than allocationGranularity"); 177 static_assert(sizeof(HeapObjectHeader) <= allocationGranularity, "size of HeapObjectHeader must be smaller than allocationGranularity");
181 #if CPU(64BIT) 178 #if CPU(64BIT)
182 static_assert(sizeof(HeapObjectHeader) == 8, "size of HeapObjectHeader must be 8 byte aligned"); 179 static_assert(sizeof(HeapObjectHeader) == 8, "size of HeapObjectHeader must be 8 byte aligned");
183 #endif 180 #endif
184 181
185 ASSERT(gcInfoIndex < GCInfoTable::maxIndex); 182 ASSERT(gcInfoIndex < GCInfoTable::maxIndex);
(...skipping 23 matching lines...)
209 void unmark(); 206 void unmark();
210 void markDead(); 207 void markDead();
211 bool isDead() const; 208 bool isDead() const;
212 209
213 Address payload(); 210 Address payload();
214 size_t payloadSize(); 211 size_t payloadSize();
215 Address payloadEnd(); 212 Address payloadEnd();
216 213
217 #if ENABLE(ASSERT) 214 #if ENABLE(ASSERT)
218 bool checkHeader() const; 215 bool checkHeader() const;
219 // Zap magic number with a new magic number that means there was once an
220 // object allocated here, but it was freed because nobody marked it during
221 // GC.
222 void zapMagic();
223 #endif 216 #endif
217 NO_SANITIZE_ADDRESS
218 uint32_t gcGeneration() const { return m_gcGeneration; }
224 219
225 void finalize(Address, size_t); 220 void finalize(Address, size_t);
226 static HeapObjectHeader* fromPayload(const void*); 221 static HeapObjectHeader* fromPayload(const void*);
227 222
228 static const uint16_t magic = 0xfff1;
229 static const uint16_t zappedMagic = 0x4321;
230
231 private: 223 private:
232 uint32_t m_encoded; 224 uint32_t m_encoded;
233 #if ENABLE(ASSERT) 225 // m_gcGeneration records the GC cycle in which the object was
234 uint16_t m_magic; 226 // allocated. gcGenerationForFreeListEntry indicates that the object has
235 #endif 227 // already been freed.
236 228 uint32_t m_gcGeneration;
237 // In 64 bit architectures, we intentionally add 4 byte padding immediately
238 // after the HeapObjectHeader. This is because:
239 //
240 // | HeapObjectHeader (4 byte) | padding (4 byte) | object payload (8 * n byte) |
241 // ^8 byte aligned ^8 byte aligned
242 //
243 // is better than:
244 //
245 // | HeapObjectHeader (4 byte) | object payload (8 * n byte) | padding (4 byte) |
246 // ^4 byte aligned ^8 byte aligned ^4 byte aligned
247 //
248 // since the former layout aligns both header and payload to 8 byte.
249 #if USE_4BYTE_HEADER_PADDING
250 public:
251 uint32_t m_padding;
252 #endif
253 }; 229 };
254 230
255 class FreeListEntry final : public HeapObjectHeader { 231 class FreeListEntry final : public HeapObjectHeader {
256 public: 232 public:
257 NO_SANITIZE_ADDRESS 233 NO_SANITIZE_ADDRESS
258 explicit FreeListEntry(size_t size) 234 explicit FreeListEntry(size_t size)
259 : HeapObjectHeader(size, gcInfoIndexForFreeListHeader) 235 : HeapObjectHeader(size, gcInfoIndexForFreeListHeader, gcGenerationForFreeListEntry)
260 , m_next(nullptr) 236 , m_next(nullptr)
261 { 237 {
262 #if ENABLE(ASSERT) 238 #if ENABLE(ASSERT)
263 ASSERT(size >= sizeof(HeapObjectHeader)); 239 ASSERT(size >= sizeof(HeapObjectHeader));
264 zapMagic();
265 #endif 240 #endif
266 } 241 }
267 242
268 Address address() { return reinterpret_cast<Address>(this); } 243 Address address() { return reinterpret_cast<Address>(this); }
269 244
270 NO_SANITIZE_ADDRESS 245 NO_SANITIZE_ADDRESS
271 void unlink(FreeListEntry** prevNext) 246 void unlink(FreeListEntry** prevNext)
272 { 247 {
273 *prevNext = m_next; 248 *prevNext = m_next;
274 m_next = nullptr; 249 m_next = nullptr;
(...skipping 201 matching lines...)
476 // Compute the amount of padding we have to add to a header to make 451 // Compute the amount of padding we have to add to a header to make
477 // the size of the header plus the padding a multiple of 8 bytes. 452 // the size of the header plus the padding a multiple of 8 bytes.
478 size_t paddingSize = (sizeof(NormalPage) + allocationGranularity - (sizeof(HeapObjectHeader) % allocationGranularity)) % allocationGranularity; 453 size_t paddingSize = (sizeof(NormalPage) + allocationGranularity - (sizeof(HeapObjectHeader) % allocationGranularity)) % allocationGranularity;
479 return sizeof(NormalPage) + paddingSize; 454 return sizeof(NormalPage) + paddingSize;
480 } 455 }
481 456
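A worked example of the padding computation under assumed sizes: sizeof(HeapObjectHeader) == 8 matches the static_assert earlier in this file, while 116 is a hypothetical stand-in for sizeof(NormalPage), not the real value:

#include <cstddef>

const size_t allocationGranularity = 8;
const size_t headerSize = 8;      // stands in for sizeof(HeapObjectHeader)
const size_t pageClassSize = 116; // hypothetical stand-in for sizeof(NormalPage)

const size_t paddingSize = (pageClassSize + allocationGranularity
    - (headerSize % allocationGranularity)) % allocationGranularity;

static_assert(paddingSize == 4, "(116 + 8 - 0) % 8 == 4");
static_assert((pageClassSize + paddingSize) % allocationGranularity == 0,
              "page header plus padding is 8-byte aligned");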
482 457
483 NormalPageHeap* heapForNormalPage(); 458 NormalPageHeap* heapForNormalPage();
484 void clearObjectStartBitMap(); 459 void clearObjectStartBitMap();
485 460
461 HeapObjectHeader* findHeaderFromObject(const void*);
462
486 private: 463 private:
487 HeapObjectHeader* findHeaderFromAddress(Address); 464 HeapObjectHeader* findHeaderFromAddress(Address);
488 void populateObjectStartBitMap(); 465 void populateObjectStartBitMap();
489 bool isObjectStartBitMapComputed() { return m_objectStartBitMapComputed; } 466 bool isObjectStartBitMapComputed() { return m_objectStartBitMapComputed; }
490 467
491 bool m_objectStartBitMapComputed; 468 bool m_objectStartBitMapComputed;
492 uint8_t m_objectStartBitMap[reservedForObjectBitMap]; 469 uint8_t m_objectStartBitMap[reservedForObjectBitMap];
493 }; 470 };
494 471
495 // Large allocations are allocated as separate objects and linked in a list. 472 // Large allocations are allocated as separate objects and linked in a list.
(...skipping 204 matching lines...)
700 ASSERT(findPageFromAddress(address + size - 1)); 677 ASSERT(findPageFromAddress(address + size - 1));
701 m_freeList.addToFreeList(address, size); 678 m_freeList.addToFreeList(address, size);
702 } 679 }
703 void clearFreeLists() override; 680 void clearFreeLists() override;
704 #if ENABLE(ASSERT) 681 #if ENABLE(ASSERT)
705 bool isConsistentForGC() override; 682 bool isConsistentForGC() override;
706 bool pagesToBeSweptContains(Address); 683 bool pagesToBeSweptContains(Address);
707 #endif 684 #endif
708 void takeFreelistSnapshot(const String& dumpBaseName) override; 685 void takeFreelistSnapshot(const String& dumpBaseName) override;
709 686
710 Address allocateObject(size_t allocationSize, size_t gcInfoIndex); 687 Address allocateObject(size_t allocationSize, size_t gcInfoIndex, uint32_t generation);
711 688
712 void freePage(NormalPage*); 689 void freePage(NormalPage*);
713 690
714 bool coalesce(); 691 bool coalesce();
715 void promptlyFreeObject(HeapObjectHeader*); 692 void promptlyFreeObject(HeapObjectHeader*);
716 bool expandObject(HeapObjectHeader*, size_t); 693 bool expandObject(HeapObjectHeader*, size_t);
717 bool shrinkObject(HeapObjectHeader*, size_t); 694 bool shrinkObject(HeapObjectHeader*, size_t);
718 void decreasePromptlyFreedSize(size_t size) { m_promptlyFreedSize -= size; } 695 void decreasePromptlyFreedSize(size_t size) { m_promptlyFreedSize -= size; }
719 696
720 bool isObjectAllocatedAtAllocationPoint(HeapObjectHeader* header) 697 bool isObjectAllocatedAtAllocationPoint(HeapObjectHeader* header)
(...skipping 59 matching lines...)
780 // LargeObjectPage::m_payloadSize. 757 // LargeObjectPage::m_payloadSize.
781 ASSERT(result != largeObjectSizeInHeader); 758 ASSERT(result != largeObjectSizeInHeader);
782 ASSERT(!pageFromObject(this)->isLargeObjectPage()); 759 ASSERT(!pageFromObject(this)->isLargeObjectPage());
783 return result; 760 return result;
784 } 761 }
785 762
786 #if ENABLE(ASSERT) 763 #if ENABLE(ASSERT)
787 NO_SANITIZE_ADDRESS inline 764 NO_SANITIZE_ADDRESS inline
788 bool HeapObjectHeader::checkHeader() const 765 bool HeapObjectHeader::checkHeader() const
789 { 766 {
790 return !pageFromObject(this)->orphaned() && m_magic == magic; 767 ASSERT(isFree() == (m_gcGeneration == gcGenerationForFreeListEntry));
768 ASSERT(m_gcGeneration != gcGenerationUnchecked);
769 return !pageFromObject(this)->orphaned();
791 } 770 }
792 #endif 771 #endif
793 772
794 inline Address HeapObjectHeader::payload() 773 inline Address HeapObjectHeader::payload()
795 { 774 {
796 return reinterpret_cast<Address>(this) + sizeof(HeapObjectHeader); 775 return reinterpret_cast<Address>(this) + sizeof(HeapObjectHeader);
797 } 776 }
798 777
799 inline Address HeapObjectHeader::payloadEnd() 778 inline Address HeapObjectHeader::payloadEnd()
800 { 779 {
(...skipping 51 matching lines...)
852 } 831 }
853 832
854 NO_SANITIZE_ADDRESS inline 833 NO_SANITIZE_ADDRESS inline
855 void HeapObjectHeader::markDead() 834 void HeapObjectHeader::markDead()
856 { 835 {
857 ASSERT(checkHeader()); 836 ASSERT(checkHeader());
858 ASSERT(!isMarked()); 837 ASSERT(!isMarked());
859 m_encoded |= headerDeadBitMask; 838 m_encoded |= headerDeadBitMask;
860 } 839 }
861 840
862 inline Address NormalPageHeap::allocateObject(size_t allocationSize, size_t gcInfoIndex) 841 inline Address NormalPageHeap::allocateObject(size_t allocationSize, size_t gcInfoIndex, uint32_t gcGeneration)
863 { 842 {
864 if (LIKELY(allocationSize <= m_remainingAllocationSize)) { 843 if (LIKELY(allocationSize <= m_remainingAllocationSize)) {
865 Address headerAddress = m_currentAllocationPoint; 844 Address headerAddress = m_currentAllocationPoint;
866 m_currentAllocationPoint += allocationSize; 845 m_currentAllocationPoint += allocationSize;
867 m_remainingAllocationSize -= allocationSize; 846 m_remainingAllocationSize -= allocationSize;
868 ASSERT(gcInfoIndex > 0); 847 ASSERT(gcInfoIndex > 0);
869 new (NotNull, headerAddress) HeapObjectHeader(allocationSize, gcInfoIndex); 848 new (NotNull, headerAddress) HeapObjectHeader(allocationSize, gcInfoIndex, gcGeneration);
870 Address result = headerAddress + sizeof(HeapObjectHeader); 849 Address result = headerAddress + sizeof(HeapObjectHeader);
871 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); 850 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask));
872 851
873 SET_MEMORY_ACCESSIBLE(result, allocationSize - sizeof(HeapObjectHeader)); 852 SET_MEMORY_ACCESSIBLE(result, allocationSize - sizeof(HeapObjectHeader));
874 ASSERT(findPageFromAddress(headerAddress + allocationSize - 1)); 853 ASSERT(findPageFromAddress(headerAddress + allocationSize - 1));
875 return result; 854 return result;
876 } 855 }
877 return outOfLineAllocate(allocationSize, gcInfoIndex); 856 return outOfLineAllocate(allocationSize, gcInfoIndex);
878 } 857 }
879 858
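The fast path of allocateObject above is a bump-pointer allocator. A stripped-down standalone sketch of the same shape, with all names hypothetical and no Blink types:

#include <cstddef>

struct BumpRegion {
    char* current;    // next free byte (m_currentAllocationPoint)
    size_t remaining; // bytes left in the region (m_remainingAllocationSize)

    // Returns the payload past an 8-byte header, or nullptr to signal the
    // slow path (outOfLineAllocate in the real code).
    void* allocate(size_t size)
    {
        if (size > remaining)
            return nullptr;
        char* headerAddress = current;
        current += size;
        remaining -= size;
        // The real code placement-news a HeapObjectHeader here, stamping it
        // with gcInfoIndex and the current gcGeneration.
        return headerAddress + 8;
    }
};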
880 } // namespace blink 859 } // namespace blink
881 860
882 #endif // HeapPage_h 861 #endif // HeapPage_h
