Chromium Code Reviews

Side by Side Diff: third_party/WebKit/Source/platform/heap/HeapPage.h

Issue 1411603007: [Oilpan] Add use-after-free detector in Member<>
Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Require full definition of T (created 5 years, 1 month ago)
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 98 matching lines...)
109 #define CHECK_MEMORY_INACCESSIBLE(address, size) \ 109 #define CHECK_MEMORY_INACCESSIBLE(address, size) \
110 ASAN_UNPOISON_MEMORY_REGION(address, size); \ 110 ASAN_UNPOISON_MEMORY_REGION(address, size); \
111 FreeList::checkFreedMemoryIsZapped(address, size); \ 111 FreeList::checkFreedMemoryIsZapped(address, size); \
112 ASAN_POISON_MEMORY_REGION(address, size) 112 ASAN_POISON_MEMORY_REGION(address, size)
113 #else 113 #else
114 #define SET_MEMORY_INACCESSIBLE(address, size) memset((address), 0, (size)) 114 #define SET_MEMORY_INACCESSIBLE(address, size) memset((address), 0, (size))
115 #define SET_MEMORY_ACCESSIBLE(address, size) do { } while (false) 115 #define SET_MEMORY_ACCESSIBLE(address, size) do { } while (false)
116 #define CHECK_MEMORY_INACCESSIBLE(address, size) do { } while (false) 116 #define CHECK_MEMORY_INACCESSIBLE(address, size) do { } while (false)
117 #endif 117 #endif
118 118
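As a reference for what the debug-build check above amounts to, here is a minimal sketch of a zap verification pass. The zap byte value and the helper name are placeholder assumptions; FreeList::checkFreedMemoryIsZapped is the real function, and its implementation is not part of this diff:

    // Sketch only: verify that a freed region still holds the zap pattern
    // written when it was released. kZapByte is a placeholder assumption,
    // not necessarily the value Blink actually uses.
    static const unsigned char kZapByte = 0x2a;

    static void checkFreedMemoryIsZappedSketch(unsigned char* address, size_t size)
    {
        for (size_t i = 0; i < size; ++i) {
            // Any byte that deviates from the zap pattern means something
            // wrote to the region after it was freed, i.e. a use-after-free.
            ASSERT(address[i] == kZapByte);
        }
    }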
119 #if !ENABLE(ASSERT) && CPU(64BIT)
120 #define USE_4BYTE_HEADER_PADDING 1
121 #else
122 #define USE_4BYTE_HEADER_PADDING 0
123 #endif
124
125 class CallbackStack; 119 class CallbackStack;
126 class FreePagePool; 120 class FreePagePool;
127 class NormalPageHeap; 121 class NormalPageHeap;
128 class OrphanedPagePool; 122 class OrphanedPagePool;
129 class PageMemory; 123 class PageMemory;
130 class PageMemoryRegion; 124 class PageMemoryRegion;
131 class WebProcessMemoryDump; 125 class WebProcessMemoryDump;
132 126
133 // HeapObjectHeader is 4 byte (32 bit) and has the following layout: 127 // HeapObjectHeader has two 4 byte (32 bit) members, and one of them has
128 // the following bit field layout:
134 // 129 //
135 // | gcInfoIndex (14 bit) | DOM mark bit (1 bit) | size (14 bit) | dead bit (1 bit) | freed bit (1 bit) | mark bit (1 bit) | 130 // | gcInfoIndex (14 bit) | DOM mark bit (1 bit) | size (14 bit) | dead bit (1 bit) | freed bit (1 bit) | mark bit (1 bit) |
136 // 131 //
137 // - For non-large objects, 14 bit is enough for |size| because the blink 132 // - For non-large objects, 14 bit is enough for |size| because the blink
138 // page size is 2^17 byte and each object is guaranteed to be aligned with 133 // page size is 2^17 byte and each object is guaranteed to be aligned with
139 // 2^3 byte. 134 // 2^3 byte.
140 // - For large objects, |size| is 0. The actual size of a large object is 135 // - For large objects, |size| is 0. The actual size of a large object is
141 // stored in LargeObjectPage::m_payloadSize. 136 // stored in LargeObjectPage::m_payloadSize.
142 // - 1 bit used to mark DOM trees for V8. 137 // - 1 bit used to mark DOM trees for V8.
143 // - 14 bit is enough for gcInfoIndex because there are less than 2^14 types 138 // - 14 bit is enough for gcInfoIndex because there are less than 2^14 types
144 // in Blink. 139 // in Blink.
145 const size_t headerDOMMarkBitMask = 1u << 17; 140 const size_t headerDOMMarkBitMask = 1u << 17;
146 const size_t headerGCInfoIndexShift = 18; 141 const size_t headerGCInfoIndexShift = 18;
147 const size_t headerGCInfoIndexMask = (static_cast<size_t>((1 << 14) - 1)) << headerGCInfoIndexShift; 142 const size_t headerGCInfoIndexMask = (static_cast<size_t>((1 << 14) - 1)) << headerGCInfoIndexShift;
148 const size_t headerSizeMask = (static_cast<size_t>((1 << 14) - 1)) << 3; 143 const size_t headerSizeMask = (static_cast<size_t>((1 << 14) - 1)) << 3;
149 const size_t headerMarkBitMask = 1; 144 const size_t headerMarkBitMask = 1;
150 const size_t headerFreedBitMask = 2; 145 const size_t headerFreedBitMask = 2;
151 // The dead bit is used for objects that have gone through a GC marking, but did 146 // The dead bit is used for objects that have gone through a GC marking, but did
152 // not get swept before a new GC started. In that case we set the dead bit on 147 // not get swept before a new GC started. In that case we set the dead bit on
153 // objects that were not marked in the previous GC to ensure we are not tracing 148 // objects that were not marked in the previous GC to ensure we are not tracing
154 // them via a conservatively found pointer. Tracing dead objects could lead to 149 // them via a conservatively found pointer. Tracing dead objects could lead to
155 // tracing of already finalized objects in another thread's heap which is a 150 // tracing of already finalized objects in another thread's heap which is a
156 // use-after-free situation. 151 // use-after-free situation.
157 const size_t headerDeadBitMask = 4; 152 const size_t headerDeadBitMask = 4;
158 // On free-list entries we reuse the dead bit to distinguish a normal free-list 153 // On free-list entries we reuse the dead bit to distinguish a normal free-list
159 // entry from one that has been promptly freed. 154 // entry from one that has been promptly freed.
160 const size_t headerPromptlyFreedBitMask = headerFreedBitMask | headerDeadBitMask; 155 const size_t headerPromptlyFreedBitMask = headerFreedBitMask | headerDeadBitMask;
161 const size_t largeObjectSizeInHeader = 0; 156 const size_t largeObjectSizeInHeader = 0;
162 const size_t gcInfoIndexForFreeListHeader = 0; 157 const size_t gcInfoIndexForFreeListHeader = 0;
163 const size_t nonLargeObjectPageSizeMax = 1 << 17; 158 const size_t nonLargeObjectPageSizeMax = 1 << 17;
164 159
160 const uint32_t gcGenerationUnchecked = 0;
161 const uint32_t gcGenerationForFreeListEntry = 1;
162 const uint32_t gcGenerationStart = 2;
163
165 static_assert(nonLargeObjectPageSizeMax >= blinkPageSize, "max size supported by HeapObjectHeader must at least be blinkPageSize"); 164 static_assert(nonLargeObjectPageSizeMax >= blinkPageSize, "max size supported by HeapObjectHeader must at least be blinkPageSize");
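Putting the masks together: packing and unpacking the 32 bit encoded word is a matter of shifts and ANDs. A minimal sketch reconstructed from the constants above (these helper functions are illustrative; the real accessors are methods on HeapObjectHeader):

    // Sketch reconstructed from the mask/shift constants above;
    // illustrative free functions, not the real HeapObjectHeader API.
    inline uint32_t encodeHeader(size_t size, size_t gcInfoIndex)
    {
        // size is a multiple of allocationGranularity (2^3), so its low
        // three bits are zero and it fits the 14 bit field at bits 3..16.
        return static_cast<uint32_t>((gcInfoIndex << headerGCInfoIndexShift) | size);
    }

    inline size_t decodeSize(uint32_t encoded)
    {
        return encoded & headerSizeMask; // largeObjectSizeInHeader (0) for large objects
    }

    inline size_t decodeGCInfoIndex(uint32_t encoded)
    {
        return (encoded & headerGCInfoIndexMask) >> headerGCInfoIndexShift;
    }

    inline bool decodeIsMarked(uint32_t encoded)
    {
        return encoded & headerMarkBitMask;
    }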
166 165
167 class PLATFORM_EXPORT HeapObjectHeader { 166 class PLATFORM_EXPORT HeapObjectHeader {
168 public: 167 public:
169 // If gcInfoIndex is 0, this header is interpreted as a free list header. 168 // If gcInfoIndex is 0, this header is interpreted as a free list header.
170 NO_SANITIZE_ADDRESS 169 NO_SANITIZE_ADDRESS
171 HeapObjectHeader(size_t size, size_t gcInfoIndex) 170 HeapObjectHeader(size_t size, size_t gcInfoIndex, uint32_t generation)
171 : m_gcGeneration(generation)
172 { 172 {
173 #if ENABLE(ASSERT)
174 m_magic = magic;
175 #endif
176 // sizeof(HeapObjectHeader) must be equal to or smaller than 173 // sizeof(HeapObjectHeader) must be equal to or smaller than
177 // allocationGranularity, because HeapObjectHeader is used as a header 174 // allocationGranularity, because HeapObjectHeader is used as a header
178 // for a freed entry. Given that the smallest entry size is 175 // for a freed entry. Given that the smallest entry size is
179 // allocationGranularity, HeapObjectHeader must fit into the size. 176 // allocationGranularity, HeapObjectHeader must fit into the size.
180 static_assert(sizeof(HeapObjectHeader) <= allocationGranularity, "size of HeapObjectHeader must be smaller than allocationGranularity"); 177 static_assert(sizeof(HeapObjectHeader) <= allocationGranularity, "size of HeapObjectHeader must be smaller than allocationGranularity");
181 #if CPU(64BIT) 178 #if CPU(64BIT)
182 static_assert(sizeof(HeapObjectHeader) == 8, "size of HeapObjectHeader must be 8 byte aligned"); 179 static_assert(sizeof(HeapObjectHeader) == 8, "size of HeapObjectHeader must be 8 byte aligned");
183 #endif 180 #endif
184 181
185 ASSERT(gcInfoIndex < GCInfoTable::maxIndex); 182 ASSERT(gcInfoIndex < GCInfoTable::maxIndex);
(...skipping 19 matching lines...)
205 void unmark(); 202 void unmark();
206 void markDead(); 203 void markDead();
207 bool isDead() const; 204 bool isDead() const;
208 205
209 Address payload(); 206 Address payload();
210 size_t payloadSize(); 207 size_t payloadSize();
211 Address payloadEnd(); 208 Address payloadEnd();
212 209
213 #if ENABLE(ASSERT) 210 #if ENABLE(ASSERT)
214 bool checkHeader() const; 211 bool checkHeader() const;
215 // Zap magic number with a new magic number that means there was once an
216 // object allocated here, but it was freed because nobody marked it during
217 // GC.
218 void zapMagic();
219 #endif 212 #endif
213 NO_SANITIZE_ADDRESS
214 uint32_t gcGeneration() const { return m_gcGeneration; }
220 215
221 void finalize(Address, size_t); 216 void finalize(Address, size_t);
222 static HeapObjectHeader* fromPayload(const void*); 217 static HeapObjectHeader* fromPayload(const void*);
223 218
224 static const uint16_t magic = 0xfff1;
225 static const uint16_t zappedMagic = 0x4321;
226
227 private: 219 private:
228 uint32_t m_encoded; 220 uint32_t m_encoded;
229 #if ENABLE(ASSERT) 221 // m_gcGeneration records the GC cycle in which the object was
230 uint16_t m_magic; 222 // allocated. gcGenerationForFreeListEntry indicates that the object has
231 #endif 223 // already been freed.
232 224 uint32_t m_gcGeneration;
233 // In 64 bit architectures, we intentionally add 4 byte padding immediately
234 // after the HeapObjectHeader. This is because:
235 //
236 // | HeapObjectHeader (4 byte) | padding (4 byte) | object payload (8 * n byte) |
237 // ^8 byte aligned ^8 byte aligned
238 //
239 // is better than:
240 //
241 // | HeapObjectHeader (4 byte) | object payload (8 * n byte) | padding (4 byte) |
242 // ^4 byte aligned ^8 byte aligned ^4 byte aligned
243 //
244 // since the former layout aligns both header and payload to 8 byte.
245 #if USE_4BYTE_HEADER_PADDING
246 public:
247 uint32_t m_padding;
248 #endif
249 }; 225 };
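This m_gcGeneration field is the hook for the use-after-free detector named in the issue title. A hypothetical sketch of how a Member<>-style handle could use it (CheckedHandle and its members are illustrative assumptions; the actual Member<> changes live in other files of this patch):

    // Hypothetical illustration of the detector this field enables. The
    // handle records the generation at assignment time and re-checks it on
    // access; a freed slot has its generation rewritten to
    // gcGenerationForFreeListEntry, so a stale pointer no longer matches.
    template <typename T>
    class CheckedHandle {
    public:
        explicit CheckedHandle(T* object)
            : m_object(object)
            , m_generation(HeapObjectHeader::fromPayload(object)->gcGeneration())
        {
        }

        T* get() const
        {
            // If the object was swept and its memory reused or turned into
            // a free-list entry, the stored generation no longer matches.
            ASSERT(HeapObjectHeader::fromPayload(m_object)->gcGeneration() == m_generation);
            return m_object;
        }

    private:
        T* m_object;
        uint32_t m_generation;
    };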
250 226
251 class FreeListEntry final : public HeapObjectHeader { 227 class FreeListEntry final : public HeapObjectHeader {
252 public: 228 public:
253 NO_SANITIZE_ADDRESS 229 NO_SANITIZE_ADDRESS
254 explicit FreeListEntry(size_t size) 230 explicit FreeListEntry(size_t size)
255 : HeapObjectHeader(size, gcInfoIndexForFreeListHeader) 231 : HeapObjectHeader(size, gcInfoIndexForFreeListHeader, gcGenerationForFreeListEntry)
256 , m_next(nullptr) 232 , m_next(nullptr)
257 { 233 {
258 #if ENABLE(ASSERT) 234 #if ENABLE(ASSERT)
259 ASSERT(size >= sizeof(HeapObjectHeader)); 235 ASSERT(size >= sizeof(HeapObjectHeader));
260 zapMagic();
261 #endif 236 #endif
262 } 237 }
263 238
264 Address address() { return reinterpret_cast<Address>(this); } 239 Address address() { return reinterpret_cast<Address>(this); }
265 240
266 NO_SANITIZE_ADDRESS 241 NO_SANITIZE_ADDRESS
267 void unlink(FreeListEntry** prevNext) 242 void unlink(FreeListEntry** prevNext)
268 { 243 {
269 *prevNext = m_next; 244 *prevNext = m_next;
270 m_next = nullptr; 245 m_next = nullptr;
(...skipping 425 matching lines...)
696 ASSERT(findPageFromAddress(address + size - 1)); 671 ASSERT(findPageFromAddress(address + size - 1));
697 m_freeList.addToFreeList(address, size); 672 m_freeList.addToFreeList(address, size);
698 } 673 }
699 void clearFreeLists() override; 674 void clearFreeLists() override;
700 #if ENABLE(ASSERT) 675 #if ENABLE(ASSERT)
701 bool isConsistentForGC() override; 676 bool isConsistentForGC() override;
702 bool pagesToBeSweptContains(Address); 677 bool pagesToBeSweptContains(Address);
703 #endif 678 #endif
704 void takeFreelistSnapshot(const String& dumpBaseName) override; 679 void takeFreelistSnapshot(const String& dumpBaseName) override;
705 680
706 Address allocateObject(size_t allocationSize, size_t gcInfoIndex); 681 Address allocateObject(size_t allocationSize, size_t gcInfoIndex, uint32_t generation);
707 682
708 void freePage(NormalPage*); 683 void freePage(NormalPage*);
709 684
710 bool coalesce(); 685 bool coalesce();
711 void promptlyFreeObject(HeapObjectHeader*); 686 void promptlyFreeObject(HeapObjectHeader*);
712 bool expandObject(HeapObjectHeader*, size_t); 687 bool expandObject(HeapObjectHeader*, size_t);
713 bool shrinkObject(HeapObjectHeader*, size_t); 688 bool shrinkObject(HeapObjectHeader*, size_t);
714 void decreasePromptlyFreedSize(size_t size) { m_promptlyFreedSize -= size; } 689 void decreasePromptlyFreedSize(size_t size) { m_promptlyFreedSize -= size; }
715 690
716 bool isObjectAllocatedAtAllocationPoint(HeapObjectHeader* header) 691 bool isObjectAllocatedAtAllocationPoint(HeapObjectHeader* header)
(...skipping 59 matching lines...)
776 // LargeObjectPage::m_payloadSize. 751 // LargeObjectPage::m_payloadSize.
777 ASSERT(result != largeObjectSizeInHeader); 752 ASSERT(result != largeObjectSizeInHeader);
778 ASSERT(!pageFromObject(this)->isLargeObjectPage()); 753 ASSERT(!pageFromObject(this)->isLargeObjectPage());
779 return result; 754 return result;
780 } 755 }
781 756
782 #if ENABLE(ASSERT) 757 #if ENABLE(ASSERT)
783 NO_SANITIZE_ADDRESS inline 758 NO_SANITIZE_ADDRESS inline
784 bool HeapObjectHeader::checkHeader() const 759 bool HeapObjectHeader::checkHeader() const
785 { 760 {
786 return !pageFromObject(this)->orphaned() && m_magic == magic; 761 ASSERT(isFree() == (m_gcGeneration == gcGenerationForFreeListEntry));
haraken 2015/11/20 01:59:06 Can we add ASSERT(m_gcGeneration != gcGenerationUnchecked)?
peria 2015/11/20 02:27:00 Done.
762 return !pageFromObject(this)->orphaned();
787 } 763 }
788 #endif 764 #endif
789 765
790 inline Address HeapObjectHeader::payload() 766 inline Address HeapObjectHeader::payload()
791 { 767 {
792 return reinterpret_cast<Address>(this) + sizeof(HeapObjectHeader); 768 return reinterpret_cast<Address>(this) + sizeof(HeapObjectHeader);
793 } 769 }
794 770
795 inline Address HeapObjectHeader::payloadEnd() 771 inline Address HeapObjectHeader::payloadEnd()
796 { 772 {
(...skipping 51 matching lines...)
848 } 824 }
849 825
850 NO_SANITIZE_ADDRESS inline 826 NO_SANITIZE_ADDRESS inline
851 void HeapObjectHeader::markDead() 827 void HeapObjectHeader::markDead()
852 { 828 {
853 ASSERT(checkHeader()); 829 ASSERT(checkHeader());
854 ASSERT(!isMarked()); 830 ASSERT(!isMarked());
855 m_encoded |= headerDeadBitMask; 831 m_encoded |= headerDeadBitMask;
856 } 832 }
857 833
858 inline Address NormalPageHeap::allocateObject(size_t allocationSize, size_t gcInfoIndex) 834 inline Address NormalPageHeap::allocateObject(size_t allocationSize, size_t gcInfoIndex, uint32_t gcGeneration)
859 { 835 {
860 if (LIKELY(allocationSize <= m_remainingAllocationSize)) { 836 if (LIKELY(allocationSize <= m_remainingAllocationSize)) {
861 Address headerAddress = m_currentAllocationPoint; 837 Address headerAddress = m_currentAllocationPoint;
862 m_currentAllocationPoint += allocationSize; 838 m_currentAllocationPoint += allocationSize;
863 m_remainingAllocationSize -= allocationSize; 839 m_remainingAllocationSize -= allocationSize;
864 ASSERT(gcInfoIndex > 0); 840 ASSERT(gcInfoIndex > 0);
865 new (NotNull, headerAddress) HeapObjectHeader(allocationSize, gcInfoIndex); 841 new (NotNull, headerAddress) HeapObjectHeader(allocationSize, gcInfoIndex, gcGeneration);
866 Address result = headerAddress + sizeof(HeapObjectHeader); 842 Address result = headerAddress + sizeof(HeapObjectHeader);
867 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); 843 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask));
868 844
869 SET_MEMORY_ACCESSIBLE(result, allocationSize - sizeof(HeapObjectHeader)); 845 SET_MEMORY_ACCESSIBLE(result, allocationSize - sizeof(HeapObjectHeader));
870 ASSERT(findPageFromAddress(headerAddress + allocationSize - 1)); 846 ASSERT(findPageFromAddress(headerAddress + allocationSize - 1));
871 return result; 847 return result;
872 } 848 }
873 return outOfLineAllocate(allocationSize, gcInfoIndex); 849 return outOfLineAllocate(allocationSize, gcInfoIndex);
874 } 850 }
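For illustration, a hypothetical call site showing how the new argument threads through. The heap pointer and the source of the current generation number are assumptions for the example; this diff only shows the allocator side:

    // Illustrative only: every allocation tags its header with the GC
    // generation current at allocation time. Where that counter lives
    // (e.g. per-thread heap state) is assumed, not shown in this diff.
    uint32_t currentGeneration = gcGenerationStart; // assumed source of the counter
    Address payload = normalPageHeap->allocateObject(allocationSize, gcInfoIndex, currentGeneration);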
875 851
876 } // namespace blink 852 } // namespace blink
877 853
878 #endif // HeapPage_h 854 #endif // HeapPage_h