Chromium Code Reviews

Side by Side Diff: third_party/WebKit/Source/platform/heap/HeapPage.h

Issue 1411603007: [Oilpan] Add use-after-free detector in Member<>
Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 5 years, 1 month ago
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 98 matching lines...)
109 #define CHECK_MEMORY_INACCESSIBLE(address, size) \ 109 #define CHECK_MEMORY_INACCESSIBLE(address, size) \
110 ASAN_UNPOISON_MEMORY_REGION(address, size); \ 110 ASAN_UNPOISON_MEMORY_REGION(address, size); \
111 FreeList::checkFreedMemoryIsZapped(address, size); \ 111 FreeList::checkFreedMemoryIsZapped(address, size); \
112 ASAN_POISON_MEMORY_REGION(address, size) 112 ASAN_POISON_MEMORY_REGION(address, size)
113 #else 113 #else
114 #define SET_MEMORY_INACCESSIBLE(address, size) memset((address), 0, (size)) 114 #define SET_MEMORY_INACCESSIBLE(address, size) memset((address), 0, (size))
115 #define SET_MEMORY_ACCESSIBLE(address, size) do { } while (false) 115 #define SET_MEMORY_ACCESSIBLE(address, size) do { } while (false)
116 #define CHECK_MEMORY_INACCESSIBLE(address, size) do { } while (false) 116 #define CHECK_MEMORY_INACCESSIBLE(address, size) do { } while (false)
117 #endif 117 #endif
118 118
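These macros centralize the "zap and poison" discipline for freed Oilpan memory: SET_MEMORY_INACCESSIBLE zaps a freed range (and, under ASan, poisons it), CHECK_MEMORY_INACCESSIBLE verifies the zap pattern survived, and SET_MEMORY_ACCESSIBLE reopens a range for reuse. A minimal sketch of the intended pairing; the two helper functions are hypothetical, only the macros come from this file:

    void freePayload(Address payload, size_t payloadSize)
    {
        // Freed range: zapped in release builds, additionally poisoned
        // under ASan so stray reads/writes fault immediately.
        SET_MEMORY_INACCESSIBLE(payload, payloadSize);
    }

    Address reusePayload(Address payload, size_t payloadSize)
    {
        // Before reuse: confirm nobody wrote to the freed range, then
        // unpoison it for the new allocation.
        CHECK_MEMORY_INACCESSIBLE(payload, payloadSize);
        SET_MEMORY_ACCESSIBLE(payload, payloadSize);
        return payload;
    }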
119 #if !ENABLE(ASSERT) && CPU(64BIT)
120 #define USE_4BYTE_HEADER_PADDING 1
121 #else
122 #define USE_4BYTE_HEADER_PADDING 0
123 #endif
124
125 class CallbackStack; 119 class CallbackStack;
126 class FreePagePool; 120 class FreePagePool;
127 class NormalPageHeap; 121 class NormalPageHeap;
128 class OrphanedPagePool; 122 class OrphanedPagePool;
129 class PageMemory; 123 class PageMemory;
130 class PageMemoryRegion; 124 class PageMemoryRegion;
131 class WebProcessMemoryDump; 125 class WebProcessMemoryDump;
132 126
133 // HeapObjectHeader is 4 bytes (32 bits) and has the following layout: 127 // HeapObjectHeader is 4 bytes (32 bits) and has the following layout:
134 // 128 //
(...skipping 26 matching lines...)
161 const size_t largeObjectSizeInHeader = 0; 155 const size_t largeObjectSizeInHeader = 0;
162 const size_t gcInfoIndexForFreeListHeader = 0; 156 const size_t gcInfoIndexForFreeListHeader = 0;
163 const size_t nonLargeObjectPageSizeMax = 1 << 17; 157 const size_t nonLargeObjectPageSizeMax = 1 << 17;
164 158
165 static_assert(nonLargeObjectPageSizeMax >= blinkPageSize, "max size supported by HeapObjectHeader must be at least blinkPageSize"); 159 static_assert(nonLargeObjectPageSizeMax >= blinkPageSize, "max size supported by HeapObjectHeader must be at least blinkPageSize");
166 160
167 class PLATFORM_EXPORT HeapObjectHeader { 161 class PLATFORM_EXPORT HeapObjectHeader {
168 public: 162 public:
169 // If gcInfoIndex is 0, this header is interpreted as a free list header. 163 // If gcInfoIndex is 0, this header is interpreted as a free list header.
170 NO_SANITIZE_ADDRESS 164 NO_SANITIZE_ADDRESS
171 HeapObjectHeader(size_t size, size_t gcInfoIndex) 165 HeapObjectHeader(size_t size, size_t gcInfoIndex, uint32_t generation)
166 : m_gcGeneration(generation)
172 { 167 {
173 #if ENABLE(ASSERT)
174 m_magic = magic;
175 #endif
176 // sizeof(HeapObjectHeader) must be equal to or smaller than 168 // sizeof(HeapObjectHeader) must be equal to or smaller than
177 // allocationGranularity, because HeapObjectHeader is used as a header 169 // allocationGranularity, because HeapObjectHeader is used as a header
178 // for a freed entry. Given that the smallest entry size is 170 // for a freed entry. Given that the smallest entry size is
179 // allocationGranularity, HeapObjectHeader must fit into the size. 171 // allocationGranularity, HeapObjectHeader must fit into the size.
180 static_assert(sizeof(HeapObjectHeader) <= allocationGranularity, "size of HeapObjectHeader must be smaller than allocationGranularity"); 172 static_assert(sizeof(HeapObjectHeader) <= allocationGranularity, "size of HeapObjectHeader must be smaller than allocationGranularity");
181 #if CPU(64BIT) 173 #if CPU(64BIT)
182 static_assert(sizeof(HeapObjectHeader) == 8, "size of HeapObjectHeader must be 8 byte aligned"); 174 static_assert(sizeof(HeapObjectHeader) == 8, "size of HeapObjectHeader must be 8 byte aligned");
183 #endif 175 #endif
184 176
185 ASSERT(gcInfoIndex < GCInfoTable::maxIndex); 177 ASSERT(gcInfoIndex < GCInfoTable::maxIndex);
(...skipping 18 matching lines...)
204 void mark(); 196 void mark();
205 void unmark(); 197 void unmark();
206 void markDead(); 198 void markDead();
207 bool isDead() const; 199 bool isDead() const;
208 200
209 Address payload(); 201 Address payload();
210 size_t payloadSize(); 202 size_t payloadSize();
211 Address payloadEnd(); 203 Address payloadEnd();
212 204
213 #if ENABLE(ASSERT) 205 #if ENABLE(ASSERT)
214 bool checkHeader() const; 206 bool checkHeader() const;
haraken 2015/11/12 15:59:08 Can you insert the following ASSERT to the checkHeader()?
peria 2015/11/13 05:44:38 Done. checkHeader() is used as ASSERT(checkHeader()).
215 // Zap magic number with a new magic number that means there was once an
216 // object allocated here, but it was freed because nobody marked it during
217 // GC.
218 void zapMagic();
219 #endif 207 #endif
208 uint32_t gcGeneration() const { return m_gcGeneration; }
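This accessor is the hook for the CL's use-after-free detector: every object records the GC generation it was allocated in, and a generation of 0 means the slot has been freed. The Member<> side of the check lives elsewhere in this CL; the following is only a hypothetical sketch of the idea, with CheckedMember as an illustrative stand-in name:

    // Sketch, assuming a smart pointer that snapshots the generation at
    // assignment and re-validates it on access.
    template <typename T>
    class CheckedMember {
    public:
        explicit CheckedMember(T* raw)
            : m_raw(raw)
            , m_gcGeneration(raw ? HeapObjectHeader::fromPayload(raw)->gcGeneration() : 0)
        {
        }

        T* get() const
        {
            // A freed object reports gcGeneration() == 0; a recycled slot
            // reports a newer generation. Either mismatch indicates a
            // use-after-free of the original object.
            ASSERT(!m_raw || HeapObjectHeader::fromPayload(m_raw)->gcGeneration() == m_gcGeneration);
            return m_raw;
        }

    private:
        T* m_raw;
        uint32_t m_gcGeneration;
    };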
220 209
221 void finalize(Address, size_t); 210 void finalize(Address, size_t);
222 static HeapObjectHeader* fromPayload(const void*); 211 static HeapObjectHeader* fromPayload(const void*);
223 212
224 static const uint16_t magic = 0xfff1;
225 static const uint16_t zappedMagic = 0x4321;
226
227 private: 213 private:
228 uint32_t m_encoded; 214 uint32_t m_encoded;
229 #if ENABLE(ASSERT) 215 // m_gcGeneration keeps track of the GC cycle in which the object gets
230 uint16_t m_magic; 216 // allocated. m_gcGeneration == 0 indicates that the object has already been freed.
231 #endif 217 uint32_t m_gcGeneration;
232
233 // On 64-bit architectures, we intentionally add 4 byte padding immediately
234 // after the HeapObjectHeader. This is because:
235 //
236 // | HeapObjectHeader (4 byte) | padding (4 byte) | object payload (8 * n byte) |
237 // ^8 byte aligned ^8 byte aligned
238 //
239 // is better than:
240 //
241 // | HeapObjectHeader (4 byte) | object payload (8 * n byte) | padding (4 byte) |
242 // ^4 byte aligned ^8 byte aligned ^4 byte aligned
243 //
244 // since the former layout aligns both the header and the payload to 8 bytes.
245 #if USE_4BYTE_HEADER_PADDING
246 public:
247 uint32_t m_padding;
248 #endif
249 }; 218 };
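Note why both this padding block and the USE_4BYTE_HEADER_PADDING macro above disappear on the new side of the patch: the added m_gcGeneration field itself occupies the 4 bytes the padding used to fill, so the header is 8 bytes in every build configuration. A sketch of the resulting layout (the struct name is an illustrative stand-in, not from the patch):

    struct NewHeaderLayout {     // stand-in for the new HeapObjectHeader
        uint32_t m_encoded;      // size, gcInfoIndex, mark/dead bits
        uint32_t m_gcGeneration; // allocation generation; 0 == freed
    };
    static_assert(sizeof(NewHeaderLayout) == 8,
        "header fills one 8-byte granule without explicit padding");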
250 219
251 class FreeListEntry final : public HeapObjectHeader { 220 class FreeListEntry final : public HeapObjectHeader {
252 public: 221 public:
253 NO_SANITIZE_ADDRESS 222 NO_SANITIZE_ADDRESS
254 explicit FreeListEntry(size_t size) 223 explicit FreeListEntry(size_t size)
255 : HeapObjectHeader(size, gcInfoIndexForFreeListHeader) 224 : HeapObjectHeader(size, gcInfoIndexForFreeListHeader, 0)
256 , m_next(nullptr) 225 , m_next(nullptr)
257 { 226 {
258 #if ENABLE(ASSERT) 227 #if ENABLE(ASSERT)
259 ASSERT(size >= sizeof(HeapObjectHeader)); 228 ASSERT(size >= sizeof(HeapObjectHeader));
260 zapMagic();
261 #endif 229 #endif
262 } 230 }
263 231
264 Address address() { return reinterpret_cast<Address>(this); } 232 Address address() { return reinterpret_cast<Address>(this); }
265 233
266 NO_SANITIZE_ADDRESS 234 NO_SANITIZE_ADDRESS
267 void unlink(FreeListEntry** prevNext) 235 void unlink(FreeListEntry** prevNext)
268 { 236 {
269 *prevNext = m_next; 237 *prevNext = m_next;
270 m_next = nullptr; 238 m_next = nullptr;
(...skipping 425 matching lines...)
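A freed block is reinterpreted in place as a FreeListEntry above, and since its HeapObjectHeader base is constructed with generation 0, gcGeneration() immediately reports the memory as freed. A sketch of that reuse pattern under a hypothetical helper name (the real addToFreeList body lives in HeapPage.cpp):

    void addToFreeListSketch(Address address, size_t size)
    {
        // Overlay a FreeListEntry on the freed range. Its header is built
        // via HeapObjectHeader(size, gcInfoIndexForFreeListHeader, 0), so
        // any stale reference checking gcGeneration() now sees 0 ("freed").
        FreeListEntry* entry = new (NotNull, address) FreeListEntry(size);
        (void)entry; // linking into the bucketed free list elided
    }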
696 ASSERT(findPageFromAddress(address + size - 1)); 664 ASSERT(findPageFromAddress(address + size - 1));
697 m_freeList.addToFreeList(address, size); 665 m_freeList.addToFreeList(address, size);
698 } 666 }
699 void clearFreeLists() override; 667 void clearFreeLists() override;
700 #if ENABLE(ASSERT) 668 #if ENABLE(ASSERT)
701 bool isConsistentForGC() override; 669 bool isConsistentForGC() override;
702 bool pagesToBeSweptContains(Address); 670 bool pagesToBeSweptContains(Address);
703 #endif 671 #endif
704 void takeFreelistSnapshot(const String& dumpBaseName) override; 672 void takeFreelistSnapshot(const String& dumpBaseName) override;
705 673
706 Address allocateObject(size_t allocationSize, size_t gcInfoIndex); 674 Address allocateObject(size_t allocationSize, size_t gcInfoIndex, uint32_t generation);
707 675
708 void freePage(NormalPage*); 676 void freePage(NormalPage*);
709 677
710 bool coalesce(); 678 bool coalesce();
711 void promptlyFreeObject(HeapObjectHeader*); 679 void promptlyFreeObject(HeapObjectHeader*);
712 bool expandObject(HeapObjectHeader*, size_t); 680 bool expandObject(HeapObjectHeader*, size_t);
713 bool shrinkObject(HeapObjectHeader*, size_t); 681 bool shrinkObject(HeapObjectHeader*, size_t);
714 void decreasePromptlyFreedSize(size_t size) { m_promptlyFreedSize -= size; } 682 void decreasePromptlyFreedSize(size_t size) { m_promptlyFreedSize -= size; }
715 683
716 bool isObjectAllocatedAtAllocationPoint(HeapObjectHeader* header) 684 bool isObjectAllocatedAtAllocationPoint(HeapObjectHeader* header)
(...skipping 59 matching lines...)
776 // LargeObjectPage::m_payloadSize. 744 // LargeObjectPage::m_payloadSize.
777 ASSERT(result != largeObjectSizeInHeader); 745 ASSERT(result != largeObjectSizeInHeader);
778 ASSERT(!pageFromObject(this)->isLargeObjectPage()); 746 ASSERT(!pageFromObject(this)->isLargeObjectPage());
779 return result; 747 return result;
780 } 748 }
781 749
782 #if ENABLE(ASSERT) 750 #if ENABLE(ASSERT)
783 NO_SANITIZE_ADDRESS inline 751 NO_SANITIZE_ADDRESS inline
784 bool HeapObjectHeader::checkHeader() const 752 bool HeapObjectHeader::checkHeader() const
785 { 753 {
786 return !pageFromObject(this)->orphaned() && m_magic == magic; 754 return !pageFromObject(this)->orphaned() && m_gcGeneration;
787 } 755 }
788 #endif 756 #endif
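Per the review thread earlier in this file, checkHeader() is only ever called under ASSERT(). What changes here is the scheme it relies on: the old m_magic field existed only in ENABLE(ASSERT) builds and was zapped on free, whereas m_gcGeneration is stored unconditionally. A comment-level sketch of the contrast:

    // old (ASSERT builds only): !orphaned && m_magic == magic
    //   - zapMagic() overwrote the magic on free, so freed headers failed
    // new (all builds):          !orphaned && m_gcGeneration != 0
    //   - free paths write generation 0, so the same test catches frees,
    //     and the field is also available to the Member<> detector
    //     outside assert builds
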
789 757
790 inline Address HeapObjectHeader::payload() 758 inline Address HeapObjectHeader::payload()
791 { 759 {
792 return reinterpret_cast<Address>(this) + sizeof(HeapObjectHeader); 760 return reinterpret_cast<Address>(this) + sizeof(HeapObjectHeader);
793 } 761 }
794 762
795 inline Address HeapObjectHeader::payloadEnd() 763 inline Address HeapObjectHeader::payloadEnd()
796 { 764 {
(...skipping 51 matching lines...)
848 } 816 }
849 817
850 NO_SANITIZE_ADDRESS inline 818 NO_SANITIZE_ADDRESS inline
851 void HeapObjectHeader::markDead() 819 void HeapObjectHeader::markDead()
852 { 820 {
853 ASSERT(checkHeader()); 821 ASSERT(checkHeader());
854 ASSERT(!isMarked()); 822 ASSERT(!isMarked());
855 m_encoded |= headerDeadBitMask; 823 m_encoded |= headerDeadBitMask;
856 } 824 }
857 825
858 inline Address NormalPageHeap::allocateObject(size_t allocationSize, size_t gcInfoIndex) 826 inline Address NormalPageHeap::allocateObject(size_t allocationSize, size_t gcInfoIndex, uint32_t generation)
859 { 827 {
860 if (LIKELY(allocationSize <= m_remainingAllocationSize)) { 828 if (LIKELY(allocationSize <= m_remainingAllocationSize)) {
861 Address headerAddress = m_currentAllocationPoint; 829 Address headerAddress = m_currentAllocationPoint;
862 m_currentAllocationPoint += allocationSize; 830 m_currentAllocationPoint += allocationSize;
863 m_remainingAllocationSize -= allocationSize; 831 m_remainingAllocationSize -= allocationSize;
864 ASSERT(gcInfoIndex > 0); 832 ASSERT(gcInfoIndex > 0);
865 new (NotNull, headerAddress) HeapObjectHeader(allocationSize, gcInfoIndex); 833 new (NotNull, headerAddress) HeapObjectHeader(allocationSize, gcInfoIndex, generation);
866 Address result = headerAddress + sizeof(HeapObjectHeader); 834 Address result = headerAddress + sizeof(HeapObjectHeader);
867 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); 835 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask));
868 836
869 SET_MEMORY_ACCESSIBLE(result, allocationSize - sizeof(HeapObjectHeader)); 837 SET_MEMORY_ACCESSIBLE(result, allocationSize - sizeof(HeapObjectHeader));
870 ASSERT(findPageFromAddress(headerAddress + allocationSize - 1)); 838 ASSERT(findPageFromAddress(headerAddress + allocationSize - 1));
871 return result; 839 return result;
872 } 840 }
873 return outOfLineAllocate(allocationSize, gcInfoIndex); 841 return outOfLineAllocate(allocationSize, gcInfoIndex);
874 } 842 }
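allocateObject() is the bump-pointer fast path: it claims allocationSize bytes at the current allocation point, placement-news the header (now stamped with the caller's GC generation), unpoisons the payload, and returns the aligned payload address; anything that does not fit falls through to outOfLineAllocate(). A hypothetical caller sketch follows; currentGcGeneration() is an assumed accessor for the thread's GC cycle counter, not something defined in this file:

    Address allocateWithGeneration(NormalPageHeap* heap, size_t allocationSize, size_t gcInfoIndex)
    {
        // Stamp the new object's header with the current GC cycle so a
        // later generation mismatch can be diagnosed as a use-after-free.
        return heap->allocateObject(allocationSize, gcInfoIndex, currentGcGeneration());
    }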
875 843
876 } // namespace blink 844 } // namespace blink
877 845
878 #endif // HeapPage_h 846 #endif // HeapPage_h