Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(264)

Side by Side Diff: third_party/WebKit/Source/platform/heap/HeapPage.cpp

Issue 1411603007: [Oilpan] Add use-after-free detector in Member<> Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Rebase Created 5 years ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 70 matching lines...) Expand 10 before | Expand all | Expand 10 after
81 static_cast<LargeObjectPage*>(largePage)->setIsVectorBackingPage(); \ 81 static_cast<LargeObjectPage*>(largePage)->setIsVectorBackingPage(); \
82 } 82 }
83 #else 83 #else
84 #define ENABLE_ASAN_CONTAINER_ANNOTATIONS 0 84 #define ENABLE_ASAN_CONTAINER_ANNOTATIONS 0
85 #define ASAN_RETIRE_CONTAINER_ANNOTATION(payload, payloadSize) 85 #define ASAN_RETIRE_CONTAINER_ANNOTATION(payload, payloadSize)
86 #define ASAN_MARK_LARGE_VECTOR_CONTAINER(heap, largeObject) 86 #define ASAN_MARK_LARGE_VECTOR_CONTAINER(heap, largeObject)
87 #endif 87 #endif
88 88
89 namespace blink { 89 namespace blink {
90 90
#if ENABLE(ASSERT)
// Overwrites the header's magic value with a known "zapped" pattern.
// Called when the object is (about to be) freed so that a later
// checkHeader() on a stale reference fails — presumably checkHeader()
// validates m_magic; this is the use-after-free detection hook.
// NO_SANITIZE_ADDRESS: the header may already be ASan-poisoned when
// this runs, so the store must bypass instrumentation.
NO_SANITIZE_ADDRESS
void HeapObjectHeader::zapMagic()
{
    // The header must still be valid at the point we zap it.
    ASSERT(checkHeader());
    m_magic = zappedMagic;
}
#endif
99
// Runs the object's registered finalizer (if any), then retires the
// ASan container annotation for the object's memory region.
//
// |object|     - start address of the object payload to finalize.
// |objectSize| - size of the payload, forwarded to the ASan annotation.
//
// The finalizer must run before the region is retired: the annotation
// macro may mark the memory inaccessible/poisoned afterwards.
void HeapObjectHeader::finalize(Address object, size_t objectSize)
{
    // Look up the GCInfo for this object's type via its gcInfoIndex.
    const GCInfo* gcInfo = Heap::gcInfo(gcInfoIndex());
    if (gcInfo->hasFinalizer())
        gcInfo->m_finalize(object);

    // No-op unless container annotations are enabled (see the
    // ENABLE_ASAN_CONTAINER_ANNOTATIONS block above).
    ASAN_RETIRE_CONTAINER_ANNOTATION(object, objectSize);
}
108 99
109 BaseHeap::BaseHeap(ThreadState* state, int index) 100 BaseHeap::BaseHeap(ThreadState* state, int index)
(...skipping 506 matching lines...) Expand 10 before | Expand all | Expand 10 after
616 if (isObjectAllocatedAtAllocationPoint(header)) { 607 if (isObjectAllocatedAtAllocationPoint(header)) {
617 m_currentAllocationPoint -= shrinkSize; 608 m_currentAllocationPoint -= shrinkSize;
618 setRemainingAllocationSize(m_remainingAllocationSize + shrinkSize); 609 setRemainingAllocationSize(m_remainingAllocationSize + shrinkSize);
619 SET_MEMORY_INACCESSIBLE(m_currentAllocationPoint, shrinkSize); 610 SET_MEMORY_INACCESSIBLE(m_currentAllocationPoint, shrinkSize);
620 header->setSize(allocationSize); 611 header->setSize(allocationSize);
621 return true; 612 return true;
622 } 613 }
623 ASSERT(shrinkSize >= sizeof(HeapObjectHeader)); 614 ASSERT(shrinkSize >= sizeof(HeapObjectHeader));
624 ASSERT(header->gcInfoIndex() > 0); 615 ASSERT(header->gcInfoIndex() > 0);
625 Address shrinkAddress = header->payloadEnd() - shrinkSize; 616 Address shrinkAddress = header->payloadEnd() - shrinkSize;
626 HeapObjectHeader* freedHeader = new (NotNull, shrinkAddress) HeapObjectHeade r(shrinkSize, header->gcInfoIndex()); 617 HeapObjectHeader* freedHeader = new (NotNull, shrinkAddress) HeapObjectHeade r(shrinkSize, header->gcInfoIndex(), gcGenerationForFreeListEntry);
627 freedHeader->markPromptlyFreed(); 618 freedHeader->markPromptlyFreed();
628 ASSERT(pageFromObject(reinterpret_cast<Address>(header)) == findPageFromAddr ess(reinterpret_cast<Address>(header))); 619 ASSERT(pageFromObject(reinterpret_cast<Address>(header)) == findPageFromAddr ess(reinterpret_cast<Address>(header)));
629 m_promptlyFreedSize += shrinkSize; 620 m_promptlyFreedSize += shrinkSize;
630 header->setSize(allocationSize); 621 header->setSize(allocationSize);
631 SET_MEMORY_INACCESSIBLE(shrinkAddress + sizeof(HeapObjectHeader), shrinkSize - sizeof(HeapObjectHeader)); 622 SET_MEMORY_INACCESSIBLE(shrinkAddress + sizeof(HeapObjectHeader), shrinkSize - sizeof(HeapObjectHeader));
632 return false; 623 return false;
633 } 624 }
634 625
635 Address NormalPageHeap::lazySweepPages(size_t allocationSize, size_t gcInfoIndex ) 626 Address NormalPageHeap::lazySweepPages(size_t allocationSize, size_t gcInfoIndex )
636 { 627 {
(...skipping 133 matching lines...) Expand 10 before | Expand all | Expand 10 after
770 // as it is considered too costly. 761 // as it is considered too costly.
771 if (!entry || entry->size() < allocationSize) 762 if (!entry || entry->size() < allocationSize)
772 break; 763 break;
773 } 764 }
774 if (entry) { 765 if (entry) {
775 entry->unlink(&m_freeList.m_freeLists[index]); 766 entry->unlink(&m_freeList.m_freeLists[index]);
776 setAllocationPoint(entry->address(), entry->size()); 767 setAllocationPoint(entry->address(), entry->size());
777 ASSERT(hasCurrentAllocationArea()); 768 ASSERT(hasCurrentAllocationArea());
778 ASSERT(remainingAllocationSize() >= allocationSize); 769 ASSERT(remainingAllocationSize() >= allocationSize);
779 m_freeList.m_biggestFreeListIndex = index; 770 m_freeList.m_biggestFreeListIndex = index;
780 return allocateObject(allocationSize, gcInfoIndex); 771 return allocateObject(allocationSize, gcInfoIndex, Heap::gcGeneratio n());
781 } 772 }
782 } 773 }
783 m_freeList.m_biggestFreeListIndex = index; 774 m_freeList.m_biggestFreeListIndex = index;
784 return nullptr; 775 return nullptr;
785 } 776 }
786 777
// Heap arena dedicated to large objects (each object gets its own
// LargeObjectPage). All state lives in the BaseHeap part; |state| is
// the owning thread's ThreadState and |index| identifies this heap
// within that thread's heap array.
LargeObjectHeap::LargeObjectHeap(ThreadState* state, int index)
    : BaseHeap(state, index)
{
}
(...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after
823 threadState()->shouldFlushHeapDoesNotContainCache(); 814 threadState()->shouldFlushHeapDoesNotContainCache();
824 PageMemory* pageMemory = PageMemory::allocate(largeObjectSize); 815 PageMemory* pageMemory = PageMemory::allocate(largeObjectSize);
825 Address largeObjectAddress = pageMemory->writableStart(); 816 Address largeObjectAddress = pageMemory->writableStart();
826 Address headerAddress = largeObjectAddress + LargeObjectPage::pageHeaderSize (); 817 Address headerAddress = largeObjectAddress + LargeObjectPage::pageHeaderSize ();
827 #if ENABLE(ASSERT) 818 #if ENABLE(ASSERT)
828 // Verify that the allocated PageMemory is expectedly zeroed. 819 // Verify that the allocated PageMemory is expectedly zeroed.
829 for (size_t i = 0; i < largeObjectSize; ++i) 820 for (size_t i = 0; i < largeObjectSize; ++i)
830 ASSERT(!largeObjectAddress[i]); 821 ASSERT(!largeObjectAddress[i]);
831 #endif 822 #endif
832 ASSERT(gcInfoIndex > 0); 823 ASSERT(gcInfoIndex > 0);
833 HeapObjectHeader* header = new (NotNull, headerAddress) HeapObjectHeader(lar geObjectSizeInHeader, gcInfoIndex); 824 HeapObjectHeader* header = new (NotNull, headerAddress) HeapObjectHeader(lar geObjectSizeInHeader, gcInfoIndex, Heap::gcGeneration());
834 Address result = headerAddress + sizeof(*header); 825 Address result = headerAddress + sizeof(*header);
835 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); 826 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask));
836 LargeObjectPage* largeObject = new (largeObjectAddress) LargeObjectPage(page Memory, this, allocationSize); 827 LargeObjectPage* largeObject = new (largeObjectAddress) LargeObjectPage(page Memory, this, allocationSize);
837 ASSERT(header->checkHeader()); 828 ASSERT(header->checkHeader());
838 829
839 // Poison the object header and allocationGranularity bytes after the object 830 // Poison the object header and allocationGranularity bytes after the object
840 ASAN_POISON_MEMORY_REGION(header, sizeof(*header)); 831 ASAN_POISON_MEMORY_REGION(header, sizeof(*header));
841 ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allo cationGranularity); 832 ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allo cationGranularity);
842 833
843 largeObject->link(&m_firstPage); 834 largeObject->link(&m_firstPage);
(...skipping 73 matching lines...) Expand 10 before | Expand all | Expand 10 after
917 // The free list entries are only pointer aligned (but when we allocate 908 // The free list entries are only pointer aligned (but when we allocate
918 // from them we are 8 byte aligned due to the header size). 909 // from them we are 8 byte aligned due to the header size).
919 ASSERT(!((reinterpret_cast<uintptr_t>(address) + sizeof(HeapObjectHeader)) & allocationMask)); 910 ASSERT(!((reinterpret_cast<uintptr_t>(address) + sizeof(HeapObjectHeader)) & allocationMask));
920 ASSERT(!(size & allocationMask)); 911 ASSERT(!(size & allocationMask));
921 ASAN_UNPOISON_MEMORY_REGION(address, size); 912 ASAN_UNPOISON_MEMORY_REGION(address, size);
922 FreeListEntry* entry; 913 FreeListEntry* entry;
923 if (size < sizeof(*entry)) { 914 if (size < sizeof(*entry)) {
924 // Create a dummy header with only a size and freelist bit set. 915 // Create a dummy header with only a size and freelist bit set.
925 ASSERT(size >= sizeof(HeapObjectHeader)); 916 ASSERT(size >= sizeof(HeapObjectHeader));
926 // Free list encode the size to mark the lost memory as freelist memory. 917 // Free list encode the size to mark the lost memory as freelist memory.
927 new (NotNull, address) HeapObjectHeader(size, gcInfoIndexForFreeListHead er); 918 new (NotNull, address) HeapObjectHeader(size, gcInfoIndexForFreeListHead er, gcGenerationForFreeListEntry);
928 919
929 ASAN_POISON_MEMORY_REGION(address, size); 920 ASAN_POISON_MEMORY_REGION(address, size);
930 // This memory gets lost. Sweeping can reclaim it. 921 // This memory gets lost. Sweeping can reclaim it.
931 return; 922 return;
932 } 923 }
933 entry = new (NotNull, address) FreeListEntry(size); 924 entry = new (NotNull, address) FreeListEntry(size);
934 925
935 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) 926 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
936 // The following logic delays reusing free lists for (at least) one GC 927 // The following logic delays reusing free lists for (at least) one GC
937 // cycle or coalescing. This is helpful to detect use-after-free errors 928 // cycle or coalescing. This is helpful to detect use-after-free errors
(...skipping 371 matching lines...) Expand 10 before | Expand all | Expand 10 after
1309 objectStartNumber = (mapIndex * 8) + 7 - leadingZeroes; 1300 objectStartNumber = (mapIndex * 8) + 7 - leadingZeroes;
1310 objectOffset = objectStartNumber * allocationGranularity; 1301 objectOffset = objectStartNumber * allocationGranularity;
1311 Address objectAddress = objectOffset + payload(); 1302 Address objectAddress = objectOffset + payload();
1312 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(objectAddress ); 1303 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(objectAddress );
1313 if (header->isFree()) 1304 if (header->isFree())
1314 return nullptr; 1305 return nullptr;
1315 ASSERT(header->checkHeader()); 1306 ASSERT(header->checkHeader());
1316 return header; 1307 return header;
1317 } 1308 }
1318 1309
1310 HeapObjectHeader* NormalPage::findHeaderFromObject(const void* obj)
haraken 2015/11/25 02:38:54 I'm afraid that this method would be super heavy.
1311 {
1312 ASSERT(payload() <= obj && obj <= payloadEnd());
1313
1314 Address start = payload();
1315 for (Address headerAddress = start; headerAddress < payloadEnd();) {
1316 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAdd ress);
1317 Address nextHeaderAddress = headerAddress + header->size();
1318 if (header <= obj && obj < nextHeaderAddress)
1319 return header;
1320 headerAddress = nextHeaderAddress;
1321 ASSERT(headerAddress <= payloadEnd());
1322 }
1323
1324 ASSERT_NOT_REACHED();
1325 return nullptr;
1326 }
1327
1319 #if ENABLE(ASSERT) 1328 #if ENABLE(ASSERT)
1320 static bool isUninitializedMemory(void* objectPointer, size_t objectSize) 1329 static bool isUninitializedMemory(void* objectPointer, size_t objectSize)
1321 { 1330 {
1322 // Scan through the object's fields and check that they are all zero. 1331 // Scan through the object's fields and check that they are all zero.
1323 Address* objectFields = reinterpret_cast<Address*>(objectPointer); 1332 Address* objectFields = reinterpret_cast<Address*>(objectPointer);
1324 for (size_t i = 0; i < objectSize / sizeof(Address); ++i) { 1333 for (size_t i = 0; i < objectSize / sizeof(Address); ++i) {
1325 if (objectFields[i] != 0) 1334 if (objectFields[i] != 0)
1326 return false; 1335 return false;
1327 } 1336 }
1328 return true; 1337 return true;
(...skipping 255 matching lines...) Expand 10 before | Expand all | Expand 10 after
1584 1593
1585 m_hasEntries = true; 1594 m_hasEntries = true;
1586 size_t index = hash(address); 1595 size_t index = hash(address);
1587 ASSERT(!(index & 1)); 1596 ASSERT(!(index & 1));
1588 Address cachePage = roundToBlinkPageStart(address); 1597 Address cachePage = roundToBlinkPageStart(address);
1589 m_entries[index + 1] = m_entries[index]; 1598 m_entries[index + 1] = m_entries[index];
1590 m_entries[index] = cachePage; 1599 m_entries[index] = cachePage;
1591 } 1600 }
1592 1601
1593 } // namespace blink 1602 } // namespace blink
OLDNEW
« no previous file with comments | « third_party/WebKit/Source/platform/heap/HeapPage.h ('k') | third_party/WebKit/Source/platform/heap/ThreadState.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698