Chromium Code Reviews

Unified Diff: Source/platform/heap/Heap.cpp

Issue 1176003002: Oilpan: Defer reusing freed memory for one GC cycle (Closed) Base URL: svn://svn.chromium.org/blink/trunk
Patch Set: Created 5 years, 5 months ago
 /*
  * Copyright (C) 2013 Google Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are
  * met:
  *
  *     * Redistributions of source code must retain the above copyright
  * notice, this list of conditions and the following disclaimer.
  *     * Redistributions in binary form must reproduce the above
(...skipping 191 matching lines...)
 void HeapObjectHeader::zapMagic()
 {
     ASSERT(checkHeader());
     m_magic = zappedMagic;
 }
 #endif
 
 void HeapObjectHeader::finalize(Address object, size_t objectSize)
 {
     const GCInfo* gcInfo = Heap::gcInfo(gcInfoIndex());
-    if (gcInfo->hasFinalizer()) {
+    if (gcInfo->hasFinalizer())
         gcInfo->m_finalize(object);
-    }
 
     ASAN_RETIRE_CONTAINER_ANNOTATION(object, objectSize);
-
-#if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
-    // In Debug builds, memory is zapped when it's freed, and the zapped memory
-    // is zeroed out when the memory is reused. Memory is also zapped when
-    // using Leak Sanitizer because the heap is used as a root region for LSan
-    // and therefore pointers in unreachable memory could hide leaks.
-    for (size_t i = 0; i < objectSize; ++i)
-        object[i] = finalizedZapValue;
-
-    // Zap the primary vTable entry (secondary vTable entries are not zapped).
-    if (gcInfo->hasVTable()) {
-        *(reinterpret_cast<uintptr_t*>(object)) = zappedVTable;
-    }
-#endif
-    // In Release builds, the entire object is zeroed out when it is added to
-    // the free list. This happens right after sweeping the page and before the
-    // thread commences execution.
 }
 
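The deleted block documented the old scheme: in Debug/LSan builds the whole object was zapped with finalizedZapValue and the primary vtable slot was poisoned, while Release builds relied on zeroing at free-list insertion. This patch replaces that with the reuse-zap machinery added to FreeList further down. As a minimal sketch of what the removed vtable zap achieved (not Blink code; kZappedVTable is an invented stand-in for zappedVTable):

    #include <cstdint>

    // Invented stand-in for Blink's zappedVTable constant.
    constexpr uintptr_t kZappedVTable = 0xd0d0d0d0;

    // On common C++ ABIs the first word of a polymorphic object is its vtable
    // pointer; overwriting it makes any later virtual call through a dangling
    // pointer fault deterministically instead of dispatching through stale
    // memory.
    void zapPrimaryVTableEntry(void* object)
    {
        *reinterpret_cast<uintptr_t*>(object) = kZappedVTable;
    }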
 BaseHeap::BaseHeap(ThreadState* state, int index)
     : m_firstPage(nullptr)
     , m_firstUnsweptPage(nullptr)
     , m_threadState(state)
     , m_index(index)
 {
 }
 
(...skipping 375 matching lines...)
     json.pushInteger(bucketStats[i].entryCount);
     json.endArray();
 
     json.beginArray("perBucketFreeSize");
     for (size_t i = 0; i < blinkPageSizeLog2; ++i)
         json.pushInteger(bucketStats[i].freeSize);
     json.endArray();
 }
 #endif
 
+NO_SANITIZE_ADDRESS
 void NormalPageHeap::allocatePage()
 {
     threadState()->shouldFlushHeapDoesNotContainCache();
     PageMemory* pageMemory = Heap::freePagePool()->takeFreePage(heapIndex());
     // We continue allocating page memory until we succeed in committing one.
     while (!pageMemory) {
         // Allocate a memory region for blinkPagesPerRegion pages that
         // will each have the following layout.
         //
         // [ guard os page | ... payload ... | guard os page ]
(...skipping 15 matching lines...)
         } else {
             Heap::freePagePool()->addFreePage(heapIndex(), memory);
         }
         offset += blinkPageSize;
     }
     NormalPage* page = new (pageMemory->writableStart()) NormalPage(pageMemory, this);
     page->link(&m_firstPage);
 
     Heap::increaseAllocatedSpace(page->size());
+#if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
+    // Allow the following addToFreeList() to add the newly allocated memory
+    // to the free list.
+    Address address = page->payload();
+    for (size_t i = 0; i < page->payloadSize(); i++)
+        address[i] = reuseAllowedZapValue;
+#endif
     addToFreeList(page->payload(), page->payloadSize());
 }
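The loop added above establishes the invariant the new FreeList logic relies on: a freshly committed page is uniformly marked reuse-allowed, so the addToFreeList() that follows links the whole payload immediately instead of deferring it for a cycle. A minimal sketch of that invariant (not Blink code; kReuseAllowed is an invented stand-in for reuseAllowedZapValue):

    #include <cstdint>
    #include <vector>

    // Invented stand-in for Blink's reuseAllowedZapValue.
    constexpr uint8_t kReuseAllowed = 0x2a;

    // Model of a freshly committed page payload: every byte is pre-marked
    // reuse-allowed so the first free-list insertion accepts the region.
    std::vector<uint8_t> newPagePayload(size_t payloadSize)
    {
        return std::vector<uint8_t>(payloadSize, kReuseAllowed);
    }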
 
 void NormalPageHeap::freePage(NormalPage* page)
 {
     Heap::decreaseAllocatedSpace(page->size());
 
     if (page->terminating()) {
         // The thread is shutting down and this page is being removed as a part
         // of the thread local GC. In that case the object could be traced in
(...skipping 425 matching lines...)
 {
 }
 
 void FreeList::addToFreeList(Address address, size_t size)
 {
     ASSERT(size < blinkPagePayloadSize());
     // The free list entries are only pointer aligned (but when we allocate
     // from them we are 8 byte aligned due to the header size).
     ASSERT(!((reinterpret_cast<uintptr_t>(address) + sizeof(HeapObjectHeader)) & allocationMask));
     ASSERT(!(size & allocationMask));
-    ASAN_POISON_MEMORY_REGION(address, size);
+    ASAN_UNPOISON_MEMORY_REGION(address, size);
     FreeListEntry* entry;
     if (size < sizeof(*entry)) {
         // Create a dummy header with only a size and freelist bit set.
         ASSERT(size >= sizeof(HeapObjectHeader));
         // Free list encode the size to mark the lost memory as freelist memory.
         new (NotNull, address) HeapObjectHeader(size, gcInfoIndexForFreeListHeader);
+
+        ASAN_POISON_MEMORY_REGION(address, size);
         // This memory gets lost. Sweeping can reclaim it.
         return;
     }
     entry = new (NotNull, address) FreeListEntry(size);
-#if defined(ADDRESS_SANITIZER)
-    BasePage* page = pageFromObject(address);
-    ASSERT(!page->isLargeObjectPage());
-    // For ASan we don't add the entry to the free lists until the
-    // asanDeferMemoryReuseCount reaches zero. However we always add entire
-    // pages to ensure that adding a new page will increase the allocation
-    // space.
-    if (static_cast<NormalPage*>(page)->payloadSize() != size && !entry->shouldAddToFreeList())
-        return;
-#endif
+
+#if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
+    // The following logic delays reusing free lists for (at least) one GC
+    // cycle or coalescing. This is helpful to detect use-after-free errors
+    // that could be caused by lazy sweeping etc.
+    size_t allowedCount = 0;
+    size_t forbiddenCount = 0;
+    for (size_t i = sizeof(FreeListEntry); i < size; i++) {
+        if (address[i] == reuseAllowedZapValue) {
+            allowedCount++;
+        } else if (address[i] == reuseForbiddenZapValue) {
+            forbiddenCount++;
+        } else {
+            ASSERT_NOT_REACHED();
+        }
+    }
+    size_t entryCount = size - sizeof(FreeListEntry);
+    if (forbiddenCount == entryCount) {
+        // If all values in the memory region are reuseForbiddenZapValue,
+        // we flip them to reuseAllowedZapValue. This allows the next
+        // addToFreeList() to add the memory region to the free list
+        // (unless someone concatenates the memory region with another memory
+        // region that contains reuseForbiddenZapValue.)
+        for (size_t i = sizeof(FreeListEntry); i < size; i++)
+            address[i] = reuseAllowedZapValue;
+        ASAN_POISON_MEMORY_REGION(address, size);
+        // Don't add the memory region to the free list in this addToFreeList().
+        return;
+    }
+    if (allowedCount != entryCount) {
+        // If the memory region mixes reuseForbiddenZapValue and
+        // reuseAllowedZapValue, we (conservatively) flip all the values
+        // to reuseForbiddenZapValue. These values will be changed to
+        // reuseAllowedZapValue in the next addToFreeList().
+        for (size_t i = sizeof(FreeListEntry); i < size; i++)
+            address[i] = reuseForbiddenZapValue;
+        ASAN_POISON_MEMORY_REGION(address, size);
+        // Don't add the memory region to the free list in this addToFreeList().
+        return;
+    }
+    // We reach here only when all the values in the memory region are
+    // reuseAllowedZapValue. In this case, we are allowed to add the memory
+    // region to the free list and reuse it for another object.
+#endif
+    ASAN_POISON_MEMORY_REGION(address, size);
+
     int index = bucketIndexForSize(size);
     entry->link(&m_freeLists[index]);
     if (index > m_biggestFreeListIndex)
         m_biggestFreeListIndex = index;
 }
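The new logic turns each free region into a small two-state machine, scanned from sizeof(FreeListEntry) onward because the just-constructed FreeListEntry header legitimately overwrites the first bytes. A self-contained model of the three branches (a sketch, not Blink code; kAllowed/kForbidden are invented stand-ins for reuseAllowedZapValue/reuseForbiddenZapValue):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Invented stand-ins for Blink's zap values.
    constexpr uint8_t kAllowed = 0x2a;
    constexpr uint8_t kForbidden = 0x2c;

    enum class FreeListDecision { AddNow, DeferFlippedToAllowed, DeferFlippedToForbidden };

    // Mirrors the branch structure added to FreeList::addToFreeList():
    // - all bytes forbidden -> flip to allowed, defer (reusable next pass);
    // - mixed               -> flip all to forbidden, defer (e.g. after coalescing);
    // - all bytes allowed   -> add to the free list now.
    FreeListDecision classifyRegion(uint8_t* bytes, size_t size)
    {
        size_t allowedCount = 0;
        for (size_t i = 0; i < size; i++) {
            assert(bytes[i] == kAllowed || bytes[i] == kForbidden);
            if (bytes[i] == kAllowed)
                allowedCount++;
        }
        if (allowedCount == 0) {
            for (size_t i = 0; i < size; i++)
                bytes[i] = kAllowed;
            return FreeListDecision::DeferFlippedToAllowed;
        }
        if (allowedCount != size) {
            for (size_t i = 0; i < size; i++)
                bytes[i] = kForbidden;
            return FreeListDecision::DeferFlippedToForbidden;
        }
        return FreeListDecision::AddNow;
    }

Net effect: a freshly zapped (all-forbidden) region is flipped to allowed but skipped on its first pass, and only a later addToFreeList() actually links it, which is what defers reuse by one GC cycle.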
 
+#if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
+NO_SANITIZE_ADDRESS
+void NEVER_INLINE FreeList::zapFreedMemory(Address address, size_t size)
+{
+    for (size_t i = 0; i < size; i++) {
+        // See the comment in addToFreeList().
+        if (address[i] != reuseAllowedZapValue)
+            address[i] = reuseForbiddenZapValue;
+    }
+}
+#endif
+
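Taken together with addToFreeList(): sweeping zaps a reclaimed region to the forbidden value, the first addToFreeList() flips it to allowed and deliberately skips linking it, and the region only enters a free list on a later pass (presumably the next sweep of the page), so freed memory sits out at least one GC cycle; ASAN_POISON_MEMORY_REGION keeps the deferred bytes poisoned in the meantime so any stale access reports under ASan. Note that zapFreedMemory() leaves bytes already equal to reuseAllowedZapValue untouched; a region that ends up mixed is then conservatively flipped all-forbidden by the next addToFreeList().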
 void FreeList::clear()
 {
     m_biggestFreeListIndex = 0;
     for (size_t i = 0; i < blinkPageSizeLog2; ++i)
         m_freeLists[i] = nullptr;
 }
 
 int FreeList::bucketIndexForSize(size_t size)
 {
     ASSERT(size > 0);
(...skipping 1313 matching lines...)
 size_t Heap::s_allocatedObjectSize = 0;
 size_t Heap::s_allocatedSpace = 0;
 size_t Heap::s_markedObjectSize = 0;
 // We don't want to use 0 KB for the initial value because it may end up
 // triggering the first GC of some thread too prematurely.
 size_t Heap::s_estimatedLiveObjectSize = 512 * 1024;
 size_t Heap::s_externalObjectSizeAtLastGC = 0;
 double Heap::s_estimatedMarkingTimePerByte = 0.0;
 
 } // namespace blink