Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(114)

Side by Side Diff: Source/platform/heap/Heap.cpp

Issue 1176003002: Oilpan: Defer reusing freed memory for one GC cycle (Closed) Base URL: svn://svn.chromium.org/blink/trunk
Patch Set: Created 5 years, 6 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « Source/platform/heap/Heap.h ('k') | Source/platform/heap/HeapTest.cpp » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 188 matching lines...) Expand 10 before | Expand all | Expand 10 after
199 void HeapObjectHeader::zapMagic() 199 void HeapObjectHeader::zapMagic()
200 { 200 {
201 checkHeader(); 201 checkHeader();
202 m_magic = zappedMagic; 202 m_magic = zappedMagic;
203 } 203 }
204 #endif 204 #endif
205 205
206 void HeapObjectHeader::finalize(Address object, size_t objectSize) 206 void HeapObjectHeader::finalize(Address object, size_t objectSize)
207 { 207 {
208 const GCInfo* gcInfo = Heap::gcInfo(gcInfoIndex()); 208 const GCInfo* gcInfo = Heap::gcInfo(gcInfoIndex());
209 if (gcInfo->hasFinalizer()) { 209 if (gcInfo->hasFinalizer())
210 gcInfo->m_finalize(object); 210 gcInfo->m_finalize(object);
211 }
212 211
213 ASAN_RETIRE_CONTAINER_ANNOTATION(object, objectSize); 212 ASAN_RETIRE_CONTAINER_ANNOTATION(object, objectSize);
214
215 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
216 // In Debug builds, memory is zapped when it's freed, and the zapped memory
217 // is zeroed out when the memory is reused. Memory is also zapped when
218 // using Leak Sanitizer because the heap is used as a root region for LSan
219 // and therefore pointers in unreachable memory could hide leaks.
220 for (size_t i = 0; i < objectSize; ++i)
221 object[i] = finalizedZapValue;
222
223 // Zap the primary vTable entry (secondary vTable entries are not zapped).
224 if (gcInfo->hasVTable()) {
225 *(reinterpret_cast<uintptr_t*>(object)) = zappedVTable;
226 }
227 #endif
228 // In Release builds, the entire object is zeroed out when it is added to
229 // the free list. This happens right after sweeping the page and before the
230 // thread commences execution.
231 } 213 }
232 214
233 BaseHeap::BaseHeap(ThreadState* state, int index) 215 BaseHeap::BaseHeap(ThreadState* state, int index)
234 : m_firstPage(nullptr) 216 : m_firstPage(nullptr)
235 , m_firstUnsweptPage(nullptr) 217 , m_firstUnsweptPage(nullptr)
236 , m_threadState(state) 218 , m_threadState(state)
237 , m_index(index) 219 , m_index(index)
238 { 220 {
239 } 221 }
240 222
(...skipping 334 matching lines...) Expand 10 before | Expand all | Expand 10 after
575 json.pushInteger(bucketStats[i].entryCount); 557 json.pushInteger(bucketStats[i].entryCount);
576 json.endArray(); 558 json.endArray();
577 559
578 json.beginArray("perBucketFreeSize"); 560 json.beginArray("perBucketFreeSize");
579 for (size_t i = 0; i < blinkPageSizeLog2; ++i) 561 for (size_t i = 0; i < blinkPageSizeLog2; ++i)
580 json.pushInteger(bucketStats[i].freeSize); 562 json.pushInteger(bucketStats[i].freeSize);
581 json.endArray(); 563 json.endArray();
582 } 564 }
583 #endif 565 #endif
584 566
567 NO_SANITIZE_ADDRESS
585 void NormalPageHeap::allocatePage() 568 void NormalPageHeap::allocatePage()
586 { 569 {
587 threadState()->shouldFlushHeapDoesNotContainCache(); 570 threadState()->shouldFlushHeapDoesNotContainCache();
588 PageMemory* pageMemory = Heap::freePagePool()->takeFreePage(heapIndex()); 571 PageMemory* pageMemory = Heap::freePagePool()->takeFreePage(heapIndex());
589 // We continue allocating page memory until we succeed in committing one. 572 // We continue allocating page memory until we succeed in committing one.
590 while (!pageMemory) { 573 while (!pageMemory) {
591 // Allocate a memory region for blinkPagesPerRegion pages that 574 // Allocate a memory region for blinkPagesPerRegion pages that
592 // will each have the following layout. 575 // will each have the following layout.
593 // 576 //
594 // [ guard os page | ... payload ... | guard os page ] 577 // [ guard os page | ... payload ... | guard os page ]
(...skipping 15 matching lines...) Expand all
610 } else { 593 } else {
611 Heap::freePagePool()->addFreePage(heapIndex(), memory); 594 Heap::freePagePool()->addFreePage(heapIndex(), memory);
612 } 595 }
613 offset += blinkPageSize; 596 offset += blinkPageSize;
614 } 597 }
615 } 598 }
616 NormalPage* page = new (pageMemory->writableStart()) NormalPage(pageMemory, this); 599 NormalPage* page = new (pageMemory->writableStart()) NormalPage(pageMemory, this);
617 page->link(&m_firstPage); 600 page->link(&m_firstPage);
618 601
619 Heap::increaseAllocatedSpace(page->size()); 602 Heap::increaseAllocatedSpace(page->size());
603 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
604 // Allow the following addToFreeList() to add the newly allocated memory
605 // to the free list.
606 Address address = page->payload();
607 for (size_t i = 0; i < page->payloadSize(); i++)
608 address[i] = reuseAllowedZapValue;
609 #endif
620 addToFreeList(page->payload(), page->payloadSize()); 610 addToFreeList(page->payload(), page->payloadSize());
621 } 611 }
622 612
623 void NormalPageHeap::freePage(NormalPage* page) 613 void NormalPageHeap::freePage(NormalPage* page)
624 { 614 {
625 Heap::decreaseAllocatedSpace(page->size()); 615 Heap::decreaseAllocatedSpace(page->size());
626 616
627 if (page->terminating()) { 617 if (page->terminating()) {
628 // The thread is shutting down and this page is being removed as a part 618 // The thread is shutting down and this page is being removed as a part
629 // of the thread local GC. In that case the object could be traced in 619 // of the thread local GC. In that case the object could be traced in
(...skipping 418 matching lines...) Expand 10 before | Expand all | Expand 10 after
1048 } 1038 }
1049 } 1039 }
1050 return result; 1040 return result;
1051 } 1041 }
1052 1042
1053 FreeList::FreeList() 1043 FreeList::FreeList()
1054 : m_biggestFreeListIndex(0) 1044 : m_biggestFreeListIndex(0)
1055 { 1045 {
1056 } 1046 }
1057 1047
1048 NO_SANITIZE_ADDRESS
1058 void FreeList::addToFreeList(Address address, size_t size) 1049 void FreeList::addToFreeList(Address address, size_t size)
1059 { 1050 {
1060 ASSERT(size < blinkPagePayloadSize()); 1051 ASSERT(size < blinkPagePayloadSize());
1061 // The free list entries are only pointer aligned (but when we allocate 1052 // The free list entries are only pointer aligned (but when we allocate
1062 // from them we are 8 byte aligned due to the header size). 1053 // from them we are 8 byte aligned due to the header size).
1063 ASSERT(!((reinterpret_cast<uintptr_t>(address) + sizeof(HeapObjectHeader)) & allocationMask)); 1054 ASSERT(!((reinterpret_cast<uintptr_t>(address) + sizeof(HeapObjectHeader)) & allocationMask));
1064 ASSERT(!(size & allocationMask)); 1055 ASSERT(!(size & allocationMask));
1065 ASAN_POISON_MEMORY_REGION(address, size); 1056 ASAN_POISON_MEMORY_REGION(address, size);
haraken 2015/06/17 21:11:43 If we delay this ASAN_POISON_MEMORY_REGION, we need
1066 FreeListEntry* entry; 1057 FreeListEntry* entry;
1067 if (size < sizeof(*entry)) { 1058 if (size < sizeof(*entry)) {
1068 // Create a dummy header with only a size and freelist bit set. 1059 // Create a dummy header with only a size and freelist bit set.
1069 ASSERT(size >= sizeof(HeapObjectHeader)); 1060 ASSERT(size >= sizeof(HeapObjectHeader));
1070 // Free list encode the size to mark the lost memory as freelist memory. 1061 // Free list encode the size to mark the lost memory as freelist memory.
 1071 new (NotNull, address) HeapObjectHeader(size, gcInfoIndexForFreeListHeader); 1062
1072 // This memory gets lost. Sweeping can reclaim it. 1063 // This memory gets lost. Sweeping can reclaim it.
1073 return; 1064 return;
1074 } 1065 }
1075 entry = new (NotNull, address) FreeListEntry(size); 1066 entry = new (NotNull, address) FreeListEntry(size);
1076 #if defined(ADDRESS_SANITIZER) 1067
1077 BasePage* page = pageFromObject(address); 1068 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
1078 ASSERT(!page->isLargeObjectPage()); 1069 // The following logic delays reusing free lists for (at least) one GC
1079 // For ASan we don't add the entry to the free lists until the 1070 // cycle or coalescing. This is helpful to detect use-after-free errors
1080 // asanDeferMemoryReuseCount reaches zero. However we always add entire 1071 // that could be caused by lazy sweeping etc.
1081 // pages to ensure that adding a new page will increase the allocation 1072 size_t allowedCount = 0;
1082 // space. 1073 size_t forbiddenCount = 0;
 1083 if (static_cast<NormalPage*>(page)->payloadSize() != size && !entry->shouldAddToFreeList())
1075 if (address[i] == reuseAllowedZapValue) {
1076 allowedCount++;
1077 } else if (address[i] == reuseForbiddenZapValue) {
1078 forbiddenCount++;
1079 } else {
1080 ASSERT_NOT_REACHED();
1081 }
1082 }
1083 size_t entryCount = size - sizeof(FreeListEntry);
1084 if (forbiddenCount == entryCount) {
1085 // If all values in the memory region are reuseForbiddenZapValue,
1086 // we flip them to reuseAllowedZapValue. This allows the next
1087 // addToFreeList() to add the memory region to the free list
1088 // (unless someone concatenates the memory region with another memory
1089 // region that contains reuseForbiddenZapValue.)
1090 for (size_t i = sizeof(FreeListEntry); i < size; i++)
1091 address[i] = reuseAllowedZapValue;
1092 // Don't add the memory region to the free list in this addToFreeList().
1084 return; 1093 return;
1094 }
1095 if (allowedCount != entryCount) {
1096 // If the memory region mixes reuseForbiddenZapValue and
1097 // reuseAllowedZapValue, we (conservatively) flip all the values
1098 // to reuseForbiddenZapValue. These values will be changed to
1099 // reuseAllowedZapValue in the next addToFreeList().
1100 for (size_t i = sizeof(FreeListEntry); i < size; i++)
1101 address[i] = reuseForbiddenZapValue;
haraken 2015/06/22 11:27:39 It seems that this line is causing use-after-poison
1102 // Don't add the memory region to the free list in this addToFreeList().
1103 return;
1104 }
1105 // We reach here only when all the values in the memory region are
1106 // reuseAllowedZapValue. In this case, we are allowed to add the memory
1107 // region to the free list and reuse it for another object.
1085 #endif 1108 #endif
1109
1086 int index = bucketIndexForSize(size); 1110 int index = bucketIndexForSize(size);
1087 entry->link(&m_freeLists[index]); 1111 entry->link(&m_freeLists[index]);
1088 if (index > m_biggestFreeListIndex) 1112 if (index > m_biggestFreeListIndex)
1089 m_biggestFreeListIndex = index; 1113 m_biggestFreeListIndex = index;
1090 } 1114 }
1091 1115
1116 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
1117 NO_SANITIZE_ADDRESS
1118 void FreeList::zapFreedMemory(Address address, size_t size)
1119 {
1120 for (size_t i = 0; i < size; i++) {
1121 // See the comment in addToFreeList().
1122 if (address[i] != reuseAllowedZapValue)
1123 address[i] = reuseForbiddenZapValue;
1124 }
1125 }
1126 #endif
1127
1092 void FreeList::clear() 1128 void FreeList::clear()
1093 { 1129 {
1094 m_biggestFreeListIndex = 0; 1130 m_biggestFreeListIndex = 0;
1095 for (size_t i = 0; i < blinkPageSizeLog2; ++i) 1131 for (size_t i = 0; i < blinkPageSizeLog2; ++i)
1096 m_freeLists[i] = nullptr; 1132 m_freeLists[i] = nullptr;
1097 } 1133 }
1098 1134
1099 int FreeList::bucketIndexForSize(size_t size) 1135 int FreeList::bucketIndexForSize(size_t size)
1100 { 1136 {
1101 ASSERT(size > 0); 1137 ASSERT(size > 0);
(...skipping 1220 matching lines...) Expand 10 before | Expand all | Expand 10 after
2322 size_t Heap::s_allocatedObjectSize = 0; 2358 size_t Heap::s_allocatedObjectSize = 0;
2323 size_t Heap::s_allocatedSpace = 0; 2359 size_t Heap::s_allocatedSpace = 0;
2324 size_t Heap::s_markedObjectSize = 0; 2360 size_t Heap::s_markedObjectSize = 0;
2325 // We don't want to use 0 KB for the initial value because it may end up 2361 // We don't want to use 0 KB for the initial value because it may end up
2326 // triggering the first GC of some thread too prematurely. 2362 // triggering the first GC of some thread too prematurely.
2327 size_t Heap::s_estimatedLiveObjectSize = 512 * 1024; 2363 size_t Heap::s_estimatedLiveObjectSize = 512 * 1024;
2328 size_t Heap::s_externalObjectSizeAtLastGC = 0; 2364 size_t Heap::s_externalObjectSizeAtLastGC = 0;
2329 double Heap::s_estimatedMarkingTimePerByte = 0.0; 2365 double Heap::s_estimatedMarkingTimePerByte = 0.0;
2330 2366
2331 } // namespace blink 2367 } // namespace blink
OLDNEW
« no previous file with comments | « Source/platform/heap/Heap.h ('k') | Source/platform/heap/HeapTest.cpp » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698