Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(3)

Side by Side Diff: trunk/Source/platform/heap/Heap.cpp

Issue 1190093003: Revert 197289 "Oilpan: Defer reusing freed memory for one GC cycle" (Closed) Base URL: svn://svn.chromium.org/blink/
Patch Set: Created 5 years, 6 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « trunk/Source/platform/heap/Heap.h ('k') | trunk/Source/platform/heap/HeapTest.cpp » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 188 matching lines...) Expand 10 before | Expand all | Expand 10 after
199 void HeapObjectHeader::zapMagic() 199 void HeapObjectHeader::zapMagic()
200 { 200 {
201 checkHeader(); 201 checkHeader();
202 m_magic = zappedMagic; 202 m_magic = zappedMagic;
203 } 203 }
204 #endif 204 #endif
205 205
206 void HeapObjectHeader::finalize(Address object, size_t objectSize) 206 void HeapObjectHeader::finalize(Address object, size_t objectSize)
207 { 207 {
208 const GCInfo* gcInfo = Heap::gcInfo(gcInfoIndex()); 208 const GCInfo* gcInfo = Heap::gcInfo(gcInfoIndex());
209 if (gcInfo->hasFinalizer()) 209 if (gcInfo->hasFinalizer()) {
210 gcInfo->m_finalize(object); 210 gcInfo->m_finalize(object);
211 }
211 212
212 ASAN_RETIRE_CONTAINER_ANNOTATION(object, objectSize); 213 ASAN_RETIRE_CONTAINER_ANNOTATION(object, objectSize);
214
215 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
216 // In Debug builds, memory is zapped when it's freed, and the zapped memory
217 // is zeroed out when the memory is reused. Memory is also zapped when
218 // using Leak Sanitizer because the heap is used as a root region for LSan
219 // and therefore pointers in unreachable memory could hide leaks.
220 for (size_t i = 0; i < objectSize; ++i)
221 object[i] = finalizedZapValue;
222
223 // Zap the primary vTable entry (secondary vTable entries are not zapped).
224 if (gcInfo->hasVTable()) {
225 *(reinterpret_cast<uintptr_t*>(object)) = zappedVTable;
226 }
227 #endif
228 // In Release builds, the entire object is zeroed out when it is added to
229 // the free list. This happens right after sweeping the page and before the
230 // thread commences execution.
213 } 231 }
214 232
215 BaseHeap::BaseHeap(ThreadState* state, int index) 233 BaseHeap::BaseHeap(ThreadState* state, int index)
216 : m_firstPage(nullptr) 234 : m_firstPage(nullptr)
217 , m_firstUnsweptPage(nullptr) 235 , m_firstUnsweptPage(nullptr)
218 , m_threadState(state) 236 , m_threadState(state)
219 , m_index(index) 237 , m_index(index)
220 { 238 {
221 } 239 }
222 240
(...skipping 334 matching lines...) Expand 10 before | Expand all | Expand 10 after
557 json.pushInteger(bucketStats[i].entryCount); 575 json.pushInteger(bucketStats[i].entryCount);
558 json.endArray(); 576 json.endArray();
559 577
560 json.beginArray("perBucketFreeSize"); 578 json.beginArray("perBucketFreeSize");
561 for (size_t i = 0; i < blinkPageSizeLog2; ++i) 579 for (size_t i = 0; i < blinkPageSizeLog2; ++i)
562 json.pushInteger(bucketStats[i].freeSize); 580 json.pushInteger(bucketStats[i].freeSize);
563 json.endArray(); 581 json.endArray();
564 } 582 }
565 #endif 583 #endif
566 584
567 NO_SANITIZE_ADDRESS
568 void NormalPageHeap::allocatePage() 585 void NormalPageHeap::allocatePage()
569 { 586 {
570 threadState()->shouldFlushHeapDoesNotContainCache(); 587 threadState()->shouldFlushHeapDoesNotContainCache();
571 PageMemory* pageMemory = Heap::freePagePool()->takeFreePage(heapIndex()); 588 PageMemory* pageMemory = Heap::freePagePool()->takeFreePage(heapIndex());
572 // We continue allocating page memory until we succeed in committing one. 589 // We continue allocating page memory until we succeed in committing one.
573 while (!pageMemory) { 590 while (!pageMemory) {
574 // Allocate a memory region for blinkPagesPerRegion pages that 591 // Allocate a memory region for blinkPagesPerRegion pages that
575 // will each have the following layout. 592 // will each have the following layout.
576 // 593 //
577 // [ guard os page | ... payload ... | guard os page ] 594 // [ guard os page | ... payload ... | guard os page ]
(...skipping 15 matching lines...) Expand all
593 } else { 610 } else {
594 Heap::freePagePool()->addFreePage(heapIndex(), memory); 611 Heap::freePagePool()->addFreePage(heapIndex(), memory);
595 } 612 }
596 offset += blinkPageSize; 613 offset += blinkPageSize;
597 } 614 }
598 } 615 }
599 NormalPage* page = new (pageMemory->writableStart()) NormalPage(pageMemory, this); 616 NormalPage* page = new (pageMemory->writableStart()) NormalPage(pageMemory, this);
600 page->link(&m_firstPage); 617 page->link(&m_firstPage);
601 618
602 Heap::increaseAllocatedSpace(page->size()); 619 Heap::increaseAllocatedSpace(page->size());
603 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
604 // Allow the following addToFreeList() to add the newly allocated memory
605 // to the free list.
606 Address address = page->payload();
607 for (size_t i = 0; i < page->payloadSize(); i++)
608 address[i] = reuseAllowedZapValue;
609 #endif
610 addToFreeList(page->payload(), page->payloadSize()); 620 addToFreeList(page->payload(), page->payloadSize());
611 } 621 }
612 622
613 void NormalPageHeap::freePage(NormalPage* page) 623 void NormalPageHeap::freePage(NormalPage* page)
614 { 624 {
615 Heap::decreaseAllocatedSpace(page->size()); 625 Heap::decreaseAllocatedSpace(page->size());
616 626
617 if (page->terminating()) { 627 if (page->terminating()) {
618 // The thread is shutting down and this page is being removed as a part 628 // The thread is shutting down and this page is being removed as a part
619 // of the thread local GC. In that case the object could be traced in 629 // of the thread local GC. In that case the object could be traced in
(...skipping 418 matching lines...) Expand 10 before | Expand all | Expand 10 after
1038 } 1048 }
1039 } 1049 }
1040 return result; 1050 return result;
1041 } 1051 }
1042 1052
1043 FreeList::FreeList() 1053 FreeList::FreeList()
1044 : m_biggestFreeListIndex(0) 1054 : m_biggestFreeListIndex(0)
1045 { 1055 {
1046 } 1056 }
1047 1057
1048 NO_SANITIZE_ADDRESS
1049 void FreeList::addToFreeList(Address address, size_t size) 1058 void FreeList::addToFreeList(Address address, size_t size)
1050 { 1059 {
1051 ASSERT(size < blinkPagePayloadSize()); 1060 ASSERT(size < blinkPagePayloadSize());
1052 // The free list entries are only pointer aligned (but when we allocate 1061 // The free list entries are only pointer aligned (but when we allocate
1053 // from them we are 8 byte aligned due to the header size). 1062 // from them we are 8 byte aligned due to the header size).
1054 ASSERT(!((reinterpret_cast<uintptr_t>(address) + sizeof(HeapObjectHeader)) & allocationMask)); 1063 ASSERT(!((reinterpret_cast<uintptr_t>(address) + sizeof(HeapObjectHeader)) & allocationMask));
1055 ASSERT(!(size & allocationMask)); 1064 ASSERT(!(size & allocationMask));
1056 ASAN_POISON_MEMORY_REGION(address, size); 1065 ASAN_POISON_MEMORY_REGION(address, size);
1057 FreeListEntry* entry; 1066 FreeListEntry* entry;
1058 if (size < sizeof(*entry)) { 1067 if (size < sizeof(*entry)) {
1059 // Create a dummy header with only a size and freelist bit set. 1068 // Create a dummy header with only a size and freelist bit set.
1060 ASSERT(size >= sizeof(HeapObjectHeader)); 1069 ASSERT(size >= sizeof(HeapObjectHeader));
1061 // Free list encode the size to mark the lost memory as freelist memory. 1070 // Free list encode the size to mark the lost memory as freelist memory.
1062 new (NotNull, address) HeapObjectHeader(size, gcInfoIndexForFreeListHeader); 1071 new (NotNull, address) HeapObjectHeader(size, gcInfoIndexForFreeListHeader);
1063 // This memory gets lost. Sweeping can reclaim it. 1072 // This memory gets lost. Sweeping can reclaim it.
1064 return; 1073 return;
1065 } 1074 }
1066 entry = new (NotNull, address) FreeListEntry(size); 1075 entry = new (NotNull, address) FreeListEntry(size);
1067 1076 #if defined(ADDRESS_SANITIZER)
1068 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) 1077 BasePage* page = pageFromObject(address);
1069 // The following logic delays reusing free lists for (at least) one GC 1078 ASSERT(!page->isLargeObjectPage());
1070 // cycle or coalescing. This is helpful to detect use-after-free errors 1079 // For ASan we don't add the entry to the free lists until the
1071 // that could be caused by lazy sweeping etc. 1080 // asanDeferMemoryReuseCount reaches zero. However we always add entire
1072 size_t allowedCount = 0; 1081 // pages to ensure that adding a new page will increase the allocation
1073 size_t forbiddenCount = 0; 1082 // space.
1074 for (size_t i = sizeof(FreeListEntry); i < size; i++) { 1083 if (static_cast<NormalPage*>(page)->payloadSize() != size && !entry->shouldAddToFreeList())
1075 if (address[i] == reuseAllowedZapValue) {
1076 allowedCount++;
1077 } else if (address[i] == reuseForbiddenZapValue) {
1078 forbiddenCount++;
1079 } else {
1080 ASSERT_NOT_REACHED();
1081 }
1082 }
1083 size_t entryCount = size - sizeof(FreeListEntry);
1084 if (forbiddenCount == entryCount) {
1085 // If all values in the memory region are reuseForbiddenZapValue,
1086 // we flip them to reuseAllowedZapValue. This allows the next
1087 // addToFreeList() to add the memory region to the free list
1088 // (unless someone concatenates the memory region with another memory
1089 // region that contains reuseForbiddenZapValue.)
1090 for (size_t i = sizeof(FreeListEntry); i < size; i++)
1091 address[i] = reuseAllowedZapValue;
1092 // Don't add the memory region to the free list in this addToFreeList().
1093 return; 1084 return;
1094 }
1095 if (allowedCount != entryCount) {
1096 // If the memory region mixes reuseForbiddenZapValue and
1097 // reuseAllowedZapValue, we (conservatively) flip all the values
1098 // to reuseForbiddenZapValue. These values will be changed to
1099 // reuseAllowedZapValue in the next addToFreeList().
1100 for (size_t i = sizeof(FreeListEntry); i < size; i++)
1101 address[i] = reuseForbiddenZapValue;
1102 // Don't add the memory region to the free list in this addToFreeList().
1103 return;
1104 }
1105 // We reach here only when all the values in the memory region are
1106 // reuseAllowedZapValue. In this case, we are allowed to add the memory
1107 // region to the free list and reuse it for another object.
1108 #endif 1085 #endif
1109
1110 int index = bucketIndexForSize(size); 1086 int index = bucketIndexForSize(size);
1111 entry->link(&m_freeLists[index]); 1087 entry->link(&m_freeLists[index]);
1112 if (index > m_biggestFreeListIndex) 1088 if (index > m_biggestFreeListIndex)
1113 m_biggestFreeListIndex = index; 1089 m_biggestFreeListIndex = index;
1114 } 1090 }
1115 1091
1116 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
1117 NO_SANITIZE_ADDRESS
1118 void FreeList::zapFreedMemory(Address address, size_t size)
1119 {
1120 for (size_t i = 0; i < size; i++) {
1121 // See the comment in addToFreeList().
1122 if (address[i] != reuseAllowedZapValue)
1123 address[i] = reuseForbiddenZapValue;
1124 }
1125 }
1126 #endif
1127
1128 void FreeList::clear() 1092 void FreeList::clear()
1129 { 1093 {
1130 m_biggestFreeListIndex = 0; 1094 m_biggestFreeListIndex = 0;
1131 for (size_t i = 0; i < blinkPageSizeLog2; ++i) 1095 for (size_t i = 0; i < blinkPageSizeLog2; ++i)
1132 m_freeLists[i] = nullptr; 1096 m_freeLists[i] = nullptr;
1133 } 1097 }
1134 1098
1135 int FreeList::bucketIndexForSize(size_t size) 1099 int FreeList::bucketIndexForSize(size_t size)
1136 { 1100 {
1137 ASSERT(size > 0); 1101 ASSERT(size > 0);
(...skipping 1220 matching lines...) Expand 10 before | Expand all | Expand 10 after
2358 size_t Heap::s_allocatedObjectSize = 0; 2322 size_t Heap::s_allocatedObjectSize = 0;
2359 size_t Heap::s_allocatedSpace = 0; 2323 size_t Heap::s_allocatedSpace = 0;
2360 size_t Heap::s_markedObjectSize = 0; 2324 size_t Heap::s_markedObjectSize = 0;
2361 // We don't want to use 0 KB for the initial value because it may end up 2325 // We don't want to use 0 KB for the initial value because it may end up
2362 // triggering the first GC of some thread too prematurely. 2326 // triggering the first GC of some thread too prematurely.
2363 size_t Heap::s_estimatedLiveObjectSize = 512 * 1024; 2327 size_t Heap::s_estimatedLiveObjectSize = 512 * 1024;
2364 size_t Heap::s_externalObjectSizeAtLastGC = 0; 2328 size_t Heap::s_externalObjectSizeAtLastGC = 0;
2365 double Heap::s_estimatedMarkingTimePerByte = 0.0; 2329 double Heap::s_estimatedMarkingTimePerByte = 0.0;
2366 2330
2367 } // namespace blink 2331 } // namespace blink
OLDNEW
« no previous file with comments | « trunk/Source/platform/heap/Heap.h ('k') | trunk/Source/platform/heap/HeapTest.cpp » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698