Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(244)

Unified Diff: trunk/Source/platform/heap/Heap.cpp

Issue 1184613004: Revert 196997 "Oilpan: Defer reusing freed memory for one GC cycle" (Closed) Base URL: svn://svn.chromium.org/blink/
Patch Set: Created 5 years, 6 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
« no previous file with comments | « trunk/Source/platform/heap/Heap.h ('k') | trunk/Source/platform/heap/HeapTest.cpp » ('j') | no next file with comments »
Expand Comments ('e') | Collapse Comments ('c') | Show/Hide Comments ('s')
Index: trunk/Source/platform/heap/Heap.cpp
===================================================================
--- trunk/Source/platform/heap/Heap.cpp (revision 197018)
+++ trunk/Source/platform/heap/Heap.cpp (working copy)
@@ -206,10 +206,28 @@
void HeapObjectHeader::finalize(Address object, size_t objectSize)
{
const GCInfo* gcInfo = Heap::gcInfo(gcInfoIndex());
- if (gcInfo->hasFinalizer())
+ if (gcInfo->hasFinalizer()) {
gcInfo->m_finalize(object);
+ }
ASAN_RETIRE_CONTAINER_ANNOTATION(object, objectSize);
+
+#if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
+ // In Debug builds, memory is zapped when it's freed, and the zapped memory
+ // is zeroed out when the memory is reused. Memory is also zapped when
+ // using Leak Sanitizer because the heap is used as a root region for LSan
+ // and therefore pointers in unreachable memory could hide leaks.
+ for (size_t i = 0; i < objectSize; ++i)
+ object[i] = finalizedZapValue;
+
+ // Zap the primary vTable entry (secondary vTable entries are not zapped).
+ if (gcInfo->hasVTable()) {
+ *(reinterpret_cast<uintptr_t*>(object)) = zappedVTable;
+ }
+#endif
+ // In Release builds, the entire object is zeroed out when it is added to
+ // the free list. This happens right after sweeping the page and before the
+ // thread commences execution.
}
BaseHeap::BaseHeap(ThreadState* state, int index)
@@ -599,13 +617,6 @@
page->link(&m_firstPage);
Heap::increaseAllocatedSpace(page->size());
-#if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
- // Allow the following addToFreeList() to add the newly allocated memory
- // to the free list.
- Address address = page->payload();
- for (size_t i = 0; i < page->payloadSize(); i++)
- address[i] = reuseAllowedZapValue;
-#endif
addToFreeList(page->payload(), page->payloadSize());
}
@@ -1062,49 +1073,16 @@
return;
}
entry = new (NotNull, address) FreeListEntry(size);
-
-#if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
- // The following logic delays reusing free lists for (at least) one GC
- // cycle or coalescing. This is helpful to detect use-after-free errors
- // that could be caused by lazy sweeping etc.
- size_t allowedCount = 0;
- size_t forbiddenCount = 0;
- for (size_t i = sizeof(FreeListEntry); i < size; i++) {
- if (address[i] == reuseAllowedZapValue) {
- allowedCount++;
- } else if (address[i] == reuseForbiddenZapValue) {
- forbiddenCount++;
- } else {
- ASSERT_NOT_REACHED();
- }
- }
- size_t entryCount = size - sizeof(FreeListEntry);
- if (forbiddenCount == entryCount) {
- // If all values in the memory region are reuseForbiddenZapValue,
- // we flip them to reuseAllowedZapValue. This allows the next
- // addToFreeList() to add the memory region to the free list
- // (unless someone concatenates the memory region with another memory
- // region that contains reuseForbiddenZapValue.)
- for (size_t i = sizeof(FreeListEntry); i < size; i++)
- address[i] = reuseAllowedZapValue;
- // Don't add the memory region to the free list in this addToFreeList().
+#if defined(ADDRESS_SANITIZER)
+ BasePage* page = pageFromObject(address);
+ ASSERT(!page->isLargeObjectPage());
+ // For ASan we don't add the entry to the free lists until the
+ // asanDeferMemoryReuseCount reaches zero. However we always add entire
+ // pages to ensure that adding a new page will increase the allocation
+ // space.
+ if (static_cast<NormalPage*>(page)->payloadSize() != size && !entry->shouldAddToFreeList())
return;
- }
- if (allowedCount != entryCount) {
- // If the memory region mixes reuseForbiddenZapValue and
- // reuseAllowedZapValue, we (conservatively) flip all the values
- // to reuseForbiddenZapValue. These values will be changed to
- // reuseAllowedZapValue in the next addToFreeList().
- for (size_t i = sizeof(FreeListEntry); i < size; i++)
- address[i] = reuseForbiddenZapValue;
- // Don't add the memory region to the free list in this addToFreeList().
- return;
- }
- // We reach here only when all the values in the memory region are
- // reuseAllowedZapValue. In this case, we are allowed to add the memory
- // region to the free list and reuse it for another object.
#endif
-
int index = bucketIndexForSize(size);
entry->link(&m_freeLists[index]);
if (index > m_biggestFreeListIndex)
@@ -1111,17 +1089,6 @@
m_biggestFreeListIndex = index;
}
-#if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
-void FreeList::zapFreedMemory(Address address, size_t size)
-{
- for (size_t i = 0; i < size; i++) {
- // See the comment in addToFreeList().
- if (address[i] != reuseAllowedZapValue)
- address[i] = reuseForbiddenZapValue;
- }
-}
-#endif
-
void FreeList::clear()
{
m_biggestFreeListIndex = 0;
« no previous file with comments | « trunk/Source/platform/heap/Heap.h ('k') | trunk/Source/platform/heap/HeapTest.cpp » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698