Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(109)

Side by Side Diff: Source/platform/heap/Heap.cpp

Issue 363173002: Don't zero out memory added to a free list if ASan is enabled (Closed) Base URL: svn://svn.chromium.org/blink/trunk
Patch Set: Created 6 years, 5 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « Source/platform/heap/Heap.h ('k') | no next file » | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 447 matching lines...) Expand 10 before | Expand all | Expand 10 after
458 return header; 458 return header;
459 } 459 }
460 460
461 void HeapObjectHeader::finalize(const GCInfo* gcInfo, Address object, size_t objectSize) 461 void HeapObjectHeader::finalize(const GCInfo* gcInfo, Address object, size_t objectSize)
462 { 462 {
463 ASSERT(gcInfo); 463 ASSERT(gcInfo);
464 if (gcInfo->hasFinalizer()) { 464 if (gcInfo->hasFinalizer()) {
465 gcInfo->m_finalize(object); 465 gcInfo->m_finalize(object);
466 } 466 }
467 467
468 #if !defined(NDEBUG) || defined(LEAK_SANITIZER) 468 #if !defined(NDEBUG) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
469 // In Debug builds, memory is zapped when it's freed, and the zapped memory is 469 // In Debug builds, memory is zapped when it's freed, and the zapped memory is
470 // zeroed out when the memory is reused. Memory is also zapped when using Leak 470 // zeroed out when the memory is reused. Memory is also zapped when using Leak
471 // Sanitizer because the heap is used as a root region for LSan and therefore 471 // Sanitizer because the heap is used as a root region for LSan and therefore
472 // pointers in unreachable memory could hide leaks. 472 // pointers in unreachable memory could hide leaks.
473 for (size_t i = 0; i < objectSize; i++) 473 for (size_t i = 0; i < objectSize; i++)
474 object[i] = finalizedZapValue; 474 object[i] = finalizedZapValue;
475 475
476 // Zap the primary vTable entry (secondary vTable entries are not zapped). 476 // Zap the primary vTable entry (secondary vTable entries are not zapped).
477 *(reinterpret_cast<uintptr_t*>(object)) = zappedVTable; 477 *(reinterpret_cast<uintptr_t*>(object)) = zappedVTable;
478 #endif 478 #endif
(...skipping 176 matching lines...) Expand 10 before | Expand all | Expand 10 after
655 template<typename Header> 655 template<typename Header>
656 void ThreadHeap<Header>::addToFreeList(Address address, size_t size) 656 void ThreadHeap<Header>::addToFreeList(Address address, size_t size)
657 { 657 {
658 ASSERT(heapPageFromAddress(address)); 658 ASSERT(heapPageFromAddress(address));
659 ASSERT(heapPageFromAddress(address + size - 1)); 659 ASSERT(heapPageFromAddress(address + size - 1));
660 ASSERT(size < blinkPagePayloadSize()); 660 ASSERT(size < blinkPagePayloadSize());
661 // The free list entries are only pointer aligned (but when we allocate 661 // The free list entries are only pointer aligned (but when we allocate
662 // from them we are 8 byte aligned due to the header size). 662 // from them we are 8 byte aligned due to the header size).
663 ASSERT(!((reinterpret_cast<uintptr_t>(address) + sizeof(Header)) & allocationMask)); 663 ASSERT(!((reinterpret_cast<uintptr_t>(address) + sizeof(Header)) & allocationMask));
664 ASSERT(!(size & allocationMask)); 664 ASSERT(!(size & allocationMask));
665 #if defined(NDEBUG) && !defined(LEAK_SANITIZER) 665 #if defined(NDEBUG) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER)
666 memset(address, 0, size); 666 memset(address, 0, size);
667 #endif 667 #endif
668 ASAN_POISON_MEMORY_REGION(address, size); 668 ASAN_POISON_MEMORY_REGION(address, size);
669 FreeListEntry* entry; 669 FreeListEntry* entry;
670 if (size < sizeof(*entry)) { 670 if (size < sizeof(*entry)) {
671 // Create a dummy header with only a size and freelist bit set. 671 // Create a dummy header with only a size and freelist bit set.
672 ASSERT(size >= sizeof(BasicObjectHeader)); 672 ASSERT(size >= sizeof(BasicObjectHeader));
673 // Free list encode the size to mark the lost memory as freelist memory. 673 // Free list encode the size to mark the lost memory as freelist memory.
674 new (NotNull, address) BasicObjectHeader(BasicObjectHeader::freeListEncodedSize(size)); 674 new (NotNull, address) BasicObjectHeader(BasicObjectHeader::freeListEncodedSize(size));
675 // This memory gets lost. Sweeping can reclaim it. 675 // This memory gets lost. Sweeping can reclaim it.
676 return; 676 return;
677 } 677 }
678 entry = new (NotNull, address) FreeListEntry(size); 678 entry = new (NotNull, address) FreeListEntry(size);
679 #if defined(ADDRESS_SANITIZER) 679 #if defined(ADDRESS_SANITIZER)
680 // For ASAN we don't add the entry to the free lists until the asanDeferMemoryReuseCount 680 // For ASan we don't add the entry to the free lists until the asanDeferMemoryReuseCount
681 // reaches zero. However we always add entire pages to ensure that adding a new page will 681 // reaches zero. However we always add entire pages to ensure that adding a new page will
682 // increase the allocation space. 682 // increase the allocation space.
683 if (HeapPage<Header>::payloadSize() != size && !entry->shouldAddToFreeList()) 683 if (HeapPage<Header>::payloadSize() != size && !entry->shouldAddToFreeList())
684 return; 684 return;
685 #endif 685 #endif
686 int index = bucketIndexForSize(size); 686 int index = bucketIndexForSize(size);
687 entry->link(&m_freeLists[index]); 687 entry->link(&m_freeLists[index]);
688 if (index > m_biggestFreeListIndex) 688 if (index > m_biggestFreeListIndex)
689 m_biggestFreeListIndex = index; 689 m_biggestFreeListIndex = index;
690 } 690 }
691 691
692 template<typename Header> 692 template<typename Header>
693 Address ThreadHeap<Header>::allocateLargeObject(size_t size, const GCInfo* gcInfo) 693 Address ThreadHeap<Header>::allocateLargeObject(size_t size, const GCInfo* gcInfo)
694 { 694 {
695 // Caller already added space for object header and rounded up to allocation alignment 695 // Caller already added space for object header and rounded up to allocation alignment
696 ASSERT(!(size & allocationMask)); 696 ASSERT(!(size & allocationMask));
697 697
698 size_t allocationSize = sizeof(LargeHeapObject<Header>) + size; 698 size_t allocationSize = sizeof(LargeHeapObject<Header>) + size;
699 699
700 // Ensure that there is enough space for alignment. If the header 700 // Ensure that there is enough space for alignment. If the header
701 // is not a multiple of 8 bytes we will allocate an extra 701 // is not a multiple of 8 bytes we will allocate an extra
702 // headerPadding<Header> bytes to ensure it 8 byte aligned. 702 // headerPadding<Header> bytes to ensure it 8 byte aligned.
703 allocationSize += headerPadding<Header>(); 703 allocationSize += headerPadding<Header>();
704 704
705 // If ASAN is supported we add allocationGranularity bytes to the allocated space and 705 // If ASan is supported we add allocationGranularity bytes to the allocated space and
706 // poison that to detect overflows 706 // poison that to detect overflows
707 #if defined(ADDRESS_SANITIZER) 707 #if defined(ADDRESS_SANITIZER)
708 allocationSize += allocationGranularity; 708 allocationSize += allocationGranularity;
709 #endif 709 #endif
710 if (threadState()->shouldGC()) 710 if (threadState()->shouldGC())
711 threadState()->setGCRequested(); 711 threadState()->setGCRequested();
712 Heap::flushHeapDoesNotContainCache(); 712 Heap::flushHeapDoesNotContainCache();
713 PageMemory* pageMemory = PageMemory::allocate(allocationSize); 713 PageMemory* pageMemory = PageMemory::allocate(allocationSize);
714 Address largeObjectAddress = pageMemory->writableStart(); 714 Address largeObjectAddress = pageMemory->writableStart();
715 Address headerAddress = largeObjectAddress + sizeof(LargeHeapObject<Header>) + headerPadding<Header>(); 715 Address headerAddress = largeObjectAddress + sizeof(LargeHeapObject<Header>) + headerPadding<Header>();
(...skipping 133 matching lines...) Expand 10 before | Expand all | Expand 10 after
849 // turned on by default because it also triggers for cases that are safe. 849 // turned on by default because it also triggers for cases that are safe.
850 // Examples of such safe cases are context life cycle observers and timers 850 // Examples of such safe cases are context life cycle observers and timers
851 // embedded in garbage collected objects. 851 // embedded in garbage collected objects.
852 #define STRICT_ASAN_FINALIZATION_CHECKING 0 852 #define STRICT_ASAN_FINALIZATION_CHECKING 0
853 853
854 template<typename Header> 854 template<typename Header>
855 void ThreadHeap<Header>::sweep() 855 void ThreadHeap<Header>::sweep()
856 { 856 {
857 ASSERT(isConsistentForGC()); 857 ASSERT(isConsistentForGC());
858 #if defined(ADDRESS_SANITIZER) && STRICT_ASAN_FINALIZATION_CHECKING 858 #if defined(ADDRESS_SANITIZER) && STRICT_ASAN_FINALIZATION_CHECKING
859 // When using ASAN do a pre-sweep where all unmarked objects are poisoned before 859 // When using ASan do a pre-sweep where all unmarked objects are poisoned before
860 // calling their finalizer methods. This can catch the cases where one objects 860 // calling their finalizer methods. This can catch the cases where one objects
861 // finalizer tries to modify another object as part of finalization. 861 // finalizer tries to modify another object as part of finalization.
862 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) 862 for (HeapPage<Header>* page = m_firstPage; page; page = page->next())
863 page->poisonUnmarkedObjects(); 863 page->poisonUnmarkedObjects();
864 #endif 864 #endif
865 HeapPage<Header>* page = m_firstPage; 865 HeapPage<Header>* page = m_firstPage;
866 HeapPage<Header>** previous = &m_firstPage; 866 HeapPage<Header>** previous = &m_firstPage;
867 bool pagesRemoved = false; 867 bool pagesRemoved = false;
868 while (page) { 868 while (page) {
869 if (page->isEmpty()) { 869 if (page->isEmpty()) {
(...skipping 197 matching lines...) Expand 10 before | Expand all | Expand 10 after
1067 ASSERT(basicHeader->size() < blinkPagePayloadSize()); 1067 ASSERT(basicHeader->size() < blinkPagePayloadSize());
1068 1068
1069 if (basicHeader->isFree()) { 1069 if (basicHeader->isFree()) {
1070 headerAddress += basicHeader->size(); 1070 headerAddress += basicHeader->size();
1071 continue; 1071 continue;
1072 } 1072 }
1073 // At this point we know this is a valid object of type Header 1073 // At this point we know this is a valid object of type Header
1074 Header* header = static_cast<Header*>(basicHeader); 1074 Header* header = static_cast<Header*>(basicHeader);
1075 1075
1076 if (!header->isMarked()) { 1076 if (!header->isMarked()) {
1077 // For ASAN we unpoison the specific object when calling the finalizer and 1077 // For ASan we unpoison the specific object when calling the finalizer and
1078 // poison it again when done to allow the object's own finalizer to operate 1078 // poison it again when done to allow the object's own finalizer to operate
1079 // on the object, but not have other finalizers be allowed to access it. 1079 // on the object, but not have other finalizers be allowed to access it.
1080 ASAN_UNPOISON_MEMORY_REGION(header->payload(), header->payloadSize()); 1080 ASAN_UNPOISON_MEMORY_REGION(header->payload(), header->payloadSize());
1081 finalize(header); 1081 finalize(header);
1082 ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize()); 1082 ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize());
1083 headerAddress += header->size(); 1083 headerAddress += header->size();
1084 continue; 1084 continue;
1085 } 1085 }
1086 1086
1087 if (startOfGap != headerAddress) 1087 if (startOfGap != headerAddress)
(...skipping 897 matching lines...) Expand 10 before | Expand all | Expand 10 after
1985 template class ThreadHeap<HeapObjectHeader>; 1985 template class ThreadHeap<HeapObjectHeader>;
1986 1986
1987 Visitor* Heap::s_markingVisitor; 1987 Visitor* Heap::s_markingVisitor;
1988 CallbackStack* Heap::s_markingStack; 1988 CallbackStack* Heap::s_markingStack;
1989 CallbackStack* Heap::s_weakCallbackStack; 1989 CallbackStack* Heap::s_weakCallbackStack;
1990 CallbackStack* Heap::s_ephemeronStack; 1990 CallbackStack* Heap::s_ephemeronStack;
1991 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache; 1991 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache;
1992 bool Heap::s_shutdownCalled = false; 1992 bool Heap::s_shutdownCalled = false;
1993 bool Heap::s_lastGCWasConservative = false; 1993 bool Heap::s_lastGCWasConservative = false;
1994 } 1994 }
OLDNEW
« no previous file with comments | « Source/platform/heap/Heap.h ('k') | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698