OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 70 matching lines...)
81 static_cast<LargeObjectPage*>(largePage)->setIsVectorBackingPage(); \ | 81 static_cast<LargeObjectPage*>(largePage)->setIsVectorBackingPage(); \ |
82 } | 82 } |
83 #else | 83 #else |
84 #define ENABLE_ASAN_CONTAINER_ANNOTATIONS 0 | 84 #define ENABLE_ASAN_CONTAINER_ANNOTATIONS 0 |
85 #define ASAN_RETIRE_CONTAINER_ANNOTATION(payload, payloadSize) | 85 #define ASAN_RETIRE_CONTAINER_ANNOTATION(payload, payloadSize) |
86 #define ASAN_MARK_LARGE_VECTOR_CONTAINER(heap, largeObject) | 86 #define ASAN_MARK_LARGE_VECTOR_CONTAINER(heap, largeObject) |
87 #endif | 87 #endif |
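The #else branch above shows the usual shape of sanitizer-only hooks: when ASAN container annotations are disabled, the macros expand to nothing, so call sites stay free of #ifdefs. A minimal self-contained sketch of that pattern, with hypothetical macro and function names (not the Blink macros themselves):

```cpp
#include <cstddef>
#include <cstdio>

// Hypothetical annotation hook: real code only when ASAN is enabled,
// a complete no-op (zero code generated) otherwise.
#if defined(ADDRESS_SANITIZER)
static void annotateRetired(void* payload, size_t payloadSize)
{
    std::printf("retiring %zu bytes at %p\n", payloadSize, payload);
}
#define MY_RETIRE_CONTAINER_ANNOTATION(payload, payloadSize) \
    annotateRetired(payload, payloadSize)
#else
#define MY_RETIRE_CONTAINER_ANNOTATION(payload, payloadSize)
#endif

int main()
{
    char buffer[16];
    // Expands to a function call under ASAN, to nothing otherwise.
    MY_RETIRE_CONTAINER_ANNOTATION(buffer, sizeof(buffer));
}
```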
88 | 88 |
89 namespace blink { | 89 namespace blink { |
90 | 90 |
91 #if ENABLE(ASSERT) | |
92 NO_SANITIZE_ADDRESS | |
93 void HeapObjectHeader::zapMagic() | |
94 { | |
95 ASSERT(checkHeader()); | |
96 m_magic = zappedMagic; | |
97 } | |
98 #endif | |
99 | |
100 void HeapObjectHeader::finalize(Address object, size_t objectSize) | 91 void HeapObjectHeader::finalize(Address object, size_t objectSize) |
101 { | 92 { |
102 const GCInfo* gcInfo = Heap::gcInfo(gcInfoIndex()); | 93 const GCInfo* gcInfo = Heap::gcInfo(gcInfoIndex()); |
103 if (gcInfo->hasFinalizer()) | 94 if (gcInfo->hasFinalizer()) |
104 gcInfo->m_finalize(object); | 95 gcInfo->m_finalize(object); |
105 | 96 |
106 ASAN_RETIRE_CONTAINER_ANNOTATION(object, objectSize); | 97 ASAN_RETIRE_CONTAINER_ANNOTATION(object, objectSize); |
107 } | 98 } |
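For readers new to Oilpan, finalize() above is plain table dispatch: gcInfoIndex() selects a per-type GCInfo record, and the finalizer runs only if the type registered one. A minimal sketch of that pattern with illustrative types (Heap::gcInfo and the real GCInfo differ in detail):

```cpp
#include <cstddef>
#include <cstdio>

// Simplified stand-in for Blink's GCInfo: a per-type record whose
// finalizer pointer is null for trivially destructible types.
struct GCInfo {
    void (*m_finalize)(void*);
    bool hasFinalizer() const { return m_finalize != nullptr; }
};

// Hypothetical registry standing in for Heap::gcInfo(gcInfoIndex).
static const GCInfo s_gcInfoTable[] = {
    { nullptr },                                            // index 0: nothing to run
    { [](void* object) { std::printf("finalized %p\n", object); } },
};

static void finalize(size_t gcInfoIndex, void* object)
{
    const GCInfo* gcInfo = &s_gcInfoTable[gcInfoIndex];
    if (gcInfo->hasFinalizer())
        gcInfo->m_finalize(object);                         // table dispatch
}

int main()
{
    int dummy = 0;
    finalize(0, &dummy);  // no-op: no finalizer registered
    finalize(1, &dummy);  // runs the registered finalizer
}
```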
108 | 99 |
109 BaseHeap::BaseHeap(ThreadState* state, int index) | 100 BaseHeap::BaseHeap(ThreadState* state, int index) |
(...skipping 506 matching lines...)
616 if (isObjectAllocatedAtAllocationPoint(header)) { | 607 if (isObjectAllocatedAtAllocationPoint(header)) { |
617 m_currentAllocationPoint -= shrinkSize; | 608 m_currentAllocationPoint -= shrinkSize; |
618 setRemainingAllocationSize(m_remainingAllocationSize + shrinkSize); | 609 setRemainingAllocationSize(m_remainingAllocationSize + shrinkSize); |
619 SET_MEMORY_INACCESSIBLE(m_currentAllocationPoint, shrinkSize); | 610 SET_MEMORY_INACCESSIBLE(m_currentAllocationPoint, shrinkSize); |
620 header->setSize(allocationSize); | 611 header->setSize(allocationSize); |
621 return true; | 612 return true; |
622 } | 613 } |
623 ASSERT(shrinkSize >= sizeof(HeapObjectHeader)); | 614 ASSERT(shrinkSize >= sizeof(HeapObjectHeader)); |
624 ASSERT(header->gcInfoIndex() > 0); | 615 ASSERT(header->gcInfoIndex() > 0); |
625 Address shrinkAddress = header->payloadEnd() - shrinkSize; | 616 Address shrinkAddress = header->payloadEnd() - shrinkSize; |
626 HeapObjectHeader* freedHeader = new (NotNull, shrinkAddress) HeapObjectHeader(shrinkSize, header->gcInfoIndex()); | 617 HeapObjectHeader* freedHeader = new (NotNull, shrinkAddress) HeapObjectHeader(shrinkSize, header->gcInfoIndex(), 0);
627 freedHeader->markPromptlyFreed(); | 618 freedHeader->markPromptlyFreed(); |
628 ASSERT(pageFromObject(reinterpret_cast<Address>(header)) == findPageFromAddress(reinterpret_cast<Address>(header))); | 619 ASSERT(pageFromObject(reinterpret_cast<Address>(header)) == findPageFromAddress(reinterpret_cast<Address>(header)));
629 m_promptlyFreedSize += shrinkSize; | 620 m_promptlyFreedSize += shrinkSize; |
630 header->setSize(allocationSize); | 621 header->setSize(allocationSize); |
631 SET_MEMORY_INACCESSIBLE(shrinkAddress + sizeof(HeapObjectHeader), shrinkSize - sizeof(HeapObjectHeader)); | 622 SET_MEMORY_INACCESSIBLE(shrinkAddress + sizeof(HeapObjectHeader), shrinkSize - sizeof(HeapObjectHeader)); |
632 return false; | 623 return false; |
633 } | 624 } |
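Two outcomes are visible in shrinkObject() above: if the object ends exactly at the bump-allocation point, the pointer simply retreats and the space is reusable immediately (return true); otherwise a promptly-freed header is carved out of the tail for the sweeper to reclaim (return false). A toy model of the fast path, under assumed names (not Blink's real classes):

```cpp
#include <cassert>
#include <cstddef>

// Toy bump area modeling the fast shrink path: the most recently
// allocated object can be shrunk by retreating the allocation point.
struct BumpArea {
    char* currentAllocationPoint;
    size_t remainingAllocationSize;

    bool shrinkAtAllocationPoint(char* objectEnd, size_t shrinkSize)
    {
        if (objectEnd != currentAllocationPoint)
            return false;  // slow path: carve a promptly-freed header instead
        currentAllocationPoint -= shrinkSize;
        remainingAllocationSize += shrinkSize;
        return true;
    }
};

int main()
{
    char buffer[128];
    BumpArea area{buffer + 64, 64};
    // Object ends at the allocation point: the pointer retreats by 16 bytes.
    assert(area.shrinkAtAllocationPoint(buffer + 64, 16));
    assert(area.currentAllocationPoint == buffer + 48);
    // An object ending elsewhere cannot use the fast path.
    assert(!area.shrinkAtAllocationPoint(buffer + 64, 8));
}
```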
634 | 625 |
635 Address NormalPageHeap::lazySweepPages(size_t allocationSize, size_t gcInfoIndex) | 626 Address NormalPageHeap::lazySweepPages(size_t allocationSize, size_t gcInfoIndex)
636 { | 627 { |
(...skipping 133 matching lines...)
770 // as it is considered too costly. | 761 // as it is considered too costly. |
771 if (!entry || entry->size() < allocationSize) | 762 if (!entry || entry->size() < allocationSize) |
772 break; | 763 break; |
773 } | 764 } |
774 if (entry) { | 765 if (entry) { |
775 entry->unlink(&m_freeList.m_freeLists[index]); | 766 entry->unlink(&m_freeList.m_freeLists[index]); |
776 setAllocationPoint(entry->address(), entry->size()); | 767 setAllocationPoint(entry->address(), entry->size()); |
777 ASSERT(hasCurrentAllocationArea()); | 768 ASSERT(hasCurrentAllocationArea()); |
778 ASSERT(remainingAllocationSize() >= allocationSize); | 769 ASSERT(remainingAllocationSize() >= allocationSize); |
779 m_freeList.m_biggestFreeListIndex = index; | 770 m_freeList.m_biggestFreeListIndex = index; |
780 return allocateObject(allocationSize, gcInfoIndex); | 771 return allocateObject(allocationSize, gcInfoIndex, Heap::gcGeneration());
781 } | 772 } |
782 } | 773 } |
783 m_freeList.m_biggestFreeListIndex = index; | 774 m_freeList.m_biggestFreeListIndex = index; |
784 return nullptr; | 775 return nullptr; |
785 } | 776 } |
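allocateFromFreeList() above scans buckets from the biggest populated index downward and, per the retained comment, only ever looks at each bucket's head entry rather than walking the chain. A simplified segregated free list showing that search (illustrative layout, not the real FreeList):

```cpp
#include <cstddef>

// Bucketed free list: bucket index roughly tracks block size, and only
// the head of each bucket is examined during allocation.
struct FreeBlock {
    size_t size;
    FreeBlock* next;
};

struct FreeList {
    static const int kBucketCount = 32;
    FreeBlock* buckets[kBucketCount] = {};
    int biggestIndex = 0;

    FreeBlock* take(size_t allocationSize)
    {
        for (int index = biggestIndex; index >= 0; --index) {
            FreeBlock* entry = buckets[index];
            // Head-only check: walking the chain is considered too costly.
            if (entry && entry->size >= allocationSize) {
                buckets[index] = entry->next;  // unlink the head
                biggestIndex = index;
                return entry;  // caller turns this into the new bump area
            }
        }
        return nullptr;  // nothing big enough: caller sweeps or grows
    }
};

int main()
{
    FreeList list;
    FreeBlock block = {256, nullptr};
    list.buckets[8] = &block;  // a 256-byte block parked in bucket 8
    list.biggestIndex = 8;
    return list.take(128) == &block ? 0 : 1;  // head is big enough
}
```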
786 | 777 |
787 LargeObjectHeap::LargeObjectHeap(ThreadState* state, int index) | 778 LargeObjectHeap::LargeObjectHeap(ThreadState* state, int index) |
788 : BaseHeap(state, index) | 779 : BaseHeap(state, index) |
789 { | 780 { |
790 } | 781 } |
(...skipping 32 matching lines...)
823 threadState()->shouldFlushHeapDoesNotContainCache(); | 814 threadState()->shouldFlushHeapDoesNotContainCache(); |
824 PageMemory* pageMemory = PageMemory::allocate(largeObjectSize); | 815 PageMemory* pageMemory = PageMemory::allocate(largeObjectSize); |
825 Address largeObjectAddress = pageMemory->writableStart(); | 816 Address largeObjectAddress = pageMemory->writableStart(); |
826 Address headerAddress = largeObjectAddress + LargeObjectPage::pageHeaderSize(); | 817 Address headerAddress = largeObjectAddress + LargeObjectPage::pageHeaderSize();
827 #if ENABLE(ASSERT) | 818 #if ENABLE(ASSERT) |
828 // Verify that the allocated PageMemory is zeroed as expected. | 819 // Verify that the allocated PageMemory is zeroed as expected.
829 for (size_t i = 0; i < largeObjectSize; ++i) | 820 for (size_t i = 0; i < largeObjectSize; ++i) |
830 ASSERT(!largeObjectAddress[i]); | 821 ASSERT(!largeObjectAddress[i]); |
831 #endif | 822 #endif |
832 ASSERT(gcInfoIndex > 0); | 823 ASSERT(gcInfoIndex > 0); |
833 HeapObjectHeader* header = new (NotNull, headerAddress) HeapObjectHeader(largeObjectSizeInHeader, gcInfoIndex); | 824 HeapObjectHeader* header = new (NotNull, headerAddress) HeapObjectHeader(largeObjectSizeInHeader, gcInfoIndex, Heap::gcGeneration());
834 Address result = headerAddress + sizeof(*header); | 825 Address result = headerAddress + sizeof(*header); |
835 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); | 826 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); |
836 LargeObjectPage* largeObject = new (largeObjectAddress) LargeObjectPage(pageMemory, this, allocationSize); | 827 LargeObjectPage* largeObject = new (largeObjectAddress) LargeObjectPage(pageMemory, this, allocationSize);
837 ASSERT(header->checkHeader()); | 828 ASSERT(header->checkHeader()); |
838 | 829 |
839 // Poison the object header and allocationGranularity bytes after the object | 830 // Poison the object header and allocationGranularity bytes after the object |
840 ASAN_POISON_MEMORY_REGION(header, sizeof(*header)); | 831 ASAN_POISON_MEMORY_REGION(header, sizeof(*header)); |
841 ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allocationGranularity); | 832 ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allocationGranularity);
842 | 833 |
843 largeObject->link(&m_firstPage); | 834 largeObject->link(&m_firstPage); |
(...skipping 73 matching lines...)
917 // The free list entries are only pointer aligned (but when we allocate | 908 // The free list entries are only pointer aligned (but when we allocate |
918 // from them we are 8 byte aligned due to the header size). | 909 // from them we are 8 byte aligned due to the header size). |
919 ASSERT(!((reinterpret_cast<uintptr_t>(address) + sizeof(HeapObjectHeader)) & allocationMask)); | 910 ASSERT(!((reinterpret_cast<uintptr_t>(address) + sizeof(HeapObjectHeader)) & allocationMask)); |
920 ASSERT(!(size & allocationMask)); | 911 ASSERT(!(size & allocationMask)); |
921 ASAN_UNPOISON_MEMORY_REGION(address, size); | 912 ASAN_UNPOISON_MEMORY_REGION(address, size); |
922 FreeListEntry* entry; | 913 FreeListEntry* entry; |
923 if (size < sizeof(*entry)) { | 914 if (size < sizeof(*entry)) { |
924 // Create a dummy header with only a size and freelist bit set. | 915 // Create a dummy header with only a size and freelist bit set. |
925 ASSERT(size >= sizeof(HeapObjectHeader)); | 916 ASSERT(size >= sizeof(HeapObjectHeader)); |
926 // Free-list encode the size to mark the lost memory as freelist memory. | 917 // Free-list encode the size to mark the lost memory as freelist memory.
927 new (NotNull, address) HeapObjectHeader(size, gcInfoIndexForFreeListHeader); | 918 new (NotNull, address) HeapObjectHeader(size, gcInfoIndexForFreeListHeader, 0);
haraken 2015/11/16 02:47:50
Can you define:
    unsigned gcGenerationForFreeListHeader
peria 2015/11/16 05:33:26
Done.
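For concreteness, the definition haraken asks for would look roughly like this; the full constant name is inferred from the neighboring gcInfoIndexForFreeListHeader, and the landed form may differ:

```cpp
// Hypothetical constant per the review suggestion: name the generation
// stored in free-list headers instead of passing a bare 0.
const unsigned gcGenerationForFreeListHeader = 0;

// The placement-new above would then become:
//   new (NotNull, address) HeapObjectHeader(
//       size, gcInfoIndexForFreeListHeader, gcGenerationForFreeListHeader);
```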
928 | 919 |
929 ASAN_POISON_MEMORY_REGION(address, size); | 920 ASAN_POISON_MEMORY_REGION(address, size); |
930 // This memory gets lost. Sweeping can reclaim it. | 921 // This memory gets lost. Sweeping can reclaim it. |
931 return; | 922 return; |
932 } | 923 } |
933 entry = new (NotNull, address) FreeListEntry(size); | 924 entry = new (NotNull, address) FreeListEntry(size); |
934 | 925 |
935 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) | 926 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) |
936 // The following logic delays reusing free lists for (at least) one GC | 927 // The following logic delays reusing free lists for (at least) one GC |
937 // cycle or coalescing. This is helpful to detect use-after-free errors | 928 // cycle or coalescing. This is helpful to detect use-after-free errors |
(...skipping 646 matching lines...)
1584 | 1575 |
1585 m_hasEntries = true; | 1576 m_hasEntries = true; |
1586 size_t index = hash(address); | 1577 size_t index = hash(address); |
1587 ASSERT(!(index & 1)); | 1578 ASSERT(!(index & 1)); |
1588 Address cachePage = roundToBlinkPageStart(address); | 1579 Address cachePage = roundToBlinkPageStart(address); |
1589 m_entries[index + 1] = m_entries[index]; | 1580 m_entries[index + 1] = m_entries[index]; |
1590 m_entries[index] = cachePage; | 1581 m_entries[index] = cachePage; |
1591 } | 1582 } |
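addEntry() above implements a two-way, most-recent-first cache: hash() always returns an even index, the resident entry is demoted into the odd slot, and the new page takes the even slot so it is probed first. A standalone sketch with assumed table size, page size, and hash (the real constants are defined elsewhere in this file):

```cpp
#include <cstddef>
#include <cstdint>

const size_t kBlinkPageSizeLog2 = 17;  // assumed 128 KiB Blink pages
const size_t kEntryCount = 1024;       // assumed table size, must be even

static uintptr_t s_entries[kEntryCount];

// Always yields an even index, so index and index + 1 form one bucket.
static size_t hashAddress(uintptr_t page)
{
    return ((page >> kBlinkPageSizeLog2) & (kEntryCount / 2 - 1)) * 2;
}

static void addEntry(uintptr_t address)
{
    // Round down to the start of the containing Blink page.
    uintptr_t cachePage = address & ~((uintptr_t(1) << kBlinkPageSizeLog2) - 1);
    size_t index = hashAddress(cachePage);
    s_entries[index + 1] = s_entries[index];  // demote the old resident
    s_entries[index] = cachePage;             // most recent entry probed first
}

int main()
{
    addEntry(0x20000u);  // first page occupies the even slot
    addEntry(0x40000u);  // on a bucket collision, demotes the first entry
}
```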
1592 | 1583 |
1593 } // namespace blink | 1584 } // namespace blink |