| OLD | NEW | 
|---|---|
| 1 /* | 1 /* | 
| 2  * Copyright (C) 2013 Google Inc. All rights reserved. | 2  * Copyright (C) 2013 Google Inc. All rights reserved. | 
| 3  * | 3  * | 
| 4  * Redistribution and use in source and binary forms, with or without | 4  * Redistribution and use in source and binary forms, with or without | 
| 5  * modification, are permitted provided that the following conditions are | 5  * modification, are permitted provided that the following conditions are | 
| 6  * met: | 6  * met: | 
| 7  * | 7  * | 
| 8  *     * Redistributions of source code must retain the above copyright | 8  *     * Redistributions of source code must retain the above copyright | 
| 9  * notice, this list of conditions and the following disclaimer. | 9  * notice, this list of conditions and the following disclaimer. | 
| 10  *     * Redistributions in binary form must reproduce the above | 10  *     * Redistributions in binary form must reproduce the above | 
| (...skipping 73 matching lines...) |
| 84 #define ENABLE_ASAN_CONTAINER_ANNOTATIONS 0 | 84 #define ENABLE_ASAN_CONTAINER_ANNOTATIONS 0 | 
| 85 #define ASAN_RETIRE_CONTAINER_ANNOTATION(payload, payloadSize) | 85 #define ASAN_RETIRE_CONTAINER_ANNOTATION(payload, payloadSize) | 
| 86 #define ASAN_MARK_LARGE_VECTOR_CONTAINER(arena, largeObject) | 86 #define ASAN_MARK_LARGE_VECTOR_CONTAINER(arena, largeObject) | 
| 87 #endif | 87 #endif | 
| 88 | 88 | 
| 89 namespace blink { | 89 namespace blink { | 
| 90 | 90 | 
| 91 #if DCHECK_IS_ON() && CPU(64BIT) | 91 #if DCHECK_IS_ON() && CPU(64BIT) | 
| 92 NO_SANITIZE_ADDRESS | 92 NO_SANITIZE_ADDRESS | 
| 93 void HeapObjectHeader::zapMagic() { | 93 void HeapObjectHeader::zapMagic() { | 
| 94   ASSERT(checkHeader()); | 94   checkHeader(); | 
| 95   m_magic = zappedMagic; | 95   m_magic = zappedMagic; | 
| 96 } | 96 } | 
| 97 #endif | 97 #endif | 
| 98 | 98 | 
| 99 void HeapObjectHeader::finalize(Address object, size_t objectSize) { | 99 void HeapObjectHeader::finalize(Address object, size_t objectSize) { | 
| 100   HeapAllocHooks::freeHookIfEnabled(object); | 100   HeapAllocHooks::freeHookIfEnabled(object); | 
| 101   const GCInfo* gcInfo = ThreadHeap::gcInfo(gcInfoIndex()); | 101   const GCInfo* gcInfo = ThreadHeap::gcInfo(gcInfoIndex()); | 
| 102   if (gcInfo->hasFinalizer()) | 102   if (gcInfo->hasFinalizer()) | 
| 103     gcInfo->m_finalize(object); | 103     gcInfo->m_finalize(object); | 
| 104 | 104 | 
| (...skipping 594 matching lines...) |
| 699         // invariant that memory on the free list is zero filled. | 699         // invariant that memory on the free list is zero filled. | 
| 700         // The rest of the memory is already on the free list and is | 700         // The rest of the memory is already on the free list and is | 
| 701         // therefore already zero filled. | 701         // therefore already zero filled. | 
| 702         SET_MEMORY_INACCESSIBLE(headerAddress, size < sizeof(FreeListEntry) | 702         SET_MEMORY_INACCESSIBLE(headerAddress, size < sizeof(FreeListEntry) | 
| 703                                                    ? size | 703                                                    ? size | 
| 704                                                    : sizeof(FreeListEntry)); | 704                                                    : sizeof(FreeListEntry)); | 
| 705         CHECK_MEMORY_INACCESSIBLE(headerAddress, size); | 705         CHECK_MEMORY_INACCESSIBLE(headerAddress, size); | 
| 706         headerAddress += size; | 706         headerAddress += size; | 
| 707         continue; | 707         continue; | 
| 708       } | 708       } | 
| 709       ASSERT(header->checkHeader()); | 709       header->checkHeader(); | 
| 710       if (startOfGap != headerAddress) | 710       if (startOfGap != headerAddress) | 
| 711         addToFreeList(startOfGap, headerAddress - startOfGap); | 711         addToFreeList(startOfGap, headerAddress - startOfGap); | 
| 712 | 712 | 
| 713       headerAddress += size; | 713       headerAddress += size; | 
| 714       startOfGap = headerAddress; | 714       startOfGap = headerAddress; | 
| 715     } | 715     } | 
| 716 | 716 | 
| 717     if (startOfGap != page->payloadEnd()) | 717     if (startOfGap != page->payloadEnd()) | 
| 718       addToFreeList(startOfGap, page->payloadEnd() - startOfGap); | 718       addToFreeList(startOfGap, page->payloadEnd() - startOfGap); | 
| 719   } | 719   } | 
| 720   getThreadState()->decreaseAllocatedObjectSize(freedSize); | 720   getThreadState()->decreaseAllocatedObjectSize(freedSize); | 
| 721   ASSERT(m_promptlyFreedSize == freedSize); | 721   ASSERT(m_promptlyFreedSize == freedSize); | 
| 722   m_promptlyFreedSize = 0; | 722   m_promptlyFreedSize = 0; | 
| 723   return true; | 723   return true; | 
| 724 } | 724 } | 
| 725 | 725 | 
| 726 void NormalPageArena::promptlyFreeObject(HeapObjectHeader* header) { | 726 void NormalPageArena::promptlyFreeObject(HeapObjectHeader* header) { | 
| 727   ASSERT(!getThreadState()->sweepForbidden()); | 727   ASSERT(!getThreadState()->sweepForbidden()); | 
| 728   ASSERT(header->checkHeader()); | 728   header->checkHeader(); | 
| 729   Address address = reinterpret_cast<Address>(header); | 729   Address address = reinterpret_cast<Address>(header); | 
| 730   Address payload = header->payload(); | 730   Address payload = header->payload(); | 
| 731   size_t size = header->size(); | 731   size_t size = header->size(); | 
| 732   size_t payloadSize = header->payloadSize(); | 732   size_t payloadSize = header->payloadSize(); | 
| 733   ASSERT(size > 0); | 733   ASSERT(size > 0); | 
| 734   ASSERT(pageFromObject(address) == findPageFromAddress(address)); | 734   ASSERT(pageFromObject(address) == findPageFromAddress(address)); | 
| 735 | 735 | 
| 736   { | 736   { | 
| 737     ThreadState::SweepForbiddenScope forbiddenScope(getThreadState()); | 737     ThreadState::SweepForbiddenScope forbiddenScope(getThreadState()); | 
| 738     header->finalize(payload, payloadSize); | 738     header->finalize(payload, payloadSize); | 
| 739     if (address + size == m_currentAllocationPoint) { | 739     if (address + size == m_currentAllocationPoint) { | 
| 740       m_currentAllocationPoint = address; | 740       m_currentAllocationPoint = address; | 
| 741       setRemainingAllocationSize(m_remainingAllocationSize + size); | 741       setRemainingAllocationSize(m_remainingAllocationSize + size); | 
| 742       SET_MEMORY_INACCESSIBLE(address, size); | 742       SET_MEMORY_INACCESSIBLE(address, size); | 
| 743       return; | 743       return; | 
| 744     } | 744     } | 
| 745     SET_MEMORY_INACCESSIBLE(payload, payloadSize); | 745     SET_MEMORY_INACCESSIBLE(payload, payloadSize); | 
| 746     header->markPromptlyFreed(); | 746     header->markPromptlyFreed(); | 
| 747   } | 747   } | 
| 748 | 748 | 
| 749   m_promptlyFreedSize += size; | 749   m_promptlyFreedSize += size; | 
| 750 } | 750 } | 
| 751 | 751 | 
| 752 bool NormalPageArena::expandObject(HeapObjectHeader* header, size_t newSize) { | 752 bool NormalPageArena::expandObject(HeapObjectHeader* header, size_t newSize) { | 
| 753   // It's possible that Vector requests a smaller expanded size because | 753   // It's possible that Vector requests a smaller expanded size because | 
| 754   // Vector::shrinkCapacity can set a capacity smaller than the actual payload | 754   // Vector::shrinkCapacity can set a capacity smaller than the actual payload | 
| 755   // size. | 755   // size. | 
| 756   ASSERT(header->checkHeader()); | 756   header->checkHeader(); | 
| 757   if (header->payloadSize() >= newSize) | 757   if (header->payloadSize() >= newSize) | 
| 758     return true; | 758     return true; | 
| 759   size_t allocationSize = ThreadHeap::allocationSizeFromSize(newSize); | 759   size_t allocationSize = ThreadHeap::allocationSizeFromSize(newSize); | 
| 760   ASSERT(allocationSize > header->size()); | 760   ASSERT(allocationSize > header->size()); | 
| 761   size_t expandSize = allocationSize - header->size(); | 761   size_t expandSize = allocationSize - header->size(); | 
| 762   if (isObjectAllocatedAtAllocationPoint(header) && | 762   if (isObjectAllocatedAtAllocationPoint(header) && | 
| 763       expandSize <= m_remainingAllocationSize) { | 763       expandSize <= m_remainingAllocationSize) { | 
| 764     m_currentAllocationPoint += expandSize; | 764     m_currentAllocationPoint += expandSize; | 
| 765     ASSERT(m_remainingAllocationSize >= expandSize); | 765     ASSERT(m_remainingAllocationSize >= expandSize); | 
| 766     setRemainingAllocationSize(m_remainingAllocationSize - expandSize); | 766     setRemainingAllocationSize(m_remainingAllocationSize - expandSize); | 
| 767     // Unpoison the memory used for the object (payload). | 767     // Unpoison the memory used for the object (payload). | 
| 768     SET_MEMORY_ACCESSIBLE(header->payloadEnd(), expandSize); | 768     SET_MEMORY_ACCESSIBLE(header->payloadEnd(), expandSize); | 
| 769     header->setSize(allocationSize); | 769     header->setSize(allocationSize); | 
| 770     ASSERT(findPageFromAddress(header->payloadEnd() - 1)); | 770     ASSERT(findPageFromAddress(header->payloadEnd() - 1)); | 
| 771     return true; | 771     return true; | 
| 772   } | 772   } | 
| 773   return false; | 773   return false; | 
| 774 } | 774 } | 
| 775 | 775 | 
| 776 bool NormalPageArena::shrinkObject(HeapObjectHeader* header, size_t newSize) { | 776 bool NormalPageArena::shrinkObject(HeapObjectHeader* header, size_t newSize) { | 
| 777   ASSERT(header->checkHeader()); | 777   header->checkHeader(); | 
| 778   ASSERT(header->payloadSize() > newSize); | 778   ASSERT(header->payloadSize() > newSize); | 
| 779   size_t allocationSize = ThreadHeap::allocationSizeFromSize(newSize); | 779   size_t allocationSize = ThreadHeap::allocationSizeFromSize(newSize); | 
| 780   ASSERT(header->size() > allocationSize); | 780   ASSERT(header->size() > allocationSize); | 
| 781   size_t shrinkSize = header->size() - allocationSize; | 781   size_t shrinkSize = header->size() - allocationSize; | 
| 782   if (isObjectAllocatedAtAllocationPoint(header)) { | 782   if (isObjectAllocatedAtAllocationPoint(header)) { | 
| 783     m_currentAllocationPoint -= shrinkSize; | 783     m_currentAllocationPoint -= shrinkSize; | 
| 784     setRemainingAllocationSize(m_remainingAllocationSize + shrinkSize); | 784     setRemainingAllocationSize(m_remainingAllocationSize + shrinkSize); | 
| 785     SET_MEMORY_INACCESSIBLE(m_currentAllocationPoint, shrinkSize); | 785     SET_MEMORY_INACCESSIBLE(m_currentAllocationPoint, shrinkSize); | 
| 786     header->setSize(allocationSize); | 786     header->setSize(allocationSize); | 
| 787     return true; | 787     return true; | 
| (...skipping 208 matching lines...) |
| 996   for (size_t i = 0; i < largeObjectSize; ++i) | 996   for (size_t i = 0; i < largeObjectSize; ++i) | 
| 997     ASSERT(!largeObjectAddress[i]); | 997     ASSERT(!largeObjectAddress[i]); | 
| 998 #endif | 998 #endif | 
| 999   ASSERT(gcInfoIndex > 0); | 999   ASSERT(gcInfoIndex > 0); | 
| 1000   HeapObjectHeader* header = new (NotNull, headerAddress) | 1000   HeapObjectHeader* header = new (NotNull, headerAddress) | 
| 1001       HeapObjectHeader(largeObjectSizeInHeader, gcInfoIndex); | 1001       HeapObjectHeader(largeObjectSizeInHeader, gcInfoIndex); | 
| 1002   Address result = headerAddress + sizeof(*header); | 1002   Address result = headerAddress + sizeof(*header); | 
| 1003   ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); | 1003   ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); | 
| 1004   LargeObjectPage* largeObject = new (largeObjectAddress) | 1004   LargeObjectPage* largeObject = new (largeObjectAddress) | 
| 1005       LargeObjectPage(pageMemory, this, allocationSize); | 1005       LargeObjectPage(pageMemory, this, allocationSize); | 
| 1006   ASSERT(header->checkHeader()); | 1006   header->checkHeader(); | 
| 1007 | 1007 | 
| 1008   // Poison the object header and allocationGranularity bytes after the object | 1008   // Poison the object header and allocationGranularity bytes after the object | 
| 1009   ASAN_POISON_MEMORY_REGION(header, sizeof(*header)); | 1009   ASAN_POISON_MEMORY_REGION(header, sizeof(*header)); | 
| 1010   ASAN_POISON_MEMORY_REGION(largeObject->getAddress() + largeObject->size(), | 1010   ASAN_POISON_MEMORY_REGION(largeObject->getAddress() + largeObject->size(), | 
| 1011                             allocationGranularity); | 1011                             allocationGranularity); | 
| 1012 | 1012 | 
| 1013   largeObject->link(&m_firstPage); | 1013   largeObject->link(&m_firstPage); | 
| 1014 | 1014 | 
| 1015   getThreadState()->heap().heapStats().increaseAllocatedSpace( | 1015   getThreadState()->heap().heapStats().increaseAllocatedSpace( | 
| 1016       largeObject->size()); | 1016       largeObject->size()); | 
| (...skipping 237 matching lines...) |
| 1254 | 1254 | 
| 1255 size_t NormalPage::objectPayloadSizeForTesting() { | 1255 size_t NormalPage::objectPayloadSizeForTesting() { | 
| 1256   size_t objectPayloadSize = 0; | 1256   size_t objectPayloadSize = 0; | 
| 1257   Address headerAddress = payload(); | 1257   Address headerAddress = payload(); | 
| 1258   markAsSwept(); | 1258   markAsSwept(); | 
| 1259   ASSERT(headerAddress != payloadEnd()); | 1259   ASSERT(headerAddress != payloadEnd()); | 
| 1260   do { | 1260   do { | 
| 1261     HeapObjectHeader* header = | 1261     HeapObjectHeader* header = | 
| 1262         reinterpret_cast<HeapObjectHeader*>(headerAddress); | 1262         reinterpret_cast<HeapObjectHeader*>(headerAddress); | 
| 1263     if (!header->isFree()) { | 1263     if (!header->isFree()) { | 
| 1264       ASSERT(header->checkHeader()); | 1264       header->checkHeader(); | 
| 1265       objectPayloadSize += header->payloadSize(); | 1265       objectPayloadSize += header->payloadSize(); | 
| 1266     } | 1266     } | 
| 1267     ASSERT(header->size() < blinkPagePayloadSize()); | 1267     ASSERT(header->size() < blinkPagePayloadSize()); | 
| 1268     headerAddress += header->size(); | 1268     headerAddress += header->size(); | 
| 1269     ASSERT(headerAddress <= payloadEnd()); | 1269     ASSERT(headerAddress <= payloadEnd()); | 
| 1270   } while (headerAddress < payloadEnd()); | 1270   } while (headerAddress < payloadEnd()); | 
| 1271   return objectPayloadSize; | 1271   return objectPayloadSize; | 
| 1272 } | 1272 } | 
| 1273 | 1273 | 
| 1274 bool NormalPage::isEmpty() { | 1274 bool NormalPage::isEmpty() { | 
| (...skipping 295 matching lines...) |
| 1570     ASSERT(mapIndex > 0); | 1570     ASSERT(mapIndex > 0); | 
| 1571     byte = m_objectStartBitMap[--mapIndex]; | 1571     byte = m_objectStartBitMap[--mapIndex]; | 
| 1572   } | 1572   } | 
| 1573   int leadingZeroes = numberOfLeadingZeroes(byte); | 1573   int leadingZeroes = numberOfLeadingZeroes(byte); | 
| 1574   objectStartNumber = (mapIndex * 8) + 7 - leadingZeroes; | 1574   objectStartNumber = (mapIndex * 8) + 7 - leadingZeroes; | 
| 1575   objectOffset = objectStartNumber * allocationGranularity; | 1575   objectOffset = objectStartNumber * allocationGranularity; | 
| 1576   Address objectAddress = objectOffset + payload(); | 1576   Address objectAddress = objectOffset + payload(); | 
| 1577   HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(objectAddress); | 1577   HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(objectAddress); | 
| 1578   if (header->isFree()) | 1578   if (header->isFree()) | 
| 1579     return nullptr; | 1579     return nullptr; | 
| 1580   ASSERT(header->checkHeader()); | 1580   header->checkHeader(); | 
| 1581   return header; | 1581   return header; | 
| 1582 } | 1582 } | 
| 1583 | 1583 | 
| 1584 #if DCHECK_IS_ON() | 1584 #if DCHECK_IS_ON() | 
| 1585 static bool isUninitializedMemory(void* objectPointer, size_t objectSize) { | 1585 static bool isUninitializedMemory(void* objectPointer, size_t objectSize) { | 
| 1586   // Scan through the object's fields and check that they are all zero. | 1586   // Scan through the object's fields and check that they are all zero. | 
| 1587   Address* objectFields = reinterpret_cast<Address*>(objectPointer); | 1587   Address* objectFields = reinterpret_cast<Address*>(objectPointer); | 
| 1588   for (size_t i = 0; i < objectSize / sizeof(Address); ++i) { | 1588   for (size_t i = 0; i < objectSize / sizeof(Address); ++i) { | 
| 1589     if (objectFields[i] != 0) | 1589     if (objectFields[i] != 0) | 
| 1590       return false; | 1590       return false; | 
| 1591   } | 1591   } | 
| 1592   return true; | 1592   return true; | 
| 1593 } | 1593 } | 
| 1594 #endif | 1594 #endif | 
| 1595 | 1595 | 
| 1596 static void markPointer(Visitor* visitor, HeapObjectHeader* header) { | 1596 static void markPointer(Visitor* visitor, HeapObjectHeader* header) { | 
| 1597   ASSERT(header->checkHeader()); | 1597   header->checkHeader(); | 
| 1598   const GCInfo* gcInfo = ThreadHeap::gcInfo(header->gcInfoIndex()); | 1598   const GCInfo* gcInfo = ThreadHeap::gcInfo(header->gcInfoIndex()); | 
| 1599   if (gcInfo->hasVTable() && !vTableInitialized(header->payload())) { | 1599   if (gcInfo->hasVTable() && !vTableInitialized(header->payload())) { | 
| 1600     // We hit this branch when a GC strikes before GarbageCollected<>'s | 1600     // We hit this branch when a GC strikes before GarbageCollected<>'s | 
| 1601     // constructor runs. | 1601     // constructor runs. | 
| 1602     // | 1602     // | 
| 1603     // class A : public GarbageCollected<A> { virtual void f() = 0; }; | 1603     // class A : public GarbageCollected<A> { virtual void f() = 0; }; | 
| 1604     // class B : public A { | 1604     // class B : public A { | 
| 1605     //   B() : A(foo()) { }; | 1605     //   B() : A(foo()) { }; | 
| 1606     // }; | 1606     // }; | 
| 1607     // | 1607     // | 
| (...skipping 218 matching lines...) |
| 1826 | 1826 | 
| 1827   m_hasEntries = true; | 1827   m_hasEntries = true; | 
| 1828   size_t index = hash(address); | 1828   size_t index = hash(address); | 
| 1829   ASSERT(!(index & 1)); | 1829   ASSERT(!(index & 1)); | 
| 1830   Address cachePage = roundToBlinkPageStart(address); | 1830   Address cachePage = roundToBlinkPageStart(address); | 
| 1831   m_entries[index + 1] = m_entries[index]; | 1831   m_entries[index + 1] = m_entries[index]; | 
| 1832   m_entries[index] = cachePage; | 1832   m_entries[index] = cachePage; | 
| 1833 } | 1833 } | 
| 1834 | 1834 | 
| 1835 }  // namespace blink | 1835 }  // namespace blink | 
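Taken together, the hunks above all make the same mechanical change: call sites stop wrapping `checkHeader()` in `ASSERT(...)` and instead call it directly, which implies the validation now happens inside `checkHeader()` itself rather than via a returned bool. Below is a minimal sketch of that call-site pattern; the magic constant, field layout, and use of `assert` are hypothetical stand-ins (the real `HeapObjectHeader` internals are not shown in this diff, and Blink may use CHECK/DCHECK instead).

```cpp
#include <cassert>
#include <cstdint>

class HeapObjectHeader {
 public:
  // OLD style: returns a bool, so callers had to write
  // ASSERT(header->checkHeader()), and the check was only
  // evaluated in builds where ASSERT expands to something.
  bool checkHeaderOld() const { return m_magic == kMagic; }

  // NEW style: void, validates internally, so callers simply
  // write header->checkHeader(). Here the validation is modeled
  // with assert; the actual mechanism in Blink may differ.
  void checkHeader() const { assert(m_magic == kMagic); }

 private:
  static const uint32_t kMagic = 0xC0DE;  // hypothetical magic value
  uint32_t m_magic = kMagic;
};

int main() {
  HeapObjectHeader header;
  header.checkHeader();  // the new call-site shape used throughout the diff
  return 0;
}
```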