| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 366 matching lines...) |
| 377 m_freeList.clear(); | 377 m_freeList.clear(); |
| 378 } | 378 } |
| 379 | 379 |
| 380 #if ENABLE(ASSERT) | 380 #if ENABLE(ASSERT) |
| 381 bool NormalPageHeap::isConsistentForGC() | 381 bool NormalPageHeap::isConsistentForGC() |
| 382 { | 382 { |
| 383 // A thread heap is consistent for sweeping if none of the pages to be swept | 383 // A thread heap is consistent for sweeping if none of the pages to be swept |
| 384 // contain a freelist block or the current allocation point. | 384 // contain a freelist block or the current allocation point. |
| 385 for (size_t i = 0; i < blinkPageSizeLog2; ++i) { | 385 for (size_t i = 0; i < blinkPageSizeLog2; ++i) { |
| 386 for (FreeListEntry* freeListEntry = m_freeList.m_freeLists[i]; freeListEntry; freeListEntry = freeListEntry->next()) { | 386 for (FreeListEntry* freeListEntry = m_freeList.m_freeLists[i]; freeListEntry; freeListEntry = freeListEntry->next()) { |
| 387 if (pagesToBeSweptContains(freeListEntry->address())) | 387 if (pagesToBeSweptContains(freeListEntry->getAddress())) |
| 388 return false; | 388 return false; |
| 389 } | 389 } |
| 390 } | 390 } |
| 391 if (hasCurrentAllocationArea()) { | 391 if (hasCurrentAllocationArea()) { |
| 392 if (pagesToBeSweptContains(currentAllocationPoint())) | 392 if (pagesToBeSweptContains(currentAllocationPoint())) |
| 393 return false; | 393 return false; |
| 394 } | 394 } |
| 395 return true; | 395 return true; |
| 396 } | 396 } |
| 397 | 397 |
| (...skipping 362 matching lines...) |
| 760 FreeListEntry* entry = m_freeList.m_freeLists[index]; | 760 FreeListEntry* entry = m_freeList.m_freeLists[index]; |
| 761 if (allocationSize > bucketSize) { | 761 if (allocationSize > bucketSize) { |
| 762 // Final bucket candidate; check initial entry if it is able | 762 // Final bucket candidate; check initial entry if it is able |
| 763 // to service this allocation. Do not perform a linear scan, | 763 // to service this allocation. Do not perform a linear scan, |
| 764 // as it is considered too costly. | 764 // as it is considered too costly. |
| 765 if (!entry || entry->size() < allocationSize) | 765 if (!entry || entry->size() < allocationSize) |
| 766 break; | 766 break; |
| 767 } | 767 } |
| 768 if (entry) { | 768 if (entry) { |
| 769 entry->unlink(&m_freeList.m_freeLists[index]); | 769 entry->unlink(&m_freeList.m_freeLists[index]); |
| 770 setAllocationPoint(entry->address(), entry->size()); | 770 setAllocationPoint(entry->getAddress(), entry->size()); |
| 771 ASSERT(hasCurrentAllocationArea()); | 771 ASSERT(hasCurrentAllocationArea()); |
| 772 ASSERT(remainingAllocationSize() >= allocationSize); | 772 ASSERT(remainingAllocationSize() >= allocationSize); |
| 773 m_freeList.m_biggestFreeListIndex = index; | 773 m_freeList.m_biggestFreeListIndex = index; |
| 774 return allocateObject(allocationSize, gcInfoIndex); | 774 return allocateObject(allocationSize, gcInfoIndex); |
| 775 } | 775 } |
| 776 } | 776 } |
| 777 m_freeList.m_biggestFreeListIndex = index; | 777 m_freeList.m_biggestFreeListIndex = index; |
| 778 return nullptr; | 778 return nullptr; |
| 779 } | 779 } |
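[Note] The loop above (lines 760-779) walks a segregated free list: buckets are indexed by power-of-two size class, so every entry in a bucket above the one the request maps to is guaranteed large enough, while in the final candidate bucket only the head entry is checked, since a linear scan of the chain is considered too costly. A minimal standalone sketch of that policy, with hypothetical names and sizes (not Blink's actual types):

    #include <cstddef>

    struct FreeEntry {
        size_t size;
        FreeEntry* next;
    };

    struct SegregatedFreeList {
        static const int kNumBuckets = 17; // assumed: covers blocks up to 128 KB
        FreeEntry* buckets[kNumBuckets] = {};

        // Index of the highest set bit: a block of size s (s >= 1) lands in
        // bucket floor(log2(s)), so every block in bucket i is >= 2^i bytes.
        static int bucketIndex(size_t size) {
            int index = -1;
            while (size) { ++index; size >>= 1; }
            return index;
        }

        void add(FreeEntry* entry) {
            int index = bucketIndex(entry->size);
            entry->next = buckets[index];
            buckets[index] = entry;
        }

        // Walk buckets from large to small. In any bucket above the one n maps
        // to, every block is at least 2^(first+1) > n bytes, so the head always
        // fits. In the final candidate bucket only the head is inspected; the
        // chain is never scanned, mirroring the "too costly" comment above.
        FreeEntry* allocate(size_t n) { // n >= 1
            const int first = bucketIndex(n);
            for (int index = kNumBuckets - 1; index >= first; --index) {
                FreeEntry* entry = buckets[index];
                if (index == first && entry && entry->size < n)
                    break; // head too small: give up rather than scan the chain
                if (entry) {
                    buckets[index] = entry->next; // unlink and hand out the block
                    return entry;
                }
            }
            return nullptr; // caller falls back to allocating a fresh page
        }
    };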
| 780 | 780 |
| (...skipping 44 matching lines...) |
| 825 #endif | 825 #endif |
| 826 ASSERT(gcInfoIndex > 0); | 826 ASSERT(gcInfoIndex > 0); |
| 827 HeapObjectHeader* header = new (NotNull, headerAddress) HeapObjectHeader(largeObjectSizeInHeader, gcInfoIndex); | 827 HeapObjectHeader* header = new (NotNull, headerAddress) HeapObjectHeader(largeObjectSizeInHeader, gcInfoIndex); |
| 828 Address result = headerAddress + sizeof(*header); | 828 Address result = headerAddress + sizeof(*header); |
| 829 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); | 829 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); |
| 830 LargeObjectPage* largeObject = new (largeObjectAddress) LargeObjectPage(pageMemory, this, allocationSize); | 830 LargeObjectPage* largeObject = new (largeObjectAddress) LargeObjectPage(pageMemory, this, allocationSize); |
| 831 ASSERT(header->checkHeader()); | 831 ASSERT(header->checkHeader()); |
| 832 | 832 |
| 833 // Poison the object header and allocationGranularity bytes after the object | 833 // Poison the object header and allocationGranularity bytes after the object |
| 834 ASAN_POISON_MEMORY_REGION(header, sizeof(*header)); | 834 ASAN_POISON_MEMORY_REGION(header, sizeof(*header)); |
| 835 ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allocationGranularity); | 835 ASAN_POISON_MEMORY_REGION(largeObject->getAddress() + largeObject->size(), allocationGranularity); |
| 836 | 836 |
| 837 largeObject->link(&m_firstPage); | 837 largeObject->link(&m_firstPage); |
| 838 | 838 |
| 839 Heap::increaseAllocatedSpace(largeObject->size()); | 839 Heap::increaseAllocatedSpace(largeObject->size()); |
| 840 threadState()->increaseAllocatedObjectSize(largeObject->size()); | 840 threadState()->increaseAllocatedObjectSize(largeObject->size()); |
| 841 return result; | 841 return result; |
| 842 } | 842 } |
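[Note] The poisoning at lines 834-835 relies on AddressSanitizer redzones: the object header and one allocation granule past the object are marked unaddressable, so any stray access to them aborts with a report. A minimal sketch of the same pattern outside Blink; the struct, constant, and function names here are hypothetical stand-ins, and the fallback macros make the sketch build without ASan:

    #include <cstddef>
    #include <cstdlib>
    #include <new>
    #if defined(__has_feature)
    #  if __has_feature(address_sanitizer)
    #    include <sanitizer/asan_interface.h>
    #  endif
    #endif
    #ifndef ASAN_POISON_MEMORY_REGION
    #  define ASAN_POISON_MEMORY_REGION(addr, size) ((void)(addr), (void)(size))
    #  define ASAN_UNPOISON_MEMORY_REGION(addr, size) ((void)(addr), (void)(size))
    #endif

    struct ObjectHeader { size_t payloadSize; };
    static const size_t kGranularity = 8; // stand-in for allocationGranularity

    // Layout: [header][payload][redzone]. Header and redzone are poisoned so
    // that reads or writes of them trap under ASan; the payload stays usable.
    void* allocateGuarded(size_t payloadSize)
    {
        char* block = static_cast<char*>(
            malloc(sizeof(ObjectHeader) + payloadSize + kGranularity));
        new (block) ObjectHeader{payloadSize};
        char* payload = block + sizeof(ObjectHeader);
        ASAN_POISON_MEMORY_REGION(block, sizeof(ObjectHeader));
        ASAN_POISON_MEMORY_REGION(payload + payloadSize, kGranularity);
        return payload;
    }

    // Unpoison everything before freeing, as freeLargeObjectPage does above,
    // because the allocator itself will legitimately touch those bytes.
    void freeGuarded(void* payload)
    {
        char* block = static_cast<char*>(payload) - sizeof(ObjectHeader);
        ASAN_UNPOISON_MEMORY_REGION(block, sizeof(ObjectHeader));
        ObjectHeader* header = reinterpret_cast<ObjectHeader*>(block);
        ASAN_UNPOISON_MEMORY_REGION(
            static_cast<char*>(payload) + header->payloadSize, kGranularity);
        free(block);
    }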
| 843 | 843 |
| 844 void LargeObjectHeap::freeLargeObjectPage(LargeObjectPage* object) | 844 void LargeObjectHeap::freeLargeObjectPage(LargeObjectPage* object) |
| 845 { | 845 { |
| 846 ASAN_UNPOISON_MEMORY_REGION(object->payload(), object->payloadSize()); | 846 ASAN_UNPOISON_MEMORY_REGION(object->payload(), object->payloadSize()); |
| 847 object->heapObjectHeader()->finalize(object->payload(), object->payloadSize()); | 847 object->heapObjectHeader()->finalize(object->payload(), object->payloadSize()); |
| 848 Heap::decreaseAllocatedSpace(object->size()); | 848 Heap::decreaseAllocatedSpace(object->size()); |
| 849 | 849 |
| 850 // Unpoison the object header and allocationGranularity bytes after the | 850 // Unpoison the object header and allocationGranularity bytes after the |
| 851 // object before freeing. | 851 // object before freeing. |
| 852 ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(HeapObjectHeader)); | 852 ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(HeapObjectHeader)); |
| 853 ASAN_UNPOISON_MEMORY_REGION(object->address() + object->size(), allocationGranularity); | 853 ASAN_UNPOISON_MEMORY_REGION(object->getAddress() + object->size(), allocationGranularity); |
| 854 | 854 |
| 855 if (object->terminating()) { | 855 if (object->terminating()) { |
| 856 ASSERT(ThreadState::current()->isTerminating()); | 856 ASSERT(ThreadState::current()->isTerminating()); |
| 857 // The thread is shutting down and this page is being removed as a part | 857 // The thread is shutting down and this page is being removed as a part |
| 858 // of the thread local GC. In that case the object could be traced in | 858 // of the thread local GC. In that case the object could be traced in |
| 859 // the next global GC if there is a dangling pointer from a live thread | 859 // the next global GC if there is a dangling pointer from a live thread |
| 860 // heap to this dead thread heap. To guard against this, we put the | 860 // heap to this dead thread heap. To guard against this, we put the |
| 861 // page into the orphaned page pool and zap the page memory. This | 861 // page into the orphaned page pool and zap the page memory. This |
| 862 // ensures that tracing the dangling pointer in the next global GC just | 862 // ensures that tracing the dangling pointer in the next global GC just |
| 863 // crashes instead of causing use-after-frees. After the next global | 863 // crashes instead of causing use-after-frees. After the next global |
| (...skipping 556 matching lines...) |
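[Note] The comment at lines 855-863 (cut off by the elision above) describes zapping pages freed during thread shutdown: the memory is overwritten with a distinctive pattern so that a dangling pointer traced by a later global GC crashes immediately, instead of silently reading reused memory. A minimal sketch of the idea, with a hypothetical zap value; Blink's actual orphaned-page handling is more involved:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    static const uint8_t kZapValue = 0x2a; // hypothetical; any distinctive byte works

    // Fill an orphaned page so stale pointers into it are easy to spot: a GC
    // trace that dereferences zapped memory reads the pattern (or faults if
    // the page is also protected) rather than following reused heap data.
    void zapPage(void* page, size_t pageSize)
    {
        memset(page, kZapValue, pageSize);
    }

    // Debugging aid: did this address land in zapped (dead-thread) memory?
    bool looksZapped(const void* address, size_t bytes)
    {
        const uint8_t* p = static_cast<const uint8_t*>(address);
        for (size_t i = 0; i < bytes; ++i) {
            if (p[i] != kZapValue)
                return false;
        }
        return true;
    }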
| 1420 pageDump->addScalar("live_size", "bytes", liveSize); | 1420 pageDump->addScalar("live_size", "bytes", liveSize); |
| 1421 pageDump->addScalar("dead_size", "bytes", deadSize); | 1421 pageDump->addScalar("dead_size", "bytes", deadSize); |
| 1422 pageDump->addScalar("free_size", "bytes", freeSize); | 1422 pageDump->addScalar("free_size", "bytes", freeSize); |
| 1423 heapInfo.freeSize += freeSize; | 1423 heapInfo.freeSize += freeSize; |
| 1424 heapInfo.freeCount += freeCount; | 1424 heapInfo.freeCount += freeCount; |
| 1425 } | 1425 } |
| 1426 | 1426 |
| 1427 #if ENABLE(ASSERT) | 1427 #if ENABLE(ASSERT) |
| 1428 bool NormalPage::contains(Address addr) | 1428 bool NormalPage::contains(Address addr) |
| 1429 { | 1429 { |
| 1430 Address blinkPageStart = roundToBlinkPageStart(address()); | 1430 Address blinkPageStart = roundToBlinkPageStart(getAddress()); |
| 1431 ASSERT(blinkPageStart == address() - blinkGuardPageSize); // Page is at aligned address plus guard page size. | 1431 ASSERT(blinkPageStart == getAddress() - blinkGuardPageSize); // Page is at aligned address plus guard page size. |
| 1432 return blinkPageStart <= addr && addr < blinkPageStart + blinkPageSize; | 1432 return blinkPageStart <= addr && addr < blinkPageStart + blinkPageSize; |
| 1433 } | 1433 } |
| 1434 #endif | 1434 #endif |
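[Note] NormalPage::contains works because pages live inside blink-page-aligned regions, with the page object sitting one guard page past the aligned base (the ASSERT at line 1431). A minimal sketch of that arithmetic with assumed constants (Blink's real values may differ):

    #include <cstddef>
    #include <cstdint>

    static const size_t kBlinkPageSize = 1 << 17; // assumed 128 KB regions
    static const size_t kGuardPageSize = 4096;    // assumed system page size
    static const uintptr_t kBlinkPageMask = ~(uintptr_t)(kBlinkPageSize - 1);

    // Clearing the low bits of any interior address recovers the aligned
    // start of the region that address lives in.
    inline char* roundToPageStart(char* addr)
    {
        return reinterpret_cast<char*>(
            reinterpret_cast<uintptr_t>(addr) & kBlinkPageMask);
    }

    // A page object placed at (alignedBase + kGuardPageSize) therefore
    // contains addr iff addr falls in [alignedBase, alignedBase + kBlinkPageSize).
    inline bool pageContains(char* pageObjectAddress, char* addr)
    {
        char* start = roundToPageStart(pageObjectAddress);
        return start <= addr && addr < start + kBlinkPageSize;
    }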
| 1435 | 1435 |
| 1436 NormalPageHeap* NormalPage::heapForNormalPage() | 1436 NormalPageHeap* NormalPage::heapForNormalPage() |
| 1437 { | 1437 { |
| 1438 return static_cast<NormalPageHeap*>(heap()); | 1438 return static_cast<NormalPageHeap*>(heap()); |
| 1439 } | 1439 } |
| 1440 | 1440 |
| 1441 LargeObjectPage::LargeObjectPage(PageMemory* storage, BaseHeap* heap, size_t payloadSize) | 1441 LargeObjectPage::LargeObjectPage(PageMemory* storage, BaseHeap* heap, size_t payloadSize) |
| (...skipping 97 matching lines...) |
| 1539 | 1539 |
| 1540 pageDump->addScalar("live_count", "objects", liveCount); | 1540 pageDump->addScalar("live_count", "objects", liveCount); |
| 1541 pageDump->addScalar("dead_count", "objects", deadCount); | 1541 pageDump->addScalar("dead_count", "objects", deadCount); |
| 1542 pageDump->addScalar("live_size", "bytes", liveSize); | 1542 pageDump->addScalar("live_size", "bytes", liveSize); |
| 1543 pageDump->addScalar("dead_size", "bytes", deadSize); | 1543 pageDump->addScalar("dead_size", "bytes", deadSize); |
| 1544 } | 1544 } |
| 1545 | 1545 |
| 1546 #if ENABLE(ASSERT) | 1546 #if ENABLE(ASSERT) |
| 1547 bool LargeObjectPage::contains(Address object) | 1547 bool LargeObjectPage::contains(Address object) |
| 1548 { | 1548 { |
| 1549 return roundToBlinkPageStart(address()) <= object && object < roundToBlinkPageEnd(address() + size()); | 1549 return roundToBlinkPageStart(getAddress()) <= object && object < roundToBlinkPageEnd(getAddress() + size()); |
| 1550 } | 1550 } |
| 1551 #endif | 1551 #endif |
| 1552 | 1552 |
| 1553 void HeapDoesNotContainCache::flush() | 1553 void HeapDoesNotContainCache::flush() |
| 1554 { | 1554 { |
| 1555 if (m_hasEntries) { | 1555 if (m_hasEntries) { |
| 1556 for (int i = 0; i < numberOfEntries; ++i) | 1556 for (int i = 0; i < numberOfEntries; ++i) |
| 1557 m_entries[i] = nullptr; | 1557 m_entries[i] = nullptr; |
| 1558 m_hasEntries = false; | 1558 m_hasEntries = false; |
| 1559 } | 1559 } |
| (...skipping 28 matching lines...) |
| 1588 | 1588 |
| 1589 m_hasEntries = true; | 1589 m_hasEntries = true; |
| 1590 size_t index = hash(address); | 1590 size_t index = hash(address); |
| 1591 ASSERT(!(index & 1)); | 1591 ASSERT(!(index & 1)); |
| 1592 Address cachePage = roundToBlinkPageStart(address); | 1592 Address cachePage = roundToBlinkPageStart(address); |
| 1593 m_entries[index + 1] = m_entries[index]; | 1593 m_entries[index + 1] = m_entries[index]; |
| 1594 m_entries[index] = cachePage; | 1594 m_entries[index] = cachePage; |
| 1595 } | 1595 } |
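[Note] addEntry (lines 1589-1595) fills a small two-way set-associative cache of pages known not to be in the heap: hash() always yields an even index (the ASSERT at line 1591), the newest page goes into the even slot, and the previous occupant is demoted into the odd slot rather than evicted outright. A minimal standalone sketch with hypothetical sizing:

    #include <cstddef>
    #include <cstdint>

    class NegativeCache {
    public:
        // A hit in either slot of the pair means "known not in the heap".
        bool lookup(uintptr_t page) const
        {
            size_t index = hash(page);
            return m_entries[index] == page || m_entries[index + 1] == page;
        }

        void add(uintptr_t page)
        {
            size_t index = hash(page);
            m_entries[index + 1] = m_entries[index]; // demote previous entry
            m_entries[index] = page;                 // newest entry takes slot 0
        }

    private:
        static const size_t kEntries = 128; // power of two, two slots per bucket

        // Clearing the low bit keeps the index even, so index + 1 is always the
        // partner slot; the >> 17 shift assumes 128 KB page granularity.
        static size_t hash(uintptr_t page)
        {
            return (page >> 17) & (kEntries - 2);
        }

        uintptr_t m_entries[kEntries] = {};
    };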
| 1596 | 1596 |
| 1597 } // namespace blink | 1597 } // namespace blink |