| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 623 matching lines...) |
| 634 { | 634 { |
| 635 Address addr = reinterpret_cast<Address>(const_cast<void*>(payload)); | 635 Address addr = reinterpret_cast<Address>(const_cast<void*>(payload)); |
| 636 FinalizedHeapObjectHeader* header = | 636 FinalizedHeapObjectHeader* header = |
| 637 reinterpret_cast<FinalizedHeapObjectHeader*>(addr - finalizedHeaderSize); | 637 reinterpret_cast<FinalizedHeapObjectHeader*>(addr - finalizedHeaderSize); |
| 638 return header; | 638 return header; |
| 639 } | 639 } |
| 640 | 640 |
| 641 template<typename Header> | 641 template<typename Header> |
| 642 ThreadHeap<Header>::ThreadHeap(ThreadState* state, int index) | 642 ThreadHeap<Header>::ThreadHeap(ThreadState* state, int index) |
| 643 : m_currentAllocationPoint(0) | 643 : m_currentAllocationPoint(0) |
| 644 , m_remainingAllocationSize(0) | 644 , m_allocationLimit(0) |
| 645 , m_lastRemainingAllocationSize(0) |
| 645 , m_firstPage(0) | 646 , m_firstPage(0) |
| 646 , m_firstLargeHeapObject(0) | 647 , m_firstLargeHeapObject(0) |
| 647 , m_firstPageAllocatedDuringSweeping(0) | 648 , m_firstPageAllocatedDuringSweeping(0) |
| 648 , m_lastPageAllocatedDuringSweeping(0) | 649 , m_lastPageAllocatedDuringSweeping(0) |
| 649 , m_mergePoint(0) | 650 , m_mergePoint(0) |
| 650 , m_biggestFreeListIndex(0) | 651 , m_biggestFreeListIndex(0) |
| 651 , m_threadState(state) | 652 , m_threadState(state) |
| 652 , m_index(index) | 653 , m_index(index) |
| 653 , m_numberOfNormalPages(0) | 654 , m_numberOfNormalPages(0) |
| 654 , m_promptlyFreedCount(0) | 655 , m_promptlyFreedCount(0) |
| (...skipping 20 matching lines...) |
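The initializer-list change above replaces the stored byte count m_remainingAllocationSize with an explicit end-of-window pointer m_allocationLimit, and adds m_lastRemainingAllocationSize so object-space stats can be updated lazily. A minimal sketch of the accessors this representation implies; the accessor names exist in this file, but these bodies are an assumption rather than part of the patch:

    // Bump-allocation window: [m_currentAllocationPoint, m_allocationLimit).
    Address currentAllocationPoint() const { return m_currentAllocationPoint; }
    size_t remainingAllocationSize() const
    {
        // Derived on demand instead of stored, so the hot allocation path
        // only compares and bumps pointers.
        return static_cast<size_t>(m_allocationLimit - m_currentAllocationPoint);
    }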
| 675 | 676 |
| 676 for (LargeHeapObject<Header>* largeObject = m_firstLargeHeapObject; largeObject; largeObject = largeObject->m_next) | 677 for (LargeHeapObject<Header>* largeObject = m_firstLargeHeapObject; largeObject; largeObject = largeObject->m_next) |
| 677 Heap::orphanedPagePool()->addOrphanedPage(m_index, largeObject); | 678 Heap::orphanedPagePool()->addOrphanedPage(m_index, largeObject); |
| 678 m_firstLargeHeapObject = 0; | 679 m_firstLargeHeapObject = 0; |
| 679 } | 680 } |
| 680 | 681 |
| 681 template<typename Header> | 682 template<typename Header> |
| 682 Address ThreadHeap<Header>::outOfLineAllocate(size_t size, const GCInfo* gcInfo) | 683 Address ThreadHeap<Header>::outOfLineAllocate(size_t size, const GCInfo* gcInfo) |
| 683 { | 684 { |
| 684 size_t allocationSize = allocationSizeFromSize(size); | 685 size_t allocationSize = allocationSizeFromSize(size); |
| 686 ASSERT(allocationSize > remainingAllocationSize()); |
| 687 if (allocationSize > HeapPage<Header>::payloadSize() / 2) |
| 688 return allocateLargeObject(allocationSize, gcInfo); |
| 689 |
| 690 if (remainingAllocationSize() > 0) |
| 691 addToFreeList(currentAllocationPoint(), remainingAllocationSize()); |
| 692 // This also updates the stats. |
| 693 setAllocationPoint(0, 0); |
| 685 if (threadState()->shouldGC()) { | 694 if (threadState()->shouldGC()) { |
| 686 if (threadState()->shouldForceConservativeGC()) | 695 if (threadState()->shouldForceConservativeGC()) |
| 687 Heap::collectGarbage(ThreadState::HeapPointersOnStack); | 696 Heap::collectGarbage(ThreadState::HeapPointersOnStack); |
| 688 else | 697 else |
| 689 threadState()->setGCRequested(); | 698 threadState()->setGCRequested(); |
| 690 } | 699 } |
| 691 ensureCurrentAllocation(allocationSize, gcInfo); | 700 ensureCurrentAllocation(allocationSize, gcInfo); |
| 692 return allocate(size, gcInfo); | 701 return allocate(size, gcInfo); |
| 693 } | 702 } |
| 694 | 703 |
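The reworked outOfLineAllocate() above now sends oversized requests straight to allocateLargeObject(), retires the unused tail of the bump window to the free list, and clears the allocation point before the GC check. The "// This also updates the stats." comment refers to setAllocationPoint(); a sketch of the behavior that comment implies, assuming the lazy-stats members introduced in the constructor (the real body is outside this hunk):

    template<typename Header>
    void ThreadHeap<Header>::setAllocationPoint(Address point, size_t size)
    {
        // Fold the bytes consumed from the old window into the object-space
        // stats before moving the window (assumed; mirrors the stats hunk in
        // allocateLargeObject() below).
        stats().increaseObjectSpace(m_lastRemainingAllocationSize - remainingAllocationSize());
        m_currentAllocationPoint = point;
        m_allocationLimit = point + size;
        m_lastRemainingAllocationSize = size;
    }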
| (...skipping 15 matching lines...) |
| 710 } | 719 } |
| 711 } | 720 } |
| 712 m_biggestFreeListIndex = i; | 721 m_biggestFreeListIndex = i; |
| 713 return false; | 722 return false; |
| 714 } | 723 } |
| 715 | 724 |
| 716 template<typename Header> | 725 template<typename Header> |
| 717 void ThreadHeap<Header>::ensureCurrentAllocation(size_t minSize, const GCInfo* gcInfo) | 726 void ThreadHeap<Header>::ensureCurrentAllocation(size_t minSize, const GCInfo* gcInfo) |
| 718 { | 727 { |
| 719 ASSERT(minSize >= allocationGranularity); | 728 ASSERT(minSize >= allocationGranularity); |
| 720 if (remainingAllocationSize() >= minSize) | |
| 721 return; | |
| 722 | |
| 723 if (remainingAllocationSize() > 0) { | |
| 724 addToFreeList(currentAllocationPoint(), remainingAllocationSize()); | |
| 725 setAllocationPoint(0, 0); | |
| 726 } | |
| 727 if (allocateFromFreeList(minSize)) | 729 if (allocateFromFreeList(minSize)) |
| 728 return; | 730 return; |
| 729 if (coalesce(minSize) && allocateFromFreeList(minSize)) | 731 if (coalesce(minSize) && allocateFromFreeList(minSize)) |
| 730 return; | 732 return; |
| 731 addPageToHeap(gcInfo); | 733 addPageToHeap(gcInfo); |
| 732 bool success = allocateFromFreeList(minSize); | 734 bool success = allocateFromFreeList(minSize); |
| 733 RELEASE_ASSERT(success); | 735 RELEASE_ASSERT(success); |
| 734 } | 736 } |
| 735 | 737 |
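With the window retirement hoisted into outOfLineAllocate(), ensureCurrentAllocation() above is reduced to the refill chain: free list, then coalesce() and retry, then a fresh page via addPageToHeap(), which must succeed. Callers now arrive with the allocation point already cleared; a hypothetical assertion that would make the new contract explicit (illustrative only, not in the patch):

    // Assumed precondition after this change: the caller already retired
    // the bump window, so no free space can be leaked here.
    ASSERT(!currentAllocationPoint() && !remainingAllocationSize());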
| 736 template<typename Header> | 738 template<typename Header> |
| (...skipping 169 matching lines...) |
| 906 page->clearObjectStartBitMap(); | 908 page->clearObjectStartBitMap(); |
| 907 page->resetPromptlyFreedSize(); | 909 page->resetPromptlyFreedSize(); |
| 908 size_t freedCount = 0; | 910 size_t freedCount = 0; |
| 909 Address startOfGap = page->payload(); | 911 Address startOfGap = page->payload(); |
| 910 for (Address headerAddress = startOfGap; headerAddress < page->end(); ) { | 912 for (Address headerAddress = startOfGap; headerAddress < page->end(); ) { |
| 911 BasicObjectHeader* basicHeader = reinterpret_cast<BasicObjectHeader*>(headerAddress); | 913 BasicObjectHeader* basicHeader = reinterpret_cast<BasicObjectHeader*>(headerAddress); |
| 912 ASSERT(basicHeader->size() > 0); | 914 ASSERT(basicHeader->size() > 0); |
| 913 ASSERT(basicHeader->size() < blinkPagePayloadSize()); | 915 ASSERT(basicHeader->size() < blinkPagePayloadSize()); |
| 914 | 916 |
| 915 if (basicHeader->isPromptlyFreed()) { | 917 if (basicHeader->isPromptlyFreed()) { |
| 916 stats().decreaseObjectSpace(reinterpret_cast<Header*>(basicHeader)->payloadSize()); | 918 stats().decreaseObjectSpace(reinterpret_cast<Header*>(basicHeader)->size()); |
| 917 size_t size = basicHeader->size(); | 919 size_t size = basicHeader->size(); |
| 918 ASSERT(size >= sizeof(Header)); | 920 ASSERT(size >= sizeof(Header)); |
| 919 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER) | 921 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER) |
| 920 memset(headerAddress, 0, sizeof(Header)); | 922 memset(headerAddress, 0, sizeof(Header)); |
| 921 #endif | 923 #endif |
| 922 ++freedCount; | 924 ++freedCount; |
| 923 headerAddress += size; | 925 headerAddress += size; |
| 924 continue; | 926 continue; |
| 925 } | 927 } |
| 926 | 928 |
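The stats change in the promptly-freed branch above, decreaseObjectSpace(...->size()) instead of ...->payloadSize(), switches object-space accounting from payload bytes to full object bytes, header and padding included. The same substitution recurs in allocateLargeObject(), sweepLargePages(), HeapPage::getStats(), sweep(), and LargeHeapObject::getStats() below, keeping increases and decreases in the same units. Illustrative arithmetic, assuming an 8-byte header:

    // A promptly freed object with a 40-byte payload:
    //   OLD: stats().decreaseObjectSpace(header->payloadSize());  // subtracts 40
    //   NEW: stats().decreaseObjectSpace(header->size());         // subtracts 48, header included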
| (...skipping 44 matching lines...) |
| 971 // Ensure that there is enough space for alignment. If the header | 973 // Ensure that there is enough space for alignment. If the header |
| 972 // is not a multiple of 8 bytes we will allocate an extra | 974 // is not a multiple of 8 bytes we will allocate an extra |
| 973 // headerPadding<Header> bytes to ensure it is 8 byte aligned. | 975 // headerPadding<Header> bytes to ensure it is 8 byte aligned. |
| 974 allocationSize += headerPadding<Header>(); | 976 allocationSize += headerPadding<Header>(); |
| 975 | 977 |
| 976 // If ASan is supported we add allocationGranularity bytes to the allocated space and | 978 // If ASan is supported we add allocationGranularity bytes to the allocated space and |
| 977 // poison that to detect overflows | 979 // poison that to detect overflows |
| 978 #if defined(ADDRESS_SANITIZER) | 980 #if defined(ADDRESS_SANITIZER) |
| 979 allocationSize += allocationGranularity; | 981 allocationSize += allocationGranularity; |
| 980 #endif | 982 #endif |
| 983 |
| 984 // Update stats before checking if we should GC. |
| 985 if (m_lastRemainingAllocationSize != remainingAllocationSize()) { |
| 986 stats().increaseObjectSpace(m_lastRemainingAllocationSize - remainingAllocationSize()); |
| 987 m_lastRemainingAllocationSize = remainingAllocationSize(); |
| 988 } |
| 981 if (m_threadState->shouldGC()) | 989 if (m_threadState->shouldGC()) |
| 982 m_threadState->setGCRequested(); | 990 m_threadState->setGCRequested(); |
| 983 m_threadState->shouldFlushHeapDoesNotContainCache(); | 991 m_threadState->shouldFlushHeapDoesNotContainCache(); |
| 984 PageMemory* pageMemory = PageMemory::allocate(allocationSize); | 992 PageMemory* pageMemory = PageMemory::allocate(allocationSize); |
| 985 m_threadState->allocatedRegionsSinceLastGC().append(pageMemory->region()); | 993 m_threadState->allocatedRegionsSinceLastGC().append(pageMemory->region()); |
| 986 Address largeObjectAddress = pageMemory->writableStart(); | 994 Address largeObjectAddress = pageMemory->writableStart(); |
| 987 Address headerAddress = largeObjectAddress + sizeof(LargeHeapObject<Header>) + headerPadding<Header>(); | 995 Address headerAddress = largeObjectAddress + sizeof(LargeHeapObject<Header>) + headerPadding<Header>(); |
| 988 memset(headerAddress, 0, size); | 996 memset(headerAddress, 0, size); |
| 989 Header* header = new (NotNull, headerAddress) Header(size, gcInfo); | 997 Header* header = new (NotNull, headerAddress) Header(size, gcInfo); |
| 990 Address result = headerAddress + sizeof(*header); | 998 Address result = headerAddress + sizeof(*header); |
| 991 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); | 999 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); |
| 992 LargeHeapObject<Header>* largeObject = new (largeObjectAddress) LargeHeapObject<Header>(pageMemory, gcInfo, threadState()); | 1000 LargeHeapObject<Header>* largeObject = new (largeObjectAddress) LargeHeapObject<Header>(pageMemory, gcInfo, threadState()); |
| 993 | 1001 |
| 994 // Poison the object header and allocationGranularity bytes after the object | 1002 // Poison the object header and allocationGranularity bytes after the object |
| 995 ASAN_POISON_MEMORY_REGION(header, sizeof(*header)); | 1003 ASAN_POISON_MEMORY_REGION(header, sizeof(*header)); |
| 996 ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allocationGranularity); | 1004 ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allocationGranularity); |
| 997 largeObject->link(&m_firstLargeHeapObject); | 1005 largeObject->link(&m_firstLargeHeapObject); |
| 998 stats().increaseAllocatedSpace(largeObject->size()); | 1006 stats().increaseAllocatedSpace(largeObject->size()); |
| 999 stats().increaseObjectSpace(largeObject->payloadSize()); | 1007 stats().increaseObjectSpace(largeObject->size()); |
| 1000 return result; | 1008 return result; |
| 1001 } | 1009 } |
| 1002 | 1010 |
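The "// Update stats before checking if we should GC." hunk in allocateLargeObject() above flushes the lazily tracked consumption of the bump window into objectSpace, so that shouldGC(), which presumably consults these stats, sees current numbers. A sketch of that sync factored into a helper; the helper name is hypothetical, the body mirrors the hunk:

    template<typename Header>
    void ThreadHeap<Header>::syncObjectSpaceStats() // hypothetical helper name
    {
        // Bytes handed out of the bump window since the last sync.
        if (m_lastRemainingAllocationSize != remainingAllocationSize()) {
            stats().increaseObjectSpace(m_lastRemainingAllocationSize - remainingAllocationSize());
            m_lastRemainingAllocationSize = remainingAllocationSize();
        }
    }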
| 1003 template<typename Header> | 1011 template<typename Header> |
| 1004 void ThreadHeap<Header>::freeLargeObject(LargeHeapObject<Header>* object, LargeHeapObject<Header>** previousNext) | 1012 void ThreadHeap<Header>::freeLargeObject(LargeHeapObject<Header>* object, LargeHeapObject<Header>** previousNext) |
| 1005 { | 1013 { |
| 1006 object->unlink(previousNext); | 1014 object->unlink(previousNext); |
| 1007 object->finalize(); | 1015 object->finalize(); |
| 1008 | 1016 |
| 1009 // Unpoison the object header and allocationGranularity bytes after the | 1017 // Unpoison the object header and allocationGranularity bytes after the |
| (...skipping 341 matching lines...) |
| 1351 } | 1359 } |
| 1352 | 1360 |
| 1353 template<typename Header> | 1361 template<typename Header> |
| 1354 void ThreadHeap<Header>::sweepLargePages(HeapStats* stats) | 1362 void ThreadHeap<Header>::sweepLargePages(HeapStats* stats) |
| 1355 { | 1363 { |
| 1356 TRACE_EVENT0("blink_gc", "ThreadHeap::sweepLargePages"); | 1364 TRACE_EVENT0("blink_gc", "ThreadHeap::sweepLargePages"); |
| 1357 LargeHeapObject<Header>** previousNext = &m_firstLargeHeapObject; | 1365 LargeHeapObject<Header>** previousNext = &m_firstLargeHeapObject; |
| 1358 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) { | 1366 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) { |
| 1359 if (current->isMarked()) { | 1367 if (current->isMarked()) { |
| 1360 stats->increaseAllocatedSpace(current->size()); | 1368 stats->increaseAllocatedSpace(current->size()); |
| 1361 stats->increaseObjectSpace(current->payloadSize()); | 1369 stats->increaseObjectSpace(current->size()); |
| 1362 current->unmark(); | 1370 current->unmark(); |
| 1363 previousNext = ¤t->m_next; | 1371 previousNext = ¤t->m_next; |
| 1364 current = current->next(); | 1372 current = current->next(); |
| 1365 } else { | 1373 } else { |
| 1366 LargeHeapObject<Header>* next = current->next(); | 1374 LargeHeapObject<Header>* next = current->next(); |
| 1367 freeLargeObject(current, previousNext); | 1375 freeLargeObject(current, previousNext); |
| 1368 current = next; | 1376 current = next; |
| 1369 } | 1377 } |
| 1370 } | 1378 } |
| 1371 } | 1379 } |
| (...skipping 130 matching lines...) |
| 1502 | 1510 |
| 1503 template<typename Header> | 1511 template<typename Header> |
| 1504 void HeapPage<Header>::getStats(HeapStats& stats) | 1512 void HeapPage<Header>::getStats(HeapStats& stats) |
| 1505 { | 1513 { |
| 1506 stats.increaseAllocatedSpace(blinkPageSize); | 1514 stats.increaseAllocatedSpace(blinkPageSize); |
| 1507 Address headerAddress = payload(); | 1515 Address headerAddress = payload(); |
| 1508 ASSERT(headerAddress != end()); | 1516 ASSERT(headerAddress != end()); |
| 1509 do { | 1517 do { |
| 1510 Header* header = reinterpret_cast<Header*>(headerAddress); | 1518 Header* header = reinterpret_cast<Header*>(headerAddress); |
| 1511 if (!header->isFree()) | 1519 if (!header->isFree()) |
| 1512 stats.increaseObjectSpace(header->payloadSize()); | 1520 stats.increaseObjectSpace(header->size()); |
| 1513 ASSERT(header->size() < blinkPagePayloadSize()); | 1521 ASSERT(header->size() < blinkPagePayloadSize()); |
| 1514 headerAddress += header->size(); | 1522 headerAddress += header->size(); |
| 1515 ASSERT(headerAddress <= end()); | 1523 ASSERT(headerAddress <= end()); |
| 1516 } while (headerAddress < end()); | 1524 } while (headerAddress < end()); |
| 1517 } | 1525 } |
| 1518 | 1526 |
| 1519 template<typename Header> | 1527 template<typename Header> |
| 1520 bool HeapPage<Header>::isEmpty() | 1528 bool HeapPage<Header>::isEmpty() |
| 1521 { | 1529 { |
| 1522 BasicObjectHeader* header = reinterpret_cast<BasicObjectHeader*>(payload()); | 1530 BasicObjectHeader* header = reinterpret_cast<BasicObjectHeader*>(payload()); |
| (...skipping 43 matching lines...) |
| 1566 #endif | 1574 #endif |
| 1567 ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize()); | 1575 ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize()); |
| 1568 headerAddress += size; | 1576 headerAddress += size; |
| 1569 continue; | 1577 continue; |
| 1570 } | 1578 } |
| 1571 | 1579 |
| 1572 if (startOfGap != headerAddress) | 1580 if (startOfGap != headerAddress) |
| 1573 heap->addToFreeList(startOfGap, headerAddress - startOfGap); | 1581 heap->addToFreeList(startOfGap, headerAddress - startOfGap); |
| 1574 header->unmark(); | 1582 header->unmark(); |
| 1575 headerAddress += header->size(); | 1583 headerAddress += header->size(); |
| 1576 stats->increaseObjectSpace(header->payloadSize()); | 1584 stats->increaseObjectSpace(header->size()); |
| 1577 startOfGap = headerAddress; | 1585 startOfGap = headerAddress; |
| 1578 } | 1586 } |
| 1579 if (startOfGap != end()) | 1587 if (startOfGap != end()) |
| 1580 heap->addToFreeList(startOfGap, end() - startOfGap); | 1588 heap->addToFreeList(startOfGap, end() - startOfGap); |
| 1581 } | 1589 } |
| 1582 | 1590 |
| 1583 template<typename Header> | 1591 template<typename Header> |
| 1584 void HeapPage<Header>::clearLiveAndMarkDead() | 1592 void HeapPage<Header>::clearLiveAndMarkDead() |
| 1585 { | 1593 { |
| 1586 for (Address headerAddress = payload(); headerAddress < end();) { | 1594 for (Address headerAddress = payload(); headerAddress < end();) { |
| (...skipping 208 matching lines...) |
| 1795 template<> | 1803 template<> |
| 1796 inline bool HeapPage<FinalizedHeapObjectHeader>::hasVTable(FinalizedHeapObjectHeader* header) | 1804 inline bool HeapPage<FinalizedHeapObjectHeader>::hasVTable(FinalizedHeapObjectHeader* header) |
| 1797 { | 1805 { |
| 1798 return header->hasVTable(); | 1806 return header->hasVTable(); |
| 1799 } | 1807 } |
| 1800 | 1808 |
| 1801 template<typename Header> | 1809 template<typename Header> |
| 1802 void LargeHeapObject<Header>::getStats(HeapStats& stats) | 1810 void LargeHeapObject<Header>::getStats(HeapStats& stats) |
| 1803 { | 1811 { |
| 1804 stats.increaseAllocatedSpace(size()); | 1812 stats.increaseAllocatedSpace(size()); |
| 1805 stats.increaseObjectSpace(payloadSize()); | 1813 stats.increaseObjectSpace(size()); |
| 1806 } | 1814 } |
| 1807 | 1815 |
| 1808 #if ENABLE(GC_PROFILE_HEAP) | 1816 #if ENABLE(GC_PROFILE_HEAP) |
| 1809 template<typename Header> | 1817 template<typename Header> |
| 1810 void LargeHeapObject<Header>::snapshot(TracedValue* json, ThreadState::SnapshotInfo* info) | 1818 void LargeHeapObject<Header>::snapshot(TracedValue* json, ThreadState::SnapshotInfo* info) |
| 1811 { | 1819 { |
| 1812 Header* header = heapObjectHeader(); | 1820 Header* header = heapObjectHeader(); |
| 1813 size_t tag = info->getClassTag(header->gcInfo()); | 1821 size_t tag = info->getClassTag(header->gcInfo()); |
| 1814 size_t age = header->age(); | 1822 size_t age = header->age(); |
| 1815 if (isMarked()) { | 1823 if (isMarked()) { |
| (...skipping 1077 matching lines...) |
| 2893 CallbackStack* Heap::s_weakCallbackStack; | 2901 CallbackStack* Heap::s_weakCallbackStack; |
| 2894 CallbackStack* Heap::s_ephemeronStack; | 2902 CallbackStack* Heap::s_ephemeronStack; |
| 2895 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache; | 2903 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache; |
| 2896 bool Heap::s_shutdownCalled = false; | 2904 bool Heap::s_shutdownCalled = false; |
| 2897 bool Heap::s_lastGCWasConservative = false; | 2905 bool Heap::s_lastGCWasConservative = false; |
| 2898 FreePagePool* Heap::s_freePagePool; | 2906 FreePagePool* Heap::s_freePagePool; |
| 2899 OrphanedPagePool* Heap::s_orphanedPagePool; | 2907 OrphanedPagePool* Heap::s_orphanedPagePool; |
| 2900 Heap::RegionTree* Heap::s_regionTree = 0; | 2908 Heap::RegionTree* Heap::s_regionTree = 0; |
| 2901 | 2909 |
| 2902 } | 2910 } |