| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 630 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
// Creates the per-thread heap with the given owning thread state and heap
// index. All page and allocation-point bookkeeping starts out empty; the
// free lists are explicitly reset via clearFreeLists() in the body.
template<typename Header>
ThreadHeap<Header>::ThreadHeap(ThreadState* state, int index)
    : m_currentAllocationPoint(0)            // no bump-allocation area yet
    , m_remainingAllocationSize(0)
    , m_lastRemainingAllocationSize(0)
    , m_firstPage(0)                         // no normal pages yet
    , m_firstLargeHeapObject(0)              // no large objects yet
    , m_firstPageAllocatedDuringSweeping(0)
    , m_lastPageAllocatedDuringSweeping(0)
    , m_mergePoint(0)
    , m_threadState(state)                   // back-pointer to the owning thread
    , m_index(index)                         // this heap's slot in the thread state
    , m_numberOfNormalPages(0)
    , m_promptlyFreedCount(0)
{
    clearFreeLists();
}
| 659 | 658 |
// A freshly constructed free list starts with its biggest-bucket hint at
// zero. NOTE(review): the bucket arrays themselves appear to be reset by
// clear() — ThreadHeap's constructor calls clearFreeLists(), which invokes
// it — rather than here; confirm no other construction path skips that.
template<typename Header>
FreeList<Header>::FreeList()
    : m_biggestFreeListIndex(0)
{
}
| 664 |
template<typename Header>
ThreadHeap<Header>::~ThreadHeap()
{
    // By destruction time every page must already have been torn down
    // (presumably via cleanupPages() — TODO confirm the teardown path);
    // the destructor only verifies that nothing leaked.
    ASSERT(!m_firstPage);
    ASSERT(!m_firstLargeHeapObject);
}
| 666 | 671 |
| 667 template<typename Header> | 672 template<typename Header> |
| 668 void ThreadHeap<Header>::cleanupPages() | 673 void ThreadHeap<Header>::cleanupPages() |
| 669 { | 674 { |
| 670 clearFreeLists(); | 675 clearFreeLists(); |
| (...skipping 27 matching lines...) Expand all Loading... |
| 698 return allocateLargeObject(allocationSize, gcInfo); | 703 return allocateLargeObject(allocationSize, gcInfo); |
| 699 | 704 |
| 700 updateRemainingAllocationSize(); | 705 updateRemainingAllocationSize(); |
| 701 if (threadState()->shouldGC()) { | 706 if (threadState()->shouldGC()) { |
| 702 if (threadState()->shouldForceConservativeGC()) | 707 if (threadState()->shouldForceConservativeGC()) |
| 703 Heap::collectGarbage(ThreadState::HeapPointersOnStack); | 708 Heap::collectGarbage(ThreadState::HeapPointersOnStack); |
| 704 else | 709 else |
| 705 threadState()->setGCRequested(); | 710 threadState()->setGCRequested(); |
| 706 } | 711 } |
| 707 if (remainingAllocationSize() > 0) { | 712 if (remainingAllocationSize() > 0) { |
| 708 addToFreeList(currentAllocationPoint(), remainingAllocationSize()); | 713 m_freeList.addToFreeList(currentAllocationPoint(), remainingAllocationSi
ze()); |
| 709 setAllocationPoint(0, 0); | 714 setAllocationPoint(0, 0); |
| 710 } | 715 } |
| 711 ensureCurrentAllocation(allocationSize, gcInfo); | 716 ensureCurrentAllocation(allocationSize, gcInfo); |
| 712 return allocate(size, gcInfo); | 717 return allocate(size, gcInfo); |
| 713 } | 718 } |
| 714 | 719 |
| 715 template<typename Header> | 720 template<typename Header> |
| 716 bool ThreadHeap<Header>::allocateFromFreeList(size_t minSize) | 721 bool ThreadHeap<Header>::allocateFromFreeList(size_t minSize) |
| 717 { | 722 { |
| 718 size_t bucketSize = 1 << m_biggestFreeListIndex; | 723 size_t bucketSize = 1 << m_freeList.m_biggestFreeListIndex; |
| 719 int i = m_biggestFreeListIndex; | 724 int i = m_freeList.m_biggestFreeListIndex; |
| 720 for (; i > 0; i--, bucketSize >>= 1) { | 725 for (; i > 0; i--, bucketSize >>= 1) { |
| 721 if (bucketSize < minSize) | 726 if (bucketSize < minSize) |
| 722 break; | 727 break; |
| 723 FreeListEntry* entry = m_freeLists[i]; | 728 FreeListEntry* entry = m_freeList.m_freeLists[i]; |
| 724 if (entry) { | 729 if (entry) { |
| 725 m_biggestFreeListIndex = i; | 730 m_freeList.m_biggestFreeListIndex = i; |
| 726 entry->unlink(&m_freeLists[i]); | 731 entry->unlink(&m_freeList.m_freeLists[i]); |
| 727 setAllocationPoint(entry->address(), entry->size()); | 732 setAllocationPoint(entry->address(), entry->size()); |
| 728 ASSERT(currentAllocationPoint() && remainingAllocationSize() >= minS
ize); | 733 ASSERT(currentAllocationPoint() && remainingAllocationSize() >= minS
ize); |
| 729 return true; | 734 return true; |
| 730 } | 735 } |
| 731 } | 736 } |
| 732 m_biggestFreeListIndex = i; | 737 m_freeList.m_biggestFreeListIndex = i; |
| 733 return false; | 738 return false; |
| 734 } | 739 } |
| 735 | 740 |
| 736 template<typename Header> | 741 template<typename Header> |
| 737 void ThreadHeap<Header>::ensureCurrentAllocation(size_t minSize, const GCInfo* g
cInfo) | 742 void ThreadHeap<Header>::ensureCurrentAllocation(size_t minSize, const GCInfo* g
cInfo) |
| 738 { | 743 { |
| 739 ASSERT(minSize >= allocationGranularity); | 744 ASSERT(minSize >= allocationGranularity); |
| 740 if (allocateFromFreeList(minSize)) | 745 if (allocateFromFreeList(minSize)) |
| 741 return; | 746 return; |
| 742 if (coalesce(minSize) && allocateFromFreeList(minSize)) | 747 if (coalesce(minSize) && allocateFromFreeList(minSize)) |
| (...skipping 63 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 806 current->snapshot(json, info); | 811 current->snapshot(json, info); |
| 807 json->endDictionary(); | 812 json->endDictionary(); |
| 808 } | 813 } |
| 809 json->endArray(); | 814 json->endArray(); |
| 810 | 815 |
| 811 json->setInteger("pageCount", info->pageCount - previousPageCount); | 816 json->setInteger("pageCount", info->pageCount - previousPageCount); |
| 812 } | 817 } |
| 813 #endif | 818 #endif |
| 814 | 819 |
| 815 template<typename Header> | 820 template<typename Header> |
| 816 void ThreadHeap<Header>::addToFreeList(Address address, size_t size) | 821 void FreeList<Header>::addToFreeList(Address address, size_t size) |
| 817 { | 822 { |
| 818 ASSERT(heapPageFromAddress(address)); | |
| 819 ASSERT(heapPageFromAddress(address + size - 1)); | |
| 820 ASSERT(size < blinkPagePayloadSize()); | 823 ASSERT(size < blinkPagePayloadSize()); |
| 821 // The free list entries are only pointer aligned (but when we allocate | 824 // The free list entries are only pointer aligned (but when we allocate |
| 822 // from them we are 8 byte aligned due to the header size). | 825 // from them we are 8 byte aligned due to the header size). |
| 823 ASSERT(!((reinterpret_cast<uintptr_t>(address) + sizeof(Header)) & allocatio
nMask)); | 826 ASSERT(!((reinterpret_cast<uintptr_t>(address) + sizeof(Header)) & allocatio
nMask)); |
| 824 ASSERT(!(size & allocationMask)); | 827 ASSERT(!(size & allocationMask)); |
| 825 ASAN_POISON_MEMORY_REGION(address, size); | 828 ASAN_POISON_MEMORY_REGION(address, size); |
| 826 FreeListEntry* entry; | 829 FreeListEntry* entry; |
| 827 if (size < sizeof(*entry)) { | 830 if (size < sizeof(*entry)) { |
| 828 // Create a dummy header with only a size and freelist bit set. | 831 // Create a dummy header with only a size and freelist bit set. |
| 829 ASSERT(size >= sizeof(BasicObjectHeader)); | 832 ASSERT(size >= sizeof(BasicObjectHeader)); |
| (...skipping 49 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 879 { | 882 { |
| 880 if (m_threadState->isSweepInProgress()) | 883 if (m_threadState->isSweepInProgress()) |
| 881 return false; | 884 return false; |
| 882 | 885 |
| 883 if (m_promptlyFreedCount < 256) | 886 if (m_promptlyFreedCount < 256) |
| 884 return false; | 887 return false; |
| 885 | 888 |
| 886 // The smallest bucket able to satisfy an allocation request for minSize is | 889 // The smallest bucket able to satisfy an allocation request for minSize is |
| 887 // the bucket where all free-list entries are guarantied to be larger than | 890 // the bucket where all free-list entries are guarantied to be larger than |
| 888 // minSize. That bucket is one larger than the bucket minSize would go into. | 891 // minSize. That bucket is one larger than the bucket minSize would go into. |
| 889 size_t neededBucketIndex = bucketIndexForSize(minSize) + 1; | 892 size_t neededBucketIndex = FreeList<Header>::bucketIndexForSize(minSize) + 1
; |
| 890 size_t neededFreeEntrySize = 1 << neededBucketIndex; | 893 size_t neededFreeEntrySize = 1 << neededBucketIndex; |
| 891 size_t neededPromptlyFreedSize = neededFreeEntrySize * 3; | 894 size_t neededPromptlyFreedSize = neededFreeEntrySize * 3; |
| 892 size_t foundFreeEntrySize = 0; | 895 size_t foundFreeEntrySize = 0; |
| 893 | 896 |
| 894 // Bailout early on large requests because it is unlikely we will find a fre
e-list entry. | 897 // Bailout early on large requests because it is unlikely we will find a fre
e-list entry. |
| 895 if (neededPromptlyFreedSize >= blinkPageSize) | 898 if (neededPromptlyFreedSize >= blinkPageSize) |
| 896 return false; | 899 return false; |
| 897 | 900 |
| 898 TRACE_EVENT_BEGIN2("blink_gc", "ThreadHeap::coalesce" , "requestedSize", (un
signed)minSize , "neededSize", (unsigned)neededFreeEntrySize); | 901 TRACE_EVENT_BEGIN2("blink_gc", "ThreadHeap::coalesce" , "requestedSize", (un
signed)minSize , "neededSize", (unsigned)neededFreeEntrySize); |
| 899 | 902 |
| (...skipping 523 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1423 } | 1426 } |
| 1424 | 1427 |
| 1425 #if ENABLE(ASSERT) | 1428 #if ENABLE(ASSERT) |
| 1426 template<typename Header> | 1429 template<typename Header> |
| 1427 bool ThreadHeap<Header>::isConsistentForSweeping() | 1430 bool ThreadHeap<Header>::isConsistentForSweeping() |
| 1428 { | 1431 { |
| 1429 // A thread heap is consistent for sweeping if none of the pages to | 1432 // A thread heap is consistent for sweeping if none of the pages to |
| 1430 // be swept contain a freelist block or the current allocation | 1433 // be swept contain a freelist block or the current allocation |
| 1431 // point. | 1434 // point. |
| 1432 for (size_t i = 0; i < blinkPageSizeLog2; i++) { | 1435 for (size_t i = 0; i < blinkPageSizeLog2; i++) { |
| 1433 for (FreeListEntry* freeListEntry = m_freeLists[i]; freeListEntry; freeL
istEntry = freeListEntry->next()) { | 1436 for (FreeListEntry* freeListEntry = m_freeList.m_freeLists[i]; freeListE
ntry; freeListEntry = freeListEntry->next()) { |
| 1434 if (pagesToBeSweptContains(freeListEntry->address())) { | 1437 if (pagesToBeSweptContains(freeListEntry->address())) { |
| 1435 return false; | 1438 return false; |
| 1436 } | 1439 } |
| 1437 ASSERT(pagesAllocatedDuringSweepingContains(freeListEntry->address()
)); | 1440 ASSERT(pagesAllocatedDuringSweepingContains(freeListEntry->address()
)); |
| 1438 } | 1441 } |
| 1439 } | 1442 } |
| 1440 if (ownsNonEmptyAllocationArea()) { | 1443 if (ownsNonEmptyAllocationArea()) { |
| 1441 ASSERT(pagesToBeSweptContains(currentAllocationPoint()) | 1444 ASSERT(pagesToBeSweptContains(currentAllocationPoint()) |
| 1442 || pagesAllocatedDuringSweepingContains(currentAllocationPoint())); | 1445 || pagesAllocatedDuringSweepingContains(currentAllocationPoint())); |
| 1443 return !pagesToBeSweptContains(currentAllocationPoint()); | 1446 return !pagesToBeSweptContains(currentAllocationPoint()); |
| (...skipping 22 matching lines...) Expand all Loading... |
| 1466 current->unmark(); | 1469 current->unmark(); |
| 1467 else | 1470 else |
| 1468 current->setDeadMark(); | 1471 current->setDeadMark(); |
| 1469 } | 1472 } |
| 1470 } | 1473 } |
| 1471 | 1474 |
template<typename Header>
void ThreadHeap<Header>::clearFreeLists()
{
    // Resetting the promptly-freed counter alongside the free lists keeps
    // the coalescing heuristic (see coalesce()) consistent with the now
    // empty lists.
    m_promptlyFreedCount = 0;
    m_freeList.clear();
}
| 1481 |
| 1482 template<typename Header> |
| 1483 void FreeList<Header>::clear() |
| 1484 { |
| 1485 m_biggestFreeListIndex = 0; |
| 1476 for (size_t i = 0; i < blinkPageSizeLog2; i++) { | 1486 for (size_t i = 0; i < blinkPageSizeLog2; i++) { |
| 1477 m_freeLists[i] = 0; | 1487 m_freeLists[i] = 0; |
| 1478 m_lastFreeListEntries[i] = 0; | 1488 m_lastFreeListEntries[i] = 0; |
| 1479 } | 1489 } |
| 1480 } | 1490 } |
| 1481 | 1491 |
| 1482 int BaseHeap::bucketIndexForSize(size_t size) | 1492 template<typename Header> |
| 1493 int FreeList<Header>::bucketIndexForSize(size_t size) |
| 1483 { | 1494 { |
| 1484 ASSERT(size > 0); | 1495 ASSERT(size > 0); |
| 1485 int index = -1; | 1496 int index = -1; |
| 1486 while (size) { | 1497 while (size) { |
| 1487 size >>= 1; | 1498 size >>= 1; |
| 1488 index++; | 1499 index++; |
| 1489 } | 1500 } |
| 1490 return index; | 1501 return index; |
| 1491 } | 1502 } |
| 1492 | 1503 |
| (...skipping 1243 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2736 // nothing on the freelists. | 2747 // nothing on the freelists. |
| 2737 ASSERT(splitOff->m_mergePoint || splitOff->m_numberOfNormalPages == 0); | 2748 ASSERT(splitOff->m_mergePoint || splitOff->m_numberOfNormalPages == 0); |
| 2738 if (splitOff->m_mergePoint) { | 2749 if (splitOff->m_mergePoint) { |
| 2739 // Link the split off pages into the beginning of the list again. | 2750 // Link the split off pages into the beginning of the list again. |
| 2740 splitOff->m_mergePoint->m_next = m_firstPage; | 2751 splitOff->m_mergePoint->m_next = m_firstPage; |
| 2741 m_firstPage = splitOff->m_firstPage; | 2752 m_firstPage = splitOff->m_firstPage; |
| 2742 m_numberOfNormalPages += splitOff->m_numberOfNormalPages; | 2753 m_numberOfNormalPages += splitOff->m_numberOfNormalPages; |
| 2743 splitOff->m_firstPage = 0; | 2754 splitOff->m_firstPage = 0; |
| 2744 // Merge free lists. | 2755 // Merge free lists. |
| 2745 for (size_t i = 0; i < blinkPageSizeLog2; i++) { | 2756 for (size_t i = 0; i < blinkPageSizeLog2; i++) { |
| 2746 if (!m_freeLists[i]) { | 2757 if (!m_freeList.m_freeLists[i]) { |
| 2747 m_freeLists[i] = splitOff->m_freeLists[i]; | 2758 m_freeList.m_freeLists[i] = splitOff->m_freeList.m_freeLists[i]; |
| 2748 } else if (splitOff->m_freeLists[i]) { | 2759 } else if (splitOff->m_freeList.m_freeLists[i]) { |
| 2749 m_lastFreeListEntries[i]->append(splitOff->m_freeLists[i]); | 2760 m_freeList.m_lastFreeListEntries[i]->append(splitOff->m_freeList
.m_freeLists[i]); |
| 2750 m_lastFreeListEntries[i] = splitOff->m_lastFreeListEntries[i]; | 2761 m_freeList.m_lastFreeListEntries[i] = splitOff->m_freeList.m_las
tFreeListEntries[i]; |
| 2751 } | 2762 } |
| 2752 } | 2763 } |
| 2753 } | 2764 } |
| 2754 } | 2765 } |
| 2755 | 2766 |
| 2756 void Heap::getHeapSpaceSize(uint64_t* objectSpaceSize, uint64_t* allocatedSpaceS
ize) | 2767 void Heap::getHeapSpaceSize(uint64_t* objectSpaceSize, uint64_t* allocatedSpaceS
ize) |
| 2757 { | 2768 { |
| 2758 *objectSpaceSize = 0; | 2769 *objectSpaceSize = 0; |
| 2759 *allocatedSpaceSize = 0; | 2770 *allocatedSpaceSize = 0; |
| 2760 ASSERT(ThreadState::isAnyThreadInGC()); | 2771 ASSERT(ThreadState::isAnyThreadInGC()); |
| (...skipping 183 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
// Out-of-class definitions of Heap's static members (declared in the
// header). Pointer members without an initializer are zero-initialized as
// objects with static storage duration.
CallbackStack* Heap::s_weakCallbackStack;
CallbackStack* Heap::s_ephemeronStack;
HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache;
bool Heap::s_shutdownCalled = false;
bool Heap::s_lastGCWasConservative = false;
FreePagePool* Heap::s_freePagePool;
OrphanedPagePool* Heap::s_orphanedPagePool;
Heap::RegionTree* Heap::s_regionTree = 0;
| 2952 | 2963 |
| 2953 } | 2964 } |
| OLD | NEW |