Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 529 matching lines...) | |
| 540 kSizeOffset + kPointerSize + kPointerSize + kPointerSize + | 540 kSizeOffset + kPointerSize + kPointerSize + kPointerSize + |
| 541 kPointerSize + kPointerSize + | 541 kPointerSize + kPointerSize + |
| 542 kPointerSize + kPointerSize + kPointerSize + kIntSize; | 542 kPointerSize + kPointerSize + kPointerSize + kIntSize; |
| 543 | 543 |
| 544 static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize; | 544 static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize; |
| 545 | 545 |
| 546 static const size_t kWriteBarrierCounterOffset = | 546 static const size_t kWriteBarrierCounterOffset = |
| 547 kSlotsBufferOffset + kPointerSize + kPointerSize; | 547 kSlotsBufferOffset + kPointerSize + kPointerSize; |
| 548 | 548 |
| 549 static const size_t kHeaderSize = kWriteBarrierCounterOffset + kPointerSize + | 549 static const size_t kHeaderSize = kWriteBarrierCounterOffset + kPointerSize + |
| 550 kIntSize + kIntSize + kPointerSize; | 550 kIntSize + kIntSize + kPointerSize + |
| 551 5 * kPointerSize; | |
Michael Starzinger (2013/04/09 16:56:04):
As discussed offline: Can we check whether that still fits…

Hannes Payer (out of office) (2013/04/10 07:53:49):
It still fits.
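The "still fits" question refers to the compile-time guard further down in this file, STATIC_CHECK(sizeof(Page) <= MemoryChunk::kHeaderSize). A minimal standalone sketch of that pattern, written with standard static_assert and invented sizes rather than V8's STATIC_CHECK macro and real constants:

```cpp
#include <cstddef>
#include <cstdint>

// Stand-in for MemoryChunk::kHeaderSize; the number is invented for illustration.
constexpr size_t kHeaderSize = 1024;

struct PageSketch {
  // Pretend these are the five counters this patch adds to Page.
  intptr_t available_in_small_free_list_;
  intptr_t available_in_medium_free_list_;
  intptr_t available_in_large_free_list_;
  intptr_t available_in_huge_free_list_;
  intptr_t non_available_small_blocks_;
  char other_header_fields_[256];  // the rest of the chunk header, also invented
};

// Same guarantee as STATIC_CHECK(sizeof(Page) <= MemoryChunk::kHeaderSize):
// if a new field pushes the object past the reserved header budget,
// compilation fails instead of silently corrupting the page body.
static_assert(sizeof(PageSketch) <= kHeaderSize,
              "Page fields must fit in the reserved chunk header");

int main() { return 0; }
```

Growing kHeaderSize by 5 * kPointerSize, as this hunk does, keeps that budget in step with the five new counters added to Page further down in the diff.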
| 551 | 552 |
| 552 static const int kBodyOffset = | 553 static const int kBodyOffset = |
| 553 CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize); | 554 CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize); |
| 554 | 555 |
| 555 // The start offset of the object area in a page. Aligned to both maps and | 556 // The start offset of the object area in a page. Aligned to both maps and |
| 556 // code alignment to be suitable for both. Also aligned to 32 words because | 557 // code alignment to be suitable for both. Also aligned to 32 words because |
| 557 // the marking bitmap is arranged in 32 bit chunks. | 558 // the marking bitmap is arranged in 32 bit chunks. |
| 558 static const int kObjectStartAlignment = 32 * kPointerSize; | 559 static const int kObjectStartAlignment = 32 * kPointerSize; |
| 559 static const int kObjectStartOffset = kBodyOffset - 1 + | 560 static const int kObjectStartOffset = kBodyOffset - 1 + |
| 560 (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment); | 561 (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment); |
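The kObjectStartOffset expression above is the usual round-up-to-alignment idiom. A self-contained check with invented numbers (the kBodyOffset value below is illustrative, not the real offset):

```cpp
#include <cstdio>

// Hypothetical values purely for illustration (64-bit: kPointerSize == 8).
constexpr int kPointerSize = 8;
constexpr int kObjectStartAlignment = 32 * kPointerSize;  // 256
constexpr int kBodyOffset = 1092;                         // made up, not V8's real offset

// Same round-up formula as kObjectStartOffset above.
constexpr int kObjectStartOffset =
    kBodyOffset - 1 +
    (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);

static_assert(kObjectStartOffset % kObjectStartAlignment == 0, "result is aligned");
static_assert(kObjectStartOffset >= kBodyOffset, "never rounds down");

int main() { printf("%d\n", kObjectStartOffset); }  // prints 1280
```

The two static_asserts capture the property the comment relies on: the object area starts on a 32-word boundary at or after the header and marking bitmap.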
| (...skipping 156 matching lines...) | |
| 717 | 718 |
| 718 | 719 |
| 719 // ----------------------------------------------------------------------------- | 720 // ----------------------------------------------------------------------------- |
| 720 // A page is a memory chunk of a size 1MB. Large object pages may be larger. | 721 // A page is a memory chunk of a size 1MB. Large object pages may be larger. |
| 721 // | 722 // |
| 722 // The only way to get a page pointer is by calling factory methods: | 723 // The only way to get a page pointer is by calling factory methods: |
| 723 // Page* p = Page::FromAddress(addr); or | 724 // Page* p = Page::FromAddress(addr); or |
| 724 // Page* p = Page::FromAllocationTop(top); | 725 // Page* p = Page::FromAllocationTop(top); |
| 725 class Page : public MemoryChunk { | 726 class Page : public MemoryChunk { |
| 726 public: | 727 public: |
| 728 Page(); | |
Michael Starzinger (2013/04/09 16:56:04):
There should be no constructor for Pages, they sho…

Hannes Payer (out of office) (2013/04/10 07:53:49):
Done.
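To make the factory-method point concrete: a Page is never constructed directly; callers recover it from any interior address by masking, as Page::FromAddress does in the rows below. A standalone sketch of that masking idea, with illustrative constants rather than V8's real declarations:

```cpp
#include <cassert>
#include <cstdint>

// Illustrative constants; the real ones live on MemoryChunk/Page in spaces.h.
constexpr uintptr_t kPageSize = uintptr_t{1} << 20;  // 1MB pages, as in the class comment
constexpr uintptr_t kPageAlignmentMask = kPageSize - 1;

// Same idea as Page::FromAddress: strip the low bits to find the page start.
inline uintptr_t PageStartFromAddress(uintptr_t addr) {
  return addr & ~kPageAlignmentMask;
}

int main() {
  const uintptr_t page_start = uintptr_t{1} << 30;  // pretend this is a page-aligned chunk
  const uintptr_t interior = page_start + 0x1234;
  assert(PageStartFromAddress(interior) == page_start);
  return 0;
}
```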
| 727 // Returns the page containing a given address. The address ranges | 729 // Returns the page containing a given address. The address ranges |
| 728 // from [page_addr .. page_addr + kPageSize[ | 730 // from [page_addr .. page_addr + kPageSize[ |
| 729 // This only works if the object is in fact in a page. See also MemoryChunk:: | 731 // This only works if the object is in fact in a page. See also MemoryChunk:: |
| 730 // FromAddress() and FromAnyAddress(). | 732 // FromAddress() and FromAnyAddress(). |
| 731 INLINE(static Page* FromAddress(Address a)) { | 733 INLINE(static Page* FromAddress(Address a)) { |
| 732 return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask); | 734 return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask); |
| 733 } | 735 } |
| 734 | 736 |
| 735 // Returns the page containing an allocation top. Because an allocation | 737 // Returns the page containing an allocation top. Because an allocation |
| 736 // top address can be the upper bound of the page, we need to subtract | 738 // top address can be the upper bound of the page, we need to subtract |
| (...skipping 53 matching lines...) | |
| 790 bool WasSweptPrecisely() { return IsFlagSet(WAS_SWEPT_PRECISELY); } | 792 bool WasSweptPrecisely() { return IsFlagSet(WAS_SWEPT_PRECISELY); } |
| 791 bool WasSweptConservatively() { return IsFlagSet(WAS_SWEPT_CONSERVATIVELY); } | 793 bool WasSweptConservatively() { return IsFlagSet(WAS_SWEPT_CONSERVATIVELY); } |
| 792 bool WasSwept() { return WasSweptPrecisely() || WasSweptConservatively(); } | 794 bool WasSwept() { return WasSweptPrecisely() || WasSweptConservatively(); } |
| 793 | 795 |
| 794 void MarkSweptPrecisely() { SetFlag(WAS_SWEPT_PRECISELY); } | 796 void MarkSweptPrecisely() { SetFlag(WAS_SWEPT_PRECISELY); } |
| 795 void MarkSweptConservatively() { SetFlag(WAS_SWEPT_CONSERVATIVELY); } | 797 void MarkSweptConservatively() { SetFlag(WAS_SWEPT_CONSERVATIVELY); } |
| 796 | 798 |
| 797 void ClearSweptPrecisely() { ClearFlag(WAS_SWEPT_PRECISELY); } | 799 void ClearSweptPrecisely() { ClearFlag(WAS_SWEPT_PRECISELY); } |
| 798 void ClearSweptConservatively() { ClearFlag(WAS_SWEPT_CONSERVATIVELY); } | 800 void ClearSweptConservatively() { ClearFlag(WAS_SWEPT_CONSERVATIVELY); } |
| 799 | 801 |
| 802 void ResetFreeListStatistics(); | |
| 803 | |
| 804 intptr_t available_in_small_free_list() const { | |
| 805 return available_in_small_free_list_; | |
| 806 } | |
| 807 | |
| 808 void set_available_in_small_free_list(intptr_t available_in_small_free_list) { | |
Michael Starzinger (2013/04/09 16:56:04):
Just an idea. We could use macros to generate this…

Hannes Payer (out of office) (2013/04/10 07:53:49):
Done.
| 809 available_in_small_free_list_ = available_in_small_free_list; | |
| 810 } | |
| 811 | |
| 812 void AddAvailableInSmallFreeList(intptr_t add) { | |
| 813 available_in_small_free_list_ += add; | |
| 814 } | |
| 815 | |
| 816 intptr_t available_in_medium_free_list() const { | |
| 817 return available_in_medium_free_list_; | |
| 818 } | |
| 819 | |
| 820 void set_available_in_medium_free_list( | |
| 821 intptr_t available_in_medium_free_list) { | |
| 822 available_in_medium_free_list_ = available_in_medium_free_list; | |
| 823 } | |
| 824 | |
| 825 void AddAvailableInMediumFreeList(intptr_t add) { | |
| 826 available_in_medium_free_list_ += add; | |
| 827 } | |
| 828 | |
| 829 intptr_t available_in_large_free_list() const { | |
| 830 return available_in_large_free_list_; | |
| 831 } | |
| 832 | |
| 833 void set_available_in_large_free_list(intptr_t available_in_large_free_list) { | |
| 834 available_in_large_free_list_ = available_in_large_free_list; | |
| 835 } | |
| 836 | |
| 837 void AddAvailableInLargeFreeList(intptr_t add) { | |
| 838 available_in_large_free_list_ += add; | |
| 839 } | |
| 840 | |
| 841 intptr_t available_in_huge_free_list() const { | |
| 842 return available_in_huge_free_list_; | |
| 843 } | |
| 844 | |
| 845 void set_available_in_huge_free_list(intptr_t available_in_huge_free_list) { | |
| 846 available_in_huge_free_list_ = available_in_huge_free_list; | |
| 847 } | |
| 848 | |
| 849 void AddAvailableInHugeFreeList(intptr_t add) { | |
| 850 available_in_huge_free_list_ += add; | |
| 851 } | |
| 852 | |
| 853 intptr_t non_available_small_blocks() const { | |
| 854 return non_available_small_blocks_; | |
| 855 } | |
| 856 | |
| 857 void set_non_available_small_blocks(intptr_t non_available_small_blocks) { | |
| 858 non_available_small_blocks_ = non_available_small_blocks; | |
| 859 } | |
| 860 | |
| 861 void AddNonAvailableSmallBlocks(intptr_t add) { | |
| 862 non_available_small_blocks_ += add; | |
| 863 } | |
| 864 | |
| 800 #ifdef DEBUG | 865 #ifdef DEBUG |
| 801 void Print(); | 866 void Print(); |
| 802 #endif // DEBUG | 867 #endif // DEBUG |
| 803 | 868 |
| 869 private: | |
| 870 intptr_t available_in_small_free_list_; | |
| 871 intptr_t available_in_medium_free_list_; | |
| 872 intptr_t available_in_large_free_list_; | |
| 873 intptr_t available_in_huge_free_list_; | |
| 874 intptr_t non_available_small_blocks_; | |
| 875 | |
| 804 friend class MemoryAllocator; | 876 friend class MemoryAllocator; |
| 805 }; | 877 }; |
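The accessor triplets added to Page above are the code the "use macros to generate this" comment refers to (answered "Done"). A hypothetical sketch of such a macro; the macro name and parameters are invented here, not taken from the V8 tree:

```cpp
#include <cstdint>

// Hypothetical accessor-generating macro in the spirit of the review suggestion.
#define PAGE_STAT_ACCESSORS(Name, field)                    \
  intptr_t field() const { return field##_; }               \
  void set_##field(intptr_t value) { field##_ = value; }    \
  void Add##Name(intptr_t delta) { field##_ += delta; }

class PageStatsSketch {
 public:
  PAGE_STAT_ACCESSORS(AvailableInSmallFreeList, available_in_small_free_list)
  PAGE_STAT_ACCESSORS(AvailableInMediumFreeList, available_in_medium_free_list)

 private:
  intptr_t available_in_small_free_list_ = 0;
  intptr_t available_in_medium_free_list_ = 0;
};

int main() {
  PageStatsSketch p;
  p.AddAvailableInSmallFreeList(16);
  return p.available_in_small_free_list() == 16 ? 0 : 1;
}
```

Token pasting alone cannot derive the CamelCase Add* name from the snake_case field name, which is why this sketch takes both spellings as arguments.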
| 806 | 878 |
| 807 | 879 |
| 808 STATIC_CHECK(sizeof(Page) <= MemoryChunk::kHeaderSize); | 880 STATIC_CHECK(sizeof(Page) <= MemoryChunk::kHeaderSize); |
| 809 | 881 |
| 810 | 882 |
| 811 class LargePage : public MemoryChunk { | 883 class LargePage : public MemoryChunk { |
| 812 public: | 884 public: |
| 813 HeapObject* GetObject() { | 885 HeapObject* GetObject() { |
| (...skipping 611 matching lines...) | |
| 1425 } | 1497 } |
| 1426 | 1498 |
| 1427 intptr_t Concatenate(FreeListCategory* category); | 1499 intptr_t Concatenate(FreeListCategory* category); |
| 1428 | 1500 |
| 1429 void Reset(); | 1501 void Reset(); |
| 1430 | 1502 |
| 1431 void Free(FreeListNode* node, int size_in_bytes); | 1503 void Free(FreeListNode* node, int size_in_bytes); |
| 1432 | 1504 |
| 1433 FreeListNode* PickNodeFromList(int *node_size); | 1505 FreeListNode* PickNodeFromList(int *node_size); |
| 1434 | 1506 |
| 1435 intptr_t CountFreeListItemsInList(Page* p); | |
| 1436 | |
| 1437 intptr_t EvictFreeListItemsInList(Page* p); | 1507 intptr_t EvictFreeListItemsInList(Page* p); |
| 1438 | 1508 |
| 1439 void RepairFreeList(Heap* heap); | 1509 void RepairFreeList(Heap* heap); |
| 1440 | 1510 |
| 1441 FreeListNode** GetTopAddress() { return &top_; } | 1511 FreeListNode** GetTopAddress() { return &top_; } |
| 1442 FreeListNode* top() const { return top_; } | 1512 FreeListNode* top() const { return top_; } |
| 1443 void set_top(FreeListNode* top) { top_ = top; } | 1513 void set_top(FreeListNode* top) { top_ = top; } |
| 1444 | 1514 |
| 1445 FreeListNode** GetEndAddress() { return &end_; } | 1515 FreeListNode** GetEndAddress() { return &end_; } |
| 1446 FreeListNode* end() const { return end_; } | 1516 FreeListNode* end() const { return end_; } |
| (...skipping 74 matching lines...) | |
| 1521 | 1591 |
| 1522 #ifdef DEBUG | 1592 #ifdef DEBUG |
| 1523 void Zap(); | 1593 void Zap(); |
| 1524 intptr_t SumFreeLists(); | 1594 intptr_t SumFreeLists(); |
| 1525 bool IsVeryLong(); | 1595 bool IsVeryLong(); |
| 1526 #endif | 1596 #endif |
| 1527 | 1597 |
| 1528 // Used after booting the VM. | 1598 // Used after booting the VM. |
| 1529 void RepairLists(Heap* heap); | 1599 void RepairLists(Heap* heap); |
| 1530 | 1600 |
| 1531 struct SizeStats { | |
| 1532 intptr_t Total() { | |
| 1533 return small_size_ + medium_size_ + large_size_ + huge_size_; | |
| 1534 } | |
| 1535 | |
| 1536 intptr_t small_size_; | |
| 1537 intptr_t medium_size_; | |
| 1538 intptr_t large_size_; | |
| 1539 intptr_t huge_size_; | |
| 1540 }; | |
| 1541 | |
| 1542 void CountFreeListItems(Page* p, SizeStats* sizes); | |
| 1543 | |
| 1544 intptr_t EvictFreeListItems(Page* p); | 1601 intptr_t EvictFreeListItems(Page* p); |
| 1545 | 1602 |
| 1546 FreeListCategory* small_list() { return &small_list_; } | 1603 FreeListCategory* small_list() { return &small_list_; } |
| 1547 FreeListCategory* medium_list() { return &medium_list_; } | 1604 FreeListCategory* medium_list() { return &medium_list_; } |
| 1548 FreeListCategory* large_list() { return &large_list_; } | 1605 FreeListCategory* large_list() { return &large_list_; } |
| 1549 FreeListCategory* huge_list() { return &huge_list_; } | 1606 FreeListCategory* huge_list() { return &huge_list_; } |
| 1550 | 1607 |
| 1551 private: | 1608 private: |
| 1552 // The size range of blocks, in bytes. | 1609 // The size range of blocks, in bytes. |
| 1553 static const int kMinBlockSize = 3 * kPointerSize; | 1610 static const int kMinBlockSize = 3 * kPointerSize; |
| (...skipping 64 matching lines...) | |
| 1618 // Current capacity without growing (Size() + Available()). | 1675 // Current capacity without growing (Size() + Available()). |
| 1619 intptr_t Capacity() { return accounting_stats_.Capacity(); } | 1676 intptr_t Capacity() { return accounting_stats_.Capacity(); } |
| 1620 | 1677 |
| 1621 // Total amount of memory committed for this space. For paged | 1678 // Total amount of memory committed for this space. For paged |
| 1622 // spaces this equals the capacity. | 1679 // spaces this equals the capacity. |
| 1623 intptr_t CommittedMemory() { return Capacity(); } | 1680 intptr_t CommittedMemory() { return Capacity(); } |
| 1624 | 1681 |
| 1625 // Approximate amount of physical memory committed for this space. | 1682 // Approximate amount of physical memory committed for this space. |
| 1626 size_t CommittedPhysicalMemory(); | 1683 size_t CommittedPhysicalMemory(); |
| 1627 | 1684 |
| 1685 struct SizeStats { | |
| 1686 intptr_t Total() { | |
| 1687 return small_size_ + medium_size_ + large_size_ + huge_size_; | |
| 1688 } | |
| 1689 | |
| 1690 intptr_t small_size_; | |
| 1691 intptr_t medium_size_; | |
| 1692 intptr_t large_size_; | |
| 1693 intptr_t huge_size_; | |
| 1694 }; | |
| 1695 | |
| 1696 void ObtainFreeListStatistics(Page* p, SizeStats* sizes); | |
| 1697 void ResetFreeListStatistics(); | |
| 1698 | |
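Taken together with the removals elsewhere in this diff (CountFreeListItemsInList and CountFreeListItems disappear from FreeList), the new ObtainFreeListStatistics/ResetFreeListStatistics entry points suggest the per-category counts now come from the counters stored on each Page rather than from walking the free lists. A toy sketch of the SizeStats bookkeeping itself; the struct is re-declared and the values invented purely for illustration:

```cpp
#include <cstdint>

// Re-declared here only for illustration; the real struct is the one added above.
struct SizeStats {
  intptr_t Total() {
    return small_size_ + medium_size_ + large_size_ + huge_size_;
  }
  intptr_t small_size_;
  intptr_t medium_size_;
  intptr_t large_size_;
  intptr_t huge_size_;
};

int main() {
  // In V8 this would be filled by PagedSpace::ObtainFreeListStatistics(page, &sizes);
  // the numbers here are made up.
  SizeStats sizes = {32, 256, 0, 0};
  return sizes.Total() == 288 ? 0 : 1;
}
```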
| 1628 // Sets the capacity, the available space and the wasted space to zero. | 1699 // Sets the capacity, the available space and the wasted space to zero. |
| 1629 // The stats are rebuilt during sweeping by adding each page to the | 1700 // The stats are rebuilt during sweeping by adding each page to the |
| 1630 // capacity and the size when it is encountered. As free spaces are | 1701 // capacity and the size when it is encountered. As free spaces are |
| 1631 // discovered during the sweeping they are subtracted from the size and added | 1702 // discovered during the sweeping they are subtracted from the size and added |
| 1632 // to the available and wasted totals. | 1703 // to the available and wasted totals. |
| 1633 void ClearStats() { | 1704 void ClearStats() { |
| 1634 accounting_stats_.ClearSizeWaste(); | 1705 accounting_stats_.ClearSizeWaste(); |
| 1706 ResetFreeListStatistics(); | |
| 1635 } | 1707 } |
| 1636 | 1708 |
| 1637 // Increases the number of available bytes of that space. | 1709 // Increases the number of available bytes of that space. |
| 1638 void AddToAccountingStats(intptr_t bytes) { | 1710 void AddToAccountingStats(intptr_t bytes) { |
| 1639 accounting_stats_.DeallocateBytes(bytes); | 1711 accounting_stats_.DeallocateBytes(bytes); |
| 1640 } | 1712 } |
| 1641 | 1713 |
| 1642 // Available bytes without growing. These are the bytes on the free list. | 1714 // Available bytes without growing. These are the bytes on the free list. |
| 1643 // The bytes in the linear allocation area are not included in this total | 1715 // The bytes in the linear allocation area are not included in this total |
| 1644 // because updating the stats would slow down allocation. New pages are | 1716 // because updating the stats would slow down allocation. New pages are |
| (...skipping 133 matching lines...) | |
| 1778 // AdvanceSweeper with size_in_bytes is called. | 1850 // AdvanceSweeper with size_in_bytes is called. |
| 1779 bool EnsureSweeperProgress(intptr_t size_in_bytes); | 1851 bool EnsureSweeperProgress(intptr_t size_in_bytes); |
| 1780 | 1852 |
| 1781 bool IsLazySweepingComplete() { | 1853 bool IsLazySweepingComplete() { |
| 1782 return !first_unswept_page_->is_valid(); | 1854 return !first_unswept_page_->is_valid(); |
| 1783 } | 1855 } |
| 1784 | 1856 |
| 1785 Page* FirstPage() { return anchor_.next_page(); } | 1857 Page* FirstPage() { return anchor_.next_page(); } |
| 1786 Page* LastPage() { return anchor_.prev_page(); } | 1858 Page* LastPage() { return anchor_.prev_page(); } |
| 1787 | 1859 |
| 1788 void CountFreeListItems(Page* p, FreeList::SizeStats* sizes) { | |
| 1789 free_list_.CountFreeListItems(p, sizes); | |
| 1790 } | |
| 1791 | |
| 1792 void EvictEvacuationCandidatesFromFreeLists(); | 1860 void EvictEvacuationCandidatesFromFreeLists(); |
| 1793 | 1861 |
| 1794 bool CanExpand(); | 1862 bool CanExpand(); |
| 1795 | 1863 |
| 1796 // Returns the number of total pages in this space. | 1864 // Returns the number of total pages in this space. |
| 1797 int CountTotalPages(); | 1865 int CountTotalPages(); |
| 1798 | 1866 |
| 1799 // Return size of allocatable area on a page in this space. | 1867 // Return size of allocatable area on a page in this space. |
| 1800 inline int AreaSize() { | 1868 inline int AreaSize() { |
| 1801 return area_size_; | 1869 return area_size_; |
| (...skipping 1024 matching lines...) | |
| 2826 } | 2894 } |
| 2827 // Must be small, since an iteration is used for lookup. | 2895 // Must be small, since an iteration is used for lookup. |
| 2828 static const int kMaxComments = 64; | 2896 static const int kMaxComments = 64; |
| 2829 }; | 2897 }; |
| 2830 #endif | 2898 #endif |
| 2831 | 2899 |
| 2832 | 2900 |
| 2833 } } // namespace v8::internal | 2901 } } // namespace v8::internal |
| 2834 | 2902 |
| 2835 #endif // V8_SPACES_H_ | 2903 #endif // V8_SPACES_H_ |