Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #ifndef V8_HEAP_SPACES_H_ | 5 #ifndef V8_HEAP_SPACES_H_ |
| 6 #define V8_HEAP_SPACES_H_ | 6 #define V8_HEAP_SPACES_H_ |
| 7 | 7 |
| 8 #include <list> | 8 #include <list> |
| 9 #include <memory> | 9 #include <memory> |
| 10 #include <unordered_set> | 10 #include <unordered_set> |
| (...skipping 506 matching lines...) | |
| 517 | 517 |
| 518 bool CanAllocate() { | 518 bool CanAllocate() { |
| 519 return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE); | 519 return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE); |
| 520 } | 520 } |
| 521 | 521 |
| 522 bool ShouldSkipEvacuationSlotRecording() { | 522 bool ShouldSkipEvacuationSlotRecording() { |
| 523 return ((flags_ & kSkipEvacuationSlotsRecordingMask) != 0) && | 523 return ((flags_ & kSkipEvacuationSlotsRecordingMask) != 0) && |
| 524 !IsFlagSet(COMPACTION_WAS_ABORTED); | 524 !IsFlagSet(COMPACTION_WAS_ABORTED); |
| 525 } | 525 } |
| 526 | 526 |
| 527 bool CanUseForAllocation() { return CanAllocate() && !NeverEvacuate(); } | |
| 528 | |
| 527 Executability executable() { | 529 Executability executable() { |
| 528 return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE; | 530 return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE; |
| 529 } | 531 } |
| 530 | 532 |
| 531 bool InNewSpace() { return (flags_ & kIsInNewSpaceMask) != 0; } | 533 bool InNewSpace() { return (flags_ & kIsInNewSpaceMask) != 0; } |
| 532 | 534 |
| 533 bool InToSpace() { return IsFlagSet(IN_TO_SPACE); } | 535 bool InToSpace() { return IsFlagSet(IN_TO_SPACE); } |
| 534 | 536 |
| 535 bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); } | 537 bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); } |
| 536 | 538 |
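InNewSpace() above tests a combined mask rather than a single flag: a chunk is in new space if it is in either semispace. A minimal sketch of that relationship, assuming kIsInNewSpaceMask is simply the union of the two semispace flags (the bit positions below are illustrative, not the header's real values):

    #include <cstdint>

    enum Flag : uintptr_t {
      IN_FROM_SPACE = uintptr_t{1} << 3,  // assumed bit positions, illustration only
      IN_TO_SPACE = uintptr_t{1} << 4,
    };
    const uintptr_t kIsInNewSpaceMask = IN_FROM_SPACE | IN_TO_SPACE;

    // Mirrors the accessors above: one masked test covers both semispaces.
    bool InNewSpace(uintptr_t flags) { return (flags & kIsInNewSpaceMask) != 0; }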
| (...skipping 126 matching lines...) | |
| 663 | 665 |
| 664 static MarkingState Internal(MemoryChunk* chunk) { | 666 static MarkingState Internal(MemoryChunk* chunk) { |
| 665 return MarkingState( | 667 return MarkingState( |
| 666 Bitmap::FromAddress(chunk->address() + MemoryChunk::kHeaderSize), | 668 Bitmap::FromAddress(chunk->address() + MemoryChunk::kHeaderSize), |
| 667 &chunk->live_byte_count_); | 669 &chunk->live_byte_count_); |
| 668 } | 670 } |
| 669 | 671 |
| 670 MarkingState(Bitmap* bitmap, intptr_t* live_bytes) | 672 MarkingState(Bitmap* bitmap, intptr_t* live_bytes) |
| 671 : bitmap_(bitmap), live_bytes_(live_bytes) {} | 673 : bitmap_(bitmap), live_bytes_(live_bytes) {} |
| 672 | 674 |
| 673 void IncrementLiveBytes(intptr_t by) const { | 675 template <MarkBit::AccessMode mode = MarkBit::NON_ATOMIC> |
| 676 inline void IncrementLiveBytes(intptr_t by) const { | |
| 674 *live_bytes_ += static_cast<int>(by); | 677 *live_bytes_ += static_cast<int>(by); |
| 675 } | 678 } |
| 679 | |
| 676 void SetLiveBytes(intptr_t value) const { | 680 void SetLiveBytes(intptr_t value) const { |
| 677 *live_bytes_ = static_cast<int>(value); | 681 *live_bytes_ = static_cast<int>(value); |
| 678 } | 682 } |
| 679 | 683 |
| 680 void ClearLiveness() const { | 684 void ClearLiveness() const { |
| 681 bitmap_->Clear(); | 685 bitmap_->Clear(); |
| 682 *live_bytes_ = 0; | 686 *live_bytes_ = 0; |
| 683 } | 687 } |
| 684 | 688 |
| 685 Bitmap* bitmap() const { return bitmap_; } | 689 Bitmap* bitmap() const { return bitmap_; } |
| 686 intptr_t live_bytes() const { return *live_bytes_; } | 690 intptr_t live_bytes() const { return *live_bytes_; } |
| 687 | 691 |
| 688 private: | 692 private: |
| 689 Bitmap* bitmap_; | 693 Bitmap* bitmap_; |
| 690 intptr_t* live_bytes_; | 694 intptr_t* live_bytes_; |
| 691 }; | 695 }; |
| 692 | 696 |
| 697 template <> | |
| 698 inline void MarkingState::IncrementLiveBytes<MarkBit::NON_ATOMIC>( | |
| 699 intptr_t by) const { | |
| 700 *live_bytes_ += static_cast<int>(by); | |
| 701 } | |
| 702 | |
| 703 template <> | |
| 704 inline void MarkingState::IncrementLiveBytes<MarkBit::ATOMIC>( | |
| 705 intptr_t by) const { | |
| 706 reinterpret_cast<base::AtomicNumber<intptr_t>*>(live_bytes_)->Increment(by); | |
| 707 } | |
| 708 | |
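The two specializations above pick the increment strategy at compile time: callers that may race (e.g. concurrent marking) can opt into the ATOMIC variant, while single-threaded callers keep the cheap plain store. A self-contained sketch of the same pattern using std::atomic in place of base::AtomicNumber (the type and method names below are stand-ins, not V8's):

    #include <atomic>
    #include <cstdint>

    enum class AccessMode { NON_ATOMIC, ATOMIC };

    struct LiveByteCounter {
      intptr_t* live_bytes_;

      template <AccessMode mode = AccessMode::NON_ATOMIC>
      void Increment(intptr_t by) const;
    };

    // Non-atomic path: plain read-modify-write, safe only without
    // concurrent writers.
    template <>
    inline void LiveByteCounter::Increment<AccessMode::NON_ATOMIC>(
        intptr_t by) const {
      *live_bytes_ += by;
    }

    // Atomic path: treat the same storage as an atomic counter, as the
    // patch does by casting live_bytes_ to base::AtomicNumber<intptr_t>*.
    // The cast assumes the platform's atomic intptr_t has the same layout
    // as a plain intptr_t.
    template <>
    inline void LiveByteCounter::Increment<AccessMode::ATOMIC>(
        intptr_t by) const {
      reinterpret_cast<std::atomic<intptr_t>*>(live_bytes_)
          ->fetch_add(by, std::memory_order_relaxed);
    }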
| 693 // ----------------------------------------------------------------------------- | 709 // ----------------------------------------------------------------------------- |
| 694 // A page is a memory chunk of size 1MB. Large object pages may be larger. | 710 // A page is a memory chunk of size 1MB. Large object pages may be larger. |
| 695 // | 711 // |
| 696 // The only way to get a page pointer is by calling factory methods: | 712 // The only way to get a page pointer is by calling factory methods: |
| 697 // Page* p = Page::FromAddress(addr); or | 713 // Page* p = Page::FromAddress(addr); or |
| 698 // Page* p = Page::FromTopOrLimit(top); | 714 // Page* p = Page::FromTopOrLimit(top); |
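Both factory methods rely on page alignment: because every page starts at a page-size boundary, any interior address can be mapped back to its page header by masking off the low bits. A hedged sketch of that masking trick (the constants are assumptions for illustration, not copied from this header):

    #include <cstdint>

    const uintptr_t kPageSize = uintptr_t{1} << 20;  // 1MB, per the comment above
    const uintptr_t kPageAlignmentMask = kPageSize - 1;

    struct PageSketch {
      // Clearing the in-page offset bits of any interior address yields the
      // address of the page header itself.
      static PageSketch* FromAddress(uintptr_t addr) {
        return reinterpret_cast<PageSketch*>(addr & ~kPageAlignmentMask);
      }
    };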
| 699 class Page : public MemoryChunk { | 715 class Page : public MemoryChunk { |
| 700 public: | 716 public: |
| 701 static const intptr_t kCopyAllFlags = ~0; | 717 static const intptr_t kCopyAllFlags = ~0; |
| 702 | 718 |
| (...skipping 882 matching lines...) | |
| 1585 Address limit_; | 1601 Address limit_; |
| 1586 }; | 1602 }; |
| 1587 | 1603 |
| 1588 | 1604 |
| 1589 // An abstraction of the accounting statistics of a page-structured space. | 1605 // An abstraction of the accounting statistics of a page-structured space. |
| 1590 // | 1606 // |
| 1591 // The stats are only set by functions that ensure they stay balanced. These | 1607 // The stats are only set by functions that ensure they stay balanced. These |
| 1592 // functions increase or decrease one of the non-capacity stats in conjunction | 1608 // functions increase or decrease one of the non-capacity stats in conjunction |
| 1593 // with capacity, or else they always balance increases and decreases to the | 1609 // with capacity, or else they always balance increases and decreases to the |
| 1594 // non-capacity stats. | 1610 // non-capacity stats. |
| 1595 class AllocationStats BASE_EMBEDDED { | 1611 class AllocationStats BASE_EMBEDDED { |
|
Michael Lippautz
2017/04/21 07:05:52
We might borrow pages for evacuation (using the free list).
Hannes Payer (out of office)
2017/04/21 14:46:27
Acknowledged.
| |
| 1596 public: | 1612 public: |
| 1597 AllocationStats() { Clear(); } | 1613 AllocationStats() { Clear(); } |
| 1598 | 1614 |
| 1599 // Zero out all the allocation statistics (i.e., no capacity). | 1615 // Zero out all the allocation statistics (i.e., no capacity). |
| 1600 void Clear() { | 1616 void Clear() { |
| 1601 capacity_ = 0; | 1617 capacity_.SetValue(0); |
| 1602 max_capacity_ = 0; | 1618 max_capacity_.SetValue(0); |
| 1603 size_ = 0; | 1619 size_.SetValue(0); |
| 1604 } | 1620 } |
| 1605 | 1621 |
| 1606 void ClearSize() { size_ = capacity_; } | 1622 void ClearSize() { size_.SetValue(capacity_.Value()); } |
| 1607 | 1623 |
| 1608 // Accessors for the allocation statistics. | 1624 // Accessors for the allocation statistics. |
| 1609 size_t Capacity() { return capacity_; } | 1625 size_t Capacity() { return capacity_.Value(); } |
| 1610 size_t MaxCapacity() { return max_capacity_; } | 1626 size_t MaxCapacity() { return max_capacity_.Value(); } |
| 1611 size_t Size() { return size_; } | 1627 size_t Size() { return size_.Value(); } |
| 1612 | 1628 |
| 1613 // Grow the space by adding available bytes. They are initially marked as | 1629 // Grow the space by adding available bytes. They are initially marked as |
| 1614 // being in use (part of the size), but will normally be immediately freed, | 1630 // being in use (part of the size), but will normally be immediately freed, |
| 1615 // putting them on the free list and removing them from size_. | 1631 // putting them on the free list and removing them from size_. |
| 1616 void ExpandSpace(size_t bytes) { | 1632 void ExpandSpace(size_t bytes) { |
| 1617 DCHECK_GE(size_ + bytes, size_); | 1633 DCHECK_GE(size_.Value() + bytes, size_.Value()); |
| 1618 DCHECK_GE(capacity_ + bytes, capacity_); | 1634 DCHECK_GE(capacity_.Value() + bytes, capacity_.Value()); |
| 1619 capacity_ += bytes; | 1635 capacity_.Increment(bytes); |
|
Michael Lippautz
2017/04/21 07:05:52
This should be safe even though there are 2 counters.
Hannes Payer (out of office)
2017/04/21 14:46:27
Acknowledged.
| |
| 1620 size_ += bytes; | 1636 size_.Increment(bytes); |
| 1621 if (capacity_ > max_capacity_) { | 1637 if (capacity_.Value() > max_capacity_.Value()) { |
| 1622 max_capacity_ = capacity_; | 1638 max_capacity_.SetValue(capacity_.Value()); |
| 1623 } | 1639 } |
| 1624 } | 1640 } |
| 1625 | 1641 |
| 1626 // Shrink the space by removing available bytes. Since shrinking is done | 1642 // Shrink the space by removing available bytes. Since shrinking is done |
| 1627 // during sweeping, bytes have been marked as being in use (part of the size) | 1643 // during sweeping, bytes have been marked as being in use (part of the size) |
| 1628 // and are hereby freed. | 1644 // and are hereby freed. |
| 1629 void ShrinkSpace(size_t bytes) { | 1645 void ShrinkSpace(size_t bytes) { |
| 1630 DCHECK_GE(capacity_, bytes); | 1646 DCHECK_GE(capacity_.Value(), bytes); |
| 1631 DCHECK_GE(size_, bytes); | 1647 DCHECK_GE(size_.Value(), bytes); |
| 1632 capacity_ -= bytes; | 1648 capacity_.Decrement(bytes); |
| 1633 size_ -= bytes; | 1649 size_.Decrement(bytes); |
| 1634 } | 1650 } |
| 1635 | 1651 |
| 1636 void AllocateBytes(size_t bytes) { | 1652 void AllocateBytes(size_t bytes) { |
| 1637 DCHECK_GE(size_ + bytes, size_); | 1653 DCHECK_GE(size_.Value() + bytes, size_.Value()); |
| 1638 size_ += bytes; | 1654 size_.Increment(bytes); |
| 1639 } | 1655 } |
| 1640 | 1656 |
| 1641 void DeallocateBytes(size_t bytes) { | 1657 void DeallocateBytes(size_t bytes) { |
| 1642 DCHECK_GE(size_, bytes); | 1658 DCHECK_GE(size_.Value(), bytes); |
| 1643 size_ -= bytes; | 1659 size_.Decrement(bytes); |
| 1644 } | 1660 } |
| 1645 | 1661 |
| 1646 void DecreaseCapacity(size_t bytes) { | 1662 void DecreaseCapacity(size_t bytes) { |
| 1647 DCHECK_GE(capacity_, bytes); | 1663 DCHECK_GE(capacity_.Value(), bytes); |
| 1648 DCHECK_GE(capacity_ - bytes, size_); | 1664 DCHECK_GE(capacity_.Value() - bytes, size_.Value()); |
| 1649 capacity_ -= bytes; | 1665 capacity_.Decrement(bytes); |
| 1650 } | 1666 } |
| 1651 | 1667 |
| 1652 void IncreaseCapacity(size_t bytes) { | 1668 void IncreaseCapacity(size_t bytes) { |
| 1653 DCHECK_GE(capacity_ + bytes, capacity_); | 1669 DCHECK_GE(capacity_.Value() + bytes, capacity_.Value()); |
| 1654 capacity_ += bytes; | 1670 capacity_.Increment(bytes); |
| 1655 } | 1671 } |
| 1656 | 1672 |
| 1657 // Merge |other| into |this|. | 1673 // Merge |other| into |this|. |
| 1658 void Merge(const AllocationStats& other) { | 1674 void Merge(const AllocationStats& other) { |
| 1659 DCHECK_GE(capacity_ + other.capacity_, capacity_); | 1675 DCHECK_GE(capacity_.Value() + other.capacity_.Value(), capacity_.Value()); |
| 1660 DCHECK_GE(size_ + other.size_, size_); | 1676 DCHECK_GE(size_.Value() + other.size_.Value(), size_.Value()); |
| 1661 capacity_ += other.capacity_; | 1677 capacity_.Increment(other.capacity_.Value()); |
| 1662 size_ += other.size_; | 1678 size_.Increment(other.size_.Value()); |
| 1663 if (other.max_capacity_ > max_capacity_) { | 1679 if (other.max_capacity_.Value() > max_capacity_.Value()) { |
| 1664 max_capacity_ = other.max_capacity_; | 1680 max_capacity_.SetValue(other.max_capacity_.Value()); |
| 1665 } | 1681 } |
| 1666 } | 1682 } |
| 1667 | 1683 |
| 1668 private: | 1684 private: |
| 1669 // |capacity_|: The number of object-area bytes (i.e., not including page | 1685 // |capacity_|: The number of object-area bytes (i.e., not including page |
| 1670 // bookkeeping structures) currently in the space. | 1686 // bookkeeping structures) currently in the space. |
| 1671 size_t capacity_; | 1687 base::AtomicNumber<size_t> capacity_; |
| 1672 | 1688 |
| 1673 // |max_capacity_|: The maximum capacity ever observed. | 1689 // |max_capacity_|: The maximum capacity ever observed. |
| 1674 size_t max_capacity_; | 1690 base::AtomicNumber<size_t> max_capacity_; |
| 1675 | 1691 |
| 1676 // |size_|: The number of allocated bytes. | 1692 // |size_|: The number of allocated bytes. |
| 1677 size_t size_; | 1693 base::AtomicNumber<size_t> size_; |
| 1678 }; | 1694 }; |
| 1679 | 1695 |
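The conversion of capacity_, max_capacity_, and size_ from plain size_t to base::AtomicNumber<size_t> is what makes the concurrent page removal below (RemovePageSafe) sound. A minimal sketch of a wrapper with the same Value/SetValue/Increment/Decrement surface, assuming relaxed std::atomic semantics underneath (an illustrative stand-in, not V8's base::AtomicNumber from src/base/atomic-utils.h):

    #include <atomic>
    #include <cstddef>

    template <typename T>
    class AtomicNumberSketch {
     public:
      explicit AtomicNumberSketch(T initial = T{0}) : value_(initial) {}

      T Value() const { return value_.load(std::memory_order_relaxed); }
      void SetValue(T v) { value_.store(v, std::memory_order_relaxed); }
      void Increment(T by) { value_.fetch_add(by, std::memory_order_relaxed); }
      void Decrement(T by) { value_.fetch_sub(by, std::memory_order_relaxed); }

     private:
      std::atomic<T> value_;
    };

Each counter is individually atomic, but ExpandSpace still updates capacity_ and size_ as two separate operations; per the review exchange above, that is acceptable because no reader requires the two counters to be mutually consistent at a single instant.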
| 1680 // A free list maintaining free blocks of memory. The free list is organized in | 1696 // A free list maintaining free blocks of memory. The free list is organized in |
| 1681 // a way to encourage objects allocated around the same time to be near each | 1697 // a way to encourage objects allocated around the same time to be near each |
| 1682 // other. The normal way to allocate is intended to be by bumping a 'top' | 1698 // other. The normal way to allocate is intended to be by bumping a 'top' |
| 1683 // pointer until it hits a 'limit' pointer. When the limit is hit we need to | 1699 // pointer until it hits a 'limit' pointer. When the limit is hit we need to |
| 1684 // find a new space to allocate from. This is done with the free list, which is | 1700 // find a new space to allocate from. This is done with the free list, which is |
| 1685 // divided up into rough categories to cut down on waste. Having finer | 1701 // divided up into rough categories to cut down on waste. Having finer |
| 1686 // categories would scatter allocation more. | 1702 // categories would scatter allocation more. |
| 1687 | 1703 |
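To make the "rough categories" concrete: freed blocks are binned by size so an allocation request only searches lists whose blocks are guaranteed big enough, trading a little internal waste for fast lookup. A hedged sketch of such a binning function (the category names and boundaries are illustrative assumptions, not the exact ones used by this FreeList):

    #include <cstddef>

    enum FreeListCategory { kSmall, kMedium, kLarge, kHuge };

    // Map a block size to the coarse category it is stored under. Larger
    // requests skip straight to lists whose minimum block size fits them.
    FreeListCategory CategoryFor(size_t size_in_bytes) {
      if (size_in_bytes <= 255) return kSmall;
      if (size_in_bytes <= 2047) return kMedium;
      if (size_in_bytes <= 16383) return kLarge;
      return kHuge;
    }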
| (...skipping 245 matching lines...) | |
| 1933 LocalAllocationBuffer(Heap* heap, AllocationInfo allocation_info); | 1949 LocalAllocationBuffer(Heap* heap, AllocationInfo allocation_info); |
| 1934 | 1950 |
| 1935 Heap* heap_; | 1951 Heap* heap_; |
| 1936 AllocationInfo allocation_info_; | 1952 AllocationInfo allocation_info_; |
| 1937 }; | 1953 }; |
| 1938 | 1954 |
| 1939 class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) { | 1955 class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) { |
| 1940 public: | 1956 public: |
| 1941 typedef PageIterator iterator; | 1957 typedef PageIterator iterator; |
| 1942 | 1958 |
| 1959 // Reuse a page for allocation only if it has at least {kPageReuseThreshold} | |
| 1960 // memory available in its FreeList. | |
| 1961 static const size_t kPageReuseThreshold = 4 * KB; | |
| 1962 | |
| 1943 static const intptr_t kCompactionMemoryWanted = 500 * KB; | 1963 static const intptr_t kCompactionMemoryWanted = 500 * KB; |
| 1944 | 1964 |
| 1945 // Creates a space with an id. | 1965 // Creates a space with an id. |
| 1946 PagedSpace(Heap* heap, AllocationSpace id, Executability executable); | 1966 PagedSpace(Heap* heap, AllocationSpace id, Executability executable); |
| 1947 | 1967 |
| 1948 ~PagedSpace() override { TearDown(); } | 1968 ~PagedSpace() override { TearDown(); } |
| 1949 | 1969 |
| 1950 // Set up the space using the given address range of virtual memory (from | 1970 // Set up the space using the given address range of virtual memory (from |
| 1951 // the memory allocator's initial chunk) if possible. If the block of | 1971 // the memory allocator's initial chunk) if possible. If the block of |
| 1952 // addresses is not big enough to contain a single page-aligned page, a | 1972 // addresses is not big enough to contain a single page-aligned page, a |
| (...skipping 186 matching lines...) | |
| 2139 | 2159 |
| 2140 iterator begin() { return iterator(anchor_.next_page()); } | 2160 iterator begin() { return iterator(anchor_.next_page()); } |
| 2141 iterator end() { return iterator(&anchor_); } | 2161 iterator end() { return iterator(&anchor_); } |
| 2142 | 2162 |
| 2143 // Shrink immortal immovable pages of the space to be exactly the size needed | 2163 // Shrink immortal immovable pages of the space to be exactly the size needed |
| 2144 // using the high water mark. | 2164 // using the high water mark. |
| 2145 void ShrinkImmortalImmovablePages(); | 2165 void ShrinkImmortalImmovablePages(); |
| 2146 | 2166 |
| 2147 std::unique_ptr<ObjectIterator> GetObjectIterator() override; | 2167 std::unique_ptr<ObjectIterator> GetObjectIterator() override; |
| 2148 | 2168 |
| 2169 Page* RemovePageSafe(); | |
| 2170 void AddPage(Page* page); | |
| 2171 | |
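RemovePageSafe() and AddPage() implement the page borrowing mentioned in the review comment above: instead of always allocating a fresh page, a compaction space can take over a mostly-empty page from the main space, provided the page clears kPageReuseThreshold. A sketch of the intended flow (the caller and the nullptr contract are assumptions based on the names, not guaranteed by this header):

    // Hypothetical caller, e.g. a compaction space refilling itself during
    // evacuation.
    void BorrowPageForEvacuation(PagedSpace* main_space,
                                 PagedSpace* compaction_space) {
      // Assumed contract: RemovePageSafe() synchronizes with other removers,
      // unlinks a page with >= kPageReuseThreshold free bytes, adjusts the
      // owner's accounting, and returns nullptr if no page qualifies.
      Page* page = main_space->RemovePageSafe();
      if (page != nullptr) {
        // AddPage() re-links the page and merges its accounting into the
        // receiving space's AllocationStats.
        compaction_space->AddPage(page);
      }
    }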
| 2149 protected: | 2172 protected: |
| 2150 // PagedSpaces that should be included in snapshots have different, i.e., | 2173 // PagedSpaces that should be included in snapshots have different, i.e., |
| 2151 // smaller, initial pages. | 2174 // smaller, initial pages. |
| 2152 virtual bool snapshotable() { return true; } | 2175 virtual bool snapshotable() { return true; } |
| 2153 | 2176 |
| 2154 bool HasPages() { return anchor_.next_page() != &anchor_; } | 2177 bool HasPages() { return anchor_.next_page() != &anchor_; } |
| 2155 | 2178 |
| 2156 // Cleans up the space, frees all pages in this space except those belonging | 2179 // Cleans up the space, frees all pages in this space except those belonging |
| 2157 // to the initial chunk, uncommits addresses in the initial chunk. | 2180 // to the initial chunk, uncommits addresses in the initial chunk. |
| 2158 void TearDown(); | 2181 void TearDown(); |
| (...skipping 792 matching lines...) | |
| 2951 PageIterator old_iterator_; | 2974 PageIterator old_iterator_; |
| 2952 PageIterator code_iterator_; | 2975 PageIterator code_iterator_; |
| 2953 PageIterator map_iterator_; | 2976 PageIterator map_iterator_; |
| 2954 LargePageIterator lo_iterator_; | 2977 LargePageIterator lo_iterator_; |
| 2955 }; | 2978 }; |
| 2956 | 2979 |
| 2957 } // namespace internal | 2980 } // namespace internal |
| 2958 } // namespace v8 | 2981 } // namespace v8 |
| 2959 | 2982 |
| 2960 #endif // V8_HEAP_SPACES_H_ | 2983 #endif // V8_HEAP_SPACES_H_ |