Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #ifndef V8_HEAP_SPACES_H_ | 5 #ifndef V8_HEAP_SPACES_H_ |
| 6 #define V8_HEAP_SPACES_H_ | 6 #define V8_HEAP_SPACES_H_ |
| 7 | 7 |
| 8 #include <list> | 8 #include <list> |
| 9 #include <memory> | 9 #include <memory> |
| 10 #include <unordered_set> | 10 #include <unordered_set> |
| (...skipping 506 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 517 | 517 |
| 518 bool CanAllocate() { | 518 bool CanAllocate() { |
| 519 return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE); | 519 return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE); |
| 520 } | 520 } |
| 521 | 521 |
| 522 bool ShouldSkipEvacuationSlotRecording() { | 522 bool ShouldSkipEvacuationSlotRecording() { |
| 523 return ((flags_ & kSkipEvacuationSlotsRecordingMask) != 0) && | 523 return ((flags_ & kSkipEvacuationSlotsRecordingMask) != 0) && |
| 524 !IsFlagSet(COMPACTION_WAS_ABORTED); | 524 !IsFlagSet(COMPACTION_WAS_ABORTED); |
| 525 } | 525 } |
| 526 | 526 |
| 527 bool CanUseForAllocation() { return CanAllocate() && !NeverEvacuate(); } | |
|
ulan (2017/04/26 09:51:28): Can you add a comment explaining how NeverEvacuate [comment truncated in this rendering]
Michael Lippautz (2017/05/02 11:22:00): This one is gone now since we take pages for which [comment truncated in this rendering]
| |
| 528 | |
| 527 Executability executable() { | 529 Executability executable() { |
| 528 return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE; | 530 return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE; |
| 529 } | 531 } |
| 530 | 532 |
| 531 bool InNewSpace() { return (flags_ & kIsInNewSpaceMask) != 0; } | 533 bool InNewSpace() { return (flags_ & kIsInNewSpaceMask) != 0; } |
| 532 | 534 |
| 533 bool InToSpace() { return IsFlagSet(IN_TO_SPACE); } | 535 bool InToSpace() { return IsFlagSet(IN_TO_SPACE); } |
| 534 | 536 |
| 535 bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); } | 537 bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); } |
| 536 | 538 |
| (...skipping 1066 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1603 // The stats are only set by functions that ensure they stay balanced. These | 1605 // The stats are only set by functions that ensure they stay balanced. These |
| 1604 // functions increase or decrease one of the non-capacity stats in conjunction | 1606 // functions increase or decrease one of the non-capacity stats in conjunction |
| 1605 // with capacity, or else they always balance increases and decreases to the | 1607 // with capacity, or else they always balance increases and decreases to the |
| 1606 // non-capacity stats. | 1608 // non-capacity stats. |
| 1607 class AllocationStats BASE_EMBEDDED { | 1609 class AllocationStats BASE_EMBEDDED { |
| 1608 public: | 1610 public: |
| 1609 AllocationStats() { Clear(); } | 1611 AllocationStats() { Clear(); } |
| 1610 | 1612 |
| 1611 // Zero out all the allocation statistics (i.e., no capacity). | 1613 // Zero out all the allocation statistics (i.e., no capacity). |
| 1612 void Clear() { | 1614 void Clear() { |
| 1613 capacity_ = 0; | 1615 capacity_.SetValue(0); |
| 1614 max_capacity_ = 0; | 1616 max_capacity_.SetValue(0); |
| 1615 size_ = 0; | 1617 size_.SetValue(0); |
| 1616 } | 1618 } |
| 1617 | 1619 |
| 1618 void ClearSize() { size_ = capacity_; } | 1620 void ClearSize() { size_.SetValue(capacity_.Value()); } |
| 1619 | 1621 |
| 1620 // Accessors for the allocation statistics. | 1622 // Accessors for the allocation statistics. |
| 1621 size_t Capacity() { return capacity_; } | 1623 size_t Capacity() { return capacity_.Value(); } |
| 1622 size_t MaxCapacity() { return max_capacity_; } | 1624 size_t MaxCapacity() { return max_capacity_.Value(); } |
| 1623 size_t Size() { return size_; } | 1625 size_t Size() { return size_.Value(); } |
| 1624 | 1626 |
| 1625 // Grow the space by adding available bytes. They are initially marked as | 1627 // Grow the space by adding available bytes. They are initially marked as |
| 1626 // being in use (part of the size), but will normally be immediately freed, | 1628 // being in use (part of the size), but will normally be immediately freed, |
| 1627 // putting them on the free list and removing them from size_. | 1629 // putting them on the free list and removing them from size_. |
| 1628 void ExpandSpace(size_t bytes) { | 1630 void ExpandSpace(size_t bytes) { |
| 1629 DCHECK_GE(size_ + bytes, size_); | 1631 DCHECK_GE(size_.Value() + bytes, size_.Value()); |
| 1630 DCHECK_GE(capacity_ + bytes, capacity_); | 1632 DCHECK_GE(capacity_.Value() + bytes, capacity_.Value()); |
| 1631 capacity_ += bytes; | 1633 capacity_.Increment(bytes); |
| 1632 size_ += bytes; | 1634 size_.Increment(bytes); |
| 1633 if (capacity_ > max_capacity_) { | 1635 if (capacity_.Value() > max_capacity_.Value()) { |
| 1634 max_capacity_ = capacity_; | 1636 max_capacity_.SetValue(capacity_.Value()); |
| 1635 } | 1637 } |
| 1636 } | 1638 } |
| 1637 | 1639 |
| 1638 // Shrink the space by removing available bytes. Since shrinking is done | 1640 // Shrink the space by removing available bytes. Since shrinking is done |
| 1639 // during sweeping, bytes have been marked as being in use (part of the size) | 1641 // during sweeping, bytes have been marked as being in use (part of the size) |
| 1640 // and are hereby freed. | 1642 // and are hereby freed. |
| 1641 void ShrinkSpace(size_t bytes) { | 1643 void ShrinkSpace(size_t bytes) { |
| 1642 DCHECK_GE(capacity_, bytes); | 1644 DCHECK_GE(capacity_.Value(), bytes); |
| 1643 DCHECK_GE(size_, bytes); | 1645 DCHECK_GE(size_.Value(), bytes); |
| 1644 capacity_ -= bytes; | 1646 capacity_.Decrement(bytes); |
| 1645 size_ -= bytes; | 1647 size_.Decrement(bytes); |
| 1646 } | 1648 } |
| 1647 | 1649 |
| 1648 void AllocateBytes(size_t bytes) { | 1650 void AllocateBytes(size_t bytes) { |
| 1649 DCHECK_GE(size_ + bytes, size_); | 1651 DCHECK_GE(size_.Value() + bytes, size_.Value()); |
| 1650 size_ += bytes; | 1652 size_.Increment(bytes); |
| 1651 } | 1653 } |
| 1652 | 1654 |
| 1653 void DeallocateBytes(size_t bytes) { | 1655 void DeallocateBytes(size_t bytes) { |
| 1654 DCHECK_GE(size_, bytes); | 1656 DCHECK_GE(size_.Value(), bytes); |
| 1655 size_ -= bytes; | 1657 size_.Decrement(bytes); |
| 1656 } | 1658 } |
| 1657 | 1659 |
| 1658 void DecreaseCapacity(size_t bytes) { | 1660 void DecreaseCapacity(size_t bytes) { |
| 1659 DCHECK_GE(capacity_, bytes); | 1661 DCHECK_GE(capacity_.Value(), bytes); |
| 1660 DCHECK_GE(capacity_ - bytes, size_); | 1662 DCHECK_GE(capacity_.Value() - bytes, size_.Value()); |
| 1661 capacity_ -= bytes; | 1663 capacity_.Decrement(bytes); |
| 1662 } | 1664 } |
| 1663 | 1665 |
| 1664 void IncreaseCapacity(size_t bytes) { | 1666 void IncreaseCapacity(size_t bytes) { |
| 1665 DCHECK_GE(capacity_ + bytes, capacity_); | 1667 DCHECK_GE(capacity_.Value() + bytes, capacity_.Value()); |
| 1666 capacity_ += bytes; | 1668 capacity_.Increment(bytes); |
| 1667 } | 1669 } |
| 1668 | 1670 |
| 1669 // Merge |other| into |this|. | 1671 // Merge |other| into |this|. |
| 1670 void Merge(const AllocationStats& other) { | 1672 void Merge(const AllocationStats& other) { |
| 1671 DCHECK_GE(capacity_ + other.capacity_, capacity_); | 1673 DCHECK_GE(capacity_.Value() + other.capacity_.Value(), capacity_.Value()); |
| 1672 DCHECK_GE(size_ + other.size_, size_); | 1674 DCHECK_GE(size_.Value() + other.size_.Value(), size_.Value()); |
| 1673 capacity_ += other.capacity_; | 1675 capacity_.Increment(other.capacity_.Value()); |
| 1674 size_ += other.size_; | 1676 size_.Increment(other.size_.Value()); |
| 1675 if (other.max_capacity_ > max_capacity_) { | 1677 if (other.max_capacity_.Value() > max_capacity_.Value()) { |
| 1676 max_capacity_ = other.max_capacity_; | 1678 max_capacity_.SetValue(other.max_capacity_.Value()); |
| 1677 } | 1679 } |
| 1678 } | 1680 } |
| 1679 | 1681 |
| 1680 private: | 1682 private: |
| 1681 // |capacity_|: The number of object-area bytes (i.e., not including page | 1683 // |capacity_|: The number of object-area bytes (i.e., not including page |
| 1682 // bookkeeping structures) currently in the space. | 1684 // bookkeeping structures) currently in the space. |
| 1683 size_t capacity_; | 1685 base::AtomicNumber<size_t> capacity_; |
| 1684 | 1686 |
| 1685 // |max_capacity_|: The maximum capacity ever observed. | 1687 // |max_capacity_|: The maximum capacity ever observed. |
| 1686 size_t max_capacity_; | 1688 base::AtomicNumber<size_t> max_capacity_; |
| 1687 | 1689 |
| 1688 // |size_|: The number of allocated bytes. | 1690 // |size_|: The number of allocated bytes. |
| 1689 size_t size_; | 1691 base::AtomicNumber<size_t> size_; |
| 1690 }; | 1692 }; |
| 1691 | 1693 |
| 1692 // A free list maintaining free blocks of memory. The free list is organized in | 1694 // A free list maintaining free blocks of memory. The free list is organized in |
| 1693 // a way to encourage objects allocated around the same time to be near each | 1695 // a way to encourage objects allocated around the same time to be near each |
| 1694 // other. The normal way to allocate is intended to be by bumping a 'top' | 1696 // other. The normal way to allocate is intended to be by bumping a 'top' |
| 1695 // pointer until it hits a 'limit' pointer. When the limit is hit we need to | 1697 // pointer until it hits a 'limit' pointer. When the limit is hit we need to |
| 1696 // find a new space to allocate from. This is done with the free list, which is | 1698 // find a new space to allocate from. This is done with the free list, which is |
| 1697 // divided up into rough categories to cut down on waste. Having finer | 1699 // divided up into rough categories to cut down on waste. Having finer |
| 1698 // categories would scatter allocation more. | 1700 // categories would scatter allocation more. |
| 1699 | 1701 |
| (...skipping 245 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1945 LocalAllocationBuffer(Heap* heap, AllocationInfo allocation_info); | 1947 LocalAllocationBuffer(Heap* heap, AllocationInfo allocation_info); |
| 1946 | 1948 |
| 1947 Heap* heap_; | 1949 Heap* heap_; |
| 1948 AllocationInfo allocation_info_; | 1950 AllocationInfo allocation_info_; |
| 1949 }; | 1951 }; |
| 1950 | 1952 |
| 1951 class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) { | 1953 class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) { |
| 1952 public: | 1954 public: |
| 1953 typedef PageIterator iterator; | 1955 typedef PageIterator iterator; |
| 1954 | 1956 |
| 1957 // Reuse a page for allocation only if it has at least {kPageReuseThreshold} | |
| 1958 // memory available in its FreeList. | |
| 1959 static const size_t kPageReuseThreshold = 4 * KB; | |
| 1960 | |
| 1955 static const intptr_t kCompactionMemoryWanted = 500 * KB; | 1961 static const intptr_t kCompactionMemoryWanted = 500 * KB; |
| 1956 | 1962 |
| 1957 // Creates a space with an id. | 1963 // Creates a space with an id. |
| 1958 PagedSpace(Heap* heap, AllocationSpace id, Executability executable); | 1964 PagedSpace(Heap* heap, AllocationSpace id, Executability executable); |
| 1959 | 1965 |
| 1960 ~PagedSpace() override { TearDown(); } | 1966 ~PagedSpace() override { TearDown(); } |
| 1961 | 1967 |
| 1962 // Set up the space using the given address range of virtual memory (from | 1968 // Set up the space using the given address range of virtual memory (from |
| 1963 // the memory allocator's initial chunk) if possible. If the block of | 1969 // the memory allocator's initial chunk) if possible. If the block of |
| 1964 // addresses is not big enough to contain a single page-aligned page, a | 1970 // addresses is not big enough to contain a single page-aligned page, a |
| (...skipping 186 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 2151 | 2157 |
| 2152 iterator begin() { return iterator(anchor_.next_page()); } | 2158 iterator begin() { return iterator(anchor_.next_page()); } |
| 2153 iterator end() { return iterator(&anchor_); } | 2159 iterator end() { return iterator(&anchor_); } |
| 2154 | 2160 |
| 2155 // Shrink immortal immovable pages of the space to be exactly the size needed | 2161 // Shrink immortal immovable pages of the space to be exactly the size needed |
| 2156 // using the high water mark. | 2162 // using the high water mark. |
| 2157 void ShrinkImmortalImmovablePages(); | 2163 void ShrinkImmortalImmovablePages(); |
| 2158 | 2164 |
| 2159 std::unique_ptr<ObjectIterator> GetObjectIterator() override; | 2165 std::unique_ptr<ObjectIterator> GetObjectIterator() override; |
| 2160 | 2166 |
| 2167 Page* RemovePageSafe(); | |
| 2168 void AddPage(Page* page); | |
| 2169 | |
| 2161 protected: | 2170 protected: |
| 2162 // PagedSpaces that should be included in snapshots have different, i.e., | 2171 // PagedSpaces that should be included in snapshots have different, i.e., |
| 2163 // smaller, initial pages. | 2172 // smaller, initial pages. |
| 2164 virtual bool snapshotable() { return true; } | 2173 virtual bool snapshotable() { return true; } |
| 2165 | 2174 |
| 2166 bool HasPages() { return anchor_.next_page() != &anchor_; } | 2175 bool HasPages() { return anchor_.next_page() != &anchor_; } |
| 2167 | 2176 |
| 2168 // Cleans up the space, frees all pages in this space except those belonging | 2177 // Cleans up the space, frees all pages in this space except those belonging |
| 2169 // to the initial chunk, uncommits addresses in the initial chunk. | 2178 // to the initial chunk, uncommits addresses in the initial chunk. |
| 2170 void TearDown(); | 2179 void TearDown(); |
| (...skipping 792 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 2963 PageIterator old_iterator_; | 2972 PageIterator old_iterator_; |
| 2964 PageIterator code_iterator_; | 2973 PageIterator code_iterator_; |
| 2965 PageIterator map_iterator_; | 2974 PageIterator map_iterator_; |
| 2966 LargePageIterator lo_iterator_; | 2975 LargePageIterator lo_iterator_; |
| 2967 }; | 2976 }; |
| 2968 | 2977 |
| 2969 } // namespace internal | 2978 } // namespace internal |
| 2970 } // namespace v8 | 2979 } // namespace v8 |
| 2971 | 2980 |
| 2972 #endif // V8_HEAP_SPACES_H_ | 2981 #endif // V8_HEAP_SPACES_H_ |
| OLD | NEW |