| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #ifndef V8_HEAP_SPACES_H_ | 5 #ifndef V8_HEAP_SPACES_H_ |
| 6 #define V8_HEAP_SPACES_H_ | 6 #define V8_HEAP_SPACES_H_ |
| 7 | 7 |
| 8 #include <list> | 8 #include <list> |
| 9 #include <memory> | 9 #include <memory> |
| 10 #include <unordered_set> | 10 #include <unordered_set> |
| (...skipping 217 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 228 class MemoryChunk { | 228 class MemoryChunk { |
| 229 public: | 229 public: |
| 230 enum MemoryChunkFlags { | 230 enum MemoryChunkFlags { |
| 231 IS_EXECUTABLE, | 231 IS_EXECUTABLE, |
| 232 POINTERS_TO_HERE_ARE_INTERESTING, | 232 POINTERS_TO_HERE_ARE_INTERESTING, |
| 233 POINTERS_FROM_HERE_ARE_INTERESTING, | 233 POINTERS_FROM_HERE_ARE_INTERESTING, |
| 234 IN_FROM_SPACE, // Mutually exclusive with IN_TO_SPACE. | 234 IN_FROM_SPACE, // Mutually exclusive with IN_TO_SPACE. |
| 235 IN_TO_SPACE, // All pages in new space have one of these two set. | 235 IN_TO_SPACE, // All pages in new space have one of these two set. |
| 236 NEW_SPACE_BELOW_AGE_MARK, | 236 NEW_SPACE_BELOW_AGE_MARK, |
| 237 EVACUATION_CANDIDATE, | 237 EVACUATION_CANDIDATE, |
| 238 | 238 NEVER_EVACUATE, // May contain immortal immutables. |
| 239 // |NEVER_EVACUATE|: A page tagged with this flag will never be selected | |
| 240 // for evacuation. Typically used for immortal immovable pages. | |
| 241 NEVER_EVACUATE, | |
| 242 | 239 |
| 243 // Large objects can have a progress bar in their page header. These objects | 240 // Large objects can have a progress bar in their page header. These objects |
| 244 // are scanned in increments and will be kept black while being scanned. | 241 // are scanned in increments and will be kept black while being scanned. |
| 245 // Even if the mutator writes to them they will be kept black and a white | 242 // Even if the mutator writes to them they will be kept black and a white |
| 246 // to grey transition is performed in the value. | 243 // to grey transition is performed in the value. |
| 247 HAS_PROGRESS_BAR, | 244 HAS_PROGRESS_BAR, |
| 248 | 245 |
| 249 // |PAGE_NEW_OLD_PROMOTION|: A page tagged with this flag has been promoted | 246 // |PAGE_NEW_OLD_PROMOTION|: A page tagged with this flag has been promoted |
| 250 // from new to old space during evacuation. | 247 // from new to old space during evacuation. |
| 251 PAGE_NEW_OLD_PROMOTION, | 248 PAGE_NEW_OLD_PROMOTION, |
| (...skipping 466 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 718 (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) | | 715 (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) | |
| 719 (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING); | 716 (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING); |
| 720 | 717 |
| 721 // Maximum object size that gets allocated into regular pages. Objects larger | 718 // Maximum object size that gets allocated into regular pages. Objects larger |
| 722 // than that size are allocated in large object space and are never moved in | 719 // than that size are allocated in large object space and are never moved in |
| 723 // memory. This also applies to new space allocation, since objects are never | 720 // memory. This also applies to new space allocation, since objects are never |
| 724 // migrated from new space to large object space. Takes double alignment into | 721 // migrated from new space to large object space. Takes double alignment into |
| 725 // account. | 722 // account. |
| 726 // TODO(hpayer): This limit should be way smaller but we currently have | 723 // TODO(hpayer): This limit should be way smaller but we currently have |
| 727 // short living objects >256K. | 724 // short living objects >256K. |
| 728 static const int kMaxRegularHeapObjectSize = 400 * KB; | 725 static const int kMaxRegularHeapObjectSize = 600 * KB; |
| 729 | 726 |
| 730 static inline Page* ConvertNewToOld(Page* old_page, PagedSpace* new_owner); | 727 static inline Page* ConvertNewToOld(Page* old_page, PagedSpace* new_owner); |
| 731 | 728 |
| 732 // Returns the page containing a given address. The address ranges | 729 // Returns the page containing a given address. The address ranges |
| 733 // from [page_addr .. page_addr + kPageSize[. This only works if the object | 730 // from [page_addr .. page_addr + kPageSize[. This only works if the object |
| 734 // is in fact in a page. | 731 // is in fact in a page. |
| 735 static Page* FromAddress(Address addr) { | 732 static Page* FromAddress(Address addr) { |
| 736 return reinterpret_cast<Page*>(OffsetFrom(addr) & ~kPageAlignmentMask); | 733 return reinterpret_cast<Page*>(OffsetFrom(addr) & ~kPageAlignmentMask); |
| 737 } | 734 } |
| 738 | 735 |
| (...skipping 80 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 819 | 816 |
| 820 bool is_anchor() { return IsFlagSet(Page::ANCHOR); } | 817 bool is_anchor() { return IsFlagSet(Page::ANCHOR); } |
| 821 | 818 |
| 822 intptr_t wasted_memory() { return wasted_memory_.Value(); } | 819 intptr_t wasted_memory() { return wasted_memory_.Value(); } |
| 823 void add_wasted_memory(intptr_t waste) { wasted_memory_.Increment(waste); } | 820 void add_wasted_memory(intptr_t waste) { wasted_memory_.Increment(waste); } |
| 824 intptr_t available_in_free_list() { return available_in_free_list_.Value(); } | 821 intptr_t available_in_free_list() { return available_in_free_list_.Value(); } |
| 825 void add_available_in_free_list(intptr_t available) { | 822 void add_available_in_free_list(intptr_t available) { |
| 826 available_in_free_list_.Increment(available); | 823 available_in_free_list_.Increment(available); |
| 827 } | 824 } |
| 828 | 825 |
| 829 size_t ShrinkToHighWaterMark(); | |
| 830 | |
| 831 #ifdef DEBUG | 826 #ifdef DEBUG |
| 832 void Print(); | 827 void Print(); |
| 833 #endif // DEBUG | 828 #endif // DEBUG |
| 834 | 829 |
| 835 private: | 830 private: |
| 836 enum InitializationMode { kFreeMemory, kDoNotFreeMemory }; | 831 enum InitializationMode { kFreeMemory, kDoNotFreeMemory }; |
| 837 | 832 |
| 838 template <InitializationMode mode = kFreeMemory> | 833 template <InitializationMode mode = kFreeMemory> |
| 839 static inline Page* Initialize(Heap* heap, MemoryChunk* chunk, | 834 static inline Page* Initialize(Heap* heap, MemoryChunk* chunk, |
| 840 Executability executable, PagedSpace* owner); | 835 Executability executable, PagedSpace* owner); |
| (...skipping 460 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1301 void ReportStatistics(); | 1296 void ReportStatistics(); |
| 1302 #endif | 1297 #endif |
| 1303 | 1298 |
| 1304 // Returns a MemoryChunk in which the memory region from commit_area_size to | 1299 // Returns a MemoryChunk in which the memory region from commit_area_size to |
| 1305 // reserve_area_size of the chunk area is reserved but not committed, it | 1300 // reserve_area_size of the chunk area is reserved but not committed, it |
| 1306 // could be committed later by calling MemoryChunk::CommitArea. | 1301 // could be committed later by calling MemoryChunk::CommitArea. |
| 1307 MemoryChunk* AllocateChunk(intptr_t reserve_area_size, | 1302 MemoryChunk* AllocateChunk(intptr_t reserve_area_size, |
| 1308 intptr_t commit_area_size, | 1303 intptr_t commit_area_size, |
| 1309 Executability executable, Space* space); | 1304 Executability executable, Space* space); |
| 1310 | 1305 |
| 1311 void ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink); | |
| 1312 | |
| 1313 Address ReserveAlignedMemory(size_t requested, size_t alignment, | 1306 Address ReserveAlignedMemory(size_t requested, size_t alignment, |
| 1314 base::VirtualMemory* controller); | 1307 base::VirtualMemory* controller); |
| 1315 Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size, | 1308 Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size, |
| 1316 size_t alignment, Executability executable, | 1309 size_t alignment, Executability executable, |
| 1317 base::VirtualMemory* controller); | 1310 base::VirtualMemory* controller); |
| 1318 | 1311 |
| 1319 bool CommitMemory(Address addr, size_t size, Executability executable); | 1312 bool CommitMemory(Address addr, size_t size, Executability executable); |
| 1320 | 1313 |
| 1321 void FreeMemory(base::VirtualMemory* reservation, Executability executable); | 1314 void FreeMemory(base::VirtualMemory* reservation, Executability executable); |
| 1322 void PartialFreeMemory(MemoryChunk* chunk, Address start_free); | 1315 void PartialFreeMemory(MemoryChunk* chunk, Address start_free); |
| (...skipping 295 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1618 CHECK(size_ >= 0); | 1611 CHECK(size_ >= 0); |
| 1619 } | 1612 } |
| 1620 | 1613 |
| 1621 // Shrink the space by removing available bytes. Since shrinking is done | 1614 // Shrink the space by removing available bytes. Since shrinking is done |
| 1622 // during sweeping, bytes have been marked as being in use (part of the size) | 1615 // during sweeping, bytes have been marked as being in use (part of the size) |
| 1623 // and are hereby freed. | 1616 // and are hereby freed. |
| 1624 void ShrinkSpace(int size_in_bytes) { | 1617 void ShrinkSpace(int size_in_bytes) { |
| 1625 capacity_ -= size_in_bytes; | 1618 capacity_ -= size_in_bytes; |
| 1626 size_ -= size_in_bytes; | 1619 size_ -= size_in_bytes; |
| 1627 CHECK_GE(size_, 0); | 1620 CHECK_GE(size_, 0); |
| 1628 CHECK_GE(capacity_, 0); | |
| 1629 } | 1621 } |
| 1630 | 1622 |
| 1631 // Allocate from available bytes (available -> size). | 1623 // Allocate from available bytes (available -> size). |
| 1632 void AllocateBytes(intptr_t size_in_bytes) { | 1624 void AllocateBytes(intptr_t size_in_bytes) { |
| 1633 size_ += size_in_bytes; | 1625 size_ += size_in_bytes; |
| 1634 CHECK_GE(size_, 0); | 1626 CHECK_GE(size_, 0); |
| 1635 } | 1627 } |
| 1636 | 1628 |
| 1637 // Free allocated bytes, making them available (size -> available). | 1629 // Free allocated bytes, making them available (size -> available). |
| 1638 void DeallocateBytes(intptr_t size_in_bytes) { | 1630 void DeallocateBytes(intptr_t size_in_bytes) { |
| (...skipping 545 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2184 FreeList* free_list() { return &free_list_; } | 2176 FreeList* free_list() { return &free_list_; } |
| 2185 | 2177 |
| 2186 base::Mutex* mutex() { return &space_mutex_; } | 2178 base::Mutex* mutex() { return &space_mutex_; } |
| 2187 | 2179 |
| 2188 inline void UnlinkFreeListCategories(Page* page); | 2180 inline void UnlinkFreeListCategories(Page* page); |
| 2189 inline intptr_t RelinkFreeListCategories(Page* page); | 2181 inline intptr_t RelinkFreeListCategories(Page* page); |
| 2190 | 2182 |
| 2191 iterator begin() { return iterator(anchor_.next_page()); } | 2183 iterator begin() { return iterator(anchor_.next_page()); } |
| 2192 iterator end() { return iterator(&anchor_); } | 2184 iterator end() { return iterator(&anchor_); } |
| 2193 | 2185 |
| 2194 // Shrink immortal immovable pages of the space to be exactly the size needed | |
| 2195 // using the high water mark. | |
| 2196 void ShrinkImmortalImmovablePages(); | |
| 2197 | |
| 2198 protected: | 2186 protected: |
| 2199 // PagedSpaces that should be included in snapshots have different, i.e., | 2187 // PagedSpaces that should be included in snapshots have different, i.e., |
| 2200 // smaller, initial pages. | 2188 // smaller, initial pages. |
| 2201 virtual bool snapshotable() { return true; } | 2189 virtual bool snapshotable() { return true; } |
| 2202 | 2190 |
| 2203 bool HasPages() { return anchor_.next_page() != &anchor_; } | 2191 bool HasPages() { return anchor_.next_page() != &anchor_; } |
| 2204 | 2192 |
| 2205 // Cleans up the space, frees all pages in this space except those belonging | 2193 // Cleans up the space, frees all pages in this space except those belonging |
| 2206 // to the initial chunk, uncommits addresses in the initial chunk. | 2194 // to the initial chunk, uncommits addresses in the initial chunk. |
| 2207 void TearDown(); | 2195 void TearDown(); |
| (...skipping 832 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3040 count = 0; | 3028 count = 0; |
| 3041 } | 3029 } |
| 3042 // Must be small, since an iteration is used for lookup. | 3030 // Must be small, since an iteration is used for lookup. |
| 3043 static const int kMaxComments = 64; | 3031 static const int kMaxComments = 64; |
| 3044 }; | 3032 }; |
| 3045 #endif | 3033 #endif |
| 3046 } // namespace internal | 3034 } // namespace internal |
| 3047 } // namespace v8 | 3035 } // namespace v8 |
| 3048 | 3036 |
| 3049 #endif // V8_HEAP_SPACES_H_ | 3037 #endif // V8_HEAP_SPACES_H_ |
| OLD | NEW |