Chromium Code Reviews

Side by Side Diff: src/heap/spaces.h

Issue 2278653003: Reland of "[heap] Switch to 500k pages" (Closed)
Patch Set: Rebase (created 4 years, 3 months ago)
// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_SPACES_H_
#define V8_HEAP_SPACES_H_

#include <list>
#include <memory>
#include <unordered_set>
(...skipping 217 matching lines...)
class MemoryChunk {
 public:
  enum MemoryChunkFlags {
    IS_EXECUTABLE,
    POINTERS_TO_HERE_ARE_INTERESTING,
    POINTERS_FROM_HERE_ARE_INTERESTING,
    IN_FROM_SPACE,  // Mutually exclusive with IN_TO_SPACE.
    IN_TO_SPACE,    // All pages in new space has one of these two set.
    NEW_SPACE_BELOW_AGE_MARK,
    EVACUATION_CANDIDATE,
-   NEVER_EVACUATE,  // May contain immortal immutables.
+
+   // |NEVER_EVACUATE|: A page tagged with this flag will never be selected
+   // for evacuation. Typically used for immortal immovable pages.
+   NEVER_EVACUATE,

    // Large objects can have a progress bar in their page header. These object
    // are scanned in increments and will be kept black while being scanned.
    // Even if the mutator writes to them they will be kept black and a white
    // to grey transition is performed in the value.
    HAS_PROGRESS_BAR,

    // |PAGE_NEW_OLD_PROMOTION|: A page tagged with this flag has been promoted
    // from new to old space during evacuation.
    PAGE_NEW_OLD_PROMOTION,
(...skipping 466 matching lines...)
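The flag constants a little further down in this file (for example 1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) treat these enum values as bit positions in a per-chunk flags word, which is also how IsFlagSet() is used later in this diff. Below is a minimal standalone sketch of that pattern; the names and types are illustrative only and are not V8's MemoryChunk implementation.

// Standalone sketch (not V8 code): models how the enum values above act as
// bit positions in a single flags word, mirroring the IsFlagSet()/SetFlag()
// style of usage seen elsewhere in this header. Names are illustrative.
#include <cassert>
#include <cstdint>

enum ChunkFlag {
  IS_EXECUTABLE,
  IN_FROM_SPACE,
  IN_TO_SPACE,
  NEVER_EVACUATE,
};

struct ChunkFlags {
  uintptr_t bits = 0;
  void Set(ChunkFlag f) { bits |= uintptr_t{1} << f; }
  void Clear(ChunkFlag f) { bits &= ~(uintptr_t{1} << f); }
  bool IsSet(ChunkFlag f) const { return (bits & (uintptr_t{1} << f)) != 0; }
};

int main() {
  ChunkFlags flags;
  flags.Set(IN_TO_SPACE);
  assert(flags.IsSet(IN_TO_SPACE));
  assert(!flags.IsSet(IN_FROM_SPACE));  // mutually exclusive with IN_TO_SPACE
  return 0;
}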
      (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
      (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);

  // Maximum object size that gets allocated into regular pages. Objects larger
  // than that size are allocated in large object space and are never moved in
  // memory. This also applies to new space allocation, since objects are never
  // migrated from new space to large object space. Takes double alignment into
  // account.
  // TODO(hpayer): This limit should be way smaller but we currently have
  // short living objects >256K.
- static const int kMaxRegularHeapObjectSize = 600 * KB;
+ static const int kMaxRegularHeapObjectSize = 400 * KB;

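The comment above fixes the size threshold that routes allocations: anything larger than kMaxRegularHeapObjectSize is allocated in large object space and never moved. Here is a hedged standalone sketch of that routing decision; ChooseSpace is a hypothetical helper, not a V8 function, and the value mirrors the new constant in this patch.

// Illustrative sketch only (not the V8 allocation path): shows the routing
// decision described in the comment above. Helper names are hypothetical.
#include <cstddef>
#include <cstdio>

constexpr size_t KB = 1024;
constexpr size_t kMaxRegularHeapObjectSize = 400 * KB;  // value from this patch

const char* ChooseSpace(size_t object_size) {
  // Anything above the limit goes to large object space and is never moved;
  // everything else is allocated on a regular page.
  return object_size > kMaxRegularHeapObjectSize ? "large object space"
                                                 : "regular page";
}

int main() {
  printf("%zu bytes -> %s\n", size_t{16}, ChooseSpace(16));
  printf("%zu bytes -> %s\n", 512 * KB, ChooseSpace(512 * KB));
  return 0;
}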
  static inline Page* ConvertNewToOld(Page* old_page, PagedSpace* new_owner);

  // Returns the page containing a given address. The address ranges
  // from [page_addr .. page_addr + kPageSize[. This only works if the object
  // is in fact in a page.
  static Page* FromAddress(Address addr) {
    return reinterpret_cast<Page*>(OffsetFrom(addr) & ~kPageAlignmentMask);
  }

(...skipping 80 matching lines...)
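Page::FromAddress above recovers the page start by masking off the low bits of any interior address, which works because pages are aligned to their size. The standalone sketch below reproduces the same arithmetic with illustrative constants; it does not use V8's Address or kPageAlignmentMask definitions.

// Standalone sketch of the address arithmetic in Page::FromAddress:
// clearing the low bits of an interior address yields the page start,
// provided pages are aligned to their size. Constants are illustrative.
#include <cassert>
#include <cstdint>

constexpr uintptr_t kPageSize = 512 * 1024;  // example: 512 KB pages
constexpr uintptr_t kPageAlignmentMask = kPageSize - 1;

uintptr_t PageFromAddress(uintptr_t addr) {
  return addr & ~kPageAlignmentMask;  // round down to the page boundary
}

int main() {
  uintptr_t page_start = 16 * kPageSize;
  uintptr_t interior = page_start + 12345;
  assert(PageFromAddress(interior) == page_start);
  return 0;
}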

  bool is_anchor() { return IsFlagSet(Page::ANCHOR); }

  intptr_t wasted_memory() { return wasted_memory_.Value(); }
  void add_wasted_memory(intptr_t waste) { wasted_memory_.Increment(waste); }
  intptr_t available_in_free_list() { return available_in_free_list_.Value(); }
  void add_available_in_free_list(intptr_t available) {
    available_in_free_list_.Increment(available);
  }

+ size_t ShrinkToHighWaterMark();
+
#ifdef DEBUG
  void Print();
#endif  // DEBUG

 private:
  enum InitializationMode { kFreeMemory, kDoNotFreeMemory };

  template <InitializationMode mode = kFreeMemory>
  static inline Page* Initialize(Heap* heap, MemoryChunk* chunk,
                                 Executability executable, PagedSpace* owner);
(...skipping 472 matching lines...)
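This hunk adds ShrinkToHighWaterMark(), the per-page half of the shrinking this patch introduces. The rough standalone model below only illustrates what such a step could compute, under the assumption that the freeable tail is the part of the page's area above its allocation high water mark, rounded to an OS commit granularity; the real accounting lives in spaces.cc and may differ.

// Rough standalone model (not the V8 implementation): given a page's area
// size and the highest offset ever allocated on it, the freeable tail is the
// unused part rounded down to an assumed commit granularity.
#include <cassert>
#include <cstddef>

constexpr size_t kCommitGranularity = 4 * 1024;  // assumed OS page size

size_t FreeableTail(size_t area_size, size_t high_water_mark) {
  if (high_water_mark >= area_size) return 0;
  size_t unused = area_size - high_water_mark;
  return unused - (unused % kCommitGranularity);  // keep partial OS pages
}

int main() {
  // 500 KB of usable area of which only 90 KB was ever used.
  assert(FreeableTail(500 * 1024, 90 * 1024) == 408 * 1024);
  return 0;
}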
  void ReportStatistics();
#endif

  // Returns a MemoryChunk in which the memory region from commit_area_size to
  // reserve_area_size of the chunk area is reserved but not committed, it
  // could be committed later by calling MemoryChunk::CommitArea.
  MemoryChunk* AllocateChunk(intptr_t reserve_area_size,
                             intptr_t commit_area_size,
                             Executability executable, Space* space);

+ void ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink);
+
  Address ReserveAlignedMemory(size_t requested, size_t alignment,
                               base::VirtualMemory* controller);
  Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
                                size_t alignment, Executability executable,
                                base::VirtualMemory* controller);

  bool CommitMemory(Address addr, size_t size, Executability executable);

  void FreeMemory(base::VirtualMemory* reservation, Executability executable);
  void PartialFreeMemory(MemoryChunk* chunk, Address start_free);
(...skipping 861 matching lines...)
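AllocateChunk above distinguishes reserved from committed memory: the region between commit_area_size and reserve_area_size is only reserved, and can be committed later via MemoryChunk::CommitArea. The sketch below shows that reserve-then-commit pattern using plain POSIX mmap/mprotect on Linux; it is not base::VirtualMemory, and the sizes are illustrative.

// POSIX/Linux-only sketch of the reserve-vs-commit split described above:
// reserve address space with PROT_NONE, then commit only a prefix by making
// it readable and writable. Not V8's base::VirtualMemory.
#include <sys/mman.h>
#include <cassert>
#include <cstddef>

int main() {
  const size_t reserve_size = 512 * 1024;  // whole chunk reservation
  const size_t commit_size = 128 * 1024;   // usable (committed) prefix

  void* base = mmap(nullptr, reserve_size, PROT_NONE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(base != MAP_FAILED);

  // Commit the first commit_size bytes; the tail stays reserved only.
  int rc = mprotect(base, commit_size, PROT_READ | PROT_WRITE);
  assert(rc == 0);

  static_cast<char*>(base)[0] = 1;  // touching committed memory is fine

  munmap(base, reserve_size);
  return 0;
}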
  FreeList* free_list() { return &free_list_; }

  base::Mutex* mutex() { return &space_mutex_; }

  inline void UnlinkFreeListCategories(Page* page);
  inline intptr_t RelinkFreeListCategories(Page* page);

  iterator begin() { return iterator(anchor_.next_page()); }
  iterator end() { return iterator(&anchor_); }

+ // Shrink immortal immovable pages of the space to be exactly the size needed
+ // using the high water mark.
+ void ShrinkImmortalImmovablePages();
+
 protected:
  // PagedSpaces that should be included in snapshots have different, i.e.,
  // smaller, initial pages.
  virtual bool snapshotable() { return true; }

  bool HasPages() { return anchor_.next_page() != &anchor_; }

  // Cleans up the space, frees all pages in this space except those belonging
  // to the initial chunk, uncommits addresses in the initial chunk.
  void TearDown();
(...skipping 832 matching lines...)
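This hunk adds ShrinkImmortalImmovablePages() next to the space's begin()/end() page iterators, which suggests a walk over all pages of the space that shrinks the immortal immovable ones. The toy model below illustrates that traversal pattern with stand-in types rather than V8's Page and PagedSpace; the flag check and sizes are assumptions for the example.

// Toy model (not V8 code) of the traversal implied by the begin()/end()
// iterators and ShrinkImmortalImmovablePages() declared above: walk every
// page of the space and shrink the ones tagged as never-evacuate.
#include <cstddef>
#include <cstdio>
#include <vector>

struct ToyPage {
  size_t committed;
  size_t high_water_mark;
  bool never_evacuate;  // stands in for the NEVER_EVACUATE flag
  size_t ShrinkToHighWaterMark() {
    size_t freed =
        committed > high_water_mark ? committed - high_water_mark : 0;
    committed -= freed;
    return freed;
  }
};

struct ToySpace {
  std::vector<ToyPage> pages;
  std::vector<ToyPage>::iterator begin() { return pages.begin(); }
  std::vector<ToyPage>::iterator end() { return pages.end(); }

  size_t ShrinkImmortalImmovablePages() {
    size_t total_freed = 0;
    for (ToyPage& page : *this) {  // uses begin()/end() above
      if (page.never_evacuate) total_freed += page.ShrinkToHighWaterMark();
    }
    return total_freed;
  }
};

int main() {
  ToySpace space{
      {{500 * 1024, 80 * 1024, true}, {500 * 1024, 500 * 1024, false}}};
  printf("freed %zu bytes\n", space.ShrinkImmortalImmovablePages());
  return 0;
}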
      count = 0;
    }
    // Must be small, since an iteration is used for lookup.
    static const int kMaxComments = 64;
  };
#endif
}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_SPACES_H_
