Chromium Code Reviews

Unified Diff: src/heap/spaces.h

Issue 1900423002: [heap] Merge NewSpacePage into Page (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Addressed comments (created 4 years, 8 months ago)
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #ifndef V8_HEAP_SPACES_H_
 #define V8_HEAP_SPACES_H_

 #include "src/allocation.h"
 #include "src/atomic-utils.h"
 #include "src/base/atomicops.h"
 #include "src/base/bits.h"
 #include "src/base/platform/mutex.h"
 #include "src/flags.h"
 #include "src/hashmap.h"
 #include "src/list.h"
 #include "src/objects.h"
 #include "src/utils.h"

 namespace v8 {
 namespace internal {

 class AllocationInfo;
 class AllocationObserver;
 class CompactionSpace;
 class CompactionSpaceCollection;
 class FreeList;
 class Isolate;
 class MemoryAllocator;
 class MemoryChunk;
-class NewSpacePage;
 class Page;
 class PagedSpace;
 class SemiSpace;
 class SkipList;
 class SlotsBuffer;
 class SlotSet;
 class TypedSlotSet;
 class Space;

 // -----------------------------------------------------------------------------
(...skipping 397 matching lines...)
     NEVER_ALLOCATE_ON_PAGE,

     // The memory chunk is already logically freed, however the actual freeing
     // still has to be performed.
     PRE_FREED,

     // |COMPACTION_WAS_ABORTED|: Indicates that the compaction in this page
     // has been aborted and needs special handling by the sweeper.
     COMPACTION_WAS_ABORTED,

+    // |ANCHOR|: Flag is set if page is an anchor.
+    ANCHOR,
+
     // Last flag, keep at bottom.
     NUM_MEMORY_CHUNK_FLAGS
   };

   // |kSweepingDone|: The page state when sweeping is complete or sweeping must
   // not be performed on that page. Sweeper threads that are done with their
   // work will set this value and not touch the page anymore.
   // |kSweepingPending|: This page is ready for parallel sweeping.
   // |kSweepingInProgress|: This page is currently being swept by a sweeper
   // thread.
   enum ConcurrentSweepingState {
(...skipping 91 matching lines...)
   static intptr_t OffsetInPage(Address a) {
     return reinterpret_cast<intptr_t>(a) & kPageAlignmentMask;
   }

   static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr);

   static inline void UpdateHighWaterMark(Address mark) {
     if (mark == nullptr) return;
     // Need to subtract one from the mark because when a chunk is full the
     // top points to the next address after the chunk, which effectively belongs
-    // to another chunk. See the comment to Page::FromAllocationTop.
+    // to another chunk. See the comment to Page::FromAllocationAreaAddress.
     MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
     intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
     intptr_t old_mark = 0;
     do {
       old_mark = chunk->high_water_mark_.Value();
     } while ((new_mark > old_mark) &&
              !chunk->high_water_mark_.TrySetValue(old_mark, new_mark));
   }

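The do/while above is a lock-free "monotonic max" update: it only ever raises the high water mark, and it retries when another thread won the race between the read and the compare-and-swap. A minimal standalone sketch of the same pattern, using std::atomic in place of V8's AtomicValue (an assumption made purely for illustration):

#include <atomic>
#include <cstdint>

// Raise |mark| to |candidate| unless a concurrent thread already stored
// something larger; the value never decreases.
inline void RaiseToAtLeast(std::atomic<intptr_t>* mark, intptr_t candidate) {
  intptr_t current = mark->load(std::memory_order_relaxed);
  while (candidate > current &&
         !mark->compare_exchange_weak(current, candidate)) {
    // compare_exchange_weak reloaded |current| on failure; the loop
    // condition rechecks whether our candidate still wins.
  }
}
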
+  static bool IsValid(MemoryChunk* chunk) { return chunk != nullptr; }
+
   Address address() { return reinterpret_cast<Address>(this); }

-  bool is_valid() { return address() != NULL; }
-
   base::Mutex* mutex() { return mutex_; }

   bool Contains(Address addr) {
     return addr >= area_start() && addr < area_end();
   }

   // Checks whether |addr| can be a limit of addresses in this page. It's a
   // limit if it's in the page, or if it's just after the last byte of the page.
   bool ContainsLimit(Address addr) {
     return addr >= area_start() && addr <= area_end();
(...skipping 235 matching lines...)

   friend class MemoryAllocator;
   friend class MemoryChunkValidator;
 };

 // -----------------------------------------------------------------------------
 // A page is a memory chunk of size 1MB. Large object pages may be larger.
 //
 // The only way to get a page pointer is by calling factory methods:
 //   Page* p = Page::FromAddress(addr); or
-//   Page* p = Page::FromAllocationTop(top);
+//   Page* p = Page::FromAllocationAreaAddress(address);
 class Page : public MemoryChunk {
  public:
-  static inline Page* Convert(NewSpacePage* old_page, PagedSpace* new_owner);
+  static const intptr_t kCopyAllFlags = ~0;
+
+  // Page flags copied from from-space to to-space when flipping semispaces.
+  static const intptr_t kCopyOnFlipFlagsMask =
+      (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
+      (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+
+  // Maximum object size that gets allocated into regular pages. Objects larger
+  // than that size are allocated in large object space and are never moved in
+  // memory. This also applies to new space allocation, since objects are never
+  // migrated from new space to large object space. Takes double alignment into
+  // account.
+  // TODO(hpayer): This limit should be way smaller but we currently have
+  // short-lived objects >256K.
+  static const int kMaxRegularHeapObjectSize = 600 * KB;
+
+  static inline Page* ConvertNewToOld(Page* old_page, PagedSpace* new_owner);

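kCopyOnFlipFlagsMask selects just the two write-barrier-relevant flag bits; when the semispaces are flipped, only the bits inside the mask are taken over from the source page (SemiSpace::FixPagesFlags further down performs the copy). A sketch of the masked-copy bit arithmetic involved, with illustrative names rather than the patch's actual code:

// Keep the destination's bits outside |mask| and adopt the source's bits
// inside it -- the standard masked-copy idiom.
inline intptr_t CopyFlagsUnderMask(intptr_t dst_flags, intptr_t src_flags,
                                   intptr_t mask) {
  return (dst_flags & ~mask) | (src_flags & mask);
}
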
833 // Returns the page containing a given address. The address ranges 851 // Returns the page containing a given address. The address ranges
834 // from [page_addr .. page_addr + kPageSize[ 852 // from [page_addr .. page_addr + kPageSize[. This only works if the object
835 // This only works if the object is in fact in a page. See also MemoryChunk:: 853 // is in fact in a page.
836 // FromAddress() and FromAnyAddress(). 854 static Page* FromAddress(Address addr) {
837 INLINE(static Page* FromAddress(Address a)) { 855 return reinterpret_cast<Page*>(OffsetFrom(addr) & ~kPageAlignmentMask);
838 return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
839 } 856 }
840 857
841 // Only works for addresses in pointer spaces, not code space. 858 // Returns the page containing the address provided. The address can
859 // potentially point righter after the page. To be also safe for tagged values
860 // we subtract a hole word. The valid address ranges from
861 // [page_addr + kObjectStartOffset .. page_addr + kPageSize + kPointerSize].
862 static Page* FromAllocationAreaAddress(Address address) {
863 return Page::FromAddress(address - kPointerSize);
864 }
865
866 // Checks if address1 and address2 are on the same new space page.
867 static bool OnSamePage(Address address1, Address address2) {
868 return Page::FromAddress(address1) == Page::FromAddress(address2);
869 }
870
871 // Checks whether an address is page aligned.
872 static bool IsAlignedToPageSize(Address addr) {
873 return (OffsetFrom(addr) & kPageAlignmentMask) == 0;
874 }
875
876 static bool IsAtObjectStart(Address addr) {
877 return (reinterpret_cast<intptr_t>(addr) & kPageAlignmentMask) ==
878 kObjectStartOffset;
879 }
880
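All of these helpers rely on pages being power-of-two sized and aligned, so the page start can be recovered by masking off the low address bits; FromAllocationAreaAddress additionally steps back one word so a top/limit pointing one past the page still maps to that page. A self-contained sketch of the arithmetic under illustrative constants (the real kPageSize and kPageAlignmentMask are defined elsewhere in this header):

#include <cassert>
#include <cstdint>

constexpr uintptr_t kDemoPageSize = uintptr_t{1} << 20;  // 1MB, per the comment above
constexpr uintptr_t kDemoAlignmentMask = kDemoPageSize - 1;

uintptr_t DemoPageStart(uintptr_t addr) { return addr & ~kDemoAlignmentMask; }

int main() {
  uintptr_t page = 5 * kDemoPageSize;                   // a page-aligned base
  assert(DemoPageStart(page + 1234) == page);           // interior address
  assert(DemoPageStart(page + kDemoPageSize) != page);  // one-past: next page
  // The FromAllocationAreaAddress trick: subtract a word before masking.
  assert(DemoPageStart(page + kDemoPageSize - sizeof(void*)) == page);
  return 0;
}
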
   inline static Page* FromAnyPointerAddress(Heap* heap, Address addr);

-  // Returns the page containing an allocation top. Because an allocation
-  // top address can be the upper bound of the page, we need to subtract
-  // it with kPointerSize first. The address ranges from
-  // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
-  INLINE(static Page* FromAllocationTop(Address top)) {
-    Page* p = FromAddress(top - kPointerSize);
-    return p;
-  }
+  // Create a Page object that is only used as anchor for the doubly-linked
+  // list of real pages.
+  explicit Page(Space* owner) { InitializeAsAnchor(owner); }

-  // Returns the next page in the chain of pages owned by a space.
-  inline Page* next_page() {
-    DCHECK(next_chunk()->owner() == owner());
-    return static_cast<Page*>(next_chunk());
-  }
-  inline Page* prev_page() {
-    DCHECK(prev_chunk()->owner() == owner());
-    return static_cast<Page*>(prev_chunk());
-  }
-  inline void set_next_page(Page* page);
-  inline void set_prev_page(Page* page);
-
-  // Checks whether an address is page aligned.
-  static bool IsAlignedToPageSize(Address a) {
-    return 0 == (OffsetFrom(a) & kPageAlignmentMask);
-  }
+  inline void MarkNeverAllocateForTesting();
+  inline void MarkEvacuationCandidate();
+  inline void ClearEvacuationCandidate();
+
+  Page* next_page() { return static_cast<Page*>(next_chunk()); }
+  Page* prev_page() { return static_cast<Page*>(prev_chunk()); }
+  void set_next_page(Page* page) { set_next_chunk(page); }
+  void set_prev_page(Page* page) { set_prev_chunk(page); }
+
+  template <typename Callback>
+  inline void ForAllFreeListCategories(Callback callback) {
+    for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+      callback(&categories_[i]);
+    }
+  }

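ForAllFreeListCategories accepts any callable, so call sites can pass a lambda per category. A hypothetical usage sketch (ResetCategory is an assumed helper for illustration, not an API introduced by this patch):

void ResetAllCategories(Page* page) {
  page->ForAllFreeListCategories([](FreeListCategory* category) {
    ResetCategory(category);  // assumed helper, one call per category
  });
}
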
   // Returns the offset of a given address to this page.
-  INLINE(int Offset(Address a)) {
+  inline int Offset(Address a) {
     int offset = static_cast<int>(a - address());
     return offset;
   }

-  // Returns the address for a given offset to the this page.
+  // Returns the address for a given offset in this page.
   Address OffsetToAddress(int offset) {
     DCHECK_PAGE_OFFSET(offset);
     return address() + offset;
   }

-  // ---------------------------------------------------------------------
-
-  // Maximum object size that gets allocated into regular pages. Objects larger
-  // than that size are allocated in large object space and are never moved in
-  // memory. This also applies to new space allocation, since objects are never
-  // migrated from new space to large object space. Takes double alignment into
-  // account.
-  // TODO(hpayer): This limit should be way smaller but we currently have
-  // short living objects >256K.
-  static const int kMaxRegularHeapObjectSize = 600 * KB;
-
-  inline void ClearGCFields();
-
-  void InitializeAsAnchor(PagedSpace* owner);
-
   // WaitUntilSweepingCompleted only works when concurrent sweeping is in
   // progress. In particular, when we know that right before this call a
   // sweeper thread was sweeping this page.
   void WaitUntilSweepingCompleted() {
     mutex_->Lock();
     mutex_->Unlock();
     DCHECK(SweepingDone());
   }

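The empty Lock()/Unlock() pair acts as a barrier only because a sweeper holds the page mutex for the entire sweep; acquiring the mutex therefore cannot succeed until the sweep has finished. A sketch of the assumed sweeper side of that protocol (not code from this patch; the SetValue call on the state is an assumption about V8's AtomicValue interface):

void SweepPage(Page* page) {
  base::LockGuard<base::Mutex> guard(page->mutex());  // held for entire sweep
  // ... sweep the page ...
  // Publish completion before releasing the mutex.
  page->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
}
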
   bool SweepingDone() {
     return concurrent_sweeping_state().Value() == kSweepingDone;
   }

   void ResetFreeListStatistics();

   int LiveBytesFromFreeList() {
     return static_cast<int>(area_size() - wasted_memory() -
                             available_in_free_list());
   }

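LiveBytesFromFreeList derives live bytes by subtraction: whatever part of the usable area is neither wasted to fragmentation nor sitting on a free list must be occupied by live objects. A worked example of the arithmetic with made-up numbers:

#include <cstdint>
#include <cstdio>

int main() {
  intptr_t area_size = 1000 * 1024;              // usable bytes on the page
  intptr_t wasted_memory = 8 * 1024;             // unusable fragments
  intptr_t available_in_free_list = 600 * 1024;  // bytes on free lists
  intptr_t live = area_size - wasted_memory - available_in_free_list;
  std::printf("live bytes: %lld\n", static_cast<long long>(live));  // 401408
  return 0;
}
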
-  template <typename Callback>
-  inline void ForAllFreeListCategories(Callback callback) {
-    for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
-      callback(&categories_[i]);
-    }
-  }
-
   FreeListCategory* free_list_category(FreeListCategoryType type) {
     return &categories_[type];
   }

-#define FRAGMENTATION_STATS_ACCESSORS(type, name) \
-  type name() { return name##_.Value(); } \
-  void set_##name(type name) { name##_.SetValue(name); } \
-  void add_##name(type name) { name##_.Increment(name); }
-
-  FRAGMENTATION_STATS_ACCESSORS(intptr_t, wasted_memory)
-  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_free_list)
-
-#undef FRAGMENTATION_STATS_ACCESSORS
+  bool is_anchor() { return IsFlagSet(Page::ANCHOR); }
+
+  intptr_t wasted_memory() { return wasted_memory_.Value(); }
+  void add_wasted_memory(intptr_t waste) { wasted_memory_.Increment(waste); }
+  intptr_t available_in_free_list() { return available_in_free_list_.Value(); }
+  void add_available_in_free_list(intptr_t available) {
+    available_in_free_list_.Increment(available);
+  }

 #ifdef DEBUG
   void Print();
 #endif  // DEBUG

-  inline void MarkNeverAllocateForTesting();
-  inline void MarkEvacuationCandidate();
-  inline void ClearEvacuationCandidate();
-
  private:
   enum InitializationMode { kFreeMemory, kDoNotFreeMemory };

   template <InitializationMode mode = kFreeMemory>
   static inline Page* Initialize(Heap* heap, MemoryChunk* chunk,
                                  Executability executable, PagedSpace* owner);
+  static inline Page* Initialize(Heap* heap, MemoryChunk* chunk,
+                                 Executability executable, SemiSpace* owner);

   inline void InitializeFreeListCategories();

+  void InitializeAsAnchor(Space* owner);
+
   friend class MemoryAllocator;
 };

-
 class LargePage : public MemoryChunk {
  public:
   HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); }

   inline LargePage* next_page() {
     return static_cast<LargePage*>(next_chunk());
   }

   inline void set_next_page(LargePage* page) { set_next_chunk(page); }

(...skipping 299 matching lines...)

   explicit MemoryAllocator(Isolate* isolate);

   // Initializes its internal bookkeeping structures.
   // Max capacity of the total space and executable memory limit.
   bool SetUp(intptr_t max_capacity, intptr_t capacity_executable,
              intptr_t code_range_size);

   void TearDown();

-  // Allocates either Page or NewSpacePage from the allocator. AllocationMode
-  // is used to indicate whether pooled allocation, which only works for
-  // MemoryChunk::kPageSize, should be tried first.
-  template <typename PageType, MemoryAllocator::AllocationMode mode = kRegular,
-            typename SpaceType>
-  PageType* AllocatePage(intptr_t size, SpaceType* owner,
-                         Executability executable);
+  // Allocates a Page from the allocator. AllocationMode is used to indicate
+  // whether pooled allocation, which only works for MemoryChunk::kPageSize,
+  // should be tried first.
+  template <MemoryAllocator::AllocationMode alloc_mode = kRegular,
+            typename SpaceType>
+  Page* AllocatePage(intptr_t size, SpaceType* owner, Executability executable);
+
+  LargePage* AllocateLargePage(intptr_t size, LargeObjectSpace* owner,
+                               Executability executable);

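With the merge, the PageType template parameter disappears: both old-space and new-space allocations now produce a plain Page, and only the allocation mode and the owning space type vary. Hypothetical call sites might look as follows (the actual call sites live in spaces.cc and may differ; allocator, paged_space, and semi_space are illustrative variables):

// Regular allocation for an old-space page (alloc_mode defaults to kRegular):
Page* old_space_page = allocator->AllocatePage(
    Page::kAllocatableMemory, paged_space, NOT_EXECUTABLE);

// Pooled allocation for a new-space page:
Page* new_space_page = allocator->AllocatePage<MemoryAllocator::kPooled>(
    MemoryChunk::kAllocatableMemory, semi_space, NOT_EXECUTABLE);
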
   // PreFree logically frees the object, i.e., it takes care of the size
   // bookkeeping and calls the allocation callback.
   void PreFreeMemory(MemoryChunk* chunk);

   // FreeMemory can be called concurrently when PreFree was executed before.
   void PerformFreeMemory(MemoryChunk* chunk);

   // Free is a wrapper method. For kRegular AllocationMode it calls PreFree and
   // PerformFreeMemory together. For kPooled it will dispatch to pooled free.
(...skipping 290 matching lines...)
   }

   INLINE(Address limit()) const {
     return limit_;
   }

   Address* limit_address() { return &limit_; }

 #ifdef DEBUG
   bool VerifyPagedAllocation() {
-    return (Page::FromAllocationTop(top_) == Page::FromAllocationTop(limit_)) &&
+    return (Page::FromAllocationAreaAddress(top_) ==
+            Page::FromAllocationAreaAddress(limit_)) &&
            (top_ <= limit_);
   }
 #endif

  private:
   // Current allocation top.
   Address top_;
   // Current allocation limit.
   Address limit_;
 };
(...skipping 690 matching lines...)
  public:
   HistogramInfo() : NumberAndSizeInfo() {}

   const char* name() { return name_; }
   void set_name(const char* name) { name_ = name; }

  private:
   const char* name_;
 };

-
 enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 };

-
-class NewSpacePage : public MemoryChunk {
- public:
-  static bool IsAtStart(Address addr) {
-    return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) ==
-           kObjectStartOffset;
-  }
-
-  static bool IsAtEnd(Address addr) {
-    return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0;
-  }
-
-  // Finds the NewSpacePage containing the given address.
-  static inline NewSpacePage* FromAddress(Address address_in_page) {
-    Address page_start =
-        reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address_in_page) &
-                                  ~Page::kPageAlignmentMask);
-    NewSpacePage* page = reinterpret_cast<NewSpacePage*>(page_start);
-    return page;
-  }
-
-  // Find the page for a limit address. A limit address is either an address
-  // inside a page, or the address right after the last byte of a page.
-  static inline NewSpacePage* FromLimit(Address address_limit) {
-    return NewSpacePage::FromAddress(address_limit - 1);
-  }
-
-  // Checks if address1 and address2 are on the same new space page.
-  static inline bool OnSamePage(Address address1, Address address2) {
-    return NewSpacePage::FromAddress(address1) ==
-           NewSpacePage::FromAddress(address2);
-  }
-
-  inline NewSpacePage* next_page() {
-    return static_cast<NewSpacePage*>(next_chunk());
-  }
-
-  inline void set_next_page(NewSpacePage* page) { set_next_chunk(page); }
-
-  inline NewSpacePage* prev_page() {
-    return static_cast<NewSpacePage*>(prev_chunk());
-  }
-
-  inline void set_prev_page(NewSpacePage* page) { set_prev_chunk(page); }
-
-  SemiSpace* semi_space() { return reinterpret_cast<SemiSpace*>(owner()); }
-
-  bool is_anchor() { return !this->InNewSpace(); }
-
- private:
-  static inline NewSpacePage* Initialize(Heap* heap, MemoryChunk* chunk,
-                                         Executability executable,
-                                         SemiSpace* owner);
-
-  // GC related flags copied from from-space to to-space when
-  // flipping semispaces.
-  static const intptr_t kCopyOnFlipFlagsMask =
-      (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
-      (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
-
-  static const intptr_t kCopyAllFlags = ~0;
-
-  // Create a NewSpacePage object that is only used as anchor
-  // for the doubly-linked list of real pages.
-  explicit NewSpacePage(SemiSpace* owner) { InitializeAsAnchor(owner); }
-
-  // Initialize a fake NewSpacePage used as sentinel at the ends
-  // of a doubly-linked list of real NewSpacePages.
-  // Only uses the prev/next links, and sets flags to not be in new-space.
-  void InitializeAsAnchor(SemiSpace* owner);
-
-  friend class MemoryAllocator;
-  friend class SemiSpace;
-  friend class SemiSpaceIterator;
-};
-
-
 // -----------------------------------------------------------------------------
 // SemiSpace in young generation
 //
 // A SemiSpace is a contiguous chunk of memory holding page-like memory chunks.
 // The mark-compact collector uses the memory of the first page in the from
 // space as a marking stack when tracing live objects.
 class SemiSpace : public Space {
  public:
   static void Swap(SemiSpace* from, SemiSpace* to);

(...skipping 28 matching lines...)
   // must be more than the amount of used memory in the semispace and less
   // than the current capacity.
   bool ShrinkTo(int new_capacity);

   // Returns the start address of the first page of the space.
   Address space_start() {
     DCHECK_NE(anchor_.next_page(), anchor());
     return anchor_.next_page()->area_start();
   }

-  NewSpacePage* first_page() { return anchor_.next_page(); }
-  NewSpacePage* current_page() { return current_page_; }
+  Page* first_page() { return anchor_.next_page(); }
+  Page* current_page() { return current_page_; }

   // Returns one past the end address of the space.
   Address space_end() { return anchor_.prev_page()->area_end(); }

   // Returns the start address of the current page of the space.
   Address page_low() { return current_page_->area_start(); }

   // Returns one past the end address of the current page of the space.
   Address page_high() { return current_page_->area_end(); }

   bool AdvancePage() {
-    NewSpacePage* next_page = current_page_->next_page();
+    Page* next_page = current_page_->next_page();
     if (next_page == anchor()) return false;
     current_page_ = next_page;
     return true;
   }

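AdvancePage is how a bump-pointer allocator moves on to the next page of the semispace once the current page is exhausted, and the anchor page doubles as the list's end marker. A sketch of the assumed caller pattern (the real logic lives in NewSpace, not in this header):

// Returns false when the semispace is fully used.
bool MoveToNextPage(SemiSpace* space, Address* top, Address* limit) {
  if (!space->AdvancePage()) return false;  // hit the anchor: no pages left
  *top = space->page_low();     // new bump pointer
  *limit = space->page_high();  // one past the usable area of the page
  return true;
}
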
   // Resets the space to using the first page.
   void Reset();

-  void ReplaceWithEmptyPage(NewSpacePage* page);
+  void ReplaceWithEmptyPage(Page* page);

   // Age mark accessors.
   Address age_mark() { return age_mark_; }
   void set_age_mark(Address mark);

   // Returns the current capacity of the semispace.
   int current_capacity() { return current_capacity_; }

   // Returns the maximum capacity of the semispace.
   int maximum_capacity() { return maximum_capacity_; }
(...skipping 30 matching lines...)
 #else
   // Do nothing.
   inline static void AssertValidRange(Address from, Address to) {}
 #endif

 #ifdef VERIFY_HEAP
   virtual void Verify();
 #endif

  private:
-  void RewindPages(NewSpacePage* start, int num_pages);
+  void RewindPages(Page* start, int num_pages);

-  inline NewSpacePage* anchor() { return &anchor_; }
+  inline Page* anchor() { return &anchor_; }

   // Copies the flags into the masked positions on all pages in the space.
   void FixPagesFlags(intptr_t flags, intptr_t flag_mask);

   // The currently committed space capacity.
   int current_capacity_;

   // The maximum capacity that can be used by this space.
   int maximum_capacity_;

   // The minimum capacity for the space. A space cannot shrink below this size.
   int minimum_capacity_;

   // Used to govern object promotion during mark-compact collection.
   Address age_mark_;

   bool committed_;
   SemiSpaceId id_;

-  NewSpacePage anchor_;
-  NewSpacePage* current_page_;
+  Page anchor_;
+  Page* current_page_;

   friend class SemiSpaceIterator;
   friend class NewSpacePageIterator;
 };


 // A SemiSpaceIterator is an ObjectIterator that iterates over the active
 // semispace of the heap's new space. It iterates over the objects in the
 // semispace from a given start address (defaulting to the bottom of the
 // semispace) to the top of the semispace. New objects allocated after the
(...skipping 27 matching lines...)

   // Make an iterator that runs over all pages in the given semispace,
   // even those not used in allocation.
   explicit inline NewSpacePageIterator(SemiSpace* space);

   // Make iterator that iterates from the page containing start
   // to the page that contains limit in the same semispace.
   inline NewSpacePageIterator(Address start, Address limit);

   inline bool has_next();
-  inline NewSpacePage* next();
+  inline Page* next();

  private:
-  NewSpacePage* prev_page_;  // Previous page returned.
+  Page* prev_page_;  // Previous page returned.
   // Next page that will be returned. Cached here so that we can use this
   // iterator for operations that deallocate pages.
-  NewSpacePage* next_page_;
+  Page* next_page_;
   // Last page returned.
-  NewSpacePage* last_page_;
+  Page* last_page_;
 };


 // -----------------------------------------------------------------------------
 // The young generation space.
 //
 // The new space consists of a contiguous pair of semispaces. It simply
 // forwards most functions to the appropriate semispace.

 class NewSpace : public Space {
(...skipping 29 matching lines...)

   // Grow the capacity of the semispaces. Assumes that they are not at
   // their maximum capacity.
   void Grow();

   // Shrink the capacity of the semispaces.
   void Shrink();

   // Return the allocated bytes in the active semispace.
   intptr_t Size() override {
-    return pages_used_ * NewSpacePage::kAllocatableMemory +
+    return pages_used_ * Page::kAllocatableMemory +
            static_cast<int>(top() - to_space_.page_low());
   }

   // The same, but returning an int. We have to have the one that returns
   // intptr_t because it is inherited, but if we know we are dealing with the
   // new space, which can't get as big as the other spaces, then this is useful:
   int SizeAsInt() { return static_cast<int>(Size()); }

   // Return the allocatable capacity of a semispace.
   intptr_t Capacity() {
     SLOW_DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
     return (to_space_.current_capacity() / Page::kPageSize) *
-           NewSpacePage::kAllocatableMemory;
+           Page::kAllocatableMemory;
   }

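Size() and Capacity() count only allocatable bytes: each fully used page contributes Page::kAllocatableMemory (the page size minus the page header), plus the bump-pointer offset in the current page, and Capacity() scales the raw committed capacity by pages to exclude headers. A worked example with an assumed header size:

#include <cstdint>

int main() {
  const intptr_t kPageSize = 1 << 20;   // 1MB
  const intptr_t kHeader = 16 * 1024;   // assumed page header size
  const intptr_t kAllocatable = kPageSize - kHeader;

  intptr_t pages_used = 3;              // full pages before the current one
  intptr_t in_current = 100 * 1024;     // top() - page_low()
  intptr_t size = pages_used * kAllocatable + in_current;

  // 8 committed pages yield 8 * kAllocatable usable bytes, not 8 * kPageSize.
  intptr_t capacity = (8 * kPageSize / kPageSize) * kAllocatable;
  return size <= capacity ? 0 : 1;      // Available() == capacity - size
}
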
   // Return the current size of a semispace, allocatable and non-allocatable
   // memory.
   intptr_t TotalCapacity() {
     DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
     return to_space_.current_capacity();
   }

   // Committed memory for NewSpace is the committed memory of both semi-spaces
   // combined.
   intptr_t CommittedMemory() override {
     return from_space_.CommittedMemory() + to_space_.CommittedMemory();
   }

   intptr_t MaximumCommittedMemory() override {
     return from_space_.MaximumCommittedMemory() +
            to_space_.MaximumCommittedMemory();
   }

   // Approximate amount of physical memory committed for this space.
   size_t CommittedPhysicalMemory() override;

   // Return the available bytes without growing.
   intptr_t Available() override { return Capacity() - Size(); }

   inline size_t AllocatedSinceLastGC();

-  void ReplaceWithEmptyPage(NewSpacePage* page) {
+  void ReplaceWithEmptyPage(Page* page) {
     // This method is called after flipping the semispace.
     DCHECK(page->InFromSpace());
     from_space_.ReplaceWithEmptyPage(page);
   }

   // Return the maximum capacity of a semispace.
   int MaximumCapacity() {
     DCHECK(to_space_.maximum_capacity() == from_space_.maximum_capacity());
     return to_space_.maximum_capacity();
   }
(...skipping 421 matching lines...)
     count = 0;
   }
   // Must be small, since an iteration is used for lookup.
   static const int kMaxComments = 64;
 };
 #endif
 }  // namespace internal
 }  // namespace v8

 #endif  // V8_HEAP_SPACES_H_
