Chromium Code Reviews

Side by Side Diff: src/heap/spaces.h

Issue 1012023002: Merge old data and pointer space. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 5 years, 9 months ago
OLD | NEW
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #ifndef V8_HEAP_SPACES_H_ 5 #ifndef V8_HEAP_SPACES_H_
6 #define V8_HEAP_SPACES_H_ 6 #define V8_HEAP_SPACES_H_
7 7
8 #include "src/allocation.h" 8 #include "src/allocation.h"
9 #include "src/base/atomicops.h" 9 #include "src/base/atomicops.h"
10 #include "src/base/bits.h" 10 #include "src/base/bits.h"
(...skipping 25 matching lines...)
36 // There is a separate large object space for objects larger than 36 // There is a separate large object space for objects larger than
37 // Page::kMaxHeapObjectSize, so that they do not have to move during 37 // Page::kMaxHeapObjectSize, so that they do not have to move during
38 // collection. The large object space is paged. Pages in large object space 38 // collection. The large object space is paged. Pages in large object space
39 // may be larger than the page size. 39 // may be larger than the page size.
40 // 40 //
41 // A store-buffer based write barrier is used to keep track of intergenerational 41 // A store-buffer based write barrier is used to keep track of intergenerational
42 // references. See heap/store-buffer.h. 42 // references. See heap/store-buffer.h.
43 // 43 //
44 // During scavenges and mark-sweep collections we sometimes (after a store 44 // During scavenges and mark-sweep collections we sometimes (after a store
45 // buffer overflow) iterate intergenerational pointers without decoding heap 45 // buffer overflow) iterate intergenerational pointers without decoding heap
46 // object maps so if the page belongs to old pointer space or large object 46 // object maps so if the page belongs to old space or large object space
47 // space it is essential to guarantee that the page does not contain any 47 // it is essential to guarantee that the page does not contain any
48 // garbage pointers to new space: every pointer aligned word which satisfies 48 // garbage pointers to new space: every pointer aligned word which satisfies
49 // the Heap::InNewSpace() predicate must be a pointer to a live heap object in 49 // the Heap::InNewSpace() predicate must be a pointer to a live heap object in
50 // new space. Thus objects in old pointer and large object spaces should have a 50 // new space. Thus objects in old space and large object spaces should have a
51 // special layout (e.g. no bare integer fields). This requirement does not 51 // special layout (e.g. no bare integer fields). This requirement does not
52 // apply to map space which is iterated in a special fashion. However we still 52 // apply to map space which is iterated in a special fashion. However we still
53 // require pointer fields of dead maps to be cleaned. 53 // require pointer fields of dead maps to be cleaned.
54 // 54 //
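Note: the paragraph above is the key invariant behind store-buffer-overflow handling: because old-space and large-object pages contain no bare integer words that look like new-space addresses, a page can be scanned word by word without decoding object maps. A minimal, self-contained C++ sketch of that kind of raw scan, using toy types and a hypothetical contiguous new-space range check in place of the real Heap::InNewSpace() predicate:

#include <cstdint>
#include <vector>

using Address = uintptr_t;

// Hypothetical stand-in for the new-space range check; the real code uses
// Heap::InNewSpace() on a tagged pointer.
struct ToyNewSpaceRange {
  Address start;
  Address end;
  bool Contains(Address a) const { return a >= start && a < end; }
};

// Scan a page payload one pointer-aligned word at a time, with no map
// decoding. The invariant above guarantees that every word passing the range
// check is a pointer to a live new-space object, so collecting these slots
// is safe.
std::vector<Address*> CollectNewSpaceSlots(Address area_start, Address area_end,
                                           const ToyNewSpaceRange& new_space) {
  std::vector<Address*> slots;
  for (Address cur = area_start; cur < area_end; cur += sizeof(Address)) {
    Address* slot = reinterpret_cast<Address*>(cur);
    if (new_space.Contains(*slot)) slots.push_back(slot);
  }
  return slots;
}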
55 // To enable lazy cleaning of old space pages we can mark chunks of the page 55 // To enable lazy cleaning of old space pages we can mark chunks of the page
56 // as being garbage. Garbage sections are marked with a special map. These 56 // as being garbage. Garbage sections are marked with a special map. These
57 // sections are skipped when scanning the page, even if we are otherwise 57 // sections are skipped when scanning the page, even if we are otherwise
58 // scanning without regard for object boundaries. Garbage sections are chained 58 // scanning without regard for object boundaries. Garbage sections are chained
59 // together to form a free list after a GC. Garbage sections created outside 59 // together to form a free list after a GC. Garbage sections created outside
60 // of GCs by object truncation etc. may not be in the free list chain. Very 60 // of GCs by object truncation etc. may not be in the free list chain. Very
(...skipping 34 matching lines...)
95 class MemoryAllocator; 95 class MemoryAllocator;
96 class AllocationInfo; 96 class AllocationInfo;
97 class Space; 97 class Space;
98 class FreeList; 98 class FreeList;
99 class MemoryChunk; 99 class MemoryChunk;
100 100
101 class MarkBit { 101 class MarkBit {
102 public: 102 public:
103 typedef uint32_t CellType; 103 typedef uint32_t CellType;
104 104
105 inline MarkBit(CellType* cell, CellType mask, bool data_only) 105 inline MarkBit(CellType* cell, CellType mask) : cell_(cell), mask_(mask) {}
106 : cell_(cell), mask_(mask), data_only_(data_only) {}
107 106
108 inline CellType* cell() { return cell_; } 107 inline CellType* cell() { return cell_; }
109 inline CellType mask() { return mask_; } 108 inline CellType mask() { return mask_; }
110 109
111 #ifdef DEBUG 110 #ifdef DEBUG
112 bool operator==(const MarkBit& other) { 111 bool operator==(const MarkBit& other) {
113 return cell_ == other.cell_ && mask_ == other.mask_; 112 return cell_ == other.cell_ && mask_ == other.mask_;
114 } 113 }
115 #endif 114 #endif
116 115
117 inline void Set() { *cell_ |= mask_; } 116 inline void Set() { *cell_ |= mask_; }
118 inline bool Get() { return (*cell_ & mask_) != 0; } 117 inline bool Get() { return (*cell_ & mask_) != 0; }
119 inline void Clear() { *cell_ &= ~mask_; } 118 inline void Clear() { *cell_ &= ~mask_; }
120 119
121 inline bool data_only() { return data_only_; }
122 120
123 inline MarkBit Next() { 121 inline MarkBit Next() {
124 CellType new_mask = mask_ << 1; 122 CellType new_mask = mask_ << 1;
125 if (new_mask == 0) { 123 if (new_mask == 0) {
126 return MarkBit(cell_ + 1, 1, data_only_); 124 return MarkBit(cell_ + 1, 1);
127 } else { 125 } else {
128 return MarkBit(cell_, new_mask, data_only_); 126 return MarkBit(cell_, new_mask);
129 } 127 }
130 } 128 }
131 129
132 private: 130 private:
133 CellType* cell_; 131 CellType* cell_;
134 CellType mask_; 132 CellType mask_;
135 // This boolean indicates that the object is in a data-only space with no
136 // pointers. This enables some optimizations when marking.
137 // It is expected that this field is inlined and turned into control flow
138 // at the place where the MarkBit object is created.
139 bool data_only_;
140 }; 133 };
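Note: with data_only_ gone, a MarkBit is just a (cell, mask) pair, and Next() simply shifts the mask, rolling over to the next 32-bit cell when the shift overflows. A small usage sketch, assuming the MarkBit definition above is in scope (the bitmap storage here is illustrative):

#include <cstdint>

// Set every other one of the first 40 mark bits in a two-cell bitmap.
// Next() advances from bit 31 of cells[0] to bit 0 of cells[1] automatically.
void SetAlternateMarkBits(uint32_t* cells /* at least 2 cells, zeroed */) {
  MarkBit bit(cells, 1);            // bit 0 of the first cell
  for (int i = 0; i < 40; ++i) {
    if ((i & 1) == 0) bit.Set();    // *cell_ |= mask_
    bit = bit.Next();
  }
}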
141 134
142 135
143 // Bitmap is a sequence of cells each containing fixed number of bits. 136 // Bitmap is a sequence of cells each containing fixed number of bits.
144 class Bitmap { 137 class Bitmap {
145 public: 138 public:
146 static const uint32_t kBitsPerCell = 32; 139 static const uint32_t kBitsPerCell = 32;
147 static const uint32_t kBitsPerCellLog2 = 5; 140 static const uint32_t kBitsPerCellLog2 = 5;
148 static const uint32_t kBitIndexMask = kBitsPerCell - 1; 141 static const uint32_t kBitIndexMask = kBitsPerCell - 1;
149 static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte; 142 static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte;
(...skipping 30 matching lines...)
180 INLINE(MarkBit::CellType* cells()) { 173 INLINE(MarkBit::CellType* cells()) {
181 return reinterpret_cast<MarkBit::CellType*>(this); 174 return reinterpret_cast<MarkBit::CellType*>(this);
182 } 175 }
183 176
184 INLINE(Address address()) { return reinterpret_cast<Address>(this); } 177 INLINE(Address address()) { return reinterpret_cast<Address>(this); }
185 178
186 INLINE(static Bitmap* FromAddress(Address addr)) { 179 INLINE(static Bitmap* FromAddress(Address addr)) {
187 return reinterpret_cast<Bitmap*>(addr); 180 return reinterpret_cast<Bitmap*>(addr);
188 } 181 }
189 182
190 inline MarkBit MarkBitFromIndex(uint32_t index, bool data_only = false) { 183 inline MarkBit MarkBitFromIndex(uint32_t index) {
191 MarkBit::CellType mask = 1 << (index & kBitIndexMask); 184 MarkBit::CellType mask = 1 << (index & kBitIndexMask);
192 MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2); 185 MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2);
193 return MarkBit(cell, mask, data_only); 186 return MarkBit(cell, mask);
194 } 187 }
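Note: MarkBitFromIndex splits the bit index into a cell offset (index >> kBitsPerCellLog2) and a mask within that cell (1 << (index & kBitIndexMask)). A quick standalone check of the arithmetic, e.g. index 37 lands at bit 5 of cell 1:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t kBitsPerCellLog2 = 5;                  // 32 bits per cell
  const uint32_t kBitIndexMask = (1u << kBitsPerCellLog2) - 1;

  uint32_t index = 37;
  uint32_t cell_index = index >> kBitsPerCellLog2;      // 37 / 32 == 1
  uint32_t mask = 1u << (index & kBitIndexMask);        // 1 << (37 % 32) == 1 << 5
  assert(cell_index == 1);
  assert(mask == 1u << 5);
  return 0;
}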
195 188
196 static inline void Clear(MemoryChunk* chunk); 189 static inline void Clear(MemoryChunk* chunk);
197 190
198 static void PrintWord(uint32_t word, uint32_t himask = 0) { 191 static void PrintWord(uint32_t word, uint32_t himask = 0) {
199 for (uint32_t mask = 1; mask != 0; mask <<= 1) { 192 for (uint32_t mask = 1; mask != 0; mask <<= 1) {
200 if ((mask & himask) != 0) PrintF("["); 193 if ((mask & himask) != 0) PrintF("[");
201 PrintF((mask & word) ? "1" : "0"); 194 PrintF((mask & word) ? "1" : "0");
202 if ((mask & himask) != 0) PrintF("]"); 195 if ((mask & himask) != 0) PrintF("]");
203 } 196 }
(...skipping 159 matching lines...)
363 356
364 enum MemoryChunkFlags { 357 enum MemoryChunkFlags {
365 IS_EXECUTABLE, 358 IS_EXECUTABLE,
366 ABOUT_TO_BE_FREED, 359 ABOUT_TO_BE_FREED,
367 POINTERS_TO_HERE_ARE_INTERESTING, 360 POINTERS_TO_HERE_ARE_INTERESTING,
368 POINTERS_FROM_HERE_ARE_INTERESTING, 361 POINTERS_FROM_HERE_ARE_INTERESTING,
369 SCAN_ON_SCAVENGE, 362 SCAN_ON_SCAVENGE,
370 IN_FROM_SPACE, // Mutually exclusive with IN_TO_SPACE. 363 IN_FROM_SPACE, // Mutually exclusive with IN_TO_SPACE.
371 IN_TO_SPACE, // All pages in new space have one of these two set. 364 IN_TO_SPACE, // All pages in new space have one of these two set.
372 NEW_SPACE_BELOW_AGE_MARK, 365 NEW_SPACE_BELOW_AGE_MARK,
373 CONTAINS_ONLY_DATA,
374 EVACUATION_CANDIDATE, 366 EVACUATION_CANDIDATE,
375 RESCAN_ON_EVACUATION, 367 RESCAN_ON_EVACUATION,
376 NEVER_EVACUATE, // May contain immortal immutables. 368 NEVER_EVACUATE, // May contain immortal immutables.
377 369
378 // WAS_SWEPT indicates that marking bits have been cleared by the sweeper, 370 // WAS_SWEPT indicates that marking bits have been cleared by the sweeper,
379 // otherwise marking bits are still intact. 371 // otherwise marking bits are still intact.
380 WAS_SWEPT, 372 WAS_SWEPT,
381 373
382 // Large objects can have a progress bar in their page header. These objects 374
383 // are scanned in increments and will be kept black while being scanned. 375 // are scanned in increments and will be kept black while being scanned.
(...skipping 174 matching lines...)
558 550
559 void SetArea(Address area_start, Address area_end) { 551 void SetArea(Address area_start, Address area_end) {
560 area_start_ = area_start; 552 area_start_ = area_start;
561 area_end_ = area_end; 553 area_end_ = area_end;
562 } 554 }
563 555
564 Executability executable() { 556 Executability executable() {
565 return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE; 557 return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
566 } 558 }
567 559
568 bool ContainsOnlyData() { return IsFlagSet(CONTAINS_ONLY_DATA); }
569
570 bool InNewSpace() { 560 bool InNewSpace() {
571 return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0; 561 return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;
572 } 562 }
573 563
574 bool InToSpace() { return IsFlagSet(IN_TO_SPACE); } 564 bool InToSpace() { return IsFlagSet(IN_TO_SPACE); }
575 565
576 bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); } 566 bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); }
577 567
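Note: InNewSpace() above tests both new-space flag bits with a single mask instead of two IsFlagSet() calls. A self-contained sketch of the same bitmask pattern with a toy flag set (the bit positions and field type here are illustrative, not the real MemoryChunk layout):

#include <cstdint>

// Illustrative subset of the flag enum; real bit positions come from
// MemoryChunkFlags above.
enum ToyChunkFlag { TOY_IN_FROM_SPACE = 5, TOY_IN_TO_SPACE = 6 };

struct ToyChunk {
  uintptr_t flags_ = 0;

  void SetFlag(int flag) { flags_ |= (uintptr_t{1} << flag); }
  bool IsFlagSet(int flag) const {
    return (flags_ & (uintptr_t{1} << flag)) != 0;
  }

  // One mask covers both new-space sub-spaces, mirroring
  // MemoryChunk::InNewSpace() in the diff above.
  bool InNewSpace() const {
    return (flags_ & ((uintptr_t{1} << TOY_IN_FROM_SPACE) |
                      (uintptr_t{1} << TOY_IN_TO_SPACE))) != 0;
  }
};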
578 // --------------------------------------------------------------------- 568 // ---------------------------------------------------------------------
579 // Markbits support 569 // Markbits support
(...skipping 2009 matching lines...)
2589 MUST_USE_RESULT AllocationResult SlowAllocateRaw(int size_in_bytes); 2579 MUST_USE_RESULT AllocationResult SlowAllocateRaw(int size_in_bytes);
2590 2580
2591 friend class SemiSpaceIterator; 2581 friend class SemiSpaceIterator;
2592 2582
2593 public: 2583 public:
2594 TRACK_MEMORY("NewSpace") 2584 TRACK_MEMORY("NewSpace")
2595 }; 2585 };
2596 2586
2597 2587
2598 // ----------------------------------------------------------------------------- 2588 // -----------------------------------------------------------------------------
2599 // Old object space (excluding map objects) 2589 // Old object space (includes the old space of objects and code space)
2600 2590
2601 class OldSpace : public PagedSpace { 2591 class OldSpace : public PagedSpace {
2602 public: 2592 public:
2603 // Creates an old space object with a given maximum capacity. 2593 // Creates an old space object with a given maximum capacity.
2604 // The constructor does not allocate pages from OS. 2594 // The constructor does not allocate pages from OS.
2605 OldSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id, 2595 OldSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id,
2606 Executability executable) 2596 Executability executable)
2607 : PagedSpace(heap, max_capacity, id, executable) {} 2597 : PagedSpace(heap, max_capacity, id, executable) {}
2608 2598
2609 public: 2599 public:
(...skipping 213 matching lines...)
2823 2813
2824 // Iterates over the chunks (pages and large object pages) that can contain 2814 // Iterates over the chunks (pages and large object pages) that can contain
2825 // pointers to new space. 2815 // pointers to new space.
2826 class PointerChunkIterator BASE_EMBEDDED { 2816 class PointerChunkIterator BASE_EMBEDDED {
2827 public: 2817 public:
2828 inline explicit PointerChunkIterator(Heap* heap); 2818 inline explicit PointerChunkIterator(Heap* heap);
2829 2819
2830 // Return NULL when the iterator is done. 2820 // Return NULL when the iterator is done.
2831 MemoryChunk* next() { 2821 MemoryChunk* next() {
2832 switch (state_) { 2822 switch (state_) {
2833 case kOldPointerState: { 2823 case kOldSpaceState: {
2834 if (old_pointer_iterator_.has_next()) { 2824 if (old_pointer_iterator_.has_next()) {
2835 return old_pointer_iterator_.next(); 2825 return old_pointer_iterator_.next();
2836 } 2826 }
2837 state_ = kMapState; 2827 state_ = kMapState;
2838 // Fall through. 2828 // Fall through.
2839 } 2829 }
2840 case kMapState: { 2830 case kMapState: {
2841 if (map_iterator_.has_next()) { 2831 if (map_iterator_.has_next()) {
2842 return map_iterator_.next(); 2832 return map_iterator_.next();
2843 } 2833 }
(...skipping 18 matching lines...)
2862 return NULL; 2852 return NULL;
2863 default: 2853 default:
2864 break; 2854 break;
2865 } 2855 }
2866 UNREACHABLE(); 2856 UNREACHABLE();
2867 return NULL; 2857 return NULL;
2868 } 2858 }
2869 2859
2870 2860
2871 private: 2861 private:
2872 enum State { kOldPointerState, kMapState, kLargeObjectState, kFinishedState }; 2862 enum State { kOldSpaceState, kMapState, kLargeObjectState, kFinishedState };
2873 State state_; 2863 State state_;
2874 PageIterator old_pointer_iterator_; 2864 PageIterator old_pointer_iterator_;
2875 PageIterator map_iterator_; 2865 PageIterator map_iterator_;
2876 LargeObjectIterator lo_iterator_; 2866 LargeObjectIterator lo_iterator_;
2877 }; 2867 };
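Note: the switch in next() deliberately falls through from state to state, so a single call keeps advancing until it finds a chunk or reaches the finished state. A standalone sketch of the same fall-through pattern with toy iterators (the real class walks old space, map space, and large-object space via PageIterator/LargeObjectIterator):

#include <cstddef>
#include <utility>
#include <vector>

struct ToyChunk {};

// Illustrative sub-iterator standing in for PageIterator.
class ToyPageIterator {
 public:
  explicit ToyPageIterator(std::vector<ToyChunk*> pages)
      : pages_(std::move(pages)) {}
  bool has_next() const { return pos_ < pages_.size(); }
  ToyChunk* next() { return pages_[pos_++]; }

 private:
  std::vector<ToyChunk*> pages_;
  size_t pos_ = 0;
};

class ToyChunkIterator {
 public:
  ToyChunkIterator(ToyPageIterator old_space, ToyPageIterator map_space)
      : old_iterator_(std::move(old_space)),
        map_iterator_(std::move(map_space)) {}

  // Returns NULL when done, like PointerChunkIterator::next().
  ToyChunk* next() {
    switch (state_) {
      case kOldSpaceState:
        if (old_iterator_.has_next()) return old_iterator_.next();
        state_ = kMapState;
        // Fall through.
      case kMapState:
        if (map_iterator_.has_next()) return map_iterator_.next();
        state_ = kFinishedState;
        // Fall through.
      case kFinishedState:
        return NULL;
    }
    return NULL;
  }

 private:
  enum State { kOldSpaceState, kMapState, kFinishedState };
  State state_ = kOldSpaceState;
  ToyPageIterator old_iterator_;
  ToyPageIterator map_iterator_;
};

// Typical use: drain the iterator until it returns NULL.
//   ToyChunk* chunk;
//   while ((chunk = it.next()) != NULL) { /* visit chunk */ }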
2878 2868
2879 2869
2880 #ifdef DEBUG 2870 #ifdef DEBUG
2881 struct CommentStatistic { 2871 struct CommentStatistic {
2882 const char* comment; 2872 const char* comment;
2883 int size; 2873 int size;
2884 int count; 2874 int count;
2885 void Clear() { 2875 void Clear() {
2886 comment = NULL; 2876 comment = NULL;
2887 size = 0; 2877 size = 0;
2888 count = 0; 2878 count = 0;
2889 } 2879 }
2890 // Must be small, since an iteration is used for lookup. 2880 // Must be small, since an iteration is used for lookup.
2891 static const int kMaxComments = 64; 2881 static const int kMaxComments = 64;
2892 }; 2882 };
2893 #endif 2883 #endif
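Note: the "must be small" comment is there because the comment table is searched by iteration; with kMaxComments = 64 a linear scan is cheap. A minimal sketch of that lookup-by-iteration (a hypothetical helper, not the code in spaces.cc, assuming the CommentStatistic struct above is in scope):

#include <cstring>

// Linear lookup over a fixed-size table; fine because kMaxComments is 64.
static CommentStatistic* FindComment(CommentStatistic* table, int size,
                                     const char* comment) {
  for (int i = 0; i < size; i++) {
    if (table[i].comment != NULL && strcmp(table[i].comment, comment) == 0) {
      return &table[i];
    }
  }
  return NULL;  // not found; a caller might claim an empty slot instead
}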
2894 } 2884 }
2895 } // namespace v8::internal 2885 } // namespace v8::internal
2896 2886
2897 #endif // V8_HEAP_SPACES_H_ 2887 #endif // V8_HEAP_SPACES_H_