OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef V8_HEAP_SPACES_H_ | 5 #ifndef V8_HEAP_SPACES_H_ |
6 #define V8_HEAP_SPACES_H_ | 6 #define V8_HEAP_SPACES_H_ |
7 | 7 |
8 #include "src/allocation.h" | 8 #include "src/allocation.h" |
9 #include "src/base/atomicops.h" | 9 #include "src/base/atomicops.h" |
10 #include "src/base/bits.h" | 10 #include "src/base/bits.h" |
(...skipping 25 matching lines...) Expand all Loading... |
36 // There is a separate large object space for objects larger than | 36 // There is a separate large object space for objects larger than |
37 // Page::kMaxHeapObjectSize, so that they do not have to move during | 37 // Page::kMaxHeapObjectSize, so that they do not have to move during |
38 // collection. The large object space is paged. Pages in large object space | 38 // collection. The large object space is paged. Pages in large object space |
39 // may be larger than the page size. | 39 // may be larger than the page size. |
40 // | 40 // |
41 // A store-buffer based write barrier is used to keep track of intergenerational | 41 // A store-buffer based write barrier is used to keep track of intergenerational |
42 // references. See heap/store-buffer.h. | 42 // references. See heap/store-buffer.h. |
43 // | 43 // |
44 // During scavenges and mark-sweep collections we sometimes (after a store | 44 // During scavenges and mark-sweep collections we sometimes (after a store |
45 // buffer overflow) iterate intergenerational pointers without decoding heap | 45 // buffer overflow) iterate intergenerational pointers without decoding heap |
46 // object maps so if the page belongs to old pointer space or large object | 46 // object maps so if the page belongs to old space or large object space |
47 // space it is essential to guarantee that the page does not contain any | 47 // it is essential to guarantee that the page does not contain any |
48 // garbage pointers to new space: every pointer aligned word which satisfies | 48 // garbage pointers to new space: every pointer aligned word which satisfies |
49 // the Heap::InNewSpace() predicate must be a pointer to a live heap object in | 49 // the Heap::InNewSpace() predicate must be a pointer to a live heap object in |
50 // new space. Thus objects in old pointer and large object spaces should have a | 50 // new space. Thus objects in old space and large object spaces should have a |
51 // special layout (e.g. no bare integer fields). This requirement does not | 51 // special layout (e.g. no bare integer fields). This requirement does not |
52 // apply to map space which is iterated in a special fashion. However we still | 52 // apply to map space which is iterated in a special fashion. However we still |
53 // require pointer fields of dead maps to be cleaned. | 53 // require pointer fields of dead maps to be cleaned. |
54 // | 54 // |
55 // To enable lazy cleaning of old space pages we can mark chunks of the page | 55 // To enable lazy cleaning of old space pages we can mark chunks of the page |
56 // as being garbage. Garbage sections are marked with a special map. These | 56 // as being garbage. Garbage sections are marked with a special map. These |
57 // sections are skipped when scanning the page, even if we are otherwise | 57 // sections are skipped when scanning the page, even if we are otherwise |
58 // scanning without regard for object boundaries. Garbage sections are chained | 58 // scanning without regard for object boundaries. Garbage sections are chained |
59 // together to form a free list after a GC. Garbage sections created outside | 59 // together to form a free list after a GC. Garbage sections created outside |
60 // of GCs by object truncation etc. may not be in the free list chain. Very | 60 // of GCs by object truncation etc. may not be in the free list chain. Very |
(...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
95 class MemoryAllocator; | 95 class MemoryAllocator; |
96 class AllocationInfo; | 96 class AllocationInfo; |
97 class Space; | 97 class Space; |
98 class FreeList; | 98 class FreeList; |
99 class MemoryChunk; | 99 class MemoryChunk; |
100 | 100 |
101 class MarkBit { | 101 class MarkBit { |
102 public: | 102 public: |
103 typedef uint32_t CellType; | 103 typedef uint32_t CellType; |
104 | 104 |
105 inline MarkBit(CellType* cell, CellType mask, bool data_only) | 105 inline MarkBit(CellType* cell, CellType mask) : cell_(cell), mask_(mask) {} |
106 : cell_(cell), mask_(mask), data_only_(data_only) {} | |
107 | 106 |
108 inline CellType* cell() { return cell_; } | 107 inline CellType* cell() { return cell_; } |
109 inline CellType mask() { return mask_; } | 108 inline CellType mask() { return mask_; } |
110 | 109 |
111 #ifdef DEBUG | 110 #ifdef DEBUG |
112 bool operator==(const MarkBit& other) { | 111 bool operator==(const MarkBit& other) { |
113 return cell_ == other.cell_ && mask_ == other.mask_; | 112 return cell_ == other.cell_ && mask_ == other.mask_; |
114 } | 113 } |
115 #endif | 114 #endif |
116 | 115 |
117 inline void Set() { *cell_ |= mask_; } | 116 inline void Set() { *cell_ |= mask_; } |
118 inline bool Get() { return (*cell_ & mask_) != 0; } | 117 inline bool Get() { return (*cell_ & mask_) != 0; } |
119 inline void Clear() { *cell_ &= ~mask_; } | 118 inline void Clear() { *cell_ &= ~mask_; } |
120 | 119 |
121 inline bool data_only() { return data_only_; } | |
122 | 120 |
123 inline MarkBit Next() { | 121 inline MarkBit Next() { |
124 CellType new_mask = mask_ << 1; | 122 CellType new_mask = mask_ << 1; |
125 if (new_mask == 0) { | 123 if (new_mask == 0) { |
126 return MarkBit(cell_ + 1, 1, data_only_); | 124 return MarkBit(cell_ + 1, 1); |
127 } else { | 125 } else { |
128 return MarkBit(cell_, new_mask, data_only_); | 126 return MarkBit(cell_, new_mask); |
129 } | 127 } |
130 } | 128 } |
131 | 129 |
132 private: | 130 private: |
133 CellType* cell_; | 131 CellType* cell_; |
134 CellType mask_; | 132 CellType mask_; |
135 // This boolean indicates that the object is in a data-only space with no | |
136 // pointers. This enables some optimizations when marking. | |
137 // It is expected that this field is inlined and turned into control flow | |
138 // at the place where the MarkBit object is created. | |
139 bool data_only_; | |
140 }; | 133 }; |
141 | 134 |
142 | 135 |
143 // Bitmap is a sequence of cells each containing fixed number of bits. | 136 // Bitmap is a sequence of cells each containing fixed number of bits. |
144 class Bitmap { | 137 class Bitmap { |
145 public: | 138 public: |
146 static const uint32_t kBitsPerCell = 32; | 139 static const uint32_t kBitsPerCell = 32; |
147 static const uint32_t kBitsPerCellLog2 = 5; | 140 static const uint32_t kBitsPerCellLog2 = 5; |
148 static const uint32_t kBitIndexMask = kBitsPerCell - 1; | 141 static const uint32_t kBitIndexMask = kBitsPerCell - 1; |
149 static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte; | 142 static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte; |
(...skipping 30 matching lines...) Expand all Loading... |
180 INLINE(MarkBit::CellType* cells()) { | 173 INLINE(MarkBit::CellType* cells()) { |
181 return reinterpret_cast<MarkBit::CellType*>(this); | 174 return reinterpret_cast<MarkBit::CellType*>(this); |
182 } | 175 } |
183 | 176 |
184 INLINE(Address address()) { return reinterpret_cast<Address>(this); } | 177 INLINE(Address address()) { return reinterpret_cast<Address>(this); } |
185 | 178 |
186 INLINE(static Bitmap* FromAddress(Address addr)) { | 179 INLINE(static Bitmap* FromAddress(Address addr)) { |
187 return reinterpret_cast<Bitmap*>(addr); | 180 return reinterpret_cast<Bitmap*>(addr); |
188 } | 181 } |
189 | 182 |
190 inline MarkBit MarkBitFromIndex(uint32_t index, bool data_only = false) { | 183 inline MarkBit MarkBitFromIndex(uint32_t index) { |
191 MarkBit::CellType mask = 1 << (index & kBitIndexMask); | 184 MarkBit::CellType mask = 1 << (index & kBitIndexMask); |
192 MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2); | 185 MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2); |
193 return MarkBit(cell, mask, data_only); | 186 return MarkBit(cell, mask); |
194 } | 187 } |
195 | 188 |
196 static inline void Clear(MemoryChunk* chunk); | 189 static inline void Clear(MemoryChunk* chunk); |
197 | 190 |
198 static void PrintWord(uint32_t word, uint32_t himask = 0) { | 191 static void PrintWord(uint32_t word, uint32_t himask = 0) { |
199 for (uint32_t mask = 1; mask != 0; mask <<= 1) { | 192 for (uint32_t mask = 1; mask != 0; mask <<= 1) { |
200 if ((mask & himask) != 0) PrintF("["); | 193 if ((mask & himask) != 0) PrintF("["); |
201 PrintF((mask & word) ? "1" : "0"); | 194 PrintF((mask & word) ? "1" : "0"); |
202 if ((mask & himask) != 0) PrintF("]"); | 195 if ((mask & himask) != 0) PrintF("]"); |
203 } | 196 } |
(...skipping 159 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
363 | 356 |
364 enum MemoryChunkFlags { | 357 enum MemoryChunkFlags { |
365 IS_EXECUTABLE, | 358 IS_EXECUTABLE, |
366 ABOUT_TO_BE_FREED, | 359 ABOUT_TO_BE_FREED, |
367 POINTERS_TO_HERE_ARE_INTERESTING, | 360 POINTERS_TO_HERE_ARE_INTERESTING, |
368 POINTERS_FROM_HERE_ARE_INTERESTING, | 361 POINTERS_FROM_HERE_ARE_INTERESTING, |
369 SCAN_ON_SCAVENGE, | 362 SCAN_ON_SCAVENGE, |
370 IN_FROM_SPACE, // Mutually exclusive with IN_TO_SPACE. | 363 IN_FROM_SPACE, // Mutually exclusive with IN_TO_SPACE. |
371 IN_TO_SPACE, // All pages in new space have one of these two set. | 364 IN_TO_SPACE, // All pages in new space have one of these two set. |
372 NEW_SPACE_BELOW_AGE_MARK, | 365 NEW_SPACE_BELOW_AGE_MARK, |
373 CONTAINS_ONLY_DATA, | |
374 EVACUATION_CANDIDATE, | 366 EVACUATION_CANDIDATE, |
375 RESCAN_ON_EVACUATION, | 367 RESCAN_ON_EVACUATION, |
376 NEVER_EVACUATE, // May contain immortal immutables. | 368 NEVER_EVACUATE, // May contain immortal immutables. |
377 | 369 |
378 // WAS_SWEPT indicates that marking bits have been cleared by the sweeper, | 370 // WAS_SWEPT indicates that marking bits have been cleared by the sweeper, |
379 // otherwise marking bits are still intact. | 371 // otherwise marking bits are still intact. |
380 WAS_SWEPT, | 372 WAS_SWEPT, |
381 | 373 |
382 // Large objects can have a progress bar in their page header. These objects | 374 // Large objects can have a progress bar in their page header. These objects |
383 // are scanned in increments and will be kept black while being scanned. | 375 // are scanned in increments and will be kept black while being scanned. |
(...skipping 180 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
564 | 556 |
565 void SetArea(Address area_start, Address area_end) { | 557 void SetArea(Address area_start, Address area_end) { |
566 area_start_ = area_start; | 558 area_start_ = area_start; |
567 area_end_ = area_end; | 559 area_end_ = area_end; |
568 } | 560 } |
569 | 561 |
570 Executability executable() { | 562 Executability executable() { |
571 return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE; | 563 return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE; |
572 } | 564 } |
573 | 565 |
574 bool ContainsOnlyData() { return IsFlagSet(CONTAINS_ONLY_DATA); } | |
575 | |
576 bool InNewSpace() { | 566 bool InNewSpace() { |
577 return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0; | 567 return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0; |
578 } | 568 } |
579 | 569 |
580 bool InToSpace() { return IsFlagSet(IN_TO_SPACE); } | 570 bool InToSpace() { return IsFlagSet(IN_TO_SPACE); } |
581 | 571 |
582 bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); } | 572 bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); } |
583 | 573 |
584 // --------------------------------------------------------------------- | 574 // --------------------------------------------------------------------- |
585 // Markbits support | 575 // Markbits support |
(...skipping 2010 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2596 MUST_USE_RESULT AllocationResult SlowAllocateRaw(int size_in_bytes); | 2586 MUST_USE_RESULT AllocationResult SlowAllocateRaw(int size_in_bytes); |
2597 | 2587 |
2598 friend class SemiSpaceIterator; | 2588 friend class SemiSpaceIterator; |
2599 | 2589 |
2600 public: | 2590 public: |
2601 TRACK_MEMORY("NewSpace") | 2591 TRACK_MEMORY("NewSpace") |
2602 }; | 2592 }; |
2603 | 2593 |
2604 | 2594 |
2605 // ----------------------------------------------------------------------------- | 2595 // ----------------------------------------------------------------------------- |
2606 // Old object space (excluding map objects) | 2596 // Old object space (includes the old space of objects and code space) |
2607 | 2597 |
2608 class OldSpace : public PagedSpace { | 2598 class OldSpace : public PagedSpace { |
2609 public: | 2599 public: |
2610 // Creates an old space object with a given maximum capacity. | 2600 // Creates an old space object with a given maximum capacity. |
2611 // The constructor does not allocate pages from OS. | 2601 // The constructor does not allocate pages from OS. |
2612 OldSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id, | 2602 OldSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id, |
2613 Executability executable) | 2603 Executability executable) |
2614 : PagedSpace(heap, max_capacity, id, executable) {} | 2604 : PagedSpace(heap, max_capacity, id, executable) {} |
2615 | 2605 |
2616 public: | 2606 public: |
(...skipping 188 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2805 | 2795 |
2806 // Iterates over the chunks (pages and large object pages) that can contain | 2796 // Iterates over the chunks (pages and large object pages) that can contain |
2807 // pointers to new space. | 2797 // pointers to new space. |
2808 class PointerChunkIterator BASE_EMBEDDED { | 2798 class PointerChunkIterator BASE_EMBEDDED { |
2809 public: | 2799 public: |
2810 inline explicit PointerChunkIterator(Heap* heap); | 2800 inline explicit PointerChunkIterator(Heap* heap); |
2811 | 2801 |
2812 // Return NULL when the iterator is done. | 2802 // Return NULL when the iterator is done. |
2813 MemoryChunk* next() { | 2803 MemoryChunk* next() { |
2814 switch (state_) { | 2804 switch (state_) { |
2815 case kOldPointerState: { | 2805 case kOldSpaceState: { |
2816 if (old_pointer_iterator_.has_next()) { | 2806 if (old_iterator_.has_next()) { |
2817 return old_pointer_iterator_.next(); | 2807 return old_iterator_.next(); |
2818 } | 2808 } |
2819 state_ = kMapState; | 2809 state_ = kMapState; |
2820 // Fall through. | 2810 // Fall through. |
2821 } | 2811 } |
2822 case kMapState: { | 2812 case kMapState: { |
2823 if (map_iterator_.has_next()) { | 2813 if (map_iterator_.has_next()) { |
2824 return map_iterator_.next(); | 2814 return map_iterator_.next(); |
2825 } | 2815 } |
2826 state_ = kLargeObjectState; | 2816 state_ = kLargeObjectState; |
2827 // Fall through. | 2817 // Fall through. |
(...skipping 16 matching lines...) Expand all Loading... |
2844 return NULL; | 2834 return NULL; |
2845 default: | 2835 default: |
2846 break; | 2836 break; |
2847 } | 2837 } |
2848 UNREACHABLE(); | 2838 UNREACHABLE(); |
2849 return NULL; | 2839 return NULL; |
2850 } | 2840 } |
2851 | 2841 |
2852 | 2842 |
2853 private: | 2843 private: |
2854 enum State { kOldPointerState, kMapState, kLargeObjectState, kFinishedState }; | 2844 enum State { kOldSpaceState, kMapState, kLargeObjectState, kFinishedState }; |
2855 State state_; | 2845 State state_; |
2856 PageIterator old_pointer_iterator_; | 2846 PageIterator old_iterator_; |
2857 PageIterator map_iterator_; | 2847 PageIterator map_iterator_; |
2858 LargeObjectIterator lo_iterator_; | 2848 LargeObjectIterator lo_iterator_; |
2859 }; | 2849 }; |
2860 | 2850 |
2861 | 2851 |
2862 #ifdef DEBUG | 2852 #ifdef DEBUG |
2863 struct CommentStatistic { | 2853 struct CommentStatistic { |
2864 const char* comment; | 2854 const char* comment; |
2865 int size; | 2855 int size; |
2866 int count; | 2856 int count; |
2867 void Clear() { | 2857 void Clear() { |
2868 comment = NULL; | 2858 comment = NULL; |
2869 size = 0; | 2859 size = 0; |
2870 count = 0; | 2860 count = 0; |
2871 } | 2861 } |
2872 // Must be small, since an iteration is used for lookup. | 2862 // Must be small, since an iteration is used for lookup. |
2873 static const int kMaxComments = 64; | 2863 static const int kMaxComments = 64; |
2874 }; | 2864 }; |
2875 #endif | 2865 #endif |
2876 } | 2866 } |
2877 } // namespace v8::internal | 2867 } // namespace v8::internal |
2878 | 2868 |
2879 #endif // V8_HEAP_SPACES_H_ | 2869 #endif // V8_HEAP_SPACES_H_ |
OLD | NEW |