OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef V8_HEAP_SPACES_H_ | 5 #ifndef V8_HEAP_SPACES_H_ |
6 #define V8_HEAP_SPACES_H_ | 6 #define V8_HEAP_SPACES_H_ |
7 | 7 |
8 #include "src/allocation.h" | 8 #include "src/allocation.h" |
9 #include "src/atomic-utils.h" | 9 #include "src/atomic-utils.h" |
10 #include "src/base/atomicops.h" | 10 #include "src/base/atomicops.h" |
(...skipping 13 matching lines...) |
24 class CompactionSpaceCollection; | 24 class CompactionSpaceCollection; |
25 class FreeList; | 25 class FreeList; |
26 class InlineAllocationObserver; | 26 class InlineAllocationObserver; |
27 class Isolate; | 27 class Isolate; |
28 class MemoryAllocator; | 28 class MemoryAllocator; |
29 class MemoryChunk; | 29 class MemoryChunk; |
30 class PagedSpace; | 30 class PagedSpace; |
31 class SemiSpace; | 31 class SemiSpace; |
32 class SkipList; | 32 class SkipList; |
33 class SlotsBuffer; | 33 class SlotsBuffer; |
| 34 class SlotSet; |
34 class Space; | 35 class Space; |
35 | 36 |
36 // ----------------------------------------------------------------------------- | 37 // ----------------------------------------------------------------------------- |
37 // Heap structures: | 38 // Heap structures: |
38 // | 39 // |
39 // A JS heap consists of a young generation, an old generation, and a large | 40 // A JS heap consists of a young generation, an old generation, and a large |
40 // object space. The young generation is divided into two semispaces. A | 41 // object space. The young generation is divided into two semispaces. A |
41 // scavenger implements Cheney's copying algorithm. The old generation is | 42 // scavenger implements Cheney's copying algorithm. The old generation is |
42 // separated into a map space and an old object space. The map space contains | 43 // separated into a map space and an old object space. The map space contains |
43 // all (and only) map objects, the rest of old objects go into the old space. | 44 // all (and only) map objects, the rest of old objects go into the old space. |
(...skipping 245 matching lines...) |
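The heap-structures comment above notes that the scavenger implements Cheney's copying algorithm. As a rough, hedged illustration only (the types and functions below are hypothetical and not V8's actual scavenger, which operates directly on semispace pages), the core of the algorithm looks like this:

#include <cstddef>
#include <vector>

// A heap "object" reduced to its outgoing references.
struct Obj {
  Obj* forwarded = nullptr;     // forwarding pointer, set once evacuated
  std::vector<Obj*> children;   // outgoing references
};

// Evacuates |obj| into to-space (here just a vector) unless it has already
// been moved, and returns its new location.
static Obj* Evacuate(Obj* obj, std::vector<Obj*>& to_space) {
  if (obj == nullptr) return nullptr;
  if (obj->forwarded != nullptr) return obj->forwarded;
  Obj* copy = new Obj(*obj);
  copy->forwarded = nullptr;    // fresh copy is not itself forwarded
  obj->forwarded = copy;        // leave a forwarding pointer in from-space
  to_space.push_back(copy);
  return copy;
}

// Cheney's algorithm: copy the roots, then scan to-space with a single
// pointer that chases the allocation front; no separate mark stack is needed.
static void Scavenge(std::vector<Obj*>& roots) {
  std::vector<Obj*> to_space;
  for (Obj*& root : roots) root = Evacuate(root, to_space);
  for (size_t scan = 0; scan < to_space.size(); ++scan) {
    for (Obj*& child : to_space[scan]->children) {
      child = Evacuate(child, to_space);
    }
  }
}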
289 | 290 |
290 | 291 |
291 // MemoryChunk represents a memory region owned by a specific space. | 292 // MemoryChunk represents a memory region owned by a specific space. |
292 // It is divided into the header and the body. Chunk start is always | 293 // It is divided into the header and the body. Chunk start is always |
293 // 1MB aligned. Start of the body is aligned so it can accommodate | 294 // 1MB aligned. Start of the body is aligned so it can accommodate |
294 // any heap object. | 295 // any heap object. |
295 class MemoryChunk { | 296 class MemoryChunk { |
296 public: | 297 public: |
297 enum MemoryChunkFlags { | 298 enum MemoryChunkFlags { |
298 IS_EXECUTABLE, | 299 IS_EXECUTABLE, |
299 ABOUT_TO_BE_FREED, | |
300 POINTERS_TO_HERE_ARE_INTERESTING, | 300 POINTERS_TO_HERE_ARE_INTERESTING, |
301 POINTERS_FROM_HERE_ARE_INTERESTING, | 301 POINTERS_FROM_HERE_ARE_INTERESTING, |
302 SCAN_ON_SCAVENGE, | |
303 IN_FROM_SPACE, // Mutually exclusive with IN_TO_SPACE. | 302 IN_FROM_SPACE, // Mutually exclusive with IN_TO_SPACE. |
304 IN_TO_SPACE, // All pages in new space have one of these two set. | 303 IN_TO_SPACE, // All pages in new space have one of these two set. |
305 NEW_SPACE_BELOW_AGE_MARK, | 304 NEW_SPACE_BELOW_AGE_MARK, |
306 EVACUATION_CANDIDATE, | 305 EVACUATION_CANDIDATE, |
307 RESCAN_ON_EVACUATION, | 306 RESCAN_ON_EVACUATION, |
308 NEVER_EVACUATE, // May contain immortal immutables. | 307 NEVER_EVACUATE, // May contain immortal immutables. |
309 POPULAR_PAGE, // Slots buffer of this page overflowed on the previous GC. | 308 POPULAR_PAGE, // Slots buffer of this page overflowed on the previous GC. |
310 | 309 |
311 // Large objects can have a progress bar in their page header. These objects | 310 // Large objects can have a progress bar in their page header. These objects |
312 // are scanned in increments and will be kept black while being scanned. | 311 // are scanned in increments and will be kept black while being scanned. |
(...skipping 78 matching lines...) |
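The MemoryChunk comment above states that chunk starts are always 1MB aligned. One consequence is that the chunk owning any interior address can be recovered by masking off the low bits. A minimal sketch, assuming a 1MB alignment and using hypothetical constant names rather than the actual V8 declarations:

#include <cstdint>

// Hypothetical stand-ins; the real constants live in the MemoryChunk class.
constexpr uintptr_t kChunkAlignment = uintptr_t{1} << 20;  // 1MB
constexpr uintptr_t kChunkAlignmentMask = kChunkAlignment - 1;

// Any address inside a chunk maps back to the chunk start (and thus to the
// chunk header) by clearing the low 20 bits, because chunks are 1MB aligned.
inline uintptr_t ChunkStartFromAddress(uintptr_t addr) {
  return addr & ~kChunkAlignmentMask;
}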
391 + 2 * kPointerSize // base::VirtualMemory reservation_ | 390 + 2 * kPointerSize // base::VirtualMemory reservation_ |
392 + kPointerSize // Address owner_ | 391 + kPointerSize // Address owner_ |
393 + kPointerSize // Heap* heap_ | 392 + kPointerSize // Heap* heap_ |
394 + kIntSize; // int progress_bar_ | 393 + kIntSize; // int progress_bar_ |
395 | 394 |
396 static const size_t kSlotsBufferOffset = | 395 static const size_t kSlotsBufferOffset = |
397 kLiveBytesOffset + kIntSize; // int live_byte_count_ | 396 kLiveBytesOffset + kIntSize; // int live_byte_count_ |
398 | 397 |
399 static const size_t kWriteBarrierCounterOffset = | 398 static const size_t kWriteBarrierCounterOffset = |
400 kSlotsBufferOffset + kPointerSize // SlotsBuffer* slots_buffer_; | 399 kSlotsBufferOffset + kPointerSize // SlotsBuffer* slots_buffer_; |
| 400 + kPointerSize // SlotSet* old_to_new_slots_; |
401 + kPointerSize; // SkipList* skip_list_; | 401 + kPointerSize; // SkipList* skip_list_; |
402 | 402 |
403 static const size_t kMinHeaderSize = | 403 static const size_t kMinHeaderSize = |
404 kWriteBarrierCounterOffset + | 404 kWriteBarrierCounterOffset + |
405 kIntptrSize // intptr_t write_barrier_counter_ | 405 kIntptrSize // intptr_t write_barrier_counter_ |
406 + kPointerSize // AtomicValue high_water_mark_ | 406 + kPointerSize // AtomicValue high_water_mark_ |
407 + kPointerSize // base::Mutex* mutex_ | 407 + kPointerSize // base::Mutex* mutex_ |
408 + kPointerSize // base::AtomicWord parallel_sweeping_ | 408 + kPointerSize // base::AtomicWord parallel_sweeping_ |
409 + kPointerSize // AtomicValue parallel_compaction_ | 409 + kPointerSize // AtomicValue parallel_compaction_ |
410 + 5 * kPointerSize // AtomicNumber free-list statistics | 410 + 5 * kPointerSize // AtomicNumber free-list statistics |
(...skipping 85 matching lines...) |
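The header-size constants above are built as a running sum: each offset is the previous offset plus the size of the member recorded there, which is why adding SlotSet* old_to_new_slots_ after slots_buffer_ shifts kWriteBarrierCounterOffset (and everything after it) by one pointer. A simplified sketch of the same pattern with hypothetical names:

#include <cstddef>

// Hypothetical, simplified offsets mirroring the pattern used above.
constexpr size_t kPtr = sizeof(void*);

constexpr size_t kSlotsBufferOff = 0;                         // SlotsBuffer*
constexpr size_t kOldToNewSlotsOff = kSlotsBufferOff + kPtr;  // SlotSet*
constexpr size_t kSkipListOff = kOldToNewSlotsOff + kPtr;     // SkipList*
constexpr size_t kWriteBarrierCounterOff = kSkipListOff + kPtr;

// Inserting the SlotSet* member shifts every later offset by one pointer.
static_assert(kWriteBarrierCounterOff == 3 * kPtr,
              "one extra pointer-sized member moves later offsets by kPtr");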
496 kPageHeaderTag); | 496 kPageHeaderTag); |
497 } | 497 } |
498 | 498 |
499 base::VirtualMemory* reserved_memory() { return &reservation_; } | 499 base::VirtualMemory* reserved_memory() { return &reservation_; } |
500 | 500 |
501 void set_reserved_memory(base::VirtualMemory* reservation) { | 501 void set_reserved_memory(base::VirtualMemory* reservation) { |
502 DCHECK_NOT_NULL(reservation); | 502 DCHECK_NOT_NULL(reservation); |
503 reservation_.TakeControl(reservation); | 503 reservation_.TakeControl(reservation); |
504 } | 504 } |
505 | 505 |
506 bool scan_on_scavenge() { return IsFlagSet(SCAN_ON_SCAVENGE); } | |
507 void initialize_scan_on_scavenge(bool scan) { | |
508 if (scan) { | |
509 SetFlag(SCAN_ON_SCAVENGE); | |
510 } else { | |
511 ClearFlag(SCAN_ON_SCAVENGE); | |
512 } | |
513 } | |
514 inline void set_scan_on_scavenge(bool scan); | |
515 | |
516 bool Contains(Address addr) { | 506 bool Contains(Address addr) { |
517 return addr >= area_start() && addr < area_end(); | 507 return addr >= area_start() && addr < area_end(); |
518 } | 508 } |
519 | 509 |
520 // Checks whether addr can be a limit of addresses in this page. | 510 // Checks whether addr can be a limit of addresses in this page. |
521 // It's a limit if it's in the page, or if it's just after the | 511 // It's a limit if it's in the page, or if it's just after the |
522 // last byte of the page. | 512 // last byte of the page. |
523 bool ContainsLimit(Address addr) { | 513 bool ContainsLimit(Address addr) { |
524 return addr >= area_start() && addr <= area_end(); | 514 return addr >= area_start() && addr <= area_end(); |
525 } | 515 } |
(...skipping 153 matching lines...) |
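Contains() and ContainsLimit() above differ only at the area end: an address one past the last byte is not contained, but it is a valid limit. A small hedged example using free functions as stand-ins for the member functions:

#include <cassert>
#include <cstdint>

// The usable area is the half-open range [area_start, area_end).
bool ContainsAddr(uintptr_t area_start, uintptr_t area_end, uintptr_t a) {
  return a >= area_start && a < area_end;
}

// A limit may additionally be exactly area_end, i.e. one past the last byte.
bool ContainsLimitAddr(uintptr_t area_start, uintptr_t area_end, uintptr_t a) {
  return a >= area_start && a <= area_end;
}

void Demo(uintptr_t area_start, uintptr_t area_end) {
  assert(!ContainsAddr(area_start, area_end, area_end));      // not an object address
  assert(ContainsLimitAddr(area_start, area_end, area_end));  // but a valid limit
}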
679 } | 669 } |
680 | 670 |
681 inline SkipList* skip_list() { return skip_list_; } | 671 inline SkipList* skip_list() { return skip_list_; } |
682 | 672 |
683 inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; } | 673 inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; } |
684 | 674 |
685 inline SlotsBuffer* slots_buffer() { return slots_buffer_; } | 675 inline SlotsBuffer* slots_buffer() { return slots_buffer_; } |
686 | 676 |
687 inline SlotsBuffer** slots_buffer_address() { return &slots_buffer_; } | 677 inline SlotsBuffer** slots_buffer_address() { return &slots_buffer_; } |
688 | 678 |
| 679 inline SlotSet* old_to_new_slots() { return old_to_new_slots_; } |
| 680 |
| 681 void AllocateOldToNewSlots(); |
| 682 void ReleaseOldToNewSlots(); |
| 683 |
689 void MarkEvacuationCandidate() { | 684 void MarkEvacuationCandidate() { |
690 DCHECK(!IsFlagSet(NEVER_EVACUATE)); | 685 DCHECK(!IsFlagSet(NEVER_EVACUATE)); |
691 DCHECK(slots_buffer_ == NULL); | 686 DCHECK(slots_buffer_ == NULL); |
692 SetFlag(EVACUATION_CANDIDATE); | 687 SetFlag(EVACUATION_CANDIDATE); |
693 } | 688 } |
694 | 689 |
695 void ClearEvacuationCandidate() { | 690 void ClearEvacuationCandidate() { |
696 DCHECK(slots_buffer_ == NULL); | 691 DCHECK(slots_buffer_ == NULL); |
697 ClearFlag(EVACUATION_CANDIDATE); | 692 ClearFlag(EVACUATION_CANDIDATE); |
698 } | 693 } |
(...skipping 27 matching lines...) |
726 // no failure can be in an object, so this can be distinguished from any entry | 721 // no failure can be in an object, so this can be distinguished from any entry |
727 // in a fixed array. | 722 // in a fixed array. |
728 Address owner_; | 723 Address owner_; |
729 Heap* heap_; | 724 Heap* heap_; |
730 // Used by the incremental marker to keep track of the scanning progress in | 725 // Used by the incremental marker to keep track of the scanning progress in |
731 // large objects that have a progress bar and are scanned in increments. | 726 // large objects that have a progress bar and are scanned in increments. |
732 int progress_bar_; | 727 int progress_bar_; |
733 // Count of bytes marked black on page. | 728 // Count of bytes marked black on page. |
734 int live_byte_count_; | 729 int live_byte_count_; |
735 SlotsBuffer* slots_buffer_; | 730 SlotsBuffer* slots_buffer_; |
| 731 // A single slot set for small pages (of size kPageSize) or an array of slot |
| 732 // sets for large pages. In the latter case the number of entries in the array |
| 733 // is ceil(size() / kPageSize). |
| 734 SlotSet* old_to_new_slots_; |
736 SkipList* skip_list_; | 735 SkipList* skip_list_; |
737 intptr_t write_barrier_counter_; | 736 intptr_t write_barrier_counter_; |
738 // Assuming the initial allocation on a page is sequential, | 737 // Assuming the initial allocation on a page is sequential, |
739 // count highest number of bytes ever allocated on the page. | 738 // count highest number of bytes ever allocated on the page. |
740 AtomicValue<intptr_t> high_water_mark_; | 739 AtomicValue<intptr_t> high_water_mark_; |
741 | 740 |
742 base::Mutex* mutex_; | 741 base::Mutex* mutex_; |
743 AtomicValue<ConcurrentSweepingState> concurrent_sweeping_; | 742 AtomicValue<ConcurrentSweepingState> concurrent_sweeping_; |
744 AtomicValue<ParallelCompactingState> parallel_compaction_; | 743 AtomicValue<ParallelCompactingState> parallel_compaction_; |
745 | 744 |
(...skipping 1485 matching lines...) |
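The comment on old_to_new_slots_ above sizes the slot-set array for large pages as ceil(size() / kPageSize). A hedged sketch of that rounding rule, with kPageSizeAssumed as a hypothetical stand-in for the real page-size constant:

#include <cstddef>

// kPageSizeAssumed is a hypothetical stand-in for the actual page size.
constexpr size_t kPageSizeAssumed = size_t{1} << 20;  // assume 1MB pages

// ceil(chunk_size / page_size): every started page-sized piece gets a set.
constexpr size_t SlotSetCount(size_t chunk_size) {
  return (chunk_size + kPageSizeAssumed - 1) / kPageSizeAssumed;
}

static_assert(SlotSetCount(kPageSizeAssumed) == 1,
              "a regular page carries a single slot set");
static_assert(SlotSetCount(3 * kPageSizeAssumed + 1) == 4,
              "a large page rounds up to one set per started page");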
2231 | 2230 |
2232 enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 }; | 2231 enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 }; |
2233 | 2232 |
2234 | 2233 |
2235 class NewSpacePage : public MemoryChunk { | 2234 class NewSpacePage : public MemoryChunk { |
2236 public: | 2235 public: |
2237 // GC related flags copied from from-space to to-space when | 2236 // GC related flags copied from from-space to to-space when |
2238 // flipping semispaces. | 2237 // flipping semispaces. |
2239 static const intptr_t kCopyOnFlipFlagsMask = | 2238 static const intptr_t kCopyOnFlipFlagsMask = |
2240 (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) | | 2239 (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) | |
2241 (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) | | 2240 (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING); |
2242 (1 << MemoryChunk::SCAN_ON_SCAVENGE); | |
2243 | 2241 |
2244 static const int kAreaSize = Page::kAllocatableMemory; | 2242 static const int kAreaSize = Page::kAllocatableMemory; |
2245 | 2243 |
2246 inline NewSpacePage* next_page() { | 2244 inline NewSpacePage* next_page() { |
2247 return static_cast<NewSpacePage*>(next_chunk()); | 2245 return static_cast<NewSpacePage*>(next_chunk()); |
2248 } | 2246 } |
2249 | 2247 |
2250 inline void set_next_page(NewSpacePage* page) { set_next_chunk(page); } | 2248 inline void set_next_page(NewSpacePage* page) { set_next_chunk(page); } |
2251 | 2249 |
2252 inline NewSpacePage* prev_page() { | 2250 inline NewSpacePage* prev_page() { |
(...skipping 875 matching lines...) |
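kCopyOnFlipFlagsMask above selects the GC-related flags that are carried from from-space pages to to-space pages when the semispaces are flipped. A hedged sketch of how such a mask is typically applied (the bit values below are hypothetical, not the actual enum values):

#include <cstdint>

// Hypothetical flag bits; the real values come from MemoryChunk's enum.
constexpr intptr_t kPointersToHere = intptr_t{1} << 0;
constexpr intptr_t kPointersFromHere = intptr_t{1} << 1;
constexpr intptr_t kCopyOnFlipMask = kPointersToHere | kPointersFromHere;

// On a flip, keep the page's own flags but take the masked GC-related bits
// from the from-space page, so they travel with the surviving objects.
inline intptr_t FlagsAfterFlip(intptr_t to_flags, intptr_t from_flags) {
  return (to_flags & ~kCopyOnFlipMask) | (from_flags & kCopyOnFlipMask);
}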
3128 count = 0; | 3126 count = 0; |
3129 } | 3127 } |
3130 // Must be small, since an iteration is used for lookup. | 3128 // Must be small, since an iteration is used for lookup. |
3131 static const int kMaxComments = 64; | 3129 static const int kMaxComments = 64; |
3132 }; | 3130 }; |
3133 #endif | 3131 #endif |
3134 } // namespace internal | 3132 } // namespace internal |
3135 } // namespace v8 | 3133 } // namespace v8 |
3136 | 3134 |
3137 #endif // V8_HEAP_SPACES_H_ | 3135 #endif // V8_HEAP_SPACES_H_ |