| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #ifndef V8_HEAP_SPACES_H_ | 5 #ifndef V8_HEAP_SPACES_H_ |
| 6 #define V8_HEAP_SPACES_H_ | 6 #define V8_HEAP_SPACES_H_ |
| 7 | 7 |
| 8 #include "src/allocation.h" | 8 #include "src/allocation.h" |
| 9 #include "src/atomic-utils.h" | 9 #include "src/atomic-utils.h" |
| 10 #include "src/base/atomicops.h" | 10 #include "src/base/atomicops.h" |
| (...skipping 14 matching lines...) |
| 25 class CompactionSpaceCollection; | 25 class CompactionSpaceCollection; |
| 26 class FreeList; | 26 class FreeList; |
| 27 class Isolate; | 27 class Isolate; |
| 28 class MemoryAllocator; | 28 class MemoryAllocator; |
| 29 class MemoryChunk; | 29 class MemoryChunk; |
| 30 class PagedSpace; | 30 class PagedSpace; |
| 31 class SemiSpace; | 31 class SemiSpace; |
| 32 class SkipList; | 32 class SkipList; |
| 33 class SlotsBuffer; | 33 class SlotsBuffer; |
| 34 class SlotSet; | 34 class SlotSet; |
| 35 class TypedSlotSet; | |
| 36 class Space; | 35 class Space; |
| 37 | 36 |
| 38 // ----------------------------------------------------------------------------- | 37 // ----------------------------------------------------------------------------- |
| 39 // Heap structures: | 38 // Heap structures: |
| 40 // | 39 // |
| 41 // A JS heap consists of a young generation, an old generation, and a large | 40 // A JS heap consists of a young generation, an old generation, and a large |
| 42 // object space. The young generation is divided into two semispaces. A | 41 // object space. The young generation is divided into two semispaces. A |
| 43 // scavenger implements Cheney's copying algorithm. The old generation is | 42 // scavenger implements Cheney's copying algorithm. The old generation is |
| 44 // separated into a map space and an old object space. The map space contains | 43 // separated into a map space and an old object space. The map space contains |
| 45 // all (and only) map objects; all other old objects go into the old space. | 44 // all (and only) map objects; all other old objects go into the old space. |
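The scavenger named above uses Cheney's copying algorithm over the two semispaces. As a refresher, here is a minimal sketch of that algorithm against a toy two-slot object model; the Object layout, the forwarding convention, and the class shape are illustrative assumptions, not V8's actual scavenger.

```cpp
#include <cstddef>
#include <utility>
#include <vector>

// Toy two-slot object. Reusing `forward` as the new-location pointer is
// an illustrative convention, not V8's representation.
struct Object {
  Object* forward = nullptr;  // non-null once the object has been copied
  Object* slots[2] = {nullptr, nullptr};
};

class Scavenger {
 public:
  explicit Scavenger(std::size_t capacity) : from_(capacity), to_(capacity) {}

  // Copy one object into to-space, leaving a forwarding pointer behind;
  // objects reached a second time are resolved through that pointer.
  Object* Evacuate(Object* obj) {
    if (obj == nullptr) return nullptr;
    if (obj->forward != nullptr) return obj->forward;  // already moved
    Object* copy = &to_[top_++];  // bump allocation; no capacity check here
    *copy = *obj;
    copy->forward = nullptr;
    obj->forward = copy;
    return copy;
  }

  // Cheney's algorithm: evacuate the roots, then advance a single `scan`
  // finger through to-space; every fresh copy appends work at `top_`.
  void Scavenge(std::vector<Object*>& roots) {
    top_ = 0;
    for (Object*& root : roots) root = Evacuate(root);
    for (std::size_t scan = 0; scan < top_; ++scan)
      for (Object*& slot : to_[scan].slots) slot = Evacuate(slot);
    std::swap(from_, to_);  // survivors now sit in the new from-space
  }

 private:
  std::vector<Object> from_, to_;
  std::size_t top_ = 0;
};
```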
| (...skipping 340 matching lines...) |
| 386 static const intptr_t kLiveBytesOffset = | 385 static const intptr_t kLiveBytesOffset = |
| 387 kSizeOffset + kPointerSize // size_t size | 386 kSizeOffset + kPointerSize // size_t size |
| 388 + kIntptrSize // intptr_t flags_ | 387 + kIntptrSize // intptr_t flags_ |
| 389 + kPointerSize // Address area_start_ | 388 + kPointerSize // Address area_start_ |
| 390 + kPointerSize // Address area_end_ | 389 + kPointerSize // Address area_end_ |
| 391 + 2 * kPointerSize // base::VirtualMemory reservation_ | 390 + 2 * kPointerSize // base::VirtualMemory reservation_ |
| 392 + kPointerSize // Address owner_ | 391 + kPointerSize // Address owner_ |
| 393 + kPointerSize // Heap* heap_ | 392 + kPointerSize // Heap* heap_ |
| 394 + kIntSize; // int progress_bar_ | 393 + kIntSize; // int progress_bar_ |
| 395 | 394 |
| 396 static const size_t kOldToNewSlotsOffset = | 395 static const size_t kSlotsBufferOffset = |
| 397 kLiveBytesOffset + kIntSize; // int live_byte_count_ | 396 kLiveBytesOffset + kIntSize; // int live_byte_count_ |
| 398 | 397 |
| 399 static const size_t kWriteBarrierCounterOffset = | 398 static const size_t kWriteBarrierCounterOffset = |
| 400 kOldToNewSlotsOffset + kPointerSize // SlotSet* old_to_new_slots_; | 399 kSlotsBufferOffset + kPointerSize // SlotsBuffer* slots_buffer_; |
| 401 + kPointerSize // SlotSet* old_to_old_slots_; | 400 + kPointerSize // SlotSet* old_to_new_slots_; |
| 402 + kPointerSize // TypedSlotSet* typed_old_to_old_slots_; | 401 + kPointerSize // SlotSet* old_to_old_slots_; |
| 403 + kPointerSize; // SkipList* skip_list_; | 402 + kPointerSize; // SkipList* skip_list_; |
| 404 | 403 |
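For concreteness, the offset chain above (following the NEW column's field order and names) can be checked by hand. A hedged sketch under assumed x64 sizes; kPointerSize, kIntptrSize, kIntSize, and kSizeOffset are stand-ins here, since V8 defines the real constants elsewhere:

```cpp
#include <cstddef>

// All sizes assumed (typical x64 values); kSizeOffset is set to 0 purely
// so the sums below come out concrete.
constexpr std::size_t kPointerSize = 8;
constexpr std::size_t kIntptrSize = 8;
constexpr std::size_t kIntSize = 4;
constexpr std::size_t kSizeOffset = 0;

constexpr std::size_t kLiveBytesOffset =
    kSizeOffset + kPointerSize   // size_t size_
    + kIntptrSize                // intptr_t flags_
    + kPointerSize               // Address area_start_
    + kPointerSize               // Address area_end_
    + 2 * kPointerSize           // base::VirtualMemory reservation_
    + kPointerSize               // Address owner_
    + kPointerSize               // Heap* heap_
    + kIntSize;                  // int progress_bar_
static_assert(kLiveBytesOffset == 68, "8+8+8+8+16+8+8+4 past size_");

constexpr std::size_t kSlotsBufferOffset =
    kLiveBytesOffset + kIntSize;  // int live_byte_count_
static_assert(kSlotsBufferOffset == 72, "live_byte_count_ adds 4");
```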
| 405 static const size_t kMinHeaderSize = | 404 static const size_t kMinHeaderSize = |
| 406 kWriteBarrierCounterOffset + | 405 kWriteBarrierCounterOffset + |
| 407 kIntptrSize // intptr_t write_barrier_counter_ | 406 kIntptrSize // intptr_t write_barrier_counter_ |
| 408 + kPointerSize // AtomicValue high_water_mark_ | 407 + kPointerSize // AtomicValue high_water_mark_ |
| 409 + kPointerSize // base::Mutex* mutex_ | 408 + kPointerSize // base::Mutex* mutex_ |
| 410 + kPointerSize // base::AtomicWord parallel_sweeping_ | 409 + kPointerSize // base::AtomicWord parallel_sweeping_ |
| 411 + kPointerSize // AtomicValue parallel_compaction_ | 410 + kPointerSize // AtomicValue parallel_compaction_ |
| 412 + 2 * kPointerSize // AtomicNumber free-list statistics | 411 + 2 * kPointerSize // AtomicNumber free-list statistics |
| 413 + kPointerSize // AtomicValue next_chunk_ | 412 + kPointerSize // AtomicValue next_chunk_ |
| (...skipping 89 matching lines...) |
| 503 } | 502 } |
| 504 | 503 |
| 505 size_t size() const { return size_; } | 504 size_t size() const { return size_; } |
| 506 | 505 |
| 507 inline Heap* heap() const { return heap_; } | 506 inline Heap* heap() const { return heap_; } |
| 508 | 507 |
| 509 inline SkipList* skip_list() { return skip_list_; } | 508 inline SkipList* skip_list() { return skip_list_; } |
| 510 | 509 |
| 511 inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; } | 510 inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; } |
| 512 | 511 |
| 512 inline SlotsBuffer* slots_buffer() { return slots_buffer_; } |
| 513 |
| 514 inline SlotsBuffer** slots_buffer_address() { return &slots_buffer_; } |
| 515 |
| 513 inline SlotSet* old_to_new_slots() { return old_to_new_slots_; } | 516 inline SlotSet* old_to_new_slots() { return old_to_new_slots_; } |
| 514 inline SlotSet* old_to_old_slots() { return old_to_old_slots_; } | 517 inline SlotSet* old_to_old_slots() { return old_to_old_slots_; } |
| 515 inline TypedSlotSet* typed_old_to_old_slots() { | |
| 516 return typed_old_to_old_slots_; | |
| 517 } | |
| 518 | 518 |
| 519 void AllocateOldToNewSlots(); | 519 void AllocateOldToNewSlots(); |
| 520 void ReleaseOldToNewSlots(); | 520 void ReleaseOldToNewSlots(); |
| 521 void AllocateOldToOldSlots(); | 521 void AllocateOldToOldSlots(); |
| 522 void ReleaseOldToOldSlots(); | 522 void ReleaseOldToOldSlots(); |
| 523 void AllocateTypedOldToOldSlots(); | |
| 524 void ReleaseTypedOldToOldSlots(); | |
| 525 | 523 |
| 526 Address area_start() { return area_start_; } | 524 Address area_start() { return area_start_; } |
| 527 Address area_end() { return area_end_; } | 525 Address area_end() { return area_end_; } |
| 528 int area_size() { return static_cast<int>(area_end() - area_start()); } | 526 int area_size() { return static_cast<int>(area_end() - area_start()); } |
| 529 | 527 |
| 530 bool CommitArea(size_t requested); | 528 bool CommitArea(size_t requested); |
| 531 | 529 |
| 532 // Approximate amount of physical memory committed for this chunk. | 530 // Approximate amount of physical memory committed for this chunk. |
| 533 size_t CommittedPhysicalMemory() { return high_water_mark_.Value(); } | 531 size_t CommittedPhysicalMemory() { return high_water_mark_.Value(); } |
| 534 | 532 |
| (...skipping 53 matching lines...) |
| 588 DCHECK(!(IsFlagSet(NEVER_EVACUATE) && IsFlagSet(EVACUATION_CANDIDATE))); | 586 DCHECK(!(IsFlagSet(NEVER_EVACUATE) && IsFlagSet(EVACUATION_CANDIDATE))); |
| 589 return IsFlagSet(EVACUATION_CANDIDATE); | 587 return IsFlagSet(EVACUATION_CANDIDATE); |
| 590 } | 588 } |
| 591 | 589 |
| 592 bool CanAllocate() { | 590 bool CanAllocate() { |
| 593 return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE); | 591 return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE); |
| 594 } | 592 } |
| 595 | 593 |
| 596 void MarkEvacuationCandidate() { | 594 void MarkEvacuationCandidate() { |
| 597 DCHECK(!IsFlagSet(NEVER_EVACUATE)); | 595 DCHECK(!IsFlagSet(NEVER_EVACUATE)); |
| 598 DCHECK_NULL(old_to_old_slots_); | 596 DCHECK_NULL(slots_buffer_); |
| 599 DCHECK_NULL(typed_old_to_old_slots_); | |
| 600 SetFlag(EVACUATION_CANDIDATE); | 597 SetFlag(EVACUATION_CANDIDATE); |
| 601 } | 598 } |
| 602 | 599 |
| 603 void ClearEvacuationCandidate() { | 600 void ClearEvacuationCandidate() { |
| 604 DCHECK_NULL(old_to_old_slots_); | 601 DCHECK(slots_buffer_ == NULL); |
| 605 DCHECK_NULL(typed_old_to_old_slots_); | |
| 606 ClearFlag(EVACUATION_CANDIDATE); | 602 ClearFlag(EVACUATION_CANDIDATE); |
| 607 } | 603 } |
| 608 | 604 |
| 609 bool ShouldSkipEvacuationSlotRecording() { | 605 bool ShouldSkipEvacuationSlotRecording() { |
| 610 return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0; | 606 return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0; |
| 611 } | 607 } |
| 612 | 608 |
| 613 Executability executable() { | 609 Executability executable() { |
| 614 return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE; | 610 return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE; |
| 615 } | 611 } |
| (...skipping 64 matching lines...) |
| 680 | 676 |
| 681 Heap* heap_; | 677 Heap* heap_; |
| 682 | 678 |
| 683 // Used by the incremental marker to keep track of the scanning progress in | 679 // Used by the incremental marker to keep track of the scanning progress in |
| 684 // large objects that have a progress bar and are scanned in increments. | 680 // large objects that have a progress bar and are scanned in increments. |
| 685 int progress_bar_; | 681 int progress_bar_; |
| 686 | 682 |
| 687 // Count of bytes marked black on page. | 683 // Count of bytes marked black on page. |
| 688 int live_byte_count_; | 684 int live_byte_count_; |
| 689 | 685 |
| 686 SlotsBuffer* slots_buffer_; |
| 687 |
| 690 // A single slot set for small pages (of size kPageSize) or an array of slot | 688 // A single slot set for small pages (of size kPageSize) or an array of slot |
| 691 // sets for large pages. In the latter case, the number of entries in the array | 689 // sets for large pages. In the latter case, the number of entries in the array |
| 692 // is ceil(size() / kPageSize). | 690 // is ceil(size() / kPageSize). |
| 693 SlotSet* old_to_new_slots_; | 691 SlotSet* old_to_new_slots_; |
| 694 SlotSet* old_to_old_slots_; | 692 SlotSet* old_to_old_slots_; |
| 695 TypedSlotSet* typed_old_to_old_slots_; | |
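The ceil(size() / kPageSize) in the comment above is ordinary round-up integer division: one slot set per kPageSize-sized piece of the chunk. A small self-contained sketch, with the kPageSize value and the helper name assumed for illustration:

```cpp
#include <cstddef>

// Assumed value for illustration; V8's real constant lives elsewhere.
constexpr std::size_t kPageSize = std::size_t{1} << 20;

// ceil(chunk_size / kPageSize) without floating point: one SlotSet per
// kPageSize-sized piece, rounding the final partial piece up.
constexpr std::size_t SlotSetCount(std::size_t chunk_size) {
  return (chunk_size + kPageSize - 1) / kPageSize;
}

static_assert(SlotSetCount(kPageSize) == 1, "small page: one slot set");
static_assert(SlotSetCount(3 * kPageSize + 1) == 4, "large page rounds up");
```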
| 696 | 693 |
| 697 SkipList* skip_list_; | 694 SkipList* skip_list_; |
| 698 | 695 |
| 699 intptr_t write_barrier_counter_; | 696 intptr_t write_barrier_counter_; |
| 700 | 697 |
| 701 // Assuming the initial allocation on a page is sequential, this tracks | 698 // Assuming the initial allocation on a page is sequential, this tracks |
| 702 // the highest number of bytes ever allocated on the page. | 699 // the highest number of bytes ever allocated on the page. |
| 703 AtomicValue<intptr_t> high_water_mark_; | 700 AtomicValue<intptr_t> high_water_mark_; |
| 704 | 701 |
| 705 base::Mutex* mutex_; | 702 base::Mutex* mutex_; |
| (...skipping 152 matching lines...) |
| 858 class LargePage : public MemoryChunk { | 855 class LargePage : public MemoryChunk { |
| 859 public: | 856 public: |
| 860 HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); } | 857 HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); } |
| 861 | 858 |
| 862 inline LargePage* next_page() { | 859 inline LargePage* next_page() { |
| 863 return static_cast<LargePage*>(next_chunk()); | 860 return static_cast<LargePage*>(next_chunk()); |
| 864 } | 861 } |
| 865 | 862 |
| 866 inline void set_next_page(LargePage* page) { set_next_chunk(page); } | 863 inline void set_next_page(LargePage* page) { set_next_chunk(page); } |
| 867 | 864 |
| 868 // A limit to guarantee that we do not overflow typed slot offset in | |
| 869 // the old to old remembered set. | |
| 870 // Note that this limit is higher than what assembler already imposes on | |
| 871 // x64 and ia32 architectures. | |
| 872 static const int kMaxCodePageSize = 512 * MB; | |
| 873 | |
| 874 private: | 865 private: |
| 875 static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk); | 866 static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk); |
| 876 | 867 |
| 877 friend class MemoryAllocator; | 868 friend class MemoryAllocator; |
| 878 }; | 869 }; |
| 879 | 870 |
| 880 | 871 |
| 881 // ---------------------------------------------------------------------------- | 872 // ---------------------------------------------------------------------------- |
| 882 // Space is the abstract superclass for all allocation spaces. | 873 // Space is the abstract superclass for all allocation spaces. |
| 883 class Space : public Malloced { | 874 class Space : public Malloced { |
| (...skipping 95 matching lines...) |
| 979 intptr_t committed_; | 970 intptr_t committed_; |
| 980 intptr_t max_committed_; | 971 intptr_t max_committed_; |
| 981 }; | 972 }; |
| 982 | 973 |
| 983 | 974 |
| 984 class MemoryChunkValidator { | 975 class MemoryChunkValidator { |
| 985 // Computed offsets should match the compiler generated ones. | 976 // Computed offsets should match the compiler generated ones. |
| 986 STATIC_ASSERT(MemoryChunk::kSizeOffset == offsetof(MemoryChunk, size_)); | 977 STATIC_ASSERT(MemoryChunk::kSizeOffset == offsetof(MemoryChunk, size_)); |
| 987 STATIC_ASSERT(MemoryChunk::kLiveBytesOffset == | 978 STATIC_ASSERT(MemoryChunk::kLiveBytesOffset == |
| 988 offsetof(MemoryChunk, live_byte_count_)); | 979 offsetof(MemoryChunk, live_byte_count_)); |
| 989 STATIC_ASSERT(MemoryChunk::kOldToNewSlotsOffset == | 980 STATIC_ASSERT(MemoryChunk::kSlotsBufferOffset == |
| 990 offsetof(MemoryChunk, old_to_new_slots_)); | 981 offsetof(MemoryChunk, slots_buffer_)); |
| 991 STATIC_ASSERT(MemoryChunk::kWriteBarrierCounterOffset == | 982 STATIC_ASSERT(MemoryChunk::kWriteBarrierCounterOffset == |
| 992 offsetof(MemoryChunk, write_barrier_counter_)); | 983 offsetof(MemoryChunk, write_barrier_counter_)); |
| 993 | 984 |
| 994 // Validate our estimates on the header size. | 985 // Validate our estimates on the header size. |
| 995 STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize); | 986 STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize); |
| 996 STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize); | 987 STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize); |
| 997 STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize); | 988 STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize); |
| 998 }; | 989 }; |
| 999 | 990 |
| 1000 | 991 |
| (...skipping 1989 matching lines...) |
| 2990 | 2981 |
| 2991 HeapObject* Next(); | 2982 HeapObject* Next(); |
| 2992 | 2983 |
| 2993 // Implementation of ObjectIterator. | 2984 // Implementation of ObjectIterator. |
| 2994 virtual HeapObject* next_object() { return Next(); } | 2985 virtual HeapObject* next_object() { return Next(); } |
| 2995 | 2986 |
| 2996 private: | 2987 private: |
| 2997 LargePage* current_; | 2988 LargePage* current_; |
| 2998 }; | 2989 }; |
| 2999 | 2990 |
| 3000 class LargePageIterator BASE_EMBEDDED { | |
| 3001 public: | |
| 3002 explicit inline LargePageIterator(LargeObjectSpace* space); | |
| 3003 | |
| 3004 inline LargePage* next(); | |
| 3005 | |
| 3006 private: | |
| 3007 LargePage* next_page_; | |
| 3008 }; | |
| 3009 | 2991 |
| 3010 // Iterates over the chunks (pages and large object pages) that can contain | 2992 // Iterates over the chunks (pages and large object pages) that can contain |
| 3011 // pointers to new space or to evacuation candidates. | 2993 // pointers to new space. |
| 3012 class MemoryChunkIterator BASE_EMBEDDED { | 2994 class PointerChunkIterator BASE_EMBEDDED { |
| 3013 public: | 2995 public: |
| 3014 enum Mode { ALL, ALL_BUT_MAP_SPACE, ALL_BUT_CODE_SPACE }; | 2996 inline explicit PointerChunkIterator(Heap* heap); |
| 3015 inline explicit MemoryChunkIterator(Heap* heap, Mode mode); | |
| 3016 | 2997 |
| 3017 // Return NULL when the iterator is done. | 2998 // Return NULL when the iterator is done. |
| 3018 inline MemoryChunk* next(); | 2999 inline MemoryChunk* next(); |
| 3019 | 3000 |
| 3020 private: | 3001 private: |
| 3021 enum State { | 3002 enum State { kOldSpaceState, kMapState, kLargeObjectState, kFinishedState }; |
| 3022 kOldSpaceState, | |
| 3023 kMapState, | |
| 3024 kCodeState, | |
| 3025 kLargeObjectState, | |
| 3026 kFinishedState | |
| 3027 }; | |
| 3028 State state_; | 3003 State state_; |
| 3029 const Mode mode_; | |
| 3030 PageIterator old_iterator_; | 3004 PageIterator old_iterator_; |
| 3031 PageIterator code_iterator_; | |
| 3032 PageIterator map_iterator_; | 3005 PageIterator map_iterator_; |
| 3033 LargePageIterator lo_iterator_; | 3006 LargeObjectIterator lo_iterator_; |
| 3034 }; | 3007 }; |
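The NEW side's PointerChunkIterator promises only that next() hands back each candidate chunk and returns NULL once exhausted. A hypothetical caller built on that contract (ForEachPointerChunk is not part of the header; it assumes only the types declared above):

```cpp
// Hypothetical caller; only the declarations above and the
// next()-returns-NULL contract are taken from the header.
void ForEachPointerChunk(Heap* heap) {
  PointerChunkIterator it(heap);
  while (MemoryChunk* chunk = it.next()) {
    // Visit a page (old or map space) or a large-object chunk that may
    // contain pointers into new space.
    (void)chunk;  // placeholder for real per-chunk work
  }
}
```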
| 3035 | 3008 |
| 3009 |
| 3036 #ifdef DEBUG | 3010 #ifdef DEBUG |
| 3037 struct CommentStatistic { | 3011 struct CommentStatistic { |
| 3038 const char* comment; | 3012 const char* comment; |
| 3039 int size; | 3013 int size; |
| 3040 int count; | 3014 int count; |
| 3041 void Clear() { | 3015 void Clear() { |
| 3042 comment = NULL; | 3016 comment = NULL; |
| 3043 size = 0; | 3017 size = 0; |
| 3044 count = 0; | 3018 count = 0; |
| 3045 } | 3019 } |
| 3046 // Must be small, since an iteration is used for lookup. | 3020 // Must be small, since an iteration is used for lookup. |
| 3047 static const int kMaxComments = 64; | 3021 static const int kMaxComments = 64; |
| 3048 }; | 3022 }; |
| 3049 #endif | 3023 #endif |
| 3050 } // namespace internal | 3024 } // namespace internal |
| 3051 } // namespace v8 | 3025 } // namespace v8 |
| 3052 | 3026 |
| 3053 #endif // V8_HEAP_SPACES_H_ | 3027 #endif // V8_HEAP_SPACES_H_ |