| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #ifndef V8_HEAP_SPACES_H_ | 5 #ifndef V8_HEAP_SPACES_H_ |
| 6 #define V8_HEAP_SPACES_H_ | 6 #define V8_HEAP_SPACES_H_ |
| 7 | 7 |
| 8 #include "src/allocation.h" | 8 #include "src/allocation.h" |
| 9 #include "src/atomic-utils.h" | 9 #include "src/atomic-utils.h" |
| 10 #include "src/base/atomicops.h" | 10 #include "src/base/atomicops.h" |
| (...skipping 14 matching lines...) |
| 25 class CompactionSpaceCollection; | 25 class CompactionSpaceCollection; |
| 26 class FreeList; | 26 class FreeList; |
| 27 class Isolate; | 27 class Isolate; |
| 28 class MemoryAllocator; | 28 class MemoryAllocator; |
| 29 class MemoryChunk; | 29 class MemoryChunk; |
| 30 class PagedSpace; | 30 class PagedSpace; |
| 31 class SemiSpace; | 31 class SemiSpace; |
| 32 class SkipList; | 32 class SkipList; |
| 33 class SlotsBuffer; | 33 class SlotsBuffer; |
| 34 class SlotSet; | 34 class SlotSet; |
| 35 class TypedSlotSet; |
| 35 class Space; | 36 class Space; |
| 36 | 37 |
| 37 // ----------------------------------------------------------------------------- | 38 // ----------------------------------------------------------------------------- |
| 38 // Heap structures: | 39 // Heap structures: |
| 39 // | 40 // |
| 40 // A JS heap consists of a young generation, an old generation, and a large | 41 // A JS heap consists of a young generation, an old generation, and a large |
| 41 // object space. The young generation is divided into two semispaces. A | 42 // object space. The young generation is divided into two semispaces. A |
| 42 // scavenger implements Cheney's copying algorithm. The old generation is | 43 // scavenger implements Cheney's copying algorithm. The old generation is |
| 43 // separated into a map space and an old object space. The map space contains | 44 // separated into a map space and an old object space. The map space contains |
| 44 all (and only) map objects; the rest of the old objects go into the old space. | 45 all (and only) map objects; the rest of the old objects go into the old space. |
| (...skipping 340 matching lines...) |
| 385 static const intptr_t kLiveBytesOffset = | 386 static const intptr_t kLiveBytesOffset = |
| 386 kSizeOffset + kPointerSize // size_t size | 387 kSizeOffset + kPointerSize // size_t size |
| 387 + kIntptrSize // intptr_t flags_ | 388 + kIntptrSize // intptr_t flags_ |
| 388 + kPointerSize // Address area_start_ | 389 + kPointerSize // Address area_start_ |
| 389 + kPointerSize // Address area_end_ | 390 + kPointerSize // Address area_end_ |
| 390 + 2 * kPointerSize // base::VirtualMemory reservation_ | 391 + 2 * kPointerSize // base::VirtualMemory reservation_ |
| 391 + kPointerSize // Address owner_ | 392 + kPointerSize // Address owner_ |
| 392 + kPointerSize // Heap* heap_ | 393 + kPointerSize // Heap* heap_ |
| 393 + kIntSize; // int progress_bar_ | 394 + kIntSize; // int progress_bar_ |
| 394 | 395 |
| 395 static const size_t kSlotsBufferOffset = | 396 static const size_t kOldToNewSlotsOffset = |
| 396 kLiveBytesOffset + kIntSize; // int live_byte_count_ | 397 kLiveBytesOffset + kIntSize; // int live_byte_count_ |
| 397 | 398 |
| 398 static const size_t kWriteBarrierCounterOffset = | 399 static const size_t kWriteBarrierCounterOffset = |
| 399 kSlotsBufferOffset + kPointerSize // SlotsBuffer* slots_buffer_; | 400 kOldToNewSlotsOffset + kPointerSize // SlotSet* old_to_new_slots_; |
| 400 + kPointerSize // SlotSet* old_to_new_slots_; | 401 + kPointerSize // SlotSet* old_to_old_slots_; |
| 401 + kPointerSize // SlotSet* old_to_old_slots_; | 402 + kPointerSize // TypedSlotSet* typed_old_to_old_slots_; |
| 402 + kPointerSize; // SkipList* skip_list_; | 403 + kPointerSize; // SkipList* skip_list_; |
| 403 | 404 |
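The offset constants above are maintained by hand: each one is the previous offset plus the sizes of the intervening fields, and MemoryChunkValidator further down checks them against the compiler's layout via offsetof. A minimal sketch of the same pattern, with hypothetical field names, shows how the chain stays honest:

```cpp
#include <cstddef>
#include <cstdint>

// Toy chunk header mirroring the bookkeeping style above (names hypothetical).
struct ChunkHeader {
  size_t size_;
  intptr_t flags_;
  void* old_to_new_slots_;
};

// Each offset is the previous one plus the size of the field before it.
static const size_t kSizeOffset = 0;
static const size_t kFlagsOffset = kSizeOffset + sizeof(size_t);
static const size_t kOldToNewSlotsOffset = kFlagsOffset + sizeof(intptr_t);

// Compile-time validation, as MemoryChunkValidator does with STATIC_ASSERT.
static_assert(kFlagsOffset == offsetof(ChunkHeader, flags_), "flags offset");
static_assert(kOldToNewSlotsOffset == offsetof(ChunkHeader, old_to_new_slots_),
              "old-to-new slots offset");
```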
| 404 static const size_t kMinHeaderSize = | 405 static const size_t kMinHeaderSize = |
| 405 kWriteBarrierCounterOffset + | 406 kWriteBarrierCounterOffset + |
| 406 kIntptrSize // intptr_t write_barrier_counter_ | 407 kIntptrSize // intptr_t write_barrier_counter_ |
| 407 + kPointerSize // AtomicValue high_water_mark_ | 408 + kPointerSize // AtomicValue high_water_mark_ |
| 408 + kPointerSize // base::Mutex* mutex_ | 409 + kPointerSize // base::Mutex* mutex_ |
| 409 + kPointerSize // base::AtomicWord parallel_sweeping_ | 410 + kPointerSize // base::AtomicWord parallel_sweeping_ |
| 410 + kPointerSize // AtomicValue parallel_compaction_ | 411 + kPointerSize // AtomicValue parallel_compaction_ |
| 411 + 2 * kPointerSize // AtomicNumber free-list statistics | 412 + 2 * kPointerSize // AtomicNumber free-list statistics |
| 412 + kPointerSize // AtomicValue next_chunk_ | 413 + kPointerSize // AtomicValue next_chunk_ |
| (...skipping 89 matching lines...) |
| 502 } | 503 } |
| 503 | 504 |
| 504 size_t size() const { return size_; } | 505 size_t size() const { return size_; } |
| 505 | 506 |
| 506 inline Heap* heap() const { return heap_; } | 507 inline Heap* heap() const { return heap_; } |
| 507 | 508 |
| 508 inline SkipList* skip_list() { return skip_list_; } | 509 inline SkipList* skip_list() { return skip_list_; } |
| 509 | 510 |
| 510 inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; } | 511 inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; } |
| 511 | 512 |
| 512 inline SlotsBuffer* slots_buffer() { return slots_buffer_; } | |
| 513 | |
| 514 inline SlotsBuffer** slots_buffer_address() { return &slots_buffer_; } | |
| 515 | |
| 516 inline SlotSet* old_to_new_slots() { return old_to_new_slots_; } | 513 inline SlotSet* old_to_new_slots() { return old_to_new_slots_; } |
| 517 inline SlotSet* old_to_old_slots() { return old_to_old_slots_; } | 514 inline SlotSet* old_to_old_slots() { return old_to_old_slots_; } |
| 515 inline TypedSlotSet* typed_old_to_old_slots() { |
| 516 return typed_old_to_old_slots_; |
| 517 } |
| 518 | 518 |
| 519 void AllocateOldToNewSlots(); | 519 void AllocateOldToNewSlots(); |
| 520 void ReleaseOldToNewSlots(); | 520 void ReleaseOldToNewSlots(); |
| 521 void AllocateOldToOldSlots(); | 521 void AllocateOldToOldSlots(); |
| 522 void ReleaseOldToOldSlots(); | 522 void ReleaseOldToOldSlots(); |
| 523 void AllocateTypedOldToOldSlots(); |
| 524 void ReleaseTypedOldToOldSlots(); |
| 523 | 525 |
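The Allocate*/Release* pairs indicate that the remembered sets are materialized lazily rather than at chunk construction. A hedged sketch of a caller; the check-then-allocate pattern is an assumption, not something this patch shows:

```cpp
// Hypothetical caller: ensure the old-to-new set exists before recording into
// it, and drop it once the page no longer needs a remembered set.
void EnsureOldToNewSlots(MemoryChunk* chunk) {
  if (chunk->old_to_new_slots() == nullptr) {
    chunk->AllocateOldToNewSlots();
  }
}

void DropOldToNewSlots(MemoryChunk* chunk) {
  if (chunk->old_to_new_slots() != nullptr) {
    chunk->ReleaseOldToNewSlots();
  }
}
```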
| 524 Address area_start() { return area_start_; } | 526 Address area_start() { return area_start_; } |
| 525 Address area_end() { return area_end_; } | 527 Address area_end() { return area_end_; } |
| 526 int area_size() { return static_cast<int>(area_end() - area_start()); } | 528 int area_size() { return static_cast<int>(area_end() - area_start()); } |
| 527 | 529 |
| 528 bool CommitArea(size_t requested); | 530 bool CommitArea(size_t requested); |
| 529 | 531 |
| 530 // Approximate amount of physical memory committed for this chunk. | 532 // Approximate amount of physical memory committed for this chunk. |
| 531 size_t CommittedPhysicalMemory() { return high_water_mark_.Value(); } | 533 size_t CommittedPhysicalMemory() { return high_water_mark_.Value(); } |
| 532 | 534 |
| (...skipping 53 matching lines...) |
| 586 DCHECK(!(IsFlagSet(NEVER_EVACUATE) && IsFlagSet(EVACUATION_CANDIDATE))); | 588 DCHECK(!(IsFlagSet(NEVER_EVACUATE) && IsFlagSet(EVACUATION_CANDIDATE))); |
| 587 return IsFlagSet(EVACUATION_CANDIDATE); | 589 return IsFlagSet(EVACUATION_CANDIDATE); |
| 588 } | 590 } |
| 589 | 591 |
| 590 bool CanAllocate() { | 592 bool CanAllocate() { |
| 591 return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE); | 593 return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE); |
| 592 } | 594 } |
| 593 | 595 |
| 594 void MarkEvacuationCandidate() { | 596 void MarkEvacuationCandidate() { |
| 595 DCHECK(!IsFlagSet(NEVER_EVACUATE)); | 597 DCHECK(!IsFlagSet(NEVER_EVACUATE)); |
| 596 DCHECK_NULL(slots_buffer_); | 598 DCHECK_NULL(old_to_old_slots_); |
| 599 DCHECK_NULL(typed_old_to_old_slots_); |
| 597 SetFlag(EVACUATION_CANDIDATE); | 600 SetFlag(EVACUATION_CANDIDATE); |
| 598 } | 601 } |
| 599 | 602 |
| 600 void ClearEvacuationCandidate() { | 603 void ClearEvacuationCandidate() { |
| 601 DCHECK(slots_buffer_ == NULL); | 604 DCHECK_NULL(old_to_old_slots_); |
| 605 DCHECK_NULL(typed_old_to_old_slots_); |
| 602 ClearFlag(EVACUATION_CANDIDATE); | 606 ClearFlag(EVACUATION_CANDIDATE); |
| 603 } | 607 } |
| 604 | 608 |
| 605 bool ShouldSkipEvacuationSlotRecording() { | 609 bool ShouldSkipEvacuationSlotRecording() { |
| 606 return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0; | 610 return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0; |
| 607 } | 611 } |
| 608 | 612 |
| 609 Executability executable() { | 613 Executability executable() { |
| 610 return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE; | 614 return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE; |
| 611 } | 615 } |
| (...skipping 64 matching lines...) |
| 676 | 680 |
| 677 Heap* heap_; | 681 Heap* heap_; |
| 678 | 682 |
| 679 // Used by the incremental marker to keep track of the scanning progress in | 683 // Used by the incremental marker to keep track of the scanning progress in |
| 680 // large objects that have a progress bar and are scanned in increments. | 684 // large objects that have a progress bar and are scanned in increments. |
| 681 int progress_bar_; | 685 int progress_bar_; |
| 682 | 686 |
| 683 // Count of bytes marked black on page. | 687 // Count of bytes marked black on page. |
| 684 int live_byte_count_; | 688 int live_byte_count_; |
| 685 | 689 |
| 686 SlotsBuffer* slots_buffer_; | |
| 687 | |
| 688 // A single slot set for small pages (of size kPageSize) or an array of slot | 690 // A single slot set for small pages (of size kPageSize) or an array of slot |
| 689 // set for large pages. In the latter case the number of entries in the array | 691 // set for large pages. In the latter case the number of entries in the array |
| 690 // is ceil(size() / kPageSize). | 692 // is ceil(size() / kPageSize). |
| 691 SlotSet* old_to_new_slots_; | 693 SlotSet* old_to_new_slots_; |
| 692 SlotSet* old_to_old_slots_; | 694 SlotSet* old_to_old_slots_; |
| 695 TypedSlotSet* typed_old_to_old_slots_; |
| 693 | 696 |
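Per the comment above, a large page carries ceil(size() / kPageSize) slot-set entries. In integer arithmetic that is the usual round-up division; a small sketch:

```cpp
// Number of SlotSet buckets a chunk needs: one per kPageSize of payload,
// rounded up (integer ceil-division).
size_t SlotSetBuckets(size_t chunk_size, size_t page_size) {
  return (chunk_size + page_size - 1) / page_size;
}
// A regular page (chunk_size == kPageSize) gets exactly 1 bucket; a large
// page of 3.5x kPageSize would get 4.
```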
| 694 SkipList* skip_list_; | 697 SkipList* skip_list_; |
| 695 | 698 |
| 696 intptr_t write_barrier_counter_; | 699 intptr_t write_barrier_counter_; |
| 697 | 700 |
| 698 // Assuming the initial allocation on a page is sequential, this counts | 701 // Assuming the initial allocation on a page is sequential, this counts |
| 699 // the highest number of bytes ever allocated on the page. | 702 // the highest number of bytes ever allocated on the page. |
| 700 AtomicValue<intptr_t> high_water_mark_; | 703 AtomicValue<intptr_t> high_water_mark_; |
| 701 | 704 |
| 702 base::Mutex* mutex_; | 705 base::Mutex* mutex_; |
| (...skipping 152 matching lines...) |
| 855 class LargePage : public MemoryChunk { | 858 class LargePage : public MemoryChunk { |
| 856 public: | 859 public: |
| 857 HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); } | 860 HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); } |
| 858 | 861 |
| 859 inline LargePage* next_page() { | 862 inline LargePage* next_page() { |
| 860 return static_cast<LargePage*>(next_chunk()); | 863 return static_cast<LargePage*>(next_chunk()); |
| 861 } | 864 } |
| 862 | 865 |
| 863 inline void set_next_page(LargePage* page) { set_next_chunk(page); } | 866 inline void set_next_page(LargePage* page) { set_next_chunk(page); } |
| 864 | 867 |
| 868 // A limit to guarantee that we do not overflow a typed slot offset in |
| 869 // the old-to-old remembered set. |
| 870 // Note that this limit is higher than the one the assembler already |
| 871 // imposes on the x64 and ia32 architectures. |
| 872 static const int kMaxCodePageSize = 512 * MB; |
| 873 |
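512 * MB is exactly 2^29, so any in-page offset fits in 29 bits. A plausible reading, though the patch does not say so, is that TypedSlotSet packs the slot type into the remaining high bits of a 32-bit word; the sketch below is that guess made concrete, with hypothetical names:

```cpp
#include <cstdint>

// Hypothetical packing: low 29 bits hold the offset (< 512 MB), high bits the
// slot type. This is an illustrative guess at why the limit is 2^29.
static const uint32_t kOffsetBits = 29;
static const uint32_t kOffsetMask = (1u << kOffsetBits) - 1;

uint32_t PackTypedSlot(uint32_t type, uint32_t offset) {
  return (type << kOffsetBits) | (offset & kOffsetMask);
}
uint32_t SlotOffset(uint32_t packed) { return packed & kOffsetMask; }
uint32_t SlotType(uint32_t packed) { return packed >> kOffsetBits; }
```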
| 865 private: | 874 private: |
| 866 static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk); | 875 static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk); |
| 867 | 876 |
| 868 friend class MemoryAllocator; | 877 friend class MemoryAllocator; |
| 869 }; | 878 }; |
| 870 | 879 |
| 871 | 880 |
| 872 // ---------------------------------------------------------------------------- | 881 // ---------------------------------------------------------------------------- |
| 873 // Space is the abstract superclass for all allocation spaces. | 882 // Space is the abstract superclass for all allocation spaces. |
| 874 class Space : public Malloced { | 883 class Space : public Malloced { |
| (...skipping 95 matching lines...) |
| 970 intptr_t committed_; | 979 intptr_t committed_; |
| 971 intptr_t max_committed_; | 980 intptr_t max_committed_; |
| 972 }; | 981 }; |
| 973 | 982 |
| 974 | 983 |
| 975 class MemoryChunkValidator { | 984 class MemoryChunkValidator { |
| 976 // Computed offsets should match the compiler generated ones. | 985 // Computed offsets should match the compiler generated ones. |
| 977 STATIC_ASSERT(MemoryChunk::kSizeOffset == offsetof(MemoryChunk, size_)); | 986 STATIC_ASSERT(MemoryChunk::kSizeOffset == offsetof(MemoryChunk, size_)); |
| 978 STATIC_ASSERT(MemoryChunk::kLiveBytesOffset == | 987 STATIC_ASSERT(MemoryChunk::kLiveBytesOffset == |
| 979 offsetof(MemoryChunk, live_byte_count_)); | 988 offsetof(MemoryChunk, live_byte_count_)); |
| 980 STATIC_ASSERT(MemoryChunk::kSlotsBufferOffset == | 989 STATIC_ASSERT(MemoryChunk::kOldToNewSlotsOffset == |
| 981 offsetof(MemoryChunk, slots_buffer_)); | 990 offsetof(MemoryChunk, old_to_new_slots_)); |
| 982 STATIC_ASSERT(MemoryChunk::kWriteBarrierCounterOffset == | 991 STATIC_ASSERT(MemoryChunk::kWriteBarrierCounterOffset == |
| 983 offsetof(MemoryChunk, write_barrier_counter_)); | 992 offsetof(MemoryChunk, write_barrier_counter_)); |
| 984 | 993 |
| 985 // Validate our estimates on the header size. | 994 // Validate our estimates on the header size. |
| 986 STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize); | 995 STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize); |
| 987 STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize); | 996 STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize); |
| 988 STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize); | 997 STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize); |
| 989 }; | 998 }; |
| 990 | 999 |
| 991 | 1000 |
| (...skipping 1989 matching lines...) |
| 2981 | 2990 |
| 2982 HeapObject* Next(); | 2991 HeapObject* Next(); |
| 2983 | 2992 |
| 2984 // Implementation of ObjectIterator. | 2993 // Implementation of ObjectIterator. |
| 2985 virtual HeapObject* next_object() { return Next(); } | 2994 virtual HeapObject* next_object() { return Next(); } |
| 2986 | 2995 |
| 2987 private: | 2996 private: |
| 2988 LargePage* current_; | 2997 LargePage* current_; |
| 2989 }; | 2998 }; |
| 2990 | 2999 |
| 3000 class LargePageIterator BASE_EMBEDDED { |
| 3001 public: |
| 3002 explicit inline LargePageIterator(LargeObjectSpace* space); |
| 3003 |
| 3004 inline LargePage* next(); |
| 3005 |
| 3006 private: |
| 3007 LargePage* next_page_; |
| 3008 }; |
| 2991 | 3009 |
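Judging by the declaration, LargePageIterator follows the same next()-returns-NULL-when-done protocol as the other page iterators here. A minimal usage sketch under that assumption:

```cpp
// Walk every large page in a LargeObjectSpace (sketch; assumes next()
// returns NULL at the end, as MemoryChunkIterator::next() documents below).
void VisitLargePages(LargeObjectSpace* space) {
  LargePageIterator it(space);
  while (LargePage* page = it.next()) {
    HeapObject* object = page->GetObject();  // one object per large page
    (void)object;
  }
}
```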
| 2992 // Iterates over the chunks (pages and large object pages) that can contain | 3010 // Iterates over the chunks (pages and large object pages) that can contain |
| 2993 // pointers to new space. | 3011 // pointers to new space or to evacuation candidates. |
| 2994 class PointerChunkIterator BASE_EMBEDDED { | 3012 class MemoryChunkIterator BASE_EMBEDDED { |
| 2995 public: | 3013 public: |
| 2996 inline explicit PointerChunkIterator(Heap* heap); | 3014 enum Mode { ALL, ALL_BUT_MAP_SPACE, ALL_BUT_CODE_SPACE }; |
| 3015 inline explicit MemoryChunkIterator(Heap* heap, Mode mode); |
| 2997 | 3016 |
| 2998 // Returns NULL when the iterator is done. | 3017 // Returns NULL when the iterator is done. |
| 2999 inline MemoryChunk* next(); | 3018 inline MemoryChunk* next(); |
| 3000 | 3019 |
| 3001 private: | 3020 private: |
| 3002 enum State { kOldSpaceState, kMapState, kLargeObjectState, kFinishedState }; | 3021 enum State { |
| 3022 kOldSpaceState, |
| 3023 kMapState, |
| 3024 kCodeState, |
| 3025 kLargeObjectState, |
| 3026 kFinishedState |
| 3027 }; |
| 3003 State state_; | 3028 State state_; |
| 3029 const Mode mode_; |
| 3004 PageIterator old_iterator_; | 3030 PageIterator old_iterator_; |
| 3031 PageIterator code_iterator_; |
| 3005 PageIterator map_iterator_; | 3032 PageIterator map_iterator_; |
| 3006 LargeObjectIterator lo_iterator_; | 3033 LargePageIterator lo_iterator_; |
| 3007 }; | 3034 }; |
| 3008 | 3035 |
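The renamed MemoryChunkIterator takes a Mode, letting callers skip the map or code space; the comment above states the NULL-terminated protocol. A usage sketch:

```cpp
// Visit every chunk that may hold recorded slots, skipping the code space.
void VisitChunks(Heap* heap) {
  MemoryChunkIterator it(heap, MemoryChunkIterator::ALL_BUT_CODE_SPACE);
  while (MemoryChunk* chunk = it.next()) {
    // e.g. process chunk->old_to_new_slots() here.
  }
}
```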
| 3009 | |
| 3010 #ifdef DEBUG | 3036 #ifdef DEBUG |
| 3011 struct CommentStatistic { | 3037 struct CommentStatistic { |
| 3012 const char* comment; | 3038 const char* comment; |
| 3013 int size; | 3039 int size; |
| 3014 int count; | 3040 int count; |
| 3015 void Clear() { | 3041 void Clear() { |
| 3016 comment = NULL; | 3042 comment = NULL; |
| 3017 size = 0; | 3043 size = 0; |
| 3018 count = 0; | 3044 count = 0; |
| 3019 } | 3045 } |
| 3020 // Must be small, since an iteration is used for lookup. | 3046 // Must be small, since an iteration is used for lookup. |
| 3021 static const int kMaxComments = 64; | 3047 static const int kMaxComments = 64; |
| 3022 }; | 3048 }; |
| 3023 #endif | 3049 #endif |
| 3024 } // namespace internal | 3050 } // namespace internal |
| 3025 } // namespace v8 | 3051 } // namespace v8 |
| 3026 | 3052 |
| 3027 #endif // V8_HEAP_SPACES_H_ | 3053 #endif // V8_HEAP_SPACES_H_ |