| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #ifndef V8_HEAP_SPACES_H_ | 5 #ifndef V8_HEAP_SPACES_H_ |
| 6 #define V8_HEAP_SPACES_H_ | 6 #define V8_HEAP_SPACES_H_ |
| 7 | 7 |
| 8 #include "src/allocation.h" | 8 #include "src/allocation.h" |
| 9 #include "src/atomic-utils.h" | 9 #include "src/atomic-utils.h" |
| 10 #include "src/base/atomicops.h" | 10 #include "src/base/atomicops.h" |
| (...skipping 380 matching lines...) |
| 391 + kPointerSize // Address owner_ | 391 + kPointerSize // Address owner_ |
| 392 + kPointerSize // Heap* heap_ | 392 + kPointerSize // Heap* heap_ |
| 393 + kIntSize; // int progress_bar_ | 393 + kIntSize; // int progress_bar_ |
| 394 | 394 |
| 395 static const size_t kSlotsBufferOffset = | 395 static const size_t kSlotsBufferOffset = |
| 396 kLiveBytesOffset + kIntSize; // int live_byte_count_ | 396 kLiveBytesOffset + kIntSize; // int live_byte_count_ |
| 397 | 397 |
| 398 static const size_t kWriteBarrierCounterOffset = | 398 static const size_t kWriteBarrierCounterOffset = |
| 399 kSlotsBufferOffset + kPointerSize // SlotsBuffer* slots_buffer_; | 399 kSlotsBufferOffset + kPointerSize // SlotsBuffer* slots_buffer_; |
| 400 + kPointerSize // SlotSet* old_to_new_slots_; | 400 + kPointerSize // SlotSet* old_to_new_slots_; |
| | 401 + kPointerSize // SlotSet* old_to_old_slots_; |
| 401 + kPointerSize; // SkipList* skip_list_; | 402 + kPointerSize; // SkipList* skip_list_; |
| 402 | 403 |
| 403 static const size_t kMinHeaderSize = | 404 static const size_t kMinHeaderSize = |
| 404 kWriteBarrierCounterOffset + | 405 kWriteBarrierCounterOffset + |
| 405 kIntptrSize // intptr_t write_barrier_counter_ | 406 kIntptrSize // intptr_t write_barrier_counter_ |
| 406 + kPointerSize // AtomicValue high_water_mark_ | 407 + kPointerSize // AtomicValue high_water_mark_ |
| 407 + kPointerSize // base::Mutex* mutex_ | 408 + kPointerSize // base::Mutex* mutex_ |
| 408 + kPointerSize // base::AtomicWord parallel_sweeping_ | 409 + kPointerSize // base::AtomicWord parallel_sweeping_ |
| 409 + kPointerSize // AtomicValue parallel_compaction_ | 410 + kPointerSize // AtomicValue parallel_compaction_ |
| 410 + 5 * kPointerSize // AtomicNumber free-list statistics | 411 + 5 * kPointerSize // AtomicNumber free-list statistics |
| (...skipping 27 matching lines...) |
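The hunk above extends MemoryChunk's hand-maintained header layout: each offset constant is the previous one plus the size of the field it skips, so inserting `old_to_old_slots_` pushes every later offset up by one pointer. The sketch below only illustrates that idiom with made-up names (it is not V8's actual header); a `static_assert` is one way to keep such constants from drifting out of sync with the declarations.

```cpp
// Minimal sketch of the hand-computed-offset idiom, with hypothetical names.
// Only the pattern (summing field sizes into compile-time offsets) mirrors
// the change above; the struct and constants are not V8's.
#include <cstddef>
#include <cstdint>

struct Header {
  intptr_t flags_;
  void* owner_;
  void* slots_buffer_;
  void* old_to_new_slots_;
  void* old_to_old_slots_;  // newly inserted field
};

constexpr size_t kPointerSize = sizeof(void*);
constexpr size_t kFlagsOffset = 0;
constexpr size_t kOwnerOffset = kFlagsOffset + sizeof(intptr_t);
constexpr size_t kSlotsBufferOffset = kOwnerOffset + kPointerSize;
constexpr size_t kOldToNewSlotsOffset = kSlotsBufferOffset + kPointerSize;
// Every offset past the insertion point grows by kPointerSize.
constexpr size_t kOldToOldSlotsOffset = kOldToNewSlotsOffset + kPointerSize;

// Catches the constants drifting out of sync with the field order.
static_assert(offsetof(Header, old_to_old_slots_) == kOldToOldSlotsOffset,
              "hand-computed offset out of sync with declaration order");
```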
| 438 | 439 |
| 439 static const MemoryChunk* FromAddress(const byte* a) { | 440 static const MemoryChunk* FromAddress(const byte* a) { |
| 440 return reinterpret_cast<const MemoryChunk*>(OffsetFrom(a) & | 441 return reinterpret_cast<const MemoryChunk*>(OffsetFrom(a) & |
| 441 ~kAlignmentMask); | 442 ~kAlignmentMask); |
| 442 } | 443 } |
| 443 | 444 |
| 444 static void IncrementLiveBytesFromGC(HeapObject* object, int by) { | 445 static void IncrementLiveBytesFromGC(HeapObject* object, int by) { |
| 445 MemoryChunk::FromAddress(object->address())->IncrementLiveBytes(by); | 446 MemoryChunk::FromAddress(object->address())->IncrementLiveBytes(by); |
| 446 } | 447 } |
| 447 | 448 |
| 448 // Only works for addresses in pointer spaces, not data or code spaces. | 449 // Only works for addresses in pointer spaces, not code space. |
| 449 static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr); | 450 static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr); |
| 450 | 451 |
| 451 static inline uint32_t FastAddressToMarkbitIndex(Address addr) { | 452 static inline uint32_t FastAddressToMarkbitIndex(Address addr) { |
| 452 const intptr_t offset = reinterpret_cast<intptr_t>(addr) & kAlignmentMask; | 453 const intptr_t offset = reinterpret_cast<intptr_t>(addr) & kAlignmentMask; |
| 453 return static_cast<uint32_t>(offset) >> kPointerSizeLog2; | 454 return static_cast<uint32_t>(offset) >> kPointerSizeLog2; |
| 454 } | 455 } |
| 455 | 456 |
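FromAddress() and FastAddressToMarkbitIndex() both rely on every MemoryChunk starting at an alignment boundary: clearing the low bits of any interior pointer recovers the chunk header, and the cleared-off remainder, shifted by kPointerSizeLog2, is the word index used for the mark bits. A self-contained sketch with assumed constants (1 MB alignment, 8-byte pointers; not necessarily V8's values):

```cpp
// Sketch of the alignment-masking trick used by FromAddress() and
// FastAddressToMarkbitIndex(). The alignment and pointer size below are
// assumptions for the example, not V8's actual constants.
#include <cassert>
#include <cstdint>

constexpr uintptr_t kAlignment = uintptr_t{1} << 20;  // assumed 1 MB chunks
constexpr uintptr_t kAlignmentMask = kAlignment - 1;
constexpr int kPointerSizeLog2 = 3;                   // assumed 8-byte pointers

int main() {
  uintptr_t chunk_base = uintptr_t{1} << 30;  // kAlignment-aligned chunk start
  uintptr_t addr = chunk_base + 0x1238;       // some address inside the chunk

  // Clearing the low bits of any interior address yields the chunk header.
  assert((addr & ~kAlignmentMask) == chunk_base);
  // The remainder, counted in words, indexes the chunk's mark bitmap.
  uint32_t markbit =
      static_cast<uint32_t>(addr & kAlignmentMask) >> kPointerSizeLog2;
  assert(markbit == 0x1238 >> 3);
  return 0;
}
```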
| 456 static inline void UpdateHighWaterMark(Address mark) { | 457 static inline void UpdateHighWaterMark(Address mark) { |
| 457 if (mark == nullptr) return; | 458 if (mark == nullptr) return; |
| 458 // Need to subtract one from the mark because when a chunk is full the | 459 // Need to subtract one from the mark because when a chunk is full the |
| (...skipping 212 matching lines...) |
| 671 inline SkipList* skip_list() { return skip_list_; } | 672 inline SkipList* skip_list() { return skip_list_; } |
| 672 | 673 |
| 673 inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; } | 674 inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; } |
| 674 | 675 |
| 675 inline SlotsBuffer* slots_buffer() { return slots_buffer_; } | 676 inline SlotsBuffer* slots_buffer() { return slots_buffer_; } |
| 676 | 677 |
| 677 inline SlotsBuffer** slots_buffer_address() { return &slots_buffer_; } | 678 inline SlotsBuffer** slots_buffer_address() { return &slots_buffer_; } |
| 678 | 679 |
| 679 inline SlotSet* old_to_new_slots() { return old_to_new_slots_; } | 680 inline SlotSet* old_to_new_slots() { return old_to_new_slots_; } |
| 680 | 681 |
| | 682 inline SlotSet* old_to_old_slots() { return old_to_old_slots_; } |
| | 683 |
| 681 void AllocateOldToNewSlots(); | 684 void AllocateOldToNewSlots(); |
| 682 void ReleaseOldToNewSlots(); | 685 void ReleaseOldToNewSlots(); |
| | 686 void AllocateOldToOldSlots(); |
| | 687 void ReleaseOldToOldSlots(); |
| 683 | 688 |
| 684 void MarkEvacuationCandidate() { | 689 void MarkEvacuationCandidate() { |
| 685 DCHECK(!IsFlagSet(NEVER_EVACUATE)); | 690 DCHECK(!IsFlagSet(NEVER_EVACUATE)); |
| 686 DCHECK(slots_buffer_ == NULL); | 691 DCHECK(slots_buffer_ == NULL); |
| 687 SetFlag(EVACUATION_CANDIDATE); | 692 SetFlag(EVACUATION_CANDIDATE); |
| 688 } | 693 } |
| 689 | 694 |
| 690 void ClearEvacuationCandidate() { | 695 void ClearEvacuationCandidate() { |
| 691 DCHECK(slots_buffer_ == NULL); | 696 DCHECK(slots_buffer_ == NULL); |
| 692 ClearFlag(EVACUATION_CANDIDATE); | 697 ClearFlag(EVACUATION_CANDIDATE); |
| (...skipping 32 matching lines...) |
| 725 // Used by the incremental marker to keep track of the scanning progress in | 730 // Used by the incremental marker to keep track of the scanning progress in |
| 726 // large objects that have a progress bar and are scanned in increments. | 731 // large objects that have a progress bar and are scanned in increments. |
| 727 int progress_bar_; | 732 int progress_bar_; |
| 728 // Count of bytes marked black on page. | 733 // Count of bytes marked black on page. |
| 729 int live_byte_count_; | 734 int live_byte_count_; |
| 730 SlotsBuffer* slots_buffer_; | 735 SlotsBuffer* slots_buffer_; |
| 731 // A single slot set for small pages (of size kPageSize) or an array of slot | 736 // A single slot set for small pages (of size kPageSize) or an array of slot |
| 732 // set for large pages. In the latter case the number of entries in the array | 737 // set for large pages. In the latter case the number of entries in the array |
| 733 // is ceil(size() / kPageSize). | 738 // is ceil(size() / kPageSize). |
| 734 SlotSet* old_to_new_slots_; | 739 SlotSet* old_to_new_slots_; |
| | 740 SlotSet* old_to_old_slots_; |
| 735 SkipList* skip_list_; | 741 SkipList* skip_list_; |
| 736 intptr_t write_barrier_counter_; | 742 intptr_t write_barrier_counter_; |
| 737 // Assuming the initial allocation on a page is sequential, | 743 // Assuming the initial allocation on a page is sequential, |
| 738 // count highest number of bytes ever allocated on the page. | 744 // count highest number of bytes ever allocated on the page. |
| 739 AtomicValue<intptr_t> high_water_mark_; | 745 AtomicValue<intptr_t> high_water_mark_; |
| 740 | 746 |
| 741 base::Mutex* mutex_; | 747 base::Mutex* mutex_; |
| 742 AtomicValue<ConcurrentSweepingState> concurrent_sweeping_; | 748 AtomicValue<ConcurrentSweepingState> concurrent_sweeping_; |
| 743 AtomicValue<ParallelCompactingState> parallel_compaction_; | 749 AtomicValue<ParallelCompactingState> parallel_compaction_; |
| 744 | 750 |
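The new `old_to_old_slots_` pointer follows the same sizing rule the comment gives for `old_to_new_slots_`: a single slot set covers a regular page, while a large page carries an array with ceil(size() / kPageSize) entries. A toy, self-contained version of that bucket arithmetic (SlotSet and kPageSize here are stand-ins, not V8's definitions):

```cpp
// Toy illustration of "one slot set per kPageSize worth of chunk".
// SlotSet and kPageSize are placeholders, not V8's types.
#include <cstddef>
#include <vector>

constexpr size_t kPageSize = size_t{1} << 20;  // assumed regular page size

struct SlotSet { /* bitmap of recorded slot offsets, elided */ };

std::vector<SlotSet> AllocateSlotSets(size_t chunk_size) {
  // ceil(chunk_size / kPageSize): a regular page gets exactly one set,
  // a large page gets one per started kPageSize-sized piece.
  size_t buckets = (chunk_size + kPageSize - 1) / kPageSize;
  return std::vector<SlotSet>(buckets);
}

// AllocateSlotSets(kPageSize).size() == 1
// AllocateSlotSets(3 * kPageSize + 1).size() == 4
```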
| (...skipping 29 matching lines...) |
| 774 class Page : public MemoryChunk { | 780 class Page : public MemoryChunk { |
| 775 public: | 781 public: |
| 776 // Returns the page containing a given address. The address ranges | 782 // Returns the page containing a given address. The address ranges |
| 777 // from [page_addr .. page_addr + kPageSize[ | 783 // from [page_addr .. page_addr + kPageSize[ |
| 778 // This only works if the object is in fact in a page. See also MemoryChunk:: | 784 // This only works if the object is in fact in a page. See also MemoryChunk:: |
| 779 // FromAddress() and FromAnyAddress(). | 785 // FromAddress() and FromAnyAddress(). |
| 780 INLINE(static Page* FromAddress(Address a)) { | 786 INLINE(static Page* FromAddress(Address a)) { |
| 781 return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask); | 787 return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask); |
| 782 } | 788 } |
| 783 | 789 |
| | 790 // Only works for addresses in pointer spaces, not code space. |
| | 791 inline static Page* FromAnyPointerAddress(Heap* heap, Address addr); |
| | 792 |
| 784 // Returns the page containing an allocation top. Because an allocation | 793 // Returns the page containing an allocation top. Because an allocation |
| 785 // top address can be the upper bound of the page, we need to subtract | 794 // top address can be the upper bound of the page, we need to subtract |
| 786 // it with kPointerSize first. The address ranges from | 795 // it with kPointerSize first. The address ranges from |
| 787 // [page_addr + kObjectStartOffset .. page_addr + kPageSize]. | 796 // [page_addr + kObjectStartOffset .. page_addr + kPageSize]. |
| 788 INLINE(static Page* FromAllocationTop(Address top)) { | 797 INLINE(static Page* FromAllocationTop(Address top)) { |
| 789 Page* p = FromAddress(top - kPointerSize); | 798 Page* p = FromAddress(top - kPointerSize); |
| 790 return p; | 799 return p; |
| 791 } | 800 } |
| 792 | 801 |
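FromAllocationTop() subtracts kPointerSize before masking because a completely full page's allocation top equals the page's upper bound, and masking that address directly would name the following page. The small check below illustrates the off-by-one; the page size and pointer size are assumed values for the example, not V8's constants.

```cpp
// Why the allocation top is nudged down one word before masking.
// kPageSize and kPointerSize are assumed values for this sketch.
#include <cassert>
#include <cstdint>

constexpr uintptr_t kPageSize = uintptr_t{1} << 20;
constexpr uintptr_t kPageAlignmentMask = kPageSize - 1;
constexpr uintptr_t kPointerSize = 8;

uintptr_t PageFromAddress(uintptr_t a) { return a & ~kPageAlignmentMask; }

uintptr_t PageFromAllocationTop(uintptr_t top) {
  return PageFromAddress(top - kPointerSize);
}

int main() {
  uintptr_t page = uintptr_t{1} << 30;     // aligned page start
  uintptr_t full_top = page + kPageSize;   // top of a completely full page
  assert(PageFromAddress(full_top) != page);        // masks to the next page
  assert(PageFromAllocationTop(full_top) == page);  // stays on the right page
  return 0;
}
```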
| 793 // Returns the next page in the chain of pages owned by a space. | 802 // Returns the next page in the chain of pages owned by a space. |
| (...skipping 2268 matching lines...) |
| 3062 count = 0; | 3071 count = 0; |
| 3063 } | 3072 } |
| 3064 // Must be small, since an iteration is used for lookup. | 3073 // Must be small, since an iteration is used for lookup. |
| 3065 static const int kMaxComments = 64; | 3074 static const int kMaxComments = 64; |
| 3066 }; | 3075 }; |
| 3067 #endif | 3076 #endif |
| 3068 } // namespace internal | 3077 } // namespace internal |
| 3069 } // namespace v8 | 3078 } // namespace v8 |
| 3070 | 3079 |
| 3071 #endif // V8_HEAP_SPACES_H_ | 3080 #endif // V8_HEAP_SPACES_H_ |