| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #ifndef V8_HEAP_SPACES_H_ | 5 #ifndef V8_HEAP_SPACES_H_ |
| 6 #define V8_HEAP_SPACES_H_ | 6 #define V8_HEAP_SPACES_H_ |
| 7 | 7 |
| 8 #include "src/allocation.h" | 8 #include "src/allocation.h" |
| 9 #include "src/atomic-utils.h" | 9 #include "src/atomic-utils.h" |
| 10 #include "src/base/atomicops.h" | 10 #include "src/base/atomicops.h" |
| (...skipping 380 matching lines...) |
| 391 + kPointerSize // Address owner_ | 391 + kPointerSize // Address owner_ |
| 392 + kPointerSize // Heap* heap_ | 392 + kPointerSize // Heap* heap_ |
| 393 + kIntSize; // int progress_bar_ | 393 + kIntSize; // int progress_bar_ |
| 394 | 394 |
| 395 static const size_t kSlotsBufferOffset = | 395 static const size_t kSlotsBufferOffset = |
| 396 kLiveBytesOffset + kIntSize; // int live_byte_count_ | 396 kLiveBytesOffset + kIntSize; // int live_byte_count_ |
| 397 | 397 |
| 398 static const size_t kWriteBarrierCounterOffset = | 398 static const size_t kWriteBarrierCounterOffset = |
| 399 kSlotsBufferOffset + kPointerSize // SlotsBuffer* slots_buffer_; | 399 kSlotsBufferOffset + kPointerSize // SlotsBuffer* slots_buffer_; |
| 400 + kPointerSize // SlotSet* old_to_new_slots_; | 400 + kPointerSize // SlotSet* old_to_new_slots_; |
| 401 + kPointerSize // SlotSet* old_to_old_slots_; |
| 401 + kPointerSize; // SkipList* skip_list_; | 402 + kPointerSize; // SkipList* skip_list_; |
| 402 | 403 |
| 403 static const size_t kMinHeaderSize = | 404 static const size_t kMinHeaderSize = |
| 404 kWriteBarrierCounterOffset + | 405 kWriteBarrierCounterOffset + |
| 405 kIntptrSize // intptr_t write_barrier_counter_ | 406 kIntptrSize // intptr_t write_barrier_counter_ |
| 406 + kPointerSize // AtomicValue high_water_mark_ | 407 + kPointerSize // AtomicValue high_water_mark_ |
| 407 + kPointerSize // base::Mutex* mutex_ | 408 + kPointerSize // base::Mutex* mutex_ |
| 408 + kPointerSize // base::AtomicWord parallel_sweeping_ | 409 + kPointerSize // base::AtomicWord parallel_sweeping_ |
| 409 + kPointerSize // AtomicValue parallel_compaction_ | 410 + kPointerSize // AtomicValue parallel_compaction_ |
| 410 + 5 * kPointerSize // AtomicNumber free-list statistics | 411 + 5 * kPointerSize // AtomicNumber free-list statistics |
| (...skipping 19 matching lines...) |
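A note on the running sums above: the header offsets are hand-computed by adding up the sizes of the preceding fields, which is why this change bumps `kWriteBarrierCounterOffset` by one `kPointerSize` for the new `old_to_old_slots_` field. A minimal sketch of how such manual offsets can be cross-checked against the compiler with `offsetof` — the struct and constant here are hypothetical, not the real MemoryChunk layout:

```cpp
#include <cstddef>

// Toy header mirroring the "sum of preceding field sizes" technique.
struct ToyHeader {
  void* owner;
  void* heap;
  int progress_bar;
  int live_byte_count;
  void* slots_buffer;
};

// Manual offset, computed the same way spaces.h computes kLiveBytesOffset.
const size_t kToyLiveBytesOffset =
    sizeof(void*)    // owner
    + sizeof(void*)  // heap
    + sizeof(int);   // progress_bar

static_assert(offsetof(ToyHeader, live_byte_count) == kToyLiveBytesOffset,
              "manual offset must match the compiler's layout");
```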
| 430 static const int kFlagsOffset = kPointerSize; | 431 static const int kFlagsOffset = kPointerSize; |
| 431 | 432 |
| 432 static inline void IncrementLiveBytesFromMutator(HeapObject* object, int by); | 433 static inline void IncrementLiveBytesFromMutator(HeapObject* object, int by); |
| 433 static inline void IncrementLiveBytesFromGC(HeapObject* object, int by); | 434 static inline void IncrementLiveBytesFromGC(HeapObject* object, int by); |
| 434 | 435 |
| 435 // Only works if the pointer is in the first kPageSize of the MemoryChunk. | 436 // Only works if the pointer is in the first kPageSize of the MemoryChunk. |
| 436 static MemoryChunk* FromAddress(Address a) { | 437 static MemoryChunk* FromAddress(Address a) { |
| 437 return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask); | 438 return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask); |
| 438 } | 439 } |
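`FromAddress` recovers the chunk header by clearing the low bits of any interior pointer, which is why the comment restricts it to the first `kPageSize` of the chunk: beyond that, masking lands on the wrong aligned base. A standalone sketch of the masking arithmetic, assuming a hypothetical 1 MB alignment (the real constant is `MemoryChunk::kAlignment` in spaces.h):

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t kAlignment = 1u << 20;  // assumed chunk alignment
  const uintptr_t kAlignmentMask = kAlignment - 1;

  uintptr_t chunk_base = 0x40000000;         // some aligned chunk start
  uintptr_t interior = chunk_base + 0x1234;  // pointer into the chunk

  // Clearing the low bits of an interior pointer yields the chunk base.
  uintptr_t recovered = interior & ~kAlignmentMask;
  std::printf("recovered base: %#zx (matches: %d)\n",
              static_cast<size_t>(recovered),
              static_cast<int>(recovered == chunk_base));
  return 0;
}
```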
| 439 | 440 |
| 440 // Only works for addresses in pointer spaces, not data or code spaces. | |
| 441 static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr); | 441 static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr); |
| 442 | 442 |
| 443 static inline void UpdateHighWaterMark(Address mark) { | 443 static inline void UpdateHighWaterMark(Address mark) { |
| 444 if (mark == nullptr) return; | 444 if (mark == nullptr) return; |
| 445 // Need to subtract one from the mark because when a chunk is full the | 445 // Need to subtract one from the mark because when a chunk is full the |
| 446 // top points to the next address after the chunk, which effectively belongs | 446 // top points to the next address after the chunk, which effectively belongs |
| 447 // to another chunk. See the comment to Page::FromAllocationTop. | 447 // to another chunk. See the comment to Page::FromAllocationTop. |
| 448 MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1); | 448 MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1); |
| 449 intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address()); | 449 intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address()); |
| 450 intptr_t old_mark = 0; | 450 intptr_t old_mark = 0; |
| (...skipping 56 matching lines...) |
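The loop body elided above follows the common lock-free "monotonic max" pattern: retry a compare-exchange until the stored mark is at least `new_mark`. An illustrative sketch of that general pattern only — names are hypothetical, and V8 uses its own `AtomicValue` wrapper rather than `std::atomic`:

```cpp
#include <atomic>
#include <cstdint>

// Raise high_water_mark to new_mark unless it is already higher.
void UpdateMax(std::atomic<intptr_t>& high_water_mark, intptr_t new_mark) {
  intptr_t old_mark = high_water_mark.load(std::memory_order_relaxed);
  while (old_mark < new_mark &&
         !high_water_mark.compare_exchange_weak(old_mark, new_mark)) {
    // compare_exchange_weak reloads old_mark on failure; keep looping
    // until the current value is >= new_mark or our store succeeds.
  }
}
```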
| 507 | 507 |
| 508 inline SkipList* skip_list() { return skip_list_; } | 508 inline SkipList* skip_list() { return skip_list_; } |
| 509 | 509 |
| 510 inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; } | 510 inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; } |
| 511 | 511 |
| 512 inline SlotsBuffer* slots_buffer() { return slots_buffer_; } | 512 inline SlotsBuffer* slots_buffer() { return slots_buffer_; } |
| 513 | 513 |
| 514 inline SlotsBuffer** slots_buffer_address() { return &slots_buffer_; } | 514 inline SlotsBuffer** slots_buffer_address() { return &slots_buffer_; } |
| 515 | 515 |
| 516 inline SlotSet* old_to_new_slots() { return old_to_new_slots_; } | 516 inline SlotSet* old_to_new_slots() { return old_to_new_slots_; } |
| 517 inline SlotSet* old_to_old_slots() { return old_to_old_slots_; } |
| 517 | 518 |
| 518 void AllocateOldToNewSlots(); | 519 void AllocateOldToNewSlots(); |
| 519 void ReleaseOldToNewSlots(); | 520 void ReleaseOldToNewSlots(); |
| 521 void AllocateOldToOldSlots(); |
| 522 void ReleaseOldToOldSlots(); |
| 520 | 523 |
| 521 Address area_start() { return area_start_; } | 524 Address area_start() { return area_start_; } |
| 522 Address area_end() { return area_end_; } | 525 Address area_end() { return area_end_; } |
| 523 int area_size() { return static_cast<int>(area_end() - area_start()); } | 526 int area_size() { return static_cast<int>(area_end() - area_start()); } |
| 524 | 527 |
| 525 bool CommitArea(size_t requested); | 528 bool CommitArea(size_t requested); |
| 526 | 529 |
| 527 // Approximate amount of physical memory committed for this chunk. | 530 // Approximate amount of physical memory committed for this chunk. |
| 528 size_t CommittedPhysicalMemory() { return high_water_mark_.Value(); } | 531 size_t CommittedPhysicalMemory() { return high_water_mark_.Value(); } |
| 529 | 532 |
| (...skipping 103 matching lines...) |
| 633 } | 636 } |
| 634 } | 637 } |
| 635 | 638 |
| 636 void set_owner(Space* space) { | 639 void set_owner(Space* space) { |
| 637 DCHECK((reinterpret_cast<intptr_t>(space) & kPageHeaderTagMask) == 0); | 640 DCHECK((reinterpret_cast<intptr_t>(space) & kPageHeaderTagMask) == 0); |
| 638 owner_ = reinterpret_cast<Address>(space) + kPageHeaderTag; | 641 owner_ = reinterpret_cast<Address>(space) + kPageHeaderTag; |
| 639 DCHECK((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) == | 642 DCHECK((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) == |
| 640 kPageHeaderTag); | 643 kPageHeaderTag); |
| 641 } | 644 } |
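`set_owner` relies on pointer tagging: `Space` objects are aligned, so the low bits of the pointer are guaranteed clear and can carry a tag that distinguishes a real page header. A sketch of the tag/untag round trip, with illustrative constant values assumed (the real `kPageHeaderTag`/`kPageHeaderTagMask` are defined elsewhere in spaces.h):

```cpp
#include <cassert>
#include <cstdint>

const uintptr_t kPageHeaderTag = 3;      // assumed tag value
const uintptr_t kPageHeaderTagMask = 3;  // assumed low-bit mask

uintptr_t TagOwner(void* space) {
  uintptr_t raw = reinterpret_cast<uintptr_t>(space);
  assert((raw & kPageHeaderTagMask) == 0);  // alignment leaves tag bits clear
  return raw + kPageHeaderTag;              // value stored in owner_
}

void* UntagOwner(uintptr_t owner) {
  // Only a value carrying the tag encodes a real owner.
  if ((owner & kPageHeaderTagMask) != kPageHeaderTag) return nullptr;
  return reinterpret_cast<void*>(owner - kPageHeaderTag);
}
```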
| 642 | 645 |
| 646 bool HasPageHeader() { return owner() != nullptr; } |
| 647 |
| 643 void InsertAfter(MemoryChunk* other); | 648 void InsertAfter(MemoryChunk* other); |
| 644 void Unlink(); | 649 void Unlink(); |
| 645 | 650 |
| 646 protected: | 651 protected: |
| 647 static MemoryChunk* Initialize(Heap* heap, Address base, size_t size, | 652 static MemoryChunk* Initialize(Heap* heap, Address base, size_t size, |
| 648 Address area_start, Address area_end, | 653 Address area_start, Address area_end, |
| 649 Executability executable, Space* owner, | 654 Executability executable, Space* owner, |
| 650 base::VirtualMemory* reservation); | 655 base::VirtualMemory* reservation); |
| 651 | 656 |
| 652 // Should be called when memory chunk is about to be freed. | 657 // Should be called when memory chunk is about to be freed. |
| (...skipping 24 matching lines...) |
| 677 | 682 |
| 678 // Count of bytes marked black on page. | 683 // Count of bytes marked black on page. |
| 679 int live_byte_count_; | 684 int live_byte_count_; |
| 680 | 685 |
| 681 SlotsBuffer* slots_buffer_; | 686 SlotsBuffer* slots_buffer_; |
| 682 | 687 |
| 683 // A single slot set for small pages (of size kPageSize) or an array of slot | 688 // A single slot set for small pages (of size kPageSize) or an array of slot |
| 684 // sets for large pages. In the latter case the number of entries in the array | 689 // sets for large pages. In the latter case the number of entries in the array |
| 685 // is ceil(size() / kPageSize). | 690 // is ceil(size() / kPageSize). |
| 686 SlotSet* old_to_new_slots_; | 691 SlotSet* old_to_new_slots_; |
| 692 SlotSet* old_to_old_slots_; |
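The sizing rule in the comment above — one `SlotSet` per `kPageSize`-sized bucket, so `ceil(size() / kPageSize)` entries for a large page — comes down to an integer ceiling division. A small sketch, with `kPageSize` assumed as 1 MB for illustration (check spaces.h for the real constant):

```cpp
#include <cstddef>

const size_t kPageSize = 1u << 20;  // assumed page size

// Number of slot sets needed to cover a chunk of the given size.
size_t SlotSetCount(size_t chunk_size) {
  // Integer ceiling division: ceil(a / b) == (a + b - 1) / b.
  return (chunk_size + kPageSize - 1) / kPageSize;
}

// e.g. a 3.5 MB large-object chunk needs 4 slot sets:
//   SlotSetCount(3 * kPageSize + kPageSize / 2) == 4
```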
| 687 | 693 |
| 688 SkipList* skip_list_; | 694 SkipList* skip_list_; |
| 689 | 695 |
| 690 intptr_t write_barrier_counter_; | 696 intptr_t write_barrier_counter_; |
| 691 | 697 |
| 692 // Assuming the initial allocation on a page is sequential, | 698 // Assuming the initial allocation on a page is sequential, |
| 693 // count highest number of bytes ever allocated on the page. | 699 // count highest number of bytes ever allocated on the page. |
| 694 AtomicValue<intptr_t> high_water_mark_; | 700 AtomicValue<intptr_t> high_water_mark_; |
| 695 | 701 |
| 696 base::Mutex* mutex_; | 702 base::Mutex* mutex_; |
| (...skipping 33 matching lines...) |
| 730 class Page : public MemoryChunk { | 736 class Page : public MemoryChunk { |
| 731 public: | 737 public: |
| 732 // Returns the page containing a given address. The address ranges | 738 // Returns the page containing a given address. The address ranges |
| 733 // from [page_addr .. page_addr + kPageSize[ | 739 // from [page_addr .. page_addr + kPageSize[ |
| 734 // This only works if the object is in fact in a page. See also MemoryChunk:: | 740 // This only works if the object is in fact in a page. See also MemoryChunk:: |
| 735 // FromAddress() and FromAnyAddress(). | 741 // FromAddress() and FromAnyAddress(). |
| 736 INLINE(static Page* FromAddress(Address a)) { | 742 INLINE(static Page* FromAddress(Address a)) { |
| 737 return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask); | 743 return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask); |
| 738 } | 744 } |
| 739 | 745 |
| 746 // Only works for addresses in pointer spaces, not code space. |
| 747 inline static Page* FromAnyPointerAddress(Heap* heap, Address addr); |
| 748 |
| 740 // Returns the page containing an allocation top. Because an allocation | 749 // Returns the page containing an allocation top. Because an allocation |
| 741 // top address can be the upper bound of the page, we need to subtract | 750 // top address can be the upper bound of the page, we need to subtract |
| 742 // kPointerSize from it first. The address ranges from | 751 // kPointerSize from it first. The address ranges from |
| 743 // [page_addr + kObjectStartOffset .. page_addr + kPageSize]. | 752 // [page_addr + kObjectStartOffset .. page_addr + kPageSize]. |
| 744 INLINE(static Page* FromAllocationTop(Address top)) { | 753 INLINE(static Page* FromAllocationTop(Address top)) { |
| 745 Page* p = FromAddress(top - kPointerSize); | 754 Page* p = FromAddress(top - kPointerSize); |
| 746 return p; | 755 return p; |
| 747 } | 756 } |
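The subtraction in `FromAllocationTop` matters exactly when a page is full: `top` then equals `page_addr + kPageSize`, which would mask to the *next* page, so backing up one word first keeps the address inside the page it logically belongs to. A sketch with assumed constants (illustrative values, not the real `kPageSize`):

```cpp
#include <cstdint>

const uintptr_t kPageSize = 1u << 20;  // assumed page size
const uintptr_t kPageAlignmentMask = kPageSize - 1;
const uintptr_t kPointerSize = sizeof(void*);

uintptr_t PageFromAddress(uintptr_t a) { return a & ~kPageAlignmentMask; }

uintptr_t PageFromAllocationTop(uintptr_t top) {
  return PageFromAddress(top - kPointerSize);
}

// For an aligned page_addr:
//   PageFromAddress(page_addr + kPageSize)       -> page_addr + kPageSize (wrong page)
//   PageFromAllocationTop(page_addr + kPageSize) -> page_addr           (correct page)
```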
| 748 | 757 |
| 749 // Returns the next page in the chain of pages owned by a space. | 758 // Returns the next page in the chain of pages owned by a space. |
| (...skipping 2284 matching lines...) |
| 3034 count = 0; | 3043 count = 0; |
| 3035 } | 3044 } |
| 3036 // Must be small, since an iteration is used for lookup. | 3045 // Must be small, since an iteration is used for lookup. |
| 3037 static const int kMaxComments = 64; | 3046 static const int kMaxComments = 64; |
| 3038 }; | 3047 }; |
| 3039 #endif | 3048 #endif |
| 3040 } // namespace internal | 3049 } // namespace internal |
| 3041 } // namespace v8 | 3050 } // namespace v8 |
| 3042 | 3051 |
| 3043 #endif // V8_HEAP_SPACES_H_ | 3052 #endif // V8_HEAP_SPACES_H_ |