| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #ifndef V8_HEAP_SPACES_H_ | 5 #ifndef V8_HEAP_SPACES_H_ |
| 6 #define V8_HEAP_SPACES_H_ | 6 #define V8_HEAP_SPACES_H_ |
| 7 | 7 |
| 8 #include "src/allocation.h" | 8 #include "src/allocation.h" |
| 9 #include "src/base/atomicops.h" | 9 #include "src/base/atomicops.h" |
| 10 #include "src/base/bits.h" | 10 #include "src/base/bits.h" |
| (...skipping 523 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 534 static void IncrementLiveBytesFromMutator(HeapObject* object, int by); | 534 static void IncrementLiveBytesFromMutator(HeapObject* object, int by); |
| 535 | 535 |
| 536 static const intptr_t kAlignment = | 536 static const intptr_t kAlignment = |
| 537 (static_cast<uintptr_t>(1) << kPageSizeBits); | 537 (static_cast<uintptr_t>(1) << kPageSizeBits); |
| 538 | 538 |
| 539 static const intptr_t kAlignmentMask = kAlignment - 1; | 539 static const intptr_t kAlignmentMask = kAlignment - 1; |
| 540 | 540 |
| 541 static const intptr_t kSizeOffset = 0; | 541 static const intptr_t kSizeOffset = 0; |
| 542 | 542 |
| 543 static const intptr_t kLiveBytesOffset = | 543 static const intptr_t kLiveBytesOffset = |
| 544 kSizeOffset + kPointerSize + kPointerSize + kPointerSize + kPointerSize + | 544 kSizeOffset + kPointerSize // size_t size |
| 545 kPointerSize + kPointerSize + kPointerSize + kPointerSize + kIntSize; | 545 + kIntptrSize // intptr_t flags_ |
| 546 + kPointerSize // Address area_start_ |
| 547 + kPointerSize // Address area_end_ |
| 548 + 2 * kPointerSize // base::VirtualMemory reservation_ |
| 549 + kPointerSize // Address owner_ |
| 550 + kPointerSize // Heap* heap_ |
| 551 + kIntSize; // int store_buffer_counter_ |
| 546 | 552 |
| 547 static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize; | 553 |
| 554 static const size_t kSlotsBufferOffset = |
| 555 kLiveBytesOffset + kIntSize; // int live_byte_count_ |
| 548 | 556 |
| 549 static const size_t kWriteBarrierCounterOffset = | 557 static const size_t kWriteBarrierCounterOffset = |
| 550 kSlotsBufferOffset + kPointerSize + kPointerSize; | 558 kSlotsBufferOffset + kPointerSize // SlotsBuffer* slots_buffer_; |
| 559 + kPointerSize; // SkipList* skip_list_; |
| 551 | 560 |
| 552 static const size_t kHeaderSize = kWriteBarrierCounterOffset + | 561 static const size_t kMinHeaderSize = |
| 553 kPointerSize + // write_barrier_counter_ | 562 kWriteBarrierCounterOffset + |
| 554 kIntSize + // progress_bar_ | 563 kIntptrSize // intptr_t write_barrier_counter_ |
| 555 kIntSize + // high_water_mark_ | 564 + kIntSize // int progress_bar_ |
| 556 kPointerSize + // mutex_ page lock | 565 + kIntSize // int high_water_mark_ |
| 557 kPointerSize + // parallel_sweeping_ | 566 + kPointerSize // base::Mutex* mutex_ |
| 558 5 * kPointerSize + // free list statistics | 567 + kPointerSize // base::AtomicWord parallel_sweeping_ |
| 559 kPointerSize + // next_chunk_ | 568 + 5 * kIntSize // int free-list statistics |
| 560 kPointerSize; // prev_chunk_ | 569 + kPointerSize // base::AtomicWord next_chunk_ |
| 570 + kPointerSize; // base::AtomicWord prev_chunk_ |
| 571 |
| 572 // We add some more space to the computed header size to account for missing |
| 573 // alignment requirements in our computation. |
| 574 // Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines. |
| 575 static const size_t kHeaderSize = kMinHeaderSize + kIntSize; |
| 561 | 576 |
| 562 static const int kBodyOffset = | 577 static const int kBodyOffset = |
| 563 CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize); | 578 CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize); |
| 564 | 579 |
| 565 // The start offset of the object area in a page. Aligned to both maps and | 580 // The start offset of the object area in a page. Aligned to both maps and |
| 566 // code alignment to be suitable for both. Also aligned to 32 words because | 581 // code alignment to be suitable for both. Also aligned to 32 words because |
| 567 // the marking bitmap is arranged in 32 bit chunks. | 582 // the marking bitmap is arranged in 32 bit chunks. |
| 568 static const int kObjectStartAlignment = 32 * kPointerSize; | 583 static const int kObjectStartAlignment = 32 * kPointerSize; |
| 569 static const int kObjectStartOffset = | 584 static const int kObjectStartOffset = |
| 570 kBodyOffset - 1 + | 585 kBodyOffset - 1 + |
| (...skipping 138 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 709 base::Mutex* mutex_; | 724 base::Mutex* mutex_; |
| 710 base::AtomicWord parallel_sweeping_; | 725 base::AtomicWord parallel_sweeping_; |
| 711 | 726 |
| 712 // PagedSpace free-list statistics. | 727 // PagedSpace free-list statistics. |
| 713 int available_in_small_free_list_; | 728 int available_in_small_free_list_; |
| 714 int available_in_medium_free_list_; | 729 int available_in_medium_free_list_; |
| 715 int available_in_large_free_list_; | 730 int available_in_large_free_list_; |
| 716 int available_in_huge_free_list_; | 731 int available_in_huge_free_list_; |
| 717 int non_available_small_blocks_; | 732 int non_available_small_blocks_; |
| 718 | 733 |
| 734 // next_chunk_ holds a pointer of type MemoryChunk |
| 735 base::AtomicWord next_chunk_; |
| 736 // prev_chunk_ holds a pointer of type MemoryChunk |
| 737 base::AtomicWord prev_chunk_; |
| 738 |
| 719 static MemoryChunk* Initialize(Heap* heap, Address base, size_t size, | 739 static MemoryChunk* Initialize(Heap* heap, Address base, size_t size, |
| 720 Address area_start, Address area_end, | 740 Address area_start, Address area_end, |
| 721 Executability executable, Space* owner); | 741 Executability executable, Space* owner); |
| 722 | 742 |
| 723 private: | 743 private: |
| 724 // next_chunk_ holds a pointer of type MemoryChunk | |
| 725 base::AtomicWord next_chunk_; | |
| 726 // prev_chunk_ holds a pointer of type MemoryChunk | |
| 727 base::AtomicWord prev_chunk_; | |
| 728 | |
| 729 friend class MemoryAllocator; | 744 friend class MemoryAllocator; |
| 745 friend class MemoryChunkValidator; |
| 730 }; | 746 }; |
| 731 | 747 |
| 732 | 748 |
| 733 STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize); | |
| 734 | |
| 735 | |
| 736 // ----------------------------------------------------------------------------- | 749 // ----------------------------------------------------------------------------- |
| 737 // A page is a memory chunk of a size 1MB. Large object pages may be larger. | 750 // A page is a memory chunk of a size 1MB. Large object pages may be larger. |
| 738 // | 751 // |
| 739 // The only way to get a page pointer is by calling factory methods: | 752 // The only way to get a page pointer is by calling factory methods: |
| 740 // Page* p = Page::FromAddress(addr); or | 753 // Page* p = Page::FromAddress(addr); or |
| 741 // Page* p = Page::FromAllocationTop(top); | 754 // Page* p = Page::FromAllocationTop(top); |
| 742 class Page : public MemoryChunk { | 755 class Page : public MemoryChunk { |
| 743 public: | 756 public: |
| 744 // Returns the page containing a given address. The address ranges | 757 // Returns the page containing a given address. The address ranges |
| 745 // from [page_addr .. page_addr + kPageSize[ | 758 // from [page_addr .. page_addr + kPageSize[ |
| (...skipping 88 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 834 #undef FRAGMENTATION_STATS_ACCESSORS | 847 #undef FRAGMENTATION_STATS_ACCESSORS |
| 835 | 848 |
| 836 #ifdef DEBUG | 849 #ifdef DEBUG |
| 837 void Print(); | 850 void Print(); |
| 838 #endif // DEBUG | 851 #endif // DEBUG |
| 839 | 852 |
| 840 friend class MemoryAllocator; | 853 friend class MemoryAllocator; |
| 841 }; | 854 }; |
| 842 | 855 |
| 843 | 856 |
| 844 STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize); | |
| 845 | |
| 846 | |
| 847 class LargePage : public MemoryChunk { | 857 class LargePage : public MemoryChunk { |
| 848 public: | 858 public: |
| 849 HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); } | 859 HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); } |
| 850 | 860 |
| 851 inline LargePage* next_page() const { | 861 inline LargePage* next_page() const { |
| 852 return static_cast<LargePage*>(next_chunk()); | 862 return static_cast<LargePage*>(next_chunk()); |
| 853 } | 863 } |
| 854 | 864 |
| 855 inline void set_next_page(LargePage* page) { set_next_chunk(page); } | 865 inline void set_next_page(LargePage* page) { set_next_chunk(page); } |
| 856 | 866 |
| 857 private: | 867 private: |
| 858 static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk); | 868 static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk); |
| 859 | 869 |
| 860 friend class MemoryAllocator; | 870 friend class MemoryAllocator; |
| 861 }; | 871 }; |
| 862 | 872 |
| 863 STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize); | |
| 864 | 873 |
| 865 // ---------------------------------------------------------------------------- | 874 // ---------------------------------------------------------------------------- |
| 866 // Space is the abstract superclass for all allocation spaces. | 875 // Space is the abstract superclass for all allocation spaces. |
| 867 class Space : public Malloced { | 876 class Space : public Malloced { |
| 868 public: | 877 public: |
| 869 Space(Heap* heap, AllocationSpace id, Executability executable) | 878 Space(Heap* heap, AllocationSpace id, Executability executable) |
| 870 : heap_(heap), id_(id), executable_(executable) {} | 879 : heap_(heap), id_(id), executable_(executable) {} |
| 871 | 880 |
| 872 virtual ~Space() {} | 881 virtual ~Space() {} |
| 873 | 882 |
| (...skipping 33 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 907 virtual void Print() = 0; | 916 virtual void Print() = 0; |
| 908 #endif | 917 #endif |
| 909 | 918 |
| 910 private: | 919 private: |
| 911 Heap* heap_; | 920 Heap* heap_; |
| 912 AllocationSpace id_; | 921 AllocationSpace id_; |
| 913 Executability executable_; | 922 Executability executable_; |
| 914 }; | 923 }; |
| 915 | 924 |
| 916 | 925 |
| 926 class MemoryChunkValidator { |
| 927 // Computed offsets should match the compiler generated ones. |
| 928 STATIC_ASSERT(MemoryChunk::kSizeOffset == offsetof(MemoryChunk, size_)); |
| 929 STATIC_ASSERT(MemoryChunk::kLiveBytesOffset == |
| 930 offsetof(MemoryChunk, live_byte_count_)); |
| 931 STATIC_ASSERT(MemoryChunk::kSlotsBufferOffset == |
| 932 offsetof(MemoryChunk, slots_buffer_)); |
| 933 STATIC_ASSERT(MemoryChunk::kWriteBarrierCounterOffset == |
| 934 offsetof(MemoryChunk, write_barrier_counter_)); |
| 935 |
| 936 // Validate our estimates on the header size. |
| 937 STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize); |
| 938 STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize); |
| 939 STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize); |
| 940 }; |
| 941 |
| 942 |
| 917 // ---------------------------------------------------------------------------- | 943 // ---------------------------------------------------------------------------- |
| 918 // All heap objects containing executable code (code objects) must be allocated | 944 // All heap objects containing executable code (code objects) must be allocated |
| 919 // from a 2 GB range of memory, so that they can call each other using 32-bit | 945 // from a 2 GB range of memory, so that they can call each other using 32-bit |
| 920 // displacements. This happens automatically on 32-bit platforms, where 32-bit | 946 // displacements. This happens automatically on 32-bit platforms, where 32-bit |
| 921 // displacements cover the entire 4GB virtual address space. On 64-bit | 947 // displacements cover the entire 4GB virtual address space. On 64-bit |
| 922 // platforms, we support this using the CodeRange object, which reserves and | 948 // platforms, we support this using the CodeRange object, which reserves and |
| 923 // manages a range of virtual memory. | 949 // manages a range of virtual memory. |
| 924 class CodeRange { | 950 class CodeRange { |
| 925 public: | 951 public: |
| 926 explicit CodeRange(Isolate* isolate); | 952 explicit CodeRange(Isolate* isolate); |
| (...skipping 1968 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2895 count = 0; | 2921 count = 0; |
| 2896 } | 2922 } |
| 2897 // Must be small, since an iteration is used for lookup. | 2923 // Must be small, since an iteration is used for lookup. |
| 2898 static const int kMaxComments = 64; | 2924 static const int kMaxComments = 64; |
| 2899 }; | 2925 }; |
| 2900 #endif | 2926 #endif |
| 2901 } | 2927 } |
| 2902 } // namespace v8::internal | 2928 } // namespace v8::internal |
| 2903 | 2929 |
| 2904 #endif // V8_HEAP_SPACES_H_ | 2930 #endif // V8_HEAP_SPACES_H_ |
| OLD | NEW |