OLD | NEW |
---|---|
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef V8_HEAP_SPACES_H_ | 5 #ifndef V8_HEAP_SPACES_H_ |
6 #define V8_HEAP_SPACES_H_ | 6 #define V8_HEAP_SPACES_H_ |
7 | 7 |
8 #include "src/allocation.h" | 8 #include "src/allocation.h" |
9 #include "src/base/atomicops.h" | 9 #include "src/base/atomicops.h" |
10 #include "src/base/bits.h" | 10 #include "src/base/bits.h" |
(...skipping 523 matching lines...) | |
534 static void IncrementLiveBytesFromMutator(HeapObject* object, int by); | 534 static void IncrementLiveBytesFromMutator(HeapObject* object, int by); |
535 | 535 |
536 static const intptr_t kAlignment = | 536 static const intptr_t kAlignment = |
537 (static_cast<uintptr_t>(1) << kPageSizeBits); | 537 (static_cast<uintptr_t>(1) << kPageSizeBits); |
538 | 538 |
539 static const intptr_t kAlignmentMask = kAlignment - 1; | 539 static const intptr_t kAlignmentMask = kAlignment - 1; |
540 | 540 |
541 static const intptr_t kSizeOffset = 0; | 541 static const intptr_t kSizeOffset = 0; |
542 | 542 |
543 static const intptr_t kLiveBytesOffset = | 543 static const intptr_t kLiveBytesOffset = |
544 kSizeOffset + kPointerSize + kPointerSize + kPointerSize + kPointerSize + | 544 kSizeOffset + kPointerSize // size_t size |
545 kPointerSize + kPointerSize + kPointerSize + kPointerSize + kIntSize; | 545 + kIntptrSize // intptr_t flags_ |
546 + kPointerSize // Address area_start_ | |
547 + kPointerSize // Address area_end_ | |
548 + 2 * kPointerSize // base::VirtualMemory reservation_ | |
549 + kPointerSize // Address owner_ | |
550 + kPointerSize // Heap* heap_ | |
551 + kIntSize; // int store_buffer_counter_ | |
546 | 552 |
547 static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize; | 553 |
554 static const size_t kSlotsBufferOffset = | |
555 kLiveBytesOffset + kIntSize; // int live_byte_count_ | |
548 | 556 |
549 static const size_t kWriteBarrierCounterOffset = | 557 static const size_t kWriteBarrierCounterOffset = |
550 kSlotsBufferOffset + kPointerSize + kPointerSize; | 558 kSlotsBufferOffset + kPointerSize // SlotsBuffer* slots_buffer_; |
559 + kPointerSize; // SkipList* skip_list_; | |
551 | 560 |
552 static const size_t kHeaderSize = kWriteBarrierCounterOffset + | 561 static const size_t kMinHeaderSize = |
553 kPointerSize + // write_barrier_counter_ | 562 kWriteBarrierCounterOffset + |
554 kIntSize + // progress_bar_ | 563 kPointerSize // intptr_t write_barrier_counter_ |
555 kIntSize + // high_water_mark_ | 564 + kIntSize // int progress_bar_ |
556 kPointerSize + // mutex_ page lock | 565 + kIntSize // int high_water_mark_ |
557 kPointerSize + // parallel_sweeping_ | 566 + kPointerSize // base::Mutex* mutex_ |
558 5 * kPointerSize + // free list statistics | 567 + kPointerSize // base::AtomicWord parallel_sweeping_ |
Michael Lippautz
2015/09/07 16:55:25
5 * kPointerSize implicitly included the memory th
| |
559 kPointerSize + // next_chunk_ | 568 + 5 * kIntSize // int free-list statistics |
560 kPointerSize; // prev_chunk_ | 569 + kPointerSize // base::AtomicWord next_chunk_ |
570 + kPointerSize; // base::AtomicWord prev_chunk_ | |
571 | |
572 // We add some more space to the computed header size to account for missing | |
573 // alignment requirements in our computation. | |
574 static const size_t kHeaderSize = kMinHeaderSize + kPointerSize; | |
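The annotated per-field accounting above only stays honest if it matches what the compiler actually lays out; the MemoryChunkValidator introduced further down in this CL checks exactly that with offsetof-based asserts. As a minimal standalone sketch of the pattern (the ChunkHeader struct and the constants here are hypothetical, not V8 code):

    #include <cstddef>
    #include <cstdint>

    struct ChunkHeader {
      size_t size;
      intptr_t flags;
      int live_byte_count;
    };

    // Hand-computed offsets, field by field, in declaration order.
    const size_t kFlagsOffset = sizeof(size_t);
    const size_t kLiveBytesOffsetSketch = kFlagsOffset + sizeof(intptr_t);

    // If a field is reordered or resized, the build breaks instead of the
    // header accounting silently going stale.
    static_assert(kFlagsOffset == offsetof(ChunkHeader, flags),
                  "computed offset drifted from the real layout");
    static_assert(kLiveBytesOffsetSketch == offsetof(ChunkHeader, live_byte_count),
                  "computed offset drifted from the real layout");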
561 | 575 |
562 static const int kBodyOffset = | 576 static const int kBodyOffset = |
563 CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize); | 577 CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize); |
564 | 578 |
565 // The start offset of the object area in a page. Aligned to both maps and | 579 // The start offset of the object area in a page. Aligned to both maps and |
566 // code alignment to be suitable for both. Also aligned to 32 words because | 580 // code alignment to be suitable for both. Also aligned to 32 words because |
567 // the marking bitmap is arranged in 32 bit chunks. | 581 // the marking bitmap is arranged in 32 bit chunks. |
568 static const int kObjectStartAlignment = 32 * kPointerSize; | 582 static const int kObjectStartAlignment = 32 * kPointerSize; |
569 static const int kObjectStartOffset = | 583 static const int kObjectStartOffset = |
570 kBodyOffset - 1 + | 584 kBodyOffset - 1 + |
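The expression for kObjectStartOffset is cut off by the diff viewer here, but the visible kBodyOffset - 1 + prefix is the start of the standard round-up-to-a-multiple idiom. A self-contained sketch of that idiom, offered as an assumption about the elided remainder rather than the verbatim V8 expression:

    // Rounds value (>= 1) up to the next multiple of alignment (>= 1).
    constexpr int RoundUpTo(int value, int alignment) {
      return value - 1 + (alignment - (value - 1) % alignment);
    }

    static_assert(RoundUpTo(33, 32) == 64, "rounds up past a boundary");
    static_assert(RoundUpTo(64, 32) == 64, "already-aligned input is unchanged");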
(...skipping 149 matching lines...) | |
720 Address area_start, Address area_end, | 734 Address area_start, Address area_end, |
721 Executability executable, Space* owner); | 735 Executability executable, Space* owner); |
722 | 736 |
723 private: | 737 private: |
724 // next_chunk_ holds a pointer of type MemoryChunk | 738 // next_chunk_ holds a pointer of type MemoryChunk |
725 base::AtomicWord next_chunk_; | 739 base::AtomicWord next_chunk_; |
726 // prev_chunk_ holds a pointer of type MemoryChunk | 740 // prev_chunk_ holds a pointer of type MemoryChunk |
727 base::AtomicWord prev_chunk_; | 741 base::AtomicWord prev_chunk_; |
728 | 742 |
729 friend class MemoryAllocator; | 743 friend class MemoryAllocator; |
744 friend class MemoryChunkValidator; | |
730 }; | 745 }; |
731 | 746 |
732 | 747 |
733 STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize); | |
734 | |
735 | |
736 // ----------------------------------------------------------------------------- | 748 // ----------------------------------------------------------------------------- |
737 // A page is a memory chunk of size 1 MB. Large object pages may be larger. | 749 // A page is a memory chunk of size 1 MB. Large object pages may be larger. |
738 // | 750 // |
739 // The only way to get a page pointer is by calling factory methods: | 751 // The only way to get a page pointer is by calling factory methods: |
740 // Page* p = Page::FromAddress(addr); or | 752 // Page* p = Page::FromAddress(addr); or |
741 // Page* p = Page::FromAllocationTop(top); | 753 // Page* p = Page::FromAllocationTop(top); |
742 class Page : public MemoryChunk { | 754 class Page : public MemoryChunk { |
743 public: | 755 public: |
744 // Returns the page containing a given address. The address ranges | 756 // Returns the page containing a given address. The address ranges |
745 // from [page_addr .. page_addr + kPageSize[ | 757 // from [page_addr .. page_addr + kPageSize[ |
(...skipping 88 matching lines...) | |
834 #undef FRAGMENTATION_STATS_ACCESSORS | 846 #undef FRAGMENTATION_STATS_ACCESSORS |
835 | 847 |
836 #ifdef DEBUG | 848 #ifdef DEBUG |
837 void Print(); | 849 void Print(); |
838 #endif // DEBUG | 850 #endif // DEBUG |
839 | 851 |
840 friend class MemoryAllocator; | 852 friend class MemoryAllocator; |
841 }; | 853 }; |
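Because kAlignment is 1 << kPageSizeBits and every regular page starts on that boundary, a factory such as Page::FromAddress can recover the owning page from any interior address by masking off the low bits. A hedged sketch of the technique (the names and the 1 MB constant below are illustrative, not the verbatim V8 implementation):

    #include <cstdint>

    const uintptr_t kSketchPageSizeBits = 20;  // 1 MB pages, per the comment above
    const uintptr_t kSketchAlignmentMask =
        (static_cast<uintptr_t>(1) << kSketchPageSizeBits) - 1;

    // Clearing the low kSketchPageSizeBits of an interior address yields the
    // base address of the page containing it.
    inline uintptr_t PageBaseFromAddress(uintptr_t addr) {
      return addr & ~kSketchAlignmentMask;
    }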
842 | 854 |
843 | 855 |
844 STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize); | |
845 | |
846 | |
847 class LargePage : public MemoryChunk { | 856 class LargePage : public MemoryChunk { |
848 public: | 857 public: |
849 HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); } | 858 HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); } |
850 | 859 |
851 inline LargePage* next_page() const { | 860 inline LargePage* next_page() const { |
852 return static_cast<LargePage*>(next_chunk()); | 861 return static_cast<LargePage*>(next_chunk()); |
853 } | 862 } |
854 | 863 |
855 inline void set_next_page(LargePage* page) { set_next_chunk(page); } | 864 inline void set_next_page(LargePage* page) { set_next_chunk(page); } |
856 | 865 |
857 private: | 866 private: |
858 static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk); | 867 static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk); |
859 | 868 |
860 friend class MemoryAllocator; | 869 friend class MemoryAllocator; |
861 }; | 870 }; |
862 | 871 |
863 STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize); | |
864 | 872 |
865 // ---------------------------------------------------------------------------- | 873 // ---------------------------------------------------------------------------- |
866 // Space is the abstract superclass for all allocation spaces. | 874 // Space is the abstract superclass for all allocation spaces. |
867 class Space : public Malloced { | 875 class Space : public Malloced { |
868 public: | 876 public: |
869 Space(Heap* heap, AllocationSpace id, Executability executable) | 877 Space(Heap* heap, AllocationSpace id, Executability executable) |
870 : heap_(heap), id_(id), executable_(executable) {} | 878 : heap_(heap), id_(id), executable_(executable) {} |
871 | 879 |
872 virtual ~Space() {} | 880 virtual ~Space() {} |
873 | 881 |
(...skipping 33 matching lines...) | |
907 virtual void Print() = 0; | 915 virtual void Print() = 0; |
908 #endif | 916 #endif |
909 | 917 |
910 private: | 918 private: |
911 Heap* heap_; | 919 Heap* heap_; |
912 AllocationSpace id_; | 920 AllocationSpace id_; |
913 Executability executable_; | 921 Executability executable_; |
914 }; | 922 }; |
915 | 923 |
916 | 924 |
925 class MemoryChunkValidator { | |
926 // Computed offsets should match the compiler-generated ones. | |
927 STATIC_ASSERT(MemoryChunk::kSizeOffset == offsetof(MemoryChunk, size_)); | |
928 STATIC_ASSERT(MemoryChunk::kLiveBytesOffset == | |
929 offsetof(MemoryChunk, live_byte_count_)); | |
930 STATIC_ASSERT(MemoryChunk::kSlotsBufferOffset == | |
931 offsetof(MemoryChunk, slots_buffer_)); | |
932 STATIC_ASSERT(MemoryChunk::kWriteBarrierCounterOffset == | |
933 offsetof(MemoryChunk, write_barrier_counter_)); | |
934 | |
935 // Validate our estimates on the header size. | |
936 STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize); | |
937 STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize); | |
938 STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize); | |
939 }; | |
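Note why the asserts moved out of MemoryChunk itself: a type is incomplete inside its own definition, so sizeof(MemoryChunk) cannot be checked there, and the friend class MemoryChunkValidator declaration added above is what lets offsetof name the private fields being validated.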
940 | |
941 | |
917 // ---------------------------------------------------------------------------- | 942 // ---------------------------------------------------------------------------- |
918 // All heap objects containing executable code (code objects) must be allocated | 943 // All heap objects containing executable code (code objects) must be allocated |
919 // from a 2 GB range of memory, so that they can call each other using 32-bit | 944 // from a 2 GB range of memory, so that they can call each other using 32-bit |
920 // displacements. This happens automatically on 32-bit platforms, where 32-bit | 945 // displacements. This happens automatically on 32-bit platforms, where 32-bit |
921 // displacements cover the entire 4GB virtual address space. On 64-bit | 946 // displacements cover the entire 4GB virtual address space. On 64-bit |
922 // platforms, we support this using the CodeRange object, which reserves and | 947 // platforms, we support this using the CodeRange object, which reserves and |
923 // manages a range of virtual memory. | 948 // manages a range of virtual memory. |
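A small standalone illustration of the arithmetic behind this constraint, before the class itself: a signed 32-bit displacement reaches at most about ±2 GB from the call site, so any two code addresses inside a single 2 GB reservation can always reach each other with a rel32 call (the helper is illustrative, not part of CodeRange):

    #include <cstdint>

    // True when target is reachable from a call site at pc via a signed
    // 32-bit displacement. Any two addresses inside one 2 GB range qualify,
    // since their difference is always less than 2^31 in magnitude.
    inline bool FitsRel32(uintptr_t pc, uintptr_t target) {
      intptr_t delta = static_cast<intptr_t>(target - pc);
      return delta >= INT32_MIN && delta <= INT32_MAX;
    }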
924 class CodeRange { | 949 class CodeRange { |
925 public: | 950 public: |
926 explicit CodeRange(Isolate* isolate); | 951 explicit CodeRange(Isolate* isolate); |
(...skipping 1968 matching lines...) | |
2895 count = 0; | 2920 count = 0; |
2896 } | 2921 } |
2897 // Must be small, since an iteration is used for lookup. | 2922 // Must be small, since an iteration is used for lookup. |
2898 static const int kMaxComments = 64; | 2923 static const int kMaxComments = 64; |
2899 }; | 2924 }; |
2900 #endif | 2925 #endif |
2901 } | 2926 } |
2902 } // namespace v8::internal | 2927 } // namespace v8::internal |
2903 | 2928 |
2904 #endif // V8_HEAP_SPACES_H_ | 2929 #endif // V8_HEAP_SPACES_H_ |