OLD | NEW |
---|---|
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 85 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
96 | 96 |
97 // Some assertion macros used in the debugging mode. | 97 // Some assertion macros used in the debugging mode. |
98 | 98 |
99 #define ASSERT_PAGE_ALIGNED(address) \ | 99 #define ASSERT_PAGE_ALIGNED(address) \ |
100 ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0) | 100 ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0) |
101 | 101 |
102 #define ASSERT_OBJECT_ALIGNED(address) \ | 102 #define ASSERT_OBJECT_ALIGNED(address) \ |
103 ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0) | 103 ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0) |
104 | 104 |
105 #define ASSERT_OBJECT_SIZE(size) \ | 105 #define ASSERT_OBJECT_SIZE(size) \ |
106 ASSERT((0 < size) && (size <= Page::kMaxNonCodeHeapObjectSize)) | 106 ASSERT((0 < size) && (size <= Page::kMaxRegularHeapObjectSize)) |
107 | 107 |
108 #define ASSERT_PAGE_OFFSET(offset) \ | 108 #define ASSERT_PAGE_OFFSET(offset) \ |
109 ASSERT((Page::kObjectStartOffset <= offset) \ | 109 ASSERT((Page::kObjectStartOffset <= offset) \ |
110 && (offset <= Page::kPageSize)) | 110 && (offset <= Page::kPageSize)) |
111 | 111 |
112 #define ASSERT_MAP_PAGE_INDEX(index) \ | 112 #define ASSERT_MAP_PAGE_INDEX(index) \ |
113 ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex)) | 113 ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex)) |
114 | 114 |
115 | 115 |
116 class PagedSpace; | 116 class PagedSpace; |
(...skipping 655 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
772 Address OffsetToAddress(int offset) { | 772 Address OffsetToAddress(int offset) { |
773 ASSERT_PAGE_OFFSET(offset); | 773 ASSERT_PAGE_OFFSET(offset); |
774 return address() + offset; | 774 return address() + offset; |
775 } | 775 } |
776 | 776 |
777 // --------------------------------------------------------------------- | 777 // --------------------------------------------------------------------- |
778 | 778 |
779 // Page size in bytes. This must be a multiple of the OS page size. | 779 // Page size in bytes. This must be a multiple of the OS page size. |
780 static const int kPageSize = 1 << kPageSizeBits; | 780 static const int kPageSize = 1 << kPageSizeBits; |
781 | 781 |
782 // Object area size in bytes. | 782 // Object area size in bytes of non code objects. |
Michael Starzinger
2014/01/22 16:04:51
nit: s/non code objects/non-code pages/ (or "non-c…" — remainder of this review comment truncated in this capture)
Hannes Payer (out of office)
2014/01/27 14:35:20
Done. Got garbage collected.
| |
783 static const int kNonCodeObjectAreaSize = kPageSize - kObjectStartOffset; | 783 static const int kRegularObjectAreaSize = kPageSize - kObjectStartOffset; |
784 | 784 |
785 // Maximum object size that fits in a page. Objects larger than that size | 785 // Maximum object size that fits in a non-code page. Objects larger than that |
786 // are allocated in large object space and are never moved in memory. This | 786 // size are allocated in large object space and are never moved in memory. |
787 // also applies to new space allocation, since objects are never migrated | 787 // This also applies to new space allocation, since objects are never migrated |
788 // from new space to large object space. Takes double alignment into account. | 788 // from new space to large object space. Takes double alignment into account. |
789 static const int kMaxNonCodeHeapObjectSize = | 789 static const int kMaxRegularHeapObjectSize = |
790 kNonCodeObjectAreaSize - kPointerSize; | 790 kRegularObjectAreaSize - kPointerSize; |
791 | 791 |
792 // Page size mask. | 792 // Page size mask. |
793 static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1; | 793 static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1; |
794 | 794 |
795 inline void ClearGCFields(); | 795 inline void ClearGCFields(); |
796 | 796 |
797 static inline Page* Initialize(Heap* heap, | 797 static inline Page* Initialize(Heap* heap, |
798 MemoryChunk* chunk, | 798 MemoryChunk* chunk, |
799 Executability executable, | 799 Executability executable, |
800 PagedSpace* owner); | 800 PagedSpace* owner); |
(...skipping 272 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
1073 intptr_t AvailableExecutable() { | 1073 intptr_t AvailableExecutable() { |
1074 if (capacity_executable_ < size_executable_) return 0; | 1074 if (capacity_executable_ < size_executable_) return 0; |
1075 return capacity_executable_ - size_executable_; | 1075 return capacity_executable_ - size_executable_; |
1076 } | 1076 } |
1077 | 1077 |
1078 // Returns allocated executable spaces in bytes. | 1078 // Returns allocated executable spaces in bytes. |
1079 intptr_t SizeExecutable() { return size_executable_; } | 1079 intptr_t SizeExecutable() { return size_executable_; } |
1080 | 1080 |
1081 // Returns maximum available bytes that the old space can have. | 1081 // Returns maximum available bytes that the old space can have. |
1082 intptr_t MaxAvailable() { | 1082 intptr_t MaxAvailable() { |
1083 return (Available() / Page::kPageSize) * Page::kMaxNonCodeHeapObjectSize; | 1083 return (Available() / Page::kPageSize) * Page::kMaxRegularHeapObjectSize; |
1084 } | 1084 } |
1085 | 1085 |
1086 // Returns an indication of whether a pointer is in a space that has | 1086 // Returns an indication of whether a pointer is in a space that has |
1087 // been allocated by this MemoryAllocator. | 1087 // been allocated by this MemoryAllocator. |
1088 V8_INLINE bool IsOutsideAllocatedSpace(const void* address) const { | 1088 V8_INLINE bool IsOutsideAllocatedSpace(const void* address) const { |
1089 return address < lowest_ever_allocated_ || | 1089 return address < lowest_ever_allocated_ || |
1090 address >= highest_ever_allocated_; | 1090 address >= highest_ever_allocated_; |
1091 } | 1091 } |
1092 | 1092 |
1093 #ifdef DEBUG | 1093 #ifdef DEBUG |
(...skipping 534 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
1628 bool ContainsPageFreeListItems(Page* p); | 1628 bool ContainsPageFreeListItems(Page* p); |
1629 | 1629 |
1630 FreeListCategory* small_list() { return &small_list_; } | 1630 FreeListCategory* small_list() { return &small_list_; } |
1631 FreeListCategory* medium_list() { return &medium_list_; } | 1631 FreeListCategory* medium_list() { return &medium_list_; } |
1632 FreeListCategory* large_list() { return &large_list_; } | 1632 FreeListCategory* large_list() { return &large_list_; } |
1633 FreeListCategory* huge_list() { return &huge_list_; } | 1633 FreeListCategory* huge_list() { return &huge_list_; } |
1634 | 1634 |
1635 private: | 1635 private: |
1636 // The size range of blocks, in bytes. | 1636 // The size range of blocks, in bytes. |
1637 static const int kMinBlockSize = 3 * kPointerSize; | 1637 static const int kMinBlockSize = 3 * kPointerSize; |
1638 static const int kMaxBlockSize = Page::kMaxNonCodeHeapObjectSize; | 1638 static const int kMaxBlockSize = Page::kMaxRegularHeapObjectSize; |
1639 | 1639 |
1640 FreeListNode* FindNodeFor(int size_in_bytes, int* node_size); | 1640 FreeListNode* FindNodeFor(int size_in_bytes, int* node_size); |
1641 | 1641 |
1642 PagedSpace* owner_; | 1642 PagedSpace* owner_; |
1643 Heap* heap_; | 1643 Heap* heap_; |
1644 | 1644 |
1645 static const int kSmallListMin = 0x20 * kPointerSize; | 1645 static const int kSmallListMin = 0x20 * kPointerSize; |
1646 static const int kSmallListMax = 0xff * kPointerSize; | 1646 static const int kSmallListMax = 0xff * kPointerSize; |
1647 static const int kMediumListMax = 0x7ff * kPointerSize; | 1647 static const int kMediumListMax = 0x7ff * kPointerSize; |
1648 static const int kLargeListMax = 0x3fff * kPointerSize; | 1648 static const int kLargeListMax = 0x3fff * kPointerSize; |
(...skipping 357 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
2006 | 2006 |
2007 class NewSpacePage : public MemoryChunk { | 2007 class NewSpacePage : public MemoryChunk { |
2008 public: | 2008 public: |
2009 // GC related flags copied from from-space to to-space when | 2009 // GC related flags copied from from-space to to-space when |
2010 // flipping semispaces. | 2010 // flipping semispaces. |
2011 static const intptr_t kCopyOnFlipFlagsMask = | 2011 static const intptr_t kCopyOnFlipFlagsMask = |
2012 (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) | | 2012 (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) | |
2013 (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) | | 2013 (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) | |
2014 (1 << MemoryChunk::SCAN_ON_SCAVENGE); | 2014 (1 << MemoryChunk::SCAN_ON_SCAVENGE); |
2015 | 2015 |
2016 static const int kAreaSize = Page::kNonCodeObjectAreaSize; | 2016 static const int kAreaSize = Page::kRegularObjectAreaSize; |
2017 | 2017 |
2018 inline NewSpacePage* next_page() const { | 2018 inline NewSpacePage* next_page() const { |
2019 return static_cast<NewSpacePage*>(next_chunk()); | 2019 return static_cast<NewSpacePage*>(next_chunk()); |
2020 } | 2020 } |
2021 | 2021 |
2022 inline void set_next_page(NewSpacePage* page) { | 2022 inline void set_next_page(NewSpacePage* page) { |
2023 set_next_chunk(page); | 2023 set_next_chunk(page); |
2024 } | 2024 } |
2025 | 2025 |
2026 inline NewSpacePage* prev_page() const { | 2026 inline NewSpacePage* prev_page() const { |
(...skipping 639 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
2666 return RoundDown(size, Map::kSize); | 2666 return RoundDown(size, Map::kSize); |
2667 } else { | 2667 } else { |
2668 return (size / Map::kSize) * Map::kSize; | 2668 return (size / Map::kSize) * Map::kSize; |
2669 } | 2669 } |
2670 } | 2670 } |
2671 | 2671 |
2672 protected: | 2672 protected: |
2673 virtual void VerifyObject(HeapObject* obj); | 2673 virtual void VerifyObject(HeapObject* obj); |
2674 | 2674 |
2675 private: | 2675 private: |
2676 static const int kMapsPerPage = Page::kNonCodeObjectAreaSize / Map::kSize; | 2676 static const int kMapsPerPage = Page::kRegularObjectAreaSize / Map::kSize; |
2677 | 2677 |
2678 // Do map space compaction if there is a page gap. | 2678 // Do map space compaction if there is a page gap. |
2679 int CompactionThreshold() { | 2679 int CompactionThreshold() { |
2680 return kMapsPerPage * (max_map_space_pages_ - 1); | 2680 return kMapsPerPage * (max_map_space_pages_ - 1); |
2681 } | 2681 } |
2682 | 2682 |
2683 const int max_map_space_pages_; | 2683 const int max_map_space_pages_; |
2684 | 2684 |
2685 public: | 2685 public: |
2686 TRACK_MEMORY("MapSpace") | 2686 TRACK_MEMORY("MapSpace") |
(...skipping 247 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
2934 } | 2934 } |
2935 // Must be small, since an iteration is used for lookup. | 2935 // Must be small, since an iteration is used for lookup. |
2936 static const int kMaxComments = 64; | 2936 static const int kMaxComments = 64; |
2937 }; | 2937 }; |
2938 #endif | 2938 #endif |
2939 | 2939 |
2940 | 2940 |
2941 } } // namespace v8::internal | 2941 } } // namespace v8::internal |
2942 | 2942 |
2943 #endif // V8_SPACES_H_ | 2943 #endif // V8_SPACES_H_ |
OLD | NEW |